diff -ruw linux-6.13.12/Makefile linux-6.13.12-fbx/Makefile
--- linux-6.13.12/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/Makefile	2025-09-29 14:40:03.798771439 +0200
@@ -401,6 +401,8 @@
 # CROSS_COMPILE can be set on the command line
 # make CROSS_COMPILE=aarch64-linux-gnu-
 # Alternatively CROSS_COMPILE can be set in the environment.
+# A third alternative is to store a setting in .config so that plain
+# "make" in the configured kernel build directory always uses that.
 # Default value for CROSS_COMPILE is not to prefix executables
 # Note: Some architectures assign CROSS_COMPILE in their arch/*/Makefile
 ARCH		?= $(SUBARCH)
@@ -438,6 +440,9 @@
 KCONFIG_CONFIG	?= .config
 export KCONFIG_CONFIG
 
+CONFIG_CROSS_COMPILE := $(shell grep ^CONFIG_CROSS_COMPILE= $(KCONFIG_CONFIG) | cut -f 2 -d = | tr -d '"')
+CROSS_COMPILE	?= $(CONFIG_CROSS_COMPILE:"%"=%)
+
 # SHELL used by kbuild
 CONFIG_SHELL := sh
 
@@ -1053,6 +1058,9 @@
 KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-option, -Wno-stringop-overflow)
 KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
 
+# Currently disable -Wunterminated-string-initialization, as it is broken
+KBUILD_CFLAGS += $(call cc-option, -Wno-unterminated-string-initialization)
+
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS	+= -fno-strict-overflow
 
@@ -1350,7 +1358,7 @@
 quiet_cmd_headers_install = INSTALL $(INSTALL_HDR_PATH)/include
       cmd_headers_install = \
 	mkdir -p $(INSTALL_HDR_PATH); \
-	rsync -mrl --include='*/' --include='*\.h' --exclude='*' \
+	rsync -cmrl --include='*/' --include='*\.h' --exclude='*' \
 	usr/include $(INSTALL_HDR_PATH)
 
 PHONY += headers_install
diff -ruw linux-6.13.12/arch/arm64/Kconfig linux-6.13.12-fbx/arch/arm64/Kconfig
--- linux-6.13.12/arch/arm64/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/arch/arm64/Kconfig	2025-09-29 14:40:03.798771439 +0200
@@ -2506,4 +2506,3 @@
 source "drivers/acpi/Kconfig"
 
 source "arch/arm64/kvm/Kconfig"
-
diff -ruw linux-6.13.12/arch/arm64/Kconfig.platforms linux-6.13.12-fbx/arch/arm64/Kconfig.platforms
--- linux-6.13.12/arch/arm64/Kconfig.platforms	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/arch/arm64/Kconfig.platforms	2025-09-25 17:40:30.187340548 +0200
@@ -70,6 +70,8 @@
 config ARCH_BCMBCA
 	bool "Broadcom Broadband Carrier Access (BCA) origin SoC"
 	select GPIOLIB
+	select PINCTRL
+	select PINCTRL_BCM63138
 	help
 	  Say Y if you intend to run the kernel on a Broadcom Broadband ARM-based
 	  BCA chipset.
@@ -85,6 +87,14 @@
 	help
 	  This enables support for Broadcom's ARMv8 Set Top Box SoCs
 
+config ARCH_BCM63XX_SHARED_OSH
+	bool "Make shared pages and translation table walks outer shareable"
+	depends on ARCH_BCMBCA
+	default y
+	help
+	  This is required for HW coherency on bcm63158. Say Y here if
+	  you are compiling a kernel for a bcm63158 board.
+
 endif
 
 config ARCH_BERLIN
@@ -101,6 +111,35 @@
 	help
 	  This enables support for the Bitmain SoC Family.
 
+config ARCH_CORTINA
+	bool
+	select SOC_BUS
+
+config ARCH_CORTINA_ACCESS
+	bool "Cortina Access SoC Platforms"
+	select ARCH_CORTINA
+	help
+	  This enables support for Cortina-Access SoC Family
+
+config CORTINA_SMCC
+	bool
+	help
+	  SMC call for Cortina ARMv8 platform
+
+choice
+	prompt "Cortina Access Arch Type"
+	default ARCH_CORTINA_VENUS
+	depends on ARCH_CORTINA_ACCESS
+
+config ARCH_CORTINA_VENUS
+	bool "Venus"
+	select GENERIC_IRQ_CHIP
+	select CORTINA_SMCC
+	help
+	  This is Cortina Access Venus SoC (CA8289)
+
+endchoice
+
 config ARCH_EXYNOS
 	bool "Samsung Exynos SoC family"
 	select COMMON_CLK_SAMSUNG
@@ -270,6 +309,16 @@
 	help
 	  This enables support for the ARMv8 based Qualcomm chipsets.
 
+config ARCH_QCOM_DTB
+	bool "build qualcomm platforms DTB"
+	depends on ARCH_QCOM
+	default y
+
+config ARCH_QCOM_FBX_DTB
+	bool "build freebox DTB on qualcomm platform"
+	depends on ARCH_QCOM
+	default y
+
 config ARCH_REALTEK
 	bool "Realtek Platforms"
 	select RESET_CONTROLLER
diff -ruw linux-6.13.12/arch/arm64/boot/Makefile linux-6.13.12-fbx/arch/arm64/boot/Makefile
--- linux-6.13.12/arch/arm64/boot/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/arch/arm64/boot/Makefile	2025-09-25 17:40:30.187340548 +0200
@@ -55,3 +55,5 @@
 				$(NM) vmlinux|grep _kernel_codesize|cut -d' ' -f1)
 
 include $(srctree)/drivers/firmware/efi/libstub/Makefile.zboot
+
+subdir-y += dts/
diff -ruw linux-6.13.12/arch/arm64/boot/dts/Makefile linux-6.13.12-fbx/arch/arm64/boot/dts/Makefile
--- linux-6.13.12/arch/arm64/boot/dts/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/Makefile	2025-09-25 17:40:30.187340548 +0200
@@ -12,6 +12,7 @@
 subdir-y += bitmain
 subdir-y += broadcom
 subdir-y += cavium
+subdir-y += cortina-access
 subdir-y += exynos
 subdir-y += freescale
 subdir-y += hisilicon
diff -ruw linux-6.13.12/arch/arm64/boot/dts/amlogic/Makefile linux-6.13.12-fbx/arch/arm64/boot/dts/amlogic/Makefile
--- linux-6.13.12/arch/arm64/boot/dts/amlogic/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/amlogic/Makefile	2025-09-25 17:40:30.195340588 +0200
@@ -10,6 +10,12 @@
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-jethome-jethub-j100.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-jethome-jethub-j110-rev-2.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-jethome-jethub-j110-rev-3.dtb
+DTC_FLAGS += -@
+fbx-boards += \
+	fbxwmr.dtb \
+	fbxwmr-r1.dtb fbxwmr-r2.dtb \
+	fbxwmr-r3.dtb fbxwmr-r4.dtb
+dtb-$(CONFIG_ARCH_MESON) += $(fbx-boards)
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-s400.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12a-fbx8am-brcm.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12a-fbx8am-realtek.dtb
diff -ruw linux-6.13.12/arch/arm64/boot/dts/broadcom/Makefile linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/Makefile
--- linux-6.13.12/arch/arm64/boot/dts/broadcom/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/Makefile	2025-09-25 17:40:30.215340687 +0200
@@ -16,3 +16,4 @@
 subdir-y	+= bcmbca
 subdir-y	+= northstar2
 subdir-y	+= stingray
+subdir-y	+= bcm63xx
diff -ruw linux-6.13.12/arch/arm64/boot/dts/marvell/Makefile linux-6.13.12-fbx/arch/arm64/boot/dts/marvell/Makefile
--- linux-6.13.12/arch/arm64/boot/dts/marvell/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/marvell/Makefile	2025-09-25 17:40:30.259340905 +0200
@@ -28,7 +28,19 @@
 dtb-$(CONFIG_ARCH_MVEBU) += cn9130-crb-B.dtb
 dtb-$(CONFIG_ARCH_MVEBU) += ac5x-rd-carrier-cn9131.dtb
 dtb-$(CONFIG_ARCH_MVEBU) += ac5-98dx35xx-rd.dtb
-dtb-$(CONFIG_ARCH_MVEBU) += cn9130-cf-base.dtb
-dtb-$(CONFIG_ARCH_MVEBU) += cn9130-cf-pro.dtb
-dtb-$(CONFIG_ARCH_MVEBU) += cn9131-cf-solidwan.dtb
-dtb-$(CONFIG_ARCH_MVEBU) += cn9132-clearfog.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp1_dsl_lte.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp1_ftth_p2p.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp2_ftth_p2p.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp2_ftth_pon.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp1_test_module.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp2_test_module.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_pcie_pine_pericom.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_pcie_pine_asmedia.dtb
+
+dtb-$(CONFIG_ARCH_MVEBU) += jbxgw7r.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += jbxgw7r_exp1_ftth_p2p.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += jbxgw7r_exp2_ftth_p2p.dtb
+
+# export symbols in DTB files to allow overlay usage
+DTC_FLAGS	+= -@
diff -ruw linux-6.13.12/arch/arm64/boot/dts/qcom/Makefile linux-6.13.12-fbx/arch/arm64/boot/dts/qcom/Makefile
--- linux-6.13.12/arch/arm64/boot/dts/qcom/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/qcom/Makefile	2025-09-25 17:40:30.291341064 +0200
@@ -1,288 +1,302 @@
 # SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8016-sbc.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8016-sbc.dtb
 
 apq8016-sbc-usb-host-dtbs	:= apq8016-sbc.dtb apq8016-sbc-usb-host.dtbo
 
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8016-sbc-usb-host.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8016-sbc-d3-camera-mezzanine.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8016-schneider-hmibsc.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8039-t2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8094-sony-xperia-kitakami-karin_windy.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8096-db820c.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8096-ifc6640.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5018-rdp432-c2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5018-tplink-archer-ax55-v1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5332-rdp441.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5332-rdp442.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5332-rdp468.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5332-rdp474.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq6018-cp01-c1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq8074-hk01.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq8074-hk10-c1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq8074-hk10-c2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq9574-rdp418.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq9574-rdp433.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq9574-rdp449.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq9574-rdp453.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq9574-rdp454.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8216-samsung-fortuna3g.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-acer-a1-724.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-alcatel-idol347.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-asus-z00l.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-gplus-fl8005a.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-huawei-g7.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-lg-c50.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-lg-m216.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-longcheer-l8150.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-longcheer-l8910.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-motorola-harpia.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-motorola-osprey.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-motorola-surnia.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-a3u-eur.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-a5u-eur.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-e5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-e7.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-gprimeltecan.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-grandmax.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-grandprimelte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-gt510.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-gt58.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-j3ltetw.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-j5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-j5x.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-rossa.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-serranove.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-thwc-uf896.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-thwc-ufi001c.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-wingtech-wt86518.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-wingtech-wt86528.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-wingtech-wt88047.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-yiming-uz801v3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8929-wingtech-wt82918hd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8939-huawei-kiwi.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8939-longcheer-l9100.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8939-samsung-a7.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8939-sony-xperia-kanuti-tulip.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8939-wingtech-wt82918.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8939-wingtech-wt82918hd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-motorola-potter.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-daisy.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-mido.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-tissot.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-vince.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8956-sony-xperia-loire-kugo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8956-sony-xperia-loire-suzu.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-lg-bullhead-rev-10.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-lg-bullhead-rev-101.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-lg-h815.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-msft-lumia-octagon-talkman.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-xiaomi-libra.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-huawei-angler-rev-101.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-msft-lumia-octagon-cityman.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-ivy.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-karin.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-satsuki.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-sumire.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-suzuran.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-oneplus3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-oneplus3t.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-sony-xperia-tone-dora.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-sony-xperia-tone-kagura.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-sony-xperia-tone-keyaki.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-xiaomi-gemini.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996pro-xiaomi-natrium.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996pro-xiaomi-scorpio.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-asus-novago-tp370ql.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-fxtec-pro1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-hp-envy-x2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-lenovo-miix-630.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-oneplus-cheeseburger.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-oneplus-dumpling.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-sony-xperia-yoshino-lilac.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-sony-xperia-yoshino-maple.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-sony-xperia-yoshino-poplar.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-xiaomi-sagit.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcm6490-fairphone-fp5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcm6490-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcm6490-shift-otter.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs404-evb-1000.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs404-evb-4000.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs6490-rb3gen2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs8550-aim300-aiot.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs9100-ride.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs9100-ride-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qdu1000-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb2210-rb1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb4210-rb2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb5165-rb5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8016-sbc-usb-host.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8016-sbc-d3-camera-mezzanine.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8016-schneider-hmibsc.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8039-t2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8094-sony-xperia-kitakami-karin_windy.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8096-db820c.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8096-ifc6640.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5018-rdp432-c2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5018-tplink-archer-ax55-v1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5332-rdp441.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5332-rdp442.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5332-rdp468.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5332-rdp474.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq6018-cp01-c1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq8074-hk01.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq8074-hk10-c1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq8074-hk10-c2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp418.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp433.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp449.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp453.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp454.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8216-samsung-fortuna3g.dtb
+dtb-$(CONFIG_ARCH_QCOM_FBX_DTB)	+= fbxgw9r.dtb jbxgw9r.dtb fbxgw9r-ltd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-acer-a1-724.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-alcatel-idol347.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-asus-z00l.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-gplus-fl8005a.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-huawei-g7.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-lg-c50.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-lg-m216.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-longcheer-l8150.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-longcheer-l8910.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-motorola-harpia.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-motorola-osprey.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-motorola-surnia.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-a3u-eur.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-a5u-eur.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-e5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-e7.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-gprimeltecan.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-grandmax.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-grandprimelte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-gt510.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-gt58.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-j3ltetw.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-j5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-j5x.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-rossa.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-serranove.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-thwc-uf896.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-thwc-ufi001c.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-wingtech-wt86518.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-wingtech-wt86528.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-wingtech-wt88047.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-yiming-uz801v3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8929-wingtech-wt82918hd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8939-huawei-kiwi.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8939-longcheer-l9100.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8939-samsung-a7.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8939-sony-xperia-kanuti-tulip.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8939-wingtech-wt82918.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8939-wingtech-wt82918hd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-motorola-potter.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-daisy.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-mido.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-tissot.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-vince.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8956-sony-xperia-loire-kugo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8956-sony-xperia-loire-suzu.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-lg-bullhead-rev-10.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-lg-bullhead-rev-101.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-lg-h815.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-msft-lumia-octagon-talkman.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-xiaomi-libra.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-huawei-angler-rev-101.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-msft-lumia-octagon-cityman.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-ivy.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-karin.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-satsuki.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-sumire.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-suzuran.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-oneplus3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-oneplus3t.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-sony-xperia-tone-dora.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-sony-xperia-tone-kagura.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-sony-xperia-tone-keyaki.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-xiaomi-gemini.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996pro-xiaomi-natrium.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996pro-xiaomi-scorpio.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-asus-novago-tp370ql.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-fxtec-pro1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-hp-envy-x2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-lenovo-miix-630.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-oneplus-cheeseburger.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-oneplus-dumpling.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-sony-xperia-yoshino-lilac.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-sony-xperia-yoshino-maple.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-sony-xperia-yoshino-poplar.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-xiaomi-sagit.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcm6490-fairphone-fp5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcm6490-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcm6490-shift-otter.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs404-evb-1000.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs404-evb-4000.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs6490-rb3gen2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs8550-aim300-aiot.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs9100-ride.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs9100-ride-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qdu1000-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb2210-rb1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb4210-rb2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb5165-rb5.dtb
 
 qrb5165-rb5-vision-mezzanine-dtbs	:= qrb5165-rb5.dtb qrb5165-rb5-vision-mezzanine.dtbo
 
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb5165-rb5-vision-mezzanine.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qru1000-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8155p-adp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8295p-adp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8540p-ride.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8775p-ride.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8775p-ride-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-acer-aspire1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r3-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-homestar-r2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-homestar-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-homestar-r4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-kingoftown.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r1-kb.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r3-kb.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r3-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r9.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r9-kb.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r9-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r10.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r10-kb.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r10-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-r4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-r9.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-r10.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-nots-r4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-nots-r5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-nots-r9.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-nots-r10.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-lte-parade.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-lte-ti.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-parade.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-ti.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel360-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel360-wifi.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r2-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r3-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-quackingstick-r0.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-quackingstick-r0-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-boe.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-inx.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-inx-rt5682s.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-boe-rt5682s.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-crd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-crd-pro.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-evoker.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-evoker-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-herobrine-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-villager-r0.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-villager-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-villager-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie-nvme.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie-nvme-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-idp2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-crd-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc8180x-lenovo-flex-5g.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc8180x-primus.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc8280xp-crd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc8280xp-lenovo-thinkpad-x13s.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc8280xp-microsoft-arcata.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sda660-inforce-ifc6560.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm450-lenovo-tbx605f.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm450-motorola-ali.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-ganges-kirin.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-nile-discovery.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-nile-pioneer.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-nile-voyager.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm632-fairphone-fp3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm632-motorola-ocean.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm636-sony-xperia-ganges-mermaid.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm660-xiaomi-lavender.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm670-google-sargo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-db845c.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb5165-rb5-vision-mezzanine.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qru1000-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8155p-adp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8295p-adp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8540p-ride.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8775p-ride.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8775p-ride-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-acer-aspire1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r3-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-homestar-r2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-homestar-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-homestar-r4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-kingoftown.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r1-kb.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r3-kb.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r3-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r9.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r9-kb.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r9-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r10.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r10-kb.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r10-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-r4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-r9.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-r10.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-nots-r4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-nots-r5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-nots-r9.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-nots-r10.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-lte-parade.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-lte-ti.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-parade.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-ti.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel360-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel360-wifi.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r2-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r3-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-quackingstick-r0.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-quackingstick-r0-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-boe.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-inx.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-inx-rt5682s.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-boe-rt5682s.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-crd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-crd-pro.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-evoker.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-evoker-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-herobrine-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-villager-r0.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-villager-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-villager-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie-nvme.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie-nvme-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-idp2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-crd-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc8180x-lenovo-flex-5g.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc8180x-primus.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc8280xp-crd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc8280xp-lenovo-thinkpad-x13s.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc8280xp-microsoft-arcata.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sda660-inforce-ifc6560.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm450-lenovo-tbx605f.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm450-motorola-ali.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-ganges-kirin.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-nile-discovery.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-nile-pioneer.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-nile-voyager.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm632-fairphone-fp3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm632-motorola-ocean.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm636-sony-xperia-ganges-mermaid.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm660-xiaomi-lavender.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm670-google-sargo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-cheza-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-cheza-r2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-cheza-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-db845c.dtb
 
 sdm845-db845c-navigation-mezzanine-dtbs	:= sdm845-db845c.dtb sdm845-db845c-navigation-mezzanine.dtbo
 
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-db845c-navigation-mezzanine.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-lg-judyln.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-lg-judyp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-oneplus-enchilada.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-oneplus-fajita.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-samsung-starqltechn.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-sony-xperia-tama-akari.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-sony-xperia-tama-akatsuki.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-sony-xperia-tama-apollo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-xiaomi-beryllium-ebbg.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-xiaomi-beryllium-tianma.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-xiaomi-polaris.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-shift-axolotl.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm850-lenovo-yoga-c630.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm850-samsung-w737.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdx75-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm4250-oneplus-billie2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm4450-qrd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6115-fxtec-pro1x.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6115p-lenovo-j606f.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6125-sony-xperia-seine-pdx201.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6125-xiaomi-laurel-sprout.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6350-sony-xperia-lena-pdx213.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6375-sony-xperia-murray-pdx225.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm7125-xiaomi-curtana.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm7125-xiaomi-joyeuse.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm7225-fairphone-fp4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm7325-nothing-spacewar.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-microsoft-surface-duo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-sony-xperia-kumano-bahamut.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-sony-xperia-kumano-griffin.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-sony-xperia-edo-pdx203.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-sony-xperia-edo-pdx206.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-xiaomi-elish-boe.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-xiaomi-elish-csot.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-xiaomi-pipa.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-microsoft-surface-duo2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-sony-xperia-sagami-pdx214.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-sony-xperia-sagami-pdx215.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-qrd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-sony-xperia-nagara-pdx223.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-sony-xperia-nagara-pdx224.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8550-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8550-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8550-qrd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8550-samsung-q5q.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8550-sony-xperia-yodo-pdx234.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-db845c-navigation-mezzanine.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-lg-judyln.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-lg-judyp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-oneplus-enchilada.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-oneplus-fajita.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-samsung-starqltechn.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-sony-xperia-tama-akari.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-sony-xperia-tama-akatsuki.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-sony-xperia-tama-apollo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-xiaomi-beryllium-ebbg.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-xiaomi-beryllium-tianma.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-xiaomi-polaris.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-shift-axolotl.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm850-lenovo-yoga-c630.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm850-samsung-w737.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdx75-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm4250-oneplus-billie2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm4450-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6115-fxtec-pro1x.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6115p-lenovo-j606f.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6125-sony-xperia-seine-pdx201.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6125-xiaomi-laurel-sprout.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6350-sony-xperia-lena-pdx213.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6375-sony-xperia-murray-pdx225.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm7125-xiaomi-curtana.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm7125-xiaomi-joyeuse.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm7225-fairphone-fp4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm7325-nothing-spacewar.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-microsoft-surface-duo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-sony-xperia-kumano-bahamut.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-sony-xperia-kumano-griffin.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-sony-xperia-edo-pdx203.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-sony-xperia-edo-pdx206.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-xiaomi-elish-boe.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-xiaomi-elish-csot.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-xiaomi-pipa.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-microsoft-surface-duo2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-sony-xperia-sagami-pdx214.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-sony-xperia-sagami-pdx215.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-sony-xperia-nagara-pdx223.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-sony-xperia-nagara-pdx224.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8550-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8550-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8550-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8550-samsung-q5q.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8550-sony-xperia-yodo-pdx234.dtb
 
 sm8650-hdk-display-card-dtbs	:= sm8650-hdk.dtb sm8650-hdk-display-card.dtbo
 
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8650-hdk-display-card.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8650-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8650-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8650-qrd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e78100-lenovo-thinkpad-t14s.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e80100-asus-vivobook-s15.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e80100-crd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e80100-dell-xps13-9345.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e80100-lenovo-yoga-slim7x.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e80100-microsoft-romulus13.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e80100-microsoft-romulus15.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e80100-qcp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8650-hdk-display-card.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8650-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8650-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8650-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e78100-lenovo-thinkpad-t14s.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e80100-asus-vivobook-s15.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e80100-crd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e80100-dell-xps13-9345.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e80100-lenovo-yoga-slim7x.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e80100-microsoft-romulus13.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e80100-microsoft-romulus15.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e80100-qcp.dtb
+
+always-$(CONFIG_ARCH_QCOM_FBX_DTB)	+= fbxgw9r_dtbs
+clean-files				+= fbxgw9r_dtbs
+board-dtbs				=  \
+					fbxgw9r.dtb \
+					fbxgw9r-ltd.dtb \
+					jbxgw9r.dtb
+
+cmd_dtbs               = ./scripts/dtbs.sh $@ $^
+quiet_cmd_dtbs         = DTBS    $@
+
+$(obj)/fbxgw9r_dtbs: $(addprefix $(obj)/,$(board-dtbs))
+	$(call cmd,dtbs)
diff -ruw linux-6.13.12/arch/arm64/include/asm/assembler.h linux-6.13.12-fbx/arch/arm64/include/asm/assembler.h
--- linux-6.13.12/arch/arm64/include/asm/assembler.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/arch/arm64/include/asm/assembler.h	2025-09-25 17:40:30.419341699 +0200
@@ -404,6 +404,45 @@
 
 /*
  * Macro to perform a data cache maintenance for the interval
+ *	[kaddr, kaddr + size)
+ *	This macro does not do "data synchronization barrier". Caller should
+ *	do "dsb" after transaction.
+ *
+ *	op:     operation passed to dc instruction
+ *	kaddr:      starting virtual address of the region
+ *	size:       size of the region
+ *	Corrupts:   kaddr, size, tmp1, tmp2
+ */
+	.macro dcache_by_line_op_no_dsb op, kaddr, size, tmp1, tmp2
+	dcache_line_size \tmp1, \tmp2
+	add \size, \kaddr, \size
+	sub \tmp2, \tmp1, #1
+	bic \kaddr, \kaddr, \tmp2
+9998:
+	.ifc    \op, cvau
+	__dcache_op_workaround_clean_cache \op, \kaddr
+	.else
+	.ifc	\op, cvac
+	__dcache_op_workaround_clean_cache \op, \kaddr
+	.else
+	.ifc	\op, cvap
+	sys	3, c7, c12, 1, \kaddr	// dc cvap
+	.else
+	.ifc	\op, cvadp
+	sys	3, c7, c13, 1, \kaddr	// dc cvadp
+	.else
+	dc	\op, \kaddr
+	.endif
+	.endif
+	.endif
+	.endif
+	add	\kaddr, \kaddr, \tmp1
+	cmp	\kaddr, \size
+	b.lo	9998b
+	.endm
+
+/*
+ * Macro to perform a data cache maintenance for the interval
  * [start, end)
  *
  * 	op:		operation passed to dc instruction
diff -ruw linux-6.13.12/arch/arm64/include/asm/cacheflush.h linux-6.13.12-fbx/arch/arm64/include/asm/cacheflush.h
--- linux-6.13.12/arch/arm64/include/asm/cacheflush.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/arch/arm64/include/asm/cacheflush.h	2025-09-25 17:40:30.419341699 +0200
@@ -79,6 +79,17 @@
 extern long caches_clean_inval_user_pou(unsigned long start, unsigned long end);
 extern void sync_icache_aliases(unsigned long start, unsigned long end);
 
+extern void dmac_flush_range(const void *start, const void *end);
+extern void dmac_inv_range(const void *start, const void *end);
+extern void dmac_clean_range(const void *start, const void *end);
+extern void __dma_flush_area_no_dsb(const void *start, size_t size);
+extern void __dma_inv_area_no_dsb(const void *start, size_t size);
+extern void __dma_clean_area_no_dsb(const void *start, size_t size);
+
+extern void dmac_flush_range_no_dsb(const void *start, const void *end);
+extern void dmac_inv_range_no_dsb(const void *start, const void *end);
+extern void dmac_clean_range_no_dsb(const void *start, const void *end);
+
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
 	caches_clean_inval_pou(start, end);
diff -ruw linux-6.13.12/arch/arm64/include/asm/memory.h linux-6.13.12-fbx/arch/arm64/include/asm/memory.h
--- linux-6.13.12/arch/arm64/include/asm/memory.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/arch/arm64/include/asm/memory.h	2025-09-25 17:40:30.431341758 +0200
@@ -104,7 +104,7 @@
 #define _KASAN_SHADOW_START(va)	(KASAN_SHADOW_END - (UL(1) << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
 #define KASAN_SHADOW_START	_KASAN_SHADOW_START(vabits_actual)
 #define PAGE_END		KASAN_SHADOW_START
-#define KASAN_THREAD_SHIFT	1
+#define KASAN_THREAD_SHIFT	2
 #else
 #define KASAN_THREAD_SHIFT	0
 #define PAGE_END		(_PAGE_END(VA_BITS_MIN))
diff -ruw linux-6.13.12/arch/arm64/include/asm/pgtable-hwdef.h linux-6.13.12-fbx/arch/arm64/include/asm/pgtable-hwdef.h
--- linux-6.13.12/arch/arm64/include/asm/pgtable-hwdef.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/arch/arm64/include/asm/pgtable-hwdef.h	2025-09-25 17:40:30.435341778 +0200
@@ -165,7 +165,11 @@
 #define PTE_TABLE_BIT		(_AT(pteval_t, 1) << 1)
 #define PTE_USER		(_AT(pteval_t, 1) << 6)		/* AP[1] */
 #define PTE_RDONLY		(_AT(pteval_t, 1) << 7)		/* AP[2] */
+#ifdef CONFIG_ARCH_BCM63XX_SHARED_OSH
+#define PTE_SHARED		(_AT(pteval_t, 2) << 8)		/* SH[1:0], outer shareable */
+#else
 #define PTE_SHARED		(_AT(pteval_t, 3) << 8)		/* SH[1:0], inner shareable */
+#endif
 #define PTE_AF			(_AT(pteval_t, 1) << 10)	/* Access Flag */
 #define PTE_NG			(_AT(pteval_t, 1) << 11)	/* nG */
 #define PTE_GP			(_AT(pteval_t, 1) << 50)	/* BTI guarded */
@@ -283,12 +287,19 @@
 
 #define TCR_SH0_SHIFT		12
 #define TCR_SH0_MASK		(UL(3) << TCR_SH0_SHIFT)
+#define TCR_SH0_OUTER		(UL(2) << TCR_SH0_SHIFT)
 #define TCR_SH0_INNER		(UL(3) << TCR_SH0_SHIFT)
 
 #define TCR_SH1_SHIFT		28
 #define TCR_SH1_MASK		(UL(3) << TCR_SH1_SHIFT)
+#define TCR_SH1_OUTER		(UL(2) << TCR_SH1_SHIFT)
 #define TCR_SH1_INNER		(UL(3) << TCR_SH1_SHIFT)
+
+#ifdef CONFIG_ARCH_BCM63XX_SHARED_OSH
+#define TCR_SHARED		(TCR_SH0_OUTER | TCR_SH1_OUTER)
+#else
 #define TCR_SHARED		(TCR_SH0_INNER | TCR_SH1_INNER)
+#endif
 
 #define TCR_TG0_SHIFT		14
 #define TCR_TG0_MASK		(UL(3) << TCR_TG0_SHIFT)
diff -ruw linux-6.13.12/arch/arm64/mm/cache.S linux-6.13.12-fbx/arch/arm64/mm/cache.S
--- linux-6.13.12/arch/arm64/mm/cache.S	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/arch/arm64/mm/cache.S	2025-09-25 17:40:30.511342155 +0200
@@ -164,6 +164,64 @@
 SYM_FUNC_ALIAS(dcache_inval_poc, __pi_dcache_inval_poc)
 
 /*
+ *  __dma_inv_area_no_dsb(start, size)
+ *
+ *	This macro does not do "data synchronization barrier". Caller should
+ *	do "dsb" after transaction.
+ *
+ *	 start   - virtual start address of region
+ *	 size    - size in question
+ */
+SYM_FUNC_START(__dma_inv_area_no_dsb)
+	add	x1, x1, x0
+	dcache_line_size	x2, x3
+	sub	x3, x2, #1
+	tst	x1, x3				// end cache line aligned?
+	bic	x1, x1, x3
+	b.eq	1f
+	dc	civac, x1			// clean & invalidate D / U line
+1:	tst	x0, x3				// start cache line aligned?
+	bic	x0, x0, x3
+	b.eq    2f
+	dc	civac, x0			// clean & invalidate D / U line
+	b	3f
+2:	dc  ivac, x0			// invalidate D / U line
+3:	add x0, x0, x2
+	cmp	x0, x1
+	b.lo	2b
+	ret
+SYM_FUNC_END(__dma_inv_area_no_dsb)
+
+/*
+ *  __dma_clean_area_no_dsb(start, size)
+ *
+ *	This macro does not do "data synchronization barrier". Caller should
+ *	do "dsb" after transaction.
+ *
+ *	 start   - virtual start address of region
+ *	 size    - size in question
+ */
+SYM_FUNC_START(__dma_clean_area_no_dsb)
+	dcache_by_line_op_no_dsb cvac, x0, x1, x2, x3
+	ret
+SYM_FUNC_END(__dma_clean_area_no_dsb)
+
+/*
+ *  __dma_flush_area_no_dsb(start, size)
+ *
+ *	clean & invalidate D / U line
+ *	This macro does not do "data synchronization barrier". Caller should
+ *	do "dsb" after transaction.
+ *
+ *	 start   - virtual start address of region
+ *	 size    - size in question
+ */
+SYM_FUNC_START(__dma_flush_area_no_dsb)
+	dcache_by_line_op_no_dsb civac, x0, x1, x2, x3
+	ret
+SYM_FUNC_END(__dma_flush_area_no_dsb)
+
+/*
  *	dcache_clean_poc(start, end)
  *
  * 	Ensure that any D-cache lines for the interval [start, end)
diff -ruw linux-6.13.12/arch/arm64/mm/flush.c linux-6.13.12-fbx/arch/arm64/mm/flush.c
--- linux-6.13.12/arch/arm64/mm/flush.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/arch/arm64/mm/flush.c	2025-09-25 17:40:30.511342155 +0200
@@ -100,3 +100,39 @@
 }
 EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
 #endif
+
+void dmac_flush_range(const void *start, const void *end)
+{
+	dcache_clean_inval_poc((unsigned long)start, (unsigned long)end);
+}
+EXPORT_SYMBOL(dmac_flush_range);
+
+void dmac_flush_range_no_dsb(const void *start, const void *end)
+{
+	__dma_flush_area_no_dsb(start, (void *)(end) - (void *)(start));
+}
+EXPORT_SYMBOL(dmac_flush_range_no_dsb);
+
+void dmac_inv_range(const void *start, const void *end)
+{
+	dcache_inval_poc((unsigned long)start, (unsigned long)(end));
+}
+EXPORT_SYMBOL(dmac_inv_range);
+
+void dmac_inv_range_no_dsb(const void *start, const void *end)
+{
+	__dma_inv_area_no_dsb(start, (void *)(end) - (void *)(start));
+}
+EXPORT_SYMBOL(dmac_inv_range_no_dsb);
+
+void dmac_clean_range(const void *start, const void *end)
+{
+	dcache_clean_poc((unsigned long)start, (unsigned long)end);
+}
+EXPORT_SYMBOL(dmac_clean_range);
+
+void dmac_clean_range_no_dsb(const void *start, const void *end)
+{
+	__dma_clean_area_no_dsb(start, (void *)(end) - (void *)(start));
+}
+EXPORT_SYMBOL(dmac_clean_range_no_dsb);
diff -ruw linux-6.13.12/block/blk-flush.c linux-6.13.12-fbx/block/blk-flush.c
--- linux-6.13.12/block/blk-flush.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/block/blk-flush.c	2025-09-25 17:40:31.115345150 +0200
@@ -157,10 +157,21 @@
 	rq->flush.seq |= seq;
 	cmd_flags = rq->cmd_flags;
 
-	if (likely(!error))
+	if (likely(!error)) {
 		seq = blk_flush_cur_seq(rq);
-	else
+	} else {
 		seq = REQ_FSEQ_DONE;
+		printk_once(KERN_ERR "%s: flush failed: data integrity problem\n",
+				   rq->q->disk ? rq->q->disk->disk_name : "?");
+		/*
+		 * returning an error to the FS is wrong: the data is all
+		 * there, it just might not be written out in the expected
+		 * order and thus have a window where the integrity is suspect
+		 * in a crash.  Given the small likelihood of actually
+		 * crashing, we should just log a warning here.
+		 */
+		error = 0;
+	}
 
 	switch (seq) {
 	case REQ_FSEQ_PREFLUSH:
diff -ruw linux-6.13.12/block/blk-mq.c linux-6.13.12-fbx/block/blk-mq.c
--- linux-6.13.12/block/blk-mq.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/block/blk-mq.c	2025-09-25 17:40:31.119345170 +0200
@@ -1031,7 +1031,7 @@
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
 
-static inline void blk_account_io_done(struct request *req, u64 now)
+static inline void blk_account_io_done(struct request *req, u64 now, blk_status_t error)
 {
 	trace_block_io_done(req);
 
@@ -1049,6 +1049,8 @@
 		part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
 		part_stat_local_dec(req->part,
 				    in_flight[op_is_write(req_op(req))]);
+		if (error)
+			part_stat_inc(req->part, io_errors[rq_data_dir(req)]);
 		part_stat_unlock();
 	}
 }
@@ -1112,19 +1114,19 @@
 	part_stat_unlock();
 }
 
-static inline void __blk_mq_end_request_acct(struct request *rq, u64 now)
+static inline void __blk_mq_end_request_acct(struct request *rq, u64 now, blk_status_t error)
 {
 	if (rq->rq_flags & RQF_STATS)
 		blk_stat_add(rq, now);
 
 	blk_mq_sched_completed_request(rq, now);
-	blk_account_io_done(rq, now);
+	blk_account_io_done(rq, now, error);
 }
 
 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)
 {
 	if (blk_mq_need_time_stamp(rq))
-		__blk_mq_end_request_acct(rq, blk_time_get_ns());
+		__blk_mq_end_request_acct(rq, blk_time_get_ns(), error);
 
 	blk_mq_finish_request(rq);
 
@@ -1175,7 +1177,7 @@
 
 		blk_complete_request(rq);
 		if (iob->need_ts)
-			__blk_mq_end_request_acct(rq, now);
+			__blk_mq_end_request_acct(rq, now, 0);
 
 		blk_mq_finish_request(rq);
 
@@ -3232,7 +3234,7 @@
 	blk_mq_run_dispatch_ops(q,
 			ret = blk_mq_request_issue_directly(rq, true));
 	if (ret)
-		blk_account_io_done(rq, blk_time_get_ns());
+		blk_account_io_done(rq, blk_time_get_ns(), 0);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
diff -ruw linux-6.13.12/block/genhd.c linux-6.13.12-fbx/block/genhd.c
--- linux-6.13.12/block/genhd.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/block/genhd.c	2025-09-25 17:40:31.123345189 +0200
@@ -1004,6 +1004,7 @@
 		"%8u %8u %8u "
 		"%8lu %8lu %8llu %8u "
 		"%8lu %8u"
+		" %8lu %8lu"
 		"\n",
 		stat.ios[STAT_READ],
 		stat.merges[STAT_READ],
@@ -1025,7 +1026,9 @@
 		(unsigned long long)stat.sectors[STAT_DISCARD],
 		(unsigned int)div_u64(stat.nsecs[STAT_DISCARD], NSEC_PER_MSEC),
 		stat.ios[STAT_FLUSH],
-		(unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC));
+		(unsigned int)div_u64(stat.nsecs[STAT_FLUSH], NSEC_PER_MSEC),
+		part_stat_read(bdev, io_errors[READ]),
+		part_stat_read(bdev, io_errors[WRITE]));
 }
 
 ssize_t part_inflight_show(struct device *dev, struct device_attribute *attr,
diff -ruw linux-6.13.12/block/partitions/Kconfig linux-6.13.12-fbx/block/partitions/Kconfig
--- linux-6.13.12/block/partitions/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/block/partitions/Kconfig	2025-09-25 17:40:31.127345209 +0200
@@ -279,4 +279,9 @@
 	  The format for the device tree node is just like MTD fixed-partition
 	  schema.
 
+config OF_PARTITION_IGNORE_RO
+	bool "ignore read-only flag"
+	depends on OF_PARTITION
+	select MMC_BLOCK_IGNORE_RO_AREA if MMC_BLOCK
+
 endmenu
diff -ruw linux-6.13.12/block/partitions/Makefile linux-6.13.12-fbx/block/partitions/Makefile
--- linux-6.13.12/block/partitions/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/block/partitions/Makefile	2025-09-25 17:40:31.127345209 +0200
@@ -12,7 +12,6 @@
 obj-$(CONFIG_MAC_PARTITION) += mac.o
 obj-$(CONFIG_LDM_PARTITION) += ldm.o
 obj-$(CONFIG_MSDOS_PARTITION) += msdos.o
-obj-$(CONFIG_OF_PARTITION) += of.o
 obj-$(CONFIG_OSF_PARTITION) += osf.o
 obj-$(CONFIG_SGI_PARTITION) += sgi.o
 obj-$(CONFIG_SUN_PARTITION) += sun.o
@@ -21,3 +20,5 @@
 obj-$(CONFIG_EFI_PARTITION) += efi.o
 obj-$(CONFIG_KARMA_PARTITION) += karma.o
 obj-$(CONFIG_SYSV68_PARTITION) += sysv68.o
+#obj-$(CONFIG_OF_PARTITION) += of.o
+obj-$(CONFIG_OF_PARTITION) += dt.o
diff -ruw linux-6.13.12/block/partitions/check.h linux-6.13.12-fbx/block/partitions/check.h
--- linux-6.13.12/block/partitions/check.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/block/partitions/check.h	2025-09-25 17:40:31.127345209 +0200
@@ -68,3 +68,4 @@
 int sun_partition(struct parsed_partitions *state);
 int sysv68_partition(struct parsed_partitions *state);
 int ultrix_partition(struct parsed_partitions *state);
+int dt_partition(struct parsed_partitions *);
diff -ruw linux-6.13.12/block/partitions/core.c linux-6.13.12-fbx/block/partitions/core.c
--- linux-6.13.12/block/partitions/core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/block/partitions/core.c	2025-09-25 17:40:31.127345209 +0200
@@ -44,7 +44,7 @@
 	cmdline_partition,
 #endif
 #ifdef CONFIG_OF_PARTITION
-	of_partition,		/* cmdline have priority to OF */
+	dt_partition,		/* cmdline have priority to OF */
 #endif
 #ifdef CONFIG_EFI_PARTITION
 	efi_partition,		/* this must come before msdos */
diff -ruw linux-6.13.12/drivers/Kconfig linux-6.13.12-fbx/drivers/Kconfig
--- linux-6.13.12/drivers/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/Kconfig	2025-09-25 17:40:31.179345467 +0200
@@ -23,6 +23,8 @@
 
 source "drivers/gnss/Kconfig"
 
+source "drivers/fbxprocfs/Kconfig"
+
 source "drivers/mtd/Kconfig"
 
 source "drivers/of/Kconfig"
@@ -79,6 +81,10 @@
 
 source "drivers/gpio/Kconfig"
 
+source "drivers/fbxgpio/Kconfig"
+
+source "drivers/fbxjtag/Kconfig"
+
 source "drivers/w1/Kconfig"
 
 source "drivers/power/Kconfig"
@@ -87,6 +93,8 @@
 
 source "drivers/thermal/Kconfig"
 
+source "drivers/fbxwatchdog/Kconfig"
+
 source "drivers/watchdog/Kconfig"
 
 source "drivers/ssb/Kconfig"
diff -ruw linux-6.13.12/drivers/Makefile linux-6.13.12-fbx/drivers/Makefile
--- linux-6.13.12/drivers/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/Makefile	2025-09-25 17:40:31.179345467 +0200
@@ -20,7 +20,9 @@
 # LEDs must come before PCI, it is needed by NPEM driver
 obj-y				+= leds/
 
+obj-$(CONFIG_FREEBOX_GPIO)	+= fbxgpio/
 obj-y				+= pci/
+obj-$(CONFIG_FREEBOX_JTAG)	+= fbxjtag/
 
 obj-$(CONFIG_PARISC)		+= parisc/
 obj-$(CONFIG_RAPIDIO)		+= rapidio/
@@ -120,6 +122,7 @@
 obj-y				+= power/
 obj-$(CONFIG_HWMON)		+= hwmon/
 obj-$(CONFIG_THERMAL)		+= thermal/
+obj-$(CONFIG_FREEBOX_WATCHDOG)	+= fbxwatchdog/
 obj-$(CONFIG_WATCHDOG)		+= watchdog/
 obj-$(CONFIG_MD)		+= md/
 obj-$(CONFIG_BT)		+= bluetooth/
@@ -195,3 +198,5 @@
 obj-$(CONFIG_DPLL)		+= dpll/
 
 obj-$(CONFIG_S390)		+= s390/
+
+obj-$(CONFIG_FREEBOX_PROCFS)	+= fbxprocfs/
diff -ruw linux-6.13.12/drivers/base/regmap/internal.h linux-6.13.12-fbx/drivers/base/regmap/internal.h
--- linux-6.13.12/drivers/base/regmap/internal.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/base/regmap/internal.h	2025-09-25 17:40:31.295346042 +0200
@@ -345,4 +345,6 @@
 #define regmap_init_raw_ram(dev, config, data)				\
 	__regmap_lockdep_wrapper(__regmap_init_raw_ram, #dev, dev, config, data)
 
+void *regmap_mmio_ctx_get_base(const void *priv);
+
 #endif
diff -ruw linux-6.13.12/drivers/base/regmap/regmap-mmio.c linux-6.13.12-fbx/drivers/base/regmap/regmap-mmio.c
--- linux-6.13.12/drivers/base/regmap/regmap-mmio.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/base/regmap/regmap-mmio.c	2025-09-25 17:40:31.295346042 +0200
@@ -609,4 +609,10 @@
 }
 EXPORT_SYMBOL_GPL(regmap_mmio_detach_clk);
 
+void *regmap_mmio_ctx_get_base(const void *priv)
+{
+	struct regmap_mmio_context *ctx = (struct regmap_mmio_context *)priv;
+	return ctx->regs;
+}
+
 MODULE_LICENSE("GPL v2");
diff -ruw linux-6.13.12/drivers/base/regmap/regmap.c linux-6.13.12-fbx/drivers/base/regmap/regmap.c
--- linux-6.13.12/drivers/base/regmap/regmap.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/base/regmap/regmap.c	2025-09-25 17:40:31.299346062 +0200
@@ -3512,6 +3512,15 @@
 }
 EXPORT_SYMBOL_GPL(regmap_parse_val);
 
+#ifdef CONFIG_REGMAP_MMIO
+void *regmap_get_mmio_base_address(struct regmap *map)
+{
+	return regmap_mmio_ctx_get_base(map->bus_context);
+}
+
+EXPORT_SYMBOL_GPL(regmap_get_mmio_base_address);
+#endif
+
 static int __init regmap_initcall(void)
 {
 	regmap_debugfs_initcall();
diff -ruw linux-6.13.12/drivers/bus/mhi/host/boot.c linux-6.13.12-fbx/drivers/bus/mhi/host/boot.c
--- linux-6.13.12/drivers/bus/mhi/host/boot.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/bus/mhi/host/boot.c	2025-09-25 17:40:31.335346241 +0200
@@ -16,8 +16,12 @@
 #include <linux/random.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
+#include <linux/pci.h>
+#include <soc/qcom/license-manager-simple.h>
 #include "internal.h"
 
+#define PCIE_PCIE_LOCAL_REG_PCIE_LOCAL_RSV1     0x3168
+
 /* Setup RDDM vector table for RDDM transfer and program RXVEC */
 int mhi_rddm_prepare(struct mhi_controller *mhi_cntrl,
 		     struct image_info *img_info)
@@ -303,7 +307,7 @@
 	struct mhi_buf *mhi_buf = image_info->mhi_buf;
 
 	for (i = 0; i < image_info->entries; i++, mhi_buf++)
-		dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
+		mhi_dma_free_coherent_no_dev(mhi_buf->len,
 				  mhi_buf->buf, mhi_buf->dma_addr);
 
 	kfree(image_info->mhi_buf);
@@ -340,7 +344,7 @@
 			vec_size = sizeof(struct bhi_vec_entry) * i;
 
 		mhi_buf->len = vec_size;
-		mhi_buf->buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
+		mhi_buf->buf = mhi_dma_alloc_coherent_no_dev(
 						  vec_size, &mhi_buf->dma_addr,
 						  GFP_KERNEL);
 		if (!mhi_buf->buf)
@@ -355,7 +359,7 @@
 
 error_alloc_segment:
 	for (--i, --mhi_buf; i >= 0; i--, mhi_buf--)
-		dma_free_coherent(mhi_cntrl->cntrl_dev, mhi_buf->len,
+		mhi_dma_free_coherent_no_dev(mhi_buf->len,
 				  mhi_buf->buf, mhi_buf->dma_addr);
 
 error_alloc_mhi_buf:
@@ -385,6 +389,38 @@
 	}
 }
 
+static void mhi_download_fw_license(struct mhi_controller *mhi_cntrl)
+{
+	struct device *dev = &mhi_cntrl->mhi_dev->dev;
+	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
+	int err;
+
+	err = lm_get_license_pcidev(pdev, &mhi_cntrl->license_buf);
+
+	if (err) {
+		mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
+				PCIE_PCIE_LOCAL_REG_PCIE_LOCAL_RSV1, 0x0);
+		if (err != -ENOSYS)
+			dev_info(dev, "Unable to get license file from "
+				 "manager: %d\n", err);
+		return ;
+	}
+
+	/*
+	 * Let device know the about license data. The device used by
+	 * the license manager driver to allocate the dma coherent
+	 * memory has a 32bit dma coherent mask, so the DMA address so
+	 * using the lower 32bits of the DMA address will always work.
+	 */
+	mhi_write_reg(mhi_cntrl, mhi_cntrl->regs,
+		      PCIE_PCIE_LOCAL_REG_PCIE_LOCAL_RSV1,
+		      lower_32_bits(mhi_cntrl->license_buf.dma_addr));
+	dev_dbg(dev, "DMA address 0x%x is copied to EP's RSV1\n",
+		lower_32_bits(mhi_cntrl->license_buf.dma_addr));
+
+	return ;
+}
+
 void mhi_fw_load_handler(struct mhi_controller *mhi_cntrl)
 {
 	const struct firmware *firmware = NULL;
@@ -452,7 +488,7 @@
 	fw_sz = firmware->size;
 
 skip_req_fw:
-	buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, size, &dma_addr,
+	buf = mhi_dma_alloc_coherent_no_dev(size, &dma_addr,
 				 GFP_KERNEL);
 	if (!buf) {
 		release_firmware(firmware);
@@ -462,7 +498,7 @@
 	/* Download image using BHI */
 	memcpy(buf, fw_data, size);
 	ret = mhi_fw_load_bhi(mhi_cntrl, dma_addr, size);
-	dma_free_coherent(mhi_cntrl->cntrl_dev, size, buf, dma_addr);
+	mhi_dma_free_coherent_no_dev(size, buf, dma_addr);
 
 	/* Error or in EDL mode, we're done */
 	if (ret) {
@@ -528,11 +564,17 @@
 	struct image_info *image_info = mhi_cntrl->fbc_image;
 	struct device *dev = &mhi_cntrl->mhi_dev->dev;
 	enum mhi_pm_state new_state;
+	struct pci_dev *pdev = to_pci_dev(mhi_cntrl->cntrl_dev);
 	int ret;
 
 	if (!image_info)
 		return -EIO;
 
+	if (pdev && pdev->device == QCN9224_DEVICE_ID) {
+		/* Download the License */
+		mhi_download_fw_license(mhi_cntrl);
+	}
+
 	ret = mhi_fw_load_bhie(mhi_cntrl,
 			       /* Vector table is the last entry */
 			       &image_info->mhi_buf[image_info->entries - 1]);
diff -ruw linux-6.13.12/drivers/bus/mhi/host/init.c linux-6.13.12-fbx/drivers/bus/mhi/host/init.c
--- linux-6.13.12/drivers/bus/mhi/host/init.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/bus/mhi/host/init.c	2025-09-25 17:40:31.339346261 +0200
@@ -1472,6 +1472,70 @@
 	.dev_groups = mhi_dev_groups,
 };
 
+struct list_head mhi_dma_cache_list = LIST_HEAD_INIT(mhi_dma_cache_list);
+DEFINE_MUTEX(mhi_dma_cache_lock);
+
+static void mhi_dma_cache_cleanup(void)
+{
+	struct mhi_dma_cache_entry *e, *tmp;
+
+	mutex_lock(&mhi_dma_cache_lock);
+
+	list_for_each_entry_safe(e, tmp, &mhi_dma_cache_list, next) {
+		dma_free_coherent_no_dev(e->size, e->vaddr, e->paddr);
+		list_del(&e->next);
+		kfree(e);
+	}
+
+	mutex_unlock(&mhi_dma_cache_lock);
+}
+
+void* mhi_dma_alloc_coherent_no_dev(size_t sz, dma_addr_t *paddr, int flags)
+{
+	struct mhi_dma_cache_entry *e, *found = NULL;
+	void *vaddr;
+
+	mutex_lock(&mhi_dma_cache_lock);
+
+	list_for_each_entry(e, &mhi_dma_cache_list, next) {
+		if (e->size == sz) {
+			list_del(&e->next);
+			found = e;
+			break;
+		}
+	}
+	mutex_unlock(&mhi_dma_cache_lock);
+
+	if (found) {
+		vaddr = e->vaddr;
+		*paddr = e->paddr;
+		kfree(e);
+		return vaddr;
+	}
+
+	return dma_alloc_coherent_no_dev(sz, paddr, flags);
+}
+
+void mhi_dma_free_coherent_no_dev(size_t sz, void *vaddr, dma_addr_t paddr)
+{
+	struct mhi_dma_cache_entry *e;
+
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (!e)
+		goto free_buffer;
+
+	e->vaddr = vaddr;
+	e->paddr = paddr;
+	e->size = sz;
+
+	mutex_lock(&mhi_dma_cache_lock);
+	list_add_tail(&e->next, &mhi_dma_cache_list);
+	mutex_unlock(&mhi_dma_cache_lock);
+	return;
+free_buffer:
+	dma_free_coherent_no_dev(sz, vaddr, paddr);
+}
+
 static int __init mhi_init(void)
 {
 	mhi_debugfs_init();
@@ -1481,6 +1545,7 @@
 static void __exit mhi_exit(void)
 {
 	mhi_debugfs_exit();
+	mhi_dma_cache_cleanup();
 	bus_unregister(&mhi_bus_type);
 }
 
diff -ruw linux-6.13.12/drivers/bus/mhi/host/internal.h linux-6.13.12-fbx/drivers/bus/mhi/host/internal.h
--- linux-6.13.12/drivers/bus/mhi/host/internal.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/bus/mhi/host/internal.h	2025-09-25 17:40:31.339346261 +0200
@@ -15,6 +15,20 @@
 #define MHI_SOC_RESET_REQ_OFFSET			0xb0
 #define MHI_SOC_RESET_REQ				BIT(0)
 
+#define QCN9224_DEVICE_ID				(0x1109)
+#define SOC_HW_VERSION_OFFS				0x224
+#define SOC_HW_VERSION_FAM_NUM_BMSK			GENMASK(31, 28)
+#define SOC_HW_VERSION_DEV_NUM_BMSK			GENMASK(27, 16)
+#define SOC_HW_VERSION_MAJOR_VER_BMSK			GENMASK(15, 8)
+#define SOC_HW_VERSION_MINOR_VER_BMSK			GENMASK(7, 0)
+
+struct mhi_dma_cache_entry {
+	struct list_head next;
+	size_t size;
+	void *vaddr;
+	dma_addr_t paddr;
+};
+
 struct mhi_ctxt {
 	struct mhi_event_ctxt *er_ctxt;
 	struct mhi_chan_ctxt *chan_ctxt;
@@ -420,4 +434,7 @@
 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl,
 			     struct mhi_buf_info *buf_info);
 
+void *mhi_dma_alloc_coherent_no_dev(size_t sz, dma_addr_t *paddr, int flags);
+void mhi_dma_free_coherent_no_dev(size_t sz, void *vaddr, dma_addr_t paddr);
+
 #endif /* _MHI_INT_H */
diff -ruw linux-6.13.12/drivers/bus/mhi/host/pm.c linux-6.13.12-fbx/drivers/bus/mhi/host/pm.c
--- linux-6.13.12/drivers/bus/mhi/host/pm.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/bus/mhi/host/pm.c	2025-09-25 17:40:31.339346261 +0200
@@ -14,6 +14,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/wait.h>
+#include <soc/qcom/license-manager-simple.h>
 #include "internal.h"
 #include "trace.h"
 
@@ -464,6 +465,11 @@
 	mhi_cntrl->wake_put(mhi_cntrl, false);
 	read_unlock_bh(&mhi_cntrl->pm_lock);
 
+	/*
+	 * Free license buffer
+	 */
+	lm_free_license(&mhi_cntrl->license_buf);
+
 	return ret;
 }
 
diff -ruw linux-6.13.12/drivers/char/Kconfig linux-6.13.12-fbx/drivers/char/Kconfig
--- linux-6.13.12/drivers/char/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/char/Kconfig	2025-09-25 17:40:31.343346280 +0200
@@ -317,6 +317,15 @@
 	  memory.
 	  When in doubt, say "Y".
 
+config DEVPHYSMEM
+	bool "/dev/physmem virtual device support"
+	default n
+	help
+	  Say Y here if you want to support the /dev/physmem device. The
+	  /dev/physmem device allows unprivileged access to physical memory
+	  unused by the kernel.
+	  When in doubt, say "N".
+
 config NVRAM
 	tristate "/dev/nvram support"
 	depends on X86 || HAVE_ARCH_NVRAM_OPS
@@ -424,3 +433,5 @@
 	  driver include crash and makedumpfile.
 
 endmenu
+
+source "drivers/char/diag/Kconfig"
diff -ruw linux-6.13.12/drivers/char/Makefile linux-6.13.12-fbx/drivers/char/Makefile
--- linux-6.13.12/drivers/char/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/char/Makefile	2025-09-25 17:40:31.343346280 +0200
@@ -43,3 +43,5 @@
 obj-$(CONFIG_XILLYBUS_CLASS)	+= xillybus/
 obj-$(CONFIG_POWERNV_OP_PANEL)	+= powernv-op-panel.o
 obj-$(CONFIG_ADI)		+= adi.o
+
+obj-$(CONFIG_DIAG_CHAR)		+= diag/
diff -ruw linux-6.13.12/drivers/char/hw_random/Kconfig linux-6.13.12-fbx/drivers/char/hw_random/Kconfig
--- linux-6.13.12/drivers/char/hw_random/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/char/hw_random/Kconfig	2025-09-25 17:40:31.355346340 +0200
@@ -125,6 +125,11 @@
 
 	  If unsure, say Y.
 
+config HW_RANDOM_BCM63XX
+	tristate "Broadcom BCM63xx Random Number Generator support"
+	depends on ARCH_BCMBCA || BCM63XX
+	default HW_RANDOM
+
 config HW_RANDOM_IPROC_RNG200
 	tristate "Broadcom iProc/STB RNG200 support"
 	depends on ARCH_BCM_IPROC || ARCH_BCM2835 || ARCH_BCMBCA || ARCH_BRCMSTB || COMPILE_TEST
@@ -613,6 +618,17 @@
 
 	  If unsure, say Y.
 
+config HW_RANDOM_QCOM
+	tristate "Qualcomm Random Number Generator Driver"
+	depends on ARCH_QCOM || COMPILE_TEST
+	help
+	  This driver provides support for the Random Number
+	  Generator hardware found on Qualcomm SoCs.
+
+config HW_RANDOM_CLP800
+	depends on HAS_IOMEM && OF
+	tristate "Elliptic CLP800 Hardware Random Generator Driver"
+
 endif # HW_RANDOM
 
 config UML_RANDOM
diff -ruw linux-6.13.12/drivers/char/hw_random/Makefile linux-6.13.12-fbx/drivers/char/hw_random/Makefile
--- linux-6.13.12/drivers/char/hw_random/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/char/hw_random/Makefile	2025-09-25 17:40:31.355346340 +0200
@@ -33,6 +33,7 @@
 obj-$(CONFIG_HW_RANDOM_HISTB) += histb-rng.o
 obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o
 obj-$(CONFIG_HW_RANDOM_BCM74110) += bcm74110-rng.o
+obj-$(CONFIG_HW_RANDOM_BCM63XX) += bcm63xx-rng.o
 obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o
 obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o
 obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o
@@ -52,3 +53,5 @@
 obj-$(CONFIG_HW_RANDOM_POLARFIRE_SOC) += mpfs-rng.o
 obj-$(CONFIG_HW_RANDOM_ROCKCHIP) += rockchip-rng.o
 obj-$(CONFIG_HW_RANDOM_JH7110) += jh7110-trng.o
+obj-$(CONFIG_HW_RANDOM_QCOM) += qcom-rng.o
+obj-$(CONFIG_HW_RANDOM_CLP800) += clp800-rng.o
diff -ruw linux-6.13.12/drivers/char/mem.c linux-6.13.12-fbx/drivers/char/mem.c
--- linux-6.13.12/drivers/char/mem.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/char/mem.c	2025-09-25 17:40:31.367346399 +0200
@@ -28,6 +28,8 @@
 #include <linux/export.h>
 #include <linux/io.h>
 #include <linux/uio.h>
+#include <linux/memblock.h>
+
 #include <linux/uaccess.h>
 #include <linux/security.h>
 
@@ -383,6 +385,14 @@
 	return 0;
 }
 
+static int mmap_physmem(struct file * file, struct vm_area_struct * vma)
+{
+	if (vma->vm_pgoff < max_pfn && !capable(CAP_SYS_RAWIO))
+		return -EPERM;
+
+	return mmap_mem(file, vma);
+}
+
 #ifdef CONFIG_DEVPORT
 static ssize_t read_port(struct file *file, char __user *buf,
 			 size_t count, loff_t *ppos)
@@ -626,6 +636,11 @@
 	return 0;
 }
 
+static int open_physmem(struct inode * inode, struct file * filp)
+{
+	return 0;
+}
+
 #define zero_lseek	null_lseek
 #define full_lseek      null_lseek
 #define write_zero	write_null
@@ -687,6 +702,14 @@
 	.splice_read	= copy_splice_read,
 };
 
+static const struct file_operations __maybe_unused physmem_fops = {
+	.mmap		= mmap_physmem,
+	.open		= open_physmem,
+#ifndef CONFIG_MMU
+	.get_unmapped_area = get_unmapped_area_mem,
+#endif
+};
+
 static const struct memdev {
 	const char *name;
 	const struct file_operations *fops;
@@ -707,6 +730,9 @@
 #ifdef CONFIG_PRINTK
 	[11] = { "kmsg", &kmsg_fops, 0, 0644 },
 #endif
+#ifdef CONFIG_DEVPHYSMEM
+	[16] = { "physmem", &physmem_fops, 0, 0 },
+#endif
 };
 
 static int memory_open(struct inode *inode, struct file *filp)
diff -ruw linux-6.13.12/drivers/clk/Kconfig linux-6.13.12-fbx/drivers/clk/Kconfig
--- linux-6.13.12/drivers/clk/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/clk/Kconfig	2025-09-25 17:40:31.375346439 +0200
@@ -497,6 +497,7 @@
 source "drivers/clk/analogbits/Kconfig"
 source "drivers/clk/baikal-t1/Kconfig"
 source "drivers/clk/bcm/Kconfig"
+source "drivers/clk/cortina/Kconfig"
 source "drivers/clk/hisilicon/Kconfig"
 source "drivers/clk/imgtec/Kconfig"
 source "drivers/clk/imx/Kconfig"
diff -ruw linux-6.13.12/drivers/clk/Makefile linux-6.13.12-fbx/drivers/clk/Makefile
--- linux-6.13.12/drivers/clk/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/clk/Makefile	2025-09-25 17:40:31.375346439 +0200
@@ -117,6 +117,7 @@
 obj-$(CONFIG_CLK_BAIKAL_T1)		+= baikal-t1/
 obj-y					+= bcm/
 obj-$(CONFIG_ARCH_BERLIN)		+= berlin/
+obj-y					+= cortina/
 obj-$(CONFIG_ARCH_DAVINCI)		+= davinci/
 obj-$(CONFIG_ARCH_HISI)			+= hisilicon/
 obj-y					+= imgtec/
diff -ruw linux-6.13.12/drivers/clk/qcom/Kconfig linux-6.13.12-fbx/drivers/clk/qcom/Kconfig
--- linux-6.13.12/drivers/clk/qcom/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/clk/qcom/Kconfig	2025-09-25 17:40:31.403346578 +0200
@@ -272,6 +272,12 @@
 	  Say Y or M if you want to use network features of switch or
 	  PHY device. Select this for the root clock of qca8k.
 
+config IPQ_NSSCC_9574
+	tristate "IPQ9574 NSS Clock Controller"
+	depends on IPQ_GCC_9574
+	help
+	  Support for NSS clock controller on ipq9574 devices.
+
 config MSM_GCC_8660
 	tristate "MSM8660 Global Clock Controller"
 	depends on ARM || COMPILE_TEST
diff -ruw linux-6.13.12/drivers/clk/qcom/Makefile linux-6.13.12-fbx/drivers/clk/qcom/Makefile
--- linux-6.13.12/drivers/clk/qcom/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/clk/qcom/Makefile	2025-09-25 17:40:31.403346578 +0200
@@ -37,6 +37,7 @@
 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
 obj-$(CONFIG_IPQ_GCC_8074) += gcc-ipq8074.o
 obj-$(CONFIG_IPQ_GCC_9574) += gcc-ipq9574.o
+obj-$(CONFIG_IPQ_NSSCC_9574) += nsscc-ipq9574.o
 obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
 obj-$(CONFIG_IPQ_NSSCC_QCA8K) += nsscc-qca8k.o
 obj-$(CONFIG_MDM_GCC_9607) += gcc-mdm9607.o
diff -ruw linux-6.13.12/drivers/cpufreq/Kconfig linux-6.13.12-fbx/drivers/cpufreq/Kconfig
--- linux-6.13.12/drivers/cpufreq/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/cpufreq/Kconfig	2025-09-25 17:40:31.483346975 +0200
@@ -325,6 +325,10 @@
 	  This adds the CPUFreq driver support for Freescale QorIQ SoCs
 	  which are capable of changing the CPU's frequency dynamically.
 
+config BCM63158_CPUFREQ
+	tristate "CPU frequency scaling driver for BCM63158 SoC"
+	depends on ARCH_BCMBCA
+
 config ACPI_CPPC_CPUFREQ
 	tristate "CPUFreq driver based on the ACPI CPPC spec"
 	depends on ACPI_PROCESSOR
diff -ruw linux-6.13.12/drivers/cpufreq/Kconfig.arm linux-6.13.12-fbx/drivers/cpufreq/Kconfig.arm
--- linux-6.13.12/drivers/cpufreq/Kconfig.arm	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/cpufreq/Kconfig.arm	2025-09-25 17:40:31.483346975 +0200
@@ -86,6 +86,14 @@
 
 	  If in doubt, say N.
 
+config ARM_CORTINA_CPUFREQ
+	tristate "Cortina Access CPUfreq driver"
+	depends on ARCH_CORTINA_ACCESS && CPUFREQ_DT
+	select PM_OPP
+	help
+	  This enables the Cortina Access CPUfreq driver.
+
+	  If in doubt, say N.
 config ARM_IMX6Q_CPUFREQ
 	tristate "Freescale i.MX6 cpufreq support"
 	depends on ARCH_MXC
diff -ruw linux-6.13.12/drivers/cpufreq/Makefile linux-6.13.12-fbx/drivers/cpufreq/Makefile
--- linux-6.13.12/drivers/cpufreq/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/cpufreq/Makefile	2025-09-25 17:40:31.483346975 +0200
@@ -58,6 +58,7 @@
 obj-$(CONFIG_ARM_ARMADA_8K_CPUFREQ)	+= armada-8k-cpufreq.o
 obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ)	+= brcmstb-avs-cpufreq.o
 obj-$(CONFIG_ACPI_CPPC_CPUFREQ)		+= cppc_cpufreq.o
+obj-$(CONFIG_ARM_CORTINA_CPUFREQ)	+= cortina-cpufreq.o
 obj-$(CONFIG_ARCH_DAVINCI)		+= davinci-cpufreq.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ)	+= highbank-cpufreq.o
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)		+= imx6q-cpufreq.o
@@ -107,3 +108,5 @@
 obj-$(CONFIG_SH_CPU_FREQ)		+= sh-cpufreq.o
 obj-$(CONFIG_SPARC_US2E_CPUFREQ)	+= sparc-us2e-cpufreq.o
 obj-$(CONFIG_SPARC_US3_CPUFREQ)		+= sparc-us3-cpufreq.o
+
+obj-$(CONFIG_BCM63158_CPUFREQ)		+= bcm63158-cpufreq.o
diff -ruw linux-6.13.12/drivers/gpio/Kconfig linux-6.13.12-fbx/drivers/gpio/Kconfig
--- linux-6.13.12/drivers/gpio/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/gpio/Kconfig	2025-09-25 17:40:31.651347808 +0200
@@ -1087,6 +1087,11 @@
 	  enough to represent all pins, but the driver will assume a
 	  register layout for 64 pins (8 registers).
 
+config GPIO_FBXGWR_PMU
+	tristate "Freebox PMU I2C GPIO expander"
+	depends on MFD_FBXGWR_PMU
+	select GPIOLIB_IRQCHIP
+
 config GPIO_FXL6408
 	tristate "FXL6408 I2C GPIO expander"
 	select GPIO_REGMAP
@@ -1915,6 +1920,9 @@
 	  This enables the GPIO simulator - a configfs-based GPIO testing
 	  driver.
 
+config GPIOLIB_NONEXCLUSIVE_TEST
+	tristate "GPIO non exclusive reference test driver"
+
 endmenu
 
 menu "GPIO Debugging utilities"
diff -ruw linux-6.13.12/drivers/gpio/Makefile linux-6.13.12-fbx/drivers/gpio/Makefile
--- linux-6.13.12/drivers/gpio/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/gpio/Makefile	2025-09-25 17:40:31.651347808 +0200
@@ -64,6 +64,7 @@
 obj-$(CONFIG_GPIO_EP93XX)		+= gpio-ep93xx.o
 obj-$(CONFIG_GPIO_EXAR)			+= gpio-exar.o
 obj-$(CONFIG_GPIO_F7188X)		+= gpio-f7188x.o
+obj-$(CONFIG_GPIO_FBXGWR_PMU)		+= gpio-fbxgwr-pmu.o
 obj-$(CONFIG_GPIO_FTGPIO010)		+= gpio-ftgpio010.o
 obj-$(CONFIG_GPIO_FXL6408)		+= gpio-fxl6408.o
 obj-$(CONFIG_GPIO_GE_FPGA)		+= gpio-ge.o
@@ -206,3 +207,4 @@
 obj-$(CONFIG_GPIO_ZEVIO)		+= gpio-zevio.o
 obj-$(CONFIG_GPIO_ZYNQ)			+= gpio-zynq.o
 obj-$(CONFIG_GPIO_ZYNQMP_MODEPIN)	+= gpio-zynqmp-modepin.o
+obj-$(CONFIG_GPIOLIB_NONEXCLUSIVE_TEST)	+= gpiolib-nonexclusive-test.o
diff -ruw linux-6.13.12/drivers/gpio/gpiolib.c linux-6.13.12-fbx/drivers/gpio/gpiolib.c
--- linux-6.13.12/drivers/gpio/gpiolib.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/gpio/gpiolib.c	2025-09-25 17:40:31.683347966 +0200
@@ -2398,12 +2398,23 @@
 	return ret;
 }
 
-static void gpiod_free_commit(struct gpio_desc *desc)
+static bool gpiod_free_commit(struct gpio_desc *desc)
 {
 	unsigned long flags;
 
 	might_sleep();
 
+	if (test_bit(FLAG_NONEXCLUSIVE_REF, &desc->flags)) {
+		/*
+		 * non exclusive use GPIO descriptor, free the memory
+		 * allocated in make_nonexclusive_desc().
+		 */
+		kfree(desc->name);
+		kfree(desc->label);
+		kfree(desc);
+		return false;
+	}
+
 	CLASS(gpio_chip_guard, guard)(desc);
 
 	flags = READ_ONCE(desc->flags);
@@ -2432,13 +2443,15 @@
 #endif
 		gpiod_line_state_notify(desc, GPIO_V2_LINE_CHANGED_RELEASED);
 	}
+	return true;
 }
 
 void gpiod_free(struct gpio_desc *desc)
 {
 	VALIDATE_DESC_VOID(desc);
 
-	gpiod_free_commit(desc);
+	if (!gpiod_free_commit(desc))
+		return;
 	module_put(desc->gdev->owner);
 	gpio_device_put(desc->gdev);
 }
@@ -4386,6 +4399,38 @@
 	return desc;
 }
 
+/*
+ * For non exclusive access to a gpio_desc, kmalloc a new one, and set
+ * its flags with FLAG_NONEXCLUSIVE_REF to recognize them when
+ * freeing.
+ */
+static struct gpio_desc *make_nonexclusive_desc(struct gpio_desc *desc)
+{
+	struct gpio_desc *ret;
+
+	ret = kmemdup(desc, sizeof (*desc), GFP_KERNEL);
+	if (!ret)
+		return ERR_PTR(-ENOMEM);
+	ret->label = NULL;
+
+	if (desc_set_label(ret, desc->label->str))
+		goto fail_enomem;
+
+	ret->name = kstrdup(desc->name, GFP_KERNEL);
+	if (!ret->name && desc->name)
+		goto fail_enomem;
+
+	ret->real_desc = desc;
+	set_bit(FLAG_NONEXCLUSIVE_REF, &ret->flags);
+	return ret;
+
+fail_enomem:
+	kfree(ret->label);
+	kfree(ret->name);
+	kfree(ret);
+	return ERR_PTR(-ENOMEM);
+}
+
 struct gpio_desc *gpiod_find_and_request(struct device *consumer,
 					 struct fwnode_handle *fwnode,
 					 const char *con_id,
@@ -4441,7 +4486,7 @@
 		 * FIXME: Make this more sane and safe.
 		 */
 		dev_info(consumer, "nonexclusive access to GPIO for %s\n", name);
-		return desc;
+		return make_nonexclusive_desc(desc);
 	}
 
 	ret = gpiod_configure_flags(desc, con_id, lookupflags, flags);
diff -ruw linux-6.13.12/drivers/gpio/gpiolib.h linux-6.13.12-fbx/drivers/gpio/gpiolib.h
--- linux-6.13.12/drivers/gpio/gpiolib.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/gpio/gpiolib.h	2025-09-25 17:40:31.683347966 +0200
@@ -204,6 +204,7 @@
 #define FLAG_EDGE_FALLING    17	/* GPIO CDEV detects falling edge events */
 #define FLAG_EVENT_CLOCK_REALTIME	18 /* GPIO CDEV reports REALTIME timestamps in events */
 #define FLAG_EVENT_CLOCK_HTE		19 /* GPIO CDEV reports hardware timestamps in events */
+#define FLAG_NONEXCLUSIVE_REF		20 /* gpio_desc is kmalloc()'d because it is a non exclusive ref */
 
 	/* Connection label */
 	struct gpio_desc_label __rcu *label;
@@ -216,6 +217,12 @@
 	/* debounce period in microseconds */
 	unsigned int		debounce_period_us;
 #endif
+
+	/*
+	 * valid only for gpio_descs with FLAG_NONEXCLUSIVE_REF set,
+	 * points to the GPIO descriptors within struct gpio_device::descs[].
+	 */
+	struct gpio_desc *real_desc;
 };
 
 #define gpiod_not_found(desc)		(IS_ERR(desc) && PTR_ERR(desc) == -ENOENT)
@@ -275,9 +282,14 @@
 
 /*
  * Return the GPIO number of the passed descriptor relative to its chip
+ *
+ * In the case of a non exclusive GPIO descriptor, we must use the
+ * actual descriptor with the descs[] array.
  */
 static inline int gpio_chip_hwgpio(const struct gpio_desc *desc)
 {
+	if (test_bit(FLAG_NONEXCLUSIVE_REF, &desc->flags))
+		desc = desc->real_desc;
 	return desc - &desc->gdev->descs[0];
 }
 
diff -ruw linux-6.13.12/drivers/hwmon/Kconfig linux-6.13.12-fbx/drivers/hwmon/Kconfig
--- linux-6.13.12/drivers/hwmon/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/hwmon/Kconfig	2025-09-25 17:40:32.659352806 +0200
@@ -345,6 +345,14 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called fam15h_power.
 
+config SENSORS_FBXGWR_PMU
+	tristate "Freebox GWR PMU hardware monitoring driver"
+	depends on MFD_FBXGWR_PMU
+
+config SENSORS_PERICOM_PCIE
+	bool "Pericom's PI7C9X3G606GP PCIe switch hardware monitoring driver"
+	depends on PCIEPORTBUS
+
 config SENSORS_APPLESMC
 	tristate "Apple SMC (Motion sensor, light sensor, keyboard backlight)"
 	depends on INPUT && X86
@@ -2576,6 +2584,17 @@
 	  sensors monitor various telemetry data of different components on the
 	  card, e.g. board temperature, FPGA core temperature/voltage/current.
 
+config SENSORS_KIRKWOOD_CORETEMP
+	tristate "Kirkwood core temperature sensor"
+	depends on MACH_KIRKWOOD
+
+config SENSORS_LD6710_FBX
+	tristate "LD6710 hardware monitoring driver (as seen on Freebox hardware)"
+	depends on I2C
+
+config SENSORS_AP806
+	tristate "Marvell AP806/CP110 hardware monitoring driver"
+
 if ACPI
 
 comment "ACPI drivers"
diff -ruw linux-6.13.12/drivers/hwmon/Makefile linux-6.13.12-fbx/drivers/hwmon/Makefile
--- linux-6.13.12/drivers/hwmon/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/hwmon/Makefile	2025-09-25 17:40:32.659352806 +0200
@@ -78,6 +78,7 @@
 obj-$(CONFIG_SENSORS_F71805F)	+= f71805f.o
 obj-$(CONFIG_SENSORS_F71882FG)	+= f71882fg.o
 obj-$(CONFIG_SENSORS_F75375S)	+= f75375s.o
+obj-$(CONFIG_SENSORS_FBXGWR_PMU)	+= fbxgwr_pmu_hwmon.o
 obj-$(CONFIG_SENSORS_FAM15H_POWER) += fam15h_power.o
 obj-$(CONFIG_SENSORS_FSCHMD)	+= fschmd.o
 obj-$(CONFIG_SENSORS_FTSTEUTATES) += ftsteutates.o
@@ -110,6 +111,7 @@
 obj-$(CONFIG_SENSORS_K10TEMP)	+= k10temp.o
 obj-$(CONFIG_SENSORS_LAN966X)	+= lan966x-hwmon.o
 obj-$(CONFIG_SENSORS_LENOVO_EC)	+= lenovo-ec-sensors.o
+obj-$(CONFIG_SENSORS_LD6710_FBX) += ld6710-fbx.o
 obj-$(CONFIG_SENSORS_LINEAGE)	+= lineage-pem.o
 obj-$(CONFIG_SENSORS_LOCHNAGAR)	+= lochnagar-hwmon.o
 obj-$(CONFIG_SENSORS_LM63)	+= lm63.o
@@ -186,6 +188,7 @@
 obj-$(CONFIG_SENSORS_PC87427)	+= pc87427.o
 obj-$(CONFIG_SENSORS_PCF8591)	+= pcf8591.o
 obj-$(CONFIG_SENSORS_POWERZ)	+= powerz.o
+obj-$(CONFIG_SENSORS_PERICOM_PCIE)	+= pericom_pcie.o
 obj-$(CONFIG_SENSORS_POWR1220)  += powr1220.o
 obj-$(CONFIG_SENSORS_PT5161L)	+= pt5161l.o
 obj-$(CONFIG_SENSORS_PWM_FAN)	+= pwm-fan.o
@@ -235,6 +238,8 @@
 obj-$(CONFIG_SENSORS_WM831X)	+= wm831x-hwmon.o
 obj-$(CONFIG_SENSORS_WM8350)	+= wm8350-hwmon.o
 obj-$(CONFIG_SENSORS_XGENE)	+= xgene-hwmon.o
+obj-$(CONFIG_SENSORS_KIRKWOOD_CORETEMP)+= kirkwood-coretemp.o
+obj-$(CONFIG_SENSORS_AP806)	+= ap806-hwmon.o
 
 obj-$(CONFIG_SENSORS_OCC)	+= occ/
 obj-$(CONFIG_SENSORS_PECI)	+= peci/
diff -ruw linux-6.13.12/drivers/hwmon/adt7475.c linux-6.13.12-fbx/drivers/hwmon/adt7475.c
--- linux-6.13.12/drivers/hwmon/adt7475.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/hwmon/adt7475.c	2025-09-25 17:40:32.663352826 +0200
@@ -136,7 +136,19 @@
 
 /* Macro to read the registers */
 
-#define adt7475_read(reg) i2c_smbus_read_byte_data(client, (reg))
+static inline s32 __adt7475_read(const struct i2c_client *client, u8 cmd)
+{
+	s32 ret;
+
+	ret = i2c_smbus_read_byte_data(client, cmd);
+	if (ret < 0) {
+		printk("__adt7475_read error: %d\n", ret);
+		return 0;
+	}
+	return ret;
+}
+
+#define adt7475_read(reg) __adt7475_read(client, (reg))
 
 /* Macros to easily index the registers */
 
diff -ruw linux-6.13.12/drivers/i2c/busses/Kconfig linux-6.13.12-fbx/drivers/i2c/busses/Kconfig
--- linux-6.13.12/drivers/i2c/busses/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/i2c/busses/Kconfig	2025-09-25 17:40:32.695352985 +0200
@@ -510,6 +510,10 @@
 
 	  If you do not need I2C interface, say N.
 
+config I2C_CORTINA
+	tristate "Cortina I2C Controller"
+	depends on ARCH_CORTINA || MACH_CORTINA_SATURN || COMPILE_TEST
+
 config I2C_CADENCE
 	tristate "Cadence I2C Controller"
 	depends on ARCH_ZYNQ || ARM64 || XTENSA || RISCV || COMPILE_TEST
@@ -1543,4 +1547,8 @@
           This driver can also be built as a module. If so, the module
           will be called i2c-virtio.
 
+config I2C_FBXGWR_PMU
+	tristate "Freebox PMU I2C Proxy Adapter support"
+	depends on MFD_FBXGWR_PMU
+
 endmenu
diff -ruw linux-6.13.12/drivers/i2c/busses/Makefile linux-6.13.12-fbx/drivers/i2c/busses/Makefile
--- linux-6.13.12/drivers/i2c/busses/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/i2c/busses/Makefile	2025-09-25 17:40:32.695352985 +0200
@@ -16,6 +16,7 @@
 obj-$(CONFIG_I2C_AMD756)	+= i2c-amd756.o
 obj-$(CONFIG_I2C_AMD8111)	+= i2c-amd8111.o
 obj-$(CONFIG_I2C_CHT_WC)	+= i2c-cht-wc.o
+obj-$(CONFIG_I2C_CORTINA)	+= i2c-cortina.o
 obj-$(CONFIG_I2C_I801)		+= i2c-i801.o
 obj-$(CONFIG_I2C_ISCH)		+= i2c-isch.o
 obj-$(CONFIG_I2C_ISMT)		+= i2c-ismt.o
@@ -157,5 +158,6 @@
 obj-$(CONFIG_SCx200_ACB)	+= scx200_acb.o
 obj-$(CONFIG_I2C_FSI)		+= i2c-fsi.o
 obj-$(CONFIG_I2C_VIRTIO)	+= i2c-virtio.o
+obj-$(CONFIG_I2C_FBXGWR_PMU)	+= i2c-fbxgwr-pmu.o
 
 ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff -ruw linux-6.13.12/drivers/i2c/i2c-core-base.c linux-6.13.12-fbx/drivers/i2c/i2c-core-base.c
--- linux-6.13.12/drivers/i2c/i2c-core-base.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/i2c/i2c-core-base.c	2025-09-25 17:40:32.719353104 +0200
@@ -271,12 +271,14 @@
 			bri->set_sda(adap, scl);
 		ndelay(RECOVERY_NDELAY / 2);
 
+		if (0) {
 		if (scl) {
 			ret = i2c_generic_bus_free(adap);
 			if (ret == 0)
 				break;
 		}
 	}
+	}
 
 	/* If we can't check bus status, assume recovery worked */
 	if (ret == -EOPNOTSUPP)
diff -ruw linux-6.13.12/drivers/input/misc/Kconfig linux-6.13.12-fbx/drivers/input/misc/Kconfig
--- linux-6.13.12/drivers/input/misc/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/input/misc/Kconfig	2025-09-25 17:40:32.987354433 +0200
@@ -956,4 +956,9 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called stpmic1_onkey.
 
+config INPUT_SMSC_CAP1066
+	tristate "SMSC CAP1066 capacitive sensor driver"
+	select I2C
+	select INPUT_POLLDEV
+
 endif
diff -ruw linux-6.13.12/drivers/input/misc/Makefile linux-6.13.12-fbx/drivers/input/misc/Makefile
--- linux-6.13.12/drivers/input/misc/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/input/misc/Makefile	2025-09-25 17:40:32.987354433 +0200
@@ -92,3 +92,4 @@
 obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND)	+= xen-kbdfront.o
 obj-$(CONFIG_INPUT_YEALINK)		+= yealink.o
 obj-$(CONFIG_INPUT_IDEAPAD_SLIDEBAR)	+= ideapad_slidebar.o
+obj-$(CONFIG_INPUT_SMSC_CAP1066)	+= smsc_cap1066.o
diff -ruw linux-6.13.12/drivers/irqchip/Kconfig linux-6.13.12-fbx/drivers/irqchip/Kconfig
--- linux-6.13.12/drivers/irqchip/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/irqchip/Kconfig	2025-09-25 17:40:33.047354730 +0200
@@ -39,6 +39,12 @@
 	select GENERIC_IRQ_EFFECTIVE_AFF_MASK if SMP
 	select HAVE_ARM_SMCCC_DISCOVERY
 
+config ARM_GIC_V3_CORTINA
+	bool "compile duplicate GICv3 driver for Cortina broken GIC integration"
+	depends on ARM_GIC_V3
+	depends on ARCH_CORTINA || COMPILE_TEST
+	default ARCH_CORTINA
+
 config ARM_GIC_V3_ITS
 	bool
 	select GENERIC_MSI_IRQ
@@ -137,6 +143,11 @@
 	select GENERIC_IRQ_CHIP
 	select IRQ_DOMAIN
 
+config RX5281_VEC_INT
+	bool "Realtek RX5281 vector interrupt support"
+	depends on MACH_CORTINA_SATURN || BMIPS_GENERIC
+	default MACH_CORTINA_SATURN
+
 config DAVINCI_CP_INTC
 	bool
 	select GENERIC_IRQ_CHIP
@@ -759,4 +770,8 @@
 	  chained controller, routing all interrupt source in P-Chip to
 	  the primary controller on C-Chip.
 
+config CORTINA_PERI_ICTL
+	bool "Cortina peripheral interrupt controller" if COMPILE_TEST
+	default ARCH_CORTINA || MACH_CORTINA_SATURN
+
 endmenu
diff -ruw linux-6.13.12/drivers/irqchip/Makefile linux-6.13.12-fbx/drivers/irqchip/Makefile
--- linux-6.13.12/drivers/irqchip/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/irqchip/Makefile	2025-09-25 17:40:33.047354730 +0200
@@ -34,6 +34,7 @@
 obj-$(CONFIG_ARM_GIC_V3)		+= irq-gic-v3.o irq-gic-v3-mbi.o irq-gic-common.o
 obj-$(CONFIG_ARM_GIC_V3_ITS)		+= irq-gic-v3-its.o irq-gic-v4.o irq-gic-v3-its-msi-parent.o
 obj-$(CONFIG_ARM_GIC_V3_ITS_FSL_MC)	+= irq-gic-v3-its-fsl-mc-msi.o
+obj-$(CONFIG_ARM_GIC_V3_CORTINA)	+= irq-gic-v3-ca.o irq-gic-common-ca.o
 obj-$(CONFIG_PARTITION_PERCPU)		+= irq-partition-percpu.o
 obj-$(CONFIG_HISILICON_IRQ_MBIGEN)	+= irq-mbigen.o
 obj-$(CONFIG_ARM_NVIC)			+= irq-nvic.o
@@ -46,6 +47,7 @@
 obj-$(CONFIG_IRQ_MIPS_CPU)		+= irq-mips-cpu.o
 obj-$(CONFIG_IXP4XX_IRQ)		+= irq-ixp4xx.o
 obj-$(CONFIG_JCORE_AIC)			+= irq-jcore-aic.o
+obj-$(CONFIG_RX5281_VEC_INT)		+= irq-rx5281-vec-cpu.o irq-rx5281-vec-cpu-handler.o
 obj-$(CONFIG_RDA_INTC)			+= irq-rda-intc.o
 obj-$(CONFIG_RENESAS_INTC_IRQPIN)	+= irq-renesas-intc-irqpin.o
 obj-$(CONFIG_RENESAS_IRQC)		+= irq-renesas-irqc.o
@@ -129,3 +131,4 @@
 obj-$(CONFIG_APPLE_AIC)			+= irq-apple-aic.o
 obj-$(CONFIG_MCHP_EIC)			+= irq-mchp-eic.o
 obj-$(CONFIG_SUNPLUS_SP7021_INTC)	+= irq-sp7021-intc.o
+obj-$(CONFIG_CORTINA_PERI_ICTL)		+= irq-cortina-peri-intc.o
diff -ruw linux-6.13.12/drivers/irqchip/irq-gic-common.h linux-6.13.12-fbx/drivers/irqchip/irq-gic-common.h
--- linux-6.13.12/drivers/irqchip/irq-gic-common.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/irqchip/irq-gic-common.h	2025-09-25 17:40:33.055354770 +0200
@@ -35,4 +35,12 @@
 #define RDIST_FLAGS_RD_TABLES_PREALLOCATED     (1 << 1)
 #define RDIST_FLAGS_FORCE_NON_SHAREABLE        (1 << 2)
 
+/*
+ * _ca versions of those functions above to support irq-gicv3-ca.c
+ * code.
+ */
+int gic_configure_irq_ca(unsigned int irq, unsigned int type, phys_addr_t base);
+void gic_dist_config_ca(phys_addr_t base, int gic_irqs, u8 priority);
+void gic_cpu_config_ca(phys_addr_t base, int nr, u8 priority);
+
 #endif /* _IRQ_GIC_COMMON_H */
diff -ruw linux-6.13.12/drivers/leds/Kconfig linux-6.13.12-fbx/drivers/leds/Kconfig
--- linux-6.13.12/drivers/leds/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/leds/Kconfig	2025-09-25 17:40:33.087354928 +0200
@@ -392,6 +392,14 @@
 	  defined as platform devices and/or OpenFirmware platform devices.
 	  The code to use these bindings can be selected below.
 
+config LEDS_FBXGWR_PMU
+	tristate "Freebox GWR PMU LED controller"
+	depends on MFD_FBXGWR_PMU
+
+config LEDS_FBXGWR_ANNIV
+	tristate "Freebox GWR anniversary LED controller"
+	depends on I2C_FBXGWR_PMU
+
 config LEDS_LP3944
 	tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
 	depends on LEDS_CLASS
@@ -804,6 +812,13 @@
 	  LED controllers. They are I2C devices with multiple constant-current
 	  channels, each with independent 256-level PWM control.
 
+config LEDS_IS31FL3299
+	tristate "LED support for ISSI IS31FL3299 I2C LED controller"
+	depends on LEDS_CLASS && I2C && OF
+	select REGMAP_I2C
+	help
+	  This option enables support for the IS31FL3299 LED driver.
+
 config LEDS_SC27XX_BLTC
 	tristate "LED support for the SC27xx breathing light controller"
 	depends on LEDS_CLASS && MFD_SC27XX_PMIC
@@ -959,6 +974,17 @@
 	  This option enables support for the Power Button LED of
 	  Acer Iconia Tab A500.
 
+config LEDS_LED1202
+	tristate "LED support for STMicroElectronics LED1202"
+	depends on LEDS_CLASS && I2C && OF
+	select REGMAP_I2C
+	help
+	  This option enables support for the LED1202 12-channel
+	  LED driver.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called leds-led1202.
+
 source "drivers/leds/blink/Kconfig"
 
 comment "Flash and Torch LED drivers"
diff -ruw linux-6.13.12/drivers/leds/Makefile linux-6.13.12-fbx/drivers/leds/Makefile
--- linux-6.13.12/drivers/leds/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/leds/Makefile	2025-09-25 17:40:33.087354928 +0200
@@ -31,12 +31,14 @@
 obj-$(CONFIG_LEDS_DA9052)		+= leds-da9052.o
 obj-$(CONFIG_LEDS_GPIO)			+= leds-gpio.o
 obj-$(CONFIG_LEDS_GPIO_REGISTER)	+= leds-gpio-register.o
+obj-$(CONFIG_LEDS_FBXGWR_PMU)		+= leds-fbxgwr-pmu.o
 obj-$(CONFIG_LEDS_HP6XX)		+= leds-hp6xx.o
 obj-$(CONFIG_LEDS_INTEL_SS4200)		+= leds-ss4200.o
 obj-$(CONFIG_LEDS_IP30)			+= leds-ip30.o
 obj-$(CONFIG_LEDS_IPAQ_MICRO)		+= leds-ipaq-micro.o
 obj-$(CONFIG_LEDS_IS31FL319X)		+= leds-is31fl319x.o
 obj-$(CONFIG_LEDS_IS31FL32XX)		+= leds-is31fl32xx.o
+obj-$(CONFIG_LEDS_IS31FL3299)		+= leds-is31fl3299.o
 obj-$(CONFIG_LEDS_LM3530)		+= leds-lm3530.o
 obj-$(CONFIG_LEDS_LM3532)		+= leds-lm3532.o
 obj-$(CONFIG_LEDS_LM3533)		+= leds-lm3533.o
@@ -92,6 +94,7 @@
 obj-$(CONFIG_LEDS_WM831X_STATUS)	+= leds-wm831x-status.o
 obj-$(CONFIG_LEDS_WM8350)		+= leds-wm8350.o
 obj-$(CONFIG_LEDS_WRAP)			+= leds-wrap.o
+obj-$(CONFIG_LEDS_LED1202)		+= leds-led1202.o
 
 # Kinetic ExpressWire Protocol
 obj-$(CONFIG_LEDS_EXPRESSWIRE)		+= leds-expresswire.o
@@ -102,6 +105,9 @@
 obj-$(CONFIG_LEDS_EL15203000)		+= leds-el15203000.o
 obj-$(CONFIG_LEDS_SPI_BYTE)		+= leds-spi-byte.o
 
+# LED I2C Drivers
+obj-$(CONFIG_LEDS_FBXGWR_ANNIV)		+= leds-fbxgwr-anniv.o
+
 # LED Userspace Drivers
 obj-$(CONFIG_LEDS_USER)			+= uleds.o
 
diff -ruw linux-6.13.12/drivers/media/dvb-core/dvb_frontend.c linux-6.13.12-fbx/drivers/media/dvb-core/dvb_frontend.c
--- linux-6.13.12/drivers/media/dvb-core/dvb_frontend.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/media/dvb-core/dvb_frontend.c	2025-09-25 17:40:33.147355226 +0200
@@ -815,6 +815,7 @@
 	if (fe->exit != DVB_FE_DEVICE_REMOVED)
 		fe->exit = DVB_FE_NORMAL_EXIT;
 	mb();
+	wake_up_all(&fepriv->events.wait_queue);
 
 	if (!fepriv->thread)
 		return;
@@ -2754,6 +2755,9 @@
 
 	poll_wait(file, &fepriv->events.wait_queue, wait);
 
+	if (fe->exit)
+		return POLLERR | POLLHUP;
+
 	if (fepriv->events.eventw != fepriv->events.eventr)
 		return (EPOLLIN | EPOLLRDNORM | EPOLLPRI);
 
diff -ruw linux-6.13.12/drivers/media/rc/keymaps/Makefile linux-6.13.12-fbx/drivers/media/rc/keymaps/Makefile
--- linux-6.13.12/drivers/media/rc/keymaps/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/media/rc/keymaps/Makefile	2025-09-25 17:40:33.279355881 +0200
@@ -104,6 +104,7 @@
 			rc-purpletv.o \
 			rc-pv951.o \
 			rc-rc6-mce.o \
+			rc-rc6-freebox.o \
 			rc-real-audio-220-32-keys.o \
 			rc-reddo.o \
 			rc-snapstream-firefly.o \
diff -ruw linux-6.13.12/drivers/media/usb/dvb-usb/dib0700_devices.c linux-6.13.12-fbx/drivers/media/usb/dvb-usb/dib0700_devices.c
--- linux-6.13.12/drivers/media/usb/dvb-usb/dib0700_devices.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/media/usb/dvb-usb/dib0700_devices.c	2025-09-25 17:40:33.315356059 +0200
@@ -3909,6 +3909,7 @@
 	DIBCOM_STK8096PVR,
 	HAMA_DVBT_HYBRID,
 	MICROSOFT_XBOX_ONE_TUNER,
+	DIBCOM_HOOK_DEFAULT_STK7770P,
 };
 
 struct usb_device_id dib0700_usb_id_table[] = {
@@ -3999,6 +4000,7 @@
 	DVB_USB_DEV(DIBCOM, DIBCOM_STK8096PVR),
 	DVB_USB_DEV(HAMA, HAMA_DVBT_HYBRID),
 	DVB_USB_DEV(MICROSOFT, MICROSOFT_XBOX_ONE_TUNER),
+	DVB_USB_DEV(DIBCOM, DIBCOM_HOOK_DEFAULT_STK7770P),
 	{ }
 };
 
@@ -5242,6 +5244,30 @@
 				{ NULL },
 			},
 		},
+	}, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+		.num_adapters = 1,
+		.adapter = {
+			{
+			DIB0700_NUM_FRONTENDS(1),
+			.fe = {{
+				.caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+				.pid_filter_count = 32,
+				.pid_filter       = stk70x0p_pid_filter,
+				.pid_filter_ctrl  = stk70x0p_pid_filter_ctrl,
+				.frontend_attach  = stk7770p_frontend_attach,
+				.tuner_attach     = dib7770p_tuner_attach,
+
+				DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+			}},
+			},
+		},
+		.num_device_descs = 1,
+		.devices = {
+			{   "DiBcom STK7770P reference design no IR",
+				{ &dib0700_usb_id_table[DIBCOM_HOOK_DEFAULT_STK7770P], NULL },
+				{ NULL },
+			},
+		},
 	},
 };
 
diff -ruw linux-6.13.12/drivers/mfd/Kconfig linux-6.13.12-fbx/drivers/mfd/Kconfig
--- linux-6.13.12/drivers/mfd/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mfd/Kconfig	2025-09-25 17:40:33.351356238 +0200
@@ -2192,6 +2192,24 @@
 	  additional drivers must be enabled in order to use the functionality
 	  of the device.
 
+config MFD_FBXGWR_PMU
+	tristate "Freebox fbxgwr PMU"
+	depends on I2C
+	depends on OF
+	select MFD_CORE
+	select REGMAP_I2C
+
+config MFD_FBXGW7R_PANEL
+	tristate "Freebox fbxgw7r panel support"
+	depends on FB
+	depends on SPI_MASTER
+	depends on OF
+	select FB_SYS_FOPS
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_DEFERRED_IO
+
 config MFD_WCD934X
 	tristate "Support for WCD9340/WCD9341 Codec"
 	depends on SLIMBUS
diff -ruw linux-6.13.12/drivers/mfd/Makefile linux-6.13.12-fbx/drivers/mfd/Makefile
--- linux-6.13.12/drivers/mfd/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mfd/Makefile	2025-09-25 17:40:33.351356238 +0200
@@ -287,6 +287,8 @@
 
 obj-$(CONFIG_MFD_ATC260X)	+= atc260x-core.o
 obj-$(CONFIG_MFD_ATC260X_I2C)	+= atc260x-i2c.o
+obj-$(CONFIG_MFD_FBXGWR_PMU)	+= fbxgwr-pmu.o
+obj-$(CONFIG_MFD_FBXGW7R_PANEL)	+= fbxgw7r-panel.o
 
 obj-$(CONFIG_MFD_RSMU_I2C)	+= rsmu_i2c.o rsmu_core.o
 obj-$(CONFIG_MFD_RSMU_SPI)	+= rsmu_spi.o rsmu_core.o
diff -ruw linux-6.13.12/drivers/misc/Kconfig linux-6.13.12-fbx/drivers/misc/Kconfig
--- linux-6.13.12/drivers/misc/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/misc/Kconfig	2025-09-25 17:40:33.375356357 +0200
@@ -413,6 +413,18 @@
 	  To compile this driver as a module, choose M here: the
 	  module will be called vmw_balloon.
 
+config INTELCE_PIC16PMU
+	tristate "PIC16 PMU, LED, hwmon support"
+	select INPUT_POLLDEV
+	select NEW_LEDS
+	select I2C
+	select HWMON
+	select ARCH_REQUIRE_GPIOLIB
+	help
+	  Freebox v6 HD PIC16 PMU interface support, enables
+	  control of the on-board LEDs and reports the power status,
+	  reset status and button status.
+
 config PCH_PHUB
 	tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) PHUB"
 	select GENERIC_NET_UTILS
@@ -434,6 +446,12 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called pch_phub.
 
+config FBXSERIAL_OF
+	bool "read fbxserial through DT chosen node"
+	depends on OF
+	select ARCH_HAS_FBXSERIAL
+	select FBXSERIAL
+
 config LATTICE_ECP3_CONFIG
 	tristate "Lattice ECP3 FPGA bitstream configuration via SPI"
 	depends on SPI && SYSFS
@@ -634,6 +652,9 @@
 	    - lan966x-miim (MDIO_MSCC_MIIM)
 	    - lan966x-switch (LAN966X_SWITCH)
 
+config DGASP
+	bool "dying gasp infrastructure"
+
 source "drivers/misc/c2port/Kconfig"
 source "drivers/misc/eeprom/Kconfig"
 source "drivers/misc/cb710/Kconfig"
@@ -651,4 +672,6 @@
 source "drivers/misc/pvpanic/Kconfig"
 source "drivers/misc/mchp_pci1xxxx/Kconfig"
 source "drivers/misc/keba/Kconfig"
+source "drivers/misc/remoti/Kconfig"
+source "drivers/misc/hdmi-cec/Kconfig"
 endmenu
diff -ruw linux-6.13.12/drivers/misc/Makefile linux-6.13.12-fbx/drivers/misc/Makefile
--- linux-6.13.12/drivers/misc/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/misc/Makefile	2025-09-25 17:40:33.375356357 +0200
@@ -22,7 +22,9 @@
 obj-$(CONFIG_SENSORS_APDS990X)	+= apds990x.o
 obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
 obj-$(CONFIG_KGDB_TESTS)	+= kgdbts.o
+obj-$(CONFIG_FBXSERIAL_OF)	+= fbxserial_of.o
 obj-$(CONFIG_SGI_XP)		+= sgi-xp/
+obj-$(CONFIG_INTELCE_PIC16PMU)	+= pic16-pmu.o
 obj-$(CONFIG_SGI_GRU)		+= sgi-gru/
 obj-$(CONFIG_SMPRO_ERRMON)	+= smpro-errmon.o
 obj-$(CONFIG_SMPRO_MISC)	+= smpro-misc.o
@@ -40,6 +42,7 @@
 obj-y				+= cb710/
 obj-$(CONFIG_VMWARE_BALLOON)	+= vmw_balloon.o
 obj-$(CONFIG_PCH_PHUB)		+= pch_phub.o
+obj-y				+= hdmi-cec/
 obj-y				+= lis3lv02d/
 obj-$(CONFIG_ALTERA_STAPL)	+=altera-stapl/
 obj-$(CONFIG_INTEL_MEI)		+= mei/
@@ -74,3 +77,5 @@
 lan966x-pci-objs		+= lan966x_pci.dtbo.o
 obj-$(CONFIG_MCHP_LAN966X_PCI)	+= lan966x-pci.o
 obj-y				+= keba/
+obj-y				+= remoti/
+obj-$(CONFIG_DGASP)		+= dgasp.o
diff -ruw linux-6.13.12/drivers/misc/eeprom/Kconfig linux-6.13.12-fbx/drivers/misc/eeprom/Kconfig
--- linux-6.13.12/drivers/misc/eeprom/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/misc/eeprom/Kconfig	2025-09-25 17:40:33.387356416 +0200
@@ -119,4 +119,8 @@
 	  This driver can also be built as a module.  If so, the module
 	  will be called ee1004.
 
+config EEPROM_EE1004_RAW
+	tristate "SPD EEPROMs on DDR4 memory modules (non smbus)"
+	depends on I2C && SYSFS
+
 endmenu
diff -ruw linux-6.13.12/drivers/misc/eeprom/Makefile linux-6.13.12-fbx/drivers/misc/eeprom/Makefile
--- linux-6.13.12/drivers/misc/eeprom/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/misc/eeprom/Makefile	2025-09-25 17:40:33.387356416 +0200
@@ -7,3 +7,4 @@
 obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
 obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o
 obj-$(CONFIG_EEPROM_EE1004)	+= ee1004.o
+obj-$(CONFIG_EEPROM_EE1004_RAW)	+= ee1004_raw.o
diff -ruw linux-6.13.12/drivers/misc/eeprom/at24.c linux-6.13.12-fbx/drivers/misc/eeprom/at24.c
--- linux-6.13.12/drivers/misc/eeprom/at24.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/misc/eeprom/at24.c	2025-09-25 17:40:33.391356436 +0200
@@ -608,7 +608,6 @@
 	bool full_power;
 	struct regmap *regmap;
 	bool writable;
-	u8 test_byte;
 	int err;
 
 	i2c_fn_i2c = i2c_check_functionality(client->adapter, I2C_FUNC_I2C);
@@ -629,8 +628,10 @@
 		page_size = 1;
 
 	flags = cdata->flags;
+#ifndef CONFIG_NVMEM_IGNORE_RO
 	if (device_property_present(dev, "read-only"))
 		flags |= AT24_FLAG_READONLY;
+#endif
 	if (device_property_present(dev, "no-read-rollover"))
 		flags |= AT24_FLAG_NO_RDROL;
 
@@ -770,21 +771,6 @@
 	}
 	pm_runtime_enable(dev);
 
-	/*
-	 * Perform a one-byte test read to verify that the chip is functional,
-	 * unless powering on the device is to be avoided during probe (i.e.
-	 * it's powered off right now).
-	 */
-	if (full_power) {
-		err = at24_read(at24, 0, &test_byte, 1);
-		if (err) {
-			pm_runtime_disable(dev);
-			if (!pm_runtime_status_suspended(dev))
-				regulator_disable(at24->vcc_reg);
-			return -ENODEV;
-		}
-	}
-
 	at24->nvmem = devm_nvmem_register(dev, &nvmem_config);
 	if (IS_ERR(at24->nvmem)) {
 		pm_runtime_disable(dev);
diff -ruw linux-6.13.12/drivers/mmc/core/Kconfig linux-6.13.12-fbx/drivers/mmc/core/Kconfig
--- linux-6.13.12/drivers/mmc/core/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mmc/core/Kconfig	2025-09-25 17:40:33.411356535 +0200
@@ -64,6 +64,10 @@
 
 	  If unsure, say 8 here.
 
+config MMC_BLOCK_IGNORE_RO_AREA
+	bool
+	depends on MMC_BLOCK
+
 config SDIO_UART
 	tristate "SDIO UART/GPS class support"
 	depends on TTY
diff -ruw linux-6.13.12/drivers/mmc/core/block.c linux-6.13.12-fbx/drivers/mmc/core/block.c
--- linux-6.13.12/drivers/mmc/core/block.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mmc/core/block.c	2025-09-25 17:40:33.411356535 +0200
@@ -2448,6 +2448,58 @@
 	return mmc_blk_rw_wait(mq, NULL);
 }
 
+static bool __mmc_within_ro_area(const struct ro_area *ro_area,
+				 u64 start, u64 end)
+{
+	if (end < start)
+		/*
+		 * overflow: be on the safe side.
+		 */
+		return true;
+
+	if (start < ro_area->end && end > ro_area->start)
+		return true;
+	return false;
+}
+
+static bool mmc_blk_within_ro_area(struct mmc_card *card,
+				   struct mmc_blk_data *md,
+				   struct request *req)
+{
+	const struct ro_area *ro_area;
+	u64 bytes_start, bytes_end;
+
+	switch (req_op(req)) {
+	case REQ_OP_DISCARD:
+	case REQ_OP_SECURE_ERASE:
+	case REQ_OP_WRITE_ZEROES:
+	case REQ_OP_WRITE:
+		break;
+	default:
+		return false;
+	}
+
+	switch (md->part_type) {
+	case 0x0:
+		// user area
+		ro_area = &card->user_ro_area;
+		break;
+	case EXT_CSD_PART_CONFIG_ACC_BOOT0:
+	case EXT_CSD_PART_CONFIG_ACC_BOOT0 + 1:
+		// any boot partition
+		ro_area = &card->boot_ro_area;
+		break;
+	default:
+		return false;
+	}
+
+
+	bytes_start = blk_rq_pos(req) << 9;
+	bytes_end = bytes_start + (blk_rq_sectors(req) << 9);
+
+	return __mmc_within_ro_area(ro_area, bytes_start, bytes_end);
+}
+
 enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->blkdata;
@@ -2455,6 +2507,9 @@
 	struct mmc_host *host = card->host;
 	int ret;
 
+	if (mmc_blk_within_ro_area(card, md, req))
+		return MMC_REQ_WRITE_PROTECTION;
+
 	ret = mmc_blk_part_switch(card, md->part_type);
 	if (ret)
 		return MMC_REQ_FAILED_TO_START;
@@ -2655,7 +2710,7 @@
 	md->disk->private_data = md;
 	md->parent = parent;
 	set_disk_ro(md->disk, md->read_only || default_ro);
-	if (area_type & (MMC_BLK_DATA_AREA_RPMB | MMC_BLK_DATA_AREA_BOOT))
+	if (area_type & (MMC_BLK_DATA_AREA_RPMB))
 		md->disk->flags |= GENHD_FL_NO_PART;
 
 	/*
diff -ruw linux-6.13.12/drivers/mmc/core/mmc.c linux-6.13.12-fbx/drivers/mmc/core/mmc.c
--- linux-6.13.12/drivers/mmc/core/mmc.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mmc/core/mmc.c	2025-09-25 17:40:33.415356555 +0200
@@ -1585,6 +1585,21 @@
 	return mmc_execute_tuning(card);
 }
 
+static void mmc_read_of_ro_area(struct mmc_card *card, const char *of_key,
+				struct ro_area *ro_area)
+{
+	u64 v[2];
+
+	if (!IS_ENABLED(CONFIG_MMC_BLOCK_IGNORE_RO_AREA) &&
+	    !of_property_read_u64_array(card->host->parent->of_node,
+					    of_key, v, 2)) {
+		ro_area->start = v[0];
+		ro_area->end = v[0] + v[1];
+	}
+	dev_info(&card->dev, "%s %08llx - %08llx\n", of_key,
+		 ro_area->start, ro_area->end);
+}
+
 /*
  * Handle the detection and initialisation of a card.
  *
@@ -1660,6 +1675,11 @@
 		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
 	}
 
+	mmc_read_of_ro_area(card, "boot-ro-area",
+			    &card->boot_ro_area);
+	mmc_read_of_ro_area(card, "user-ro-area",
+			    &card->user_ro_area);
+
 	/*
 	 * Call the optional HC's init_card function to handle quirks.
 	 */
diff -ruw linux-6.13.12/drivers/mmc/core/queue.c linux-6.13.12-fbx/drivers/mmc/core/queue.c
--- linux-6.13.12/drivers/mmc/core/queue.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mmc/core/queue.c	2025-09-25 17:40:33.415356555 +0200
@@ -312,6 +312,9 @@
 	case MMC_REQ_FAILED_TO_START:
 		ret = BLK_STS_IOERR;
 		break;
+	case MMC_REQ_WRITE_PROTECTION:
+		ret = BLK_STS_PROTECTION;
+		break;
 	default:
 		ret = BLK_STS_OK;
 		break;
diff -ruw linux-6.13.12/drivers/mmc/core/queue.h linux-6.13.12-fbx/drivers/mmc/core/queue.h
--- linux-6.13.12/drivers/mmc/core/queue.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mmc/core/queue.h	2025-09-25 17:40:33.415356555 +0200
@@ -12,6 +12,7 @@
 	MMC_REQ_STARTED,
 	MMC_REQ_BUSY,
 	MMC_REQ_FAILED_TO_START,
+	MMC_REQ_WRITE_PROTECTION,
 	MMC_REQ_FINISHED,
 };
 
diff -ruw linux-6.13.12/drivers/mmc/host/Kconfig linux-6.13.12-fbx/drivers/mmc/host/Kconfig
--- linux-6.13.12/drivers/mmc/host/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mmc/host/Kconfig	2025-09-25 17:40:33.419356575 +0200
@@ -803,6 +803,15 @@
 	  the Synopsys DesignWare Memory Card Interface driver. Select this
 	  option for platforms based on Mellanox BlueField SoC's.
 
+config MMC_DW_CORTINA
+	tristate "Cortina specific extensions for Synopsys DW Memory Card Interface"
+	depends on MMC_DW
+	select MMC_DW_PLTFM
+	help
+	  This selects support for Cortina Access SoC specific extensions to the
+	  Synopsys DesignWare Memory Card Interface driver. Select this option
+	  for platforms based on Cortina Access SoC's.
+
 config MMC_DW_EXYNOS
 	tristate "Exynos specific extensions for Synopsys DW Memory Card Interface"
 	depends on MMC_DW
@@ -1039,7 +1048,7 @@
 
 config MMC_SDHCI_BRCMSTB
 	tristate "Broadcom SDIO/SD/MMC support"
-	depends on ARCH_BRCMSTB || ARCH_BCM2835 || BMIPS_GENERIC || COMPILE_TEST
+	depends on ARCH_BRCMSTB || ARCH_BCM2835 || BMIPS_GENERIC || ARCH_BCMBCA || COMPILE_TEST
 	depends on MMC_SDHCI_PLTFM
 	select MMC_CQHCI
 	default ARCH_BRCMSTB || BMIPS_GENERIC
diff -ruw linux-6.13.12/drivers/mmc/host/Makefile linux-6.13.12-fbx/drivers/mmc/host/Makefile
--- linux-6.13.12/drivers/mmc/host/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mmc/host/Makefile	2025-09-25 17:40:33.419356575 +0200
@@ -50,6 +50,7 @@
 obj-$(CONFIG_MMC_DW)		+= dw_mmc.o
 obj-$(CONFIG_MMC_DW_PLTFM)	+= dw_mmc-pltfm.o
 obj-$(CONFIG_MMC_DW_BLUEFIELD)	+= dw_mmc-bluefield.o
+obj-$(CONFIG_MMC_DW_CORTINA)+= dw_mmc-cortina.o
 obj-$(CONFIG_MMC_DW_EXYNOS)	+= dw_mmc-exynos.o
 obj-$(CONFIG_MMC_DW_HI3798CV200) += dw_mmc-hi3798cv200.o
 obj-$(CONFIG_MMC_DW_HI3798MV200) += dw_mmc-hi3798mv200.o
diff -ruw linux-6.13.12/drivers/mmc/host/sdhci-brcmstb.c linux-6.13.12-fbx/drivers/mmc/host/sdhci-brcmstb.c
--- linux-6.13.12/drivers/mmc/host/sdhci-brcmstb.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mmc/host/sdhci-brcmstb.c	2025-09-25 17:40:33.427356614 +0200
@@ -300,6 +300,7 @@
 	{ .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 },
 	{ .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 },
 	{ .compatible = "brcm,bcm74165b0-sdhci", .data = &match_priv_74165b0 },
+	{ .compatible = "brcm,bcm63xx-sdhci", .data = &match_priv_7445 },
 	{},
 };
 
@@ -401,11 +402,14 @@
 	}
 
 	/* Map in the non-standard CFG registers */
-	priv->cfg_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
+	if (platform_get_mem_or_io(pdev, 1)) {
+		priv->cfg_regs =
+			devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
 	if (IS_ERR(priv->cfg_regs)) {
 		res = PTR_ERR(priv->cfg_regs);
 		goto err;
 	}
+	}
 
 	sdhci_get_of_property(pdev);
 	res = mmc_of_parse(host->mmc);
diff -ruw linux-6.13.12/drivers/mtd/Kconfig linux-6.13.12-fbx/drivers/mtd/Kconfig
--- linux-6.13.12/drivers/mtd/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/Kconfig	2025-09-25 17:40:33.447356714 +0200
@@ -23,6 +23,9 @@
 	  WARNING: some of the tests will ERASE entire MTD device which they
 	  test. Do not use these tests unless you really know what you do.
 
+config MTD_ERASE_PRINTK
+	bool "write to kernel log when a block is erased"
+
 menu "Partition parsers"
 source "drivers/mtd/parsers/Kconfig"
 endmenu
diff -ruw linux-6.13.12/drivers/mtd/mtdchar.c linux-6.13.12-fbx/drivers/mtd/mtdchar.c
--- linux-6.13.12/drivers/mtd/mtdchar.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/mtdchar.c	2025-09-25 17:40:33.451356733 +0200
@@ -168,6 +168,7 @@
 		{
 			struct mtd_oob_ops ops = {};
 
+			memset(&ops, 0, sizeof (ops));
 			ops.mode = MTD_OPS_RAW;
 			ops.datbuf = kbuf;
 			ops.oobbuf = NULL;
@@ -262,6 +263,7 @@
 		{
 			struct mtd_oob_ops ops = {};
 
+			memset(&ops, 0, sizeof (ops));
 			ops.mode = MTD_OPS_RAW;
 			ops.datbuf = kbuf;
 			ops.oobbuf = NULL;
@@ -947,6 +949,11 @@
 				erase->len = einfo32.length;
 			}
 
+#ifdef CONFIG_MTD_ERASE_PRINTK
+			printk(KERN_DEBUG "mtd: %s: ERASE offset=@%08llx\n",
+			       mtd->name, erase->addr);
+#endif
+
 			ret = mtd_erase(mtd, erase);
 			kfree(erase);
 		}
diff -ruw linux-6.13.12/drivers/mtd/mtdcore.c linux-6.13.12-fbx/drivers/mtd/mtdcore.c
--- linux-6.13.12/drivers/mtd/mtdcore.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/mtdcore.c	2025-09-25 17:40:33.451356733 +0200
@@ -339,6 +339,56 @@
 }
 MTD_DEVICE_ATTR_RO(bbt_blocks);
 
+static ssize_t mtd_nand_type_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->nand_type);
+}
+static DEVICE_ATTR(nand_type, S_IRUGO, mtd_nand_type_show, NULL);
+
+static ssize_t mtd_nand_manufacturer_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n", mtd->nand_manufacturer);
+}
+static DEVICE_ATTR(nand_manufacturer, S_IRUGO, mtd_nand_manufacturer_show, NULL);
+
+static ssize_t mtd_nand_onfi_ecc_bits_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", mtd->onfi_ecc_bits);
+}
+static DEVICE_ATTR(onfi_ecc_bits, S_IRUGO, mtd_nand_onfi_ecc_bits_show, NULL);
+
+static ssize_t mtd_nand_onfi_model_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%s\n",
+			mtd->onfi_model ? mtd->onfi_model : "unknown");
+}
+static DEVICE_ATTR(onfi_model, S_IRUGO, mtd_nand_onfi_model_show, NULL);
+
+static ssize_t mtd_nand_ids_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct mtd_info *mtd = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			mtd->nand_ids[0], mtd->nand_ids[1],
+			mtd->nand_ids[2], mtd->nand_ids[3],
+			mtd->nand_ids[4], mtd->nand_ids[5],
+			mtd->nand_ids[6], mtd->nand_ids[7]);
+}
+static DEVICE_ATTR(nand_ids, S_IRUGO, mtd_nand_ids_show, NULL);
+
 static struct attribute *mtd_attrs[] = {
 	&dev_attr_type.attr,
 	&dev_attr_flags.attr,
@@ -357,6 +407,11 @@
 	&dev_attr_bad_blocks.attr,
 	&dev_attr_bbt_blocks.attr,
 	&dev_attr_bitflip_threshold.attr,
+	&dev_attr_nand_type.attr,
+	&dev_attr_nand_manufacturer.attr,
+	&dev_attr_onfi_ecc_bits.attr,
+	&dev_attr_onfi_model.attr,
+	&dev_attr_nand_ids.attr,
 	NULL,
 };
 ATTRIBUTE_GROUPS(mtd);
diff -ruw linux-6.13.12/drivers/mtd/mtdpart.c linux-6.13.12-fbx/drivers/mtd/mtdpart.c
--- linux-6.13.12/drivers/mtd/mtdpart.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/mtdpart.c	2025-09-25 17:40:33.451356733 +0200
@@ -74,6 +74,11 @@
 	child->oobsize = parent->oobsize;
 	child->oobavail = parent->oobavail;
 	child->subpage_sft = parent->subpage_sft;
+	child->nand_type = parent->nand_type;
+	child->nand_manufacturer = parent->nand_manufacturer;
+	child->onfi_ecc_bits = parent->onfi_ecc_bits;
+	child->onfi_model = parent->onfi_model;
+	memcpy(child->nand_ids, parent->nand_ids, 8);
 
 	child->name = name;
 	child->owner = parent->owner;
diff -ruw linux-6.13.12/drivers/mtd/nand/raw/Kconfig linux-6.13.12-fbx/drivers/mtd/nand/raw/Kconfig
--- linux-6.13.12/drivers/mtd/nand/raw/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/nand/raw/Kconfig	2025-09-25 17:40:33.455356753 +0200
@@ -12,6 +12,14 @@
 
 comment "Raw/parallel NAND flash controllers"
 
+config MTD_FORCE_BAD_BLOCK_ERASE
+	bool "Force erase on bad blocks (useful for bootloader parts)"
+	default n
+	help
+	  Enable this option only when you need to force an erase on
+	  blocks being marked as "bad" by Linux (i.e: other ECC/bad block
+	  marker layout).
+
 config MTD_NAND_DENALI
 	tristate
 
@@ -31,6 +39,18 @@
 	  Enable the driver for NAND flash on platforms using a Denali NAND
 	  controller as a DT device.
 
+config MTD_NAND_DENALI_FBX
+	tristate "NAND Denali controller support"
+	depends on PCI
+	select BCH_CONST_PARAMS
+
+if MTD_NAND_DENALI_FBX
+	config BCH_CONST_M
+		default 13
+	config BCH_CONST_T
+		default 4
+endif
+
 config MTD_NAND_AMS_DELTA
 	tristate "Amstrad E3 NAND controller"
 	depends on MACH_AMS_DELTA || COMPILE_TEST
diff -ruw linux-6.13.12/drivers/mtd/nand/raw/Makefile linux-6.13.12-fbx/drivers/mtd/nand/raw/Makefile
--- linux-6.13.12/drivers/mtd/nand/raw/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/nand/raw/Makefile	2025-09-25 17:40:33.455356753 +0200
@@ -8,6 +8,7 @@
 obj-$(CONFIG_MTD_NAND_DENALI)		+= denali.o
 obj-$(CONFIG_MTD_NAND_DENALI_PCI)	+= denali_pci.o
 obj-$(CONFIG_MTD_NAND_DENALI_DT)	+= denali_dt.o
+obj-$(CONFIG_MTD_NAND_DENALI_FBX)	+= denali_nand.o
 obj-$(CONFIG_MTD_NAND_AU1550)		+= au1550nd.o
 obj-$(CONFIG_MTD_NAND_S3C2410)		+= s3c2410.o
 obj-$(CONFIG_MTD_NAND_DAVINCI)		+= davinci_nand.o
diff -ruw linux-6.13.12/drivers/mtd/parsers/Kconfig linux-6.13.12-fbx/drivers/mtd/parsers/Kconfig
--- linux-6.13.12/drivers/mtd/parsers/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/parsers/Kconfig	2025-09-25 17:40:33.467356813 +0200
@@ -91,6 +91,10 @@
 	  two "firmware" partitions. Currently used firmware has to be detected
 	  using CFE environment variable.
 
+config MTD_OF_PARTS_IGNORE_RO
+	bool "ignore read-only flag"
+	depends on MTD_OF_PARTS
+
 config MTD_PARSER_IMAGETAG
 	tristate "Parser for BCM963XX Image Tag format partitions"
 	depends on BCM63XX || BMIPS_GENERIC || COMPILE_TEST
@@ -215,3 +219,14 @@
 	  partition map. This partition table contains real partition
 	  offsets, which may differ from device to device depending on the
 	  number and location of bad blocks on NAND.
+
+config MTD_FBX6HD_PARTS
+	tristate "Freebox V6 HD partitioning support"
+	help
+	  Freebox V6 HD partitioning support
+
+config MTD_FBX6HD_PARTS_WRITE_ALL
+	bool "make all partitions writeable"
+	depends on MTD_FBX6HD_PARTS
+	help
+	  Make all Freebox V6 HD partitions writeable instead of read-only.
diff -ruw linux-6.13.12/drivers/mtd/parsers/Makefile linux-6.13.12-fbx/drivers/mtd/parsers/Makefile
--- linux-6.13.12/drivers/mtd/parsers/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/parsers/Makefile	2025-09-25 17:40:33.471356833 +0200
@@ -15,3 +15,4 @@
 obj-$(CONFIG_MTD_SHARPSL_PARTS)		+= sharpslpart.o
 obj-$(CONFIG_MTD_REDBOOT_PARTS)		+= redboot.o
 obj-$(CONFIG_MTD_QCOMSMEM_PARTS)	+= qcomsmempart.o
+obj-$(CONFIG_MTD_FBX6HD_PARTS)	+= fbx6hd-mtdparts.o
diff -ruw linux-6.13.12/drivers/mtd/parsers/ofpart_core.c linux-6.13.12-fbx/drivers/mtd/parsers/ofpart_core.c
--- linux-6.13.12/drivers/mtd/parsers/ofpart_core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/parsers/ofpart_core.c	2025-09-25 17:40:33.471356833 +0200
@@ -157,8 +157,10 @@
 			partname = of_get_property(pp, "name", &len);
 		parts[i].name = partname;
 
+#ifndef CONFIG_MTD_OF_PARTS_IGNORE_RO
 		if (of_property_read_bool(pp, "read-only"))
 			parts[i].mask_flags |= MTD_WRITEABLE;
+#endif
 
 		if (of_property_read_bool(pp, "lock"))
 			parts[i].mask_flags |= MTD_POWERUP_LOCK;
diff -ruw linux-6.13.12/drivers/mtd/spi-nor/atmel.c linux-6.13.12-fbx/drivers/mtd/spi-nor/atmel.c
--- linux-6.13.12/drivers/mtd/spi-nor/atmel.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/spi-nor/atmel.c	2025-09-25 17:40:33.471356833 +0200
@@ -238,6 +238,14 @@
 		.flags = SPI_NOR_HAS_LOCK,
 		.no_sfdp_flags = SECT_4K,
 		.fixups = &at25fs_nor_fixups
+	}, {
+		/* Used on Freebox Gateways ... */
+		.id = SNOR_ID(0x1f, 0x65, 0x00),
+		.name = "at25f512b",
+		.alt_probe_id = 0x1f65,
+		.sector_size = SZ_32K,
+		.size = SZ_128K,
+		.flags = ALT_PROBE_ATMEL,
 	},
 };
 
diff -ruw linux-6.13.12/drivers/mtd/spi-nor/core.c linux-6.13.12-fbx/drivers/mtd/spi-nor/core.c
--- linux-6.13.12/drivers/mtd/spi-nor/core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/spi-nor/core.c	2025-09-25 17:40:33.471356833 +0200
@@ -787,6 +787,18 @@
 	return spi_nor_wait_till_ready(nor);
 }
 
+static void sst_ewrsr(struct spi_nor *nor)
+{
+	struct spi_mem_op op =
+		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_EWRSR, 1),
+			   SPI_MEM_OP_NO_ADDR,
+			   SPI_MEM_OP_NO_DUMMY,
+			   SPI_MEM_OP_NO_DATA);
+
+	BUG_ON(!nor->spimem);
+	spi_mem_exec_op(nor->spimem, &op);
+}
+
 /**
  * spi_nor_write_sr() - Write the Status Register.
  * @nor:	pointer to 'struct spi_nor'.
@@ -803,6 +815,9 @@
 	if (ret)
 		return ret;
 
+	if (nor->info->flags & SST_EWRSR)
+		sst_ewrsr(nor);
+
 	if (nor->spimem) {
 		struct spi_mem_op op = SPI_NOR_WRSR_OP(sr, len);
 
@@ -2006,6 +2021,40 @@
 	return NULL;
 }
 
+static const struct flash_info *
+spi_nor_search_part_by_alt_id(const struct flash_info *parts,
+			      unsigned int nparts,
+			      const u32 id)
+
+{
+	int i;
+
+	for (i = 0; i < nparts; i++) {
+		const struct flash_info *info = &parts[i];
+		if ((info->flags & ALT_PROBE) && (info->alt_probe_id == id))
+			return info;
+	}
+
+	return NULL;
+}
+
+static const struct flash_info *
+spi_nor_search_part_by_atmel_id(const struct flash_info *parts,
+				unsigned int nparts,
+				const u32 id)
+
+{
+	int i;
+
+	for (i = 0; i < nparts; i++) {
+		const struct flash_info *info = &parts[i];
+		if ((info->flags & ALT_PROBE_ATMEL) && (info->alt_probe_id == id))
+			return info;
+	}
+
+	return NULL;
+}
+
 static const struct flash_info *spi_nor_detect(struct spi_nor *nor)
 {
 	const struct flash_info *info;
@@ -2040,6 +2089,80 @@
 	return info;
 }
 
+static const struct flash_info *spi_nor_alt_read_id(struct spi_nor *nor)
+{
+	u8 *data = nor->bouncebuf;
+	u16 id;
+	int err;
+	size_t i;
+	const struct flash_info *info;
+	struct spi_mem_op op =
+		SPI_MEM_OP(SPI_MEM_OP_CMD(SPINOR_OP_RDID_ALT, 1),
+			   SPI_MEM_OP_ADDR(3, 0, 1),
+			   SPI_MEM_OP_NO_DUMMY,
+			   SPI_MEM_OP_DATA_IN(2, data, 1));
+
+	BUG_ON(!nor->spimem);
+
+	err = spi_mem_exec_op(nor->spimem, &op);
+	if (err < 0) {
+		dev_err(nor->dev, "error %d reading alt ID\n", err);
+		return ERR_PTR(err);
+	}
+
+	id = (data[1] << 8) | data[0];
+
+	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
+		info = spi_nor_search_part_by_alt_id(manufacturers[i]->parts,
+						     manufacturers[i]->nparts,
+						     id);
+		if (info) {
+			nor->manufacturer = manufacturers[i];
+			return info;
+		}
+	}
+
+
+	dev_err(nor->dev, "unrecognized ALT id %04x\n", id);
+	return ERR_PTR(-ENODEV);
+}
+
+static const struct flash_info *spi_nor_atmel_id(struct spi_nor *nor)
+{
+	u8 *data = nor->bouncebuf;
+	u16 id;
+	int err;
+	size_t i;
+	const struct flash_info *info;
+	struct spi_mem_op op =
+		SPI_MEM_OP(SPI_MEM_OP_CMD(0x15, 1),
+			   SPI_MEM_OP_NO_ADDR,
+			   SPI_MEM_OP_NO_DUMMY,
+			   SPI_MEM_OP_DATA_IN(2, data, 1));
+
+	BUG_ON(!nor->spimem);
+
+	err = spi_mem_exec_op(nor->spimem, &op);
+	if (err < 0) {
+		dev_err(nor->dev, "error %d reading atmel ID\n", err);
+		return ERR_PTR(err);
+	}
+	id = (data[1] << 8) | data[0];
+
+	for (i = 0; i < ARRAY_SIZE(manufacturers); i++) {
+		info = spi_nor_search_part_by_atmel_id(manufacturers[i]->parts,
+						       manufacturers[i]->nparts,
+						     id);
+		if (info) {
+			nor->manufacturer = manufacturers[i];
+			return info;
+		}
+	}
+
+	dev_err(nor->dev, "unrecognized ATMEL id %04x\n", id);
+	return ERR_PTR(-ENODEV);
+}
+
 static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
 			size_t *retlen, u_char *buf)
 {
@@ -3295,6 +3418,17 @@
 	return NULL;
 }
 
+static void sst_write_enable(struct spi_nor *nor)
+{
+	u8 *pzero = nor->bouncebuf;
+
+	spi_nor_write_enable(nor);
+	sst_ewrsr(nor);
+
+	pzero[0] = 0;
+	spi_nor_write_sr(nor, pzero, 1);
+}
+
 static const struct flash_info *spi_nor_get_flash_info(struct spi_nor *nor,
 						       const char *name)
 {
@@ -3310,6 +3444,12 @@
 	if (!info || info->id) {
 		const struct flash_info *jinfo;
 
+		jinfo = spi_nor_alt_read_id(nor);
+		if (IS_ERR(jinfo))
+			/* try ATMEL */
+			jinfo = spi_nor_atmel_id(nor);
+		if (IS_ERR(jinfo))
+			/* try JEDEC */
 		jinfo = spi_nor_detect(nor);
 		if (IS_ERR(jinfo))
 			return jinfo;
@@ -3485,6 +3625,9 @@
 	if (spi_nor_use_parallel_locking(nor))
 		init_waitqueue_head(&nor->rww.wait);
 
+	if (info->flags & SST_EWRSR)
+		sst_write_enable(nor);
+
 	/*
 	 * Configure the SPI memory:
 	 * - select op codes for (Fast) Read, Page Program and Sector Erase.
diff -ruw linux-6.13.12/drivers/mtd/spi-nor/core.h linux-6.13.12-fbx/drivers/mtd/spi-nor/core.h
--- linux-6.13.12/drivers/mtd/spi-nor/core.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/spi-nor/core.h	2025-09-25 17:40:33.471356833 +0200
@@ -505,6 +505,7 @@
 struct flash_info {
 	char *name;
 	const struct spi_nor_id *id;
+	uint32_t alt_probe_id;
 	size_t size;
 	unsigned sector_size;
 	u16 page_size;
@@ -521,6 +522,9 @@
 #define SPI_NOR_NO_ERASE		BIT(6)
 #define SPI_NOR_QUAD_PP			BIT(8)
 #define SPI_NOR_RWW			BIT(9)
+#define ALT_PROBE			BIT(10) /* only match during alt_probe */
+#define ALT_PROBE_ATMEL			BIT(11) /* only match during alt_probe_atmel */
+#define SST_EWRSR			BIT(12) /* EWRSR opcode before WRSR */
 
 	u8 no_sfdp_flags;
 #define SPI_NOR_SKIP_SFDP		BIT(0)
diff -ruw linux-6.13.12/drivers/mtd/spi-nor/eon.c linux-6.13.12-fbx/drivers/mtd/spi-nor/eon.c
--- linux-6.13.12/drivers/mtd/spi-nor/eon.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/spi-nor/eon.c	2025-09-25 17:40:33.471356833 +0200
@@ -62,6 +62,13 @@
 	}, {
 		.id = SNOR_ID(0x1c, 0x70, 0x19),
 		.name = "en25qh256",
+	}, {
+		/* Used on Freebox Gateways ... */
+		.id = SNOR_ID(0x1c, 0x05, 0x00),
+		.alt_probe_id = 0x1c05,
+		.size = SZ_64K,
+		.name = "en25f05",
+		.flags = ALT_PROBE,
 	},
 };
 
diff -ruw linux-6.13.12/drivers/mtd/spi-nor/macronix.c linux-6.13.12-fbx/drivers/mtd/spi-nor/macronix.c
--- linux-6.13.12/drivers/mtd/spi-nor/macronix.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/spi-nor/macronix.c	2025-09-25 17:40:33.471356833 +0200
@@ -56,6 +56,11 @@
 		.size = SZ_64K,
 		.no_sfdp_flags = SECT_4K,
 	}, {
+		.id = SNOR_ID(0xc2, 0x20, 0x11),
+		.name = "mx25v1006f",
+		.size = SZ_128K,
+		.no_sfdp_flags = SECT_4K,
+	}, {
 		.id = SNOR_ID(0xc2, 0x20, 0x12),
 		.name = "mx25l2005a",
 		.size = SZ_256K,
@@ -199,7 +204,15 @@
 		.name = "mx25l3255e",
 		.size = SZ_4M,
 		.no_sfdp_flags = SECT_4K,
+	}, {
+		/* Used on Freebox Gateways ... */
+		.id = SNOR_ID(0xc2, 0x05, 0x00),
+		.alt_probe_id = 0xc205,
+		.name = "mx25l512",
+		.size = SZ_64K,
+		.flags = ALT_PROBE,
 	},
+
 	/*
 	 * This spares us of adding new flash entries for flashes that can be
 	 * initialized solely based on the SFDP data, but still need the
diff -ruw linux-6.13.12/drivers/mtd/spi-nor/sst.c linux-6.13.12-fbx/drivers/mtd/spi-nor/sst.c
--- linux-6.13.12/drivers/mtd/spi-nor/sst.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/mtd/spi-nor/sst.c	2025-09-25 17:40:33.475356852 +0200
@@ -164,7 +164,14 @@
 		.name = "sst26wf016b",
 		.size = SZ_2M,
 		.no_sfdp_flags = SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ,
-	}
+	}, {
+		/* Used on Freebox Gateways ... */
+		.id = SNOR_ID(0xbf, 0x48, 0x00),
+		.alt_probe_id = 0xbf48,
+		.name = "sst25vf512a",
+		.size = SZ_64K,
+		.flags = ALT_PROBE | SST_EWRSR,
+	},
 };
 
 static int sst_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
diff -ruw linux-6.13.12/drivers/net/ethernet/broadcom/Kconfig linux-6.13.12-fbx/drivers/net/ethernet/broadcom/Kconfig
--- linux-6.13.12/drivers/net/ethernet/broadcom/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/Kconfig	2025-09-25 17:40:33.551357229 +0200
@@ -68,6 +68,39 @@
 	  This driver supports the ethernet MACs in the Broadcom 63xx
 	  MIPS chipset family (BCM63XX).
 
+config BCM63XX_ENET_RUNNER
+	tristate "Broadcom 63xx (63138) runner ethernet support"
+	select MII
+	select FIXED_PHY
+	select PHYLIB
+	select BCM7XXX_PHY
+	select BROADCOM_PHY
+	select SOC_BCM63XX_RDP
+
+config BCM63158_SF2
+	tristate "Broadcom 63158 SF2 support"
+	select MII
+	select PHYLINK
+	select BCM7XXX_PHY
+	select BROADCOM_PHY
+	select NET_DSA
+	select NET_DSA_TAG_BRCM_FBX
+
+config BCM63158_ENET_RUNNER
+	tristate "Broadcom 63158 runner ethernet support"
+	select MII
+	select PHYLINK
+	select SOC_BCM63XX_XRDP
+
+config BCM63158_ENET_RUNNER_FF
+	bool "fastpath support for freebox boards"
+	depends on BCM63158_ENET_RUNNER
+	select IP_FFN
+	select IPV6_FFN
+	select IPV6_SIT_6RD
+	select BRIDGE
+	select FBXBRIDGE
+
 config BCMGENET
 	tristate "Broadcom GENET internal MAC support"
 	depends on HAS_IOMEM
diff -ruw linux-6.13.12/drivers/net/ethernet/broadcom/Makefile linux-6.13.12-fbx/drivers/net/ethernet/broadcom/Makefile
--- linux-6.13.12/drivers/net/ethernet/broadcom/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/Makefile	2025-09-25 17:40:33.551357229 +0200
@@ -18,3 +18,5 @@
 obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
 obj-$(CONFIG_BNXT) += bnxt/
 obj-$(CONFIG_BCMASP) += asp2/
+obj-$(CONFIG_BCM63XX_ENET_RUNNER) += bcm63xx_enet_runner/
+obj-y += bcm63158/
diff -ruw linux-6.13.12/drivers/net/ethernet/cortina/Kconfig linux-6.13.12-fbx/drivers/net/ethernet/cortina/Kconfig
--- linux-6.13.12/drivers/net/ethernet/cortina/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/ethernet/cortina/Kconfig	2025-09-25 17:40:33.631357626 +0200
@@ -2,7 +2,7 @@
 # Cortina ethernet devices
 
 config NET_VENDOR_CORTINA
-	bool "Cortina Gemini devices"
+	bool "Cortina devices"
 	default y
 	help
 	  If you have a network (Ethernet) card belonging to this class, say Y
@@ -20,4 +20,28 @@
 	help
 	  This driver supports StorLink SL351x (Gemini) dual Gigabit Ethernet.
 
+config CA_NI_ENET
+	tristate "Cortina ni ethernet driver (for 827x)"
+	depends on ARCH_CORTINA || MACH_CORTINA_SATURN || COMPILE_TEST
+	help
+	  Not usable at the same time as cortina-network-engine
+	  proprietary module. Only use to ease low level development
+	  or if you need NFS root.
+
+config CORTINA_NETWORK_ENGINE
+	tristate "Cortina Network Engine driver (for ca8289)"
+	depends on ARCH_CORTINA || COMPILE_TEST
+
+if CORTINA_NETWORK_ENGINE
+
+config CORTINA_NETWORK_ENGINE_DEBUGFS
+	bool "DebugFS entry"
+	default n
+	help
+	  If selected, the network engine driver will create a debugfs
+	  entry letting us dump the internal state of the engine
+	  from userland (counters, ...).
+
+endif # CORTINA_NETWORK_ENGINE
+
 endif # NET_VENDOR_CORTINA
diff -ruw linux-6.13.12/drivers/net/ethernet/cortina/Makefile linux-6.13.12-fbx/drivers/net/ethernet/cortina/Makefile
--- linux-6.13.12/drivers/net/ethernet/cortina/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/ethernet/cortina/Makefile	2025-09-25 17:40:33.631357626 +0200
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0
-# Makefile for the Cortina Gemini network device drivers.
 
 obj-$(CONFIG_GEMINI_ETHERNET) += gemini.o
+obj-$(CONFIG_CA_NI_ENET) += ca_ni_enet.o
+
+obj-y += network_engine/
diff -ruw linux-6.13.12/drivers/net/ethernet/marvell/Kconfig linux-6.13.12-fbx/drivers/net/ethernet/marvell/Kconfig
--- linux-6.13.12/drivers/net/ethernet/marvell/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/ethernet/marvell/Kconfig	2025-09-25 17:40:33.783358380 +0200
@@ -23,6 +23,7 @@
 	depends on INET
 	select PHYLIB
 	select MVMDIO
+	select MII
 	help
 	  This driver supports the gigabit ethernet MACs in the
 	  Marvell Discovery PPC/MIPS chipset family (MV643XX) and
@@ -31,6 +32,15 @@
 	  Some boards that use the Discovery chipset are the Momenco
 	  Ocelot C and Jaguar ATX and Pegasos II.
 
+config MV643XX_ETH_FBX_FF
+	bool "fastpath support for freebox boards"
+	depends on MV643XX_ETH
+	select IP_FFN
+	select IPV6_FFN
+	select IPV6_SIT_6RD
+	select BRIDGE
+	select FBXBRIDGE
+
 config MVMDIO
 	tristate "Marvell MDIO interface support"
 	depends on HAS_IOMEM
@@ -90,6 +100,7 @@
 	select MVMDIO
 	select PHYLINK
 	select PAGE_POOL
+	select MII
 	help
 	  This driver supports the network interface units in the
 	  Marvell ARMADA 375, 7K and 8K SoCs.
@@ -99,6 +110,15 @@
 	depends on (PTP_1588_CLOCK = y && MVPP2 = y) || \
 		   (PTP_1588_CLOCK && MVPP2 = m)
 
+config MVPP2_FBX_FF
+	bool "fastpath support for freebox boards"
+	depends on MVPP2
+	select IP_FFN
+	select IPV6_FFN
+	select IPV6_SIT_6RD
+	select BRIDGE
+	select FBXBRIDGE
+
 config PXA168_ETH
 	tristate "Marvell pxa168 ethernet support"
 	depends on HAS_IOMEM
diff -ruw linux-6.13.12/drivers/net/ethernet/qualcomm/Kconfig linux-6.13.12-fbx/drivers/net/ethernet/qualcomm/Kconfig
--- linux-6.13.12/drivers/net/ethernet/qualcomm/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/ethernet/qualcomm/Kconfig	2025-09-25 17:40:33.931359114 +0200
@@ -62,5 +62,6 @@
 	  Precision Clock Synchronization Protocol.
 
 source "drivers/net/ethernet/qualcomm/rmnet/Kconfig"
+source "drivers/net/ethernet/qualcomm/ipq95xx/Kconfig"
 
 endif # NET_VENDOR_QUALCOMM
diff -ruw linux-6.13.12/drivers/net/ethernet/qualcomm/Makefile linux-6.13.12-fbx/drivers/net/ethernet/qualcomm/Makefile
--- linux-6.13.12/drivers/net/ethernet/qualcomm/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/ethernet/qualcomm/Makefile	2025-09-25 17:40:33.931359114 +0200
@@ -12,3 +12,4 @@
 obj-y += emac/
 
 obj-$(CONFIG_RMNET) += rmnet/
+obj-y += ipq95xx/
diff -ruw linux-6.13.12/drivers/net/mdio/Kconfig linux-6.13.12-fbx/drivers/net/mdio/Kconfig
--- linux-6.13.12/drivers/net/mdio/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/mdio/Kconfig	2025-09-25 17:40:34.031359610 +0200
@@ -105,6 +105,10 @@
 config MDIO_CAVIUM
 	tristate
 
+config MDIO_CA_NI
+	tristate "Cortina ni mdio driver (for 82xx)"
+	depends on ARCH_CORTINA || MACH_CORTINA_SATURN || COMPILE_TEST
+
 config MDIO_GPIO
 	tristate "GPIO lib-based bitbanged MDIO buses"
 	depends on MDIO_BITBANG
diff -ruw linux-6.13.12/drivers/net/mdio/Makefile linux-6.13.12-fbx/drivers/net/mdio/Makefile
--- linux-6.13.12/drivers/net/mdio/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/mdio/Makefile	2025-09-25 17:40:34.031359610 +0200
@@ -10,6 +10,7 @@
 obj-$(CONFIG_MDIO_BCM_UNIMAC)		+= mdio-bcm-unimac.o
 obj-$(CONFIG_MDIO_BITBANG)		+= mdio-bitbang.o
 obj-$(CONFIG_MDIO_CAVIUM)		+= mdio-cavium.o
+obj-$(CONFIG_MDIO_CA_NI)		+= mdio-ca-ni.o
 obj-$(CONFIG_MDIO_GPIO)			+= mdio-gpio.o
 obj-$(CONFIG_MDIO_HISI_FEMAC)		+= mdio-hisi-femac.o
 obj-$(CONFIG_MDIO_I2C)			+= mdio-i2c.o
diff -ruw linux-6.13.12/drivers/net/mdio/of_mdio.c linux-6.13.12-fbx/drivers/net/mdio/of_mdio.c
--- linux-6.13.12/drivers/net/mdio/of_mdio.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/mdio/of_mdio.c	2025-09-25 17:40:34.035359629 +0200
@@ -221,6 +221,8 @@
 	mdio->reset_post_delay_us = 0;
 	of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us);
 
+	mdio->keep_broken_phy = of_property_read_bool(np, "keep-broken-phy");
+
 	/* Register the MDIO bus */
 	rc = __mdiobus_register(mdio, owner);
 	if (rc)
diff -ruw linux-6.13.12/drivers/net/phy/Kconfig linux-6.13.12-fbx/drivers/net/phy/Kconfig
--- linux-6.13.12/drivers/net/phy/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/Kconfig	2025-09-25 17:40:34.039359649 +0200
@@ -207,6 +207,10 @@
 	help
 	  Currently supports the CS4340 phy.
 
+config CORTINA_NI_PHY
+	tristate "Cortina 827x integrated gigabit phy"
+	depends on ARCH_CORTINA || MACH_CORTINA_SATURN || COMPILE_TEST
+
 config DAVICOM_PHY
 	tristate "Davicom PHYs"
 	help
@@ -338,6 +342,9 @@
 
 source "drivers/net/phy/qcom/Kconfig"
 
+config QCA8084_PHY
+	tristate "Qualcomm QCA8084 Quad-PHY"
+
 config QSEMI_PHY
 	tristate "Quality Semiconductor PHYs"
 	help
diff -ruw linux-6.13.12/drivers/net/phy/Makefile linux-6.13.12-fbx/drivers/net/phy/Makefile
--- linux-6.13.12/drivers/net/phy/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/Makefile	2025-09-25 17:40:34.039359649 +0200
@@ -55,6 +55,7 @@
 obj-$(CONFIG_BROADCOM_PHY)	+= broadcom.o
 obj-$(CONFIG_CICADA_PHY)	+= cicada.o
 obj-$(CONFIG_CORTINA_PHY)	+= cortina.o
+obj-$(CONFIG_CORTINA_NI_PHY)	+= cortina_ni.o
 obj-$(CONFIG_DAVICOM_PHY)	+= davicom.o
 obj-$(CONFIG_DP83640_PHY)	+= dp83640.o
 obj-$(CONFIG_DP83822_PHY)	+= dp83822.o
@@ -93,7 +94,15 @@
 obj-$(CONFIG_NXP_CBTX_PHY)	+= nxp-cbtx.o
 obj-$(CONFIG_NXP_TJA11XX_PHY)	+= nxp-tja11xx.o
 obj-y				+= qcom/
+obj-$(CONFIG_QCA8084_PHY)	+= qca8084.o
 obj-$(CONFIG_QSEMI_PHY)		+= qsemi.o
+realtek-objs += realtek.o
+ifdef CONFIG_HWMON
+realtek-objs += realtek-hwmon.o
+endif
+ifdef CONFIG_ARCH_CORTINA_VENUS
+realtek-objs += realtek-cortina.o
+endif
 obj-$(CONFIG_REALTEK_PHY)	+= realtek.o
 obj-$(CONFIG_RENESAS_PHY)	+= uPD60620.o
 obj-$(CONFIG_ROCKCHIP_PHY)	+= rockchip.o
diff -ruw linux-6.13.12/drivers/net/phy/aquantia/aquantia_main.c linux-6.13.12-fbx/drivers/net/phy/aquantia/aquantia_main.c
--- linux-6.13.12/drivers/net/phy/aquantia/aquantia_main.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/aquantia/aquantia_main.c	2025-09-25 17:40:34.039359649 +0200
@@ -13,6 +13,7 @@
 #include <linux/bitfield.h>
 #include <linux/of.h>
 #include <linux/phy.h>
+#include <linux/firmware.h>
 
 #include "aquantia.h"
 
@@ -21,6 +22,7 @@
 #define PHY_ID_AQR105	0x03a1b4a2
 #define PHY_ID_AQR106	0x03a1b4d0
 #define PHY_ID_AQR107	0x03a1b4e0
+#define PHY_ID_AQR112C	0x31c31d12
 #define PHY_ID_AQCS109	0x03a1b5c2
 #define PHY_ID_AQR405	0x03a1b4b0
 #define PHY_ID_AQR111	0x03a1b610
@@ -45,6 +47,9 @@
 #define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OFF	9
 #define MDIO_PHYXS_VEND_IF_STATUS_TYPE_OCSGMII	10
 
+#define MDIO_PHYXS_VEND_PROV2			0xC441
+#define MDIO_PHYXS_VEND_PROV2_USX_AN		BIT(3)
+
 #define MDIO_AN_VEND_PROV			0xc400
 #define MDIO_AN_VEND_PROV_1000BASET_FULL	BIT(15)
 #define MDIO_AN_VEND_PROV_1000BASET_HALF	BIT(14)
@@ -101,12 +106,94 @@
 #define MDIO_AN_RX_VEND_STAT3			0xe832
 #define MDIO_AN_RX_VEND_STAT3_AFR		BIT(0)
 
+/* Vendor specific 1, MDIO_MMD_VEND1 */
+#define VEND1_GLOBAL_FW_ID			0x0020
+#define VEND1_GLOBAL_FW_ID_MAJOR		GENMASK(15, 8)
+#define VEND1_GLOBAL_FW_ID_MINOR		GENMASK(7, 0)
+
+#define VEND1_GLOBAL_GEN_STAT2			0xc831
+#define VEND1_GLOBAL_GEN_STAT2_OP_IN_PROG	BIT(15)
+
+/* The following registers all have similar layouts; first the registers... */
+#define VEND1_GLOBAL_CFG_10M			0x0310
+#define VEND1_GLOBAL_CFG_100M			0x031b
+#define VEND1_GLOBAL_CFG_1G			0x031c
+#define VEND1_GLOBAL_CFG_2_5G			0x031d
+#define VEND1_GLOBAL_CFG_5G			0x031e
+#define VEND1_GLOBAL_CFG_10G			0x031f
+/* ...and now the fields */
+#define VEND1_GLOBAL_CFG_RATE_ADAPT		GENMASK(8, 7)
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_NONE	0
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_USX		1
+#define VEND1_GLOBAL_CFG_RATE_ADAPT_PAUSE	2
+
+#define VEND1_GLOBAL_MAILBOX_CONTROL		0x0200
+#define VEND1_GLOBAL_MAILBOX_EXECUTE		BIT(15)
+#define VEND1_GLOBAL_MAILBOX_WRITE		BIT(14)
+#define VEND1_GLOBAL_MAILBOX_RESET_CRC		BIT(12)
+#define VEND1_GLOBAL_MAILBOX_BUSY		BIT(8)
+
+#define VEND1_GLOBAL_MAILBOX_CRC		0x0201
+
+#define VEND1_GLOBAL_MAILBOX_ADDR_MSW		0x0202
+#define VEND1_GLOBAL_MAILBOX_ADDR_LSW		0x0203
+
+#define VEND1_GLOBAL_MAILBOX_DATA_MSW		0x0204
+#define VEND1_GLOBAL_MAILBOX_DATA_LSW		0x0205
+
+#define VEND1_GLOBAL_UP_CONTROL			0xc001
+#define VEND1_GLOBAL_UP_RESET			BIT(15)
+#define VEND1_GLOBAL_UP_RUN_STALL_OVERRIDE	BIT(6)
+#define VEND1_GLOBAL_UP_RUN_STALL		BIT(0)
+
+#define VEND1_GLOBAL_FAULT			0xc850
+
 /* Sleep and timeout for checking if the Processor-Intensive
  * MDIO operation is finished
  */
 #define AQR107_OP_IN_PROG_SLEEP		1000
 #define AQR107_OP_IN_PROG_TIMEOUT	100000
 
+/* registers in MDIO_MMD_VEND1 region */
+#define AQUANTIA_VND1_GLOBAL_SC			0x000
+#define  AQUANTIA_VND1_GLOBAL_SC_LP		BIT(0xb)
+
+/* global start rate, the protocol associated with this speed is used by default
+ * on SI.
+ */
+#define AQUANTIA_VND1_GSTART_RATE		0x31a
+#define  AQUANTIA_VND1_GSTART_RATE_OFF		0
+#define  AQUANTIA_VND1_GSTART_RATE_100M		1
+#define  AQUANTIA_VND1_GSTART_RATE_1G		2
+#define  AQUANTIA_VND1_GSTART_RATE_10G		3
+#define  AQUANTIA_VND1_GSTART_RATE_2_5G		4
+#define  AQUANTIA_VND1_GSTART_RATE_5G		5
+
+/* SYSCFG registers for 100M, 1G, 2.5G, 5G, 10G */
+#define AQUANTIA_VND1_GSYSCFG_BASE		0x31b
+#define AQUANTIA_VND1_GSYSCFG_100M		0
+#define AQUANTIA_VND1_GSYSCFG_1G		1
+#define AQUANTIA_VND1_GSYSCFG_2_5G		2
+#define AQUANTIA_VND1_GSYSCFG_5G		3
+#define AQUANTIA_VND1_GSYSCFG_10G		4
+
+/* addresses of memory segments in the phy */
+#define DRAM_BASE_ADDR		0x3FFE0000
+#define IRAM_BASE_ADDR		0x40000000
+
+/* firmware image format constants */
+#define VERSION_STRING_SIZE	0x40
+#define VERSION_STRING_OFFSET	0x0200
+#define HEADER_OFFSET		0x300
+
+struct aqr_fw_header {
+	u8	padding[4];
+	u8	iram_offset[3];
+	u8	iram_size[3];
+	u8	dram_offset[3];
+	u8	dram_size[3];
+};
+
 static int aqr107_get_sset_count(struct phy_device *phydev)
 {
 	return AQR107_SGMII_STAT_SZ;
@@ -552,7 +639,7 @@
 	build_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_FW_BUILD_ID, val);
 	prov_id = FIELD_GET(VEND1_GLOBAL_RSVD_STAT1_PROV_ID, val);
 
-	phydev_dbg(phydev, "FW %u.%u, Build %u, Provisioning %u\n",
+	phydev_info(phydev, "FW %u.%u, Build %u, Provisioning %u\n",
 		   fw_major, fw_minor, build_id, prov_id);
 }
 
@@ -628,6 +715,492 @@
 	return 0;
 }
 
+static const u16 _crc16_lookuptable[256] = {
+    0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+    0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+    0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+    0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+    0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+    0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+    0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+    0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+    0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+    0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+    0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+    0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+    0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+    0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+    0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+    0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+    0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+    0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+    0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+    0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+    0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+    0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+    0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+    0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+    0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+    0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+    0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+    0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+    0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+    0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+    0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+    0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0
+};
+
+static u16 crc16_ccitt(u16 crc, const u8 *buf, size_t len)
+{
+	while (len--) {
+		crc = ((crc << 8) ^ _crc16_lookuptable[((crc >> 8) ^
+							((*buf++) & 0x00FF))]);
+	}
+	return crc;
+}
+
+/* load data into the phy's memory */
+static int aqr112_load_chunk(struct phy_device *phydev, uint32_t addr,
+			     const uint8_t *data, size_t len)
+{
+	u16 crc = 0;
+	int up_crc;
+	size_t pos;
+	int err;
+
+	err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+			    VEND1_GLOBAL_MAILBOX_CONTROL,
+			    VEND1_GLOBAL_MAILBOX_RESET_CRC);
+	if (err < 0)
+		return err;
+
+	err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+			    VEND1_GLOBAL_MAILBOX_ADDR_MSW, addr >> 16);
+	if (err < 0)
+		return err;
+
+	err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+			    VEND1_GLOBAL_MAILBOX_ADDR_LSW, addr & 0xfffc);
+	if (err < 0)
+		return err;
+
+	for (pos = 0; pos < len; pos += min_t(u32, sizeof(u32), len - pos)) {
+		u32 word = 0;
+
+		memcpy(&word, &data[pos], min_t(u32, sizeof(u32), len - pos));
+
+		err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+				    VEND1_GLOBAL_MAILBOX_DATA_MSW,
+				    word >> 16);
+		if (err < 0)
+			return err;
+
+		err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+				    VEND1_GLOBAL_MAILBOX_DATA_LSW,
+				    word & 0xffff);
+		if (err < 0)
+			return err;
+
+		err = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+				    VEND1_GLOBAL_MAILBOX_CONTROL,
+				    (VEND1_GLOBAL_MAILBOX_EXECUTE |
+				     VEND1_GLOBAL_MAILBOX_WRITE));
+		if (err < 0)
+			return err;
+
+		/* keep a big endian CRC to match the phy processor */
+		word = cpu_to_be32(word);
+		crc = crc16_ccitt(crc, (uint8_t *)&word, sizeof(word));
+	}
+
+	up_crc = phy_read_mmd(phydev, MDIO_MMD_VEND1,
+			      VEND1_GLOBAL_MAILBOX_CRC);
+	if (up_crc < 0)
+		return up_crc;
+
+	if (crc != up_crc) {
+		phydev_err(phydev,
+			   "crc mismatch: calculated 0x%04hx phy 0x%04hx\n",
+			   crc, up_crc);
+		return -EIO;
+	}
+	return 0;
+}
+
+static u32 unpack_u24(const u8 *data)
+{
+	return (data[2] << 16) + (data[1] << 8) + data[0];
+}
+
+static int aqr_upload_firmware(struct phy_device *phydev,
+			       const char *name)
+{
+	struct device *dev = &phydev->mdio.dev;
+	const struct firmware *fw;
+	const struct aqr_fw_header *header;
+	char file_name[64];
+	char version[VERSION_STRING_SIZE + 1];
+	u32 primary_offset, iram_offset, iram_size, dram_offset, dram_size;
+	u16 calculated_crc, read_crc;
+	int ret;
+
+	scnprintf(file_name, sizeof (file_name),  "aquantia_phy/%s.uc", name);
+	ret = request_firmware_direct(&fw, file_name, dev);
+	if (ret) {
+		dev_err(dev, "failed to load firmware %s, ret: %d\n",
+			file_name, ret);
+		return ret;
+	}
+
+	if (fw->size < 16) {
+		dev_err(dev, "firmware too small\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	read_crc = (fw->data[fw->size - 2] << 8)  | fw->data[fw->size - 1];
+	calculated_crc = crc16_ccitt(0, fw->data, fw->size - 2);
+	if (read_crc != calculated_crc) {
+		dev_err(dev, "bad firmware crc: file 0x%04x "
+			"calculated 0x%04x\n",
+			read_crc, calculated_crc);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/* Find the DRAM and IRAM sections within the firmware file. */
+	primary_offset = ((fw->data[9] & 0xf) << 8 | fw->data[8]) << 12;
+	header = (const struct aqr_fw_header *)
+		&fw->data[primary_offset + HEADER_OFFSET];
+
+	iram_offset = primary_offset + unpack_u24(header->iram_offset);
+	iram_size = unpack_u24(header->iram_size);
+
+	dram_offset = primary_offset + unpack_u24(header->dram_offset);
+	dram_size = unpack_u24(header->dram_size);
+
+	strscpy(version,
+		(char *)&fw->data[dram_offset + VERSION_STRING_OFFSET],
+		VERSION_STRING_SIZE);
+	version[VERSION_STRING_SIZE] = 0;
+
+	dev_info(dev, "loading firmware version '%s'...\n", version);
+
+	/* stall the microprocessor */
+	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+			    VEND1_GLOBAL_UP_CONTROL,
+			    (VEND1_GLOBAL_UP_RUN_STALL |
+			     VEND1_GLOBAL_UP_RUN_STALL_OVERRIDE));
+	if (ret < 0)
+		goto fail;
+
+	ret = aqr112_load_chunk(phydev, DRAM_BASE_ADDR,
+				&fw->data[dram_offset],
+				dram_size);
+	if (ret)
+		goto fail;
+
+	ret = aqr112_load_chunk(phydev, IRAM_BASE_ADDR,
+				&fw->data[iram_offset],
+				iram_size);
+	if (ret)
+		goto fail;
+
+	/* make sure soft reset and low power mode are clear */
+	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0, 0);
+	if (ret)
+		goto fail;
+
+	/* Release the microprocessor. UP_RESET must be held for 100 usec. */
+	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+			    VEND1_GLOBAL_UP_CONTROL,
+			    (VEND1_GLOBAL_UP_RUN_STALL |
+			     VEND1_GLOBAL_UP_RUN_STALL_OVERRIDE |
+			     VEND1_GLOBAL_UP_RESET));
+	if (ret)
+		goto fail;
+
+	msleep(10);
+
+	ret = phy_write_mmd(phydev, MDIO_MMD_VEND1,
+			    VEND1_GLOBAL_UP_CONTROL,
+			    VEND1_GLOBAL_UP_RUN_STALL_OVERRIDE);
+	if (ret)
+		goto fail;
+
+	ret = 0;
+
+fail:
+	release_firmware(fw);
+	return ret;
+}
+
+static int aqr112c_config_init(struct phy_device *phydev)
+{
+	int ret;
+
+	/* Check that the PHY interface type is compatible */
+	if (phydev->interface != PHY_INTERFACE_MODE_NA &&
+	    phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+	    phydev->interface != PHY_INTERFACE_MODE_2500BASEX) {
+		phydev_err(phydev, "requested interface mode not supported\n");
+		return -ENODEV;
+	}
+
+	ret = aqr_upload_firmware(phydev, "aqr112");
+	if (ret)
+		return ret;
+
+	aqr_wait_reset_complete(phydev);
+	aqr107_chip_info(phydev);
+
+	return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+}
+
+static int aqr112c_get_features(struct phy_device *phydev)
+{
+	int ret;
+
+	ret = genphy_c45_pma_read_abilities(phydev);
+	if (ret)
+		return ret;
+
+	phy_set_max_speed(phydev, SPEED_2500);
+	return 0;
+}
+
+static int aqr112c_probe(struct phy_device *phydev)
+{
+	phydev->priv = devm_kzalloc(&phydev->mdio.dev,
+				    sizeof(struct aqr107_priv), GFP_KERNEL);
+	if (!phydev->priv)
+		return -ENOMEM;
+
+	return aqr_hwmon_probe(phydev);
+}
+
+static void dump_rate_config(struct phy_device *phydev,
+			     const char *str, u32 val)
+{
+	phydev_info(phydev,
+		    "cfg %s: "
+		    "serdes_mode:%u aneg:%d training:%d rstt:%d ra:%d\n",
+		    str,
+		    val & 0x7,
+		    (val >> 3) & 0x1,
+		    (val >> 4) & 0x1,
+		    (val >> 5) & 0x1,
+		    (val >> 7) & 0x3);
+}
+
+static void dump_rates_config(struct phy_device *phydev)
+{
+	dump_rate_config(phydev, "100M", phy_read_mmd(phydev, 0x1e, 0x31b));
+	dump_rate_config(phydev, "1G  ", phy_read_mmd(phydev, 0x1e, 0x31c));
+	dump_rate_config(phydev, "2.5G", phy_read_mmd(phydev, 0x1e, 0x31d));
+	dump_rate_config(phydev, "5G  ", phy_read_mmd(phydev, 0x1e, 0x31e));
+	dump_rate_config(phydev, "10G ", phy_read_mmd(phydev, 0x1e, 0x31f));
+}
+
+#define SERDES_MODE_XFI		0
+#define SERDES_MODE_XAUI	1
+#define SERDES_MODE_RXAUI	2
+#define SERDES_MODE_SGMII	3
+#define SERDES_MODE_OCSGMII	4
+#define SERDES_MODE_LOW_POWER	5
+
+#define RA_METHOD_NONE		0
+#define RA_METHOD_USX		1
+#define RA_METHOD_PAUSE		2
+
+enum system_if_rate {
+	SIF_RATE_100,
+	SIF_RATE_1G,
+	SIF_RATE_2_5G,
+	SIF_RATE_5G,
+	SIF_RATE_10G,
+	SIF_RATE_MAX,
+};
+
+struct system_if_cfg {
+	bool	used;
+	u32	serdes_mode;
+	bool	autoneg_en;
+	bool	training_en;
+	bool	serdes_rst_transition_en;
+	bool	serdes_silence_en;
+	u32	ra_method;
+};
+
+static u32 gen_system_if_cfg(const struct system_if_cfg *cfg)
+{
+	u32 val;
+
+	if (!cfg->used)
+		return (SERDES_MODE_LOW_POWER << 0);
+
+	val = cfg->serdes_mode << 0;
+	if (cfg->autoneg_en)
+		val |= (1 << 3);
+	if (cfg->training_en)
+		val |= (1 << 4);
+	if (cfg->serdes_rst_transition_en)
+		val |= (1 << 5);
+	if (cfg->serdes_silence_en)
+		val |= (1 << 6);
+	val |= (cfg->ra_method << 7);
+	return val;
+}
+
+static int aqr113_config_init(struct phy_device *phydev)
+{
+	struct system_if_cfg scfgs[SIF_RATE_MAX];
+	bool use_inband_aneg;
+	int ret, val;
+
+	/* Check that the PHY interface type is compatible */
+	if (phydev->interface != PHY_INTERFACE_MODE_SGMII &&
+	    phydev->interface != PHY_INTERFACE_MODE_2500BASEX &&
+	    phydev->interface != PHY_INTERFACE_MODE_XGMII &&
+	    phydev->interface != PHY_INTERFACE_MODE_USXGMII &&
+	    phydev->interface != PHY_INTERFACE_MODE_10GKR &&
+	    phydev->interface != PHY_INTERFACE_MODE_10GBASER)
+		return -ENODEV;
+
+	/* check if a valid firmware is loaded */
+	val = phy_read_mmd(phydev, MDIO_MMD_VEND1, VEND1_GLOBAL_FW_ID);
+	if (!val) {
+		ret = aqr_upload_firmware(phydev, "aqr113");
+		if (ret)
+			return ret;
+	}
+
+	ret = aqr_wait_reset_complete(phydev);
+	if (ret) {
+		phydev_err(phydev, "phy firmware load timeout\n");
+		return -ENODEV;
+	}
+
+	aqr107_chip_info(phydev);
+
+	/* set PHY in low power mode so we can configure protocols */
+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
+		      AQUANTIA_VND1_GLOBAL_SC,
+		      AQUANTIA_VND1_GLOBAL_SC_LP);
+	msleep(10);
+
+	/* set the default rate to enable the SI link */
+	phy_write_mmd(phydev, MDIO_MMD_VEND1, AQUANTIA_VND1_GSTART_RATE,
+		      AQUANTIA_VND1_GSTART_RATE_OFF);
+
+	/* unfortunately we don't know if phylink uses MLO_AN_INBAND
+	 * or MLO_AN_PHY from here, so this needs to be tuned
+	 * manually, depending on the device tree node managed =
+	 * "in-band-status" presence */
+	use_inband_aneg = false;
+
+	memset(scfgs, 0, sizeof (scfgs));
+	switch (phydev->interface) {
+	case PHY_INTERFACE_MODE_SGMII:
+		scfgs[SIF_RATE_100] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_SGMII,
+			.autoneg_en = use_inband_aneg,
+		};
+		scfgs[SIF_RATE_1G] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_SGMII,
+			.autoneg_en = use_inband_aneg,
+		};
+		break;
+	case PHY_INTERFACE_MODE_2500BASEX:
+		scfgs[SIF_RATE_2_5G] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_OCSGMII,
+			.autoneg_en = false,
+		};
+		break;
+	case PHY_INTERFACE_MODE_10GBASER:
+		scfgs[SIF_RATE_10G] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_XFI,
+			.autoneg_en = false,
+		};
+		break;
+	case PHY_INTERFACE_MODE_10GKR:
+		scfgs[SIF_RATE_10G] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_XFI,
+			.autoneg_en = true,
+		};
+		break;
+	case PHY_INTERFACE_MODE_USXGMII:
+		scfgs[SIF_RATE_10G] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_XFI,
+			.ra_method = RA_METHOD_USX,
+			.autoneg_en = false,
+		};
+		scfgs[SIF_RATE_2_5G] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_XFI,
+			.ra_method = RA_METHOD_USX,
+			.autoneg_en = false,
+		};
+		scfgs[SIF_RATE_1G] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_XFI,
+			.ra_method = RA_METHOD_USX,
+			.autoneg_en = false,
+		};
+		scfgs[SIF_RATE_100] = (struct system_if_cfg) {
+			.used = true,
+			.serdes_mode = SERDES_MODE_XFI,
+			.ra_method = RA_METHOD_USX,
+			.autoneg_en = false,
+		};
+		break;
+	default:
+		break;
+	}
+
+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
+		      AQUANTIA_VND1_GSYSCFG_BASE + AQUANTIA_VND1_GSYSCFG_100M,
+		      gen_system_if_cfg(&scfgs[SIF_RATE_100]));
+
+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
+		      AQUANTIA_VND1_GSYSCFG_BASE + AQUANTIA_VND1_GSYSCFG_1G,
+		      gen_system_if_cfg(&scfgs[SIF_RATE_1G]));
+
+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
+		      AQUANTIA_VND1_GSYSCFG_BASE + AQUANTIA_VND1_GSYSCFG_2_5G,
+		      gen_system_if_cfg(&scfgs[SIF_RATE_2_5G]));
+
+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
+		      AQUANTIA_VND1_GSYSCFG_BASE + AQUANTIA_VND1_GSYSCFG_5G,
+		      gen_system_if_cfg(&scfgs[SIF_RATE_5G]));
+
+	phy_write_mmd(phydev, MDIO_MMD_VEND1,
+		      AQUANTIA_VND1_GSYSCFG_BASE + AQUANTIA_VND1_GSYSCFG_10G,
+		      gen_system_if_cfg(&scfgs[SIF_RATE_10G]));
+
+	val = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_VEND_PROV2);
+	if (phydev->interface == PHY_INTERFACE_MODE_USXGMII &&
+	    use_inband_aneg)
+		val |= MDIO_PHYXS_VEND_PROV2_USX_AN;
+	else
+		val &= ~MDIO_PHYXS_VEND_PROV2_USX_AN;
+
+	phy_write_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_VEND_PROV2, val);
+
+	/* wake PHY back up */
+	phy_write_mmd(phydev, MDIO_MMD_VEND1, AQUANTIA_VND1_GLOBAL_SC, 0);
+	mdelay(10);
+
+	dump_rates_config(phydev);
+	return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT);
+}
+
 static int aqcs109_config_init(struct phy_device *phydev)
 {
 	int ret;
@@ -951,6 +1524,20 @@
 	.led_polarity_set = aqr_phy_led_polarity_set,
 },
 {
+	PHY_ID_MATCH_MODEL(PHY_ID_AQR112C),
+	.name		= "Aquantia AQR112C",
+	.probe		= aqr112c_probe,
+	.config_init	= aqr112c_config_init,
+	.config_aneg    = aqr_config_aneg,
+	.read_status	= aqr107_read_status,
+	.get_features	= aqr112c_get_features,
+	.suspend	= aqr107_suspend,
+	.resume		= aqr107_resume,
+	.get_sset_count	= aqr107_get_sset_count,
+	.get_strings	= aqr107_get_strings,
+	.get_stats	= aqr107_get_stats,
+},
+{
 	PHY_ID_MATCH_MODEL(PHY_ID_AQCS109),
 	.name		= "Aquantia AQCS109",
 	.probe		= aqr107_probe,
@@ -1103,7 +1690,7 @@
 	.name           = "Aquantia AQR113C",
 	.probe          = aqr107_probe,
 	.get_rate_matching = aqr107_get_rate_matching,
-	.config_init    = aqr113c_config_init,
+	.config_init    = aqr113_config_init,
 	.config_aneg    = aqr_config_aneg,
 	.config_intr    = aqr_config_intr,
 	.handle_interrupt       = aqr_handle_interrupt,
@@ -1206,6 +1793,7 @@
 	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR105) },
 	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR106) },
 	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR107) },
+	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR112C) },
 	{ PHY_ID_MATCH_MODEL(PHY_ID_AQCS109) },
 	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR405) },
 	{ PHY_ID_MATCH_MODEL(PHY_ID_AQR111) },
diff -ruw linux-6.13.12/drivers/net/phy/bcm7xxx.c linux-6.13.12-fbx/drivers/net/phy/bcm7xxx.c
--- linux-6.13.12/drivers/net/phy/bcm7xxx.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/bcm7xxx.c	2025-09-25 17:40:34.043359669 +0200
@@ -45,6 +45,7 @@
 
 struct bcm7xxx_phy_priv {
 	u64	*stats;
+	bool	printed;
 };
 
 static int bcm7xxx_28nm_d0_afe_config_init(struct phy_device *phydev)
@@ -142,6 +143,7 @@
 
 static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
 {
+	struct bcm7xxx_phy_priv *priv = phydev->priv;
 	u8 rev = PHY_BRCM_7XXX_REV(phydev->dev_flags);
 	u8 patch = PHY_BRCM_7XXX_PATCH(phydev->dev_flags);
 	u8 count;
@@ -153,8 +155,11 @@
 	if (rev == 0)
 		rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
 
-	pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n",
+	if (!priv->printed) {
+		pr_info("%s: %s PHY revision: 0x%02x, patch: %d\n",
 		     phydev_name(phydev), phydev->drv->name, rev, patch);
+		priv->printed = true;
+	}
 
 	/* Dummy read to a register to workaround an issue upon reset where the
 	 * internal inverter may not allow the first MDIO transaction to pass
@@ -387,11 +392,15 @@
 
 static int bcm7xxx_28nm_ephy_config_init(struct phy_device *phydev)
 {
+	struct bcm7xxx_phy_priv *priv = phydev->priv;
 	u8 rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
 	int ret = 0;
 
-	pr_info_once("%s: %s PHY revision: 0x%02x\n",
+	if (!priv->printed) {
+		pr_info("%s: %s PHY revision: 0x%02x\n",
 		     phydev_name(phydev), phydev->drv->name, rev);
+		priv->printed = true;
+	}
 
 	/* Dummy read to a register to workaround a possible issue upon reset
 	 * where the internal inverter may not allow the first MDIO transaction
@@ -921,6 +930,7 @@
 	BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
 	BCM7XXX_28NM_GPHY(PHY_ID_BCM7439_2, "Broadcom BCM7439 (2)"),
 	BCM7XXX_28NM_GPHY(PHY_ID_BCM7445, "Broadcom BCM7445"),
+	BCM7XXX_28NM_GPHY(PHY_ID_BCM63138, "Broadcom BCM63138"),
 	BCM7XXX_40NM_EPHY(PHY_ID_BCM7346, "Broadcom BCM7346"),
 	BCM7XXX_40NM_EPHY(PHY_ID_BCM7362, "Broadcom BCM7362"),
 	BCM7XXX_40NM_EPHY(PHY_ID_BCM7425, "Broadcom BCM7425"),
@@ -950,6 +960,7 @@
 	{ PHY_ID_BCM7435, 0xfffffff0, },
 	{ PHY_ID_BCM7445, 0xfffffff0, },
 	{ PHY_ID_BCM7712, 0xfffffff0, },
+	{ PHY_ID_BCM63138, 0xfffffff0, },
 	{ }
 };
 
diff -ruw linux-6.13.12/drivers/net/phy/broadcom.c linux-6.13.12-fbx/drivers/net/phy/broadcom.c
--- linux-6.13.12/drivers/net/phy/broadcom.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/broadcom.c	2025-09-25 17:40:34.043359669 +0200
@@ -22,6 +22,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/gpio/consumer.h>
+#include <linux/debugfs.h>
 
 #define BRCM_PHY_MODEL(phydev) \
 	((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
@@ -1407,6 +1408,477 @@
 	return genphy_read_status(phydev);
 }
 
+#define BRCM_MIIEXT_BANK            0x1f
+# define BRCM_MIIEXT_BANK_MASK       0xfff0
+# define BRCM_MIIEXT_ADDR_RANGE      0xffe0
+# define BRCM_MIIEXT_DEF_BANK        0x8000
+#define BRCM_MIIEXT_OFFSET          0x10
+# define BRCM_MIIEXT_OFF_MASK    0xf
+
+static int bcm63138_ephy_read(struct phy_device *phydev, int reg)
+{
+	uint32_t bank;
+	uint32_t offset;
+	int val;
+	int error;
+
+	if (reg < 0x20)
+		return phy_read(phydev, reg);
+
+	bank = reg & BRCM_MIIEXT_BANK_MASK;
+	offset = (reg & BRCM_MIIEXT_OFF_MASK) + BRCM_MIIEXT_OFFSET;
+
+	error = phy_write(phydev, BRCM_MIIEXT_BANK, bank);
+	val = phy_read(phydev, offset);
+	if (val < 0)
+		error = val;
+
+	error |= phy_write(phydev, BRCM_MIIEXT_BANK, BRCM_MIIEXT_DEF_BANK);
+	return (error < 0) ? error : val;
+}
+
+static int bcm63138_ephy_write(struct phy_device *phydev, int reg, u16 value)
+{
+	uint32_t bank;
+	uint32_t offset;
+	int error;
+
+	if (reg < 0x20)
+		return phy_write(phydev, reg, value);
+
+	bank = reg & BRCM_MIIEXT_BANK_MASK;
+	offset = (reg & BRCM_MIIEXT_OFF_MASK) + BRCM_MIIEXT_OFFSET;
+
+	error = phy_write(phydev, BRCM_MIIEXT_BANK, bank);
+	error |= phy_write(phydev, offset, value);
+	error |= phy_write(phydev, BRCM_MIIEXT_BANK, BRCM_MIIEXT_DEF_BANK);
+
+	return error;
+}
+
+static int bcm63138s_get_features(struct phy_device *phydev)
+{
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(features) = { 0, };
+
+	linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, features);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, features);
+	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, features);
+
+	linkmode_copy(phydev->supported, features);
+	linkmode_copy(phydev->advertising, features);
+
+	return 0;
+}
+
+/*
+ * BCM63138 SerDes phy amplitude setting registers.
+ *
+ * serdes_amplitude for bcm63138s attribute is of the form:
+ * full|half <amplitude_value_in_hex>
+ *
+ * it can be read and written.
+ *
+ * official limits are 0x3f-0x0c for amplitude values (unchecked
+ * here).
+ *
+ * values written to the attribute are reset to default after the
+ * interface is brought down & up.
+ */
+#define BCM63138S_AMP_VALUE_REG		0x8065
+#define  AMP_VALUE_MASK			(0x3f << 8)
+#define  AMP_VALUE_SHIFT		(8)
+
+#define BCM63138S_AMP_SCALE_REG		0x8066
+#define  AMP_SCALE_MASK			(1 << 1)
+#define  AMP_SCALE_1V			(0 << 1)
+#define  AMP_SCALE_0_5V			(1 << 1)
+
+/*
+ *
+ */
+static ssize_t bcm63138s_read_serdes_amplitude(struct file *file,
+					       char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	char buf[64];
+	struct phy_device *phydev = file->private_data;
+	bool amp_scale_full;
+	u32 amp_value;
+	u32 reg;
+
+	reg = bcm63138_ephy_read(phydev, BCM63138S_AMP_SCALE_REG);
+	amp_scale_full = (reg & AMP_SCALE_MASK) == AMP_SCALE_1V;
+
+	reg = bcm63138_ephy_read(phydev, BCM63138S_AMP_VALUE_REG);
+	amp_value = (reg & AMP_VALUE_MASK) >> AMP_VALUE_SHIFT;
+
+	snprintf(buf, sizeof (buf), "%s 0x%02x\n",
+		 (amp_scale_full) ? "full" : "half", amp_value);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+}
+
+#define SCALE_FULL_STR	"full "
+#define SCALE_HALF_STR	"half "
+
+/*
+ *
+ */
+static ssize_t bcm63138s_write_serdes_amplitude(struct file *file,
+						const char __user *user_buf,
+						size_t count, loff_t *ppos)
+{
+	int err;
+	char buf[32] = {0};
+	const char *val_start;
+	struct phy_device *phydev = file->private_data;
+	u32 scale_val;
+	u32 amp_val;
+	u32 reg;
+
+	/*
+	 * Yay, string parsing in the kernel.
+	 */
+	err = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf,
+				     count);
+	if (err < 0)
+		return err;
+
+	if (!strncmp(buf, SCALE_FULL_STR, strlen(SCALE_FULL_STR))) {
+		scale_val = AMP_SCALE_1V;
+		val_start = buf + strlen(SCALE_FULL_STR);
+	} else if (!strncmp(buf, SCALE_HALF_STR, strlen(SCALE_HALF_STR))) {
+		scale_val = AMP_SCALE_0_5V;
+		val_start = buf + strlen(SCALE_HALF_STR);
+	} else {
+		return -EINVAL;
+	}
+
+	err = kstrtou32(val_start, 16, &amp_val);
+	if (err)
+		return err;
+
+	/*
+	 * all done with parsing, now write the registers.
+	 */
+	amp_val  = (amp_val << AMP_VALUE_SHIFT) & AMP_VALUE_MASK;
+
+	reg = bcm63138_ephy_read(phydev, BCM63138S_AMP_SCALE_REG);
+	reg &= ~AMP_SCALE_MASK;
+	reg |= scale_val;
+	bcm63138_ephy_write(phydev, BCM63138S_AMP_SCALE_REG, reg);
+
+	reg = bcm63138_ephy_read(phydev, BCM63138S_AMP_VALUE_REG);
+	reg &= ~AMP_VALUE_MASK;
+	reg |= amp_val;
+	bcm63138_ephy_write(phydev, BCM63138S_AMP_VALUE_REG, reg);
+
+	return count;
+}
+
+static const struct file_operations fops_serdes_amplitude = {
+	.read = bcm63138s_read_serdes_amplitude,
+	.write = bcm63138s_write_serdes_amplitude,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/*
+ * probe for bcm63138s phy, just create the debugfs entries for serdes
+ * amplitude tuning.
+ *
+ * won't work for more than one 63138s phy in the system.
+ */
+static int bcm63138s_probe(struct phy_device *phydev)
+{
+	struct dentry *dent;
+
+	dent = debugfs_create_dir("bcm63138s_phy", NULL);
+	if (IS_ERR(dent) && PTR_ERR(dent) != -EEXIST) {
+		WARN(1, "unable to create debugfs entry for bcm63138s phy\n");
+		return PTR_ERR(dent);
+	}
+
+	debugfs_create_file("serdes_amplitude",
+			    0600, dent, phydev, &fops_serdes_amplitude);
+
+	return 0;
+}
+
+static int bcm63138s_config_init(struct phy_device *phydev)
+{
+	static const unsigned short cfg_1000x[] = {
+		0x0010, 0x0c2f,
+		0x8182, 0x4000,
+		0x8186, 0x003c,
+		0x8300, 0x015d,
+		0x8301, 0x7,
+		0x0,    0x1140,
+		0x0010, 0x2c2f
+	};
+	int err;
+	size_t i;
+
+	err = genphy_soft_reset(phydev);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < ARRAY_SIZE(cfg_1000x); i += 2)
+		bcm63138_ephy_write(phydev, cfg_1000x[i], cfg_1000x[i + 1]);
+
+	return 0;
+}
+
+/**
+ * ethtool_adv_to_fiber_adv_t
+ * @ethadv: the ethtool advertisement settings
+ *
+ * A small helper function that translates ethtool advertisement
+ * settings to phy autonegotiation advertisements for the
+ * MII_ADV register for fiber link.
+ */
+static inline u32 ethtool_adv_to_fiber_adv_t(unsigned long *adv)
+{
+	u32 result = 0;
+
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, adv))
+		result |= ADVERTISE_1000XFULL;
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT, adv))
+		result |= ADVERTISE_1000XFULL;
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, adv))
+		result |= ADVERTISE_1000XHALF;
+
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, adv) &&
+	    linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, adv))
+		result |= ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
+	else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, adv))
+		result |= ADVERTISE_1000XPAUSE;
+
+	return result;
+}
+
+static int bcm63138s_config_aneg(struct phy_device *phydev)
+{
+	int oldadv, adv, err;
+	int changed = 0;
+
+	if (phydev->autoneg != AUTONEG_ENABLE)
+		return genphy_setup_forced(phydev);
+
+	/* Setup fiber advertisement */
+	adv = phy_read(phydev, MII_ADVERTISE);
+	if (adv < 0)
+		return adv;
+
+	oldadv = adv;
+	adv &= ~(ADVERTISE_1000XFULL |
+		 ADVERTISE_1000XHALF |
+		 ADVERTISE_1000XPSE_ASYM |
+		 ADVERTISE_1000XPAUSE);
+	adv |= ethtool_adv_to_fiber_adv_t(phydev->advertising);
+
+	if (adv != oldadv) {
+		err = phy_write(phydev, MII_ADVERTISE, adv);
+		if (err < 0)
+			return err;
+
+		changed = 1;
+	}
+
+	if (changed == 0) {
+		/* Advertisement hasn't changed, but maybe aneg was never on to
+		 * begin with?	Or maybe phy was isolated?
+		 */
+		int ctl = phy_read(phydev, MII_BMCR);
+
+		if (ctl < 0)
+			return ctl;
+
+		if (!(ctl & BMCR_ANENABLE) || (ctl & BMCR_ISOLATE))
+			changed = 1; /* do restart aneg */
+	}
+
+	/* Only restart aneg if we are advertising something different
+	 * than we were before.
+	 */
+	if (changed > 0)
+		changed = genphy_restart_aneg(phydev);
+
+	return changed;
+}
+
+/**
+ * fiber_lpa_to_ethtool_lpa_t
+ * @lpa: value of the MII_LPA register for fiber link
+ *
+ * A small helper function that translates MII_LPA
+ * bits to ethtool LP advertisement settings.
+ */
+static void fiber_lpa_to_ethtool_lpa_t(u32 lpa, unsigned long *res)
+{
+	linkmode_zero(res);
+
+	if (lpa & LPA_1000XHALF)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, res);
+	if (lpa & LPA_1000XFULL)
+		linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, res);
+}
+
+static int bcm63138s_read_status_page_an(struct phy_device *phydev)
+{
+	int lpa, adv, common_adv;
+
+	lpa = phy_read(phydev, MII_LPA);
+	if (lpa < 0)
+		return lpa;
+
+	adv = phy_read(phydev, MII_ADVERTISE);
+	if (adv < 0)
+		return adv;
+
+	common_adv = lpa & adv;
+
+	phydev->speed = SPEED_10;
+	phydev->duplex = DUPLEX_HALF;
+	fiber_lpa_to_ethtool_lpa_t(lpa, phydev->lp_advertising);
+
+	phydev->pause = 0;
+	phydev->asym_pause = 0;
+
+	if (common_adv & (LPA_1000XHALF | LPA_1000XFULL)) {
+		phydev->speed = SPEED_1000;
+		if (common_adv & LPA_1000XFULL)
+			phydev->duplex = DUPLEX_FULL;
+	}
+
+	if (phydev->duplex == DUPLEX_FULL) {
+		if (!(lpa & LPA_1000XPAUSE)) {
+			phydev->pause = 0;
+			phydev->asym_pause = 0;
+		} else if ((lpa & LPA_1000XPAUSE_ASYM)) {
+			phydev->pause = 1;
+			phydev->asym_pause = 1;
+		} else {
+			phydev->pause = 1;
+			phydev->asym_pause = 0;
+		}
+	}
+
+	return 0;
+}
+
+static int bcm63138s_read_status_page_fixed(struct phy_device *phydev)
+{
+	int bmcr = phy_read(phydev, MII_BMCR);
+
+	if (bmcr < 0)
+		return bmcr;
+
+	if (bmcr & BMCR_FULLDPLX)
+		phydev->duplex = DUPLEX_FULL;
+	else
+		phydev->duplex = DUPLEX_HALF;
+
+	phydev->speed = SPEED_1000;
+	phydev->pause = 0;
+	phydev->asym_pause = 0;
+
+	return 0;
+}
+
+static int bcm63138s_read_status(struct phy_device *phydev)
+{
+	int err;
+
+	genphy_update_link(phydev);
+
+	if (phydev->autoneg == AUTONEG_ENABLE)
+		err = bcm63138s_read_status_page_an(phydev);
+	else
+		err = bcm63138s_read_status_page_fixed(phydev);
+
+	return err;
+}
+
+#define MISC_ADDR(base, channel)	base, channel
+
+#define AFE_TXCONFIG_0			MISC_ADDR(0x39, 1)
+#define AFE_TXCONFIG_1			MISC_ADDR(0x3a, 2)
+#define AFE_TX_IQ_RX_LP			MISC_ADDR(0x39, 0)
+#define AFE_TEMPSEN_OTHERS		MISC_ADDR(0x3b, 0)
+
+static void r_rc_cal_reset(struct phy_device *phydev)
+{
+	/* Reset R_CAL/RC_CAL Engine */
+	bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
+
+	/* Disable Reset R_AL/RC_CAL Engine */
+	bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
+}
+
+static int bcm63158_config_init(struct phy_device *phydev)
+{
+	/* Turn off AOF */
+	bcm_phy_write_misc(phydev, AFE_TXCONFIG_0, 0x0000);
+
+	/* 1g AB symmetry Iq */
+	bcm_phy_write_misc(phydev, AFE_TXCONFIG_1, 0x0BCC);
+
+	/* LPF BW */
+	bcm_phy_write_misc(phydev, AFE_TX_IQ_RX_LP, 0x233F);
+
+	/* RCAL +6LSB to make impedance from 112 to 100ohm */
+	bcm_phy_write_misc(phydev, AFE_TEMPSEN_OTHERS, 0xAD40);
+
+	/* since rcal make R smaller, make master current -4%  */
+	bcm_phy_write_misc(phydev, DSP_TAP10, 0x091B);
+
+	/* From EEE excel config file for Vitesse fix */
+	/* rx_on_tune 8 -> 0xf */
+	bcm_phy_write_misc(phydev, 0x0021, 0x0002, 0x87F6);
+
+	/* 100tx EEE bandwidth */
+	bcm_phy_write_misc(phydev, 0x0022, 0x0002, 0x017D);
+
+	/* enable ffe zero det for Vitesse interop */
+	bcm_phy_write_misc(phydev, 0x0026, 0x0002, 0x0015);
+
+	/* Reset R_CAL/RC_CAL engine */
+	r_rc_cal_reset(phydev);
+
+	return 0;
+}
+
+static int bcm63158_read_mmd(struct phy_device *phydev,
+			     int devnum, u16 regnum)
+{
+	struct mii_bus *bus = phydev->mdio.bus;
+	int phy_addr = phydev->mdio.addr;
+	int val;
+
+	/* MDIO_MMD_PCS/MDIO_PCS_EEE_ABLE is not set in broadcom PHY,
+	 * so we divert read_mmd to return fake value for this
+	 * register */
+	if (devnum == MDIO_MMD_PCS && regnum == MDIO_PCS_EEE_ABLE)
+		return MDIO_EEE_100TX | MDIO_EEE_1000T;
+
+	/* Write the desired MMD Devad */
+	__mdiobus_write(bus, phy_addr, MII_MMD_CTRL, devnum);
+
+	/* Write the desired MMD register address */
+	__mdiobus_write(bus, phy_addr, MII_MMD_DATA, regnum);
+
+	/* Select the Function : DATA with no post increment */
+	__mdiobus_write(bus, phy_addr, MII_MMD_CTRL,
+			devnum | MII_MMD_CTRL_NOINCR);
+
+	/* Read the content of the MMD's selected register */
+	val = __mdiobus_read(bus, phy_addr, MII_MMD_DATA);
+	return val;
+}
+
+
 static struct phy_driver broadcom_drivers[] = {
 {
 	.phy_id		= PHY_ID_BCM5411,
@@ -1713,6 +2185,29 @@
 	.config_intr    = bcm_phy_config_intr,
 	.handle_interrupt = bcm_phy_handle_interrupt,
 	.link_change_notify	= bcm54xx_link_change_notify,
+}, {
+	.phy_id		= PHY_ID_BCM63138S,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Broadcom BCM63138S",
+	.probe		= bcm63138s_probe,
+	.get_features	= bcm63138s_get_features,
+	.config_init	= bcm63138s_config_init,
+	.config_aneg	= bcm63138s_config_aneg,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
+	.read_status	= bcm63138s_read_status,
+}, {
+	.phy_id		= PHY_ID_BCM63158,
+	.phy_id_mask	= 0xfffffff0,
+	.name		= "Broadcom BCM63158",
+	.features	= PHY_GBIT_FEATURES,
+	.flags		= PHY_IS_INTERNAL,
+	.config_init	= bcm63158_config_init,
+	.config_aneg	= genphy_config_aneg,
+	.read_status	= genphy_read_status,
+	.suspend	= genphy_suspend,
+	.resume		= genphy_resume,
+	.read_mmd	= bcm63158_read_mmd,
 } };
 
 module_phy_driver(broadcom_drivers);
@@ -1739,6 +2234,8 @@
 	{ PHY_ID_BCM53125, 0xfffffff0 },
 	{ PHY_ID_BCM53128, 0xfffffff0 },
 	{ PHY_ID_BCM89610, 0xfffffff0 },
+	{ PHY_ID_BCM63138S, 0xfffffff0 },
+	{ PHY_ID_BCM63158, 0xfffffff0 },
 	{ }
 };
 
diff -ruw linux-6.13.12/drivers/net/phy/mdio_bus.c linux-6.13.12-fbx/drivers/net/phy/mdio_bus.c
--- linux-6.13.12/drivers/net/phy/mdio_bus.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/mdio_bus.c	2025-09-25 17:40:34.047359689 +0200
@@ -685,7 +685,7 @@
 int __mdiobus_register(struct mii_bus *bus, struct module *owner)
 {
 	struct mdio_device *mdiodev;
-	struct gpio_desc *gpiod;
+	struct gpio_descs *gpiod;
 	bool prevent_c45_scan;
 	int i, err;
 
@@ -740,7 +740,7 @@
 	mutex_init(&bus->shared_lock);
 
 	/* assert bus level PHY GPIO reset */
-	gpiod = devm_gpiod_get_optional(&bus->dev, "reset", GPIOD_OUT_HIGH);
+	gpiod = devm_gpiod_get_array_optional(&bus->dev, "reset", GPIOD_OUT_HIGH);
 	if (IS_ERR(gpiod)) {
 		err = dev_err_probe(&bus->dev, PTR_ERR(gpiod),
 				    "mii_bus %s couldn't get reset GPIO\n",
@@ -750,7 +750,8 @@
 	} else	if (gpiod) {
 		bus->reset_gpiod = gpiod;
 		fsleep(bus->reset_delay_us);
-		gpiod_set_value_cansleep(gpiod, 0);
+		for (i = 0; i < gpiod->ndescs; i++)
+			gpiod_set_value_cansleep(gpiod->desc[i], 0);
 		if (bus->reset_post_delay_us > 0)
 			fsleep(bus->reset_post_delay_us);
 	}
@@ -792,8 +793,10 @@
 	}
 error_reset_gpiod:
 	/* Put PHYs in RESET to save power */
-	if (bus->reset_gpiod)
-		gpiod_set_value_cansleep(bus->reset_gpiod, 1);
+	if (bus->reset_gpiod) {
+		for (i = 0; i < bus->reset_gpiod->ndescs; i++)
+			gpiod_set_value_cansleep(bus->reset_gpiod->desc[i], 1);
+	}
 
 	device_del(&bus->dev);
 	return err;
@@ -822,8 +825,10 @@
 	}
 
 	/* Put PHYs in RESET to save power */
-	if (bus->reset_gpiod)
-		gpiod_set_value_cansleep(bus->reset_gpiod, 1);
+	if (bus->reset_gpiod) {
+		for (i = 0; i < bus->reset_gpiod->ndescs; i++)
+			gpiod_set_value_cansleep(bus->reset_gpiod->desc[i], 1);
+	}
 
 	device_del(&bus->dev);
 }
diff -ruw linux-6.13.12/drivers/net/phy/phy-c45.c linux-6.13.12-fbx/drivers/net/phy/phy-c45.c
--- linux-6.13.12/drivers/net/phy/phy-c45.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/phy-c45.c	2025-09-25 17:40:34.059359748 +0200
@@ -784,6 +784,16 @@
 		mii_10base_t1_adv_mod_linkmode_t(adv, val);
 	}
 
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+			      phydev->supported_eee)) {
+		val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV2);
+		if (val < 0)
+			return val;
+
+		linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+				 adv, val & MDIO_EEE_2_5GT);
+	}
+
 	return 0;
 }
 
@@ -831,6 +841,16 @@
 		mii_10base_t1_adv_mod_linkmode_t(lpa, val);
 	}
 
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+			      phydev->supported_eee)) {
+		val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE2);
+		if (val < 0)
+			return val;
+
+		linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT,
+				 lpa, val & MDIO_EEE_2_5GT);
+	}
+
 	return 0;
 }
 
@@ -1593,3 +1613,25 @@
 	.name           = "Generic Clause 45 PHY",
 	.read_status    = genphy_c45_read_status,
 };
+
+static int genphy_broken_c45_get_tunable(struct phy_device *phydev,
+					 struct ethtool_tunable *tuna,
+					 void *data)
+{
+	switch (tuna->id) {
+	case ETHTOOL_PHY_BROKEN:
+		*(u8*)data = 1;
+		return 0;
+	default:
+		return -ENOTSUPP;
+	}
+	return 0;
+}
+
+struct phy_driver genphy_broken_c45_driver = {
+	.phy_id         = 0xffffffff,
+	.phy_id_mask    = 0xffffffff,
+	.name           = "Generic Broken Clause 45 PHY",
+	.read_status    = genphy_c45_read_status,
+	.get_tunable	= genphy_broken_c45_get_tunable,
+};
diff -ruw linux-6.13.12/drivers/net/phy/phy-core.c linux-6.13.12-fbx/drivers/net/phy/phy-core.c
--- linux-6.13.12/drivers/net/phy/phy-core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/phy-core.c	2025-09-25 17:40:34.059359748 +0200
@@ -13,7 +13,7 @@
  */
 const char *phy_speed_to_str(int speed)
 {
-	BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 103,
+	BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 109,
 		"Enum ethtool_link_mode_bit_indices and phylib are out of sync. "
 		"If a speed or mode has been added please update phy_speed_to_str "
 		"and the PHY settings array.\n");
@@ -127,6 +127,7 @@
 	case PHY_INTERFACE_MODE_TRGMII:
 	case PHY_INTERFACE_MODE_USXGMII:
 	case PHY_INTERFACE_MODE_SGMII:
+	case PHY_INTERFACE_MODE_HISGMII:
 	case PHY_INTERFACE_MODE_SMII:
 	case PHY_INTERFACE_MODE_1000BASEX:
 	case PHY_INTERFACE_MODE_2500BASEX:
@@ -138,6 +139,12 @@
 	case PHY_INTERFACE_MODE_RXAUI:
 	case PHY_INTERFACE_MODE_XAUI:
 	case PHY_INTERFACE_MODE_1000BASEKX:
+	case PHY_INTERFACE_MODE_1000BASEPX_D:
+	case PHY_INTERFACE_MODE_1000BASEPX_U:
+	case PHY_INTERFACE_MODE_10000BASEPR_D:
+	case PHY_INTERFACE_MODE_10000BASEPR_U:
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_D:
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_U:
 		return 1;
 	case PHY_INTERFACE_MODE_QSGMII:
 	case PHY_INTERFACE_MODE_QUSGMII:
@@ -233,6 +240,10 @@
 	PHY_SETTING(  20000, FULL,  20000baseKR2_Full		),
 	PHY_SETTING(  20000, FULL,  20000baseMLD2_Full		),
 	/* 10G */
+	PHY_SETTING(  10000, FULL,  10000_1000basePRX_D_Full	),
+	PHY_SETTING(  10000, FULL,  10000_1000basePRX_U_Full	),
+	PHY_SETTING(  10000, FULL,  10000basePR_D_Full		),
+	PHY_SETTING(  10000, FULL,  10000basePR_U_Full		),
 	PHY_SETTING(  10000, FULL,  10000baseCR_Full		),
 	PHY_SETTING(  10000, FULL,  10000baseER_Full		),
 	PHY_SETTING(  10000, FULL,  10000baseKR_Full		),
@@ -248,6 +259,8 @@
 	PHY_SETTING(   2500, FULL,   2500baseT_Full		),
 	PHY_SETTING(   2500, FULL,   2500baseX_Full		),
 	/* 1G */
+	PHY_SETTING(   1000, FULL,   1000basePX_D_Full		),
+	PHY_SETTING(   1000, FULL,   1000basePX_U_Full		),
 	PHY_SETTING(   1000, FULL,   1000baseT_Full		),
 	PHY_SETTING(   1000, HALF,   1000baseT_Half		),
 	PHY_SETTING(   1000, FULL,   1000baseT1_Full		),
@@ -407,6 +420,10 @@
 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, modes);
 	if (of_property_read_bool(node, "eee-broken-10gkr"))
 		linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, modes);
+	if (of_property_read_bool(node, "eee-broken-2500t"))
+		linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, modes);
+	if (of_property_read_bool(node, "eee-broken-5000t"))
+		linkmode_set_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, modes);
 }
 
 /**
diff -ruw linux-6.13.12/drivers/net/phy/phy.c linux-6.13.12-fbx/drivers/net/phy/phy.c
--- linux-6.13.12/drivers/net/phy/phy.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/phy.c	2025-09-25 17:40:34.059359748 +0200
@@ -36,7 +36,7 @@
 #include <net/genetlink.h>
 #include <net/sock.h>
 
-#define PHY_STATE_TIME	HZ
+#define PHY_STATE_TIME	(HZ / 2)
 
 #define PHY_STATE_STR(_state)			\
 	case PHY_##_state:			\
diff -ruw linux-6.13.12/drivers/net/phy/phy_device.c linux-6.13.12-fbx/drivers/net/phy/phy_device.c
--- linux-6.13.12/drivers/net/phy/phy_device.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/phy_device.c	2025-09-25 17:40:34.059359748 +0200
@@ -884,7 +884,8 @@
 			return -EIO;
 	}
 
-	if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff) {
+	if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff ||
+	    !devs_in_pkg) {
 		/* If mostly Fs, there is no device there, then let's probe
 		 * MMD 0, as some 10G PHYs have zero Devices In package,
 		 * e.g. Cortina CS4315/CS4340 PHY.
@@ -894,9 +895,13 @@
 			return -EIO;
 
 		/* no device there, let's get out of here */
-		if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff)
+		if ((devs_in_pkg & 0x1fffffff) == 0x1fffffff ||
+		    !devs_in_pkg) {
+			if (bus->keep_broken_phy)
+				return 0;
 			return -ENODEV;
 	}
+	}
 
 	/* Now probe Device Identifiers for each device present. */
 	for (i = 1; i < num_ids; i++) {
@@ -1561,6 +1566,9 @@
 	 */
 	if (!d->driver) {
 		if (phydev->is_c45)
+			if (!phydev->c45_ids.mmds_present)
+				d->driver = &genphy_broken_c45_driver.mdiodrv.driver;
+			else
+				d->driver = &genphy_c45_driver.mdiodrv.driver;
 		else
 			d->driver = &genphy_driver.mdiodrv.driver;
@@ -3021,6 +3029,23 @@
 EXPORT_SYMBOL(phy_support_eee);
 
 /**
+ * phy_disable_eee - Disable EEE for the PHY
+ * @phydev: Target phy_device struct
+ *
+ * This function is used by MAC drivers for MAC's which don't support EEE.
+ * It disables EEE on the PHY layer.
+ */
+void phy_disable_eee(struct phy_device *phydev)
+{
+	linkmode_zero(phydev->advertising_eee);
+	phydev->eee_cfg.tx_lpi_enabled = false;
+	phydev->eee_cfg.eee_enabled = false;
+	/* don't let userspace re-enable EEE advertisement */
+	linkmode_fill(phydev->eee_broken_modes);
+}
+EXPORT_SYMBOL_GPL(phy_disable_eee);
+
+/**
  * phy_support_sym_pause - Enable support of symmetrical pause
  * @phydev: target phy_device struct
  *
@@ -3833,14 +3858,19 @@
 	if (rc)
 		goto err_mdio_bus;
 
+	rc = phy_driver_register(&genphy_broken_c45_driver, THIS_MODULE);
+	if (rc)
+		goto err_c45_broken;
+
 	rc = phy_driver_register(&genphy_driver, THIS_MODULE);
 	if (rc)
 		goto err_c45;
 
 	return 0;
-
 err_c45:
 	phy_driver_unregister(&genphy_c45_driver);
+err_c45_broken:
+	phy_driver_unregister(&genphy_broken_c45_driver);
 err_mdio_bus:
 	mdio_bus_exit();
 err_ethtool_phy_ops:
@@ -3854,6 +3884,7 @@
 
 static void __exit phy_exit(void)
 {
+	phy_driver_unregister(&genphy_broken_c45_driver);
 	phy_driver_unregister(&genphy_c45_driver);
 	phy_driver_unregister(&genphy_driver);
 	mdio_bus_exit();
diff -ruw linux-6.13.12/drivers/net/phy/phylink.c linux-6.13.12-fbx/drivers/net/phy/phylink.c
--- linux-6.13.12/drivers/net/phy/phylink.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/phylink.c	2025-09-25 17:40:34.059359748 +0200
@@ -227,8 +227,11 @@
 	case PHY_INTERFACE_MODE_QUSGMII:
 	case PHY_INTERFACE_MODE_SGMII:
 	case PHY_INTERFACE_MODE_GMII:
+	case PHY_INTERFACE_MODE_1000BASEPX_D:
+	case PHY_INTERFACE_MODE_1000BASEPX_U:
 		return SPEED_1000;
 
+	case PHY_INTERFACE_MODE_HISGMII:
 	case PHY_INTERFACE_MODE_2500BASEX:
 	case PHY_INTERFACE_MODE_10G_QXGMII:
 		return SPEED_2500;
@@ -242,6 +245,10 @@
 	case PHY_INTERFACE_MODE_10GBASER:
 	case PHY_INTERFACE_MODE_10GKR:
 	case PHY_INTERFACE_MODE_USXGMII:
+	case PHY_INTERFACE_MODE_10000BASEPR_D:
+	case PHY_INTERFACE_MODE_10000BASEPR_U:
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_D:
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_U:
 		return SPEED_10000;
 
 	case PHY_INTERFACE_MODE_25GBASER:
@@ -250,6 +257,7 @@
 	case PHY_INTERFACE_MODE_XLGMII:
 		return SPEED_40000;
 
+
 	case PHY_INTERFACE_MODE_INTERNAL:
 	case PHY_INTERFACE_MODE_NA:
 	case PHY_INTERFACE_MODE_MAX:
@@ -503,6 +511,7 @@
 		caps |= MAC_10000FD | MAC_5000FD;
 		fallthrough;
 
+	case PHY_INTERFACE_MODE_HISGMII:
 	case PHY_INTERFACE_MODE_10G_QXGMII:
 		caps |= MAC_2500FD;
 		fallthrough;
@@ -539,6 +548,8 @@
 		fallthrough;
 	case PHY_INTERFACE_MODE_1000BASEKX:
 	case PHY_INTERFACE_MODE_TRGMII:
+	case PHY_INTERFACE_MODE_1000BASEPX_D:
+	case PHY_INTERFACE_MODE_1000BASEPX_U:
 		caps |= MAC_1000FD;
 		break;
 
@@ -555,6 +566,10 @@
 	case PHY_INTERFACE_MODE_XAUI:
 	case PHY_INTERFACE_MODE_10GBASER:
 	case PHY_INTERFACE_MODE_10GKR:
+	case PHY_INTERFACE_MODE_10000BASEPR_D:
+	case PHY_INTERFACE_MODE_10000BASEPR_U:
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_D:
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_U:
 		caps |= MAC_10000FD;
 		break;
 
@@ -567,10 +582,10 @@
 		break;
 
 	case PHY_INTERFACE_MODE_INTERNAL:
+	case PHY_INTERFACE_MODE_NA:
 		caps |= ~0;
 		break;
 
-	case PHY_INTERFACE_MODE_NA:
 	case PHY_INTERFACE_MODE_MAX:
 		break;
 	}
@@ -901,6 +916,7 @@
 
 		switch (pl->link_config.interface) {
 		case PHY_INTERFACE_MODE_SGMII:
+		case PHY_INTERFACE_MODE_HISGMII:
 		case PHY_INTERFACE_MODE_PSGMII:
 		case PHY_INTERFACE_MODE_QSGMII:
 		case PHY_INTERFACE_MODE_QUSGMII:
@@ -924,6 +940,25 @@
 			phylink_caps_to_linkmodes(pl->supported, caps);
 			break;
 
+		case PHY_INTERFACE_MODE_1000BASEPX_D:
+			phylink_set(pl->supported, 1000basePX_D_Full);
+			break;
+		case PHY_INTERFACE_MODE_1000BASEPX_U:
+			phylink_set(pl->supported, 1000basePX_U_Full);
+			break;
+		case PHY_INTERFACE_MODE_10000BASEPR_D:
+			phylink_set(pl->supported, 10000basePR_D_Full);
+			break;
+		case PHY_INTERFACE_MODE_10000BASEPR_U:
+			phylink_set(pl->supported, 10000basePR_U_Full);
+			break;
+		case PHY_INTERFACE_MODE_10000_1000_BASEPRX_D:
+			phylink_set(pl->supported, 10000_1000basePRX_D_Full);
+			break;
+		case PHY_INTERFACE_MODE_10000_1000_BASEPRX_U:
+			phylink_set(pl->supported, 10000_1000basePRX_U_Full);
+			break;
+
 		default:
 			phylink_err(pl,
 				    "incorrect link mode %s for in-band status\n",
@@ -1113,6 +1148,7 @@
 	case PHY_INTERFACE_MODE_QSGMII:
 	case PHY_INTERFACE_MODE_QUSGMII:
 	case PHY_INTERFACE_MODE_USXGMII:
+	case PHY_INTERFACE_MODE_HISGMII:
 	case PHY_INTERFACE_MODE_10G_QXGMII:
 		/* These protocols are designed for use with a PHY which
 		 * communicates its negotiation result back to the MAC via
@@ -2182,12 +2218,13 @@
  * desired link mode(s) and negotiation style. This should be called from the
  * network device driver's &struct net_device_ops ndo_open() method.
  */
-void phylink_start(struct phylink *pl)
+static void __phylink_start(struct phylink *pl, bool silent)
 {
 	bool poll = false;
 
 	ASSERT_RTNL();
 
+	if (!silent)
 	phylink_info(pl, "configuring for %s/%s link mode\n",
 		     phylink_an_mode_str(pl->cur_link_an_mode),
 		     phy_modes(pl->link_config.interface));
@@ -2240,6 +2277,18 @@
 }
 EXPORT_SYMBOL_GPL(phylink_start);
 
+void phylink_start(struct phylink *pl)
+{
+	return __phylink_start(pl, false);
+}
+
+void phylink_start_silent(struct phylink *pl)
+{
+	return __phylink_start(pl, true);
+}
+
+EXPORT_SYMBOL_GPL(phylink_start_silent);
+
 /**
  * phylink_stop() - stop a phylink instance
  * @pl: a pointer to a &struct phylink returned from phylink_create()
@@ -2866,6 +2915,30 @@
 }
 EXPORT_SYMBOL_GPL(phylink_ethtool_set_eee);
 
+void phylink_support_eee(struct phylink *pl)
+{
+	if (pl->phydev)
+		phy_support_eee(pl->phydev);
+
+}
+EXPORT_SYMBOL_GPL(phylink_support_eee);
+
+void phylink_disable_eee(struct phylink *pl)
+{
+	if (pl->phydev)
+		phy_disable_eee(pl->phydev);
+
+}
+EXPORT_SYMBOL_GPL(phylink_disable_eee);
+
+void phylink_eee_update_cfg_timer(struct phylink *pl,
+				  unsigned int val)
+{
+	if (pl->phydev)
+		pl->phydev->eee_cfg.tx_lpi_timer = val;
+}
+EXPORT_SYMBOL_GPL(phylink_eee_update_cfg_timer);
+
 /* This emulates MII registers for a fixed-mode phy operating as per the
  * passed in state. "aneg" defines if we report negotiation is possible.
  *
@@ -3220,10 +3293,10 @@
 	struct phylink_link_state config;
 	int ret;
 
-	linkmode_copy(support, phy->supported);
+	linkmode_copy(support, pl->sfp_support);
 
 	memset(&config, 0, sizeof(config));
-	linkmode_copy(config.advertising, phy->advertising);
+	linkmode_copy(config.advertising, pl->sfp_support);
 	config.interface = PHY_INTERFACE_MODE_NA;
 	config.speed = SPEED_UNKNOWN;
 	config.duplex = DUPLEX_UNKNOWN;
@@ -3839,6 +3912,125 @@
 }
 EXPORT_SYMBOL_GPL(phylink_mii_c45_pcs_get_state);
 
+/*
+ * designed to be called from userland to override current link
+ * interface, used for both testing and to handle SFP from userland.
+ *
+ * While it could theoretically be used on phylink instance with a
+ * phy, phylink_of_phy_connect() or equivalent is called at netdevice
+ * probe time, so it's too late to override phy_interface, thus we
+ * restrict this to instances without phydev.
+ *
+ * For the same reasons, we don't allow this to be set on instance
+ * attached to an SFP bus, since the kernel will do the right thing
+ * when an SFP is plugged.
+ *
+ * The an_enabled has to be given because some devices either don't or
+ * only support autoneg for some interface, so we cannot rely on
+ * further ethtool call to enable/disable it, both the interface and
+ * autoneg have to be changed atomically.
+ */
+int phylink_set_interface(struct phylink *pl,
+			  phy_interface_t interface,
+			  bool an_enabled)
+{
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported1);
+	struct phylink_link_state config;
+	bool changed, changed_intf;
+	int ret;
+
+	if (pl->phydev)
+		return -ENOTSUPP;
+
+	if (pl->sfp_bus && pl->sfp_port)
+		return -ENOTSUPP;
+
+	memset(&config, 0, sizeof(config));
+	config.interface = PHY_INTERFACE_MODE_NA;
+	config.speed = SPEED_UNKNOWN;
+	config.duplex = DUPLEX_UNKNOWN;
+	config.pause = MLO_PAUSE_AN;
+	bitmap_fill(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
+	linkmode_copy(config.advertising, supported);
+
+	if (!an_enabled)
+		phylink_clear(config.advertising, Autoneg);
+
+	phylink_validate(pl, supported, &config);
+
+	config.interface = interface;
+	linkmode_copy(supported1, supported);
+
+	ret = phylink_validate(pl, supported1, &config);
+	if (ret) {
+		phylink_err(pl,
+			    "validation of %s/%s with support %*pb failed: %d\n",
+			    phylink_an_mode_str(pl->cfg_link_an_mode),
+			    phy_modes(config.interface),
+			    __ETHTOOL_LINK_MODE_MASK_NBITS, supported, ret);
+		return ret;
+	}
+
+	changed = !linkmode_equal(pl->supported, supported1) ||
+		!linkmode_equal(pl->link_config.advertising,
+				config.advertising);
+
+	if (changed) {
+		linkmode_copy(pl->supported, supported1);
+		linkmode_copy(pl->link_config.advertising, config.advertising);
+	}
+
+	changed_intf = (pl->link_config.interface != config.interface);
+
+	if (changed || changed_intf) {
+		if (pl->old_link_state) {
+			phylink_link_down(pl);
+			pl->old_link_state = false;
+		}
+		if (!test_bit(PHYLINK_DISABLE_STOPPED,
+			      &pl->phylink_disable_state)) {
+			phylink_pcs_poll_stop(pl);
+			cancel_work_sync(&pl->resolve);
+		}
+	}
+
+	if (changed_intf) {
+		pl->link_config.interface = config.interface;
+		phylink_info(pl, "switched to %s/%s link mode (userland)\n",
+			     phylink_an_mode_str(pl->cur_link_an_mode),
+			     phy_modes(pl->link_config.interface));
+	}
+
+	if ((changed || changed_intf) &&
+	    !test_bit(PHYLINK_DISABLE_STOPPED,
+		      &pl->phylink_disable_state)) {
+		phylink_mac_initial_config(pl, false);
+		phylink_run_resolve(pl);
+	}
+
+	return 0;
+
+}
+
+EXPORT_SYMBOL_GPL(phylink_set_interface);
+
+/*
+ * retrieve current interface & mode
+ */
+void phylink_get_interface(struct phylink *pl,
+			   phy_interface_t *interface,
+			   int *an_en,
+			   int *mode)
+{
+	*interface = pl->link_config.interface;
+	*an_en = linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+				   pl->link_config.advertising);
+	*mode = pl->cfg_link_an_mode;
+}
+
+EXPORT_SYMBOL_GPL(phylink_get_interface);
+
 static int __init phylink_init(void)
 {
 	for (int i = 0; i < ARRAY_SIZE(phylink_sfp_interface_preference); ++i)
diff -ruw linux-6.13.12/drivers/net/phy/realtek.c linux-6.13.12-fbx/drivers/net/phy/realtek.c
--- linux-6.13.12/drivers/net/phy/realtek.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/realtek.c	2025-09-25 17:40:34.063359768 +0200
@@ -13,6 +13,11 @@
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/clk.h>
+#include <linux/of.h>
+#include <dt-bindings/net/realtek-phy-rtl8211f.h>
+
+
+#include "realtek.h"
 
 #define RTL821x_PHYSR				0x11
 #define RTL821x_PHYSR_DUPLEX			BIT(13)
@@ -71,6 +76,7 @@
 #define RTL822X_VND1_SERDES_CTRL3			0x7580
 #define RTL822X_VND1_SERDES_CTRL3_MODE_MASK		GENMASK(5, 0)
 #define RTL822X_VND1_SERDES_CTRL3_MODE_SGMII			0x02
+#define RTL822X_VND1_SERDES_CTRL3_MODE_HISGMII			0x12
 #define RTL822X_VND1_SERDES_CTRL3_MODE_2500BASEX		0x16
 
 /* RTL822X_VND2_XXXXX registers are only accessible when phydev->is_c45
@@ -102,6 +108,75 @@
 
 #define RTL8211F_LED_COUNT			3
 
+#define RTL8211F_LCR_PAGE			0xd04
+#define RTL8211F_LCR_REG			0x10
+#define RTL8211F_LED_MODE_MASK(num)		(0x1b << ((num) * 5))
+#define RTL8211F_LED_MODE_SEL(num, mode)	((mode) << ((num) * 5))
+
+#define RTL8221B_SERDES_OPT_REG				0x697a
+#define  RTL8221B_SERDES_OPT_MODE_MASK			0x3f
+#define  RTL8221B_SERDES_OPT_MODE_2G5_SGMII		0x00
+#define  RTL8221B_SERDES_OPT_MODE_HISGMII_SGMII		0x01
+#define  RTL8221B_SERDES_OPT_MODE_2G5_ONLY		0x02
+#define  RTL8221B_SERDES_OPT_MODE_HISGMII_ONLY		0x03
+
+#define RTL8221B_SERDES_CTRL3_REG			0x7580
+#define  RTL8221B_SERDES_CTRL3_MODE_MASK		0x1f
+#define  RTL8221B_SERDES_CTRL3_MODE_SGMII		0x02
+#define  RTL8221B_SERDES_CTRL3_MODE_HISGMII		0x12
+#define  RTL8221B_SERDES_CTRL3_MODE_2G5BX		0x16
+#define  RTL8221B_SERDES_CTRL3_MODE_OFF			0x1f
+
+#define RTL8221B_SERDES_CTRL5_REG			0x7582
+#define  RTL8221B_SERDES_CTRL5_FDPX			(1 << 2)
+#define  RTL8221B_SERDES_CTRL5_SPEED_MASK		0x3003
+#define  RTL8221B_SERDES_CTRL5_SPEED_10			0x0000
+#define  RTL8221B_SERDES_CTRL5_SPEED_100		0x0001
+#define  RTL8221B_SERDES_CTRL5_SPEED_1000		0x0002
+#define  RTL8221B_SERDES_CTRL5_SPEED_2G5		0x1001
+#define  RTL8221B_SERDES_CTRL5_SPEED_2G5LITE		0x1003
+
+#define RTL8221B_FEDCR_REG				0xa400
+#define  RTL8221B_FEDCR_PCS_LB_EN			(1 << 14)
+
+#define RTL8221B_GBCR_REG				0xa412
+#define  RTL8221B_GBCR_ADV_1000BaseT			(1 << 9)
+
+#define RTL8221B_GANLPAR_REG				0xa414
+#define  RTL8221B_GANLPAR_LP_1000BaseTFD		(1 << 11)
+#define  RTL8221B_GANLPAR_LP_1000BaseTHD		(1 << 10)
+
+#define RTL8221B_LCR4_REG				0xd036
+#define  LCR4_SELECT_2G5				(1 << 5)
+#define  LCR4_SELECT_1G					(1 << 2)
+#define  LCR4_SELECT_100M				(1 << 1)
+#define  LCR4_SELECT_10M				(1 << 0)
+
+#define RTL8221B_LCR6_REG				0xd040
+#define  LCR6_BLINK_FREQ_MASK				(0x3 << 8)
+#define  LCR6_BLINK_FREQ_20MS				(0x0 << 8)
+#define  LCR6_BLINK_FREQ_40MS				(0x1 << 8)
+#define  LCR6_BLINK_FREQ_60MS				(0x2 << 8)
+#define  LCR6_BLINK_PATTERN_MODE_B			(0x1 << 5)
+
+#define RTL8221B_LCR7_REG				0xd044
+#define  LCR7_LED_EN_MASK(led)				(1 << (4 + (led)))
+#define  LCR7_LED_POL_HI_MASK(led)			(1 << (led))
+
+#define RTL8221B_RX_STATS_SELECT_REG			0xc800
+#define  RX_STATS_SELECT_UTP				0x5a02
+#define  RX_STATS_SELECT_SERDES				0x5a06
+
+#define RTL8221B_RX_STATS_CLEAR_REG			0xc802
+#define  RX_STATS_CLEAR_VAL				0x0073
+
+#define RTL8221B_RX_STATS_GOOD_PKT_LSB			0xc810
+#define RTL8221B_RX_STATS_GOOD_PKT_MSB			0xc812
+#define RTL8221B_RX_STATS_CRC_PKT			0xc814
+
+#define RTL8221B_UNKOWN_0xc40a				0xc40a
+#define RTL8221B_UNKOWN_0xc466				0xc466
+
 MODULE_DESCRIPTION("Realtek PHY driver");
 MODULE_AUTHOR("Johnson Leung");
 MODULE_LICENSE("GPL");
@@ -111,6 +186,8 @@
 	u16 phycr2;
 	bool has_phycr2;
 	struct clk *clk;
+#define MAX_LEDS 3
+	u8 leds_mode[MAX_LEDS];
 };
 
 static int rtl821x_read_page(struct phy_device *phydev)
@@ -371,12 +448,27 @@
 			    CTL1000_ENABLE_MASTER | CTL1000_AS_MASTER);
 }
 
+static void rtl8211f_config_led(struct phy_device *phydev)
+{
+	struct rtl821x_priv *priv = phydev->priv;
+	size_t i;
+
+	/* program each LED's mode field in the LED control register (LCR) */
+	for (i = 0; i < MAX_LEDS; ++i) {
+		phy_modify_paged(phydev, RTL8211F_LCR_PAGE, RTL8211F_LCR_REG,
+				RTL8211F_LED_MODE_MASK(i),
+				RTL8211F_LED_MODE_SEL(i, priv->leds_mode[i]));
+	}
+}
+
 static int rtl8211f_config_init(struct phy_device *phydev)
 {
 	struct rtl821x_priv *priv = phydev->priv;
 	struct device *dev = &phydev->mdio.dev;
-	u16 val_txdly, val_rxdly;
 	int ret;
+	u16 val_txdly, val_rxdly;
+
+	rtl8211f_config_led(phydev);
 
 	ret = phy_modify_paged_changed(phydev, 0xa43, RTL8211F_PHYCR1,
 				       RTL8211F_ALDPS_PLL_OFF | RTL8211F_ALDPS_ENABLE | RTL8211F_ALDPS_XTAL_OFF,
@@ -587,6 +679,70 @@
 	return phy_modify_paged(phydev, 0xd04, RTL8211F_LEDCR, mask, reg);
 }
 
+#ifdef CONFIG_OF_MDIO
+static int rtl8211f_dt_led_modes_get(struct phy_device *phydev)
+{
+	struct device *dev = &phydev->mdio.dev;
+	struct device_node *of_node = dev->of_node;
+	struct rtl821x_priv *priv = phydev->priv;
+	int nr, i;
+	char *led_dt_prop = "rtl8211f,led-mode";
+	uint8_t mode[MAX_LEDS << 1];
+
+	if (!of_node)
+		return -ENODEV;
+
+	nr = of_property_read_variable_u8_array(of_node, led_dt_prop, mode, 0,
+			ARRAY_SIZE(mode));
+
+	/* property absent (-EINVAL): keep the default LED modes */
+	if (nr == -EINVAL)
+		return 0;
+	if (nr < 0)
+		return nr;
+	/* nr must be even: the property holds (led index, led mode) pairs */
+	if (nr & 0x1)
+		return -EINVAL;
+	for (i = 0; i < nr; i += 2) {
+		if (mode[i] >= MAX_LEDS)
+			return -EINVAL;
+		if ((mode[i + 1] & ~RTL8211F_LED_MODE_MASK(0)) != 0)
+			return -EINVAL;
+		priv->leds_mode[mode[i]] = mode[i + 1];
+	}
+
+	return 0;
+}
+
+
+#else
+static int rtl8211f_dt_led_modes_get(struct phy_device *phydev)
+{
+	return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int rtl8211f_probe(struct phy_device *phydev)
+{
+	struct rtl821x_priv *priv;
+	int error;
+	u8 default_mode[MAX_LEDS] = {
+		[0] = RTL8211F_LED_MODE_10M | RTL8211F_LED_MODE_100M |
+			RTL8211F_LED_MODE_1000M | RTL8211F_LED_MODE_ACT,
+		[1] = RTL8211F_LED_MODE_10M | RTL8211F_LED_MODE_100M |
+			RTL8211F_LED_MODE_1000M,
+		[2] = RTL8211F_LED_MODE_1000M | RTL8211F_LED_MODE_ACT,
+	};
+
+	error = rtl821x_probe(phydev);
+	if (error)
+		return error;
+
+	priv = phydev->priv;
+	memcpy(priv->leds_mode, default_mode, sizeof(priv->leds_mode));
+	return rtl8211f_dt_led_modes_get(phydev);
+}
+
 static int rtl8211e_config_init(struct phy_device *phydev)
 {
 	int ret = 0, oldpage;
@@ -1121,6 +1277,7 @@
 	return phydev->phy_id == RTL_8221B && rtlgen_supports_mmd(phydev);
 }
 
+#if 0
 static int rtl8221b_vb_cg_c22_match_phy_device(struct phy_device *phydev)
 {
 	return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, false);
@@ -1130,6 +1287,7 @@
 {
 	return rtlgen_is_c45_match(phydev, RTL_8221B_VB_CG, true);
 }
+#endif
 
 static int rtl8221b_vn_cg_c22_match_phy_device(struct phy_device *phydev)
 {
@@ -1299,6 +1457,321 @@
 	return IRQ_HANDLED;
 }
 
+enum {
+	RTL8221B_SERDES_STATS_RX_GOOD_PACKETS,
+	RTL8221B_SERDES_STATS_RX_BAD_CRC_PACKETS,
+
+	RTL8221B_SERDES_STATS_COUNT,
+};
+
+struct rtl8221b_stat {
+	u64 v;
+};
+
+struct rtl8221b_priv {
+	struct rtl8221b_stat stats[RTL8221B_SERDES_STATS_COUNT];
+};
+
+static int rtl8221b_get_sset_count(struct phy_device *phydev)
+{
+	return RTL8221B_SERDES_STATS_COUNT;
+}
+
+struct rtl8221b_stat_desc {
+	const char *name;
+	int reg;
+	int size;
+};
+
+enum {
+	E_STAT_SIZE_U16,
+	E_STAT_SIZE_U32,
+};
+
+struct rtl8221b_stat_desc rtl8221b_stats[RTL8221B_SERDES_STATS_COUNT] = {
+	{
+		.name = "sgmii_rx_good_frames",
+		.reg = RTL8221B_RX_STATS_GOOD_PKT_LSB,
+		.size = E_STAT_SIZE_U32
+	},
+	{
+		.name = "sgmii_rx_crc_frames",
+		.reg = RTL8221B_RX_STATS_CRC_PKT,
+		.size = E_STAT_SIZE_U16
+	},
+};
+
+static void rtl8221b_get_strings(struct phy_device *phydev, u8 *data)
+{
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(rtl8221b_stats); ++i) {
+		strscpy(data + i * ETH_GSTRING_LEN, rtl8221b_stats[i].name,
+			ETH_GSTRING_LEN);
+	}
+}
+
+static void rtl8221b_get_stats(struct phy_device *phydev,
+			       struct ethtool_stats *stats,
+			       u64 *data)
+{
+	struct rtl8221b_priv *priv = phydev->priv;
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(rtl8221b_stats); ++i) {
+		struct rtl8221b_stat_desc *st_desc = &rtl8221b_stats[i];
+		struct rtl8221b_stat *st = &priv->stats[i];
+		u32 v;
+
+		switch (st_desc->size) {
+		case E_STAT_SIZE_U16:
+		default:
+			v = phy_read_mmd(phydev, MDIO_MMD_VEND2, st_desc->reg);
+			break;
+		case E_STAT_SIZE_U32:
+			v = phy_read_mmd(phydev, MDIO_MMD_VEND2, st_desc->reg) |
+				(phy_read_mmd(phydev, MDIO_MMD_VEND2,
+					      st_desc->reg + 2) << 16);
+			break;
+		}
+
+		st->v += v;
+		data[i] = st->v;
+	}
+
+	/*
+	 * FIXME: ask for an autoclear feature for these counters;
+	 * clear the counters explicitly in the meantime.
+	 */
+	phy_write_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_RX_STATS_CLEAR_REG,
+		      RX_STATS_CLEAR_VAL);
+}
+
+static int rtl8221b_probe(struct phy_device *phydev)
+{
+	phydev->priv = devm_kzalloc(&phydev->mdio.dev,
+				    sizeof (struct rtl8221b_priv), GFP_KERNEL);
+	if (!phydev->priv)
+		return -ENOMEM;
+
+	return realtek_hwmon_probe(phydev);
+}
+
+static int rtl8221b_config_init(struct phy_device *phydev)
+{
+	struct device *dev = &phydev->mdio.dev;
+
+	/*
+	 * reset autoneg and PMA/PMD MMD.
+	 */
+	phy_set_bits_mmd(phydev, MDIO_MMD_AN, MII_BMCR, BMCR_RESET);
+	phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MII_BMCR, BMCR_RESET);
+
+	/*
+	 * configure serdes side of the phy so that its speed is
+	 * chosen depending on the copper side autonegotiation.
+	 */
+	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND1, 0x75f3, BIT(0));
+	if (of_property_read_bool(dev->of_node, "realtek,hisgmii-sgmii")) {
+		phy_modify_mmd(phydev, MDIO_MMD_VEND1, RTL8221B_SERDES_OPT_REG,
+			       RTL8221B_SERDES_OPT_MODE_MASK,
+			       RTL8221B_SERDES_OPT_MODE_HISGMII_SGMII);
+	} else {
+		phy_modify_mmd(phydev, MDIO_MMD_VEND1, RTL8221B_SERDES_OPT_REG,
+			       RTL8221B_SERDES_OPT_MODE_MASK,
+			       RTL8221B_SERDES_OPT_MODE_2G5_SGMII);
+	}
+
+	/*
+	 * serdes mode change will be considered once copper link goes
+	 * down. make it go down by briefly activating the PCS
+	 * loopback feature.
+	 */
+	phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_FEDCR_REG,
+			 RTL8221B_FEDCR_PCS_LB_EN);
+	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_FEDCR_REG,
+			   RTL8221B_FEDCR_PCS_LB_EN);
+
+	/*
+	 * led configuration: enable only LED 2, active low.
+	 */
+	phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_LCR7_REG,
+			 LCR7_LED_EN_MASK(2));
+	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_LCR7_REG,
+			   LCR7_LED_POL_HI_MASK(2));
+	phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_LCR7_REG,
+			   LCR7_LED_EN_MASK(1) | LCR7_LED_EN_MASK(0));
+
+	/*
+	 * led blink frequency: 60 ms.
+	 */
+	phy_modify_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_LCR6_REG,
+		       LCR6_BLINK_FREQ_MASK, LCR6_BLINK_FREQ_60MS);
+
+	/*
+	 * led2 selects all possible speeds
+	 */
+	phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_LCR4_REG,
+			 LCR4_SELECT_10M | LCR4_SELECT_100M | LCR4_SELECT_1G |
+			 LCR4_SELECT_2G5);
+
+	/*
+	 * select SerDes side for RX stats.
+	 */
+	phy_write_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_RX_STATS_SELECT_REG,
+		      RX_STATS_SELECT_SERDES);
+	phy_write_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_RX_STATS_CLEAR_REG,
+		      RX_STATS_CLEAR_VAL);
+
+	return 0;
+}
+
+static int rtl8221b_get_features(struct phy_device *phydev)
+{
+	int err;
+
+	err = genphy_c45_pma_read_abilities(phydev);
+	if (err)
+		return err;
+
+	phy_set_max_speed(phydev, SPEED_2500);
+
+	return 0;
+}
+
+static int rtl8221b_config_aneg(struct phy_device *phydev)
+{
+	bool changed = false;
+	int err;
+	u16 v;
+
+	if (phydev->autoneg == AUTONEG_DISABLE)
+		return genphy_c45_pma_setup_forced(phydev);
+
+	err = genphy_c45_an_config_aneg(phydev);
+	if (err < 0)
+		return err;
+	changed = (err > 0) || changed;
+
+	/* note: a positive genphy_c45_an_config_aneg() return means the
+	 * advertisement changed; it is already folded into 'changed'. */
+
+	/*
+	 * 802.3-C45 doesn't provide a standardised way of advertising
+	 * 1000Base-T support, we have to use vendor registers for
+	 * this.
+	 */
+	v = 0;
+	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+			      phydev->advertising) ||
+	    linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+			      phydev->advertising))
+		v |= RTL8221B_GBCR_ADV_1000BaseT;
+
+	err = phy_modify_mmd_changed(phydev, MDIO_MMD_VEND2, RTL8221B_GBCR_REG,
+				     RTL8221B_GBCR_ADV_1000BaseT, v);
+	if (err < 0)
+		return err;
+	changed = (err > 0) || changed;
+
+	return genphy_c45_check_and_restart_aneg(phydev, changed);
+}
+
+static int rtl8221b_read_status(struct phy_device *phydev)
+{
+	int err;
+	int v;
+	bool fdpx;
+
+	if (phydev->autoneg == AUTONEG_ENABLE) {
+		/*
+		 * 802.3-C45 doesn't provide a standardised way of
+		 * getting LPA 1000Base-T support, we have to use
+		 * vendor registers for this.
+		 */
+		v = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_GANLPAR_REG);
+		if (v < 0)
+			return v;
+
+		/* 'v' holds valid LPA bits only when autoneg is enabled */
+		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+				 phydev->lp_advertising,
+				 v & RTL8221B_GANLPAR_LP_1000BaseTFD);
+		linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
+				 phydev->lp_advertising,
+				 v & RTL8221B_GANLPAR_LP_1000BaseTHD);
+	}
+
+	err = genphy_c45_read_status(phydev);
+	if (err)
+		return err;
+
+
+	if (!phydev->link || phydev->autoneg == AUTONEG_DISABLE)
+		return 0;
+
+	/*
+	 * sort out phydev interface depending on the current mode of
+	 * the SERDES on the PHY side.
+	 */
+	v = phy_read_mmd(phydev, MDIO_MMD_VEND1, RTL8221B_SERDES_CTRL3_REG);
+	v &= RTL8221B_SERDES_CTRL3_MODE_MASK;
+
+	switch (v) {
+	case RTL8221B_SERDES_CTRL3_MODE_SGMII:
+		phydev->interface = PHY_INTERFACE_MODE_SGMII;
+		break;
+	case RTL8221B_SERDES_CTRL3_MODE_HISGMII:
+		phydev->interface = PHY_INTERFACE_MODE_HISGMII;
+		break;
+	case RTL8221B_SERDES_CTRL3_MODE_2G5BX:
+		phydev->interface = PHY_INTERFACE_MODE_2500BASEX;
+		break;
+	default:
+		phydev_warn(phydev, "invalid SERDES mode: %u\n", v);
+		return -EIO;
+	}
+
+	v = phy_read_mmd(phydev, MDIO_MMD_VEND1, RTL8221B_SERDES_CTRL5_REG);
+	fdpx = !!(v & RTL8221B_SERDES_CTRL5_FDPX);
+	v &= RTL8221B_SERDES_CTRL5_SPEED_MASK;
+
+	switch (v) {
+	case RTL8221B_SERDES_CTRL5_SPEED_2G5:
+		phydev->speed = SPEED_2500;
+		break;
+	case RTL8221B_SERDES_CTRL5_SPEED_1000:
+		phydev->speed = SPEED_1000;
+		break;
+	case RTL8221B_SERDES_CTRL5_SPEED_100:
+		phydev->speed = SPEED_100;
+		break;
+	case RTL8221B_SERDES_CTRL5_SPEED_10:
+		phydev->speed = SPEED_10;
+		break;
+	default:
+		phydev_warn(phydev, "invalid device speed: %04x\n", v);
+	}
+
+	/*
+	 * XXX: as recommended by realtek for SerDes RX stats.
+	 */
+	if (phydev->speed == SPEED_2500) {
+		phy_write_mmd(phydev, MDIO_MMD_VEND2,
+			      RTL8221B_UNKOWN_0xc40a, 0x0);
+		phy_write_mmd(phydev, MDIO_MMD_VEND2,
+			      RTL8221B_UNKOWN_0xc466, 0x2);
+	} else {
+		phy_write_mmd(phydev, MDIO_MMD_VEND2,
+			      RTL8221B_UNKOWN_0xc40a, 0x0);
+		phy_write_mmd(phydev, MDIO_MMD_VEND2,
+			      RTL8221B_UNKOWN_0xc466, 0x0);
+	}
+
+	phydev->duplex = fdpx ? DUPLEX_FULL : DUPLEX_HALF;
+	return 0;
+}
+
 static struct phy_driver realtek_drvs[] = {
 	{
 		PHY_ID_MATCH_EXACT(0x00008201),
@@ -1372,7 +1845,7 @@
 	}, {
 		PHY_ID_MATCH_EXACT(0x001cc916),
 		.name		= "RTL8211F Gigabit Ethernet",
-		.probe		= rtl821x_probe,
+		.probe		= &rtl8211f_probe,
 		.config_init	= &rtl8211f_config_init,
 		.read_status	= rtlgen_read_status,
 		.config_intr	= &rtl8211f_config_intr,
@@ -1388,7 +1861,7 @@
 	}, {
 		PHY_ID_MATCH_EXACT(RTL_8211FVD_PHYID),
 		.name		= "RTL8211F-VD Gigabit Ethernet",
-		.probe		= rtl821x_probe,
+		.probe		= rtl8211f_probe,
 		.config_init	= &rtl8211f_config_init,
 		.read_status	= rtlgen_read_status,
 		.config_intr	= &rtl8211f_config_intr,
@@ -1399,6 +1872,18 @@
 		.write_page	= rtl821x_write_page,
 		.flags		= PHY_ALWAYS_CALL_SUSPEND,
 	}, {
+		PHY_ID_MATCH_EXACT(0x001cc981),
+		.name		= "RTL8214F Quad Gigabit Ethernet Transceiver",
+		.features       = PHY_GBIT_FEATURES,
+#ifdef CONFIG_ARCH_CORTINA_VENUS
+		.probe		= realtek_cortina_probe,
+#endif
+		.handle_interrupt = rtl821x_handle_interrupt,
+		.suspend	= genphy_suspend,
+		.resume		= genphy_resume,
+		.read_page	= rtl821x_read_page,
+		.write_page	= rtl821x_write_page,
+	}, {
 		.name		= "Generic FE-GE Realtek PHY",
 		.match_phy_device = rtlgen_match_phy_device,
 		.read_status	= rtlgen_read_status,
@@ -1453,6 +1938,7 @@
 		.read_page      = rtl821x_read_page,
 		.write_page     = rtl821x_write_page,
 	}, {
+#if 0
 		.match_phy_device = rtl8221b_vb_cg_c22_match_phy_device,
 		.name           = "RTL8221B-VB-CG 2.5Gbps PHY (C22)",
 		.get_features   = rtl822x_get_features,
@@ -1475,6 +1961,7 @@
 		.suspend        = genphy_c45_pma_suspend,
 		.resume         = rtlgen_c45_resume,
 	}, {
+#endif
 		.match_phy_device = rtl8221b_vn_cg_c22_match_phy_device,
 		.name           = "RTL8221B-VM-CG 2.5Gbps PHY (C22)",
 		.get_features   = rtl822x_get_features,
@@ -1568,6 +2055,19 @@
 		.resume		= genphy_resume,
 		.read_mmd	= genphy_read_mmd_unsupported,
 		.write_mmd	= genphy_write_mmd_unsupported,
+	}, {
+		PHY_ID_MATCH_EXACT(0x001cc849),
+		.name		= "RTL8221B 2.5 Gigabit Ethernet",
+		.probe		= rtl8221b_probe,
+		.config_init	= rtl8221b_config_init,
+		.suspend	= genphy_suspend,
+		.resume		= genphy_resume,
+		.config_aneg	= rtl8221b_config_aneg,
+		.read_status	= rtl8221b_read_status,
+		.get_features	= rtl8221b_get_features,
+		.get_sset_count	= rtl8221b_get_sset_count,
+		.get_strings	= rtl8221b_get_strings,
+		.get_stats	= rtl8221b_get_stats,
 	},
 };
 
diff -ruw linux-6.13.12/drivers/net/phy/sfp.h linux-6.13.12-fbx/drivers/net/phy/sfp.h
--- linux-6.13.12/drivers/net/phy/sfp.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/sfp.h	2025-09-25 17:40:34.067359788 +0200
@@ -26,6 +26,7 @@
 	int (*module_eeprom_by_page)(struct sfp *sfp,
 				     const struct ethtool_module_eeprom *page,
 				     struct netlink_ext_ack *extack);
+	int (*get_sfp_state)(struct sfp *sfp, struct ethtool_sfp_state *st);
 };
 
 int sfp_add_phy(struct sfp_bus *bus, struct phy_device *phydev);
diff -ruw linux-6.13.12/drivers/net/phy/swphy.c linux-6.13.12-fbx/drivers/net/phy/swphy.c
--- linux-6.13.12/drivers/net/phy/swphy.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/swphy.c	2025-09-25 17:40:34.067359788 +0200
@@ -29,6 +29,7 @@
 	SWMII_SPEED_10 = 0,
 	SWMII_SPEED_100,
 	SWMII_SPEED_1000,
+	SWMII_SPEED_2500,
 	SWMII_DUPLEX_HALF = 0,
 	SWMII_DUPLEX_FULL,
 };
@@ -51,6 +52,10 @@
 		.lpagb = LPA_1000FULL | LPA_1000HALF,
 		.estat = ESTATUS_1000_TFULL | ESTATUS_1000_THALF,
 	},
+	[SWMII_SPEED_2500] = {
+		.bmsr  = BMSR_ESTATEN,
+		.lpagb = LPA_1000FULL | LPA_1000HALF,
+	},
 };
 
 static const struct swmii_regs duplex[] = {
@@ -71,6 +76,8 @@
 static int swphy_decode_speed(int speed)
 {
 	switch (speed) {
+	case 2500:
+		return SWMII_SPEED_2500;
 	case 1000:
 		return SWMII_SPEED_1000;
 	case 100:
diff -ruw linux-6.13.12/drivers/net/ppp/ppp_generic.c linux-6.13.12-fbx/drivers/net/ppp/ppp_generic.c
--- linux-6.13.12/drivers/net/ppp/ppp_generic.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/ppp/ppp_generic.c	2025-09-25 17:40:34.067359788 +0200
@@ -190,6 +190,7 @@
 	netns_tracker	ns_tracker;
 	struct list_head clist;		/* link in list of channels per unit */
 	rwlock_t	upl;		/* protects `ppp' and 'bridge' */
+	int		stopped;	/* channel is stopped */
 	struct channel __rcu *bridge;	/* "bridged" ppp channel */
 #ifdef CONFIG_PPP_MULTILINK
 	u8		avail;		/* flag used in multilink stuff */
@@ -1672,10 +1673,28 @@
 			ppp_send_frame(ppp, skb);
 		/* If there's no work left to do, tell the core net
 		   code that we can accept some more. */
-		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq))
+		if (!ppp->xmit_pending && !skb_peek(&ppp->file.xq)) {
+			/* only  enable  net  queue  if at  least  one
+			 * channel is not stopped */
+			struct list_head *list;
+			struct channel *pch;
+			bool need_wake;
+
+			list = &ppp->channels;
+			need_wake = false;
+			while ((list = list->next) != &ppp->channels) {
+				pch = list_entry(list, struct channel, clist);
+				if (!pch->stopped) {
+					need_wake = true;
+					break;
+				}
+			}
+
+			if (need_wake)
 			netif_wake_queue(ppp->dev);
 		else
 			netif_stop_queue(ppp->dev);
+		}
 	} else {
 		kfree_skb(skb);
 	}
@@ -3030,10 +3049,24 @@
 
 	if (!pch)
 		return;
+	pch->stopped = 0;
 	ppp_channel_push(pch);
 }
 
 /*
+ * Callback from a channel when it wants to prevent further transmit on it
+ */
+void
+ppp_output_stop(struct ppp_channel *chan)
+{
+	struct channel *pch = chan->ppp;
+
+	if (!pch)
+		return;
+	pch->stopped = 1;
+}
+
+/*
  * Compression control.
  */
 
@@ -3627,6 +3660,7 @@
 EXPORT_SYMBOL(ppp_input);
 EXPORT_SYMBOL(ppp_input_error);
 EXPORT_SYMBOL(ppp_output_wakeup);
+EXPORT_SYMBOL(ppp_output_stop);
 EXPORT_SYMBOL(ppp_register_compressor);
 EXPORT_SYMBOL(ppp_unregister_compressor);
 MODULE_DESCRIPTION("Generic PPP layer driver");
diff -ruw linux-6.13.12/drivers/net/ppp/pptp.c linux-6.13.12-fbx/drivers/net/ppp/pptp.c
--- linux-6.13.12/drivers/net/ppp/pptp.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/ppp/pptp.c	2025-09-25 17:40:34.071359808 +0200
@@ -370,6 +370,7 @@
 	po = lookup_chan(ntohs(header->call_id), iph->saddr);
 	if (po) {
 		skb_dst_drop(skb);
+		skb->mark = 0;
 		nf_reset_ct(skb);
 		return sk_receive_skb(sk_pppox(po), skb, 0);
 	}
diff -ruw linux-6.13.12/drivers/net/tun.c linux-6.13.12-fbx/drivers/net/tun.c
--- linux-6.13.12/drivers/net/tun.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/tun.c	2025-09-25 17:40:34.075359828 +0200
@@ -79,6 +79,10 @@
 #include <net/rose.h>
 #include <net/6lowpan.h>
 #include <net/rps.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <net/ip.h>
 
 #include <linux/uaccess.h>
 #include <linux/proc_fs.h>
@@ -165,6 +169,31 @@
 	unsigned long updated ____cacheline_aligned_in_smp;
 };
 
+/*
+ * smalltun definitions
+ */
+#define SMALLTUN_MAGIC			0x6660
+#define SMALLTUN_VERSION		0x1
+
+#define TYPE_MASK			0xf
+#define TYPE_CLT			(1 << 3)
+
+#define TYPE_TRIGGER			0x0
+#define TYPE_CHALLENGE			0x1
+#define TYPE_CLIENT_HELLO		0x2
+#define TYPE_SERVER_HELLO		0x3
+
+#define TYPE_CLT_DATA			(TYPE_CLT | 0x0)
+#define TYPE_CLT_GET_PARAMS		(TYPE_CLT | 0x1)
+#define TYPE_CLT_PARAMS			(TYPE_CLT | 0x2)
+
+struct smalltun_pkt_hdr {
+	u16		magic;		/* SMALLTUN_MAGIC, written with htons() */
+	u8		version;	/* SMALLTUN_VERSION */
+	u8		flag_type;	/* TYPE_* value */
+	u8		data[];		/* flexible array member (data[0] is deprecated) */
+};
+
 #define TUN_NUM_FLOW_ENTRIES 1024
 #define TUN_MASK_FLOW_ENTRIES (TUN_NUM_FLOW_ENTRIES - 1)
 
@@ -184,6 +213,11 @@
 	kuid_t			owner;
 	kgid_t			group;
 
+	struct smalltun_fp	smalltun_fps[4];
+	unsigned int		smalltun_valid_count;
+	unsigned int		smalltun_valid[4];
+	struct rtable		*smalltun_rt_cache[4];
+
 	struct net_device	*dev;
 	netdev_features_t	set_features;
 #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
@@ -1031,6 +1065,184 @@
 	return 0;
 }
 
+static int smalltun_is_fastpath(struct tun_struct *tun,
+				struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	const struct smalltun_fp *fp;
+	struct rtable **prt_cache, *rt_cache;
+	struct flowi4 fl;
+	bool match;
+	size_t i;
+
+	if (!tun->smalltun_valid_count)
+		return 0;
+
+	if (skb->protocol != htons(ETH_P_IP))
+		return 0;
+
+	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+		return 0;
+
+	iph = ip_hdr(skb);
+
+	/* lookup smalltun fastpath */
+	fp = NULL;
+	rt_cache = NULL;
+	for (i = 0; i < ARRAY_SIZE(tun->smalltun_fps); i++) {
+		if (!tun->smalltun_valid[i])
+			continue;
+
+		if (iph->daddr == tun->smalltun_fps[i].inner_dst) {
+			fp = &tun->smalltun_fps[i];
+			prt_cache = &tun->smalltun_rt_cache[i];
+			break;
+		}
+	}
+
+	if (!fp)
+		return 0;
+
+	if (fp->af != AF_INET) {
+		/* FIXME: implement IPv6 transport */
+		return 0;
+	}
+
+	if (!pskb_may_pull(skb, iph->ihl * 4))
+		return 0;
+
+	match = false;
+	for (i = 0; i < fp->rule_count; i++) {
+		const struct smalltun_rule *r = &fp->rules[i];
+		unsigned int sport, dport;
+
+		if (iph->protocol != r->proto)
+			continue;
+
+		switch (iph->protocol) {
+		case IPPROTO_UDP:
+		{
+			const struct udphdr *udp;
+			udp = (struct udphdr *)((u8 *)iph + (iph->ihl << 2));
+			sport = ntohs(udp->source);
+	                dport = ntohs(udp->dest);
+			break;
+		}
+		case IPPROTO_TCP:
+		{
+			const struct tcphdr *tcp;
+			tcp = (struct tcphdr *)((u8 *)iph + (iph->ihl << 2));
+			sport = ntohs(tcp->source);
+			dport = ntohs(tcp->dest);
+			break;
+		}
+		default:
+			match = true;
+			break;
+		}
+
+		if (match)
+			break;
+
+		if (r->src_port_start && r->src_port_end) {
+			if (sport < ntohs(r->src_port_start) ||
+			    sport > ntohs(r->src_port_end))
+				continue;
+		}
+
+		if (r->dst_port_start && r->dst_port_end) {
+			if (dport < ntohs(r->dst_port_start) ||
+			    dport > ntohs(r->dst_port_end))
+				continue;
+		}
+		match = true;
+	}
+
+	if (!match)
+		return 0;
+
+	if (fp->af == AF_INET) {
+		struct iphdr *oiph;
+		struct udphdr *oudph;
+		struct smalltun_pkt_hdr *pkt;
+		unsigned int payload_len;
+
+		payload_len = skb->len;
+
+		if (skb_cow_head(skb,
+				 sizeof (struct iphdr) +
+				 sizeof (struct udphdr) +
+				 sizeof (struct smalltun_pkt_hdr)))
+			return 0;
+
+		pkt = skb_push(skb, sizeof (struct smalltun_pkt_hdr));
+		oudph = skb_push(skb, sizeof (struct udphdr));
+		skb_reset_transport_header(skb);
+		oiph = skb_push(skb, sizeof (struct iphdr));
+		skb_reset_network_header(skb);
+
+		/* ip */
+		oiph->version = 4;
+		oiph->tos = 0;
+		oiph->id = 0;
+		oiph->ihl = 5;
+		oiph->frag_off = 0;
+		oiph->ttl = 64;
+		oiph->protocol = IPPROTO_UDP;
+		memcpy(&oiph->saddr, fp->outer_src, 4);
+		memcpy(&oiph->daddr, fp->outer_dst, 4);
+
+		/* udp */
+		oudph->source = fp->outer_src_port;
+		oudph->dest = fp->outer_dst_port;
+		oudph->len = htons(payload_len + sizeof (*oudph) +
+				   sizeof (*pkt));
+		oudph->check = 0;
+
+		/* smalltun */
+		pkt->magic = htons(SMALLTUN_MAGIC);
+		pkt->version = SMALLTUN_VERSION;
+		pkt->flag_type = TYPE_CLT_DATA;
+
+		memset(&fl, 0x00, sizeof (fl));
+		memcpy(&fl.saddr, fp->outer_src, 4);
+		memcpy(&fl.daddr, fp->outer_dst, 4);
+
+		if (*prt_cache && (*prt_cache)->dst.obsolete > 0) {
+			rt_cache = *prt_cache;
+			*prt_cache = NULL;
+			ip_rt_put(rt_cache);
+		}
+
+		rt_cache = *prt_cache;
+		if (!rt_cache) {
+			rt_cache = ip_route_output_key(&init_net, &fl);
+			if (IS_ERR(rt_cache)) {
+				pr_err("ip_route_output_key(%pI4): %li\n",
+				       &fl.daddr, PTR_ERR(rt_cache));
+				return 0;
+			}
+
+			if (!rt_cache->dst.dev) {
+				pr_err("ip_route_output_key(%pI4): no dev\n",
+				       &fl.daddr);
+				return 0;
+			}
+
+			*prt_cache = rt_cache;
+		}
+
+		skb_dst_set(skb, dst_clone(&rt_cache->dst));
+		skb->dev = skb_dst(skb)->dev;
+		ip_local_out(&init_net, NULL, skb);
+		return 1;
+	}
+
+	/* find route */
+
+	return 0;
+}
+
 /* Net device start xmit */
 static void tun_automq_xmit(struct tun_struct *tun, struct sk_buff *skb)
 {
@@ -1123,6 +1335,11 @@
 	 */
 	skb_orphan(skb);
 
+	if (smalltun_is_fastpath(tun, skb)) {
+		rcu_read_unlock();
+		return NETDEV_TX_OK;
+	}
+
 	nf_reset_ct(skb);
 
 	if (ptr_ring_produce(&tfile->tx_ring, skb)) {
@@ -3397,6 +3614,104 @@
 		ret = open_related_ns(&net->ns, get_net_ns);
 		break;
 
+	case TUNSMALLTUNSETFP:
+	{
+		struct smalltun_fp fp;
+		unsigned int i;
+		int free_idx;
+
+		ret = -EFAULT;
+		if (copy_from_user(&fp, argp, sizeof(fp)))
+			break;
+
+		/* look for duplicate */
+		ret = 0;
+		free_idx = -1;
+		for (i = 0; i < ARRAY_SIZE(tun->smalltun_fps); i++) {
+			if (!tun->smalltun_valid[i]) {
+				if (free_idx == -1)
+					free_idx = i;
+				continue;
+			}
+
+			if (fp.inner_src == tun->smalltun_fps[i].inner_src &&
+			    fp.inner_dst == tun->smalltun_fps[i].inner_dst) {
+				ret = -EEXIST;
+				break;
+			}
+		}
+
+		if (ret)
+			break;
+
+		if (free_idx == -1) {
+			ret = -ENOSPC;
+			break;
+		}
+
+		memcpy(&tun->smalltun_fps[free_idx], &fp, sizeof (fp));
+		tun->smalltun_valid[free_idx] = 1;
+		tun->smalltun_valid_count++;
+		netif_info(tun, tx_queued, tun->dev,
+			   "new fp rule for %pI4 <=> %pI4 (%u rules)\n",
+			   &fp.inner_src,
+			   &fp.inner_dst,
+			   fp.rule_count);
+
+		if (fp.af == AF_INET) {
+			netif_info(tun, tx_queued, tun->dev,
+				   "outer %pI4:%u <=> %pI4:%u\n",
+				   fp.outer_src,
+				   ntohs(fp.outer_src_port),
+				   fp.outer_dst,
+				   ntohs(fp.outer_dst_port));
+		} else {
+			netif_info(tun, tx_queued, tun->dev,
+				   "outer %pI6:%u <=> %pI6:%u\n",
+				   fp.outer_src,
+				   ntohs(fp.outer_src_port),
+				   fp.outer_dst,
+				   ntohs(fp.outer_dst_port));
+		}
+		break;
+	}
+
+	case TUNSMALLTUNDELFP:
+	{
+		struct smalltun_fp fp;
+		unsigned int i;
+
+		ret = -EFAULT;
+		if (copy_from_user(&fp, argp, sizeof(fp)))
+			break;
+
+		/* lookup */
+		ret = -ENOENT;
+		for (i = 0; i < ARRAY_SIZE(tun->smalltun_fps); i++) {
+			if (fp.inner_src == tun->smalltun_fps[i].inner_src &&
+			    fp.inner_dst == tun->smalltun_fps[i].inner_dst) {
+				ret = 0;
+				break;
+			}
+		}
+
+		if (ret)
+			break;
+
+		tun->smalltun_valid[i] = 0;
+		tun->smalltun_valid_count--;
+		if (tun->smalltun_rt_cache[i]) {
+			ip_rt_put(tun->smalltun_rt_cache[i]);
+			tun->smalltun_rt_cache[i] = NULL;
+		}
+
+		netif_info(tun, tx_queued, tun->dev,
+			   "removed fp rule for %pI4 <=> %pI4\n",
+			   &fp.inner_src,
+			   &fp.inner_dst);
+		break;
+	}
+
 	default:
 		ret = -EINVAL;
 		break;
diff -ruw linux-6.13.12/drivers/net/wireless/ath/Kconfig linux-6.13.12-fbx/drivers/net/wireless/ath/Kconfig
--- linux-6.13.12/drivers/net/wireless/ath/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/Kconfig	2025-09-25 17:40:34.099359947 +0200
@@ -37,6 +37,9 @@
 	  This option enables tracepoints for atheros wireless drivers.
 	  Currently, ath9k makes use of this facility.
 
+config ATH_REG_IGNORE
+	bool "ignore all eeprom regulation"
+
 config ATH_REG_DYNAMIC_USER_REG_HINTS
 	bool "Atheros dynamic user regulatory hints"
 	depends on CFG80211_CERTIFICATION_ONUS
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath.h
--- linux-6.13.12/drivers/net/wireless/ath/ath.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath.h	2025-09-25 17:40:34.099359947 +0200
@@ -47,6 +47,7 @@
 	u32 rx_busy;
 	u32 rx_frame;
 	u32 tx_frame;
+	u32 rx_bss_frame;
 };
 
 enum ath_device_state {
@@ -188,6 +189,8 @@
 
 	int last_rssi;
 	struct ieee80211_supported_band sbands[NUM_NL80211_BANDS];
+
+	int dfs_pulse_valid_diff_ts;
 };
 
 static inline const struct ath_ps_ops *ath_ps_ops(struct ath_common *common)
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath10k/core.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/core.c
--- linux-6.13.12/drivers/net/wireless/ath/ath10k/core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/core.c	2025-09-25 17:40:34.099359947 +0200
@@ -32,6 +32,8 @@
 unsigned int ath10k_debug_mask;
 EXPORT_SYMBOL(ath10k_debug_mask);
 
+bool ath10k_offload_use_drv_tx;
+
 static unsigned int ath10k_cryptmode_param;
 static bool uart_print;
 static bool skip_otp;
@@ -43,6 +45,8 @@
 unsigned long ath10k_coredump_mask = BIT(ATH10K_FW_CRASH_DUMP_REGISTERS) |
 				     BIT(ATH10K_FW_CRASH_DUMP_CE_DATA);
 
+bool ath10k_no_fetch = 0;
+
 /* FIXME: most of these should be readonly */
 module_param_named(debug_mask, ath10k_debug_mask, uint, 0644);
 module_param_named(cryptmode, ath10k_cryptmode_param, uint, 0644);
@@ -51,6 +55,8 @@
 module_param(fw_diag_log, bool, 0644);
 module_param_named(frame_mode, ath10k_frame_mode, uint, 0644);
 module_param_named(coredump_mask, ath10k_coredump_mask, ulong, 0444);
+module_param_named(no_fetch, ath10k_no_fetch, bool, 0644);
+module_param_named(offload_use_drv_tx, ath10k_offload_use_drv_tx, bool, 0644);
 
 MODULE_PARM_DESC(debug_mask, "Debugging mask");
 MODULE_PARM_DESC(uart_print, "Uart target debugging");
@@ -60,6 +66,8 @@
 		 "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
 MODULE_PARM_DESC(coredump_mask, "Bitfield of what to include in firmware crash file");
 MODULE_PARM_DESC(fw_diag_log, "Diag based fw log debugging");
+MODULE_PARM_DESC(no_fetch, "Disable pull mode (push only mode)");
+MODULE_PARM_DESC(offload_use_drv_tx, "allow using drv_tx datapath for offload, no AQL");
 
 static const struct ath10k_hw_params ath10k_hw_params_list[] = {
 	{
@@ -85,8 +93,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -97,6 +104,7 @@
 		.hw_filter_reset_required = true,
 		.fw_diag_ce_download = false,
 		.credit_size_workaround = false,
+		.uart_pin_workaround = true,
 		.tx_stats_over_pktlog = true,
 		.dynamic_sar_support = false,
 		.hw_restart_disconnect = false,
@@ -126,8 +134,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -168,8 +175,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -246,8 +252,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -287,8 +292,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -328,8 +332,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -372,8 +375,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -420,8 +422,7 @@
 		.decap_align_bytes = 1,
 		.spectral_bin_discard = 4,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 11,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -473,8 +474,9 @@
 		/* Can do only 2x2 VHT160 or 80+80. 1560Mbps is 4x4 80Mhz
 		 * or 2x2 160Mhz, long-guard-interval.
 		 */
-		.vht160_mcs_rx_highest = 1560,
-		.vht160_mcs_tx_highest = 1560,
+		.vht_need_ext_nss = true,
+		.vht_over_supp_chan_width = 0,
+		.vht_over_ext_nss_bw = 2,
 		.n_cipher_suites = 11,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -524,8 +526,9 @@
 		/* Can do only 1x1 VHT160 or 80+80. 780Mbps is 2x2 80Mhz or
 		 * 1x1 160Mhz, long-guard-interval.
 		 */
-		.vht160_mcs_rx_highest = 780,
-		.vht160_mcs_tx_highest = 780,
+		.vht_need_ext_nss = true,
+		.vht_over_supp_chan_width = 0,
+		.vht_over_ext_nss_bw = 2,
 		.n_cipher_suites = 11,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -565,8 +568,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -608,8 +610,7 @@
 		.decap_align_bytes = 4,
 		.spectral_bin_discard = 0,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 8,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -690,8 +691,7 @@
 		.decap_align_bytes = 1,
 		.spectral_bin_discard = 4,
 		.spectral_bin_offset = 0,
-		.vht160_mcs_rx_highest = 0,
-		.vht160_mcs_tx_highest = 0,
+		.vht_need_ext_nss = false,
 		.n_cipher_suites = 11,
 		.ast_skid_limit = 0x10,
 		.num_wds_entries = 0x20,
@@ -1270,6 +1270,7 @@
 static int ath10k_fetch_cal_file(struct ath10k *ar)
 {
 	char filename[100];
+	unsigned int i;
 
 	/* pre-cal-<bus>-<id>.bin */
 	scnprintf(filename, sizeof(filename), "pre-cal-%s-%s.bin",
@@ -1283,6 +1284,11 @@
 	scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
 		  ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
 
+	for (i = 0; filename[i]; i++) {
+		if (filename[i] == ':')
+			filename[i] = '_';
+	}
+
 	ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
 	if (IS_ERR(ar->cal_file))
 		/* calibration file is optional, don't print any warnings */
@@ -1484,6 +1490,7 @@
 					      const char *boardname,
 					      const char *fallback_boardname1,
 					      const char *fallback_boardname2,
+					      const char *pci_boardname,
 					      const char *filename)
 {
 	size_t len, magic_len;
@@ -1528,7 +1535,11 @@
 	data += magic_len;
 	len -= magic_len;
 
-	/* attempt to find boardname in the IE list */
+	/* attempt to find pci_boardname in the IE list */
+	ret = ath10k_core_search_bd(ar, pci_boardname, data, len);
+
+	/* if we didn't find it, try the plain board name next */
+	if (ret == -ENOENT)
 	ret = ath10k_core_search_bd(ar, boardname, data, len);
 
 	/* if we didn't find it and have a fallback name, try that */
@@ -1540,8 +1551,8 @@
 
 	if (ret == -ENOENT) {
 		ath10k_err(ar,
-			   "failed to fetch board data for %s from %s/%s\n",
-			   boardname, ar->hw_params.fw.dir, filename);
+			   "failed to fetch board data for %s or %s from %s/%s\n",
+			   boardname, pci_boardname, ar->hw_params.fw.dir, filename);
 		ret = -ENODATA;
 	}
 
@@ -1557,7 +1568,8 @@
 
 static int ath10k_core_create_board_name(struct ath10k *ar, char *name,
 					 size_t name_len, bool with_variant,
-					 bool with_chip_id)
+					 bool with_chip_id,
+					 bool force_pci_id)
 {
 	/* strlen(',variant=') + strlen(ar->id.bdf_ext) */
 	char variant[9 + ATH10K_SMBIOS_BDF_EXT_STR_LENGTH] = { 0 };
@@ -1566,7 +1578,7 @@
 		scnprintf(variant, sizeof(variant), ",variant=%s",
 			  ar->id.bdf_ext);
 
-	if (ar->id.bmi_ids_valid) {
+	if (ar->id.bmi_ids_valid && !force_pci_id) {
 		scnprintf(name, name_len,
 			  "bus=%s,bmi-chip-id=%d,bmi-board-id=%d%s",
 			  ath10k_bus_str(ar->hif.bus),
@@ -1575,7 +1587,7 @@
 		goto out;
 	}
 
-	if (ar->id.qmi_ids_valid) {
+	if (ar->id.qmi_ids_valid && !force_pci_id) {
 		if (with_chip_id)
 			scnprintf(name, name_len,
 				  "bus=%s,qmi-board-id=%x,qmi-chip-id=%x%s",
@@ -1620,14 +1632,15 @@
 
 int ath10k_core_fetch_board_file(struct ath10k *ar, int bd_ie_type)
 {
-	char boardname[100], fallback_boardname1[100], fallback_boardname2[100];
+	char boardname[100], fallback_boardname1[100], fallback_boardname2[100],
+		pci_boardname[100];
 	int ret;
 
 	if (bd_ie_type == ATH10K_BD_IE_BOARD) {
 		/* With variant and chip id */
 		ret = ath10k_core_create_board_name(ar, boardname,
 						    sizeof(boardname), true,
-						    true);
+						    true, false);
 		if (ret) {
 			ath10k_err(ar, "failed to create board name: %d", ret);
 			return ret;
@@ -1636,7 +1649,7 @@
 		/* Without variant and only chip-id */
 		ret = ath10k_core_create_board_name(ar, fallback_boardname1,
 						    sizeof(boardname), false,
-						    true);
+						    true, false);
 		if (ret) {
 			ath10k_err(ar, "failed to create 1st fallback board name: %d",
 				   ret);
@@ -1646,12 +1659,20 @@
 		/* Without variant and without chip-id */
 		ret = ath10k_core_create_board_name(ar, fallback_boardname2,
 						    sizeof(boardname), false,
-						    false);
+						    false, false);
 		if (ret) {
 			ath10k_err(ar, "failed to create 2nd fallback board name: %d",
 				   ret);
 			return ret;
 		}
+
+		ret = ath10k_core_create_board_name(ar, pci_boardname,
+						    sizeof(pci_boardname),
+						    false, false, true);
+		if (ret) {
+			ath10k_err(ar, "failed to create pci board name: %d", ret);
+			return ret;
+		}
 	} else if (bd_ie_type == ATH10K_BD_IE_BOARD_EXT) {
 		ret = ath10k_core_create_eboard_name(ar, boardname,
 						     sizeof(boardname));
@@ -1665,6 +1686,7 @@
 	ret = ath10k_core_fetch_board_data_api_n(ar, boardname,
 						 fallback_boardname1,
 						 fallback_boardname2,
+						 pci_boardname,
 						 ATH10K_BOARD_API2_FILE);
 	if (!ret)
 		goto success;
@@ -2558,6 +2580,7 @@
 
 	switch (ar->state) {
 	case ATH10K_STATE_ON:
+	case ATH10K_STATE_PRE_ON:
 		ar->state = ATH10K_STATE_RESTARTING;
 		ath10k_halt(ar);
 		ath10k_scan_finish(ar);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath10k/core.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/core.h
--- linux-6.13.12/drivers/net/wireless/ath/ath10k/core.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/core.h	2025-09-29 14:23:07.585732311 +0200
@@ -37,7 +37,7 @@
 #define ATH10K_SCAN_ID 0
 #define ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD 10 /* msec */
 #define WMI_READY_TIMEOUT (5 * HZ)
-#define ATH10K_FLUSH_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_FLUSH_TIMEOUT_HZ (HZ / 2)
 #define ATH10K_CONNECTION_LOSS_HZ (3 * HZ)
 #define ATH10K_NUM_CHANS 41
 #define ATH10K_MAX_5G_CHAN 173
@@ -558,6 +558,8 @@
 	u8 rate_ctrl[ATH10K_TID_MAX];
 	u32 rate_code[ATH10K_TID_MAX];
 	int rtscts[ATH10K_TID_MAX];
+	wait_queue_head_t empty_tx_wq;
+	atomic_t num_fw_queued;
 };
 
 #define ATH10K_VDEV_SETUP_TIMEOUT_HZ	(5 * HZ)
@@ -700,10 +702,12 @@
 	void *cal_data;
 	u32 enable_extd_tx_stats;
 	u8 fw_dbglog_mode;
+	u32 burst_dur[4];
 };
 
 enum ath10k_state {
 	ATH10K_STATE_OFF = 0,
+	ATH10K_STATE_PRE_ON,
 	ATH10K_STATE_ON,
 
 	/* When doing firmware recovery the device is first powered down.
@@ -1020,6 +1024,7 @@
 		void *vaddr;
 	} msa;
 	u8 mac_addr[ETH_ALEN];
+	const char *fem_name;
 
 	enum ath10k_hw_rev hw_rev;
 	u16 dev_id;
@@ -1210,6 +1215,8 @@
 	struct work_struct restart_work;
 	struct work_struct bundle_tx_work;
 	struct work_struct tx_complete_work;
+	struct work_struct powerup_work;
+	bool powerup_pending;
 
 	/* cycle count is reported twice for each visited channel during scan.
 	 * access protected by data_lock
@@ -1330,6 +1337,8 @@
 
 extern unsigned int ath10k_frame_mode;
 extern unsigned long ath10k_coredump_mask;
+extern bool ath10k_no_fetch;
+extern bool ath10k_offload_use_drv_tx;
 
 void ath10k_core_napi_sync_disable(struct ath10k *ar);
 void ath10k_core_napi_enable(struct ath10k *ar);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath10k/debug.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/debug.c
--- linux-6.13.12/drivers/net/wireless/ath/ath10k/debug.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/debug.c	2025-09-25 17:40:34.103359967 +0200
@@ -2495,6 +2495,79 @@
 	.llseek = default_llseek,
 };
 
+static ssize_t ath10k_write_burst_dur(struct file *file, const char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+
+	struct ath10k *ar = file->private_data;
+	u32 dur[4];
+	int ret;
+	int ac;
+	char buf[128];
+
+	simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
+
+	/* make sure that buf is null terminated */
+	buf[sizeof(buf) - 1] = 0;
+
+	ret = sscanf(buf, "%u %u %u %u", &dur[0], &dur[1], &dur[2], &dur[3]);
+
+	if (ret != 4)
+		return -EINVAL;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_ON &&
+	    ar->state != ATH10K_STATE_RESTARTED) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	for (ac = 0; ac < 4; ac++) {
+		if (dur[ac] < MIN_BURST_DUR || dur[ac] > MAX_BURST_DUR) {
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->aggr_burst,
+						(SM(ac, ATH10K_AGGR_BURST_AC) |
+						SM(dur[ac], ATH10K_AGGR_BURST_DUR)));
+		if (ret) {
+			ath10k_warn(ar, "failed to set aggr burst duration for ac %d: %d\n", ac, ret);
+			goto exit;
+		}
+		ar->debug.burst_dur[ac] = dur[ac];
+	}
+
+	ret = count;
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static ssize_t ath10k_read_burst_dur(struct file *file, char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	struct ath10k *ar = file->private_data;
+	int len = 0;
+	char buf[128];
+
+	len = scnprintf(buf, sizeof(buf), "%u %u %u %u\n",
+			ar->debug.burst_dur[0], ar->debug.burst_dur[1],
+			ar->debug.burst_dur[2], ar->debug.burst_dur[3]);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_burst_dur = {
+	.read = ath10k_read_burst_dur,
+	.write = ath10k_write_burst_dur,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 int ath10k_debug_create(struct ath10k *ar)
 {
 	ar->debug.cal_data = vzalloc(ATH10K_DEBUG_CAL_DATA_LEN);
@@ -2582,6 +2655,9 @@
 	debugfs_create_file("ani_enable", 0600, ar->debug.debugfs_phy, ar,
 			    &fops_ani_enable);
 
+	debugfs_create_file("burst_dur", 0600,
+			    ar->debug.debugfs_phy, ar, &fops_burst_dur);
+
 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
 		debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_phy,
 				    ar, &fops_simulate_radar);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath10k/debug.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/debug.h
--- linux-6.13.12/drivers/net/wireless/ath/ath10k/debug.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/debug.h	2025-09-25 17:40:34.103359967 +0200
@@ -81,6 +81,15 @@
 __printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...);
 __printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...);
 
+#define ATH10K_AGGR_BURST_AC_MASK  0xff000000
+#define ATH10K_AGGR_BURST_AC_LSB   24
+#define ATH10K_AGGR_BURST_DUR_MASK 0x00ffffff
+#define ATH10K_AGGR_BURST_DUR_LSB  0
+
+/* burst duration in usec */
+#define MIN_BURST_DUR 0
+#define MAX_BURST_DUR 8000
+
 void ath10k_debug_print_hwfw_info(struct ath10k *ar);
 void ath10k_debug_print_board_info(struct ath10k *ar);
 void ath10k_debug_print_boot_info(struct ath10k *ar);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath10k/htt.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/htt.h
--- linux-6.13.12/drivers/net/wireless/ath/ath10k/htt.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/htt.h	2025-09-25 17:40:34.103359967 +0200
@@ -1870,6 +1870,7 @@
 	spinlock_t tx_lock;
 	int max_num_pending_tx;
 	int num_pending_tx;
+	int num_pending_per_queue[IEEE80211_MAX_QUEUES];
 	int num_pending_mgmt_tx;
 	struct idr pending_tx;
 	wait_queue_head_t empty_tx_wq;
@@ -2447,11 +2448,17 @@
 void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
 			      struct ieee80211_txq *txq);
 void ath10k_htt_tx_txq_sync(struct ath10k *ar);
-void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
-int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt);
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
+			       struct ieee80211_txq *txq);
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
+			      struct ieee80211_txq *txq);
 void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt);
 int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
 				   bool is_presp);
+void ath10k_htt_tx_sta_inc_pending(struct ath10k_htt *htt,
+				   struct ieee80211_sta *sta);
+void ath10k_htt_tx_sta_dec_pending(struct ath10k_htt *htt,
+				   struct ieee80211_sta *sta);
 
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
 void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath10k/htt_tx.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/htt_tx.c
--- linux-6.13.12/drivers/net/wireless/ath/ath10k/htt_tx.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/htt_tx.c	2025-09-25 17:40:34.103359967 +0200
@@ -140,19 +140,26 @@
 	spin_unlock_bh(&ar->htt.tx_lock);
 }
 
-void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
+void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt,
+			       struct ieee80211_txq *txq)
 {
+	int qnr = -1;
+
 	lockdep_assert_held(&htt->tx_lock);
 
 	htt->num_pending_tx--;
 	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
 		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 
-	if (htt->num_pending_tx == 0)
+	if (txq)
+		qnr = --htt->num_pending_per_queue[txq->vif->hw_queue[txq->ac]];
+
+	if (htt->num_pending_tx == 0 || qnr == 0)
 		wake_up(&htt->empty_tx_wq);
 }
 
-int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
+int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt,
+			      struct ieee80211_txq *txq)
 {
 	lockdep_assert_held(&htt->tx_lock);
 
@@ -163,6 +170,11 @@
 	if (htt->num_pending_tx == htt->max_num_pending_tx)
 		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
 
+	if (!txq)
+		return 0;
+
+	htt->num_pending_per_queue[txq->vif->hw_queue[txq->ac]]++;
+
 	return 0;
 }
 
@@ -195,6 +207,37 @@
 	htt->num_pending_mgmt_tx--;
 }
 
+void ath10k_htt_tx_sta_inc_pending(struct ath10k_htt *htt,
+				   struct ieee80211_sta *sta)
+{
+	struct ath10k_sta *arsta;
+
+	if (!sta)
+		return;
+
+	arsta = (struct ath10k_sta *)sta->drv_priv;
+
+	atomic_inc(&arsta->num_fw_queued);
+}
+
+void ath10k_htt_tx_sta_dec_pending(struct ath10k_htt *htt,
+				   struct ieee80211_sta *sta)
+{
+	struct ath10k_sta *arsta;
+	int v;
+
+	if (!sta)
+		return;
+
+	arsta = (struct ath10k_sta *)sta->drv_priv;
+
+	v = atomic_dec_if_positive(&arsta->num_fw_queued);
+	if (v < 0)
+		WARN_ON_ONCE(1);
+	if (v == 0)
+		wake_up(&arsta->empty_tx_wq);
+}
+
 int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
 {
 	struct ath10k *ar = htt->ar;
@@ -1403,6 +1446,7 @@
 	u32 txbuf_paddr;
 	struct htt_msdu_ext_desc *ext_desc = NULL;
 	struct htt_msdu_ext_desc *ext_desc_t = NULL;
+	int map_len;
 
 	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
 	if (res < 0)
@@ -1432,7 +1476,13 @@
 		}
 	}
 
-	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
+#if defined(CONFIG_IP_FFN)
+	map_len = skb_ffn_get_dirty_len(msdu);
+#else
+	map_len = msdu->len;
+#endif
+
+	skb_cb->paddr = dma_map_single(dev, msdu->data, map_len,
 				       DMA_TO_DEVICE);
 	res = dma_mapping_error(dev, skb_cb->paddr);
 	if (res) {
@@ -1440,6 +1490,12 @@
 		goto err_free_msdu_id;
 	}
 
+#if defined(CONFIG_IP_FFN)
+	/* mark it so page pool recycler will remember this */
+	if (msdu->ffn_ff_done)
+		msdu->ffn_ff_done |= BIT(1);
+#endif
+
 	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
 		freq = ar->scan.roc_freq;
 
@@ -1576,7 +1632,7 @@
 	return 0;
 
 err_unmap_msdu:
-	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
+	dma_unmap_single(dev, skb_cb->paddr, map_len, DMA_TO_DEVICE);
 err_free_msdu_id:
 	spin_lock_bh(&htt->tx_lock);
 	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath10k/hw.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/hw.h
--- linux-6.13.12/drivers/net/wireless/ath/ath10k/hw.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/hw.h	2025-09-29 14:23:07.589732331 +0200
@@ -571,11 +571,14 @@
 	/* Number of bytes to be discarded for each FFT sample */
 	int spectral_bin_discard;
 
-	/* The board may have a restricted NSS for 160 or 80+80 vs what it
-	 * can do for 80Mhz.
+	/* The board may have a restricted NSS for 160 or 80+80 vs
+	 * what it can do for 80Mhz. To handle this, we have to use
+	 * Extended NSS support and overrides VHT capabilities from
+	 * firmware
 	 */
-	int vht160_mcs_rx_highest;
-	int vht160_mcs_tx_highest;
+	bool vht_need_ext_nss;
+	u32 vht_over_supp_chan_width;
+	u32 vht_over_ext_nss_bw;
 
 	/* Number of ciphers supported (i.e First N) in cipher_suites array */
 	int n_cipher_suites;
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath10k/mac.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/mac.c
--- linux-6.13.12/drivers/net/wireless/ath/ath10k/mac.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/mac.c	2025-09-29 14:23:07.589732331 +0200
@@ -3,7 +3,7 @@
  * Copyright (c) 2005-2011 Atheros Communications Inc.
  * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
  * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include "mac.h"
@@ -14,6 +14,8 @@
 #include <linux/acpi.h>
 #include <linux/of.h>
 #include <linux/bitfield.h>
+#include <linux/crc32.h>
+#include <linux/firmware.h>
 
 #include "hif.h"
 #include "core.h"
@@ -216,7 +218,9 @@
 	int ret;
 
 	if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
-		platform_type = WMI_HOST_PLATFORM_LOW_PERF;
+		platform_type = (ath10k_no_fetch) ?
+			WMI_HOST_PLATFORM_LOW_PERF_NO_FETCH :
+			WMI_HOST_PLATFORM_LOW_PERF;
 	else
 		platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
 
@@ -2519,30 +2523,6 @@
 	return tx_mcs_set;
 }
 
-static u32 get_160mhz_nss_from_maxrate(int rate)
-{
-	u32 nss;
-
-	switch (rate) {
-	case 780:
-		nss = 1;
-		break;
-	case 1560:
-		nss = 2;
-		break;
-	case 2106:
-		nss = 3; /* not support MCS9 from spec*/
-		break;
-	case 3120:
-		nss = 4;
-		break;
-	default:
-		 nss = 1;
-	}
-
-	return nss;
-}
-
 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
 				    struct ieee80211_vif *vif,
 				    struct ieee80211_sta *sta,
@@ -2550,13 +2530,16 @@
 {
 	const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
-	struct ath10k_hw_params *hw = &ar->hw_params;
+	enum ieee80211_vht_chanwidth bw;
 	struct cfg80211_chan_def def;
 	enum nl80211_band band;
 	const u16 *vht_mcs_mask;
 	u8 ampdu_factor;
-	u8 max_nss, vht_mcs;
-	int i;
+	u8 rx_nss;
+	struct ieee80211_vht_cap ieee_vht_cap = {
+		.vht_cap_info = cpu_to_le32(vht_cap->cap),
+		.supp_mcs = vht_cap->vht_mcs,
+	};
 
 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
 		return;
@@ -2599,15 +2582,20 @@
 	/* Calculate peer NSS capability from VHT capabilities if STA
 	 * supports VHT.
 	 */
-	for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
-		vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
-			  (2 * i) & 3;
-
-		if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) &&
-		    vht_mcs_mask[i])
-			max_nss = i + 1;
+	switch (arg->peer_phymode) {
+	case MODE_11AC_VHT160:
+		bw = IEEE80211_VHT_CHANWIDTH_160MHZ;
+		break;
+	case MODE_11AC_VHT80_80:
+		bw = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
+		break;
+	default:
+		bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
+		break;
 	}
-	arg->peer_num_spatial_streams = min(sta->deflink.rx_nss, max_nss);
+
+	rx_nss = ieee80211_get_vht_max_nss(&ieee_vht_cap, bw, 0, true, 0);
+	arg->peer_num_spatial_streams = rx_nss;
 	arg->peer_vht_rates.rx_max_rate =
 		__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
 	arg->peer_vht_rates.rx_mcs_set =
@@ -2622,20 +2610,6 @@
 	 */
 	if (arg->peer_phymode == MODE_11AC_VHT160 ||
 	    arg->peer_phymode == MODE_11AC_VHT80_80) {
-		u32 rx_nss;
-		u32 max_rate;
-
-		max_rate = arg->peer_vht_rates.rx_max_rate;
-		rx_nss = get_160mhz_nss_from_maxrate(max_rate);
-
-		if (rx_nss == 0)
-			rx_nss = arg->peer_num_spatial_streams;
-		else
-			rx_nss = min(arg->peer_num_spatial_streams, rx_nss);
-
-		max_rate = hw->vht160_mcs_tx_highest;
-		rx_nss = min(rx_nss, get_160mhz_nss_from_maxrate(max_rate));
-
 		arg->peer_bw_rxnss_override =
 			FIELD_PREP(WMI_PEER_NSS_MAP_ENABLE, 1) |
 			FIELD_PREP(WMI_PEER_NSS_160MHZ_MASK, (rx_nss - 1));
@@ -4385,7 +4359,7 @@
 	u16 airtime;
 
 	spin_lock_bh(&ar->htt.tx_lock);
-	ret = ath10k_htt_tx_inc_pending(htt);
+	ret = ath10k_htt_tx_inc_pending(htt, txq);
 	spin_unlock_bh(&ar->htt.tx_lock);
 
 	if (ret)
@@ -4394,7 +4368,7 @@
 	skb = ieee80211_tx_dequeue_ni(hw, txq);
 	if (!skb) {
 		spin_lock_bh(&ar->htt.tx_lock);
-		ath10k_htt_tx_dec_pending(htt);
+		ath10k_htt_tx_dec_pending(htt, txq);
 		spin_unlock_bh(&ar->htt.tx_lock);
 
 		return -ENOENT;
@@ -4416,22 +4390,25 @@
 		ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
 
 		if (ret) {
-			ath10k_htt_tx_dec_pending(htt);
+			ath10k_htt_tx_dec_pending(htt, txq);
 			spin_unlock_bh(&ar->htt.tx_lock);
 			return ret;
 		}
 		spin_unlock_bh(&ar->htt.tx_lock);
 	}
 
+	ath10k_htt_tx_sta_inc_pending(&ar->htt, sta);
+
 	ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
 	if (unlikely(ret)) {
 		ath10k_warn(ar, "failed to push frame: %d\n", ret);
 
 		spin_lock_bh(&ar->htt.tx_lock);
-		ath10k_htt_tx_dec_pending(htt);
+		ath10k_htt_tx_dec_pending(htt, txq);
 		if (is_mgmt)
 			ath10k_htt_tx_mgmt_dec_pending(htt);
 		spin_unlock_bh(&ar->htt.tx_lock);
+		ath10k_htt_tx_sta_dec_pending(&ar->htt, sta);
 
 		return ret;
 	}
@@ -4690,7 +4667,7 @@
 			is_presp = ieee80211_is_probe_resp(hdr->frame_control);
 		}
 
-		ret = ath10k_htt_tx_inc_pending(htt);
+		ret = ath10k_htt_tx_inc_pending(htt, txq);
 		if (ret) {
 			ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
 				    ret);
@@ -4703,7 +4680,7 @@
 		if (ret) {
 			ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
 				   ret);
-			ath10k_htt_tx_dec_pending(htt);
+			ath10k_htt_tx_dec_pending(htt, txq);
 			spin_unlock_bh(&ar->htt.tx_lock);
 			ieee80211_free_txskb(ar->hw, skb);
 			return;
@@ -4716,7 +4693,7 @@
 		ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
 		if (is_htt) {
 			spin_lock_bh(&ar->htt.tx_lock);
-			ath10k_htt_tx_dec_pending(htt);
+			ath10k_htt_tx_dec_pending(htt, txq);
 			if (is_mgmt)
 				ath10k_htt_tx_mgmt_dec_pending(htt);
 			spin_unlock_bh(&ar->htt.tx_lock);
@@ -4904,14 +4881,18 @@
 	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
 
 	/* If we are supporting 160Mhz or 80+80, then the NIC may be able to do
-	 * a restricted NSS for 160 or 80+80 vs what it can do for 80Mhz.  Give
-	 * user-space a clue if that is the case.
+	 * a restricted NSS for 160 or 80+80 vs what it can do for 80Mhz.
 	 */
 	if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) &&
-	    (hw->vht160_mcs_rx_highest != 0 ||
-	     hw->vht160_mcs_tx_highest != 0)) {
-		vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest);
-		vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest);
+	    hw->vht_need_ext_nss) {
+		vht_cap.cap &= ~(IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK |
+				 IEEE80211_VHT_CAP_EXT_NSS_BW_MASK);
+		vht_cap.cap |= hw->vht_over_supp_chan_width <<
+			IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_SHIFT;
+		vht_cap.cap |= hw->vht_over_ext_nss_bw <<
+			IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT;
+		vht_cap.vht_mcs.tx_highest |=
+			cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
 	}
 
 	return vht_cap;
@@ -5064,13 +5045,18 @@
 	const char *fem_name;
 	int ret;
 
+	if (ar->fem_name)
+		fem_name = ar->fem_name;
+	else {
 	node = ar->dev->of_node;
 	if (!node)
 		return -ENOENT;
 
-	ret = of_property_read_string_index(node, "ext-fem-name", 0, &fem_name);
+		ret = of_property_read_string_index(node, "ext-fem-name",
+						    0, &fem_name);
 	if (ret)
 		return -ENOENT;
+	}
 
 	/*
 	 * If external Front End module used in hardware, then default base band timing
@@ -5146,12 +5132,83 @@
 	return 0;
 }
 
+static int ath10k_get_powered(struct ieee80211_hw *hw, bool *up, bool *busy)
+{
+	struct ath10k *ar = hw->priv;
+	*up = (ar->state == ATH10K_STATE_ON ||
+	       ar->state == ATH10K_STATE_PRE_ON);
+	*busy = ar->powerup_pending;
+	return 0;
+}
+
+static int ath10k_set_powered(struct ieee80211_hw *hw)
+{
+	struct ath10k *ar = hw->priv;
+
+	switch (ar->state) {
+	case ATH10K_STATE_OFF:
+	case ATH10K_STATE_PRE_ON:
+		break;
+	default:
+		return 0;
+	}
+
+	if (ar->powerup_pending)
+		return 0;
+
+	queue_work(ar->workqueue, &ar->powerup_work);
+	ar->powerup_pending = true;
+	return 0;
+}
+
+static void ath10k_powerup_work(struct work_struct *work)
+{
+	struct ath10k *ar = container_of(work, struct ath10k, powerup_work);
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state != ATH10K_STATE_OFF) {
+		mutex_unlock(&ar->conf_mutex);
+		return;
+	}
+
+	ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
+	if (ret) {
+		ath10k_err(ar, "Could not init hif: %d\n", ret);
+		goto err_off;
+	}
+
+	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
+				&ar->normal_mode_fw);
+	if (ret) {
+		ath10k_err(ar, "Could not init core: %d\n", ret);
+		goto err_power_down;
+	}
+
+	ar->state = ATH10K_STATE_PRE_ON;
+	ar->powerup_pending = false;
+	mutex_unlock(&ar->conf_mutex);
+	return;
+
+err_power_down:
+	ath10k_hif_power_down(ar);
+
+err_off:
+	ar->state = ATH10K_STATE_OFF;
+
+	ar->powerup_pending = false;
+	mutex_unlock(&ar->conf_mutex);
+	return;
+}
+
 static int ath10k_start(struct ieee80211_hw *hw)
 {
 	struct ath10k *ar = hw->priv;
 	u32 param;
 	int ret = 0;
 	struct wmi_bb_timing_cfg_arg bb_timing = {0};
+	bool skip_core_start = false;
 
 	/*
 	 * This makes sense only when restarting hw. It is harmless to call
@@ -5166,6 +5223,10 @@
 	case ATH10K_STATE_OFF:
 		ar->state = ATH10K_STATE_ON;
 		break;
+	case ATH10K_STATE_PRE_ON:
+		skip_core_start = true;
+		ar->state = ATH10K_STATE_ON;
+		break;
 	case ATH10K_STATE_RESTARTING:
 		ar->state = ATH10K_STATE_RESTARTED;
 		break;
@@ -5190,6 +5251,7 @@
 
 	spin_unlock_bh(&ar->data_lock);
 
+	if (!skip_core_start) {
 	ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
 	if (ret) {
 		ath10k_err(ar, "Could not init hif: %d\n", ret);
@@ -5202,6 +5264,7 @@
 		ath10k_err(ar, "Could not init core: %d\n", ret);
 		goto err_power_down;
 	}
+	}
 
 	if (ar->sys_cap_info & WMI_TLV_SYS_CAP_INFO_RFKILL) {
 		ret = ath10k_mac_rfkill_config(ar);
@@ -5370,6 +5433,9 @@
 
 	ath10k_drain_tx(ar);
 
+	cancel_work_sync(&ar->powerup_work);
+	ar->powerup_pending = false;
+
 	mutex_lock(&ar->conf_mutex);
 	if (ar->state != ATH10K_STATE_OFF) {
 		if (!ar->hw_rfkill_on) {
@@ -7481,7 +7547,7 @@
 		arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
 		INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
 		INIT_WORK(&arsta->tid_config_wk, ath10k_sta_tid_cfg_wk);
-
+		init_waitqueue_head(&arsta->empty_tx_wq);
 		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
 			ath10k_mac_txq_init(sta->txq[i]);
 	}
@@ -8018,7 +8084,7 @@
  * in ath10k, but device-specific in mac80211.
  */
 
-static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u8 radio_id, u32 value)
 {
 	struct ath10k *ar = hw->priv;
 	struct ath10k_vif *arvif;
@@ -8056,10 +8122,12 @@
 	return -EOPNOTSUPP;
 }
 
-void ath10k_mac_wait_tx_complete(struct ath10k *ar)
+static void _ath10k_mac_wait_tx_complete(struct ath10k *ar,
+					 unsigned long queues)
 {
 	bool skip;
 	long time_left;
+	unsigned int q;
 
 	/* mac80211 doesn't care if we really xmit queued frames or not
 	 * we'll collect those frames either way if we stop/delete vdevs
@@ -8072,7 +8140,11 @@
 			bool empty;
 
 			spin_lock_bh(&ar->htt.tx_lock);
-			empty = (ar->htt.num_pending_tx == 0);
+			for_each_set_bit(q, &queues, ar->hw->queues) {
+				empty = (ar->htt.num_pending_per_queue[q] == 0);
+				if (!empty)
+					break;
+			}
 			spin_unlock_bh(&ar->htt.tx_lock);
 
 			skip = (ar->state == ATH10K_STATE_WEDGED) ||
@@ -8087,6 +8159,13 @@
 			    skip, ar->state, time_left);
 }
 
+void ath10k_mac_wait_tx_complete(struct ath10k *ar)
+{
+	unsigned int queues = GENMASK(ar->hw->queues - 1, 0);
+
+	_ath10k_mac_wait_tx_complete(ar, queues);
+}
+
 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 			 u32 queues, bool drop)
 {
@@ -8108,7 +8187,41 @@
 	}
 
 	mutex_lock(&ar->conf_mutex);
-	ath10k_mac_wait_tx_complete(ar);
+	_ath10k_mac_wait_tx_complete(ar, queues);
+	mutex_unlock(&ar->conf_mutex);
+}
+
+static void ath10k_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+			     struct ieee80211_sta *sta)
+{
+	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
+	struct ath10k *ar = hw->priv;
+	bool skip;
+	long time_left;
+
+	/* TODO do we need drop implemented here ? */
+
+	mutex_lock(&ar->conf_mutex);
+
+	if (ar->state == ATH10K_STATE_WEDGED)
+		goto out;
+
+	time_left = wait_event_timeout(arsta->empty_tx_wq, ({
+			bool empty;
+
+			empty = atomic_read(&arsta->num_fw_queued) == 0;
+
+			skip = (ar->state == ATH10K_STATE_WEDGED) ||
+			       test_bit(ATH10K_FLAG_CRASH_FLUSH,
+					&ar->dev_flags);
+
+			(empty || skip);
+		}), ATH10K_FLUSH_TIMEOUT_HZ);
+
+	if (time_left == 0 || skip)
+		ath10k_warn(ar, "failed to flush sta txq (sta %pM skip %i ar-state %i): %ld\n",
+			    sta->addr, skip, ar->state, time_left);
+out:
 	mutex_unlock(&ar->conf_mutex);
 }
 
@@ -9438,6 +9551,8 @@
 static const struct ieee80211_ops ath10k_ops = {
 	.tx				= ath10k_mac_op_tx,
 	.wake_tx_queue			= ath10k_mac_op_wake_tx_queue,
+	.get_powered			= ath10k_get_powered,
+	.set_powered			= ath10k_set_powered,
 	.start				= ath10k_start,
 	.stop				= ath10k_stop,
 	.config				= ath10k_config,
@@ -9459,6 +9574,7 @@
 	.set_rts_threshold		= ath10k_set_rts_threshold,
 	.set_frag_threshold		= ath10k_mac_op_set_frag_threshold,
 	.flush				= ath10k_flush,
+	.flush_sta			= ath10k_flush_sta,
 	.tx_last_beacon			= ath10k_tx_last_beacon,
 	.set_antenna			= ath10k_set_antenna,
 	.get_antenna			= ath10k_get_antenna,
@@ -9652,6 +9768,7 @@
 		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
 					BIT(NL80211_CHAN_WIDTH_20) |
 					BIT(NL80211_CHAN_WIDTH_40) |
+					BIT(NL80211_CHAN_WIDTH_160) |
 					BIT(NL80211_CHAN_WIDTH_80),
 #endif
 	},
@@ -9835,6 +9952,8 @@
 #define WRD_METHOD "WRDD"
 #define WRDD_WIFI  (0x07)
 
+#define ATH10K_DFS_PULSE_VALID_DIFF_TS 100
+
 static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
 {
 	union acpi_object *mcc_pkg;
@@ -9959,7 +10078,9 @@
 		WLAN_CIPHER_SUITE_CCMP_256,
 	};
 	struct ieee80211_supported_band *band;
+	const struct firmware *firmware;
 	void *channels;
+	u32 crc = 0;
 	int ret;
 
 	if (!is_valid_ether_addr(ar->mac_addr)) {
@@ -10048,15 +10169,20 @@
 	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
 	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
 	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
+	ieee80211_hw_set(ar->hw, APVLAN_NEED_MCAST_TO_UCAST);
 	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
 	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
 	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
 	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
 	ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
+	ieee80211_hw_set(ar->hw, SUPPORTS_VHT_EXT_NSS_BW);
 
 	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
 		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
 
+	if (ath10k_offload_use_drv_tx)
+		ieee80211_hw_set(ar->hw, ALLOW_DRV_TX_FOR_DATA);
+
 	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
 	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
 
@@ -10249,6 +10375,8 @@
 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
 		/* Init ath dfs pattern detector */
 		ar->ath_common.debug_mask = ATH_DBG_DFS;
+		ar->ath_common.dfs_pulse_valid_diff_ts =
+					ATH10K_DFS_PULSE_VALID_DIFF_TS;
 		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
 							     NL80211_DFS_UNSET);
 
@@ -10296,6 +10424,18 @@
 
 	ar->hw->weight_multiplier = ATH10K_AIRTIME_WEIGHT_MULTIPLIER;
 
+	INIT_WORK(&ar->powerup_work, ath10k_powerup_work);
+	ar->powerup_pending = false;
+
+	firmware = ar->normal_mode_fw.fw_file.firmware;
+	if (firmware)
+		crc = crc32_le(0, firmware->data, firmware->size);
+	snprintf(ar->hw->wiphy->fw_version,
+		 sizeof(ar->hw->wiphy->fw_version),
+		 "%s, 0x%x",
+		 ar->hw_params.name,
+		 crc);
+
 	ret = ieee80211_register_hw(ar->hw);
 	if (ret) {
 		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
@@ -10307,6 +10447,7 @@
 		ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
 	}
 
+#ifndef CONFIG_ATH_REG_IGNORE
 	if (!ath_is_world_regd(&ar->ath_common.reg_world_copy) &&
 	    !ath_is_world_regd(&ar->ath_common.regulatory)) {
 		ret = regulatory_hint(ar->hw->wiphy,
@@ -10314,10 +10455,13 @@
 		if (ret)
 			goto err_unregister;
 	}
+#endif
 
 	return 0;
 
+#ifndef CONFIG_ATH_REG_IGNORE
 err_unregister:
+#endif
 	ieee80211_unregister_hw(ar->hw);
 
 err_dfs_detector_exit:
@@ -10335,6 +10479,7 @@
 void ath10k_mac_unregister(struct ath10k *ar)
 {
 	ieee80211_unregister_hw(ar->hw);
+	cancel_work_sync(&ar->powerup_work);
 
 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
 		ar->dfs_detector->exit(ar->dfs_detector);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath10k/pci.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/pci.c
--- linux-6.13.12/drivers/net/wireless/ath/ath10k/pci.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/pci.c	2025-09-25 17:40:34.107359986 +0200
@@ -10,6 +10,7 @@
 #include <linux/interrupt.h>
 #include <linux/spinlock.h>
 #include <linux/bitops.h>
+#include <linux/delay.h>
 
 #include "core.h"
 #include "debug.h"
@@ -31,6 +32,7 @@
 
 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
+static char *fem_name;
 
 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
@@ -38,6 +40,9 @@
 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
 
+module_param(fem_name, charp, 0660);
+MODULE_PARM_DESC(fem_name, "force FEM type");
+
 /* how long wait to wait for target to initialise, in ms */
 #define ATH10K_PCI_TARGET_WAIT 3000
 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
@@ -715,7 +720,8 @@
 	/* Check if the shared legacy irq is for us */
 	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
 				  PCIE_INTR_CAUSE_ADDRESS);
-	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
+	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL) &&
+	    cause != 0xdeadbeef)
 		return true;
 
 	return false;
@@ -1320,12 +1326,20 @@
 	struct sk_buff *skb;
 
 	while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
+		int map_len;
+
 		/* no need to call tx completion for NULL pointers */
 		if (!skb)
 			continue;
 
+#if defined(CONFIG_IP_FFN)
+		map_len = skb_ffn_get_dirty_len(skb);
+#else
+		map_len = skb->len;
+#endif
+
 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
-				 skb->len, DMA_TO_DEVICE);
+				 map_len, DMA_TO_DEVICE);
 		ath10k_htt_hif_tx_complete(ar, skb);
 	}
 }
@@ -2657,12 +2671,6 @@
 	return 0;
 }
 
-static int ath10k_pci_qca99x0_soft_chip_reset(struct ath10k *ar)
-{
-	ath10k_pci_irq_disable(ar);
-	return ath10k_pci_qca99x0_chip_reset(ar);
-}
-
 static int ath10k_pci_safe_chip_reset(struct ath10k *ar)
 {
 	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -3526,7 +3534,7 @@
 	.get_num_banks	= ath10k_pci_get_num_banks,
 };
 
-static int ath10k_pci_probe(struct pci_dev *pdev,
+static int __ath10k_pci_probe(struct pci_dev *pdev,
 			    const struct pci_device_id *pci_dev)
 {
 	int ret = 0;
@@ -3567,21 +3575,21 @@
 	case QCA99X0_2_0_DEVICE_ID:
 		hw_rev = ATH10K_HW_QCA99X0;
 		pci_ps = false;
-		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+		pci_soft_reset = NULL;
 		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
 		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
 		break;
 	case QCA9984_1_0_DEVICE_ID:
 		hw_rev = ATH10K_HW_QCA9984;
 		pci_ps = false;
-		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+		pci_soft_reset = NULL;
 		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
 		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
 		break;
 	case QCA9888_2_0_DEVICE_ID:
 		hw_rev = ATH10K_HW_QCA9888;
 		pci_ps = false;
-		pci_soft_reset = ath10k_pci_qca99x0_soft_chip_reset;
+		pci_soft_reset = NULL;
 		pci_hard_reset = ath10k_pci_qca99x0_chip_reset;
 		targ_cpu_to_ce_addr = ath10k_pci_qca99x0_targ_cpu_to_ce_addr;
 		break;
@@ -3620,6 +3628,7 @@
 	ar_pci->targ_cpu_to_ce_addr = targ_cpu_to_ce_addr;
 	ar->ce_priv = &ar_pci->ce;
 
+	ar->fem_name = fem_name;
 	ar->id.vendor = pdev->vendor;
 	ar->id.device = pdev->device;
 	ar->id.subsystem_vendor = pdev->subsystem_vendor;
@@ -3778,6 +3787,23 @@
 			 ath10k_pci_pm_suspend,
 			 ath10k_pci_pm_resume);
 
+static int ath10k_pci_probe(struct pci_dev *pdev,
+			    const struct pci_device_id *pci_dev)
+{
+	int cnt = 0;
+	int rv;
+	do {
+		rv = __ath10k_pci_probe(pdev, pci_dev);
+		if (rv == 0)
+			return rv;
+
+		pr_err("ath10k: failed to probe PCI : %d, retry-count: %d\n", rv, cnt);
+		mdelay(10); /* let the ath10k firmware gerbil take a small break */
+	} while (cnt++ < 3);
+
+	return rv;
+}
+
 static struct pci_driver ath10k_pci_driver = {
 	.name = "ath10k_pci",
 	.id_table = ath10k_pci_id_table,
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath10k/thermal.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/thermal.c
--- linux-6.13.12/drivers/net/wireless/ath/ath10k/thermal.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/thermal.c	2025-09-25 17:40:34.111360006 +0200
@@ -161,7 +161,9 @@
 	if (!test_bit(WMI_SERVICE_THERM_THROT, ar->wmi.svc_map))
 		return 0;
 
-	cdev = thermal_cooling_device_register("ath10k_thermal", ar,
+	cdev = thermal_cooling_device_register_with_parent(ar->dev,
+							   "ath10k_thermal",
+							   ar,
 					       &ath10k_thermal_ops);
 
 	if (IS_ERR(cdev)) {
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath10k/thermal.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/thermal.h
--- linux-6.13.12/drivers/net/wireless/ath/ath10k/thermal.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/thermal.h	2025-09-25 17:40:34.111360006 +0200
@@ -9,7 +9,7 @@
 #define ATH10K_QUIET_PERIOD_MIN         25
 #define ATH10K_QUIET_START_OFFSET       10
 #define ATH10K_HWMON_NAME_LEN           15
-#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
+#define ATH10K_THERMAL_SYNC_TIMEOUT_HZ (HZ/10)
 #define ATH10K_THERMAL_THROTTLE_MAX     100
 
 struct ath10k_thermal {
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath10k/txrx.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/txrx.c
--- linux-6.13.12/drivers/net/wireless/ath/ath10k/txrx.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/txrx.c	2025-09-25 17:40:34.111360006 +0200
@@ -82,13 +82,16 @@
 
 	flags = skb_cb->flags;
 	ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
-	ath10k_htt_tx_dec_pending(htt);
+	ath10k_htt_tx_dec_pending(htt, txq);
 	spin_unlock_bh(&htt->tx_lock);
 
 	rcu_read_lock();
-	if (txq && txq->sta && skb_cb->airtime_est)
+	if (txq && txq->sta) {
+		if (skb_cb->airtime_est)
 		ieee80211_sta_register_airtime(txq->sta, txq->tid,
 					       skb_cb->airtime_est, 0);
+		ath10k_htt_tx_sta_dec_pending(htt, txq->sta);
+	}
 	rcu_read_unlock();
 
 	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath10k/wmi.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/wmi.c
--- linux-6.13.12/drivers/net/wireless/ath/ath10k/wmi.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/wmi.c	2025-09-29 14:23:07.593732350 +0200
@@ -964,7 +964,7 @@
 	.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
 	.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
 	.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
-	.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
+	.tx_encap_type = WMI_10X_VDEV_PARAM_TX_ENCAP_TYPE,
 	.ap_detect_out_of_sync_sleeping_sta_time_secs =
 		WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
 	.rc_num_retries = WMI_VDEV_PARAM_UNSUPPORTED,
@@ -5837,12 +5837,14 @@
 	survey->noise     = noise_floor;
 	survey->time      = div_u64(total, cc_freq_hz);
 	survey->time_busy = div_u64(busy, cc_freq_hz);
-	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
+	survey->time_rx   = div_u64(rx, cc_freq_hz);
 	survey->time_tx   = div_u64(tx, cc_freq_hz);
+	survey->time_bss_rx = div_u64(rx_bss, cc_freq_hz);
 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
 			     SURVEY_INFO_TIME |
 			     SURVEY_INFO_TIME_BUSY |
 			     SURVEY_INFO_TIME_RX |
+			     SURVEY_INFO_TIME_BSS_RX |
 			     SURVEY_INFO_TIME_TX);
 exit:
 	spin_unlock_bh(&ar->data_lock);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath10k/wmi.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/wmi.h
--- linux-6.13.12/drivers/net/wireless/ath/ath10k/wmi.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath10k/wmi.h	2025-09-29 14:23:07.593732350 +0200
@@ -7389,6 +7389,7 @@
 enum wmi_host_platform_type {
 	WMI_HOST_PLATFORM_HIGH_PERF,
 	WMI_HOST_PLATFORM_LOW_PERF,
+	WMI_HOST_PLATFORM_LOW_PERF_NO_FETCH,
 };
 
 enum wmi_bss_survey_req_type {
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/Kconfig linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/Kconfig
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/Kconfig	2025-09-29 14:23:07.593732350 +0200
@@ -58,3 +58,15 @@
 	  Enable ath11k spectral scan support
 
 	  Say Y to enable access to the FFT/spectral data via debugfs.
+
+config ATH11K_SMALL_DP_RINGS
+	bool "ath11k small datapath DMA rings for memory challenged platforms"
+	depends on ATH11K
+	help
+	  Select this to lower the memory requirements for DMA rings
+	  in the datapath code. This can free up to 17 MiB of RAM per
+	  chip.
+
+config ATH11K_QCN9074_FIXED_MEM_REGION
+	bool "QCA ath11k fixed memory region mode on QCN9074"
+	depends on ATH11K
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/core.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/core.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/core.c	2025-09-25 17:40:34.119360046 +0200
@@ -34,9 +34,14 @@
 		 "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
 
 bool ath11k_ftm_mode;
-module_param_named(ftm_mode, ath11k_ftm_mode, bool, 0444);
+module_param_named(ftm_mode, ath11k_ftm_mode, bool, 0644);
 MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
 
+static char *ath11k_board_variant;
+module_param_named(board_variant, ath11k_board_variant, charp, 0644);
+MODULE_PARM_DESC(board_variant, "board variant to use for bdf lookup");
+
+
 static const struct ath11k_hw_params ath11k_hw_params[] = {
 	{
 		.hw_rev = ATH11K_HW_IPQ8074,
@@ -56,7 +61,7 @@
 		.host_ce_config = ath11k_host_ce_config_ipq8074,
 		.ce_count = 12,
 		.target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
-		.target_ce_count = 11,
+		.target_ce_count = 12,
 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq8074,
 		.svc_to_ce_map_len = 21,
 		.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
@@ -94,6 +99,7 @@
 		.num_vdevs = 16 + 1,
 		.num_peers = 512,
 		.supports_suspend = false,
+		.supports_ap_vlan = true,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
 		.supports_regdb = false,
 		.fix_l1ss = true,
@@ -123,6 +129,8 @@
 		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
 		.smp2p_wow_exit = false,
 		.support_dual_stations = false,
+		.ce_fwlog_enable = false,
+		.pdev_suspend = false,
 	},
 	{
 		.hw_rev = ATH11K_HW_IPQ6018_HW10,
@@ -142,7 +150,7 @@
 		.host_ce_config = ath11k_host_ce_config_ipq8074,
 		.ce_count = 12,
 		.target_ce_config = ath11k_target_ce_config_wlan_ipq8074,
-		.target_ce_count = 11,
+		.target_ce_count = 12,
 		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_ipq6018,
 		.svc_to_ce_map_len = 19,
 		.ce_ie_addr = &ath11k_ce_ie_addr_ipq8074,
@@ -177,6 +185,7 @@
 		.num_vdevs = 16 + 1,
 		.num_peers = 512,
 		.supports_suspend = false,
+		.supports_ap_vlan = true,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
 		.supports_regdb = false,
 		.fix_l1ss = true,
@@ -207,6 +216,8 @@
 		.smp2p_wow_exit = false,
 		.support_fw_mac_sequence = false,
 		.support_dual_stations = false,
+		.ce_fwlog_enable = false,
+		.pdev_suspend = false,
 	},
 	{
 		.name = "qca6390 hw2.0",
@@ -263,6 +274,7 @@
 		.num_vdevs = 2 + 1,
 		.num_peers = 512,
 		.supports_suspend = true,
+		.supports_ap_vlan = false,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_ipq8074),
 		.supports_regdb = false,
 		.fix_l1ss = true,
@@ -296,6 +308,8 @@
 		.smp2p_wow_exit = false,
 		.support_fw_mac_sequence = true,
 		.support_dual_stations = true,
+		.ce_fwlog_enable = false,
+		.pdev_suspend = false,
 	},
 	{
 		.name = "qcn9074 hw1.0",
@@ -349,6 +363,7 @@
 		.num_vdevs = 8,
 		.num_peers = 128,
 		.supports_suspend = false,
+		.supports_ap_vlan = true,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
 		.supports_regdb = false,
 		.fix_l1ss = true,
@@ -365,7 +380,7 @@
 		.bios_sar_capa = NULL,
 		.m3_fw_support = true,
 		.fixed_bdf_addr = false,
-		.fixed_mem_region = false,
+		.fixed_mem_region = IS_ENABLED(CONFIG_ATH11K_QCN9074_FIXED_MEM_REGION),
 		.static_window_map = true,
 		.hybrid_bus_type = false,
 		.fixed_fw_mem = false,
@@ -379,6 +394,8 @@
 		.smp2p_wow_exit = false,
 		.support_fw_mac_sequence = false,
 		.support_dual_stations = false,
+		.ce_fwlog_enable = true,
+		.pdev_suspend = false,
 	},
 	{
 		.name = "wcn6855 hw2.0",
@@ -435,6 +452,7 @@
 		.num_vdevs = 2 + 1,
 		.num_peers = 512,
 		.supports_suspend = true,
+		.supports_ap_vlan = false,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
 		.supports_regdb = true,
 		.fix_l1ss = false,
@@ -468,6 +486,7 @@
 		.smp2p_wow_exit = false,
 		.support_fw_mac_sequence = true,
 		.support_dual_stations = true,
+		.pdev_suspend = false,
 	},
 	{
 		.name = "wcn6855 hw2.1",
@@ -522,6 +541,7 @@
 		.num_vdevs = 2 + 1,
 		.num_peers = 512,
 		.supports_suspend = true,
+		.supports_ap_vlan = false,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
 		.supports_regdb = true,
 		.fix_l1ss = false,
@@ -555,6 +575,7 @@
 		.smp2p_wow_exit = false,
 		.support_fw_mac_sequence = true,
 		.support_dual_stations = true,
+		.pdev_suspend = false,
 	},
 	{
 		.name = "wcn6750 hw1.0",
@@ -607,6 +628,7 @@
 		.num_vdevs = 3,
 		.num_peers = 512,
 		.supports_suspend = false,
+		.supports_ap_vlan = false,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
 		.supports_regdb = true,
 		.fix_l1ss = false,
@@ -637,6 +659,7 @@
 		.smp2p_wow_exit = true,
 		.support_fw_mac_sequence = true,
 		.support_dual_stations = false,
+		.pdev_suspend = true,
 	},
 	{
 		.hw_rev = ATH11K_HW_IPQ5018_HW10,
@@ -649,6 +672,7 @@
 		.max_radios = MAX_RADIOS_5018,
 		.bdf_addr = 0x4BA00000,
 		/* hal_desc_sz and hw ops are similar to qcn9074 */
+		.supports_ap_vlan = false,
 		.hal_desc_sz = sizeof(struct hal_rx_desc_qcn9074),
 		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_IPQ8074,
 		.ring_mask = &ath11k_hw_ring_mask_ipq8074,
@@ -719,6 +743,7 @@
 		.smp2p_wow_exit = false,
 		.support_fw_mac_sequence = false,
 		.support_dual_stations = false,
+		.pdev_suspend = false,
 	},
 	{
 		.name = "qca2066 hw2.1",
@@ -809,6 +834,94 @@
 		.support_fw_mac_sequence = true,
 		.support_dual_stations = true,
 	},
+	{
+		.name = "qca6698aq hw2.1",
+		.hw_rev = ATH11K_HW_QCA6698AQ_HW21,
+		.fw = {
+			.dir = "QCA6698AQ/hw2.1",
+			.board_size = 256 * 1024,
+			.cal_offset = 128 * 1024,
+		},
+		.max_radios = 3,
+		.bdf_addr = 0x4B0C0000,
+		.hw_ops = &wcn6855_ops,
+		.ring_mask = &ath11k_hw_ring_mask_qca6390,
+		.internal_sleep_clock = true,
+		.regs = &wcn6855_regs,
+		.qmi_service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390,
+		.host_ce_config = ath11k_host_ce_config_qca6390,
+		.ce_count = 9,
+		.target_ce_config = ath11k_target_ce_config_wlan_qca6390,
+		.target_ce_count = 9,
+		.svc_to_ce_map = ath11k_target_service_to_ce_map_wlan_qca6390,
+		.svc_to_ce_map_len = 14,
+		.single_pdev_only = true,
+		.rxdma1_enable = false,
+		.num_rxdma_per_pdev = 2,
+		.rx_mac_buf_ring = true,
+		.vdev_start_delay = true,
+		.htt_peer_map_v2 = false,
+
+		.spectral = {
+			.fft_sz = 0,
+			.fft_pad_sz = 0,
+			.summary_pad_sz = 0,
+			.fft_hdr_len = 0,
+			.max_fft_bins = 0,
+			.fragment_160mhz = false,
+		},
+
+		.interface_modes = BIT(NL80211_IFTYPE_STATION) |
+					BIT(NL80211_IFTYPE_AP) |
+					BIT(NL80211_IFTYPE_P2P_DEVICE) |
+					BIT(NL80211_IFTYPE_P2P_CLIENT) |
+					BIT(NL80211_IFTYPE_P2P_GO),
+		.supports_monitor = false,
+		.supports_shadow_regs = true,
+		.idle_ps = true,
+		.supports_sta_ps = true,
+		.coldboot_cal_mm = false,
+		.coldboot_cal_ftm = false,
+		.cbcal_restart_fw = false,
+		.fw_mem_mode = 0,
+		.num_vdevs = 2 + 1,
+		.num_peers = 512,
+		.supports_suspend = true,
+		.hal_desc_sz = sizeof(struct hal_rx_desc_wcn6855),
+		.supports_regdb = true,
+		.fix_l1ss = false,
+		.credit_flow = true,
+		.max_tx_ring = DP_TCL_NUM_RING_MAX_QCA6390,
+		.hal_params = &ath11k_hw_hal_params_qca6390,
+		.supports_dynamic_smps_6ghz = false,
+		.alloc_cacheable_memory = false,
+		.supports_rssi_stats = true,
+		.fw_wmi_diag_event = true,
+		.current_cc_support = true,
+		.dbr_debug_support = false,
+		.global_reset = true,
+		.bios_sar_capa = &ath11k_hw_sar_capa_wcn6855,
+		.m3_fw_support = true,
+		.fixed_bdf_addr = false,
+		.fixed_mem_region = false,
+		.static_window_map = false,
+		.hybrid_bus_type = false,
+		.fixed_fw_mem = false,
+		.support_off_channel_tx = true,
+		.supports_multi_bssid = true,
+
+		.sram_dump = {
+			.start = 0x01400000,
+			.end = 0x0177ffff,
+		},
+
+		.tcl_ring_retry = true,
+		.tx_ring_size = DP_TCL_DATA_RING_SIZE,
+		.smp2p_wow_exit = false,
+		.support_fw_mac_sequence = true,
+		.support_dual_stations = true,
+		.pdev_suspend = false,
+	},
 };
 
 static inline struct ath11k_pdev *ath11k_core_get_single_pdev(struct ath11k_base *ab)
@@ -1105,7 +1218,10 @@
 	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
 	char variant[9 + ATH11K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
 
-	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
+	if (ath11k_board_variant)
+		scnprintf(variant, sizeof(variant), ",variant=%s",
+			  ath11k_board_variant);
+	else if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
 		scnprintf(variant, sizeof(variant), ",variant=%s",
 			  ab->qmi.target.bdf_ext);
 
@@ -1669,11 +1785,47 @@
 	return ret;
 }
 
+static void ath11k_core_pdev_suspend_target(struct ath11k_base *ab)
+{
+	struct ath11k *ar;
+	struct ath11k_pdev *pdev;
+	unsigned long time_left;
+	int ret;
+	int i;
+
+	if (!ab->hw_params.pdev_suspend)
+		return;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		ar = pdev->ar;
+
+		reinit_completion(&ab->htc_suspend);
+
+		ret = ath11k_wmi_pdev_suspend(ar, WMI_PDEV_SUSPEND_AND_DISABLE_INTR,
+					      pdev->pdev_id);
+		if (ret) {
+			ath11k_warn(ab, "could not suspend target :%d\n", ret);
+			/* pointless to try other pdevs */
+			return;
+		}
+
+		time_left = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ);
+
+		if (!time_left) {
+			ath11k_warn(ab, "suspend timed out - target pause event never came\n");
+			/* pointless to try other pdevs */
+			return;
+		}
+	}
+}
+
 static void ath11k_core_pdev_destroy(struct ath11k_base *ab)
 {
 	ath11k_spectral_deinit(ab);
 	ath11k_thermal_unregister(ab);
 	ath11k_mac_unregister(ab);
+	ath11k_core_pdev_suspend_target(ab);
 	ath11k_hif_irq_disable(ab);
 	ath11k_dp_pdev_free(ab);
 	ath11k_debugfs_pdev_destroy(ab);
@@ -1807,6 +1959,10 @@
 	return ret;
 }
 
+unsigned int ce_fwlog = 1;
+module_param_named(ce_fwlog, ce_fwlog, uint, 0644);
+MODULE_PARM_DESC(ce_fwlog, "Enable/Disable CE based FW logging");
+
 int ath11k_core_qmi_firmware_ready(struct ath11k_base *ab)
 {
 	int ret;
@@ -1859,6 +2015,15 @@
 		goto err_core_stop;
 	}
 	ath11k_hif_irq_enable(ab);
+
+	if (ab->hw_params.ce_fwlog_enable && ce_fwlog) {
+		ret = ath11k_enable_fwlog(ab);
+		if (ret < 0) {
+			ath11k_err(ab, "failed to enable fwlog: %d\n", ret);
+			goto err_core_stop;
+		}
+	}
+
 	mutex_unlock(&ab->core_lock);
 
 	return 0;
@@ -1924,6 +2089,7 @@
 	ath11k_mac_scan_finish(ar);
 	ath11k_mac_peer_cleanup_all(ar);
 	cancel_delayed_work_sync(&ar->scan.timeout);
+	cancel_work_sync(&ar->channel_update_work);
 	cancel_work_sync(&ar->regd_update_work);
 	cancel_work_sync(&ab->update_11d_work);
 
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/core.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/core.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/core.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/core.h	2025-09-29 14:23:07.593732350 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH11K_CORE_H
@@ -148,6 +148,7 @@
 	ATH11K_HW_WCN6750_HW10,
 	ATH11K_HW_IPQ5018_HW10,
 	ATH11K_HW_QCA2066_HW21,
+	ATH11K_HW_QCA6698AQ_HW21,
 };
 
 enum ath11k_firmware_mode {
@@ -168,6 +169,7 @@
 
 struct ath11k_ext_irq_grp {
 	struct ath11k_base *ab;
+	char *name;
 	u32 irqs[ATH11K_EXT_IRQ_NUM_MAX];
 	u32 num_irq;
 	u32 grp_id;
@@ -340,7 +342,6 @@
  * @ap_power_type: type of power (SP/LPI/VLP)
  * @num_pwr_levels: number of power levels
  * @reg_max: Array of maximum TX power (dBm) per PSD value
- * @ap_constraint_power: AP constraint power (dBm)
  * @tpe: TPE values processed from TPE IE
  * @chan_power_info: power info to send to firmware
  */
@@ -350,7 +351,6 @@
 	enum wmi_reg_6ghz_ap_type ap_power_type;
 	u8 num_pwr_levels;
 	u8 reg_max[ATH11K_NUM_PWR_LEVELS];
-	u8 ap_constraint_power;
 	s8 tpe[ATH11K_NUM_PWR_LEVELS];
 	struct ath11k_chan_power_info chan_power_info[ATH11K_NUM_PWR_LEVELS];
 };
@@ -370,8 +370,8 @@
 	struct ath11k *ar;
 	struct ieee80211_vif *vif;
 
-	u16 tx_seq_no;
 	struct wmi_wmm_params_all_arg wmm_params;
+	struct wmi_wmm_params_all_arg muedca_params;
 	struct list_head list;
 	union {
 		struct {
@@ -551,6 +551,9 @@
 	struct ath11k_htt_tx_stats *tx_stats;
 	struct ath11k_rx_peer_stats *rx_stats;
 
+	atomic_t num_tx_pending;
+	wait_queue_head_t tx_empty_waitq;
+
 #ifdef CONFIG_MAC80211_DEBUGFS
 	/* protected by conf_mutex */
 	bool aggr_mode;
@@ -624,6 +627,7 @@
 	u32 mem_offset;
 	u32 module_id_bitmap[MAX_MODULE_ID_BITMAP_WORDS];
 	struct ath11k_debug_dbr *dbr_debug[WMI_DIRECT_BUF_MAX];
+	u32 mem_addr;
 };
 
 struct ath11k_per_peer_tx_stats {
@@ -645,6 +649,7 @@
 	struct ath11k_base *ab;
 	struct ath11k_pdev *pdev;
 	struct ieee80211_hw *hw;
+	struct ieee80211_ops *ops;
 	struct ath11k_pdev_wmi *wmi;
 	struct ath11k_pdev_dp dp;
 	u8 mac_addr[ETH_ALEN];
@@ -687,7 +692,7 @@
 	struct mutex conf_mutex;
 	/* protects the radio specific data like debug stats, ppdu_stats_info stats,
 	 * vdev_stop_status info, scan data, ath11k_sta info, ath11k_vif info,
-	 * channel context data, survey info, test mode data.
+	 * channel context data, survey info, test mode data, channel_update_queue.
 	 */
 	spinlock_t data_lock;
 
@@ -745,6 +750,9 @@
 	struct completion bss_survey_done;
 
 	struct work_struct regd_update_work;
+	struct work_struct channel_update_work;
+	/* protected with data_lock */
+	struct list_head channel_update_queue;
 
 	struct work_struct wmi_mgmt_tx_work;
 	struct sk_buff_head wmi_mgmt_tx_queue;
@@ -951,6 +959,7 @@
 	wait_queue_head_t peer_mapping_wq;
 	u8 mac_addr[ETH_ALEN];
 	int irq_num[ATH11K_IRQ_NUM_MAX];
+	char *irq_name[ATH11K_IRQ_NUM_MAX];
 	struct ath11k_ext_irq_grp ext_irq_grp[ATH11K_EXT_IRQ_GRP_NUM_MAX];
 	struct ath11k_targ_cap target_caps;
 	u32 ext_service_bitmap[WMI_SERVICE_EXT_BM_SIZE];
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/dbring.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/dbring.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/dbring.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/dbring.c	2025-09-25 17:40:34.119360046 +0200
@@ -81,6 +81,8 @@
 
 	buff->paddr = paddr;
 
+	dma_sync_single_for_device(ab->dev, paddr, ring->buf_sz, DMA_FROM_DEVICE);
+
 	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
 		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
 
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/debugfs.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/debugfs.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/debugfs.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/debugfs.c	2025-09-25 17:40:34.119360046 +0200
@@ -16,6 +16,7 @@
 #include "debugfs_htt_stats.h"
 #include "peer.h"
 #include "hif.h"
+#include "qmi.h"
 
 static const char *htt_bp_umac_ring[HTT_SW_UMAC_RING_IDX_MAX] = {
 	"REO2SW1_RING",
@@ -178,7 +179,7 @@
 	 * received 'update stats' event, we keep a 3 seconds timeout in case,
 	 * fw_stats_done is not marked yet
 	 */
-	timeout = jiffies + msecs_to_jiffies(3 * 1000);
+	timeout = jiffies + secs_to_jiffies(3);
 
 	ath11k_debugfs_fw_stats_reset(ar);
 
@@ -557,6 +558,104 @@
 	.llseek = default_llseek,
 };
 
+
+static ssize_t ath11k_athdiag_read(struct file *file,
+				   char __user *user_buf,
+				   size_t count, loff_t *ppos)
+{
+	struct ath11k *ar = file->private_data;
+	u8 *buf;
+	int ret;
+
+	if (*ppos <= 0)
+		return -EINVAL;
+
+	if (!count)
+		return 0;
+
+	mutex_lock(&ar->conf_mutex);
+
+	buf = vmalloc(count);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	ret = ath11k_qmi_mem_read(ar->ab, *ppos, buf, count);
+	if (ret < 0) {
+		ath11k_warn(ar->ab, "failed to read address 0x%08x via diagnose window from debugfs: %d\n",
+			    (u32)(*ppos), ret);
+		 goto exit;
+	}
+
+	ret = copy_to_user(user_buf, buf, count);
+	if (ret) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	count -= ret;
+	*ppos += count;
+	ret = count;
+exit:
+	vfree(buf);
+	mutex_unlock(&ar->conf_mutex);
+
+	return ret;
+}
+
+static ssize_t ath11k_athdiag_write(struct file *file,
+				    const char __user *user_buf,
+				    size_t count, loff_t *ppos)
+{
+	struct ath11k *ar = file->private_data;
+	u8 *buf;
+	int ret;
+
+	if (*ppos <= 0)
+		return -EINVAL;
+
+	if (!count)
+		return 0;
+
+	mutex_lock(&ar->conf_mutex);
+
+	buf = vmalloc(count);
+	if (!buf) {
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	ret = copy_from_user(buf, user_buf, count);
+	if (ret) {
+		ret = -EFAULT;
+		goto exit;
+	}
+
+	ret = ath11k_qmi_mem_write(ar->ab, *ppos, buf, count);
+	if (ret < 0) {
+		ath11k_warn(ar->ab, "failed to write address 0x%08x via diagnose window from debugfs: %d\n",
+			    (u32)(*ppos), ret);
+		goto exit;
+	}
+
+	*ppos += count;
+	ret = count;
+
+exit:
+	vfree(buf);
+	mutex_unlock(&ar->conf_mutex);
+	return ret;
+}
+
+static const struct file_operations fops_athdiag = {
+	.read = ath11k_athdiag_read,
+	.write = ath11k_athdiag_write,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 static ssize_t ath11k_write_enable_extd_tx_stats(struct file *file,
 						 const char __user *ubuf,
 						 size_t count, loff_t *ppos)
@@ -1649,6 +1748,10 @@
 				    &fops_reset_ps_duration);
 	}
 
+	debugfs_create_file("athdiag", S_IRUSR | S_IWUSR,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_athdiag);
+
 	return 0;
 }
 
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/dp.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/dp.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/dp.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/dp.c	2025-09-25 17:40:34.119360046 +0200
@@ -282,8 +282,18 @@
 	case HAL_RXDMA_MONITOR_STATUS:
 		params.low_threshold = num_entries >> 3;
 		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
-		params.intr_batch_cntr_thres_entries = 0;
 		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
+		/* In case of PCI chipsets, we don't have PPDU end interrupts,
+		 * so the MONITOR STATUS ring is reaped by receiving MSI from srng.
+		 * Keep the batch threshold as 4 so that an interrupt is received
+		 * for every 4 frames in the MONITOR_STATUS ring.
+		 */
+		if ((type == HAL_RXDMA_MONITOR_STATUS) &&
+				(params.flags & HAL_SRNG_FLAGS_MSI_INTR))
+			params.intr_batch_cntr_thres_entries = 4;
+		else
+			params.intr_batch_cntr_thres_entries = 0;
+
 		break;
 	case HAL_WBM2SW_RELEASE:
 		if (ring_num < 3) {
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/dp.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/dp.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/dp.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/dp.h	2025-09-25 17:40:34.123360066 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023, 2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH11K_DP_H
@@ -20,7 +20,6 @@
 
 struct dp_rx_tid {
 	u8 tid;
-	u32 *vaddr;
 	dma_addr_t paddr;
 	u32 size;
 	u32 ba_win_sz;
@@ -37,6 +36,9 @@
 	/* Timer info related to fragments */
 	struct timer_list frag_timer;
 	struct ath11k_base *ab;
+	u32 *vaddr_unaligned;
+	dma_addr_t paddr_unaligned;
+	u32 unaligned_size;
 };
 
 #define DP_REO_DESC_FREE_THRESHOLD  64
@@ -165,7 +167,6 @@
 	struct ath11k_pdev_mon_stats rx_mon_stats;
 	/* lock for monitor data */
 	spinlock_t mon_lock;
-	struct sk_buff_head rx_status_q;
 };
 
 struct ath11k_pdev_dp {
@@ -205,7 +206,6 @@
 #define DP_WBM_RELEASE_RING_SIZE	64
 #define DP_TCL_DATA_RING_SIZE		512
 #define DP_TCL_DATA_RING_SIZE_WCN6750	2048
-#define DP_TX_COMP_RING_SIZE		32768
 #define DP_TX_IDR_SIZE			DP_TX_COMP_RING_SIZE
 #define DP_TCL_CMD_RING_SIZE		32
 #define DP_TCL_STATUS_RING_SIZE		32
@@ -219,13 +219,22 @@
 #define DP_RXDMA_BUF_RING_SIZE		4096
 #define DP_RXDMA_REFILL_RING_SIZE	2048
 #define DP_RXDMA_ERR_DST_RING_SIZE	1024
-#define DP_RXDMA_MON_STATUS_RING_SIZE	1024
-#define DP_RXDMA_MONITOR_BUF_RING_SIZE	4096
-#define DP_RXDMA_MONITOR_DST_RING_SIZE	2048
 #define DP_RXDMA_MONITOR_DESC_RING_SIZE	4096
 
 #define DP_RX_RELEASE_RING_NUM	3
 
+#ifdef CONFIG_ATH11K_SMALL_DP_RINGS
+# define DP_TX_COMP_RING_SIZE		8192
+# define DP_RXDMA_MON_STATUS_RING_SIZE  512
+# define DP_RXDMA_MONITOR_BUF_RING_SIZE 128
+# define DP_RXDMA_MONITOR_DST_RING_SIZE 128
+#else
+# define DP_TX_COMP_RING_SIZE		32768
+# define DP_RXDMA_MON_STATUS_RING_SIZE	1024
+# define DP_RXDMA_MONITOR_BUF_RING_SIZE	4096
+# define DP_RXDMA_MONITOR_DST_RING_SIZE	2048
+#endif
+
 #define DP_RX_BUFFER_SIZE	2048
 #define	DP_RX_BUFFER_SIZE_LITE  1024
 #define DP_RX_BUFFER_ALIGN_SIZE	128
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/dp_rx.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/dp_rx.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/dp_rx.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/dp_rx.c	2025-09-29 14:23:07.597732370 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/ieee80211.h>
@@ -675,11 +675,11 @@
 	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
 		list_del(&cmd->list);
 		rx_tid = &cmd->data;
-		if (rx_tid->vaddr) {
-			dma_unmap_single(ab->dev, rx_tid->paddr,
-					 rx_tid->size, DMA_BIDIRECTIONAL);
-			kfree(rx_tid->vaddr);
-			rx_tid->vaddr = NULL;
+		if (rx_tid->vaddr_unaligned) {
+			dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+					     rx_tid->vaddr_unaligned,
+					     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+			rx_tid->vaddr_unaligned = NULL;
 		}
 		kfree(cmd);
 	}
@@ -689,11 +689,11 @@
 		list_del(&cmd_cache->list);
 		dp->reo_cmd_cache_flush_count--;
 		rx_tid = &cmd_cache->data;
-		if (rx_tid->vaddr) {
-			dma_unmap_single(ab->dev, rx_tid->paddr,
-					 rx_tid->size, DMA_BIDIRECTIONAL);
-			kfree(rx_tid->vaddr);
-			rx_tid->vaddr = NULL;
+		if (rx_tid->vaddr_unaligned) {
+			dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+					     rx_tid->vaddr_unaligned,
+					     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+			rx_tid->vaddr_unaligned = NULL;
 		}
 		kfree(cmd_cache);
 	}
@@ -708,11 +708,11 @@
 	if (status != HAL_REO_CMD_SUCCESS)
 		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
 			    rx_tid->tid, status);
-	if (rx_tid->vaddr) {
-		dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
-				 DMA_BIDIRECTIONAL);
-		kfree(rx_tid->vaddr);
-		rx_tid->vaddr = NULL;
+	if (rx_tid->vaddr_unaligned) {
+		dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size,
+				     rx_tid->vaddr_unaligned,
+				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+		rx_tid->vaddr_unaligned = NULL;
 	}
 }
 
@@ -749,10 +749,10 @@
 	if (ret) {
 		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
 			   rx_tid->tid, ret);
-		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
-				 DMA_BIDIRECTIONAL);
-		kfree(rx_tid->vaddr);
-		rx_tid->vaddr = NULL;
+		dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+				     rx_tid->vaddr_unaligned,
+				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+		rx_tid->vaddr_unaligned = NULL;
 	}
 }
 
@@ -784,6 +784,7 @@
 	dp->reo_cmd_cache_flush_count++;
 
 	/* Flush and invalidate aged REO desc from HW cache */
+retry:
 	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
 				 list) {
 		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
@@ -796,16 +797,17 @@
 			ath11k_dp_reo_cache_flush(ab, &elem->data);
 			kfree(elem);
 			spin_lock_bh(&dp->reo_cmd_lock);
+			goto retry;
 		}
 	}
 	spin_unlock_bh(&dp->reo_cmd_lock);
 
 	return;
 free_desc:
-	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
-			 DMA_BIDIRECTIONAL);
-	kfree(rx_tid->vaddr);
-	rx_tid->vaddr = NULL;
+	dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
+			     rx_tid->vaddr_unaligned,
+			     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+	rx_tid->vaddr_unaligned = NULL;
 }
 
 void ath11k_peer_rx_tid_delete(struct ath11k *ar,
@@ -831,14 +833,16 @@
 		if (ret != -ESHUTDOWN)
 			ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
 				   tid, ret);
-		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
-				 DMA_BIDIRECTIONAL);
-		kfree(rx_tid->vaddr);
-		rx_tid->vaddr = NULL;
+		dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size,
+				     rx_tid->vaddr_unaligned,
+				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+		rx_tid->vaddr_unaligned = NULL;
 	}
 
 	rx_tid->paddr = 0;
+	rx_tid->paddr_unaligned = 0;
 	rx_tid->size = 0;
+	rx_tid->unaligned_size = 0;
 }
 
 static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
@@ -982,10 +986,9 @@
 	if (!rx_tid->active)
 		goto unlock_exit;
 
-	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
-			 DMA_BIDIRECTIONAL);
-	kfree(rx_tid->vaddr);
-	rx_tid->vaddr = NULL;
+	dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned,
+			     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
+	rx_tid->vaddr_unaligned = NULL;
 
 	rx_tid->active = false;
 
@@ -1000,9 +1003,8 @@
 	struct ath11k_base *ab = ar->ab;
 	struct ath11k_peer *peer;
 	struct dp_rx_tid *rx_tid;
-	u32 hw_desc_sz;
-	u32 *addr_aligned;
-	void *vaddr;
+	u32 hw_desc_sz, *vaddr;
+	void *vaddr_unaligned;
 	dma_addr_t paddr;
 	int ret;
 
@@ -1050,37 +1052,34 @@
 	else
 		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
 
-	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
-	if (!vaddr) {
+	rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1;
+	vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr,
+						DMA_BIDIRECTIONAL, GFP_ATOMIC);
+	if (!vaddr_unaligned) {
 		spin_unlock_bh(&ab->base_lock);
 		return -ENOMEM;
 	}
 
-	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
-
-	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
-				   ssn, pn_type);
-
-	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
-			       DMA_BIDIRECTIONAL);
-
-	ret = dma_mapping_error(ab->dev, paddr);
-	if (ret) {
-		spin_unlock_bh(&ab->base_lock);
-		ath11k_warn(ab, "failed to setup dma map for peer %pM rx tid %d: %d\n",
-			    peer_mac, tid, ret);
-		goto err_mem_free;
-	}
-
-	rx_tid->vaddr = vaddr;
-	rx_tid->paddr = paddr;
+	rx_tid->vaddr_unaligned = vaddr_unaligned;
+	vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN);
+	rx_tid->paddr_unaligned = paddr;
+	rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr -
+			(unsigned long)rx_tid->vaddr_unaligned);
+	ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
 	rx_tid->size = hw_desc_sz;
 	rx_tid->active = true;
 
+	/* After dma_alloc_noncoherent, vaddr is being modified for reo qdesc setup.
+	 * Since these changes are not reflected in the device, driver now needs to
+	 * explicitly call dma_sync_single_for_device.
+	 */
+	dma_sync_single_for_device(ab->dev, rx_tid->paddr,
+				   rx_tid->size,
+				   DMA_TO_DEVICE);
 	spin_unlock_bh(&ab->base_lock);
 
-	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
-						     paddr, tid, 1, ba_win_sz);
+	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr,
+						     tid, 1, ba_win_sz);
 	if (ret) {
 		ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
 			    peer_mac, tid, ret);
@@ -1088,12 +1087,6 @@
 	}
 
 	return ret;
-
-err_mem_free:
-	kfree(rx_tid->vaddr);
-	rx_tid->vaddr = NULL;
-
-	return ret;
 }
 
 int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
@@ -2831,8 +2824,6 @@
 	rx_stats->dcm_count += ppdu_info->dcm;
 	rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
 
-	arsta->rssi_comb = ppdu_info->rssi_comb;
-
 	BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
 			     ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
 
@@ -4691,8 +4682,9 @@
 	}
 }
 
-static u32
-ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
 			  void *ring_entry, struct sk_buff **head_msdu,
 			  struct sk_buff **tail_msdu, u32 *npackets,
 			  u32 *ppdu_id)
@@ -4782,7 +4774,7 @@
 			if (!msdu) {
 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
 					   "msdu_pop: invalid buf_id %d\n", buf_id);
-				break;
+				goto next_msdu;
 			}
 			rxcb = ATH11K_SKB_RXCB(msdu);
 			if (!rxcb->unmapped) {
@@ -5147,7 +5139,7 @@
 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
 	const struct ath11k_hw_hal_params *hal_params;
 	void *ring_entry;
-	void *mon_dst_srng;
+	struct hal_srng *mon_dst_srng;
 	u32 ppdu_id;
 	u32 rx_bufs_used;
 	u32 ring_id;
@@ -5164,6 +5156,7 @@
 
 	spin_lock_bh(&pmon->mon_lock);
 
+	spin_lock_bh(&mon_dst_srng->lock);
 	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
 
 	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
@@ -5222,6 +5215,7 @@
 								mon_dst_srng);
 	}
 	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+	spin_unlock_bh(&mon_dst_srng->lock);
 
 	spin_unlock_bh(&pmon->mon_lock);
 
@@ -5409,7 +5403,7 @@
 					   "full mon msdu_pop: invalid buf_id %d\n",
 					    buf_id);
 				spin_unlock_bh(&rx_ring->idr_lock);
-				break;
+				goto next_msdu;
 			}
 			idr_remove(&rx_ring->bufs_idr, buf_id);
 			spin_unlock_bh(&rx_ring->idr_lock);
@@ -5611,7 +5605,7 @@
 	struct hal_sw_mon_ring_entries *sw_mon_entries;
 	struct ath11k_pdev_mon_stats *rx_mon_stats;
 	struct sk_buff *head_msdu, *tail_msdu;
-	void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+	struct hal_srng *mon_dst_srng;
 	void *ring_entry;
 	u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
 	int quota = 0, ret;
@@ -5627,6 +5621,9 @@
 		goto reap_status_ring;
 	}
 
+	mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
+	spin_lock_bh(&mon_dst_srng->lock);
+
 	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
 	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
 		head_msdu = NULL;
@@ -5670,6 +5667,7 @@
 	}
 
 	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
+	spin_unlock_bh(&mon_dst_srng->lock);
 	spin_unlock_bh(&pmon->mon_lock);
 
 	if (rx_bufs_used) {
@@ -5706,8 +5704,6 @@
 	struct ath11k_pdev_dp *dp = &ar->dp;
 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
 
-	skb_queue_head_init(&pmon->rx_status_q);
-
 	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
 
 	memset(&pmon->rx_mon_stats, 0,
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/dp_rx.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/dp_rx.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/dp_rx.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/dp_rx.h	2025-09-25 17:40:34.123360066 +0200
@@ -10,7 +10,7 @@
 #include "rx_desc.h"
 #include "debug.h"
 
-#define DP_MAX_NWIFI_HDR_LEN	30
+#define DP_MAX_NWIFI_HDR_LEN	36
 
 #define DP_RX_MPDU_ERR_FCS			BIT(0)
 #define DP_RX_MPDU_ERR_DECRYPT			BIT(1)
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/dp_tx.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/dp_tx.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/dp_tx.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/dp_tx.c	2025-09-25 17:40:34.123360066 +0200
@@ -79,6 +79,47 @@
 	}
 }
 
+#define HTT_META_DATA_ALIGNMENT    0x8
+
+static int ath11k_dp_metadata_align_skb(struct sk_buff *skb, u8 align_len)
+{
+	int ret;
+
+	ret = skb_cow_head(skb, align_len);
+	if (unlikely(ret))
+		return ret;
+
+	skb_push(skb, align_len);
+	memset(skb->data, 0, align_len);
+	return 0;
+}
+
+static int ath11k_dp_prepare_htt_metadata(struct sk_buff *skb,
+					  u8 *htt_metadata_size)
+{
+	u8 htt_desc_size;
+	/* Size rounded up to a multiple of 8 bytes */
+	u8 htt_desc_size_aligned;
+	struct htt_tx_msdu_desc_ext *desc_ext;
+	int ret;
+
+	htt_desc_size = sizeof(*desc_ext);
+	htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT);
+
+	ret = ath11k_dp_metadata_align_skb(skb, htt_desc_size_aligned);
+	if (unlikely(ret))
+		return ret;
+
+	desc_ext = (struct htt_tx_msdu_desc_ext *)skb->data;
+	desc_ext->info0 =
+		__cpu_to_le32(FIELD_PREP(HTT_TX_MSDU_DESC_INFO0_VALID_ENCRYPT_TYPE, 1) |
+			      FIELD_PREP(HTT_TX_MSDU_DESC_INFO0_ENCRYPT_TYPE, 0) |
+			      FIELD_PREP(HTT_TX_MSDU_DESC_INFO0_HOST_TX_DESC_POOL, 1));
+	*htt_metadata_size = htt_desc_size_aligned;
+
+	return 0;
+}
+
 int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
 		 struct ath11k_sta *arsta, struct sk_buff *skb)
 {
@@ -97,9 +138,7 @@
 	u32 ring_selector = 0;
 	u8 ring_map = 0;
 	bool tcl_ring_retry;
-
-	if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
-		return -ESHUTDOWN;
+	u8 align_pad, htt_meta_size = 0;
 
 	if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
 		     !ieee80211_is_data(hdr->frame_control)))
@@ -208,15 +247,42 @@
 		goto fail_remove_idr;
 	}
 
+	/* Add metadata for sw encrypted vlan group traffic */
+	if (!test_bit(ATH11K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+	    !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
+	    !info->control.hw_key &&
+	    ieee80211_has_protected(hdr->frame_control)) {
+		/* HW requirement is that metadata should always point to a
+		 * 8-byte aligned address. So we add alignment pad to start of
+		 * buffer. HTT Metadata should be ensured to be multiple of 8-bytes
+	 * to get an 8-byte aligned start address along with align_pad added
+		 */
+		align_pad = ((unsigned long)skb->data) & (HTT_META_DATA_ALIGNMENT - 1);
+		ret = ath11k_dp_metadata_align_skb(skb, align_pad);
+		if (unlikely(ret))
+			goto fail_remove_idr;
+
+		ti.pkt_offset += align_pad;
+		ret = ath11k_dp_prepare_htt_metadata(skb, &htt_meta_size);
+		if (unlikely(ret))
+			goto fail_pull_skb;
+
+		ti.pkt_offset += htt_meta_size;
+		ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
+		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1);
+		ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
+		ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+	}
+
 	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
 	if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
 		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
 		ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
 		ret = -ENOMEM;
-		goto fail_remove_idr;
+		goto fail_pull_skb;
 	}
 
-	ti.data_len = skb->len;
+	ti.data_len = skb->len - ti.pkt_offset;
 	skb_cb->paddr = ti.paddr;
 	skb_cb->vif = arvif->vif;
 	skb_cb->ar = ar;
@@ -265,12 +331,18 @@
 			skb->data, skb->len);
 
 	atomic_inc(&ar->dp.num_tx_pending);
+	if (arsta)
+		atomic_inc(&arsta->num_tx_pending);
 
 	return 0;
 
 fail_unmap_dma:
 	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
 
+fail_pull_skb:
+	if (ti.pkt_offset)
+		skb_pull(skb, ti.pkt_offset);
+
 fail_remove_idr:
 	spin_lock_bh(&tx_ring->tx_idr_lock);
 	idr_remove(&tx_ring->txbuf_idr,
@@ -322,6 +394,7 @@
 	struct ath11k_skb_cb *skb_cb;
 	struct ath11k *ar;
 	struct ath11k_peer *peer;
+	struct ath11k_sta *arsta;
 
 	spin_lock(&tx_ring->tx_idr_lock);
 	msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
@@ -381,6 +454,10 @@
 	status.sta = peer->sta;
 	status.info = info;
 	status.skb = msdu;
+	arsta = (struct ath11k_sta *) peer->sta->drv_priv;
+
+	if (atomic_dec_and_test(&arsta->num_tx_pending))
+		wake_up(&arsta->tx_empty_waitq);
 
 	ieee80211_tx_status_ext(ar->hw, &status);
 }
@@ -649,6 +726,9 @@
 
 	spin_unlock_bh(&ab->base_lock);
 
+	if (atomic_dec_and_test(&arsta->num_tx_pending))
+		wake_up(&arsta->tx_empty_waitq);
+
 	ieee80211_tx_status_ext(ar->hw, &status);
 }
 
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/dp_tx.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/dp_tx.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/dp_tx.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/dp_tx.h	2025-09-25 17:40:34.123360066 +0200
@@ -17,6 +17,20 @@
 	u16 peer_id;
 };
 
+#define HTT_TX_MSDU_DESC_INFO0_VALID_ENCRYPT_TYPE	BIT(8)
+#define HTT_TX_MSDU_DESC_INFO0_ENCRYPT_TYPE		GENMASK(16, 15)
+#define HTT_TX_MSDU_DESC_INFO0_HOST_TX_DESC_POOL	BIT(31)
+
+struct htt_tx_msdu_desc_ext {
+	__le32 info0;
+	__le32 info1;
+	__le32 info2;
+	__le32 info3;
+	__le32 info4;
+	__le32 info5;
+	__le32 info6;
+} __packed;
+
 void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts);
 int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab);
 int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/hal.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/hal.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/hal.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/hal.c	2025-09-25 17:40:34.127360086 +0200
@@ -829,11 +829,15 @@
 		srng->u.src_ring.cached_tp =
 			*(volatile u32 *)srng->u.src_ring.tp_addr;
 	} else {
-		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+		srng->u.dst_ring.cached_hp =
+			READ_ONCE(*srng->u.dst_ring.hp_addr);
 
 		/* Try to prefetch the next descriptor in the ring */
 		if (srng->flags & HAL_SRNG_FLAGS_CACHED)
 			ath11k_hal_srng_prefetch_desc(ab, srng);
+
+		/* Make sure descriptor is read after the head pointer. */
+		dma_rmb();
 	}
 }
 
@@ -844,7 +848,6 @@
 {
 	lockdep_assert_held(&srng->lock);
 
-	/* TODO: See if we need a write memory barrier here */
 	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
 		/* For LMAC rings, ring pointer updates are done through FW and
 		 * hence written to a shared memory location that is read by FW
@@ -852,15 +855,20 @@
 		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
 			srng->u.src_ring.last_tp =
 				*(volatile u32 *)srng->u.src_ring.tp_addr;
-			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
+			/* Make sure desc is updated before the head pointer. */
+			dma_wmb();
+			WRITE_ONCE(*srng->u.src_ring.hp_addr,
+				   srng->u.src_ring.hp);
 		} else {
 			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
-			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
+			WRITE_ONCE(*srng->u.dst_ring.tp_addr,
+				   srng->u.dst_ring.tp);
 		}
 	} else {
 		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
 			srng->u.src_ring.last_tp =
 				*(volatile u32 *)srng->u.src_ring.tp_addr;
+			/* MMIO access, no need for wmb here */
 			ath11k_hif_write32(ab,
 					   (unsigned long)srng->u.src_ring.hp_addr -
 					   (unsigned long)ab->mem,
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/hal.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/hal.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/hal.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/hal.h	2025-07-01 14:10:42.700045625 +0200
@@ -700,7 +700,7 @@
 #define HAL_REO_CMD_FLG_UNBLK_RESOURCE		BIT(7)
 #define HAL_REO_CMD_FLG_UNBLK_CACHE		BIT(8)
 
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO0_UPD_* fields */
 #define HAL_REO_CMD_UPD0_RX_QUEUE_NUM		BIT(8)
 #define HAL_REO_CMD_UPD0_VLD			BIT(9)
 #define HAL_REO_CMD_UPD0_ALDC			BIT(10)
@@ -725,7 +725,7 @@
 #define HAL_REO_CMD_UPD0_PN_VALID		BIT(29)
 #define HAL_REO_CMD_UPD0_PN			BIT(30)
 
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO1_* fields */
 #define HAL_REO_CMD_UPD1_VLD			BIT(16)
 #define HAL_REO_CMD_UPD1_ALDC			GENMASK(18, 17)
 #define HAL_REO_CMD_UPD1_DIS_DUP_DETECTION	BIT(19)
@@ -741,7 +741,7 @@
 #define HAL_REO_CMD_UPD1_PN_HANDLE_ENABLE	BIT(30)
 #define HAL_REO_CMD_UPD1_IGNORE_AMPDU_FLG	BIT(31)
 
-/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* feilds */
+/* Should be matching with HAL_REO_UPD_RX_QUEUE_INFO2_* fields */
 #define HAL_REO_CMD_UPD2_SVLD			BIT(10)
 #define HAL_REO_CMD_UPD2_SSN			GENMASK(22, 11)
 #define HAL_REO_CMD_UPD2_SEQ_2K_ERR		BIT(23)
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/hw.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/hw.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/hw.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/hw.c	2025-09-29 14:23:07.597732370 +0200
@@ -1399,6 +1399,14 @@
 	},
 
 	/* CE11 Not used */
+	{
+		.pipenum = __cpu_to_le32(11),
+		.pipedir = __cpu_to_le32(0),
+		.nentries = __cpu_to_le32(0),
+		.nbytes_max = __cpu_to_le32(0),
+		.flags = __cpu_to_le32(CE_ATTR_FLAGS | CE_ATTR_DIS_INTR),
+		.reserved = __cpu_to_le32(0),
+	},
 };
 
 /* Map from service/endpoint to Copy Engine.
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/hw.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/hw.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/hw.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/hw.h	2025-09-29 14:23:07.597732370 +0200
@@ -194,6 +194,7 @@
 	u32 num_vdevs;
 	u32 num_peers;
 	bool supports_suspend;
+	bool supports_ap_vlan;
 	u32 hal_desc_sz;
 	bool supports_regdb;
 	bool fix_l1ss;
@@ -227,6 +228,8 @@
 	bool smp2p_wow_exit;
 	bool support_fw_mac_sequence;
 	bool support_dual_stations;
+	bool ce_fwlog_enable;
+	bool pdev_suspend;
 };
 
 struct ath11k_hw_ops {
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/mac.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/mac.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/mac.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/mac.c	2025-09-29 14:23:07.601732390 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <net/mac80211.h>
@@ -1529,17 +1529,29 @@
 	return ret;
 }
 
-static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif)
+static struct ath11k_vif *ath11k_mac_get_tx_arvif(struct ath11k_vif *arvif,
+						  struct ieee80211_bss_conf *link_conf)
+{
+	struct ieee80211_bss_conf *tx_bss_conf;
+
+	lockdep_assert_wiphy(arvif->ar->hw->wiphy);
+
+	tx_bss_conf = wiphy_dereference(arvif->ar->hw->wiphy, link_conf->tx_bss_conf);
+	if (tx_bss_conf)
+		return ath11k_vif_to_arvif(tx_bss_conf->vif);
+
+	return NULL;
+}
+
+static int ath11k_mac_setup_bcn_tmpl_ema(struct ath11k_vif *arvif,
+					 struct ath11k_vif *tx_arvif)
 {
-	struct ath11k_vif *tx_arvif;
 	struct ieee80211_ema_beacons *beacons;
 	int ret = 0;
 	bool nontx_vif_params_set = false;
 	u32 params = 0;
 	u8 i = 0;
 
-	tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
-
 	beacons = ieee80211_beacon_get_template_ema_list(tx_arvif->ar->hw,
 							 tx_arvif->vif, 0);
 	if (!beacons || !beacons->cnt) {
@@ -1585,26 +1597,23 @@
 	return ret;
 }
 
-static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif)
+static int ath11k_mac_setup_bcn_tmpl_mbssid(struct ath11k_vif *arvif,
+					    struct ath11k_vif *tx_arvif)
 {
 	struct ath11k *ar = arvif->ar;
 	struct ath11k_base *ab = ar->ab;
-	struct ath11k_vif *tx_arvif = arvif;
 	struct ieee80211_hw *hw = ar->hw;
 	struct ieee80211_vif *vif = arvif->vif;
 	struct ieee80211_mutable_offsets offs = {};
 	struct sk_buff *bcn;
 	int ret;
 
-	if (vif->mbssid_tx_vif) {
-		tx_arvif = ath11k_vif_to_arvif(vif->mbssid_tx_vif);
 		if (tx_arvif != arvif) {
 			ar = tx_arvif->ar;
 			ab = ar->ab;
 			hw = ar->hw;
 			vif = tx_arvif->vif;
 		}
-	}
 
 	bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
 	if (!bcn) {
@@ -1632,6 +1641,7 @@
 static int ath11k_mac_setup_bcn_tmpl(struct ath11k_vif *arvif)
 {
 	struct ieee80211_vif *vif = arvif->vif;
+	struct ath11k_vif *tx_arvif;
 
 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
 		return 0;
@@ -1639,14 +1649,18 @@
 	/* Target does not expect beacon templates for the already up
 	 * non-transmitting interfaces, and results in a crash if sent.
 	 */
-	if (vif->mbssid_tx_vif &&
-	    arvif != ath11k_vif_to_arvif(vif->mbssid_tx_vif) && arvif->is_up)
+	tx_arvif = ath11k_mac_get_tx_arvif(arvif, &vif->bss_conf);
+	if (tx_arvif) {
+		if (arvif != tx_arvif && arvif->is_up)
 		return 0;
 
-	if (vif->bss_conf.ema_ap && vif->mbssid_tx_vif)
-		return ath11k_mac_setup_bcn_tmpl_ema(arvif);
+		if (vif->bss_conf.ema_ap)
+			return ath11k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif);
+	} else {
+		tx_arvif = arvif;
+	}
 
-	return ath11k_mac_setup_bcn_tmpl_mbssid(arvif);
+	return ath11k_mac_setup_bcn_tmpl_mbssid(arvif, tx_arvif);
 }
 
 void ath11k_mac_bcn_tx_event(struct ath11k_vif *arvif)
@@ -1674,7 +1688,7 @@
 				     struct ieee80211_bss_conf *info)
 {
 	struct ath11k *ar = arvif->ar;
-	struct ath11k_vif *tx_arvif = NULL;
+	struct ath11k_vif *tx_arvif;
 	int ret = 0;
 
 	lockdep_assert_held(&arvif->ar->conf_mutex);
@@ -1697,15 +1711,11 @@
 		return;
 	}
 
-	arvif->tx_seq_no = 0x1000;
-
 	arvif->aid = 0;
 
 	ether_addr_copy(arvif->bssid, info->bssid);
 
-	if (arvif->vif->mbssid_tx_vif)
-		tx_arvif = ath11k_vif_to_arvif(arvif->vif->mbssid_tx_vif);
-
+	tx_arvif = ath11k_mac_get_tx_arvif(arvif, &arvif->vif->bss_conf);
 	ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
 				 arvif->bssid,
 				 tx_arvif ? tx_arvif->bssid : NULL,
@@ -1857,7 +1867,7 @@
 	}
 
 	/* FIXME: base on RSN IE/WPA IE is a correct idea? */
-	if (rsnie || wpaie) {
+	if (ar->supports_6ghz || rsnie || wpaie) {
 		ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
 			   "%s: rsn ie found\n", __func__);
 		arg->need_ptk_4_way = true;
@@ -2230,7 +2240,7 @@
 		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
 
 	/* In IPQ8074 platform, VHT mcs rate 10 and 11 is enabled by default.
-	 * VHT mcs rate 10 and 11 is not suppoerted in 11ac standard.
+	 * VHT mcs rate 10 and 11 is not supported in 11ac standard.
 	 * so explicitly disable the VHT MCS rate 10 and 11 in 11ac mode.
 	 */
 	arg->tx_mcs_set &= ~IEEE80211_VHT_MCS_SUPPORT_0_11_MASK;
@@ -3332,7 +3342,8 @@
 {
 	u32 bitmap[2], param_id, param_val, pdev_id;
 	int ret;
-	s8 non_srg_th = 0, srg_th = 0;
+	s8 non_srg_th = ATH11K_OBSS_PD_THRESHOLD_DISABLED;
+	s8 srg_th = 0;
 
 	pdev_id = ar->pdev->pdev_id;
 
@@ -3361,8 +3372,6 @@
 		if (he_obss_pd->sr_ctrl & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
 			non_srg_th = (ATH11K_OBSS_PD_MAX_THRESHOLD +
 				      he_obss_pd->non_srg_max_offset);
-		else
-			non_srg_th = ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD;
 
 		param_val |= ATH11K_OBSS_PD_NON_SRG_EN;
 	}
@@ -3377,6 +3386,7 @@
 		param_val |= ATH11K_OBSS_PD_THRESHOLD_IN_DBM;
 		param_val |= FIELD_PREP(GENMASK(15, 8), srg_th);
 	} else {
+		if ((non_srg_th & 0xff) != ATH11K_OBSS_PD_THRESHOLD_DISABLED)
 		non_srg_th -= ATH11K_DEFAULT_NOISE_FLOOR;
 		/* SRG not supported and threshold in dB */
 		param_val &= ~(ATH11K_OBSS_PD_SRG_EN |
@@ -4222,7 +4232,9 @@
 		return 0;
 
 	if (cmd == DISABLE_KEY) {
-		arg.key_cipher = WMI_CIPHER_NONE;
+		/* TODO: Check if FW expects a value other than NONE for del */
+		/* arg.key_cipher = WMI_CIPHER_NONE; */
+		arg.key_len = 0;
 		arg.key_data = NULL;
 		goto install;
 	}
@@ -4355,7 +4367,25 @@
 	 */
 	if (peer && sta && cmd == SET_KEY)
 		ath11k_peer_frags_flush(ar, peer);
+
+	/* Reset peer authorized flag in FW before deleting keys
+	 * to avoid races in FW during encryption of queued packets.
+	 */
+	if (peer && sta && cmd == DISABLE_KEY && peer->is_authorized) {
+		peer->is_authorized = false;
+		spin_unlock_bh(&ab->base_lock);
+		ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+						arvif->vdev_id,
+						WMI_PEER_AUTHORIZE,
+						0);
+		if (ret) {
+			ath11k_warn(ar->ab, "Unable to reset authorize flag for "
+				    "peer (%pM) vdev %d: %d\n",
+				    sta->addr, arvif->vdev_id, ret);
+		}
+	} else {
 	spin_unlock_bh(&ab->base_lock);
+	}
 
 	if (!peer) {
 		if (cmd == SET_KEY) {
@@ -5206,6 +5236,45 @@
 	return ret;
 }
 
+static int ath11k_mac_op_conf_tx_mu_edca(struct ieee80211_hw *hw,
+					 struct ieee80211_vif *vif,
+					 unsigned int link_id, u16 ac,
+					 const struct ieee80211_tx_queue_params *params)
+{
+	struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
+	struct ath11k *ar = hw->priv;
+	struct wmi_wmm_params_arg *p;
+	int ret;
+
+	switch (ac) {
+	case IEEE80211_AC_VO:
+		p = &arvif->muedca_params.ac_vo;
+		break;
+	case IEEE80211_AC_VI:
+		p = &arvif->muedca_params.ac_vi;
+		break;
+	case IEEE80211_AC_BE:
+		p = &arvif->muedca_params.ac_be;
+		break;
+	case IEEE80211_AC_BK:
+		p = &arvif->muedca_params.ac_bk;
+		break;
+	default:
+		ath11k_warn(ar->ab, "error ac: %d", ac);
+		return -EINVAL;
+	}
+
+	p->cwmin = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(3, 0));
+	p->cwmax = u8_get_bits(params->mu_edca_param_rec.ecw_min_max, GENMASK(7, 4));
+	p->aifs = u8_get_bits(params->mu_edca_param_rec.aifsn, GENMASK(3, 0));
+	p->txop = params->mu_edca_param_rec.mu_edca_timer;
+
+	ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
+						 &arvif->muedca_params,
+						 WMI_WMM_PARAM_TYPE_11AX_MU_EDCA);
+	return ret;
+}
+
 static int ath11k_mac_op_conf_tx(struct ieee80211_hw *hw,
 				 struct ieee80211_vif *vif,
 				 unsigned int link_id, u16 ac,
@@ -5244,12 +5313,22 @@
 	p->txop = params->txop;
 
 	ret = ath11k_wmi_send_wmm_update_cmd_tlv(ar, arvif->vdev_id,
-						 &arvif->wmm_params);
+						 &arvif->wmm_params,
+						 WMI_WMM_PARAM_TYPE_LEGACY);
 	if (ret) {
 		ath11k_warn(ar->ab, "failed to set wmm params: %d\n", ret);
 		goto exit;
 	}
 
+	if (params->mu_edca) {
+		ret = ath11k_mac_op_conf_tx_mu_edca(hw, vif, link_id, ac,
+						    params);
+		if (ret) {
+			ath11k_warn(ar->ab, "failed to set mu_edca params: %d\n", ret);
+			goto exit;
+		}
+	}
+
 	ret = ath11k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
 
 	if (ret)
@@ -5338,8 +5417,6 @@
 	if (vht_cap & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)) {
 		nsts = vht_cap & IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
 		nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
-		if (nsts > (ar->num_rx_chains - 1))
-			nsts = ar->num_rx_chains - 1;
 		value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
 	}
 
@@ -5423,9 +5500,6 @@
 
 	/* Enable Beamformee STS Field only if SU BF is enabled */
 	if (subfee) {
-		if (nsts > (ar->num_rx_chains - 1))
-			nsts = ar->num_rx_chains - 1;
-
 		nsts <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
 		nsts &=  IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
 		*vht_cap |= nsts;
@@ -6063,6 +6137,11 @@
 	bool is_prb_rsp;
 	int ret;
 
+	if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))) {
+		ieee80211_free_txskb(ar->hw, skb);
+		return;
+	}
+
 	memset(skb_cb, 0, sizeof(*skb_cb));
 	skb_cb->vif = vif;
 
@@ -6290,6 +6369,7 @@
 {
 	struct ath11k *ar = hw->priv;
 	struct htt_ppdu_stats_info *ppdu_stats, *tmp;
+	struct scan_chan_list_params *params;
 	int ret;
 
 	ath11k_mac_drain_tx(ar);
@@ -6305,6 +6385,7 @@
 	mutex_unlock(&ar->conf_mutex);
 
 	cancel_delayed_work_sync(&ar->scan.timeout);
+	cancel_work_sync(&ar->channel_update_work);
 	cancel_work_sync(&ar->regd_update_work);
 	cancel_work_sync(&ar->ab->update_11d_work);
 
@@ -6314,10 +6395,19 @@
 	}
 
 	spin_lock_bh(&ar->data_lock);
+
 	list_for_each_entry_safe(ppdu_stats, tmp, &ar->ppdu_stats_info, list) {
 		list_del(&ppdu_stats->list);
 		kfree(ppdu_stats);
 	}
+
+	while ((params = list_first_entry_or_null(&ar->channel_update_queue,
+						  struct scan_chan_list_params,
+						  list))) {
+		list_del(&params->list);
+		kfree(params);
+	}
+
 	spin_unlock_bh(&ar->data_lock);
 
 	rcu_assign_pointer(ar->ab->pdevs_active[ar->pdev_idx], NULL);
@@ -6332,23 +6422,20 @@
 {
 	struct ath11k *ar = arvif->ar;
 	struct ath11k_vif *tx_arvif;
-	struct ieee80211_vif *tx_vif;
 
 	*tx_vdev_id = 0;
-	tx_vif = arvif->vif->mbssid_tx_vif;
-	if (!tx_vif) {
+	tx_arvif = ath11k_mac_get_tx_arvif(arvif, &arvif->vif->bss_conf);
+	if (!tx_arvif) {
 		*flags = WMI_HOST_VDEV_FLAGS_NON_MBSSID_AP;
 		return 0;
 	}
 
-	tx_arvif = ath11k_vif_to_arvif(tx_vif);
-
 	if (arvif->vif->bss_conf.nontransmitted) {
-		if (ar->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
+		if (ar->hw->wiphy != tx_arvif->ar->hw->wiphy)
 			return -EINVAL;
 
 		*flags = WMI_HOST_VDEV_FLAGS_NON_TRANSMIT_AP;
-		*tx_vdev_id = ath11k_vif_to_arvif(tx_vif)->vdev_id;
+		*tx_vdev_id = tx_arvif->vdev_id;
 	} else if (tx_arvif == arvif) {
 		*flags = WMI_HOST_VDEV_FLAGS_TRANSMIT_AP;
 	} else {
@@ -6952,7 +7039,7 @@
 	/* Recalc txpower for remaining vdev */
 	ath11k_mac_txpower_recalc(ar);
 
-	/* TODO: recal traffic pause state based on the available vdevs */
+	/* TODO: recalc traffic pause state based on the available vdevs */
 
 	mutex_unlock(&ar->conf_mutex);
 }
@@ -7308,8 +7395,7 @@
 			   int n_vifs)
 {
 	struct ath11k_base *ab = ar->ab;
-	struct ath11k_vif *arvif, *tx_arvif = NULL;
-	struct ieee80211_vif *mbssid_tx_vif;
+	struct ath11k_vif *arvif, *tx_arvif;
 	int ret;
 	int i;
 	bool monitor_vif = false;
@@ -7363,10 +7449,7 @@
 			ath11k_warn(ab, "failed to update bcn tmpl during csa: %d\n",
 				    ret);
 
-		mbssid_tx_vif = arvif->vif->mbssid_tx_vif;
-		if (mbssid_tx_vif)
-			tx_arvif = ath11k_vif_to_arvif(mbssid_tx_vif);
-
+		tx_arvif = ath11k_mac_get_tx_arvif(arvif, &arvif->vif->bss_conf);
 		ret = ath11k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
 					 arvif->bssid,
 					 tx_arvif ? tx_arvif->bssid : NULL,
@@ -8124,7 +8207,8 @@
 /* mac80211 stores device specific RTS/Fragmentation threshold value,
  * this is set interface specific to firmware from ath11k driver
  */
-static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath11k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u8 radio_id,
+					   u32 value)
 {
 	struct ath11k *ar = hw->priv;
 	int param_id = WMI_VDEV_PARAM_RTS_THRESHOLD;
@@ -8190,6 +8274,22 @@
 	ath11k_mac_flush_tx_complete(ar);
 }
 
+static void ath11k_mac_op_flush_sta(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_sta *sta)
+{
+	struct ath11k *ar = hw->priv;
+	struct ath11k_sta *arsta = (struct ath11k_sta *) sta->drv_priv;
+	long time_left;
+
+	time_left = wait_event_timeout(arsta->tx_empty_waitq,
+				       (atomic_read(&arsta->num_tx_pending) == 0),
+				       ATH11K_FLUSH_TIMEOUT);
+	if (time_left == 0)
+		ath11k_warn(ar->ab, "failed to flush sta transmit queue, data pkts pending %d\n",
+			    atomic_read(&arsta->num_tx_pending));
+}
+
 static bool
 ath11k_mac_has_single_legacy_rate(struct ath11k *ar,
 				  enum nl80211_band band,
@@ -9356,6 +9456,7 @@
 
 static int ath11k_mac_op_get_txpower(struct ieee80211_hw *hw,
 				     struct ieee80211_vif *vif,
+				     unsigned int link_id,
 				     int *dbm)
 {
 	struct ath11k *ar = hw->priv;
@@ -9583,6 +9684,7 @@
 		arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
 		INIT_WORK(&arsta->update_wk, ath11k_sta_rc_update_wk);
 		INIT_WORK(&arsta->set_4addr_wk, ath11k_sta_set_4addr_wk);
+		init_waitqueue_head(&arsta->tx_empty_waitq);
 
 		ret = ath11k_mac_station_add(ar, vif, sta);
 		if (ret)
@@ -9671,10 +9773,21 @@
 		spin_lock_bh(&ar->ab->base_lock);
 
 		peer = ath11k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
-		if (peer)
+		if (peer) {
 			peer->is_authorized = false;
-
 		spin_unlock_bh(&ar->ab->base_lock);
+			ret = ath11k_wmi_set_peer_param(ar, sta->addr,
+							arvif->vdev_id,
+							WMI_PEER_AUTHORIZE,
+							0);
+			if (ret) {
+				ath11k_warn(ar->ab, "Unable to reset authorize flag for "
+					    "peer (%pM) vdev %d: %d\n",
+					    sta->addr, arvif->vdev_id, ret);
+			}
+		} else {
+			spin_unlock_bh(&ar->ab->base_lock);
+		}
 	} else if (old_state == IEEE80211_STA_ASSOC &&
 		   new_state == IEEE80211_STA_AUTH &&
 		   (vif->type == NL80211_IFTYPE_AP ||
@@ -9725,6 +9838,7 @@
 	.set_bitrate_mask		= ath11k_mac_op_set_bitrate_mask,
 	.get_survey			= ath11k_mac_op_get_survey,
 	.flush				= ath11k_mac_op_flush,
+	.flush_sta			= ath11k_mac_op_flush_sta,
 	.sta_statistics			= ath11k_mac_op_sta_statistics,
 	CFG80211_TESTMODE_CMD(ath11k_tm_cmd)
 
@@ -9765,6 +9879,28 @@
 	}
 }
 
+#define ATH11k_5_DOT_9_MIN_FREQ	5845
+#define ATH11k_5_DOT_9_MAX_FREQ	5885
+
+static void ath11k_mac_update_5_dot_9_ch_list(struct ath11k *ar,
+				      struct ieee80211_supported_band *band)
+{
+	int i;
+
+	if (test_bit(WMI_TLV_SERVICE_5_DOT_9GHZ_SUPPORT,
+				ar->ab->wmi_ab.svc_map))
+		return;
+
+	if (ar->ab->dfs_region != ATH11K_DFS_REG_FCC)
+		return;
+
+	for (i = 0; i < band->n_channels; i++) {
+		if (band->channels[i].center_freq >= ATH11k_5_DOT_9_MIN_FREQ &&
+		    band->channels[i].center_freq <= ATH11k_5_DOT_9_MAX_FREQ)
+			band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
+	}
+}
+
 static u32 ath11k_get_phy_id(struct ath11k *ar, u32 band)
 {
 	struct ath11k_pdev *pdev = ar->pdev;
@@ -9875,6 +10011,7 @@
 			ath11k_mac_update_ch_list(ar, band,
 						  temp_reg_cap->low_5ghz_chan,
 						  temp_reg_cap->high_5ghz_chan);
+			ath11k_mac_update_5_dot_9_ch_list(ar, band);
 		}
 	}
 
@@ -10020,6 +10157,7 @@
 
 static void __ath11k_mac_unregister(struct ath11k *ar)
 {
+	cancel_work_sync(&ar->channel_update_work);
 	cancel_work_sync(&ar->regd_update_work);
 
 	ieee80211_unregister_hw(ar->hw);
@@ -10252,6 +10390,12 @@
 	    ab->hw_params.bios_sar_capa)
 		ar->hw->wiphy->sar_capa = ab->hw_params.bios_sar_capa;
 
+	snprintf(ar->hw->wiphy->fw_version,
+		 sizeof(ar->hw->wiphy->fw_version),
+		 "%s, 0x%x",
+		 ab->hw_params.name,
+		 ab->qmi.target.fw_version);
+
 	ret = ieee80211_register_hw(ar->hw);
 	if (ret) {
 		ath11k_err(ar->ab, "ieee80211 registration failed: %d\n", ret);
@@ -10266,6 +10410,11 @@
 		 */
 		ar->hw->wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MONITOR);
 
+	if (ab->hw_params.supports_ap_vlan) {
+		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
+		ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
+	}
+
 	/* Apply the regd received during initialization */
 	ret = ath11k_regd_update(ar);
 	if (ret) {
@@ -10369,6 +10518,7 @@
 	struct ieee80211_hw *hw;
 	struct ath11k *ar;
 	struct ath11k_pdev *pdev;
+	struct ieee80211_ops *ops;
 	int ret;
 	int i;
 
@@ -10376,17 +10526,25 @@
 		return 0;
 
 	for (i = 0; i < ab->num_radios; i++) {
+		ops = kmemdup(&ath11k_ops, sizeof(ath11k_ops), GFP_KERNEL);
+		if (!ops) {
+			ret = -ENOMEM;
+			goto err_free_mac;
+		}
+
 		pdev = &ab->pdevs[i];
-		hw = ieee80211_alloc_hw(sizeof(struct ath11k), &ath11k_ops);
+		hw = ieee80211_alloc_hw(sizeof(struct ath11k), ops);
 		if (!hw) {
 			ath11k_warn(ab, "failed to allocate mac80211 hw device\n");
 			ret = -ENOMEM;
+			kfree(ops);
 			goto err_free_mac;
 		}
 
 		ar = hw->priv;
 		ar->hw = hw;
 		ar->ab = ab;
+		ar->ops = ops;
 		ar->pdev = pdev;
 		ar->pdev_idx = i;
 		ar->lmac_id = ath11k_hw_get_mac_from_pdev_id(&ab->hw_params, i);
@@ -10419,6 +10577,8 @@
 		init_completion(&ar->thermal.wmi_sync);
 
 		INIT_DELAYED_WORK(&ar->scan.timeout, ath11k_scan_timeout_work);
+		INIT_WORK(&ar->channel_update_work, ath11k_regd_update_chan_list_work);
+		INIT_LIST_HEAD(&ar->channel_update_queue);
 		INIT_WORK(&ar->regd_update_work, ath11k_regd_update_work);
 
 		INIT_WORK(&ar->wmi_mgmt_tx_work, ath11k_mgmt_over_wmi_tx_work);
@@ -10446,6 +10606,7 @@
 {
 	struct ath11k *ar;
 	struct ath11k_pdev *pdev;
+	struct ieee80211_ops *ops;
 	int i;
 
 	for (i = 0; i < ab->num_radios; i++) {
@@ -10455,7 +10616,9 @@
 			continue;
 
 		ath11k_fw_stats_free(&ar->fw_stats);
+		ops = ar->ops;
 		ieee80211_free_hw(ar->hw);
+		kfree(ops);
 		pdev->ar = NULL;
 	}
 }
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/mac.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/mac.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/mac.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/mac.h	2025-09-25 17:40:34.127360086 +0200
@@ -122,7 +122,7 @@
 #define ATH11K_PEER_RX_NSS_80_80MHZ		GENMASK(5, 3)
 
 #define ATH11K_OBSS_PD_MAX_THRESHOLD			-82
-#define ATH11K_OBSS_PD_NON_SRG_MAX_THRESHOLD		-62
+#define ATH11K_OBSS_PD_THRESHOLD_DISABLED		128
 #define ATH11K_OBSS_PD_THRESHOLD_IN_DBM			BIT(29)
 #define ATH11K_OBSS_PD_SRG_EN				BIT(30)
 #define ATH11K_OBSS_PD_NON_SRG_EN			BIT(31)
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/mhi.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/mhi.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/mhi.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/mhi.c	2025-09-25 17:40:34.127360086 +0200
@@ -328,7 +328,7 @@
 	if (ret)
 		return ret;
 
-	mhi_ctrl->iova_start = res.start + 0x1000000;
+	mhi_ctrl->iova_start = res.start;
 	mhi_ctrl->iova_stop = res.end;
 
 	return 0;
@@ -398,6 +398,7 @@
 	case ATH11K_HW_WCN6855_HW20:
 	case ATH11K_HW_WCN6855_HW21:
 	case ATH11K_HW_QCA2066_HW21:
+	case ATH11K_HW_QCA6698AQ_HW21:
 		ath11k_mhi_config = &ath11k_mhi_config_qca6390;
 		break;
 	default:
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/pci.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/pci.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/pci.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/pci.c	2025-09-25 17:40:34.131360105 +0200
@@ -30,6 +30,7 @@
 #define WCN6855_DEVICE_ID		0x1103
 
 #define TCSR_SOC_HW_SUB_VER	0x1910010
+#define ATH11K_PCIE_LOCAL_REG_PCIE_LOCAL_RSV0  0x1E03164
 
 static const struct pci_device_id ath11k_pci_id_table[] = {
 	{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
@@ -177,8 +178,16 @@
 
 static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
 {
+	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
 	u32 val, delay;
 
+	/*
+	 * reset will reinitialise SoC registers to their default
+	 * values, we must not assume that the actual window cached
+	 * there is going to be valid after global reset.
+	 */
+	ab_pci->register_window = 0x00;
+
 	val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
 
 	val |= PCIE_SOC_GLOBAL_RESET_V;
@@ -371,9 +380,14 @@
 	ath11k_mhi_set_mhictrl_reset(ab);
 }
 
+#define ATH11K_QRTR_INSTANCE_PCI_DOMAIN		GENMASK(3, 0)
+#define ATH11K_QRTR_INSTANCE_PCI_BUS_NUM	GENMASK(7, 4)
+
 static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
 {
 	struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
+	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+	struct pci_bus *bus = ab_pci->pdev->bus;
 
 	cfg->tgt_ce = ab->hw_params.target_ce_config;
 	cfg->tgt_ce_len = ab->hw_params.target_ce_count;
@@ -384,6 +398,13 @@
 
 	ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
 				    &cfg->shadow_reg_v2_len);
+
+	ab_pci->instance_id =
+		FIELD_PREP(ATH11K_QRTR_INSTANCE_PCI_DOMAIN,
+			   pci_domain_nr(bus)) |
+		FIELD_PREP(ATH11K_QRTR_INSTANCE_PCI_BUS_NUM,
+			   bus->number);
+	ab->qmi.service_ins_id += ab_pci->instance_id;
 }
 
 static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable)
@@ -610,6 +631,18 @@
 						   PCI_EXP_LNKCTL_ASPMC);
 }
 
+static void ath11k_pci_update_qrtr_node_id(struct ath11k_base *ab)
+{
+	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
+	u32 reg;
+
+	reg = ATH11K_PCIE_LOCAL_REG_PCIE_LOCAL_RSV0 & ATH11K_PCI_WINDOW_RANGE_MASK;
+	ath11k_pcic_write32(ab, reg, ab_pci->instance_id);
+
+	ath11k_dbg(ab, ATH11K_DBG_PCI, "pci reg 0x%x instance_id 0x%x read val 0x%x\n",
+		   reg, ab_pci->instance_id, ath11k_pcic_read32(ab, reg));
+}
+
 static int ath11k_pci_power_up(struct ath11k_base *ab)
 {
 	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -626,6 +659,8 @@
 
 	ath11k_pci_msi_enable(ab_pci);
 
+	ath11k_pci_update_qrtr_node_id(ab);
+
 	ret = ath11k_mhi_start(ab_pci);
 	if (ret) {
 		ath11k_err(ab, "failed to start mhi: %d\n", ret);
@@ -846,6 +881,9 @@
 				case 0x1019D0E1:
 					ab->hw_rev = ATH11K_HW_QCA2066_HW21;
 					break;
+				case 0x001e60e1:
+					ab->hw_rev = ATH11K_HW_QCA6698AQ_HW21;
+					break;
 				default:
 					ab->hw_rev = ATH11K_HW_WCN6855_HW21;
 				}
@@ -910,7 +948,7 @@
 
 	ath11k_pci_init_qmi_ce_config(ab);
 
-	ret = ath11k_pcic_config_irq(ab);
+	ret = ath11k_pcic_config_irq(ab, pci_name(pdev));
 	if (ret) {
 		ath11k_err(ab, "failed to config irq: %d\n", ret);
 		goto err_ce_free;
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/pci.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/pci.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/pci.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/pci.h	2025-09-25 17:40:34.131360105 +0200
@@ -74,6 +74,7 @@
 	unsigned long flags;
 	u16 link_ctl;
 	u64 dma_mask;
+	u32 instance_id;
 };
 
 static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/pcic.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/pcic.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/pcic.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/pcic.c	2025-09-25 17:40:34.131360105 +0200
@@ -1,17 +1,14 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include "core.h"
 #include "pcic.h"
 #include "debug.h"
 
-static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
-	"bhi",
-	"mhi-er0",
-	"mhi-er1",
+static const char *ce_irq_name[] = {
 	"ce0",
 	"ce1",
 	"ce2",
@@ -24,42 +21,20 @@
 	"ce9",
 	"ce10",
 	"ce11",
-	"host2wbm-desc-feed",
-	"host2reo-re-injection",
-	"host2reo-command",
-	"host2rxdma-monitor-ring3",
-	"host2rxdma-monitor-ring2",
-	"host2rxdma-monitor-ring1",
-	"reo2ost-exception",
-	"wbm2host-rx-release",
-	"reo2host-status",
-	"reo2host-destination-ring4",
-	"reo2host-destination-ring3",
-	"reo2host-destination-ring2",
-	"reo2host-destination-ring1",
-	"rxdma2host-monitor-destination-mac3",
-	"rxdma2host-monitor-destination-mac2",
-	"rxdma2host-monitor-destination-mac1",
-	"ppdu-end-interrupts-mac3",
-	"ppdu-end-interrupts-mac2",
-	"ppdu-end-interrupts-mac1",
-	"rxdma2host-monitor-status-ring-mac3",
-	"rxdma2host-monitor-status-ring-mac2",
-	"rxdma2host-monitor-status-ring-mac1",
-	"host2rxdma-host-buf-ring-mac3",
-	"host2rxdma-host-buf-ring-mac2",
-	"host2rxdma-host-buf-ring-mac1",
-	"rxdma2host-destination-ring-mac3",
-	"rxdma2host-destination-ring-mac2",
-	"rxdma2host-destination-ring-mac1",
-	"host2tcl-input-ring4",
-	"host2tcl-input-ring3",
-	"host2tcl-input-ring2",
-	"host2tcl-input-ring1",
-	"wbm2host-tx-completions-ring3",
-	"wbm2host-tx-completions-ring2",
-	"wbm2host-tx-completions-ring1",
-	"tcl2host-status-ring",
+};
+
+static const char *dp_irq_name[ATH11K_EXT_IRQ_NUM_MAX] = {
+	"wbm2host_tx_completions_ring1",
+	"wbm2host_tx_completions_ring2",
+	"wbm2host_tx_completions_ring3",
+	"lmac_reo_misc_irq",
+	"reo2host_destination_ring1",
+	"reo2host_destination_ring2",
+	"reo2host_destination_ring3",
+	"reo2host_destination_ring4",
+	"dp_res1",
+	"dp_res2",
+	"dp_res3",
 };
 
 static const struct ath11k_msi_config ath11k_msi_config[] = {
@@ -126,6 +101,17 @@
 		},
 		.hw_rev = ATH11K_HW_QCA2066_HW21,
 	},
+	{
+		.total_vectors = 32,
+		.total_users = 4,
+		.users = (struct ath11k_msi_user[]) {
+			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
+			{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
+			{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
+			{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
+		},
+		.hw_rev = ATH11K_HW_QCA6698AQ_HW21,
+	},
 };
 
 int ath11k_pcic_init_msi_config(struct ath11k_base *ab)
@@ -317,6 +303,8 @@
 
 		netif_napi_del(&irq_grp->napi);
 		free_netdev(irq_grp->napi_ndev);
+		kfree(irq_grp->name);
+		irq_grp->name = NULL;
 	}
 }
 
@@ -329,6 +317,8 @@
 			continue;
 		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
 		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
+		kfree(ab->irq_name[i]);
+		ab->irq_name[i] = NULL;
 	}
 
 	ath11k_pcic_free_ext_irq(ab);
@@ -557,7 +547,8 @@
 	return ab->pci.ops->get_msi_irq(ab, vector);
 }
 
-static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
+static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab,
+				      const char *dev_name)
 {
 	int i, j, n, ret, num_vectors = 0;
 	u32 user_base_data = 0, base_vector = 0;
@@ -600,6 +591,13 @@
 			num_irq = 1;
 		}
 
+		irq_grp->name = kasprintf(GFP_KERNEL,
+					  "%s-%s",
+					  dev_name,
+					  dp_irq_name[i]);
+		if (!irq_grp->name)
+			return -ENOMEM;
+
 		irq_grp->num_irq = num_irq;
 		irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;
 
@@ -620,7 +618,7 @@
 
 			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 			ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
-					  irq_flags, "DP_EXT_IRQ", irq_grp);
+					  irq_flags, irq_grp->name, irq_grp);
 			if (ret) {
 				ath11k_err(ab, "failed request irq %d: %d\n",
 					   vector, ret);
@@ -646,7 +644,8 @@
 	return ret;
 }
 
-int ath11k_pcic_config_irq(struct ath11k_base *ab)
+int ath11k_pcic_config_irq(struct ath11k_base *ab,
+			   const char *dev_name)
 {
 	struct ath11k_ce_pipe *ce_pipe;
 	u32 msi_data_start;
@@ -675,6 +674,13 @@
 		if (irq < 0)
 			return irq;
 
+		ab->irq_name[i] = kasprintf(GFP_KERNEL,
+					    "%s-%s",
+					    dev_name,
+					    ce_irq_name[i]);
+		if (!ab->irq_name[i])
+			return -ENOMEM;
+
 		ce_pipe = &ab->ce.ce_pipe[i];
 
 		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
@@ -682,7 +688,7 @@
 		tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);
 
 		ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
-				  irq_flags, irq_name[irq_idx], ce_pipe);
+				  irq_flags, ab->irq_name[i], ce_pipe);
 		if (ret) {
 			ath11k_err(ab, "failed to request irq %d: %d\n",
 				   irq_idx, ret);
@@ -695,7 +701,7 @@
 		ath11k_pcic_ce_irq_disable(ab, i);
 	}
 
-	ret = ath11k_pcic_ext_irq_config(ab);
+	ret = ath11k_pcic_ext_irq_config(ab, dev_name);
 	if (ret)
 		return ret;
 
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/pcic.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/pcic.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/pcic.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/pcic.h	2025-09-25 17:40:34.131360105 +0200
@@ -35,7 +35,7 @@
 				 u32 *msi_addr_hi);
 void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx);
 void ath11k_pcic_free_irq(struct ath11k_base *ab);
-int ath11k_pcic_config_irq(struct ath11k_base *ab);
+int ath11k_pcic_config_irq(struct ath11k_base *ab, const char *dev_name);
 void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab);
 void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab);
 void ath11k_pcic_stop(struct ath11k_base *ab);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/qmi.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/qmi.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/qmi.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/qmi.c	2025-09-25 17:40:34.131360105 +0200
@@ -29,6 +29,10 @@
 MODULE_PARM_DESC(cold_boot_cal,
 		 "Decrease the channel switch time but increase the driver load time (Default: true)");
 
+bool ath11k_skip_caldata = 0;
+module_param_named(skip_caldata, ath11k_skip_caldata, bool, 0644);
+MODULE_PARM_DESC(ath11k_skip_caldata, "Skip caldata download");
+
 static const struct qmi_elem_info qmi_wlanfw_host_cap_req_msg_v01_ei[] = {
 	{
 		.data_type	= QMI_OPT_FLAG,
@@ -1704,7 +1708,204 @@
 	},
 };
 
-static int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
+struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ini_req_msg_v01,
+					   enablefwlog_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ini_req_msg_v01,
+					   enablefwlog),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_ini_resp_msg_v01,
+					   resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info qmi_wlanfw_mem_read_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct qmi_wlanfw_mem_read_req_msg_v01,
+					   offset),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct qmi_wlanfw_mem_read_req_msg_v01,
+					   mem_type),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct qmi_wlanfw_mem_read_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info qmi_wlanfw_mem_read_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_read_resp_msg_v01,
+					   resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_read_resp_msg_v01,
+					   data_valid),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_read_resp_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLANFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.array_type     = VAR_LEN_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_read_resp_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info qmi_wlanfw_mem_write_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x01,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_req_msg_v01,
+					   offset),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_4_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u32),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_req_msg_v01,
+					   mem_type),
+	},
+	{
+		.data_type      = QMI_DATA_LEN,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u16),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_req_msg_v01,
+					   data_len),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = QMI_WLANFW_MAX_DATA_SIZE_V01,
+		.elem_size      = sizeof(u8),
+		.array_type     = VAR_LEN_ARRAY,
+		.tlv_type       = 0x03,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_req_msg_v01,
+					   data),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info qmi_wlanfw_mem_write_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct
+					   qmi_wlanfw_mem_write_resp_msg_v01,
+					   resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath11k_qmi_host_cap_send(struct ath11k_base *ab)
 {
 	struct qmi_wlanfw_host_cap_req_msg_v01 req;
 	struct qmi_wlanfw_host_cap_resp_msg_v01 resp;
@@ -2190,9 +2391,11 @@
 
 static int ath11k_qmi_request_target_cap(struct ath11k_base *ab)
 {
+	struct device *dev = ab->dev;
 	struct qmi_wlanfw_cap_req_msg_v01 req;
 	struct qmi_wlanfw_cap_resp_msg_v01 resp;
 	struct qmi_txn txn;
+	unsigned int board_id;
 	int ret = 0;
 	int r;
 	char *fw_build_id;
@@ -2237,10 +2440,13 @@
 		ab->qmi.target.chip_family = resp.chip_info.chip_family;
 	}
 
-	if (resp.board_info_valid)
+	if (!of_property_read_u32(dev->of_node, "qcom,board_id", &board_id) && board_id != 0xFF) {
+		ab->qmi.target.board_id = board_id;
+	} else if (resp.board_info_valid) {
 		ab->qmi.target.board_id = resp.board_info.board_id;
-	else
+	} else {
 		ab->qmi.target.board_id = 0xFF;
+	}
 
 	if (resp.soc_info_valid)
 		ab->qmi.target.soc_id = resp.soc_info.soc_id;
@@ -2449,6 +2655,11 @@
 	if (bdf_type == ATH11K_QMI_BDF_TYPE_ELF || bdf_type == ATH11K_QMI_BDF_TYPE_REGDB)
 		goto out;
 
+	if (ath11k_skip_caldata) {
+		ath11k_warn(ab, "Skipping caldata download\n");
+		goto out;
+	}
+
 	if (ab->qmi.target.eeprom_caldata) {
 		file_type = ATH11K_QMI_FILE_TYPE_EEPROM;
 		tmp = filename;
@@ -2475,9 +2686,8 @@
 			}
 			ret = PTR_ERR(fw_entry);
 			ath11k_warn(ab,
-				    "qmi failed to load CAL data file:%s\n",
-				    filename);
-			goto out;
+				    "qmi failed to load CAL data file:%s booting with minimal performance\n",ATH11K_DEFAULT_CAL_FILE);
+			return 0;
 		}
 success:
 		fw_size = min_t(u32, ab->hw_params.fw.board_size, fw_entry->size);
@@ -2570,7 +2780,9 @@
 	m3_mem->size = 0;
 }
 
-static int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath11k_qmi_wlanfw_m3_info_send(struct ath11k_base *ab)
 {
 	struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
 	struct qmi_wlanfw_m3_info_req_msg_v01 req;
@@ -3113,6 +3325,130 @@
 	ath11k_dbg(ab, ATH11K_DBG_QMI, "firmware init done\n");
 }
 
+int ath11k_qmi_mem_read(struct ath11k_base *ab, u32 mem_addr, void *mem_value,size_t count)
+{
+	struct qmi_wlanfw_mem_read_req_msg_v01 *req;
+	struct qmi_wlanfw_mem_read_resp_msg_v01 *resp;
+	struct qmi_txn txn = {};
+	int ret = 0;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->offset = mem_addr;
+
+	/* Firmware uses mem type to map to various memory regions.
+	 * If this is set to 0, firmware uses automatic mapping of regions.
+	 * i.e, if mem address is given and mem_type is 0, firmware will
+	 * find under which memory region that address belongs
+	 */
+	req->mem_type = QMI_MEM_REGION_TYPE;
+	req->data_len = count;
+
+	ret = qmi_txn_init(&ab->qmi.handle, &txn,
+			   qmi_wlanfw_mem_read_resp_msg_v01_ei, resp);
+	if (ret < 0)
+		goto out;
+
+	ret =
+	qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			 QMI_WLANFW_MEM_READ_REQ_V01,
+			 QMI_WLANFW_MEM_READ_REQ_MSG_V01_MAX_MSG_LEN,
+			 qmi_wlanfw_mem_read_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		ath11k_warn(ab, "Failed to send mem read request, err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0)
+		goto out;
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath11k_warn(ab, "qmi mem read req failed, result: %d, err: %d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!resp->data_valid || resp->data_len != req->data_len) {
+		ath11k_warn(ab, "qmi mem read is invalid\n");
+		ret = -EINVAL;
+		goto out;
+	}
+	memcpy(mem_value, resp->data, resp->data_len);
+
+out:
+	kfree(req);
+	kfree(resp);
+	return ret;
+}
+
+int ath11k_qmi_mem_write(struct ath11k_base *ab, u32 mem_addr, void* mem_value, size_t count)
+{
+	struct qmi_wlanfw_mem_write_req_msg_v01 *req;
+	struct qmi_wlanfw_mem_write_resp_msg_v01 *resp;
+	struct qmi_txn txn = {};
+	int ret = 0;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	resp = kzalloc(sizeof(*resp), GFP_KERNEL);
+	if (!resp) {
+		kfree(req);
+		return -ENOMEM;
+	}
+
+	req->offset = mem_addr;
+	req->mem_type = QMI_MEM_REGION_TYPE;
+	req->data_len = count;
+	memcpy(req->data, mem_value, req->data_len);
+
+	ret = qmi_txn_init(&ab->qmi.handle, &txn,
+			   qmi_wlanfw_mem_write_resp_msg_v01_ei, resp);
+	if (ret < 0)
+		goto out;
+
+	ret =
+	qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			 QMI_WLANFW_MEM_WRITE_REQ_V01,
+			 QMI_WLANFW_MEM_WRITE_REQ_MSG_V01_MAX_MSG_LEN,
+			 qmi_wlanfw_mem_write_req_msg_v01_ei, req);
+	if (ret < 0) {
+		qmi_txn_cancel(&txn);
+		ath11k_warn(ab, "Failed to send mem write request, err %d\n",
+			    ret);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0)
+		goto out;
+
+	if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath11k_warn(ab, "qmi mem write req failed, result: %d, err: %d\n",
+			    resp->resp.result, resp->resp.error);
+		ret = -EINVAL;
+		goto out;
+	}
+
+out:
+	kfree(req);
+	kfree(resp);
+	return ret;
+}
+
 static const struct qmi_msg_handler ath11k_qmi_msg_handlers[] = {
 	{
 		.type = QMI_INDICATION,
@@ -3302,6 +3638,55 @@
 	spin_unlock(&qmi->event_lock);
 }
 
+int ath11k_enable_fwlog(struct ath11k_base *ab)
+{
+	struct wlfw_ini_req_msg_v01 *req;
+	struct wlfw_ini_resp_msg_v01 resp;
+	struct qmi_txn txn = {};
+	int ret = 0;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	memset(&resp, 0, sizeof(resp));
+
+	req->enablefwlog_valid = 1;
+	req->enablefwlog = 1;
+
+	ret = qmi_txn_init(&ab->qmi.handle, &txn,
+			   wlfw_ini_resp_msg_v01_ei, &resp);
+	if (ret < 0)
+		goto out;
+
+	ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			       QMI_WLFW_INI_REQ_V01,
+			       WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
+			       wlfw_ini_req_msg_v01_ei, req);
+
+	if (ret < 0) {
+		ath11k_warn(ab, "Failed to send init request for enabling fwlog = %d\n", ret);
+		qmi_txn_cancel(&txn);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH11K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0) {
+		ath11k_warn(ab, "fwlog enable wait for resp failed: %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath11k_warn(ab, "fwlog enable request failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = -EINVAL;
+		goto out;
+	}
+out:
+	kfree(req);
+	return ret;
+}
+
 int ath11k_qmi_init_service(struct ath11k_base *ab)
 {
 	int ret;
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/qmi.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/qmi.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/qmi.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/qmi.h	2025-09-25 17:40:34.131360105 +0200
@@ -154,6 +154,7 @@
 #define BDF_MEM_REGION_TYPE				0x2
 #define M3_DUMP_REGION_TYPE				0x3
 #define CALDB_MEM_REGION_TYPE				0x4
+#define QMI_MEM_REGION_TYPE				0
 
 struct qmi_wlanfw_host_cap_req_msg_v01 {
 	u8 num_clients_valid;
@@ -228,6 +229,18 @@
 	u64 fw_status;
 };
 
+struct wlfw_ini_req_msg_v01 {
+	u8 enablefwlog_valid;
+	u8 enablefwlog;
+};
+
+struct wlfw_ini_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
+#define QMI_WLFW_INI_REQ_V01 0x002F
+#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
+
 #define QMI_WLANFW_REQUEST_MEM_IND_MSG_V01_MAX_LEN	1824
 #define QMI_WLANFW_RESPOND_MEM_REQ_MSG_V01_MAX_LEN	888
 #define QMI_WLANFW_RESPOND_MEM_RESP_MSG_V01_MAX_LEN	7
@@ -235,6 +248,11 @@
 #define QMI_WLANFW_RESPOND_MEM_REQ_V01			0x0036
 #define QMI_WLANFW_RESPOND_MEM_RESP_V01			0x0036
 #define QMI_WLANFW_MAX_NUM_MEM_CFG_V01			2
+#define QMI_WLANFW_MAX_STR_LEN_V01                      16
+#define QMI_WLANFW_MEM_WRITE_REQ_V01			0x0031
+#define QMI_WLANFW_MEM_WRITE_REQ_MSG_V01_MAX_MSG_LEN	6163
+#define QMI_WLANFW_MEM_READ_REQ_V01			0x0030
+#define QMI_WLANFW_MEM_READ_REQ_MSG_V01_MAX_MSG_LEN	21
 
 struct qmi_wlanfw_mem_cfg_s_v01 {
 	u64 offset;
@@ -511,6 +529,30 @@
 	struct qmi_response_type_v01 resp;
 };
 
+struct qmi_wlanfw_mem_read_req_msg_v01 {
+	u32 offset;
+	u32 mem_type;
+	u32 data_len;
+};
+
+struct qmi_wlanfw_mem_read_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+	u8 data_valid;
+	u32 data_len;
+	u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+};
+
+struct qmi_wlanfw_mem_write_req_msg_v01 {
+	u32 offset;
+	u32 mem_type;
+	u32 data_len;
+	u8 data[QMI_WLANFW_MAX_DATA_SIZE_V01];
+};
+
+struct qmi_wlanfw_mem_write_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
 int ath11k_qmi_firmware_start(struct ath11k_base *ab,
 			      u32 mode);
 void ath11k_qmi_firmware_stop(struct ath11k_base *ab);
@@ -518,5 +560,8 @@
 int ath11k_qmi_init_service(struct ath11k_base *ab);
 void ath11k_qmi_free_resource(struct ath11k_base *ab);
 int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab);
+int ath11k_enable_fwlog(struct ath11k_base *ab);
+int ath11k_qmi_mem_read(struct ath11k_base *ab, u32 mem_addr, void *mem_value, size_t count);
+int ath11k_qmi_mem_write(struct ath11k_base *ab, u32 mem_addr, void* mem_value, size_t count);
 
 #endif
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/reg.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/reg.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/reg.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/reg.c	2025-09-25 17:40:34.131360105 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #include <linux/rtnetlink.h>
 
@@ -55,6 +55,19 @@
 	ath11k_dbg(ar->ab, ATH11K_DBG_REG,
 		   "Regulatory Notification received for %s\n", wiphy_name(wiphy));
 
+	if (request->initiator == NL80211_REGDOM_SET_BY_DRIVER) {
+		ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+			   "driver initiated regd update\n");
+		if (ar->state != ATH11K_STATE_ON)
+			return;
+
+		ret = ath11k_reg_update_chan_list(ar, true);
+		if (ret)
+			ath11k_warn(ar->ab, "failed to update channel list: %d\n", ret);
+
+		return;
+	}
+
 	/* Currently supporting only General User Hints. Cell base user
 	 * hints to be handled later.
 	 * Hints from other sources like Core, Beacons are not expected for
@@ -111,32 +124,7 @@
 	struct channel_param *ch;
 	enum nl80211_band band;
 	int num_channels = 0;
-	int i, ret, left;
-
-	if (wait && ar->state_11d != ATH11K_11D_IDLE) {
-		left = wait_for_completion_timeout(&ar->completed_11d_scan,
-						   ATH11K_SCAN_TIMEOUT_HZ);
-		if (!left) {
-			ath11k_dbg(ar->ab, ATH11K_DBG_REG,
-				   "failed to receive 11d scan complete: timed out\n");
-			ar->state_11d = ATH11K_11D_IDLE;
-		}
-		ath11k_dbg(ar->ab, ATH11K_DBG_REG,
-			   "11d scan wait left time %d\n", left);
-	}
-
-	if (wait &&
-	    (ar->scan.state == ATH11K_SCAN_STARTING ||
-	    ar->scan.state == ATH11K_SCAN_RUNNING)) {
-		left = wait_for_completion_timeout(&ar->scan.completed,
-						   ATH11K_SCAN_TIMEOUT_HZ);
-		if (!left)
-			ath11k_dbg(ar->ab, ATH11K_DBG_REG,
-				   "failed to receive hw scan complete: timed out\n");
-
-		ath11k_dbg(ar->ab, ATH11K_DBG_REG,
-			   "hw scan wait left time %d\n", left);
-	}
+	int i, ret = 0;
 
 	if (ar->state == ATH11K_STATE_RESTARTING)
 		return 0;
@@ -218,6 +206,16 @@
 		}
 	}
 
+	if (wait) {
+		spin_lock_bh(&ar->data_lock);
+		list_add_tail(&params->list, &ar->channel_update_queue);
+		spin_unlock_bh(&ar->data_lock);
+
+		queue_work(ar->ab->workqueue, &ar->channel_update_work);
+
+		return 0;
+	}
+
 	ret = ath11k_wmi_send_scan_chan_list_cmd(ar, params);
 	kfree(params);
 
@@ -293,12 +291,6 @@
 	if (ret)
 		goto err;
 
-	if (ar->state == ATH11K_STATE_ON) {
-		ret = ath11k_reg_update_chan_list(ar, true);
-		if (ret)
-			goto err;
-	}
-
 	return 0;
 err:
 	ath11k_warn(ab, "failed to perform regd update : %d\n", ret);
@@ -784,7 +776,7 @@
 
 	tmp_regd->n_reg_rules = i;
 
-	if (intersect) {
+	if (0) {
 		default_regd = ab->default_regd[reg_info->phy_id];
 
 		/* Get a new regd by intersecting the received regd with
@@ -804,6 +796,54 @@
 	return new_regd;
 }
 
+void ath11k_regd_update_chan_list_work(struct work_struct *work)
+{
+	struct ath11k *ar = container_of(work, struct ath11k,
+					 channel_update_work);
+	struct scan_chan_list_params *params;
+	struct list_head local_update_list;
+	int left;
+
+	INIT_LIST_HEAD(&local_update_list);
+
+	spin_lock_bh(&ar->data_lock);
+	list_splice_tail_init(&ar->channel_update_queue, &local_update_list);
+	spin_unlock_bh(&ar->data_lock);
+
+	while ((params = list_first_entry_or_null(&local_update_list,
+						  struct scan_chan_list_params,
+						  list))) {
+		if (ar->state_11d != ATH11K_11D_IDLE) {
+			left = wait_for_completion_timeout(&ar->completed_11d_scan,
+							   ATH11K_SCAN_TIMEOUT_HZ);
+			if (!left) {
+				ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+					   "failed to receive 11d scan complete: timed out\n");
+				ar->state_11d = ATH11K_11D_IDLE;
+			}
+
+			ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+				   "reg 11d scan wait left time %d\n", left);
+		}
+
+		if ((ar->scan.state == ATH11K_SCAN_STARTING ||
+		     ar->scan.state == ATH11K_SCAN_RUNNING)) {
+			left = wait_for_completion_timeout(&ar->scan.completed,
+							   ATH11K_SCAN_TIMEOUT_HZ);
+			if (!left)
+				ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+					   "failed to receive hw scan complete: timed out\n");
+
+			ath11k_dbg(ar->ab, ATH11K_DBG_REG,
+				   "reg hw scan wait left time %d\n", left);
+		}
+
+		ath11k_wmi_send_scan_chan_list_cmd(ar, params);
+		list_del(&params->list);
+		kfree(params);
+	}
+}
+
 static bool ath11k_reg_is_world_alpha(char *alpha)
 {
 	if (alpha[0] == '0' && alpha[1] == '0')
@@ -977,6 +1017,7 @@
 void ath11k_reg_init(struct ath11k *ar)
 {
 	ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
+	ar->hw->wiphy->flags |= WIPHY_FLAG_NOTIFY_REGDOM_BY_DRIVER;
 	ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
 }
 
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/reg.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/reg.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/reg.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/reg.h	2025-07-01 14:10:42.708045839 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH11K_REG_H
@@ -33,6 +33,7 @@
 void ath11k_reg_reset_info(struct cur_regulatory_info *reg_info);
 void ath11k_reg_free(struct ath11k_base *ab);
 void ath11k_regd_update_work(struct work_struct *work);
+void ath11k_regd_update_chan_list_work(struct work_struct *work);
 struct ieee80211_regdomain *
 ath11k_reg_build_regd(struct ath11k_base *ab,
 		      struct cur_regulatory_info *reg_info, bool intersect,
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/testmode.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/testmode.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/testmode.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/testmode.c	2025-09-25 17:40:34.131360105 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2023-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include "testmode.h"
@@ -15,15 +15,55 @@
 #define ATH11K_FTM_SEGHDR_CURRENT_SEQ		GENMASK(3, 0)
 #define ATH11K_FTM_SEGHDR_TOTAL_SEGMENTS	GENMASK(7, 4)
 
-static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
-	[ATH11K_TM_ATTR_CMD]		= { .type = NLA_U32 },
-	[ATH11K_TM_ATTR_DATA]		= { .type = NLA_BINARY,
-					    .len = ATH11K_TM_DATA_MAX_LEN },
-	[ATH11K_TM_ATTR_WMI_CMDID]	= { .type = NLA_U32 },
-	[ATH11K_TM_ATTR_VERSION_MAJOR]	= { .type = NLA_U32 },
-	[ATH11K_TM_ATTR_VERSION_MINOR]	= { .type = NLA_U32 },
+static const struct nla_policy ath11k_tm_policy[ATH_TM_ATTR_MAX + 1] = {
+	[ATH_TM_ATTR_CMD]		= { .type = NLA_U32 },
+	[ATH_TM_ATTR_DATA]		= { .type = NLA_BINARY,
+					    .len = ATH_TM_DATA_MAX_LEN },
+	[ATH_TM_ATTR_WMI_CMDID]		= { .type = NLA_U32 },
+	[ATH_TM_ATTR_VERSION_MAJOR]	= { .type = NLA_U32 },
+	[ATH_TM_ATTR_VERSION_MINOR]	= { .type = NLA_U32 },
+	[ATH_TM_ATTR_FWLOG]		= { .type = NLA_BINARY,
+					    .len = ATH_FTM_FWLOG_MAX_LEN },
 };
 
+void ath11k_fwlog_write(struct ath11k_base *ab,  u8 *data, int len)
+{
+	struct sk_buff *nl_skb;
+	int ret, i;
+	struct ath11k *ar = NULL;
+	struct ath11k_pdev *pdev;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		if (pdev && pdev->ar) {
+			ar = pdev->ar;
+			break;
+		}
+	}
+
+	if (!ar)
+		return;
+
+	nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
+						   len, GFP_ATOMIC);
+	if (!nl_skb) {
+		ath11k_warn(ab,
+			    "failed to allocate skb for fwlog event\n");
+		return;
+	}
+
+	ret = nla_put(nl_skb, ATH_TM_ATTR_FWLOG, len, data);
+	if (ret) {
+		ath11k_warn(ab,
+			    "failed to put fwlog wmi event to nl: %d\n",
+			    ret);
+		kfree_skb(nl_skb);
+		return;
+	}
+
+	cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
 static struct ath11k *ath11k_tm_get_ar(struct ath11k_base *ab)
 {
 	struct ath11k_pdev *pdev;
@@ -73,9 +113,9 @@
 		goto out;
 	}
 
-	if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI) ||
-	    nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
-	    nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data)) {
+	if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) ||
+	    nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+	    nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) {
 		ath11k_warn(ab, "failed to populate testmode unsegmented event\n");
 		kfree_skb(nl_skb);
 		goto out;
@@ -140,7 +180,7 @@
 
 	data_pos = ab->testmode.data_pos;
 
-	if ((data_pos + datalen) > ATH11K_FTM_EVENT_MAX_BUF_LENGTH) {
+	if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
 		ath11k_warn(ab, "Invalid ftm event length at %d: %d\n",
 			    data_pos, datalen);
 		ret = -EINVAL;
@@ -172,10 +212,10 @@
 		goto out;
 	}
 
-	if (nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD,
-			ATH11K_TM_CMD_WMI_FTM) ||
-	    nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id) ||
-	    nla_put(nl_skb, ATH11K_TM_ATTR_DATA, data_pos,
+	if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD,
+			ATH_TM_CMD_WMI_FTM) ||
+	    nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+	    nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos,
 		    &ab->testmode.eventdata[0])) {
 		ath11k_warn(ab, "failed to populate segmented testmode event");
 		kfree_skb(nl_skb);
@@ -235,23 +275,23 @@
 
 	ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
 		   "cmd get version_major %d version_minor %d\n",
-		   ATH11K_TESTMODE_VERSION_MAJOR,
-		   ATH11K_TESTMODE_VERSION_MINOR);
+		   ATH_TESTMODE_VERSION_MAJOR,
+		   ATH_TESTMODE_VERSION_MINOR);
 
 	skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
 						nla_total_size(sizeof(u32)));
 	if (!skb)
 		return -ENOMEM;
 
-	ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR,
-			  ATH11K_TESTMODE_VERSION_MAJOR);
+	ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR,
+			  ATH_TESTMODE_VERSION_MAJOR);
 	if (ret) {
 		kfree_skb(skb);
 		return ret;
 	}
 
-	ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR,
-			  ATH11K_TESTMODE_VERSION_MINOR);
+	ret = nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR,
+			  ATH_TESTMODE_VERSION_MINOR);
 	if (ret) {
 		kfree_skb(skb);
 		return ret;
@@ -277,7 +317,7 @@
 		goto err;
 	}
 
-	ar->ab->testmode.eventdata = kzalloc(ATH11K_FTM_EVENT_MAX_BUF_LENGTH,
+	ar->ab->testmode.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH,
 					     GFP_KERNEL);
 	if (!ar->ab->testmode.eventdata) {
 		ret = -ENOMEM;
@@ -310,25 +350,25 @@
 
 	mutex_lock(&ar->conf_mutex);
 
-	if (!tb[ATH11K_TM_ATTR_DATA]) {
+	if (!tb[ATH_TM_ATTR_DATA]) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) {
+	if (!tb[ATH_TM_ATTR_WMI_CMDID]) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
-	buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
+	buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+	buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
 	if (!buf_len) {
 		ath11k_warn(ar->ab, "No data present in testmode wmi command\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
-	cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]);
+	cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]);
 
 	/* Make sure that the buffer length is long enough to
 	 * hold TLV and pdev/vdev id.
@@ -409,13 +449,13 @@
 		goto out;
 	}
 
-	if (!tb[ATH11K_TM_ATTR_DATA]) {
+	if (!tb[ATH_TM_ATTR_DATA]) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
-	buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
+	buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+	buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
 	cmd_id = WMI_PDEV_UTF_CMDID;
 
 	ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
@@ -476,25 +516,25 @@
 		  void *data, int len)
 {
 	struct ath11k *ar = hw->priv;
-	struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1];
+	struct nlattr *tb[ATH_TM_ATTR_MAX + 1];
 	int ret;
 
-	ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy,
+	ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath11k_tm_policy,
 			NULL);
 	if (ret)
 		return ret;
 
-	if (!tb[ATH11K_TM_ATTR_CMD])
+	if (!tb[ATH_TM_ATTR_CMD])
 		return -EINVAL;
 
-	switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) {
-	case ATH11K_TM_CMD_GET_VERSION:
+	switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) {
+	case ATH_TM_CMD_GET_VERSION:
 		return ath11k_tm_cmd_get_version(ar, tb);
-	case ATH11K_TM_CMD_WMI:
+	case ATH_TM_CMD_WMI:
 		return ath11k_tm_cmd_wmi(ar, tb, vif);
-	case ATH11K_TM_CMD_TESTMODE_START:
+	case ATH_TM_CMD_TESTMODE_START:
 		return ath11k_tm_cmd_testmode_start(ar, tb);
-	case ATH11K_TM_CMD_WMI_FTM:
+	case ATH_TM_CMD_WMI_FTM:
 		return ath11k_tm_cmd_wmi_ftm(ar, tb);
 	default:
 		return -EOPNOTSUPP;
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/testmode.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/testmode.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/testmode.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/testmode.h	2025-09-25 17:40:34.131360105 +0200
@@ -12,6 +12,7 @@
 int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		  void *data, int len);
 
+void ath11k_fwlog_write(struct ath11k_base *ab,  u8 *data, int len);
 #else
 
 static inline void ath11k_tm_wmi_event(struct ath11k_base *ab, u32 cmd_id,
@@ -26,4 +27,9 @@
 	return 0;
 }
 
+static inline void ath11k_fwlog_write(struct ath11k_base *ab,  u8 *data,
+				     int len)
+{
+
+}
 #endif
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/testmode_i.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/testmode_i.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/testmode_i.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/testmode_i.h	2025-09-25 17:40:34.131360105 +0200
@@ -1,66 +1,69 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
- * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
-/* "API" level of the ath11k testmode interface. Bump it after every
+/* "API" level of the ath testmode interface. Bump it after every
  * incompatible interface change.
  */
-#define ATH11K_TESTMODE_VERSION_MAJOR 1
+#define ATH_TESTMODE_VERSION_MAJOR 1
 
 /* Bump this after every _compatible_ interface change, for example
  * addition of a new command or an attribute.
  */
-#define ATH11K_TESTMODE_VERSION_MINOR 1
+#define ATH_TESTMODE_VERSION_MINOR 0
 
-#define ATH11K_TM_DATA_MAX_LEN		5000
-#define ATH11K_FTM_EVENT_MAX_BUF_LENGTH 2048
+#define ATH_TM_DATA_MAX_LEN		5000
+#define ATH_FTM_EVENT_MAX_BUF_LENGTH 	2048
 
-enum ath11k_tm_attr {
-	__ATH11K_TM_ATTR_INVALID		= 0,
-	ATH11K_TM_ATTR_CMD			= 1,
-	ATH11K_TM_ATTR_DATA			= 2,
-	ATH11K_TM_ATTR_WMI_CMDID		= 3,
-	ATH11K_TM_ATTR_VERSION_MAJOR		= 4,
-	ATH11K_TM_ATTR_VERSION_MINOR		= 5,
-	ATH11K_TM_ATTR_WMI_OP_VERSION		= 6,
+#define ATH_FTM_FWLOG_MAX_LEN		2048
+
+enum ath_tm_attr {
+	__ATH_TM_ATTR_INVALID		= 0,
+	ATH_TM_ATTR_CMD			= 1,
+	ATH_TM_ATTR_DATA		= 2,
+	ATH_TM_ATTR_WMI_CMDID		= 3,
+	ATH_TM_ATTR_VERSION_MAJOR	= 4,
+	ATH_TM_ATTR_VERSION_MINOR	= 5,
+	ATH_TM_ATTR_WMI_OP_VERSION	= 6,
+	ATH_TM_ATTR_FWLOG		= 7,
+	ATH_TM_ATTR_DUAL_MAC		= 9,
 
 	/* keep last */
-	__ATH11K_TM_ATTR_AFTER_LAST,
-	ATH11K_TM_ATTR_MAX		= __ATH11K_TM_ATTR_AFTER_LAST - 1,
+	__ATH_TM_ATTR_AFTER_LAST,
+	ATH_TM_ATTR_MAX			= __ATH_TM_ATTR_AFTER_LAST - 1,
 };
 
-/* All ath11k testmode interface commands specified in
- * ATH11K_TM_ATTR_CMD
+/* All ath testmode interface commands specified in
+ * ATH_TM_ATTR_CMD
  */
-enum ath11k_tm_cmd {
-	/* Returns the supported ath11k testmode interface version in
-	 * ATH11K_TM_ATTR_VERSION. Always guaranteed to work. User space
+enum ath_tm_cmd {
+	/* Returns the supported ath testmode interface version in
+	 * ATH_TM_ATTR_VERSION. Always guaranteed to work. User space
 	 * uses this to verify it's using the correct version of the
 	 * testmode interface
 	 */
-	ATH11K_TM_CMD_GET_VERSION = 0,
+	ATH_TM_CMD_GET_VERSION = 0,
+
+	/* Set ar state to test mode. */
+	ATH_TM_CMD_TESTMODE_START = 1,
+
+	/* Set ar state back into OFF state. */
+	ATH_TM_CMD_TESTMODE_STOP = 2,
 
 	/* The command used to transmit a WMI command to the firmware and
 	 * the event to receive WMI events from the firmware. Without
 	 * struct wmi_cmd_hdr header, only the WMI payload. Command id is
-	 * provided with ATH11K_TM_ATTR_WMI_CMDID and payload in
-	 * ATH11K_TM_ATTR_DATA.
-	 */
-	ATH11K_TM_CMD_WMI = 1,
-
-	/* Boots the UTF firmware, the netdev interface must be down at the
-	 * time.
+	 * provided with ATH_TM_ATTR_WMI_CMDID and payload in
+	 * ATH_TM_ATTR_DATA.
 	 */
-	ATH11K_TM_CMD_TESTMODE_START = 2,
+	ATH_TM_CMD_WMI = 3,
 
 	/* The command used to transmit a FTM WMI command to the firmware
 	 * and the event to receive WMI events from the firmware. The data
-	 * received only contain the payload, need to add the tlv header
-	 * and send the cmd to firmware with command id WMI_PDEV_UTF_CMDID.
-	 * The data payload size could be large and the driver needs to
-	 * send segmented data to firmware.
+	 * received only contain the payload. Need to add the tlv
+	 * header and send the cmd to fw with command id WMI_PDEV_UTF_CMDID.
 	 */
-	ATH11K_TM_CMD_WMI_FTM = 3,
+	ATH_TM_CMD_WMI_FTM = 4,
 };
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/thermal.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/thermal.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/thermal.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/thermal.h	2025-09-25 17:40:34.131360105 +0200
@@ -12,7 +12,7 @@
 #define ATH11K_THERMAL_THROTTLE_MAX     100
 #define ATH11K_THERMAL_DEFAULT_DUTY_CYCLE 100
 #define ATH11K_HWMON_NAME_LEN           15
-#define ATH11K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
+#define ATH11K_THERMAL_SYNC_TIMEOUT_HZ (HZ / 10)
 
 struct ath11k_thermal {
 	struct thermal_cooling_device *cdev;
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/wmi.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/wmi.c
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/wmi.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/wmi.c	2025-09-29 14:23:07.601732390 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #include <linux/skbuff.h>
 #include <linux/ctype.h>
@@ -1854,7 +1854,6 @@
 	tlv = (struct wmi_tlv *)(skb->data + sizeof(*cmd));
 	tlv->header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
 		      FIELD_PREP(WMI_TLV_LEN, key_len_aligned);
-	if (arg->key_data)
 		memcpy(tlv->value, (u8 *)arg->key_data, key_len_aligned);
 
 	ret = ath11k_wmi_cmd_send(wmi, skb, WMI_VDEV_INSTALL_KEY_CMDID);
@@ -2662,7 +2661,8 @@
 }
 
 int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
-				       struct wmi_wmm_params_all_arg *param)
+				       struct wmi_wmm_params_all_arg *param,
+				       enum wmi_wmm_params_type wmm_param_type)
 {
 	struct ath11k_pdev_wmi *wmi = ar->wmi;
 	struct wmi_vdev_set_wmm_params_cmd *cmd;
@@ -2681,7 +2681,7 @@
 			  FIELD_PREP(WMI_TLV_LEN, sizeof(*cmd) - TLV_HDR_SIZE);
 
 	cmd->vdev_id = vdev_id;
-	cmd->wmm_param_type = 0;
+	cmd->wmm_param_type = wmm_param_type;
 
 	for (ac = 0; ac < WME_NUM_AC; ac++) {
 		switch (ac) {
@@ -2714,8 +2714,8 @@
 		wmm_param->no_ack = wmi_wmm_arg->no_ack;
 
 		ath11k_dbg(ar->ab, ATH11K_DBG_WMI,
-			   "wmm set ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
-			   ac, wmm_param->aifs, wmm_param->cwmin,
+			   "wmm set type %d ac %d aifs %d cwmin %d cwmax %d txop %d acm %d no_ack %d\n",
+			   wmm_param_type, ac, wmm_param->aifs, wmm_param->cwmin,
 			   wmm_param->cwmax, wmm_param->txoplimit,
 			   wmm_param->acm, wmm_param->no_ack);
 	}
@@ -8013,12 +8013,14 @@
 	survey->noise     = bss_ch_info_ev.noise_floor;
 	survey->time      = div_u64(total, cc_freq_hz);
 	survey->time_busy = div_u64(busy, cc_freq_hz);
-	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
+	survey->time_rx   = div_u64(rx, cc_freq_hz);
 	survey->time_tx   = div_u64(tx, cc_freq_hz);
+	survey->time_bss_rx   = div_u64(rx_bss, cc_freq_hz);
 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
 			     SURVEY_INFO_TIME |
 			     SURVEY_INFO_TIME_BUSY |
 			     SURVEY_INFO_TIME_RX |
+			     SURVEY_INFO_TIME_BSS_RX |
 			     SURVEY_INFO_TIME_TX);
 exit:
 	spin_unlock_bh(&ar->data_lock);
@@ -8529,15 +8531,6 @@
 	complete(&ab->wow.wakeup_completed);
 }
 
-static void
-ath11k_wmi_diag_event(struct ath11k_base *ab,
-		      struct sk_buff *skb)
-{
-	ath11k_dbg(ab, ATH11K_DBG_WMI, "event diag");
-
-	trace_ath11k_wmi_diag(ab, skb->data, skb->len);
-}
-
 static const char *ath11k_wmi_twt_add_dialog_event_status(u32 status)
 {
 	switch (status) {
@@ -8705,6 +8698,29 @@
 	kfree(tb);
 }
 
+static void ath11k_wmi_diag_event(struct ath11k_base *ab, struct sk_buff *skb)
+{
+	const struct wmi_tlv *tlv;
+	u16 tlv_tag, tlv_len;
+	uint32_t *dev_id;
+	u8 *data;
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+	tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
+
+	if (tlv_tag == WMI_TAG_ARRAY_BYTE) {
+		data = skb->data + sizeof(struct wmi_tlv);
+		dev_id = (uint32_t *)data;
+		*dev_id = ab->hw_params.hw_rev;
+	} else {
+		ath11k_warn(ab, "WMI Diag Event missing required tlv\n");
+		return;
+	}
+
+	ath11k_fwlog_write(ab,data, tlv_len);
+}
+
 static void ath11k_wmi_tlv_op_rx(struct ath11k_base *ab, struct sk_buff *skb)
 {
 	struct wmi_cmd_hdr *cmd_hdr;
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath11k/wmi.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/wmi.h
--- linux-6.13.12/drivers/net/wireless/ath/ath11k/wmi.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath11k/wmi.h	2025-09-29 14:23:07.605732410 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH11K_WMI_H
@@ -2122,6 +2122,7 @@
 	WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
 	WMI_TLV_SERVICE_EXT2_MSG = 220,
 	WMI_TLV_SERVICE_PEER_POWER_SAVE_DURATION_SUPPORT = 246,
+	WMI_TLV_SERVICE_5_DOT_9GHZ_SUPPORT = 247,
 	WMI_TLV_SERVICE_SRG_SRP_SPATIAL_REUSE_SUPPORT = 249,
 	WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT = 253,
 	WMI_TLV_SERVICE_PASSIVE_SCAN_START_TIME_ENHANCE = 263,
@@ -3817,6 +3818,7 @@
 };
 
 struct scan_chan_list_params {
+	struct list_head list;
 	u32 pdev_id;
 	u16 nallchans;
 	struct channel_param ch_param[];
@@ -6346,6 +6348,11 @@
 #define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT	30
 #define WMI_STA_KEEPALIVE_INTERVAL_DISABLE	0
 
+enum wmi_wmm_params_type {
+	WMI_WMM_PARAM_TYPE_LEGACY = 0,
+	WMI_WMM_PARAM_TYPE_11AX_MU_EDCA = 1,
+};
+
 const void **ath11k_wmi_tlv_parse_alloc(struct ath11k_base *ab,
 					struct sk_buff *skb, gfp_t gfp);
 int ath11k_wmi_cmd_send(struct ath11k_pdev_wmi *wmi, struct sk_buff *skb,
@@ -6402,7 +6409,8 @@
 int ath11k_wmi_send_scan_stop_cmd(struct ath11k *ar,
 				  struct scan_cancel_param *param);
 int ath11k_wmi_send_wmm_update_cmd_tlv(struct ath11k *ar, u32 vdev_id,
-				       struct wmi_wmm_params_all_arg *param);
+				       struct wmi_wmm_params_all_arg *param,
+				       enum wmi_wmm_params_type wmm_param_type);
 int ath11k_wmi_pdev_suspend(struct ath11k *ar, u32 suspend_opt,
 			    u32 pdev_id);
 int ath11k_wmi_pdev_resume(struct ath11k *ar, u32 pdev_id);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/Kconfig linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/Kconfig
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/Kconfig	2025-09-29 14:23:07.605732410 +0200
@@ -52,3 +52,35 @@
 
 	  If unsure, say Y to make it easier to debug problems. But if
 	  dump collection not required choose N.
+
+choice
+	prompt "Memory profile"
+	depends on ATH12K
+	default ATH12K_MEM_PROFILE_DEFAULT
+	help
+	  Allows selecting the memory profile
+
+	  By enabling this option, you can choose between different memory
+	  profiles, such as the default profile or a 512 MB profile.
+
+config ATH12K_MEM_PROFILE_DEFAULT
+	bool "ath12k default memory profile"
+	depends on ATH12K
+	help
+	  Enables the default memory profile
+
+	  This profile is recommended when memory is not constrained or
+	  if unsure which profile to choose.
+
+config ATH12K_MEM_PROFILE_512M
+	bool "ath12k enable 512MB memory profile"
+	depends on ATH12K
+	help
+	  Enables the 512MB memory profile
+
+	  Selecting this option reduces memory allocation to the firmware,
+	  decreases the number of transmit and receive descriptors, and
+	  limits the monitor ring and the number of virtual devices
+	  (vdevs).
+
+endchoice
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/Makefile linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/Makefile
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/Makefile	2025-09-25 17:40:34.135360125 +0200
@@ -23,11 +23,13 @@
 	    fw.o \
 	    p2p.o
 
-ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o
+ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o debugfs_sta.o
 ath12k-$(CONFIG_ACPI) += acpi.o
 ath12k-$(CONFIG_ATH12K_TRACING) += trace.o
 ath12k-$(CONFIG_PM) += wow.o
 ath12k-$(CONFIG_ATH12K_COREDUMP) += coredump.o
+ath12k-$(CONFIG_NL80211_TESTMODE) += testmode.o
+ath12k-$(CONFIG_THERMAL) += thermal.o
 
 # for tracing framework to find trace.h
 CFLAGS_trace.o := -I$(src)
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/acpi.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/acpi.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/acpi.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/acpi.h	2025-07-01 14:10:42.716046054 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #ifndef ATH12K_ACPI_H
 #define ATH12K_ACPI_H
@@ -9,6 +9,8 @@
 #include <linux/acpi.h>
 
 #define ATH12K_ACPI_DSM_FUNC_SUPPORT_FUNCS	0
+#define ATH12K_ACPI_DSM_FUNC_DISABLE_FLAG	2
+#define ATH12K_ACPI_DSM_FUNC_BDF_EXT		3
 #define ATH12K_ACPI_DSM_FUNC_BIOS_SAR		4
 #define ATH12K_ACPI_DSM_FUNC_GEO_OFFSET		5
 #define ATH12K_ACPI_DSM_FUNC_INDEX_CCA		6
@@ -16,6 +18,8 @@
 #define ATH12K_ACPI_DSM_FUNC_TAS_DATA		9
 #define ATH12K_ACPI_DSM_FUNC_INDEX_BAND_EDGE		10
 
+#define ATH12K_ACPI_FUNC_BIT_DISABLE_FLAG		BIT(1)
+#define ATH12K_ACPI_FUNC_BIT_BDF_EXT			BIT(2)
 #define ATH12K_ACPI_FUNC_BIT_BIOS_SAR			BIT(3)
 #define ATH12K_ACPI_FUNC_BIT_GEO_OFFSET			BIT(4)
 #define ATH12K_ACPI_FUNC_BIT_CCA			BIT(5)
@@ -25,6 +29,7 @@
 
 #define ATH12K_ACPI_NOTIFY_EVENT			0x86
 #define ATH12K_ACPI_FUNC_BIT_VALID(_acdata, _func)	(((_acdata).func_bit) & (_func))
+#define ATH12K_ACPI_CHEK_BIT_VALID(_acdata, _func)	(((_acdata).bit_flag) & (_func))
 
 #define ATH12K_ACPI_TAS_DATA_VERSION		0x1
 #define ATH12K_ACPI_TAS_DATA_ENABLE		0x1
@@ -48,6 +53,16 @@
 #define ATH12K_ACPI_DSM_BAND_EDGE_DATA_SIZE		100
 #define ATH12K_ACPI_DSM_TAS_CFG_SIZE			108
 
+#define ATH12K_ACPI_DSM_FUNC_MIN_BITMAP_SIZE	1
+#define ATH12K_ACPI_DSM_FUNC_MAX_BITMAP_SIZE	4
+
+#define ATH12K_ACPI_DSM_DISABLE_11BE_BIT	BIT(0)
+#define ATH12K_ACPI_DSM_DISABLE_RFKILL_BIT	BIT(2)
+
+#define ATH12K_ACPI_BDF_ANCHOR_STRING_LEN	3
+#define ATH12K_ACPI_BDF_ANCHOR_STRING		"BDF"
+#define ATH12K_ACPI_BDF_MAX_LEN			100
+
 #define ATH12K_ACPI_DSM_GEO_OFFSET_DATA_SIZE (ATH12K_ACPI_GEO_OFFSET_DATA_OFFSET + \
 					      ATH12K_ACPI_BIOS_SAR_GEO_OFFSET_LEN)
 #define ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE (ATH12K_ACPI_POWER_LIMIT_DATA_OFFSET + \
@@ -59,6 +74,10 @@
 
 int ath12k_acpi_start(struct ath12k_base *ab);
 void ath12k_acpi_stop(struct ath12k_base *ab);
+bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab);
+bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab);
+void ath12k_acpi_set_dsm_func(struct ath12k_base *ab);
+int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab);
 
 #else
 
@@ -71,6 +90,25 @@
 {
 }
 
+static inline bool ath12k_acpi_get_disable_rfkill(struct ath12k_base *ab)
+{
+	return false;
+}
+
+static inline bool ath12k_acpi_get_disable_11be(struct ath12k_base *ab)
+{
+	return false;
+}
+
+static inline void ath12k_acpi_set_dsm_func(struct ath12k_base *ab)
+{
+}
+
+static inline int ath12k_acpi_check_bdf_variant_name(struct ath12k_base *ab)
+{
+	return 0;
+}
+
 #endif /* CONFIG_ACPI */
 
 #endif /* ATH12K_ACPI_H */
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/ce.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/ce.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/ce.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/ce.c	2025-09-25 17:40:34.135360125 +0200
@@ -545,11 +545,14 @@
 {
 	struct ath12k_ce_ring *ce_ring;
 	dma_addr_t base_addr;
+	size_t sz;
 
-	ce_ring = kzalloc(struct_size(ce_ring, skb, nentries), GFP_KERNEL);
+	sz = struct_size(ce_ring, skb, nentries);
+	ce_ring = ath12k_kzalloc_cache(sz, GFP_KERNEL);
 	if (!ce_ring)
 		return ERR_PTR(-ENOMEM);
 
+	ce_ring->alloc_sz = sz;
 	ce_ring->nentries = nentries;
 	ce_ring->nentries_mask = nentries - 1;
 
@@ -557,11 +560,11 @@
 	 * coherent DMA are unsupported
 	 */
 	ce_ring->base_addr_owner_space_unaligned =
-		dma_alloc_coherent(ab->dev,
+		ath12k_dma_alloc_coherent_no_dev(
 				   nentries * desc_sz + CE_DESC_RING_ALIGN,
 				   &base_addr, GFP_KERNEL);
 	if (!ce_ring->base_addr_owner_space_unaligned) {
-		kfree(ce_ring);
+		ath12k_kfree_cache(sz, ce_ring);
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -894,35 +897,35 @@
 
 		if (pipe->src_ring) {
 			desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_SRC);
-			dma_free_coherent(ab->dev,
+			ath12k_dma_free_coherent_no_dev(
 					  pipe->src_ring->nentries * desc_sz +
 					  CE_DESC_RING_ALIGN,
 					  pipe->src_ring->base_addr_owner_space,
 					  pipe->src_ring->base_addr_ce_space);
-			kfree(pipe->src_ring);
+			ath12k_kfree_cache(pipe->src_ring->alloc_sz, pipe->src_ring);
 			pipe->src_ring = NULL;
 		}
 
 		if (pipe->dest_ring) {
 			desc_sz = ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST);
-			dma_free_coherent(ab->dev,
+			ath12k_dma_free_coherent_no_dev(
 					  pipe->dest_ring->nentries * desc_sz +
 					  CE_DESC_RING_ALIGN,
 					  pipe->dest_ring->base_addr_owner_space,
 					  pipe->dest_ring->base_addr_ce_space);
-			kfree(pipe->dest_ring);
+			ath12k_kfree_cache(pipe->dest_ring->alloc_sz, pipe->dest_ring);
 			pipe->dest_ring = NULL;
 		}
 
 		if (pipe->status_ring) {
 			desc_sz =
 			  ath12k_hal_ce_get_desc_size(HAL_CE_DESC_DST_STATUS);
-			dma_free_coherent(ab->dev,
+			ath12k_dma_free_coherent_no_dev(
 					  pipe->status_ring->nentries * desc_sz +
 					  CE_DESC_RING_ALIGN,
 					  pipe->status_ring->base_addr_owner_space,
 					  pipe->status_ring->base_addr_ce_space);
-			kfree(pipe->status_ring);
+			ath12k_kfree_cache(pipe->status_ring->alloc_sz, pipe->status_ring);
 			pipe->status_ring = NULL;
 		}
 	}
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/ce.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/ce.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/ce.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/ce.h	2025-09-25 17:40:34.135360125 +0200
@@ -97,6 +97,9 @@
 #define CE_DESC_RING_ALIGN 8
 
 struct ath12k_ce_ring {
+	/* actual allocated size for bookkeeping */
+	size_t alloc_sz;
+
 	/* Number of entries in this ring; must be power of 2 */
 	unsigned int nentries;
 	unsigned int nentries_mask;
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/core.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/core.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/core.c	2025-09-25 17:40:34.139360145 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -9,6 +9,7 @@
 #include <linux/remoteproc.h>
 #include <linux/firmware.h>
 #include <linux/of.h>
+#include <linux/of_graph.h>
 #include "core.h"
 #include "dp_tx.h"
 #include "dp_rx.h"
@@ -22,6 +23,39 @@
 module_param_named(debug_mask, ath12k_debug_mask, uint, 0644);
 MODULE_PARM_DESC(debug_mask, "Debugging mask");
 
+bool ath12k_ftm_mode;
+module_param_named(ftm_mode, ath12k_ftm_mode, bool, 0444);
+MODULE_PARM_DESC(ftm_mode, "Boots up in factory test mode");
+
+unsigned int ath12k_frame_mode = ATH12K_HW_TXRX_ETHERNET;
+module_param_named(frame_mode, ath12k_frame_mode, uint, 0644);
+MODULE_PARM_DESC(frame_mode,
+		 "Datapath frame mode (0: raw, 1: native wifi (default), 2: ethernet)");
+
+static char *ath12k_board_variant;
+module_param_named(board_variant, ath12k_board_variant, charp, 0444);
+MODULE_PARM_DESC(board_variant, "board variant to use for bdf lookup");
+
+bool ath12k_en_shutdown;
+module_param_named(en_shutdown, ath12k_en_shutdown, bool, 0644);
+MODULE_PARM_DESC(en_shutdown, "enable pcie shutdown callback");
+
+unsigned int ath12k_mlo_enable = 0;
+module_param_named(mlo_enable, ath12k_mlo_enable, uint, 0644);
+MODULE_PARM_DESC(mlo_enable, "MLO enable");
+
+static unsigned int ath12k_en_fwlog = true;
+module_param_named(en_fwlog, ath12k_en_fwlog, uint, 0644);
+MODULE_PARM_DESC(en_fwlog, "fwlog: 0-disable, 1-enable");
+
+/* protected with ath12k_hw_group_mutex */
+static struct list_head ath12k_hw_group_list = LIST_HEAD_INIT(ath12k_hw_group_list);
+struct list_head ath12k_dma_cache = LIST_HEAD_INIT(ath12k_dma_cache);
+struct list_head ath12k_mem_cache = LIST_HEAD_INIT(ath12k_mem_cache);
+
+static DEFINE_MUTEX(ath12k_hw_group_mutex);
+static DEFINE_MUTEX(ath12k_cache_mutex);
+
 static int ath12k_core_rfkill_config(struct ath12k_base *ab)
 {
 	struct ath12k *ar;
@@ -30,6 +64,9 @@
 	if (!(ab->target_caps.sys_cap_info & WMI_SYS_CAP_INFO_RFKILL))
 		return 0;
 
+	if (ath12k_acpi_get_disable_rfkill(ab))
+		return 0;
+
 	for (i = 0; i < ab->num_radios; i++) {
 		ar = ab->pdevs[i].ar;
 
@@ -79,11 +116,17 @@
 		ar = ab->pdevs[i].ar;
 		if (!ar)
 			continue;
+
+		wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
 		ret = ath12k_mac_wait_tx_complete(ar);
 		if (ret) {
+			wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
 			ath12k_warn(ab, "failed to wait tx complete: %d\n", ret);
 			return ret;
 		}
+
+		wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
 	}
 
 	/* PM framework skips suspend_late/resume_early callbacks
@@ -161,12 +204,15 @@
 
 static int __ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
 					   size_t name_len, bool with_variant,
-					   bool bus_type_mode)
+					   bool bus_type_mode, bool with_default)
 {
 	/* strlen(',variant=') + strlen(ab->qmi.target.bdf_ext) */
 	char variant[9 + ATH12K_QMI_BDF_EXT_STR_LENGTH] = { 0 };
 
-	if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
+	if (ath12k_board_variant)
+		scnprintf(variant, sizeof(variant), ",variant=%s",
+			  ath12k_board_variant);
+	else if (with_variant && ab->qmi.target.bdf_ext[0] != '\0')
 		scnprintf(variant, sizeof(variant), ",variant=%s",
 			  ab->qmi.target.bdf_ext);
 
@@ -187,12 +233,27 @@
 				  ab->qmi.target.board_id,
 				  variant);
 		break;
+	case ATH12K_BDF_SEARCH_PCI_SUBSYS_AND_BOARD:
+		snprintf(name, name_len,
+			 "bus=%s,"
+			 "subsystem-vendor=%04x,"
+			 "subsystem-device=%04x,"
+			 "qmi-chip-id=%d,qmi-board-id=%d%s",
+			 ath12k_bus_str(ab->hif.bus),
+			 ab->id.subsystem_vendor,
+			 ab->id.subsystem_device,
+			 ab->qmi.target.chip_id,
+			 ab->qmi.target.board_id,
+			 variant);
+		break;
 	default:
 		scnprintf(name, name_len,
 			  "bus=%s,qmi-chip-id=%d,qmi-board-id=%d%s",
 			  ath12k_bus_str(ab->hif.bus),
 			  ab->qmi.target.chip_id,
-			  ab->qmi.target.board_id, variant);
+			  with_default ?
+			  ATH12K_BOARD_ID_DEFAULT : ab->qmi.target.board_id,
+			  variant);
 		break;
 	}
 
@@ -204,19 +265,19 @@
 static int ath12k_core_create_board_name(struct ath12k_base *ab, char *name,
 					 size_t name_len)
 {
-	return __ath12k_core_create_board_name(ab, name, name_len, true, false);
+	return __ath12k_core_create_board_name(ab, name, name_len, true, false, false);
 }
 
 static int ath12k_core_create_fallback_board_name(struct ath12k_base *ab, char *name,
 						  size_t name_len)
 {
-	return __ath12k_core_create_board_name(ab, name, name_len, false, false);
+	return __ath12k_core_create_board_name(ab, name, name_len, false, false, true);
 }
 
 static int ath12k_core_create_bus_type_board_name(struct ath12k_base *ab, char *name,
 						  size_t name_len)
 {
-	return __ath12k_core_create_board_name(ab, name, name_len, false, true);
+	return __ath12k_core_create_board_name(ab, name, name_len, false, true, true);
 }
 
 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
@@ -464,6 +525,7 @@
 {
 	char boardname[BOARD_NAME_SIZE], fallback_boardname[BOARD_NAME_SIZE];
 	char *filename, filepath[100];
+	const char *used_board;
 	int bd_api;
 	int ret;
 
@@ -480,8 +542,10 @@
 						 ATH12K_BD_IE_BOARD,
 						 ATH12K_BD_IE_BOARD_NAME,
 						 ATH12K_BD_IE_BOARD_DATA);
-	if (!ret)
+	if (!ret) {
+		used_board = boardname;
 		goto success;
+	}
 
 	ret = ath12k_core_create_fallback_board_name(ab, fallback_boardname,
 						     sizeof(fallback_boardname));
@@ -494,8 +558,10 @@
 						 ATH12K_BD_IE_BOARD,
 						 ATH12K_BD_IE_BOARD_NAME,
 						 ATH12K_BD_IE_BOARD_DATA);
-	if (!ret)
+	if (!ret) {
+		used_board = fallback_boardname;
 		goto success;
+	}
 
 	bd_api = 1;
 	ret = ath12k_core_fetch_board_data_api_1(ab, bd, ATH12K_DEFAULT_BOARD_FILE);
@@ -512,8 +578,11 @@
 			   ab->hw_params->fw.dir);
 		return ret;
 	}
+	used_board = "default";
 
 success:
+	ab->bdf_boardname = devm_kstrdup(ab->dev,
+					 used_board, GFP_KERNEL);
 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "using board api %d\n", bd_api);
 	return 0;
 }
@@ -591,16 +660,59 @@
 	return TARGET_NUM_TIDS(SINGLE);
 }
 
+static inline
+void ath12k_core_to_group_ref_get(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+
+	lockdep_assert_held(&ag->mutex);
+
+	if (ab->hw_group_ref) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already attached to group %d\n",
+			   ag->id);
+		return;
+	}
+
+	ab->hw_group_ref = true;
+	ag->num_started++;
+
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core attached to group %d, num_started %d\n",
+		   ag->id, ag->num_started);
+}
+
+static inline
+void ath12k_core_to_group_ref_put(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+
+	lockdep_assert_held(&ag->mutex);
+
+	if (!ab->hw_group_ref) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "core already de-attached from group %d\n",
+			   ag->id);
+		return;
+	}
+
+	ab->hw_group_ref = false;
+	ag->num_started--;
+
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "core de-attached from group %d, num_started %d\n",
+		   ag->id, ag->num_started);
+}
+
 static void ath12k_core_stop(struct ath12k_base *ab)
 {
+	ath12k_core_to_group_ref_put(ab);
+
 	if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
 		ath12k_qmi_firmware_stop(ab);
 
 	ath12k_acpi_stop(ab);
 
+	ath12k_dp_rx_pdev_reo_cleanup(ab);
 	ath12k_hif_stop(ab);
 	ath12k_wmi_detach(ab);
-	ath12k_dp_rx_pdev_reo_cleanup(ab);
+	ath12k_dp_free(ab);
 
 	/* De-Init of components as needed */
 }
@@ -678,6 +790,11 @@
 {
 	int ret;
 
+	if (ath12k_ftm_mode) {
+		ab->fw_mode = ATH12K_FIRMWARE_MODE_FTM;
+		ath12k_info(ab, "Booting in ftm mode\n");
+	}
+
 	ret = ath12k_qmi_init_service(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to initialize qmi :%d\n", ret);
@@ -702,7 +819,7 @@
 
 static void ath12k_core_soc_destroy(struct ath12k_base *ab)
 {
-	ath12k_dp_free(ab);
+	ath12k_hif_power_down(ab, false);
 	ath12k_reg_free(ab);
 	ath12k_debugfs_soc_destroy(ab);
 	ath12k_qmi_deinit_service(ab);
@@ -712,38 +829,26 @@
 {
 	int ret;
 
-	ret = ath12k_mac_register(ab);
-	if (ret) {
-		ath12k_err(ab, "failed register the radio with mac80211: %d\n", ret);
-		return ret;
-	}
-
 	ret = ath12k_dp_pdev_alloc(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to attach DP pdev: %d\n", ret);
-		goto err_mac_unregister;
+		return ret;
 	}
 
 	return 0;
-
-err_mac_unregister:
-	ath12k_mac_unregister(ab);
-
-	return ret;
 }
 
 static void ath12k_core_pdev_destroy(struct ath12k_base *ab)
 {
-	ath12k_mac_unregister(ab);
-	ath12k_hif_irq_disable(ab);
 	ath12k_dp_pdev_free(ab);
 }
 
-static int ath12k_core_start(struct ath12k_base *ab,
-			     enum ath12k_firmware_mode mode)
+static int ath12k_core_start(struct ath12k_base *ab)
 {
 	int ret;
 
+	lockdep_assert_held(&ab->core_lock);
+
 	ret = ath12k_wmi_attach(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to attach wmi: %d\n", ret);
@@ -793,19 +898,12 @@
 		goto err_hif_stop;
 	}
 
-	ret = ath12k_mac_allocate(ab);
-	if (ret) {
-		ath12k_err(ab, "failed to create new hw device with mac80211 :%d\n",
-			   ret);
-		goto err_hif_stop;
-	}
-
 	ath12k_dp_cc_config(ab);
 
 	ret = ath12k_dp_rx_pdev_reo_setup(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to initialize reo destination rings: %d\n", ret);
-		goto err_mac_destroy;
+		goto err_hif_stop;
 	}
 
 	ath12k_dp_hal_rx_desc_init(ab);
@@ -839,17 +937,15 @@
 		goto err_reo_cleanup;
 	}
 
-	ret = ath12k_acpi_start(ab);
-	if (ret)
-		/* ACPI is optional so continue in case of an error */
-		ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", ret);
+	ath12k_acpi_set_dsm_func(ab);
+
+	/* Indicate the core start in the appropriate group */
+	ath12k_core_to_group_ref_get(ab);
 
 	return 0;
 
 err_reo_cleanup:
 	ath12k_dp_rx_pdev_reo_cleanup(ab);
-err_mac_destroy:
-	ath12k_mac_destroy(ab);
 err_hif_stop:
 	ath12k_hif_stop(ab);
 err_wmi_detach:
@@ -857,6 +953,220 @@
 	return ret;
 }
 
+static void ath12k_core_device_cleanup(struct ath12k_base *ab)
+{
+	mutex_lock(&ab->core_lock);
+
+	ath12k_hif_irq_disable(ab);
+	ath12k_core_pdev_destroy(ab);
+
+	mutex_unlock(&ab->core_lock);
+}
+
+static void ath12k_core_hw_group_stop(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int i;
+
+	lockdep_assert_held(&ag->mutex);
+
+	clear_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
+
+	ath12k_mac_unregister(ag);
+
+	for (i = ag->num_devices - 1; i >= 0; i--) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		clear_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
+		ath12k_thermal_unregister(ab);
+		ath12k_core_device_cleanup(ab);
+	}
+
+	ath12k_mac_destroy(ag);
+}
+
+u8 ath12k_get_num_partner_link(struct ath12k *ar)
+{
+	struct ath12k_base *partner_ab, *ab = ar->ab;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_pdev *pdev;
+	u8 num_link = 0;
+	int i, j;
+
+	lockdep_assert_held(&ag->mutex);
+
+	for (i = 0; i < ag->num_devices; i++) {
+		partner_ab = ag->ab[i];
+
+		for (j = 0; j < partner_ab->num_radios; j++) {
+			pdev = &partner_ab->pdevs[j];
+
+			/* Avoid the self link */
+			if (ar == pdev->ar)
+				continue;
+
+			num_link++;
+		}
+	}
+
+	return num_link;
+}
+
+static int __ath12k_mac_mlo_ready(struct ath12k *ar)
+{
+	u8 num_link = ath12k_get_num_partner_link(ar);
+	int ret;
+
+	if (num_link == 0)
+		return 0;
+
+	ret = ath12k_wmi_mlo_ready(ar);
+	if (ret) {
+		ath12k_err(ar->ab, "MLO ready failed for pdev %d: %d\n",
+			   ar->pdev_idx, ret);
+		return ret;
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mlo ready done for pdev %d\n",
+		   ar->pdev_idx);
+
+	return 0;
+}
+
+int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag)
+{
+	struct ath12k_hw *ah;
+	struct ath12k *ar;
+	int ret = 0;
+	int i, j;
+
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ag->ah[i];
+		if (!ah)
+			continue;
+
+		for_each_ar(ah, ar, j) {
+			ar = &ah->radio[j];
+			ret = __ath12k_mac_mlo_ready(ar);
+			if (ret)
+				goto out;
+		}
+	}
+
+out:
+	return ret;
+}
+
+static int ath12k_core_mlo_setup(struct ath12k_hw_group *ag)
+{
+	int ret, i;
+
+	if (!ag->mlo_capable)
+		return 0;
+
+	ret = ath12k_mac_mlo_setup(ag);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ag->num_devices; i++)
+		ath12k_dp_partner_cc_init(ag->ab[i]);
+
+	ret = ath12k_mac_mlo_ready(ag);
+	if (ret)
+		goto err_mlo_teardown;
+
+	return 0;
+
+err_mlo_teardown:
+	ath12k_mac_mlo_teardown(ag);
+
+	return ret;
+}
+
+static int ath12k_core_hw_group_start(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int ret, i;
+
+	lockdep_assert_held(&ag->mutex);
+
+	if (test_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags))
+		goto core_pdev_create;
+
+	ret = ath12k_mac_allocate(ag);
+	if (WARN_ON(ret))
+		return ret;
+
+	ret = ath12k_core_mlo_setup(ag);
+	if (WARN_ON(ret))
+		goto err_mac_destroy;
+
+	ret = ath12k_mac_register(ag);
+	if (WARN_ON(ret))
+		goto err_mlo_teardown;
+
+	set_bit(ATH12K_GROUP_FLAG_REGISTERED, &ag->flags);
+
+core_pdev_create:
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		mutex_lock(&ab->core_lock);
+
+		set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+
+		ret = ath12k_core_pdev_create(ab);
+		if (ret) {
+			ath12k_err(ab, "failed to create pdev core %d\n", ret);
+			mutex_unlock(&ab->core_lock);
+			goto err;
+		}
+
+		ret = ath12k_en_fwlog ? ath12k_enable_fwlog(ab) : 0;
+		if (ret < 0) {
+			mutex_unlock(&ab->core_lock);
+			ath12k_err(ab, "failed to enable fwlog: %d\n", ret);
+			goto err;
+		}
+
+		ret = ath12k_thermal_register(ab);
+		if (ret) {
+			ath12k_err(ab, "could not register thermal device: "
+				   "%d\n", ret);
+			goto err;
+		}
+
+		ath12k_hif_irq_enable(ab);
+
+		ret = ath12k_core_rfkill_config(ab);
+		if (ret && ret != -EOPNOTSUPP) {
+			mutex_unlock(&ab->core_lock);
+			goto err;
+		}
+
+		mutex_unlock(&ab->core_lock);
+	}
+
+	return 0;
+
+err:
+	ath12k_core_hw_group_stop(ag);
+	return ret;
+
+err_mlo_teardown:
+	ath12k_mac_mlo_teardown(ag);
+
+err_mac_destroy:
+	ath12k_mac_destroy(ag);
+
+	return ret;
+}
+
 static int ath12k_core_start_firmware(struct ath12k_base *ab,
 				      enum ath12k_firmware_mode mode)
 {
@@ -874,11 +1184,92 @@
 	return ret;
 }
 
+static inline
+bool ath12k_core_hw_group_start_ready(struct ath12k_hw_group *ag)
+{
+	lockdep_assert_held(&ag->mutex);
+
+	return (ag->num_started == ag->num_devices);
+}
+
+static void ath12k_fw_stats_pdevs_free(struct list_head *head)
+{
+	struct ath12k_fw_stats_pdev *i, *tmp;
+
+	list_for_each_entry_safe(i, tmp, head, list) {
+		list_del(&i->list);
+		kfree(i);
+	}
+}
+
+void ath12k_fw_stats_bcn_free(struct list_head *head)
+{
+	struct ath12k_fw_stats_bcn *i, *tmp;
+
+	list_for_each_entry_safe(i, tmp, head, list) {
+		list_del(&i->list);
+		kfree(i);
+	}
+}
+
+static void ath12k_fw_stats_vdevs_free(struct list_head *head)
+{
+	struct ath12k_fw_stats_vdev *i, *tmp;
+
+	list_for_each_entry_safe(i, tmp, head, list) {
+		list_del(&i->list);
+		kfree(i);
+	}
+}
+
+void ath12k_fw_stats_init(struct ath12k *ar)
+{
+	INIT_LIST_HEAD(&ar->fw_stats.vdevs);
+	INIT_LIST_HEAD(&ar->fw_stats.pdevs);
+	INIT_LIST_HEAD(&ar->fw_stats.bcn);
+	init_completion(&ar->fw_stats_complete);
+}
+
+void ath12k_fw_stats_free(struct ath12k_fw_stats *stats)
+{
+	ath12k_fw_stats_pdevs_free(&stats->pdevs);
+	ath12k_fw_stats_vdevs_free(&stats->vdevs);
+	ath12k_fw_stats_bcn_free(&stats->bcn);
+}
+
+void ath12k_fw_stats_reset(struct ath12k *ar)
+{
+	spin_lock_bh(&ar->data_lock);
+	ar->fw_stats.fw_stats_done = false;
+	ath12k_fw_stats_free(&ar->fw_stats);
+	spin_unlock_bh(&ar->data_lock);
+}
+
+static void ath12k_core_trigger_partner(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_base *partner_ab;
+	bool found = false;
+	int i;
+
+	for (i = 0; i < ag->num_devices; i++) {
+		partner_ab = ag->ab[i];
+		if (!partner_ab)
+			continue;
+
+		if (found)
+			ath12k_qmi_trigger_host_cap(partner_ab);
+
+		found = (partner_ab == ab);
+	}
+}
+
 int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab)
 {
-	int ret;
+	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
+	int ret, i;
 
-	ret = ath12k_core_start_firmware(ab, ATH12K_FIRMWARE_MODE_NORMAL);
+	ret = ath12k_core_start_firmware(ab, ab->fw_mode);
 	if (ret) {
 		ath12k_err(ab, "failed to start firmware: %d\n", ret);
 		return ret;
@@ -896,41 +1287,54 @@
 		goto err_firmware_stop;
 	}
 
+	mutex_lock(&ag->mutex);
 	mutex_lock(&ab->core_lock);
-	ret = ath12k_core_start(ab, ATH12K_FIRMWARE_MODE_NORMAL);
+
+	ret = ath12k_core_start(ab);
 	if (ret) {
 		ath12k_err(ab, "failed to start core: %d\n", ret);
 		goto err_dp_free;
 	}
 
-	ret = ath12k_core_pdev_create(ab);
+	mutex_unlock(&ab->core_lock);
+
+	if (ath12k_core_hw_group_start_ready(ag)) {
+		ret = ath12k_core_hw_group_start(ag);
 	if (ret) {
-		ath12k_err(ab, "failed to create pdev core: %d\n", ret);
+			ath12k_warn(ab, "unable to start hw group\n");
 		goto err_core_stop;
 	}
-	ath12k_hif_irq_enable(ab);
-
-	ret = ath12k_core_rfkill_config(ab);
-	if (ret && ret != -EOPNOTSUPP) {
-		ath12k_err(ab, "failed to config rfkill: %d\n", ret);
-		goto err_core_pdev_destroy;
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group %d started\n", ag->id);
+	} else {
+		ath12k_core_trigger_partner(ab);
 	}
 
-	mutex_unlock(&ab->core_lock);
+	mutex_unlock(&ag->mutex);
 
 	return 0;
 
-err_core_pdev_destroy:
-	ath12k_core_pdev_destroy(ab);
 err_core_stop:
+	for (i = ag->num_devices - 1; i >= 0; i--) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		mutex_lock(&ab->core_lock);
 	ath12k_core_stop(ab);
-	ath12k_mac_destroy(ab);
+		mutex_unlock(&ab->core_lock);
+	}
+	mutex_unlock(&ag->mutex);
+	goto exit;
+
 err_dp_free:
 	ath12k_dp_free(ab);
 	mutex_unlock(&ab->core_lock);
+	mutex_unlock(&ag->mutex);
+
 err_firmware_stop:
 	ath12k_qmi_firmware_stop(ab);
 
+exit:
 	return ret;
 }
 
@@ -972,6 +1376,7 @@
 static void ath12k_rfkill_work(struct work_struct *work)
 {
 	struct ath12k_base *ab = container_of(work, struct ath12k_base, rfkill_work);
+	struct ath12k_hw_group *ag = ab->ag;
 	struct ath12k *ar;
 	struct ath12k_hw *ah;
 	struct ieee80211_hw *hw;
@@ -982,8 +1387,8 @@
 	rfkill_radio_on = ab->rfkill_radio_on;
 	spin_unlock_bh(&ab->base_lock);
 
-	for (i = 0; i < ab->num_hw; i++) {
-		ah = ab->ah[i];
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ath12k_ag_to_ah(ag, i);
 		if (!ah)
 			continue;
 
@@ -1023,6 +1428,7 @@
 
 static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab)
 {
+	struct ath12k_hw_group *ag = ab->ag;
 	struct ath12k *ar;
 	struct ath12k_hw *ah;
 	int i, j;
@@ -1034,10 +1440,23 @@
 	if (ab->is_reset)
 		set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
 
-	for (i = 0; i < ab->num_hw; i++) {
-		ah = ab->ah[i];
-		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ath12k_ag_to_ah(ag, i);
+		if (!ah || ah->state == ATH12K_HW_STATE_OFF ||
+		    ah->state == ATH12K_HW_STATE_TM)
+			continue;
+
+		wiphy_lock(ah->hw->wiphy);
+
+		/* If queue 0 is stopped, it is safe to assume that all
+		 * other queues are stopped by driver via
+		 * ieee80211_stop_queues() below. This means, there is
+		 * no need to stop it again and hence continue
+		 */
+		if (ieee80211_queue_stopped(ah->hw, 0)) {
+			wiphy_unlock(ah->hw->wiphy);
 			continue;
+		}
 
 		ieee80211_stop_queues(ah->hw);
 
@@ -1054,13 +1473,21 @@
 			complete(&ar->vdev_setup_done);
 			complete(&ar->vdev_delete_done);
 			complete(&ar->bss_survey_done);
+			complete(&ar->thermal.wmi_sync);
 
 			wake_up(&ar->dp.tx_empty_waitq);
 			idr_for_each(&ar->txmgmt_idr,
 				     ath12k_mac_tx_mgmt_pending_free, ar);
 			idr_destroy(&ar->txmgmt_idr);
 			wake_up(&ar->txmgmt_empty_waitq);
+
+			ar->monitor_vdev_id = -1;
+			ar->monitor_conf_enabled = false;
+			ar->monitor_vdev_created = false;
+			ar->monitor_started = false;
 		}
+
+		wiphy_unlock(ah->hw->wiphy);
 	}
 
 	wake_up(&ab->wmi_ab.tx_credits_wq);
@@ -1069,12 +1496,13 @@
 
 static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab)
 {
+	struct ath12k_hw_group *ag = ab->ag;
 	struct ath12k_hw *ah;
 	struct ath12k *ar;
 	int i, j;
 
-	for (i = 0; i < ab->num_hw; i++) {
-		ah = ab->ah[i];
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ath12k_ag_to_ah(ag, i);
 		if (!ah || ah->state == ATH12K_HW_STATE_OFF)
 			continue;
 
@@ -1105,6 +1533,9 @@
 			ath12k_warn(ab,
 				    "device is wedged, will not restart hw %d\n", i);
 			break;
+		case ATH12K_HW_STATE_TM:
+			ath12k_warn(ab, "fw mode reset done radio %d\n", i);
+			break;
 		}
 
 		mutex_unlock(&ah->hw_mutex);
@@ -1117,6 +1548,7 @@
 static void ath12k_core_restart(struct work_struct *work)
 {
 	struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work);
+	struct ath12k_hw_group *ag = ab->ag;
 	struct ath12k_hw *ah;
 	int ret, i;
 
@@ -1127,22 +1559,41 @@
 	}
 
 	if (ab->is_reset) {
-		for (i = 0; i < ab->num_hw; i++) {
-			ah = ab->ah[i];
+		if (!test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+			atomic_dec(&ab->reset_count);
+			complete(&ab->reset_complete);
+			ab->is_reset = false;
+			atomic_set(&ab->fail_cont_count, 0);
+			ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset success\n");
+		}
+
+		mutex_lock(&ab->ag->mutex);
+
+		if (!ath12k_core_hw_group_start_ready(ab->ag)) {
+			mutex_unlock(&ab->ag->mutex);
+			goto exit;
+		}
+
+		for (i = 0; i < ag->num_hw; i++) {
+			ah = ath12k_ag_to_ah(ab->ag, i);
 			ieee80211_restart_hw(ah->hw);
 		}
+
+		mutex_unlock(&ab->ag->mutex);
 	}
 
+exit:
 	complete(&ab->restart_completed);
 }
 
 static void ath12k_core_reset(struct work_struct *work)
 {
 	struct ath12k_base *ab = container_of(work, struct ath12k_base, reset_work);
-	int reset_count, fail_cont_count;
+	struct ath12k_hw_group *ag = ab->ag;
+	int reset_count, fail_cont_count, i;
 	long time_left;
 
-	if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))) {
+	if (!(test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags))) {
 		ath12k_warn(ab, "ignore reset dev flags 0x%lx\n", ab->dev_flags);
 		return;
 	}
@@ -1198,11 +1649,135 @@
 	ath12k_hif_ce_irq_disable(ab);
 
 	ath12k_hif_power_down(ab, false);
-	ath12k_hif_power_up(ab);
 
+	/* prepare for power up */
+	ab->qmi.num_radios = U8_MAX;
+
+	mutex_lock(&ag->mutex);
+	ath12k_core_to_group_ref_put(ab);
+
+	if (ag->num_started > 0) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT,
+			   "waiting for %d partner device(s) to reset\n",
+			   ag->num_started);
+		mutex_unlock(&ag->mutex);
+		return;
+	}
+
+	/* Prepare MLO global memory region for power up */
+	ath12k_qmi_reset_mlo_mem(ag);
+
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		ath12k_hif_power_up(ab);
 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset started\n");
 }
 
+	mutex_unlock(&ag->mutex);
+}
+
+static int load_board_id_override(struct ath12k_base *ab)
+{
+	struct ath12k_bid_override *ov, *tmp;
+	const struct firmware *fw;
+	const char *p, *end;
+	size_t len;
+	int ret, count;
+
+	fw = ath12k_core_firmware_request(ab, ATH12K_BOARD_OVERRIDE_FILE);
+	if (IS_ERR(fw)) {
+		/* file is optional */
+		if (PTR_ERR(fw) == -ENOENT)
+			return 0;
+		return PTR_ERR(fw);
+	}
+
+	/* format is <pci_path>=<board_id> [...] */
+	p = fw->data;
+	len = fw->size;
+	end = p + len;
+	count = 0;
+
+	while (1) {
+		const char *pstart;
+		char *ppath, *pbid, endc;
+		unsigned int seg, bus, slot, func;
+		u16 board_id;
+
+		while (p != end && isspace(*p))
+			p++;
+		if (p == end)
+			break;
+
+		pstart = p;
+		while (p != end && !isspace(*p))
+			p++;
+
+		if (p == end) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		ppath = kstrndup(pstart, p - pstart, GFP_KERNEL);
+		if (!ppath) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		pbid = strchr(ppath, '=');
+		if (!pbid) {
+			ath12k_err(ab, "bad key=value in override file\n");
+			ret = -EINVAL;
+			kfree(ppath);
+			goto fail;
+		}
+
+		*pbid++ = 0;
+
+		ret = sscanf(ppath, "pci:%x:%x:%x.%x%c", &seg, &bus, &slot,
+			     &func, &endc);
+		if (ret != 4) {
+			ath12k_err(ab, "invalid pci dev in override file\n");
+			kfree(ppath);
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		if (kstrtou16(pbid, 0, &board_id)) {
+			ath12k_err(ab, "invalid board-id in override file\n");
+			kfree(ppath);
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		ov = kzalloc(sizeof (*ov), GFP_KERNEL);
+		ov->domain = seg;
+		ov->bus_nr = bus;
+		ov->slot = slot;
+		ov->func = func;
+		ov->board_id = board_id;
+		list_add_tail(&ov->next, &ab->board_id_overrides);
+		count++;
+	}
+
+	if (count)
+		ath12k_info(ab, "loaded %d entries from board-id "
+			    "override file\n", count);
+	release_firmware(fw);
+	return 0;
+
+fail:
+	ath12k_err(ab, "invalid board-id override file content\n");
+	release_firmware(fw);
+	list_for_each_entry_safe(ov, tmp, &ab->board_id_overrides, next)
+		kfree(ov);
+	INIT_LIST_HEAD(&ab->board_id_overrides);
+	return ret;
+}
+
 int ath12k_core_pre_init(struct ath12k_base *ab)
 {
 	int ret;
@@ -1213,6 +1788,10 @@
 		return ret;
 	}
 
+	ret = load_board_id_override(ab);
+	if (ret)
+		return ret;
+
 	ath12k_fw_map(ab);
 
 	return 0;
@@ -1235,51 +1814,453 @@
 					      &ab->panic_nb);
 }
 
-static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
+/* Made non-static: now declared in core.h so other parts of the driver
+ * can unregister the panic notifier directly.
+ */
+void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab)
 {
 	atomic_notifier_chain_unregister(&panic_notifier_list,
 					 &ab->panic_nb);
 }
 
-int ath12k_core_init(struct ath12k_base *ab)
+static inline
+bool ath12k_core_hw_group_create_ready(struct ath12k_hw_group *ag)
+{
+	lockdep_assert_held(&ag->mutex);
+
+	/* The group may be created once every advertised device has probed. */
+	return ag->num_probed == ag->num_devices;
+}
+
+static struct ath12k_hw_group *ath12k_core_hw_group_alloc(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *grp, *iter;
+	int nr_groups = 0;
+
+	lockdep_assert_held(&ath12k_hw_group_mutex);
+
+	/* Ids are handed out sequentially: the new group's id equals the
+	 * number of groups that already exist.
+	 */
+	list_for_each_entry(iter, &ath12k_hw_group_list, list)
+		nr_groups++;
+
+	grp = kzalloc(sizeof(*grp), GFP_KERNEL);
+	if (!grp)
+		return NULL;
+
+	grp->id = nr_groups;
+	grp->mlo_capable = false;
+	mutex_init(&grp->mutex);
+	list_add(&grp->list, &ath12k_hw_group_list);
+
+	return grp;
+}
+
+static void ath12k_core_hw_group_free(struct ath12k_hw_group *ag)
+{
+	/* Unlink from the global group list, then release the memory; once
+	 * delisted nothing can reach the group anymore.
+	 */
+	mutex_lock(&ath12k_hw_group_mutex);
+	list_del(&ag->list);
+	mutex_unlock(&ath12k_hw_group_mutex);
+
+	kfree(ag);
+}
+
+static struct ath12k_hw_group *ath12k_core_hw_group_find_by_dt(struct ath12k_base *ab)
+{
+	struct device_node *node = ab->dev->of_node;
+	struct ath12k_hw_group *ag;
+	int i;
+
+	if (!node)
+		return NULL;
+
+	/* A device belongs to a group iff its OF node was recorded in that
+	 * group's WSI node table.
+	 */
+	list_for_each_entry(ag, &ath12k_hw_group_list, list) {
+		for (i = 0; i < ag->num_devices; i++) {
+			if (ag->wsi_node[i] == node)
+				return ag;
+		}
+	}
+
+	return NULL;
+}
+
+static int ath12k_core_get_wsi_info(struct ath12k_hw_group *ag,
+				    struct ath12k_base *ab)
+{
+	struct device_node *wsi_dev = ab->dev->of_node, *next_wsi_dev;
+	struct device_node *tx_endpoint, *next_rx_endpoint;
+	int device_count = 0;
+
+	next_wsi_dev = wsi_dev;
+
+	if (!next_wsi_dev)
+		return -ENODEV;
+
+	/* Walk the WSI ring described in DT: each node's tx endpoint
+	 * (port 0) points at the next node's rx endpoint; the walk ends
+	 * when it wraps back to the starting node.
+	 */
+	do {
+		/* Bounds-check BEFORE the store: ag->wsi_node[] has room for
+		 * ATH12K_MAX_DEVICES entries, and checking only after the
+		 * store (as previously done) allowed a one-slot out-of-bounds
+		 * write when the DT ring is longer than supported.
+		 */
+		if (device_count >= ATH12K_MAX_DEVICES) {
+			ath12k_warn(ab, "device count in DT %d is more than limit %d\n",
+				    device_count + 1, ATH12K_MAX_DEVICES);
+			of_node_put(next_wsi_dev);
+			return -EINVAL;
+		}
+
+		/* NOTE(review): stored without taking a reference (as before);
+		 * assumes the DT nodes outlive the group — verify.
+		 */
+		ag->wsi_node[device_count] = next_wsi_dev;
+
+		tx_endpoint = of_graph_get_endpoint_by_regs(next_wsi_dev, 0, -1);
+		if (!tx_endpoint) {
+			of_node_put(next_wsi_dev);
+			return -ENODEV;
+		}
+
+		next_rx_endpoint = of_graph_get_remote_endpoint(tx_endpoint);
+		if (!next_rx_endpoint) {
+			of_node_put(next_wsi_dev);
+			of_node_put(tx_endpoint);
+			return -ENODEV;
+		}
+
+		of_node_put(tx_endpoint);
+		of_node_put(next_wsi_dev);
+
+		next_wsi_dev = of_graph_get_port_parent(next_rx_endpoint);
+		if (!next_wsi_dev) {
+			of_node_put(next_rx_endpoint);
+			return -ENODEV;
+		}
+
+		of_node_put(next_rx_endpoint);
+
+		device_count++;
+	} while (wsi_dev != next_wsi_dev);
+
+	of_node_put(next_wsi_dev);
+	ag->num_devices = device_count;
+
+	return 0;
+}
+
+static int ath12k_core_get_wsi_index(struct ath12k_hw_group *ag,
+				     struct ath12k_base *ab)
+{
+	int wsi_controller_index = -1, node_index = -1;
+	int i;
+
+	/* Locate both this device's node and the ring-controller node. */
+	for (i = 0; i < ag->num_devices; i++) {
+		if (of_property_read_bool(ag->wsi_node[i], "qcom,wsi-controller"))
+			wsi_controller_index = i;
+
+		if (ag->wsi_node[i] == ab->dev->of_node)
+			node_index = i;
+	}
+
+	if (wsi_controller_index < 0) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi controller is not defined in dt");
+		return -EINVAL;
+	}
+
+	if (node_index < 0) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "unable to get WSI node index");
+		return -EINVAL;
+	}
+
+	/* Index is the hop distance from the controller, modulo ring size. */
+	ab->wsi_info.index = (ag->num_devices + node_index - wsi_controller_index) %
+		ag->num_devices;
+
+	return 0;
+}
+
+/* Find or create the hw group for a newly probed device and record the
+ * device in it.  On success ab->ag and ab->device_id are set and the
+ * group is returned; NULL is returned on allocation failure.  Caller
+ * holds ath12k_hw_group_mutex.
+ */
+static struct ath12k_hw_group *ath12k_core_hw_group_assign(struct ath12k_base *ab)
+{
+	struct ath12k_wsi_info *wsi = &ab->wsi_info;
+	struct ath12k_hw_group *ag;
+
+	lockdep_assert_held(&ath12k_hw_group_mutex);
+
+	/* FTM mode never groups devices. */
+	if (ath12k_ftm_mode)
+		goto invalid_group;
+
+	/* The grouping of multiple devices will be done based on device tree file.
+	 * The platforms that do not have any valid group information would have
+	 * each device to be part of its own invalid group.
+	 *
+	 * We use group id ATH12K_INVALID_GROUP_ID for single device group
+	 * which didn't have dt entry or wrong dt entry, there could be many
+	 * groups with same group id, i.e ATH12K_INVALID_GROUP_ID. So
+	 * default group id of ATH12K_INVALID_GROUP_ID combined with
+	 * num devices in ath12k_hw_group determines if the group is
+	 * multi device or single device group
+	 */
+
+	ag = ath12k_core_hw_group_find_by_dt(ab);
+	if (!ag) {
+		ag = ath12k_core_hw_group_alloc(ab);
+		if (!ag) {
+			ath12k_warn(ab, "unable to create new hw group\n");
+			return NULL;
+		}
+
+		/* No/invalid WSI data in DT: demote the fresh group to a
+		 * single-device group.
+		 */
+		if (ath12k_core_get_wsi_info(ag, ab) ||
+		    ath12k_core_get_wsi_index(ag, ab)) {
+			ath12k_dbg(ab, ATH12K_DBG_BOOT,
+				   "unable to get wsi info from dt, grouping single device");
+			ag->id = ATH12K_INVALID_GROUP_ID;
+			ag->num_devices = 1;
+			memset(ag->wsi_node, 0, sizeof(ag->wsi_node));
+			wsi->index = 0;
+		}
+
+		goto exit;
+	} else if (test_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "group id %d in unregister state\n",
+			   ag->id);
+		goto invalid_group;
+	} else {
+		if (ath12k_core_get_wsi_index(ag, ab))
+			goto invalid_group;
+		goto exit;
+	}
+
+invalid_group:
+	ag = ath12k_core_hw_group_alloc(ab);
+	if (!ag) {
+		ath12k_warn(ab, "unable to create new hw group\n");
+		return NULL;
+	}
+
+	ag->id = ATH12K_INVALID_GROUP_ID;
+	ag->num_devices = 1;
+	wsi->index = 0;
+
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "single device added to hardware group\n");
+
+exit:
+	/* NOTE(review): reaching here with a full group (num_probed ==
+	 * num_devices) jumps back to invalid_group and allocates a fresh
+	 * single-device group; the full group stays on
+	 * ath12k_hw_group_list.
+	 */
+	if (ag->num_probed >= ag->num_devices) {
+		ath12k_warn(ab, "unable to add new device to group, max limit reached\n");
+		goto invalid_group;
+	}
+
+	ab->device_id = ag->num_probed++;
+	ag->ab[ab->device_id] = ab;
+	ab->ag = ag;
+
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "wsi group-id %d num-devices %d index %d",
+		   ag->id, ag->num_devices, wsi->index);
+
+	return ag;
+}
+
+/* Detach ab from its hw group and drop the group's probed count; the
+ * group itself is freed once the last device has been unassigned.
+ */
+void ath12k_core_hw_group_unassign(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ath12k_ab_to_ag(ab);
+	u8 device_id = ab->device_id;
+	int num_probed;
+
+	if (!ag)
+		return;
+
+	mutex_lock(&ag->mutex);
+
+	/* Sanity: this slot must really belong to ab. */
+	if (WARN_ON(device_id >= ag->num_devices)) {
+		mutex_unlock(&ag->mutex);
+		return;
+	}
+
+	if (WARN_ON(ag->ab[device_id] != ab)) {
+		mutex_unlock(&ag->mutex);
+		return;
+	}
+
+	ag->ab[device_id] = NULL;
+	ab->ag = NULL;
+	ab->device_id = ATH12K_INVALID_DEVICE_ID;
+
+	if (ag->num_probed)
+		ag->num_probed--;
+
+	num_probed = ag->num_probed;
+
+	/* Free outside ag->mutex (the free path takes the global group
+	 * mutex to delist).  NOTE(review): a concurrent probe could in
+	 * principle look the group up via DT between the unlock and the
+	 * free — assumed not to happen in practice; verify.
+	 */
+	mutex_unlock(&ag->mutex);
+
+	if (!num_probed)
+		ath12k_core_hw_group_free(ag);
+}
+
+static void ath12k_core_hw_group_destroy(struct ath12k_hw_group *ag)
+{
+	int i;
+
+	if (WARN_ON(!ag))
+		return;
+
+	/* Tear down SoC state for every device still attached to the
+	 * group, holding each device's core_lock across its teardown.
+	 */
+	mutex_lock(&ag->mutex);
+
+	for (i = 0; i < ag->num_devices; i++) {
+		struct ath12k_base *ab = ag->ab[i];
+
+		if (!ab)
+			continue;
+
+		mutex_lock(&ab->core_lock);
+		ath12k_core_soc_destroy(ab);
+		mutex_unlock(&ab->core_lock);
+	}
+
+	mutex_unlock(&ag->mutex);
+}
+
+static void ath12k_core_hw_group_cleanup(struct ath12k_hw_group *ag)
+{
+	int i;
+
+	if (!ag)
+		return;
+
+	mutex_lock(&ag->mutex);
+
+	/* Run the cleanup only once per group: the first caller sets the
+	 * UNREGISTER flag, later callers bail out.
+	 */
+	if (test_and_set_bit(ATH12K_GROUP_FLAG_UNREGISTER, &ag->flags)) {
+		mutex_unlock(&ag->mutex);
+		return;
+	}
+
+	ath12k_core_hw_group_stop(ag);
+
+	for (i = 0; i < ag->num_devices; i++) {
+		struct ath12k_base *ab = ag->ab[i];
+
+		if (!ab)
+			continue;
+
+		mutex_lock(&ab->core_lock);
+		ath12k_core_stop(ab);
+		mutex_unlock(&ab->core_lock);
+	}
+
+	mutex_unlock(&ag->mutex);
+}
+
+/* Run ath12k_core_soc_create() for every probed device of the group,
+ * each under its own core_lock.  Stops and returns the error on the
+ * first failure; partially created devices are torn down by the caller
+ * via ath12k_core_hw_group_destroy().
+ */
+static int ath12k_core_hw_group_create(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int i, ret;
+
+	lockdep_assert_held(&ag->mutex);
+
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		mutex_lock(&ab->core_lock);
 
 	ret = ath12k_core_soc_create(ab);
 	if (ret) {
+			mutex_unlock(&ab->core_lock);
 		ath12k_err(ab, "failed to create soc core: %d\n", ret);
 		return ret;
 	}
 
+		mutex_unlock(&ab->core_lock);
+	}
+
+	return 0;
+}
+
+/* Decide whether the whole group is MLO-capable: requires the
+ * ath12k_mlo_enable module knob and ATH12K_FW_FEATURE_MLO from every
+ * member device's firmware.  A single-device group whose
+ * hw_params->def_num_link is non-zero returns early, leaving
+ * mlo_capable untouched.
+ */
+void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int i;
+
+	if (ath12k_ftm_mode)
+		return;
+
+	lockdep_assert_held(&ag->mutex);
+
+	/* If more than one devices are grouped, then inter MLO
+	 * functionality can work still independent of whether internally
+	 * each device supports single_chip_mlo or not.
+	 * Only when there is one device, then disable for WCN chipsets
+	 * till the required driver implementation is in place.
+	 */
+	if (ag->num_devices == 1) {
+		ab = ag->ab[0];
+
+		/* WCN chipsets do not advertise this in firmware features,
+		 * hence skip checking
+		 */
+		if (ab->hw_params->def_num_link)
+			return;
+	}
+
+	ag->mlo_capable = ath12k_mlo_enable;
+
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		/* even if 1 device's firmware feature indicates MLO
+		 * unsupported, make MLO unsupported for the whole group
+		 */
+		if (!test_bit(ATH12K_FW_FEATURE_MLO, ab->fw.fw_features)) {
+			ag->mlo_capable = false;
+			return;
+		}
+	}
+}
+
+/* Per-device init entry point: register the panic notifier, join or
+ * create the hw group, and create the group's SoC state once the last
+ * expected device has probed.
+ */
+int ath12k_core_init(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag;
+	int ret;
+
 	ret = ath12k_core_panic_notifier_register(ab);
 	if (ret)
 		ath12k_warn(ab, "failed to register panic handler: %d\n", ret);
 
-	return 0;
+	mutex_lock(&ath12k_hw_group_mutex);
+
+	ag = ath12k_core_hw_group_assign(ab);
+	if (!ag) {
+		mutex_unlock(&ath12k_hw_group_mutex);
+		ath12k_warn(ab, "unable to get hw group\n");
+		return -ENODEV;
 }
 
-void ath12k_core_deinit(struct ath12k_base *ab)
-{
-	ath12k_core_panic_notifier_unregister(ab);
+	mutex_unlock(&ath12k_hw_group_mutex);
 
-	mutex_lock(&ab->core_lock);
+	mutex_lock(&ag->mutex);
 
-	ath12k_core_pdev_destroy(ab);
-	ath12k_core_stop(ab);
+	ath12k_dbg(ab, ATH12K_DBG_BOOT, "num devices %d num probed %d\n",
+		   ag->num_devices, ag->num_probed);
 
-	mutex_unlock(&ab->core_lock);
+	/* Only the last-probed device of the group performs the create. */
+	if (ath12k_core_hw_group_create_ready(ag)) {
+		ret = ath12k_core_hw_group_create(ag);
+		if (ret) {
+			mutex_unlock(&ag->mutex);
+			ath12k_warn(ab, "unable to create hw group\n");
+			goto err;
+		}
+	}
 
-	ath12k_hif_power_down(ab, false);
-	ath12k_mac_destroy(ab);
-	ath12k_core_soc_destroy(ab);
-	ath12k_fw_unmap(ab);
+	mutex_unlock(&ag->mutex);
+
+	return 0;
+
+	/* NOTE(review): this error path leaves the panic notifier
+	 * registered; confirm callers always follow a failed init with a
+	 * teardown that unregisters it, otherwise unregister it here.
+	 */
+err:
+	ath12k_core_hw_group_destroy(ab->ag);
+	ath12k_core_hw_group_unassign(ab);
+	return ret;
+}
+
+/* Mirror of ath12k_core_init(): unregister the panic notifier, then
+ * stop (cleanup), destroy and unassign this device's hw group state.
+ */
+void ath12k_core_deinit(struct ath12k_base *ab)
+{
+	ath12k_core_panic_notifier_unregister(ab);
+	ath12k_core_hw_group_cleanup(ab->ag);
+	ath12k_core_hw_group_destroy(ab->ag);
+	/* drops ab from the group and frees the group when empty */
+	ath12k_core_hw_group_unassign(ab);
+}
 
 void ath12k_core_free(struct ath12k_base *ab)
 {
+	struct ath12k_bid_override *ov, *tmp;
+
 	timer_delete_sync(&ab->rx_replenish_retry);
 	destroy_workqueue(ab->workqueue_aux);
 	destroy_workqueue(ab->workqueue);
+	/* ab is freed right below, so the override entries only need
+	 * kfree(); no list_del() is required.
+	 */
+	list_for_each_entry_safe(ov, tmp, &ab->board_id_overrides, next)
+		kfree(ov);
 	kfree(ab);
 }
 
@@ -1306,6 +2287,7 @@
 	spin_lock_init(&ab->base_lock);
 	init_completion(&ab->reset_complete);
 
+	INIT_LIST_HEAD(&ab->board_id_overrides);
 	INIT_LIST_HEAD(&ab->peers);
 	init_waitqueue_head(&ab->peer_mapping_wq);
 	init_waitqueue_head(&ab->wmi_ab.tx_credits_wq);
@@ -1322,7 +2304,6 @@
 	ab->dev = dev;
 	ab->hif.bus = bus;
 	ab->qmi.num_radios = U8_MAX;
-	ab->mlo_capable_flags = ATH12K_INTRA_DEVICE_MLO_SUPPORT;
 
 	/* Device index used to identify the devices in a group.
 	 *
@@ -1343,5 +2324,121 @@
 	return NULL;
 }
 
+void ath12k_cache_cleanup(void)
+{
+	struct ath12k_cache_entry *entry, *tmp;
+
+	/* Drain both recycle lists, returning every buffer to the system. */
+	mutex_lock(&ath12k_cache_mutex);
+
+	list_for_each_entry_safe(entry, tmp, &ath12k_dma_cache, next) {
+		list_del(&entry->next);
+		dma_free_coherent_no_dev(entry->size, entry->vaddr, entry->paddr);
+		kfree(entry);
+	}
+
+	list_for_each_entry_safe(entry, tmp, &ath12k_mem_cache, next) {
+		list_del(&entry->next);
+		kfree(entry->vaddr);
+		kfree(entry);
+	}
+
+	mutex_unlock(&ath12k_cache_mutex);
+}
+
+void* ath12k_dma_alloc_coherent_no_dev(size_t sz, dma_addr_t *paddr, int flags)
+{
+	struct ath12k_cache_entry *entry, *match = NULL;
+
+	/* Try to recycle a previously freed buffer of the exact same size. */
+	mutex_lock(&ath12k_cache_mutex);
+	list_for_each_entry(entry, &ath12k_dma_cache, next) {
+		if (entry->size == sz) {
+			match = entry;
+			list_del(&entry->next);
+			break;
+		}
+	}
+	mutex_unlock(&ath12k_cache_mutex);
+
+	if (match) {
+		void *vaddr = match->vaddr;
+
+		*paddr = match->paddr;
+		kfree(match);
+		/* zeroed, like a fresh coherent allocation */
+		memset(vaddr, 0, sz);
+		return vaddr;
+	}
+
+	return dma_alloc_coherent_no_dev(sz, paddr, flags);
+}
+
+void ath12k_dma_free_coherent_no_dev(size_t sz, void *vaddr, dma_addr_t paddr)
+{
+	struct ath12k_cache_entry *entry;
+
+	if (!paddr)
+		return;
+
+	/* Park the buffer on the recycle list instead of freeing it; if
+	 * the bookkeeping node cannot be allocated, free it for real.
+	 */
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		dma_free_coherent_no_dev(sz, vaddr, paddr);
+		return;
+	}
+
+	entry->vaddr = vaddr;
+	entry->paddr = paddr;
+	entry->size = sz;
+
+	mutex_lock(&ath12k_cache_mutex);
+	list_add_tail(&entry->next, &ath12k_dma_cache);
+	mutex_unlock(&ath12k_cache_mutex);
+}
+
+void *ath12k_kzalloc_cache(size_t sz, int flags)
+{
+	struct ath12k_cache_entry *entry, *match = NULL;
+
+	/* Reuse a cached allocation of the exact requested size, if any. */
+	mutex_lock(&ath12k_cache_mutex);
+	list_for_each_entry(entry, &ath12k_mem_cache, next) {
+		if (entry->size == sz) {
+			match = entry;
+			list_del(&entry->next);
+			break;
+		}
+	}
+	mutex_unlock(&ath12k_cache_mutex);
+
+	if (match) {
+		void *vaddr = match->vaddr;
+
+		kfree(match);
+		/* zero the recycled buffer so behaviour matches kzalloc() */
+		memset(vaddr, 0, sz);
+		return vaddr;
+	}
+
+	return kzalloc(sz, flags);
+}
+
+void ath12k_kfree_cache(size_t sz, void *vaddr)
+{
+	struct ath12k_cache_entry *entry;
+
+	if (!vaddr)
+		return;
+
+	/* Keep the buffer around for later reuse; on bookkeeping
+	 * allocation failure simply free it now.
+	 */
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		kfree(vaddr);
+		return;
+	}
+
+	entry->vaddr = vaddr;
+	entry->size = sz;
+
+	mutex_lock(&ath12k_cache_mutex);
+	list_add_tail(&entry->next, &ath12k_mem_cache);
+	mutex_unlock(&ath12k_cache_mutex);
+}
+
 MODULE_DESCRIPTION("Core module for Qualcomm Atheros 802.11be wireless LAN cards.");
 MODULE_LICENSE("Dual BSD/GPL");
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/core.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/core.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/core.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/core.h	2025-09-29 14:23:07.605732410 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_CORE_H
@@ -15,6 +15,8 @@
 #include <linux/ctype.h>
 #include <linux/firmware.h>
 #include <linux/panic_notifier.h>
+#include <linux/average.h>
+#include <linux/dma-mapping.h>
 #include "qmi.h"
 #include "htc.h"
 #include "wmi.h"
@@ -25,6 +27,7 @@
 #include "hw.h"
 #include "hal_rx.h"
 #include "reg.h"
+#include "thermal.h"
 #include "dbring.h"
 #include "fw.h"
 #include "acpi.h"
@@ -52,8 +55,6 @@
 
 #define ATH12K_INVALID_HW_MAC_ID	0xFF
 #define ATH12K_CONNECTION_LOSS_HZ	(3 * HZ)
-#define	ATH12K_RX_RATE_TABLE_NUM	320
-#define	ATH12K_RX_RATE_TABLE_11AX_NUM	576
 
 #define ATH12K_MON_TIMER_INTERVAL  10
 #define ATH12K_RESET_TIMEOUT_HZ			(20 * HZ)
@@ -63,9 +64,19 @@
 #define ATH12K_RECONFIGURE_TIMEOUT_HZ		(10 * HZ)
 #define ATH12K_RECOVER_START_TIMEOUT_HZ		(20 * HZ)
 
+#define ATH12K_MAX_DEVICES 3
+#define ATH12K_GROUP_MAX_RADIO (ATH12K_MAX_DEVICES * MAX_RADIOS)
+#define ATH12K_INVALID_GROUP_ID  0xFF
+#define ATH12K_INVALID_DEVICE_ID 0xFF
+
+#define ATH12K_MAX_MLO_PEERS            256
+#define ATH12K_MLO_PEER_ID_INVALID      0xFFFF
+
+extern unsigned int ath12k_frame_mode;
 enum ath12k_bdf_search {
 	ATH12K_BDF_SEARCH_DEFAULT,
 	ATH12K_BDF_SEARCH_BUS_AND_BOARD,
+	ATH12K_BDF_SEARCH_PCI_SUBSYS_AND_BOARD,
 };
 
 enum wme_ac {
@@ -79,6 +90,7 @@
 #define ATH12K_HT_MCS_MAX	7
 #define ATH12K_VHT_MCS_MAX	9
 #define ATH12K_HE_MCS_MAX	11
+#define ATH12K_EHT_MCS_MAX	15
 
 enum ath12k_crypt_mode {
 	/* Only use hardware crypto engine */
@@ -115,6 +127,7 @@
 	dma_addr_t paddr_ext_desc;
 	u32 cipher;
 	u8 flags;
+	u8 link_id;
 };
 
 struct ath12k_skb_rxcb {
@@ -127,19 +140,22 @@
 	struct hal_rx_desc *rx_desc;
 	u8 err_rel_src;
 	u8 err_code;
-	u8 mac_id;
+	u8 hw_link_id;
 	u8 unmapped;
 	u8 is_frag;
 	u8 tid;
 	u16 peer_id;
+	bool is_end_of_ppdu;
 };
 
 enum ath12k_hw_rev {
-	ATH12K_HW_QCN9274_HW10,
+	ATH12K_HW_QCN9274_HW10 = 0,
 	ATH12K_HW_QCN9274_HW20,
 	ATH12K_HW_WCN7850_HW20
 };
 
+#define ATH12K_DIAG_HW_ID_OFFSET 16
+
 enum ath12k_firmware_mode {
 	/* the default mode, standard 802.11 functionality */
 	ATH12K_FIRMWARE_MODE_NORMAL,
@@ -157,6 +173,7 @@
 	u32 num_irq;
 	u32 grp_id;
 	u64 timestamp;
+	bool napi_enabled;
 	struct napi_struct napi;
 	struct net_device *napi_ndev;
 };
@@ -208,8 +225,13 @@
 	ATH12K_SCAN_ABORTING,
 };
 
+enum ath12k_hw_group_flags {
+	ATH12K_GROUP_FLAG_REGISTERED,
+	ATH12K_GROUP_FLAG_UNREGISTER,
+};
+
 enum ath12k_dev_flags {
-	ATH12K_CAC_RUNNING,
+	ATH12K_FLAG_CAC_RUNNING,
 	ATH12K_FLAG_CRASH_FLUSH,
 	ATH12K_FLAG_RAW_MODE,
 	ATH12K_FLAG_HW_CRYPTO_DISABLED,
@@ -220,6 +242,8 @@
 	ATH12K_FLAG_HTC_SUSPEND_COMPLETE,
 	ATH12K_FLAG_CE_IRQ_ENABLED,
 	ATH12K_FLAG_EXT_IRQ_ENABLED,
+	ATH12K_FLAG_QMI_FW_READY_COMPLETE,
+	ATH12K_FLAG_FTM_SEGMENTED,
 };
 
 struct ath12k_tx_conf {
@@ -262,6 +286,7 @@
 
 	int bank_id;
 	u8 vdev_id_check_en;
+	bool beacon_prot;
 
 	struct wmi_wmm_params_all_arg wmm_params;
 	struct list_head list;
@@ -283,6 +308,12 @@
 	u8 link_id;
 	struct ath12k_vif *ahvif;
 	struct ath12k_rekey_data rekey_data;
+
+	u8 current_cntdown_counter;
+	struct ath12k_link_stats link_stats;
+	spinlock_t link_stats_lock; /* Protects updates to link_stats */
+	bool is_scan_vif;
+	u32 key_cipher;
 };
 
 struct ath12k_vif {
@@ -309,15 +340,16 @@
 	} u;
 
 	u32 aid;
-	u32 key_cipher;
 	u8 tx_encap_type;
 	bool ps;
+	atomic_t mcbc_gsn;
 
 	struct ath12k_link_vif deflink;
-	struct ath12k_link_vif __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
+	struct ath12k_link_vif __rcu *link[ATH12K_NUM_MAX_LINKS];
 	struct ath12k_vif_cache *cache[IEEE80211_MLD_MAX_NUM_LINKS];
 	/* indicates bitmap of link vif created in FW */
-	u16 links_map;
+	u32 links_map;
+	u8 last_scan_link;
 
 	/* Must be last - ends in a flexible-array member.
 	 *
@@ -338,20 +370,20 @@
 #define HAL_RX_MAX_MCS_HT	31
 #define HAL_RX_MAX_MCS_VHT	9
 #define HAL_RX_MAX_MCS_HE	11
+#define HAL_RX_MAX_MCS_BE	15
 #define HAL_RX_MAX_NSS		8
 #define HAL_RX_MAX_NUM_LEGACY_RATES 12
-#define ATH12K_RX_RATE_TABLE_11AX_NUM	576
-#define ATH12K_RX_RATE_TABLE_NUM 320
 
 struct ath12k_rx_peer_rate_stats {
 	u64 ht_mcs_count[HAL_RX_MAX_MCS_HT + 1];
 	u64 vht_mcs_count[HAL_RX_MAX_MCS_VHT + 1];
 	u64 he_mcs_count[HAL_RX_MAX_MCS_HE + 1];
+	u64 be_mcs_count[HAL_RX_MAX_MCS_BE + 1];
 	u64 nss_count[HAL_RX_MAX_NSS];
 	u64 bw_count[HAL_RX_BW_MAX];
 	u64 gi_count[HAL_RX_GI_MAX];
 	u64 legacy_count[HAL_RX_MAX_NUM_LEGACY_RATES];
-	u64 rx_rate[ATH12K_RX_RATE_TABLE_11AX_NUM];
+	u64 rx_rate[HAL_RX_BW_MAX][HAL_RX_GI_MAX][HAL_RX_MAX_NSS][HAL_RX_MAX_MCS_HT + 1];
 };
 
 struct ath12k_rx_peer_stats {
@@ -365,10 +397,6 @@
 	u64 non_ampdu_msdu_count;
 	u64 stbc_count;
 	u64 beamformed_count;
-	u64 mcs_count[HAL_RX_MAX_MCS + 1];
-	u64 nss_count[HAL_RX_MAX_NSS];
-	u64 bw_count[HAL_RX_BW_MAX];
-	u64 gi_count[HAL_RX_GI_MAX];
 	u64 coding_count[HAL_RX_SU_MU_CODING_MAX];
 	u64 tid_count[IEEE80211_NUM_TIDS + 1];
 	u64 pream_cnt[HAL_RX_PREAMBLE_MAX];
@@ -465,10 +493,15 @@
 	u64 wbm_tx_comp_stats[HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX];
 };
 
+DECLARE_EWMA(avg_rssi, 10, 8)
+
 struct ath12k_link_sta {
 	struct ath12k_link_vif *arvif;
 	struct ath12k_sta *ahsta;
 
+	/* link address similar to ieee80211_link_sta */
+	u8 addr[ETH_ALEN];
+
 	/* the following are protected by ar->data_lock */
 	u32 changed; /* IEEE80211_RC_* */
 	u32 bw;
@@ -481,25 +514,50 @@
 	u64 rx_duration;
 	u64 tx_duration;
 	u8 rssi_comb;
+	struct ewma_avg_rssi avg_rssi;
 	u8 link_id;
 	struct ath12k_rx_peer_stats *rx_stats;
 	struct ath12k_wbm_tx_stats *wbm_tx_stats;
 	u32 bw_prev;
+	u32 peer_nss;
+	s8 rssi_beacon;
+
+	/* For now the assoc link will be considered primary */
+	bool is_assoc_link;
+
+	 /* for firmware use only */
+	u8 link_idx;
 };
 
 struct ath12k_sta {
+	struct ath12k_vif *ahvif;
 	enum hal_pn_type pn_type;
 	struct ath12k_link_sta deflink;
 	struct ath12k_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS];
 	/* indicates bitmap of link sta created in FW */
 	u16 links_map;
-};
-
-#define ATH12K_MIN_5G_FREQ 4150
-#define ATH12K_MIN_6G_FREQ 5925
-#define ATH12K_MAX_6G_FREQ 7115
+	u8 assoc_link_id;
+	u16 ml_peer_id;
+	u8 num_peer;
+
+	enum ieee80211_sta_state state;
+};
+
+#define ATH12K_HALF_20MHZ_BW	10
+#define ATH12K_2GHZ_MIN_CENTER	2412
+#define ATH12K_2GHZ_MAX_CENTER	2484
+#define ATH12K_5GHZ_MIN_CENTER	4900
+#define ATH12K_5GHZ_MAX_CENTER	5920
+#define ATH12K_6GHZ_MIN_CENTER	5935
+#define ATH12K_6GHZ_MAX_CENTER	7115
+#define ATH12K_MIN_2GHZ_FREQ	(ATH12K_2GHZ_MIN_CENTER - ATH12K_HALF_20MHZ_BW - 1)
+#define ATH12K_MAX_2GHZ_FREQ	(ATH12K_2GHZ_MAX_CENTER + ATH12K_HALF_20MHZ_BW + 1)
+#define ATH12K_MIN_5GHZ_FREQ	(ATH12K_5GHZ_MIN_CENTER - ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MAX_5GHZ_FREQ	(ATH12K_5GHZ_MAX_CENTER + ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MIN_6GHZ_FREQ	(ATH12K_6GHZ_MIN_CENTER - ATH12K_HALF_20MHZ_BW)
+#define ATH12K_MAX_6GHZ_FREQ	(ATH12K_6GHZ_MAX_CENTER + ATH12K_HALF_20MHZ_BW)
 #define ATH12K_NUM_CHANS 101
-#define ATH12K_MAX_5G_CHAN 173
+#define ATH12K_MAX_5GHZ_CHAN 173
 
 enum ath12k_hw_state {
 	ATH12K_HW_STATE_OFF,
@@ -507,18 +565,26 @@
 	ATH12K_HW_STATE_RESTARTING,
 	ATH12K_HW_STATE_RESTARTED,
 	ATH12K_HW_STATE_WEDGED,
+	ATH12K_HW_STATE_TM,
 	/* Add other states as required */
 };
 
 /* Antenna noise floor */
 #define ATH12K_DEFAULT_NOISE_FLOOR -95
 
+struct ath12k_ftm_event_obj {
+	u32 data_pos;
+	u32 expected_seq;
+	u8 *eventdata;
+};
+
 struct ath12k_fw_stats {
 	u32 pdev_id;
 	u32 stats_id;
 	struct list_head pdevs;
 	struct list_head vdevs;
 	struct list_head bcn;
+	bool fw_stats_done;
 };
 
 struct ath12k_dbg_htt_stats {
@@ -532,6 +598,12 @@
 	struct dentry *debugfs_pdev;
 	struct dentry *debugfs_pdev_symlink;
 	struct ath12k_dbg_htt_stats htt_stats;
+	enum wmi_halphy_ctrl_path_stats_id tpc_stats_type;
+	bool tpc_request;
+	struct completion tpc_complete;
+	struct wmi_tpc_stats_arg *tpc_stats;
+	u32 rx_filter;
+	bool extd_rx_stats;
 };
 
 struct ath12k_per_peer_tx_stats {
@@ -572,9 +644,10 @@
 		struct delayed_work timeout;
 		enum ath12k_scan_state state;
 		bool is_roc;
-		int vdev_id;
 		int roc_freq;
 		bool roc_notify;
+		struct wiphy_work vdev_clean_wk;
+		struct ath12k_link_vif *arvif;
 	} scan;
 
 	struct {
@@ -657,7 +730,7 @@
 
 	struct work_struct regd_update_work;
 
-	struct work_struct wmi_mgmt_tx_work;
+	struct wiphy_work wmi_mgmt_tx_work;
 	struct sk_buff_head wmi_mgmt_tx_queue;
 
 	struct ath12k_wow wow;
@@ -675,19 +748,30 @@
 #endif
 
 	bool dfs_block_radar_events;
+	struct ath12k_thermal thermal;
 	bool monitor_conf_enabled;
 	bool monitor_vdev_created;
 	bool monitor_started;
 	int monitor_vdev_id;
 
-	u32 freq_low;
-	u32 freq_high;
+	struct wiphy_radio_freq_range freq_range;
+	u32 num_channels;
 
 	bool nlo_enabled;
+
+	struct completion fw_stats_complete;
+
+	struct completion mlo_setup_done;
+	u32 mlo_setup_status;
+	u8 ftm_msgref;
+	struct ath12k_fw_stats fw_stats;
+	unsigned long last_tx_power_update;
 };
 
 struct ath12k_hw {
 	struct ieee80211_hw *hw;
+	struct device *dev;
+
 	/* Protect the write operation of the hardware state ath12k_hw::state
 	 * between hardware start<=>reconfigure<=>stop transitions.
 	 */
@@ -698,6 +782,11 @@
 
 	u8 num_radio;
 
+	DECLARE_BITMAP(free_ml_peer_id_map, ATH12K_MAX_MLO_PEERS);
+
+	/* protected by wiphy_lock() */
+	struct list_head ml_peers;
+
 	/* Keep last */
 	struct ath12k radio[] __aligned(sizeof(void *));
 };
@@ -732,6 +821,10 @@
 	u32 tx_chain_mask_shift;
 	u32 rx_chain_mask_shift;
 	struct ath12k_band_cap band[NUM_NL80211_BANDS];
+	u32 eml_cap;
+	u32 mld_cap;
+	bool nss_ratio_enabled;
+	u8 nss_ratio_info;
 };
 
 struct mlo_timestamp {
@@ -766,7 +859,7 @@
 	size_t len;
 };
 
-struct ath12k_soc_dp_tx_err_stats {
+struct ath12k_device_dp_tx_err_stats {
 	/* TCL Ring Descriptor unavailable */
 	u32 desc_na[DP_TCL_NUM_RING_MAX];
 	/* Other failures during dp_tx due to mem allocation failure
@@ -775,28 +868,74 @@
 	atomic_t misc_fail;
 };
 
-struct ath12k_soc_dp_stats {
+struct ath12k_device_dp_stats {
 	u32 err_ring_pkts;
 	u32 invalid_rbm;
 	u32 rxdma_error[HAL_REO_ENTR_RING_RXDMA_ECODE_MAX];
 	u32 reo_error[HAL_REO_DEST_RING_ERROR_CODE_MAX];
 	u32 hal_reo_error[DP_REO_DST_RING_MAX];
-	struct ath12k_soc_dp_tx_err_stats tx_err;
+	struct ath12k_device_dp_tx_err_stats tx_err;
 };
 
-/**
- * enum ath12k_link_capable_flags - link capable flags
- *
- * Single/Multi link capability information
+struct ath12k_reg_freq {
+	u32 start_freq;
+	u32 end_freq;
+};
+
+struct ath12k_mlo_memory {
+	struct target_mem_chunk chunk[ATH12K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
+	int mlo_mem_size;
+	bool init_done;
+};
+
+struct ath12k_hw_link {
+	u8 device_id;
+	u8 pdev_idx;
+};
+
+/* Holds info on the group of devices that are registered as a single
+ * wiphy, protected with struct ath12k_hw_group::mutex.
+ */
+struct ath12k_hw_group {
+	struct list_head list;
+	u8 id;
+	u8 num_devices;
+	u8 num_probed;
+	u8 num_started;
+	unsigned long flags;
+	struct ath12k_base *ab[ATH12K_MAX_DEVICES];
+
+	/* protects access to this struct */
+	struct mutex mutex;
+
+	/* Holds information of wiphy (hw) registration.
  *
- * @ATH12K_INTRA_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all
- *	the links (radios) present within a device.
- * @ATH12K_INTER_DEVICE_MLO_SUPPORT: SLO/MLO form between the radio, where all
- *	the links (radios) present across the devices.
+	 * In Multi/Single Link Operation case, all pdevs are registered as
+	 * a single wiphy. In other (legacy/Non-MLO) cases, each pdev is
+	 * registered as separate wiphys.
  */
-enum ath12k_link_capable_flags {
-	ATH12K_INTRA_DEVICE_MLO_SUPPORT	= BIT(0),
-	ATH12K_INTER_DEVICE_MLO_SUPPORT	= BIT(1),
+	struct ath12k_hw *ah[ATH12K_GROUP_MAX_RADIO];
+	u8 num_hw;
+	bool mlo_capable;
+	struct device_node *wsi_node[ATH12K_MAX_DEVICES];
+	struct ath12k_mlo_memory mlo_mem;
+	struct ath12k_hw_link hw_links[ATH12K_GROUP_MAX_RADIO];
+	bool hw_link_id_init_done;
+};
+
+/* Holds WSI info specific to each device, excluding WSI group info */
+struct ath12k_wsi_info {
+	u32 index;
+	u32 hw_link_id_base;
+};
+
+struct ath12k_bid_override {
+	unsigned int domain;
+	unsigned int bus_nr;
+	unsigned int slot;
+	unsigned int func;
+	u16 board_id;
+	struct list_head next;
 };
 
 /* Master structure to hold the hw data which may be used in core module */
@@ -816,6 +955,7 @@
 	size_t ath12k_coredump_len;
 	struct work_struct dump_work;
 
+	struct list_head board_id_overrides;
 	struct ath12k_htc htc;
 
 	struct ath12k_dp dp;
@@ -862,15 +1002,6 @@
 
 	struct ath12k_pdev __rcu *pdevs_active[MAX_RADIOS];
 
-	/* Holds information of wiphy (hw) registration.
-	 *
-	 * In Multi/Single Link Operation case, all pdevs are registered as
-	 * a single wiphy. In other (legacy/Non-MLO) cases, each pdev is
-	 * registered as separate wiphys.
-	 */
-	struct ath12k_hw *ah[MAX_RADIOS];
-	u8 num_hw;
-
 	struct ath12k_wmi_hal_reg_capabilities_ext_arg hal_reg_cap[MAX_RADIOS];
 	unsigned long long free_vdev_map;
 	unsigned long long free_vdev_stats_id_map;
@@ -880,6 +1011,7 @@
 	bool wmi_ready;
 	u32 wlan_init_status;
 	int irq_num[ATH12K_IRQ_NUM_MAX];
+	char *irq_names[ATH12K_IRQ_NUM_MAX];
 	struct ath12k_ext_irq_grp ext_irq_grp[ATH12K_EXT_IRQ_GRP_NUM_MAX];
 	struct napi_struct *napi;
 	struct ath12k_wmi_target_cap_arg target_caps;
@@ -898,11 +1030,13 @@
 	/* This regd is set during dynamic country setting
 	 * This may or may not be used during the runtime
 	 */
+	bool regd_change_user_request[MAX_RADIOS];
 	struct ieee80211_regdomain *new_regd[MAX_RADIOS];
+	bool regd_freed;
 
 	/* Current DFS Regulatory */
 	enum ath12k_dfs_region dfs_region;
-	struct ath12k_soc_dp_stats soc_stats;
+	struct ath12k_device_dp_stats device_stats;
 #ifdef CONFIG_ATH12K_DEBUGFS
 	struct dentry *debugfs_soc;
 #endif
@@ -964,13 +1098,6 @@
 
 	const struct hal_rx_ops *hal_rx_ops;
 
-	/* mlo_capable_flags denotes the single/multi link operation
-	 * capabilities of the Device.
-	 *
-	 * See enum ath12k_link_capable_flags
-	 */
-	u8 mlo_capable_flags;
-
 	struct completion restart_completed;
 
 #ifdef CONFIG_ACPI
@@ -980,6 +1107,13 @@
 		u32 func_bit;
 		bool acpi_tas_enable;
 		bool acpi_bios_sar_enable;
+		bool acpi_disable_11be;
+		bool acpi_disable_rfkill;
+		bool acpi_cca_enable;
+		bool acpi_band_edge_enable;
+		bool acpi_enable_bdf;
+		u32 bit_flag;
+		char bdf_string[ATH12K_ACPI_BDF_MAX_LEN];
 		u8 tas_cfg[ATH12K_ACPI_DSM_TAS_CFG_SIZE];
 		u8 tas_sar_power_table[ATH12K_ACPI_DSM_TAS_DATA_SIZE];
 		u8 bios_sar_data[ATH12K_ACPI_DSM_BIOS_SAR_DATA_SIZE];
@@ -992,6 +1126,17 @@
 
 	struct notifier_block panic_nb;
 
+	const char *bdf_boardname;
+
+	struct ath12k_hw_group *ag;
+	struct ath12k_wsi_info wsi_info;
+	bool hw_group_ref;
+	enum ath12k_firmware_mode fw_mode;
+	struct ath12k_ftm_event_obj ftm_event_obj;
+
+	struct ath12k_reg_freq reg_freq_2ghz;
+	struct ath12k_reg_freq reg_freq_5ghz;
+	struct ath12k_reg_freq reg_freq_6ghz;
 	/* must be last */
 	u8 drv_priv[] __aligned(sizeof(void *));
 };
@@ -1001,6 +1146,108 @@
 	u8 pdev_idx;
 };
 
+struct ath12k_fw_stats_vdev {
+	struct list_head list;
+
+	u32 vdev_id;
+	u32 beacon_snr;
+	u32 data_snr;
+	u32 num_tx_frames[WLAN_MAX_AC];
+	u32 num_rx_frames;
+	u32 num_tx_frames_retries[WLAN_MAX_AC];
+	u32 num_tx_frames_failures[WLAN_MAX_AC];
+	u32 num_rts_fail;
+	u32 num_rts_success;
+	u32 num_rx_err;
+	u32 num_rx_discard;
+	u32 num_tx_not_acked;
+	u32 tx_rate_history[MAX_TX_RATE_VALUES];
+	u32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+};
+
+struct ath12k_fw_stats_bcn {
+	struct list_head list;
+
+	u32 vdev_id;
+	u32 tx_bcn_succ_cnt;
+	u32 tx_bcn_outage_cnt;
+};
+
+struct ath12k_fw_stats_pdev {
+	struct list_head list;
+
+	/* PDEV stats */
+	s32 ch_noise_floor;
+	u32 tx_frame_count;
+	u32 rx_frame_count;
+	u32 rx_clear_count;
+	u32 cycle_count;
+	u32 phy_err_count;
+	u32 chan_tx_power;
+	u32 ack_rx_bad;
+	u32 rts_bad;
+	u32 rts_good;
+	u32 fcs_bad;
+	u32 no_beacons;
+	u32 mib_int_count;
+
+	/* PDEV TX stats */
+	s32 comp_queued;
+	s32 comp_delivered;
+	s32 msdu_enqued;
+	s32 mpdu_enqued;
+	s32 wmm_drop;
+	s32 local_enqued;
+	s32 local_freed;
+	s32 hw_queued;
+	s32 hw_reaped;
+	s32 underrun;
+	s32 tx_abort;
+	s32 mpdus_requed;
+	u32 tx_ko;
+	u32 data_rc;
+	u32 self_triggers;
+	u32 sw_retry_failure;
+	u32 illgl_rate_phy_err;
+	u32 pdev_cont_xretry;
+	u32 pdev_tx_timeout;
+	u32 pdev_resets;
+	u32 stateless_tid_alloc_failure;
+	u32 phy_underrun;
+	u32 txop_ovf;
+
+	/* PDEV RX stats */
+	s32 mid_ppdu_route_change;
+	s32 status_rcvd;
+	s32 r0_frags;
+	s32 r1_frags;
+	s32 r2_frags;
+	s32 r3_frags;
+	s32 htt_msdus;
+	s32 htt_mpdus;
+	s32 loc_msdus;
+	s32 loc_mpdus;
+	s32 oversize_amsdu;
+	s32 phy_errs;
+	s32 phy_err_drop;
+	s32 mpdu_errs;
+};
+
+struct ath12k_cache_entry {
+	struct list_head next;
+	size_t size;
+	void *vaddr;
+	dma_addr_t paddr;
+};
+
+void *ath12k_dma_alloc_coherent_no_dev(size_t sz, dma_addr_t *paddr, int flags);
+void ath12k_dma_free_coherent_no_dev(size_t sz, void *vaddr, dma_addr_t paddr);
+void *ath12k_kzalloc_cache(size_t sz, int flags);
+void ath12k_kfree_cache(size_t sz, void *vaddr);
+void ath12k_cache_cleanup(void);
+
+void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab);
+
 int ath12k_core_qmi_firmware_ready(struct ath12k_base *ab);
 int ath12k_core_pre_init(struct ath12k_base *ab);
 int ath12k_core_init(struct ath12k_base *ath12k);
@@ -1022,6 +1269,8 @@
 int ath12k_core_resume(struct ath12k_base *ab);
 int ath12k_core_suspend(struct ath12k_base *ab);
 int ath12k_core_suspend_late(struct ath12k_base *ab);
+void ath12k_core_hw_group_unassign(struct ath12k_base *ab);
+u8 ath12k_get_num_partner_link(struct ath12k *ar);
 
 const struct firmware *ath12k_core_firmware_request(struct ath12k_base *ab,
 						    const char *filename);
@@ -1029,6 +1278,12 @@
 u32 ath12k_core_get_max_peers_per_radio(struct ath12k_base *ab);
 u32 ath12k_core_get_max_num_tids(struct ath12k_base *ab);
 
+void ath12k_core_hw_group_set_mlo_capable(struct ath12k_hw_group *ag);
+void ath12k_fw_stats_init(struct ath12k *ar);
+void ath12k_fw_stats_bcn_free(struct list_head *head);
+void ath12k_fw_stats_free(struct ath12k_fw_stats *stats);
+void ath12k_fw_stats_reset(struct ath12k *ar);
+
 static inline const char *ath12k_scan_state_str(enum ath12k_scan_state state)
 {
 	switch (state) {
@@ -1129,4 +1384,27 @@
 #define for_each_ar(ah, ar, index) \
 	for ((index) = 0; ((index) < (ah)->num_radio && \
 	     ((ar) = &(ah)->radio[(index)])); (index)++)
+
+static inline struct ath12k_hw *ath12k_ag_to_ah(struct ath12k_hw_group *ag, int idx)
+{
+	return ag->ah[idx];
+}
+
+static inline void ath12k_ag_set_ah(struct ath12k_hw_group *ag, int idx,
+				    struct ath12k_hw *ah)
+{
+	ag->ah[idx] = ah;
+}
+
+static inline struct ath12k_hw_group *ath12k_ab_to_ag(struct ath12k_base *ab)
+{
+	return ab->ag;
+}
+
+static inline struct ath12k_base *ath12k_ag_to_ab(struct ath12k_hw_group *ag,
+						  u8 device_id)
+{
+	return ag->ab[device_id];
+}
+
 #endif /* _CORE_H_ */
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/coredump.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/coredump.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/coredump.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/coredump.h	2025-07-01 14:10:42.720046161 +0200
@@ -15,6 +15,7 @@
 	FW_CRASH_DUMP_PAGEABLE_DATA,
 	FW_CRASH_DUMP_M3_DUMP,
 	FW_CRASH_DUMP_NONE,
+	FW_CRASH_DUMP_MLO_GLOBAL_DATA,
 
 	/* keep last */
 	FW_CRASH_DUMP_TYPE_MAX,
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/debug.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/debug.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/debug.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/debug.c	2025-07-01 14:35:02.270708730 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/vmalloc.h>
@@ -36,7 +36,7 @@
 	va_end(args);
 }
 
-void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...)
+void __ath12k_warn(struct device *dev, const char *fmt, ...)
 {
 	struct va_format vaf = {
 		.fmt = fmt,
@@ -45,7 +45,7 @@
 
 	va_start(args, fmt);
 	vaf.va = &args;
-	dev_warn_ratelimited(ab->dev, "%pV", &vaf);
+	dev_warn_ratelimited(dev, "%pV", &vaf);
 	/* TODO: Trace the log */
 	va_end(args);
 }
@@ -63,8 +63,10 @@
 	vaf.fmt = fmt;
 	vaf.va = &args;
 
-	if (ath12k_debug_mask & mask)
+	if (likely(ab))
 		dev_printk(KERN_DEBUG, ab->dev, "%pV", &vaf);
+	else
+		printk(KERN_DEBUG "ath12k: %pV", &vaf);
 
 	/* TODO: trace log */
 
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/debug.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/debug.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/debug.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/debug.h	2025-09-25 17:40:34.139360145 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _ATH12K_DEBUG_H_
@@ -31,9 +31,14 @@
 
 __printf(2, 3) void ath12k_info(struct ath12k_base *ab, const char *fmt, ...);
 __printf(2, 3) void ath12k_err(struct ath12k_base *ab, const char *fmt, ...);
-__printf(2, 3) void ath12k_warn(struct ath12k_base *ab, const char *fmt, ...);
+__printf(2, 3) void __ath12k_warn(struct device *dev, const char *fmt, ...);
+
+#define ath12k_warn(ab, fmt, ...) __ath12k_warn((ab)->dev, fmt, ##__VA_ARGS__)
+#define ath12k_hw_warn(ah, fmt, ...) __ath12k_warn((ah)->dev, fmt, ##__VA_ARGS__)
 
 extern unsigned int ath12k_debug_mask;
+extern bool ath12k_ftm_mode;
+extern bool ath12k_en_shutdown;
 
 #ifdef CONFIG_ATH12K_DEBUG
 __printf(3, 4) void __ath12k_dbg(struct ath12k_base *ab,
@@ -58,11 +63,14 @@
 }
 #endif /* CONFIG_ATH12K_DEBUG */
 
-#define ath12k_dbg(ar, dbg_mask, fmt, ...)			\
+#define ath12k_dbg(ab, dbg_mask, fmt, ...)			\
 do {								\
 	typeof(dbg_mask) mask = (dbg_mask);			\
 	if (ath12k_debug_mask & mask)				\
-		__ath12k_dbg(ar, mask, fmt, ##__VA_ARGS__);	\
+		__ath12k_dbg(ab, mask, fmt, ##__VA_ARGS__);	\
 } while (0)
 
+#define ath12k_generic_dbg(dbg_mask, fmt, ...)			\
+	ath12k_dbg(NULL, dbg_mask, fmt, ##__VA_ARGS__)
+
 #endif /* _ATH12K_DEBUG_H_ */
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/debugfs.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/debugfs.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/debugfs.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/debugfs.c	2025-09-25 17:40:34.139360145 +0200
@@ -1,13 +1,41 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include "core.h"
+#include "dp_tx.h"
+#include "debug.h"
 #include "debugfs.h"
 #include "debugfs_htt_stats.h"
 
+static ssize_t ath12k_debugfs_read_bdf_boardname(struct file *file,
+					  char __user *user_buf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath12k_base *ab = file->private_data;
+	char buf[256 + 1];
+	size_t len;
+	ssize_t ret;
+
+	len = scnprintf(buf, sizeof (buf), "%s\n", ab->bdf_boardname);
+	if (len > sizeof (buf))
+		len = sizeof (buf);
+
+	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+
+	return ret;
+}
+
+static const struct file_operations fops_bdf_boardname = {
+	.read	= ath12k_debugfs_read_bdf_boardname,
+	.write	= NULL,
+	.open	= simple_open,
+	.owner	= THIS_MODULE,
+	.llseek	= default_llseek,
+};
+
 static ssize_t ath12k_write_simulate_radar(struct file *file,
 					   const char __user *user_buf,
 					   size_t count, loff_t *ppos)
@@ -31,6 +59,924 @@
 	.open = simple_open
 };
 
+static ssize_t ath12k_write_tpc_stats_type(struct file *file,
+					   const char __user *user_buf,
+					   size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	u8 type;
+	int ret;
+
+	ret = kstrtou8_from_user(user_buf, count, 0, &type);
+	if (ret)
+		return ret;
+
+	if (type >= WMI_HALPHY_PDEV_TX_STATS_MAX)
+		return -EINVAL;
+
+	spin_lock_bh(&ar->data_lock);
+	ar->debug.tpc_stats_type = type;
+	spin_unlock_bh(&ar->data_lock);
+
+	return count;
+}
+
+static int ath12k_debug_tpc_stats_request(struct ath12k *ar)
+{
+	enum wmi_halphy_ctrl_path_stats_id tpc_stats_sub_id;
+	struct ath12k_base *ab = ar->ab;
+	int ret;
+
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	reinit_completion(&ar->debug.tpc_complete);
+
+	spin_lock_bh(&ar->data_lock);
+	ar->debug.tpc_request = true;
+	tpc_stats_sub_id = ar->debug.tpc_stats_type;
+	spin_unlock_bh(&ar->data_lock);
+
+	ret = ath12k_wmi_send_tpc_stats_request(ar, tpc_stats_sub_id);
+	if (ret) {
+		ath12k_warn(ab, "failed to request pdev tpc stats: %d\n", ret);
+		spin_lock_bh(&ar->data_lock);
+		ar->debug.tpc_request = false;
+		spin_unlock_bh(&ar->data_lock);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath12k_get_tpc_ctl_mode_idx(struct wmi_tpc_stats_arg *tpc_stats,
+				       enum wmi_tpc_pream_bw pream_bw, int *mode_idx)
+{
+	u32 chan_freq = le32_to_cpu(tpc_stats->tpc_config.chan_freq);
+	u8 band;
+
+	band = ((chan_freq > ATH12K_MIN_6GHZ_FREQ) ? NL80211_BAND_6GHZ :
+		((chan_freq > ATH12K_MIN_5GHZ_FREQ) ? NL80211_BAND_5GHZ :
+		NL80211_BAND_2GHZ));
+
+	if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
+		switch (pream_bw) {
+		case WMI_TPC_PREAM_HT20:
+		case WMI_TPC_PREAM_VHT20:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5GHZ_6GHZ;
+			break;
+		case WMI_TPC_PREAM_HE20:
+		case WMI_TPC_PREAM_EHT20:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5GHZ_6GHZ;
+			break;
+		case WMI_TPC_PREAM_HT40:
+		case WMI_TPC_PREAM_VHT40:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5GHZ_6GHZ;
+			break;
+		case WMI_TPC_PREAM_HE40:
+		case WMI_TPC_PREAM_EHT40:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5GHZ_6GHZ;
+			break;
+		case WMI_TPC_PREAM_VHT80:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_VHT80_5GHZ_6GHZ;
+			break;
+		case WMI_TPC_PREAM_EHT60:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20;
+			break;
+		case WMI_TPC_PREAM_HE80:
+		case WMI_TPC_PREAM_EHT80:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5GHZ_6GHZ;
+			break;
+		case WMI_TPC_PREAM_VHT160:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_VHT160_5GHZ_6GHZ;
+			break;
+		case WMI_TPC_PREAM_EHT120:
+		case WMI_TPC_PREAM_EHT140:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20;
+			break;
+		case WMI_TPC_PREAM_HE160:
+		case WMI_TPC_PREAM_EHT160:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5GHZ_6GHZ;
+			break;
+		case WMI_TPC_PREAM_EHT200:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120;
+			break;
+		case WMI_TPC_PREAM_EHT240:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80;
+			break;
+		case WMI_TPC_PREAM_EHT280:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40;
+			break;
+		case WMI_TPC_PREAM_EHT320:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5GHZ_6GHZ;
+			break;
+		default:
+			/* for 5GHZ and 6GHZ, default case will be for OFDM */
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_LEGACY_5GHZ_6GHZ;
+			break;
+		}
+	} else {
+		switch (pream_bw) {
+		case WMI_TPC_PREAM_OFDM:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_LEGACY_2GHZ;
+			break;
+		case WMI_TPC_PREAM_HT20:
+		case WMI_TPC_PREAM_VHT20:
+		case WMI_TPC_PREAM_HE20:
+		case WMI_TPC_PREAM_EHT20:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT20_2GHZ;
+			break;
+		case WMI_TPC_PREAM_HT40:
+		case WMI_TPC_PREAM_VHT40:
+		case WMI_TPC_PREAM_HE40:
+		case WMI_TPC_PREAM_EHT40:
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_HT40_2GHZ;
+			break;
+		default:
+			/* for 2GHZ, default case will be CCK */
+			*mode_idx = ATH12K_TPC_STATS_CTL_MODE_CCK_2GHZ;
+			break;
+		}
+	}
+
+	return 0;
+}
+
+static s16 ath12k_tpc_get_rate(struct ath12k *ar,
+			       struct wmi_tpc_stats_arg *tpc_stats,
+			       u32 rate_idx, u32 num_chains, u32 rate_code,
+			       enum wmi_tpc_pream_bw pream_bw,
+			       enum wmi_halphy_ctrl_path_stats_id type,
+			       u32 eht_rate_idx)
+{
+	u32 tot_nss, tot_modes, txbf_on_off, index_offset1, index_offset2, index_offset3;
+	u8 chain_idx, stm_idx, num_streams;
+	bool is_mu, txbf_enabled = 0;
+	s8 rates_ctl_min, tpc_ctl;
+	s16 rates, tpc, reg_pwr;
+	u16 rate1, rate2;
+	int mode, ret;
+
+	num_streams = 1 + ATH12K_HW_NSS(rate_code);
+	chain_idx = num_chains - 1;
+	stm_idx = num_streams - 1;
+	mode = -1;
+
+	ret = ath12k_get_tpc_ctl_mode_idx(tpc_stats, pream_bw, &mode);
+	if (ret) {
+		ath12k_warn(ar->ab, "Invalid mode index received\n");
+		tpc = TPC_INVAL;
+		goto out;
+	}
+
+	if (num_chains < num_streams) {
+		tpc = TPC_INVAL;
+		goto out;
+	}
+
+	if (le32_to_cpu(tpc_stats->tpc_config.num_tx_chain) <= 1) {
+		tpc = TPC_INVAL;
+		goto out;
+	}
+
+	if (type == WMI_HALPHY_PDEV_TX_SUTXBF_STATS ||
+	    type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS)
+		txbf_enabled = 1;
+
+	if (type == WMI_HALPHY_PDEV_TX_MU_STATS ||
+	    type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS) {
+		is_mu = true;
+	} else {
+		is_mu = false;
+	}
+
+	/* Below computes the minimum across the ctl array, the rates array
+	 * and the regulatory power table; tpc is the minimum of all three
+	 */
+	if (pream_bw >= WMI_TPC_PREAM_EHT20 && pream_bw <= WMI_TPC_PREAM_EHT320) {
+		rate2 = tpc_stats->rates_array2.rate_array[eht_rate_idx];
+		if (is_mu)
+			rates = u32_get_bits(rate2, ATH12K_TPC_RATE_ARRAY_MU);
+		else
+			rates = u32_get_bits(rate2, ATH12K_TPC_RATE_ARRAY_SU);
+	} else {
+		rate1 = tpc_stats->rates_array1.rate_array[rate_idx];
+		if (is_mu)
+			rates = u32_get_bits(rate1, ATH12K_TPC_RATE_ARRAY_MU);
+		else
+			rates = u32_get_bits(rate1, ATH12K_TPC_RATE_ARRAY_SU);
+	}
+
+	if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
+		tot_nss = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d1);
+		tot_modes = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d2);
+		txbf_on_off = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.d3);
+		index_offset1 = txbf_on_off * tot_modes * tot_nss;
+		index_offset2 = tot_modes * tot_nss;
+		index_offset3 = tot_nss;
+
+		tpc_ctl = *(tpc_stats->ctl_array.ctl_pwr_table +
+			    chain_idx * index_offset1 + txbf_enabled * index_offset2
+			    + mode * index_offset3 + stm_idx);
+	} else {
+		tpc_ctl = TPC_MAX;
+		ath12k_warn(ar->ab,
+			    "ctl array for tpc stats not received from fw\n");
+	}
+
+	rates_ctl_min = min_t(s16, rates, tpc_ctl);
+
+	reg_pwr = tpc_stats->max_reg_allowed_power.reg_pwr_array[chain_idx];
+
+	if (reg_pwr < 0)
+		reg_pwr = TPC_INVAL;
+
+	tpc = min_t(s16, rates_ctl_min, reg_pwr);
+
+	/* MODULATION_LIMIT is the maximum power limit; tpc must not exceed
+	 * the modulation limit even if the minimum tpc across all three
+	 * arrays is greater than it
+	 */
+	tpc = min_t(s16, tpc, MODULATION_LIMIT);
+
+out:
+	return tpc;
+}
+
+static u16 ath12k_get_ratecode(u16 pream_idx, u16 nss, u16 mcs_rate)
+{
+	u16 mode_type = ~0;
+
+	/* Below assignments are for printing purposes only */
+	switch (pream_idx) {
+	case WMI_TPC_PREAM_CCK:
+		mode_type = WMI_RATE_PREAMBLE_CCK;
+		break;
+	case WMI_TPC_PREAM_OFDM:
+		mode_type = WMI_RATE_PREAMBLE_OFDM;
+		break;
+	case WMI_TPC_PREAM_HT20:
+	case WMI_TPC_PREAM_HT40:
+		mode_type = WMI_RATE_PREAMBLE_HT;
+		break;
+	case WMI_TPC_PREAM_VHT20:
+	case WMI_TPC_PREAM_VHT40:
+	case WMI_TPC_PREAM_VHT80:
+	case WMI_TPC_PREAM_VHT160:
+		mode_type = WMI_RATE_PREAMBLE_VHT;
+		break;
+	case WMI_TPC_PREAM_HE20:
+	case WMI_TPC_PREAM_HE40:
+	case WMI_TPC_PREAM_HE80:
+	case WMI_TPC_PREAM_HE160:
+		mode_type = WMI_RATE_PREAMBLE_HE;
+		break;
+	case WMI_TPC_PREAM_EHT20:
+	case WMI_TPC_PREAM_EHT40:
+	case WMI_TPC_PREAM_EHT60:
+	case WMI_TPC_PREAM_EHT80:
+	case WMI_TPC_PREAM_EHT120:
+	case WMI_TPC_PREAM_EHT140:
+	case WMI_TPC_PREAM_EHT160:
+	case WMI_TPC_PREAM_EHT200:
+	case WMI_TPC_PREAM_EHT240:
+	case WMI_TPC_PREAM_EHT280:
+	case WMI_TPC_PREAM_EHT320:
+		mode_type = WMI_RATE_PREAMBLE_EHT;
+		if (mcs_rate == 0 || mcs_rate == 1)
+			mcs_rate += 14;
+		else
+			mcs_rate -= 2;
+		break;
+	default:
+		return mode_type;
+	}
+	return ((mode_type << 8) | ((nss & 0x7) << 5) | (mcs_rate & 0x1F));
+}
+
+static bool ath12k_he_supports_extra_mcs(struct ath12k *ar, int freq)
+{
+	struct ath12k_pdev_cap *cap = &ar->pdev->cap;
+	struct ath12k_band_cap *cap_band;
+	bool extra_mcs_supported;
+
+	if (freq <= ATH12K_2GHZ_MAX_FREQUENCY)
+		cap_band = &cap->band[NL80211_BAND_2GHZ];
+	else if (freq <= ATH12K_5GHZ_MAX_FREQUENCY)
+		cap_band = &cap->band[NL80211_BAND_5GHZ];
+	else
+		cap_band = &cap->band[NL80211_BAND_6GHZ];
+
+	extra_mcs_supported = u32_get_bits(cap_band->he_cap_info[1],
+					   HE_EXTRA_MCS_SUPPORT);
+	return extra_mcs_supported;
+}
+
+static int ath12k_tpc_fill_pream(struct ath12k *ar, char *buf, int buf_len, int len,
+				 enum wmi_tpc_pream_bw pream_bw, u32 max_rix,
+				 int max_nss, int max_rates, int pream_type,
+				 enum wmi_halphy_ctrl_path_stats_id tpc_type,
+				 int rate_idx, int eht_rate_idx)
+{
+	struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;
+	int nss, rates, chains;
+	u8 active_tx_chains;
+	u16 rate_code;
+	s16 tpc;
+
+	static const char *const pream_str[] = {
+		[WMI_TPC_PREAM_CCK]     = "CCK",
+		[WMI_TPC_PREAM_OFDM]    = "OFDM",
+		[WMI_TPC_PREAM_HT20]    = "HT20",
+		[WMI_TPC_PREAM_HT40]    = "HT40",
+		[WMI_TPC_PREAM_VHT20]   = "VHT20",
+		[WMI_TPC_PREAM_VHT40]   = "VHT40",
+		[WMI_TPC_PREAM_VHT80]   = "VHT80",
+		[WMI_TPC_PREAM_VHT160]  = "VHT160",
+		[WMI_TPC_PREAM_HE20]    = "HE20",
+		[WMI_TPC_PREAM_HE40]    = "HE40",
+		[WMI_TPC_PREAM_HE80]    = "HE80",
+		[WMI_TPC_PREAM_HE160]   = "HE160",
+		[WMI_TPC_PREAM_EHT20]   = "EHT20",
+		[WMI_TPC_PREAM_EHT40]   = "EHT40",
+		[WMI_TPC_PREAM_EHT60]   = "EHT60",
+		[WMI_TPC_PREAM_EHT80]   = "EHT80",
+		[WMI_TPC_PREAM_EHT120]   = "EHT120",
+		[WMI_TPC_PREAM_EHT140]   = "EHT140",
+		[WMI_TPC_PREAM_EHT160]   = "EHT160",
+		[WMI_TPC_PREAM_EHT200]   = "EHT200",
+		[WMI_TPC_PREAM_EHT240]   = "EHT240",
+		[WMI_TPC_PREAM_EHT280]   = "EHT280",
+		[WMI_TPC_PREAM_EHT320]   = "EHT320"};
+
+	active_tx_chains = ar->num_tx_chains;
+
+	for (nss = 0; nss < max_nss; nss++) {
+		for (rates = 0; rates < max_rates; rates++, rate_idx++, max_rix++) {
+			/* FW sends extra MCS (10 & 11) for VHT and HE rates;
+			 *  these are not used, hence skipped here
+			 */
+			if (pream_type == WMI_RATE_PREAMBLE_VHT &&
+			    rates > ATH12K_VHT_MCS_MAX)
+				continue;
+
+			if (pream_type == WMI_RATE_PREAMBLE_HE &&
+			    rates > ATH12K_HE_MCS_MAX)
+				continue;
+
+			if (pream_type == WMI_RATE_PREAMBLE_EHT &&
+			    rates > ATH12K_EHT_MCS_MAX)
+				continue;
+
+			rate_code = ath12k_get_ratecode(pream_bw, nss, rates);
+			len += scnprintf(buf + len, buf_len - len,
+					 "%d\t %s\t 0x%03x\t", max_rix,
+					 pream_str[pream_bw], rate_code);
+
+			for (chains = 0; chains < active_tx_chains; chains++) {
+				if (nss > chains) {
+					len += scnprintf(buf + len,
+							 buf_len - len,
+							 "\t%s", "NA");
+				} else {
+					tpc = ath12k_tpc_get_rate(ar, tpc_stats,
+								  rate_idx, chains + 1,
+								  rate_code, pream_bw,
+								  tpc_type,
+								  eht_rate_idx);
+
+					if (tpc == TPC_INVAL) {
+						len += scnprintf(buf + len,
+								 buf_len - len, "\tNA");
+					} else {
+						len += scnprintf(buf + len,
+								 buf_len - len, "\t%d",
+								 tpc);
+					}
+				}
+			}
+			len += scnprintf(buf + len, buf_len - len, "\n");
+
+			if (pream_type == WMI_RATE_PREAMBLE_EHT)
+				/* For fetching the next EHT rate power from rates array2 */
+				++eht_rate_idx;
+		}
+	}
+
+	return len;
+}
+
+static int ath12k_tpc_stats_print(struct ath12k *ar,
+				  struct wmi_tpc_stats_arg *tpc_stats,
+				  char *buf, size_t len,
+				  enum wmi_halphy_ctrl_path_stats_id type)
+{
+	u32 eht_idx = 0, pream_idx = 0, rate_pream_idx = 0, total_rates = 0, max_rix = 0;
+	u32 chan_freq, num_tx_chain, caps, i, j = 1;
+	size_t buf_len = ATH12K_TPC_STATS_BUF_SIZE;
+	u8 nss, active_tx_chains;
+	bool he_ext_mcs;
+	static const char *const type_str[WMI_HALPHY_PDEV_TX_STATS_MAX] = {
+		[WMI_HALPHY_PDEV_TX_SU_STATS]		= "SU",
+		[WMI_HALPHY_PDEV_TX_SUTXBF_STATS]	= "SU WITH TXBF",
+		[WMI_HALPHY_PDEV_TX_MU_STATS]		= "MU",
+		[WMI_HALPHY_PDEV_TX_MUTXBF_STATS]	= "MU WITH TXBF"};
+
+	u8 max_rates[WMI_TPC_PREAM_MAX] = {
+		[WMI_TPC_PREAM_CCK]     = ATH12K_CCK_RATES,
+		[WMI_TPC_PREAM_OFDM]    = ATH12K_OFDM_RATES,
+		[WMI_TPC_PREAM_HT20]    = ATH12K_HT_RATES,
+		[WMI_TPC_PREAM_HT40]    = ATH12K_HT_RATES,
+		[WMI_TPC_PREAM_VHT20]   = ATH12K_VHT_RATES,
+		[WMI_TPC_PREAM_VHT40]   = ATH12K_VHT_RATES,
+		[WMI_TPC_PREAM_VHT80]   = ATH12K_VHT_RATES,
+		[WMI_TPC_PREAM_VHT160]  = ATH12K_VHT_RATES,
+		[WMI_TPC_PREAM_HE20]    = ATH12K_HE_RATES,
+		[WMI_TPC_PREAM_HE40]    = ATH12K_HE_RATES,
+		[WMI_TPC_PREAM_HE80]    = ATH12K_HE_RATES,
+		[WMI_TPC_PREAM_HE160]   = ATH12K_HE_RATES,
+		[WMI_TPC_PREAM_EHT20]   = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT40]   = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT60]   = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT80]   = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT120]  = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT140]  = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT160]  = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT200]  = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT240]  = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT280]  = ATH12K_EHT_RATES,
+		[WMI_TPC_PREAM_EHT320]  = ATH12K_EHT_RATES};
+	static const u8 max_nss[WMI_TPC_PREAM_MAX] = {
+		[WMI_TPC_PREAM_CCK]     = ATH12K_NSS_1,
+		[WMI_TPC_PREAM_OFDM]    = ATH12K_NSS_1,
+		[WMI_TPC_PREAM_HT20]    = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_HT40]    = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_VHT20]   = ATH12K_NSS_8,
+		[WMI_TPC_PREAM_VHT40]   = ATH12K_NSS_8,
+		[WMI_TPC_PREAM_VHT80]   = ATH12K_NSS_8,
+		[WMI_TPC_PREAM_VHT160]  = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_HE20]    = ATH12K_NSS_8,
+		[WMI_TPC_PREAM_HE40]    = ATH12K_NSS_8,
+		[WMI_TPC_PREAM_HE80]    = ATH12K_NSS_8,
+		[WMI_TPC_PREAM_HE160]   = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT20]   = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT40]   = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT60]   = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT80]   = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT120]  = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT140]  = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT160]  = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT200]  = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT240]  = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT280]  = ATH12K_NSS_4,
+		[WMI_TPC_PREAM_EHT320]  = ATH12K_NSS_4};
+
+	u16 rate_idx[WMI_TPC_PREAM_MAX] = {}, eht_rate_idx[WMI_TPC_PREAM_MAX] = {};
+	static const u8 pream_type[WMI_TPC_PREAM_MAX] = {
+		[WMI_TPC_PREAM_CCK]     = WMI_RATE_PREAMBLE_CCK,
+		[WMI_TPC_PREAM_OFDM]    = WMI_RATE_PREAMBLE_OFDM,
+		[WMI_TPC_PREAM_HT20]    = WMI_RATE_PREAMBLE_HT,
+		[WMI_TPC_PREAM_HT40]    = WMI_RATE_PREAMBLE_HT,
+		[WMI_TPC_PREAM_VHT20]   = WMI_RATE_PREAMBLE_VHT,
+		[WMI_TPC_PREAM_VHT40]   = WMI_RATE_PREAMBLE_VHT,
+		[WMI_TPC_PREAM_VHT80]   = WMI_RATE_PREAMBLE_VHT,
+		[WMI_TPC_PREAM_VHT160]  = WMI_RATE_PREAMBLE_VHT,
+		[WMI_TPC_PREAM_HE20]    = WMI_RATE_PREAMBLE_HE,
+		[WMI_TPC_PREAM_HE40]    = WMI_RATE_PREAMBLE_HE,
+		[WMI_TPC_PREAM_HE80]    = WMI_RATE_PREAMBLE_HE,
+		[WMI_TPC_PREAM_HE160]   = WMI_RATE_PREAMBLE_HE,
+		[WMI_TPC_PREAM_EHT20]   = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT40]   = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT60]   = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT80]   = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT120]  = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT140]  = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT160]  = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT200]  = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT240]  = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT280]  = WMI_RATE_PREAMBLE_EHT,
+		[WMI_TPC_PREAM_EHT320]  = WMI_RATE_PREAMBLE_EHT};
+
+	chan_freq = le32_to_cpu(tpc_stats->tpc_config.chan_freq);
+	num_tx_chain = le32_to_cpu(tpc_stats->tpc_config.num_tx_chain);
+	caps = le32_to_cpu(tpc_stats->tpc_config.caps);
+
+	active_tx_chains = ar->num_tx_chains;
+	he_ext_mcs = ath12k_he_supports_extra_mcs(ar, chan_freq);
+
+	/* MCS 12 & 13 are sent by FW for certain HWs in the rate array;
+	 * they are skipped here as they are not supported
+	 */
+	if (he_ext_mcs) {
+		for (i = WMI_TPC_PREAM_HE20; i <= WMI_TPC_PREAM_HE160; ++i)
+			max_rates[i] = ATH12K_HE_RATES;
+	}
+
+	if (type == WMI_HALPHY_PDEV_TX_MU_STATS ||
+	    type == WMI_HALPHY_PDEV_TX_MUTXBF_STATS) {
+		pream_idx = WMI_TPC_PREAM_VHT20;
+
+		for (i = WMI_TPC_PREAM_CCK; i <= WMI_TPC_PREAM_HT40; ++i)
+			max_rix += max_nss[i] * max_rates[i];
+	}
+	/* Enumerate all the rate indices */
+	for (i = rate_pream_idx + 1; i < WMI_TPC_PREAM_MAX; i++) {
+		nss = (max_nss[i - 1] < num_tx_chain ?
+		       max_nss[i - 1] : num_tx_chain);
+
+		rate_idx[i] = rate_idx[i - 1] + max_rates[i - 1] * nss;
+
+		if (pream_type[i] == WMI_RATE_PREAMBLE_EHT) {
+			eht_rate_idx[j] = eht_rate_idx[j - 1] + max_rates[i] * nss;
+			++j;
+		}
+	}
+
+	for (i = 0; i < WMI_TPC_PREAM_MAX; i++) {
+		nss = (max_nss[i] < num_tx_chain ?
+		       max_nss[i] : num_tx_chain);
+		total_rates += max_rates[i] * nss;
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "No.of rates-%d\n", total_rates);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "**************** %s ****************\n",
+			 type_str[type]);
+	len += scnprintf(buf + len, buf_len - len,
+			 "\t\t\t\tTPC values for Active chains\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "Rate idx Preamble Rate code");
+
+	for (i = 1; i <= active_tx_chains; ++i) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "\t%d-Chain", i);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	for (i = pream_idx; i < WMI_TPC_PREAM_MAX; i++) {
+		if (chan_freq <= 2483) {
+			if (i == WMI_TPC_PREAM_VHT80 ||
+			    i == WMI_TPC_PREAM_VHT160 ||
+			    i == WMI_TPC_PREAM_HE80 ||
+			    i == WMI_TPC_PREAM_HE160 ||
+			    (i >= WMI_TPC_PREAM_EHT60 &&
+			     i <= WMI_TPC_PREAM_EHT320)) {
+				max_rix += max_nss[i] * max_rates[i];
+				continue;
+			}
+		} else {
+			if (i == WMI_TPC_PREAM_CCK) {
+				max_rix += max_rates[i];
+				continue;
+			}
+		}
+
+		nss = (max_nss[i] < ar->num_tx_chains ? max_nss[i] : ar->num_tx_chains);
+
+		if (!(caps &
+		    (1 << ATH12K_TPC_STATS_SUPPORT_BE_PUNC))) {
+			if (i == WMI_TPC_PREAM_EHT60 || i == WMI_TPC_PREAM_EHT120 ||
+			    i == WMI_TPC_PREAM_EHT140 || i == WMI_TPC_PREAM_EHT200 ||
+			    i == WMI_TPC_PREAM_EHT240 || i == WMI_TPC_PREAM_EHT280) {
+				max_rix += max_nss[i] * max_rates[i];
+				continue;
+			}
+		}
+
+		len = ath12k_tpc_fill_pream(ar, buf, buf_len, len, i, max_rix, nss,
+					    max_rates[i], pream_type[i],
+					    type, rate_idx[i], eht_rate_idx[eht_idx]);
+
+		if (pream_type[i] == WMI_RATE_PREAMBLE_EHT)
+			/* For fetching the next EHT rate index from rates array2 */
+			++eht_idx;
+
+		max_rix += max_nss[i] * max_rates[i];
+	}
+	return len;
+}
+
+static void ath12k_tpc_stats_fill(struct ath12k *ar,
+				  struct wmi_tpc_stats_arg *tpc_stats,
+				  char *buf)
+{
+	size_t buf_len = ATH12K_TPC_STATS_BUF_SIZE;
+	struct wmi_tpc_config_params *tpc;
+	size_t len = 0;
+
+	if (!tpc_stats) {
+		ath12k_warn(ar->ab, "failed to find tpc stats\n");
+		return;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+
+	tpc = &tpc_stats->tpc_config;
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "*************** TPC config **************\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "* powers are in 0.25 dBm steps\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "reg domain-%d\t\tchan freq-%d\n",
+			 tpc->reg_domain, tpc->chan_freq);
+	len += scnprintf(buf + len, buf_len - len,
+			 "power limit-%d\t\tmax reg-domain Power-%d\n",
+			 le32_to_cpu(tpc->twice_max_reg_power) / 2, tpc->power_limit);
+	len += scnprintf(buf + len, buf_len - len,
+			 "No.of tx chain-%d\t",
+			 ar->num_tx_chains);
+
+	ath12k_tpc_stats_print(ar, tpc_stats, buf, len,
+			       ar->debug.tpc_stats_type);
+
+	spin_unlock_bh(&ar->data_lock);
+}
+
+static int ath12k_open_tpc_stats(struct inode *inode, struct file *file)
+{
+	struct ath12k *ar = inode->i_private;
+	struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+	int ret;
+
+	guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+	if (ah->state != ATH12K_HW_STATE_ON) {
+		ath12k_warn(ar->ab, "Interface not up\n");
+		return -ENETDOWN;
+	}
+
+	void *buf __free(kfree) = kzalloc(ATH12K_TPC_STATS_BUF_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = ath12k_debug_tpc_stats_request(ar);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to request tpc stats: %d\n",
+			    ret);
+		return ret;
+	}
+
+	if (!wait_for_completion_timeout(&ar->debug.tpc_complete, TPC_STATS_WAIT_TIME)) {
+		spin_lock_bh(&ar->data_lock);
+		ath12k_wmi_free_tpc_stats_mem(ar);
+		ar->debug.tpc_request = false;
+		spin_unlock_bh(&ar->data_lock);
+		return -ETIMEDOUT;
+	}
+
+	ath12k_tpc_stats_fill(ar, ar->debug.tpc_stats, buf);
+	file->private_data = no_free_ptr(buf);
+
+	spin_lock_bh(&ar->data_lock);
+	ath12k_wmi_free_tpc_stats_mem(ar);
+	spin_unlock_bh(&ar->data_lock);
+
+	return 0;
+}
+
+static ssize_t ath12k_read_tpc_stats(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	const char *buf = file->private_data;
+	size_t len = strlen(buf);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static int ath12k_release_tpc_stats(struct inode *inode,
+				    struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+static const struct file_operations fops_tpc_stats = {
+	.open = ath12k_open_tpc_stats,
+	.release = ath12k_release_tpc_stats,
+	.read = ath12k_read_tpc_stats,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static const struct file_operations fops_tpc_stats_type = {
+	.write = ath12k_write_tpc_stats_type,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
+
+static ssize_t ath12k_write_extd_rx_stats(struct file *file,
+					  const char __user *ubuf,
+					  size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	struct htt_rx_ring_tlv_filter tlv_filter = {0};
+	u32 ring_id, rx_filter = 0;
+	bool enable;
+	int ret, i;
+
+	if (kstrtobool_from_user(ubuf, count, &enable))
+		return -EINVAL;
+
+	wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
+	if (!ar->ab->hw_params->rxdma1_enable) {
+		ret = count;
+		goto exit;
+	}
+
+	if (ar->ah->state != ATH12K_HW_STATE_ON) {
+		ret = -ENETDOWN;
+		goto exit;
+	}
+
+	if (enable == ar->debug.extd_rx_stats) {
+		ret = count;
+		goto exit;
+	}
+
+	if (enable) {
+		rx_filter =  HTT_RX_FILTER_TLV_FLAGS_MPDU_START;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE;
+		rx_filter |= HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO;
+
+		tlv_filter.rx_filter = rx_filter;
+		tlv_filter.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0;
+		tlv_filter.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1;
+		tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2;
+		tlv_filter.pkt_filter_flags3 = HTT_RX_FP_CTRL_FILTER_FLASG3 |
+			HTT_RX_FP_DATA_FILTER_FLASG3;
+	} else {
+		tlv_filter = ath12k_mac_mon_status_filter_default;
+	}
+
+	ar->debug.rx_filter = tlv_filter.rx_filter;
+
+	for (i = 0; i < ar->ab->hw_params->num_rxdma_per_pdev; i++) {
+		ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+		ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id + i,
+						       HAL_RXDMA_MONITOR_DST,
+						       DP_RXDMA_REFILL_RING_SIZE,
+						       &tlv_filter);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to set rx filter for monitor status ring\n");
+			goto exit;
+		}
+	}
+
+	ar->debug.extd_rx_stats = !!enable;
+	ret = count;
+exit:
+	wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+	return ret;
+}
+
+static ssize_t ath12k_read_extd_rx_stats(struct file *file,
+					 char __user *ubuf,
+					 size_t count, loff_t *ppos)
+{
+	struct ath12k *ar = file->private_data;
+	char buf[32];
+	int len = 0;
+
+	wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+	len = scnprintf(buf, sizeof(buf) - len, "%d\n",
+			ar->debug.extd_rx_stats);
+	wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+
+	return simple_read_from_buffer(ubuf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_extd_rx_stats = {
+	.read = ath12k_read_extd_rx_stats,
+	.write = ath12k_write_extd_rx_stats,
+	.open = simple_open,
+};
+
+static int ath12k_open_link_stats(struct inode *inode, struct file *file)
+{
+	struct ath12k_vif *ahvif = inode->i_private;
+	size_t len = 0, buf_len = (PAGE_SIZE * 2);
+	struct ath12k_link_stats linkstat;
+	struct ath12k_link_vif *arvif;
+	unsigned long links_map;
+	struct wiphy *wiphy;
+	int link_id, i;
+	char *buf;
+
+	if (!ahvif)
+		return -EINVAL;
+
+	buf = kzalloc(buf_len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	wiphy = ahvif->ah->hw->wiphy;
+	wiphy_lock(wiphy);
+
+	links_map = ahvif->links_map;
+	for_each_set_bit(link_id, &links_map,
+			 IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = rcu_dereference_protected(ahvif->link[link_id],
+						  lockdep_is_held(&wiphy->mtx));
+
+		spin_lock_bh(&arvif->link_stats_lock);
+		linkstat = arvif->link_stats;
+		spin_unlock_bh(&arvif->link_stats_lock);
+
+		len += scnprintf(buf + len, buf_len - len,
+				 "link[%d] Tx Unicast Frames Enqueued  = %d\n",
+				 link_id, linkstat.tx_enqueued);
+		len += scnprintf(buf + len, buf_len - len,
+				 "link[%d] Tx Broadcast Frames Enqueued = %d\n",
+				 link_id, linkstat.tx_bcast_mcast);
+		len += scnprintf(buf + len, buf_len - len,
+				 "link[%d] Tx Frames Completed = %d\n",
+				 link_id, linkstat.tx_completed);
+		len += scnprintf(buf + len, buf_len - len,
+				 "link[%d] Tx Frames Dropped = %d\n",
+				 link_id, linkstat.tx_dropped);
+
+		len += scnprintf(buf + len, buf_len - len,
+				 "link[%d] Tx Frame descriptor Encap Type = ",
+				 link_id);
+
+		len += scnprintf(buf + len, buf_len - len,
+					 " raw:%d",
+					 linkstat.tx_encap_type[0]);
+
+		len += scnprintf(buf + len, buf_len - len,
+					 " native_wifi:%d",
+					 linkstat.tx_encap_type[1]);
+
+		len += scnprintf(buf + len, buf_len - len,
+					 " ethernet:%d",
+					 linkstat.tx_encap_type[2]);
+
+		len += scnprintf(buf + len, buf_len - len,
+				 "\nlink[%d] Tx Frame descriptor Encrypt Type = ",
+				 link_id);
+
+		for (i = 0; i < HAL_ENCRYPT_TYPE_MAX; i++) {
+			len += scnprintf(buf + len, buf_len - len,
+					 " %d:%d", i,
+					 linkstat.tx_encrypt_type[i]);
+		}
+		len += scnprintf(buf + len, buf_len - len,
+				 "\nlink[%d] Tx Frame descriptor Type = buffer:%d extension:%d\n",
+				 link_id, linkstat.tx_desc_type[0],
+				 linkstat.tx_desc_type[1]);
+
+		len += scnprintf(buf + len, buf_len - len,
+				"------------------------------------------------------\n");
+	}
+
+	wiphy_unlock(wiphy);
+
+	file->private_data = buf;
+
+	return 0;
+}
+
+static int ath12k_release_link_stats(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+	return 0;
+}
+
+static ssize_t ath12k_read_link_stats(struct file *file,
+				      char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	const char *buf = file->private_data;
+	size_t len = strlen(buf);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations ath12k_fops_link_stats = {
+	.open = ath12k_open_link_stats,
+	.release = ath12k_release_link_stats,
+	.read = ath12k_read_link_stats,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif)
+{
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+
+	debugfs_create_file("link_stats", 0400, vif->debugfs_dir, ahvif,
+			    &ath12k_fops_link_stats);
+}
+
 void ath12k_debugfs_soc_create(struct ath12k_base *ab)
 {
 	bool dput_needed;
@@ -55,6 +1001,9 @@
 
 	if (dput_needed)
 		dput(debugfs_ath12k);
+
+	debugfs_create_file("bdf_boardname", 0600, ab->debugfs_soc, ab,
+			    &fops_bdf_boardname);
 }
 
 void ath12k_debugfs_soc_destroy(struct ath12k_base *ab)
@@ -68,6 +1017,278 @@
 	 */
 }
 
+void
+ath12k_debugfs_fw_stats_process(struct ath12k *ar,
+				struct ath12k_fw_stats *stats)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_pdev *pdev;
+	bool is_end;
+	static unsigned int num_vdev, num_bcn;
+	size_t total_vdevs_started = 0;
+	int i;
+
+	if (stats->stats_id == WMI_REQUEST_VDEV_STAT) {
+		if (list_empty(&stats->vdevs)) {
+			ath12k_warn(ab, "empty vdev stats");
+			return;
+		}
+		/* FW sends all the active VDEV stats irrespective of PDEV,
+		 * hence limit until the count of all VDEVs started
+		 */
+		rcu_read_lock();
+		for (i = 0; i < ab->num_radios; i++) {
+			pdev = rcu_dereference(ab->pdevs_active[i]);
+			if (pdev && pdev->ar)
+				total_vdevs_started += pdev->ar->num_started_vdevs;
+		}
+		rcu_read_unlock();
+
+		is_end = ((++num_vdev) == total_vdevs_started);
+
+		list_splice_tail_init(&stats->vdevs,
+				      &ar->fw_stats.vdevs);
+
+		if (is_end) {
+			ar->fw_stats.fw_stats_done = true;
+			num_vdev = 0;
+		}
+		return;
+	}
+	if (stats->stats_id == WMI_REQUEST_BCN_STAT) {
+		if (list_empty(&stats->bcn)) {
+			ath12k_warn(ab, "empty beacon stats");
+			return;
+		}
+		/* Mark end until we reached the count of all started VDEVs
+		 * within the PDEV
+		 */
+		is_end = ((++num_bcn) == ar->num_started_vdevs);
+
+		list_splice_tail_init(&stats->bcn,
+				      &ar->fw_stats.bcn);
+
+		if (is_end) {
+			ar->fw_stats.fw_stats_done = true;
+			num_bcn = 0;
+		}
+	}
+}
+
+static int ath12k_open_vdev_stats(struct inode *inode, struct file *file)
+{
+	struct ath12k *ar = inode->i_private;
+	struct ath12k_fw_stats_req_params param;
+	struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+	int ret;
+
+	guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+	if (!ah)
+		return -ENETDOWN;
+
+	if (ah->state != ATH12K_HW_STATE_ON)
+		return -ENETDOWN;
+
+	void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC);
+	if (!buf)
+		return -ENOMEM;
+
+	param.pdev_id = ath12k_mac_get_target_pdev_id(ar);
+	/* VDEV stats is always sent for all active VDEVs from FW */
+	param.vdev_id = 0;
+	param.stats_id = WMI_REQUEST_VDEV_STAT;
+
+	ret = ath12k_mac_get_fw_stats(ar, &param);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to request fw vdev stats: %d\n", ret);
+		return ret;
+	}
+
+	ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id,
+				 buf);
+
+	file->private_data = no_free_ptr(buf);
+
+	return 0;
+}
+
+static int ath12k_release_vdev_stats(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+
+	return 0;
+}
+
+static ssize_t ath12k_read_vdev_stats(struct file *file,
+				      char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	const char *buf = file->private_data;
+	size_t len = strlen(buf);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_vdev_stats = {
+	.open = ath12k_open_vdev_stats,
+	.release = ath12k_release_vdev_stats,
+	.read = ath12k_read_vdev_stats,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static int ath12k_open_bcn_stats(struct inode *inode, struct file *file)
+{
+	struct ath12k *ar = inode->i_private;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_fw_stats_req_params param;
+	struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+	int ret;
+
+	guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+	if (ah && ah->state != ATH12K_HW_STATE_ON)
+		return -ENETDOWN;
+
+	void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC);
+	if (!buf)
+		return -ENOMEM;
+
+	param.pdev_id = ath12k_mac_get_target_pdev_id(ar);
+	param.stats_id = WMI_REQUEST_BCN_STAT;
+
+	/* loop all active VDEVs for bcn stats */
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		if (!arvif->is_up)
+			continue;
+
+		param.vdev_id = arvif->vdev_id;
+		ret = ath12k_mac_get_fw_stats(ar, &param);
+		if (ret) {
+			ath12k_warn(ar->ab, "failed to request fw bcn stats: %d\n", ret);
+			return ret;
+		}
+	}
+
+	ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id,
+				 buf);
+	/* since beacon stats request is looped for all active VDEVs, saved fw
+	 * stats is not freed for each request until done for all active VDEVs
+	 */
+	spin_lock_bh(&ar->data_lock);
+	ath12k_fw_stats_bcn_free(&ar->fw_stats.bcn);
+	spin_unlock_bh(&ar->data_lock);
+
+	file->private_data = no_free_ptr(buf);
+
+	return 0;
+}
+
+static int ath12k_release_bcn_stats(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+
+	return 0;
+}
+
+static ssize_t ath12k_read_bcn_stats(struct file *file,
+				     char __user *user_buf,
+				     size_t count, loff_t *ppos)
+{
+	const char *buf = file->private_data;
+	size_t len = strlen(buf);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_bcn_stats = {
+	.open = ath12k_open_bcn_stats,
+	.release = ath12k_release_bcn_stats,
+	.read = ath12k_read_bcn_stats,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static int ath12k_open_pdev_stats(struct inode *inode, struct file *file)
+{
+	struct ath12k *ar = inode->i_private;
+	struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_fw_stats_req_params param;
+	int ret;
+
+	guard(wiphy)(ath12k_ar_to_hw(ar)->wiphy);
+
+	if (ah && ah->state != ATH12K_HW_STATE_ON)
+		return -ENETDOWN;
+
+	void *buf __free(kfree) = kzalloc(ATH12K_FW_STATS_BUF_SIZE, GFP_ATOMIC);
+	if (!buf)
+		return -ENOMEM;
+
+	param.pdev_id = ath12k_mac_get_target_pdev_id(ar);
+	param.vdev_id = 0;
+	param.stats_id = WMI_REQUEST_PDEV_STAT;
+
+	ret = ath12k_mac_get_fw_stats(ar, &param);
+	if (ret) {
+		ath12k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+		return ret;
+	}
+
+	ath12k_wmi_fw_stats_dump(ar, &ar->fw_stats, param.stats_id,
+				 buf);
+
+	file->private_data = no_free_ptr(buf);
+
+	return 0;
+}
+
+static int ath12k_release_pdev_stats(struct inode *inode, struct file *file)
+{
+	kfree(file->private_data);
+
+	return 0;
+}
+
+static ssize_t ath12k_read_pdev_stats(struct file *file,
+				      char __user *user_buf,
+				      size_t count, loff_t *ppos)
+{
+	const char *buf = file->private_data;
+	size_t len = strlen(buf);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static const struct file_operations fops_pdev_stats = {
+	.open = ath12k_open_pdev_stats,
+	.release = ath12k_release_pdev_stats,
+	.read = ath12k_read_pdev_stats,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static
+void ath12k_debugfs_fw_stats_register(struct ath12k *ar)
+{
+	struct dentry *fwstats_dir = debugfs_create_dir("fw_stats",
+							ar->debug.debugfs_pdev);
+
+	/* all stats debugfs files created are under "fw_stats" directory
+	 * created per PDEV
+	 */
+	debugfs_create_file("vdev_stats", 0600, fwstats_dir, ar,
+			    &fops_vdev_stats);
+	debugfs_create_file("beacon_stats", 0600, fwstats_dir, ar,
+			    &fops_bcn_stats);
+	debugfs_create_file("pdev_stats", 0600, fwstats_dir, ar,
+			    &fops_pdev_stats);
+
+	ath12k_fw_stats_init(ar);
+}
+
 void ath12k_debugfs_register(struct ath12k *ar)
 {
 	struct ath12k_base *ab = ar->ab;
@@ -89,9 +1310,23 @@
 		debugfs_create_file("dfs_simulate_radar", 0200,
 				    ar->debug.debugfs_pdev, ar,
 				    &fops_simulate_radar);
+		debugfs_create_bool("dfs_block_radar_events", 0200,
+				    ar->debug.debugfs_pdev,
+				    &ar->dfs_block_radar_events);
 	}
 
+	debugfs_create_file("tpc_stats", 0400, ar->debug.debugfs_pdev, ar,
+			    &fops_tpc_stats);
+	debugfs_create_file("tpc_stats_type", 0200, ar->debug.debugfs_pdev,
+			    ar, &fops_tpc_stats_type);
+	init_completion(&ar->debug.tpc_complete);
+
 	ath12k_debugfs_htt_stats_register(ar);
+	ath12k_debugfs_fw_stats_register(ar);
+
+	debugfs_create_file("ext_rx_stats", 0644,
+			    ar->debug.debugfs_pdev, ar,
+			    &fops_extd_rx_stats);
 }
 
 void ath12k_debugfs_unregister(struct ath12k *ar)
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/debugfs.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/debugfs.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/debugfs.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/debugfs.h	2025-09-25 17:40:34.139360145 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef _ATH12K_DEBUGFS_H_
@@ -12,6 +12,102 @@
 void ath12k_debugfs_soc_destroy(struct ath12k_base *ab);
 void ath12k_debugfs_register(struct ath12k *ar);
 void ath12k_debugfs_unregister(struct ath12k *ar);
+void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
+				     struct ath12k_fw_stats *stats);
+static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
+{
+	return ar->debug.extd_rx_stats;
+}
+
+static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
+{
+	return ar->debug.rx_filter;
+}
+
+void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+			       struct ieee80211_vif *vif);
+
+#define ATH12K_CCK_RATES			4
+#define ATH12K_OFDM_RATES			8
+#define ATH12K_HT_RATES				8
+#define ATH12K_VHT_RATES			12
+#define ATH12K_HE_RATES				12
+#define ATH12K_HE_RATES_WITH_EXTRA_MCS		14
+#define ATH12K_EHT_RATES			16
+#define HE_EXTRA_MCS_SUPPORT			GENMASK(31, 16)
+#define ATH12K_NSS_1				1
+#define ATH12K_NSS_4				4
+#define ATH12K_NSS_8				8
+#define ATH12K_HW_NSS(_rcode)			(((_rcode) >> 5) & 0x7)
+#define TPC_STATS_WAIT_TIME			(1 * HZ)
+#define MAX_TPC_PREAM_STR_LEN			7
+#define TPC_INVAL				-128
+#define TPC_MAX					127
+#define TPC_STATS_WAIT_TIME			(1 * HZ)
+#define TPC_STATS_TOT_ROW			700
+#define TPC_STATS_TOT_COLUMN			100
+#define MODULATION_LIMIT			126
+
+#define ATH12K_TPC_STATS_BUF_SIZE	(TPC_STATS_TOT_ROW * TPC_STATS_TOT_COLUMN)
+
+enum wmi_tpc_pream_bw {
+	WMI_TPC_PREAM_CCK,
+	WMI_TPC_PREAM_OFDM,
+	WMI_TPC_PREAM_HT20,
+	WMI_TPC_PREAM_HT40,
+	WMI_TPC_PREAM_VHT20,
+	WMI_TPC_PREAM_VHT40,
+	WMI_TPC_PREAM_VHT80,
+	WMI_TPC_PREAM_VHT160,
+	WMI_TPC_PREAM_HE20,
+	WMI_TPC_PREAM_HE40,
+	WMI_TPC_PREAM_HE80,
+	WMI_TPC_PREAM_HE160,
+	WMI_TPC_PREAM_EHT20,
+	WMI_TPC_PREAM_EHT40,
+	WMI_TPC_PREAM_EHT60,
+	WMI_TPC_PREAM_EHT80,
+	WMI_TPC_PREAM_EHT120,
+	WMI_TPC_PREAM_EHT140,
+	WMI_TPC_PREAM_EHT160,
+	WMI_TPC_PREAM_EHT200,
+	WMI_TPC_PREAM_EHT240,
+	WMI_TPC_PREAM_EHT280,
+	WMI_TPC_PREAM_EHT320,
+	WMI_TPC_PREAM_MAX
+};
+
+enum ath12k_debug_tpc_stats_ctl_mode {
+	ATH12K_TPC_STATS_CTL_MODE_LEGACY_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HT_VHT20_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT20_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HT_VHT40_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT40_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_VHT80_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT80_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_VHT160_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT160_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HE_EHT320_5GHZ_6GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_CCK_2GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_LEGACY_2GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HT20_2GHZ,
+	ATH12K_TPC_STATS_CTL_MODE_HT40_2GHZ,
+
+	ATH12K_TPC_STATS_CTL_MODE_EHT80_SU_PUNC20 = 23,
+	ATH12K_TPC_STATS_CTL_MODE_EHT160_SU_PUNC20,
+	ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC40,
+	ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC80,
+	ATH12K_TPC_STATS_CTL_MODE_EHT320_SU_PUNC120
+};
+
+enum ath12k_debug_tpc_stats_support_modes {
+	ATH12K_TPC_STATS_SUPPORT_160 = 0,
+	ATH12K_TPC_STATS_SUPPORT_320,
+	ATH12K_TPC_STATS_SUPPORT_AX,
+	ATH12K_TPC_STATS_SUPPORT_AX_EXTRA_MCS,
+	ATH12K_TPC_STATS_SUPPORT_BE,
+	ATH12K_TPC_STATS_SUPPORT_BE_PUNC,
+};
 #else
 static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab)
 {
@@ -29,6 +125,25 @@
 {
 }
 
+static inline void ath12k_debugfs_fw_stats_process(struct ath12k *ar,
+						   struct ath12k_fw_stats *stats)
+{
+}
+
+static inline bool ath12k_debugfs_is_extd_rx_stats_enabled(struct ath12k *ar)
+{
+	return false;
+}
+
+static inline int ath12k_debugfs_rx_filter(struct ath12k *ar)
+{
+	return 0;
+}
+
+static inline void ath12k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+					     struct ieee80211_vif *vif)
+{
+}
 #endif /* CONFIG_ATH12K_DEBUGFS */
 
 #endif /* _ATH12K_DEBUGFS_H_ */
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c	2025-09-25 17:40:34.139360145 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/vmalloc.h>
@@ -48,6 +48,56 @@
 					footer);
 }
 
+static u32
+print_array_to_buf_s8(u8 *buf, u32 offset, const char *header, u32 stats_index,
+		      const s8 *array, u32 array_len, const char *footer)
+{
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	int index = 0;
+	u8 i;
+
+	if (header)
+		index += scnprintf(buf + offset, buf_len - offset, "%s = ", header);
+
+	for (i = 0; i < array_len; i++) {
+		index += scnprintf(buf + offset + index, (buf_len - offset) - index,
+				   " %u:%d,", stats_index++, array[i]);
+	}
+
+	index--;
+	if ((offset + index) < buf_len)
+		buf[offset + index] = '\0';
+
+	if (footer) {
+		index += scnprintf(buf + offset + index, (buf_len - offset) - index,
+				   "%s", footer);
+	}
+
+	return index;
+}
+
+static const char *ath12k_htt_ax_tx_rx_ru_size_to_str(u8 ru_size)
+{
+	switch (ru_size) {
+	case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26:
+		return "26";
+	case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52:
+		return "52";
+	case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106:
+		return "106";
+	case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242:
+		return "242";
+	case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484:
+		return "484";
+	case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996:
+		return "996";
+	case ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2:
+		return "996x2";
+	default:
+		return "unknown";
+	}
+}
+
 static const char *ath12k_htt_be_tx_rx_ru_size_to_str(u8 ru_size)
 {
 	switch (ru_size) {
@@ -88,6 +138,17 @@
 	}
 }
 
+static const char*
+ath12k_tx_ru_size_to_str(enum ath12k_htt_stats_ru_type ru_type, u8 ru_size)
+{
+	if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+		return ath12k_htt_ax_tx_rx_ru_size_to_str(ru_size);
+	else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+		return ath12k_htt_be_tx_rx_ru_size_to_str(ru_size);
+	else
+		return "unknown";
+}
+
 static void
 htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u16 tag_len,
 				struct debug_htt_stats_req *stats_req)
@@ -2277,7 +2338,7 @@
 	len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_grp_size",
 				  htt_stats_buf->ul_mumimo_grp_best_grp_size,
 				  ATH12K_HTT_STATS_NUM_MAX_MUMIMO_SZ, "\n");
-	len += print_array_to_buf_index(buf, len, "ul_mumimo_grp_best_num_usrs = ", 1,
+	len += print_array_to_buf(buf, len, "ul_mumimo_grp_best_num_usrs = ",
 					htt_stats_buf->ul_mumimo_grp_best_usrs,
 					ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS, "\n");
 	len += print_array_to_buf(buf, len,
@@ -2479,6 +2540,268 @@
 }
 
 static void
+ath12k_htt_print_tx_sounding_stats_tlv(const void *tag_buf, u16 tag_len,
+				       struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_tx_sounding_stats_tlv *htt_stats_buf = tag_buf;
+	const __le32 *cbf_20, *cbf_40, *cbf_80, *cbf_160, *cbf_320;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u32 tx_sounding_mode;
+	u8 i, u;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	cbf_20 = htt_stats_buf->cbf_20;
+	cbf_40 = htt_stats_buf->cbf_40;
+	cbf_80 = htt_stats_buf->cbf_80;
+	cbf_160 = htt_stats_buf->cbf_160;
+	cbf_320 = htt_stats_buf->cbf_320;
+	tx_sounding_mode = le32_to_cpu(htt_stats_buf->tx_sounding_mode);
+
+	if (tx_sounding_mode == ATH12K_HTT_TX_AC_SOUNDING_MODE) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "HTT_TX_AC_SOUNDING_STATS_TLV:\n");
+		len += scnprintf(buf + len, buf_len - len,
+				 "ac_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len,
+				 "ac_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len,
+				 "ac_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len,
+				 "ac_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+
+		for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS; u++) {
+			len += scnprintf(buf + len, buf_len - len,
+					 "Sounding User_%u = 20MHz: %u, ", u,
+					 le32_to_cpu(htt_stats_buf->sounding[i++]));
+			len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ",
+					 le32_to_cpu(htt_stats_buf->sounding[i++]));
+			len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ",
+					 le32_to_cpu(htt_stats_buf->sounding[i++]));
+			len += scnprintf(buf + len, buf_len - len, "160MHz: %u\n",
+					 le32_to_cpu(htt_stats_buf->sounding[i++]));
+		}
+	} else if (tx_sounding_mode == ATH12K_HTT_TX_AX_SOUNDING_MODE) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "\nHTT_TX_AX_SOUNDING_STATS_TLV:\n");
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len,
+				 "ax_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+
+		for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS; u++) {
+			len += scnprintf(buf + len, buf_len - len,
+					 "Sounding User_%u = 20MHz: %u, ", u,
+					 le32_to_cpu(htt_stats_buf->sounding[i++]));
+			len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ",
+					 le32_to_cpu(htt_stats_buf->sounding[i++]));
+			len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ",
+					 le32_to_cpu(htt_stats_buf->sounding[i++]));
+			len += scnprintf(buf + len, buf_len - len, "160MHz: %u\n",
+					 le32_to_cpu(htt_stats_buf->sounding[i++]));
+		}
+	} else if (tx_sounding_mode == ATH12K_HTT_TX_BE_SOUNDING_MODE) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "\nHTT_TX_BE_SOUNDING_STATS_TLV:\n");
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_cbf_20 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_20[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_20[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_cbf_40 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_40[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_40[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_cbf_80 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_80[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_80[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_cbf_160 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_160[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_160[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len,
+				 "be_cbf_320 = IBF: %u, SU_SIFS: %u, SU_RBO: %u, ",
+				 le32_to_cpu(cbf_320[ATH12K_HTT_IMPL_STEER_STATS]),
+				 le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_SUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_SURBO_STEER_STATS]));
+		len += scnprintf(buf + len, buf_len - len, "MU_SIFS: %u, MU_RBO: %u\n",
+				 le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_MUSIFS_STEER_STATS]),
+				 le32_to_cpu(cbf_320[ATH12K_HTT_EXPL_MURBO_STEER_STATS]));
+		for (u = 0, i = 0; u < ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS; u++) {
+			len += scnprintf(buf + len, buf_len - len,
+					 "Sounding User_%u = 20MHz: %u, ", u,
+					 le32_to_cpu(htt_stats_buf->sounding[i++]));
+			len += scnprintf(buf + len, buf_len - len, "40MHz: %u, ",
+					 le32_to_cpu(htt_stats_buf->sounding[i++]));
+			len += scnprintf(buf + len, buf_len - len, "80MHz: %u, ",
+					 le32_to_cpu(htt_stats_buf->sounding[i++]));
+			len += scnprintf(buf + len, buf_len - len,
+					 "160MHz: %u, 320MHz: %u\n",
+					 le32_to_cpu(htt_stats_buf->sounding[i++]),
+					 le32_to_cpu(htt_stats_buf->sounding_320[u]));
+		}
+	} else if (tx_sounding_mode == ATH12K_HTT_TX_CMN_SOUNDING_MODE) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "\nCV UPLOAD HANDLER STATS:\n");
+		len += scnprintf(buf + len, buf_len - len, "cv_nc_mismatch_err = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_nc_mismatch_err));
+		len += scnprintf(buf + len, buf_len - len, "cv_fcs_err = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_fcs_err));
+		len += scnprintf(buf + len, buf_len - len, "cv_frag_idx_mismatch = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_frag_idx_mismatch));
+		len += scnprintf(buf + len, buf_len - len, "cv_invalid_peer_id = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_invalid_peer_id));
+		len += scnprintf(buf + len, buf_len - len, "cv_no_txbf_setup = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_no_txbf_setup));
+		len += scnprintf(buf + len, buf_len - len, "cv_expiry_in_update = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_expiry_in_update));
+		len += scnprintf(buf + len, buf_len - len, "cv_pkt_bw_exceed = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_pkt_bw_exceed));
+		len += scnprintf(buf + len, buf_len - len, "cv_dma_not_done_err = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_dma_not_done_err));
+		len += scnprintf(buf + len, buf_len - len, "cv_update_failed = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_update_failed));
+		len += scnprintf(buf + len, buf_len - len, "cv_dma_timeout_error = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_dma_timeout_error));
+		len += scnprintf(buf + len, buf_len - len, "cv_buf_ibf_uploads = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_buf_ibf_uploads));
+		len += scnprintf(buf + len, buf_len - len, "cv_buf_ebf_uploads = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_buf_ebf_uploads));
+		len += scnprintf(buf + len, buf_len - len, "cv_buf_received = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_buf_received));
+		len += scnprintf(buf + len, buf_len - len, "cv_buf_fed_back = %u\n\n",
+				 le32_to_cpu(htt_stats_buf->cv_buf_fed_back));
+
+		len += scnprintf(buf + len, buf_len - len, "CV QUERY STATS:\n");
+		len += scnprintf(buf + len, buf_len - len, "cv_total_query = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_total_query));
+		len += scnprintf(buf + len, buf_len - len,
+				 "cv_total_pattern_query = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_total_pattern_query));
+		len += scnprintf(buf + len, buf_len - len, "cv_total_bw_query = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_total_bw_query));
+		len += scnprintf(buf + len, buf_len - len, "cv_invalid_bw_coding = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_invalid_bw_coding));
+		len += scnprintf(buf + len, buf_len - len, "cv_forced_sounding = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_forced_sounding));
+		len += scnprintf(buf + len, buf_len - len,
+				 "cv_standalone_sounding = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_standalone_sounding));
+		len += scnprintf(buf + len, buf_len - len, "cv_nc_mismatch = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_nc_mismatch));
+		len += scnprintf(buf + len, buf_len - len, "cv_fb_type_mismatch = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_fb_type_mismatch));
+		len += scnprintf(buf + len, buf_len - len, "cv_ofdma_bw_mismatch = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_ofdma_bw_mismatch));
+		len += scnprintf(buf + len, buf_len - len, "cv_bw_mismatch = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_bw_mismatch));
+		len += scnprintf(buf + len, buf_len - len, "cv_pattern_mismatch = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_pattern_mismatch));
+		len += scnprintf(buf + len, buf_len - len, "cv_preamble_mismatch = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_preamble_mismatch));
+		len += scnprintf(buf + len, buf_len - len, "cv_nr_mismatch = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_nr_mismatch));
+		len += scnprintf(buf + len, buf_len - len,
+				 "cv_in_use_cnt_exceeded = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_in_use_cnt_exceeded));
+		len += scnprintf(buf + len, buf_len - len, "cv_ntbr_sounding = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_ntbr_sounding));
+		len += scnprintf(buf + len, buf_len - len,
+				 "cv_found_upload_in_progress = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_found_upload_in_progress));
+		len += scnprintf(buf + len, buf_len - len,
+				 "cv_expired_during_query = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_expired_during_query));
+		len += scnprintf(buf + len, buf_len - len, "cv_found = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_found));
+		len += scnprintf(buf + len, buf_len - len, "cv_not_found = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_not_found));
+		len += scnprintf(buf + len, buf_len - len, "cv_total_query_ibf = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_total_query_ibf));
+		len += scnprintf(buf + len, buf_len - len, "cv_found_ibf = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_found_ibf));
+		len += scnprintf(buf + len, buf_len - len, "cv_not_found_ibf = %u\n",
+				 le32_to_cpu(htt_stats_buf->cv_not_found_ibf));
+		len += scnprintf(buf + len, buf_len - len,
+				 "cv_expired_during_query_ibf = %u\n\n",
+				 le32_to_cpu(htt_stats_buf->cv_expired_during_query_ibf));
+	}
+
+	stats_req->buf_len = len;
+}
+
+static void
 ath12k_htt_print_pdev_obss_pd_stats_tlv(const void *tag_buf, u16 tag_len,
 					struct debug_htt_stats_req *stats_req)
 {
@@ -2544,6 +2867,1472 @@
 }
 
 static void
+ath12k_htt_print_latency_prof_ctx_tlv(const void *tag_buf, u16 tag_len,
+				      struct debug_htt_stats_req *stats_req)
+{
+	/* Render the HTT latency-profile context TLV (profiling duration plus
+	 * tx/rx MSDU and MPDU counters) as text into the debugfs stats buffer,
+	 * appending at stats_req->buf_len and updating it on return.
+	 */
+	const struct ath12k_htt_latency_prof_ctx_tlv *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	/* Ignore a truncated TLV from firmware rather than read past it. */
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_STATS_LATENCY_CTX_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "duration = %u\n",
+			 le32_to_cpu(htt_stats_buf->duration));
+	len += scnprintf(buf + len, buf_len - len, "tx_msdu_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->tx_msdu_cnt));
+	len += scnprintf(buf + len, buf_len - len, "tx_mpdu_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->tx_mpdu_cnt));
+	len += scnprintf(buf + len, buf_len - len, "rx_msdu_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_msdu_cnt));
+	len += scnprintf(buf + len, buf_len - len, "rx_mpdu_cnt = %u\n\n",
+			 le32_to_cpu(htt_stats_buf->rx_mpdu_cnt));
+
+	stats_req->buf_len = len;
+}
+
+/* Print the latency-profile counter TLV: currently only the number of times
+ * latency profiling was enabled. Appends to the debugfs stats buffer.
+ */
+static void
+ath12k_htt_print_latency_prof_cnt(const void *tag_buf, u16 tag_len,
+				  struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_latency_prof_cnt_tlv *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	/* Ignore a truncated TLV from firmware. */
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_STATS_LATENCY_CNT_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "prof_enable_cnt = %u\n\n",
+			 le32_to_cpu(htt_stats_buf->prof_enable_cnt));
+
+	stats_req->buf_len = len;
+}
+
+/* Print one per-profile latency stats TLV: profile name, min/max/last/total/
+ * average counters, histogram interval and the histogram itself. The section
+ * header is emitted only when the TLV's print_header flag is 1, so repeated
+ * profile TLVs share a single header.
+ */
+static void
+ath12k_htt_print_latency_prof_stats_tlv(const void *tag_buf, u16 tag_len,
+					struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_latency_prof_stats_tlv *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	/* Ignore a truncated TLV from firmware. */
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	if (le32_to_cpu(htt_stats_buf->print_header) == 1) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "HTT_STATS_LATENCY_PROF_TLV:\n");
+	}
+
+	/* NOTE(review): latency_prof_name comes from firmware and is printed
+	 * with %s — presumed NUL-terminated within the TLV; confirm the struct
+	 * reserves a terminator.
+	 */
+	len += scnprintf(buf + len, buf_len - len, "Latency name = %s\n",
+			 htt_stats_buf->latency_prof_name);
+	len += scnprintf(buf + len, buf_len - len, "count = %u\n",
+			 le32_to_cpu(htt_stats_buf->cnt));
+	len += scnprintf(buf + len, buf_len - len, "minimum = %u\n",
+			 le32_to_cpu(htt_stats_buf->min));
+	len += scnprintf(buf + len, buf_len - len, "maximum = %u\n",
+			 le32_to_cpu(htt_stats_buf->max));
+	len += scnprintf(buf + len, buf_len - len, "last = %u\n",
+			 le32_to_cpu(htt_stats_buf->last));
+	len += scnprintf(buf + len, buf_len - len, "total = %u\n",
+			 le32_to_cpu(htt_stats_buf->tot));
+	len += scnprintf(buf + len, buf_len - len, "average = %u\n",
+			 le32_to_cpu(htt_stats_buf->avg));
+	len += scnprintf(buf + len, buf_len - len, "histogram interval = %u\n",
+			 le32_to_cpu(htt_stats_buf->hist_intvl));
+	len += print_array_to_buf(buf, len, "histogram", htt_stats_buf->hist,
+				  ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print the per-pdev UL OFDMA trigger RX stats TLV: MCS/GI/NSS/BW counter
+ * arrays, RU-size PPDU histograms (labelled via
+ * ath12k_htt_ax_tx_rx_ru_size_to_str()) and per-station RSSI tracking arrays.
+ */
+static void
+ath12k_htt_print_ul_ofdma_trigger_stats(const void *tag_buf, u16 tag_len,
+					struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_rx_pdev_ul_trigger_stats_tlv *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u32 mac_id;
+	u8 j;
+
+	/* Ignore a truncated TLV from firmware. */
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_RX_PDEV_UL_TRIGGER_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_11ax_ul_ofdma));
+	len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs",
+				  htt_stats_buf->ul_ofdma_rx_mcs,
+				  ATH12K_HTT_RX_NUM_MCS_CNTRS, "\n");
+	/* One per-MCS row per guard-interval setting. */
+	for (j = 0; j < ATH12K_HTT_RX_NUM_GI_CNTRS; j++) {
+		len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_gi[%u]", j);
+		len += print_array_to_buf(buf, len, "",
+					  htt_stats_buf->ul_ofdma_rx_gi[j],
+					  ATH12K_HTT_RX_NUM_MCS_CNTRS, "\n");
+	}
+
+	/* NSS indices are printed starting from 1 (1 spatial stream). */
+	len += print_array_to_buf_index(buf, len, "ul_ofdma_rx_nss", 1,
+					htt_stats_buf->ul_ofdma_rx_nss,
+					ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+	len += print_array_to_buf(buf, len, "ul_ofdma_rx_bw",
+				  htt_stats_buf->ul_ofdma_rx_bw,
+				  ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+
+	/* Index 0 holds half-bandwidth, index 1 quarter-bandwidth counters. */
+	for (j = 0; j < ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES; j++) {
+		len += scnprintf(buf + len, buf_len - len, j == 0 ?
+				 "half_ul_ofdma_rx_bw" :
+				 "quarter_ul_ofdma_rx_bw");
+		len += print_array_to_buf(buf, len, "", htt_stats_buf->red_bw[j],
+					  ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+	}
+	len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
+			 le32_to_cpu(htt_stats_buf->ul_ofdma_rx_stbc));
+	len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
+			 le32_to_cpu(htt_stats_buf->ul_ofdma_rx_ldpc));
+
+	len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_ru_size_ppdu = ");
+	for (j = 0; j < ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS; j++)
+		len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+				 ath12k_htt_ax_tx_rx_ru_size_to_str(j),
+				 le32_to_cpu(htt_stats_buf->data_ru_size_ppdu[j]));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "rx_ulofdma_non_data_ru_size_ppdu = ");
+	for (j = 0; j < ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS; j++)
+		len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+				 ath12k_htt_ax_tx_rx_ru_size_to_str(j),
+				 le32_to_cpu(htt_stats_buf->non_data_ru_size_ppdu[j]));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	len += print_array_to_buf(buf, len, "rx_rssi_track_sta_aid",
+				  htt_stats_buf->uplink_sta_aid,
+				  ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+	len += print_array_to_buf(buf, len, "rx_sta_target_rssi",
+				  htt_stats_buf->uplink_sta_target_rssi,
+				  ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+	len += print_array_to_buf(buf, len, "rx_sta_fd_rssi",
+				  htt_stats_buf->uplink_sta_fd_rssi,
+				  ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+	len += print_array_to_buf(buf, len, "rx_sta_power_headroom",
+				  htt_stats_buf->uplink_sta_power_headroom,
+				  ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK, "\n");
+	len += scnprintf(buf + len, buf_len - len,
+			 "ul_ofdma_basic_trigger_rx_qos_null_only = %u\n\n",
+			 le32_to_cpu(htt_stats_buf->ul_ofdma_bsc_trig_rx_qos_null_only));
+
+	stats_req->buf_len = len;
+}
+
+/* Print one per-user UL OFDMA stats TLV. The section header is emitted only
+ * for user index 0, so consecutive user TLVs appear under a single heading;
+ * each counter name is suffixed with the user index.
+ */
+static void
+ath12k_htt_print_ul_ofdma_user_stats(const void *tag_buf, u16 tag_len,
+				     struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_rx_pdev_ul_ofdma_user_stats_tlv *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u32 user_index;
+
+	/* Ignore a truncated TLV from firmware. */
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	user_index = __le32_to_cpu(htt_stats_buf->user_index);
+
+	if (!user_index)
+		len += scnprintf(buf + len, buf_len - len,
+				 "HTT_RX_PDEV_UL_OFDMA_USER_STAS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_non_data_ppdu_%u = %u\n",
+			 user_index,
+			 le32_to_cpu(htt_stats_buf->rx_ulofdma_non_data_ppdu));
+	len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_ppdu_%u = %u\n",
+			 user_index,
+			 le32_to_cpu(htt_stats_buf->rx_ulofdma_data_ppdu));
+	len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_mpdu_ok_%u = %u\n",
+			 user_index,
+			 le32_to_cpu(htt_stats_buf->rx_ulofdma_mpdu_ok));
+	len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_mpdu_fail_%u = %u\n",
+			 user_index,
+			 le32_to_cpu(htt_stats_buf->rx_ulofdma_mpdu_fail));
+	len += scnprintf(buf + len, buf_len - len,
+			 "rx_ulofdma_non_data_nusers_%u = %u\n", user_index,
+			 le32_to_cpu(htt_stats_buf->rx_ulofdma_non_data_nusers));
+	len += scnprintf(buf + len, buf_len - len, "rx_ulofdma_data_nusers_%u = %u\n\n",
+			 user_index,
+			 le32_to_cpu(htt_stats_buf->rx_ulofdma_data_nusers));
+
+	stats_req->buf_len = len;
+}
+
+/* Print the UL MU-MIMO trigger stats TLV. MCS and GI counter rows are first
+ * assembled as "idx:val," pairs in a temporary string buffer (base MCS
+ * counters followed by the extended-MCS counters) before being emitted as a
+ * single line; RSSI/EVM arrays are printed per chain/user.
+ */
+static void
+ath12k_htt_print_ul_mumimo_trig_stats(const void *tag_buf, u16 tag_len,
+				      struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_rx_ul_mumimo_trig_stats_tlv *htt_stats_buf = tag_buf;
+	char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0};
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u32 mac_id;
+	u16 index;
+	u8 i, j;
+
+	/* Ignore a truncated TLV from firmware. */
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_RX_PDEV_UL_MUMIMO_TRIG_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_mumimo = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_11ax_ul_mumimo));
+	index = 0;
+	memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+	for (i = 0; i < ATH12K_HTT_RX_NUM_MCS_CNTRS; i++)
+		index += scnprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+				  " %u:%u,", i,
+				  le32_to_cpu(htt_stats_buf->ul_mumimo_rx_mcs[i]));
+
+	/* Extended MCS counters continue the index range after the base ones. */
+	for (i = 0; i < ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS; i++)
+		index += scnprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+				  " %u:%u,", i + ATH12K_HTT_RX_NUM_MCS_CNTRS,
+				  le32_to_cpu(htt_stats_buf->ul_mumimo_rx_mcs_ext[i]));
+	/* Strip the trailing comma left by the last pair. */
+	str_buf[--index] = '\0';
+	len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_mcs = %s\n", str_buf);
+
+	/* One combined base+extended MCS row per guard-interval setting. */
+	for (j = 0; j < ATH12K_HTT_RX_NUM_GI_CNTRS; j++) {
+		index = 0;
+		memset(&str_buf[index], 0x0, ATH12K_HTT_MAX_STRING_LEN);
+		for (i = 0; i < ATH12K_HTT_RX_NUM_MCS_CNTRS; i++)
+			index += scnprintf(&str_buf[index],
+					  ATH12K_HTT_MAX_STRING_LEN - index,
+					  " %u:%u,", i,
+					  le32_to_cpu(htt_stats_buf->ul_rx_gi[j][i]));
+
+		for (i = 0; i < ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS; i++)
+			index += scnprintf(&str_buf[index],
+					  ATH12K_HTT_MAX_STRING_LEN - index,
+					  " %u:%u,", i + ATH12K_HTT_RX_NUM_MCS_CNTRS,
+					  le32_to_cpu(htt_stats_buf->ul_gi_ext[j][i]));
+		str_buf[--index] = '\0';
+		len += scnprintf(buf + len, buf_len - len,
+				 "ul_mumimo_rx_gi_%u = %s\n", j, str_buf);
+	}
+
+	index = 0;
+	memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+	/* NSS indices are printed starting from 1 (1 spatial stream). */
+	len += print_array_to_buf_index(buf, len, "ul_mumimo_rx_nss", 1,
+					htt_stats_buf->ul_mumimo_rx_nss,
+					ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+
+	len += print_array_to_buf(buf, len, "ul_mumimo_rx_bw",
+				  htt_stats_buf->ul_mumimo_rx_bw,
+				  ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+	/* Index 0 holds half-bandwidth, index 1 quarter-bandwidth counters. */
+	for (i = 0; i < ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES; i++) {
+		index = 0;
+		memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+		for (j = 0; j < ATH12K_HTT_RX_NUM_BW_CNTRS; j++)
+			index += scnprintf(&str_buf[index],
+					  ATH12K_HTT_MAX_STRING_LEN - index,
+					  " %u:%u,", j,
+					  le32_to_cpu(htt_stats_buf->red_bw[i][j]));
+		str_buf[--index] = '\0';
+		len += scnprintf(buf + len, buf_len - len, "%s = %s\n",
+				 i == 0 ? "half_ul_mumimo_rx_bw" :
+				 "quarter_ul_mumimo_rx_bw", str_buf);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_stbc = %u\n",
+			 le32_to_cpu(htt_stats_buf->ul_mumimo_rx_stbc));
+	len += scnprintf(buf + len, buf_len - len, "ul_mumimo_rx_ldpc = %u\n",
+			 le32_to_cpu(htt_stats_buf->ul_mumimo_rx_ldpc));
+
+	/* Signed (dBm/dB) arrays below use the s8 printing helper. */
+	for (j = 0; j < ATH12K_HTT_RX_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_ul_mumimo_rssi_in_dbm: chain%u ", j);
+		len += print_array_to_buf_s8(buf, len, "", 0,
+					     htt_stats_buf->ul_rssi[j],
+					     ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+	}
+
+	for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_ul_mumimo_target_rssi: user_%u ", j);
+		len += print_array_to_buf_s8(buf, len, "", 0,
+					     htt_stats_buf->tgt_rssi[j],
+					     ATH12K_HTT_RX_NUM_BW_CNTRS, "\n");
+	}
+
+	for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_ul_mumimo_fd_rssi: user_%u ", j);
+		len += print_array_to_buf_s8(buf, len, "", 0,
+					     htt_stats_buf->fd[j],
+					     ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+	}
+
+	for (j = 0; j < ATH12K_HTT_TX_UL_MUMIMO_USER_STATS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_ulmumimo_pilot_evm_db_mean: user_%u ", j);
+		len += print_array_to_buf_s8(buf, len, "", 0,
+					     htt_stats_buf->db[j],
+					     ATH12K_HTT_RX_NUM_SPATIAL_STREAMS, "\n");
+	}
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "ul_mumimo_basic_trigger_rx_qos_null_only = %u\n\n",
+			 le32_to_cpu(htt_stats_buf->mumimo_bsc_trig_rx_qos_null_only));
+
+	stats_req->buf_len = len;
+}
+
+/* Print the RX flow search engine (FSE) stats TLV: software enable/disable
+ * and cache-invalidate counters, then hardware cache hit/search counters and
+ * bucketed occupancy / search-pending histograms. The bracketed labels are
+ * the value ranges each histogram bucket covers.
+ */
+static void
+ath12k_htt_print_rx_fse_stats_tlv(const void *tag_buf, u16 tag_len,
+				  struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_rx_fse_stats_tlv *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	/* Ignore a truncated TLV from firmware. */
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_STATS_RX_FSE_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "=== Software RX FSE STATS ===\n");
+	len += scnprintf(buf + len, buf_len - len, "Enable count  = %u\n",
+			 le32_to_cpu(htt_stats_buf->fse_enable_cnt));
+	len += scnprintf(buf + len, buf_len - len, "Disable count = %u\n",
+			 le32_to_cpu(htt_stats_buf->fse_disable_cnt));
+	len += scnprintf(buf + len, buf_len - len, "Cache invalidate entry count = %u\n",
+			 le32_to_cpu(htt_stats_buf->fse_cache_invalidate_entry_cnt));
+	len += scnprintf(buf + len, buf_len - len, "Full cache invalidate count = %u\n",
+			 le32_to_cpu(htt_stats_buf->fse_full_cache_invalidate_cnt));
+
+	len += scnprintf(buf + len, buf_len - len, "\n=== Hardware RX FSE STATS ===\n");
+	len += scnprintf(buf + len, buf_len - len, "Cache hits count = %u\n",
+			 le32_to_cpu(htt_stats_buf->fse_num_cache_hits_cnt));
+	len += scnprintf(buf + len, buf_len - len, "Cache no. of searches = %u\n",
+			 le32_to_cpu(htt_stats_buf->fse_num_searches_cnt));
+	/* Occupancy histograms: 10 buckets covering 0..128 cached entries. */
+	len += scnprintf(buf + len, buf_len - len, "Cache occupancy peak count:\n");
+	len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-16] = %u [17-32] = %u ",
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[0]),
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[1]),
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[2]));
+	len += scnprintf(buf + len, buf_len - len, "[33-48] = %u [49-64] = %u ",
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[3]),
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[4]));
+	len += scnprintf(buf + len, buf_len - len, "[65-80] = %u [81-96] = %u ",
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[5]),
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[6]));
+	len += scnprintf(buf + len, buf_len - len, "[97-112] = %u [113-127] = %u ",
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[7]),
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[8]));
+	len += scnprintf(buf + len, buf_len - len, "[128] = %u\n",
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_peak_cnt[9]));
+	len += scnprintf(buf + len, buf_len - len, "Cache occupancy current count:\n");
+	len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-16] = %u [17-32] = %u ",
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[0]),
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[1]),
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[2]));
+	len += scnprintf(buf + len, buf_len - len, "[33-48] = %u [49-64] = %u ",
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[3]),
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[4]));
+	len += scnprintf(buf + len, buf_len - len, "[65-80] = %u [81-96] = %u ",
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[5]),
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[6]));
+	len += scnprintf(buf + len, buf_len - len, "[97-112] = %u [113-127] = %u ",
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[7]),
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[8]));
+	len += scnprintf(buf + len, buf_len - len, "[128] = %u\n",
+			 le32_to_cpu(htt_stats_buf->fse_cache_occupancy_curr_cnt[9]));
+	len += scnprintf(buf + len, buf_len - len, "Cache search square count:\n");
+	len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-50] = %u [51-100] = %u ",
+			 le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[0]),
+			 le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[1]),
+			 le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[2]));
+	len += scnprintf(buf + len, buf_len - len, "[101-200] = %u [201-255] = %u ",
+			 le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[3]),
+			 le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[4]));
+	len += scnprintf(buf + len, buf_len - len, "[256] = %u\n",
+			 le32_to_cpu(htt_stats_buf->fse_search_stat_square_cnt[5]));
+	len += scnprintf(buf + len, buf_len - len, "Cache search peak pending count:\n");
+	len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-2] = %u [3-4] = %u ",
+			 le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[0]),
+			 le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[1]),
+			 le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[2]));
+	len += scnprintf(buf + len, buf_len - len, "[Greater/Equal to 5] = %u\n",
+			 le32_to_cpu(htt_stats_buf->fse_search_stat_peak_cnt[3]));
+	len += scnprintf(buf + len, buf_len - len, "Cache search tot pending count:\n");
+	len += scnprintf(buf + len, buf_len - len, "[0] = %u [1-2] = %u [3-4] = %u ",
+			 le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[0]),
+			 le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[1]),
+			 le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[2]));
+	len += scnprintf(buf + len, buf_len - len, "[Greater/Equal to 5] = %u\n\n",
+			 le32_to_cpu(htt_stats_buf->fse_search_stat_pending_cnt[3]));
+
+	stats_req->buf_len = len;
+}
+
+/* Print the per-pdev TX-rate beamforming stats TLV: legacy OFDM rate
+ * counters, then MCS/NSS/BW counter arrays for open-loop (ol), implicit
+ * beamforming (ibf) and explicit beamforming (txbf) transmissions, and
+ * finally the TXBF flag set/not-set reason-code counters.
+ */
+static void
+ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(const void *tag_buf, u16 tag_len,
+					     struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_pdev_txrate_txbf_stats_tlv *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u8 i;
+
+	/* Ignore a truncated TLV from firmware. */
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_STATS_PDEV_TX_RATE_TXBF_STATS:\n");
+	/* Indices 0..7 of tx_legacy_ofdm_rate map to 6..54 Mbps. */
+	len += scnprintf(buf + len, buf_len - len, "Legacy OFDM Rates: 6 Mbps: %u, ",
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[0]));
+	len += scnprintf(buf + len, buf_len - len, "9 Mbps: %u, 12 Mbps: %u, ",
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[1]),
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[2]));
+	len += scnprintf(buf + len, buf_len - len, "18 Mbps: %u\n",
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[3]));
+	len += scnprintf(buf + len, buf_len - len, "24 Mbps: %u, 36 Mbps: %u, ",
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[4]),
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[5]));
+	len += scnprintf(buf + len, buf_len - len, "48 Mbps: %u, 54 Mbps: %u\n",
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[6]),
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[7]));
+
+	len += print_array_to_buf(buf, len, "tx_ol_mcs", htt_stats_buf->tx_su_ol_mcs,
+				  ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "tx_ibf_mcs", htt_stats_buf->tx_su_ibf_mcs,
+				  ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "tx_txbf_mcs", htt_stats_buf->tx_su_txbf_mcs,
+				  ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS, "\n");
+	/* NSS indices are printed starting from 1 (1 spatial stream). */
+	len += print_array_to_buf_index(buf, len, "tx_ol_nss", 1,
+					htt_stats_buf->tx_su_ol_nss,
+					ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+					"\n");
+	len += print_array_to_buf_index(buf, len, "tx_ibf_nss", 1,
+					htt_stats_buf->tx_su_ibf_nss,
+					ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+					"\n");
+	len += print_array_to_buf_index(buf, len, "tx_txbf_nss", 1,
+					htt_stats_buf->tx_su_txbf_nss,
+					ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+					"\n");
+	len += print_array_to_buf(buf, len, "tx_ol_bw", htt_stats_buf->tx_su_ol_bw,
+				  ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+	/* Index 0 = half-bandwidth, index 1 = quarter-bandwidth counters. */
+	for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+		len += print_array_to_buf(buf, len, i ? "quarter_tx_ol_bw" :
+					  "half_tx_ol_bw",
+					  htt_stats_buf->ol[i],
+					  ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+					  "\n");
+
+	len += print_array_to_buf(buf, len, "tx_ibf_bw", htt_stats_buf->tx_su_ibf_bw,
+				  ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+	for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+		len += print_array_to_buf(buf, len, i ? "quarter_tx_ibf_bw" :
+					  "half_tx_ibf_bw",
+					  htt_stats_buf->ibf[i],
+					  ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+					  "\n");
+
+	len += print_array_to_buf(buf, len, "tx_txbf_bw", htt_stats_buf->tx_su_txbf_bw,
+				  ATH12K_HTT_TXBF_NUM_BW_CNTRS, "\n");
+	for (i = 0; i < ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES; i++)
+		len += print_array_to_buf(buf, len, i ? "quarter_tx_txbf_bw" :
+					  "half_tx_txbf_bw",
+					  htt_stats_buf->txbf[i],
+					  ATH12K_HTT_TXBF_NUM_BW_CNTRS,
+					  "\n");
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	/* Reason-code counters 0-1 cover flag-set paths, 2-7 flag-not-set. */
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_STATS_PDEV_TXBF_FLAG_RETURN_STATS:\n");
+	len += scnprintf(buf + len, buf_len - len, "TXBF_reason_code_stats: 0:%u, 1:%u,",
+			 le32_to_cpu(htt_stats_buf->txbf_flag_set_mu_mode),
+			 le32_to_cpu(htt_stats_buf->txbf_flag_set_final_status));
+	len += scnprintf(buf + len, buf_len - len, " 2:%u, 3:%u, 4:%u, 5:%u, ",
+			 le32_to_cpu(htt_stats_buf->txbf_flag_not_set_verified_txbf_mode),
+			 le32_to_cpu(htt_stats_buf->txbf_flag_not_set_disable_p2p_access),
+			 le32_to_cpu(htt_stats_buf->txbf_flag_not_set_max_nss_in_he160),
+			 le32_to_cpu(htt_stats_buf->txbf_flag_not_set_disable_uldlofdma));
+	len += scnprintf(buf + len, buf_len - len, "6:%u, 7:%u\n\n",
+			 le32_to_cpu(htt_stats_buf->txbf_flag_not_set_mcs_threshold_val),
+			 le32_to_cpu(htt_stats_buf->txbf_flag_not_set_final_status));
+
+	stats_req->buf_len = len;
+}
+
+/* Print the 11ax OFDMA NDPA (null data packet announcement) TXBF stats TLV.
+ * Each counter is printed as a "idx:val," list over num_elems_ax_ndpa_arr
+ * entries; the len--/'\0' pair after each loop drops the trailing comma.
+ * NOTE(review): num_elements is taken from the firmware TLV and assumed not
+ * to exceed the ax_ndpa[] array bound (and to fit the u8 loop index) —
+ * confirm the TLV definition caps it.
+ */
+static void
+ath12k_htt_print_txbf_ofdma_ax_ndpa_stats_tlv(const void *tag_buf, u16 tag_len,
+					      struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_tlv *stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u32 num_elements;
+	u8 i;
+
+	/* Ignore a truncated TLV from firmware. */
+	if (tag_len < sizeof(*stats_buf))
+		return;
+
+	num_elements = le32_to_cpu(stats_buf->num_elems_ax_ndpa_arr);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDPA_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndpa_queued =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_queued));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_tried =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_tried));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_flushed =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_flush));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndpa_err =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_ndpa[i].ax_ofdma_ndpa_err));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print the 11ax OFDMA NDP (null data packet / sounding) TXBF stats TLV.
+ * Same "idx:val," list layout and trailing-comma trimming as the NDPA
+ * printer; num_elements comes from the firmware TLV (see review note there).
+ */
+static void
+ath12k_htt_print_txbf_ofdma_ax_ndp_stats_tlv(const void *tag_buf, u16 tag_len,
+					     struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_txbf_ofdma_ax_ndp_stats_tlv *stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u32 num_elements;
+	u8 i;
+
+	/* Ignore a truncated TLV from firmware. */
+	if (tag_len < sizeof(*stats_buf))
+		return;
+
+	num_elements = le32_to_cpu(stats_buf->num_elems_ax_ndp_arr);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_NDP_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "ax_ofdma_ndp_queued =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_queued));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_tried =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_tried));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_flushed =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_flush));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_ndp_err =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_ndp[i].ax_ofdma_ndp_err));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print the 11ax OFDMA BRP (beamforming report poll) TXBF stats TLV,
+ * including the count of CBF (compressed beamforming) reports received.
+ * Same "idx:val," list layout and trailing-comma trimming as the NDPA
+ * printer; num_elements comes from the firmware TLV (see review note there).
+ */
+static void
+ath12k_htt_print_txbf_ofdma_ax_brp_stats_tlv(const void *tag_buf, u16 tag_len,
+					     struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_txbf_ofdma_ax_brp_stats_tlv *stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u32 num_elements;
+	u8 i;
+
+	/* Ignore a truncated TLV from firmware. */
+	if (tag_len < sizeof(*stats_buf))
+		return;
+
+	num_elements = le32_to_cpu(stats_buf->num_elems_ax_brp_arr);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TXBF_OFDMA_AX_BRP_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "ax_ofdma_brpoll_queued =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_queued));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brpoll_tied =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_tried));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brpoll_flushed =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_flushed));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brp_err =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_brp_err));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_brp_err_num_cbf_rcvd =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_brp[i].ax_ofdma_num_cbf_rcvd));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print the 11ax OFDMA steering TXBF stats TLV: steered PPDU counts plus
+ * per-user prefetch/sound/force-sound counters. Same "idx:val," list layout
+ * and trailing-comma trimming as the NDPA printer; num_elements comes from
+ * the firmware TLV (see review note there).
+ */
+static void
+ath12k_htt_print_txbf_ofdma_ax_steer_stats_tlv(const void *tag_buf, u16 tag_len,
+					       struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_txbf_ofdma_ax_steer_stats_tlv *stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u32 num_elements;
+	u8 i;
+
+	/* Ignore a truncated TLV from firmware. */
+	if (tag_len < sizeof(*stats_buf))
+		return;
+
+	num_elements = le32_to_cpu(stats_buf->num_elems_ax_steer_arr);
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TXBF_OFDMA_AX_STEER_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "ax_ofdma_num_ppdu_steer =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_steer[i].num_ppdu_steer));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_prefetch =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_steer[i].num_usr_prefetch));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_sound =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_steer[i].num_usr_sound));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\nax_ofdma_num_usrs_force_sound =");
+	for (i = 0; i < num_elements; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,", i + 1,
+				 le32_to_cpu(stats_buf->ax_steer[i].num_usr_force_sound));
+	len--;
+	*(buf + len) = '\0';
+
+	len += scnprintf(buf + len, buf_len - len, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+/* Print the 11ax OFDMA steered-MPDU stats TLV: tried/failed MPDU counters
+ * for RBO-scheduled and SIFS-burst steered transmissions.
+ */
+static void
+ath12k_htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(const void *tag_buf, u16 tag_len,
+						    struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_txbf_ofdma_ax_steer_mpdu_stats_tlv *stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+
+	/* Ignore a truncated TLV from firmware. */
+	if (tag_len < sizeof(*stats_buf))
+		return;
+
+	len += scnprintf(buf + len, buf_len - len,
+			 "HTT_TXBF_OFDMA_AX_STEER_MPDU_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_tried = %u\n",
+			 le32_to_cpu(stats_buf->ax_ofdma_rbo_steer_mpdus_tried));
+	len += scnprintf(buf + len, buf_len - len, "rbo_steer_mpdus_failed = %u\n",
+			 le32_to_cpu(stats_buf->ax_ofdma_rbo_steer_mpdus_failed));
+	len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_tried = %u\n",
+			 le32_to_cpu(stats_buf->ax_ofdma_sifs_steer_mpdus_tried));
+	len += scnprintf(buf + len, buf_len - len, "sifs_steer_mpdus_failed = %u\n\n",
+			 le32_to_cpu(stats_buf->ax_ofdma_sifs_steer_mpdus_failed));
+
+	stats_req->buf_len = len;
+}
+
+/* Format one download-pager page history entry (list index, page number,
+ * page count and 64-bit microsecond timestamp reassembled from the two
+ * 32-bit halves) into str_buf. str_buf must be at least
+ * ATH12K_HTT_MAX_STRING_LEN bytes; snprintf bounds all writes.
+ */
+static void ath12k_htt_print_dlpager_entry(const struct ath12k_htt_pgs_info *pg_info,
+					   int idx, char *str_buf)
+{
+	u64 page_timestamp;
+	u16 index = 0;
+
+	page_timestamp = ath12k_le32hilo_to_u64(pg_info->ts_msb, pg_info->ts_lsb);
+
+	index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+			  "Index - %u ; Page Number - %u ; ",
+			  idx, le32_to_cpu(pg_info->page_num));
+	index += snprintf(&str_buf[index], ATH12K_HTT_MAX_STRING_LEN - index,
+			  "Num of pages - %u ; Timestamp - %lluus\n",
+			  le32_to_cpu(pg_info->num_pgs), page_timestamp);
+}
+
+static void
+ath12k_htt_print_dlpager_stats_tlv(const void *tag_buf, u16 tag_len,
+				   struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_dl_pager_stats_tlv *stat_buf = tag_buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 dword_lock, dword_unlock;
+	int i;
+	u8 *buf = stats_req->buf;
+	u8 pg_locked;
+	u8 pg_unlock;
+	char str_buf[ATH12K_HTT_MAX_STRING_LEN] = {0};
+
+	if (tag_len < sizeof(*stat_buf))
+		return;
+
+	dword_lock = le32_get_bits(stat_buf->info2,
+				   ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2);
+	dword_unlock = le32_get_bits(stat_buf->info2,
+				     ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2);
+
+	pg_locked = ATH12K_HTT_STATS_PAGE_LOCKED;
+	pg_unlock = ATH12K_HTT_STATS_PAGE_UNLOCKED;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_DLPAGER_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "ASYNC locked pages = %u\n",
+			 le32_get_bits(stat_buf->info0,
+				       ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0));
+	len += scnprintf(buf + len, buf_len - len, "SYNC locked pages = %u\n",
+			 le32_get_bits(stat_buf->info0,
+				       ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0));
+	len += scnprintf(buf + len, buf_len - len, "Total locked pages = %u\n",
+			 le32_get_bits(stat_buf->info1,
+				       ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1));
+	len += scnprintf(buf + len, buf_len - len, "Total free pages = %u\n",
+			 le32_get_bits(stat_buf->info1,
+				       ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1));
+
+	len += scnprintf(buf + len, buf_len - len, "\nLOCKED PAGES HISTORY\n");
+	len += scnprintf(buf + len, buf_len - len, "last_locked_page_idx = %u\n",
+			 dword_lock ? dword_lock - 1 : (ATH12K_PAGER_MAX - 1));
+
+	for (i = 0; i < ATH12K_PAGER_MAX; i++) {
+		memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+		ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_locked][i],
+					       i, str_buf);
+		len += scnprintf(buf + len, buf_len - len, "%s", str_buf);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\nUNLOCKED PAGES HISTORY\n");
+	len += scnprintf(buf + len, buf_len - len, "last_unlocked_page_idx = %u\n",
+			 dword_unlock ? dword_unlock - 1 : ATH12K_PAGER_MAX - 1);
+
+	for (i = 0; i < ATH12K_PAGER_MAX; i++) {
+		memset(str_buf, 0x0, ATH12K_HTT_MAX_STRING_LEN);
+		ath12k_htt_print_dlpager_entry(&stat_buf->pgs_info[pg_unlock][i],
+					       i, str_buf);
+		len += scnprintf(buf + len, buf_len - len, "%s", str_buf);
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_stats_tlv(const void *tag_buf, u16 tag_len,
+			       struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_phy_stats_tlv *htt_stats_buf = tag_buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 *buf = stats_req->buf, i;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_STATS_TLV:\n");
+	for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++)
+		len += scnprintf(buf + len, buf_len - len, "bdf_nf_chain[%d] = %d\n",
+				 i, a_sle32_to_cpu(htt_stats_buf->nf_chain[i]));
+	for (i = 0; i < ATH12K_HTT_STATS_MAX_CHAINS; i++)
+		len += scnprintf(buf + len, buf_len - len, "runtime_nf_chain[%d] = %d\n",
+				 i, a_sle32_to_cpu(htt_stats_buf->runtime_nf_chain[i]));
+	len += scnprintf(buf + len, buf_len - len, "false_radar_cnt = %u / %u (mins)\n",
+			 le32_to_cpu(htt_stats_buf->false_radar_cnt),
+			 le32_to_cpu(htt_stats_buf->fw_run_time));
+	len += scnprintf(buf + len, buf_len - len, "radar_cs_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->radar_cs_cnt));
+	len += scnprintf(buf + len, buf_len - len, "ani_level = %d\n\n",
+			 a_sle32_to_cpu(htt_stats_buf->ani_level));
+
+	stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_counters_tlv(const void *tag_buf, u16 tag_len,
+				  struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_phy_counters_tlv *htt_stats_buf = tag_buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 *buf = stats_req->buf;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_COUNTERS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "rx_ofdma_timing_err_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_ofdma_timing_err_cnt));
+	len += scnprintf(buf + len, buf_len - len, "rx_cck_fail_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_cck_fail_cnt));
+	len += scnprintf(buf + len, buf_len - len, "mactx_abort_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->mactx_abort_cnt));
+	len += scnprintf(buf + len, buf_len - len, "macrx_abort_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->macrx_abort_cnt));
+	len += scnprintf(buf + len, buf_len - len, "phytx_abort_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->phytx_abort_cnt));
+	len += scnprintf(buf + len, buf_len - len, "phyrx_abort_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->phyrx_abort_cnt));
+	len += scnprintf(buf + len, buf_len - len, "phyrx_defer_abort_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->phyrx_defer_abort_cnt));
+	len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_lstf_event_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_gain_adj_lstf_event_cnt));
+	len += scnprintf(buf + len, buf_len - len, "rx_gain_adj_non_legacy_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_gain_adj_non_legacy_cnt));
+	len += print_array_to_buf(buf, len, "rx_pkt_cnt", htt_stats_buf->rx_pkt_cnt,
+				  ATH12K_HTT_MAX_RX_PKT_CNT, "\n");
+	len += print_array_to_buf(buf, len, "rx_pkt_crc_pass_cnt",
+				  htt_stats_buf->rx_pkt_crc_pass_cnt,
+				  ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT, "\n");
+	len += print_array_to_buf(buf, len, "per_blk_err_cnt",
+				  htt_stats_buf->per_blk_err_cnt,
+				  ATH12K_HTT_MAX_PER_BLK_ERR_CNT, "\n");
+	len += print_array_to_buf(buf, len, "rx_ota_err_cnt",
+				  htt_stats_buf->rx_ota_err_cnt,
+				  ATH12K_HTT_MAX_RX_OTA_ERR_CNT, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_reset_stats_tlv(const void *tag_buf, u16 tag_len,
+				     struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_phy_reset_stats_tlv *htt_stats_buf = tag_buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 *buf = stats_req->buf;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+			 le32_to_cpu(htt_stats_buf->pdev_id));
+	len += scnprintf(buf + len, buf_len - len, "chan_mhz = %u\n",
+			 le32_to_cpu(htt_stats_buf->chan_mhz));
+	len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq1 = %u\n",
+			 le32_to_cpu(htt_stats_buf->chan_band_center_freq1));
+	len += scnprintf(buf + len, buf_len - len, "chan_band_center_freq2 = %u\n",
+			 le32_to_cpu(htt_stats_buf->chan_band_center_freq2));
+	len += scnprintf(buf + len, buf_len - len, "chan_phy_mode = %u\n",
+			 le32_to_cpu(htt_stats_buf->chan_phy_mode));
+	len += scnprintf(buf + len, buf_len - len, "chan_flags = 0x%0x\n",
+			 le32_to_cpu(htt_stats_buf->chan_flags));
+	len += scnprintf(buf + len, buf_len - len, "chan_num = %u\n",
+			 le32_to_cpu(htt_stats_buf->chan_num));
+	len += scnprintf(buf + len, buf_len - len, "reset_cause = 0x%0x\n",
+			 le32_to_cpu(htt_stats_buf->reset_cause));
+	len += scnprintf(buf + len, buf_len - len, "prev_reset_cause = 0x%0x\n",
+			 le32_to_cpu(htt_stats_buf->prev_reset_cause));
+	len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_src = 0x%0x\n",
+			 le32_to_cpu(htt_stats_buf->phy_warm_reset_src));
+	len += scnprintf(buf + len, buf_len - len, "rx_gain_tbl_mode = %d\n",
+			 le32_to_cpu(htt_stats_buf->rx_gain_tbl_mode));
+	len += scnprintf(buf + len, buf_len - len, "xbar_val = 0x%0x\n",
+			 le32_to_cpu(htt_stats_buf->xbar_val));
+	len += scnprintf(buf + len, buf_len - len, "force_calibration = %u\n",
+			 le32_to_cpu(htt_stats_buf->force_calibration));
+	len += scnprintf(buf + len, buf_len - len, "phyrf_mode = %u\n",
+			 le32_to_cpu(htt_stats_buf->phyrf_mode));
+	len += scnprintf(buf + len, buf_len - len, "phy_homechan = %u\n",
+			 le32_to_cpu(htt_stats_buf->phy_homechan));
+	len += scnprintf(buf + len, buf_len - len, "phy_tx_ch_mask = 0x%0x\n",
+			 le32_to_cpu(htt_stats_buf->phy_tx_ch_mask));
+	len += scnprintf(buf + len, buf_len - len, "phy_rx_ch_mask = 0x%0x\n",
+			 le32_to_cpu(htt_stats_buf->phy_rx_ch_mask));
+	len += scnprintf(buf + len, buf_len - len, "phybb_ini_mask = 0x%0x\n",
+			 le32_to_cpu(htt_stats_buf->phybb_ini_mask));
+	len += scnprintf(buf + len, buf_len - len, "phyrf_ini_mask = 0x%0x\n",
+			 le32_to_cpu(htt_stats_buf->phyrf_ini_mask));
+	len += scnprintf(buf + len, buf_len - len, "phy_dfs_en_mask = 0x%0x\n",
+			 le32_to_cpu(htt_stats_buf->phy_dfs_en_mask));
+	len += scnprintf(buf + len, buf_len - len, "phy_sscan_en_mask = 0x%0x\n",
+			 le32_to_cpu(htt_stats_buf->phy_sscan_en_mask));
+	len += scnprintf(buf + len, buf_len - len, "phy_synth_sel_mask = 0x%0x\n",
+			 le32_to_cpu(htt_stats_buf->phy_synth_sel_mask));
+	len += scnprintf(buf + len, buf_len - len, "phy_adfs_freq = %u\n",
+			 le32_to_cpu(htt_stats_buf->phy_adfs_freq));
+	len += scnprintf(buf + len, buf_len - len, "cck_fir_settings = 0x%0x\n",
+			 le32_to_cpu(htt_stats_buf->cck_fir_settings));
+	len += scnprintf(buf + len, buf_len - len, "phy_dyn_pri_chan = %u\n",
+			 le32_to_cpu(htt_stats_buf->phy_dyn_pri_chan));
+	len += scnprintf(buf + len, buf_len - len, "cca_thresh = 0x%0x\n",
+			 le32_to_cpu(htt_stats_buf->cca_thresh));
+	len += scnprintf(buf + len, buf_len - len, "dyn_cca_status = %u\n",
+			 le32_to_cpu(htt_stats_buf->dyn_cca_status));
+	len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_hw = 0x%x\n",
+			 le32_to_cpu(htt_stats_buf->rxdesense_thresh_hw));
+	len += scnprintf(buf + len, buf_len - len, "rxdesense_thresh_sw = 0x%x\n\n",
+			 le32_to_cpu(htt_stats_buf->rxdesense_thresh_sw));
+
+	stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_reset_counters_tlv(const void *tag_buf, u16 tag_len,
+					struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_phy_reset_counters_tlv *htt_stats_buf = tag_buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 *buf = stats_req->buf;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_RESET_COUNTERS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+			 le32_to_cpu(htt_stats_buf->pdev_id));
+	len += scnprintf(buf + len, buf_len - len, "cf_active_low_fail_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->cf_active_low_fail_cnt));
+	len += scnprintf(buf + len, buf_len - len, "cf_active_low_pass_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->cf_active_low_pass_cnt));
+	len += scnprintf(buf + len, buf_len - len, "phy_off_through_vreg_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->phy_off_through_vreg_cnt));
+	len += scnprintf(buf + len, buf_len - len, "force_calibration_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->force_calibration_cnt));
+	len += scnprintf(buf + len, buf_len - len, "rf_mode_switch_phy_off_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->rf_mode_switch_phy_off_cnt));
+	len += scnprintf(buf + len, buf_len - len, "temperature_recal_cnt = %u\n\n",
+			 le32_to_cpu(htt_stats_buf->temperature_recal_cnt));
+
+	stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_phy_tpc_stats_tlv(const void *tag_buf, u16 tag_len,
+				   struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_phy_tpc_stats_tlv *htt_stats_buf = tag_buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 *buf = stats_req->buf;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PHY_TPC_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+			 le32_to_cpu(htt_stats_buf->pdev_id));
+	len += scnprintf(buf + len, buf_len - len, "tx_power_scale = %u\n",
+			 le32_to_cpu(htt_stats_buf->tx_power_scale));
+	len += scnprintf(buf + len, buf_len - len, "tx_power_scale_db = %u\n",
+			 le32_to_cpu(htt_stats_buf->tx_power_scale_db));
+	len += scnprintf(buf + len, buf_len - len, "min_negative_tx_power = %d\n",
+			 le32_to_cpu(htt_stats_buf->min_negative_tx_power));
+	len += scnprintf(buf + len, buf_len - len, "reg_ctl_domain = %u\n",
+			 le32_to_cpu(htt_stats_buf->reg_ctl_domain));
+	len += scnprintf(buf + len, buf_len - len, "twice_max_rd_power = %u\n",
+			 le32_to_cpu(htt_stats_buf->twice_max_rd_power));
+	len += scnprintf(buf + len, buf_len - len, "max_tx_power = %u\n",
+			 le32_to_cpu(htt_stats_buf->max_tx_power));
+	len += scnprintf(buf + len, buf_len - len, "home_max_tx_power = %u\n",
+			 le32_to_cpu(htt_stats_buf->home_max_tx_power));
+	len += scnprintf(buf + len, buf_len - len, "psd_power = %d\n",
+			 le32_to_cpu(htt_stats_buf->psd_power));
+	len += scnprintf(buf + len, buf_len - len, "eirp_power = %u\n",
+			 le32_to_cpu(htt_stats_buf->eirp_power));
+	len += scnprintf(buf + len, buf_len - len, "power_type_6ghz = %u\n",
+			 le32_to_cpu(htt_stats_buf->power_type_6ghz));
+	len += print_array_to_buf(buf, len, "max_reg_allowed_power",
+				  htt_stats_buf->max_reg_allowed_power,
+				  ATH12K_HTT_STATS_MAX_CHAINS, "\n");
+	len += print_array_to_buf(buf, len, "max_reg_allowed_power_6ghz",
+				  htt_stats_buf->max_reg_allowed_power_6ghz,
+				  ATH12K_HTT_STATS_MAX_CHAINS, "\n");
+	len += print_array_to_buf(buf, len, "sub_band_cfreq",
+				  htt_stats_buf->sub_band_cfreq,
+				  ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n");
+	len += print_array_to_buf(buf, len, "sub_band_txpower",
+				  htt_stats_buf->sub_band_txpower,
+				  ATH12K_HTT_MAX_CH_PWR_INFO_SIZE, "\n\n");
+
+	stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_soc_txrx_stats_common_tlv(const void *tag_buf, u16 tag_len,
+					   struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_t2h_soc_txrx_stats_common_tlv *htt_stats_buf = tag_buf;
+	u64 drop_count;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 *buf = stats_req->buf;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	drop_count = ath12k_le32hilo_to_u64(htt_stats_buf->inv_peers_msdu_drop_count_hi,
+					    htt_stats_buf->inv_peers_msdu_drop_count_lo);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_SOC_COMMON_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "soc_drop_count = %llu\n\n",
+			 drop_count);
+
+	stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_tx_per_rate_stats_tlv(const void *tag_buf, u16 tag_len,
+				       struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_tx_per_rate_stats_tlv *stats_buf = tag_buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 ru_size_cnt = 0;
+	u32 rc_mode, ru_type;
+	u8 *buf = stats_req->buf, i;
+	const char *mode_prefix;
+
+	if (tag_len < sizeof(*stats_buf))
+		return;
+
+	rc_mode = le32_to_cpu(stats_buf->rc_mode);
+	ru_type = le32_to_cpu(stats_buf->ru_type);
+
+	switch (rc_mode) {
+	case ATH12K_HTT_STATS_RC_MODE_DLSU:
+		len += scnprintf(buf + len, buf_len - len, "HTT_TX_PER_STATS:\n");
+		len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_SU:\n");
+		mode_prefix = "su";
+		break;
+	case ATH12K_HTT_STATS_RC_MODE_DLMUMIMO:
+		len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_MUMIMO:\n");
+		mode_prefix = "mu";
+		break;
+	case ATH12K_HTT_STATS_RC_MODE_DLOFDMA:
+		len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_DL_OFDMA:\n");
+		mode_prefix = "ofdma";
+		if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+			ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
+		else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+			ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
+		break;
+	case ATH12K_HTT_STATS_RC_MODE_ULMUMIMO:
+		len += scnprintf(buf + len, buf_len - len, "HTT_RX_PER_STATS:\n");
+		len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_MUMIMO:\n");
+		mode_prefix = "ulmu";
+		break;
+	case ATH12K_HTT_STATS_RC_MODE_ULOFDMA:
+		len += scnprintf(buf + len, buf_len - len, "\nPER_STATS_UL_OFDMA:\n");
+		mode_prefix = "ulofdma";
+		if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY)
+			ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS;
+		else if (ru_type == ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU)
+			ru_size_cnt = ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS;
+		break;
+	default:
+		return;
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "\nPER per BW:\n");
+	if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+	    rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+		len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+				 mode_prefix);
+	else
+		len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+				 mode_prefix);
+	for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+				 le32_to_cpu(stats_buf->per_bw[i].ppdus_tried));
+	len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+			 le32_to_cpu(stats_buf->per_bw320.ppdus_tried));
+
+	if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+	    rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+		len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+				 mode_prefix);
+	else
+		len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+				 mode_prefix);
+	for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+				 le32_to_cpu(stats_buf->per_bw[i].ppdus_ack_failed));
+	len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+			 le32_to_cpu(stats_buf->per_bw320.ppdus_ack_failed));
+
+	len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+	for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+				 le32_to_cpu(stats_buf->per_bw[i].mpdus_tried));
+	len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+			 le32_to_cpu(stats_buf->per_bw320.mpdus_tried));
+
+	len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+	for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u", i,
+				 le32_to_cpu(stats_buf->per_bw[i].mpdus_failed));
+	len += scnprintf(buf + len, buf_len - len, " %u:%u\n", i,
+			 le32_to_cpu(stats_buf->per_bw320.mpdus_failed));
+
+	len += scnprintf(buf + len, buf_len - len, "\nPER per NSS:\n");
+	if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+	    rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+		len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+				 mode_prefix);
+	else
+		len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+				 mode_prefix);
+	for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+				 le32_to_cpu(stats_buf->per_nss[i].ppdus_tried));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+	    rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+		len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+				 mode_prefix);
+	else
+		len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+				 mode_prefix);
+	for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+				 le32_to_cpu(stats_buf->per_nss[i].ppdus_ack_failed));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+	for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+				 le32_to_cpu(stats_buf->per_nss[i].mpdus_tried));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+	for (i = 0; i < ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i + 1,
+				 le32_to_cpu(stats_buf->per_nss[i].mpdus_failed));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "\nPER per MCS:\n");
+	if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+	    rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+		len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+				 mode_prefix);
+	else
+		len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+				 mode_prefix);
+	for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+				 le32_to_cpu(stats_buf->per_mcs[i].ppdus_tried));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA ||
+	    rc_mode == ATH12K_HTT_STATS_RC_MODE_ULMUMIMO)
+		len += scnprintf(buf + len, buf_len - len, "non_data_ppdus_%s = ",
+				 mode_prefix);
+	else
+		len += scnprintf(buf + len, buf_len - len, "ppdus_ack_failed_%s = ",
+				 mode_prefix);
+	for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+				 le32_to_cpu(stats_buf->per_mcs[i].ppdus_ack_failed));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ", mode_prefix);
+	for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+				 le32_to_cpu(stats_buf->per_mcs[i].mpdus_tried));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ", mode_prefix);
+	for (i = 0; i < ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS; i++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u ", i,
+				 le32_to_cpu(stats_buf->per_mcs[i].mpdus_failed));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	if ((rc_mode == ATH12K_HTT_STATS_RC_MODE_DLOFDMA ||
+	     rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA) &&
+	     ru_type != ATH12K_HTT_STATS_RU_TYPE_INVALID) {
+		len += scnprintf(buf + len, buf_len - len, "\nPER per RU:\n");
+
+		if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA)
+			len += scnprintf(buf + len, buf_len - len, "data_ppdus_%s = ",
+					 mode_prefix);
+		else
+			len += scnprintf(buf + len, buf_len - len, "ppdus_tried_%s = ",
+					 mode_prefix);
+		for (i = 0; i < ru_size_cnt; i++)
+			len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+					 ath12k_tx_ru_size_to_str(ru_type, i),
+					 le32_to_cpu(stats_buf->ru[i].ppdus_tried));
+		len += scnprintf(buf + len, buf_len - len, "\n");
+
+		if (rc_mode == ATH12K_HTT_STATS_RC_MODE_ULOFDMA)
+			len += scnprintf(buf + len, buf_len - len,
+					 "non_data_ppdus_%s = ", mode_prefix);
+		else
+			len += scnprintf(buf + len, buf_len - len,
+					 "ppdus_ack_failed_%s = ", mode_prefix);
+		for (i = 0; i < ru_size_cnt; i++)
+			len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+					 ath12k_tx_ru_size_to_str(ru_type, i),
+					 le32_to_cpu(stats_buf->ru[i].ppdus_ack_failed));
+		len += scnprintf(buf + len, buf_len - len, "\n");
+
+		len += scnprintf(buf + len, buf_len - len, "mpdus_tried_%s = ",
+				 mode_prefix);
+		for (i = 0; i < ru_size_cnt; i++)
+			len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+					 ath12k_tx_ru_size_to_str(ru_type, i),
+					 le32_to_cpu(stats_buf->ru[i].mpdus_tried));
+		len += scnprintf(buf + len, buf_len - len, "\n");
+
+		len += scnprintf(buf + len, buf_len - len, "mpdus_failed_%s = ",
+				 mode_prefix);
+		for (i = 0; i < ru_size_cnt; i++)
+			len += scnprintf(buf + len, buf_len - len, " %s:%u ",
+					 ath12k_tx_ru_size_to_str(ru_type, i),
+					 le32_to_cpu(stats_buf->ru[i].mpdus_failed));
+		len += scnprintf(buf + len, buf_len - len, "\n\n");
+	}
+
+	if (rc_mode == ATH12K_HTT_STATS_RC_MODE_DLMUMIMO) {
+		len += scnprintf(buf + len, buf_len - len, "\nlast_probed_bw  = %u\n",
+				 le32_to_cpu(stats_buf->last_probed_bw));
+		len += scnprintf(buf + len, buf_len - len, "last_probed_nss = %u\n",
+				 le32_to_cpu(stats_buf->last_probed_nss));
+		len += scnprintf(buf + len, buf_len - len, "last_probed_mcs = %u\n",
+				 le32_to_cpu(stats_buf->last_probed_mcs));
+		len += print_array_to_buf(buf, len, "MU Probe count per RC MODE",
+					  stats_buf->probe_cnt,
+					  ATH12K_HTT_RC_MODE_2D_COUNT, "\n\n");
+	}
+
+	stats_req->buf_len = len;
+}
+
+static void
+ath12k_htt_print_ast_entry_tlv(const void *tag_buf, u16 tag_len,
+			       struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_ast_entry_tlv *htt_stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	u32 mac_addr_l32;
+	u32 mac_addr_h16;
+	u32 ast_info;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	mac_addr_l32 = le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_l32);
+	mac_addr_h16 = le32_to_cpu(htt_stats_buf->mac_addr.mac_addr_h16);
+	ast_info = le32_to_cpu(htt_stats_buf->info);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_AST_ENTRY_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "ast_index = %u\n",
+			 le32_to_cpu(htt_stats_buf->ast_index));
+	len += scnprintf(buf + len, buf_len - len,
+			 "mac_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
+			 u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_0),
+			 u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_1),
+			 u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_2),
+			 u32_get_bits(mac_addr_l32, ATH12K_HTT_MAC_ADDR_L32_3),
+			 u32_get_bits(mac_addr_h16, ATH12K_HTT_MAC_ADDR_H16_0),
+			 u32_get_bits(mac_addr_h16, ATH12K_HTT_MAC_ADDR_H16_1));
+
+	len += scnprintf(buf + len, buf_len - len, "sw_peer_id = %u\n",
+			 le32_to_cpu(htt_stats_buf->sw_peer_id));
+	len += scnprintf(buf + len, buf_len - len, "pdev_id = %u\n",
+			 u32_get_bits(ast_info, ATH12K_HTT_AST_PDEV_ID_INFO));
+	len += scnprintf(buf + len, buf_len - len, "vdev_id = %u\n",
+			 u32_get_bits(ast_info, ATH12K_HTT_AST_VDEV_ID_INFO));
+	len += scnprintf(buf + len, buf_len - len, "next_hop = %u\n",
+			 u32_get_bits(ast_info, ATH12K_HTT_AST_NEXT_HOP_INFO));
+	len += scnprintf(buf + len, buf_len - len, "mcast = %u\n",
+			 u32_get_bits(ast_info, ATH12K_HTT_AST_MCAST_INFO));
+	len += scnprintf(buf + len, buf_len - len, "monitor_direct = %u\n",
+			 u32_get_bits(ast_info, ATH12K_HTT_AST_MONITOR_DIRECT_INFO));
+	len += scnprintf(buf + len, buf_len - len, "mesh_sta = %u\n",
+			 u32_get_bits(ast_info, ATH12K_HTT_AST_MESH_STA_INFO));
+	len += scnprintf(buf + len, buf_len - len, "mec = %u\n",
+			 u32_get_bits(ast_info, ATH12K_HTT_AST_MEC_INFO));
+	len += scnprintf(buf + len, buf_len - len, "intra_bss = %u\n\n",
+			 u32_get_bits(ast_info, ATH12K_HTT_AST_INTRA_BSS_INFO));
+
+	stats_req->buf_len = len;
+}
+
+static const char*
+ath12k_htt_get_punct_dir_type_str(enum ath12k_htt_stats_direction direction)
+{
+	switch (direction) {
+	case ATH12K_HTT_STATS_DIRECTION_TX:
+		return "tx";
+	case ATH12K_HTT_STATS_DIRECTION_RX:
+		return "rx";
+	default:
+		return "unknown";
+	}
+}
+
+static const char*
+ath12k_htt_get_punct_ppdu_type_str(enum ath12k_htt_stats_ppdu_type ppdu_type)
+{
+	switch (ppdu_type) {
+	case ATH12K_HTT_STATS_PPDU_TYPE_MODE_SU:
+		return "su";
+	case ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_MIMO:
+		return "dl_mu_mimo";
+	case ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_MIMO:
+		return "ul_mu_mimo";
+	case ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_OFDMA:
+		return "dl_mu_ofdma";
+	case ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_OFDMA:
+		return "ul_mu_ofdma";
+	default:
+		return "unknown";
+	}
+}
+
+static const char*
+ath12k_htt_get_punct_pream_type_str(enum ath12k_htt_stats_param_type pream_type)
+{
+	switch (pream_type) {
+	case ATH12K_HTT_STATS_PREAM_OFDM:
+		return "ofdm";
+	case ATH12K_HTT_STATS_PREAM_CCK:
+		return "cck";
+	case ATH12K_HTT_STATS_PREAM_HT:
+		return "ht";
+	case ATH12K_HTT_STATS_PREAM_VHT:
+		return "ac";
+	case ATH12K_HTT_STATS_PREAM_HE:
+		return "ax";
+	case ATH12K_HTT_STATS_PREAM_EHT:
+		return "be";
+	default:
+		return "unknown";
+	}
+}
+
+static void
+ath12k_htt_print_puncture_stats_tlv(const void *tag_buf, u16 tag_len,
+				    struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_pdev_puncture_stats_tlv *stats_buf = tag_buf;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 len = stats_req->buf_len;
+	u8 *buf = stats_req->buf;
+	const char *direction;
+	const char *ppdu_type;
+	const char *preamble;
+	u32 mac_id__word;
+	u32 subband_limit;
+	u8 i;
+
+	if (tag_len < sizeof(*stats_buf))
+		return;
+
+	mac_id__word = le32_to_cpu(stats_buf->mac_id__word);
+	subband_limit = min(le32_to_cpu(stats_buf->subband_cnt),
+			    ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT);
+
+	direction = ath12k_htt_get_punct_dir_type_str(le32_to_cpu(stats_buf->direction));
+	ppdu_type = ath12k_htt_get_punct_ppdu_type_str(le32_to_cpu(stats_buf->ppdu_type));
+	preamble = ath12k_htt_get_punct_pream_type_str(le32_to_cpu(stats_buf->preamble));
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_PDEV_PUNCTURE_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id__word, ATH12K_HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len,
+			 "%s_%s_%s_last_used_pattern_mask = 0x%08x\n",
+			 direction, preamble, ppdu_type,
+			 le32_to_cpu(stats_buf->last_used_pattern_mask));
+
+	for (i = 0; i < subband_limit; i++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "%s_%s_%s_num_subbands_used_cnt_%02d = %u\n",
+				 direction, preamble, ppdu_type, i + 1,
+				 le32_to_cpu(stats_buf->num_subbands_used_cnt[i]));
+	}
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	stats_req->buf_len = len;
+}
+
+static void
 ath12k_htt_print_dmac_reset_stats_tlv(const void *tag_buf, u16 tag_len,
 				      struct debug_htt_stats_req *stats_req)
 {
@@ -2562,7 +4351,6 @@
 	time = ath12k_le32hilo_to_u64(htt_stats_buf->reset_time_hi_ms,
 				      htt_stats_buf->reset_time_lo_ms);
 	len += scnprintf(buf + len, buf_len - len, "reset_time_ms = %llu\n", time);
-
 	time = ath12k_le32hilo_to_u64(htt_stats_buf->disengage_time_hi_ms,
 				      htt_stats_buf->disengage_time_lo_ms);
 	len += scnprintf(buf + len, buf_len - len, "disengage_time_ms = %llu\n", time);
@@ -2681,7 +4469,7 @@
 	len += scnprintf(buf + len, buf_len - len, "\n");
 	len += print_array_to_buf_index(buf, len, "be_ofdma_tx_nss = ", 1,
 					htt_stats_buf->be_ofdma_tx_nss,
-					ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS,
+					ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS,
 					"\n");
 	len += print_array_to_buf(buf, len, "be_ofdma_tx_bw",
 				  htt_stats_buf->be_ofdma_tx_bw,
@@ -2697,6 +4485,536 @@
 	stats_req->buf_len = len;
 }
 
+static void
+ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(const void *tag_buf, u16 tag_len,
+						  struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u32 mac_id_word;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_MBSSID_CTRL_FRAME_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "basic_trigger_across_bss = %u\n",
+			 le32_to_cpu(htt_stats_buf->basic_trigger_across_bss));
+	len += scnprintf(buf + len, buf_len - len, "basic_trigger_within_bss = %u\n",
+			 le32_to_cpu(htt_stats_buf->basic_trigger_within_bss));
+	len += scnprintf(buf + len, buf_len - len, "bsr_trigger_across_bss = %u\n",
+			 le32_to_cpu(htt_stats_buf->bsr_trigger_across_bss));
+	len += scnprintf(buf + len, buf_len - len, "bsr_trigger_within_bss = %u\n",
+			 le32_to_cpu(htt_stats_buf->bsr_trigger_within_bss));
+	len += scnprintf(buf + len, buf_len - len, "mu_rts_across_bss = %u\n",
+			 le32_to_cpu(htt_stats_buf->mu_rts_across_bss));
+	len += scnprintf(buf + len, buf_len - len, "mu_rts_within_bss = %u\n",
+			 le32_to_cpu(htt_stats_buf->mu_rts_within_bss));
+	len += scnprintf(buf + len, buf_len - len, "ul_mumimo_trigger_across_bss = %u\n",
+			 le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_across_bss));
+	len += scnprintf(buf + len, buf_len - len,
+			 "ul_mumimo_trigger_within_bss = %u\n\n",
+			 le32_to_cpu(htt_stats_buf->ul_mumimo_trigger_within_bss));
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+ath12k_htt_print_tx_pdev_rate_stats_tlv(const void *tag_buf, u16 tag_len,
+					struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_tx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 i, j;
+	u32 mac_id_word;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	mac_id_word = le32_to_cpu(htt_stats_buf->mac_id_word);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_RATE_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "tx_ldpc = %u\n",
+			 le32_to_cpu(htt_stats_buf->tx_ldpc));
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_ldpc = %u\n",
+			 le32_to_cpu(htt_stats_buf->ac_mu_mimo_tx_ldpc));
+	len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_ldpc = %u\n",
+			 le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_ldpc));
+	len += scnprintf(buf + len, buf_len - len, "ofdma_tx_ldpc = %u\n",
+			 le32_to_cpu(htt_stats_buf->ofdma_tx_ldpc));
+	len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->rts_cnt));
+	len += scnprintf(buf + len, buf_len - len, "rts_success = %u\n",
+			 le32_to_cpu(htt_stats_buf->rts_success));
+	len += scnprintf(buf + len, buf_len - len, "ack_rssi = %u\n",
+			 le32_to_cpu(htt_stats_buf->ack_rssi));
+	len += scnprintf(buf + len, buf_len - len,
+			 "Legacy CCK Rates: 1 Mbps: %u, 2 Mbps: %u, 5.5 Mbps: %u, 12 Mbps: %u\n",
+			 le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[0]),
+			 le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[1]),
+			 le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[2]),
+			 le32_to_cpu(htt_stats_buf->tx_legacy_cck_rate[3]));
+	len += scnprintf(buf + len, buf_len - len,
+			 "Legacy OFDM Rates: 6 Mbps: %u, 9 Mbps: %u, 12 Mbps: %u, 18 Mbps: %u\n"
+			 "                   24 Mbps: %u, 36 Mbps: %u, 48 Mbps: %u, 54 Mbps: %u\n",
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[0]),
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[1]),
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[2]),
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[3]),
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[4]),
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[5]),
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[6]),
+			 le32_to_cpu(htt_stats_buf->tx_legacy_ofdm_rate[7]));
+	len += scnprintf(buf + len, buf_len - len, "HE LTF: 1x: %u, 2x: %u, 4x: %u\n",
+			 le32_to_cpu(htt_stats_buf->tx_he_ltf[1]),
+			 le32_to_cpu(htt_stats_buf->tx_he_ltf[2]),
+			 le32_to_cpu(htt_stats_buf->tx_he_ltf[3]));
+
+	len += print_array_to_buf(buf, len, "tx_mcs", htt_stats_buf->tx_mcs,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+		len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+				 j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+				 le32_to_cpu(htt_stats_buf->tx_mcs_ext[j]));
+	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS; j++)
+		len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+				 j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS +
+				 ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS,
+				 le32_to_cpu(htt_stats_buf->tx_mcs_ext_2[j]));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	len += print_array_to_buf(buf, len, "ax_mu_mimo_tx_mcs",
+				  htt_stats_buf->ax_mu_mimo_tx_mcs,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+		len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+				 j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+				 le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_mcs_ext[j]));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	len += print_array_to_buf(buf, len, "ofdma_tx_mcs",
+				  htt_stats_buf->ofdma_tx_mcs,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+		len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+				 j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+				 le32_to_cpu(htt_stats_buf->ofdma_tx_mcs_ext[j]));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "tx_nss =");
+	for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,",
+				 j, le32_to_cpu(htt_stats_buf->tx_nss[j - 1]));
+	len--;
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "ac_mu_mimo_tx_nss =");
+	for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,",
+				 j, le32_to_cpu(htt_stats_buf->ac_mu_mimo_tx_nss[j - 1]));
+	len--;
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "ax_mu_mimo_tx_nss =");
+	for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,",
+				 j, le32_to_cpu(htt_stats_buf->ax_mu_mimo_tx_nss[j - 1]));
+	len--;
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	len += scnprintf(buf + len, buf_len - len, "ofdma_tx_nss =");
+	for (j = 1; j <= ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++)
+		len += scnprintf(buf + len, buf_len - len, " %u:%u,",
+				 j, le32_to_cpu(htt_stats_buf->ofdma_tx_nss[j - 1]));
+	len--;
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	len += print_array_to_buf(buf, len, "tx_bw", htt_stats_buf->tx_bw,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, NULL);
+	len += scnprintf(buf + len, buf_len - len, ", %u:%u\n",
+			 ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS,
+			 le32_to_cpu(htt_stats_buf->tx_bw_320mhz));
+
+	len += print_array_to_buf(buf, len, "tx_stbc",
+				  htt_stats_buf->tx_stbc,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+		len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+				 j + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+				 le32_to_cpu(htt_stats_buf->tx_stbc_ext[j]));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, (buf_len - len),
+				 "tx_gi[%u] =", j);
+		len += print_array_to_buf(buf, len, NULL, htt_stats_buf->tx_gi[j],
+					  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+					  NULL);
+		for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++)
+			len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+					 i + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+					 le32_to_cpu(htt_stats_buf->tx_gi_ext[j][i]));
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, (buf_len - len),
+				 "ac_mu_mimo_tx_gi[%u] =", j);
+		len += print_array_to_buf(buf, len, NULL,
+					  htt_stats_buf->ac_mu_mimo_tx_gi[j],
+					  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+					  "\n");
+	}
+
+	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, (buf_len - len),
+				 "ax_mu_mimo_tx_gi[%u] =", j);
+		len += print_array_to_buf(buf, len, NULL, htt_stats_buf->ax_mimo_tx_gi[j],
+					  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+					  NULL);
+		for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++)
+			len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+					 i + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+					 le32_to_cpu(htt_stats_buf->ax_tx_gi_ext[j][i]));
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, (buf_len - len),
+				 "ofdma_tx_gi[%u] = ", j);
+		len += print_array_to_buf(buf, len, NULL, htt_stats_buf->ofdma_tx_gi[j],
+					  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+					  NULL);
+		for (i = 0; i < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; i++)
+			len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+					 i + ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS,
+					 le32_to_cpu(htt_stats_buf->ofd_tx_gi_ext[j][i]));
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	len += print_array_to_buf(buf, len, "tx_su_mcs", htt_stats_buf->tx_su_mcs,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "tx_mu_mcs", htt_stats_buf->tx_mu_mcs,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "ac_mu_mimo_tx_mcs",
+				  htt_stats_buf->ac_mu_mimo_tx_mcs,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "ac_mu_mimo_tx_bw",
+				  htt_stats_buf->ac_mu_mimo_tx_bw,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "ax_mu_mimo_tx_bw",
+				  htt_stats_buf->ax_mu_mimo_tx_bw,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "ofdma_tx_bw",
+				  htt_stats_buf->ofdma_tx_bw,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "tx_pream", htt_stats_buf->tx_pream,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+	len += print_array_to_buf(buf, len, "tx_dcm", htt_stats_buf->tx_dcm,
+				  ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+ath12k_htt_print_rx_pdev_rate_stats_tlv(const void *tag_buf, u16 tag_len,
+					struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_rx_pdev_rate_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 i, j;
+	u32 mac_id_word;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	mac_id_word = le32_to_cpu(htt_stats_buf->mac_id_word);
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
+			 u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
+	len += scnprintf(buf + len, buf_len - len, "nsts = %u\n",
+			 le32_to_cpu(htt_stats_buf->nsts));
+	len += scnprintf(buf + len, buf_len - len, "rx_ldpc = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_ldpc));
+	len += scnprintf(buf + len, buf_len - len, "rts_cnt = %u\n",
+			 le32_to_cpu(htt_stats_buf->rts_cnt));
+	len += scnprintf(buf + len, buf_len - len, "rssi_mgmt = %u\n",
+			 le32_to_cpu(htt_stats_buf->rssi_mgmt));
+	len += scnprintf(buf + len, buf_len - len, "rssi_data = %u\n",
+			 le32_to_cpu(htt_stats_buf->rssi_data));
+	len += scnprintf(buf + len, buf_len - len, "rssi_comb = %u\n",
+			 le32_to_cpu(htt_stats_buf->rssi_comb));
+	len += scnprintf(buf + len, buf_len - len, "rssi_in_dbm = %d\n",
+			 le32_to_cpu(htt_stats_buf->rssi_in_dbm));
+	len += scnprintf(buf + len, buf_len - len, "rx_evm_nss_count = %u\n",
+			 le32_to_cpu(htt_stats_buf->nss_count));
+	len += scnprintf(buf + len, buf_len - len, "rx_evm_pilot_count = %u\n",
+			 le32_to_cpu(htt_stats_buf->pilot_count));
+	len += scnprintf(buf + len, buf_len - len, "rx_11ax_su_ext = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_11ax_su_ext));
+	len += scnprintf(buf + len, buf_len - len, "rx_11ac_mumimo = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_11ac_mumimo));
+	len += scnprintf(buf + len, buf_len - len, "rx_11ax_mumimo = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_11ax_mumimo));
+	len += scnprintf(buf + len, buf_len - len, "rx_11ax_ofdma = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_11ax_ofdma));
+	len += scnprintf(buf + len, buf_len - len, "txbf = %u\n",
+			 le32_to_cpu(htt_stats_buf->txbf));
+	len += scnprintf(buf + len, buf_len - len, "rx_su_ndpa = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_su_ndpa));
+	len += scnprintf(buf + len, buf_len - len, "rx_mu_ndpa = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_mu_ndpa));
+	len += scnprintf(buf + len, buf_len - len, "rx_br_poll = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_br_poll));
+	len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_low = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_active_dur_us_low));
+	len += scnprintf(buf + len, buf_len - len, "rx_active_dur_us_high = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_active_dur_us_high));
+	len += scnprintf(buf + len, buf_len - len, "rx_11ax_ul_ofdma = %u\n",
+			 le32_to_cpu(htt_stats_buf->rx_11ax_ul_ofdma));
+	len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_stbc = %u\n",
+			 le32_to_cpu(htt_stats_buf->ul_ofdma_rx_stbc));
+	len += scnprintf(buf + len, buf_len - len, "ul_ofdma_rx_ldpc = %u\n",
+			 le32_to_cpu(htt_stats_buf->ul_ofdma_rx_ldpc));
+	len += scnprintf(buf + len, buf_len - len, "per_chain_rssi_pkt_type = %#x\n",
+			 le32_to_cpu(htt_stats_buf->per_chain_rssi_pkt_type));
+
+	len += print_array_to_buf(buf, len, "rx_nss", htt_stats_buf->rx_nss,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+	len += print_array_to_buf(buf, len, "rx_dcm", htt_stats_buf->rx_dcm,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "rx_stbc", htt_stats_buf->rx_stbc,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "rx_bw", htt_stats_buf->rx_bw,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "rx_pream", htt_stats_buf->rx_pream,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES, "\n");
+	len += print_array_to_buf(buf, len, "rx_11ax_su_txbf_mcs",
+				  htt_stats_buf->rx_11ax_su_txbf_mcs,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "rx_11ax_mu_txbf_mcs",
+				  htt_stats_buf->rx_11ax_mu_txbf_mcs,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "rx_legacy_cck_rate",
+				  htt_stats_buf->rx_legacy_cck_rate,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS, "\n");
+	len += print_array_to_buf(buf, len, "rx_legacy_ofdm_rate",
+				  htt_stats_buf->rx_legacy_ofdm_rate,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS, "\n");
+	len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs",
+				  htt_stats_buf->ul_ofdma_rx_mcs,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "ul_ofdma_rx_nss",
+				  htt_stats_buf->ul_ofdma_rx_nss,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS, "\n");
+	len += print_array_to_buf(buf, len, "ul_ofdma_rx_bw",
+				  htt_stats_buf->ul_ofdma_rx_bw,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "rx_ulofdma_non_data_ppdu",
+				  htt_stats_buf->rx_ulofdma_non_data_ppdu,
+				  ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+	len += print_array_to_buf(buf, len, "rx_ulofdma_data_ppdu",
+				  htt_stats_buf->rx_ulofdma_data_ppdu,
+				  ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+	len += print_array_to_buf(buf, len, "rx_ulofdma_mpdu_ok",
+				  htt_stats_buf->rx_ulofdma_mpdu_ok,
+				  ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+	len += print_array_to_buf(buf, len, "rx_ulofdma_mpdu_fail",
+				  htt_stats_buf->rx_ulofdma_mpdu_fail,
+				  ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+	len += print_array_to_buf(buf, len, "rx_ulofdma_non_data_nusers",
+				  htt_stats_buf->rx_ulofdma_non_data_nusers,
+				  ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+	len += print_array_to_buf(buf, len, "rx_ulofdma_data_nusers",
+				  htt_stats_buf->rx_ulofdma_data_nusers,
+				  ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER, "\n");
+	len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_mcs",
+				  htt_stats_buf->rx_11ax_dl_ofdma_mcs,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_ru",
+				  htt_stats_buf->rx_11ax_dl_ofdma_ru,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "rx_ulmumimo_non_data_ppdu",
+				  htt_stats_buf->rx_ulmumimo_non_data_ppdu,
+				  ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
+	len += print_array_to_buf(buf, len, "rx_ulmumimo_data_ppdu",
+				  htt_stats_buf->rx_ulmumimo_data_ppdu,
+				  ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
+	len += print_array_to_buf(buf, len, "rx_ulmumimo_mpdu_ok",
+				  htt_stats_buf->rx_ulmumimo_mpdu_ok,
+				  ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
+	len += print_array_to_buf(buf, len, "rx_ulmumimo_mpdu_fail",
+				  htt_stats_buf->rx_ulmumimo_mpdu_fail,
+				  ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER, "\n");
+
+	len += print_array_to_buf(buf, len, "rx_mcs",
+				  htt_stats_buf->rx_mcs,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS, NULL);
+	for (j = 0; j < ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS; j++)
+		len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+				 j + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+				 le32_to_cpu(htt_stats_buf->rx_mcs_ext[j]));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "pilot_evm_db[%u] =", j);
+		len += print_array_to_buf(buf, len, NULL,
+					  htt_stats_buf->rx_pil_evm_db[j],
+					  ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS,
+					  "\n");
+	}
+
+	len += scnprintf(buf + len, buf_len - len, "pilot_evm_db_mean =");
+	for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; i++)
+		len += scnprintf(buf + len,
+				 buf_len - len,
+				 " %u:%d,", i,
+				 le32_to_cpu(htt_stats_buf->rx_pilot_evm_db_mean[i]));
+	len--;
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rssi_chain_in_db[%u] = ", j);
+		for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
+			len += scnprintf(buf + len,
+					 buf_len - len,
+					 " %u: %d,", i,
+					 htt_stats_buf->rssi_chain_in_db[j][i]);
+		len--;
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_gi[%u] = ", j);
+		len += print_array_to_buf(buf, len, NULL,
+					  htt_stats_buf->rx_gi[j],
+					  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+					  "\n");
+	}
+
+	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "ul_ofdma_rx_gi[%u] = ", j);
+		len += print_array_to_buf(buf, len, NULL,
+					  htt_stats_buf->ul_ofdma_rx_gi[j],
+					  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS,
+					  "\n");
+	}
+
+	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_ul_fd_rssi: nss[%u] = ", j);
+		for (i = 0; i < ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER; i++)
+			len += scnprintf(buf + len,
+					 buf_len - len,
+					 " %u:%d,",
+					 i, htt_stats_buf->rx_ul_fd_rssi[j][i]);
+		len--;
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_per_chain_rssi_in_dbm[%u] =", j);
+		for (i = 0; i < ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS; i++)
+			len += scnprintf(buf + len,
+					 buf_len - len,
+					 " %u:%d,",
+					 i,
+					 htt_stats_buf->rx_per_chain_rssi_in_dbm[j][i]);
+		len--;
+		len += scnprintf(buf + len, buf_len - len, "\n");
+	}
+
+	stats_req->buf_len = len;
+}
+
+static inline void
+ath12k_htt_print_rx_pdev_rate_ext_stats_tlv(const void *tag_buf, u16 tag_len,
+					    struct debug_htt_stats_req *stats_req)
+{
+	const struct ath12k_htt_rx_pdev_rate_ext_stats_tlv *htt_stats_buf = tag_buf;
+	u8 *buf = stats_req->buf;
+	u32 len = stats_req->buf_len;
+	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
+	u8 j;
+
+	if (tag_len < sizeof(*htt_stats_buf))
+		return;
+
+	len += scnprintf(buf + len, buf_len - len, "HTT_RX_PDEV_RATE_EXT_STATS_TLV:\n");
+	len += scnprintf(buf + len, buf_len - len, "rssi_mgmt_in_dbm = %d\n",
+			 le32_to_cpu(htt_stats_buf->rssi_mgmt_in_dbm));
+
+	len += print_array_to_buf(buf, len, "rx_stbc_ext",
+				  htt_stats_buf->rx_stbc_ext,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+	len += print_array_to_buf(buf, len, "ul_ofdma_rx_mcs_ext",
+				  htt_stats_buf->ul_ofdma_rx_mcs_ext,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+	len += print_array_to_buf(buf, len, "rx_11ax_su_txbf_mcs_ext",
+				  htt_stats_buf->rx_11ax_su_txbf_mcs_ext,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+	len += print_array_to_buf(buf, len, "rx_11ax_mu_txbf_mcs_ext",
+				  htt_stats_buf->rx_11ax_mu_txbf_mcs_ext,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+	len += print_array_to_buf(buf, len, "rx_11ax_dl_ofdma_mcs_ext",
+				  htt_stats_buf->rx_11ax_dl_ofdma_mcs_ext,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT, "\n");
+	len += print_array_to_buf(buf, len, "rx_bw_ext",
+				  htt_stats_buf->rx_bw_ext,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS, "\n");
+	len += print_array_to_buf(buf, len, "rx_su_punctured_mode",
+				  htt_stats_buf->rx_su_punctured_mode,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS,
+				  "\n");
+
+	len += print_array_to_buf(buf, len, "rx_mcs_ext",
+				  htt_stats_buf->rx_mcs_ext,
+				  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT,
+				  NULL);
+	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS; j++)
+		len += scnprintf(buf + len, buf_len - len, ", %u:%u",
+				 j + ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT,
+				 le32_to_cpu(htt_stats_buf->rx_mcs_ext_2[j]));
+	len += scnprintf(buf + len, buf_len - len, "\n");
+
+	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "rx_gi_ext[%u] = ", j);
+		len += print_array_to_buf(buf, len, NULL,
+					  htt_stats_buf->rx_gi_ext[j],
+					  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT,
+					  "\n");
+	}
+
+	for (j = 0; j < ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS; j++) {
+		len += scnprintf(buf + len, buf_len - len,
+				 "ul_ofdma_rx_gi_ext[%u] = ", j);
+		len += print_array_to_buf(buf, len, NULL,
+					  htt_stats_buf->ul_ofdma_rx_gi_ext[j],
+					  ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT,
+					  "\n");
+	}
+
+	stats_req->buf_len = len;
+}
+
 static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
 					  u16 tag, u16 len, const void *tag_buf,
 					  void *user_data)
@@ -2867,9 +5185,82 @@
 	case HTT_STATS_PDEV_CCA_COUNTERS_TAG:
 		ath12k_htt_print_pdev_stats_cca_counters_tlv(tag_buf, len, stats_req);
 		break;
+	case HTT_STATS_TX_SOUNDING_STATS_TAG:
+		ath12k_htt_print_tx_sounding_stats_tlv(tag_buf, len, stats_req);
+		break;
 	case HTT_STATS_PDEV_OBSS_PD_TAG:
 		ath12k_htt_print_pdev_obss_pd_stats_tlv(tag_buf, len, stats_req);
 		break;
+	case HTT_STATS_LATENCY_CTX_TAG:
+		ath12k_htt_print_latency_prof_ctx_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_LATENCY_CNT_TAG:
+		ath12k_htt_print_latency_prof_cnt(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_LATENCY_PROF_STATS_TAG:
+		ath12k_htt_print_latency_prof_stats_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_RX_PDEV_UL_TRIG_STATS_TAG:
+		ath12k_htt_print_ul_ofdma_trigger_stats(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_RX_PDEV_UL_OFDMA_USER_STATS_TAG:
+		ath12k_htt_print_ul_ofdma_user_stats(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_STATS_TAG:
+		ath12k_htt_print_ul_mumimo_trig_stats(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_RX_FSE_STATS_TAG:
+		ath12k_htt_print_rx_fse_stats_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG:
+		ath12k_htt_print_pdev_tx_rate_txbf_stats_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG:
+		ath12k_htt_print_txbf_ofdma_ax_ndpa_stats_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG:
+		ath12k_htt_print_txbf_ofdma_ax_ndp_stats_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG:
+		ath12k_htt_print_txbf_ofdma_ax_brp_stats_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_AX_STEER_STATS_TAG:
+		ath12k_htt_print_txbf_ofdma_ax_steer_stats_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG:
+		ath12k_htt_print_txbf_ofdma_ax_steer_mpdu_stats_tlv(tag_buf, len,
+								    stats_req);
+		break;
+	case HTT_STATS_DLPAGER_STATS_TAG:
+		ath12k_htt_print_dlpager_stats_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_PHY_STATS_TAG:
+		ath12k_htt_print_phy_stats_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_PHY_COUNTERS_TAG:
+		ath12k_htt_print_phy_counters_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_PHY_RESET_STATS_TAG:
+		ath12k_htt_print_phy_reset_stats_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_PHY_RESET_COUNTERS_TAG:
+		ath12k_htt_print_phy_reset_counters_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_PHY_TPC_STATS_TAG:
+		ath12k_htt_print_phy_tpc_stats_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_SOC_TXRX_STATS_COMMON_TAG:
+		ath12k_htt_print_soc_txrx_stats_common_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_PER_RATE_STATS_TAG:
+		ath12k_htt_print_tx_per_rate_stats_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_AST_ENTRY_TAG:
+		ath12k_htt_print_ast_entry_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_PDEV_PUNCTURE_STATS_TAG:
+		ath12k_htt_print_puncture_stats_tlv(tag_buf, len, stats_req);
+		break;
 	case HTT_STATS_DMAC_RESET_STATS_TAG:
 		ath12k_htt_print_dmac_reset_stats_tlv(tag_buf, len, stats_req);
 		break;
@@ -2879,6 +5270,19 @@
 	case HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG:
 		ath12k_htt_print_tx_pdev_rate_stats_be_ofdma_tlv(tag_buf, len, stats_req);
 		break;
+	case HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG:
+		ath12k_htt_print_pdev_mbssid_ctrl_frame_stats_tlv(tag_buf, len,
+								  stats_req);
+		break;
+	case HTT_STATS_TX_PDEV_RATE_STATS_TAG:
+		ath12k_htt_print_tx_pdev_rate_stats_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_RX_PDEV_RATE_STATS_TAG:
+		ath12k_htt_print_rx_pdev_rate_stats_tlv(tag_buf, len, stats_req);
+		break;
+	case HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG:
+		ath12k_htt_print_rx_pdev_rate_ext_stats_tlv(tag_buf, len, stats_req);
+		break;
 	default:
 		break;
 	}
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h	2025-09-25 17:40:34.139360145 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef DEBUG_HTT_STATS_H
@@ -129,15 +129,32 @@
 	ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR		= 5,
 	ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM		= 6,
 	ATH12K_DBG_HTT_EXT_STATS_TX_DE_INFO		= 8,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE			= 9,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_RX_RATE			= 10,
 	ATH12K_DBG_HTT_EXT_STATS_TX_SELFGEN_INFO	= 12,
 	ATH12K_DBG_HTT_EXT_STATS_SRNG_INFO		= 15,
 	ATH12K_DBG_HTT_EXT_STATS_SFM_INFO		= 16,
 	ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_MU		= 17,
 	ATH12K_DBG_HTT_EXT_STATS_PDEV_CCA_STATS		= 19,
+	ATH12K_DBG_HTT_EXT_STATS_TX_SOUNDING_INFO		= 22,
 	ATH12K_DBG_HTT_EXT_STATS_PDEV_OBSS_PD_STATS	= 23,
+	ATH12K_DBG_HTT_EXT_STATS_LATENCY_PROF_STATS		= 25,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_UL_TRIG_STATS		= 26,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_UL_MUMIMO_TRIG_STATS	= 27,
+	ATH12K_DBG_HTT_EXT_STATS_FSE_RX				= 28,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_RX_RATE_EXT		= 30,
+	ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_RATE_TXBF		= 31,
+	ATH12K_DBG_HTT_EXT_STATS_TXBF_OFDMA			= 32,
+	ATH12K_DBG_HTT_EXT_STATS_DLPAGER_STATS			= 36,
+	ATH12K_DBG_HTT_EXT_PHY_COUNTERS_AND_PHY_STATS		= 37,
+	ATH12K_DBG_HTT_EXT_VDEVS_TXRX_STATS			= 38,
+	ATH12K_DBG_HTT_EXT_PDEV_PER_STATS			= 40,
+	ATH12K_DBG_HTT_EXT_AST_ENTRIES				= 41,
 	ATH12K_DBG_HTT_EXT_STATS_SOC_ERROR		= 45,
+	ATH12K_DBG_HTT_DBG_PDEV_PUNCTURE_STATS			= 46,
 	ATH12K_DBG_HTT_EXT_STATS_PDEV_SCHED_ALGO	= 49,
 	ATH12K_DBG_HTT_EXT_STATS_MANDATORY_MUOFDMA	= 51,
+	ATH12K_DGB_HTT_EXT_STATS_PDEV_MBSSID_CTRL_FRAME		= 54,
 
 	/* keep this last */
 	ATH12K_DBG_HTT_NUM_EXT_STATS,
@@ -164,6 +181,8 @@
 	HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG		= 25,
 	HTT_STATS_SFM_CMN_TAG				= 26,
 	HTT_STATS_SRING_STATS_TAG			= 27,
+	HTT_STATS_TX_PDEV_RATE_STATS_TAG		= 34,
+	HTT_STATS_RX_PDEV_RATE_STATS_TAG		= 35,
 	HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG	= 36,
 	HTT_STATS_TX_SCHED_CMN_TAG			= 37,
 	HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG		= 39,
@@ -186,22 +205,48 @@
 	HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG		= 72,
 	HTT_STATS_PDEV_CCA_COUNTERS_TAG			= 73,
 	HTT_STATS_TX_PDEV_MPDU_STATS_TAG		= 74,
+	HTT_STATS_TX_SOUNDING_STATS_TAG			= 80,
 	HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG		= 86,
 	HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG	= 87,
 	HTT_STATS_PDEV_OBSS_PD_TAG			= 88,
 	HTT_STATS_HW_WAR_TAG				= 89,
+	HTT_STATS_LATENCY_PROF_STATS_TAG		= 91,
+	HTT_STATS_LATENCY_CTX_TAG			= 92,
+	HTT_STATS_LATENCY_CNT_TAG			= 93,
+	HTT_STATS_RX_PDEV_UL_TRIG_STATS_TAG		= 94,
+	HTT_STATS_RX_PDEV_UL_OFDMA_USER_STATS_TAG	= 95,
+	HTT_STATS_RX_PDEV_UL_MUMIMO_TRIG_STATS_TAG	= 97,
+	HTT_STATS_RX_FSE_STATS_TAG			= 98,
 	HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG	= 100,
 	HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG		= 102,
+	HTT_STATS_RX_PDEV_RATE_EXT_STATS_TAG		= 103,
+	HTT_STATS_PDEV_TX_RATE_TXBF_STATS_TAG		= 108,
 	HTT_STATS_TX_SELFGEN_AC_SCHED_STATUS_STATS_TAG	= 111,
 	HTT_STATS_TX_SELFGEN_AX_SCHED_STATUS_STATS_TAG	= 112,
+	HTT_STATS_DLPAGER_STATS_TAG			= 120,
+	HTT_STATS_PHY_COUNTERS_TAG			= 121,
+	HTT_STATS_PHY_STATS_TAG				= 122,
+	HTT_STATS_PHY_RESET_COUNTERS_TAG		= 123,
+	HTT_STATS_PHY_RESET_STATS_TAG			= 124,
+	HTT_STATS_SOC_TXRX_STATS_COMMON_TAG		= 125,
+	HTT_STATS_PER_RATE_STATS_TAG			= 128,
 	HTT_STATS_MU_PPDU_DIST_TAG			= 129,
 	HTT_STATS_TX_PDEV_MUMIMO_GRP_STATS_TAG		= 130,
+	HTT_STATS_AST_ENTRY_TAG				= 132,
 	HTT_STATS_TX_PDEV_RATE_STATS_BE_OFDMA_TAG	= 135,
 	HTT_STATS_TX_SELFGEN_BE_ERR_STATS_TAG		= 137,
 	HTT_STATS_TX_SELFGEN_BE_STATS_TAG		= 138,
 	HTT_STATS_TX_SELFGEN_BE_SCHED_STATUS_STATS_TAG	= 139,
+	HTT_STATS_TXBF_OFDMA_AX_NDPA_STATS_TAG		= 147,
+	HTT_STATS_TXBF_OFDMA_AX_NDP_STATS_TAG		= 148,
+	HTT_STATS_TXBF_OFDMA_AX_BRP_STATS_TAG		= 149,
+	HTT_STATS_TXBF_OFDMA_AX_STEER_STATS_TAG		= 150,
 	HTT_STATS_DMAC_RESET_STATS_TAG			= 155,
+	HTT_STATS_PHY_TPC_STATS_TAG			= 157,
+	HTT_STATS_PDEV_PUNCTURE_STATS_TAG		= 158,
 	HTT_STATS_PDEV_SCHED_ALGO_OFDMA_STATS_TAG	= 165,
+	HTT_STATS_TXBF_OFDMA_AX_STEER_MPDU_STATS_TAG	= 172,
+	HTT_STATS_PDEV_MBSSID_CTRL_FRAME_STATS_TAG	= 176,
 
 	HTT_STATS_MAX_TAG,
 };
@@ -361,6 +406,182 @@
 	__le32 num_ppdu_posted_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS];
 } __packed;
 
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS        12
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS          4
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS         5
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS          4
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS      8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES       7
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS     4
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS    8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_LTF                  4
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS   2
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS  2
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES   6
+
+struct ath12k_htt_tx_pdev_rate_stats_tlv {
+	__le32 mac_id_word;
+	__le32 tx_ldpc;
+	__le32 rts_cnt;
+	__le32 ack_rssi;
+	__le32 tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 tx_su_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 tx_mu_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	__le32 tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	__le32 tx_stbc[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 tx_pream[ATH12K_HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+	__le32 tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+		[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 tx_dcm[ATH12K_HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS];
+	__le32 rts_success;
+	__le32 tx_legacy_cck_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+	__le32 tx_legacy_ofdm_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+	__le32 ac_mu_mimo_tx_ldpc;
+	__le32 ax_mu_mimo_tx_ldpc;
+	__le32 ofdma_tx_ldpc;
+	__le32 tx_he_ltf[ATH12K_HTT_TX_PDEV_STATS_NUM_LTF];
+	__le32 ac_mu_mimo_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 ax_mu_mimo_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 ac_mu_mimo_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	__le32 ax_mu_mimo_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	__le32 ofdma_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	__le32 ac_mu_mimo_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	__le32 ax_mu_mimo_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	__le32 ofdma_tx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	__le32 ac_mu_mimo_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+			    [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 ax_mimo_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+			    [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 ofdma_tx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+		       [ATH12K_HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 trigger_type_11ax[ATH12K_HTT_TX_PDEV_STATS_NUM_11AX_TRIGGER_TYPES];
+	__le32 tx_11ax_su_ext;
+	__le32 tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	__le32 tx_stbc_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	__le32 tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+		     [ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	__le32 ax_mu_mimo_tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	__le32 ofdma_tx_mcs_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	__le32 ax_tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+				[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	__le32 ofd_tx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+			   [ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+	__le32 tx_mcs_ext_2[ATH12K_HTT_TX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+	__le32 tx_bw_320mhz;
+};
+
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS		4
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS		8
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS		12
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS		4
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS		5
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS		4
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS		8
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES		7
+#define ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER			8
+#define ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS		16
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS		6
+#define ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER		8
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS		2
+
+struct ath12k_htt_rx_pdev_rate_stats_tlv {
+	__le32 mac_id_word;
+	__le32 nsts;
+	__le32 rx_ldpc;
+	__le32 rts_cnt;
+	__le32 rssi_mgmt;
+	__le32 rssi_data;
+	__le32 rssi_comb;
+	__le32 rx_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 rx_nss[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	__le32 rx_dcm[ATH12K_HTT_RX_PDEV_STATS_NUM_DCM_COUNTERS];
+	__le32 rx_stbc[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 rx_bw[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+	__le32 rx_pream[ATH12K_HTT_RX_PDEV_STATS_NUM_PREAMBLE_TYPES];
+	u8 rssi_chain_in_db[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+		     [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+	__le32 rx_gi[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+		[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 rssi_in_dbm;
+	__le32 rx_11ax_su_ext;
+	__le32 rx_11ac_mumimo;
+	__le32 rx_11ax_mumimo;
+	__le32 rx_11ax_ofdma;
+	__le32 txbf;
+	__le32 rx_legacy_cck_rate[ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+	__le32 rx_legacy_ofdm_rate[ATH12K_HTT_RX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+	__le32 rx_active_dur_us_low;
+	__le32 rx_active_dur_us_high;
+	__le32 rx_11ax_ul_ofdma;
+	__le32 ul_ofdma_rx_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 ul_ofdma_rx_gi[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+			  [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 ul_ofdma_rx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	__le32 ul_ofdma_rx_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+	__le32 ul_ofdma_rx_stbc;
+	__le32 ul_ofdma_rx_ldpc;
+	__le32 rx_ulofdma_non_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+	__le32 rx_ulofdma_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+	__le32 rx_ulofdma_mpdu_ok[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+	__le32 rx_ulofdma_mpdu_fail[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+	__le32 nss_count;
+	__le32 pilot_count;
+	__le32 rx_pil_evm_db[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+			   [ATH12K_HTT_RX_PDEV_STATS_RXEVM_MAX_PILOTS_NSS];
+	__le32 rx_pilot_evm_db_mean[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	s8 rx_ul_fd_rssi[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+			[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+	__le32 per_chain_rssi_pkt_type;
+	s8 rx_per_chain_rssi_in_dbm[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+				   [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_COUNTERS];
+	__le32 rx_su_ndpa;
+	__le32 rx_11ax_su_txbf_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 rx_mu_ndpa;
+	__le32 rx_11ax_mu_txbf_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 rx_br_poll;
+	__le32 rx_11ax_dl_ofdma_mcs[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS];
+	__le32 rx_11ax_dl_ofdma_ru[ATH12K_HTT_RX_PDEV_STATS_NUM_RU_SIZE_COUNTERS];
+	__le32 rx_ulmumimo_non_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+	__le32 rx_ulmumimo_data_ppdu[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+	__le32 rx_ulmumimo_mpdu_ok[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+	__le32 rx_ulmumimo_mpdu_fail[ATH12K_HTT_RX_PDEV_MAX_ULMUMIMO_NUM_USER];
+	__le32 rx_ulofdma_non_data_nusers[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+	__le32 rx_ulofdma_data_nusers[ATH12K_HTT_RX_PDEV_MAX_OFDMA_NUM_USER];
+	__le32 rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA_MCS_COUNTERS];
+};
+
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS		4
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT		14
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS	2
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS		5
+#define ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS	5
+
+struct ath12k_htt_rx_pdev_rate_ext_stats_tlv {
+	u8 rssi_chain_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+			 [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS];
+	s8 rx_per_chain_rssi_ext_in_dbm[ATH12K_HTT_RX_PDEV_STATS_NUM_SPATIAL_STREAMS]
+				       [ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT_COUNTERS];
+	__le32 rssi_mcast_in_dbm;
+	__le32 rssi_mgmt_in_dbm;
+	__le32 rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	__le32 rx_stbc_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	__le32 rx_gi_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+		     [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	__le32 ul_ofdma_rx_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	__le32 ul_ofdma_rx_gi_ext[ATH12K_HTT_TX_PDEV_STATS_NUM_GI_COUNTERS]
+			      [ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	__le32 rx_11ax_su_txbf_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	__le32 rx_11ax_mu_txbf_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	__le32 rx_11ax_dl_ofdma_mcs_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_MCS_COUNTERS_EXT];
+	__le32 rx_mcs_ext_2[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+	__le32 rx_bw_ext[ATH12K_HTT_RX_PDEV_STATS_NUM_BW_EXT2_COUNTERS];
+	__le32 rx_gi_ext_2[ATH12K_HTT_RX_PDEV_STATS_NUM_GI_COUNTERS]
+		[ATH12K_HTT_RX_PDEV_STATS_NUM_EXTRA2_MCS_COUNTERS];
+	__le32 rx_su_punctured_mode[ATH12K_HTT_RX_PDEV_STATS_NUM_PUNCTURED_MODE_COUNTERS];
+};
+
 #define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID	GENMASK(7, 0)
 #define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID	GENMASK(15, 8)
 
@@ -1032,6 +1253,82 @@
 	__le32 collection_interval;
 } __packed;
 
+#define ATH12K_HTT_TX_CV_CORR_MAX_NUM_COLUMNS		8
+#define ATH12K_HTT_TX_NUM_AC_MUMIMO_USER_STATS		4
+#define ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS          8
+#define ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS		8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS	4
+#define ATH12K_HTT_TX_NUM_MCS_CNTRS			12
+#define ATH12K_HTT_TX_NUM_EXTRA_MCS_CNTRS		2
+
+#define ATH12K_HTT_TX_NUM_OF_SOUNDING_STATS_WORDS \
+	(ATH12K_HTT_TX_PDEV_STATS_NUM_BW_COUNTERS * \
+	 ATH12K_HTT_TX_NUM_AX_MUMIMO_USER_STATS)
+
+enum ath12k_htt_txbf_sound_steer_modes {
+	ATH12K_HTT_IMPL_STEER_STATS		= 0,
+	ATH12K_HTT_EXPL_SUSIFS_STEER_STATS	= 1,
+	ATH12K_HTT_EXPL_SURBO_STEER_STATS	= 2,
+	ATH12K_HTT_EXPL_MUSIFS_STEER_STATS	= 3,
+	ATH12K_HTT_EXPL_MURBO_STEER_STATS	= 4,
+	ATH12K_HTT_TXBF_MAX_NUM_OF_MODES	= 5
+};
+
+enum ath12k_htt_stats_sounding_tx_mode {
+	ATH12K_HTT_TX_AC_SOUNDING_MODE		= 0,
+	ATH12K_HTT_TX_AX_SOUNDING_MODE		= 1,
+	ATH12K_HTT_TX_BE_SOUNDING_MODE		= 2,
+	ATH12K_HTT_TX_CMN_SOUNDING_MODE		= 3,
+};
+
+struct ath12k_htt_tx_sounding_stats_tlv {
+	__le32 tx_sounding_mode;
+	__le32 cbf_20[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+	__le32 cbf_40[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+	__le32 cbf_80[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+	__le32 cbf_160[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+	__le32 sounding[ATH12K_HTT_TX_NUM_OF_SOUNDING_STATS_WORDS];
+	__le32 cv_nc_mismatch_err;
+	__le32 cv_fcs_err;
+	__le32 cv_frag_idx_mismatch;
+	__le32 cv_invalid_peer_id;
+	__le32 cv_no_txbf_setup;
+	__le32 cv_expiry_in_update;
+	__le32 cv_pkt_bw_exceed;
+	__le32 cv_dma_not_done_err;
+	__le32 cv_update_failed;
+	__le32 cv_total_query;
+	__le32 cv_total_pattern_query;
+	__le32 cv_total_bw_query;
+	__le32 cv_invalid_bw_coding;
+	__le32 cv_forced_sounding;
+	__le32 cv_standalone_sounding;
+	__le32 cv_nc_mismatch;
+	__le32 cv_fb_type_mismatch;
+	__le32 cv_ofdma_bw_mismatch;
+	__le32 cv_bw_mismatch;
+	__le32 cv_pattern_mismatch;
+	__le32 cv_preamble_mismatch;
+	__le32 cv_nr_mismatch;
+	__le32 cv_in_use_cnt_exceeded;
+	__le32 cv_found;
+	__le32 cv_not_found;
+	__le32 sounding_320[ATH12K_HTT_TX_NUM_BE_MUMIMO_USER_STATS];
+	__le32 cbf_320[ATH12K_HTT_TXBF_MAX_NUM_OF_MODES];
+	__le32 cv_ntbr_sounding;
+	__le32 cv_found_upload_in_progress;
+	__le32 cv_expired_during_query;
+	__le32 cv_dma_timeout_error;
+	__le32 cv_buf_ibf_uploads;
+	__le32 cv_buf_ebf_uploads;
+	__le32 cv_buf_received;
+	__le32 cv_buf_fed_back;
+	__le32 cv_total_query_ibf;
+	__le32 cv_found_ibf;
+	__le32 cv_not_found_ibf;
+	__le32 cv_expired_during_query_ibf;
+} __packed;
+
 struct ath12k_htt_pdev_obss_pd_stats_tlv {
 	__le32 num_obss_tx_ppdu_success;
 	__le32 num_obss_tx_ppdu_failure;
@@ -1054,6 +1351,396 @@
 	__le32 num_sr_ppdu_abort_flush_cnt;
 } __packed;
 
+#define ATH12K_HTT_STATS_MAX_PROF_STATS_NAME_LEN	32
+#define ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST		3
+#define ATH12K_HTT_INTERRUPTS_LATENCY_PROFILE_MAX_HIST	3
+
+struct ath12k_htt_latency_prof_stats_tlv {
+	__le32 print_header;
+	s8 latency_prof_name[ATH12K_HTT_STATS_MAX_PROF_STATS_NAME_LEN];
+	__le32 cnt;
+	__le32 min;
+	__le32 max;
+	__le32 last;
+	__le32 tot;
+	__le32 avg;
+	__le32 hist_intvl;
+	__le32 hist[ATH12K_HTT_LATENCY_PROFILE_NUM_MAX_HIST];
+}  __packed;
+
+struct ath12k_htt_latency_prof_ctx_tlv {
+	__le32 duration;
+	__le32 tx_msdu_cnt;
+	__le32 tx_mpdu_cnt;
+	__le32 tx_ppdu_cnt;
+	__le32 rx_msdu_cnt;
+	__le32 rx_mpdu_cnt;
+} __packed;
+
+struct ath12k_htt_latency_prof_cnt_tlv {
+	__le32 prof_enable_cnt;
+} __packed;
+
+#define ATH12K_HTT_RX_NUM_MCS_CNTRS		12
+#define ATH12K_HTT_RX_NUM_GI_CNTRS		4
+#define ATH12K_HTT_RX_NUM_SPATIAL_STREAMS	8
+#define ATH12K_HTT_RX_NUM_BW_CNTRS		4
+#define ATH12K_HTT_RX_NUM_RU_SIZE_CNTRS		6
+#define ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS	7
+#define ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK	5
+#define ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES	2
+#define ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS	2
+
+enum ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE {
+	ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_26,
+	ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_52,
+	ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_106,
+	ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_242,
+	ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_484,
+	ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996,
+	ATH12K_HTT_TX_RX_PDEV_STATS_AX_RU_SIZE_996x2,
+	ATH12K_HTT_TX_RX_PDEV_STATS_NUM_AX_RU_SIZE_CNTRS,
+};
+
+struct ath12k_htt_rx_pdev_ul_ofdma_user_stats_tlv {
+	__le32 user_index;
+	__le32 rx_ulofdma_non_data_ppdu;
+	__le32 rx_ulofdma_data_ppdu;
+	__le32 rx_ulofdma_mpdu_ok;
+	__le32 rx_ulofdma_mpdu_fail;
+	__le32 rx_ulofdma_non_data_nusers;
+	__le32 rx_ulofdma_data_nusers;
+} __packed;
+
+struct ath12k_htt_rx_pdev_ul_trigger_stats_tlv {
+	__le32 mac_id__word;
+	__le32 rx_11ax_ul_ofdma;
+	__le32 ul_ofdma_rx_mcs[ATH12K_HTT_RX_NUM_MCS_CNTRS];
+	__le32 ul_ofdma_rx_gi[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_MCS_CNTRS];
+	__le32 ul_ofdma_rx_nss[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
+	__le32 ul_ofdma_rx_bw[ATH12K_HTT_RX_NUM_BW_CNTRS];
+	__le32 ul_ofdma_rx_stbc;
+	__le32 ul_ofdma_rx_ldpc;
+	__le32 data_ru_size_ppdu[ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS];
+	__le32 non_data_ru_size_ppdu[ATH12K_HTT_RX_NUM_RU_SIZE_160MHZ_CNTRS];
+	__le32 uplink_sta_aid[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+	__le32 uplink_sta_target_rssi[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+	__le32 uplink_sta_fd_rssi[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+	__le32 uplink_sta_power_headroom[ATH12K_HTT_RX_UL_MAX_UPLINK_RSSI_TRACK];
+	__le32 red_bw[ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_RX_NUM_BW_CNTRS];
+	__le32 ul_ofdma_bsc_trig_rx_qos_null_only;
+} __packed;
+
+#define ATH12K_HTT_TX_UL_MUMIMO_USER_STATS	8
+
+struct ath12k_htt_rx_ul_mumimo_trig_stats_tlv {
+	__le32 mac_id__word;
+	__le32 rx_11ax_ul_mumimo;
+	__le32 ul_mumimo_rx_mcs[ATH12K_HTT_RX_NUM_MCS_CNTRS];
+	__le32 ul_rx_gi[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_MCS_CNTRS];
+	__le32 ul_mumimo_rx_nss[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
+	__le32 ul_mumimo_rx_bw[ATH12K_HTT_RX_NUM_BW_CNTRS];
+	__le32 ul_mumimo_rx_stbc;
+	__le32 ul_mumimo_rx_ldpc;
+	__le32 ul_mumimo_rx_mcs_ext[ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS];
+	__le32 ul_gi_ext[ATH12K_HTT_RX_NUM_GI_CNTRS][ATH12K_HTT_RX_NUM_EXTRA_MCS_CNTRS];
+	s8 ul_rssi[ATH12K_HTT_RX_NUM_SPATIAL_STREAMS][ATH12K_HTT_RX_NUM_BW_CNTRS];
+	s8 tgt_rssi[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_BW_CNTRS];
+	s8 fd[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
+	s8 db[ATH12K_HTT_TX_UL_MUMIMO_USER_STATS][ATH12K_HTT_RX_NUM_SPATIAL_STREAMS];
+	__le32 red_bw[ATH12K_HTT_RX_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_RX_NUM_BW_CNTRS];
+	__le32 mumimo_bsc_trig_rx_qos_null_only;
+} __packed;
+
+#define ATH12K_HTT_RX_NUM_MAX_PEAK_OCCUPANCY_INDEX	10
+#define ATH12K_HTT_RX_NUM_MAX_CURR_OCCUPANCY_INDEX	10
+#define ATH12K_HTT_RX_NUM_SQUARE_INDEX			6
+#define ATH12K_HTT_RX_NUM_MAX_PEAK_SEARCH_INDEX		4
+#define ATH12K_HTT_RX_NUM_MAX_PENDING_SEARCH_INDEX	4
+
+struct ath12k_htt_rx_fse_stats_tlv {
+	__le32 fse_enable_cnt;
+	__le32 fse_disable_cnt;
+	__le32 fse_cache_invalidate_entry_cnt;
+	__le32 fse_full_cache_invalidate_cnt;
+	__le32 fse_num_cache_hits_cnt;
+	__le32 fse_num_searches_cnt;
+	__le32 fse_cache_occupancy_peak_cnt[ATH12K_HTT_RX_NUM_MAX_PEAK_OCCUPANCY_INDEX];
+	__le32 fse_cache_occupancy_curr_cnt[ATH12K_HTT_RX_NUM_MAX_CURR_OCCUPANCY_INDEX];
+	__le32 fse_search_stat_square_cnt[ATH12K_HTT_RX_NUM_SQUARE_INDEX];
+	__le32 fse_search_stat_peak_cnt[ATH12K_HTT_RX_NUM_MAX_PEAK_SEARCH_INDEX];
+	__le32 fse_search_stat_pending_cnt[ATH12K_HTT_RX_NUM_MAX_PENDING_SEARCH_INDEX];
+} __packed;
+
+#define ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS		14
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS		8
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS		8
+#define ATH12K_HTT_TXBF_NUM_BW_CNTRS				5
+#define ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES			2
+
+struct ath12k_htt_pdev_txrate_txbf_stats_tlv {
+	__le32 tx_su_txbf_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+	__le32 tx_su_ibf_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+	__le32 tx_su_ol_mcs[ATH12K_HTT_TX_BF_RATE_STATS_NUM_MCS_COUNTERS];
+	__le32 tx_su_txbf_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	__le32 tx_su_ibf_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	__le32 tx_su_ol_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	__le32 tx_su_txbf_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+	__le32 tx_su_ibf_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+	__le32 tx_su_ol_bw[ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+	__le32 tx_legacy_ofdm_rate[ATH12K_HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+	__le32 txbf[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+	__le32 ibf[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+	__le32 ol[ATH12K_HTT_TXBF_NUM_REDUCED_CHAN_TYPES][ATH12K_HTT_TXBF_NUM_BW_CNTRS];
+	__le32 txbf_flag_set_mu_mode;
+	__le32 txbf_flag_set_final_status;
+	__le32 txbf_flag_not_set_verified_txbf_mode;
+	__le32 txbf_flag_not_set_disable_p2p_access;
+	__le32 txbf_flag_not_set_max_nss_in_he160;
+	__le32 txbf_flag_not_set_disable_uldlofdma;
+	__le32 txbf_flag_not_set_mcs_threshold_val;
+	__le32 txbf_flag_not_set_final_status;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_elem_t {
+	__le32 ax_ofdma_ndpa_queued;
+	__le32 ax_ofdma_ndpa_tried;
+	__le32 ax_ofdma_ndpa_flush;
+	__le32 ax_ofdma_ndpa_err;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_tlv {
+	__le32 num_elems_ax_ndpa_arr;
+	__le32 arr_elem_size_ax_ndpa;
+	DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_ndpa_stats_elem_t, ax_ndpa);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndp_stats_elem_t {
+	__le32 ax_ofdma_ndp_queued;
+	__le32 ax_ofdma_ndp_tried;
+	__le32 ax_ofdma_ndp_flush;
+	__le32 ax_ofdma_ndp_err;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_ndp_stats_tlv {
+	__le32 num_elems_ax_ndp_arr;
+	__le32 arr_elem_size_ax_ndp;
+	DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_ndp_stats_elem_t, ax_ndp);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_brp_stats_elem_t {
+	__le32 ax_ofdma_brp_queued;
+	__le32 ax_ofdma_brp_tried;
+	__le32 ax_ofdma_brp_flushed;
+	__le32 ax_ofdma_brp_err;
+	__le32 ax_ofdma_num_cbf_rcvd;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_brp_stats_tlv {
+	__le32 num_elems_ax_brp_arr;
+	__le32 arr_elem_size_ax_brp;
+	DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_brp_stats_elem_t, ax_brp);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_stats_elem_t {
+	__le32 num_ppdu_steer;
+	__le32 num_ppdu_ol;
+	__le32 num_usr_prefetch;
+	__le32 num_usr_sound;
+	__le32 num_usr_force_sound;
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_stats_tlv {
+	__le32 num_elems_ax_steer_arr;
+	__le32 arr_elem_size_ax_steer;
+	DECLARE_FLEX_ARRAY(struct ath12k_htt_txbf_ofdma_ax_steer_stats_elem_t, ax_steer);
+} __packed;
+
+struct ath12k_htt_txbf_ofdma_ax_steer_mpdu_stats_tlv {
+	__le32 ax_ofdma_rbo_steer_mpdus_tried;
+	__le32 ax_ofdma_rbo_steer_mpdus_failed;
+	__le32 ax_ofdma_sifs_steer_mpdus_tried;
+	__le32 ax_ofdma_sifs_steer_mpdus_failed;
+} __packed;
+
+enum ath12k_htt_stats_page_lock_state {
+	ATH12K_HTT_STATS_PAGE_LOCKED	= 0,
+	ATH12K_HTT_STATS_PAGE_UNLOCKED	= 1,
+	ATH12K_NUM_PG_LOCK_STATE
+};
+
+#define ATH12K_PAGER_MAX	10
+
+#define ATH12K_HTT_DLPAGER_ASYNC_LOCK_PG_CNT_INFO0	GENMASK(7, 0)
+#define ATH12K_HTT_DLPAGER_SYNC_LOCK_PG_CNT_INFO0	GENMASK(15, 8)
+#define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO1	GENMASK(15, 0)
+#define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO1	GENMASK(31, 16)
+#define ATH12K_HTT_DLPAGER_TOTAL_LOCK_PAGES_INFO2	GENMASK(15, 0)
+#define ATH12K_HTT_DLPAGER_TOTAL_FREE_PAGES_INFO2	GENMASK(31, 16)
+
+struct ath12k_htt_pgs_info {
+	__le32 page_num;
+	__le32 num_pgs;
+	__le32 ts_lsb;
+	__le32 ts_msb;
+} __packed;
+
+struct ath12k_htt_dl_pager_stats_tlv {
+	__le32 info0;
+	__le32 info1;
+	__le32 info2;
+	struct ath12k_htt_pgs_info pgs_info[ATH12K_NUM_PG_LOCK_STATE][ATH12K_PAGER_MAX];
+} __packed;
+
+#define ATH12K_HTT_STATS_MAX_CHAINS		8
+#define ATH12K_HTT_MAX_RX_PKT_CNT		8
+#define ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT	8
+#define ATH12K_HTT_MAX_PER_BLK_ERR_CNT		20
+#define ATH12K_HTT_MAX_RX_OTA_ERR_CNT		14
+#define ATH12K_HTT_MAX_CH_PWR_INFO_SIZE		16
+
+struct ath12k_htt_phy_stats_tlv {
+	a_sle32 nf_chain[ATH12K_HTT_STATS_MAX_CHAINS];
+	__le32 false_radar_cnt;
+	__le32 radar_cs_cnt;
+	a_sle32 ani_level;
+	__le32 fw_run_time;
+	a_sle32 runtime_nf_chain[ATH12K_HTT_STATS_MAX_CHAINS];
+} __packed;
+
+struct ath12k_htt_phy_counters_tlv {
+	__le32 rx_ofdma_timing_err_cnt;
+	__le32 rx_cck_fail_cnt;
+	__le32 mactx_abort_cnt;
+	__le32 macrx_abort_cnt;
+	__le32 phytx_abort_cnt;
+	__le32 phyrx_abort_cnt;
+	__le32 phyrx_defer_abort_cnt;
+	__le32 rx_gain_adj_lstf_event_cnt;
+	__le32 rx_gain_adj_non_legacy_cnt;
+	__le32 rx_pkt_cnt[ATH12K_HTT_MAX_RX_PKT_CNT];
+	__le32 rx_pkt_crc_pass_cnt[ATH12K_HTT_MAX_RX_PKT_CRC_PASS_CNT];
+	__le32 per_blk_err_cnt[ATH12K_HTT_MAX_PER_BLK_ERR_CNT];
+	__le32 rx_ota_err_cnt[ATH12K_HTT_MAX_RX_OTA_ERR_CNT];
+} __packed;
+
+struct ath12k_htt_phy_reset_stats_tlv {
+	__le32 pdev_id;
+	__le32 chan_mhz;
+	__le32 chan_band_center_freq1;
+	__le32 chan_band_center_freq2;
+	__le32 chan_phy_mode;
+	__le32 chan_flags;
+	__le32 chan_num;
+	__le32 reset_cause;
+	__le32 prev_reset_cause;
+	__le32 phy_warm_reset_src;
+	__le32 rx_gain_tbl_mode;
+	__le32 xbar_val;
+	__le32 force_calibration;
+	__le32 phyrf_mode;
+	__le32 phy_homechan;
+	__le32 phy_tx_ch_mask;
+	__le32 phy_rx_ch_mask;
+	__le32 phybb_ini_mask;
+	__le32 phyrf_ini_mask;
+	__le32 phy_dfs_en_mask;
+	__le32 phy_sscan_en_mask;
+	__le32 phy_synth_sel_mask;
+	__le32 phy_adfs_freq;
+	__le32 cck_fir_settings;
+	__le32 phy_dyn_pri_chan;
+	__le32 cca_thresh;
+	__le32 dyn_cca_status;
+	__le32 rxdesense_thresh_hw;
+	__le32 rxdesense_thresh_sw;
+} __packed;
+
+struct ath12k_htt_phy_reset_counters_tlv {
+	__le32 pdev_id;
+	__le32 cf_active_low_fail_cnt;
+	__le32 cf_active_low_pass_cnt;
+	__le32 phy_off_through_vreg_cnt;
+	__le32 force_calibration_cnt;
+	__le32 rf_mode_switch_phy_off_cnt;
+	__le32 temperature_recal_cnt;
+} __packed;
+
+struct ath12k_htt_phy_tpc_stats_tlv {
+	__le32 pdev_id;
+	__le32 tx_power_scale;
+	__le32 tx_power_scale_db;
+	__le32 min_negative_tx_power;
+	__le32 reg_ctl_domain;
+	__le32 max_reg_allowed_power[ATH12K_HTT_STATS_MAX_CHAINS];
+	__le32 max_reg_allowed_power_6ghz[ATH12K_HTT_STATS_MAX_CHAINS];
+	__le32 twice_max_rd_power;
+	__le32 max_tx_power;
+	__le32 home_max_tx_power;
+	__le32 psd_power;
+	__le32 eirp_power;
+	__le32 power_type_6ghz;
+	__le32 sub_band_cfreq[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE];
+	__le32 sub_band_txpower[ATH12K_HTT_MAX_CH_PWR_INFO_SIZE];
+} __packed;
+
+struct ath12k_htt_t2h_soc_txrx_stats_common_tlv {
+	__le32 inv_peers_msdu_drop_count_hi;
+	__le32 inv_peers_msdu_drop_count_lo;
+} __packed;
+
+#define ATH12K_HTT_AST_PDEV_ID_INFO		GENMASK(1, 0)
+#define ATH12K_HTT_AST_VDEV_ID_INFO		GENMASK(9, 2)
+#define ATH12K_HTT_AST_NEXT_HOP_INFO		BIT(10)
+#define ATH12K_HTT_AST_MCAST_INFO		BIT(11)
+#define ATH12K_HTT_AST_MONITOR_DIRECT_INFO	BIT(12)
+#define ATH12K_HTT_AST_MESH_STA_INFO		BIT(13)
+#define ATH12K_HTT_AST_MEC_INFO			BIT(14)
+#define ATH12K_HTT_AST_INTRA_BSS_INFO		BIT(15)
+
+struct ath12k_htt_ast_entry_tlv {
+	__le32 sw_peer_id;
+	__le32 ast_index;
+	struct htt_mac_addr mac_addr;
+	__le32 info;
+} __packed;
+
+enum ath12k_htt_stats_direction {
+	ATH12K_HTT_STATS_DIRECTION_TX,
+	ATH12K_HTT_STATS_DIRECTION_RX
+};
+
+enum ath12k_htt_stats_ppdu_type {
+	ATH12K_HTT_STATS_PPDU_TYPE_MODE_SU,
+	ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_MIMO,
+	ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_MIMO,
+	ATH12K_HTT_STATS_PPDU_TYPE_DL_MU_OFDMA,
+	ATH12K_HTT_STATS_PPDU_TYPE_UL_MU_OFDMA
+};
+
+enum ath12k_htt_stats_param_type {
+	ATH12K_HTT_STATS_PREAM_OFDM,
+	ATH12K_HTT_STATS_PREAM_CCK,
+	ATH12K_HTT_STATS_PREAM_HT,
+	ATH12K_HTT_STATS_PREAM_VHT,
+	ATH12K_HTT_STATS_PREAM_HE,
+	ATH12K_HTT_STATS_PREAM_EHT,
+	ATH12K_HTT_STATS_PREAM_RSVD1,
+	ATH12K_HTT_STATS_PREAM_COUNT,
+};
+
+#define ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT	32
+
+struct ath12k_htt_pdev_puncture_stats_tlv {
+	__le32 mac_id__word;
+	__le32 direction;
+	__le32 preamble;
+	__le32 ppdu_type;
+	__le32 subband_cnt;
+	__le32 last_used_pattern_mask;
+	__le32 num_subbands_used_cnt[ATH12K_HTT_PUNCT_STATS_MAX_SUBBAND_CNT];
+} __packed;
+
 struct ath12k_htt_dmac_reset_stats_tlv {
 	__le32 reset_count;
 	__le32 reset_time_lo_ms;
@@ -1085,6 +1772,10 @@
 	__le32 dlofdma_disabled_consec_no_mpdus_success[ATH12K_HTT_NUM_AC_WMM];
 } __packed;
 
+#define ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS		4
+#define ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS	8
+#define ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS		14
+
 enum ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE {
 	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_26,
 	ATH12K_HTT_TX_RX_PDEV_STATS_BE_RU_SIZE_52,
@@ -1105,7 +1796,54 @@
 	ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS,
 };
 
-#define ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS	8
+enum ATH12K_HTT_RC_MODE {
+	ATH12K_HTT_RC_MODE_SU_OL,
+	ATH12K_HTT_RC_MODE_SU_BF,
+	ATH12K_HTT_RC_MODE_MU1_INTF,
+	ATH12K_HTT_RC_MODE_MU2_INTF,
+	ATH12K_HTT_RC_MODE_MU3_INTF,
+	ATH12K_HTT_RC_MODE_MU4_INTF,
+	ATH12K_HTT_RC_MODE_MU5_INTF,
+	ATH12K_HTT_RC_MODE_MU6_INTF,
+	ATH12K_HTT_RC_MODE_MU7_INTF,
+	ATH12K_HTT_RC_MODE_2D_COUNT
+};
+
+enum ath12k_htt_stats_rc_mode {
+	ATH12K_HTT_STATS_RC_MODE_DLSU     = 0,
+	ATH12K_HTT_STATS_RC_MODE_DLMUMIMO = 1,
+	ATH12K_HTT_STATS_RC_MODE_DLOFDMA  = 2,
+	ATH12K_HTT_STATS_RC_MODE_ULMUMIMO = 3,
+	ATH12K_HTT_STATS_RC_MODE_ULOFDMA  = 4,
+};
+
+enum ath12k_htt_stats_ru_type {
+	ATH12K_HTT_STATS_RU_TYPE_INVALID,
+	ATH12K_HTT_STATS_RU_TYPE_SINGLE_RU_ONLY,
+	ATH12K_HTT_STATS_RU_TYPE_SINGLE_AND_MULTI_RU,
+};
+
+struct ath12k_htt_tx_rate_stats {
+	__le32 ppdus_tried;
+	__le32 ppdus_ack_failed;
+	__le32 mpdus_tried;
+	__le32 mpdus_failed;
+} __packed;
+
+struct ath12k_htt_tx_per_rate_stats_tlv {
+	__le32 rc_mode;
+	__le32 last_probed_mcs;
+	__le32 last_probed_nss;
+	__le32 last_probed_bw;
+	struct ath12k_htt_tx_rate_stats per_bw[ATH12K_HTT_TX_PDEV_STATS_NUM_BW_CNTRS];
+	struct ath12k_htt_tx_rate_stats per_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS];
+	struct ath12k_htt_tx_rate_stats per_mcs[ATH12K_HTT_TXBF_RATE_STAT_NUM_MCS_CNTRS];
+	struct ath12k_htt_tx_rate_stats per_bw320;
+	__le32 probe_cnt[ATH12K_HTT_RC_MODE_2D_COUNT];
+	__le32 ru_type;
+	struct ath12k_htt_tx_rate_stats ru[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
+} __packed;
+
 #define ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS		16
 #define ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS		5
 #define ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS	4
@@ -1115,11 +1853,23 @@
 	__le32 mac_id__word;
 	__le32 be_ofdma_tx_ldpc;
 	__le32 be_ofdma_tx_mcs[ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
-	__le32 be_ofdma_tx_nss[ATH12K_HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+	__le32 be_ofdma_tx_nss[ATH12K_HTT_PDEV_STAT_NUM_SPATIAL_STREAMS];
 	__le32 be_ofdma_tx_bw[ATH12K_HTT_TX_PDEV_NUM_BE_BW_CNTRS];
 	__le32 gi[ATH12K_HTT_TX_PDEV_NUM_GI_CNTRS][ATH12K_HTT_TX_PDEV_NUM_BE_MCS_CNTRS];
 	__le32 be_ofdma_tx_ru_size[ATH12K_HTT_TX_RX_PDEV_NUM_BE_RU_SIZE_CNTRS];
 	__le32 be_ofdma_eht_sig_mcs[ATH12K_HTT_TX_PDEV_NUM_EHT_SIG_MCS_CNTRS];
 } __packed;
 
+struct ath12k_htt_pdev_mbssid_ctrl_frame_tlv {
+	__le32 mac_id__word;
+	__le32 basic_trigger_across_bss;
+	__le32 basic_trigger_within_bss;
+	__le32 bsr_trigger_across_bss;
+	__le32 bsr_trigger_within_bss;
+	__le32 mu_rts_across_bss;
+	__le32 mu_rts_within_bss;
+	__le32 ul_mumimo_trigger_across_bss;
+	__le32 ul_mumimo_trigger_within_bss;
+} __packed;
+
 #endif
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/dp.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/dp.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp.c	2025-09-25 17:40:34.139360145 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <crypto/hash.h>
@@ -41,6 +41,11 @@
 		return;
 	}
 
+	if (!peer->primary_link) {
+		spin_unlock_bh(&ab->base_lock);
+		return;
+	}
+
 	ath12k_dp_rx_peer_tid_cleanup(ar, peer);
 	crypto_free_shash(peer->tfm_mmic);
 	peer->dp_setup_done = false;
@@ -51,6 +56,7 @@
 {
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_peer *peer;
+	struct ath12k_dp_rx_tid_delete_ctx dctx;
 	u32 reo_dest;
 	int ret = 0, tid;
 
@@ -96,11 +102,13 @@
 		return -ENOENT;
 	}
 
+	memset(&dctx, 0, sizeof (dctx));
 	for (; tid >= 0; tid--)
-		ath12k_dp_rx_peer_tid_delete(ar, peer, tid);
+		ath12k_dp_rx_peer_tid_delete_prepare(ar, peer, tid, &dctx);
 
 	spin_unlock_bh(&ab->base_lock);
 
+	ath12k_dp_rx_peer_tid_delete_finalize(ab, &dctx);
 	return ret;
 }
 
@@ -109,7 +117,7 @@
 	if (!ring->vaddr_unaligned)
 		return;
 
-	dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
+	ath12k_dma_free_coherent_no_dev(ring->size, ring->vaddr_unaligned,
 			  ring->paddr_unaligned);
 
 	ring->vaddr_unaligned = NULL;
@@ -246,7 +254,7 @@
 		num_entries = max_entries;
 
 	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
-	ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
+	ring->vaddr_unaligned = ath12k_dma_alloc_coherent_no_dev(ring->size,
 						   &ring->paddr_unaligned,
 						   GFP_KERNEL);
 	if (!ring->vaddr_unaligned)
@@ -339,7 +347,7 @@
 	if (ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
 	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
 		bank_config |=
-			u32_encode_bits(ath12k_dp_tx_get_encrypt_type(ahvif->key_cipher),
+			u32_encode_bits(ath12k_dp_tx_get_encrypt_type(arvif->key_cipher),
 					HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);
 
 	bank_config |= u32_encode_bits(ahvif->tx_encap_type,
@@ -348,7 +356,9 @@
 			u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
 			u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);
 
-	/* only valid if idx_lookup_override is not set in tcl_data_cmd */
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
+		bank_config |= u32_encode_bits(1, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
+	else
 	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
 
 	bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
@@ -681,7 +691,7 @@
 
 	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
 		if (link_desc_banks[i].vaddr_unaligned) {
-			dma_free_coherent(ab->dev,
+			ath12k_dma_free_coherent_no_dev(
 					  link_desc_banks[i].size,
 					  link_desc_banks[i].vaddr_unaligned,
 					  link_desc_banks[i].paddr_unaligned);
@@ -705,7 +715,7 @@
 			desc_sz = last_bank_sz;
 
 		desc_bank[i].vaddr_unaligned =
-					dma_alloc_coherent(ab->dev, desc_sz,
+					ath12k_dma_alloc_coherent_no_dev(desc_sz,
 							   &desc_bank[i].paddr_unaligned,
 							   GFP_KERNEL);
 		if (!desc_bank[i].vaddr_unaligned) {
@@ -977,28 +987,24 @@
 {
 	int i;
 
+	if (!ab->mon_reap_timer.function)
+		return;
+
 	del_timer_sync(&ab->mon_reap_timer);
 
 	for (i = 0; i < ab->num_radios; i++)
 		ath12k_dp_rx_pdev_free(ab, i);
 }
 
-void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
+void ath12k_dp_pdev_pre_alloc(struct ath12k *ar)
 {
-	struct ath12k *ar;
-	struct ath12k_pdev_dp *dp;
-	int i;
+	struct ath12k_pdev_dp *dp = &ar->dp;
 
-	for (i = 0; i <  ab->num_radios; i++) {
-		ar = ab->pdevs[i].ar;
-		dp = &ar->dp;
-		dp->mac_id = i;
+	dp->mac_id = ar->pdev_idx;
 		atomic_set(&dp->num_tx_pending, 0);
 		init_waitqueue_head(&dp->tx_empty_waitq);
-
 		/* TODO: Add any RXDMA setup required per pdev */
 	}
-}
 
 bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
 {
@@ -1110,7 +1116,7 @@
 		 * is not part of peer mapv3
 		 */
 		arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
-		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
+		arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
 		break;
 	case WMI_VDEV_TYPE_AP:
 	case WMI_VDEV_TYPE_IBSS:
@@ -1205,6 +1211,14 @@
 			if (!skb)
 				continue;
 
+			if (tx_desc_info->skb_ext_desc) {
+				dma_unmap_single(ab->dev,
+						 ATH12K_SKB_CB(skb)->paddr_ext_desc,
+						 tx_desc_info->skb_ext_desc->len,
+						 DMA_TO_DEVICE);
+				dev_kfree_skb_any(tx_desc_info->skb_ext_desc);
+			}
+
 			/* if we are unregistering, hw would've been destroyed and
 			 * ar is no longer valid.
 			 */
@@ -1260,15 +1274,23 @@
 	if (!ab->hw_params->reoq_lut_support)
 		return;
 
-	if (!dp->reoq_lut.vaddr)
-		return;
-
-	dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
+	if (dp->reoq_lut.vaddr) {
+		ath12k_hif_write32(ab,
+				   HAL_SEQ_WCSS_UMAC_REO_REG +
+				   HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+		ath12k_dma_free_coherent_no_dev(DP_REOQ_LUT_SIZE,
 			  dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
 	dp->reoq_lut.vaddr = NULL;
+	}
 
+	if (dp->ml_reoq_lut.vaddr) {
 	ath12k_hif_write32(ab,
-			   HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
+				   HAL_SEQ_WCSS_UMAC_REO_REG +
+				   HAL_REO1_QDESC_LUT_BASE1(ab), 0);
+		ath12k_dma_free_coherent_no_dev(DP_REOQ_LUT_SIZE,
+				  dp->ml_reoq_lut.vaddr, dp->ml_reoq_lut.paddr);
+		dp->ml_reoq_lut.vaddr = NULL;
+	}
 }
 
 void ath12k_dp_free(struct ath12k_base *ab)
@@ -1276,6 +1298,9 @@
 	struct ath12k_dp *dp = &ab->dp;
 	int i;
 
+	if (!dp->ab)
+		return;
+
 	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
 				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
 
@@ -1283,16 +1308,18 @@
 	ath12k_dp_reoq_lut_cleanup(ab);
 	ath12k_dp_deinit_bank_profiles(ab);
 	ath12k_dp_srng_common_cleanup(ab);
-
-	ath12k_dp_rx_reo_cmd_list_cleanup(ab);
+	cancel_delayed_work_sync(&dp->reo_cmd_work);
 
 	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
-		kfree(dp->tx_ring[i].tx_status);
+		ath12k_kfree_cache(
+			sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE,
+			dp->tx_ring[i].tx_status);
 		dp->tx_ring[i].tx_status = NULL;
 	}
 
 	ath12k_dp_rx_free(ab);
 	/* Deinit any SOC level resource */
+	dp->ab = NULL;
 }
 
 void ath12k_dp_cc_config(struct ath12k_base *ab)
@@ -1302,6 +1329,9 @@
 	u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
 	u32 val = 0;
 
+	if (ath12k_ftm_mode)
+		return;
+
 	ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);
 
 	val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
@@ -1432,6 +1462,7 @@
 		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
 			rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j);
 			rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
+			rx_descs[j].device_id = ab->device_id;
 			list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
 
 			/* Update descriptor VA in SPT */
@@ -1508,6 +1539,19 @@
 	return 0;
 }
 
+void ath12k_dp_partner_cc_init(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	int i;
+
+	for (i = 0; i < ag->num_devices; i++) {
+		if (ag->ab[i] == ab)
+			continue;
+
+		ath12k_dp_cmem_init(ab, &ag->ab[i]->dp, ATH12K_DP_RX_DESC);
+	}
+}
+
 static int ath12k_dp_cc_init(struct ath12k_base *ab)
 {
 	struct ath12k_dp *dp = &ab->dp;
@@ -1585,7 +1629,7 @@
 	if (!ab->hw_params->reoq_lut_support)
 		return 0;
 
-	dp->reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
+	dp->reoq_lut.vaddr = ath12k_dma_alloc_coherent_no_dev(
 						DP_REOQ_LUT_SIZE,
 						&dp->reoq_lut.paddr,
 						GFP_KERNEL | __GFP_ZERO);
@@ -1594,8 +1638,23 @@
 		return -ENOMEM;
 	}
 
+	dp->ml_reoq_lut.vaddr = ath12k_dma_alloc_coherent_no_dev(
+						   DP_REOQ_LUT_SIZE,
+						   &dp->ml_reoq_lut.paddr,
+						   GFP_KERNEL | __GFP_ZERO);
+	if (!dp->ml_reoq_lut.vaddr) {
+		ath12k_warn(ab, "failed to allocate memory for ML reoq table");
+		ath12k_dma_free_coherent_no_dev(DP_REOQ_LUT_SIZE,
+				  dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
+		dp->reoq_lut.vaddr = NULL;
+		return -ENOMEM;
+	}
+
 	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
 			   dp->reoq_lut.paddr);
+	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE1(ab),
+			   dp->ml_reoq_lut.paddr >> 8);
+
 	return 0;
 }
 
@@ -1628,11 +1687,11 @@
 
 	dp->ab = ab;
 
-	INIT_LIST_HEAD(&dp->reo_cmd_list);
-	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
+	INIT_LIST_HEAD(&dp->reo_cmd_pending);
+	INIT_LIST_HEAD(&dp->reo_cmd_sent);
 	spin_lock_init(&dp->reo_cmd_lock);
+	INIT_DELAYED_WORK(&dp->reo_cmd_work, ath12k_dp_reo_cmd_work_func);
 
-	dp->reo_cmd_cache_flush_count = 0;
 	dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab);
 
 	ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
@@ -1679,7 +1738,7 @@
 
 		dp->tx_ring[i].tx_status_head = 0;
 		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
-		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
+		dp->tx_ring[i].tx_status = ath12k_kzalloc_cache(size, GFP_KERNEL);
 		if (!dp->tx_ring[i].tx_status) {
 			ret = -ENOMEM;
 			/* FIXME: The allocated tx status is not freed
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/dp.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/dp.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp.h	2025-09-25 17:40:34.143360165 +0200
@@ -1,12 +1,13 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_DP_H
 #define ATH12K_DP_H
 
+#include "hal_desc.h"
 #include "hal_rx.h"
 #include "hw.h"
 
@@ -23,6 +24,8 @@
 #define DP_MON_PURGE_TIMEOUT_MS     100
 #define DP_MON_SERVICE_BUDGET       128
 
+#define DP_REO_CMD_TIMEOUT	(10 * HZ)
+
 struct dp_srng {
 	u32 *vaddr_unaligned;
 	u32 *vaddr;
@@ -106,6 +109,8 @@
 	struct list_head list;
 	struct sk_buff *head;
 	struct sk_buff *tail;
+	u32 err_bitmap;
+	u8 decap_format;
 };
 
 #define DP_MON_MAX_STATUS_BUF 32
@@ -125,7 +130,6 @@
 	struct sk_buff_head rx_status_q;
 	struct dp_mon_mpdu *mon_mpdu;
 	struct list_head dp_rx_mon_mpdu_list;
-	struct sk_buff *dest_skb_q[DP_MON_MAX_STATUS_BUF];
 	struct dp_mon_tx_ppdu_info *tx_prot_ppdu_info;
 	struct dp_mon_tx_ppdu_info *tx_data_ppdu_info;
 };
@@ -157,9 +161,21 @@
 
 #define DP_IDLE_SCATTER_BUFS_MAX 16
 
+#ifdef CONFIG_ATH12K_MEM_PROFILE_DEFAULT
+#define DP_TX_COMP_RING_SIZE		32768
+#define DP_RXDMA_MONITOR_BUF_RING_SIZE	4096
+#define DP_RXDMA_MONITOR_DST_RING_SIZE	8092
+#define ATH12K_NUM_POOL_TX_DESC		32768
+
+#elif defined(CONFIG_ATH12K_MEM_PROFILE_512M)
+#define DP_TX_COMP_RING_SIZE            8192
+#define DP_RXDMA_MONITOR_BUF_RING_SIZE  256
+#define DP_RXDMA_MONITOR_DST_RING_SIZE  512
+#define ATH12K_NUM_POOL_TX_DESC	8192
+#endif
+
 #define DP_WBM_RELEASE_RING_SIZE	64
 #define DP_TCL_DATA_RING_SIZE		512
-#define DP_TX_COMP_RING_SIZE		32768
 #define DP_TX_IDR_SIZE			DP_TX_COMP_RING_SIZE
 #define DP_TCL_CMD_RING_SIZE		32
 #define DP_TCL_STATUS_RING_SIZE		32
@@ -175,8 +191,6 @@
 #define DP_RXDMA_REFILL_RING_SIZE	2048
 #define DP_RXDMA_ERR_DST_RING_SIZE	1024
 #define DP_RXDMA_MON_STATUS_RING_SIZE	1024
-#define DP_RXDMA_MONITOR_BUF_RING_SIZE	4096
-#define DP_RXDMA_MONITOR_DST_RING_SIZE	2048
 #define DP_RXDMA_MONITOR_DESC_RING_SIZE	4096
 #define DP_TX_MONITOR_BUF_RING_SIZE	4096
 #define DP_TX_MONITOR_DEST_RING_SIZE	2048
@@ -202,11 +216,6 @@
 #define ATH12K_SHADOW_DP_TIMER_INTERVAL 20
 #define ATH12K_SHADOW_CTRL_TIMER_INTERVAL 10
 
-#define ATH12K_NUM_POOL_TX_DESC	32768
-
-/* TODO: revisit this count during testing */
-#define ATH12K_RX_DESC_COUNT	(12288)
-
 #define ATH12K_PAGE_SIZE	PAGE_SIZE
 
 /* Total 1024 entries in PPT, i.e 4K/4 considering 4K aligned
@@ -217,6 +226,9 @@
 /* Total 512 entries in a SPT, i.e 4K Page/8 */
 #define ATH12K_MAX_SPT_ENTRIES	512
 
+#define ATH12K_RX_DESC_COUNT	ALIGN((DP_REO_REINJECT_RING_SIZE + \
+				       DP_RXDMA_BUF_RING_SIZE) * 3 / 2, \
+				      ATH12K_MAX_SPT_ENTRIES)
 #define ATH12K_NUM_RX_SPT_PAGES	((ATH12K_RX_DESC_COUNT) / ATH12K_MAX_SPT_ENTRIES)
 
 #define ATH12K_TX_SPT_PAGES_PER_POOL (ATH12K_NUM_POOL_TX_DESC / \
@@ -287,12 +299,14 @@
 	u32 cookie;
 	u32 magic;
 	u8 in_use	: 1,
-	   reserved	: 7;
+	   device_id	: 3,
+	   reserved	: 4;
 };
 
 struct ath12k_tx_desc_info {
 	struct list_head list;
 	struct sk_buff *skb;
+	struct sk_buff *skb_ext_desc;
 	u32 desc_id; /* Cookie */
 	u8 mac_id;
 	u8 pool_id;
@@ -313,6 +327,16 @@
 	u32 *vaddr;
 };
 
+struct ath12k_link_stats {
+	u32 tx_enqueued;
+	u32 tx_completed;
+	u32 tx_bcast_mcast;
+	u32 tx_dropped;
+	u32 tx_encap_type[HAL_TCL_ENCAP_TYPE_MAX];
+	u32 tx_encrypt_type[HAL_ENCRYPT_TYPE_MAX];
+	u32 tx_desc_type[HAL_TCL_DESC_TYPE_MAX];
+};
+
 struct ath12k_dp {
 	struct ath12k_base *ab;
 	u8 num_bank_profiles;
@@ -336,17 +360,13 @@
 	struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX];
 	struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX];
 	struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX];
-	struct list_head reo_cmd_list;
-	struct list_head reo_cmd_cache_flush_list;
-	u32 reo_cmd_cache_flush_count;
-
-	/* protects access to below fields,
-	 * - reo_cmd_list
-	 * - reo_cmd_cache_flush_list
-	 * - reo_cmd_cache_flush_count
-	 */
+
+	struct list_head reo_cmd_pending;
+	struct list_head reo_cmd_sent;
 	spinlock_t reo_cmd_lock;
-	struct ath12k_hp_update_timer reo_cmd_timer;
+	bool reo_cmd_full;
+	struct delayed_work reo_cmd_work;
+
 	struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX];
 	struct ath12k_spt_info *spt_info;
 	u32 num_spt_pages;
@@ -368,20 +388,33 @@
 	struct dp_rxdma_mon_ring rxdma_mon_buf_ring;
 	struct dp_rxdma_mon_ring tx_mon_buf_ring;
 	struct ath12k_reo_q_addr_lut reoq_lut;
+	struct ath12k_reo_q_addr_lut ml_reoq_lut;
 };
 
 /* HTT definitions */
+#define HTT_TAG_TCL_METADATA_VERSION		5
 
-#define HTT_TCL_META_DATA_TYPE			BIT(0)
-#define HTT_TCL_META_DATA_VALID_HTT		BIT(1)
+#define HTT_TCL_META_DATA_TYPE			GENMASK(1, 0)
+
+#define HTT_TCL_META_DATA_VALID_HTT_FTM         BIT(1)
+#define HTT_TCL_META_DATA_VALID_HTT_MISSION     BIT(2)
+#define HTT_TCL_META_DATA_VALID_HTT	\
+			(ath12k_ftm_mode ? HTT_TCL_META_DATA_VALID_HTT_FTM : \
+					  HTT_TCL_META_DATA_VALID_HTT_MISSION)
 
 /* vdev meta data */
-#define HTT_TCL_META_DATA_VDEV_ID		GENMASK(9, 2)
-#define HTT_TCL_META_DATA_PDEV_ID		GENMASK(11, 10)
-#define HTT_TCL_META_DATA_HOST_INSPECTED	BIT(12)
+#define HTT_TCL_META_DATA_VDEV_ID		 GENMASK(10, 3)
+#define HTT_TCL_META_DATA_PDEV_ID		 GENMASK(12, 11)
+#define HTT_TCL_META_DATA_HOST_INSPECTED_MISSION BIT(13)
 
 /* peer meta data */
-#define HTT_TCL_META_DATA_PEER_ID		GENMASK(15, 2)
+#define HTT_TCL_META_DATA_PEER_ID		GENMASK(15, 3)
+
+/* Global sequence number */
+#define HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM		3
+#define HTT_TCL_META_DATA_GLOBAL_SEQ_HOST_INSPECTED	BIT(2)
+#define HTT_TCL_META_DATA_GLOBAL_SEQ_NUM		GENMASK(14, 3)
+#define HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID	128
 
 /* HTT tx completion is overlaid in wbm_release_ring */
 #define HTT_TX_WBM_COMP_INFO0_STATUS		GENMASK(16, 13)
@@ -412,9 +445,15 @@
 };
 
 #define HTT_VER_REQ_INFO_MSG_ID		GENMASK(7, 0)
+#define HTT_OPTION_TCL_METADATA_VER_V2	2
+#define HTT_OPTION_TAG			GENMASK(7, 0)
+#define HTT_OPTION_LEN			GENMASK(15, 8)
+#define HTT_OPTION_VALUE		GENMASK(31, 16)
+#define HTT_TCL_METADATA_VER_SZ		4
 
 struct htt_ver_req_cmd {
 	__le32 ver_reg_info;
+	__le32 tcl_metadata_version;
 } __packed;
 
 enum htt_srng_ring_type {
@@ -432,8 +471,11 @@
 	HTT_HOST1_TO_FW_RXBUF_RING,
 	HTT_HOST2_TO_FW_RXBUF_RING,
 	HTT_RXDMA_NON_MONITOR_DEST_RING,
+	HTT_RXDMA_HOST_BUF_RING2,
 	HTT_TX_MON_HOST2MON_BUF_RING,
 	HTT_TX_MON_MON2HOST_DEST_RING,
+	HTT_RX_MON_HOST2MON_BUF_RING,
+	HTT_RX_MON_MON2HOST_DEST_RING,
 };
 
 /* host -> target  HTT_SRING_SETUP message
@@ -694,9 +736,9 @@
  *
  *    The message would appear as follows:
  *
- *    |31       26|25|24|23            16|15             8|7             0|
- *    |-----------------+----------------+----------------+---------------|
- *    |   rsvd1   |PS|SS|     ring_id    |     pdev_id    |    msg_type   |
+ *    |31   29|28|27|26|25|24|23       16|15             8|7             0|
+ *    |-------+--+--+--+--+--+-----------+----------------+---------------|
+ *    | rsvd1 |ED|DT|OV|PS|SS|  ring_id  |     pdev_id    |    msg_type   |
  *    |-------------------------------------------------------------------|
  *    |              rsvd2               |           ring_buffer_size     |
  *    |-------------------------------------------------------------------|
@@ -723,7 +765,13 @@
  *                    More details can be got from enum htt_srng_ring_id
  *          b'24    - status_swap: 1 is to swap status TLV
  *          b'25    - pkt_swap:  1 is to swap packet TLV
- *          b'26:31 - rsvd1:  reserved for future use
+ *          b'26    - rx_offset_valid (OV): flag to indicate rx offsets
+ *		      configuration fields are valid
+ *          b'27    - drop_thresh_valid (DT): flag to indicate if the
+ *		      rx_drop_threshold field is valid
+ *          b'28    - rx_mon_global_en: Enable/Disable global register
+ *		      configuration in Rx monitor module.
+ *          b'29:31 - rsvd1:  reserved for future use
  * dword1 - b'0:16  - ring_buffer_size: size of buffers referenced by rx ring,
  *                    in byte units.
  *                    Valid only for HW_TO_SW_RING and SW_TO_HW_RING
@@ -759,8 +807,22 @@
 #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID	GENMASK(23, 16)
 #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS		BIT(24)
 #define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS		BIT(25)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID	BIT(26)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL	BIT(27)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON		BIT(28)
+
 #define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE	GENMASK(15, 0)
-#define HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID      BIT(26)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT	GENMASK(18, 16)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL	GENMASK(21, 19)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA	GENMASK(24, 22)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD	GENMASK(9, 0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE	BIT(17)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE	BIT(18)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE	BIT(19)
+
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET	BIT(0)
+#define HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET	GENMASK(14, 1)
 
 #define HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET      GENMASK(15, 0)
 #define HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET      GENMASK(31, 16)
@@ -789,6 +851,7 @@
 	HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS	= BIT(10),
 	HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT	= BIT(11),
 	HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE	= BIT(12),
+	HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO	= BIT(13),
 };
 
 enum htt_rx_mgmt_pkt_filter_tlv_flags0 {
@@ -1077,6 +1140,21 @@
 		HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
 		HTT_RX_FILTER_TLV_FLAGS_ATTENTION)
 
+#define HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING \
+	(HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
+	HTT_RX_FILTER_TLV_FLAGS_MSDU_START | \
+	HTT_RX_FILTER_TLV_FLAGS_RX_PACKET | \
+	HTT_RX_FILTER_TLV_FLAGS_MSDU_END | \
+	HTT_RX_FILTER_TLV_FLAGS_MPDU_END | \
+	HTT_RX_FILTER_TLV_FLAGS_PACKET_HEADER | \
+	HTT_RX_FILTER_TLV_FLAGS_PER_MSDU_HEADER | \
+	HTT_RX_FILTER_TLV_FLAGS_PPDU_START | \
+	HTT_RX_FILTER_TLV_FLAGS_PPDU_END | \
+	HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS | \
+	HTT_RX_FILTER_TLV_FLAGS_PPDU_END_USER_STATS_EXT | \
+	HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE | \
+	HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO)
+
 /* msdu start. mpdu end, attention, rx hdr tlv's are not subscribed */
 #define HTT_RX_TLV_FLAGS_RXDMA_RING \
 		(HTT_RX_FILTER_TLV_FLAGS_MPDU_START | \
@@ -1105,6 +1183,10 @@
 	__le32 info3;
 } __packed;
 
+#define HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE	32
+#define HTT_RX_RING_DEFAULT_DMA_LENGTH		0x7
+#define HTT_RX_RING_PKT_TLV_OFFSET		0x1
+
 struct htt_rx_ring_tlv_filter {
 	u32 rx_filter; /* see htt_rx_filter_tlv_flags */
 	u32 pkt_filter_flags0; /* MGMT */
@@ -1122,6 +1204,17 @@
 	u16 rx_mpdu_start_wmask;
 	u16 rx_mpdu_end_wmask;
 	u32 rx_msdu_end_wmask;
+	u32 conf_len_ctrl;
+	u32 conf_len_mgmt;
+	u32 conf_len_data;
+	u16 rx_drop_threshold;
+	bool enable_log_mgmt_type;
+	bool enable_log_ctrl_type;
+	bool enable_log_data_type;
+	bool enable_rx_tlv_offset;
+	u16 rx_tlv_offset;
+	bool drop_threshold_valid;
+	bool rxmon_disable;
 };
 
 #define HTT_STATS_FRAME_CTRL_TYPE_MGMT  0x0
@@ -1239,6 +1332,8 @@
 #define HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16	GENMASK(15, 0)
 #define HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID	GENMASK(31, 16)
 #define HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL	GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID	GENMASK(15, 0)
+#define HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL	GENMASK(31, 16)
 #define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_M	BIT(16)
 #define HTT_T2H_PEER_MAP_INFO2_NEXT_HOP_S	16
 
@@ -1440,6 +1535,8 @@
 #define HTT_PPDU_STATS_USER_RATE_FLAGS_DCM_M		BIT(28)
 #define HTT_PPDU_STATS_USER_RATE_FLAGS_LDPC_M		BIT(29)
 
+#define HTT_USR_RATE_PPDU_TYPE(_val) \
+		le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_INFO1_PPDU_TYPE_M)
 #define HTT_USR_RATE_PREAMBLE(_val) \
 		le32_get_bits(_val, HTT_PPDU_STATS_USER_RATE_FLAGS_PREAMBLE_M)
 #define HTT_USR_RATE_BW(_val) \
@@ -1790,6 +1887,18 @@
 	ATH12K_STATS_TIMER_DUR_2SEC = 3,
 };
 
+#define ATH12K_HTT_MAC_ADDR_L32_0	GENMASK(7, 0)
+#define ATH12K_HTT_MAC_ADDR_L32_1	GENMASK(15, 8)
+#define ATH12K_HTT_MAC_ADDR_L32_2	GENMASK(23, 16)
+#define ATH12K_HTT_MAC_ADDR_L32_3	GENMASK(31, 24)
+#define ATH12K_HTT_MAC_ADDR_H16_0	GENMASK(7, 0)
+#define ATH12K_HTT_MAC_ADDR_H16_1	GENMASK(15, 8)
+
+struct htt_mac_addr {
+	__le32 mac_addr_l32;
+	__le32 mac_addr_h16;
+} __packed;
+
 static inline void ath12k_dp_get_mac_addr(u32 addr_l32, u16 addr_h16, u8 *addr)
 {
 	memcpy(addr, &addr_l32, 4);
@@ -1804,8 +1913,9 @@
 void ath12k_dp_free(struct ath12k_base *ab);
 int ath12k_dp_alloc(struct ath12k_base *ab);
 void ath12k_dp_cc_config(struct ath12k_base *ab);
+void ath12k_dp_partner_cc_init(struct ath12k_base *ab);
 int ath12k_dp_pdev_alloc(struct ath12k_base *ab);
-void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab);
+void ath12k_dp_pdev_pre_alloc(struct ath12k *ar);
 void ath12k_dp_pdev_free(struct ath12k_base *ab);
 int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
 				int mac_id, enum hal_ring_type ring_type);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/dp_mon.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp_mon.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/dp_mon.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp_mon.c	2025-09-25 17:40:34.143360165 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include "dp_mon.h"
@@ -10,11 +10,18 @@
 #include "dp_tx.h"
 #include "peer.h"
 
-static void ath12k_dp_mon_rx_handle_ofdma_info(void *rx_tlv,
+#define ATH12K_LE32_DEC_ENC(value, dec_bits, enc_bits)	\
+		u32_encode_bits(le32_get_bits(value, dec_bits), enc_bits)
+
+#define ATH12K_LE64_DEC_ENC(value, dec_bits, enc_bits) \
+		u32_encode_bits(le64_get_bits(value, dec_bits), enc_bits)
+
+#define ATH12K_INVALID_RSSI (0x80)
+
+static void
+ath12k_dp_mon_rx_handle_ofdma_info(const struct hal_rx_ppdu_end_user_stats *ppdu_end_user,
 					       struct hal_rx_user_status *rx_user_status)
 {
-	struct hal_rx_ppdu_end_user_stats *ppdu_end_user = rx_tlv;
-
 	rx_user_status->ul_ofdma_user_v0_word0 =
 		__le32_to_cpu(ppdu_end_user->usr_resp_ref);
 	rx_user_status->ul_ofdma_user_v0_word1 =
@@ -35,7 +42,7 @@
 }
 
 static void
-ath12k_dp_mon_rx_populate_mu_user_info(void *rx_tlv,
+ath12k_dp_mon_rx_populate_mu_user_info(const struct hal_rx_ppdu_end_user_stats *rx_tlv,
 				       struct hal_rx_mon_ppdu_info *ppdu_info,
 				       struct hal_rx_user_status *rx_user_status)
 {
@@ -73,12 +80,10 @@
 	ath12k_dp_mon_rx_populate_byte_count(rx_tlv, ppdu_info, rx_user_status);
 }
 
-static void ath12k_dp_mon_parse_vht_sig_a(u8 *tlv_data,
+static void ath12k_dp_mon_parse_vht_sig_a(const struct hal_rx_vht_sig_a_info *vht_sig,
 					  struct hal_rx_mon_ppdu_info *ppdu_info)
 {
-	struct hal_rx_vht_sig_a_info *vht_sig =
-			(struct hal_rx_vht_sig_a_info *)tlv_data;
-	u32 nsts, group_id, info0, info1;
+	u32 nsts, info0, info1;
 	u8 gi_setting;
 
 	info0 = __le32_to_cpu(vht_sig->info0);
@@ -102,16 +107,12 @@
 	if (ppdu_info->is_stbc && nsts > 0)
 		nsts = ((nsts + 1) >> 1) - 1;
 
-	ppdu_info->nss = u32_get_bits(nsts, VHT_SIG_SU_NSS_MASK);
+	ppdu_info->nss = u32_get_bits(nsts, VHT_SIG_SU_NSS_MASK) + 1;
 	ppdu_info->bw = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_BW);
 	ppdu_info->beamformed = u32_get_bits(info1,
 					     HAL_RX_VHT_SIG_A_INFO_INFO1_BEAMFORMED);
-	group_id = u32_get_bits(info0, HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID);
-	if (group_id == 0 || group_id == 63)
-		ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
-	else
-		ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
-	ppdu_info->vht_flag_values5 = group_id;
+	ppdu_info->vht_flag_values5 = u32_get_bits(info0,
+						   HAL_RX_VHT_SIG_A_INFO_INFO0_GROUP_ID);
 	ppdu_info->vht_flag_values3[0] = (((ppdu_info->mcs) << 4) |
 					    ppdu_info->nss);
 	ppdu_info->vht_flag_values2 = ppdu_info->bw;
@@ -119,11 +120,9 @@
 		u32_get_bits(info1, HAL_RX_VHT_SIG_A_INFO_INFO1_SU_MU_CODING);
 }
 
-static void ath12k_dp_mon_parse_ht_sig(u8 *tlv_data,
+static void ath12k_dp_mon_parse_ht_sig(const struct hal_rx_ht_sig_info *ht_sig,
 				       struct hal_rx_mon_ppdu_info *ppdu_info)
 {
-	struct hal_rx_ht_sig_info *ht_sig =
-			(struct hal_rx_ht_sig_info *)tlv_data;
 	u32 info0 = __le32_to_cpu(ht_sig->info0);
 	u32 info1 = __le32_to_cpu(ht_sig->info1);
 
@@ -132,15 +131,12 @@
 	ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_STBC);
 	ppdu_info->ldpc = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_FEC_CODING);
 	ppdu_info->gi = u32_get_bits(info1, HAL_RX_HT_SIG_INFO_INFO1_GI);
-	ppdu_info->nss = (ppdu_info->mcs >> 3);
-	ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+	ppdu_info->nss = (ppdu_info->mcs >> 3) + 1;
 }
 
-static void ath12k_dp_mon_parse_l_sig_b(u8 *tlv_data,
+static void ath12k_dp_mon_parse_l_sig_b(const struct hal_rx_lsig_b_info *lsigb,
 					struct hal_rx_mon_ppdu_info *ppdu_info)
 {
-	struct hal_rx_lsig_b_info *lsigb =
-			(struct hal_rx_lsig_b_info *)tlv_data;
 	u32 info0 = __le32_to_cpu(lsigb->info0);
 	u8 rate;
 
@@ -167,14 +163,11 @@
 
 	ppdu_info->rate = rate;
 	ppdu_info->cck_flag = 1;
-	ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
 }
 
-static void ath12k_dp_mon_parse_l_sig_a(u8 *tlv_data,
+static void ath12k_dp_mon_parse_l_sig_a(const struct hal_rx_lsig_a_info *lsiga,
 					struct hal_rx_mon_ppdu_info *ppdu_info)
 {
-	struct hal_rx_lsig_a_info *lsiga =
-			(struct hal_rx_lsig_a_info *)tlv_data;
 	u32 info0 = __le32_to_cpu(lsiga->info0);
 	u8 rate;
 
@@ -209,17 +202,15 @@
 	}
 
 	ppdu_info->rate = rate;
-	ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
 }
 
-static void ath12k_dp_mon_parse_he_sig_b2_ofdma(u8 *tlv_data,
+static void
+ath12k_dp_mon_parse_he_sig_b2_ofdma(const struct hal_rx_he_sig_b2_ofdma_info *ofdma,
 						struct hal_rx_mon_ppdu_info *ppdu_info)
 {
-	struct hal_rx_he_sig_b2_ofdma_info *he_sig_b2_ofdma =
-			(struct hal_rx_he_sig_b2_ofdma_info *)tlv_data;
 	u32 info0, value;
 
-	info0 = __le32_to_cpu(he_sig_b2_ofdma->info0);
+	info0 = __le32_to_cpu(ofdma->info0);
 
 	ppdu_info->he_data1 |= HE_MCS_KNOWN | HE_DCM_KNOWN | HE_CODING_KNOWN;
 
@@ -244,17 +235,16 @@
 	value = value << HE_STA_ID_SHIFT;
 	ppdu_info->he_data4 |= value;
 
-	ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS);
+	ppdu_info->nss = u32_get_bits(info0,
+				      HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS) + 1;
 	ppdu_info->beamformed = u32_get_bits(info0,
 					     HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF);
-	ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
 }
 
-static void ath12k_dp_mon_parse_he_sig_b2_mu(u8 *tlv_data,
+static void
+ath12k_dp_mon_parse_he_sig_b2_mu(const struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu,
 					     struct hal_rx_mon_ppdu_info *ppdu_info)
 {
-	struct hal_rx_he_sig_b2_mu_info *he_sig_b2_mu =
-			(struct hal_rx_he_sig_b2_mu_info *)tlv_data;
 	u32 info0, value;
 
 	info0 = __le32_to_cpu(he_sig_b2_mu->info0);
@@ -274,14 +264,13 @@
 	value = value << HE_STA_ID_SHIFT;
 	ppdu_info->he_data4 |= value;
 
-	ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS);
+	ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_B2_MU_INFO_INFO0_STA_NSTS) + 1;
 }
 
-static void ath12k_dp_mon_parse_he_sig_b1_mu(u8 *tlv_data,
+static void
+ath12k_dp_mon_parse_he_sig_b1_mu(const struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu,
 					     struct hal_rx_mon_ppdu_info *ppdu_info)
 {
-	struct hal_rx_he_sig_b1_mu_info *he_sig_b1_mu =
-			(struct hal_rx_he_sig_b1_mu_info *)tlv_data;
 	u32 info0 = __le32_to_cpu(he_sig_b1_mu->info0);
 	u16 ru_tones;
 
@@ -289,14 +278,12 @@
 				HAL_RX_HE_SIG_B1_MU_INFO_INFO0_RU_ALLOCATION);
 	ppdu_info->ru_alloc = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
 	ppdu_info->he_RU[0] = ru_tones;
-	ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
 }
 
-static void ath12k_dp_mon_parse_he_sig_mu(u8 *tlv_data,
+static void
+ath12k_dp_mon_parse_he_sig_mu(const struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl,
 					  struct hal_rx_mon_ppdu_info *ppdu_info)
 {
-	struct hal_rx_he_sig_a_mu_dl_info *he_sig_a_mu_dl =
-			(struct hal_rx_he_sig_a_mu_dl_info *)tlv_data;
 	u32 info0, info1, value;
 	u16 he_gi = 0, he_ltf = 0;
 
@@ -424,14 +411,11 @@
 
 	ppdu_info->is_stbc = info1 &
 			     HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC;
-	ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
 }
 
-static void ath12k_dp_mon_parse_he_sig_su(u8 *tlv_data,
+static void ath12k_dp_mon_parse_he_sig_su(const struct hal_rx_he_sig_a_su_info *he_sig_a,
 					  struct hal_rx_mon_ppdu_info *ppdu_info)
 {
-	struct hal_rx_he_sig_a_su_info *he_sig_a =
-			(struct hal_rx_he_sig_a_su_info *)tlv_data;
 	u32 info0, info1, value;
 	u32 dcm;
 	u8 he_dcm = 0, he_stbc = 0;
@@ -572,23 +556,893 @@
 	ppdu_info->is_stbc = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_STBC);
 	ppdu_info->beamformed = u32_get_bits(info1, HAL_RX_HE_SIG_A_SU_INFO_INFO1_TXBF);
 	dcm = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_DCM);
-	ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS);
+	ppdu_info->nss = u32_get_bits(info0, HAL_RX_HE_SIG_A_SU_INFO_INFO0_NSTS) + 1;
 	ppdu_info->dcm = dcm;
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_u_sig_cmn(const struct hal_mon_usig_cmn *cmn,
+				     struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	u32 common;
+
+	ppdu_info->u_sig_info.bw = le32_get_bits(cmn->info0,
+						 HAL_RX_USIG_CMN_INFO0_BW);
+	ppdu_info->u_sig_info.ul_dl = le32_get_bits(cmn->info0,
+						    HAL_RX_USIG_CMN_INFO0_UL_DL);
+
+	common = __le32_to_cpu(ppdu_info->u_sig_info.usig.common);
+	common |= IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER_KNOWN |
+		  IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_KNOWN |
+		  IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL_KNOWN |
+		  IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR_KNOWN |
+		  IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP_KNOWN |
+		  ATH12K_LE32_DEC_ENC(cmn->info0,
+				      HAL_RX_USIG_CMN_INFO0_PHY_VERSION,
+				      IEEE80211_RADIOTAP_EHT_USIG_COMMON_PHY_VER) |
+		  u32_encode_bits(ppdu_info->u_sig_info.bw,
+				  IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW) |
+		  u32_encode_bits(ppdu_info->u_sig_info.ul_dl,
+				  IEEE80211_RADIOTAP_EHT_USIG_COMMON_UL_DL) |
+		  ATH12K_LE32_DEC_ENC(cmn->info0,
+				      HAL_RX_USIG_CMN_INFO0_BSS_COLOR,
+				      IEEE80211_RADIOTAP_EHT_USIG_COMMON_BSS_COLOR) |
+		  ATH12K_LE32_DEC_ENC(cmn->info0,
+				      HAL_RX_USIG_CMN_INFO0_TXOP,
+				      IEEE80211_RADIOTAP_EHT_USIG_COMMON_TXOP);
+	ppdu_info->u_sig_info.usig.common = cpu_to_le32(common);
+
+	switch (ppdu_info->u_sig_info.bw) {
+	default:
+		fallthrough;
+	case HAL_EHT_BW_20:
+		ppdu_info->bw = HAL_RX_BW_20MHZ;
+		break;
+	case HAL_EHT_BW_40:
+		ppdu_info->bw = HAL_RX_BW_40MHZ;
+		break;
+	case HAL_EHT_BW_80:
+		ppdu_info->bw = HAL_RX_BW_80MHZ;
+		break;
+	case HAL_EHT_BW_160:
+		ppdu_info->bw = HAL_RX_BW_160MHZ;
+		break;
+	case HAL_EHT_BW_320_1:
+	case HAL_EHT_BW_320_2:
+		ppdu_info->bw = HAL_RX_BW_320MHZ;
+		break;
+	}
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_u_sig_tb(const struct hal_mon_usig_tb *usig_tb,
+				    struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig;
+	enum ieee80211_radiotap_eht_usig_tb spatial_reuse1, spatial_reuse2;
+	u32 common, value, mask;
+
+	spatial_reuse1 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B3_B6_SPATIAL_REUSE_1;
+	spatial_reuse2 = IEEE80211_RADIOTAP_EHT_USIG2_TB_B7_B10_SPATIAL_REUSE_2;
+
+	common = __le32_to_cpu(usig->common);
+	value = __le32_to_cpu(usig->value);
+	mask = __le32_to_cpu(usig->mask);
+
+	ppdu_info->u_sig_info.ppdu_type_comp_mode =
+				le32_get_bits(usig_tb->info0,
+					      HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE);
+
+	common |= ATH12K_LE32_DEC_ENC(usig_tb->info0,
+				      HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS,
+				      IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
+
+	value |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD |
+		 u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode,
+				 IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE) |
+		 IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE |
+		 ATH12K_LE32_DEC_ENC(usig_tb->info0,
+				     HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1,
+				     spatial_reuse1) |
+		 ATH12K_LE32_DEC_ENC(usig_tb->info0,
+				     HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2,
+				     spatial_reuse2) |
+		 IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD |
+		 ATH12K_LE32_DEC_ENC(usig_tb->info0,
+				     HAL_RX_USIG_TB_INFO0_CRC,
+				     IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC) |
+		 ATH12K_LE32_DEC_ENC(usig_tb->info0,
+				     HAL_RX_USIG_TB_INFO0_TAIL,
+				     IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL);
+
+	mask |= IEEE80211_RADIOTAP_EHT_USIG1_TB_B20_B25_DISREGARD |
+		IEEE80211_RADIOTAP_EHT_USIG2_TB_B0_B1_PPDU_TYPE |
+		IEEE80211_RADIOTAP_EHT_USIG2_TB_B2_VALIDATE |
+		spatial_reuse1 | spatial_reuse2 |
+		IEEE80211_RADIOTAP_EHT_USIG2_TB_B11_B15_DISREGARD |
+		IEEE80211_RADIOTAP_EHT_USIG2_TB_B16_B19_CRC |
+		IEEE80211_RADIOTAP_EHT_USIG2_TB_B20_B25_TAIL;
+
+	usig->common = cpu_to_le32(common);
+	usig->value = cpu_to_le32(value);
+	usig->mask = cpu_to_le32(mask);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_u_sig_mu(const struct hal_mon_usig_mu *usig_mu,
+				    struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct ieee80211_radiotap_eht_usig *usig = &ppdu_info->u_sig_info.usig;
+	enum ieee80211_radiotap_eht_usig_mu sig_symb, punc;
+	u32 common, value, mask;
+
+	sig_symb = IEEE80211_RADIOTAP_EHT_USIG2_MU_B11_B15_EHT_SIG_SYMBOLS;
+	punc = IEEE80211_RADIOTAP_EHT_USIG2_MU_B3_B7_PUNCTURED_INFO;
+
+	common = __le32_to_cpu(usig->common);
+	value = __le32_to_cpu(usig->value);
+	mask = __le32_to_cpu(usig->mask);
+
+	ppdu_info->u_sig_info.ppdu_type_comp_mode =
+				le32_get_bits(usig_mu->info0,
+					      HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE);
+	ppdu_info->u_sig_info.eht_sig_mcs =
+				le32_get_bits(usig_mu->info0,
+					      HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS);
+	ppdu_info->u_sig_info.num_eht_sig_sym =
+				le32_get_bits(usig_mu->info0,
+					      HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM);
+
+	common |= ATH12K_LE32_DEC_ENC(usig_mu->info0,
+				      HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS,
+				      IEEE80211_RADIOTAP_EHT_USIG_COMMON_BAD_USIG_CRC);
+
+	value |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD |
+		 IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE |
+		 u32_encode_bits(ppdu_info->u_sig_info.ppdu_type_comp_mode,
+				 IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE) |
+		 IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE |
+		 ATH12K_LE32_DEC_ENC(usig_mu->info0,
+				     HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO,
+				     punc) |
+		 IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE |
+		 u32_encode_bits(ppdu_info->u_sig_info.eht_sig_mcs,
+				 IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS) |
+		 u32_encode_bits(ppdu_info->u_sig_info.num_eht_sig_sym,
+				 sig_symb) |
+		 ATH12K_LE32_DEC_ENC(usig_mu->info0,
+				     HAL_RX_USIG_MU_INFO0_CRC,
+				     IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC) |
+		 ATH12K_LE32_DEC_ENC(usig_mu->info0,
+				     HAL_RX_USIG_MU_INFO0_TAIL,
+				     IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL);
+
+	mask |= IEEE80211_RADIOTAP_EHT_USIG1_MU_B20_B24_DISREGARD |
+		IEEE80211_RADIOTAP_EHT_USIG1_MU_B25_VALIDATE |
+		IEEE80211_RADIOTAP_EHT_USIG2_MU_B0_B1_PPDU_TYPE |
+		IEEE80211_RADIOTAP_EHT_USIG2_MU_B2_VALIDATE |
+		punc |
+		IEEE80211_RADIOTAP_EHT_USIG2_MU_B8_VALIDATE |
+		IEEE80211_RADIOTAP_EHT_USIG2_MU_B9_B10_SIG_MCS |
+		sig_symb |
+		IEEE80211_RADIOTAP_EHT_USIG2_MU_B16_B19_CRC |
+		IEEE80211_RADIOTAP_EHT_USIG2_MU_B20_B25_TAIL;
+
+	usig->common = cpu_to_le32(common);
+	usig->value = cpu_to_le32(value);
+	usig->mask = cpu_to_le32(mask);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_u_sig_hdr(const struct hal_mon_usig_hdr *usig,
+				     struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	u8 comp_mode;
+
+	ppdu_info->eht_usig = true;
+
+	ath12k_dp_mon_hal_rx_parse_u_sig_cmn(&usig->cmn, ppdu_info);
+
+	comp_mode = le32_get_bits(usig->non_cmn.mu.info0,
+				  HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE);
+
+	if (comp_mode == 0 && ppdu_info->u_sig_info.ul_dl)
+		ath12k_dp_mon_hal_rx_parse_u_sig_tb(&usig->non_cmn.tb, ppdu_info);
+	else
+		ath12k_dp_mon_hal_rx_parse_u_sig_mu(&usig->non_cmn.mu, ppdu_info);
+}
+
+static void
+ath12k_dp_mon_hal_aggr_tlv(struct hal_rx_mon_ppdu_info *ppdu_info,
+			   u16 tlv_len, const void *tlv_data)
+{
+	if (tlv_len <= HAL_RX_MON_MAX_AGGR_SIZE - ppdu_info->tlv_aggr.cur_len) {
+		memcpy(ppdu_info->tlv_aggr.buf + ppdu_info->tlv_aggr.cur_len,
+		       tlv_data, tlv_len);
+		ppdu_info->tlv_aggr.cur_len += tlv_len;
+	}
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_frame_type_ndp(const struct hal_rx_u_sig_info *usig_info)
+{
+	if (usig_info->ppdu_type_comp_mode == 1 &&
+	    usig_info->eht_sig_mcs == 0 &&
+	    usig_info->num_eht_sig_sym == 0)
+		return true;
+
+	return false;
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_non_ofdma(const struct hal_rx_u_sig_info *usig_info)
+{
+	u32 ppdu_type_comp_mode = usig_info->ppdu_type_comp_mode;
+	u32 ul_dl = usig_info->ul_dl;
+
+	if ((ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO && ul_dl == 0) ||
+	    (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_OFDMA && ul_dl == 0) ||
+	    (ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_MU_MIMO  && ul_dl == 1))
+		return true;
+
+	return false;
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_ofdma(const struct hal_rx_u_sig_info *usig_info)
+{
+	if (usig_info->ppdu_type_comp_mode == 0 && usig_info->ul_dl == 0)
+		return true;
+
+	return false;
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(const struct hal_eht_sig_ndp_cmn_eb *eht_sig_ndp,
+				       struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+	u32 known, data;
+
+	known = __le32_to_cpu(eht->known);
+	known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE |
+		 IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF |
+		 IEEE80211_RADIOTAP_EHT_KNOWN_NSS_S |
+		 IEEE80211_RADIOTAP_EHT_KNOWN_BEAMFORMED_S |
+		 IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_S |
+		 IEEE80211_RADIOTAP_EHT_KNOWN_CRC1 |
+		 IEEE80211_RADIOTAP_EHT_KNOWN_TAIL1;
+	eht->known = cpu_to_le32(known);
+
+	data = __le32_to_cpu(eht->data[0]);
+	data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+				    HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE,
+				    IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
+	/* GI and LTF size are separately indicated in radiotap header
+	 * and hence will be parsed from other TLV
+	 */
+	data |=	ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+				    HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM,
+				    IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
+
+	data |=	ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+				    HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC,
+				    IEEE80211_RADIOTAP_EHT_DATA0_CRC1_O);
+
+	data |= ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+				    HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD,
+				    IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_S);
+	eht->data[0] = cpu_to_le32(data);
+
+	data = __le32_to_cpu(eht->data[7]);
+	data |=	ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+				    HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS,
+				    IEEE80211_RADIOTAP_EHT_DATA7_NSS_S);
+
+	data |=	ATH12K_LE32_DEC_ENC(eht_sig_ndp->info0,
+				    HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED,
+				    IEEE80211_RADIOTAP_EHT_DATA7_BEAMFORMED_S);
+	eht->data[7] = cpu_to_le32(data);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_usig_overflow(const struct hal_eht_sig_usig_overflow *ovflow,
+					 struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+	u32 known, data;
+
+	known = __le32_to_cpu(eht->known);
+	known |= IEEE80211_RADIOTAP_EHT_KNOWN_SPATIAL_REUSE |
+		 IEEE80211_RADIOTAP_EHT_KNOWN_EHT_LTF |
+		 IEEE80211_RADIOTAP_EHT_KNOWN_LDPC_EXTRA_SYM_OM |
+		 IEEE80211_RADIOTAP_EHT_KNOWN_PRE_PADD_FACOR_OM |
+		 IEEE80211_RADIOTAP_EHT_KNOWN_PE_DISAMBIGUITY_OM |
+		 IEEE80211_RADIOTAP_EHT_KNOWN_DISREGARD_O;
+	eht->known = cpu_to_le32(known);
+
+	data = __le32_to_cpu(eht->data[0]);
+	data |=	ATH12K_LE32_DEC_ENC(ovflow->info0,
+				    HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE,
+				    IEEE80211_RADIOTAP_EHT_DATA0_SPATIAL_REUSE);
+
+	/* GI and LTF size are separately indicated in radiotap header
+	 * and hence will be parsed from other TLV
+	 */
+	data |=	ATH12K_LE32_DEC_ENC(ovflow->info0,
+				    HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM,
+				    IEEE80211_RADIOTAP_EHT_DATA0_EHT_LTF);
+
+	data |=	ATH12K_LE32_DEC_ENC(ovflow->info0,
+				    HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM,
+				    IEEE80211_RADIOTAP_EHT_DATA0_LDPC_EXTRA_SYM_OM);
+
+	data |=	ATH12K_LE32_DEC_ENC(ovflow->info0,
+				    HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR,
+				    IEEE80211_RADIOTAP_EHT_DATA0_PRE_PADD_FACOR_OM);
+
+	data |=	ATH12K_LE32_DEC_ENC(ovflow->info0,
+				    HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY,
+				    IEEE80211_RADIOTAP_EHT_DATA0_PE_DISAMBIGUITY_OM);
+
+	data |=	ATH12K_LE32_DEC_ENC(ovflow->info0,
+				    HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD,
+				    IEEE80211_RADIOTAP_EHT_DATA0_DISREGARD_O);
+	eht->data[0] = cpu_to_le32(data);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_non_ofdma_users(const struct hal_eht_sig_non_ofdma_cmn_eb *eb,
+					   struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+	u32 known, data;
+
+	known = __le32_to_cpu(eht->known);
+	known |= IEEE80211_RADIOTAP_EHT_KNOWN_NR_NON_OFDMA_USERS_M;
+	eht->known = cpu_to_le32(known);
+
+	data = __le32_to_cpu(eht->data[7]);
+	data |=	ATH12K_LE32_DEC_ENC(eb->info0,
+				    HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS,
+				    IEEE80211_RADIOTAP_EHT_DATA7_NUM_OF_NON_OFDMA_USERS);
+	eht->data[7] = cpu_to_le32(data);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_mumimo_user(const struct hal_eht_sig_mu_mimo *user,
+					   struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info;
+	u32 user_idx;
+
+	if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info))
+		return;
+
+	user_idx = eht_info->num_user_info++;
+
+	eht_info->user_info[user_idx] |=
+		IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN |
+		IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+		IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
+		IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_KNOWN_M |
+		ATH12K_LE32_DEC_ENC(user->info0,
+				    HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID,
+				    IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) |
+		ATH12K_LE32_DEC_ENC(user->info0,
+				    HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING,
+				    IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) |
+		ATH12K_LE32_DEC_ENC(user->info0,
+				    HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS,
+				    IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
+		ATH12K_LE32_DEC_ENC(user->info0,
+				    HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING,
+				    IEEE80211_RADIOTAP_EHT_USER_INFO_SPATIAL_CONFIG_M);
+
+	ppdu_info->mcs = le32_get_bits(user->info0,
+				       HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(const struct hal_eht_sig_non_mu_mimo *user,
+					       struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_rx_eht_info *eht_info = &ppdu_info->eht_info;
+	u32 user_idx;
+
+	if (eht_info->num_user_info >= ARRAY_SIZE(eht_info->user_info))
+		return;
+
+	user_idx = eht_info->num_user_info++;
+
+	eht_info->user_info[user_idx] |=
+		IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID_KNOWN |
+		IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN |
+		IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN |
+		IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O |
+		IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_KNOWN_O |
+		ATH12K_LE32_DEC_ENC(user->info0,
+				    HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID,
+				    IEEE80211_RADIOTAP_EHT_USER_INFO_STA_ID) |
+		ATH12K_LE32_DEC_ENC(user->info0,
+				    HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING,
+				    IEEE80211_RADIOTAP_EHT_USER_INFO_CODING) |
+		ATH12K_LE32_DEC_ENC(user->info0,
+				    HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS,
+				    IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) |
+		ATH12K_LE32_DEC_ENC(user->info0,
+				    HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS,
+				    IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O) |
+		ATH12K_LE32_DEC_ENC(user->info0,
+				    HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED,
+				    IEEE80211_RADIOTAP_EHT_USER_INFO_BEAMFORMING_O);
+
+	ppdu_info->mcs = le32_get_bits(user->info0,
+				       HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS);
+
+	ppdu_info->nss = le32_get_bits(user->info0,
+				       HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS) + 1;
+}
+
+static inline bool
+ath12k_dp_mon_hal_rx_is_mu_mimo_user(const struct hal_rx_u_sig_info *usig_info)
+{
+	if (usig_info->ppdu_type_comp_mode == HAL_RX_RECEPTION_TYPE_SU &&
+	    usig_info->ul_dl == 1)
+		return true;
+
+	return false;
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(const void *tlv,
+					     struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	const struct hal_eht_sig_non_ofdma_cmn_eb *eb = tlv;
+
+	ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info);
+	ath12k_dp_mon_hal_rx_parse_non_ofdma_users(eb, ppdu_info);
+
+	if (ath12k_dp_mon_hal_rx_is_mu_mimo_user(&ppdu_info->u_sig_info))
+		ath12k_dp_mon_hal_rx_parse_eht_mumimo_user(&eb->user_field.mu_mimo,
+							   ppdu_info);
+	else
+		ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(&eb->user_field.n_mu_mimo,
+							       ppdu_info);
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_ru_allocation(const struct hal_eht_sig_ofdma_cmn_eb *eb,
+					 struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	const struct hal_eht_sig_ofdma_cmn_eb1 *ofdma_cmn_eb1 = &eb->eb1;
+	const struct hal_eht_sig_ofdma_cmn_eb2 *ofdma_cmn_eb2 = &eb->eb2;
+	struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+	enum ieee80211_radiotap_eht_data ru_123, ru_124, ru_125, ru_126;
+	enum ieee80211_radiotap_eht_data ru_121, ru_122, ru_112, ru_111;
+	u32 data;
+
+	ru_123 = IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3;
+	ru_124 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4;
+	ru_125 = IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5;
+	ru_126 = IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6;
+	ru_121 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1;
+	ru_122 = IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2;
+	ru_112 = IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2;
+	ru_111 = IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1;
+
+	switch (ppdu_info->u_sig_info.bw) {
+	case HAL_EHT_BW_320_2:
+	case HAL_EHT_BW_320_1:
+		data = __le32_to_cpu(eht->data[4]);
+		/* CC1 2::3 */
+		data |=	IEEE80211_RADIOTAP_EHT_DATA4_RU_ALLOC_CC_1_2_3_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+					    HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3,
+					    ru_123);
+		eht->data[4] = cpu_to_le32(data);
+
+		data = __le32_to_cpu(eht->data[5]);
+		/* CC1 2::4 */
+		data |=	IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_4_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+					    HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4,
+					    ru_124);
+
+		/* CC1 2::5 */
+		data |=	IEEE80211_RADIOTAP_EHT_DATA5_RU_ALLOC_CC_1_2_5_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+					    HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5,
+					    ru_125);
+		eht->data[5] = cpu_to_le32(data);
+
+		data = __le32_to_cpu(eht->data[6]);
+		/* CC1 2::6 */
+		data |=	IEEE80211_RADIOTAP_EHT_DATA6_RU_ALLOC_CC_1_2_6_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+					    HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6,
+					    ru_126);
+		eht->data[6] = cpu_to_le32(data);
+
+		fallthrough;
+	case HAL_EHT_BW_160:
+		data = __le32_to_cpu(eht->data[3]);
+		/* CC1 2::1 */
+		data |=	IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_1_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+					    HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1,
+					    ru_121);
+		/* CC1 2::2 */
+		data |=	IEEE80211_RADIOTAP_EHT_DATA3_RU_ALLOC_CC_1_2_2_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb2->info0,
+					    HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2,
+					    ru_122);
+		eht->data[3] = cpu_to_le32(data);
+
+		fallthrough;
+	case HAL_EHT_BW_80:
+		data = __le32_to_cpu(eht->data[2]);
+		/* CC1 1::2 */
+		data |=	IEEE80211_RADIOTAP_EHT_DATA2_RU_ALLOC_CC_1_1_2_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0,
+					    HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2,
+					    ru_112);
+		eht->data[2] = cpu_to_le32(data);
+
+		fallthrough;
+	case HAL_EHT_BW_40:
+		fallthrough;
+	case HAL_EHT_BW_20:
+		data = __le32_to_cpu(eht->data[1]);
+		/* CC1 1::1 */
+		data |=	IEEE80211_RADIOTAP_EHT_DATA1_RU_ALLOC_CC_1_1_1_KNOWN |
+			ATH12K_LE64_DEC_ENC(ofdma_cmn_eb1->info0,
+					    HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1,
+					    ru_111);
+		eht->data[1] = cpu_to_le32(data);
+		break;
+	default:
+		break;
+	}
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(const void *tlv,
+					 struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	const struct hal_eht_sig_ofdma_cmn_eb *ofdma = tlv;
+
+	ath12k_dp_mon_hal_rx_parse_usig_overflow(tlv, ppdu_info);
+	ath12k_dp_mon_hal_rx_parse_ru_allocation(ofdma, ppdu_info);
+
+	ath12k_dp_mon_hal_rx_parse_eht_non_mumimo_user(&ofdma->user_field.n_mu_mimo,
+						       ppdu_info);
+}
+
+static void
+ath12k_dp_mon_parse_eht_sig_hdr(struct hal_rx_mon_ppdu_info *ppdu_info,
+				const void *tlv_data)
+{
+	ppdu_info->is_eht = true;
+
+	if (ath12k_dp_mon_hal_rx_is_frame_type_ndp(&ppdu_info->u_sig_info))
+		ath12k_dp_mon_hal_rx_parse_eht_sig_ndp(tlv_data, ppdu_info);
+	else if (ath12k_dp_mon_hal_rx_is_non_ofdma(&ppdu_info->u_sig_info))
+		ath12k_dp_mon_hal_rx_parse_eht_sig_non_ofdma(tlv_data, ppdu_info);
+	else if (ath12k_dp_mon_hal_rx_is_ofdma(&ppdu_info->u_sig_info))
+		ath12k_dp_mon_hal_rx_parse_eht_sig_ofdma(tlv_data, ppdu_info);
+}
+
+static inline enum ath12k_eht_ru_size
+hal_rx_mon_hal_ru_size_to_ath12k_ru_size(u32 hal_ru_size)
+{
+	switch (hal_ru_size) {
+	case HAL_EHT_RU_26:
+		return ATH12K_EHT_RU_26;
+	case HAL_EHT_RU_52:
+		return ATH12K_EHT_RU_52;
+	case HAL_EHT_RU_78:
+		return ATH12K_EHT_RU_52_26;
+	case HAL_EHT_RU_106:
+		return ATH12K_EHT_RU_106;
+	case HAL_EHT_RU_132:
+		return ATH12K_EHT_RU_106_26;
+	case HAL_EHT_RU_242:
+		return ATH12K_EHT_RU_242;
+	case HAL_EHT_RU_484:
+		return ATH12K_EHT_RU_484;
+	case HAL_EHT_RU_726:
+		return ATH12K_EHT_RU_484_242;
+	case HAL_EHT_RU_996:
+		return ATH12K_EHT_RU_996;
+	case HAL_EHT_RU_996x2:
+		return ATH12K_EHT_RU_996x2;
+	case HAL_EHT_RU_996x3:
+		return ATH12K_EHT_RU_996x3;
+	case HAL_EHT_RU_996x4:
+		return ATH12K_EHT_RU_996x4;
+	case HAL_EHT_RU_NONE:
+		return ATH12K_EHT_RU_INVALID;
+	case HAL_EHT_RU_996_484:
+		return ATH12K_EHT_RU_996_484;
+	case HAL_EHT_RU_996x2_484:
+		return ATH12K_EHT_RU_996x2_484;
+	case HAL_EHT_RU_996x3_484:
+		return ATH12K_EHT_RU_996x3_484;
+	case HAL_EHT_RU_996_484_242:
+		return ATH12K_EHT_RU_996_484_242;
+	default:
+		return ATH12K_EHT_RU_INVALID;
+	}
+}
+
+static inline u32
+hal_rx_ul_ofdma_ru_size_to_width(enum ath12k_eht_ru_size ru_size)
+{
+	switch (ru_size) {
+	case ATH12K_EHT_RU_26:
+		return RU_26;
+	case ATH12K_EHT_RU_52:
+		return RU_52;
+	case ATH12K_EHT_RU_52_26:
+		return RU_52_26;
+	case ATH12K_EHT_RU_106:
+		return RU_106;
+	case ATH12K_EHT_RU_106_26:
+		return RU_106_26;
+	case ATH12K_EHT_RU_242:
+		return RU_242;
+	case ATH12K_EHT_RU_484:
+		return RU_484;
+	case ATH12K_EHT_RU_484_242:
+		return RU_484_242;
+	case ATH12K_EHT_RU_996:
+		return RU_996;
+	case ATH12K_EHT_RU_996_484:
+		return RU_996_484;
+	case ATH12K_EHT_RU_996_484_242:
+		return RU_996_484_242;
+	case ATH12K_EHT_RU_996x2:
+		return RU_2X996;
+	case ATH12K_EHT_RU_996x2_484:
+		return RU_2X996_484;
+	case ATH12K_EHT_RU_996x3:
+		return RU_3X996;
+	case ATH12K_EHT_RU_996x3_484:
+		return RU_3X996_484;
+	case ATH12K_EHT_RU_996x4:
+		return RU_4X996;
+	default:
+		return RU_INVALID;
+	}
+}
+
+static void
+ath12k_dp_mon_hal_rx_parse_user_info(const struct hal_receive_user_info *rx_usr_info,
+				     u16 user_id,
+				     struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	struct hal_rx_user_status *mon_rx_user_status = NULL;
+	struct hal_rx_radiotap_eht *eht = &ppdu_info->eht_info.eht;
+	enum ath12k_eht_ru_size rtap_ru_size = ATH12K_EHT_RU_INVALID;
+	u32 ru_width, reception_type, ru_index = HAL_EHT_RU_INVALID;
+	u32 ru_type_80_0, ru_start_index_80_0;
+	u32 ru_type_80_1, ru_start_index_80_1;
+	u32 ru_type_80_2, ru_start_index_80_2;
+	u32 ru_type_80_3, ru_start_index_80_3;
+	u32 ru_size = 0, num_80mhz_with_ru = 0;
+	u64 ru_index_320mhz = 0;
+	u32 ru_index_per80mhz;
+
+	reception_type = le32_get_bits(rx_usr_info->info0,
+				       HAL_RX_USR_INFO0_RECEPTION_TYPE);
+
+	switch (reception_type) {
+	case HAL_RECEPTION_TYPE_SU:
 	ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_SU;
+		break;
+	case HAL_RECEPTION_TYPE_DL_MU_MIMO:
+	case HAL_RECEPTION_TYPE_UL_MU_MIMO:
+		ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_MIMO;
+		break;
+	case HAL_RECEPTION_TYPE_DL_MU_OFMA:
+	case HAL_RECEPTION_TYPE_UL_MU_OFDMA:
+		ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA;
+		break;
+	case HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO:
+	case HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO:
+		ppdu_info->reception_type = HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO;
+	}
+
+	ppdu_info->is_stbc = le32_get_bits(rx_usr_info->info0, HAL_RX_USR_INFO0_STBC);
+	ppdu_info->ldpc = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_LDPC);
+	ppdu_info->dcm = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_STA_DCM);
+	ppdu_info->bw = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_RX_BW);
+	ppdu_info->mcs = le32_get_bits(rx_usr_info->info1, HAL_RX_USR_INFO1_MCS);
+	ppdu_info->nss = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_NSS) + 1;
+
+	if (user_id < HAL_MAX_UL_MU_USERS) {
+		mon_rx_user_status = &ppdu_info->userstats[user_id];
+		mon_rx_user_status->mcs = ppdu_info->mcs;
+		mon_rx_user_status->nss = ppdu_info->nss;
+	}
+
+	if (!(ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_MIMO ||
+	      ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA ||
+	      ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_MU_OFDMA_MIMO))
+		return;
+
+	/* RU allocation present only for OFDMA reception */
+	ru_type_80_0 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_0);
+	ru_start_index_80_0 = le32_get_bits(rx_usr_info->info3,
+					    HAL_RX_USR_INFO3_RU_START_IDX_80_0);
+	if (ru_type_80_0 != HAL_EHT_RU_NONE) {
+		ru_size += ru_type_80_0;
+		ru_index_per80mhz = ru_start_index_80_0;
+		ru_index = ru_index_per80mhz;
+		ru_index_320mhz |= HAL_RU_PER80(ru_type_80_0, 0, ru_index_per80mhz);
+		num_80mhz_with_ru++;
+	}
+
+	ru_type_80_1 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_1);
+	ru_start_index_80_1 = le32_get_bits(rx_usr_info->info3,
+					    HAL_RX_USR_INFO3_RU_START_IDX_80_1);
+	if (ru_type_80_1 != HAL_EHT_RU_NONE) {
+		ru_size += ru_type_80_1;
+		ru_index_per80mhz = ru_start_index_80_1;
+		ru_index = ru_index_per80mhz;
+		ru_index_320mhz |= HAL_RU_PER80(ru_type_80_1, 1, ru_index_per80mhz);
+		num_80mhz_with_ru++;
+	}
+
+	ru_type_80_2 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_2);
+	ru_start_index_80_2 = le32_get_bits(rx_usr_info->info3,
+					    HAL_RX_USR_INFO3_RU_START_IDX_80_2);
+	if (ru_type_80_2 != HAL_EHT_RU_NONE) {
+		ru_size += ru_type_80_2;
+		ru_index_per80mhz = ru_start_index_80_2;
+		ru_index = ru_index_per80mhz;
+		ru_index_320mhz |= HAL_RU_PER80(ru_type_80_2, 2, ru_index_per80mhz);
+		num_80mhz_with_ru++;
+	}
+
+	ru_type_80_3 = le32_get_bits(rx_usr_info->info2, HAL_RX_USR_INFO2_RU_TYPE_80_3);
+	ru_start_index_80_3 = le32_get_bits(rx_usr_info->info3,
+					    HAL_RX_USR_INFO3_RU_START_IDX_80_3);
+	if (ru_type_80_3 != HAL_EHT_RU_NONE) {
+		ru_size += ru_type_80_3;
+		ru_index_per80mhz = ru_start_index_80_3;
+		ru_index = ru_index_per80mhz;
+		ru_index_320mhz |= HAL_RU_PER80(ru_type_80_3, 3, ru_index_per80mhz);
+		num_80mhz_with_ru++;
+	}
+
+	if (num_80mhz_with_ru > 1) {
+		/* Calculate the MRU index */
+		switch (ru_index_320mhz) {
+		case HAL_EHT_RU_996_484_0:
+		case HAL_EHT_RU_996x2_484_0:
+		case HAL_EHT_RU_996x3_484_0:
+			ru_index = 0;
+			break;
+		case HAL_EHT_RU_996_484_1:
+		case HAL_EHT_RU_996x2_484_1:
+		case HAL_EHT_RU_996x3_484_1:
+			ru_index = 1;
+			break;
+		case HAL_EHT_RU_996_484_2:
+		case HAL_EHT_RU_996x2_484_2:
+		case HAL_EHT_RU_996x3_484_2:
+			ru_index = 2;
+			break;
+		case HAL_EHT_RU_996_484_3:
+		case HAL_EHT_RU_996x2_484_3:
+		case HAL_EHT_RU_996x3_484_3:
+			ru_index = 3;
+			break;
+		case HAL_EHT_RU_996_484_4:
+		case HAL_EHT_RU_996x2_484_4:
+		case HAL_EHT_RU_996x3_484_4:
+			ru_index = 4;
+			break;
+		case HAL_EHT_RU_996_484_5:
+		case HAL_EHT_RU_996x2_484_5:
+		case HAL_EHT_RU_996x3_484_5:
+			ru_index = 5;
+			break;
+		case HAL_EHT_RU_996_484_6:
+		case HAL_EHT_RU_996x2_484_6:
+		case HAL_EHT_RU_996x3_484_6:
+			ru_index = 6;
+			break;
+		case HAL_EHT_RU_996_484_7:
+		case HAL_EHT_RU_996x2_484_7:
+		case HAL_EHT_RU_996x3_484_7:
+			ru_index = 7;
+			break;
+		case HAL_EHT_RU_996x2_484_8:
+			ru_index = 8;
+			break;
+		case HAL_EHT_RU_996x2_484_9:
+			ru_index = 9;
+			break;
+		case HAL_EHT_RU_996x2_484_10:
+			ru_index = 10;
+			break;
+		case HAL_EHT_RU_996x2_484_11:
+			ru_index = 11;
+			break;
+		default:
+			ru_index = HAL_EHT_RU_INVALID;
+			break;
+		}
+
+		ru_size += 4;
+	}
+
+	rtap_ru_size = hal_rx_mon_hal_ru_size_to_ath12k_ru_size(ru_size);
+	if (rtap_ru_size != ATH12K_EHT_RU_INVALID) {
+		u32 known, data;
+
+		known = __le32_to_cpu(eht->known);
+		known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_SIZE_OM;
+		eht->known = cpu_to_le32(known);
+
+		data = __le32_to_cpu(eht->data[1]);
+		data |=	u32_encode_bits(rtap_ru_size,
+					IEEE80211_RADIOTAP_EHT_DATA1_RU_SIZE);
+		eht->data[1] = cpu_to_le32(data);
+	}
+
+	if (ru_index != HAL_EHT_RU_INVALID) {
+		u32 known, data;
+
+		known = __le32_to_cpu(eht->known);
+		known |= IEEE80211_RADIOTAP_EHT_KNOWN_RU_MRU_INDEX_OM;
+		eht->known = cpu_to_le32(known);
+
+		data = __le32_to_cpu(eht->data[1]);
+		data |=	u32_encode_bits(ru_index,
+					IEEE80211_RADIOTAP_EHT_DATA1_RU_INDEX);
+		eht->data[1] = cpu_to_le32(data);
+	}
+
+	if (mon_rx_user_status && ru_index != HAL_EHT_RU_INVALID &&
+	    rtap_ru_size != ATH12K_EHT_RU_INVALID) {
+		mon_rx_user_status->ul_ofdma_ru_start_index = ru_index;
+		mon_rx_user_status->ul_ofdma_ru_size = rtap_ru_size;
+
+		ru_width = hal_rx_ul_ofdma_ru_size_to_width(rtap_ru_size);
+
+		mon_rx_user_status->ul_ofdma_ru_width = ru_width;
+		mon_rx_user_status->ofdma_info_valid = 1;
+	}
 }
 
 static enum hal_rx_mon_status
-ath12k_dp_mon_rx_parse_status_tlv(struct ath12k_base *ab,
+ath12k_dp_mon_rx_parse_status_tlv(struct ath12k *ar,
 				  struct ath12k_mon_data *pmon,
-				  u32 tlv_tag, u8 *tlv_data, u32 userid)
+				  const struct hal_tlv_64_hdr *tlv)
 {
 	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
-	u32 info[7];
+	const void *tlv_data = tlv->value;
+	u32 info[7], userid;
+	u16 tlv_tag, tlv_len;
+
+	tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
+	tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
+	userid = le64_get_bits(tlv->tl, HAL_TLV_64_USR_ID);
+
+	if (ppdu_info->tlv_aggr.in_progress && ppdu_info->tlv_aggr.tlv_tag != tlv_tag) {
+		ath12k_dp_mon_parse_eht_sig_hdr(ppdu_info, ppdu_info->tlv_aggr.buf);
+
+		ppdu_info->tlv_aggr.in_progress = false;
+		ppdu_info->tlv_aggr.cur_len = 0;
+	}
 
 	switch (tlv_tag) {
 	case HAL_RX_PPDU_START: {
-		struct hal_rx_ppdu_start *ppdu_start =
-			(struct hal_rx_ppdu_start *)tlv_data;
+		const struct hal_rx_ppdu_start *ppdu_start = tlv_data;
 
 		u64 ppdu_ts = ath12k_le32hilo_to_u64(ppdu_start->ppdu_start_ts_63_32,
 						     ppdu_start->ppdu_start_ts_31_0);
@@ -615,8 +1469,8 @@
 		break;
 	}
 	case HAL_RX_PPDU_END_USER_STATS: {
-		struct hal_rx_ppdu_end_user_stats *eu_stats =
-			(struct hal_rx_ppdu_end_user_stats *)tlv_data;
+		const struct hal_rx_ppdu_end_user_stats *eu_stats = tlv_data;
+		u32 tid_bitmap;
 
 		info[0] = __le32_to_cpu(eu_stats->info0);
 		info[1] = __le32_to_cpu(eu_stats->info1);
@@ -629,10 +1483,9 @@
 			u32_get_bits(info[2], HAL_RX_PPDU_END_USER_STATS_INFO2_AST_INDEX);
 		ppdu_info->fc_valid =
 			u32_get_bits(info[1], HAL_RX_PPDU_END_USER_STATS_INFO1_FC_VALID);
-		ppdu_info->tid =
-			ffs(u32_get_bits(info[6],
-					 HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP)
-					 - 1);
+		tid_bitmap = u32_get_bits(info[6],
+					  HAL_RX_PPDU_END_USER_STATS_INFO6_TID_BITMAP);
+		ppdu_info->tid = ffs(tid_bitmap) - 1;
 		ppdu_info->tcp_msdu_count =
 			u32_get_bits(info[4],
 				     HAL_RX_PPDU_END_USER_STATS_INFO4_TCP_MSDU_CNT);
@@ -654,6 +1507,9 @@
 		ppdu_info->num_mpdu_fcs_err =
 			u32_get_bits(info[0],
 				     HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR);
+		ppdu_info->peer_id =
+			u32_get_bits(info[0], HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID);
+
 		switch (ppdu_info->preamble_type) {
 		case HAL_RX_PREAMBLE_11N:
 			ppdu_info->ht_flags = 1;
@@ -664,6 +1520,9 @@
 		case HAL_RX_PREAMBLE_11AX:
 			ppdu_info->he_flags = 1;
 			break;
+		case HAL_RX_PREAMBLE_11BE:
+			ppdu_info->is_eht = true;
+			break;
 		default:
 			break;
 		}
@@ -671,10 +1530,15 @@
 		if (userid < HAL_MAX_UL_MU_USERS) {
 			struct hal_rx_user_status *rxuser_stats =
 				&ppdu_info->userstats[userid];
+
+			if (ppdu_info->num_mpdu_fcs_ok > 1 ||
+			    ppdu_info->num_mpdu_fcs_err > 1)
+				ppdu_info->userstats[userid].ampdu_present = true;
+
 			ppdu_info->num_users += 1;
 
-			ath12k_dp_mon_rx_handle_ofdma_info(tlv_data, rxuser_stats);
-			ath12k_dp_mon_rx_populate_mu_user_info(tlv_data, ppdu_info,
+			ath12k_dp_mon_rx_handle_ofdma_info(eu_stats, rxuser_stats);
+			ath12k_dp_mon_rx_populate_mu_user_info(eu_stats, ppdu_info,
 							       rxuser_stats);
 		}
 		ppdu_info->mpdu_fcs_ok_bitmap[0] = __le32_to_cpu(eu_stats->rsvd1[0]);
@@ -682,8 +1546,8 @@
 		break;
 	}
 	case HAL_RX_PPDU_END_USER_STATS_EXT: {
-		struct hal_rx_ppdu_end_user_stats_ext *eu_stats =
-			(struct hal_rx_ppdu_end_user_stats_ext *)tlv_data;
+		const struct hal_rx_ppdu_end_user_stats_ext *eu_stats = tlv_data;
+
 		ppdu_info->mpdu_fcs_ok_bitmap[2] = __le32_to_cpu(eu_stats->info1);
 		ppdu_info->mpdu_fcs_ok_bitmap[3] = __le32_to_cpu(eu_stats->info2);
 		ppdu_info->mpdu_fcs_ok_bitmap[4] = __le32_to_cpu(eu_stats->info3);
@@ -729,8 +1593,7 @@
 		break;
 
 	case HAL_PHYRX_RSSI_LEGACY: {
-		struct hal_rx_phyrx_rssi_legacy_info *rssi =
-			(struct hal_rx_phyrx_rssi_legacy_info *)tlv_data;
+		const struct hal_rx_phyrx_rssi_legacy_info *rssi = tlv_data;
 
 		info[0] = __le32_to_cpu(rssi->info0);
 		info[1] = __le32_to_cpu(rssi->info1);
@@ -747,9 +1610,19 @@
 					     HAL_RX_PHYRX_RSSI_LEGACY_INFO_INFO0_RX_BW);
 		break;
 	}
+	case HAL_PHYRX_OTHER_RECEIVE_INFO: {
+		const struct hal_phyrx_common_user_info *cmn_usr_info = tlv_data;
+
+		ppdu_info->gi = le32_get_bits(cmn_usr_info->info0,
+					      HAL_RX_PHY_CMN_USER_INFO0_GI);
+		break;
+	}
+	case HAL_RX_PPDU_START_USER_INFO:
+		ath12k_dp_mon_hal_rx_parse_user_info(tlv_data, userid, ppdu_info);
+		break;
+
 	case HAL_RXPCU_PPDU_END_INFO: {
-		struct hal_rx_ppdu_end_duration *ppdu_rx_duration =
-			(struct hal_rx_ppdu_end_duration *)tlv_data;
+		const struct hal_rx_ppdu_end_duration *ppdu_rx_duration = tlv_data;
 
 		info[0] = __le32_to_cpu(ppdu_rx_duration->info0);
 		ppdu_info->rx_duration =
@@ -760,9 +1633,7 @@
 		break;
 	}
 	case HAL_RX_MPDU_START: {
-		struct hal_rx_mpdu_start *mpdu_start =
-			(struct hal_rx_mpdu_start *)tlv_data;
-		struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
+		const struct hal_rx_mpdu_start *mpdu_start = tlv_data;
 		u16 peer_id;
 
 		info[1] = __le32_to_cpu(mpdu_start->info1);
@@ -775,70 +1646,38 @@
 		if (userid < HAL_MAX_UL_MU_USERS) {
 			info[0] = __le32_to_cpu(mpdu_start->info0);
 			ppdu_info->userid = userid;
-			ppdu_info->ampdu_id[userid] =
-				u32_get_bits(info[0], HAL_RX_MPDU_START_INFO1_PEERID);
+			ppdu_info->userstats[userid].ampdu_id =
+				u32_get_bits(info[0], HAL_RX_MPDU_START_INFO0_PPDU_ID);
 		}
 
-		mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
-		if (!mon_mpdu)
-			return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
-
-		break;
+		return HAL_RX_MON_STATUS_MPDU_START;
 	}
 	case HAL_RX_MSDU_START:
 		/* TODO: add msdu start parsing logic */
 		break;
-	case HAL_MON_BUF_ADDR: {
-		struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
-		struct dp_mon_packet_info *packet_info =
-			(struct dp_mon_packet_info *)tlv_data;
-		int buf_id = u32_get_bits(packet_info->cookie,
-					  DP_RXDMA_BUF_COOKIE_BUF_ID);
-		struct sk_buff *msdu;
-		struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
-		struct ath12k_skb_rxcb *rxcb;
-
-		spin_lock_bh(&buf_ring->idr_lock);
-		msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
-		spin_unlock_bh(&buf_ring->idr_lock);
-
-		if (unlikely(!msdu)) {
-			ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
-				    buf_id);
-			return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
+	case HAL_MON_BUF_ADDR:
+		return HAL_RX_MON_STATUS_BUF_ADDR;
+	case HAL_RX_MSDU_END:
+		return HAL_RX_MON_STATUS_MSDU_END;
+	case HAL_RX_MPDU_END:
+		return HAL_RX_MON_STATUS_MPDU_END;
+	case HAL_PHYRX_GENERIC_U_SIG:
+		ath12k_dp_mon_hal_rx_parse_u_sig_hdr(tlv_data, ppdu_info);
+		break;
+	case HAL_PHYRX_GENERIC_EHT_SIG:
+		/* Handle the case where aggregation is in progress
+		 * or the current TLV is one of the TLVs which should be
+		 * aggregated
+		 */
+		if (!ppdu_info->tlv_aggr.in_progress) {
+			ppdu_info->tlv_aggr.in_progress = true;
+			ppdu_info->tlv_aggr.tlv_tag = tlv_tag;
+			ppdu_info->tlv_aggr.cur_len = 0;
 		}
 
-		rxcb = ATH12K_SKB_RXCB(msdu);
-		dma_unmap_single(ab->dev, rxcb->paddr,
-				 msdu->len + skb_tailroom(msdu),
-				 DMA_FROM_DEVICE);
-
-		if (mon_mpdu->tail)
-			mon_mpdu->tail->next = msdu;
-		else
-			mon_mpdu->tail = msdu;
-
-		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+		ppdu_info->is_eht = true;
 
-		break;
-	}
-	case HAL_RX_MSDU_END: {
-		struct rx_msdu_end_qcn9274 *msdu_end =
-			(struct rx_msdu_end_qcn9274 *)tlv_data;
-		bool is_first_msdu_in_mpdu;
-		u16 msdu_end_info;
-
-		msdu_end_info = __le16_to_cpu(msdu_end->info5);
-		is_first_msdu_in_mpdu = u32_get_bits(msdu_end_info,
-						     RX_MSDU_END_INFO5_FIRST_MSDU);
-		if (is_first_msdu_in_mpdu) {
-			pmon->mon_mpdu->head = pmon->mon_mpdu->tail;
-			pmon->mon_mpdu->tail = NULL;
-		}
-		break;
-	}
-	case HAL_RX_MPDU_END:
-		list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list);
+		ath12k_dp_mon_hal_aggr_tlv(ppdu_info, tlv_len, tlv_data);
 		break;
 	case HAL_DUMMY:
 		return HAL_RX_MON_STATUS_BUF_DONE;
@@ -852,55 +1691,168 @@
 	return HAL_RX_MON_STATUS_PPDU_NOT_DONE;
 }
 
-static void ath12k_dp_mon_rx_msdus_set_payload(struct ath12k *ar,
-					       struct sk_buff *head_msdu,
-					       struct sk_buff *tail_msdu)
-{
-	u32 rx_pkt_offset, l2_hdr_offset;
-
-	rx_pkt_offset = ar->ab->hal.hal_desc_sz;
-	l2_hdr_offset =
-		ath12k_dp_rx_h_l3pad(ar->ab, (struct hal_rx_desc *)tail_msdu->data);
-	skb_pull(head_msdu, rx_pkt_offset + l2_hdr_offset);
+static void
+ath12k_dp_mon_fill_rx_stats_info(struct ath12k *ar,
+				 struct hal_rx_mon_ppdu_info *ppdu_info,
+				 struct ieee80211_rx_status *rx_status)
+{
+	u32 center_freq = ppdu_info->freq;
+
+	rx_status->freq = center_freq;
+	rx_status->bw = ath12k_mac_bw_to_mac80211_bw(ppdu_info->bw);
+	rx_status->nss = ppdu_info->nss;
+	rx_status->rate_idx = 0;
+	rx_status->encoding = RX_ENC_LEGACY;
+	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
+
+	if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
+	    center_freq <= ATH12K_MAX_6GHZ_FREQ) {
+		rx_status->band = NL80211_BAND_6GHZ;
+	} else if (center_freq >= ATH12K_MIN_2GHZ_FREQ &&
+		   center_freq <= ATH12K_MAX_2GHZ_FREQ) {
+		rx_status->band = NL80211_BAND_2GHZ;
+	} else if (center_freq >= ATH12K_MIN_5GHZ_FREQ &&
+		   center_freq <= ATH12K_MAX_5GHZ_FREQ) {
+		rx_status->band = NL80211_BAND_5GHZ;
+	} else {
+		rx_status->band = NUM_NL80211_BANDS;
+	}
+}
+
+static void
+ath12k_dp_mon_fill_rx_rate(struct ath12k *ar,
+			   struct hal_rx_mon_ppdu_info *ppdu_info,
+			   struct ieee80211_rx_status *rx_status)
+{
+	struct ieee80211_supported_band *sband;
+	enum rx_msdu_start_pkt_type pkt_type;
+	u8 rate_mcs, nss, sgi;
+	bool is_cck;
+
+	pkt_type = ppdu_info->preamble_type;
+	rate_mcs = ppdu_info->rate;
+	nss = ppdu_info->nss;
+	sgi = ppdu_info->gi;
+
+	switch (pkt_type) {
+	case RX_MSDU_START_PKT_TYPE_11A:
+	case RX_MSDU_START_PKT_TYPE_11B:
+		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
+		if (rx_status->band < NUM_NL80211_BANDS) {
+			sband = &ar->mac.sbands[rx_status->band];
+			rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
+									is_cck);
+		}
+		break;
+	case RX_MSDU_START_PKT_TYPE_11N:
+		rx_status->encoding = RX_ENC_HT;
+		if (rate_mcs > ATH12K_HT_MCS_MAX) {
+			ath12k_warn(ar->ab,
+				    "Received with invalid mcs in HT mode %d\n",
+				     rate_mcs);
+			break;
+		}
+		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
+		if (sgi)
+			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+		break;
+	case RX_MSDU_START_PKT_TYPE_11AC:
+		rx_status->encoding = RX_ENC_VHT;
+		rx_status->rate_idx = rate_mcs;
+		if (rate_mcs > ATH12K_VHT_MCS_MAX) {
+			ath12k_warn(ar->ab,
+				    "Received with invalid mcs in VHT mode %d\n",
+				     rate_mcs);
+			break;
+		}
+		if (sgi)
+			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
+		break;
+	case RX_MSDU_START_PKT_TYPE_11AX:
+		rx_status->rate_idx = rate_mcs;
+		if (rate_mcs > ATH12K_HE_MCS_MAX) {
+			ath12k_warn(ar->ab,
+				    "Received with invalid mcs in HE mode %d\n",
+				    rate_mcs);
+			break;
+		}
+		rx_status->encoding = RX_ENC_HE;
+		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+		break;
+	case RX_MSDU_START_PKT_TYPE_11BE:
+		rx_status->rate_idx = rate_mcs;
+		if (rate_mcs > ATH12K_EHT_MCS_MAX) {
+			ath12k_warn(ar->ab,
+				    "Received with invalid mcs in EHT mode %d\n",
+				    rate_mcs);
+			break;
+		}
+		rx_status->encoding = RX_ENC_EHT;
+		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
+		break;
+	default:
+		ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+			   "monitor receives invalid preamble type %d",
+			    pkt_type);
+		break;
+	}
 }
 
 static struct sk_buff *
-ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar, u32 mac_id,
-			    struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
-			    struct ieee80211_rx_status *rxs, bool *fcs_err)
+ath12k_dp_mon_rx_merg_msdus(struct ath12k *ar,
+			    struct dp_mon_mpdu *mon_mpdu,
+			    struct hal_rx_mon_ppdu_info *ppdu_info,
+			    struct ieee80211_rx_status *rxs)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct sk_buff *msdu, *mpdu_buf, *prev_buf, *head_frag_list;
-	struct hal_rx_desc *rx_desc, *tail_rx_desc;
-	u8 *hdr_desc, *dest, decap_format;
+	struct sk_buff *head_msdu;
+	struct hal_rx_desc *rx_desc;
+	u8 *hdr_desc, *dest, decap_format = mon_mpdu->decap_format;
 	struct ieee80211_hdr_3addr *wh;
-	u32 err_bitmap, frag_list_sum_len = 0;
+	struct ieee80211_channel *channel;
+	u32 frag_list_sum_len = 0;
+	u8 channel_num = ppdu_info->chan_num;
 
 	mpdu_buf = NULL;
+	head_msdu = mon_mpdu->head;
 
 	if (!head_msdu)
 		goto err_merge_fail;
 
-	rx_desc = (struct hal_rx_desc *)head_msdu->data;
-	tail_rx_desc = (struct hal_rx_desc *)tail_msdu->data;
+	ath12k_dp_mon_fill_rx_stats_info(ar, ppdu_info, rxs);
 
-	err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, tail_rx_desc);
-	if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
-		*fcs_err = true;
+	if (unlikely(rxs->band == NUM_NL80211_BANDS ||
+		     !ath12k_ar_to_hw(ar)->wiphy->bands[rxs->band])) {
+		ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
+			   "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n",
+			   rxs->band, channel_num, ppdu_info->freq, ar->pdev_idx);
 
-	decap_format = ath12k_dp_rx_h_decap_type(ab, tail_rx_desc);
+		spin_lock_bh(&ar->data_lock);
+		channel = ar->rx_channel;
+		if (channel) {
+			rxs->band = channel->band;
+			channel_num =
+				ieee80211_frequency_to_channel(channel->center_freq);
+		}
+		spin_unlock_bh(&ar->data_lock);
+	}
+
+	if (rxs->band < NUM_NL80211_BANDS)
+		rxs->freq = ieee80211_channel_to_frequency(channel_num,
+							   rxs->band);
 
-	ath12k_dp_rx_h_ppdu(ar, tail_rx_desc, rxs);
+	ath12k_dp_mon_fill_rx_rate(ar, ppdu_info, rxs);
 
 	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
-		ath12k_dp_mon_rx_msdus_set_payload(ar, head_msdu, tail_msdu);
+		skb_pull(head_msdu, ATH12K_MON_RX_PKT_OFFSET);
 
 		prev_buf = head_msdu;
 		msdu = head_msdu->next;
 		head_frag_list = NULL;
 
 		while (msdu) {
-			ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu);
+			skb_pull(msdu, ATH12K_MON_RX_PKT_OFFSET);
 
 			if (!head_frag_list)
 				head_frag_list = msdu;
@@ -912,7 +1864,7 @@
 
 		prev_buf->next = NULL;
 
-		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
+		skb_trim(prev_buf, prev_buf->len);
 		if (head_frag_list) {
 			skb_shinfo(head_msdu)->frag_list = head_frag_list;
 			head_msdu->data_len = frag_list_sum_len;
@@ -935,7 +1887,7 @@
 		msdu = head_msdu;
 
 		while (msdu) {
-			ath12k_dp_mon_rx_msdus_set_payload(ar, msdu, tail_msdu);
+			skb_pull(msdu, ATH12K_MON_RX_PKT_OFFSET);
 			if (qos_pkt) {
 				dest = skb_push(msdu, sizeof(__le16));
 				if (!dest)
@@ -1026,18 +1978,72 @@
 {
 	struct ieee80211_supported_band *sband;
 	u8 *ptr = NULL;
-	u16 ampdu_id = ppduinfo->ampdu_id[ppduinfo->userid];
 
 	rxs->flag |= RX_FLAG_MACTIME_START;
 	rxs->signal = ppduinfo->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
-	rxs->nss = ppduinfo->nss + 1;
+	if (ppduinfo->rssi_comb == ATH12K_INVALID_RSSI)
+		rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+	rxs->nss = ppduinfo->nss;
 
-	if (ampdu_id) {
+	if (ppduinfo->userstats[ppduinfo->userid].ampdu_present) {
 		rxs->flag |= RX_FLAG_AMPDU_DETAILS;
-		rxs->ampdu_reference = ampdu_id;
+		rxs->ampdu_reference = ppduinfo->userstats[ppduinfo->userid].ampdu_id;
 	}
 
-	if (ppduinfo->he_mu_flags) {
+	if (ppduinfo->is_eht || ppduinfo->eht_usig) {
+		struct ieee80211_radiotap_tlv *tlv;
+		struct ieee80211_radiotap_eht *eht;
+		struct ieee80211_radiotap_eht_usig *usig;
+		u16 len = 0, i, eht_len, usig_len;
+		u8 user;
+
+		if (ppduinfo->is_eht) {
+			eht_len = struct_size(eht,
+					      user_info,
+					      ppduinfo->eht_info.num_user_info);
+			len += sizeof(*tlv) + eht_len;
+		}
+
+		if (ppduinfo->eht_usig) {
+			usig_len = sizeof(*usig);
+			len += sizeof(*tlv) + usig_len;
+		}
+
+		rxs->flag |= RX_FLAG_RADIOTAP_TLV_AT_END;
+		rxs->encoding = RX_ENC_EHT;
+
+		skb_reset_mac_header(mon_skb);
+
+		tlv = skb_push(mon_skb, len);
+
+		if (ppduinfo->is_eht) {
+			tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT);
+			tlv->len = cpu_to_le16(eht_len);
+
+			eht = (struct ieee80211_radiotap_eht *)tlv->data;
+			eht->known = ppduinfo->eht_info.eht.known;
+
+			for (i = 0;
+			     i < ARRAY_SIZE(eht->data) &&
+			     i < ARRAY_SIZE(ppduinfo->eht_info.eht.data);
+			     i++)
+				eht->data[i] = ppduinfo->eht_info.eht.data[i];
+
+			for (user = 0; user < ppduinfo->eht_info.num_user_info; user++)
+				put_unaligned_le32(ppduinfo->eht_info.user_info[user],
+						   &eht->user_info[user]);
+
+			tlv = (struct ieee80211_radiotap_tlv *)&tlv->data[eht_len];
+		}
+
+		if (ppduinfo->eht_usig) {
+			tlv->type = cpu_to_le16(IEEE80211_RADIOTAP_EHT_USIG);
+			tlv->len = cpu_to_le16(usig_len);
+
+			usig = (struct ieee80211_radiotap_eht_usig *)tlv->data;
+			*usig = ppduinfo->u_sig_info.usig;
+		}
+	} else if (ppduinfo->he_mu_flags) {
 		rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
 		rxs->encoding = RX_ENC_HE;
 		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
@@ -1066,7 +2072,9 @@
 
 static void ath12k_dp_mon_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
 					  struct sk_buff *msdu,
-					  struct ieee80211_rx_status *status)
+					  const struct hal_rx_mon_ppdu_info *ppduinfo,
+					  struct ieee80211_rx_status *status,
+					  u8 decap)
 {
 	static const struct ieee80211_radiotap_he known = {
 		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
@@ -1078,10 +2086,11 @@
 	struct ieee80211_sta *pubsta = NULL;
 	struct ath12k_peer *peer;
 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
-	u8 decap = DP_RX_DECAP_TYPE_RAW;
 	bool is_mcbc = rxcb->is_mcbc;
 	bool is_eapol_tkip = rxcb->is_eapol;
 
+	status->link_valid = 0;
+
 	if ((status->encoding == RX_ENC_HE) && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
 	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
 		he = skb_push(msdu, sizeof(known));
@@ -1089,12 +2098,16 @@
 		status->flag |= RX_FLAG_RADIOTAP_HE;
 	}
 
-	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
-		decap = ath12k_dp_rx_h_decap_type(ar->ab, rxcb->rx_desc);
 	spin_lock_bh(&ar->ab->base_lock);
-	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
-	if (peer && peer->sta)
+	peer = ath12k_peer_find_by_id(ar->ab, ppduinfo->peer_id);
+	if (peer && peer->sta) {
 		pubsta = peer->sta;
+		if (pubsta->valid_links) {
+			status->link_valid = 1;
+			status->link_id = peer->link_id;
+		}
+	}
+
 	spin_unlock_bh(&ar->ab->base_lock);
 
 	ath12k_dbg(ar->ab, ATH12K_DBG_DATA,
@@ -1140,26 +2153,24 @@
 	ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
 }
 
-static int ath12k_dp_mon_rx_deliver(struct ath12k *ar, u32 mac_id,
-				    struct sk_buff *head_msdu, struct sk_buff *tail_msdu,
+static int ath12k_dp_mon_rx_deliver(struct ath12k *ar,
+				    struct dp_mon_mpdu *mon_mpdu,
 				    struct hal_rx_mon_ppdu_info *ppduinfo,
 				    struct napi_struct *napi)
 {
 	struct ath12k_pdev_dp *dp = &ar->dp;
 	struct sk_buff *mon_skb, *skb_next, *header;
 	struct ieee80211_rx_status *rxs = &dp->rx_status;
-	bool fcs_err = false;
+	u8 decap = DP_RX_DECAP_TYPE_RAW;
 
-	mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mac_id,
-					      head_msdu, tail_msdu,
-					      rxs, &fcs_err);
+	mon_skb = ath12k_dp_mon_rx_merg_msdus(ar, mon_mpdu, ppduinfo, rxs);
 	if (!mon_skb)
 		goto mon_deliver_fail;
 
 	header = mon_skb;
 	rxs->flag = 0;
 
-	if (fcs_err)
+	if (mon_mpdu->err_bitmap & HAL_RX_MPDU_ERR_FCS)
 		rxs->flag = RX_FLAG_FAILED_FCS_CRC;
 
 	do {
@@ -1176,8 +2187,12 @@
 			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
 		}
 		rxs->flag |= RX_FLAG_ONLY_MONITOR;
+
+		if (!(rxs->flag & RX_FLAG_ONLY_MONITOR))
+			decap = mon_mpdu->decap_format;
+
 		ath12k_dp_mon_update_radiotap(ar, ppduinfo, mon_skb, rxs);
-		ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, rxs);
+		ath12k_dp_mon_rx_deliver_msdu(ar, napi, mon_skb, ppduinfo, rxs, decap);
 		mon_skb = skb_next;
 	} while (mon_skb);
 	rxs->flag = 0;
@@ -1185,7 +2200,7 @@
 	return 0;
 
 mon_deliver_fail:
-	mon_skb = head_msdu;
+	mon_skb = mon_mpdu->head;
 	while (mon_skb) {
 		skb_next = mon_skb->next;
 		dev_kfree_skb_any(mon_skb);
@@ -1194,25 +2209,157 @@
 	return -EINVAL;
 }
 
+static int ath12k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
+{
+	if (skb->len > len) {
+		skb_trim(skb, len);
+	} else {
+		if (skb_tailroom(skb) < len - skb->len) {
+			if ((pskb_expand_head(skb, 0,
+					      len - skb->len - skb_tailroom(skb),
+					      GFP_ATOMIC))) {
+				return -ENOMEM;
+			}
+		}
+		skb_put(skb, (len - skb->len));
+	}
+
+	return 0;
+}
+
+static void ath12k_dp_mon_parse_rx_msdu_end_err(u32 info, u32 *errmap)
+{
+	if (info & RX_MSDU_END_INFO13_FCS_ERR)
+		*errmap |= HAL_RX_MPDU_ERR_FCS;
+
+	if (info & RX_MSDU_END_INFO13_DECRYPT_ERR)
+		*errmap |= HAL_RX_MPDU_ERR_DECRYPT;
+
+	if (info & RX_MSDU_END_INFO13_TKIP_MIC_ERR)
+		*errmap |= HAL_RX_MPDU_ERR_TKIP_MIC;
+
+	if (info & RX_MSDU_END_INFO13_A_MSDU_ERROR)
+		*errmap |= HAL_RX_MPDU_ERR_AMSDU_ERR;
+
+	if (info & RX_MSDU_END_INFO13_OVERFLOW_ERR)
+		*errmap |= HAL_RX_MPDU_ERR_OVERFLOW;
+
+	if (info & RX_MSDU_END_INFO13_MSDU_LEN_ERR)
+		*errmap |= HAL_RX_MPDU_ERR_MSDU_LEN;
+
+	if (info & RX_MSDU_END_INFO13_MPDU_LEN_ERR)
+		*errmap |= HAL_RX_MPDU_ERR_MPDU_LEN;
+}
+
+static int
+ath12k_dp_mon_parse_status_msdu_end(struct ath12k_mon_data *pmon,
+				    const struct hal_rx_msdu_end *msdu_end)
+{
+	struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
+
+	ath12k_dp_mon_parse_rx_msdu_end_err(__le32_to_cpu(msdu_end->info2),
+					    &mon_mpdu->err_bitmap);
+
+	mon_mpdu->decap_format = le32_get_bits(msdu_end->info1,
+					       RX_MSDU_END_INFO11_DECAP_FORMAT);
+
+	return 0;
+}
+
+static int
+ath12k_dp_mon_parse_status_buf(struct ath12k *ar,
+			       struct ath12k_mon_data *pmon,
+			       const struct dp_mon_packet_info *packet_info)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct dp_rxdma_mon_ring *buf_ring = &ab->dp.rxdma_mon_buf_ring;
+	struct sk_buff *msdu;
+	int buf_id;
+	u32 offset;
+
+	buf_id = u32_get_bits(packet_info->cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
+
+	spin_lock_bh(&buf_ring->idr_lock);
+	msdu = idr_remove(&buf_ring->bufs_idr, buf_id);
+	spin_unlock_bh(&buf_ring->idr_lock);
+
+	if (unlikely(!msdu)) {
+		ath12k_warn(ab, "mon dest desc with inval buf_id %d\n", buf_id);
+		return 0;
+	}
+
+	dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(msdu)->paddr,
+			 msdu->len + skb_tailroom(msdu),
+			 DMA_FROM_DEVICE);
+
+	offset = packet_info->dma_length + ATH12K_MON_RX_DOT11_OFFSET;
+	if (ath12k_dp_pkt_set_pktlen(msdu, offset)) {
+		dev_kfree_skb_any(msdu);
+		goto dest_replenish;
+	}
+
+	if (!pmon->mon_mpdu->head)
+		pmon->mon_mpdu->head = msdu;
+	else
+		pmon->mon_mpdu->tail->next = msdu;
+
+	pmon->mon_mpdu->tail = msdu;
+
+dest_replenish:
+	ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+
+	return 0;
+}
+
+static int
+ath12k_dp_mon_parse_rx_dest_tlv(struct ath12k *ar,
+				struct ath12k_mon_data *pmon,
+				enum hal_rx_mon_status hal_status,
+				const void *tlv_data)
+{
+	switch (hal_status) {
+	case HAL_RX_MON_STATUS_MPDU_START:
+		if (WARN_ON_ONCE(pmon->mon_mpdu))
+			break;
+
+		pmon->mon_mpdu = kzalloc(sizeof(*pmon->mon_mpdu), GFP_ATOMIC);
+		if (!pmon->mon_mpdu)
+			return -ENOMEM;
+		break;
+	case HAL_RX_MON_STATUS_BUF_ADDR:
+		return ath12k_dp_mon_parse_status_buf(ar, pmon, tlv_data);
+	case HAL_RX_MON_STATUS_MPDU_END:
+		/* If no MSDU was attached, free the empty MPDU */
+		if (pmon->mon_mpdu->tail) {
+			pmon->mon_mpdu->tail->next = NULL;
+			list_add_tail(&pmon->mon_mpdu->list, &pmon->dp_rx_mon_mpdu_list);
+		} else {
+			kfree(pmon->mon_mpdu);
+		}
+		pmon->mon_mpdu = NULL;
+		break;
+	case HAL_RX_MON_STATUS_MSDU_END:
+		return ath12k_dp_mon_parse_status_msdu_end(pmon, tlv_data);
+	default:
+		break;
+	}
+
+	return 0;
+}
+
 static enum hal_rx_mon_status
-ath12k_dp_mon_parse_rx_dest(struct ath12k_base *ab, struct ath12k_mon_data *pmon,
+ath12k_dp_mon_parse_rx_dest(struct ath12k *ar, struct ath12k_mon_data *pmon,
 			    struct sk_buff *skb)
 {
-	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
-	struct hal_tlv_hdr *tlv;
+	struct hal_tlv_64_hdr *tlv;
+	struct ath12k_skb_rxcb *rxcb;
 	enum hal_rx_mon_status hal_status;
-	u32 tlv_userid = 0;
 	u16 tlv_tag, tlv_len;
 	u8 *ptr = skb->data;
 
-	memset(ppdu_info, 0, sizeof(struct hal_rx_mon_ppdu_info));
-
 	do {
-		tlv = (struct hal_tlv_hdr *)ptr;
-		tlv_tag = le32_get_bits(tlv->tl, HAL_TLV_HDR_TAG);
-		tlv_len = le32_get_bits(tlv->tl, HAL_TLV_HDR_LEN);
-		tlv_userid = le32_get_bits(tlv->tl, HAL_TLV_USR_ID);
-		ptr += sizeof(*tlv);
+		tlv = (struct hal_tlv_64_hdr *)ptr;
+		tlv_tag = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_TAG);
 
 		/* The actual length of PPDU_END is the combined length of many PHY
 		 * TLVs that follow. Skip the TLV header and
@@ -1222,16 +2369,30 @@
 
 		if (tlv_tag == HAL_RX_PPDU_END)
 			tlv_len = sizeof(struct hal_rx_rxpcu_classification_overview);
+		else
+			tlv_len = le64_get_bits(tlv->tl, HAL_TLV_64_HDR_LEN);
 
-		hal_status = ath12k_dp_mon_rx_parse_status_tlv(ab, pmon,
-							       tlv_tag, ptr, tlv_userid);
-		ptr += tlv_len;
-		ptr = PTR_ALIGN(ptr, HAL_TLV_ALIGN);
+		hal_status = ath12k_dp_mon_rx_parse_status_tlv(ar, pmon, tlv);
+
+		if (ar->monitor_started &&
+		    ath12k_dp_mon_parse_rx_dest_tlv(ar, pmon, hal_status, tlv->value))
+			return HAL_RX_MON_STATUS_PPDU_DONE;
+
+		ptr += sizeof(*tlv) + tlv_len;
+		ptr = PTR_ALIGN(ptr, HAL_TLV_64_ALIGN);
 
-		if ((ptr - skb->data) >= DP_RX_BUFFER_SIZE)
+		if ((ptr - skb->data) > skb->len)
 			break;
 
-	} while (hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE);
+	} while ((hal_status == HAL_RX_MON_STATUS_PPDU_NOT_DONE) ||
+		 (hal_status == HAL_RX_MON_STATUS_BUF_ADDR) ||
+		 (hal_status == HAL_RX_MON_STATUS_MPDU_START) ||
+		 (hal_status == HAL_RX_MON_STATUS_MPDU_END) ||
+		 (hal_status == HAL_RX_MON_STATUS_MSDU_END));
+
+	rxcb = ATH12K_SKB_RXCB(skb);
+	if (rxcb->is_end_of_ppdu)
+		hal_status = HAL_RX_MON_STATUS_PPDU_DONE;
 
 	return hal_status;
 }
@@ -1239,31 +2400,27 @@
 enum hal_rx_mon_status
 ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
 				  struct ath12k_mon_data *pmon,
-				  int mac_id,
 				  struct sk_buff *skb,
 				  struct napi_struct *napi)
 {
-	struct ath12k_base *ab = ar->ab;
 	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
 	struct dp_mon_mpdu *tmp;
 	struct dp_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
-	struct sk_buff *head_msdu, *tail_msdu;
-	enum hal_rx_mon_status hal_status = HAL_RX_MON_STATUS_BUF_DONE;
+	enum hal_rx_mon_status hal_status;
 
-	ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+	hal_status = ath12k_dp_mon_parse_rx_dest(ar, pmon, skb);
+	if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE)
+		return hal_status;
 
 	list_for_each_entry_safe(mon_mpdu, tmp, &pmon->dp_rx_mon_mpdu_list, list) {
 		list_del(&mon_mpdu->list);
-		head_msdu = mon_mpdu->head;
-		tail_msdu = mon_mpdu->tail;
 
-		if (head_msdu && tail_msdu) {
-			ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu,
-						 tail_msdu, ppdu_info, napi);
-		}
+		if (mon_mpdu->head && mon_mpdu->tail)
+			ath12k_dp_mon_rx_deliver(ar, mon_mpdu, ppdu_info, napi);
 
 		kfree(mon_mpdu);
 	}
+
 	return hal_status;
 }
 
@@ -1603,7 +2760,7 @@
 static enum dp_mon_tx_tlv_status
 ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab,
 				  struct ath12k_mon_data *pmon,
-				  u16 tlv_tag, u8 *tlv_data, u32 userid)
+				  u16 tlv_tag, const void *tlv_data, u32 userid)
 {
 	struct dp_mon_tx_ppdu_info *tx_ppdu_info;
 	enum dp_mon_tx_tlv_status status = DP_MON_TX_STATUS_PPDU_NOT_DONE;
@@ -1613,8 +2770,7 @@
 
 	switch (tlv_tag) {
 	case HAL_TX_FES_SETUP: {
-		struct hal_tx_fes_setup *tx_fes_setup =
-					(struct hal_tx_fes_setup *)tlv_data;
+		const struct hal_tx_fes_setup *tx_fes_setup = tlv_data;
 
 		info[0] = __le32_to_cpu(tx_fes_setup->info0);
 		tx_ppdu_info->ppdu_id = __le32_to_cpu(tx_fes_setup->schedule_id);
@@ -1625,8 +2781,7 @@
 	}
 
 	case HAL_TX_FES_STATUS_END: {
-		struct hal_tx_fes_status_end *tx_fes_status_end =
-			(struct hal_tx_fes_status_end *)tlv_data;
+		const struct hal_tx_fes_status_end *tx_fes_status_end = tlv_data;
 		u32 tst_15_0, tst_31_16;
 
 		info[0] = __le32_to_cpu(tx_fes_status_end->info0);
@@ -1643,8 +2798,7 @@
 	}
 
 	case HAL_RX_RESPONSE_REQUIRED_INFO: {
-		struct hal_rx_resp_req_info *rx_resp_req_info =
-			(struct hal_rx_resp_req_info *)tlv_data;
+		const struct hal_rx_resp_req_info *rx_resp_req_info = tlv_data;
 		u32 addr_32;
 		u16 addr_16;
 
@@ -1689,8 +2843,7 @@
 	}
 
 	case HAL_PCU_PPDU_SETUP_INIT: {
-		struct hal_tx_pcu_ppdu_setup_init *ppdu_setup =
-			(struct hal_tx_pcu_ppdu_setup_init *)tlv_data;
+		const struct hal_tx_pcu_ppdu_setup_init *ppdu_setup = tlv_data;
 		u32 addr_32;
 		u16 addr_16;
 
@@ -1736,8 +2889,7 @@
 	}
 
 	case HAL_TX_QUEUE_EXTENSION: {
-		struct hal_tx_queue_exten *tx_q_exten =
-			(struct hal_tx_queue_exten *)tlv_data;
+		const struct hal_tx_queue_exten *tx_q_exten = tlv_data;
 
 		info[0] = __le32_to_cpu(tx_q_exten->info0);
 
@@ -1749,8 +2901,7 @@
 	}
 
 	case HAL_TX_FES_STATUS_START: {
-		struct hal_tx_fes_status_start *tx_fes_start =
-			(struct hal_tx_fes_status_start *)tlv_data;
+		const struct hal_tx_fes_status_start *tx_fes_start = tlv_data;
 
 		info[0] = __le32_to_cpu(tx_fes_start->info0);
 
@@ -1761,8 +2912,7 @@
 	}
 
 	case HAL_TX_FES_STATUS_PROT: {
-		struct hal_tx_fes_status_prot *tx_fes_status =
-			(struct hal_tx_fes_status_prot *)tlv_data;
+		const struct hal_tx_fes_status_prot *tx_fes_status = tlv_data;
 		u32 start_timestamp;
 		u32 end_timestamp;
 
@@ -1789,8 +2939,7 @@
 
 	case HAL_TX_FES_STATUS_START_PPDU:
 	case HAL_TX_FES_STATUS_START_PROT: {
-		struct hal_tx_fes_status_start_prot *tx_fes_stat_start =
-			(struct hal_tx_fes_status_start_prot *)tlv_data;
+		const struct hal_tx_fes_status_start_prot *tx_fes_stat_start = tlv_data;
 		u64 ppdu_ts;
 
 		info[0] = __le32_to_cpu(tx_fes_stat_start->info0);
@@ -1805,8 +2954,7 @@
 	}
 
 	case HAL_TX_FES_STATUS_USER_PPDU: {
-		struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu =
-			(struct hal_tx_fes_status_user_ppdu *)tlv_data;
+		const struct hal_tx_fes_status_user_ppdu *tx_fes_usr_ppdu = tlv_data;
 
 		info[0] = __le32_to_cpu(tx_fes_usr_ppdu->info0);
 
@@ -1849,8 +2997,7 @@
 		break;
 
 	case HAL_RX_FRAME_BITMAP_ACK: {
-		struct hal_rx_frame_bitmap_ack *fbm_ack =
-			(struct hal_rx_frame_bitmap_ack *)tlv_data;
+		const struct hal_rx_frame_bitmap_ack *fbm_ack = tlv_data;
 		u32 addr_32;
 		u16 addr_16;
 
@@ -1868,8 +3015,7 @@
 	}
 
 	case HAL_MACTX_PHY_DESC: {
-		struct hal_tx_phy_desc *tx_phy_desc =
-			(struct hal_tx_phy_desc *)tlv_data;
+		const struct hal_tx_phy_desc *tx_phy_desc = tlv_data;
 
 		info[0] = __le32_to_cpu(tx_phy_desc->info0);
 		info[1] = __le32_to_cpu(tx_phy_desc->info1);
@@ -1950,21 +3096,18 @@
 }
 
 static void
-ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar, int mac_id,
+ath12k_dp_mon_tx_process_ppdu_info(struct ath12k *ar,
 				   struct napi_struct *napi,
 				   struct dp_mon_tx_ppdu_info *tx_ppdu_info)
 {
 	struct dp_mon_mpdu *tmp, *mon_mpdu;
-	struct sk_buff *head_msdu, *tail_msdu;
 
 	list_for_each_entry_safe(mon_mpdu, tmp,
 				 &tx_ppdu_info->dp_tx_mon_mpdu_list, list) {
 		list_del(&mon_mpdu->list);
-		head_msdu = mon_mpdu->head;
-		tail_msdu = mon_mpdu->tail;
 
-		if (head_msdu)
-			ath12k_dp_mon_rx_deliver(ar, mac_id, head_msdu, tail_msdu,
+		if (mon_mpdu->head)
+			ath12k_dp_mon_rx_deliver(ar, mon_mpdu,
 						 &tx_ppdu_info->rx_status, napi);
 
 		kfree(mon_mpdu);
@@ -1974,7 +3117,6 @@
 enum hal_rx_mon_status
 ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
 				  struct ath12k_mon_data *pmon,
-				  int mac_id,
 				  struct sk_buff *skb,
 				  struct napi_struct *napi,
 				  u32 ppdu_id)
@@ -2021,155 +3163,43 @@
 			break;
 	} while (tlv_status != DP_MON_TX_FES_STATUS_END);
 
-	ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_data_ppdu_info);
-	ath12k_dp_mon_tx_process_ppdu_info(ar, mac_id, napi, tx_prot_ppdu_info);
+	ath12k_dp_mon_tx_process_ppdu_info(ar, napi, tx_data_ppdu_info);
+	ath12k_dp_mon_tx_process_ppdu_info(ar, napi, tx_prot_ppdu_info);
 
 	return tlv_status;
 }
 
-int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget,
-			       enum dp_monitor_mode monitor_mode,
-			       struct napi_struct *napi)
-{
-	struct hal_mon_dest_desc *mon_dst_desc;
-	struct ath12k_pdev_dp *pdev_dp = &ar->dp;
-	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&pdev_dp->mon_data;
-	struct ath12k_base *ab = ar->ab;
-	struct ath12k_dp *dp = &ab->dp;
-	struct sk_buff *skb;
-	struct ath12k_skb_rxcb *rxcb;
-	struct dp_srng *mon_dst_ring;
-	struct hal_srng *srng;
-	struct dp_rxdma_mon_ring *buf_ring;
-	u64 cookie;
-	u32 ppdu_id;
-	int num_buffs_reaped = 0, srng_id, buf_id;
-	u8 dest_idx = 0, i;
-	bool end_of_ppdu;
-	struct hal_rx_mon_ppdu_info *ppdu_info;
-	struct ath12k_peer *peer = NULL;
-
-	ppdu_info = &pmon->mon_ppdu_info;
-	memset(ppdu_info, 0, sizeof(*ppdu_info));
-	ppdu_info->peer_id = HAL_INVALID_PEERID;
-
-	srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
-
-	if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE) {
-		mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
-		buf_ring = &dp->rxdma_mon_buf_ring;
-	} else {
-		return 0;
-	}
-
-	srng = &ab->hal.srng_list[mon_dst_ring->ring_id];
-
-	spin_lock_bh(&srng->lock);
-	ath12k_hal_srng_access_begin(ab, srng);
-
-	while (likely(*budget)) {
-		*budget -= 1;
-		mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
-		if (unlikely(!mon_dst_desc))
-			break;
-
-		cookie = le32_to_cpu(mon_dst_desc->cookie);
-		buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
-
-		spin_lock_bh(&buf_ring->idr_lock);
-		skb = idr_remove(&buf_ring->bufs_idr, buf_id);
-		spin_unlock_bh(&buf_ring->idr_lock);
-
-		if (unlikely(!skb)) {
-			ath12k_warn(ab, "monitor destination with invalid buf_id %d\n",
-				    buf_id);
-			goto move_next;
-		}
-
-		rxcb = ATH12K_SKB_RXCB(skb);
-		dma_unmap_single(ab->dev, rxcb->paddr,
-				 skb->len + skb_tailroom(skb),
-				 DMA_FROM_DEVICE);
-
-		pmon->dest_skb_q[dest_idx] = skb;
-		dest_idx++;
-		ppdu_id = le32_to_cpu(mon_dst_desc->ppdu_id);
-		end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
-					    HAL_MON_DEST_INFO0_END_OF_PPDU);
-		if (!end_of_ppdu)
-			continue;
-
-		for (i = 0; i < dest_idx; i++) {
-			skb = pmon->dest_skb_q[i];
-
-			if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
-				ath12k_dp_mon_rx_parse_mon_status(ar, pmon, mac_id,
-								  skb, napi);
-			else
-				ath12k_dp_mon_tx_parse_mon_status(ar, pmon, mac_id,
-								  skb, napi, ppdu_id);
-
-			peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
-
-			if (!peer || !peer->sta) {
-				ath12k_dbg(ab, ATH12K_DBG_DATA,
-					   "failed to find the peer with peer_id %d\n",
-					   ppdu_info->peer_id);
-				dev_kfree_skb_any(skb);
-				continue;
-			}
-
-			dev_kfree_skb_any(skb);
-			pmon->dest_skb_q[i] = NULL;
-		}
-
-		dest_idx = 0;
-move_next:
-		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
-		ath12k_hal_srng_src_get_next_entry(ab, srng);
-		num_buffs_reaped++;
-	}
-
-	ath12k_hal_srng_access_end(ab, srng);
-	spin_unlock_bh(&srng->lock);
-
-	return num_buffs_reaped;
-}
-
 static void
 ath12k_dp_mon_rx_update_peer_rate_table_stats(struct ath12k_rx_peer_stats *rx_stats,
 					      struct hal_rx_mon_ppdu_info *ppdu_info,
 					      struct hal_rx_user_status *user_stats,
 					      u32 num_msdu)
 {
-	u32 rate_idx = 0;
+	struct ath12k_rx_peer_rate_stats *stats;
 	u32 mcs_idx = (user_stats) ? user_stats->mcs : ppdu_info->mcs;
 	u32 nss_idx = (user_stats) ? user_stats->nss - 1 : ppdu_info->nss - 1;
 	u32 bw_idx = ppdu_info->bw;
 	u32 gi_idx = ppdu_info->gi;
+	u32 len;
 
-	if ((mcs_idx > HAL_RX_MAX_MCS_HE) || (nss_idx >= HAL_RX_MAX_NSS) ||
-	    (bw_idx >= HAL_RX_BW_MAX) || (gi_idx >= HAL_RX_GI_MAX)) {
+	if (mcs_idx > HAL_RX_MAX_MCS_HT || nss_idx >= HAL_RX_MAX_NSS ||
+	    bw_idx >= HAL_RX_BW_MAX || gi_idx >= HAL_RX_GI_MAX) {
 		return;
 	}
 
-	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11N ||
-	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AC) {
-		rate_idx = mcs_idx * 8 + 8 * 10 * nss_idx;
-		rate_idx += bw_idx * 2 + gi_idx;
-	} else if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX) {
+	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11AX ||
+	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE)
 		gi_idx = ath12k_he_gi_to_nl80211_he_gi(ppdu_info->gi);
-		rate_idx = mcs_idx * 12 + 12 * 12 * nss_idx;
-		rate_idx += bw_idx * 3 + gi_idx;
-	} else {
-		return;
-	}
 
-	rx_stats->pkt_stats.rx_rate[rate_idx] += num_msdu;
+	rx_stats->pkt_stats.rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += num_msdu;
+	stats = &rx_stats->byte_stats;
+
 	if (user_stats)
-		rx_stats->byte_stats.rx_rate[rate_idx] += user_stats->mpdu_ok_byte_count;
+		len = user_stats->mpdu_ok_byte_count;
 	else
-		rx_stats->byte_stats.rx_rate[rate_idx] += ppdu_info->mpdu_len;
+		len = ppdu_info->mpdu_len;
+
+	stats->rx_rate[bw_idx][gi_idx][nss_idx][mcs_idx] += len;
 }
 
 static void ath12k_dp_mon_rx_update_peer_su_stats(struct ath12k *ar,
@@ -2179,10 +3209,14 @@
 	struct ath12k_rx_peer_stats *rx_stats = arsta->rx_stats;
 	u32 num_msdu;
 
-	if (!rx_stats)
-		return;
 
+	if (ppdu_info->rssi_comb != ATH12K_INVALID_RSSI) {
 	arsta->rssi_comb = ppdu_info->rssi_comb;
+		ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
+	}
+
+	if (!rx_stats)
+		return;
 
 	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
 		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
@@ -2255,6 +3289,12 @@
 		rx_stats->byte_stats.he_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
 	}
 
+	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11BE &&
+	    ppdu_info->mcs <= HAL_RX_MAX_MCS_BE) {
+		rx_stats->pkt_stats.be_mcs_count[ppdu_info->mcs] += num_msdu;
+		rx_stats->byte_stats.be_mcs_count[ppdu_info->mcs] += ppdu_info->mpdu_len;
+	}
+
 	if ((ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
 	     ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) &&
 	     ppdu_info->rate < HAL_RX_LEGACY_RATE_INVALID) {
@@ -2336,14 +3376,14 @@
 	struct ath12k_peer *peer;
 	u32 num_msdu;
 
-	if (user_stats->ast_index == 0 || user_stats->ast_index == 0xFFFF)
+	if (ppdu_info->peer_id == HAL_INVALID_PEERID)
 		return;
 
-	peer = ath12k_peer_find_by_ast(ar->ab, user_stats->ast_index);
+	peer = ath12k_peer_find_by_id(ar->ab, ppdu_info->peer_id);
 
 	if (!peer) {
-		ath12k_warn(ar->ab, "peer ast idx %d can't be found\n",
-			    user_stats->ast_index);
+		ath12k_warn(ar->ab, "peer with peer id %d can't be found\n",
+			    ppdu_info->peer_id);
 		return;
 	}
 
@@ -2351,11 +3391,14 @@
 	arsta = &ahsta->deflink;
 	rx_stats = arsta->rx_stats;
 
+	if (ppdu_info->rssi_comb != ATH12K_INVALID_RSSI) {
+		arsta->rssi_comb = ppdu_info->rssi_comb;
+		ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
+	}
+
 	if (!rx_stats)
 		return;
 
-	arsta->rssi_comb = ppdu_info->rssi_comb;
-
 	num_msdu = user_stats->tcp_msdu_count + user_stats->tcp_ack_msdu_count +
 		   user_stats->udp_msdu_count + user_stats->other_msdu_count;
 
@@ -2441,8 +3484,15 @@
 		ath12k_dp_mon_rx_update_user_stats(ar, ppdu_info, i);
 }
 
-int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
-				   struct napi_struct *napi, int *budget)
+static void
+ath12k_dp_mon_rx_memset_ppdu_info(struct hal_rx_mon_ppdu_info *ppdu_info)
+{
+	memset(ppdu_info, 0, sizeof(*ppdu_info));
+	ppdu_info->peer_id = HAL_INVALID_PEERID;
+}
+
+int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget,
+			       struct napi_struct *napi)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_pdev_dp *pdev_dp = &ar->dp;
@@ -2458,13 +3508,14 @@
 	struct ath12k_sta *ahsta = NULL;
 	struct ath12k_link_sta *arsta;
 	struct ath12k_peer *peer;
+	struct sk_buff_head skb_list;
 	u64 cookie;
 	int num_buffs_reaped = 0, srng_id, buf_id;
-	u8 dest_idx = 0, i;
-	bool end_of_ppdu;
-	u32 hal_status;
+	u32 hal_status, end_offset, info0, end_reason;
+	u8 pdev_idx = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, ar->pdev_idx);
 
-	srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, mac_id);
+	__skb_queue_head_init(&skb_list);
+	srng_id = ath12k_hw_mac_id_to_srng_id(ab->hw_params, pdev_idx);
 	mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id];
 	buf_ring = &dp->rxdma_mon_buf_ring;
 
@@ -2477,6 +3528,15 @@
 		mon_dst_desc = ath12k_hal_srng_dst_peek(ab, srng);
 		if (unlikely(!mon_dst_desc))
 			break;
+
+		/* In case of empty descriptor, the cookie in the ring descriptor
+		 * is invalid. Therefore, this entry is skipped, and ring processing
+		 * continues.
+		 */
+		info0 = le32_to_cpu(mon_dst_desc->info0);
+		if (u32_get_bits(info0, HAL_MON_DEST_INFO0_EMPTY_DESC))
+			goto move_next;
+
 		cookie = le32_to_cpu(mon_dst_desc->cookie);
 		buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);
 
@@ -2494,34 +3554,81 @@
 		dma_unmap_single(ab->dev, rxcb->paddr,
 				 skb->len + skb_tailroom(skb),
 				 DMA_FROM_DEVICE);
-		pmon->dest_skb_q[dest_idx] = skb;
-		dest_idx++;
-		end_of_ppdu = le32_get_bits(mon_dst_desc->info0,
-					    HAL_MON_DEST_INFO0_END_OF_PPDU);
-		if (!end_of_ppdu)
-			continue;
 
-		for (i = 0; i < dest_idx; i++) {
-			skb = pmon->dest_skb_q[i];
-			hal_status = ath12k_dp_mon_parse_rx_dest(ab, pmon, skb);
+		end_reason = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_REASON);
+
+		/* HAL_MON_FLUSH_DETECTED implies that an rx flush was received at the
+		 * end of the rx PPDU, and HAL_MON_PPDU_TRUNCATED implies that the PPDU
+		 * got truncated due to a system level error. In both cases the buffer
+		 * data can be discarded.
+		 */
+		if ((end_reason == HAL_MON_FLUSH_DETECTED) ||
+		    (end_reason == HAL_MON_PPDU_TRUNCATED)) {
+			ath12k_dbg(ab, ATH12K_DBG_DATA,
+				   "Monitor dest descriptor end reason %d", end_reason);
+			dev_kfree_skb_any(skb);
+			goto move_next;
+		}
+
+		/* Decrement the budget when the ring descriptor carries
+		 * HAL_MON_END_OF_PPDU, so that one PPDU worth of data is always
+		 * reaped. This helps to efficiently utilize the NAPI budget.
+		 */
+		if (end_reason == HAL_MON_END_OF_PPDU) {
+			*budget -= 1;
+			rxcb->is_end_of_ppdu = true;
+		}
+
+		end_offset = u32_get_bits(info0, HAL_MON_DEST_INFO0_END_OFFSET);
+		if (likely(end_offset <= DP_RX_BUFFER_SIZE)) {
+			skb_put(skb, end_offset);
+		} else {
+			ath12k_warn(ab,
+				    "invalid offset on mon stats destination %u\n",
+				    end_offset);
+			skb_put(skb, DP_RX_BUFFER_SIZE);
+		}
+
+		__skb_queue_tail(&skb_list, skb);
+
+move_next:
+		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
+		ath12k_hal_srng_dst_get_next_entry(ab, srng);
+		num_buffs_reaped++;
+	}
 
-			if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
-			    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+	ath12k_hal_srng_access_end(ab, srng);
+	spin_unlock_bh(&srng->lock);
+
+	if (!num_buffs_reaped)
+		return 0;
+
+	/* In some cases, one PPDU worth of data can be spread across multiple NAPI
+	 * schedules. To avoid losing existing parsed ppdu_info information, skip
+	 * the memset of the ppdu_info structure and continue processing it.
+	 */
+	if (!ppdu_info->ppdu_continuation)
+		ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
+
+	while ((skb = __skb_dequeue(&skb_list))) {
+		hal_status = ath12k_dp_mon_rx_parse_mon_status(ar, pmon, skb, napi);
+		if (hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
+			ppdu_info->ppdu_continuation = true;
 				dev_kfree_skb_any(skb);
 				continue;
 			}
 
+		if (ppdu_info->peer_id == HAL_INVALID_PEERID)
+			goto free_skb;
+
 			rcu_read_lock();
 			spin_lock_bh(&ab->base_lock);
 			peer = ath12k_peer_find_by_id(ab, ppdu_info->peer_id);
 			if (!peer || !peer->sta) {
 				ath12k_dbg(ab, ATH12K_DBG_DATA,
-					   "failed to find the peer with peer_id %d\n",
+				   "failed to find the peer with monitor peer_id %d\n",
 					   ppdu_info->peer_id);
-				spin_unlock_bh(&ab->base_lock);
-				rcu_read_unlock();
-				dev_kfree_skb_any(skb);
-				continue;
+			goto next_skb;
 			}
 
 			if (ppdu_info->reception_type == HAL_RX_RECEPTION_TYPE_SU) {
@@ -2535,22 +3642,14 @@
 				ath12k_dp_mon_rx_update_peer_mu_stats(ar, ppdu_info);
 			}
 
+next_skb:
 			spin_unlock_bh(&ab->base_lock);
 			rcu_read_unlock();
+free_skb:
 			dev_kfree_skb_any(skb);
-			memset(ppdu_info, 0, sizeof(*ppdu_info));
-			ppdu_info->peer_id = HAL_INVALID_PEERID;
+		ath12k_dp_mon_rx_memset_ppdu_info(ppdu_info);
 		}
 
-		dest_idx = 0;
-move_next:
-		ath12k_dp_mon_buf_replenish(ab, buf_ring, 1);
-		ath12k_hal_srng_dst_get_next_entry(ab, srng);
-		num_buffs_reaped++;
-	}
-
-	ath12k_hal_srng_access_end(ab, srng);
-	spin_unlock_bh(&srng->lock);
 	return num_buffs_reaped;
 }
 
@@ -2561,11 +3660,10 @@
 	struct ath12k *ar = ath12k_ab_to_ar(ab, mac_id);
 	int num_buffs_reaped = 0;
 
-	if (!ar->monitor_started)
-		ath12k_dp_mon_rx_process_stats(ar, mac_id, napi, &budget);
-	else
-		num_buffs_reaped = ath12k_dp_mon_srng_process(ar, mac_id, &budget,
-							      monitor_mode, napi);
+	if (ab->hw_params->rxdma1_enable) {
+		if (monitor_mode == ATH12K_DP_RX_MONITOR_MODE)
+			num_buffs_reaped = ath12k_dp_mon_srng_process(ar, &budget, napi);
+	}
 
 	return num_buffs_reaped;
 }
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/dp_mon.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp_mon.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/dp_mon.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp_mon.h	2025-09-25 17:40:34.143360165 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_DP_MON_H
@@ -9,6 +9,9 @@
 
 #include "core.h"
 
+#define ATH12K_MON_RX_DOT11_OFFSET	5
+#define ATH12K_MON_RX_PKT_OFFSET	8
+
 enum dp_monitor_mode {
 	ATH12K_DP_TX_MONITOR_MODE,
 	ATH12K_DP_RX_MONITOR_MODE
@@ -77,14 +80,11 @@
 enum hal_rx_mon_status
 ath12k_dp_mon_rx_parse_mon_status(struct ath12k *ar,
 				  struct ath12k_mon_data *pmon,
-				  int mac_id, struct sk_buff *skb,
+				  struct sk_buff *skb,
 				  struct napi_struct *napi);
 int ath12k_dp_mon_buf_replenish(struct ath12k_base *ab,
 				struct dp_rxdma_mon_ring *buf_ring,
 				int req_entries);
-int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id,
-			       int *budget, enum dp_monitor_mode monitor_mode,
-			       struct napi_struct *napi);
 int ath12k_dp_mon_process_ring(struct ath12k_base *ab, int mac_id,
 			       struct napi_struct *napi, int budget,
 			       enum dp_monitor_mode monitor_mode);
@@ -96,11 +96,9 @@
 enum hal_rx_mon_status
 ath12k_dp_mon_tx_parse_mon_status(struct ath12k *ar,
 				  struct ath12k_mon_data *pmon,
-				  int mac_id,
 				  struct sk_buff *skb,
 				  struct napi_struct *napi,
 				  u32 ppdu_id);
 void ath12k_dp_mon_rx_process_ulofdma(struct hal_rx_mon_ppdu_info *ppdu_info);
-int ath12k_dp_mon_rx_process_stats(struct ath12k *ar, int mac_id,
-				   struct napi_struct *napi, int *budget);
+int ath12k_dp_mon_srng_process(struct ath12k *ar, int *budget, struct napi_struct *napi);
 #endif
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/dp_rx.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp_rx.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/dp_rx.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp_rx.c	2025-09-29 14:23:07.605732410 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/ieee80211.h>
@@ -228,12 +228,6 @@
 	ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
 }
 
-static u16 ath12k_dp_rxdesc_get_mpdu_frame_ctrl(struct ath12k_base *ab,
-						struct hal_rx_desc *desc)
-{
-	return ab->hal_rx_ops->rx_desc_get_mpdu_frame_ctl(desc);
-}
-
 static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
 						struct hal_rx_desc *desc)
 {
@@ -349,9 +343,10 @@
 				 skb->data);
 		}
 
-		paddr = dma_map_single(ab->dev, skb->data,
+		paddr = dma_map_single_attrs(ab->dev, skb->data,
 				       skb->len + skb_tailroom(skb),
-				       DMA_FROM_DEVICE);
+					     DMA_FROM_DEVICE,
+					     DMA_ATTR_SKIP_CPU_SYNC);
 		if (dma_mapping_error(ab->dev, paddr))
 			goto fail_free_skb;
 
@@ -547,56 +542,11 @@
 	return 0;
 }
 
-void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
-{
-	struct ath12k_dp *dp = &ab->dp;
-	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
-	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
-
-	spin_lock_bh(&dp->reo_cmd_lock);
-	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
-		list_del(&cmd->list);
-		dma_unmap_single(ab->dev, cmd->data.paddr,
-				 cmd->data.size, DMA_BIDIRECTIONAL);
-		kfree(cmd->data.vaddr);
-		kfree(cmd);
-	}
-
-	list_for_each_entry_safe(cmd_cache, tmp_cache,
-				 &dp->reo_cmd_cache_flush_list, list) {
-		list_del(&cmd_cache->list);
-		dp->reo_cmd_cache_flush_count--;
-		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
-				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
-		kfree(cmd_cache->data.vaddr);
-		kfree(cmd_cache);
-	}
-	spin_unlock_bh(&dp->reo_cmd_lock);
-}
-
-static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
-				   enum hal_reo_cmd_status status)
-{
-	struct ath12k_dp_rx_tid *rx_tid = ctx;
-
-	if (status != HAL_REO_CMD_SUCCESS)
-		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
-			    rx_tid->tid, status);
-
-	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
-			 DMA_BIDIRECTIONAL);
-	kfree(rx_tid->vaddr);
-	rx_tid->vaddr = NULL;
-}
-
-static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
+static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab,
 				  enum hal_reo_cmd_type type,
-				  struct ath12k_hal_reo_cmd *cmd,
-				  void (*cb)(struct ath12k_dp *dp, void *ctx,
-					     enum hal_reo_cmd_status status))
+				  struct ath12k_hal_reo_cmd *cmd)
 {
 	struct ath12k_dp *dp = &ab->dp;
-	struct ath12k_dp_rx_reo_cmd *dp_cmd;
 	struct hal_srng *cmd_ring;
 	int cmd_num;
 
@@ -611,128 +561,134 @@
 	if (cmd_num == 0)
 		return -EINVAL;
 
-	if (!cb)
-		return 0;
-
-	/* Can this be optimized so that we keep the pending command list only
-	 * for tid delete command to free up the resource on the command status
-	 * indication?
-	 */
-	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
-
-	if (!dp_cmd)
-		return -ENOMEM;
+	return cmd_num;
+}
 
-	memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
-	dp_cmd->cmd_num = cmd_num;
-	dp_cmd->handler = cb;
+void ath12k_dp_reo_cmd_work_func(struct work_struct *work)
+{
+	struct ath12k_dp *dp = container_of(work, struct ath12k_dp,
+					    reo_cmd_work.work);
+	struct ath12k_base *ab = dp->ab;
+	struct ath12k_dp_rx_reo_cmd_ctx *cmd_ctx, *tmp;
 
 	spin_lock_bh(&dp->reo_cmd_lock);
-	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
-	spin_unlock_bh(&dp->reo_cmd_lock);
 
-	return 0;
+	/* push all new commands */
+	list_for_each_entry_safe(cmd_ctx, tmp, &dp->reo_cmd_pending, next) {
+		int ret;
+
+		ret = ath12k_dp_reo_cmd_send(ab, cmd_ctx->type,
+					     &cmd_ctx->cmd);
+		if (ret == -ENOBUFS) {
+			/* queue full, will push more later */
+			dp->reo_cmd_full = true;
+			break;
+		}
+		dp->reo_cmd_full = false;
+
+		if (ret < 0) {
+			/* unknown error */
+			cmd_ctx->status = HAL_REO_CMD_FAILED;
+			list_del_init(&cmd_ctx->next);
+			ath12k_warn(ab,
+				    "failed to push reo cmd type %d: ret=%d\n",
+				    cmd_ctx->type,
+				    ret);
+			complete(&cmd_ctx->complete);
+			continue;
 }
 
-static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
-				      struct ath12k_dp_rx_tid *rx_tid)
-{
-	struct ath12k_hal_reo_cmd cmd = {0};
-	unsigned long tot_desc_sz, desc_sz;
-	int ret;
+		cmd_ctx->deadline = jiffies + DP_REO_CMD_TIMEOUT;
+		cmd_ctx->cmd_num = ret;
+		list_move_tail(&cmd_ctx->next, &dp->reo_cmd_sent);
+	}
 
-	tot_desc_sz = rx_tid->size;
-	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
+	/* process timeouts */
+	list_for_each_entry_safe(cmd_ctx, tmp, &dp->reo_cmd_sent, next) {
+		unsigned long now = jiffies;
+
+		if (!time_after(now, cmd_ctx->deadline)) {
+			/* assume all commands have the same timeout,
+			 * no need to find the minimum */
+			queue_delayed_work(ab->workqueue,
+					   &dp->reo_cmd_work,
+					   cmd_ctx->deadline - now);
+			break;
+		}
 
-	while (tot_desc_sz > desc_sz) {
-		tot_desc_sz -= desc_sz;
-		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
-		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
-		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
-					     HAL_REO_CMD_FLUSH_CACHE, &cmd,
-					     NULL);
-		if (ret)
 			ath12k_warn(ab,
-				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
-				    rx_tid->tid, ret);
+			    "reo cmd type %d num %d timeout, "
+			    "canceled\n",
+			    cmd_ctx->type, cmd_ctx->cmd_num);
+		cmd_ctx->status = HAL_REO_CMD_FAILED;
+		list_del_init(&cmd_ctx->next);
+		complete(&cmd_ctx->complete);
 	}
 
-	memset(&cmd, 0, sizeof(cmd));
-	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
-	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
-	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
-	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
-				     HAL_REO_CMD_FLUSH_CACHE,
-				     &cmd, ath12k_dp_reo_cmd_free);
-	if (ret) {
-		ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
-			   rx_tid->tid, ret);
-		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
-				 DMA_BIDIRECTIONAL);
-		kfree(rx_tid->vaddr);
-		rx_tid->vaddr = NULL;
-	}
+	spin_unlock_bh(&dp->reo_cmd_lock);
 }
 
-static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
+static void
+ath12k_dp_reo_cmd_handle_done(struct ath12k_base *ab,
+			      int cmd_num,
 				      enum hal_reo_cmd_status status)
 {
-	struct ath12k_base *ab = dp->ab;
-	struct ath12k_dp_rx_tid *rx_tid = ctx;
-	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;
+	struct ath12k_dp *dp = &ab->dp;
+	struct ath12k_dp_rx_reo_cmd_ctx *cmd_ctx, *tmp;
 
-	if (status == HAL_REO_CMD_DRAIN) {
-		goto free_desc;
-	} else if (status != HAL_REO_CMD_SUCCESS) {
-		/* Shouldn't happen! Cleanup in case of other failure? */
-		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
-			    rx_tid->tid, status);
-		return;
+	spin_lock_bh(&dp->reo_cmd_lock);
+	list_for_each_entry_safe(cmd_ctx, tmp, &dp->reo_cmd_sent, next) {
+		if (cmd_ctx->cmd_num != cmd_num)
+			continue;
+
+		cmd_ctx->status = status;
+		list_del_init(&cmd_ctx->next);
+		complete(&cmd_ctx->complete);
+
+		/* kick workqueue to push more commands if it was
+		 * full */
+		if (dp->reo_cmd_full)
+			mod_delayed_work(ab->workqueue, &dp->reo_cmd_work, 0);
+	}
+	spin_unlock_bh(&dp->reo_cmd_lock);
 	}
 
-	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
-	if (!elem)
-		goto free_desc;
+static void
+ath12k_dp_reo_cmd_enqueue(struct ath12k_base *ab,
+			  struct ath12k_dp_rx_reo_cmd_ctx *cmds,
+			  size_t count)
+{
+	struct ath12k_dp *dp = &ab->dp;
+	size_t i;
 
-	elem->ts = jiffies;
-	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
+	for (i = 0; i < count; i++) {
+		init_completion(&cmds[i].complete);
+		/* All commands must be waited for; this is how we
+		 * honour completion. */
+		cmds[i].cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
+	}
 
 	spin_lock_bh(&dp->reo_cmd_lock);
-	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
-	dp->reo_cmd_cache_flush_count++;
-
-	/* Flush and invalidate aged REO desc from HW cache */
-	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
-				 list) {
-		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
-		    time_after(jiffies, elem->ts +
-			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
-			list_del(&elem->list);
-			dp->reo_cmd_cache_flush_count--;
-
-			/* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
-			 * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
-			 * is used in only two contexts, one is in this function called
-			 * from napi and the other in ath12k_dp_free during core destroy.
-			 * Before dp_free, the irqs would be disabled and would wait to
-			 * synchronize. Hence there wouldn’t be any race against add or
-			 * delete to this list. Hence unlock-lock is safe here.
-			 */
+	for (i = 0; i < count; i++)
+		list_add_tail(&cmds[i].next, &dp->reo_cmd_pending);
 			spin_unlock_bh(&dp->reo_cmd_lock);
-
-			ath12k_dp_reo_cache_flush(ab, &elem->data);
-			kfree(elem);
-			spin_lock_bh(&dp->reo_cmd_lock);
+	mod_delayed_work(ab->workqueue, &dp->reo_cmd_work, 0);
 		}
+
+static void
+ath12k_dp_reo_cmd_wait(struct ath12k_dp_rx_reo_cmd_ctx *cmd)
+{
+	wait_for_completion(&cmd->complete);
 	}
-	spin_unlock_bh(&dp->reo_cmd_lock);
 
-	return;
-free_desc:
-	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
-			 DMA_BIDIRECTIONAL);
-	kfree(rx_tid->vaddr);
-	rx_tid->vaddr = NULL;
+static void
+ath12k_dp_reo_cmd_wait_all(struct ath12k_dp_rx_reo_cmd_ctx *cmds,
+			   size_t count)
+{
+	size_t i;
+
+	for (i = 0; i < count; i++)
+		ath12k_dp_reo_cmd_wait(&cmds[i]);
 }
 
 static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
@@ -740,13 +696,20 @@
 {
 	struct ath12k_reo_queue_ref *qref;
 	struct ath12k_dp *dp = &ab->dp;
+	bool ml_peer = false;
 
 	if (!ab->hw_params->reoq_lut_support)
 		return;
 
-	/* TODO: based on ML peer or not, select the LUT. below assumes non
-	 * ML peer
-	 */
+	if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+		peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+		ml_peer = true;
+	}
+
+	if (ml_peer)
+		qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+				(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+	else
 	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
 			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
 
@@ -761,13 +724,20 @@
 {
 	struct ath12k_reo_queue_ref *qref;
 	struct ath12k_dp *dp = &ab->dp;
+	bool ml_peer = false;
 
 	if (!ab->hw_params->reoq_lut_support)
 		return;
 
-	/* TODO: based on ML peer or not, select the LUT. below assumes non
-	 * ML peer
-	 */
+	if (peer_id & ATH12K_PEER_ML_ID_VALID) {
+		peer_id &= ~ATH12K_PEER_ML_ID_VALID;
+		ml_peer = true;
+	}
+
+	if (ml_peer)
+		qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
+				(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
+	else
 	qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
 			(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
 
@@ -776,35 +746,162 @@
 		      u32_encode_bits(tid, DP_REO_QREF_NUM);
 }
 
-void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
-				  struct ath12k_peer *peer, u8 tid)
+void
+ath12k_dp_rx_peer_tid_delete_prepare(struct ath12k *ar,
+				     struct ath12k_peer *peer, u8 tid,
+				     struct ath12k_dp_rx_tid_delete_ctx *dctx)
 {
-	struct ath12k_hal_reo_cmd cmd = {0};
 	struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
-	int ret;
+	struct ath12k_tid_del_ctx *del_tid = &dctx->del_tid[tid];
+
+	del_tid->tid = rx_tid->tid;
+	del_tid->vaddr = rx_tid->vaddr;
+	del_tid->paddr = rx_tid->paddr;
+	del_tid->size = rx_tid->size;
+	del_tid->active = rx_tid->active;
 
 	if (!rx_tid->active)
 		return;
 
-	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
+	if (peer->mlo)
+		ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
+	else
+		ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
+
+	rx_tid->active = false;
+}
+
+static size_t
+ath12k_dp_reo_cache_get_cmd_count(struct ath12k_base *ab,
+				  struct ath12k_tid_del_ctx *del_tid)
+{
+	unsigned long desc_sz;
+	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
+	return DIV_ROUND_UP(del_tid->size, desc_sz);
+}
+
+static size_t
+ath12k_dp_reo_cache_flush_fill_cmds(struct ath12k_base *ab,
+				    struct ath12k_tid_del_ctx *rx_tid,
+				    struct ath12k_dp_rx_reo_cmd_ctx *cmds,
+				    size_t count)
+{
+	struct ath12k_hal_reo_cmd cmd = {0};
+	struct ath12k_dp_rx_reo_cmd_ctx *cmd_ctx;
+	unsigned long tot_desc_sz, desc_sz;
+	size_t used = 0;
+
+	tot_desc_sz = rx_tid->size;
+	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
+
+	while (tot_desc_sz > desc_sz) {
+		tot_desc_sz -= desc_sz;
+
+		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
+		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+
+		BUG_ON(used >= count);
+		cmd_ctx = &cmds[used++];
+		cmd_ctx->cmd = cmd;
+		cmd_ctx->type = HAL_REO_CMD_FLUSH_CACHE;
+	}
+
+	memset(&cmd, 0, sizeof(cmd));
 	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
 	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
+
+	BUG_ON(used >= count);
+	cmd_ctx = &cmds[used++];
+	cmd_ctx->cmd = cmd;
+	cmd_ctx->type = HAL_REO_CMD_FLUSH_CACHE;
+
+	return used;
+}
+
+void
+ath12k_dp_rx_peer_tid_delete_finalize(struct ath12k_base *ab,
+				      struct ath12k_dp_rx_tid_delete_ctx *dctx)
+{
+	size_t i;
+	bool skip_reo_flush[IEEE80211_NUM_TIDS + 1];
+
+	memset(skip_reo_flush, 0, sizeof (skip_reo_flush));
+	for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
+		struct ath12k_tid_del_ctx *del_tid = &dctx->del_tid[i];
+		struct ath12k_dp_rx_reo_cmd_ctx cmd_ctx;
+		struct ath12k_hal_reo_cmd cmd = {0};
+
+		if (!del_tid->active)
+			continue;
+
+		cmd.addr_lo = lower_32_bits(del_tid->paddr);
+		cmd.addr_hi = upper_32_bits(del_tid->paddr);
 	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
-	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
-				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
-				     ath12k_dp_rx_tid_del_func);
-	if (ret) {
-		ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
-			   tid, ret);
-		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
-				 DMA_BIDIRECTIONAL);
-		kfree(rx_tid->vaddr);
-		rx_tid->vaddr = NULL;
+
+		cmd_ctx.type = HAL_REO_CMD_UPDATE_RX_QUEUE;
+		cmd_ctx.cmd = cmd;
+
+		ath12k_dp_reo_cmd_enqueue(ab, &cmd_ctx, 1);
+		ath12k_dp_reo_cmd_wait(&cmd_ctx);
+
+		if (cmd_ctx.status == HAL_REO_CMD_DRAIN) {
+			skip_reo_flush[i] = true;
+		} else if (cmd_ctx.status != HAL_REO_CMD_SUCCESS) {
+			ath12k_warn(ab,
+				    "failed to update rx tid, "
+				    "tid %zu status %d\n",
+				    i, cmd_ctx.status);
+			skip_reo_flush[i] = true;
+		}
 	}
 
-	ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
+	for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
+		struct ath12k_tid_del_ctx *del_tid = &dctx->del_tid[i];
+		struct ath12k_dp_rx_reo_cmd_ctx *cmds;
+		size_t needed, count, j;
 
-	rx_tid->active = false;
+		if (!del_tid->active)
+			continue;
+
+		if (skip_reo_flush[i])
+			continue;
+
+		needed = ath12k_dp_reo_cache_get_cmd_count(ab, del_tid);
+		cmds = kcalloc(needed, sizeof (*cmds), GFP_KERNEL);
+		if (!cmds) {
+			ath12k_warn(ab, "async command alloc failed\n");
+			break;
+		}
+
+		count = ath12k_dp_reo_cache_flush_fill_cmds(ab,
+							    del_tid,
+							    cmds,
+							    needed);
+		ath12k_dp_reo_cmd_enqueue(ab, cmds, count);
+		ath12k_dp_reo_cmd_wait_all(cmds, count);
+
+		for (j = 0; j < count; j++) {
+			if (cmds[j].status != HAL_REO_CMD_SUCCESS) {
+				ath12k_warn(ab,
+					    "failed to flush rx tid hw desc, "
+					    "tid %zu status %d\n",
+					    i, cmds[j].status);
+			}
+		}
+
+		kfree(cmds);
+	}
+
+	for (i = 0; i < IEEE80211_NUM_TIDS + 1; i++) {
+		struct ath12k_tid_del_ctx *del_tid = &dctx->del_tid[i];
+
+		if (!del_tid->active)
+			continue;
+
+		dma_unmap_single(ab->dev, del_tid->paddr, del_tid->size,
+				 DMA_BIDIRECTIONAL);
+		kfree(del_tid->vaddr);
+	}
 }
 
 /* TODO: it's strange (and ugly) that struct hal_reo_dest_ring is converted
@@ -867,30 +964,36 @@
 void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
 {
 	struct ath12k_dp_rx_tid *rx_tid;
+	struct ath12k_dp_rx_tid_delete_ctx dctx;
 	int i;
 
 	lockdep_assert_held(&ar->ab->base_lock);
+	memset(&dctx, 0, sizeof (dctx));
 
 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
 		rx_tid = &peer->rx_tid[i];
 
-		ath12k_dp_rx_peer_tid_delete(ar, peer, i);
+		ath12k_dp_rx_peer_tid_delete_prepare(ar, peer, i, &dctx);
 		ath12k_dp_rx_frags_cleanup(rx_tid, true);
 
 		spin_unlock_bh(&ar->ab->base_lock);
 		del_timer_sync(&rx_tid->frag_timer);
 		spin_lock_bh(&ar->ab->base_lock);
 	}
+
+	spin_unlock_bh(&ar->ab->base_lock);
+	ath12k_dp_rx_peer_tid_delete_finalize(ar->ab, &dctx);
+	spin_lock_bh(&ar->ab->base_lock);
 }
 
-static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
+static void ath12k_peer_rx_tid_reo_update_prepare(struct ath12k *ar,
 					 struct ath12k_peer *peer,
 					 struct ath12k_dp_rx_tid *rx_tid,
 					 u32 ba_win_sz, u16 ssn,
-					 bool update_ssn)
+						  bool update_ssn,
+						  struct ath12k_dp_rx_reo_cmd_ctx *cmd_ctx)
 {
 	struct ath12k_hal_reo_cmd cmd = {0};
-	int ret;
 
 	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
 	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
@@ -903,18 +1006,10 @@
 		cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
 	}
 
-	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
-				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
-				     NULL);
-	if (ret) {
-		ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
-			    rx_tid->tid, ret);
-		return ret;
-	}
+	cmd_ctx->cmd = cmd;
+	cmd_ctx->type = HAL_REO_CMD_UPDATE_RX_QUEUE;
 
 	rx_tid->ba_win_sz = ba_win_sz;
-
-	return 0;
 }
 
 int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
@@ -926,6 +1021,7 @@
 	struct hal_rx_reo_queue *addr_aligned;
 	struct ath12k_peer *peer;
 	struct ath12k_dp_rx_tid *rx_tid;
+	struct ath12k_dp_rx_reo_cmd_ctx cmd_ctx;
 	u32 hw_desc_sz;
 	void *vaddr;
 	dma_addr_t paddr;
@@ -940,7 +1036,13 @@
 		return -ENOENT;
 	}
 
-	if (ab->hw_params->reoq_lut_support && !dp->reoq_lut.vaddr) {
+	if (!peer->primary_link) {
+		spin_unlock_bh(&ab->base_lock);
+		return 0;
+	}
+
+	if (ab->hw_params->reoq_lut_support &&
+	    (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
 		spin_unlock_bh(&ab->base_lock);
 		ath12k_warn(ab, "reo qref table is not setup\n");
 		return -EINVAL;
@@ -957,10 +1059,14 @@
 	/* Update the tid queue if it is already setup */
 	if (rx_tid->active) {
 		paddr = rx_tid->paddr;
-		ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
-						    ba_win_sz, ssn, true);
+		ath12k_peer_rx_tid_reo_update_prepare(ar, peer, rx_tid,
+						      ba_win_sz, ssn, true,
+						      &cmd_ctx);
 		spin_unlock_bh(&ab->base_lock);
-		if (ret) {
+
+		ath12k_dp_reo_cmd_enqueue(ab, &cmd_ctx, 1);
+		ath12k_dp_reo_cmd_wait(&cmd_ctx);
+		if (cmd_ctx.status != HAL_REO_CMD_SUCCESS) {
 			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
 			return ret;
 		}
@@ -1021,7 +1127,11 @@
 		/* Update the REO queue LUT at the corresponding peer id
 		 * and tid with qaddr.
 		 */
+		if (peer->mlo)
+			ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid, paddr);
+		else
 		ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid, paddr);
+
 		spin_unlock_bh(&ab->base_lock);
 	} else {
 		spin_unlock_bh(&ab->base_lock);
@@ -1038,15 +1148,25 @@
 }
 
 int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
-			     struct ieee80211_ampdu_params *params)
+			     struct ieee80211_ampdu_params *params,
+			     u8 link_id)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
-	struct ath12k_link_sta *arsta = &ahsta->deflink;
-	int vdev_id = arsta->arvif->vdev_id;
+	struct ath12k_link_sta *arsta;
+	int vdev_id;
 	int ret;
 
-	ret = ath12k_dp_rx_peer_tid_setup(ar, params->sta->addr, vdev_id,
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+				  ahsta->link[link_id]);
+	if (!arsta)
+		return -ENOLINK;
+
+	vdev_id = arsta->arvif->vdev_id;
+
+	ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id,
 					  params->tid, params->buf_size,
 					  params->ssn, arsta->ahsta->pn_type);
 	if (ret)
@@ -1056,19 +1176,30 @@
 }
 
 int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
-			    struct ieee80211_ampdu_params *params)
+			    struct ieee80211_ampdu_params *params,
+			    u8 link_id)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_peer *peer;
 	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
-	struct ath12k_link_sta *arsta = &ahsta->deflink;
-	int vdev_id = arsta->arvif->vdev_id;
+	struct ath12k_link_sta *arsta;
+	struct ath12k_dp_rx_reo_cmd_ctx cmd_ctx;
+	int vdev_id;
 	bool active;
 	int ret;
 
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+				  ahsta->link[link_id]);
+	if (!arsta)
+		return -ENOLINK;
+
+	vdev_id = arsta->arvif->vdev_id;
+
 	spin_lock_bh(&ab->base_lock);
 
-	peer = ath12k_peer_find(ab, vdev_id, params->sta->addr);
+	peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
 	if (!peer) {
 		spin_unlock_bh(&ab->base_lock);
 		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
@@ -1082,15 +1213,19 @@
 		return 0;
 	}
 
-	ret = ath12k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
+	ath12k_peer_rx_tid_reo_update_prepare(ar, peer, peer->rx_tid, 1, 0, false,
+					      &cmd_ctx);
 	spin_unlock_bh(&ab->base_lock);
-	if (ret) {
+
+	ath12k_dp_reo_cmd_enqueue(ab, &cmd_ctx, 1);
+	ath12k_dp_reo_cmd_wait(&cmd_ctx);
+	if (cmd_ctx.status != HAL_REO_CMD_SUCCESS) {
 		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
 			    params->tid, ret);
 		return ret;
 	}
 
-	return ret;
+	return 0;
 }
 
 int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
@@ -1103,6 +1238,8 @@
 	struct ath12k_hal_reo_cmd cmd = {0};
 	struct ath12k_peer *peer;
 	struct ath12k_dp_rx_tid *rx_tid;
+	struct ath12k_dp_rx_reo_cmd_ctx *cmds;
+	size_t i, cmd_count;
 	u8 tid;
 	int ret = 0;
 
@@ -1113,6 +1250,10 @@
 	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
 		return 0;
 
+	cmds = kcalloc(IEEE80211_NUM_TIDS + 1, sizeof (*cmds), GFP_KERNEL);
+	if (!cmds)
+		return -ENOMEM;
+
 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
 	cmd.upd0 = HAL_REO_CMD_UPD0_PN |
 		    HAL_REO_CMD_UPD0_PN_SIZE |
@@ -1142,26 +1283,39 @@
 		spin_unlock_bh(&ab->base_lock);
 		ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
 			    peer_addr);
+		kfree(cmds);
 		return -ENOENT;
 	}
 
+	cmd_count = 0;
 	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
+		struct ath12k_dp_rx_reo_cmd_ctx *cmd_ctx;
+
 		rx_tid = &peer->rx_tid[tid];
 		if (!rx_tid->active)
 			continue;
+
 		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
 		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
-		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
-					     HAL_REO_CMD_UPDATE_RX_QUEUE,
-					     &cmd, NULL);
-		if (ret) {
+
+		cmd_ctx = &cmds[cmd_count++];
+		cmd_ctx->cmd = cmd;
+		cmd_ctx->type = HAL_REO_CMD_UPDATE_RX_QUEUE;
+	}
+
+	spin_unlock_bh(&ab->base_lock);
+
+	ath12k_dp_reo_cmd_enqueue(ab, cmds, cmd_count);
+	ath12k_dp_reo_cmd_wait_all(cmds, cmd_count);
+
+	for (i = 0; i < cmd_count; i++) {
+		if (cmds[i].status != HAL_REO_CMD_SUCCESS) {
 			ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
 				    tid, peer_addr, ret);
 			break;
 		}
 	}
-
-	spin_unlock_bh(&ab->base_lock);
+	kfree(cmds);
 
 	return ret;
 }
@@ -1322,12 +1476,12 @@
 	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
 	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
 	int ret;
-	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
+	u8 flags, mcs, nss, bw, sgi, dcm, ppdu_type, rate_idx = 0;
 	u32 v, succ_bytes = 0;
 	u16 tones, rate = 0, succ_pkts = 0;
 	u32 tx_duration = 0;
 	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
-	bool is_ampdu = false;
+	bool is_ofdma, is_ampdu = false;
 
 	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
 		return;
@@ -1355,6 +1509,9 @@
 	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
 	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
 	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
+	ppdu_type = HTT_USR_RATE_PPDU_TYPE(user_rate->rate_flags);
+	is_ofdma = (ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA) |
+		(ppdu_type == HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA);
 
 	/* Note: If host configured fixed rates and in some other special
 	 * cases, the broadcast/management frames are sent in different rates.
@@ -1431,6 +1588,16 @@
 		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
 		arsta->txrate.he_ru_alloc = v;
 		break;
+	case WMI_RATE_PREAMBLE_EHT:
+		arsta->txrate.mcs = mcs;
+		arsta->txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
+		arsta->txrate.he_dcm = dcm;
+		arsta->txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
+		tones = le16_to_cpu(user_rate->ru_end) -
+			le16_to_cpu(user_rate->ru_start) + 1;
+		v = ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(tones);
+		arsta->txrate.eht_ru_alloc = v;
+		break;
 	}
 
 	arsta->txrate.nss = nss;
@@ -1438,6 +1605,13 @@
 	arsta->tx_duration += tx_duration;
 	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
 
+	if (is_ofdma) {
+		if (flags == WMI_RATE_PREAMBLE_HE)
+			arsta->txrate.bw = RATE_INFO_BW_HE_RU;
+		else if (flags == WMI_RATE_PREAMBLE_EHT)
+			arsta->txrate.bw = RATE_INFO_BW_EHT_RU;
+	}
+
 	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
 	 * So skip peer stats update for mgmt packets.
 	 */
@@ -1650,7 +1824,11 @@
 	rcu_read_lock();
 	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
 	if (!ar) {
-		ath12k_warn(ab, "invalid pdev id %d on htt mlo offset\n", pdev_id);
+		/* The ar may not be active (started) yet: the lookup
+		 * above only considers active pdevs, so a %NULL return
+		 * is possible here. Silently discard this message
+		 * instead of warning.
+		 */
 		goto exit;
 	}
 
@@ -1733,8 +1911,12 @@
 					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
 		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
 				       peer_mac_h16, mac_addr);
+		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
+					 HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL);
+		hw_peer_id = le32_get_bits(resp->peer_map_ev.info2,
+					   HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID);
 		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
-				      peer_id);
+				      hw_peer_id);
 		break;
 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
 	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
@@ -1772,6 +1954,7 @@
 	struct hal_rx_desc *ldesc;
 	int space_extra, rem_len, buf_len;
 	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
+	bool is_continuation;
 
 	/* As the msdu is spread across multiple rx buffers,
 	 * find the offset to the start of msdu for computing
@@ -1820,7 +2003,8 @@
 	rem_len = msdu_len - buf_first_len;
 	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
 		rxcb = ATH12K_SKB_RXCB(skb);
-		if (rxcb->is_continuation)
+		is_continuation = rxcb->is_continuation;
+		if (is_continuation)
 			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
 		else
 			buf_len = rem_len;
@@ -1838,7 +2022,7 @@
 		dev_kfree_skb_any(skb);
 
 		rem_len -= buf_len;
-		if (!rxcb->is_continuation)
+		if (!is_continuation)
 			break;
 	}
 
@@ -1863,21 +2047,14 @@
 	return NULL;
 }
 
-static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu)
+static void ath12k_dp_rx_h_csum_offload(struct sk_buff *msdu,
+					struct ath12k_dp_rx_info *rx_info)
 {
-	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
-	struct ath12k_base *ab = ar->ab;
-	bool ip_csum_fail, l4_csum_fail;
-
-	ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rxcb->rx_desc);
-	l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rxcb->rx_desc);
-
-	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
+	msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ?
 			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
 }
 
-static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar,
-				       enum hal_encrypt_type enctype)
+int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype)
 {
 	switch (enctype) {
 	case HAL_ENCRYPT_TYPE_OPEN:
@@ -2071,10 +2248,13 @@
 	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
 	struct ath12k_base *ab = ar->ab;
 	size_t hdr_len, crypto_len;
-	struct ieee80211_hdr *hdr;
+	struct ieee80211_hdr hdr;
 	u16 qos_ctl;
-	__le16 fc;
-	u8 *crypto_hdr;
+	u8 *crypto_hdr, mesh_ctrl;
+
+	ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr);
+	hdr_len = ieee80211_hdrlen(hdr.frame_control);
+	mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc);
 
 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
 		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
@@ -2082,26 +2262,22 @@
 		ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
 	}
 
-	fc = cpu_to_le16(ath12k_dp_rxdesc_get_mpdu_frame_ctrl(ab, rx_desc));
-	hdr_len = ieee80211_hdrlen(fc);
 	skb_push(msdu, hdr_len);
-	hdr = (struct ieee80211_hdr *)msdu->data;
-	hdr->frame_control = fc;
-
-	/* Get wifi header from rx_desc */
-	ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, hdr);
+	memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr)));
 
 	if (rxcb->is_mcbc)
 		status->flag &= ~RX_FLAG_PN_VALIDATED;
 
 	/* Add QOS header */
-	if (ieee80211_is_data_qos(hdr->frame_control)) {
+	if (ieee80211_is_data_qos(hdr.frame_control)) {
+		struct ieee80211_hdr *qhdr = (struct ieee80211_hdr *)msdu->data;
+
 		qos_ctl = rxcb->tid;
-		if (ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc))
+		if (mesh_ctrl)
 			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
 
 		/* TODO: Add other QoS ctl fields when required */
-		memcpy(msdu->data + (hdr_len - IEEE80211_QOS_CTL_LEN),
+		memcpy(ieee80211_get_qos_ctl(qhdr),
 		       &qos_ctl, IEEE80211_QOS_CTL_LEN);
 	}
 }
@@ -2178,10 +2354,10 @@
 }
 
 struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu)
+ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu,
+			 struct ath12k_dp_rx_info *rx_info)
 {
 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
-	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
 	struct ath12k_peer *peer = NULL;
 
 	lockdep_assert_held(&ab->base_lock);
@@ -2192,40 +2368,41 @@
 	if (peer)
 		return peer;
 
-	if (!rx_desc || !(ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
-		return NULL;
+	if (rx_info->filled & BIT_ULL(ATH12K_RX_INFO_ADDR2))
+		peer = ath12k_peer_find_by_addr(ab, rx_info->addr2);
 
-	peer = ath12k_peer_find_by_addr(ab,
-					ath12k_dp_rxdesc_get_mpdu_start_addr2(ab,
-									      rx_desc));
 	return peer;
 }
 
 static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
 				struct sk_buff *msdu,
 				struct hal_rx_desc *rx_desc,
-				struct ieee80211_rx_status *rx_status)
+				struct ath12k_dp_rx_info *rx_info)
 {
-	bool  fill_crypto_hdr;
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_skb_rxcb *rxcb;
 	enum hal_encrypt_type enctype;
 	bool is_decrypted = false;
 	struct ieee80211_hdr *hdr;
 	struct ath12k_peer *peer;
+	struct ieee80211_rx_status *rx_status = rx_info->rx_status;
 	u32 err_bitmap;
 
 	/* PN for multicast packets will be checked in mac80211 */
 	rxcb = ATH12K_SKB_RXCB(msdu);
-	fill_crypto_hdr = ath12k_dp_rx_h_is_da_mcbc(ar->ab, rx_desc);
-	rxcb->is_mcbc = fill_crypto_hdr;
+	rxcb->is_mcbc = rx_info->is_mcbc;
 
 	if (rxcb->is_mcbc)
-		rxcb->peer_id = ath12k_dp_rx_h_peer_id(ar->ab, rx_desc);
+		rxcb->peer_id = rx_info->peer_id;
 
 	spin_lock_bh(&ar->ab->base_lock);
-	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu);
+	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, rx_info);
 	if (peer) {
+		/* Clear the mcbc bit when peer->is_reset_mcbc is set: an
+		 * AP receives only unicast frames from its STAs.
+		 */
+		rxcb->is_mcbc = rxcb->is_mcbc && !peer->is_reset_mcbc;
+
 		if (rxcb->is_mcbc)
 			enctype = peer->sec_type_grp;
 		else
@@ -2254,7 +2431,7 @@
 	if (is_decrypted) {
 		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
 
-		if (fill_crypto_hdr)
+		if (rx_info->is_mcbc)
 			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
 					RX_FLAG_ICV_STRIPPED;
 		else
@@ -2262,37 +2439,28 @@
 					   RX_FLAG_PN_VALIDATED;
 	}
 
-	ath12k_dp_rx_h_csum_offload(ar, msdu);
+	ath12k_dp_rx_h_csum_offload(msdu, rx_info);
 	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
 			       enctype, rx_status, is_decrypted);
 
-	if (!is_decrypted || fill_crypto_hdr)
+	if (!is_decrypted || rx_info->is_mcbc)
 		return;
 
-	if (ath12k_dp_rx_h_decap_type(ar->ab, rx_desc) !=
-	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
+	if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
 		hdr = (void *)msdu->data;
 		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
 	}
 }
 
-static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct hal_rx_desc *rx_desc,
-				struct ieee80211_rx_status *rx_status)
+static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
 {
-	struct ath12k_base *ab = ar->ab;
 	struct ieee80211_supported_band *sband;
-	enum rx_msdu_start_pkt_type pkt_type;
-	u8 bw;
-	u8 rate_mcs, nss;
-	u8 sgi;
+	struct ieee80211_rx_status *rx_status = rx_info->rx_status;
+	enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type;
+	u8 bw = rx_info->bw, sgi = rx_info->sgi;
+	u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss;
 	bool is_cck;
 
-	pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
-	bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
-	rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
-	nss = ath12k_dp_rx_h_nss(ab, rx_desc);
-	sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
-
 	switch (pkt_type) {
 	case RX_MSDU_START_PKT_TYPE_11A:
 	case RX_MSDU_START_PKT_TYPE_11B:
@@ -2341,13 +2509,52 @@
 		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
 		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
 		break;
+	case RX_MSDU_START_PKT_TYPE_11BE:
+		rx_status->rate_idx = rate_mcs;
+
+		if (rate_mcs > ATH12K_EHT_MCS_MAX) {
+			ath12k_warn(ar->ab,
+				    "Received with invalid mcs in EHT mode %d\n",
+				    rate_mcs);
+			break;
+		}
+
+		rx_status->encoding = RX_ENC_EHT;
+		rx_status->nss = nss;
+		rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
+		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
+		break;
+	default:
+		break;
 	}
 }
 
-void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
-			 struct ieee80211_rx_status *rx_status)
+void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc,
+			       struct ath12k_dp_rx_info *rx_info)
 {
-	struct ath12k_base *ab = ar->ab;
+	rx_info->ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rx_desc);
+	rx_info->l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rx_desc);
+	rx_info->is_mcbc = ath12k_dp_rx_h_is_da_mcbc(ab, rx_desc);
+	rx_info->decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc);
+	rx_info->pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
+	rx_info->sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
+	rx_info->rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
+	rx_info->bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
+	rx_info->nss = ath12k_dp_rx_h_nss(ab, rx_desc);
+	rx_info->tid = ath12k_dp_rx_h_tid(ab, rx_desc);
+	rx_info->peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
+	rx_info->phy_meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
+
+	if (ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)) {
+		ether_addr_copy(rx_info->addr2,
+				ath12k_dp_rxdesc_get_mpdu_start_addr2(ab, rx_desc));
+		rx_info->filled |= BIT_ULL(ATH12K_RX_INFO_ADDR2);
+	}
+}
+
+void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
+{
+	struct ieee80211_rx_status *rx_status = rx_info->rx_status;
 	u8 channel_num;
 	u32 center_freq, meta_data;
 	struct ieee80211_channel *channel;
@@ -2361,12 +2568,12 @@
 
 	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
 
-	meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
+	meta_data = rx_info->phy_meta_data;
 	channel_num = meta_data;
 	center_freq = meta_data >> 16;
 
-	if (center_freq >= ATH12K_MIN_6G_FREQ &&
-	    center_freq <= ATH12K_MAX_6G_FREQ) {
+	if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
+	    center_freq <= ATH12K_MAX_6GHZ_FREQ) {
 		rx_status->band = NL80211_BAND_6GHZ;
 		rx_status->freq = center_freq;
 	} else if (channel_num >= 1 && channel_num <= 14) {
@@ -2382,55 +2589,45 @@
 				ieee80211_frequency_to_channel(channel->center_freq);
 		}
 		spin_unlock_bh(&ar->data_lock);
-		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
-				rx_desc, sizeof(*rx_desc));
 	}
 
 	if (rx_status->band != NL80211_BAND_6GHZ)
 		rx_status->freq = ieee80211_channel_to_frequency(channel_num,
 								 rx_status->band);
 
-	ath12k_dp_rx_h_rate(ar, rx_desc, rx_status);
+	ath12k_dp_rx_h_rate(ar, rx_info);
 }
 
 static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
 				      struct sk_buff *msdu,
-				      struct ieee80211_rx_status *status)
+				      struct ath12k_dp_rx_info *rx_info)
 {
 	struct ath12k_base *ab = ar->ab;
-	static const struct ieee80211_radiotap_he known = {
-		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
-				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
-		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
-	};
-	struct ieee80211_radiotap_he *he;
 	struct ieee80211_rx_status *rx_status;
 	struct ieee80211_sta *pubsta;
 	struct ath12k_peer *peer;
 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
+	struct ieee80211_rx_status *status = rx_info->rx_status;
 	u8 decap = DP_RX_DECAP_TYPE_RAW;
 	bool is_mcbc = rxcb->is_mcbc;
 	bool is_eapol = rxcb->is_eapol;
 
-	if (status->encoding == RX_ENC_HE && !(status->flag & RX_FLAG_RADIOTAP_HE) &&
-	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
-		he = skb_push(msdu, sizeof(known));
-		memcpy(he, &known, sizeof(known));
-		status->flag |= RX_FLAG_RADIOTAP_HE;
-	}
-
-	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
-		decap = ath12k_dp_rx_h_decap_type(ab, rxcb->rx_desc);
+	decap = rx_info->decap_type;
 
 	spin_lock_bh(&ab->base_lock);
-	peer = ath12k_dp_rx_h_find_peer(ab, msdu);
+	peer = ath12k_dp_rx_h_find_peer(ab, msdu, rx_info);
 
 	pubsta = peer ? peer->sta : NULL;
 
+	if (pubsta && pubsta->valid_links) {
+		status->link_valid = 1;
+		status->link_id = peer->link_id;
+	}
+
 	spin_unlock_bh(&ab->base_lock);
 
 	ath12k_dbg(ab, ATH12K_DBG_DATA,
-		   "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
+		   "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
 		   msdu,
 		   msdu->len,
 		   peer ? peer->addr : NULL,
@@ -2441,6 +2638,7 @@
 		   (status->encoding == RX_ENC_HT) ? "ht" : "",
 		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
 		   (status->encoding == RX_ENC_HE) ? "he" : "",
+		   (status->encoding == RX_ENC_EHT) ? "eht" : "",
 		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
 		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
 		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
@@ -2492,7 +2690,7 @@
 	if ((likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN)))
 		return true;
 
-	ab->soc_stats.invalid_rbm++;
+	ab->device_stats.invalid_rbm++;
 	WARN_ON_ONCE(1);
 	return false;
 }
@@ -2500,7 +2698,7 @@
 static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
 				     struct sk_buff *msdu,
 				     struct sk_buff_head *msdu_list,
-				     struct ieee80211_rx_status *rx_status)
+				     struct ath12k_dp_rx_info *rx_info)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct hal_rx_desc *rx_desc, *lrx_desc;
@@ -2555,15 +2753,16 @@
 		}
 	}
 
+	ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
 	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) {
 		ret = -EINVAL;
 		goto free_out;
 	}
 
-	ath12k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
-	ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
+	ath12k_dp_rx_h_ppdu(ar, rx_info);
+	ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info);
 
-	rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
+	rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
 
 	return 0;
 
@@ -2576,34 +2775,44 @@
 						  struct sk_buff_head *msdu_list,
 						  int ring_id)
 {
+	struct ath12k_hw_group *ag = ab->ag;
 	struct ieee80211_rx_status rx_status = {0};
 	struct ath12k_skb_rxcb *rxcb;
 	struct sk_buff *msdu;
 	struct ath12k *ar;
-	u8 mac_id, pdev_id;
+	struct ath12k_hw_link *hw_links = ag->hw_links;
+	struct ath12k_base *partner_ab;
+	struct ath12k_dp_rx_info rx_info;
+	u8 hw_link_id, pdev_id;
 	int ret;
 
 	if (skb_queue_empty(msdu_list))
 		return;
 
+	rx_info.filled = 0;
+	rx_info.rx_status = &rx_status;
+
 	rcu_read_lock();
 
 	while ((msdu = __skb_dequeue(msdu_list))) {
 		rxcb = ATH12K_SKB_RXCB(msdu);
-		mac_id = rxcb->mac_id;
-		pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
-		ar = ab->pdevs[pdev_id].ar;
-		if (!rcu_dereference(ab->pdevs_active[pdev_id])) {
+		hw_link_id = rxcb->hw_link_id;
+		partner_ab = ath12k_ag_to_ab(ag,
+					     hw_links[hw_link_id].device_id);
+		pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+						      hw_links[hw_link_id].pdev_idx);
+		ar = partner_ab->pdevs[pdev_id].ar;
+		if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) {
 			dev_kfree_skb_any(msdu);
 			continue;
 		}
 
-		if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+		if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
 			dev_kfree_skb_any(msdu);
 			continue;
 		}
 
-		ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
+		ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_info);
 		if (ret) {
 			ath12k_dbg(ab, ATH12K_DBG_DATA,
 				   "Unable to process msdu %d", ret);
@@ -2611,7 +2820,7 @@
 			continue;
 		}
 
-		ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
+		ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
 	}
 
 	rcu_read_unlock();
@@ -2643,23 +2852,29 @@
 int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
 			 struct napi_struct *napi, int budget)
 {
-	LIST_HEAD(rx_desc_used_list);
+	struct ath12k_hw_group *ag = ab->ag;
+	struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
+	struct ath12k_hw_link *hw_links = ag->hw_links;
+	int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
 	struct ath12k_rx_desc_info *desc_info;
 	struct ath12k_dp *dp = &ab->dp;
 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
 	struct hal_reo_dest_ring *desc;
-	int num_buffs_reaped = 0;
+	struct ath12k_base *partner_ab;
 	struct sk_buff_head msdu_list;
 	struct ath12k_skb_rxcb *rxcb;
 	int total_msdu_reaped = 0;
+	u8 hw_link_id, device_id;
 	struct hal_srng *srng;
 	struct sk_buff *msdu;
 	bool done = false;
-	int mac_id;
 	u64 desc_va;
 
 	__skb_queue_head_init(&msdu_list);
 
+	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
+		INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
 	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
 
 	spin_lock_bh(&srng->lock);
@@ -2676,18 +2891,29 @@
 		cookie = le32_get_bits(desc->buf_addr_info.info1,
 				       BUFFER_ADDR_INFO1_SW_COOKIE);
 
-		mac_id = le32_get_bits(desc->info0,
+		hw_link_id = le32_get_bits(desc->info0,
 				       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
 
 		desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
 			   le32_to_cpu(desc->buf_va_lo));
 		desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
 
+		device_id = hw_links[hw_link_id].device_id;
+		partner_ab = ath12k_ag_to_ab(ag, device_id);
+		if (unlikely(!partner_ab)) {
+			if (desc_info->skb) {
+				dev_kfree_skb_any(desc_info->skb);
+				desc_info->skb = NULL;
+			}
+
+			continue;
+		}
+
 		/* retry manual desc retrieval */
 		if (!desc_info) {
-			desc_info = ath12k_dp_get_rx_desc(ab, cookie);
+			desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie);
 			if (!desc_info) {
-				ath12k_warn(ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
+				ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
 					    cookie);
 				continue;
 			}
@@ -2699,21 +2925,21 @@
 		msdu = desc_info->skb;
 		desc_info->skb = NULL;
 
-		list_add_tail(&desc_info->list, &rx_desc_used_list);
+		list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
 
 		rxcb = ATH12K_SKB_RXCB(msdu);
-		dma_unmap_single(ab->dev, rxcb->paddr,
+		dma_unmap_single(partner_ab->dev, rxcb->paddr,
 				 msdu->len + skb_tailroom(msdu),
 				 DMA_FROM_DEVICE);
 
-		num_buffs_reaped++;
+		num_buffs_reaped[device_id]++;
 
 		push_reason = le32_get_bits(desc->info0,
 					    HAL_REO_DEST_RING_INFO0_PUSH_REASON);
 		if (push_reason !=
 		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
 			dev_kfree_skb_any(msdu);
-			ab->soc_stats.hal_reo_error[ring_id]++;
+			ab->device_stats.hal_reo_error[ring_id]++;
 			continue;
 		}
 
@@ -2726,7 +2952,7 @@
 					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
 		rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) &
 					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
-		rxcb->mac_id = mac_id;
+		rxcb->hw_link_id = hw_link_id;
 		rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver,
 							 mpdu_info->peer_meta_data);
 		rxcb->tid = le32_get_bits(mpdu_info->info0,
@@ -2763,8 +2989,17 @@
 	if (!total_msdu_reaped)
 		goto exit;
 
-	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
-				    num_buffs_reaped);
+	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
+		if (!num_buffs_reaped[device_id])
+			continue;
+
+		partner_ab = ath12k_ag_to_ab(ag, device_id);
+		rx_ring = &partner_ab->dp.rx_refill_buf_ring;
+
+		ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+					    &rx_desc_used_list[device_id],
+					    num_buffs_reaped[device_id]);
+	}
 
 	ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
 					      ring_id);
@@ -2809,6 +3044,12 @@
 		return -ENOENT;
 	}
 
+	if (!peer->primary_link) {
+		spin_unlock_bh(&ab->base_lock);
+		crypto_free_shash(tfm);
+		return 0;
+	}
+
 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
 		rx_tid = &peer->rx_tid[i];
 		rx_tid->ab = ab;
@@ -2872,6 +3113,7 @@
 	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
 	struct ieee80211_key_conf *key_conf;
 	struct ieee80211_hdr *hdr;
+	struct ath12k_dp_rx_info rx_info;
 	u8 mic[IEEE80211_CCMP_MIC_LEN];
 	int head_len, tail_len, ret;
 	size_t data_len;
@@ -2882,6 +3124,9 @@
 	if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
 		return 0;
 
+	rx_info.filled = 0;
+	rx_info.rx_status = rxs;
+
 	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
 	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
@@ -2908,6 +3153,8 @@
 	(ATH12K_SKB_RXCB(msdu))->is_first_msdu = true;
 	(ATH12K_SKB_RXCB(msdu))->is_last_msdu = true;
 
+	ath12k_dp_rx_h_fetch_info(ab, rx_desc, &rx_info);
+
 	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
 		    RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
 	skb_pull(msdu, hal_rx_desc_sz);
@@ -2915,7 +3162,7 @@
 	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu)))
 		return -EINVAL;
 
-	ath12k_dp_rx_h_ppdu(ar, rx_desc, rxs);
+	ath12k_dp_rx_h_ppdu(ar, &rx_info);
 	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
 			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
 	ieee80211_rx(ath12k_ar_to_hw(ar), msdu);
@@ -3157,8 +3404,8 @@
 	list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
 	spin_unlock_bh(&dp->rx_desc_lock);
 err_unmap_dma:
-	dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
-			 DMA_TO_DEVICE);
+	dma_unmap_single_attrs(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
+			       DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 	return ret;
 }
 
@@ -3406,9 +3653,10 @@
 	list_add_tail(&desc_info->list, used_list);
 
 	rxcb = ATH12K_SKB_RXCB(msdu);
-	dma_unmap_single(ar->ab->dev, rxcb->paddr,
+	dmac_inv_range_no_dsb(msdu->data, msdu->data + (msdu->len + skb_tailroom(msdu)));
+	dma_unmap_single_attrs(ar->ab->dev, rxcb->paddr,
 			 msdu->len + skb_tailroom(msdu),
-			 DMA_FROM_DEVICE);
+			       DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
 
 	if (drop) {
 		dev_kfree_skb_any(msdu);
@@ -3421,7 +3669,7 @@
 		goto exit;
 	}
 
-	if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+	if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
 		dev_kfree_skb_any(msdu);
 		goto exit;
 	}
@@ -3451,7 +3699,10 @@
 int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
 			     int budget)
 {
+	struct ath12k_hw_group *ag = ab->ag;
+	struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
 	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
+	int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
 	struct dp_link_desc_bank *link_desc_banks;
 	enum hal_rx_buf_return_buf_manager rbm;
 	struct hal_rx_msdu_link *link_desc_va;
@@ -3459,11 +3710,11 @@
 	struct hal_reo_dest_ring *reo_desc;
 	struct dp_rxdma_ring *rx_ring;
 	struct dp_srng *reo_except;
-	LIST_HEAD(rx_desc_used_list);
+	struct ath12k_hw_link *hw_links = ag->hw_links;
+	struct ath12k_base *partner_ab;
+	u8 hw_link_id, device_id;
 	u32 desc_bank, num_msdus;
 	struct hal_srng *srng;
-	struct ath12k_dp *dp;
-	int mac_id;
 	struct ath12k *ar;
 	dma_addr_t paddr;
 	bool is_frag;
@@ -3473,9 +3724,10 @@
 	tot_n_bufs_reaped = 0;
 	quota = budget;
 
-	dp = &ab->dp;
-	reo_except = &dp->reo_except_ring;
-	link_desc_banks = dp->link_desc_banks;
+	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
+		INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
+	reo_except = &ab->dp.reo_except_ring;
 
 	srng = &ab->hal.srng_list[reo_except->ring_id];
 
@@ -3486,7 +3738,7 @@
 	while (budget &&
 	       (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
 		drop = false;
-		ab->soc_stats.err_ring_pkts++;
+		ab->device_stats.err_ring_pkts++;
 
 		ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
 						    &desc_bank);
@@ -3495,16 +3747,27 @@
 				    ret);
 			continue;
 		}
+
+		hw_link_id = le32_get_bits(reo_desc->info0,
+					   HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
+		device_id = hw_links[hw_link_id].device_id;
+		partner_ab = ath12k_ag_to_ab(ag, device_id);
+
+		pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+						      hw_links[hw_link_id].pdev_idx);
+		ar = partner_ab->pdevs[pdev_id].ar;
+
+		link_desc_banks = partner_ab->dp.link_desc_banks;
 		link_desc_va = link_desc_banks[desc_bank].vaddr +
 			       (paddr - link_desc_banks[desc_bank].paddr);
 		ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
 						 &rbm);
-		if (rbm != dp->idle_link_rbm &&
+		if (rbm != partner_ab->dp.idle_link_rbm &&
 		    rbm != HAL_RX_BUF_RBM_SW3_BM &&
-		    rbm != ab->hw_params->hal_params->rx_buf_rbm) {
-			ab->soc_stats.invalid_rbm++;
+		    rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) {
+			ab->device_stats.invalid_rbm++;
 			ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
-			ath12k_dp_rx_link_desc_return(ab, reo_desc,
+			ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
 						      HAL_WBM_REL_BM_ACT_REL_MSDU);
 			continue;
 		}
@@ -3514,27 +3777,27 @@
 
 		/* Process only rx fragments with one msdu per link desc below, and drop
 		 * msdu's indicated due to error reasons.
+		 * Dynamic fragmentation is not supported for multi-link
+		 * clients, so also drop buffers belonging to a partner device.
 		 */
-		if (!is_frag || num_msdus > 1) {
+		if (!is_frag || num_msdus > 1 ||
+		    partner_ab->device_id != ab->device_id) {
 			drop = true;
+
 			/* Return the link desc back to wbm idle list */
-			ath12k_dp_rx_link_desc_return(ab, reo_desc,
+			ath12k_dp_rx_link_desc_return(partner_ab, reo_desc,
 						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
 		}
 
 		for (i = 0; i < num_msdus; i++) {
-			mac_id = le32_get_bits(reo_desc->info0,
-					       HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
-
-			pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
-			ar = ab->pdevs[pdev_id].ar;
-
 			if (!ath12k_dp_process_rx_err_buf(ar, reo_desc,
-							  &rx_desc_used_list,
+							  &rx_desc_used_list[device_id],
 							  drop,
-							  msdu_cookies[i]))
+							  msdu_cookies[i])) {
+				num_buffs_reaped[device_id]++;
 				tot_n_bufs_reaped++;
 		}
+		}
 
 		if (tot_n_bufs_reaped >= quota) {
 			tot_n_bufs_reaped = quota;
@@ -3549,10 +3812,17 @@
 
 	spin_unlock_bh(&srng->lock);
 
-	rx_ring = &dp->rx_refill_buf_ring;
+	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
+		if (!num_buffs_reaped[device_id])
+			continue;
+
+		partner_ab = ath12k_ag_to_ab(ag, device_id);
+		rx_ring = &partner_ab->dp.rx_refill_buf_ring;
 
-	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
-				    tot_n_bufs_reaped);
+		ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
+					    &rx_desc_used_list[device_id],
+					    num_buffs_reaped[device_id]);
+	}
 
 	return tot_n_bufs_reaped;
 }
@@ -3582,7 +3852,7 @@
 }
 
 static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
-				      struct ieee80211_rx_status *status,
+				      struct ath12k_dp_rx_info *rx_info,
 				      struct sk_buff_head *msdu_list)
 {
 	struct ath12k_base *ab = ar->ab;
@@ -3635,14 +3905,17 @@
 		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
 		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
 	}
+
+	ath12k_dp_rx_h_fetch_info(ab, desc, rx_info);
+
 	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
 		return -EINVAL;
 
-	ath12k_dp_rx_h_ppdu(ar, desc, status);
+	ath12k_dp_rx_h_ppdu(ar, rx_info);
 
-	ath12k_dp_rx_h_mpdu(ar, msdu, desc, status);
+	ath12k_dp_rx_h_mpdu(ar, msdu, desc, rx_info);
 
-	rxcb->tid = ath12k_dp_rx_h_tid(ab, desc);
+	rxcb->tid = rx_info->tid;
 
 	/* Please note that caller will having the access to msdu and completing
 	 * rx with mac80211. Need not worry about cleaning up amsdu_list.
@@ -3652,17 +3925,17 @@
 }
 
 static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
-				   struct ieee80211_rx_status *status,
+				   struct ath12k_dp_rx_info *rx_info,
 				   struct sk_buff_head *msdu_list)
 {
 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
 	bool drop = false;
 
-	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
+	ar->ab->device_stats.reo_error[rxcb->err_code]++;
 
 	switch (rxcb->err_code) {
 	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
-		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
+		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, rx_info, msdu_list))
 			drop = true;
 		break;
 	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
@@ -3683,7 +3956,7 @@
 }
 
 static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
-					struct ieee80211_rx_status *status)
+					struct ath12k_dp_rx_info *rx_info)
 {
 	struct ath12k_base *ab = ar->ab;
 	u16 msdu_len;
@@ -3697,24 +3970,32 @@
 
 	l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
 	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
+
+	if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) {
+		ath12k_warn(ab, "invalid msdu len in tkip mirc err %u\n", msdu_len);
+		ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc,
+				sizeof(struct hal_rx_desc));
+		return true;
+	}
+
 	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
 	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
 
 	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
 		return true;
 
-	ath12k_dp_rx_h_ppdu(ar, desc, status);
+	ath12k_dp_rx_h_ppdu(ar, rx_info);
 
-	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
+	rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
 			 RX_FLAG_DECRYPTED);
 
 	ath12k_dp_rx_h_undecap(ar, msdu, desc,
-			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
+			       HAL_ENCRYPT_TYPE_TKIP_MIC, rx_info->rx_status, false);
 	return false;
 }
 
 static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar,  struct sk_buff *msdu,
-				     struct ieee80211_rx_status *status)
+				     struct ath12k_dp_rx_info *rx_info)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
@@ -3722,14 +4003,15 @@
 	bool drop = false;
 	u32 err_bitmap;
 
-	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
+	ar->ab->device_stats.rxdma_error[rxcb->err_code]++;
 
 	switch (rxcb->err_code) {
 	case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
 	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
 		err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
 		if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
-			drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, status);
+			ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
+			drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, rx_info);
 			break;
 		}
 		fallthrough;
@@ -3751,14 +4033,18 @@
 {
 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
 	struct ieee80211_rx_status rxs = {0};
+	struct ath12k_dp_rx_info rx_info;
 	bool drop = true;
 
+	rx_info.filled = 0;
+	rx_info.rx_status = &rxs;
+
 	switch (rxcb->err_rel_src) {
 	case HAL_WBM_REL_SRC_MODULE_REO:
-		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
+		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rx_info, msdu_list);
 		break;
 	case HAL_WBM_REL_SRC_MODULE_RXDMA:
-		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
+		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rx_info);
 		break;
 	default:
 		/* msdu will get freed */
@@ -3770,13 +4056,15 @@
 		return;
 	}
 
-	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
+	rx_info.rx_status->flag |= RX_FLAG_SKIP_MONITOR;
+	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
 }
 
 int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
 				 struct napi_struct *napi, int budget)
 {
-	LIST_HEAD(rx_desc_used_list);
+	struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
+	struct ath12k_hw_group *ag = ab->ag;
 	struct ath12k *ar;
 	struct ath12k_dp *dp = &ab->dp;
 	struct dp_rxdma_ring *rx_ring;
@@ -3786,17 +4074,22 @@
 	struct sk_buff_head msdu_list, scatter_msdu_list;
 	struct ath12k_skb_rxcb *rxcb;
 	void *rx_desc;
-	u8 mac_id;
-	int num_buffs_reaped = 0;
+	int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
+	int total_num_buffs_reaped = 0;
 	struct ath12k_rx_desc_info *desc_info;
+	struct ath12k_hw_link *hw_links = ag->hw_links;
+	struct ath12k_base *partner_ab;
+	u8 hw_link_id, device_id;
 	int ret, pdev_id;
 	struct hal_rx_desc *msdu_data;
 
 	__skb_queue_head_init(&msdu_list);
 	__skb_queue_head_init(&scatter_msdu_list);
 
+	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
+		INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
+
 	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
-	rx_ring = &dp->rx_refill_buf_ring;
 	spin_lock_bh(&srng->lock);
 
 	ath12k_hal_srng_access_begin(ab, srng);
@@ -3832,14 +4125,27 @@
 		msdu = desc_info->skb;
 		desc_info->skb = NULL;
 
-		list_add_tail(&desc_info->list, &rx_desc_used_list);
+		device_id = desc_info->device_id;
+		partner_ab = ath12k_ag_to_ab(ag, device_id);
+		if (unlikely(!partner_ab)) {
+			dev_kfree_skb_any(msdu);
+
+			/* In any case continuation bit is set
+			 * in the previous record, cleanup scatter_msdu_list
+			 */
+			ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
+			continue;
+		}
+
+		list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
 
 		rxcb = ATH12K_SKB_RXCB(msdu);
-		dma_unmap_single(ab->dev, rxcb->paddr,
+		dma_unmap_single(partner_ab->dev, rxcb->paddr,
 				 msdu->len + skb_tailroom(msdu),
 				 DMA_FROM_DEVICE);
 
-		num_buffs_reaped++;
+		num_buffs_reaped[device_id]++;
+		total_num_buffs_reaped++;
 
 		if (!err_info.continuation)
 			budget--;
@@ -3863,9 +4169,9 @@
 			continue;
 		}
 
-		mac_id = ath12k_dp_rx_get_msdu_src_link(ab,
+		hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab,
 							msdu_data);
-		if (mac_id >= MAX_RADIOS) {
+		if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
 			dev_kfree_skb_any(msdu);
 
 			/* In any case continuation bit is set
@@ -3880,7 +4186,7 @@
 
 			skb_queue_walk(&scatter_msdu_list, msdu) {
 				rxcb = ATH12K_SKB_RXCB(msdu);
-				rxcb->mac_id = mac_id;
+				rxcb->hw_link_id = hw_link_id;
 			}
 
 			skb_queue_splice_tail_init(&scatter_msdu_list,
@@ -3888,7 +4194,7 @@
 		}
 
 		rxcb = ATH12K_SKB_RXCB(msdu);
-		rxcb->mac_id = mac_id;
+		rxcb->hw_link_id = hw_link_id;
 		__skb_queue_tail(&msdu_list, msdu);
 	}
 
@@ -3901,26 +4207,46 @@
 
 	spin_unlock_bh(&srng->lock);
 
-	if (!num_buffs_reaped)
+	if (!total_num_buffs_reaped)
 		goto done;
 
-	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &rx_desc_used_list,
-				    num_buffs_reaped);
+	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
+		if (!num_buffs_reaped[device_id])
+			continue;
+
+		partner_ab = ath12k_ag_to_ab(ag, device_id);
+		rx_ring = &partner_ab->dp.rx_refill_buf_ring;
+
+		ath12k_dp_rx_bufs_replenish(ab, rx_ring,
+					    &rx_desc_used_list[device_id],
+					    num_buffs_reaped[device_id]);
+	}
 
 	rcu_read_lock();
 	while ((msdu = __skb_dequeue(&msdu_list))) {
 		rxcb = ATH12K_SKB_RXCB(msdu);
-		mac_id = rxcb->mac_id;
+		hw_link_id = rxcb->hw_link_id;
 
-		pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
-		ar = ab->pdevs[pdev_id].ar;
+		device_id = hw_links[hw_link_id].device_id;
+		partner_ab = ath12k_ag_to_ab(ag, device_id);
+		if (unlikely(!partner_ab)) {
+			ath12k_dbg(ab, ATH12K_DBG_DATA,
+				   "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n",
+				   hw_link_id, device_id);
+			dev_kfree_skb_any(msdu);
+			continue;
+		}
+
+		pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
+						      hw_links[hw_link_id].pdev_idx);
+		ar = partner_ab->pdevs[pdev_id].ar;
 
-		if (!ar || !rcu_dereference(ar->ab->pdevs_active[mac_id])) {
+		if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) {
 			dev_kfree_skb_any(msdu);
 			continue;
 		}
 
-		if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
+		if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
 			dev_kfree_skb_any(msdu);
 			continue;
 		}
@@ -3928,7 +4254,7 @@
 	}
 	rcu_read_unlock();
 done:
-	return num_buffs_reaped;
+	return total_num_buffs_reaped;
 }
 
 void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
@@ -3936,8 +4262,6 @@
 	struct ath12k_dp *dp = &ab->dp;
 	struct hal_tlv_64_hdr *hdr;
 	struct hal_srng *srng;
-	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
-	bool found = false;
 	u16 tag;
 	struct hal_reo_status reo_status;
 
@@ -3950,7 +4274,7 @@
 	ath12k_hal_srng_access_begin(ab, srng);
 
 	while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
-		tag = u64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
+		tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
 
 		switch (tag) {
 		case HAL_REO_GET_QUEUE_STATS_STATUS:
@@ -3986,23 +4310,9 @@
 			continue;
 		}
 
-		spin_lock_bh(&dp->reo_cmd_lock);
-		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
-			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
-				found = true;
-				list_del(&cmd->list);
-				break;
-			}
-		}
-		spin_unlock_bh(&dp->reo_cmd_lock);
-
-		if (found) {
-			cmd->handler(dp, (void *)&cmd->data,
+		ath12k_dp_reo_cmd_handle_done(ab,
+					      reo_status.uniform_hdr.cmd_num,
 				     reo_status.uniform_hdr.cmd_status);
-			kfree(cmd);
-		}
-
-		found = false;
 	}
 
 	ath12k_hal_srng_access_end(ab, srng);
@@ -4307,6 +4617,7 @@
 
 	pmon->mon_last_linkdesc_paddr = 0;
 	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
+	INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list);
 	spin_lock_init(&pmon->mon_lock);
 
 	return 0;
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/dp_rx.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp_rx.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/dp_rx.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp_rx.h	2025-09-29 14:23:07.605732410 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #ifndef ATH12K_DP_RX_H
 #define ATH12K_DP_RX_H
@@ -33,18 +33,28 @@
 	struct ath12k_base *ab;
 };
 
-struct ath12k_dp_rx_reo_cache_flush_elem {
-	struct list_head list;
-	struct ath12k_dp_rx_tid data;
-	unsigned long ts;
-};
+struct ath12k_dp_rx_reo_cmd_ctx {
+	enum hal_reo_cmd_type type;
+	struct ath12k_hal_reo_cmd cmd;
 
-struct ath12k_dp_rx_reo_cmd {
-	struct list_head list;
-	struct ath12k_dp_rx_tid data;
 	int cmd_num;
-	void (*handler)(struct ath12k_dp *dp, void *ctx,
-			enum hal_reo_cmd_status status);
+	enum hal_reo_cmd_status status;
+	unsigned long deadline;
+
+	struct completion complete;
+	struct list_head next;
+};
+
+struct ath12k_tid_del_ctx {
+	u8 tid;
+	u32 *vaddr;
+	dma_addr_t paddr;
+	u32 size;
+	bool active;
+};
+
+struct ath12k_dp_rx_tid_delete_ctx {
+	struct ath12k_tid_del_ctx del_tid[IEEE80211_NUM_TIDS + 1];
 };
 
 #define ATH12K_DP_RX_REO_DESC_FREE_THRES  64
@@ -65,6 +75,26 @@
 	__be16 snap_type;
 } __packed;
 
+#define ATH12K_RX_INFO_ADDR2 BIT(0)
+
+struct ath12k_dp_rx_info {
+	bool ip_csum_fail;
+	bool l4_csum_fail;
+	bool is_mcbc;
+	u8 decap_type;
+	u8 pkt_type;
+	u8 sgi;
+	u8 rate_mcs;
+	u8 bw;
+	u8 nss;
+	u8 addr2[ETH_ALEN];
+	u8 tid;
+	u16 peer_id;
+	u32 phy_meta_data;
+	u32 filled;
+	struct ieee80211_rx_status *rx_status;
+};
+
 static inline u32 ath12k_he_gi_to_nl80211_he_gi(u8 sgi)
 {
 	u32 ret = 0;
@@ -79,22 +109,35 @@
 	case RX_MSDU_START_SGI_3_2_US:
 		ret = NL80211_RATE_INFO_HE_GI_3_2;
 		break;
+	default:
+		ret = NL80211_RATE_INFO_HE_GI_0_8;
+		break;
 	}
 
 	return ret;
 }
 
+void ath12k_dp_reo_cmd_work_func(struct work_struct *work);
+
 int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
-			     struct ieee80211_ampdu_params *params);
+			     struct ieee80211_ampdu_params *params,
+			     u8 link_id);
 int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
-			    struct ieee80211_ampdu_params *params);
+			    struct ieee80211_ampdu_params *params,
+			    u8 link_id);
 int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
 				       const u8 *peer_addr,
 				       enum set_key_cmd key_cmd,
 				       struct ieee80211_key_conf *key);
 void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer);
-void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
-				  struct ath12k_peer *peer, u8 tid);
+void
+ath12k_dp_rx_peer_tid_delete_prepare(struct ath12k *ar,
+				     struct ath12k_peer *peer, u8 tid,
+				     struct ath12k_dp_rx_tid_delete_ctx *dctx);
+void
+ath12k_dp_rx_peer_tid_delete_finalize(struct ath12k_base *ab,
+				      struct ath12k_dp_rx_tid_delete_ctx *dctx);
+
 int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
 				u8 tid, u32 ba_win_sz, u16 ssn,
 				enum hal_pn_type pn_type);
@@ -126,16 +169,13 @@
 u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
 			struct hal_rx_desc *desc);
 struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
+ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu,
+			 struct ath12k_dp_rx_info *rx_info);
 u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
 			     struct hal_rx_desc *desc);
 u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
 			    struct hal_rx_desc *desc);
-void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc,
-			 struct ieee80211_rx_status *rx_status);
-struct ath12k_peer *
-ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu);
-
+void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info);
 int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab);
 int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab);
 
@@ -143,4 +183,8 @@
 			   int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
 				       const void *ptr, void *data),
 			   void *data);
+void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab,  struct hal_rx_desc *rx_desc,
+			       struct ath12k_dp_rx_info *rx_info);
+int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype);
+
 #endif /* ATH12K_DP_RX_H */
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/dp_tx.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp_tx.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/dp_tx.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp_tx.c	2025-09-25 17:40:34.147360185 +0200
@@ -1,13 +1,16 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include "core.h"
 #include "dp_tx.h"
 #include "debug.h"
+#include "debugfs.h"
 #include "hw.h"
+#include "peer.h"
+#include "mac.h"
 
 static enum hal_tcl_encap_type
 ath12k_dp_tx_get_encap_type(struct ath12k_link_vif *arvif, struct sk_buff *skb)
@@ -81,6 +84,7 @@
 				       u8 pool_id)
 {
 	spin_lock_bh(&dp->tx_desc_lock[pool_id]);
+	tx_desc->skb_ext_desc = NULL;
 	list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);
 	spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
 }
@@ -117,7 +121,7 @@
 			       le32_encode_bits(ti->data_len,
 						HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
 
-	tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
+	tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
 				le32_encode_bits(ti->encap_type,
 						 HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
 				le32_encode_bits(ti->encrypt_type,
@@ -217,7 +221,8 @@
 }
 
 int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
-		 struct sk_buff *skb)
+		 struct sk_buff *skb, bool gsn_valid, int mcbc_gsn,
+		 bool is_mcast)
 {
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_dp *dp = &ab->dp;
@@ -227,7 +232,7 @@
 	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
 	struct hal_tcl_data_cmd *hal_tcl_desc;
 	struct hal_tx_msdu_ext_desc *msg;
-	struct sk_buff *skb_ext_desc;
+	struct sk_buff *skb_ext_desc = NULL;
 	struct hal_srng *tcl_ring;
 	struct ieee80211_hdr *hdr = (void *)skb->data;
 	struct ath12k_vif *ahvif = arvif->ahvif;
@@ -240,6 +245,8 @@
 	bool msdu_ext_desc = false;
 	bool add_htt_metadata = false;
 	u32 iova_mask = ab->hw_params->iova_mask;
+	bool is_diff_encap = false, is_null = false;
+	int flush_len;
 
 	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
 		return -ESHUTDOWN;
@@ -290,13 +297,27 @@
 		msdu_ext_desc = true;
 	}
 
+	if (gsn_valid) {
+		/* Reset and Initialize meta_data_flags with Global Sequence
+		 * Number (GSN) info.
+		 */
+		ti.meta_data_flags =
+			u32_encode_bits(HTT_TCL_META_DATA_TYPE_GLOBAL_SEQ_NUM,
+					HTT_TCL_META_DATA_TYPE) |
+			u32_encode_bits(mcbc_gsn, HTT_TCL_META_DATA_GLOBAL_SEQ_NUM);
+	}
+
 	ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb);
 	ti.addr_search_flags = arvif->hal_addr_search_flags;
 	ti.search_type = arvif->search_type;
 	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
 	ti.pkt_offset = 0;
 	ti.lmac_id = ar->lmac_id;
+
 	ti.vdev_id = arvif->vdev_id;
+	if (gsn_valid)
+		ti.vdev_id += HTT_TX_MLO_MCAST_HOST_REINJECT_BASE_VDEV_ID;
+
 	ti.bss_ast_hash = arvif->ast_hash;
 	ti.bss_ast_idx = arvif->ast_idx;
 	ti.dscp_tid_tbl_idx = 0;
@@ -313,9 +334,13 @@
 	ti.flags1 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE);
 
 	ti.tid = ath12k_dp_tx_get_tid(skb);
-
 	switch (ti.encap_type) {
 	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
+		is_null = ieee80211_is_nullfunc(hdr->frame_control);
+		if ((ahvif->vif->offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED) &&
+		    (skb->protocol == cpu_to_be16(ETH_P_PAE) || is_null))
+			is_diff_encap = true;
+		else
 		ath12k_dp_tx_encap_nwifi(skb);
 		break;
 	case HAL_TCL_ENCAP_TYPE_RAW:
@@ -331,10 +356,19 @@
 	default:
 		/* TODO: Take care of other encap modes as well */
 		ret = -EINVAL;
-		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+		atomic_inc(&ab->device_stats.tx_err.misc_fail);
 		goto fail_remove_tx_buf;
 	}
 
+	if (unlikely(ahvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_ETHERNET &&
+		     !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP))) {
+		msdu_ext_desc = true;
+		if (skb->protocol == cpu_to_be16(ETH_P_PAE)) {
+			ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
+			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
+		}
+	}
+
 	if (iova_mask &&
 	    (unsigned long)skb->data & iova_mask) {
 		ret = ath12k_dp_tx_align_payload(ab, &skb);
@@ -352,22 +386,36 @@
 		hdr = (void *)skb->data;
 	}
 map:
-	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
+#if defined(CONFIG_IP_FFN)
+        flush_len = skb_ffn_get_dirty_len(skb);
+	/* mark it so page pool recycler will remember this */
+        if (skb->ffn_ff_done)
+                skb->ffn_ff_done |= BIT(1);
+#else
+        flush_len = skb->len;
+#endif
+
+	dmac_clean_range_no_dsb(skb->data, skb->data + flush_len);
+	ti.paddr = dma_map_single_attrs(ab->dev, skb->data, flush_len, DMA_TO_DEVICE,
+		DMA_ATTR_SKIP_CPU_SYNC);
 	if (dma_mapping_error(ab->dev, ti.paddr)) {
-		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
+		atomic_inc(&ab->device_stats.tx_err.misc_fail);
 		ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
 		ret = -ENOMEM;
 		goto fail_remove_tx_buf;
 	}
 
-	if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+	if ((!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
 	    !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
 	    !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
-	    ieee80211_has_protected(hdr->frame_control)) {
+	     ieee80211_has_protected(hdr->frame_control)) ||
+	     is_diff_encap) {
 		/* Add metadata for sw encrypted vlan group traffic */
 		add_htt_metadata = true;
 		msdu_ext_desc = true;
+		ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
 		ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW);
+		ti.meta_data_flags |= HTT_TCL_META_DATA_VALID_HTT;
 		ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW;
 		ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
 	}
@@ -379,6 +427,7 @@
 	skb_cb->paddr = ti.paddr;
 	skb_cb->vif = ahvif->vif;
 	skb_cb->ar = ar;
+	skb_cb->paddr_ext_desc = 0;
 
 	if (msdu_ext_desc) {
 		skb_ext_desc = dev_alloc_skb(sizeof(struct hal_tx_msdu_ext_desc));
@@ -398,22 +447,25 @@
 			if (ret < 0) {
 				ath12k_dbg(ab, ATH12K_DBG_DP_TX,
 					   "Failed to add HTT meta data, dropping packet\n");
-				goto fail_unmap_dma;
+				goto fail_free_ext_skb;
 			}
 		}
 
-		ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
-					  skb_ext_desc->len, DMA_TO_DEVICE);
+		dmac_clean_range_no_dsb(skb_ext_desc->data,
+					skb_ext_desc->data + skb_ext_desc->len);
+		ti.paddr = dma_map_single_attrs(ab->dev, skb_ext_desc->data,
+						skb_ext_desc->len,
+						DMA_TO_DEVICE,
+						DMA_ATTR_SKIP_CPU_SYNC);
 		ret = dma_mapping_error(ab->dev, ti.paddr);
-		if (ret) {
-			kfree_skb(skb_ext_desc);
-			goto fail_unmap_dma;
-		}
+		if (ret)
+			goto fail_free_ext_skb;
 
 		ti.data_len = skb_ext_desc->len;
 		ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;
 
 		skb_cb->paddr_ext_desc = ti.paddr;
+		tx_desc->skb_ext_desc = skb_ext_desc;
 	}
 
 	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
@@ -429,7 +481,7 @@
 		 * desc because the desc is directly enqueued onto hw queue.
 		 */
 		ath12k_hal_srng_access_end(ab, tcl_ring);
-		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
+		ab->device_stats.tx_err.desc_na[ti.ring_id]++;
 		spin_unlock_bh(&tcl_ring->lock);
 		ret = -ENOMEM;
 
@@ -444,11 +496,22 @@
 			ring_selector++;
 		}
 
-		goto fail_unmap_dma;
+		goto fail_unmap_dma_ext;
 	}
 
-	ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
+	spin_lock_bh(&arvif->link_stats_lock);
+	arvif->link_stats.tx_encap_type[ti.encap_type]++;
+	arvif->link_stats.tx_encrypt_type[ti.encrypt_type]++;
+	arvif->link_stats.tx_desc_type[ti.type]++;
 
+	if (is_mcast)
+		arvif->link_stats.tx_bcast_mcast++;
+	else
+		arvif->link_stats.tx_enqueued++;
+	spin_unlock_bh(&arvif->link_stats_lock);
+
+	ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
+	dsb(st);
 	ath12k_hal_srng_access_end(ab, tcl_ring);
 
 	spin_unlock_bh(&tcl_ring->lock);
@@ -460,16 +523,25 @@
 
 	return 0;
 
-fail_unmap_dma:
-	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
-
+fail_unmap_dma_ext:
 	if (skb_cb->paddr_ext_desc)
 		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
-				 sizeof(struct hal_tx_msdu_ext_desc),
+				 skb_ext_desc->len,
 				 DMA_TO_DEVICE);
+fail_free_ext_skb:
+	if (skb_ext_desc)
+		kfree_skb(skb_ext_desc);
+
+fail_unmap_dma:
+	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
 
 fail_remove_tx_buf:
 	ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
+
+	spin_lock_bh(&arvif->link_stats_lock);
+	arvif->link_stats.tx_dropped++;
+	spin_unlock_bh(&arvif->link_stats_lock);
+
 	if (tcl_ring_retry)
 		goto tcl_ring_sel;
 
@@ -478,7 +550,8 @@
 
 static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
 				    struct sk_buff *msdu, u8 mac_id,
-				    struct dp_tx_ring *tx_ring)
+				    struct dp_tx_ring *tx_ring,
+				    struct sk_buff *skb_ext_desc)
 {
 	struct ath12k *ar;
 	struct ath12k_skb_cb *skb_cb;
@@ -487,10 +560,14 @@
 	skb_cb = ATH12K_SKB_CB(msdu);
 	ar = ab->pdevs[pdev_id].ar;
 
-	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
-	if (skb_cb->paddr_ext_desc)
-		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
-				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+	dma_unmap_single_attrs(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE,
+			       DMA_ATTR_SKIP_CPU_SYNC);
+	if (skb_cb->paddr_ext_desc) {
+		dma_unmap_single_attrs(ab->dev, skb_cb->paddr_ext_desc,
+				 skb_ext_desc->len, DMA_TO_DEVICE,
+				 DMA_ATTR_SKIP_CPU_SYNC);
+		dev_kfree_skb_any(skb_ext_desc);
+	}
 
 	ieee80211_free_txskb(ar->ah->hw, msdu);
 
@@ -502,10 +579,17 @@
 ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
 				 struct sk_buff *msdu,
 				 struct dp_tx_ring *tx_ring,
-				 struct ath12k_dp_htt_wbm_tx_status *ts)
+				 struct ath12k_dp_htt_wbm_tx_status *ts,
+				 struct sk_buff *skb_ext_desc,
+				 u16 peer_id)
 {
+	struct ieee80211_tx_status status = { 0 };
 	struct ieee80211_tx_info *info;
+	struct ath12k_link_vif *arvif;
 	struct ath12k_skb_cb *skb_cb;
+	struct ieee80211_vif *vif;
+	struct ath12k_vif *ahvif;
+	struct ath12k_peer *peer;
 	struct ath12k *ar;
 
 	skb_cb = ATH12K_SKB_CB(msdu);
@@ -517,9 +601,24 @@
 		wake_up(&ar->dp.tx_empty_waitq);
 
 	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
-	if (skb_cb->paddr_ext_desc)
+	if (skb_cb->paddr_ext_desc) {
 		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
-				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+				 skb_ext_desc->len, DMA_TO_DEVICE);
+		dev_kfree_skb_any(skb_ext_desc);
+	}
+
+	vif = skb_cb->vif;
+	if (vif) {
+		ahvif = ath12k_vif_to_ahvif(vif);
+		rcu_read_lock();
+		arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
+		if (arvif) {
+			spin_lock_bh(&arvif->link_stats_lock);
+			arvif->link_stats.tx_completed++;
+			spin_unlock_bh(&arvif->link_stats_lock);
+		}
+		rcu_read_unlock();
+	}
 
 	memset(&info->status, 0, sizeof(info->status));
 
@@ -538,18 +637,31 @@
 		}
 	}
 
-	ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
+	spin_lock_bh(&ab->base_lock);
+	peer = ath12k_peer_find_by_id(ab, peer_id);
+	if (!peer || !peer->sta)
+		ath12k_dbg(ab, ATH12K_DBG_DATA,
+			   "dp_tx: failed to find the peer with peer_id %d\n", peer_id);
+	else
+		status.sta = peer->sta;
+	spin_unlock_bh(&ab->base_lock);
+
+	status.info = info;
+	status.skb = msdu;
+	ieee80211_tx_status_ext(ath12k_ar_to_hw(ar), &status);
 }
 
 static void
 ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
 				     void *desc, u8 mac_id,
 				     struct sk_buff *msdu,
-				     struct dp_tx_ring *tx_ring)
+				     struct dp_tx_ring *tx_ring,
+				     struct sk_buff *skb_ext_desc)
 {
 	struct htt_tx_wbm_completion *status_desc;
 	struct ath12k_dp_htt_wbm_tx_status ts = {0};
 	enum hal_wbm_htt_tx_comp_status wbm_status;
+	u16 peer_id;
 
 	status_desc = desc;
 
@@ -558,16 +670,21 @@
 
 	switch (wbm_status) {
 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
-	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
-	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
 		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
 		ts.ack_rssi = le32_get_bits(status_desc->info2,
 					    HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
-		ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
+		peer_id = le32_get_bits(((struct hal_wbm_completion_ring_tx *)desc)->
+				info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
+
+		ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts, skb_ext_desc,
+						 peer_id);
 		break;
+	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
+	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
-		ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
+	case HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH:
+		ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring, skb_ext_desc);
 		break;
 	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
 		/* This event is to be handled only when the driver decides to
@@ -575,19 +692,143 @@
 		 */
 		break;
 	default:
-		ath12k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
+		ath12k_warn(ab, "Unknown htt wbm tx status %d\n", wbm_status);
+		break;
+	}
+}
+
+static void ath12k_dp_tx_update_txcompl(struct ath12k *ar, struct hal_tx_status *ts)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_peer *peer;
+	struct ieee80211_sta *sta;
+	struct ath12k_sta *ahsta;
+	struct ath12k_link_sta *arsta;
+	struct rate_info txrate = {0};
+	u16 rate, ru_tones;
+	u8 rate_idx = 0;
+	int ret;
+
+	spin_lock_bh(&ab->base_lock);
+	peer = ath12k_peer_find_by_id(ab, ts->peer_id);
+	if (!peer || !peer->sta) {
+		ath12k_dbg(ab, ATH12K_DBG_DP_TX,
+			   "failed to find the peer by id %u\n", ts->peer_id);
+		spin_unlock_bh(&ab->base_lock);
+		return;
+	}
+	sta = peer->sta;
+	ahsta = ath12k_sta_to_ahsta(sta);
+	arsta = &ahsta->deflink;
+
+	/* This is to prefer choose the real NSS value arsta->last_txrate.nss,
+	 * if it is invalid, then choose the NSS value while assoc.
+	 */
+	if (arsta->last_txrate.nss)
+		txrate.nss = arsta->last_txrate.nss;
+	else
+		txrate.nss = arsta->peer_nss;
+	spin_unlock_bh(&ab->base_lock);
+
+	switch (ts->pkt_type) {
+	case HAL_TX_RATE_STATS_PKT_TYPE_11A:
+	case HAL_TX_RATE_STATS_PKT_TYPE_11B:
+		ret = ath12k_mac_hw_ratecode_to_legacy_rate(ts->mcs,
+							    ts->pkt_type,
+							    &rate_idx,
+							    &rate);
+		if (ret < 0) {
+			ath12k_warn(ab, "Invalid tx legacy rate %d\n", ret);
+			return;
+		}
+
+		txrate.legacy = rate;
+		break;
+	case HAL_TX_RATE_STATS_PKT_TYPE_11N:
+		if (ts->mcs > ATH12K_HT_MCS_MAX) {
+			ath12k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs);
+			return;
+		}
+
+		if (txrate.nss != 0)
+			txrate.mcs = ts->mcs + 8 * (txrate.nss - 1);
+
+		txrate.flags = RATE_INFO_FLAGS_MCS;
+
+		if (ts->sgi)
+			txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
 		break;
+	case HAL_TX_RATE_STATS_PKT_TYPE_11AC:
+		if (ts->mcs > ATH12K_VHT_MCS_MAX) {
+			ath12k_warn(ab, "Invalid VHT mcs index %d\n", ts->mcs);
+			return;
 	}
+
+		txrate.mcs = ts->mcs;
+		txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
+
+		if (ts->sgi)
+			txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
+		break;
+	case HAL_TX_RATE_STATS_PKT_TYPE_11AX:
+		if (ts->mcs > ATH12K_HE_MCS_MAX) {
+			ath12k_warn(ab, "Invalid HE mcs index %d\n", ts->mcs);
+			return;
+		}
+
+		txrate.mcs = ts->mcs;
+		txrate.flags = RATE_INFO_FLAGS_HE_MCS;
+		txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(ts->sgi);
+		break;
+	case HAL_TX_RATE_STATS_PKT_TYPE_11BE:
+		if (ts->mcs > ATH12K_EHT_MCS_MAX) {
+			ath12k_warn(ab, "Invalid EHT mcs index %d\n", ts->mcs);
+			return;
+		}
+
+		txrate.mcs = ts->mcs;
+		txrate.flags = RATE_INFO_FLAGS_EHT_MCS;
+		txrate.eht_gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(ts->sgi);
+		break;
+	default:
+		ath12k_warn(ab, "Invalid tx pkt type: %d\n", ts->pkt_type);
+		return;
+	}
+
+	txrate.bw = ath12k_mac_bw_to_mac80211_bw(ts->bw);
+
+	if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
+		txrate.bw = RATE_INFO_BW_HE_RU;
+		ru_tones = ath12k_mac_he_convert_tones_to_ru_tones(ts->tones);
+		txrate.he_ru_alloc =
+			ath12k_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
+	}
+
+	if (ts->ofdma && ts->pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11BE) {
+		txrate.bw = RATE_INFO_BW_EHT_RU;
+		txrate.eht_ru_alloc =
+			ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(ts->tones);
+	}
+
+	spin_lock_bh(&ab->base_lock);
+	arsta->txrate = txrate;
+	spin_unlock_bh(&ab->base_lock);
 }
 
 static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
 				       struct sk_buff *msdu,
-				       struct hal_tx_status *ts)
+				       struct hal_tx_status *ts,
+				       struct sk_buff *skb_ext_desc)
 {
+	struct ieee80211_tx_status status = { 0 };
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_hw *ah = ar->ah;
 	struct ieee80211_tx_info *info;
+	struct ath12k_link_vif *arvif;
 	struct ath12k_skb_cb *skb_cb;
+	struct ieee80211_vif *vif;
+	struct ath12k_vif *ahvif;
+	struct ath12k_peer *peer;
 
 	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
 		/* Must not happen */
@@ -597,9 +838,11 @@
 	skb_cb = ATH12K_SKB_CB(msdu);
 
 	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
-	if (skb_cb->paddr_ext_desc)
+	if (skb_cb->paddr_ext_desc) {
 		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
-				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
+				 skb_ext_desc->len, DMA_TO_DEVICE);
+		dev_kfree_skb_any(skb_ext_desc);
+	}
 
 	rcu_read_lock();
 
@@ -613,6 +856,17 @@
 		goto exit;
 	}
 
+	vif = skb_cb->vif;
+	if (vif) {
+		ahvif = ath12k_vif_to_ahvif(vif);
+		arvif = rcu_dereference(ahvif->link[skb_cb->link_id]);
+		if (arvif) {
+			spin_lock_bh(&arvif->link_stats_lock);
+			arvif->link_stats.tx_completed++;
+			spin_unlock_bh(&arvif->link_stats_lock);
+		}
+	}
+
 	info = IEEE80211_SKB_CB(msdu);
 	memset(&info->status, 0, sizeof(info->status));
 
@@ -658,7 +912,24 @@
 	 * Might end up reporting it out-of-band from HTT stats.
 	 */
 
-	ieee80211_tx_status_skb(ath12k_ar_to_hw(ar), msdu);
+	ath12k_dp_tx_update_txcompl(ar, ts);
+
+	spin_lock_bh(&ab->base_lock);
+	peer = ath12k_peer_find_by_id(ab, ts->peer_id);
+	if (!peer || !peer->sta) {
+		ath12k_err(ab,
+			   "dp_tx: failed to find the peer with peer_id %d\n",
+			   ts->peer_id);
+		spin_unlock_bh(&ab->base_lock);
+		ieee80211_free_txskb(ath12k_ar_to_hw(ar), msdu);
+		goto exit;
+	}
+	spin_unlock_bh(&ab->base_lock);
+
+	status.sta = peer->sta;
+	status.info = info;
+	status.skb = msdu;
+	ieee80211_tx_status_ext(ath12k_ar_to_hw(ar), &status);
 
 exit:
 	rcu_read_unlock();
@@ -668,6 +939,8 @@
 				      struct hal_wbm_completion_ring_tx *desc,
 				      struct hal_tx_status *ts)
 {
+	u32 info0 = le32_to_cpu(desc->rate_stats.info0);
+
 	ts->buf_rel_source =
 		le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
 	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
@@ -682,10 +955,19 @@
 
 	ts->ppdu_id = le32_get_bits(desc->info1,
 				    HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
-	if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID)
-		ts->rate_stats = le32_to_cpu(desc->rate_stats.info0);
-	else
-		ts->rate_stats = 0;
+	ts->ack_rssi = le32_get_bits(desc->info2,
+				     HAL_WBM_COMPL_TX_INFO2_ACK_FRAME_RSSI);
+
+	ts->peer_id = le32_get_bits(desc->info3, HAL_WBM_COMPL_TX_INFO3_PEER_ID);
+
+	if (info0 & HAL_TX_RATE_STATS_INFO0_VALID) {
+		ts->pkt_type = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_PKT_TYPE);
+		ts->mcs = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_MCS);
+		ts->sgi = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_SGI);
+		ts->bw = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_BW);
+		ts->tones = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_TONES_IN_RU);
+		ts->ofdma = u32_get_bits(info0, HAL_TX_RATE_STATS_INFO0_OFDMA_TX);
+	}
 }
 
 void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
@@ -695,7 +977,7 @@
 	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
 	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
 	struct ath12k_tx_desc_info *tx_desc = NULL;
-	struct sk_buff *msdu;
+	struct sk_buff *msdu, *skb_ext_desc;
 	struct hal_tx_status ts = { 0 };
 	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
 	struct hal_wbm_release_ring *desc;
@@ -755,6 +1037,7 @@
 
 		msdu = tx_desc->skb;
 		mac_id = tx_desc->mac_id;
+		skb_ext_desc = tx_desc->skb_ext_desc;
 
 		/* Release descriptor as soon as extracting necessary info
 		 * to reduce contention
@@ -764,7 +1047,7 @@
 			ath12k_dp_tx_process_htt_tx_complete(ab,
 							     (void *)tx_status,
 							     mac_id, msdu,
-							     tx_ring);
+							     tx_ring, skb_ext_desc);
 			continue;
 		}
 
@@ -774,7 +1057,7 @@
 		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
 			wake_up(&ar->dp.tx_empty_waitq);
 
-		ath12k_dp_tx_complete_msdu(ar, msdu, &ts);
+		ath12k_dp_tx_complete_msdu(ar, msdu, &ts, skb_ext_desc);
 	}
 }
 
@@ -814,7 +1097,7 @@
 		*htt_ring_type = HTT_HW_TO_SW_RING;
 		break;
 	case HAL_RXDMA_MONITOR_BUF:
-		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
+		*htt_ring_id = HTT_RX_MON_HOST2MON_BUF_RING;
 		*htt_ring_type = HTT_SW_TO_HW_RING;
 		break;
 	case HAL_RXDMA_MONITOR_STATUS:
@@ -822,7 +1105,7 @@
 		*htt_ring_type = HTT_SW_TO_HW_RING;
 		break;
 	case HAL_RXDMA_MONITOR_DST:
-		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
+		*htt_ring_id = HTT_RX_MON_MON2HOST_DEST_RING;
 		*htt_ring_type = HTT_HW_TO_SW_RING;
 		break;
 	case HAL_RXDMA_MONITOR_DESC:
@@ -971,7 +1254,16 @@
 	skb_put(skb, len);
 	cmd = (struct htt_ver_req_cmd *)skb->data;
 	cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
-					     HTT_VER_REQ_INFO_MSG_ID);
+					     HTT_OPTION_TAG);
+
+	if (!ath12k_ftm_mode) {
+		cmd->tcl_metadata_version = le32_encode_bits(HTT_TAG_TCL_METADATA_VERSION,
+							     HTT_OPTION_TAG) |
+					    le32_encode_bits(HTT_TCL_METADATA_VER_SZ,
+							     HTT_OPTION_LEN) |
+					    le32_encode_bits(HTT_OPTION_TCL_METADATA_VER_V2,
+						     	     HTT_OPTION_VALUE);
+	}
 
 	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
 	if (ret) {
@@ -1077,15 +1369,46 @@
 	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
 				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
 	cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
-				       HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID);
+				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_OFFSET_VALID);
+	cmd->info0 |=
+		le32_encode_bits(tlv_filter->drop_threshold_valid,
+				 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_DROP_THRES_VAL);
+	cmd->info0 |= le32_encode_bits(!tlv_filter->rxmon_disable,
+				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_EN_RXMON);
+
 	cmd->info1 = le32_encode_bits(rx_buf_size,
 				      HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
+	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_mgmt,
+				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
+	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_ctrl,
+				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
+	cmd->info1 |= le32_encode_bits(tlv_filter->conf_len_data,
+				       HTT_RX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
 	cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
 	cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
 	cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
 	cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
 	cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);
 
+	cmd->info2 = le32_encode_bits(tlv_filter->rx_drop_threshold,
+				      HTT_RX_RING_SELECTION_CFG_CMD_INFO2_DROP_THRESHOLD);
+	cmd->info2 |=
+		le32_encode_bits(tlv_filter->enable_log_mgmt_type,
+				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_MGMT_TYPE);
+	cmd->info2 |=
+		le32_encode_bits(tlv_filter->enable_log_ctrl_type,
+				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_CTRL_TYPE);
+	cmd->info2 |=
+		le32_encode_bits(tlv_filter->enable_log_data_type,
+				 HTT_RX_RING_SELECTION_CFG_CMD_INFO2_EN_LOG_DATA_TYPE);
+
+	cmd->info3 =
+		le32_encode_bits(tlv_filter->enable_rx_tlv_offset,
+				 HTT_RX_RING_SELECTION_CFG_CMD_INFO3_EN_TLV_PKT_OFFSET);
+	cmd->info3 |=
+		le32_encode_bits(tlv_filter->rx_tlv_offset,
+				 HTT_RX_RING_SELECTION_CFG_CMD_INFO3_PKT_TLV_OFFSET);
+
 	if (tlv_filter->offset_valid) {
 		cmd->rx_packet_offset =
 			le32_encode_bits(tlv_filter->rx_packet_offset,
@@ -1210,15 +1533,28 @@
 int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
 {
 	struct ath12k_base *ab = ar->ab;
-	struct ath12k_dp *dp = &ab->dp;
 	struct htt_rx_ring_tlv_filter tlv_filter = {0};
-	int ret, ring_id;
+	int ret, ring_id, i;
 
-	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
 	tlv_filter.offset_valid = false;
 
 	if (!reset) {
-		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
+		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_DEST_RING;
+
+		tlv_filter.drop_threshold_valid = true;
+		tlv_filter.rx_drop_threshold = HTT_RX_RING_TLV_DROP_THRESHOLD_VALUE;
+
+		tlv_filter.enable_log_mgmt_type = true;
+		tlv_filter.enable_log_ctrl_type = true;
+		tlv_filter.enable_log_data_type = true;
+
+		tlv_filter.conf_len_ctrl = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+		tlv_filter.conf_len_mgmt = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+		tlv_filter.conf_len_data = HTT_RX_RING_DEFAULT_DMA_LENGTH;
+
+		tlv_filter.enable_rx_tlv_offset = true;
+		tlv_filter.rx_tlv_offset = HTT_RX_RING_PKT_TLV_OFFSET;
+
 		tlv_filter.pkt_filter_flags0 =
 					HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
 					HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
@@ -1233,19 +1569,29 @@
 					HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
 					HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
 					HTT_RX_MON_MO_DATA_FILTER_FLASG3;
+	} else {
+		tlv_filter = ath12k_mac_mon_status_filter_default;
+
+		if (ath12k_debugfs_is_extd_rx_stats_enabled(ar))
+			tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
 	}
 
 	if (ab->hw_params->rxdma1_enable) {
-		ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0,
-						       HAL_RXDMA_MONITOR_BUF,
+		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+			ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+			ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
+							       ar->dp.mac_id + i,
+							       HAL_RXDMA_MONITOR_DST,
 						       DP_RXDMA_REFILL_RING_SIZE,
 						       &tlv_filter);
 		if (ret) {
 			ath12k_err(ab,
-				   "failed to setup filter for monitor buf %d\n", ret);
+					   "failed to setup filter for monitor buf %d\n",
+					   ret);
 			return ret;
 		}
 	}
+	}
 
 	return 0;
 }
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/dp_tx.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp_tx.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/dp_tx.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/dp_tx.h	2025-07-01 14:10:42.732046482 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_DP_TX_H
@@ -17,7 +17,8 @@
 
 int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab);
 int ath12k_dp_tx(struct ath12k *ar, struct ath12k_link_vif *arvif,
-		 struct sk_buff *skb);
+		 struct sk_buff *skb, bool gsn_valid, int mcbc_gsn,
+		 bool is_mcast);
 void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id);
 
 int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/fw.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/fw.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/fw.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/fw.h	2025-09-25 17:40:34.147360185 +0200
@@ -23,6 +23,9 @@
 	 */
 	ATH12K_FW_FEATURE_MULTI_QRTR_ID = 0,
 
+	/* The firmware supports MLO capability */
+	ATH12K_FW_FEATURE_MLO,
+
 	/* keep last */
 	ATH12K_FW_FEATURE_COUNT,
 };
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/hal.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hal.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/hal.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hal.c	2025-09-29 14:23:07.605732410 +0200
@@ -181,7 +181,7 @@
 		.max_size = HAL_WBM2PPE_RELEASE_RING_BASE_MSB_RING_SIZE,
 	},
 	[HAL_TX_MONITOR_BUF] = {
-		.start_ring_id = HAL_SRNG_SW2TXMON_BUF0,
+		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
 		.max_rings = 1,
 		.entry_size = sizeof(struct hal_mon_buf_ring) >> 2,
 		.mac_type = ATH12K_HAL_SRNG_PMAC,
@@ -449,8 +449,8 @@
 
 static bool ath12k_hw_qcn9274_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
 {
-	return __le32_to_cpu(desc->u.qcn9274.mpdu_start.info6) &
-	       RX_MPDU_START_INFO6_MCAST_BCAST;
+	return __le16_to_cpu(desc->u.qcn9274.msdu_end.info5) &
+	       RX_MSDU_END_INFO5_DA_IS_MCBC;
 }
 
 static void ath12k_hw_qcn9274_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
@@ -511,11 +511,6 @@
 	crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274.mpdu_start.pn[1]);
 }
 
-static u16 ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
-{
-	return __le16_to_cpu(desc->u.qcn9274.mpdu_start.frame_ctrl);
-}
-
 static int ath12k_hal_srng_create_config_qcn9274(struct ath12k_base *ab)
 {
 	struct ath12k_hal *hal = &ab->hal;
@@ -736,7 +731,6 @@
 	.rx_desc_is_da_mcbc = ath12k_hw_qcn9274_rx_desc_is_da_mcbc,
 	.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_rx_desc_get_dot11_hdr,
 	.rx_desc_get_crypto_header = ath12k_hw_qcn9274_rx_desc_get_crypto_hdr,
-	.rx_desc_get_mpdu_frame_ctl = ath12k_hw_qcn9274_rx_desc_get_mpdu_frame_ctl,
 	.dp_rx_h_msdu_done = ath12k_hw_qcn9274_dp_rx_h_msdu_done,
 	.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_l4_cksum_fail,
 	.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_dp_rx_h_ip_cksum_fail,
@@ -908,8 +902,8 @@
 
 static bool ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc(struct hal_rx_desc *desc)
 {
-	return __le32_to_cpu(desc->u.qcn9274_compact.mpdu_start.info6) &
-	       RX_MPDU_START_INFO6_MCAST_BCAST;
+	return __le16_to_cpu(desc->u.qcn9274_compact.msdu_end.info5) &
+	       RX_MSDU_END_INFO5_DA_IS_MCBC;
 }
 
 static void ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr(struct hal_rx_desc *desc,
@@ -975,11 +969,6 @@
 		HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.qcn9274_compact.mpdu_start.pn[1]);
 }
 
-static u16 ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
-{
-	return __le16_to_cpu(desc->u.qcn9274_compact.mpdu_start.frame_ctrl);
-}
-
 static bool ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done(struct hal_rx_desc *desc)
 {
 	return !!le32_get_bits(desc->u.qcn9274_compact.msdu_end.info14,
@@ -1080,8 +1069,6 @@
 	.rx_desc_is_da_mcbc = ath12k_hw_qcn9274_compact_rx_desc_is_da_mcbc,
 	.rx_desc_get_dot11_hdr = ath12k_hw_qcn9274_compact_rx_desc_get_dot11_hdr,
 	.rx_desc_get_crypto_header = ath12k_hw_qcn9274_compact_rx_desc_get_crypto_hdr,
-	.rx_desc_get_mpdu_frame_ctl =
-		ath12k_hw_qcn9274_compact_rx_desc_get_mpdu_frame_ctl,
 	.dp_rx_h_msdu_done = ath12k_hw_qcn9274_compact_dp_rx_h_msdu_done,
 	.dp_rx_h_l4_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_l4_cksum_fail,
 	.dp_rx_h_ip_cksum_fail = ath12k_hw_qcn9274_compact_dp_rx_h_ip_cksum_fail,
@@ -1330,11 +1317,6 @@
 	crypto_hdr[7] = HAL_RX_MPDU_INFO_PN_GET_BYTE2(desc->u.wcn7850.mpdu_start.pn[1]);
 }
 
-static u16 ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl(struct hal_rx_desc *desc)
-{
-	return __le16_to_cpu(desc->u.wcn7850.mpdu_start.frame_ctrl);
-}
-
 static int ath12k_hal_srng_create_config_wcn7850(struct ath12k_base *ab)
 {
 	struct ath12k_hal *hal = &ab->hal;
@@ -1555,7 +1537,6 @@
 	.rx_desc_is_da_mcbc = ath12k_hw_wcn7850_rx_desc_is_da_mcbc,
 	.rx_desc_get_dot11_hdr = ath12k_hw_wcn7850_rx_desc_get_dot11_hdr,
 	.rx_desc_get_crypto_header = ath12k_hw_wcn7850_rx_desc_get_crypto_hdr,
-	.rx_desc_get_mpdu_frame_ctl = ath12k_hw_wcn7850_rx_desc_get_mpdu_frame_ctl,
 	.dp_rx_h_msdu_done = ath12k_hw_wcn7850_dp_rx_h_msdu_done,
 	.dp_rx_h_l4_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_l4_cksum_fail,
 	.dp_rx_h_ip_cksum_fail = ath12k_hw_wcn7850_dp_rx_h_ip_cksum_fail,
@@ -2128,11 +2109,16 @@
 {
 	lockdep_assert_held(&srng->lock);
 
-	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
+	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
 		srng->u.src_ring.cached_tp =
 			*(volatile u32 *)srng->u.src_ring.tp_addr;
-	else
-		srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
+	} else {
+		srng->u.dst_ring.cached_hp =
+			READ_ONCE(*srng->u.dst_ring.hp_addr);
+
+		/* Make sure descriptor is read after the head pointer. */
+		dma_rmb();
+	}
 }
 
 /* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin()
@@ -2142,7 +2128,6 @@
 {
 	lockdep_assert_held(&srng->lock);
 
-	/* TODO: See if we need a write memory barrier here */
 	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
 		/* For LMAC rings, ring pointer updates are done through FW and
 		 * hence written to a shared memory location that is read by FW
@@ -2150,15 +2135,20 @@
 		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
 			srng->u.src_ring.last_tp =
 				*(volatile u32 *)srng->u.src_ring.tp_addr;
-			*srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
+			/* Make sure desc is written before head pointer */
+			dma_wmb();
+			WRITE_ONCE(*srng->u.src_ring.hp_addr,
+				   srng->u.src_ring.hp);
 		} else {
 			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
-			*srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
+			WRITE_ONCE(*srng->u.dst_ring.tp_addr,
+				   srng->u.dst_ring.tp);
 		}
 	} else {
 		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
 			srng->u.src_ring.last_tp =
 				*(volatile u32 *)srng->u.src_ring.tp_addr;
+			/* MMIO access, no need for wmb here */
 			ath12k_hif_write32(ab,
 					   (unsigned long)srng->u.src_ring.hp_addr -
 					   (unsigned long)ab->mem,
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/hal.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hal.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/hal.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hal.h	2025-09-29 14:23:07.605732410 +0200
@@ -485,8 +485,8 @@
 	HAL_SRNG_RING_ID_WMAC1_RXMON2SW0 = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
 	HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
 	HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
-	HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
 	HAL_SRNG_RING_ID_WMAC1_TXMON2SW0_BUF0,
+	HAL_SRNG_RING_ID_WMAC1_SW2TXMON_BUF0,
 
 	HAL_SRNG_RING_ID_PMAC1_ID_END,
 };
@@ -1068,7 +1068,6 @@
 	bool (*rx_desc_is_da_mcbc)(struct hal_rx_desc *desc);
 	void (*rx_desc_get_dot11_hdr)(struct hal_rx_desc *desc,
 				      struct ieee80211_hdr *hdr);
-	u16 (*rx_desc_get_mpdu_frame_ctl)(struct hal_rx_desc *desc);
 	void (*rx_desc_get_crypto_header)(struct hal_rx_desc *desc,
 					  u8 *crypto_hdr,
 					  enum hal_encrypt_type enctype);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/hal_desc.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hal_desc.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/hal_desc.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hal_desc.h	2025-09-25 17:40:34.151360205 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #include "core.h"
 
@@ -522,7 +522,7 @@
 	HAL_PHYRXHT_SIG_USR_SU					= 468 /* 0x1d4 */,
 	HAL_PHYRXHT_SIG_USR_MU_MIMO				= 469 /* 0x1d5 */,
 	HAL_PHYRX_GENERIC_U_SIG					= 470 /* 0x1d6 */,
-	HAL_PHYRX_GENERICHT_SIG					= 471 /* 0x1d7 */,
+	HAL_PHYRX_GENERIC_EHT_SIG				= 471 /* 0x1d7 */,
 	HAL_OVERWRITE_RESP_START				= 472 /* 0x1d8 */,
 	HAL_OVERWRITE_RESP_PREAMBLE_INFO			= 473 /* 0x1d9 */,
 	HAL_OVERWRITE_RESP_FRAME_INFO				= 474 /* 0x1da */,
@@ -579,9 +579,11 @@
 
 #define HAL_TLV_64_HDR_TAG		GENMASK(9, 1)
 #define HAL_TLV_64_HDR_LEN		GENMASK(21, 10)
+#define HAL_TLV_64_USR_ID		GENMASK(31, 26)
+#define HAL_TLV_64_ALIGN		8
 
 struct hal_tlv_64_hdr {
-	u64 tl;
+	__le64 tl;
 	u8 value[];
 } __packed;
 
@@ -1261,6 +1263,7 @@
 
 #define HAL_TCL_DATA_CMD_INFO5_RING_ID			GENMASK(27, 20)
 #define HAL_TCL_DATA_CMD_INFO5_LOOPING_COUNT		GENMASK(31, 28)
+#define HAL_ENCRYPT_TYPE_MAX 12
 
 enum hal_encrypt_type {
 	HAL_ENCRYPT_TYPE_WEP_40,
@@ -1282,11 +1285,13 @@
 	HAL_TCL_ENCAP_TYPE_NATIVE_WIFI,
 	HAL_TCL_ENCAP_TYPE_ETHERNET,
 	HAL_TCL_ENCAP_TYPE_802_3 = 3,
+	HAL_TCL_ENCAP_TYPE_MAX
 };
 
 enum hal_tcl_desc_type {
 	HAL_TCL_DESC_TYPE_BUFFER,
 	HAL_TCL_DESC_TYPE_EXT_DESC,
+	HAL_TCL_DESC_TYPE_MAX,
 };
 
 enum hal_wbm_htt_tx_comp_status {
@@ -1296,6 +1301,7 @@
 	HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ,
 	HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT,
 	HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY,
+	HAL_WBM_REL_HTT_TX_COMP_STATUS_VDEVID_MISMATCH,
 	HAL_WBM_REL_HTT_TX_COMP_STATUS_MAX,
 };
 
@@ -2966,9 +2972,8 @@
 
 #define HAL_MON_DEST_COOKIE_BUF_ID      GENMASK(17, 0)
 
-#define HAL_MON_DEST_INFO0_END_OFFSET		GENMASK(15, 0)
-#define HAL_MON_DEST_INFO0_FLUSH_DETECTED	BIT(16)
-#define HAL_MON_DEST_INFO0_END_OF_PPDU		BIT(17)
+#define HAL_MON_DEST_INFO0_END_OFFSET		GENMASK(11, 0)
+#define HAL_MON_DEST_INFO0_END_REASON		GENMASK(17, 16)
 #define HAL_MON_DEST_INFO0_INITIATOR		BIT(18)
 #define HAL_MON_DEST_INFO0_EMPTY_DESC		BIT(19)
 #define HAL_MON_DEST_INFO0_RING_ID		GENMASK(27, 20)
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/hal_rx.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hal_rx.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/hal_rx.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hal_rx.c	2025-09-25 17:40:34.151360205 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include "debug.h"
@@ -26,8 +26,8 @@
 {
 	struct hal_reo_get_queue_stats *desc;
 
-	tlv->tl = u32_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
-		  u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+	tlv->tl = le64_encode_bits(HAL_REO_GET_QUEUE_STATS, HAL_TLV_HDR_TAG) |
+		  le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
 
 	desc = (struct hal_reo_get_queue_stats *)tlv->value;
 	memset_startat(desc, 0, queue_addr_lo);
@@ -59,8 +59,8 @@
 		hal->current_blk_index = avail_slot;
 	}
 
-	tlv->tl = u32_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
-		  u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+	tlv->tl = le64_encode_bits(HAL_REO_FLUSH_CACHE, HAL_TLV_HDR_TAG) |
+		  le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
 
 	desc = (struct hal_reo_flush_cache *)tlv->value;
 	memset_startat(desc, 0, cache_addr_lo);
@@ -97,8 +97,8 @@
 {
 	struct hal_reo_update_rx_queue *desc;
 
-	tlv->tl = u32_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
-		  u32_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
+	tlv->tl = le64_encode_bits(HAL_REO_UPDATE_RX_REO_QUEUE, HAL_TLV_HDR_TAG) |
+		  le64_encode_bits(sizeof(*desc), HAL_TLV_HDR_LEN);
 
 	desc = (struct hal_reo_update_rx_queue *)tlv->value;
 	memset_startat(desc, 0, queue_addr_lo);
@@ -326,7 +326,7 @@
 				    HAL_REO_DEST_RING_INFO0_PUSH_REASON);
 	err_code = le32_get_bits(desc->info0,
 				 HAL_REO_DEST_RING_INFO0_ERROR_CODE);
-	ab->soc_stats.reo_error[err_code]++;
+	ab->device_stats.reo_error[err_code]++;
 
 	if (push_reason != HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED &&
 	    push_reason != HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
@@ -381,7 +381,7 @@
 		val = le32_get_bits(wbm_desc->buf_addr_info.info1,
 				    BUFFER_ADDR_INFO1_RET_BUF_MGR);
 		if (val != HAL_RX_BUF_RBM_SW3_BM) {
-			ab->soc_stats.invalid_rbm++;
+			ab->device_stats.invalid_rbm++;
 			return -EINVAL;
 		}
 
@@ -393,7 +393,7 @@
 		val = le32_get_bits(wbm_cc_desc->info0,
 				    HAL_WBM_RELEASE_RX_CC_INFO0_RBM);
 		if (val != HAL_RX_BUF_RBM_SW3_BM) {
-			ab->soc_stats.invalid_rbm++;
+			ab->device_stats.invalid_rbm++;
 			return -EINVAL;
 		}
 
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/hal_rx.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hal_rx.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/hal_rx.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hal_rx.h	2025-09-25 17:40:34.155360224 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_HAL_RX_H
@@ -19,12 +19,9 @@
 	bool hw_cc_done;
 };
 
-#define HAL_INVALID_PEERID 0xffff
+#define HAL_INVALID_PEERID	0x3fff
 #define VHT_SIG_SU_NSS_MASK 0x7
 
-#define HAL_RX_MAX_MCS 12
-#define HAL_RX_MAX_NSS 8
-
 #define HAL_RX_MPDU_INFO_PN_GET_BYTE1(__val) \
 	le32_get_bits((__val), GENMASK(7, 0))
 
@@ -71,6 +68,8 @@
 	HAL_RX_PREAMBLE_11N,
 	HAL_RX_PREAMBLE_11AC,
 	HAL_RX_PREAMBLE_11AX,
+	HAL_RX_PREAMBLE_11BA,
+	HAL_RX_PREAMBLE_11BE,
 	HAL_RX_PREAMBLE_MAX,
 };
 
@@ -108,9 +107,13 @@
 	HAL_RX_MON_STATUS_PPDU_NOT_DONE,
 	HAL_RX_MON_STATUS_PPDU_DONE,
 	HAL_RX_MON_STATUS_BUF_DONE,
+	HAL_RX_MON_STATUS_BUF_ADDR,
+	HAL_RX_MON_STATUS_MPDU_START,
+	HAL_RX_MON_STATUS_MPDU_END,
+	HAL_RX_MON_STATUS_MSDU_END,
 };
 
-#define HAL_RX_MAX_MPDU		256
+#define HAL_RX_MAX_MPDU				1024
 #define HAL_RX_NUM_WORDS_PER_PPDU_BITMAP	(HAL_RX_MAX_MPDU >> 5)
 
 struct hal_rx_user_status {
@@ -143,10 +146,43 @@
 	u32 mpdu_fcs_ok_bitmap[HAL_RX_NUM_WORDS_PER_PPDU_BITMAP];
 	u32 mpdu_ok_byte_count;
 	u32 mpdu_err_byte_count;
+	bool ampdu_present;
+	u16 ampdu_id;
 };
 
 #define HAL_MAX_UL_MU_USERS	37
 
+struct hal_rx_u_sig_info {
+	bool ul_dl;
+	u8 bw;
+	u8 ppdu_type_comp_mode;
+	u8 eht_sig_mcs;
+	u8 num_eht_sig_sym;
+	struct ieee80211_radiotap_eht_usig usig;
+};
+
+#define HAL_RX_MON_MAX_AGGR_SIZE	128
+
+struct hal_rx_tlv_aggr_info {
+	bool in_progress;
+	u16 cur_len;
+	u16 tlv_tag;
+	u8 buf[HAL_RX_MON_MAX_AGGR_SIZE];
+};
+
+struct hal_rx_radiotap_eht {
+	__le32 known;
+	__le32 data[9];
+};
+
+#define EHT_MAX_USER_INFO	4
+
+struct hal_rx_eht_info {
+	u8 num_user_info;
+	struct hal_rx_radiotap_eht eht;
+	u32 user_info[EHT_MAX_USER_INFO];
+};
+
 struct hal_rx_mon_ppdu_info {
 	u32 ppdu_id;
 	u32 last_ppdu_id;
@@ -227,10 +263,15 @@
 	u8 addr4[ETH_ALEN];
 	struct hal_rx_user_status userstats[HAL_MAX_UL_MU_USERS];
 	u8 userid;
-	u16 ampdu_id[HAL_MAX_UL_MU_USERS];
 	bool first_msdu_in_mpdu;
 	bool is_ampdu;
 	u8 medium_prot_type;
+	bool ppdu_continuation;
+	bool eht_usig;
+	struct hal_rx_u_sig_info u_sig_info;
+	bool is_eht;
+	struct hal_rx_eht_info eht_info;
+	struct hal_rx_tlv_aggr_info tlv_aggr;
 };
 
 #define HAL_RX_PPDU_START_INFO0_PPDU_ID			GENMASK(15, 0)
@@ -245,6 +286,8 @@
 	__le32 rsvd[2];
 } __packed;
 
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_PEER_ID		GENMASK(13, 0)
+#define HAL_RX_PPDU_END_USER_STATS_INFO0_DEVICE_ID		GENMASK(15, 14)
 #define HAL_RX_PPDU_END_USER_STATS_INFO0_MPDU_CNT_FCS_ERR	GENMASK(26, 16)
 
 #define HAL_RX_PPDU_END_USER_STATS_INFO1_MPDU_CNT_FCS_OK	GENMASK(10, 0)
@@ -299,6 +342,7 @@
 	__le32 info4;
 	__le32 info5;
 	__le32 info6;
+	__le32 rsvd;
 } __packed;
 
 #define HAL_RX_HT_SIG_INFO_INFO0_MCS		GENMASK(6, 0)
@@ -395,11 +439,9 @@
 #define HAL_RX_HE_SIG_A_MU_DL_INFO0_DOPPLER_INDICATION	BIT(25)
 
 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXOP_DURATION	GENMASK(6, 0)
-#define HAL_RX_HE_SIG_A_MU_DL_INFO1_CODING		BIT(7)
 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_NUM_LTF_SYMB	GENMASK(10, 8)
 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_LDPC_EXTRA		BIT(11)
 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_STBC		BIT(12)
-#define HAL_RX_HE_SIG_A_MU_DL_INFO1_TXBF		BIT(10)
 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_FACTOR	GENMASK(14, 13)
 #define HAL_RX_HE_SIG_A_MU_DL_INFO1_PKT_EXT_PE_DISAM	BIT(15)
 
@@ -425,7 +467,7 @@
 
 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_ID	GENMASK(10, 0)
 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_NSTS	GENMASK(13, 11)
-#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF	BIT(19)
+#define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_TXBF	BIT(14)
 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_MCS	GENMASK(18, 15)
 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_DCM	BIT(19)
 #define HAL_RX_HE_SIG_B2_OFDMA_INFO_INFO0_STA_CODING	BIT(20)
@@ -453,7 +495,8 @@
 } __packed;
 
 #define HAL_RX_MPDU_START_INFO0_PPDU_ID			GENMASK(31, 16)
-#define HAL_RX_MPDU_START_INFO1_PEERID			GENMASK(31, 16)
+#define HAL_RX_MPDU_START_INFO1_PEERID			GENMASK(29, 16)
+#define HAL_RX_MPDU_START_INFO1_DEVICE_ID		GENMASK(31, 30)
 #define HAL_RX_MPDU_START_INFO2_MPDU_LEN		GENMASK(13, 0)
 struct hal_rx_mpdu_start {
 	__le32 rsvd0[9];
@@ -464,11 +507,23 @@
 	__le32 rsvd2[16];
 } __packed;
 
+struct hal_rx_msdu_end {
+	__le32 info0;
+	__le32 rsvd0[9];
+	__le16 info00;
+	__le16 info01;
+	__le32 rsvd00[8];
+	__le32 info1;
+	__le32 rsvd1[10];
+	__le32 info2;
+	__le32 rsvd2;
+} __packed;
+
 #define HAL_RX_PPDU_END_DURATION	GENMASK(23, 0)
 struct hal_rx_ppdu_end_duration {
 	__le32 rsvd0[9];
 	__le32 info0;
-	__le32 rsvd1[4];
+	__le32 rsvd1[18];
 } __packed;
 
 struct hal_rx_rxpcu_classification_overview {
@@ -639,6 +694,395 @@
 #define HAL_RX_MPDU_ERR_MPDU_LEN		BIT(6)
 #define HAL_RX_MPDU_ERR_UNENCRYPTED_FRAME	BIT(7)
 
+#define HAL_RX_PHY_CMN_USER_INFO0_GI		GENMASK(17, 16)
+
+struct hal_phyrx_common_user_info {
+	__le32 rsvd[2];
+	__le32 info0;
+	__le32 rsvd1;
+} __packed;
+
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_SPATIAL_REUSE	GENMASK(3, 0)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_GI_LTF		GENMASK(5, 4)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_NUM_LTF_SYM	GENMASK(8, 6)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_NSS		GENMASK(10, 7)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_BEAMFORMED		BIT(11)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_DISREGARD		GENMASK(13, 12)
+#define HAL_RX_EHT_SIG_NDP_CMN_INFO0_CRC		GENMASK(17, 14)
+
+struct hal_eht_sig_ndp_cmn_eb {
+	__le32 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_SPATIAL_REUSE		GENMASK(3, 0)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_GI_LTF			GENMASK(5, 4)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_NUM_LTF_SYM		GENMASK(8, 6)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_LDPC_EXTA_SYM		BIT(9)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_PRE_FEC_PAD_FACTOR	GENMASK(11, 10)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISAMBIGUITY		BIT(12)
+#define HAL_RX_EHT_SIG_OVERFLOW_INFO0_DISREGARD			GENMASK(16, 13)
+
+struct hal_eht_sig_usig_overflow {
+	__le32 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_STA_ID	GENMASK(10, 0)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_MCS	GENMASK(14, 11)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_VALIDATE	BIT(15)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_NSS	GENMASK(19, 16)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_BEAMFORMED	BIT(20)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CODING	BIT(21)
+#define HAL_RX_EHT_SIG_NON_MUMIMO_USER_INFO0_CRC	GENMASK(25, 22)
+
+struct hal_eht_sig_non_mu_mimo {
+	__le32 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_STA_ID		GENMASK(10, 0)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_MCS		GENMASK(14, 11)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CODING		BIT(15)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_SPATIAL_CODING	GENMASK(22, 16)
+#define HAL_RX_EHT_SIG_MUMIMO_USER_INFO0_CRC		GENMASK(26, 23)
+
+struct hal_eht_sig_mu_mimo {
+	__le32 info0;
+} __packed;
+
+union hal_eht_sig_user_field {
+	struct hal_eht_sig_mu_mimo mu_mimo;
+	struct hal_eht_sig_non_mu_mimo n_mu_mimo;
+};
+
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_SPATIAL_REUSE		GENMASK(3, 0)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_GI_LTF			GENMASK(5, 4)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_LTF_SYM		GENMASK(8, 6)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_LDPC_EXTA_SYM		BIT(9)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_PRE_FEC_PAD_FACTOR	GENMASK(11, 10)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_DISAMBIGUITY		BIT(12)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_DISREGARD		GENMASK(16, 13)
+#define HAL_RX_EHT_SIG_NON_OFDMA_INFO0_NUM_USERS		GENMASK(19, 17)
+
+struct hal_eht_sig_non_ofdma_cmn_eb {
+	__le32 info0;
+	union hal_eht_sig_user_field user_field;
+} __packed;
+
+#define HAL_RX_EHT_SIG_OFDMA_EB1_SPATIAL_REUSE		GENMASK_ULL(3, 0)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_GI_LTF			GENMASK_ULL(5, 4)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_NUM_LFT_SYM		GENMASK_ULL(8, 6)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_LDPC_EXTRA_SYM		BIT(9)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_PRE_FEC_PAD_FACTOR	GENMASK_ULL(11, 10)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_PRE_DISAMBIGUITY	BIT(12)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_DISREGARD		GENMASK_ULL(16, 13)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_1		GENMASK_ULL(25, 17)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_RU_ALLOC_1_2		GENMASK_ULL(34, 26)
+#define HAL_RX_EHT_SIG_OFDMA_EB1_CRC			GENMASK_ULL(30, 27) /* NOTE(review): overlaps RU_ALLOC_1_2 bits 34:26 above - verify field position */
+
+struct hal_eht_sig_ofdma_cmn_eb1 {
+	__le64 info0;
+} __packed;
+
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_1		GENMASK_ULL(8, 0)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_2		GENMASK_ULL(17, 9)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_3		GENMASK_ULL(26, 18)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_4		GENMASK_ULL(35, 27)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_5		GENMASK_ULL(44, 36)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_RU_ALLOC_2_6		GENMASK_ULL(53, 45)
+#define HAL_RX_EHT_SIG_OFDMA_EB2_MCS			GENMASK_ULL(57, 54)
+
+struct hal_eht_sig_ofdma_cmn_eb2 {
+	__le64 info0;
+} __packed;
+
+struct hal_eht_sig_ofdma_cmn_eb {
+	struct hal_eht_sig_ofdma_cmn_eb1 eb1;
+	struct hal_eht_sig_ofdma_cmn_eb2 eb2;
+	union hal_eht_sig_user_field user_field;
+} __packed;
+
+enum hal_eht_bw {
+	HAL_EHT_BW_20,
+	HAL_EHT_BW_40,
+	HAL_EHT_BW_80,
+	HAL_EHT_BW_160,
+	HAL_EHT_BW_320_1,
+	HAL_EHT_BW_320_2,
+};
+
+#define HAL_RX_USIG_CMN_INFO0_PHY_VERSION	GENMASK(2, 0)
+#define HAL_RX_USIG_CMN_INFO0_BW		GENMASK(5, 3)
+#define HAL_RX_USIG_CMN_INFO0_UL_DL		BIT(6)
+#define HAL_RX_USIG_CMN_INFO0_BSS_COLOR		GENMASK(12, 7)
+#define HAL_RX_USIG_CMN_INFO0_TXOP		GENMASK(19, 13)
+#define HAL_RX_USIG_CMN_INFO0_DISREGARD		GENMASK(25, 20)
+#define HAL_RX_USIG_CMN_INFO0_VALIDATE		BIT(26)
+
+struct hal_mon_usig_cmn {
+	__le32 info0;
+} __packed;
+
+#define HAL_RX_USIG_TB_INFO0_PPDU_TYPE_COMP_MODE	GENMASK(1, 0)
+#define HAL_RX_USIG_TB_INFO0_VALIDATE			BIT(2)
+#define HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_1		GENMASK(6, 3)
+#define HAL_RX_USIG_TB_INFO0_SPATIAL_REUSE_2		GENMASK(10, 7)
+#define HAL_RX_USIG_TB_INFO0_DISREGARD_1		GENMASK(15, 11)
+#define HAL_RX_USIG_TB_INFO0_CRC			GENMASK(19, 16)
+#define HAL_RX_USIG_TB_INFO0_TAIL			GENMASK(25, 20)
+#define HAL_RX_USIG_TB_INFO0_RX_INTEG_CHECK_PASS	BIT(31)
+
+struct hal_mon_usig_tb {
+	__le32 info0;
+} __packed;
+
+#define HAL_RX_USIG_MU_INFO0_PPDU_TYPE_COMP_MODE	GENMASK(1, 0)
+#define HAL_RX_USIG_MU_INFO0_VALIDATE_1			BIT(2)
+#define HAL_RX_USIG_MU_INFO0_PUNC_CH_INFO		GENMASK(7, 3)
+#define HAL_RX_USIG_MU_INFO0_VALIDATE_2			BIT(8)
+#define HAL_RX_USIG_MU_INFO0_EHT_SIG_MCS		GENMASK(10, 9)
+#define HAL_RX_USIG_MU_INFO0_NUM_EHT_SIG_SYM		GENMASK(15, 11)
+#define HAL_RX_USIG_MU_INFO0_CRC			GENMASK(20, 16)
+#define HAL_RX_USIG_MU_INFO0_TAIL			GENMASK(26, 21)
+#define HAL_RX_USIG_MU_INFO0_RX_INTEG_CHECK_PASS	BIT(31)
+
+struct hal_mon_usig_mu {
+	__le32 info0;
+} __packed;
+
+union hal_mon_usig_non_cmn {
+	struct hal_mon_usig_tb tb;
+	struct hal_mon_usig_mu mu;
+};
+
+struct hal_mon_usig_hdr {
+	struct hal_mon_usig_cmn cmn;
+	union hal_mon_usig_non_cmn non_cmn;
+} __packed;
+
+#define HAL_RX_USR_INFO0_PHY_PPDU_ID		GENMASK(15, 0)
+#define HAL_RX_USR_INFO0_USR_RSSI		GENMASK(23, 16)
+#define HAL_RX_USR_INFO0_PKT_TYPE		GENMASK(27, 24)
+#define HAL_RX_USR_INFO0_STBC			BIT(28)
+#define HAL_RX_USR_INFO0_RECEPTION_TYPE		GENMASK(31, 29)
+
+#define HAL_RX_USR_INFO1_MCS			GENMASK(3, 0)
+#define HAL_RX_USR_INFO1_SGI			GENMASK(5, 4)
+#define HAL_RX_USR_INFO1_HE_RANGING_NDP		BIT(6)
+#define HAL_RX_USR_INFO1_MIMO_SS_BITMAP		GENMASK(15, 8)
+#define HAL_RX_USR_INFO1_RX_BW			GENMASK(18, 16)
+#define HAL_RX_USR_INFO1_DL_OFMDA_USR_IDX	GENMASK(31, 24)
+
+#define HAL_RX_USR_INFO2_DL_OFDMA_CONTENT_CHAN	BIT(0)
+#define HAL_RX_USR_INFO2_NSS			GENMASK(10, 8)
+#define HAL_RX_USR_INFO2_STREAM_OFFSET		GENMASK(13, 11)
+#define HAL_RX_USR_INFO2_STA_DCM		BIT(14)
+#define HAL_RX_USR_INFO2_LDPC			BIT(15)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_0		GENMASK(19, 16)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_1		GENMASK(23, 20)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_2		GENMASK(27, 24)
+#define HAL_RX_USR_INFO2_RU_TYPE_80_3		GENMASK(31, 28)
+
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_0	GENMASK(5, 0)
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_1	GENMASK(13, 8)
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_2	GENMASK(21, 16)
+#define HAL_RX_USR_INFO3_RU_START_IDX_80_3	GENMASK(29, 24)
+
+struct hal_receive_user_info {
+	__le32 info0;
+	__le32 info1;
+	__le32 info2;
+	__le32 info3;
+	__le32 user_fd_rssi_seg0;
+	__le32 user_fd_rssi_seg1;
+	__le32 user_fd_rssi_seg2;
+	__le32 user_fd_rssi_seg3;
+} __packed;
+
+enum hal_mon_reception_type {
+	HAL_RECEPTION_TYPE_SU,
+	HAL_RECEPTION_TYPE_DL_MU_MIMO,
+	HAL_RECEPTION_TYPE_DL_MU_OFMA,
+	HAL_RECEPTION_TYPE_DL_MU_OFDMA_MIMO,
+	HAL_RECEPTION_TYPE_UL_MU_MIMO,
+	HAL_RECEPTION_TYPE_UL_MU_OFDMA,
+	HAL_RECEPTION_TYPE_UL_MU_OFDMA_MIMO,
+};
+
+/* Different allowed RU in 11BE */
+#define HAL_EHT_RU_26		0ULL
+#define HAL_EHT_RU_52		1ULL
+#define HAL_EHT_RU_78		2ULL
+#define HAL_EHT_RU_106		3ULL
+#define HAL_EHT_RU_132		4ULL
+#define HAL_EHT_RU_242		5ULL
+#define HAL_EHT_RU_484		6ULL
+#define HAL_EHT_RU_726		7ULL
+#define HAL_EHT_RU_996		8ULL
+#define HAL_EHT_RU_996x2	9ULL
+#define HAL_EHT_RU_996x3	10ULL
+#define HAL_EHT_RU_996x4	11ULL
+#define HAL_EHT_RU_NONE		15ULL
+#define HAL_EHT_RU_INVALID	31ULL
+/* MRUs spanning above 80Mhz
+ * HAL_EHT_RU_996_484 = HAL_EHT_RU_484 + HAL_EHT_RU_996 + 4 (reserved)
+ */
+#define HAL_EHT_RU_996_484	18ULL
+#define HAL_EHT_RU_996x2_484	28ULL
+#define HAL_EHT_RU_996x3_484	40ULL
+#define HAL_EHT_RU_996_484_242	23ULL
+
+#define NUM_RU_BITS_PER80	16
+#define NUM_RU_BITS_PER20	4
+
+/* Different per_80Mhz band in 320Mhz bandwidth */
+#define HAL_80_0	0
+#define HAL_80_1	1
+#define HAL_80_2	2
+#define HAL_80_3	3
+
+#define HAL_RU_80MHZ(num_band)		((num_band) * NUM_RU_BITS_PER80)
+#define HAL_RU_20MHZ(idx_per_80)	((idx_per_80) * NUM_RU_BITS_PER20)
+
+#define HAL_RU_SHIFT(num_band, idx_per_80)	\
+		(HAL_RU_80MHZ(num_band) + HAL_RU_20MHZ(idx_per_80))
+
+#define HAL_RU(ru, num_band, idx_per_80)	\
+		((u64)(ru) << HAL_RU_SHIFT(num_band, idx_per_80))
+
+/* MRU-996+484 */
+#define HAL_EHT_RU_996_484_0	(HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) |	\
+				 HAL_RU(HAL_EHT_RU_996, HAL_80_1, 0))
+#define HAL_EHT_RU_996_484_1	(HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996, HAL_80_1, 0))
+#define HAL_EHT_RU_996_484_2	(HAL_RU(HAL_EHT_RU_996, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1))
+#define HAL_EHT_RU_996_484_3	(HAL_RU(HAL_EHT_RU_996, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0))
+#define HAL_EHT_RU_996_484_4	(HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) |	\
+				 HAL_RU(HAL_EHT_RU_996, HAL_80_3, 0))
+#define HAL_EHT_RU_996_484_5	(HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996, HAL_80_3, 0))
+#define HAL_EHT_RU_996_484_6	(HAL_RU(HAL_EHT_RU_996, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1))
+#define HAL_EHT_RU_996_484_7	(HAL_RU(HAL_EHT_RU_996, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0))
+
+/* MRU-996x2+484 */
+#define HAL_EHT_RU_996x2_484_0	(HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_1	(HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_2	(HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_3	(HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_4	(HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1))
+#define HAL_EHT_RU_996x2_484_5	(HAL_RU(HAL_EHT_RU_996x2, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0))
+#define HAL_EHT_RU_996x2_484_6	(HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
+#define HAL_EHT_RU_996x2_484_7	(HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
+#define HAL_EHT_RU_996x2_484_8	(HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
+#define HAL_EHT_RU_996x2_484_9	(HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_3, 0))
+#define HAL_EHT_RU_996x2_484_10	(HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1))
+#define HAL_EHT_RU_996x2_484_11	(HAL_RU(HAL_EHT_RU_996x2, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x2, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0))
+
+/* MRU-996x3+484 */
+#define HAL_EHT_RU_996x3_484_0	(HAL_RU(HAL_EHT_RU_484, HAL_80_0, 1) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_1	(HAL_RU(HAL_EHT_RU_484, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_2	(HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_1, 1) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_3	(HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_4	(HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_2, 1) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_5	(HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_3, 0))
+#define HAL_EHT_RU_996x3_484_6	(HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_3, 1))
+#define HAL_EHT_RU_996x3_484_7	(HAL_RU(HAL_EHT_RU_996x3, HAL_80_0, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_1, 0) |	\
+				 HAL_RU(HAL_EHT_RU_996x3, HAL_80_2, 0) |	\
+				 HAL_RU(HAL_EHT_RU_484, HAL_80_3, 0))
+
+#define HAL_RU_PER80(ru_per80, num_80mhz, ru_idx_per80mhz) \
+			(HAL_RU(ru_per80, num_80mhz, ru_idx_per80mhz))
+
+#define RU_INVALID		0
+#define RU_26			1
+#define RU_52			2
+#define RU_106			4
+#define RU_242			9
+#define RU_484			18
+#define RU_996			37
+#define RU_2X996		74
+#define RU_3X996		111
+#define RU_4X996		148
+#define RU_52_26		(RU_52 + RU_26)
+#define RU_106_26		(RU_106 + RU_26)
+#define RU_484_242		(RU_484 + RU_242)
+#define RU_996_484		(RU_996 + RU_484)
+#define RU_996_484_242		(RU_996 + RU_484_242)
+#define RU_2X996_484		(RU_2X996 + RU_484)
+#define RU_3X996_484		(RU_3X996 + RU_484)
+
+enum ath12k_eht_ru_size {
+	ATH12K_EHT_RU_26,
+	ATH12K_EHT_RU_52,
+	ATH12K_EHT_RU_106,
+	ATH12K_EHT_RU_242,
+	ATH12K_EHT_RU_484,
+	ATH12K_EHT_RU_996,
+	ATH12K_EHT_RU_996x2,
+	ATH12K_EHT_RU_996x4,
+	ATH12K_EHT_RU_52_26,
+	ATH12K_EHT_RU_106_26,
+	ATH12K_EHT_RU_484_242,
+	ATH12K_EHT_RU_996_484,
+	ATH12K_EHT_RU_996_484_242,
+	ATH12K_EHT_RU_996x2_484,
+	ATH12K_EHT_RU_996x3,
+	ATH12K_EHT_RU_996x3_484,
+
+	/* Keep last */
+	ATH12K_EHT_RU_INVALID,
+};
+
+#define HAL_RX_RU_ALLOC_TYPE_MAX	ATH12K_EHT_RU_INVALID
+
 static inline
 enum nl80211_he_ru_alloc ath12k_he_ru_tones_to_nl80211_he_ru_alloc(u16 ru_tones)
 {
@@ -660,6 +1104,9 @@
 	case RU_996:
 		ret = NL80211_RATE_INFO_HE_RU_ALLOC_996;
 		break;
+	case RU_2X996:
+		ret = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
+		break;
 	case RU_26:
 		fallthrough;
 	default:
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/hal_tx.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hal_tx.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/hal_tx.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hal_tx.h	2025-07-01 14:10:42.736046589 +0200
@@ -1,7 +1,8 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022, 2024-2025 Qualcomm Innovation Center, Inc.
+ * All rights reserved.
  */
 
 #ifndef ATH12K_HAL_TX_H
@@ -63,7 +64,12 @@
 	u8 try_cnt;
 	u8 tid;
 	u16 peer_id;
-	u32 rate_stats;
+	enum hal_tx_rate_stats_pkt_type pkt_type;
+	enum hal_tx_rate_stats_sgi sgi;
+	enum ath12k_supported_bw bw;
+	u8 mcs;
+	u16 tones;
+	u8 ofdma;
 };
 
 #define HAL_TX_PHY_DESC_INFO0_BF_TYPE		GENMASK(17, 16)
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/hw.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hw.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/hw.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hw.c	2025-09-25 17:40:34.155360224 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/types.h>
@@ -543,7 +543,11 @@
 		ATH12K_TX_RING_MASK_3,
 	},
 	.rx_mon_dest = {
-		0, 0, 0,
+		0, 0, 0, 0,
+		0, 0, 0, 0,
+		ATH12K_RX_MON_RING_MASK_0,
+		ATH12K_RX_MON_RING_MASK_1,
+		ATH12K_RX_MON_RING_MASK_2,
 	},
 	.rx = {
 		0, 0, 0, 0,
@@ -928,6 +932,7 @@
 		.iova_mask = 0,
 
 		.supports_aspm = false,
+		.handle_beacon_miss = true,
 	},
 	{
 		.name = "wcn7850 hw2.0",
@@ -1008,6 +1013,7 @@
 		.iova_mask = ATH12K_PCIE_MAX_PAYLOAD_SIZE - 1,
 
 		.supports_aspm = true,
+		.handle_beacon_miss = false,
 	},
 	{
 		.name = "qcn9274 hw2.0",
@@ -1035,7 +1041,7 @@
 
 		.hal_params = &ath12k_hw_hal_params_qcn9274,
 
-		.rxdma1_enable = false,
+		.rxdma1_enable = true,
 		.num_rxdma_per_pdev = 1,
 		.num_rxdma_dst_ring = 0,
 		.rx_mac_buf_ring = false,
@@ -1045,7 +1051,7 @@
 					BIT(NL80211_IFTYPE_AP) |
 					BIT(NL80211_IFTYPE_MESH_POINT) |
 					BIT(NL80211_IFTYPE_AP_VLAN),
-		.supports_monitor = false,
+		.supports_monitor = true,
 
 		.idle_ps = false,
 		.download_calib = true,
@@ -1084,6 +1090,7 @@
 		.iova_mask = 0,
 
 		.supports_aspm = false,
+		.handle_beacon_miss = true,
 	},
 };
 
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/hw.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hw.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/hw.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/hw.h	2025-09-25 17:40:34.155360224 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_HW_H
@@ -15,8 +15,28 @@
 
 /* Target configuration defines */
 
+#ifdef CONFIG_ATH12K_MEM_PROFILE_DEFAULT
 /* Num VDEVS per radio */
 #define TARGET_NUM_VDEVS	(16 + 1)
+#define ATH12K_QMI_TARGET_MEM_MODE      ATH12K_QMI_TARGET_MEM_MODE_DEFAULT
+
+/* Max num of stations for Single Radio mode */
+#define TARGET_NUM_STATIONS_SINGLE	512
+
+/* Max num of stations for DBS */
+#define TARGET_NUM_STATIONS_DBS		128
+
+#elif defined(CONFIG_ATH12K_MEM_PROFILE_512M)
+/* Num VDEVS per radio */
+#define TARGET_NUM_VDEVS        (8 + 1)
+#define ATH12K_QMI_TARGET_MEM_MODE      ATH12K_QMI_TARGET_MEM_MODE_512M
+
+/* Max num of stations for Single Radio mode */
+#define TARGET_NUM_STATIONS_SINGLE	128
+
+/* Max num of stations for DBS */
+#define TARGET_NUM_STATIONS_DBS		64
+#endif
 
 #define TARGET_NUM_PEERS_PDEV_SINGLE	(TARGET_NUM_STATIONS_SINGLE + \
 					 TARGET_NUM_VDEVS)
@@ -34,12 +54,6 @@
 /* Num of peers for DBS_SBS */
 #define TARGET_NUM_PEERS_DBS_SBS	(3 * TARGET_NUM_PEERS_PDEV_DBS_SBS)
 
-/* Max num of stations for Single Radio mode */
-#define TARGET_NUM_STATIONS_SINGLE	512
-
-/* Max num of stations for DBS */
-#define TARGET_NUM_STATIONS_DBS		128
-
 /* Max num of stations for DBS_SBS */
 #define TARGET_NUM_STATIONS_DBS_SBS	128
 
@@ -90,9 +104,11 @@
 
 #define ATH12K_BOARD_MAGIC		"QCA-ATH12K-BOARD"
 #define ATH12K_BOARD_API2_FILE		"board-2.bin"
+#define ATH12K_BOARD_OVERRIDE_FILE	"board-id-override.txt"
 #define ATH12K_DEFAULT_BOARD_FILE	"board.bin"
 #define ATH12K_DEFAULT_CAL_FILE		"caldata.bin"
 #define ATH12K_AMSS_FILE		"amss.bin"
+#define ATH12K_AMSS_DUAL_FILE		"amss_dualmac.bin"
 #define ATH12K_M3_FILE			"m3.bin"
 #define ATH12K_REGDB_FILE_NAME		"regdb.bin"
 
@@ -220,6 +236,7 @@
 	bool supports_dynamic_smps_6ghz;
 
 	u32 iova_mask;
+	bool handle_beacon_miss;
 };
 
 struct ath12k_hw_ops {
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/mac.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/mac.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/mac.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/mac.c	2025-09-29 14:23:07.609732430 +0200
@@ -1,10 +1,11 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <net/mac80211.h>
+#include <net/cfg80211.h>
 #include <linux/etherdevice.h>
 
 #include "mac.h"
@@ -14,10 +15,12 @@
 #include "hw.h"
 #include "dp_tx.h"
 #include "dp_rx.h"
+#include "testmode.h"
 #include "peer.h"
 #include "debugfs.h"
 #include "hif.h"
 #include "wow.h"
+#include "debugfs_sta.h"
 
 #define CHAN2G(_channel, _freq, _flags) { \
 	.band                   = NL80211_BAND_2GHZ, \
@@ -206,7 +209,7 @@
 			[NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40,
 			[NL80211_CHAN_WIDTH_80] = MODE_11BE_EHT80,
 			[NL80211_CHAN_WIDTH_160] = MODE_11BE_EHT160,
-			[NL80211_CHAN_WIDTH_80P80] = MODE_11BE_EHT80_80,
+			[NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
 			[NL80211_CHAN_WIDTH_320] = MODE_11BE_EHT320,
 	},
 	[NL80211_BAND_6GHZ] = {
@@ -217,7 +220,7 @@
 			[NL80211_CHAN_WIDTH_40] = MODE_11BE_EHT40,
 			[NL80211_CHAN_WIDTH_80] = MODE_11BE_EHT80,
 			[NL80211_CHAN_WIDTH_160] = MODE_11BE_EHT160,
-			[NL80211_CHAN_WIDTH_80P80] = MODE_11BE_EHT80_80,
+			[NL80211_CHAN_WIDTH_80P80] = MODE_UNKNOWN,
 			[NL80211_CHAN_WIDTH_320] = MODE_11BE_EHT320,
 	},
 
@@ -226,7 +229,8 @@
 const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default = {
 	.rx_filter = HTT_RX_FILTER_TLV_FLAGS_MPDU_START |
 		     HTT_RX_FILTER_TLV_FLAGS_PPDU_END |
-		     HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE,
+		     HTT_RX_FILTER_TLV_FLAGS_PPDU_END_STATUS_DONE |
+		     HTT_RX_FILTER_TLV_FLAGS_PPDU_START_USER_INFO,
 	.pkt_filter_flags0 = HTT_RX_FP_MGMT_FILTER_FLAGS0,
 	.pkt_filter_flags1 = HTT_RX_FP_MGMT_FILTER_FLAGS1,
 	.pkt_filter_flags2 = HTT_RX_FP_CTRL_FILTER_FLASG2,
@@ -336,6 +340,82 @@
 	return "<unknown>";
 }
 
+u16 ath12k_mac_he_convert_tones_to_ru_tones(u16 tones)
+{
+	switch (tones) {
+	case 26:
+		return RU_26;
+	case 52:
+		return RU_52;
+	case 106:
+		return RU_106;
+	case 242:
+		return RU_242;
+	case 484:
+		return RU_484;
+	case 996:
+		return RU_996;
+	case (996 * 2):
+		return RU_2X996;
+	default:
+		return RU_26;
+	}
+}
+
+enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi)
+{
+	switch (sgi) {
+	case RX_MSDU_START_SGI_0_8_US:
+		return NL80211_RATE_INFO_EHT_GI_0_8;
+	case RX_MSDU_START_SGI_1_6_US:
+		return NL80211_RATE_INFO_EHT_GI_1_6;
+	case RX_MSDU_START_SGI_3_2_US:
+		return NL80211_RATE_INFO_EHT_GI_3_2;
+	default:
+		return NL80211_RATE_INFO_EHT_GI_0_8;
+	}
+}
+
+enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones)
+{
+	switch (ru_tones) {
+	case 26:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+	case 52:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_52;
+	case (52 + 26):
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_52P26;
+	case 106:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_106;
+	case (106 + 26):
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_106P26;
+	case 242:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_242;
+	case 484:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_484;
+	case (484 + 242):
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_484P242;
+	case 996:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_996;
+	case (996 + 484):
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_996P484;
+	case (996 + 484 + 242):
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_996P484P242;
+	case (2 * 996):
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_2x996;
+	case (2 * 996 + 484):
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_2x996P484;
+	case (3 * 996):
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_3x996;
+	case (3 * 996 + 484):
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_3x996P484;
+	case (4 * 996):
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_4x996;
+	default:
+		return NL80211_RATE_INFO_EHT_RU_ALLOC_26;
+	}
+}
+
 enum rate_info_bw
 ath12k_mac_bw_to_mac80211_bw(enum ath12k_supported_bw bw)
 {
@@ -441,6 +521,18 @@
 	return 1;
 }
 
+static u32
+ath12k_mac_max_he_nss(const u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+{
+	int nss;
+
+	for (nss = NL80211_HE_NSS_MAX - 1; nss >= 0; nss--)
+		if (he_mcs_mask[nss])
+			return nss + 1;
+
+	return 1;
+}
+
 static u8 ath12k_parse_mpdudensity(u8 mpdudensity)
 {
 /*  From IEEE Std 802.11-2020 defined values for "Minimum MPDU Start Spacing":
@@ -501,6 +593,61 @@
 	return 0;
 }
 
+static struct ath12k_link_vif *
+ath12k_mac_get_tx_arvif(struct ath12k_link_vif *arvif,
+			struct ieee80211_bss_conf *link_conf)
+{
+	struct ieee80211_bss_conf *tx_bss_conf;
+	struct ath12k_vif *tx_ahvif;
+
+	lockdep_assert_wiphy(ath12k_ar_to_hw(arvif->ar)->wiphy);
+
+	tx_bss_conf = wiphy_dereference(ath12k_ar_to_hw(arvif->ar)->wiphy,
+					link_conf->tx_bss_conf);
+	if (tx_bss_conf) {
+		tx_ahvif = ath12k_vif_to_ahvif(tx_bss_conf->vif);
+		return wiphy_dereference(tx_ahvif->ah->hw->wiphy,
+					 tx_ahvif->link[tx_bss_conf->link_id]);
+	}
+
+	return NULL;
+}
+
+struct ieee80211_bss_conf *
+ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif)
+{
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	struct ieee80211_bss_conf *link_conf;
+	struct ath12k *ar = arvif->ar;
+
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+		return NULL;
+
+	link_conf = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+				      vif->link_conf[arvif->link_id]);
+
+	return link_conf;
+}
+
+static struct ieee80211_link_sta *ath12k_mac_get_link_sta(struct ath12k_link_sta *arsta)
+{
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+	struct ieee80211_link_sta *link_sta;
+
+	lockdep_assert_wiphy(ahsta->ahvif->ah->hw->wiphy);
+
+	if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+		return NULL;
+
+	link_sta = wiphy_dereference(ahsta->ahvif->ah->hw->wiphy,
+				     sta->link[arsta->link_id]);
+
+	return link_sta;
+}
+
 static bool ath12k_mac_bitrate_is_cck(int bitrate)
 {
 	switch (bitrate) {
@@ -639,6 +786,9 @@
 		return NULL;
 
 	for (i = 0; i < ab->num_radios; i++) {
+		if (ab->fw_mode == ATH12K_FIRMWARE_MODE_FTM)
+			pdev = &ab->pdevs[i];
+		else
 		pdev = rcu_dereference(ab->pdevs_active[i]);
 
 		if (pdev && pdev->pdev_id == pdev_id)
@@ -648,6 +798,18 @@
 	return NULL;
 }
 
+static bool ath12k_mac_is_ml_arvif(struct ath12k_link_vif *arvif)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+
+	lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
+
+	if (ahvif->vif->valid_links & BIT(arvif->link_id))
+		return true;
+
+	return false;
+}
+
 static struct ath12k *ath12k_mac_get_ar_by_chan(struct ieee80211_hw *hw,
 						struct ieee80211_channel *channel)
 {
@@ -661,8 +823,8 @@
 		return ar;
 
 	for_each_ar(ah, ar, i) {
-		if (channel->center_freq >= ar->freq_low &&
-		    channel->center_freq <= ar->freq_high)
+		if (channel->center_freq >= KHZ_TO_MHZ(ar->freq_range.start_freq) &&
+		    channel->center_freq <= KHZ_TO_MHZ(ar->freq_range.end_freq))
 			return ar;
 	}
 	return NULL;
@@ -677,12 +839,15 @@
 	return ath12k_mac_get_ar_by_chan(hw, ctx->def.chan);
 }
 
-static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
-					   struct ieee80211_vif *vif)
+struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    u8 link_id)
 {
 	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
-	struct ath12k_link_vif *arvif = &ahvif->deflink;
 	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+	struct ath12k_link_vif *arvif;
+
+	lockdep_assert_wiphy(hw->wiphy);
 
 	/* If there is one pdev within ah, then we return
 	 * ar directly.
@@ -690,12 +855,27 @@
 	if (ah->num_radio == 1)
 		return ah->radio;
 
-	if (arvif->is_created)
+	if (!(ahvif->links_map & BIT(link_id)))
+		return NULL;
+
+	arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+	if (arvif && arvif->is_created)
 		return arvif->ar;
 
 	return NULL;
 }
 
+void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw,
+					  struct ieee80211_chanctx_conf *conf,
+					  void *data)
+{
+	struct ath12k_mac_get_any_chanctx_conf_arg *arg = data;
+	struct ath12k *ctx_ar = ath12k_get_ar_by_ctx(hw, conf);
+
+	if (ctx_ar == arg->ar)
+		arg->chanctx_conf = conf;
+}
+
 static struct ath12k_link_vif *ath12k_mac_get_vif_up(struct ath12k *ar)
 {
 	struct ath12k_link_vif *arvif;
@@ -714,12 +894,12 @@
 {
 	switch (band1) {
 	case NL80211_BAND_2GHZ:
-		if (band2 & WMI_HOST_WLAN_2G_CAP)
+		if (band2 & WMI_HOST_WLAN_2GHZ_CAP)
 			return true;
 		break;
 	case NL80211_BAND_5GHZ:
 	case NL80211_BAND_6GHZ:
-		if (band2 & WMI_HOST_WLAN_5G_CAP)
+		if (band2 & WMI_HOST_WLAN_5GHZ_CAP)
 			return true;
 		break;
 	default:
@@ -820,7 +1000,7 @@
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower to set in hw %d\n",
 		   txpower / 2);
 
-	if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) &&
+	if ((pdev->cap.supported_bands & WMI_HOST_WLAN_2GHZ_CAP) &&
 	    ar->txpower_limit_2g != txpower) {
 		param = WMI_PDEV_PARAM_TXPOWER_LIMIT2G;
 		ret = ath12k_wmi_pdev_set_param(ar, param,
@@ -830,7 +1010,7 @@
 		ar->txpower_limit_2g = txpower;
 	}
 
-	if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) &&
+	if ((pdev->cap.supported_bands & WMI_HOST_WLAN_5GHZ_CAP) &&
 	    ar->txpower_limit_5g != txpower) {
 		param = WMI_PDEV_PARAM_TXPOWER_LIMIT5G;
 		ret = ath12k_wmi_pdev_set_param(ar, param,
@@ -1112,12 +1292,12 @@
 	arg.pdev_id = pdev->pdev_id;
 	arg.if_stats_id = ATH12K_INVAL_VDEV_STATS_ID;
 
-	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
 		arg.chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
 		arg.chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
 	}
 
-	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
 		arg.chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
 		arg.chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
 	}
@@ -1209,9 +1389,15 @@
 		return ret;
 	}
 
+	ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, false);
+	if (ret) {
+		ath12k_warn(ar->ab, "fail to set monitor filter: %d\n", ret);
+		return ret;
+	}
+
 	ar->monitor_started = true;
 	ar->num_started_vdevs++;
-	ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, false);
+
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac monitor started ret %d\n", ret);
 
 	return ret;
@@ -1239,7 +1425,7 @@
 	return ret;
 }
 
-static int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif)
+int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif)
 {
 	struct ath12k_vif *ahvif = arvif->ahvif;
 	struct ath12k *ar = arvif->ar;
@@ -1269,8 +1455,8 @@
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "vdev %pM stopped, vdev_id %d\n",
 		   ahvif->vif->addr, arvif->vdev_id);
 
-	if (test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) {
-		clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+	if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
+		clear_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags);
 		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "CAC Stopped for vdev %d\n",
 			   arvif->vdev_id);
 	}
@@ -1391,11 +1577,13 @@
 	return 0;
 }
 
-static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif, struct sk_buff *bcn,
+static void ath12k_mac_set_arvif_ies(struct ath12k_link_vif *arvif,
+				     struct ath12k_link_vif *tx_arvif,
+					 struct sk_buff *bcn,
 				     u8 bssid_index, bool *nontx_profile_found)
 {
 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)bcn->data;
-	const struct element *elem, *nontx, *index, *nie;
+	const struct element *elem, *nontx, *index, *nie, *ext_cap_ie;
 	const u8 *start, *tail;
 	u16 rem_len;
 	u8 i;
@@ -1413,6 +1601,11 @@
 				    start, rem_len))
 		arvif->wpaie_present = true;
 
+	ext_cap_ie = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, start, rem_len);
+	if (ext_cap_ie && ext_cap_ie->datalen >= 11 &&
+	    (ext_cap_ie->data[10] & WLAN_EXT_CAPA11_BCN_PROTECT))
+		tx_arvif->beacon_prot = true;
+
 	/* Return from here for the transmitted profile */
 	if (!bssid_index)
 		return;
@@ -1455,6 +1648,19 @@
 
 			if (index->data[0] == bssid_index) {
 				*nontx_profile_found = true;
+
+				/* Check if nontx BSS has beacon protection enabled */
+				if (!tx_arvif->beacon_prot) {
+					ext_cap_ie =
+					    cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY,
+							       nontx->data,
+							       nontx->datalen);
+					if (ext_cap_ie && ext_cap_ie->datalen >= 11 &&
+					    (ext_cap_ie->data[10] &
+					     WLAN_EXT_CAPA11_BCN_PROTECT))
+						tx_arvif->beacon_prot = true;
+				}
+
 				if (cfg80211_find_ie(WLAN_EID_RSN,
 						     nontx->data,
 						     nontx->datalen)) {
@@ -1483,22 +1689,19 @@
 	}
 }
 
-static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif)
+static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_link_vif *arvif,
+					 struct ath12k_link_vif *tx_arvif,
+					 u8 bssid_index)
 {
-	struct ath12k_vif *ahvif = arvif->ahvif;
-	struct ieee80211_bss_conf *bss_conf = &ahvif->vif->bss_conf;
 	struct ath12k_wmi_bcn_tmpl_ema_arg ema_args;
 	struct ieee80211_ema_beacons *beacons;
-	struct ath12k_link_vif *tx_arvif;
 	bool nontx_profile_found = false;
-	struct ath12k_vif *tx_ahvif;
 	int ret = 0;
 	u8 i;
 
-	tx_ahvif = ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif);
-	tx_arvif = &tx_ahvif->deflink;
 	beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar),
-							 tx_ahvif->vif, 0);
+							 tx_arvif->ahvif->vif,
+							 tx_arvif->link_id);
 	if (!beacons || !beacons->cnt) {
 		ath12k_warn(arvif->ar->ab,
 			    "failed to get ema beacon templates from mac80211\n");
@@ -1506,18 +1709,17 @@
 	}
 
 	if (tx_arvif == arvif)
-		ath12k_mac_set_arvif_ies(arvif, beacons->bcn[0].skb, 0, NULL);
+		ath12k_mac_set_arvif_ies(arvif, tx_arvif, beacons->bcn[0].skb, 0, NULL);
 
 	for (i = 0; i < beacons->cnt; i++) {
 		if (tx_arvif != arvif && !nontx_profile_found)
-			ath12k_mac_set_arvif_ies(arvif, beacons->bcn[i].skb,
-						 bss_conf->bssid_index,
+			ath12k_mac_set_arvif_ies(arvif, tx_arvif, beacons->bcn[i].skb,
+						 bssid_index,
 						 &nontx_profile_found);
 
 		ema_args.bcn_cnt = beacons->cnt;
 		ema_args.bcn_index = i;
-		ret = ath12k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id,
-					  &beacons->bcn[i].offs,
+		ret = ath12k_wmi_bcn_tmpl(tx_arvif, &beacons->bcn[i].offs,
 					  beacons->bcn[i].skb, &ema_args);
 		if (ret) {
 			ath12k_warn(tx_arvif->ar->ab,
@@ -1530,7 +1732,7 @@
 	if (tx_arvif != arvif && !nontx_profile_found)
 		ath12k_warn(arvif->ar->ab,
 			    "nontransmitted bssid index %u not found in beacon template\n",
-			    bss_conf->bssid_index);
+			    bssid_index);
 
 	ieee80211_beacon_free_ema_list(beacons);
 	return ret;
@@ -1540,11 +1742,11 @@
 {
 	struct ath12k_vif *ahvif = arvif->ahvif;
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
-	struct ath12k_link_vif *tx_arvif = arvif;
+	struct ieee80211_bss_conf *link_conf;
+	struct ath12k_link_vif *tx_arvif;
 	struct ath12k *ar = arvif->ar;
 	struct ath12k_base *ab = ar->ab;
 	struct ieee80211_mutable_offsets offs = {};
-	struct ath12k_vif *tx_ahvif = ahvif;
 	bool nontx_profile_found = false;
 	struct sk_buff *bcn;
 	int ret;
@@ -1552,28 +1754,38 @@
 	if (ahvif->vdev_type != WMI_VDEV_TYPE_AP)
 		return 0;
 
-	if (vif->mbssid_tx_vif) {
-		tx_ahvif = ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
-		tx_arvif = &tx_ahvif->deflink;
+	link_conf = ath12k_mac_get_link_bss_conf(arvif);
+	if (!link_conf) {
+		ath12k_warn(ar->ab, "unable to access bss link conf to set bcn tmpl for vif %pM link %u\n",
+			    vif->addr, arvif->link_id);
+		return -ENOLINK;
+	}
+
+	tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
+	if (tx_arvif) {
 		if (tx_arvif != arvif && arvif->is_up)
 			return 0;
 
-		if (vif->bss_conf.ema_ap)
-			return ath12k_mac_setup_bcn_tmpl_ema(arvif);
+		if (link_conf->ema_ap)
+			return ath12k_mac_setup_bcn_tmpl_ema(arvif, tx_arvif,
+							     link_conf->bssid_index);
+	} else {
+		tx_arvif = arvif;
 	}
 
-	bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_ahvif->vif,
-					    &offs, 0);
+	bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar),
+					    tx_arvif->ahvif->vif,
+					    &offs, tx_arvif->link_id);
 	if (!bcn) {
 		ath12k_warn(ab, "failed to get beacon template from mac80211\n");
 		return -EPERM;
 	}
 
 	if (tx_arvif == arvif) {
-		ath12k_mac_set_arvif_ies(arvif, bcn, 0, NULL);
+		ath12k_mac_set_arvif_ies(arvif, tx_arvif, bcn, 0, NULL);
 	} else {
-		ath12k_mac_set_arvif_ies(arvif, bcn,
-					 ahvif->vif->bss_conf.bssid_index,
+		ath12k_mac_set_arvif_ies(arvif, tx_arvif, bcn,
+					 link_conf->bssid_index,
 					 &nontx_profile_found);
 		if (!nontx_profile_found)
 			ath12k_warn(ab,
@@ -1603,7 +1815,7 @@
 		}
 	}
 
-	ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, NULL);
+	ret = ath12k_wmi_bcn_tmpl(arvif, &offs, bcn, NULL);
 
 	if (ret)
 		ath12k_warn(ab, "failed to submit beacon template command: %d\n",
@@ -1619,7 +1831,9 @@
 {
 	struct ath12k_wmi_vdev_up_params params = {};
 	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k_link_vif *tx_arvif;
 	struct ath12k *ar = arvif->ar;
+	struct ieee80211_bss_conf *link_conf;
 	int ret;
 
 	lockdep_assert_wiphy(ath12k_ar_to_hw(arvif->ar)->wiphy);
@@ -1644,16 +1858,21 @@
 
 	ahvif->aid = 0;
 
-	ether_addr_copy(arvif->bssid, info->bssid);
+	ether_addr_copy(arvif->bssid, info->addr);
 
 	params.vdev_id = arvif->vdev_id;
 	params.aid = ahvif->aid;
 	params.bssid = arvif->bssid;
-	if (ahvif->vif->mbssid_tx_vif) {
-		struct ath12k_vif *tx_ahvif =
-			ath12k_vif_to_ahvif(ahvif->vif->mbssid_tx_vif);
-		struct ath12k_link_vif *tx_arvif = &tx_ahvif->deflink;
 
+	link_conf = ath12k_mac_get_link_bss_conf(arvif);
+	if (!link_conf) {
+		ath12k_warn(ar->ab, "unable to access bss link conf to set vdev up params for vif %pM link %u\n",
+			    ahvif->vif->addr, arvif->link_id);
+		return;
+	}
+
+	tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
+	if (tx_arvif) {
 		params.tx_bssid = tx_arvif->bssid;
 		params.nontx_profile_idx = info->bssid_index;
 		params.nontx_profile_cnt = 1 << info->bssid_indicator;
@@ -1701,7 +1920,7 @@
 	u32 *vdev_id = data;
 	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
 	struct ath12k_link_vif *arvif = &ahvif->deflink;
-	struct ath12k *ar = arvif->ar;
+	struct ath12k *ar = ahvif->deflink.ar;
 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
 
 	if (arvif->vdev_id != *vdev_id)
@@ -1749,6 +1968,7 @@
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
 	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
+	struct ieee80211_bss_conf *bss_conf;
 	u32 aid;
 
 	lockdep_assert_wiphy(hw->wiphy);
@@ -1758,14 +1978,22 @@
 	else
 		aid = sta->aid;
 
-	ether_addr_copy(arg->peer_mac, sta->addr);
+	ether_addr_copy(arg->peer_mac, arsta->addr);
 	arg->vdev_id = arvif->vdev_id;
 	arg->peer_associd = aid;
 	arg->auth_flag = true;
 	/* TODO: STA WAR in ath10k for listen interval required? */
 	arg->peer_listen_intval = hw->conf.listen_interval;
 	arg->peer_nss = 1;
-	arg->peer_caps = vif->bss_conf.assoc_capability;
+
+	bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+	if (!bss_conf) {
+		ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc for vif %pM link %u\n",
+			    vif->addr, arvif->link_id);
+		return;
+	}
+
+	arg->peer_caps = bss_conf->assoc_capability;
 }
 
 static void ath12k_peer_assoc_h_crypto(struct ath12k *ar,
@@ -1775,7 +2003,7 @@
 {
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
 	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
-	struct ieee80211_bss_conf *info = &vif->bss_conf;
+	struct ieee80211_bss_conf *info;
 	struct cfg80211_chan_def def;
 	struct cfg80211_bss *bss;
 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
@@ -1784,6 +2012,13 @@
 
 	lockdep_assert_wiphy(hw->wiphy);
 
+	info = ath12k_mac_get_link_bss_conf(arvif);
+	if (!info) {
+		ath12k_warn(ar->ab, "unable to access bss link conf for peer assoc crypto for vif %pM link %u\n",
+			    vif->addr, arvif->link_id);
+		return;
+	}
+
 	if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
 		return;
 
@@ -1839,6 +2074,7 @@
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
 	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
 	struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
+	struct ieee80211_link_sta *link_sta;
 	struct cfg80211_chan_def def;
 	const struct ieee80211_supported_band *sband;
 	const struct ieee80211_rate *rates;
@@ -1853,9 +2089,16 @@
 	if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
 		return;
 
+	link_sta = ath12k_mac_get_link_sta(arsta);
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc rates for sta %pM link %u\n",
+			    sta->addr, arsta->link_id);
+		return;
+	}
+
 	band = def.chan->band;
 	sband = hw->wiphy->bands[band];
-	ratemask = sta->deflink.supp_rates[band];
+	ratemask = link_sta->supp_rates[band];
 	ratemask &= arvif->bitrate_mask.control[band].legacy;
 	rates = sband->bitrates;
 
@@ -1902,7 +2145,8 @@
 {
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
 	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
-	const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+	const struct ieee80211_sta_ht_cap *ht_cap;
+	struct ieee80211_link_sta *link_sta;
 	struct cfg80211_chan_def def;
 	enum nl80211_band band;
 	const u8 *ht_mcs_mask;
@@ -1915,6 +2159,14 @@
 	if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
 		return;
 
+	link_sta = ath12k_mac_get_link_sta(arsta);
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc ht for sta %pM link %u\n",
+			    sta->addr, arsta->link_id);
+		return;
+	}
+
+	ht_cap = &link_sta->ht_cap;
 	if (!ht_cap->ht_supported)
 		return;
 
@@ -1938,14 +2190,20 @@
 	if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
 		arg->ldpc_flag = true;
 
-	if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
+	if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
 		arg->bw_40 = true;
 		arg->peer_rate_caps |= WMI_HOST_RC_CW40_FLAG;
 	}
 
-	if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
-		if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 |
-		    IEEE80211_HT_CAP_SGI_40))
+	/* As firmware handles these two flags (IEEE80211_HT_CAP_SGI_20
+	 * and IEEE80211_HT_CAP_SGI_40) for enabling SGI, reset both
+	 * flags if guard interval is to force Long GI
+	 */
+	if (arvif->bitrate_mask.control[band].gi == NL80211_TXRATE_FORCE_LGI) {
+		arg->peer_ht_caps &= ~(IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40);
+	} else {
+		/* Enable SGI flag if either SGI_20 or SGI_40 is supported */
+		if (ht_cap->cap & (IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40))
 			arg->peer_rate_caps |= WMI_HOST_RC_SGI_FLAG;
 	}
 
@@ -1988,7 +2246,7 @@
 			arg->peer_ht_rates.rates[i] = i;
 	} else {
 		arg->peer_ht_rates.num_rates = n;
-		arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+		arg->peer_nss = min(link_sta->rx_nss, max_nss);
 	}
 
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
@@ -2057,6 +2315,34 @@
 	return tx_mcs_set;
 }
 
+static u8 ath12k_get_nss_160mhz(struct ath12k *ar,
+				u8 max_nss)
+{
+	u8 nss_ratio_info = ar->pdev->cap.nss_ratio_info;
+	u8 max_sup_nss = 0;
+
+	switch (nss_ratio_info) {
+	case WMI_NSS_RATIO_1BY2_NSS:
+		max_sup_nss = max_nss >> 1;
+		break;
+	case WMI_NSS_RATIO_3BY4_NSS:
+		ath12k_warn(ar->ab, "WMI_NSS_RATIO_3BY4_NSS not supported\n");
+		break;
+	case WMI_NSS_RATIO_1_NSS:
+		max_sup_nss = max_nss;
+		break;
+	case WMI_NSS_RATIO_2_NSS:
+		ath12k_warn(ar->ab, "WMI_NSS_RATIO_2_NSS not supported\n");
+		break;
+	default:
+		ath12k_warn(ar->ab, "invalid nss ratio received from fw: %d\n",
+			    nss_ratio_info);
+		break;
+	}
+
+	return max_sup_nss;
+}
+
 static void ath12k_peer_assoc_h_vht(struct ath12k *ar,
 				    struct ath12k_link_vif *arvif,
 				    struct ath12k_link_sta *arsta,
@@ -2064,20 +2350,31 @@
 {
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
 	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
-	const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
+	const struct ieee80211_sta_vht_cap *vht_cap;
+	struct ieee80211_link_sta *link_sta;
 	struct cfg80211_chan_def def;
 	enum nl80211_band band;
-	const u16 *vht_mcs_mask;
+	u16 *vht_mcs_mask;
 	u16 tx_mcs_map;
 	u8 ampdu_factor;
 	u8 max_nss, vht_mcs;
-	int i;
+	int i, vht_nss, nss_idx;
+	bool user_rate_valid = true;
+	u32 rx_nss, tx_nss, nss_160;
 
 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
 
 	if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
 		return;
 
+	link_sta = ath12k_mac_get_link_sta(arsta);
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc vht for sta %pM link %u\n",
+			    sta->addr, arsta->link_id);
+		return;
+	}
+
+	vht_cap = &link_sta->vht_cap;
 	if (!vht_cap->vht_supported)
 		return;
 
@@ -2110,12 +2407,31 @@
 				 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
 					ampdu_factor)) - 1);
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
 		arg->bw_80 = true;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
 		arg->bw_160 = true;
 
+	vht_nss =  ath12k_mac_max_vht_nss(vht_mcs_mask);
+
+	if (vht_nss > link_sta->rx_nss) {
+		user_rate_valid = false;
+		for (nss_idx = link_sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+			if (vht_mcs_mask[nss_idx]) {
+				user_rate_valid = true;
+				break;
+			}
+		}
+	}
+
+	if (!user_rate_valid) {
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+			   "Setting vht range MCS value to peer supported nss:%d for peer %pM\n",
+			   link_sta->rx_nss, arsta->addr);
+		vht_mcs_mask[link_sta->rx_nss - 1] = vht_mcs_mask[vht_nss - 1];
+	}
+
 	/* Calculate peer NSS capability from VHT capabilities if STA
 	 * supports VHT.
 	 */
@@ -2127,7 +2443,7 @@
 		    vht_mcs_mask[i])
 			max_nss = i + 1;
 	}
-	arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+	arg->peer_nss = min(link_sta->rx_nss, max_nss);
 	arg->rx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
 	arg->rx_mcs_set = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
 	arg->tx_max_rate = __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
@@ -2149,10 +2465,90 @@
 	/* TODO:  Check */
 	arg->tx_max_mcs_nss = 0xFF;
 
-	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
-		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);
+	if (arg->peer_phymode == MODE_11AC_VHT160) {
+		tx_nss = ath12k_get_nss_160mhz(ar, max_nss);
+		rx_nss = min(arg->peer_nss, tx_nss);
+		arg->peer_bw_rxnss_override = ATH12K_BW_NSS_MAP_ENABLE;
+
+		if (!rx_nss) {
+			ath12k_warn(ar->ab, "invalid max_nss\n");
+			return;
+		}
+
+		nss_160 = u32_encode_bits(rx_nss - 1, ATH12K_PEER_RX_NSS_160MHZ);
+		arg->peer_bw_rxnss_override |= nss_160;
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "mac vht peer %pM max_mpdu %d flags 0x%x nss_override 0x%x\n",
+		   arsta->addr, arg->peer_max_mpdu, arg->peer_flags,
+		   arg->peer_bw_rxnss_override);
+}
 
-	/* TODO: rxnss_override */
+static int ath12k_mac_get_max_he_mcs_map(u16 mcs_map, int nss)
+{
+	switch ((mcs_map >> (2 * nss)) & 0x3) {
+	case IEEE80211_HE_MCS_SUPPORT_0_7: return BIT(8) - 1;
+	case IEEE80211_HE_MCS_SUPPORT_0_9: return BIT(10) - 1;
+	case IEEE80211_HE_MCS_SUPPORT_0_11: return BIT(12) - 1;
+	}
+	return 0;
+}
+
+static u16 ath12k_peer_assoc_h_he_limit(u16 tx_mcs_set,
+					const u16 *he_mcs_limit)
+{
+	int idx_limit;
+	int nss;
+	u16 mcs_map;
+	u16 mcs;
+
+	for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++) {
+		mcs_map = ath12k_mac_get_max_he_mcs_map(tx_mcs_set, nss) &
+			he_mcs_limit[nss];
+
+		if (mcs_map)
+			idx_limit = fls(mcs_map) - 1;
+		else
+			idx_limit = -1;
+
+		switch (idx_limit) {
+		case 0 ... 7:
+			mcs = IEEE80211_HE_MCS_SUPPORT_0_7;
+			break;
+		case 8:
+		case 9:
+			mcs = IEEE80211_HE_MCS_SUPPORT_0_9;
+			break;
+		case 10:
+		case 11:
+			mcs = IEEE80211_HE_MCS_SUPPORT_0_11;
+			break;
+		default:
+			WARN_ON(1);
+			fallthrough;
+		case -1:
+			mcs = IEEE80211_HE_MCS_NOT_SUPPORTED;
+			break;
+		}
+
+		tx_mcs_set &= ~(0x3 << (nss * 2));
+		tx_mcs_set |= mcs << (nss * 2);
+	}
+
+	return tx_mcs_set;
+}
+
+static bool
+ath12k_peer_assoc_h_he_masked(const u16 he_mcs_mask[NL80211_HE_NSS_MAX])
+{
+	int nss;
+
+	for (nss = 0; nss < NL80211_HE_NSS_MAX; nss++)
+		if (he_mcs_mask[nss])
+			return false;
+
+	return true;
 }
 
 static void ath12k_peer_assoc_h_he(struct ath12k *ar,
@@ -2162,18 +2558,52 @@
 {
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
 	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
-	const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+	const struct ieee80211_sta_he_cap *he_cap;
+	struct ieee80211_bss_conf *link_conf;
+	struct ieee80211_link_sta *link_sta;
+	struct cfg80211_chan_def def;
 	int i;
 	u8 ampdu_factor, max_nss;
 	u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED;
 	u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED;
 	u16 mcs_160_map, mcs_80_map;
+	u8 link_id = arvif->link_id;
 	bool support_160;
-	u16 v;
+	enum nl80211_band band;
+	u16 *he_mcs_mask;
+	u8 he_mcs;
+	u16 he_tx_mcs = 0, v = 0;
+	int he_nss, nss_idx;
+	bool user_rate_valid = true;
+	u32 rx_nss, tx_nss, nss_160;
 
+	if (WARN_ON(ath12k_mac_vif_link_chan(vif, link_id, &def)))
+		return;
+
+	link_conf = ath12k_mac_get_link_bss_conf(arvif);
+	if (!link_conf) {
+		ath12k_warn(ar->ab, "unable to access bss link conf in peer assoc he for vif %pM link %u",
+			    vif->addr, link_id);
+		return;
+	}
+
+	link_sta = ath12k_mac_get_link_sta(arsta);
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+			    sta->addr, arsta->link_id);
+		return;
+	}
+
+	he_cap = &link_sta->he_cap;
 	if (!he_cap->has_he)
 		return;
 
+	band = def.chan->band;
+	he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+
+	if (ath12k_peer_assoc_h_he_masked(he_mcs_mask))
+		return;
+
 	arg->he_flag = true;
 
 	support_160 = !!(he_cap->he_cap_elem.phy_cap_info[0] &
@@ -2208,13 +2638,13 @@
 	else
 		max_nss = rx_mcs_80;
 
-	arg->peer_nss = min(sta->deflink.rx_nss, max_nss);
+	arg->peer_nss = min(link_sta->rx_nss, max_nss);
 
 	memcpy(&arg->peer_he_cap_macinfo, he_cap->he_cap_elem.mac_cap_info,
 	       sizeof(he_cap->he_cap_elem.mac_cap_info));
 	memcpy(&arg->peer_he_cap_phyinfo, he_cap->he_cap_elem.phy_cap_info,
 	       sizeof(he_cap->he_cap_elem.phy_cap_info));
-	arg->peer_he_ops = vif->bss_conf.he_oper.params;
+	arg->peer_he_ops = link_conf->he_oper.params;
 
 	/* the top most byte is used to indicate BSS color info */
 	arg->peer_he_ops &= 0xffffff;
@@ -2235,10 +2665,10 @@
 				   IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);
 
 	if (ampdu_factor) {
-		if (sta->deflink.vht_cap.vht_supported)
+		if (link_sta->vht_cap.vht_supported)
 			arg->peer_max_mpdu = (1 << (IEEE80211_HE_VHT_MAX_AMPDU_FACTOR +
 						    ampdu_factor)) - 1;
-		else if (sta->deflink.ht_cap.ht_supported)
+		else if (link_sta->ht_cap.ht_supported)
 			arg->peer_max_mpdu = (1 << (IEEE80211_HE_HT_MAX_AMPDU_FACTOR +
 						    ampdu_factor)) - 1;
 	}
@@ -2279,25 +2709,36 @@
 	if (he_cap->he_cap_elem.mac_cap_info[0] & IEEE80211_HE_MAC_CAP0_TWT_REQ)
 		arg->twt_requester = true;
 
-	switch (sta->deflink.bandwidth) {
-	case IEEE80211_STA_RX_BW_160:
-		if (he_cap->he_cap_elem.phy_cap_info[0] &
-		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) {
-			v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_80p80);
-			arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+	he_nss = ath12k_mac_max_he_nss(he_mcs_mask);
 
-			v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80p80);
-			arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80_80] = v;
+	if (he_nss > link_sta->rx_nss) {
+		user_rate_valid = false;
+		for (nss_idx = link_sta->rx_nss - 1; nss_idx >= 0; nss_idx--) {
+			if (he_mcs_mask[nss_idx]) {
+				user_rate_valid = true;
+				break;
+			}
+		}
+	}
 
-			arg->peer_he_mcs_count++;
+	if (!user_rate_valid) {
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+			   "Setting he range MCS value to peer supported nss:%d for peer %pM\n",
+			   link_sta->rx_nss, arsta->addr);
+		he_mcs_mask[link_sta->rx_nss - 1] = he_mcs_mask[he_nss - 1];
 		}
+
+	switch (link_sta->bandwidth) {
+	case IEEE80211_STA_RX_BW_160:
 		v = le16_to_cpu(he_cap->he_mcs_nss_supp.rx_mcs_160);
 		arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
 
-		v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_160);
+		v = ath12k_peer_assoc_h_he_limit(v, he_mcs_mask);
 		arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_160] = v;
 
 		arg->peer_he_mcs_count++;
+		if (!he_tx_mcs)
+			he_tx_mcs = v;
 		fallthrough;
 
 	default:
@@ -2305,11 +2746,53 @@
 		arg->peer_he_rx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
 
 		v = le16_to_cpu(he_cap->he_mcs_nss_supp.tx_mcs_80);
+		v = ath12k_peer_assoc_h_he_limit(v, he_mcs_mask);
 		arg->peer_he_tx_mcs_set[WMI_HECAP_TXRX_MCS_NSS_IDX_80] = v;
 
 		arg->peer_he_mcs_count++;
+		if (!he_tx_mcs)
+			he_tx_mcs = v;
 		break;
 	}
+
+	/* Calculate peer NSS capability from HE capabilities if STA
+	 * supports HE.
+	 */
+	for (i = 0, max_nss = 0, he_mcs = 0; i < NL80211_HE_NSS_MAX; i++) {
+		he_mcs = he_tx_mcs >> (2 * i) & 3;
+
+		/* In case of fixed rates, MCS Range in he_tx_mcs might have
+		 * unsupported range, with he_mcs_mask set, so check either of them
+		 * to find nss.
+		 */
+		if (he_mcs != IEEE80211_HE_MCS_NOT_SUPPORTED ||
+		    he_mcs_mask[i])
+			max_nss = i + 1;
+	}
+	arg->peer_nss = min(link_sta->rx_nss, max_nss);
+	max_nss = min(max_nss, ar->num_tx_chains);
+
+	if (arg->peer_phymode == MODE_11AX_HE160) {
+		tx_nss = ath12k_get_nss_160mhz(ar, ar->num_tx_chains);
+		rx_nss = min(arg->peer_nss, tx_nss);
+
+		arg->peer_nss = min(link_sta->rx_nss, ar->num_rx_chains);
+		arg->peer_bw_rxnss_override = ATH12K_BW_NSS_MAP_ENABLE;
+
+		if (!rx_nss) {
+			ath12k_warn(ar->ab, "invalid max_nss\n");
+			return;
+		}
+
+		nss_160 = u32_encode_bits(rx_nss - 1, ATH12K_PEER_RX_NSS_160MHZ);
+		arg->peer_bw_rxnss_override |= nss_160;
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "mac he peer %pM nss %d mcs cnt %d nss_override 0x%x\n",
+		   arsta->addr, arg->peer_nss,
+		   arg->peer_he_mcs_count,
+		   arg->peer_bw_rxnss_override);
 }
 
 static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar,
@@ -2319,7 +2802,8 @@
 {
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
 	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
-	const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
+	const struct ieee80211_sta_he_cap *he_cap;
+	struct ieee80211_link_sta *link_sta;
 	struct cfg80211_chan_def def;
 	enum nl80211_band band;
 	u8 ampdu_factor, mpdu_density;
@@ -2329,22 +2813,31 @@
 
 	band = def.chan->band;
 
-	if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa)
+	link_sta = ath12k_mac_get_link_sta(arsta);
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc he 6ghz for sta %pM link %u\n",
+			    sta->addr, arsta->link_id);
 		return;
+	}
+
+	he_cap = &link_sta->he_cap;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+	if (!arg->he_flag || band != NL80211_BAND_6GHZ || !link_sta->he_6ghz_capa.capa)
+		return;
+
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
 		arg->bw_40 = true;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
 		arg->bw_80 = true;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160)
 		arg->bw_160 = true;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320)
 		arg->bw_320 = true;
 
-	arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa);
+	arg->peer_he_caps_6ghz = le16_to_cpu(link_sta->he_6ghz_capa.capa);
 
 	mpdu_density = u32_get_bits(arg->peer_he_caps_6ghz,
 				    IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
@@ -2369,6 +2862,7 @@
 }
 
 static int ath12k_get_smps_from_capa(const struct ieee80211_sta_ht_cap *ht_cap,
+				     const struct ieee80211_sta_he_cap *he_cap,
 				     const struct ieee80211_he_6ghz_capa *he_6ghz_capa,
 				     int *smps)
 {
@@ -2378,6 +2872,11 @@
 		*smps = le16_get_bits(he_6ghz_capa->capa,
 				      IEEE80211_HE_6GHZ_CAP_SM_PS);
 
+	if (he_cap && he_cap->has_he)
+		if (he_cap->he_cap_elem.mac_cap_info[5] &
+		    IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS)
+			*smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
+
 	if (*smps >= ARRAY_SIZE(ath12k_smps_map))
 		return -EINVAL;
 
@@ -2388,14 +2887,29 @@
 				     struct ath12k_wmi_peer_assoc_arg *arg)
 {
 	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
-	const struct ieee80211_he_6ghz_capa *he_6ghz_capa = &sta->deflink.he_6ghz_capa;
-	const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
+	const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
+	struct ath12k_link_vif *arvif = arsta->arvif;
+	const struct ieee80211_sta_ht_cap *ht_cap;
+	const struct ieee80211_sta_he_cap *he_cap;
+	struct ieee80211_link_sta *link_sta;
+	struct ath12k *ar = arvif->ar;
 	int smps;
 
+	link_sta = ath12k_mac_get_link_sta(arsta);
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+			    sta->addr, arsta->link_id);
+		return;
+	}
+
+	he_cap = &link_sta->he_cap;
+	he_6ghz_capa = &link_sta->he_6ghz_capa;
+	ht_cap = &link_sta->ht_cap;
+
 	if (!ht_cap->ht_supported && !he_6ghz_capa->capa)
 		return;
 
-	if (ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps))
+	if (ath12k_get_smps_from_capa(ht_cap, he_cap, he_6ghz_capa, &smps))
 		return;
 
 	switch (smps) {
@@ -2446,7 +2960,7 @@
 	}
 
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM qos %d\n",
-		   sta->addr, arg->qos_flag);
+		   arsta->addr, arg->qos_flag);
 }
 
 static int ath12k_peer_assoc_qos_ap(struct ath12k *ar,
@@ -2486,26 +3000,26 @@
 
 	arg.param = WMI_AP_PS_PEER_PARAM_UAPSD;
 	arg.value = uapsd;
-	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
 	if (ret)
 		goto err;
 
 	arg.param = WMI_AP_PS_PEER_PARAM_MAX_SP;
 	arg.value = max_sp;
-	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
 	if (ret)
 		goto err;
 
 	/* TODO: revisit during testing */
 	arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_FRMTYPE;
 	arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
-	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
 	if (ret)
 		goto err;
 
 	arg.param = WMI_AP_PS_PEER_PARAM_SIFS_RESP_UAPSD;
 	arg.value = DISABLE_SIFS_RESPONSE_TRIGGER;
-	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, sta->addr, &arg);
+	ret = ath12k_wmi_send_set_ap_ps_param_cmd(ar, arsta->addr, &arg);
 	if (ret)
 		goto err;
 
@@ -2517,96 +3031,96 @@
 	return ret;
 }
 
-static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
+static bool ath12k_mac_sta_has_ofdm_only(struct ieee80211_link_sta *sta)
 {
-	return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
+	return sta->supp_rates[NL80211_BAND_2GHZ] >>
 	       ATH12K_MAC_FIRST_OFDM_RATE_IDX;
 }
 
 static enum wmi_phy_mode ath12k_mac_get_phymode_vht(struct ath12k *ar,
-						    struct ieee80211_sta *sta)
+						    struct ieee80211_link_sta *link_sta)
 {
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
-		switch (sta->deflink.vht_cap.cap &
-			IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
-		case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+		if (link_sta->vht_cap.cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK |
+		    IEEE80211_VHT_CAP_EXT_NSS_BW_MASK))
 			return MODE_11AC_VHT160;
-		case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
-			return MODE_11AC_VHT80_80;
-		default:
-			/* not sure if this is a valid case? */
-			return MODE_11AC_VHT160;
-		}
+
+		ath12k_warn(ar->ab, "invalid VHT PHY capability info for 160 Mhz: %d\n",
+			link_sta->vht_cap.cap);
+
+		return MODE_UNKNOWN;
 	}
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
 		return MODE_11AC_VHT80;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
 		return MODE_11AC_VHT40;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
 		return MODE_11AC_VHT20;
 
 	return MODE_UNKNOWN;
 }
 
 static enum wmi_phy_mode ath12k_mac_get_phymode_he(struct ath12k *ar,
-						   struct ieee80211_sta *sta)
+						   struct ieee80211_link_sta *link_sta)
 {
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
-		if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+		if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
 		     IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
 			return MODE_11AX_HE160;
-		else if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
-		     IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
-			return MODE_11AX_HE80_80;
-		/* not sure if this is a valid case? */
-		return MODE_11AX_HE160;
+
+		ath12k_warn(ar->ab, "invalid HE PHY capability info for 160 Mhz: %d\n",
+			link_sta->he_cap.he_cap_elem.phy_cap_info[0]);
+
+		return MODE_UNKNOWN;
 	}
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
 		return MODE_11AX_HE80;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
 		return MODE_11AX_HE40;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
 		return MODE_11AX_HE20;
 
 	return MODE_UNKNOWN;
 }
 
 static enum wmi_phy_mode ath12k_mac_get_phymode_eht(struct ath12k *ar,
-						    struct ieee80211_sta *sta)
+						    struct ieee80211_link_sta *link_sta)
 {
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320)
-		if (sta->deflink.eht_cap.eht_cap_elem.phy_cap_info[0] &
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_320) {
+		if (link_sta->eht_cap.eht_cap_elem.phy_cap_info[0] &
 		    IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ)
 			return MODE_11BE_EHT320;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
-		if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
+		ath12k_warn(ar->ab, "invalid EHT PHY capability info for 320 Mhz: %d\n",
+			link_sta->eht_cap.eht_cap_elem.phy_cap_info[0]);
+
+		return MODE_UNKNOWN;
+	}
+
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_160) {
+		if (link_sta->he_cap.he_cap_elem.phy_cap_info[0] &
 		    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
 			return MODE_11BE_EHT160;
 
-		if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[0] &
-			 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
-			return MODE_11BE_EHT80_80;
-
 		ath12k_warn(ar->ab, "invalid EHT PHY capability info for 160 Mhz: %d\n",
-			    sta->deflink.he_cap.he_cap_elem.phy_cap_info[0]);
+			    link_sta->he_cap.he_cap_elem.phy_cap_info[0]);
 
-		return MODE_11BE_EHT160;
+		return MODE_UNKNOWN;
 	}
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
 		return MODE_11BE_EHT80;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
 		return MODE_11BE_EHT40;
 
-	if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
+	if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20)
 		return MODE_11BE_EHT20;
 
 	return MODE_UNKNOWN;
@@ -2617,10 +3131,12 @@
 					struct ath12k_link_sta *arsta,
 					struct ath12k_wmi_peer_assoc_arg *arg)
 {
+	struct ieee80211_link_sta *link_sta;
 	struct cfg80211_chan_def def;
 	enum nl80211_band band;
 	const u8 *ht_mcs_mask;
 	const u16 *vht_mcs_mask;
+	const u16 *he_mcs_mask;
 	enum wmi_phy_mode phymode = MODE_UNKNOWN;
 
 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -2634,34 +3150,43 @@
 	band = def.chan->band;
 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+	he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
+
+	link_sta = ath12k_mac_get_link_sta(arsta);
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+			    sta->addr, arsta->link_id);
+		return;
+	}
 
 	switch (band) {
 	case NL80211_BAND_2GHZ:
-		if (sta->deflink.eht_cap.has_eht) {
-			if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+		if (link_sta->eht_cap.has_eht) {
+			if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
 				phymode = MODE_11BE_EHT40_2G;
 			else
 				phymode = MODE_11BE_EHT20_2G;
-		} else if (sta->deflink.he_cap.has_he) {
-			if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
+		} else if (link_sta->he_cap.has_he &&
+			   !ath12k_peer_assoc_h_he_masked(he_mcs_mask)) {
+			if (link_sta->bandwidth == IEEE80211_STA_RX_BW_80)
 				phymode = MODE_11AX_HE80_2G;
-			else if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+			else if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
 				phymode = MODE_11AX_HE40_2G;
 			else
 				phymode = MODE_11AX_HE20_2G;
-		} else if (sta->deflink.vht_cap.vht_supported &&
+		} else if (link_sta->vht_cap.vht_supported &&
 		    !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
-			if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+			if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
 				phymode = MODE_11AC_VHT40;
 			else
 				phymode = MODE_11AC_VHT20;
-		} else if (sta->deflink.ht_cap.ht_supported &&
+		} else if (link_sta->ht_cap.ht_supported &&
 			   !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
-			if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
+			if (link_sta->bandwidth == IEEE80211_STA_RX_BW_40)
 				phymode = MODE_11NG_HT40;
 			else
 				phymode = MODE_11NG_HT20;
-		} else if (ath12k_mac_sta_has_ofdm_only(sta)) {
+		} else if (ath12k_mac_sta_has_ofdm_only(link_sta)) {
 			phymode = MODE_11G;
 		} else {
 			phymode = MODE_11B;
@@ -2670,16 +3195,17 @@
 	case NL80211_BAND_5GHZ:
 	case NL80211_BAND_6GHZ:
 		/* Check EHT first */
-		if (sta->deflink.eht_cap.has_eht) {
-			phymode = ath12k_mac_get_phymode_eht(ar, sta);
-		} else if (sta->deflink.he_cap.has_he) {
-			phymode = ath12k_mac_get_phymode_he(ar, sta);
-		} else if (sta->deflink.vht_cap.vht_supported &&
+		if (link_sta->eht_cap.has_eht) {
+			phymode = ath12k_mac_get_phymode_eht(ar, link_sta);
+		} else if (link_sta->he_cap.has_he &&
+			   !ath12k_peer_assoc_h_he_masked(he_mcs_mask)) {
+			phymode = ath12k_mac_get_phymode_he(ar, link_sta);
+		} else if (link_sta->vht_cap.vht_supported &&
 		    !ath12k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
-			phymode = ath12k_mac_get_phymode_vht(ar, sta);
-		} else if (sta->deflink.ht_cap.ht_supported &&
+			phymode = ath12k_mac_get_phymode_vht(ar, link_sta);
+		} else if (link_sta->ht_cap.ht_supported &&
 			   !ath12k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
-			if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
+			if (link_sta->bandwidth >= IEEE80211_STA_RX_BW_40)
 				phymode = MODE_11NA_HT40;
 			else
 				phymode = MODE_11NA_HT20;
@@ -2692,7 +3218,7 @@
 	}
 
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac peer %pM phymode %s\n",
-		   sta->addr, ath12k_mac_phymode_str(phymode));
+		   arsta->addr, ath12k_mac_phymode_str(phymode));
 
 	arg->peer_phymode = phymode;
 	WARN_ON(phymode == MODE_UNKNOWN);
@@ -2767,15 +3293,32 @@
 				    struct ath12k_wmi_peer_assoc_arg *arg)
 {
 	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
-	const struct ieee80211_sta_eht_cap *eht_cap = &sta->deflink.eht_cap;
-	const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap;
 	const struct ieee80211_eht_mcs_nss_supp_20mhz_only *bw_20;
 	const struct ieee80211_eht_mcs_nss_supp_bw *bw;
+	const struct ieee80211_sta_eht_cap *eht_cap;
+	const struct ieee80211_sta_he_cap *he_cap;
+	struct ieee80211_link_sta *link_sta;
+	struct ieee80211_bss_conf *link_conf;
 	u32 *rx_mcs, *tx_mcs;
 
 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
 
-	if (!sta->deflink.he_cap.has_he || !eht_cap->has_eht)
+	link_sta = ath12k_mac_get_link_sta(arsta);
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in peer assoc eht for sta %pM link %u\n",
+			    sta->addr, arsta->link_id);
+		return;
+	}
+
+	link_conf = ath12k_mac_get_link_bss_conf(arvif);
+	if (!link_conf) {
+		ath12k_warn(ar->ab, "unable to access link_conf in peer assoc eht set\n");
+		return;
+	}
+
+	eht_cap = &link_sta->eht_cap;
+	he_cap = &link_sta->he_cap;
+	if (!he_cap->has_he || !eht_cap->has_eht)
 		return;
 
 	arg->eht_flag = true;
@@ -2794,7 +3337,7 @@
 	rx_mcs = arg->peer_eht_rx_mcs_set;
 	tx_mcs = arg->peer_eht_tx_mcs_set;
 
-	switch (sta->deflink.bandwidth) {
+	switch (link_sta->bandwidth) {
 	case IEEE80211_STA_RX_BW_320:
 		bw = &eht_cap->eht_mcs_nss_supp.bw._320;
 		ath12k_mac_set_eht_mcs(bw->rx_tx_mcs9_max_nss,
@@ -2844,6 +3387,69 @@
 	}
 
 	arg->punct_bitmap = ~arvif->punct_bitmap;
+	arg->enable_mcs15 = link_conf->enable_mcs15;
+}
+
+static void ath12k_peer_assoc_h_mlo(struct ath12k_link_sta *arsta,
+				    struct ath12k_wmi_peer_assoc_arg *arg)
+{
+	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+	struct peer_assoc_mlo_params *ml = &arg->ml;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ath12k_link_sta *arsta_p;
+	struct ath12k_link_vif *arvif;
+	unsigned long links;
+	u8 link_id;
+	int i;
+
+	if (!sta->mlo || ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID)
+		return;
+
+	ml->enabled = true;
+	ml->assoc_link = arsta->is_assoc_link;
+
+	/* For now considering the primary umac based on assoc link */
+	ml->primary_umac = arsta->is_assoc_link;
+	ml->peer_id_valid = true;
+	ml->logical_link_idx_valid = true;
+
+	ether_addr_copy(ml->mld_addr, sta->addr);
+	ml->logical_link_idx = arsta->link_idx;
+	ml->ml_peer_id = ahsta->ml_peer_id;
+	ml->ieee_link_id = arsta->link_id;
+	ml->num_partner_links = 0;
+	ml->eml_cap = sta->eml_cap;
+	links = ahsta->links_map;
+
+	rcu_read_lock();
+
+	i = 0;
+
+	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		if (i >= ATH12K_WMI_MLO_MAX_LINKS)
+			break;
+
+		arsta_p = rcu_dereference(ahsta->link[link_id]);
+		arvif = rcu_dereference(ahsta->ahvif->link[link_id]);
+
+		if (arsta_p == arsta)
+			continue;
+
+		if (!arvif->is_started)
+			continue;
+
+		ml->partner_info[i].vdev_id = arvif->vdev_id;
+		ml->partner_info[i].hw_link_id = arvif->ar->pdev->hw_link_id;
+		ml->partner_info[i].assoc_link = arsta_p->is_assoc_link;
+		ml->partner_info[i].primary_umac = arsta_p->is_assoc_link;
+		ml->partner_info[i].logical_link_idx_valid = true;
+		ml->partner_info[i].logical_link_idx = arsta_p->link_idx;
+		ml->num_partner_links++;
+
+		i++;
+	}
+
+	rcu_read_unlock();
 }
 
 static void ath12k_peer_assoc_prepare(struct ath12k *ar,
@@ -2870,21 +3476,24 @@
 	ath12k_peer_assoc_h_qos(ar, arvif, arsta, arg);
 	ath12k_peer_assoc_h_phymode(ar, arvif, arsta, arg);
 	ath12k_peer_assoc_h_smps(arsta, arg);
+	ath12k_peer_assoc_h_mlo(arsta, arg);
 
+	arsta->peer_nss = arg->peer_nss;
 	/* TODO: amsdu_disable req? */
 }
 
 static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_link_vif *arvif,
 				  const u8 *addr,
 				  const struct ieee80211_sta_ht_cap *ht_cap,
+				  const struct ieee80211_sta_he_cap *he_cap,
 				  const struct ieee80211_he_6ghz_capa *he_6ghz_capa)
 {
 	int smps, ret = 0;
 
-	if (!ht_cap->ht_supported && !he_6ghz_capa)
+	if (!ht_cap->ht_supported && !he_6ghz_capa && !he_cap)
 		return 0;
 
-	ret = ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps);
+	ret = ath12k_get_smps_from_capa(ht_cap, he_cap, he_6ghz_capa, &smps);
 	if (ret < 0)
 		return ret;
 
@@ -2893,6 +3502,209 @@
 					 ath12k_smps_map[smps]);
 }
 
+static int ath12k_mac_set_he_txbf_conf(struct ath12k_link_vif *arvif)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k *ar = arvif->ar;
+	u32 param = WMI_VDEV_PARAM_SET_HEMU_MODE;
+	u32 value = 0;
+	int ret;
+	struct ieee80211_bss_conf *link_conf;
+
+	link_conf = ath12k_mac_get_link_bss_conf(arvif);
+	if (!link_conf) {
+		ath12k_warn(ar->ab, "unable to access bss link conf in txbf conf\n");
+		return false;
+	}
+
+	if (!link_conf->he_support)
+		return true;
+
+	if (link_conf->he_su_beamformer) {
+		value |= u32_encode_bits(HE_SU_BFER_ENABLE, HE_MODE_SU_TX_BFER);
+		if (link_conf->he_mu_beamformer &&
+		    ahvif->vdev_type == WMI_VDEV_TYPE_AP)
+			value |= u32_encode_bits(HE_MU_BFER_ENABLE, HE_MODE_MU_TX_BFER);
+	}
+
+	if (ahvif->vif->type != NL80211_IFTYPE_MESH_POINT) {
+		value |= u32_encode_bits(HE_DL_MUOFDMA_ENABLE, HE_MODE_DL_OFDMA) |
+			 u32_encode_bits(HE_UL_MUOFDMA_ENABLE, HE_MODE_UL_OFDMA);
+
+		if (link_conf->he_full_ul_mumimo)
+			value |= u32_encode_bits(HE_UL_MUMIMO_ENABLE, HE_MODE_UL_MUMIMO);
+
+		if (link_conf->he_su_beamformee)
+			value |= u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE);
+	}
+
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set vdev %d HE MU mode: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	param = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
+	value =	u32_encode_bits(HE_VHT_SOUNDING_MODE_ENABLE, HE_VHT_SOUNDING_MODE) |
+		u32_encode_bits(HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE,
+				HE_TRIG_NONTRIG_SOUNDING_MODE);
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+					    param, value);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set vdev %d sounding mode: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath12k_mac_vif_recalc_sta_he_txbf(struct ath12k *ar,
+					     struct ath12k_link_vif *arvif,
+					     struct ieee80211_sta_he_cap *he_cap,
+					     int *hemode)
+{
+	struct ieee80211_vif *vif = arvif->ahvif->vif;
+	struct ieee80211_he_cap_elem he_cap_elem = {};
+	struct ieee80211_sta_he_cap *cap_band;
+	struct cfg80211_chan_def def;
+	u8 link_id = arvif->link_id;
+	struct ieee80211_bss_conf *link_conf;
+
+	link_conf = ath12k_mac_get_link_bss_conf(arvif);
+
+	if (!link_conf) {
+		ath12k_warn(ar->ab, "unable to access bss link conf in recalc txbf conf\n");
+		return 0;
+	}
+
+	if (!link_conf->he_support)
+		return 0;
+
+	if (vif->type != NL80211_IFTYPE_STATION)
+		return -EINVAL;
+
+	if (WARN_ON(ath12k_mac_vif_link_chan(vif, link_id, &def)))
+		return -EINVAL;
+
+	if (def.chan->band == NL80211_BAND_2GHZ)
+		cap_band = &ar->mac.iftype[NL80211_BAND_2GHZ][vif->type].he_cap;
+	else
+		cap_band = &ar->mac.iftype[NL80211_BAND_5GHZ][vif->type].he_cap;
+
+	memcpy(&he_cap_elem, &cap_band->he_cap_elem, sizeof(he_cap_elem));
+
+	*hemode = 0;
+	if (HECAP_PHY_SUBFME_GET(he_cap_elem.phy_cap_info)) {
+		if (HECAP_PHY_SUBFMR_GET(he_cap->he_cap_elem.phy_cap_info))
+			*hemode |= u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE);
+		if (HECAP_PHY_MUBFMR_GET(he_cap->he_cap_elem.phy_cap_info))
+			*hemode |= u32_encode_bits(HE_MU_BFEE_ENABLE, HE_MODE_MU_TX_BFEE);
+	}
+
+	if (vif->type != NL80211_IFTYPE_MESH_POINT) {
+		*hemode |= u32_encode_bits(HE_DL_MUOFDMA_ENABLE, HE_MODE_DL_OFDMA) |
+			  u32_encode_bits(HE_UL_MUOFDMA_ENABLE, HE_MODE_UL_OFDMA);
+
+		if (HECAP_PHY_ULMUMIMO_GET(he_cap_elem.phy_cap_info))
+			if (HECAP_PHY_ULMUMIMO_GET(he_cap->he_cap_elem.phy_cap_info))
+				*hemode |= u32_encode_bits(HE_UL_MUMIMO_ENABLE,
+							  HE_MODE_UL_MUMIMO);
+
+		if (u32_get_bits(*hemode, HE_MODE_MU_TX_BFEE))
+			*hemode |= u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE);
+
+		if (u32_get_bits(*hemode, HE_MODE_MU_TX_BFER))
+			*hemode |= u32_encode_bits(HE_SU_BFER_ENABLE, HE_MODE_SU_TX_BFER);
+	}
+
+	return 0;
+}
+
+static int ath12k_mac_set_eht_txbf_conf(struct ath12k_link_vif *arvif)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k *ar = arvif->ar;
+	u32 param = WMI_VDEV_PARAM_SET_EHT_MU_MODE;
+	u32 value = 0;
+	int ret;
+	struct ieee80211_bss_conf *link_conf;
+
+	link_conf = ath12k_mac_get_link_bss_conf(arvif);
+	if (!link_conf) {
+		ath12k_warn(ar->ab, "unable to access bss link conf in eht txbf conf\n");
+		return -ENOENT;
+	}
+
+	if (!link_conf->eht_support)
+		return 0;
+
+	if (link_conf->eht_su_beamformer) {
+		value |= u32_encode_bits(EHT_SU_BFER_ENABLE, EHT_MODE_SU_TX_BFER);
+		if (link_conf->eht_mu_beamformer &&
+		    ahvif->vdev_type == WMI_VDEV_TYPE_AP)
+			value |= u32_encode_bits(EHT_MU_BFER_ENABLE,
+						 EHT_MODE_MU_TX_BFER) |
+				 u32_encode_bits(EHT_DL_MUOFDMA_ENABLE,
+						 EHT_MODE_DL_OFDMA_MUMIMO) |
+				 u32_encode_bits(EHT_UL_MUOFDMA_ENABLE,
+						 EHT_MODE_UL_OFDMA_MUMIMO);
+	}
+
+	if (ahvif->vif->type != NL80211_IFTYPE_MESH_POINT) {
+		value |= u32_encode_bits(EHT_DL_MUOFDMA_ENABLE, EHT_MODE_DL_OFDMA) |
+			 u32_encode_bits(EHT_UL_MUOFDMA_ENABLE, EHT_MODE_UL_OFDMA);
+
+		if (link_conf->eht_80mhz_full_bw_ul_mumimo)
+			value |= u32_encode_bits(EHT_UL_MUMIMO_ENABLE, EHT_MODE_MUMIMO);
+
+		if (link_conf->eht_su_beamformee)
+			value |= u32_encode_bits(EHT_SU_BFEE_ENABLE,
+						 EHT_MODE_SU_TX_BFEE);
+	}
+
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id, param, value);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set vdev %d EHT MU mode: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar,
+					      struct ieee80211_link_sta *link_sta)
+{
+	u32 bw;
+
+	switch (link_sta->bandwidth) {
+	case IEEE80211_STA_RX_BW_20:
+		bw = WMI_PEER_CHWIDTH_20MHZ;
+		break;
+	case IEEE80211_STA_RX_BW_40:
+		bw = WMI_PEER_CHWIDTH_40MHZ;
+		break;
+	case IEEE80211_STA_RX_BW_80:
+		bw = WMI_PEER_CHWIDTH_80MHZ;
+		break;
+	case IEEE80211_STA_RX_BW_160:
+		bw = WMI_PEER_CHWIDTH_160MHZ;
+		break;
+	case IEEE80211_STA_RX_BW_320:
+		bw = WMI_PEER_CHWIDTH_320MHZ;
+		break;
+	default:
+		ath12k_warn(ar->ab, "Invalid bandwidth %d for link station %pM\n",
+			    link_sta->bandwidth, link_sta->addr);
+		bw = WMI_PEER_CHWIDTH_20MHZ;
+		break;
+	}
+
+	return bw;
+}
+
 static void ath12k_bss_assoc(struct ath12k *ar,
 			     struct ath12k_link_vif *arvif,
 			     struct ieee80211_bss_conf *bss_conf)
@@ -2900,42 +3712,78 @@
 	struct ath12k_vif *ahvif = arvif->ahvif;
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
 	struct ath12k_wmi_vdev_up_params params = {};
-	struct ath12k_wmi_peer_assoc_arg peer_arg;
+	struct ieee80211_link_sta *link_sta;
+	u8 link_id = bss_conf->link_id;
 	struct ath12k_link_sta *arsta;
 	struct ieee80211_sta *ap_sta;
 	struct ath12k_sta *ahsta;
 	struct ath12k_peer *peer;
 	bool is_auth = false;
+	u32 hemode = 0;
 	int ret;
 
 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
 
-	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
-		   arvif->vdev_id, arvif->bssid, ahvif->aid);
+	struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) =
+					kzalloc(sizeof(*peer_arg), GFP_KERNEL);
+	if (!peer_arg)
+		return;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "mac vdev %i link id %u assoc bssid %pM aid %d\n",
+		   arvif->vdev_id, link_id, arvif->bssid, ahvif->aid);
 
 	rcu_read_lock();
 
-	ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
+	/* During ML connection, cfg.ap_addr has the MLD address. For
+	 * non-ML connection, it has the BSSID.
+	 */
+	ap_sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
 	if (!ap_sta) {
 		ath12k_warn(ar->ab, "failed to find station entry for bss %pM vdev %i\n",
-			    bss_conf->bssid, arvif->vdev_id);
+			    vif->cfg.ap_addr, arvif->vdev_id);
 		rcu_read_unlock();
 		return;
 	}
 
 	ahsta = ath12k_sta_to_ahsta(ap_sta);
-	arsta = &ahsta->deflink;
 
+	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+				  ahsta->link[link_id]);
 	if (WARN_ON(!arsta)) {
 		rcu_read_unlock();
 		return;
 	}
 
-	ath12k_peer_assoc_prepare(ar, arvif, arsta, &peer_arg, false);
+	link_sta = ath12k_mac_get_link_sta(arsta);
+	if (WARN_ON(!link_sta)) {
+		rcu_read_unlock();
+		return;
+	}
+
+	ath12k_peer_assoc_prepare(ar, arvif, arsta, peer_arg, false);
+
+	/* link_sta->he_cap must be protected by rcu_read_lock */
+	ret = ath12k_mac_vif_recalc_sta_he_txbf(ar, arvif, &link_sta->he_cap, &hemode);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to recalc he txbf for vdev %i on bss %pM: %d\n",
+			    arvif->vdev_id, bss_conf->bssid, ret);
+		rcu_read_unlock();
+		return;
+	}
 
 	rcu_read_unlock();
 
-	ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+	/* keep this before ath12k_wmi_send_peer_assoc_cmd() */
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+					    WMI_VDEV_PARAM_SET_HEMU_MODE, hemode);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to submit vdev param txbf 0x%x: %d\n",
+			    hemode, ret);
+		return;
+	}
+
+	ret = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to run peer assoc for %pM vdev %i: %d\n",
 			    bss_conf->bssid, arvif->vdev_id, ret);
@@ -2949,8 +3797,8 @@
 	}
 
 	ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid,
-				     &ap_sta->deflink.ht_cap,
-				     &ap_sta->deflink.he_6ghz_capa);
+				     &link_sta->ht_cap, &link_sta->he_cap,
+				     &link_sta->he_6ghz_capa);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
 			    arvif->vdev_id, ret);
@@ -3058,6 +3906,7 @@
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
 	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
 	const struct ieee80211_supported_band *sband;
+	struct ieee80211_bss_conf *bss_conf;
 	u8 basic_rate_idx;
 	int hw_rate_code;
 	u32 vdev_param;
@@ -3066,8 +3915,17 @@
 
 	lockdep_assert_wiphy(hw->wiphy);
 
+	bss_conf = ath12k_mac_get_link_bss_conf(arvif);
+	if (!bss_conf) {
+		ath12k_warn(ar->ab, "unable to access bss link conf in mgmt rate calc for vif %pM link %u\n",
+			    vif->addr, arvif->link_id);
+		return;
+	}
+
 	sband = hw->wiphy->bands[def->chan->band];
-	basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
+	basic_rate_idx = ffs(bss_conf->basic_rates);
+	if (basic_rate_idx)
+		basic_rate_idx -= 1;
 	bitrate = sband->bitrates[basic_rate_idx].bitrate;
 
 	hw_rate_code = ath12k_mac_get_rate_hw_value(bitrate);
@@ -3089,12 +3947,177 @@
 		ath12k_warn(ar->ab, "failed to set beacon tx rate %d\n", ret);
 }
 
+static void ath12k_mac_init_arvif(struct ath12k_vif *ahvif,
+				  struct ath12k_link_vif *arvif, int link_id)
+{
+	struct ath12k_hw *ah = ahvif->ah;
+	u8 _link_id;
+	int i;
+
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
+	if (WARN_ON(!arvif))
+		return;
+
+	if (WARN_ON(link_id >= ATH12K_NUM_MAX_LINKS))
+		return;
+
+	if (link_id < 0)
+		_link_id = 0;
+	else
+		_link_id = link_id;
+
+	arvif->ahvif = ahvif;
+	arvif->link_id = _link_id;
+
+	/* Protects the datapath stats update on a per link basis */
+	spin_lock_init(&arvif->link_stats_lock);
+
+	INIT_LIST_HEAD(&arvif->list);
+	INIT_DELAYED_WORK(&arvif->connection_loss_work,
+			  ath12k_mac_vif_sta_connection_loss_work);
+
+	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
+		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
+		arvif->bitrate_mask.control[i].gi = NL80211_TXRATE_DEFAULT_GI;
+		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
+		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
+		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
+		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
+		memset(arvif->bitrate_mask.control[i].he_mcs, 0xff,
+		       sizeof(arvif->bitrate_mask.control[i].he_mcs));
+	}
+
+	/* Handle MLO related assignments */
+	if (link_id >= 0) {
+		rcu_assign_pointer(ahvif->link[arvif->link_id], arvif);
+		ahvif->links_map |= BIT(_link_id);
+	}
+
+	ath12k_generic_dbg(ATH12K_DBG_MAC,
+			   "mac init link arvif (link_id %d%s) for vif %pM. links_map 0x%x",
+			   _link_id, (link_id < 0) ? " deflink" : "", ahvif->vif->addr,
+			   ahvif->links_map);
+}
+
+static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw,
+					     struct ath12k_link_vif *arvif)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar = arvif->ar;
+	int ret;
+
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
+	cancel_delayed_work_sync(&arvif->connection_loss_work);
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)",
+		   arvif->vdev_id, arvif->link_id);
+
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
+		ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
+		if (ret)
+			ath12k_warn(ar->ab, "failed to submit AP self-peer removal on vdev %d link id %d: %d",
+				    arvif->vdev_id, arvif->link_id, ret);
+	}
+	ath12k_mac_vdev_delete(ar, arvif);
+}
+
+static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah,
+							  struct ieee80211_vif *vif,
+							  u8 link_id)
+{
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_link_vif *arvif;
+
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
+	arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+	if (arvif)
+		return arvif;
+
+	/* If this is the first link arvif being created for an ML VIF
+	 * use the preallocated deflink memory except for scan arvifs
+	 */
+	if (!ahvif->links_map && link_id < ATH12K_DEFAULT_SCAN_LINK) {
+		arvif = &ahvif->deflink;
+	} else {
+		arvif = (struct ath12k_link_vif *)
+		kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL);
+		if (!arvif)
+			return NULL;
+	}
+
+	ath12k_mac_init_arvif(ahvif, arvif, link_id);
+
+	return arvif;
+}
+
+static void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ath12k_hw *ah = ahvif->ah;
+
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
+	rcu_assign_pointer(ahvif->link[arvif->link_id], NULL);
+	synchronize_rcu();
+	ahvif->links_map &= ~BIT(arvif->link_id);
+
+	if (arvif != &ahvif->deflink)
+		kfree(arvif);
+	else
+		memset(arvif, 0, sizeof(*arvif));
+}
+
 static int
 ath12k_mac_op_change_vif_links(struct ieee80211_hw *hw,
 			       struct ieee80211_vif *vif,
 			       u16 old_links, u16 new_links,
 			       struct ieee80211_bss_conf *ol[IEEE80211_MLD_MAX_NUM_LINKS])
 {
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	unsigned long to_remove = old_links & ~new_links;
+	unsigned long to_add = ~old_links & new_links;
+	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+	struct ath12k_link_vif *arvif;
+	u8 link_id;
+
+	lockdep_assert_wiphy(hw->wiphy);
+
+	ath12k_generic_dbg(ATH12K_DBG_MAC,
+			   "mac vif link changed for MLD %pM old_links 0x%x new_links 0x%x\n",
+			   vif->addr, old_links, new_links);
+
+	for_each_set_bit(link_id, &to_add, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+		/* mac80211 wants to add link but driver already has the
+		 * link. This should not happen ideally.
+		 */
+		if (WARN_ON(arvif))
+			return -EINVAL;
+
+		arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
+		if (WARN_ON(!arvif))
+			return -EINVAL;
+	}
+
+	for_each_set_bit(link_id, &to_remove, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+		if (WARN_ON(!arvif))
+			return -EINVAL;
+
+		if (!arvif->is_created)
+			continue;
+
+		if (WARN_ON(!arvif->ar))
+			return -EINVAL;
+
+		ath12k_mac_remove_link_interface(hw, arvif);
+		ath12k_mac_unassign_link_vif(arvif);
+	}
+
 	return 0;
 }
 
@@ -3151,6 +4174,7 @@
 {
 	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
 	unsigned long links = ahvif->links_map;
+	struct ieee80211_bss_conf *info;
 	struct ath12k_link_vif *arvif;
 	struct ath12k *ar;
 	u8 link_id;
@@ -3171,13 +4195,18 @@
 
 			ar = arvif->ar;
 
-			if (vif->cfg.assoc)
-				ath12k_bss_assoc(ar, arvif, &vif->bss_conf);
-			else
+			if (vif->cfg.assoc) {
+				info = ath12k_mac_get_link_bss_conf(arvif);
+				if (!info)
+					continue;
+
+				ath12k_bss_assoc(ar, arvif, info);
+			} else {
 				ath12k_bss_disassoc(ar, arvif);
 		}
 	}
 }
+}
 
 static void ath12k_mac_vif_setup_ps(struct ath12k_link_vif *arvif)
 {
@@ -3185,6 +4214,7 @@
 	struct ieee80211_vif *vif = arvif->ahvif->vif;
 	struct ieee80211_conf *conf = &ath12k_ar_to_hw(ar)->conf;
 	enum wmi_sta_powersave_param param;
+	struct ieee80211_bss_conf *info;
 	enum wmi_sta_ps_mode psmode;
 	int ret;
 	int timeout;
@@ -3202,8 +4232,15 @@
 
 		timeout = conf->dynamic_ps_timeout;
 		if (timeout == 0) {
+			info = ath12k_mac_get_link_bss_conf(arvif);
+			if (!info) {
+				ath12k_warn(ar->ab, "unable to access bss link conf in setup ps for vif %pM link %u\n",
+					    vif->addr, arvif->link_id);
+				return;
+			}
+
 			/* firmware doesn't like 0 */
-			timeout = ieee80211_tu_to_usec(vif->bss_conf.beacon_int) / 1000;
+			timeout = ieee80211_tu_to_usec(info->beacon_int) / 1000;
 		}
 
 		ret = ath12k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
@@ -3248,6 +4285,9 @@
 
 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
 
+	if (unlikely(test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
+		return;
+
 	if (changed & BSS_CHANGED_BEACON_INT) {
 		arvif->beacon_interval = info->beacon_int;
 
@@ -3312,10 +4352,23 @@
 		ether_addr_copy(arvif->bssid, info->bssid);
 
 	if (changed & BSS_CHANGED_BEACON_ENABLED) {
+		if (info->enable_beacon) {
+			ret = ath12k_mac_set_he_txbf_conf(arvif);
+			if (ret)
+				ath12k_warn(ar->ab,
+					    "failed to set HE TXBF config for vdev: %d\n",
+					    arvif->vdev_id);
+
+			ret = ath12k_mac_set_eht_txbf_conf(arvif);
+			if (ret)
+				ath12k_warn(ar->ab,
+					    "failed to set EHT TXBF config for vdev: %d\n",
+					    arvif->vdev_id);
+		}
 		ath12k_control_beaconing(arvif, info);
 
-		if (arvif->is_up && vif->bss_conf.he_support &&
-		    vif->bss_conf.he_oper.params) {
+		if (arvif->is_up && info->he_support &&
+		    info->he_oper.params) {
 			/* TODO: Extend to support 1024 BA Bitmap size */
 			ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
 							    WMI_VDEV_PARAM_BA_MODE,
@@ -3326,7 +4379,7 @@
 					    arvif->vdev_id);
 
 			param_id = WMI_VDEV_PARAM_HEOPS_0_31;
-			param_value = vif->bss_conf.he_oper.params;
+			param_value = info->he_oper.params;
 			ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
 							    param_id, param_value);
 			ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
@@ -3418,14 +4471,17 @@
 	if (changed & BSS_CHANGED_MCAST_RATE &&
 	    !ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)) {
 		band = def.chan->band;
-		mcast_rate = vif->bss_conf.mcast_rate[band];
+		mcast_rate = info->mcast_rate[band];
 
-		if (mcast_rate > 0)
+		if (mcast_rate > 0) {
 			rateidx = mcast_rate - 1;
-		else
-			rateidx = ffs(vif->bss_conf.basic_rates) - 1;
+		} else {
+			rateidx = ffs(info->basic_rates);
+			if (rateidx)
+				rateidx -= 1;
+		}
 
-		if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP)
+		if (ar->pdev->cap.supported_bands & WMI_HOST_WLAN_5GHZ_CAP)
 			rateidx += ATH12K_MAC_FIRST_OFDM_RATE_IDX;
 
 		bitrate = ath12k_legacy_rates[rateidx].bitrate;
@@ -3537,6 +4593,9 @@
 
 static void ath12k_ahvif_put_link_cache(struct ath12k_vif *ahvif, u8 link_id)
 {
+	if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+		return;
+
 	ath12k_ahvif_put_link_key_cache(ahvif->cache[link_id]);
 	kfree(ahvif->cache[link_id]);
 	ahvif->cache[link_id] = NULL;
@@ -3576,109 +4635,6 @@
 	ath12k_mac_bss_info_changed(ar, arvif, info, changed);
 }
 
-static struct ath12k_link_vif *ath12k_mac_assign_link_vif(struct ath12k_hw *ah,
-							  struct ieee80211_vif *vif,
-							  u8 link_id)
-{
-	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
-	struct ath12k_link_vif *arvif;
-	int i;
-
-	lockdep_assert_wiphy(ah->hw->wiphy);
-
-	arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
-	if (arvif)
-		return arvif;
-
-	if (!vif->valid_links) {
-		/* Use deflink for Non-ML VIFs and mark the link id as 0
-		 */
-		link_id = 0;
-		arvif = &ahvif->deflink;
-	} else {
-		/* If this is the first link arvif being created for an ML VIF
-		 * use the preallocated deflink memory
-		 */
-		if (!ahvif->links_map) {
-			arvif = &ahvif->deflink;
-		} else {
-			arvif = (struct ath12k_link_vif *)
-			kzalloc(sizeof(struct ath12k_link_vif), GFP_KERNEL);
-			if (!arvif)
-				return NULL;
-		}
-	}
-
-	arvif->ahvif = ahvif;
-	arvif->link_id = link_id;
-	ahvif->links_map |= BIT(link_id);
-
-	INIT_LIST_HEAD(&arvif->list);
-	INIT_DELAYED_WORK(&arvif->connection_loss_work,
-			  ath12k_mac_vif_sta_connection_loss_work);
-
-	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
-		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
-		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
-		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
-		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
-		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
-	}
-
-	/* Allocate Default Queue now and reassign during actual vdev create */
-	vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE;
-	for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
-		vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
-
-	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
-
-	rcu_assign_pointer(ahvif->link[arvif->link_id], arvif);
-	ahvif->links_map |= BIT(link_id);
-	synchronize_rcu();
-	return arvif;
-}
-
-static void ath12k_mac_unassign_link_vif(struct ath12k_link_vif *arvif)
-{
-	struct ath12k_vif *ahvif = arvif->ahvif;
-	struct ath12k_hw *ah = ahvif->ah;
-
-	lockdep_assert_wiphy(ah->hw->wiphy);
-
-	rcu_assign_pointer(ahvif->link[arvif->link_id], NULL);
-	synchronize_rcu();
-	ahvif->links_map &= ~BIT(arvif->link_id);
-
-	if (arvif != &ahvif->deflink)
-		kfree(arvif);
-	else
-		memset(arvif, 0, sizeof(*arvif));
-}
-
-static void ath12k_mac_remove_link_interface(struct ieee80211_hw *hw,
-					     struct ath12k_link_vif *arvif)
-{
-	struct ath12k_vif *ahvif = arvif->ahvif;
-	struct ath12k_hw *ah = hw->priv;
-	struct ath12k *ar = arvif->ar;
-	int ret;
-
-	lockdep_assert_wiphy(ah->hw->wiphy);
-
-	cancel_delayed_work_sync(&arvif->connection_loss_work);
-
-	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac remove link interface (vdev %d link id %d)",
-		   arvif->vdev_id, arvif->link_id);
-
-	if (ahvif->vdev_type == WMI_VDEV_TYPE_AP) {
-		ret = ath12k_peer_delete(ar, arvif->vdev_id, arvif->bssid);
-		if (ret)
-			ath12k_warn(ar->ab, "failed to submit AP self-peer removal on vdev %d link id %d: %d",
-				    arvif->vdev_id, arvif->link_id, ret);
-	}
-	ath12k_mac_vdev_delete(ar, arvif);
-}
-
 static struct ath12k*
 ath12k_mac_select_scan_device(struct ieee80211_hw *hw,
 			      struct ieee80211_vif *vif,
@@ -3699,19 +4655,23 @@
 	 * split the hw request and perform multiple scans
 	 */
 
-	if (center_freq < ATH12K_MIN_5G_FREQ)
+	if (center_freq < ATH12K_MIN_5GHZ_FREQ)
 		band = NL80211_BAND_2GHZ;
-	else if (center_freq < ATH12K_MIN_6G_FREQ)
+	else if (center_freq < ATH12K_MIN_6GHZ_FREQ)
 		band = NL80211_BAND_5GHZ;
 	else
 		band = NL80211_BAND_6GHZ;
 
 	for_each_ar(ah, ar, i) {
-		/* TODO 5 GHz low high split changes */
+		if (band == NL80211_BAND_5GHZ || band == NL80211_BAND_6GHZ) {
+			if (center_freq >= KHZ_TO_MHZ(ar->freq_range.start_freq) &&
+			    center_freq <= KHZ_TO_MHZ(ar->freq_range.end_freq))
 		if (ar->mac.sbands[band].channels)
 			return ar;
+		} else if (ar->mac.sbands[band].channels) {
+			return ar;
+		}
 	}
-
 	return NULL;
 }
 
@@ -3730,22 +4690,9 @@
 			ieee80211_remain_on_channel_expired(hw);
 		fallthrough;
 	case ATH12K_SCAN_STARTING:
-		if (!ar->scan.is_roc) {
-			struct cfg80211_scan_info info = {
-				.aborted = ((ar->scan.state ==
-					    ATH12K_SCAN_ABORTING) ||
-					    (ar->scan.state ==
-					    ATH12K_SCAN_STARTING)),
-			};
-
-			ieee80211_scan_completed(hw, &info);
-		}
-
-		ar->scan.state = ATH12K_SCAN_IDLE;
-		ar->scan_channel = NULL;
-		ar->scan.roc_freq = 0;
 		cancel_delayed_work(&ar->scan.timeout);
 		complete(&ar->scan.completed);
+		wiphy_work_queue(ar->ah->hw->wiphy, &ar->scan.vdev_clean_wk);
 		break;
 	}
 }
@@ -3786,15 +4733,15 @@
 	}
 
 out:
-	/* Scan state should be updated upon scan completion but in case
-	 * firmware fails to deliver the event (for whatever reason) it is
-	 * desired to clean up scan state anyway. Firmware may have just
-	 * dropped the scan completion event delivery due to transport pipe
-	 * being overflown with data and/or it can recover on its own before
-	 * next scan request is submitted.
+	/* Scan state should be updated in scan completion worker but in
+	 * case firmware fails to deliver the event (for whatever reason)
+	 * it is desired to clean up scan state anyway. Firmware may have
+	 * just dropped the scan completion event delivery due to transport
+	 * pipe being overflown with data and/or it can recover on its own
+	 * before next scan request is submitted.
 	 */
 	spin_lock_bh(&ar->data_lock);
-	if (ar->scan.state != ATH12K_SCAN_IDLE)
+	if (ret)
 		__ath12k_mac_scan_finish(ar);
 	spin_unlock_bh(&ar->data_lock);
 
@@ -3845,6 +4792,84 @@
 	wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
 }
 
+static void ath12k_mac_scan_send_complete(struct ath12k *ar,
+					  struct cfg80211_scan_info info)
+{
+	struct ath12k *partner_ar;
+	struct ath12k_pdev *pdev;
+	struct ath12k_base *ab;
+	struct ath12k_hw_group *ag = ar->ab->ag;
+	bool send_completion = true;
+	struct ath12k_hw *ah = ar->ah;
+	int i, j;
+
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+		for (j = 0; j < ab->num_radios; j++) {
+			pdev = &ab->pdevs[j];
+			partner_ar = pdev->ar;
+
+			if (!partner_ar || partner_ar == ar)
+				continue;
+			if (partner_ar->scan.state == ATH12K_SCAN_RUNNING) {
+				send_completion = false;
+				break;
+			}
+		}
+		if (!send_completion)
+			break;
+	}
+	if (send_completion)
+		ieee80211_scan_completed(ah->hw, &info);
+}
+
+static void ath12k_scan_vdev_clean_work(struct wiphy *wiphy, struct wiphy_work *work)
+{
+	struct ath12k *ar = container_of(work, struct ath12k,
+					 scan.vdev_clean_wk);
+	struct ath12k_hw *ah = ar->ah;
+	struct ath12k_link_vif *arvif;
+
+	lockdep_assert_wiphy(wiphy);
+
+	arvif = ar->scan.arvif;
+
+	/* The scan vdev has already been deleted. This can occur when a
+	 * new scan request is made on the same vif with a different
+	 * frequency, causing the scan arvif to move from one radio to
+	 * another. Or, scan was abrupted and via remove interface, the
+	 * arvif is already deleted. Alternatively, if the scan vdev is not
+	 * being used as an actual vdev, then do not delete it.
+	 */
+	if (!arvif || arvif->is_started)
+		goto work_complete;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac clean scan vdev (link id %u)",
+		   arvif->link_id);
+
+	ath12k_mac_remove_link_interface(ah->hw, arvif);
+	ath12k_mac_unassign_link_vif(arvif);
+
+work_complete:
+	spin_lock_bh(&ar->data_lock);
+	ar->scan.arvif = NULL;
+	if (!ar->scan.is_roc) {
+		struct cfg80211_scan_info info = {
+			.aborted = ((ar->scan.state ==
+				    ATH12K_SCAN_ABORTING) ||
+				    (ar->scan.state ==
+				    ATH12K_SCAN_STARTING)),
+		};
+
+		ath12k_mac_scan_send_complete(ar, info);
+	}
+
+	ar->scan.state = ATH12K_SCAN_IDLE;
+	ar->scan_channel = NULL;
+	ar->scan.roc_freq = 0;
+	spin_unlock_bh(&ar->data_lock);
+}
+
 static int ath12k_start_scan(struct ath12k *ar,
 			     struct ath12k_wmi_scan_req_arg *arg)
 {
@@ -3879,17 +4904,157 @@
 	return 0;
 }
 
+int ath12k_mac_get_fw_stats(struct ath12k *ar,
+			    struct ath12k_fw_stats_req_params *param)
+{
+	struct ath12k_base *ab = ar->ab;
+	struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
+	unsigned long timeout, time_left;
+	int ret;
+
+	guard(mutex)(&ah->hw_mutex);
+
+	if (ah->state != ATH12K_HW_STATE_ON)
+		return -ENETDOWN;
+
+	/* FW stats can get split when exceeding the stats data buffer limit.
+	 * In that case, since there is no end marking for the back-to-back
+	 * received 'update stats' event, we keep a 3 seconds timeout in case,
+	 * fw_stats_done is not marked yet
+	 */
+	timeout = jiffies + msecs_to_jiffies(3 * 1000);
+	ath12k_fw_stats_reset(ar);
+
+	reinit_completion(&ar->fw_stats_complete);
+
+	ret = ath12k_wmi_send_stats_request_cmd(ar, param->stats_id,
+						param->vdev_id, param->pdev_id);
+
+	if (ret) {
+		ath12k_warn(ab, "failed to request fw stats: %d\n", ret);
+		return ret;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "get fw stat pdev id %d vdev id %d stats id 0x%x\n",
+		   param->pdev_id, param->vdev_id, param->stats_id);
+
+	time_left = wait_for_completion_timeout(&ar->fw_stats_complete, 1 * HZ);
+
+	if (!time_left) {
+		ath12k_warn(ab, "time out while waiting for get fw stats\n");
+		return -ETIMEDOUT;
+	}
+
+	/* Firmware sends WMI_UPDATE_STATS_EVENTID back-to-back
+	 * when stats data buffer limit is reached. fw_stats_complete
+	 * is completed once host receives first event from firmware, but
+	 * still end might not be marked in the TLV.
+	 * Below loop is to confirm that firmware completed sending all the event
+	 * and fw_stats_done is marked true when end is marked in the TLV.
+	 */
+	for (;;) {
+		if (time_after(jiffies, timeout))
+			break;
+		spin_lock_bh(&ar->data_lock);
+		if (ar->fw_stats.fw_stats_done) {
+			spin_unlock_bh(&ar->data_lock);
+			break;
+		}
+		spin_unlock_bh(&ar->data_lock);
+	}
+	return 0;
+}
+
+static int ath12k_mac_op_get_txpower(struct ieee80211_hw *hw,
+				     struct ieee80211_vif *vif,
+				     unsigned int link_id,
+				     int *dbm)
+{
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_fw_stats_req_params params = {};
+	struct ath12k_fw_stats_pdev *pdev;
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_base *ab;
+	struct ath12k *ar;
+	int ret;
+
+	/* Final Tx power is minimum of Target Power, CTL power, Regulatory
+	 * Power, PSD EIRP Power. We just know the Regulatory power from the
+	 * regulatory rules obtained. FW knows all these power and sets the min
+	 * of these. Hence, we request the FW pdev stats in which FW reports
+	 * the minimum of all vdev's channel Tx power.
+	 */
+	lockdep_assert_wiphy(hw->wiphy);
+
+	arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+	if (!arvif || !arvif->ar)
+		return -EINVAL;
+
+	ar = arvif->ar;
+	ab = ar->ab;
+	if (ah->state != ATH12K_HW_STATE_ON)
+		goto err_fallback;
+
+	if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags))
+		return -EAGAIN;
+
+	/* Limit the requests to Firmware for fetching the tx power */
+	if (ar->chan_tx_pwr != ATH12K_PDEV_TX_POWER_INVALID &&
+	    time_before(jiffies,
+			msecs_to_jiffies(ATH12K_PDEV_TX_POWER_REFRESH_TIME_MSECS) +
+					 ar->last_tx_power_update))
+		goto send_tx_power;
+
+	params.pdev_id = ar->pdev->pdev_id;
+	params.vdev_id = arvif->vdev_id;
+	params.stats_id = WMI_REQUEST_PDEV_STAT;
+	ret = ath12k_mac_get_fw_stats(ar, &params);
+	if (ret) {
+		ath12k_warn(ab, "failed to request fw pdev stats: %d\n", ret);
+		goto err_fallback;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+	pdev = list_first_entry_or_null(&ar->fw_stats.pdevs,
+					struct ath12k_fw_stats_pdev, list);
+	if (!pdev) {
+		spin_unlock_bh(&ar->data_lock);
+		goto err_fallback;
+	}
+
+	/* tx power reported by firmware is in units of 0.5 dBm */
+	ar->chan_tx_pwr = pdev->chan_tx_power / 2;
+	spin_unlock_bh(&ar->data_lock);
+	ar->last_tx_power_update = jiffies;
+
+send_tx_power:
+	*dbm = ar->chan_tx_pwr;
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower fetched from firmware %d dBm\n",
+		   *dbm);
+	return 0;
+
+err_fallback:
+	/* We didn't get txpower from FW. Hence, relying on vif->bss_conf.txpower */
+	*dbm = vif->bss_conf.txpower;
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "txpower from firmware NaN, reported %d dBm\n",
+		   *dbm);
+	return 0;
+}
+
 static u8
 ath12k_mac_find_link_id_by_ar(struct ath12k_vif *ahvif, struct ath12k *ar)
 {
 	struct ath12k_link_vif *arvif;
 	struct ath12k_hw *ah = ahvif->ah;
 	unsigned long links = ahvif->links_map;
+	unsigned long scan_links_map;
 	u8 link_id;
 
 	lockdep_assert_wiphy(ah->hw->wiphy);
 
-	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+	for_each_set_bit(link_id, &links, ATH12K_NUM_MAX_LINKS) {
 		arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
 
 		if (!arvif || !arvif->is_created)
@@ -3899,18 +5064,26 @@
 			return link_id;
 	}
 
-	/* input ar is not assigned to any of the links, use link id
-	 * 0 for scan vdev creation.
+	/* input ar is not assigned to any of the links of ML VIF, use next
+	 * available scan link for scan vdev creation. There are cases where
+	 * single scan req needs to be split in driver and initiate separate
+	 * scan requests to firmware based on device.
 	 */
-	return 0;
+
+	/* Set all non-scan links (0-14) of scan_links_map so that ffs() will
+	 * choose an available link among scan links (i.e link id >= 15)
+	 */
+	scan_links_map = ahvif->links_map | ~ATH12K_SCAN_LINKS_MASK;
+	return ffs(~scan_links_map) - 1;
 }
 
-static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
+static int ath12k_mac_initiate_hw_scan(struct ieee80211_hw *hw,
 				 struct ieee80211_vif *vif,
-				 struct ieee80211_scan_request *hw_req)
+				       struct ieee80211_scan_request *hw_req,
+				       struct ath12k *ar,
+				       u8 from_index, u8 to_index)
 {
 	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
-	struct ath12k *ar;
 	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
 	struct ath12k_link_vif *arvif;
 	struct cfg80211_scan_request *req = &hw_req->req;
@@ -3919,25 +5092,32 @@
 	int ret;
 	int i;
 	bool create = true;
+	u8 n_channels = to_index - from_index;
 
 	lockdep_assert_wiphy(hw->wiphy);
 
-	arvif = &ahvif->deflink;
+	if (unlikely(test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
+		return -ESHUTDOWN;
 
-	/* Since the targeted scan device could depend on the frequency
-	 * requested in the hw_req, select the corresponding radio
-	 */
-	ar = ath12k_mac_select_scan_device(hw, vif, hw_req->req.channels[0]->center_freq);
-	if (!ar)
-		return -EINVAL;
+	arvif = &ahvif->deflink;
 
 	/* check if any of the links of ML VIF is already started on
 	 * radio(ar) correpsondig to given scan frequency and use it,
-	 * if not use deflink(link 0) for scan purpose.
+	 * if not use scan link (link 15) for scan purpose.
 	 */
 	link_id = ath12k_mac_find_link_id_by_ar(ahvif, ar);
+	/* All scan links are occupied. ideally this shouldn't happen as
+	 * mac80211 won't schedule scan for same band until ongoing scan is
+	 * completed, dont try to exceed max links just in case if it happens.
+	 */
+	if (link_id >= ATH12K_NUM_MAX_LINKS)
+		return -EBUSY;
+
 	arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
 
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac link ID %d selected for scan",
+		   arvif->link_id);
+
 	/* If the vif is already assigned to a specific vdev of an ar,
 	 * check whether its already started, vdev which is started
 	 * are not allowed to switch to a new radio.
@@ -3961,14 +5141,17 @@
 			create = false;
 		}
 	}
+
 	if (create) {
 		/* Previous arvif would've been cleared in radio switch block
 		 * above, assign arvif again for create.
 		 */
 		arvif = ath12k_mac_assign_link_vif(ah, vif, link_id);
 
+		arvif->is_scan_vif = true;
 		ret = ath12k_mac_vdev_create(ar, arvif);
 		if (ret) {
+			ath12k_mac_unassign_link_vif(arvif);
 			ath12k_warn(ar->ab, "unable to create scan vdev %d\n", ret);
 			return -EINVAL;
 		}
@@ -3981,7 +5164,7 @@
 		reinit_completion(&ar->scan.completed);
 		ar->scan.state = ATH12K_SCAN_STARTING;
 		ar->scan.is_roc = false;
-		ar->scan.vdev_id = arvif->vdev_id;
+		ar->scan.arvif = arvif;
 		ret = 0;
 		break;
 	case ATH12K_SCAN_STARTING:
@@ -4022,8 +5205,8 @@
 		arg->scan_f_passive = 1;
 	}
 
-	if (req->n_channels) {
-		arg->num_chan = req->n_channels;
+	if (n_channels) {
+		arg->num_chan = n_channels;
 		arg->chan_list = kcalloc(arg->num_chan, sizeof(*arg->chan_list),
 					 GFP_KERNEL);
 		if (!arg->chan_list) {
@@ -4032,7 +5215,7 @@
 		}
 
 		for (i = 0; i < arg->num_chan; i++)
-			arg->chan_list[i] = req->channels[i]->center_freq;
+			arg->chan_list[i] = req->channels[i + from_index]->center_freq;
 	}
 
 	ret = ath12k_start_scan(ar, arg);
@@ -4043,6 +5226,15 @@
 		spin_unlock_bh(&ar->data_lock);
 	}
 
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac scan started");
+
+	/* As per cfg80211/mac80211 scan design, it allows only one
+	 * scan at a time. Hence last_scan link id is used for
+	 * tracking the link id on which the scan is been done on
+	 * this vif.
+	 */
+	ahvif->last_scan_link = arvif->link_id;
+
 	/* Add a margin to account for event/command processing */
 	ieee80211_queue_delayed_work(ath12k_ar_to_hw(ar), &ar->scan.timeout,
 				     msecs_to_jiffies(arg->max_scan_time +
@@ -4058,18 +5250,61 @@
 	return ret;
 }
 
+static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw,
+				 struct ieee80211_vif *vif,
+				 struct ieee80211_scan_request *hw_req)
+{
+	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+	struct ath12k *ar, *prev_ar;
+	int i, from_index, to_index;
+
+	lockdep_assert_wiphy(hw->wiphy);
+	/* Since the targeted scan device could depend on the frequency
+	 * requested in the hw_req, select the corresponding radio
+	 */
+	prev_ar = ath12k_mac_select_scan_device(hw, vif,
+						hw_req->req.channels[0]->center_freq);
+	if (!prev_ar) {
+		ath12k_hw_warn(ah, "unable to select device for scan\n");
+		return -EINVAL;
+	}
+
+	/* NOTE: There could be 5G low/high channels as mac80211 sees
+	 * it as an single band. In that case split the hw request and
+	 * perform multiple scans
+	 */
+	from_index = 0;
+	for (i = 1; i < hw_req->req.n_channels; i++) {
+		ar = ath12k_mac_select_scan_device(hw, vif,
+						   hw_req->req.channels[i]->center_freq);
+		if (!ar) {
+			ath12k_hw_warn(ah, "unable to select device for scan\n");
+			return -EINVAL;
+		}
+		if (prev_ar == ar)
+			continue;
+
+		to_index = i;
+		ath12k_mac_initiate_hw_scan(hw, vif, hw_req, prev_ar,
+					    from_index, to_index);
+		from_index = to_index;
+		prev_ar = ar;
+	}
+	return ath12k_mac_initiate_hw_scan(hw, vif, hw_req, prev_ar, from_index, i);
+}
+
 static void ath12k_mac_op_cancel_hw_scan(struct ieee80211_hw *hw,
 					 struct ieee80211_vif *vif)
 {
 	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	u16 link_id = ahvif->last_scan_link;
 	struct ath12k_link_vif *arvif;
 	struct ath12k *ar;
 
 	lockdep_assert_wiphy(hw->wiphy);
 
-	arvif = &ahvif->deflink;
-
-	if (!arvif->is_created)
+	arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+	if (!arvif || arvif->is_started)
 		return;
 
 	ar = arvif->ar;
@@ -4094,8 +5329,6 @@
 		.key_flags = flags,
 		.macaddr = macaddr,
 	};
-	struct ath12k_vif *ahvif = arvif->ahvif;
-	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
 
 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
 
@@ -4114,6 +5347,7 @@
 
 	switch (key->cipher) {
 	case WLAN_CIPHER_SUITE_CCMP:
+	case WLAN_CIPHER_SUITE_CCMP_256:
 		arg.key_cipher = WMI_CIPHER_AES_CCM;
 		/* TODO: Re-check if flag is valid */
 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
@@ -4123,12 +5357,20 @@
 		arg.key_txmic_len = 8;
 		arg.key_rxmic_len = 8;
 		break;
-	case WLAN_CIPHER_SUITE_CCMP_256:
-		arg.key_cipher = WMI_CIPHER_AES_CCM;
-		break;
 	case WLAN_CIPHER_SUITE_GCMP:
 	case WLAN_CIPHER_SUITE_GCMP_256:
 		arg.key_cipher = WMI_CIPHER_AES_GCM;
+		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
+		break;
+	case WLAN_CIPHER_SUITE_AES_CMAC:
+		arg.key_cipher = WMI_CIPHER_AES_CMAC;
+		break;
+	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
+	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
+		arg.key_cipher = WMI_CIPHER_AES_GMAC;
+		break;
+	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
+		arg.key_cipher = WMI_CIPHER_AES_CMAC;
 		break;
 	default:
 		ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher);
@@ -4148,8 +5390,8 @@
 	if (!wait_for_completion_timeout(&ar->install_key_done, 1 * HZ))
 		return -ETIMEDOUT;
 
-	if (ether_addr_equal(macaddr, vif->addr))
-		ahvif->key_cipher = key->cipher;
+	if (ether_addr_equal(macaddr, arvif->bssid))
+		arvif->key_cipher = key->cipher;
 
 	return ar->install_key_status ? -EINVAL : 0;
 }
@@ -4201,8 +5443,6 @@
 			      struct ath12k_link_sta *arsta,
 			      struct ieee80211_key_conf *key)
 {
-	struct ath12k_vif *ahvif = arvif->ahvif;
-	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
 	struct ieee80211_sta *sta = NULL;
 	struct ath12k_base *ab = ar->ab;
 	struct ath12k_peer *peer;
@@ -4220,11 +5460,9 @@
 		return 1;
 
 	if (sta)
-		peer_addr = sta->addr;
-	else if (ahvif->vdev_type == WMI_VDEV_TYPE_STA)
-		peer_addr = vif->bss_conf.bssid;
+		peer_addr = arsta->addr;
 	else
-		peer_addr = vif->addr;
+		peer_addr = arvif->bssid;
 
 	key->hw_key_idx = key->keyidx;
 
@@ -4365,13 +5603,9 @@
 
 	lockdep_assert_wiphy(hw->wiphy);
 
-	/* BIP needs to be done in software */
-	if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
-	    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
-	    key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
-	    key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) {
+	/* IGTK needs to be done in host software */
+	if (key->keyidx == 4 || key->keyidx == 5)
 		return 1;
-	}
 
 	if (key->keyidx > WMI_MAX_KEY_INDEX)
 		return -ENOSPC;
@@ -4459,12 +5693,25 @@
 }
 
 static int
+ath12k_mac_bitrate_mask_num_he_rates(struct ath12k *ar,
+				     enum nl80211_band band,
+				     const struct cfg80211_bitrate_mask *mask)
+{
+	int num_rates = 0;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++)
+		num_rates += hweight16(mask->control[band].he_mcs[i]);
+
+	return num_rates;
+}
+
+static int
 ath12k_mac_set_peer_vht_fixed_rate(struct ath12k_link_vif *arvif,
 				   struct ath12k_link_sta *arsta,
 				   const struct cfg80211_bitrate_mask *mask,
 				   enum nl80211_band band)
 {
-	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
 	struct ath12k *ar = arvif->ar;
 	u8 vht_rate, nss;
 	u32 rate_code;
@@ -4483,80 +5730,156 @@
 
 	if (!nss) {
 		ath12k_warn(ar->ab, "No single VHT Fixed rate found to set for %pM",
-			    sta->addr);
+			    arsta->addr);
 		return -EINVAL;
 	}
 
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
 		   "Setting Fixed VHT Rate for peer %pM. Device will not switch to any other selected rates",
-		   sta->addr);
+		   arsta->addr);
 
 	rate_code = ATH12K_HW_RATE_CODE(vht_rate, nss - 1,
 					WMI_RATE_PREAMBLE_VHT);
-	ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+	ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
 					arvif->vdev_id,
 					WMI_PEER_PARAM_FIXED_RATE,
 					rate_code);
 	if (ret)
 		ath12k_warn(ar->ab,
 			    "failed to update STA %pM Fixed Rate %d: %d\n",
-			     sta->addr, rate_code, ret);
+			     arsta->addr, rate_code, ret);
 
 	return ret;
 }
 
-static int ath12k_station_assoc(struct ath12k *ar,
+static int
+ath12k_mac_set_peer_he_fixed_rate(struct ath12k_link_vif *arvif,
+				  struct ath12k_link_sta *arsta,
+				  const struct cfg80211_bitrate_mask *mask,
+				  enum nl80211_band band)
+{
+	struct ath12k *ar = arvif->ar;
+	u8 he_rate, nss;
+	u32 rate_code;
+	int ret, i;
+	struct ath12k_sta *ahsta = arsta->ahsta;
+	struct ieee80211_sta *sta;
+
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	sta = ath12k_ahsta_to_sta(ahsta);
+	nss = 0;
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+		if (hweight16(mask->control[band].he_mcs[i]) == 1) {
+			nss = i + 1;
+			he_rate = ffs(mask->control[band].he_mcs[i]) - 1;
+		}
+	}
+
+	if (!nss) {
+		ath12k_warn(ar->ab, "No single HE Fixed rate found to set for %pM",
+			    arsta->addr);
+		return -EINVAL;
+	}
+
+	/* Avoid updating invalid nss as fixed rate*/
+	if (nss > sta->deflink.rx_nss)
+		return -EINVAL;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "Setting Fixed HE Rate for peer %pM. Device will not switch to any other selected rates",
+		   arsta->addr);
+
+	rate_code = ATH12K_HW_RATE_CODE(he_rate, nss - 1,
+					WMI_RATE_PREAMBLE_HE);
+
+	ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
+					arvif->vdev_id,
+					WMI_PEER_PARAM_FIXED_RATE,
+					rate_code);
+	if (ret)
+		ath12k_warn(ar->ab,
+			    "failed to update STA %pM Fixed Rate %d: %d\n",
+			    arsta->addr, rate_code, ret);
+
+	return ret;
+}
+
+static int ath12k_mac_station_assoc(struct ath12k *ar,
 				struct ath12k_link_vif *arvif,
 				struct ath12k_link_sta *arsta,
 				bool reassoc)
 {
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
 	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
-	struct ath12k_wmi_peer_assoc_arg peer_arg;
+	struct ieee80211_link_sta *link_sta;
 	int ret;
 	struct cfg80211_chan_def def;
 	enum nl80211_band band;
 	struct cfg80211_bitrate_mask *mask;
-	u8 num_vht_rates;
+	u8 num_vht_rates, num_he_rates;
+	u8 link_id = arvif->link_id;
 
 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
 
 	if (WARN_ON(ath12k_mac_vif_link_chan(vif, arvif->link_id, &def)))
 		return -EPERM;
 
+	if (WARN_ON(!rcu_access_pointer(sta->link[link_id])))
+		return -EINVAL;
+
 	band = def.chan->band;
 	mask = &arvif->bitrate_mask;
 
-	ath12k_peer_assoc_prepare(ar, arvif, arsta, &peer_arg, reassoc);
+	struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) =
+		kzalloc(sizeof(*peer_arg), GFP_KERNEL);
+	if (!peer_arg)
+		return -ENOMEM;
 
-	if (peer_arg.peer_nss < 1) {
+	ath12k_peer_assoc_prepare(ar, arvif, arsta, peer_arg, reassoc);
+
+	if (peer_arg->peer_nss < 1) {
 		ath12k_warn(ar->ab,
-			    "invalid peer NSS %d\n", peer_arg.peer_nss);
+			    "invalid peer NSS %d\n", peer_arg->peer_nss);
 		return -EINVAL;
 	}
-	ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+	ret = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
-			    sta->addr, arvif->vdev_id, ret);
+			    arsta->addr, arvif->vdev_id, ret);
 		return ret;
 	}
 
 	if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ)) {
 		ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
-			    sta->addr, arvif->vdev_id);
+			    arsta->addr, arvif->vdev_id);
 		return -ETIMEDOUT;
 	}
 
 	num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask);
+	num_he_rates = ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask);
 
-	/* If single VHT rate is configured (by set_bitrate_mask()),
-	 * peer_assoc will disable VHT. This is now enabled by a peer specific
+	/* If single VHT/HE rate is configured (by set_bitrate_mask()),
+	 * peer_assoc will disable VHT/HE. This is now enabled by a peer specific
 	 * fixed param.
 	 * Note that all other rates and NSS will be disabled for this peer.
 	 */
-	if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
-		ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
-							 band);
+	link_sta = ath12k_mac_get_link_sta(arsta);
+	if (!link_sta) {
+		ath12k_warn(ar->ab, "unable to access link sta in station assoc\n");
+		return -EINVAL;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+	arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, link_sta);
+	arsta->bw_prev = link_sta->bandwidth;
+	spin_unlock_bh(&ar->data_lock);
+
+	if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) {
+		ret = ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask, band);
+	} else if (link_sta->he_cap.has_he && num_he_rates == 1) {
+		ret = ath12k_mac_set_peer_he_fixed_rate(arvif, arsta, mask, band);
 		if (ret)
 			return ret;
 	}
@@ -4567,9 +5890,8 @@
 	if (reassoc)
 		return 0;
 
-	ret = ath12k_setup_peer_smps(ar, arvif, sta->addr,
-				     &sta->deflink.ht_cap,
-				     &sta->deflink.he_6ghz_capa);
+	ret = ath12k_setup_peer_smps(ar, arvif, arsta->addr, &link_sta->ht_cap,
+				     &link_sta->he_cap, &link_sta->he_6ghz_capa);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n",
 			    arvif->vdev_id, ret);
@@ -4587,7 +5909,7 @@
 		ret = ath12k_peer_assoc_qos_ap(ar, arvif, arsta);
 		if (ret) {
 			ath12k_warn(ar->ab, "failed to set qos params for STA %pM for vdev %i: %d\n",
-				    sta->addr, arvif->vdev_id, ret);
+				    arsta->addr, arvif->vdev_id, ret);
 			return ret;
 		}
 	}
@@ -4595,33 +5917,25 @@
 	return 0;
 }
 
-static int ath12k_station_disassoc(struct ath12k *ar,
+static int ath12k_mac_station_disassoc(struct ath12k *ar,
 				   struct ath12k_link_vif *arvif,
 				   struct ath12k_link_sta *arsta)
 {
 	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
-	int ret;
 
 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
 
 	if (!sta->wme) {
 		arvif->num_legacy_stations--;
-		ret = ath12k_recalc_rtscts_prot(arvif);
-		if (ret)
-			return ret;
+		return ath12k_recalc_rtscts_prot(arvif);
 	}
 
-	ret = ath12k_clear_peer_keys(arvif, sta->addr);
-	if (ret) {
-		ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
-			    arvif->vdev_id, ret);
-		return ret;
-	}
 	return 0;
 }
 
 static void ath12k_sta_rc_update_wk(struct wiphy *wiphy, struct wiphy_work *wk)
 {
+	struct ieee80211_link_sta *link_sta;
 	struct ath12k *ar;
 	struct ath12k_link_vif *arvif;
 	struct ieee80211_sta *sta;
@@ -4629,10 +5943,10 @@
 	enum nl80211_band band;
 	const u8 *ht_mcs_mask;
 	const u16 *vht_mcs_mask;
-	u32 changed, bw, nss, smps, bw_prev;
-	int err, num_vht_rates;
+	const u16 *he_mcs_mask;
+	u32 changed, bw, nss, mac_nss, smps, bw_prev;
+	int err, num_vht_rates, num_he_rates;
 	const struct cfg80211_bitrate_mask *mask;
-	struct ath12k_wmi_peer_assoc_arg peer_arg;
 	enum wmi_phy_mode peer_phymode;
 	struct ath12k_link_sta *arsta;
 	struct ieee80211_vif *vif;
@@ -4651,6 +5965,7 @@
 	band = def.chan->band;
 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
+	he_mcs_mask = arvif->bitrate_mask.control[band].he_mcs;
 
 	spin_lock_bh(&ar->data_lock);
 
@@ -4665,12 +5980,19 @@
 	spin_unlock_bh(&ar->data_lock);
 
 	nss = max_t(u32, 1, nss);
-	nss = min(nss, max(ath12k_mac_max_ht_nss(ht_mcs_mask),
-			   ath12k_mac_max_vht_nss(vht_mcs_mask)));
+	mac_nss = max3(ath12k_mac_max_ht_nss(ht_mcs_mask),
+		       ath12k_mac_max_vht_nss(vht_mcs_mask),
+		       ath12k_mac_max_he_nss(he_mcs_mask));
+	nss = min(nss, mac_nss);
+
+	struct ath12k_wmi_peer_assoc_arg *peer_arg __free(kfree) =
+					kzalloc(sizeof(*peer_arg), GFP_KERNEL);
+	if (!peer_arg)
+		return;
 
 	if (changed & IEEE80211_RC_BW_CHANGED) {
-		ath12k_peer_assoc_h_phymode(ar, arvif, arsta, &peer_arg);
-		peer_phymode = peer_arg.peer_phymode;
+		ath12k_peer_assoc_h_phymode(ar, arvif, arsta, peer_arg);
+		peer_phymode = peer_arg->peer_phymode;
 
 		if (bw > bw_prev) {
 			/* Phymode shows maximum supported channel width, if we
@@ -4679,71 +6001,73 @@
 			 * WMI_PEER_CHWIDTH
 			 */
 			ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth upgrade for sta %pM new %d old %d\n",
-				   sta->addr, bw, bw_prev);
-			err = ath12k_wmi_set_peer_param(ar, sta->addr,
+				   arsta->addr, bw, bw_prev);
+			err = ath12k_wmi_set_peer_param(ar, arsta->addr,
 							arvif->vdev_id, WMI_PEER_PHYMODE,
 							peer_phymode);
 			if (err) {
 				ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
-					    sta->addr, peer_phymode, err);
+					    arsta->addr, peer_phymode, err);
 				return;
 			}
-			err = ath12k_wmi_set_peer_param(ar, sta->addr,
+			err = ath12k_wmi_set_peer_param(ar, arsta->addr,
 							arvif->vdev_id, WMI_PEER_CHWIDTH,
 							bw);
 			if (err)
 				ath12k_warn(ar->ab, "failed to update STA %pM to peer bandwidth %d: %d\n",
-					    sta->addr, bw, err);
+					    arsta->addr, bw, err);
 		} else {
 			/* When we downgrade bandwidth this will conflict with phymode
 			 * and cause to trigger firmware crash. In this case we send
 			 * WMI_PEER_CHWIDTH followed by WMI_PEER_PHYMODE
 			 */
 			ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac bandwidth downgrade for sta %pM new %d old %d\n",
-				   sta->addr, bw, bw_prev);
-			err = ath12k_wmi_set_peer_param(ar, sta->addr,
+				   arsta->addr, bw, bw_prev);
+			err = ath12k_wmi_set_peer_param(ar, arsta->addr,
 							arvif->vdev_id, WMI_PEER_CHWIDTH,
 							bw);
 			if (err) {
 				ath12k_warn(ar->ab, "failed to update STA %pM peer to bandwidth %d: %d\n",
-					    sta->addr, bw, err);
+					    arsta->addr, bw, err);
 				return;
 			}
-			err = ath12k_wmi_set_peer_param(ar, sta->addr,
+			err = ath12k_wmi_set_peer_param(ar, arsta->addr,
 							arvif->vdev_id, WMI_PEER_PHYMODE,
 							peer_phymode);
 			if (err)
 				ath12k_warn(ar->ab, "failed to update STA %pM to peer phymode %d: %d\n",
-					    sta->addr, peer_phymode, err);
+					    arsta->addr, peer_phymode, err);
 		}
 	}
 
 	if (changed & IEEE80211_RC_NSS_CHANGED) {
 		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM nss %d\n",
-			   sta->addr, nss);
+			   arsta->addr, nss);
 
-		err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+		err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
 						WMI_PEER_NSS, nss);
 		if (err)
 			ath12k_warn(ar->ab, "failed to update STA %pM nss %d: %d\n",
-				    sta->addr, nss, err);
+				    arsta->addr, nss, err);
 	}
 
 	if (changed & IEEE80211_RC_SMPS_CHANGED) {
 		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac update sta %pM smps %d\n",
-			   sta->addr, smps);
+			   arsta->addr, smps);
 
-		err = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+		err = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
 						WMI_PEER_MIMO_PS_STATE, smps);
 		if (err)
 			ath12k_warn(ar->ab, "failed to update STA %pM smps %d: %d\n",
-				    sta->addr, smps, err);
+				    arsta->addr, smps, err);
 	}
 
 	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
 		mask = &arvif->bitrate_mask;
 		num_vht_rates = ath12k_mac_bitrate_mask_num_vht_rates(ar, band,
 								      mask);
+		num_he_rates = ath12k_mac_bitrate_mask_num_he_rates(ar, band,
+								    mask);
 
 		/* Peer_assoc_prepare will reject vht rates in
 		 * bitrate_mask if its not available in range format and
@@ -4756,27 +6080,76 @@
 		 * TODO: Check RATEMASK_CMDID to support auto rates selection
 		 * across HT/VHT and for multiple VHT MCS support.
 		 */
-		if (sta->deflink.vht_cap.vht_supported && num_vht_rates == 1) {
+		link_sta = ath12k_mac_get_link_sta(arsta);
+		if (!link_sta) {
+			ath12k_warn(ar->ab, "unable to access link sta in peer assoc he for sta %pM link %u\n",
+				    sta->addr, arsta->link_id);
+			return;
+		}
+
+		if (link_sta->vht_cap.vht_supported && num_vht_rates == 1) {
 			ath12k_mac_set_peer_vht_fixed_rate(arvif, arsta, mask,
 							   band);
+		} else if (link_sta->he_cap.has_he && num_he_rates == 1) {
+			ath12k_mac_set_peer_he_fixed_rate(arvif, arsta, mask, band);
 		} else {
-			/* If the peer is non-VHT or no fixed VHT rate
+			/* If the peer is non-VHT/HE or no fixed VHT/HE rate
 			 * is provided in the new bitrate mask we set the
-			 * other rates using peer_assoc command.
+			 * other rates using peer_assoc command. Also clear
+			 * the peer fixed rate settings as it has higher proprity
+			 * than peer assoc
 			 */
+			err = ath12k_wmi_set_peer_param(ar, arsta->addr,
+							arvif->vdev_id,
+							WMI_PEER_PARAM_FIXED_RATE,
+							WMI_FIXED_RATE_NONE);
+			if (err)
+				ath12k_warn(ar->ab,
+					    "failed to disable peer fixed rate for STA %pM ret %d\n",
+					    arsta->addr, err);
+
 			ath12k_peer_assoc_prepare(ar, arvif, arsta,
-						  &peer_arg, true);
+						  peer_arg, true);
 
-			err = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg);
+			err = ath12k_wmi_send_peer_assoc_cmd(ar, peer_arg);
 			if (err)
 				ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n",
-					    sta->addr, arvif->vdev_id, err);
+					    arsta->addr, arvif->vdev_id, err);
 
 			if (!wait_for_completion_timeout(&ar->peer_assoc_done, 1 * HZ))
 				ath12k_warn(ar->ab, "failed to get peer assoc conf event for %pM vdev %i\n",
-					    sta->addr, arvif->vdev_id);
+					    arsta->addr, arvif->vdev_id);
+		}
 		}
 	}
+
+static void ath12k_mac_free_unassign_link_sta(struct ath12k_hw *ah,
+					      struct ath12k_sta *ahsta,
+					      u8 link_id)
+{
+	struct ath12k_link_sta *arsta;
+
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
+	if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
+		return;
+
+	arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+	if (WARN_ON(!arsta))
+		return;
+
+	ahsta->links_map &= ~BIT(link_id);
+	rcu_assign_pointer(ahsta->link[link_id], NULL);
+	synchronize_rcu();
+
+	if (arsta == &ahsta->deflink) {
+		arsta->link_id = ATH12K_INVALID_LINK_ID;
+		arsta->ahsta = NULL;
+		arsta->arvif = NULL;
+		return;
+	}
+
+	kfree(arsta);
 }
 
 static int ath12k_mac_inc_num_stations(struct ath12k_link_vif *arvif,
@@ -4809,9 +6182,149 @@
 	if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
 		return;
 
+	WARN_ON(!ar->num_stations);
+	if (ar->num_stations)
 	ar->num_stations--;
 }
 
+static void ath12k_mac_station_post_remove(struct ath12k *ar,
+					   struct ath12k_link_vif *arvif,
+					   struct ath12k_link_sta *arsta)
+{
+	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+	struct ath12k_peer *peer;
+
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	ath12k_mac_dec_num_stations(arvif, arsta);
+
+	spin_lock_bh(&ar->ab->base_lock);
+
+	peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+	if (peer && peer->sta == sta) {
+		ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
+			    vif->addr, arvif->vdev_id);
+		peer->sta = NULL;
+		list_del(&peer->list);
+		kfree(peer);
+		ar->num_peers--;
+	}
+
+	spin_unlock_bh(&ar->ab->base_lock);
+
+	kfree(arsta->rx_stats);
+	arsta->rx_stats = NULL;
+}
+
+static int ath12k_mac_station_unauthorize(struct ath12k *ar,
+					  struct ath12k_link_vif *arvif,
+					  struct ath12k_link_sta *arsta)
+{
+	struct ath12k_peer *peer;
+	int ret;
+
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	spin_lock_bh(&ar->ab->base_lock);
+
+	peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+	if (peer)
+		peer->is_authorized = false;
+
+	spin_unlock_bh(&ar->ab->base_lock);
+
+	/* Driver must clear the keys during the state change from
+	 * IEEE80211_STA_AUTHORIZED to IEEE80211_STA_ASSOC, since after
+	 * returning from here, mac80211 is going to delete the keys
+	 * in __sta_info_destroy_part2(). This will ensure that the driver does
+	 * not retain stale key references after mac80211 deletes the keys.
+	 */
+	ret = ath12k_clear_peer_keys(arvif, arsta->addr);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to clear all peer keys for vdev %i: %d\n",
+			    arvif->vdev_id, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath12k_mac_station_authorize(struct ath12k *ar,
+					struct ath12k_link_vif *arvif,
+					struct ath12k_link_sta *arsta)
+{
+	struct ath12k_peer *peer;
+	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+	int ret;
+
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	spin_lock_bh(&ar->ab->base_lock);
+
+	peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
+	if (peer)
+		peer->is_authorized = true;
+
+	spin_unlock_bh(&ar->ab->base_lock);
+
+	if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
+		ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
+						arvif->vdev_id,
+						WMI_PEER_AUTHORIZE,
+						1);
+		if (ret) {
+			ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
+				    arsta->addr, arvif->vdev_id, ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int ath12k_mac_station_remove(struct ath12k *ar,
+				     struct ath12k_link_vif *arvif,
+				     struct ath12k_link_sta *arsta)
+{
+	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	int ret = 0;
+
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	wiphy_work_cancel(ar->ah->hw->wiphy, &arsta->update_wk);
+
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
+		ath12k_bss_disassoc(ar, arvif);
+		ret = ath12k_mac_vdev_stop(arvif);
+		if (ret)
+			ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
+				    arvif->vdev_id, ret);
+	}
+
+	if (sta->mlo)
+		return ret;
+
+	ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);
+
+	ret = ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr);
+	if (ret)
+		ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
+			    arsta->addr, arvif->vdev_id);
+	else
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
+			   arsta->addr, arvif->vdev_id);
+
+	ath12k_mac_station_post_remove(ar, arvif, arsta);
+
+	if (sta->valid_links)
+		ath12k_mac_free_unassign_link_sta(ahvif->ah,
+						  arsta->ahsta, arsta->link_id);
+
+	return ret;
+}
+
 static int ath12k_mac_station_add(struct ath12k *ar,
 				  struct ath12k_link_vif *arvif,
 				  struct ath12k_link_sta *arsta)
@@ -4819,7 +6332,7 @@
 	struct ath12k_base *ab = ar->ab;
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
 	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(arsta->ahsta);
-	struct ath12k_wmi_peer_create_arg peer_param;
+	struct ath12k_wmi_peer_create_arg peer_param = {0};
 	int ret;
 
 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -4830,41 +6343,45 @@
 			    ar->max_num_stations);
 		goto exit;
 	}
+
+	if (ath12k_debugfs_is_extd_rx_stats_enabled(ar) && !arsta->rx_stats) {
 	arsta->rx_stats = kzalloc(sizeof(*arsta->rx_stats), GFP_KERNEL);
 	if (!arsta->rx_stats) {
 		ret = -ENOMEM;
 		goto dec_num_station;
 	}
+	}
 
 	peer_param.vdev_id = arvif->vdev_id;
-	peer_param.peer_addr = sta->addr;
+	peer_param.peer_addr = arsta->addr;
 	peer_param.peer_type = WMI_PEER_TYPE_DEFAULT;
+	peer_param.ml_enabled = sta->mlo;
 
 	ret = ath12k_peer_create(ar, arvif, sta, &peer_param);
 	if (ret) {
 		ath12k_warn(ab, "Failed to add peer: %pM for VDEV: %d\n",
-			    sta->addr, arvif->vdev_id);
+			    arsta->addr, arvif->vdev_id);
 		goto free_peer;
 	}
 
 	ath12k_dbg(ab, ATH12K_DBG_MAC, "Added peer: %pM for VDEV: %d\n",
-		   sta->addr, arvif->vdev_id);
+		   arsta->addr, arvif->vdev_id);
 
 	if (ieee80211_vif_is_mesh(vif)) {
-		ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+		ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
 						arvif->vdev_id,
 						WMI_PEER_USE_4ADDR, 1);
 		if (ret) {
 			ath12k_warn(ab, "failed to STA %pM 4addr capability: %d\n",
-				    sta->addr, ret);
+				    arsta->addr, ret);
 			goto free_peer;
 		}
 	}
 
-	ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, sta->addr);
+	ret = ath12k_dp_peer_setup(ar, arvif->vdev_id, arsta->addr);
 	if (ret) {
 		ath12k_warn(ab, "failed to setup dp for peer %pM on vdev %i (%d)\n",
-			    sta->addr, arvif->vdev_id, ret);
+			    arsta->addr, arvif->vdev_id, ret);
 		goto free_peer;
 	}
 
@@ -4878,187 +6395,293 @@
 		}
 	}
 
+	ewma_avg_rssi_init(&arsta->avg_rssi);
 	return 0;
 
 free_peer:
-	ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
+	ath12k_peer_delete(ar, arvif->vdev_id, arsta->addr);
+	kfree(arsta->rx_stats);
+	arsta->rx_stats = NULL;
 dec_num_station:
 	ath12k_mac_dec_num_stations(arvif, arsta);
 exit:
 	return ret;
 }
 
-static u32 ath12k_mac_ieee80211_sta_bw_to_wmi(struct ath12k *ar,
-					      struct ieee80211_sta *sta)
+static int ath12k_mac_assign_link_sta(struct ath12k_hw *ah,
+				      struct ath12k_sta *ahsta,
+				      struct ath12k_link_sta *arsta,
+				      struct ath12k_vif *ahvif,
+				      u8 link_id)
 {
-	u32 bw = WMI_PEER_CHWIDTH_20MHZ;
+	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+	struct ieee80211_link_sta *link_sta;
+	struct ath12k_link_vif *arvif;
 
-	switch (sta->deflink.bandwidth) {
-	case IEEE80211_STA_RX_BW_20:
-		bw = WMI_PEER_CHWIDTH_20MHZ;
-		break;
-	case IEEE80211_STA_RX_BW_40:
-		bw = WMI_PEER_CHWIDTH_40MHZ;
-		break;
-	case IEEE80211_STA_RX_BW_80:
-		bw = WMI_PEER_CHWIDTH_80MHZ;
-		break;
-	case IEEE80211_STA_RX_BW_160:
-		bw = WMI_PEER_CHWIDTH_160MHZ;
-		break;
-	case IEEE80211_STA_RX_BW_320:
-		bw = WMI_PEER_CHWIDTH_320MHZ;
-		break;
-	default:
-		ath12k_warn(ar->ab, "Invalid bandwidth %d in rc update for %pM\n",
-			    sta->deflink.bandwidth, sta->addr);
-		bw = WMI_PEER_CHWIDTH_20MHZ;
-		break;
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
+	if (!arsta || link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+		return -EINVAL;
+
+	arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+	if (!arvif)
+		return -EINVAL;
+
+	memset(arsta, 0, sizeof(*arsta));
+
+	link_sta = wiphy_dereference(ah->hw->wiphy, sta->link[link_id]);
+	if (!link_sta)
+		return -EINVAL;
+
+	ether_addr_copy(arsta->addr, link_sta->addr);
+
+	/* logical index of the link sta in order of creation */
+	arsta->link_idx = ahsta->num_peer++;
+
+	arsta->link_id = link_id;
+	ahsta->links_map |= BIT(arsta->link_id);
+	arsta->arvif = arvif;
+	arsta->ahsta = ahsta;
+	ahsta->ahvif = ahvif;
+
+	wiphy_work_init(&arsta->update_wk, ath12k_sta_rc_update_wk);
+
+	rcu_assign_pointer(ahsta->link[link_id], arsta);
+
+	return 0;
 	}
 
-	return bw;
+static void ath12k_mac_ml_station_remove(struct ath12k_vif *ahvif,
+					 struct ath12k_sta *ahsta)
+{
+	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+	struct ath12k_hw *ah = ahvif->ah;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_link_sta *arsta;
+	unsigned long links;
+	struct ath12k *ar;
+	u8 link_id;
+
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
+	ath12k_peer_mlo_link_peers_delete(ahvif, ahsta);
+
+	/* validate link station removal and clear arsta links */
+	links = ahsta->links_map;
+	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+		arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+		if (!arvif || !arsta)
+			continue;
+
+		ar = arvif->ar;
+
+		ath12k_mac_station_post_remove(ar, arvif, arsta);
+
+		ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
 }
 
-static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
-				   struct ieee80211_vif *vif,
-				   struct ieee80211_sta *sta,
+	ath12k_peer_ml_delete(ah, sta);
+}
+
+static int ath12k_mac_handle_link_sta_state(struct ieee80211_hw *hw,
+					    struct ath12k_link_vif *arvif,
+					    struct ath12k_link_sta *arsta,
 				   enum ieee80211_sta_state old_state,
 				   enum ieee80211_sta_state new_state)
 {
-	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
-	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
-	struct ath12k *ar;
-	struct ath12k_link_vif *arvif;
-	struct ath12k_link_sta *arsta;
-	struct ath12k_peer *peer;
+	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+	struct ath12k *ar = arvif->ar;
 	int ret = 0;
 
 	lockdep_assert_wiphy(hw->wiphy);
 
-	arvif = &ahvif->deflink;
-	arsta = &ahsta->deflink;
+	if (unlikely(test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
+		return -ESHUTDOWN;
 
-	ar = ath12k_get_ar_by_vif(hw, vif);
-	if (!ar) {
-		WARN_ON_ONCE(1);
-		return -EINVAL;
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac handle link %u sta %pM state %d -> %d\n",
+		   arsta->link_id, arsta->addr, old_state, new_state);
+
+	/* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST: Remove the station
+	 * from driver
+	 */
+	if ((old_state == IEEE80211_STA_NONE &&
+	     new_state == IEEE80211_STA_NOTEXIST)) {
+		ret = ath12k_mac_station_remove(ar, arvif, arsta);
+		if (ret) {
+			ath12k_warn(ar->ab, "Failed to remove station: %pM for VDEV: %d\n",
+				    arsta->addr, arvif->vdev_id);
+			goto exit;
+		}
 	}
 
+	/* IEEE80211_STA_NOTEXIST -> IEEE80211_STA_NONE: Add new station to driver */
 	if (old_state == IEEE80211_STA_NOTEXIST &&
 	    new_state == IEEE80211_STA_NONE) {
-		memset(arsta, 0, sizeof(*arsta));
-		rcu_assign_pointer(ahsta->link[0], arsta);
-		/* TODO use appropriate link id once MLO support is added  */
-		arsta->link_id = ATH12K_DEFAULT_LINK_ID;
-		ahsta->links_map = BIT(arsta->link_id);
-		arsta->ahsta = ahsta;
-		arsta->arvif = arvif;
-		wiphy_work_init(&arsta->update_wk, ath12k_sta_rc_update_wk);
-
-		synchronize_rcu();
-
 		ret = ath12k_mac_station_add(ar, arvif, arsta);
 		if (ret)
 			ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
-				    sta->addr, arvif->vdev_id);
-	} else if ((old_state == IEEE80211_STA_NONE &&
-		    new_state == IEEE80211_STA_NOTEXIST)) {
-		wiphy_work_cancel(hw->wiphy, &arsta->update_wk);
+				    arsta->addr, arvif->vdev_id);
 
-		if (ahvif->vdev_type == WMI_VDEV_TYPE_STA) {
-			ath12k_bss_disassoc(ar, arvif);
-			ret = ath12k_mac_vdev_stop(arvif);
-			if (ret)
-				ath12k_warn(ar->ab, "failed to stop vdev %i: %d\n",
-					    arvif->vdev_id, ret);
-		}
-		ath12k_dp_peer_cleanup(ar, arvif->vdev_id, sta->addr);
-
-		ret = ath12k_peer_delete(ar, arvif->vdev_id, sta->addr);
-		if (ret)
-			ath12k_warn(ar->ab, "Failed to delete peer: %pM for VDEV: %d\n",
-				    sta->addr, arvif->vdev_id);
-		else
-			ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "Removed peer: %pM for VDEV: %d\n",
-				   sta->addr, arvif->vdev_id);
-
-		ath12k_mac_dec_num_stations(arvif, arsta);
-		spin_lock_bh(&ar->ab->base_lock);
-		peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
-		if (peer && peer->sta == sta) {
-			ath12k_warn(ar->ab, "Found peer entry %pM n vdev %i after it was supposedly removed\n",
-				    vif->addr, arvif->vdev_id);
-			peer->sta = NULL;
-			list_del(&peer->list);
-			kfree(peer);
-			ar->num_peers--;
-		}
-		spin_unlock_bh(&ar->ab->base_lock);
-
-		kfree(arsta->rx_stats);
-		arsta->rx_stats = NULL;
-
-		if (arsta->link_id < IEEE80211_MLD_MAX_NUM_LINKS) {
-			rcu_assign_pointer(ahsta->link[arsta->link_id], NULL);
-			synchronize_rcu();
-			ahsta->links_map &= ~(BIT(arsta->link_id));
-			arsta->link_id = ATH12K_INVALID_LINK_ID;
-			arsta->ahsta = NULL;
-		}
+	/* IEEE80211_STA_AUTH -> IEEE80211_STA_ASSOC: Send station assoc command for
+	 * peer associated to AP/Mesh/ADHOC vif type.
+	 */
 	} else if (old_state == IEEE80211_STA_AUTH &&
 		   new_state == IEEE80211_STA_ASSOC &&
 		   (vif->type == NL80211_IFTYPE_AP ||
 		    vif->type == NL80211_IFTYPE_MESH_POINT ||
 		    vif->type == NL80211_IFTYPE_ADHOC)) {
-		ret = ath12k_station_assoc(ar, arvif, arsta, false);
+		ret = ath12k_mac_station_assoc(ar, arvif, arsta, false);
 		if (ret)
 			ath12k_warn(ar->ab, "Failed to associate station: %pM\n",
-				    sta->addr);
+				    arsta->addr);
 
-		spin_lock_bh(&ar->data_lock);
-
-		arsta->bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
-		arsta->bw_prev = sta->deflink.bandwidth;
-
-		spin_unlock_bh(&ar->data_lock);
+	/* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTHORIZED: set peer status as
+	 * authorized
+	 */
 	} else if (old_state == IEEE80211_STA_ASSOC &&
 		   new_state == IEEE80211_STA_AUTHORIZED) {
-		spin_lock_bh(&ar->ab->base_lock);
-
-		peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
-		if (peer)
-			peer->is_authorized = true;
-
-		spin_unlock_bh(&ar->ab->base_lock);
-
-		if (vif->type == NL80211_IFTYPE_STATION && arvif->is_up) {
-			ret = ath12k_wmi_set_peer_param(ar, sta->addr,
-							arvif->vdev_id,
-							WMI_PEER_AUTHORIZE,
-							1);
+		ret = ath12k_mac_station_authorize(ar, arvif, arsta);
 			if (ret)
-				ath12k_warn(ar->ab, "Unable to authorize peer %pM vdev %d: %d\n",
-					    sta->addr, arvif->vdev_id, ret);
-		}
+			ath12k_warn(ar->ab, "Failed to authorize station: %pM\n",
+				    arsta->addr);
+
+	/* IEEE80211_STA_AUTHORIZED -> IEEE80211_STA_ASSOC: station may be in removal,
+	 * deauthorize it.
+	 */
 	} else if (old_state == IEEE80211_STA_AUTHORIZED &&
 		   new_state == IEEE80211_STA_ASSOC) {
-		spin_lock_bh(&ar->ab->base_lock);
-
-		peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
-		if (peer)
-			peer->is_authorized = false;
+		ath12k_mac_station_unauthorize(ar, arvif, arsta);
 
-		spin_unlock_bh(&ar->ab->base_lock);
+	/* IEEE80211_STA_ASSOC -> IEEE80211_STA_AUTH: disassoc peer connected to
+	 * AP/mesh/ADHOC vif type.
+	 */
 	} else if (old_state == IEEE80211_STA_ASSOC &&
 		   new_state == IEEE80211_STA_AUTH &&
 		   (vif->type == NL80211_IFTYPE_AP ||
 		    vif->type == NL80211_IFTYPE_MESH_POINT ||
 		    vif->type == NL80211_IFTYPE_ADHOC)) {
-		ret = ath12k_station_disassoc(ar, arvif, arsta);
+		ret = ath12k_mac_station_disassoc(ar, arvif, arsta);
 		if (ret)
 			ath12k_warn(ar->ab, "Failed to disassociate station: %pM\n",
+				    arsta->addr);
+	}
+
+exit:
+	return ret;
+}
+
+static int ath12k_mac_op_sta_state(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_sta *sta,
+				   enum ieee80211_sta_state old_state,
+				   enum ieee80211_sta_state new_state)
+{
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+	struct ath12k_link_vif *arvif;
+	struct ath12k_link_sta *arsta;
+	unsigned long valid_links;
+	u8 link_id = 0;
+	int ret;
+
+	lockdep_assert_wiphy(hw->wiphy);
+
+	if (ieee80211_vif_is_mld(vif) && sta->valid_links) {
+		WARN_ON(!sta->mlo && hweight16(sta->valid_links) != 1);
+		link_id = ffs(sta->valid_links) - 1;
+	}
+
+	/* IEEE80211_STA_NOTEXIST -> IEEE80211_STA_NONE:
+	 * New station add received. If this is a ML station then
+	 * ahsta->links_map will be zero and sta->valid_links will be 1.
+	 * Assign default link to the first link sta.
+	 */
+	if (old_state == IEEE80211_STA_NOTEXIST &&
+	    new_state == IEEE80211_STA_NONE) {
+		memset(ahsta, 0, sizeof(*ahsta));
+
+		arsta = &ahsta->deflink;
+
+		/* ML sta */
+		if (sta->mlo && !ahsta->links_map &&
+		    (hweight16(sta->valid_links) == 1)) {
+			ret = ath12k_peer_ml_create(ah, sta);
+			if (ret) {
+				ath12k_hw_warn(ah, "unable to create ML peer for sta %pM",
 				    sta->addr);
+				goto exit;
 	}
+		}
+
+		ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif,
+						 link_id);
+		if (ret) {
+			ath12k_hw_warn(ah, "unable assign link %d for sta %pM",
+				       link_id, sta->addr);
+			goto exit;
+		}
+
+		/* arsta above is memset during link sta assignment, hence
+		 * set these fields after assigning the link sta
+		 */
+		if (sta->mlo) {
+			arsta->is_assoc_link = true;
+			ahsta->assoc_link_id = link_id;
+		}
+	}
+
+	/* In the ML station scenario, activate all partner links once the
+	 * client is transitioning to the associated state.
+	 *
+	 * FIXME: Ideally, this activation should occur when the client
+	 * transitions to the authorized state. However, there are some
+	 * issues with handling this in the firmware. Until the firmware
+	 * can manage it properly, activate the links when the client is
+	 * about to move to the associated state.
+	 */
+	if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION &&
+	    old_state == IEEE80211_STA_AUTH && new_state == IEEE80211_STA_ASSOC)
+		ieee80211_set_active_links(vif, ieee80211_vif_usable_links(vif));
+
+	/* Handle all the other state transitions in generic way */
+	valid_links = ahsta->links_map;
+	for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+		arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_id]);
+		/* some assumptions went wrong! */
+		if (WARN_ON(!arvif || !arsta))
+			continue;
+
+		/* vdev might have been deleted */
+		if (WARN_ON(!arvif->ar))
+			continue;
+
+		ret = ath12k_mac_handle_link_sta_state(hw, arvif, arsta,
+						       old_state, new_state);
+		if (ret) {
+			ath12k_hw_warn(ah, "unable to move link sta %d of sta %pM from state %d to %d",
+				       link_id, arsta->addr, old_state, new_state);
+			goto exit;
+		}
+	}
+
+	/* IEEE80211_STA_NONE -> IEEE80211_STA_NOTEXIST:
+	 * Remove the station from driver (handle ML sta here since that
+	 * needs special handling). Normal sta will be handled in generic
+	 * handler below
+	 */
+	if (old_state == IEEE80211_STA_NONE &&
+	    new_state == IEEE80211_STA_NOTEXIST && sta->mlo)
+		ath12k_mac_ml_station_remove(ahvif, ahsta);
+
+	ret = 0;
+
+exit:
+	/* update the state if everything went well */
+	if (!ret)
+		ahsta->state = new_state;
 
 	return ret;
 }
@@ -5067,16 +6690,22 @@
 				       struct ieee80211_vif *vif,
 				       struct ieee80211_sta *sta)
 {
-	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
 	struct ath12k *ar;
 	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
 	struct ath12k_link_vif *arvif;
+	struct ath12k_link_sta *arsta;
+	u8 link_id;
 	int ret;
 	s16 txpwr;
 
 	lockdep_assert_wiphy(hw->wiphy);
 
-	arvif = &ahvif->deflink;
+	/* TODO: use link id from mac80211 once that's implemented */
+	link_id = 0;
+
+	arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+	arsta = wiphy_dereference(hw->wiphy, ahsta->link[link_id]);
 
 	if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
 		txpwr = 0;
@@ -5093,9 +6722,9 @@
 		goto out;
 	}
 
-	ar = ath12k_ah_to_ar(ah, 0);
+	ar = arvif->ar;
 
-	ret = ath12k_wmi_set_peer_param(ar, sta->addr, arvif->vdev_id,
+	ret = ath12k_wmi_set_peer_param(ar, arsta->addr, arvif->vdev_id,
 					WMI_PEER_USE_FIXED_PWR, txpwr);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to set tx power for station ret: %d\n",
@@ -5107,7 +6736,7 @@
 	return ret;
 }
 
-static void ath12k_mac_op_sta_rc_update(struct ieee80211_hw *hw,
+static void ath12k_mac_op_link_sta_rc_update(struct ieee80211_hw *hw,
 					struct ieee80211_vif *vif,
 					struct ieee80211_link_sta *link_sta,
 					u32 changed)
@@ -5116,69 +6745,76 @@
 	struct ath12k *ar;
 	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
 	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
 	struct ath12k_link_sta *arsta;
 	struct ath12k_link_vif *arvif;
 	struct ath12k_peer *peer;
 	u32 bw, smps;
-	/* TODO: use proper link id once link sta specific rc update support is
-	 * available in mac80211.
-	 */
-	u8 link_id = ATH12K_DEFAULT_LINK_ID;
-
-	ar = ath12k_get_ar_by_vif(hw, vif);
-	if (!ar) {
-		WARN_ON_ONCE(1);
-		return;
-	}
 
 	rcu_read_lock();
-	arvif = rcu_dereference(ahvif->link[link_id]);
+	arvif = rcu_dereference(ahvif->link[link_sta->link_id]);
 	if (!arvif) {
-		ath12k_warn(ar->ab, "mac sta rc update failed to fetch link vif on link id %u for peer %pM\n",
-			    link_id, sta->addr);
+		ath12k_hw_warn(ah, "mac sta rc update failed to fetch link vif on link id %u for peer %pM\n",
+			       link_sta->link_id, sta->addr);
 		rcu_read_unlock();
 		return;
 	}
-	arsta = rcu_dereference(ahsta->link[link_id]);
+
+	ar = arvif->ar;
+
+	arsta = rcu_dereference(ahsta->link[link_sta->link_id]);
 	if (!arsta) {
 		rcu_read_unlock();
 		ath12k_warn(ar->ab, "mac sta rc update failed to fetch link sta on link id %u for peer %pM\n",
-			    link_id, sta->addr);
+			    link_sta->link_id, sta->addr);
 		return;
 	}
 	spin_lock_bh(&ar->ab->base_lock);
 
-	peer = ath12k_peer_find(ar->ab, arvif->vdev_id, sta->addr);
+	peer = ath12k_peer_find(ar->ab, arvif->vdev_id, arsta->addr);
 	if (!peer) {
 		spin_unlock_bh(&ar->ab->base_lock);
 		rcu_read_unlock();
 		ath12k_warn(ar->ab, "mac sta rc update failed to find peer %pM on vdev %i\n",
-			    sta->addr, arvif->vdev_id);
+			    arsta->addr, arvif->vdev_id);
 		return;
 	}
 
 	spin_unlock_bh(&ar->ab->base_lock);
 
+	if (arsta->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+		rcu_read_unlock();
+		return;
+	}
+
+	link_sta = rcu_dereference(sta->link[arsta->link_id]);
+	if (!link_sta) {
+		rcu_read_unlock();
+		ath12k_warn(ar->ab, "unable to access link sta in rc update for sta %pM link %u\n",
+			    sta->addr, arsta->link_id);
+		return;
+	}
+
 	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
 		   "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
-		   sta->addr, changed, sta->deflink.bandwidth, sta->deflink.rx_nss,
-		   sta->deflink.smps_mode);
+		   arsta->addr, changed, link_sta->bandwidth, link_sta->rx_nss,
+		   link_sta->smps_mode);
 
 	spin_lock_bh(&ar->data_lock);
 
 	if (changed & IEEE80211_RC_BW_CHANGED) {
-		bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, sta);
+		bw = ath12k_mac_ieee80211_sta_bw_to_wmi(ar, link_sta);
 		arsta->bw_prev = arsta->bw;
 		arsta->bw = bw;
 	}
 
 	if (changed & IEEE80211_RC_NSS_CHANGED)
-		arsta->nss = sta->deflink.rx_nss;
+		arsta->nss = link_sta->rx_nss;
 
 	if (changed & IEEE80211_RC_SMPS_CHANGED) {
 		smps = WMI_PEER_SMPS_PS_NONE;
 
-		switch (sta->deflink.smps_mode) {
+		switch (link_sta->smps_mode) {
 		case IEEE80211_SMPS_AUTOMATIC:
 		case IEEE80211_SMPS_OFF:
 			smps = WMI_PEER_SMPS_PS_NONE;
@@ -5190,8 +6826,8 @@
 			smps = WMI_PEER_SMPS_DYNAMIC;
 			break;
 		default:
-			ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM\n",
-				    sta->deflink.smps_mode, sta->addr);
+			ath12k_warn(ar->ab, "Invalid smps %d in sta rc update for %pM link %u\n",
+				    link_sta->smps_mode, arsta->addr, link_sta->link_id);
 			smps = WMI_PEER_SMPS_PS_NONE;
 			break;
 		}
@@ -5208,6 +6844,110 @@
 	rcu_read_unlock();
 }
 
+static struct ath12k_link_sta *ath12k_mac_alloc_assign_link_sta(struct ath12k_hw *ah,
+								struct ath12k_sta *ahsta,
+								struct ath12k_vif *ahvif,
+								u8 link_id)
+{
+	struct ath12k_link_sta *arsta;
+	int ret;
+
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
+	if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS)
+		return NULL;
+
+	arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+	if (arsta)
+		return NULL;
+
+	arsta = kmalloc(sizeof(*arsta), GFP_KERNEL);
+	if (!arsta)
+		return NULL;
+
+	ret = ath12k_mac_assign_link_sta(ah, ahsta, arsta, ahvif, link_id);
+	if (ret) {
+		kfree(arsta);
+		return NULL;
+	}
+
+	return arsta;
+}
+
+static int ath12k_mac_op_change_sta_links(struct ieee80211_hw *hw,
+					  struct ieee80211_vif *vif,
+					  struct ieee80211_sta *sta,
+					  u16 old_links, u16 new_links)
+{
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_link_sta *arsta;
+	unsigned long valid_links;
+	struct ath12k *ar;
+	u8 link_id;
+	int ret;
+
+	lockdep_assert_wiphy(hw->wiphy);
+
+	if (!sta->valid_links)
+		return -EINVAL;
+
+	/* Firmware does not support removal of a single link sta. All sta
+	 * would be removed during ML STA delete in sta_state(), hence link
+	 * sta removal is not handled here.
+	 */
+	if (new_links < old_links)
+		return 0;
+
+	if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
+		ath12k_hw_warn(ah, "unable to add link for ml sta %pM", sta->addr);
+		return -EINVAL;
+	}
+
+	/* this op is expected only after initial sta insertion with default link */
+	if (WARN_ON(ahsta->links_map == 0))
+		return -EINVAL;
+
+	valid_links = new_links;
+	for_each_set_bit(link_id, &valid_links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		if (ahsta->links_map & BIT(link_id))
+			continue;
+
+		arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+		arsta = ath12k_mac_alloc_assign_link_sta(ah, ahsta, ahvif, link_id);
+
+		if (!arvif || !arsta) {
+			ath12k_hw_warn(ah, "Failed to alloc/assign link sta");
+			continue;
+		}
+
+		ar = arvif->ar;
+		if (!ar)
+			continue;
+
+		ret = ath12k_mac_station_add(ar, arvif, arsta);
+		if (ret) {
+			ath12k_warn(ar->ab, "Failed to add station: %pM for VDEV: %d\n",
+				    arsta->addr, arvif->vdev_id);
+			ath12k_mac_free_unassign_link_sta(ah, ahsta, link_id);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static bool ath12k_mac_op_can_activate_links(struct ieee80211_hw *hw,
+					     struct ieee80211_vif *vif,
+					     u16 active_links)
+{
+	/* TODO: Handle recovery case */
+
+	return true;
+}
+
 static int ath12k_conf_tx_uapsd(struct ath12k_link_vif *arvif,
 				u16 ac, bool enable)
 {
@@ -5520,10 +7260,8 @@
 
 	ath12k_set_vht_txbf_cap(ar, &vht_cap.cap);
 
-	/* TODO: Enable back VHT160 mode once association issues are fixed */
-	/* Disabling VHT160 and VHT80+80 modes */
-	vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
-	vht_cap.cap &= ~IEEE80211_VHT_CAP_SHORT_GI_160;
+	/* 80P80 is not supported */
+	vht_cap.cap &= ~IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ;
 
 	rxmcs_map = 0;
 	txmcs_map = 0;
@@ -5545,6 +7283,12 @@
 	vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(rxmcs_map);
 	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(txmcs_map);
 
+	/* Check if the HW supports 1:1 NSS ratio and reset
+	 * EXT NSS BW Support field to 0 to indicate 1:1 ratio
+	 */
+	if (ar->pdev->cap.nss_ratio_info == WMI_NSS_RATIO_1_NSS)
+		vht_cap.cap &= ~IEEE80211_VHT_CAP_EXT_NSS_BW_MASK;
+
 	return vht_cap;
 }
 
@@ -5560,7 +7304,7 @@
 	rate_cap_tx_chainmask = ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift;
 	rate_cap_rx_chainmask = ar->cfg_rx_chainmask >> cap->rx_chain_mask_shift;
 
-	if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+	if (cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
 		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
 		ht_cap = cap->band[NL80211_BAND_2GHZ].ht_cap_info;
 		if (ht_cap_info)
@@ -5569,7 +7313,7 @@
 						    rate_cap_rx_chainmask);
 	}
 
-	if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+	if (cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP &&
 	    (ar->ab->hw_params->single_pdev_only ||
 	     !ar->supports_6ghz)) {
 		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
@@ -5722,12 +7466,55 @@
 	return cpu_to_le16(bcap->he_6ghz_capa);
 }
 
-static void ath12k_mac_copy_he_cap(struct ath12k_band_cap *band_cap,
+static void ath12k_mac_set_hemcsmap(struct ath12k *ar,
+				    struct ath12k_pdev_cap *cap,
+				    struct ieee80211_sta_he_cap *he_cap)
+{
+	struct ieee80211_he_mcs_nss_supp *mcs_nss = &he_cap->he_mcs_nss_supp;
+	u8 maxtxnss_160 = ath12k_get_nss_160mhz(ar, ar->num_tx_chains);
+	u8 maxrxnss_160 = ath12k_get_nss_160mhz(ar, ar->num_rx_chains);
+	u16 txmcs_map_160 = 0, rxmcs_map_160 = 0;
+	u16 txmcs_map = 0, rxmcs_map = 0;
+	u32 i;
+
+	for (i = 0; i < 8; i++) {
+		if (i < ar->num_tx_chains &&
+		    (ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+			txmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+		else
+			txmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
+
+		if (i < ar->num_rx_chains &&
+		    (ar->cfg_rx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+			rxmcs_map |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+		else
+			rxmcs_map |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
+
+		if (i < maxtxnss_160 &&
+		    (ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+			txmcs_map_160 |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+		else
+			txmcs_map_160 |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
+
+		if (i < maxrxnss_160 &&
+		    (ar->cfg_tx_chainmask >> cap->tx_chain_mask_shift) & BIT(i))
+			rxmcs_map_160 |= IEEE80211_HE_MCS_SUPPORT_0_11 << (i * 2);
+		else
+			rxmcs_map_160 |= IEEE80211_HE_MCS_NOT_SUPPORTED << (i * 2);
+	}
+
+	mcs_nss->rx_mcs_80 = cpu_to_le16(rxmcs_map & 0xffff);
+	mcs_nss->tx_mcs_80 = cpu_to_le16(txmcs_map & 0xffff);
+	mcs_nss->rx_mcs_160 = cpu_to_le16(rxmcs_map_160 & 0xffff);
+	mcs_nss->tx_mcs_160 = cpu_to_le16(txmcs_map_160 & 0xffff);
+}
+
+static void ath12k_mac_copy_he_cap(struct ath12k *ar,
+				   struct ath12k_band_cap *band_cap,
 				   int iftype, u8 num_tx_chains,
 				   struct ieee80211_sta_he_cap *he_cap)
 {
 	struct ieee80211_he_cap_elem *he_cap_elem = &he_cap->he_cap_elem;
-	struct ieee80211_he_mcs_nss_supp *mcs_nss = &he_cap->he_mcs_nss_supp;
 
 	he_cap->has_he = true;
 	memcpy(he_cap_elem->mac_cap_info, band_cap->he_cap_info,
@@ -5737,15 +7524,20 @@
 
 	he_cap_elem->mac_cap_info[1] &=
 		IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK;
-
+	he_cap_elem->phy_cap_info[0] &=
+		IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
+		IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
+		IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G;
+	/* 80PLUS80 is not supported */
+	he_cap_elem->phy_cap_info[0] &=
+		~IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G;
 	he_cap_elem->phy_cap_info[5] &=
 		~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK;
-	he_cap_elem->phy_cap_info[5] &=
-		~IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK;
 	he_cap_elem->phy_cap_info[5] |= num_tx_chains - 1;
 
 	switch (iftype) {
 	case NL80211_IFTYPE_AP:
+		he_cap_elem->mac_cap_info[2] &= ~IEEE80211_HE_MAC_CAP2_BCAST_TWT;
 		he_cap_elem->phy_cap_info[3] &=
 			~IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK;
 		he_cap_elem->phy_cap_info[9] |=
@@ -5762,13 +7554,7 @@
 		break;
 	}
 
-	mcs_nss->rx_mcs_80 = cpu_to_le16(band_cap->he_mcs & 0xffff);
-	mcs_nss->tx_mcs_80 = cpu_to_le16(band_cap->he_mcs & 0xffff);
-	mcs_nss->rx_mcs_160 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
-	mcs_nss->tx_mcs_160 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
-	mcs_nss->rx_mcs_80p80 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
-	mcs_nss->tx_mcs_80p80 = cpu_to_le16((band_cap->he_mcs >> 16) & 0xffff);
-
+	ath12k_mac_set_hemcsmap(ar, &ar->pdev->cap, he_cap);
 	memset(he_cap->ppe_thres, 0, sizeof(he_cap->ppe_thres));
 	if (he_cap_elem->phy_cap_info[6] &
 	    IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
@@ -5891,7 +7677,8 @@
 
 	memset(eht_cap, 0, sizeof(struct ieee80211_sta_eht_cap));
 
-	if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map)))
+	if (!(test_bit(WMI_TLV_SERVICE_11BE, ar->ab->wmi_ab.svc_map)) ||
+	    ath12k_acpi_get_disable_11be(ar->ab))
 		return;
 
 	eht_cap->has_eht = true;
@@ -5957,7 +7744,7 @@
 
 		data[idx].types_mask = BIT(i);
 
-		ath12k_mac_copy_he_cap(band_cap, i, ar->num_tx_chains, he_cap);
+		ath12k_mac_copy_he_cap(ar, band_cap, i, ar->num_tx_chains, he_cap);
 		if (band == NL80211_BAND_6GHZ) {
 			data[idx].he_6ghz_capa.capa =
 				ath12k_mac_setup_he_6ghz_cap(cap, band_cap);
@@ -5977,7 +7764,7 @@
 	enum nl80211_band band;
 	int count;
 
-	if (cap->supported_bands & WMI_HOST_WLAN_2G_CAP) {
+	if (cap->supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
 		band = NL80211_BAND_2GHZ;
 		count = ath12k_mac_copy_sband_iftype_data(ar, cap,
 							  ar->mac.iftype[band],
@@ -5987,7 +7774,7 @@
 						 count);
 	}
 
-	if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP) {
+	if (cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
 		band = NL80211_BAND_5GHZ;
 		count = ath12k_mac_copy_sband_iftype_data(ar, cap,
 							  ar->mac.iftype[band],
@@ -5997,7 +7784,7 @@
 						 count);
 	}
 
-	if (cap->supported_bands & WMI_HOST_WLAN_5G_CAP &&
+	if (cap->supported_bands & WMI_HOST_WLAN_5GHZ_CAP &&
 	    ar->supports_6ghz) {
 		band = NL80211_BAND_6GHZ;
 		count = ath12k_mac_copy_sband_iftype_data(ar, cap,
@@ -6067,6 +7854,8 @@
 {
 	int num_mgmt;
 
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
 	ieee80211_free_txskb(ath12k_ar_to_hw(ar), skb);
 
 	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -6078,23 +7867,32 @@
 		wake_up(&ar->txmgmt_empty_waitq);
 }
 
-int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+static void ath12k_mac_tx_mgmt_free(struct ath12k *ar, int buf_id)
 {
-	struct sk_buff *msdu = skb;
+	struct sk_buff *msdu;
 	struct ieee80211_tx_info *info;
-	struct ath12k *ar = ctx;
-	struct ath12k_base *ab = ar->ab;
 
 	spin_lock_bh(&ar->txmgmt_idr_lock);
-	idr_remove(&ar->txmgmt_idr, buf_id);
+	msdu = idr_remove(&ar->txmgmt_idr, buf_id);
 	spin_unlock_bh(&ar->txmgmt_idr_lock);
-	dma_unmap_single(ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
+
+	if (!msdu)
+		return;
+
+	dma_unmap_single(ar->ab->dev, ATH12K_SKB_CB(msdu)->paddr, msdu->len,
 			 DMA_TO_DEVICE);
 
 	info = IEEE80211_SKB_CB(msdu);
 	memset(&info->status, 0, sizeof(info->status));
 
-	ath12k_mgmt_over_wmi_tx_drop(ar, skb);
+	ath12k_mgmt_over_wmi_tx_drop(ar, msdu);
+}
+
+int ath12k_mac_tx_mgmt_pending_free(int buf_id, void *skb, void *ctx)
+{
+	struct ath12k *ar = ctx;
+
+	ath12k_mac_tx_mgmt_free(ar, buf_id);
 
 	return 0;
 }
@@ -6103,17 +7901,10 @@
 {
 	struct ieee80211_vif *vif = ctx;
 	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
-	struct sk_buff *msdu = skb;
 	struct ath12k *ar = skb_cb->ar;
-	struct ath12k_base *ab = ar->ab;
 
-	if (skb_cb->vif == vif) {
-		spin_lock_bh(&ar->txmgmt_idr_lock);
-		idr_remove(&ar->txmgmt_idr, buf_id);
-		spin_unlock_bh(&ar->txmgmt_idr_lock);
-		dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len,
-				 DMA_TO_DEVICE);
-	}
+	if (skb_cb->vif == vif)
+		ath12k_mac_tx_mgmt_free(ar, buf_id);
 
 	return 0;
 }
@@ -6123,12 +7914,17 @@
 {
 	struct ath12k_base *ab = ar->ab;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
 	struct ieee80211_tx_info *info;
+	enum hal_encrypt_type enctype;
+	unsigned int mic_len;
 	dma_addr_t paddr;
 	int buf_id;
 	int ret;
 
-	ATH12K_SKB_CB(skb)->ar = ar;
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	skb_cb->ar = ar;
 	spin_lock_bh(&ar->txmgmt_idr_lock);
 	buf_id = idr_alloc(&ar->txmgmt_idr, skb, 0,
 			   ATH12K_TX_MGMT_NUM_PENDING_MAX, GFP_ATOMIC);
@@ -6142,7 +7938,12 @@
 		     ieee80211_is_deauth(hdr->frame_control) ||
 		     ieee80211_is_disassoc(hdr->frame_control)) &&
 		     ieee80211_has_protected(hdr->frame_control)) {
-			skb_put(skb, IEEE80211_CCMP_MIC_LEN);
+			if (!(skb_cb->flags & ATH12K_SKB_CIPHER_SET))
+				ath12k_warn(ab, "WMI protected management tx frame without ATH12K_SKB_CIPHER_SET");
+
+			enctype = ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);
+			mic_len = ath12k_dp_rx_crypto_mic_len(ar, enctype);
+			skb_put(skb, mic_len);
 		}
 	}
 
@@ -6153,7 +7954,7 @@
 		goto err_free_idr;
 	}
 
-	ATH12K_SKB_CB(skb)->paddr = paddr;
+	skb_cb->paddr = paddr;
 
 	ret = ath12k_wmi_mgmt_send(ar, arvif->vdev_id, buf_id, skb);
 	if (ret) {
@@ -6164,7 +7965,7 @@
 	return 0;
 
 err_unmap_buf:
-	dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
+	dma_unmap_single(ab->dev, skb_cb->paddr,
 			 skb->len, DMA_TO_DEVICE);
 err_free_idr:
 	spin_lock_bh(&ar->txmgmt_idr_lock);
@@ -6182,15 +7983,18 @@
 		ath12k_mgmt_over_wmi_tx_drop(ar, skb);
 }
 
-static void ath12k_mgmt_over_wmi_tx_work(struct work_struct *work)
+static void ath12k_mgmt_over_wmi_tx_work(struct wiphy *wiphy, struct wiphy_work *work)
 {
 	struct ath12k *ar = container_of(work, struct ath12k, wmi_mgmt_tx_work);
+	struct ath12k_hw *ah = ar->ah;
 	struct ath12k_skb_cb *skb_cb;
 	struct ath12k_vif *ahvif;
 	struct ath12k_link_vif *arvif;
 	struct sk_buff *skb;
 	int ret;
 
+	lockdep_assert_wiphy(wiphy);
+
 	while ((skb = skb_dequeue(&ar->wmi_mgmt_tx_queue)) != NULL) {
 		skb_cb = ATH12K_SKB_CB(skb);
 		if (!skb_cb->vif) {
@@ -6200,7 +8004,15 @@
 		}
 
 		ahvif = ath12k_vif_to_ahvif(skb_cb->vif);
-		arvif = &ahvif->deflink;
+		if (!(ahvif->links_map & BIT(skb_cb->link_id))) {
+			ath12k_warn(ar->ab,
+				    "invalid linkid %u in mgmt over wmi tx with linkmap 0x%x\n",
+				    skb_cb->link_id, ahvif->links_map);
+			ath12k_mgmt_over_wmi_tx_drop(ar, skb);
+			continue;
+		}
+
+		arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[skb_cb->link_id]);
 		if (ar->allocated_vdev_map & (1LL << arvif->vdev_id)) {
 			ret = ath12k_mac_mgmt_tx_wmi(ar, arvif, skb);
 			if (ret) {
@@ -6210,8 +8022,9 @@
 			}
 		} else {
 			ath12k_warn(ar->ab,
-				    "dropping mgmt frame for vdev %d, is_started %d\n",
+				    "dropping mgmt frame for vdev %d link %u is_started %d\n",
 				    arvif->vdev_id,
+				    skb_cb->link_id,
 				    arvif->is_started);
 			ath12k_mgmt_over_wmi_tx_drop(ar, skb);
 		}
@@ -6245,7 +8058,7 @@
 
 	skb_queue_tail(q, skb);
 	atomic_inc(&ar->num_pending_mgmt_tx);
-	ieee80211_queue_work(ath12k_ar_to_hw(ar), &ar->wmi_mgmt_tx_work);
+	wiphy_work_queue(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work);
 
 	return 0;
 }
@@ -6271,6 +8084,121 @@
 	spin_unlock_bh(&ar->data_lock);
 }
 
+/* Note: called under rcu_read_lock() */
+static void ath12k_mlo_mcast_update_tx_link_address(struct ieee80211_vif *vif,
+						    u8 link_id, struct sk_buff *skb,
+						    u32 info_flags)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ieee80211_bss_conf *bss_conf;
+
+	if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+		return;
+
+	bss_conf = rcu_dereference(vif->link_conf[link_id]);
+	if (bss_conf)
+		ether_addr_copy(hdr->addr2, bss_conf->addr);
+}
+
+/* Note: called under rcu_read_lock() */
+static u8 ath12k_mac_get_tx_link(struct ieee80211_sta *sta, struct ieee80211_vif *vif,
+				 u8 link, struct sk_buff *skb, u32 info_flags)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
+	struct ieee80211_link_sta *link_sta;
+	struct ieee80211_bss_conf *bss_conf;
+	struct ath12k_sta *ahsta;
+
+	/* Use the link id passed or the default vif link */
+	if (!sta) {
+		if (link != IEEE80211_LINK_UNSPECIFIED)
+			return link;
+
+		return ahvif->deflink.link_id;
+	}
+
+	ahsta = ath12k_sta_to_ahsta(sta);
+
+	/* The translation below ensures we pass proper A2 & A3 for non-ML
+	 * clients. For now it also assumes support only for MLO AP in this path.
+	 */
+	if (!sta->mlo) {
+		link = ahsta->deflink.link_id;
+
+		if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
+			return link;
+
+		bss_conf = rcu_dereference(vif->link_conf[link]);
+		if (bss_conf) {
+			ether_addr_copy(hdr->addr2, bss_conf->addr);
+			if (!ieee80211_has_tods(hdr->frame_control) &&
+			    !ieee80211_has_fromds(hdr->frame_control))
+				ether_addr_copy(hdr->addr3, bss_conf->addr);
+		}
+
+		return link;
+	}
+
+	/* enqueue eth encap & data frames on the primary link; FW does link
+	 * selection and address translation.
+	 */
+	if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP ||
+	    ieee80211_is_data(hdr->frame_control))
+		return ahsta->assoc_link_id;
+
+	/* 802.11 frame cases */
+	if (link == IEEE80211_LINK_UNSPECIFIED)
+		link = ahsta->deflink.link_id;
+
+	if (!ieee80211_is_mgmt(hdr->frame_control))
+		return link;
+
+	/* Perform address conversion for ML STA Tx */
+	bss_conf = rcu_dereference(vif->link_conf[link]);
+	link_sta = rcu_dereference(sta->link[link]);
+
+	if (bss_conf && link_sta) {
+		ether_addr_copy(hdr->addr1, link_sta->addr);
+		ether_addr_copy(hdr->addr2, bss_conf->addr);
+
+		if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid)
+			ether_addr_copy(hdr->addr3, bss_conf->bssid);
+		else if (vif->type == NL80211_IFTYPE_AP)
+			ether_addr_copy(hdr->addr3, bss_conf->addr);
+
+		return link;
+	}
+
+	if (bss_conf) {
+		/* In certain cases an ML sta may have associated with a subset
+		 * of links on which the ML AP is active, but then send a frame
+		 * (e.g. a probe request) on a different link which is active in
+		 * our MLD yet was not added during the earlier association; we
+		 * can still honor the Tx to that ML STA via the requested link.
+		 * Control reaches here in such a case only when that link
+		 * address equals the MLD address, or in the worst case when a
+		 * client wrongly used the MLD address as TA, which would have
+		 * helped identify the ML sta object and pass it here.
+		 * If the link address of that STA differs from the MLD address,
+		 * the sta object would be NULL and control would not reach
+		 * here, returning at the start of the function via the !sta
+		 * check. This also needs no translation of hdr->addr1 from the
+		 * MLD address to the link address, since the RA is already the
+		 * MLD address (ideally the same as that link address).
+		 */
+		ether_addr_copy(hdr->addr2, bss_conf->addr);
+
+		if (vif->type == NL80211_IFTYPE_STATION && bss_conf->bssid)
+			ether_addr_copy(hdr->addr3, bss_conf->bssid);
+		else if (vif->type == NL80211_IFTYPE_AP)
+			ether_addr_copy(hdr->addr3, bss_conf->addr);
+	}
+
+	return link;
+}
+
+/* Note: called under rcu_read_lock() */
 static void ath12k_mac_op_tx(struct ieee80211_hw *hw,
 			     struct ieee80211_tx_control *control,
 			     struct sk_buff *skb)
@@ -6280,13 +8208,29 @@
 	struct ieee80211_vif *vif = info->control.vif;
 	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
 	struct ath12k_link_vif *arvif = &ahvif->deflink;
-	struct ath12k *ar = arvif->ar;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	struct ieee80211_key_conf *key = info->control.hw_key;
+	struct ieee80211_sta *sta = control->sta;
+	struct ath12k_link_vif *tmp_arvif;
 	u32 info_flags = info->flags;
+	struct sk_buff *msdu_copied;
+	struct ath12k *ar, *tmp_ar;
+	struct ath12k_peer *peer;
+	unsigned long links_map;
+	bool is_mcast = false, is_eth = false;
+	bool is_dvlan = false;
+	struct ethhdr *eth;
 	bool is_prb_rsp;
+	u16 mcbc_gsn;
+	u8 link_id;
 	int ret;
 
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR) {
+		ieee80211_free_txskb(hw, skb);
+		return;
+	}
+
+	link_id = u32_get_bits(info->control.flags, IEEE80211_TX_CTRL_MLO_LINK);
 	memset(skb_cb, 0, sizeof(*skb_cb));
 	skb_cb->vif = vif;
 
@@ -6295,9 +8239,33 @@
 		skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
 	}
 
+	/* handle only for MLO case, use deflink for non MLO case */
+	if (ieee80211_vif_is_mld(vif)) {
+		link_id = ath12k_mac_get_tx_link(sta, vif, link_id, skb, info_flags);
+		if (link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+			ieee80211_free_txskb(hw, skb);
+			return;
+		}
+	} else {
+		link_id = 0;
+	}
+
+	arvif = rcu_dereference(ahvif->link[link_id]);
+	if (!arvif || !arvif->ar) {
+		ath12k_warn(ahvif->ah, "failed to find arvif link id %u for frame transmission",
+			    link_id);
+		ieee80211_free_txskb(hw, skb);
+		return;
+	}
+
+	ar = arvif->ar;
+	skb_cb->link_id = link_id;
 	is_prb_rsp = ieee80211_is_probe_resp(hdr->frame_control);
 
 	if (info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+		eth = (struct ethhdr *)skb->data;
+		is_mcast = is_multicast_ether_addr(eth->h_dest);
+		is_eth = true;
 		skb_cb->flags |= ATH12K_SKB_HW_80211_ENCAP;
 	} else if (ieee80211_is_mgmt(hdr->frame_control)) {
 		ret = ath12k_mac_mgmt_tx(ar, skb, is_prb_rsp);
@@ -6309,30 +8277,154 @@
 		return;
 	}
 
+	if (!(info_flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
+		is_mcast = is_multicast_ether_addr(hdr->addr1);
+
 	/* This is case only for P2P_GO */
 	if (vif->type == NL80211_IFTYPE_AP && vif->p2p)
 		ath12k_mac_add_p2p_noa_ie(ar, vif, skb, is_prb_rsp);
 
-	ret = ath12k_dp_tx(ar, arvif, skb);
-	if (ret) {
+	/* Checking if it is a DVLAN frame */
+	if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
+	    !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) &&
+	    !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) &&
+	    ieee80211_has_protected(hdr->frame_control))
+		is_dvlan = true;
+
+	if (!vif->valid_links || !is_mcast || is_dvlan || is_eth ||
+	    test_bit(ATH12K_FLAG_RAW_MODE, &ar->ab->dev_flags)) {
+		ret = ath12k_dp_tx(ar, arvif, skb, false, 0, is_mcast);
+		if (unlikely(ret)) {
 		ath12k_warn(ar->ab, "failed to transmit frame %d\n", ret);
-		ieee80211_free_txskb(hw, skb);
+			ieee80211_free_txskb(ar->ah->hw, skb);
+			return;
+		}
+	} else {
+		mcbc_gsn = atomic_inc_return(&ahvif->mcbc_gsn) & 0xfff;
+
+		links_map = ahvif->links_map;
+		for_each_set_bit(link_id, &links_map,
+				 IEEE80211_MLD_MAX_NUM_LINKS) {
+			tmp_arvif = rcu_dereference(ahvif->link[link_id]);
+			if (!tmp_arvif || !tmp_arvif->is_up)
+				continue;
+
+			tmp_ar = tmp_arvif->ar;
+			msdu_copied = skb_copy(skb, GFP_ATOMIC);
+			if (!msdu_copied) {
+				ath12k_err(ar->ab,
+					   "skb copy failure link_id 0x%X vdevid 0x%X\n",
+					   link_id, tmp_arvif->vdev_id);
+				continue;
+			}
+
+			ath12k_mlo_mcast_update_tx_link_address(vif, link_id,
+								msdu_copied,
+								info_flags);
+
+			skb_cb = ATH12K_SKB_CB(msdu_copied);
+			skb_cb->link_id = link_id;
+
+			/* For open mode, skip peer find logic */
+			if (unlikely(arvif->key_cipher == WMI_CIPHER_NONE))
+				goto skip_peer_find;
+
+			spin_lock_bh(&tmp_ar->ab->base_lock);
+			peer = ath12k_peer_find_by_addr(tmp_ar->ab, tmp_arvif->bssid);
+			if (!peer) {
+				spin_unlock_bh(&tmp_ar->ab->base_lock);
+				ath12k_warn(tmp_ar->ab,
+					    "failed to find peer for vdev_id 0x%X addr %pM link_map 0x%X\n",
+					    tmp_arvif->vdev_id, tmp_arvif->bssid,
+					    ahvif->links_map);
+				dev_kfree_skb_any(msdu_copied);
+				continue;
+			}
+
+			key = peer->keys[peer->mcast_keyidx];
+			if (key) {
+				skb_cb->cipher = key->cipher;
+				skb_cb->flags |= ATH12K_SKB_CIPHER_SET;
+
+				hdr = (struct ieee80211_hdr *)msdu_copied->data;
+				if (!ieee80211_has_protected(hdr->frame_control))
+					hdr->frame_control |=
+						cpu_to_le16(IEEE80211_FCTL_PROTECTED);
+			}
+			spin_unlock_bh(&tmp_ar->ab->base_lock);
+
+skip_peer_find:
+			ret = ath12k_dp_tx(tmp_ar, tmp_arvif,
+					   msdu_copied, true, mcbc_gsn, is_mcast);
+			if (unlikely(ret)) {
+				if (ret == -ENOMEM) {
+					/* Drops are expected during heavy
+					 * multicast frame flood; print at debug
+					 * level to avoid excessive console prints
+					 */
+					ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+						   "failed to transmit frame %d\n",
+						   ret);
+				} else {
+					ath12k_warn(ar->ab,
+						    "failed to transmit frame %d\n",
+						    ret);
+				}
+
+				dev_kfree_skb_any(msdu_copied);
+			}
+		}
+		ieee80211_free_txskb(ar->ah->hw, skb);
 	}
 }
 
 void ath12k_mac_drain_tx(struct ath12k *ar)
 {
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
 	/* make sure rcu-protected mac80211 tx path itself is drained */
 	synchronize_net();
 
-	cancel_work_sync(&ar->wmi_mgmt_tx_work);
+	wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->wmi_mgmt_tx_work);
 	ath12k_mgmt_over_wmi_tx_purge(ar);
 }
 
 static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable)
 {
-	return -EOPNOTSUPP;
-	/* TODO: Need to support new monitor mode */
+	struct htt_rx_ring_tlv_filter tlv_filter = {};
+	struct ath12k_base *ab = ar->ab;
+	u32 ring_id, i;
+	int ret = 0;
+
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	if (!ab->hw_params->rxdma1_enable)
+		return ret;
+
+	if (enable) {
+		tlv_filter = ath12k_mac_mon_status_filter_default;
+
+		if (ath12k_debugfs_rx_filter(ar))
+			tlv_filter.rx_filter = ath12k_debugfs_rx_filter(ar);
+	} else {
+		tlv_filter.rxmon_disable = true;
+	}
+
+	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
+		ring_id = ar->dp.rxdma_mon_dst_ring[i].ring_id;
+		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
+						       ar->dp.mac_id + i,
+						       HAL_RXDMA_MONITOR_DST,
+						       DP_RXDMA_REFILL_RING_SIZE,
+						       &tlv_filter);
+		if (ret) {
+			ath12k_err(ab,
+				   "failed to setup filter for monitor buf %d\n",
+				   ret);
+		}
+	}
+
+	return ret;
 }
 
 static int ath12k_mac_start(struct ath12k *ar)
@@ -6349,7 +8441,7 @@
 					1, pdev->pdev_id);
 
 	if (ret) {
-		ath12k_err(ab, "failed to enable PMF QOS: (%d\n", ret);
+		ath12k_err(ab, "failed to enable PMF QOS: (%d)\n", ret);
 		goto err;
 	}
 
@@ -6394,12 +8486,22 @@
 
 	/* TODO: Do we need to enable ANI? */
 
-	ath12k_reg_update_chan_list(ar);
+	ret = ath12k_reg_update_chan_list(ar);
+	/* The ar state alone can be turned off for an unsupported country
+	 * without returning an error value, since we still need to update the
+	 * channel list for the next ar.
+	 */
+	if (ret) {
+		if (ret == -EOPNOTSUPP)
+			ret = 0;
+		goto err;
+	}
 
 	ar->num_started_vdevs = 0;
 	ar->num_created_vdevs = 0;
 	ar->num_peers = 0;
 	ar->allocated_vdev_map = 0;
+	ar->chan_tx_pwr = ATH12K_PDEV_TX_POWER_INVALID;
 
 	/* Configure monitor status ring with default rx_filter to get rx status
 	 * such as rssi, rx_duration.
@@ -6439,9 +8541,16 @@
 
 static void ath12k_drain_tx(struct ath12k_hw *ah)
 {
-	struct ath12k *ar;
+	struct ath12k *ar = ah->radio;
 	int i;
 
+	if (ath12k_ftm_mode) {
+		ath12k_err(ar->ab, "fail to start mac operations in ftm mode\n");
+		return;
+	}
+
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
 	for_each_ar(ah, ar, i)
 		ath12k_mac_drain_tx(ar);
 }
@@ -6468,6 +8577,7 @@
 	case ATH12K_HW_STATE_RESTARTED:
 	case ATH12K_HW_STATE_WEDGED:
 	case ATH12K_HW_STATE_ON:
+	case ATH12K_HW_STATE_TM:
 		ah->state = ATH12K_HW_STATE_OFF;
 
 		WARN_ON(1);
@@ -6567,9 +8677,10 @@
 		ath12k_err(ar->ab, "failed to clear rx_filter for monitor status ring: (%d)\n",
 			   ret);
 
-	clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+	clear_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags);
 
 	cancel_delayed_work_sync(&ar->scan.timeout);
+	wiphy_work_cancel(ath12k_ar_to_hw(ar)->wiphy, &ar->scan.vdev_clean_wk);
 	cancel_work_sync(&ar->regd_update_work);
 	cancel_work_sync(&ar->ab->rfkill_work);
 
@@ -6634,19 +8745,24 @@
 					       u32 *flags, u32 *tx_vdev_id)
 {
 	struct ath12k_vif *ahvif = arvif->ahvif;
-	struct ieee80211_vif *tx_vif = ahvif->vif->mbssid_tx_vif;
+	struct ieee80211_bss_conf *link_conf;
 	struct ath12k *ar = arvif->ar;
 	struct ath12k_link_vif *tx_arvif;
-	struct ath12k_vif *tx_ahvif;
 
-	if (!tx_vif)
-		return 0;
+	link_conf = ath12k_mac_get_link_bss_conf(arvif);
+	if (!link_conf) {
+		ath12k_warn(ar->ab, "unable to access bss link conf in set mbssid params for vif %pM link %u\n",
+			    ahvif->vif->addr, arvif->link_id);
+		return -ENOLINK;
+	}
 
-	tx_ahvif = ath12k_vif_to_ahvif(tx_vif);
-	tx_arvif = &tx_ahvif->deflink;
+	tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
+	if (!tx_arvif)
+		return 0;
 
-	if (ahvif->vif->bss_conf.nontransmitted) {
-		if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy)
+	if (link_conf->nontransmitted) {
+		if (ath12k_ar_to_hw(ar)->wiphy !=
+		    ath12k_ar_to_hw(tx_arvif->ar)->wiphy)
 			return -EINVAL;
 
 		*flags = WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP;
@@ -6657,7 +8773,7 @@
 		return -EINVAL;
 	}
 
-	if (ahvif->vif->bss_conf.ema_ap)
+	if (link_conf->ema_ap)
 		*flags |= WMI_VDEV_MBSSID_FLAGS_EMA_MODE;
 
 	return 0;
@@ -6671,6 +8787,8 @@
 	struct ath12k_vif *ahvif = arvif->ahvif;
 	int ret;
 
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
 	arg->if_id = arvif->vdev_id;
 	arg->type = ahvif->vdev_type;
 	arg->subtype = ahvif->vdev_subtype;
@@ -6687,88 +8805,33 @@
 			return ret;
 	}
 
-	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) {
+	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
 		arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains;
 		arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains;
 	}
-	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP) {
+	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
 		arg->chains[NL80211_BAND_5GHZ].tx = ar->num_tx_chains;
 		arg->chains[NL80211_BAND_5GHZ].rx = ar->num_rx_chains;
 	}
-	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5G_CAP &&
+	if (pdev->cap.supported_bands & WMI_HOST_WLAN_5GHZ_CAP &&
 	    ar->supports_6ghz) {
 		arg->chains[NL80211_BAND_6GHZ].tx = ar->num_tx_chains;
 		arg->chains[NL80211_BAND_6GHZ].rx = ar->num_rx_chains;
 	}
 
 	arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif);
-	return 0;
-}
-
-static u32
-ath12k_mac_prepare_he_mode(struct ath12k_pdev *pdev, u32 viftype)
-{
-	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
-	struct ath12k_band_cap *cap_band = NULL;
-	u32 *hecap_phy_ptr = NULL;
-	u32 hemode;
-
-	if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP)
-		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
-	else
-		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
 
-	hecap_phy_ptr = &cap_band->he_cap_phy_info[0];
-
-	hemode = u32_encode_bits(HE_SU_BFEE_ENABLE, HE_MODE_SU_TX_BFEE) |
-		 u32_encode_bits(HECAP_PHY_SUBFMR_GET(hecap_phy_ptr),
-				 HE_MODE_SU_TX_BFER) |
-		 u32_encode_bits(HECAP_PHY_ULMUMIMO_GET(hecap_phy_ptr),
-				 HE_MODE_UL_MUMIMO);
-
-	/* TODO: WDS and other modes */
-	if (viftype == NL80211_IFTYPE_AP) {
-		hemode |= u32_encode_bits(HECAP_PHY_MUBFMR_GET(hecap_phy_ptr),
-					  HE_MODE_MU_TX_BFER) |
-			  u32_encode_bits(HE_DL_MUOFDMA_ENABLE, HE_MODE_DL_OFDMA) |
-			  u32_encode_bits(HE_UL_MUOFDMA_ENABLE, HE_MODE_UL_OFDMA);
-	} else {
-		hemode |= u32_encode_bits(HE_MU_BFEE_ENABLE, HE_MODE_MU_TX_BFEE);
+	if (ath12k_mac_is_ml_arvif(arvif)) {
+		if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS) {
+			ath12k_warn(ar->ab, "too many MLO links during setting up vdev: %d",
+				    ahvif->vif->valid_links);
+			return -EINVAL;
 	}
 
-	return hemode;
+		ether_addr_copy(arg->mld_addr, ahvif->vif->addr);
 }
 
-static int ath12k_set_he_mu_sounding_mode(struct ath12k *ar,
-					  struct ath12k_link_vif *arvif)
-{
-	u32 param_id, param_value;
-	struct ath12k_base *ab = ar->ab;
-	struct ath12k_vif *ahvif = arvif->ahvif;
-	int ret;
-
-	param_id = WMI_VDEV_PARAM_SET_HEMU_MODE;
-	param_value = ath12k_mac_prepare_he_mode(ar->pdev, ahvif->vif->type);
-	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
-					    param_id, param_value);
-	if (ret) {
-		ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d param_value %x\n",
-			    arvif->vdev_id, ret, param_value);
-		return ret;
-	}
-	param_id = WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE;
-	param_value =
-		u32_encode_bits(HE_VHT_SOUNDING_MODE_ENABLE, HE_VHT_SOUNDING_MODE) |
-		u32_encode_bits(HE_TRIG_NONTRIG_SOUNDING_MODE_ENABLE,
-				HE_TRIG_NONTRIG_SOUNDING_MODE);
-	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
-					    param_id, param_value);
-	if (ret) {
-		ath12k_warn(ab, "failed to set vdev %d HE MU mode: %d\n",
-			    arvif->vdev_id, ret);
-		return ret;
-	}
-	return ret;
+	return 0;
 }
 
 static void ath12k_mac_update_vif_offload(struct ath12k_link_vif *arvif)
@@ -6781,8 +8844,9 @@
 	int ret;
 
 	param_id = WMI_VDEV_PARAM_TX_ENCAP_TYPE;
-	if (vif->type != NL80211_IFTYPE_STATION &&
-	    vif->type != NL80211_IFTYPE_AP)
+	if (ath12k_frame_mode != ATH12K_HW_TXRX_ETHERNET ||
+	    (vif->type != NL80211_IFTYPE_STATION &&
+	     vif->type != NL80211_IFTYPE_AP))
 		vif->offload_flags &= ~(IEEE80211_OFFLOAD_ENCAP_ENABLED |
 					IEEE80211_OFFLOAD_DECAP_ENABLED);
 
@@ -6852,23 +8916,37 @@
 	struct ath12k_vif *ahvif = arvif->ahvif;
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
 	struct ath12k_wmi_vdev_create_arg vdev_arg = {0};
-	struct ath12k_wmi_peer_create_arg peer_param;
-	struct ieee80211_bss_conf *link_conf;
+	struct ath12k_wmi_peer_create_arg peer_param = {0};
+	struct ieee80211_bss_conf *link_conf = NULL;
 	u32 param_id, param_value;
 	u16 nss;
 	int i;
 	int ret, vdev_id;
+	u8 link_id;
 
 	lockdep_assert_wiphy(hw->wiphy);
 
-	link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[arvif->link_id]);
-	if (!link_conf) {
+	/* If no link is active and a scan vdev is requested,
+	 * use a default link conf for the scan address.
+	 */
+	if (arvif->link_id >= ATH12K_DEFAULT_SCAN_LINK && vif->valid_links)
+		link_id = ffs(vif->valid_links) - 1;
+	else
+		link_id = arvif->link_id;
+
+	if (link_id < ATH12K_DEFAULT_SCAN_LINK) {
+		link_conf = wiphy_dereference(hw->wiphy, vif->link_conf[link_id]);
+		if (!link_conf && !arvif->is_scan_vif) {
 		ath12k_warn(ar->ab, "unable to access bss link conf in vdev create for vif %pM link %u\n",
 			    vif->addr, arvif->link_id);
 		return -ENOLINK;
 	}
+	}
 
+	if (link_conf)
 	memcpy(arvif->bssid, link_conf->addr, ETH_ALEN);
+	else
+		memcpy(arvif->bssid, vif->addr, ETH_ALEN);
 
 	arvif->ar = ar;
 	vdev_id = __ffs64(ab->free_vdev_map);
@@ -7012,7 +9090,10 @@
 		break;
 	}
 
-	arvif->txpower = vif->bss_conf.txpower;
+	if (link_conf)
+		arvif->txpower = link_conf->txpower;
+	else
+		arvif->txpower = NL80211_TX_POWER_AUTOMATIC;
 	ret = ath12k_mac_txpower_recalc(ar);
 	if (ret)
 		goto err_peer_del;
@@ -7047,13 +9128,15 @@
 		ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id,
 						       arvif->bssid);
 		if (ret)
-			/* KVALO: why not goto err? */
-			return ret;
+			goto err_vdev_del;
 
 		ar->num_peers--;
 	}
 
 err_vdev_del:
+	if (ahvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
+		ar->monitor_vdev_id = -1;
+
 	ath12k_wmi_vdev_delete(ar, arvif->vdev_id);
 	ar->num_created_vdevs--;
 	arvif->is_created = false;
@@ -7062,6 +9145,7 @@
 	ab->free_vdev_map |= 1LL << arvif->vdev_id;
 	ab->free_vdev_stats_id_map &= ~(1LL << arvif->vdev_stats_id);
 	spin_lock_bh(&ar->data_lock);
+	if (!list_empty(&ar->arvifs))
 	list_del(&arvif->list);
 	spin_unlock_bh(&ar->data_lock);
 
@@ -7110,6 +9194,7 @@
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
 	struct ath12k_vif_cache *cache = ahvif->cache[arvif->link_id];
 	struct ath12k_base *ab = ar->ab;
+	struct ieee80211_bss_conf *link_conf;
 
 	int ret;
 
@@ -7128,7 +9213,13 @@
 	}
 
 	if (cache->bss_conf_changed) {
-		ath12k_mac_bss_info_changed(ar, arvif, &vif->bss_conf,
+		link_conf = ath12k_mac_get_link_bss_conf(arvif);
+		if (!link_conf) {
+			ath12k_warn(ar->ab, "unable to access bss link conf in cache flush for vif %pM link %u\n",
+				    vif->addr, arvif->link_id);
+			return;
+		}
+		ath12k_mac_bss_info_changed(ar, arvif, link_conf,
 					    cache->bss_conf_changed);
 	}
 
@@ -7142,11 +9233,14 @@
 						    struct ath12k_link_vif *arvif,
 						    struct ieee80211_chanctx_conf *ctx)
 {
-	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(ahvif);
+	struct ath12k_link_vif *scan_arvif;
 	struct ath12k_hw *ah = hw->priv;
 	struct ath12k *ar;
 	struct ath12k_base *ab;
-	u8 link_id = arvif->link_id;
+	u8 link_id = arvif->link_id, scan_link;
+	unsigned long scan_link_map;
 	int ret;
 
 	lockdep_assert_wiphy(hw->wiphy);
@@ -7161,6 +9255,21 @@
 	if (!ar)
 		return NULL;
 
+	/* Clean up the scan vdev if we are done scanning on that ar
+	 * and now want to create a vdev for actual usage.
+	 */
+	if (ieee80211_vif_is_mld(vif)) {
+		scan_link_map = ahvif->links_map & ATH12K_SCAN_LINKS_MASK;
+		for_each_set_bit(scan_link, &scan_link_map, ATH12K_NUM_MAX_LINKS) {
+			scan_arvif = wiphy_dereference(hw->wiphy, ahvif->link[scan_link]);
+			if (scan_arvif && scan_arvif->ar == ar) {
+				ar->scan.arvif = NULL;
+				ath12k_mac_remove_link_interface(hw, scan_arvif);
+				ath12k_mac_unassign_link_vif(scan_arvif);
+			}
+		}
+	}
+
 	if (arvif->ar) {
 		/* This is not expected really */
 		if (WARN_ON(!arvif->is_created)) {
@@ -7236,19 +9345,8 @@
 	ahvif->ah = ah;
 	ahvif->vif = vif;
 	arvif = &ahvif->deflink;
-	arvif->ahvif = ahvif;
 
-	INIT_LIST_HEAD(&arvif->list);
-	INIT_DELAYED_WORK(&arvif->connection_loss_work,
-			  ath12k_mac_vif_sta_connection_loss_work);
-
-	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
-		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
-		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
-		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
-		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
-		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
-	}
+	ath12k_mac_init_arvif(ahvif, arvif, -1);
 
 	/* Allocate Default Queue now and reassign during actual vdev create */
 	vif->cab_queue = ATH12K_HW_DEFAULT_QUEUE;
@@ -7256,14 +9354,9 @@
 		vif->hw_queue[i] = ATH12K_HW_DEFAULT_QUEUE;
 
 	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
-	/* For non-ml vifs, vif->addr is the actual vdev address but for
-	 * ML vif link(link BSSID) address is the vdev address and it can be a
-	 * different one from vif->addr (i.e ML address).
-	 * Defer vdev creation until assign_chanctx or hw_scan is initiated as driver
+	/* Defer vdev creation until assign_chanctx or hw_scan is initiated as driver
 	 * will not know if this interface is an ML vif at this point.
 	 */
-	ath12k_mac_assign_vif_to_vdev(hw, arvif, NULL);
-
 	return 0;
 }
 
@@ -7334,6 +9427,7 @@
 
 err_vdev_del:
 	spin_lock_bh(&ar->data_lock);
+	if (!list_empty(&ar->arvifs))
 	list_del(&arvif->list);
 	spin_unlock_bh(&ar->data_lock);
 
@@ -7361,11 +9455,12 @@
 {
 	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
 	struct ath12k_link_vif *arvif;
+	struct ath12k *ar;
 	u8 link_id;
 
 	lockdep_assert_wiphy(hw->wiphy);
 
-	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+	for (link_id = 0; link_id < ATH12K_NUM_MAX_LINKS; link_id++) {
 		/* if we cached some config but never received assign chanctx,
 		 * free the allocated cache.
 		 */
@@ -7374,6 +9469,31 @@
 		if (!arvif || !arvif->is_created)
 			continue;
 
+		ar = arvif->ar;
+
+		/* A scan abort is already in progress since cancel_hw_scan() is
+		 * expected to have run before this. As the link is being removed
+		 * now anyway, just cancel the worker and report the scan as
+		 * aborted to user space.
+		 */
+		if (ar->scan.arvif == arvif) {
+			wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk);
+
+			spin_lock_bh(&ar->data_lock);
+			ar->scan.arvif = NULL;
+			if (!ar->scan.is_roc) {
+				struct cfg80211_scan_info info = {
+					.aborted = true,
+				};
+
+				ieee80211_scan_completed(ar->ah->hw, &info);
+			}
+
+			ar->scan.state = ATH12K_SCAN_IDLE;
+			ar->scan_channel = NULL;
+			ar->scan.roc_freq = 0;
+			spin_unlock_bh(&ar->data_lock);
+		}
+
 		ath12k_mac_remove_link_interface(hw, arvif);
 		ath12k_mac_unassign_link_vif(arvif);
 	}
@@ -7389,29 +9509,6 @@
 	FIF_PROBE_REQ |				\
 	FIF_FCSFAIL)
 
-static void ath12k_mac_configure_filter(struct ath12k *ar,
-					unsigned int total_flags)
-{
-	bool reset_flag;
-	int ret;
-
-	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
-
-	ar->filter_flags = total_flags;
-
-	/* For monitor mode */
-	reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC);
-
-	ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag);
-	if (ret)
-		ath12k_warn(ar->ab,
-			    "fail to set monitor filter: %d\n", ret);
-
-	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
-		   "total_flags:0x%x, reset_flag:%d\n",
-		   total_flags, reset_flag);
-}
-
 static void ath12k_mac_op_configure_filter(struct ieee80211_hw *hw,
 					   unsigned int changed_flags,
 					   unsigned int *total_flags,
@@ -7425,7 +9522,7 @@
 	ar = ath12k_ah_to_ar(ah, 0);
 
 	*total_flags &= SUPPORTED_FILTERS;
-	ath12k_mac_configure_filter(ar, *total_flags);
+	ar->filter_flags = *total_flags;
 }
 
 static int ath12k_mac_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
@@ -7466,20 +9563,29 @@
 	return ret;
 }
 
-static int ath12k_mac_ampdu_action(struct ath12k_link_vif *arvif,
-				   struct ieee80211_ampdu_params *params)
+static int ath12k_mac_ampdu_action(struct ieee80211_hw *hw,
+				   struct ieee80211_vif *vif,
+				   struct ieee80211_ampdu_params *params,
+				   u8 link_id)
 {
-	struct ath12k *ar = arvif->ar;
+	struct ath12k *ar;
 	int ret = -EINVAL;
 
-	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+	lockdep_assert_wiphy(hw->wiphy);
+
+	ar = ath12k_get_ar_by_vif(hw, vif, link_id);
+	if (!ar)
+		return -EINVAL;
+
+	if (unlikely(test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
+		return -ESHUTDOWN;
 
 	switch (params->action) {
 	case IEEE80211_AMPDU_RX_START:
-		ret = ath12k_dp_rx_ampdu_start(ar, params);
+		ret = ath12k_dp_rx_ampdu_start(ar, params, link_id);
 		break;
 	case IEEE80211_AMPDU_RX_STOP:
-		ret = ath12k_dp_rx_ampdu_stop(ar, params);
+		ret = ath12k_dp_rx_ampdu_stop(ar, params, link_id);
 		break;
 	case IEEE80211_AMPDU_TX_START:
 	case IEEE80211_AMPDU_TX_STOP_CONT:
@@ -7493,6 +9599,10 @@
 		break;
 	}
 
+	if (ret)
+		ath12k_warn(ar->ab, "unable to perform ampdu action %d for vif %pM link %u ret %d\n",
+			    params->action, vif->addr, link_id, ret);
+
 	return ret;
 }
 
@@ -7500,29 +9610,26 @@
 				      struct ieee80211_vif *vif,
 				      struct ieee80211_ampdu_params *params)
 {
-	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
-	struct ath12k *ar;
-	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
-	struct ath12k_link_vif *arvif;
+	struct ieee80211_sta *sta = params->sta;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	unsigned long links_map = ahsta->links_map;
 	int ret = -EINVAL;
+	u8 link_id;
 
 	lockdep_assert_wiphy(hw->wiphy);
 
-	ar = ath12k_get_ar_by_vif(hw, vif);
-	if (!ar)
-		return -EINVAL;
-
-	ar = ath12k_ah_to_ar(ah, 0);
-	arvif = &ahvif->deflink;
+	if (WARN_ON(!links_map))
+		return ret;
 
-	ret = ath12k_mac_ampdu_action(arvif, params);
+	for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+		ret = ath12k_mac_ampdu_action(hw, vif, params, link_id);
 	if (ret)
-		ath12k_warn(ar->ab, "pdev idx %d unable to perform ampdu action %d ret %d\n",
-			    ar->pdev_idx, params->action, ret);
-
 	return ret;
 }
 
+	return 0;
+}
+
 static int ath12k_mac_op_add_chanctx(struct ieee80211_hw *hw,
 				     struct ieee80211_chanctx_conf *ctx)
 {
@@ -7547,6 +9654,7 @@
 	 */
 	ar->rx_channel = ctx->def.chan;
 	spin_unlock_bh(&ar->data_lock);
+	ar->chan_tx_pwr = ATH12K_PDEV_TX_POWER_INVALID;
 
 	return 0;
 }
@@ -7575,6 +9683,7 @@
 	 */
 	ar->rx_channel = NULL;
 	spin_unlock_bh(&ar->data_lock);
+	ar->chan_tx_pwr = ATH12K_PDEV_TX_POWER_INVALID;
 }
 
 static enum wmi_phy_mode
@@ -7640,6 +9749,61 @@
 	return down_mode;
 }
 
+static void
+ath12k_mac_mlo_get_vdev_args(struct ath12k_link_vif *arvif,
+			     struct wmi_ml_arg *ml_arg)
+{
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct wmi_ml_partner_info *partner_info;
+	struct ieee80211_bss_conf *link_conf;
+	struct ath12k_link_vif *arvif_p;
+	unsigned long links;
+	u8 link_id;
+
+	lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
+
+	if (!ath12k_mac_is_ml_arvif(arvif))
+		return;
+
+	if (hweight16(ahvif->vif->valid_links) > ATH12K_WMI_MLO_MAX_LINKS)
+		return;
+
+	ml_arg->enabled = true;
+
+	/* The driver always adds a new link via VDEV START; FW takes
+	 * care of internally adding this link to the existing
+	 * link vdevs which are advertised as partners below.
+	 */
+	ml_arg->link_add = true;
+	partner_info = ml_arg->partner_info;
+
+	links = ahvif->links_map;
+	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif_p = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+
+		if (WARN_ON(!arvif_p))
+			continue;
+
+		if (arvif == arvif_p)
+			continue;
+
+		if (!arvif_p->is_created)
+			continue;
+
+		link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+					      ahvif->vif->link_conf[arvif_p->link_id]);
+
+		if (!link_conf)
+			continue;
+
+		partner_info->vdev_id = arvif_p->vdev_id;
+		partner_info->hw_link_id = arvif_p->ar->pdev->hw_link_id;
+		ether_addr_copy(partner_info->addr, link_conf->addr);
+		ml_arg->num_partner_links++;
+		partner_info++;
+	}
+}
+
 static int
 ath12k_mac_vdev_start_restart(struct ath12k_link_vif *arvif,
 			      struct ieee80211_chanctx_conf *ctx,
@@ -7649,11 +9813,20 @@
 	struct ath12k_base *ab = ar->ab;
 	struct wmi_vdev_start_req_arg arg = {};
 	const struct cfg80211_chan_def *chandef = &ctx->def;
+	struct ieee80211_hw *hw = ath12k_ar_to_hw(ar);
 	struct ath12k_vif *ahvif = arvif->ahvif;
-	int he_support = ahvif->vif->bss_conf.he_support;
+	struct ieee80211_bss_conf *link_conf;
+	unsigned int dfs_cac_time;
 	int ret;
 
-	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+	lockdep_assert_wiphy(hw->wiphy);
+
+	link_conf = ath12k_mac_get_link_bss_conf(arvif);
+	if (!link_conf) {
+		ath12k_warn(ar->ab, "unable to access bss link conf in vdev start for vif %pM link %u\n",
+			    ahvif->vif->addr, arvif->link_id);
+		return -ENOLINK;
+	}
 
 	reinit_completion(&ar->vdev_setup_done);
 
@@ -7706,18 +9879,13 @@
 		spin_unlock_bh(&ab->base_lock);
 
 		/* TODO: Notify if secondary 80Mhz also needs radar detection */
-		if (he_support) {
-			ret = ath12k_set_he_mu_sounding_mode(ar, arvif);
-			if (ret) {
-				ath12k_warn(ar->ab, "failed to set he mode vdev %i\n",
-					    arg.vdev_id);
-				return ret;
-			}
-		}
 	}
 
 	arg.passive |= !!(chandef->chan->flags & IEEE80211_CHAN_NO_IR);
 
+	if (!restart)
+		ath12k_mac_mlo_get_vdev_args(arvif, &arg.ml);
+
 	ath12k_dbg(ab, ATH12K_DBG_MAC,
 		   "mac vdev %d start center_freq %d phymode %s punct_bitmap 0x%x\n",
 		   arg.vdev_id, arg.freq,
@@ -7741,20 +9909,20 @@
 	ath12k_dbg(ab, ATH12K_DBG_MAC,  "vdev %pM started, vdev_id %d\n",
 		   ahvif->vif->addr, arvif->vdev_id);
 
-	/* Enable CAC Flag in the driver by checking the channel DFS cac time,
-	 * i.e dfs_cac_ms value which will be valid only for radar channels
-	 * and state as NL80211_DFS_USABLE which indicates CAC needs to be
-	 * done before channel usage. This flags is used to drop rx packets.
+	/* Enable the CAC Running Flag in the driver by checking all sub-channels'
+	 * DFS states as NL80211_DFS_USABLE, which indicates CAC needs to be
+	 * done before channel usage. This flag is used to drop rx packets.
 	 * during CAC.
 	 */
 	/* TODO: Set the flag for other interface types as required */
-	if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP &&
-	    chandef->chan->dfs_cac_ms &&
-	    chandef->chan->dfs_state == NL80211_DFS_USABLE) {
-		set_bit(ATH12K_CAC_RUNNING, &ar->dev_flags);
+	if (arvif->ahvif->vdev_type == WMI_VDEV_TYPE_AP && ctx->radar_enabled &&
+	    cfg80211_chandef_dfs_usable(hw->wiphy, chandef)) {
+		set_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags);
+		dfs_cac_time = cfg80211_chandef_dfs_cac_time(hw->wiphy, chandef);
+
 		ath12k_dbg(ab, ATH12K_DBG_MAC,
-			   "CAC Started in chan_freq %d for vdev %d\n",
-			   arg.freq, arg.vdev_id);
+			   "CAC started dfs_cac_time %u center_freq %d center_freq1 %d for vdev %d\n",
+			   dfs_cac_time, arg.freq, arg.band_center_freq1, arg.vdev_id);
 	}
 
 	ret = ath12k_mac_set_txbf_conf(arvif);
@@ -7791,20 +9959,33 @@
 {
 	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
 	struct ath12k_mac_change_chanctx_arg *arg = data;
+	struct ieee80211_bss_conf *link_conf;
 	struct ath12k_link_vif *arvif;
+	unsigned long links_map;
+	u8 link_id;
 
 	lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
 
-	arvif = &ahvif->deflink;
+	links_map = ahvif->links_map;
+	for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+		if (WARN_ON(!arvif))
+			continue;
 
 	if (arvif->ar != arg->ar)
-		return;
+			continue;
 
-	if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
-		return;
+		link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+					      vif->link_conf[link_id]);
+		if (WARN_ON(!link_conf))
+			continue;
+
+		if (rcu_access_pointer(link_conf->chanctx_conf) != arg->ctx)
+			continue;
 
 	arg->n_vifs++;
 }
+}
 
 static void
 ath12k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
@@ -7812,19 +9993,31 @@
 {
 	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
 	struct ath12k_mac_change_chanctx_arg *arg = data;
+	struct ieee80211_bss_conf *link_conf;
 	struct ieee80211_chanctx_conf *ctx;
 	struct ath12k_link_vif *arvif;
+	unsigned long links_map;
+	u8 link_id;
 
 	lockdep_assert_wiphy(ahvif->ah->hw->wiphy);
 
-	arvif = &ahvif->deflink;
+	links_map = ahvif->links_map;
+	for_each_set_bit(link_id, &links_map, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = wiphy_dereference(ahvif->ah->hw->wiphy, ahvif->link[link_id]);
+		if (WARN_ON(!arvif))
+			continue;
 
 	if (arvif->ar != arg->ar)
-		return;
+			continue;
 
-	ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
+		link_conf = wiphy_dereference(ahvif->ah->hw->wiphy,
+					      vif->link_conf[arvif->link_id]);
+		if (WARN_ON(!link_conf))
+			continue;
+
+		ctx = rcu_access_pointer(link_conf->chanctx_conf);
 	if (ctx != arg->ctx)
-		return;
+			continue;
 
 	if (WARN_ON(arg->next_vif == arg->n_vifs))
 		return;
@@ -7832,8 +10025,10 @@
 	arg->vifs[arg->next_vif].vif = vif;
 	arg->vifs[arg->next_vif].old_ctx = ctx;
 	arg->vifs[arg->next_vif].new_ctx = ctx;
+		arg->vifs[arg->next_vif].link_conf = link_conf;
 	arg->next_vif++;
 }
+}
 
 static u32 ath12k_mac_nlwidth_to_wmiwidth(enum nl80211_chan_width width)
 {
@@ -7892,10 +10087,12 @@
 			   int n_vifs)
 {
 	struct ath12k_wmi_vdev_up_params params = {};
+	struct ath12k_link_vif *arvif, *tx_arvif;
+	struct ieee80211_bss_conf *link_conf;
 	struct ath12k_base *ab = ar->ab;
-	struct ath12k_link_vif *arvif;
 	struct ieee80211_vif *vif;
 	struct ath12k_vif *ahvif;
+	u8 link_id;
 	int ret;
 	int i;
 	bool monitor_vif = false;
@@ -7905,7 +10102,10 @@
 	for (i = 0; i < n_vifs; i++) {
 		vif = vifs[i].vif;
 		ahvif = ath12k_vif_to_ahvif(vif);
-		arvif = &ahvif->deflink;
+		link_conf = vifs[i].link_conf;
+		link_id = link_conf->link_id;
+		arvif = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+					  ahvif->link[link_id]);
 
 		if (vif->type == NL80211_IFTYPE_MONITOR)
 			monitor_vif = true;
@@ -7957,14 +10157,12 @@
 		params.vdev_id = arvif->vdev_id;
 		params.aid = ahvif->aid;
 		params.bssid = arvif->bssid;
-		if (vif->mbssid_tx_vif) {
-			struct ath12k_vif *ahvif =
-				ath12k_vif_to_ahvif(vif->mbssid_tx_vif);
-			struct ath12k_link_vif *arvif = &ahvif->deflink;
 
-			params.tx_bssid = arvif->bssid;
-			params.nontx_profile_idx = vif->bss_conf.bssid_index;
-			params.nontx_profile_cnt = 1 << vif->bss_conf.bssid_indicator;
+		tx_arvif = ath12k_mac_get_tx_arvif(arvif, link_conf);
+		if (tx_arvif) {
+			params.tx_bssid = tx_arvif->bssid;
+			params.nontx_profile_idx = link_conf->bssid_index;
+			params.nontx_profile_cnt = 1 << link_conf->bssid_indicator;
 		}
 		ret = ath12k_wmi_vdev_up(arvif->ar, &params);
 		if (ret) {
@@ -8112,11 +10310,8 @@
 		return -ENOMEM;
 	}
 
-	if (!arvif->is_started) {
 		ar = ath12k_mac_assign_vif_to_vdev(hw, arvif, ctx);
-		if (!ar)
-			return -EINVAL;
-	} else {
+	if (!ar) {
 		ath12k_warn(arvif->ar->ab, "failed to assign chanctx for vif %pM link id %u link vif is already started",
 			    vif->addr, link_id);
 		return -EINVAL;
@@ -8203,6 +10398,9 @@
 	ar = arvif->ar;
 	ab = ar->ab;
 
+	if (unlikely(test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
+		return;
+
 	ath12k_dbg(ab, ATH12K_DBG_MAC,
 		   "mac chanctx unassign ptr %p vdev_id %i\n",
 		   ctx, arvif->vdev_id);
@@ -8230,9 +10428,6 @@
 	if (ahvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
 	    ar->num_started_vdevs == 1 && ar->monitor_vdev_created)
 		ath12k_mac_monitor_stop(ar);
-
-	ath12k_mac_remove_link_interface(hw, arvif);
-	ath12k_mac_unassign_link_vif(arvif);
 }
 
 static int
@@ -8288,7 +10483,8 @@
 /* mac80211 stores device specific RTS/Fragmentation threshold value,
  * this is set interface specific to firmware from ath12k driver
  */
-static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
+static int ath12k_mac_op_set_rts_threshold(struct ieee80211_hw *hw, u8 radio_id,
+					   u32 value)
 {
 	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
 	struct ath12k *ar;
@@ -8361,6 +10557,8 @@
 
 int ath12k_mac_wait_tx_complete(struct ath12k *ar)
 {
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
 	ath12k_mac_drain_tx(ar);
 	return ath12k_mac_flush(ar);
 }
@@ -8369,7 +10567,11 @@
 				u32 queues, bool drop)
 {
 	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+	struct ath12k_link_vif *arvif;
+	struct ath12k_vif *ahvif;
+	unsigned long links;
 	struct ath12k *ar;
+	u8 link_id;
 	int i;
 
 	lockdep_assert_wiphy(hw->wiphy);
@@ -8384,12 +10586,18 @@
 		return;
 	}
 
-	ar = ath12k_get_ar_by_vif(hw, vif);
+	for_each_ar(ah, ar, i)
+		wiphy_work_flush(hw->wiphy, &ar->wmi_mgmt_tx_work);
 
-	if (!ar)
-		return;
+	ahvif = ath12k_vif_to_ahvif(vif);
+	links = ahvif->links_map;
+	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = wiphy_dereference(hw->wiphy, ahvif->link[link_id]);
+		if (!(arvif && arvif->ar))
+			continue;
 
-	ath12k_mac_flush(ar);
+		ath12k_mac_flush(arvif->ar);
+	}
 }
 
 static int
@@ -8421,19 +10629,36 @@
 	if (ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask))
 		return false;
 
+	if (ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask))
+		return false;
+
 	return num_rates == 1;
 }
 
+static __le16
+ath12k_mac_get_tx_mcs_map(const struct ieee80211_sta_he_cap *he_cap)
+{
+	if (he_cap->he_cap_elem.phy_cap_info[0] &
+	    IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+		return he_cap->he_mcs_nss_supp.tx_mcs_160;
+
+	return he_cap->he_mcs_nss_supp.tx_mcs_80;
+}
+
 static bool
 ath12k_mac_bitrate_mask_get_single_nss(struct ath12k *ar,
+				       struct ieee80211_vif *vif,
 				       enum nl80211_band band,
 				       const struct cfg80211_bitrate_mask *mask,
 				       int *nss)
 {
 	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
 	u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
+	const struct ieee80211_sta_he_cap *he_cap;
+	u16 he_mcs_map = 0;
 	u8 ht_nss_mask = 0;
 	u8 vht_nss_mask = 0;
+	u8 he_nss_mask = 0;
 	int i;
 
 	/* No need to consider legacy here. Basic rates are always present
@@ -8460,7 +10685,24 @@
 			return false;
 	}
 
-	if (ht_nss_mask != vht_nss_mask)
+	he_cap = ieee80211_get_he_iftype_cap_vif(sband, vif);
+	if (!he_cap)
+		return false;
+
+	he_mcs_map = le16_to_cpu(ath12k_mac_get_tx_mcs_map(he_cap));
+
+	for (i = 0; i < ARRAY_SIZE(mask->control[band].he_mcs); i++) {
+		if (mask->control[band].he_mcs[i] == 0)
+			continue;
+
+		if (mask->control[band].he_mcs[i] ==
+		    ath12k_mac_get_max_he_mcs_map(he_mcs_map, i))
+			he_nss_mask |= BIT(i);
+		else
+			return false;
+	}
+
+	if (ht_nss_mask != vht_nss_mask || ht_nss_mask != he_nss_mask)
 		return false;
 
 	if (ht_nss_mask == 0)
@@ -8507,18 +10749,135 @@
 	return 0;
 }
 
-static int ath12k_mac_set_fixed_rate_params(struct ath12k_link_vif *arvif,
-					    u32 rate, u8 nss, u8 sgi, u8 ldpc)
+static int
+ath12k_mac_set_fixed_rate_gi_ltf(struct ath12k_link_vif *arvif, u8 he_gi, u8 he_ltf)
+{
+	struct ath12k *ar = arvif->ar;
+	int ret;
+
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	/* GI duration to WMI value: 0.8 us = 0, 1.6 us = 2 and 3.2 us = 3. */
+	if (he_gi && he_gi != 0xFF)
+		he_gi += 1;
+
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+					    WMI_VDEV_PARAM_SGI, he_gi);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set HE GI:%d, error:%d\n",
+			    he_gi, ret);
+		return ret;
+	}
+	/* WMI HE LTF values start from 1 */
+	if (he_ltf != 0xFF)
+		he_ltf += 1;
+
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+					    WMI_VDEV_PARAM_HE_LTF, he_ltf);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set HE LTF:%d, error:%d\n",
+			    he_ltf, ret);
+		return ret;
+	}
+	return 0;
+}
+
+static int
+ath12k_mac_set_auto_rate_gi_ltf(struct ath12k_link_vif *arvif, u16 he_gi, u8 he_ltf)
+{
+	struct ath12k *ar = arvif->ar;
+	int ret;
+	u32 he_ar_gi_ltf;
+
+	if (he_gi != 0xFF) {
+		switch (he_gi) {
+		case NL80211_RATE_INFO_HE_GI_0_8:
+			he_gi = WMI_AUTORATE_800NS_GI;
+			break;
+		case NL80211_RATE_INFO_HE_GI_1_6:
+			he_gi = WMI_AUTORATE_1600NS_GI;
+			break;
+		case NL80211_RATE_INFO_HE_GI_3_2:
+			he_gi = WMI_AUTORATE_3200NS_GI;
+			break;
+		default:
+			ath12k_warn(ar->ab, "Invalid GI\n");
+			return -EINVAL;
+		}
+	}
+
+	if (he_ltf != 0xFF) {
+		switch (he_ltf) {
+		case NL80211_RATE_INFO_HE_1XLTF:
+			he_ltf = WMI_HE_AUTORATE_LTF_1X;
+			break;
+		case NL80211_RATE_INFO_HE_2XLTF:
+			he_ltf = WMI_HE_AUTORATE_LTF_2X;
+			break;
+		case NL80211_RATE_INFO_HE_4XLTF:
+			he_ltf = WMI_HE_AUTORATE_LTF_4X;
+			break;
+		default:
+			ath12k_warn(ar->ab, "Invalid LTF\n");
+			return -EINVAL;
+		}
+	}
+
+	he_ar_gi_ltf = he_gi | he_ltf;
+
+	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
+					    WMI_VDEV_PARAM_AUTORATE_MISC_CFG,
+					    he_ar_gi_ltf);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to set HE autorate GI:%u, LTF:%u params, error:%d\n",
+			    he_gi, he_ltf, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static u32 ath12k_mac_nlgi_to_wmigi(enum nl80211_txrate_gi gi)
 {
+	switch (gi) {
+	case NL80211_TXRATE_DEFAULT_GI:
+		return WMI_GI_400_NS;
+	case NL80211_TXRATE_FORCE_LGI:
+		return WMI_GI_800_NS;
+	default:
+		return WMI_GI_400_NS;
+	}
+}
+
+static int ath12k_mac_set_rate_params(struct ath12k_link_vif *arvif,
+				      u32 rate, u8 nss, u8 sgi, u8 ldpc,
+				      u8 he_gi, u8 he_ltf, bool he_fixed_rate)
+{
+	struct ieee80211_bss_conf *link_conf;
 	struct ath12k *ar = arvif->ar;
 	u32 vdev_param;
+	u32 param_value;
 	int ret;
+	bool he_support;
 
 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
 
-	ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
-		   arvif->vdev_id, rate, nss, sgi);
+	link_conf = ath12k_mac_get_link_bss_conf(arvif);
+	if (!link_conf)
+		return -EINVAL;
 
+	he_support = link_conf->he_support;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "mac set rate params vdev %i rate 0x%02x nss 0x%02x sgi 0x%02x ldpc 0x%02x\n",
+		   arvif->vdev_id, rate, nss, sgi, ldpc);
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_MAC,
+		   "he_gi 0x%02x he_ltf 0x%02x he_fixed_rate %d\n", he_gi,
+		   he_ltf, he_fixed_rate);
+
+	if (!he_support) {
 	vdev_param = WMI_VDEV_PARAM_FIXED_RATE;
 	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
 					    vdev_param, rate);
@@ -8527,8 +10886,10 @@
 			    rate, ret);
 		return ret;
 	}
+	}
 
 	vdev_param = WMI_VDEV_PARAM_NSS;
+
 	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
 					    vdev_param, nss);
 	if (ret) {
@@ -8537,23 +10898,32 @@
 		return ret;
 	}
 
-	vdev_param = WMI_VDEV_PARAM_SGI;
 	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
-					    vdev_param, sgi);
+					    WMI_VDEV_PARAM_LDPC, ldpc);
 	if (ret) {
-		ath12k_warn(ar->ab, "failed to set sgi param %d: %d\n",
-			    sgi, ret);
+		ath12k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
+			    ldpc, ret);
 		return ret;
 	}
 
-	vdev_param = WMI_VDEV_PARAM_LDPC;
+	if (he_support) {
+		if (he_fixed_rate)
+			ret = ath12k_mac_set_fixed_rate_gi_ltf(arvif, he_gi, he_ltf);
+		else
+			ret = ath12k_mac_set_auto_rate_gi_ltf(arvif, he_gi, he_ltf);
+		if (ret)
+			return ret;
+	} else {
+		vdev_param = WMI_VDEV_PARAM_SGI;
+		param_value = ath12k_mac_nlgi_to_wmigi(sgi);
 	ret = ath12k_wmi_vdev_set_param_cmd(ar, arvif->vdev_id,
-					    vdev_param, ldpc);
+						    vdev_param, param_value);
 	if (ret) {
-		ath12k_warn(ar->ab, "failed to set ldpc param %d: %d\n",
-			    ldpc, ret);
+			ath12k_warn(ar->ab, "failed to set sgi param %d: %d\n",
+				    sgi, ret);
 		return ret;
 	}
+	}
 
 	return 0;
 }
@@ -8583,15 +10953,44 @@
 	return true;
 }
 
+static bool
+ath12k_mac_he_mcs_range_present(struct ath12k *ar,
+				enum nl80211_band band,
+				const struct cfg80211_bitrate_mask *mask)
+{
+	int i;
+	u16 he_mcs;
+
+	for (i = 0; i < NL80211_HE_NSS_MAX; i++) {
+		he_mcs = mask->control[band].he_mcs[i];
+
+		switch (he_mcs) {
+		case 0:
+		case BIT(8) - 1:
+		case BIT(10) - 1:
+		case BIT(12) - 1:
+			break;
+		default:
+			return false;
+		}
+	}
+
+	return true;
+}
+
 static void ath12k_mac_set_bitrate_mask_iter(void *data,
 					     struct ieee80211_sta *sta)
 {
 	struct ath12k_link_vif *arvif = data;
 	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
-	struct ath12k_link_sta *arsta = &ahsta->deflink;
+	struct ath12k_link_sta *arsta;
 	struct ath12k *ar = arvif->ar;
 
-	if (arsta->arvif != arvif)
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+				  ahsta->link[arvif->link_id]);
+	if (!arsta || arsta->arvif != arvif)
 		return;
 
 	spin_lock_bh(&ar->data_lock);
@@ -8606,21 +11005,81 @@
 {
 	struct ath12k_link_vif *arvif = data;
 	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
-	struct ath12k_link_sta *arsta = &ahsta->deflink;
+	struct ath12k_link_sta *arsta;
 	struct ath12k *ar = arvif->ar;
 	int ret;
 
-	if (arsta->arvif != arvif)
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+				  ahsta->link[arvif->link_id]);
+
+	if (!arsta || arsta->arvif != arvif)
 		return;
 
-	ret = ath12k_wmi_set_peer_param(ar, sta->addr,
+	ret = ath12k_wmi_set_peer_param(ar, arsta->addr,
 					arvif->vdev_id,
 					WMI_PEER_PARAM_FIXED_RATE,
 					WMI_FIXED_RATE_NONE);
 	if (ret)
 		ath12k_warn(ar->ab,
 			    "failed to disable peer fixed rate for STA %pM ret %d\n",
-			    sta->addr, ret);
+			    arsta->addr, ret);
+}
+
+static bool
+ath12k_mac_validate_fixed_rate_settings(struct ath12k *ar, enum nl80211_band band,
+					const struct cfg80211_bitrate_mask *mask,
+					unsigned int link_id)
+{
+	bool he_fixed_rate = false, vht_fixed_rate = false;
+	const u16 *vht_mcs_mask, *he_mcs_mask;
+	struct ieee80211_link_sta *link_sta;
+	struct ath12k_peer *peer, *tmp;
+	u8 vht_nss, he_nss;
+	int ret = true;
+
+	vht_mcs_mask = mask->control[band].vht_mcs;
+	he_mcs_mask = mask->control[band].he_mcs;
+
+	if (ath12k_mac_bitrate_mask_num_vht_rates(ar, band, mask) == 1)
+		vht_fixed_rate = true;
+
+	if (ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask) == 1)
+		he_fixed_rate = true;
+
+	if (!vht_fixed_rate && !he_fixed_rate)
+		return true;
+
+	vht_nss = ath12k_mac_max_vht_nss(vht_mcs_mask);
+	he_nss =  ath12k_mac_max_he_nss(he_mcs_mask);
+
+	rcu_read_lock();
+	spin_lock_bh(&ar->ab->base_lock);
+	list_for_each_entry_safe(peer, tmp, &ar->ab->peers, list) {
+		if (peer->sta) {
+			link_sta = rcu_dereference(peer->sta->link[link_id]);
+			if (!link_sta) {
+				ret = false;
+				goto exit;
+			}
+
+			if (vht_fixed_rate && (!link_sta->vht_cap.vht_supported ||
+					       link_sta->rx_nss < vht_nss)) {
+				ret = false;
+				goto exit;
+			}
+			if (he_fixed_rate && (!link_sta->he_cap.has_he ||
+					      link_sta->rx_nss < he_nss)) {
+				ret = false;
+				goto exit;
+			}
+		}
+	}
+exit:
+	spin_unlock_bh(&ar->ab->base_lock);
+	rcu_read_unlock();
+	return ret;
 }
 
 static int
@@ -8635,13 +11094,17 @@
 	enum nl80211_band band;
 	const u8 *ht_mcs_mask;
 	const u16 *vht_mcs_mask;
+	const u16 *he_mcs_mask;
+	u8 he_ltf = 0;
+	u8 he_gi = 0;
 	u32 rate;
-	u8 nss;
+	u8 nss, mac_nss;
 	u8 sgi;
 	u8 ldpc;
 	int single_nss;
 	int ret;
 	int num_rates;
+	bool he_fixed_rate = false;
 
 	lockdep_assert_wiphy(hw->wiphy);
 
@@ -8656,14 +11119,18 @@
 	band = def.chan->band;
 	ht_mcs_mask = mask->control[band].ht_mcs;
 	vht_mcs_mask = mask->control[band].vht_mcs;
+	he_mcs_mask = mask->control[band].he_mcs;
 	ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
 
 	sgi = mask->control[band].gi;
-	if (sgi == NL80211_TXRATE_FORCE_LGI) {
+	if (sgi == NL80211_TXRATE_FORCE_SGI) {
 		ret = -EINVAL;
 		goto out;
 	}
 
+	he_gi = mask->control[band].he_gi;
+	he_ltf = mask->control[band].he_ltf;
+
 	/* mac80211 doesn't support sending a fixed HT/VHT MCS alone, rather it
 	 * requires passing at least one of used basic rates along with them.
 	 * Fixed rate setting across different preambles(legacy, HT, VHT) is
@@ -8680,18 +11147,31 @@
 				    arvif->vdev_id, ret);
 			goto out;
 		}
+
 		ieee80211_iterate_stations_mtx(hw,
 					       ath12k_mac_disable_peer_fixed_rate,
 					       arvif);
-	} else if (ath12k_mac_bitrate_mask_get_single_nss(ar, band, mask,
+	} else if (ath12k_mac_bitrate_mask_get_single_nss(ar, vif, band, mask,
 							  &single_nss)) {
 		rate = WMI_FIXED_RATE_NONE;
 		nss = single_nss;
+		arvif->bitrate_mask = *mask;
+
+		ieee80211_iterate_stations_atomic(hw,
+						  ath12k_mac_set_bitrate_mask_iter,
+						  arvif);
 	} else {
 		rate = WMI_FIXED_RATE_NONE;
-		nss = min_t(u32, ar->num_tx_chains,
-			    max(ath12k_mac_max_ht_nss(ht_mcs_mask),
-				ath12k_mac_max_vht_nss(vht_mcs_mask)));
+
+		if (!ath12k_mac_validate_fixed_rate_settings(ar, band,
+							     mask, arvif->link_id))
+			ath12k_warn(ar->ab,
+				    "failed to update fixed rate settings due to mcs/nss incompatibility\n");
+
+		mac_nss = max3(ath12k_mac_max_ht_nss(ht_mcs_mask),
+			       ath12k_mac_max_vht_nss(vht_mcs_mask),
+			       ath12k_mac_max_he_nss(he_mcs_mask));
+		nss = min_t(u32, ar->num_tx_chains, mac_nss);
 
 		/* If multiple rates across different preambles are given
 		 * we can reconfigure this info with all peers using PEER_ASSOC
@@ -8723,9 +11203,21 @@
 			 */
 			ath12k_warn(ar->ab,
 				    "Setting more than one MCS Value in bitrate mask not supported\n");
-			return -EINVAL;
+			ret = -EINVAL;
+			goto out;
 		}
 
+		num_rates = ath12k_mac_bitrate_mask_num_he_rates(ar, band, mask);
+		if (num_rates == 1)
+			he_fixed_rate = true;
+
+		if (!ath12k_mac_he_mcs_range_present(ar, band, mask) &&
+		    num_rates > 1) {
+			ath12k_warn(ar->ab,
+				    "Setting more than one HE MCS Value in bitrate mask not supported\n");
+			ret = -EINVAL;
+			goto out;
+		}
 		ieee80211_iterate_stations_mtx(hw,
 					       ath12k_mac_disable_peer_fixed_rate,
 					       arvif);
@@ -8736,9 +11228,10 @@
 					       arvif);
 	}
 
-	ret = ath12k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
+	ret = ath12k_mac_set_rate_params(arvif, rate, nss, sgi, ldpc, he_gi,
+					 he_ltf, he_fixed_rate);
 	if (ret) {
-		ath12k_warn(ar->ab, "failed to set fixed rate params on vdev %i: %d\n",
+		ath12k_warn(ar->ab, "failed to set rate params on vdev %i: %d\n",
 			    arvif->vdev_id, ret);
 	}
 
@@ -8798,7 +11291,7 @@
 			ahvif = arvif->ahvif;
 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
 				   "reconfig cipher %d up %d vdev type %d\n",
-				   ahvif->key_cipher,
+				   arvif->key_cipher,
 				   arvif->is_up,
 				   ahvif->vdev_type);
 
@@ -8913,11 +11406,21 @@
 					 struct station_info *sinfo)
 {
 	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_fw_stats_req_params params = {};
 	struct ath12k_link_sta *arsta;
+	struct ath12k *ar;
+	s8 signal;
+	bool db2dbm;
 
 	lockdep_assert_wiphy(hw->wiphy);
 
 	arsta = &ahsta->deflink;
+	ar = ath12k_get_ar_by_vif(hw, vif, arsta->link_id);
+	if (!ar)
+		return;
+
+	db2dbm = test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
+			  ar->ab->wmi_ab.svc_map);
 
 	sinfo->rx_duration = arsta->rx_duration;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
@@ -8925,9 +11428,7 @@
 	sinfo->tx_duration = arsta->tx_duration;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_DURATION);
 
-	if (!arsta->txrate.legacy && !arsta->txrate.nss)
-		return;
-
+	if (arsta->txrate.legacy || arsta->txrate.nss) {
 	if (arsta->txrate.legacy) {
 		sinfo->txrate.legacy = arsta->txrate.legacy;
 	} else {
@@ -8937,15 +11438,38 @@
 		sinfo->txrate.he_gi = arsta->txrate.he_gi;
 		sinfo->txrate.he_dcm = arsta->txrate.he_dcm;
 		sinfo->txrate.he_ru_alloc = arsta->txrate.he_ru_alloc;
+			sinfo->txrate.eht_gi = arsta->txrate.eht_gi;
+			sinfo->txrate.eht_ru_alloc = arsta->txrate.eht_ru_alloc;
 	}
 	sinfo->txrate.flags = arsta->txrate.flags;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
+	}
 
 	/* TODO: Use real NF instead of default one. */
-	sinfo->signal = arsta->rssi_comb + ATH12K_DEFAULT_NOISE_FLOOR;
+	signal = arsta->rssi_comb;
+
+	params.pdev_id = ar->pdev->pdev_id;
+	params.vdev_id = 0;
+	params.stats_id = WMI_REQUEST_VDEV_STAT;
+
+	if (!signal &&
+	    ahsta->ahvif->vdev_type == WMI_VDEV_TYPE_STA &&
+	    !(ath12k_mac_get_fw_stats(ar, &params)))
+		signal = arsta->rssi_beacon;
+
+	if (signal) {
+		sinfo->signal = db2dbm ? signal : signal + ATH12K_DEFAULT_NOISE_FLOOR;
 	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
 }
 
+	sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi);
+
+	if (!db2dbm)
+		sinfo->signal_avg += ATH12K_DEFAULT_NOISE_FLOOR;
+
+	sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG);
+}
+
 static int ath12k_mac_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
 						  struct ieee80211_vif *vif)
 {
@@ -8963,6 +11487,7 @@
 	ath12k_scan_abort(ar);
 
 	cancel_delayed_work_sync(&ar->scan.timeout);
+	wiphy_work_cancel(hw->wiphy, &ar->scan.vdev_clean_wk);
 
 	return 0;
 }
@@ -8975,7 +11500,6 @@
 {
 	struct ath12k_vif *ahvif = ath12k_vif_to_ahvif(vif);
 	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
-	struct ath12k_wmi_scan_req_arg arg;
 	struct ath12k_link_vif *arvif;
 	struct ath12k *ar;
 	u32 scan_time_msec;
@@ -8986,10 +11510,8 @@
 	lockdep_assert_wiphy(hw->wiphy);
 
 	ar = ath12k_mac_select_scan_device(hw, vif, chan->center_freq);
-	if (!ar) {
-		ret = -EINVAL;
-		goto exit;
-	}
+	if (!ar)
+		return -EINVAL;
 
 	/* check if any of the links of ML VIF is already started on
 	 * radio(ar) correpsondig to given scan frequency and use it,
@@ -9008,15 +11530,11 @@
 	 * always on the same band for the vif
 	 */
 	if (arvif->is_created) {
-		if (WARN_ON(!arvif->ar)) {
-			ret = -EINVAL;
-			goto exit;
-		}
+		if (WARN_ON(!arvif->ar))
+			return -EINVAL;
 
-		if (ar != arvif->ar && arvif->is_started) {
-			ret = -EBUSY;
-			goto exit;
-		}
+		if (ar != arvif->ar && arvif->is_started)
+			return -EBUSY;
 
 		if (ar != arvif->ar) {
 			ath12k_mac_remove_link_interface(hw, arvif);
@@ -9033,7 +11551,7 @@
 		if (ret) {
 			ath12k_warn(ar->ab, "unable to create scan vdev for roc: %d\n",
 				    ret);
-			goto exit;
+			return ret;
 		}
 	}
 
@@ -9046,7 +11564,7 @@
 		reinit_completion(&ar->scan.on_channel);
 		ar->scan.state = ATH12K_SCAN_STARTING;
 		ar->scan.is_roc = true;
-		ar->scan.vdev_id = arvif->vdev_id;
+		ar->scan.arvif = arvif;
 		ar->scan.roc_freq = chan->center_freq;
 		ar->scan.roc_notify = true;
 		ret = 0;
@@ -9061,37 +11579,41 @@
 	spin_unlock_bh(&ar->data_lock);
 
 	if (ret)
-		goto exit;
+		return ret;
 
 	scan_time_msec = hw->wiphy->max_remain_on_channel_duration * 2;
 
-	memset(&arg, 0, sizeof(arg));
-	ath12k_wmi_start_scan_init(ar, &arg);
-	arg.num_chan = 1;
-	arg.chan_list = kcalloc(arg.num_chan, sizeof(*arg.chan_list),
+	struct ath12k_wmi_scan_req_arg *arg __free(kfree) =
+					kzalloc(sizeof(*arg), GFP_KERNEL);
+	if (!arg)
+		return -ENOMEM;
+
+	ath12k_wmi_start_scan_init(ar, arg);
+	arg->num_chan = 1;
+
+	u32 *chan_list __free(kfree) = kcalloc(arg->num_chan, sizeof(*chan_list),
 				GFP_KERNEL);
-	if (!arg.chan_list) {
-		ret = -ENOMEM;
-		goto exit;
-	}
+	if (!chan_list)
+		return -ENOMEM;
 
-	arg.vdev_id = arvif->vdev_id;
-	arg.scan_id = ATH12K_SCAN_ID;
-	arg.chan_list[0] = chan->center_freq;
-	arg.dwell_time_active = scan_time_msec;
-	arg.dwell_time_passive = scan_time_msec;
-	arg.max_scan_time = scan_time_msec;
-	arg.scan_f_passive = 1;
-	arg.burst_duration = duration;
+	arg->chan_list = chan_list;
+	arg->vdev_id = arvif->vdev_id;
+	arg->scan_id = ATH12K_SCAN_ID;
+	arg->chan_list[0] = chan->center_freq;
+	arg->dwell_time_active = scan_time_msec;
+	arg->dwell_time_passive = scan_time_msec;
+	arg->max_scan_time = scan_time_msec;
+	arg->scan_f_passive = 1;
+	arg->burst_duration = duration;
 
-	ret = ath12k_start_scan(ar, &arg);
+	ret = ath12k_start_scan(ar, arg);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to start roc scan: %d\n", ret);
 
 		spin_lock_bh(&ar->data_lock);
 		ar->scan.state = ATH12K_SCAN_IDLE;
 		spin_unlock_bh(&ar->data_lock);
-		goto free_chan_list;
+		return ret;
 	}
 
 	ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
@@ -9100,20 +11622,13 @@
 		ret = ath12k_scan_stop(ar);
 		if (ret)
 			ath12k_warn(ar->ab, "failed to stop scan: %d\n", ret);
-		ret = -ETIMEDOUT;
-		goto free_chan_list;
+		return -ETIMEDOUT;
 	}
 
 	ieee80211_queue_delayed_work(hw, &ar->scan.timeout,
 				     msecs_to_jiffies(duration));
 
-	ret = 0;
-
-free_chan_list:
-	kfree(arg.chan_list);
-
-exit:
-	return ret;
+	return 0;
 }
 
 static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw,
@@ -9172,7 +11687,7 @@
 	.set_rekey_data	                = ath12k_mac_op_set_rekey_data,
 	.sta_state                      = ath12k_mac_op_sta_state,
 	.sta_set_txpwr			= ath12k_mac_op_sta_set_txpwr,
-	.link_sta_rc_update		= ath12k_mac_op_sta_rc_update,
+	.link_sta_rc_update		= ath12k_mac_op_link_sta_rc_update,
 	.conf_tx                        = ath12k_mac_op_conf_tx,
 	.set_antenna			= ath12k_mac_op_set_antenna,
 	.get_antenna			= ath12k_mac_op_get_antenna,
@@ -9183,6 +11698,7 @@
 	.assign_vif_chanctx		= ath12k_mac_op_assign_vif_chanctx,
 	.unassign_vif_chanctx		= ath12k_mac_op_unassign_vif_chanctx,
 	.switch_vif_chanctx		= ath12k_mac_op_switch_vif_chanctx,
+	.get_txpower			= ath12k_mac_op_get_txpower,
 	.set_rts_threshold		= ath12k_mac_op_set_rts_threshold,
 	.set_frag_threshold		= ath12k_mac_op_set_frag_threshold,
 	.set_bitrate_mask		= ath12k_mac_op_set_bitrate_mask,
@@ -9191,14 +11707,32 @@
 	.sta_statistics			= ath12k_mac_op_sta_statistics,
 	.remain_on_channel              = ath12k_mac_op_remain_on_channel,
 	.cancel_remain_on_channel       = ath12k_mac_op_cancel_remain_on_channel,
-
+	.change_sta_links               = ath12k_mac_op_change_sta_links,
+	.can_activate_links             = ath12k_mac_op_can_activate_links,
 #ifdef CONFIG_PM
 	.suspend			= ath12k_wow_op_suspend,
 	.resume				= ath12k_wow_op_resume,
 	.set_wakeup			= ath12k_wow_op_set_wakeup,
 #endif
+#ifdef CONFIG_ATH12K_DEBUGFS
+	.vif_add_debugfs                = ath12k_debugfs_op_vif_add,
+#endif
+	CFG80211_TESTMODE_CMD(ath12k_tm_cmd)
+#ifdef CONFIG_ATH12K_DEBUGFS
+	.link_sta_add_debugfs           = ath12k_debugfs_link_sta_op_add,
+#endif
 };
 
+void ath12k_mac_update_freq_range(struct ath12k *ar,
+				  u32 freq_low, u32 freq_high)
+{
+	if (!(freq_low && freq_high))
+		return;
+
+	ar->freq_range.start_freq = MHZ_TO_KHZ(freq_low);
+	ar->freq_range.end_freq = MHZ_TO_KHZ(freq_high);
+}
+
 static void ath12k_mac_update_ch_list(struct ath12k *ar,
 				      struct ieee80211_supported_band *band,
 				      u32 freq_low, u32 freq_high)
@@ -9213,9 +11747,6 @@
 		    band->channels[i].center_freq > freq_high)
 			band->channels[i].flags |= IEEE80211_CHAN_DISABLED;
 	}
-
-	ar->freq_low = freq_low;
-	ar->freq_high = freq_high;
 }
 
 static u32 ath12k_get_phy_id(struct ath12k *ar, u32 band)
@@ -9223,10 +11754,10 @@
 	struct ath12k_pdev *pdev = ar->pdev;
 	struct ath12k_pdev_cap *pdev_cap = &pdev->cap;
 
-	if (band == WMI_HOST_WLAN_2G_CAP)
+	if (band == WMI_HOST_WLAN_2GHZ_CAP)
 		return pdev_cap->band[NL80211_BAND_2GHZ].phy_id;
 
-	if (band == WMI_HOST_WLAN_5G_CAP)
+	if (band == WMI_HOST_WLAN_5GHZ_CAP)
 		return pdev_cap->band[NL80211_BAND_5GHZ].phy_id;
 
 	ath12k_warn(ar->ab, "unsupported phy cap:%d\n", band);
@@ -9234,15 +11765,47 @@
 	return 0;
 }
 
+static int ath12k_mac_update_band(struct ath12k *ar,
+				  struct ieee80211_supported_band *orig_band,
+				  struct ieee80211_supported_band *new_band)
+{
+	struct ath12k_base *ab = ar->ab;
+	int i;
+
+	if (!orig_band || !new_band)
+		return -EINVAL;
+
+	if (orig_band->band != new_band->band)
+		return -EINVAL;
+
+	if (WARN_ON(!ab->ag->mlo_capable))
+		return -EOPNOTSUPP;
+
+	for (i = 0; i < new_band->n_channels; i++) {
+		if (new_band->channels[i].flags & IEEE80211_CHAN_DISABLED)
+			continue;
+		/* An enabled channel in new_band should not already be
+		 * enabled in orig_band.
+		 */
+		if (WARN_ON(!(orig_band->channels[i].flags &
+			      IEEE80211_CHAN_DISABLED)))
+			return -ENOTRECOVERABLE;
+		orig_band->channels[i].flags &= ~IEEE80211_CHAN_DISABLED;
+	}
+	return 0;
+}
+
 static int ath12k_mac_setup_channels_rates(struct ath12k *ar,
 					   u32 supported_bands,
 					   struct ieee80211_supported_band *bands[])
 {
+	struct ath12k_base *ab = ar->ab;
 	struct ieee80211_supported_band *band;
 	struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
 	struct ath12k_hw *ah = ar->ah;
 	void *channels;
-	u32 phy_id;
+	u32 phy_id, freq_low, freq_high;
+	int ret;
 
 	BUILD_BUG_ON((ARRAY_SIZE(ath12k_2ghz_channels) +
 		      ARRAY_SIZE(ath12k_5ghz_channels) +
@@ -9251,7 +11814,7 @@
 
 	reg_cap = &ar->ab->hal_reg_cap[ar->pdev_idx];
 
-	if (supported_bands & WMI_HOST_WLAN_2G_CAP) {
+	if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
 		channels = kmemdup(ath12k_2ghz_channels,
 				   sizeof(ath12k_2ghz_channels),
 				   GFP_KERNEL);
@@ -9264,19 +11827,38 @@
 		band->channels = channels;
 		band->n_bitrates = ath12k_g_rates_size;
 		band->bitrates = ath12k_g_rates;
-		bands[NL80211_BAND_2GHZ] = band;
 
-		if (ar->ab->hw_params->single_pdev_only) {
-			phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2G_CAP);
+		if (ab->hw_params->single_pdev_only) {
+			phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_2GHZ_CAP);
 			reg_cap = &ar->ab->hal_reg_cap[phy_id];
 		}
+
+		freq_low = max(reg_cap->low_2ghz_chan,
+			       ab->reg_freq_2ghz.start_freq);
+		freq_high = min(reg_cap->high_2ghz_chan,
+				ab->reg_freq_2ghz.end_freq);
+
 		ath12k_mac_update_ch_list(ar, band,
 					  reg_cap->low_2ghz_chan,
 					  reg_cap->high_2ghz_chan);
+		ath12k_mac_update_freq_range(ar, freq_low, freq_high);
+		ar->num_channels = ath12k_reg_get_num_chans_in_band(ar, band);
+		if (!bands[NL80211_BAND_2GHZ]) {
+			bands[NL80211_BAND_2GHZ] = band;
+		} else {
+			/* Split mac in same band under same wiphy during MLO */
+			ret = ath12k_mac_update_band(ar,
+						     bands[NL80211_BAND_2GHZ],
+						     band);
+			if (ret)
+				return ret;
+			ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac pdev %u identified as 2 GHz split mac during MLO\n",
+				   ar->pdev->pdev_id);
+		}
 	}
 
-	if (supported_bands & WMI_HOST_WLAN_5G_CAP) {
-		if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6G_FREQ) {
+	if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP) {
+		if (reg_cap->high_5ghz_chan >= ATH12K_MIN_6GHZ_FREQ) {
 			channels = kmemdup(ath12k_6ghz_channels,
 					   sizeof(ath12k_6ghz_channels), GFP_KERNEL);
 			if (!channels) {
@@ -9291,14 +11873,34 @@
 			band->channels = channels;
 			band->n_bitrates = ath12k_a_rates_size;
 			band->bitrates = ath12k_a_rates;
-			bands[NL80211_BAND_6GHZ] = band;
+
+			freq_low = max(reg_cap->low_5ghz_chan,
+				       ab->reg_freq_6ghz.start_freq);
+			freq_high = min(reg_cap->high_5ghz_chan,
+					ab->reg_freq_6ghz.end_freq);
+
 			ath12k_mac_update_ch_list(ar, band,
 						  reg_cap->low_5ghz_chan,
 						  reg_cap->high_5ghz_chan);
+
+			ath12k_mac_update_freq_range(ar, freq_low, freq_high);
 			ah->use_6ghz_regd = true;
+			ar->num_channels = ath12k_reg_get_num_chans_in_band(ar, band);
+			if (!bands[NL80211_BAND_6GHZ]) {
+				bands[NL80211_BAND_6GHZ] = band;
+			} else {
+				/* Split mac in same band under same wiphy during MLO */
+				ret = ath12k_mac_update_band(ar,
+							     bands[NL80211_BAND_6GHZ],
+							     band);
+				if (ret)
+					return ret;
+				ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac pdev %u identified as 6 GHz split mac during MLO\n",
+					   ar->pdev->pdev_id);
+			}
 		}
 
-		if (reg_cap->low_5ghz_chan < ATH12K_MIN_6G_FREQ) {
+		if (reg_cap->low_5ghz_chan < ATH12K_MIN_6GHZ_FREQ) {
 			channels = kmemdup(ath12k_5ghz_channels,
 					   sizeof(ath12k_5ghz_channels),
 					   GFP_KERNEL);
@@ -9314,16 +11916,34 @@
 			band->channels = channels;
 			band->n_bitrates = ath12k_a_rates_size;
 			band->bitrates = ath12k_a_rates;
-			bands[NL80211_BAND_5GHZ] = band;
 
 			if (ar->ab->hw_params->single_pdev_only) {
-				phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5G_CAP);
+				phy_id = ath12k_get_phy_id(ar, WMI_HOST_WLAN_5GHZ_CAP);
 				reg_cap = &ar->ab->hal_reg_cap[phy_id];
 			}
 
+			freq_low = max(reg_cap->low_5ghz_chan,
+				       ab->reg_freq_5ghz.start_freq);
+			freq_high = min(reg_cap->high_5ghz_chan,
+					ab->reg_freq_5ghz.end_freq);
 			ath12k_mac_update_ch_list(ar, band,
 						  reg_cap->low_5ghz_chan,
 						  reg_cap->high_5ghz_chan);
+
+			ath12k_mac_update_freq_range(ar, freq_low, freq_high);
+			ar->num_channels = ath12k_reg_get_num_chans_in_band(ar, band);
+			if (!bands[NL80211_BAND_5GHZ]) {
+				bands[NL80211_BAND_5GHZ] = band;
+			} else {
+				/* Split mac in same band under same wiphy during MLO */
+				ret = ath12k_mac_update_band(ar,
+							     bands[NL80211_BAND_5GHZ],
+							     band);
+				if (ret)
+					return ret;
+				ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac pdev %u identified as 5 GHz split mac during MLO\n",
+					   ar->pdev->pdev_id);
+			}
 		}
 	}
 
@@ -9347,14 +11967,20 @@
 {
 	struct ath12k *ar;
 	int i;
-	u16 interface_modes, mode;
-	bool is_enable = true;
+	u16 interface_modes, mode = 0;
+	bool is_enable = false;
 
+	if (type == NL80211_IFTYPE_MESH_POINT) {
+		if (IS_ENABLED(CONFIG_MAC80211_MESH))
+			mode = BIT(type);
+	} else {
 	mode = BIT(type);
+	}
+
 	for_each_ar(ah, ar, i) {
 		interface_modes = ar->ab->hw_params->interface_modes;
-		if (!(interface_modes & mode)) {
-			is_enable = false;
+		if (interface_modes & mode) {
+			is_enable = true;
 			break;
 		}
 	}
@@ -9362,23 +11988,20 @@
 	return is_enable;
 }
 
-static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
+static int
+ath12k_mac_setup_radio_iface_comb(struct ath12k *ar,
+				  struct ieee80211_iface_combination *comb)
 {
-	struct wiphy *wiphy = ah->hw->wiphy;
-	struct ieee80211_iface_combination *combinations;
+	u16 interface_modes = ar->ab->hw_params->interface_modes;
 	struct ieee80211_iface_limit *limits;
 	int n_limits, max_interfaces;
 	bool ap, mesh, p2p;
 
-	ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP);
-	p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE);
+	ap = interface_modes & BIT(NL80211_IFTYPE_AP);
+	p2p = interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE);
 
 	mesh = IS_ENABLED(CONFIG_MAC80211_MESH) &&
-		ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT);
-
-	combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
-	if (!combinations)
-		return -ENOMEM;
+	       (interface_modes & BIT(NL80211_IFTYPE_MESH_POINT));
 
 	if ((ap || mesh) && !p2p) {
 		n_limits = 2;
@@ -9395,10 +12018,8 @@
 	}
 
 	limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
-	if (!limits) {
-		kfree(combinations);
+	if (!limits)
 		return -ENOMEM;
-	}
 
 	limits[0].max = 1;
 	limits[0].types |= BIT(NL80211_IFTYPE_STATION);
@@ -9419,21 +12040,177 @@
 		limits[2].types |= BIT(NL80211_IFTYPE_P2P_DEVICE);
 	}
 
-	combinations[0].limits = limits;
-	combinations[0].n_limits = n_limits;
-	combinations[0].max_interfaces = max_interfaces;
-	combinations[0].num_different_channels = 1;
-	combinations[0].beacon_int_infra_match = true;
-	combinations[0].beacon_int_min_gcd = 100;
-	combinations[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+	comb[0].limits = limits;
+	comb[0].n_limits = n_limits;
+	comb[0].max_interfaces = max_interfaces;
+	comb[0].num_different_channels = 1;
+	comb[0].beacon_int_infra_match = true;
+	comb[0].beacon_int_min_gcd = 100;
+	comb[0].radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
 						BIT(NL80211_CHAN_WIDTH_20) |
 						BIT(NL80211_CHAN_WIDTH_40) |
-						BIT(NL80211_CHAN_WIDTH_80);
+					BIT(NL80211_CHAN_WIDTH_80) |
+					BIT(NL80211_CHAN_WIDTH_160);
+
+	return 0;
+}
 
+static int
+ath12k_mac_setup_global_iface_comb(struct ath12k_hw *ah,
+				   struct wiphy_radio *radio,
+				   u8 n_radio,
+				   struct ieee80211_iface_combination *comb)
+{
+	const struct ieee80211_iface_combination *iter_comb;
+	struct ieee80211_iface_limit *limits;
+	int i, j, n_limits;
+	bool ap, mesh, p2p;
+
+	if (!n_radio)
+		return 0;
+
+	ap = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_AP);
+	p2p = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_P2P_DEVICE);
+	mesh = ath12k_mac_is_iface_mode_enable(ah, NL80211_IFTYPE_MESH_POINT);
+
+	if ((ap || mesh) && !p2p)
+		n_limits = 2;
+	else if (p2p)
+		n_limits = 3;
+	else
+		n_limits = 1;
+
+	limits = kcalloc(n_limits, sizeof(*limits), GFP_KERNEL);
+	if (!limits)
+		return -ENOMEM;
+
+	for (i = 0; i < n_radio; i++) {
+		iter_comb = radio[i].iface_combinations;
+		for (j = 0; j < iter_comb->n_limits && j < n_limits; j++) {
+			limits[j].types |= iter_comb->limits[j].types;
+			limits[j].max += iter_comb->limits[j].max;
+		}
+
+		comb->max_interfaces += iter_comb->max_interfaces;
+		comb->num_different_channels += iter_comb->num_different_channels;
+		comb->radar_detect_widths |= iter_comb->radar_detect_widths;
+	}
+
+	comb->limits = limits;
+	comb->n_limits = n_limits;
+	comb->beacon_int_infra_match = true;
+	comb->beacon_int_min_gcd = 100;
+
+	return 0;
+}
+
+static
+void ath12k_mac_cleanup_iface_comb(const struct ieee80211_iface_combination *iface_comb)
+{
+	kfree(iface_comb[0].limits);
+	kfree(iface_comb);
+}
+
+static void ath12k_mac_cleanup_iface_combinations(struct ath12k_hw *ah)
+{
+	struct wiphy *wiphy = ah->hw->wiphy;
+	const struct wiphy_radio *radio;
+	int i;
+
+	if (wiphy->n_radio > 0) {
+		radio = wiphy->radio;
+		for (i = 0; i < wiphy->n_radio; i++)
+			ath12k_mac_cleanup_iface_comb(radio[i].iface_combinations);
+
+		kfree(wiphy->radio);
+	}
+
+	ath12k_mac_cleanup_iface_comb(wiphy->iface_combinations);
+}
+
+static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah)
+{
+	struct ieee80211_iface_combination *combinations, *comb;
+	struct wiphy *wiphy = ah->hw->wiphy;
+	struct wiphy_radio *radio;
+	struct ath12k *ar;
+	int i, ret;
+
+	combinations = kzalloc(sizeof(*combinations), GFP_KERNEL);
+	if (!combinations)
+		return -ENOMEM;
+
+	if (ah->num_radio == 1) {
+		ret = ath12k_mac_setup_radio_iface_comb(&ah->radio[0],
+							combinations);
+		if (ret) {
+			ath12k_hw_warn(ah, "failed to setup radio interface combinations for one radio: %d",
+				       ret);
+			goto err_free_combinations;
+		}
+
+		goto out;
+	}
+
+	/* there are multiple radios */
+
+	radio = kcalloc(ah->num_radio, sizeof(*radio), GFP_KERNEL);
+	if (!radio) {
+		ret = -ENOMEM;
+		goto err_free_combinations;
+	}
+
+	for_each_ar(ah, ar, i) {
+		comb = kzalloc(sizeof(*comb), GFP_KERNEL);
+		if (!comb) {
+			ret = -ENOMEM;
+			goto err_free_radios;
+		}
+
+		ret = ath12k_mac_setup_radio_iface_comb(ar, comb);
+		if (ret) {
+			ath12k_hw_warn(ah, "failed to setup radio interface combinations for radio %d: %d",
+				       i, ret);
+			kfree(comb);
+			goto err_free_radios;
+		}
+
+		radio[i].freq_range = &ar->freq_range;
+		radio[i].n_freq_range = 1;
+
+		radio[i].iface_combinations = comb;
+		radio[i].n_iface_combinations = 1;
+	}
+
+	ret = ath12k_mac_setup_global_iface_comb(ah, radio, ah->num_radio, combinations);
+	if (ret) {
+		ath12k_hw_warn(ah, "failed to setup global interface combinations: %d",
+			       ret);
+		goto err_free_all_radios;
+	}
+
+	wiphy->radio = radio;
+	wiphy->n_radio = ah->num_radio;
+
+out:
 	wiphy->iface_combinations = combinations;
 	wiphy->n_iface_combinations = 1;
 
 	return 0;
+
+err_free_all_radios:
+	i = ah->num_radio;
+
+err_free_radios:
+	while (i--)
+		ath12k_mac_cleanup_iface_comb(radio[i].iface_combinations);
+
+	kfree(radio);
+
+err_free_combinations:
+	kfree(combinations);
+
+	return ret;
 }
 
 static const u8 ath12k_if_types_ext_capa[] = {
@@ -9457,7 +12234,7 @@
 	[10] = WLAN_EXT_CAPA11_EMA_SUPPORT,
 };
 
-static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
+static struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = {
 	{
 		.extended_capabilities = ath12k_if_types_ext_capa,
 		.extended_capabilities_mask = ath12k_if_types_ext_capa,
@@ -9474,6 +12251,8 @@
 		.extended_capabilities_mask = ath12k_if_types_ext_capa_ap,
 		.extended_capabilities_len =
 				sizeof(ath12k_if_types_ext_capa_ap),
+		.eml_capabilities = 0,
+		.mld_capa_and_ops = 0,
 	},
 };
 
@@ -9490,12 +12269,12 @@
 static void ath12k_mac_hw_unregister(struct ath12k_hw *ah)
 {
 	struct ieee80211_hw *hw = ah->hw;
-	struct wiphy *wiphy = hw->wiphy;
 	struct ath12k *ar;
 	int i;
 
 	for_each_ar(ah, ar, i) {
 		cancel_work_sync(&ar->regd_update_work);
+		ath12k_fw_stats_free(&ar->fw_stats);
 		ath12k_debugfs_unregister(ar);
 	}
 
@@ -9504,9 +12283,9 @@
 	for_each_ar(ah, ar, i)
 		ath12k_mac_cleanup_unregister(ar);
 
-	kfree(wiphy->iface_combinations[0].limits);
-	kfree(wiphy->iface_combinations);
+	ath12k_mac_cleanup_iface_combinations(ah);
 
+	kfree(hw->wiphy->addresses);
 	SET_IEEE80211_DEV(hw, NULL);
 }
 
@@ -9562,9 +12341,16 @@
 	bool is_6ghz = false, is_raw_mode = false, is_monitor_disable = false;
 	u8 *mac_addr = NULL;
 	u8 mbssid_max_interfaces = 0;
+	struct mac_address *addresses;
+	u16 n_addresses;
 
 	wiphy->max_ap_assoc_sta = 0;
 
+	n_addresses = ah->num_radio;
+	addresses = kcalloc(n_addresses, sizeof(*addresses), GFP_KERNEL);
+	if (!addresses)
+		return -ENOMEM;
+
 	for_each_ar(ah, ar, i) {
 		u32 ht_cap_info = 0;
 
@@ -9580,7 +12366,10 @@
 		if (ret)
 			goto err_cleanup_unregister;
 
+		/* 6 GHz does not support HT Cap, hence do not consider it */
+		if (!ar->supports_6ghz)
 		ht_cap &= ht_cap_info;
+
 		wiphy->max_ap_assoc_sta += ar->max_num_stations;
 
 		/* Advertise the max antenna support of all radios, driver can handle
@@ -9601,6 +12390,8 @@
 		if (!ar->ab->hw_params->supports_monitor)
 			is_monitor_disable = true;
 
+		memcpy(addresses[i].addr, ar->mac_addr, ETH_ALEN);
+
 		if (i == 0)
 			mac_addr = ar->mac_addr;
 		else
@@ -9643,8 +12434,16 @@
 	ieee80211_hw_set(hw, QUEUE_CONTROL);
 	ieee80211_hw_set(hw, SUPPORTS_TX_FRAG);
 	ieee80211_hw_set(hw, REPORTS_LOW_ACK);
+	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
+	if (ath12k_frame_mode == ATH12K_HW_TXRX_ETHERNET) {
+		ieee80211_hw_set(hw, SUPPORTS_TX_ENCAP_OFFLOAD);
+		ieee80211_hw_set(hw, SUPPORTS_RX_DECAP_OFFLOAD);
+	}
+
+	if (cap->nss_ratio_enabled)
+		ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
 
-	if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) {
+	if ((ht_cap & WMI_HT_CAP_ENABLED) || is_6ghz) {
 		ieee80211_hw_set(hw, AMPDU_AGGREGATION);
 		ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
 		ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);
@@ -9660,7 +12459,7 @@
 	 * handle it when the ht capability different for each band.
 	 */
 	if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS ||
-	    (ar->supports_6ghz && ab->hw_params->supports_dynamic_smps_6ghz))
+	    (is_6ghz && ab->hw_params->supports_dynamic_smps_6ghz))
 		wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
 
 	wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
@@ -9682,8 +12481,21 @@
 	 */
 	wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT;
 
+	/* Copy over MLO related capabilities received from
+	 * WMI_SERVICE_READY_EXT2_EVENT if single_chip_mlo_supp is set.
+	 */
+	if (ab->ag->mlo_capable) {
+		ath12k_iftypes_ext_capa[2].eml_capabilities = cap->eml_cap;
+		ath12k_iftypes_ext_capa[2].mld_capa_and_ops = cap->mld_cap;
+		wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO;
+
+		ieee80211_hw_set(hw, MLO_MCAST_MULTI_LINK_TX);
+	}
+
 	hw->queues = ATH12K_HW_MAX_QUEUES;
 	wiphy->tx_queue_len = ATH12K_QUEUE_LEN;
+	wiphy->addresses = addresses;
+	wiphy->n_addresses = n_addresses;
 	hw->offchannel_tx_hw_queue = ATH12K_HW_MAX_QUEUES - 1;
 	hw->max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF_EHT;
 
@@ -9693,6 +12505,7 @@
 
 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_STA_TX_PWR);
+	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
 
 	wiphy->cipher_suites = cipher_suites;
 	wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -9711,6 +12524,13 @@
 	}
 
 	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_PUNCT);
+	if (test_bit(WMI_TLV_SERVICE_BEACON_PROTECTION_SUPPORT, ab->wmi_ab.svc_map))
+		wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION);
+	if (ah->num_radio == 1) {
+		/* This attribute at the phy level only makes sense when
+		 * not grouping devices. */
+		wiphy->dev_port = ar->pdev_idx;
+	}
 
 	ath12k_reg_init(hw);
 
@@ -9720,6 +12540,8 @@
 		ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
 	}
 
+	ieee80211_hw_set(hw, ALLOW_DRV_TX_FOR_DATA);
+
 	if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) {
 		wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
 		wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
@@ -9735,13 +12557,19 @@
 	ret = ath12k_wow_init(ar);
 	if (ret) {
 		ath12k_warn(ar->ab, "failed to init wow: %d\n", ret);
-		goto err_free_if_combs;
+		goto err_cleanup_if_combs;
 	}
 
+	snprintf(hw->wiphy->fw_version,
+		 sizeof(hw->wiphy->fw_version),
+		 "%s, 0x%x",
+		 ab->hw_params->name,
+		 ab->qmi.target.fw_version);
+
 	ret = ieee80211_register_hw(hw);
 	if (ret) {
 		ath12k_err(ab, "ieee80211 registration failed: %d\n", ret);
-		goto err_free_if_combs;
+		goto err_cleanup_if_combs;
 	}
 
 	if (is_monitor_disable)
@@ -9760,7 +12588,11 @@
 			goto err_unregister_hw;
 		}
 
+		ath12k_fw_stats_init(ar);
 		ath12k_debugfs_register(ar);
+		ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac pdev %u freq limits %u->%u MHz, no. of channels %u\n",
+			   ar->pdev->pdev_id, ar->freq_range.start_freq,
+			   ar->freq_range.end_freq, ar->num_channels);
 	}
 
 	return 0;
@@ -9771,9 +12603,8 @@
 
 	ieee80211_unregister_hw(hw);
 
-err_free_if_combs:
-	kfree(wiphy->iface_combinations[0].limits);
-	kfree(wiphy->iface_combinations);
+err_cleanup_if_combs:
+	ath12k_mac_cleanup_iface_combinations(ah);
 
 err_complete_cleanup_unregister:
 	i = ah->num_radio;
@@ -9784,6 +12615,7 @@
 		ath12k_mac_cleanup_unregister(ar);
 	}
 
+	kfree(addresses);
 	SET_IEEE80211_DEV(hw, NULL);
 
 	return ret;
@@ -9807,6 +12639,7 @@
 	ar->cfg_rx_chainmask = pdev->cap.rx_chain_mask;
 	ar->num_tx_chains = hweight32(pdev->cap.tx_chain_mask);
 	ar->num_rx_chains = hweight32(pdev->cap.rx_chain_mask);
+	ar->scan.arvif = NULL;
 
 	spin_lock_init(&ar->data_lock);
 	INIT_LIST_HEAD(&ar->arvifs);
@@ -9821,29 +12654,180 @@
 	init_completion(&ar->scan.started);
 	init_completion(&ar->scan.completed);
 	init_completion(&ar->scan.on_channel);
+	init_completion(&ar->mlo_setup_done);
+	init_completion(&ar->thermal.wmi_sync);
 
 	INIT_DELAYED_WORK(&ar->scan.timeout, ath12k_scan_timeout_work);
+	wiphy_work_init(&ar->scan.vdev_clean_wk, ath12k_scan_vdev_clean_work);
 	INIT_WORK(&ar->regd_update_work, ath12k_regd_update_work);
 
-	INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
+	wiphy_work_init(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work);
 	skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
+
+	ar->monitor_vdev_id = -1;
+	ar->monitor_conf_enabled = false;
+	ar->monitor_vdev_created = false;
+	ar->monitor_started = false;
+}
+
+static int __ath12k_mac_mlo_setup(struct ath12k *ar)
+{
+	u8 num_link = 0, partner_link_id[ATH12K_GROUP_MAX_RADIO] = {};
+	struct ath12k_base *partner_ab, *ab = ar->ab;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct wmi_mlo_setup_arg mlo = {};
+	struct ath12k_pdev *pdev;
+	unsigned long time_left;
+	int i, j, ret;
+
+	lockdep_assert_held(&ag->mutex);
+
+	reinit_completion(&ar->mlo_setup_done);
+
+	for (i = 0; i < ag->num_devices; i++) {
+		partner_ab = ag->ab[i];
+
+		for (j = 0; j < partner_ab->num_radios; j++) {
+			pdev = &partner_ab->pdevs[j];
+
+			/* Avoid the self link */
+			if (ar == pdev->ar)
+				continue;
+
+			partner_link_id[num_link] = pdev->hw_link_id;
+			num_link++;
+
+			ath12k_dbg(ab, ATH12K_DBG_MAC, "device %d pdev %d hw_link_id %d num_link %d\n",
+				   i, j, pdev->hw_link_id, num_link);
+		}
+	}
+
+	if (num_link == 0)
+		return 0;
+
+	mlo.group_id = cpu_to_le32(ag->id);
+	mlo.partner_link_id = partner_link_id;
+	mlo.num_partner_links = num_link;
+	ar->mlo_setup_status = 0;
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC, "group id %d num_link %d\n", ag->id, num_link);
+
+	ret = ath12k_wmi_mlo_setup(ar, &mlo);
+	if (ret) {
+		ath12k_err(ab, "failed to send  setup MLO WMI command for pdev %d: %d\n",
+			   ar->pdev_idx, ret);
+		return ret;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->mlo_setup_done,
+						WMI_MLO_CMD_TIMEOUT_HZ);
+
+	if (!time_left || ar->mlo_setup_status)
+		return ar->mlo_setup_status ? : -ETIMEDOUT;
+
+	ath12k_dbg(ab, ATH12K_DBG_MAC, "mlo setup done for pdev %d\n", ar->pdev_idx);
+
+	return 0;
+}
+
+static int __ath12k_mac_mlo_teardown(struct ath12k *ar)
+{
+	struct ath12k_base *ab = ar->ab;
+	int ret;
+	u8 num_link;
+
+	if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags))
+		return 0;
+
+	num_link = ath12k_get_num_partner_link(ar);
+
+	if (num_link == 0)
+		return 0;
+
+	ret = ath12k_wmi_mlo_teardown(ar);
+	if (ret) {
+		ath12k_warn(ab, "failed to send MLO teardown WMI command for pdev %d: %d\n",
+			    ar->pdev_idx, ret);
+		return ret;
 }
 
-int ath12k_mac_register(struct ath12k_base *ab)
+	ath12k_dbg(ab, ATH12K_DBG_MAC, "mlo teardown for pdev %d\n", ar->pdev_idx);
+
+	return 0;
+}
+
+int ath12k_mac_mlo_setup(struct ath12k_hw_group *ag)
 {
 	struct ath12k_hw *ah;
-	int i;
+	struct ath12k *ar;
 	int ret;
+	int i, j;
+
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ag->ah[i];
+		if (!ah)
+			continue;
+
+		for_each_ar(ah, ar, j) {
+			ar = &ah->radio[j];
+			ret = __ath12k_mac_mlo_setup(ar);
+			if (ret) {
+				ath12k_err(ar->ab, "failed to setup MLO: %d\n", ret);
+				goto err_setup;
+			}
+		}
+	}
 
-	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
 		return 0;
 
-	/* Initialize channel counters frequency value in hertz */
-	ab->cc_freq_hz = 320000;
-	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+err_setup:
+	for (i = i - 1; i >= 0; i--) {
+		ah = ag->ah[i];
+		if (!ah)
+			continue;
+
+		for (j = j - 1; j >= 0; j--) {
+			ar = &ah->radio[j];
+			if (!ar)
+				continue;
+
+			__ath12k_mac_mlo_teardown(ar);
+		}
+	}
+
+	return ret;
+}
+
+void ath12k_mac_mlo_teardown(struct ath12k_hw_group *ag)
+{
+	struct ath12k_hw *ah;
+	struct ath12k *ar;
+	int ret, i, j;
 
-	for (i = 0; i < ab->num_hw; i++) {
-		ah = ab->ah[i];
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ag->ah[i];
+		if (!ah)
+			continue;
+
+		for_each_ar(ah, ar, j) {
+			ar = &ah->radio[j];
+			ret = __ath12k_mac_mlo_teardown(ar);
+			if (ret) {
+				ath12k_err(ar->ab, "failed to teardown MLO: %d\n", ret);
+				break;
+			}
+		}
+	}
+}
+
+int ath12k_mac_register(struct ath12k_hw_group *ag)
+{
+	struct ath12k_hw *ah;
+	int i;
+	int ret;
+
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ath12k_ag_to_ah(ag, i);
 
 		ret = ath12k_mac_hw_register(ah);
 		if (ret)
@@ -9854,7 +12838,7 @@
 
 err:
 	for (i = i - 1; i >= 0; i--) {
-		ah = ab->ah[i];
+		ah = ath12k_ag_to_ah(ag, i);
 		if (!ah)
 			continue;
 
@@ -9864,13 +12848,13 @@
 	return ret;
 }
 
-void ath12k_mac_unregister(struct ath12k_base *ab)
+void ath12k_mac_unregister(struct ath12k_hw_group *ag)
 {
 	struct ath12k_hw *ah;
 	int i;
 
-	for (i = ab->num_hw - 1; i >= 0; i--) {
-		ah = ab->ah[i];
+	for (i = ag->num_hw - 1; i >= 0; i--) {
+		ah = ath12k_ag_to_ah(ag, i);
 		if (!ah)
 			continue;
 
@@ -9883,12 +12867,13 @@
 	ieee80211_free_hw(ah->hw);
 }
 
-static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab,
+static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_hw_group *ag,
 						struct ath12k_pdev_map *pdev_map,
 						u8 num_pdev_map)
 {
 	struct ieee80211_hw *hw;
 	struct ath12k *ar;
+	struct ath12k_base *ab;
 	struct ath12k_pdev *pdev;
 	struct ath12k_hw *ah;
 	int i;
@@ -9904,6 +12889,7 @@
 	ah->num_radio = num_pdev_map;
 
 	mutex_init(&ah->hw_mutex);
+	INIT_LIST_HEAD(&ah->ml_peers);
 
 	for (i = 0; i < num_pdev_map; i++) {
 		ab = pdev_map[i].ab;
@@ -9918,54 +12904,116 @@
 		ar->pdev_idx = pdev_idx;
 		pdev->ar = ar;
 
+		ag->hw_links[ar->hw_link_id].device_id = ab->device_id;
+		ag->hw_links[ar->hw_link_id].pdev_idx = pdev_idx;
+
 		ath12k_mac_setup(ar);
+		ath12k_dp_pdev_pre_alloc(ar);
 	}
 
 	return ah;
 }
 
-void ath12k_mac_destroy(struct ath12k_base *ab)
+void ath12k_mac_destroy(struct ath12k_hw_group *ag)
 {
 	struct ath12k_pdev *pdev;
-	int i;
+	struct ath12k_base *ab = ag->ab[0];
+	int i, j;
+	struct ath12k_hw *ah;
 
-	for (i = 0; i < ab->num_radios; i++) {
-		pdev = &ab->pdevs[i];
-		if (!pdev->ar)
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+		if (!ab)
 			continue;
 
+		for (j = 0; j < ab->num_radios; j++) {
+			pdev = &ab->pdevs[j];
+			if (!pdev->ar)
+				continue;
 		pdev->ar = NULL;
 	}
+	}
 
-	for (i = 0; i < ab->num_hw; i++) {
-		if (!ab->ah[i])
+	for (i = 0; i < ag->num_hw; i++) {
+		ah = ath12k_ag_to_ah(ag, i);
+		if (!ah)
 			continue;
 
-		ath12k_mac_hw_destroy(ab->ah[i]);
-		ab->ah[i] = NULL;
+		ath12k_mac_hw_destroy(ah);
+		ath12k_ag_set_ah(ag, i, NULL);
 	}
 }
 
-int ath12k_mac_allocate(struct ath12k_base *ab)
+static void ath12k_mac_set_device_defaults(struct ath12k_base *ab)
 {
+	/* Initialize channel counters frequency value in hertz */
+	ab->cc_freq_hz = 320000;
+	ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS)) - 1;
+}
+
+int ath12k_mac_allocate(struct ath12k_hw_group *ag)
+{
+	struct ath12k_pdev_map pdev_map[ATH12K_GROUP_MAX_RADIO];
+	int mac_id, device_id, total_radio, num_hw;
+	struct ath12k_base *ab;
 	struct ath12k_hw *ah;
-	struct ath12k_pdev_map pdev_map[MAX_RADIOS];
 	int ret, i, j;
 	u8 radio_per_hw;
 
-	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags))
-		return 0;
+	total_radio = 0;
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
 
-	ab->num_hw = ab->num_radios;
+		ath12k_mac_set_device_defaults(ab);
+		total_radio += ab->num_radios;
+	}
+
+	if (!total_radio)
+		return -EINVAL;
+
+	if (WARN_ON(total_radio > ATH12K_GROUP_MAX_RADIO))
+		return -ENOSPC;
+
+	/* All pdevs are combined and registered as a single wiphy based on
+	 * the hardware group when it participates in multi-link operation;
+	 * otherwise each pdev is registered separately.
+	 */
+	if (ag->mlo_capable)
+		radio_per_hw = total_radio;
+	else
 	radio_per_hw = 1;
 
-	for (i = 0; i < ab->num_hw; i++) {
+	num_hw = total_radio / radio_per_hw;
+
+	ag->num_hw = 0;
+	device_id = 0;
+	mac_id = 0;
+	for (i = 0; i < num_hw; i++) {
 		for (j = 0; j < radio_per_hw; j++) {
+			if (device_id >= ag->num_devices || !ag->ab[device_id]) {
+				ret = -ENOSPC;
+				goto err;
+			}
+
+			ab = ag->ab[device_id];
 			pdev_map[j].ab = ab;
-			pdev_map[j].pdev_idx = (i * radio_per_hw) + j;
+			pdev_map[j].pdev_idx = mac_id;
+			mac_id++;
+
+			/* If mac_id falls beyond the current device MACs then
+			 * move to next device
+			 */
+			if (mac_id >= ab->num_radios) {
+				mac_id = 0;
+				device_id++;
+			}
 		}
 
-		ah = ath12k_mac_hw_allocate(ab, pdev_map, radio_per_hw);
+		ab = pdev_map->ab;
+
+		ah = ath12k_mac_hw_allocate(ag, pdev_map, radio_per_hw);
 		if (!ah) {
 			ath12k_warn(ab, "failed to allocate mac80211 hw device for hw_idx %d\n",
 				    i);
@@ -9973,20 +13021,22 @@
 			goto err;
 		}
 
-		ab->ah[i] = ah;
-	}
+		ah->dev = ab->dev;
 
-	ath12k_dp_pdev_pre_alloc(ab);
+		ag->ah[i] = ah;
+		ag->num_hw++;
+	}
 
 	return 0;
 
 err:
 	for (i = i - 1; i >= 0; i--) {
-		if (!ab->ah[i])
+		ah = ath12k_ag_to_ah(ag, i);
+		if (!ah)
 			continue;
 
-		ath12k_mac_hw_destroy(ab->ah[i]);
-		ab->ah[i] = NULL;
+		ath12k_mac_hw_destroy(ah);
+		ath12k_ag_set_ah(ag, i, NULL);
 	}
 
 	return ret;
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/mac.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/mac.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/mac.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/mac.h	2025-09-25 17:40:34.159360244 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_MAC_H
@@ -14,6 +14,7 @@
 struct ath12k;
 struct ath12k_base;
 struct ath12k_hw;
+struct ath12k_hw_group;
 struct ath12k_pdev_map;
 
 struct ath12k_generic_iter {
@@ -32,11 +33,16 @@
 #define ATH12K_KEEPALIVE_MAX_IDLE		3895
 #define ATH12K_KEEPALIVE_MAX_UNRESPONSIVE	3900
 
+#define ATH12K_PDEV_TX_POWER_INVALID		((u32)-1)
+#define ATH12K_PDEV_TX_POWER_REFRESH_TIME_MSECS	5000 /* msecs */
+
 /* FIXME: should these be in ieee80211.h? */
 #define IEEE80211_VHT_MCS_SUPPORT_0_11_MASK	GENMASK(23, 16)
 #define IEEE80211_DISABLE_VHT_MCS_SUPPORT_0_11	BIT(24)
 
 #define ATH12K_CHAN_WIDTH_NUM			14
+#define ATH12K_BW_NSS_MAP_ENABLE		BIT(31)
+#define ATH12K_PEER_RX_NSS_160MHZ		GENMASK(2, 0)
 
 #define ATH12K_TX_POWER_MAX_VAL	70
 #define ATH12K_TX_POWER_MIN_VAL	0
@@ -44,6 +50,29 @@
 #define ATH12K_DEFAULT_LINK_ID	0
 #define ATH12K_INVALID_LINK_ID	255
 
+/* Default link after the IEEE802.11 defined Max link id limit
+ * for driver usage purpose.
+ */
+#define ATH12K_DEFAULT_SCAN_LINK	IEEE80211_MLD_MAX_NUM_LINKS
+/* Define 1 scan link for each radio for parallel scan purposes */
+#define ATH12K_NUM_MAX_LINKS	(IEEE80211_MLD_MAX_NUM_LINKS + ATH12K_GROUP_MAX_RADIO)
+#define ATH12K_SCAN_LINKS_MASK	GENMASK(ATH12K_NUM_MAX_LINKS, IEEE80211_MLD_MAX_NUM_LINKS)
+
+#define HECAP_PHY_SUBFMR_GET(hecap_phy) \
+	u8_get_bits(hecap_phy[3], IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER)
+
+#define HECAP_PHY_SUBFME_GET(hecap_phy) \
+	u8_get_bits(hecap_phy[4], IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE)
+
+#define HECAP_PHY_MUBFMR_GET(hecap_phy) \
+	u8_get_bits(hecap_phy[4], IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER)
+
+#define HECAP_PHY_ULMUMIMO_GET(hecap_phy) \
+	u8_get_bits(hecap_phy[2], IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO)
+
+#define HECAP_PHY_ULOFDMA_GET(hecap_phy) \
+	u8_get_bits(hecap_phy[2], IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO)
+
 enum ath12k_supported_bw {
 	ATH12K_BW_20    = 0,
 	ATH12K_BW_40    = 1,
@@ -52,12 +81,17 @@
 	ATH12K_BW_320   = 4,
 };
 
+struct ath12k_mac_get_any_chanctx_conf_arg {
+	struct ath12k *ar;
+	struct ieee80211_chanctx_conf *chanctx_conf;
+};
+
 extern const struct htt_rx_ring_tlv_filter ath12k_mac_mon_status_filter_default;
 
-void ath12k_mac_destroy(struct ath12k_base *ab);
-void ath12k_mac_unregister(struct ath12k_base *ab);
-int ath12k_mac_register(struct ath12k_base *ab);
-int ath12k_mac_allocate(struct ath12k_base *ab);
+void ath12k_mac_destroy(struct ath12k_hw_group *ag);
+void ath12k_mac_unregister(struct ath12k_hw_group *ag);
+int ath12k_mac_register(struct ath12k_hw_group *ag);
+int ath12k_mac_allocate(struct ath12k_hw_group *ag);
 int ath12k_mac_hw_ratecode_to_legacy_rate(u8 hw_rc, u8 preamble, u8 *rateidx,
 					  u16 *rate);
 u8 ath12k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
@@ -89,5 +123,21 @@
 				 enum wmi_sta_keepalive_method method,
 				 u32 interval);
 u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar);
-
+int ath12k_mac_mlo_setup(struct ath12k_hw_group *ag);
+int ath12k_mac_mlo_ready(struct ath12k_hw_group *ag);
+void ath12k_mac_mlo_teardown(struct ath12k_hw_group *ag);
+int ath12k_mac_vdev_stop(struct ath12k_link_vif *arvif);
+void ath12k_mac_get_any_chanctx_conf_iter(struct ieee80211_hw *hw,
+					  struct ieee80211_chanctx_conf *conf,
+					  void *data);
+u16 ath12k_mac_he_convert_tones_to_ru_tones(u16 tones);
+enum nl80211_eht_ru_alloc ath12k_mac_eht_ru_tones_to_nl80211_eht_ru_alloc(u16 ru_tones);
+enum nl80211_eht_gi ath12k_mac_eht_gi_to_nl80211_eht_gi(u8 sgi);
+struct ieee80211_bss_conf *ath12k_mac_get_link_bss_conf(struct ath12k_link_vif *arvif);
+struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    u8 link_id);
+int ath12k_mac_get_fw_stats(struct ath12k *ar, struct ath12k_fw_stats_req_params *param);
+void ath12k_mac_update_freq_range(struct ath12k *ar,
+				  u32 freq_low, u32 freq_high);
 #endif
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/mhi.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/mhi.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/mhi.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/mhi.c	2025-09-25 17:40:34.159360244 +0200
@@ -315,7 +315,7 @@
 {
 	struct ath12k_base *ab = ab_pci->ab;
 	struct mhi_controller *mhi_ctrl;
-	unsigned int board_id;
+	unsigned int board_id = 0;
 	int ret;
 	bool dualmac = false;
 
@@ -324,6 +324,7 @@
 		return -ENOMEM;
 
 	ab_pci->mhi_pre_cb = MHI_CB_INVALID;
+
 	ab_pci->mhi_ctrl = mhi_ctrl;
 	mhi_ctrl->cntrl_dev = ab->dev;
 	mhi_ctrl->regs = ab->mem;
@@ -338,12 +339,40 @@
 		if (!board_id || (board_id == OTP_INVALID_BOARD_ID)) {
 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
 				   "failed to read board id\n");
-		} else if (board_id & OTP_VALID_DUALMAC_BOARD_ID_MASK) {
+		}
+	}
+
+	if (ab->hw_params->otp_board_id_register &&
+	    (!board_id || (board_id == OTP_INVALID_BOARD_ID))) {
+		u16 ov_board_id;
+
+		/* look for override */
+		if (ath12k_pci_has_board_id_override(ab, &ov_board_id)) {
+			board_id = ov_board_id;
+			ath12k_info(ab,
+				    "overriding board-id to 0x%x (%d)\n",
+				    board_id, board_id);
+		} else {
+			ath12k_err(ab,
+				   "device has invalid board-id (0x%04x), "
+				   "to use this card you need to setup "
+				   "%s/%s/%s file with "
+				   "this line:\n  pci:%s=<board_id>\n",
+				   board_id,
+				   ATH12K_FW_DIR, ab->hw_params->fw.dir,
+				   ATH12K_BOARD_OVERRIDE_FILE,
+				   pci_name(ab_pci->pdev));
+			ret = -EIO;
+			goto free_controller;
+		}
+	}
+
+	if (board_id != OTP_INVALID_BOARD_ID &&
+	    (board_id & OTP_VALID_DUALMAC_BOARD_ID_MASK)) {
 			dualmac = true;
 			ath12k_dbg(ab, ATH12K_DBG_BOOT,
 				   "dualmac fw selected for board id: %x\n", board_id);
 		}
-	}
 
 	if (dualmac) {
 		if (ab->fw.amss_dualmac_data && ab->fw.amss_dualmac_len > 0) {
@@ -351,9 +380,15 @@
 			mhi_ctrl->fw_data = ab->fw.amss_dualmac_data;
 			mhi_ctrl->fw_sz = ab->fw.amss_dualmac_len;
 		} else {
-			ath12k_warn(ab, "dualmac firmware IE not present in firmware-N.bin\n");
-			ret = -ENOENT;
-			goto free_controller;
+			/* use the old separate mhi.bin MHI firmware file */
+			ath12k_core_create_firmware_path(ab, ATH12K_AMSS_DUAL_FILE,
+							 ab_pci->amss_path,
+							 sizeof(ab_pci->amss_path));
+			__set_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID,
+				  ab->fw.fw_features);
+			__set_bit(ATH12K_FW_FEATURE_MLO,
+				  ab->fw.fw_features);
+			mhi_ctrl->fw_image = ab_pci->amss_path;
 		}
 	} else {
 		if (ab->fw.amss_data && ab->fw.amss_len > 0) {
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/pci.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/pci.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/pci.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/pci.c	2025-09-25 17:40:34.159360244 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/module.h>
@@ -15,6 +15,7 @@
 #include "hif.h"
 #include "mhi.h"
 #include "debug.h"
+#include "debugfs.h"
 
 #define ATH12K_PCI_BAR_NUM		0
 #define ATH12K_PCI_DMA_MASK		32
@@ -45,6 +46,17 @@
 #define DOMAIN_NUMBER_MASK		GENMASK(7, 4)
 #define BUS_NUMBER_MASK			GENMASK(3, 0)
 
+static bool ath12k_bdf_search_pci_subsys_and_board;
+module_param_named(bdf_search_pci_subsys_and_board,
+		   ath12k_bdf_search_pci_subsys_and_board,
+		   bool, 0644);
+MODULE_PARM_DESC(bdf_search_pci_subsys_and_board, "enable board-2.bin BDF "
+		 "search lookup using pci subsystem IDs and board");
+
+static bool __read_mostly ath12k_napi_threaded = false;
+module_param_named(napi_threaded, ath12k_napi_threaded, bool, 0644);
+MODULE_PARM_DESC(napi_threaded, "Enable threaded napi");
+
 static const struct pci_device_id ath12k_pci_id_table[] = {
 	{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
 	{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
@@ -56,12 +68,12 @@
 /* TODO: revisit IRQ mapping for new SRNG's */
 static const struct ath12k_msi_config ath12k_msi_config[] = {
 	{
-		.total_vectors = 16,
+		.total_vectors = 32,
 		.total_users = 3,
 		.users = (struct ath12k_msi_user[]) {
 			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
 			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
-			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
+			{ .name = "DP", .num_vectors = 16, .base_vector = 8 },
 		},
 	},
 };
@@ -348,8 +360,11 @@
 	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
 		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
 
-		for (j = 0; j < irq_grp->num_irq; j++)
+		for (j = 0; j < irq_grp->num_irq; j++) {
 			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
+			kfree(ab->irq_names[irq_grp->irqs[j]]);
+			ab->irq_names[irq_grp->irqs[j]] = NULL;
+		}
 
 		netif_napi_del(&irq_grp->napi);
 		free_netdev(irq_grp->napi_ndev);
@@ -483,8 +498,11 @@
 
 		ath12k_pci_ext_grp_disable(irq_grp);
 
+		if (irq_grp->napi_enabled) {
 		napi_synchronize(&irq_grp->napi);
 		napi_disable(&irq_grp->napi);
+			irq_grp->napi_enabled = false;
+		}
 	}
 }
 
@@ -588,6 +606,11 @@
 			goto fail_allocate;
 		}
 
+		snprintf(irq_grp->napi_ndev->name,
+			 sizeof(irq_grp->napi_ndev->name),
+			 "ath12k-%s",
+			 dev_name(ab->dev));
+
 		netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
 			       ath12k_pci_ext_grp_napi_poll);
 
@@ -608,16 +631,23 @@
 			int irq_idx = irq_grp->irqs[j];
 			int vector = (i % num_vectors) + base_vector;
 			int irq = ath12k_pci_get_msi_irq(ab->dev, vector);
+			u8 bus_id = pci_domain_nr(ab_pci->pdev->bus);
+			char dp_irq_name[32];
 
 			ab->irq_num[irq_idx] = irq;
 
+			scnprintf(dp_irq_name, sizeof(dp_irq_name),
+				  "pci%u_wlan_dp_%u", bus_id, i);
+			ab->irq_names[irq_idx] = kstrdup(dp_irq_name,
+							 GFP_KERNEL);
+
 			ath12k_dbg(ab, ATH12K_DBG_PCI,
 				   "irq:%d group:%d\n", irq, i);
 
 			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
 			ret = request_irq(irq, ath12k_pci_ext_interrupt_handler,
 					  ab_pci->irq_flags,
-					  "DP_EXT_IRQ", irq_grp);
+					  ab->irq_names[irq_idx], irq_grp);
 			if (ret) {
 				ath12k_err(ab, "failed request irq %d: %d\n",
 					   vector, ret);
@@ -1114,7 +1144,13 @@
 	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
 		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
 
+		if (!irq_grp->napi_enabled) {
+			dev_set_threaded(irq_grp->napi_ndev,
+					 ath12k_napi_threaded);
 		napi_enable(&irq_grp->napi);
+			irq_grp->napi_enabled = true;
+		}
+
 		ath12k_pci_ext_grp_enable(irq_grp);
 	}
 
@@ -1123,6 +1159,9 @@
 
 void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
 {
+	if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
+		return;
+
 	__ath12k_pci_ext_irq_disable(ab);
 	ath12k_pci_sync_ext_irqs(ab);
 }
@@ -1147,6 +1186,11 @@
 
 void ath12k_pci_stop(struct ath12k_base *ab)
 {
+	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+
+	if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
+		return;
+
 	ath12k_pci_ce_irq_disable_sync(ab);
 	ath12k_ce_cleanup_pipes(ab);
 }
@@ -1476,6 +1520,9 @@
 {
 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
 
+	if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
+		return;
+
 	/* restore aspm in case firmware bootup fails */
 	ath12k_pci_aspm_restore(ab_pci);
 
@@ -1516,6 +1563,29 @@
 #endif
 };
 
+bool ath12k_pci_has_board_id_override(struct ath12k_base *ab,
+				      u16 *ov_board_id)
+{
+	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
+	struct pci_dev *pdev = ab_pci->pdev;
+	const struct ath12k_bid_override *ov;
+
+	list_for_each_entry(ov, &ab->board_id_overrides, next) {
+		if (ov->domain != pci_domain_nr(pdev->bus))
+			continue;
+
+		if (ov->bus_nr != pdev->bus->number)
+			continue;
+
+		if (pdev->devfn != PCI_DEVFN(ov->slot, ov->func))
+			continue;
+
+		*ov_board_id = ov->board_id;
+		return true;
+	}
+	return false;
+}
+
 static
 void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 *major, u32 *minor)
 {
@@ -1553,6 +1623,7 @@
 	ab_pci->ab = ab;
 	ab_pci->pdev = pdev;
 	ab->hif.ops = &ath12k_pci_hif_ops;
+	ab->fw_mode = ATH12K_FIRMWARE_MODE_NORMAL;
 	pci_set_drvdata(pdev, ab);
 	spin_lock_init(&ab_pci->window_lock);
 
@@ -1571,6 +1642,9 @@
 	ab->id.subsystem_vendor = pdev->subsystem_vendor;
 	ab->id.subsystem_device = pdev->subsystem_device;
 
+	if (ath12k_bdf_search_pci_subsys_and_board)
+		ab->id.bdf_search = ATH12K_BDF_SEARCH_PCI_SUBSYS_AND_BOARD;
+
 	switch (pci_dev->device) {
 	case QCN9274_DEVICE_ID:
 		ab_pci->msi_config = &ath12k_msi_config[0];
@@ -1692,12 +1766,12 @@
 err_mhi_unregister:
 	ath12k_mhi_unregister(ab_pci);
 
-err_pci_msi_free:
-	ath12k_pci_msi_free(ab_pci);
-
 err_irq_affinity_cleanup:
 	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
 
+err_pci_msi_free:
+	ath12k_pci_msi_free(ab_pci);
+
 err_pci_free_region:
 	ath12k_pci_free_region(ab_pci);
 
@@ -1717,6 +1791,9 @@
 	if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
 		ath12k_pci_power_down(ab, false);
 		ath12k_qmi_deinit_service(ab);
+		ath12k_core_hw_group_unassign(ab);
+		ath12k_core_panic_notifier_unregister(ab);
+		ath12k_debugfs_soc_destroy(ab);
 		goto qmi_fail;
 	}
 
@@ -1739,13 +1816,37 @@
 	ath12k_core_free(ab);
 }
 
+static void ath12k_pci_hw_group_power_down(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int i;
+
+	if (!ag)
+		return;
+
+	mutex_lock(&ag->mutex);
+
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		ath12k_pci_power_down(ab, false);
+	}
+
+	mutex_unlock(&ag->mutex);
+}
+
 static void ath12k_pci_shutdown(struct pci_dev *pdev)
 {
 	struct ath12k_base *ab = pci_get_drvdata(pdev);
 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
 
+	if (!ath12k_en_shutdown)
+		return;
+
 	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
-	ath12k_pci_power_down(ab, false);
+	ath12k_pci_hw_group_power_down(ab->ag);
 }
 
 static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
@@ -1830,6 +1931,7 @@
 static void ath12k_pci_exit(void)
 {
 	pci_unregister_driver(&ath12k_pci_driver);
+	ath12k_cache_cleanup();
 }
 
 module_exit(ath12k_pci_exit);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/pci.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/pci.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/pci.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/pci.h	2025-09-25 17:40:34.159360244 +0200
@@ -123,6 +123,8 @@
 	return (struct ath12k_pci *)ab->drv_priv;
 }
 
+bool ath12k_pci_has_board_id_override(struct ath12k_base *ab,
+				      u16 *ov_board_id);
 int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
 				       int *num_vectors, u32 *user_base_data,
 				       u32 *base_vector);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/peer.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/peer.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/peer.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/peer.c	2025-09-25 17:40:34.159360244 +0200
@@ -8,6 +8,22 @@
 #include "peer.h"
 #include "debug.h"
 
+static struct ath12k_ml_peer *ath12k_peer_ml_find(struct ath12k_hw *ah, const u8 *addr)
+{
+	struct ath12k_ml_peer *ml_peer;
+
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
+	list_for_each_entry(ml_peer, &ah->ml_peers, list) {
+		if (!ether_addr_equal(ml_peer->addr, addr))
+			continue;
+
+		return ml_peer;
+	}
+
+	return NULL;
+}
+
 struct ath12k_peer *ath12k_peer_find(struct ath12k_base *ab, int vdev_id,
 				     const u8 *addr)
 {
@@ -63,6 +79,20 @@
 	return NULL;
 }
 
+static struct ath12k_peer *ath12k_peer_find_by_ml_id(struct ath12k_base *ab,
+						     int ml_peer_id)
+{
+	struct ath12k_peer *peer;
+
+	lockdep_assert_held(&ab->base_lock);
+
+	list_for_each_entry(peer, &ab->peers, list)
+		if (ml_peer_id == peer->ml_id)
+			return peer;
+
+	return NULL;
+}
+
 struct ath12k_peer *ath12k_peer_find_by_id(struct ath12k_base *ab,
 					   int peer_id)
 {
@@ -70,6 +100,9 @@
 
 	lockdep_assert_held(&ab->base_lock);
 
+	if (peer_id & ATH12K_PEER_ML_ID_VALID)
+		return ath12k_peer_find_by_ml_id(ab, peer_id);
+
 	list_for_each_entry(peer, &ab->peers, list)
 		if (peer_id == peer->peer_id)
 			return peer;
@@ -93,20 +126,6 @@
 	return false;
 }
 
-struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab,
-					    int ast_hash)
-{
-	struct ath12k_peer *peer;
-
-	lockdep_assert_held(&ab->base_lock);
-
-	list_for_each_entry(peer, &ab->peers, list)
-		if (ast_hash == peer->ast_hash)
-			return peer;
-
-	return NULL;
-}
-
 void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id)
 {
 	struct ath12k_peer *peer;
@@ -231,8 +250,9 @@
 	return 0;
 }
 
-int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
+static int ath12k_peer_delete_send(struct ath12k *ar, u32 vdev_id, const u8 *addr)
 {
+	struct ath12k_base *ab = ar->ab;
 	int ret;
 
 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -241,12 +261,25 @@
 
 	ret = ath12k_wmi_send_peer_delete_cmd(ar, addr, vdev_id);
 	if (ret) {
-		ath12k_warn(ar->ab,
+		ath12k_warn(ab,
 			    "failed to delete peer vdev_id %d addr %pM ret %d\n",
 			    vdev_id, addr, ret);
 		return ret;
 	}
 
+	return 0;
+}
+
+int ath12k_peer_delete(struct ath12k *ar, u32 vdev_id, u8 *addr)
+{
+	int ret;
+
+	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
+
+	ret = ath12k_peer_delete_send(ar, vdev_id, addr);
+	if (ret)
+		return ret;
+
 	ret = ath12k_wait_for_peer_delete_done(ar, vdev_id, addr);
 	if (ret)
 		return ret;
@@ -266,7 +299,11 @@
 		       struct ath12k_wmi_peer_create_arg *arg)
 {
 	struct ieee80211_vif *vif = ath12k_ahvif_to_vif(arvif->ahvif);
+	struct ath12k_link_sta *arsta;
+	u8 link_id = arvif->link_id;
 	struct ath12k_peer *peer;
+	struct ath12k_sta *ahsta;
+	u16 ml_peer_id;
 	int ret;
 
 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
@@ -326,12 +363,39 @@
 
 	peer->pdev_idx = ar->pdev_idx;
 	peer->sta = sta;
+	peer->vif = vif;
 
 	if (vif->type == NL80211_IFTYPE_STATION) {
 		arvif->ast_hash = peer->ast_hash;
 		arvif->ast_idx = peer->hw_peer_id;
 	}
 
+	if (vif->type == NL80211_IFTYPE_AP)
+		peer->is_reset_mcbc = true;
+
+	if (sta) {
+		ahsta = ath12k_sta_to_ahsta(sta);
+		arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
+					  ahsta->link[link_id]);
+
+		peer->link_id = arsta->link_id;
+
+		/* Fill ML info into created peer */
+		if (sta->mlo) {
+			ml_peer_id = ahsta->ml_peer_id;
+			peer->ml_id = ml_peer_id | ATH12K_PEER_ML_ID_VALID;
+			ether_addr_copy(peer->ml_addr, sta->addr);
+
+			/* the assoc link is considered primary for now */
+			peer->primary_link = arsta->is_assoc_link;
+			peer->mlo = true;
+		} else {
+			peer->ml_id = ATH12K_MLO_PEER_ID_INVALID;
+			peer->primary_link = true;
+			peer->mlo = false;
+		}
+	}
+
 	peer->sec_type = HAL_ENCRYPT_TYPE_OPEN;
 	peer->sec_type_grp = HAL_ENCRYPT_TYPE_OPEN;
 
@@ -341,3 +405,150 @@
 
 	return 0;
 }
+
+static u16 ath12k_peer_ml_alloc(struct ath12k_hw *ah)
+{
+	u16 ml_peer_id;
+
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
+	for (ml_peer_id = 0; ml_peer_id < ATH12K_MAX_MLO_PEERS; ml_peer_id++) {
+		if (test_bit(ml_peer_id, ah->free_ml_peer_id_map))
+			continue;
+
+		set_bit(ml_peer_id, ah->free_ml_peer_id_map);
+		break;
+	}
+
+	if (ml_peer_id == ATH12K_MAX_MLO_PEERS)
+		ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+
+	return ml_peer_id;
+}
+
+int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta)
+{
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_ml_peer *ml_peer;
+
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
+	if (!sta->mlo)
+		return -EINVAL;
+
+	ml_peer = ath12k_peer_ml_find(ah, sta->addr);
+	if (ml_peer) {
+		ath12k_hw_warn(ah, "ML peer %d exists already, unable to add new entry for %pM",
+			       ml_peer->id, sta->addr);
+		return -EEXIST;
+	}
+
+	ml_peer = kzalloc(sizeof(*ml_peer), GFP_ATOMIC);
+	if (!ml_peer)
+		return -ENOMEM;
+
+	ahsta->ml_peer_id = ath12k_peer_ml_alloc(ah);
+
+	if (ahsta->ml_peer_id == ATH12K_MLO_PEER_ID_INVALID) {
+		ath12k_hw_warn(ah, "unable to allocate ML peer id for sta %pM",
+			       sta->addr);
+		kfree(ml_peer);
+		return -ENOMEM;
+	}
+
+	ether_addr_copy(ml_peer->addr, sta->addr);
+	ml_peer->id = ahsta->ml_peer_id;
+	list_add(&ml_peer->list, &ah->ml_peers);
+
+	return 0;
+}
+
+int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta)
+{
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(sta);
+	struct ath12k_ml_peer *ml_peer;
+
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
+	if (!sta->mlo)
+		return -EINVAL;
+
+	clear_bit(ahsta->ml_peer_id, ah->free_ml_peer_id_map);
+	ahsta->ml_peer_id = ATH12K_MLO_PEER_ID_INVALID;
+
+	ml_peer = ath12k_peer_ml_find(ah, sta->addr);
+	if (!ml_peer) {
+		ath12k_hw_warn(ah, "ML peer for %pM not found", sta->addr);
+		return -EINVAL;
+	}
+
+	list_del(&ml_peer->list);
+	kfree(ml_peer);
+
+	return 0;
+}
+
+int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta)
+{
+	struct ieee80211_sta *sta = ath12k_ahsta_to_sta(ahsta);
+	struct ath12k_hw *ah = ahvif->ah;
+	struct ath12k_link_vif *arvif;
+	struct ath12k_link_sta *arsta;
+	unsigned long links;
+	struct ath12k *ar;
+	int ret, err_ret = 0;
+	u8 link_id;
+
+	lockdep_assert_wiphy(ah->hw->wiphy);
+
+	if (!sta->mlo)
+		return -EINVAL;
+
+	/* FW expects delete of all link peers at once before waiting for reception
+	 * of peer unmap or delete responses
+	 */
+	links = ahsta->links_map;
+	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+		arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+		if (!arvif || !arsta)
+			continue;
+
+		ar = arvif->ar;
+		if (!ar)
+			continue;
+
+		ath12k_dp_peer_cleanup(ar, arvif->vdev_id, arsta->addr);
+
+		ret = ath12k_peer_delete_send(ar, arvif->vdev_id, arsta->addr);
+		if (ret) {
+			ath12k_warn(ar->ab,
+				    "failed to delete peer vdev_id %d addr %pM ret %d\n",
+				    arvif->vdev_id, arsta->addr, ret);
+			err_ret = ret;
+			continue;
+		}
+	}
+
+	/* Ensure all link peers are deleted and unmapped */
+	links = ahsta->links_map;
+	for_each_set_bit(link_id, &links, IEEE80211_MLD_MAX_NUM_LINKS) {
+		arvif = wiphy_dereference(ah->hw->wiphy, ahvif->link[link_id]);
+		arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+		if (!arvif || !arsta)
+			continue;
+
+		ar = arvif->ar;
+		if (!ar)
+			continue;
+
+		ret = ath12k_wait_for_peer_delete_done(ar, arvif->vdev_id, arsta->addr);
+		if (ret) {
+			err_ret = ret;
+			continue;
+		}
+		ar->num_peers--;
+	}
+
+	return err_ret;
+}
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/peer.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/peer.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/peer.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/peer.h	2025-09-25 17:40:34.159360244 +0200
@@ -19,9 +19,12 @@
 	u32 resp_rate_flags;
 };
 
+#define ATH12K_PEER_ML_ID_VALID         BIT(13)
+
 struct ath12k_peer {
 	struct list_head list;
 	struct ieee80211_sta *sta;
+	struct ieee80211_vif *vif;
 	int vdev_id;
 	u8 addr[ETH_ALEN];
 	int peer_id;
@@ -44,9 +47,29 @@
 	struct ppdu_user_delayba ppdu_stats_delayba;
 	bool delayba_flag;
 	bool is_authorized;
-
+	bool mlo;
 	/* protected by ab->data_lock */
 	bool dp_setup_done;
+
+	u16 ml_id;
+
+	/* any other ML info common for all partners can be added
+	 * here and would be same for all partner peers.
+	 */
+	u8 ml_addr[ETH_ALEN];
+
+	/* To ensure only certain work related to dp is done once */
+	bool primary_link;
+
+	/* for reference to ath12k_link_sta */
+	u8 link_id;
+	bool is_reset_mcbc;
+};
+
+struct ath12k_ml_peer {
+	struct list_head list;
+	u8 addr[ETH_ALEN];
+	u16 id;
 };
 
 void ath12k_peer_unmap_event(struct ath12k_base *ab, u16 peer_id);
@@ -65,6 +88,8 @@
 int ath12k_wait_for_peer_delete_done(struct ath12k *ar, u32 vdev_id,
 				     const u8 *addr);
 bool ath12k_peer_exist_by_vdev_id(struct ath12k_base *ab, int vdev_id);
-struct ath12k_peer *ath12k_peer_find_by_ast(struct ath12k_base *ab, int ast_hash);
+int ath12k_peer_ml_create(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+int ath12k_peer_ml_delete(struct ath12k_hw *ah, struct ieee80211_sta *sta);
+int ath12k_peer_mlo_link_peers_delete(struct ath12k_vif *ahvif, struct ath12k_sta *ahsta);
 
 #endif /* _PEER_H_ */
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/qmi.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/qmi.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/qmi.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/qmi.c	2025-09-25 17:40:34.159360244 +0200
@@ -1,13 +1,14 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #include <linux/elf.h>
 
 #include "qmi.h"
 #include "core.h"
+#include "pci.h"
 #include "debug.h"
 #include <linux/of.h>
 #include <linux/firmware.h>
@@ -17,6 +18,10 @@
 #define PLATFORM_CAP_PCIE_GLOBAL_RESET	0x08
 #define ATH12K_QMI_MAX_CHUNK_SIZE	2097152
 
+static bool ath12k_skip_caldata;
+module_param_named(skip_caldata, ath12k_skip_caldata, bool, 0444);
+MODULE_PARM_DESC(skip_caldata, "Skip caldata download");
+
 static const struct qmi_elem_info wlfw_host_mlo_chip_info_s_v01_ei[] = {
 	{
 		.data_type      = QMI_UNSIGNED_1_BYTE,
@@ -2016,26 +2021,100 @@
 	},
 };
 
-static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
+struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_OPT_FLAG,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ini_req_msg_v01,
+					   enablefwlog_valid),
+	},
+	{
+		.data_type      = QMI_UNSIGNED_1_BYTE,
+		.elem_len       = 1,
+		.elem_size      = sizeof(u8),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x10,
+		.offset         = offsetof(struct wlfw_ini_req_msg_v01,
+					   enablefwlog),
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
+	{
+		.data_type      = QMI_STRUCT,
+		.elem_len       = 1,
+		.elem_size      = sizeof(struct qmi_response_type_v01),
+		.array_type     = NO_ARRAY,
+		.tlv_type       = 0x02,
+		.offset         = offsetof(struct wlfw_ini_resp_msg_v01,
+					   resp),
+		.ei_array       = qmi_response_type_v01_ei,
+	},
+	{
+		.data_type      = QMI_EOTI,
+		.array_type     = NO_ARRAY,
+		.tlv_type       = QMI_COMMON_TLV_TYPE,
+	},
+};
+
+static void ath12k_host_cap_hw_link_id_init(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab, *partner_ab;
+	int i, j, hw_id_base;
+
+	for (i = 0; i < ag->num_devices; i++) {
+		hw_id_base = 0;
+		ab = ag->ab[i];
+
+		for (j = 0; j < ag->num_devices; j++) {
+			partner_ab = ag->ab[j];
+
+			if (partner_ab->wsi_info.index >= ab->wsi_info.index)
+				continue;
+
+			hw_id_base += partner_ab->qmi.num_radios;
+		}
+
+		ab->wsi_info.hw_link_id_base = hw_id_base;
+	}
+
+	ag->hw_link_id_init_done = true;
+}
+
+static int ath12k_host_cap_parse_mlo(struct ath12k_base *ab,
 				      struct qmi_wlanfw_host_cap_req_msg_v01 *req)
 {
 	struct wlfw_host_mlo_chip_info_s_v01 *info;
+	struct ath12k_hw_group *ag = ab->ag;
+	struct ath12k_base *partner_ab;
 	u8 hw_link_id = 0;
-	int i;
+	int i, j, ret;
 
-	if (!(ab->mlo_capable_flags & ATH12K_INTRA_DEVICE_MLO_SUPPORT)) {
+	if (!ag->mlo_capable) {
 		ath12k_dbg(ab, ATH12K_DBG_QMI,
-			   "intra device MLO is disabled hence skip QMI MLO cap");
-		return;
+			   "MLO is disabled hence skip QMI MLO cap");
+		return 0;
 	}
 
 	if (!ab->qmi.num_radios || ab->qmi.num_radios == U8_MAX) {
-		ab->mlo_capable_flags = 0;
-
+		ag->mlo_capable = false;
 		ath12k_dbg(ab, ATH12K_DBG_QMI,
 			   "skip QMI MLO cap due to invalid num_radio %d\n",
 			   ab->qmi.num_radios);
-		return;
+		return 0;
+	}
+
+	if (ab->device_id == ATH12K_INVALID_DEVICE_ID) {
+		ath12k_err(ab, "failed to send MLO cap due to invalid device id\n");
+		return -EINVAL;
 	}
 
 	req->mlo_capable_valid = 1;
@@ -2043,30 +2122,88 @@
 	req->mlo_chip_id_valid = 1;
 	req->mlo_chip_id = ab->device_id;
 	req->mlo_group_id_valid = 1;
-	req->mlo_group_id = 0;
+	req->mlo_group_id = ag->id;
 	req->max_mlo_peer_valid = 1;
 	/* Max peer number generally won't change for the same device
 	 * but needs to be synced with host driver.
 	 */
 	req->max_mlo_peer = ab->hw_params->max_mlo_peer;
 	req->mlo_num_chips_valid = 1;
-	req->mlo_num_chips = 1;
+	req->mlo_num_chips = ag->num_devices;
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo capability advertisement device_id %d group_id %d num_devices %d",
+		   req->mlo_chip_id, req->mlo_group_id, req->mlo_num_chips);
+
+	mutex_lock(&ag->mutex);
+
+	if (!ag->hw_link_id_init_done)
+		ath12k_host_cap_hw_link_id_init(ag);
+
+	for (i = 0; i < ag->num_devices; i++) {
+		info = &req->mlo_chip_info[i];
+		partner_ab = ag->ab[i];
+
+		if (partner_ab->device_id == ATH12K_INVALID_DEVICE_ID) {
+			ath12k_err(ab, "failed to send MLO cap due to invalid partner device id\n");
+			ret = -EINVAL;
+			goto device_cleanup;
+		}
+
+		info->chip_id = partner_ab->device_id;
+		info->num_local_links = partner_ab->qmi.num_radios;
+
+		ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo device id %d num_link %d\n",
+			   info->chip_id, info->num_local_links);
+
+		for (j = 0; j < info->num_local_links; j++) {
+			info->hw_link_id[j] = partner_ab->wsi_info.hw_link_id_base + j;
+			info->valid_mlo_link_id[j] = 1;
 
-	info = &req->mlo_chip_info[0];
-	info->chip_id = ab->device_id;
-	info->num_local_links = ab->qmi.num_radios;
-
-	for (i = 0; i < info->num_local_links; i++) {
-		info->hw_link_id[i] = hw_link_id;
-		info->valid_mlo_link_id[i] = 1;
+			ath12k_dbg(ab, ATH12K_DBG_QMI, "mlo hw_link_id %d\n",
+				   info->hw_link_id[j]);
 
 		hw_link_id++;
 	}
+	}
+
+	if (hw_link_id <= 0)
+		ag->mlo_capable = false;
 
 	req->mlo_chip_info_valid = 1;
+
+	mutex_unlock(&ag->mutex);
+
+	return 0;
+
+device_cleanup:
+	for (i = i - 1; i >= 0; i--) {
+		info = &req->mlo_chip_info[i];
+
+		memset(info, 0, sizeof(*info));
+	}
+
+	req->mlo_num_chips = 0;
+	req->mlo_num_chips_valid = 0;
+
+	req->max_mlo_peer = 0;
+	req->max_mlo_peer_valid = 0;
+	req->mlo_group_id = 0;
+	req->mlo_group_id_valid = 0;
+	req->mlo_chip_id = 0;
+	req->mlo_chip_id_valid = 0;
+	req->mlo_capable = 0;
+	req->mlo_capable_valid = 0;
+
+	ag->mlo_capable = false;
+
+	mutex_unlock(&ag->mutex);
+
+	return ret;
 }
 
-static int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_host_cap_send(struct ath12k_base *ab)
 {
 	struct qmi_wlanfw_host_cap_req_msg_v01 req = {};
 	struct qmi_wlanfw_host_cap_resp_msg_v01 resp = {};
@@ -2111,7 +2248,9 @@
 		req.nm_modem |= PLATFORM_CAP_PCIE_GLOBAL_RESET;
 	}
 
-	ath12k_host_cap_parse_mlo(ab, &req);
+	ret = ath12k_host_cap_parse_mlo(ab, &req);
+	if (ret < 0)
+		goto out;
 
 	ret = qmi_txn_init(&ab->qmi.handle, &txn,
 			   qmi_wlanfw_host_cap_resp_msg_v01_ei, &resp);
@@ -2174,13 +2313,6 @@
 		goto out;
 	}
 
-	if (resp.single_chip_mlo_support_valid) {
-		if (resp.single_chip_mlo_support)
-			ab->mlo_capable_flags |= ATH12K_INTRA_DEVICE_MLO_SUPPORT;
-		else
-			ab->mlo_capable_flags &= ~ATH12K_INTRA_DEVICE_MLO_SUPPORT;
-	}
-
 	if (!resp.num_phy_valid) {
 		ret = -ENODATA;
 		goto out;
@@ -2189,10 +2321,9 @@
 	ab->qmi.num_radios = resp.num_phy;
 
 	ath12k_dbg(ab, ATH12K_DBG_QMI,
-		   "phy capability resp valid %d num_phy %d valid %d board_id %d valid %d single_chip_mlo_support %d\n",
+		   "phy capability resp valid %d num_phy %d valid %d board_id %d\n",
 		   resp.num_phy_valid, resp.num_phy,
-		   resp.board_id_valid, resp.board_id,
-		   resp.single_chip_mlo_support_valid, resp.single_chip_mlo_support);
+		   resp.board_id_valid, resp.board_id);
 
 	return;
 
@@ -2275,7 +2406,9 @@
 	return ret;
 }
 
-static int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_respond_fw_mem_request(struct ath12k_base *ab)
 {
 	struct qmi_wlanfw_respond_mem_req_msg_v01 *req;
 	struct qmi_wlanfw_respond_mem_resp_msg_v01 resp = {};
@@ -2350,15 +2483,76 @@
 	return ret;
 }
 
-static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
+void ath12k_qmi_reset_mlo_mem(struct ath12k_hw_group *ag)
 {
+	struct target_mem_chunk *mlo_chunk;
 	int i;
 
-	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
+	lockdep_assert_held(&ag->mutex);
+
+	if (!ag->mlo_mem.init_done || ag->num_started)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(ag->mlo_mem.chunk); i++) {
+		mlo_chunk = &ag->mlo_mem.chunk[i];
+
+		if (mlo_chunk->v.addr)
+			/* TODO: Mode 0 recovery is the default mode hence resetting the
+			 * whole memory region for now. Once Mode 1 support is added, this
+			 * needs to be handled properly
+			 */
+			memset(mlo_chunk->v.addr, 0, mlo_chunk->size);
+	}
+}
+
+static void ath12k_qmi_free_mlo_mem_chunk(struct ath12k_base *ab,
+					  struct target_mem_chunk *chunk,
+					  int idx)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	struct target_mem_chunk *mlo_chunk;
+
+	lockdep_assert_held(&ag->mutex);
+
+	if (!ag->mlo_mem.init_done || ag->num_started)
+		return;
+
+	if (idx >= ARRAY_SIZE(ag->mlo_mem.chunk)) {
+		ath12k_warn(ab, "invalid index for MLO memory chunk free: %d\n", idx);
+		return;
+	}
+
+	mlo_chunk = &ag->mlo_mem.chunk[idx];
+	if (mlo_chunk->v.addr) {
+		ath12k_dma_free_coherent_no_dev(
+				  mlo_chunk->size,
+				  mlo_chunk->v.addr,
+				  mlo_chunk->paddr);
+		mlo_chunk->v.addr = NULL;
+	}
+
+	mlo_chunk->paddr = 0;
+	mlo_chunk->size = 0;
+	chunk->v.addr = NULL;
+	chunk->paddr = 0;
+	chunk->size = 0;
+}
+
+static void ath12k_qmi_free_target_mem_chunk(struct ath12k_base *ab)
+{
+	struct ath12k_hw_group *ag = ab->ag;
+	int i, mlo_idx;
+
+	for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
 		if (!ab->qmi.target_mem[i].v.addr)
 			continue;
 
-		dma_free_coherent(ab->dev,
+		if (ab->qmi.target_mem[i].type == MLO_GLOBAL_MEM_REGION_TYPE) {
+			ath12k_qmi_free_mlo_mem_chunk(ab,
+						      &ab->qmi.target_mem[i],
+						      mlo_idx++);
+		} else {
+			ath12k_dma_free_coherent_no_dev(
 				  ab->qmi.target_mem[i].prev_size,
 				  ab->qmi.target_mem[i].v.addr,
 				  ab->qmi.target_mem[i].paddr);
@@ -2366,25 +2560,15 @@
 	}
 }
 
-static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
-{
-	int i;
-	struct target_mem_chunk *chunk;
-
-	ab->qmi.target_mem_delayed = false;
-
-	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
-		chunk = &ab->qmi.target_mem[i];
+	if (!ag->num_started && ag->mlo_mem.init_done) {
+		ag->mlo_mem.init_done = false;
+		ag->mlo_mem.mlo_mem_size = 0;
+	}
+}
 
-		/* Allocate memory for the region and the functionality supported
-		 * on the host. For the non-supported memory region, host does not
-		 * allocate memory, assigns NULL and FW will handle this without crashing.
-		 */
-		switch (chunk->type) {
-		case HOST_DDR_REGION_TYPE:
-		case M3_DUMP_REGION_TYPE:
-		case PAGEABLE_MEM_REGION_TYPE:
-		case CALDB_MEM_REGION_TYPE:
+static int ath12k_qmi_alloc_chunk(struct ath12k_base *ab,
+				  struct target_mem_chunk *chunk)
+{
 			/* Firmware reloads in recovery/resume.
 			 * In such cases, no need to allocate memory for FW again.
 			 */
@@ -2394,12 +2578,12 @@
 					goto this_chunk_done;
 
 				/* cannot reuse the existing chunk */
-				dma_free_coherent(ab->dev, chunk->prev_size,
+		ath12k_dma_free_coherent_no_dev(chunk->prev_size,
 						  chunk->v.addr, chunk->paddr);
 				chunk->v.addr = NULL;
 			}
 
-			chunk->v.addr = dma_alloc_coherent(ab->dev,
+	chunk->v.addr = ath12k_dma_alloc_coherent_no_dev(
 							   chunk->size,
 							   &chunk->paddr,
 							   GFP_KERNEL | __GFP_NOWARN);
@@ -2411,18 +2595,84 @@
 						    chunk->size,
 						    chunk->type);
 					ath12k_qmi_free_target_mem_chunk(ab);
-					return 0;
+			return -EAGAIN;
 				}
 				ath12k_warn(ab, "memory allocation failure for %u size: %d\n",
 					    chunk->type, chunk->size);
 				return -ENOMEM;
 			}
-
 			chunk->prev_type = chunk->type;
 			chunk->prev_size = chunk->size;
 this_chunk_done:
+	return 0;
+}
+
+static int ath12k_qmi_alloc_target_mem_chunk(struct ath12k_base *ab)
+{
+	struct target_mem_chunk *chunk, *mlo_chunk;
+	struct ath12k_hw_group *ag = ab->ag;
+	int i, mlo_idx, ret;
+	int mlo_size = 0;
+
+	mutex_lock(&ag->mutex);
+
+	if (!ag->mlo_mem.init_done) {
+		memset(ag->mlo_mem.chunk, 0, sizeof(ag->mlo_mem.chunk));
+		ag->mlo_mem.init_done = true;
+	}
+
+	ab->qmi.target_mem_delayed = false;
+
+	for (i = 0, mlo_idx = 0; i < ab->qmi.mem_seg_count; i++) {
+		chunk = &ab->qmi.target_mem[i];
+
+		/* Allocate memory for the region and the functionality supported
+		 * on the host. For the non-supported memory region, host does not
+		 * allocate memory, assigns NULL and FW will handle this without crashing.
+		 */
+		switch (chunk->type) {
+		case HOST_DDR_REGION_TYPE:
+		case M3_DUMP_REGION_TYPE:
+		case PAGEABLE_MEM_REGION_TYPE:
+		case CALDB_MEM_REGION_TYPE:
+			ret = ath12k_qmi_alloc_chunk(ab, chunk);
+			if (ret)
+				goto err;
+			break;
+		case MLO_GLOBAL_MEM_REGION_TYPE:
+			mlo_size += chunk->size;
+			if (ag->mlo_mem.mlo_mem_size &&
+			    mlo_size > ag->mlo_mem.mlo_mem_size) {
+				ath12k_err(ab, "QMI MLO memory allocation failure, requested size %d is more than allocated size %d",
+					   mlo_size, ag->mlo_mem.mlo_mem_size);
+				ret = -EINVAL;
+				goto err;
+			}
+
+			mlo_chunk = &ag->mlo_mem.chunk[mlo_idx];
+			if (mlo_chunk->paddr) {
+				if (chunk->size != mlo_chunk->size) {
+					ath12k_err(ab, "QMI MLO chunk memory allocation failure for index %d, requested size %d is more than allocated size %d",
+						   mlo_idx, chunk->size, mlo_chunk->size);
+					ret = -EINVAL;
+					goto err;
+				}
+			} else {
+				mlo_chunk->size = chunk->size;
+				mlo_chunk->type = chunk->type;
+				ret = ath12k_qmi_alloc_chunk(ab, mlo_chunk);
+				if (ret)
+					goto err;
+				memset(mlo_chunk->v.addr, 0, mlo_chunk->size);
+			}
+
+			chunk->paddr = mlo_chunk->paddr;
+			chunk->v.addr = mlo_chunk->v.addr;
+			mlo_idx++;
+
 			break;
 		default:
+			if (chunk->type != AFC_MEM_REGION_TYPE)
 			ath12k_warn(ab, "memory type %u not supported\n",
 				    chunk->type);
 			chunk->paddr = 0;
@@ -2430,10 +2680,39 @@
 			break;
 		}
 	}
+
+	if (!ag->mlo_mem.mlo_mem_size) {
+		ag->mlo_mem.mlo_mem_size = mlo_size;
+	} else if (ag->mlo_mem.mlo_mem_size != mlo_size) {
+		ath12k_err(ab, "QMI MLO memory size error, expected size is %d but requested size is %d",
+			   ag->mlo_mem.mlo_mem_size, mlo_size);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	mutex_unlock(&ag->mutex);
+
 	return 0;
+
+err:
+	ath12k_qmi_free_target_mem_chunk(ab);
+
+	mutex_unlock(&ag->mutex);
+
+	/* The firmware will attempt to request memory in smaller chunks
+	 * on the next try. However, the current caller should be notified
+	 * that this instance of request parsing was successful.
+	 * Therefore, return 0 only.
+	 */
+	if (ret == -EAGAIN)
+		ret = 0;
+
+	return ret;
 }
 
-static int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_request_target_cap(struct ath12k_base *ab)
 {
 	struct qmi_wlanfw_cap_req_msg_v01 req = {};
 	struct qmi_wlanfw_cap_resp_msg_v01 resp = {};
@@ -2479,8 +2758,13 @@
 
 	if (resp.board_info_valid)
 		ab->qmi.target.board_id = resp.board_info.board_id;
-	else
+	else {
+               u16 ov_board_id;
+               if (ab->hif.bus == ATH12K_BUS_PCI &&
+		   ath12k_pci_has_board_id_override(ab, &ov_board_id))
+                       board_id = ov_board_id;
 		ab->qmi.target.board_id = board_id;
+	}
 
 	if (resp.soc_info_valid)
 		ab->qmi.target.soc_id = resp.soc_info.soc_id;
@@ -2527,6 +2811,15 @@
 	if (r)
 		ath12k_dbg(ab, ATH12K_DBG_QMI, "SMBIOS bdf variant name not set.\n");
 
+	r = ath12k_acpi_start(ab);
+	if (r)
+		/* ACPI is optional so continue in case of an error */
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "acpi failed: %d\n", r);
+
+	r = ath12k_acpi_check_bdf_variant_name(ab);
+	if (r)
+		ath12k_dbg(ab, ATH12K_DBG_BOOT, "ACPI bdf variant name not set.\n");
+
 out:
 	return ret;
 }
@@ -2619,7 +2912,9 @@
 	return ret;
 }
 
-static int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_load_bdf_qmi(struct ath12k_base *ab,
 				   enum ath12k_qmi_bdf_type type)
 {
 	struct device *dev = ab->dev;
@@ -2654,6 +2949,17 @@
 		}
 		break;
 	case ATH12K_QMI_BDF_TYPE_CALIBRATION:
+		if (ath12k_skip_caldata) {
+			if (ath12k_ftm_mode) {
+				ath12k_warn(ab, "Skipping caldata download "
+					    "in FTM mode\n");
+				goto out;
+			}
+			ath12k_err(ab, "skip_caldata=1 module param is "
+				   "unsupported when FTM mode is disabled.\n");
+			ret = -EOPNOTSUPP;
+			goto out;
+		}
 
 		if (ab->qmi.target.eeprom_caldata) {
 			file_type = ATH12K_QMI_FILE_TYPE_EEPROM;
@@ -2724,7 +3030,7 @@
 	if (!m3_mem->vaddr)
 		return;
 
-	dma_free_coherent(ab->dev, m3_mem->size,
+	ath12k_dma_free_coherent_no_dev(m3_mem->size,
 			  m3_mem->vaddr, m3_mem->paddr);
 	m3_mem->vaddr = NULL;
 	m3_mem->size = 0;
@@ -2769,7 +3075,7 @@
 		ath12k_qmi_m3_free(ab);
 	}
 
-	m3_mem->vaddr = dma_alloc_coherent(ab->dev,
+	m3_mem->vaddr = ath12k_dma_alloc_coherent_no_dev(
 					   m3_len, &m3_mem->paddr,
 					   GFP_KERNEL);
 	if (!m3_mem->vaddr) {
@@ -2791,7 +3097,9 @@
 	return ret;
 }
 
-static int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_wlanfw_m3_info_send(struct ath12k_base *ab)
 {
 	struct m3_mem_region *m3_mem = &ab->qmi.m3_mem;
 	struct qmi_wlanfw_m3_info_req_msg_v01 req = {};
@@ -3023,6 +3331,8 @@
 {
 	int ret;
 
+	clear_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags);
+
 	ret = ath12k_qmi_wlanfw_mode_send(ab, ATH12K_FIRMWARE_MODE_OFF);
 	if (ret < 0) {
 		ath12k_warn(ab, "qmi failed to send wlan mode off\n");
@@ -3079,9 +3389,69 @@
 	return 0;
 }
 
-static int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab)
 {
-	struct ath12k_base *ab = qmi->ab;
+	struct ath12k_qmi *qmi = &ab->qmi;
+
+	spin_lock(&qmi->event_lock);
+
+	if (ath12k_qmi_get_event_block(qmi))
+		ath12k_qmi_set_event_block(qmi, false);
+
+	spin_unlock(&qmi->event_lock);
+
+	ath12k_dbg(ab, ATH12K_DBG_QMI, "trigger host cap for device id %d\n",
+		   ab->device_id);
+
+	ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_HOST_CAP, NULL);
+}
+
+static bool ath12k_qmi_hw_group_host_cap_ready(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int i;
+
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+
+		if (!(ab && ab->qmi.num_radios != U8_MAX))
+			return false;
+	}
+
+	return true;
+}
+
+static struct ath12k_base *ath12k_qmi_hw_group_find_blocked(struct ath12k_hw_group *ag)
+{
+	struct ath12k_base *ab;
+	int i;
+
+	lockdep_assert_held(&ag->mutex);
+
+	for (i = 0; i < ag->num_devices; i++) {
+		ab = ag->ab[i];
+		if (!ab)
+			continue;
+
+		spin_lock(&ab->qmi.event_lock);
+
+		if (ath12k_qmi_get_event_block(&ab->qmi)) {
+			spin_unlock(&ab->qmi.event_lock);
+			return ab;
+		}
+
+		spin_unlock(&ab->qmi.event_lock);
+	}
+
+	return NULL;
+}
+
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_server_arrive(struct ath12k_qmi *qmi)
+{
+	struct ath12k_base *ab = qmi->ab, *block_ab;
+	struct ath12k_hw_group *ag = ab->ag;
 	int ret;
 
 	ath12k_qmi_phy_cap_send(ab);
@@ -3092,16 +3462,30 @@
 		return ret;
 	}
 
-	ret = ath12k_qmi_host_cap_send(ab);
-	if (ret < 0) {
-		ath12k_warn(ab, "qmi failed to send host cap QMI:%d\n", ret);
-		return ret;
+	spin_lock(&qmi->event_lock);
+
+	ath12k_qmi_set_event_block(qmi, true);
+
+	spin_unlock(&qmi->event_lock);
+
+	mutex_lock(&ag->mutex);
+
+	if (ath12k_qmi_hw_group_host_cap_ready(ag)) {
+		ath12k_core_hw_group_set_mlo_capable(ag);
+
+		block_ab = ath12k_qmi_hw_group_find_blocked(ag);
+		if (block_ab)
+			ath12k_qmi_trigger_host_cap(block_ab);
 	}
 
+	mutex_unlock(&ag->mutex);
+
 	return ret;
 }
 
-static int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_mem_request(struct ath12k_qmi *qmi)
 {
 	struct ath12k_base *ab = qmi->ab;
 	int ret;
@@ -3115,7 +3499,9 @@
 	return ret;
 }
 
-static int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
+/* clang stack usage explodes if this is inlined */
+static noinline_for_stack
+int ath12k_qmi_event_load_bdf(struct ath12k_qmi *qmi)
 {
 	struct ath12k_base *ab = qmi->ab;
 	int ret;
@@ -3213,6 +3599,52 @@
 	ath12k_qmi_driver_event_post(qmi, ATH12K_QMI_EVENT_FW_READY, NULL);
 }
 
+int ath12k_enable_fwlog(struct ath12k_base *ab)
+{
+	struct wlfw_ini_req_msg_v01 *req;
+	struct wlfw_ini_resp_msg_v01 resp = {};
+	struct qmi_txn txn = {};
+	int ret = 0;
+
+	req = kzalloc(sizeof(*req), GFP_KERNEL);
+	if (!req)
+		return -ENOMEM;
+
+	req->enablefwlog_valid = 1;
+	req->enablefwlog = 1;
+
+	ret = qmi_txn_init(&ab->qmi.handle, &txn, wlfw_ini_resp_msg_v01_ei, &resp);
+	if (ret < 0)
+		goto out;
+
+	ret = qmi_send_request(&ab->qmi.handle, NULL, &txn,
+			       QMI_WLFW_INI_REQ_V01,
+			       WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN,
+			       wlfw_ini_req_msg_v01_ei, req);
+
+	if (ret < 0) {
+		ath12k_warn(ab, "Failed to send init request for enabling fwlog = %d\n",
+			    ret);
+		qmi_txn_cancel(&txn);
+		goto out;
+	}
+
+	ret = qmi_txn_wait(&txn, msecs_to_jiffies(ATH12K_QMI_WLANFW_TIMEOUT_MS));
+	if (ret < 0) {
+		ath12k_warn(ab, "fwlog enable wait for resp failed: %d\n", ret);
+		goto out;
+	}
+
+	if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+		ath12k_warn(ab, "fwlog enable request failed, result: %d, err: %d\n",
+			    resp.resp.result, resp.resp.error);
+		ret = -EINVAL;
+	}
+out:
+	kfree(req);
+	return ret;
+}
+
 static const struct qmi_msg_handler ath12k_qmi_msg_handlers[] = {
 	{
 		.type = QMI_INDICATION,
@@ -3280,6 +3712,21 @@
 	.del_server = ath12k_qmi_ops_del_server,
 };
 
+static int ath12k_qmi_event_host_cap(struct ath12k_qmi *qmi)
+{
+	struct ath12k_base *ab = qmi->ab;
+	int ret;
+
+	ret = ath12k_qmi_host_cap_send(ab);
+	if (ret < 0) {
+		ath12k_warn(ab, "failed to send qmi host cap for device id %d: %d\n",
+			    ab->device_id, ret);
+		return ret;
+	}
+
+	return ret;
+}
+
 static void ath12k_qmi_driver_event_work(struct work_struct *work)
 {
 	struct ath12k_qmi *qmi = container_of(work, struct ath12k_qmi,
@@ -3306,7 +3753,6 @@
 			break;
 		case ATH12K_QMI_EVENT_SERVER_EXIT:
 			set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags);
-			set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
 			break;
 		case ATH12K_QMI_EVENT_REQUEST_MEM:
 			ret = ath12k_qmi_event_mem_request(qmi);
@@ -3320,20 +3766,28 @@
 			break;
 		case ATH12K_QMI_EVENT_FW_READY:
 			clear_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
-			if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+			if (test_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE, &ab->dev_flags)) {
 				if (ab->is_reset)
 					ath12k_hal_dump_srng_stats(ab);
+
+				set_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
 				queue_work(ab->workqueue, &ab->restart_work);
 				break;
 			}
 
 			clear_bit(ATH12K_FLAG_CRASH_FLUSH,
 				  &ab->dev_flags);
-			clear_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags);
-			ath12k_core_qmi_firmware_ready(ab);
-			set_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags);
+			ret = ath12k_core_qmi_firmware_ready(ab);
+			if (!ret)
+				set_bit(ATH12K_FLAG_QMI_FW_READY_COMPLETE,
+					&ab->dev_flags);
 
 			break;
+		case ATH12K_QMI_EVENT_HOST_CAP:
+			ret = ath12k_qmi_event_host_cap(qmi);
+			if (ret < 0)
+				set_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags);
+			break;
 		default:
 			ath12k_warn(ab, "invalid event type: %d", event->type);
 			break;
@@ -3354,7 +3808,8 @@
 	memset(&ab->qmi.target_mem, 0, sizeof(struct target_mem_chunk));
 	ab->qmi.ab = ab;
 
-	ab->qmi.target_mem_mode = ATH12K_QMI_TARGET_MEM_MODE_DEFAULT;
+	ab->qmi.target_mem_mode = ATH12K_QMI_TARGET_MEM_MODE;
+
 	ret = qmi_handle_init(&ab->qmi.handle, ATH12K_QMI_RESP_LEN_MAX,
 			      &ath12k_qmi_ops, ath12k_qmi_msg_handlers);
 	if (ret < 0) {
@@ -3386,11 +3841,15 @@
 
 void ath12k_qmi_deinit_service(struct ath12k_base *ab)
 {
+	if (!ab->qmi.ab)
+		return;
+
 	qmi_handle_release(&ab->qmi.handle);
 	cancel_work_sync(&ab->qmi.event_work);
 	destroy_workqueue(ab->qmi.event_wq);
 	ath12k_qmi_m3_free(ab);
 	ath12k_qmi_free_target_mem_chunk(ab);
+	ab->qmi.ab = NULL;
 }
 
 void ath12k_qmi_free_resource(struct ath12k_base *ab)
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/qmi.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/qmi.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/qmi.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/qmi.h	2025-09-25 17:40:34.163360264 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_QMI_H
@@ -36,11 +36,16 @@
 
 #define QMI_WLANFW_MAX_DATA_SIZE_V01		6144
 #define ATH12K_FIRMWARE_MODE_OFF		4
-#define ATH12K_QMI_TARGET_MEM_MODE_DEFAULT	0
 
 #define ATH12K_BOARD_ID_DEFAULT	0xFF
 
 struct ath12k_base;
+struct ath12k_hw_group;
+
+enum ath12k_target_mem_mode {
+	ATH12K_QMI_TARGET_MEM_MODE_DEFAULT = 0,
+	ATH12K_QMI_TARGET_MEM_MODE_512M,
+};
 
 enum ath12k_qmi_file_type {
 	ATH12K_QMI_FILE_TYPE_BDF_GOLDEN	= 0,
@@ -68,6 +73,7 @@
 	ATH12K_QMI_EVENT_FORCE_FW_ASSERT,
 	ATH12K_QMI_EVENT_POWER_UP,
 	ATH12K_QMI_EVENT_POWER_DOWN,
+	ATH12K_QMI_EVENT_HOST_CAP,
 	ATH12K_QMI_EVENT_MAX,
 };
 
@@ -142,6 +148,10 @@
 	u32 target_mem_mode;
 	bool target_mem_delayed;
 	u8 cal_done;
+
+	/* protected with struct ath12k_qmi::event_lock */
+	bool block_event;
+
 	u8 num_radios;
 	struct target_info target;
 	struct m3_mem_region m3_mem;
@@ -167,7 +177,9 @@
 	BDF_MEM_REGION_TYPE = 0x2,
 	M3_DUMP_REGION_TYPE = 0x3,
 	CALDB_MEM_REGION_TYPE = 0x4,
+	MLO_GLOBAL_MEM_REGION_TYPE = 0x8,
 	PAGEABLE_MEM_REGION_TYPE = 0x9,
+	AFC_MEM_REGION_TYPE = 0xa,
 };
 
 enum qmi_wlanfw_host_build_type {
@@ -594,11 +606,40 @@
 	struct qmi_response_type_v01 resp;
 };
 
+static inline void ath12k_qmi_set_event_block(struct ath12k_qmi *qmi, bool block)
+{
+	lockdep_assert_held(&qmi->event_lock);
+
+	qmi->block_event = block;
+}
+
+static inline bool ath12k_qmi_get_event_block(struct ath12k_qmi *qmi)
+{
+	lockdep_assert_held(&qmi->event_lock);
+
+	return qmi->block_event;
+}
+
+#define QMI_WLFW_INI_REQ_V01 0x002F
+#define WLFW_INI_REQ_MSG_V01_MAX_MSG_LEN 4
+
+struct wlfw_ini_req_msg_v01 {
+	u8 enablefwlog_valid;
+	u8 enablefwlog;
+};
+
+struct wlfw_ini_resp_msg_v01 {
+	struct qmi_response_type_v01 resp;
+};
+
 int ath12k_qmi_firmware_start(struct ath12k_base *ab,
 			      u32 mode);
 void ath12k_qmi_firmware_stop(struct ath12k_base *ab);
 void ath12k_qmi_deinit_service(struct ath12k_base *ab);
 int ath12k_qmi_init_service(struct ath12k_base *ab);
 void ath12k_qmi_free_resource(struct ath12k_base *ab);
+void ath12k_qmi_trigger_host_cap(struct ath12k_base *ab);
+void ath12k_qmi_reset_mlo_mem(struct ath12k_hw_group *ag);
+int ath12k_enable_fwlog(struct ath12k_base *ab);
 
 #endif
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/reg.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/reg.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/reg.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/reg.c	2025-09-25 17:40:34.163360264 +0200
@@ -1,11 +1,12 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #include <linux/rtnetlink.h>
 #include "core.h"
 #include "debug.h"
+#include "mac.h"
 
 /* World regdom to be used in case default regd from fw is unavailable */
 #define ATH12K_2GHZ_CH01_11      REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
@@ -50,7 +51,7 @@
 	struct ath12k_wmi_init_country_arg arg;
 	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
 	struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
-	int ret, i;
+	int ret, i, j;
 
 	ath12k_dbg(ar->ab, ATH12K_DBG_REG,
 		   "Regulatory Notification received for %s\n", wiphy_name(wiphy));
@@ -86,6 +87,9 @@
 	arg.cc_info.alpha2[2] = 0;
 
 	/* Allow fresh updates to wiphy regd */
+	for_each_ar(ah, ar, i)
+		for (j = 0; j < ar->ab->num_radios; j++)
+			ar->ab->regd_change_user_request[j] = true;
 	ah->regd_updated = false;
 
 	/* Send the reg change request to all the radios */
@@ -122,8 +126,10 @@
 		}
 	}
 
-	if (WARN_ON(!num_channels))
-		return -EINVAL;
+	if (!num_channels) {
+		ath12k_warn(ar->ab, "pdev is not supported for this country\n");
+		return -EOPNOTSUPP;
+	}
 
 	arg = kzalloc(struct_size(arg, channel, num_channels), GFP_KERNEL);
 
@@ -204,8 +210,27 @@
 		       sizeof(struct ieee80211_reg_rule));
 }
 
+int ath12k_reg_get_num_chans_in_band(struct ath12k *ar,
+				     struct ieee80211_supported_band *band)
+{
+	int i, count = 0;
+	u32 center_freq;
+
+	for (i = 0; i < band->n_channels; i++) {
+		center_freq = band->channels[i].center_freq;
+		if (center_freq >= KHZ_TO_MHZ(ar->freq_range.start_freq) &&
+		    center_freq <= KHZ_TO_MHZ(ar->freq_range.end_freq))
+			count++;
+	}
+
+	return count;
+}
+
 int ath12k_regd_update(struct ath12k *ar, bool init)
 {
+	struct ath12k_wmi_hal_reg_capabilities_ext_arg *reg_cap;
+	struct ieee80211_supported_band *update_band;
+	u32 phy_id, freq_low, freq_high, supported_bands, band;
 	struct ath12k_hw *ah = ath12k_ar_to_ah(ar);
 	struct ieee80211_hw *hw = ah->hw;
 	struct ieee80211_regdomain *regd, *regd_copy = NULL;
@@ -215,6 +240,44 @@
 
 	ab = ar->ab;
 
+	supported_bands = ar->pdev->cap.supported_bands;
+	if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP)
+		band = NL80211_BAND_2GHZ;
+	else if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && !ar->supports_6ghz)
+		band = NL80211_BAND_5GHZ;
+	else if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && ar->supports_6ghz)
+		band = NL80211_BAND_6GHZ;
+
+	update_band = &ar->mac.sbands[band];
+	reg_cap = &ab->hal_reg_cap[ar->pdev_idx];
+
+	if (ab->hw_params->single_pdev_only && !ar->supports_6ghz) {
+		phy_id = ar->pdev->cap.band[band].phy_id;
+		reg_cap = &ab->hal_reg_cap[phy_id];
+	}
+
+	/* Possible that, due to the reg change, the current limits for the
+	 * supported frequency range changed. Update them accordingly.
+	 */
+	if (supported_bands & WMI_HOST_WLAN_2GHZ_CAP) {
+		freq_low = max(reg_cap->low_2ghz_chan, ab->reg_freq_2ghz.start_freq);
+		freq_high = min(reg_cap->high_2ghz_chan, ab->reg_freq_2ghz.end_freq);
+	} else if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && !ar->supports_6ghz) {
+		freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_5ghz.start_freq);
+		freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_5ghz.end_freq);
+	} else if (supported_bands & WMI_HOST_WLAN_5GHZ_CAP && ar->supports_6ghz) {
+		freq_low = max(reg_cap->low_5ghz_chan, ab->reg_freq_6ghz.start_freq);
+		freq_high = min(reg_cap->high_5ghz_chan, ab->reg_freq_6ghz.end_freq);
+	}
+
+	ath12k_mac_update_freq_range(ar, freq_low, freq_high);
+	ar->num_channels = ath12k_reg_get_num_chans_in_band(ar, update_band);
+
+	ath12k_dbg(ab, ATH12K_DBG_REG, "pdev %u reg updated freq limits %u->%u MHz, no. of channels %u\n",
+		   ar->pdev->pdev_id, freq_low,
+		   freq_high, ar->num_channels);
+
+
 	/* If one of the radios within ah has already updated the regd for
 	 * the wiphy, then avoid setting regd again
 	 */
@@ -238,7 +301,7 @@
 
 	spin_lock_bh(&ab->base_lock);
 
-	if (init) {
+	if (init && !ab->new_regd[pdev_id]) {
 		/* Apply the regd received during init through
 		 * WMI_REG_CHAN_LIST_CC event. In case of failure to
 		 * receive the regd, initialize with a default world
@@ -296,9 +359,12 @@
 	for_each_ar(ah, ar, i) {
 		ab = ar->ab;
 		ret = ath12k_reg_update_chan_list(ar);
-		if (ret)
+		if (ret) {
+			if (ret == -EOPNOTSUPP)
+				continue;
 			goto err;
 	}
+	}
 skip:
 	return 0;
 err:
@@ -365,129 +431,6 @@
 	return flags;
 }
 
-static bool
-ath12k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
-			 struct ieee80211_reg_rule *rule2)
-{
-	u32 start_freq1, end_freq1;
-	u32 start_freq2, end_freq2;
-
-	start_freq1 = rule1->freq_range.start_freq_khz;
-	start_freq2 = rule2->freq_range.start_freq_khz;
-
-	end_freq1 = rule1->freq_range.end_freq_khz;
-	end_freq2 = rule2->freq_range.end_freq_khz;
-
-	if ((start_freq1 >= start_freq2 &&
-	     start_freq1 < end_freq2) ||
-	    (start_freq2 > start_freq1 &&
-	     start_freq2 < end_freq1))
-		return true;
-
-	/* TODO: Should we restrict intersection feasibility
-	 *  based on min bandwidth of the intersected region also,
-	 *  say the intersected rule should have a  min bandwidth
-	 * of 20MHz?
-	 */
-
-	return false;
-}
-
-static void ath12k_reg_intersect_rules(struct ieee80211_reg_rule *rule1,
-				       struct ieee80211_reg_rule *rule2,
-				       struct ieee80211_reg_rule *new_rule)
-{
-	u32 start_freq1, end_freq1;
-	u32 start_freq2, end_freq2;
-	u32 freq_diff, max_bw;
-
-	start_freq1 = rule1->freq_range.start_freq_khz;
-	start_freq2 = rule2->freq_range.start_freq_khz;
-
-	end_freq1 = rule1->freq_range.end_freq_khz;
-	end_freq2 = rule2->freq_range.end_freq_khz;
-
-	new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
-						    start_freq2);
-	new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
-
-	freq_diff = new_rule->freq_range.end_freq_khz -
-			new_rule->freq_range.start_freq_khz;
-	max_bw = min_t(u32, rule1->freq_range.max_bandwidth_khz,
-		       rule2->freq_range.max_bandwidth_khz);
-	new_rule->freq_range.max_bandwidth_khz = min_t(u32, max_bw, freq_diff);
-
-	new_rule->power_rule.max_antenna_gain =
-		min_t(u32, rule1->power_rule.max_antenna_gain,
-		      rule2->power_rule.max_antenna_gain);
-
-	new_rule->power_rule.max_eirp = min_t(u32, rule1->power_rule.max_eirp,
-					      rule2->power_rule.max_eirp);
-
-	/* Use the flags of both the rules */
-	new_rule->flags = rule1->flags | rule2->flags;
-
-	/* To be safe, lts use the max cac timeout of both rules */
-	new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
-				     rule2->dfs_cac_ms);
-}
-
-static struct ieee80211_regdomain *
-ath12k_regd_intersect(struct ieee80211_regdomain *default_regd,
-		      struct ieee80211_regdomain *curr_regd)
-{
-	u8 num_old_regd_rules, num_curr_regd_rules, num_new_regd_rules;
-	struct ieee80211_reg_rule *old_rule, *curr_rule, *new_rule;
-	struct ieee80211_regdomain *new_regd = NULL;
-	u8 i, j, k;
-
-	num_old_regd_rules = default_regd->n_reg_rules;
-	num_curr_regd_rules = curr_regd->n_reg_rules;
-	num_new_regd_rules = 0;
-
-	/* Find the number of intersecting rules to allocate new regd memory */
-	for (i = 0; i < num_old_regd_rules; i++) {
-		old_rule = default_regd->reg_rules + i;
-		for (j = 0; j < num_curr_regd_rules; j++) {
-			curr_rule = curr_regd->reg_rules + j;
-
-			if (ath12k_reg_can_intersect(old_rule, curr_rule))
-				num_new_regd_rules++;
-		}
-	}
-
-	if (!num_new_regd_rules)
-		return NULL;
-
-	new_regd = kzalloc(sizeof(*new_regd) + (num_new_regd_rules *
-			sizeof(struct ieee80211_reg_rule)),
-			GFP_ATOMIC);
-
-	if (!new_regd)
-		return NULL;
-
-	/* We set the new country and dfs region directly and only trim
-	 * the freq, power, antenna gain by intersecting with the
-	 * default regdomain. Also MAX of the dfs cac timeout is selected.
-	 */
-	new_regd->n_reg_rules = num_new_regd_rules;
-	memcpy(new_regd->alpha2, curr_regd->alpha2, sizeof(new_regd->alpha2));
-	new_regd->dfs_region = curr_regd->dfs_region;
-	new_rule = new_regd->reg_rules;
-
-	for (i = 0, k = 0; i < num_old_regd_rules; i++) {
-		old_rule = default_regd->reg_rules + i;
-		for (j = 0; j < num_curr_regd_rules; j++) {
-			curr_rule = curr_regd->reg_rules + j;
-
-			if (ath12k_reg_can_intersect(old_rule, curr_rule))
-				ath12k_reg_intersect_rules(old_rule, curr_rule,
-							   (new_rule + k++));
-		}
-	}
-	return new_regd;
-}
-
 static const char *
 ath12k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
 {
@@ -611,11 +554,21 @@
 	*rule_idx = i;
 }
 
+static void ath12k_copy_reg_rule_freq(struct ath12k_reg_freq *ath12k_reg_freq,
+				 struct ath12k_reg_rule *reg_rule)
+{
+	if (ath12k_reg_freq->start_freq > reg_rule->start_freq)
+		ath12k_reg_freq->start_freq = reg_rule->start_freq;
+
+	if (ath12k_reg_freq->end_freq < reg_rule->end_freq)
+		ath12k_reg_freq->end_freq = reg_rule->end_freq;
+}
+
 struct ieee80211_regdomain *
 ath12k_reg_build_regd(struct ath12k_base *ab,
-		      struct ath12k_reg_info *reg_info, bool intersect)
+		      struct ath12k_reg_info *reg_info)
 {
-	struct ieee80211_regdomain *tmp_regd, *default_regd, *new_regd = NULL;
+	struct ieee80211_regdomain *new_regd = NULL;
 	struct ath12k_reg_rule *reg_rule;
 	u8 i = 0, j = 0, k = 0;
 	u8 num_rules;
@@ -639,21 +592,29 @@
 	if (reg_info->dfs_region == ATH12K_DFS_REG_ETSI)
 		num_rules += 2;
 
-	tmp_regd = kzalloc(sizeof(*tmp_regd) +
+	new_regd = kzalloc(sizeof(*new_regd) +
 			   (num_rules * sizeof(struct ieee80211_reg_rule)),
 			   GFP_ATOMIC);
-	if (!tmp_regd)
+	if (!new_regd)
 		goto ret;
 
-	memcpy(tmp_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
+	memcpy(new_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
 	memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
 	alpha2[2] = '\0';
-	tmp_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region);
+	new_regd->dfs_region = ath12k_map_fw_dfs_region(reg_info->dfs_region);
 
 	ath12k_dbg(ab, ATH12K_DBG_REG,
 		   "\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
-		   alpha2, ath12k_reg_get_regdom_str(tmp_regd->dfs_region),
+		   alpha2, ath12k_reg_get_regdom_str(new_regd->dfs_region),
 		   reg_info->dfs_region, num_rules);
+
+	/* Reset start and end frequency for each band */
+	ab->reg_freq_5ghz.start_freq = INT_MAX;
+	ab->reg_freq_5ghz.end_freq = 0;
+	ab->reg_freq_2ghz.start_freq = INT_MAX;
+	ab->reg_freq_2ghz.end_freq = 0;
+	ab->reg_freq_6ghz.start_freq = INT_MAX;
+	ab->reg_freq_6ghz.end_freq = 0;
 	/* Update reg_rules[] below. Firmware is expected to
 	 * send these rules in order(2G rules first and then 5G)
 	 */
@@ -664,6 +625,7 @@
 			max_bw = min_t(u16, reg_rule->max_bw,
 				       reg_info->max_bw_2g);
 			flags = 0;
+			ath12k_copy_reg_rule_freq(&ab->reg_freq_2ghz, reg_rule);
 		} else if (reg_info->num_5g_reg_rules &&
 			   (j < reg_info->num_5g_reg_rules)) {
 			reg_rule = reg_info->reg_rules_5g_ptr + j++;
@@ -677,6 +639,9 @@
 			 * per other BW rule flags we pass from here
 			 */
 			flags = NL80211_RRF_AUTO_BW;
+
+			ath12k_copy_reg_rule_freq(&ab->reg_freq_5ghz, reg_rule);
+
 		} else if (reg_info->is_ext_reg_event &&
 			   reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP] &&
 			(k < reg_info->num_6g_reg_rules_ap[WMI_REG_INDOOR_AP])) {
@@ -684,6 +649,7 @@
 			max_bw = min_t(u16, reg_rule->max_bw,
 				       reg_info->max_bw_6g_ap[WMI_REG_INDOOR_AP]);
 			flags = NL80211_RRF_AUTO_BW;
+			ath12k_copy_reg_rule_freq(&ab->reg_freq_6ghz, reg_rule);
 		} else {
 			break;
 		}
@@ -691,7 +657,7 @@
 		flags |= ath12k_map_fw_reg_flags(reg_rule->flags);
 		flags |= ath12k_map_fw_phy_flags(reg_info->phybitmap);
 
-		ath12k_reg_update_rule(tmp_regd->reg_rules + i,
+		ath12k_reg_update_rule(new_regd->reg_rules + i,
 				       reg_rule->start_freq,
 				       reg_rule->end_freq, max_bw,
 				       reg_rule->ant_gain, reg_rule->reg_power,
@@ -706,7 +672,7 @@
 		    reg_info->dfs_region == ATH12K_DFS_REG_ETSI &&
 		    (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
 		    reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)){
-			ath12k_reg_update_weather_radar_band(ab, tmp_regd,
+			ath12k_reg_update_weather_radar_band(ab, new_regd,
 							     reg_rule, &i,
 							     flags, max_bw);
 			continue;
@@ -716,35 +682,19 @@
 			ath12k_dbg(ab, ATH12K_DBG_REG, "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d)\n",
 				   i + 1, reg_rule->start_freq, reg_rule->end_freq,
 				   max_bw, reg_rule->ant_gain, reg_rule->reg_power,
-				   tmp_regd->reg_rules[i].dfs_cac_ms,
+				   new_regd->reg_rules[i].dfs_cac_ms,
 				   flags, reg_rule->psd_flag, reg_rule->psd_eirp);
 		} else {
 			ath12k_dbg(ab, ATH12K_DBG_REG,
 				   "\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
 				   i + 1, reg_rule->start_freq, reg_rule->end_freq,
 				   max_bw, reg_rule->ant_gain, reg_rule->reg_power,
-				   tmp_regd->reg_rules[i].dfs_cac_ms,
+				   new_regd->reg_rules[i].dfs_cac_ms,
 				   flags);
 		}
 	}
 
-	tmp_regd->n_reg_rules = i;
-
-	if (intersect) {
-		default_regd = ab->default_regd[reg_info->phy_id];
-
-		/* Get a new regd by intersecting the received regd with
-		 * our default regd.
-		 */
-		new_regd = ath12k_regd_intersect(default_regd, tmp_regd);
-		kfree(tmp_regd);
-		if (!new_regd) {
-			ath12k_warn(ab, "Unable to create intersected regdomain\n");
-			goto ret;
-		}
-	} else {
-		new_regd = tmp_regd;
-	}
+	new_regd->n_reg_rules = i;
 
 ret:
 	return new_regd;
@@ -777,8 +727,14 @@
 {
 	int i;
 
+	if (ab->regd_freed)
+		return;
+
 	for (i = 0; i < ab->hw_params->max_radios; i++) {
 		kfree(ab->default_regd[i]);
 		kfree(ab->new_regd[i]);
+		ab->default_regd[i] = NULL;
+		ab->new_regd[i] = NULL;
+		ab->regd_freed = true;
 	}
 }
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/reg.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/reg.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/reg.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/reg.h	2025-09-25 17:40:34.163360264 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_REG_H
@@ -13,6 +13,9 @@
 struct ath12k_base;
 struct ath12k;
 
+#define ATH12K_2GHZ_MAX_FREQUENCY	2495
+#define ATH12K_5GHZ_MAX_FREQUENCY	5920
+
 /* DFS regdomains supported by Firmware */
 enum ath12k_dfs_region {
 	ATH12K_DFS_REG_UNSET,
@@ -93,9 +96,10 @@
 void ath12k_reg_free(struct ath12k_base *ab);
 void ath12k_regd_update_work(struct work_struct *work);
 struct ieee80211_regdomain *ath12k_reg_build_regd(struct ath12k_base *ab,
-						  struct ath12k_reg_info *reg_info,
-						  bool intersect);
+						  struct ath12k_reg_info *reg_info);
 int ath12k_regd_update(struct ath12k *ar, bool init);
 int ath12k_reg_update_chan_list(struct ath12k *ar);
+int ath12k_reg_get_num_chans_in_band(struct ath12k *ar,
+				     struct ieee80211_supported_band *band);
 
 #endif
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/rx_desc.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/rx_desc.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/rx_desc.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/rx_desc.h	2025-07-01 14:10:42.748046910 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #ifndef ATH12K_RX_DESC_H
 #define ATH12K_RX_DESC_H
@@ -637,6 +637,8 @@
 	RX_MSDU_START_PKT_TYPE_11N,
 	RX_MSDU_START_PKT_TYPE_11AC,
 	RX_MSDU_START_PKT_TYPE_11AX,
+	RX_MSDU_START_PKT_TYPE_11BA,
+	RX_MSDU_START_PKT_TYPE_11BE,
 };
 
 enum rx_msdu_start_sgi {
@@ -1539,12 +1541,4 @@
 #define MAX_MU_GROUP_SHOW 16
 #define MAX_MU_GROUP_LENGTH (6 * MAX_MU_GROUP_SHOW)
 
-#define HAL_RX_RU_ALLOC_TYPE_MAX 6
-#define RU_26  1
-#define RU_52  2
-#define RU_106 4
-#define RU_242 9
-#define RU_484 18
-#define RU_996 37
-
 #endif /* ATH12K_RX_DESC_H */
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/wmi.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/wmi.c
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/wmi.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/wmi.c	2025-09-29 14:23:07.613732450 +0200
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: BSD-3-Clause-Clear
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 #include <linux/skbuff.h>
 #include <linux/ctype.h>
@@ -15,16 +15,23 @@
 #include <linux/time.h>
 #include <linux/of.h>
 #include "core.h"
+#include "debugfs.h"
 #include "debug.h"
 #include "mac.h"
 #include "hw.h"
 #include "peer.h"
 #include "p2p.h"
+#include "testmode.h"
 
 struct ath12k_wmi_svc_ready_parse {
 	bool wmi_svc_bitmap_done;
 };
 
+struct wmi_tlv_fw_stats_parse {
+	const struct wmi_stats_event *ev;
+	struct ath12k_fw_stats *stats;
+};
+
 struct ath12k_wmi_dma_ring_caps_parse {
 	struct ath12k_wmi_dma_ring_caps_params *dma_ring_caps;
 	u32 n_dma_ring_caps;
@@ -173,7 +180,7 @@
 		.min_len = sizeof(struct wmi_p2p_noa_event) },
 };
 
-static __le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
+__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len)
 {
 	return le32_encode_bits(cmd, WMI_TLV_TAG) |
 		le32_encode_bits(len, WMI_TLV_LEN);
@@ -514,15 +521,19 @@
 	 * band to band for a single radio, need to see how this should be
 	 * handled.
 	 */
-	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
+	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
 		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_2g);
 		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_2g);
-	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
+	} else if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
 		pdev_cap->vht_cap = le32_to_cpu(mac_caps->vht_cap_info_5g);
 		pdev_cap->vht_mcs = le32_to_cpu(mac_caps->vht_supp_mcs_5g);
 		pdev_cap->he_mcs = le32_to_cpu(mac_caps->he_supp_mcs_5g);
 		pdev_cap->tx_chain_mask = le32_to_cpu(mac_caps->tx_chain_mask_5g);
 		pdev_cap->rx_chain_mask = le32_to_cpu(mac_caps->rx_chain_mask_5g);
+		pdev_cap->nss_ratio_enabled =
+			WMI_NSS_RATIO_EN_DIS_GET(mac_caps->nss_ratio);
+		pdev_cap->nss_ratio_info =
+			WMI_NSS_RATIO_INFO_GET(mac_caps->nss_ratio);
 	} else {
 		return -EINVAL;
 	}
@@ -540,7 +551,7 @@
 	pdev_cap->rx_chain_mask_shift =
 			find_first_bit((unsigned long *)&pdev_cap->rx_chain_mask, 32);
 
-	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2G_CAP) {
+	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_2GHZ_CAP) {
 		cap_band = &pdev_cap->band[NL80211_BAND_2GHZ];
 		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
 		cap_band->max_bw_supported = le32_to_cpu(mac_caps->max_bw_supported_2g);
@@ -560,7 +571,7 @@
 				le32_to_cpu(mac_caps->he_ppet2g.ppet16_ppet8_ru3_ru0[i]);
 	}
 
-	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5G_CAP) {
+	if (le32_to_cpu(mac_caps->supported_bands) & WMI_HOST_WLAN_5GHZ_CAP) {
 		cap_band = &pdev_cap->band[NL80211_BAND_5GHZ];
 		cap_band->phy_id = le32_to_cpu(mac_caps->phy_id);
 		cap_band->max_bw_supported =
@@ -814,6 +825,39 @@
 	return ret;
 }
 
+int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
+				      u32 vdev_id, u32 pdev_id)
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct wmi_request_stats_cmd *cmd;
+	struct sk_buff *skb;
+	int ret;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_request_stats_cmd *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_REQUEST_STATS_CMD,
+						 sizeof(*cmd));
+
+	cmd->stats_id = cpu_to_le32(stats_id);
+	cmd->vdev_id = cpu_to_le32(vdev_id);
+	cmd->pdev_id = cpu_to_le32(pdev_id);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_STATS_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to send WMI_REQUEST_STATS cmd\n");
+		dev_kfree_skb(skb);
+	}
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
+		   "WMI request stats 0x%x vdev id %d pdev id %d\n",
+		   stats_id, vdev_id, pdev_id);
+
+	return ret;
+}
+
 int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr,
 			   struct ath12k_wmi_vdev_create_arg *args)
 {
@@ -821,6 +865,8 @@
 	struct wmi_vdev_create_cmd *cmd;
 	struct sk_buff *skb;
 	struct ath12k_wmi_vdev_txrx_streams_params *txrx_streams;
+	bool is_ml_vdev = is_valid_ether_addr(args->mld_addr);
+	struct wmi_vdev_create_mlo_params *ml_params;
 	struct wmi_tlv *tlv;
 	int ret, len;
 	void *ptr;
@@ -830,7 +876,8 @@
 	 * both the bands.
 	 */
 	len = sizeof(*cmd) + TLV_HDR_SIZE +
-		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams));
+		(WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams)) +
+		(is_ml_vdev ? TLV_HDR_SIZE + sizeof(*ml_params) : 0);
 
 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 	if (!skb)
@@ -879,6 +926,21 @@
 	txrx_streams->supported_rx_streams =
 				cpu_to_le32(args->chains[NL80211_BAND_5GHZ].rx);
 
+	ptr += WMI_NUM_SUPPORTED_BAND_MAX * sizeof(*txrx_streams);
+
+	if (is_ml_vdev) {
+		tlv = ptr;
+		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+						 sizeof(*ml_params));
+		ptr += TLV_HDR_SIZE;
+		ml_params = ptr;
+
+		ml_params->tlv_header =
+			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_CREATE_PARAMS,
+					       sizeof(*ml_params));
+		ether_addr_copy(ml_params->mld_macaddr.addr, args->mld_addr);
+	}
+
 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
 		   "WMI vdev create: id %d type %d subtype %d macaddr %pM pdevid %d\n",
 		   args->if_id, args->type, args->subtype,
@@ -980,14 +1042,31 @@
 static void ath12k_wmi_put_wmi_channel(struct ath12k_wmi_channel_params *chan,
 				       struct wmi_vdev_start_req_arg *arg)
 {
+	u32 center_freq1 = arg->band_center_freq1;
+
 	memset(chan, 0, sizeof(*chan));
 
 	chan->mhz = cpu_to_le32(arg->freq);
 	chan->band_center_freq1 = cpu_to_le32(arg->band_center_freq1);
-	if (arg->mode == MODE_11AC_VHT80_80)
-		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq2);
+	if (arg->mode == MODE_11BE_EHT320) {
+		if (arg->freq > arg->band_center_freq1)
+			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 80);
+		else
+			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 80);
+
+		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq1);
+
+	} else if (arg->mode == MODE_11BE_EHT160 ||
+		   arg->mode == MODE_11AX_HE160) {
+		if (arg->freq > arg->band_center_freq1)
+			chan->band_center_freq1 = cpu_to_le32(center_freq1 + 40);
 	else
+			chan->band_center_freq1 = cpu_to_le32(center_freq1 - 40);
+
+		chan->band_center_freq2 = cpu_to_le32(arg->band_center_freq1);
+	} else {
 		chan->band_center_freq2 = 0;
+	}
 
 	chan->info |= le32_encode_bits(arg->mode, WMI_CHAN_INFO_MODE);
 	if (arg->passive)
@@ -1020,19 +1099,27 @@
 int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg,
 			  bool restart)
 {
+	struct wmi_vdev_start_mlo_params *ml_params;
+	struct wmi_partner_link_info *partner_info;
 	struct ath12k_wmi_pdev *wmi = ar->wmi;
 	struct wmi_vdev_start_request_cmd *cmd;
 	struct sk_buff *skb;
 	struct ath12k_wmi_channel_params *chan;
 	struct wmi_tlv *tlv;
 	void *ptr;
-	int ret, len;
+	int ret, len, i, ml_arg_size = 0;
 
 	if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
 		return -EINVAL;
 
 	len = sizeof(*cmd) + sizeof(*chan) + TLV_HDR_SIZE;
 
+	if (!restart && arg->ml.enabled) {
+		ml_arg_size = TLV_HDR_SIZE + sizeof(*ml_params) +
+			      TLV_HDR_SIZE + (arg->ml.num_partner_links *
+					      sizeof(*partner_info));
+		len += ml_arg_size;
+	}
 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 	if (!skb)
 		return -ENOMEM;
@@ -1085,6 +1172,61 @@
 
 	ptr += sizeof(*tlv);
 
+	if (ml_arg_size) {
+		tlv = ptr;
+		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+						 sizeof(*ml_params));
+		ptr += TLV_HDR_SIZE;
+
+		ml_params = ptr;
+
+		ml_params->tlv_header =
+			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_VDEV_START_PARAMS,
+					       sizeof(*ml_params));
+
+		ml_params->flags = le32_encode_bits(arg->ml.enabled,
+						    ATH12K_WMI_FLAG_MLO_ENABLED) |
+				   le32_encode_bits(arg->ml.assoc_link,
+						    ATH12K_WMI_FLAG_MLO_ASSOC_LINK) |
+				   le32_encode_bits(arg->ml.mcast_link,
+						    ATH12K_WMI_FLAG_MLO_MCAST_VDEV) |
+				   le32_encode_bits(arg->ml.link_add,
+						    ATH12K_WMI_FLAG_MLO_LINK_ADD);
+
+		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %d start ml flags 0x%x\n",
+			   arg->vdev_id, ml_params->flags);
+
+		ptr += sizeof(*ml_params);
+
+		tlv = ptr;
+		tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+						 arg->ml.num_partner_links *
+						 sizeof(*partner_info));
+		ptr += TLV_HDR_SIZE;
+
+		partner_info = ptr;
+
+		for (i = 0; i < arg->ml.num_partner_links; i++) {
+			partner_info->tlv_header =
+				ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PARTNER_LINK_PARAMS,
+						       sizeof(*partner_info));
+			partner_info->vdev_id =
+				cpu_to_le32(arg->ml.partner_info[i].vdev_id);
+			partner_info->hw_link_id =
+				cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
+			ether_addr_copy(partner_info->vdev_addr.addr,
+					arg->ml.partner_info[i].addr);
+
+			ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "partner vdev %d hw_link_id %d macaddr%pM\n",
+				   partner_info->vdev_id, partner_info->hw_link_id,
+				   partner_info->vdev_addr.addr);
+
+			partner_info++;
+		}
+
+		ptr = partner_info;
+	}
+
 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "vdev %s id 0x%x freq 0x%x mode 0x%x\n",
 		   restart ? "restart" : "start", arg->vdev_id,
 		   arg->freq, arg->mode);
@@ -1149,9 +1291,14 @@
 	struct ath12k_wmi_pdev *wmi = ar->wmi;
 	struct wmi_peer_create_cmd *cmd;
 	struct sk_buff *skb;
-	int ret;
+	int ret, len;
+	struct wmi_peer_create_mlo_params *ml_param;
+	void *ptr;
+	struct wmi_tlv *tlv;
 
-	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, sizeof(*cmd));
+	len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(*ml_param);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 	if (!skb)
 		return -ENOMEM;
 
@@ -1163,9 +1310,23 @@
 	cmd->peer_type = cpu_to_le32(arg->peer_type);
 	cmd->vdev_id = cpu_to_le32(arg->vdev_id);
 
+	ptr = skb->data + sizeof(*cmd);
+	tlv = ptr;
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT,
+					 sizeof(*ml_param));
+	ptr += TLV_HDR_SIZE;
+	ml_param = ptr;
+	ml_param->tlv_header =
+			ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_CREATE_PARAMS,
+					       sizeof(*ml_param));
+	if (arg->ml_enabled)
+		ml_param->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+	ptr += sizeof(*ml_param);
+
 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
-		   "WMI peer create vdev_id %d peer_addr %pM\n",
-		   arg->vdev_id, arg->peer_addr);
+		   "WMI peer create vdev_id %d peer_addr %pM ml_flags 0x%x\n",
+		   arg->vdev_id, arg->peer_addr, ml_param->flags);
 
 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_CREATE_CMDID);
 	if (ret) {
@@ -1788,14 +1949,19 @@
 	return ret;
 }
 
-int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
+int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
 			struct ieee80211_mutable_offsets *offs,
 			struct sk_buff *bcn,
 			struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args)
 {
+	struct ath12k *ar = arvif->ar;
 	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct ath12k_base *ab = ar->ab;
 	struct wmi_bcn_tmpl_cmd *cmd;
 	struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info;
+	struct ath12k_vif *ahvif = arvif->ahvif;
+	struct ieee80211_bss_conf *conf;
+	u32 vdev_id = arvif->vdev_id;
 	struct wmi_tlv *tlv;
 	struct sk_buff *skb;
 	u32 ema_params = 0;
@@ -1803,6 +1969,14 @@
 	int ret, len;
 	size_t aligned_len = roundup(bcn->len, 4);
 
+	conf = ath12k_mac_get_link_bss_conf(arvif);
+	if (!conf) {
+		ath12k_warn(ab,
+			    "unable to access bss link conf in beacon template command for vif %pM link %u\n",
+			    ahvif->vif->addr, arvif->link_id);
+		return -EINVAL;
+	}
+
 	len = sizeof(*cmd) + sizeof(*bcn_prb_info) + TLV_HDR_SIZE + aligned_len;
 
 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
@@ -1814,8 +1988,16 @@
 						 sizeof(*cmd));
 	cmd->vdev_id = cpu_to_le32(vdev_id);
 	cmd->tim_ie_offset = cpu_to_le32(offs->tim_offset);
-	cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]);
-	cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]);
+
+	if (conf->csa_active) {
+		cmd->csa_switch_count_offset =
+				cpu_to_le32(offs->cntdwn_counter_offs[0]);
+		cmd->ext_csa_switch_count_offset =
+				cpu_to_le32(offs->cntdwn_counter_offs[1]);
+		cmd->csa_event_bitmap = cpu_to_le32(0xFFFFFFFF);
+		arvif->current_cntdown_counter = bcn->data[offs->cntdwn_counter_offs[0]];
+	}
+
 	cmd->buf_len = cpu_to_le32(bcn->len);
 	cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off);
 	if (ema_args) {
@@ -1827,6 +2009,9 @@
 			u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST);
 		cmd->ema_params = cpu_to_le32(ema_params);
 	}
+	cmd->feature_enable_bitmap =
+		cpu_to_le32(u32_encode_bits(arvif->beacon_prot,
+					    WMI_BEACON_PROTECTION_EN_BIT));
 
 	ptr = skb->data + sizeof(*cmd);
 
@@ -1845,7 +2030,7 @@
 
 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_BCN_TMPL_CMDID);
 	if (ret) {
-		ath12k_warn(ar->ab, "failed to send WMI_BCN_TMPL_CMDID\n");
+		ath12k_warn(ab, "failed to send WMI_BCN_TMPL_CMDID\n");
 		dev_kfree_skb(skb);
 	}
 
@@ -1993,6 +2178,65 @@
 		cmd->peer_flags &= cpu_to_le32(~WMI_PEER_HT);
 }
 
+#define EMLSR_PAD_DELAY_MAX	5
+#define EMLSR_TRANS_DELAY_MAX	6
+#define EML_TRANS_TIMEOUT_MAX	11
+#define TU_TO_USEC(t)		((t) << 10)  /* (t) x 1024 */
+
+static u32 ath12k_wmi_get_emlsr_pad_delay_us(u16 eml_cap)
+{
+	/* IEEE Std 802.11be-2024 Table 9-417i—Encoding of the EMLSR
+	 * Padding Delay subfield.
+	 */
+	u32 pad_delay = u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_PADDING_DELAY);
+	static const u32 pad_delay_us[EMLSR_PAD_DELAY_MAX] = {0, 32, 64, 128, 256};
+
+	if (pad_delay >= EMLSR_PAD_DELAY_MAX)
+		return 0;
+
+	return pad_delay_us[pad_delay];
+}
+
+static u32 ath12k_wmi_get_emlsr_trans_delay_us(u16 eml_cap)
+{
+	/* IEEE Std 802.11be-2024 Table 9-417j—Encoding of the EMLSR
+	 * Transition Delay subfield.
+	 */
+	u32 trans_delay = u16_get_bits(eml_cap,
+				       IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY);
+	static const u32 trans_delay_us[EMLSR_TRANS_DELAY_MAX] = {
+		0, 16, 32, 64, 128, 256
+	};
+
+	if (trans_delay >= EMLSR_TRANS_DELAY_MAX)
+		return 0;
+
+	return trans_delay_us[trans_delay];
+}
+
+static u32 ath12k_wmi_get_emlsr_trans_timeout_us(u16 eml_cap)
+{
+	/* IEEE Std 802.11be-2024 Table 9-417m—Encoding of the
+	 * Transition Timeout subfield.
+	 */
+	u8 timeout = u16_get_bits(eml_cap, IEEE80211_EML_CAP_TRANSITION_TIMEOUT);
+	static const u32 trans_timeout_us[EML_TRANS_TIMEOUT_MAX] = {
+		0, 128, 256, 512,
+		TU_TO_USEC(1),
+		TU_TO_USEC((1U << 1)),
+		TU_TO_USEC((1U << 2)),
+		TU_TO_USEC((1U << 3)),
+		TU_TO_USEC((1U << 4)),
+		TU_TO_USEC((1U << 5)),
+		TU_TO_USEC((1U << 6)),
+	};
+
+	if (timeout >= EML_TRANS_TIMEOUT_MAX)
+		return 0;
+
+	return trans_timeout_us[timeout];
+}
+
 int ath12k_wmi_send_peer_assoc_cmd(struct ath12k *ar,
 				   struct ath12k_wmi_peer_assoc_arg *arg)
 {
@@ -2001,12 +2245,16 @@
 	struct ath12k_wmi_vht_rate_set_params *mcs;
 	struct ath12k_wmi_he_rate_set_params *he_mcs;
 	struct ath12k_wmi_eht_rate_set_params *eht_mcs;
+	struct wmi_peer_assoc_mlo_params *ml_params;
+	struct wmi_peer_assoc_mlo_partner_info_params *partner_info;
 	struct sk_buff *skb;
 	struct wmi_tlv *tlv;
 	void *ptr;
-	u32 peer_legacy_rates_align;
+	u32 peer_legacy_rates_align, eml_delay, eml_trans_timeout;
 	u32 peer_ht_rates_align;
 	int i, ret, len;
+	u16 eml_cap;
+	__le32 v;
 
 	peer_legacy_rates_align = roundup(arg->peer_legacy_rates.num_rates,
 					  sizeof(u32));
@@ -2018,8 +2266,13 @@
 	      TLV_HDR_SIZE + (peer_ht_rates_align * sizeof(u8)) +
 	      sizeof(*mcs) + TLV_HDR_SIZE +
 	      (sizeof(*he_mcs) * arg->peer_he_mcs_count) +
-	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count) +
-	      TLV_HDR_SIZE + TLV_HDR_SIZE;
+	      TLV_HDR_SIZE + (sizeof(*eht_mcs) * arg->peer_eht_mcs_count);
+
+	if (arg->ml.enabled)
+		len += TLV_HDR_SIZE + sizeof(*ml_params) +
+		       TLV_HDR_SIZE + (arg->ml.num_partner_links * sizeof(*partner_info));
+	else
+		len += (2 * TLV_HDR_SIZE);
 
 	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
 	if (!skb)
@@ -2138,17 +2391,61 @@
 		he_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
 							    sizeof(*he_mcs));
 
-		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
-		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
+		he_mcs->rx_mcs_set = cpu_to_le32(arg->peer_he_tx_mcs_set[i]);
+		he_mcs->tx_mcs_set = cpu_to_le32(arg->peer_he_rx_mcs_set[i]);
 		ptr += sizeof(*he_mcs);
 	}
 
-	/* MLO header tag with 0 length */
-	len = 0;
 	tlv = ptr;
+	len = arg->ml.enabled ? sizeof(*ml_params) : 0;
 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
 	ptr += TLV_HDR_SIZE;
+	if (!len)
+		goto skip_ml_params;
+
+	ml_params = ptr;
+	ml_params->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_PEER_ASSOC_PARAMS,
+						       len);
+	ml_params->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+	if (arg->ml.assoc_link)
+		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
+
+	if (arg->ml.primary_umac)
+		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
+
+	if (arg->ml.logical_link_idx_valid)
+		ml_params->flags |=
+			cpu_to_le32(ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID);
+
+	if (arg->ml.peer_id_valid)
+		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_PEER_ID_VALID);
+
+	ether_addr_copy(ml_params->mld_addr.addr, arg->ml.mld_addr);
+	ml_params->logical_link_idx = cpu_to_le32(arg->ml.logical_link_idx);
+	ml_params->ml_peer_id = cpu_to_le32(arg->ml.ml_peer_id);
+	ml_params->ieee_link_id = cpu_to_le32(arg->ml.ieee_link_id);
+
+	eml_cap = arg->ml.eml_cap;
+	if (u16_get_bits(eml_cap, IEEE80211_EML_CAP_EMLSR_SUPP)) {
+		ml_params->flags |= cpu_to_le32(ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT);
+		/* Padding delay */
+		eml_delay = ath12k_wmi_get_emlsr_pad_delay_us(eml_cap);
+		ml_params->emlsr_padding_delay_us = cpu_to_le32(eml_delay);
+		/* Transition delay */
+		eml_delay = ath12k_wmi_get_emlsr_trans_delay_us(eml_cap);
+		ml_params->emlsr_trans_delay_us = cpu_to_le32(eml_delay);
+		/* Transition timeout */
+		eml_trans_timeout = ath12k_wmi_get_emlsr_trans_timeout_us(eml_cap);
+		ml_params->emlsr_trans_timeout_us = cpu_to_le32(eml_trans_timeout);
+		ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi peer (%pM) emlsr padding delay %u, trans delay %u trans timeout %u",
+			   arg->peer_mac, ml_params->emlsr_padding_delay_us,
+			   ml_params->emlsr_trans_delay_us,
+			   ml_params->emlsr_trans_timeout_us);
+	}
+	ptr += sizeof(*ml_params);
 
+skip_ml_params:
 	/* Loop through the EHT rate set */
 	len = arg->peer_eht_mcs_count * sizeof(*eht_mcs);
 	tlv = ptr;
@@ -2157,7 +2454,7 @@
 
 	for (i = 0; i < arg->peer_eht_mcs_count; i++) {
 		eht_mcs = ptr;
-		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HE_RATE_SET,
+		eht_mcs->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_EHT_RATE_SET,
 							     sizeof(*eht_mcs));
 
 		eht_mcs->rx_mcs_set = cpu_to_le32(arg->peer_eht_rx_mcs_set[i]);
@@ -2165,14 +2462,51 @@
 		ptr += sizeof(*eht_mcs);
 	}
 
-	/* ML partner links tag with 0 length */
-	len = 0;
+	/* Update MCS15 capability */
+	if (!arg->enable_mcs15)
+		cmd->peer_eht_ops |= cpu_to_le32(IEEE80211_EHT_OPER_MCS15_DISABLE);
+
 	tlv = ptr;
+	len = arg->ml.enabled ? arg->ml.num_partner_links * sizeof(*partner_info) : 0;
+	/* fill ML Partner links */
 	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, len);
 	ptr += TLV_HDR_SIZE;
 
+	if (len == 0)
+		goto send;
+
+	for (i = 0; i < arg->ml.num_partner_links; i++) {
+		u32 cmd = WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC;
+
+		partner_info = ptr;
+		partner_info->tlv_header = ath12k_wmi_tlv_cmd_hdr(cmd,
+								  sizeof(*partner_info));
+		partner_info->vdev_id = cpu_to_le32(arg->ml.partner_info[i].vdev_id);
+		partner_info->hw_link_id =
+			cpu_to_le32(arg->ml.partner_info[i].hw_link_id);
+		partner_info->flags = cpu_to_le32(ATH12K_WMI_FLAG_MLO_ENABLED);
+
+		if (arg->ml.partner_info[i].assoc_link)
+			partner_info->flags |=
+				cpu_to_le32(ATH12K_WMI_FLAG_MLO_ASSOC_LINK);
+
+		if (arg->ml.partner_info[i].primary_umac)
+			partner_info->flags |=
+				cpu_to_le32(ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC);
+
+		if (arg->ml.partner_info[i].logical_link_idx_valid) {
+			v = cpu_to_le32(ATH12K_WMI_FLAG_MLO_LINK_ID_VALID);
+			partner_info->flags |= v;
+		}
+
+		partner_info->logical_link_idx =
+			cpu_to_le32(arg->ml.partner_info[i].logical_link_idx);
+		ptr += sizeof(*partner_info);
+	}
+
+send:
 	ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
-		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x\n",
+		   "wmi peer assoc vdev id %d assoc id %d peer mac %pM peer_flags %x rate_caps %x peer_caps %x listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d peer_mpdu_density %d vht_caps %x he cap_info %x he ops %x he cap_info_ext %x he phy %x %x %x peer_bw_rxnss_override %x peer_flags_ext %x eht mac_cap %x %x eht phy_cap %x %x %x peer_eht_ops %x\n",
 		   cmd->vdev_id, cmd->peer_associd, arg->peer_mac,
 		   cmd->peer_flags, cmd->peer_rate_caps, cmd->peer_caps,
 		   cmd->peer_listen_intval, cmd->peer_ht_caps,
@@ -2185,7 +2519,7 @@
 		   cmd->peer_bw_rxnss_override, cmd->peer_flags_ext,
 		   cmd->peer_eht_cap_mac[0], cmd->peer_eht_cap_mac[1],
 		   cmd->peer_eht_cap_phy[0], cmd->peer_eht_cap_phy[1],
-		   cmd->peer_eht_cap_phy[2]);
+		   cmd->peer_eht_cap_phy[2], cmd->peer_eht_ops);
 
 	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_PEER_ASSOC_CMDID);
 	if (ret) {
@@ -2203,15 +2537,15 @@
 	/* setup commonly used values */
 	arg->scan_req_id = 1;
 	arg->scan_priority = WMI_SCAN_PRIORITY_LOW;
-	arg->dwell_time_active = 50;
-	arg->dwell_time_active_2g = 0;
+	arg->dwell_time_active = 175;
+	arg->dwell_time_active_2g = 175;
 	arg->dwell_time_passive = 150;
-	arg->dwell_time_active_6g = 40;
+	arg->dwell_time_active_6g = 175;
 	arg->dwell_time_passive_6g = 30;
 	arg->min_rest_time = 50;
 	arg->max_rest_time = 500;
 	arg->repeat_probe_time = 0;
-	arg->probe_spacing_time = 0;
+	arg->probe_spacing_time = 100;
 	arg->idle_time = 0;
 	arg->max_scan_time = 20000;
 	arg->probe_delay = 5;
@@ -2221,6 +2555,7 @@
 				  WMI_SCAN_EVENT_FOREIGN_CHAN |
 				  WMI_SCAN_EVENT_DEQUEUED;
 	arg->scan_f_chan_stat_evnt = 1;
+	arg->scan_f_force_active_dfs_chn = 1;
 	arg->num_bssid = 1;
 
 	/* fill bssid_list[0] with 0xff, otherwise bssid and RA will be
@@ -2627,6 +2962,8 @@
 						  WMI_CHAN_REG_INFO1_REG_CLS);
 			*reg2 |= le32_encode_bits(channel_arg->antennamax,
 						  WMI_CHAN_REG_INFO2_ANT_MAX);
+			*reg2 |= le32_encode_bits(channel_arg->maxregpower,
+						  WMI_CHAN_REG_INFO2_MAX_TX_PWR);
 
 			ath12k_dbg(ar->ab, ATH12K_DBG_WMI,
 				   "WMI chan scan list chan[%d] = %u, chan_info->info %8x\n",
@@ -3417,15 +3754,15 @@
 		arg[i].pdev_id = pdev->pdev_id;
 
 		switch (pdev->cap.supported_bands) {
-		case WMI_HOST_WLAN_2G_5G_CAP:
+		case WMI_HOST_WLAN_2GHZ_5GHZ_CAP:
 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
 			break;
-		case WMI_HOST_WLAN_2G_CAP:
+		case WMI_HOST_WLAN_2GHZ_CAP:
 			arg[i].start_freq = hal_reg_cap->low_2ghz_chan;
 			arg[i].end_freq = hal_reg_cap->high_2ghz_chan;
 			break;
-		case WMI_HOST_WLAN_5G_CAP:
+		case WMI_HOST_WLAN_5GHZ_CAP:
 			arg[i].start_freq = hal_reg_cap->low_5ghz_chan;
 			arg[i].end_freq = hal_reg_cap->high_5ghz_chan;
 			break;
@@ -4372,6 +4709,7 @@
 	return 0;
 
 err:
+	kfree(svc_rdy_ext.mac_phy_caps);
 	ath12k_wmi_free_dbring_caps(ab);
 	return ret;
 }
@@ -4470,7 +4808,7 @@
 		bands = pdev->cap.supported_bands;
 	}
 
-	if (bands & WMI_HOST_WLAN_2G_CAP) {
+	if (bands & WMI_HOST_WLAN_2GHZ_CAP) {
 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_2GHZ,
 					  caps->eht_cap_mac_info_2ghz,
 					  caps->eht_cap_phy_info_2ghz,
@@ -4479,7 +4817,7 @@
 					  caps->eht_cap_info_internal);
 	}
 
-	if (bands & WMI_HOST_WLAN_5G_CAP) {
+	if (bands & WMI_HOST_WLAN_5GHZ_CAP) {
 		ath12k_wmi_eht_caps_parse(pdev, NL80211_BAND_5GHZ,
 					  caps->eht_cap_mac_info_5ghz,
 					  caps->eht_cap_phy_info_5ghz,
@@ -4495,6 +4833,9 @@
 					  caps->eht_cap_info_internal);
 	}
 
+	pdev->cap.eml_cap = le32_to_cpu(caps->eml_capability);
+	pdev->cap.mld_cap = le32_to_cpu(caps->mld_capability);
+
 	return 0;
 }
 
@@ -4690,7 +5031,7 @@
 	for (count = 0; count < num_reg_rules; count++) {
 		start_freq = le32_get_bits(rule[count].freq_info, REG_RULE_START_FREQ);
 
-		if (start_freq >= ATH12K_MIN_6G_FREQ)
+		if (start_freq >= ATH12K_MIN_6GHZ_FREQ)
 			num_invalid_5ghz_rules++;
 	}
 
@@ -4760,9 +5101,9 @@
 	for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
 		num_6g_reg_rules_ap[i] = reg_info->num_6g_reg_rules_ap[i];
 
-		if (num_6g_reg_rules_ap[i] > MAX_6G_REG_RULES) {
+		if (num_6g_reg_rules_ap[i] > MAX_6GHZ_REG_RULES) {
 			ath12k_warn(ab, "Num 6G reg rules for AP mode(%d) exceeds max limit (num_6g_reg_rules_ap: %d, max_rules: %d)\n",
-				    i, num_6g_reg_rules_ap[i], MAX_6G_REG_RULES);
+				    i, num_6g_reg_rules_ap[i], MAX_6GHZ_REG_RULES);
 			kfree(tb);
 			return -EINVAL;
 		}
@@ -4783,9 +5124,9 @@
 				reg_info->num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
 		total_reg_rules += num_6g_reg_rules_cl[WMI_REG_VLP_AP][i];
 
-		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6G_REG_RULES ||
-		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6G_REG_RULES ||
-		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] >  MAX_6G_REG_RULES) {
+		if (num_6g_reg_rules_cl[WMI_REG_INDOOR_AP][i] > MAX_6GHZ_REG_RULES ||
+		    num_6g_reg_rules_cl[WMI_REG_STD_POWER_AP][i] > MAX_6GHZ_REG_RULES ||
+		    num_6g_reg_rules_cl[WMI_REG_VLP_AP][i] >  MAX_6GHZ_REG_RULES) {
 			ath12k_warn(ab, "Num 6g client reg rules exceeds max limit, for client(type: %d)\n",
 				    i);
 			kfree(tb);
@@ -5221,6 +5562,9 @@
 	if ((!(info->flags & IEEE80211_TX_CTL_NO_ACK)) && !status)
 		info->flags |= IEEE80211_TX_STAT_ACK;
 
+	if ((info->flags & IEEE80211_TX_CTL_NO_ACK) && !status)
+		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+
 	ieee80211_tx_status_irqsafe(ath12k_ar_to_hw(ar), msdu);
 
 	num_mgmt = atomic_dec_if_positive(&ar->num_pending_mgmt_tx);
@@ -5472,6 +5816,8 @@
 	}
 
 	arg->mac_addr = ev->peer_macaddr.addr;
+	arg->reason = ev->reason;
+	arg->rssi = ev->rssi;
 
 	kfree(tb);
 	return 0;
@@ -5669,10 +6015,11 @@
 
 static int
 ath12k_pull_pdev_temp_ev(struct ath12k_base *ab, struct sk_buff *skb,
-			 const struct wmi_pdev_temperature_event *ev)
+			 struct wmi_pdev_temperature_event *ev)
 {
 	const void **tb;
 	int ret;
+	const struct wmi_pdev_temperature_event *_ev;
 
 	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
 	if (IS_ERR(tb)) {
@@ -5681,13 +6028,14 @@
 		return ret;
 	}
 
-	ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
-	if (!ev) {
+	_ev = tb[WMI_TAG_PDEV_TEMPERATURE_EVENT];
+	if (!_ev) {
 		ath12k_warn(ab, "failed to fetch pdev temp ev");
 		kfree(tb);
 		return -EPROTO;
 	}
 
+	*ev = *_ev;
 	kfree(tb);
 	return 0;
 }
@@ -5704,22 +6052,10 @@
 	dev_kfree_skb(skb);
 }
 
-static bool ath12k_reg_is_world_alpha(char *alpha)
-{
-	if (alpha[0] == '0' && alpha[1] == '0')
-		return true;
-
-	if (alpha[0] == 'n' && alpha[1] == 'a')
-		return true;
-
-	return false;
-}
-
 static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *skb)
 {
 	struct ath12k_reg_info *reg_info = NULL;
 	struct ieee80211_regdomain *regd = NULL;
-	bool intersect = false;
 	int ret = 0, pdev_idx, i, j;
 	struct ath12k *ar;
 
@@ -5767,24 +6103,15 @@
 		    reg_info->alpha2, 2))
 		goto mem_free;
 
-	/* Intersect new rules with default regd if a new country setting was
-	 * requested, i.e a default regd was already set during initialization
-	 * and the regd coming from this event has a valid country info.
-	 */
-	if (ab->default_regd[pdev_idx] &&
-	    !ath12k_reg_is_world_alpha((char *)
-		ab->default_regd[pdev_idx]->alpha2) &&
-	    !ath12k_reg_is_world_alpha((char *)reg_info->alpha2))
-		intersect = true;
-
-	regd = ath12k_reg_build_regd(ab, reg_info, intersect);
+	regd = ath12k_reg_build_regd(ab, reg_info);
 	if (!regd) {
 		ath12k_warn(ab, "failed to build regd from reg_info\n");
 		goto fallback;
 	}
 
 	spin_lock(&ab->base_lock);
-	if (test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)) {
+	if (ab->default_regd[pdev_idx] &&
+	    ab->regd_change_user_request[pdev_idx]) {
 		/* Once mac is registered, ar is valid and all CC events from
 		 * fw is considered to be received due to user requests
 		 * currently.
@@ -5792,6 +6119,7 @@
 		 * generated regd to ar. NULL pointer handling will be
 		 * taken care by kfree itself.
 		 */
+		ab->regd_change_user_request[pdev_idx] = false;
 		ar = ab->pdevs[pdev_idx].ar;
 		kfree(ab->new_regd[pdev_idx]);
 		ab->new_regd[pdev_idx] = regd;
@@ -5806,6 +6134,8 @@
 		/* This regd would be applied during mac registration */
 		ab->default_regd[pdev_idx] = regd;
 	}
+
+	ab->regd_freed = false;
 	ab->dfs_region = reg_info->dfs_region;
 	spin_unlock(&ab->base_lock);
 
@@ -5872,6 +6202,16 @@
 		for (i = 0; i < ab->num_radios; i++) {
 			pdev = &ab->pdevs[i];
 			ether_addr_copy(pdev->mac_addr, addr_list[i].addr);
+			if (!memcmp(pdev->mac_addr, "\x00\x03\x7f", 3)) {
+				/* randomly assigned mac address
+				 * chosen by firmware, but the problem
+				 * is that it will choose the same one
+				 * for all PCIe cards and we will get
+				 * duplicate addresses, at least make
+				 * sure to mark them locally assigned
+				 * so userspace will override them */
+				pdev->mac_addr[0] |= (1 << 1);
+			}
 		}
 		ab->pdevs_macaddr_valid = true;
 		break;
@@ -6071,7 +6411,7 @@
 		goto exit;
 	}
 
-	if ((test_bit(ATH12K_CAC_RUNNING, &ar->dev_flags)) ||
+	if ((test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) ||
 	    (rx_ev.status & (WMI_RX_STATUS_ERR_DECRYPT |
 			     WMI_RX_STATUS_ERR_KEY_CACHE_MISS |
 			     WMI_RX_STATUS_ERR_CRC))) {
@@ -6082,13 +6422,13 @@
 	if (rx_ev.status & WMI_RX_STATUS_ERR_MIC)
 		status->flag |= RX_FLAG_MMIC_ERROR;
 
-	if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ &&
-	    rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) {
+	if (rx_ev.chan_freq >= ATH12K_MIN_6GHZ_FREQ &&
+	    rx_ev.chan_freq <= ATH12K_MAX_6GHZ_FREQ) {
 		status->band = NL80211_BAND_6GHZ;
 		status->freq = rx_ev.chan_freq;
 	} else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) {
 		status->band = NL80211_BAND_2GHZ;
-	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) {
+	} else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5GHZ_CHAN) {
 		status->band = NL80211_BAND_5GHZ;
 	} else {
 		/* Shouldn't happen unless list of advertised channels to
@@ -6200,7 +6540,8 @@
 
 			spin_lock_bh(&ar->data_lock);
 			if (ar->scan.state == state &&
-			    ar->scan.vdev_id == vdev_id) {
+			    ar->scan.arvif &&
+			    ar->scan.arvif->vdev_id == vdev_id) {
 				spin_unlock_bh(&ar->data_lock);
 				return ar;
 			}
@@ -6296,6 +6637,7 @@
 	struct wmi_peer_sta_kickout_arg arg = {};
 	struct ieee80211_sta *sta;
 	struct ath12k_peer *peer;
+	struct ath12k_vif *ahvif;
 	struct ath12k *ar;
 
 	if (ath12k_pull_peer_sta_kickout_ev(ab, skb, &arg) != 0) {
@@ -6322,6 +6664,8 @@
 		goto exit;
 	}
 
+	ahvif = ath12k_vif_to_ahvif(peer->vif);
+
 	sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
 					   arg.mac_addr, NULL);
 	if (!sta) {
@@ -6330,11 +6674,16 @@
 		goto exit;
 	}
 
+	if (ar->ab->hw_params->handle_beacon_miss &&
+	    ahvif->vif->type == NL80211_IFTYPE_STATION &&
+	    arg.reason == __cpu_to_le32(WMI_PEER_STA_KICKOUT_REASON_INACTIVITY))
+		ath12k_mac_handle_beacon_miss(ar, peer->vdev_id);
+	else
+		ieee80211_report_low_ack(sta, 10);
+
 	ath12k_dbg(ab, ATH12K_DBG_WMI, "peer sta kickout event %pM",
 		   arg.mac_addr);
 
-	ieee80211_report_low_ack(sta, 10);
-
 exit:
 	spin_unlock_bh(&ab->base_lock);
 	rcu_read_unlock();
@@ -6521,12 +6870,14 @@
 	survey->noise     = le32_to_cpu(bss_ch_info_ev.noise_floor);
 	survey->time      = div_u64(total, cc_freq_hz);
 	survey->time_busy = div_u64(busy, cc_freq_hz);
-	survey->time_rx   = div_u64(rx_bss, cc_freq_hz);
+	survey->time_rx   = div_u64(rx, cc_freq_hz);
 	survey->time_tx   = div_u64(tx, cc_freq_hz);
+	survey->time_bss_rx = div_u64(rx_bss, cc_freq_hz);
 	survey->filled   |= (SURVEY_INFO_NOISE_DBM |
 			     SURVEY_INFO_TIME |
 			     SURVEY_INFO_TIME_BUSY |
 			     SURVEY_INFO_TIME_RX |
+			     SURVEY_INFO_TIME_BSS_RX |
 			     SURVEY_INFO_TIME_TX);
 exit:
 	spin_unlock_bh(&ar->data_lock);
@@ -6668,8 +7019,640 @@
 	rcu_read_unlock();
 }
 
+static void
+ath12k_wmi_fw_vdev_stats_dump(struct ath12k *ar,
+			      struct ath12k_fw_stats *fw_stats,
+			      char *buf, u32 *length)
+{
+	const struct ath12k_fw_stats_vdev *vdev;
+	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+	struct ath12k_link_vif *arvif;
+	u32 len = *length;
+	u8 *vif_macaddr;
+	int i;
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n",
+			 "ath12k VDEV stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			 "=================");
+
+	list_for_each_entry(vdev, &fw_stats->vdevs, list) {
+		arvif = ath12k_mac_get_arvif(ar, vdev->vdev_id);
+		if (!arvif)
+			continue;
+		vif_macaddr = arvif->ahvif->vif->addr;
+
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "VDEV ID", vdev->vdev_id);
+		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+				 "VDEV MAC address", vif_macaddr);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "beacon snr", vdev->beacon_snr);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "data snr", vdev->data_snr);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "num rx frames", vdev->num_rx_frames);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "num rts fail", vdev->num_rts_fail);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "num rts success", vdev->num_rts_success);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "num rx err", vdev->num_rx_err);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "num rx discard", vdev->num_rx_discard);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "num tx not acked", vdev->num_tx_not_acked);
+
+		for (i = 0 ; i < WLAN_MAX_AC; i++)
+			len += scnprintf(buf + len, buf_len - len,
+					"%25s [%02d] %u\n",
+					"num tx frames", i,
+					vdev->num_tx_frames[i]);
+
+		for (i = 0 ; i < WLAN_MAX_AC; i++)
+			len += scnprintf(buf + len, buf_len - len,
+					"%25s [%02d] %u\n",
+					"num tx frames retries", i,
+					vdev->num_tx_frames_retries[i]);
+
+		for (i = 0 ; i < WLAN_MAX_AC; i++)
+			len += scnprintf(buf + len, buf_len - len,
+					"%25s [%02d] %u\n",
+					"num tx frames failures", i,
+					vdev->num_tx_frames_failures[i]);
+
+		for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
+			len += scnprintf(buf + len, buf_len - len,
+					"%25s [%02d] 0x%08x\n",
+					"tx rate history", i,
+					vdev->tx_rate_history[i]);
+		for (i = 0 ; i < MAX_TX_RATE_VALUES; i++)
+			len += scnprintf(buf + len, buf_len - len,
+					"%25s [%02d] %u\n",
+					"beacon rssi history", i,
+					vdev->beacon_rssi_history[i]);
+
+		len += scnprintf(buf + len, buf_len - len, "\n");
+		*length = len;
+	}
+}
+
+static void
+ath12k_wmi_fw_bcn_stats_dump(struct ath12k *ar,
+			     struct ath12k_fw_stats *fw_stats,
+			     char *buf, u32 *length)
+{
+	const struct ath12k_fw_stats_bcn *bcn;
+	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+	struct ath12k_link_vif *arvif;
+	u32 len = *length;
+	size_t num_bcn;
+
+	num_bcn = list_count_nodes(&fw_stats->bcn);
+
+	len += scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
+			 "ath12k Beacon stats", num_bcn);
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			 "===================");
+
+	list_for_each_entry(bcn, &fw_stats->bcn, list) {
+		arvif = ath12k_mac_get_arvif(ar, bcn->vdev_id);
+		if (!arvif)
+			continue;
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "VDEV ID", bcn->vdev_id);
+		len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
+				 "VDEV MAC address", arvif->ahvif->vif->addr);
+		len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+				 "================");
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "Num of beacon tx success", bcn->tx_bcn_succ_cnt);
+		len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
+				 "Num of beacon tx failures", bcn->tx_bcn_outage_cnt);
+
+		len += scnprintf(buf + len, buf_len - len, "\n");
+		*length = len;
+	}
+}
+
+static void
+ath12k_wmi_fw_pdev_base_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
+				   char *buf, u32 *length, u64 fw_soc_drop_cnt)
+{
+	u32 len = *length;
+	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+	len = scnprintf(buf + len, buf_len - len, "\n");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n",
+			"ath12k PDEV stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			"=================");
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			"Channel noise floor", pdev->ch_noise_floor);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"Channel TX power", pdev->chan_tx_power);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"TX frame count", pdev->tx_frame_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"RX frame count", pdev->rx_frame_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"RX clear count", pdev->rx_clear_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"Cycle count", pdev->cycle_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			"PHY error count", pdev->phy_err_count);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10llu\n",
+			"soc drop count", fw_soc_drop_cnt);
+
+	*length = len;
+}
+
+static void
+ath12k_wmi_fw_pdev_tx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
+				 char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+			 "ath12k PDEV TX stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			 "====================");
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HTT cookies queued", pdev->comp_queued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HTT cookies disp.", pdev->comp_delivered);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDU queued", pdev->msdu_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDU queued", pdev->mpdu_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs dropped", pdev->wmm_drop);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Local enqued", pdev->local_enqued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Local freed", pdev->local_freed);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "HW queued", pdev->hw_queued);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PPDUs reaped", pdev->hw_reaped);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Num underruns", pdev->underrun);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PPDUs cleaned", pdev->tx_abort);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs requeued", pdev->mpdus_requed);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "Excessive retries", pdev->tx_ko);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "HW rate", pdev->data_rc);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "Sched self triggers", pdev->self_triggers);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "Dropped due to SW retries",
+			 pdev->sw_retry_failure);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "Illegal rate phy errors",
+			 pdev->illgl_rate_phy_err);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "PDEV continuous xretry", pdev->pdev_cont_xretry);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "TX timeout", pdev->pdev_tx_timeout);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "PDEV resets", pdev->pdev_resets);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "Stateless TIDs alloc failures",
+			 pdev->stateless_tid_alloc_failure);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "PHY underrun", pdev->phy_underrun);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
+			 "MPDU is more than txop limit", pdev->txop_ovf);
+	*length = len;
+}
+
+static void
+ath12k_wmi_fw_pdev_rx_stats_dump(const struct ath12k_fw_stats_pdev *pdev,
+				 char *buf, u32 *length)
+{
+	u32 len = *length;
+	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+	len += scnprintf(buf + len, buf_len - len, "\n%30s\n",
+			 "ath12k PDEV RX stats");
+	len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
+			 "====================");
+
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Mid PPDU route change",
+			 pdev->mid_ppdu_route_change);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Tot. number of statuses", pdev->status_rcvd);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on rings 0", pdev->r0_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on rings 1", pdev->r1_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on rings 2", pdev->r2_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Extra frags on rings 3", pdev->r3_frags);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs delivered to HTT", pdev->htt_msdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs delivered to HTT", pdev->htt_mpdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MSDUs delivered to stack", pdev->loc_msdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDUs delivered to stack", pdev->loc_mpdus);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "Oversized AMSUs", pdev->oversize_amsdu);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PHY errors", pdev->phy_errs);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "PHY errors drops", pdev->phy_err_drop);
+	len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
+			 "MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
+	*length = len;
+}
+
+static void
+ath12k_wmi_fw_pdev_stats_dump(struct ath12k *ar,
+			      struct ath12k_fw_stats *fw_stats,
+			      char *buf, u32 *length)
+{
+	const struct ath12k_fw_stats_pdev *pdev;
+	u32 len = *length;
+
+	pdev = list_first_entry_or_null(&fw_stats->pdevs,
+					struct ath12k_fw_stats_pdev, list);
+	if (!pdev) {
+		ath12k_warn(ar->ab, "failed to get pdev stats\n");
+		return;
+	}
+
+	ath12k_wmi_fw_pdev_base_stats_dump(pdev, buf, &len,
+					   ar->ab->fw_soc_drop_count);
+	ath12k_wmi_fw_pdev_tx_stats_dump(pdev, buf, &len);
+	ath12k_wmi_fw_pdev_rx_stats_dump(pdev, buf, &len);
+
+	*length = len;
+}
+
+void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
+			      struct ath12k_fw_stats *fw_stats,
+			      u32 stats_id, char *buf)
+{
+	u32 len = 0;
+	u32 buf_len = ATH12K_FW_STATS_BUF_SIZE;
+
+	spin_lock_bh(&ar->data_lock);
+
+	switch (stats_id) {
+	case WMI_REQUEST_VDEV_STAT:
+		ath12k_wmi_fw_vdev_stats_dump(ar, fw_stats, buf, &len);
+		break;
+	case WMI_REQUEST_BCN_STAT:
+		ath12k_wmi_fw_bcn_stats_dump(ar, fw_stats, buf, &len);
+		break;
+	case WMI_REQUEST_PDEV_STAT:
+		ath12k_wmi_fw_pdev_stats_dump(ar, fw_stats, buf, &len);
+		break;
+	default:
+		break;
+	}
+
+	spin_unlock_bh(&ar->data_lock);
+
+	if (len >= buf_len)
+		buf[len - 1] = 0;
+	else
+		buf[len] = 0;
+
+	ath12k_fw_stats_reset(ar);
+}
+
+static void
+ath12k_wmi_pull_vdev_stats(const struct wmi_vdev_stats_params *src,
+			   struct ath12k_fw_stats_vdev *dst)
+{
+	int i;
+
+	dst->vdev_id = le32_to_cpu(src->vdev_id);
+	dst->beacon_snr = le32_to_cpu(src->beacon_snr);
+	dst->data_snr = le32_to_cpu(src->data_snr);
+	dst->num_rx_frames = le32_to_cpu(src->num_rx_frames);
+	dst->num_rts_fail = le32_to_cpu(src->num_rts_fail);
+	dst->num_rts_success = le32_to_cpu(src->num_rts_success);
+	dst->num_rx_err = le32_to_cpu(src->num_rx_err);
+	dst->num_rx_discard = le32_to_cpu(src->num_rx_discard);
+	dst->num_tx_not_acked = le32_to_cpu(src->num_tx_not_acked);
+
+	for (i = 0; i < WLAN_MAX_AC; i++)
+		dst->num_tx_frames[i] =
+			le32_to_cpu(src->num_tx_frames[i]);
+
+	for (i = 0; i < WLAN_MAX_AC; i++)
+		dst->num_tx_frames_retries[i] =
+			le32_to_cpu(src->num_tx_frames_retries[i]);
+
+	for (i = 0; i < WLAN_MAX_AC; i++)
+		dst->num_tx_frames_failures[i] =
+			le32_to_cpu(src->num_tx_frames_failures[i]);
+
+	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
+		dst->tx_rate_history[i] =
+			le32_to_cpu(src->tx_rate_history[i]);
+
+	for (i = 0; i < MAX_TX_RATE_VALUES; i++)
+		dst->beacon_rssi_history[i] =
+			le32_to_cpu(src->beacon_rssi_history[i]);
+}
+
+static void
+ath12k_wmi_pull_bcn_stats(const struct ath12k_wmi_bcn_stats_params *src,
+			  struct ath12k_fw_stats_bcn *dst)
+{
+	dst->vdev_id = le32_to_cpu(src->vdev_id);
+	dst->tx_bcn_succ_cnt = le32_to_cpu(src->tx_bcn_succ_cnt);
+	dst->tx_bcn_outage_cnt = le32_to_cpu(src->tx_bcn_outage_cnt);
+}
+
+static void
+ath12k_wmi_pull_pdev_stats_base(const struct ath12k_wmi_pdev_base_stats_params *src,
+				struct ath12k_fw_stats_pdev *dst)
+{
+	dst->ch_noise_floor = a_sle32_to_cpu(src->chan_nf);
+	dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
+	dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
+	dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
+	dst->cycle_count = __le32_to_cpu(src->cycle_count);
+	dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
+	dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
+}
+
+static void
+ath12k_wmi_pull_pdev_stats_tx(const struct ath12k_wmi_pdev_tx_stats_params *src,
+			      struct ath12k_fw_stats_pdev *dst)
+{
+	dst->comp_queued = a_sle32_to_cpu(src->comp_queued);
+	dst->comp_delivered = a_sle32_to_cpu(src->comp_delivered);
+	dst->msdu_enqued = a_sle32_to_cpu(src->msdu_enqued);
+	dst->mpdu_enqued = a_sle32_to_cpu(src->mpdu_enqued);
+	dst->wmm_drop = a_sle32_to_cpu(src->wmm_drop);
+	dst->local_enqued = a_sle32_to_cpu(src->local_enqued);
+	dst->local_freed = a_sle32_to_cpu(src->local_freed);
+	dst->hw_queued = a_sle32_to_cpu(src->hw_queued);
+	dst->hw_reaped = a_sle32_to_cpu(src->hw_reaped);
+	dst->underrun = a_sle32_to_cpu(src->underrun);
+	dst->tx_abort = a_sle32_to_cpu(src->tx_abort);
+	dst->mpdus_requed = a_sle32_to_cpu(src->mpdus_requed);
+	dst->tx_ko = __le32_to_cpu(src->tx_ko);
+	dst->data_rc = __le32_to_cpu(src->data_rc);
+	dst->self_triggers = __le32_to_cpu(src->self_triggers);
+	dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
+	dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
+	dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
+	dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
+	dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
+	dst->stateless_tid_alloc_failure =
+		__le32_to_cpu(src->stateless_tid_alloc_failure);
+	dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
+	dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
+}
+
+static void
+ath12k_wmi_pull_pdev_stats_rx(const struct ath12k_wmi_pdev_rx_stats_params *src,
+			      struct ath12k_fw_stats_pdev *dst)
+{
+	dst->mid_ppdu_route_change =
+		a_sle32_to_cpu(src->mid_ppdu_route_change);
+	dst->status_rcvd = a_sle32_to_cpu(src->status_rcvd);
+	dst->r0_frags = a_sle32_to_cpu(src->r0_frags);
+	dst->r1_frags = a_sle32_to_cpu(src->r1_frags);
+	dst->r2_frags = a_sle32_to_cpu(src->r2_frags);
+	dst->r3_frags = a_sle32_to_cpu(src->r3_frags);
+	dst->htt_msdus = a_sle32_to_cpu(src->htt_msdus);
+	dst->htt_mpdus = a_sle32_to_cpu(src->htt_mpdus);
+	dst->loc_msdus = a_sle32_to_cpu(src->loc_msdus);
+	dst->loc_mpdus = a_sle32_to_cpu(src->loc_mpdus);
+	dst->oversize_amsdu = a_sle32_to_cpu(src->oversize_amsdu);
+	dst->phy_errs = a_sle32_to_cpu(src->phy_errs);
+	dst->phy_err_drop = a_sle32_to_cpu(src->phy_err_drop);
+	dst->mpdu_errs = a_sle32_to_cpu(src->mpdu_errs);
+}
+
+static int ath12k_wmi_tlv_fw_stats_data_parse(struct ath12k_base *ab,
+					      struct wmi_tlv_fw_stats_parse *parse,
+					      const void *ptr,
+					      u16 len)
+{
+	const struct wmi_stats_event *ev = parse->ev;
+	struct ath12k_fw_stats *stats = parse->stats;
+	struct ath12k *ar;
+	struct ath12k_link_vif *arvif;
+	struct ieee80211_sta *sta;
+	struct ath12k_sta *ahsta;
+	struct ath12k_link_sta *arsta;
+	int i, ret = 0;
+	const void *data = ptr;
+
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch update stats ev");
+		return -EPROTO;
+	}
+
+	if (!stats)
+		return -EINVAL;
+
+	rcu_read_lock();
+
+	stats->pdev_id = le32_to_cpu(ev->pdev_id);
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats->pdev_id);
+	if (!ar) {
+		ath12k_warn(ab, "invalid pdev id %d in update stats event\n",
+			    le32_to_cpu(ev->pdev_id));
+		ret = -EPROTO;
+		goto exit;
+	}
+
+	for (i = 0; i < le32_to_cpu(ev->num_vdev_stats); i++) {
+		const struct wmi_vdev_stats_params *src;
+		struct ath12k_fw_stats_vdev *dst;
+
+		src = data;
+		if (len < sizeof(*src)) {
+			ret = -EPROTO;
+			goto exit;
+		}
+
+		arvif = ath12k_mac_get_arvif(ar, le32_to_cpu(src->vdev_id));
+		if (arvif) {
+			sta = ieee80211_find_sta_by_ifaddr(ath12k_ar_to_hw(ar),
+							   arvif->bssid,
+							   NULL);
+			if (sta) {
+				ahsta = ath12k_sta_to_ahsta(sta);
+				arsta = &ahsta->deflink;
+				arsta->rssi_beacon = le32_to_cpu(src->beacon_snr);
+				ath12k_dbg(ab, ATH12K_DBG_WMI,
+					   "wmi stats vdev id %d snr %d\n",
+					   src->vdev_id, src->beacon_snr);
+			} else {
+				ath12k_dbg(ab, ATH12K_DBG_WMI,
+					   "not found station bssid %pM for vdev stat\n",
+					   arvif->bssid);
+			}
+		}
+
+		data += sizeof(*src);
+		len -= sizeof(*src);
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+		ath12k_wmi_pull_vdev_stats(src, dst);
+		stats->stats_id = WMI_REQUEST_VDEV_STAT;
+		list_add_tail(&dst->list, &stats->vdevs);
+	}
+	for (i = 0; i < le32_to_cpu(ev->num_bcn_stats); i++) {
+		const struct ath12k_wmi_bcn_stats_params *src;
+		struct ath12k_fw_stats_bcn *dst;
+
+		src = data;
+		if (len < sizeof(*src)) {
+			ret = -EPROTO;
+			goto exit;
+		}
+
+		data += sizeof(*src);
+		len -= sizeof(*src);
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+		ath12k_wmi_pull_bcn_stats(src, dst);
+		stats->stats_id = WMI_REQUEST_BCN_STAT;
+		list_add_tail(&dst->list, &stats->bcn);
+	}
+	for (i = 0; i < le32_to_cpu(ev->num_pdev_stats); i++) {
+		const struct ath12k_wmi_pdev_stats_params *src;
+		struct ath12k_fw_stats_pdev *dst;
+
+		src = data;
+		if (len < sizeof(*src)) {
+			ret = -EPROTO;
+			goto exit;
+		}
+
+		stats->stats_id = WMI_REQUEST_PDEV_STAT;
+
+		data += sizeof(*src);
+		len -= sizeof(*src);
+
+		dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
+		if (!dst)
+			continue;
+
+		ath12k_wmi_pull_pdev_stats_base(&src->base, dst);
+		ath12k_wmi_pull_pdev_stats_tx(&src->tx, dst);
+		ath12k_wmi_pull_pdev_stats_rx(&src->rx, dst);
+		list_add_tail(&dst->list, &stats->pdevs);
+	}
+
+exit:
+	rcu_read_unlock();
+	return ret;
+}
+
+static int ath12k_wmi_tlv_fw_stats_parse(struct ath12k_base *ab,
+					 u16 tag, u16 len,
+					 const void *ptr, void *data)
+{
+	struct wmi_tlv_fw_stats_parse *parse = data;
+	int ret = 0;
+
+	switch (tag) {
+	case WMI_TAG_STATS_EVENT:
+		parse->ev = ptr;
+		break;
+	case WMI_TAG_ARRAY_BYTE:
+		ret = ath12k_wmi_tlv_fw_stats_data_parse(ab, parse, ptr, len);
+		break;
+	default:
+		break;
+	}
+	return ret;
+}
+
+static int ath12k_wmi_pull_fw_stats(struct ath12k_base *ab, struct sk_buff *skb,
+				    struct ath12k_fw_stats *stats)
+{
+	struct wmi_tlv_fw_stats_parse parse = {};
+
+	stats->stats_id = 0;
+	parse.stats = stats;
+
+	return ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+				   ath12k_wmi_tlv_fw_stats_parse,
+				   &parse);
+}
+
 static void ath12k_update_stats_event(struct ath12k_base *ab, struct sk_buff *skb)
 {
+	struct ath12k_fw_stats stats = {};
+	struct ath12k *ar;
+	int ret;
+
+	INIT_LIST_HEAD(&stats.pdevs);
+	INIT_LIST_HEAD(&stats.vdevs);
+	INIT_LIST_HEAD(&stats.bcn);
+
+	ret = ath12k_wmi_pull_fw_stats(ab, skb, &stats);
+	if (ret) {
+		ath12k_warn(ab, "failed to pull fw stats: %d\n", ret);
+		goto free;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI, "event update stats");
+
+	rcu_read_lock();
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, stats.pdev_id);
+	if (!ar) {
+		rcu_read_unlock();
+		ath12k_warn(ab, "failed to get ar for pdev_id %d: %d\n",
+			    stats.pdev_id, ret);
+		goto free;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+
+	/* WMI_REQUEST_PDEV_STAT can be requested via .get_txpower mac ops or via
+	 * debugfs fw stats. Therefore, processing it separately.
+	 */
+	if (stats.stats_id == WMI_REQUEST_PDEV_STAT) {
+		list_splice_tail_init(&stats.pdevs, &ar->fw_stats.pdevs);
+		ar->fw_stats.fw_stats_done = true;
+		goto complete;
+	}
+
+	/* WMI_REQUEST_VDEV_STAT and WMI_REQUEST_BCN_STAT are currently requested only
+	 * via debugfs fw stats. Hence, processing these in debugfs context.
+	 */
+	ath12k_debugfs_fw_stats_process(ar, &stats);
+
+complete:
+	complete(&ar->fw_stats_complete);
+	spin_unlock_bh(&ar->data_lock);
+	rcu_read_unlock();
+
+	/* Since the stats' pdev, vdev and beacon lists are spliced and reinitialised
+	 * at this point, no need to free the individual list.
+	 */
+	return;
+
+free:
+	ath12k_fw_stats_free(&stats);
 }
 
 /* PDEV_CTL_FAILSAFE_CHECK_EVENT is received from FW when the frequency scanned
@@ -6715,16 +7698,15 @@
 					  const struct ath12k_wmi_pdev_csa_event *ev,
 					  const u32 *vdev_ids)
 {
-	int i;
+	u32 current_switch_count = le32_to_cpu(ev->current_switch_count);
+	u32 num_vdevs = le32_to_cpu(ev->num_vdevs);
+	struct ieee80211_bss_conf *conf;
 	struct ath12k_link_vif *arvif;
 	struct ath12k_vif *ahvif;
-
-	/* Finish CSA once the switch count becomes NULL */
-	if (ev->current_switch_count)
-		return;
+	int i;
 
 	rcu_read_lock();
-	for (i = 0; i < le32_to_cpu(ev->num_vdevs); i++) {
+	for (i = 0; i < num_vdevs; i++) {
 		arvif = ath12k_mac_get_arvif_by_vdev_id(ab, vdev_ids[i]);
 
 		if (!arvif) {
@@ -6734,8 +7716,39 @@
 		}
 		ahvif = arvif->ahvif;
 
-		if (arvif->is_up && ahvif->vif->bss_conf.csa_active)
-			ieee80211_csa_finish(ahvif->vif, 0);
+		if (arvif->link_id >= IEEE80211_MLD_MAX_NUM_LINKS) {
+			ath12k_warn(ab, "Invalid CSA switch count even link id: %d\n",
+				    arvif->link_id);
+			continue;
+		}
+
+		conf = rcu_dereference(ahvif->vif->link_conf[arvif->link_id]);
+		if (!conf) {
+			ath12k_warn(ab, "unable to access bss link conf in process csa for vif %pM link %u\n",
+				    ahvif->vif->addr, arvif->link_id);
+			continue;
+		}
+
+		if (!arvif->is_up || !conf->csa_active)
+			continue;
+
+		/* Finish CSA when counter reaches zero */
+		if (!current_switch_count) {
+			ieee80211_csa_finish(ahvif->vif, arvif->link_id);
+			arvif->current_cntdown_counter = 0;
+		} else if (current_switch_count > 1) {
+			/* If the count in the event is not what we expect, don't update the
+			 * mac80211 count. Since during beacon Tx failure, count in the
+			 * firmware will not decrement and this event will come with the
+			 * previous count value again
+			 */
+			if (current_switch_count != arvif->current_cntdown_counter)
+				continue;
+
+			arvif->current_cntdown_counter =
+				ieee80211_beacon_update_cntdwn(ahvif->vif,
+							       arvif->link_id);
+		}
 	}
 	rcu_read_unlock();
 }
@@ -6779,6 +7792,7 @@
 ath12k_wmi_pdev_dfs_radar_detected_event(struct ath12k_base *ab, struct sk_buff *skb)
 {
 	const void **tb;
+	struct ath12k_mac_get_any_chanctx_conf_arg arg;
 	const struct ath12k_wmi_pdev_radar_event *ev;
 	struct ath12k *ar;
 	int ret;
@@ -6814,13 +7828,22 @@
 		goto exit;
 	}
 
+	arg.ar = ar;
+	arg.chanctx_conf = NULL;
+	ieee80211_iter_chan_contexts_atomic(ath12k_ar_to_hw(ar),
+					    ath12k_mac_get_any_chanctx_conf_iter, &arg);
+	if (!arg.chanctx_conf) {
+		ath12k_warn(ab, "failed to find valid chanctx_conf in radar detected event\n");
+		goto exit;
+	}
+
 	ath12k_dbg(ar->ab, ATH12K_DBG_REG, "DFS Radar Detected in pdev %d\n",
 		   ev->pdev_id);
 
 	if (ar->dfs_block_radar_events)
 		ath12k_info(ab, "DFS Radar detected, but ignored as requested\n");
 	else
-		ieee80211_radar_detected(ath12k_ar_to_hw(ar), NULL);
+		ieee80211_radar_detected(ath12k_ar_to_hw(ar), arg.chanctx_conf);
 
 exit:
 	rcu_read_unlock();
@@ -6828,6 +7851,35 @@
 	kfree(tb);
 }
 
+static void ath12k_tm_wmi_event_segmented(struct ath12k_base *ab, u32 cmd_id,
+					  struct sk_buff *skb)
+{
+	const struct ath12k_wmi_ftm_event *ev;
+	const void **tb;
+	int ret;
+	u16 length;
+
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath12k_warn(ab, "failed to parse ftm event tlv: %d\n", ret);
+		return;
+	}
+
+	ev = tb[WMI_TAG_ARRAY_BYTE];
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch ftm msg\n");
+		kfree(tb);
+		return;
+	}
+
+	length = skb->len - TLV_HDR_SIZE;
+	ath12k_tm_process_event(ab, cmd_id, ev, length);
+	kfree(tb);
+	tb = NULL;
+}
+
 static void
 ath12k_wmi_pdev_temperature_event(struct ath12k_base *ab,
 				  struct sk_buff *skb)
@@ -6851,6 +7903,7 @@
 		goto exit;
 	}
 
+	ath12k_thermal_event_temperature(ar, ev.temp);
 exit:
 	rcu_read_unlock();
 }
@@ -7005,6 +8058,25 @@
 ath12k_wmi_diag_event(struct ath12k_base *ab, struct sk_buff *skb)
 {
 	trace_ath12k_wmi_diag(ab, skb->data, skb->len);
+	const struct wmi_tlv *tlv;
+	u16 tlv_tag, tlv_len;
+	u32 *dev_id;
+	u8 *data;
+
+	tlv = (struct wmi_tlv *)skb->data;
+	tlv_tag = FIELD_GET(WMI_TLV_TAG, tlv->header);
+	tlv_len = FIELD_GET(WMI_TLV_LEN, tlv->header);
+
+	if (tlv_tag == WMI_TAG_ARRAY_BYTE) {
+		data = skb->data + sizeof(struct wmi_tlv);
+		dev_id = (uint32_t *)data;
+		*dev_id = ab->hw_params->hw_rev + ATH12K_DIAG_HW_ID_OFFSET;
+	} else {
+		ath12k_warn(ab, "WMI Diag Event missing required tlv\n");
+		return;
+	}
+
+	ath12k_fwlog_write(ab, data, tlv_len);
 }
 
 static void ath12k_wmi_twt_enable_event(struct ath12k_base *ab,
@@ -7175,6 +8247,459 @@
 	kfree(tb);
 }
 
+static void ath12k_wmi_event_mlo_setup_complete(struct ath12k_base *ab,
+						struct sk_buff *skb)
+{
+	const struct wmi_mlo_setup_complete_event *ev;
+	struct ath12k *ar = NULL;
+	struct ath12k_pdev *pdev;
+	const void **tb;
+	int ret, i;
+
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath12k_warn(ab, "failed to parse mlo setup complete event tlv: %d\n",
+			    ret);
+		return;
+	}
+
+	ev = tb[WMI_TAG_MLO_SETUP_COMPLETE_EVENT];
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch mlo setup complete event\n");
+		kfree(tb);
+		return;
+	}
+
+	if (le32_to_cpu(ev->pdev_id) > ab->num_radios)
+		goto skip_lookup;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		if (pdev && pdev->pdev_id == le32_to_cpu(ev->pdev_id)) {
+			ar = pdev->ar;
+			break;
+		}
+	}
+
+skip_lookup:
+	if (!ar) {
+		ath12k_warn(ab, "invalid pdev_id %d status %u in setup complete event\n",
+			    le32_to_cpu(ev->pdev_id), le32_to_cpu(ev->status));
+		goto out;
+	}
+
+	ar->mlo_setup_status = le32_to_cpu(ev->status);
+	complete(&ar->mlo_setup_done);
+
+out:
+	kfree(tb);
+}
+
+static void ath12k_wmi_event_teardown_complete(struct ath12k_base *ab,
+					       struct sk_buff *skb)
+{
+	const struct wmi_mlo_teardown_complete_event *ev;
+	const void **tb;
+	int ret;
+
+	tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC);
+	if (IS_ERR(tb)) {
+		ret = PTR_ERR(tb);
+		ath12k_warn(ab, "failed to parse teardown complete event tlv: %d\n", ret);
+		return;
+	}
+
+	ev = tb[WMI_TAG_MLO_TEARDOWN_COMPLETE];
+	if (!ev) {
+		ath12k_warn(ab, "failed to fetch teardown complete event\n");
+		kfree(tb);
+		return;
+	}
+
+	kfree(tb);
+}
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+static int ath12k_wmi_tpc_stats_copy_buffer(struct ath12k_base *ab,
+					    const void *ptr, u16 tag, u16 len,
+					    struct wmi_tpc_stats_arg *tpc_stats)
+{
+	u32 len1, len2, len3, len4;
+	s16 *dst_ptr;
+	s8 *dst_ptr_ctl;
+
+	len1 = le32_to_cpu(tpc_stats->max_reg_allowed_power.tpc_reg_pwr.reg_array_len);
+	len2 = le32_to_cpu(tpc_stats->rates_array1.tpc_rates_array.rate_array_len);
+	len3 = le32_to_cpu(tpc_stats->rates_array2.tpc_rates_array.rate_array_len);
+	len4 = le32_to_cpu(tpc_stats->ctl_array.tpc_ctl_pwr.ctl_array_len);
+
+	switch (tpc_stats->event_count) {
+	case ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT:
+		if (len1 > len)
+			return -ENOBUFS;
+
+		if (tpc_stats->tlvs_rcvd & WMI_TPC_REG_PWR_ALLOWED) {
+			dst_ptr = tpc_stats->max_reg_allowed_power.reg_pwr_array;
+			memcpy(dst_ptr, ptr, len1);
+		}
+		break;
+	case ATH12K_TPC_STATS_RATES_EVENT1:
+		if (len2 > len)
+			return -ENOBUFS;
+
+		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY1) {
+			dst_ptr = tpc_stats->rates_array1.rate_array;
+			memcpy(dst_ptr, ptr, len2);
+		}
+		break;
+	case ATH12K_TPC_STATS_RATES_EVENT2:
+		if (len3 > len)
+			return -ENOBUFS;
+
+		if (tpc_stats->tlvs_rcvd & WMI_TPC_RATES_ARRAY2) {
+			dst_ptr = tpc_stats->rates_array2.rate_array;
+			memcpy(dst_ptr, ptr, len3);
+		}
+		break;
+	case ATH12K_TPC_STATS_CTL_TABLE_EVENT:
+		if (len4 > len)
+			return -ENOBUFS;
+
+		if (tpc_stats->tlvs_rcvd & WMI_TPC_CTL_PWR_ARRAY) {
+			dst_ptr_ctl = tpc_stats->ctl_array.ctl_pwr_table;
+			memcpy(dst_ptr_ctl, ptr, len4);
+		}
+		break;
+	}
+	return 0;
+}
+
+static int ath12k_tpc_get_reg_pwr(struct ath12k_base *ab,
+				  struct wmi_tpc_stats_arg *tpc_stats,
+				  struct wmi_max_reg_power_fixed_params *ev)
+{
+	struct wmi_max_reg_power_allowed_arg *reg_pwr;
+	u32 total_size;
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "Received reg power array type %d length %d for tpc stats\n",
+		   le32_to_cpu(ev->reg_power_type), le32_to_cpu(ev->reg_array_len));
+
+	switch (le32_to_cpu(ev->reg_power_type)) {
+	case TPC_STATS_REG_PWR_ALLOWED_TYPE:
+		reg_pwr = &tpc_stats->max_reg_allowed_power;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Each entry is 2 bytes, hence multiply the product of the dimensions by 2 */
+	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
+		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4) * 2;
+	if (le32_to_cpu(ev->reg_array_len) != total_size) {
+		ath12k_warn(ab,
+			    "Total size and reg_array_len doesn't match for tpc stats\n");
+		return -EINVAL;
+	}
+
+	memcpy(&reg_pwr->tpc_reg_pwr, ev, sizeof(struct wmi_max_reg_power_fixed_params));
+
+	reg_pwr->reg_pwr_array = kzalloc(le32_to_cpu(reg_pwr->tpc_reg_pwr.reg_array_len),
+					 GFP_ATOMIC);
+	if (!reg_pwr->reg_pwr_array)
+		return -ENOMEM;
+
+	tpc_stats->tlvs_rcvd |= WMI_TPC_REG_PWR_ALLOWED;
+
+	return 0;
+}
+
+static int ath12k_tpc_get_rate_array(struct ath12k_base *ab,
+				     struct wmi_tpc_stats_arg *tpc_stats,
+				     struct wmi_tpc_rates_array_fixed_params *ev)
+{
+	struct wmi_tpc_rates_array_arg *rates_array;
+	u32 flag = 0, rate_array_len;
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "Received rates array type %d length %d for tpc stats\n",
+		   le32_to_cpu(ev->rate_array_type), le32_to_cpu(ev->rate_array_len));
+
+	switch (le32_to_cpu(ev->rate_array_type)) {
+	case ATH12K_TPC_STATS_RATES_ARRAY1:
+		rates_array = &tpc_stats->rates_array1;
+		flag = WMI_TPC_RATES_ARRAY1;
+		break;
+	case ATH12K_TPC_STATS_RATES_ARRAY2:
+		rates_array = &tpc_stats->rates_array2;
+		flag = WMI_TPC_RATES_ARRAY2;
+		break;
+	default:
+		ath12k_warn(ab,
+			    "Received invalid type of rates array for tpc stats\n");
+		return -EINVAL;
+	}
+	memcpy(&rates_array->tpc_rates_array, ev,
+	       sizeof(struct wmi_tpc_rates_array_fixed_params));
+	rate_array_len = le32_to_cpu(rates_array->tpc_rates_array.rate_array_len);
+	rates_array->rate_array = kzalloc(rate_array_len, GFP_ATOMIC);
+	if (!rates_array->rate_array)
+		return -ENOMEM;
+
+	tpc_stats->tlvs_rcvd |= flag;
+	return 0;
+}
+
+static int ath12k_tpc_get_ctl_pwr_tbl(struct ath12k_base *ab,
+				      struct wmi_tpc_stats_arg *tpc_stats,
+				      struct wmi_tpc_ctl_pwr_fixed_params *ev)
+{
+	struct wmi_tpc_ctl_pwr_table_arg *ctl_array;
+	u32 total_size, ctl_array_len, flag = 0;
+
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "Received ctl array type %d length %d for tpc stats\n",
+		   le32_to_cpu(ev->ctl_array_type), le32_to_cpu(ev->ctl_array_len));
+
+	switch (le32_to_cpu(ev->ctl_array_type)) {
+	case ATH12K_TPC_STATS_CTL_ARRAY:
+		ctl_array = &tpc_stats->ctl_array;
+		flag = WMI_TPC_CTL_PWR_ARRAY;
+		break;
+	default:
+		ath12k_warn(ab,
+			    "Received invalid type of ctl pwr table for tpc stats\n");
+		return -EINVAL;
+	}
+
+	total_size = le32_to_cpu(ev->d1) * le32_to_cpu(ev->d2) *
+		     le32_to_cpu(ev->d3) * le32_to_cpu(ev->d4);
+	if (le32_to_cpu(ev->ctl_array_len) != total_size) {
+		ath12k_warn(ab,
+			    "Total size and ctl_array_len doesn't match for tpc stats\n");
+		return -EINVAL;
+	}
+
+	memcpy(&ctl_array->tpc_ctl_pwr, ev, sizeof(struct wmi_tpc_ctl_pwr_fixed_params));
+	ctl_array_len = le32_to_cpu(ctl_array->tpc_ctl_pwr.ctl_array_len);
+	ctl_array->ctl_pwr_table = kzalloc(ctl_array_len, GFP_ATOMIC);
+	if (!ctl_array->ctl_pwr_table)
+		return -ENOMEM;
+
+	tpc_stats->tlvs_rcvd |= flag;
+	return 0;
+}
+
+static int ath12k_wmi_tpc_stats_subtlv_parser(struct ath12k_base *ab,
+					      u16 tag, u16 len,
+					      const void *ptr, void *data)
+{
+	struct wmi_tpc_rates_array_fixed_params *tpc_rates_array;
+	struct wmi_max_reg_power_fixed_params *tpc_reg_pwr;
+	struct wmi_tpc_ctl_pwr_fixed_params *tpc_ctl_pwr;
+	struct wmi_tpc_stats_arg *tpc_stats = data;
+	struct wmi_tpc_config_params *tpc_config;
+	int ret = 0;
+
+	if (!tpc_stats) {
+		ath12k_warn(ab, "tpc stats memory unavailable\n");
+		return -EINVAL;
+	}
+
+	switch (tag) {
+	case WMI_TAG_TPC_STATS_CONFIG_EVENT:
+		tpc_config = (struct wmi_tpc_config_params *)ptr;
+		memcpy(&tpc_stats->tpc_config, tpc_config,
+		       sizeof(struct wmi_tpc_config_params));
+		break;
+	case WMI_TAG_TPC_STATS_REG_PWR_ALLOWED:
+		tpc_reg_pwr = (struct wmi_max_reg_power_fixed_params *)ptr;
+		ret = ath12k_tpc_get_reg_pwr(ab, tpc_stats, tpc_reg_pwr);
+		break;
+	case WMI_TAG_TPC_STATS_RATES_ARRAY:
+		tpc_rates_array = (struct wmi_tpc_rates_array_fixed_params *)ptr;
+		ret = ath12k_tpc_get_rate_array(ab, tpc_stats, tpc_rates_array);
+		break;
+	case WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT:
+		tpc_ctl_pwr = (struct wmi_tpc_ctl_pwr_fixed_params *)ptr;
+		ret = ath12k_tpc_get_ctl_pwr_tbl(ab, tpc_stats, tpc_ctl_pwr);
+		break;
+	default:
+		ath12k_warn(ab,
+			    "Received invalid tag for tpc stats in subtlvs\n");
+		return -EINVAL;
+	}
+	return ret;
+}
+
+static int ath12k_wmi_tpc_stats_event_parser(struct ath12k_base *ab,
+					     u16 tag, u16 len,
+					     const void *ptr, void *data)
+{
+	struct wmi_tpc_stats_arg *tpc_stats = (struct wmi_tpc_stats_arg *)data;
+	int ret;
+
+	switch (tag) {
+	case WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM:
+		ret = 0;
+		/* Fixed param is already processed */
+		break;
+	case WMI_TAG_ARRAY_STRUCT:
+		/* len 0 is expected for array of struct when there
+		 * is no content of that type to pack inside that tlv
+		 */
+		if (len == 0)
+			return 0;
+		ret = ath12k_wmi_tlv_iter(ab, ptr, len,
+					  ath12k_wmi_tpc_stats_subtlv_parser,
+					  tpc_stats);
+		break;
+	case WMI_TAG_ARRAY_INT16:
+		if (len == 0)
+			return 0;
+		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
+						       WMI_TAG_ARRAY_INT16,
+						       len, tpc_stats);
+		break;
+	case WMI_TAG_ARRAY_BYTE:
+		if (len == 0)
+			return 0;
+		ret = ath12k_wmi_tpc_stats_copy_buffer(ab, ptr,
+						       WMI_TAG_ARRAY_BYTE,
+						       len, tpc_stats);
+		break;
+	default:
+		ath12k_warn(ab, "Received invalid tag for tpc stats\n");
+		ret = -EINVAL;
+		break;
+	}
+	return ret;
+}
+
+void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar)
+{
+	struct wmi_tpc_stats_arg *tpc_stats = ar->debug.tpc_stats;
+
+	lockdep_assert_held(&ar->data_lock);
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "tpc stats mem free\n");
+	if (tpc_stats) {
+		kfree(tpc_stats->max_reg_allowed_power.reg_pwr_array);
+		kfree(tpc_stats->rates_array1.rate_array);
+		kfree(tpc_stats->rates_array2.rate_array);
+		kfree(tpc_stats->ctl_array.ctl_pwr_table);
+		kfree(tpc_stats);
+		ar->debug.tpc_stats = NULL;
+	}
+}
+
+static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
+					 struct sk_buff *skb)
+{
+	struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *fixed_param;
+	struct wmi_tpc_stats_arg *tpc_stats;
+	const struct wmi_tlv *tlv;
+	void *ptr = skb->data;
+	struct ath12k *ar;
+	u16 tlv_tag;
+	u32 event_count;
+	int ret;
+
+	if (!skb->data) {
+		ath12k_warn(ab, "No data present in tpc stats event\n");
+		return;
+	}
+
+	if (skb->len < (sizeof(*fixed_param) + TLV_HDR_SIZE)) {
+		ath12k_warn(ab, "TPC stats event size invalid\n");
+		return;
+	}
+
+	tlv = (struct wmi_tlv *)ptr;
+	tlv_tag = le32_get_bits(tlv->header, WMI_TLV_TAG);
+	ptr += sizeof(*tlv);
+
+	if (tlv_tag != WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM) {
+		ath12k_warn(ab, "TPC stats without fixed param tlv at start\n");
+		return;
+	}
+
+	fixed_param = (struct ath12k_wmi_pdev_tpc_stats_event_fixed_params *)ptr;
+	rcu_read_lock();
+	ar = ath12k_mac_get_ar_by_pdev_id(ab, le32_to_cpu(fixed_param->pdev_id) + 1);
+	if (!ar) {
+		ath12k_warn(ab, "Failed to get ar for tpc stats\n");
+		rcu_read_unlock();
+		return;
+	}
+	spin_lock_bh(&ar->data_lock);
+	if (!ar->debug.tpc_request) {
+		/* Event is received either without request or the
+		 * timeout, if memory is already allocated free it
+		 */
+		if (ar->debug.tpc_stats) {
+			ath12k_warn(ab, "Freeing memory for tpc_stats\n");
+			ath12k_wmi_free_tpc_stats_mem(ar);
+		}
+		goto unlock;
+	}
+
+	event_count = le32_to_cpu(fixed_param->event_count);
+	if (event_count == 0) {
+		if (ar->debug.tpc_stats) {
+			ath12k_warn(ab,
+				    "Invalid tpc memory present\n");
+			goto unlock;
+		}
+		ar->debug.tpc_stats =
+			kzalloc(sizeof(struct wmi_tpc_stats_arg),
+				GFP_ATOMIC);
+		if (!ar->debug.tpc_stats) {
+			ath12k_warn(ab,
+				    "Failed to allocate memory for tpc stats\n");
+			goto unlock;
+		}
+	}
+
+	tpc_stats = ar->debug.tpc_stats;
+
+	if (!(event_count == 0)) {
+		if (event_count != tpc_stats->event_count + 1) {
+			ath12k_warn(ab,
+				    "Invalid tpc event received\n");
+			goto unlock;
+		}
+	}
+	tpc_stats->pdev_id = le32_to_cpu(fixed_param->pdev_id);
+	tpc_stats->end_of_event = le32_to_cpu(fixed_param->end_of_event);
+	tpc_stats->event_count = le32_to_cpu(fixed_param->event_count);
+	ath12k_dbg(ab, ATH12K_DBG_WMI,
+		   "tpc stats event_count %d\n",
+		   tpc_stats->event_count);
+	ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len,
+				  ath12k_wmi_tpc_stats_event_parser,
+				  tpc_stats);
+	if (ret) {
+		if (tpc_stats)
+			ath12k_wmi_free_tpc_stats_mem(ar);
+		ath12k_warn(ab, "failed to parse tpc_stats tlv: %d\n", ret);
+		goto unlock;
+	}
+
+	if (tpc_stats && tpc_stats->end_of_event)
+		complete(&ar->debug.tpc_complete);
+
+unlock:
+	spin_unlock_bh(&ar->data_lock);
+	rcu_read_unlock();
+}
+#else
+static void ath12k_wmi_process_tpc_stats(struct ath12k_base *ab,
+					 struct sk_buff *skb)
+{
+}
+#endif
+
 static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb)
 {
 	struct wmi_cmd_hdr *cmd_hdr;
@@ -7279,13 +8804,6 @@
 	case WMI_P2P_NOA_EVENTID:
 		ath12k_wmi_p2p_noa_event(ab, skb);
 		break;
-	/* add Unsupported events here */
-	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
-	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
-	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
-		ath12k_dbg(ab, ATH12K_DBG_WMI,
-			   "ignoring unsupported event 0x%x\n", id);
-		break;
 	case WMI_PDEV_DFS_RADAR_DETECTION_EVENTID:
 		ath12k_wmi_pdev_dfs_radar_detected_event(ab, skb);
 		break;
@@ -7301,7 +8819,34 @@
 	case WMI_GTK_OFFLOAD_STATUS_EVENTID:
 		ath12k_wmi_gtk_offload_status_event(ab, skb);
 		break;
-	/* TODO: Add remaining events */
+	case WMI_MLO_SETUP_COMPLETE_EVENTID:
+		ath12k_wmi_event_mlo_setup_complete(ab, skb);
+		break;
+	case WMI_MLO_TEARDOWN_COMPLETE_EVENTID:
+		ath12k_wmi_event_teardown_complete(ab, skb);
+		break;
+	case WMI_HALPHY_STATS_CTRL_PATH_EVENTID:
+		ath12k_wmi_process_tpc_stats(ab, skb);
+		break;
+	/* add Unsupported events (rare) here */
+	case WMI_TBTTOFFSET_EXT_UPDATE_EVENTID:
+	case WMI_PEER_OPER_MODE_CHANGE_EVENTID:
+	case WMI_PDEV_DMA_RING_CFG_RSP_EVENTID:
+		ath12k_dbg(ab, ATH12K_DBG_WMI,
+			   "ignoring unsupported event 0x%x\n", id);
+		break;
+	/* add Unsupported events (frequent) here */
+	case WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID:
+	case WMI_MGMT_RX_FW_CONSUMED_EVENTID:
+	case WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID:
+		/* debug might flood hence silently ignore (no-op) */
+		break;
+	case WMI_PDEV_UTF_EVENTID:
+		if (test_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags))
+			ath12k_tm_wmi_event_segmented(ab, id, skb);
+		else
+			ath12k_tm_wmi_event_unsegmented(ab, id, skb);
+		break;
 	default:
 		ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id);
 		break;
@@ -7438,6 +8983,74 @@
 	return ath12k_wmi_send_unit_test_cmd(ar, wmi_ut, dfs_args);
 }
 
+int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
+				      enum wmi_halphy_ctrl_path_stats_id tpc_stats_type)
+{
+	struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *cmd;
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct sk_buff *skb;
+	struct wmi_tlv *tlv;
+	__le32 *pdev_id;
+	u32 buf_len;
+	void *ptr;
+	int ret;
+
+	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + sizeof(u32) + TLV_HDR_SIZE + TLV_HDR_SIZE;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+	if (!skb)
+		return -ENOMEM;
+	cmd = (struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM,
+						 sizeof(*cmd));
+
+	cmd->stats_id_mask = cpu_to_le32(WMI_REQ_CTRL_PATH_PDEV_TX_STAT);
+	cmd->action = cpu_to_le32(WMI_REQUEST_CTRL_PATH_STAT_GET);
+	cmd->subid = cpu_to_le32(tpc_stats_type);
+
+	ptr = skb->data + sizeof(*cmd);
+
+	/* The below TLV arrays optionally follow this fixed param TLV structure
+	 * 1. ARRAY_UINT32 pdev_ids[]
+	 *      If this array is present and non-zero length, stats should only
+	 *      be provided from the pdevs identified in the array.
+	 * 2. ARRAY_UINT32 vdev_ids[]
+	 *      If this array is present and non-zero length, stats should only
+	 *      be provided from the vdevs identified in the array.
+	 * 3. ath12k_wmi_mac_addr_params peer_macaddr[];
+	 *      If this array is present and non-zero length, stats should only
+	 *      be provided from the peers with the MAC addresses specified
+	 *      in the array
+	 */
+	tlv = ptr;
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32));
+	ptr += TLV_HDR_SIZE;
+
+	pdev_id = ptr;
+	*pdev_id = cpu_to_le32(ath12k_mac_get_target_pdev_id(ar));
+	ptr += sizeof(*pdev_id);
+
+	tlv = ptr;
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0);
+	ptr += TLV_HDR_SIZE;
+
+	tlv = ptr;
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_FIXED_STRUCT, 0);
+	ptr += TLV_HDR_SIZE;
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab,
+			    "failed to submit WMI_REQUEST_STATS_CTRL_PATH_CMDID\n");
+		dev_kfree_skb(skb);
+		return ret;
+	}
+	ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI get TPC STATS sent on pdev %d\n",
+		   ar->pdev->pdev_id);
+
+	return ret;
+}
+
 int ath12k_wmi_connect(struct ath12k_base *ab)
 {
 	u32 i;
@@ -8117,3 +9730,104 @@
 
 	return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID);
 }
+
+int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params)
+{
+	struct wmi_mlo_setup_cmd *cmd;
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	u32 *partner_links, num_links;
+	int i, ret, buf_len, arg_len;
+	struct sk_buff *skb;
+	struct wmi_tlv *tlv;
+	void *ptr;
+
+	num_links = mlo_params->num_partner_links;
+	arg_len = num_links * sizeof(u32);
+	buf_len = sizeof(*cmd) + TLV_HDR_SIZE + arg_len;
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_mlo_setup_cmd *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_SETUP_CMD,
+						 sizeof(*cmd));
+	cmd->mld_group_id = mlo_params->group_id;
+	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+	ptr = skb->data + sizeof(*cmd);
+
+	tlv = ptr;
+	tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, arg_len);
+	ptr += TLV_HDR_SIZE;
+
+	partner_links = ptr;
+	for (i = 0; i < num_links; i++)
+		partner_links[i] = mlo_params->partner_link_id[i];
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_SETUP_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to submit WMI_MLO_SETUP_CMDID command: %d\n",
+			    ret);
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath12k_wmi_mlo_ready(struct ath12k *ar)
+{
+	struct wmi_mlo_ready_cmd *cmd;
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_mlo_ready_cmd *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_READY_CMD,
+						 sizeof(*cmd));
+	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_READY_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to submit WMI_MLO_READY_CMDID command: %d\n",
+			    ret);
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+	return 0;
+}
+
+int ath12k_wmi_mlo_teardown(struct ath12k *ar)
+{
+	struct wmi_mlo_teardown_cmd *cmd;
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct sk_buff *skb;
+	int ret, len;
+
+	len = sizeof(*cmd);
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len);
+	if (!skb)
+		return -ENOMEM;
+
+	cmd = (struct wmi_mlo_teardown_cmd *)skb->data;
+	cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_MLO_TEARDOWN_CMD,
+						 sizeof(*cmd));
+	cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+	cmd->reason_code = WMI_MLO_TEARDOWN_SSR_REASON;
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, WMI_MLO_TEARDOWN_CMDID);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to submit WMI MLO teardown command: %d\n",
+			    ret);
+		dev_kfree_skb(skb);
+		return ret;
+	}
+
+	return 0;
+}
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath12k/wmi.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/wmi.h
--- linux-6.13.12/drivers/net/wireless/ath/ath12k/wmi.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/wmi.h	2025-09-29 14:23:07.613732450 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: BSD-3-Clause-Clear */
 /*
  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
  */
 
 #ifndef ATH12K_WMI_H
@@ -25,6 +25,7 @@
 struct ath12k_base;
 struct ath12k;
 struct ath12k_link_vif;
+struct ath12k_fw_stats;
 
 /* There is no signed version of __le32, so for a temporary solution come
  * up with our own version. The idea is from fs/ntfs/endian.h.
@@ -215,9 +216,25 @@
 };
 
 enum WMI_HOST_WLAN_BAND {
-	WMI_HOST_WLAN_2G_CAP	= 1,
-	WMI_HOST_WLAN_5G_CAP	= 2,
-	WMI_HOST_WLAN_2G_5G_CAP	= 3,
+	WMI_HOST_WLAN_2GHZ_CAP		= 1,
+	WMI_HOST_WLAN_5GHZ_CAP		= 2,
+	WMI_HOST_WLAN_2GHZ_5GHZ_CAP	= 3,
+};
+
+/* Parameters used for WMI_VDEV_PARAM_AUTORATE_MISC_CFG command.
+ * Used only for HE auto rate mode.
+ */
+enum {
+	/* HE LTF related configuration */
+	WMI_HE_AUTORATE_LTF_1X = BIT(0),
+	WMI_HE_AUTORATE_LTF_2X = BIT(1),
+	WMI_HE_AUTORATE_LTF_4X = BIT(2),
+
+	/* HE GI related configuration */
+	WMI_AUTORATE_400NS_GI = BIT(8),
+	WMI_AUTORATE_800NS_GI = BIT(9),
+	WMI_AUTORATE_1600NS_GI = BIT(10),
+	WMI_AUTORATE_3200NS_GI = BIT(11),
 };
 
 enum wmi_cmd_group {
@@ -285,6 +302,7 @@
 	WMI_GRP_TWT            = 0x3e,
 	WMI_GRP_MOTION_DET     = 0x3f,
 	WMI_GRP_SPATIAL_REUSE  = 0x40,
+	WMI_GRP_MLO            = 0x48,
 };
 
 #define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
@@ -515,6 +533,9 @@
 	WMI_REQUEST_RCPI_CMDID,
 	WMI_REQUEST_PEER_STATS_INFO_CMDID,
 	WMI_REQUEST_RADIO_CHAN_STATS_CMDID,
+	WMI_REQUEST_WLM_STATS_CMDID,
+	WMI_REQUEST_CTRL_PATH_STATS_CMDID,
+	WMI_REQUEST_HALPHY_CTRL_PATH_STATS_CMDID = WMI_REQUEST_CTRL_PATH_STATS_CMDID + 3,
 	WMI_SET_ARP_NS_OFFLOAD_CMDID = WMI_TLV_CMD(WMI_GRP_ARP_NS_OFL),
 	WMI_ADD_PROACTIVE_ARP_RSP_PATTERN_CMDID,
 	WMI_DEL_PROACTIVE_ARP_RSP_PATTERN_CMDID,
@@ -665,6 +686,10 @@
 	WMI_PDEV_OBSS_PD_SPATIAL_REUSE_CMDID =
 				WMI_TLV_CMD(WMI_GRP_SPATIAL_REUSE),
 	WMI_PDEV_OBSS_PD_SPATIAL_REUSE_SET_DEF_OBSS_THRESH_CMDID,
+	WMI_MLO_LINK_SET_ACTIVE_CMDID = WMI_TLV_CMD(WMI_GRP_MLO),
+	WMI_MLO_SETUP_CMDID,
+	WMI_MLO_READY_CMDID,
+	WMI_MLO_TEARDOWN_CMDID,
 };
 
 enum wmi_tlv_event_id {
@@ -706,6 +731,8 @@
 	WMI_PDEV_RAP_INFO_EVENTID,
 	WMI_CHAN_RF_CHARACTERIZATION_INFO_EVENTID,
 	WMI_SERVICE_READY_EXT2_EVENTID,
+	WMI_PDEV_GET_HALPHY_CAL_STATUS_EVENTID =
+					WMI_SERVICE_READY_EXT2_EVENTID + 4,
 	WMI_VDEV_START_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_VDEV),
 	WMI_VDEV_STOPPED_EVENTID,
 	WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID,
@@ -747,6 +774,7 @@
 	WMI_TBTTOFFSET_EXT_UPDATE_EVENTID,
 	WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID,
 	WMI_HOST_FILS_DISCOVERY_EVENTID,
+	WMI_MGMT_RX_FW_CONSUMED_EVENTID = WMI_HOST_FILS_DISCOVERY_EVENTID + 3,
 	WMI_TX_DELBA_COMPLETE_EVENTID = WMI_TLV_CMD(WMI_GRP_BA_NEG),
 	WMI_TX_ADDBA_COMPLETE_EVENTID,
 	WMI_BA_RSP_SSN_EVENTID,
@@ -777,6 +805,9 @@
 	WMI_UPDATE_RCPI_EVENTID,
 	WMI_PEER_STATS_INFO_EVENTID,
 	WMI_RADIO_CHAN_STATS_EVENTID,
+	WMI_WLM_STATS_EVENTID,
+	WMI_CTRL_PATH_STATS_EVENTID,
+	WMI_HALPHY_STATS_CTRL_PATH_EVENTID,
 	WMI_NLO_MATCH_EVENTID = WMI_TLV_CMD(WMI_GRP_NLO_OFL),
 	WMI_NLO_SCAN_COMPLETE_EVENTID,
 	WMI_APFIND_EVENTID,
@@ -845,6 +876,8 @@
 	WMI_MDNS_STATS_EVENTID = WMI_TLV_CMD(WMI_GRP_MDNS_OFL),
 	WMI_SAP_OFL_ADD_STA_EVENTID = WMI_TLV_CMD(WMI_GRP_SAP_OFL),
 	WMI_SAP_OFL_DEL_STA_EVENTID,
+	WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID =
+				    WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL),
 	WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_TLV_CMD(WMI_GRP_OCB),
 	WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
 	WMI_DCC_GET_STATS_RESP_EVENTID,
@@ -874,6 +907,9 @@
 	WMI_TWT_DEL_DIALOG_EVENTID,
 	WMI_TWT_PAUSE_DIALOG_EVENTID,
 	WMI_TWT_RESUME_DIALOG_EVENTID,
+	WMI_MLO_LINK_SET_ACTIVE_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MLO),
+	WMI_MLO_SETUP_COMPLETE_EVENTID,
+	WMI_MLO_TEARDOWN_COMPLETE_EVENTID,
 };
 
 enum wmi_tlv_pdev_param {
@@ -1132,13 +1168,16 @@
 	WMI_VDEV_PARAM_HE_RANGE_EXT,
 	WMI_VDEV_PARAM_ENABLE_BCAST_PROBE_RESPONSE,
 	WMI_VDEV_PARAM_FILS_MAX_CHANNEL_GUARD_TIME,
+	WMI_VDEV_PARAM_HE_LTF = 0x74,
 	WMI_VDEV_PARAM_BA_MODE = 0x7e,
+	WMI_VDEV_PARAM_AUTORATE_MISC_CFG = 0x80,
 	WMI_VDEV_PARAM_SET_HE_SOUNDING_MODE = 0x87,
 	WMI_VDEV_PARAM_6GHZ_PARAMS = 0x99,
 	WMI_VDEV_PARAM_PROTOTYPE = 0x8000,
 	WMI_VDEV_PARAM_BSS_COLOR,
 	WMI_VDEV_PARAM_SET_HEMU_MODE,
 	WMI_VDEV_PARAM_HEOPS_0_31 = 0x8003,
+	WMI_VDEV_PARAM_SET_EHT_MU_MODE = 0x8005,
 };
 
 enum wmi_tlv_peer_flags {
@@ -1178,6 +1217,7 @@
 	WMI_TAG_ARRAY_BYTE,
 	WMI_TAG_ARRAY_STRUCT,
 	WMI_TAG_ARRAY_FIXED_STRUCT,
+	WMI_TAG_ARRAY_INT16,
 	WMI_TAG_LAST_ARRAY_ENUM = 31,
 	WMI_TAG_SERVICE_READY_EVENT,
 	WMI_TAG_HAL_REG_CAPABILITIES,
@@ -1928,10 +1968,31 @@
 	WMI_TAG_MAC_PHY_CAPABILITIES_EXT = 0x36F,
 	WMI_TAG_REGULATORY_RULE_EXT_STRUCT = 0x3A9,
 	WMI_TAG_REG_CHAN_LIST_CC_EXT_EVENT,
+	WMI_TAG_TPC_STATS_GET_CMD = 0x38B,
+	WMI_TAG_TPC_STATS_EVENT_FIXED_PARAM,
+	WMI_TAG_TPC_STATS_CONFIG_EVENT,
+	WMI_TAG_TPC_STATS_REG_PWR_ALLOWED,
+	WMI_TAG_TPC_STATS_RATES_ARRAY,
+	WMI_TAG_TPC_STATS_CTL_PWR_TABLE_EVENT,
 	WMI_TAG_EHT_RATE_SET = 0x3C4,
+	WMI_TAG_DCS_AWGN_INT_TYPE = 0x3C5,
+	WMI_TAG_MLO_TX_SEND_PARAMS,
+	WMI_TAG_MLO_PARTNER_LINK_PARAMS,
+	WMI_TAG_MLO_PARTNER_LINK_PARAMS_PEER_ASSOC,
+	WMI_TAG_MLO_SETUP_CMD = 0x3C9,
+	WMI_TAG_MLO_SETUP_COMPLETE_EVENT,
+	WMI_TAG_MLO_READY_CMD,
+	WMI_TAG_MLO_TEARDOWN_CMD,
+	WMI_TAG_MLO_TEARDOWN_COMPLETE,
+	WMI_TAG_MLO_PEER_ASSOC_PARAMS = 0x3D0,
+	WMI_TAG_MLO_PEER_CREATE_PARAMS = 0x3D5,
+	WMI_TAG_MLO_VDEV_START_PARAMS = 0x3D6,
+	WMI_TAG_MLO_VDEV_CREATE_PARAMS = 0x3D7,
 	WMI_TAG_PDEV_SET_BIOS_SAR_TABLE_CMD = 0x3D8,
 	WMI_TAG_PDEV_SET_BIOS_GEO_TABLE_CMD = 0x3D9,
 	WMI_TAG_PDEV_SET_BIOS_INTERFACE_CMD = 0x3FB,
+	WMI_TAG_HALPHY_CTRL_PATH_CMD_FIXED_PARAM = 0x442,
+	WMI_TAG_HALPHY_CTRL_PATH_EVENT_FIXED_PARAM,
 	WMI_TAG_MAX
 };
 
@@ -2155,6 +2216,7 @@
 	WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213,
 	WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219,
 	WMI_TLV_SERVICE_EXT2_MSG = 220,
+	WMI_TLV_SERVICE_BEACON_PROTECTION_SUPPORT = 244,
 	WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT = 253,
 
 	WMI_MAX_EXT_SERVICE = 256,
@@ -2245,6 +2307,21 @@
 	WMI_DIRECT_BUF_MAX
 };
 
+/**
+ * enum wmi_nss_ratio - NSS ratio received from FW during service ready ext event
+ * @WMI_NSS_RATIO_1BY2_NSS: Max nss of 160MHz equals half of the max nss of 80MHz
+ * @WMI_NSS_RATIO_3BY4_NSS: Max nss of 160MHz equals 3/4 of the max nss of 80MHz
+ * @WMI_NSS_RATIO_1_NSS: Max nss of 160MHz equals the max nss of 80MHz
+ * @WMI_NSS_RATIO_2_NSS: Max nss of 160MHz equals two times the max nss of 80MHz
+ */
+
+enum wmi_nss_ratio {
+	WMI_NSS_RATIO_1BY2_NSS,
+	WMI_NSS_RATIO_3BY4_NSS,
+	WMI_NSS_RATIO_1_NSS,
+	WMI_NSS_RATIO_2_NSS
+};
+
 struct ath12k_wmi_pdev_band_arg {
 	u32 pdev_id;
 	u32 start_freq;
@@ -2561,6 +2638,12 @@
 } __packed;
 
 #define WMI_MAX_HECAP_PHY_SIZE                 (3)
+#define WMI_NSS_RATIO_EN_DIS_BITPOS    BIT(0)
+#define WMI_NSS_RATIO_EN_DIS_GET(_val) \
+	le32_get_bits(_val, WMI_NSS_RATIO_EN_DIS_BITPOS)
+#define WMI_NSS_RATIO_INFO_BITPOS              GENMASK(4, 1)
+#define WMI_NSS_RATIO_INFO_GET(_val) \
+	le32_get_bits(_val, WMI_NSS_RATIO_INFO_BITPOS)
 
 /* pdev_id is present in lower 16 bits of pdev_and_hw_link_ids in
  * ath12k_wmi_mac_phy_caps_params & ath12k_wmi_caps_ext_params.
@@ -2602,6 +2685,13 @@
 	__le32 he_cap_info_2g_ext;
 	__le32 he_cap_info_5g_ext;
 	__le32 he_cap_info_internal;
+	__le32 wireless_modes;
+	__le32 low_2ghz_chan_freq;
+	__le32 high_2ghz_chan_freq;
+	__le32 low_5ghz_chan_freq;
+	__le32 high_5ghz_chan_freq;
+	__le32 nss_ratio;
+
 } __packed;
 
 struct ath12k_wmi_hal_reg_caps_ext_params {
@@ -2648,8 +2738,8 @@
  * 2 - index for 160 MHz, first 3 bytes valid
  * 3 - index for 320 MHz, first 3 bytes valid
  */
-#define WMI_MAX_EHT_SUPP_MCS_2G_SIZE  2
-#define WMI_MAX_EHT_SUPP_MCS_5G_SIZE  4
+#define WMI_MAX_EHT_SUPP_MCS_2GHZ_SIZE  2
+#define WMI_MAX_EHT_SUPP_MCS_5GHZ_SIZE  4
 
 #define WMI_EHTCAP_TXRX_MCS_NSS_IDX_80    0
 #define WMI_EHTCAP_TXRX_MCS_NSS_IDX_160   1
@@ -2688,8 +2778,10 @@
 	struct ath12k_wmi_ppe_threshold_params eht_ppet_2ghz;
 	struct ath12k_wmi_ppe_threshold_params eht_ppet_5ghz;
 	__le32 eht_cap_info_internal;
-	__le32 eht_supp_mcs_ext_2ghz[WMI_MAX_EHT_SUPP_MCS_2G_SIZE];
-	__le32 eht_supp_mcs_ext_5ghz[WMI_MAX_EHT_SUPP_MCS_5G_SIZE];
+	__le32 eht_supp_mcs_ext_2ghz[WMI_MAX_EHT_SUPP_MCS_2GHZ_SIZE];
+	__le32 eht_supp_mcs_ext_5ghz[WMI_MAX_EHT_SUPP_MCS_5GHZ_SIZE];
+	__le32 eml_capability;
+	__le32 mld_capability;
 } __packed;
 
 /* 2 word representation of MAC addr */
@@ -2740,6 +2832,7 @@
 	u8 if_stats_id;
 	u32 mbssid_flags;
 	u32 mbssid_tx_vdev_id;
+	u8 mld_addr[ETH_ALEN];
 };
 
 #define ATH12K_MAX_VDEV_STATS_ID	0x30
@@ -2766,6 +2859,33 @@
 	__le32 supported_rx_streams;
 } __packed;
 
+struct wmi_vdev_create_mlo_params {
+	__le32 tlv_header;
+	struct ath12k_wmi_mac_addr_params mld_macaddr;
+} __packed;
+
+#define ATH12K_WMI_FLAG_MLO_ENABLED			BIT(0)
+#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK			BIT(1)
+#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC		BIT(2)
+#define ATH12K_WMI_FLAG_MLO_LOGICAL_LINK_IDX_VALID	BIT(3)
+#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID		BIT(4)
+#define ATH12K_WMI_FLAG_MLO_MCAST_VDEV			BIT(5)
+#define ATH12K_WMI_FLAG_MLO_EMLSR_SUPPORT		BIT(6)
+#define ATH12K_WMI_FLAG_MLO_FORCED_INACTIVE		BIT(7)
+#define ATH12K_WMI_FLAG_MLO_LINK_ADD			BIT(8)
+
+struct wmi_vdev_start_mlo_params {
+	__le32 tlv_header;
+	__le32 flags;
+} __packed;
+
+struct wmi_partner_link_info {
+	__le32 tlv_header;
+	__le32 vdev_id;
+	__le32 hw_link_id;
+	struct ath12k_wmi_mac_addr_params vdev_addr;
+} __packed;
+
 struct wmi_vdev_delete_cmd {
 	__le32 tlv_header;
 	__le32 vdev_id;
@@ -2909,6 +3029,27 @@
 	MODE_MAX = 33,
 };
 
+#define ATH12K_WMI_MLO_MAX_LINKS 4
+
+struct wmi_ml_partner_info {
+	u32 vdev_id;
+	u32 hw_link_id;
+	u8 addr[ETH_ALEN];
+	bool assoc_link;
+	bool primary_umac;
+	bool logical_link_idx_valid;
+	u32 logical_link_idx;
+};
+
+struct wmi_ml_arg {
+	bool enabled;
+	bool assoc_link;
+	bool mcast_link;
+	bool link_add;
+	u8 num_partner_links;
+	struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+};
+
 struct wmi_vdev_start_req_arg {
 	u32 vdev_id;
 	u32 freq;
@@ -2946,12 +3087,19 @@
 	u32 mbssid_flags;
 	u32 mbssid_tx_vdev_id;
 	u32 punct_bitmap;
+	struct wmi_ml_arg ml;
 };
 
 struct ath12k_wmi_peer_create_arg {
 	const u8 *peer_addr;
 	u32 peer_type;
 	u32 vdev_id;
+	bool ml_enabled;
+};
+
+struct wmi_peer_create_mlo_params {
+	__le32 tlv_header;
+	__le32 flags;
 };
 
 struct ath12k_wmi_pdev_set_regdomain_arg {
@@ -2995,31 +3143,6 @@
 #define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2)
 #define WMI_VDEV_PARAM_TXBF_MU_TX_BFER BIT(3)
 
-#define HECAP_PHYDWORD_0	0
-#define HECAP_PHYDWORD_1	1
-#define HECAP_PHYDWORD_2	2
-
-#define HECAP_PHY_SU_BFER		BIT(31)
-#define HECAP_PHY_SU_BFEE		BIT(0)
-#define HECAP_PHY_MU_BFER		BIT(1)
-#define HECAP_PHY_UL_MUMIMO		BIT(22)
-#define HECAP_PHY_UL_MUOFDMA		BIT(23)
-
-#define HECAP_PHY_SUBFMR_GET(hecap_phy) \
-	u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_SU_BFER)
-
-#define HECAP_PHY_SUBFME_GET(hecap_phy) \
-	u32_get_bits(hecap_phy[HECAP_PHYDWORD_1], HECAP_PHY_SU_BFEE)
-
-#define HECAP_PHY_MUBFMR_GET(hecap_phy) \
-	u32_get_bits(hecap_phy[HECAP_PHYDWORD_1], HECAP_PHY_MU_BFER)
-
-#define HECAP_PHY_ULMUMIMO_GET(hecap_phy) \
-	u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_UL_MUMIMO)
-
-#define HECAP_PHY_ULOFDMA_GET(hecap_phy) \
-	u32_get_bits(hecap_phy[HECAP_PHYDWORD_0], HECAP_PHY_UL_MUOFDMA)
-
 #define HE_MODE_SU_TX_BFEE	BIT(0)
 #define HE_MODE_SU_TX_BFER	BIT(1)
 #define HE_MODE_MU_TX_BFEE	BIT(2)
@@ -3031,8 +3154,31 @@
 #define HE_DL_MUOFDMA_ENABLE	1
 #define HE_UL_MUOFDMA_ENABLE	1
 #define HE_DL_MUMIMO_ENABLE	1
+#define HE_UL_MUMIMO_ENABLE	1
 #define HE_MU_BFEE_ENABLE	1
 #define HE_SU_BFEE_ENABLE	1
+#define HE_MU_BFER_ENABLE	1
+#define HE_SU_BFER_ENABLE	1
+
+#define EHT_MODE_SU_TX_BFEE		BIT(0)
+#define EHT_MODE_SU_TX_BFER		BIT(1)
+#define EHT_MODE_MU_TX_BFEE		BIT(2)
+#define EHT_MODE_MU_TX_BFER		BIT(3)
+#define EHT_MODE_DL_OFDMA		BIT(4)
+#define EHT_MODE_UL_OFDMA		BIT(5)
+#define EHT_MODE_MUMIMO			BIT(6)
+#define EHT_MODE_DL_OFDMA_TXBF		BIT(7)
+#define EHT_MODE_DL_OFDMA_MUMIMO	BIT(8)
+#define EHT_MODE_UL_OFDMA_MUMIMO	BIT(9)
+
+#define EHT_DL_MUOFDMA_ENABLE    1
+#define EHT_UL_MUOFDMA_ENABLE    1
+#define EHT_DL_MUMIMO_ENABLE     1
+#define EHT_UL_MUMIMO_ENABLE     1
+#define EHT_MU_BFEE_ENABLE       1
+#define EHT_SU_BFEE_ENABLE       1
+#define EHT_MU_BFER_ENABLE       1
+#define EHT_SU_BFER_ENABLE       1
 
 #define HE_VHT_SOUNDING_MODE_ENABLE		1
 #define HE_SU_MU_SOUNDING_MODE_ENABLE		1
@@ -3496,6 +3642,15 @@
 	__le32 delay_time_ms;
 } __packed;
 
+/* Param values to be sent for WMI_VDEV_PARAM_SGI param_id
+ * which are used in 11n, 11ac systems
+ * @WMI_GI_800_NS - Always uses 0.8us (Long GI)
+ * @WMI_GI_400_NS - Firmware switches between 0.4us (Short GI)
+ *			and 0.8us (Long GI) based on packet error rate.
+ */
+#define WMI_GI_800_NS 0
+#define WMI_GI_400_NS 1
+
 struct wmi_vdev_set_param_cmd {
 	__le32 tlv_header;
 	__le32 vdev_id;
@@ -3540,6 +3695,26 @@
 	struct ath12k_wmi_p2p_noa_descriptor descriptors[WMI_P2P_MAX_NOA_DESCRIPTORS];
 } __packed;
 
+#define MAX_WMI_UTF_LEN 252
+
+struct ath12k_wmi_ftm_seg_hdr_params {
+	__le32 len;
+	__le32 msgref;
+	__le32 segmentinfo;
+	__le32 pdev_id;
+} __packed;
+
+struct ath12k_wmi_ftm_cmd {
+	__le32 tlv_header;
+	struct ath12k_wmi_ftm_seg_hdr_params seg_hdr;
+	u8 data[];
+} __packed;
+
+struct ath12k_wmi_ftm_event {
+	struct ath12k_wmi_ftm_seg_hdr_params seg_hdr;
+	u8 data[];
+} __packed;
+
 #define WMI_BEACON_TX_BUFFER_SIZE	512
 
 #define WMI_EMA_BEACON_CNT      GENMASK(7, 0)
@@ -3547,6 +3722,8 @@
 #define WMI_EMA_BEACON_FIRST    GENMASK(23, 16)
 #define WMI_EMA_BEACON_LAST     GENMASK(31, 24)
 
+#define WMI_BEACON_PROTECTION_EN_BIT	BIT(0)
+
 struct ath12k_wmi_bcn_tmpl_ema_arg {
 	u8 bcn_cnt;
 	u8 bcn_index;
@@ -3616,7 +3793,25 @@
 #define WMI_HOST_MAX_HE_RATE_SET		3
 #define WMI_HECAP_TXRX_MCS_NSS_IDX_80		0
 #define WMI_HECAP_TXRX_MCS_NSS_IDX_160		1
-#define WMI_HECAP_TXRX_MCS_NSS_IDX_80_80	2
+
+#define ATH12K_WMI_MLO_MAX_PARTNER_LINKS \
+	(ATH12K_WMI_MLO_MAX_LINKS + ATH12K_MAX_NUM_BRIDGE_LINKS - 1)
+
+struct peer_assoc_mlo_params {
+	bool enabled;
+	bool assoc_link;
+	bool primary_umac;
+	bool peer_id_valid;
+	bool logical_link_idx_valid;
+	bool bridge_peer;
+	u8 mld_addr[ETH_ALEN];
+	u32 logical_link_idx;
+	u32 ml_peer_id;
+	u32 ieee_link_id;
+	u8 num_partner_links;
+	struct wmi_ml_partner_info partner_info[ATH12K_WMI_MLO_MAX_LINKS];
+	u16 eml_cap;
+};
 
 struct wmi_rate_set_arg {
 	u32 num_rates;
@@ -3692,8 +3887,37 @@
 	u32 peer_eht_tx_mcs_set[WMI_MAX_EHTCAP_RATE_SET];
 	struct ath12k_wmi_ppe_threshold_arg peer_eht_ppet;
 	u32 punct_bitmap;
+	bool is_assoc;
+	struct peer_assoc_mlo_params ml;
+	bool enable_mcs15;
 };
 
+#define ATH12K_WMI_FLAG_MLO_ENABLED			BIT(0)
+#define ATH12K_WMI_FLAG_MLO_ASSOC_LINK			BIT(1)
+#define ATH12K_WMI_FLAG_MLO_PRIMARY_UMAC		BIT(2)
+#define ATH12K_WMI_FLAG_MLO_LINK_ID_VALID		BIT(3)
+#define ATH12K_WMI_FLAG_MLO_PEER_ID_VALID		BIT(4)
+
+struct wmi_peer_assoc_mlo_partner_info_params {
+	__le32 tlv_header;
+	__le32 vdev_id;
+	__le32 hw_link_id;
+	__le32 flags;
+	__le32 logical_link_idx;
+} __packed;
+
+struct wmi_peer_assoc_mlo_params {
+	__le32 tlv_header;
+	__le32 flags;
+	struct ath12k_wmi_mac_addr_params mld_addr;
+	__le32 logical_link_idx;
+	__le32 ml_peer_id;
+	__le32 ieee_link_id;
+	__le32 emlsr_trans_timeout_us;
+	__le32 emlsr_trans_delay_us;
+	__le32 emlsr_padding_delay_us;
+} __packed;
+
 struct wmi_peer_assoc_complete_cmd {
 	__le32 tlv_header;
 	struct ath12k_wmi_mac_addr_params peer_macaddr;
@@ -3930,7 +4154,9 @@
 
 struct ath12k_wmi_he_rate_set_params {
 	__le32 tlv_header;
+	/* MCS at which the peer can receive */
 	__le32 rx_mcs_set;
+	/* MCS at which the peer can transmit */
 	__le32 tx_mcs_set;
 } __packed;
 
@@ -3942,7 +4168,7 @@
 
 #define MAX_REG_RULES 10
 #define REG_ALPHA2_LEN 2
-#define MAX_6G_REG_RULES 5
+#define MAX_6GHZ_REG_RULES 5
 
 enum wmi_start_event_param {
 	WMI_VDEV_START_RESP_EVENT = 0,
@@ -4239,12 +4465,26 @@
 	__le32 tsf_timestamp;
 } __packed;
 
+enum wmi_peer_sta_kickout_reason {
+	WMI_PEER_STA_KICKOUT_REASON_UNSPECIFIED = 0,
+	WMI_PEER_STA_KICKOUT_REASON_XRETRY = 1,
+	WMI_PEER_STA_KICKOUT_REASON_INACTIVITY = 2,
+	WMI_PEER_STA_KICKOUT_REASON_IBSS_DISCONNECT = 3,
+	WMI_PEER_STA_KICKOUT_REASON_TDLS_DISCONNECT = 4,
+	WMI_PEER_STA_KICKOUT_REASON_SA_QUERY_TIMEOUT = 5,
+	WMI_PEER_STA_KICKOUT_REASON_ROAMING_EVENT = 6,
+};
+
 struct wmi_peer_sta_kickout_arg {
 	const u8 *mac_addr;
+	__le32 reason;
+	__le32 rssi;
 };
 
 struct wmi_peer_sta_kickout_event {
 	struct ath12k_wmi_mac_addr_params peer_macaddr;
+	__le32 reason;
+	__le32 rssi;
 } __packed;
 
 #define WMI_ROAM_REASON_MASK		GENMASK(3, 0)
@@ -4432,7 +4672,7 @@
 
 #define DISABLE_SIFS_RESPONSE_TRIGGER 0
 
-#define WMI_MAX_KEY_INDEX   3
+#define WMI_MAX_KEY_INDEX   7
 #define WMI_MAX_KEY_LEN     32
 
 enum wmi_key_type {
@@ -4474,6 +4714,7 @@
 	WMI_RATE_PREAMBLE_HT,
 	WMI_RATE_PREAMBLE_VHT,
 	WMI_RATE_PREAMBLE_HE,
+	WMI_RATE_PREAMBLE_EHT,
 };
 
 /**
@@ -4814,6 +5055,7 @@
 
 #define MAX_RADIOS 2
 
+#define WMI_MLO_CMD_TIMEOUT_HZ (5 * HZ)
 #define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ)
 #define WMI_SEND_TIMEOUT_HZ (3 * HZ)
 
@@ -4910,6 +5152,43 @@
 	__le32 status;
 } __packed;
 
+struct wmi_mlo_setup_cmd {
+	__le32 tlv_header;
+	__le32 mld_group_id;
+	__le32 pdev_id;
+} __packed;
+
+struct wmi_mlo_setup_arg {
+	__le32 group_id;
+	u8 num_partner_links;
+	u8 *partner_link_id;
+};
+
+struct wmi_mlo_ready_cmd {
+	__le32 tlv_header;
+	__le32 pdev_id;
+} __packed;
+
+enum wmi_mlo_tear_down_reason_code_type {
+	WMI_MLO_TEARDOWN_SSR_REASON,
+};
+
+struct wmi_mlo_teardown_cmd {
+	__le32 tlv_header;
+	__le32 pdev_id;
+	__le32 reason_code;
+} __packed;
+
+struct wmi_mlo_setup_complete_event {
+	__le32 pdev_id;
+	__le32 status;
+} __packed;
+
+struct wmi_mlo_teardown_complete_event {
+	__le32 pdev_id;
+	__le32 status;
+} __packed;
+
 /* WOW structures */
 enum wmi_wow_wakeup_event {
 	WOW_BMISS_EVENT = 0,
@@ -5460,6 +5739,245 @@
 #define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT	30
 #define WMI_STA_KEEPALIVE_INTERVAL_DISABLE	0
 
+struct wmi_stats_event {
+	__le32 stats_id;
+	__le32 num_pdev_stats;
+	__le32 num_vdev_stats;
+	__le32 num_peer_stats;
+	__le32 num_bcnflt_stats;
+	__le32 num_chan_stats;
+	__le32 num_mib_stats;
+	__le32 pdev_id;
+	__le32 num_bcn_stats;
+	__le32 num_peer_extd_stats;
+	__le32 num_peer_extd2_stats;
+} __packed;
+
+enum wmi_stats_id {
+	WMI_REQUEST_PDEV_STAT	= BIT(2),
+	WMI_REQUEST_VDEV_STAT	= BIT(3),
+	WMI_REQUEST_BCN_STAT	= BIT(11),
+};
+
+struct wmi_request_stats_cmd {
+	__le32 tlv_header;
+	__le32 stats_id;
+	__le32 vdev_id;
+	struct ath12k_wmi_mac_addr_params peer_macaddr;
+	__le32 pdev_id;
+} __packed;
+
+#define WLAN_MAX_AC 4
+#define MAX_TX_RATE_VALUES 10
+
+struct wmi_vdev_stats_params {
+	__le32 vdev_id;
+	__le32 beacon_snr;
+	__le32 data_snr;
+	__le32 num_tx_frames[WLAN_MAX_AC];
+	__le32 num_rx_frames;
+	__le32 num_tx_frames_retries[WLAN_MAX_AC];
+	__le32 num_tx_frames_failures[WLAN_MAX_AC];
+	__le32 num_rts_fail;
+	__le32 num_rts_success;
+	__le32 num_rx_err;
+	__le32 num_rx_discard;
+	__le32 num_tx_not_acked;
+	__le32 tx_rate_history[MAX_TX_RATE_VALUES];
+	__le32 beacon_rssi_history[MAX_TX_RATE_VALUES];
+} __packed;
+
+struct ath12k_wmi_bcn_stats_params {
+	__le32 vdev_id;
+	__le32 tx_bcn_succ_cnt;
+	__le32 tx_bcn_outage_cnt;
+} __packed;
+
+struct ath12k_wmi_pdev_base_stats_params {
+	a_sle32 chan_nf;
+	__le32 tx_frame_count; /* Cycles spent transmitting frames */
+	__le32 rx_frame_count; /* Cycles spent receiving frames */
+	__le32 rx_clear_count; /* Total channel busy time, evidently */
+	__le32 cycle_count; /* Total on-channel time */
+	__le32 phy_err_count;
+	__le32 chan_tx_pwr;
+} __packed;
+
+struct ath12k_wmi_pdev_tx_stats_params {
+	a_sle32 comp_queued;
+	a_sle32 comp_delivered;
+	a_sle32 msdu_enqued;
+	a_sle32 mpdu_enqued;
+	a_sle32 wmm_drop;
+	a_sle32 local_enqued;
+	a_sle32 local_freed;
+	a_sle32 hw_queued;
+	a_sle32 hw_reaped;
+	a_sle32 underrun;
+	a_sle32 tx_abort;
+	a_sle32 mpdus_requed;
+	__le32 tx_ko;
+	__le32 data_rc;
+	__le32 self_triggers;
+	__le32 sw_retry_failure;
+	__le32 illgl_rate_phy_err;
+	__le32 pdev_cont_xretry;
+	__le32 pdev_tx_timeout;
+	__le32 pdev_resets;
+	__le32 stateless_tid_alloc_failure;
+	__le32 phy_underrun;
+	__le32 txop_ovf;
+} __packed;
+
+struct ath12k_wmi_pdev_rx_stats_params {
+	a_sle32 mid_ppdu_route_change;
+	a_sle32 status_rcvd;
+	a_sle32 r0_frags;
+	a_sle32 r1_frags;
+	a_sle32 r2_frags;
+	a_sle32 r3_frags;
+	a_sle32 htt_msdus;
+	a_sle32 htt_mpdus;
+	a_sle32 loc_msdus;
+	a_sle32 loc_mpdus;
+	a_sle32 oversize_amsdu;
+	a_sle32 phy_errs;
+	a_sle32 phy_err_drop;
+	a_sle32 mpdu_errs;
+} __packed;
+
+struct ath12k_wmi_pdev_stats_params {
+	struct ath12k_wmi_pdev_base_stats_params base;
+	struct ath12k_wmi_pdev_tx_stats_params tx;
+	struct ath12k_wmi_pdev_rx_stats_params rx;
+} __packed;
+
+struct ath12k_fw_stats_req_params {
+	u32 stats_id;
+	u32 vdev_id;
+	u32 pdev_id;
+};
+
+#define WMI_REQ_CTRL_PATH_PDEV_TX_STAT		1
+#define WMI_REQUEST_CTRL_PATH_STAT_GET		1
+
+#define WMI_TPC_CONFIG			BIT(1)
+#define WMI_TPC_REG_PWR_ALLOWED		BIT(2)
+#define WMI_TPC_RATES_ARRAY1		BIT(3)
+#define WMI_TPC_RATES_ARRAY2		BIT(4)
+#define WMI_TPC_RATES_DL_OFDMA_ARRAY	BIT(5)
+#define WMI_TPC_CTL_PWR_ARRAY		BIT(6)
+#define WMI_TPC_CONFIG_PARAM		0x1
+#define ATH12K_TPC_RATE_ARRAY_MU	GENMASK(15, 8)
+#define ATH12K_TPC_RATE_ARRAY_SU	GENMASK(7, 0)
+#define TPC_STATS_REG_PWR_ALLOWED_TYPE	0
+
+enum wmi_halphy_ctrl_path_stats_id {
+	WMI_HALPHY_PDEV_TX_SU_STATS = 0,
+	WMI_HALPHY_PDEV_TX_SUTXBF_STATS,
+	WMI_HALPHY_PDEV_TX_MU_STATS,
+	WMI_HALPHY_PDEV_TX_MUTXBF_STATS,
+	WMI_HALPHY_PDEV_TX_STATS_MAX,
+};
+
+enum ath12k_wmi_tpc_stats_rates_array {
+	ATH12K_TPC_STATS_RATES_ARRAY1,
+	ATH12K_TPC_STATS_RATES_ARRAY2,
+};
+
+enum ath12k_wmi_tpc_stats_ctl_array {
+	ATH12K_TPC_STATS_CTL_ARRAY,
+	ATH12K_TPC_STATS_CTL_160ARRAY,
+};
+
+enum ath12k_wmi_tpc_stats_events {
+	ATH12K_TPC_STATS_CONFIG_REG_PWR_EVENT,
+	ATH12K_TPC_STATS_RATES_EVENT1,
+	ATH12K_TPC_STATS_RATES_EVENT2,
+	ATH12K_TPC_STATS_CTL_TABLE_EVENT
+};
+
+struct wmi_request_halphy_ctrl_path_stats_cmd_fixed_params {
+	__le32 tlv_header;
+	__le32 stats_id_mask;
+	__le32 request_id;
+	__le32 action;
+	__le32 subid;
+} __packed;
+
+struct ath12k_wmi_pdev_tpc_stats_event_fixed_params {
+	__le32 pdev_id;
+	__le32 end_of_event;
+	__le32 event_count;
+} __packed;
+
+struct wmi_tpc_config_params {
+	__le32 reg_domain;
+	__le32 chan_freq;
+	__le32 phy_mode;
+	__le32 twice_antenna_reduction;
+	__le32 twice_max_reg_power;
+	__le32 twice_antenna_gain;
+	__le32 power_limit;
+	__le32 rate_max;
+	__le32 num_tx_chain;
+	__le32 ctl;
+	__le32 flags;
+	__le32 caps;
+} __packed;
+
+struct wmi_max_reg_power_fixed_params {
+	__le32 reg_power_type;
+	__le32 reg_array_len;
+	__le32 d1;
+	__le32 d2;
+	__le32 d3;
+	__le32 d4;
+} __packed;
+
+struct wmi_max_reg_power_allowed_arg {
+	struct wmi_max_reg_power_fixed_params tpc_reg_pwr;
+	s16 *reg_pwr_array;
+};
+
+struct wmi_tpc_rates_array_fixed_params {
+	__le32 rate_array_type;
+	__le32 rate_array_len;
+} __packed;
+
+struct wmi_tpc_rates_array_arg {
+	struct wmi_tpc_rates_array_fixed_params tpc_rates_array;
+	s16 *rate_array;
+};
+
+struct wmi_tpc_ctl_pwr_fixed_params {
+	__le32 ctl_array_type;
+	__le32 ctl_array_len;
+	__le32 end_of_ctl_pwr;
+	__le32 ctl_pwr_count;
+	__le32 d1;
+	__le32 d2;
+	__le32 d3;
+	__le32 d4;
+} __packed;
+
+struct wmi_tpc_ctl_pwr_table_arg {
+	struct wmi_tpc_ctl_pwr_fixed_params tpc_ctl_pwr;
+	s8 *ctl_pwr_table;
+};
+
+struct wmi_tpc_stats_arg {
+	u32 pdev_id;
+	u32 event_count;
+	u32 end_of_event;
+	u32 tlvs_rcvd;
+	struct wmi_max_reg_power_allowed_arg max_reg_allowed_power;
+	struct wmi_tpc_rates_array_arg rates_array1;
+	struct wmi_tpc_rates_array_arg rates_array2;
+	struct wmi_tpc_config_params tpc_config;
+	struct wmi_tpc_ctl_pwr_table_arg ctl_array;
+};
+
 void ath12k_wmi_init_qcn9274(struct ath12k_base *ab,
 			     struct ath12k_wmi_resource_config_arg *config);
 void ath12k_wmi_init_wcn7850(struct ath12k_base *ab,
@@ -5471,7 +5989,7 @@
 			 struct sk_buff *frame);
 int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id,
 			     const u8 *p2p_ie);
-int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id,
+int ath12k_wmi_bcn_tmpl(struct ath12k_link_vif *arvif,
 			struct ieee80211_mutable_offsets *offs,
 			struct sk_buff *bcn,
 			struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args);
@@ -5585,6 +6103,13 @@
 			    const u8 *buf, size_t buf_len);
 int ath12k_wmi_set_bios_sar_cmd(struct ath12k_base *ab, const u8 *psar_table);
 int ath12k_wmi_set_bios_geo_cmd(struct ath12k_base *ab, const u8 *pgeo_table);
+int ath12k_wmi_send_stats_request_cmd(struct ath12k *ar, u32 stats_id,
+				      u32 vdev_id, u32 pdev_id);
+__le32 ath12k_wmi_tlv_hdr(u32 cmd, u32 len);
+
+int ath12k_wmi_send_tpc_stats_request(struct ath12k *ar,
+				      enum wmi_halphy_ctrl_path_stats_id tpc_stats_type);
+void ath12k_wmi_free_tpc_stats_mem(struct ath12k *ar);
 
 static inline u32
 ath12k_wmi_caps_ext_get_pdev_id(const struct ath12k_wmi_caps_ext_params *param)
@@ -5635,5 +6160,11 @@
 				 struct ath12k_link_vif *arvif);
 int ath12k_wmi_sta_keepalive(struct ath12k *ar,
 			     const struct wmi_sta_keepalive_arg *arg);
+int ath12k_wmi_mlo_setup(struct ath12k *ar, struct wmi_mlo_setup_arg *mlo_params);
+int ath12k_wmi_mlo_ready(struct ath12k *ar);
+int ath12k_wmi_mlo_teardown(struct ath12k *ar);
+void ath12k_wmi_fw_stats_dump(struct ath12k *ar,
+			      struct ath12k_fw_stats *fw_stats, u32 stats_id,
+			      char *buf);
 
 #endif
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/antenna.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/antenna.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/antenna.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/antenna.c	2025-07-01 14:10:42.764047339 +0200
@@ -193,7 +193,7 @@
 static void ath_ant_set_alt_ratio(struct ath_ant_comb *antcomb,
 				  struct ath_hw_antcomb_conf *conf)
 {
-	/* set alt to the conf with maximun ratio */
+	/* set alt to the conf with maximum ratio */
 	if (antcomb->first_ratio && antcomb->second_ratio) {
 		if (antcomb->rssi_second > antcomb->rssi_third) {
 			/* first alt*/
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/ar9002_hw.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/ar9002_hw.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/ar9002_hw.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/ar9002_hw.c	2025-07-01 14:10:42.764047339 +0200
@@ -395,7 +395,7 @@
 		ah->config.hw_hang_checks |= HW_MAC_HANG;
 }
 
-/* Sets up the AR5008/AR9001/AR9002 hardware familiy callbacks */
+/* Sets up the AR5008/AR9001/AR9002 hardware family callbacks */
 int ar9002_hw_attach_ops(struct ath_hw *ah)
 {
 	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/ar9003_calib.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/ar9003_calib.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/ar9003_calib.c	2025-09-25 17:40:34.175360324 +0200
@@ -165,6 +165,8 @@
 		if (ret < 0)
 			return ret;
 
+		ath9k_hw_update_cca_threshold(ah);
+
 		/* start NF calibration, without updating BB NF register */
 		ath9k_hw_start_nfcal(ah, false);
 	}
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/ar9003_hw.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/ar9003_hw.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/ar9003_hw.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/ar9003_hw.c	2025-07-01 14:10:42.764047339 +0200
@@ -1170,7 +1170,7 @@
 	return false;
 }
 
-/* Sets up the AR9003 hardware familiy callbacks */
+/* Sets up the AR9003 hardware family callbacks */
 void ar9003_hw_attach_ops(struct ath_hw *ah)
 {
 	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/ar9003_mci.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/ar9003_mci.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/ar9003_mci.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/ar9003_mci.c	2025-07-01 14:10:42.764047339 +0200
@@ -637,7 +637,7 @@
 		 * same time. Since BT's calibration doesn't happen
 		 * that often, we'll let BT completes calibration then
 		 * we continue to wait for cal_grant from BT.
-		 * Orginal: Wait BT_CAL_GRANT.
+		 * Original: Wait BT_CAL_GRANT.
 		 * New: Receive BT_CAL_REQ -> send WLAN_CAL_GRANT->wait
 		 * BT_CAL_DONE -> Wait BT_CAL_GRANT.
 		 */
@@ -747,7 +747,7 @@
 	 * BT is sleeping. Check if BT wakes up during
 	 * WLAN calibration. If BT wakes up during
 	 * WLAN calibration, need to go through all
-	 * message exchanges again and recal.
+	 * message exchanges again and recalibrate.
 	 */
 	REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_RAW,
 		  (AR_MCI_INTERRUPT_RX_MSG_REMOTE_RESET |
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/ar9003_phy.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/ar9003_phy.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/ar9003_phy.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/ar9003_phy.c	2025-09-25 17:40:34.175360324 +0200
@@ -1915,6 +1915,116 @@
 	}
 }
 
+/*
+ * Adaptive CCA threshold - Calculate and update CCA threshold periodically
+ * after NF calibration and at the end of initialization sequence during every
+ * chip reset.
+ *
+ * Step 1: Compute NF_max_primary and NF_max_extension
+ * If noise floor completes,
+ *   NF_max_primary = max of noise floor read across all chains in primary channel
+ *   NF_max_extension = max of noise floor read across all chains in extension channel
+ * else
+ *   NF_max_primary = NF_max_extension = the value that is forced into HW as noise floor
+ *
+ * Step 2: Compute CCA_threshold_primary and CCA_threshold_extension
+ *   CCA_threshold_primary = CCA_detection_level - CCA_detection_margin - NF_max_primary
+ *   CCA_threshold_extension = CCA_detection_level - CCA_detection_margin - NF_max_extension
+ *
+ * Step 3: Program CCA thresholds
+ *
+ */
+static void ar9003_update_cca_threshold(struct ath_hw *ah)
+{
+	struct ath9k_hw_cal_data *cal = ah->caldata;
+	struct ath9k_nfcal_hist *h;
+	u_int16_t cca_detection_margin_pri, cca_detection_margin_ext;
+	int16_t nf, nf_max_primary, nf_max_extension, nf_nominal,
+		derived_max_cca, max_cca_cap, cca_threshold_primary,
+		cca_threshold_extension;
+	u_int8_t chainmask;
+	int chan, chain, i, init_nf = 0;
+
+	if (!ah->adaptive_cca_threshold_enabled)
+		return;
+
+	if (!cal)
+		return;
+
+	h = cal->nfCalHist;
+
+	if (IS_CHAN_2GHZ(ah->curchan))
+		nf = ah->nf_2g.max;
+	else
+		nf = ah->nf_5g.max;
+
+	nf_max_primary = nf_max_extension = nf;
+
+	chainmask = ah->rxchainmask & ah->caps.rx_chainmask;
+
+	/* Compute max of noise floor read across all chains, separately for
+	 * the primary (ctl) and extension channels */
+	for (chan = 0; chan < 2 /*ctl,ext*/; chan++) {
+		ath_dbg(ath9k_hw_common(ah), CALIBRATE, "chan: %s\n",
+			!chan ? "ctrl" : "extn");
+
+		for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
+			if (!((chainmask >> chain) & 0x1))
+				continue;
+
+			i = chan * AR9300_MAX_CHAINS + chain;
+			if (!init_nf) {
+				nf = h[i].privNF;
+				init_nf = 1;
+			}
+
+			ath_dbg(ath9k_hw_common(ah), CALIBRATE, "privNF[%d]: %d\n",
+				i, h[i].privNF);
+			nf = (nf > h[i].privNF) ? nf : h[i].privNF;
+		}
+
+		if (!chan)
+			nf_max_primary = nf;
+		else
+			nf_max_extension = nf;
+	}
+
+	if (IS_CHAN_HT40(ah->curchan))
+		nf_nominal = NF_NOM_40MHZ;
+	else
+		nf_nominal = NF_NOM_20MHZ;
+
+	cca_detection_margin_pri = ah->cca_detection_margin;
+	if (nf_max_primary < nf_nominal)
+		cca_detection_margin_pri += (nf_nominal - nf_max_primary);
+
+	cca_detection_margin_ext = ah->cca_detection_margin;
+	if (nf_max_extension < nf_nominal)
+		cca_detection_margin_ext += (nf_nominal - nf_max_extension);
+
+	derived_max_cca = ah->cca_detection_level - ah->cca_detection_margin - BEST_CASE_NOISE_FLOOR;
+	max_cca_cap = derived_max_cca < MAX_CCA_THRESHOLD ? derived_max_cca : MAX_CCA_THRESHOLD;
+
+	ath_dbg(ath9k_hw_common(ah), CALIBRATE, "derived_max_cca: %d, max_cca_cap: %d\n",
+		derived_max_cca, max_cca_cap);
+
+	cca_threshold_primary = ah->cca_detection_level - cca_detection_margin_pri - nf_max_primary;
+	cca_threshold_primary = cca_threshold_primary < max_cca_cap ?
+				(cca_threshold_primary > MIN_CCA_THRESHOLD ?
+					cca_threshold_primary : MIN_CCA_THRESHOLD) : max_cca_cap;
+	cca_threshold_extension = ah->cca_detection_level - cca_detection_margin_ext - nf_max_extension;
+	cca_threshold_extension = cca_threshold_extension < max_cca_cap ?
+				  (cca_threshold_extension > MIN_CCA_THRESHOLD ?
+					cca_threshold_extension : MIN_CCA_THRESHOLD) : max_cca_cap;
+
+	ath_dbg(ath9k_hw_common(ah), CALIBRATE,
+		"nf_max_primary: %d, nf_max_extension: %d, cca_pri: %d, cca_ext: %d\n",
+		nf_max_primary, nf_max_extension, cca_threshold_primary, cca_threshold_extension);
+
+	REG_RMW_FIELD(ah, AR_PHY_CCA_0, AR_PHY_CCA_THRESH62, cca_threshold_primary);
+	REG_RMW_FIELD(ah, AR_PHY_EXTCHN_PWRTHR1, AR_PHY_EXT_CCA0_THRESH62, cca_threshold_extension);
+	REG_RMW_FIELD(ah, AR_PHY_CCA_CTRL_0, AR_PHY_EXT_CCA0_THRESH62_MODE, 0x0);
+}
+
 void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
 {
 	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
@@ -1950,6 +2060,7 @@
 	priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs;
 	priv_ops->set_radar_params = ar9003_hw_set_radar_params;
 	priv_ops->fast_chan_change = ar9003_hw_fast_chan_change;
+	priv_ops->update_cca_threshold = ar9003_update_cca_threshold;
 
 	ops->antdiv_comb_conf_get = ar9003_hw_antdiv_comb_conf_get;
 	ops->antdiv_comb_conf_set = ar9003_hw_antdiv_comb_conf_set;
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/ar9003_phy.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/ar9003_phy.h
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/ar9003_phy.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/ar9003_phy.h	2025-09-25 17:40:34.175360324 +0200
@@ -246,7 +246,7 @@
 
 
 /*
- * MRC Feild Definitions
+ * MRC Field Definitions
  */
 #define AR_PHY_SGI_DSC_MAN   0x0007FFF0
 #define AR_PHY_SGI_DSC_MAN_S 4
@@ -399,6 +399,8 @@
 #define AR_PHY_EXT_CCA0_THRESH62_S  0
 #define AR_PHY_EXT_CCA0_THRESH62_1    0x000001FF
 #define AR_PHY_EXT_CCA0_THRESH62_1_S  0
+#define AR_PHY_EXT_CCA0_THRESH62_MODE    0x00040000
+#define AR_PHY_EXT_CCA0_THRESH62_MODE_S  18
 #define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK          0x0000003F
 #define AR_PHY_CCK_DETECT_WEAK_SIG_THR_CCK_S        0
 #define AR_PHY_CCK_DETECT_ANT_SWITCH_TIME           0x00001FC0
@@ -1317,4 +1319,10 @@
 
 #define AR9300_DFS_FIRPWR -28
 
+#define BEST_CASE_NOISE_FLOOR         -130
+#define MAX_CCA_THRESHOLD              90
+#define MIN_CCA_THRESHOLD              0
+#define NF_NOM_20MHZ                  -101
+#define NF_NOM_40MHZ                  -98
+
 #endif  /* AR9003_PHY_H */
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/ath9k.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/ath9k.h
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/ath9k.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/ath9k.h	2025-09-25 17:40:34.175360324 +0200
@@ -274,7 +274,6 @@
 
 struct ath_tx_control {
 	struct ath_txq *txq;
-	struct ath_node *an;
 	struct ieee80211_sta *sta;
 	u8 paprd;
 };
@@ -338,7 +337,7 @@
 
 	struct ath_beacon_config beacon;
 	struct ath9k_hw_cal_data caldata;
-	struct timespec64 tsf_ts;
+	ktime_t tsf_ts;
 	u64 tsf_val;
 	u32 last_beacon;
 
@@ -592,8 +591,8 @@
 int ath_tx_init(struct ath_softc *sc, int nbufs);
 int ath_txq_update(struct ath_softc *sc, int qnum,
 		   struct ath9k_tx_queue_info *q);
-u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
-		     int width, int half_gi, bool shortPreamble);
+u32 ath_pkt_duration(u8 rix, int pktlen, int width,
+		     int half_gi, bool shortPreamble);
 void ath_update_max_aggr_framelen(struct ath_softc *sc, int queue, int txop);
 void ath_assign_seq(struct ath_common *common, struct sk_buff *skb);
 int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
@@ -1011,13 +1010,15 @@
 	struct ath_offchannel offchannel;
 	struct ath_chanctx *next_chan;
 	struct completion go_beacon;
-	struct timespec64 last_event_time;
+	ktime_t last_event_time;
 #endif
 
 	unsigned long driver_data;
 
 	u8 gtt_cnt;
 	u32 intrstatus;
+	unsigned long rx_active_check_time;
+	u32 rx_active_count;
 	u16 ps_flags; /* PS_* */
 	bool ps_enabled;
 	bool ps_idle;
@@ -1152,4 +1153,18 @@
 static inline void ath_ahb_exit(void) {};
 #endif
 
+#ifdef CONFIG_ATH9K_TX99
+extern int ath9k_enable_tx99;
+
+static inline bool ath9k_tx99_enabled(void)
+{
+	return ath9k_enable_tx99 > 0;
+}
+#else
+static inline bool ath9k_tx99_enabled(void)
+{
+	return false;
+}
+#endif
+
 #endif /* ATH9K_H */
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/beacon.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/beacon.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/beacon.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/beacon.c	2025-07-01 14:10:42.768047446 +0200
@@ -293,7 +293,7 @@
 	/* Modify TSF as required and update the HW. */
 	avp->chanctx->tsf_val += tsfadjust;
 	if (sc->cur_chan == avp->chanctx) {
-		offset = ath9k_hw_get_tsf_offset(&avp->chanctx->tsf_ts, NULL);
+		offset = ath9k_hw_get_tsf_offset(avp->chanctx->tsf_ts, 0);
 		ath9k_hw_settsf64(sc->sc_ah, avp->chanctx->tsf_val + offset);
 	}
 
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/calib.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/calib.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/calib.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/calib.c	2025-09-25 17:40:34.175360324 +0200
@@ -16,29 +16,25 @@
 
 #include "hw.h"
 #include "hw-ops.h"
+#include <linux/sort.h>
 #include <linux/export.h>
 
 /* Common calibration code */
 
+static int rcmp_i16(const void *x, const void *y)
+{
+	/* Sort in reverse order. */
+	return *(int16_t *)y - *(int16_t *)x;
+}
 
 static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
 {
-	int16_t nfval;
-	int16_t sort[ATH9K_NF_CAL_HIST_MAX];
-	int i, j;
-
-	for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++)
-		sort[i] = nfCalBuffer[i];
+	int16_t nfcal[ATH9K_NF_CAL_HIST_MAX];
 
-	for (i = 0; i < ATH9K_NF_CAL_HIST_MAX - 1; i++) {
-		for (j = 1; j < ATH9K_NF_CAL_HIST_MAX - i; j++) {
-			if (sort[j] > sort[j - 1])
-				swap(sort[j], sort[j - 1]);
-		}
-	}
-	nfval = sort[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
+	memcpy(nfcal, nfCalBuffer, sizeof(nfcal));
+	sort(nfcal, ATH9K_NF_CAL_HIST_MAX, sizeof(int16_t), rcmp_i16, NULL);
 
-	return nfval;
+	return nfcal[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
 }
 
 static struct ath_nf_limits *ath9k_hw_get_nf_limits(struct ath_hw *ah,
@@ -231,12 +227,17 @@
 	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
 		    AR_PHY_AGC_CONTROL_ENABLE_NF);
 
+	if (ah->adaptive_cca_threshold_enabled) {
+		REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
+		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+	} else {
 	if (update)
 		REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah),
 		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
 	else
 		REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah),
 		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
+	}
 
 	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_NF);
 }
@@ -473,9 +474,13 @@
 	 * the baseband update the internal NF value itself, similar to
 	 * what is being done after a full reset.
 	 */
-	if (!test_bit(NFCAL_PENDING, &caldata->cal_flags))
-		ath9k_hw_start_nfcal(ah, true);
-	else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_NF))
+	if (!test_bit(NFCAL_PENDING, &caldata->cal_flags)) {
+		bool do_fast_recalib;
+
+		ath9k_hw_update_cca_threshold(ah);
+		do_fast_recalib = !ah->adaptive_cca_threshold_enabled;
+		ath9k_hw_start_nfcal(ah, do_fast_recalib);
+	} else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) & AR_PHY_AGC_CONTROL_NF))
 		ath9k_hw_getnf(ah, ah->curchan);
 
 	set_bit(NFCAL_INTF, &caldata->cal_flags);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/channel.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/channel.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/channel.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/channel.c	2025-09-25 17:40:34.179360343 +0200
@@ -17,7 +17,7 @@
 #include "ath9k.h"
 
 /* Set/change channels.  If the channel is really being changed, it's done
- * by reseting the chip.  To accomplish this we must first cleanup any pending
+ * by resetting the chip.  To accomplish this we must first cleanup any pending
  * DMA, then restart stuff.
  */
 static int ath_set_channel(struct ath_softc *sc)
@@ -232,16 +232,11 @@
 
 static u32 chanctx_event_delta(struct ath_softc *sc)
 {
-	u64 ms;
-	struct timespec64 ts, *old;
+	ktime_t ts = ktime_get_raw();
+	s64 ms = ktime_ms_delta(ts, sc->last_event_time);
 
-	ktime_get_raw_ts64(&ts);
-	old = &sc->last_event_time;
-	ms = ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
-	ms -= old->tv_sec * 1000 + old->tv_nsec / 1000000;
 	sc->last_event_time = ts;
-
-	return (u32)ms;
+	return ms;
 }
 
 void ath_chanctx_check_active(struct ath_softc *sc, struct ath_chanctx *ctx)
@@ -334,8 +329,8 @@
 static void ath_chanctx_adjust_tbtt_delta(struct ath_softc *sc)
 {
 	struct ath_chanctx *prev, *cur;
-	struct timespec64 ts;
 	u32 cur_tsf, prev_tsf, beacon_int;
+	ktime_t ts;
 	s32 offset;
 
 	beacon_int = TU_TO_USEC(sc->cur_chan->beacon.beacon_interval);
@@ -346,12 +341,12 @@
 	if (!prev->switch_after_beacon)
 		return;
 
-	ktime_get_raw_ts64(&ts);
+	ts = ktime_get_raw();
 	cur_tsf = (u32) cur->tsf_val +
-		  ath9k_hw_get_tsf_offset(&cur->tsf_ts, &ts);
+		  ath9k_hw_get_tsf_offset(cur->tsf_ts, ts);
 
 	prev_tsf = prev->last_beacon - (u32) prev->tsf_val + cur_tsf;
-	prev_tsf -= ath9k_hw_get_tsf_offset(&prev->tsf_ts, &ts);
+	prev_tsf -= ath9k_hw_get_tsf_offset(prev->tsf_ts, ts);
 
 	/* Adjust the TSF time of the AP chanctx to keep its beacons
 	 * at half beacon interval offset relative to the STA chanctx.
@@ -691,7 +686,7 @@
 		 */
 		tsf_time = sc->sched.switch_start_time;
 		tsf_time -= (u32) sc->cur_chan->tsf_val +
-			ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL);
+			ath9k_hw_get_tsf_offset(sc->cur_chan->tsf_ts, 0);
 		tsf_time += ath9k_hw_gettsf32(ah);
 
 		sc->sched.beacon_adjust = false;
@@ -1230,10 +1225,10 @@
 {
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	struct ath_chanctx *old_ctx;
-	struct timespec64 ts;
 	bool measure_time = false;
 	bool send_ps = false;
 	bool queues_stopped = false;
+	ktime_t ts;
 
 	spin_lock_bh(&sc->chan_lock);
 	if (!sc->next_chan) {
@@ -1260,7 +1255,7 @@
 		spin_unlock_bh(&sc->chan_lock);
 
 		if (sc->next_chan == &sc->offchannel.chan) {
-			ktime_get_raw_ts64(&ts);
+			ts = ktime_get_raw();
 			measure_time = true;
 		}
 
@@ -1277,7 +1272,7 @@
 		spin_lock_bh(&sc->chan_lock);
 
 		if (sc->cur_chan != &sc->offchannel.chan) {
-			ktime_get_raw_ts64(&sc->cur_chan->tsf_ts);
+			sc->cur_chan->tsf_ts = ktime_get_raw();
 			sc->cur_chan->tsf_val = ath9k_hw_gettsf64(sc->sc_ah);
 		}
 	}
@@ -1303,7 +1298,7 @@
 		ath_set_channel(sc);
 		if (measure_time)
 			sc->sched.channel_switch_time =
-				ath9k_hw_get_tsf_offset(&ts, NULL);
+				ath9k_hw_get_tsf_offset(ts, 0);
 		/*
 		 * A reset will ensure that all queues are woken up,
 		 * so there is no need to awaken them again.
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/debug.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/debug.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/debug.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/debug.c	2025-09-25 17:40:34.179360343 +0200
@@ -750,6 +750,7 @@
 		[RESET_TYPE_CALIBRATION] = "Calibration error",
 		[RESET_TX_DMA_ERROR] = "Tx DMA stop error",
 		[RESET_RX_DMA_ERROR] = "Rx DMA stop error",
+		[RESET_TYPE_RX_INACTIVE] = "Rx path inactive",
 	};
 	int i;
 
@@ -1249,6 +1250,101 @@
 	.llseek = default_llseek,
 };
 
+static ssize_t read_file_cca_detection_level(struct file *file,
+					     char __user *user_buf,
+					     size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	struct ath_hw *ah = sc->sc_ah;
+	char buf[32];
+	unsigned int len;
+
+	len = sprintf(buf, "%d\n", ah->cca_detection_level);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_cca_detection_level(struct file *file,
+					      const char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	struct ath_hw *ah = sc->sc_ah;
+	long val;
+	char buf[32];
+	ssize_t len;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	if (kstrtol(buf, 0, &val))
+		return -EINVAL;
+
+	if (val > 0)
+		return -EINVAL;
+
+	ah->cca_detection_level = val;
+
+	return count;
+}
+
+static const struct file_operations fops_cca_detection_level = {
+	.read = read_file_cca_detection_level,
+	.write = write_file_cca_detection_level,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+static ssize_t read_file_cca_detection_margin(struct file *file,
+					      char __user *user_buf,
+					      size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	struct ath_hw *ah = sc->sc_ah;
+	char buf[32];
+	unsigned int len;
+
+	len = sprintf(buf, "%d\n", ah->cca_detection_margin);
+
+	return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+}
+
+static ssize_t write_file_cca_detection_margin(struct file *file,
+					       const char __user *user_buf,
+					       size_t count, loff_t *ppos)
+{
+	struct ath_softc *sc = file->private_data;
+	struct ath_hw *ah = sc->sc_ah;
+	unsigned long val;
+	char buf[32];
+	ssize_t len;
+
+	len = min(count, sizeof(buf) - 1);
+	if (copy_from_user(buf, user_buf, len))
+		return -EFAULT;
+
+	buf[len] = '\0';
+
+	if (kstrtoul(buf, 0, &val))
+		return -EINVAL;
+
+	ah->cca_detection_margin = val;
+
+	return count;
+}
+
+static const struct file_operations fops_cca_detection_margin = {
+	.read = read_file_cca_detection_margin,
+	.write = write_file_cca_detection_margin,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
 /* Ethtool support for get-stats */
 
 #define AMKSTR(nm) #nm "_BE", #nm "_BK", #nm "_VI", #nm "_VO"
@@ -1455,6 +1551,11 @@
 
 	debugfs_create_file("nf_override", 0600,
 			    sc->debug.debugfs_phy, sc, &fops_nf_override);
-
+	debugfs_create_file("cca_detection_level", 0600,
+			    sc->debug.debugfs_phy, sc, &fops_cca_detection_level);
+	debugfs_create_file("cca_detection_margin", 0600,
+			    sc->debug.debugfs_phy, sc, &fops_cca_detection_margin);
+	debugfs_create_bool("adaptive_cca_threshold_enabled", 0600, sc->debug.debugfs_phy,
+			    &sc->sc_ah->adaptive_cca_threshold_enabled);
 	return 0;
 }
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/debug.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/debug.h
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/debug.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/debug.h	2025-07-01 14:10:42.768047446 +0200
@@ -53,6 +53,7 @@
 	RESET_TYPE_CALIBRATION,
 	RESET_TX_DMA_ERROR,
 	RESET_RX_DMA_ERROR,
+	RESET_TYPE_RX_INACTIVE,
 	__RESET_TYPE_MAX
 };
 
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/hw-ops.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/hw-ops.h
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/hw-ops.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/hw-ops.h	2025-09-25 17:40:34.183360363 +0200
@@ -260,6 +260,14 @@
 	ath9k_hw_private_ops(ah)->set_radar_params(ah, &ah->radar_conf);
 }
 
+static inline void ath9k_hw_update_cca_threshold(struct ath_hw *ah)
+{
+	if (!ath9k_hw_private_ops(ah)->update_cca_threshold)
+		return;
+
+	ath9k_hw_private_ops(ah)->update_cca_threshold(ah);
+}
+
 static inline void ath9k_hw_init_cal_settings(struct ath_hw *ah)
 {
 	ath9k_hw_private_ops(ah)->init_cal_settings(ah);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/hw.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/hw.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/hw.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/hw.c	2025-09-25 17:40:34.187360383 +0200
@@ -395,6 +395,7 @@
 {
 	struct ath_common *common = ath9k_hw_common(ah);
 
+
 	ah->config.dma_beacon_response_time = 1;
 	ah->config.sw_beacon_response_time = 6;
 	ah->config.cwm_ignore_extcca = false;
@@ -1837,6 +1838,7 @@
 		ar9003_mci_2g5g_switch(ah, false);
 
 	ath9k_hw_loadnf(ah, ah->curchan);
+	ath9k_hw_update_cca_threshold(ah);
 	ath9k_hw_start_nfcal(ah, true);
 
 	if (AR_SREV_9271(ah))
@@ -1847,20 +1849,11 @@
 	return -EINVAL;
 }
 
-u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur)
+u32 ath9k_hw_get_tsf_offset(ktime_t last, ktime_t cur)
 {
-	struct timespec64 ts;
-	s64 usec;
-
-	if (!cur) {
-		ktime_get_raw_ts64(&ts);
-		cur = &ts;
-	}
-
-	usec = cur->tv_sec * 1000000ULL + cur->tv_nsec / 1000;
-	usec -= last->tv_sec * 1000000ULL + last->tv_nsec / 1000;
-
-	return (u32) usec;
+	if (cur == 0)
+		cur = ktime_get_raw();
+	return ktime_us_delta(cur, last);
 }
 EXPORT_SYMBOL(ath9k_hw_get_tsf_offset);
 
@@ -1871,7 +1864,7 @@
 	u32 saveLedState;
 	u32 saveDefAntenna;
 	u32 macStaId1;
-	struct timespec64 tsf_ts;
+	ktime_t tsf_ts;
 	u32 tsf_offset;
 	u64 tsf = 0;
 	int r;
@@ -1917,7 +1910,7 @@
 	macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
 
 	/* Save TSF before chip reset, a cold reset clears it */
-	ktime_get_raw_ts64(&tsf_ts);
+	tsf_ts = ktime_get_raw();
 	tsf = ath9k_hw_gettsf64(ah);
 
 	saveLedState = REG_READ(ah, AR_CFG_LED) &
@@ -1951,7 +1944,7 @@
 	}
 
 	/* Restore TSF */
-	tsf_offset = ath9k_hw_get_tsf_offset(&tsf_ts, NULL);
+	tsf_offset = ath9k_hw_get_tsf_offset(tsf_ts, 0);
 	ath9k_hw_settsf64(ah, tsf + tsf_offset);
 
 	if (AR_SREV_9280_20_OR_LATER(ah))
@@ -1975,7 +1968,7 @@
 	 * value after the initvals have been applied.
 	 */
 	if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) {
-		tsf_offset = ath9k_hw_get_tsf_offset(&tsf_ts, NULL);
+		tsf_offset = ath9k_hw_get_tsf_offset(tsf_ts, 0);
 		ath9k_hw_settsf64(ah, tsf + tsf_offset);
 	}
 
@@ -2062,6 +2055,7 @@
 
 	if (AR_SREV_9300_20_OR_LATER(ah)) {
 		ath9k_hw_loadnf(ah, chan);
+		ath9k_hw_update_cca_threshold(ah);
 		ath9k_hw_start_nfcal(ah, true);
 	}
 
@@ -2149,7 +2143,7 @@
 
 		/* When chip goes into network sleep, it could be waken
 		 * up by MCI_INT interrupt caused by BT's HW messages
-		 * (LNA_xxx, CONT_xxx) which chould be in a very fast
+		 * (LNA_xxx, CONT_xxx) which could be in a very fast
 		 * rate (~100us). This will cause chip to leave and
 		 * re-enter network sleep mode frequently, which in
 		 * consequence will have WLAN MCI HW to generate lots of
@@ -2544,7 +2538,7 @@
 
 	pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
 	/*
-	 * For AR9271 we will temporarilly uses the rx chainmax as read from
+	 * For AR9271 we will temporarily use the rx chainmax as read from
 	 * the EEPROM.
 	 */
 	if ((ah->hw_version.devid == AR5416_DEVID_PCI) &&
@@ -3384,3 +3378,12 @@
 	hw_name[used] = '\0';
 }
 EXPORT_SYMBOL(ath9k_hw_name);
+
+void ath9k_hw_name_short(struct ath_hw *ah, char *hw_name, size_t len)
+{
+	scnprintf(hw_name, len,
+		  "AR%s, 0x%x",
+		  ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
+		  ah->hw_version.macRev);
+}
+EXPORT_SYMBOL(ath9k_hw_name_short);
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/hw.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/hw.h
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/hw.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/hw.h	2025-09-25 17:40:34.187360383 +0200
@@ -282,7 +282,7 @@
  * an exact user defined pattern or de-authentication/disassoc pattern.
  * @ATH9K_HW_WOW_PATTERN_MATCH_DWORD: device requires the first four
  * bytes of the pattern for user defined pattern, de-authentication and
- * disassociation patterns for all types of possible frames recieved
+ * disassociation patterns for all types of possible frames received
  * of those types.
  */
 
@@ -667,7 +667,7 @@
 				 struct ath_hw_radar_conf *conf);
 	int (*fast_chan_change)(struct ath_hw *ah, struct ath9k_channel *chan,
 				u8 *ini_reloaded);
-
+	void (*update_cca_threshold)(struct ath_hw *ah);
 	/* ANI */
 	void (*ani_cache_ini_regs)(struct ath_hw *ah);
 
@@ -988,6 +988,10 @@
 	bool msi_enabled;
 	u32 msi_mask;
 	u32 msi_reg;
+
+	bool adaptive_cca_threshold_enabled;
+	s16 cca_detection_level;
+	u16 cca_detection_margin;
 };
 
 struct ath_bus_ops {
@@ -1066,7 +1070,7 @@
 u64 ath9k_hw_gettsf64(struct ath_hw *ah);
 void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
 void ath9k_hw_reset_tsf(struct ath_hw *ah);
-u32 ath9k_hw_get_tsf_offset(struct timespec64 *last, struct timespec64 *cur);
+u32 ath9k_hw_get_tsf_offset(ktime_t last, ktime_t cur);
 void ath9k_hw_set_tsfadjust(struct ath_hw *ah, bool set);
 void ath9k_hw_init_global_settings(struct ath_hw *ah);
 u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah);
@@ -1096,6 +1100,7 @@
 void ath_gen_timer_isr(struct ath_hw *hw);
 
 void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
+void ath9k_hw_name_short(struct ath_hw *ah, char *hw_name, size_t len);
 
 /* PHY */
 void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/init.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/init.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/init.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/init.c	2025-09-25 17:40:34.187360383 +0200
@@ -79,6 +79,16 @@
 module_param_named(use_msi, ath9k_use_msi, int, 0444);
 MODULE_PARM_DESC(use_msi, "Use MSI instead of INTx if possible");
 
+int ath9k_use_adaptive_cca;
+module_param_named(use_adaptive_cca, ath9k_use_adaptive_cca, int, 0444);
+MODULE_PARM_DESC(use_adaptive_cca, "enable adaptive cca by default");
+
+#ifdef CONFIG_ATH9K_TX99
+int ath9k_enable_tx99;
+module_param_named(enable_tx99, ath9k_enable_tx99, int, 0444);
+MODULE_PARM_DESC(enable_tx99, "Enable TX99, which will disable STA/AP mode support");
+#endif
+
 bool is_ath9k_unloaded;
 
 #ifdef CONFIG_MAC80211_LEDS
@@ -647,7 +657,9 @@
 		ah->ah_flags |= AH_NO_EEP_SWAP;
 	}
 
-	of_get_mac_address(np, common->macaddr);
+	ret = of_get_mac_address(np, common->macaddr);
+	if (ret == -EPROBE_DEFER)
+		return ret;
 
 	return 0;
 }
@@ -670,6 +682,9 @@
 	ah->hw_version.devid = devid;
 	ah->ah_flags |= AH_USE_EEPROM;
 	ah->led_pin = -1;
+	ah->cca_detection_level = -70;
+	ah->cca_detection_margin = 3;
+	ah->adaptive_cca_threshold_enabled = ath9k_use_adaptive_cca;
 	ah->reg_ops.read = ath9k_ioread32;
 	ah->reg_ops.multi_read = ath9k_multi_ioread32;
 	ah->reg_ops.write = ath9k_iowrite32;
@@ -698,6 +713,7 @@
 	common->debug_mask = ath9k_debug;
 	common->btcoex_enabled = ath9k_btcoex_enable == 1;
 	common->disable_ani = false;
+	common->dfs_pulse_valid_diff_ts = 0;
 
 	/*
 	 * Platform quirks.
@@ -933,7 +949,7 @@
 			       NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
 			       NL80211_FEATURE_P2P_GO_CTWIN;
 
-	if (!IS_ENABLED(CONFIG_ATH9K_TX99)) {
+	if (!ath9k_tx99_enabled()) {
 		hw->wiphy->interface_modes =
 			BIT(NL80211_IFTYPE_P2P_GO) |
 			BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -1048,6 +1064,10 @@
 
 	wiphy_read_of_freq_limits(hw->wiphy);
 
+	ath9k_hw_name_short(ah,
+			    hw->wiphy->fw_version,
+			    sizeof(hw->wiphy->fw_version));
+
 	/* Register with mac80211 */
 	error = ieee80211_register_hw(hw);
 	if (error)
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/link.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/link.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/link.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/link.c	2025-09-25 17:40:34.187360383 +0200
@@ -50,7 +50,36 @@
 		"tx hung, resetting the chip\n");
 	ath9k_queue_reset(sc, RESET_TYPE_TX_HANG);
 	return false;
+}
+
+#define RX_INACTIVE_CHECK_INTERVAL (4 * MSEC_PER_SEC)
+
+static bool ath_hw_rx_inactive_check(struct ath_softc *sc)
+{
+	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+	u32 interval, count;
+
+	interval = jiffies_to_msecs(jiffies - sc->rx_active_check_time);
+	count = sc->rx_active_count;
+
+	if (interval < RX_INACTIVE_CHECK_INTERVAL)
+		return true; /* too soon to check */
 
+	sc->rx_active_count = 0;
+	sc->rx_active_check_time = jiffies;
+
+	/* Need at least one interrupt per second, and we should only react if
+	 * we are within a factor of two of the expected interval
+	 */
+	if (interval > RX_INACTIVE_CHECK_INTERVAL * 2 ||
+	    count >= interval / MSEC_PER_SEC)
+		return true;
+
+	ath_dbg(common, RESET,
+		"RX inactivity detected. Schedule chip reset\n");
+	ath9k_queue_reset(sc, RESET_TYPE_RX_INACTIVE);
+
+	return false;
 }
 
 void ath_hw_check_work(struct work_struct *work)
@@ -58,8 +87,8 @@
 	struct ath_softc *sc = container_of(work, struct ath_softc,
 					    hw_check_work.work);
 
-	if (!ath_hw_check(sc) ||
-	    !ath_tx_complete_check(sc))
+	if (!ath_hw_check(sc) || !ath_tx_complete_check(sc) ||
+	    !ath_hw_rx_inactive_check(sc))
 		return;
 
 	ieee80211_queue_delayed_work(sc->hw, &sc->hw_check_work,
@@ -523,11 +552,14 @@
 		survey->filled |= SURVEY_INFO_TIME |
 			SURVEY_INFO_TIME_BUSY |
 			SURVEY_INFO_TIME_RX |
+			SURVEY_INFO_TIME_BSS_RX |
 			SURVEY_INFO_TIME_TX;
 		survey->time += cc->cycles / div;
 		survey->time_busy += cc->rx_busy / div;
 		survey->time_rx += cc->rx_frame / div;
 		survey->time_tx += cc->tx_frame / div;
+		/* convert rx airtime from usec to msec */
+		survey->time_bss_rx += cc->rx_bss_frame / 1000;
 	}
 
 	if (cc->cycles < div)
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/mac.h linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/mac.h
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/mac.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/mac.h	2025-07-01 14:10:42.776047660 +0200
@@ -251,7 +251,7 @@
  * when the descriptor is specifically marked to generate
  * an interrupt with this flag. Descriptors should be
  * marked periodically to insure timely replenishing of the
- * supply needed for sending frames. Defering interrupts
+ * supply needed for sending frames. Deferring interrupts
  * reduces system load and potentially allows more concurrent
  * work to be done but if done to aggressively can cause
  * senders to backup. When the hardware queue is left too
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/main.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/main.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/main.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/main.c	2025-09-25 17:40:34.187360383 +0200
@@ -249,8 +249,7 @@
 		if (sc->cur_chan->tsf_val) {
 			u32 offset;
 
-			offset = ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts,
-							 NULL);
+			offset = ath9k_hw_get_tsf_offset(sc->cur_chan->tsf_ts, 0);
 			ath9k_hw_settsf64(ah, sc->cur_chan->tsf_val + offset);
 		}
 
@@ -453,6 +452,7 @@
 			ath_rx_tasklet(sc, 0, true);
 
 		ath_rx_tasklet(sc, 0, false);
+		sc->rx_active_count++;
 	}
 
 	if (status & ATH9K_INT_TX) {
@@ -1001,7 +1001,7 @@
 static void ath9k_vif_iter_set_beacon(struct ath9k_vif_iter_data *iter_data,
 				      struct ieee80211_vif *vif)
 {
-	/* Use the first (configured) interface, but prefering AP interfaces. */
+	/* Use the first (configured) interface, but preferring AP interfaces. */
 	if (!iter_data->primary_beacon_vif) {
 		iter_data->primary_beacon_vif = vif;
 	} else {
@@ -1336,7 +1336,7 @@
 	struct ath_node *an = &avp->mcast_node;
 
 	mutex_lock(&sc->mutex);
-	if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
+	if (ath9k_tx99_enabled()) {
 		if (sc->cur_chan->nvifs >= 1) {
 			mutex_unlock(&sc->mutex);
 			return -EOPNOTSUPP;
@@ -1386,7 +1386,7 @@
 
 	mutex_lock(&sc->mutex);
 
-	if (IS_ENABLED(CONFIG_ATH9K_TX99)) {
+	if (ath9k_tx99_enabled()) {
 		mutex_unlock(&sc->mutex);
 		return -EOPNOTSUPP;
 	}
@@ -1446,7 +1446,7 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 
-	if (IS_ENABLED(CONFIG_ATH9K_TX99))
+	if (ath9k_tx99_enabled())
 		return;
 
 	sc->ps_enabled = true;
@@ -1465,7 +1465,7 @@
 	struct ath_hw *ah = sc->sc_ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 
-	if (IS_ENABLED(CONFIG_ATH9K_TX99))
+	if (ath9k_tx99_enabled())
 		return;
 
 	sc->ps_enabled = false;
@@ -1541,7 +1541,7 @@
 		ath_chanctx_set_channel(sc, ctx, &hw->conf.chandef);
 	}
 
-	if (changed & IEEE80211_CONF_CHANGE_POWER)
+	if ((changed & IEEE80211_CONF_CHANGE_POWER) && !(ah->tpc_enabled))
 		ath9k_set_txpower(sc, NULL);
 
 	mutex_unlock(&sc->mutex);
@@ -1955,7 +1955,7 @@
 		tsf = ath9k_hw_gettsf64(sc->sc_ah);
 	} else {
 		tsf = sc->cur_chan->tsf_val +
-		      ath9k_hw_get_tsf_offset(&sc->cur_chan->tsf_ts, NULL);
+		      ath9k_hw_get_tsf_offset(sc->cur_chan->tsf_ts, 0);
 	}
 	tsf += le64_to_cpu(avp->tsf_adjust);
 	ath9k_ps_restore(sc);
@@ -1974,7 +1974,7 @@
 	mutex_lock(&sc->mutex);
 	ath9k_ps_wakeup(sc);
 	tsf -= le64_to_cpu(avp->tsf_adjust);
-	ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
+	avp->chanctx->tsf_ts = ktime_get_raw();
 	if (sc->cur_chan == avp->chanctx)
 		ath9k_hw_settsf64(sc->sc_ah, tsf);
 	avp->chanctx->tsf_val = tsf;
@@ -1990,7 +1990,7 @@
 	mutex_lock(&sc->mutex);
 
 	ath9k_ps_wakeup(sc);
-	ktime_get_raw_ts64(&avp->chanctx->tsf_ts);
+	avp->chanctx->tsf_ts = ktime_get_raw();
 	if (sc->cur_chan == avp->chanctx)
 		ath9k_hw_reset_tsf(sc->sc_ah);
 	avp->chanctx->tsf_val = 0;
@@ -2069,7 +2069,7 @@
 	unsigned long flags;
 	int pos;
 
-	if (IS_ENABLED(CONFIG_ATH9K_TX99))
+	if (ath9k_tx99_enabled())
 		return -EOPNOTSUPP;
 
 	spin_lock_irqsave(&common->cc_lock, flags);
@@ -2119,7 +2119,7 @@
 	struct ath_softc *sc = hw->priv;
 	struct ath_hw *ah = sc->sc_ah;
 
-	if (IS_ENABLED(CONFIG_ATH9K_TX99))
+	if (ath9k_tx99_enabled())
 		return;
 
 	mutex_lock(&sc->mutex);
@@ -2767,7 +2767,7 @@
 #endif
 
 static int ath9k_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-			     int *dbm)
+			     unsigned int link_id, int *dbm)
 {
 	struct ath_softc *sc = hw->priv;
 	struct ath_vif *avp = (void *)vif->drv_priv;
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/recv.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/recv.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/recv.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/recv.c	2025-09-25 17:40:34.187360383 +0200
@@ -377,7 +377,7 @@
 	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
 	u32 rfilt;
 
-	if (IS_ENABLED(CONFIG_ATH9K_TX99))
+	if (ath9k_tx99_enabled())
 		return 0;
 
 	rfilt = ATH9K_RX_FILTER_UCAST | ATH9K_RX_FILTER_BCAST
@@ -1018,6 +1018,7 @@
 	struct ieee80211_rx_status *rxs;
 	const struct ieee80211_rate *rate;
 	bool is_sgi, is_40, is_sp;
+	unsigned long flags;
 	int phy;
 	u16 len = rs->rs_datalen;
 	u32 airtime = 0;
@@ -1042,7 +1043,7 @@
 	if (!!(rxs->encoding == RX_ENC_HT)) {
 		/* MCS rates */
 
-		airtime += ath_pkt_duration(sc, rxs->rate_idx, len,
+		airtime += ath_pkt_duration(rxs->rate_idx, len,
 					is_40, is_sgi, is_sp);
 	} else {
 
@@ -1052,6 +1053,10 @@
 						len, rxs->rate_idx, is_sp);
 	}
 
+	spin_lock_irqsave(&common->cc_lock, flags);
+	common->cc_survey.rx_bss_frame += airtime;
+	spin_unlock_irqrestore(&common->cc_lock, flags);
+
 	ieee80211_sta_register_airtime(sta, tidno, 0, airtime);
 exit:
 	rcu_read_unlock();
diff -ruw linux-6.13.12/drivers/net/wireless/ath/ath9k/xmit.c linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/xmit.c
--- linux-6.13.12/drivers/net/wireless/ath/ath9k/xmit.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath9k/xmit.c	2025-07-01 14:10:42.776047660 +0200
@@ -67,8 +67,7 @@
 static void ath_tx_rc_status(struct ath_softc *sc, struct ath_buf *bf,
 			     struct ath_tx_status *ts, int nframes, int nbad,
 			     int txok);
-static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
-			      struct ath_buf *bf);
+static void ath_tx_update_baw(struct ath_atx_tid *tid, struct ath_buf *bf);
 static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
 					   struct ath_txq *txq,
 					   struct ath_atx_tid *tid,
@@ -208,10 +207,10 @@
 				       ARRAY_SIZE(bf->rates));
 }
 
-static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
-			     struct sk_buff *skb)
+static void ath_txq_skb_done(struct ath_softc *sc, struct sk_buff *skb)
 {
 	struct ath_frame_info *fi = get_frame_info(skb);
+	struct ath_txq *txq;
 	int q = fi->txq;
 
 	if (q < 0)
@@ -224,7 +223,7 @@
 }
 
 static struct ath_atx_tid *
-ath_get_skb_tid(struct ath_softc *sc, struct ath_node *an, struct sk_buff *skb)
+ath_get_skb_tid(struct ath_node *an, struct sk_buff *skb)
 {
 	u8 tidno = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
 	return ATH_AN_2_TID(an, tidno);
@@ -294,13 +293,13 @@
 		fi = get_frame_info(skb);
 		bf = fi->bf;
 		if (!bf) {
-			ath_txq_skb_done(sc, txq, skb);
+			ath_txq_skb_done(sc, skb);
 			ieee80211_free_txskb(sc->hw, skb);
 			continue;
 		}
 
 		if (fi->baw_tracked) {
-			ath_tx_update_baw(sc, tid, bf);
+			ath_tx_update_baw(tid, bf);
 			sendbar = true;
 		}
 
@@ -315,8 +314,7 @@
 	}
 }
 
-static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
-			      struct ath_buf *bf)
+static void ath_tx_update_baw(struct ath_atx_tid *tid, struct ath_buf *bf)
 {
 	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
 	u16 seqno = bf->bf_state.seqno;
@@ -338,8 +336,7 @@
 	}
 }
 
-static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
-			     struct ath_buf *bf)
+static void ath_tx_addto_baw(struct ath_atx_tid *tid, struct ath_buf *bf)
 {
 	struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
 	u16 seqno = bf->bf_state.seqno;
@@ -452,9 +449,8 @@
 	return tbf;
 }
 
-static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
-			        struct ath_tx_status *ts, int txok,
-			        int *nframes, int *nbad)
+static void ath_tx_count_frames(struct ath_buf *bf, struct ath_tx_status *ts,
+				int txok, int *nframes, int *nbad)
 {
 	u16 seq_st = 0;
 	u32 ba[WME_BA_BMP_SIZE >> 5];
@@ -557,7 +553,7 @@
 			/*
 			 * AR5416 can become deaf/mute when BA
 			 * issue happens. Chip needs to be reset.
-			 * But AP code may have sychronization issues
+			 * But AP code may have synchronization issues
 			 * when perform internal reset in this routine.
 			 * Only enable reset in STA mode for now.
 			 */
@@ -568,7 +564,7 @@
 
 	__skb_queue_head_init(&bf_pending);
 
-	ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
+	ath_tx_count_frames(bf, ts, txok, &nframes, &nbad);
 	while (bf) {
 		u16 seqno = bf->bf_state.seqno;
 
@@ -621,7 +617,7 @@
 			 * complete the acked-ones/xretried ones; update
 			 * block-ack window
 			 */
-			ath_tx_update_baw(sc, tid, bf);
+			ath_tx_update_baw(tid, bf);
 
 			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
 				memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -651,7 +647,7 @@
 				 * run out of tx buf.
 				 */
 				if (!tbf) {
-					ath_tx_update_baw(sc, tid, bf);
+					ath_tx_update_baw(tid, bf);
 
 					ath_tx_complete_buf(sc, bf, txq,
 							    &bf_head, NULL, ts,
@@ -752,7 +748,7 @@
 	sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr1, hdr->addr2);
 	if (sta) {
 		struct ath_node *an = (struct ath_node *)sta->drv_priv;
-		tid = ath_get_skb_tid(sc, an, bf->bf_mpdu);
+		tid = ath_get_skb_tid(an, bf->bf_mpdu);
 		ath_tx_count_airtime(sc, sta, bf, ts, tid->tidno);
 		if (ts->ts_status & (ATH9K_TXERR_FILT | ATH9K_TXERR_XRETRY))
 			tid->clear_ps_filter = true;
@@ -962,7 +958,7 @@
 			bf->bf_state.stale = false;
 
 		if (!bf) {
-			ath_txq_skb_done(sc, txq, skb);
+			ath_txq_skb_done(sc, skb);
 			ieee80211_free_txskb(sc->hw, skb);
 			continue;
 		}
@@ -1012,13 +1008,13 @@
 
 			INIT_LIST_HEAD(&bf_head);
 			list_add(&bf->list, &bf_head);
-			ath_tx_update_baw(sc, tid, bf);
+			ath_tx_update_baw(tid, bf);
 			ath_tx_complete_buf(sc, bf, txq, &bf_head, NULL, &ts, 0);
 			continue;
 		}
 
 		if (bf_isampdu(bf))
-			ath_tx_addto_baw(sc, tid, bf);
+			ath_tx_addto_baw(tid, bf);
 
 		break;
 	}
@@ -1114,8 +1110,8 @@
  * width  - 0 for 20 MHz, 1 for 40 MHz
  * half_gi - to use 4us v/s 3.6 us for symbol time
  */
-u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
-		     int width, int half_gi, bool shortPreamble)
+u32 ath_pkt_duration(u8 rix, int pktlen, int width,
+		     int half_gi, bool shortPreamble)
 {
 	u32 nbits, nsymbits, duration, nsymbols;
 	int streams;
@@ -1327,7 +1323,7 @@
 			info->rates[i].Rate = rix | 0x80;
 			info->rates[i].ChSel = ath_txchainmask_reduction(sc,
 					ah->txchainmask, info->rates[i].Rate);
-			info->rates[i].PktDuration = ath_pkt_duration(sc, rix, len,
+			info->rates[i].PktDuration = ath_pkt_duration(rix, len,
 				 is_40, is_sgi, is_sp);
 			if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
 				info->rates[i].RateFlags |= ATH9K_RATESERIES_STBC;
@@ -2122,7 +2118,7 @@
 	bf->bf_state.bf_type = 0;
 	if (tid && (tx_info->flags & IEEE80211_TX_CTL_AMPDU)) {
 		bf->bf_state.bf_type = BUF_AMPDU;
-		ath_tx_addto_baw(sc, tid, bf);
+		ath_tx_addto_baw(tid, bf);
 	}
 
 	bf->bf_next = NULL;
@@ -2295,19 +2291,10 @@
 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 	struct ieee80211_sta *sta = txctl->sta;
 	struct ieee80211_vif *vif = info->control.vif;
-	struct ath_vif *avp;
 	struct ath_softc *sc = hw->priv;
 	int frmlen = skb->len + FCS_LEN;
 	int padpos, padsize;
 
-	/* NOTE:  sta can be NULL according to net/mac80211.h */
-	if (sta)
-		txctl->an = (struct ath_node *)sta->drv_priv;
-	else if (vif && ieee80211_is_data(hdr->frame_control)) {
-		avp = (void *)vif->drv_priv;
-		txctl->an = &avp->mcast_node;
-	}
-
 	if (info->control.hw_key)
 		frmlen += info->control.hw_key->icv_len;
 
@@ -2368,7 +2355,7 @@
 
 	if (txctl->sta) {
 		an = (struct ath_node *) sta->drv_priv;
-		tid = ath_get_skb_tid(sc, an, skb);
+		tid = ath_get_skb_tid(an, skb);
 	}
 
 	ath_txq_lock(sc, txq);
@@ -2379,7 +2366,7 @@
 
 	bf = ath_tx_setup_buffer(sc, txq, tid, skb);
 	if (!bf) {
-		ath_txq_skb_done(sc, txq, skb);
+		ath_txq_skb_done(sc, skb);
 		if (txctl->paprd)
 			dev_kfree_skb_any(skb);
 		else
@@ -2514,7 +2501,7 @@
 	}
 	spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
 
-	ath_txq_skb_done(sc, txq, skb);
+	ath_txq_skb_done(sc, skb);
 	tx_info->status.status_driver_data[0] = sta;
 	__skb_queue_tail(&txq->complete_q, skb);
 }
diff -ruw linux-6.13.12/drivers/net/wireless/ath/dfs_pattern_detector.c linux-6.13.12-fbx/drivers/net/wireless/ath/dfs_pattern_detector.c
--- linux-6.13.12/drivers/net/wireless/ath/dfs_pattern_detector.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/dfs_pattern_detector.c	2025-09-25 17:40:34.191360403 +0200
@@ -270,6 +270,7 @@
 {
 	u32 i;
 	struct channel_detector *cd;
+	int diff_ts;
 
 	/*
 	 * pulses received for a non-supported or un-initialized
@@ -282,8 +283,9 @@
 	if (cd == NULL)
 		return false;
 
+	diff_ts = event->ts - dpd->last_pulse_ts;
 	/* reset detector on time stamp wraparound, caused by TSF reset */
-	if (event->ts < dpd->last_pulse_ts)
+	if (diff_ts < dpd->common->dfs_pulse_valid_diff_ts)
 		dpd_reset(dpd);
 	dpd->last_pulse_ts = event->ts;
 
diff -ruw linux-6.13.12/drivers/net/wireless/ath/dfs_pattern_detector.h linux-6.13.12-fbx/drivers/net/wireless/ath/dfs_pattern_detector.h
--- linux-6.13.12/drivers/net/wireless/ath/dfs_pattern_detector.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/dfs_pattern_detector.h	2025-09-25 17:40:34.191360403 +0200
@@ -24,7 +24,7 @@
 /* tolerated deviation of radar time stamp in usecs on both sides
  * TODO: this might need to be HW-dependent
  */
-#define PRI_TOLERANCE	16
+#define PRI_TOLERANCE	6
 
 /**
  * struct ath_dfs_pool_stats - DFS Statistics for global pools
diff -ruw linux-6.13.12/drivers/net/wireless/ath/key.c linux-6.13.12-fbx/drivers/net/wireless/ath/key.c
--- linux-6.13.12/drivers/net/wireless/ath/key.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/key.c	2025-09-25 17:40:34.191360403 +0200
@@ -524,7 +524,7 @@
 			idx = ath_reserve_key_cache_slot(common, key->cipher);
 			break;
 		default:
-			idx = key->keyidx;
+			idx = ath_reserve_key_cache_slot(common, key->cipher);
 			break;
 		}
 	} else if (key->keyidx) {
diff -ruw linux-6.13.12/drivers/net/wireless/ath/regd.c linux-6.13.12-fbx/drivers/net/wireless/ath/regd.c
--- linux-6.13.12/drivers/net/wireless/ath/regd.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/regd.c	2025-09-25 17:40:34.191360403 +0200
@@ -345,6 +345,10 @@
 	struct ieee80211_channel *ch;
 	unsigned int i;
 
+#ifdef CONFIG_ATH_REG_IGNORE
+	return;
+#endif
+
 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
 		if (!wiphy->bands[band])
 			continue;
@@ -379,6 +383,10 @@
 {
 	struct ieee80211_supported_band *sband;
 
+#ifdef CONFIG_ATH_REG_IGNORE
+	return;
+#endif
+
 	sband = wiphy->bands[NL80211_BAND_2GHZ];
 	if (!sband)
 		return;
@@ -408,6 +416,9 @@
 	struct ieee80211_channel *ch;
 	unsigned int i;
 
+#ifdef CONFIG_ATH_REG_IGNORE
+	return;
+#endif
 	if (!wiphy->bands[NL80211_BAND_5GHZ])
 		return;
 
@@ -640,6 +651,11 @@
 	const struct ieee80211_regdomain *regd;
 
 	wiphy->reg_notifier = reg_notifier;
+
+#ifdef CONFIG_ATH_REG_IGNORE
+	return 0;
+#endif
+
 	wiphy->regulatory_flags |= REGULATORY_STRICT_REG |
 				   REGULATORY_CUSTOM_REG;
 
@@ -704,7 +720,7 @@
 	    regdmn == CTRY_DEFAULT) {
 		printk(KERN_DEBUG "ath: EEPROM indicates default "
 		       "country code should be used\n");
-		reg->country_code = CTRY_UNITED_STATES;
+		reg->country_code = CTRY_FRANCE;
 	}
 
 	if (reg->country_code == CTRY_DEFAULT) {
diff -ruw linux-6.13.12/drivers/net/wireless/marvell/Kconfig linux-6.13.12-fbx/drivers/net/wireless/marvell/Kconfig
--- linux-6.13.12/drivers/net/wireless/marvell/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/marvell/Kconfig	2025-09-29 14:23:07.613732450 +0200
@@ -25,4 +25,8 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called mwl8k.  If unsure, say N.
 
+config MWL8K_NEW
+	tristate "Marvell 88W8xxx PCI/PCIe NEW"
+	depends on MAC80211 && PCI
+
 endif # WLAN_VENDOR_MARVELL
diff -ruw linux-6.13.12/drivers/net/wireless/marvell/Makefile linux-6.13.12-fbx/drivers/net/wireless/marvell/Makefile
--- linux-6.13.12/drivers/net/wireless/marvell/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/marvell/Makefile	2025-09-25 17:40:34.287360879 +0200
@@ -5,3 +5,4 @@
 obj-$(CONFIG_MWIFIEX)	+= mwifiex/
 
 obj-$(CONFIG_MWL8K)	+= mwl8k.o
+obj-$(CONFIG_MWL8K_NEW)	+= mwl8k_new/
diff -ruw linux-6.13.12/drivers/nvmem/Kconfig linux-6.13.12-fbx/drivers/nvmem/Kconfig
--- linux-6.13.12/drivers/nvmem/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/nvmem/Kconfig	2025-09-25 17:40:34.499361930 +0200
@@ -61,6 +61,9 @@
 	  This driver provides support for Broadcom's NVRAM that can be accessed
 	  using I/O mapping.
 
+config NVMEM_IGNORE_RO
+	bool "ignore read-only flags"
+
 config NVMEM_IMX_IIM
 	tristate "i.MX IC Identification Module support"
 	depends on ARCH_MXC || COMPILE_TEST
@@ -267,6 +270,12 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called nvmem-rmem.
 
+config NVMEM_IOMAP
+	tristate "I/O mapped NVMEM support"
+	depends on HAS_IOMEM
+	help
+	  This driver supports NVMEM that can be accessed using I/O mapping.
+
 config NVMEM_ROCKCHIP_EFUSE
 	tristate "Rockchip eFuse Support"
 	depends on ARCH_ROCKCHIP || COMPILE_TEST
@@ -428,4 +437,8 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called nvmem_qoriq_efuse.
 
+config NVMEM_CORTINA_OTP
+	tristate "Cortina OTP support."
+	depends on ARCH_CORTINA
+
 endif
diff -ruw linux-6.13.12/drivers/nvmem/Makefile linux-6.13.12-fbx/drivers/nvmem/Makefile
--- linux-6.13.12/drivers/nvmem/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/nvmem/Makefile	2025-09-25 17:40:34.499361930 +0200
@@ -56,6 +56,8 @@
 nvmem-rcar-efuse-y			:= rcar-efuse.o
 obj-$(CONFIG_NVMEM_RMEM) 		+= nvmem-rmem.o
 nvmem-rmem-y				:= rmem.o
+obj-$(CONFIG_NVMEM_IOMAP)	+= nvmem_iomap.o
+nvmem_iomap-y			:= iomap.o
 obj-$(CONFIG_NVMEM_ROCKCHIP_EFUSE)	+= nvmem_rockchip_efuse.o
 nvmem_rockchip_efuse-y			:= rockchip-efuse.o
 obj-$(CONFIG_NVMEM_ROCKCHIP_OTP)	+= nvmem-rockchip-otp.o
@@ -85,3 +87,7 @@
 nvmem_zynqmp_nvmem-y			:= zynqmp_nvmem.o
 obj-$(CONFIG_NVMEM_QORIQ_EFUSE)		+= nvmem-qoriq-efuse.o
 nvmem-qoriq-efuse-y			:= qoriq-efuse.o
+
+# follow the existing nvmem-<vendor>-<device> dash naming convention
+obj-$(CONFIG_NVMEM_CORTINA_OTP)		+= nvmem-cortina-otp.o
+nvmem-cortina-otp-y			:= cortina-otp.o
diff -ruw linux-6.13.12/drivers/nvmem/core.c linux-6.13.12-fbx/drivers/nvmem/core.c
--- linux-6.13.12/drivers/nvmem/core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/nvmem/core.c	2025-09-25 17:40:34.499361930 +0200
@@ -996,8 +996,10 @@
 	if (rval)
 		goto err_put_device;
 
+#ifndef CONFIG_NVMEM_IGNORE_RO
 	nvmem->read_only = device_property_present(config->dev, "read-only") ||
 			   config->read_only || !nvmem->reg_write;
+#endif
 
 #ifdef CONFIG_NVMEM_SYSFS
 	nvmem->dev.groups = nvmem_dev_groups;
diff -ruw linux-6.13.12/drivers/of/Kconfig linux-6.13.12-fbx/drivers/of/Kconfig
--- linux-6.13.12/drivers/of/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/of/Kconfig	2025-09-25 17:40:34.503361950 +0200
@@ -61,6 +61,11 @@
 
 	  If unsure, say N here, but this option is safe to enable.
 
+config OF_DTB_BUILTIN_LIST
+	string "Link given list of DTB files into kernel"
+	help
+	  Specify filename without .dtb extension
+
 config OF_FLATTREE
 	bool
 	select DTC
@@ -126,4 +131,11 @@
 config OF_NUMA
 	bool
 
+config OF_CONFIGFS
+	bool "Device Tree Overlay ConfigFS interface"
+	select CONFIGFS_FS
+	select OF_OVERLAY
+	help
+	  Enable a simple user-space driven DT overlay interface.
+
 endif # OF
diff -ruw linux-6.13.12/drivers/of/Makefile linux-6.13.12-fbx/drivers/of/Makefile
--- linux-6.13.12/drivers/of/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/of/Makefile	2025-09-25 17:40:34.503361950 +0200
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-y = base.o cpu.o device.o module.o platform.o property.o
 obj-$(CONFIG_OF_KOBJ) += kobj.o
+obj-$(CONFIG_OF_CONFIGFS) += configfs.o
 obj-$(CONFIG_OF_DYNAMIC) += dynamic.o
 obj-$(CONFIG_OF_FLATTREE) += fdt.o empty_root.dtb.o
 obj-$(CONFIG_OF_EARLY_FLATTREE) += fdt_address.o
diff -ruw linux-6.13.12/drivers/of/fdt.c linux-6.13.12-fbx/drivers/of/fdt.c
--- linux-6.13.12/drivers/of/fdt.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/of/fdt.c	2025-09-25 17:40:34.503361950 +0200
@@ -28,6 +28,7 @@
 
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 #include <asm/page.h>
+#include <asm-generic/vmlinux.lds.h>
 
 #include "of_private.h"
 
@@ -651,6 +652,39 @@
 	return 0;
 }
 
+/*
+ * Iterate over the list of built-in DTBs and return the best compatible match.
+ */
+const void __init *of_fdt_find_compatible_dtb(const char *name)
+{
+	struct fdt_header {
+		__be32 magic;
+		__be32 totalsize;
+	};
+	const struct fdt_header *blob, *best;
+	unsigned int best_score = ~0;
+
+	best = NULL;
+	blob = (const struct fdt_header *)__dtb_start;
+	while ((void *)blob < (void *)__dtb_end &&
+	       (be32_to_cpu(blob->magic) == OF_DT_HEADER)) {
+		unsigned int score;
+		u32 size;
+
+		score = of_fdt_is_compatible(blob, 0, name);
+		if (score > 0 && score < best_score) {
+			best = blob;
+			best_score = score;
+		}
+
+		size = be32_to_cpu(blob->totalsize);
+		blob = (const struct fdt_header *)
+			PTR_ALIGN((void *)blob + size, STRUCT_ALIGNMENT);
+	}
+
+	return best;
+}
+
 /**
  * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
  * @node: node to test
diff -ruw linux-6.13.12/drivers/of/overlay.c linux-6.13.12-fbx/drivers/of/overlay.c
--- linux-6.13.12/drivers/of/overlay.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/of/overlay.c	2025-09-25 17:40:34.503361950 +0200
@@ -357,7 +357,7 @@
 	}
 
 	if (!of_node_check_flag(target->np, OF_OVERLAY))
-		pr_err("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n",
+		pr_debug("WARNING: memory leak will occur if overlay removed, property: %pOF/%s\n",
 		       target->np, new_prop->name);
 
 	if (ret)
diff -ruw linux-6.13.12/drivers/opp/debugfs.c linux-6.13.12-fbx/drivers/opp/debugfs.c
--- linux-6.13.12/drivers/opp/debugfs.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/opp/debugfs.c	2025-07-01 14:10:43.120056870 +0200
@@ -217,7 +217,7 @@
 {
 	struct opp_device *new_dev = NULL, *iter;
 	const struct device *dev;
-	struct dentry *dentry;
+	int err;
 
 	/* Look for next opp-dev */
 	list_for_each_entry(iter, &opp_table->dev_list, node)
@@ -234,16 +234,14 @@
 
 	opp_set_dev_name(dev, opp_table->dentry_name);
 
-	dentry = debugfs_rename(rootdir, opp_dev->dentry, rootdir,
-				opp_table->dentry_name);
-	if (IS_ERR(dentry)) {
+	err = debugfs_change_name(opp_dev->dentry, "%s", opp_table->dentry_name);
+	if (err) {
 		dev_err(dev, "%s: Failed to rename link from: %s to %s\n",
 			__func__, dev_name(opp_dev->dev), dev_name(dev));
 		return;
 	}
 
-	new_dev->dentry = dentry;
-	opp_table->dentry = dentry;
+	new_dev->dentry = opp_table->dentry = opp_dev->dentry;
 }
 
 /**
diff -ruw linux-6.13.12/drivers/pci/controller/Kconfig linux-6.13.12-fbx/drivers/pci/controller/Kconfig
--- linux-6.13.12/drivers/pci/controller/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/pci/controller/Kconfig	2025-09-25 17:40:34.511361990 +0200
@@ -207,6 +207,12 @@
 	  Say Y here if you want to enable Gen3 PCIe controller support on
 	  MediaTek SoCs.
 
+config PCIE_BCM63XX
+	tristate "BCM63XX SoCs PCIe endpoint driver."
+	depends on ARCH_BCMBCA || COMPILE_TEST
+	depends on OF
+	depends on PCI_MSI
+
 config PCIE_MT7621
 	tristate "MediaTek MT7621 PCIe controller"
 	depends on SOC_MT7621 || COMPILE_TEST
diff -ruw linux-6.13.12/drivers/pci/controller/Makefile linux-6.13.12-fbx/drivers/pci/controller/Makefile
--- linux-6.13.12/drivers/pci/controller/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/pci/controller/Makefile	2025-09-25 17:40:34.511361990 +0200
@@ -33,6 +33,7 @@
 obj-$(CONFIG_PCIE_ROCKCHIP_HOST) += pcie-rockchip-host.o
 obj-$(CONFIG_PCIE_MEDIATEK) += pcie-mediatek.o
 obj-$(CONFIG_PCIE_MEDIATEK_GEN3) += pcie-mediatek-gen3.o
+obj-$(CONFIG_PCIE_BCM63XX) += pcie-bcm63xx.o
 obj-$(CONFIG_VMD) += vmd.o
 obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
 obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
diff -ruw linux-6.13.12/drivers/pci/controller/dwc/Kconfig linux-6.13.12-fbx/drivers/pci/controller/dwc/Kconfig
--- linux-6.13.12/drivers/pci/controller/dwc/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/pci/controller/dwc/Kconfig	2025-09-25 17:40:34.515362010 +0200
@@ -70,6 +70,16 @@
 	  Enables support for the PCIe controller in the Baikal-T1 SoC to work
 	  in host mode. It's based on the Synopsys DWC PCIe v4.60a IP-core.
 
+config PCIE_CORTINA
+	tristate "Cortina Access PCIe controller"
+	depends on ARCH_CORTINA_ACCESS || COMPILE_TEST
+	depends on PCI_MSI
+	select PCIE_DW_HOST
+	select PCI_MSI_ARCH_FALLBACKS
+	help
+	  Say Y here if you want PCIE controller support on Cortina Access
+	  SoCs
+
 config PCI_IMX6
 	bool
 
diff -ruw linux-6.13.12/drivers/pci/controller/dwc/Makefile linux-6.13.12-fbx/drivers/pci/controller/dwc/Makefile
--- linux-6.13.12/drivers/pci/controller/dwc/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/pci/controller/dwc/Makefile	2025-09-25 17:40:34.515362010 +0200
@@ -4,6 +4,7 @@
 obj-$(CONFIG_PCIE_DW_EP) += pcie-designware-ep.o
 obj-$(CONFIG_PCIE_DW_PLAT) += pcie-designware-plat.o
 obj-$(CONFIG_PCIE_BT1) += pcie-bt1.o
+obj-$(CONFIG_PCIE_CORTINA) += pcie-cortina.o
 obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o
 obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o
 obj-$(CONFIG_PCIE_FU740) += pcie-fu740.o
@@ -16,7 +17,6 @@
 obj-$(CONFIG_PCIE_QCOM) += pcie-qcom.o
 obj-$(CONFIG_PCIE_QCOM_EP) += pcie-qcom-ep.o
 obj-$(CONFIG_PCIE_ARMADA_8K) += pcie-armada8k.o
-obj-$(CONFIG_PCIE_ARTPEC6) += pcie-artpec6.o
 obj-$(CONFIG_PCIE_ROCKCHIP_DW) += pcie-dw-rockchip.o
 obj-$(CONFIG_PCIE_INTEL_GW) += pcie-intel-gw.o
 obj-$(CONFIG_PCIE_KEEMBAY) += pcie-keembay.o
diff -ruw linux-6.13.12/drivers/pci/pci-sysfs.c linux-6.13.12-fbx/drivers/pci/pci-sysfs.c
--- linux-6.13.12/drivers/pci/pci-sysfs.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/pci/pci-sysfs.c	2025-09-25 17:40:34.543362148 +0200
@@ -1172,6 +1172,7 @@
 {
 	int i;
 
+	mutex_lock(&pdev->sysfs_init_lock);
 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 		struct bin_attribute *res_attr;
 
@@ -1187,6 +1188,9 @@
 			kfree(res_attr);
 		}
 	}
+
+	pdev->sysfs_init_done = 0;
+	mutex_unlock(&pdev->sysfs_init_lock);
 }
 
 static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
@@ -1256,6 +1260,12 @@
 	int i;
 	int retval;
 
+	mutex_lock(&pdev->sysfs_init_lock);
+	if (pdev->sysfs_init_done) {
+		mutex_unlock(&pdev->sysfs_init_lock);
+		return 0;
+	}
+
 	/* Expose the PCI resources from this device as files */
 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 
@@ -1269,10 +1279,14 @@
 		    pdev->resource[i].flags & IORESOURCE_PREFETCH)
 			retval = pci_create_attr(pdev, i, 1);
 		if (retval) {
+			mutex_unlock(&pdev->sysfs_init_lock);
 			pci_remove_resource_files(pdev);
 			return retval;
 		}
 	}
+
+	pdev->sysfs_init_done = 1;
+	mutex_unlock(&pdev->sysfs_init_lock);
 	return 0;
 }
 #else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
diff -ruw linux-6.13.12/drivers/pci/pcie/aer.c linux-6.13.12-fbx/drivers/pci/pcie/aer.c
--- linux-6.13.12/drivers/pci/pcie/aer.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/pci/pcie/aer.c	2025-09-25 17:40:34.547362168 +0200
@@ -50,6 +50,9 @@
 	DECLARE_KFIFO(aer_fifo, struct aer_err_source, AER_ERROR_SOURCES_MAX);
 };
 
+static int pcie_aer_silence_messages = 0;
+core_param(pcie_aer_silence_messages, pcie_aer_silence_messages, int, 0644);
+
 /* AER stats for the device */
 struct aer_stats {
 
@@ -705,6 +708,7 @@
 	const char *level;
 
 	if (!info->status) {
+		if (!pcie_aer_silence_messages)
 		pci_err(dev, "PCIe Bus Error: severity=%s, type=Inaccessible, (Unregistered Agent ID)\n",
 			aer_error_severity_string[info->severity]);
 		goto out;
@@ -715,6 +719,7 @@
 
 	level = (info->severity == AER_CORRECTABLE) ? KERN_WARNING : KERN_ERR;
 
+	if (!pcie_aer_silence_messages) {
 	pci_printk(level, dev, "PCIe Bus Error: severity=%s, type=%s, (%s)\n",
 		   aer_error_severity_string[info->severity],
 		   aer_error_layer[layer], aer_agent_string[agent]);
@@ -726,6 +731,7 @@
 
 	if (info->tlp_header_valid)
 		__print_tlp_header(dev, &info->tlp);
+	}
 
 out:
 	if (info->id && info->error_dev_num > 1 && info->id == id)
@@ -740,6 +746,7 @@
 	u8 bus = info->id >> 8;
 	u8 devfn = info->id & 0xff;
 
+	if (!pcie_aer_silence_messages)
 	pci_info(dev, "%s%s error message received from %04x:%02x:%02x.%d\n",
 		 info->multi_error_valid ? "Multiple " : "",
 		 aer_error_severity_string[info->severity],
@@ -932,6 +939,7 @@
 		u8 bus = e_info->id >> 8;
 		u8 devfn = e_info->id & 0xff;
 
+		if (!pcie_aer_silence_messages)
 		pci_info(parent, "found no error details for %04x:%02x:%02x.%d\n",
 			 pci_domain_nr(parent->bus), bus, PCI_SLOT(devfn),
 			 PCI_FUNC(devfn));
diff -ruw linux-6.13.12/drivers/pci/pcie/portdrv.c linux-6.13.12-fbx/drivers/pci/pcie/portdrv.c
--- linux-6.13.12/drivers/pci/pcie/portdrv.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/pci/pcie/portdrv.c	2025-09-25 17:40:34.547362168 +0200
@@ -19,9 +19,13 @@
 #include <linux/slab.h>
 #include <linux/aer.h>
 
+#include <linux/hwmon-pericom-pcie.h>
+
 #include "../pci.h"
 #include "portdrv.h"
 
+static DEFINE_MUTEX(irq_alloc_mutex);
+
 /*
  * The PCIe Capability Interrupt Message Number (PCIe r3.1, sec 7.8.2) must
  * be one of the first 32 MSI-X entries.  Per PCI r3.0, sec 6.8.3.1, MSI
@@ -115,16 +119,21 @@
 	int nr_entries, nvec, pcie_irq;
 	u32 pme = 0, aer = 0, dpc = 0;
 
+	mutex_lock(&irq_alloc_mutex);
+
 	/* Allocate the maximum possible number of MSI/MSI-X vectors */
 	nr_entries = pci_alloc_irq_vectors(dev, 1, PCIE_PORT_MAX_MSI_ENTRIES,
 			PCI_IRQ_MSIX | PCI_IRQ_MSI);
-	if (nr_entries < 0)
+	if (nr_entries < 0) {
+		mutex_unlock(&irq_alloc_mutex);
 		return nr_entries;
+	}
 
 	/* See how many and which Interrupt Message Numbers we actually use */
 	nvec = pcie_message_numbers(dev, mask, &pme, &aer, &dpc);
 	if (nvec > nr_entries) {
 		pci_free_irq_vectors(dev);
+		mutex_unlock(&irq_alloc_mutex);
 		return -EIO;
 	}
 
@@ -144,9 +153,13 @@
 
 		nr_entries = pci_alloc_irq_vectors(dev, nvec, nvec,
 				PCI_IRQ_MSIX | PCI_IRQ_MSI);
-		if (nr_entries < 0)
+		if (nr_entries < 0) {
+			mutex_unlock(&irq_alloc_mutex);
 			return nr_entries;
 	}
+	}
+
+	mutex_unlock(&irq_alloc_mutex);
 
 	/* PME, hotplug and bandwidth notification share an MSI/MSI-X vector */
 	if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP |
@@ -673,6 +686,13 @@
 #define PCIE_PORTDRV_PM_OPS	NULL
 #endif /* !PM */
 
+static inline bool __pci_dev_support_hwmon_pericom(const struct pci_dev *dev)
+{
+	return pci_pcie_type(dev) == PCI_EXP_TYPE_UPSTREAM &&
+		dev->vendor == PCI_VENDOR_ID_PERICOM &&
+		dev->device == 0xc008;
+}
+
 /*
  * pcie_portdrv_probe - Probe PCI-Express port devices
  * @dev: PCI-Express port device being probed
@@ -719,6 +739,9 @@
 		pm_runtime_allow(&dev->dev);
 	}
 
+	if (__pci_dev_support_hwmon_pericom(dev))
+		return hwmon_pericom_pcie_probe(dev);
+
 	return 0;
 }
 
@@ -730,6 +753,7 @@
 		pm_runtime_dont_use_autosuspend(&dev->dev);
 	}
 
+	hwmon_pericom_pcie_remove(dev);
 	pcie_port_device_remove(dev);
 
 	pci_disable_device(dev);
@@ -743,6 +767,7 @@
 		pm_runtime_dont_use_autosuspend(&dev->dev);
 	}
 
+	hwmon_pericom_pcie_remove(dev);
 	pcie_port_device_remove(dev);
 }
 
diff -ruw linux-6.13.12/drivers/pci/probe.c linux-6.13.12-fbx/drivers/pci/probe.c
--- linux-6.13.12/drivers/pci/probe.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/pci/probe.c	2025-09-25 17:40:34.547362168 +0200
@@ -2359,6 +2359,7 @@
 		return NULL;
 
 	INIT_LIST_HEAD(&dev->bus_list);
+	mutex_init(&dev->sysfs_init_lock);
 	dev->dev.type = &pci_dev_type;
 	dev->bus = pci_bus_get(bus);
 	dev->driver_exclusive_resource = (struct resource) {
@@ -3333,6 +3334,34 @@
 	return max;
 }
 
+/*
+ * Walks the PCI/PCIe tree to find the first instance of a PCIe device and
+ * hands off the PCIe bus to pcie_bus_configure_settings to walk the rest.
+ */
+static int pcie_rescan_bus_configure_settings(struct pci_dev *dev, void *data)
+{
+	if (pci_is_pcie(dev)) {
+		struct pci_bus *child, *bus = dev->bus;
+
+		list_for_each_entry(child, &bus->children, node)
+			pcie_bus_configure_settings(child);
+
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ * pci_bus_configure_settings - Configure bus settings
+ * @bus: PCI/PCIE bus to configure
+ *
+ * Currently only configures PCIe bus settings related to MPS and MRRS.
+ */
+static void pci_bus_configure_settings(struct pci_bus *bus)
+{
+	pci_walk_bus(bus, pcie_rescan_bus_configure_settings, NULL);
+}
+
 /**
  * pci_rescan_bus - Scan a PCI bus for devices
  * @bus: PCI bus to scan
@@ -3348,6 +3377,7 @@
 
 	max = pci_scan_child_bus(bus);
 	pci_assign_unassigned_bus_resources(bus);
+	pci_bus_configure_settings(bus);
 	pci_bus_add_devices(bus);
 
 	return max;
diff -ruw linux-6.13.12/drivers/pci/quirks.c linux-6.13.12-fbx/drivers/pci/quirks.c
--- linux-6.13.12/drivers/pci/quirks.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/pci/quirks.c	2025-09-25 17:40:34.551362188 +0200
@@ -3269,6 +3269,8 @@
 	dev->is_hotplug_bridge = 1;
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_HINT, 0x0020, quirk_hotplug_bridge);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_PERICOM, PCI_DEVICE_ID_PI7C9X20303SL,
+			 quirk_hotplug_bridge);
 
 /*
  * This is a quirk for the Ricoh MMC controller found as a part of some
diff -ruw linux-6.13.12/drivers/phy/Kconfig linux-6.13.12-fbx/drivers/phy/Kconfig
--- linux-6.13.12/drivers/phy/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/phy/Kconfig	2025-09-25 17:40:34.567362267 +0200
@@ -25,6 +25,14 @@
 	  Provides a number of helpers a core functions for MIPI D-PHY
 	  drivers to us.
 
+config PHY_CORTINA_ACCESS
+	tristate "Cortina Access PHY driver"
+	depends on OF && (ARCH_CORTINA || COMPILE_TEST)
+	select GENERIC_PHY
+	help
+	  Enable this to support the Cortina Access PHY.
+	  If unsure, say N.
+
 config PHY_LPC18XX_USB_OTG
 	tristate "NXP LPC18xx/43xx SoC USB OTG PHY driver"
 	depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
@@ -93,6 +101,10 @@
 	  schemes. It supports all three USB 2.0 data rates: Low Speed, Full
 	  Speed and High Speed.
 
+config XDSL_PHY_API
+	tristate "xDSL PHY API"
+	select GENERIC_PHY
+
 source "drivers/phy/allwinner/Kconfig"
 source "drivers/phy/amlogic/Kconfig"
 source "drivers/phy/broadcom/Kconfig"
diff -ruw linux-6.13.12/drivers/phy/Makefile linux-6.13.12-fbx/drivers/phy/Makefile
--- linux-6.13.12/drivers/phy/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/phy/Makefile	2025-09-25 17:40:34.567362267 +0200
@@ -6,12 +6,15 @@
 obj-$(CONFIG_GENERIC_PHY)		+= phy-core.o
 obj-$(CONFIG_GENERIC_PHY_MIPI_DPHY)	+= phy-core-mipi-dphy.o
 obj-$(CONFIG_PHY_CAN_TRANSCEIVER)	+= phy-can-transceiver.o
+obj-$(CONFIG_PHY_CORTINA_ACCESS)	+= phy-cortina.o
 obj-$(CONFIG_PHY_LPC18XX_USB_OTG)	+= phy-lpc18xx-usb-otg.o
 obj-$(CONFIG_PHY_XGENE)			+= phy-xgene.o
 obj-$(CONFIG_PHY_PISTACHIO_USB)		+= phy-pistachio-usb.o
 obj-$(CONFIG_USB_LGM_PHY)		+= phy-lgm-usb.o
 obj-$(CONFIG_PHY_AIROHA_PCIE)		+= phy-airoha-pcie.o
 obj-$(CONFIG_PHY_NXP_PTN3222)		+= phy-nxp-ptn3222.o
+obj-$(CONFIG_XDSL_PHY_API)		+= xdsl_phy_api.o
+
 obj-y					+= allwinner/	\
 					   amlogic/	\
 					   broadcom/	\
diff -ruw linux-6.13.12/drivers/phy/broadcom/Kconfig linux-6.13.12-fbx/drivers/phy/broadcom/Kconfig
--- linux-6.13.12/drivers/phy/broadcom/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/phy/broadcom/Kconfig	2025-09-25 17:40:34.567362267 +0200
@@ -39,6 +39,11 @@
 	help
 	  Enable this to support the Broadcom Kona USB 2.0 PHY.
 
+config PHY_BRCM_USB_63138
+	tristate "Broadcom 63138 USB 2.0/3.0 PHY Driver"
+	depends on ARCH_BCMBCA || COMPILE_TEST
+	select GENERIC_PHY
+
 config PHY_BCM_NS_USB2
 	tristate "Broadcom Northstar USB 2.0 PHY Driver"
 	depends on ARCH_BCM_IPROC || COMPILE_TEST
diff -ruw linux-6.13.12/drivers/phy/broadcom/Makefile linux-6.13.12-fbx/drivers/phy/broadcom/Makefile
--- linux-6.13.12/drivers/phy/broadcom/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/phy/broadcom/Makefile	2025-09-25 17:40:34.567362267 +0200
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_PHY_BCM63XX_USBH)		+= phy-bcm63xx-usbh.o
 obj-$(CONFIG_PHY_CYGNUS_PCIE)		+= phy-bcm-cygnus-pcie.o
+obj-$(CONFIG_PHY_BRCM_USB_63138)	+= phy-brcm-usb-63138.o
 obj-$(CONFIG_BCM_KONA_USB2_PHY)		+= phy-bcm-kona-usb2.o
 obj-$(CONFIG_PHY_BCM_NS_USB2)		+= phy-bcm-ns-usb2.o
 obj-$(CONFIG_PHY_BCM_NS_USB3)		+= phy-bcm-ns-usb3.o
diff -ruw linux-6.13.12/drivers/phy/marvell/Kconfig linux-6.13.12-fbx/drivers/phy/marvell/Kconfig
--- linux-6.13.12/drivers/phy/marvell/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/phy/marvell/Kconfig	2025-09-25 17:40:34.571362287 +0200
@@ -136,3 +136,11 @@
 	  components on MMP3-based boards.
 
 	  To compile this driver as a module, choose M here.
+
+config PHY_UTMI_CP110
+	bool "Marvell CP110 UTMI PHY Driver"
+	depends on ARCH_MVEBU
+	depends on OF
+	help
+	  Enable this to support Marvell USB2.0 PHY driver for Marvell
+	  CP110-based SoCs (A7K and A8K).
diff -ruw linux-6.13.12/drivers/phy/marvell/Makefile linux-6.13.12-fbx/drivers/phy/marvell/Makefile
--- linux-6.13.12/drivers/phy/marvell/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/phy/marvell/Makefile	2025-09-25 17:40:34.571362287 +0200
@@ -13,3 +13,4 @@
 obj-$(CONFIG_PHY_PXA_28NM_HSIC)		+= phy-pxa-28nm-hsic.o
 obj-$(CONFIG_PHY_PXA_28NM_USB2)		+= phy-pxa-28nm-usb2.o
 obj-$(CONFIG_PHY_PXA_USB)		+= phy-pxa-usb.o
+obj-$(CONFIG_PHY_UTMI_CP110)		+= phy-utmi-cp110.o
diff -ruw linux-6.13.12/drivers/pinctrl/Kconfig linux-6.13.12-fbx/drivers/pinctrl/Kconfig
--- linux-6.13.12/drivers/pinctrl/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/pinctrl/Kconfig	2025-09-25 17:40:34.591362386 +0200
@@ -153,6 +153,12 @@
 	help
 	  Pinctrl driver for Bitmain BM1880 SoC.
 
+config PINCTRL_CORTINA
+	bool "Cortina-Access pin control"
+	depends on ARCH_CORTINA || MACH_CORTINA_SATURN || COMPILE_TEST
+	select PINMUX
+	select GENERIC_PINCONF
+
 config PINCTRL_CY8C95X0
 	tristate "Cypress CY8C95X0 I2C pinctrl and GPIO driver"
 	depends on I2C
diff -ruw linux-6.13.12/drivers/pinctrl/Makefile linux-6.13.12-fbx/drivers/pinctrl/Makefile
--- linux-6.13.12/drivers/pinctrl/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/pinctrl/Makefile	2025-09-25 17:40:34.591362386 +0200
@@ -64,6 +64,7 @@
 obj-y				+= bcm/
 obj-$(CONFIG_PINCTRL_BERLIN)	+= berlin/
 obj-y				+= cirrus/
+obj-$(CONFIG_PINCTRL_CORTINA)	+= cortina/
 obj-y				+= freescale/
 obj-$(CONFIG_X86)		+= intel/
 obj-y				+= mediatek/
diff -ruw linux-6.13.12/drivers/pinctrl/bcm/Kconfig linux-6.13.12-fbx/drivers/pinctrl/bcm/Kconfig
--- linux-6.13.12/drivers/pinctrl/bcm/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/pinctrl/bcm/Kconfig	2025-09-25 17:40:34.595362406 +0200
@@ -52,6 +52,16 @@
 	select REGMAP
 	select GPIO_REGMAP
 
+config PINCTRL_BCM63138
+	bool "Broadcom 63138 pinmux driver"
+	depends on OF && (ARCH_BCMBCA || COMPILE_TEST)
+	default ARCH_BCMBCA
+	select PINMUX
+	select PINCONF
+	select GENERIC_PINCONF
+	select GPIOLIB
+	select GPIOLIB_IRQCHIP
+
 config PINCTRL_BCM6318
 	bool "Broadcom BCM6318 GPIO driver"
 	depends on (BMIPS_GENERIC || COMPILE_TEST)
diff -ruw linux-6.13.12/drivers/pinctrl/bcm/Makefile linux-6.13.12-fbx/drivers/pinctrl/bcm/Makefile
--- linux-6.13.12/drivers/pinctrl/bcm/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/pinctrl/bcm/Makefile	2025-09-25 17:40:34.595362406 +0200
@@ -11,6 +11,7 @@
 obj-$(CONFIG_PINCTRL_BCM6362)		+= pinctrl-bcm6362.o
 obj-$(CONFIG_PINCTRL_BCM6368)		+= pinctrl-bcm6368.o
 obj-$(CONFIG_PINCTRL_BCM63268)		+= pinctrl-bcm63268.o
+obj-$(CONFIG_PINCTRL_BCM63138)		+= pinctrl-bcm63138.o
 obj-$(CONFIG_PINCTRL_IPROC_GPIO)	+= pinctrl-iproc-gpio.o
 obj-$(CONFIG_PINCTRL_CYGNUS_MUX)	+= pinctrl-cygnus-mux.o
 obj-$(CONFIG_PINCTRL_NS)		+= pinctrl-ns.o
diff -ruw linux-6.13.12/drivers/platform/Kconfig linux-6.13.12-fbx/drivers/platform/Kconfig
--- linux-6.13.12/drivers/platform/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/platform/Kconfig	2025-09-25 17:40:34.663362743 +0200
@@ -18,3 +18,11 @@
 source "drivers/platform/x86/Kconfig"
 
 source "drivers/platform/arm64/Kconfig"
+
+if X86_INTEL_CE
+source "drivers/platform/intelce/Kconfig"
+endif
+
+source "drivers/platform/fbxgw7r/Kconfig"
+
+source "drivers/platform/ipq/Kconfig"
diff -ruw linux-6.13.12/drivers/platform/Makefile linux-6.13.12-fbx/drivers/platform/Makefile
--- linux-6.13.12/drivers/platform/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/platform/Makefile	2025-09-25 17:40:34.663362743 +0200
@@ -13,3 +13,6 @@
 obj-$(CONFIG_CZNIC_PLATFORMS)	+= cznic/
 obj-$(CONFIG_SURFACE_PLATFORMS)	+= surface/
 obj-$(CONFIG_ARM64_PLATFORM_DEVICES)	+= arm64/
+obj-$(CONFIG_X86_INTEL_CE)	+= intelce/
+obj-$(CONFIG_FBXGW7R_PLATFORM)	+= fbxgw7r/
+obj-$(CONFIG_QCOM_IPQ_PLATFORM)	+= ipq/
diff -ruw linux-6.13.12/drivers/power/reset/Kconfig linux-6.13.12-fbx/drivers/power/reset/Kconfig
--- linux-6.13.12/drivers/power/reset/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/power/reset/Kconfig	2025-09-25 17:40:34.727363061 +0200
@@ -315,4 +315,12 @@
 	help
 	  This driver supports reset or low power mode handling for Mellanox BlueField.
 
+config POWER_RESET_SYSCON_REASON
+	tristate "Generic SYSCON regmap reset reason driver"
+	depends on OF
+	depends on MFD_SYSCON
+	help
+	  Say y here to enable the syscon reset reason driver. It stores the
+	  reset reason (cold, soft, panic, watchdog) and exposes it via sysfs.
+
 endif
diff -ruw linux-6.13.12/drivers/power/reset/Makefile linux-6.13.12-fbx/drivers/power/reset/Makefile
--- linux-6.13.12/drivers/power/reset/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/power/reset/Makefile	2025-09-25 17:40:34.727363061 +0200
@@ -37,3 +37,4 @@
 obj-$(CONFIG_POWER_RESET_SC27XX) += sc27xx-poweroff.o
 obj-$(CONFIG_NVMEM_REBOOT_MODE) += nvmem-reboot-mode.o
 obj-$(CONFIG_POWER_MLXBF) += pwr-mlxbf.o
+obj-$(CONFIG_POWER_RESET_SYSCON_REASON) += syscon-reset-reason.o
diff -ruw linux-6.13.12/drivers/regulator/Kconfig linux-6.13.12-fbx/drivers/regulator/Kconfig
--- linux-6.13.12/drivers/regulator/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/regulator/Kconfig	2025-09-25 17:40:34.779363319 +0200
@@ -30,6 +30,12 @@
 	help
 	  Say yes here to enable debugging support.
 
+config REGULATOR_FAULT_SENSING
+	bool "Regulator fault-sensing detection"
+	help
+	  Add support for fault-sensing gpio which will cause
+	  regulator to be forced-disabled.
+
 config REGULATOR_FIXED_VOLTAGE
 	tristate "Fixed voltage regulator support"
 	help
diff -ruw linux-6.13.12/drivers/rtc/Kconfig linux-6.13.12-fbx/drivers/rtc/Kconfig
--- linux-6.13.12/drivers/rtc/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/rtc/Kconfig	2025-09-25 17:40:34.799363418 +0200
@@ -1450,6 +1450,15 @@
 	  This driver can also be built as a module. If so, the module
 	  will be called rtc-meson-vrtc.
 
+config RTC_DRV_MESON_AXG
+	tristate "Amlogic Meson AXG RTC + wakeup"
+	depends on RTC_HCTOSYS
+	depends on ARCH_MESON || COMPILE_TEST
+	default m if ARCH_MESON
+	help
+	  If you say yes here you will get support for the RTC of Amlogic SoCs
+	  as implemented in M3's firmware for AXG platform by Freebox.
+
 config RTC_DRV_OMAP
 	tristate "TI OMAP Real Time Clock"
 	depends on ARCH_OMAP || ARCH_DAVINCI || COMPILE_TEST
diff -ruw linux-6.13.12/drivers/rtc/Makefile linux-6.13.12-fbx/drivers/rtc/Makefile
--- linux-6.13.12/drivers/rtc/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/rtc/Makefile	2025-09-25 17:40:34.799363418 +0200
@@ -102,6 +102,7 @@
 obj-$(CONFIG_RTC_DRV_MAX8997)	+= rtc-max8997.o
 obj-$(CONFIG_RTC_DRV_MAX8998)	+= rtc-max8998.o
 obj-$(CONFIG_RTC_DRV_MESON_VRTC)+= rtc-meson-vrtc.o
+obj-$(CONFIG_RTC_DRV_MESON_AXG)	+= rtc-meson-axg.o
 obj-$(CONFIG_RTC_DRV_MC13XXX)	+= rtc-mc13xxx.o
 obj-$(CONFIG_RTC_DRV_MCP795)	+= rtc-mcp795.o
 obj-$(CONFIG_RTC_DRV_MESON)	+= rtc-meson.o
diff -ruw linux-6.13.12/drivers/soc/bcm/Kconfig linux-6.13.12-fbx/drivers/soc/bcm/Kconfig
--- linux-6.13.12/drivers/soc/bcm/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/soc/bcm/Kconfig	2025-09-25 17:40:35.059364707 +0200
@@ -1,6 +1,18 @@
 # SPDX-License-Identifier: GPL-2.0-only
 menu "Broadcom SoC drivers"
 
+config SOC_BCM63XX
+	bool "Broadcom 63xx SoC drivers"
+	depends on BMIPS_GENERIC || ARCH_BCMBCA || COMPILE_TEST
+	select RESET_CONTROLLER
+	help
+	  Enables drivers for the Broadcom 63xx series of chips.
+	  Drivers can be enabled individually within this menu.
+
+	  If unsure, say N.
+
+source "drivers/soc/bcm/bcm63xx/Kconfig"
+
 config SOC_BRCMSTB
 	bool "Broadcom STB SoC drivers"
 	depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST
diff -ruw linux-6.13.12/drivers/soc/bcm/Makefile linux-6.13.12-fbx/drivers/soc/bcm/Makefile
--- linux-6.13.12/drivers/soc/bcm/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/soc/bcm/Makefile	2025-09-25 17:40:35.059364707 +0200
@@ -1,2 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-$(CONFIG_SOC_BRCMSTB)	+= brcmstb/
+obj-$(CONFIG_SOC_BCM63XX)	+= bcm63xx/
diff -ruw linux-6.13.12/drivers/soc/qcom/Kconfig linux-6.13.12-fbx/drivers/soc/qcom/Kconfig
--- linux-6.13.12/drivers/soc/qcom/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/soc/qcom/Kconfig	2025-09-25 17:40:35.087364846 +0200
@@ -295,4 +295,11 @@
 	  This module provides the APIs to the client drivers that wants to send the
 	  PBS trigger event to the PBS RAM.
 
+config QCOM_IMEM_RESET_REASON
+	tristate "QCOM IMEM based reset reason"
+	depends on ARCH_QCOM || COMPILE_TEST
+
+config QCOM_LICENSE_MANAGER_SIMPLE
+	tristate "QCOM Simple License Manager Driver"
+
 endmenu
diff -ruw linux-6.13.12/drivers/soc/qcom/Makefile linux-6.13.12-fbx/drivers/soc/qcom/Makefile
--- linux-6.13.12/drivers/soc/qcom/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/soc/qcom/Makefile	2025-09-25 17:40:35.087364846 +0200
@@ -39,3 +39,7 @@
 qcom_ice-objs			+= ice.o
 obj-$(CONFIG_QCOM_INLINE_CRYPTO_ENGINE)	+= qcom_ice.o
 obj-$(CONFIG_QCOM_PBS) +=	qcom-pbs.o
+
+obj-$(CONFIG_QCOM_IMEM_RESET_REASON) += qcom-imem-reset-reason.o
+
+obj-$(CONFIG_QCOM_LICENSE_MANAGER_SIMPLE)	+= qcom-license-manager-simple.o
diff -ruw linux-6.13.12/drivers/spi/Kconfig linux-6.13.12-fbx/drivers/spi/Kconfig
--- linux-6.13.12/drivers/spi/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/spi/Kconfig	2025-09-25 17:40:35.107364945 +0200
@@ -308,6 +308,19 @@
 	  This enables support for the Coldfire QSPI controller in master
 	  mode.
 
+config SPI_CORTINA
+	tristate "Cortina-Access host SPI controller"
+	depends on ARCH_CORTINA || COMPILE_TEST
+	default ARCH_CORTINA_ACCESS
+	help
+	  This enables support for the Cortina-Access host SPI controller
+
+config SPI_CORTINA_SFLASH
+	tristate "Cortina-Access serial flash controller driver"
+	depends on MACH_CORTINA_SATURN || COMPILE_TEST
+	depends on SPI_MEM
+	default MACH_CORTINA_SATURN
+
 config SPI_CS42L43
 	tristate "Cirrus Logic CS42L43 SPI controller"
 	depends on MFD_CS42L43 && PINCTRL_CS42L43
@@ -843,6 +856,12 @@
 	help
 	  This selects a driver for the PPC4xx SPI Controller.
 
+config SPI_TDM_ORION
+	tristate "Orion TDM SPI master"
+	depends on PLAT_ORION
+	help
+	  This enables using the TDM SPI master controller on the Orion chips.
+
 config SPI_PXA2XX
 	tristate "PXA2xx SSP SPI master"
 	depends on ARCH_PXA || ARCH_MMP || (X86 && (PCI || ACPI)) || COMPILE_TEST
diff -ruw linux-6.13.12/drivers/spi/Makefile linux-6.13.12-fbx/drivers/spi/Makefile
--- linux-6.13.12/drivers/spi/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/spi/Makefile	2025-09-25 17:40:35.107364945 +0200
@@ -43,6 +43,8 @@
 obj-$(CONFIG_SPI_CH341)			+= spi-ch341.o
 obj-$(CONFIG_SPI_CLPS711X)		+= spi-clps711x.o
 obj-$(CONFIG_SPI_COLDFIRE_QSPI)		+= spi-coldfire-qspi.o
+obj-$(CONFIG_SPI_CORTINA)		+= spi-cortina.o
+obj-$(CONFIG_SPI_CORTINA_SFLASH)	+= spi-cortina-sflash.o
 obj-$(CONFIG_SPI_CS42L43)		+= spi-cs42l43.o
 obj-$(CONFIG_SPI_DAVINCI)		+= spi-davinci.o
 obj-$(CONFIG_SPI_DLN2)			+= spi-dln2.o
@@ -107,6 +109,7 @@
 obj-$(CONFIG_SPI_PCI1XXXX)		+= spi-pci1xxxx.o
 obj-$(CONFIG_SPI_PIC32)			+= spi-pic32.o
 obj-$(CONFIG_SPI_PIC32_SQI)		+= spi-pic32-sqi.o
+obj-$(CONFIG_SPI_TDM_ORION)		+= orion_tdm_spi.o
 obj-$(CONFIG_SPI_PL022)			+= spi-pl022.o
 obj-$(CONFIG_SPI_PPC4xx)		+= spi-ppc4xx.o
 obj-$(CONFIG_SPI_PXA2XX)		+= spi-pxa2xx-core.o
diff -ruw linux-6.13.12/drivers/spi/spi-bcm63xx-hsspi.c linux-6.13.12-fbx/drivers/spi/spi-bcm63xx-hsspi.c
--- linux-6.13.12/drivers/spi/spi-bcm63xx-hsspi.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/spi/spi-bcm63xx-hsspi.c	2025-09-25 17:40:35.107364945 +0200
@@ -62,7 +62,7 @@
 #define HSSPI_PINGPONG_STATUS_SRC_BUSY		BIT(1)
 
 #define HSSPI_PROFILE_CLK_CTRL_REG(x)		(0x100 + (x) * 0x20)
-#define CLK_CTRL_FREQ_CTRL_MASK			0x0000ffff
+#define CLK_CTRL_FREQ_CTRL_MASK			0x000007ff
 #define CLK_CTRL_SPI_CLK_2X_SEL			BIT(14)
 #define CLK_CTRL_ACCUM_RST_ON_LOOP		BIT(15)
 
@@ -143,6 +143,8 @@
 	u32 xfer_mode;
 	u32 prepend_cnt;
 	u8 *prepend_buf;
+
+	int dummy_cs;
 };
 
 static ssize_t wait_mode_show(struct device *dev, struct device_attribute *attr,
@@ -445,6 +447,8 @@
 	u32 reg;
 
 	reg = DIV_ROUND_UP(2048, DIV_ROUND_UP(bs->speed_hz, hz));
+	if (reg > CLK_CTRL_FREQ_CTRL_MASK)
+		reg = CLK_CTRL_FREQ_CTRL_MASK;
 	__raw_writel(CLK_CTRL_ACCUM_RST_ON_LOOP | reg,
 		     bs->regs + HSSPI_PROFILE_CLK_CTRL_REG(profile));
 
@@ -465,6 +469,17 @@
 	mutex_unlock(&bs->bus_mutex);
 }
 
+/*
+ * Use the DT-specified dummy chip select, or fall back to the
+ * previous behaviour (dummy_cs = !cs).
+ */
+static inline int bcm63xx_hsspi_get_dummy_cs(struct bcm63xx_hsspi *bs, int cs)
+{
+	if (bs->dummy_cs >= 0)
+		return bs->dummy_cs;
+	return !cs;
+}
+
 static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
 {
 	struct bcm63xx_hsspi *bs = spi_controller_get_devdata(spi->controller);
@@ -475,6 +490,7 @@
 	const u8 *tx = t->tx_buf;
 	u8 *rx = t->rx_buf;
 	u32 reg = 0;
+	int dummy_cs = bcm63xx_hsspi_get_dummy_cs(bs, chip_select);
 
 	bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
 	if (!t->cs_off)
@@ -520,7 +536,7 @@
 			__raw_writel(HSSPI_PINGx_CMD_DONE(0),
 				     bs->regs + HSSPI_INT_MASK_REG);
 
-		reg =  !chip_select << PINGPONG_CMD_SS_SHIFT |
+		reg =  dummy_cs << PINGPONG_CMD_SS_SHIFT |
 			    chip_select << PINGPONG_CMD_PROFILE_SHIFT |
 			    PINGPONG_COMMAND_START_NOW;
 		__raw_writel(reg, bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
@@ -600,7 +616,7 @@
 	 * e. At the end restore the polarities again to their default values.
 	 */
 
-	dummy_cs = !spi_get_chipselect(spi, 0);
+	dummy_cs = bcm63xx_hsspi_get_dummy_cs(bs, spi_get_chipselect(spi, 0));
 	bcm63xx_hsspi_set_cs(bs, dummy_cs, true);
 
 	list_for_each_entry(t, &msg->transfers, transfer_list) {
@@ -731,6 +747,7 @@
 	int irq, ret;
 	u32 reg, rate, num_cs = HSSPI_SPI_MAX_CS;
 	struct reset_control *reset;
+	u32 dummy_cs;
 
 	irq = platform_get_irq(pdev, 0);
 	if (irq < 0)
@@ -799,6 +816,13 @@
 		goto out_put_host;
 	}
 
+	if (of_property_read_u32(dev->of_node, "broadcom,dummy-cs",
+				&dummy_cs) < 0 || dummy_cs >= 8)
+		bs->dummy_cs = -1;
+	else
+		bs->dummy_cs = dummy_cs;
+	dev_info(&pdev->dev, "using dummy chip select %d\n", bs->dummy_cs);
+
 	mutex_init(&bs->bus_mutex);
 	mutex_init(&bs->msg_mutex);
 	init_completion(&bs->done);
diff -ruw linux-6.13.12/drivers/tee/optee/smc_abi.c linux-6.13.12-fbx/drivers/tee/optee/smc_abi.c
--- linux-6.13.12/drivers/tee/optee/smc_abi.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/tee/optee/smc_abi.c	2025-09-25 17:40:35.219365501 +0200
@@ -1770,9 +1770,14 @@
 	if (optee->smc.sec_caps & OPTEE_SMC_SEC_CAP_DYNAMIC_SHM)
 		pr_info("dynamic shared memory is enabled\n");
 
+	if (device_property_present(&pdev->dev, "skip-enumeration"))
+		optee->scan_bus_done = true;
+
+	if (!optee->scan_bus_done) {
 	rc = optee_enumerate_devices(PTA_CMD_GET_DEVICES);
 	if (rc)
 		goto err_disable_shm_cache;
+	}
 
 	INIT_WORK(&optee->rpmb_scan_bus_work, optee_bus_scan_rpmb);
 	optee->rpmb_intf.notifier_call = optee_rpmb_intf_rdev;
diff -ruw linux-6.13.12/drivers/thermal/Kconfig linux-6.13.12-fbx/drivers/thermal/Kconfig
--- linux-6.13.12/drivers/thermal/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/thermal/Kconfig	2025-09-25 17:40:35.219365501 +0200
@@ -435,6 +435,11 @@
 source "drivers/thermal/broadcom/Kconfig"
 endmenu
 
+menu "Cortina thermal drivers"
+depends on ARCH_CORTINA || MACH_CORTINA_SATURN || COMPILE_TEST
+source "drivers/thermal/cortina/Kconfig"
+endmenu
+
 menu "Texas Instruments thermal drivers"
 depends on ARCH_HAS_BANDGAP || COMPILE_TEST
 depends on HAS_IOMEM
diff -ruw linux-6.13.12/drivers/thermal/Makefile linux-6.13.12-fbx/drivers/thermal/Makefile
--- linux-6.13.12/drivers/thermal/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/thermal/Makefile	2025-09-25 17:40:35.223365520 +0200
@@ -53,6 +53,7 @@
 obj-$(CONFIG_MAX77620_THERMAL)	+= max77620_thermal.o
 obj-$(CONFIG_QORIQ_THERMAL)	+= qoriq_thermal.o
 obj-$(CONFIG_DA9062_THERMAL)	+= da9062-thermal.o
+obj-y				+= cortina/
 obj-y				+= intel/
 obj-$(CONFIG_TI_SOC_THERMAL)	+= ti-soc-thermal/
 obj-y				+= st/
diff -ruw linux-6.13.12/drivers/thermal/thermal_core.c linux-6.13.12-fbx/drivers/thermal/thermal_core.c
--- linux-6.13.12/drivers/thermal/thermal_core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/thermal/thermal_core.c	2025-09-25 17:40:35.231365560 +0200
@@ -1037,6 +1037,7 @@
 
 /**
  * __thermal_cooling_device_register() - register a new thermal cooling device
+ * @dev:	parent device
  * @np:		a pointer to a device tree node.
  * @type:	the thermal cooling device type.
  * @devdata:	device private data.
@@ -1052,7 +1053,7 @@
  * ERR_PTR. Caller must check return value with IS_ERR*() helpers.
  */
 static struct thermal_cooling_device *
-__thermal_cooling_device_register(struct device_node *np,
+__thermal_cooling_device_register(struct device *pdev, struct device_node *np,
 				  const char *type, void *devdata,
 				  const struct thermal_cooling_device_ops *ops)
 {
@@ -1112,6 +1113,7 @@
 	ret = dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
 	if (ret)
 		goto out_cooling_dev;
+	cdev->device.parent = pdev;
 
 	ret = device_register(&cdev->device);
 	if (ret) {
@@ -1155,11 +1157,30 @@
 thermal_cooling_device_register(const char *type, void *devdata,
 				const struct thermal_cooling_device_ops *ops)
 {
-	return __thermal_cooling_device_register(NULL, type, devdata, ops);
+	return __thermal_cooling_device_register(NULL, NULL, type, devdata, ops);
 }
 EXPORT_SYMBOL_GPL(thermal_cooling_device_register);
 
 /**
+ * thermal_cooling_device_register_with_parent() - register a new thermal cooling device
+ * @pdev:	parent device
+ * @type:	the thermal cooling device type.
+ * @devdata:	device private data.
+ * @ops:	standard thermal cooling device callbacks.
+ *
+ * Same as thermal_cooling_device_register() but also takes the parent
+ * device, so the hardware path uniquely identifies this device.
+ */
+struct thermal_cooling_device *
+thermal_cooling_device_register_with_parent(struct device *pdev,
+				const char *type, void *devdata,
+				const struct thermal_cooling_device_ops *ops)
+{
+	return __thermal_cooling_device_register(pdev, NULL, type, devdata, ops);
+}
+EXPORT_SYMBOL_GPL(thermal_cooling_device_register_with_parent);
+
+/**
  * thermal_of_cooling_device_register() - register an OF thermal cooling device
  * @np:		a pointer to a device tree node.
  * @type:	the thermal cooling device type.
@@ -1179,7 +1200,7 @@
 				   const char *type, void *devdata,
 				   const struct thermal_cooling_device_ops *ops)
 {
-	return __thermal_cooling_device_register(np, type, devdata, ops);
+	return __thermal_cooling_device_register(NULL, np, type, devdata, ops);
 }
 EXPORT_SYMBOL_GPL(thermal_of_cooling_device_register);
 
@@ -1219,7 +1240,7 @@
 	if (!ptr)
 		return ERR_PTR(-ENOMEM);
 
-	tcd = __thermal_cooling_device_register(np, type, devdata, ops);
+	tcd = __thermal_cooling_device_register(NULL, np, type, devdata, ops);
 	if (IS_ERR(tcd)) {
 		devres_free(ptr);
 		return tcd;
diff -ruw linux-6.13.12/drivers/tty/serial/Kconfig linux-6.13.12-fbx/drivers/tty/serial/Kconfig
--- linux-6.13.12/drivers/tty/serial/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/tty/serial/Kconfig	2025-09-25 17:40:35.255365679 +0200
@@ -1368,6 +1368,17 @@
 	  This driver can also be build as a module. If so, the module will be called
 	  men_z135_uart.ko
 
+config SERIAL_CORTINA
+	bool "CORTINA serial port support"
+	depends on ARCH_CORTINA || MACH_CORTINA_SATURN || COMPILE_TEST
+	select SERIAL_CORE
+
+config SERIAL_CORTINA_CONSOLE
+	bool "Console on Cortina serial port"
+	depends on SERIAL_CORTINA
+	select SERIAL_CORE_CONSOLE
+	select SERIAL_EARLYCON
+
 config SERIAL_SPRD
 	tristate "Support for Spreadtrum serial"
 	select SERIAL_CORE
diff -ruw linux-6.13.12/drivers/tty/serial/Makefile linux-6.13.12-fbx/drivers/tty/serial/Makefile
--- linux-6.13.12/drivers/tty/serial/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/tty/serial/Makefile	2025-09-25 17:40:35.255365679 +0200
@@ -36,6 +36,7 @@
 obj-$(CONFIG_SERIAL_CLPS711X)		+= clps711x.o
 obj-$(CONFIG_SERIAL_CPM)		+= cpm_uart.o
 obj-$(CONFIG_SERIAL_CONEXANT_DIGICOLOR)	+= digicolor-usart.o
+obj-$(CONFIG_SERIAL_CORTINA)	+= serial-cortina.o
 obj-$(CONFIG_SERIAL_DZ)			+= dz.o
 obj-$(CONFIG_SERIAL_ESP32)		+= esp32_uart.o
 obj-$(CONFIG_SERIAL_ESP32_ACM)		+= esp32_acm.o
diff -ruw linux-6.13.12/drivers/usb/dwc3/Kconfig linux-6.13.12-fbx/drivers/usb/dwc3/Kconfig
--- linux-6.13.12/drivers/usb/dwc3/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/usb/dwc3/Kconfig	2025-09-25 17:40:35.307365937 +0200
@@ -189,4 +189,13 @@
 	  or dual-role mode.
 	  Say 'Y' or 'M' if you have such device.
 
+config USB_DWC3_CORTINA
+	tristate "Cortina Access Platform"
+	depends on OF && (ARCH_CORTINA || COMPILE_TEST)
+	default USB_DWC3
+	help
+	  Some Cortina SoCs use DesignWare Core IP for USB2/3
+	  functionality.
+	  Say 'Y' or 'M' if you have one such device.
+
 endif
diff -ruw linux-6.13.12/drivers/usb/dwc3/Makefile linux-6.13.12-fbx/drivers/usb/dwc3/Makefile
--- linux-6.13.12/drivers/usb/dwc3/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/usb/dwc3/Makefile	2025-09-25 17:40:35.307365937 +0200
@@ -56,3 +56,4 @@
 obj-$(CONFIG_USB_DWC3_XILINX)		+= dwc3-xilinx.o
 obj-$(CONFIG_USB_DWC3_OCTEON)		+= dwc3-octeon.o
 obj-$(CONFIG_USB_DWC3_RTK)		+= dwc3-rtk.o
+obj-$(CONFIG_USB_DWC3_CORTINA)		+= dwc3-cortina.o
diff -ruw linux-6.13.12/drivers/usb/host/Kconfig linux-6.13.12-fbx/drivers/usb/host/Kconfig
--- linux-6.13.12/drivers/usb/host/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/usb/host/Kconfig	2025-09-25 17:40:35.343366115 +0200
@@ -78,13 +78,13 @@
 	  If unsure, say N.
 
 config USB_XHCI_MVEBU
-	tristate "xHCI support for Marvell Armada 375/38x/37xx"
+	tristate "xHCI support for Marvell Armada 375/38x/37xx/70x0/80x0"
 	select USB_XHCI_PLATFORM
 	depends on HAS_IOMEM
 	depends on ARCH_MVEBU || COMPILE_TEST
 	help
 	  Say 'Y' to enable the support for the xHCI host controller
-	  found in Marvell Armada 375/38x/37xx ARM SOCs.
+	  found in Marvell Armada 375/38x/37xx/70x0/80x0 ARM SOCs.
 
 config USB_XHCI_RCAR
 	tristate "xHCI support for Renesas R-Car SoCs"
@@ -690,6 +690,10 @@
 
 	  If unsure, say N.
 
+config USB_BCM63158
+	tristate "Broadcom BCM63158 SoC USB host driver"
+	depends on ARCH_BCMBCA || COMPILE_TEST
+
 config USB_HCD_SSB
 	tristate "SSB usb host driver"
 	depends on SSB
diff -ruw linux-6.13.12/drivers/usb/host/Makefile linux-6.13.12-fbx/drivers/usb/host/Makefile
--- linux-6.13.12/drivers/usb/host/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/usb/host/Makefile	2025-09-25 17:40:35.347366135 +0200
@@ -85,3 +85,4 @@
 obj-$(CONFIG_USB_HCD_SSB)	+= ssb-hcd.o
 obj-$(CONFIG_USB_MAX3421_HCD)	+= max3421-hcd.o
 obj-$(CONFIG_USB_XEN_HCD)	+= xen-hcd.o
+obj-$(CONFIG_USB_BCM63158)	+= usb-bcm63158.o
diff -ruw linux-6.13.12/drivers/usb/host/xhci-hub.c linux-6.13.12-fbx/drivers/usb/host/xhci-hub.c
--- linux-6.13.12/drivers/usb/host/xhci-hub.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/usb/host/xhci-hub.c	2025-09-25 17:40:35.355366175 +0200
@@ -17,6 +17,8 @@
 #include "xhci.h"
 #include "xhci-trace.h"
 
+extern int usb3_disable;
+
 #define	PORT_WAKE_BITS	(PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
 #define	PORT_RWC_BITS	(PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
 			 PORT_RC | PORT_PLC | PORT_PE)
@@ -655,6 +657,9 @@
 	xhci_dbg(xhci, "set port power %d-%d %s, portsc: 0x%x\n",
 		 hcd->self.busnum, port->hcd_portnum + 1, on ? "ON" : "OFF", temp);
 
+	if (usb3_disable && on && hcd->speed >= HCD_USB3)
+		on = false;
+
 	temp = xhci_port_state_to_neutral(temp);
 
 	if (on) {
diff -ruw linux-6.13.12/drivers/usb/host/xhci-plat.c linux-6.13.12-fbx/drivers/usb/host/xhci-plat.c
--- linux-6.13.12/drivers/usb/host/xhci-plat.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/usb/host/xhci-plat.c	2025-09-25 17:40:35.359366195 +0200
@@ -128,6 +128,8 @@
 		.compatible = "marvell,armada3700-xhci",
 		.data = &xhci_plat_marvell_armada3700,
 	}, {
+		.compatible = "marvell,armada-8k-xhci",
+	}, {
 		.compatible = "brcm,xhci-brcm-v2",
 		.data = &xhci_plat_brcm,
 	}, {
diff -ruw linux-6.13.12/drivers/usb/host/xhci-ring.c linux-6.13.12-fbx/drivers/usb/host/xhci-ring.c
--- linux-6.13.12/drivers/usb/host/xhci-ring.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/usb/host/xhci-ring.c	2025-09-25 17:40:35.359366195 +0200
@@ -1976,7 +1976,8 @@
 	}
 
 	/* We might get interrupts after shared_hcd is removed */
-	if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL) {
+	if (port->rhub == &xhci->usb3_rhub && xhci->shared_hcd == NULL &&
+	    !xhci_has_one_roothub(xhci)) {
 		xhci_dbg(xhci, "ignore port event for removed USB3 hcd\n");
 		bogus_port_status = true;
 		goto cleanup;
diff -ruw linux-6.13.12/drivers/usb/host/xhci.c linux-6.13.12-fbx/drivers/usb/host/xhci.c
--- linux-6.13.12/drivers/usb/host/xhci.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/usb/host/xhci.c	2025-09-25 17:40:35.363366215 +0200
@@ -39,6 +39,10 @@
 module_param(quirks, ullong, S_IRUGO);
 MODULE_PARM_DESC(quirks, "Bit flags for quirks to be enabled as default");
 
+int usb3_disable = 0;
+module_param(usb3_disable, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(usb3_disable, "Disable USB3 interface");
+
 static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
 {
 	struct xhci_segment *seg;
diff -ruw linux-6.13.12/drivers/usb/storage/usb.c linux-6.13.12-fbx/drivers/usb/storage/usb.c
--- linux-6.13.12/drivers/usb/storage/usb.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/usb/storage/usb.c	2025-09-25 17:40:35.391366353 +0200
@@ -68,7 +68,7 @@
 MODULE_DESCRIPTION("USB Mass Storage driver for Linux");
 MODULE_LICENSE("GPL");
 
-static unsigned int delay_use = 1 * MSEC_PER_SEC;
+static unsigned int delay_use = 5 * MSEC_PER_SEC;
 
 /**
  * parse_delay_str - parse an unsigned decimal integer delay
diff -ruw linux-6.13.12/drivers/video/Kconfig linux-6.13.12-fbx/drivers/video/Kconfig
--- linux-6.13.12/drivers/video/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/video/Kconfig	2025-09-25 17:40:35.415366472 +0200
@@ -69,5 +69,4 @@
 
 endif
 
-
 endmenu
diff -ruw linux-6.13.12/drivers/video/fbdev/Kconfig linux-6.13.12-fbx/drivers/video/fbdev/Kconfig
--- linux-6.13.12/drivers/video/fbdev/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/video/fbdev/Kconfig	2025-09-25 17:40:35.419366492 +0200
@@ -1819,6 +1819,18 @@
 	  called sm712fb. If you want to compile it as a module, say M
 	  here and read <file:Documentation/kbuild/modules.rst>.
 
+config FB_SSD1320
+	tristate "SSD1320 OLED driver"
+	depends on FB && SPI
+	select FB_SYSMEM_HELPERS_DEFERRED
+	select FB_BACKLIGHT
+
+config FB_SSD1327
+	tristate "SSD1327 OLED driver"
+	depends on FB && SPI
+	select FB_SYSMEM_HELPERS_DEFERRED
+	select FB_BACKLIGHT
+
 source "drivers/video/fbdev/omap/Kconfig"
 source "drivers/video/fbdev/omap2/Kconfig"
 source "drivers/video/fbdev/mmp/Kconfig"
diff -ruw linux-6.13.12/drivers/video/fbdev/Makefile linux-6.13.12-fbx/drivers/video/fbdev/Makefile
--- linux-6.13.12/drivers/video/fbdev/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/video/fbdev/Makefile	2025-09-25 17:40:35.419366492 +0200
@@ -123,6 +123,8 @@
 obj-$(CONFIG_FB_OF)               += offb.o
 obj-$(CONFIG_FB_SSD1307)	  += ssd1307fb.o
 obj-$(CONFIG_FB_SIMPLE)           += simplefb.o
+obj-$(CONFIG_FB_SSD1327)          += ssd1327.o
+obj-$(CONFIG_FB_SSD1320)          += ssd1320.o
 
 # the test framebuffer is last
 obj-$(CONFIG_FB_VIRTUAL)          += vfb.o
diff -ruw linux-6.13.12/drivers/watchdog/Kconfig linux-6.13.12-fbx/drivers/watchdog/Kconfig
--- linux-6.13.12/drivers/watchdog/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/watchdog/Kconfig	2025-09-25 17:40:35.463366710 +0200
@@ -1043,6 +1043,13 @@
 
 	  When built as a module this will be called realtek_otto_wdt.
 
+config CORTINA_PER_WDT
+	tristate "Cortina watchdog support"
+	depends on ARCH_CORTINA || MACH_CORTINA_SATURN || COMPILE_TEST
+	depends on COMMON_CLK
+	select WATCHDOG_CORE
+	default ARCH_CORTINA || MACH_CORTINA_SATURN
+
 config SPRD_WATCHDOG
 	tristate "Spreadtrum watchdog support"
 	depends on ARCH_SPRD || COMPILE_TEST
diff -ruw linux-6.13.12/drivers/watchdog/Makefile linux-6.13.12-fbx/drivers/watchdog/Makefile
--- linux-6.13.12/drivers/watchdog/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/drivers/watchdog/Makefile	2025-09-25 17:40:35.463366710 +0200
@@ -166,6 +166,7 @@
 # MIPS Architecture
 obj-$(CONFIG_ATH79_WDT) += ath79_wdt.o
 obj-$(CONFIG_BCM47XX_WDT) += bcm47xx_wdt.o
+obj-$(CONFIG_CORTINA_PER_WDT) += cortina_per_wdt.o
 obj-$(CONFIG_RC32434_WDT) += rc32434_wdt.o
 obj-$(CONFIG_INDYDOG) += indydog.o
 obj-$(CONFIG_JZ4740_WDT) += jz4740_wdt.o
diff -ruw linux-6.13.12/fs/Kconfig linux-6.13.12-fbx/fs/Kconfig
--- linux-6.13.12/fs/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/Kconfig	2025-09-25 17:40:36.423371471 +0200
@@ -161,6 +161,7 @@
 source "fs/fat/Kconfig"
 source "fs/exfat/Kconfig"
 source "fs/ntfs3/Kconfig"
+source "fs/exfat-fbx/Kconfig"
 
 endmenu
 endif # BLOCK
diff -ruw linux-6.13.12/fs/Makefile linux-6.13.12-fbx/fs/Makefile
--- linux-6.13.12/fs/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/Makefile	2025-09-25 17:40:36.423371471 +0200
@@ -129,3 +129,4 @@
 obj-$(CONFIG_VBOXSF_FS)		+= vboxsf/
 obj-$(CONFIG_ZONEFS_FS)		+= zonefs/
 obj-$(CONFIG_BPF_LSM)		+= bpf_fs_kfuncs.o
+obj-$(CONFIG_EXFAT_FS_FBX)		+= exfat-fbx/
diff -ruw linux-6.13.12/fs/binfmt_elf.c linux-6.13.12-fbx/fs/binfmt_elf.c
--- linux-6.13.12/fs/binfmt_elf.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/binfmt_elf.c	2025-09-25 17:40:36.475371729 +0200
@@ -280,6 +280,7 @@
 	NEW_AUX_ENT(AT_RSEQ_FEATURE_SIZE, offsetof(struct rseq, end));
 	NEW_AUX_ENT(AT_RSEQ_ALIGN, __alignof__(struct rseq));
 #endif
+	NEW_AUX_ENT(AT_ASLR, bprm->aslr_used);
 #undef NEW_AUX_ENT
 	/* AT_NULL is zero; clear the rest too */
 	memset(elf_info, 0, (char *)mm->saved_auxv +
@@ -1014,6 +1015,8 @@
 		current->flags |= PF_RANDOMIZE;
 
 	setup_new_exec(bprm);
+	if ((current->flags & PF_RANDOMIZE) && !bprm->aslr_used)
+		bprm->aslr_used = 1;
 
 	/* Do this so that we can load the interpreter, if need be.  We will
 	   change some of these later */
diff -ruw linux-6.13.12/fs/debugfs/inode.c linux-6.13.12-fbx/fs/debugfs/inode.c
--- linux-6.13.12/fs/debugfs/inode.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/debugfs/inode.c	2025-09-25 17:40:36.543372066 +0200
@@ -834,76 +834,70 @@
 EXPORT_SYMBOL_GPL(debugfs_lookup_and_remove);
 
 /**
- * debugfs_rename - rename a file/directory in the debugfs filesystem
- * @old_dir: a pointer to the parent dentry for the renamed object. This
- *          should be a directory dentry.
- * @old_dentry: dentry of an object to be renamed.
- * @new_dir: a pointer to the parent dentry where the object should be
- *          moved. This should be a directory dentry.
- * @new_name: a pointer to a string containing the target name.
+ * debugfs_change_name - rename a file/directory in the debugfs filesystem
+ * @dentry: dentry of an object to be renamed.
+ * @fmt: format for new name
  *
  * This function renames a file/directory in debugfs.  The target must not
  * exist for rename to succeed.
  *
- * This function will return a pointer to old_dentry (which is updated to
- * reflect renaming) if it succeeds. If an error occurs, ERR_PTR(-ERROR)
- * will be returned.
+ * This function will return 0 on success and -E... on failure.
  *
  * If debugfs is not enabled in the kernel, the value -%ENODEV will be
  * returned.
  */
-struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
-		struct dentry *new_dir, const char *new_name)
+int __printf(2, 3) debugfs_change_name(struct dentry *dentry, const char *fmt, ...)
 {
-	int error;
-	struct dentry *dentry = NULL, *trap;
+	int error = 0;
+	const char *new_name;
 	struct name_snapshot old_name;
-
-	if (IS_ERR(old_dir))
-		return old_dir;
-	if (IS_ERR(new_dir))
-		return new_dir;
-	if (IS_ERR_OR_NULL(old_dentry))
-		return old_dentry;
-
-	trap = lock_rename(new_dir, old_dir);
-	/* Source or destination directories don't exist? */
-	if (d_really_is_negative(old_dir) || d_really_is_negative(new_dir))
-		goto exit;
-	/* Source does not exist, cyclic rename, or mountpoint? */
-	if (d_really_is_negative(old_dentry) || old_dentry == trap ||
-	    d_mountpoint(old_dentry))
-		goto exit;
-	dentry = lookup_one_len(new_name, new_dir, strlen(new_name));
-	/* Lookup failed, cyclic rename or target exists? */
-	if (IS_ERR(dentry) || dentry == trap || d_really_is_positive(dentry))
-		goto exit;
-
-	take_dentry_name_snapshot(&old_name, old_dentry);
-
-	error = simple_rename(&nop_mnt_idmap, d_inode(old_dir), old_dentry,
-			      d_inode(new_dir), dentry, 0);
-	if (error) {
-		release_dentry_name_snapshot(&old_name);
-		goto exit;
+	struct dentry *parent, *target;
+	struct inode *dir;
+	va_list ap;
+
+	if (IS_ERR_OR_NULL(dentry))
+		return 0;
+
+	va_start(ap, fmt);
+	new_name = kvasprintf_const(GFP_KERNEL, fmt, ap);
+	va_end(ap);
+	if (!new_name)
+		return -ENOMEM;
+
+	parent = dget_parent(dentry);
+	dir = d_inode(parent);
+	inode_lock(dir);
+
+	take_dentry_name_snapshot(&old_name, dentry);
+
+	if (WARN_ON_ONCE(dentry->d_parent != parent)) {
+		error = -EINVAL;
+		goto out;
+	}
+	if (strcmp(old_name.name.name, new_name) == 0)
+		goto out;
+	target = lookup_one_len(new_name, parent, strlen(new_name));
+	if (IS_ERR(target)) {
+		error = PTR_ERR(target);
+		goto out;
+	}
+	if (d_really_is_positive(target)) {
+		dput(target);
+		error = -EINVAL;
+		goto out;
 	}
-	d_move(old_dentry, dentry);
-	fsnotify_move(d_inode(old_dir), d_inode(new_dir), &old_name.name,
-		d_is_dir(old_dentry),
-		NULL, old_dentry);
+	simple_rename_timestamp(dir, dentry, dir, target);
+	d_move(dentry, target);
+	dput(target);
+	fsnotify_move(dir, dir, &old_name.name, d_is_dir(dentry), NULL, dentry);
+out:
 	release_dentry_name_snapshot(&old_name);
-	unlock_rename(new_dir, old_dir);
-	dput(dentry);
-	return old_dentry;
-exit:
-	if (dentry && !IS_ERR(dentry))
-		dput(dentry);
-	unlock_rename(new_dir, old_dir);
-	if (IS_ERR(dentry))
-		return dentry;
-	return ERR_PTR(-EINVAL);
+	inode_unlock(dir);
+	dput(parent);
+	kfree_const(new_name);
+	return error;
 }
-EXPORT_SYMBOL_GPL(debugfs_rename);
+EXPORT_SYMBOL_GPL(debugfs_change_name);
 
 /**
  * debugfs_initialized - Tells whether debugfs has been registered
diff -ruw linux-6.13.12/fs/exec.c linux-6.13.12-fbx/fs/exec.c
--- linux-6.13.12/fs/exec.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/exec.c	2025-09-25 17:40:36.555372125 +0200
@@ -77,6 +77,7 @@
 #include "internal.h"
 
 #include <trace/events/sched.h>
+#include <linux/prctl-private.h>
 
 static int bprm_creds_from_file(struct linux_binprm *bprm);
 
@@ -1851,6 +1852,22 @@
 	if (retval)
 		return retval;
 
+#ifdef CONFIG_PSEUDO_ASLR
+	/*
+	 * apply paslr policy
+	 */
+	bprm->aslr_used = 0;
+	if (randomize_va_space == 3) {
+		switch (current->paslr_exec_policy) {
+		case PR_PASLR_POLICY_UID:
+		case PR_PASLR_POLICY_PRESEED:
+			bprm->aslr_used = 2;
+		case PR_PASLR_POLICY_DISABLE:
+			break;
+		}
+	}
+#endif
+
 	/*
 	 * Check for unsafe execution states before exec_binprm(), which
 	 * will call back into begin_new_exec(), into bprm_creds_from_file(),
@@ -1867,6 +1884,41 @@
 	if (retval)
 		goto out;
 
+#ifdef CONFIG_PSEUDO_ASLR
+	/*
+	 * apply paslr policy
+	 */
+	if (bprm->aslr_used == 2) {
+		switch (current->paslr_exec_policy) {
+		case PR_PASLR_POLICY_UID:
+		{
+			unsigned char uid_seed[SCHED_PASLR_SEED_SIZE];
+
+			/* seed from uid */
+			memset(uid_seed, 0, sizeof (uid_seed));
+			memcpy(uid_seed, &current->real_cred->suid,
+			       sizeof (current->real_cred->suid));
+			paslr_task_init(current,
+					current->paslr_exec_policy,
+					uid_seed);
+			current->paslr_used = true;
+			break;
+		}
+		case PR_PASLR_POLICY_DISABLE:
+			/* no paslr */
+			current->paslr_used = false;
+			break;
+		case PR_PASLR_POLICY_PRESEED:
+			/* use pre-seed */
+			paslr_task_init(current,
+					current->paslr_exec_policy,
+					current->paslr_exec_preseed);
+			current->paslr_used = true;
+			break;
+		}
+	}
+#endif
+
 	retval = exec_binprm(bprm);
 	if (retval < 0)
 		goto out;
@@ -1908,6 +1960,23 @@
 		return PTR_ERR(filename);
 
 	/*
+	 * handle current->exec_mode:
+	 * - if unlimited, then nothing to do.
+	 * - if once, then set it to denied and continue (next execve
+	 *   after this one will fail).
+	 * - if denied, then effectively fail the execve call with EPERM.
+	 */
+	switch (current->exec_mode) {
+	case EXEC_MODE_UNLIMITED:
+		break;
+	case EXEC_MODE_ONCE:
+		current->exec_mode = EXEC_MODE_DENIED;
+		break;
+	case EXEC_MODE_DENIED:
+		return -EPERM;
+	}
+
+	/*
 	 * We move the actual failure in case of RLIMIT_NPROC excess from
 	 * set*uid() to execve() because too many poorly written programs
 	 * don't check setuid() return code.  Here we additionally recheck
diff -ruw linux-6.13.12/fs/proc/array.c linux-6.13.12-fbx/fs/proc/array.c
--- linux-6.13.12/fs/proc/array.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/proc/array.c	2025-09-25 17:40:36.743373058 +0200
@@ -145,6 +145,21 @@
 	return task_state_array[task_state_index(tsk)];
 }
 
+static const char *const task_exec_mode_array[] = {
+	"0 (Denied)",
+	"1 (Once)",
+	"2 (Unlimited)",
+};
+
+static inline const char *get_task_exec_mode(struct task_struct *tsk)
+{
+	unsigned int exec_mode = tsk->exec_mode;
+
+	if (exec_mode > EXEC_MODE_UNLIMITED)
+		return "? (Invalid)";
+	return task_exec_mode_array[exec_mode];
+}
+
 static inline void task_state(struct seq_file *m, struct pid_namespace *ns,
 				struct pid *pid, struct task_struct *p)
 {
@@ -195,6 +210,10 @@
 	seq_put_decimal_ull(m, "\t", from_kgid_munged(user_ns, cred->sgid));
 	seq_put_decimal_ull(m, "\t", from_kgid_munged(user_ns, cred->fsgid));
 	seq_put_decimal_ull(m, "\nFDSize:\t", max_fds);
+#ifdef CONFIG_PSEUDO_ASLR
+	seq_put_decimal_ull(m, "\nPaslr:\t", p->paslr_used);
+	seq_put_decimal_ull(m, "\nPaslrPolicy:\t", p->paslr_exec_policy);
+#endif
 
 	seq_puts(m, "\nGroups:\t");
 	group_info = cred->group_info;
@@ -403,6 +422,12 @@
 	seq_putc(m, '\n');
 }
 
+static inline void task_exec_mode(struct seq_file *m,
+				  struct task_struct *p)
+{
+	seq_printf(m, "Exec mode: %s\n", get_task_exec_mode(p));
+}
+
 static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
 {
 	seq_printf(m, "Cpus_allowed:\t%*pb\n",
@@ -461,6 +486,7 @@
 	cpuset_task_status_allowed(m, task);
 	task_context_switch_counts(m, task);
 	arch_proc_pid_thread_features(m, task);
+	task_exec_mode(m, task);
 	return 0;
 }
 
diff -ruw linux-6.13.12/fs/proc/base.c linux-6.13.12-fbx/fs/proc/base.c
--- linux-6.13.12/fs/proc/base.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/proc/base.c	2025-09-25 17:40:36.743373058 +0200
@@ -3269,6 +3269,7 @@
 				struct pid *pid, struct task_struct *task)
 {
 	struct mm_struct *mm;
+	int ret = 0;
 
 	mm = get_task_mm(task);
 	if (mm) {
@@ -3276,6 +3277,16 @@
 		seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
 		seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
 		seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
+		seq_printf(m, "ksm_merge_any: %s\n",
+				test_bit(MMF_VM_MERGE_ANY, &mm->flags) ? "yes" : "no");
+		ret = mmap_read_lock_killable(mm);
+		if (ret) {
+			mmput(mm);
+			return ret;
+		}
+		seq_printf(m, "ksm_mergeable: %s\n",
+				ksm_process_mergeable(mm) ? "yes" : "no");
+		mmap_read_unlock(mm);
 		mmput(mm);
 	}
 
diff -ruw linux-6.13.12/fs/pstore/inode.c linux-6.13.12-fbx/fs/pstore/inode.c
--- linux-6.13.12/fs/pstore/inode.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/pstore/inode.c	2025-09-25 17:40:36.747373078 +0200
@@ -24,6 +24,7 @@
 #include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/cleanup.h>
+#include <linux/xattr.h>
 
 #include "internal.h"
 
@@ -202,6 +203,54 @@
 	return simple_unlink(dir, dentry);
 }
 
+/*
+ * List xattrs: expose "user.uts_release" when the record carries one.
+ */
+static ssize_t
+pstore_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
+{
+	struct pstore_private *p = d_inode(dentry)->i_private;
+	struct pstore_record *record = p->record;
+
+	printk("pstore_xattr_list called\n");
+	if (!record->uts_release[0]) {
+		printk("zero len\n");
+		return 0;
+	}
+
+	return strscpy(buffer, "user.uts_release", buffer_size);
+}
+
+/*
+ * Return the kernel release string stored in the pstore record.
+ */
+static int
+pstore_xattr_get_user_uts_release(const struct xattr_handler *hdl,
+				  struct dentry *dentry, struct inode *inode,
+				  const char *name, void *buffer, size_t size)
+{
+	struct pstore_private *p = d_inode(dentry)->i_private;
+	struct pstore_record *record = p->record;
+	size_t len = strlen(record->uts_release) + 1;
+
+	if (buffer) {
+		if (size < len)
+			return -ERANGE;
+		memcpy(buffer, record->uts_release, len);
+	}
+	return len;
+}
+
+static const struct xattr_handler pstore_xattr_user_uts_release_handler = {
+	.name = "user.uts_release",
+	.get = pstore_xattr_get_user_uts_release,
+};
+
+static const struct xattr_handler *pstore_xattr_handlers[] = {
+	&pstore_xattr_user_uts_release_handler,
+	NULL
+};
+
 static void pstore_evict_inode(struct inode *inode)
 {
 	struct pstore_private	*p = inode->i_private;
@@ -210,6 +259,10 @@
 	free_pstore_private(p);
 }
 
+const struct inode_operations pstore_file_inode_operations = {
+	.listxattr	= pstore_xattr_list,
+};
+
 static const struct inode_operations pstore_dir_inode_operations = {
 	.lookup		= simple_lookup,
 	.unlink		= pstore_unlink,
@@ -359,9 +412,10 @@
 		return -ENOMEM;
 	inode->i_mode = S_IFREG | 0444;
 	inode->i_fop = &pstore_file_operations;
-	scnprintf(name, sizeof(name), "%s-%s-%llu%s",
+	scnprintf(name, sizeof(name), "%s-%s-%s%llu%s",
 			pstore_type_to_name(record->type),
-			record->psi->name, record->id,
+		        record->psi->name, record->old ? "old-" : "",
+		        record->id,
 			record->compressed ? ".enc.z" : "");
 
 	private = kzalloc(sizeof(*private), GFP_KERNEL);
@@ -374,6 +428,7 @@
 
 	private->dentry = dentry;
 	private->record = record;
+	inode->i_op = &pstore_file_inode_operations;
 	inode->i_size = private->total_size = size;
 	inode->i_private = private;
 
@@ -416,6 +471,7 @@
 	sb->s_magic		= PSTOREFS_MAGIC;
 	sb->s_op		= &pstore_ops;
 	sb->s_time_gran		= 1;
+	sb->s_xattr		= pstore_xattr_handlers;
 
 	parse_options(data);
 
diff -ruw linux-6.13.12/fs/pstore/ram.c linux-6.13.12-fbx/fs/pstore/ram.c
--- linux-6.13.12/fs/pstore/ram.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/pstore/ram.c	2025-09-25 17:40:36.747373078 +0200
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/mm.h>
+#include <generated/utsrelease.h>
 
 #include "internal.h"
 #include "ram_internal.h"
@@ -142,17 +143,31 @@
 
 	record->type = prz->type;
 	record->id = id;
+	record->old = prz->old_zone;
 
 	return prz;
 }
 
 static int ramoops_read_kmsg_hdr(char *buffer, struct timespec64 *time,
-				  bool *compressed)
+				 bool *compressed,
+				 char *rbuf, size_t rbuf_size)
 {
 	char data_type;
 	int header_length = 0;
+	char release[131];
 
-	if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
+	release[0] = 0;
+	if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c-VER%130s%n",
+		   (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
+		   release, &header_length) == 4) {
+		time->tv_nsec *= 1000;
+		if (data_type == 'C')
+			*compressed = true;
+		else
+			*compressed = false;
+		strim(release);
+
+	} else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lld.%lu-%c\n%n",
 		   (time64_t *)&time->tv_sec, &time->tv_nsec, &data_type,
 		   &header_length) == 3) {
 		time->tv_nsec *= 1000;
@@ -170,6 +185,8 @@
 		time->tv_nsec = 0;
 		*compressed = false;
 	}
+
+	strscpy(rbuf, release, rbuf_size);
 	return header_length;
 }
 
@@ -204,7 +221,9 @@
 			continue;
 		header_length = ramoops_read_kmsg_hdr(persistent_ram_old(prz),
 						      &record->time,
-						      &record->compressed);
+						      &record->compressed,
+						      record->uts_release,
+						      sizeof (record->uts_release));
 		/* Clear and skip this DMESG record if it has no valid header */
 		if (!header_length) {
 			persistent_ram_free_old(prz);
@@ -297,14 +316,15 @@
 static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz,
 				     struct pstore_record *record)
 {
-	char hdr[36]; /* "===="(4), %lld(20), "."(1), %06lu(6), "-%c\n"(3) */
+	char hdr[165]; /* "===="(4), %lld(20), "."(1), %06lu(6), "-%c"(2), "-VER%.127s\n"(4 + 127 + 1) */
 	size_t len;
 
 	len = scnprintf(hdr, sizeof(hdr),
-		RAMOOPS_KERNMSG_HDR "%lld.%06lu-%c\n",
+		RAMOOPS_KERNMSG_HDR "%lld.%06lu-%c-VER%s\n",
 		(time64_t)record->time.tv_sec,
 		record->time.tv_nsec / 1000,
-		record->compressed ? 'C' : 'D');
+		record->compressed ? 'C' : 'D',
+		UTS_RELEASE);
 	persistent_ram_write(prz, hdr, len);
 
 	return len;
@@ -486,7 +506,7 @@
 static int ramoops_init_przs(const char *name,
 			     struct device *dev, struct ramoops_context *cxt,
 			     struct persistent_ram_zone ***przs,
-			     phys_addr_t *paddr, size_t mem_sz,
+			     phys_addr_t *paddr, void *vaddr, size_t mem_sz,
 			     ssize_t record_size,
 			     unsigned int *cnt, u32 sig, u32 flags)
 {
@@ -551,7 +571,7 @@
 		else
 			label = kasprintf(GFP_KERNEL, "ramoops:%s(%d/%d)",
 					  name, i, *cnt - 1);
-		prz_ar[i] = persistent_ram_new(*paddr, zone_sz, sig,
+		prz_ar[i] = persistent_ram_new(*paddr, vaddr, zone_sz, sig,
 					       &cxt->ecc_info,
 					       cxt->memtype, flags, label);
 		kfree(label);
@@ -584,7 +604,7 @@
 static int ramoops_init_prz(const char *name,
 			    struct device *dev, struct ramoops_context *cxt,
 			    struct persistent_ram_zone **prz,
-			    phys_addr_t *paddr, size_t sz, u32 sig)
+			    phys_addr_t *paddr, void *vaddr, size_t sz, u32 sig)
 {
 	char *label;
 
@@ -599,7 +619,7 @@
 	}
 
 	label = kasprintf(GFP_KERNEL, "ramoops:%s", name);
-	*prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
+	*prz = persistent_ram_new(*paddr, vaddr, sz, sig, &cxt->ecc_info,
 				  cxt->memtype, PRZ_FLAG_ZAP_OLD, label);
 	kfree(label);
 	if (IS_ERR(*prz)) {
@@ -787,17 +807,20 @@
 	dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
 			- cxt->pmsg_size;
 	err = ramoops_init_przs("dmesg", dev, cxt, &cxt->dprzs, &paddr,
+				pdata->mem_ptr,
 				dump_mem_sz, cxt->record_size,
 				&cxt->max_dump_cnt, 0, 0);
 	if (err)
 		goto fail_init;
 
 	err = ramoops_init_prz("console", dev, cxt, &cxt->cprz, &paddr,
+			       pdata->mem_ptr,
 			       cxt->console_size, 0);
 	if (err)
 		goto fail_init;
 
 	err = ramoops_init_prz("pmsg", dev, cxt, &cxt->mprz, &paddr,
+			       pdata->mem_ptr,
 				cxt->pmsg_size, 0);
 	if (err)
 		goto fail_init;
@@ -806,6 +829,7 @@
 				? nr_cpu_ids
 				: 1;
 	err = ramoops_init_przs("ftrace", dev, cxt, &cxt->fprzs, &paddr,
+				pdata->mem_ptr,
 				cxt->ftrace_size, -1,
 				&cxt->max_ftrace_cnt, LINUX_VERSION_CODE,
 				(cxt->flags & RAMOOPS_FLAG_FTRACE_PER_CPU)
diff -ruw linux-6.13.12/fs/pstore/ram_core.c linux-6.13.12-fbx/fs/pstore/ram_core.c
--- linux-6.13.12/fs/pstore/ram_core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/pstore/ram_core.c	2025-09-25 17:40:36.747373078 +0200
@@ -34,6 +34,7 @@
 	uint32_t    sig;
 	atomic_t    start;
 	atomic_t    size;
+	atomic_t    flags;
 	uint8_t     data[];
 };
 
@@ -393,6 +394,7 @@
 {
 	atomic_set(&prz->buffer->start, 0);
 	atomic_set(&prz->buffer->size, 0);
+	atomic_set(&prz->buffer->flags, 0);
 	persistent_ram_update_header_ecc(prz);
 }
 
@@ -479,13 +481,16 @@
 	return va;
 }
 
-static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
+static int persistent_ram_buffer_map(phys_addr_t start, void *vaddr,
+				     phys_addr_t size,
 		struct persistent_ram_zone *prz, int memtype)
 {
 	prz->paddr = start;
 	prz->size = size;
 
-	if (pfn_valid(start >> PAGE_SHIFT))
+	if (vaddr)
+		prz->vaddr = vaddr;
+	else if (pfn_valid(start >> PAGE_SHIFT))
 		prz->vaddr = persistent_ram_vmap(start, size, memtype);
 	else
 		prz->vaddr = persistent_ram_iomap(start, size, memtype,
@@ -532,6 +537,15 @@
 			pr_debug("found existing buffer, size %zu, start %zu\n",
 				 buffer_size(prz), buffer_start(prz));
 			persistent_ram_save_old(prz);
+
+			if (atomic_read(&prz->buffer->flags) > 0) {
+				pr_info("old ramoops!\n");
+				prz->old_zone = true;
+			} else {
+				pr_info("fresh ramoops!\n");
+				atomic_set(&prz->buffer->flags, 1);
+			}
+			persistent_ram_update_header_ecc(prz);
 		}
 	} else {
 		pr_debug("no valid data in buffer (sig = 0x%08x)\n",
@@ -581,7 +595,8 @@
 	*_prz = NULL;
 }
 
-struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+struct persistent_ram_zone *persistent_ram_new(phys_addr_t start,
+					       void *vaddr, size_t size,
 			u32 sig, struct persistent_ram_ecc_info *ecc_info,
 			unsigned int memtype, u32 flags, char *label)
 {
@@ -601,7 +616,7 @@
 	if (!prz->label)
 		goto err;
 
-	ret = persistent_ram_buffer_map(start, size, prz, memtype);
+	ret = persistent_ram_buffer_map(start, vaddr, size, prz, memtype);
 	if (ret)
 		goto err;
 
diff -ruw linux-6.13.12/fs/pstore/ram_internal.h linux-6.13.12-fbx/fs/pstore/ram_internal.h
--- linux-6.13.12/fs/pstore/ram_internal.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/pstore/ram_internal.h	2025-09-25 17:40:36.751373097 +0200
@@ -55,6 +55,10 @@
  * @old_log_size:
  *	bytes contained in @old_log
  *
+ * @old_zone:
+ *      set when the zone's contents were already flagged as read by a
+ *      previous boot (i.e. stale records surviving from an earlier
+ *      crash); clear when records are read for the first time this boot.
  */
 struct persistent_ram_zone {
 	phys_addr_t paddr;
@@ -77,9 +81,12 @@
 
 	char *old_log;
 	size_t old_log_size;
+
+	bool old_zone;
 };
 
-struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+struct persistent_ram_zone *persistent_ram_new(phys_addr_t start,
+					       void *addr, size_t size,
 			u32 sig, struct persistent_ram_ecc_info *ecc_info,
 			unsigned int memtype, u32 flags, char *label);
 void persistent_ram_free(struct persistent_ram_zone **_prz);
diff -ruw linux-6.13.12/fs/smb/server/Kconfig linux-6.13.12-fbx/fs/smb/server/Kconfig
--- linux-6.13.12/fs/smb/server/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/Kconfig	2025-09-25 17:40:36.775373216 +0200
@@ -57,6 +57,16 @@
 	  SMB Direct allows transferring SMB packets over RDMA. If unsure,
 	  say N.
 
+config SMB_INSECURE_SERVER
+	bool "Support for insecure SMB1/CIFS and SMB2.0 protocols"
+	depends on SMB_SERVER
+	select CRYPTO_MD4
+	default n
+
+	help
+	  This enables the deprecated, insecure protocol dialects SMB1/CIFS
+	  and SMB2.0. If unsure, say N.
+
 endif
 
 config SMB_SERVER_CHECK_CAP_NET_ADMIN
diff -ruw linux-6.13.12/fs/smb/server/Makefile linux-6.13.12-fbx/fs/smb/server/Makefile
--- linux-6.13.12/fs/smb/server/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/Makefile	2025-09-25 17:40:36.775373216 +0200
@@ -18,3 +18,4 @@
 $(obj)/ksmbd_spnego_negtokentarg.asn1.o: $(obj)/ksmbd_spnego_negtokentarg.asn1.c $(obj)/ksmbd_spnego_negtokentarg.asn1.h
 
 ksmbd-$(CONFIG_SMB_SERVER_SMBDIRECT) += transport_rdma.o
+ksmbd-$(CONFIG_SMB_INSECURE_SERVER) += smb1pdu.o smb1ops.o smb1misc.o
diff -ruw linux-6.13.12/fs/smb/server/auth.c linux-6.13.12-fbx/fs/smb/server/auth.c
--- linux-6.13.12/fs/smb/server/auth.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/auth.c	2025-09-25 17:40:36.775373216 +0200
@@ -68,6 +68,126 @@
 {
 	memcpy(buf, NEGOTIATE_GSS_HEADER, AUTH_GSS_LENGTH);
 }
+#ifdef CONFIG_SMB_INSECURE_SERVER
+static void
+str_to_key(unsigned char *str, unsigned char *key)
+{
+	int i;
+
+	key[0] = str[0] >> 1;
+	key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2);
+	key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3);
+	key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4);
+	key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5);
+	key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6);
+	key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7);
+	key[7] = str[6] & 0x7F;
+	for (i = 0; i < 8; i++)
+		key[i] = (key[i] << 1);
+}
+
+static int
+smbhash(unsigned char *out, const unsigned char *in, unsigned char *key)
+{
+	unsigned char key2[8];
+	struct des_ctx ctx;
+
+	if (fips_enabled) {
+		ksmbd_debug(AUTH, "FIPS compliance enabled: DES not permitted\n");
+		return -ENOENT;
+	}
+
+	str_to_key(key, key2);
+	des_expand_key(&ctx, key2, DES_KEY_SIZE);
+	des_encrypt(&ctx, out, in);
+	memzero_explicit(&ctx, sizeof(ctx));
+	return 0;
+}
+
+static int ksmbd_enc_p24(unsigned char *p21, const unsigned char *c8, unsigned char *p24)
+{
+	int rc;
+
+	rc = smbhash(p24, c8, p21);
+	if (rc)
+		return rc;
+	rc = smbhash(p24 + 8, c8, p21 + 7);
+	if (rc)
+		return rc;
+	return smbhash(p24 + 16, c8, p21 + 14);
+}
+
+/* produce a md4 message digest from data of length n bytes */
+static int ksmbd_enc_md4(unsigned char *md4_hash, unsigned char *link_str,
+			 int link_len)
+{
+	int rc;
+	struct ksmbd_crypto_ctx *ctx;
+
+	ctx = ksmbd_crypto_ctx_find_md4();
+	if (!ctx) {
+		ksmbd_debug(AUTH, "Crypto md4 allocation error\n");
+		return -ENOMEM;
+	}
+
+	rc = crypto_shash_init(CRYPTO_MD4(ctx));
+	if (rc) {
+		ksmbd_debug(AUTH, "Could not init md4 shash\n");
+		goto out;
+	}
+
+	rc = crypto_shash_update(CRYPTO_MD4(ctx), link_str, link_len);
+	if (rc) {
+		ksmbd_debug(AUTH, "Could not update with link_str\n");
+		goto out;
+	}
+
+	rc = crypto_shash_final(CRYPTO_MD4(ctx), md4_hash);
+	if (rc)
+		ksmbd_debug(AUTH, "Could not generate md4 hash\n");
+out:
+	ksmbd_release_crypto_ctx(ctx);
+	return rc;
+}
+
+static int ksmbd_enc_update_sess_key(unsigned char *md5_hash, char *nonce,
+				     char *server_challenge, int len)
+{
+	int rc;
+	struct ksmbd_crypto_ctx *ctx;
+
+	ctx = ksmbd_crypto_ctx_find_md5();
+	if (!ctx) {
+		ksmbd_debug(AUTH, "Crypto md5 allocation error\n");
+		return -ENOMEM;
+	}
+
+	rc = crypto_shash_init(CRYPTO_MD5(ctx));
+	if (rc) {
+		ksmbd_debug(AUTH, "Could not init md5 shash\n");
+		goto out;
+	}
+
+	rc = crypto_shash_update(CRYPTO_MD5(ctx), server_challenge, len);
+	if (rc) {
+		ksmbd_debug(AUTH, "Could not update with challenge\n");
+		goto out;
+	}
+
+	rc = crypto_shash_update(CRYPTO_MD5(ctx), nonce, len);
+	if (rc) {
+		ksmbd_debug(AUTH, "Could not update with nonce\n");
+		goto out;
+	}
+
+	rc = crypto_shash_final(CRYPTO_MD5(ctx), md5_hash);
+	if (rc)
+		ksmbd_debug(AUTH, "Could not generate md5 hash\n");
+out:
+	ksmbd_release_crypto_ctx(ctx);
+	return rc;
+}
+#endif
 
 /**
  * ksmbd_gen_sess_key() - function to generate session key
@@ -207,6 +327,45 @@
 }
 
 /**
+ * ksmbd_auth_ntlm() - NTLM authentication handler
+ * @sess:	session of connection
+ * @pw_buf:	NTLM challenge response from the client
+ * @cryptkey:	server challenge previously sent to the client
+ *
+ * Return:	0 on success, error number on error
+ */
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int ksmbd_auth_ntlm(struct ksmbd_session *sess, char *pw_buf, char *cryptkey)
+{
+	int rc;
+	unsigned char p21[21];
+	char key[CIFS_AUTH_RESP_SIZE];
+
+	memset(p21, '\0', 21);
+	memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);
+	rc = ksmbd_enc_p24(p21, cryptkey, key);
+	if (rc) {
+		pr_err("password processing failed\n");
+		return rc;
+	}
+
+	ksmbd_enc_md4(sess->sess_key, user_passkey(sess->user),
+		      CIFS_SMB1_SESSKEY_SIZE);
+	memcpy(sess->sess_key + CIFS_SMB1_SESSKEY_SIZE, key,
+	       CIFS_AUTH_RESP_SIZE);
+	sess->sequence_number = 1;
+
+	if (strncmp(pw_buf, key, CIFS_AUTH_RESP_SIZE) != 0) {
+		ksmbd_debug(AUTH, "ntlmv1 authentication failed\n");
+		return -EINVAL;
+	}
+
+	ksmbd_debug(AUTH, "ntlmv1 authentication pass\n");
+	return 0;
+}
+#endif
+
+/**
  * ksmbd_auth_ntlmv2() - NTLMv2 authentication handler
  * @conn:		connection
  * @sess:		session of connection
@@ -292,6 +451,46 @@
 	return rc;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+/**
+ * __ksmbd_auth_ntlmv2() - NTLM2(extended security) authentication handler
+ * @sess:	session of connection
+ * @client_nonce:	client nonce from LM response.
+ * @ntlm_resp:		ntlm response data from client.
+ *
+ * Return:	0 on success, error number on error
+ */
+static int __ksmbd_auth_ntlmv2(struct ksmbd_session *sess,
+			       char *client_nonce,
+			       char *ntlm_resp,
+			       char *cryptkey)
+{
+	char sess_key[CIFS_SMB1_SESSKEY_SIZE] = {0};
+	int rc;
+	unsigned char p21[21];
+	char key[CIFS_AUTH_RESP_SIZE];
+
+	rc = ksmbd_enc_update_sess_key(sess_key, client_nonce, cryptkey, 8);
+	if (rc) {
+		pr_err("password processing failed\n");
+		goto out;
+	}
+
+	memset(p21, '\0', 21);
+	memcpy(p21, user_passkey(sess->user), CIFS_NTHASH_SIZE);
+	rc = ksmbd_enc_p24(p21, sess_key, key);
+	if (rc) {
+		pr_err("password processing failed\n");
+		goto out;
+	}
+
+	if (memcmp(ntlm_resp, key, CIFS_AUTH_RESP_SIZE) != 0)
+		rc = -EINVAL;
+out:
+	return rc;
+}
+#endif
+
 /**
  * ksmbd_decode_ntlmssp_auth_blob() - helper function to construct
  * authenticate blob
@@ -309,6 +508,10 @@
 	char *domain_name;
 	unsigned int nt_off, dn_off;
 	unsigned short nt_len, dn_len;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	unsigned int lm_off;
+	unsigned short lm_len;
+#endif
 	int ret;
 
 	if (blob_len < sizeof(struct authenticate_message)) {
@@ -332,6 +535,26 @@
 	    nt_len < CIFS_ENCPWD_SIZE)
 		return -EINVAL;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	lm_off = le32_to_cpu(authblob->LmChallengeResponse.BufferOffset);
+	lm_len = le16_to_cpu(authblob->LmChallengeResponse.Length);
+	if (blob_len < (u64)lm_off + lm_len)
+		return -EINVAL;
+
+	/* process NTLM authentication */
+	if (nt_len == CIFS_AUTH_RESP_SIZE) {
+		if (le32_to_cpu(authblob->NegotiateFlags) &
+		    NTLMSSP_NEGOTIATE_EXTENDED_SEC)
+			return __ksmbd_auth_ntlmv2(sess,
+						   (char *)authblob + lm_off,
+						   (char *)authblob + nt_off,
+						   conn->ntlmssp.cryptkey);
+		else
+			return ksmbd_auth_ntlm(sess, (char *)authblob +
+				nt_off, conn->ntlmssp.cryptkey);
+	}
+#endif
+
 	/* TODO : use domain name that imported from configuration file */
 	domain_name = smb_strndup_from_utf16((const char *)authblob + dn_off,
 					     dn_len, true, conn->local_nls);
@@ -550,7 +773,19 @@
 		retval = -ENOMEM;
 		goto out;
 	}
+
+	if (!sess->user) {
+		/* First successful authentication */
 	sess->user = user;
+	} else {
+		if (!ksmbd_compare_user(sess->user, user)) {
+			ksmbd_debug(AUTH, "different user tried to reuse session\n");
+			retval = -EPERM;
+			ksmbd_free_user(user);
+			goto out;
+		}
+		ksmbd_free_user(user);
+	}
 
 	memcpy(sess->sess_key, resp->payload, resp->session_key_len);
 	memcpy(out_blob, resp->payload + resp->session_key_len,
@@ -569,6 +804,60 @@
 }
 #endif
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+/**
+ * ksmbd_sign_smb1_pdu() - function to generate SMB1 packet signing
+ * @sess:	session of connection
+ * @iov:        buffer iov array
+ * @n_vec:	number of iovecs
+ * @sig:        signature value generated for client request packet
+ *
+ */
+int ksmbd_sign_smb1_pdu(struct ksmbd_session *sess, struct kvec *iov, int n_vec,
+			char *sig)
+{
+	struct ksmbd_crypto_ctx *ctx;
+	int rc, i;
+
+	ctx = ksmbd_crypto_ctx_find_md5();
+	if (!ctx) {
+		ksmbd_debug(AUTH, "could not crypto alloc md5\n");
+		return -ENOMEM;
+	}
+
+	rc = crypto_shash_init(CRYPTO_MD5(ctx));
+	if (rc) {
+		ksmbd_debug(AUTH, "md5 init error %d\n", rc);
+		goto out;
+	}
+
+	rc = crypto_shash_update(CRYPTO_MD5(ctx), sess->sess_key, 40);
+	if (rc) {
+		ksmbd_debug(AUTH, "md5 update error %d\n", rc);
+		goto out;
+	}
+
+	for (i = 0; i < n_vec; i++) {
+		rc = crypto_shash_update(CRYPTO_MD5(ctx),
+					 iov[i].iov_base,
+					 iov[i].iov_len);
+		if (rc) {
+			ksmbd_debug(AUTH, "md5 update error %d\n", rc);
+			goto out;
+		}
+	}
+
+	rc = crypto_shash_final(CRYPTO_MD5(ctx), sig);
+	if (rc)
+		ksmbd_debug(AUTH, "md5 generation error %d\n", rc);
+
+out:
+	ksmbd_release_crypto_ctx(ctx);
+	return rc;
+}
+#endif
+
+
 /**
  * ksmbd_sign_smb2_pdu() - function to generate packet signing
  * @conn:	connection
diff -ruw linux-6.13.12/fs/smb/server/auth.h linux-6.13.12-fbx/fs/smb/server/auth.h
--- linux-6.13.12/fs/smb/server/auth.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/auth.h	2025-09-25 17:40:36.775373216 +0200
@@ -52,6 +52,11 @@
 				   struct ksmbd_conn *conn);
 int ksmbd_krb5_authenticate(struct ksmbd_session *sess, char *in_blob,
 			    int in_len,	char *out_blob, int *out_len);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int ksmbd_auth_ntlm(struct ksmbd_session *sess, char *pw_buf, char *cryptkey);
+int ksmbd_sign_smb1_pdu(struct ksmbd_session *sess, struct kvec *iov, int n_vec,
+			char *sig);
+#endif
 int ksmbd_sign_smb2_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
 			int n_vec, char *sig);
 int ksmbd_sign_smb3_pdu(struct ksmbd_conn *conn, char *key, struct kvec *iov,
diff -ruw linux-6.13.12/fs/smb/server/connection.c linux-6.13.12-fbx/fs/smb/server/connection.c
--- linux-6.13.12/fs/smb/server/connection.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/connection.c	2025-09-25 17:40:36.775373216 +0200
@@ -10,6 +10,9 @@
 
 #include "server.h"
 #include "smb_common.h"
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#include "smb1pdu.h"
+#endif
 #include "mgmt/ksmbd_ida.h"
 #include "connection.h"
 #include "transport_tcp.h"
@@ -115,9 +118,20 @@
 {
 	struct ksmbd_conn *conn = work->conn;
 	struct list_head *requests_queue = NULL;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	struct smb2_hdr *hdr = work->request_buf;
 
+	if (hdr->ProtocolId == SMB2_PROTO_NUMBER) {
 	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
 		requests_queue = &conn->requests;
+	} else {
+		if (conn->ops->get_cmd_val(work) != SMB_COM_NT_CANCEL)
+			requests_queue = &conn->requests;
+	}
+#else
+	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE)
+		requests_queue = &conn->requests;
+#endif
 
 	atomic_inc(&conn->req_running);
 	if (requests_queue) {
@@ -220,6 +234,12 @@
 	if (work->send_no_response)
 		return 0;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (!work->iov_idx)
+		ksmbd_iov_pin_rsp(work, (char *)work->response_buf + 4,
+				  work->response_offset);
+#endif
+
 	if (!work->iov_idx)
 		return -EINVAL;
 
@@ -295,7 +315,11 @@
 	return true;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#define SMB1_MIN_SUPPORTED_HEADER_SIZE SMB_HEADER_SIZE
+#else
 #define SMB1_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb_hdr))
+#endif
 #define SMB2_MIN_SUPPORTED_HEADER_SIZE (sizeof(struct smb2_hdr) + 4)
 
 /**
@@ -351,8 +375,9 @@
 			max_allowed_pdu_size = SMB3_MAX_MSGSIZE;
 
 		if (pdu_size > max_allowed_pdu_size) {
-			pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) on connection(%d)\n",
+			pr_err_ratelimited("PDU length(%u) exceeded maximum allowed pdu size(%u) from %pISc (status=%d)\n",
 					pdu_size, max_allowed_pdu_size,
+					KSMBD_TCP_PEER_SOCKADDR(conn),
 					READ_ONCE(conn->status));
 			break;
 		}
diff -ruw linux-6.13.12/fs/smb/server/crypto_ctx.c linux-6.13.12-fbx/fs/smb/server/crypto_ctx.c
--- linux-6.13.12/fs/smb/server/crypto_ctx.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/crypto_ctx.c	2025-09-25 17:40:36.775373216 +0200
@@ -81,6 +81,14 @@
 	case CRYPTO_SHASH_SHA512:
 		tfm = crypto_alloc_shash("sha512", 0, 0);
 		break;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	case CRYPTO_SHASH_MD4:
+		tfm = crypto_alloc_shash("md4", 0, 0);
+		break;
+	case CRYPTO_SHASH_MD5:
+		tfm = crypto_alloc_shash("md5", 0, 0);
+		break;
+#endif
 	default:
 		return NULL;
 	}
@@ -207,6 +215,17 @@
 {
 	return ____crypto_shash_ctx_find(CRYPTO_SHASH_SHA512);
 }
+#ifdef CONFIG_SMB_INSECURE_SERVER
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void)
+{
+	return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD4);
+}
+
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void)
+{
+	return ____crypto_shash_ctx_find(CRYPTO_SHASH_MD5);
+}
+#endif
 
 static struct ksmbd_crypto_ctx *____crypto_aead_ctx_find(int id)
 {
diff -ruw linux-6.13.12/fs/smb/server/crypto_ctx.h linux-6.13.12-fbx/fs/smb/server/crypto_ctx.h
--- linux-6.13.12/fs/smb/server/crypto_ctx.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/crypto_ctx.h	2025-09-25 17:40:36.775373216 +0200
@@ -15,6 +15,10 @@
 	CRYPTO_SHASH_CMACAES,
 	CRYPTO_SHASH_SHA256,
 	CRYPTO_SHASH_SHA512,
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	CRYPTO_SHASH_MD4,
+	CRYPTO_SHASH_MD5,
+#endif
 	CRYPTO_SHASH_MAX,
 };
 
@@ -41,6 +45,10 @@
 #define CRYPTO_CMACAES(c)	((c)->desc[CRYPTO_SHASH_CMACAES])
 #define CRYPTO_SHA256(c)	((c)->desc[CRYPTO_SHASH_SHA256])
 #define CRYPTO_SHA512(c)	((c)->desc[CRYPTO_SHASH_SHA512])
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#define CRYPTO_MD4(c)		((c)->desc[CRYPTO_SHASH_MD4])
+#define CRYPTO_MD5(c)		((c)->desc[CRYPTO_SHASH_MD5])
+#endif
 
 #define CRYPTO_HMACMD5_TFM(c)	((c)->desc[CRYPTO_SHASH_HMACMD5]->tfm)
 #define CRYPTO_HMACSHA256_TFM(c)\
@@ -48,6 +56,10 @@
 #define CRYPTO_CMACAES_TFM(c)	((c)->desc[CRYPTO_SHASH_CMACAES]->tfm)
 #define CRYPTO_SHA256_TFM(c)	((c)->desc[CRYPTO_SHASH_SHA256]->tfm)
 #define CRYPTO_SHA512_TFM(c)	((c)->desc[CRYPTO_SHASH_SHA512]->tfm)
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#define CRYPTO_MD4_TFM(c)	((c)->desc[CRYPTO_SHASH_MD4]->tfm)
+#define CRYPTO_MD5_TFM(c)	((c)->desc[CRYPTO_SHASH_MD5]->tfm)
+#endif
 
 #define CRYPTO_GCM(c)		((c)->ccmaes[CRYPTO_AEAD_AES_GCM])
 #define CRYPTO_CCM(c)		((c)->ccmaes[CRYPTO_AEAD_AES_CCM])
@@ -58,6 +70,10 @@
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_cmacaes(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha512(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_sha256(void);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md4(void);
+struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_md5(void);
+#endif
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_gcm(void);
 struct ksmbd_crypto_ctx *ksmbd_crypto_ctx_find_ccm(void);
 void ksmbd_crypto_destroy(void);
diff -ruw linux-6.13.12/fs/smb/server/ksmbd_work.h linux-6.13.12-fbx/fs/smb/server/ksmbd_work.h
--- linux-6.13.12/fs/smb/server/ksmbd_work.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/ksmbd_work.h	2025-09-25 17:40:36.779373236 +0200
@@ -35,6 +35,9 @@
 	void                            *request_buf;
 	/* Response buffer */
 	void                            *response_buf;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	size_t				response_offset;
+#endif
 
 	struct list_head		aux_read_list;
 
diff -ruw linux-6.13.12/fs/smb/server/mgmt/ksmbd_ida.c linux-6.13.12-fbx/fs/smb/server/mgmt/ksmbd_ida.c
--- linux-6.13.12/fs/smb/server/mgmt/ksmbd_ida.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/mgmt/ksmbd_ida.c	2025-09-25 17:40:36.779373236 +0200
@@ -6,6 +6,18 @@
 #include "ksmbd_ida.h"
 #include "../glob.h"
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int ksmbd_acquire_smb1_tid(struct ida *ida)
+{
+	return ida_alloc_range(ida, 1, 0xFFFE, KSMBD_DEFAULT_GFP);
+}
+
+int ksmbd_acquire_smb1_uid(struct ida *ida)
+{
+	return ida_alloc_range(ida, 1, 0xFFFD, KSMBD_DEFAULT_GFP);
+}
+#endif
+
 int ksmbd_acquire_smb2_tid(struct ida *ida)
 {
 	return ida_alloc_range(ida, 1, 0xFFFFFFFE, KSMBD_DEFAULT_GFP);
diff -ruw linux-6.13.12/fs/smb/server/mgmt/ksmbd_ida.h linux-6.13.12-fbx/fs/smb/server/mgmt/ksmbd_ida.h
--- linux-6.13.12/fs/smb/server/mgmt/ksmbd_ida.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/mgmt/ksmbd_ida.h	2025-09-25 17:40:36.779373236 +0200
@@ -16,6 +16,9 @@
  *    The value 0xFFFF is used to specify all TIDs or no TID,
  *    depending upon the context in which it is used.
  */
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int ksmbd_acquire_smb1_tid(struct ida *ida);
+#endif
 int ksmbd_acquire_smb2_tid(struct ida *ida);
 
 /*
@@ -25,6 +28,9 @@
  *    valid UID.<21> All other possible values for a UID, excluding
  *    zero (0x0000), are valid.
  */
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int ksmbd_acquire_smb1_uid(struct ida *ida);
+#endif
 int ksmbd_acquire_smb2_uid(struct ida *ida);
 int ksmbd_acquire_async_msg_id(struct ida *ida);
 
diff -ruw linux-6.13.12/fs/smb/server/mgmt/user_session.c linux-6.13.12-fbx/fs/smb/server/mgmt/user_session.c
--- linux-6.13.12/fs/smb/server/mgmt/user_session.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/mgmt/user_session.c	2025-09-25 17:40:36.779373236 +0200
@@ -59,10 +59,12 @@
 	struct ksmbd_session_rpc *entry;
 	long index;
 
+	down_write(&sess->rpc_lock);
 	xa_for_each(&sess->rpc_handle_list, index, entry) {
 		xa_erase(&sess->rpc_handle_list, index);
 		__session_rpc_close(sess, entry);
 	}
+	up_write(&sess->rpc_lock);
 
 	xa_destroy(&sess->rpc_handle_list);
 }
@@ -92,7 +94,7 @@
 {
 	struct ksmbd_session_rpc *entry, *old;
 	struct ksmbd_rpc_command *resp;
-	int method;
+	int method, id;
 
 	method = __rpc_method(rpc_name);
 	if (!method)
@@ -102,26 +104,29 @@
 	if (!entry)
 		return -ENOMEM;
 
+	down_read(&sess->rpc_lock);
 	entry->method = method;
-	entry->id = ksmbd_ipc_id_alloc();
-	if (entry->id < 0)
+	entry->id = id = ksmbd_ipc_id_alloc();
+	if (id < 0)
 		goto free_entry;
-	old = xa_store(&sess->rpc_handle_list, entry->id, entry, KSMBD_DEFAULT_GFP);
+	old = xa_store(&sess->rpc_handle_list, id, entry, KSMBD_DEFAULT_GFP);
 	if (xa_is_err(old))
 		goto free_id;
 
-	resp = ksmbd_rpc_open(sess, entry->id);
+	resp = ksmbd_rpc_open(sess, id);
 	if (!resp)
 		goto erase_xa;
 
+	up_read(&sess->rpc_lock);
 	kvfree(resp);
-	return entry->id;
+	return id;
 erase_xa:
 	xa_erase(&sess->rpc_handle_list, entry->id);
 free_id:
 	ksmbd_rpc_id_free(entry->id);
 free_entry:
 	kfree(entry);
+	up_read(&sess->rpc_lock);
 	return -EINVAL;
 }
 
@@ -129,9 +134,11 @@
 {
 	struct ksmbd_session_rpc *entry;
 
+	down_write(&sess->rpc_lock);
 	entry = xa_erase(&sess->rpc_handle_list, id);
 	if (entry)
 		__session_rpc_close(sess, entry);
+	up_write(&sess->rpc_lock);
 }
 
 int ksmbd_session_rpc_method(struct ksmbd_session *sess, int id)
@@ -186,7 +193,12 @@
 		     time_after(jiffies,
 			       sess->last_active + SMB2_SESSION_TIMEOUT))) {
 			xa_erase(&conn->sessions, sess->id);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+			if (hash_hashed(&sess->hlist))
+				hash_del(&sess->hlist);
+#else
 			hash_del(&sess->hlist);
+#endif
 			ksmbd_session_destroy(sess);
 			continue;
 		}
@@ -229,7 +241,12 @@
 		hash_for_each_safe(sessions_table, bkt, tmp, sess, hlist) {
 			if (!ksmbd_chann_del(conn, sess) &&
 			    xa_empty(&sess->ksmbd_chann_list)) {
+#ifdef CONFIG_SMB_INSECURE_SERVER
+			if (hash_hashed(&sess->hlist))
+				hash_del(&sess->hlist);
+#else
 				hash_del(&sess->hlist);
+#endif
 				down_write(&conn->session_lock);
 				xa_erase(&conn->sessions, sess->id);
 				up_write(&conn->session_lock);
@@ -252,7 +269,12 @@
 		ksmbd_chann_del(conn, sess);
 		if (xa_empty(&sess->ksmbd_chann_list)) {
 			xa_erase(&conn->sessions, sess->id);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+			if (hash_hashed(&sess->hlist))
 			hash_del(&sess->hlist);
+#else
+			hash_del(&sess->hlist);
+#endif
 			if (atomic_dec_and_test(&sess->refcnt))
 				ksmbd_session_destroy(sess);
 		}
@@ -405,6 +427,18 @@
 	return NULL;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+static int __init_smb1_session(struct ksmbd_session *sess)
+{
+	int id = ksmbd_acquire_smb1_uid(&session_ida);
+
+	if (id < 0)
+		return -EINVAL;
+	sess->id = id;
+	return 0;
+}
+#endif
+
 static int __init_smb2_session(struct ksmbd_session *sess)
 {
 	int id = ksmbd_acquire_smb2_uid(&session_ida);
@@ -420,8 +454,10 @@
 	struct ksmbd_session *sess;
 	int ret;
 
+#ifndef CONFIG_SMB_INSECURE_SERVER
 	if (protocol != CIFDS_SESSION_FLAG_SMB2)
 		return NULL;
+#endif
 
 	sess = kzalloc(sizeof(struct ksmbd_session), KSMBD_DEFAULT_GFP);
 	if (!sess)
@@ -439,8 +475,22 @@
 	sess->sequence_number = 1;
 	rwlock_init(&sess->tree_conns_lock);
 	atomic_set(&sess->refcnt, 2);
+	init_rwsem(&sess->rpc_lock);
 
+	switch (protocol) {
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	case CIFDS_SESSION_FLAG_SMB1:
+		ret = __init_smb1_session(sess);
+		break;
+#endif
+	case CIFDS_SESSION_FLAG_SMB2:
 	ret = __init_smb2_session(sess);
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
 	if (ret)
 		goto error;
 
@@ -457,6 +507,13 @@
 	return NULL;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+struct ksmbd_session *ksmbd_smb1_session_create(void)
+{
+	return __session_create(CIFDS_SESSION_FLAG_SMB1);
+}
+#endif
+
 struct ksmbd_session *ksmbd_smb2_session_create(void)
 {
 	return __session_create(CIFDS_SESSION_FLAG_SMB2);
@@ -466,6 +523,10 @@
 {
 	int id = -EINVAL;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (test_session_flag(sess, CIFDS_SESSION_FLAG_SMB1))
+		id = ksmbd_acquire_smb1_tid(&sess->tree_conn_ida);
+#endif
 	if (test_session_flag(sess, CIFDS_SESSION_FLAG_SMB2))
 		id = ksmbd_acquire_smb2_tid(&sess->tree_conn_ida);
 
diff -ruw linux-6.13.12/fs/smb/server/mgmt/user_session.h linux-6.13.12-fbx/fs/smb/server/mgmt/user_session.h
--- linux-6.13.12/fs/smb/server/mgmt/user_session.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/mgmt/user_session.h	2025-09-25 17:40:36.779373236 +0200
@@ -12,6 +12,9 @@
 #include "../smb_common.h"
 #include "../ntlmssp.h"
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#define CIFDS_SESSION_FLAG_SMB1		BIT(0)
+#endif
 #define CIFDS_SESSION_FLAG_SMB2		BIT(1)
 
 #define PREAUTH_HASHVALUE_SIZE		64
@@ -63,6 +66,7 @@
 	rwlock_t			tree_conns_lock;
 
 	atomic_t			refcnt;
+	struct rw_semaphore		rpc_lock;
 };
 
 static inline int test_session_flag(struct ksmbd_session *sess, int bit)
@@ -80,6 +84,9 @@
 	sess->flags &= ~bit;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+struct ksmbd_session *ksmbd_smb1_session_create(void);
+#endif
 struct ksmbd_session *ksmbd_smb2_session_create(void);
 
 void ksmbd_session_destroy(struct ksmbd_session *sess);
diff -ruw linux-6.13.12/fs/smb/server/oplock.c linux-6.13.12-fbx/fs/smb/server/oplock.c
--- linux-6.13.12/fs/smb/server/oplock.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/oplock.c	2025-09-25 17:40:36.779373236 +0200
@@ -16,6 +16,10 @@
 #include "mgmt/share_config.h"
 #include "mgmt/tree_connect.h"
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#include "smb1pdu.h"
+#endif
+
 static LIST_HEAD(lease_table_list);
 static DEFINE_RWLOCK(lease_list_lock);
 
@@ -45,6 +49,9 @@
 	opinfo->pending_break = 0;
 	opinfo->fid = id;
 	opinfo->Tid = Tid;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	opinfo->is_smb2 = IS_SMB2(conn);
+#endif
 	INIT_LIST_HEAD(&opinfo->op_entry);
 	init_waitqueue_head(&opinfo->oplock_q);
 	init_waitqueue_head(&opinfo->oplock_brk);
@@ -129,14 +136,6 @@
 	kfree(opinfo);
 }
 
-static inline void opinfo_free_rcu(struct rcu_head *rcu_head)
-{
-	struct oplock_info *opinfo;
-
-	opinfo = container_of(rcu_head, struct oplock_info, rcu_head);
-	free_opinfo(opinfo);
-}
-
 struct oplock_info *opinfo_get(struct ksmbd_file *fp)
 {
 	struct oplock_info *opinfo;
@@ -154,11 +153,8 @@
 {
 	struct oplock_info *opinfo;
 
-	if (list_empty(&ci->m_op_list))
-		return NULL;
-
-	rcu_read_lock();
-	opinfo = list_first_or_null_rcu(&ci->m_op_list, struct oplock_info,
+	down_read(&ci->m_lock);
+	opinfo = list_first_entry_or_null(&ci->m_op_list, struct oplock_info,
 					op_entry);
 	if (opinfo) {
 		if (opinfo->conn == NULL ||
@@ -171,8 +167,7 @@
 			}
 		}
 	}
-
-	rcu_read_unlock();
+	up_read(&ci->m_lock);
 
 	return opinfo;
 }
@@ -185,7 +180,7 @@
 	if (!atomic_dec_and_test(&opinfo->refcount))
 		return;
 
-	call_rcu(&opinfo->rcu_head, opinfo_free_rcu);
+	free_opinfo(opinfo);
 }
 
 static void opinfo_add(struct oplock_info *opinfo)
@@ -193,7 +188,7 @@
 	struct ksmbd_inode *ci = opinfo->o_fp->f_ci;
 
 	down_write(&ci->m_lock);
-	list_add_rcu(&opinfo->op_entry, &ci->m_op_list);
+	list_add(&opinfo->op_entry, &ci->m_op_list);
 	up_write(&ci->m_lock);
 }
 
@@ -207,7 +202,7 @@
 		write_unlock(&lease_list_lock);
 	}
 	down_write(&ci->m_lock);
-	list_del_rcu(&opinfo->op_entry);
+	list_del(&opinfo->op_entry);
 	up_write(&ci->m_lock);
 }
 
@@ -245,6 +240,8 @@
 {
 	struct lease *lease = opinfo->o_lease;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (opinfo->is_smb2) {
 	if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
 	      opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) {
 		pr_err("bad oplock(0x%x)\n", opinfo->level);
@@ -256,6 +253,27 @@
 
 	if (opinfo->is_lease)
 		lease->state = lease->new_state;
+	} else {
+		if (!(opinfo->level == OPLOCK_EXCLUSIVE ||
+		      opinfo->level == OPLOCK_BATCH)) {
+			pr_err("bad oplock(0x%x)\n", opinfo->level);
+			return -EINVAL;
+		}
+		opinfo->level = OPLOCK_READ;
+	}
+#else
+	if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
+	      opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) {
+		pr_err("bad oplock(0x%x)\n", opinfo->level);
+		if (opinfo->is_lease)
+			pr_err("lease state(0x%x)\n", lease->state);
+		return -EINVAL;
+	}
+	opinfo->level = SMB2_OPLOCK_LEVEL_II;
+
+	if (opinfo->is_lease)
+		lease->state = lease->new_state;
+#endif
 	return 0;
 }
 
@@ -284,6 +302,27 @@
 {
 	struct lease *lease = opinfo->o_lease;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (opinfo->is_smb2) {
+		if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
+		      opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) {
+			pr_err("bad oplock(0x%x)\n", opinfo->level);
+			if (opinfo->is_lease)
+				pr_err("lease state(0x%x)\n", lease->state);
+			return -EINVAL;
+		}
+		opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+		if (opinfo->is_lease)
+			lease->state = lease->new_state;
+	} else {
+		if (!(opinfo->level == OPLOCK_EXCLUSIVE ||
+		      opinfo->level == OPLOCK_BATCH)) {
+			pr_err("bad oplock(0x%x)\n", opinfo->level);
+			return -EINVAL;
+		}
+		opinfo->level = OPLOCK_NONE;
+	}
+#else
 	if (!(opinfo->level == SMB2_OPLOCK_LEVEL_BATCH ||
 	      opinfo->level == SMB2_OPLOCK_LEVEL_EXCLUSIVE)) {
 		pr_err("bad oplock(0x%x)\n", opinfo->level);
@@ -294,6 +333,7 @@
 	opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
 	if (opinfo->is_lease)
 		lease->state = lease->new_state;
+#endif
 	return 0;
 }
 
@@ -307,6 +347,8 @@
 {
 	struct lease *lease = opinfo->o_lease;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (opinfo->is_smb2) {
 	if (opinfo->level != SMB2_OPLOCK_LEVEL_II) {
 		pr_err("bad oplock(0x%x)\n", opinfo->level);
 		if (opinfo->is_lease)
@@ -316,6 +358,24 @@
 	opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
 	if (opinfo->is_lease)
 		lease->state = lease->new_state;
+	} else {
+		if (opinfo->level != OPLOCK_READ) {
+			pr_err("bad oplock(0x%x)\n", opinfo->level);
+			return -EINVAL;
+		}
+		opinfo->level = OPLOCK_NONE;
+	}
+#else
+	if (opinfo->level != SMB2_OPLOCK_LEVEL_II) {
+		pr_err("bad oplock(0x%x)\n", opinfo->level);
+		if (opinfo->is_lease)
+			pr_err("lease state(0x%x)\n", lease->state);
+		return -EINVAL;
+	}
+	opinfo->level = SMB2_OPLOCK_LEVEL_NONE;
+	if (opinfo->is_lease)
+		lease->state = lease->new_state;
+#endif
 	return 0;
 }
 
@@ -419,10 +479,24 @@
 {
 	struct lease *lease = opinfo_new->o_lease;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (opinfo_new->is_smb2) {
 	if (req_oplock == SMB2_OPLOCK_LEVEL_BATCH)
 		opinfo_new->level = SMB2_OPLOCK_LEVEL_BATCH;
 	else
 		opinfo_new->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+	} else {
+		if (req_oplock == REQ_BATCHOPLOCK)
+			opinfo_new->level = OPLOCK_BATCH;
+		else
+			opinfo_new->level = OPLOCK_EXCLUSIVE;
+	}
+#else
+	if (req_oplock == SMB2_OPLOCK_LEVEL_BATCH)
+		opinfo_new->level = SMB2_OPLOCK_LEVEL_BATCH;
+	else
+		opinfo_new->level = SMB2_OPLOCK_LEVEL_EXCLUSIVE;
+#endif
 
 	if (lctx) {
 		lease->state = lctx->req_state;
@@ -442,7 +516,14 @@
 {
 	struct lease *lease = opinfo_new->o_lease;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (opinfo_new->is_smb2)
 	opinfo_new->level = SMB2_OPLOCK_LEVEL_II;
+	else
+		opinfo_new->level = OPLOCK_READ;
+#else
+	opinfo_new->level = SMB2_OPLOCK_LEVEL_II;
+#endif
 
 	if (lctx) {
 		lease->state = SMB2_LEASE_READ_CACHING_LE;
@@ -464,7 +545,14 @@
 {
 	struct lease *lease = opinfo_new->o_lease;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (opinfo_new->is_smb2)
 	opinfo_new->level = SMB2_OPLOCK_LEVEL_NONE;
+	else
+		opinfo_new->level = OPLOCK_NONE;
+#else
+	opinfo_new->level = SMB2_OPLOCK_LEVEL_NONE;
+#endif
 
 	if (lctx) {
 		lease->state = 0;
@@ -620,6 +708,119 @@
 	return 0;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+static inline int allocate_oplock_break_buf(struct ksmbd_work *work)
+{
+	work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, KSMBD_DEFAULT_GFP);
+	if (!work->response_buf)
+		return -ENOMEM;
+	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+	return 0;
+}
+
+/**
+ * __smb1_oplock_break_noti() - send smb1 oplock break cmd from conn
+ * to client
+ * @work:     smb work object
+ *
+ * There are two ways this function can be called. 1- while file open we break
+ * from exclusive/batch lock to levelII oplock and 2- while file write/truncate
+ * we break from levelII oplock to no oplock.
+ * work->request_buf contains oplock_info.
+ */
+static void __smb1_oplock_break_noti(struct work_struct *wk)
+{
+	struct ksmbd_work *work = container_of(wk, struct ksmbd_work, work);
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_hdr *rsp_hdr;
+	struct smb_com_lock_req *req;
+	struct oplock_info *opinfo = work->request_buf;
+
+	if (allocate_oplock_break_buf(work)) {
+		pr_err("smb_allocate_rsp_buf failed! ");
+		ksmbd_free_work_struct(work);
+		return;
+	}
+
+	/* Init response header */
+	rsp_hdr = work->response_buf;
+	/* wct is 8 for locking andx(18) */
+	memset(rsp_hdr, 0, sizeof(struct smb_hdr) + 18);
+	rsp_hdr->smb_buf_length =
+		cpu_to_be32(conn->vals->header_size - 4 + 18);
+	rsp_hdr->Protocol[0] = 0xFF;
+	rsp_hdr->Protocol[1] = 'S';
+	rsp_hdr->Protocol[2] = 'M';
+	rsp_hdr->Protocol[3] = 'B';
+
+	rsp_hdr->Command = SMB_COM_LOCKING_ANDX;
+	/* we know unicode, long file name and use nt error codes */
+	rsp_hdr->Flags2 = SMBFLG2_UNICODE | SMBFLG2_KNOWS_LONG_NAMES |
+		SMBFLG2_ERR_STATUS;
+	rsp_hdr->Uid = cpu_to_le16(work->sess->id);
+	rsp_hdr->Pid = cpu_to_le16(0xFFFF);
+	rsp_hdr->Mid = cpu_to_le16(0xFFFF);
+	rsp_hdr->Tid = cpu_to_le16(opinfo->Tid);
+	rsp_hdr->WordCount = 8;
+
+	/* Init locking request */
+	req = work->response_buf;
+
+	req->AndXCommand = 0xFF;
+	req->AndXReserved = 0;
+	req->AndXOffset = 0;
+	req->Fid = opinfo->fid;
+	req->LockType = LOCKING_ANDX_OPLOCK_RELEASE;
+	if (!opinfo->open_trunc &&
+	    (opinfo->level == OPLOCK_BATCH ||
+	     opinfo->level == OPLOCK_EXCLUSIVE))
+		req->OplockLevel = 1;
+	else
+		req->OplockLevel = 0;
+	req->Timeout = 0;
+	req->NumberOfUnlocks = 0;
+	req->ByteCount = 0;
+	ksmbd_debug(OPLOCK, "sending oplock break for fid %d lock level = %d\n",
+		    req->Fid, req->OplockLevel);
+
+	ksmbd_conn_write(work);
+	ksmbd_free_work_struct(work);
+}
+
+/**
+ * smb1_oplock_break_noti() - send smb1 exclusive/batch to level2 oplock
+ *		break command from server to client
+ * @opinfo:		oplock info object
+ *
+ *
+ * Return:      0 on success, otherwise error
+ */
+static int smb1_oplock_break_noti(struct oplock_info *opinfo)
+{
+	struct ksmbd_conn *conn = opinfo->conn;
+	struct ksmbd_work *work = ksmbd_alloc_work_struct();
+
+	if (!work)
+		return -ENOMEM;
+
+	work->request_buf = (char *)opinfo;
+	work->conn = conn;
+
+	ksmbd_conn_r_count_inc(conn);
+	if (opinfo->op_state == OPLOCK_ACK_WAIT) {
+		INIT_WORK(&work->work, __smb1_oplock_break_noti);
+		ksmbd_queue_work(work);
+
+		wait_for_break_ack(opinfo);
+	} else {
+		__smb1_oplock_break_noti(&work->work);
+		if (opinfo->level == OPLOCK_READ)
+			opinfo->level = OPLOCK_NONE;
+	}
+	return 0;
+}
+#endif
+
 /**
  * __smb2_oplock_break_noti() - send smb2 oplock break cmd from conn
  * to client
@@ -929,10 +1130,20 @@
 			brk_opinfo->op_state = OPLOCK_ACK_WAIT;
 	}
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (brk_opinfo->is_smb2)
 	if (brk_opinfo->is_lease)
 		err = smb2_lease_break_noti(brk_opinfo);
 	else
 		err = smb2_oplock_break_noti(brk_opinfo);
+	else
+		err = smb1_oplock_break_noti(brk_opinfo);
+#else
+	if (brk_opinfo->is_lease)
+		err = smb2_lease_break_noti(brk_opinfo);
+	else
+		err = smb2_oplock_break_noti(brk_opinfo);
+#endif
 
 	ksmbd_debug(OPLOCK, "oplock granted = %d\n", brk_opinfo->level);
 	if (brk_opinfo->op_state == OPLOCK_CLOSING)
@@ -1076,6 +1287,10 @@
 			     struct lease_ctx_info *lctx)
 {
 	switch (level) {
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	case REQ_OPLOCK:
+	case REQ_BATCHOPLOCK:
+#endif
 	case SMB2_OPLOCK_LEVEL_BATCH:
 	case SMB2_OPLOCK_LEVEL_EXCLUSIVE:
 		grant_write_oplock(opinfo, level, lctx);
@@ -1347,8 +1562,8 @@
 	ci = fp->f_ci;
 	op = opinfo_get(fp);
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(brk_op, &ci->m_op_list, op_entry) {
+	down_read(&ci->m_lock);
+	list_for_each_entry(brk_op, &ci->m_op_list, op_entry) {
 		if (brk_op->conn == NULL)
 			continue;
 
@@ -1358,7 +1573,35 @@
 		if (ksmbd_conn_releasing(brk_op->conn))
 			continue;
 
-		rcu_read_unlock();
+#ifdef CONFIG_SMB_INSECURE_SERVER
+		if (brk_op->is_smb2) {
+			if (brk_op->is_lease && (brk_op->o_lease->state &
+					(~(SMB2_LEASE_READ_CACHING_LE |
+					   SMB2_LEASE_HANDLE_CACHING_LE)))) {
+				ksmbd_debug(OPLOCK,
+					    "unexpected lease state(0x%x)\n",
+					    brk_op->o_lease->state);
+				goto next;
+			} else if (brk_op->level !=
+					SMB2_OPLOCK_LEVEL_II) {
+				ksmbd_debug(OPLOCK, "unexpected oplock(0x%x)\n",
+					    brk_op->level);
+				goto next;
+			}
+
+			/* Skip oplock being break to none */
+			if (brk_op->is_lease &&
+			    brk_op->o_lease->new_state == SMB2_LEASE_NONE_LE &&
+			    atomic_read(&brk_op->breaking_cnt))
+				goto next;
+		} else {
+			if (brk_op->level != OPLOCK_READ) {
+				ksmbd_debug(OPLOCK, "unexpected oplock(0x%x)\n",
+					    brk_op->level);
+				goto next;
+			}
+		}
+#else
 		if (brk_op->is_lease && (brk_op->o_lease->state &
 		    (~(SMB2_LEASE_READ_CACHING_LE |
 				SMB2_LEASE_HANDLE_CACHING_LE)))) {
@@ -1377,6 +1620,7 @@
 		    brk_op->o_lease->new_state == SMB2_LEASE_NONE_LE &&
 		    atomic_read(&brk_op->breaking_cnt))
 			goto next;
+#endif
 
 		if (op && op->is_lease && brk_op->is_lease &&
 		    !memcmp(conn->ClientGUID, brk_op->conn->ClientGUID,
@@ -1388,9 +1632,8 @@
 		oplock_break(brk_op, SMB2_OPLOCK_LEVEL_NONE, NULL);
 next:
 		opinfo_put(brk_op);
-		rcu_read_lock();
 	}
-	rcu_read_unlock();
+	up_read(&ci->m_lock);
 
 	if (op)
 		opinfo_put(op);
@@ -1507,7 +1750,7 @@
 
 		if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
 		    sizeof(struct create_lease_v2) - 4)
-			return NULL;
+			goto err_out;
 
 		memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
 		lreq->req_state = lc->lcontext.LeaseState;
@@ -1523,7 +1766,7 @@
 
 		if (le16_to_cpu(cc->DataOffset) + le32_to_cpu(cc->DataLength) <
 		    sizeof(struct create_lease))
-			return NULL;
+			goto err_out;
 
 		memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
 		lreq->req_state = lc->lcontext.LeaseState;
@@ -1532,6 +1775,9 @@
 		lreq->version = 1;
 	}
 	return lreq;
+err_out:
+	kfree(lreq);
+	return NULL;
 }
 
 /**
diff -ruw linux-6.13.12/fs/smb/server/oplock.h linux-6.13.12-fbx/fs/smb/server/oplock.h
--- linux-6.13.12/fs/smb/server/oplock.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/oplock.h	2025-09-25 17:40:36.779373236 +0200
@@ -11,6 +11,14 @@
 
 #define OPLOCK_WAIT_TIME	(35 * HZ)
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+/* SMB Oplock levels */
+#define OPLOCK_NONE      0
+#define OPLOCK_EXCLUSIVE 1
+#define OPLOCK_BATCH     2
+#define OPLOCK_READ      3  /* level 2 oplock */
+#endif
+
 /* Oplock states */
 #define OPLOCK_STATE_NONE	0x00
 #define OPLOCK_ACK_WAIT		0x01
@@ -65,13 +73,15 @@
 	atomic_t		refcount;
 	__u16                   Tid;
 	bool			is_lease;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	bool			is_smb2;
+#endif
 	bool			open_trunc;	/* truncate on open */
 	struct lease		*o_lease;
 	struct list_head        op_entry;
 	struct list_head        lease_entry;
 	wait_queue_head_t oplock_q; /* Other server threads */
 	wait_queue_head_t oplock_brk; /* oplock breaking wait */
-	struct rcu_head		rcu_head;
 };
 
 struct lease_break_info {
diff -ruw linux-6.13.12/fs/smb/server/smb2ops.c linux-6.13.12-fbx/fs/smb/server/smb2ops.c
--- linux-6.13.12/fs/smb/server/smb2ops.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/smb2ops.c	2025-09-25 17:40:36.783373256 +0200
@@ -12,6 +12,34 @@
 #include "smb_common.h"
 #include "server.h"
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+static struct smb_version_values smb20_server_values = {
+	.version_string = SMB20_VERSION_STRING,
+	.protocol_id = SMB20_PROT_ID,
+	.capabilities = 0,
+	.max_read_size = CIFS_DEFAULT_IOSIZE,
+	.max_write_size = CIFS_DEFAULT_IOSIZE,
+	.max_trans_size = CIFS_DEFAULT_IOSIZE,
+	.max_credits = SMB2_MAX_CREDITS,
+	.large_lock_type = 0,
+	.exclusive_lock_type = SMB2_LOCKFLAG_EXCLUSIVE,
+	.shared_lock_type = SMB2_LOCKFLAG_SHARED,
+	.unlock_lock_type = SMB2_LOCKFLAG_UNLOCK,
+	.header_size = sizeof(struct smb2_hdr),
+	.max_header_size = MAX_SMB2_HDR_SIZE,
+	.read_rsp_size = sizeof(struct smb2_read_rsp) - 1,
+	.lock_cmd = SMB2_LOCK,
+	.cap_unix = 0,
+	.cap_nt_find = SMB2_NT_FIND,
+	.cap_large_files = SMB2_LARGE_FILES,
+	.create_lease_size = sizeof(struct create_lease),
+	.create_durable_size = sizeof(struct create_durable_rsp),
+	.create_mxac_size = sizeof(struct create_mxac_rsp),
+	.create_disk_id_size = sizeof(struct create_disk_id_rsp),
+	.create_posix_size = sizeof(struct create_posix_rsp),
+};
+#endif
+
 static struct smb_version_values smb21_server_values = {
 	.version_string = SMB21_VERSION_STRING,
 	.protocol_id = SMB21_PROT_ID,
@@ -190,6 +218,22 @@
 	[SMB2_CHANGE_NOTIFY_HE]	=	{ .proc = smb2_notify},
 };
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+/**
+ * init_smb2_0_server() - initialize a smb server connection with smb2.0
+ *			command dispatcher
+ * @conn:	connection instance
+ */
+void init_smb2_0_server(struct ksmbd_conn *conn)
+{
+	conn->vals = &smb20_server_values;
+	conn->ops = &smb2_0_server_ops;
+	conn->cmds = smb2_0_server_cmds;
+	conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
+	conn->signing_algorithm = SIGNING_ALG_HMAC_SHA256;
+}
+#endif
+
 /**
  * init_smb2_1_server() - initialize a smb server connection with smb2.1
  *			command dispatcher
diff -ruw linux-6.13.12/fs/smb/server/smb2pdu.c linux-6.13.12-fbx/fs/smb/server/smb2pdu.c
--- linux-6.13.12/fs/smb/server/smb2pdu.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/smb2pdu.c	2025-09-25 17:40:36.787373276 +0200
@@ -497,6 +497,10 @@
 {
 	struct smb2_hdr *rsp_hdr = smb2_get_msg(work->response_buf);
 	struct smb2_hdr *rcv_hdr = smb2_get_msg(work->request_buf);
+	size_t rcv_sz = get_rfc1002_len(work->request_buf);
+
+	if (rcv_sz < sizeof(struct smb2_hdr))
+		return 1;
 
 	memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
 	rsp_hdr->ProtocolId = rcv_hdr->ProtocolId;
@@ -632,6 +636,11 @@
 		return name;
 	}
 
+	if (*name == '\0') {
+		kfree(name);
+		return ERR_PTR(-EINVAL);
+	}
+
 	if (*name == '\\') {
 		pr_err("not allow directory name included leading slash\n");
 		kfree(name);
@@ -1194,6 +1203,11 @@
 	case SMB21_PROT_ID:
 		init_smb2_1_server(conn);
 		break;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	case SMB20_PROT_ID:
+		init_smb2_0_server(conn);
+		break;
+#endif
 	case SMB2X_PROT_ID:
 	case BAD_PROT_ID:
 	default:
@@ -1444,7 +1458,7 @@
 {
 	struct ksmbd_conn *conn = work->conn;
 	struct ksmbd_session *sess = work->sess;
-	struct channel *chann = NULL;
+	struct channel *chann = NULL, *old;
 	struct ksmbd_user *user;
 	u64 prev_id;
 	int sz, rc;
@@ -1556,7 +1570,12 @@
 				return -ENOMEM;
 
 			chann->conn = conn;
-			xa_store(&sess->ksmbd_chann_list, (long)conn, chann, KSMBD_DEFAULT_GFP);
+			old = xa_store(&sess->ksmbd_chann_list, (long)conn, chann,
+					KSMBD_DEFAULT_GFP);
+			if (xa_is_err(old)) {
+				kfree(chann);
+				return xa_err(old);
+			}
 		}
 	}
 
@@ -1596,20 +1615,18 @@
 	out_len = work->response_sz -
 		(le16_to_cpu(rsp->SecurityBufferOffset) + 4);
 
-	/* Check previous session */
-	prev_sess_id = le64_to_cpu(req->PreviousSessionId);
-	if (prev_sess_id && prev_sess_id != sess->id)
-		destroy_previous_session(conn, sess->user, prev_sess_id);
-
-	if (sess->state == SMB2_SESSION_VALID)
-		ksmbd_free_user(sess->user);
-
 	retval = ksmbd_krb5_authenticate(sess, in_blob, in_len,
 					 out_blob, &out_len);
 	if (retval) {
 		ksmbd_debug(SMB, "krb5 authentication failed\n");
 		return -EINVAL;
 	}
+
+	/* Check previous session */
+	prev_sess_id = le64_to_cpu(req->PreviousSessionId);
+	if (prev_sess_id && prev_sess_id != sess->id)
+		destroy_previous_session(conn, sess->user, prev_sess_id);
+
 	rsp->SecurityBufferLength = cpu_to_le16(out_len);
 
 	if ((conn->sign || server_conf.enforced_signing) ||
@@ -1824,8 +1841,6 @@
 				ksmbd_conn_set_good(conn);
 				sess->state = SMB2_SESSION_VALID;
 			}
-			kfree(sess->Preauth_HashValue);
-			sess->Preauth_HashValue = NULL;
 		} else if (conn->preferred_auth_mech == KSMBD_AUTH_NTLMSSP) {
 			if (negblob->MessageType == NtLmNegotiate) {
 				rc = ntlm_negotiate(work, negblob, negblob_len, rsp);
@@ -1852,8 +1867,6 @@
 						kfree(preauth_sess);
 					}
 				}
-				kfree(sess->Preauth_HashValue);
-				sess->Preauth_HashValue = NULL;
 			} else {
 				pr_info_ratelimited("Unknown NTLMSSP message type : 0x%x\n",
 						le32_to_cpu(negblob->MessageType));
@@ -8511,11 +8524,6 @@
 		goto err_out;
 	}
 
-	opinfo->op_state = OPLOCK_STATE_NONE;
-	wake_up_interruptible_all(&opinfo->oplock_q);
-	opinfo_put(opinfo);
-	ksmbd_fd_put(work, fp);
-
 	rsp->StructureSize = cpu_to_le16(24);
 	rsp->OplockLevel = rsp_oplevel;
 	rsp->Reserved = 0;
@@ -8523,16 +8531,15 @@
 	rsp->VolatileFid = volatile_id;
 	rsp->PersistentFid = persistent_id;
 	ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_oplock_break));
-	if (!ret)
-		return;
-
+	if (ret) {
 err_out:
+		smb2_set_err_rsp(work);
+	}
+
 	opinfo->op_state = OPLOCK_STATE_NONE;
 	wake_up_interruptible_all(&opinfo->oplock_q);
-
 	opinfo_put(opinfo);
 	ksmbd_fd_put(work, fp);
-	smb2_set_err_rsp(work);
 }
 
 static int check_lease_state(struct lease *lease, __le32 req_state)
@@ -8662,11 +8669,6 @@
 	}
 
 	lease_state = lease->state;
-	opinfo->op_state = OPLOCK_STATE_NONE;
-	wake_up_interruptible_all(&opinfo->oplock_q);
-	atomic_dec(&opinfo->breaking_cnt);
-	wake_up_interruptible_all(&opinfo->oplock_brk);
-	opinfo_put(opinfo);
 
 	rsp->StructureSize = cpu_to_le16(36);
 	rsp->Reserved = 0;
@@ -8675,16 +8677,16 @@
 	rsp->LeaseState = lease_state;
 	rsp->LeaseDuration = 0;
 	ret = ksmbd_iov_pin_rsp(work, rsp, sizeof(struct smb2_lease_ack));
-	if (!ret)
-		return;
-
+	if (ret) {
 err_out:
+		smb2_set_err_rsp(work);
+	}
+
+	opinfo->op_state = OPLOCK_STATE_NONE;
 	wake_up_interruptible_all(&opinfo->oplock_q);
 	atomic_dec(&opinfo->breaking_cnt);
 	wake_up_interruptible_all(&opinfo->oplock_brk);
-
 	opinfo_put(opinfo);
-	smb2_set_err_rsp(work);
 }
 
 /**
diff -ruw linux-6.13.12/fs/smb/server/smb2pdu.h linux-6.13.12-fbx/fs/smb/server/smb2pdu.h
--- linux-6.13.12/fs/smb/server/smb2pdu.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/smb2pdu.h	2025-09-25 17:40:36.787373276 +0200
@@ -424,6 +424,9 @@
 } __packed;
 
 /* functions */
+#ifdef CONFIG_SMB_INSECURE_SERVER
+void init_smb2_0_server(struct ksmbd_conn *conn);
+#endif
 void init_smb2_1_server(struct ksmbd_conn *conn);
 void init_smb3_0_server(struct ksmbd_conn *conn);
 void init_smb3_02_server(struct ksmbd_conn *conn);
diff -ruw linux-6.13.12/fs/smb/server/smb_common.c linux-6.13.12-fbx/fs/smb/server/smb_common.c
--- linux-6.13.12/fs/smb/server/smb_common.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/smb_common.c	2025-09-25 17:40:36.787373276 +0200
@@ -17,6 +17,10 @@
 #include "mgmt/tree_connect.h"
 #include "mgmt/share_config.h"
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#include "smb1pdu.h"
+#endif
+
 /*for shortname implementation */
 static const char *basechars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ_-!@#$%";
 #define MANGLE_BASE (strlen(basechars) - 1)
@@ -32,6 +36,20 @@
 };
 
 static struct smb_protocol smb1_protos[] = {
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	{
+		SMB1_PROT,
+		"\2NT LM 0.12",
+		"NT1",
+		SMB10_PROT_ID
+	},
+	{
+		SMB2_PROT,
+		"\2SMB 2.002",
+		"SMB2_02",
+		SMB20_PROT_ID
+	},
+#endif
 	{
 		SMB21_PROT,
 		"\2SMB 2.1",
@@ -90,7 +108,11 @@
 
 inline int ksmbd_min_protocol(void)
 {
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	return SMB1_PROT;
+#else
 	return SMB21_PROT;
+#endif
 }
 
 inline int ksmbd_max_protocol(void)
@@ -135,6 +157,16 @@
 int ksmbd_verify_smb_message(struct ksmbd_work *work)
 {
 	struct smb2_hdr *smb2_hdr = ksmbd_req_buf_next(work);
+
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER) {
+		ksmbd_debug(SMB, "got SMB2 command\n");
+		return ksmbd_smb2_check_message(work);
+	}
+
+	work->conn->outstanding_credits++;
+	return ksmbd_smb1_check_message(work);
+#else
 	struct smb_hdr *hdr;
 
 	if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER)
@@ -148,6 +180,7 @@
 	}
 
 	return -EINVAL;
+#endif
 }
 
 /**
@@ -299,6 +332,7 @@
 	return BAD_PROT_ID;
 }
 
+#ifndef CONFIG_SMB_INSECURE_SERVER
 #define SMB_COM_NEGOTIATE_EX	0x0
 
 /**
@@ -409,6 +443,7 @@
 	conn->max_cmds = ARRAY_SIZE(smb1_server_cmds);
 	return 0;
 }
+#endif
 
 int ksmbd_init_smb_server(struct ksmbd_conn *conn)
 {
@@ -570,6 +605,7 @@
 		conn->dialect <= SMB311_PROT_ID);
 }
 
+#ifndef CONFIG_SMB_INSECURE_SERVER
 static int smb_handle_negotiate(struct ksmbd_work *work)
 {
 	struct smb_negotiate_rsp *neg_rsp = work->response_buf;
@@ -586,6 +622,7 @@
 	neg_rsp->ByteCount = 0;
 	return 0;
 }
+#endif
 
 int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
 {
diff -ruw linux-6.13.12/fs/smb/server/smb_common.h linux-6.13.12-fbx/fs/smb/server/smb_common.h
--- linux-6.13.12/fs/smb/server/smb_common.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/smb_common.h	2025-09-25 17:40:36.787373276 +0200
@@ -49,6 +49,42 @@
 /*
  * File Attribute flags
  */
+#ifdef CONFIG_SMB_INSECURE_SERVER
+#define ATTR_READONLY			0x0001
+#define ATTR_HIDDEN			0x0002
+#define ATTR_SYSTEM			0x0004
+#define ATTR_VOLUME			0x0008
+#define ATTR_DIRECTORY			0x0010
+#define ATTR_ARCHIVE			0x0020
+#define ATTR_DEVICE			0x0040
+#define ATTR_NORMAL			0x0080
+#define ATTR_TEMPORARY			0x0100
+#define ATTR_SPARSE			0x0200
+#define ATTR_REPARSE			0x0400
+#define ATTR_COMPRESSED			0x0800
+#define ATTR_OFFLINE			0x1000
+#define ATTR_NOT_CONTENT_INDEXED	0x2000
+#define ATTR_ENCRYPTED			0x4000
+
+#define ATTR_READONLY_LE		cpu_to_le32(ATTR_READONLY)
+#define ATTR_HIDDEN_LE			cpu_to_le32(ATTR_HIDDEN)
+#define ATTR_SYSTEM_LE			cpu_to_le32(ATTR_SYSTEM)
+#define ATTR_DIRECTORY_LE		cpu_to_le32(ATTR_DIRECTORY)
+#define ATTR_ARCHIVE_LE			cpu_to_le32(ATTR_ARCHIVE)
+#define ATTR_NORMAL_LE			cpu_to_le32(ATTR_NORMAL)
+#define ATTR_TEMPORARY_LE		cpu_to_le32(ATTR_TEMPORARY)
+#define ATTR_SPARSE_FILE_LE		cpu_to_le32(ATTR_SPARSE)
+#define ATTR_REPARSE_POINT_LE		cpu_to_le32(ATTR_REPARSE)
+#define ATTR_COMPRESSED_LE		cpu_to_le32(ATTR_COMPRESSED)
+#define ATTR_OFFLINE_LE			cpu_to_le32(ATTR_OFFLINE)
+#define ATTR_NOT_CONTENT_INDEXED_LE	cpu_to_le32(ATTR_NOT_CONTENT_INDEXED)
+#define ATTR_ENCRYPTED_LE		cpu_to_le32(ATTR_ENCRYPTED)
+#define ATTR_INTEGRITY_STREAML_LE	cpu_to_le32(0x00008000)
+#define ATTR_NO_SCRUB_DATA_LE		cpu_to_le32(0x00020000)
+#define ATTR_MASK_LE			cpu_to_le32(0x00007FB7)
+
+#define IS_SMB2(x)			((x)->vals->protocol_id != SMB10_PROT_ID)
+#endif
 #define ATTR_POSIX_SEMANTICS		0x01000000
 #define ATTR_BACKUP_SEMANTICS		0x02000000
 #define ATTR_DELETE_ON_CLOSE		0x04000000
@@ -203,11 +239,13 @@
 	unsigned char DialectsArray[];
 } __packed;
 
+#ifndef CONFIG_SMB_INSECURE_SERVER
 struct smb_negotiate_rsp {
 	struct smb_hdr hdr;     /* wct = 17 */
 	__le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
 	__le16 ByteCount;
 } __packed;
+#endif
 
 struct filesystem_attribute_info {
 	__le32 Attributes;
@@ -236,6 +274,14 @@
 	__le32 BytesPerSector;
 } __packed;     /* size info, level 0x103 */
 
+struct filesystem_full_info {
+	__le64 TotalAllocationUnits;
+	__le64 FreeAllocationUnits;
+	__le64 ActualAvailableUnits;
+	__le32 SectorsPerAllocationUnit;
+	__le32 BytesPerSector;
+} __packed;     /* size info, level 0x3ef */
+
 #define EXTENDED_INFO_MAGIC 0x43667364	/* Cfsd */
 #define STRING_LENGTH 28
 
diff -ruw linux-6.13.12/fs/smb/server/transport_ipc.c linux-6.13.12-fbx/fs/smb/server/transport_ipc.c
--- linux-6.13.12/fs/smb/server/transport_ipc.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/transport_ipc.c	2025-09-25 17:40:36.787373276 +0200
@@ -310,7 +310,11 @@
 	server_conf.signing = req->signing;
 	server_conf.tcp_port = req->tcp_port;
 	server_conf.ipc_timeout = req->ipc_timeout * HZ;
-	server_conf.deadtime = req->deadtime * SMB_ECHO_INTERVAL;
+	if (check_mul_overflow(req->deadtime, SMB_ECHO_INTERVAL,
+					&server_conf.deadtime)) {
+		ret = -EINVAL;
+		goto out;
+	}
 	server_conf.share_fake_fscaps = req->share_fake_fscaps;
 	ksmbd_init_domain(req->sub_auth);
 
@@ -336,6 +340,7 @@
 	ret |= ksmbd_set_work_group(req->work_group);
 	ret |= ksmbd_tcp_set_interfaces(KSMBD_STARTUP_CONFIG_INTERFACES(req),
 					req->ifc_list_sz);
+out:
 	if (ret) {
 		pr_err("Server configuration error: %s %s %s\n",
 		       req->netbios_name, req->server_string,
diff -ruw linux-6.13.12/fs/smb/server/unicode.c linux-6.13.12-fbx/fs/smb/server/unicode.c
--- linux-6.13.12/fs/smb/server/unicode.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/unicode.c	2025-09-25 17:40:36.791373296 +0200
@@ -13,6 +13,24 @@
 #include "unicode.h"
 #include "smb_common.h"
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int smb1_utf16_name_length(const __le16 *from, int maxbytes)
+{
+	int i, len = 0;
+	int maxwords = maxbytes / 2;
+	__u16 ftmp;
+
+	for (i = 0; i < maxwords; i++) {
+		ftmp = get_unaligned_le16(&from[i]);
+		len += 2;
+		if (ftmp == 0)
+			break;
+	}
+
+	return len;
+}
+#endif
+
 /*
  * cifs_mapchar() - convert a host-endian char to proper char in codepage
  * @target:	where converted character should be copied
diff -ruw linux-6.13.12/fs/smb/server/unicode.h linux-6.13.12-fbx/fs/smb/server/unicode.h
--- linux-6.13.12/fs/smb/server/unicode.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/unicode.h	2025-09-25 17:40:36.791373296 +0200
@@ -28,6 +28,9 @@
 #include "../../nls/nls_ucs2_utils.h"
 
 #ifdef __KERNEL__
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int smb1_utf16_name_length(const __le16 *from, int maxbytes);
+#endif
 int smb_strtoUTF16(__le16 *to, const char *from, int len,
 		   const struct nls_table *codepage);
 char *smb_strndup_from_utf16(const char *src, const int maxlen,
diff -ruw linux-6.13.12/fs/smb/server/vfs.c linux-6.13.12-fbx/fs/smb/server/vfs.c
--- linux-6.13.12/fs/smb/server/vfs.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/vfs.c	2025-09-25 17:40:36.791373296 +0200
@@ -426,10 +426,15 @@
 	ksmbd_debug(VFS, "write stream data pos : %llu, count : %zd\n",
 		    *pos, count);
 
+	if (*pos >= XATTR_SIZE_MAX) {
+		pr_err("stream write position %lld is out of bounds\n",	*pos);
+		return -EINVAL;
+	}
+
 	size = *pos + count;
 	if (size > XATTR_SIZE_MAX) {
 		size = XATTR_SIZE_MAX;
-		count = (*pos + count) - XATTR_SIZE_MAX;
+		count = XATTR_SIZE_MAX - *pos;
 	}
 
 	v_len = ksmbd_vfs_getcasexattr(idmap,
@@ -496,7 +501,8 @@
 	int err = 0;
 
 	if (work->conn->connection_type) {
-		if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE))) {
+		if (!(fp->daccess & (FILE_WRITE_DATA_LE | FILE_APPEND_DATA_LE)) ||
+		    S_ISDIR(file_inode(fp->filp)->i_mode)) {
 			pr_err("no right to write(%pD)\n", fp->filp);
 			err = -EACCES;
 			goto out;
@@ -557,12 +563,253 @@
 {
 	int err;
 
-	err = vfs_getattr(path, stat, STATX_BTIME, AT_STATX_SYNC_AS_STAT);
+	err = vfs_getattr(path, stat, STATX_BASIC_STATS | STATX_BTIME,
+			AT_STATX_SYNC_AS_STAT);
 	if (err)
 		pr_err("getattr failed, err %d\n", err);
 	return err;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+/**
+ * smb_check_attrs() - sanitize inode attributes
+ * @inode:	inode
+ * @attrs:	inode attributes
+ */
+static void smb_check_attrs(struct inode *inode, struct iattr *attrs)
+{
+	/* sanitize the mode change */
+	if (attrs->ia_valid & ATTR_MODE) {
+		attrs->ia_mode &= S_IALLUGO;
+		attrs->ia_mode |= (inode->i_mode & ~S_IALLUGO);
+	}
+
+	/* Revoke setuid/setgid on chown */
+	if (!S_ISDIR(inode->i_mode) &&
+	    (((attrs->ia_valid & ATTR_UID) &&
+	      !uid_eq(attrs->ia_uid, inode->i_uid)) ||
+	     ((attrs->ia_valid & ATTR_GID) &&
+	      !gid_eq(attrs->ia_gid, inode->i_gid)))) {
+		attrs->ia_valid |= ATTR_KILL_PRIV;
+		if (attrs->ia_valid & ATTR_MODE) {
+			/* we're setting mode too, just clear the s*id bits */
+			attrs->ia_mode &= ~S_ISUID;
+			if (attrs->ia_mode & 0010)
+				attrs->ia_mode &= ~S_ISGID;
+		} else {
+			/* set ATTR_KILL_* bits and let VFS handle it */
+			attrs->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
+		}
+	}
+}
+
+/**
+ * ksmbd_vfs_setattr() - vfs helper for smb setattr
+ * @work:	work
+ * @name:	file name
+ * @fid:	file id of open file
+ * @attrs:	inode attributes
+ *
+ * Return:	0 on success, otherwise error
+ */
+int ksmbd_vfs_setattr(struct ksmbd_work *work, const char *name, u64 fid,
+		      struct iattr *attrs)
+{
+	struct file *filp;
+	struct dentry *dentry;
+	struct inode *inode;
+	struct path path;
+	bool update_size = false;
+	int err = 0;
+	struct ksmbd_file *fp = NULL;
+	struct mnt_idmap *idmap;
+
+	if (ksmbd_override_fsids(work))
+		return -ENOMEM;
+
+	if (name) {
+		err = kern_path(name, 0, &path);
+		if (err) {
+			ksmbd_revert_fsids(work);
+			ksmbd_debug(VFS, "lookup failed for %s, err = %d\n",
+				    name, err);
+			return -ENOENT;
+		}
+		dentry = path.dentry;
+		inode = d_inode(dentry);
+		idmap = mnt_idmap(path.mnt);
+	} else {
+		fp = ksmbd_lookup_fd_fast(work, fid);
+		if (!fp) {
+			ksmbd_revert_fsids(work);
+			pr_err("failed to get filp for fid %llu\n", fid);
+			return -ENOENT;
+		}
+
+		filp = fp->filp;
+		dentry = filp->f_path.dentry;
+		inode = d_inode(dentry);
+		idmap = file_mnt_idmap(filp);
+	}
+
+	err = inode_permission(idmap, d_inode(dentry), MAY_WRITE);
+	if (err)
+		goto out;
+
+	/* no need to update mode of symlink */
+	if (S_ISLNK(inode->i_mode))
+		attrs->ia_valid &= ~ATTR_MODE;
+
+	/* skip setattr, if nothing to update */
+	if (!attrs->ia_valid) {
+		err = 0;
+		goto out;
+	}
+
+	smb_check_attrs(inode, attrs);
+	if (attrs->ia_valid & ATTR_SIZE) {
+		err = get_write_access(inode);
+		if (err)
+			goto out;
+		update_size = true;
+	}
+
+	attrs->ia_valid |= ATTR_CTIME;
+
+	inode_lock(inode);
+	err = notify_change(idmap, dentry, attrs, NULL);
+	inode_unlock(inode);
+
+	if (update_size)
+		put_write_access(inode);
+
+	if (!err) {
+		sync_inode_metadata(inode, 1);
+		ksmbd_debug(VFS, "fid %llu, setattr done\n", fid);
+	}
+
+out:
+	if (name)
+		path_put(&path);
+	ksmbd_fd_put(work, fp);
+	ksmbd_revert_fsids(work);
+	return err;
+}
+
+/**
+ * ksmbd_vfs_symlink() - vfs helper for creating smb symlink
+ * @name:	source file name
+ * @symname:	symlink name
+ *
+ * Return:	0 on success, otherwise error
+ */
+int ksmbd_vfs_symlink(struct ksmbd_work *work, const char *name,
+		      const char *symname)
+{
+	struct path path;
+	struct dentry *dentry;
+	int err;
+
+	if (ksmbd_override_fsids(work))
+		return -ENOMEM;
+
+	dentry = kern_path_create(AT_FDCWD, symname, &path, 0);
+	if (IS_ERR(dentry)) {
+		ksmbd_revert_fsids(work);
+		err = PTR_ERR(dentry);
+		pr_err("path create failed for %s, err %d\n", name, err);
+		return err;
+	}
+
+	err = vfs_symlink(mnt_idmap(path.mnt), d_inode(dentry->d_parent),
+			  dentry, name);
+	if (err && (err != -EEXIST || err != -ENOSPC))
+		ksmbd_debug(VFS, "failed to create symlink, err %d\n", err);
+
+	done_path_create(&path, dentry);
+	ksmbd_revert_fsids(work);
+	return err;
+}
+
+/**
+ * ksmbd_vfs_readlink() - vfs helper for reading value of symlink
+ * @path:	path of symlink
+ * @buf:	destination buffer for symlink value
+ * @lenp:	destination buffer length
+ *
+ * Return:	symlink value length on success, otherwise error
+ */
+int ksmbd_vfs_readlink(struct path *path, char *buf, int lenp)
+{
+	struct inode *inode;
+	int err;
+	const char *link;
+	DEFINE_DELAYED_CALL(done);
+	int len;
+
+	if (!path)
+		return -ENOENT;
+
+	inode = d_inode(path->dentry);
+	if (!S_ISLNK(inode->i_mode))
+		return -EINVAL;
+
+	link = vfs_get_link(path->dentry, &done);
+	if (IS_ERR(link)) {
+		err = PTR_ERR(link);
+		pr_err("readlink failed, err = %d\n", err);
+		return err;
+	}
+
+	len = strlen(link);
+	if (len > lenp)
+		len = lenp;
+
+	memcpy(buf, link, len);
+	do_delayed_call(&done);
+
+	return len;
+}
+
+int ksmbd_vfs_readdir_name(struct ksmbd_work *work,
+			   struct mnt_idmap *idmap,
+			   struct ksmbd_kstat *ksmbd_kstat,
+			   const char *de_name, int de_name_len,
+			   const char *dir_path)
+{
+	struct path parent_path, path;
+	int rc, file_pathlen, dir_pathlen;
+	char *name;
+
+	dir_pathlen = strlen(dir_path);
+	/* 1 for '/'*/
+	file_pathlen = dir_pathlen +  de_name_len + 1;
+	name = kmalloc(file_pathlen + 1, KSMBD_DEFAULT_GFP);
+	if (!name)
+		return -ENOMEM;
+
+	memcpy(name, dir_path, dir_pathlen);
+	memset(name + dir_pathlen, '/', 1);
+	memcpy(name + dir_pathlen + 1, de_name, de_name_len);
+	name[file_pathlen] = '\0';
+
+	rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					&parent_path, &path, true);
+	if (rc) {
+		pr_err("lookup failed: %s [%d]\n", name, rc);
+		kfree(name);
+		return -ENOMEM;
+	}
+
+	ksmbd_vfs_fill_dentry_attrs(work, idmap, path.dentry, ksmbd_kstat);
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+	kfree(name);
+	return 0;
+}
+#endif
+
 /**
  * ksmbd_vfs_fsync() - vfs helper for smb fsync
  * @work:	work
@@ -958,6 +1205,38 @@
 	return err;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+int ksmbd_vfs_fsetxattr(struct ksmbd_work *work, const char *filename,
+			const char *attr_name, const void *attr_value,
+			size_t attr_size, int flags)
+{
+	struct path path;
+	int err;
+
+	if (ksmbd_override_fsids(work))
+		return -ENOMEM;
+
+	err = kern_path(filename, 0, &path);
+	if (err) {
+		ksmbd_revert_fsids(work);
+		ksmbd_debug(VFS, "cannot get linux path %s, err %d\n",
+			    filename, err);
+		return err;
+	}
+
+	err = vfs_setxattr(mnt_idmap(path.mnt), path.dentry,
+			   attr_name,
+			   attr_value,
+			   attr_size,
+			   flags);
+	if (err)
+		ksmbd_debug(VFS, "setxattr failed, err %d\n", err);
+	path_put(&path);
+	ksmbd_revert_fsids(work);
+	return err;
+}
+#endif
+
 /**
  * ksmbd_vfs_set_fadvise() - convert smb IO caching options to linux options
  * @filp:	file pointer for IO
@@ -1109,6 +1388,63 @@
 	return err;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+/**
+ * ksmbd_vfs_dentry_open() - open a dentry and provide fid for it
+ * @work:	smb work ptr
+ * @path:	path of dentry to be opened
+ * @flags:	open flags
+ * @ret_id:	fid returned on this
+ * @option:	file access pattern options for fadvise
+ * @fexist:	file already present or not
+ *
+ * Return:	allocated struct ksmbd_file on success, otherwise error pointer
+ */
+struct ksmbd_file *ksmbd_vfs_dentry_open(struct ksmbd_work *work,
+					 const struct path *path, int flags,
+					 __le32 option, int fexist)
+{
+	struct file *filp;
+	int err = 0;
+	struct ksmbd_file *fp = NULL;
+
+	filp = dentry_open(path, flags | O_LARGEFILE, current_cred());
+	if (IS_ERR(filp)) {
+		err = PTR_ERR(filp);
+		pr_err("dentry open failed, err %d\n", err);
+		return ERR_PTR(err);
+	}
+
+	ksmbd_vfs_set_fadvise(filp, option);
+
+	fp = ksmbd_open_fd(work, filp);
+	if (IS_ERR(fp)) {
+		fput(filp);
+		err = PTR_ERR(fp);
+		pr_err("id insert failed\n");
+		goto err_out;
+	}
+
+	if (flags & O_TRUNC) {
+		if (fexist)
+			smb_break_all_oplock(work, fp);
+		err = vfs_truncate((struct path *)path, 0);
+		if (err)
+			goto err_out;
+	}
+	return fp;
+
+err_out:
+	if (!IS_ERR(fp))
+		ksmbd_close_fd(work, fp->volatile_id);
+	if (err) {
+		fp = ERR_PTR(err);
+		pr_err("err : %d\n", err);
+	}
+	return fp;
+}
+#endif
+
 static bool __dir_empty(struct dir_context *ctx, const char *name, int namlen,
 		       loff_t offset, u64 ino, unsigned int d_type)
 {
@@ -1292,6 +1628,7 @@
 
 		err = ksmbd_vfs_lock_parent(parent_path->dentry, path->dentry);
 		if (err) {
+			mnt_drop_write(parent_path->mnt);
 			path_put(path);
 			path_put(parent_path);
 		}
diff -ruw linux-6.13.12/fs/smb/server/vfs.h linux-6.13.12-fbx/fs/smb/server/vfs.h
--- linux-6.13.12/fs/smb/server/vfs.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/vfs.h	2025-09-25 17:40:36.791373296 +0200
@@ -39,6 +39,9 @@
 
 struct ksmbd_dir_info {
 	const char	*name;
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	char		*smb1_name;
+#endif
 	char		*wptr;
 	char		*rptr;
 	int		name_len;
@@ -87,6 +90,24 @@
 int ksmbd_vfs_link(struct ksmbd_work *work,
 		   const char *oldname, const char *newname);
 int ksmbd_vfs_getattr(const struct path *path, struct kstat *stat);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+struct ksmbd_file *ksmbd_vfs_dentry_open(struct ksmbd_work *work,
+					 const struct path *path, int flags,
+					 __le32 option, int fexist);
+int ksmbd_vfs_setattr(struct ksmbd_work *work, const char *name,
+		      u64 fid, struct iattr *attrs);
+int ksmbd_vfs_fsetxattr(struct ksmbd_work *work, const char *filename,
+			const char *attr_name, const void *attr_value,
+			size_t attr_size, int flags);
+int ksmbd_vfs_symlink(struct ksmbd_work *work,
+		      const char *name, const char *symname);
+int ksmbd_vfs_readlink(struct path *path, char *buf, int lenp);
+int ksmbd_vfs_readdir_name(struct ksmbd_work *work,
+			   struct mnt_idmap *idmap,
+			   struct ksmbd_kstat *ksmbd_kstat,
+			   const char *de_name, int de_name_len,
+			   const char *dir_path);
+#endif
 int ksmbd_vfs_rename(struct ksmbd_work *work, const struct path *old_path,
 		     char *newname, int flags);
 int ksmbd_vfs_truncate(struct ksmbd_work *work,
diff -ruw linux-6.13.12/fs/smb/server/vfs_cache.c linux-6.13.12-fbx/fs/smb/server/vfs_cache.c
--- linux-6.13.12/fs/smb/server/vfs_cache.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/vfs_cache.c	2025-09-25 17:40:36.791373296 +0200
@@ -342,6 +342,9 @@
 		locks_free_lock(smb_lock->fl);
 		kfree(smb_lock);
 	}
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	kfree(fp->filename);
+#endif
 
 	if (ksmbd_stream_fd(fp))
 		kfree(fp->stream.name);
@@ -529,6 +532,52 @@
 	return fp;
 }
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+struct ksmbd_file *ksmbd_lookup_fd_filename(struct ksmbd_work *work, char *filename)
+{
+	struct ksmbd_file	*fp = NULL;
+	unsigned int		id;
+	char			*pathname;
+
+	pathname = kmalloc(PATH_MAX, KSMBD_DEFAULT_GFP);
+	if (!pathname)
+		return NULL;
+
+	read_lock(&work->sess->file_table.lock);
+	idr_for_each_entry(work->sess->file_table.idr, fp, id) {
+		char *path = d_path(&fp->filp->f_path, pathname, PATH_MAX);
+
+		if (IS_ERR(path)) {
+			fp = NULL; break; }
+
+		if (!strcmp(path, filename)) {
+			fp = ksmbd_fp_get(fp);
+			break;
+		}
+	}
+	read_unlock(&work->sess->file_table.lock);
+
+	kfree(pathname);
+	return fp;
+}
+
+int ksmbd_file_table_flush(struct ksmbd_work *work)
+{
+	struct ksmbd_file	*fp = NULL;
+	unsigned int		id;
+	int			ret = 0;
+
+	read_lock(&work->sess->file_table.lock);
+	idr_for_each_entry(work->sess->file_table.idr, fp, id) {
+		ret = ksmbd_vfs_fsync(work, fp->volatile_id, KSMBD_NO_FID);
+		if (ret)
+			break;
+	}
+	read_unlock(&work->sess->file_table.lock);
+	return ret;
+}
+#endif
+
 struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry)
 {
 	struct ksmbd_file	*lfp;
@@ -579,7 +628,13 @@
 
 	idr_preload(KSMBD_DEFAULT_GFP);
 	write_lock(&ft->lock);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	ret = idr_alloc_cyclic(ft->idr, fp, 0,
+			       IS_SMB2(fp->conn) ? INT_MAX - 1 : 0xFFFF,
+			       GFP_NOWAIT);
+#else
 	ret = idr_alloc_cyclic(ft->idr, fp, 0, INT_MAX - 1, GFP_NOWAIT);
+#endif
 	if (ret >= 0) {
 		id = ret;
 		ret = 0;
@@ -661,21 +716,40 @@
 		       bool (*skip)(struct ksmbd_tree_connect *tcon,
 				    struct ksmbd_file *fp))
 {
-	unsigned int			id;
 	struct ksmbd_file		*fp;
+	unsigned int id = 0;
 	int				num = 0;
 
-	idr_for_each_entry(ft->idr, fp, id) {
-		if (skip(tcon, fp))
+	while (1) {
+		write_lock(&ft->lock);
+		fp = idr_get_next(ft->idr, &id);
+		if (!fp) {
+			write_unlock(&ft->lock);
+			break;
+		}
+
+		if (skip(tcon, fp) ||
+		    !atomic_dec_and_test(&fp->refcount)) {
+			id++;
+			write_unlock(&ft->lock);
 			continue;
+		}
 
 		set_close_state_blocked_works(fp);
+		idr_remove(ft->idr, fp->volatile_id);
+		fp->volatile_id = KSMBD_NO_FID;
+		write_unlock(&ft->lock);
+
+		down_write(&fp->f_ci->m_lock);
+		list_del_init(&fp->node);
+		up_write(&fp->f_ci->m_lock);
 
-		if (!atomic_dec_and_test(&fp->refcount))
-			continue;
 		__ksmbd_close_fd(ft, fp);
+
 		num++;
+		id++;
 	}
+
 	return num;
 }
 
diff -ruw linux-6.13.12/fs/smb/server/vfs_cache.h linux-6.13.12-fbx/fs/smb/server/vfs_cache.h
--- linux-6.13.12/fs/smb/server/vfs_cache.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/fs/smb/server/vfs_cache.h	2025-09-25 17:40:36.791373296 +0200
@@ -103,6 +103,20 @@
 	unsigned int			durable_timeout;
 	unsigned int			durable_scavenger_timeout;
 
+#ifdef CONFIG_SMB_INSECURE_SERVER
+	/* for SMB1 */
+	int				pid;
+
+	/* conflict lock fail count for SMB1 */
+	unsigned int			cflock_cnt;
+	/* last lock failure start offset for SMB1 */
+	unsigned long long		llock_fstart;
+
+	int				dirent_offset;
+
+	/* for find_first/find_next */
+	char				*filename;
+#endif
 	/* if ls is happening on directory, below is valid*/
 	struct ksmbd_readdir_data	readdir_data;
 	int				dot_dotdot[2];
@@ -150,6 +164,10 @@
 struct ksmbd_file *ksmbd_lookup_durable_fd(unsigned long long id);
 void ksmbd_put_durable_fd(struct ksmbd_file *fp);
 struct ksmbd_file *ksmbd_lookup_fd_cguid(char *cguid);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+struct ksmbd_file *ksmbd_lookup_fd_filename(struct ksmbd_work *work, char *filename);
+int ksmbd_file_table_flush(struct ksmbd_work *work);
+#endif
 struct ksmbd_file *ksmbd_lookup_fd_inode(struct dentry *dentry);
 unsigned int ksmbd_open_durable_fd(struct ksmbd_file *fp);
 struct ksmbd_file *ksmbd_open_fd(struct ksmbd_work *work, struct file *filp);
diff -ruw linux-6.13.12/include/asm-generic/vmlinux.lds.h linux-6.13.12-fbx/include/asm-generic/vmlinux.lds.h
--- linux-6.13.12/include/asm-generic/vmlinux.lds.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/asm-generic/vmlinux.lds.h	2025-09-29 14:42:15.219423119 +0200
@@ -345,7 +345,7 @@
 #define KERNEL_DTB()							\
 	STRUCT_ALIGN();							\
 	__dtb_start = .;						\
-	KEEP(*(.dtb.init.rodata))					\
+	KEEP(*(.dtb.rodata))						\
 	__dtb_end = .;
 
 /*
@@ -457,6 +457,7 @@
 	. = ALIGN((align));						\
 	.rodata           : AT(ADDR(.rodata) - LOAD_OFFSET) {		\
 		__start_rodata = .;					\
+		KERNEL_DTB()						\
 		*(.rodata) *(.rodata.*) *(.data.rel.ro*)		\
 		SCHED_DATA						\
 		RO_AFTER_INIT_DATA	/* Read only after init */	\
@@ -711,7 +712,6 @@
 	TIMER_OF_TABLES()						\
 	CPU_METHOD_OF_TABLES()						\
 	CPUIDLE_METHOD_OF_TABLES()					\
-	KERNEL_DTB()							\
 	IRQCHIP_OF_MATCH_TABLE()					\
 	ACPI_PROBE_TABLE(irqchip)					\
 	ACPI_PROBE_TABLE(timer)						\
diff -ruw linux-6.13.12/include/dt-bindings/input/linux-event-codes.h linux-6.13.12-fbx/include/dt-bindings/input/linux-event-codes.h
--- linux-6.13.12/include/dt-bindings/input/linux-event-codes.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/dt-bindings/input/linux-event-codes.h	2025-09-25 17:40:37.299375815 +0200
@@ -807,6 +807,18 @@
 #define BTN_TRIGGER_HAPPY39		0x2e6
 #define BTN_TRIGGER_HAPPY40		0x2e7
 
+#define KEY_APP_TV			0x2f1
+#define KEY_APP_REPLAY			0x2f2
+#define KEY_APP_VIDEOCLUB		0x2f3
+#define KEY_APP_WHATSON			0x2f4
+#define KEY_APP_RECORDS			0x2f5
+#define KEY_APP_MEDIA			0x2f6
+#define KEY_APP_YOUTUBE			0x2f7
+#define KEY_APP_RADIOS			0x2f8
+#define KEY_APP_CANALVOD		0x2f9
+#define KEY_APP_PIP			0x2fa
+#define KEY_APP_NETFLIX			0x2fb
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
diff -ruw linux-6.13.12/include/linux/binfmts.h linux-6.13.12-fbx/include/linux/binfmts.h
--- linux-6.13.12/include/linux/binfmts.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/binfmts.h	2025-09-25 17:40:36.935374010 +0200
@@ -62,7 +62,7 @@
 	unsigned long loader, exec;
 
 	struct rlimit rlim_stack; /* Saved RLIMIT_STACK used during exec. */
-
+	int aslr_used;
 	char buf[BINPRM_BUF_SIZE];
 } __randomize_layout;
 
diff -ruw linux-6.13.12/include/linux/brcmphy.h linux-6.13.12-fbx/include/linux/brcmphy.h
--- linux-6.13.12/include/linux/brcmphy.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/brcmphy.h	2025-09-25 17:40:36.943374049 +0200
@@ -35,6 +35,10 @@
 #define PHY_ID_BCM72113			0x35905310
 #define PHY_ID_BCM72116			0x35905350
 #define PHY_ID_BCM72165			0x35905340
+
+#define PHY_ID_BCM63138			0x600d85c0
+#define PHY_ID_BCM63138S		0x0143bff0
+
 #define PHY_ID_BCM7250			0xae025280
 #define PHY_ID_BCM7255			0xae025120
 #define PHY_ID_BCM7260			0xae025190
@@ -58,6 +62,8 @@
 #define PHY_ID_BCM_CYGNUS		0xae025200
 #define PHY_ID_BCM_OMEGA		0xae025100
 
+#define PHY_ID_BCM63158			0xae0251c1
+
 #define PHY_BCM_OUI_MASK		0xfffffc00
 #define PHY_BCM_OUI_1			0x00206000
 #define PHY_BCM_OUI_2			0x0143bc00
diff -ruw linux-6.13.12/include/linux/debugfs.h linux-6.13.12-fbx/include/linux/debugfs.h
--- linux-6.13.12/include/linux/debugfs.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/debugfs.h	2025-09-25 17:40:36.959374129 +0200
@@ -164,8 +164,7 @@
 ssize_t debugfs_attr_write_signed(struct file *file, const char __user *buf,
 			size_t len, loff_t *ppos);
 
-struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
-                struct dentry *new_dir, const char *new_name);
+int debugfs_change_name(struct dentry *dentry, const char *fmt, ...) __printf(2, 3);
 
 void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
 		       u8 *value);
@@ -341,10 +340,10 @@
 	return -ENODEV;
 }
 
-static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
-                struct dentry *new_dir, char *new_name)
+static inline int __printf(2, 3) debugfs_change_name(struct dentry *dentry,
+					const char *fmt, ...)
 {
-	return ERR_PTR(-ENODEV);
+	return -ENODEV;
 }
 
 static inline void debugfs_create_u8(const char *name, umode_t mode,
diff -ruw linux-6.13.12/include/linux/device.h linux-6.13.12-fbx/include/linux/device.h
--- linux-6.13.12/include/linux/device.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/device.h	2025-09-25 17:40:36.963374149 +0200
@@ -372,6 +372,9 @@
 void __iomem *devm_of_iomap(struct device *dev,
 			    struct device_node *node, int index,
 			    resource_size_t *size);
+
+void __iomem *devm_of_iomap_byname(struct device *dev, struct device_node *node,
+				   char *name, resource_size_t *size);
 #else
 
 static inline
@@ -395,6 +398,13 @@
 {
 	return ERR_PTR(-EINVAL);
 }
+
+static inline
+void __iomem *devm_of_iomap_byname(struct device *dev, struct device_node *node,
+				   char *name, resource_size_t *size)
+{
+	return ERR_PTR(-EINVAL);
+}
 
 #endif
 
diff -ruw linux-6.13.12/include/linux/dma-mapping.h linux-6.13.12-fbx/include/linux/dma-mapping.h
--- linux-6.13.12/include/linux/dma-mapping.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/dma-mapping.h	2025-09-25 17:40:36.967374168 +0200
@@ -489,6 +489,9 @@
 	return DMA_BIT_MASK(32);
 }
 
+void *dma_alloc_coherent_no_dev(size_t size, dma_addr_t *dma_handle, gfp_t gfp);
+void dma_free_coherent_no_dev(size_t size, void *addr, dma_addr_t dma_handle);
+
 /*
  * Set both the DMA mask and the coherent DMA mask to the same thing.
  * Note that we don't check the return value from dma_set_coherent_mask()
diff -ruw linux-6.13.12/include/linux/ethtool.h linux-6.13.12-fbx/include/linux/ethtool.h
--- linux-6.13.12/include/linux/ethtool.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/ethtool.h	2025-09-25 17:40:36.971374188 +0200
@@ -1125,6 +1125,19 @@
 	int	(*set_mm)(struct net_device *dev, struct ethtool_mm_cfg *cfg,
 			  struct netlink_ext_ack *extack);
 	void	(*get_mm_stats)(struct net_device *dev, struct ethtool_mm_stats *stats);
+	int	(*set_shaper_param)(struct net_device *,
+				    const struct ethtool_shaper_params *);
+	int	(*get_shaper_param)(struct net_device *,
+				    struct ethtool_shaper_params *);
+	int	(*get_epon_param)(struct net_device *,
+				  struct ethtool_epon_param *);
+	int	(*set_epon_param)(struct net_device *,
+				  const struct ethtool_epon_param *);
+	struct phylink *(*get_phylink)(struct net_device *);
+	int	(*get_prbs_param)(struct net_device *,
+				  struct ethtool_prbs_param *);
+	int	(*set_prbs_param)(struct net_device *,
+				  const struct ethtool_prbs_param *);
 };
 
 int ethtool_check_ops(const struct ethtool_ops *ops);
diff -ruw linux-6.13.12/include/linux/ieee80211.h linux-6.13.12-fbx/include/linux/ieee80211.h
--- linux-6.13.12/include/linux/ieee80211.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/ieee80211.h	2025-09-25 17:40:36.995374307 +0200
@@ -1532,6 +1532,17 @@
 				struct {
 					u8 action_code;
 				} __packed ttlm_tear_down;
+				struct {
+					u8 action_code;
+					u8 dialog_token;
+					u8 variable[];
+				} __packed ml_reconf_req;
+				struct {
+					u8 action_code;
+					u8 dialog_token;
+					u8 count;
+					u8 variable[];
+				} __packed ml_reconf_resp;
 			} u;
 		} __packed action;
 		DECLARE_FLEX_ARRAY(u8, body); /* Generic frame body */
@@ -1542,11 +1553,13 @@
 #define BSS_MEMBERSHIP_SELECTOR_HT_PHY	127
 #define BSS_MEMBERSHIP_SELECTOR_VHT_PHY	126
 #define BSS_MEMBERSHIP_SELECTOR_GLK	125
-#define BSS_MEMBERSHIP_SELECTOR_EPS	124
+#define BSS_MEMBERSHIP_SELECTOR_EPD	124
 #define BSS_MEMBERSHIP_SELECTOR_SAE_H2E 123
 #define BSS_MEMBERSHIP_SELECTOR_HE_PHY	122
 #define BSS_MEMBERSHIP_SELECTOR_EHT_PHY	121
 
+#define BSS_MEMBERSHIP_SELECTOR_MIN	BSS_MEMBERSHIP_SELECTOR_EHT_PHY
+
 /* mgmt header + 1 byte category code */
 #define IEEE80211_MIN_ACTION_SIZE offsetof(struct ieee80211_mgmt, u.action.u)
 
@@ -2308,6 +2321,7 @@
 #define IEEE80211_EHT_OPER_EHT_DEF_PE_DURATION	                0x04
 #define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_LIMIT         0x08
 #define IEEE80211_EHT_OPER_GROUP_ADDRESSED_BU_IND_EXP_MASK      0x30
+#define IEEE80211_EHT_OPER_MCS15_DISABLE                        0x40
 
 /**
  * struct ieee80211_eht_operation - eht operation element
@@ -3883,6 +3897,16 @@
 	WLAN_PROTECTED_EHT_ACTION_TTLM_REQ = 0,
 	WLAN_PROTECTED_EHT_ACTION_TTLM_RES = 1,
 	WLAN_PROTECTED_EHT_ACTION_TTLM_TEARDOWN = 2,
+	WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_REQ = 3,
+	WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_RESP = 4,
+	WLAN_PROTECTED_EHT_ACTION_EPCS_ENABLE_TEARDOWN = 5,
+	WLAN_PROTECTED_EHT_ACTION_EML_OP_MODE_NOTIF = 6,
+	WLAN_PROTECTED_EHT_ACTION_LINK_RECOMMEND = 7,
+	WLAN_PROTECTED_EHT_ACTION_ML_OP_UPDATE_REQ = 8,
+	WLAN_PROTECTED_EHT_ACTION_ML_OP_UPDATE_RESP = 9,
+	WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_NOTIF = 10,
+	WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_REQ = 11,
+	WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP = 12,
 };
 
 /* Security key length */
@@ -4055,6 +4079,9 @@
 /* Defines support for enhanced multi-bssid advertisement*/
 #define WLAN_EXT_CAPA11_EMA_SUPPORT	BIT(3)
 
+/* Enable Beacon Protection */
+#define WLAN_EXT_CAPA11_BCN_PROTECT BIT(4)
+
 /* TDLS specific payload type in the LLC/SNAP header */
 #define WLAN_TDLS_SNAP_RFTYPE	0x2
 
@@ -4961,6 +4988,7 @@
 #define IEEE80211_MLC_BASIC_PRES_EML_CAPA		0x0080
 #define IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP		0x0100
 #define IEEE80211_MLC_BASIC_PRES_MLD_ID			0x0200
+#define IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP	0x0400
 
 #define IEEE80211_MED_SYNC_DELAY_DURATION		0x00ff
 #define IEEE80211_MED_SYNC_DELAY_SYNC_OFDM_ED_THRESH	0x0f00
@@ -5018,6 +5046,8 @@
 #define IEEE80211_MLD_CAP_OP_TID_TO_LINK_MAP_NEG_SUPP_DIFF	3
 #define IEEE80211_MLD_CAP_OP_FREQ_SEP_TYPE_IND		0x0f80
 #define IEEE80211_MLD_CAP_OP_AAR_SUPPORT		0x1000
+#define IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT	0x2000
+#define IEEE80211_MLD_CAP_OP_ALIGNED_TWT_SUPPORT	0x4000
 
 struct ieee80211_mle_basic_common_info {
 	u8 len;
@@ -5033,6 +5063,9 @@
 } __packed;
 
 #define IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR		0x0010
+#define IEEE80211_MLC_RECONF_PRES_EML_CAPA		0x0020
+#define IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP		0x0040
+#define IEEE80211_MLC_RECONF_PRES_EXT_MLD_CAPA_OP	0x0080
 
 /* no fixed fields in RECONF */
 
@@ -5223,6 +5256,47 @@
 }
 
 /**
+ * ieee80211_mle_get_ext_mld_capa_op - returns the extended MLD capabilities
+ *	and operations.
+ * @data: pointer to the multi-link element
+ * Return: the extended MLD capabilities and operations field value from
+ *	the multi-link element, or 0 if not present
+ *
+ * The element is assumed to be of the correct type (BASIC) and big enough,
+ * this must be checked using ieee80211_mle_type_ok().
+ */
+static inline u16 ieee80211_mle_get_ext_mld_capa_op(const u8 *data)
+{
+	const struct ieee80211_multi_link_elem *mle = (const void *)data;
+	u16 control = le16_to_cpu(mle->control);
+	const u8 *common = mle->variable;
+
+	/*
+	 * common points now at the beginning of
+	 * ieee80211_mle_basic_common_info
+	 */
+	common += sizeof(struct ieee80211_mle_basic_common_info);
+
+	if (!(control & IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP))
+		return 0;
+
+	if (control & IEEE80211_MLC_BASIC_PRES_LINK_ID)
+		common += 1;
+	if (control & IEEE80211_MLC_BASIC_PRES_BSS_PARAM_CH_CNT)
+		common += 1;
+	if (control & IEEE80211_MLC_BASIC_PRES_MED_SYNC_DELAY)
+		common += 2;
+	if (control & IEEE80211_MLC_BASIC_PRES_EML_CAPA)
+		common += 2;
+	if (control & IEEE80211_MLC_BASIC_PRES_MLD_CAPA_OP)
+		common += 2;
+	if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID)
+		common += 1;
+
+	return get_unaligned_le16(common);
+}
+
+/**
  * ieee80211_mle_get_mld_id - returns the MLD ID
  * @data: pointer to the multi-link element
  * Return: The MLD ID in the given multi-link element, or 0 if not present
@@ -5294,6 +5368,8 @@
 			common += 2;
 		if (control & IEEE80211_MLC_BASIC_PRES_MLD_ID)
 			common += 1;
+		if (control & IEEE80211_MLC_BASIC_PRES_EXT_MLD_CAPA_OP)
+			common += 2;
 		break;
 	case IEEE80211_ML_CONTROL_TYPE_PREQ:
 		common += sizeof(struct ieee80211_mle_preq_common_info);
@@ -5304,6 +5380,12 @@
 	case IEEE80211_ML_CONTROL_TYPE_RECONF:
 		if (control & IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR)
 			common += ETH_ALEN;
+		if (control & IEEE80211_MLC_RECONF_PRES_EML_CAPA)
+			common += 2;
+		if (control & IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP)
+			common += 2;
+		if (control & IEEE80211_MLC_RECONF_PRES_EXT_MLD_CAPA_OP)
+			common += 2;
 		break;
 	case IEEE80211_ML_CONTROL_TYPE_TDLS:
 		common += sizeof(struct ieee80211_mle_tdls_common_info);
@@ -5453,7 +5535,12 @@
 #define IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE		0x0010
 #define IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT		0x0020
 #define IEEE80211_MLE_STA_RECONF_CONTROL_AP_REM_TIMER_PRESENT		0x0040
-#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_UPDATE_TYPE		0x0780
+#define	IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE                 0x0780
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_AP_REM          0
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_OP_PARAM_UPDATE 1
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_ADD_LINK        2
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_DEL_LINK        3
+#define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_NSTR_STATUS     4
 #define IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_PARAMS_PRESENT	0x0800
 
 /**
diff -ruw linux-6.13.12/include/linux/if_vlan.h linux-6.13.12-fbx/include/linux/if_vlan.h
--- linux-6.13.12/include/linux/if_vlan.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/if_vlan.h	2025-09-25 17:40:36.999374327 +0200
@@ -12,6 +12,7 @@
 #include <linux/rtnetlink.h>
 #include <linux/bug.h>
 #include <uapi/linux/if_vlan.h>
+#include <uapi/linux/pkt_sched.h>
 
 #define VLAN_HLEN	4		/* The additional bytes required by VLAN
 					 * (in addition to the Ethernet header)
@@ -144,6 +145,7 @@
 			 int (*action)(struct net_device *dev, int vid,
 				       void *arg), void *arg);
 extern struct net_device *vlan_dev_real_dev(const struct net_device *dev);
+extern struct net_device *vlan_dev_upper_dev(const struct net_device *dev);
 extern u16 vlan_dev_vlan_id(const struct net_device *dev);
 extern __be16 vlan_dev_vlan_proto(const struct net_device *dev);
 
@@ -213,7 +215,7 @@
 
 	mp = vlan_dev_priv(dev)->egress_priority_map[(skprio & 0xF)];
 	while (mp) {
-		if (mp->priority == skprio) {
+		if (mp->priority == (skprio & TC_H_MIN_MASK)) {
 			return mp->vlan_qos; /* This should already be shifted
 					      * to mask correctly with the
 					      * VLAN's TCI */
@@ -257,6 +259,12 @@
 	return NULL;
 }
 
+static inline struct net_device *vlan_dev_upper_dev(const struct net_device *dev)
+{
+	BUG();
+	return NULL;
+}
+
 static inline u16 vlan_dev_vlan_id(const struct net_device *dev)
 {
 	BUG();
@@ -317,6 +325,9 @@
 	switch (ethertype) {
 	case htons(ETH_P_8021Q):
 	case htons(ETH_P_8021AD):
+#ifdef CONFIG_VLAN_FBX
+	case htons(ETH_P_FBXVLAN):
+#endif
 		return true;
 	default:
 		return false;
diff -ruw linux-6.13.12/include/linux/in.h linux-6.13.12-fbx/include/linux/in.h
--- linux-6.13.12/include/linux/in.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/in.h	2025-09-25 17:40:36.999374327 +0200
@@ -30,6 +30,9 @@
 		return 0;
 	case IPPROTO_AH:	/* SPI */
 		return 4;
+	case IPPROTO_IPV6:
+		/* third byte of ipv6 destination address */
+		return 36;
 	default:
 		return -EINVAL;
 	}
diff -ruw linux-6.13.12/include/linux/irqchip/arm-gic-v3.h linux-6.13.12-fbx/include/linux/irqchip/arm-gic-v3.h
--- linux-6.13.12/include/linux/irqchip/arm-gic-v3.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/irqchip/arm-gic-v3.h	2025-09-25 17:40:37.007374367 +0200
@@ -615,6 +615,8 @@
 	struct {
 		raw_spinlock_t	rd_lock;
 		void __iomem	*rd_base;
+		/* rd_base_phys, for use by irq-gicv3-ca code */
+		phys_addr_t	rd_base_phys;
 		struct page	*pend_page;
 		phys_addr_t	phys_base;
 		u64             flags;
diff -ruw linux-6.13.12/include/linux/ksm.h linux-6.13.12-fbx/include/linux/ksm.h
--- linux-6.13.12/include/linux/ksm.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/ksm.h	2025-09-25 17:40:37.015374406 +0200
@@ -93,6 +93,7 @@
 void collect_procs_ksm(const struct folio *folio, const struct page *page,
 		struct list_head *to_kill, int force_early);
 long ksm_process_profit(struct mm_struct *);
+bool ksm_process_mergeable(struct mm_struct *mm);
 
 #else  /* !CONFIG_KSM */
 
diff -ruw linux-6.13.12/include/linux/mhi.h linux-6.13.12-fbx/include/linux/mhi.h
--- linux-6.13.12/include/linux/mhi.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/mhi.h	2025-09-25 17:40:37.043374545 +0200
@@ -14,6 +14,7 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/workqueue.h>
+#include <soc/qcom/license-manager-simple.h>
 
 #define MHI_MAX_OEM_PK_HASH_SEGMENTS 16
 
@@ -447,6 +448,8 @@
 	bool wake_set;
 	unsigned long irq_flags;
 	u32 mru;
+
+	struct lm_license_buf license_buf;
 };
 
 /**
diff -ruw linux-6.13.12/include/linux/miscdevice.h linux-6.13.12-fbx/include/linux/miscdevice.h
--- linux-6.13.12/include/linux/miscdevice.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/miscdevice.h	2025-09-25 17:40:37.043374545 +0200
@@ -21,6 +21,7 @@
 #define APOLLO_MOUSE_MINOR	7	/* unused */
 #define PC110PAD_MINOR		9	/* unused */
 /*#define ADB_MOUSE_MINOR	10	FIXME OBSOLETE */
+#define TALDEV_MINOR		74	/* Marvell TAL device */
 #define WATCHDOG_MINOR		130	/* Watchdog timer     */
 #define TEMP_MINOR		131	/* Temperature Sensor */
 #define APM_MINOR_DEV		134
diff -ruw linux-6.13.12/include/linux/mm_types.h linux-6.13.12-fbx/include/linux/mm_types.h
--- linux-6.13.12/include/linux/mm_types.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/mm_types.h	2025-09-25 17:40:37.047374565 +0200
@@ -121,7 +121,7 @@
 			 */
 			unsigned long pp_magic;
 			struct page_pool *pp;
-			unsigned long _pp_mapping_pad;
+			unsigned long pp_recycle_flag;
 			unsigned long dma_addr;
 			atomic_long_t pp_ref_count;
 		};
diff -ruw linux-6.13.12/include/linux/mm_types_task.h linux-6.13.12-fbx/include/linux/mm_types_task.h
--- linux-6.13.12/include/linux/mm_types_task.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/mm_types_task.h	2025-09-25 17:40:37.047374565 +0200
@@ -44,8 +44,8 @@
 #endif
 };
 
-#define PAGE_FRAG_CACHE_MAX_SIZE	__ALIGN_MASK(32768, ~PAGE_MASK)
-#define PAGE_FRAG_CACHE_MAX_ORDER	get_order(PAGE_FRAG_CACHE_MAX_SIZE)
+#define PAGE_FRAG_CACHE_MAX_ORDER	CONFIG_PAGE_FRAG_CACHE_ORDER
+#define PAGE_FRAG_CACHE_MAX_SIZE	(4096 << PAGE_FRAG_CACHE_MAX_ORDER)
 struct page_frag_cache {
 	/* encoded_page consists of the virtual address, pfmemalloc bit and
 	 * order of a page.
@@ -62,6 +62,7 @@
 	__u32 offset;
 	__u32 pagecnt_bias;
 #endif
+	atomic_t pages_allocated;
 };
 
 /* Track pages that require TLB flushes */
diff -ruw linux-6.13.12/include/linux/mmc/card.h linux-6.13.12-fbx/include/linux/mmc/card.h
--- linux-6.13.12/include/linux/mmc/card.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/mmc/card.h	2025-09-25 17:40:37.047374565 +0200
@@ -293,6 +293,11 @@
 #define MMC_BLK_DATA_AREA_RPMB	(1<<3)
 };
 
+struct ro_area {
+	u64 start;
+	u64 end;
+};
+
 /*
  * MMC device
  */
@@ -377,6 +382,9 @@
 	unsigned int    nr_parts;
 
 	struct workqueue_struct *complete_wq;	/* Private workqueue */
+
+	struct ro_area user_ro_area;
+	struct ro_area boot_ro_area;
 };
 
 static inline bool mmc_large_sector(struct mmc_card *card)
diff -ruw linux-6.13.12/include/linux/mtd/mtd.h linux-6.13.12-fbx/include/linux/mtd/mtd.h
--- linux-6.13.12/include/linux/mtd/mtd.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/mtd/mtd.h	2025-09-25 17:40:37.051374585 +0200
@@ -287,6 +287,13 @@
 	 */
 	unsigned int bitflip_threshold;
 
+	/* NAND related attributes */
+	const char *nand_type;
+	const char *nand_manufacturer;
+	const char *onfi_model;
+	uint8_t onfi_ecc_bits;
+	uint8_t nand_ids[8];
+
 	/* Kernel-only stuff starts here. */
 	const char *name;
 	int index;
diff -ruw linux-6.13.12/include/linux/mtd/spi-nor.h linux-6.13.12-fbx/include/linux/mtd/spi-nor.h
--- linux-6.13.12/include/linux/mtd/spi-nor.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/mtd/spi-nor.h	2025-09-25 17:40:37.055374605 +0200
@@ -40,9 +40,11 @@
 #define SPINOR_OP_PP_1_8_8	0xc2	/* Octal page program */
 #define SPINOR_OP_BE_4K		0x20	/* Erase 4KiB block */
 #define SPINOR_OP_BE_4K_PMC	0xd7	/* Erase 4KiB block on PMC chips */
+#define SPINOR_OP_EWRSR		0x50	/* SST: Enable write to status reg */
 #define SPINOR_OP_BE_32K	0x52	/* Erase 32KiB block */
 #define SPINOR_OP_CHIP_ERASE	0xc7	/* Erase whole flash chip */
 #define SPINOR_OP_SE		0xd8	/* Sector erase (usually 64KiB) */
+#define	SPINOR_OP_RDID_ALT	0x90	/* Read ID (alt) */
 #define SPINOR_OP_RDID		0x9f	/* Read JEDEC ID */
 #define SPINOR_OP_RDSFDP	0x5a	/* Read SFDP */
 #define SPINOR_OP_RDCR		0x35	/* Read configuration register */
@@ -313,6 +315,9 @@
 	ssize_t (*write)(struct spi_nor *nor, loff_t to, size_t len,
 			 const u8 *buf);
 	int (*erase)(struct spi_nor *nor, loff_t offs);
+
+	int (*read_alt_id)(struct spi_nor *nor, u8 cmd, u8 *val, int len);
+	int (*read_atmel_id)(struct spi_nor *nor, u8 cmd, u8 *val, int len);
 };
 
 /**
diff -ruw linux-6.13.12/include/linux/netdevice.h linux-6.13.12-fbx/include/linux/netdevice.h
--- linux-6.13.12/include/linux/netdevice.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/netdevice.h	2025-09-25 17:40:37.055374605 +0200
@@ -85,6 +85,20 @@
 
 typedef u32 xdp_features_t;
 
+#ifdef CONFIG_NETRXTHREAD
+
+#define RXTHREAD_MAX_PKTS       512
+struct krxd {
+	struct sk_buff_head	pkt_queue;
+	unsigned int		stats_pkts;
+	unsigned int		stats_dropped;
+	wait_queue_head_t	wq;
+	struct task_struct	*task;
+};
+
+extern struct krxd gkrxd[CONFIG_NETRXTHREAD_RX_QUEUE];
+#endif
+
 void synchronize_net(void);
 void netdev_set_default_ethtool_ops(struct net_device *dev,
 				    const struct ethtool_ops *ops);
@@ -1718,6 +1732,8 @@
 	IFF_L3MDEV_RX_HANDLER		= 1<<29,
 	IFF_NO_ADDRCONF			= BIT_ULL(30),
 	IFF_TX_SKB_NO_LINEAR		= BIT_ULL(31),
+	IFF_FBXBRIDGE			= BIT_ULL(32),
+	IFF_FBXBRIDGE_PORT		= BIT_ULL(33),
 };
 
 /* Specifies the type of the struct net_device::ml_priv pointer */
@@ -2059,8 +2075,8 @@
 	/* TX read-mostly hotpath */
 	__cacheline_group_begin(net_device_read_tx);
 	struct_group(priv_flags_fast,
-		unsigned long		priv_flags:32;
-		unsigned long		lltx:1;
+		unsigned long long	priv_flags:34;
+		unsigned long long	lltx:1;
 	);
 	const struct net_device_ops *netdev_ops;
 	const struct header_ops *header_ops;
@@ -5205,6 +5221,16 @@
 	return dev->priv_flags & IFF_BRIDGE_PORT;
 }
 
+static inline bool netif_is_fbxbridge_master(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_FBXBRIDGE;
+}
+
+static inline bool netif_is_fbxbridge_port(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_FBXBRIDGE_PORT;
+}
+
 static inline bool netif_is_ovs_master(const struct net_device *dev)
 {
 	return dev->priv_flags & IFF_OPENVSWITCH;
diff -ruw linux-6.13.12/include/linux/netfilter/nf_conntrack_ftp.h linux-6.13.12-fbx/include/linux/netfilter/nf_conntrack_ftp.h
--- linux-6.13.12/include/linux/netfilter/nf_conntrack_ftp.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/netfilter/nf_conntrack_ftp.h	2025-09-25 17:40:37.055374605 +0200
@@ -22,6 +22,11 @@
 	u_int16_t seq_aft_nl_num[IP_CT_DIR_MAX];
 	/* pickup sequence tracking, useful for conntrackd */
 	u_int16_t flags[IP_CT_DIR_MAX];
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+	unsigned int is_fbxbridge;
+	unsigned long fbxbridge_remote;
+	unsigned long fbxbridge_wan;
+#endif
 };
 
 /* For NAT to hook in when we find a packet which describes what other
diff -ruw linux-6.13.12/include/linux/netfilter/nf_conntrack_sip.h linux-6.13.12-fbx/include/linux/netfilter/nf_conntrack_sip.h
--- linux-6.13.12/include/linux/netfilter/nf_conntrack_sip.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/netfilter/nf_conntrack_sip.h	2025-09-25 17:40:37.055374605 +0200
@@ -5,6 +5,7 @@
 #include <linux/skbuff.h>
 #include <linux/types.h>
 #include <net/netfilter/nf_conntrack_expect.h>
+#include <crypto/sha2.h>
 
 #define SIP_PORT	5060
 #define SIP_TIMEOUT	3600
@@ -12,7 +13,7 @@
 struct nf_ct_sip_master {
 	unsigned int	register_cseq;
 	unsigned int	invite_cseq;
-	__be16		forced_dport;
+	__be16		forced_dport[IP_CT_DIR_MAX];
 };
 
 enum sip_expectation_classes {
@@ -30,6 +31,10 @@
 	enum sip_expectation_classes	class;
 };
 
+struct nf_ct_sip_expect {
+	u8				cid_hash[SHA256_DIGEST_SIZE];
+};
+
 #define SDP_MEDIA_TYPE(__name, __class)					\
 {									\
 	.name	= (__name),						\
diff -ruw linux-6.13.12/include/linux/netfilter/nf_conntrack_tcp.h linux-6.13.12-fbx/include/linux/netfilter/nf_conntrack_tcp.h
--- linux-6.13.12/include/linux/netfilter/nf_conntrack_tcp.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/netfilter/nf_conntrack_tcp.h	2025-09-25 17:40:37.055374605 +0200
@@ -28,6 +28,7 @@
 	/* For SYN packets while we may be out-of-sync */
 	u_int8_t	last_wscale;	/* Last window scaling factor seen */
 	u_int8_t	last_flags;	/* Last flags set */
+	u_int32_t	no_window_track;
 };
 
 #endif /* _NF_CONNTRACK_TCP_H */
diff -ruw linux-6.13.12/include/linux/of_fdt.h linux-6.13.12-fbx/include/linux/of_fdt.h
--- linux-6.13.12/include/linux/of_fdt.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/of_fdt.h	2025-09-25 17:40:37.063374645 +0200
@@ -84,6 +84,7 @@
 extern void unflatten_and_copy_device_tree(void);
 extern void early_init_devtree(void *);
 extern void early_get_first_memblock_info(void *, phys_addr_t *);
+const void *of_fdt_find_compatible_dtb(const char *name);
 #else /* CONFIG_OF_EARLY_FLATTREE */
 static inline void early_init_dt_check_for_usable_mem_range(void) {}
 static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; }
diff -ruw linux-6.13.12/include/linux/page_owner.h linux-6.13.12-fbx/include/linux/page_owner.h
--- linux-6.13.12/include/linux/page_owner.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/page_owner.h	2025-09-25 17:40:37.063374645 +0200
@@ -13,6 +13,9 @@
 			unsigned short order, gfp_t gfp_mask);
 extern void __split_page_owner(struct page *page, int old_order,
 			int new_order);
+extern void __set_page_owner_frag_cache(struct page *page,
+					unsigned int order,
+					struct page_frag_cache *nc);
 extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
 extern void __set_page_owner_migrate_reason(struct page *page, int reason);
 extern void __dump_page_owner(const struct page *page);
@@ -38,6 +41,13 @@
 	if (static_branch_unlikely(&page_owner_inited))
 		__split_page_owner(page, old_order, new_order);
 }
+static inline void set_page_owner_frag_cache(struct page *page,
+					     unsigned int order,
+					     struct page_frag_cache *nc)
+{
+	if (static_branch_unlikely(&page_owner_inited))
+		__set_page_owner_frag_cache(page, order, nc);
+}
 static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
 {
 	if (static_branch_unlikely(&page_owner_inited))
@@ -65,6 +75,11 @@
 			int new_order)
 {
 }
+static inline void set_page_owner_frag_cache(struct page *page,
+					     unsigned int order,
+					     struct page_frag_cache *nc)
+{
+}
 static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
 {
 }
diff -ruw linux-6.13.12/include/linux/part_stat.h linux-6.13.12-fbx/include/linux/part_stat.h
--- linux-6.13.12/include/linux/part_stat.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/part_stat.h	2025-09-25 17:40:37.063374645 +0200
@@ -12,6 +12,7 @@
 	unsigned long merges[NR_STAT_GROUPS];
 	unsigned long io_ticks;
 	local_t in_flight[2];
+	unsigned long io_errors[2];
 };
 
 /*
diff -ruw linux-6.13.12/include/linux/pci.h linux-6.13.12-fbx/include/linux/pci.h
--- linux-6.13.12/include/linux/pci.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/pci.h	2025-09-25 17:40:37.067374664 +0200
@@ -476,12 +476,14 @@
 	unsigned int	no_command_memory:1;	/* No PCI_COMMAND_MEMORY */
 	unsigned int	rom_bar_overlap:1;	/* ROM BAR disable broken */
 	unsigned int	rom_attr_enabled:1;	/* Display of ROM attribute enabled? */
+	unsigned int	sysfs_init_done:1;	/* res_attr has been created */
 	pci_dev_flags_t dev_flags;
 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
 
 	spinlock_t	pcie_cap_lock;		/* Protects RMW ops in capability accessors */
 	u32		saved_config_space[16]; /* Config space saved at suspend time */
 	struct hlist_head saved_cap_space;
+	struct mutex	sysfs_init_lock;	/* res_attr has been created */
 	struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
 	struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
 
@@ -551,6 +553,9 @@
 	u8		tph_mode;	/* TPH mode */
 	u8		tph_req_type;	/* TPH requester type */
 #endif
+#ifdef CONFIG_SENSORS_PERICOM_PCIE
+	void *hwmon_priv;
+#endif
 };
 
 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
diff -ruw linux-6.13.12/include/linux/pci_ids.h linux-6.13.12-fbx/include/linux/pci_ids.h
--- linux-6.13.12/include/linux/pci_ids.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/pci_ids.h	2025-09-25 17:40:37.067374664 +0200
@@ -1850,6 +1850,7 @@
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7952	0x7952
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7954	0x7954
 #define PCI_DEVICE_ID_PERICOM_PI7C9X7958	0x7958
+#define PCI_DEVICE_ID_PI7C9X20303SL		0xa303
 
 #define PCI_SUBVENDOR_ID_CHASE_PCIFAST		0x12E0
 #define PCI_SUBDEVICE_ID_CHASE_PCIFAST4		0x0031
diff -ruw linux-6.13.12/include/linux/phy.h linux-6.13.12-fbx/include/linux/phy.h
--- linux-6.13.12/include/linux/phy.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/phy.h	2025-09-25 17:40:37.075374704 +0200
@@ -100,6 +100,7 @@
  * @PHY_INTERFACE_MODE_MII: Media-independent interface
  * @PHY_INTERFACE_MODE_GMII: Gigabit media-independent interface
  * @PHY_INTERFACE_MODE_SGMII: Serial gigabit media-independent interface
+ * @PHY_INTERFACE_MODE_HISGMII: High-speed SGMII Realtek proprietary interface
  * @PHY_INTERFACE_MODE_TBI: Ten Bit Interface
  * @PHY_INTERFACE_MODE_REVMII: Reverse Media Independent Interface
  * @PHY_INTERFACE_MODE_RMII: Reduced Media Independent Interface
@@ -170,6 +171,16 @@
 	PHY_INTERFACE_MODE_QUSGMII,
 	PHY_INTERFACE_MODE_1000BASEKX,
 	PHY_INTERFACE_MODE_10G_QXGMII,
+
+	PHY_INTERFACE_MODE_1000BASEPX_D,
+	PHY_INTERFACE_MODE_1000BASEPX_U,
+	PHY_INTERFACE_MODE_10000BASEPR_D,
+	PHY_INTERFACE_MODE_10000BASEPR_U,
+	PHY_INTERFACE_MODE_10000_1000_BASEPRX_D,
+	PHY_INTERFACE_MODE_10000_1000_BASEPRX_U,
+
+	PHY_INTERFACE_MODE_HISGMII,
+
 	PHY_INTERFACE_MODE_MAX,
 } phy_interface_t;
 
@@ -235,6 +246,8 @@
 		return "gmii";
 	case PHY_INTERFACE_MODE_SGMII:
 		return "sgmii";
+	case PHY_INTERFACE_MODE_HISGMII:
+		return "hisgmii";
 	case PHY_INTERFACE_MODE_TBI:
 		return "tbi";
 	case PHY_INTERFACE_MODE_REVMII:
@@ -293,6 +306,18 @@
 		return "qusgmii";
 	case PHY_INTERFACE_MODE_10G_QXGMII:
 		return "10g-qxgmii";
+	case PHY_INTERFACE_MODE_1000BASEPX_D:
+		return "1000base-px-d";
+	case PHY_INTERFACE_MODE_1000BASEPX_U:
+		return "1000base-px-u";
+	case PHY_INTERFACE_MODE_10000BASEPR_D:
+		return "10000base-pr-d";
+	case PHY_INTERFACE_MODE_10000BASEPR_U:
+		return "10000base-pr-u";
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_D:
+		return "10000_1000base-prx-d";
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_U:
+		return "10000_1000base-prx-u";
 	default:
 		return "unknown";
 	}
@@ -437,7 +462,11 @@
 	/** @reset_post_delay_us: GPIO reset deassert delay in microseconds */
 	int reset_post_delay_us;
 	/** @reset_gpiod: Reset GPIO descriptor pointer */
-	struct gpio_desc *reset_gpiod;
+	struct gpio_descs *reset_gpiod;
+
+	/* mark non-present phy as present but broken during
+	 * probing */
+	bool keep_broken_phy;
 
 	/** @shared_lock: protect access to the shared element */
 	struct mutex shared_lock;
@@ -1996,6 +2025,7 @@
 
 /* Generic C45 PHY driver */
 extern struct phy_driver genphy_c45_driver;
+extern struct phy_driver genphy_broken_c45_driver;
 
 /* The gen10g_* functions are the old Clause 45 stub */
 int gen10g_config_aneg(struct phy_device *phydev);
@@ -2043,6 +2073,7 @@
 void phy_support_sym_pause(struct phy_device *phydev);
 void phy_support_asym_pause(struct phy_device *phydev);
 void phy_support_eee(struct phy_device *phydev);
+void phy_disable_eee(struct phy_device *phydev);
 void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx,
 		       bool autoneg);
 void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx);
diff -ruw linux-6.13.12/include/linux/phylink.h linux-6.13.12-fbx/include/linux/phylink.h
--- linux-6.13.12/include/linux/phylink.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/phylink.h	2025-09-25 17:40:37.075374704 +0200
@@ -607,6 +607,7 @@
 int phylink_pcs_pre_init(struct phylink *pl, struct phylink_pcs *pcs);
 
 void phylink_start(struct phylink *);
+void phylink_start_silent(struct phylink *);
 void phylink_stop(struct phylink *);
 
 void phylink_suspend(struct phylink *pl, bool mac_wol);
@@ -628,6 +629,10 @@
 int phylink_init_eee(struct phylink *, bool);
 int phylink_ethtool_get_eee(struct phylink *link, struct ethtool_keee *eee);
 int phylink_ethtool_set_eee(struct phylink *link, struct ethtool_keee *eee);
+void phylink_support_eee(struct phylink *pl);
+void phylink_disable_eee(struct phylink *pl);
+void phylink_eee_update_cfg_timer(struct phylink *pl, unsigned int val);
+
 int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
 int phylink_speed_down(struct phylink *pl, bool sync);
 int phylink_speed_up(struct phylink *pl);
@@ -687,4 +692,13 @@
 
 void phylink_decode_usxgmii_word(struct phylink_link_state *state,
 				 uint16_t lpa);
+
+int phylink_set_interface(struct phylink *pl,
+			  phy_interface_t interface,
+			  bool an_enabled);
+void phylink_get_interface(struct phylink *pl,
+			   phy_interface_t *interface,
+			   int *an_en,
+			   int *mode);
+
 #endif
diff -ruw linux-6.13.12/include/linux/pnp.h linux-6.13.12-fbx/include/linux/pnp.h
--- linux-6.13.12/include/linux/pnp.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/pnp.h	2025-07-01 14:33:38.536466857 +0200
@@ -290,7 +290,7 @@
 }
 
 struct pnp_fixup {
-	char id[7];
+	char id[8];
 	void (*quirk_function) (struct pnp_dev *dev);	/* fixup function */
 };
 
diff -ruw linux-6.13.12/include/linux/ppp_channel.h linux-6.13.12-fbx/include/linux/ppp_channel.h
--- linux-6.13.12/include/linux/ppp_channel.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/ppp_channel.h	2025-09-25 17:40:37.083374744 +0200
@@ -50,6 +50,9 @@
 /* Called by the channel when it can send some more data. */
 extern void ppp_output_wakeup(struct ppp_channel *);
 
+/* Called by the channel when it wants to prevent further transmit on it */
+extern void ppp_output_stop(struct ppp_channel *);
+
 /* Called by the channel to process a received PPP packet.
    The packet should have just the 2-byte PPP protocol header. */
 extern void ppp_input(struct ppp_channel *, struct sk_buff *);
diff -ruw linux-6.13.12/include/linux/pstore.h linux-6.13.12-fbx/include/linux/pstore.h
--- linux-6.13.12/include/linux/pstore.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/pstore.h	2025-09-25 17:40:37.087374764 +0200
@@ -67,6 +67,7 @@
  * @reason:	kdump reason for notification
  * @part:	position in a multipart record
  * @compressed:	whether the buffer is compressed
+ * @old:        reflects underlying prz old_zone.
  *
  */
 struct pstore_record {
@@ -83,6 +84,8 @@
 	enum kmsg_dump_reason	reason;
 	unsigned int		part;
 	bool			compressed;
+	bool			old;
+	char			uts_release[128];
 };
 
 /**
diff -ruw linux-6.13.12/include/linux/pstore_ram.h linux-6.13.12-fbx/include/linux/pstore_ram.h
--- linux-6.13.12/include/linux/pstore_ram.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/pstore_ram.h	2025-09-25 17:40:37.087374764 +0200
@@ -29,6 +29,7 @@
 struct ramoops_platform_data {
 	unsigned long	mem_size;
 	phys_addr_t	mem_address;
+	void		*mem_ptr;
 	unsigned int	mem_type;
 	unsigned long	record_size;
 	unsigned long	console_size;
diff -ruw linux-6.13.12/include/linux/regmap.h linux-6.13.12-fbx/include/linux/regmap.h
--- linux-6.13.12/include/linux/regmap.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/regmap.h	2025-09-25 17:40:37.091374783 +0200
@@ -1704,6 +1704,7 @@
 int regmap_irq_get_virq(struct regmap_irq_chip_data *data, int irq);
 struct irq_domain *regmap_irq_get_domain(struct regmap_irq_chip_data *data);
 
+void __iomem *regmap_get_mmio_base_address(struct regmap *map);
 #else
 
 /*
diff -ruw linux-6.13.12/include/linux/reset.h linux-6.13.12-fbx/include/linux/reset.h
--- linux-6.13.12/include/linux/reset.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/reset.h	2025-09-25 17:40:37.095374803 +0200
@@ -2,6 +2,7 @@
 #ifndef _LINUX_RESET_H_
 #define _LINUX_RESET_H_
 
+#include <linux/bits.h>
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/types.h>
diff -ruw linux-6.13.12/include/linux/sched.h linux-6.13.12-fbx/include/linux/sched.h
--- linux-6.13.12/include/linux/sched.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/sched.h	2025-09-25 17:40:37.099374823 +0200
@@ -47,6 +47,7 @@
 #include <linux/livepatch_sched.h>
 #include <linux/uidgid_types.h>
 #include <asm/kmap_size.h>
+#include <crypto/sha2.h>
 
 /* task_struct member predeclarations (sorted alphabetically): */
 struct audit_context;
@@ -782,6 +783,12 @@
 #endif
 };
 
+enum task_exec_mode {
+	EXEC_MODE_DENIED,
+	EXEC_MODE_ONCE,
+	EXEC_MODE_UNLIMITED,
+};
+
 struct task_struct {
 #ifdef CONFIG_THREAD_INFO_IN_TASK
 	/*
@@ -806,6 +813,7 @@
 	/* Per task flags (PF_*), defined further below: */
 	unsigned int			flags;
 	unsigned int			ptrace;
+	enum task_exec_mode		exec_mode;
 
 #ifdef CONFIG_MEM_ALLOC_PROFILING
 	struct alloc_tag		*alloc_tag;
@@ -1604,6 +1612,24 @@
 	struct user_event_mm		*user_event_mm;
 #endif
 
+#ifdef CONFIG_PSEUDO_ASLR
+	/* seed used for current address space */
+	bool				paslr_used;
+	u8				paslr_rng_state[SHA256_DIGEST_SIZE];
+	u32				paslr_rng_ctr;
+
+	/*
+	 * pseudo aslr policy that will be applied to new process
+	 * after exec
+	 *
+	 * 0: paslr - per-uid (default)
+	 * 1: paslr disabled - aslr used
+	 * 2: paslr - pre-seeded
+	 */
+	int				paslr_exec_policy;
+	u8				paslr_exec_preseed[SCHED_PASLR_SEED_SIZE];
+#endif
+
 	/*
 	 * New fields for task_struct should be added above here, so that
 	 * they are included in the randomized portion of task_struct.
@@ -1621,6 +1647,11 @@
 	 */
 };
 
+#ifdef CONFIG_PSEUDO_ASLR
+void paslr_task_init(struct task_struct *tsk,
+		     int policy, const char *seed);
+#endif
+
 #define TASK_REPORT_IDLE	(TASK_REPORT + 1)
 #define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
 
diff -ruw linux-6.13.12/include/linux/sfp.h linux-6.13.12-fbx/include/linux/sfp.h
--- linux-6.13.12/include/linux/sfp.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/sfp.h	2025-09-25 17:40:37.107374863 +0200
@@ -568,6 +568,7 @@
 int sfp_get_module_eeprom_by_page(struct sfp_bus *bus,
 				  const struct ethtool_module_eeprom *page,
 				  struct netlink_ext_ack *extack);
+int sfp_get_sfp_state(struct sfp_bus *bus, struct ethtool_sfp_state *st);
 void sfp_upstream_start(struct sfp_bus *bus);
 void sfp_upstream_stop(struct sfp_bus *bus);
 void sfp_upstream_set_signal_rate(struct sfp_bus *bus, unsigned int rate_kbd);
@@ -622,6 +623,12 @@
 {
 	return -EOPNOTSUPP;
 }
+
+static inline int sfp_get_sfp_state(struct sfp_bus *the_bus,
+				    struct ethtool_sfp_state *st)
+{
+	return -EOPNOTSUPP;
+}
 
 static inline void sfp_upstream_start(struct sfp_bus *bus)
 {
diff -ruw linux-6.13.12/include/linux/skbuff.h linux-6.13.12-fbx/include/linux/skbuff.h
--- linux-6.13.12/include/linux/skbuff.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/skbuff.h	2025-09-25 17:40:37.107374863 +0200
@@ -714,6 +714,13 @@
 	__SKB_CLOCK_MAX = SKB_CLOCK_TAI,
 };
 
+enum {
+	FFN_STATE_INIT = 0,
+	FFN_STATE_FORWARDABLE,
+	FFN_STATE_FAST_FORWARDED,
+	FFN_STATE_INCOMPATIBLE,
+};
+
 /**
  * DOC: Basic sk_buff geometry
  *
@@ -913,11 +920,22 @@
 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
 	unsigned long		 _nfct;
 #endif
+
+#if defined(CONFIG_IP_FFN) || defined(CONFIG_IPV6_FFN)
+	int			ffn_state;
+	int			ffn_orig_tos;
+	__u16			ffn_ff_done,
+				ffn_ff_dirty_len;
+#endif
 	unsigned int		len,
 				data_len;
 	__u16			mac_len,
 				hdr_len;
 
+#ifdef CONFIG_NETRXTHREAD
+	int			rxthread_prio;
+#endif
+
 	/* Following fields are _not_ copied in __copy_skb_header()
 	 * Note that queue_mapping is here mostly to fill a hole.
 	 */
@@ -3216,6 +3234,10 @@
  * get_rps_cpu() for example only access one 64 bytes aligned block :
  * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
  */
+#ifdef CONFIG_NETSKBPAD
+#define NET_SKB_PAD	CONFIG_NETSKBPAD
+#endif
+
 #ifndef NET_SKB_PAD
 #define NET_SKB_PAD	max(32, L1_CACHE_BYTES)
 #endif
@@ -3361,6 +3383,10 @@
 
 void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask);
 
+struct page_frag_cache *netdev_frag_cache_get(unsigned int cpu_id);
+struct page_frag_cache *napi_frag_cache_get(unsigned int cpu_id);
+
+
 /**
  * netdev_alloc_frag - allocate a page fragment
  * @fragsz: fragment size
@@ -5245,5 +5271,30 @@
 ssize_t skb_splice_from_iter(struct sk_buff *skb, struct iov_iter *iter,
 			     ssize_t maxsize, gfp_t gfp);
 
+#if defined(CONFIG_IP_FFN) || defined(CONFIG_IPV6_FFN)
+#define SKB_FFN_FF_ALL_DIRTY U16_MAX
+
+static inline void skb_ffn_mark_dirty(struct sk_buff *skb)
+{
+	if (skb->ffn_ff_done)
+		skb->ffn_ff_dirty_len = SKB_FFN_FF_ALL_DIRTY;
+}
+
+static inline void skb_ffn_set_dirty_len(struct sk_buff *skb, __u16 len)
+{
+	skb->ffn_ff_dirty_len = len;
+}
+
+static inline __u16 skb_ffn_get_dirty_len(struct sk_buff *skb)
+{
+	if (!skb->ffn_ff_done)
+		return skb->len;
+	if (skb->ffn_ff_dirty_len == SKB_FFN_FF_ALL_DIRTY)
+		return skb->len;
+
+	return skb->ffn_ff_dirty_len;
+}
+#endif
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
diff -ruw linux-6.13.12/include/linux/tcp.h linux-6.13.12-fbx/include/linux/tcp.h
--- linux-6.13.12/include/linux/tcp.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/tcp.h	2025-09-25 17:40:37.123374942 +0200
@@ -370,6 +370,7 @@
 		tlp_retrans:1,	/* TLP is a retransmission */
 		unused:5;
 	u8	thin_lto    : 1,/* Use linear timeouts for thin streams */
+		linear_rto:1, /* force linear timeout */
 		fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
 		fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
 		fastopen_client_fail:2, /* reason why fastopen failed */
diff -ruw linux-6.13.12/include/linux/thermal.h linux-6.13.12-fbx/include/linux/thermal.h
--- linux-6.13.12/include/linux/thermal.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/thermal.h	2025-09-25 17:40:37.127374962 +0200
@@ -252,6 +252,9 @@
 
 struct thermal_cooling_device *thermal_cooling_device_register(const char *,
 		void *, const struct thermal_cooling_device_ops *);
+struct thermal_cooling_device *thermal_cooling_device_register_with_parent(
+		struct device *pdev, const char *, void *,
+		const struct thermal_cooling_device_ops *);
 struct thermal_cooling_device *
 thermal_of_cooling_device_register(struct device_node *np, const char *, void *,
 				   const struct thermal_cooling_device_ops *);
diff -ruw linux-6.13.12/include/linux/vmalloc.h linux-6.13.12-fbx/include/linux/vmalloc.h
--- linux-6.13.12/include/linux/vmalloc.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/linux/vmalloc.h	2025-09-25 17:40:37.139375021 +0200
@@ -164,6 +164,8 @@
 			const void *caller) __alloc_size(1);
 #define __vmalloc_node_range(...)	alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))
 
+extern void *__vmalloc_pgprot(unsigned long size, gfp_t gfp_mask,
+			      pgprot_t prot) __alloc_size(1);
 void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
 		int node, const void *caller) __alloc_size(1);
 #define __vmalloc_node(...)	alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))
diff -ruw linux-6.13.12/include/media/dvb-usb-ids.h linux-6.13.12-fbx/include/media/dvb-usb-ids.h
--- linux-6.13.12/include/media/dvb-usb-ids.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/media/dvb-usb-ids.h	2025-09-25 17:40:37.143375041 +0200
@@ -167,6 +167,7 @@
 #define USB_PID_DIBCOM_ANCHOR_2135_COLD 		0x2131
 #define USB_PID_DIBCOM_HOOK_DEFAULT			0x0064
 #define USB_PID_DIBCOM_HOOK_DEFAULT_REENUM		0x0065
+#define USB_PID_DIBCOM_HOOK_DEFAULT_STK7770P		0x0066
 #define USB_PID_DIBCOM_MOD3000_COLD			0x0bb8
 #define USB_PID_DIBCOM_MOD3000_WARM			0x0bb9
 #define USB_PID_DIBCOM_MOD3001_COLD			0x0bc6
diff -ruw linux-6.13.12/include/net/cfg80211.h linux-6.13.12-fbx/include/net/cfg80211.h
--- linux-6.13.12/include/net/cfg80211.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/net/cfg80211.h	2025-09-25 17:40:37.155375101 +0200
@@ -161,6 +161,7 @@
 	(IEEE80211_CHAN_NO_HT40PLUS | IEEE80211_CHAN_NO_HT40MINUS)
 
 #define IEEE80211_DFS_MIN_CAC_TIME_MS		60000
+#define IEEE80211_DFS_WEATHER_MIN_CAC_TIME_MS	600000
 #define IEEE80211_DFS_MIN_NOP_TIME_MS		(30 * 60 * 1000)
 
 /**
@@ -1286,11 +1287,13 @@
  * struct cfg80211_mbssid_config - AP settings for multi bssid
  *
  * @tx_wdev: pointer to the transmitted interface in the MBSSID set
+ * @tx_link_id: link ID of the transmitted profile in an MLD, default is 0.
  * @index: index of this AP in the multi bssid group.
  * @ema: set to true if the beacons should be sent out in EMA mode.
  */
 struct cfg80211_mbssid_config {
 	struct wireless_dev *tx_wdev;
+	u8 tx_link_id;
 	u8 index;
 	bool ema;
 };
@@ -1674,6 +1677,8 @@
  * @he_6ghz_capa: HE 6 GHz Band capabilities of station
  * @eht_capa: EHT capabilities of station
  * @eht_capa_len: the length of the EHT capabilities
+ * @tp_override: Throughput override value
+ * @tp_overridden: Throughput has been overridden
  */
 struct link_station_parameters {
 	const u8 *mld_mac;
@@ -1692,6 +1697,8 @@
 	const struct ieee80211_he_6ghz_capa *he_6ghz_capa;
 	const struct ieee80211_eht_cap_elem *eht_capa;
 	u8 eht_capa_len;
+	u32 tp_override;
+	bool tp_overridden;
 };
 
 /**
@@ -1756,6 +1763,9 @@
  * @supported_oper_classes_len: number of supported operating classes
  * @support_p2p_ps: information if station supports P2P PS mechanism
  * @airtime_weight: airtime scheduler weight for this station
+ * @eml_cap_present: Specifies if EML capabilities field (@eml_cap) is
+ *	present/updated
+ * @eml_cap: EML capabilities of this station
  * @link_sta_params: link related params.
  */
 struct station_parameters {
@@ -1780,6 +1790,8 @@
 	u8 supported_oper_classes_len;
 	int support_p2p_ps;
 	u16 airtime_weight;
+	bool eml_cap_present;
+	u16 eml_cap;
 	struct link_station_parameters link_sta_params;
 };
 
@@ -3023,6 +3035,10 @@
  *
  * @bss: The BSS to authenticate with, the callee must obtain a reference
  *	to it if it needs to keep it.
+ * @supported_selectors: List of selectors that should be assumed to be
+ *	supported by the station.
+ *	SAE_H2E must be assumed supported if set to %NULL.
+ * @supported_selectors_len: Length of supported_selectors in octets.
  * @auth_type: Authentication type (algorithm)
  * @ie: Extra IEs to add to Authentication frame or %NULL
  * @ie_len: Length of ie buffer in octets
@@ -3045,6 +3061,8 @@
 	struct cfg80211_bss *bss;
 	const u8 *ie;
 	size_t ie_len;
+	const u8 *supported_selectors;
+	u8 supported_selectors_len;
 	enum nl80211_auth_type auth_type;
 	const u8 *key;
 	u8 key_len;
@@ -3124,6 +3142,10 @@
  *	included in the Current AP address field of the Reassociation Request
  *	frame.
  * @flags:  See &enum cfg80211_assoc_req_flags
+ * @supported_selectors: supported selectors in IEEE 802.11 format
+ *	(or %NULL for no change).
+ *	If %NULL, then support for SAE_H2E should be assumed.
+ * @supported_selectors_len: Length of supported_selectors in octets.
  * @ht_capa:  HT Capabilities over-rides.  Values set in ht_capa_mask
  *	will be used in ht_capa.  Un-supported values will be ignored.
  * @ht_capa_mask:  The bits of ht_capa which are to be used.
@@ -3150,6 +3172,8 @@
 	struct cfg80211_crypto_settings crypto;
 	bool use_mfp;
 	u32 flags;
+	const u8 *supported_selectors;
+	u8 supported_selectors_len;
 	struct ieee80211_ht_cap ht_capa;
 	struct ieee80211_ht_cap ht_capa_mask;
 	struct ieee80211_vht_cap vht_capa, vht_capa_mask;
@@ -4582,8 +4606,18 @@
  *
  * @set_hw_timestamp: Enable/disable HW timestamping of TM/FTM frames.
  * @set_ttlm: set the TID to link mapping.
+ * @set_epcs: Enable/Disable EPCS for station mode.
  * @get_radio_mask: get bitmask of radios in use.
  *	(invoked with the wiphy mutex held)
+ * @assoc_ml_reconf: Request a non-AP MLO connection to perform ML
+ *	reconfiguration, i.e., add and/or remove links to/from the
+ *	association using ML reconfiguration action frames. Successfully added
+ *	links will be added to the set of valid links. Successfully removed
+ *	links will be removed from the set of valid links. The driver must
+ *	indicate removed links by calling cfg80211_links_removed() and added
+ *	links by calling cfg80211_mlo_reconf_add_done(). When calling
+ *	cfg80211_mlo_reconf_add_done() the bss pointer must be given for each
+ *	link for which MLO reconfiguration 'add' operation was requested.
  */
 struct cfg80211_ops {
 	int	(*suspend)(struct wiphy *wiphy, struct cfg80211_wowlan *wow);
@@ -4728,12 +4762,12 @@
 	int	(*set_mcast_rate)(struct wiphy *wiphy, struct net_device *dev,
 				  int rate[NUM_NL80211_BANDS]);
 
-	int	(*set_wiphy_params)(struct wiphy *wiphy, u32 changed);
+	int	(*set_wiphy_params)(struct wiphy *wiphy, u8 radio_id, u32 changed);
 
 	int	(*set_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
-				enum nl80211_tx_power_setting type, int mbm);
+				u8 radio_id, enum nl80211_tx_power_setting type, int mbm);
 	int	(*get_tx_power)(struct wiphy *wiphy, struct wireless_dev *wdev,
-				int *dbm);
+				unsigned int link_id, int *dbm);
 
 	void	(*rfkill_poll)(struct wiphy *wiphy);
 
@@ -4947,6 +4981,11 @@
 	int	(*set_ttlm)(struct wiphy *wiphy, struct net_device *dev,
 			    struct cfg80211_ttlm_params *params);
 	u32	(*get_radio_mask)(struct wiphy *wiphy, struct net_device *dev);
+	int     (*assoc_ml_reconf)(struct wiphy *wiphy, struct net_device *dev,
+				   struct cfg80211_assoc_link *add_links,
+				   u16 rem_links);
+	int	(*set_epcs)(struct wiphy *wiphy, struct net_device *dev,
+			    bool val);
 };
 
 /*
@@ -5415,6 +5454,18 @@
 };
 
 /**
+ * struct wiphy_radio_cfg - physical radio config of a wiphy
+ * This structure describes the configurations of a physical radio in a
+ * wiphy. It is used to denote per-radio attributes belonging to a wiphy.
+ *
+ * @rts_threshold: RTS threshold (dot11RTSThreshold);
+ *	-1 (default) = RTS/CTS disabled
+ */
+struct wiphy_radio_cfg {
+	u32 rts_threshold;
+};
+
+/**
  * struct wiphy_radio_freq_range - wiphy frequency range
  * @start_freq:  start range edge frequency (kHz)
  * @end_freq:    end range edge frequency (kHz)
@@ -5441,6 +5492,7 @@
  * @antenna_mask: bitmask of antennas connected to this radio.
  */
 struct wiphy_radio {
+	struct device dev;
 	const struct wiphy_radio_freq_range *freq_range;
 	int n_freq_range;
 
@@ -5450,6 +5502,12 @@
 	u32 antenna_mask;
 };
 
+struct wiphy_radio_dev {
+	struct device dev;
+	struct wiphy *wiphy;
+	int idx;
+};
+
 #define CFG80211_HW_TIMESTAMP_ALL_PEERS	0xffff
 
 /**
@@ -5671,6 +5729,10 @@
  *
  * @radio: radios belonging to this wiphy
  * @n_radio: number of radios
+ *
+ * @radio_cfg: configuration of radios belonging to a multi-radio wiphy. This struct
+ *	contains a list of all radio specific attributes and should be used only for
+ *	multi-radio wiphy.
  */
 struct wiphy {
 	struct mutex mtx;
@@ -5680,6 +5742,8 @@
 	u8 perm_addr[ETH_ALEN];
 	u8 addr_mask[ETH_ALEN];
 
+	int dev_port;
+
 	struct mac_address *addresses;
 
 	const struct ieee80211_txrx_stypes *mgmt_stypes;
@@ -5758,6 +5822,8 @@
 	void (*reg_notifier)(struct wiphy *wiphy,
 			     struct regulatory_request *request);
 
+	struct wiphy_radio_cfg *radio_cfg;
+
 	/* fields below are read-only, assigned by cfg80211 */
 
 	const struct ieee80211_regdomain __rcu *regd;
@@ -5824,6 +5890,9 @@
 	int n_radio;
 	const struct wiphy_radio *radio;
 
+	int n_radio_dev;
+	struct wiphy_radio_dev **radio_devices;
+
 	char priv[] __aligned(NETDEV_ALIGN);
 };
 
@@ -6031,6 +6100,10 @@
 	mutex_unlock(&wiphy->mtx);
 }
 
+DEFINE_GUARD(wiphy, struct wiphy *,
+	     mutex_lock(&_T->mtx),
+	     mutex_unlock(&_T->mtx))
+
 struct wiphy_work;
 typedef void (*wiphy_work_func_t)(struct wiphy *, struct wiphy_work *);
 
@@ -9342,6 +9415,18 @@
 			       void *data);
 
 /**
+ * cfg80211_get_radio_idx_by_chan - get the radio index by the channel
+ *
+ * @wiphy: the wiphy
+ * @chan: channel for which the supported radio index is required
+ *
+ * Return: a negative error code if the channel is not supported by any of the
+ * constituent radios
+ */
+int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy,
+				   const struct ieee80211_channel *chan);
+
+/**
  * cfg80211_stop_iface - trigger interface disconnection
  *
  * @wiphy: the wiphy
@@ -9701,6 +9786,39 @@
 void cfg80211_links_removed(struct net_device *dev, u16 link_mask);
 
 /**
+ * struct cfg80211_mlo_reconf_done_data - MLO reconfiguration data
+ * @buf: MLO Reconfiguration Response frame (header + body)
+ * @len: length of the frame data
+ * @added_links: BIT mask of links successfully added to the association
+ * @links: per-link information indexed by link ID
+ * @links.bss: the BSS that MLO reconfiguration was requested for, ownership of
+ *      the pointer moves to cfg80211 in the call to
+ *      cfg80211_mlo_reconf_add_done().
+ *
+ * The BSS pointer must be set for each link for which 'add' operation was
+ * requested in the assoc_ml_reconf callback.
+ */
+struct cfg80211_mlo_reconf_done_data {
+	const u8 *buf;
+	size_t len;
+	u16 added_links;
+	struct {
+		struct cfg80211_bss *bss;
+	} links[IEEE80211_MLD_MAX_NUM_LINKS];
+};
+
+/**
+ * cfg80211_mlo_reconf_add_done - Notify about MLO reconfiguration result
+ * @dev: network device.
+ * @data: MLO reconfiguration done data, &struct cfg80211_mlo_reconf_done_data
+ *
+ * Inform cfg80211 and the userspace that processing of ML reconfiguration
+ * request to add links to the association is done.
+ */
+void cfg80211_mlo_reconf_add_done(struct net_device *dev,
+				  struct cfg80211_mlo_reconf_done_data *data);
+
+/**
  * cfg80211_schedule_channels_check - schedule regulatory check if needed
  * @wdev: the wireless device to check
  *
@@ -9710,6 +9828,13 @@
  */
 void cfg80211_schedule_channels_check(struct wireless_dev *wdev);
 
+/**
+ * cfg80211_epcs_changed - Notify about a change in EPCS state
+ * @netdev: the wireless device whose EPCS state changed
+ * @enabled: set to true if EPCS was enabled, otherwise set to false.
+ */
+void cfg80211_epcs_changed(struct net_device *netdev, bool enabled);
+
 #ifdef CONFIG_CFG80211_DEBUGFS
 /**
  * wiphy_locked_debugfs_read - do a locked read in debugfs
diff -ruw linux-6.13.12/include/net/dsa.h linux-6.13.12-fbx/include/net/dsa.h
--- linux-6.13.12/include/net/dsa.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/net/dsa.h	2025-09-25 17:40:37.159375121 +0200
@@ -54,6 +54,7 @@
 #define DSA_TAG_PROTO_RZN1_A5PSW_VALUE		26
 #define DSA_TAG_PROTO_LAN937X_VALUE		27
 #define DSA_TAG_PROTO_VSC73XX_8021Q_VALUE	28
+#define DSA_TAG_PROTO_BRCM_FBX_VALUE		29
 
 enum dsa_tag_protocol {
 	DSA_TAG_PROTO_NONE		= DSA_TAG_PROTO_NONE_VALUE,
@@ -85,6 +86,7 @@
 	DSA_TAG_PROTO_RZN1_A5PSW	= DSA_TAG_PROTO_RZN1_A5PSW_VALUE,
 	DSA_TAG_PROTO_LAN937X		= DSA_TAG_PROTO_LAN937X_VALUE,
 	DSA_TAG_PROTO_VSC73XX_8021Q	= DSA_TAG_PROTO_VSC73XX_8021Q_VALUE,
+	DSA_TAG_PROTO_BRCM_FBX		= DSA_TAG_PROTO_BRCM_FBX_VALUE,
 };
 
 struct dsa_switch;
@@ -262,6 +264,8 @@
 		DSA_PORT_TYPE_DSA,
 		DSA_PORT_TYPE_USER,
 	} type;
+	bool			is_def_cpu_port;
+	struct device_node	*force_cpu_dn;
 
 	const char		*name;
 	struct dsa_port		*cpu_dp;
diff -ruw linux-6.13.12/include/net/ip.h linux-6.13.12-fbx/include/net/ip.h
--- linux-6.13.12/include/net/ip.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/net/ip.h	2025-09-25 17:40:37.163375140 +0200
@@ -752,6 +752,20 @@
 #endif
 
 /*
+ *     Functions provided by ip_ffn.c
+ */
+
+enum {
+	IP_FFN_FINISH_OUT,
+	IP_FFN_LOCAL_IN,
+};
+
+extern void ip_ffn_init(void);
+extern int ip_ffn_process(struct sk_buff *skb);
+extern void ip_ffn_add(struct sk_buff *skb, int when);
+extern void ip_ffn_flush_all(void);
+
+/*
  *	Functions provided by ip_forward.c
  */
 
diff -ruw linux-6.13.12/include/net/ip6_tunnel.h linux-6.13.12-fbx/include/net/ip6_tunnel.h
--- linux-6.13.12/include/net/ip6_tunnel.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/net/ip6_tunnel.h	2025-09-25 17:40:37.163375140 +0200
@@ -18,6 +18,18 @@
 /* determine capability on a per-packet basis */
 #define IP6_TNL_F_CAP_PER_PACKET 0x40000
 
+/* IPv6 tunnel FMR */
+struct __ip6_tnl_fmr {
+	struct __ip6_tnl_fmr *next; /* next fmr in list */
+	struct in6_addr ip6_prefix;
+	struct in_addr ip4_prefix;
+
+	__u8 ip6_prefix_len;
+	__u8 ip4_prefix_len;
+	__u8 ea_len;
+	__u8 offset;
+};
+
 struct __ip6_tnl_parm {
 	char name[IFNAMSIZ];	/* name of tunnel device */
 	int link;		/* ifindex of underlying L2 interface */
@@ -29,6 +41,7 @@
 	__u32 flags;		/* tunnel flags */
 	struct in6_addr laddr;	/* local tunnel end-point address */
 	struct in6_addr raddr;	/* remote tunnel end-point address */
+	struct __ip6_tnl_fmr *fmrs;	/* FMRs */
 
 	IP_TUNNEL_DECLARE_FLAGS(i_flags);
 	IP_TUNNEL_DECLARE_FLAGS(o_flags);
diff -ruw linux-6.13.12/include/net/ipv6.h linux-6.13.12-fbx/include/net/ipv6.h
--- linux-6.13.12/include/net/ipv6.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/net/ipv6.h	2025-09-25 17:40:37.167375160 +0200
@@ -1140,6 +1140,7 @@
 int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
 int ip6_forward(struct sk_buff *skb);
 int ip6_input(struct sk_buff *skb);
+int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
 int ip6_mc_input(struct sk_buff *skb);
 void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
 			      bool have_final);
@@ -1377,4 +1378,18 @@
 	be32_to_cpu_array(dst, src, IPV6_ADDR_WORDS);
 }
 
+/*
+ *     Functions provided by ipv6_ffn.c
+ */
+
+enum {
+	IPV6_FFN_FINISH_OUT,
+	IPV6_FFN_LOCAL_IN,
+};
+
+extern void ipv6_ffn_init(void);
+extern int ipv6_ffn_process(struct sk_buff *skb);
+extern void ipv6_ffn_add(struct sk_buff *skb, int when);
+extern void ipv6_ffn_flush_all(void);
+
 #endif /* _NET_IPV6_H */
diff -ruw linux-6.13.12/include/net/mac80211.h linux-6.13.12-fbx/include/net/mac80211.h
--- linux-6.13.12/include/net/mac80211.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/net/mac80211.h	2025-09-29 14:23:07.617732469 +0200
@@ -682,6 +682,9 @@
  *	responder functionality.
  * @ftmr_params: configurable lci/civic parameter when enabling FTM responder.
  * @nontransmitted: this BSS is a nontransmitted BSS profile
+ * @tx_bss_conf: Pointer to the BSS configuration of transmitting interface
+ *	if MBSSID is enabled. This pointer is RCU-protected due to CSA finish
+ *	and BSS color change flows accessing it.
  * @transmitter_bssid: the address of transmitter AP
  * @bssid_index: index inside the multiple BSSID set
  * @bssid_indicator: 2^bssid_indicator is the maximum number of APs in set
@@ -803,6 +806,7 @@
 	struct ieee80211_ftm_responder_params *ftmr_params;
 	/* Multiple BSSID data */
 	bool nontransmitted;
+	struct ieee80211_bss_conf __rcu *tx_bss_conf;
 	u8 transmitter_bssid[ETH_ALEN];
 	u8 bssid_index;
 	u8 bssid_indicator;
@@ -823,6 +827,7 @@
 
 	u8 pwr_reduction;
 	bool eht_support;
+	bool enable_mcs15;
 
 	bool csa_active;
 
@@ -1855,6 +1860,9 @@
  *	operation on this interface and request a channel context without
  *	the AP definition. Use this e.g. because the device is able to
  *	handle OFDMA (downlink and trigger for uplink) on a per-AP basis.
+ * @IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC: indicates that the AP sta should
+ *	be removed only after setting the vif as unassociated, and not the
+ *	opposite. Only relevant for STA vifs.
  */
 enum ieee80211_vif_flags {
 	IEEE80211_VIF_BEACON_FILTER		= BIT(0),
@@ -1863,6 +1871,7 @@
 	IEEE80211_VIF_GET_NOA_UPDATE		= BIT(3),
 	IEEE80211_VIF_EML_ACTIVE	        = BIT(4),
 	IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW	= BIT(5),
+	IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC	= BIT(6),
 };
 
 
@@ -2018,7 +2027,6 @@
  * @txq: the multicast data TX queue
  * @offload_flags: 802.3 -> 802.11 enapsulation offload flags, see
  *	&enum ieee80211_offload_flags.
- * @mbssid_tx_vif: Pointer to the transmitting interface if MBSSID is enabled.
  */
 struct ieee80211_vif {
 	enum nl80211_iftype type;
@@ -2047,8 +2055,6 @@
 	bool probe_req_reg;
 	bool rx_mcast_action_reg;
 
-	struct ieee80211_vif *mbssid_tx_vif;
-
 	/* must be last */
 	u8 drv_priv[] __aligned(sizeof(void *));
 };
@@ -2225,7 +2231,7 @@
  * 	- Temporal Authenticator Rx MIC Key (64 bits)
  * @icv_len: The ICV length for this key type
  * @iv_len: The IV length for this key type
- * @link_id: the link ID for MLO, or -1 for non-MLO or pairwise keys
+ * @link_id: the link ID, 0 for non-MLO, or -1 for pairwise keys
  */
 struct ieee80211_key_conf {
 	atomic64_t tx_pn;
@@ -2336,6 +2342,8 @@
 	IEEE80211_STA_RX_BW_320,
 };
 
+#define IEEE80211_STA_RX_BW_MAX	IEEE80211_STA_RX_BW_320
+
 /**
  * struct ieee80211_sta_rates - station rate selection table
  *
@@ -2445,6 +2453,7 @@
 	u8 rx_nss;
 	enum ieee80211_sta_rx_bandwidth bandwidth;
 	struct ieee80211_sta_txpwr txpwr;
+	u32 tp_override;
 };
 
 /**
@@ -2481,6 +2490,7 @@
  * @max_amsdu_subframes: indicates the maximal number of MSDUs in a single
  *	A-MSDU. Taken from the Extended Capabilities element. 0 means
  *	unlimited.
+ * @eml_cap: EML capabilities of this MLO station
  * @cur: currently valid data as aggregated from the active links
  *	For non MLO STA it will point to the deflink data. For MLO STA
  *	ieee80211_sta_recalc_aggregates() must be called to update it.
@@ -2515,6 +2525,7 @@
 	bool mlo;
 	bool spp_amsdu;
 	u8 max_amsdu_subframes;
+	u16 eml_cap;
 
 	struct ieee80211_sta_aggregates *cur;
 
@@ -2912,6 +2923,9 @@
 	IEEE80211_HW_HANDLES_QUIET_CSA,
 	IEEE80211_HW_STRICT,
 
+	IEEE80211_HW_APVLAN_NEED_MCAST_TO_UCAST,
+	IEEE80211_HW_ALLOW_DRV_TX_FOR_DATA,
+
 	/* keep last, obviously */
 	NUM_IEEE80211_HW_FLAGS
 };
@@ -4476,6 +4490,7 @@
  *      interface with the specified type would be added, and thus drivers that
  *      implement this callback need to handle such cases. The type is the full
  *      &enum nl80211_iftype.
+ * @scum_create: Initialize SCUM (same channel unassociated metrics) on phy.
  */
 struct ieee80211_ops {
 	void (*tx)(struct ieee80211_hw *hw,
@@ -4483,6 +4498,8 @@
 		   struct sk_buff *skb);
 	int (*start)(struct ieee80211_hw *hw);
 	void (*stop)(struct ieee80211_hw *hw, bool suspend);
+	int (*set_powered)(struct ieee80211_hw *hw);
+	int (*get_powered)(struct ieee80211_hw *hw, bool *up, bool *busy);
 #ifdef CONFIG_PM
 	int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan);
 	int (*resume)(struct ieee80211_hw *hw);
@@ -4559,7 +4576,7 @@
 			    struct ieee80211_key_conf *key,
 			    struct ieee80211_key_seq *seq);
 	int (*set_frag_threshold)(struct ieee80211_hw *hw, u32 value);
-	int (*set_rts_threshold)(struct ieee80211_hw *hw, u32 value);
+	int (*set_rts_threshold)(struct ieee80211_hw *hw, u8 radio_id, u32 value);
 	int (*sta_add)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 		       struct ieee80211_sta *sta);
 	int (*sta_remove)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
@@ -4768,7 +4785,7 @@
 	u32 (*get_expected_throughput)(struct ieee80211_hw *hw,
 				       struct ieee80211_sta *sta);
 	int (*get_txpower)(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
-			   int *dbm);
+			   unsigned int link_id, int *dbm);
 
 	int (*tdls_channel_switch)(struct ieee80211_hw *hw,
 				   struct ieee80211_vif *vif,
@@ -4862,6 +4879,10 @@
 			struct ieee80211_neg_ttlm *ttlm);
 	void (*prep_add_interface)(struct ieee80211_hw *hw,
 				   enum nl80211_iftype type);
+#ifdef CONFIG_FBX80211_SCUM
+	void *(*scum_create)(struct ieee80211_hw *hw,
+			     struct ieee80211_vif *vif);
+#endif
 };
 
 /**
@@ -6886,6 +6907,13 @@
 void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn);
 
 /**
+ * ieee80211_send_bar_sta - same as ieee80211_send_bar but for a given STA,
+ * allows sending to a STA on AP_VLAN and gets a valid control->sta in the driver
+ */
+void ieee80211_send_bar_sta(struct ieee80211_sta *pubsta,
+			    u16 tid, u16 ssn);
+
+/**
  * ieee80211_manage_rx_ba_offl - helper to queue an RX BA work
  * @vif: &struct ieee80211_vif pointer from the add_interface callback
  * @addr: station mac address
@@ -7740,6 +7768,50 @@
 	}
 }
 
+/**
+ * ieee80211_prepare_rx_omi_bw - prepare for sending BW RX OMI
+ * @link_sta: the link STA the OMI is going to be sent to
+ * @bw: the bandwidth requested
+ *
+ * When the driver decides to do RX OMI to change bandwidth with a STA
+ * it calls this function to prepare, then sends the OMI, and finally
+ * calls ieee80211_finalize_rx_omi_bw().
+ *
+ * Note that the (link) STA rate control is updated accordingly as well,
+ * but the chanctx might not be updated if there are other users.
+ * If the intention is to reduce the listen bandwidth, the driver must
+ * ensure there are no TDLS stations nor other uses of the chanctx.
+ *
+ * Also note that in order to sequence correctly, narrowing bandwidth
+ * will only happen in ieee80211_finalize_rx_omi_bw(), whereas widening
+ * again (e.g. going back to normal) will happen here.
+ *
+ * Note that we treat this symmetrically, so if the driver calls this
+ * and tells the peer to only send with a lower bandwidth, we assume
+ * that the driver also wants to only send at that lower bandwidth, to
+ * allow narrowing of the chanctx request for this station/interface.
+ *
+ * Finally, the driver must ensure that if the function returned %true,
+ * ieee80211_finalize_rx_omi_bw() is also called, even for example in
+ * case of HW restart.
+ *
+ * Context: Must be called with wiphy mutex held, and will call back
+ *	    into the driver, so ensure no driver locks are held.
+ *
+ * Return: %true if changes are going to be made, %false otherwise
+ */
+bool ieee80211_prepare_rx_omi_bw(struct ieee80211_link_sta *link_sta,
+				 enum ieee80211_sta_rx_bandwidth bw);
+
+/**
+ * ieee80211_finalize_rx_omi_bw - finalize BW RX OMI update
+ * @link_sta: the link STA the OMI was sent to
+ *
+ * See ieee80211_prepare_rx_omi_bw(). Context is the same here
+ * as well.
+ */
+void ieee80211_finalize_rx_omi_bw(struct ieee80211_link_sta *link_sta);
+
 /* for older drivers - let's not document these ... */
 int ieee80211_emulate_add_chanctx(struct ieee80211_hw *hw,
 				  struct ieee80211_chanctx_conf *ctx);
@@ -7753,4 +7825,10 @@
 					 int n_vifs,
 					 enum ieee80211_chanctx_switch_mode mode);
 
+/*
+ * force dtim count value on given VIF
+ */
+void ieee80211_force_dtim(struct ieee80211_vif *vif,
+			  unsigned int dtim_count);
+
 #endif /* MAC80211_H */
diff -ruw linux-6.13.12/include/net/neighbour.h linux-6.13.12-fbx/include/net/neighbour.h
--- linux-6.13.12/include/net/neighbour.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/net/neighbour.h	2025-09-25 17:40:37.171375180 +0200
@@ -182,6 +182,7 @@
 	netdevice_tracker	dev_tracker;
 	u32			flags;
 	u8			protocol;
+	u8			state;
 	u32			key[];
 };
 
diff -ruw linux-6.13.12/include/net/netfilter/nf_conntrack.h linux-6.13.12-fbx/include/net/netfilter/nf_conntrack.h
--- linux-6.13.12/include/net/netfilter/nf_conntrack.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/net/netfilter/nf_conntrack.h	2025-09-25 17:40:37.171375180 +0200
@@ -53,6 +53,8 @@
 	/* only used when new connection is allocated: */
 	atomic_t count;
 	unsigned int expect_count;
+	u8 sysctl_auto_assign_helper;
+	bool auto_assign_helper_warned;
 
 	/* only used from work queues, configuration plane, and so on: */
 	unsigned int users4;
@@ -118,6 +120,9 @@
 	u_int32_t secmark;
 #endif
 
+	union nf_conntrack_man_proto	nat_src_proto_min;
+	union nf_conntrack_man_proto	nat_src_proto_max;
+
 	/* Extensions */
 	struct nf_ct_ext *ext;
 
@@ -160,6 +165,10 @@
 	return read_pnet(&ct->ct_net);
 }
 
+/* Alter reply tuple (maybe alter helper). */
+void nf_conntrack_alter_reply(struct nf_conn *ct,
+			      const struct nf_conntrack_tuple *newreply);
+
 /* Is this tuple taken? (ignoring any belonging to the given
    conntrack). */
 int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
@@ -280,16 +289,6 @@
 	return skb->dev && skb->skb_iif && skb->dev->flags & IFF_LOOPBACK;
 }
 
-static inline void nf_conntrack_alter_reply(struct nf_conn *ct,
-					    const struct nf_conntrack_tuple *newreply)
-{
-	/* Must be unconfirmed, so not in hash table yet */
-	if (WARN_ON(nf_ct_is_confirmed(ct)))
-		return;
-
-	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
-}
-
 #define nfct_time_stamp ((u32)(jiffies))
 
 /* jiffies until ct expires, 0 if already expired */
diff -ruw linux-6.13.12/include/net/netfilter/nf_conntrack_expect.h linux-6.13.12-fbx/include/net/netfilter/nf_conntrack_expect.h
--- linux-6.13.12/include/net/netfilter/nf_conntrack_expect.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/net/netfilter/nf_conntrack_expect.h	2025-09-25 17:40:37.171375180 +0200
@@ -58,13 +58,24 @@
 #endif
 
 	struct rcu_head rcu;
+
+	/* private expect information. */
+	char data[32] __aligned(8);
 };
 
+#define NF_CT_EXPECT_BUILD_BUG_ON(structsize)				\
+	BUILD_BUG_ON((structsize) > sizeof_field(struct nf_conntrack_expect, data))
+
 static inline struct net *nf_ct_exp_net(struct nf_conntrack_expect *exp)
 {
 	return nf_ct_net(exp->master);
 }
 
+static inline void *nf_ct_exp_data(struct nf_conntrack_expect *exp)
+{
+	return (void *)exp->data;
+}
+
 #define NF_CT_EXP_POLICY_NAME_LEN	16
 
 struct nf_conntrack_expect_policy {
diff -ruw linux-6.13.12/include/net/netfilter/nf_conntrack_helper.h linux-6.13.12-fbx/include/net/netfilter/nf_conntrack_helper.h
--- linux-6.13.12/include/net/netfilter/nf_conntrack_helper.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/net/netfilter/nf_conntrack_helper.h	2025-09-25 17:40:37.171375180 +0200
@@ -136,6 +136,8 @@
 	return (void *)help->data;
 }
 
+void nf_conntrack_helper_pernet_init(struct net *net);
+
 int nf_conntrack_helper_init(void);
 void nf_conntrack_helper_fini(void);
 
@@ -180,4 +182,5 @@
 int nf_nat_helper_try_module_get(const char *name, u16 l3num,
 				 u8 protonum);
 void nf_nat_helper_put(struct nf_conntrack_helper *helper);
+void nf_ct_set_auto_assign_helper_warned(struct net *net);
 #endif /*_NF_CONNTRACK_HELPER_H*/
diff -ruw linux-6.13.12/include/net/netns/conntrack.h linux-6.13.12-fbx/include/net/netns/conntrack.h
--- linux-6.13.12/include/net/netns/conntrack.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/net/netns/conntrack.h	2025-09-25 17:40:37.175375200 +0200
@@ -100,6 +100,7 @@
 	u8			sysctl_log_invalid; /* Log invalid packets */
 	u8			sysctl_events;
 	u8			sysctl_acct;
+	u8			sysctl_auto_assign_helper;
 	u8			sysctl_tstamp;
 	u8			sysctl_checksum;
 
diff -ruw linux-6.13.12/include/net/page_pool/helpers.h linux-6.13.12-fbx/include/net/page_pool/helpers.h
--- linux-6.13.12/include/net/page_pool/helpers.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/net/page_pool/helpers.h	2025-09-25 17:40:37.175375200 +0200
@@ -454,4 +454,19 @@
 		page_pool_update_nid(pool, new_nid);
 }
 
+static inline void page_pool_clear_recycle_flag(struct page *page)
+{
+	page->pp_recycle_flag = 0;
+}
+
+static inline void page_pool_set_recycled_flag(struct page *page)
+{
+	page->pp_recycle_flag = 1;
+}
+
+static inline bool page_pool_is_recycled(struct page *page)
+{
+	return page->pp_recycle_flag & 1;
+}
+
 #endif /* _NET_PAGE_POOL_HELPERS_H */
diff -ruw linux-6.13.12/include/net/sock.h linux-6.13.12-fbx/include/net/sock.h
--- linux-6.13.12/include/net/sock.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/net/sock.h	2025-09-25 17:40:37.183375240 +0200
@@ -174,6 +174,7 @@
 	unsigned char		skc_reuseport:1;
 	unsigned char		skc_ipv6only:1;
 	unsigned char		skc_net_refcnt:1;
+	unsigned char		skc_reuse_conflict;
 	int			skc_bound_dev_if;
 	union {
 		struct hlist_node	skc_bind_node;
@@ -370,6 +371,7 @@
 #define sk_reuseport		__sk_common.skc_reuseport
 #define sk_ipv6only		__sk_common.skc_ipv6only
 #define sk_net_refcnt		__sk_common.skc_net_refcnt
+#define sk_reuse_conflict	__sk_common.skc_reuse_conflict
 #define sk_bound_dev_if		__sk_common.skc_bound_dev_if
 #define sk_bind_node		__sk_common.skc_bind_node
 #define sk_prot			__sk_common.skc_prot
@@ -959,6 +961,7 @@
 	SOCK_XDP, /* XDP is attached */
 	SOCK_TSTAMP_NEW, /* Indicates 64 bit timestamps always */
 	SOCK_RCVMARK, /* Receive SO_MARK  ancillary data with packet */
+	SOCK_UDP_DUP_UNICAST,
 };
 
 #define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
diff -ruw linux-6.13.12/include/uapi/asm-generic/socket.h linux-6.13.12-fbx/include/uapi/asm-generic/socket.h
--- linux-6.13.12/include/uapi/asm-generic/socket.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/asm-generic/socket.h	2025-09-25 17:40:37.271375676 +0200
@@ -30,9 +30,10 @@
 #define SO_PEERCRED	17
 #define SO_RCVLOWAT	18
 #define SO_SNDLOWAT	19
+#endif
+
 #define SO_RCVTIMEO_OLD	20
 #define SO_SNDTIMEO_OLD	21
-#endif
 
 /* Security levels - as per NRL IPv6 - don't actually do anything */
 #define SO_SECURITY_AUTHENTICATION		22
@@ -143,6 +144,8 @@
 
 #define SCM_TS_OPT_ID		81
 
+#define SO_UDP_DUP_UNICAST	100
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
diff -ruw linux-6.13.12/include/uapi/linux/auxvec.h linux-6.13.12-fbx/include/uapi/linux/auxvec.h
--- linux-6.13.12/include/uapi/linux/auxvec.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/auxvec.h	2025-09-25 17:40:37.275375696 +0200
@@ -25,6 +25,7 @@
 #define AT_HWCAP  16    /* arch dependent hints at CPU capabilities */
 #define AT_CLKTCK 17	/* frequency at which times() increments */
 /* AT_* values 18 through 22 are reserved */
+#define AT_ASLR  21	/* 0: no aslr - 1: aslr - 2: pseudo_aslr */
 #define AT_SECURE 23   /* secure mode boolean */
 #define AT_BASE_PLATFORM 24	/* string identifying real platform, may
 				 * differ from AT_PLATFORM. */
diff -ruw linux-6.13.12/include/uapi/linux/batadv_packet.h linux-6.13.12-fbx/include/uapi/linux/batadv_packet.h
--- linux-6.13.12/include/uapi/linux/batadv_packet.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/batadv_packet.h	2025-09-25 17:40:37.275375696 +0200
@@ -45,6 +45,7 @@
 	BATADV_ELP		= 0x03,
 	BATADV_OGM2		= 0x04,
 	BATADV_MCAST            = 0x05,
+	BATADV_FBX		= 0x3f,
 	/* 0x40 - 0x7f: unicast */
 #define BATADV_UNICAST_MIN     0x40
 	BATADV_UNICAST          = 0x40,
@@ -88,6 +89,14 @@
 };
 
 /**
+ * enum batadv_v_flags - flags used in B.A.T.M.A.N. V OGM2 packets
+ * @BATADV_V_HALF_DUPLEX: Halfduplex penalty should be applied to throughput
+ */
+enum batadv_v_flags {
+	BATADV_V_HALF_DUPLEX   = 1UL << 7,
+};
+
+/**
  * enum batadv_icmp_packettype - ICMP message types
  * @BATADV_ECHO_REPLY: success reply to BATADV_ECHO_REQUEST
  * @BATADV_DESTINATION_UNREACHABLE: failure when route to destination not found
@@ -180,6 +189,7 @@
  * @BATADV_TVLV_ROAM: roaming advertisement tvlv
  * @BATADV_TVLV_MCAST: multicast capability tvlv
  * @BATADV_TVLV_MCAST_TRACKER: multicast tracker tvlv
+ * @BATADV_TVLV_FBX: fbx specific tvlv
  */
 enum batadv_tvlv_type {
 	BATADV_TVLV_GW			= 0x01,
@@ -189,6 +199,7 @@
 	BATADV_TVLV_ROAM		= 0x05,
 	BATADV_TVLV_MCAST		= 0x06,
 	BATADV_TVLV_MCAST_TRACKER	= 0x07,
+	BATADV_TVLV_FBX		= 0xff,
 };
 
 #pragma pack(2)
@@ -664,6 +675,61 @@
 	__be16	num_dests;
 };
 
+/**
+ * struct batadv_fbx_packet - FBX specific packets
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @subtype: FBX packet subtype (see batadv_fbx_subtype)
+ * @rev: FBX specific version for compatibility
+ * @seqno: an always increasing sequence number, not checked for now
+ */
+struct batadv_fbx_packet {
+	__u8   packet_type;
+	__u8   version;
+	__u8   subtype; /* see batadv_fbx_subtype detection message types */
+	__u8   rev;
+	__be32 seqno;
+};
+#define BATADV_FBX_HLEN sizeof(struct batadv_fbx_packet)
+
+/**
+ * enum batadv_fbx_subtype - FBX packet subtypes
+ * @BATADV_FBX_SUB_MTU_PROBE: Big message sent to a neigh to probe link MTU
+ * @BATADV_FBX_SUB_MTU_RESP: MTU acknowledgment from receiver to sender
+ */
+enum batadv_fbx_subtype {
+	BATADV_FBX_SUB_UNUSED = 0,
+	BATADV_FBX_SUB_MTU_PROBE,
+	BATADV_FBX_SUB_MTU_RESP,
+	BATADV_FBX_SUB_SLAP,
+	/* keep last */
+	BATADV_FBX_SUB_LAST,
+};
+
+/**
+ * struct batadv_fbx_mtu_packet - FBX MTU probing packet
+ * @hdr: Common FBX header
+ * @mtu: The mtu this probe / resp packet relates to
+ */
+struct batadv_fbx_mtu_packet {
+	struct batadv_fbx_packet hdr;
+	__be16 mtu;
+};
+
+#define BATADV_FBX_MTU_HLEN sizeof(struct batadv_fbx_mtu_packet)
+
+/**
+ * struct batadv_fbx_slap_packet - FBX SLAP ID packet
+ * @hdr: Common FBX header
+ * @prio: SLAP Prio of the originator node
+ */
+struct batadv_fbx_slap_packet {
+	struct batadv_fbx_packet hdr;
+	__be32 prio;
+};
+
+#define BATADV_FBX_SLAP_HLEN sizeof(struct batadv_fbx_slap_packet)
+
 #pragma pack()
 
 #endif /* _UAPI_LINUX_BATADV_PACKET_H_ */
diff -ruw linux-6.13.12/include/uapi/linux/batman_adv.h linux-6.13.12-fbx/include/uapi/linux/batman_adv.h
--- linux-6.13.12/include/uapi/linux/batman_adv.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/batman_adv.h	2025-09-25 17:40:37.275375696 +0200
@@ -11,6 +11,7 @@
 
 #define BATADV_NL_MCAST_GROUP_CONFIG	"config"
 #define BATADV_NL_MCAST_GROUP_TPMETER	"tpmeter"
+#define BATADV_NL_MCAST_GROUP_ROUTE	"route"
 
 /**
  * enum batadv_tt_client_flags - TT client specific flags
@@ -49,6 +50,12 @@
 	BATADV_TT_CLIENT_ISOLA	 = (1 << 5),
 
 	/**
+	 * @BATADV_TT_CLIENT_SEEN: this global client has been actually
+	 * detected to be part of the originator and has not yet expired
+	 */
+	BATADV_TT_CLIENT_SEEN = (1 << 6),
+
+	/**
 	 * @BATADV_TT_CLIENT_NOPURGE: this client should never be removed from
 	 * the table
 	 */
@@ -481,6 +488,11 @@
 	 */
 	BATADV_ATTR_MULTICAST_FANOUT,
 
+	/**
+	 * @BATADV_ATTR_FBX: defines FBX specific NL attributes
+	 */
+	BATADV_ATTR_FBX,
+
 	/* add attributes above here, update the policy in netlink.c */
 
 	/**
@@ -500,6 +512,52 @@
 };
 
 /**
+ * enum batadv_nl_fbx_attrs - batman-adv netlink attributes
+ */
+enum batadv_nl_fbx_attr {
+	/**
+	 * @BATADV_ATTR_FBX_MTU: defines the MTU this neighbor can safely use.
+	 */
+	BATADV_ATTR_FBX_MTU,
+
+	/**
+	 * @BATADV_ATTR_FBX_SLAP_MASTER_ADDRESS: Show current SLAP master address
+	 */
+	BATADV_ATTR_FBX_SLAP_MASTER_ADDRESS,
+
+	/**
+	 * @BATADV_ATTR_FBX_SLAP_MASTER_PRIO: Show current SLAP master priority
+	 */
+	BATADV_ATTR_FBX_SLAP_MASTER_PRIO,
+
+	/**
+	 * @BATADV_ATTR_FBX_SLAP_IFINDEX: defines the currently selected SLAP
+	 * interface
+	 */
+	BATADV_ATTR_FBX_SLAP_IFINDEX,
+
+	/**
+	 * @BATADV_ATTR_FBX_SLAP_PRIO: defines the current SLAP priority
+	 */
+	BATADV_ATTR_FBX_SLAP_PRIO,
+
+	/**
+	 * @__BATADV_ATTR_FBX_AFTER_LAST: internal use
+	 */
+	__BATADV_ATTR_FBX_AFTER_LAST,
+
+	/**
+	 * @NUM_BATADV_ATTR_FBX: total number of batadv_nl_fbx_attrs available
+	 */
+	NUM_BATADV_ATTR_FBX = __BATADV_ATTR_FBX_AFTER_LAST,
+
+	/**
+	 * @BATADV_ATTR_FBX_MAX: highest attribute number currently defined
+	 */
+	BATADV_ATTR_FBX_MAX = __BATADV_ATTR_FBX_AFTER_LAST - 1
+};
+
+/**
  * enum batadv_nl_commands - supported batman-adv netlink commands
  */
 enum batadv_nl_commands {
@@ -613,6 +671,21 @@
 	 */
 	BATADV_CMD_SET_VLAN,
 
+	/**
+	 * @BATADV_CMD_ADD_ROUTE: Add new route to reach originator
+	 */
+	BATADV_CMD_ADD_ROUTE,
+
+	/**
+	 * @BATADV_CMD_DEL_ROUTE: Del route to originator
+	 */
+	BATADV_CMD_DEL_ROUTE,
+
+	/**
+	 * @BATADV_CMD_CHANGE_ROUTE: Modify an existing route to originator
+	 */
+	BATADV_CMD_CHANGE_ROUTE,
+
 	/* add new commands above here */
 
 	/**
diff -ruw linux-6.13.12/include/uapi/linux/ethtool.h linux-6.13.12-fbx/include/uapi/linux/ethtool.h
--- linux-6.13.12/include/uapi/linux/ethtool.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/ethtool.h	2025-09-25 17:40:37.287375755 +0200
@@ -294,6 +294,8 @@
 	ETHTOOL_PHY_DOWNSHIFT,
 	ETHTOOL_PHY_FAST_LINK_DOWN,
 	ETHTOOL_PHY_EDPD,
+	ETHTOOL_PHY_BROKEN,
+
 	/*
 	 * Add your fresh new phy tunable attribute above and remember to update
 	 * phy_tunable_strings[] in net/ethtool/common.c
@@ -379,6 +381,30 @@
 	__u32	reserved[2];
 };
 
+struct ethtool_eee_linkmode {
+	__u32	cmd;
+	__u32	eee_active;
+	__u32	eee_enabled;
+	__u32	tx_lpi_enabled;
+	__u32	tx_lpi_timer;
+	__s8	link_mode_masks_nwords;
+	__u8	reserved[3];
+
+#ifndef __KERNEL__
+	/* Linux builds with -Wflex-array-member-not-at-end but does
+	 * not use the "link_mode_masks" member. Leave it defined for
+	 * userspace for now, and when userspace wants to start using
+	 * -Wfamnae, we'll need a new solution.
+	 */
+	__u32	link_mode_masks[];
+	/* layout of link_mode_masks fields:
+	 * __u32 map_supported[link_mode_masks_nwords];
+	 * __u32 map_advertising[link_mode_masks_nwords];
+	 * __u32 map_lp_advertising[link_mode_masks_nwords];
+	 */
+#endif
+};
+
 /**
  * struct ethtool_modinfo - plugin module eeprom information
  * @cmd: %ETHTOOL_GMODULEINFO
@@ -681,6 +707,7 @@
  * @ETH_SS_STATS_ETH_MAC: names of IEEE 802.3 MAC statistics
  * @ETH_SS_STATS_ETH_CTRL: names of IEEE 802.3 MAC Control statistics
  * @ETH_SS_STATS_RMON: names of RMON statistics
+ * @ETH_SS_PHYLINK_IFTYPES: names of phylink interface types
  *
  * @ETH_SS_COUNT: number of defined string sets
  */
@@ -706,6 +733,7 @@
 	ETH_SS_STATS_ETH_MAC,
 	ETH_SS_STATS_ETH_CTRL,
 	ETH_SS_STATS_RMON,
+	ETH_SS_PHYLINK_IFTYPES,
 
 	/* add new constants above here */
 	ETH_SS_COUNT
@@ -1843,6 +1871,121 @@
 #define ETHTOOL_FEC_BASER		(1 << ETHTOOL_FEC_BASER_BIT)
 #define ETHTOOL_FEC_LLRS		(1 << ETHTOOL_FEC_LLRS_BIT)
 
+/**
+ * struct ethtool_shaper_params
+ * @cmd: %ETHTOOL_GSHAPER_PARAMS / %ETHTOOL_SSHAPER_PARAMS
+ */
+struct ethtool_shaper_params {
+	__u32 cmd;
+
+	__u64 rate;
+	__u32 burst;
+	__u32 mtu;
+};
+
+/**
+ * struct ethtool_epon_param
+ * @cmd: Command number = %ETHTOOL_GEPON_PARAM or %ETHTOOL_SEPON_*
+ */
+struct ethtool_epon_param {
+	__u32   cmd;
+	__u8	discovery_rx;
+	__u8	registered;
+	__u16	llid;
+	__u32	burst_cap;
+	__u32	lasermon_event_count;
+	__u32	change_count;
+	__u32	keys_update_id;
+	__u8	key_sci[8];
+	__u8	down_key0[16];
+	__u8	down_key1[16];
+	__u32	down_encrypt;
+	__u32	down_last_rx_encrypted;
+	__u32	down_last_rx_key_id;
+	__u16	mcast_llid;
+	__u16	pad;
+};
+
+/*
+ * currently a 1:1 mapping for SFP SM in drivers/net/phy/sfp.c
+ */
+enum {
+	ETHTOOL_SFP_S_DOWN = 0,
+	ETHTOOL_SFP_S_FAIL,
+	ETHTOOL_SFP_S_WAIT,
+	ETHTOOL_SFP_S_INIT,
+	ETHTOOL_SFP_S_INIT_PHY,
+	ETHTOOL_SFP_S_INIT_TX_FAULT,
+	ETHTOOL_SFP_S_WAIT_LOS,
+	ETHTOOL_SFP_S_LINK_UP,
+	ETHTOOL_SFP_S_TX_FAULT,
+	ETHTOOL_SFP_S_REINIT,
+	ETHTOOL_SFP_S_TX_DISABLE,
+};
+
+/**
+ * struct ethtool_sfp_state
+ * @cmd: Command number = %ETHTOOL_GSFP_STATE
+ */
+struct ethtool_sfp_state {
+	__u32 cmd;
+
+	__u32 fsm_state;
+
+	__u8 o_pwren;
+	__u8 o_txdis;
+	__u8 i_presence;
+	__u8 i_rxlos;
+	__u8 i_txfault;
+};
+
+/**
+ * struct ethtool_phylink_if_mode
+ * @cmd: %ETHTOOL_GPHYLINK_IFTYPE / %ETHTOOL_SPHYLINK_IFTYPE
+ */
+struct ethtool_phylink_iftype {
+	__u32	cmd;
+
+	/* stringified phy_interface_t (enum is not part of UAPI and
+	 * is not stable), uses string from phy_modes()  */
+	char	iftype[ETH_GSTRING_LEN];
+
+	__u32	autoneg_en;
+
+	/* enum MLO_AN_xxx, read-only */
+	__u32	mode;
+};
+
+enum ethtool_prbs_direction {
+	ETHTOOL_PRBS_DIRECTION_TX = 0x1,
+	ETHTOOL_PRBS_DIRECTION_RX = 0x2,
+	ETHTOOL_PRBS_DIRECTION_BOTH = ETHTOOL_PRBS_DIRECTION_RX |
+				      ETHTOOL_PRBS_DIRECTION_TX,
+};
+
+/**
+ * struct ethtool_prbs_params
+ * @cmd: %ETHTOOL_GPRBS_PARAM / %ETHTOOL_SPRBS_PARAM
+ * @enable: Whether a transmission/analysis is ongoing
+ * @length: Length of the sequence
+ * @errors: Number of errors detected during the analysis
+ * @lock: Current state of the PRBS checker lock bit (read-only)
+ */
+struct ethtool_prbs_param {
+	__u32 cmd;
+
+	__u8 enable;
+	__u8 length;
+	__u64 errors;
+	__u8 lock;
+
+	enum ethtool_prbs_direction direction;
+
+	char pre_emphasis[ETH_GSTRING_LEN];
+	char amplitude[ETH_GSTRING_LEN];
+	char post_emphasis[ETH_GSTRING_LEN];
+};
+
 /* CMDs currently supported */
 #define ETHTOOL_GSET		0x00000001 /* DEPRECATED, Get settings.
 					    * Please use ETHTOOL_GLINKSETTINGS
@@ -1938,6 +2081,29 @@
 #define ETHTOOL_GFECPARAM	0x00000050 /* Get FEC settings */
 #define ETHTOOL_SFECPARAM	0x00000051 /* Set FEC settings */
 
+#define ETHTOOL_GEPON_PARAM	0x00000052 /* Get EPON params */
+#define ETHTOOL_SEPON_KEYS	0x00000053 /* Set EPON encryption keys */
+#define ETHTOOL_SEPON_ENCRYPT	0x00000054 /* Set EPON encryption state */
+#define ETHTOOL_SEPON_RESTART	0x00000055 /* restart epon link */
+#define ETHTOOL_SEPON_BURST	0x00000056 /* update burst value */
+#define ETHTOOL_SEPON_ADD_MCLLID	0x00000057 /* add epon llid */
+#define ETHTOOL_SEPON_DEL_MCLLID	0x00000058 /* remove epon llid */
+#define ETHTOOL_SEPON_CLR_MCLLID	0x00000059 /* remove all epon llid */
+
+#define ETHTOOL_GSFP_STATE	0x00000060 /* get SFP state (IOs/FSM) */
+
+#define ETHTOOL_SSHAPER_PARAMS	0x00000061 /* set HW TX shaper params */
+#define ETHTOOL_GSHAPER_PARAMS	0x00000062 /* get HW TX shaper params */
+
+#define ETHTOOL_GPHYLINK_IFTYPE	0x00000063 /* get phylink interface type  */
+#define ETHTOOL_SPHYLINK_IFTYPE	0x00000064 /* set phylink interface type */
+
+#define ETHTOOL_GEEE_LINKMODE	0x00000065 /* Get EEE settings linkmode format */
+#define ETHTOOL_SEEE_LINKMODE	0x00000066 /* Set EEE settings linkmode format */
+
+#define ETHTOOL_GPRBS_PARAM	0x00000067 /* Get hardware PRBS parameters */
+#define ETHTOOL_SPRBS_PARAM	0x00000068 /* Set hardware PRBS parameters */
+
 /* compatibility with older code */
 #define SPARC_ETH_GSET		ETHTOOL_GSET
 #define SPARC_ETH_SSET		ETHTOOL_SSET
@@ -2055,6 +2221,12 @@
 	ETHTOOL_LINK_MODE_10baseT1S_Half_BIT		 = 100,
 	ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT	 = 101,
 	ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT		 = 102,
+	ETHTOOL_LINK_MODE_1000basePX_D_Full_BIT		 = 103,
+	ETHTOOL_LINK_MODE_1000basePX_U_Full_BIT		 = 104,
+	ETHTOOL_LINK_MODE_10000basePR_D_Full_BIT	 = 105,
+	ETHTOOL_LINK_MODE_10000basePR_U_Full_BIT	 = 106,
+	ETHTOOL_LINK_MODE_10000_1000basePRX_D_Full_BIT	 = 107,
+	ETHTOOL_LINK_MODE_10000_1000basePRX_U_Full_BIT	 = 108,
 
 	/* must be last entry */
 	__ETHTOOL_LINK_MODE_MASK_NBITS
diff -ruw linux-6.13.12/include/uapi/linux/if_ether.h linux-6.13.12-fbx/include/uapi/linux/if_ether.h
--- linux-6.13.12/include/uapi/linux/if_ether.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/if_ether.h	2025-09-25 17:40:37.295375795 +0200
@@ -55,6 +55,7 @@
 #define	ETH_P_BPQ	0x08FF		/* G8BPQ AX.25 Ethernet Packet	[ NOT AN OFFICIALLY REGISTERED ID ] */
 #define ETH_P_IEEEPUP	0x0a00		/* Xerox IEEE802.3 PUP packet */
 #define ETH_P_IEEEPUPAT	0x0a01		/* Xerox IEEE802.3 PUP Addr Trans packet */
+#define ETH_P_FBXVLAN	0x1337		/* Freebox specific VLAN type [ NOT AN OFFICIALLY REGISTERED ID ] */
 #define ETH_P_BATMAN	0x4305		/* B.A.T.M.A.N.-Advanced packet [ NOT AN OFFICIALLY REGISTERED ID ] */
 #define ETH_P_DEC       0x6000          /* DEC Assigned proto           */
 #define ETH_P_DNA_DL    0x6001          /* DEC DNA Dump/Load            */
diff -ruw linux-6.13.12/include/uapi/linux/if_tun.h linux-6.13.12-fbx/include/uapi/linux/if_tun.h
--- linux-6.13.12/include/uapi/linux/if_tun.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/if_tun.h	2025-09-25 17:40:37.299375815 +0200
@@ -62,6 +62,32 @@
 #define TUNSETCARRIER _IOW('T', 226, int)
 #define TUNGETDEVNETNS _IO('T', 227)
 
+
+struct smalltun_rule {
+	__u8	proto;
+	__be16	src_port_start;
+	__be16	src_port_end;
+	__be16	dst_port_start;
+	__be16	dst_port_end;
+};
+
+struct smalltun_fp {
+	__be32	inner_src;
+	__be32	inner_dst;
+
+	__u32	af;
+	__u8	outer_src[16];
+	__u8	outer_dst[16];
+	__be16	outer_src_port;
+	__be16	outer_dst_port;
+
+	struct smalltun_rule rules[8];
+	__u32	rule_count;
+};
+
+#define TUNSMALLTUNSETFP _IOW('T', 228, struct smalltun_fp)
+#define TUNSMALLTUNDELFP _IOW('T', 229, struct smalltun_fp)
+
 /* TUNSETIFF ifr flags */
 #define IFF_TUN		0x0001
 #define IFF_TAP		0x0002
diff -ruw linux-6.13.12/include/uapi/linux/if_tunnel.h linux-6.13.12-fbx/include/uapi/linux/if_tunnel.h
--- linux-6.13.12/include/uapi/linux/if_tunnel.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/if_tunnel.h	2025-09-25 17:40:37.299375815 +0200
@@ -77,10 +77,23 @@
 	IFLA_IPTUN_ENCAP_DPORT,
 	IFLA_IPTUN_COLLECT_METADATA,
 	IFLA_IPTUN_FWMARK,
+	IFLA_IPTUN_FMRS,
 	__IFLA_IPTUN_MAX,
 };
 #define IFLA_IPTUN_MAX	(__IFLA_IPTUN_MAX - 1)
 
+enum {
+	IFLA_IPTUN_FMR_UNSPEC,
+	IFLA_IPTUN_FMR_IP6_PREFIX,
+	IFLA_IPTUN_FMR_IP4_PREFIX,
+	IFLA_IPTUN_FMR_IP6_PREFIX_LEN,
+	IFLA_IPTUN_FMR_IP4_PREFIX_LEN,
+	IFLA_IPTUN_FMR_EA_LEN,
+	IFLA_IPTUN_FMR_OFFSET,
+	__IFLA_IPTUN_FMR_MAX,
+};
+#define IFLA_IPTUN_FMR_MAX (__IFLA_IPTUN_FMR_MAX - 1)
+
 enum tunnel_encap_types {
 	TUNNEL_ENCAP_NONE,
 	TUNNEL_ENCAP_FOU,
diff -ruw linux-6.13.12/include/uapi/linux/input-event-codes.h linux-6.13.12-fbx/include/uapi/linux/input-event-codes.h
--- linux-6.13.12/include/uapi/linux/input-event-codes.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/input-event-codes.h	2025-09-25 17:40:37.299375815 +0200
@@ -807,6 +807,18 @@
 #define BTN_TRIGGER_HAPPY39		0x2e6
 #define BTN_TRIGGER_HAPPY40		0x2e7
 
+#define KEY_APP_TV			0x2f1
+#define KEY_APP_REPLAY			0x2f2
+#define KEY_APP_VIDEOCLUB		0x2f3
+#define KEY_APP_WHATSON			0x2f4
+#define KEY_APP_RECORDS			0x2f5
+#define KEY_APP_MEDIA			0x2f6
+#define KEY_APP_YOUTUBE			0x2f7
+#define KEY_APP_RADIOS			0x2f8
+#define KEY_APP_CANALVOD		0x2f9
+#define KEY_APP_PIP			0x2fa
+#define KEY_APP_NETFLIX			0x2fb
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
diff -ruw linux-6.13.12/include/uapi/linux/libc-compat.h linux-6.13.12-fbx/include/uapi/linux/libc-compat.h
--- linux-6.13.12/include/uapi/linux/libc-compat.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/libc-compat.h	2025-09-25 17:40:37.303375835 +0200
@@ -49,11 +49,11 @@
 #ifndef _UAPI_LIBC_COMPAT_H
 #define _UAPI_LIBC_COMPAT_H
 
-/* We have included glibc headers... */
-#if defined(__GLIBC__)
+/* We have included libc headers... */
+#if !defined(__KERNEL__)
 
-/* Coordinate with glibc net/if.h header. */
-#if defined(_NET_IF_H) && defined(__USE_MISC)
+/* Coordinate with libc net/if.h header. */
+#if defined(_NET_IF_H) && (!defined(__GLIBC__) || defined(__USE_MISC))
 
 /* GLIBC headers included first so don't define anything
  * that would already be defined. */
@@ -65,9 +65,11 @@
 /* Everything up to IFF_DYNAMIC, matches net/if.h until glibc 2.23 */
 #define __UAPI_DEF_IF_NET_DEVICE_FLAGS 0
 /* For the future if glibc adds IFF_LOWER_UP, IFF_DORMANT and IFF_ECHO */
+#ifndef IFF_ECHO
 #ifndef __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO
 #define __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO 1
 #endif /* __UAPI_DEF_IF_NET_DEVICE_FLAGS_LOWER_UP_DORMANT_ECHO */
+#endif /* IFF_ECHO */
 
 #else /* _NET_IF_H */
 
@@ -140,6 +142,25 @@
 
 #endif /* _NETINET_IN_H */
 
+/* Coordinate with glibc netipx/ipx.h header. */
+#if defined(__NETIPX_IPX_H)
+
+#define __UAPI_DEF_SOCKADDR_IPX			0
+#define __UAPI_DEF_IPX_ROUTE_DEFINITION		0
+#define __UAPI_DEF_IPX_INTERFACE_DEFINITION	0
+#define __UAPI_DEF_IPX_CONFIG_DATA		0
+#define __UAPI_DEF_IPX_ROUTE_DEF		0
+
+#else /* defined(__NETIPX_IPX_H) */
+
+#define __UAPI_DEF_SOCKADDR_IPX			1
+#define __UAPI_DEF_IPX_ROUTE_DEFINITION		1
+#define __UAPI_DEF_IPX_INTERFACE_DEFINITION	1
+#define __UAPI_DEF_IPX_CONFIG_DATA		1
+#define __UAPI_DEF_IPX_ROUTE_DEF		1
+
+#endif /* defined(__NETIPX_IPX_H) */
+
 /* Definitions for xattr.h */
 #if defined(_SYS_XATTR_H)
 #define __UAPI_DEF_XATTR		0
@@ -151,7 +172,7 @@
  * or we are being included in the kernel, then define everything
  * that we need. Check for previous __UAPI_* definitions to give
  * unsupported C libraries a way to opt out of any kernel definition. */
-#else /* !defined(__GLIBC__) */
+#else /* !defined(__KERNEL__) */
 
 /* Definitions for if.h */
 #ifndef __UAPI_DEF_IF_IFCONF
@@ -221,11 +242,28 @@
 #define __UAPI_DEF_IP6_MTUINFO		1
 #endif
 
+/* Definitions for ipx.h */
+#ifndef __UAPI_DEF_SOCKADDR_IPX
+#define __UAPI_DEF_SOCKADDR_IPX			1
+#endif
+#ifndef __UAPI_DEF_IPX_ROUTE_DEFINITION
+#define __UAPI_DEF_IPX_ROUTE_DEFINITION		1
+#endif
+#ifndef __UAPI_DEF_IPX_INTERFACE_DEFINITION
+#define __UAPI_DEF_IPX_INTERFACE_DEFINITION	1
+#endif
+#ifndef __UAPI_DEF_IPX_CONFIG_DATA
+#define __UAPI_DEF_IPX_CONFIG_DATA		1
+#endif
+#ifndef __UAPI_DEF_IPX_ROUTE_DEF
+#define __UAPI_DEF_IPX_ROUTE_DEF		1
+#endif
+
 /* Definitions for xattr.h */
 #ifndef __UAPI_DEF_XATTR
 #define __UAPI_DEF_XATTR		1
 #endif
 
-#endif /* __GLIBC__ */
+#endif /* __KERNEL__ */
 
 #endif /* _UAPI_LIBC_COMPAT_H */
diff -ruw linux-6.13.12/include/uapi/linux/nl80211.h linux-6.13.12-fbx/include/uapi/linux/nl80211.h
--- linux-6.13.12/include/uapi/linux/nl80211.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/nl80211.h	2025-09-25 17:40:37.311375874 +0200
@@ -1329,6 +1329,13 @@
  *      %NL80211_ATTR_MLO_TTLM_ULINK attributes are used to specify the
  *      TID to Link mapping for downlink/uplink traffic.
  *
+ * @NL80211_CMD_ASSOC_MLO_RECONF: For a non-AP MLD station, request to
+ *      add/remove links to/from the association.
+ *
+ * @NL80211_CMD_EPCS_CFG: EPCS configuration for a station. Used by userland to
+ *	control EPCS configuration. Used to notify userland on the current state
+ *	of EPCS.
+ *
  * @NL80211_CMD_MAX: highest used command number
  * @__NL80211_CMD_AFTER_LAST: internal use
  */
@@ -1586,6 +1593,9 @@
 
 	NL80211_CMD_SET_TID_TO_LINK_MAPPING,
 
+	NL80211_CMD_ASSOC_MLO_RECONF,
+	NL80211_CMD_EPCS_CFG,
+
 	/* add new commands above here */
 
 	/* used to define NL80211_CMD_MAX below */
@@ -2871,6 +2881,23 @@
  * @NL80211_ATTR_VIF_RADIO_MASK: Bitmask of allowed radios (u32).
  *	A value of 0 means all radios.
  *
+ * @NL80211_ATTR_SUPPORTED_SELECTORS: supported selectors, array of
+ *	supported selectors as defined by IEEE 802.11 7.3.2.2 but without the
+ *	length restriction (at most %NL80211_MAX_SUPP_SELECTORS).
+ *	This can be used to provide a list of selectors that are implemented
+ *	by the supplicant. If not given, support for SAE_H2E is assumed.
+ *
+ * @NL80211_ATTR_MLO_RECONF_REM_LINKS: (u16) A bitmask of the links requested
+ *      to be removed from the MLO association.
+ *
+ * @NL80211_ATTR_EPCS: Flag attribute indicating that EPCS is enabled for a
+ *	station interface.
+ *
+ * @NL80211_ATTR_WIPHY_RADIO_INDEX: Integer attribute denoting the index of
+ *	the radio in interest. Internally a value of 0xff is used to indicate
+ *	this attribute is not present, and hence any associated attributes are
+ *	deemed to be applicable to all radios
+ *
  * @NUM_NL80211_ATTR: total number of nl80211_attrs available
  * @NL80211_ATTR_MAX: highest attribute number currently defined
  * @__NL80211_ATTR_AFTER_LAST: internal use
@@ -3421,6 +3448,13 @@
 
 	NL80211_ATTR_VIF_RADIO_MASK,
 
+	NL80211_ATTR_SUPPORTED_SELECTORS,
+
+	NL80211_ATTR_MLO_RECONF_REM_LINKS,
+	NL80211_ATTR_EPCS,
+
+	NL80211_ATTR_WIPHY_RADIO_INDEX,
+
 	/* add attributes here, update the policy in nl80211.c */
 
 	__NL80211_ATTR_AFTER_LAST,
@@ -3463,8 +3497,10 @@
 #define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
 
 #define NL80211_WIPHY_NAME_MAXLEN		64
+#define NL80211_WIPHY_RADIO_ID_MAX		0xff
 
 #define NL80211_MAX_SUPP_RATES			32
+#define NL80211_MAX_SUPP_SELECTORS		128
 #define NL80211_MAX_SUPP_HT_RATES		77
 #define NL80211_MAX_SUPP_REG_RULES		128
 #define NL80211_TKIP_DATA_OFFSET_ENCR_KEY	0
@@ -6772,6 +6808,7 @@
  *	these channels would passively be scanned. Also note that when the flag
  *	is set, in addition to the colocated APs, PSC channels would also be
  *	scanned if the user space has asked for it.
+ * @NL80211_SCAN_FLAG_UPDATE_DFS: scan results will update DFS state
  */
 enum nl80211_scan_flags {
 	NL80211_SCAN_FLAG_LOW_PRIORITY				= 1<<0,
@@ -6789,6 +6826,7 @@
 	NL80211_SCAN_FLAG_MIN_PREQ_CONTENT			= 1<<12,
 	NL80211_SCAN_FLAG_FREQ_KHZ				= 1<<13,
 	NL80211_SCAN_FLAG_COLOCATED_6GHZ			= 1<<14,
+	NL80211_SCAN_FLAG_UPDATE_DFS				= 1<<15,
 };
 
 /**
@@ -7994,6 +8032,11 @@
  *	Setting this flag is permitted only if the driver advertises EMA support
  *	by setting wiphy->ema_max_profile_periodicity to non-zero.
  *
+ * @NL80211_MBSSID_CONFIG_ATTR_TX_LINK_ID: Link ID of the transmitted profile.
+ *	This parameter is mandatory if the transmitted profile is part of an MLD
+ *	and the interface getting configured is a non-transmitted profile. For all
+ *	other cases it will be ignored.
+ *
  * @__NL80211_MBSSID_CONFIG_ATTR_LAST: Internal
  * @NL80211_MBSSID_CONFIG_ATTR_MAX: highest attribute
  */
@@ -8005,6 +8048,7 @@
 	NL80211_MBSSID_CONFIG_ATTR_INDEX,
 	NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX,
 	NL80211_MBSSID_CONFIG_ATTR_EMA,
+	NL80211_MBSSID_CONFIG_ATTR_TX_LINK_ID,
 
 	/* keep last */
 	__NL80211_MBSSID_CONFIG_ATTR_LAST,
@@ -8040,6 +8084,7 @@
  *	and contains attributes defined in &enum nl80211_if_combination_attrs.
  * @NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK: bitmask (u32) of antennas
  *	connected to this radio.
+ * @NL80211_WIPHY_RADIO_ATTR_RTS_THRESHOLD: RTS threshold (u32) of this radio.
  *
  * @__NL80211_WIPHY_RADIO_ATTR_LAST: Internal
  * @NL80211_WIPHY_RADIO_ATTR_MAX: Highest attribute
@@ -8051,6 +8096,7 @@
 	NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE,
 	NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION,
 	NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK,
+	NL80211_WIPHY_RADIO_ATTR_RTS_THRESHOLD,
 
 	/* keep last */
 	__NL80211_WIPHY_RADIO_ATTR_LAST,
diff -ruw linux-6.13.12/include/uapi/linux/sched.h linux-6.13.12-fbx/include/uapi/linux/sched.h
--- linux-6.13.12/include/uapi/linux/sched.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/sched.h	2025-09-25 17:40:37.315375894 +0200
@@ -146,4 +146,6 @@
 			 SCHED_FLAG_KEEP_ALL		| \
 			 SCHED_FLAG_UTIL_CLAMP)
 
+#define SCHED_PASLR_SEED_SIZE	16
+
 #endif /* _UAPI_LINUX_SCHED_H */
diff -ruw linux-6.13.12/include/uapi/linux/serial_core.h linux-6.13.12-fbx/include/uapi/linux/serial_core.h
--- linux-6.13.12/include/uapi/linux/serial_core.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/serial_core.h	2025-09-25 17:40:37.319375914 +0200
@@ -234,4 +234,7 @@
 /* Generic type identifier for ports which type is not important to userspace. */
 #define PORT_GENERIC	(-1)
 
+/* Cortina uart */
+#define PORT_CORTINA	124
+
 #endif /* _UAPILINUX_SERIAL_CORE_H */
diff -ruw linux-6.13.12/include/uapi/linux/sockios.h linux-6.13.12-fbx/include/uapi/linux/sockios.h
--- linux-6.13.12/include/uapi/linux/sockios.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/sockios.h	2025-09-25 17:40:37.319375914 +0200
@@ -153,6 +153,14 @@
 #define SIOCSHWTSTAMP	0x89b0		/* set and get config		*/
 #define SIOCGHWTSTAMP	0x89b1		/* get config			*/
 
+/* fbxbridge call */
+#define SIOCGFBXBRIDGE	0x89c0		/* fbxbridge support          */
+#define SIOCSFBXBRIDGE	0x89c1		/* Set fbxbridge options      */
+
+/* fbxdiverter call */
+#define SIOCGFBXDIVERT  0x89d0		/* fbxdiverter support          */
+#define SIOCSFBXDIVERT  0x89d1		/* Set fbxdiverter options      */
+
 /* Device private ioctl calls */
 
 /*
diff -ruw linux-6.13.12/include/uapi/linux/tcp.h linux-6.13.12-fbx/include/uapi/linux/tcp.h
--- linux-6.13.12/include/uapi/linux/tcp.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/tcp.h	2025-09-25 17:40:37.319375914 +0200
@@ -141,6 +141,8 @@
 #define TCP_REPAIR_OFF		0
 #define TCP_REPAIR_OFF_NO_WP	-1	/* Turn off without window probes */
 
+#define TCP_LINEAR_RTO		128	/* force use of linear timeouts */
+
 struct tcp_repair_opt {
 	__u32	opt_code;
 	__u32	opt_val;
diff -ruw linux-6.13.12/include/uapi/linux/tty.h linux-6.13.12-fbx/include/uapi/linux/tty.h
--- linux-6.13.12/include/uapi/linux/tty.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/tty.h	2025-09-25 17:40:37.319375914 +0200
@@ -39,8 +39,9 @@
 #define N_MCTP		28	/* MCTP-over-serial */
 #define N_DEVELOPMENT	29	/* Manual out-of-tree testing */
 #define N_CAN327	30	/* ELM327 based OBD-II interfaces */
+#define N_REMOTI	31	/* RemoTI over UART */
 
 /* Always the newest line discipline + 1 */
-#define NR_LDISCS	31
+#define NR_LDISCS	32
 
 #endif /* _UAPI_LINUX_TTY_H */
diff -ruw linux-6.13.12/init/Kconfig linux-6.13.12-fbx/init/Kconfig
--- linux-6.13.12/init/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/init/Kconfig	2025-09-25 17:40:37.335375993 +0200
@@ -175,6 +175,15 @@
 	  Maximum of each of the number of arguments and environment
 	  variables passed to init from the kernel command line.
 
+
+config CROSS_COMPILE
+	string "Cross-compiler tool prefix"
+	help
+	  Same as running 'make CROSS_COMPILE=prefix-' but stored for
+	  default make runs in this kernel build directory.  You don't
+	  need to set this unless you want the configured kernel build
+	  directory to select the cross-compiler automatically.
+
 config COMPILE_TEST
 	bool "Compile also drivers which will not load"
 	depends on HAS_IOMEM
@@ -817,6 +826,43 @@
 
 	  There is no additional runtime cost to printk with this enabled.
 
+config FBX_DECRYPT_INITRD
+	bool "Decrypt initrd at boot"
+	depends on BLK_DEV_RAM
+	default n
+
+choice
+	prompt "initrd decryption encryption flavor"
+	default FBX_DECRYPT_INITRD_RC4
+	depends on FBX_DECRYPT_INITRD
+
+config FBX_DECRYPT_INITRD_RC4
+	bool "RC4"
+
+config FBX_DECRYPT_INITRD_CHACHA20
+	bool "CHACHA20"
+	select CRYPTO_CHACHA20
+
+endchoice
+
+config FBX_DECRYPT_INITRD_KEY
+	string "Decryption key"
+	depends on FBX_DECRYPT_INITRD
+
+config FBX_DECRYPT_INITRD_NONCE
+	string "Decryption nonce/IV"
+	depends on FBX_DECRYPT_INITRD_CHACHA20
+
+config FBX_VERIFY_INITRD
+	bool "Verify initrd at boot"
+	depends on FBX_DECRYPT_INITRD
+	select CRYPTO_RSA
+	select CRYPTO_SHA256
+
+config FBX_VERIFY_INITRD_PUBKEY
+	string "Public key path for initrd verify"
+	depends on FBX_VERIFY_INITRD
+
 #
 # Architectures with an unreliable sched_clock() should select this:
 #
diff -ruw linux-6.13.12/init/Makefile linux-6.13.12-fbx/init/Makefile
--- linux-6.13.12/init/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/init/Makefile	2025-09-25 17:40:37.335375993 +0200
@@ -15,6 +15,14 @@
 
 obj-y                          += init_task.o
 
+obj-$(CONFIG_FBX_DECRYPT_INITRD)+= fbx_decrypt_initrd.o
+obj-$(CONFIG_FBX_DECRYPT_INITRD_RC4) += rc4.o
+obj-$(CONFIG_FBX_VERIFY_INITRD) += fbx_initrd_pub_key.o
+
+PUB_KEY_PATH_UNQUOTED = $(patsubst "%",%,$(CONFIG_FBX_VERIFY_INITRD_PUBKEY))
+
+init/fbx_initrd_pub_key.o: $(PUB_KEY_PATH_UNQUOTED)
+
 mounts-y			:= do_mounts.o
 mounts-$(CONFIG_BLK_DEV_RAM)	+= do_mounts_rd.o
 mounts-$(CONFIG_BLK_DEV_INITRD)	+= do_mounts_initrd.o
diff -ruw linux-6.13.12/init/init_task.c linux-6.13.12-fbx/init/init_task.c
--- linux-6.13.12/init/init_task.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/init/init_task.c	2025-09-25 17:40:37.339376013 +0200
@@ -72,6 +72,7 @@
 	.stack		= init_stack,
 	.usage		= REFCOUNT_INIT(2),
 	.flags		= PF_KTHREAD,
+	.exec_mode	= EXEC_MODE_UNLIMITED,
 	.prio		= MAX_PRIO - 20,
 	.static_prio	= MAX_PRIO - 20,
 	.normal_prio	= MAX_PRIO - 20,
diff -ruw linux-6.13.12/init/initramfs.c linux-6.13.12-fbx/init/initramfs.c
--- linux-6.13.12/init/initramfs.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/init/initramfs.c	2025-09-29 14:23:07.617732469 +0200
@@ -18,6 +18,7 @@
 #include <linux/init_syscalls.h>
 #include <linux/umh.h>
 #include <linux/security.h>
+#include <linux/printk.h>
 
 #include "do_mounts.h"
 
@@ -687,6 +688,10 @@
 	ssize_t written;
 	struct file *file;
 	loff_t pos = 0;
+#ifdef CONFIG_FBX_DECRYPT_INITRD
+	int ret;
+	extern int fbx_decrypt_initrd(char *start, u32 size);
+#endif
 
 	printk(KERN_INFO "rootfs image is not initramfs (%s); looks like an initrd\n",
 			err);
@@ -694,6 +699,15 @@
 	if (IS_ERR(file))
 		return;
 
+#ifdef CONFIG_FBX_DECRYPT_INITRD
+	ret = fbx_decrypt_initrd((char*)initrd_start,
+				 initrd_end - initrd_start);
+	if (ret) {
+		printk(KERN_ERR "Decrypt failed: %i\n", ret);
+		return;
+	}
+#endif
+
 	written = xwrite(file, (char *)initrd_start, initrd_end - initrd_start,
 			&pos);
 	if (written != initrd_end - initrd_start)
diff -ruw linux-6.13.12/kernel/bpf/core.c linux-6.13.12-fbx/kernel/bpf/core.c
--- linux-6.13.12/kernel/bpf/core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/kernel/bpf/core.c	2025-09-25 17:40:37.363376132 +0200
@@ -2296,8 +2296,7 @@
 					 const struct bpf_insn *insn)
 {
 	/* If this handler ever gets executed, then BPF_JIT_ALWAYS_ON
-	 * is not working properly, or interpreter is being used when
-	 * prog->jit_requested is not 0, so warn about it!
+	 * is not working properly, so warn about it!
 	 */
 	WARN_ON_ONCE(1);
 	return 0;
@@ -2377,8 +2376,9 @@
 	return ret;
 }
 
-static void bpf_prog_select_func(struct bpf_prog *fp)
+static bool bpf_prog_select_interpreter(struct bpf_prog *fp)
 {
+	bool select_interpreter = false;
 #ifndef CONFIG_BPF_JIT_ALWAYS_ON
 	u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
 	u32 idx = (round_up(stack_depth, 32) / 32) - 1;
@@ -2387,15 +2387,16 @@
 	 * But for non-JITed programs, we don't need bpf_func, so no bounds
 	 * check needed.
 	 */
-	if (!fp->jit_requested &&
-	    !WARN_ON_ONCE(idx >= ARRAY_SIZE(interpreters))) {
+	if (idx < ARRAY_SIZE(interpreters)) {
 		fp->bpf_func = interpreters[idx];
+		select_interpreter = true;
 	} else {
 		fp->bpf_func = __bpf_prog_ret0_warn;
 	}
 #else
 	fp->bpf_func = __bpf_prog_ret0_warn;
 #endif
+	return select_interpreter;
 }
 
 /**
@@ -2423,7 +2424,8 @@
 	    bpf_prog_has_kfunc_call(fp))
 		jit_needed = true;
 
-	bpf_prog_select_func(fp);
+	if (!bpf_prog_select_interpreter(fp))
+		jit_needed = true;
 
 	/* eBPF JITs can rewrite the program in case constant
 	 * blinding is active. However, in case of error during
diff -ruw linux-6.13.12/kernel/dma/mapping.c linux-6.13.12-fbx/kernel/dma/mapping.c
--- linux-6.13.12/kernel/dma/mapping.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/kernel/dma/mapping.c	2025-09-25 17:40:37.391376271 +0200
@@ -664,6 +664,53 @@
 }
 EXPORT_SYMBOL(dma_free_attrs);
 
+void *dma_alloc_coherent_no_dev(size_t size, dma_addr_t *dma_handle, gfp_t gfp)
+{
+	struct page *page;
+	void *cpu_addr;
+	pgprot_t prot;
+
+	size = PAGE_ALIGN(size);
+	/* no specific zone and no zero init, this is incompatible with
+	 * our pupose built reduced implementation
+	 */
+	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_ZERO);
+
+	/* simply alloc some continuous pages */
+	page = alloc_pages(gfp, get_order(size));
+	if (!page)
+		return NULL;
+
+	/* remove any dirty cache lines on the kernel alias */
+	arch_dma_prep_coherent(page, size);
+
+	/* create a coherent mapping */
+	prot = pgprot_dmacoherent(PAGE_KERNEL);
+	cpu_addr = dma_common_contiguous_remap(page, size, prot,
+			__builtin_return_address(0));
+	if (!cpu_addr) {
+		__free_pages(page, get_order(size));
+		return NULL;
+	}
+
+	memset(cpu_addr, 0, size);
+
+	*dma_handle = page_to_phys(page);
+	return cpu_addr;
+}
+EXPORT_SYMBOL(dma_alloc_coherent_no_dev);
+
+void dma_free_coherent_no_dev(size_t size, void *cpu_addr,dma_addr_t dma_handle)
+{
+	struct page *page;
+
+	page = phys_to_page(dma_handle);
+
+	vunmap(cpu_addr);
+	return __free_pages(page, get_order(size));
+}
+EXPORT_SYMBOL(dma_free_coherent_no_dev);
+
 static struct page *__dma_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
 {
diff -ruw linux-6.13.12/kernel/fork.c linux-6.13.12-fbx/kernel/fork.c
--- linux-6.13.12/kernel/fork.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/kernel/fork.c	2025-09-25 17:40:37.399376311 +0200
@@ -1157,6 +1157,13 @@
 	set_task_stack_end_magic(tsk);
 	clear_syscall_work_syscall_user_dispatch(tsk);
 
+#ifdef CONFIG_PSEUDO_ASLR
+	tsk->paslr_used = false;
+	tsk->paslr_exec_policy = orig->paslr_exec_policy;
+	memcpy(tsk->paslr_exec_preseed, orig->paslr_exec_preseed,
+	       sizeof (tsk->paslr_exec_preseed));
+#endif
+
 #ifdef CONFIG_STACKPROTECTOR
 	tsk->stack_canary = get_random_canary();
 #endif
@@ -1210,6 +1217,11 @@
 	tsk->mm_cid_active = 0;
 	tsk->migrate_from_cpu = -1;
 #endif
+	/*
+	 * inherit parent exec_mode.
+	 */
+	tsk->exec_mode = orig->exec_mode;
+
 	return tsk;
 
 free_stack:
diff -ruw linux-6.13.12/kernel/module/Kconfig linux-6.13.12-fbx/kernel/module/Kconfig
--- linux-6.13.12/kernel/module/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/kernel/module/Kconfig	2025-09-25 17:40:37.423376430 +0200
@@ -397,6 +397,10 @@
 	  one per line. The path can be absolute, or relative to the kernel
 	  source or obj tree.
 
+config UNUSED_KSYMS_WHITELIST_SYMS
+	string "Whitelist of symbols name to keep in ksymtab"
+	depends on TRIM_UNUSED_KSYMS
+
 config MODULES_TREE_LOOKUP
 	def_bool y
 	depends on PERF_EVENTS || TRACING || CFI_CLANG
diff -ruw linux-6.13.12/kernel/sys.c linux-6.13.12-fbx/kernel/sys.c
--- linux-6.13.12/kernel/sys.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/kernel/sys.c	2025-09-25 17:40:37.463376628 +0200
@@ -12,6 +12,7 @@
 #include <linux/mman.h>
 #include <linux/reboot.h>
 #include <linux/prctl.h>
+#include <linux/prctl-private.h>
 #include <linux/highuid.h>
 #include <linux/fs.h>
 #include <linux/kmod.h>
@@ -2809,6 +2810,36 @@
 			return -EINVAL;
 		error = arch_lock_shadow_stack_status(me, arg2);
 		break;
+	case PR_SET_EXEC_MODE:
+		if (arg2 != EXEC_MODE_UNLIMITED &&
+		    arg2 != EXEC_MODE_ONCE &&
+		    arg2 != EXEC_MODE_DENIED)
+			return -EINVAL;
+
+		if (arg2 > current->exec_mode)
+			return -EPERM;
+		current->exec_mode = arg2;
+		return 0;
+	case PR_SET_PASLR_POLICY:
+		if (arg2 != PR_PASLR_POLICY_UID &&
+		    arg2 != PR_PASLR_POLICY_DISABLE &&
+		    arg2 != PR_PASLR_POLICY_PRESEED)
+			return -EINVAL;
+
+#ifdef CONFIG_PSEUDO_ASLR
+		if (arg2 == PR_PASLR_POLICY_PRESEED) {
+			if (copy_from_user(current->paslr_exec_preseed,
+					   (u8 __user *)arg3,
+					   sizeof(current->paslr_exec_preseed)))
+				return -EFAULT;
+		}
+		current->paslr_exec_policy = arg2;
+		return 0;
+#else
+		return -EOPNOTSUPP;
+#endif
+	case PR_GET_EXEC_MODE:
+		return current->exec_mode;
 	default:
 		error = -EINVAL;
 		break;
diff -ruw linux-6.13.12/lib/Kconfig linux-6.13.12-fbx/lib/Kconfig
--- linux-6.13.12/lib/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/lib/Kconfig	2025-09-25 17:40:37.507376846 +0200
@@ -751,6 +751,13 @@
 	help
           Run boot-time test of light-weight queuing.
 
+config ARCH_HAS_FBXSERIAL
+	bool
+
+config FBXSERIAL
+	bool "fbxserial"
+	select CRC32
+
 endmenu
 
 config GENERIC_IOREMAP
diff -ruw linux-6.13.12/lib/Makefile linux-6.13.12-fbx/lib/Makefile
--- linux-6.13.12/lib/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/lib/Makefile	2025-09-25 17:40:37.507376846 +0200
@@ -406,3 +406,4 @@
 obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o
 
 subdir-$(CONFIG_FORTIFY_SOURCE) += test_fortify
+obj-$(CONFIG_FBXSERIAL) += fbxserial.o
diff -ruw linux-6.13.12/lib/devres.c linux-6.13.12-fbx/lib/devres.c
--- linux-6.13.12/lib/devres.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/lib/devres.c	2025-09-25 17:40:37.527376945 +0200
@@ -250,6 +250,47 @@
 }
 EXPORT_SYMBOL(devm_of_iomap);
 
+/*
+ * devm_of_iomap_byname - Requests a resource and maps the memory mapped IO
+ *		   for a given device_node managed by a given device
+ *
+ * Checks that a resource is a valid memory region, requests the memory
+ * region and ioremaps it. All operations are managed and will be undone
+ * on driver detach of the device.
+ *
+ * This is to be used when a device requests/maps resources described
+ * by other device tree nodes (children or otherwise).
+ *
+ * @dev:	The device "managing" the resource
+ * @node:       The device-tree node where the resource resides
+ * @name:	name of the MMIO range in the "reg-names" property
+ * @size:	Returns the size of the resource (pass NULL if not needed)
+ *
+ * Usage example:
+ *
+ *	base = devm_of_iomap_byname(&pdev->dev, node, "register", NULL);
+ *	if (IS_ERR(base))
+ *		return PTR_ERR(base);
+ *
+ * Return: a pointer to the requested and mapped memory or an ERR_PTR() encoded
+ * error code on failure.
+ */
+void __iomem *devm_of_iomap_byname(struct device *dev, struct device_node *node,
+				   char *name, resource_size_t *size)
+{
+	int index;
+
+	if (!name)
+		return IOMEM_ERR_PTR(-EINVAL);
+
+	index = fwnode_property_match_string(&node->fwnode, "reg-names", name);
+	if (index < 0)
+		return IOMEM_ERR_PTR(index);
+
+	return devm_of_iomap(dev, node, index, size);
+}
+EXPORT_SYMBOL(devm_of_iomap_byname);
+
 #ifdef CONFIG_HAS_IOPORT_MAP
 /*
  * Generic iomap devres
diff -ruw linux-6.13.12/mm/Kconfig linux-6.13.12-fbx/mm/Kconfig
--- linux-6.13.12/mm/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/mm/Kconfig	2025-09-25 17:40:37.575377183 +0200
@@ -369,6 +369,13 @@
 
 	  Say Y if unsure.
 
+config PSEUDO_ASLR
+	bool "Support pseudo-ASLR for userland address space"
+	select CRYPTO_LIB_SHA256
+	depends on ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
+	help
+	  ASLR with a configurable pseudo-random seed policy.
+
 config COMPAT_BRK
 	bool "Disable heap randomization"
 	default y
@@ -762,6 +769,13 @@
 config ARCH_SUPPORTS_MEMORY_FAILURE
 	bool
 
+config PAGE_FRAG_CACHE_ORDER
+	int "page order size of page fragment allocator"
+	default 3
+	help
+	  This allocator is used by networking only for skb->head allocation.
+	  A large value speeds up allocation but causes memory fragmentation.
+
 config MEMORY_FAILURE
 	depends on MMU
 	depends on ARCH_SUPPORTS_MEMORY_FAILURE
diff -ruw linux-6.13.12/mm/ksm.c linux-6.13.12-fbx/mm/ksm.c
--- linux-6.13.12/mm/ksm.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/mm/ksm.c	2025-09-25 17:40:37.603377322 +0200
@@ -3262,6 +3262,25 @@
 #endif /* CONFIG_MEMORY_HOTREMOVE */
 
 #ifdef CONFIG_PROC_FS
+/*
+ * The process is mergeable only if any VMA is currently
+ * applicable to KSM.
+ *
+ * The mmap lock must be held in read mode.
+ */
+bool ksm_process_mergeable(struct mm_struct *mm)
+{
+	struct vm_area_struct *vma;
+
+	mmap_assert_locked(mm);
+	VMA_ITERATOR(vmi, mm, 0);
+	for_each_vma(vmi, vma)
+		if (vma->vm_flags & VM_MERGEABLE)
+			return true;
+
+	return false;
+}
+
 long ksm_process_profit(struct mm_struct *mm)
 {
 	return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
diff -ruw linux-6.13.12/mm/memtest.c linux-6.13.12-fbx/mm/memtest.c
--- linux-6.13.12/mm/memtest.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/mm/memtest.c	2025-09-25 17:40:37.611377362 +0200
@@ -29,6 +29,7 @@
 	0x7a6c7258554e494cULL, /* yeah ;-) */
 };
 
+#ifndef CONFIG_X86 // original code
 static void __init reserve_bad_mem(u64 pattern, phys_addr_t start_bad, phys_addr_t end_bad)
 {
 	pr_info("  %016llx bad mem addr %pa - %pa reserved\n",
@@ -69,19 +70,83 @@
 
 	early_memtest_done = true;
 }
+#else // fbx6hd
+/*
+ * Count memory errors in segment [a,b]
+ * If an error is detected, remove segment from memory pool.
+ */
+static void __init test_segment(unsigned long a, unsigned long b, u64 pat)
+{
+	int err_count = 0;
+	u64 *p;
+
+	/*
+	 * If an exception, such as page fault, is required to prefetch the data,
+	 * then the software prefetch instruction retires without prefetching data.
+	 */
+	for (p = __va(a); p != __va(b); p += 4) {
+		__builtin_prefetch(p+64);
+		if (p[0] != pat || p[1] != pat || p[2] != pat || p[3] != pat)
+			++err_count;
+	}
+
+	if (err_count) {
+		pr_warn("BAD+RAM: %lx-%lx: N=%d", a, b, err_count);
+		memblock_reserve(a, b-a);
+	}
+}
+
+typedef u64 u128 __attribute__ ((__vector_size__ (16)));
+
+static void __init write_pattern(unsigned long a, unsigned long b, u64 pat)
+{
+	u128 val = (u128){ pat, pat };
+	u128 *p = __va(a), *q = __va(b);
+	kernel_fpu_begin();
+	asm("movdqa %0, %%xmm0" : : "m" (val));
+	for (/**/; p != q; ++p)
+		asm("movntdq %%xmm0, %0" : "=m" (*p));
+	kernel_fpu_end();
+}
+
+#define SEGMENT_SIZE (1 << 16) // 64K
+
+static void __init memtest(u64 pattern, phys_addr_t start_phys, phys_addr_t size)
+{
+	/* On fbx6hd, ulong is wide enough to store physical addresses */
+	unsigned long curr = start_phys;
+	unsigned long next = ALIGN(curr + 1, SEGMENT_SIZE);
+	unsigned long end = start_phys + size;
+
+	// Check alignment for correct loop unrolling
+	if (curr & 0x1f || end & 0x1f) {
+		pr_warn("BAD+RAM: %lx-%lx: misaligned", curr, end);
+		return;
+	}
+
+	write_pattern(curr, end, pattern);
+
+	while (curr < end) {
+		if (next > end)
+			next = end;
+		test_segment(curr, next, pattern);
+		curr = next;
+		next += SEGMENT_SIZE;
+	}
+}
+#endif
 
 static void __init do_one_pass(u64 pattern, phys_addr_t start, phys_addr_t end)
 {
 	u64 i;
 	phys_addr_t this_start, this_end;
 
+	pr_info("pattern %016llx\n", cpu_to_be64(pattern));
 	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &this_start,
 				&this_end, NULL) {
 		this_start = clamp(this_start, start, end);
 		this_end = clamp(this_end, start, end);
 		if (this_start < this_end) {
-			pr_info("  %pa - %pa pattern %016llx\n",
-				&this_start, &this_end, cpu_to_be64(pattern));
 			memtest(pattern, this_start, this_end - this_start);
 		}
 	}
diff -ruw linux-6.13.12/mm/page_frag_cache.c linux-6.13.12-fbx/mm/page_frag_cache.c
--- linux-6.13.12/mm/page_frag_cache.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/mm/page_frag_cache.c	2025-09-25 17:40:37.619377402 +0200
@@ -18,6 +18,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/page_frag_cache.h>
+#include <linux/page_owner.h>
 #include "internal.h"
 
 static unsigned long encoded_page_create(struct page *page, unsigned int order,
@@ -66,6 +67,8 @@
 
 	nc->encoded_page = page ?
 		encoded_page_create(page, order, page_is_pfmemalloc(page)) : 0;
+	if (page)
+		set_page_owner_frag_cache(page, order, nc);
 
 	return page;
 }
diff -ruw linux-6.13.12/mm/pgtable-generic.c linux-6.13.12-fbx/mm/pgtable-generic.c
--- linux-6.13.12/mm/pgtable-generic.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/mm/pgtable-generic.c	2025-09-25 17:40:37.623377421 +0200
@@ -407,3 +407,5 @@
 	pte_unmap_unlock(pte, ptl);
 	goto again;
 }
+
+EXPORT_SYMBOL(__pte_offset_map_lock);
diff -ruw linux-6.13.12/mm/util.c linux-6.13.12-fbx/mm/util.c
--- linux-6.13.12/mm/util.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/mm/util.c	2025-09-25 17:40:37.635377481 +0200
@@ -28,6 +28,10 @@
 
 #include <kunit/visibility.h>
 
+#ifdef CONFIG_PSEUDO_ASLR
+#include <crypto/sha2.h>
+#endif
+
 #include "internal.h"
 #include "swap.h"
 
@@ -351,6 +355,74 @@
 #endif
 }
 
+#ifdef CONFIG_PSEUDO_ASLR
+
+static u8 paslr_host_seed[SCHED_PASLR_SEED_SIZE];
+static bool paslr_host_seed_valid;
+static DEFINE_MUTEX(paslr_mutex);
+
+void paslr_task_init(struct task_struct *tsk, int policy, const char *seed)
+{
+	struct sha256_state s;
+
+	mutex_lock(&paslr_mutex);
+	if (!paslr_host_seed_valid) {
+		get_random_bytes_wait(paslr_host_seed,
+				      sizeof (paslr_host_seed));
+		paslr_host_seed_valid = true;
+	}
+	mutex_unlock(&paslr_mutex);
+
+	sha256_init(&s);
+	sha256_update(&s, paslr_host_seed, SCHED_PASLR_SEED_SIZE);
+	sha256_update(&s, (void *)&policy, sizeof (policy));
+	sha256_update(&s, seed, SCHED_PASLR_SEED_SIZE);
+	sha256_final(&s, tsk->paslr_rng_state);
+
+	tsk->paslr_rng_ctr = 0;
+}
+
+static unsigned long aslr_get_random_long(bool skip_paslr)
+{
+	struct sha256_state s;
+	unsigned long v;
+	u8 d[SHA256_DIGEST_SIZE];
+
+	if (skip_paslr || !current->paslr_used)
+		goto no_paslr;
+
+	/* generate output with current counter value */
+	++current->paslr_rng_ctr;
+	sha256_init(&s);
+	sha256_update(&s, current->paslr_rng_state,
+		      sizeof (current->paslr_rng_state));
+	sha256_update(&s, (u8 *)&current->paslr_rng_ctr,
+		      sizeof (current->paslr_rng_ctr));
+	sha256_final(&s, d);
+
+	/* reseed with next counter value */
+	++current->paslr_rng_ctr;
+	sha256_init(&s);
+	sha256_update(&s, current->paslr_rng_state,
+		      sizeof (current->paslr_rng_state));
+	sha256_update(&s, (u8 *)&current->paslr_rng_ctr,
+		      sizeof (current->paslr_rng_ctr));
+	sha256_final(&s, current->paslr_rng_state);
+
+	memcpy(&v, d, sizeof (v));
+	return v;
+
+no_paslr:
+	return get_random_long();
+}
+
+#else
+static inline unsigned long aslr_get_random_long(bool skip_paslr)
+{
+	return get_random_long();
+}
+#endif
+
 /**
  * randomize_page - Generate a random, page aligned address
  * @start:	The smallest acceptable address the caller will take.
@@ -365,7 +437,8 @@
  * Return: A page aligned address within [start, start + range).  On error,
  * @start is returned.
  */
-unsigned long randomize_page(unsigned long start, unsigned long range)
+static unsigned long _randomize_page(unsigned long start, unsigned long range,
+				     bool skip_paslr)
 {
 	if (!PAGE_ALIGNED(start)) {
 		range -= PAGE_ALIGN(start) - start;
@@ -380,7 +453,12 @@
 	if (range == 0)
 		return start;
 
-	return start + (get_random_long() % range << PAGE_SHIFT);
+	return start + (aslr_get_random_long(skip_paslr) % range << PAGE_SHIFT);
+}
+
+unsigned long randomize_page(unsigned long start, unsigned long range)
+{
+	return _randomize_page(start, range, false);
 }
 
 #ifdef CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
@@ -388,9 +466,9 @@
 {
 	/* Is the current task 32bit ? */
 	if (!IS_ENABLED(CONFIG_64BIT) || is_compat_task())
-		return randomize_page(mm->brk, SZ_32M);
+		return _randomize_page(mm->brk, SZ_32M, true);
 
-	return randomize_page(mm->brk, SZ_1G);
+	return _randomize_page(mm->brk, SZ_1G, true);
 }
 
 unsigned long arch_mmap_rnd(void)
@@ -399,10 +477,10 @@
 
 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
 	if (is_compat_task())
-		rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
+		rnd = aslr_get_random_long(false) & ((1UL << mmap_rnd_compat_bits) - 1);
 	else
 #endif /* CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS */
-		rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
+		rnd = aslr_get_random_long(false) & ((1UL << mmap_rnd_bits) - 1);
 
 	return rnd << PAGE_SHIFT;
 }
diff -ruw linux-6.13.12/mm/vmalloc.c linux-6.13.12-fbx/mm/vmalloc.c
--- linux-6.13.12/mm/vmalloc.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/mm/vmalloc.c	2025-09-25 17:40:37.635377481 +0200
@@ -3927,6 +3927,19 @@
 }
 EXPORT_SYMBOL(__vmalloc_noprof);
 
+/*
+ * __vmalloc_pgprot(): same as __vmalloc, but with a pgprot_t parameter.
+ *
+ * required for IntelCE drivers.
+ */
+void *__vmalloc_pgprot(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
+{
+	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+				    gfp_mask, prot, 0, NUMA_NO_NODE,
+				    __builtin_return_address(0));
+}
+EXPORT_SYMBOL(__vmalloc_pgprot);
+
 /**
  * vmalloc - allocate virtually contiguous memory
  * @size:    allocation size
diff -ruw linux-6.13.12/net/8021q/Kconfig linux-6.13.12-fbx/net/8021q/Kconfig
--- linux-6.13.12/net/8021q/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/8021q/Kconfig	2025-09-25 17:40:37.643377521 +0200
@@ -39,3 +39,12 @@
 	  supersedes GVRP and is not backwards-compatible.
 
 	  If unsure, say N.
+
+config VLAN_FBX
+	bool "Freebox specific VLAN ethertype to bypass dumb switches"
+	depends on VLAN_8021Q
+	help
+	  Select this to enable the FBX VLAN specific ethertype to bypass
+	  switches that drop 802.1q packets.
+
+	  If unsure, say N.
diff -ruw linux-6.13.12/net/8021q/vlan.c linux-6.13.12-fbx/net/8021q/vlan.c
--- linux-6.13.12/net/8021q/vlan.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/8021q/vlan.c	2025-09-25 17:40:37.643377521 +0200
@@ -212,7 +212,8 @@
 /*  Attach a VLAN device to a mac address (ie Ethernet Card).
  *  Returns 0 if the device was created or a negative error code otherwise.
  */
-static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
+int register_vlan_device(struct net_device *real_dev, u16 vlan_id);
+int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
 {
 	struct net_device *new_dev;
 	struct vlan_dev_priv *vlan;
diff -ruw linux-6.13.12/net/8021q/vlan.h linux-6.13.12-fbx/net/8021q/vlan.h
--- linux-6.13.12/net/8021q/vlan.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/8021q/vlan.h	2025-09-25 17:40:37.643377521 +0200
@@ -16,6 +16,9 @@
 enum vlan_protos {
 	VLAN_PROTO_8021Q	= 0,
 	VLAN_PROTO_8021AD,
+#ifdef CONFIG_VLAN_FBX
+	VLAN_PROTO_FBX,
+#endif
 	VLAN_PROTO_NUM,
 };
 
@@ -43,6 +46,10 @@
 		return VLAN_PROTO_8021Q;
 	case htons(ETH_P_8021AD):
 		return VLAN_PROTO_8021AD;
+#ifdef CONFIG_VLAN_FBX
+	case htons(ETH_P_FBXVLAN):
+		return VLAN_PROTO_FBX;
+#endif
 	default:
 		WARN(1, "invalid VLAN protocol: 0x%04x\n", ntohs(proto));
 		return -EINVAL;
diff -ruw linux-6.13.12/net/8021q/vlan_core.c linux-6.13.12-fbx/net/8021q/vlan_core.c
--- linux-6.13.12/net/8021q/vlan_core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/8021q/vlan_core.c	2025-09-25 17:40:37.643377521 +0200
@@ -99,6 +99,12 @@
 }
 EXPORT_SYMBOL(__vlan_find_dev_deep_rcu);
 
+struct net_device *vlan_dev_upper_dev(const struct net_device *dev)
+{
+	return vlan_dev_priv(dev)->real_dev;
+}
+EXPORT_SYMBOL(vlan_dev_upper_dev);
+
 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
 {
 	struct net_device *ret = vlan_dev_priv(dev)->real_dev;
diff -ruw linux-6.13.12/net/8021q/vlan_netlink.c linux-6.13.12-fbx/net/8021q/vlan_netlink.c
--- linux-6.13.12/net/8021q/vlan_netlink.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/8021q/vlan_netlink.c	2025-09-25 17:40:37.643377521 +0200
@@ -63,6 +63,9 @@
 		switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) {
 		case htons(ETH_P_8021Q):
 		case htons(ETH_P_8021AD):
+#ifdef CONFIG_VLAN_FBX
+		case htons(ETH_P_FBXVLAN):
+#endif
 			break;
 		default:
 			NL_SET_ERR_MSG_MOD(extack, "Invalid VLAN protocol");
diff -ruw linux-6.13.12/net/Kconfig linux-6.13.12-fbx/net/Kconfig
--- linux-6.13.12/net/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/Kconfig	2025-09-25 17:40:37.647377540 +0200
@@ -32,6 +32,10 @@
 	  This option can be selected by other options that need compat
 	  netlink messages.
 
+config NET_PROMISC_MESSAGES
+	bool "show promisc/allmulti status change in kernel log"
+	default y
+
 config COMPAT_NETLINK_MESSAGES
 	def_bool y
 	depends on COMPAT
@@ -77,6 +81,19 @@
 
 menu "Networking options"
 
+config NETSKBPAD
+	int "Size reserved by dev_alloc_skb"
+	default 32
+
+config NETRXTHREAD
+	bool "Do rx network processing in kernel thread"
+	depends on BROKEN_ON_SMP
+
+config NETRXTHREAD_RX_QUEUE
+	int "Number of rx queues"
+	default 1
+	depends on NETRXTHREAD
+
 source "net/packet/Kconfig"
 source "net/unix/Kconfig"
 source "net/tls/Kconfig"
@@ -251,6 +268,8 @@
 source "net/tipc/Kconfig"
 source "net/atm/Kconfig"
 source "net/l2tp/Kconfig"
+source "net/fbxatm/Kconfig"
+source "net/fbxbridge/Kconfig"
 source "net/802/Kconfig"
 source "net/bridge/Kconfig"
 source "net/dsa/Kconfig"
diff -ruw linux-6.13.12/net/Makefile linux-6.13.12-fbx/net/Makefile
--- linux-6.13.12/net/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/Makefile	2025-09-25 17:40:37.647377540 +0200
@@ -38,6 +38,12 @@
 obj-$(CONFIG_STREAM_PARSER)	+= strparser/
 obj-$(CONFIG_ATM)		+= atm/
 obj-$(CONFIG_L2TP)		+= l2tp/
+ifneq ($(CONFIG_FBXATM),)
+obj-y				+= fbxatm/
+endif
+ifneq ($(CONFIG_FBXBRIDGE),)
+obj-y				+= fbxbridge/
+endif
 obj-$(CONFIG_PHONET)		+= phonet/
 ifneq ($(CONFIG_VLAN_8021Q),)
 obj-y				+= 8021q/
diff -ruw linux-6.13.12/net/batman-adv/Kconfig linux-6.13.12-fbx/net/batman-adv/Kconfig
--- linux-6.13.12/net/batman-adv/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/Kconfig	2025-09-25 17:40:37.651377560 +0200
@@ -94,3 +94,31 @@
 	  messages using the generic tracing infrastructure of the kernel.
 	  BATMAN_ADV_DEBUG must also be selected to get trace events for
 	  batadv_dbg.
+
+config BATMAN_ADV_FBX
+	bool "B.A.T.M.A.N. FBX specific features"
+	depends on BATMAN_ADV
+	help
+	  This enables FBX specific options to be selected (e.g. MTU
+	  discovery, SLAP protocol).
+
+config BATMAN_ADV_FBX_MTU
+	bool "B.A.T.M.A.N. FBX path max MTU discovery feature"
+	depends on BATMAN_ADV_FBX
+	help
+	  This enables FBX path max MTU discovery protocol.
+
+config BATMAN_ADV_FBX_SLAP
+	bool "B.A.T.M.A.N. FBX SLAP"
+	depends on BATMAN_ADV_FBX
+	help
+	  This enables FBX SLAP (simple loop avoidance protocol) to handle
+	  blend of LAN and B.A.T.M.A.N traffic on ethernet port correctly
+	  in Freebox configuration
+
+config BATMAN_ADV_FBX_PERIF_ROUTER
+	bool "B.A.T.M.A.N. FBX perif router"
+	depends on BATMAN_ADV_FBX && BATMAN_ADV_BATMAN_V
+	help
+	  Keep track of per interface best router to reach a specific
+	  originator.
diff -ruw linux-6.13.12/net/batman-adv/Makefile linux-6.13.12-fbx/net/batman-adv/Makefile
--- linux-6.13.12/net/batman-adv/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/Makefile	2025-09-25 17:40:37.651377560 +0200
@@ -4,6 +4,7 @@
 # Marek Lindner, Simon Wunderlich
 
 obj-$(CONFIG_BATMAN_ADV) += batman-adv.o
+
 batman-adv-y += bat_algo.o
 batman-adv-y += bat_iv_ogm.o
 batman-adv-$(CONFIG_BATMAN_ADV_BATMAN_V) += bat_v.o
@@ -31,5 +32,9 @@
 batman-adv-y += tp_meter.o
 batman-adv-y += translation-table.o
 batman-adv-y += tvlv.o
+batman-adv-$(CONFIG_BATMAN_ADV_FBX) += fbx/fbx.o
+batman-adv-$(CONFIG_BATMAN_ADV_FBX_MTU) += fbx/mtu.o
+batman-adv-$(CONFIG_BATMAN_ADV_FBX_SLAP) += fbx/slap.o
+batman-adv-$(CONFIG_BATMAN_ADV_FBX_PERIF_ROUTER) += fbx/router.o
 
 CFLAGS_trace.o := -I$(src)
diff -ruw linux-6.13.12/net/batman-adv/bat_iv_ogm.c linux-6.13.12-fbx/net/batman-adv/bat_iv_ogm.c
--- linux-6.13.12/net/batman-adv/bat_iv_ogm.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/bat_iv_ogm.c	2025-09-25 17:40:37.651377560 +0200
@@ -2022,13 +2022,20 @@
 	bool ret = true;
 
 	neigh1_ifinfo = batadv_neigh_ifinfo_get(neigh1, if_outgoing1);
-	neigh2_ifinfo = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
-
-	if (!neigh1_ifinfo || !neigh2_ifinfo) {
+	if (!neigh1_ifinfo) {
 		ret = false;
 		goto out;
 	}
 
+	/* If neigh2 is invalid always prefer neigh1 */
+	*diff = 1;
+	if (!neigh2)
+		goto out;
+
+	neigh2_ifinfo = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
+	if (!neigh2_ifinfo)
+		goto out;
+
 	tq1 = neigh1_ifinfo->bat_iv.tq_avg;
 	tq2 = neigh2_ifinfo->bat_iv.tq_avg;
 	*diff = (int)tq1 - (int)tq2;
diff -ruw linux-6.13.12/net/batman-adv/bat_v.c linux-6.13.12-fbx/net/batman-adv/bat_v.c
--- linux-6.13.12/net/batman-adv/bat_v.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/bat_v.c	2025-09-25 17:40:37.651377560 +0200
@@ -38,6 +38,8 @@
 #include "hard-interface.h"
 #include "hash.h"
 #include "log.h"
+#include "fbx/fbx.h"
+#include "fbx/mtu.h"
 #include "netlink.h"
 #include "originator.h"
 
@@ -128,6 +130,7 @@
 batadv_v_neigh_dump_neigh(struct sk_buff *msg, u32 portid, u32 seq,
 			  struct batadv_hardif_neigh_node *hardif_neigh)
 {
+	struct batadv_priv *bat_priv;
 	void *hdr;
 	unsigned int last_seen_msecs;
 	u32 throughput;
@@ -152,6 +155,9 @@
 	    nla_put_u32(msg, BATADV_ATTR_THROUGHPUT, throughput))
 		goto nla_put_failure;
 
+	bat_priv = netdev_priv(hardif_neigh->if_incoming->soft_iface);
+	batadv_fbx_nl(bat_priv, BATADV_CMD_GET_NEIGHBORS, NULL, msg,
+		      hardif_neigh);
 	genlmsg_end(msg, hdr);
 	return 0;
 
@@ -309,6 +315,9 @@
 	if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
 		goto nla_put_failure;
 
+	batadv_fbx_nl(bat_priv, BATADV_CMD_GET_ORIGINATORS, NULL, msg,
+		      orig_node);
+
 	genlmsg_end(msg, hdr);
 	return 0;
 
@@ -458,6 +467,11 @@
 	if (!ifinfo1)
 		goto err_ifinfo1;
 
+	/* If neigh2 is invalid always prefer neigh1 */
+	ret = 1;
+	if (!neigh2)
+		goto err_ifinfo2;
+
 	ifinfo2 = batadv_neigh_ifinfo_get(neigh2, if_outgoing2);
 	if (!ifinfo2)
 		goto err_ifinfo2;
diff -ruw linux-6.13.12/net/batman-adv/bat_v_ogm.c linux-6.13.12-fbx/net/batman-adv/bat_v_ogm.c
--- linux-6.13.12/net/batman-adv/bat_v_ogm.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/bat_v_ogm.c	2025-09-25 17:40:37.651377560 +0200
@@ -42,6 +42,7 @@
 #include "send.h"
 #include "translation-table.h"
 #include "tvlv.h"
+#include "fbx/fbx.h"
 
 /**
  * batadv_v_ogm_orig_get() - retrieve and possibly create an originator node
@@ -475,12 +476,14 @@
 static u32 batadv_v_forward_penalty(struct batadv_priv *bat_priv,
 				    struct batadv_hard_iface *if_incoming,
 				    struct batadv_hard_iface *if_outgoing,
-				    u32 throughput)
+				    u32 throughput, bool *half_duplex)
 {
 	int if_hop_penalty = atomic_read(&if_incoming->hop_penalty);
 	int hop_penalty = atomic_read(&bat_priv->hop_penalty);
 	int hop_penalty_max = BATADV_TQ_MAX_VALUE;
 
+	*half_duplex = false;
+
 	/* Apply per hardif hop penalty */
 	throughput = throughput * (hop_penalty_max - if_hop_penalty) /
 		     hop_penalty_max;
@@ -495,8 +498,10 @@
 	 */
 	if (throughput > 10 &&
 	    if_incoming == if_outgoing &&
-	    !(if_incoming->bat_v.flags & BATADV_FULL_DUPLEX))
+	    !(if_incoming->bat_v.flags & BATADV_FULL_DUPLEX)) {
+		*half_duplex = true;
 		return throughput / 2;
+	}
 
 	/* hop penalty of 255 equals 100% */
 	return throughput * (hop_penalty_max - hop_penalty) / hop_penalty_max;
@@ -574,6 +579,9 @@
 
 	/* apply forward penalty */
 	ogm_forward = (struct batadv_ogm2_packet *)skb_buff;
+	ogm_forward->flags &= ~BATADV_V_HALF_DUPLEX;
+	if (neigh_ifinfo->bat_v.half_duplex)
+		ogm_forward->flags |= BATADV_V_HALF_DUPLEX;
 	ogm_forward->throughput = htonl(neigh_ifinfo->bat_v.throughput);
 	ogm_forward->ttl--;
 
@@ -616,6 +624,7 @@
 	bool protection_started = false;
 	int ret = -EINVAL;
 	u32 path_throughput;
+	bool half_duplex;
 	s32 seq_diff;
 
 	orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
@@ -657,10 +666,12 @@
 
 	path_throughput = batadv_v_forward_penalty(bat_priv, if_incoming,
 						   if_outgoing,
-						   ntohl(ogm2->throughput));
+						   ntohl(ogm2->throughput),
+						   &half_duplex);
 	neigh_ifinfo->bat_v.throughput = path_throughput;
 	neigh_ifinfo->bat_v.last_seqno = ntohl(ogm2->seqno);
 	neigh_ifinfo->last_ttl = ogm2->ttl;
+	neigh_ifinfo->bat_v.half_duplex = half_duplex;
 
 	if (seq_diff > 0 || protection_started)
 		ret = 1;
@@ -843,6 +854,28 @@
 }
 
 /**
+ * batadv_v_get_throughput() - Compute path throughput from ogm
+ * @ogm: OGM2 packet received
+ * @neigh: Neighbour OGM packet has been received from
+ * Return: Estimated path throughput
+ */
+static u32 batadv_v_get_throughput(struct batadv_ogm2_packet *ogm,
+				   struct batadv_hardif_neigh_node *neigh)
+{
+	u32 oth, lth;
+
+	oth = ntohl(ogm->throughput);
+	lth = ewma_throughput_read(&neigh->bat_v.throughput);
+
+	if (!(ogm->flags & BATADV_V_HALF_DUPLEX))
+		return min_t(u32, lth, oth);
+
+	/* OGM throughput was divided by two for backward compatibility's sake */
+	oth *= 2;
+	return oth * lth / (oth + lth);
+}
+
+/**
  * batadv_v_ogm_process() - process an incoming batman v OGM
  * @skb: the skb containing the OGM
  * @ogm_offset: offset to the OGM which should be processed (for aggregates)
@@ -858,7 +891,7 @@
 	struct batadv_neigh_node *neigh_node = NULL;
 	struct batadv_hard_iface *hard_iface;
 	struct batadv_ogm2_packet *ogm_packet;
-	u32 ogm_throughput, link_throughput, path_throughput;
+	u32 ogm_throughput;
 	int ret;
 
 	ethhdr = eth_hdr(skb);
@@ -912,9 +945,8 @@
 	 *  - For OGMs traversing more than hop the path throughput metric is
 	 *    the smaller of the path throughput and the link throughput.
 	 */
-	link_throughput = ewma_throughput_read(&hardif_neigh->bat_v.throughput);
-	path_throughput = min_t(u32, link_throughput, ogm_throughput);
-	ogm_packet->throughput = htonl(path_throughput);
+	ogm_packet->throughput = htonl(batadv_v_get_throughput(ogm_packet,
+							       hardif_neigh));
 
 	batadv_v_ogm_process_per_outif(bat_priv, ethhdr, ogm_packet, orig_node,
 				       neigh_node, if_incoming,
@@ -968,6 +1000,7 @@
 	}
 	rcu_read_unlock();
 out:
+	batadv_fbx_ogm_process(bat_priv, orig_node, neigh_node, ogm_packet);
 	batadv_orig_node_put(orig_node);
 	batadv_neigh_node_put(neigh_node);
 	batadv_hardif_neigh_put(hardif_neigh);
diff -ruw linux-6.13.12/net/batman-adv/fragmentation.c linux-6.13.12-fbx/net/batman-adv/fragmentation.c
--- linux-6.13.12/net/batman-adv/fragmentation.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/fragmentation.c	2025-09-25 17:40:37.655377580 +0200
@@ -27,6 +27,8 @@
 #include "originator.h"
 #include "send.h"
 
+#include "fbx/mtu.h"
+
 /**
  * batadv_frag_clear_chain() - delete entries in the fragment buffer chain
  * @head: head of chain with entries.
@@ -439,7 +441,7 @@
 	struct batadv_hard_iface *primary_if = NULL;
 	struct batadv_frag_packet frag_header;
 	struct sk_buff *skb_fragment;
-	unsigned int mtu = net_dev->mtu;
+	unsigned int mtu = batadv_mtu_get_for_neigh(neigh_node->hardif_neigh);
 	unsigned int header_size = sizeof(frag_header);
 	unsigned int max_fragment_size, num_fragments;
 	int ret;
diff -ruw linux-6.13.12/net/batman-adv/hard-interface.c linux-6.13.12-fbx/net/batman-adv/hard-interface.c
--- linux-6.13.12/net/batman-adv/hard-interface.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/hard-interface.c	2025-09-29 14:23:07.621732489 +0200
@@ -37,6 +37,7 @@
 #include "gateway_client.h"
 #include "log.h"
 #include "originator.h"
+#include "fbx/fbx.h"
 #include "send.h"
 #include "soft-interface.h"
 #include "translation-table.h"
@@ -470,6 +471,7 @@
 
 	batadv_dat_init_own_addr(bat_priv, primary_if);
 	batadv_bla_update_orig_address(bat_priv, primary_if, oldif);
+	batadv_fbx_primary_update(bat_priv, primary_if);
 out:
 	batadv_hardif_put(primary_if);
 }
@@ -946,7 +948,7 @@
 	switch (event) {
 	case NETDEV_REGISTER:
 		bat_priv = netdev_priv(net_dev);
-		batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
+		batadv_softif_create_vlan_own(bat_priv, BATADV_NO_FLAGS);
 		break;
 	}
 
@@ -988,8 +990,10 @@
 		batadv_hardif_remove_interface(hard_iface);
 		break;
 	case NETDEV_CHANGEMTU:
-		if (hard_iface->soft_iface)
+		if (hard_iface->soft_iface) {
 			batadv_update_min_mtu(hard_iface->soft_iface);
+			batadv_fbx_hardif_update(hard_iface);
+		}
 		break;
 	case NETDEV_CHANGEADDR:
 		if (hard_iface->if_status == BATADV_IF_NOT_IN_USE)
diff -ruw linux-6.13.12/net/batman-adv/main.c linux-6.13.12-fbx/net/batman-adv/main.c
--- linux-6.13.12/net/batman-adv/main.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/main.c	2025-09-25 17:40:37.659377600 +0200
@@ -60,6 +60,7 @@
 #include "soft-interface.h"
 #include "tp_meter.h"
 #include "translation-table.h"
+#include "fbx/fbx.h"
 
 /* List manipulations on hardif_list have to be rtnl_lock()'ed,
  * list traversals just rcu-locked
@@ -116,6 +117,7 @@
 	register_netdevice_notifier(&batadv_hard_if_notifier);
 	rtnl_link_register(&batadv_link_ops);
 	batadv_netlink_register();
+	batadv_fbx_init();
 
 	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
 		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);
@@ -130,6 +132,7 @@
 
 static void __exit batadv_exit(void)
 {
+	batadv_fbx_exit();
 	batadv_netlink_unregister();
 	rtnl_link_unregister(&batadv_link_ops);
 	unregister_netdevice_notifier(&batadv_hard_if_notifier);
@@ -229,6 +232,7 @@
 
 	batadv_gw_init(bat_priv);
 	batadv_mcast_init(bat_priv);
+	batadv_fbx_new_priv(bat_priv);
 
 	atomic_set(&bat_priv->gw.reselect, 0);
 	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);
@@ -271,6 +275,7 @@
 	batadv_dat_free(bat_priv);
 	batadv_bla_free(bat_priv);
 
+	batadv_fbx_free_priv(bat_priv);
 	batadv_mcast_free(bat_priv);
 
 	/* Free the TT and the originator tables only after having terminated
@@ -482,6 +487,7 @@
 	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));
 
 	idx = batadv_ogm_packet->packet_type;
+
 	(*batadv_rx_handler[idx])(skb, hard_iface);
 
 	batadv_hardif_put(hard_iface);
@@ -527,6 +533,7 @@
 	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
 	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
 	BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);
+	BUILD_BUG_ON(sizeof(struct batadv_fbx_packet) != 8);
 
 	i = sizeof_field(struct sk_buff, cb);
 	BUILD_BUG_ON(sizeof(struct batadv_skb_cb) > i);
diff -ruw linux-6.13.12/net/batman-adv/netlink.c linux-6.13.12-fbx/net/batman-adv/netlink.c
--- linux-6.13.12/net/batman-adv/netlink.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/netlink.c	2025-09-25 17:40:37.663377620 +0200
@@ -49,6 +49,7 @@
 #include "soft-interface.h"
 #include "tp_meter.h"
 #include "translation-table.h"
+#include "fbx/fbx.h"
 
 struct genl_family batadv_netlink_family;
 
@@ -56,6 +57,7 @@
 enum batadv_netlink_multicast_groups {
 	BATADV_NL_MCGRP_CONFIG,
 	BATADV_NL_MCGRP_TPMETER,
+	BATADV_NL_MCGRP_ROUTE,
 };
 
 /**
@@ -87,6 +89,7 @@
 static const struct genl_multicast_group batadv_netlink_mcgrps[] = {
 	[BATADV_NL_MCGRP_CONFIG] = { .name = BATADV_NL_MCAST_GROUP_CONFIG },
 	[BATADV_NL_MCGRP_TPMETER] = { .name = BATADV_NL_MCAST_GROUP_TPMETER },
+	[BATADV_NL_MCGRP_ROUTE] = { .name = BATADV_NL_MCAST_GROUP_ROUTE },
 };
 
 static const struct nla_policy batadv_netlink_policy[NUM_BATADV_ATTR] = {
@@ -149,6 +152,7 @@
 	[BATADV_ATTR_ORIG_INTERVAL]		= { .type = NLA_U32 },
 	[BATADV_ATTR_ELP_INTERVAL]		= { .type = NLA_U32 },
 	[BATADV_ATTR_THROUGHPUT_OVERRIDE]	= { .type = NLA_U32 },
+	[BATADV_ATTR_FBX]			= { .type = NLA_NESTED },
 };
 
 /**
@@ -359,6 +363,7 @@
 
 	batadv_hardif_put(primary_if);
 
+	batadv_fbx_nl(bat_priv, cmd, NULL, msg, NULL);
 	genlmsg_end(msg, hdr);
 	return 0;
 
@@ -611,6 +616,7 @@
 		atomic_set(&bat_priv->orig_interval, orig_interval);
 	}
 
+	batadv_fbx_nl(bat_priv, BATADV_CMD_SET_MESH, info, NULL, NULL);
 	batadv_netlink_notify_mesh(bat_priv);
 
 	return 0;
@@ -1150,6 +1156,100 @@
 }
 
 /**
+ * batadv_netlink_route_fill() - Fill message with route attributes
+ * @msg: Netlink message to dump route info into
+ * @bat_priv: the bat priv with all the soft interface information
+ * @cmd: type of netlink message
+ * @orig_node: Current route destination originator
+ * @neigh_node: Current best neighbour for this originator
+ * @best: Globally best route
+ * @portid: Port making netlink request
+ * @seq: sequence number for message
+ * @flags: Additional netlink message flags
+ */
+static int batadv_netlink_route_fill(struct sk_buff *msg,
+				     struct batadv_priv *bat_priv,
+				     enum batadv_nl_commands cmd,
+				     struct batadv_orig_node *orig_node,
+				     struct batadv_neigh_node *neigh_node,
+				     bool best,
+				     u32 portid, u32 seq, int flags)
+{
+	struct net_device *soft_iface = bat_priv->soft_iface;
+	void *hdr;
+
+	hdr = genlmsg_put(msg, portid, seq, &batadv_netlink_family, flags, cmd);
+	if (!hdr)
+		return -ENOBUFS;
+
+	if (nla_put_u32(msg, BATADV_ATTR_MESH_IFINDEX, soft_iface->ifindex))
+		goto nla_put_failure;
+
+	if (nla_put(msg, BATADV_ATTR_MESH_ADDRESS, ETH_ALEN,
+		    soft_iface->dev_addr))
+		goto nla_put_failure;
+
+	if (nla_put(msg, BATADV_ATTR_ORIG_ADDRESS, ETH_ALEN,
+		    orig_node->orig))
+		goto nla_put_failure;
+
+	if (best && nla_put_flag(msg, BATADV_ATTR_FLAG_BEST))
+		goto nla_put_failure;
+
+	if (neigh_node && nla_put(msg, BATADV_ATTR_NEIGH_ADDRESS, ETH_ALEN,
+				  neigh_node->addr))
+		goto nla_put_failure;
+
+	if (neigh_node &&
+	    nla_put_u32(msg, BATADV_ATTR_HARD_IFINDEX,
+			neigh_node->if_incoming->net_dev->ifindex))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+	return 0;
+
+nla_put_failure:
+	genlmsg_cancel(msg, hdr);
+	return -EMSGSIZE;
+}
+
+/**
+ * batadv_netlink_notify_route() - send route events to listener
+ * @bat_priv: the bat priv with all the soft interface information
+ * @ev: Route event type (add, del or change)
+ * @orig_node: The route destination node
+ * @neigh_node: Best neighbour for this route
+ * @best: This is a globally best route
+ *
+ * Return: 0 on success, < 0 on error
+ */
+int batadv_netlink_notify_route(struct batadv_priv *bat_priv,
+				enum batadv_nl_commands ev,
+				struct batadv_orig_node *orig_node,
+				struct batadv_neigh_node *neigh_node,
+				bool best)
+{
+	struct sk_buff *msg;
+	int ret;
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+	if (!msg)
+		return -ENOMEM;
+
+	ret = batadv_netlink_route_fill(msg, bat_priv, ev, orig_node,
+					neigh_node, best, 0, 0, 0);
+	if (ret < 0) {
+		nlmsg_free(msg);
+		return ret;
+	}
+
+	genlmsg_multicast_netns(&batadv_netlink_family,
+				dev_net(bat_priv->soft_iface), msg, 0,
+				BATADV_NL_MCGRP_ROUTE, GFP_ATOMIC);
+	return 0;
+}
+
+/**
  * batadv_get_softif_from_info() - Retrieve soft interface from genl attributes
  * @net: the applicable net namespace
  * @info: receiver information
@@ -1409,7 +1509,7 @@
 	{
 		.cmd = BATADV_CMD_GET_ORIGINATORS,
 		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
-		.flags = GENL_UNS_ADMIN_PERM,
+		/* can be retrieved by unprivileged users */
 		.dumpit = batadv_orig_dump,
 	},
 	{
diff -ruw linux-6.13.12/net/batman-adv/netlink.h linux-6.13.12-fbx/net/batman-adv/netlink.h
--- linux-6.13.12/net/batman-adv/netlink.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/netlink.h	2025-09-25 17:40:37.663377620 +0200
@@ -21,6 +21,12 @@
 				  u8 result, u32 test_time, u64 total_bytes,
 				  u32 cookie);
 
+int batadv_netlink_notify_route(struct batadv_priv *bat_priv,
+				enum batadv_nl_commands ev,
+				struct batadv_orig_node *orig_node,
+				struct batadv_neigh_node *neigh_node,
+				bool best);
+
 extern struct genl_family batadv_netlink_family;
 
 #endif /* _NET_BATMAN_ADV_NETLINK_H_ */
diff -ruw linux-6.13.12/net/batman-adv/originator.c linux-6.13.12-fbx/net/batman-adv/originator.c
--- linux-6.13.12/net/batman-adv/originator.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/originator.c	2025-09-25 17:40:37.663377620 +0200
@@ -43,6 +43,7 @@
 #include "routing.h"
 #include "soft-interface.h"
 #include "translation-table.h"
+#include "fbx/fbx.h"
 
 /* hash class keys */
 static struct lock_class_key batadv_orig_hash_lock_class_key;
@@ -276,6 +277,7 @@
 	hlist_del_init_rcu(&hardif_neigh->list);
 	spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
 
+	batadv_fbx_neigh_release(hardif_neigh);
 	batadv_hardif_put(hardif_neigh->if_incoming);
 	kfree_rcu(hardif_neigh, rcu);
 }
@@ -435,6 +437,8 @@
 	INIT_HLIST_NODE(&orig_ifinfo->list);
 	kref_init(&orig_ifinfo->refcount);
 
+	batadv_fbx_orig_ifinfo_init(orig_ifinfo);
+
 	kref_get(&orig_ifinfo->refcount);
 	hlist_add_head_rcu(&orig_ifinfo->list,
 			   &orig_node->ifinfo_list);
@@ -571,6 +575,7 @@
 {
 	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
 	struct batadv_hardif_neigh_node *hardif_neigh;
+	int ret;
 
 	spin_lock_bh(&hard_iface->neigh_list_lock);
 
@@ -591,6 +596,13 @@
 	hardif_neigh->last_seen = jiffies;
 
 	kref_init(&hardif_neigh->refcount);
+	ret = batadv_fbx_neigh_init(hardif_neigh);
+	if (ret) {
+		batadv_hardif_put(hard_iface);
+		kfree(hardif_neigh);
+		hardif_neigh = NULL;
+		goto out;
+	}
 
 	if (bat_priv->algo_ops->neigh.hardif_init)
 		bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
@@ -830,6 +842,8 @@
 
 	orig_ifinfo = container_of(ref, struct batadv_orig_ifinfo, refcount);
 
+	batadv_fbx_orig_ifinfo_release(orig_ifinfo);
+
 	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
 		batadv_hardif_put(orig_ifinfo->if_outgoing);
 
@@ -850,6 +864,8 @@
 
 	orig_node = container_of(rcu, struct batadv_orig_node, rcu);
 
+	batadv_fbx_orig_release(orig_node);
+
 	batadv_mcast_purge_orig(orig_node);
 
 	batadv_frag_purge_orig(orig_node, NULL);
@@ -960,7 +976,7 @@
 	struct batadv_orig_node *orig_node;
 	struct batadv_orig_node_vlan *vlan;
 	unsigned long reset_time;
-	int i;
+	int ret, i;
 
 	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
 		   "Creating new originator: %pM\n", addr);
@@ -1019,6 +1035,10 @@
 		orig_node->fragments[i].size = 0;
 	}
 
+	ret = batadv_fbx_orig_init(orig_node);
+	if (ret)
+		goto free_orig_node;
+
 	return orig_node;
 free_orig_node:
 	kfree(orig_node);
@@ -1194,8 +1214,8 @@
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
-		if (best && (bao->neigh.cmp(neigh, if_outgoing, best,
-					    if_outgoing) <= 0))
+		if (bao->neigh.cmp(neigh, if_outgoing, best,
+				   if_outgoing) <= 0)
 			continue;
 
 		if (!kref_get_unless_zero(&neigh->refcount))
diff -ruw linux-6.13.12/net/batman-adv/routing.c linux-6.13.12-fbx/net/batman-adv/routing.c
--- linux-6.13.12/net/batman-adv/routing.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/routing.c	2025-09-25 17:40:37.663377620 +0200
@@ -37,6 +37,8 @@
 #include "tp_meter.h"
 #include "translation-table.h"
 #include "tvlv.h"
+#include "netlink.h"
+#include "fbx/fbx.h"
 
 static int batadv_route_unicast_packet(struct sk_buff *skb,
 				       struct batadv_hard_iface *recv_if);
@@ -57,6 +59,7 @@
 {
 	struct batadv_orig_ifinfo *orig_ifinfo;
 	struct batadv_neigh_node *curr_router;
+	enum batadv_nl_commands cmd;
 
 	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, recv_if);
 	if (!orig_ifinfo)
@@ -82,6 +85,7 @@
 
 	/* route deleted */
 	if (curr_router && !neigh_node) {
+		cmd = BATADV_CMD_DEL_ROUTE;
 		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
 			   "Deleting route towards: %pM\n", orig_node->orig);
 		batadv_tt_global_del_orig(bat_priv, orig_node, -1,
@@ -89,11 +93,13 @@
 
 	/* route added */
 	} else if (!curr_router && neigh_node) {
+		cmd = BATADV_CMD_ADD_ROUTE;
 		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
 			   "Adding route towards: %pM (via %pM)\n",
 			   orig_node->orig, neigh_node->addr);
 	/* route changed */
 	} else if (neigh_node && curr_router) {
+		cmd = BATADV_CMD_CHANGE_ROUTE;
 		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
 			   "Changing route towards: %pM (now via %pM - was via %pM)\n",
 			   orig_node->orig, neigh_node->addr,
@@ -102,6 +108,10 @@
 
 	/* decrease refcount of previous best neighbor */
 	batadv_neigh_node_put(curr_router);
+
+	if (recv_if == BATADV_IF_DEFAULT)
+		batadv_netlink_notify_route(bat_priv, cmd, orig_node,
+					    neigh_node, true);
 }
 
 /**
@@ -946,7 +956,7 @@
 	int check, hdr_size = sizeof(*unicast_packet);
 	enum batadv_subtype subtype;
 	int ret = NET_RX_DROP;
-	bool is4addr, is_gw;
+	bool is4addr, is_gw, shortcut;
 
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 	is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
@@ -970,8 +980,10 @@
 
 	unicast_packet = (struct batadv_unicast_packet *)skb->data;
 
+	shortcut = batadv_fbx_shortcut(bat_priv, unicast_packet->dest);
+
 	/* packet for me */
-	if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
+	if (shortcut || batadv_is_my_mac(bat_priv, unicast_packet->dest)) {
 		/* If this is a unicast packet from another backgone gw,
 		 * drop it.
 		 */
diff -ruw linux-6.13.12/net/batman-adv/send.c linux-6.13.12-fbx/net/batman-adv/send.c
--- linux-6.13.12/net/batman-adv/send.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/send.c	2025-09-25 17:40:37.663377620 +0200
@@ -34,6 +34,8 @@
 #include "gateway_client.h"
 #include "hard-interface.h"
 #include "log.h"
+#include "fbx/fbx.h"
+#include "fbx/mtu.h"
 #include "network-coding.h"
 #include "originator.h"
 #include "routing.h"
@@ -192,7 +194,7 @@
 	 * it if needed.
 	 */
 	if (atomic_read(&bat_priv->fragmentation) &&
-	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
+	    skb->len > batadv_mtu_get_for_neigh(neigh_node->hardif_neigh)) {
 		/* Fragment and send packet. */
 		ret = batadv_frag_send_packet(skb, orig_node, neigh_node);
 		/* skb was consumed */
@@ -408,7 +410,7 @@
 	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
 	struct batadv_orig_node *orig_node;
 	u8 *src, *dst;
-	int ret;
+	int ret = NET_XMIT_DROP;
 
 	src = ethhdr->h_source;
 	dst = ethhdr->h_dest;
@@ -420,6 +422,10 @@
 	}
 	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);
 
+	/* unknown unicast (no tt entry for dest), handle same as broadcast */
+	if (!orig_node)
+		return batadv_send_bcast_packet(bat_priv, skb, 0, true);
+
 	ret = batadv_send_skb_unicast(bat_priv, skb, packet_type,
 				      packet_subtype, orig_node, vid);
 
@@ -990,13 +996,48 @@
  *
  * Consumes the provided skb.
  */
-void batadv_send_bcast_packet(struct batadv_priv *bat_priv,
+int batadv_send_bcast_packet(struct batadv_priv *bat_priv,
 			      struct sk_buff *skb,
 			      unsigned long delay,
 			      bool own_packet)
 {
+	struct batadv_bcast_packet *bcast_packet;
+	struct batadv_hard_iface *primary_if = NULL;
+	u32 seqno;
+
+	primary_if = batadv_primary_if_get_selected(bat_priv);
+	if (!primary_if)
+		goto drop;
+
+	if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
+		goto drop;
+
+	bcast_packet = (struct batadv_bcast_packet *)skb->data;
+	bcast_packet->version = BATADV_COMPAT_VERSION;
+	bcast_packet->ttl = BATADV_TTL - 1;
+
+	/* batman packet type: broadcast */
+	bcast_packet->packet_type = BATADV_BCAST;
+	bcast_packet->reserved = 0;
+
+	/* hw address of first interface is the orig mac because only
+	 * this mac is known throughout the mesh
+	 */
+	ether_addr_copy(bcast_packet->orig,
+			primary_if->net_dev->dev_addr);
+
+	/* set broadcast sequence number */
+	seqno = atomic_inc_return(&bat_priv->bcast_seqno);
+	bcast_packet->seqno = htonl(seqno);
+
 	__batadv_forw_bcast_packet(bat_priv, skb, delay, own_packet);
+
 	consume_skb(skb);
+	batadv_hardif_put(primary_if);
+	return NET_XMIT_SUCCESS;
+drop:
+	kfree_skb(skb);
+	return NET_XMIT_DROP;
 }
 
 /**
diff -ruw linux-6.13.12/net/batman-adv/send.h linux-6.13.12-fbx/net/batman-adv/send.h
--- linux-6.13.12/net/batman-adv/send.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/send.h	2025-09-25 17:40:37.663377620 +0200
@@ -43,7 +43,7 @@
 			     struct sk_buff *skb,
 			     unsigned long delay,
 			     bool own_packet);
-void batadv_send_bcast_packet(struct batadv_priv *bat_priv,
+int batadv_send_bcast_packet(struct batadv_priv *bat_priv,
 			      struct sk_buff *skb,
 			      unsigned long delay,
 			      bool own_packet);
diff -ruw linux-6.13.12/net/batman-adv/soft-interface.c linux-6.13.12-fbx/net/batman-adv/soft-interface.c
--- linux-6.13.12/net/batman-adv/soft-interface.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/soft-interface.c	2025-09-25 17:40:37.667377640 +0200
@@ -50,6 +50,7 @@
 #include "network-coding.h"
 #include "send.h"
 #include "translation-table.h"
+#include "fbx/fbx.h"
 
 /**
  * batadv_skb_head_push() - Increase header size and move (push) head pointer
@@ -141,6 +142,10 @@
 
 	rcu_read_lock();
 	hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
+		/* we don't use this VID ourselves, so avoid adding us to it */
+		if (!batadv_is_my_client(bat_priv, old_addr, vlan->vid))
+			continue;
+
 		batadv_tt_local_remove(bat_priv, old_addr, vlan->vid,
 				       "mac address changed", false);
 		batadv_tt_local_add(dev, addr->sa_data, vlan->vid,
@@ -177,15 +182,31 @@
 {
 }
 
+#ifndef ETH_P_BCOM_DSA
+#define ETH_P_BCOM_DSA 0x8874
+#endif
+
+#ifndef ETH_P_REALTEK_RRCP
+#define ETH_P_REALTEK_RRCP 0x8899
+#endif
+
+#ifndef RRCP_PROTO_RLDP
+#define RRCP_PROTO_RLDP 0x03
+#endif
+
+#ifndef RRCP_PROTO_RLDP2
+#define RRCP_PROTO_RLDP2 0x23
+#endif
+
 static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
 				       struct net_device *soft_iface)
 {
 	struct ethhdr *ethhdr;
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
-	struct batadv_hard_iface *primary_if = NULL;
-	struct batadv_bcast_packet *bcast_packet;
+#ifdef CONFIG_BATMAN_ADV_BLA
 	static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
 					      0x00, 0x00};
+#endif
 	static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
 					       0x00, 0x00};
 	enum batadv_dhcp_recipient dhcp_rcp = BATADV_DHCP_NO;
@@ -196,12 +217,12 @@
 	unsigned long brd_delay = 0;
 	bool do_bcast = false, client_added;
 	unsigned short vid;
-	u32 seqno;
 	int gw_mode;
 	enum batadv_forw_mode forw_mode = BATADV_FORW_BCAST;
 	int mcast_is_routable = 0;
 	int network_offset = ETH_HLEN;
 	__be16 proto;
+	u8 rrcp_proto = 0;
 
 	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
 		goto dropped;
@@ -240,6 +261,9 @@
 	if (batadv_bla_tx(bat_priv, skb, vid))
 		goto dropped;
 
+	if (!batadv_fbx_check_skb_tx(bat_priv, skb, vid))
+		goto dropped;
+
 	/* skb->data might have been reallocated by batadv_bla_tx() */
 	ethhdr = eth_hdr(skb);
 
@@ -256,18 +280,39 @@
 	/* Snoop address candidates from DHCPACKs for early DAT filling */
 	batadv_dat_snoop_outgoing_dhcp_ack(bat_priv, skb, proto, vid);
 
-	/* don't accept stp packets. STP does not help in meshes.
-	 * better use the bridge loop avoidance ...
+	/* don't accept STP packets when BLA is enabled: STP does not help
+	 * in meshes, better use the bridge loop avoidance. But some
+	 * devices, such as SONOS ones, rely on STP, so when BLA is not
+	 * enabled let those packets through.
 	 *
 	 * The same goes for ECTP sent at least by some Cisco Switches,
 	 * it might confuse the mesh when used with bridge loop avoidance.
+	 * Those seem safe to drop with or without BLA.
 	 */
-	if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
+#ifdef CONFIG_BATMAN_ADV_BLA
+	if (atomic_read(&bat_priv->bridge_loop_avoidance) &&
+	    batadv_compare_eth(ethhdr->h_dest, stp_addr))
 		goto dropped;
+#endif
 
 	if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
 		goto dropped;
 
+	/* Also don't accept Broadcom and Realtek loop detection packets as
+	 * they do not play well on a mesh.
+	 */
+	if (unlikely(is_multicast_ether_addr(ethhdr->h_dest))) {
+		if (unlikely(proto == htons(ETH_P_BCOM_DSA)))
+			goto dropped;
+		if (unlikely(proto == htons(ETH_P_REALTEK_RRCP))) {
+			skb_copy_bits(skb, sizeof(*ethhdr), &rrcp_proto, 1);
+			if (rrcp_proto == RRCP_PROTO_RLDP)
+				goto dropped;
+			if (rrcp_proto == RRCP_PROTO_RLDP2)
+				goto dropped;
+		}
+	}
+
 	gw_mode = atomic_read(&bat_priv->gw.mode);
 	if (is_multicast_ether_addr(ethhdr->h_dest)) {
 		/* if gw mode is off, broadcast every packet */
@@ -322,10 +367,6 @@
 
 	/* ethernet packet should be broadcasted */
 	if (do_bcast) {
-		primary_if = batadv_primary_if_get_selected(bat_priv);
-		if (!primary_if)
-			goto dropped;
-
 		/* in case of ARP request, we do not immediately broadcasti the
 		 * packet, instead we first wait for DAT to try to retrieve the
 		 * correct ARP entry
@@ -333,28 +374,9 @@
 		if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
 			brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);
 
-		if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
-			goto dropped;
-
-		bcast_packet = (struct batadv_bcast_packet *)skb->data;
-		bcast_packet->version = BATADV_COMPAT_VERSION;
-		bcast_packet->ttl = BATADV_TTL - 1;
-
-		/* batman packet type: broadcast */
-		bcast_packet->packet_type = BATADV_BCAST;
-		bcast_packet->reserved = 0;
-
-		/* hw address of first interface is the orig mac because only
-		 * this mac is known throughout the mesh
-		 */
-		ether_addr_copy(bcast_packet->orig,
-				primary_if->net_dev->dev_addr);
-
-		/* set broadcast sequence number */
-		seqno = atomic_inc_return(&bat_priv->bcast_seqno);
-		bcast_packet->seqno = htonl(seqno);
-
-		batadv_send_bcast_packet(bat_priv, skb, brd_delay, true);
+		ret = batadv_send_bcast_packet(bat_priv, skb, brd_delay, true);
+		if (ret != NET_XMIT_SUCCESS)
+			goto dropped_freed;
 	/* unicast packet */
 	} else {
 		/* DHCP packets going to a server will use the GW feature */
@@ -391,7 +413,6 @@
 dropped_freed:
 	batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
 end:
-	batadv_hardif_put(primary_if);
 	return NETDEV_TX_OK;
 }
 
@@ -437,6 +458,9 @@
 	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
 		goto dropped;
 
+	if (!batadv_fbx_check_skb_rx(bat_priv, packet_type, skb))
+		goto dropped;
+
 	vid = batadv_get_vid(skb, 0);
 	ethhdr = eth_hdr(skb);
 
@@ -553,9 +577,11 @@
  * @bat_priv: the bat priv with all the soft interface information
  * @vid: the VLAN identifier
  *
- * Return: 0 on success, a negative error otherwise.
+ * Return: a pointer to the newly allocated softif vlan struct on success, NULL
+ * otherwise.
  */
-int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
+static struct batadv_softif_vlan *
+batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 {
 	struct batadv_softif_vlan *vlan;
 
@@ -563,15 +589,14 @@
 
 	vlan = batadv_softif_vlan_get(bat_priv, vid);
 	if (vlan) {
-		batadv_softif_vlan_put(vlan);
 		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-		return -EEXIST;
+		return vlan;
 	}
 
 	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
 	if (!vlan) {
 		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-		return -ENOMEM;
+		return NULL;
 	}
 
 	vlan->bat_priv = bat_priv;
@@ -580,38 +605,74 @@
 
 	atomic_set(&vlan->ap_isolation, 0);
 
-	kref_get(&vlan->refcount);
 	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
 	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
 
+	return vlan;
+}
+
+/**
+ * batadv_softif_vlan_get_or_create() - retrieve or create a softif vlan struct
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ *
+ * Return: the softif vlan struct if found or created or NULL otherwise.
+ */
+struct batadv_softif_vlan *
+batadv_softif_vlan_get_or_create(struct batadv_priv *bat_priv,
+				 unsigned short vid)
+{
+	struct batadv_softif_vlan *vlan = batadv_softif_vlan_get(bat_priv, vid);
+
+	if (vlan)
+		return vlan;
+
+	return batadv_softif_create_vlan(bat_priv, vid);
+}
+
+/**
+ * batadv_softif_create_vlan_own() - add our own softif to the local TT
+ * @bat_priv: the bat priv with all the soft interface information
+ * @vid: the VLAN identifier
+ *
+ * Adds the MAC address of our own soft interface with the given VLAN ID as
+ * a permanent local TT entry.
+ *
+ * Return: 0 on success, a negative error otherwise.
+ */
+int batadv_softif_create_vlan_own(struct batadv_priv *bat_priv,
+				  unsigned short vid)
+{
+	int ret;
+
 	/* add a new TT local entry. This one will be marked with the NOPURGE
 	 * flag
 	 */
-	batadv_tt_local_add(bat_priv->soft_iface,
+	ret = batadv_tt_local_add(bat_priv->soft_iface,
 			    bat_priv->soft_iface->dev_addr, vid,
 			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
-
-	/* don't return reference to new softif_vlan */
-	batadv_softif_vlan_put(vlan);
+	if (ret < 0)
+		return ret;
 
 	return 0;
 }
 
 /**
- * batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object
+ * batadv_softif_destroy_vlan_own() - remove our own softif from the local TT
  * @bat_priv: the bat priv with all the soft interface information
- * @vlan: the object to remove
+ * @vid: the VLAN identifier
+ *
+ * Removes the MAC address of our own soft interface with the given VLAN ID from
+ * the local TT.
  */
-static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
-				       struct batadv_softif_vlan *vlan)
+static void batadv_softif_destroy_vlan_own(struct batadv_priv *bat_priv,
+					   unsigned short vid)
 {
 	/* explicitly remove the associated TT local entry because it is marked
 	 * with the NOPURGE flag
 	 */
-	batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
-			       vlan->vid, "vlan interface destroyed", false);
-
-	batadv_softif_vlan_put(vlan);
+	batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr, vid,
+			       "vlan interface destroyed", false);
 }
 
 /**
@@ -629,7 +690,6 @@
 				    unsigned short vid)
 {
 	struct batadv_priv *bat_priv = netdev_priv(dev);
-	struct batadv_softif_vlan *vlan;
 
 	/* only 802.1Q vlans are supported.
 	 * batman-adv does not know how to handle other types
@@ -639,25 +699,7 @@
 
 	vid |= BATADV_VLAN_HAS_TAG;
 
-	/* if a new vlan is getting created and it already exists, it means that
-	 * it was not deleted yet. batadv_softif_vlan_get() increases the
-	 * refcount in order to revive the object.
-	 *
-	 * if it does not exist then create it.
-	 */
-	vlan = batadv_softif_vlan_get(bat_priv, vid);
-	if (!vlan)
-		return batadv_softif_create_vlan(bat_priv, vid);
-
-	/* add a new TT local entry. This one will be marked with the NOPURGE
-	 * flag. This must be added again, even if the vlan object already
-	 * exists, because the entry was deleted by kill_vid()
-	 */
-	batadv_tt_local_add(bat_priv->soft_iface,
-			    bat_priv->soft_iface->dev_addr, vid,
-			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
-
-	return 0;
+	return batadv_softif_create_vlan_own(bat_priv, vid);
 }
 
 /**
@@ -676,7 +718,6 @@
 				     unsigned short vid)
 {
 	struct batadv_priv *bat_priv = netdev_priv(dev);
-	struct batadv_softif_vlan *vlan;
 
 	/* only 802.1Q vlans are supported. batman-adv does not know how to
 	 * handle other types
@@ -684,15 +725,7 @@
 	if (proto != htons(ETH_P_8021Q))
 		return -EINVAL;
 
-	vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
-	if (!vlan)
-		return -ENOENT;
-
-	batadv_softif_destroy_vlan(bat_priv, vlan);
-
-	/* finally free the vlan object */
-	batadv_softif_vlan_put(vlan);
-
+	batadv_softif_destroy_vlan_own(bat_priv, vid | BATADV_VLAN_HAS_TAG);
 	return 0;
 }
 
@@ -781,15 +814,16 @@
 	atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
 
 	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
-	atomic_set(&bat_priv->bcast_seqno, 1);
+	get_random_bytes(&random_seqno, sizeof(random_seqno));
+	atomic_set(&bat_priv->bcast_seqno, random_seqno);
 	atomic_set(&bat_priv->tt.vn, 0);
-	atomic_set(&bat_priv->tt.local_changes, 0);
 	atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
 #ifdef CONFIG_BATMAN_ADV_BLA
 	atomic_set(&bat_priv->bla.num_requests, 0);
 #endif
 	atomic_set(&bat_priv->tp_num, 0);
 
+	bat_priv->tt.local_changes = 0;
 	bat_priv->tt.last_changeset = NULL;
 	bat_priv->tt.last_changeset_len = 0;
 	bat_priv->isolation_mark = 0;
@@ -1100,7 +1134,6 @@
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
 	struct batadv_hard_iface *hard_iface;
-	struct batadv_softif_vlan *vlan;
 
 	list_for_each_entry(hard_iface, &batadv_hardif_list, list) {
 		if (hard_iface->soft_iface == soft_iface)
@@ -1108,11 +1141,7 @@
 	}
 
 	/* destroy the "untagged" VLAN */
-	vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
-	if (vlan) {
-		batadv_softif_destroy_vlan(bat_priv, vlan);
-		batadv_softif_vlan_put(vlan);
-	}
+	batadv_softif_destroy_vlan_own(bat_priv, BATADV_NO_FLAGS);
 
 	unregister_netdevice_queue(soft_iface, head);
 }
diff -ruw linux-6.13.12/net/batman-adv/soft-interface.h linux-6.13.12-fbx/net/batman-adv/soft-interface.h
--- linux-6.13.12/net/batman-adv/soft-interface.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/soft-interface.h	2025-09-25 17:40:37.667377640 +0200
@@ -21,10 +21,14 @@
 			 struct batadv_orig_node *orig_node);
 bool batadv_softif_is_valid(const struct net_device *net_dev);
 extern struct rtnl_link_ops batadv_link_ops;
-int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid);
+int batadv_softif_create_vlan_own(struct batadv_priv *bat_priv,
+				  unsigned short vid);
 void batadv_softif_vlan_release(struct kref *ref);
 struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
 						  unsigned short vid);
+struct batadv_softif_vlan *
+batadv_softif_vlan_get_or_create(struct batadv_priv *bat_priv,
+				 unsigned short vid);
 
 /**
  * batadv_softif_vlan_put() - decrease the vlan object refcounter and
diff -ruw linux-6.13.12/net/batman-adv/translation-table.c linux-6.13.12-fbx/net/batman-adv/translation-table.c
--- linux-6.13.12/net/batman-adv/translation-table.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/translation-table.c	2025-09-25 17:40:37.667377640 +0200
@@ -51,6 +51,7 @@
 #include "originator.h"
 #include "soft-interface.h"
 #include "tvlv.h"
+#include "fbx/fbx.h"
 
 static struct kmem_cache *batadv_tl_cache __read_mostly;
 static struct kmem_cache *batadv_tg_cache __read_mostly;
@@ -168,7 +169,7 @@
  * Return: a pointer to the corresponding tt_local_entry struct if the client is
  * found, NULL otherwise.
  */
-static struct batadv_tt_local_entry *
+struct batadv_tt_local_entry *
 batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
 			  unsigned short vid)
 {
@@ -231,8 +232,7 @@
  *  possibly release it
  * @tt_local_entry: tt_local_entry to be free'd
  */
-static void
-batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
+void batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry)
 {
 	if (!tt_local_entry)
 		return;
@@ -423,7 +423,6 @@
 	struct batadv_tt_change_node *tt_change_node, *entry, *safe;
 	struct batadv_tt_common_entry *common = &tt_local_entry->common;
 	u8 flags = common->flags | event_flags;
-	bool event_removed = false;
 	bool del_op_requested, del_op_entry;
 
 	tt_change_node = kmem_cache_alloc(batadv_tt_change_cache, GFP_ATOMIC);
@@ -461,28 +460,27 @@
 		/* this is a second add in the same originator interval. It
 		 * means that flags have been changed: update them!
 		 */
-		if (!del_op_requested && !del_op_entry)
+		if (del_op_requested == del_op_entry) {
 			entry->change.flags = flags;
+			goto discard;
+		}
 
 		continue;
 del:
 		list_del(&entry->list);
 		kmem_cache_free(batadv_tt_change_cache, entry);
+		bat_priv->tt.local_changes--;
+discard:
 		kmem_cache_free(batadv_tt_change_cache, tt_change_node);
-		event_removed = true;
 		goto unlock;
 	}
 
 	/* track the change in the OGMinterval list */
 	list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
+	bat_priv->tt.local_changes++;
 
 unlock:
 	spin_unlock_bh(&bat_priv->tt.changes_list_lock);
-
-	if (event_removed)
-		atomic_dec(&bat_priv->tt.local_changes);
-	else
-		atomic_inc(&bat_priv->tt.local_changes);
 }
 
 /**
@@ -592,7 +590,7 @@
  *
  * Return: true if the client was successfully added, false otherwise.
  */
-bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
+int batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
 			 unsigned short vid, int ifindex, u32 mark)
 {
 	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
@@ -605,10 +603,12 @@
 	struct hlist_head *head;
 	struct batadv_tt_orig_list_entry *orig_entry;
 	int hash_added, table_size, packet_size_max;
-	bool ret = false;
+	bool roam = true;
 	bool roamed_back = false;
+	u16 save_flags;
 	u8 remote_flags;
 	u32 match_mark;
+	int ret = 0;
 
 	if (ifindex != BATADV_NULL_IFINDEX)
 		in_dev = dev_get_by_index(net, ifindex);
@@ -659,21 +659,22 @@
 		net_ratelimited_function(batadv_info, soft_iface,
 					 "Local translation table size (%i) exceeds maximum packet size (%i); Ignoring new local tt entry: %pM\n",
 					 table_size, packet_size_max, addr);
+		ret = -E2BIG;
 		goto out;
 	}
 
 	tt_local = kmem_cache_alloc(batadv_tl_cache, GFP_ATOMIC);
-	if (!tt_local)
+	if (!tt_local) {
+		ret = -ENOMEM;
 		goto out;
+	}
 
 	/* increase the refcounter of the related vlan */
-	vlan = batadv_softif_vlan_get(bat_priv, vid);
+	vlan = batadv_softif_vlan_get_or_create(bat_priv, vid);
 	if (!vlan) {
-		net_ratelimited_function(batadv_info, soft_iface,
-					 "adding TT local entry %pM to non-existent VLAN %d\n",
-					 addr, batadv_print_vid(vid));
 		kmem_cache_free(batadv_tl_cache, tt_local);
 		tt_local = NULL;
+		ret = -ENOMEM;
 		goto out;
 	}
 
@@ -718,10 +719,17 @@
 	batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS);
 
 check_roaming:
+
+	save_flags = tt_local->common.flags;
+	roam = batadv_fbx_tt_local_add(bat_priv, tt_local, tt_global, ifindex);
+	if (save_flags != tt_local->common.flags)
+		batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS);
+
 	/* Check whether it is a roaming, but don't do anything if the roaming
 	 * process has already been handled
 	 */
-	if (tt_global && !(tt_global->common.flags & BATADV_TT_CLIENT_ROAM)) {
+	if (roam && tt_global &&
+	    !(tt_global->common.flags & BATADV_TT_CLIENT_ROAM)) {
 		/* These node are probably going to update their tt table */
 		head = &tt_global->orig_list;
 		rcu_read_lock();
@@ -770,7 +778,7 @@
 	if (remote_flags ^ (tt_local->common.flags & BATADV_TT_REMOTE_MASK))
 		batadv_tt_local_event(bat_priv, tt_local, BATADV_NO_FLAGS);
 
-	ret = true;
+	ret = 1;
 out:
 	batadv_hardif_put(in_hardif);
 	dev_put(in_dev);
@@ -952,7 +960,7 @@
 	size_t tt_extra_len = 0;
 	u16 tvlv_len;
 
-	tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
+	tt_diff_entries_num = READ_ONCE(bat_priv->tt.local_changes);
 	tt_diff_len = batadv_tt_len(tt_diff_entries_num);
 
 	/* if we have too many changes for one packet don't send any
@@ -979,7 +987,7 @@
 		goto container_register;
 
 	spin_lock_bh(&bat_priv->tt.changes_list_lock);
-	atomic_set(&bat_priv->tt.local_changes, 0);
+	bat_priv->tt.local_changes = 0;
 
 	list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
 				 list) {
@@ -1224,6 +1232,7 @@
 	struct batadv_tt_local_entry *tt_local_entry;
 	u16 flags, curr_flags = BATADV_NO_FLAGS;
 	struct hlist_node *tt_removed_node;
+	bool remove;
 
 	tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
 	if (!tt_local_entry)
@@ -1231,6 +1240,16 @@
 
 	curr_flags = tt_local_entry->common.flags;
 
+	remove = batadv_fbx_tt_local_del(bat_priv, tt_local_entry);
+	if (!remove) {
+		/* Do not delete local entry if other SLAP node has still
+		 * references on it, just mark it shallow
+		 */
+		batadv_tt_local_event(bat_priv, tt_local_entry,
+				      BATADV_NO_FLAGS);
+		goto out;
+	}
+
 	flags = BATADV_TT_CLIENT_DEL;
 	/* if this global entry addition is due to a roaming, the node has to
 	 * mark the local entry as "roamed" in order to correctly reroute
@@ -1301,6 +1320,13 @@
 		if (!batadv_has_timed_out(tt_local_entry->last_seen, timeout))
 			continue;
 
+		if (!batadv_fbx_tt_local_del(bat_priv, tt_local_entry)) {
+			/* Send only flag changes */
+			batadv_tt_local_event(bat_priv, tt_local_entry,
+					      BATADV_NO_FLAGS);
+			continue;
+		}
+
 		batadv_tt_local_set_pending(bat_priv, tt_local_entry,
 					    BATADV_TT_CLIENT_DEL, "timed out");
 	}
@@ -1395,7 +1421,7 @@
 		kmem_cache_free(batadv_tt_change_cache, entry);
 	}
 
-	atomic_set(&bat_priv->tt.local_changes, 0);
+	bat_priv->tt.local_changes = 0;
 	spin_unlock_bh(&bat_priv->tt.changes_list_lock);
 }
 
@@ -1567,7 +1593,7 @@
 {
 	struct batadv_tt_global_entry *tt_global_entry;
 	struct batadv_tt_local_entry *tt_local_entry;
-	bool ret = false;
+	bool remove = true, ret = false;
 	int hash_added;
 	struct batadv_tt_common_entry *common;
 	u16 local_flags;
@@ -1683,6 +1709,8 @@
 	batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn,
 					flags & BATADV_TT_SYNC_MASK);
 
+	remove = batadv_fbx_tt_global_add(bat_priv, tt_global_entry, orig_node);
+
 	batadv_dbg(BATADV_DBG_TT, bat_priv,
 		   "Creating new global tt entry: %pM (vid: %d, via %pM)\n",
 		   common->addr, batadv_print_vid(common->vid),
@@ -1696,12 +1724,16 @@
 	if (is_multicast_ether_addr(tt_addr))
 		goto out;
 
+	if (!remove)
+		goto out_clear_roam;
+
 	/* remove address from local hash if present */
 	local_flags = batadv_tt_local_remove(bat_priv, tt_addr, vid,
 					     "global tt received",
 					     flags & BATADV_TT_CLIENT_ROAM);
 	tt_global_entry->common.flags |= local_flags & BATADV_TT_CLIENT_WIFI;
 
+out_clear_roam:
 	if (!(flags & BATADV_TT_CLIENT_ROAM))
 		/* this is a normal global add. Therefore the client is not in a
 		 * roaming state anymore.
@@ -2138,7 +2170,7 @@
 	local_entry = batadv_tt_local_hash_find(bat_priv,
 						tt_global_entry->common.addr,
 						vid);
-	if (local_entry) {
+	if (local_entry && local_entry->common.flags & BATADV_TT_CLIENT_SEEN) {
 		/* local entry exists, case 2: client roamed to us. */
 		batadv_tt_global_del_orig_list(tt_global_entry);
 		batadv_tt_global_free(bat_priv, tt_global_entry, message);
@@ -2149,6 +2181,8 @@
 	}
 
 out:
+	/* TODO check roaming */
+	batadv_fbx_tt_global_del(bat_priv, tt_global_entry, orig_node);
 	batadv_tt_global_entry_put(tt_global_entry);
 	batadv_tt_local_entry_put(local_entry);
 }
@@ -3656,7 +3690,7 @@
 {
 	lockdep_assert_held(&bat_priv->tt.commit_lock);
 
-	if (atomic_read(&bat_priv->tt.local_changes) < 1) {
+	if (READ_ONCE(bat_priv->tt.local_changes) < 1) {
 		if (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))
 			batadv_tt_tvlv_container_update(bat_priv);
 		return;
@@ -3999,7 +4033,8 @@
 					     u16 tvlv_value_len)
 {
 	struct batadv_tvlv_tt_data *tt_data;
-	u16 tt_vlan_len, tt_num_entries;
+	size_t tt_vlan_len;
+	u16 tt_num_entries;
 	char tt_flag;
 	bool ret;
 
diff -ruw linux-6.13.12/net/batman-adv/translation-table.h linux-6.13.12-fbx/net/batman-adv/translation-table.h
--- linux-6.13.12/net/batman-adv/translation-table.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/translation-table.h	2025-09-25 17:40:37.667377640 +0200
@@ -16,11 +16,15 @@
 #include <linux/types.h>
 
 int batadv_tt_init(struct batadv_priv *bat_priv);
-bool batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
+int batadv_tt_local_add(struct net_device *soft_iface, const u8 *addr,
 			 unsigned short vid, int ifindex, u32 mark);
 u16 batadv_tt_local_remove(struct batadv_priv *bat_priv,
 			   const u8 *addr, unsigned short vid,
 			   const char *message, bool roaming);
+struct batadv_tt_local_entry *
+batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const u8 *addr,
+			  unsigned short vid);
+void batadv_tt_local_entry_put(struct batadv_tt_local_entry *tt_local_entry);
 int batadv_tt_local_dump(struct sk_buff *msg, struct netlink_callback *cb);
 int batadv_tt_global_dump(struct sk_buff *msg, struct netlink_callback *cb);
 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
diff -ruw linux-6.13.12/net/batman-adv/types.h linux-6.13.12-fbx/net/batman-adv/types.h
--- linux-6.13.12/net/batman-adv/types.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/batman-adv/types.h	2025-09-25 17:40:37.671377659 +0200
@@ -275,6 +275,19 @@
 
 	/** @rcu: struct used for freeing in an RCU-safe manner */
 	struct rcu_head rcu;
+#ifdef CONFIG_BATMAN_ADV_FBX_PERIF_ROUTER
+	/**
+	 * @fbx_ifrouter: best router that should be used to reach this
+	 * originator from this interface. Due to internal BATMAN_V router
+	 * management algorithm, orig_ifinfo->router is the best router to
+	 * reach originator from any interface not only this one,
+	 * orig_ifinfo->fbx_ifrouter is used to get best router for this
+	 * interface.
+	 */
+	struct batadv_neigh_node __rcu *fbx_ifrouter;
+	/** @fbx_ifrouter_lock: lock to protect fbx_ifrouter RCU updates */
+	spinlock_t fbx_ifrouter_lock;
+#endif
 };
 
 /**
@@ -527,6 +540,10 @@
 
 	/** @bat_iv: B.A.T.M.A.N. IV private structure */
 	struct batadv_orig_bat_iv bat_iv;
+#ifdef CONFIG_BATMAN_ADV_FBX_SLAP
+	struct batadv_slap_segid *slap_segid;
+	spinlock_t slap_lock;
+#endif
 };
 
 /**
@@ -598,6 +615,74 @@
 	unsigned long last_unicast_tx;
 };
 
+#ifdef CONFIG_BATMAN_ADV_FBX_MTU
+/**
+ * MTU discovery bookkeeping data
+ */
+struct batadv_mtu {
+	/** @neigh: Neighbour hardif */
+	struct batadv_hardif_neigh_node __rcu *neigh;
+	/** periodic work */
+	struct delayed_work periodic_work;
+	/** timeout work */
+	struct delayed_work recv_work;
+	/** release work */
+	struct delayed_work release_work;
+	/** the final MTU to use for this link */
+	atomic_t mtu;
+	/** current delay, not critical, use (READ/WRITE)_ONCE */
+	unsigned long delay;
+};
+#endif
+
+#ifdef CONFIG_BATMAN_ADV_FBX_SLAP
+/**
+ * batadv_slap_segid - SLAP segment address used to uniquely identify a SLAP
+ * segment
+ */
+struct batadv_slap_segid {
+	/* @rcu: struct used for freeing in a RCU-safe manner */
+	struct rcu_head rcu;
+	/* @addr: The SLAP master MAC address is used as unique segment ID */
+	u8 addr[ETH_ALEN];
+};
+
+/**
+ * batadv_slap_id - SLAP ID management structure, a SLAP ID is composed of the
+ * MAC address of the originator along with a priority
+ */
+struct batadv_slap_id {
+	/** @bat_priv: pointer to the mesh object */
+	struct batadv_priv *bat_priv;
+	/* @dw: delayed work used to schedule SLAP ID expiration */
+	struct delayed_work expire;
+	/** @refcount: Number of contexts using this slap id */
+	struct kref refcount;
+	/* @rcu: struct used for freeing in a RCU-safe manner */
+	struct rcu_head rcu;
+	/* @exp_time: Expiration time of this SLAP ID, in jiffies */
+	unsigned long exp_time;
+	/* @prio: SLAP ID priority part */
+	u32 prio;
+	/* @addr: SLAP ID addr part */
+	u8 addr[ETH_ALEN];
+};
+
+struct batadv_hardif_neigh_node;
+
+/**
+ * struct batadv_hardif_neigh_slap - SLAP specific neighbor information
+ */
+struct batadv_hardif_neigh_slap {
+	/** @neigh: Neighbour hardif */
+	struct batadv_hardif_neigh_node __rcu *neigh;
+	/** @announce: Work to announce our SLAP ID to this neighbor */
+	struct delayed_work announce;
+	/** @release: Work to release the SLAP state kept for this neighbor */
+	struct delayed_work release;
+};
+#endif
+
 /**
  * struct batadv_hardif_neigh_node - unique neighbor per hard-interface
  */
@@ -629,6 +714,15 @@
 
 	/** @rcu: struct used for freeing in a RCU-safe manner */
 	struct rcu_head rcu;
+
+	/** @mtud: struct used for mtu discovery related stuff */
+#ifdef CONFIG_BATMAN_ADV_FBX_MTU
+	struct batadv_mtu *mtud;
+#endif
+#ifdef CONFIG_BATMAN_ADV_FBX_SLAP
+	/** @slap: SLAP specific neighbour information */
+	struct batadv_hardif_neigh_slap *slap;
+#endif
 };
 
 /**
@@ -705,6 +799,9 @@
 
 	/** @last_seqno: last sequence number known for this neighbor */
 	u32 last_seqno;
+
+	/** @half_duplex: throughput should suffer half duplex penalty */
+	bool half_duplex;
 };
 
 /**
@@ -1019,7 +1116,7 @@
 	atomic_t ogm_append_cnt;
 
 	/** @local_changes: changes registered in an originator interval */
-	atomic_t local_changes;
+	size_t local_changes;
 
 	/**
 	 * @changes_list: tracks tt local changes within an originator interval
@@ -1811,6 +1908,42 @@
 	/** @bat_v: B.A.T.M.A.N. V per soft-interface private data */
 	struct batadv_priv_bat_v bat_v;
 #endif
+
+#ifdef CONFIG_BATMAN_ADV_FBX
+	/** @fbx_nl_handlers: List of NL command handlers */
+	struct hlist_head fbx_nl_handlers;
+	/** @fbx_tvlv_handlers: List of FBX specific TVLV handler */
+	struct hlist_head fbx_tvlv_handlers;
+	/** @fbx_tvlv_containers: List of FBX specific TVLV container */
+	struct hlist_head fbx_tvlv_containers;
+	/** @fbx_nl_lock: FBX specific NL handler list lock */
+	spinlock_t fbx_nl_lock;
+	/** @fbx_tvlv_lock: FBX specific TVLV handler list lock */
+	spinlock_t fbx_tvlv_lock;
+#ifdef CONFIG_BATMAN_ADV_FBX_MTU
+	/** @mtu_seqno: MTU discovery sequence number */
+	atomic_t mtu_seqno;
+#endif
+#ifdef CONFIG_BATMAN_ADV_FBX_SLAP
+	/**
+	 * @slap_lock: Common SLAP lock used to serialize rcu write side on
+	 * both bat_priv and neighbor
+	 */
+	spinlock_t slap_lock;
+	/** @slap_master: Current SLAP master */
+	struct batadv_slap_id __rcu *slap_master;
+	/** @slap_iface: Current SLAP hard interface */
+	struct  batadv_hard_iface __rcu *slap_iface;
+	/** @slap_id: Current SLAP ID */
+	struct  batadv_slap_id __rcu *slap_id;
+	/** @slap_skb: SLAP ID SKB model to use */
+	struct  sk_buff __rcu *slap_skb;
+	/** @slap_wq: Common workqueue for delayed SLAP work */
+	struct workqueue_struct *slap_wq;
+	/** @stp_guard_stop: STP guard stop time (presumably jiffies — verify) */
+	unsigned long stp_guard_stop;
+#endif
+#endif
 };
 
 #ifdef CONFIG_BATMAN_ADV_BLA
diff -ruw linux-6.13.12/net/bridge/Kconfig linux-6.13.12-fbx/net/bridge/Kconfig
--- linux-6.13.12/net/bridge/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/bridge/Kconfig	2025-09-25 17:40:37.687377739 +0200
@@ -34,6 +34,11 @@
 
 	  If unsure, say N.
 
+config BRIDGE_STATE_MESSAGES
+	bool "show port status change in kernel log"
+	depends on BRIDGE
+	default y
+
 config BRIDGE_IGMP_SNOOPING
 	bool "IGMP/MLD snooping"
 	depends on BRIDGE
diff -ruw linux-6.13.12/net/bridge/br_device.c linux-6.13.12-fbx/net/bridge/br_device.c
--- linux-6.13.12/net/bridge/br_device.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/bridge/br_device.c	2025-09-25 17:40:37.687377739 +0200
@@ -204,6 +204,7 @@
 	struct net_bridge *br = netdev_priv(dev);
 
 	WRITE_ONCE(dev->mtu, new_mtu);
+	br->forced_mtu = new_mtu;
 
 	/* this flag will be cleared if the MTU was automatically adjusted */
 	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
diff -ruw linux-6.13.12/net/bridge/br_fdb.c linux-6.13.12-fbx/net/bridge/br_fdb.c
--- linux-6.13.12/net/bridge/br_fdb.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/bridge/br_fdb.c	2025-09-25 17:40:37.687377739 +0200
@@ -880,6 +880,21 @@
 		  test_and_clear_bit(BR_FDB_NOTIFY_INACTIVE, &fdb->flags));
 }
 
+bool br_fdb_update_only(struct net_bridge *br,
+			struct net_bridge_port *source,
+			const unsigned char *addr)
+{
+	struct net_bridge_fdb_entry *fdb;
+
+	fdb = br_fdb_find_rcu(br, addr, 0);
+	if (!fdb)
+		return false;
+
+	fdb->updated = jiffies;
+	return true;
+}
+EXPORT_SYMBOL(br_fdb_update_only);
+
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		   const unsigned char *addr, u16 vid, unsigned long flags)
 {
@@ -1026,6 +1041,7 @@
 	rcu_read_unlock();
 	return err;
 }
+EXPORT_SYMBOL(br_fdb_find_rcu);
 
 /* returns true if the fdb is modified */
 static bool fdb_handle_notify(struct net_bridge_fdb_entry *fdb, u8 notify)
diff -ruw linux-6.13.12/net/bridge/br_if.c linux-6.13.12-fbx/net/bridge/br_if.c
--- linux-6.13.12/net/bridge/br_if.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/bridge/br_if.c	2025-09-25 17:40:37.687377739 +0200
@@ -501,13 +501,13 @@
 static int br_mtu_min(const struct net_bridge *br)
 {
 	const struct net_bridge_port *p;
-	int ret_mtu = 0;
+	int ret_mtu = min_t(unsigned int, br->forced_mtu, ETH_DATA_LEN);
 
 	list_for_each_entry(p, &br->port_list, list)
 		if (!ret_mtu || ret_mtu > p->dev->mtu)
 			ret_mtu = p->dev->mtu;
 
-	return ret_mtu ? ret_mtu : ETH_DATA_LEN;
+	return ret_mtu;
 }
 
 void br_mtu_auto_adjust(struct net_bridge *br)
diff -ruw linux-6.13.12/net/bridge/br_private.h linux-6.13.12-fbx/net/bridge/br_private.h
--- linux-6.13.12/net/bridge/br_private.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/bridge/br_private.h	2025-09-25 17:40:37.691377759 +0200
@@ -577,6 +577,7 @@
 #if IS_ENABLED(CONFIG_BRIDGE_CFM)
 	struct hlist_head		mep_list;
 #endif
+	unsigned int			forced_mtu;
 };
 
 struct br_input_skb_cb {
@@ -634,8 +635,14 @@
 	br_printk(KERN_WARNING, __br, format, ##args)
 #define br_notice(__br, format, args...)		\
 	br_printk(KERN_NOTICE, __br, format, ##args)
+
+#ifdef CONFIG_BRIDGE_STATE_MESSAGES
 #define br_info(__br, format, args...)			\
 	br_printk(KERN_INFO, __br, format, ##args)
+#else
+#define br_info(__br, format, args...)			\
+	pr_debug("%s: " format,  (__br)->dev->name, ##args)
+#endif
 
 #define br_debug(br, format, args...)			\
 	pr_debug("%s: " format,  (br)->dev->name, ##args)
@@ -848,6 +855,9 @@
 		   unsigned long off);
 int br_fdb_add_local(struct net_bridge *br, struct net_bridge_port *source,
 		     const unsigned char *addr, u16 vid);
+bool br_fdb_update_only(struct net_bridge *br,
+			struct net_bridge_port *source,
+			const unsigned char *addr);
 void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
 		   const unsigned char *addr, u16 vid, unsigned long flags);
 
diff -ruw linux-6.13.12/net/core/dev.c linux-6.13.12-fbx/net/core/dev.c
--- linux-6.13.12/net/core/dev.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/core/dev.c	2025-09-25 17:40:37.707377838 +0200
@@ -172,6 +172,10 @@
 					   struct net_device *dev,
 					   struct netlink_ext_ack *extack);
 
+#ifdef CONFIG_NETRXTHREAD
+struct krxd gkrxd[CONFIG_NETRXTHREAD_RX_QUEUE];
+#endif
+
 static DEFINE_MUTEX(ifalias_mutex);
 
 /* protects napi_hash addition/deletion and napi_gen_id */
@@ -5274,6 +5278,23 @@
 	return ret;
 }
 
+/* Start Freebox added code */
+#if defined(CONFIG_FREEBOX_DIVERTER) || defined(CONFIG_FREEBOX_DIVERTER_MODULE)
+int (*fbxdiverter_hook)(struct sk_buff *);
+
+static int handle_fbxdiverter(struct sk_buff *skb)
+{
+	/* try_module_get is missing here, so there is a race on
+	 * fbxdiverter module deletion */
+	if (!fbxdiverter_hook)
+		return 0;
+	return fbxdiverter_hook(skb);
+}
+
+EXPORT_SYMBOL(fbxdiverter_hook);
+#endif
+
+
 /**
  *	__netif_rx	-	Slightly optimized version of netif_rx
  *	@skb: buffer to post
@@ -5527,28 +5548,116 @@
 	return 0;
 }
 
+static int __netif_receive_skb_core_end(struct sk_buff **pskb, bool pfmemalloc,
+					struct packet_type **ppt_prev);
+
 static int __netif_receive_skb_core(struct sk_buff **pskb, bool pfmemalloc,
 				    struct packet_type **ppt_prev)
 {
-	struct packet_type *ptype, *pt_prev;
-	rx_handler_func_t *rx_handler;
 	struct sk_buff *skb = *pskb;
-	struct net_device *orig_dev;
-	bool deliver_exact = false;
-	int ret = NET_RX_DROP;
-	__be16 type;
+#ifdef CONFIG_NETRXTHREAD
+	unsigned int len;
+	struct krxd *krxd;
+#endif
 
 	net_timestamp_check(!READ_ONCE(net_hotdata.tstamp_prequeue), skb);
 
 	trace_netif_receive_skb(skb);
 
-	orig_dev = skb->dev;
-
 	skb_reset_network_header(skb);
 	if (!skb_transport_header_was_set(skb))
 		skb_reset_transport_header(skb);
 	skb_reset_mac_len(skb);
 
+#if defined(CONFIG_FREEBOX_DIVERTER) || defined(CONFIG_FREEBOX_DIVERTER_MODULE)
+	if (handle_fbxdiverter(skb))
+		return NET_RX_SUCCESS;
+#endif
+
+#ifndef CONFIG_NETRXTHREAD
+	return __netif_receive_skb_core_end(pskb, pfmemalloc, ppt_prev);
+#else
+	if (pfmemalloc)
+		return __netif_receive_skb_core_end(pskb, pfmemalloc, ppt_prev);
+
+	BUILD_BUG_ON(ARRAY_SIZE(gkrxd) < 2);
+	krxd = &gkrxd[skb->rxthread_prio & 1];
+
+	/* queue the packet to the rx thread */
+	local_bh_disable();
+	len = skb_queue_len(&krxd->pkt_queue);
+	if (len < RXTHREAD_MAX_PKTS) {
+		__skb_queue_tail(&krxd->pkt_queue, skb);
+		krxd->stats_pkts++;
+		if (!len)
+			wake_up(&krxd->wq);
+	} else {
+		krxd->stats_dropped++;
+		dev_kfree_skb(skb);
+        }
+	local_bh_enable();
+	return NET_RX_SUCCESS;
+#endif
+}
+
+#ifdef CONFIG_NETRXTHREAD
+static int krxd_action(void *data)
+{
+	struct krxd *krxd = (struct krxd *)data;
+	unsigned int queue = krxd - gkrxd;
+	struct sk_buff *skb;
+
+	set_user_nice(current, queue > 0 ? -10 : -5);
+	current->flags |= PF_NOFREEZE;
+	__set_current_state(TASK_RUNNING);
+
+	local_bh_disable();
+	while (1) {
+		struct packet_type *pt_prev = NULL;
+		struct net_device *orig_dev;
+
+		skb = skb_dequeue(&krxd->pkt_queue);
+		if (!skb) {
+			local_bh_enable();
+			wait_event_interruptible(krxd->wq,
+						 skb_queue_len(&krxd->pkt_queue));
+			set_current_state(TASK_RUNNING);
+			local_bh_disable();
+			continue;
+		}
+
+		rcu_read_lock();
+		orig_dev = skb->dev;
+		__netif_receive_skb_core_end(&skb, false, &pt_prev);
+		if (pt_prev)
+			INDIRECT_CALL_INET(pt_prev->func,
+					   ipv6_rcv, ip_rcv, skb,
+					   skb->dev, pt_prev, orig_dev);
+		rcu_read_unlock();
+
+		/* only schedule when working on lowest prio queue */
+		if (queue == 0 && need_resched()) {
+			local_bh_enable();
+			schedule();
+			local_bh_disable();
+		}
+	}
+	return 0;
+}
+#endif
+
+static int __netif_receive_skb_core_end(struct sk_buff **pskb, bool pfmemalloc,
+					struct packet_type **ppt_prev)
+{
+	struct sk_buff *skb = *pskb;
+	struct packet_type *ptype, *pt_prev;
+	rx_handler_func_t *rx_handler;
+	struct net_device *orig_dev;
+	bool deliver_exact = false;
+	int ret = NET_RX_DROP;
+	__be16 type;
+
+	orig_dev = skb->dev;
 	pt_prev = NULL;
 
 another_round:
@@ -5707,7 +5816,9 @@
 	if (pt_prev) {
 		if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC)))
 			goto drop;
-		*ppt_prev = pt_prev;
+		else
+			ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
+						 skb->dev, pt_prev, orig_dev);
 	} else {
 drop:
 		if (!deliver_exact)
@@ -5738,10 +5849,16 @@
 	struct packet_type *pt_prev = NULL;
 	int ret;
 
+#ifdef CONFIG_NETRXTHREAD
+	(void)orig_dev;
+	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
+#else
 	ret = __netif_receive_skb_core(&skb, pfmemalloc, &pt_prev);
 	if (pt_prev)
 		ret = INDIRECT_CALL_INET(pt_prev->func, ipv6_rcv, ip_rcv, skb,
 					 skb->dev, pt_prev, orig_dev);
+#endif
+
 	return ret;
 }
 
@@ -8793,8 +8910,10 @@
 	WRITE_ONCE(dev->promiscuity, promiscuity);
 	if (flags != old_flags) {
 		WRITE_ONCE(dev->flags, flags);
+#ifdef CONFIG_NET_PROMISC_MESSAGES
 		netdev_info(dev, "%s promiscuous mode\n",
 			    dev->flags & IFF_PROMISC ? "entered" : "left");
+#endif
 		if (audit_enabled) {
 			current_uid_gid(&uid, &gid);
 			audit_log(audit_context(), GFP_ATOMIC,
@@ -8864,8 +8983,10 @@
 	WRITE_ONCE(dev->allmulti, allmulti);
 	if (flags != old_flags) {
 		WRITE_ONCE(dev->flags, flags);
+#ifdef CONFIG_NET_PROMISC_MESSAGES
 		netdev_info(dev, "%s allmulticast mode\n",
 			    dev->flags & IFF_ALLMULTI ? "entered" : "left");
+#endif
 		dev_change_rx_flags(dev, IFF_ALLMULTI);
 		dev_set_rx_mode(dev);
 		if (notify)
@@ -12335,6 +12456,24 @@
 	open_softirq(NET_TX_SOFTIRQ, net_tx_action);
 	open_softirq(NET_RX_SOFTIRQ, net_rx_action);
 
+#ifdef CONFIG_NETRXTHREAD
+        for (i = 0; i < CONFIG_NETRXTHREAD_RX_QUEUE; i++) {
+		struct krxd *krxd = &gkrxd[i];
+		struct task_struct *task;
+
+		skb_queue_head_init(&krxd->pkt_queue);
+		init_waitqueue_head(&krxd->wq);
+		task = kthread_create(krxd_action, krxd, "krxthread_%u", i);
+		if (IS_ERR(task)) {
+			printk(KERN_ERR "unable to create krxd\n");
+			return -ENOMEM;
+		}
+		krxd->task = task;
+		wake_up_process(task);
+	}
+#endif
+
+
 	rc = cpuhp_setup_state_nocalls(CPUHP_NET_DEV_DEAD, "net/dev:dead",
 				       NULL, dev_cpu_dead);
 	WARN_ON(rc < 0);
diff -ruw linux-6.13.12/net/core/neighbour.c linux-6.13.12-fbx/net/core/neighbour.c
--- linux-6.13.12/net/core/neighbour.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/core/neighbour.c	2025-09-25 17:40:37.715377878 +0200
@@ -54,7 +54,8 @@
 			   u32 pid);
 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
-				    struct net_device *dev);
+				    struct net_device *dev,
+				    bool skip_perm);
 
 #ifdef CONFIG_PROC_FS
 static const struct seq_operations neigh_stat_seq_ops;
@@ -423,7 +424,7 @@
 {
 	write_lock_bh(&tbl->lock);
 	neigh_flush_dev(tbl, dev, skip_perm);
-	pneigh_ifdown_and_unlock(tbl, dev);
+	pneigh_ifdown_and_unlock(tbl, dev, skip_perm);
 	pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL,
 			   tbl->family);
 	if (skb_queue_empty_lockless(&tbl->proxy_queue))
@@ -803,7 +804,8 @@
 }
 
 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
-				    struct net_device *dev)
+				    struct net_device *dev,
+				    bool skip_perm)
 {
 	struct pneigh_entry *n, **np, *freelist = NULL;
 	u32 h;
@@ -811,12 +813,15 @@
 	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
 		np = &tbl->phash_buckets[h];
 		while ((n = *np) != NULL) {
+			if (skip_perm && n->state & NUD_PERMANENT)
+				goto skip;
 			if (!dev || n->dev == dev) {
 				*np = n->next;
 				n->next = freelist;
 				freelist = n;
 				continue;
 			}
+skip:
 			np = &n->next;
 		}
 	}
@@ -1979,6 +1984,7 @@
 	if (tb[NDA_PROTOCOL])
 		protocol = nla_get_u8(tb[NDA_PROTOCOL]);
 	if (ndm_flags & NTF_PROXY) {
+		u8 state = ndm->ndm_state & NUD_PERMANENT;
 		struct pneigh_entry *pn;
 
 		if (ndm_flags & NTF_MANAGED) {
@@ -1990,6 +1996,7 @@
 		pn = pneigh_lookup(tbl, net, dst, dev, 1);
 		if (pn) {
 			pn->flags = ndm_flags;
+			pn->state = state;
 			if (protocol)
 				pn->protocol = protocol;
 			err = 0;
diff -ruw linux-6.13.12/net/core/net-procfs.c linux-6.13.12-fbx/net/core/net-procfs.c
--- linux-6.13.12/net/core/net-procfs.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/core/net-procfs.c	2025-09-25 17:40:37.715377878 +0200
@@ -168,6 +168,89 @@
 	.show  = softnet_seq_show,
 };
 
+static struct page_frag_cache *frag_alloc_netdev_get_online(loff_t *pos)
+{
+	struct page_frag_cache *nc = NULL;
+
+	while (*pos < nr_cpu_ids)
+		if (cpu_online(*pos)) {
+			nc = netdev_frag_cache_get(*pos);
+			break;
+		} else
+			++*pos;
+	return nc;
+}
+
+static struct page_frag_cache *frag_alloc_napi_get_online(loff_t *pos)
+{
+	struct page_frag_cache *nc = NULL;
+
+	while (*pos < nr_cpu_ids)
+		if (cpu_online(*pos)) {
+			nc = napi_frag_cache_get(*pos);
+			break;
+		} else
+			++*pos;
+	return nc;
+}
+
+static void *frag_alloc_netdev_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return frag_alloc_netdev_get_online(pos);
+}
+
+static void *frag_alloc_netdev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	++*pos;
+	return frag_alloc_netdev_get_online(pos);
+}
+
+static void *frag_alloc_napi_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return frag_alloc_napi_get_online(pos);
+}
+
+static void *frag_alloc_napi_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	++*pos;
+	return frag_alloc_napi_get_online(pos);
+}
+
+static void frag_alloc_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int frag_alloc_seq_show(struct seq_file *seq, void *v)
+{
+#ifdef CONFIG_PAGE_OWNER
+	struct page_frag_cache *nc = v;
+	unsigned int pages = atomic_read(&nc->pages_allocated);
+
+	seq_printf(seq,
+		   "cpu[%d]: pages:%u (%lu kB)\n",
+		   (int)seq->index, pages, (pages * PAGE_SIZE) / 1024);
+#else
+	seq_printf(seq,
+		   "cpu[%d]: CONFIG_PAGE_OWNER missing\n",
+		   (int)seq->index);
+#endif
+	return 0;
+}
+
+static const struct seq_operations frag_alloc_netdev_seq_ops = {
+	.start = frag_alloc_netdev_seq_start,
+	.next  = frag_alloc_netdev_seq_next,
+	.stop  = frag_alloc_seq_stop,
+	.show  = frag_alloc_seq_show,
+};
+
+static const struct seq_operations frag_alloc_napi_seq_ops = {
+	.start = frag_alloc_napi_seq_start,
+	.next  = frag_alloc_napi_seq_next,
+	.stop  = frag_alloc_seq_stop,
+	.show  = frag_alloc_seq_show,
+};
+
 static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
 {
 	struct list_head *ptype_list = NULL;
@@ -288,6 +371,85 @@
 	.show  = ptype_seq_show,
 };
 
+#ifdef CONFIG_NETRXTHREAD
+/*
+ *	This is invoked by the /proc filesystem handler to display
+ *	per-queue krxthread statistics.
+ */
+static void *krxthread_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	int *queue;
+
+	if (*pos > CONFIG_NETRXTHREAD_RX_QUEUE)
+		return NULL;
+
+	queue = kmalloc(sizeof(*queue), GFP_KERNEL);
+	if (!queue)
+		return NULL;
+	*queue = ((int)*pos - 1);
+
+	return queue;
+}
+
+static void *krxthread_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	int *queue = v;
+
+	if (*pos == CONFIG_NETRXTHREAD_RX_QUEUE) {
+		++*pos;
+		return NULL;
+	}
+
+	++*queue;
+	*pos = *queue + 1;
+	return queue;
+}
+
+static void krxthread_seq_stop(struct seq_file *seq, void *v)
+{
+	kfree(v);
+}
+
+static void krxthread_seq_printf_stats(struct seq_file *seq, int queue)
+{
+	seq_printf(seq, "%8u %12u %12u\n",
+		   queue,
+		   gkrxd[queue].stats_pkts,
+		   gkrxd[queue].stats_dropped);
+}
+
+static int krxthread_seq_show(struct seq_file *seq, void *v)
+{
+	int *queue = v;
+
+	if (*queue == -1)
+		seq_printf(seq, "%8s %12s %12s\n",
+			   "queue", "packets", "drops");
+	else
+		krxthread_seq_printf_stats(seq, *queue);
+	return 0;
+}
+
+static const struct seq_operations krxthread_seq_ops = {
+	.start = krxthread_seq_start,
+	.next  = krxthread_seq_next,
+	.stop  = krxthread_seq_stop,
+	.show  = krxthread_seq_show,
+};
+
+static int krxthread_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &krxthread_seq_ops);
+}
+
+static const struct proc_ops krxthread_seq_fops = {
+	.proc_open	= krxthread_seq_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
+};
+#endif /* CONFIG_NETRXTHREAD */
+
 static int __net_init dev_proc_net_init(struct net *net)
 {
 	int rc = -ENOMEM;
@@ -301,9 +463,20 @@
 	if (!proc_create_net("ptype", 0444, net->proc_net, &ptype_seq_ops,
 			sizeof(struct seq_net_private)))
 		goto out_softnet;
+	if (!proc_create_seq("frag_alloc_netdev", 0444, net->proc_net,
+			     &frag_alloc_netdev_seq_ops))
+		goto out_softnet;
+	if (!proc_create_seq("frag_alloc_napi", 0444, net->proc_net,
+			     &frag_alloc_napi_seq_ops))
+		goto out_softnet;
 
 	if (wext_proc_init(net))
 		goto out_ptype;
+#ifdef CONFIG_NETRXTHREAD
+	if (!proc_create("krxthread", S_IRUGO, net->proc_net,
+			 &krxthread_seq_fops))
+		goto out_ptype;
+#endif
 	rc = 0;
 out:
 	return rc;
diff -ruw linux-6.13.12/net/core/net-sysfs.c linux-6.13.12-fbx/net/core/net-sysfs.c
--- linux-6.13.12/net/core/net-sysfs.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/core/net-sysfs.c	2025-09-25 17:40:37.715377878 +0200
@@ -15,6 +15,7 @@
 #include <linux/nsproxy.h>
 #include <net/sock.h>
 #include <net/net_namespace.h>
+#include <net/cfg80211.h>
 #include <linux/rtnetlink.h>
 #include <linux/vmalloc.h>
 #include <linux/export.h>
@@ -769,7 +770,28 @@
 	.attrs  = netstat_attrs,
 };
 
+#if IS_ENABLED(CONFIG_CFG80211)
+static ssize_t show_nl80211_iftype(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	const struct net_device *netdev = to_net_dev(dev);
+	ssize_t ret = 0;
+
+	if (!rtnl_trylock())
+		return restart_syscall();
+	if (netdev->ieee80211_ptr)
+		ret = sprintf(buf, "%d\n", netdev->ieee80211_ptr->iftype);
+	rtnl_unlock();
+
+	return ret;
+}
+static DEVICE_ATTR(nl80211_iftype, S_IRUGO, show_nl80211_iftype, NULL);
+#endif
+
 static struct attribute *wireless_attrs[] = {
+#if IS_ENABLED(CONFIG_CFG80211)
+	&dev_attr_nl80211_iftype.attr,
+#endif
 	NULL
 };
 
diff -ruw linux-6.13.12/net/core/of_net.c linux-6.13.12-fbx/net/core/of_net.c
--- linux-6.13.12/net/core/of_net.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/core/of_net.c	2025-09-25 17:40:37.715377878 +0200
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/device.h>
 #include <linux/nvmem-consumer.h>
+#include <linux/fbxserial.h>
 
 /**
  * of_get_phy_mode - Get phy mode for given device_node
@@ -97,6 +98,30 @@
 }
 EXPORT_SYMBOL(of_get_mac_address_nvmem);
 
+static int of_get_mac_addr_from_fbxserial(struct device_node *np, u8 *addr)
+{
+#ifdef CONFIG_FBXSERIAL
+	struct property *pp;
+	const void *mac;
+
+	pp = of_find_property(np, "fbxserial-mac-address", NULL);
+	if (!pp || pp->length != 4)
+		return -ENODEV;
+
+	mac = fbxserialinfo_get_mac_addr(be32_to_cpu(*(u32*)pp->value));
+	if (IS_ERR(mac))
+		return PTR_ERR(mac);
+	memcpy(addr, mac, ETH_ALEN);
+
+	if (of_property_present(np, "fbxserial-mac-address-set-lla"))
+		addr[0] |= 0x02;
+
+	return 0;
+#else
+	return -ENOSYS;
+#endif
+}
+
 /**
  * of_get_mac_address()
  * @np:		Caller's Device Node
@@ -126,10 +151,17 @@
 int of_get_mac_address(struct device_node *np, u8 *addr)
 {
 	int ret;
+	bool need_defer = false;
 
 	if (!np)
 		return -ENODEV;
 
+	ret = of_get_mac_addr_from_fbxserial(np, addr);
+	if (!ret)
+		return 0;
+	if (ret == -EPROBE_DEFER)
+		need_defer = true;
+
 	ret = of_get_mac_addr(np, "mac-address", addr);
 	if (!ret)
 		return 0;
@@ -142,7 +174,13 @@
 	if (!ret)
 		return 0;
 
-	return of_get_mac_address_nvmem(np, addr);
+	ret = of_get_mac_address_nvmem(np, addr);
+	if (!ret)
+		return 0;
+
+	if (need_defer)
+		return -EPROBE_DEFER;
+	return ret;
 }
 EXPORT_SYMBOL(of_get_mac_address);
 
diff -ruw linux-6.13.12/net/core/page_pool.c linux-6.13.12-fbx/net/core/page_pool.c
--- linux-6.13.12/net/core/page_pool.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/core/page_pool.c	2025-09-25 17:40:37.715377878 +0200
@@ -627,6 +627,7 @@
 {
 	netmem_set_pp(netmem, pool);
 	netmem_or_pp_magic(netmem, PP_SIGNATURE);
+	page_pool_clear_recycle_flag(netmem_to_page(netmem));
 
 	/* Ensuring all pages have been split into one fragment initially:
 	 * page_pool_set_pp_info() is only called once for every page when it
diff -ruw linux-6.13.12/net/core/skbuff.c linux-6.13.12-fbx/net/core/skbuff.c
--- linux-6.13.12/net/core/skbuff.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/core/skbuff.c	2025-09-25 17:40:37.723377917 +0200
@@ -293,6 +293,17 @@
 	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
 };
 
+struct page_frag_cache *netdev_frag_cache_get(unsigned int cpu_id)
+{
+	return per_cpu_ptr(&netdev_alloc_cache, cpu_id);
+}
+
+struct page_frag_cache *napi_frag_cache_get(unsigned int cpu_id)
+{
+	struct napi_alloc_cache *nc = per_cpu_ptr(&napi_alloc_cache, cpu_id);
+	return &nc->page;
+}
+
 /* Double check that napi_get_frags() allocates skbs with
  * skb->head being backed by slab, not a page fragment.
  * This is to make sure bug fixed in 3226b158e67c
@@ -1048,6 +1059,10 @@
 {
 	if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
 		return false;
+#ifdef CONFIG_IP_FFN
+	if (skb->ffn_ff_done & BIT(1))
+		page_pool_set_recycled_flag(virt_to_page(data));
+#endif
 	return napi_pp_put_page(page_to_netmem(virt_to_page(data)));
 }
 
@@ -1545,6 +1560,11 @@
 	new->queue_mapping = old->queue_mapping;
 
 	memcpy(&new->headers, &old->headers, sizeof(new->headers));
+
+#ifdef CONFIG_IP_FFN
+	new->ffn_state		= FFN_STATE_INIT;
+	new->ffn_ff_done	= 0;
+#endif
 	CHECK_SKB_FIELD(protocol);
 	CHECK_SKB_FIELD(csum);
 	CHECK_SKB_FIELD(hash);
@@ -6127,12 +6147,16 @@
 	skb->offload_fwd_mark = 0;
 	skb->offload_l3_fwd_mark = 0;
 #endif
+	skb->mark = 0;
 	ipvs_reset(skb);
 
+#ifdef CONFIG_IP_FFN
+	skb->ffn_state = FFN_STATE_INIT;
+	skb->ffn_ff_done = 0;
+#endif
 	if (!xnet)
 		return;
 
-	skb->mark = 0;
 	skb_clear_tstamp(skb);
 }
 EXPORT_SYMBOL_GPL(skb_scrub_packet);
diff -ruw linux-6.13.12/net/core/sock.c linux-6.13.12-fbx/net/core/sock.c
--- linux-6.13.12/net/core/sock.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/core/sock.c	2025-09-25 17:40:37.723377917 +0200
@@ -1623,6 +1623,21 @@
 		break;
 	}
 
+	case SO_TXREHASH:
+		if (val < -1 || val > 1) {
+			ret = -EINVAL;
+			break;
+		}
+		if ((u8)val == SOCK_TXREHASH_DEFAULT)
+			val = READ_ONCE(sock_net(sk)->core.sysctl_txrehash);
+		/* Paired with READ_ONCE() in tcp_rtx_synack() */
+		WRITE_ONCE(sk->sk_txrehash, (u8)val);
+		break;
+
+	case SO_UDP_DUP_UNICAST:
+		sock_valbool_flag(sk, SOCK_UDP_DUP_UNICAST, valbool);
+		break;
+
 	default:
 		ret = -ENOPROTOOPT;
 		break;
@@ -2041,6 +2056,10 @@
 		v.val64 = sock_gen_cookie(sk);
 		break;
 
+	case SO_UDP_DUP_UNICAST:
+		v.val = sock_flag(sk, SOCK_UDP_DUP_UNICAST);
+		break;
+
 	case SO_ZEROCOPY:
 		v.val = sock_flag(sk, SOCK_ZEROCOPY);
 		break;
diff -ruw linux-6.13.12/net/dsa/Kconfig linux-6.13.12-fbx/net/dsa/Kconfig
--- linux-6.13.12/net/dsa/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/dsa/Kconfig	2025-09-25 17:40:37.739377997 +0200
@@ -63,6 +63,10 @@
 	  Say Y or M if you want to enable support for tagging frames
 	  for the Hirschmann Hellcreek TSN switches.
 
+config NET_DSA_TAG_BRCM_FBX
+	tristate "Tag driver for Broadcom switches using in-frame headers"
+	select NET_DSA_TAG_BRCM_COMMON
+
 config NET_DSA_TAG_GSWIP
 	tristate "Tag driver for Lantiq / Intel GSWIP switches"
 	help
diff -ruw linux-6.13.12/net/dsa/dsa.c linux-6.13.12-fbx/net/dsa/dsa.c
--- linux-6.13.12/net/dsa/dsa.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/dsa/dsa.c	2025-09-25 17:40:37.739377997 +0200
@@ -420,6 +420,32 @@
 	return cpu_dp;
 }
 
+/*
+ * find cpu port forcibly assigned to this port via device tree
+ */
+static struct dsa_port *
+dsa_port_get_forced_cpu_port(struct dsa_switch_tree *dst,
+			     struct dsa_port *dp)
+{
+       struct device_node *cpu_dn;
+       struct dsa_switch *ds = dp->ds;
+
+       cpu_dn = of_parse_phandle(dp->dn, "dsa,cpu-port", 0);
+       if (!cpu_dn)
+               return ERR_PTR(-ENOENT);
+
+       list_for_each_entry (dp, &dst->ports, list) {
+               if (!dsa_port_is_cpu(dp))
+                       continue;
+
+               if (dp->dn == cpu_dn)
+                       return dp;
+       }
+
+       dev_err(ds->dev, "failed to find cpu port referenced by phandle");
+       return ERR_PTR(-EINVAL);
+}
+
 /* Perform initial assignment of CPU ports to user ports and DSA links in the
  * fabric, giving preference to CPU ports local to each switch. Default to
  * using the first CPU port in the switch tree if the port does not have a CPU
@@ -428,17 +454,49 @@
 static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
 {
 	struct dsa_port *preferred_cpu_dp, *cpu_dp, *dp;
+	size_t loop;
+
+	/*
+	 * first pass to assign explicit cpu port assigned through DT
+	 * to user ports
+	 */
+	list_for_each_entry(dp, &dst->ports, list) {
+		if (!dsa_port_is_user(dp) && !dsa_port_is_dsa(dp))
+			continue;
+
+		preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(dp->ds);
+		if (preferred_cpu_dp && preferred_cpu_dp != dp)
+			continue;
+
+		cpu_dp = dsa_port_get_forced_cpu_port(dst, dp);
+
+		if (IS_ERR(cpu_dp)) {
+			if (PTR_ERR(cpu_dp) == -EINVAL)
+				return -EINVAL;
+			continue;
+		}
 
+		dp->cpu_dp = cpu_dp;
+	}
+
+	/*
+	 * for user ports without explicit cpu port, we will assign
+	 * one CPU port from the same switch, first lookup default CPU
+	 * port if it's declared in DT, or fallback to first cpu port
+	 */
+	for (loop = 0; loop < 2; loop++) {
 	list_for_each_entry(cpu_dp, &dst->ports, list) {
 		if (!dsa_port_is_cpu(cpu_dp))
 			continue;
 
-		preferred_cpu_dp = dsa_switch_preferred_default_local_cpu_port(cpu_dp->ds);
-		if (preferred_cpu_dp && preferred_cpu_dp != cpu_dp)
+			if (loop == 0 && !cpu_dp->is_def_cpu_port)
 			continue;
 
+			list_for_each_entry(dp, &dst->ports, list) {
 		/* Prefer a local CPU port */
-		dsa_switch_for_each_port(dp, cpu_dp->ds) {
+				if (dp->ds != cpu_dp->ds)
+					continue;
+
 			/* Prefer the first local CPU port found */
 			if (dp->cpu_dp)
 				continue;
@@ -447,7 +505,10 @@
 				dp->cpu_dp = cpu_dp;
 		}
 	}
+	}
 
+	/* finally handle all remaining user ports, which don't have a
+	 * CPU port on the same switch */
 	return dsa_tree_setup_default_cpu(dst);
 }
 
@@ -1217,6 +1278,7 @@
 	dp->type = DSA_PORT_TYPE_CPU;
 	dsa_port_set_tag_protocol(dp, dst->tag_ops);
 	dp->dst = dst;
+	dp->is_def_cpu_port = of_property_read_bool(dp->dn, "dsa,def-cpu-port");
 
 	/* At this point, the tree may be configured to use a different
 	 * tagger than the one chosen by the switch driver during
diff -ruw linux-6.13.12/net/dsa/tag_brcm.c linux-6.13.12-fbx/net/dsa/tag_brcm.c
--- linux-6.13.12/net/dsa/tag_brcm.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/dsa/tag_brcm.c	2025-09-25 17:40:37.739377997 +0200
@@ -79,7 +79,8 @@
 #define BRCM_EG_PID_MASK	0x1f
 
 #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM) || \
-	IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
+	IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND) || \
+	IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_FBX)
 
 static struct sk_buff *brcm_tag_xmit_ll(struct sk_buff *skb,
 					struct net_device *dev,
@@ -177,7 +178,8 @@
 }
 #endif
 
-#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM)
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM) || \
+	IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_FBX)
 static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb,
 				     struct net_device *dev)
 {
@@ -199,7 +201,9 @@
 
 	return nskb;
 }
+#endif
 
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM)
 static const struct dsa_device_ops brcm_netdev_ops = {
 	.name	= BRCM_NAME,
 	.proto	= DSA_TAG_PROTO_BRCM,
@@ -321,6 +325,37 @@
 MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_PREPEND, BRCM_PREPEND_NAME);
 #endif
 
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_FBX)
+static struct sk_buff *
+brcm_tag_rcv_fbx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct sk_buff *nskb = brcm_tag_rcv(skb, dev);
+
+	if (!nskb)
+		return nskb;
+
+	/* if the packet was broadcast, the switch already did the
+	 * flood to the other ports */
+	if (nskb->pkt_type == PACKET_BROADCAST)
+		nskb->offload_fwd_mark = 1;
+	else
+		nskb->offload_fwd_mark = 0;
+
+	return nskb;
+}
+
+static const struct dsa_device_ops brcm_fbx_netdev_ops = {
+	.name	= "brcm-fbx",
+	.proto	= DSA_TAG_PROTO_BRCM_FBX,
+	.xmit	= brcm_tag_xmit,
+	.rcv	= brcm_tag_rcv_fbx,
+	.needed_headroom = BRCM_TAG_LEN,
+};
+
+DSA_TAG_DRIVER(brcm_fbx_netdev_ops);
+MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_BRCM_FBX, "brcm-fbx");
+#endif
+
 static struct dsa_tag_driver *dsa_tag_driver_array[] =	{
 #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM)
 	&DSA_TAG_DRIVER_NAME(brcm_netdev_ops),
@@ -331,6 +366,9 @@
 #if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_PREPEND)
 	&DSA_TAG_DRIVER_NAME(brcm_prepend_netdev_ops),
 #endif
+#if IS_ENABLED(CONFIG_NET_DSA_TAG_BRCM_FBX)
+	&DSA_TAG_DRIVER_NAME(brcm_fbx_netdev_ops),
+#endif
 };
 
 module_dsa_tag_drivers(dsa_tag_driver_array);
diff -ruw linux-6.13.12/net/dsa/user.c linux-6.13.12-fbx/net/dsa/user.c
--- linux-6.13.12/net/dsa/user.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/dsa/user.c	2025-09-25 17:40:37.739377997 +0200
@@ -532,7 +532,7 @@
 	ndm->ndm_family  = AF_BRIDGE;
 	ndm->ndm_pad1    = 0;
 	ndm->ndm_pad2    = 0;
-	ndm->ndm_flags   = NTF_SELF;
+	ndm->ndm_flags   = NTF_SELF | NTF_OFFLOADED;
 	ndm->ndm_type    = 0;
 	ndm->ndm_ifindex = dump->dev->ifindex;
 	ndm->ndm_state   = is_static ? NUD_NOARP : NUD_REACHABLE;
diff -ruw linux-6.13.12/net/ethernet/eth.c linux-6.13.12-fbx/net/ethernet/eth.c
--- linux-6.13.12/net/ethernet/eth.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ethernet/eth.c	2025-09-25 17:40:37.739377997 +0200
@@ -62,6 +62,7 @@
 #include <net/gro.h>
 #include <linux/uaccess.h>
 #include <net/pkt_sched.h>
+#include <linux/fbxserial.h>
 
 /**
  * eth_header - create the Ethernet header
@@ -597,6 +598,28 @@
  */
 int fwnode_get_mac_address(struct fwnode_handle *fwnode, char *addr)
 {
+#ifdef CONFIG_FBXSERIAL
+	u32 index;
+	int ret;
+
+	ret = fwnode_property_read_u32(fwnode, "fbxserial-mac-address",
+				       &index);
+	if (ret == 0) {
+		const void *res = fbxserialinfo_get_mac_addr(index);
+		if (IS_ERR(res) && PTR_ERR(res) == -EPROBE_DEFER)
+			return -EPROBE_DEFER;
+
+		if (res) {
+			if (fwnode_property_present(
+				    fwnode, "fbxserial-mac-address-set-lla"))
+				addr[0] |= 0x02;
+
+			memcpy(addr, res, ETH_ALEN);
+			return 0;
+		}
+	}
+#endif
+
 	if (!fwnode_get_mac_addr(fwnode, "mac-address", addr) ||
 	    !fwnode_get_mac_addr(fwnode, "local-mac-address", addr) ||
 	    !fwnode_get_mac_addr(fwnode, "address", addr))
diff -ruw linux-6.13.12/net/ethtool/common.c linux-6.13.12-fbx/net/ethtool/common.c
--- linux-6.13.12/net/ethtool/common.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ethtool/common.c	2025-09-25 17:40:37.739377997 +0200
@@ -210,6 +210,12 @@
 	__DEFINE_LINK_MODE_NAME(10, T1S, Half),
 	__DEFINE_LINK_MODE_NAME(10, T1S_P2MP, Half),
 	__DEFINE_LINK_MODE_NAME(10, T1BRR, Full),
+	__DEFINE_LINK_MODE_NAME(1000, PX_D, Full),
+	__DEFINE_LINK_MODE_NAME(1000, PX_U, Full),
+	__DEFINE_LINK_MODE_NAME(10000, PR_D, Full),
+	__DEFINE_LINK_MODE_NAME(10000, PR_U, Full),
+	__DEFINE_LINK_MODE_NAME(10000_1000, PRX_D, Full),
+	__DEFINE_LINK_MODE_NAME(10000_1000, PRX_U, Full),
 };
 static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS);
 
@@ -251,6 +257,10 @@
 #define __LINK_MODE_LANES_VR8		8
 #define __LINK_MODE_LANES_DR8_2		8
 #define __LINK_MODE_LANES_T1BRR		1
+#define __LINK_MODE_LANES_PX_D		1
+#define __LINK_MODE_LANES_PX_U		1
+#define __LINK_MODE_LANES_PR_U		1
+#define __LINK_MODE_LANES_PR_D		1
 
 #define __DEFINE_LINK_MODE_PARAMS(_speed, _type, _duplex)	\
 	[ETHTOOL_LINK_MODE(_speed, _type, _duplex)] = {		\
@@ -375,6 +385,18 @@
 	__DEFINE_LINK_MODE_PARAMS(10, T1S, Half),
 	__DEFINE_LINK_MODE_PARAMS(10, T1S_P2MP, Half),
 	__DEFINE_LINK_MODE_PARAMS(10, T1BRR, Full),
+	__DEFINE_LINK_MODE_PARAMS(1000, PX_D, Full),
+	__DEFINE_LINK_MODE_PARAMS(1000, PX_U, Full),
+	__DEFINE_LINK_MODE_PARAMS(10000, PR_D, Full),
+	__DEFINE_LINK_MODE_PARAMS(10000, PR_U, Full),
+	[ETHTOOL_LINK_MODE_10000_1000basePRX_D_Full_BIT] = {
+		.speed  = SPEED_10000,
+		.duplex = DUPLEX_FULL,
+	},
+	[ETHTOOL_LINK_MODE_10000_1000basePRX_U_Full_BIT] = {
+		.speed  = SPEED_10000,
+		.duplex = DUPLEX_FULL,
+	},
 };
 static_assert(ARRAY_SIZE(link_mode_params) == __ETHTOOL_LINK_MODE_MASK_NBITS);
 
diff -ruw linux-6.13.12/net/ethtool/ioctl.c linux-6.13.12-fbx/net/ethtool/ioctl.c
--- linux-6.13.12/net/ethtool/ioctl.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ethtool/ioctl.c	2025-09-25 17:40:37.743378016 +0200
@@ -17,6 +17,7 @@
 #include <linux/netdevice.h>
 #include <linux/net_tstamp.h>
 #include <linux/phy.h>
+#include <linux/phylink.h>
 #include <linux/bitops.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
@@ -183,6 +184,9 @@
 	if (sset == ETH_SS_LINK_MODES)
 		return __ETHTOOL_LINK_MODE_MASK_NBITS;
 
+	if (sset == ETH_SS_PHYLINK_IFTYPES)
+		return PHY_INTERFACE_MODE_MAX - 1;
+
 	if (ops->get_sset_count && ops->get_strings)
 		return ops->get_sset_count(dev, sset);
 	else
@@ -212,7 +216,16 @@
 	else if (stringset == ETH_SS_LINK_MODES)
 		memcpy(data, link_mode_names,
 		       __ETHTOOL_LINK_MODE_MASK_NBITS * ETH_GSTRING_LEN);
-	else
+	else if (stringset == ETH_SS_PHYLINK_IFTYPES) {
+		int i;
+
+		for (i = PHY_INTERFACE_MODE_NA + 1;
+		     i < PHY_INTERFACE_MODE_MAX; i++) {
+			strscpy(data + (i - 1) * ETH_GSTRING_LEN,
+				phy_modes(i),
+				ETH_GSTRING_LEN);
+		}
+	} else
 		/* ops->get_strings is valid because checked earlier */
 		ops->get_strings(dev, stringset, data);
 }
@@ -1801,6 +1814,127 @@
 	return ret;
 }
 
+struct ethtool_eee_usettings {
+	struct ethtool_eee_linkmode base;
+	struct {
+		__u32 supported[__ETHTOOL_LINK_MODE_MASK_NU32];
+		__u32 advertised[__ETHTOOL_LINK_MODE_MASK_NU32];
+		__u32 lp_advertised[__ETHTOOL_LINK_MODE_MASK_NU32];
+	} link_modes;
+};
+
+/* convert ethtool_eee_usettings in user space to a kernel internal
+ * ethtool_keee. return 0 on success, errno on error.
+ */
+static int load_keee_from_user_linkmode(struct ethtool_keee *keee,
+					const void __user *from)
+{
+	struct ethtool_eee_usettings eee_usettings;
+	struct ethtool_eee_linkmode *base;
+
+	if (copy_from_user(&eee_usettings, from, sizeof(eee_usettings)))
+		return -EFAULT;
+
+	/* expect userland to get this value from a previous
+	 * ETHTOOL_GLINKSETTINGS call */
+	if (__ETHTOOL_LINK_MODE_MASK_NU32
+	    != eee_usettings.base.link_mode_masks_nwords)
+		return -EINVAL;
+
+	memset(keee, 0, sizeof(*keee));
+	base = &eee_usettings.base;
+	keee->eee_enabled = base->eee_enabled;
+	keee->tx_lpi_enabled = base->tx_lpi_enabled;
+	keee->tx_lpi_timer = base->tx_lpi_timer;
+
+	bitmap_from_arr32(keee->supported,
+			  eee_usettings.link_modes.supported,
+			  __ETHTOOL_LINK_MODE_MASK_NBITS);
+	bitmap_from_arr32(keee->advertised,
+			  eee_usettings.link_modes.advertised,
+			  __ETHTOOL_LINK_MODE_MASK_NBITS);
+	bitmap_from_arr32(keee->lp_advertised,
+			  eee_usettings.link_modes.lp_advertised,
+			  __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+	return 0;
+}
+
+/* convert a kernel internal ethtool_eee_ksettings to
+ * ethtool_eee_linkmode in user space. return 0 on success, errno on
+ * error.
+ */
+static int
+store_link_keee_for_user(void __user *to,
+			 const struct ethtool_keee *keee)
+{
+	struct ethtool_eee_usettings eee_usettings;
+	struct ethtool_eee_linkmode *base;
+
+	memset(&eee_usettings, 0, sizeof(eee_usettings));
+	base = &eee_usettings.base;
+	base->eee_active = keee->eee_active;
+	base->eee_enabled = keee->eee_enabled;
+	base->tx_lpi_enabled = keee->tx_lpi_enabled;
+	base->tx_lpi_timer = keee->tx_lpi_timer;
+	base->link_mode_masks_nwords = __ETHTOOL_LINK_MODE_MASK_NU32;
+
+	bitmap_to_arr32(eee_usettings.link_modes.supported,
+			keee->supported,
+			__ETHTOOL_LINK_MODE_MASK_NBITS);
+	bitmap_to_arr32(eee_usettings.link_modes.advertised,
+			keee->advertised,
+			__ETHTOOL_LINK_MODE_MASK_NBITS);
+	bitmap_to_arr32(eee_usettings.link_modes.lp_advertised,
+			keee->lp_advertised,
+			__ETHTOOL_LINK_MODE_MASK_NBITS);
+
+	if (copy_to_user(to, &eee_usettings, sizeof(eee_usettings)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int ethtool_get_eee_linkmode(struct net_device *dev,
+				    char __user *useraddr)
+{
+	struct ethtool_keee keee;
+	int rc;
+
+	if (!dev->ethtool_ops->get_eee)
+		return -EOPNOTSUPP;
+
+	memset(&keee, 0, sizeof(keee));
+	rc = dev->ethtool_ops->get_eee(dev, &keee);
+	if (rc)
+		return rc;
+
+	rc = store_link_keee_for_user(useraddr, &keee);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+static int ethtool_set_eee_linkmode(struct net_device *dev,
+				    char __user *useraddr)
+{
+	struct ethtool_keee keee;
+	int ret;
+
+	if (!dev->ethtool_ops->set_eee)
+		return -EOPNOTSUPP;
+
+	ret = load_keee_from_user_linkmode(&keee, useraddr);
+	if (ret)
+		return ret;
+
+	ret = dev->ethtool_ops->set_eee(dev, &keee);
+	if (!ret)
+		ethtool_notify(dev, ETHTOOL_MSG_EEE_NTF, NULL);
+	return ret;
+}
+
 static int ethtool_nway_reset(struct net_device *dev)
 {
 	if (!dev->ethtool_ops->nway_reset)
@@ -2943,6 +3077,7 @@
 	switch (tuna->id) {
 	case ETHTOOL_PHY_DOWNSHIFT:
 	case ETHTOOL_PHY_FAST_LINK_DOWN:
+	case ETHTOOL_PHY_BROKEN:
 		if (tuna->len != sizeof(u8) ||
 		    tuna->type_id != ETHTOOL_TUNABLE_U8)
 			return -EINVAL;
@@ -3069,6 +3204,187 @@
 	return dev->ethtool_ops->set_fecparam(dev, &fecparam);
 }
 
+static int ethtool_get_sfp_state(struct net_device *dev, void __user *useraddr)
+{
+	struct ethtool_sfp_state sfp_state;
+	int rc;
+
+	if (!dev->sfp_bus) {
+		netdev_warn(dev, "no SFP bus\n");
+		return -ENODEV;
+	}
+
+	rc = sfp_get_sfp_state(dev->sfp_bus, &sfp_state);
+	if (rc)
+		return rc;
+
+	if (copy_to_user(useraddr, &sfp_state, sizeof (sfp_state)))
+		return -EFAULT;
+	return 0;
+}
+
+static int ethtool_get_shaper_params(struct net_device *dev, void __user *uaddr)
+{
+	struct ethtool_shaper_params sp;
+	int rc;
+
+	if (!dev->ethtool_ops->get_shaper_param)
+		return -EOPNOTSUPP;
+
+	memset(&sp, 0, sizeof (sp));
+	rc = dev->ethtool_ops->get_shaper_param(dev, &sp);
+	if (rc)
+		return rc;
+
+	if (copy_to_user(uaddr, &sp, sizeof (sp)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int ethtool_set_shaper_params(struct net_device *dev, void __user *uaddr)
+{
+	struct ethtool_shaper_params sp;
+
+	if (!dev->ethtool_ops->set_shaper_param)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&sp, uaddr, sizeof (sp)))
+		return -EFAULT;
+
+	return dev->ethtool_ops->set_shaper_param(dev, &sp);
+}
+
+static int ethtool_get_eponparam(struct net_device *dev, void __user *useraddr)
+{
+	struct ethtool_epon_param eponparam = { .cmd = ETHTOOL_GEPON_PARAM };
+	int rc;
+
+	if (!dev->ethtool_ops->get_epon_param)
+		return -EOPNOTSUPP;
+
+	rc = dev->ethtool_ops->get_epon_param(dev, &eponparam);
+	if (rc)
+		return rc;
+
+	if (copy_to_user(useraddr, &eponparam, sizeof(eponparam)))
+		return -EFAULT;
+	return 0;
+}
+
+static int ethtool_set_eponparam(struct net_device *dev, void __user *useraddr)
+{
+	struct ethtool_epon_param eponparam;
+
+	if (!dev->ethtool_ops->set_epon_param)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&eponparam, useraddr, sizeof(eponparam)))
+		return -EFAULT;
+
+	return dev->ethtool_ops->set_epon_param(dev, &eponparam);
+}
+
+#ifdef CONFIG_PHYLINK
+static int ethtool_get_phylink_iftype(struct net_device *dev, void __user *uaddr)
+{
+	struct ethtool_phylink_iftype sp;
+	phy_interface_t interface;
+	struct phylink *pl;
+	int mode, an_enable;
+
+	if (!dev->ethtool_ops->get_phylink)
+		return -EOPNOTSUPP;
+
+	pl = dev->ethtool_ops->get_phylink(dev);
+	if (!pl)
+		return -EIO;
+
+	memset(&sp, 0, sizeof (sp));
+	phylink_get_interface(pl, &interface, &an_enable, &mode);
+	strscpy(sp.iftype, phy_modes(interface), sizeof (sp.iftype));
+	sp.autoneg_en = an_enable;
+	sp.mode = mode;
+
+	if (copy_to_user(uaddr, &sp, sizeof (sp)))
+		return -EFAULT;
+
+	return 0;
+}
+#else
+static inline int ethtool_get_phylink_iftype(struct net_device *dev, void __user *uaddr)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_PHYLINK
+static int ethtool_set_phylink_iftype(struct net_device *dev, void __user *uaddr)
+{
+	struct ethtool_phylink_iftype sp;
+	phy_interface_t i;
+	struct phylink *pl;
+
+	if (copy_from_user(&sp, uaddr, sizeof (sp)))
+		return -EFAULT;
+
+	if (!dev->ethtool_ops->get_phylink)
+		return -EOPNOTSUPP;
+
+	pl = dev->ethtool_ops->get_phylink(dev);
+	if (!pl)
+		return -EIO;
+
+	sp.iftype[sizeof (sp.iftype) - 1] = 0;
+
+	for (i = PHY_INTERFACE_MODE_NA; i < PHY_INTERFACE_MODE_MAX; i++) {
+		if (!strcmp(phy_modes(i), sp.iftype))
+			break;
+	}
+
+	if (i == PHY_INTERFACE_MODE_MAX)
+		return -EINVAL;
+
+	return phylink_set_interface(pl, i, sp.autoneg_en ? 1 : 0);
+}
+#else
+static inline int ethtool_set_phylink_iftype(struct net_device *dev, void __user *uaddr)
+{
+	return -EOPNOTSUPP;
+}
+#endif
+
+static int ethtool_get_prbs_params(struct net_device *dev, void __user *uaddr)
+{
+	struct ethtool_prbs_param prbs = { .cmd = ETHTOOL_GPRBS_PARAM };
+	int ret;
+
+	if (!dev->ethtool_ops->get_prbs_param)
+		return -EOPNOTSUPP;
+
+	ret = dev->ethtool_ops->get_prbs_param(dev, &prbs);
+	if (ret)
+		return ret;
+
+	if (copy_to_user(uaddr, &prbs, sizeof(prbs)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int ethtool_set_prbs_params(struct net_device *dev, void __user *uaddr)
+{
+	struct ethtool_prbs_param prbs;
+
+	if (!dev->ethtool_ops->set_prbs_param)
+		return -EOPNOTSUPP;
+
+	if (copy_from_user(&prbs, uaddr, sizeof(prbs)))
+		return -EFAULT;
+
+	return dev->ethtool_ops->set_prbs_param(dev, &prbs);
+}
+
 /* The main entry point in this file.  Called from net/core/dev_ioctl.c */
 
 static int
@@ -3355,6 +3671,45 @@
 	case ETHTOOL_SFECPARAM:
 		rc = ethtool_set_fecparam(dev, useraddr);
 		break;
+	case ETHTOOL_SSHAPER_PARAMS:
+		rc = ethtool_set_shaper_params(dev, useraddr);
+		break;
+	case ETHTOOL_GSHAPER_PARAMS:
+		rc = ethtool_get_shaper_params(dev, useraddr);
+		break;
+	case ETHTOOL_GEPON_PARAM:
+		rc = ethtool_get_eponparam(dev, useraddr);
+		break;
+	case ETHTOOL_SEPON_KEYS:
+	case ETHTOOL_SEPON_ENCRYPT:
+	case ETHTOOL_SEPON_RESTART:
+	case ETHTOOL_SEPON_BURST:
+	case ETHTOOL_SEPON_ADD_MCLLID:
+	case ETHTOOL_SEPON_DEL_MCLLID:
+	case ETHTOOL_SEPON_CLR_MCLLID:
+		rc = ethtool_set_eponparam(dev, useraddr);
+		break;
+	case ETHTOOL_GSFP_STATE:
+		rc = ethtool_get_sfp_state(dev, useraddr);
+		break;
+	case ETHTOOL_GPHYLINK_IFTYPE:
+		rc = ethtool_get_phylink_iftype(dev, useraddr);
+		break;
+	case ETHTOOL_SPHYLINK_IFTYPE:
+		rc = ethtool_set_phylink_iftype(dev, useraddr);
+		break;
+	case ETHTOOL_GEEE_LINKMODE:
+		rc = ethtool_get_eee_linkmode(dev, useraddr);
+		break;
+	case ETHTOOL_SEEE_LINKMODE:
+		rc = ethtool_set_eee_linkmode(dev, useraddr);
+		break;
+	case ETHTOOL_GPRBS_PARAM:
+		rc = ethtool_get_prbs_params(dev, useraddr);
+		break;
+	case ETHTOOL_SPRBS_PARAM:
+		rc = ethtool_set_prbs_params(dev, useraddr);
+		break;
 	default:
 		rc = -EOPNOTSUPP;
 	}
diff -ruw linux-6.13.12/net/ipv4/Makefile linux-6.13.12-fbx/net/ipv4/Makefile
--- linux-6.13.12/net/ipv4/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv4/Makefile	2025-09-25 17:40:37.755378076 +0200
@@ -18,6 +18,8 @@
 
 obj-$(CONFIG_NET_IP_TUNNEL) += ip_tunnel.o
 obj-$(CONFIG_SYSCTL) += sysctl_net_ipv4.o
+
+obj-$(CONFIG_IP_FFN) += ip_ffn.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_IP_MULTIPLE_TABLES) += fib_rules.o
 obj-$(CONFIG_IP_MROUTE) += ipmr.o
diff -ruw linux-6.13.12/net/ipv4/ip_input.c linux-6.13.12-fbx/net/ipv4/ip_input.c
--- linux-6.13.12/net/ipv4/ip_input.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv4/ip_input.c	2025-09-25 17:40:37.763378116 +0200
@@ -224,8 +224,13 @@
 	}
 }
 
-static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
+int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+#ifdef CONFIG_IP_FFN
+	if (skb->ffn_state == FFN_STATE_FORWARDABLE)
+		ip_ffn_add(skb, IP_FFN_LOCAL_IN);
+#endif
 	skb_clear_delivery_time(skb);
 	__skb_pull(skb, skb_network_header_len(skb));
 
@@ -564,6 +569,11 @@
 	if (skb == NULL)
 		return NET_RX_DROP;
 
+#ifdef CONFIG_IP_FFN
+	if (!ip_ffn_process(skb))
+		return NET_RX_SUCCESS;
+#endif
+
 	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
 		       net, NULL, skb, dev, NULL,
 		       ip_rcv_finish);
diff -ruw linux-6.13.12/net/ipv4/ip_output.c linux-6.13.12-fbx/net/ipv4/ip_output.c
--- linux-6.13.12/net/ipv4/ip_output.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv4/ip_output.c	2025-09-25 17:40:37.763378116 +0200
@@ -226,6 +226,11 @@
 			return res;
 	}
 
+#ifdef CONFIG_IP_FFN
+	if (skb->ffn_state == FFN_STATE_FORWARDABLE)
+		ip_ffn_add(skb, IP_FFN_FINISH_OUT);
+#endif
+
 	rcu_read_lock();
 	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
 	if (!IS_ERR(neigh)) {
@@ -431,6 +436,11 @@
 	skb->dev = dev;
 	skb->protocol = htons(ETH_P_IP);
 
+#ifdef CONFIG_IP_FFN
+	if (skb->ffn_state == FFN_STATE_FAST_FORWARDED)
+		return ip_finish_output(net, sk, skb);
+#endif
+
 	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
 			    net, sk, skb, indev, dev,
 			    ip_finish_output,
@@ -1683,4 +1693,7 @@
 #if defined(CONFIG_IP_MULTICAST)
 	igmp_mc_init();
 #endif
+#ifdef CONFIG_IP_FFN
+	ip_ffn_init();
+#endif
 }
diff -ruw linux-6.13.12/net/ipv4/ip_tunnel_core.c linux-6.13.12-fbx/net/ipv4/ip_tunnel_core.c
--- linux-6.13.12/net/ipv4/ip_tunnel_core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv4/ip_tunnel_core.c	2025-09-25 17:40:37.767378135 +0200
@@ -38,6 +38,9 @@
 #include <net/geneve.h>
 #include <net/vxlan.h>
 #include <net/erspan.h>
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#include <net/netfilter/nf_conntrack.h>
+#endif
 
 const struct ip_tunnel_encap_ops __rcu *
 		iptun_encaps[MAX_IPTUN_ENCAP_OPS] __read_mostly;
@@ -60,6 +63,11 @@
 	skb_scrub_packet(skb, xnet);
 
 	skb_clear_hash_if_not_l4(skb);
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+	if (proto == IPPROTO_IPV6)
+		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
+#endif
+
 	skb_dst_set(skb, &rt->dst);
 	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
 
diff -ruw linux-6.13.12/net/ipv4/ipconfig.c linux-6.13.12-fbx/net/ipv4/ipconfig.c
--- linux-6.13.12/net/ipv4/ipconfig.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv4/ipconfig.c	2025-09-25 17:40:37.767378135 +0200
@@ -197,16 +197,62 @@
 static struct ic_device *ic_first_dev __initdata;	/* List of open device */
 static struct ic_device *ic_dev __initdata;		/* Selected device */
 
-static bool __init ic_is_init_dev(struct net_device *dev)
+static bool __init ic_is_init_dev(struct net_device *dev, bool partial)
 {
+	char *p = NULL;
+	bool ret;
+
 	if (dev->flags & IFF_LOOPBACK)
 		return false;
-	return user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
+
+	if (partial) {
+		p = strchr(user_dev_name, '.');
+		if (p)
+			*p = 0;
+	}
+
+	ret = false;
+	if (user_dev_name[0] ? !strcmp(dev->name, user_dev_name) :
 	    (!(dev->flags & IFF_LOOPBACK) &&
 	     (dev->flags & (IFF_POINTOPOINT|IFF_BROADCAST)) &&
-	     strncmp(dev->name, "dummy", 5));
+	     strncmp(dev->name, "dummy", 5)))
+		ret = true;
+	if (p)
+		*p = '.';
+	return ret;
 }
 
+#ifdef CONFIG_VLAN_8021Q
+int register_vlan_device(struct net_device *real_dev, u16 vlan_id);
+
+static void __init prepare_vlan(void)
+{
+	unsigned short oflags;
+	struct net_device *dev;
+	char *p;
+	u16 vid;
+
+	if (!strchr(user_dev_name, '.'))
+		return;
+
+	p = strchr(user_dev_name, '.');
+	*p = 0;
+	vid = simple_strtoul(p + 1, NULL, 10);
+	dev = __dev_get_by_name(&init_net, user_dev_name);
+	if (!dev)
+		goto fail;
+
+	oflags = dev->flags;
+	if (dev_change_flags(dev, oflags | IFF_UP, NULL) < 0)
+		goto fail;
+
+	register_vlan_device(dev, vid);
+
+fail:
+	*p = '.';
+}
+#endif
+
 static int __init ic_open_devs(void)
 {
 	struct ic_device *d, **last;
@@ -225,8 +271,13 @@
 			pr_err("IP-Config: Failed to open %s\n", dev->name);
 	}
 
+#ifdef CONFIG_VLAN_8021Q
+	/* register vlan device if needed */
+	prepare_vlan();
+#endif
+
 	for_each_netdev(&init_net, dev) {
-		if (ic_is_init_dev(dev)) {
+		if (ic_is_init_dev(dev, false)) {
 			int able = 0;
 			if (dev->mtu >= 364)
 				able |= IC_BOOTP;
@@ -281,7 +332,7 @@
 
 		rtnl_lock();
 		for_each_netdev(&init_net, dev)
-			if (ic_is_init_dev(dev) && netif_carrier_ok(dev)) {
+			if (ic_is_init_dev(dev, false) && netif_carrier_ok(dev)) {
 				rtnl_unlock();
 				goto have_carrier;
 			}
@@ -732,8 +783,10 @@
 			e += len;
 		}
 		if (*vendor_class_identifier) {
+#ifdef IPCONFIG_DEBUG
 			pr_info("DHCP: sending class identifier \"%s\"\n",
 				vendor_class_identifier);
+#endif
 			*e++ = 60;	/* Class-identifier */
 			len = strlen(vendor_class_identifier);
 			*e++ = len;
@@ -1451,7 +1504,7 @@
 
 		rtnl_lock();
 		for_each_netdev(&init_net, dev) {
-			if (ic_is_init_dev(dev)) {
+			if (ic_is_init_dev(dev, true)) {
 				found = 1;
 				break;
 			}
diff -ruw linux-6.13.12/net/ipv4/netfilter/Kconfig linux-6.13.12-fbx/net/ipv4/netfilter/Kconfig
--- linux-6.13.12/net/ipv4/netfilter/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv4/netfilter/Kconfig	2025-09-25 17:40:37.767378135 +0200
@@ -6,6 +6,13 @@
 menu "IP: Netfilter Configuration"
 	depends on INET && NETFILTER
 
+config IP_FFN
+	bool "IP: Fast forwarding and NAT"
+
+config IP_FFN_PROCFS
+	bool "IP: Fast forwarding and NAT /proc/net entries"
+	depends on IP_FFN
+
 config NF_DEFRAG_IPV4
 	tristate
 	default n
diff -ruw linux-6.13.12/net/ipv4/netfilter/ip_tables.c linux-6.13.12-fbx/net/ipv4/netfilter/ip_tables.c
--- linux-6.13.12/net/ipv4/netfilter/ip_tables.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv4/netfilter/ip_tables.c	2025-09-25 17:40:37.767378135 +0200
@@ -1099,6 +1099,8 @@
 	return ret;
 }
 
+extern void fbxbr_flush_cache(void);
+
 static int
 do_replace(struct net *net, sockptr_t arg, unsigned int len)
 {
@@ -1142,6 +1144,14 @@
 			   tmp.num_counters, tmp.counters);
 	if (ret)
 		goto free_newinfo_untrans;
+
+#ifdef CONFIG_FBXBRIDGE
+	fbxbr_flush_cache();
+#endif
+
+#ifdef CONFIG_IP_FFN
+	ip_ffn_flush_all();
+#endif
 	return 0;
 
  free_newinfo_untrans:
diff -ruw linux-6.13.12/net/ipv4/tcp.c linux-6.13.12-fbx/net/ipv4/tcp.c
--- linux-6.13.12/net/ipv4/tcp.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv4/tcp.c	2025-09-25 17:40:37.771378155 +0200
@@ -3829,6 +3829,13 @@
 			err = -EINVAL;
 		break;
 
+	case TCP_LINEAR_RTO:
+		if (val < 0 || val > 1)
+			err = -EINVAL;
+		else
+			tp->linear_rto = val;
+		break;
+
 	case TCP_REPAIR:
 		if (!tcp_can_repair_sock(sk))
 			err = -EPERM;
@@ -4434,6 +4441,9 @@
 	case TCP_THIN_DUPACK:
 		val = 0;
 		break;
+	case TCP_LINEAR_RTO:
+		val = tp->linear_rto;
+		break;
 
 	case TCP_REPAIR:
 		val = tp->repair;
diff -ruw linux-6.13.12/net/ipv4/tcp_timer.c linux-6.13.12-fbx/net/ipv4/tcp_timer.c
--- linux-6.13.12/net/ipv4/tcp_timer.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv4/tcp_timer.c	2025-09-25 17:40:37.779378195 +0200
@@ -666,6 +666,9 @@
 		icsk->icsk_rto = clamp(__tcp_set_rto(tp),
 				       tcp_rto_min(sk),
 				       TCP_RTO_MAX);
+	} else if (sk->sk_state == TCP_ESTABLISHED && tp->linear_rto) {
+		icsk->icsk_backoff = 0;
+		icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
 	} else if (sk->sk_state != TCP_SYN_SENT ||
 		   tp->total_rto >
 		   READ_ONCE(net->ipv4.sysctl_tcp_syn_linear_timeouts)) {
diff -ruw linux-6.13.12/net/ipv4/udp.c linux-6.13.12-fbx/net/ipv4/udp.c
--- linux-6.13.12/net/ipv4/udp.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv4/udp.c	2025-09-25 17:40:37.783378215 +0200
@@ -318,6 +318,49 @@
 	inet_sk(sk)->inet_num = snum;
 	udp_sk(sk)->udp_port_hash = snum;
 	udp_sk(sk)->udp_portaddr_hash ^= snum;
+
+	/* resolve udp reuse conflict */
+	if (sk->sk_reuse) {
+		struct sock *sk2;
+		bool found;
+
+		found = false;
+		sk_for_each(sk2, &hslot->head) {
+			if (!net_eq(sock_net(sk2), net) ||
+			    sk2 == sk ||
+			    (udp_sk(sk2)->udp_port_hash != snum))
+				continue;
+
+			if (sk2->sk_bound_dev_if &&
+			    sk->sk_bound_dev_if &&
+			    sk2->sk_bound_dev_if != sk->sk_bound_dev_if)
+				continue;
+
+			if (!inet_rcv_saddr_equal(sk, sk2, true))
+				continue;
+
+			found = true;
+			break;
+		}
+
+		sk_for_each(sk2, &hslot->head) {
+			if (!net_eq(sock_net(sk2), net) ||
+			    sk2 == sk ||
+			    (udp_sk(sk2)->udp_port_hash != snum))
+				continue;
+
+			if (sk2->sk_bound_dev_if &&
+			    sk->sk_bound_dev_if &&
+			    sk2->sk_bound_dev_if != sk->sk_bound_dev_if)
+				continue;
+
+			if (!inet_rcv_saddr_equal(sk, sk2, true))
+				continue;
+
+			sk->sk_reuse_conflict = found;
+		}
+	}
+
 	if (sk_unhashed(sk)) {
 		if (sk->sk_reuseport &&
 		    udp_reuseport_add_sock(sk, hslot)) {
@@ -2563,6 +2606,90 @@
 	return 0;
 }
 
+/*
+ *	Unicast goes to one listener and all sockets with dup flag
+ *
+ *	Note: called only from the BH handler context.
+ *
+ *	Note2: it is okay to use the udp_table.hash table only here
+ *	and not udp_table.hash2 table as the sock is always hashed in
+ *	both udp_table.hash and udp_table.hash2. This might impact
+ *	performance if the sock hash bucket hosts more than 10 socks
+ *	but has the benefit of keeping the code simpler.
+ *
+ *	Note3: __udp_is_mcast_sock() does not have really anything to
+ *	do with multicast, it used there to deliver the packet only to
+ *	the sockets that are bound to the ip:port/interface the skbuff
+ *	is targeted to.
+ */
+static int __udp4_lib_uc_conflict_deliver(struct net *net, struct sk_buff *skb,
+					  struct udphdr  *uh,
+					  __be32 saddr, __be32 daddr,
+					  struct udp_table *udptable,
+					  int proto)
+{
+	struct sock *sk, *first = NULL;
+	unsigned short hnum = ntohs(uh->dest);
+	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
+	int dif = skb->dev->ifindex;
+	unsigned int offset = offsetof(typeof(*sk), sk_node);
+	struct hlist_node *node;
+	struct sk_buff *nskb;
+	int sdif = inet_sdif(skb);
+	bool found_non_dup;
+
+	found_non_dup = false;
+	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
+		bool need_deliver;
+
+		if (!__udp_is_mcast_sock(net, sk, uh->dest, daddr,
+					 uh->source, saddr, dif, sdif, hnum))
+			continue;
+
+		if (sock_flag(sk, SOCK_UDP_DUP_UNICAST))
+			need_deliver = true;
+		else {
+			if (!found_non_dup)
+				need_deliver = true;
+			else
+				need_deliver = false;
+			found_non_dup = true;
+		}
+
+		if (!need_deliver)
+			continue;
+
+		if (!first) {
+			first = sk;
+			continue;
+		}
+		nskb = skb_clone(skb, GFP_ATOMIC);
+
+		if (unlikely(!nskb)) {
+			atomic_inc(&sk->sk_drops);
+			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
+					IS_UDPLITE(sk));
+			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
+					IS_UDPLITE(sk));
+			continue;
+		}
+
+		if (udp_queue_rcv_skb(sk, nskb) > 0)
+			consume_skb(nskb);
+	}
+
+	if (first) {
+		if (udp_queue_rcv_skb(first, skb) > 0)
+			consume_skb(skb);
+	} else {
+		kfree_skb(skb);
+		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
+				proto == IPPROTO_UDPLITE);
+	}
+
+	return 0;
+}
+
 /* Initialize UDP checksum. If exited with zero value (success),
  * CHECKSUM_UNNECESSARY means, that no more checks are required.
  * Otherwise, csum completion requires checksumming packet body,
@@ -2696,8 +2823,15 @@
 						saddr, daddr, udptable, proto);
 
 	sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
-	if (sk)
+	if (sk) {
+		if (sk->sk_reuse_conflict)
+			return __udp4_lib_uc_conflict_deliver(net,
+							      skb, uh,
+							      saddr, daddr,
+							      udptable, proto);
+
 		return udp_unicast_rcv_skb(sk, skb, uh);
+	}
 no_sk:
 	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
 		goto drop;
diff -ruw linux-6.13.12/net/ipv6/Makefile linux-6.13.12-fbx/net/ipv6/Makefile
--- linux-6.13.12/net/ipv6/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv6/Makefile	2025-09-25 17:40:37.783378215 +0200
@@ -13,6 +13,7 @@
 		udp_offload.o seg6.o fib6_notifier.o rpl.o ioam6.o
 
 ipv6-$(CONFIG_SYSCTL) += sysctl_net_ipv6.o
+ipv6-$(CONFIG_IPV6_FFN) += ip6_ffn.o
 ipv6-$(CONFIG_IPV6_MROUTE) += ip6mr.o
 
 ipv6-$(CONFIG_XFRM) += xfrm6_policy.o xfrm6_state.o xfrm6_input.o \
diff -ruw linux-6.13.12/net/ipv6/addrconf.c linux-6.13.12-fbx/net/ipv6/addrconf.c
--- linux-6.13.12/net/ipv6/addrconf.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv6/addrconf.c	2025-09-25 17:40:37.787378235 +0200
@@ -2371,12 +2371,27 @@
 	return 0;
 }
 
+static int addrconf_ifid_ppp(u8 *eui, struct net_device *dev)
+{
+	if (is_zero_ether_addr(dev->perm_addr))
+		return -1;
+
+	memcpy(eui, dev->perm_addr, 3);
+	memcpy(eui + 5, dev->perm_addr + 3, 3);
+	eui[3] = 0xFF;
+	eui[4] = 0xFE;
+	eui[0] ^= 2;
+	return 0;
+}
+
 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
 {
 	switch (dev->type) {
 	case ARPHRD_ETHER:
 	case ARPHRD_FDDI:
 		return addrconf_ifid_eui48(eui, dev);
+	case ARPHRD_PPP:
+		return addrconf_ifid_ppp(eui, dev);
 	case ARPHRD_ARCNET:
 		return addrconf_ifid_arcnet(eui, dev);
 	case ARPHRD_INFINIBAND:
@@ -3484,6 +3499,7 @@
 
 	if ((dev->type != ARPHRD_ETHER) &&
 	    (dev->type != ARPHRD_FDDI) &&
+	    (dev->type != ARPHRD_PPP) &&
 	    (dev->type != ARPHRD_ARCNET) &&
 	    (dev->type != ARPHRD_INFINIBAND) &&
 	    (dev->type != ARPHRD_IEEE1394) &&
diff -ruw linux-6.13.12/net/ipv6/af_inet6.c linux-6.13.12-fbx/net/ipv6/af_inet6.c
--- linux-6.13.12/net/ipv6/af_inet6.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv6/af_inet6.c	2025-09-25 17:40:37.787378235 +0200
@@ -1190,6 +1190,10 @@
 	if (err)
 		goto udpv6_fail;
 
+#ifdef CONFIG_IPV6_FFN
+	ipv6_ffn_init();
+#endif
+
 	err = udplitev6_init();
 	if (err)
 		goto udplitev6_fail;
diff -ruw linux-6.13.12/net/ipv6/ip6_input.c linux-6.13.12-fbx/net/ipv6/ip6_input.c
--- linux-6.13.12/net/ipv6/ip6_input.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv6/ip6_input.c	2025-09-25 17:40:37.795378274 +0200
@@ -306,6 +306,12 @@
 	skb = ip6_rcv_core(skb, dev, net);
 	if (skb == NULL)
 		return NET_RX_DROP;
+
+#ifdef CONFIG_IPV6_FFN
+	if (!ipv6_ffn_process(skb))
+		return NET_RX_SUCCESS;
+#endif
+
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
 		       net, NULL, skb, dev, NULL,
 		       ip6_rcv_finish);
@@ -474,8 +480,13 @@
 	kfree_skb_reason(skb, reason);
 }
 
-static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
+int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
+#ifdef CONFIG_IPV6_FFN
+	if (skb->ffn_state == FFN_STATE_FORWARDABLE)
+		ipv6_ffn_add(skb, IPV6_FFN_LOCAL_IN);
+#endif
+
 	skb_clear_delivery_time(skb);
 	rcu_read_lock();
 	ip6_protocol_deliver_rcu(net, skb, 0, false);
diff -ruw linux-6.13.12/net/ipv6/ip6_output.c linux-6.13.12-fbx/net/ipv6/ip6_output.c
--- linux-6.13.12/net/ipv6/ip6_output.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv6/ip6_output.c	2025-09-25 17:40:37.795378274 +0200
@@ -52,6 +52,7 @@
 #include <net/icmp.h>
 #include <net/xfrm.h>
 #include <net/checksum.h>
+#include <net/dsfield.h>
 #include <linux/mroute6.h>
 #include <net/l3mdev.h>
 #include <net/lwtunnel.h>
@@ -123,6 +124,11 @@
 
 	IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUT, skb->len);
 
+#ifdef CONFIG_IPV6_FFN
+	if (skb->ffn_state == FFN_STATE_FORWARDABLE)
+		ipv6_ffn_add(skb, IPV6_FFN_FINISH_OUT);
+#endif
+
 	rcu_read_lock();
 	nexthop = rt6_nexthop(dst_rt6_info(dst), daddr);
 	neigh = __ipv6_neigh_lookup_noref(dev, nexthop);
@@ -244,6 +250,11 @@
 		return 0;
 	}
 
+#ifdef CONFIG_IP_FFN
+	if (skb->ffn_state == FFN_STATE_FAST_FORWARDED)
+		return ip6_finish_output(net, sk, skb);
+#endif
+
 	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
 			    net, sk, skb, indev, dev,
 			    ip6_finish_output,
@@ -665,6 +676,8 @@
 
 	hdr->hop_limit--;
 
+	skb->priority = rt_tos2priority(ipv6_get_dsfield(hdr));
+
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
 		       net, NULL, skb, skb->dev, dst->dev,
 		       ip6_forward_finish);
diff -ruw linux-6.13.12/net/ipv6/ip6_tunnel.c linux-6.13.12-fbx/net/ipv6/ip6_tunnel.c
--- linux-6.13.12/net/ipv6/ip6_tunnel.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv6/ip6_tunnel.c	2025-09-25 17:40:37.795378274 +0200
@@ -68,9 +68,9 @@
 module_param(log_ecn_error, bool, 0644);
 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
 
-static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
+static u32 HASH(const struct in6_addr *addr)
 {
-	u32 hash = ipv6_addr_hash(addr1) ^ ipv6_addr_hash(addr2);
+	u32 hash = ipv6_addr_hash(addr);
 
 	return hash_32(hash, IP6_TUNNEL_HASH_SIZE_SHIFT);
 }
@@ -115,17 +115,26 @@
 ip6_tnl_lookup(struct net *net, int link,
 	       const struct in6_addr *remote, const struct in6_addr *local)
 {
-	unsigned int hash = HASH(remote, local);
+	unsigned int hash = HASH(local);
 	struct ip6_tnl *t, *cand = NULL;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 	struct in6_addr any;
+	struct __ip6_tnl_fmr *fmr;
 
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
-		    !ipv6_addr_equal(remote, &t->parms.raddr) ||
 		    !(t->dev->flags & IFF_UP))
 			continue;
 
+		if (!ipv6_addr_equal(remote, &t->parms.raddr)) {
+			for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) {
+				if (ipv6_prefix_equal(remote, &fmr->ip6_prefix,
+						      fmr->ip6_prefix_len))
+					return t;
+			}
+			continue;
+		}
+
 		if (link == t->parms.link)
 			return t;
 		else
@@ -133,7 +142,7 @@
 	}
 
 	memset(&any, 0, sizeof(any));
-	hash = HASH(&any, local);
+	hash = HASH(local);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (!ipv6_addr_equal(local, &t->parms.laddr) ||
 		    !ipv6_addr_any(&t->parms.raddr) ||
@@ -146,7 +155,7 @@
 			cand = t;
 	}
 
-	hash = HASH(remote, &any);
+	hash = HASH(&any);
 	for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
 		if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
 		    !ipv6_addr_any(&t->parms.laddr) ||
@@ -195,7 +204,7 @@
 
 	if (!ipv6_addr_any(remote) || !ipv6_addr_any(local)) {
 		prio = 1;
-		h = HASH(remote, local);
+		h = HASH(local);
 	}
 	return &ip6n->tnls[prio][h];
 }
@@ -376,6 +385,12 @@
 	struct net *net = t->net;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 
+	while (t->parms.fmrs) {
+		struct __ip6_tnl_fmr *next = t->parms.fmrs->next;
+		kfree(t->parms.fmrs);
+		t->parms.fmrs = next;
+	}
+
 	if (dev == ip6n->fb_tnl_dev)
 		RCU_INIT_POINTER(ip6n->tnls_wc[0], NULL);
 	else
@@ -790,6 +805,107 @@
 }
 EXPORT_SYMBOL_GPL(ip6_tnl_rcv_ctl);
 
+/**
+ * ip4ip6_fmr_calc - calculate target / source IPv6-address based on FMR
+ *   @dest: destination IPv6 address buffer
+ *   @iph: IPv4 header of the packet; @end: pointer past the packet data
+ *   @fmr: MAP FMR
+ *   @xmit: Calculate for xmit or rcv
+ **/
+static void ip4ip6_fmr_calc(struct in6_addr *dest,
+		const struct iphdr *iph, const uint8_t *end,
+		const struct __ip6_tnl_fmr *fmr, bool xmit)
+{
+	int psidlen = fmr->ea_len - (32 - fmr->ip4_prefix_len);
+	u8 *portp = NULL;
+	bool use_dest_addr;
+	const struct iphdr *dsth = iph;
+
+	if ((u8*)dsth >= end)
+		return;
+
+	/* find significant IP header */
+	if (iph->protocol == IPPROTO_ICMP) {
+		struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4);
+		if (ih && ((u8*)&ih[1]) <= end && (
+			ih->type == ICMP_DEST_UNREACH ||
+			ih->type == ICMP_SOURCE_QUENCH ||
+			ih->type == ICMP_TIME_EXCEEDED ||
+			ih->type == ICMP_PARAMETERPROB ||
+			ih->type == ICMP_REDIRECT))
+				dsth = (const struct iphdr*)&ih[1];
+	}
+
+	/* in xmit-path use dest port by default and source port only if
+		this is an ICMP reply to something else; vice versa in rcv-path */
+	use_dest_addr = (xmit && dsth == iph) || (!xmit && dsth != iph);
+
+	/* get dst port */
+	if (((u8*)&dsth[1]) <= end && (
+		dsth->protocol == IPPROTO_UDP ||
+		dsth->protocol == IPPROTO_TCP ||
+		dsth->protocol == IPPROTO_SCTP ||
+		dsth->protocol == IPPROTO_DCCP)) {
+			/* for UDP, TCP, SCTP and DCCP source and dest port
+			follow IPv4 header directly */
+			portp = ((u8*)dsth) + dsth->ihl * 4;
+
+			if (use_dest_addr)
+				portp += sizeof(u16);
+	} else if (iph->protocol == IPPROTO_ICMP) {
+		struct icmphdr *ih = (struct icmphdr*)(((u8*)dsth) + dsth->ihl * 4);
+
+		/* use icmp identifier as port */
+		if (((u8*)&ih) <= end && (
+		    (use_dest_addr && (
+		    ih->type == ICMP_ECHOREPLY ||
+			ih->type == ICMP_TIMESTAMPREPLY ||
+			ih->type == ICMP_INFO_REPLY ||
+			ih->type == ICMP_ADDRESSREPLY)) ||
+			(!use_dest_addr && (
+			ih->type == ICMP_ECHO ||
+			ih->type == ICMP_TIMESTAMP ||
+			ih->type == ICMP_INFO_REQUEST ||
+			ih->type == ICMP_ADDRESS)
+			)))
+				portp = (u8*)&ih->un.echo.id;
+	}
+
+	if ((portp && &portp[2] <= end) || psidlen == 0) {
+		int frombyte = fmr->ip6_prefix_len / 8;
+		int fromrem = fmr->ip6_prefix_len % 8;
+		int bytes = sizeof(struct in6_addr) - frombyte;
+		const u32 *addr = (use_dest_addr) ? &iph->daddr : &iph->saddr;
+		u64 eabits = ((u64)ntohl(*addr)) << (32 + fmr->ip4_prefix_len);
+		u64 t = 0;
+
+		/* extract PSID from port and add it to eabits */
+		u16 psidbits = 0;
+		if (psidlen > 0) {
+			psidbits = ((u16)portp[0]) << 8 | ((u16)portp[1]);
+			psidbits >>= 16 - psidlen - fmr->offset;
+			psidbits = (u16)(psidbits << (16 - psidlen));
+			eabits |= ((u64)psidbits) << (48 - (fmr->ea_len - psidlen));
+		}
+
+		/* rewrite destination address */
+		*dest = fmr->ip6_prefix;
+		memcpy(&dest->s6_addr[10], addr, sizeof(*addr));
+		dest->s6_addr16[7] = htons(psidbits >> (16 - psidlen));
+
+		if (bytes > sizeof(u64))
+			bytes = sizeof(u64);
+
+		/* insert eabits */
+		memcpy(&t, &dest->s6_addr[frombyte], bytes);
+		t = be64_to_cpu(t) & ~(((((u64)1) << fmr->ea_len) - 1)
+			<< (64 - fmr->ea_len - fromrem));
+		t = cpu_to_be64(t | (eabits >> fromrem));
+		memcpy(&dest->s6_addr[frombyte], &t, bytes);
+	}
+}
+
+
 static int __ip6_tnl_rcv(struct ip6_tnl *tunnel, struct sk_buff *skb,
 			 const struct tnl_ptk_info *tpi,
 			 struct metadata_dst *tun_dst,
@@ -855,6 +971,27 @@
 
 	memset(skb->cb, 0, sizeof(struct inet6_skb_parm));
 
+	if (tpi->proto == htons(ETH_P_IP) &&
+		!ipv6_addr_equal(&ipv6h->saddr, &tunnel->parms.raddr)) {
+			/* Packet didn't come from BR, so lookup FMR */
+			struct __ip6_tnl_fmr *fmr;
+			struct in6_addr expected = tunnel->parms.raddr;
+			for (fmr = tunnel->parms.fmrs; fmr; fmr = fmr->next)
+				if (ipv6_prefix_equal(&ipv6h->saddr,
+					&fmr->ip6_prefix, fmr->ip6_prefix_len))
+						break;
+
+			/* Check that IPv6 matches IPv4 source to prevent spoofing */
+			if (fmr)
+				ip4ip6_fmr_calc(&expected, ip_hdr(skb),
+						skb_tail_pointer(skb), fmr, false);
+
+			if (!ipv6_addr_equal(&ipv6h->saddr, &expected)) {
+				rcu_read_unlock();
+				goto drop;
+			}
+	}
+
 	__skb_tunnel_rx(skb, tunnel->dev, tunnel->net);
 
 	err = dscp_ecn_decapsulate(tunnel, ipv6h, skb);
@@ -1004,6 +1141,7 @@
 	opt->ops.opt_nflen = 8;
 }
 
+
 /**
  * ip6_tnl_addr_conflict - compare packet addresses to tunnel's own
  *   @t: the outgoing tunnel device
@@ -1257,8 +1395,10 @@
 	 */
 	max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr)
 			+ dst->header_len + t->hlen;
-	if (max_headroom > READ_ONCE(dev->needed_headroom))
-		WRITE_ONCE(dev->needed_headroom, max_headroom);
+	if (max_headroom > READ_ONCE(dev->needed_headroom) +
+	    dev->hard_header_len)
+		WRITE_ONCE(dev->needed_headroom,
+			   max_headroom - dev->hard_header_len);
 
 	err = ip6_tnl_encap(skb, t, &proto, fl6);
 	if (err)
@@ -1298,6 +1438,7 @@
 	const struct iphdr  *iph;
 	int encap_limit = -1;
 	__u16 offset;
+	struct __ip6_tnl_fmr *fmr;
 	struct flowi6 fl6;
 	__u8 dsfield, orig_dsfield;
 	__u32 mtu;
@@ -1393,6 +1534,18 @@
 	fl6.flowi6_uid = sock_net_uid(dev_net(dev), NULL);
 	dsfield = INET_ECN_encapsulate(dsfield, orig_dsfield);
 
+	/* try to find matching FMR */
+	for (fmr = t->parms.fmrs; fmr; fmr = fmr->next) {
+		unsigned mshift = 32 - fmr->ip4_prefix_len;
+		if (ntohl(fmr->ip4_prefix.s_addr) >> mshift ==
+				ntohl(ip_hdr(skb)->daddr) >> mshift)
+			break;
+	}
+
+	/* change dstaddr according to FMR */
+	if (fmr)
+		ip4ip6_fmr_calc(&fl6.daddr, ip_hdr(skb), skb_tail_pointer(skb), fmr, true);
+
 	if (iptunnel_handle_offloads(skb, SKB_GSO_IPXIP6))
 		return -1;
 
@@ -1546,6 +1699,14 @@
 	t->parms.link = p->link;
 	t->parms.proto = p->proto;
 	t->parms.fwmark = p->fwmark;
+
+	while (t->parms.fmrs) {
+		struct __ip6_tnl_fmr *next = t->parms.fmrs->next;
+		kfree(t->parms.fmrs);
+		t->parms.fmrs = next;
+	}
+	t->parms.fmrs = p->fmrs;
+
 	dst_cache_reset(&t->dst_cache);
 	ip6_tnl_link_config(t);
 }
@@ -1580,6 +1741,7 @@
 	p->flowinfo = u->flowinfo;
 	p->link = u->link;
 	p->proto = u->proto;
+	p->fmrs = NULL;
 	memcpy(p->name, u->name, sizeof(u->name));
 }
 
@@ -1963,13 +2125,22 @@
 	return 0;
 }
 
-static void ip6_tnl_netlink_parms(struct nlattr *data[],
+static const struct nla_policy ip6_tnl_fmr_policy[IFLA_IPTUN_FMR_MAX + 1] = {
+	[IFLA_IPTUN_FMR_IP6_PREFIX] = { .len = sizeof(struct in6_addr) },
+	[IFLA_IPTUN_FMR_IP4_PREFIX] = { .len = sizeof(struct in_addr) },
+	[IFLA_IPTUN_FMR_IP6_PREFIX_LEN] = { .type = NLA_U8 },
+	[IFLA_IPTUN_FMR_IP4_PREFIX_LEN] = { .type = NLA_U8 },
+	[IFLA_IPTUN_FMR_EA_LEN] = { .type = NLA_U8 },
+	[IFLA_IPTUN_FMR_OFFSET] = { .type = NLA_U8 }
+};
+
+static int ip6_tnl_netlink_parms(struct nlattr *data[],
 				  struct __ip6_tnl_parm *parms)
 {
 	memset(parms, 0, sizeof(*parms));
 
 	if (!data)
-		return;
+		return 0;
 
 	if (data[IFLA_IPTUN_LINK])
 		parms->link = nla_get_u32(data[IFLA_IPTUN_LINK]);
@@ -2000,6 +2171,52 @@
 
 	if (data[IFLA_IPTUN_FWMARK])
 		parms->fwmark = nla_get_u32(data[IFLA_IPTUN_FWMARK]);
+
+	if (data[IFLA_IPTUN_FMRS]) {
+		unsigned rem;
+		struct nlattr *fmr;
+
+		nla_for_each_nested(fmr, data[IFLA_IPTUN_FMRS], rem) {
+			struct nlattr *fmrd[IFLA_IPTUN_FMR_MAX + 1], *c;
+			struct __ip6_tnl_fmr *nfmr;
+			int err;
+
+			err = nla_parse_nested_deprecated(fmrd, IFLA_IPTUN_FMR_MAX,
+					       fmr, ip6_tnl_fmr_policy, NULL);
+			if (err)
+				return err;
+
+			if (!(nfmr = kzalloc(sizeof(*nfmr), GFP_KERNEL)))
+				return -ENOMEM;
+
+			nfmr->offset = 6;
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX]))
+				nla_memcpy(&nfmr->ip6_prefix, fmrd[IFLA_IPTUN_FMR_IP6_PREFIX],
+					sizeof(nfmr->ip6_prefix));
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX]))
+				nla_memcpy(&nfmr->ip4_prefix, fmrd[IFLA_IPTUN_FMR_IP4_PREFIX],
+					sizeof(nfmr->ip4_prefix));
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_IP6_PREFIX_LEN]))
+				nfmr->ip6_prefix_len = nla_get_u8(c);
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_IP4_PREFIX_LEN]))
+				nfmr->ip4_prefix_len = nla_get_u8(c);
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_EA_LEN]))
+				nfmr->ea_len = nla_get_u8(c);
+
+			if ((c = fmrd[IFLA_IPTUN_FMR_OFFSET]))
+				nfmr->offset = nla_get_u8(c);
+
+			nfmr->next = parms->fmrs;
+			parms->fmrs = nfmr;
+		}
+	}
+
+	return 0;
 }
 
 static int ip6_tnl_newlink(struct net *src_net, struct net_device *dev,
@@ -2020,7 +2237,9 @@
 			return err;
 	}
 
-	ip6_tnl_netlink_parms(data, &nt->parms);
+	err = ip6_tnl_netlink_parms(data, &nt->parms);
+	if (err)
+		return err;
 
 	if (nt->parms.collect_md) {
 		if (rtnl_dereference(ip6n->collect_md_tun))
@@ -2047,6 +2266,7 @@
 	struct net *net = t->net;
 	struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
 	struct ip_tunnel_encap ipencap;
+	int err;
 
 	if (dev == ip6n->fb_tnl_dev)
 		return -EINVAL;
@@ -2057,7 +2277,10 @@
 		if (err < 0)
 			return err;
 	}
-	ip6_tnl_netlink_parms(data, &p);
+	err = ip6_tnl_netlink_parms(data, &p);
+	if (err)
+		return err;
+
 	if (p.collect_md)
 		return -EINVAL;
 
@@ -2083,6 +2306,12 @@
 
 static size_t ip6_tnl_get_size(const struct net_device *dev)
 {
+	const struct ip6_tnl *t = netdev_priv(dev);
+	struct __ip6_tnl_fmr *c;
+	int fmrs = 0;
+	for (c = t->parms.fmrs; c; c = c->next)
+		++fmrs;
+
 	return
 		/* IFLA_IPTUN_LINK */
 		nla_total_size(4) +
@@ -2112,6 +2341,24 @@
 		nla_total_size(0) +
 		/* IFLA_IPTUN_FWMARK */
 		nla_total_size(4) +
+		/* IFLA_IPTUN_FMRS */
+		nla_total_size(0) +
+		(
+			/* nest */
+			nla_total_size(0) +
+			/* IFLA_IPTUN_FMR_IP6_PREFIX */
+			nla_total_size(sizeof(struct in6_addr)) +
+			/* IFLA_IPTUN_FMR_IP4_PREFIX */
+			nla_total_size(sizeof(struct in_addr)) +
+			/* IFLA_IPTUN_FMR_EA_LEN */
+			nla_total_size(1) +
+			/* IFLA_IPTUN_FMR_IP6_PREFIX_LEN */
+			nla_total_size(1) +
+			/* IFLA_IPTUN_FMR_IP4_PREFIX_LEN */
+			nla_total_size(1) +
+			/* IFLA_IPTUN_FMR_OFFSET */
+			nla_total_size(1)
+		) * fmrs +
 		0;
 }
 
@@ -2119,6 +2366,9 @@
 {
 	struct ip6_tnl *tunnel = netdev_priv(dev);
 	struct __ip6_tnl_parm *parm = &tunnel->parms;
+	struct __ip6_tnl_fmr *c;
+	int fmrcnt = 0;
+	struct nlattr *fmrs;
 
 	if (nla_put_u32(skb, IFLA_IPTUN_LINK, parm->link) ||
 	    nla_put_in6_addr(skb, IFLA_IPTUN_LOCAL, &parm->laddr) ||
@@ -2128,9 +2378,27 @@
 	    nla_put_be32(skb, IFLA_IPTUN_FLOWINFO, parm->flowinfo) ||
 	    nla_put_u32(skb, IFLA_IPTUN_FLAGS, parm->flags) ||
 	    nla_put_u8(skb, IFLA_IPTUN_PROTO, parm->proto) ||
-	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark))
+	    nla_put_u32(skb, IFLA_IPTUN_FWMARK, parm->fwmark) ||
+	    !(fmrs = nla_nest_start_noflag(skb, IFLA_IPTUN_FMRS)))
 		goto nla_put_failure;
 
+	for (c = parm->fmrs; c; c = c->next) {
+		struct nlattr *fmr = nla_nest_start_noflag(skb, ++fmrcnt);
+		if (!fmr ||
+			nla_put(skb, IFLA_IPTUN_FMR_IP6_PREFIX,
+				sizeof(c->ip6_prefix), &c->ip6_prefix) ||
+			nla_put(skb, IFLA_IPTUN_FMR_IP4_PREFIX,
+				sizeof(c->ip4_prefix), &c->ip4_prefix) ||
+			nla_put_u8(skb, IFLA_IPTUN_FMR_IP6_PREFIX_LEN, c->ip6_prefix_len) ||
+			nla_put_u8(skb, IFLA_IPTUN_FMR_IP4_PREFIX_LEN, c->ip4_prefix_len) ||
+			nla_put_u8(skb, IFLA_IPTUN_FMR_EA_LEN, c->ea_len) ||
+			nla_put_u8(skb, IFLA_IPTUN_FMR_OFFSET, c->offset))
+				goto nla_put_failure;
+
+		nla_nest_end(skb, fmr);
+	}
+	nla_nest_end(skb, fmrs);
+
 	if (nla_put_u16(skb, IFLA_IPTUN_ENCAP_TYPE, tunnel->encap.type) ||
 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_SPORT, tunnel->encap.sport) ||
 	    nla_put_be16(skb, IFLA_IPTUN_ENCAP_DPORT, tunnel->encap.dport) ||
@@ -2170,6 +2438,7 @@
 	[IFLA_IPTUN_ENCAP_DPORT]	= { .type = NLA_U16 },
 	[IFLA_IPTUN_COLLECT_METADATA]	= { .type = NLA_FLAG },
 	[IFLA_IPTUN_FWMARK]		= { .type = NLA_U32 },
+	[IFLA_IPTUN_FMRS]		= { .type = NLA_NESTED },
 };
 
 static struct rtnl_link_ops ip6_link_ops __read_mostly = {
diff -ruw linux-6.13.12/net/ipv6/netfilter/Kconfig linux-6.13.12-fbx/net/ipv6/netfilter/Kconfig
--- linux-6.13.12/net/ipv6/netfilter/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv6/netfilter/Kconfig	2025-09-25 17:40:37.799378294 +0200
@@ -17,6 +17,13 @@
 	  This is not needed if you are using iptables over nftables
 	  (iptables-nft).
 
+config IPV6_FFN
+	bool "IPv6: Fast forwarding and NAT"
+
+config IPV6_FFN_PROCFS
+	bool "IPv6: Fast forwarding and NAT /proc/net entries"
+	depends on IPV6_FFN
+
 config NF_SOCKET_IPV6
 	tristate "IPv6 socket lookup support"
 	help
diff -ruw linux-6.13.12/net/ipv6/netfilter/ip6_tables.c linux-6.13.12-fbx/net/ipv6/netfilter/ip6_tables.c
--- linux-6.13.12/net/ipv6/netfilter/ip6_tables.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv6/netfilter/ip6_tables.c	2025-09-25 17:40:37.799378294 +0200
@@ -1159,6 +1159,10 @@
 			   tmp.num_counters, tmp.counters);
 	if (ret)
 		goto free_newinfo_untrans;
+
+#ifdef CONFIG_IPV6_FFN
+	ipv6_ffn_flush_all();
+#endif
 	return 0;
 
  free_newinfo_untrans:
diff -ruw linux-6.13.12/net/ipv6/udp.c linux-6.13.12-fbx/net/ipv6/udp.c
--- linux-6.13.12/net/ipv6/udp.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/ipv6/udp.c	2025-09-25 17:40:37.803378314 +0200
@@ -973,6 +973,82 @@
  * Note: called only from the BH handler context,
  * so we don't need to lock the hashes.
  */
+static int __udp6_lib_uc_conflict_deliver(struct net *net, struct sk_buff *skb,
+		const struct in6_addr *saddr, const struct in6_addr *daddr,
+		struct udp_table *udptable, int proto)
+{
+	struct sock *sk, *first = NULL;
+	const struct udphdr *uh = udp_hdr(skb);
+	unsigned short hnum = ntohs(uh->dest);
+	struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
+	unsigned int offset = offsetof(typeof(*sk), sk_node);
+	int dif = inet6_iif(skb);
+	int sdif = inet6_sdif(skb);
+	struct hlist_node *node;
+	struct sk_buff *nskb;
+	bool found_non_dup;
+
+	found_non_dup = false;
+	sk_for_each_entry_offset_rcu(sk, node, &hslot->head, offset) {
+		bool need_deliver;
+
+		if (!__udp_v6_is_mcast_sock(net, sk, uh->dest, daddr,
+					    uh->source, saddr, dif, sdif, hnum))
+
+			continue;
+
+		/* If zero checksum and no_check is not on for
+		 * the socket then skip it.
+		 */
+		if (!uh->check && !udp_get_no_check6_rx(sk))
+			continue;
+
+		if (sock_flag(sk, SOCK_UDP_DUP_UNICAST))
+			need_deliver = true;
+		else {
+			if (!found_non_dup)
+				need_deliver = true;
+			else
+				need_deliver = false;
+			found_non_dup = true;
+		}
+
+		if (!need_deliver)
+			continue;
+
+		if (!first) {
+			first = sk;
+			continue;
+		}
+		nskb = skb_clone(skb, GFP_ATOMIC);
+		if (unlikely(!nskb)) {
+			atomic_inc(&sk->sk_drops);
+			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
+					 IS_UDPLITE(sk));
+			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
+					 IS_UDPLITE(sk));
+			continue;
+		}
+
+		if (udpv6_queue_rcv_skb(sk, nskb) > 0)
+			consume_skb(nskb);
+	}
+
+	if (first) {
+		if (udpv6_queue_rcv_skb(first, skb) > 0)
+			consume_skb(skb);
+	} else {
+		kfree_skb(skb);
+		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
+				 proto == IPPROTO_UDPLITE);
+	}
+	return 0;
+}
+
+/*
+ * Note: called only from the BH handler context,
+ * so we don't need to lock the hashes.
+ */
 static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 		const struct in6_addr *saddr, const struct in6_addr *daddr,
 		struct udp_table *udptable, int proto)
@@ -1148,6 +1224,12 @@
 	if (sk) {
 		if (!uh->check && !udp_get_no_check6_rx(sk))
 			goto report_csum_error;
+
+		if (sk->sk_reuse_conflict)
+			return __udp6_lib_uc_conflict_deliver(net, skb,
+						      saddr, daddr,
+						      udptable, proto);
+
 		return udp6_unicast_rcv_skb(sk, skb, uh);
 	}
 no_sk:
diff -ruw linux-6.13.12/net/mac80211/Kconfig linux-6.13.12-fbx/net/mac80211/Kconfig
--- linux-6.13.12/net/mac80211/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/Kconfig	2025-09-25 17:40:37.811378354 +0200
@@ -314,3 +314,9 @@
 	  connect more stations than the number selected here.)
 
 	  If unsure, leave the default of 0.
+
+config FBX80211_SCUM
+	bool "Same channel unassociated metrics"
+	depends on FBX80211
+	help
+	  Support for unassociated STA metrics
diff -ruw linux-6.13.12/net/mac80211/Makefile linux-6.13.12-fbx/net/mac80211/Makefile
--- linux-6.13.12/net/mac80211/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/Makefile	2025-09-25 17:40:37.811378354 +0200
@@ -54,6 +54,7 @@
 	mesh_ps.o
 
 mac80211-$(CONFIG_PM) += pm.o
+mac80211-$(CONFIG_FBX80211_SCUM) += fbx_scum.o fbx_scum_monif.o
 
 CFLAGS_trace.o := -I$(src)
 
diff -ruw linux-6.13.12/net/mac80211/agg-tx.c linux-6.13.12-fbx/net/mac80211/agg-tx.c
--- linux-6.13.12/net/mac80211/agg-tx.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/agg-tx.c	2025-09-25 17:40:37.811378354 +0200
@@ -130,6 +130,14 @@
 }
 EXPORT_SYMBOL(ieee80211_send_bar);
 
+void ieee80211_send_bar_sta(struct ieee80211_sta *pubsta,
+			    u16 tid, u16 ssn)
+{
+	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
+	ieee80211_send_bar(&sta->sdata->vif, pubsta->addr, tid, ssn);
+}
+EXPORT_SYMBOL(ieee80211_send_bar_sta);
+
 void ieee80211_assign_tid_tx(struct sta_info *sta, int tid,
 			     struct tid_ampdu_tx *tid_tx)
 {
diff -ruw linux-6.13.12/net/mac80211/cfg.c linux-6.13.12-fbx/net/mac80211/cfg.c
--- linux-6.13.12/net/mac80211/cfg.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/cfg.c	2025-09-25 17:40:37.811378354 +0200
@@ -147,8 +147,8 @@
 					   struct ieee80211_bss_conf *link_conf)
 {
 	struct ieee80211_sub_if_data *tx_sdata;
+	struct ieee80211_bss_conf *old;
 
-	sdata->vif.mbssid_tx_vif = NULL;
 	link_conf->bssid_index = 0;
 	link_conf->nontransmitted = false;
 	link_conf->ema_ap = false;
@@ -157,14 +157,27 @@
 	if (sdata->vif.type != NL80211_IFTYPE_AP || !params->tx_wdev)
 		return -EINVAL;
 
+	old = sdata_dereference(link_conf->tx_bss_conf, sdata);
+	if (old)
+		return -EALREADY;
+
 	tx_sdata = IEEE80211_WDEV_TO_SUB_IF(params->tx_wdev);
 	if (!tx_sdata)
 		return -EINVAL;
 
 	if (tx_sdata == sdata) {
-		sdata->vif.mbssid_tx_vif = &sdata->vif;
+		rcu_assign_pointer(link_conf->tx_bss_conf, link_conf);
 	} else {
-		sdata->vif.mbssid_tx_vif = &tx_sdata->vif;
+		struct ieee80211_bss_conf *tx_bss_conf;
+
+		tx_bss_conf =
+			sdata_dereference(tx_sdata->vif.link_conf[params->tx_link_id],
+					  sdata);
+		if (rcu_access_pointer(tx_bss_conf->tx_bss_conf) != tx_bss_conf)
+			return -EINVAL;
+
+		rcu_assign_pointer(link_conf->tx_bss_conf, tx_bss_conf);
+
 		link_conf->nontransmitted = true;
 		link_conf->bssid_index = params->index;
 	}
@@ -503,6 +516,9 @@
 	if (IS_ERR(link))
 		return PTR_ERR(link);
 
+	if (WARN_ON(pairwise && link_id >= 0))
+		return -EINVAL;
+
 	if (pairwise && params->mode == NL80211_KEY_SET_TX)
 		return ieee80211_set_tx(sdata, mac_addr, key_idx);
 
@@ -525,10 +541,12 @@
 	if (IS_ERR(key))
 		return PTR_ERR(key);
 
-	key->conf.link_id = link_id;
-
-	if (pairwise)
+	if (pairwise) {
 		key->conf.flags |= IEEE80211_KEY_FLAG_PAIRWISE;
+		key->conf.link_id = -1;
+	} else {
+		key->conf.link_id = link->link_id;
+	}
 
 	if (params->mode == NL80211_KEY_NO_TX)
 		key->conf.flags |= IEEE80211_KEY_FLAG_NO_AUTO_TX;
@@ -1405,6 +1423,8 @@
 				(IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ |
 				 IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ |
 				 IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ);
+		link_conf->enable_mcs15 = !(u8_get_bits(params->eht_oper->params,
+							IEEE80211_EHT_OPER_MCS15_DISABLE));
 	} else {
 		link_conf->eht_su_beamformer = false;
 		link_conf->eht_su_beamformee = false;
@@ -1665,13 +1685,12 @@
 	kfree(link_conf->ftmr_params);
 	link_conf->ftmr_params = NULL;
 
-	sdata->vif.mbssid_tx_vif = NULL;
 	link_conf->bssid_index = 0;
 	link_conf->nontransmitted = false;
 	link_conf->ema_ap = false;
 	link_conf->bssid_indicator = 0;
 
-	__sta_info_flush(sdata, true, link_id);
+	__sta_info_flush(sdata, true, link_id, NULL);
 
 	ieee80211_remove_link_keys(link, &keys);
 	if (!list_empty(&keys)) {
@@ -1679,6 +1698,9 @@
 		ieee80211_free_key_list(local, &keys);
 	}
 
+	ieee80211_stop_mbssid(sdata);
+	RCU_INIT_POINTER(link_conf->tx_bss_conf, NULL);
+
 	link_conf->enable_beacon = false;
 	sdata->beacon_rate_set = false;
 	sdata->vif.cfg.ssid_len = 0;
@@ -1857,7 +1879,8 @@
 		       params->vht_capa ||
 		       params->he_capa ||
 		       params->eht_capa ||
-		       params->opmode_notif_used;
+		       params->opmode_notif_used ||
+		       params->tp_overridden;
 
 	switch (mode) {
 	case STA_LINK_MODE_NEW:
@@ -1946,6 +1969,9 @@
 					      sband->band);
 	}
 
+	if (params->tp_overridden)
+		link_sta->pub->tp_override = params->tp_override;
+
 	return 0;
 }
 
@@ -2062,6 +2088,9 @@
 	if (params->listen_interval >= 0)
 		sta->listen_interval = params->listen_interval;
 
+	if (params->eml_cap_present)
+		sta->sta.eml_cap = params->eml_cap;
+
 	ret = sta_link_apply_parameters(local, sta, STA_LINK_MODE_STA_MODIFY,
 					&params->link_sta_params);
 	if (ret)
@@ -2698,8 +2727,10 @@
 
 	err = ieee80211_link_use_channel(&sdata->deflink, &chanreq,
 					 IEEE80211_CHANCTX_SHARED);
-	if (err)
+	if (err) {
+		kfree(ifmsh->ie);
 		return err;
+	}
 
 	return ieee80211_start_mesh(sdata);
 }
@@ -3004,7 +3035,7 @@
 	return 0;
 }
 
-static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u32 changed)
+static int ieee80211_set_wiphy_params(struct wiphy *wiphy, u8 radio_id, u32 changed)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
 	int err;
@@ -3033,7 +3064,13 @@
 	}
 
 	if (changed & WIPHY_PARAM_RTS_THRESHOLD) {
-		err = drv_set_rts_threshold(local, wiphy->rts_threshold);
+		u32 rts_threshold;
+
+		if (radio_id >= wiphy->n_radio)
+			rts_threshold = wiphy->rts_threshold;
+		else
+			rts_threshold = wiphy->radio_cfg[radio_id].rts_threshold;
+		err = drv_set_rts_threshold(local, radio_id, rts_threshold);
 
 		if (err)
 			return err;
@@ -3062,12 +3099,13 @@
 }
 
 static int ieee80211_set_tx_power(struct wiphy *wiphy,
-				  struct wireless_dev *wdev,
+				  struct wireless_dev *wdev, u8 radio_id,
 				  enum nl80211_tx_power_setting type, int mbm)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_sub_if_data *sdata;
 	enum nl80211_tx_power_setting txp_type = type;
+	struct ieee80211_chanctx_conf *conf;
 	bool update_txp_type = false;
 	bool has_monitor = false;
 	int user_power_level;
@@ -3143,6 +3181,12 @@
 			if (!link)
 				continue;
 
+			if (radio_id < wiphy->n_radio) {
+				conf = wiphy_dereference(wiphy, link->conf->chanctx_conf);
+				if (!conf || conf->radio_idx != radio_id)
+					continue;
+			}
+
 			link->user_power_level = local->user_power_level;
 			if (txp_type != link->conf->txpower_type)
 				update_txp_type = true;
@@ -3163,6 +3207,12 @@
 			if (!link)
 				continue;
 
+			if (radio_id < wiphy->n_radio) {
+				conf = wiphy_dereference(wiphy, link->conf->chanctx_conf);
+				if (!conf || conf->radio_idx != radio_id)
+					continue;
+			}
+
 			ieee80211_recalc_txpower(link, update_txp_type);
 		}
 	}
@@ -3190,19 +3240,27 @@
 
 static int ieee80211_get_tx_power(struct wiphy *wiphy,
 				  struct wireless_dev *wdev,
+				  unsigned int link_id,
 				  int *dbm)
 {
 	struct ieee80211_local *local = wiphy_priv(wiphy);
 	struct ieee80211_sub_if_data *sdata = IEEE80211_WDEV_TO_SUB_IF(wdev);
+	struct ieee80211_link_data *link_data;
 
 	if (local->ops->get_txpower &&
 	    (sdata->flags & IEEE80211_SDATA_IN_DRIVER))
-		return drv_get_txpower(local, sdata, dbm);
+		return drv_get_txpower(local, sdata, link_id, dbm);
 
-	if (local->emulate_chanctx)
+	if (local->emulate_chanctx) {
 		*dbm = local->hw.conf.power_level;
+	} else {
+		link_data = wiphy_dereference(wiphy, sdata->link[link_id]);
+
+		if (link_data)
+			*dbm = link_data->conf->txpower;
 	else
-		*dbm = sdata->vif.bss_conf.txpower;
+			return -ENOLINK;
+	}
 
 	/* INT_MIN indicates no power level was set yet */
 	if (*dbm == INT_MIN)
@@ -3517,6 +3575,61 @@
 	return 0;
 }
 
+static bool
+__ieee80211_is_scan_ongoing(struct wiphy *wiphy,
+			    struct ieee80211_local *local,
+			    struct cfg80211_chan_def *chandef)
+{
+	struct cfg80211_scan_request *scan_req;
+	int chan_radio_idx, req_radio_idx;
+	struct ieee80211_roc_work *roc;
+	bool ret = false;
+
+	if (!list_empty(&local->roc_list) || local->scanning)
+		ret = true;
+
+	if (wiphy->n_radio < 2)
+		return ret;
+
+	/* Multiple HWs are grouped under the same wiphy. If neither a scan
+	 * nor a remain-on-channel is in progress, return immediately.
+	 */
+	if (!ret)
+		return ret;
+
+	req_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chandef->chan);
+	if (req_radio_idx < 0)
+		return true;
+
+	if (local->scanning) {
+		rcu_read_lock();
+		scan_req = rcu_dereference(local->scan_req);
+		/* A scan is in progress but the request info is missing.
+		 * This should not happen; if it does, play it safe and
+		 * assume the hw cannot be used, hence return true.
+		 */
+		if (WARN_ON(!scan_req)) {
+			rcu_read_unlock();
+			return true;
+		}
+
+		ret = ieee80211_is_radio_idx_in_scan_req(wiphy, scan_req, req_radio_idx);
+		rcu_read_unlock();
+		return ret;
+	}
+
+	if (!list_empty(&local->roc_list)) {
+		list_for_each_entry(roc, &local->roc_list, list) {
+			chan_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, roc->chan);
+			if (chan_radio_idx == req_radio_idx)
+				return true;
+		}
+		return false;
+	}
+
+	return false;
+}
+
 static int ieee80211_start_radar_detection(struct wiphy *wiphy,
 					   struct net_device *dev,
 					   struct cfg80211_chan_def *chandef,
@@ -3530,7 +3643,7 @@
 
 	lockdep_assert_wiphy(local->hw.wiphy);
 
-	if (!list_empty(&local->roc_list) || local->scanning)
+	if (__ieee80211_is_scan_ongoing(wiphy, local, chandef))
 		return -EBUSY;
 
 	link_data = sdata_dereference(sdata->link[link_id], sdata);
@@ -3688,6 +3801,7 @@
 {
 	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
 	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_bss_conf *tx_bss_conf;
 	struct ieee80211_link_data *link_data;
 
 	if (WARN_ON(link_id >= IEEE80211_MLD_MAX_NUM_LINKS))
@@ -3701,25 +3815,24 @@
 		return;
 	}
 
-	/* TODO: MBSSID with MLO changes */
-	if (vif->mbssid_tx_vif == vif) {
+	tx_bss_conf = rcu_dereference(link_data->conf->tx_bss_conf);
+	if (tx_bss_conf == link_data->conf) {
 		/* Trigger ieee80211_csa_finish() on the non-transmitting
 		 * interfaces when channel switch is received on
 		 * transmitting interface
 		 */
-		struct ieee80211_sub_if_data *iter;
+		struct ieee80211_link_data *iter;
 
-		list_for_each_entry_rcu(iter, &local->interfaces, list) {
-			if (!ieee80211_sdata_running(iter))
+		for_each_sdata_link(local, iter) {
+			if (iter->sdata == sdata ||
+			    rcu_access_pointer(iter->conf->tx_bss_conf) != tx_bss_conf)
 				continue;
 
-			if (iter == sdata || iter->vif.mbssid_tx_vif != vif)
-				continue;
-
-			wiphy_work_queue(iter->local->hw.wiphy,
-					 &iter->deflink.csa.finalize_work);
+			wiphy_work_queue(iter->sdata->local->hw.wiphy,
+					 &iter->csa.finalize_work);
 		}
 	}
+
 	wiphy_work_queue(local->hw.wiphy, &link_data->csa.finalize_work);
 
 	rcu_read_unlock();
@@ -4022,7 +4135,7 @@
 
 	lockdep_assert_wiphy(local->hw.wiphy);
 
-	if (!list_empty(&local->roc_list) || local->scanning)
+	if (__ieee80211_is_scan_ongoing(wiphy, local, &params->chandef))
 		return -EBUSY;
 
 	if (sdata->wdev.links[link_id].cac_started)
@@ -4254,10 +4367,12 @@
 	int size = sizeof(*nullfunc);
 	__le16 fc;
 	bool qos;
+	struct ieee80211_bss_conf *conf;
 	struct ieee80211_tx_info *info;
 	struct sta_info *sta;
 	struct ieee80211_chanctx_conf *chanctx_conf;
 	enum nl80211_band band;
+	u8 link_id;
 	int ret;
 
 	/* the lock is needed to assign the cookie later */
@@ -4272,11 +4387,25 @@
 
 	qos = sta->sta.wme;
 
-	chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
+	/* In case of ML vif, we shall use the default sta link to
+	 * send the probe frame. For non ML vif the link id 0 is
+	 * the deflink
+	 */
+	link_id = sta->deflink.link_id;
+
+	conf = rcu_dereference(sdata->vif.link_conf[link_id]);
+
+	if (unlikely(!conf)) {
+		ret = -ENOLINK;
+		goto unlock;
+	}
+
+	chanctx_conf = rcu_dereference(conf->chanctx_conf);
 	if (WARN_ON(!chanctx_conf)) {
 		ret = -EINVAL;
 		goto unlock;
 	}
+
 	band = chanctx_conf->def.chan->band;
 
 	if (qos) {
@@ -4304,8 +4433,8 @@
 	nullfunc->frame_control = fc;
 	nullfunc->duration_id = 0;
 	memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
-	memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
-	memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
+	memcpy(nullfunc->addr2, conf->addr, ETH_ALEN);
+	memcpy(nullfunc->addr3, conf->addr, ETH_ALEN);
 	nullfunc->seq_ctrl = 0;
 
 	info = IEEE80211_SKB_CB(skb);
@@ -4314,6 +4443,8 @@
 		       IEEE80211_TX_INTFL_NL80211_FRAME_TX;
 	info->band = band;
 
+	info->control.flags |= u32_encode_bits(link_id, IEEE80211_TX_CTRL_MLO_LINK);
+
 	skb_set_queue_mapping(skb, IEEE80211_AC_VO);
 	skb->priority = 7;
 	if (qos)
@@ -4822,20 +4953,22 @@
 
 	ieee80211_link_info_change_notify(sdata, link, changed);
 
-	if (!sdata->vif.bss_conf.nontransmitted && sdata->vif.mbssid_tx_vif) {
-		struct ieee80211_sub_if_data *child;
+	if (!link->conf->nontransmitted &&
+	    rcu_access_pointer(link->conf->tx_bss_conf)) {
+		struct ieee80211_link_data *tmp;
+
+		for_each_sdata_link(sdata->local, tmp) {
+			if (tmp->sdata == sdata ||
+			    rcu_access_pointer(tmp->conf->tx_bss_conf) != link->conf)
+				continue;
 
-		list_for_each_entry(child, &sdata->local->interfaces, list) {
-			if (child != sdata && child->vif.mbssid_tx_vif == &sdata->vif) {
-				child->vif.bss_conf.he_bss_color.color = color;
-				child->vif.bss_conf.he_bss_color.enabled = enable;
-				ieee80211_link_info_change_notify(child,
-								  &child->deflink,
+			tmp->conf->he_bss_color.color = color;
+			tmp->conf->he_bss_color.enabled = enable;
+			ieee80211_link_info_change_notify(tmp->sdata, tmp,
 								  BSS_CHANGED_HE_BSS_COLOR);
 			}
 		}
 	}
-}
 
 static int ieee80211_color_change_finalize(struct ieee80211_link_data *link)
 {
@@ -5172,6 +5305,18 @@
 	return ieee80211_req_neg_ttlm(sdata, params);
 }
 
+static int
+ieee80211_assoc_ml_reconf(struct wiphy *wiphy, struct net_device *dev,
+			  struct cfg80211_assoc_link *add_links,
+			  u16 rem_links)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
+
+	return ieee80211_mgd_assoc_ml_reconf(sdata, add_links, rem_links);
+}
+
 const struct cfg80211_ops mac80211_config_ops = {
 	.add_virtual_intf = ieee80211_add_iface,
 	.del_virtual_intf = ieee80211_del_iface,
@@ -5286,4 +5431,5 @@
 	.set_hw_timestamp = ieee80211_set_hw_timestamp,
 	.set_ttlm = ieee80211_set_ttlm,
 	.get_radio_mask = ieee80211_get_radio_mask,
+	.assoc_ml_reconf = ieee80211_assoc_ml_reconf,
 };
diff -ruw linux-6.13.12/net/mac80211/chan.c linux-6.13.12-fbx/net/mac80211/chan.c
--- linux-6.13.12/net/mac80211/chan.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/chan.c	2025-09-25 17:40:37.811378354 +0200
@@ -247,6 +247,13 @@
 	if (!link_sta)
 		return NL80211_CHAN_WIDTH_20_NOHT;
 
+	/*
+	 * We assume that TX/RX might be asymmetric (so e.g. VHT operating
+	 * mode notification changes what a STA wants to receive, but not
+	 * necessarily what it will transmit to us), and therefore use the
+	 * capabilities here. Calling it RX bandwidth capability is a bit
+	 * wrong though, since capabilities are in fact symmetric.
+	 */
 	width = ieee80211_sta_cap_rx_bw(link_sta);
 
 	switch (width) {
@@ -637,15 +644,23 @@
 	return NULL;
 }
 
-bool ieee80211_is_radar_required(struct ieee80211_local *local)
+bool ieee80211_is_radar_required(struct ieee80211_local *local,
+				 struct cfg80211_scan_request *req)
 {
+	struct wiphy *wiphy = local->hw.wiphy;
 	struct ieee80211_link_data *link;
+	struct ieee80211_channel *chan;
+	int radio_idx;
 
 	lockdep_assert_wiphy(local->hw.wiphy);
 
 	for_each_sdata_link(local, link) {
-		if (link->radar_required)
-			return true;
+		if (link->radar_required) {
+			chan = link->conf->chanreq.oper.chan;
+			radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan);
+			return ieee80211_is_radio_idx_in_scan_req(wiphy, req,
+								  radio_idx);
+		}
 	}
 
 	return false;
@@ -2124,6 +2139,9 @@
 {
 	struct ieee80211_sub_if_data *sdata = link->sdata;
 
+	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+		return;
+
 	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
 	if (rcu_access_pointer(link->conf->chanctx_conf))
diff -ruw linux-6.13.12/net/mac80211/debug.h linux-6.13.12-fbx/net/mac80211/debug.h
--- linux-6.13.12/net/mac80211/debug.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/debug.h	2025-09-25 17:40:37.811378354 +0200
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
  * Portions
- * Copyright (C) 2022 - 2023 Intel Corporation
+ * Copyright (C) 2022 - 2024 Intel Corporation
  */
 #ifndef __MAC80211_DEBUG_H
 #define __MAC80211_DEBUG_H
@@ -152,6 +152,14 @@
 		else							\
 			_sdata_err((link)->sdata, fmt, ##__VA_ARGS__);	\
 	} while (0)
+#define link_id_info(sdata, link_id, fmt, ...)				\
+	do {								\
+		if (ieee80211_vif_is_mld(&sdata->vif))			\
+			_sdata_info(sdata, "[link %d] " fmt, link_id,	\
+				    ##__VA_ARGS__);			\
+		else							\
+			_sdata_info(sdata, fmt, ##__VA_ARGS__);		\
+	} while (0)
 #define _link_id_dbg(print, sdata, link_id, fmt, ...)			\
 	do {								\
 		if (ieee80211_vif_is_mld(&(sdata)->vif))		\
diff -ruw linux-6.13.12/net/mac80211/debugfs.c linux-6.13.12-fbx/net/mac80211/debugfs.c
--- linux-6.13.12/net/mac80211/debugfs.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/debugfs.c	2025-09-25 17:40:37.811378354 +0200
@@ -284,7 +284,8 @@
 	q_limit_low_old = local->aql_txq_limit_low[ac];
 	q_limit_high_old = local->aql_txq_limit_high[ac];
 
-	wiphy_lock(local->hw.wiphy);
+	guard(wiphy)(local->hw.wiphy);
+
 	local->aql_txq_limit_low[ac] = q_limit_low;
 	local->aql_txq_limit_high[ac] = q_limit_high;
 
@@ -296,7 +297,6 @@
 			sta->airtime[ac].aql_limit_high = q_limit_high;
 		}
 	}
-	wiphy_unlock(local->hw.wiphy);
 
 	return count;
 }
@@ -492,6 +492,8 @@
 	FLAG(DISALLOW_PUNCTURING),
 	FLAG(DISALLOW_PUNCTURING_5GHZ),
 	FLAG(HANDLES_QUIET_CSA),
+	FLAG(APVLAN_NEED_MCAST_TO_UCAST),
+	FLAG(ALLOW_DRV_TX_FOR_DATA),
 	FLAG(STRICT),
 #undef FLAG
 };
diff -ruw linux-6.13.12/net/mac80211/debugfs_key.c linux-6.13.12-fbx/net/mac80211/debugfs_key.c
--- linux-6.13.12/net/mac80211/debugfs_key.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/debugfs_key.c	2025-07-01 14:10:45.700125948 +0200
@@ -402,25 +402,6 @@
 	}
 }
 
-void ieee80211_debugfs_key_add_mgmt_default(struct ieee80211_sub_if_data *sdata)
-{
-	char buf[50];
-	struct ieee80211_key *key;
-
-	if (!sdata->vif.debugfs_dir)
-		return;
-
-	key = wiphy_dereference(sdata->local->hw.wiphy,
-				sdata->deflink.default_mgmt_key);
-	if (key) {
-		sprintf(buf, "../keys/%d", key->debugfs.cnt);
-		sdata->debugfs.default_mgmt_key =
-			debugfs_create_symlink("default_mgmt_key",
-					       sdata->vif.debugfs_dir, buf);
-	} else
-		ieee80211_debugfs_key_remove_mgmt_default(sdata);
-}
-
 void ieee80211_debugfs_key_remove_mgmt_default(struct ieee80211_sub_if_data *sdata)
 {
 	if (!sdata)
@@ -431,27 +412,6 @@
 }
 
 void
-ieee80211_debugfs_key_add_beacon_default(struct ieee80211_sub_if_data *sdata)
-{
-	char buf[50];
-	struct ieee80211_key *key;
-
-	if (!sdata->vif.debugfs_dir)
-		return;
-
-	key = wiphy_dereference(sdata->local->hw.wiphy,
-				sdata->deflink.default_beacon_key);
-	if (key) {
-		sprintf(buf, "../keys/%d", key->debugfs.cnt);
-		sdata->debugfs.default_beacon_key =
-			debugfs_create_symlink("default_beacon_key",
-					       sdata->vif.debugfs_dir, buf);
-	} else {
-		ieee80211_debugfs_key_remove_beacon_default(sdata);
-	}
-}
-
-void
 ieee80211_debugfs_key_remove_beacon_default(struct ieee80211_sub_if_data *sdata)
 {
 	if (!sdata)
@@ -460,10 +420,3 @@
 	debugfs_remove(sdata->debugfs.default_beacon_key);
 	sdata->debugfs.default_beacon_key = NULL;
 }
-
-void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
-				   struct sta_info *sta)
-{
-	debugfs_remove(key->debugfs.stalink);
-	key->debugfs.stalink = NULL;
-}
diff -ruw linux-6.13.12/net/mac80211/debugfs_key.h linux-6.13.12-fbx/net/mac80211/debugfs_key.h
--- linux-6.13.12/net/mac80211/debugfs_key.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/debugfs_key.h	2025-07-01 14:10:45.700125948 +0200
@@ -6,16 +6,10 @@
 void ieee80211_debugfs_key_add(struct ieee80211_key *key);
 void ieee80211_debugfs_key_remove(struct ieee80211_key *key);
 void ieee80211_debugfs_key_update_default(struct ieee80211_sub_if_data *sdata);
-void ieee80211_debugfs_key_add_mgmt_default(
-	struct ieee80211_sub_if_data *sdata);
 void ieee80211_debugfs_key_remove_mgmt_default(
 	struct ieee80211_sub_if_data *sdata);
-void ieee80211_debugfs_key_add_beacon_default(
-	struct ieee80211_sub_if_data *sdata);
 void ieee80211_debugfs_key_remove_beacon_default(
 	struct ieee80211_sub_if_data *sdata);
-void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
-				   struct sta_info *sta);
 #else
 static inline void ieee80211_debugfs_key_add(struct ieee80211_key *key)
 {}
@@ -24,21 +18,12 @@
 static inline void ieee80211_debugfs_key_update_default(
 	struct ieee80211_sub_if_data *sdata)
 {}
-static inline void ieee80211_debugfs_key_add_mgmt_default(
-	struct ieee80211_sub_if_data *sdata)
-{}
 static inline void ieee80211_debugfs_key_remove_mgmt_default(
 	struct ieee80211_sub_if_data *sdata)
 {}
-static inline void ieee80211_debugfs_key_add_beacon_default(
-	struct ieee80211_sub_if_data *sdata)
-{}
 static inline void ieee80211_debugfs_key_remove_beacon_default(
 	struct ieee80211_sub_if_data *sdata)
 {}
-static inline void ieee80211_debugfs_key_sta_del(struct ieee80211_key *key,
-						 struct sta_info *sta)
-{}
 #endif
 
 #endif /* __MAC80211_DEBUGFS_KEY_H */
diff -ruw linux-6.13.12/net/mac80211/debugfs_netdev.c linux-6.13.12-fbx/net/mac80211/debugfs_netdev.c
--- linux-6.13.12/net/mac80211/debugfs_netdev.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/debugfs_netdev.c	2025-09-25 17:40:37.815378373 +0200
@@ -1025,16 +1025,7 @@
 
 void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
 {
-	struct dentry *dir;
-	char buf[10 + IFNAMSIZ];
-
-	dir = sdata->vif.debugfs_dir;
-
-	if (IS_ERR_OR_NULL(dir))
-		return;
-
-	sprintf(buf, "netdev:%s", sdata->name);
-	debugfs_rename(dir->d_parent, dir, dir->d_parent, buf);
+	debugfs_change_name(sdata->vif.debugfs_dir, "netdev:%s", sdata->name);
 }
 
 void ieee80211_debugfs_recreate_netdev(struct ieee80211_sub_if_data *sdata,
diff -ruw linux-6.13.12/net/mac80211/driver-ops.c linux-6.13.12-fbx/net/mac80211/driver-ops.c
--- linux-6.13.12/net/mac80211/driver-ops.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/driver-ops.c	2025-09-25 17:40:37.815378373 +0200
@@ -54,6 +54,30 @@
 	local->started = false;
 }
 
+int drv_get_powered(struct ieee80211_local *local, bool *up, bool *busy)
+{
+	int ret = -EOPNOTSUPP;
+
+	might_sleep();
+
+	if (local->ops->get_powered)
+		ret = local->ops->get_powered(&local->hw, up, busy);
+
+	return ret;
+}
+
+int drv_set_powered(struct ieee80211_local *local)
+{
+	int ret = -EOPNOTSUPP;
+
+	might_sleep();
+
+	if (local->ops->set_powered)
+		ret = local->ops->set_powered(&local->hw);
+
+	return ret;
+}
+
 int drv_add_interface(struct ieee80211_local *local,
 		      struct ieee80211_sub_if_data *sdata)
 {
@@ -477,7 +501,8 @@
 			 sdata->vif.type == NL80211_IFTYPE_NAN ||
 			 (sdata->vif.type == NL80211_IFTYPE_MONITOR &&
 			  !sdata->vif.bss_conf.mu_mimo_owner &&
-			  !(changed & BSS_CHANGED_TXPOWER))))
+			  !(changed & (BSS_CHANGED_TXPOWER |
+				       BSS_CHANGED_QOS)))))
 		return;
 
 	if (!check_sdata_in_driver(sdata))
diff -ruw linux-6.13.12/net/mac80211/driver-ops.h linux-6.13.12-fbx/net/mac80211/driver-ops.h
--- linux-6.13.12/net/mac80211/driver-ops.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/driver-ops.h	2025-09-25 17:40:37.815378373 +0200
@@ -89,6 +89,8 @@
 
 int drv_start(struct ieee80211_local *local);
 void drv_stop(struct ieee80211_local *local, bool suspend);
+int drv_get_powered(struct ieee80211_local *local, bool *up, bool *busy);
+int drv_set_powered(struct ieee80211_local *local);
 
 #ifdef CONFIG_PM
 static inline int drv_suspend(struct ieee80211_local *local,
@@ -402,16 +404,16 @@
 }
 
 static inline int drv_set_rts_threshold(struct ieee80211_local *local,
-					u32 value)
+					u8 radio_id, u32 value)
 {
 	int ret = 0;
 
 	might_sleep();
 	lockdep_assert_wiphy(local->hw.wiphy);
 
-	trace_drv_set_rts_threshold(local, value);
+	trace_drv_set_rts_threshold(local, radio_id, value);
 	if (local->ops->set_rts_threshold)
-		ret = local->ops->set_rts_threshold(&local->hw, value);
+		ret = local->ops->set_rts_threshold(&local->hw, radio_id, value);
 	trace_drv_return_int(local, ret);
 	return ret;
 }
@@ -1276,7 +1278,8 @@
 }
 
 static inline int drv_get_txpower(struct ieee80211_local *local,
-				  struct ieee80211_sub_if_data *sdata, int *dbm)
+				  struct ieee80211_sub_if_data *sdata,
+				  unsigned int link_id, int *dbm)
 {
 	int ret;
 
@@ -1286,8 +1289,8 @@
 	if (!local->ops->get_txpower)
 		return -EOPNOTSUPP;
 
-	ret = local->ops->get_txpower(&local->hw, &sdata->vif, dbm);
-	trace_drv_get_txpower(local, sdata, *dbm, ret);
+	ret = local->ops->get_txpower(&local->hw, &sdata->vif, link_id, dbm);
+	trace_drv_get_txpower(local, sdata, link_id, *dbm, ret);
 
 	return ret;
 }
diff -ruw linux-6.13.12/net/mac80211/eht.c linux-6.13.12-fbx/net/mac80211/eht.c
--- linux-6.13.12/net/mac80211/eht.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/eht.c	2025-09-25 17:40:37.815378373 +0200
@@ -33,6 +33,33 @@
 						  sdata->vif.type ==
 							NL80211_IFTYPE_STATION);
 
+	/*
+	 * mediatek MT7927 windows driver 5.4.0.3044 sets the
+	 * IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G bit in
+	 * 6Ghz band, which is supposed to be reserved
+	 *
+	 * This causes the kernel to miscalculate mcs_nss_size to 3
+	 * bytes, resulting in incorrect rx/tx nss map, so the sta is
+	 * believed to have 0 NSS for 80/160/320.
+	 *
+	 * Since the driver does not set the
+	 * IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT bit, we can do
+	 * a hack that should not affect other implementations, and
+	 * assume that any leftover bytes in the eht capabilities must
+	 * mean that we miscalculated the mcs_nss size.
+	 */
+	if (!(eht_cap_ie_elem->fixed.phy_cap_info[5] &
+	      IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT) &&
+	    mcs_nss_size == 3 &&
+	    eht_cap_len > eht_total_size + mcs_nss_size) {
+		/* check if the leftover looks like a possible mcs_nss
+		 * mapping (should be multiple of 3 bytes) */
+		u8 var_size = eht_cap_len - sizeof(eht_cap->eht_cap_elem);
+		if (var_size == 3 || var_size == 6 || var_size == 9) {
+			mcs_nss_size = var_size;
+		}
+	}
+
 	eht_total_size += mcs_nss_size;
 
 	/* Calculate the PPE thresholds length only if the header is present */
diff -ruw linux-6.13.12/net/mac80211/ethtool.c linux-6.13.12-fbx/net/mac80211/ethtool.c
--- linux-6.13.12/net/mac80211/ethtool.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/ethtool.c	2025-09-25 17:40:37.815378373 +0200
@@ -19,16 +19,13 @@
 				   struct netlink_ext_ack *extack)
 {
 	struct ieee80211_local *local = wiphy_priv(dev->ieee80211_ptr->wiphy);
-	int ret;
 
 	if (rp->rx_mini_pending != 0 || rp->rx_jumbo_pending != 0)
 		return -EINVAL;
 
-	wiphy_lock(local->hw.wiphy);
-	ret = drv_set_ringparam(local, rp->tx_pending, rp->rx_pending);
-	wiphy_unlock(local->hw.wiphy);
+	guard(wiphy)(local->hw.wiphy);
 
-	return ret;
+	return drv_set_ringparam(local, rp->tx_pending, rp->rx_pending);
 }
 
 static void ieee80211_get_ringparam(struct net_device *dev,
@@ -40,10 +37,10 @@
 
 	memset(rp, 0, sizeof(*rp));
 
-	wiphy_lock(local->hw.wiphy);
+	guard(wiphy)(local->hw.wiphy);
+
 	drv_get_ringparam(local, &rp->tx_pending, &rp->tx_max_pending,
 			  &rp->rx_pending, &rp->rx_max_pending);
-	wiphy_unlock(local->hw.wiphy);
 }
 
 static const char ieee80211_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
@@ -57,6 +54,22 @@
 };
 #define STA_STATS_LEN	ARRAY_SIZE(ieee80211_gstrings_sta_stats)
 
+struct ethtool_priv_flags_strings {
+	const char string[ETH_GSTRING_LEN];
+};
+
+enum {
+	POWERED_SUPPORTED	= (1 << 0),
+	POWERED_STATUS		= (1 << 1),
+	POWERED_CHANGE_BUSY	= (1 << 2),
+};
+
+static const struct ethtool_priv_flags_strings ieee80211_pflags_strings[] = {
+	{ .string = "powered-supported" },
+	{ .string = "powered-status" },
+	{ .string = "powered-change-busy", },
+};
+
 static int ieee80211_get_sset_count(struct net_device *dev, int sset)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
@@ -67,6 +80,9 @@
 
 	rv += drv_get_et_sset_count(sdata, sset);
 
+	if (sset == ETH_SS_PRIV_FLAGS)
+		rv += ARRAY_SIZE(ieee80211_pflags_strings);
+
 	if (rv == 0)
 		return -EOPNOTSUPP;
 	return rv;
@@ -109,7 +125,7 @@
 	 * network device.
 	 */
 
-	wiphy_lock(local->hw.wiphy);
+	guard(wiphy)(local->hw.wiphy);
 
 	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
 		sta = sta_info_get_bss(sdata, sdata->deflink.u.mgd.bssid);
@@ -160,6 +176,10 @@
 	chanctx_conf = rcu_dereference(sdata->vif.bss_conf.chanctx_conf);
 	if (chanctx_conf)
 		channel = chanctx_conf->def.chan;
+	else if (local->open_count > 0 &&
+		 local->open_count == local->monitors &&
+		 sdata->vif.type == NL80211_IFTYPE_MONITOR)
+		channel = local->monitor_chanreq.oper.chan;
 	else
 		channel = NULL;
 	rcu_read_unlock();
@@ -205,13 +225,10 @@
 	else
 		data[i++] = -1LL;
 
-	if (WARN_ON(i != STA_STATS_LEN)) {
-		wiphy_unlock(local->hw.wiphy);
+	if (WARN_ON(i != STA_STATS_LEN))
 		return;
-	}
 
 	drv_get_et_stats(sdata, stats, &(data[STA_STATS_LEN]));
-	wiphy_unlock(local->hw.wiphy);
 }
 
 static void ieee80211_get_strings(struct net_device *dev, u32 sset, u8 *data)
@@ -224,6 +241,9 @@
 		memcpy(data, ieee80211_gstrings_sta_stats, sz_sta_stats);
 	}
 	drv_get_et_strings(sdata, sset, &(data[sz_sta_stats]));
+	if (sset == ETH_SS_PRIV_FLAGS)
+		memcpy(data, ieee80211_pflags_strings,
+		       sizeof (ieee80211_pflags_strings));
 }
 
 static int ieee80211_get_regs_len(struct net_device *dev)
@@ -241,6 +261,35 @@
 	regs->len = 0;
 }
 
+static u32 ieee80211_get_priv_flags(struct net_device *dev)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	bool powered, powered_busy;
+	u32 ret;
+
+	ret = 0;
+	if (!drv_get_powered(local, &powered, &powered_busy)) {
+		ret |= POWERED_SUPPORTED;
+		if (powered)
+			ret |= POWERED_STATUS;
+		if (powered_busy)
+			ret |= POWERED_CHANGE_BUSY;
+	}
+	return ret;
+}
+
+static int ieee80211_set_priv_flags(struct net_device *dev, u32 flags)
+{
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+
+	if (flags & (POWERED_STATUS))
+		return drv_set_powered(local);
+
+	return 0;
+}
+
 const struct ethtool_ops ieee80211_ethtool_ops = {
 	.get_drvinfo = cfg80211_get_drvinfo,
 	.get_regs_len = ieee80211_get_regs_len,
@@ -251,4 +300,6 @@
 	.get_strings = ieee80211_get_strings,
 	.get_ethtool_stats = ieee80211_get_stats,
 	.get_sset_count = ieee80211_get_sset_count,
+	.set_priv_flags	= ieee80211_set_priv_flags,
+	.get_priv_flags	= ieee80211_get_priv_flags,
 };
diff -ruw linux-6.13.12/net/mac80211/he.c linux-6.13.12-fbx/net/mac80211/he.c
--- linux-6.13.12/net/mac80211/he.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/he.c	2025-09-25 17:40:37.815378373 +0200
@@ -3,10 +3,11 @@
  * HE handling
  *
  * Copyright(c) 2017 Intel Deutschland GmbH
- * Copyright(c) 2019 - 2023 Intel Corporation
+ * Copyright(c) 2019 - 2024 Intel Corporation
  */
 
 #include "ieee80211_i.h"
+#include "rate.h"
 
 static void
 ieee80211_update_from_he_6ghz_capa(const struct ieee80211_he_6ghz_capa *he_6ghz_capa,
@@ -158,6 +159,14 @@
 		       &he_cap_ie[sizeof(he_cap->he_cap_elem) + mcs_nss_size],
 		       he_ppe_size);
 
+	/* Check if STA supports at least single spatial stream */
+	if (he_cap->he_mcs_nss_supp.rx_mcs_80 == cpu_to_le16(0xFFFF)) {
+		he_cap->has_he = false;
+		sdata_info(sdata, "Ignoring HE IE from %pM due to invalid rx_mcs_80\n",
+			   link_sta->pub->addr);
+		return;
+	}
+
 	he_cap->has_he = true;
 
 	link_sta->cur_max_bandwidth = ieee80211_sta_cap_rx_bw(link_sta);
@@ -248,3 +257,119 @@
 		he_obss_pd->enable = true;
 	}
 }
+
+static void ieee80211_link_sta_rc_update_omi(struct ieee80211_link_data *link,
+					     struct link_sta_info *link_sta)
+{
+	struct ieee80211_sub_if_data *sdata = link->sdata;
+	struct ieee80211_supported_band *sband;
+	enum ieee80211_sta_rx_bandwidth new_bw;
+	enum nl80211_band band;
+
+	band = link->conf->chanreq.oper.chan->band;
+	sband = sdata->local->hw.wiphy->bands[band];
+
+	new_bw = ieee80211_sta_cur_vht_bw(link_sta);
+	if (link_sta->pub->bandwidth == new_bw)
+		return;
+
+	link_sta->pub->bandwidth = new_bw;
+	rate_control_rate_update(sdata->local, sband, link_sta,
+				 IEEE80211_RC_BW_CHANGED);
+}
+
+bool ieee80211_prepare_rx_omi_bw(struct ieee80211_link_sta *pub_link_sta,
+				 enum ieee80211_sta_rx_bandwidth bw)
+{
+	struct sta_info *sta = container_of(pub_link_sta->sta,
+					    struct sta_info, sta);
+	struct ieee80211_local *local = sta->sdata->local;
+	struct link_sta_info *link_sta =
+		sdata_dereference(sta->link[pub_link_sta->link_id], sta->sdata);
+	struct ieee80211_link_data *link =
+		sdata_dereference(sta->sdata->link[pub_link_sta->link_id],
+				  sta->sdata);
+	struct ieee80211_chanctx_conf *conf;
+	struct ieee80211_chanctx *chanctx;
+	bool ret;
+
+	if (WARN_ON(!link || !link_sta || link_sta->pub != pub_link_sta))
+		return false;
+
+	conf = sdata_dereference(link->conf->chanctx_conf, sta->sdata);
+	if (WARN_ON(!conf))
+		return false;
+
+	trace_api_prepare_rx_omi_bw(local, sta->sdata, link_sta, bw);
+
+	chanctx = container_of(conf, typeof(*chanctx), conf);
+
+	if (link_sta->rx_omi_bw_staging == bw) {
+		ret = false;
+		goto trace;
+	}
+
+	/* must call this API in pairs */
+	if (WARN_ON(link_sta->rx_omi_bw_tx != link_sta->rx_omi_bw_staging ||
+		    link_sta->rx_omi_bw_rx != link_sta->rx_omi_bw_staging)) {
+		ret = false;
+		goto trace;
+	}
+
+	if (bw < link_sta->rx_omi_bw_staging) {
+		link_sta->rx_omi_bw_tx = bw;
+		ieee80211_link_sta_rc_update_omi(link, link_sta);
+	} else {
+		link_sta->rx_omi_bw_rx = bw;
+		ieee80211_recalc_chanctx_min_def(local, chanctx, NULL, false);
+	}
+
+	link_sta->rx_omi_bw_staging = bw;
+	ret = true;
+trace:
+	trace_api_return_bool(local, ret);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(ieee80211_prepare_rx_omi_bw);
+
+void ieee80211_finalize_rx_omi_bw(struct ieee80211_link_sta *pub_link_sta)
+{
+	struct sta_info *sta = container_of(pub_link_sta->sta,
+					    struct sta_info, sta);
+	struct ieee80211_local *local = sta->sdata->local;
+	struct link_sta_info *link_sta =
+		sdata_dereference(sta->link[pub_link_sta->link_id], sta->sdata);
+	struct ieee80211_link_data *link =
+		sdata_dereference(sta->sdata->link[pub_link_sta->link_id],
+				  sta->sdata);
+	struct ieee80211_chanctx_conf *conf;
+	struct ieee80211_chanctx *chanctx;
+
+	if (WARN_ON(!link || !link_sta || link_sta->pub != pub_link_sta))
+		return;
+
+	conf = sdata_dereference(link->conf->chanctx_conf, sta->sdata);
+	if (WARN_ON(!conf))
+		return;
+
+	trace_api_finalize_rx_omi_bw(local, sta->sdata, link_sta);
+
+	chanctx = container_of(conf, typeof(*chanctx), conf);
+
+	if (link_sta->rx_omi_bw_tx != link_sta->rx_omi_bw_staging) {
+		/* rate control in finalize only when widening bandwidth */
+		WARN_ON(link_sta->rx_omi_bw_tx > link_sta->rx_omi_bw_staging);
+		link_sta->rx_omi_bw_tx = link_sta->rx_omi_bw_staging;
+		ieee80211_link_sta_rc_update_omi(link, link_sta);
+	}
+
+	if (link_sta->rx_omi_bw_rx != link_sta->rx_omi_bw_staging) {
+		/* channel context in finalize only when narrowing bandwidth */
+		WARN_ON(link_sta->rx_omi_bw_rx < link_sta->rx_omi_bw_staging);
+		link_sta->rx_omi_bw_rx = link_sta->rx_omi_bw_staging;
+		ieee80211_recalc_chanctx_min_def(local, chanctx, NULL, false);
+	}
+
+	trace_api_return_void(local);
+}
+EXPORT_SYMBOL_GPL(ieee80211_finalize_rx_omi_bw);
diff -ruw linux-6.13.12/net/mac80211/ibss.c linux-6.13.12-fbx/net/mac80211/ibss.c
--- linux-6.13.12/net/mac80211/ibss.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/ibss.c	2025-09-25 17:40:37.815378373 +0200
@@ -245,6 +245,7 @@
 		sdata->vif.cfg.ibss_creator = false;
 		sdata->vif.bss_conf.enable_beacon = false;
 		netif_carrier_off(sdata->dev);
+		synchronize_net();
 		ieee80211_bss_info_change_notify(sdata,
 						 BSS_CHANGED_IBSS |
 						 BSS_CHANGED_BEACON_ENABLED);
@@ -1826,8 +1827,8 @@
 {
 	struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
 
-	ieee80211_ibss_disconnect(sdata);
 	ifibss->ssid_len = 0;
+	ieee80211_ibss_disconnect(sdata);
 	eth_zero_addr(ifibss->bssid);
 
 	/* remove beacon */
diff -ruw linux-6.13.12/net/mac80211/ieee80211_i.h linux-6.13.12-fbx/net/mac80211/ieee80211_i.h
--- linux-6.13.12/net/mac80211/ieee80211_i.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/ieee80211_i.h	2025-09-29 14:23:07.625732509 +0200
@@ -404,6 +404,8 @@
 	int tries;
 	u16 algorithm, expected_transaction;
 
+	unsigned long userspace_selectors[BITS_TO_LONGS(128)];
+
 	u8 key[WLAN_KEY_LEN_WEP104];
 	u8 key_len, key_idx;
 	bool done, waiting;
@@ -444,6 +446,8 @@
 	const u8 *supp_rates;
 	u8 supp_rates_len;
 
+	unsigned long userspace_selectors[BITS_TO_LONGS(128)];
+
 	unsigned long timeout;
 	int tries;
 
@@ -602,6 +606,15 @@
 	/* dialog token enumerator for neg TTLM request */
 	u8 dialog_token_alloc;
 	struct wiphy_delayed_work neg_ttlm_timeout_work;
+
+	/* Locally initiated multi-link reconfiguration */
+	struct {
+		struct ieee80211_mgd_assoc_data *add_links_data;
+		struct wiphy_delayed_work wk;
+		u16 removed_links;
+		u16 added_links;
+		u8 dialog_token;
+	} reconf;
 };
 
 struct ieee80211_if_ibss {
@@ -938,11 +951,22 @@
 	struct ieee80211_txq txq;
 };
 
+#ifdef CONFIG_FBX80211_SCUM
+struct ieee80211_if_scum_monif {
+	struct ieee80211_vif *vif;
+	struct list_head next;
+	bool skip_mon;
+};
+#endif
+
 struct ieee80211_if_mntr {
 	u32 flags;
 	u8 mu_follow_addr[ETH_ALEN] __aligned(2);
 
 	struct list_head list;
+#ifdef CONFIG_FBX80211_SCUM
+	struct ieee80211_if_scum_monif scum;
+#endif
 };
 
 /**
@@ -1181,6 +1205,11 @@
 	} debugfs;
 #endif
 
+#ifdef CONFIG_FBX80211_SCUM
+	struct fbx80211_scum_ops __rcu *scum_ops;
+	struct list_head scum_client_list;
+#endif
+
 	/* must be last, dynamically sized area in this! */
 	struct ieee80211_vif vif;
 };
@@ -1204,7 +1233,7 @@
 	for (int ___link_id = 0;					\
 	     ___link_id < ARRAY_SIZE(___sdata->link);			\
 	     ___link_id++)						\
-	if ((_link = wiphy_dereference((local)->hw.wiphy,		\
+	if ((_link = wiphy_dereference((_local)->hw.wiphy,		\
 				       ___sdata->link[___link_id])))
 
 static inline int
@@ -2059,6 +2088,8 @@
 	ieee80211_vif_set_links(sdata, 0, 0);
 }
 
+void ieee80211_apvlan_link_setup(struct ieee80211_sub_if_data *sdata);
+
 /* tx handling */
 void ieee80211_clear_tx_pending(struct ieee80211_local *local);
 void ieee80211_tx_pending(struct tasklet_struct *t);
@@ -2111,8 +2142,6 @@
 int ieee80211_send_smps_action(struct ieee80211_sub_if_data *sdata,
 			       enum ieee80211_smps_mode smps, const u8 *da,
 			       const u8 *bssid, int link_id);
-bool ieee80211_smps_is_restrictive(enum ieee80211_smps_mode smps_mode_old,
-				   enum ieee80211_smps_mode smps_mode_new);
 void ieee80211_add_addbaext(struct sk_buff *skb,
 			    const u8 req_addba_ext_data,
 			    u16 buf_size);
@@ -2684,8 +2713,12 @@
 				      struct ieee80211_chanctx *ctx,
 				      struct ieee80211_link_data *rsvd_for,
 				      bool check_reserved);
-bool ieee80211_is_radar_required(struct ieee80211_local *local);
+bool ieee80211_is_radar_required(struct ieee80211_local *local,
+				 struct cfg80211_scan_request *req);
 
+bool ieee80211_is_radio_idx_in_scan_req(struct wiphy *wiphy,
+					struct cfg80211_scan_request *scan_req,
+					int radio_idx);
 void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work);
 void ieee80211_dfs_cac_cancel(struct ieee80211_local *local,
 			      struct ieee80211_chanctx *chanctx);
@@ -2768,6 +2801,14 @@
 void ieee80211_add_wbrf(struct ieee80211_local *local, struct cfg80211_chan_def *chandef);
 void ieee80211_remove_wbrf(struct ieee80211_local *local, struct cfg80211_chan_def *chandef);
 
+int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata,
+				  struct cfg80211_assoc_link *add_links,
+				  u16 rem_links);
+
+void ieee80211_process_ml_reconf_resp(struct ieee80211_sub_if_data *sdata,
+				      struct ieee80211_mgmt *mgmt, size_t len);
+void ieee80211_stop_mbssid(struct ieee80211_sub_if_data *sdata);
+
 #if IS_ENABLED(CONFIG_MAC80211_KUNIT_TEST)
 #define EXPORT_SYMBOL_IF_MAC80211_KUNIT(sym) EXPORT_SYMBOL_IF_KUNIT(sym)
 #define VISIBLE_IF_MAC80211_KUNIT
diff -ruw linux-6.13.12/net/mac80211/iface.c linux-6.13.12-fbx/net/mac80211/iface.c
--- linux-6.13.12/net/mac80211/iface.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/iface.c	2025-09-29 14:23:07.625732509 +0200
@@ -26,6 +26,7 @@
 #include "driver-ops.h"
 #include "wme.h"
 #include "rate.h"
+#include "fbx_scum.h"
 
 /**
  * DOC: Interface list locking
@@ -295,7 +296,6 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ieee80211_local *local = sdata->local;
-	int ret;
 
 	/*
 	 * This happens during unregistration if there's a bond device
@@ -305,11 +305,9 @@
 	if (!dev->ieee80211_ptr->registered)
 		return 0;
 
-	wiphy_lock(local->hw.wiphy);
-	ret = _ieee80211_change_mac(sdata, addr);
-	wiphy_unlock(local->hw.wiphy);
+	guard(wiphy)(local->hw.wiphy);
 
-	return ret;
+	return _ieee80211_change_mac(sdata, addr);
 }
 
 static inline int identical_mac_addr_allowed(int type1, int type2)
@@ -445,16 +443,13 @@
 	if (!is_valid_ether_addr(dev->dev_addr))
 		return -EADDRNOTAVAIL;
 
-	wiphy_lock(sdata->local->hw.wiphy);
+	guard(wiphy)(sdata->local->hw.wiphy);
+
 	err = ieee80211_check_concurrent_iface(sdata, sdata->vif.type);
 	if (err)
-		goto out;
-
-	err = ieee80211_do_open(&sdata->wdev, true);
-out:
-	wiphy_unlock(sdata->local->hw.wiphy);
-
 	return err;
+
+	return ieee80211_do_open(&sdata->wdev, true);
 }
 
 static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_down)
@@ -491,8 +486,14 @@
 	case NL80211_IFTYPE_MONITOR:
 		if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
 			break;
+		if (fbx80211_skip_mon(sdata))
+			break;
 		list_del_rcu(&sdata->u.mntr.list);
 		break;
+	case NL80211_IFTYPE_AP_VLAN:
+		sdata->wdev.valid_links = 0;
+		ieee80211_vif_clear_links(sdata);
+		break;
 	default:
 		break;
 	}
@@ -666,6 +667,9 @@
 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 		ieee80211_txq_remove_vlan(local, sdata);
 
+	if (sdata->vif.txq)
+		ieee80211_txq_purge(sdata->local, to_txq_info(sdata->vif.txq));
+
 	sdata->bss = NULL;
 
 	if (local->open_count == 0)
@@ -733,30 +737,59 @@
 		ieee80211_add_virtual_monitor(local);
 }
 
-static void ieee80211_stop_mbssid(struct ieee80211_sub_if_data *sdata)
+void ieee80211_stop_mbssid(struct ieee80211_sub_if_data *sdata)
 {
-	struct ieee80211_sub_if_data *tx_sdata, *non_tx_sdata, *tmp_sdata;
-	struct ieee80211_vif *tx_vif = sdata->vif.mbssid_tx_vif;
+	struct ieee80211_sub_if_data *tx_sdata;
+	struct ieee80211_bss_conf *link_conf, *tx_bss_conf;
+	struct ieee80211_link_data *tx_link, *link;
+	unsigned int link_id;
 
-	if (!tx_vif)
-		return;
+	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
-	tx_sdata = vif_to_sdata(tx_vif);
-	sdata->vif.mbssid_tx_vif = NULL;
+	/* Check if any of the links of current sdata is an MBSSID. */
+	for_each_vif_active_link(&sdata->vif, link_conf, link_id) {
+		tx_bss_conf = sdata_dereference(link_conf->tx_bss_conf, sdata);
+		if (!tx_bss_conf)
+			continue;
+
+		tx_sdata = vif_to_sdata(tx_bss_conf->vif);
+		RCU_INIT_POINTER(link_conf->tx_bss_conf, NULL);
 
-	list_for_each_entry_safe(non_tx_sdata, tmp_sdata,
-				 &tx_sdata->local->interfaces, list) {
-		if (non_tx_sdata != sdata && non_tx_sdata != tx_sdata &&
-		    non_tx_sdata->vif.mbssid_tx_vif == tx_vif &&
-		    ieee80211_sdata_running(non_tx_sdata)) {
-			non_tx_sdata->vif.mbssid_tx_vif = NULL;
-			dev_close(non_tx_sdata->wdev.netdev);
+		/* If we are not tx sdata, reset tx sdata's tx_bss_conf to avoid recursion
+		 * while closing tx sdata at the end of outer loop below.
+		 */
+		if (sdata != tx_sdata) {
+			tx_link = sdata_dereference(tx_sdata->link[tx_bss_conf->link_id],
+						    tx_sdata);
+			if (!tx_link)
+				continue;
+
+			RCU_INIT_POINTER(tx_link->conf->tx_bss_conf, NULL);
 		}
+
+		/* loop through sdatas to find if any of their links
+		 * belong to same MBSSID set as the one getting deleted.
+		 */
+		for_each_sdata_link(tx_sdata->local, link) {
+			struct ieee80211_sub_if_data *link_sdata = link->sdata;
+
+			if (link_sdata == sdata || link_sdata == tx_sdata ||
+			    rcu_access_pointer(link->conf->tx_bss_conf) != tx_bss_conf)
+				continue;
+
+			RCU_INIT_POINTER(link->conf->tx_bss_conf, NULL);
+
+			/* Remove all links of matching MLD until dynamic link
+			 * removal can be supported.
+			 */
+			cfg80211_stop_iface(link_sdata->wdev.wiphy, &link_sdata->wdev,
+					    GFP_KERNEL);
 	}
 
-	if (sdata != tx_sdata && ieee80211_sdata_running(tx_sdata)) {
-		tx_sdata->vif.mbssid_tx_vif = NULL;
-		dev_close(tx_sdata->wdev.netdev);
+		/* If we are not tx sdata, remove links of tx sdata and proceed */
+		if (sdata != tx_sdata && ieee80211_sdata_running(tx_sdata))
+			cfg80211_stop_iface(tx_sdata->wdev.wiphy,
+					    &tx_sdata->wdev, GFP_KERNEL);
 	}
 }
 
@@ -764,22 +797,26 @@
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 
-	/* close dependent VLAN and MBSSID interfaces before locking wiphy */
+	/* close dependent VLAN interfaces before locking wiphy */
 	if (sdata->vif.type == NL80211_IFTYPE_AP) {
 		struct ieee80211_sub_if_data *vlan, *tmpsdata;
 
 		list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans,
 					 u.vlan.list)
 			dev_close(vlan->dev);
-
-		ieee80211_stop_mbssid(sdata);
 	}
 
-	wiphy_lock(sdata->local->hw.wiphy);
+	guard(wiphy)(sdata->local->hw.wiphy);
+
 	wiphy_work_cancel(sdata->local->hw.wiphy, &sdata->activate_links_work);
 
+	/* Close the dependent MBSSID interfaces with wiphy lock as we may be
+	 * terminating its partner links too in case of MLD.
+	 */
+	if (sdata->vif.type == NL80211_IFTYPE_AP)
+		ieee80211_stop_mbssid(sdata);
+
 	ieee80211_do_stop(sdata, true);
-	wiphy_unlock(sdata->local->hw.wiphy);
 
 	return 0;
 }
@@ -988,7 +1025,7 @@
 		    local->hw.wiphy->frag_threshold != (u32)-1)
 			flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
 
-		if (local->monitors)
+		if (!list_empty(&local->mon_list))
 			flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
 	} else {
 		flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
@@ -1240,7 +1277,7 @@
 	struct net_device *dev = wdev->netdev;
 	struct ieee80211_local *local = sdata->local;
 	u64 changed = 0;
-	int res;
+	int link_id, res;
 	u32 hw_reconf_flags = 0;
 
 	lockdep_assert_wiphy(local->hw.wiphy);
@@ -1272,6 +1309,8 @@
 		sdata->crypto_tx_tailroom_needed_cnt +=
 			master->crypto_tx_tailroom_needed_cnt;
 
+		ieee80211_apvlan_link_setup(sdata);
+
 		break;
 		}
 	case NL80211_IFTYPE_AP:
@@ -1328,7 +1367,20 @@
 	case NL80211_IFTYPE_AP_VLAN:
 		/* no need to tell driver, but set carrier and chanctx */
 		if (sdata->bss->active) {
+			struct ieee80211_link_data *link;
+			unsigned long valid_links = sdata->vif.valid_links;
+
+			if (valid_links) {
+				for_each_set_bit(link_id, &valid_links,
+						 IEEE80211_MLD_MAX_NUM_LINKS) {
+					link = sdata_dereference(sdata->link[link_id],
+								 sdata);
+					ieee80211_link_vlan_copy_chanctx(link);
+				}
+			} else {
 			ieee80211_link_vlan_copy_chanctx(&sdata->deflink);
+			}
+
 			netif_carrier_on(dev);
 			ieee80211_set_vif_encap_ops(sdata);
 		} else {
@@ -1359,6 +1411,10 @@
 			hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR;
 		}
 
+		if (!(sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) &&
+		    !fbx80211_skip_mon(sdata))
+			list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list);
+
 		ieee80211_adjust_monitor_flags(sdata, 1);
 		ieee80211_configure_filter(local);
 		ieee80211_recalc_offload(local);
@@ -1432,11 +1488,6 @@
 	case NL80211_IFTYPE_P2P_DEVICE:
 		rcu_assign_pointer(local->p2p_sdata, sdata);
 		break;
-	case NL80211_IFTYPE_MONITOR:
-		if (sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES)
-			break;
-		list_add_tail_rcu(&sdata->u.mntr.list, &local->mon_list);
-		break;
 	default:
 		break;
 	}
@@ -1570,6 +1621,10 @@
 				ieee80211_process_neg_ttlm_res(sdata, mgmt,
 							       skb->len);
 				break;
+			case WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP:
+				ieee80211_process_ml_reconf_resp(sdata, mgmt,
+								 skb->len);
+				break;
 			default:
 				break;
 			}
@@ -2283,7 +2338,7 @@
 	 */
 	cfg80211_shutdown_all_interfaces(local->hw.wiphy);
 
-	wiphy_lock(local->hw.wiphy);
+	guard(wiphy)(local->hw.wiphy);
 
 	WARN(local->open_count, "%s: open count remains %d\n",
 	     wiphy_name(local->hw.wiphy), local->open_count);
@@ -2313,7 +2368,6 @@
 		if (!netdev)
 			kfree(sdata);
 	}
-	wiphy_unlock(local->hw.wiphy);
 }
 
 static int netdev_notify(struct notifier_block *nb,
diff -ruw linux-6.13.12/net/mac80211/key.c linux-6.13.12-fbx/net/mac80211/key.c
--- linux-6.13.12/net/mac80211/key.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/key.c	2025-09-25 17:40:37.819378393 +0200
@@ -166,6 +166,12 @@
 		 * Hence, don't send GTKs for VLAN interfaces to the driver.
 		 */
 		if (!(key->conf.flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
+			if (ieee80211_hw_check(&key->local->hw,
+					       APVLAN_NEED_MCAST_TO_UCAST)) {
+				/* no need to fail, this key will
+				 * never be used */
+				return 0;
+			}
 			ret = 1;
 			goto out_unsupported;
 		}
@@ -1409,7 +1415,7 @@
 	if (sdata->u.mgd.mfp != IEEE80211_MFP_DISABLED)
 		key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
 
-	key->conf.link_id = link_id;
+	key->conf.link_id = link_data->link_id;
 
 	err = ieee80211_key_link(key, link_data, NULL);
 	if (err)
diff -ruw linux-6.13.12/net/mac80211/link.c linux-6.13.12-fbx/net/mac80211/link.c
--- linux-6.13.12/net/mac80211/link.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/link.c	2025-09-25 17:40:37.819378393 +0200
@@ -12,6 +12,62 @@
 #include "key.h"
 #include "debugfs_netdev.h"
 
+static void ieee80211_update_apvlan_links(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_sub_if_data *vlan;
+	struct ieee80211_link_data *link;
+	u16 master_links = sdata->vif.valid_links;
+	u16 new_links, vlan_links;
+	unsigned long add;
+
+	list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
+		int link_id;
+
+		if (!vlan)
+			continue;
+
+		/* No support for 4addr with MLO yet */
+		if (vlan->wdev.use_4addr)
+			return;
+
+		vlan_links = vlan->vif.valid_links;
+
+		new_links = master_links;
+
+		add = new_links & ~vlan_links;
+		if (!add)
+			continue;
+
+		ieee80211_vif_set_links(vlan, add, 0);
+
+		for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+			link = sdata_dereference(vlan->link[link_id], vlan);
+			ieee80211_link_vlan_copy_chanctx(link);
+		}
+	}
+}
+
+void ieee80211_apvlan_link_setup(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_sub_if_data *master = container_of(sdata->bss,
+					    struct ieee80211_sub_if_data, u.ap);
+	u16 new_links = master->vif.valid_links;
+	unsigned long add;
+	int link_id;
+
+	if (!master->vif.valid_links)
+		return;
+
+	add = new_links;
+	for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
+		sdata->wdev.valid_links |= BIT(link_id);
+		ether_addr_copy(sdata->wdev.links[link_id].addr,
+				master->wdev.links[link_id].addr);
+	}
+
+	ieee80211_vif_set_links(sdata, new_links, 0);
+}
+
 void ieee80211_link_setup(struct ieee80211_link_data *link)
 {
 	if (link->sdata->vif.type == NL80211_IFTYPE_STATION)
@@ -31,6 +87,22 @@
 	rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf);
 	rcu_assign_pointer(sdata->link[link_id], link);
 
+	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+		struct ieee80211_sub_if_data *master;
+		struct ieee80211_bss_conf *master_conf;
+
+		master = container_of(sdata->bss,
+				      struct ieee80211_sub_if_data, u.ap);
+		if (!master)
+			goto skip_vlan_conf;
+
+		master_conf = sdata_dereference(master->vif.link_conf[link_id],
+						master);
+
+		memcpy(link_conf, master_conf, sizeof(*link_conf));
+	}
+
+skip_vlan_conf:
 	link->sdata = sdata;
 	link->link_id = link_id;
 	link->conf = link_conf;
@@ -54,6 +126,7 @@
 	if (!deflink) {
 		switch (sdata->vif.type) {
 		case NL80211_IFTYPE_AP:
+		case NL80211_IFTYPE_AP_VLAN:
 			ether_addr_copy(link_conf->addr,
 					sdata->wdev.links[link_id].addr);
 			link_conf->bssid = link_conf->addr;
@@ -177,6 +250,7 @@
 
 	switch (sdata->vif.type) {
 	case NL80211_IFTYPE_AP:
+	case NL80211_IFTYPE_AP_VLAN:
 		/* in an AP all links are always active */
 		sdata->vif.active_links = valid_links;
 
@@ -278,12 +352,16 @@
 		ieee80211_set_vif_links_bitmaps(sdata, new_links, dormant_links);
 
 		/* tell the driver */
+		if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
 		ret = drv_change_vif_links(sdata->local, sdata,
 					   old_links & old_active,
 					   new_links & sdata->vif.active_links,
 					   old);
 		if (!new_links)
 			ieee80211_debugfs_recreate_netdev(sdata, false);
+
+		if (sdata->vif.type == NL80211_IFTYPE_AP)
+			ieee80211_update_apvlan_links(sdata);
 	}
 
 	if (ret) {
diff -ruw linux-6.13.12/net/mac80211/main.c linux-6.13.12-fbx/net/mac80211/main.c
--- linux-6.13.12/net/mac80211/main.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/main.c	2025-09-25 17:40:37.819378393 +0200
@@ -5,7 +5,7 @@
  * Copyright 2006-2007	Jiri Benc <jbenc@suse.cz>
  * Copyright 2013-2014  Intel Mobile Communications GmbH
  * Copyright (C) 2017     Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
  */
 
 #include <net/mac80211.h>
@@ -33,6 +33,7 @@
 #include "wep.h"
 #include "led.h"
 #include "debugfs.h"
+#include "fbx_scum.h"
 
 void ieee80211_configure_filter(struct ieee80211_local *local)
 {
@@ -726,8 +727,13 @@
 	},
 	[NL80211_IFTYPE_P2P_DEVICE] = {
 		.tx = 0xffff,
+		/*
+		 * To support P2P PASN pairing, let user space also register
+		 * to receive AUTH frames on the P2P device interface.
+		 */
 		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
-			BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
+			BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
+			BIT(IEEE80211_STYPE_AUTH >> 4),
 	},
 };
 
@@ -1305,6 +1311,11 @@
 			    sband->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40 &&
 			    !(iftd->he_cap.he_cap_elem.phy_cap_info[0] & he_40_mhz_cap))
 				return -EINVAL;
+
+			/* no support for per-band vendor elems with MLO */
+			if (WARN_ON(iftd->vendor_elems.len &&
+				    hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO))
+				return -EINVAL;
 		}
 
 		/* HT, VHT, HE require QoS, thus >= 4 queues */
diff -ruw linux-6.13.12/net/mac80211/mesh_plink.c linux-6.13.12-fbx/net/mac80211/mesh_plink.c
--- linux-6.13.12/net/mac80211/mesh_plink.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/mesh_plink.c	2025-09-25 17:40:37.823378413 +0200
@@ -432,15 +432,14 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_supported_band *sband;
-	u32 rates, basic_rates = 0, changed = 0;
+	u32 rates, changed = 0;
 	enum ieee80211_sta_rx_bandwidth bw = sta->sta.deflink.bandwidth;
 
 	sband = ieee80211_get_sband(sdata);
 	if (!sband)
 		return;
 
-	rates = ieee80211_sta_get_rates(sdata, elems, sband->band,
-					&basic_rates);
+	rates = ieee80211_sta_get_rates(sdata, elems, sband->band, NULL);
 
 	spin_lock_bh(&sta->mesh->plink_lock);
 	sta->deflink.rx_stats.last_rx = jiffies;
diff -ruw linux-6.13.12/net/mac80211/mlme.c linux-6.13.12-fbx/net/mac80211/mlme.c
--- linux-6.13.12/net/mac80211/mlme.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/mlme.c	2025-09-25 17:40:37.823378413 +0200
@@ -347,6 +347,7 @@
 
 static bool
 ieee80211_verify_peer_he_mcs_support(struct ieee80211_sub_if_data *sdata,
+				     int link_id,
 				     const struct ieee80211_he_cap_elem *he_cap,
 				     const struct ieee80211_he_operation *he_op)
 {
@@ -374,7 +375,7 @@
 	 */
 	if ((mcs_80_map_tx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED ||
 	    (mcs_80_map_rx & 0x3) == IEEE80211_HE_MCS_NOT_SUPPORTED) {
-		sdata_info(sdata,
+		link_id_info(sdata, link_id,
 			   "Missing mandatory rates for 1 Nss, rx 0x%x, tx 0x%x, disable HE\n",
 			   mcs_80_map_tx, mcs_80_map_rx);
 		return false;
@@ -420,9 +421,9 @@
 		if (ap_rx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
 		    ap_tx_val == IEEE80211_HE_MCS_NOT_SUPPORTED ||
 		    ap_rx_val < ap_op_val || ap_tx_val < ap_op_val) {
-			sdata_info(sdata,
+			link_id_info(sdata, link_id,
 				   "Invalid rates for %d Nss, rx %d, tx %d oper %d, disable HE\n",
-				   nss, ap_rx_val, ap_rx_val, ap_op_val);
+				     nss, ap_rx_val, ap_tx_val, ap_op_val);
 			return false;
 		}
 	}
@@ -592,6 +593,68 @@
 	return true;
 }
 
+static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
+				const u8 *supp_rates,
+				unsigned int supp_rates_len,
+				const u8 *ext_supp_rates,
+				unsigned int ext_supp_rates_len,
+				u32 *rates, u32 *basic_rates,
+				unsigned long *unknown_rates_selectors,
+				bool *have_higher_than_11mbit,
+				int *min_rate, int *min_rate_index)
+{
+	int i, j;
+
+	for (i = 0; i < supp_rates_len + ext_supp_rates_len; i++) {
+		u8 supp_rate = i < supp_rates_len ?
+				supp_rates[i] :
+				ext_supp_rates[i - supp_rates_len];
+		int rate = supp_rate & 0x7f;
+		bool is_basic = !!(supp_rate & 0x80);
+
+		if ((rate * 5) > 110 && have_higher_than_11mbit)
+			*have_higher_than_11mbit = true;
+
+		/*
+		 * Skip membership selectors since they're not rates.
+		 *
+		 * Note: Even though the membership selector and the basic
+		 *	 rate flag share the same bit, they are not exactly
+		 *	 the same.
+		 */
+		if (is_basic && rate >= BSS_MEMBERSHIP_SELECTOR_MIN) {
+			if (unknown_rates_selectors)
+				set_bit(rate, unknown_rates_selectors);
+			continue;
+		}
+
+		for (j = 0; j < sband->n_bitrates; j++) {
+			struct ieee80211_rate *br;
+			int brate;
+
+			br = &sband->bitrates[j];
+
+			brate = DIV_ROUND_UP(br->bitrate, 5);
+			if (brate == rate) {
+				if (rates)
+					*rates |= BIT(j);
+				if (is_basic && basic_rates)
+					*basic_rates |= BIT(j);
+				if (min_rate && (rate * 5) < *min_rate) {
+					*min_rate = rate * 5;
+					if (min_rate_index)
+						*min_rate_index = j;
+				}
+				break;
+			}
+		}
+
+		/* Handle an unknown entry as if it is an unknown selector */
+		if (is_basic && unknown_rates_selectors && j == sband->n_bitrates)
+			set_bit(rate, unknown_rates_selectors);
+	}
+}
+
 static bool ieee80211_chandef_usable(struct ieee80211_sub_if_data *sdata,
 				     const struct cfg80211_chan_def *chandef,
 				     u32 prohibited_flags)
@@ -822,7 +885,8 @@
 			      struct ieee80211_conn_settings *conn,
 			      struct cfg80211_bss *cbss, int link_id,
 			      struct ieee80211_chan_req *chanreq,
-			      struct cfg80211_chan_def *ap_chandef)
+			      struct cfg80211_chan_def *ap_chandef,
+			      unsigned long *userspace_selectors)
 {
 	const struct cfg80211_bss_ies *ies = rcu_dereference(cbss->ies);
 	struct ieee80211_bss *bss = (void *)cbss->priv;
@@ -836,6 +900,8 @@
 	struct ieee802_11_elems *elems;
 	struct ieee80211_supported_band *sband;
 	enum ieee80211_conn_mode ap_mode;
+	unsigned long unknown_rates_selectors[BITS_TO_LONGS(128)] = {};
+	unsigned long sta_selectors[BITS_TO_LONGS(128)] = {};
 	int ret;
 
 again:
@@ -864,6 +930,11 @@
 
 	sband = sdata->local->hw.wiphy->bands[channel->band];
 
+	ieee80211_get_rates(sband, elems->supp_rates, elems->supp_rates_len,
+			    elems->ext_supp_rates, elems->ext_supp_rates_len,
+			    NULL, NULL, unknown_rates_selectors, NULL, NULL,
+			    NULL);
+
 	switch (channel->band) {
 	case NL80211_BAND_S1GHZ:
 		if (WARN_ON(ap_mode != IEEE80211_CONN_MODE_S1G)) {
@@ -873,7 +944,7 @@
 		return elems;
 	case NL80211_BAND_6GHZ:
 		if (ap_mode < IEEE80211_CONN_MODE_HE) {
-			sdata_info(sdata,
+			link_id_info(sdata, link_id,
 				   "Rejecting non-HE 6/7 GHz connection");
 			ret = -EINVAL;
 			goto free;
@@ -914,6 +985,29 @@
 
 	chanreq->oper = *ap_chandef;
 
+	bitmap_copy(sta_selectors, userspace_selectors, 128);
+	if (conn->mode >= IEEE80211_CONN_MODE_HT)
+		set_bit(BSS_MEMBERSHIP_SELECTOR_HT_PHY, sta_selectors);
+	if (conn->mode >= IEEE80211_CONN_MODE_VHT)
+		set_bit(BSS_MEMBERSHIP_SELECTOR_VHT_PHY, sta_selectors);
+	if (conn->mode >= IEEE80211_CONN_MODE_HE)
+		set_bit(BSS_MEMBERSHIP_SELECTOR_HE_PHY, sta_selectors);
+	if (conn->mode >= IEEE80211_CONN_MODE_EHT)
+		set_bit(BSS_MEMBERSHIP_SELECTOR_EHT_PHY, sta_selectors);
+
+	/*
+	 * We do not support EPD or GLK so never add them.
+	 * SAE_H2E is handled through userspace_selectors.
+	 */
+
+	/* Check if we support all required features */
+	if (!bitmap_subset(unknown_rates_selectors, sta_selectors, 128)) {
+		link_id_info(sdata, link_id,
+			     "required basic rate or BSS membership selectors not supported or disabled, rejecting connection\n");
+		ret = -EINVAL;
+		goto free;
+	}
+
 	ieee80211_set_chanreq_ap(sdata, chanreq, conn, ap_chandef);
 
 	while (!ieee80211_chandef_usable(sdata, &chanreq->oper,
@@ -945,16 +1039,18 @@
 	}
 
 	if (chanreq->oper.width != ap_chandef->width || ap_mode != conn->mode)
-		sdata_info(sdata,
+		link_id_info(sdata, link_id,
 			   "regulatory prevented using AP config, downgraded\n");
 
 	if (conn->mode >= IEEE80211_CONN_MODE_HE &&
-	    (!ieee80211_verify_peer_he_mcs_support(sdata, (void *)elems->he_cap,
+	    (!ieee80211_verify_peer_he_mcs_support(sdata, link_id,
+						   (void *)elems->he_cap,
 						   elems->he_operation) ||
 	     !ieee80211_verify_sta_he_mcs_support(sdata, sband,
 						  elems->he_operation))) {
 		conn->mode = IEEE80211_CONN_MODE_VHT;
-		sdata_info(sdata, "required MCSes not supported, disabling HE\n");
+		link_id_info(sdata, link_id,
+			     "required MCSes not supported, disabling HE\n");
 	}
 
 	if (conn->mode >= IEEE80211_CONN_MODE_EHT &&
@@ -964,7 +1060,8 @@
 		conn->bw_limit = min_t(enum ieee80211_conn_bw_limit,
 				       conn->bw_limit,
 				       IEEE80211_CONN_BW_LIMIT_160);
-		sdata_info(sdata, "required MCSes not supported, disabling EHT\n");
+		link_id_info(sdata, link_id,
+			     "required MCSes not supported, disabling EHT\n");
 	}
 
 	/* the mode can only decrease, so this must terminate */
@@ -991,7 +1088,8 @@
 
 static int ieee80211_config_bw(struct ieee80211_link_data *link,
 			       struct ieee802_11_elems *elems,
-			       bool update, u64 *changed)
+			       bool update, u64 *changed,
+			       const char *frame)
 {
 	struct ieee80211_channel *channel = link->conf->chanreq.oper.chan;
 	struct ieee80211_sub_if_data *sdata = link->sdata;
@@ -1016,9 +1114,10 @@
 
 	if (ap_mode != link->u.mgd.conn.mode) {
 		link_info(link,
-			  "AP appears to change mode (expected %s, found %s), disconnect\n",
+			  "AP %pM appears to change mode (expected %s, found %s) in %s, disconnect\n",
+			  link->u.mgd.bssid,
 			  ieee80211_conn_mode_str(link->u.mgd.conn.mode),
-			  ieee80211_conn_mode_str(ap_mode));
+			  ieee80211_conn_mode_str(ap_mode), frame);
 		return -EINVAL;
 	}
 
@@ -1063,16 +1162,16 @@
 		return 0;
 
 	link_info(link,
-		  "AP %pM changed bandwidth, new used config is %d.%03d MHz, width %d (%d.%03d/%d MHz)\n",
-		  link->u.mgd.bssid, chanreq.oper.chan->center_freq,
+		  "AP %pM changed bandwidth in %s, new used config is %d.%03d MHz, width %d (%d.%03d/%d MHz)\n",
+		  link->u.mgd.bssid, frame, chanreq.oper.chan->center_freq,
 		  chanreq.oper.chan->freq_offset, chanreq.oper.width,
 		  chanreq.oper.center_freq1, chanreq.oper.freq1_offset,
 		  chanreq.oper.center_freq2);
 
 	if (!cfg80211_chandef_valid(&chanreq.oper)) {
 		sdata_info(sdata,
-			   "AP %pM changed caps/bw in a way we can't support - disconnect\n",
-			   link->u.mgd.bssid);
+			   "AP %pM changed caps/bw in %s in a way we can't support - disconnect\n",
+			   link->u.mgd.bssid, frame);
 		return -EINVAL;
 	}
 
@@ -1101,8 +1200,8 @@
 	ret = ieee80211_link_change_chanreq(link, &chanreq, changed);
 	if (ret) {
 		sdata_info(sdata,
-			   "AP %pM changed bandwidth to incompatible one - disconnect\n",
-			   link->u.mgd.bssid);
+			   "AP %pM changed bandwidth in %s to incompatible one - disconnect\n",
+			   link->u.mgd.bssid, frame);
 		return ret;
 	}
 
@@ -1419,23 +1518,25 @@
 #define PRESENT_ELEMS_MAX	8
 #define PRESENT_ELEM_EXT_OFFS	0x100
 
-static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
+static void
+ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
 					struct sk_buff *skb, u16 capab,
 					const struct element *ext_capa,
-					const u16 *present_elems);
+			    const u16 *present_elems,
+			    struct ieee80211_mgd_assoc_data *assoc_data);
 
-static size_t ieee80211_assoc_link_elems(struct ieee80211_sub_if_data *sdata,
+static size_t
+ieee80211_add_link_elems(struct ieee80211_sub_if_data *sdata,
 					 struct sk_buff *skb, u16 *capab,
 					 const struct element *ext_capa,
 					 const u8 *extra_elems,
 					 size_t extra_elems_len,
 					 unsigned int link_id,
 					 struct ieee80211_link_data *link,
-					 u16 *present_elems)
+			 u16 *present_elems,
+			 struct ieee80211_mgd_assoc_data *assoc_data)
 {
 	enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
-	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-	struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
 	struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
 	struct ieee80211_channel *chan = cbss->channel;
 	const struct ieee80211_sband_iftype_data *iftd;
@@ -1584,7 +1685,7 @@
 
 	if (link_id == assoc_data->assoc_link_id)
 		ieee80211_assoc_add_ml_elem(sdata, skb, orig_capab, ext_capa,
-					    present_elems);
+					    present_elems, assoc_data);
 
 	/* crash if somebody gets it wrong */
 	present_elems = NULL;
@@ -1663,14 +1764,14 @@
 		*len = skb->len - skb_len - 2;
 }
 
-static void ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
+static void
+ieee80211_assoc_add_ml_elem(struct ieee80211_sub_if_data *sdata,
 					struct sk_buff *skb, u16 capab,
 					const struct element *ext_capa,
-					const u16 *outer_present_elems)
+			    const u16 *outer_present_elems,
+			    struct ieee80211_mgd_assoc_data *assoc_data)
 {
 	struct ieee80211_local *local = sdata->local;
-	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-	struct ieee80211_mgd_assoc_data *assoc_data = ifmgd->assoc_data;
 	struct ieee80211_multi_link_elem *ml_elem;
 	struct ieee80211_mle_basic_common_info *common;
 	const struct wiphy_iftype_ext_capab *ift_ext_capa;
@@ -1743,16 +1844,17 @@
 		 * (if applicable) are skipped. So we only have
 		 * the capability field (remember the position and fill
 		 * later), followed by the elements added below by
-		 * calling ieee80211_assoc_link_elems().
+		 * calling ieee80211_add_link_elems().
 		 */
 		capab_pos = skb_put(skb, 2);
 
-		extra_used = ieee80211_assoc_link_elems(sdata, skb, &capab,
+		extra_used = ieee80211_add_link_elems(sdata, skb, &capab,
 							ext_capa,
 							extra_elems,
 							extra_elems_len,
 							link_id, NULL,
-							link_present_elems);
+						      link_present_elems,
+						      assoc_data);
 		if (extra_elems)
 			skb_put_data(skb, extra_elems + extra_used,
 				     extra_elems_len - extra_used);
@@ -1769,6 +1871,55 @@
 	ieee80211_fragment_element(skb, ml_elem_len, WLAN_EID_FRAGMENT);
 }
 
+static int
+ieee80211_link_common_elems_size(struct ieee80211_sub_if_data *sdata,
+				 enum nl80211_iftype iftype,
+				 struct cfg80211_bss *cbss,
+				 size_t elems_len)
+{
+	struct ieee80211_local *local = sdata->local;
+	const struct ieee80211_sband_iftype_data *iftd;
+	struct ieee80211_supported_band *sband;
+	size_t size = 0;
+
+	if (!cbss)
+		return size;
+
+	sband = local->hw.wiphy->bands[cbss->channel->band];
+
+	/* add STA profile elements length */
+	size += elems_len;
+
+	/* and supported rates length */
+	size += 4 + sband->n_bitrates;
+
+	/* supported channels */
+	size += 2 + 2 * sband->n_channels;
+
+	iftd = ieee80211_get_sband_iftype_data(sband, iftype);
+	if (iftd)
+		size += iftd->vendor_elems.len;
+
+	/* power capability */
+	size += 4;
+
+	/* HT, VHT, HE, EHT */
+	size += 2 + sizeof(struct ieee80211_ht_cap);
+	size += 2 + sizeof(struct ieee80211_vht_cap);
+	size += 2 + 1 + sizeof(struct ieee80211_he_cap_elem) +
+		sizeof(struct ieee80211_he_mcs_nss_supp) +
+		IEEE80211_HE_PPE_THRES_MAX_LEN;
+
+	if (sband->band == NL80211_BAND_6GHZ)
+		size += 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa);
+
+	size += 2 + 1 + sizeof(struct ieee80211_eht_cap_elem) +
+		sizeof(struct ieee80211_eht_mcs_nss_supp) +
+		IEEE80211_EHT_PPE_THRES_MAX_LEN;
+
+	return size;
+}
+
 static int ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
 {
 	struct ieee80211_local *local = sdata->local;
@@ -1807,42 +1958,15 @@
 
 	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
 		struct cfg80211_bss *cbss = assoc_data->link[link_id].bss;
-		const struct ieee80211_sband_iftype_data *iftd;
-		struct ieee80211_supported_band *sband;
+		size_t elems_len = assoc_data->link[link_id].elems_len;
 
 		if (!cbss)
 			continue;
 
-		sband = local->hw.wiphy->bands[cbss->channel->band];
-
 		n_links++;
-		/* add STA profile elements length */
-		size += assoc_data->link[link_id].elems_len;
-		/* and supported rates length */
-		size += 4 + sband->n_bitrates;
-		/* supported channels */
-		size += 2 + 2 * sband->n_channels;
-
-		iftd = ieee80211_get_sband_iftype_data(sband, iftype);
-		if (iftd)
-			size += iftd->vendor_elems.len;
 
-		/* power capability */
-		size += 4;
-
-		/* HT, VHT, HE, EHT */
-		size += 2 + sizeof(struct ieee80211_ht_cap);
-		size += 2 + sizeof(struct ieee80211_vht_cap);
-		size += 2 + 1 + sizeof(struct ieee80211_he_cap_elem) +
-			sizeof(struct ieee80211_he_mcs_nss_supp) +
-			IEEE80211_HE_PPE_THRES_MAX_LEN;
-
-		if (sband->band == NL80211_BAND_6GHZ)
-			size += 2 + 1 + sizeof(struct ieee80211_he_6ghz_capa);
-
-		size += 2 + 1 + sizeof(struct ieee80211_eht_cap_elem) +
-			sizeof(struct ieee80211_eht_mcs_nss_supp) +
-			IEEE80211_EHT_PPE_THRES_MAX_LEN;
+		size += ieee80211_link_common_elems_size(sdata, iftype, cbss,
+							 elems_len);
 
 		/* non-inheritance element */
 		size += 2 + 2 + PRESENT_ELEMS_MAX;
@@ -1940,12 +2064,12 @@
 
 	/* add the elements for the assoc (main) link */
 	link_capab = capab;
-	offset = ieee80211_assoc_link_elems(sdata, skb, &link_capab,
+	offset = ieee80211_add_link_elems(sdata, skb, &link_capab,
 					    ext_capa,
 					    assoc_data->ie,
 					    assoc_data->ie_len,
 					    assoc_data->assoc_link_id, link,
-					    present_elems);
+					  present_elems, assoc_data);
 	put_unaligned_le16(link_capab, capab_pos);
 
 	/* if present, add any custom non-vendor IEs */
@@ -3601,12 +3725,45 @@
 	netif_carrier_on(sdata->dev);
 }
 
+static void ieee80211_ml_reconf_reset(struct ieee80211_sub_if_data *sdata)
+{
+	struct ieee80211_mgd_assoc_data *add_links_data =
+		sdata->u.mgd.reconf.add_links_data;
+
+	if (!ieee80211_vif_is_mld(&sdata->vif) ||
+	    !(sdata->u.mgd.reconf.added_links |
+	      sdata->u.mgd.reconf.removed_links))
+		return;
+
+	wiphy_delayed_work_cancel(sdata->local->hw.wiphy,
+				  &sdata->u.mgd.reconf.wk);
+	sdata->u.mgd.reconf.added_links = 0;
+	sdata->u.mgd.reconf.removed_links = 0;
+	sdata->u.mgd.reconf.dialog_token = 0;
+
+	if (add_links_data) {
+		struct cfg80211_mlo_reconf_done_data done_data = {};
+		u8 link_id;
+
+		for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+		     link_id++)
+			done_data.links[link_id].bss =
+				add_links_data->link[link_id].bss;
+
+		cfg80211_mlo_reconf_add_done(sdata->dev, &done_data);
+
+		kfree(sdata->u.mgd.reconf.add_links_data);
+		sdata->u.mgd.reconf.add_links_data = NULL;
+	}
+}
+
 static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata,
 				   u16 stype, u16 reason, bool tx,
 				   u8 *frame_buf)
 {
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
 	struct ieee80211_local *local = sdata->local;
+	struct sta_info *ap_sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
 	unsigned int link_id;
 	u64 changed = 0;
 	struct ieee80211_prep_tx_info info = {
@@ -3617,6 +3774,9 @@
 
 	lockdep_assert_wiphy(local->hw.wiphy);
 
+	if (WARN_ON(!ap_sta))
+		return;
+
 	if (WARN_ON_ONCE(tx && !frame_buf))
 		return;
 
@@ -3680,8 +3840,16 @@
 
 	sdata->vif.cfg.ssid_len = 0;
 
-	/* remove AP and TDLS peers */
+	/* Remove TDLS peers */
+	__sta_info_flush(sdata, false, -1, ap_sta);
+
+	if (sdata->vif.driver_flags & IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC) {
+		/* Only move the AP state */
+		sta_info_move_state(ap_sta, IEEE80211_STA_NONE);
+	} else {
+		/* Remove AP peer */
 	sta_info_flush(sdata, -1);
+	}
 
 	/* finally reset all BSS / config parameters */
 	if (!ieee80211_vif_is_mld(&sdata->vif))
@@ -3732,6 +3900,14 @@
 		ieee80211_vif_cfg_change_notify(sdata, changed);
 	}
 
+	if (sdata->vif.driver_flags & IEEE80211_VIF_REMOVE_AP_AFTER_DISASSOC) {
+		/*
+		 * After notifying the driver about the disassoc,
+		 * remove the ap sta.
+		 */
+		sta_info_flush(sdata, -1);
+	}
+
 	/* disassociated - set to defaults now */
 	ieee80211_set_wmm_default(&sdata->deflink, false, false);
 
@@ -3793,6 +3969,12 @@
 	ieee80211_vif_set_links(sdata, 0, 0);
 
 	ifmgd->mcast_seq_last = IEEE80211_SN_MODULO;
+
+	/* if disconnection happens in the middle of the ML reconfiguration
+	 * flow, cfg80211 must be called to release the BSS references
+	 * obtained when the flow started.
+	 */
+	ieee80211_ml_reconf_reset(sdata);
 }
 
 static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
@@ -3907,9 +4089,6 @@
 
 	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
-	if (WARN_ON(ieee80211_vif_is_mld(&sdata->vif)))
-		return;
-
 	/*
 	 * Try sending broadcast probe requests for the last three
 	 * probe requests after the first ones failed since some
@@ -3955,9 +4134,6 @@
 
 	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
-	if (WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif)))
-		return;
-
 	if (!ieee80211_sdata_running(sdata))
 		return;
 
@@ -4228,6 +4404,8 @@
 
 	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
+	sdata->u.mgd.auth_data = NULL;
+
 	if (!assoc) {
 		/*
 		 * we are not authenticated yet, the only timer that could be
@@ -4249,7 +4427,6 @@
 
 	cfg80211_put_bss(sdata->local->hw.wiphy, auth_data->bss);
 	kfree(auth_data);
-	sdata->u.mgd.auth_data = NULL;
 }
 
 enum assoc_status {
@@ -4266,6 +4443,8 @@
 
 	lockdep_assert_wiphy(sdata->local->hw.wiphy);
 
+	sdata->u.mgd.assoc_data = NULL;
+
 	if (status != ASSOC_SUCCESS) {
 		/*
 		 * we are not associated yet, the only timer that could be
@@ -4304,7 +4483,6 @@
 	}
 
 	kfree(assoc_data);
-	sdata->u.mgd.assoc_data = NULL;
 }
 
 static void ieee80211_auth_challenge(struct ieee80211_sub_if_data *sdata,
@@ -4607,57 +4785,6 @@
 				    false);
 }
 
-static void ieee80211_get_rates(struct ieee80211_supported_band *sband,
-				u8 *supp_rates, unsigned int supp_rates_len,
-				u32 *rates, u32 *basic_rates,
-				bool *have_higher_than_11mbit,
-				int *min_rate, int *min_rate_index)
-{
-	int i, j;
-
-	for (i = 0; i < supp_rates_len; i++) {
-		int rate = supp_rates[i] & 0x7f;
-		bool is_basic = !!(supp_rates[i] & 0x80);
-
-		if ((rate * 5) > 110)
-			*have_higher_than_11mbit = true;
-
-		/*
-		 * Skip HT, VHT, HE, EHT and SAE H2E only BSS membership
-		 * selectors since they're not rates.
-		 *
-		 * Note: Even though the membership selector and the basic
-		 *	 rate flag share the same bit, they are not exactly
-		 *	 the same.
-		 */
-		if (supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HT_PHY) ||
-		    supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_VHT_PHY) ||
-		    supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_HE_PHY) ||
-		    supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_EHT_PHY) ||
-		    supp_rates[i] == (0x80 | BSS_MEMBERSHIP_SELECTOR_SAE_H2E))
-			continue;
-
-		for (j = 0; j < sband->n_bitrates; j++) {
-			struct ieee80211_rate *br;
-			int brate;
-
-			br = &sband->bitrates[j];
-
-			brate = DIV_ROUND_UP(br->bitrate, 5);
-			if (brate == rate) {
-				*rates |= BIT(j);
-				if (is_basic)
-					*basic_rates |= BIT(j);
-				if ((rate * 5) < *min_rate) {
-					*min_rate = rate * 5;
-					*min_rate_index = j;
-				}
-				break;
-			}
-		}
-	}
-}
-
 static bool ieee80211_twt_req_supported(struct ieee80211_sub_if_data *sdata,
 					struct ieee80211_supported_band *sband,
 					const struct link_sta_info *link_sta,
@@ -4719,7 +4846,8 @@
 					u64 *changed)
 {
 	struct ieee80211_sub_if_data *sdata = link->sdata;
-	struct ieee80211_mgd_assoc_data *assoc_data = sdata->u.mgd.assoc_data;
+	struct ieee80211_mgd_assoc_data *assoc_data =
+		sdata->u.mgd.assoc_data ?: sdata->u.mgd.reconf.add_links_data;
 	struct ieee80211_bss_conf *bss_conf = link->conf;
 	struct ieee80211_local *local = sdata->local;
 	unsigned int link_id = link->link_id;
@@ -4907,7 +5035,7 @@
 	/* check/update if AP changed anything in assoc response vs. scan */
 	if (ieee80211_config_bw(link, elems,
 				link_id == assoc_data->assoc_link_id,
-				changed)) {
+				changed, "assoc response")) {
 		ret = false;
 		goto out;
 	}
@@ -5139,7 +5267,9 @@
 	sband = local->hw.wiphy->bands[cbss->channel->band];
 
 	ieee80211_get_rates(sband, bss->supp_rates, bss->supp_rates_len,
-			    &rates, &basic_rates, &have_higher_than_11mbit,
+			    NULL, 0,
+			    &rates, &basic_rates, NULL,
+			    &have_higher_than_11mbit,
 			    &min_rate, &min_rate_index);
 
 	/*
@@ -5532,7 +5662,8 @@
 				  struct ieee80211_link_data *link,
 				  int link_id,
 				  struct cfg80211_bss *cbss, bool mlo,
-				  struct ieee80211_conn_settings *conn)
+				  struct ieee80211_conn_settings *conn,
+				  unsigned long *userspace_selectors)
 {
 	struct ieee80211_local *local = sdata->local;
 	bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ;
@@ -5545,7 +5676,8 @@
 
 	rcu_read_lock();
 	elems = ieee80211_determine_chan_mode(sdata, conn, cbss, link_id,
-					      &chanreq, &ap_chandef);
+					      &chanreq, &ap_chandef,
+					      userspace_selectors);
 
 	if (IS_ERR(elems)) {
 		rcu_read_unlock();
@@ -5739,7 +5871,8 @@
 			link->u.mgd.conn = assoc_data->link[link_id].conn;
 
 			err = ieee80211_prep_channel(sdata, link, link_id, cbss,
-						     true, &link->u.mgd.conn);
+						     true, &link->u.mgd.conn,
+						     assoc_data->userspace_selectors);
 			if (err) {
 				link_info(link, "prep_channel failed\n");
 				goto out_err;
@@ -5857,6 +5990,8 @@
 	if (!assoc_data)
 		return;
 
+	info.link_id = assoc_data->assoc_link_id;
+
 	parse_params.mode =
 		assoc_data->link[assoc_data->assoc_link_id].conn.mode;
 
@@ -7074,7 +7209,7 @@
 
 	changed |= ieee80211_recalc_twt_req(sdata, sband, link, link_sta, elems);
 
-	if (ieee80211_config_bw(link, elems, true, &changed)) {
+	if (ieee80211_config_bw(link, elems, true, &changed, "beacon")) {
 		ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
 				       WLAN_REASON_DEAUTH_LEAVING,
 				       true, deauth_buf);
@@ -8153,6 +8288,25 @@
 				     link->u.mgd.driver_smps_mode);
 }
 
+static void ieee80211_ml_sta_reconf_timeout(struct wiphy *wiphy,
+					    struct wiphy_work *work)
+{
+	struct ieee80211_sub_if_data *sdata =
+		container_of(work, struct ieee80211_sub_if_data,
+			     u.mgd.reconf.wk.work);
+
+	if (!sdata->u.mgd.reconf.added_links &&
+	    !sdata->u.mgd.reconf.removed_links)
+		return;
+
+	sdata_info(sdata,
+		   "mlo: reconf: timeout: added=0x%x, removed=0x%x\n",
+		   sdata->u.mgd.reconf.added_links,
+		   sdata->u.mgd.reconf.removed_links);
+
+	__ieee80211_disconnect(sdata);
+}
+
 /* interface setup */
 void ieee80211_sta_setup_sdata(struct ieee80211_sub_if_data *sdata)
 {
@@ -8167,6 +8321,8 @@
 				ieee80211_tdls_peer_del_work);
 	wiphy_delayed_work_init(&ifmgd->ml_reconf_work,
 				ieee80211_ml_reconf_work);
+	wiphy_delayed_work_init(&ifmgd->reconf.wk,
+				ieee80211_ml_sta_reconf_timeout);
 	timer_setup(&ifmgd->timer, ieee80211_sta_timer, 0);
 	timer_setup(&ifmgd->bcn_mon_timer, ieee80211_sta_bcn_mon_timer, 0);
 	timer_setup(&ifmgd->conn_mon_timer, ieee80211_sta_conn_mon_timer, 0);
@@ -8227,6 +8383,9 @@
 	if (sdata->u.mgd.assoc_data)
 		ether_addr_copy(link->conf->addr,
 				sdata->u.mgd.assoc_data->link[link_id].addr);
+	else if (sdata->u.mgd.reconf.add_links_data)
+		ether_addr_copy(link->conf->addr,
+				sdata->u.mgd.reconf.add_links_data->link[link_id].addr);
 	else if (!is_valid_ether_addr(link->conf->addr))
 		eth_random_addr(link->conf->addr);
 }
@@ -8249,7 +8408,8 @@
 				     struct cfg80211_bss *cbss, s8 link_id,
 				     const u8 *ap_mld_addr, bool assoc,
 				     struct ieee80211_conn_settings *conn,
-				     bool override)
+				     bool override,
+				     unsigned long *userspace_selectors)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
@@ -8388,7 +8548,8 @@
 		 */
 		link->u.mgd.conn = *conn;
 		err = ieee80211_prep_channel(sdata, link, link->link_id, cbss,
-					     mlo, &link->u.mgd.conn);
+					     mlo, &link->u.mgd.conn,
+					     userspace_selectors);
 		if (err) {
 			if (new_sta)
 				sta_info_free(local, new_sta);
@@ -8504,6 +8665,22 @@
 	return ret;
 }
 
+static void ieee80211_parse_cfg_selectors(unsigned long *userspace_selectors,
+					  const u8 *supported_selectors,
+					  u8 supported_selectors_len)
+{
+	if (supported_selectors) {
+		for (int i = 0; i < supported_selectors_len; i++) {
+			set_bit(supported_selectors[i],
+				userspace_selectors);
+		}
+	} else {
+		/* Assume SAE_H2E support for backward compatibility. */
+		set_bit(BSS_MEMBERSHIP_SELECTOR_SAE_H2E,
+			userspace_selectors);
+	}
+}
+
 /* config hooks */
 int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,
 		       struct cfg80211_auth_request *req)
@@ -8605,6 +8782,10 @@
 		memcpy(auth_data->key, req->key, req->key_len);
 	}
 
+	ieee80211_parse_cfg_selectors(auth_data->userspace_selectors,
+				      req->supported_selectors,
+				      req->supported_selectors_len);
+
 	auth_data->algorithm = auth_alg;
 
 	/* try to authenticate/probe */
@@ -8658,7 +8839,8 @@
 
 	err = ieee80211_prep_connection(sdata, req->bss, req->link_id,
 					req->ap_mld_addr, cont_auth,
-					&conn, false);
+					&conn, false,
+					auth_data->userspace_selectors);
 	if (err)
 		goto err_clear;
 
@@ -8945,6 +9127,10 @@
 					    false);
 	}
 
+	ieee80211_parse_cfg_selectors(assoc_data->userspace_selectors,
+				      req->supported_selectors,
+				      req->supported_selectors_len);
+
 	memcpy(&ifmgd->ht_capa, &req->ht_capa, sizeof(ifmgd->ht_capa));
 	memcpy(&ifmgd->ht_capa_mask, &req->ht_capa_mask,
 	       sizeof(ifmgd->ht_capa_mask));
@@ -9191,7 +9377,8 @@
 		/* only calculate the mode, hence link == NULL */
 		err = ieee80211_prep_channel(sdata, NULL, i,
 					     assoc_data->link[i].bss, true,
-					     &assoc_data->link[i].conn);
+					     &assoc_data->link[i].conn,
+					     assoc_data->userspace_selectors);
 		if (err) {
 			req->links[i].error = err;
 			goto err_clear;
@@ -9207,7 +9394,8 @@
 	err = ieee80211_prep_connection(sdata, cbss, req->link_id,
 					req->ap_mld_addr, true,
 					&assoc_data->link[assoc_link_id].conn,
-					override);
+					override,
+					assoc_data->userspace_selectors);
 	if (err)
 		goto err_clear;
 
@@ -9451,3 +9639,697 @@
 	_ieee80211_enable_rssi_reports(sdata, 0, 0);
 }
 EXPORT_SYMBOL(ieee80211_disable_rssi_reports);
+
+static void ieee80211_ml_reconf_selectors(unsigned long *userspace_selectors)
+{
+	*userspace_selectors = 0;
+
+	/* these selectors are mandatory for ML reconfiguration */
+	set_bit(BSS_MEMBERSHIP_SELECTOR_SAE_H2E, userspace_selectors);
+	set_bit(BSS_MEMBERSHIP_SELECTOR_HE_PHY, userspace_selectors);
+	set_bit(BSS_MEMBERSHIP_SELECTOR_EHT_PHY, userspace_selectors);
+}
+
+void ieee80211_process_ml_reconf_resp(struct ieee80211_sub_if_data *sdata,
+				      struct ieee80211_mgmt *mgmt, size_t len)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
+	struct ieee80211_mgd_assoc_data *add_links_data =
+		ifmgd->reconf.add_links_data;
+	struct sta_info *sta;
+	struct cfg80211_mlo_reconf_done_data done_data = {};
+	u16 sta_changed_links = sdata->u.mgd.reconf.added_links |
+		                sdata->u.mgd.reconf.removed_links;
+	u16 link_mask, valid_links;
+	unsigned int link_id;
+	unsigned long userspace_selectors;
+	size_t orig_len = len;
+	u8 i, group_key_data_len;
+	u8 *pos;
+
+	if (!ieee80211_vif_is_mld(&sdata->vif) ||
+	    len < offsetofend(typeof(*mgmt), u.action.u.ml_reconf_resp) ||
+	    mgmt->u.action.u.ml_reconf_resp.dialog_token !=
+	    sdata->u.mgd.reconf.dialog_token ||
+	    !sta_changed_links)
+		return;
+
+	pos = mgmt->u.action.u.ml_reconf_resp.variable;
+	len -= offsetofend(typeof(*mgmt), u.action.u.ml_reconf_resp);
+
+	/* each status duple is 3 octets */
+	if (len < mgmt->u.action.u.ml_reconf_resp.count * 3) {
+		sdata_info(sdata,
+			   "mlo: reconf: unexpected len=%zu, count=%u\n",
+			   len, mgmt->u.action.u.ml_reconf_resp.count);
+		goto disconnect;
+	}
+
+	link_mask = sta_changed_links;
+	for (i = 0; i < mgmt->u.action.u.ml_reconf_resp.count; i++) {
+		u16 status = get_unaligned_le16(pos + 1);
+
+		link_id = *pos;
+
+		if (!(link_mask & BIT(link_id))) {
+			sdata_info(sdata,
+				   "mlo: reconf: unexpected link: %u, changed=0x%x\n",
+				   link_id, sta_changed_links);
+			goto disconnect;
+		}
+
+		/* clear the corresponding link, to detect the case that
+		 * the same link was included more than one time
+		 */
+		link_mask &= ~BIT(link_id);
+
+		/* Handle failure to remove links here. Failure to remove added
+		 * links will be done later in the flow.
+		 */
+		if (status != WLAN_STATUS_SUCCESS) {
+			sdata_info(sdata,
+				   "mlo: reconf: failed on link=%u, status=%u\n",
+				   link_id, status);
+
+			/* The AP MLD failed to remove a link that was already
+			 * removed locally. As this is not expected behavior,
+			 * disconnect
+			 */
+			if (sdata->u.mgd.reconf.removed_links & BIT(link_id))
+				goto disconnect;
+
+			/* The AP MLD failed to add a link. Remove it from the
+			 * added links.
+			 */
+			sdata->u.mgd.reconf.added_links &= ~BIT(link_id);
+		}
+
+		pos += 3;
+		len -= 3;
+	}
+
+	if (link_mask) {
+		sdata_info(sdata,
+			   "mlo: reconf: no response for links=0x%x\n",
+			   link_mask);
+		goto disconnect;
+	}
+
+	if (!sdata->u.mgd.reconf.added_links)
+		goto out;
+
+	if (len < 1 || len < 1 + *pos) {
+		sdata_info(sdata,
+			   "mlo: reconf: invalid group key data length");
+		goto disconnect;
+	}
+
+	/* The Group Key Data field must be present when links are added. This
+	 * field should be processed by userland.
+	 */
+	group_key_data_len = *pos++;
+
+	pos += group_key_data_len;
+	len -= group_key_data_len + 1;
+
+	/* Process the information for the added links */
+	sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
+	if (WARN_ON(!sta))
+		goto disconnect;
+
+	valid_links = sdata->vif.valid_links;
+	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+		if (!add_links_data->link[link_id].bss ||
+		    !(sdata->u.mgd.reconf.added_links & BIT(link_id)))
+
+			continue;
+
+		valid_links |= BIT(link_id);
+		if (ieee80211_sta_allocate_link(sta, link_id))
+			goto disconnect;
+	}
+
+	ieee80211_vif_set_links(sdata, valid_links, sdata->vif.dormant_links);
+	ieee80211_ml_reconf_selectors(&userspace_selectors);
+	link_mask = 0;
+	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+		struct cfg80211_bss *cbss = add_links_data->link[link_id].bss;
+		struct ieee80211_link_data *link;
+		struct link_sta_info *link_sta;
+		u64 changed = 0;
+
+		if (!cbss)
+			continue;
+
+		link = sdata_dereference(sdata->link[link_id], sdata);
+		if (WARN_ON(!link))
+			goto disconnect;
+
+		link_info(link,
+			  "mlo: reconf: local address %pM, AP link address %pM\n",
+			  add_links_data->link[link_id].addr,
+			  add_links_data->link[link_id].bss->bssid);
+
+		link_sta = rcu_dereference_protected(sta->link[link_id],
+						     lockdep_is_held(&local->hw.wiphy->mtx));
+		if (WARN_ON(!link_sta))
+			goto disconnect;
+
+		if (!link->u.mgd.have_beacon) {
+			const struct cfg80211_bss_ies *ies;
+
+			rcu_read_lock();
+			ies = rcu_dereference(cbss->beacon_ies);
+			if (ies)
+				link->u.mgd.have_beacon = true;
+			else
+				ies = rcu_dereference(cbss->ies);
+			ieee80211_get_dtim(ies,
+					   &link->conf->sync_dtim_count,
+					   &link->u.mgd.dtim_period);
+			link->conf->beacon_int = cbss->beacon_interval;
+			rcu_read_unlock();
+		}
+
+		link->conf->dtim_period = link->u.mgd.dtim_period ?: 1;
+
+		link->u.mgd.conn = add_links_data->link[link_id].conn;
+		if (ieee80211_prep_channel(sdata, link, link_id, cbss,
+					   true, &link->u.mgd.conn,
+					   &userspace_selectors)) {
+			link_info(link, "mlo: reconf: prep_channel failed\n");
+			goto disconnect;
+		}
+
+		if (ieee80211_mgd_setup_link_sta(link, sta, link_sta,
+						 add_links_data->link[link_id].bss))
+			goto disconnect;
+
+		if (!ieee80211_assoc_config_link(link, link_sta,
+						 add_links_data->link[link_id].bss,
+						 mgmt, pos, len,
+						 &changed))
+			goto disconnect;
+
+		/* The AP MLD indicated success for this link, but the station
+		 * profile status indicated otherwise. Since there is an
+		 * inconsistency in the ML reconfiguration response, disconnect
+		 */
+		if (add_links_data->link[link_id].status != WLAN_STATUS_SUCCESS)
+			goto disconnect;
+
+		ieee80211_sta_init_nss(link_sta);
+		if (ieee80211_sta_activate_link(sta, link_id))
+			goto disconnect;
+
+		changed |= ieee80211_link_set_associated(link, cbss);
+		ieee80211_link_info_change_notify(sdata, link, changed);
+
+		ieee80211_recalc_smps(sdata, link);
+		link_mask |= BIT(link_id);
+	}
+
+	sdata_info(sdata,
+		   "mlo: reconf: current valid_links=0x%x, added=0x%x\n",
+		   valid_links, link_mask);
+
+	/* links might have changed due to rejected ones, set them again */
+	ieee80211_vif_set_links(sdata, valid_links, sdata->vif.dormant_links);
+	ieee80211_vif_cfg_change_notify(sdata, BSS_CHANGED_MLD_VALID_LINKS);
+
+	ieee80211_recalc_ps(local);
+	ieee80211_recalc_ps_vif(sdata);
+
+	done_data.buf = (const u8 *)mgmt;
+	done_data.len = orig_len;
+	done_data.added_links = link_mask;
+
+	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++)
+		done_data.links[link_id].bss = add_links_data->link[link_id].bss;
+
+	cfg80211_mlo_reconf_add_done(sdata->dev, &done_data);
+	kfree(sdata->u.mgd.reconf.add_links_data);
+	sdata->u.mgd.reconf.add_links_data = NULL;
+out:
+	ieee80211_ml_reconf_reset(sdata);
+	return;
+
+disconnect:
+	__ieee80211_disconnect(sdata);
+}
+
+static struct sk_buff *
+ieee80211_build_ml_reconf_req(struct ieee80211_sub_if_data *sdata,
+			      struct ieee80211_mgd_assoc_data *add_links_data,
+			      u16 removed_links)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_mgmt *mgmt;
+	struct ieee80211_multi_link_elem *ml_elem;
+	struct ieee80211_mle_basic_common_info *common;
+	enum nl80211_iftype iftype = ieee80211_vif_type_p2p(&sdata->vif);
+	struct sk_buff *skb;
+	size_t size;
+	unsigned int link_id;
+	__le16 eml_capa = 0, mld_capa_ops = 0;
+	struct ieee80211_tx_info *info;
+	u8 common_size, var_common_size;
+	u8 *ml_elem_len;
+	u16 capab = 0;
+
+	size = local->hw.extra_tx_headroom + sizeof(*mgmt);
+
+	/* Consider the maximal length of the reconfiguration ML element */
+	size += sizeof(struct ieee80211_multi_link_elem);
+
+	/* The Basic ML element and the Reconfiguration ML element have the same
+	 * fixed common information fields in the context of an ML reconfiguration
+	 * action frame. The AP MLD MAC address must always be present.
+	 */
+	common_size = sizeof(*common);
+
+	/* when adding links, the MLD capabilities must be present */
+	var_common_size = 0;
+	if (add_links_data) {
+		const struct wiphy_iftype_ext_capab *ift_ext_capa =
+			cfg80211_get_iftype_ext_capa(local->hw.wiphy,
+						     ieee80211_vif_type_p2p(&sdata->vif));
+
+		if (ift_ext_capa) {
+			eml_capa = cpu_to_le16(ift_ext_capa->eml_capabilities);
+			mld_capa_ops =
+				cpu_to_le16(ift_ext_capa->mld_capa_and_ops);
+		}
+
+		/* MLD capabilities and operation */
+		var_common_size += 2;
+
+		/* EML capabilities */
+		if (eml_capa & cpu_to_le16((IEEE80211_EML_CAP_EMLSR_SUPP |
+					    IEEE80211_EML_CAP_EMLMR_SUPPORT)))
+			var_common_size += 2;
+	}
+
+	/* Add the common information length */
+	size += common_size + var_common_size;
+
+	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+		struct cfg80211_bss *cbss;
+		size_t elems_len;
+
+		if (removed_links & BIT(link_id)) {
+			size += sizeof(struct ieee80211_mle_per_sta_profile) +
+				ETH_ALEN;
+			continue;
+		}
+
+		if (!add_links_data || !add_links_data->link[link_id].bss)
+			continue;
+
+		elems_len = add_links_data->link[link_id].elems_len;
+		cbss = add_links_data->link[link_id].bss;
+
+		/* should be the same across all BSSes */
+		if (cbss->capability & WLAN_CAPABILITY_PRIVACY)
+			capab |= WLAN_CAPABILITY_PRIVACY;
+
+		size += 2 + sizeof(struct ieee80211_mle_per_sta_profile) +
+			ETH_ALEN;
+
+		/* SSID element + WMM */
+		size += 2 + sdata->vif.cfg.ssid_len + 9;
+		size += ieee80211_link_common_elems_size(sdata, iftype, cbss,
+							 elems_len);
+	}
+
+	skb = alloc_skb(size, GFP_KERNEL);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, local->hw.extra_tx_headroom);
+	mgmt = skb_put_zero(skb, offsetofend(struct ieee80211_mgmt,
+					     u.action.u.ml_reconf_req));
+
+	/* Add the MAC header */
+	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+					  IEEE80211_STYPE_ACTION);
+	memcpy(mgmt->da, sdata->vif.cfg.ap_addr, ETH_ALEN);
+	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
+	memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN);
+
+	/* Add the action frame fixed fields */
+	mgmt->u.action.category = WLAN_CATEGORY_PROTECTED_EHT;
+	mgmt->u.action.u.ml_reconf_req.action_code =
+		WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_REQ;
+
+	/* allocate a dialog token and store it */
+	sdata->u.mgd.reconf.dialog_token = ++sdata->u.mgd.dialog_token_alloc;
+	mgmt->u.action.u.ml_reconf_req.dialog_token =
+		sdata->u.mgd.reconf.dialog_token;
+
+	/* Add the ML reconfiguration element and the common information  */
+	skb_put_u8(skb, WLAN_EID_EXTENSION);
+	ml_elem_len = skb_put(skb, 1);
+	skb_put_u8(skb, WLAN_EID_EXT_EHT_MULTI_LINK);
+	ml_elem = skb_put(skb, sizeof(*ml_elem));
+	ml_elem->control =
+		cpu_to_le16(IEEE80211_ML_CONTROL_TYPE_RECONF |
+			    IEEE80211_MLC_RECONF_PRES_MLD_MAC_ADDR);
+	common = skb_put(skb, common_size);
+	common->len = common_size + var_common_size;
+	memcpy(common->mld_mac_addr, sdata->vif.addr, ETH_ALEN);
+
+	if (add_links_data) {
+		if (eml_capa &
+		    cpu_to_le16((IEEE80211_EML_CAP_EMLSR_SUPP |
+				 IEEE80211_EML_CAP_EMLMR_SUPPORT))) {
+			ml_elem->control |=
+				cpu_to_le16(IEEE80211_MLC_RECONF_PRES_EML_CAPA);
+			skb_put_data(skb, &eml_capa, sizeof(eml_capa));
+		}
+
+		ml_elem->control |=
+			cpu_to_le16(IEEE80211_MLC_RECONF_PRES_MLD_CAPA_OP);
+
+		skb_put_data(skb, &mld_capa_ops, sizeof(mld_capa_ops));
+	}
+
+	if (sdata->u.mgd.flags & IEEE80211_STA_ENABLE_RRM)
+		capab |= WLAN_CAPABILITY_RADIO_MEASURE;
+
+	/* Add the per station profile */
+	for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) {
+		u8 *subelem_len = NULL;
+		u16 ctrl;
+		const u8 *addr;
+
+		/* Skip links that are not changing */
+		if (!(removed_links & BIT(link_id)) &&
+		    (!add_links_data || !add_links_data->link[link_id].bss))
+			continue;
+
+		ctrl = link_id |
+		       IEEE80211_MLE_STA_RECONF_CONTROL_STA_MAC_ADDR_PRESENT;
+
+		if (removed_links & BIT(link_id)) {
+			struct ieee80211_bss_conf *conf =
+				sdata_dereference(sdata->vif.link_conf[link_id],
+						  sdata);
+			if (!conf)
+				continue;
+
+			addr = conf->addr;
+			ctrl |= u16_encode_bits(IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_DEL_LINK,
+						IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE);
+		} else {
+			addr = add_links_data->link[link_id].addr;
+			ctrl |= IEEE80211_MLE_STA_RECONF_CONTROL_COMPLETE_PROFILE |
+				u16_encode_bits(IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE_ADD_LINK,
+						IEEE80211_MLE_STA_RECONF_CONTROL_OPERATION_TYPE);
+		}
+
+		skb_put_u8(skb, IEEE80211_MLE_SUBELEM_PER_STA_PROFILE);
+		subelem_len = skb_put(skb, 1);
+
+		put_unaligned_le16(ctrl, skb_put(skb, sizeof(ctrl)));
+		skb_put_u8(skb, 1 + ETH_ALEN);
+		skb_put_data(skb, addr, ETH_ALEN);
+
+		if (!(removed_links & BIT(link_id))) {
+			u16 link_present_elems[PRESENT_ELEMS_MAX] = {};
+			size_t extra_used;
+			void *capab_pos;
+			u8 qos_info;
+
+			capab_pos = skb_put(skb, 2);
+
+			skb_put_u8(skb, WLAN_EID_SSID);
+			skb_put_u8(skb, sdata->vif.cfg.ssid_len);
+			skb_put_data(skb, sdata->vif.cfg.ssid,
+				     sdata->vif.cfg.ssid_len);
+
+			extra_used =
+				ieee80211_add_link_elems(sdata, skb, &capab, NULL,
+							 add_links_data->link[link_id].elems,
+							 add_links_data->link[link_id].elems_len,
+							 link_id, NULL,
+							 link_present_elems,
+							 add_links_data);
+
+			if (add_links_data->link[link_id].elems)
+				skb_put_data(skb,
+					     add_links_data->link[link_id].elems +
+					     extra_used,
+					     add_links_data->link[link_id].elems_len -
+					     extra_used);
+			if (sdata->u.mgd.flags & IEEE80211_STA_UAPSD_ENABLED) {
+				qos_info = sdata->u.mgd.uapsd_queues;
+				qos_info |= (sdata->u.mgd.uapsd_max_sp_len <<
+					     IEEE80211_WMM_IE_STA_QOSINFO_SP_SHIFT);
+			} else {
+				qos_info = 0;
+			}
+
+			ieee80211_add_wmm_info_ie(skb_put(skb, 9), qos_info);
+			put_unaligned_le16(capab, capab_pos);
+		}
+
+		ieee80211_fragment_element(skb, subelem_len,
+					   IEEE80211_MLE_SUBELEM_FRAGMENT);
+	}
+
+	ieee80211_fragment_element(skb, ml_elem_len, WLAN_EID_FRAGMENT);
+
+	info = IEEE80211_SKB_CB(skb);
+	info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS;
+
+	return skb;
+}
+
+int ieee80211_mgd_assoc_ml_reconf(struct ieee80211_sub_if_data *sdata,
+				  struct cfg80211_assoc_link *add_links,
+				  u16 rem_links)
+{
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_mgd_assoc_data *data = NULL;
+	struct sta_info *sta;
+	struct sk_buff *skb;
+	u16 added_links, new_valid_links;
+	int link_id, err;
+
+	if (!ieee80211_vif_is_mld(&sdata->vif) ||
+	    !(sdata->vif.cfg.mld_capa_op &
+	      IEEE80211_MLD_CAP_OP_LINK_RECONF_SUPPORT))
+		return -EINVAL;
+
+	/* No support for concurrent ML reconfiguration operation */
+	if (sdata->u.mgd.reconf.added_links ||
+	    sdata->u.mgd.reconf.removed_links)
+		return -EBUSY;
+
+	added_links = 0;
+	for (link_id = 0; add_links && link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+	     link_id++) {
+		if (!add_links[link_id].bss)
+			continue;
+
+		added_links |= BIT(link_id);
+	}
+
+	sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr);
+	if (WARN_ON(!sta))
+		return -ENOLINK;
+
+	if (rem_links & BIT(sta->sta.deflink.link_id))
+		return -EINVAL;
+
+	/* Adding links to the set of valid links is done only after a successful
+	 * ML reconfiguration frame exchange. Here prepare the data for the ML
+	 * reconfiguration frame construction and allocate the required
+	 * resources
+	 */
+	if (added_links) {
+		bool uapsd_supported;
+		unsigned long userspace_selectors;
+
+		data = kzalloc(sizeof(*data), GFP_KERNEL);
+		if (!data)
+			return -ENOMEM;
+
+		uapsd_supported = true;
+		ieee80211_ml_reconf_selectors(&userspace_selectors);
+		for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+		     link_id++) {
+			struct ieee80211_supported_band *sband;
+			struct cfg80211_bss *link_cbss = add_links[link_id].bss;
+			struct ieee80211_bss *bss;
+
+			if (!link_cbss)
+				continue;
+
+			bss = (void *)link_cbss->priv;
+
+			if (!bss->wmm_used) {
+				err = -EINVAL;
+				goto err_free;
+			}
+
+			if (link_cbss->channel->band == NL80211_BAND_S1GHZ) {
+				err = -EINVAL;
+				goto err_free;
+			}
+
+			eth_random_addr(data->link[link_id].addr);
+			data->link[link_id].conn =
+				ieee80211_conn_settings_unlimited;
+			sband =
+				local->hw.wiphy->bands[link_cbss->channel->band];
+
+			ieee80211_determine_our_sta_mode(sdata, sband,
+							 NULL, true, link_id,
+							 &data->link[link_id].conn);
+
+			data->link[link_id].bss = link_cbss;
+			data->link[link_id].disabled =
+				add_links[link_id].disabled;
+			data->link[link_id].elems =
+				(u8 *)add_links[link_id].elems;
+			data->link[link_id].elems_len =
+				add_links[link_id].elems_len;
+
+			if (!bss->uapsd_supported)
+				uapsd_supported = false;
+
+			if (data->link[link_id].conn.mode <
+			    IEEE80211_CONN_MODE_EHT) {
+				err = -EINVAL;
+				goto err_free;
+			}
+
+			err = ieee80211_mgd_get_ap_ht_vht_capa(sdata, data,
+							       link_id);
+			if (err) {
+				err = -EINVAL;
+				goto err_free;
+			}
+		}
+
+		/* Require U-APSD support to be similar to the current valid
+		 * links
+		 */
+		if (uapsd_supported !=
+		    !!(sdata->u.mgd.flags & IEEE80211_STA_UAPSD_ENABLED)) {
+			err = -EINVAL;
+			goto err_free;
+		}
+
+		for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+		     link_id++) {
+			if (!data->link[link_id].bss)
+				continue;
+
+			/* only used to verify the mode, nothing is allocated */
+			err = ieee80211_prep_channel(sdata, NULL, link_id,
+						     data->link[link_id].bss,
+						     true,
+						     &data->link[link_id].conn,
+						     &userspace_selectors);
+			if (err)
+				goto err_free;
+		}
+	}
+
+	/* link removal is done before the ML reconfiguration frame exchange so
+	 * that these links will not be used between their removal by the AP MLD
+	 * and before the station got the ML reconfiguration response. Based on
+	 * Section 35.3.6.4 in Draft P802.11be_D7.0 the AP MLD should accept the
+	 * link removal request.
+	 */
+	if (rem_links) {
+		u16 new_active_links = sdata->vif.active_links & ~rem_links;
+
+		new_valid_links = sdata->vif.valid_links & ~rem_links;
+
+		/* Should not be left with no valid links to perform the
+		 * ML reconfiguration
+		 */
+		if (!new_valid_links ||
+		    !(new_valid_links & ~sdata->vif.dormant_links)) {
+			sdata_info(sdata, "mlo: reconf: no valid links\n");
+			err = -EINVAL;
+			goto err_free;
+		}
+
+		if (new_active_links != sdata->vif.active_links) {
+			if (!new_active_links)
+				new_active_links =
+					BIT(__ffs(new_valid_links &
+						  ~sdata->vif.dormant_links));
+
+			err = ieee80211_set_active_links(&sdata->vif,
+							 new_active_links);
+			if (err) {
+				sdata_info(sdata,
+					   "mlo: reconf: failed set active links\n");
+				goto err_free;
+			}
+		}
+	}
+
+	/* Build the SKB before the link removal as the construction of the
+	 * station info for removed links requires the local address.
+	 * Invalidate the removed links, so that the transmission of the ML
+	 * reconfiguration request frame would not be done using them, as the AP
+	 * is expected to send the ML reconfiguration response frame on the link
+	 * on which the request was received.
+	 */
+	skb = ieee80211_build_ml_reconf_req(sdata, data, rem_links);
+	if (!skb) {
+		err = -ENOMEM;
+		goto err_free;
+	}
+
+	if (rem_links) {
+		u16 new_dormant_links = sdata->vif.dormant_links & ~rem_links;
+
+		err = ieee80211_vif_set_links(sdata, new_valid_links,
+					      new_dormant_links);
+		if (err) {
+			sdata_info(sdata,
+				   "mlo: reconf: failed set valid links\n");
+			kfree_skb(skb);
+			goto err_free;
+		}
+
+		for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+		     link_id++) {
+			if (!(rem_links & BIT(link_id)))
+				continue;
+
+			ieee80211_sta_remove_link(sta, link_id);
+		}
+
+		/* notify the driver and upper layers */
+		ieee80211_vif_cfg_change_notify(sdata,
+						BSS_CHANGED_MLD_VALID_LINKS);
+		cfg80211_links_removed(sdata->dev, rem_links);
+	}
+
+	sdata_info(sdata, "mlo: reconf: adding=0x%x, removed=0x%x\n",
+		   added_links, rem_links);
+
+	ieee80211_tx_skb(sdata, skb);
+
+	sdata->u.mgd.reconf.added_links = added_links;
+	sdata->u.mgd.reconf.add_links_data = data;
+	sdata->u.mgd.reconf.removed_links = rem_links;
+	wiphy_delayed_work_queue(sdata->local->hw.wiphy,
+				 &sdata->u.mgd.reconf.wk,
+				 IEEE80211_ASSOC_TIMEOUT_SHORT);
+	return 0;
+
+ err_free:
+	kfree(data);
+	return err;
+}
diff -ruw linux-6.13.12/net/mac80211/offchannel.c linux-6.13.12-fbx/net/mac80211/offchannel.c
--- linux-6.13.12/net/mac80211/offchannel.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/offchannel.c	2025-09-25 17:40:37.827378433 +0200
@@ -565,6 +565,7 @@
 				    struct sk_buff *txskb,
 				    enum ieee80211_roc_type type)
 {
+	struct cfg80211_scan_request *req = local->scan_req;
 	struct ieee80211_roc_work *roc, *tmp;
 	bool queued = false, combine_started = true;
 	int ret;
@@ -614,7 +615,7 @@
 
 	/* if there's no need to queue, handle it immediately */
 	if (list_empty(&local->roc_list) &&
-	    !local->scanning && !ieee80211_is_radar_required(local)) {
+	    !local->scanning && !ieee80211_is_radar_required(local, req)) {
 		/* if not HW assist, just queue & schedule work */
 		if (!local->ops->remain_on_channel) {
 			list_add_tail(&roc->list, &local->roc_list);
diff -ruw linux-6.13.12/net/mac80211/rx.c linux-6.13.12-fbx/net/mac80211/rx.c
--- linux-6.13.12/net/mac80211/rx.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/rx.c	2025-09-25 17:40:37.827378433 +0200
@@ -23,6 +23,7 @@
 #include <net/mac80211.h>
 #include <net/ieee80211_radiotap.h>
 #include <linux/unaligned.h>
+#include <net/fbx80211.h>
 
 #include "ieee80211_i.h"
 #include "driver-ops.h"
@@ -34,6 +35,8 @@
 #include "wme.h"
 #include "rate.h"
 
+#include "fbx_scum_monif.h"
+
 /*
  * monitor mode reception
  *
@@ -822,17 +825,12 @@
 		return NULL;
 	}
 
-	only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);
+	fbx80211_scum_mon_rx(local, origskb, rate, rtap_space);
 
-	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR)) {
-		if (only_monitor) {
-			dev_kfree_skb(origskb);
-			return NULL;
-		}
+	only_monitor = should_drop_frame(origskb, present_fcs_len, rtap_space);
 
-		return ieee80211_clean_skb(origskb, present_fcs_len,
-					   rtap_space);
-	}
+	if (!local->monitors || (status->flag & RX_FLAG_SKIP_MONITOR))
+		goto out;
 
 	ieee80211_handle_mu_mimo_mon(monitor_sdata, origskb, rtap_space);
 
@@ -885,6 +883,11 @@
 
 	if (!origskb)
 		return NULL;
+out:
+	if (only_monitor) {
+		dev_kfree_skb(origskb);
+		return NULL;
+	}
 
 	return ieee80211_clean_skb(origskb, present_fcs_len, rtap_space);
 }
@@ -3035,8 +3038,7 @@
 			check_da = NULL;
 			break;
 		case NL80211_IFTYPE_STATION:
-			if (!rx->sta ||
-			    !test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
+			if (!test_sta_flag(rx->sta, WLAN_STA_TDLS_PEER))
 				check_sa = NULL;
 			break;
 		case NL80211_IFTYPE_MESH_POINT:
@@ -3820,6 +3822,18 @@
 					      u.action.u.ttlm_res))
 				goto invalid;
 			goto queue;
+		case WLAN_PROTECTED_EHT_ACTION_LINK_RECONFIG_RESP:
+			if (sdata->vif.type != NL80211_IFTYPE_STATION)
+				break;
+
+			/* The reconfiguration response action frame must
+			 * contain at least one 'Status Duple' entry (3 octets)
+			 */
+			if (len <
+			    offsetofend(typeof(*mgmt),
+					u.action.u.ml_reconf_resp) + 3)
+				goto invalid;
+			goto queue;
 		default:
 			break;
 		}
@@ -4563,7 +4577,9 @@
 		return ieee80211_is_public_action(hdr, skb->len) ||
 		       ieee80211_is_probe_req(hdr->frame_control) ||
 		       ieee80211_is_probe_resp(hdr->frame_control) ||
-		       ieee80211_is_beacon(hdr->frame_control);
+		       ieee80211_is_beacon(hdr->frame_control) ||
+		       (ieee80211_is_auth(hdr->frame_control) &&
+			ether_addr_equal(sdata->vif.addr, hdr->addr1));
 	case NL80211_IFTYPE_NAN:
 		/* Currently no frames on NAN interface are allowed */
 		return false;
@@ -5305,6 +5321,12 @@
 	prev = NULL;
 
 	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+		unsigned int link_id;
+		struct ieee80211_bss_conf *bss_conf;
+		struct ieee80211_chanctx_conf *conf;
+		unsigned long valid_links = sdata->vif.valid_links;
+		bool flag = false;
+
 		if (!ieee80211_sdata_running(sdata))
 			continue;
 
@@ -5312,12 +5334,37 @@
 		    sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
 			continue;
 
+		if (valid_links) {
+			for_each_set_bit(link_id, &valid_links,
+					 IEEE80211_MLD_MAX_NUM_LINKS) {
+				bss_conf = rcu_dereference(sdata->vif.link_conf[link_id]);
+				if (bss_conf) {
+					conf = rcu_dereference(bss_conf->chanctx_conf);
+					if (conf && conf->def.chan &&
+					    conf->def.chan->center_freq == status->freq) {
+						flag = true;
+						break;
+					}
+				}
+			}
+		} else {
+			bss_conf = &sdata->vif.bss_conf;
+
+			if (bss_conf) {
+				conf = rcu_dereference(bss_conf->chanctx_conf);
+				if (conf && conf->def.chan &&
+				    conf->def.chan->center_freq == status->freq)
+					flag = true;
+			}
+		}
+
 		/*
 		 * frame is destined for this interface, but if it's
 		 * not also for the previous one we handle that after
 		 * the loop to avoid copying the SKB once too much
 		 */
 
+		if (flag) {
 		if (!prev) {
 			prev = sdata;
 			continue;
@@ -5328,13 +5375,47 @@
 
 		prev = sdata;
 	}
+	}
 
 	if (prev) {
+		unsigned int link_id;
+		struct ieee80211_bss_conf *bss_conf;
+		struct ieee80211_chanctx_conf *conf;
+		unsigned long valid_links = prev->vif.valid_links;
+		bool flag = false;
+
+		if (valid_links) {
+			for_each_set_bit(link_id, &valid_links,
+					 IEEE80211_MLD_MAX_NUM_LINKS) {
+				bss_conf = rcu_dereference(prev->vif.link_conf[link_id]);
+
+				if (bss_conf) {
+					conf = rcu_dereference(bss_conf->chanctx_conf);
+					if (conf && conf->def.chan &&
+					    conf->def.chan->center_freq == status->freq) {
+						flag = true;
+						break;
+					}
+				}
+			}
+		} else {
+			bss_conf = &prev->vif.bss_conf;
+
+			if (bss_conf) {
+				conf = rcu_dereference(bss_conf->chanctx_conf);
+				if (conf && conf->def.chan &&
+				    conf->def.chan->center_freq == status->freq)
+					flag = true;
+			}
+		}
+
+		if (flag) {
 		rx.sdata = prev;
 
 		if (ieee80211_rx_for_interface(&rx, skb, true))
 			return;
 	}
+	}
 
  out:
 	dev_kfree_skb(skb);
@@ -5380,7 +5461,7 @@
 	 * The same happens when we're not even started,
 	 * but that's worth a warning.
 	 */
-	if (WARN_ON(!local->started))
+	if (!local->started)
 		goto drop;
 
 	if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
diff -ruw linux-6.13.12/net/mac80211/scan.c linux-6.13.12-fbx/net/mac80211/scan.c
--- linux-6.13.12/net/mac80211/scan.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/scan.c	2025-09-25 17:40:37.827378433 +0200
@@ -571,7 +571,8 @@
 	return 0;
 }
 
-static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata)
+static bool __ieee80211_can_leave_ch(struct ieee80211_sub_if_data *sdata,
+				     struct cfg80211_scan_request *req)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct ieee80211_sub_if_data *sdata_iter;
@@ -579,7 +580,7 @@
 
 	lockdep_assert_wiphy(local->hw.wiphy);
 
-	if (!ieee80211_is_radar_required(local))
+	if (!ieee80211_is_radar_required(local, req))
 		return true;
 
 	if (!regulatory_pre_cac_allowed(local->hw.wiphy))
@@ -595,9 +596,10 @@
 }
 
 static bool ieee80211_can_scan(struct ieee80211_local *local,
-			       struct ieee80211_sub_if_data *sdata)
+			       struct ieee80211_sub_if_data *sdata,
+			       struct cfg80211_scan_request *req)
 {
-	if (!__ieee80211_can_leave_ch(sdata))
+	if (!__ieee80211_can_leave_ch(sdata, req))
 		return false;
 
 	if (!list_empty(&local->roc_list))
@@ -612,15 +614,19 @@
 
 void ieee80211_run_deferred_scan(struct ieee80211_local *local)
 {
+	struct cfg80211_scan_request *req;
+
 	lockdep_assert_wiphy(local->hw.wiphy);
 
 	if (!local->scan_req || local->scanning)
 		return;
 
+	req = local->scan_req;
+
 	if (!ieee80211_can_scan(local,
 				rcu_dereference_protected(
 					local->scan_sdata,
-					lockdep_is_held(&local->hw.wiphy->mtx))))
+					lockdep_is_held(&local->hw.wiphy->mtx)), req))
 		return;
 
 	wiphy_delayed_work_queue(local->hw.wiphy, &local->scan_work,
@@ -717,10 +723,10 @@
 	    !(sdata->vif.active_links & BIT(req->tsf_report_link_id)))
 		return -EINVAL;
 
-	if (!__ieee80211_can_leave_ch(sdata))
+	if (!__ieee80211_can_leave_ch(sdata, req))
 		return -EBUSY;
 
-	if (!ieee80211_can_scan(local, sdata)) {
+	if (!ieee80211_can_scan(local, sdata, req)) {
 		/* wait for the work to finish/time out */
 		rcu_assign_pointer(local->scan_req, req);
 		rcu_assign_pointer(local->scan_sdata, sdata);
diff -ruw linux-6.13.12/net/mac80211/sta_info.c linux-6.13.12-fbx/net/mac80211/sta_info.c
--- linux-6.13.12/net/mac80211/sta_info.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/sta_info.c	2025-09-25 17:40:37.831378453 +0200
@@ -463,9 +463,10 @@
 		return;
 
 	local_bh_disable();
-	if (!test_sta_flag(sta, WLAN_STA_PS_STA))
+	if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
+		if (test_sta_flag(sta, WLAN_STA_PS_DELIVER))
 		ieee80211_sta_ps_deliver_wakeup(sta);
-	else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL))
+	} else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL))
 		ieee80211_sta_ps_deliver_poll_response(sta);
 	else if (test_and_clear_sta_flag(sta, WLAN_STA_UAPSD))
 		ieee80211_sta_ps_deliver_uapsd(sta);
@@ -509,6 +510,24 @@
 	for (i = 0; i < ARRAY_SIZE(link_info->rx_stats_avg.chain_signal); i++)
 		ewma_signal_init(&link_info->rx_stats_avg.chain_signal[i]);
 
+	link_info->rx_omi_bw_rx = IEEE80211_STA_RX_BW_MAX;
+	link_info->rx_omi_bw_tx = IEEE80211_STA_RX_BW_MAX;
+	link_info->rx_omi_bw_staging = IEEE80211_STA_RX_BW_MAX;
+
+	/*
+	 * Cause (a) warning(s) if IEEE80211_STA_RX_BW_MAX != 320
+	 * or if new values are added to the enum.
+	 */
+	switch (link_info->cur_max_bandwidth) {
+	case IEEE80211_STA_RX_BW_20:
+	case IEEE80211_STA_RX_BW_40:
+	case IEEE80211_STA_RX_BW_80:
+	case IEEE80211_STA_RX_BW_160:
+	case IEEE80211_STA_RX_BW_MAX:
+		/* intentionally nothing */
+		break;
+	}
+
 	return 0;
 }
 
@@ -1581,7 +1600,7 @@
 
 
 int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans,
-		     int link_id)
+		     int link_id, struct sta_info *do_not_flush_sta)
 {
 	struct ieee80211_local *local = sdata->local;
 	struct sta_info *sta, *tmp;
@@ -1599,6 +1618,9 @@
 		    (!vlans || sdata->bss != sta->sdata->bss))
 			continue;
 
+		if (sta == do_not_flush_sta)
+			continue;
+
 		if (link_id >= 0 && sta->sta.valid_links &&
 		    !(sta->sta.valid_links & BIT(link_id)))
 			continue;
@@ -2856,6 +2878,10 @@
 	struct rate_control_ref *ref = NULL;
 	u32 thr = 0;
 
+	/* first check for override */
+	if (sta->deflink.pub->tp_override)
+		return sta->deflink.pub->tp_override;
+
 	if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
 		ref = local->rate_ctrl;
 
diff -ruw linux-6.13.12/net/mac80211/sta_info.h linux-6.13.12-fbx/net/mac80211/sta_info.h
--- linux-6.13.12/net/mac80211/sta_info.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/sta_info.h	2025-09-25 17:40:37.831378453 +0200
@@ -512,6 +512,10 @@
  * @status_stats.avg_ack_signal: average ACK signal
  * @cur_max_bandwidth: maximum bandwidth to use for TX to the station,
  *	taken from HT/VHT capabilities or VHT operating mode notification
+ * @rx_omi_bw_rx: RX OMI bandwidth restriction to apply for RX
+ * @rx_omi_bw_tx: RX OMI bandwidth restriction to apply for TX
+ * @rx_omi_bw_staging: RX OMI bandwidth restriction to apply later
+ *	during finalize
  * @debugfs_dir: debug filesystem directory dentry
  * @pub: public (driver visible) link STA data
  * TODO Move other link params from sta_info as required for MLD operation
@@ -561,6 +565,9 @@
 	} tx_stats;
 
 	enum ieee80211_sta_rx_bandwidth cur_max_bandwidth;
+	enum ieee80211_sta_rx_bandwidth rx_omi_bw_rx,
+					rx_omi_bw_tx,
+					rx_omi_bw_staging;
 
 #ifdef CONFIG_MAC80211_DEBUGFS
 	struct dentry *debugfs_dir;
@@ -701,6 +708,8 @@
 	struct airtime_info airtime[IEEE80211_NUM_ACS];
 	u16 airtime_weight;
 
+	u32 tp_override;
+
 	/*
 	 * Aggregation information, locked with lock.
 	 */
@@ -899,9 +908,10 @@
  * @link_id: if given (>=0), all those STA entries using @link_id only
  *	     will be removed. If -1 is passed, all STA entries will be
  *	     removed.
+ * @do_not_flush_sta: a station that shouldn't be flushed.
  */
 int __sta_info_flush(struct ieee80211_sub_if_data *sdata, bool vlans,
-		     int link_id);
+		     int link_id, struct sta_info *do_not_flush_sta);
 
 /**
  * sta_info_flush - flush matching STA entries from the STA table
@@ -916,7 +926,7 @@
 static inline int sta_info_flush(struct ieee80211_sub_if_data *sdata,
 				 int link_id)
 {
-	return __sta_info_flush(sdata, false, link_id);
+	return __sta_info_flush(sdata, false, link_id, NULL);
 }
 
 void sta_set_rate_info_tx(struct sta_info *sta,
diff -ruw linux-6.13.12/net/mac80211/status.c linux-6.13.12-fbx/net/mac80211/status.c
--- linux-6.13.12/net/mac80211/status.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/status.c	2025-09-25 17:40:37.831378453 +0200
@@ -17,6 +17,7 @@
 #include "mesh.h"
 #include "led.h"
 #include "wme.h"
+#include "fbx_scum.h"
 
 
 void ieee80211_tx_status_irqsafe(struct ieee80211_hw *hw,
@@ -930,6 +931,9 @@
 			if (sdata->u.mntr.flags & MONITOR_FLAG_SKIP_TX)
 				continue;
 
+			if (fbx80211_skip_mon(sdata))
+				continue;
+
 			if ((sdata->u.mntr.flags & MONITOR_FLAG_COOK_FRAMES) &&
 			    !send_to_cooked)
 				continue;
@@ -969,10 +973,12 @@
 	bool noack_success;
 	struct ieee80211_bar *bar;
 	int tid = IEEE80211_NUM_TIDS;
+	bool ack_requested;
 
+	ack_requested = !(info->flags & IEEE80211_TX_CTL_NO_ACK);
 	fc = hdr->frame_control;
 
-	if (status->sta) {
+	if (status->sta && ack_requested) {
 		sta = container_of(status->sta, struct sta_info, sta);
 
 		if (info->flags & IEEE80211_TX_STATUS_EOSP)
@@ -1147,6 +1153,7 @@
 	int rates_idx, retry_count;
 	bool acked, noack_success, ack_signal_valid;
 	u16 tx_time_est;
+	bool ack_requested;
 
 	if (pubsta) {
 		sta = container_of(pubsta, struct sta_info, sta);
@@ -1173,12 +1180,13 @@
 
 	rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
 
+	ack_requested = !(info->flags & IEEE80211_TX_CTL_NO_ACK);
 	acked = !!(info->flags & IEEE80211_TX_STAT_ACK);
 	noack_success = !!(info->flags & IEEE80211_TX_STAT_NOACK_TRANSMITTED);
 	ack_signal_valid =
 		!!(info->status.flags & IEEE80211_TX_STATUS_ACK_SIGNAL_VALID);
 
-	if (pubsta) {
+	if (pubsta && ack_requested) {
 		struct ieee80211_sub_if_data *sdata = sta->sdata;
 
 		if (!acked && !noack_success)
diff -ruw linux-6.13.12/net/mac80211/tests/Makefile linux-6.13.12-fbx/net/mac80211/tests/Makefile
--- linux-6.13.12/net/mac80211/tests/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/tests/Makefile	2025-09-25 17:40:37.831378453 +0200
@@ -1,3 +1,3 @@
-mac80211-tests-y += module.o elems.o mfp.o tpe.o
+mac80211-tests-y += module.o util.o elems.o mfp.o tpe.o
 
 obj-$(CONFIG_MAC80211_KUNIT_TEST) += mac80211-tests.o
diff -ruw linux-6.13.12/net/mac80211/trace.h linux-6.13.12-fbx/net/mac80211/trace.h
--- linux-6.13.12/net/mac80211/trace.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/trace.h	2025-09-25 17:40:37.831378453 +0200
@@ -823,9 +823,27 @@
 	TP_ARGS(local, value)
 );
 
-DEFINE_EVENT(local_u32_evt, drv_set_rts_threshold,
-	TP_PROTO(struct ieee80211_local *local, u32 value),
-	TP_ARGS(local, value)
+TRACE_EVENT(drv_set_rts_threshold,
+	TP_PROTO(struct ieee80211_local *local, u8 radio_id, u32 value),
+
+	TP_ARGS(local, radio_id, value),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(u8, radio_id)
+		__field(u32, value)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->radio_id = radio_id;
+		__entry->value = value;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT " value:%d",
+		LOCAL_PR_ARG, __entry->value
+	)
 );
 
 TRACE_EVENT(drv_set_coverage_class,
@@ -2173,13 +2191,14 @@
 TRACE_EVENT(drv_get_txpower,
 	TP_PROTO(struct ieee80211_local *local,
 		 struct ieee80211_sub_if_data *sdata,
-		 int dbm, int ret),
+		 unsigned int link_id, int dbm, int ret),
 
-	TP_ARGS(local, sdata, dbm, ret),
+	TP_ARGS(local, sdata, link_id, dbm, ret),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		VIF_ENTRY
+		__field(unsigned int, link_id)
 		__field(int, dbm)
 		__field(int, ret)
 	),
@@ -2187,13 +2206,14 @@
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		VIF_ASSIGN;
+		__entry->link_id = link_id;
 		__entry->dbm = dbm;
 		__entry->ret = ret;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT VIF_PR_FMT " dbm:%d ret:%d",
-		LOCAL_PR_ARG, VIF_PR_ARG, __entry->dbm, __entry->ret
+		LOCAL_PR_FMT VIF_PR_FMT " link_id:%d dbm:%d ret:%d",
+		LOCAL_PR_ARG, VIF_PR_ARG, __entry->link_id, __entry->dbm, __entry->ret
 	)
 );
 
@@ -2588,6 +2608,45 @@
  * Tracing for API calls that drivers call.
  */
 
+TRACE_EVENT(api_return_bool,
+	TP_PROTO(struct ieee80211_local *local, bool result),
+
+	TP_ARGS(local, result),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		__field(bool, result)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		__entry->result = result;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT " result=%d",
+		LOCAL_PR_ARG, __entry->result
+	)
+);
+
+TRACE_EVENT(api_return_void,
+	TP_PROTO(struct ieee80211_local *local),
+
+	TP_ARGS(local),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT, LOCAL_PR_ARG
+	)
+);
+
 TRACE_EVENT(api_start_tx_ba_session,
 	TP_PROTO(struct ieee80211_sta *sta, u16 tid),
 
@@ -3052,6 +3111,65 @@
 	)
 );
 
+TRACE_EVENT(api_prepare_rx_omi_bw,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 struct link_sta_info *link_sta,
+		 enum ieee80211_sta_rx_bandwidth bw),
+
+	TP_ARGS(local, sdata, link_sta, bw),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		VIF_ENTRY
+		STA_ENTRY
+		__field(int, link_id)
+		__field(u32, bw)
+		__field(bool, result)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		VIF_ASSIGN;
+		STA_NAMED_ASSIGN(link_sta->sta);
+		__entry->link_id = link_sta->link_id;
+		__entry->bw = bw;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT " " VIF_PR_FMT " " STA_PR_FMT " link:%d, bw:%d",
+		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG,
+		__entry->link_id, __entry->bw
+	)
+);
+
+TRACE_EVENT(api_finalize_rx_omi_bw,
+	TP_PROTO(struct ieee80211_local *local,
+		 struct ieee80211_sub_if_data *sdata,
+		 struct link_sta_info *link_sta),
+
+	TP_ARGS(local, sdata, link_sta),
+
+	TP_STRUCT__entry(
+		LOCAL_ENTRY
+		VIF_ENTRY
+		STA_ENTRY
+		__field(int, link_id)
+	),
+
+	TP_fast_assign(
+		LOCAL_ASSIGN;
+		VIF_ASSIGN;
+		STA_NAMED_ASSIGN(link_sta->sta);
+		__entry->link_id = link_sta->link_id;
+	),
+
+	TP_printk(
+		LOCAL_PR_FMT " " VIF_PR_FMT " " STA_PR_FMT " link:%d",
+		LOCAL_PR_ARG, VIF_PR_ARG, STA_PR_ARG, __entry->link_id
+	)
+);
+
 /*
  * Tracing for internal functions
  * (which may also be called in response to driver calls)
@@ -3059,49 +3177,55 @@
 
 TRACE_EVENT(wake_queue,
 	TP_PROTO(struct ieee80211_local *local, u16 queue,
-		 enum queue_stop_reason reason),
+		 enum queue_stop_reason reason, int refcount),
 
-	TP_ARGS(local, queue, reason),
+	TP_ARGS(local, queue, reason, refcount),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		__field(u16, queue)
 		__field(u32, reason)
+		__field(int, refcount)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		__entry->queue = queue;
 		__entry->reason = reason;
+		__entry->refcount = refcount;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " queue:%d, reason:%d",
-		LOCAL_PR_ARG, __entry->queue, __entry->reason
+		LOCAL_PR_FMT " queue:%d, reason:%d, refcount: %d",
+		LOCAL_PR_ARG, __entry->queue, __entry->reason,
+		__entry->refcount
 	)
 );
 
 TRACE_EVENT(stop_queue,
 	TP_PROTO(struct ieee80211_local *local, u16 queue,
-		 enum queue_stop_reason reason),
+		 enum queue_stop_reason reason, int refcount),
 
-	TP_ARGS(local, queue, reason),
+	TP_ARGS(local, queue, reason, refcount),
 
 	TP_STRUCT__entry(
 		LOCAL_ENTRY
 		__field(u16, queue)
 		__field(u32, reason)
+		__field(int, refcount)
 	),
 
 	TP_fast_assign(
 		LOCAL_ASSIGN;
 		__entry->queue = queue;
 		__entry->reason = reason;
+		__entry->refcount = refcount;
 	),
 
 	TP_printk(
-		LOCAL_PR_FMT " queue:%d, reason:%d",
-		LOCAL_PR_ARG, __entry->queue, __entry->reason
+		LOCAL_PR_FMT " queue:%d, reason:%d, refcount: %d",
+		LOCAL_PR_ARG, __entry->queue, __entry->reason,
+		__entry->refcount
 	)
 );
 
diff -ruw linux-6.13.12/net/mac80211/tx.c linux-6.13.12-fbx/net/mac80211/tx.c
--- linux-6.13.12/net/mac80211/tx.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/tx.c	2025-09-25 17:40:37.835378473 +0200
@@ -622,6 +622,12 @@
 	else
 		tx->key = NULL;
 
+	if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
+		if (tx->key && tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE)
+			info->control.hw_key = &tx->key->conf;
+		return TX_CONTINUE;
+	}
+
 	if (tx->key) {
 		bool skip_hw = false;
 
@@ -680,6 +686,7 @@
 	struct ieee80211_hdr *hdr = (void *)tx->skb->data;
 	struct ieee80211_supported_band *sband;
 	u32 len;
+	u8 i;
 	struct ieee80211_tx_rate_control txrc;
 	struct ieee80211_sta_rates *ratetbl = NULL;
 	bool encap = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP;
@@ -715,7 +722,13 @@
 		    tx->sdata->vif.type == NL80211_IFTYPE_OCB);
 
 	/* set up RTS protection if desired */
-	if (len > tx->local->hw.wiphy->rts_threshold) {
+	if (tx->local->hw.wiphy->n_radio) {
+		for (i = 0; i < tx->local->hw.wiphy->n_radio; i++) {
+			if (len > tx->local->hw.wiphy->radio_cfg[i].rts_threshold)
+				txrc.rts = true;
+		}
+	} else {
+		if (len > tx->local->hw.wiphy->rts_threshold)
 		txrc.rts = true;
 	}
 
@@ -1304,6 +1317,9 @@
 	    (info->control.flags & IEEE80211_TX_CTRL_PS_RESPONSE))
 		return NULL;
 
+	if (unlikely(ieee80211_is_ctl(hdr->frame_control)))
+		return NULL;
+
 	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
 	    unlikely(!ieee80211_is_data_present(hdr->frame_control))) {
 		if ((!ieee80211_is_mgmt(hdr->frame_control) ||
@@ -3868,20 +3884,32 @@
 	tx.sdata = vif_to_sdata(info->control.vif);
 
 	if (txq->sta) {
+		const u8 *sender, *dest;
+
 		tx.sta = container_of(txq->sta, struct sta_info, sta);
+
+		if ((info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
+			struct ethhdr *ehdr = (struct ethhdr *)skb->data;
+			sender = ehdr->h_source;
+			dest = ehdr->h_dest;
+		} else {
+			sender = hdr->addr2;
+			dest = hdr->addr1;
+		}
+
 		/*
 		 * Drop unicast frames to unauthorised stations unless they are
 		 * injected frames or EAPOL frames from the local station.
 		 */
 		if (unlikely(!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
-			     ieee80211_is_data(hdr->frame_control) &&
+			     ieee80211_is_tx_data(skb) &&
 			     !ieee80211_vif_is_mesh(&tx.sdata->vif) &&
 			     tx.sdata->vif.type != NL80211_IFTYPE_OCB &&
-			     !is_multicast_ether_addr(hdr->addr1) &&
+			     !is_multicast_ether_addr(dest) &&
 			     !test_sta_flag(tx.sta, WLAN_STA_AUTHORIZED) &&
 			     (!(info->control.flags &
 				IEEE80211_TX_CTRL_PORT_CTRL_PROTO) ||
-			      !ieee80211_is_our_addr(tx.sdata, hdr->addr2,
+			      !ieee80211_is_our_addr(tx.sdata, sender,
 						     NULL)))) {
 			I802_DEBUG_INC(local->tx_handlers_drop_unauth_port);
 			ieee80211_free_txskb(&local->hw, skb);
@@ -3893,6 +3921,7 @@
 	 * The key can be removed while the packet was queued, so need to call
 	 * this here to get the current key.
 	 */
+	info->control.hw_key = NULL;
 	r = ieee80211_tx_h_select_key(&tx);
 	if (r != TX_CONTINUE) {
 		ieee80211_free_txskb(&local->hw, skb);
@@ -4278,6 +4307,10 @@
 	struct sk_buff *next;
 	int len = skb->len;
 
+#if defined(CONFIG_IP_FFN) || defined(CONFIG_IPV6_FFN)
+	skb_ffn_mark_dirty(skb);
+#endif
+
 	if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) {
 		kfree_skb(skb);
 		return;
@@ -4377,6 +4410,9 @@
 			return false;
 		if (sdata->wdev.use_4addr)
 			return false;
+		if (ieee80211_hw_check(&sdata->local->hw,
+				       APVLAN_NEED_MCAST_TO_UCAST))
+			break;
 		fallthrough;
 	case NL80211_IFTYPE_AP:
 		/* check runtime toggle for this bss */
@@ -4525,8 +4561,10 @@
 						     IEEE80211_TX_CTRL_MLO_LINK_UNSPEC,
 						     NULL);
 	} else if (ieee80211_vif_is_mld(&sdata->vif) &&
-		   sdata->vif.type == NL80211_IFTYPE_AP &&
-		   !ieee80211_hw_check(&sdata->local->hw, MLO_MCAST_MULTI_LINK_TX)) {
+		   ((sdata->vif.type == NL80211_IFTYPE_AP &&
+		     !ieee80211_hw_check(&sdata->local->hw, MLO_MCAST_MULTI_LINK_TX)) ||
+		    ((sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
+		     !sdata->wdev.use_4addr))) {
 		ieee80211_mlo_multicast_tx(dev, skb);
 	} else {
 normal:
@@ -4583,7 +4621,20 @@
 {
 	struct ieee80211_local *local = sdata->local;
 	struct sk_buff *next;
-	bool ret = true;
+	struct ieee80211_tx_control control = {};
+	struct ieee80211_sta *pubsta = NULL;
+	struct ethhdr *ehdr = (struct ethhdr *)skb->data;
+	bool ret = true, is_eapol;
+
+	is_eapol = (sdata->control_port_protocol == ehdr->h_proto);
+	if (sta && is_eapol) {
+		if (sta->uploaded)
+			pubsta = &sta->sta;
+
+		control.sta = pubsta;
+		drv_tx(local, &control, skb);
+		return ret;
+	}
 
 	if (ieee80211_queue_skb(local, sdata, sta, skb))
 		return true;
@@ -4599,19 +4650,25 @@
 
 static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
 				struct net_device *dev, struct sta_info *sta,
-				struct ieee80211_key *key, struct sk_buff *skb)
+				struct ieee80211_key *key, struct sk_buff *skb,
+				u32 info_flags, u32 ctrl_flags, u64 *cookie)
 {
 	struct ieee80211_tx_info *info;
 	struct ieee80211_local *local = sdata->local;
 	struct tid_ampdu_tx *tid_tx;
 	struct sk_buff *seg, *next;
+	struct ethhdr *ehdr = (struct ethhdr *)skb->data;
+	unsigned char *ra = ehdr->h_dest;
 	unsigned int skbs = 0, len = 0;
 	u16 queue;
+	bool multicast;
 	u8 tid;
 
 	queue = ieee80211_select_queue(sdata, sta, skb);
 	skb_set_queue_mapping(skb, queue);
 
+	multicast = is_multicast_ether_addr(ra);
+
 	if (unlikely(test_bit(SCAN_SW_SCANNING, &local->scanning)) &&
 	    test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))
 		goto out_free;
@@ -4644,6 +4701,8 @@
 	info = IEEE80211_SKB_CB(skb);
 	memset(info, 0, sizeof(*info));
 
+	info->control.flags = ctrl_flags;
+	info->flags |= info_flags;
 	info->hw_queue = sdata->vif.hw_queue[queue];
 
 	if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
@@ -4663,10 +4722,11 @@
 			memcpy(IEEE80211_SKB_CB(seg), info, sizeof(*info));
 	}
 
-	if (unlikely(skb->sk &&
-		     skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS)) {
+	if (unlikely((skb->sk &&
+		      skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) ||
+		     ((ctrl_flags & IEEE80211_TX_CTL_REQ_TX_STATUS) && !multicast))) {
 		info->status_data = ieee80211_store_ack_skb(local, skb,
-							    &info->flags, NULL);
+							    &info->flags, cookie);
 		if (info->status_data)
 			info->status_data_idr = 1;
 	}
@@ -4685,13 +4745,87 @@
 	kfree_skb(skb);
 }
 
-netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
-					    struct net_device *dev)
+static
+void ieee80211_8023_xmit_ap(struct ieee80211_sub_if_data *sdata,
+			    struct net_device *dev, struct sta_info *sta,
+			    struct ieee80211_key *key, struct sk_buff *skb,
+			    u32 info_flags, u32 ctrl_flags, u64 *cookie)
+{
+	struct ieee80211_tx_info *info;
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_sta *pubsta = NULL;
+	struct ieee80211_tx_control control = {};
+	struct ethhdr *ehdr = (struct ethhdr *)skb->data;
+        unsigned char *ra = ehdr->h_dest;
+        bool multicast = is_multicast_ether_addr(ra);
+	unsigned long flags;
+	int q;
+	u16 q_map;
+
+	/*
+	 * If the skb is shared we need to obtain our own copy.
+	 */
+	skb = skb_share_check(skb, GFP_ATOMIC);
+
+	if (unlikely(!skb))
+		return;
+
+	info = IEEE80211_SKB_CB(skb);
+	memset(info, 0, sizeof(*info));
+	info->flags |= info_flags;
+
+	if (unlikely((skb->sk &&
+		      skb_shinfo(skb)->tx_flags & SKBTX_WIFI_STATUS) ||
+                     ((ctrl_flags & IEEE80211_TX_CTL_REQ_TX_STATUS) && !multicast))) {
+		info->status_data = ieee80211_store_ack_skb(local, skb,
+							    &info->flags, cookie);
+		if (info->status_data)
+			info->status_data_idr = 1;
+	}
+
+	info->flags |= IEEE80211_TX_CTL_HW_80211_ENCAP;
+	info->control.vif = &sdata->vif;
+
+	if (key)
+		info->control.hw_key = &key->conf;
+
+	q_map = skb_get_queue_mapping(skb);
+	q = sdata->vif.hw_queue[q_map];
+
+	if (sta) {
+		sta->deflink.tx_stats.bytes[q_map] += skb->len;
+		sta->deflink.tx_stats.packets[q_map]++;
+	}
+
+	spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+
+	if (local->queue_stop_reasons[q] || !skb_queue_empty(&local->pending[q])) {
+		skb_queue_tail(&local->pending[q], skb);
+		spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+		return;
+	}
+
+	spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+
+	if (sta && sta->uploaded)
+		pubsta = &sta->sta;
+
+	control.sta = pubsta;
+
+	drv_tx(local, &control, skb);
+}
+
+static netdev_tx_t __ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
+						     struct net_device *dev,
+						     u32 info_flags,
+						     u32 ctrl_flags,
+						     u64 *cookie)
 {
 	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
 	struct ethhdr *ehdr = (struct ethhdr *)skb->data;
 	struct ieee80211_key *key;
 	struct sta_info *sta;
+	bool is_eapol;
 
 	if (unlikely(!ieee80211_sdata_running(sdata) || skb->len < ETH_HLEN)) {
 		kfree_skb(skb);
@@ -4704,10 +4838,11 @@
 		kfree_skb(skb);
 		goto out;
 	}
+	is_eapol = (sdata->control_port_protocol == ehdr->h_proto);
 
 	if (unlikely(IS_ERR_OR_NULL(sta) || !sta->uploaded ||
-	    !test_sta_flag(sta, WLAN_STA_AUTHORIZED) ||
-	    sdata->control_port_protocol == ehdr->h_proto))
+	    (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) && !is_eapol) ||
+	    (is_eapol && !(sdata->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED))))
 		goto skip_offload;
 
 	key = rcu_dereference(sta->ptk[sta->ptk_idx]);
@@ -4719,7 +4854,15 @@
 		goto skip_offload;
 
 	sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
-	ieee80211_8023_xmit(sdata, dev, sta, key, skb);
+
+	if (sdata->vif.type == NL80211_IFTYPE_AP &&
+	    ieee80211_hw_check(&sdata->local->hw, ALLOW_DRV_TX_FOR_DATA)) {
+		ieee80211_8023_xmit_ap(sdata, dev, sta, key, skb, info_flags, ctrl_flags, cookie);
+		goto out;
+	}
+
+	ieee80211_8023_xmit(sdata, dev, sta, key, skb, info_flags,
+			    ctrl_flags, cookie);
 	goto out;
 
 skip_offload:
@@ -4730,6 +4873,62 @@
 	return NETDEV_TX_OK;
 }
 
+netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
+					    struct net_device *dev)
+{
+#if defined(CONFIG_IP_FFN) || defined(CONFIG_IPV6_FFN)
+	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+	struct ieee80211_local *local = sdata->local;
+	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_control control = {};
+	struct sta_info *sta;
+	struct ieee80211_sta *pubsta = NULL;
+
+	info->control.vif = &sdata->vif;
+
+	if (skb->ffn_ff_done &&
+	    ieee80211_hw_check(&local->hw, ALLOW_DRV_TX_FOR_DATA)) {
+		info->control.flags = u32_encode_bits(IEEE80211_LINK_UNSPECIFIED,
+						      IEEE80211_TX_CTRL_MLO_LINK);
+		info->flags = IEEE80211_TX_CTL_HW_80211_ENCAP;
+
+		if (hweight16(sdata->vif.valid_links) > 1) {
+			rcu_read_lock();
+
+			if (ieee80211_lookup_ra_sta(sdata, skb, &sta)) {
+				kfree_skb(skb);
+				goto out;
+			}
+
+			if (!IS_ERR_OR_NULL(sta) && sta->uploaded)
+				pubsta = &sta->sta;
+
+			control.sta = pubsta;
+			drv_tx(sdata->local, &control,  skb);
+out:
+			rcu_read_unlock();
+		} else {
+			control.sta = NULL;
+
+			rcu_read_lock();
+			if (!ieee80211_lookup_ra_sta(sdata, skb, &sta) &&
+			    !IS_ERR_OR_NULL(sta)) {
+				sta->deflink.tx_stats.packets[0]++;
+				sta->deflink.tx_stats.bytes[0] += skb->len;
+			}
+			dev_sw_netstats_tx_add(dev, 1, skb->len);
+
+			drv_tx(sdata->local, &control,  skb);
+			rcu_read_unlock();
+		}
+
+		return NETDEV_TX_OK;
+	}
+#endif
+
+	return __ieee80211_subif_start_xmit_8023(skb, dev, 0, 0, NULL);
+}
+
 struct sk_buff *
 ieee80211_build_data_template(struct ieee80211_sub_if_data *sdata,
 			      struct sk_buff *skb, u32 info_flags)
@@ -6243,7 +6442,12 @@
 
 start_xmit:
 	local_bh_disable();
-	__ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags, cookie);
+	if (sdata->vif.offload_flags & IEEE80211_OFFLOAD_ENCAP_ENABLED)
+		__ieee80211_subif_start_xmit_8023(skb, skb->dev, flags,
+						  ctrl_flags, cookie);
+	else
+		__ieee80211_subif_start_xmit(skb, skb->dev, flags, ctrl_flags,
+					     cookie);
 	local_bh_enable();
 
 	return 0;
diff -ruw linux-6.13.12/net/mac80211/util.c linux-6.13.12-fbx/net/mac80211/util.c
--- linux-6.13.12/net/mac80211/util.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/util.c	2025-09-25 17:40:37.835378473 +0200
@@ -437,8 +437,6 @@
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
-	trace_wake_queue(local, queue, reason);
-
 	if (WARN_ON(queue >= hw->queues))
 		return;
 
@@ -456,6 +454,9 @@
 	if (local->q_stop_reasons[queue][reason] == 0)
 		__clear_bit(reason, &local->queue_stop_reasons[queue]);
 
+	trace_wake_queue(local, queue, reason,
+			 local->q_stop_reasons[queue][reason]);
+
 	if (local->queue_stop_reasons[queue] != 0)
 		/* someone still has this queue stopped */
 		return;
@@ -502,8 +503,6 @@
 {
 	struct ieee80211_local *local = hw_to_local(hw);
 
-	trace_stop_queue(local, queue, reason);
-
 	if (WARN_ON(queue >= hw->queues))
 		return;
 
@@ -512,6 +511,9 @@
 	else
 		local->q_stop_reasons[queue][reason]++;
 
+	trace_stop_queue(local, queue, reason,
+			 local->q_stop_reasons[queue][reason]);
+
 	set_bit(reason, &local->queue_stop_reasons[queue]);
 }
 
@@ -1838,7 +1840,13 @@
 	drv_set_frag_threshold(local, hw->wiphy->frag_threshold);
 
 	/* setup RTS threshold */
-	drv_set_rts_threshold(local, hw->wiphy->rts_threshold);
+	if (hw->wiphy->n_radio)
+		for (i = 0; i < hw->wiphy->n_radio; i++)
+			drv_set_rts_threshold(local, i,
+					      hw->wiphy->radio_cfg[i].rts_threshold);
+	else
+		drv_set_rts_threshold(local, NL80211_WIPHY_RADIO_ID_MAX,
+				      hw->wiphy->rts_threshold);
 
 	/* reset coverage class */
 	drv_set_coverage_class(local, hw->wiphy->coverage_class);
@@ -2751,6 +2759,7 @@
 {
 	struct ieee80211_he_operation *he_oper;
 	struct ieee80211_he_6ghz_oper *he_6ghz_op;
+	struct cfg80211_chan_def he_chandef;
 	u32 he_oper_params;
 	u8 ie_len = 1 + sizeof(struct ieee80211_he_operation);
 
@@ -2782,27 +2791,33 @@
 	if (chandef->chan->band != NL80211_BAND_6GHZ)
 		goto out;
 
+	cfg80211_chandef_create(&he_chandef, chandef->chan, NL80211_CHAN_NO_HT);
+	he_chandef.center_freq1 = chandef->center_freq1;
+	he_chandef.center_freq2 = chandef->center_freq2;
+	he_chandef.width = chandef->width;
+
 	/* TODO add VHT operational */
 	he_6ghz_op = (struct ieee80211_he_6ghz_oper *)pos;
 	he_6ghz_op->minrate = 6; /* 6 Mbps */
 	he_6ghz_op->primary =
-		ieee80211_frequency_to_channel(chandef->chan->center_freq);
+		ieee80211_frequency_to_channel(he_chandef.chan->center_freq);
 	he_6ghz_op->ccfs0 =
-		ieee80211_frequency_to_channel(chandef->center_freq1);
-	if (chandef->center_freq2)
+		ieee80211_frequency_to_channel(he_chandef.center_freq1);
+	if (he_chandef.center_freq2)
 		he_6ghz_op->ccfs1 =
-			ieee80211_frequency_to_channel(chandef->center_freq2);
+			ieee80211_frequency_to_channel(he_chandef.center_freq2);
 	else
 		he_6ghz_op->ccfs1 = 0;
 
-	switch (chandef->width) {
+	switch (he_chandef.width) {
 	case NL80211_CHAN_WIDTH_320:
-		/*
-		 * TODO: mesh operation is not defined over 6GHz 320 MHz
-		 * channels.
+		/* Downgrade EHT 320 MHz BW to 160 MHz for HE and set new
+		 * center_freq1
 		 */
-		WARN_ON(1);
-		break;
+		ieee80211_chandef_downgrade(&he_chandef, NULL);
+		he_6ghz_op->ccfs0 =
+			ieee80211_frequency_to_channel(he_chandef.center_freq1);
+		fallthrough;
 	case NL80211_CHAN_WIDTH_160:
 		/* Convert 160 MHz channel width to new style as interop
 		 * workaround.
@@ -2810,7 +2825,7 @@
 		he_6ghz_op->control =
 			IEEE80211_HE_6GHZ_OPER_CTRL_CHANWIDTH_160MHZ;
 		he_6ghz_op->ccfs1 = he_6ghz_op->ccfs0;
-		if (chandef->chan->center_freq < chandef->center_freq1)
+		if (he_chandef.chan->center_freq < he_chandef.center_freq1)
 			he_6ghz_op->ccfs0 -= 8;
 		else
 			he_6ghz_op->ccfs0 += 8;
@@ -3650,31 +3665,6 @@
 	WARN_ON_ONCE(!cfg80211_chandef_valid(c));
 }
 
-/*
- * Returns true if smps_mode_new is strictly more restrictive than
- * smps_mode_old.
- */
-bool ieee80211_smps_is_restrictive(enum ieee80211_smps_mode smps_mode_old,
-				   enum ieee80211_smps_mode smps_mode_new)
-{
-	if (WARN_ON_ONCE(smps_mode_old == IEEE80211_SMPS_AUTOMATIC ||
-			 smps_mode_new == IEEE80211_SMPS_AUTOMATIC))
-		return false;
-
-	switch (smps_mode_old) {
-	case IEEE80211_SMPS_STATIC:
-		return false;
-	case IEEE80211_SMPS_DYNAMIC:
-		return smps_mode_new == IEEE80211_SMPS_STATIC;
-	case IEEE80211_SMPS_OFF:
-		return smps_mode_new != IEEE80211_SMPS_OFF;
-	default:
-		WARN_ON(1);
-	}
-
-	return false;
-}
-
 int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
 			      struct cfg80211_csa_settings *csa_settings)
 {
@@ -3950,6 +3940,32 @@
 	ps->dtim_count = dtim_count;
 }
 
+void ieee80211_force_dtim(struct ieee80211_vif *vif,
+			  unsigned int dtim_count)
+{
+	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+	u8 dtim_period = sdata->vif.bss_conf.dtim_period;
+	struct ps_data *ps;
+
+	if (sdata->vif.type == NL80211_IFTYPE_AP ||
+	    sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+		if (!sdata->bss)
+			return;
+
+		ps = &sdata->bss->ps;
+	} else if (ieee80211_vif_is_mesh(&sdata->vif)) {
+		ps = &sdata->u.mesh.ps;
+	} else {
+		return;
+	}
+
+	if (WARN_ON_ONCE(dtim_count >= dtim_period))
+		return;
+
+	ps->dtim_count = dtim_count;
+}
+EXPORT_SYMBOL(ieee80211_force_dtim);
+
 static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local,
 					 struct ieee80211_chanctx *ctx)
 {
@@ -3983,6 +3999,23 @@
 	return radar_detect;
 }
 
+bool ieee80211_is_radio_idx_in_scan_req(struct wiphy *wiphy,
+					struct cfg80211_scan_request *scan_req,
+					int radio_idx)
+{
+	struct ieee80211_channel *chan;
+	int i, chan_radio_idx;
+
+	for (i = 0; i < scan_req->n_channels; i++) {
+		chan = scan_req->channels[i];
+		chan_radio_idx = cfg80211_get_radio_idx_by_chan(wiphy, chan);
+		if (chan_radio_idx == radio_idx)
+			return true;
+	}
+
+	return false;
+}
+
 static u32
 __ieee80211_get_radio_mask(struct ieee80211_sub_if_data *sdata)
 {
diff -ruw linux-6.13.12/net/mac80211/vht.c linux-6.13.12-fbx/net/mac80211/vht.c
--- linux-6.13.12/net/mac80211/vht.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/mac80211/vht.c	2025-09-25 17:40:37.835378473 +0200
@@ -232,9 +232,11 @@
 	       sizeof(struct ieee80211_vht_mcs_info));
 
 	/* copy EXT_NSS_BW Support value or remove the capability */
-	if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_VHT_EXT_NSS_BW))
+	if (ieee80211_hw_check(&sdata->local->hw, SUPPORTS_VHT_EXT_NSS_BW)) {
+		vht_cap->cap |= cap_info &
+			IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK;
 		vht_cap->cap |= (cap_info & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK);
-	else
+	} else
 		vht_cap->vht_mcs.tx_highest &=
 			~cpu_to_le16(IEEE80211_VHT_EXT_NSS_BW_CAPABLE);
 
@@ -350,8 +352,8 @@
 }
 
 /* FIXME: move this to some better location - parses HE/EHT now */
-enum ieee80211_sta_rx_bandwidth
-_ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta,
+static enum ieee80211_sta_rx_bandwidth
+__ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta,
 			 struct cfg80211_chan_def *chandef)
 {
 	unsigned int link_id = link_sta->link_id;
@@ -423,6 +425,28 @@
 	return IEEE80211_STA_RX_BW_80;
 }
 
+enum ieee80211_sta_rx_bandwidth
+_ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta,
+			 struct cfg80211_chan_def *chandef)
+{
+	/*
+	 * With RX OMI, also pretend that the STA's capability changed.
+	 * Of course this isn't really true, it didn't change, only our
+	 * RX capability was changed by notifying RX OMI to the STA.
+	 * The purpose, however, is to save power, and that requires
+	 * changing also transmissions to the AP and the chanctx. The
+	 * transmissions depend on link_sta->bandwidth which is set in
+	 * _ieee80211_sta_cur_vht_bw() below, but the chanctx depends
+	 * on the result of this function which is also called by
+	 * _ieee80211_sta_cur_vht_bw(), so we need to do that here as
+	 * well. This is sufficient for the steady state, but during
+	 * the transition we already need to change TX/RX separately,
+	 * so _ieee80211_sta_cur_vht_bw() below applies the _tx one.
+	 */
+	return min(__ieee80211_sta_cap_rx_bw(link_sta, chandef),
+		   link_sta->rx_omi_bw_rx);
+}
+
 enum nl80211_chan_width
 ieee80211_sta_cap_chan_bw(struct link_sta_info *link_sta)
 {
@@ -503,8 +527,11 @@
 		rcu_read_unlock();
 	}
 
-	bw = _ieee80211_sta_cap_rx_bw(link_sta, chandef);
+	/* intentionally do not take rx_bw_omi_rx into account */
+	bw = __ieee80211_sta_cap_rx_bw(link_sta, chandef);
 	bw = min(bw, link_sta->cur_max_bandwidth);
+	/* but do apply rx_omi_bw_tx */
+	bw = min(bw, link_sta->rx_omi_bw_tx);
 
 	/* Don't consider AP's bandwidth for TDLS peers, section 11.23.1 of
 	 * IEEE80211-2016 specification makes higher bandwidth operation
diff -ruw linux-6.13.12/net/netfilter/Kconfig linux-6.13.12-fbx/net/netfilter/Kconfig
--- linux-6.13.12/net/netfilter/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/Kconfig	2025-09-25 17:40:37.847378532 +0200
@@ -364,6 +364,7 @@
 config NF_CONNTRACK_SIP
 	tristate "SIP protocol support"
 	default m if NETFILTER_ADVANCED=n
+	select CRYPTO_LIB_SHA256
 	help
 	  SIP is an application-layer control protocol that can establish,
 	  modify, and terminate multimedia sessions (conferences) such as
diff -ruw linux-6.13.12/net/netfilter/nf_conntrack_core.c linux-6.13.12-fbx/net/netfilter/nf_conntrack_core.c
--- linux-6.13.12/net/netfilter/nf_conntrack_core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/nf_conntrack_core.c	2025-09-25 17:40:37.851378552 +0200
@@ -571,12 +571,29 @@
 #endif
 }
 
+#ifdef CONFIG_IP_FFN
+extern void ip_ffn_ct_destroy(struct nf_conn *ct);
+#endif
+
+#ifdef CONFIG_IPV6_FFN
+extern void ipv6_ffn_ct_destroy(struct nf_conn *ct);
+#endif
+
 void nf_ct_destroy(struct nf_conntrack *nfct)
 {
 	struct nf_conn *ct = (struct nf_conn *)nfct;
 
 	WARN_ON(refcount_read(&nfct->use) != 0);
 
+#ifdef CONFIG_IP_FFN
+	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num == AF_INET)
+		ip_ffn_ct_destroy(ct);
+#endif
+#ifdef CONFIG_IPV6_FFN
+	if (ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num == AF_INET6)
+		ipv6_ffn_ct_destroy(ct);
+#endif
+
 	if (unlikely(nf_ct_is_template(ct))) {
 		nf_ct_tmpl_free(ct);
 		return;
@@ -1822,7 +1839,7 @@
 		}
 		spin_unlock_bh(&nf_conntrack_expect_lock);
 	}
-	if (!exp && tmpl)
+	if (!exp)
 		__nf_ct_try_assign_helper(ct, tmpl, GFP_ATOMIC);
 
 	/* Other CPU might have obtained a pointer to this object before it was
@@ -2086,6 +2103,28 @@
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_in);
 
+/* Alter reply tuple (maybe alter helper).  This is for NAT, and is
+   implicitly racy: see __nf_conntrack_confirm */
+void nf_conntrack_alter_reply(struct nf_conn *ct,
+			      const struct nf_conntrack_tuple *newreply)
+{
+	struct nf_conn_help *help = nfct_help(ct);
+
+	/* Should be unconfirmed, so not in hash table yet */
+	WARN_ON(nf_ct_is_confirmed(ct));
+
+	nf_ct_dump_tuple(newreply);
+
+	ct->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
+	if (ct->master || (help && !hlist_empty(&help->expectations)))
+		return;
+
+	rcu_read_lock();
+	__nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
+	rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_alter_reply);
+
 /* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
 void __nf_ct_refresh_acct(struct nf_conn *ct,
 			  enum ip_conntrack_info ctinfo,
@@ -2751,6 +2790,7 @@
 	nf_conntrack_acct_pernet_init(net);
 	nf_conntrack_tstamp_pernet_init(net);
 	nf_conntrack_ecache_pernet_init(net);
+	nf_conntrack_helper_pernet_init(net);
 	nf_conntrack_proto_pernet_init(net);
 
 	return 0;
diff -ruw linux-6.13.12/net/netfilter/nf_conntrack_ftp.c linux-6.13.12-fbx/net/netfilter/nf_conntrack_ftp.c
--- linux-6.13.12/net/netfilter/nf_conntrack_ftp.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/nf_conntrack_ftp.c	2025-09-25 17:40:37.851378552 +0200
@@ -27,6 +27,10 @@
 #include <linux/netfilter/nf_conntrack_ftp.h>
 
 #define HELPER_NAME "ftp"
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+#include <net/netfilter/nf_nat_helper.h>
+#include <net/fbxbridge.h>
+#endif
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
@@ -397,6 +401,17 @@
 	if (unlikely(skb_linearize(skb)))
 		return NF_DROP;
 
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+	if (!ct_ftp_info->is_fbxbridge && skb->dev->fbx_bridge) {
+		struct fbxbridge *fbxbr;
+
+		fbxbr = skb->dev->fbx_bridge;
+		ct_ftp_info->is_fbxbridge = 1;
+		ct_ftp_info->fbxbridge_remote = ntohl(fbxbr->br_remote_ipaddr);
+		ct_ftp_info->fbxbridge_wan = fbxbr->wan_ipaddr;
+	}
+#endif
+
 	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
 	if (th == NULL)
 		return NF_ACCEPT;
@@ -483,6 +498,50 @@
 	 * Doesn't matter unless NAT is happening.  */
 	daddr = &ct->tuplehash[!dir].tuple.dst.u3;
 
+#if defined(CONFIG_FREEBOX_BRIDGE) || defined(CONFIG_FREEBOX_BRIDGE_MODULE)
+	if (ct_ftp_info->is_fbxbridge &&
+	    search[dir][i].ftptype == NF_CT_FTP_PORT) {
+		unsigned long orig_ip_addr;
+		unsigned short orig_port;
+		char buffer[sizeof("nnn,nnn,nnn,nnn,nnn,nnn")];
+		unsigned int len;
+		__be32 addr;
+
+		/* kludge: if  we are here,  then this is a  local pkt
+		 * that has  gone through internal  fbxbridge snat.
+		 *
+		 * If we see a port  command, then we mangle packet to
+		 * change  ip  address  given  to  the  remote  bridge
+		 * address */
+
+		/* check the address in the packet is the one
+		 * fbxbridge changed */
+		orig_ip_addr = cmd.u3.ip;
+		if (orig_ip_addr != ct_ftp_info->fbxbridge_wan)
+			goto donttouch;
+
+		/* now mangle the remote address */
+		orig_port = cmd.u.tcp.port;
+		addr = ct_ftp_info->fbxbridge_remote;
+		len = sprintf(buffer, "%u,%u,%u,%u,%u,%u",
+			      ((unsigned char *)&addr)[0],
+			      ((unsigned char *)&addr)[1],
+			      ((unsigned char *)&addr)[2],
+			      ((unsigned char *)&addr)[3],
+			      orig_port >> 8 , orig_port & 0xFF);
+
+		nf_nat_mangle_tcp_packet(skb, ct, ctinfo, matchoff,
+					 matchlen, buffer, len);
+
+		/* then adjust as if nothing happened */
+		matchlen = len;
+		cmd.u3.ip = ct_ftp_info->fbxbridge_remote;
+	}
+donttouch:
+
+#endif
+
+
 	/* Update the ftp info */
 	if ((cmd.l3num == nf_ct_l3num(ct)) &&
 	    memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
diff -ruw linux-6.13.12/net/netfilter/nf_conntrack_helper.c linux-6.13.12-fbx/net/netfilter/nf_conntrack_helper.c
--- linux-6.13.12/net/netfilter/nf_conntrack_helper.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/nf_conntrack_helper.c	2025-09-25 17:40:37.855378572 +0200
@@ -37,6 +37,11 @@
 EXPORT_SYMBOL_GPL(nf_ct_helper_hsize);
 static unsigned int nf_ct_helper_count __read_mostly;
 
+static bool nf_ct_auto_assign_helper __read_mostly = true;
+module_param_named(nf_conntrack_helper, nf_ct_auto_assign_helper, bool, 0644);
+MODULE_PARM_DESC(nf_conntrack_helper,
+		 "Enable automatic conntrack helper assignment (default 0)");
+
 static DEFINE_MUTEX(nf_ct_nat_helpers_mutex);
 static struct list_head nf_ct_nat_helpers __read_mostly;
 
@@ -48,6 +53,24 @@
 		(__force __u16)tuple->src.u.all) % nf_ct_helper_hsize;
 }
 
+static struct nf_conntrack_helper *
+__nf_ct_helper_find(const struct nf_conntrack_tuple *tuple)
+{
+	struct nf_conntrack_helper *helper;
+	struct nf_conntrack_tuple_mask mask = { .src.u.all = htons(0xFFFF) };
+	unsigned int h;
+
+	if (!nf_ct_helper_count)
+		return NULL;
+
+	h = helper_hash(tuple);
+	hlist_for_each_entry_rcu(helper, &nf_ct_helper_hash[h], hnode) {
+		if (nf_ct_tuple_src_mask_cmp(tuple, &helper->tuple, &mask))
+			return helper;
+	}
+	return NULL;
+}
+
 struct nf_conntrack_helper *
 __nf_conntrack_helper_find(const char *name, u16 l3num, u8 protonum)
 {
@@ -188,32 +211,61 @@
 }
 EXPORT_SYMBOL_GPL(nf_ct_helper_ext_add);
 
+static struct nf_conntrack_helper *
+nf_ct_lookup_helper(struct nf_conn *ct, struct net *net)
+{
+	struct nf_conntrack_net *cnet = nf_ct_pernet(net);
+
+	if (!cnet->sysctl_auto_assign_helper) {
+		if (cnet->auto_assign_helper_warned)
+			return NULL;
+		if (!__nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple))
+			return NULL;
+		pr_info("nf_conntrack: default automatic helper assignment "
+			"has been turned off for security reasons and CT-based "
+			"firewall rule not found. Use the iptables CT target "
+			"to attach helpers instead.\n");
+		cnet->auto_assign_helper_warned = true;
+		return NULL;
+	}
+
+	return __nf_ct_helper_find(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+}
+
 int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
 			      gfp_t flags)
 {
 	struct nf_conntrack_helper *helper = NULL;
 	struct nf_conn_help *help;
+	struct net *net = nf_ct_net(ct);
 
-	/* We already got a helper explicitly attached (e.g. nft_ct) */
+	/* We already got a helper explicitly attached. The function
+	 * nf_conntrack_alter_reply - in case NAT is in use - asks for looking
+	 * the helper up again. Since now the user is in full control of
+	 * making consistent helper configurations, skip this automatic
+	 * re-lookup, otherwise we'll lose the helper.
+	 */
 	if (test_bit(IPS_HELPER_BIT, &ct->status))
 		return 0;
 
-	if (WARN_ON_ONCE(!tmpl))
-		return 0;
-
+	if (tmpl != NULL) {
 	help = nfct_help(tmpl);
 	if (help != NULL) {
 		helper = rcu_dereference(help->helper);
 		set_bit(IPS_HELPER_BIT, &ct->status);
 	}
+	}
 
 	help = nfct_help(ct);
 
 	if (helper == NULL) {
+		helper = nf_ct_lookup_helper(ct, net);
+		if (helper == NULL) {
 		if (help)
 			RCU_INIT_POINTER(help->helper, NULL);
 		return 0;
 	}
+	}
 
 	if (help == NULL) {
 		help = nf_ct_helper_ext_add(ct, flags);
@@ -498,6 +550,19 @@
 }
 EXPORT_SYMBOL_GPL(nf_nat_helper_unregister);
 
+void nf_ct_set_auto_assign_helper_warned(struct net *net)
+{
+	nf_ct_pernet(net)->auto_assign_helper_warned = true;
+}
+EXPORT_SYMBOL_GPL(nf_ct_set_auto_assign_helper_warned);
+
+void nf_conntrack_helper_pernet_init(struct net *net)
+{
+	struct nf_conntrack_net *cnet = nf_ct_pernet(net);
+
+	cnet->sysctl_auto_assign_helper = nf_ct_auto_assign_helper;
+}
+
 int nf_conntrack_helper_init(void)
 {
 	nf_ct_helper_hsize = 1; /* gets rounded up to use one page */
diff -ruw linux-6.13.12/net/netfilter/nf_conntrack_netlink.c linux-6.13.12-fbx/net/netfilter/nf_conntrack_netlink.c
--- linux-6.13.12/net/netfilter/nf_conntrack_netlink.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/nf_conntrack_netlink.c	2025-09-25 17:40:37.855378572 +0200
@@ -2296,6 +2296,11 @@
 			ct->status |= IPS_HELPER;
 			RCU_INIT_POINTER(help->helper, helper);
 		}
+	} else {
+		/* try an implicit helper assignation */
+		err = __nf_ct_try_assign_helper(ct, NULL, GFP_ATOMIC);
+		if (err < 0)
+			goto err2;
 	}
 
 	err = ctnetlink_setup_nat(ct, cda);
diff -ruw linux-6.13.12/net/netfilter/nf_conntrack_proto_tcp.c linux-6.13.12-fbx/net/netfilter/nf_conntrack_proto_tcp.c
--- linux-6.13.12/net/netfilter/nf_conntrack_proto_tcp.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/nf_conntrack_proto_tcp.c	2025-09-25 17:40:37.855378572 +0200
@@ -1250,6 +1250,7 @@
 		break;
 	}
 
+	if (!ct->proto.tcp.no_window_track) {
 	res = tcp_in_window(ct, dir, index,
 			    skb, dataoff, th, state);
 	switch (res) {
@@ -1263,6 +1264,7 @@
 	case NFCT_TCP_ACCEPT:
 		break;
 	}
+	}
      in_window:
 	/* From now on we have got in-window packets */
 	ct->proto.tcp.last_index = index;
@@ -1336,6 +1338,46 @@
 	return NF_ACCEPT;
 }
 
+#ifdef CONFIG_IP_FFN
+int external_tcpv4_packet(struct nf_conn *ct,
+			  struct sk_buff *skb,
+			  unsigned int dataoff,
+			  enum ip_conntrack_info ctinfo);
+int external_tcpv4_packet(struct nf_conn *ct,
+			  struct sk_buff *skb,
+			  unsigned int dataoff,
+			  enum ip_conntrack_info ctinfo)
+{
+	/* FIXME: is it always PRE_ROUTING? */
+	struct nf_hook_state state = {
+		.hook = NF_INET_PRE_ROUTING,
+		.pf = AF_INET,
+		.net = nf_ct_net(ct),
+	};
+	return nf_conntrack_tcp_packet(ct, skb, dataoff, ctinfo, &state);
+}
+#endif
+
+#ifdef CONFIG_IPV6_FFN
+int external_tcpv6_packet(struct nf_conn *ct,
+			  struct sk_buff *skb,
+			  unsigned int dataoff,
+			  enum ip_conntrack_info ctinfo);
+int external_tcpv6_packet(struct nf_conn *ct,
+			  struct sk_buff *skb,
+			  unsigned int dataoff,
+			  enum ip_conntrack_info ctinfo)
+{
+	/* FIXME: is it always PRE_ROUTING? */
+	struct nf_hook_state state = {
+		.hook = NF_INET_PRE_ROUTING,
+		.pf = AF_INET6,
+		.net = nf_ct_net(ct),
+	};
+	return nf_conntrack_tcp_packet(ct, skb, dataoff, ctinfo, &state);
+}
+#endif
+
 #if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
diff -ruw linux-6.13.12/net/netfilter/nf_conntrack_proto_udp.c linux-6.13.12-fbx/net/netfilter/nf_conntrack_proto_udp.c
--- linux-6.13.12/net/netfilter/nf_conntrack_proto_udp.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/nf_conntrack_proto_udp.c	2025-09-25 17:40:37.855378572 +0200
@@ -129,6 +129,46 @@
 	return NF_ACCEPT;
 }
 
+#ifdef CONFIG_IP_FFN
+int external_udpv4_packet(struct nf_conn *ct,
+			  struct sk_buff *skb,
+			  unsigned int dataoff,
+			  enum ip_conntrack_info ctinfo);
+int external_udpv4_packet(struct nf_conn *ct,
+			  struct sk_buff *skb,
+			  unsigned int dataoff,
+			  enum ip_conntrack_info ctinfo)
+{
+	/* FIXME: is it always PRE_ROUTING? */
+	struct nf_hook_state state = {
+		.hook = NF_INET_PRE_ROUTING,
+		.pf = AF_INET,
+		.net = nf_ct_net(ct),
+	};
+	return nf_conntrack_udp_packet(ct, skb, dataoff, ctinfo, &state);
+}
+#endif
+
+#ifdef CONFIG_IPV6_FFN
+int external_udpv6_packet(struct nf_conn *ct,
+			  struct sk_buff *skb,
+			  unsigned int dataoff,
+			  enum ip_conntrack_info ctinfo);
+int external_udpv6_packet(struct nf_conn *ct,
+			  struct sk_buff *skb,
+			  unsigned int dataoff,
+			  enum ip_conntrack_info ctinfo)
+{
+	/* FIXME: is it always PRE_ROUTING? */
+	struct nf_hook_state state = {
+		.hook = NF_INET_PRE_ROUTING,
+		.pf = AF_INET6,
+		.net = nf_ct_net(ct),
+	};
+	return nf_conntrack_udp_packet(ct, skb, dataoff, ctinfo, &state);
+}
+#endif
+
 #ifdef CONFIG_NF_CT_PROTO_UDPLITE
 static void udplite_error_log(const struct sk_buff *skb,
 			      const struct nf_hook_state *state,
diff -ruw linux-6.13.12/net/netfilter/nf_conntrack_sip.c linux-6.13.12-fbx/net/netfilter/nf_conntrack_sip.c
--- linux-6.13.12/net/netfilter/nf_conntrack_sip.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/nf_conntrack_sip.c	2025-09-25 17:40:37.855378572 +0200
@@ -35,6 +35,8 @@
 MODULE_ALIAS("ip_conntrack_sip");
 MODULE_ALIAS_NFCT_HELPER(HELPER_NAME);
 
+#define MAX_CALLS	8
+
 #define MAX_PORTS	8
 static unsigned short ports[MAX_PORTS];
 static unsigned int ports_c;
@@ -825,7 +827,8 @@
 	return found;
 }
 
-static void flush_expectations(struct nf_conn *ct, bool media)
+static void __flush_expectations(struct nf_conn *ct, bool media,
+				 const u8 *cid_hash)
 {
 	struct nf_conn_help *help = nfct_help(ct);
 	struct nf_conntrack_expect *exp;
@@ -835,6 +838,15 @@
 	hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
 		if ((exp->class != SIP_EXPECT_SIGNALLING) ^ media)
 			continue;
+		if (media && cid_hash) {
+			const struct nf_ct_sip_expect *exp_sip_info;
+			exp_sip_info = nf_ct_exp_data(exp);
+
+			if (memcmp(exp_sip_info->cid_hash, cid_hash,
+				   sizeof (exp_sip_info->cid_hash)))
+				continue;
+		}
+
 		if (!nf_ct_remove_expect(exp))
 			continue;
 		if (!media)
@@ -843,12 +855,36 @@
 	spin_unlock_bh(&nf_conntrack_expect_lock);
 }
 
+static void flush_sig_expectations(struct nf_conn *ct)
+{
+	return __flush_expectations(ct, false, NULL);
+}
+
+static void flush_media_expectations(struct nf_conn *ct,
+				     const char *msg_data,
+				     unsigned int msg_len)
+{
+	unsigned int matchoff, matchlen;
+	u8 cid_hash[SHA256_DIGEST_SIZE];
+	struct sha256_state s;
+
+	sha256_init(&s);
+	if (ct_sip_get_header(ct, msg_data, 0, msg_len,
+			      SIP_HDR_CALL_ID,
+			      &matchoff, &matchlen) > 0)
+		sha256_update(&s, msg_data + matchoff, matchlen);
+	sha256_final(&s, cid_hash);
+
+	__flush_expectations(ct, true, cid_hash);
+}
+
 static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff,
 				 unsigned int dataoff,
 				 const char **dptr, unsigned int *datalen,
 				 union nf_inet_addr *daddr, __be16 port,
 				 enum sip_expectation_classes class,
-				 unsigned int mediaoff, unsigned int medialen)
+				 unsigned int mediaoff, unsigned int medialen,
+				 const u8 *cid_hash)
 {
 	struct nf_conntrack_expect *exp, *rtp_exp, *rtcp_exp;
 	enum ip_conntrack_info ctinfo;
@@ -861,6 +897,7 @@
 	u_int16_t base_port;
 	__be16 rtp_port, rtcp_port;
 	const struct nf_nat_sip_hooks *hooks;
+	struct nf_ct_sip_expect *exp_sip_info;
 
 	saddr = NULL;
 	if (sip_direct_media) {
@@ -953,18 +990,29 @@
 			goto err1;
 	}
 
-	if (skip_expect)
+	if (skip_expect) {
+		exp_sip_info = nf_ct_exp_data(exp);
+		memcpy(exp_sip_info->cid_hash, cid_hash,
+		       sizeof (exp_sip_info->cid_hash));
 		return NF_ACCEPT;
+	}
 
 	rtp_exp = nf_ct_expect_alloc(ct);
 	if (rtp_exp == NULL)
 		goto err1;
+	exp_sip_info = nf_ct_exp_data(rtp_exp);
+	memcpy(exp_sip_info->cid_hash, cid_hash,
+	       sizeof (exp_sip_info->cid_hash));
 	nf_ct_expect_init(rtp_exp, class, nf_ct_l3num(ct), saddr, daddr,
 			  IPPROTO_UDP, NULL, &rtp_port);
 
+
 	rtcp_exp = nf_ct_expect_alloc(ct);
 	if (rtcp_exp == NULL)
 		goto err2;
+	exp_sip_info = nf_ct_exp_data(rtcp_exp);
+	memcpy(exp_sip_info->cid_hash, cid_hash,
+	       sizeof (exp_sip_info->cid_hash));
 	nf_ct_expect_init(rtcp_exp, class, nf_ct_l3num(ct), saddr, daddr,
 			  IPPROTO_UDP, NULL, &rtcp_port);
 
@@ -1039,10 +1087,20 @@
 	const struct nf_nat_sip_hooks *hooks;
 	unsigned int port;
 	const struct sdp_media_type *t;
+	struct sha256_state s;
+	u8 cid_hash[SHA256_DIGEST_SIZE];
 	int ret = NF_ACCEPT;
 
 	hooks = rcu_dereference(nf_nat_sip_hooks);
 
+	/* extract caller id if any */
+	sha256_init(&s);
+	if (ct_sip_get_header(ct, *dptr, 0, *datalen,
+			      SIP_HDR_CALL_ID,
+			      &matchoff, &matchlen) > 0)
+		sha256_update(&s, *dptr + matchoff, matchlen);
+	sha256_final(&s, cid_hash);
+
 	/* Find beginning of session description */
 	if (ct_sip_get_sdp_header(ct, *dptr, 0, *datalen,
 				  SDP_HDR_VERSION, SDP_HDR_UNSPEC,
@@ -1101,7 +1159,7 @@
 		ret = set_expected_rtp_rtcp(skb, protoff, dataoff,
 					    dptr, datalen,
 					    &rtp_addr, htons(port), t->class,
-					    mediaoff, medialen);
+					    mediaoff, medialen, cid_hash);
 		if (ret != NF_ACCEPT) {
 			nf_ct_helper_log(skb, ct,
 					 "cannot add expectation for voice");
@@ -1145,7 +1203,7 @@
 	    (code >= 200 && code <= 299))
 		return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
 	else if (ct_sip_info->invite_cseq == cseq)
-		flush_expectations(ct, true);
+		flush_media_expectations(ct, *dptr, *datalen);
 	return NF_ACCEPT;
 }
 
@@ -1162,7 +1220,7 @@
 	    (code >= 200 && code <= 299))
 		return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
 	else if (ct_sip_info->invite_cseq == cseq)
-		flush_expectations(ct, true);
+		flush_media_expectations(ct, *dptr, *datalen);
 	return NF_ACCEPT;
 }
 
@@ -1179,7 +1237,7 @@
 	    (code >= 200 && code <= 299))
 		return process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
 	else if (ct_sip_info->invite_cseq == cseq)
-		flush_expectations(ct, true);
+		flush_media_expectations(ct, *dptr, *datalen);
 	return NF_ACCEPT;
 }
 
@@ -1193,7 +1251,7 @@
 	struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
 	unsigned int ret;
 
-	flush_expectations(ct, true);
+	flush_media_expectations(ct, *dptr, *datalen);
 	ret = process_sdp(skb, protoff, dataoff, dptr, datalen, cseq);
 	if (ret == NF_ACCEPT)
 		ct_sip_info->invite_cseq = cseq;
@@ -1208,7 +1266,7 @@
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 
-	flush_expectations(ct, true);
+	flush_media_expectations(ct, *dptr, *datalen);
 	return NF_ACCEPT;
 }
 
@@ -1392,7 +1450,7 @@
 	}
 
 flush:
-	flush_expectations(ct, false);
+	flush_sig_expectations(ct);
 	return NF_ACCEPT;
 }
 
@@ -1468,12 +1526,13 @@
 	 * Via: header so that nf_nat_sip can redirect the responses to
 	 * the correct port.
 	 */
-	if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
+	if (nf_ct_protonum(ct) == IPPROTO_UDP &&
+	    ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen,
 				    SIP_HDR_VIA_UDP, NULL, &matchoff,
 				    &matchlen, &addr, &port) > 0 &&
 	    port != ct->tuplehash[dir].tuple.src.u.udp.port &&
 	    nf_inet_addr_cmp(&addr, &ct->tuplehash[dir].tuple.src.u3))
-		ct_sip_info->forced_dport = port;
+		ct_sip_info->forced_dport[!dir] = port;
 
 	for (i = 0; i < ARRAY_SIZE(sip_handlers); i++) {
 		const struct sip_handler *handler;
@@ -1647,17 +1706,17 @@
 	},
 	[SIP_EXPECT_AUDIO] = {
 		.name		= "audio",
-		.max_expected	= 2 * IP_CT_DIR_MAX,
+		.max_expected	= MAX_CALLS * 2 * IP_CT_DIR_MAX,
 		.timeout	= 3 * 60,
 	},
 	[SIP_EXPECT_VIDEO] = {
 		.name		= "video",
-		.max_expected	= 2 * IP_CT_DIR_MAX,
+		.max_expected	= MAX_CALLS * 2 * IP_CT_DIR_MAX,
 		.timeout	= 3 * 60,
 	},
 	[SIP_EXPECT_IMAGE] = {
 		.name		= "image",
-		.max_expected	= IP_CT_DIR_MAX,
+		.max_expected	= MAX_CALLS * IP_CT_DIR_MAX,
 		.timeout	= 3 * 60,
 	},
 };
@@ -1672,6 +1731,7 @@
 	int i, ret;
 
 	NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct nf_ct_sip_master));
+	NF_CT_EXPECT_BUILD_BUG_ON(sizeof(struct nf_ct_sip_expect));
 
 	if (ports_c == 0)
 		ports[ports_c++] = SIP_PORT;
diff -ruw linux-6.13.12/net/netfilter/nf_conntrack_standalone.c linux-6.13.12-fbx/net/netfilter/nf_conntrack_standalone.c
--- linux-6.13.12/net/netfilter/nf_conntrack_standalone.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/nf_conntrack_standalone.c	2025-09-25 17:40:37.855378572 +0200
@@ -554,6 +554,7 @@
 	NF_SYSCTL_CT_LOG_INVALID,
 	NF_SYSCTL_CT_EXPECT_MAX,
 	NF_SYSCTL_CT_ACCT,
+	NF_SYSCTL_CT_HELPER,
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 	NF_SYSCTL_CT_EVENTS,
 #endif
@@ -666,6 +667,14 @@
 		.extra1 	= SYSCTL_ZERO,
 		.extra2 	= SYSCTL_ONE,
 	},
+	[NF_SYSCTL_CT_HELPER] = {
+		.procname	= "nf_conntrack_helper",
+		.maxlen		= sizeof(u8),
+		.mode		= 0644,
+		.proc_handler	= proc_dou8vec_minmax,
+		.extra1 	= SYSCTL_ZERO,
+		.extra2 	= SYSCTL_ONE,
+	},
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 	[NF_SYSCTL_CT_EVENTS] = {
 		.procname	= "nf_conntrack_events",
@@ -1060,6 +1069,7 @@
 	table[NF_SYSCTL_CT_CHECKSUM].data = &net->ct.sysctl_checksum;
 	table[NF_SYSCTL_CT_LOG_INVALID].data = &net->ct.sysctl_log_invalid;
 	table[NF_SYSCTL_CT_ACCT].data = &net->ct.sysctl_acct;
+	table[NF_SYSCTL_CT_HELPER].data = &cnet->sysctl_auto_assign_helper;
 #ifdef CONFIG_NF_CONNTRACK_EVENTS
 	table[NF_SYSCTL_CT_EVENTS].data = &net->ct.sysctl_events;
 #endif
diff -ruw linux-6.13.12/net/netfilter/nf_nat_core.c linux-6.13.12-fbx/net/netfilter/nf_nat_core.c
--- linux-6.13.12/net/netfilter/nf_nat_core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/nf_nat_core.c	2025-09-25 17:40:37.859378592 +0200
@@ -844,6 +844,11 @@
 	else
 		ct->status |= IPS_SRC_NAT_DONE;
 
+	if (maniptype == NF_NAT_MANIP_SRC) {
+		ct->nat_src_proto_min = range->min_proto;
+		ct->nat_src_proto_max = range->max_proto;
+	}
+
 	return NF_ACCEPT;
 }
 EXPORT_SYMBOL(nf_nat_setup_info);
diff -ruw linux-6.13.12/net/netfilter/nf_nat_ftp.c linux-6.13.12-fbx/net/netfilter/nf_nat_ftp.c
--- linux-6.13.12/net/netfilter/nf_nat_ftp.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/nf_nat_ftp.c	2025-09-25 17:40:37.859378592 +0200
@@ -69,7 +69,8 @@
 			       struct nf_conntrack_expect *exp)
 {
 	union nf_inet_addr newaddr;
-	u_int16_t port;
+	u_int16_t port, sport, eport;
+	unsigned int i;
 	int dir = CTINFO2DIR(ctinfo);
 	struct nf_conn *ct = exp->master;
 	char buffer[sizeof("|1||65535|") + INET6_ADDRSTRLEN];
@@ -86,7 +87,42 @@
 	 * this one. */
 	exp->expectfn = nf_nat_follow_master;
 
-	port = nf_nat_exp_find_port(exp, ntohs(exp->saved_proto.tcp.port));
+	if (dir == IP_CT_DIR_ORIGINAL &&
+	    (ct->status & IPS_SRC_NAT) &&
+	    ct->nat_src_proto_min.all &&
+	    ct->nat_src_proto_max.all) {
+		sport = ntohs(ct->nat_src_proto_min.all);
+		eport = ntohs(ct->nat_src_proto_max.all);
+	} else {
+		sport = 1024;
+		eport = 65535;
+	}
+
+	port = ntohs(exp->saved_proto.tcp.port);
+	if (port < sport || port > eport) {
+		get_random_bytes(&port, sizeof (port));
+		port %= eport - sport;
+		port += sport;
+	}
+
+	/* Try to get same port: if not, try to change it. */
+	for (i = 0; i < eport - sport + 1; i++) {
+		int ret;
+
+		exp->tuple.dst.u.tcp.port = htons(port);
+		ret = nf_ct_expect_related(exp, 0);
+		if (ret == 0)
+			break;
+		else if (ret != -EBUSY) {
+			port = 0;
+			break;
+		}
+
+		port++;
+		if (port > eport)
+			port = sport;
+	}
+
 	if (port == 0) {
 		nf_ct_helper_log(skb, exp->master, "all ports in use");
 		return NF_DROP;
diff -ruw linux-6.13.12/net/netfilter/nf_nat_helper.c linux-6.13.12-fbx/net/netfilter/nf_nat_helper.c
--- linux-6.13.12/net/netfilter/nf_nat_helper.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/nf_nat_helper.c	2025-09-25 17:40:37.859378592 +0200
@@ -188,6 +188,14 @@
 	range.flags = NF_NAT_RANGE_MAP_IPS;
 	range.min_addr = range.max_addr
 		= ct->master->tuplehash[!exp->dir].tuple.dst.u3;
+
+	if (ct->master->nat_src_proto_min.all &&
+	    ct->master->nat_src_proto_max.all) {
+		range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+		range.min_proto = ct->master->nat_src_proto_min;
+		range.max_proto = ct->master->nat_src_proto_max;
+	}
+
 	nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
 
 	/* For DST manip, map port here to where it's expected. */
diff -ruw linux-6.13.12/net/netfilter/nf_nat_proto.c linux-6.13.12-fbx/net/netfilter/nf_nat_proto.c
--- linux-6.13.12/net/netfilter/nf_nat_proto.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/nf_nat_proto.c	2025-09-25 17:40:37.859378592 +0200
@@ -385,6 +385,8 @@
 {
 #if IS_ENABLED(CONFIG_IPV6)
 	struct ipv6hdr *ipv6h;
+	const __be32 *to;
+	__be32 *from;
 	__be16 frag_off;
 	int hdroff;
 	u8 nexthdr;
@@ -407,10 +409,24 @@
 	ipv6h = (void *)skb->data + iphdroff;
 
 manip_addr:
-	if (maniptype == NF_NAT_MANIP_SRC)
-		ipv6h->saddr = target->src.u3.in6;
-	else
-		ipv6h->daddr = target->dst.u3.in6;
+	if (maniptype == NF_NAT_MANIP_SRC) {
+		from = ipv6h->saddr.s6_addr32;
+		to = target->src.u3.in6.s6_addr32;
+	} else {
+		from = ipv6h->daddr.s6_addr32;
+		to = target->dst.u3.in6.s6_addr32;
+	}
+
+	if (skb->ip_summed == CHECKSUM_COMPLETE) {
+		__be32 diff[] = {
+			~from[0], ~from[1], ~from[2], ~from[3],
+			to[0], to[1], to[2], to[3],
+		};
+
+		skb->csum = ~csum_partial(diff, sizeof(diff), ~skb->csum);
+	}
+
+	memcpy(from, to, sizeof (struct in6_addr));
 
 #endif
 	return true;
diff -ruw linux-6.13.12/net/netfilter/nf_nat_sip.c linux-6.13.12-fbx/net/netfilter/nf_nat_sip.c
--- linux-6.13.12/net/netfilter/nf_nat_sip.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/nf_nat_sip.c	2025-09-25 17:40:37.859378592 +0200
@@ -111,8 +111,11 @@
 	} else if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.dst.u3, addr) &&
 		   ct->tuplehash[dir].tuple.dst.u.udp.port == port) {
 		newaddr = ct->tuplehash[!dir].tuple.src.u3;
-		newport = ct_sip_info->forced_dport ? :
+		if (nf_ct_protonum(ct) == IPPROTO_UDP)
+			newport = ct_sip_info->forced_dport[dir] ? :
 			  ct->tuplehash[!dir].tuple.src.u.udp.port;
+		else
+			newport = ct->tuplehash[!dir].tuple.src.u.udp.port;
 	} else
 		return 1;
 
@@ -279,7 +282,8 @@
 	}
 
 	/* Mangle destination port for Cisco phones, then fix up checksums */
-	if (dir == IP_CT_DIR_REPLY && ct_sip_info->forced_dport) {
+	if (nf_ct_protonum(ct) == IPPROTO_UDP &&
+	    dir == IP_CT_DIR_REPLY && ct_sip_info->forced_dport[dir]) {
 		struct udphdr *uh;
 
 		if (skb_ensure_writable(skb, skb->len)) {
@@ -288,7 +292,7 @@
 		}
 
 		uh = (void *)skb->data + protoff;
-		uh->dest = ct_sip_info->forced_dport;
+		uh->dest = ct_sip_info->forced_dport[dir];
 
 		if (!nf_nat_mangle_udp_packet(skb, ct, ctinfo, protoff,
 					      0, 0, NULL, 0)) {
@@ -365,6 +369,15 @@
 		range_set_for_snat = 1;
 	}
 
+	if (range_set_for_snat) {
+		if (ct->master->nat_src_proto_min.all &&
+		    ct->master->nat_src_proto_max.all) {
+			range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
+			range.min_proto = ct->master->nat_src_proto_min;
+			range.max_proto = ct->master->nat_src_proto_max;
+		}
+	}
+
 	/* Perform SRC manip. */
 	if (range_set_for_snat)
 		nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);
@@ -382,10 +395,11 @@
 	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
 	struct nf_ct_sip_master *ct_sip_info = nfct_help_data(ct);
 	union nf_inet_addr newaddr;
-	u_int16_t port;
+	u_int16_t port, sport, eport;
 	__be16 srcport;
 	char buffer[INET6_ADDRSTRLEN + sizeof("[]:nnnnn")];
 	unsigned int buflen;
+	unsigned int i;
 
 	/* Connection will come from reply */
 	if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
@@ -397,7 +411,7 @@
 	/* If the signalling port matches the connection's source port in the
 	 * original direction, try to use the destination port in the opposite
 	 * direction. */
-	srcport = ct_sip_info->forced_dport ? :
+	srcport = ct_sip_info->forced_dport[dir] ? :
 		  ct->tuplehash[dir].tuple.src.u.udp.port;
 	if (exp->tuple.dst.u.udp.port == srcport)
 		port = ntohs(ct->tuplehash[!dir].tuple.dst.u.udp.port);
@@ -410,7 +424,40 @@
 	exp->dir = !dir;
 	exp->expectfn = nf_nat_sip_expected;
 
-	port = nf_nat_exp_find_port(exp, port);
+	if (dir == IP_CT_DIR_ORIGINAL &&
+	    (ct->status & IPS_SRC_NAT) &&
+	    ct->nat_src_proto_min.all &&
+	    ct->nat_src_proto_max.all) {
+		sport = ntohs(ct->nat_src_proto_min.all);
+		eport = ntohs(ct->nat_src_proto_max.all);
+	} else {
+		sport = 1024;
+		eport = 65535;
+	}
+
+	if (port < sport || port > eport) {
+		get_random_bytes(&port, sizeof (port));
+		port %= eport - sport;
+		port += sport;
+	}
+
+	for (i = 0; i < eport - sport + 1; i++) {
+		int ret;
+
+		exp->tuple.dst.u.udp.port = htons(port);
+		ret = nf_ct_expect_related(exp, NF_CT_EXP_F_SKIP_MASTER);
+		if (ret == 0)
+			break;
+		else if (ret != -EBUSY) {
+			port = 0;
+			break;
+		}
+
+		port++;
+		if (port > eport)
+			port = sport;
+	}
+
 	if (port == 0) {
 		nf_ct_helper_log(skb, ct, "all ports in use for SIP");
 		return NF_DROP;
@@ -568,7 +615,8 @@
 	enum ip_conntrack_info ctinfo;
 	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
 	enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
-	u_int16_t port;
+	u_int16_t port, sport, eport;
+	unsigned int i;
 
 	/* Connection will come from reply */
 	if (nf_inet_addr_cmp(&ct->tuplehash[dir].tuple.src.u3,
@@ -589,17 +637,37 @@
 	rtcp_exp->dir = !dir;
 	rtcp_exp->expectfn = nf_nat_sip_expected;
 
+	if (dir == IP_CT_DIR_ORIGINAL &&
+	    (ct->status & IPS_SRC_NAT) &&
+	    ct->nat_src_proto_min.all &&
+	    ct->nat_src_proto_max.all) {
+		sport = ntohs(ct->nat_src_proto_min.all);
+		eport = ntohs(ct->nat_src_proto_max.all);
+	} else {
+		sport = 1024;
+		eport = 65535;
+	}
+
+	port = ntohs(rtp_exp->tuple.dst.u.udp.port);
+	if (port < sport || port > eport - 1) {
+		get_random_bytes(&port, sizeof (port));
+		port %= eport - sport;
+		port += sport;
+	}
+
 	/* Try to get same pair of ports: if not, try to change them. */
-	for (port = ntohs(rtp_exp->tuple.dst.u.udp.port);
-	     port != 0; port += 2) {
+	for (i = 0; i < eport - sport + 1; i += 2) {
 		int ret;
 
 		rtp_exp->tuple.dst.u.udp.port = htons(port);
 		ret = nf_ct_expect_related(rtp_exp,
 					   NF_CT_EXP_F_SKIP_MASTER);
-		if (ret == -EBUSY)
+		if (ret == -EBUSY) {
+			port += 2;
+			if (port > eport)
+				port = sport;
 			continue;
-		else if (ret < 0) {
+		} else if (ret < 0) {
 			port = 0;
 			break;
 		}
@@ -610,12 +678,19 @@
 			break;
 		else if (ret == -EBUSY) {
 			nf_ct_unexpect_related(rtp_exp);
+			port += 2;
+			if (port > eport)
+				port = sport;
 			continue;
 		} else if (ret < 0) {
 			nf_ct_unexpect_related(rtp_exp);
 			port = 0;
 			break;
 		}
+
+		port += 2;
+		if (port > eport)
+			port = sport;
 	}
 
 	if (port == 0) {
diff -ruw linux-6.13.12/net/netfilter/nfnetlink.c linux-6.13.12-fbx/net/netfilter/nfnetlink.c
--- linux-6.13.12/net/netfilter/nfnetlink.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/netfilter/nfnetlink.c	2025-09-25 17:40:37.863378612 +0200
@@ -656,7 +656,10 @@
 	    skb->len < nlh->nlmsg_len)
 		return;
 
-	if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
+	if (NFNL_SUBSYS_ID(nlh->nlmsg_type) == NFNL_SUBSYS_CTNETLINK &&
+	    NFNL_MSG_TYPE(nlh->nlmsg_type) == 1 /* IPCTNL_MSG_CT_GET */) {
+		pr_debug("Carving out access exception for conntrack get; does not work for batch queries\n");
+	} else if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
 		netlink_ack(skb, nlh, -EPERM, NULL);
 		return;
 	}
diff -ruw linux-6.13.12/net/sched/sch_drr.c linux-6.13.12-fbx/net/sched/sch_drr.c
--- linux-6.13.12/net/sched/sch_drr.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/sched/sch_drr.c	2025-09-25 17:40:37.903378810 +0200
@@ -105,6 +105,7 @@
 		return -ENOBUFS;
 
 	gnet_stats_basic_sync_init(&cl->bstats);
+	INIT_LIST_HEAD(&cl->alist);
 	cl->common.classid = classid;
 	cl->quantum	   = quantum;
 	cl->qdisc	   = qdisc_create_dflt(sch->dev_queue,
@@ -229,7 +230,7 @@
 {
 	struct drr_class *cl = (struct drr_class *)arg;
 
-	list_del(&cl->alist);
+	list_del_init(&cl->alist);
 }
 
 static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
@@ -326,7 +327,9 @@
 			cl = drr_find_class(sch, res.classid);
 		return cl;
 	}
-	return NULL;
+
+	/* default to first minor if it exists, or drop */
+	return drr_find_class(sch, TC_H_MAKE(TC_H_MAJ(sch->handle), 1));
 }
 
 static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
@@ -390,7 +393,7 @@
 			if (unlikely(skb == NULL))
 				goto out;
 			if (cl->qdisc->q.qlen == 0)
-				list_del(&cl->alist);
+				list_del_init(&cl->alist);
 
 			bstats_update(&cl->bstats, skb);
 			qdisc_bstats_update(sch, skb);
@@ -431,7 +434,7 @@
 	for (i = 0; i < q->clhash.hashsize; i++) {
 		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
 			if (cl->qdisc->q.qlen)
-				list_del(&cl->alist);
+				list_del_init(&cl->alist);
 			qdisc_reset(cl->qdisc);
 		}
 	}
diff -ruw linux-6.13.12/net/socket.c linux-6.13.12-fbx/net/socket.c
--- linux-6.13.12/net/socket.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/socket.c	2025-09-25 17:40:37.927378929 +0200
@@ -1228,6 +1228,32 @@
 	return err;
 }
 
+static DEFINE_MUTEX(fbxbridge_ioctl_mutex);
+static int (*fbxbridge_ioctl_hook)(struct net *, unsigned int cmd, void __user *arg) = NULL;
+
+void fbxbridge_set(int (*hook)(struct net *, unsigned int, void __user *));
+void fbxbridge_set(int (*hook)(struct net *, unsigned int, void __user *))
+{
+	mutex_lock(&fbxbridge_ioctl_mutex);
+	fbxbridge_ioctl_hook = hook;
+	mutex_unlock(&fbxbridge_ioctl_mutex);
+}
+EXPORT_SYMBOL(fbxbridge_set);
+static DEFINE_MUTEX(fbxdiverter_ioctl_mutex);
+static int (*fbxdiverter_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg) = NULL;
+
+void fbxdiverter_ioctl_set(int (*hook) (struct net *, unsigned int,
+					void __user *));
+void fbxdiverter_ioctl_set(int (*hook) (struct net *, unsigned int,
+					void __user *))
+{
+	mutex_lock(&fbxdiverter_ioctl_mutex);
+	fbxdiverter_ioctl_hook = hook;
+	mutex_unlock(&fbxdiverter_ioctl_mutex);
+}
+
+EXPORT_SYMBOL(fbxdiverter_ioctl_set);
+
 /*
  *	With an ioctl, arg may well be a user mode pointer, but we don't know
  *	what to do with it - that's up to the protocol still.
@@ -1299,6 +1325,17 @@
 
 			err = open_related_ns(&net->ns, get_net_ns);
 			break;
+		case SIOCGFBXDIVERT:
+		case SIOCSFBXDIVERT:
+			err = -ENOPKG;
+			if (!fbxdiverter_ioctl_hook)
+				request_module("fbxdiverter");
+
+			mutex_lock(&fbxdiverter_ioctl_mutex);
+			if (fbxdiverter_ioctl_hook)
+				err = fbxdiverter_ioctl_hook(net, cmd, argp);
+			mutex_unlock(&fbxdiverter_ioctl_mutex);
+			break;
 		case SIOCGSTAMP_OLD:
 		case SIOCGSTAMPNS_OLD:
 			if (!ops->gettstamp) {
@@ -1324,6 +1361,17 @@
 			err = dev_ifconf(net, argp);
 			break;
 
+		case SIOCGFBXBRIDGE:
+		case SIOCSFBXBRIDGE:
+			err = -ENOPKG;
+			if (!fbxbridge_ioctl_hook)
+				request_module("fbxbridge");
+
+			mutex_lock(&fbxbridge_ioctl_mutex);
+			if (fbxbridge_ioctl_hook)
+				err = fbxbridge_ioctl_hook(net, cmd, argp);
+			mutex_unlock(&fbxbridge_ioctl_mutex);
+			break;
 		default:
 			err = sock_do_ioctl(net, sock, cmd, arg);
 			break;
diff -ruw linux-6.13.12/net/unix/Kconfig linux-6.13.12-fbx/net/unix/Kconfig
--- linux-6.13.12/net/unix/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/unix/Kconfig	2025-09-25 17:40:37.951379048 +0200
@@ -21,6 +21,9 @@
 	depends on UNIX
 	default y
 
+config UNIX_ABSTRACT_IGNORE_NETNS
+	bool "make abstract namespace global to all network namespaces"
+
 config UNIX_DIAG
 	tristate "UNIX: socket monitoring interface"
 	depends on UNIX
diff -ruw linux-6.13.12/net/unix/af_unix.c linux-6.13.12-fbx/net/unix/af_unix.c
--- linux-6.13.12/net/unix/af_unix.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/unix/af_unix.c	2025-09-25 17:40:37.951379048 +0200
@@ -1183,8 +1183,19 @@
 
 	if (sunaddr->sun_path[0])
 		sk = unix_find_bsd(sunaddr, addr_len, type);
-	else
+	else {
+#ifdef CONFIG_UNIX_ABSTRACT_IGNORE_NETNS
+		down_read(&net_rwsem);
+		for_each_net(net) {
+#endif
 		sk = unix_find_abstract(net, sunaddr, addr_len, type);
+#ifdef CONFIG_UNIX_ABSTRACT_IGNORE_NETNS
+			if (!IS_ERR(sk))
+				break;
+		}
+		up_read(&net_rwsem);
+#endif
+	}
 
 	return sk;
 }
diff -ruw linux-6.13.12/net/wireless/Kconfig linux-6.13.12-fbx/net/wireless/Kconfig
--- linux-6.13.12/net/wireless/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/Kconfig	2025-09-25 17:40:37.951379048 +0200
@@ -174,6 +174,9 @@
 
 	  If unsure, say N.
 
+config CFG80211_DFS_CACHE
+	bool "keep each channel dfs state in global cache"
+
 config CFG80211_CRDA_SUPPORT
 	bool "support CRDA" if EXPERT
 	default y
@@ -201,4 +204,9 @@
 
 	  If unsure, say N.
 
+config FBX80211
+	bool "fbx genl family"
+	help
+	  Support for freebox specific genl family
+
 endif # CFG80211
diff -ruw linux-6.13.12/net/wireless/Makefile linux-6.13.12-fbx/net/wireless/Makefile
--- linux-6.13.12/net/wireless/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/Makefile	2025-09-25 17:40:37.951379048 +0200
@@ -6,7 +6,7 @@
 obj-$(CONFIG_WEXT_PROC) += wext-proc.o
 obj-$(CONFIG_WEXT_PRIV) += wext-priv.o
 
-cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
+cfg80211-y += core.o sysfs.o sysfs-radio.o radiotap.o util.o reg.o scan.o nl80211.o
 cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o trace.o ocb.o
 cfg80211-y += pmsr.o
 cfg80211-$(CONFIG_OF) += of.o
@@ -20,6 +20,8 @@
 cfg80211-y += extra-certs.o
 endif
 
+cfg80211-$(CONFIG_FBX80211) += nlfbx.o
+
 $(obj)/shipped-certs.c: $(sort $(wildcard $(src)/certs/*.hex))
 	@$(kecho) "  GEN     $@"
 	$(Q)(echo '#include "reg.h"'; \
diff -ruw linux-6.13.12/net/wireless/chan.c linux-6.13.12-fbx/net/wireless/chan.c
--- linux-6.13.12/net/wireless/chan.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/chan.c	2025-09-25 17:40:37.955379068 +0200
@@ -55,6 +55,56 @@
 }
 EXPORT_SYMBOL(cfg80211_chandef_create);
 
+static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
+{
+	return nl80211_chan_width_to_mhz(c->width);
+}
+
+static u32 cfg80211_get_start_freq(const struct cfg80211_chan_def *chandef,
+				   u32 cf)
+{
+	u32 start_freq, center_freq, bandwidth;
+
+	center_freq = MHZ_TO_KHZ((cf == 1) ?
+			chandef->center_freq1 : chandef->center_freq2);
+	bandwidth = MHZ_TO_KHZ(cfg80211_chandef_get_width(chandef));
+
+	if (bandwidth <= MHZ_TO_KHZ(20))
+		start_freq = center_freq;
+	else
+		start_freq = center_freq - bandwidth / 2 + MHZ_TO_KHZ(10);
+
+	return start_freq;
+}
+
+static u32 cfg80211_get_end_freq(const struct cfg80211_chan_def *chandef,
+				 u32 cf)
+{
+	u32 end_freq, center_freq, bandwidth;
+
+	center_freq = MHZ_TO_KHZ((cf == 1) ?
+			chandef->center_freq1 : chandef->center_freq2);
+	bandwidth = MHZ_TO_KHZ(cfg80211_chandef_get_width(chandef));
+
+	if (bandwidth <= MHZ_TO_KHZ(20))
+		end_freq = center_freq;
+	else
+		end_freq = center_freq + bandwidth / 2 - MHZ_TO_KHZ(10);
+
+	return end_freq;
+}
+
+#define for_each_subchan(chandef, freq, cf)				\
+	for (u32 punctured = chandef->punctured,			\
+	     cf = 1, freq = cfg80211_get_start_freq(chandef, cf);	\
+	     freq <= cfg80211_get_end_freq(chandef, cf);		\
+	     freq += MHZ_TO_KHZ(20),					\
+	     ((cf == 1 && chandef->center_freq2 != 0 &&			\
+	       freq > cfg80211_get_end_freq(chandef, cf)) ?		\
+	      (cf++, freq = cfg80211_get_start_freq(chandef, cf),	\
+	       punctured = 0) : (punctured >>= 1)))			\
+		if (!(punctured & 1))
+
 struct cfg80211_per_bw_puncturing_values {
 	u8 len;
 	const u16 *valid_values;
@@ -258,11 +308,6 @@
 }
 EXPORT_SYMBOL(nl80211_chan_width_to_mhz);
 
-static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c)
-{
-	return nl80211_chan_width_to_mhz(c->width);
-}
-
 static bool cfg80211_valid_center_freq(u32 center,
 				       enum nl80211_chan_width width)
 {
@@ -582,29 +627,82 @@
 }
 EXPORT_SYMBOL(cfg80211_chandef_compatible);
 
-static void cfg80211_set_chans_dfs_state(struct wiphy *wiphy, u32 center_freq,
-					 u32 bandwidth,
-					 enum nl80211_dfs_state dfs_state)
+static inline u32
+dfs_cache_channel_to_khz(const struct cfg80211_chan_dfs_cache *cd)
 {
-	struct ieee80211_channel *c;
+	return MHZ_TO_KHZ(cd->center_freq) + cd->freq_offset;
+}
+
+static struct cfg80211_chan_dfs_cache *
+__get_dfs_chan_cache(struct ieee80211_channel *c)
+{
+	struct cfg80211_chan_dfs_cache *cd;
 	u32 freq;
 
-	for (freq = center_freq - bandwidth/2 + 10;
-	     freq <= center_freq + bandwidth/2 - 10;
-	     freq += 20) {
-		c = ieee80211_get_channel(wiphy, freq);
-		if (!c || !(c->flags & IEEE80211_CHAN_RADAR))
-			continue;
+	freq = ieee80211_channel_to_khz(c);
+	list_for_each_entry(cd, &cfg80211_dfs_cache.bands[c->band], next) {
+		if (dfs_cache_channel_to_khz(cd) == freq)
+			return cd;
+	}
+	return NULL;
+}
 
-		c->dfs_state = dfs_state;
-		c->dfs_state_entered = jiffies;
+struct cfg80211_chan_dfs_cache *
+cfg80211_get_dfs_chan_cache(struct ieee80211_channel *c)
+{
+	struct cfg80211_chan_dfs_cache *cd;
+	u32 freq;
+
+	freq = ieee80211_channel_to_khz(c);
+	list_for_each_entry(cd, &cfg80211_dfs_cache.bands[c->band], next) {
+		if (dfs_cache_channel_to_khz(cd) == freq)
+			return cd;
+	}
+	return NULL;
+}
+
+void cfg80211_flush_dfs_cache(void)
+{
+	struct cfg80211_chan_dfs_cache *cd, *tmp;
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(cfg80211_dfs_cache.bands); i++) {
+		list_for_each_entry_safe(cd, tmp,
+					 &cfg80211_dfs_cache.bands[i], next) {
+			list_del(&cd->next);
+			kfree(cd);
+		}
+	}
 	}
+
+static void set_dfs_cache_state(struct ieee80211_channel *c,
+				enum nl80211_dfs_state dfs_state)
+{
+	struct cfg80211_chan_dfs_cache *cd;
+
+	mutex_lock(&cfg80211_dfs_cache.mtx);
+
+	cd = __get_dfs_chan_cache(c);
+	if (!cd) {
+		cd = kzalloc(sizeof (*cd), GFP_KERNEL);
+		if (!cd)
+			goto out;
+		cd->center_freq = c->center_freq;
+		cd->freq_offset = c->freq_offset;
+		list_add(&cd->next, &cfg80211_dfs_cache.bands[c->band]);
+	}
+
+	cd->dfs_state = dfs_state;
+	cd->dfs_state_entered = jiffies;
+out:
+	mutex_unlock(&cfg80211_dfs_cache.mtx);
 }
 
 void cfg80211_set_dfs_state(struct wiphy *wiphy,
 			    const struct cfg80211_chan_def *chandef,
 			    enum nl80211_dfs_state dfs_state)
 {
+	struct ieee80211_channel *c;
 	int width;
 
 	if (WARN_ON(!cfg80211_chandef_valid(chandef)))
@@ -614,41 +712,17 @@
 	if (width < 0)
 		return;
 
-	cfg80211_set_chans_dfs_state(wiphy, chandef->center_freq1,
-				     width, dfs_state);
-
-	if (!chandef->center_freq2)
-		return;
-	cfg80211_set_chans_dfs_state(wiphy, chandef->center_freq2,
-				     width, dfs_state);
-}
-
-static u32 cfg80211_get_start_freq(u32 center_freq,
-				   u32 bandwidth)
-{
-	u32 start_freq;
+	for_each_subchan(chandef, freq, cf) {
+		c = ieee80211_get_channel_khz(wiphy, freq);
+		if (!c || !(c->flags & IEEE80211_CHAN_RADAR))
+			continue;
 
-	bandwidth = MHZ_TO_KHZ(bandwidth);
-	if (bandwidth <= MHZ_TO_KHZ(20))
-		start_freq = center_freq;
-	else
-		start_freq = center_freq - bandwidth / 2 + MHZ_TO_KHZ(10);
+		if (IS_ENABLED(CONFIG_CFG80211_DFS_CACHE))
+			set_dfs_cache_state(c, dfs_state);
 
-	return start_freq;
+		c->dfs_state = dfs_state;
+		c->dfs_state_entered = jiffies;
 }
-
-static u32 cfg80211_get_end_freq(u32 center_freq,
-				 u32 bandwidth)
-{
-	u32 end_freq;
-
-	bandwidth = MHZ_TO_KHZ(bandwidth);
-	if (bandwidth <= MHZ_TO_KHZ(20))
-		end_freq = center_freq;
-	else
-		end_freq = center_freq + bandwidth / 2 - MHZ_TO_KHZ(10);
-
-	return end_freq;
 }
 
 static bool
@@ -725,17 +799,12 @@
 }
 
 static int cfg80211_get_chans_dfs_required(struct wiphy *wiphy,
-					    u32 center_freq,
-					    u32 bandwidth,
+					   const struct cfg80211_chan_def *chandef,
 					    enum nl80211_iftype iftype)
 {
 	struct ieee80211_channel *c;
-	u32 freq, start_freq, end_freq;
-
-	start_freq = cfg80211_get_start_freq(center_freq, bandwidth);
-	end_freq = cfg80211_get_end_freq(center_freq, bandwidth);
 
-	for (freq = start_freq; freq <= end_freq; freq += MHZ_TO_KHZ(20)) {
+	for_each_subchan(chandef, freq, cf) {
 		c = ieee80211_get_channel_khz(wiphy, freq);
 		if (!c)
 			return -EINVAL;
@@ -768,25 +837,9 @@
 		if (width < 0)
 			return -EINVAL;
 
-		ret = cfg80211_get_chans_dfs_required(wiphy,
-					ieee80211_chandef_to_khz(chandef),
-					width, iftype);
-		if (ret < 0)
-			return ret;
-		else if (ret > 0)
-			return BIT(chandef->width);
-
-		if (!chandef->center_freq2)
-			return 0;
-
-		ret = cfg80211_get_chans_dfs_required(wiphy,
-					MHZ_TO_KHZ(chandef->center_freq2),
-					width, iftype);
-		if (ret < 0)
-			return ret;
-		else if (ret > 0)
-			return BIT(chandef->width);
+		ret = cfg80211_get_chans_dfs_required(wiphy, chandef, iftype);
 
+		return (ret > 0) ? BIT(chandef->width) : ret;
 		break;
 	case NL80211_IFTYPE_STATION:
 	case NL80211_IFTYPE_OCB:
@@ -806,16 +859,18 @@
 }
 EXPORT_SYMBOL(cfg80211_chandef_dfs_required);
 
-static int cfg80211_get_chans_dfs_usable(struct wiphy *wiphy,
-					 u32 center_freq,
-					 u32 bandwidth)
+bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy,
+				 const struct cfg80211_chan_def *chandef)
 {
 	struct ieee80211_channel *c;
-	u32 freq, start_freq, end_freq;
-	int count = 0;
+	int width, count = 0;
 
-	start_freq = cfg80211_get_start_freq(center_freq, bandwidth);
-	end_freq = cfg80211_get_end_freq(center_freq, bandwidth);
+	if (WARN_ON(!cfg80211_chandef_valid(chandef)))
+		return false;
+
+	width = cfg80211_chandef_get_width(chandef);
+	if (width < 0)
+		return false;
 
 	/*
 	 * Check entire range of channels for the bandwidth.
@@ -823,61 +878,24 @@
 	 * DFS_AVAILABLE). Return number of usable channels
 	 * (require CAC). Allow DFS and non-DFS channel mix.
 	 */
-	for (freq = start_freq; freq <= end_freq; freq += MHZ_TO_KHZ(20)) {
+	for_each_subchan(chandef, freq, cf) {
 		c = ieee80211_get_channel_khz(wiphy, freq);
 		if (!c)
-			return -EINVAL;
+			return false;
 
 		if (c->flags & IEEE80211_CHAN_DISABLED)
-			return -EINVAL;
+			return false;
 
 		if (c->flags & IEEE80211_CHAN_RADAR) {
 			if (c->dfs_state == NL80211_DFS_UNAVAILABLE)
-				return -EINVAL;
+				return false;
 
 			if (c->dfs_state == NL80211_DFS_USABLE)
 				count++;
 		}
 	}
 
-	return count;
-}
-
-bool cfg80211_chandef_dfs_usable(struct wiphy *wiphy,
-				 const struct cfg80211_chan_def *chandef)
-{
-	int width;
-	int r1, r2 = 0;
-
-	if (WARN_ON(!cfg80211_chandef_valid(chandef)))
-		return false;
-
-	width = cfg80211_chandef_get_width(chandef);
-	if (width < 0)
-		return false;
-
-	r1 = cfg80211_get_chans_dfs_usable(wiphy,
-					   MHZ_TO_KHZ(chandef->center_freq1),
-					   width);
-
-	if (r1 < 0)
-		return false;
-
-	switch (chandef->width) {
-	case NL80211_CHAN_WIDTH_80P80:
-		WARN_ON(!chandef->center_freq2);
-		r2 = cfg80211_get_chans_dfs_usable(wiphy,
-					MHZ_TO_KHZ(chandef->center_freq2),
-					width);
-		if (r2 < 0)
-			return false;
-		break;
-	default:
-		WARN_ON(chandef->center_freq2);
-		break;
-	}
-
-	return (r1 + r2 > 0);
+	return count > 0;
 }
 EXPORT_SYMBOL(cfg80211_chandef_dfs_usable);
 
@@ -1039,10 +1057,10 @@
 		if (!reg_dfs_domain_same(wiphy, &rdev->wiphy))
 			continue;
 
-		wiphy_lock(&rdev->wiphy);
+		guard(wiphy)(&rdev->wiphy);
+
 		found = cfg80211_is_wiphy_oper_chan(&rdev->wiphy, chan) ||
 			cfg80211_offchan_chain_is_active(rdev, chan);
-		wiphy_unlock(&rdev->wiphy);
 
 		if (found)
 			return true;
@@ -1051,26 +1069,29 @@
 	return false;
 }
 
-static bool cfg80211_get_chans_dfs_available(struct wiphy *wiphy,
-					     u32 center_freq,
-					     u32 bandwidth)
+static bool cfg80211_chandef_dfs_available(struct wiphy *wiphy,
+				const struct cfg80211_chan_def *chandef)
 {
 	struct ieee80211_channel *c;
-	u32 freq, start_freq, end_freq;
+	int width;
 	bool dfs_offload;
 
+	if (WARN_ON(!cfg80211_chandef_valid(chandef)))
+		return false;
+
+	width = cfg80211_chandef_get_width(chandef);
+	if (width < 0)
+		return false;
+
 	dfs_offload = wiphy_ext_feature_isset(wiphy,
 					      NL80211_EXT_FEATURE_DFS_OFFLOAD);
 
-	start_freq = cfg80211_get_start_freq(center_freq, bandwidth);
-	end_freq = cfg80211_get_end_freq(center_freq, bandwidth);
-
 	/*
 	 * Check entire range of channels for the bandwidth.
 	 * If any channel in between is disabled or has not
 	 * had gone through CAC return false
 	 */
-	for (freq = start_freq; freq <= end_freq; freq += MHZ_TO_KHZ(20)) {
+	for_each_subchan(chandef, freq, cf) {
 		c = ieee80211_get_channel_khz(wiphy, freq);
 		if (!c)
 			return false;
@@ -1087,124 +1108,54 @@
 	return true;
 }
 
-static bool cfg80211_chandef_dfs_available(struct wiphy *wiphy,
+unsigned int
+cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
 				const struct cfg80211_chan_def *chandef)
 {
+	struct ieee80211_channel *c;
 	int width;
-	int r;
+	unsigned int t1 = 0, t2 = 0;
 
 	if (WARN_ON(!cfg80211_chandef_valid(chandef)))
-		return false;
+		return 0;
 
 	width = cfg80211_chandef_get_width(chandef);
 	if (width < 0)
-		return false;
-
-	r = cfg80211_get_chans_dfs_available(wiphy,
-					     MHZ_TO_KHZ(chandef->center_freq1),
-					     width);
-
-	/* If any of channels unavailable for cf1 just return */
-	if (!r)
-		return r;
-
-	switch (chandef->width) {
-	case NL80211_CHAN_WIDTH_80P80:
-		WARN_ON(!chandef->center_freq2);
-		r = cfg80211_get_chans_dfs_available(wiphy,
-					MHZ_TO_KHZ(chandef->center_freq2),
-					width);
-		break;
-	default:
-		WARN_ON(chandef->center_freq2);
-		break;
-	}
-
-	return r;
-}
-
-static unsigned int cfg80211_get_chans_dfs_cac_time(struct wiphy *wiphy,
-						    u32 center_freq,
-						    u32 bandwidth)
-{
-	struct ieee80211_channel *c;
-	u32 start_freq, end_freq, freq;
-	unsigned int dfs_cac_ms = 0;
-
-	start_freq = cfg80211_get_start_freq(center_freq, bandwidth);
-	end_freq = cfg80211_get_end_freq(center_freq, bandwidth);
-
-	for (freq = start_freq; freq <= end_freq; freq += MHZ_TO_KHZ(20)) {
-		c = ieee80211_get_channel_khz(wiphy, freq);
-		if (!c)
 			return 0;
 
-		if (c->flags & IEEE80211_CHAN_DISABLED)
-			return 0;
+	for_each_subchan(chandef, freq, cf) {
+		c = ieee80211_get_channel_khz(wiphy, freq);
+		if (!c || (c->flags & IEEE80211_CHAN_DISABLED)) {
+			if (cf == 1)
+				t1 = INT_MAX;
+			else
+				t2 = INT_MAX;
+			continue;
+		}
 
 		if (!(c->flags & IEEE80211_CHAN_RADAR))
 			continue;
 
-		if (c->dfs_cac_ms > dfs_cac_ms)
-			dfs_cac_ms = c->dfs_cac_ms;
-	}
+		if (cf == 1 && c->dfs_cac_ms > t1)
+			t1 = c->dfs_cac_ms;
 
-	return dfs_cac_ms;
+		if (cf == 2 && c->dfs_cac_ms > t2)
+			t2 = c->dfs_cac_ms;
 }
 
-unsigned int
-cfg80211_chandef_dfs_cac_time(struct wiphy *wiphy,
-			      const struct cfg80211_chan_def *chandef)
-{
-	int width;
-	unsigned int t1 = 0, t2 = 0;
-
-	if (WARN_ON(!cfg80211_chandef_valid(chandef)))
+	if (t1 == INT_MAX && t2 == INT_MAX)
 		return 0;
 
-	width = cfg80211_chandef_get_width(chandef);
-	if (width < 0)
-		return 0;
+	if (t1 == INT_MAX)
+		return t2;
 
-	t1 = cfg80211_get_chans_dfs_cac_time(wiphy,
-					     MHZ_TO_KHZ(chandef->center_freq1),
-					     width);
-
-	if (!chandef->center_freq2)
+	if (t2 == INT_MAX)
 		return t1;
 
-	t2 = cfg80211_get_chans_dfs_cac_time(wiphy,
-					     MHZ_TO_KHZ(chandef->center_freq2),
-					     width);
-
 	return max(t1, t2);
 }
 EXPORT_SYMBOL(cfg80211_chandef_dfs_cac_time);
 
-static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy,
-					u32 center_freq, u32 bandwidth,
-					u32 prohibited_flags,
-					u32 permitting_flags)
-{
-	struct ieee80211_channel *c;
-	u32 freq, start_freq, end_freq;
-
-	start_freq = cfg80211_get_start_freq(center_freq, bandwidth);
-	end_freq = cfg80211_get_end_freq(center_freq, bandwidth);
-
-	for (freq = start_freq; freq <= end_freq; freq += MHZ_TO_KHZ(20)) {
-		c = ieee80211_get_channel_khz(wiphy, freq);
-		if (!c)
-			return false;
-		if (c->flags & permitting_flags)
-			continue;
-		if (c->flags & prohibited_flags)
-			return false;
-	}
-
-	return true;
-}
-
 /* check if the operating channels are valid and supported */
 static bool cfg80211_edmg_usable(struct wiphy *wiphy, u8 edmg_channels,
 				 enum ieee80211_edmg_bw_config edmg_bw_config,
@@ -1270,6 +1221,7 @@
 	bool ext_nss_cap, support_80_80 = false, support_320 = false;
 	const struct ieee80211_sband_iftype_data *iftd;
 	struct ieee80211_supported_band *sband;
+	struct ieee80211_channel *c;
 	int i;
 
 	if (WARN_ON(!cfg80211_chandef_valid(chandef)))
@@ -1420,19 +1372,17 @@
 	if (width < 20)
 		prohibited_flags |= IEEE80211_CHAN_NO_OFDM;
 
-
-	if (!cfg80211_secondary_chans_ok(wiphy,
-					 ieee80211_chandef_to_khz(chandef),
-					 width, prohibited_flags,
-					 permitting_flags))
+	for_each_subchan(chandef, freq, cf) {
+		c = ieee80211_get_channel_khz(wiphy, freq);
+		if (!c)
 		return false;
+		if (c->flags & permitting_flags)
+			continue;
+		if (c->flags & prohibited_flags)
+			return false;
+	}
 
-	if (!chandef->center_freq2)
 		return true;
-	return cfg80211_secondary_chans_ok(wiphy,
-					   MHZ_TO_KHZ(chandef->center_freq2),
-					   width, prohibited_flags,
-					   permitting_flags);
 }
 
 bool cfg80211_chandef_usable(struct wiphy *wiphy,
diff -ruw linux-6.13.12/net/wireless/core.c linux-6.13.12-fbx/net/wireless/core.c
--- linux-6.13.12/net/wireless/core.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/core.c	2025-09-29 14:23:07.625732509 +0200
@@ -25,6 +25,7 @@
 #include <net/genetlink.h>
 #include <net/cfg80211.h>
 #include "nl80211.h"
+#include "nlfbx.h"
 #include "core.h"
 #include "sysfs.h"
 #include "debugfs.h"
@@ -54,6 +55,10 @@
 MODULE_PARM_DESC(cfg80211_disable_40mhz_24ghz,
 		 "Disable 40MHz support in the 2.4GHz band");
 
+/* global dfs cache */
+struct cfg80211_dfs_cache cfg80211_dfs_cache;
+static struct dentry *cfg80211_dfs_cache_debugfs;
+
 struct cfg80211_registered_device *cfg80211_rdev_by_wiphy_idx(int wiphy_idx)
 {
 	struct cfg80211_registered_device *result = NULL, *rdev;
@@ -143,10 +148,7 @@
 	if (result)
 		return result;
 
-	if (!IS_ERR_OR_NULL(rdev->wiphy.debugfsdir))
-		debugfs_rename(rdev->wiphy.debugfsdir->d_parent,
-			       rdev->wiphy.debugfsdir,
-			       rdev->wiphy.debugfsdir->d_parent, newname);
+	debugfs_change_name(rdev->wiphy.debugfsdir, "%s", newname);
 
 	nl80211_notify_wiphy(rdev, NL80211_CMD_NEW_WIPHY);
 
@@ -191,7 +193,8 @@
 		return err;
 	}
 
-	wiphy_lock(&rdev->wiphy);
+	guard(wiphy)(&rdev->wiphy);
+
 	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		if (!wdev->netdev)
 			continue;
@@ -212,7 +215,6 @@
 			continue;
 		nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
 	}
-	wiphy_unlock(&rdev->wiphy);
 
 	return 0;
 }
@@ -221,9 +223,9 @@
 {
 	struct cfg80211_registered_device *rdev = data;
 
-	wiphy_lock(&rdev->wiphy);
+	guard(wiphy)(&rdev->wiphy);
+
 	rdev_rfkill_poll(rdev);
-	wiphy_unlock(&rdev->wiphy);
 }
 
 void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
@@ -283,7 +285,7 @@
 
 		/* otherwise, check iftype */
 
-		wiphy_lock(wiphy);
+		guard(wiphy)(wiphy);
 
 		switch (wdev->iftype) {
 		case NL80211_IFTYPE_P2P_DEVICE:
@@ -295,8 +297,6 @@
 		default:
 			break;
 		}
-
-		wiphy_unlock(wiphy);
 	}
 }
 EXPORT_SYMBOL_GPL(cfg80211_shutdown_all_interfaces);
@@ -331,9 +331,9 @@
 	rdev = container_of(work, struct cfg80211_registered_device,
 			    event_work);
 
-	wiphy_lock(&rdev->wiphy);
+	guard(wiphy)(&rdev->wiphy);
+
 	cfg80211_process_rdev_events(rdev);
-	wiphy_unlock(&rdev->wiphy);
 }
 
 void cfg80211_destroy_ifaces(struct cfg80211_registered_device *rdev)
@@ -347,10 +347,10 @@
 			if (wdev->netdev)
 				dev_close(wdev->netdev);
 
-			wiphy_lock(&rdev->wiphy);
+			guard(wiphy)(&rdev->wiphy);
+
 			cfg80211_leave(rdev, wdev);
 			cfg80211_remove_virtual_intf(rdev, wdev);
-			wiphy_unlock(&rdev->wiphy);
 		}
 	}
 }
@@ -423,9 +423,9 @@
 
 	trace_wiphy_work_worker_start(&rdev->wiphy);
 
-	wiphy_lock(&rdev->wiphy);
+	guard(wiphy)(&rdev->wiphy);
 	if (rdev->suspended)
-		goto out;
+		return;
 
 	spin_lock_irq(&rdev->wiphy_work_lock);
 	wk = list_first_entry_or_null(&rdev->wiphy_work_list,
@@ -441,8 +441,6 @@
 	} else {
 		spin_unlock_irq(&rdev->wiphy_work_lock);
 	}
-out:
-	wiphy_unlock(&rdev->wiphy);
 }
 
 /* exported functions */
@@ -598,6 +596,7 @@
 
 	rdev->wiphy.max_sched_scan_plans = 1;
 	rdev->wiphy.max_sched_scan_plan_interval = U32_MAX;
+	rdev->wiphy.dev_port = -1;
 
 	return &rdev->wiphy;
 }
@@ -1073,6 +1072,58 @@
 	rdev->wiphy.registered = true;
 	rtnl_unlock();
 
+	/* register individual radio objects */
+	wiphy->n_radio_dev = wiphy->n_radio;
+	if (!wiphy->n_radio_dev)
+		wiphy->n_radio_dev = 1;
+
+	wiphy->radio_devices = kcalloc(wiphy->n_radio_dev,
+				       sizeof (*wiphy->radio_devices),
+				       GFP_KERNEL);
+	if (!wiphy->radio_devices) {
+		wiphy_unregister(&rdev->wiphy);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < wiphy->n_radio_dev; i++) {
+		struct wiphy_radio_dev *radio;
+
+		radio = kzalloc(sizeof (*radio), GFP_KERNEL);
+		if (!radio) {
+			wiphy_unregister(&rdev->wiphy);
+			return -ENOMEM;
+		}
+
+		radio->idx = i;
+		device_initialize(&radio->dev);
+		radio->dev.parent = &wiphy->dev;
+		radio->dev.class = &ieee80211_radio_class;
+		radio->dev.platform_data = radio;
+		radio->wiphy = wiphy;
+
+		wiphy->radio_devices[i] = radio;
+	}
+
+	for (i = 0; i < wiphy->n_radio_dev; i++) {
+		struct wiphy_radio_dev *radio = wiphy->radio_devices[i];
+
+		res = dev_set_name(&radio->dev, "%s_radio%d",
+				   wiphy_name(wiphy),
+				   i);
+		if (res) {
+			wiphy_unregister(&rdev->wiphy);
+			return res;
+		}
+
+		res = device_add(&radio->dev);
+		if (res) {
+			wiphy_unregister(&rdev->wiphy);
+			return res;
+		}
+
+		get_device(&wiphy->dev);
+	}
+
 	res = rfkill_register(rdev->wiphy.rfkill);
 	if (res) {
 		rfkill_destroy(rdev->wiphy.rfkill);
@@ -1081,6 +1132,23 @@
 		return res;
 	}
 
+	/* Allocate radio configuration space for multi-radio wiphy.
+	 */
+	if (wiphy->n_radio) {
+		int idx;
+
+		wiphy->radio_cfg = kcalloc(wiphy->n_radio, sizeof(*wiphy->radio_cfg),
+					   GFP_KERNEL);
+		if (!wiphy->radio_cfg)
+			return -ENOMEM;
+		/*
+		 * Initialize wiphy radio parameters to IEEE 802.11 MIB default values.
+		 * RTS threshold is disabled by default with the special -1 value.
+		 */
+		for (idx = 0; idx < wiphy->n_radio; idx++)
+			wiphy->radio_cfg[idx].rts_threshold = (u32)-1;
+	}
+
 	return 0;
 }
 EXPORT_SYMBOL(wiphy_register);
@@ -1130,6 +1198,7 @@
 void wiphy_unregister(struct wiphy *wiphy)
 {
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+	int i;
 
 	wait_event(rdev->dev_wait, ({
 		int __count;
@@ -1143,6 +1212,18 @@
 
 	rtnl_lock();
 	wiphy_lock(&rdev->wiphy);
+
+	if (wiphy->radio_devices) {
+		for (i = 0; i < wiphy->n_radio_dev; i++) {
+			struct wiphy_radio_dev *radio;
+			radio = wiphy->radio_devices[i];
+			if (!radio)
+				continue;
+			device_del(&radio->dev);
+		}
+		kfree(wiphy->radio_devices);
+	}
+
 	nl80211_notify_wiphy(rdev, NL80211_CMD_DEL_WIPHY);
 	rdev->wiphy.registered = false;
 
@@ -1191,6 +1272,7 @@
 	cfg80211_rdev_free_wowlan(rdev);
 	cfg80211_free_coalesce(rdev->coalesce);
 	rdev->coalesce = NULL;
+	kfree(wiphy->radio_cfg);
 }
 EXPORT_SYMBOL(wiphy_unregister);
 
@@ -1491,6 +1573,7 @@
 	lockdep_assert_held(&rdev->wiphy.mtx);
 
 	/* we'll take care of this */
+	dev->dev_port = rdev->wiphy.dev_port + 1;
 	wdev->registered = true;
 	wdev->registering = true;
 	ret = register_netdevice(dev);
@@ -1533,9 +1616,9 @@
 		break;
 	case NETDEV_REGISTER:
 		if (!wdev->registered) {
-			wiphy_lock(&rdev->wiphy);
+			guard(wiphy)(&rdev->wiphy);
+
 			cfg80211_register_wdev(rdev, wdev);
-			wiphy_unlock(&rdev->wiphy);
 		}
 		break;
 	case NETDEV_UNREGISTER:
@@ -1544,16 +1627,16 @@
 		 * so check wdev->registered.
 		 */
 		if (wdev->registered && !wdev->registering) {
-			wiphy_lock(&rdev->wiphy);
+			guard(wiphy)(&rdev->wiphy);
+
 			_cfg80211_unregister_wdev(wdev, false);
-			wiphy_unlock(&rdev->wiphy);
 		}
 		break;
 	case NETDEV_GOING_DOWN:
-		wiphy_lock(&rdev->wiphy);
+		scoped_guard(wiphy, &rdev->wiphy) {
 		cfg80211_leave(rdev, wdev);
 		cfg80211_remove_links(wdev);
-		wiphy_unlock(&rdev->wiphy);
+		}
 		/* since we just did cfg80211_leave() nothing to do there */
 		cancel_work_sync(&wdev->disconnect_wk);
 		cancel_work_sync(&wdev->pmsr_free_wk);
@@ -1765,6 +1848,40 @@
 }
 EXPORT_SYMBOL_GPL(wiphy_delayed_work_pending);
 
+/*
+ *
+ */
+static int dfs_cache_flush_set(void *data, u64 val)
+{
+	cfg80211_flush_dfs_cache();
+	pr_info("DFS cache flushed\n");
+	return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_dfs_cache_flush, NULL,
+			 dfs_cache_flush_set, "%llu\n");
+
+static void __init dfs_cache_init(void)
+{
+	size_t i;
+
+	mutex_init(&cfg80211_dfs_cache.mtx);
+
+	for (i = 0; i < ARRAY_SIZE(cfg80211_dfs_cache.bands); i++)
+		INIT_LIST_HEAD(&cfg80211_dfs_cache.bands[i]);
+
+	cfg80211_dfs_cache_debugfs =
+		debugfs_create_file_unsafe("dfs_cache_flush", 0200,
+					   ieee80211_debugfs_dir, NULL,
+					   &fops_dfs_cache_flush);
+}
+
+static void __exit dfs_cache_exit(void)
+{
+	cfg80211_flush_dfs_cache();
+	debugfs_remove(cfg80211_dfs_cache_debugfs);
+}
+
 static int __init cfg80211_init(void)
 {
 	int err;
@@ -1777,6 +1894,10 @@
 	if (err)
 		goto out_fail_sysfs;
 
+	err = wiphy_sysfs_radio_init();
+	if (err)
+		goto out_fail_sysfs;
+
 	err = register_netdevice_notifier(&cfg80211_netdev_notifier);
 	if (err)
 		goto out_fail_notifier;
@@ -1785,8 +1906,14 @@
 	if (err)
 		goto out_fail_nl80211;
 
+	err = nlfbx_init();
+	if (err)
+		goto out_fail_nlfbx;
+
 	ieee80211_debugfs_dir = debugfs_create_dir("ieee80211", NULL);
 
+	dfs_cache_init();
+
 	err = regulatory_init();
 	if (err)
 		goto out_fail_reg;
@@ -1802,11 +1929,14 @@
 out_fail_wq:
 	regulatory_exit();
 out_fail_reg:
+	nlfbx_exit();
+out_fail_nlfbx:
 	debugfs_remove(ieee80211_debugfs_dir);
 	nl80211_exit();
 out_fail_nl80211:
 	unregister_netdevice_notifier(&cfg80211_netdev_notifier);
 out_fail_notifier:
+	wiphy_sysfs_radio_exit();
 	wiphy_sysfs_exit();
 out_fail_sysfs:
 	unregister_pernet_device(&cfg80211_pernet_ops);
@@ -1817,9 +1947,11 @@
 
 static void __exit cfg80211_exit(void)
 {
+	dfs_cache_exit();
 	debugfs_remove(ieee80211_debugfs_dir);
 	nl80211_exit();
 	unregister_netdevice_notifier(&cfg80211_netdev_notifier);
+	wiphy_sysfs_radio_exit();
 	wiphy_sysfs_exit();
 	regulatory_exit();
 	unregister_pernet_device(&cfg80211_pernet_ops);
diff -ruw linux-6.13.12/net/wireless/core.h linux-6.13.12-fbx/net/wireless/core.h
--- linux-6.13.12/net/wireless/core.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/core.h	2025-09-25 17:40:37.955379068 +0200
@@ -160,6 +160,30 @@
 extern struct list_head cfg80211_rdev_list;
 extern int cfg80211_rdev_list_generation;
 
+
+/*
+ * DFS cache
+ */
+struct cfg80211_chan_dfs_cache {
+	u32 center_freq;
+	u16 freq_offset;
+
+	enum nl80211_dfs_state dfs_state;
+	unsigned long dfs_state_entered;
+	struct list_head next;
+};
+
+struct cfg80211_dfs_cache {
+	struct list_head bands[NUM_NL80211_BANDS];
+	struct mutex mtx;
+};
+
+extern struct cfg80211_dfs_cache cfg80211_dfs_cache;
+
+struct cfg80211_chan_dfs_cache *
+cfg80211_get_dfs_chan_cache(struct ieee80211_channel *c);
+void cfg80211_flush_dfs_cache(void);
+
 /* This is constructed like this so it can be used in if/else */
 static inline int for_each_rdev_check_rtnl(void)
 {
@@ -567,6 +591,10 @@
 				 struct wireless_dev *wdev);
 void cfg80211_wdev_release_link_bsses(struct wireless_dev *wdev, u16 link_mask);
 
+int cfg80211_assoc_ml_reconf(struct cfg80211_registered_device *rdev,
+			     struct net_device *dev,
+			     struct cfg80211_assoc_link *links,
+			     u16 rem_links);
 /**
  * struct cfg80211_colocated_ap - colocated AP information
  *
diff -ruw linux-6.13.12/net/wireless/debugfs.c linux-6.13.12-fbx/net/wireless/debugfs.c
--- linux-6.13.12/net/wireless/debugfs.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/debugfs.c	2025-09-25 17:40:37.955379068 +0200
@@ -29,8 +29,6 @@
 	.llseek = generic_file_llseek,					\
 }
 
-DEBUGFS_READONLY_FILE(rts_threshold, 20, "%d",
-		      wiphy->rts_threshold);
 DEBUGFS_READONLY_FILE(fragmentation_threshold, 20, "%d",
 		      wiphy->frag_threshold);
 DEBUGFS_READONLY_FILE(short_retry_limit, 20, "%d",
@@ -96,6 +94,39 @@
 	.open = simple_open,
 	.llseek = default_llseek,
 };
+
+static ssize_t rts_threshold_read(struct file *file, char __user *user_buf,
+				  size_t count, loff_t *ppos)
+{
+	unsigned int buf_size = PAGE_SIZE, res = 0, i;
+	struct wiphy *wiphy = file->private_data;
+	char *buf;
+	ssize_t r;
+
+	buf = kzalloc(buf_size, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (wiphy->n_radio) {
+		for (i = 0; i < wiphy->n_radio; i++)
+			res += scnprintf(buf + res, buf_size - res, "%d\n",
+					 wiphy->radio_cfg[i].rts_threshold);
+	} else {
+		res = scnprintf(buf, buf_size, "%d\n", wiphy->rts_threshold);
+	}
+
+	r = simple_read_from_buffer(user_buf, count, ppos, buf, res);
+
+	kfree(buf);
+
+	return r;
+}
+
+static const struct file_operations rts_threshold_ops = {
+	.read = rts_threshold_read,
+	.open = simple_open,
+	.llseek = default_llseek,
+};
 
 #define DEBUGFS_ADD(name)						\
 	debugfs_create_file(#name, 0444, phyd, &rdev->wiphy, &name## _ops)
diff -ruw linux-6.13.12/net/wireless/mlme.c linux-6.13.12-fbx/net/wireless/mlme.c
--- linux-6.13.12/net/wireless/mlme.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/mlme.c	2025-09-25 17:40:37.955379068 +0200
@@ -352,6 +352,13 @@
 		return -EINVAL;
 	}
 
+	if (ieee80211_mle_get_ext_mld_capa_op((const u8 *)mle_a) !=
+	    ieee80211_mle_get_ext_mld_capa_op((const u8 *)mle_b)) {
+		NL_SET_ERR_MSG(extack,
+			       "extended link MLD capabilities/ops mismatch");
+		return -EINVAL;
+	}
+
 	return 0;
 }
 
@@ -627,10 +634,10 @@
 	rdev = container_of(wk, struct cfg80211_registered_device,
 			    mgmt_registrations_update_wk);
 
-	wiphy_lock(&rdev->wiphy);
+	guard(wiphy)(&rdev->wiphy);
+
 	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
 		cfg80211_mgmt_registrations_update(wdev);
-	wiphy_unlock(&rdev->wiphy);
 }
 
 int cfg80211_mlme_register_mgmt(struct wireless_dev *wdev, u32 snd_portid,
@@ -1193,10 +1200,10 @@
 			      const struct cfg80211_chan_def *chandef,
 			      enum nl80211_radar_event event)
 {
-	wiphy_lock(&rdev->wiphy);
+	guard(wiphy)(&rdev->wiphy);
+
 	__cfg80211_background_cac_event(rdev, rdev->background_radar_wdev,
 					chandef, event);
-	wiphy_unlock(&rdev->wiphy);
 }
 
 void cfg80211_background_cac_done_wk(struct work_struct *work)
@@ -1287,3 +1294,80 @@
 					&rdev->background_radar_chandef,
 					NL80211_RADAR_CAC_ABORTED);
 }
+
+int cfg80211_assoc_ml_reconf(struct cfg80211_registered_device *rdev,
+			     struct net_device *dev,
+			     struct cfg80211_assoc_link *links,
+			     u16 rem_links)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	int err;
+
+	lockdep_assert_wiphy(wdev->wiphy);
+
+	err = rdev_assoc_ml_reconf(rdev, dev, links, rem_links);
+	if (!err) {
+		int link_id;
+
+		for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+		     link_id++) {
+			if (!links[link_id].bss)
+				continue;
+
+			cfg80211_ref_bss(&rdev->wiphy, links[link_id].bss);
+			cfg80211_hold_bss(bss_from_pub(links[link_id].bss));
+		}
+	}
+
+	return err;
+}
+
+void cfg80211_mlo_reconf_add_done(struct net_device *dev,
+				  struct cfg80211_mlo_reconf_done_data *data)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct wiphy *wiphy = wdev->wiphy;
+	int link_id;
+
+	lockdep_assert_wiphy(wiphy);
+
+	trace_cfg80211_mlo_reconf_add_done(dev, data->added_links,
+					   data->buf, data->len);
+
+	if (WARN_ON(!wdev->valid_links))
+		return;
+
+	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_STATION &&
+		    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT))
+		return;
+
+	/* validate that a BSS is given for each added link */
+	for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) {
+		struct cfg80211_bss *bss = data->links[link_id].bss;
+
+		if (!(data->added_links & BIT(link_id)))
+			continue;
+
+		if (WARN_ON(!bss))
+			return;
+	}
+
+	for (link_id = 0; link_id < ARRAY_SIZE(data->links); link_id++) {
+		struct cfg80211_bss *bss = data->links[link_id].bss;
+
+		if (!bss)
+			continue;
+
+		if (data->added_links & BIT(link_id)) {
+			wdev->links[link_id].client.current_bss =
+				bss_from_pub(bss);
+		} else {
+			cfg80211_unhold_bss(bss_from_pub(bss));
+			cfg80211_put_bss(wiphy, bss);
+		}
+	}
+
+	wdev->valid_links |= data->added_links;
+	nl80211_mlo_reconf_add_done(dev, data);
+}
+EXPORT_SYMBOL(cfg80211_mlo_reconf_add_done);
diff -ruw linux-6.13.12/net/wireless/nl80211.c linux-6.13.12-fbx/net/wireless/nl80211.c
--- linux-6.13.12/net/wireless/nl80211.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/nl80211.c	2025-09-25 17:40:37.959379088 +0200
@@ -294,6 +294,21 @@
 	return 0;
 }
 
+static int validate_supported_selectors(const struct nlattr *attr,
+					struct netlink_ext_ack *extack)
+{
+	const u8 *supported_selectors = nla_data(attr);
+	u8 supported_selectors_len = nla_len(attr);
+
+	/* The top bit must not be set as it is not part of the selector */
+	for (int i = 0; i < supported_selectors_len; i++) {
+		if (supported_selectors[i] & 0x80)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
 /* policy for the attributes */
 static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR];
 
@@ -454,6 +469,8 @@
 	[NL80211_MBSSID_CONFIG_ATTR_INDEX] = { .type = NLA_U8 },
 	[NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX] = { .type = NLA_U32 },
 	[NL80211_MBSSID_CONFIG_ATTR_EMA] = { .type = NLA_FLAG },
+	[NL80211_MBSSID_CONFIG_ATTR_TX_LINK_ID] =
+		NLA_POLICY_MAX(NLA_U8, IEEE80211_MLD_MAX_NUM_LINKS),
 };
 
 static const struct nla_policy
@@ -818,6 +835,7 @@
 	[NL80211_ATTR_MLD_ADDR] = NLA_POLICY_EXACT_LEN(ETH_ALEN),
 	[NL80211_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG },
 	[NL80211_ATTR_MAX_NUM_AKM_SUITES] = { .type = NLA_REJECT },
+	[NL80211_ATTR_EML_CAPABILITY] = { .type = NLA_U16 },
 	[NL80211_ATTR_PUNCT_BITMAP] =
 		NLA_POLICY_FULL_RANGE(NLA_U32, &nl80211_punct_bitmap_range),
 
@@ -830,6 +848,12 @@
 	[NL80211_ATTR_MLO_TTLM_ULINK] = NLA_POLICY_EXACT_LEN(sizeof(u16) * 8),
 	[NL80211_ATTR_ASSOC_SPP_AMSDU] = { .type = NLA_FLAG },
 	[NL80211_ATTR_VIF_RADIO_MASK] = { .type = NLA_U32 },
+	[NL80211_ATTR_SUPPORTED_SELECTORS] =
+		NLA_POLICY_VALIDATE_FN(NLA_BINARY, validate_supported_selectors,
+				       NL80211_MAX_SUPP_SELECTORS),
+	[NL80211_ATTR_MLO_RECONF_REM_LINKS] = { .type = NLA_U16 },
+	[NL80211_ATTR_EPCS] = { .type = NLA_FLAG },
+	[NL80211_ATTR_WIPHY_RADIO_INDEX] = { .type = NLA_U8 },
 };
 
 /* policy for the key attributes */
@@ -2418,6 +2442,7 @@
 static int nl80211_put_radio(struct wiphy *wiphy, struct sk_buff *msg, int idx)
 {
 	const struct wiphy_radio *r = &wiphy->radio[idx];
+	const struct wiphy_radio_cfg *rcfg = &wiphy->radio_cfg[idx];
 	struct nlattr *radio, *freq;
 	int i;
 
@@ -2428,6 +2453,10 @@
 	if (nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_INDEX, idx))
 		goto nla_put_failure;
 
+	if (wiphy->radio_cfg &&
+	    nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_RTS_THRESHOLD, rcfg->rts_threshold))
+		goto nla_put_failure;
+
 	if (r->antenna_mask &&
 	    nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_ANTENNA_MASK,
 			r->antenna_mask))
@@ -3064,6 +3093,7 @@
 				goto nla_put_failure;
 		}
 
+		state->capa_start = 0;
 		state->split_start++;
 		break;
 	case 14:
@@ -3210,7 +3240,8 @@
 						 skb,
 						 NETLINK_CB(cb->skb).portid,
 						 cb->nlh->nlmsg_seq,
-						 NLM_F_MULTI, state);
+						 NLM_F_MULTI,
+						 state);
 			if (ret < 0) {
 				/*
 				 * If sending the wiphy data didn't fit (ENOBUFS
@@ -3587,7 +3618,7 @@
 	int result = 0, rem_txq_params = 0;
 	struct nlattr *nl_txq_params;
 	u32 changed;
-	u8 retry_short = 0, retry_long = 0;
+	u8 retry_short = 0, retry_long = 0, radio_id = NL80211_WIPHY_RADIO_ID_MAX;
 	u32 frag_threshold = 0, rts_threshold = 0;
 	u8 coverage_class = 0;
 	u32 txq_limit = 0, txq_memory_limit = 0, txq_quantum = 0;
@@ -3626,7 +3657,7 @@
 	} else
 		wdev = netdev->ieee80211_ptr;
 
-	wiphy_lock(&rdev->wiphy);
+	guard(wiphy)(&rdev->wiphy);
 
 	/*
 	 * end workaround code, by now the rdev is available
@@ -3639,32 +3670,35 @@
 	rtnl_unlock();
 
 	if (result)
-		goto out;
+		return result;
+
+	if (info->attrs[NL80211_ATTR_WIPHY_RADIO_INDEX]) {
+		/* Radio idx is not expected for non-multi radio wiphy */
+		if (!rdev->wiphy.n_radio)
+			return -EINVAL;
+
+		radio_id = nla_get_u8(info->attrs[NL80211_ATTR_WIPHY_RADIO_INDEX]);
+		if (radio_id >= rdev->wiphy.n_radio)
+			return -EINVAL;
+	}
 
 	if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) {
 		struct ieee80211_txq_params txq_params;
 		struct nlattr *tb[NL80211_TXQ_ATTR_MAX + 1];
 
-		if (!rdev->ops->set_txq_params) {
-			result = -EOPNOTSUPP;
-			goto out;
-		}
+		if (!rdev->ops->set_txq_params)
+			return -EOPNOTSUPP;
 
-		if (!netdev) {
-			result = -EINVAL;
-			goto out;
-		}
+		if (!netdev)
+			return -EINVAL;
 
 		if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
-		    netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) {
-			result = -EINVAL;
-			goto out;
-		}
+		    netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_MONITOR &&
+		    netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
+			return -EINVAL;
 
-		if (!netif_running(netdev)) {
-			result = -ENETDOWN;
-			goto out;
-		}
+		if (!netif_running(netdev))
+			return -ENETDOWN;
 
 		nla_for_each_nested(nl_txq_params,
 				    info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
@@ -3675,10 +3709,11 @@
 							     txq_params_policy,
 							     info->extack);
 			if (result)
-				goto out;
+				return result;
+
 			result = parse_txq_params(tb, &txq_params);
 			if (result)
-				goto out;
+				return result;
 
 			txq_params.link_id =
 				nl80211_link_id_or_invalid(info->attrs);
@@ -3694,7 +3729,7 @@
 				result = rdev_set_txq_params(rdev, netdev,
 							     &txq_params);
 			if (result)
-				goto out;
+				return result;
 		}
 	}
 
@@ -3711,7 +3746,7 @@
 		}
 
 		if (result)
-			goto out;
+			return result;
 	}
 
 	if (info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) {
@@ -3722,28 +3757,24 @@
 		if (!(rdev->wiphy.features & NL80211_FEATURE_VIF_TXPOWER))
 			txp_wdev = NULL;
 
-		if (!rdev->ops->set_tx_power) {
-			result = -EOPNOTSUPP;
-			goto out;
-		}
+		if (!rdev->ops->set_tx_power)
+			return -EOPNOTSUPP;
 
 		idx = NL80211_ATTR_WIPHY_TX_POWER_SETTING;
 		type = nla_get_u32(info->attrs[idx]);
 
 		if (!info->attrs[NL80211_ATTR_WIPHY_TX_POWER_LEVEL] &&
-		    (type != NL80211_TX_POWER_AUTOMATIC)) {
-			result = -EINVAL;
-			goto out;
-		}
+		    (type != NL80211_TX_POWER_AUTOMATIC))
+			return -EINVAL;
 
 		if (type != NL80211_TX_POWER_AUTOMATIC) {
 			idx = NL80211_ATTR_WIPHY_TX_POWER_LEVEL;
 			mbm = nla_get_u32(info->attrs[idx]);
 		}
 
-		result = rdev_set_tx_power(rdev, txp_wdev, type, mbm);
+		result = rdev_set_tx_power(rdev, txp_wdev, radio_id, type, mbm);
 		if (result)
-			goto out;
+			return result;
 	}
 
 	if (info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX] &&
@@ -3752,10 +3783,8 @@
 
 		if ((!rdev->wiphy.available_antennas_tx &&
 		     !rdev->wiphy.available_antennas_rx) ||
-		    !rdev->ops->set_antenna) {
-			result = -EOPNOTSUPP;
-			goto out;
-		}
+		    !rdev->ops->set_antenna)
+			return -EOPNOTSUPP;
 
 		tx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX]);
 		rx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]);
@@ -3763,17 +3792,15 @@
 		/* reject antenna configurations which don't match the
 		 * available antenna masks, except for the "all" mask */
 		if ((~tx_ant && (tx_ant & ~rdev->wiphy.available_antennas_tx)) ||
-		    (~rx_ant && (rx_ant & ~rdev->wiphy.available_antennas_rx))) {
-			result = -EINVAL;
-			goto out;
-		}
+		    (~rx_ant && (rx_ant & ~rdev->wiphy.available_antennas_rx)))
+			return -EINVAL;
 
 		tx_ant = tx_ant & rdev->wiphy.available_antennas_tx;
 		rx_ant = rx_ant & rdev->wiphy.available_antennas_rx;
 
 		result = rdev_set_antenna(rdev, tx_ant, rx_ant);
 		if (result)
-			goto out;
+			return result;
 	}
 
 	changed = 0;
@@ -3795,10 +3822,8 @@
 	if (info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]) {
 		frag_threshold = nla_get_u32(
 			info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]);
-		if (frag_threshold < 256) {
-			result = -EINVAL;
-			goto out;
-		}
+		if (frag_threshold < 256)
+			return -EINVAL;
 
 		if (frag_threshold != (u32) -1) {
 			/*
@@ -3819,10 +3844,8 @@
 	}
 
 	if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) {
-		if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK]) {
-			result = -EINVAL;
-			goto out;
-		}
+		if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK])
+			return -EINVAL;
 
 		coverage_class = nla_get_u8(
 			info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]);
@@ -3830,20 +3853,17 @@
 	}
 
 	if (info->attrs[NL80211_ATTR_WIPHY_DYN_ACK]) {
-		if (!(rdev->wiphy.features & NL80211_FEATURE_ACKTO_ESTIMATION)) {
-			result = -EOPNOTSUPP;
-			goto out;
-		}
+		if (!(rdev->wiphy.features & NL80211_FEATURE_ACKTO_ESTIMATION))
+			return -EOPNOTSUPP;
 
 		changed |= WIPHY_PARAM_DYN_ACK;
 	}
 
 	if (info->attrs[NL80211_ATTR_TXQ_LIMIT]) {
 		if (!wiphy_ext_feature_isset(&rdev->wiphy,
-					     NL80211_EXT_FEATURE_TXQS)) {
-			result = -EOPNOTSUPP;
-			goto out;
-		}
+					     NL80211_EXT_FEATURE_TXQS))
+			return -EOPNOTSUPP;
+
 		txq_limit = nla_get_u32(
 			info->attrs[NL80211_ATTR_TXQ_LIMIT]);
 		changed |= WIPHY_PARAM_TXQ_LIMIT;
@@ -3851,10 +3871,9 @@
 
 	if (info->attrs[NL80211_ATTR_TXQ_MEMORY_LIMIT]) {
 		if (!wiphy_ext_feature_isset(&rdev->wiphy,
-					     NL80211_EXT_FEATURE_TXQS)) {
-			result = -EOPNOTSUPP;
-			goto out;
-		}
+					     NL80211_EXT_FEATURE_TXQS))
+			return -EOPNOTSUPP;
+
 		txq_memory_limit = nla_get_u32(
 			info->attrs[NL80211_ATTR_TXQ_MEMORY_LIMIT]);
 		changed |= WIPHY_PARAM_TXQ_MEMORY_LIMIT;
@@ -3862,10 +3881,9 @@
 
 	if (info->attrs[NL80211_ATTR_TXQ_QUANTUM]) {
 		if (!wiphy_ext_feature_isset(&rdev->wiphy,
-					     NL80211_EXT_FEATURE_TXQS)) {
-			result = -EOPNOTSUPP;
-			goto out;
-		}
+					     NL80211_EXT_FEATURE_TXQS))
+			return -EOPNOTSUPP;
+
 		txq_quantum = nla_get_u32(
 			info->attrs[NL80211_ATTR_TXQ_QUANTUM]);
 		changed |= WIPHY_PARAM_TXQ_QUANTUM;
@@ -3874,14 +3892,25 @@
 	if (changed) {
 		u8 old_retry_short, old_retry_long;
 		u32 old_frag_threshold, old_rts_threshold;
-		u8 old_coverage_class;
+		u8 old_coverage_class, i;
 		u32 old_txq_limit, old_txq_memory_limit, old_txq_quantum;
 
-		if (!rdev->ops->set_wiphy_params) {
-			result = -EOPNOTSUPP;
-			goto out;
-		}
+		if (!rdev->ops->set_wiphy_params)
+			return -EOPNOTSUPP;
+
+		if (radio_id < rdev->wiphy.n_radio) {
+			old_rts_threshold =
+				rdev->wiphy.radio_cfg[radio_id].rts_threshold;
+
+			if (changed & WIPHY_PARAM_RTS_THRESHOLD)
+				rdev->wiphy.radio_cfg[radio_id].rts_threshold =
+					rts_threshold;
 
+			result = rdev_set_wiphy_params(rdev, radio_id, changed);
+			if (result)
+				rdev->wiphy.radio_cfg[radio_id].rts_threshold = old_rts_threshold;
+			return result;
+		} else {
 		old_retry_short = rdev->wiphy.retry_short;
 		old_retry_long = rdev->wiphy.retry_long;
 		old_frag_threshold = rdev->wiphy.frag_threshold;
@@ -3908,7 +3937,7 @@
 		if (changed & WIPHY_PARAM_TXQ_QUANTUM)
 			rdev->wiphy.txq_quantum = txq_quantum;
 
-		result = rdev_set_wiphy_params(rdev, changed);
+			result = rdev_set_wiphy_params(rdev, radio_id, changed);
 		if (result) {
 			rdev->wiphy.retry_short = old_retry_short;
 			rdev->wiphy.retry_long = old_retry_long;
@@ -3918,15 +3947,17 @@
 			rdev->wiphy.txq_limit = old_txq_limit;
 			rdev->wiphy.txq_memory_limit = old_txq_memory_limit;
 			rdev->wiphy.txq_quantum = old_txq_quantum;
-			goto out;
-		}
+				return result;
 	}
 
-	result = 0;
+			for (i = 0; i < rdev->wiphy.n_radio; i++) {
+				rdev->wiphy.radio_cfg[i].rts_threshold =
+					rdev->wiphy.rts_threshold;
+			}
+		}
+	}
 
-out:
-	wiphy_unlock(&rdev->wiphy);
-	return result;
+	return 0;
 }
 
 int nl80211_send_chandef(struct sk_buff *msg, const struct cfg80211_chan_def *chandef)
@@ -4010,10 +4041,10 @@
 			goto nla_put_failure;
 	}
 
-	if (rdev->ops->get_tx_power) {
+	if (rdev->ops->get_tx_power && !wdev->valid_links) {
 		int dbm, ret;
 
-		ret = rdev_get_tx_power(rdev, wdev, &dbm);
+		ret = rdev_get_tx_power(rdev, wdev, 0, &dbm);
 		if (ret == 0 &&
 		    nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
 				DBM_TO_MBM(dbm)))
@@ -4082,6 +4113,15 @@
 			if (ret == 0 && nl80211_send_chandef(msg, &chandef))
 				goto nla_put_failure;
 
+			if (rdev->ops->get_tx_power) {
+				int dbm, ret;
+
+				ret = rdev_get_tx_power(rdev, wdev, link_id, &dbm);
+				if (ret == 0 &&
+				    nla_put_u32(msg, NL80211_ATTR_WIPHY_TX_POWER_LEVEL,
+						DBM_TO_MBM(dbm)))
+					goto nla_put_failure;
+			}
 			nla_nest_end(msg, link);
 		}
 
@@ -4144,22 +4184,22 @@
 
 		if_idx = 0;
 
-		wiphy_lock(&rdev->wiphy);
+		guard(wiphy)(&rdev->wiphy);
+
 		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 			if (if_idx < if_start) {
 				if_idx++;
 				continue;
 			}
+
 			if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).portid,
 					       cb->nlh->nlmsg_seq, NLM_F_MULTI,
 					       rdev, wdev,
-					       NL80211_CMD_NEW_INTERFACE) < 0) {
-				wiphy_unlock(&rdev->wiphy);
+					       NL80211_CMD_NEW_INTERFACE) < 0)
 				goto out;
-			}
+
 			if_idx++;
 		}
-		wiphy_unlock(&rdev->wiphy);
 
 		if_start = 0;
 		wp_idx++;
@@ -4522,16 +4562,13 @@
 static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
-	int ret;
 
 	/* to avoid failing a new interface creation due to pending removal */
 	cfg80211_destroy_ifaces(rdev);
 
-	wiphy_lock(&rdev->wiphy);
-	ret = _nl80211_new_interface(skb, info);
-	wiphy_unlock(&rdev->wiphy);
+	guard(wiphy)(&rdev->wiphy);
 
-	return ret;
+	return _nl80211_new_interface(skb, info);
 }
 
 static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info)
@@ -5566,6 +5603,18 @@
 			}
 
 			config->tx_wdev = tx_netdev->ieee80211_ptr;
+
+			if (config->tx_wdev->valid_links) {
+				if (!tb[NL80211_MBSSID_CONFIG_ATTR_TX_LINK_ID])
+					return -ENOLINK;
+
+				config->tx_link_id =
+					nla_get_u8(tb[NL80211_MBSSID_CONFIG_ATTR_TX_LINK_ID]);
+
+				if (!(config->tx_wdev->valid_links &
+				      BIT(config->tx_link_id)))
+					return -ENOLINK;
+			}
 		} else {
 			config->tx_wdev = dev->ieee80211_ptr;
 		}
@@ -7354,6 +7403,34 @@
 	return 0;
 }
 
+static const
+struct nla_policy nl80211_sta_info_policy[NL80211_STA_INFO_MAX+1] = {
+	[NL80211_STA_INFO_EXPECTED_THROUGHPUT] = { .type = NLA_U32 },
+};
+
+static int nl80211_parse_sta_info(struct genl_info *info,
+				  struct station_parameters *params)
+{
+	struct nlattr *tb[NL80211_STA_INFO_MAX + 1];
+
+	if (!info->attrs[NL80211_ATTR_STA_INFO])
+		return 0;
+	if (nla_parse_nested_deprecated(tb,
+					NL80211_STA_INFO_MAX,
+					info->attrs[NL80211_ATTR_STA_INFO],
+					nl80211_sta_info_policy,
+					info->extack))
+		return -EINVAL;
+
+	if (tb[NL80211_STA_INFO_EXPECTED_THROUGHPUT]) {
+		params->link_sta_params.tp_overridden = true;
+		params->link_sta_params.tp_override =
+			nla_get_u32(tb[NL80211_STA_INFO_EXPECTED_THROUGHPUT]);
+	}
+
+	return 0;
+}
+
 static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -7466,6 +7543,12 @@
 		params.link_sta_params.he_6ghz_capa =
 			nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]);
 
+	if (info->attrs[NL80211_ATTR_EML_CAPABILITY]) {
+		params.eml_cap_present = true;
+		params.eml_cap =
+			nla_get_u16(info->attrs[NL80211_ATTR_EML_CAPABILITY]);
+	}
+
 	if (info->attrs[NL80211_ATTR_AIRTIME_WEIGHT])
 		params.airtime_weight =
 			nla_get_u16(info->attrs[NL80211_ATTR_AIRTIME_WEIGHT]);
@@ -7486,6 +7569,10 @@
 	if (err)
 		return err;
 
+	err = nl80211_parse_sta_info(info, &params);
+	if (err)
+		return err;
+
 	params.vlan = get_vlan(info, rdev);
 	if (IS_ERR(params.vlan))
 		return PTR_ERR(params.vlan);
@@ -7624,6 +7711,11 @@
 		}
 	}
 
+	if (info->attrs[NL80211_ATTR_EML_CAPABILITY]) {
+		params.eml_cap_present = true;
+		params.eml_cap = nla_get_u16(info->attrs[NL80211_ATTR_EML_CAPABILITY]);
+	}
+
 	if (info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY])
 		params.link_sta_params.he_6ghz_capa =
 			nla_data(info->attrs[NL80211_ATTR_HE_6GHZ_CAPABILITY]);
@@ -9325,6 +9417,32 @@
 	return 0;
 }
 
+static bool radio_channel_allowed(struct wiphy *wiphy,
+				  struct ieee80211_channel *chan,
+				  u32 radio_mask)
+{
+	const struct wiphy_radio *radio;
+	struct cfg80211_chan_def chandef;
+	int i;
+
+	if (!wiphy->n_radio || radio_mask == BIT(wiphy->n_radio) - 1)
+		return true;
+
+	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_HT20);
+	for (i = 0; i < wiphy->n_radio; i++) {
+		if (!(radio_mask & BIT(i)))
+			continue;
+
+		radio = &wiphy->radio[i];
+		if (!cfg80211_radio_chandef_valid(radio, &chandef))
+			continue;
+
+		return true;
+	}
+
+	return false;
+}
+
 static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
@@ -9337,6 +9455,7 @@
 	int err, tmp, n_ssids = 0, n_channels, i;
 	size_t ie_len, size;
 	size_t ssids_offset, ie_offset;
+	u32 radio_mask = 0;
 
 	wiphy = &rdev->wiphy;
 
@@ -9349,6 +9468,21 @@
 	if (rdev->scan_req || rdev->scan_msg)
 		return -EBUSY;
 
+	if (info->attrs[NL80211_ATTR_VIF_RADIO_MASK]) {
+		u32 mask;
+
+		mask = nla_get_u32(info->attrs[NL80211_ATTR_VIF_RADIO_MASK]);
+
+		/* don't use any radio restricted by mask */
+		if (wiphy->n_radio) {
+			if (mask & ~wdev->radio_mask)
+				return -EINVAL;
+
+			mask &= wdev->radio_mask;
+		}
+		radio_mask = mask;
+	}
+
 	if (info->attrs[NL80211_ATTR_SCAN_FREQ_KHZ]) {
 		if (!wiphy_ext_feature_isset(wiphy,
 					     NL80211_EXT_FEATURE_SCAN_FREQ_KHZ))
@@ -9418,6 +9552,12 @@
 			    !cfg80211_wdev_channel_allowed(wdev, chan))
 				continue;
 
+			/* ignore channel not available to selected
+			 * radios */
+			if (radio_mask &&
+			    !radio_channel_allowed(wiphy, chan, radio_mask))
+				continue;
+
 			request->channels[i] = chan;
 			i++;
 		}
@@ -9439,6 +9579,13 @@
 				    !cfg80211_wdev_channel_allowed(wdev, chan))
 					continue;
 
+				/* ignore channel not available to selected
+				 * radio (if any) */
+				if (radio_mask &&
+				    !radio_channel_allowed(wiphy, chan,
+							   radio_mask))
+					continue;
+
 				request->channels[i] = chan;
 				i++;
 			}
@@ -10103,7 +10250,7 @@
 	struct cfg80211_chan_def chandef;
 	enum nl80211_dfs_regions dfs_region;
 	unsigned int cac_time_ms;
-	int err = -EINVAL;
+	int err;
 
 	flush_delayed_work(&rdev->dfs_update_channels_wk);
 
@@ -10118,35 +10265,29 @@
 		return -EINVAL;
 	}
 
-	wiphy_lock(wiphy);
+	guard(wiphy)(wiphy);
 
 	dfs_region = reg_get_dfs_region(wiphy);
 	if (dfs_region == NL80211_DFS_UNSET)
-		goto unlock;
+		return -EINVAL;
 
 	err = nl80211_parse_chandef(rdev, info, &chandef);
 	if (err)
-		goto unlock;
+		return err;
 
 	err = cfg80211_chandef_dfs_required(wiphy, &chandef, wdev->iftype);
 	if (err < 0)
-		goto unlock;
+		return err;
 
-	if (err == 0) {
-		err = -EINVAL;
-		goto unlock;
-	}
+	if (err == 0)
+		return -EINVAL;
 
-	if (!cfg80211_chandef_dfs_usable(wiphy, &chandef)) {
-		err = -EINVAL;
-		goto unlock;
-	}
+	if (!cfg80211_chandef_dfs_usable(wiphy, &chandef))
+		return -EINVAL;
 
-	if (nla_get_flag(info->attrs[NL80211_ATTR_RADAR_BACKGROUND])) {
-		err = cfg80211_start_background_radar_detection(rdev, wdev,
+	if (nla_get_flag(info->attrs[NL80211_ATTR_RADAR_BACKGROUND]))
+		return cfg80211_start_background_radar_detection(rdev, wdev,
 								&chandef);
-		goto unlock;
-	}
 
 	if (cfg80211_beaconing_iface_active(wdev)) {
 		/* During MLO other link(s) can beacon, only the current link
@@ -10156,26 +10297,19 @@
 		    !wdev->links[link_id].ap.beacon_interval) {
 			/* nothing */
 		} else {
-			err = -EBUSY;
-			goto unlock;
+			return -EBUSY;
 		}
 	}
 
-	if (wdev->links[link_id].cac_started) {
-		err = -EBUSY;
-		goto unlock;
-	}
+	if (wdev->links[link_id].cac_started)
+		return -EBUSY;
 
 	/* CAC start is offloaded to HW and can't be started manually */
-	if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD)) {
-		err = -EOPNOTSUPP;
-		goto unlock;
-	}
+	if (wiphy_ext_feature_isset(wiphy, NL80211_EXT_FEATURE_DFS_OFFLOAD))
+		return -EOPNOTSUPP;
 
-	if (!rdev->ops->start_radar_detection) {
-		err = -EOPNOTSUPP;
-		goto unlock;
-	}
+	if (!rdev->ops->start_radar_detection)
+		return -EOPNOTSUPP;
 
 	cac_time_ms = cfg80211_chandef_dfs_cac_time(&rdev->wiphy, &chandef);
 	if (WARN_ON(!cac_time_ms))
@@ -10183,11 +10317,13 @@
 
 	err = rdev_start_radar_detection(rdev, dev, &chandef, cac_time_ms,
 					 link_id);
-	if (!err) {
+	if (err)
+		return err;
+
 		switch (wdev->iftype) {
 		case NL80211_IFTYPE_AP:
 		case NL80211_IFTYPE_P2P_GO:
-			wdev->links[0].ap.chandef = chandef;
+		wdev->links[link_id].ap.chandef = chandef;
 			break;
 		case NL80211_IFTYPE_ADHOC:
 			wdev->u.ibss.chandef = chandef;
@@ -10201,9 +10337,6 @@
 		wdev->links[link_id].cac_started = true;
 		wdev->links[link_id].cac_start_time = jiffies;
 		wdev->links[link_id].cac_time_ms = cac_time_ms;
-	}
-unlock:
-	wiphy_unlock(wiphy);
 
 	return err;
 }
@@ -10902,6 +11035,13 @@
 		req.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
 	}
 
+	if (info->attrs[NL80211_ATTR_SUPPORTED_SELECTORS]) {
+		req.supported_selectors =
+			nla_data(info->attrs[NL80211_ATTR_SUPPORTED_SELECTORS]);
+		req.supported_selectors_len =
+			nla_len(info->attrs[NL80211_ATTR_SUPPORTED_SELECTORS]);
+	}
+
 	auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
 	if (!nl80211_valid_auth_type(rdev, auth_type, NL80211_CMD_AUTHENTICATE))
 		return -EINVAL;
@@ -11132,12 +11272,83 @@
 	return bss;
 }
 
+static int nl80211_process_links(struct cfg80211_registered_device *rdev,
+				 struct cfg80211_assoc_link *links,
+				 const u8 *ssid, int ssid_len,
+				 struct genl_info *info)
+{
+	unsigned int attrsize = NUM_NL80211_ATTR * sizeof(struct nlattr *);
+	struct nlattr **attrs __free(kfree) = kzalloc(attrsize, GFP_KERNEL);
+	struct nlattr *link;
+	unsigned int link_id;
+	int rem, err;
+
+	if (!attrs)
+		return -ENOMEM;
+
+	nla_for_each_nested(link, info->attrs[NL80211_ATTR_MLO_LINKS], rem) {
+		memset(attrs, 0, attrsize);
+
+		nla_parse_nested(attrs, NL80211_ATTR_MAX, link, NULL, NULL);
+
+		if (!attrs[NL80211_ATTR_MLO_LINK_ID]) {
+			NL_SET_BAD_ATTR(info->extack, link);
+			return -EINVAL;
+		}
+
+		link_id = nla_get_u8(attrs[NL80211_ATTR_MLO_LINK_ID]);
+		/* cannot use the same link ID again */
+		if (links[link_id].bss) {
+			NL_SET_BAD_ATTR(info->extack, link);
+			return -EINVAL;
+		}
+		links[link_id].bss =
+			nl80211_assoc_bss(rdev, ssid, ssid_len, attrs,
+					  link_id, link_id);
+		if (IS_ERR(links[link_id].bss)) {
+			err = PTR_ERR(links[link_id].bss);
+			links[link_id].bss = NULL;
+			NL_SET_ERR_MSG_ATTR(info->extack, link,
+					    "Error fetching BSS for link");
+			return err;
+		}
+
+		if (attrs[NL80211_ATTR_IE]) {
+			links[link_id].elems = nla_data(attrs[NL80211_ATTR_IE]);
+			links[link_id].elems_len =
+				nla_len(attrs[NL80211_ATTR_IE]);
+
+			if (cfg80211_find_elem(WLAN_EID_FRAGMENT,
+					       links[link_id].elems,
+					       links[link_id].elems_len)) {
+				NL_SET_ERR_MSG_ATTR(info->extack,
+						    attrs[NL80211_ATTR_IE],
+						    "cannot deal with fragmentation");
+				return -EINVAL;
+			}
+
+			if (cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
+						   links[link_id].elems,
+						   links[link_id].elems_len)) {
+				NL_SET_ERR_MSG_ATTR(info->extack,
+						    attrs[NL80211_ATTR_IE],
+						    "cannot deal with non-inheritance");
+				return -EINVAL;
+			}
+		}
+
+		links[link_id].disabled =
+			nla_get_flag(attrs[NL80211_ATTR_MLO_LINK_DISABLED]);
+	}
+
+	return 0;
+}
+
 static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
 {
 	struct cfg80211_registered_device *rdev = info->user_ptr[0];
 	struct net_device *dev = info->user_ptr[1];
 	struct cfg80211_assoc_request req = {};
-	struct nlattr **attrs = NULL;
 	const u8 *ap_addr, *ssid;
 	unsigned int link_id;
 	int err, ssid_len;
@@ -11184,6 +11395,13 @@
 	if (info->attrs[NL80211_ATTR_PREV_BSSID])
 		req.prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);
 
+	if (info->attrs[NL80211_ATTR_SUPPORTED_SELECTORS]) {
+		req.supported_selectors =
+			nla_data(info->attrs[NL80211_ATTR_SUPPORTED_SELECTORS]);
+		req.supported_selectors_len =
+			nla_len(info->attrs[NL80211_ATTR_SUPPORTED_SELECTORS]);
+	}
+
 	if (nla_get_flag(info->attrs[NL80211_ATTR_DISABLE_HT]))
 		req.flags |= ASSOC_REQ_DISABLE_HT;
 
@@ -11269,10 +11487,6 @@
 	req.link_id = nl80211_link_id_or_invalid(info->attrs);
 
 	if (info->attrs[NL80211_ATTR_MLO_LINKS]) {
-		unsigned int attrsize = NUM_NL80211_ATTR * sizeof(*attrs);
-		struct nlattr *link;
-		int rem = 0;
-
 		if (req.link_id < 0)
 			return -EINVAL;
 
@@ -11287,72 +11501,10 @@
 		req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]);
 		ap_addr = req.ap_mld_addr;
 
-		attrs = kzalloc(attrsize, GFP_KERNEL);
-		if (!attrs)
-			return -ENOMEM;
-
-		nla_for_each_nested(link,
-				    info->attrs[NL80211_ATTR_MLO_LINKS],
-				    rem) {
-			memset(attrs, 0, attrsize);
-
-			nla_parse_nested(attrs, NL80211_ATTR_MAX,
-					 link, NULL, NULL);
-
-			if (!attrs[NL80211_ATTR_MLO_LINK_ID]) {
-				err = -EINVAL;
-				NL_SET_BAD_ATTR(info->extack, link);
-				goto free;
-			}
-
-			link_id = nla_get_u8(attrs[NL80211_ATTR_MLO_LINK_ID]);
-			/* cannot use the same link ID again */
-			if (req.links[link_id].bss) {
-				err = -EINVAL;
-				NL_SET_BAD_ATTR(info->extack, link);
-				goto free;
-			}
-			req.links[link_id].bss =
-				nl80211_assoc_bss(rdev, ssid, ssid_len, attrs,
-						  req.link_id, link_id);
-			if (IS_ERR(req.links[link_id].bss)) {
-				err = PTR_ERR(req.links[link_id].bss);
-				req.links[link_id].bss = NULL;
-				NL_SET_ERR_MSG_ATTR(info->extack,
-						    link, "Error fetching BSS for link");
-				goto free;
-			}
-
-			if (attrs[NL80211_ATTR_IE]) {
-				req.links[link_id].elems =
-					nla_data(attrs[NL80211_ATTR_IE]);
-				req.links[link_id].elems_len =
-					nla_len(attrs[NL80211_ATTR_IE]);
-
-				if (cfg80211_find_elem(WLAN_EID_FRAGMENT,
-						       req.links[link_id].elems,
-						       req.links[link_id].elems_len)) {
-					NL_SET_ERR_MSG_ATTR(info->extack,
-							    attrs[NL80211_ATTR_IE],
-							    "cannot deal with fragmentation");
-					err = -EINVAL;
-					goto free;
-				}
-
-				if (cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
-							   req.links[link_id].elems,
-							   req.links[link_id].elems_len)) {
-					NL_SET_ERR_MSG_ATTR(info->extack,
-							    attrs[NL80211_ATTR_IE],
-							    "cannot deal with non-inheritance");
-					err = -EINVAL;
+		err = nl80211_process_links(rdev, req.links, ssid, ssid_len,
+					    info);
+		if (err)
 					goto free;
-				}
-			}
-
-			req.links[link_id].disabled =
-				nla_get_flag(attrs[NL80211_ATTR_MLO_LINK_DISABLED]);
-		}
 
 		if (!req.links[req.link_id].bss) {
 			err = -EINVAL;
@@ -11372,9 +11524,6 @@
 			err = -EINVAL;
 			goto free;
 		}
-
-		kfree(attrs);
-		attrs = NULL;
 	} else {
 		if (req.link_id >= 0)
 			return -EINVAL;
@@ -11434,7 +11583,6 @@
 	for (link_id = 0; link_id < ARRAY_SIZE(req.links); link_id++)
 		cfg80211_put_bss(&rdev->wiphy, req.links[link_id].bss);
 	cfg80211_put_bss(&rdev->wiphy, req.bss);
-	kfree(attrs);
 
 	return err;
 }
@@ -16486,6 +16634,86 @@
 	return rdev_set_ttlm(rdev, dev, &params);
 }
 
+static int nl80211_assoc_ml_reconf(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct cfg80211_assoc_link links[IEEE80211_MLD_MAX_NUM_LINKS] = {};
+	unsigned int link_id;
+	u16 add_links, rem_links;
+	int err;
+
+	if (!wdev->valid_links)
+		return -EINVAL;
+
+	if (dev->ieee80211_ptr->conn_owner_nlportid &&
+	    dev->ieee80211_ptr->conn_owner_nlportid != info->snd_portid)
+		return -EPERM;
+
+	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
+	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
+		return -EOPNOTSUPP;
+
+	add_links = 0;
+	if (info->attrs[NL80211_ATTR_MLO_LINKS]) {
+		err = nl80211_process_links(rdev, links, NULL, 0, info);
+		if (err)
+			return err;
+
+		for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS;
+		     link_id++) {
+			if (!links[link_id].bss)
+				continue;
+			add_links |= BIT(link_id);
+		}
+	}
+
+	if (info->attrs[NL80211_ATTR_MLO_RECONF_REM_LINKS])
+		rem_links =
+			nla_get_u16(info->attrs[NL80211_ATTR_MLO_RECONF_REM_LINKS]);
+	else
+		rem_links = 0;
+
+	/* Validate that existing links are not added, removed links are valid
+	 * and don't allow adding and removing the same links
+	 */
+	if ((add_links & rem_links) || !(add_links | rem_links) ||
+	    (wdev->valid_links & add_links) ||
+	    ((wdev->valid_links & rem_links) != rem_links)) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	err = cfg80211_assoc_ml_reconf(rdev, dev, links, rem_links);
+
+out:
+	for (link_id = 0; link_id < ARRAY_SIZE(links); link_id++)
+		cfg80211_put_bss(&rdev->wiphy, links[link_id].bss);
+
+	return err;
+}
+
+static int
+nl80211_epcs_cfg(struct sk_buff *skb, struct genl_info *info)
+{
+	struct cfg80211_registered_device *rdev = info->user_ptr[0];
+	struct net_device *dev = info->user_ptr[1];
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	bool val;
+
+	if (wdev->iftype != NL80211_IFTYPE_STATION &&
+	    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
+		return -EOPNOTSUPP;
+
+	if (!wdev->connected)
+		return -ENOLINK;
+
+	val = nla_get_flag(info->attrs[NL80211_ATTR_EPCS]);
+
+	return rdev_set_epcs(rdev, dev, val);
+}
+
 #define NL80211_FLAG_NEED_WIPHY		0x01
 #define NL80211_FLAG_NEED_NETDEV	0x02
 #define NL80211_FLAG_NEED_RTNL		0x04
@@ -17678,6 +17906,18 @@
 		.flags = GENL_UNS_ADMIN_PERM,
 		.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
 	},
+	{
+		.cmd = NL80211_CMD_ASSOC_MLO_RECONF,
+		.doit = nl80211_assoc_ml_reconf,
+		.flags = GENL_UNS_ADMIN_PERM,
+		.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
+	},
+	{
+		.cmd = NL80211_CMD_EPCS_CFG,
+		.doit = nl80211_epcs_cfg,
+		.flags = GENL_UNS_ADMIN_PERM,
+		.internal_flags = IFLAGS(NL80211_FLAG_NEED_NETDEV_UP),
+	},
 };
 
 static struct genl_family nl80211_fam __ro_after_init = {
@@ -18453,9 +18693,8 @@
 	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer_addr))
 		goto nla_put_failure;
 
-	if ((td_bitmap_len > 0) && td_bitmap)
-		if (nla_put(msg, NL80211_ATTR_TD_BITMAP,
-			    td_bitmap_len, td_bitmap))
+	if (td_bitmap_len > 0 && td_bitmap &&
+	    nla_put(msg, NL80211_ATTR_TD_BITMAP, td_bitmap_len, td_bitmap))
 			goto nla_put_failure;
 
 	genlmsg_end(msg, hdr);
@@ -18574,6 +18813,23 @@
 }
 EXPORT_SYMBOL(cfg80211_links_removed);
 
+void nl80211_mlo_reconf_add_done(struct net_device *dev,
+				 struct cfg80211_mlo_reconf_done_data *data)
+{
+	struct wireless_dev *wdev = dev->ieee80211_ptr;
+	struct wiphy *wiphy = wdev->wiphy;
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+	struct nl80211_mlme_event event = {
+		.cmd = NL80211_CMD_ASSOC_MLO_RECONF,
+		.buf = data->buf,
+		.buf_len = data->len,
+		.uapsd_queues = -1,
+	};
+
+	nl80211_send_mlme_event(rdev, dev, &event, GFP_KERNEL);
+}
+EXPORT_SYMBOL(nl80211_mlo_reconf_add_done);
+
 void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
 			     struct net_device *netdev, const u8 *bssid,
 			     gfp_t gfp)
@@ -20406,6 +20662,39 @@
 }
 EXPORT_SYMBOL(cfg80211_schedule_channels_check);
 
+void cfg80211_epcs_changed(struct net_device *netdev, bool enabled)
+{
+	struct wireless_dev *wdev = netdev->ieee80211_ptr;
+	struct wiphy *wiphy = wdev->wiphy;
+	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+	struct sk_buff *msg;
+	void *hdr;
+
+	trace_cfg80211_epcs_changed(wdev, enabled);
+
+	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+	if (!msg)
+		return;
+
+	hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_EPCS_CFG);
+	if (!hdr) {
+		nlmsg_free(msg);
+		return;
+	}
+
+	if (enabled && nla_put_flag(msg, NL80211_ATTR_EPCS))
+		goto nla_put_failure;
+
+	genlmsg_end(msg, hdr);
+	genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
+				NL80211_MCGRP_MLME, GFP_KERNEL);
+	return;
+
+ nla_put_failure:
+	nlmsg_free(msg);
+}
+EXPORT_SYMBOL(cfg80211_epcs_changed);
+
 /* initialisation/exit functions */
 
 int __init nl80211_init(void)
diff -ruw linux-6.13.12/net/wireless/nl80211.h linux-6.13.12-fbx/net/wireless/nl80211.h
--- linux-6.13.12/net/wireless/nl80211.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/nl80211.h	2025-07-01 14:10:45.856130125 +0200
@@ -124,4 +124,7 @@
 /* peer measurement */
 int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info);
 
+void nl80211_mlo_reconf_add_done(struct net_device *dev,
+				 struct cfg80211_mlo_reconf_done_data *data);
+
 #endif /* __NET_WIRELESS_NL80211_H */
diff -ruw linux-6.13.12/net/wireless/pmsr.c linux-6.13.12-fbx/net/wireless/pmsr.c
--- linux-6.13.12/net/wireless/pmsr.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/pmsr.c	2025-07-01 14:10:45.856130125 +0200
@@ -630,9 +630,9 @@
 	struct wireless_dev *wdev = container_of(work, struct wireless_dev,
 						 pmsr_free_wk);
 
-	wiphy_lock(wdev->wiphy);
+	guard(wiphy)(wdev->wiphy);
+
 	cfg80211_pmsr_process_abort(wdev);
-	wiphy_unlock(wdev->wiphy);
 }
 
 void cfg80211_pmsr_wdev_down(struct wireless_dev *wdev)
diff -ruw linux-6.13.12/net/wireless/rdev-ops.h linux-6.13.12-fbx/net/wireless/rdev-ops.h
--- linux-6.13.12/net/wireless/rdev-ops.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/rdev-ops.h	2025-09-25 17:40:37.959379088 +0200
@@ -577,34 +577,35 @@
 }
 
 static inline int
-rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed)
+rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u8 radio_id, u32 changed)
 {
 	int ret = -EOPNOTSUPP;
 
-	trace_rdev_set_wiphy_params(&rdev->wiphy, changed);
+	trace_rdev_set_wiphy_params(&rdev->wiphy, radio_id, changed);
 	if (rdev->ops->set_wiphy_params)
-		ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed);
+		ret = rdev->ops->set_wiphy_params(&rdev->wiphy, radio_id, changed);
 	trace_rdev_return_int(&rdev->wiphy, ret);
 	return ret;
 }
 
 static inline int rdev_set_tx_power(struct cfg80211_registered_device *rdev,
-				    struct wireless_dev *wdev,
+				    struct wireless_dev *wdev, u8 radio_id,
 				    enum nl80211_tx_power_setting type, int mbm)
 {
 	int ret;
-	trace_rdev_set_tx_power(&rdev->wiphy, wdev, type, mbm);
-	ret = rdev->ops->set_tx_power(&rdev->wiphy, wdev, type, mbm);
+	trace_rdev_set_tx_power(&rdev->wiphy, wdev, radio_id, type, mbm);
+	ret = rdev->ops->set_tx_power(&rdev->wiphy, wdev, radio_id, type, mbm);
 	trace_rdev_return_int(&rdev->wiphy, ret);
 	return ret;
 }
 
 static inline int rdev_get_tx_power(struct cfg80211_registered_device *rdev,
-				    struct wireless_dev *wdev, int *dbm)
+				    struct wireless_dev *wdev, unsigned int link_id,
+				    int *dbm)
 {
 	int ret;
-	trace_rdev_get_tx_power(&rdev->wiphy, wdev);
-	ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, dbm);
+	trace_rdev_get_tx_power(&rdev->wiphy, wdev, link_id);
+	ret = rdev->ops->get_tx_power(&rdev->wiphy, wdev, link_id, dbm);
 	trace_rdev_return_int_int(&rdev->wiphy, ret, *dbm);
 	return ret;
 }
@@ -1546,4 +1547,38 @@
 
 	return rdev->ops->get_radio_mask(wiphy, dev);
 }
+
+static inline int
+rdev_assoc_ml_reconf(struct cfg80211_registered_device *rdev,
+		     struct net_device *dev,
+		     struct cfg80211_assoc_link *add_links,
+		     u16 rem_links)
+{
+	struct wiphy *wiphy = &rdev->wiphy;
+	int ret = -EOPNOTSUPP;
+
+	trace_rdev_assoc_ml_reconf(wiphy, dev, add_links, rem_links);
+	if (rdev->ops->assoc_ml_reconf)
+		ret = rdev->ops->assoc_ml_reconf(wiphy, dev, add_links,
+						 rem_links);
+	trace_rdev_return_int(wiphy, ret);
+
+	return ret;
+}
+
+static inline int
+rdev_set_epcs(struct cfg80211_registered_device *rdev,
+	      struct net_device *dev, bool val)
+{
+	struct wiphy *wiphy = &rdev->wiphy;
+	int ret = -EOPNOTSUPP;
+
+	trace_rdev_set_epcs(wiphy, dev, val);
+	if (rdev->ops->set_epcs)
+		ret = rdev->ops->set_epcs(wiphy, dev, val);
+	trace_rdev_return_int(wiphy, ret);
+
+	return ret;
+}
+
 #endif /* __CFG80211_RDEV_OPS */
diff -ruw linux-6.13.12/net/wireless/reg.c linux-6.13.12-fbx/net/wireless/reg.c
--- linux-6.13.12/net/wireless/reg.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/reg.c	2025-09-25 17:40:37.959379088 +0200
@@ -1771,6 +1771,40 @@
 	return bw_flags;
 }
 
+static void restore_channel_dfs_cached_state(struct wiphy *wiphy,
+					     struct ieee80211_channel *c)
+{
+	struct cfg80211_chan_dfs_cache *cd;
+	unsigned long timeout;
+
+	if (!IS_ENABLED(CONFIG_CFG80211_DFS_CACHE))
+		return;
+
+	cd = cfg80211_get_dfs_chan_cache(c);
+	if (!cd)
+		return;
+
+	if (cd->dfs_state == NL80211_DFS_USABLE)
+		return;
+
+	if (cd->dfs_state == NL80211_DFS_UNAVAILABLE) {
+		struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
+
+		timeout = cd->dfs_state_entered +
+			msecs_to_jiffies(IEEE80211_DFS_MIN_NOP_TIME_MS);
+
+		if (time_after_eq(jiffies, timeout))
+			return;
+
+		cfg80211_sched_dfs_chan_update(rdev);
+	}
+
+	wiphy_info(wiphy, "restoring channel %u DFS state from cache\n",
+		   cd->center_freq);
+	c->dfs_state = cd->dfs_state;
+	c->dfs_state_entered = cd->dfs_state_entered;
+}
+
 static void handle_channel_single_rule(struct wiphy *wiphy,
 				       enum nl80211_reg_initiator initiator,
 				       struct ieee80211_channel *chan,
@@ -1805,6 +1839,9 @@
 
 		if (chan->flags & IEEE80211_CHAN_RADAR) {
 			chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
+			if (chan->center_freq >= 5600 &&
+			    chan->center_freq <= 5650)
+				chan->dfs_cac_ms = IEEE80211_DFS_WEATHER_MIN_CAC_TIME_MS;
 			if (reg_rule->dfs_cac_ms)
 				chan->dfs_cac_ms = reg_rule->dfs_cac_ms;
 		}
@@ -1817,6 +1854,7 @@
 
 	chan->dfs_state = NL80211_DFS_USABLE;
 	chan->dfs_state_entered = jiffies;
+	restore_channel_dfs_cached_state(wiphy, chan);
 
 	chan->beacon_found = false;
 	chan->flags = flags | bw_flags | map_regdom_flags(reg_rule->flags);
@@ -1828,9 +1866,14 @@
 	if (chan->flags & IEEE80211_CHAN_RADAR) {
 		if (reg_rule->dfs_cac_ms)
 			chan->dfs_cac_ms = reg_rule->dfs_cac_ms;
+		else {
+			if (chan->center_freq >= 5600 &&
+			    chan->center_freq <= 5650)
+				chan->dfs_cac_ms = IEEE80211_DFS_WEATHER_MIN_CAC_TIME_MS;
 		else
 			chan->dfs_cac_ms = IEEE80211_DFS_MIN_CAC_TIME_MS;
 	}
+	}
 
 	if (chan->flags & IEEE80211_CHAN_PSD)
 		chan->psd = reg_rule->psd;
@@ -1915,6 +1958,7 @@
 
 	chan->dfs_state = NL80211_DFS_USABLE;
 	chan->dfs_state_entered = jiffies;
+	restore_channel_dfs_cached_state(wiphy, chan);
 
 	chan->beacon_found = false;
 	chan->flags = flags | bw_flags1 | bw_flags2 |
@@ -2466,11 +2510,11 @@
 	struct wireless_dev *wdev;
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
 
-	wiphy_lock(wiphy);
+	guard(wiphy)(wiphy);
+
 	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list)
 		if (!reg_wdev_chan_valid(wiphy, wdev))
 			cfg80211_leave(rdev, wdev);
-	wiphy_unlock(wiphy);
 }
 
 static void reg_check_chans_work(struct work_struct *work)
@@ -2576,6 +2620,7 @@
 
 	chan->dfs_state_entered = jiffies;
 	chan->dfs_state = NL80211_DFS_USABLE;
+	restore_channel_dfs_cached_state(wiphy, chan);
 
 	chan->beacon_found = false;
 
@@ -2650,13 +2695,11 @@
 		return;
 
 	rtnl_lock();
-	wiphy_lock(wiphy);
-
+	scoped_guard(wiphy, wiphy) {
 	tmp = get_wiphy_regdom(wiphy);
 	rcu_assign_pointer(wiphy->regd, new_regd);
 	rcu_free_regdom(tmp);
-
-	wiphy_unlock(wiphy);
+	}
 	rtnl_unlock();
 }
 EXPORT_SYMBOL(wiphy_apply_custom_regulatory);
@@ -2826,9 +2869,9 @@
 
 		tmp = get_wiphy_regdom(wiphy);
 		ASSERT_RTNL();
-		wiphy_lock(wiphy);
+		scoped_guard(wiphy, wiphy) {
 		rcu_assign_pointer(wiphy->regd, regd);
-		wiphy_unlock(wiphy);
+		}
 		rcu_free_regdom(tmp);
 	}
 
@@ -3206,9 +3249,9 @@
 	ASSERT_RTNL();
 
 	for_each_rdev(rdev) {
-		wiphy_lock(&rdev->wiphy);
+		guard(wiphy)(&rdev->wiphy);
+
 		reg_process_self_managed_hint(&rdev->wiphy);
-		wiphy_unlock(&rdev->wiphy);
 	}
 
 	reg_check_channels();
@@ -3601,15 +3644,13 @@
 	struct wireless_dev *wdev;
 
 	for_each_rdev(rdev) {
-		wiphy_lock(&rdev->wiphy);
+		guard(wiphy)(&rdev->wiphy);
+
 		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
-			if (!(wdev->wiphy->regulatory_flags & flag)) {
-				wiphy_unlock(&rdev->wiphy);
+			if (!(wdev->wiphy->regulatory_flags & flag))
 				return false;
 			}
 		}
-		wiphy_unlock(&rdev->wiphy);
-	}
 
 	return true;
 }
@@ -3884,19 +3925,18 @@
 
 	if (!driver_request->intersect) {
 		ASSERT_RTNL();
-		wiphy_lock(request_wiphy);
+		scoped_guard(wiphy, request_wiphy) {
 		if (request_wiphy->regd)
 			tmp = get_wiphy_regdom(request_wiphy);
 
 		regd = reg_copy_regd(rd);
-		if (IS_ERR(regd)) {
-			wiphy_unlock(request_wiphy);
+			if (IS_ERR(regd))
 			return PTR_ERR(regd);
-		}
 
 		rcu_assign_pointer(request_wiphy->regd, regd);
 		rcu_free_regdom(tmp);
-		wiphy_unlock(request_wiphy);
+		}
+
 		reset_regdomains(false, rd);
 		return 0;
 	}
diff -ruw linux-6.13.12/net/wireless/scan.c linux-6.13.12-fbx/net/wireless/scan.c
--- linux-6.13.12/net/wireless/scan.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/scan.c	2025-09-25 17:40:37.963379107 +0200
@@ -272,12 +272,19 @@
 {
 	const struct element *non_inherit_elem, *parent, *sub;
 	u8 *pos = new_ie;
-	u8 id, ext_id;
+	const u8 *mbssid_index_ie;
+	u8 id, ext_id, bssid_index = 255;
 	unsigned int match_len;
 
 	non_inherit_elem = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE,
 						  subie, subie_len);
 
+	mbssid_index_ie = cfg80211_find_ie(WLAN_EID_MULTI_BSSID_IDX, subie,
+					   subie_len);
+	if (mbssid_index_ie && mbssid_index_ie[1] > 0 &&
+	    mbssid_index_ie[2] > 0 && mbssid_index_ie[2] <= 46)
+		bssid_index = mbssid_index_ie[2];
+
 	/* We copy the elements one by one from the parent to the generated
 	 * elements.
 	 * If they are not inherited (included in subie or in the non
@@ -316,6 +323,24 @@
 			continue;
 		}
 
+		/* For ML probe response, match the MLE in the frame body with
+		 * MLD id being 'bssid_index'
+		 */
+		if (parent->id == WLAN_EID_EXTENSION && parent->datalen > 1 &&
+		    parent->data[0] == WLAN_EID_EXT_EHT_MULTI_LINK &&
+		    bssid_index == ieee80211_mle_get_mld_id(parent->data + 1)) {
+			if (!cfg80211_copy_elem_with_frags(parent,
+							   ie, ielen,
+							   &pos, new_ie,
+							   new_ie_len))
+				return 0;
+
+			/* Continue here to prevent processing the MLE in
+			 * sub-element, which AP MLD should not carry
+			 */
+			continue;
+		}
+
 		/* Already copied if an earlier element had the same type */
 		if (cfg80211_find_elem_match(id, ie, (u8 *)parent - ie,
 					     &ext_id, match_len, 0))
@@ -704,7 +729,7 @@
 					   bss_params)))
 		return RNR_ITER_CONTINUE;
 
-	entry = kzalloc(sizeof(*entry) + IEEE80211_MAX_SSID_LEN, GFP_ATOMIC);
+	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
 	if (!entry)
 		return RNR_ITER_ERROR;
 
@@ -713,6 +738,17 @@
 
 	if (!cfg80211_parse_ap_info(entry, tbtt_info, tbtt_info_len,
 				    data->ssid_elem, data->s_ssid_tmp)) {
+		struct cfg80211_colocated_ap *tmp;
+
+		/* Don't add duplicate BSSIDs on the same channel. */
+		list_for_each_entry(tmp, &data->ap_list, list) {
+			if (ether_addr_equal(tmp->bssid, entry->bssid) &&
+			    tmp->center_freq == entry->center_freq) {
+				kfree(entry);
+				return RNR_ITER_CONTINUE;
+			}
+		}
+
 		data->n_coloc++;
 		list_add_tail(&entry->list, &data->ap_list);
 	} else {
@@ -1235,7 +1271,8 @@
 	rdev = container_of(work, struct cfg80211_registered_device,
 			   sched_scan_res_wk);
 
-	wiphy_lock(&rdev->wiphy);
+	guard(wiphy)(&rdev->wiphy);
+
 	list_for_each_entry_safe(req, tmp, &rdev->sched_scan_req_list, list) {
 		if (req->report_results) {
 			req->report_results = false;
@@ -1250,7 +1287,6 @@
 						NL80211_CMD_SCHED_SCAN_RESULTS);
 		}
 	}
-	wiphy_unlock(&rdev->wiphy);
 }
 
 void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid)
@@ -1285,9 +1321,9 @@
 
 void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid)
 {
-	wiphy_lock(wiphy);
+	guard(wiphy)(wiphy);
+
 	cfg80211_sched_scan_stopped_locked(wiphy, reqid);
-	wiphy_unlock(wiphy);
 }
 EXPORT_SYMBOL(cfg80211_sched_scan_stopped);
 
@@ -1913,6 +1949,165 @@
 	return true;
 }
 
+static const struct channel_5ghz_desc {
+	unsigned int freq;
+	unsigned int aligned_start_freq_40;
+        unsigned int aligned_start_freq_80;
+        unsigned int aligned_start_freq_160;
+} channel_5ghz_descs[] = {
+        { 5180, 5170, 5170, 5170 }, /* 36 */
+        { 5200, 5170, 5170, 5170 }, /* 40 */
+        { 5220, 5210, 5170, 5170 }, /* 44 */
+        { 5240, 5210, 5170, 5170 }, /* 48 */
+        { 5260, 5250, 5250, 5170 }, /* 52 */
+        { 5280, 5250, 5250, 5170 }, /* 56 */
+        { 5300, 5290, 5250, 5170 }, /* 60 */
+        { 5320, 5290, 5250, 5170 }, /* 64 */
+        { 5340, 5330, 5330, 5330 }, /* 68 */
+        { 5360, 5330, 5330, 5330 }, /* 72 */
+        { 5380, 5370, 5330, 5330 }, /* 76 */
+        { 5400, 5370, 5330, 5330 }, /* 80 */
+        { 5420, 5410, 5410, 5330 }, /* 84 */
+        { 5440, 5410, 5410, 5330 }, /* 88 */
+        { 5460, 5450, 5410, 5330 }, /* 92 */
+        { 5480, 5450, 5410, 5330 }, /* 96 */
+        { 5500, 5490, 5490, 5490 }, /* 100 */
+        { 5520, 5490, 5490, 5490 }, /* 104 */
+        { 5540, 5530, 5490, 5490 }, /* 108 */
+        { 5560, 5530, 5490, 5490 }, /* 112 */
+        { 5580, 5570, 5570, 5490 }, /* 116 */
+        { 5600, 5570, 5570, 5490 }, /* 120 */
+        { 5620, 5610, 5570, 5490 }, /* 124 */
+        { 5640, 5610, 5570, 5490 }, /* 128 */
+        { 5660, 5650, 5650, 5650 }, /* 132 */
+        { 5680, 5650, 5650, 5650 }, /* 136 */
+        { 5700, 5690, 5650, 5650 }, /* 140 */
+        { 5720, 5690, 5650, 5650 }, /* 144 */
+        { 5745, 5735, 5735, 5735 }, /* 148 */
+        { 5765, 5735, 5735, 5735 }, /* 152 */
+        { 5785, 5775, 5735, 5735 }, /* 156 */
+        { 5805, 5775, 5735, 5735 }, /* 160 */
+        { 5825, 5815, 5815, 5735 }, /* 164 */
+        { 5845, 5815, 5815, 5735 }, /* 168 */
+        { 5865, 5855, 5815, 5735 }, /* 172 */
+        { 5885, 5855, 5815, 5735 }, /* 176 */
+        { 5905, 5895, 5895, 5895 }, /* 180 */
+};
+
+static void bss_update_rdev_dfs_state(struct cfg80211_registered_device *rdev,
+				      struct cfg80211_internal_bss *bss)
+{
+	struct ieee80211_channel *chan = bss->pub.channel;
+	const struct cfg80211_bss_ies *ies = bss->pub.ies;
+	const struct channel_5ghz_desc *cdesc;
+	const struct element *elem;
+	enum nl80211_band band;
+	unsigned int width, start_freq, freq;
+	u8 oper_class;
+	size_t i;
+
+	/* extract some IE to check if AP seems to be indeed doing
+	 * DFS, expect country code & power constraint to be present */
+	if (!cfg80211_find_elem(WLAN_EID_PWR_CONSTRAINT, ies->data,
+				ies->len) ||
+	    !cfg80211_find_elem(WLAN_EID_COUNTRY, ies->data,
+				ies->len))
+		return;
+
+	/* guess operating bandwidth, use only operating class for
+	 * now */
+	elem = cfg80211_find_elem(WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+				  ies->data, ies->len);
+	if (!elem || elem->datalen < 1)
+		return;
+
+	oper_class = elem->data[0];
+	if (!ieee80211_operating_class_to_band(oper_class, &band) ||
+	    band != NL80211_BAND_5GHZ)
+		return;
+
+	switch (oper_class) {
+	case 115:
+	case 118:
+	case 121:
+	case 124:
+	case 125:
+		width = 20;
+		break;
+	case 116:
+	case 117:
+	case 119:
+	case 120:
+	case 122:
+	case 123:
+	case 126:
+	case 127:
+		width = 40;
+		break;
+	case 128:
+		width = 80;
+		break;
+	case 129:
+		width = 160;
+		break;
+	case 130:
+		/* ignore 80+80 */
+		return;
+	default:
+		return;
+	}
+
+	/* compute spanned channels according to primary channel &
+	 * width */
+	cdesc = NULL;
+	for (i = 0; i < ARRAY_SIZE(channel_5ghz_descs); i++) {
+		if (channel_5ghz_descs[i].freq == chan->center_freq) {
+			cdesc = &channel_5ghz_descs[i];
+			break;
+		}
+	}
+
+	if (!cdesc)
+		return;
+
+	switch (width) {
+	case 20:
+		start_freq = cdesc->freq - 10;
+		break;
+	case 40:
+		start_freq = cdesc->aligned_start_freq_40;
+		break;
+	case 80:
+		start_freq = cdesc->aligned_start_freq_80;
+		break;
+	case 160:
+		start_freq = cdesc->aligned_start_freq_160;
+		break;
+	}
+
+	for (freq = start_freq + 10;
+	     freq <= start_freq + width - 10;
+	     freq += 20) {
+		struct ieee80211_channel *c;
+
+		c = ieee80211_get_channel(&rdev->wiphy, freq);
+		if (!c ||
+		    (c->flags & IEEE80211_CHAN_DISABLED) ||
+		    !(c->flags & IEEE80211_CHAN_RADAR))
+			continue;
+
+		if (c->dfs_state != NL80211_DFS_USABLE)
+			continue;
+
+		wiphy_info(&rdev->wiphy,
+			   "setting channel %u DFS state from scan result\n",
+			   freq);
+
+		c->dfs_state = NL80211_DFS_AVAILABLE;
+		c->dfs_state_entered = jiffies;
+	}
+}
+
 /* Returned bss is reference counted and must be cleaned up appropriately. */
 static struct cfg80211_internal_bss *
 __cfg80211_bss_update(struct cfg80211_registered_device *rdev,
@@ -1935,6 +2130,10 @@
 	if (found) {
 		if (!cfg80211_update_known_bss(rdev, found, tmp, signal_valid))
 			return NULL;
+
+		if (rdev->scan_req &&
+		    rdev->scan_req->flags & NL80211_SCAN_FLAG_UPDATE_DFS)
+			bss_update_rdev_dfs_state(rdev, found);
 	} else {
 		struct cfg80211_internal_bss *new;
 		struct cfg80211_internal_bss *hidden;
@@ -1997,6 +2196,10 @@
 			bss_ref_get(rdev, bss_from_pub(tmp->pub.transmitted_bss));
 		}
 
+		if (rdev->scan_req &&
+		    rdev->scan_req->flags & NL80211_SCAN_FLAG_UPDATE_DFS)
+			bss_update_rdev_dfs_state(rdev, new);
+
 		cfg80211_insert_bss(rdev, new);
 		found = new;
 	}
@@ -3562,10 +3765,8 @@
 	/* translate "Scan for SSID" request */
 	if (wreq) {
 		if (wrqu->data.flags & IW_SCAN_THIS_ESSID) {
-			if (wreq->essid_len > IEEE80211_MAX_SSID_LEN) {
-				err = -EINVAL;
-				goto out;
-			}
+			if (wreq->essid_len > IEEE80211_MAX_SSID_LEN)
+				return -EINVAL;
 			memcpy(creq->ssids[0].ssid, wreq->essid, wreq->essid_len);
 			creq->ssids[0].ssid_len = wreq->essid_len;
 		}
@@ -3581,8 +3782,7 @@
 
 	eth_broadcast_addr(creq->bssid);
 
-	wiphy_lock(&rdev->wiphy);
-
+	scoped_guard(wiphy, &rdev->wiphy) {
 	rdev->scan_req = creq;
 	err = rdev_scan(rdev, creq);
 	if (err) {
@@ -3594,7 +3794,8 @@
 		creq = NULL;
 		dev_hold(dev);
 	}
-	wiphy_unlock(&rdev->wiphy);
+	}
+
  out:
 	kfree(creq);
 	return err;
diff -ruw linux-6.13.12/net/wireless/sme.c linux-6.13.12-fbx/net/wireless/sme.c
--- linux-6.13.12/net/wireless/sme.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/sme.c	2025-09-25 17:40:37.963379107 +0200
@@ -252,7 +252,7 @@
 	u8 bssid_buf[ETH_ALEN], *bssid = NULL;
 	enum nl80211_timeout_reason treason;
 
-	wiphy_lock(&rdev->wiphy);
+	guard(wiphy)(&rdev->wiphy);
 
 	list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 		if (!wdev->netdev)
@@ -280,8 +280,6 @@
 			__cfg80211_connect_result(wdev->netdev, &cr, false);
 		}
 	}
-
-	wiphy_unlock(&rdev->wiphy);
 }
 
 static void cfg80211_step_auth_next(struct cfg80211_conn *conn,
@@ -693,13 +691,13 @@
 	 * as chan dfs state, etc.
 	 */
 	for_each_rdev(rdev) {
-		wiphy_lock(&rdev->wiphy);
+		guard(wiphy)(&rdev->wiphy);
+
 		list_for_each_entry(wdev, &rdev->wiphy.wdev_list, list) {
 			if (wdev->conn || wdev->connected ||
 			    cfg80211_beaconing_iface_active(wdev))
 				is_all_idle = false;
 		}
-		wiphy_unlock(&rdev->wiphy);
 	}
 
 	return is_all_idle;
@@ -1583,7 +1581,7 @@
 		container_of(work, struct wireless_dev, disconnect_wk);
 	struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
 
-	wiphy_lock(wdev->wiphy);
+	guard(wiphy)(wdev->wiphy);
 
 	if (wdev->conn_owner_nlportid) {
 		switch (wdev->iftype) {
@@ -1619,6 +1617,4 @@
 			break;
 		}
 	}
-
-	wiphy_unlock(wdev->wiphy);
 }
diff -ruw linux-6.13.12/net/wireless/sysfs.c linux-6.13.12-fbx/net/wireless/sysfs.c
--- linux-6.13.12/net/wireless/sysfs.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/sysfs.c	2025-09-25 17:40:37.963379107 +0200
@@ -36,6 +36,8 @@
 SHOW_FMT(index, "%d", wiphy_idx);
 SHOW_FMT(macaddress, "%pM", wiphy.perm_addr);
 SHOW_FMT(address_mask, "%pM", wiphy.addr_mask);
+SHOW_FMT(dev_port, "%d", wiphy.dev_port);
+SHOW_FMT(n_radio_dev, "%d", wiphy.n_radio_dev);
 
 static ssize_t name_show(struct device *dev,
 			 struct device_attribute *attr,
@@ -70,6 +72,8 @@
 	&dev_attr_macaddress.attr,
 	&dev_attr_address_mask.attr,
 	&dev_attr_addresses.attr,
+	&dev_attr_dev_port.attr,
+	&dev_attr_n_radio_dev.attr,
 	&dev_attr_name.attr,
 	NULL,
 };
diff -ruw linux-6.13.12/net/wireless/sysfs.h linux-6.13.12-fbx/net/wireless/sysfs.h
--- linux-6.13.12/net/wireless/sysfs.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/sysfs.h	2025-09-25 17:40:37.963379107 +0200
@@ -7,4 +7,9 @@
 
 extern struct class ieee80211_class;
 
+int wiphy_sysfs_radio_init(void);
+void wiphy_sysfs_radio_exit(void);
+
+extern struct class ieee80211_radio_class;
+
 #endif /* __WIRELESS_SYSFS_H */
diff -ruw linux-6.13.12/net/wireless/trace.h linux-6.13.12-fbx/net/wireless/trace.h
--- linux-6.13.12/net/wireless/trace.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/trace.h	2025-09-25 17:40:37.963379107 +0200
@@ -1676,10 +1676,11 @@
 );
 
 TRACE_EVENT(rdev_set_wiphy_params,
-	TP_PROTO(struct wiphy *wiphy, u32 changed),
-	TP_ARGS(wiphy, changed),
+	TP_PROTO(struct wiphy *wiphy, u8 radio_id, u32 changed),
+	TP_ARGS(wiphy, radio_id, changed),
 	TP_STRUCT__entry(
 		WIPHY_ENTRY
+		__field(u8, radio_id)
 		__field(u32, changed)
 	),
 	TP_fast_assign(
@@ -1690,29 +1691,51 @@
 		  WIPHY_PR_ARG, __entry->changed)
 );
 
-DEFINE_EVENT(wiphy_wdev_evt, rdev_get_tx_power,
-	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev),
-	TP_ARGS(wiphy, wdev)
+DECLARE_EVENT_CLASS(wiphy_wdev_link_evt,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+		 unsigned int link_id),
+	TP_ARGS(wiphy, wdev, link_id),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		WDEV_ENTRY
+		__field(unsigned int, link_id)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		WDEV_ASSIGN;
+		__entry->link_id = link_id;
+	),
+	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", link_id: %u",
+		  WIPHY_PR_ARG, WDEV_PR_ARG, __entry->link_id)
 );
 
-TRACE_EVENT(rdev_set_tx_power,
+DEFINE_EVENT(wiphy_wdev_link_evt, rdev_get_tx_power,
 	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
+		 unsigned int link_id),
+	TP_ARGS(wiphy, wdev, link_id)
+);
+
+TRACE_EVENT(rdev_set_tx_power,
+	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev, u8 radio_id,
 		 enum nl80211_tx_power_setting type, int mbm),
-	TP_ARGS(wiphy, wdev, type, mbm),
+	TP_ARGS(wiphy, wdev, radio_id, type, mbm),
 	TP_STRUCT__entry(
 		WIPHY_ENTRY
 		WDEV_ENTRY
+		__field(u8, radio_id)
 		__field(enum nl80211_tx_power_setting, type)
 		__field(int, mbm)
 	),
 	TP_fast_assign(
 		WIPHY_ASSIGN;
 		WDEV_ASSIGN;
+		__entry->radio_id = radio_id;
 		__entry->type = type;
 		__entry->mbm = mbm;
 	),
-	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", type: %u, mbm: %d",
-		  WIPHY_PR_ARG, WDEV_PR_ARG,__entry->type, __entry->mbm)
+	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", radio_id: %u, type: %u, mbm: %d",
+		  WIPHY_PR_ARG, WDEV_PR_ARG, __entry->radio_id, __entry->type,
+		  __entry->mbm)
 );
 
 TRACE_EVENT(rdev_return_int_int,
@@ -2192,25 +2215,6 @@
 	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", noack_map: %u",
 		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->noack_map)
 );
-
-DECLARE_EVENT_CLASS(wiphy_wdev_link_evt,
-	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
-		 unsigned int link_id),
-	TP_ARGS(wiphy, wdev, link_id),
-	TP_STRUCT__entry(
-		WIPHY_ENTRY
-		WDEV_ENTRY
-		__field(unsigned int, link_id)
-	),
-	TP_fast_assign(
-		WIPHY_ASSIGN;
-		WDEV_ASSIGN;
-		__entry->link_id = link_id;
-	),
-	TP_printk(WIPHY_PR_FMT ", " WDEV_PR_FMT ", link_id: %u",
-		  WIPHY_PR_ARG, WDEV_PR_ARG, __entry->link_id)
-);
-
 DEFINE_EVENT(wiphy_wdev_link_evt, rdev_get_channel,
 	TP_PROTO(struct wiphy *wiphy, struct wireless_dev *wdev,
 		 unsigned int link_id),
@@ -3049,6 +3053,24 @@
 		  WIPHY_PR_ARG, NETDEV_PR_ARG)
 );
 
+TRACE_EVENT(rdev_set_epcs,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 bool val),
+	TP_ARGS(wiphy, netdev, val),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(bool, val)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		__entry->val = val;
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", config=%u",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->val)
+);
+
 /*************************************************************
  *	     cfg80211 exported functions traces		     *
  *************************************************************/
@@ -4104,6 +4126,66 @@
 		  __entry->link_mask)
 );
 
+TRACE_EVENT(cfg80211_mlo_reconf_add_done,
+	TP_PROTO(struct net_device *netdev, u16 link_mask,
+		 const u8 *buf, size_t len),
+	TP_ARGS(netdev, link_mask, buf, len),
+	TP_STRUCT__entry(
+		NETDEV_ENTRY
+		__field(u16, link_mask)
+		__dynamic_array(u8, buf, len)
+	),
+	TP_fast_assign(
+		NETDEV_ASSIGN;
+		__entry->link_mask = link_mask;
+		memcpy(__get_dynamic_array(buf), buf, len);
+	),
+	TP_printk(NETDEV_PR_FMT ", link_mask:0x%x",
+		  NETDEV_PR_ARG, __entry->link_mask)
+);
+
+TRACE_EVENT(rdev_assoc_ml_reconf,
+	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev,
+		 struct cfg80211_assoc_link *add_links,
+		 u16 rem_links),
+	TP_ARGS(wiphy, netdev, add_links, rem_links),
+	TP_STRUCT__entry(
+		WIPHY_ENTRY
+		NETDEV_ENTRY
+		__field(u16, add_links)
+		__field(u16, rem_links)
+	),
+	TP_fast_assign(
+		WIPHY_ASSIGN;
+		NETDEV_ASSIGN;
+		u32 i;
+
+		__entry->add_links = 0;
+		__entry->rem_links = rem_links;
+		for (i = 0; add_links && i < IEEE80211_MLD_MAX_NUM_LINKS; i++)
+			if (add_links[i].bss)
+				__entry->add_links |= BIT(i);
+	),
+	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", add_links=0x%x, rem_links=0x%x",
+		  WIPHY_PR_ARG, NETDEV_PR_ARG,
+		  __entry->add_links, __entry->rem_links)
+);
+
+TRACE_EVENT(cfg80211_epcs_changed,
+	TP_PROTO(struct wireless_dev *wdev, bool enabled),
+	TP_ARGS(wdev, enabled),
+	TP_STRUCT__entry(
+		WDEV_ENTRY
+		__field(u32, enabled)
+	),
+	TP_fast_assign(
+		WDEV_ASSIGN;
+		__entry->enabled = enabled;
+	),
+	TP_printk(WDEV_PR_FMT ", enabled=%u",
+		  WDEV_PR_ARG, __entry->enabled)
+);
+
 #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */
 
 #undef TRACE_INCLUDE_PATH
diff -ruw linux-6.13.12/net/wireless/util.c linux-6.13.12-fbx/net/wireless/util.c
--- linux-6.13.12/net/wireless/util.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/net/wireless/util.c	2025-09-25 17:40:37.963379107 +0200
@@ -2516,6 +2516,30 @@
 }
 EXPORT_SYMBOL(cfg80211_check_combinations);
 
+int cfg80211_get_radio_idx_by_chan(struct wiphy *wiphy,
+				   const struct ieee80211_channel *chan)
+{
+	const struct wiphy_radio *radio;
+	int i, j;
+	u32 freq;
+
+	if (!chan)
+		return -EINVAL;
+
+	freq = MHZ_TO_KHZ(chan->center_freq);
+	for (i = 0; i < wiphy->n_radio; i++) {
+		radio = &wiphy->radio[i];
+		for (j = 0; j < radio->n_freq_range; j++) {
+			if (freq >= radio->freq_range[j].start_freq &&
+			    freq <= radio->freq_range[j].end_freq)
+				return i;
+		}
+	}
+
+	return -ENOENT;
+}
+EXPORT_SYMBOL(cfg80211_get_radio_idx_by_chan);
+
 int ieee80211_get_ratemask(struct ieee80211_supported_band *sband,
 			   const u8 *rates, unsigned int n_rates,
 			   u32 *mask)
@@ -2572,7 +2596,6 @@
 {
 	struct cfg80211_registered_device *rdev;
 	struct wireless_dev *wdev;
-	int ret;
 
 	wdev = dev->ieee80211_ptr;
 	if (!wdev)
@@ -2584,11 +2607,9 @@
 
 	memset(sinfo, 0, sizeof(*sinfo));
 
-	wiphy_lock(&rdev->wiphy);
-	ret = rdev_get_station(rdev, dev, mac_addr, sinfo);
-	wiphy_unlock(&rdev->wiphy);
+	guard(wiphy)(&rdev->wiphy);
 
-	return ret;
+	return rdev_get_station(rdev, dev, mac_addr, sinfo);
 }
 EXPORT_SYMBOL(cfg80211_get_station);
 
diff -ruw linux-6.13.12/scripts/Makefile.dtbs linux-6.13.12-fbx/scripts/Makefile.dtbs
--- linux-6.13.12/scripts/Makefile.dtbs	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/scripts/Makefile.dtbs	2025-09-25 17:40:37.991379246 +0200
@@ -34,7 +34,7 @@
 # Assembly file to wrap dtb(o)
 # ---------------------------------------------------------------------------
 
-builtin-dtb-section = $(if $(filter arch/$(SRCARCH)/boot/dts%, $(obj)),.dtb.init.rodata,.rodata)
+builtin-dtb-section = .dtb.rodata
 
 # Generate an assembly file to wrap the output of the device tree compiler
 quiet_cmd_wrap_S_dtb = WRAP    $@
diff -ruw linux-6.13.12/scripts/Makefile.modpost linux-6.13.12-fbx/scripts/Makefile.modpost
--- linux-6.13.12/scripts/Makefile.modpost	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/scripts/Makefile.modpost	2025-09-25 17:40:37.995379266 +0200
@@ -97,6 +97,7 @@
 ksym-wl := $(if $(filter-out /%, $(ksym-wl)),$(if $(wildcard $(ksym-wl)),,$(srctree)/))$(ksym-wl)
 modpost-args += -t $(addprefix -u , $(ksym-wl))
 modpost-deps += $(ksym-wl)
+modpost-args += $(addprefix -U , $(CONFIG_UNUSED_KSYMS_WHITELIST_SYMS))
 endif
 
 ifeq ($(wildcard vmlinux.o),)
diff -ruw linux-6.13.12/scripts/dtc/include-prefixes/arm64/Makefile linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/Makefile
--- linux-6.13.12/scripts/dtc/include-prefixes/arm64/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/Makefile	2025-09-25 17:40:30.187340548 +0200
@@ -12,6 +12,7 @@
 subdir-y += bitmain
 subdir-y += broadcom
 subdir-y += cavium
+subdir-y += cortina-access
 subdir-y += exynos
 subdir-y += freescale
 subdir-y += hisilicon
diff -ruw linux-6.13.12/scripts/dtc/include-prefixes/arm64/amlogic/Makefile linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/amlogic/Makefile
--- linux-6.13.12/scripts/dtc/include-prefixes/arm64/amlogic/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/amlogic/Makefile	2025-09-25 17:40:30.195340588 +0200
@@ -10,6 +10,12 @@
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-jethome-jethub-j100.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-jethome-jethub-j110-rev-2.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-jethome-jethub-j110-rev-3.dtb
+DTC_FLAGS += -@
+fbx-boards += \
+	fbxwmr.dtb \
+	fbxwmr-r1.dtb fbxwmr-r2.dtb \
+	fbxwmr-r3.dtb fbxwmr-r4.dtb
+dtb-$(CONFIG_ARCH_MESON) += $(fbx-boards)
 dtb-$(CONFIG_ARCH_MESON) += meson-axg-s400.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12a-fbx8am-brcm.dtb
 dtb-$(CONFIG_ARCH_MESON) += meson-g12a-fbx8am-realtek.dtb
diff -ruw linux-6.13.12/scripts/dtc/include-prefixes/arm64/broadcom/Makefile linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/Makefile
--- linux-6.13.12/scripts/dtc/include-prefixes/arm64/broadcom/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/Makefile	2025-09-25 17:40:30.215340687 +0200
@@ -16,3 +16,4 @@
 subdir-y	+= bcmbca
 subdir-y	+= northstar2
 subdir-y	+= stingray
+subdir-y	+= bcm63xx
diff -ruw linux-6.13.12/scripts/dtc/include-prefixes/arm64/marvell/Makefile linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/marvell/Makefile
--- linux-6.13.12/scripts/dtc/include-prefixes/arm64/marvell/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/marvell/Makefile	2025-09-25 17:40:30.259340905 +0200
@@ -28,7 +28,19 @@
 dtb-$(CONFIG_ARCH_MVEBU) += cn9130-crb-B.dtb
 dtb-$(CONFIG_ARCH_MVEBU) += ac5x-rd-carrier-cn9131.dtb
 dtb-$(CONFIG_ARCH_MVEBU) += ac5-98dx35xx-rd.dtb
-dtb-$(CONFIG_ARCH_MVEBU) += cn9130-cf-base.dtb
-dtb-$(CONFIG_ARCH_MVEBU) += cn9130-cf-pro.dtb
-dtb-$(CONFIG_ARCH_MVEBU) += cn9131-cf-solidwan.dtb
-dtb-$(CONFIG_ARCH_MVEBU) += cn9132-clearfog.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp1_dsl_lte.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp1_ftth_p2p.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp2_ftth_p2p.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp2_ftth_pon.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp1_test_module.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_exp2_test_module.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_pcie_pine_pericom.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += fbxgw7r_pcie_pine_asmedia.dtb
+
+dtb-$(CONFIG_ARCH_MVEBU) += jbxgw7r.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += jbxgw7r_exp1_ftth_p2p.dtb
+dtb-$(CONFIG_ARCH_MVEBU) += jbxgw7r_exp2_ftth_p2p.dtb
+
+# export symbols in DTB files to allow overlay usage
+DTC_FLAGS	+= -@
diff -ruw linux-6.13.12/scripts/dtc/include-prefixes/arm64/qcom/Makefile linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/qcom/Makefile
--- linux-6.13.12/scripts/dtc/include-prefixes/arm64/qcom/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/qcom/Makefile	2025-09-25 17:40:30.291341064 +0200
@@ -1,288 +1,302 @@
 # SPDX-License-Identifier: GPL-2.0
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8016-sbc.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8016-sbc.dtb
 
 apq8016-sbc-usb-host-dtbs	:= apq8016-sbc.dtb apq8016-sbc-usb-host.dtbo
 
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8016-sbc-usb-host.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8016-sbc-d3-camera-mezzanine.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8016-schneider-hmibsc.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8039-t2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8094-sony-xperia-kitakami-karin_windy.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8096-db820c.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= apq8096-ifc6640.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5018-rdp432-c2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5018-tplink-archer-ax55-v1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5332-rdp441.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5332-rdp442.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5332-rdp468.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq5332-rdp474.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq6018-cp01-c1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq8074-hk01.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq8074-hk10-c1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq8074-hk10-c2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq9574-rdp418.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq9574-rdp433.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq9574-rdp449.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq9574-rdp453.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= ipq9574-rdp454.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8216-samsung-fortuna3g.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-acer-a1-724.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-alcatel-idol347.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-asus-z00l.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-gplus-fl8005a.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-huawei-g7.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-lg-c50.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-lg-m216.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-longcheer-l8150.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-longcheer-l8910.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-motorola-harpia.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-motorola-osprey.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-motorola-surnia.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-a3u-eur.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-a5u-eur.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-e5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-e7.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-gprimeltecan.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-grandmax.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-grandprimelte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-gt510.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-gt58.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-j3ltetw.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-j5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-j5x.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-rossa.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-samsung-serranove.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-thwc-uf896.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-thwc-ufi001c.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-wingtech-wt86518.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-wingtech-wt86528.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-wingtech-wt88047.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8916-yiming-uz801v3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8929-wingtech-wt82918hd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8939-huawei-kiwi.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8939-longcheer-l9100.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8939-samsung-a7.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8939-sony-xperia-kanuti-tulip.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8939-wingtech-wt82918.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8939-wingtech-wt82918hd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-motorola-potter.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-daisy.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-mido.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-tissot.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8953-xiaomi-vince.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8956-sony-xperia-loire-kugo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8956-sony-xperia-loire-suzu.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-lg-bullhead-rev-10.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-lg-bullhead-rev-101.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-lg-h815.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-msft-lumia-octagon-talkman.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8992-xiaomi-libra.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-huawei-angler-rev-101.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-msft-lumia-octagon-cityman.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-ivy.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-karin.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-satsuki.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-sumire.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8994-sony-xperia-kitakami-suzuran.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-oneplus3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-oneplus3t.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-sony-xperia-tone-dora.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-sony-xperia-tone-kagura.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-sony-xperia-tone-keyaki.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996-xiaomi-gemini.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996pro-xiaomi-natrium.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8996pro-xiaomi-scorpio.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-asus-novago-tp370ql.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-fxtec-pro1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-hp-envy-x2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-lenovo-miix-630.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-oneplus-cheeseburger.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-oneplus-dumpling.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-sony-xperia-yoshino-lilac.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-sony-xperia-yoshino-maple.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-sony-xperia-yoshino-poplar.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= msm8998-xiaomi-sagit.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcm6490-fairphone-fp5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcm6490-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcm6490-shift-otter.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs404-evb-1000.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs404-evb-4000.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs6490-rb3gen2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs8550-aim300-aiot.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs9100-ride.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qcs9100-ride-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qdu1000-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb2210-rb1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb4210-rb2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb5165-rb5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8016-sbc-usb-host.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8016-sbc-d3-camera-mezzanine.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8016-schneider-hmibsc.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8039-t2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8094-sony-xperia-kitakami-karin_windy.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8096-db820c.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= apq8096-ifc6640.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5018-rdp432-c2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5018-tplink-archer-ax55-v1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5332-rdp441.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5332-rdp442.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5332-rdp468.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq5332-rdp474.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq6018-cp01-c1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq8074-hk01.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq8074-hk10-c1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq8074-hk10-c2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp418.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp433.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp449.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp453.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= ipq9574-rdp454.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8216-samsung-fortuna3g.dtb
+dtb-$(CONFIG_ARCH_QCOM_FBX_DTB)	+= fbxgw9r.dtb jbxgw9r.dtb fbxgw9r-ltd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-acer-a1-724.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-alcatel-idol347.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-asus-z00l.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-gplus-fl8005a.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-huawei-g7.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-lg-c50.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-lg-m216.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-longcheer-l8150.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-longcheer-l8910.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-motorola-harpia.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-motorola-osprey.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-motorola-surnia.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-a3u-eur.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-a5u-eur.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-e5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-e7.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-gprimeltecan.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-grandmax.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-grandprimelte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-gt510.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-gt58.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-j3ltetw.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-j5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-j5x.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-rossa.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-samsung-serranove.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-thwc-uf896.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-thwc-ufi001c.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-wingtech-wt86518.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-wingtech-wt86528.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-wingtech-wt88047.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8916-yiming-uz801v3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8929-wingtech-wt82918hd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8939-huawei-kiwi.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8939-longcheer-l9100.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8939-samsung-a7.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8939-sony-xperia-kanuti-tulip.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8939-wingtech-wt82918.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8939-wingtech-wt82918hd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-motorola-potter.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-daisy.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-mido.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-tissot.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8953-xiaomi-vince.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8956-sony-xperia-loire-kugo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8956-sony-xperia-loire-suzu.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-lg-bullhead-rev-10.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-lg-bullhead-rev-101.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-lg-h815.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-msft-lumia-octagon-talkman.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8992-xiaomi-libra.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-huawei-angler-rev-101.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-msft-lumia-octagon-cityman.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-ivy.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-karin.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-satsuki.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-sumire.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8994-sony-xperia-kitakami-suzuran.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-oneplus3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-oneplus3t.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-sony-xperia-tone-dora.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-sony-xperia-tone-kagura.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-sony-xperia-tone-keyaki.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996-xiaomi-gemini.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996pro-xiaomi-natrium.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8996pro-xiaomi-scorpio.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-asus-novago-tp370ql.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-fxtec-pro1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-hp-envy-x2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-lenovo-miix-630.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-oneplus-cheeseburger.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-oneplus-dumpling.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-sony-xperia-yoshino-lilac.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-sony-xperia-yoshino-maple.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-sony-xperia-yoshino-poplar.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= msm8998-xiaomi-sagit.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcm6490-fairphone-fp5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcm6490-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcm6490-shift-otter.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs404-evb-1000.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs404-evb-4000.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs6490-rb3gen2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs8550-aim300-aiot.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs9100-ride.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qcs9100-ride-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qdu1000-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb2210-rb1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb4210-rb2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb5165-rb5.dtb
 
 qrb5165-rb5-vision-mezzanine-dtbs	:= qrb5165-rb5.dtb qrb5165-rb5-vision-mezzanine.dtbo
 
-dtb-$(CONFIG_ARCH_QCOM)	+= qrb5165-rb5-vision-mezzanine.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= qru1000-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8155p-adp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8295p-adp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8540p-ride.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8775p-ride.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sa8775p-ride-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-acer-aspire1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-coachz-r3-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-homestar-r2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-homestar-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-homestar-r4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-kingoftown.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r1-kb.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r3-kb.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r3-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r9.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r9-kb.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r9-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r10.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r10-kb.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-r10-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-r4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-r9.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-r10.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-nots-r4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-nots-r5.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-nots-r9.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-lazor-limozeen-nots-r10.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-lte-parade.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-lte-ti.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-parade.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel-ti.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel360-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pazquel360-wifi.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r2-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-pompom-r3-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-quackingstick-r0.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-quackingstick-r0-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-boe.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-inx.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-inx-rt5682s.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-wormdingler-rev1-boe-rt5682s.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7180-trogdor-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-crd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-crd-pro.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-evoker.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-evoker-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-herobrine-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-villager-r0.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-villager-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-villager-r1-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie-nvme.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-herobrine-zombie-nvme-lte.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-idp2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc7280-crd-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc8180x-lenovo-flex-5g.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc8180x-primus.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc8280xp-crd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc8280xp-lenovo-thinkpad-x13s.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sc8280xp-microsoft-arcata.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sda660-inforce-ifc6560.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm450-lenovo-tbx605f.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm450-motorola-ali.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-ganges-kirin.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-nile-discovery.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-nile-pioneer.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm630-sony-xperia-nile-voyager.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm632-fairphone-fp3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm632-motorola-ocean.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm636-sony-xperia-ganges-mermaid.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm660-xiaomi-lavender.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm670-google-sargo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r1.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-cheza-r3.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-db845c.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qrb5165-rb5-vision-mezzanine.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= qru1000-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8155p-adp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8295p-adp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8540p-ride.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8775p-ride.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sa8775p-ride-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-acer-aspire1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-coachz-r3-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-homestar-r2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-homestar-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-homestar-r4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-kingoftown.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r1-kb.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r3-kb.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r3-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r9.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r9-kb.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r9-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r10.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r10-kb.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-r10-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-r4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-r9.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-r10.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-nots-r4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-nots-r5.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-nots-r9.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-lazor-limozeen-nots-r10.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-lte-parade.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-lte-ti.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-parade.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel-ti.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel360-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pazquel360-wifi.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r2-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-pompom-r3-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-quackingstick-r0.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-quackingstick-r0-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-boe.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-inx.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-inx-rt5682s.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-wormdingler-rev1-boe-rt5682s.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7180-trogdor-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-crd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-crd-pro.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-evoker.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-evoker-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-herobrine-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-villager-r0.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-villager-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-villager-r1-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie-nvme.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-herobrine-zombie-nvme-lte.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-idp2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc7280-crd-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc8180x-lenovo-flex-5g.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc8180x-primus.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc8280xp-crd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc8280xp-lenovo-thinkpad-x13s.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sc8280xp-microsoft-arcata.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sda660-inforce-ifc6560.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm450-lenovo-tbx605f.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm450-motorola-ali.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-ganges-kirin.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-nile-discovery.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-nile-pioneer.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm630-sony-xperia-nile-voyager.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm632-fairphone-fp3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm632-motorola-ocean.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm636-sony-xperia-ganges-mermaid.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm660-xiaomi-lavender.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm670-google-sargo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-cheza-r1.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-cheza-r2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-cheza-r3.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-db845c.dtb
 
 sdm845-db845c-navigation-mezzanine-dtbs	:= sdm845-db845c.dtb sdm845-db845c-navigation-mezzanine.dtbo
 
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-db845c-navigation-mezzanine.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-lg-judyln.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-lg-judyp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-oneplus-enchilada.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-oneplus-fajita.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-samsung-starqltechn.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-sony-xperia-tama-akari.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-sony-xperia-tama-akatsuki.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-sony-xperia-tama-apollo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-xiaomi-beryllium-ebbg.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-xiaomi-beryllium-tianma.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-xiaomi-polaris.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm845-shift-axolotl.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm850-lenovo-yoga-c630.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdm850-samsung-w737.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sdx75-idp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm4250-oneplus-billie2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm4450-qrd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6115-fxtec-pro1x.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6115p-lenovo-j606f.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6125-sony-xperia-seine-pdx201.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6125-xiaomi-laurel-sprout.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6350-sony-xperia-lena-pdx213.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm6375-sony-xperia-murray-pdx225.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm7125-xiaomi-curtana.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm7125-xiaomi-joyeuse.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm7225-fairphone-fp4.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm7325-nothing-spacewar.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-microsoft-surface-duo.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-sony-xperia-kumano-bahamut.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8150-sony-xperia-kumano-griffin.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-sony-xperia-edo-pdx203.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-sony-xperia-edo-pdx206.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-xiaomi-elish-boe.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-xiaomi-elish-csot.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8250-xiaomi-pipa.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-microsoft-surface-duo2.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-sony-xperia-sagami-pdx214.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8350-sony-xperia-sagami-pdx215.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-qrd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-sony-xperia-nagara-pdx223.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8450-sony-xperia-nagara-pdx224.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8550-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8550-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8550-qrd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8550-samsung-q5q.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8550-sony-xperia-yodo-pdx234.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-db845c-navigation-mezzanine.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-lg-judyln.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-lg-judyp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-oneplus-enchilada.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-oneplus-fajita.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-samsung-starqltechn.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-sony-xperia-tama-akari.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-sony-xperia-tama-akatsuki.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-sony-xperia-tama-apollo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-xiaomi-beryllium-ebbg.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-xiaomi-beryllium-tianma.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-xiaomi-polaris.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm845-shift-axolotl.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm850-lenovo-yoga-c630.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdm850-samsung-w737.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sdx75-idp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm4250-oneplus-billie2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm4450-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6115-fxtec-pro1x.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6115p-lenovo-j606f.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6125-sony-xperia-seine-pdx201.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6125-xiaomi-laurel-sprout.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6350-sony-xperia-lena-pdx213.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm6375-sony-xperia-murray-pdx225.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm7125-xiaomi-curtana.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm7125-xiaomi-joyeuse.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm7225-fairphone-fp4.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm7325-nothing-spacewar.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-microsoft-surface-duo.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-sony-xperia-kumano-bahamut.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8150-sony-xperia-kumano-griffin.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-sony-xperia-edo-pdx203.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-sony-xperia-edo-pdx206.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-xiaomi-elish-boe.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-xiaomi-elish-csot.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8250-xiaomi-pipa.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-microsoft-surface-duo2.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-sony-xperia-sagami-pdx214.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8350-sony-xperia-sagami-pdx215.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-sony-xperia-nagara-pdx223.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8450-sony-xperia-nagara-pdx224.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8550-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8550-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8550-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8550-samsung-q5q.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8550-sony-xperia-yodo-pdx234.dtb
 
 sm8650-hdk-display-card-dtbs	:= sm8650-hdk.dtb sm8650-hdk-display-card.dtbo
 
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8650-hdk-display-card.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8650-hdk.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8650-mtp.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= sm8650-qrd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e78100-lenovo-thinkpad-t14s.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e80100-asus-vivobook-s15.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e80100-crd.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e80100-dell-xps13-9345.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e80100-lenovo-yoga-slim7x.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e80100-microsoft-romulus13.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e80100-microsoft-romulus15.dtb
-dtb-$(CONFIG_ARCH_QCOM)	+= x1e80100-qcp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8650-hdk-display-card.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8650-hdk.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8650-mtp.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= sm8650-qrd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e78100-lenovo-thinkpad-t14s.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e80100-asus-vivobook-s15.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e80100-crd.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e80100-dell-xps13-9345.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e80100-lenovo-yoga-slim7x.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e80100-microsoft-romulus13.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e80100-microsoft-romulus15.dtb
+dtb-$(CONFIG_ARCH_QCOM_DTB)	+= x1e80100-qcp.dtb
+
+always-$(CONFIG_ARCH_QCOM_FBX_DTB)	+= fbxgw9r_dtbs
+clean-files				+= fbxgw9r_dtbs
+board-dtbs				=  \
+					fbxgw9r.dtb \
+					fbxgw9r-ltd.dtb \
+					jbxgw9r.dtb
+
+cmd_dtbs               = ./scripts/dtbs.sh $@ $^
+quiet_cmd_dtbs         = DTBS    $@
+
+$(obj)/fbxgw9r_dtbs: $(addprefix $(obj)/,$(board-dtbs))
+	$(call cmd,dtbs)
diff -ruw linux-6.13.12/scripts/dtc/include-prefixes/dt-bindings/input/linux-event-codes.h linux-6.13.12-fbx/scripts/dtc/include-prefixes/dt-bindings/input/linux-event-codes.h
--- linux-6.13.12/scripts/dtc/include-prefixes/dt-bindings/input/linux-event-codes.h	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/dt-bindings/input/linux-event-codes.h	2025-09-25 17:40:37.299375815 +0200
@@ -807,6 +807,18 @@
 #define BTN_TRIGGER_HAPPY39		0x2e6
 #define BTN_TRIGGER_HAPPY40		0x2e7
 
+#define KEY_APP_TV			0x2f1
+#define KEY_APP_REPLAY			0x2f2
+#define KEY_APP_VIDEOCLUB		0x2f3
+#define KEY_APP_WHATSON			0x2f4
+#define KEY_APP_RECORDS			0x2f5
+#define KEY_APP_MEDIA			0x2f6
+#define KEY_APP_YOUTUBE			0x2f7
+#define KEY_APP_RADIOS			0x2f8
+#define KEY_APP_CANALVOD		0x2f9
+#define KEY_APP_PIP			0x2fa
+#define KEY_APP_NETFLIX			0x2fb
+
 /* We avoid low common keys in module aliases so they don't get huge. */
 #define KEY_MIN_INTERESTING	KEY_MUTE
 #define KEY_MAX			0x2ff
diff -ruw linux-6.13.12/scripts/mod/modpost.c linux-6.13.12-fbx/scripts/mod/modpost.c
--- linux-6.13.12/scripts/mod/modpost.c	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/scripts/mod/modpost.c	2025-09-25 17:40:38.015379365 +0200
@@ -2149,6 +2149,11 @@
 	}
 }
 
+struct syms_wl {
+	struct list_head list;
+	const char *name;
+};
+
 int main(int argc, char **argv)
 {
 	struct module *mod;
@@ -2157,9 +2162,11 @@
 	char *dump_write = NULL, *files_source = NULL;
 	int opt;
 	LIST_HEAD(dump_lists);
+	LIST_HEAD(syms_wl);
 	struct dump_list *dl, *dl2;
+	struct syms_wl *wl, *wl2;
 
-	while ((opt = getopt(argc, argv, "ei:MmnT:to:au:WwENd:")) != -1) {
+	while ((opt = getopt(argc, argv, "ei:MmnT:to:au:U:WwENd:")) != -1) {
 		switch (opt) {
 		case 'e':
 			external_module = true;
@@ -2193,6 +2200,11 @@
 		case 'u':
 			unused_exports_white_list = optarg;
 			break;
+		case 'U':
+			wl = xmalloc(sizeof(*wl));
+			wl->name = optarg;
+			list_add_tail(&wl->list, &syms_wl);
+			break;
 		case 'W':
 			extra_warn = true;
 			break;
@@ -2240,6 +2252,16 @@
 	if (unused_exports_white_list)
 		handle_white_list_exports(unused_exports_white_list);
 
+	list_for_each_entry_safe(wl, wl2, &syms_wl, list) {
+		struct symbol *sym = find_symbol(wl->name);
+		if (sym)
+			sym->used = true;
+		else
+			error("whitelist symbol \"%s\" not found", wl->name);
+		list_del(&wl->list);
+		free(wl);
+	}
+
 	list_for_each_entry(mod, &modules, list) {
 		if (mod->dump_file)
 			continue;
diff -ruw linux-6.13.12/sound/soc/kirkwood/Kconfig linux-6.13.12-fbx/sound/soc/kirkwood/Kconfig
--- linux-6.13.12/sound/soc/kirkwood/Kconfig	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/sound/soc/kirkwood/Kconfig	2025-09-25 17:40:38.367381111 +0200
@@ -16,3 +16,8 @@
 	  Say Y if you want to add support for SoC audio on
 	  the Armada 370 Development Board.
 
+config SND_KIRKWOOD_SOC_FBXGW2R
+	tristate "Soc Audio support for fbxgw2r"
+	depends on SND_KIRKWOOD_SOC && MACH_FBXGW2R && I2C
+	select SND_KIRKWOOD_SOC_I2S
+	select SND_SOC_CS42L52
diff -ruw linux-6.13.12/sound/soc/kirkwood/Makefile linux-6.13.12-fbx/sound/soc/kirkwood/Makefile
--- linux-6.13.12/sound/soc/kirkwood/Makefile	2025-04-20 10:18:30.000000000 +0200
+++ linux-6.13.12-fbx/sound/soc/kirkwood/Makefile	2025-09-25 17:40:38.367381111 +0200
@@ -6,3 +6,6 @@
 snd-soc-armada-370-db-y := armada-370-db.o
 
 obj-$(CONFIG_SND_KIRKWOOD_SOC_ARMADA370_DB) += snd-soc-armada-370-db.o
+
+snd-soc-fbxgw2r-objs := kirkwood-fbxgw2r.o
+obj-$(CONFIG_SND_KIRKWOOD_SOC_FBXGW2R) += snd-soc-fbxgw2r.o
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am-brcm.dtso	2025-04-20 10:18:30.000000000 +0200
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2024 Freebox SAS
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+
+&uart_A {
+	bluetooth {
+		compatible = "brcm,bcm43438-bt";
+		shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+		max-speed = <2000000>;
+		clocks = <&wifi32k>;
+		clock-names = "lpo";
+		vbat-supply = <&vddao_3v3>;
+		vddio-supply = <&vddio_ao1v8>;
+	};
+};
+
+&sd_emmc_a {
+	/* Per mmc-controller.yaml */
+	#address-cells = <1>;
+	#size-cells = <0>;
+	/* NB: may be either AP6398S or AP6398SR3 wifi module */
+	brcmf: wifi@1 {
+		reg = <1>;
+		compatible = "brcm,bcm4329-fmac";
+	};
+};
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am-realtek.dtso	2025-04-20 10:18:30.000000000 +0200
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2024 Freebox SAS
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+
+&uart_A {
+	bluetooth {
+		compatible = "realtek,rtl8822cs-bt";
+		enable-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+		host-wake-gpios = <&gpio GPIOX_19 GPIO_ACTIVE_HIGH>;
+		device-wake-gpios = <&gpio GPIOX_18 GPIO_ACTIVE_HIGH>;
+	};
+};
+
+&sd_emmc_a {
+	/* No explicit compatible for rtl8822cs sdio */
+};
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12/arch/arm64/boot/dts/amlogic/meson-g12a-fbx8am.dts	2025-04-20 10:18:30.000000000 +0200
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2024 Freebox SAS
+
+/*
+ * SEI codename: SEI530FB (based on SEI510)
+ * Freebox codename: fbx8am
+ * Commercial names: Freebox Pop, Player TV Free 4K
+ */
+
+/dts-v1/;
+
+#include "meson-g12a.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
+
+/ {
+	compatible = "freebox,fbx8am", "amlogic,g12a";
+	model = "Freebox Player Pop";
+	chassis-type = "embedded";
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
+
+	gpio-keys-polled {
+		compatible = "gpio-keys-polled";
+		poll-interval = <100>;
+
+		/* Physical user-accessible reset button near USB port */
+		power-button {
+			label = "Reset";
+			linux,code = <BTN_MISC>;
+			gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	spdif_dit: audio-codec-2 {
+		#sound-dai-cells = <0>;
+		compatible = "linux,spdif-dit";
+		status = "okay";
+		sound-name-prefix = "DIT";
+	};
+
+	aliases {
+		serial0 = &uart_AO;
+		ethernet0 = &ethmac;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	emmc_pwrseq: emmc-pwrseq {
+		compatible = "mmc-pwrseq-emmc";
+		reset-gpios = <&gpio BOOT_12 GPIO_ACTIVE_LOW>;
+	};
+
+	hdmi-connector {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_connector_in: endpoint {
+				remote-endpoint = <&hdmi_tx_tmds_out>;
+			};
+		};
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x80000000>;
+	};
+
+	ao_5v: regulator-ao-5v {
+		compatible = "regulator-fixed";
+		regulator-name = "AO_5V";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		vin-supply = <&dc_in>;
+		regulator-always-on;
+	};
+
+	dc_in: regulator-dc-in {
+		compatible = "regulator-fixed";
+		regulator-name = "DC_IN";
+		regulator-min-microvolt = <12000000>;
+		regulator-max-microvolt = <12000000>;
+		regulator-always-on;
+	};
+
+	emmc_1v8: regulator-emmc-1v8 {
+		compatible = "regulator-fixed";
+		regulator-name = "EMMC_1V8";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		vin-supply = <&vddao_3v3>;
+		regulator-always-on;
+	};
+
+	vddao_3v3: regulator-vddao-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDAO_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		vin-supply = <&ao_5v>;
+		regulator-always-on;
+	};
+
+	vddao_3v3_t: regulator-vddao-3v3-t {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDAO_3V3_T";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		vin-supply = <&vddao_3v3>;
+		gpio = <&gpio GPIOH_8 GPIO_OPEN_DRAIN>;
+		enable-active-high;
+	};
+
+	vddcpu: regulator-vddcpu {
+		/*
+		 * SY8120B1ABC DC/DC Regulator.
+		 */
+		compatible = "pwm-regulator";
+
+		regulator-name = "VDDCPU";
+		regulator-min-microvolt = <721000>;
+		regulator-max-microvolt = <1022000>;
+
+		pwm-supply = <&ao_5v>;
+
+		pwms = <&pwm_AO_cd 1 1250 0>;
+		pwm-dutycycle-range = <100 0>;
+
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	vddio_ao1v8: regulator-vddio-ao1v8 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDIO_AO1V8";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		vin-supply = <&vddao_3v3>;
+		regulator-always-on;
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+		post-power-on-delay-ms = <10>; /* required for 43752 */
+		clocks = <&wifi32k>;
+		clock-names = "ext_clock";
+	};
+
+	wifi32k: wifi32k {
+		compatible = "pwm-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+		pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768KHz */
+	};
+
+	sound {
+		compatible = "amlogic,axg-sound-card";
+		model = "fbx8am";
+		audio-aux-devs = <&tdmout_b>;
+		audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
+				"TDMOUT_B IN 1", "FRDDR_B OUT 1",
+				"TDMOUT_B IN 2", "FRDDR_C OUT 1",
+				"TDM_B Playback", "TDMOUT_B OUT",
+				"SPDIFOUT_A IN 0", "FRDDR_A OUT 3",
+				"SPDIFOUT_A IN 1", "FRDDR_B OUT 3",
+				"SPDIFOUT_A IN 2", "FRDDR_C OUT 3";
+
+		clocks = <&clkc CLKID_MPLL2>,
+			 <&clkc CLKID_MPLL0>,
+			 <&clkc CLKID_MPLL1>;
+
+		assigned-clocks = <&clkc CLKID_MPLL2>,
+				  <&clkc CLKID_MPLL0>,
+				  <&clkc CLKID_MPLL1>;
+		assigned-clock-parents = <0>, <0>, <0>;
+		assigned-clock-rates = <294912000>,
+				       <270950400>,
+				       <393216000>;
+
+		dai-link-0 {
+			sound-dai = <&frddr_a>;
+		};
+
+		dai-link-1 {
+			sound-dai = <&frddr_b>;
+		};
+
+		dai-link-2 {
+			sound-dai = <&frddr_c>;
+		};
+
+		/* 8ch hdmi interface */
+		dai-link-3 {
+			sound-dai = <&tdmif_b>;
+			dai-format = "i2s";
+			dai-tdm-slot-tx-mask-0 = <1 1>;
+			dai-tdm-slot-tx-mask-1 = <1 1>;
+			dai-tdm-slot-tx-mask-2 = <1 1>;
+			dai-tdm-slot-tx-mask-3 = <1 1>;
+			mclk-fs = <256>;
+
+			codec {
+				sound-dai = <&tohdmitx TOHDMITX_I2S_IN_B>;
+			};
+		};
+
+		/* spdif hdmi or toslink interface */
+		dai-link-4 {
+			sound-dai = <&spdifout_a>;
+
+			codec-0 {
+				sound-dai = <&spdif_dit>;
+			};
+
+			codec-1 {
+				sound-dai = <&tohdmitx TOHDMITX_SPDIF_IN_A>;
+			};
+		};
+
+		/* spdif hdmi interface */
+		dai-link-5 {
+			sound-dai = <&spdifout_b>;
+
+			codec {
+				sound-dai = <&tohdmitx TOHDMITX_SPDIF_IN_B>;
+			};
+		};
+
+		/* hdmi glue */
+		dai-link-6 {
+			sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>;
+
+			codec {
+				sound-dai = <&hdmi_tx>;
+			};
+		};
+	};
+};
+
+&arb {
+	status = "okay";
+};
+
+&cecb_AO {
+	pinctrl-0 = <&cec_ao_b_h_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+	hdmi-phandle = <&hdmi_tx>;
+};
+
+&clkc_audio {
+	status = "okay";
+};
+
+&cpu0 {
+	cpu-supply = <&vddcpu>;
+	operating-points-v2 = <&cpu_opp_table>;
+	clocks = <&clkc CLKID_CPU_CLK>;
+	clock-latency = <50000>;
+};
+
+&cpu1 {
+	cpu-supply = <&vddcpu>;
+	operating-points-v2 = <&cpu_opp_table>;
+	clocks = <&clkc CLKID_CPU_CLK>;
+	clock-latency = <50000>;
+};
+
+&cpu2 {
+	cpu-supply = <&vddcpu>;
+	operating-points-v2 = <&cpu_opp_table>;
+	clocks = <&clkc CLKID_CPU_CLK>;
+	clock-latency = <50000>;
+};
+
+&cpu3 {
+	cpu-supply = <&vddcpu>;
+	operating-points-v2 = <&cpu_opp_table>;
+	clocks = <&clkc CLKID_CPU_CLK>;
+	clock-latency = <50000>;
+};
+
+&ethmac {
+	status = "okay";
+	phy-handle = <&internal_ephy>;
+	phy-mode = "rmii";
+};
+
+&frddr_a {
+	status = "okay";
+};
+
+&frddr_b {
+	status = "okay";
+};
+
+&frddr_c {
+	status = "okay";
+};
+
+&spdifout_a {
+	pinctrl-0 = <&spdif_out_h_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&spdifout_b {
+	status = "okay";
+};
+
+&hdmi_tx {
+	status = "okay";
+	pinctrl-0 = <&hdmitx_hpd_pins>, <&hdmitx_ddc_pins>;
+	pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+	hdmi_tx_tmds_out: endpoint {
+		remote-endpoint = <&hdmi_connector_in>;
+	};
+};
+
+&i2c3 {
+	status = "okay";
+	pinctrl-0 = <&i2c3_sda_a_pins>, <&i2c3_sck_a_pins>;
+	pinctrl-names = "default";
+};
+
+&ir {
+	status = "okay";
+	pinctrl-0 = <&remote_input_ao_pins>;
+	pinctrl-names = "default";
+};
+
+&pwm_AO_cd {
+	pinctrl-0 = <&pwm_ao_d_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&xtal>;
+	clock-names = "clkin1";
+	status = "okay";
+};
+
+&pwm_ef {
+	status = "okay";
+	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&xtal>;
+	clock-names = "clkin0";
+};
+
+&pdm {
+	pinctrl-0 = <&pdm_din0_z_pins>, <&pdm_din1_z_pins>,
+		    <&pdm_din2_z_pins>, <&pdm_din3_z_pins>,
+		    <&pdm_dclk_z_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&saradc {
+	status = "okay";
+	vref-supply = <&vddio_ao1v8>;
+};
+
+/* SDIO */
+&sd_emmc_a {
+	status = "okay";
+	pinctrl-0 = <&sdio_pins>;
+	pinctrl-1 = <&sdio_clk_gate_pins>;
+	pinctrl-names = "default", "clk-gate";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	sd-uhs-sdr50;
+	max-frequency = <100000000>;
+
+	non-removable;
+	disable-wp;
+
+	/* WiFi firmware requires power to be kept while in suspend */
+	keep-power-in-suspend;
+
+	mmc-pwrseq = <&sdio_pwrseq>;
+
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddio_ao1v8>;
+};
+
+/* SD card */
+&sd_emmc_b {
+	status = "okay";
+	pinctrl-0 = <&sdcard_c_pins>;
+	pinctrl-1 = <&sdcard_clk_gate_c_pins>;
+	pinctrl-names = "default", "clk-gate";
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <50000000>;
+	disable-wp;
+
+	cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>;
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddao_3v3>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+	status = "okay";
+	pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
+	pinctrl-1 = <&emmc_clk_gate_pins>;
+	pinctrl-names = "default", "clk-gate";
+
+	bus-width = <8>;
+	cap-mmc-highspeed;
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
+	max-frequency = <200000000>;
+	non-removable;
+	disable-wp;
+
+	mmc-pwrseq = <&emmc_pwrseq>;
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&emmc_1v8>;
+};
+
+&tdmif_b {
+	status = "okay";
+};
+
+&tdmout_b {
+	status = "okay";
+};
+
+&tohdmitx {
+	status = "okay";
+};
+
+&uart_A {
+	status = "okay";
+	pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
+	pinctrl-names = "default";
+	uart-has-rtscts;
+};
+
+&uart_AO {
+	status = "okay";
+	pinctrl-0 = <&uart_ao_a_pins>;
+	pinctrl-names = "default";
+};
+
+&usb {
+	status = "okay";
+	dr_mode = "host";
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./Makefile linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/Makefile
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/Makefile	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,37 @@
+board-dtbs = \
+	fbxgw8r-board-00.dtb \
+	fbxgw8r-board-01.dtb \
+	fbxgw8r-board-02.dtb \
+	fbxgw8r-board-03.dtb \
+	fbxgw8r-board-04.dtb
+
+dtb-$(CONFIG_ARCH_BCMBCA) += bcm963158ref1d.dtb fbxgw8r.dtb $(board-dtbs)
+
+always-y	:= $(dtb-y)
+always-$(CONFIG_ARCH_BCMBCA) += fbxgw8r_dtbs
+
+subdir-y	:= $(dts-dirs)
+clean-files	:= *.dtb fbxgw8r_dtbs
+
+cmd_dtbs               = ./scripts/dtbs.sh $@ $^
+quiet_cmd_dtbs         = DTBS    $@
+
+# calculate size of file $1 aligned to a $2 boundary
+file_size = $(shell echo "sz=$$(stat -c %s $1); a=$2; (sz + a - 1) / a * a" | bc)
+
+# due to a bug in CFE v2.4, check that $1 and $2's sizes are okay and
+# will be accepted. in flash mode we need to account for the AES
+# padding (15 bytes at most)
+check_dtb_size	= if [ $(call file_size,$1,16) -gt $(call file_size,$2,1) ]; \
+	then echo "bad DTB size for CFE v2.4: $2's size must be greater than $1's size"; exit 1; fi
+
+$(obj)/fbxgw8r_dtbs: $(addprefix $(obj)/,$(board-dtbs))
+	$(Q)$(call check_dtb_size,$(obj)/fbxgw8r.dtb,$(obj)/fbxgw8r-board-00.dtb)
+	$(Q)$(call check_dtb_size,$(obj)/fbxgw8r.dtb,$(obj)/fbxgw8r-board-01.dtb)
+	$(call cmd,dtbs)
+
+# export symbols in DTBs file to allow overlay usage
+DTC_FLAGS	+= -@
+
+dtb-$(CONFIG_ARCH_BCMBCA) += fbxgw8r_pcie_pine_asmedia.dtb
+dtb-$(CONFIG_ARCH_BCMBCA) += fbxgw8r_pcie_pine_dualband_noswitch.dtb
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./bcm63158.dtsi linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/bcm63158.dtsi
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./bcm63158.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/bcm63158.dtsi	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,1126 @@
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/reset/brcm,bcm63xx-pmc.h>
+#include <dt-bindings/brcm,bcm63158-ubus.h>
+#include <dt-bindings/pinctrl/bcm63158-pinfunc.h>
+#include <dt-bindings/brcm,bcm63xx-pcie.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/soc/broadcom,bcm63xx-xdslphy.h>
+#include <dt-bindings/soc/broadcom,bcm63158-procmon.h>
+
+
+#define USE_PSCI	// comment when booting on broadcom CFE.
+
+/*
+ * uncomment when xrdp reserved memory is needed for debugging.
+ */
+// #define USE_RDP_RESERVED_TM
+
+#define SDIO_EMMC_SPI                   95
+#define SPU_GMAC_SPI                    75
+#define HS_SPI_SPI			37
+#define BSC_I2C0_SPI			82
+#define BSC_I2C1_SPI			83
+#define PCIE0_SPI			60
+#define PCIE1_SPI			61
+#define PCIE2_SPI			62
+#define PCIE3_SPI			63
+#define HS_UART_SPI			34
+#define XHCI_SPI			123
+#define OHCI0_SPI			124
+#define EHCI0_SPI			125
+#define OHCI1_SPI			121
+#define EHCI1_SPI			122
+
+#define DRAM_BASE			0x0
+#define DRAM_DEF_SIZE			0x08000000
+
+/memreserve/ 0x00000000 0x00020000;
+
+/ {
+   	model = "Broadcom-v8A";
+	compatible = "brcm,brcm-v8A";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+#ifdef USE_PSCI
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+#define CPU_ENABLE_METHOD "psci"
+#else
+#define CPU_ENABLE_METHOD "spin-table"
+#endif
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+			skip-enumeration;
+		};
+	};
+
+        cpus {
+                #address-cells = <2>;
+		#size-cells = <0>;
+
+                B53_0: cpu@0 {
+                        device_type = "cpu";
+                        compatible = "arm,cortex-a53";
+			reg = <0x0 0x0>;
+                        next-level-cache = <&L2_0>;
+                };
+                B53_1: cpu@1 {
+                        device_type = "cpu";
+                        compatible = "arm,cortex-a53";
+		        reg = <0x0 0x1>;
+			enable-method = CPU_ENABLE_METHOD;
+                        cpu-release-addr = <0x0 0xfff8>;
+                        next-level-cache = <&L2_0>;
+                };
+                B53_2: cpu@2 {
+                        device_type = "cpu";
+			compatible = "arm,cortex-a53";
+                        reg = <0x0 0x2>;
+			enable-method = CPU_ENABLE_METHOD;
+                        cpu-release-addr = <0x0 0xfff8>;
+			next-level-cache = <&L2_0>;
+                };
+                B53_3: cpu@3 {
+                        device_type = "cpu";
+			compatible = "arm,cortex-a53";
+	                reg = <0x0 0x3>;
+			enable-method = CPU_ENABLE_METHOD;
+      			cpu-release-addr = <0x0 0xfff8>;
+                        next-level-cache = <&L2_0>;
+                };
+
+                L2_0: l2-cache0 {
+                        compatible = "cache";
+                };
+        };
+
+	timer {
+                compatible = "arm,armv8-timer";
+                interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                             <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                             <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                             <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+        pmu {
+                compatible = "arm,armv8-pmuv3";
+                interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+                interrupt-affinity = <&B53_0>,
+                                     <&B53_1>,
+                                     <&B53_2>,
+                                     <&B53_3>;
+	};
+
+	soc_dram: memory@00000000 {
+		device_type = "memory";
+		reg = <0x00000000 DRAM_BASE 0x0 DRAM_DEF_SIZE>;
+
+		// this is overwritten by bootloader with correct value
+		brcm,ddr-mcb = <0x4142b>;
+	};
+
+        reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		atf@0x10000000 {
+			no-map;
+			reg = <0x0 0x10000000 0x0 0x00100000>;
+		};
+		optee@0x10800000 {
+			no-map;
+			reg = <0x0 0x10800000 0x0 0x00400000>;
+		};
+
+		optee-shared-area@0x11000000 {
+			no-map;
+			reg = <0x0 0x11000000 0x0 0x00100000>;
+		};
+		dsl_reserved: dsl_reserved {
+			compatible = "shared-dma-pool";
+			/*
+			 * only 3MB are actually used, but because of pointer alignment
+			 * arithmetics done by the driver, they need to be at the end of an
+			 * 8MB aligned region, must be at an address lower than 256M too
+			 */
+			size = <0x0 0x00800000>;
+			alignment = <0x0 0x00800000>;
+			alloc-ranges = <0x0 0x0 0x0 0x10000000>;
+			no-map;
+			no-cache;
+                };
+#ifdef USE_RDP_RESERVED_TM
+		rdp_reserved_tm: rdp_reserved_tm {
+			compatible = "shared-dma-pool";
+			size = <0x0 0x00800000>;
+			alloc-ranges = <0x0 0x0 0x0 0x10000000>;
+			no-map;
+			no-cache;
+                };
+#endif
+	};
+
+        uartclk: uartclk {
+                compatible = "fixed-clock";
+                #clock-cells = <0>;
+                clock-frequency = <50000000>;
+	};
+
+	spiclk: spiclk {
+                compatible = "fixed-clock";
+                #clock-cells = <0>;
+                clock-frequency = <(200 * 1000 * 1000)>;
+	};
+
+	pcie01: pcidual@80040000 {
+		status = "disabled";
+		device_type = "pci";
+		compatible = "brcm,bcm63xx-pcie";
+		reg = <0x0 0x80040000 0x0 0xa000>;
+		dma-coherent;
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x02000000 0 0xC0000000 0 0xC0000000 0 0x10000000>;
+		bus-range = <0x0 0xff>;
+
+		resets = <&pmc PMC_R_PCIE01>;
+		reset-names = "pcie0";
+
+		ubus = <&ubus4 UBUS_PORT_ID_PCIE0>;
+		procmon = <&procmon RCAL_1UM_VERT>;
+
+		interrupt-names = "intr";
+		interrupts = <GIC_SPI PCIE0_SPI IRQ_TYPE_LEVEL_HIGH>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI PCIE0_SPI IRQ_TYPE_LEVEL_HIGH>;
+
+		brcm,num-lanes = <2>;
+		brcm,dram = <&soc_dram>;
+	};
+
+	pcie0: pci@80040000 {
+		status = "disabled";
+		device_type = "pci";
+		compatible = "brcm,bcm63xx-pcie";
+		reg = <0x0 0x80040000 0x0 0xa000>;
+		dma-coherent;
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x02000000 0 0xC0000000 0 0xC0000000 0 0x10000000>;
+		bus-range = <0x0 0xff>;
+
+		resets = <&pmc PMC_R_PCIE0>;
+		reset-names = "pcie0";
+
+		ubus = <&ubus4 UBUS_PORT_ID_PCIE0>;
+		procmon = <&procmon RCAL_1UM_VERT>;
+
+		interrupt-names = "intr";
+		interrupts = <GIC_SPI PCIE0_SPI IRQ_TYPE_LEVEL_HIGH>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI PCIE0_SPI IRQ_TYPE_LEVEL_HIGH>;
+
+		brcm,num-lanes = <1>;
+		brcm,dram = <&soc_dram>;
+	};
+
+	pcie1: pci@80050000 {
+		status = "disabled";
+		device_type = "pci";
+		compatible = "brcm,bcm63xx-pcie";
+		reg = <0x0 0x80050000 0x0 0xa000>;
+		dma-coherent;
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x02000000 0 0xD0000000 0 0xD0000000 0 0x10000000>;
+		bus-range = <0x0 0xff>;
+
+		resets = <&pmc PMC_R_PCIE1>;
+		reset-names = "pcie0";
+
+		ubus = <&ubus4 UBUS_PORT_ID_PCIE0>;
+		procmon = <&procmon RCAL_1UM_VERT>;
+
+		interrupt-names = "intr";
+		interrupts = <GIC_SPI PCIE1_SPI IRQ_TYPE_LEVEL_HIGH>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI PCIE1_SPI IRQ_TYPE_LEVEL_HIGH>;
+
+		brcm,num-lanes = <1>;
+		brcm,dram = <&soc_dram>;
+	};
+
+	pcie2: pci@80060000 {
+		status = "disabled";
+		device_type = "pci";
+		compatible = "brcm,bcm63xx-pcie";
+		reg = <0x0 0x80060000 0x0 0xa000>;
+		dma-coherent;
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x02000000 0 0xE0000000 0 0xE0000000 0 0x10000000>;
+		bus-range = <0x0 0xff>;
+
+		resets = <&pmc PMC_R_PCIE2>;
+		reset-names = "pcie0";
+
+		ubus = <&ubus4 UBUS_PORT_ID_PCIE2>;
+		procmon = <&procmon RCAL_1UM_VERT>;
+
+		interrupt-names = "intr";
+		interrupts = <GIC_SPI PCIE2_SPI IRQ_TYPE_LEVEL_HIGH>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI PCIE2_SPI IRQ_TYPE_LEVEL_HIGH>;
+
+		brcm,num-lanes = <1>;
+		brcm,dram = <&soc_dram>;
+	};
+
+	pcie3: pci@80070000 {
+		status = "disabled";
+		device_type = "pci";
+		compatible = "brcm,bcm63xx-pcie";
+		reg = <0x0 0x80070000 0x0 0xa000>;
+		dma-coherent;
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x02000000 0 0xB0000000 0 0xB0000000 0 0x10000000>;
+		bus-range = <0x0 0xff>;
+
+		resets = <&pmc PMC_R_PCIE3>;
+		reset-names = "pcie0";
+
+		ubus = <&ubus4 UBUS_PORT_ID_PCIE3>;
+		procmon = <&procmon RCAL_1UM_VERT>;
+
+		interrupt-names = "intr";
+		interrupts = <GIC_SPI PCIE3_SPI IRQ_TYPE_LEVEL_HIGH>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI PCIE3_SPI IRQ_TYPE_LEVEL_HIGH>;
+
+		brcm,num-lanes = <1>;
+	};
+
+	/* ARM bus */
+	axi@80000000 {
+                compatible = "simple-bus";
+                #address-cells = <2>;
+                #size-cells = <2>;
+                ranges = <0x0 0x0 0x0 0x80000000 0x0 0x04000000>;
+
+		xtm: xtm@80130000 {
+			compatible = "brcm,bcm63158-xtm";
+			status = "disabled";
+
+			interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+			reg = <0x0 0x130000 0x0 0x4000>;
+
+			xdsl-phy = <&xdsl_phy>;
+			xtm-runner,xrdp = <&xrdp>;
+		};
+
+		memc: memc@80180000 {
+			compatible = "brcm,bcm63158-memc";
+			reg = <0x0 0x180000 0x0 0x40000>;
+		};
+
+		pmc: pmc@80200000 {
+			compatible = "brcm,bcm63158-pmc";
+			reg = <0x0 0x200000 0x0 0x10000>;
+			#reset-cells = <1>;
+		};
+
+		procmon: procmon@80280000 {
+			compatible = "brcm,bcm63158-procmon";
+			reg = <0x0 0x280000 0x0 0x100>;
+			#procmon-cells = <1>;
+		};
+
+		ubus4: ubus4@80300000 {
+			compatible = "brcm,bcm63158-ubus4";
+			reg = <0x0 0x03000000 0x0 0x00500000>,
+				<0x0 0x10a0400 0x0 0x400>;
+			reg-names = "master-config", "coherency-config";
+			#ubus-cells = <1>;
+			brcm,dram = <&soc_dram>;
+		};
+
+		sf2: sf2@80400000 {
+			compatible = "brcm,bcm63158-sf2";
+			reg = <0x0 0x400000 0x0 0x80000>,
+			    <0x0 0x480000 0x0 0x500>,
+			    <0x0 0x4805c0 0x0 0x10>,
+			    <0x0 0x480600 0x0 0x200>,
+			    <0x0 0x480800 0x0 0x500>;
+			reg-names = "core", "reg", "mdio", "fcb", "acb";
+			resets = <&pmc PMC_R_SF2>;
+			reset-names = "sf2";
+			status = "disabled";
+
+			sf2,qphy-base-id = <1>;
+			sf2,sphy-phy-id = <5>;
+			sf2,serdes-phy-id = <6>;
+
+			leds-top = <&leds_top_syscon>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				sf2_port0: port@0 {
+					// this is a normal port
+					reg = <0>;
+					status = "disabled";
+					phy-handle = <&sf2_qphy0>;
+					phy-connection-type = "gmii";
+				};
+
+				sf2_port1: port@1 {
+					// this is a normal port
+					reg = <1>;
+					status = "disabled";
+					phy-handle = <&sf2_qphy1>;
+					phy-connection-type = "gmii";
+				};
+
+				sf2_port2: port@2 {
+					// this is a normal port
+					reg = <2>;
+					status = "disabled";
+					phy-handle = <&sf2_qphy2>;
+					phy-connection-type = "gmii";
+				};
+
+				sf2_port3: port@3 {
+					// this is a normal port
+					reg = <3>;
+					status = "disabled";
+					/* 0: quad phy3, 1: rgmii2 */
+					mux1-in-port = <0>;
+					phy-handle = <&sf2_qphy3>;
+					phy-connection-type = "gmii";
+				};
+
+				sf2_port4: port@4 {
+					// this is a normal port
+					reg = <4>;
+					status = "disabled";
+					/* default config is xbar to sphy */
+					xbar-in-port = <2>;
+					phy-handle = <&sf2_sphy>;
+					phy-connection-type = "gmii";
+				};
+
+				sf2_port5: port@5 {
+					// this is a CPU port
+					reg = <5>;
+					status = "disabled";
+					phy-connection-type = "internal";
+					ethernet = <&runner_unimac1>;
+					fixed-link {
+						speed = <2500>;
+						full-duplex;
+					};
+				};
+
+				sf2_port6: port@6 {
+					// this is a normal port
+					reg = <6>;
+					status = "disabled";
+					xbar-in-port = <1>;
+					/* default config is xbar to serdes */
+					phy-connection-type = "sgmii";
+				};
+
+				sf2_port7: port@7 {
+					// this is a CPU port
+					reg = <7>;
+					status = "disabled";
+					ethernet = <&runner_unimac2>;
+					phy-connection-type = "internal";
+					fixed-link {
+						speed = <2500>;
+						full-duplex;
+					};
+				};
+
+				sf2_port8: port@8 {
+					// this is a CPU port
+					reg = <8>;
+					status = "disabled";
+					/* 0: system port, 1: unimac bbh */
+					mux2-in-port = <1>;
+
+					dsa,def-cpu-port;
+					//ethernet = <&systemport>;
+					ethernet = <&runner_unimac0>;
+
+					phy-connection-type = "internal";
+					fixed-link {
+						speed = <2500>;
+						full-duplex;
+					};
+				};
+			};
+
+			sf2,wan-port-config {
+				status = "disabled";
+				xbar-in-port = <0>;
+			};
+
+			sf2,mdio {
+		                #address-cells = <1>;
+		                #size-cells = <0>;
+
+				/* XXX: depends on sf2,qphy-base-id */
+				sf2_qphy0: ethernet-phy@1 {
+					compatible = "ethernet-phy-idae02.51c1", "ethernet-phy-ieee802.3-c22";
+					status = "disabled";
+					reg = <1>;
+				};
+				sf2_qphy1: ethernet-phy@2 {
+					compatible = "ethernet-phy-idae02.51c1", "ethernet-phy-ieee802.3-c22";
+					status = "disabled";
+					reg = <2>;
+				};
+				sf2_qphy2: ethernet-phy@3 {
+					compatible = "ethernet-phy-idae02.51c1", "ethernet-phy-ieee802.3-c22";
+					status = "disabled";
+					reg = <3>;
+				};
+				sf2_qphy3: ethernet-phy@4 {
+					compatible = "ethernet-phy-idae02.51c1", "ethernet-phy-ieee802.3-c22";
+					status = "disabled";
+					reg = <4>;
+				};
+				/* XXX: depends on sf2,sphy-phy-id */
+				sf2_sphy: ethernet-phy@5 {
+					compatible = "ethernet-phy-idae02.51c1", "ethernet-phy-ieee802.3-c22";
+					status = "disabled";
+					reg = <5>;
+				};
+			};
+		};
+
+		systemport: systemport@80490000 {
+			compatible = "brcm,systemport-63158";
+			reg = <0x0 0x490000 0x0 0x4650>;
+			local-mac-address = [ 00 07 CB 00 00 FE ];
+			interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+				   <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+			dma-coherent;
+
+			fixed-link {
+				speed = <2500>;
+				full-duplex;
+			};
+		};
+
+		xdsl_phy: xdsl-phy@80650000 {
+			compatible = "brcm,bcm63158-xdsl-phy";
+			status = "disabled";
+
+			memory-region = <&dsl_reserved>;
+			interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+			reg = <0x0 0x650000 0x0 0x20000>,
+				<0x0 0x800000 0x0 0xe0000>,
+				<0x0 0x9A0000 0x0 0x660000>;
+			reg-names = "phy", "lmem", "xmem";
+
+			pinctrl-0 = <&ld0_pins>;
+			pinctrl-names = "default";
+
+			ubus = <&ubus4 UBUS_PORT_ID_DSLCPU>,
+				<&ubus4 UBUS_PORT_ID_DSL>;
+
+			/*
+			 * this is used by dsldiags, but unfortunately
+			 * lying outside the axi space, in the ubus
+			 * space.
+			 */
+			perf-base = <0xff800000>;
+			perf-size = <0x10>;
+		};
+
+		gic: interrupt-controller@81000000 {
+	                compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+	                #interrupt-cells = <3>;
+	                #address-cells = <0>;
+	                interrupt-controller;
+	                reg = <0x0 0x1001000 0 0x1000>,
+	                      <0x0 0x1002000 0 0x2000>;
+	        };
+
+		usb: usb@8000d000 {
+			status = "disabled";
+			compatible = "brcm,bcm63158-usb";
+
+			reg = <0x0 0xd000 0x0 0x1000>,
+				<0x0 0xc200 0x0 0x100>,
+				<0x0 0xc300 0x0 0x100>,
+				<0x0 0xc400 0x0 0x100>,
+				<0x0 0xc500 0x0 0x100>,
+				<0x0 0xc600 0x0 0x100>;
+			reg-names = "xhci", "usb-control", "ehci0",
+				"ohci0", "ehci1", "ohci1";
+			dma-coherent;
+
+			interrupts = <GIC_SPI XHCI_SPI IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI EHCI0_SPI IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI OHCI0_SPI IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI EHCI1_SPI IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI OHCI1_SPI IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "xhci", "ehci0", "ohci0",
+				"ehci1", "ohci1";
+
+			resets = <&pmc PMC_R_USBH>;
+			reset-names = "xhci-pmc-reset";
+
+			ubus = <&ubus4 UBUS_PORT_ID_USB>;
+		};
+
+		xrdp: xrdp@82000000 {
+			compatible = "brcm,bcm63158-xrdp";
+			reg = <0x0 0x2000000 0x0 0x1000000>,
+				<0x0 0x0170000 0x0 0x10000>;
+			reg-names = "core", "wan_top";
+
+			interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
+
+			interrupt-names = "fpm",
+				"hash",
+				"qm",
+				"dsptchr",
+				"sbpm",
+				"runner0",
+				"runner1",
+				"runner2",
+				"runner3",
+				"runner4",
+				"runner5",
+				"queue0",
+				"queue1",
+				"queue2",
+				"queue3",
+				"queue4",
+				"queue5",
+				"queue6",
+				"queue7",
+				"queue8",
+				"queue9",
+				"queue10",
+				"queue11",
+				"queue12",
+				"queue13",
+				"queue14",
+				"queue15",
+				"queue16",
+				"queue17",
+				"queue18",
+				"queue19",
+				"queue20",
+				"queue21",
+				"queue22",
+				"queue23",
+				"queue24",
+				"queue25",
+				"queue26",
+				"queue27",
+				"queue28",
+				"queue29",
+				"queue30",
+				"queue31";
+
+#ifdef USE_RDP_RESERVED_TM
+			memory-region = <&rdp_reserved_tm>;
+#endif
+			resets = <&pmc PMC_R_XRDP>;
+			reset-names = "rdp";
+			ubus = <&ubus4 UBUS_PORT_ID_QM>,
+				<&ubus4 UBUS_PORT_ID_DQM>,
+				<&ubus4 UBUS_PORT_ID_NATC>,
+				<&ubus4 UBUS_PORT_ID_DMA0>,
+				<&ubus4 UBUS_PORT_ID_RQ0>,
+				<&ubus4 UBUS_PORT_ID_SWH>;
+		};
+
+		runner_unimac0: runner-unimac0 {
+			status = "disabled";
+			compatible = "brcm,bcm63158-enet-runner-unimac";
+			local-mac-address = [ 00 07 CB 00 00 FE ];
+			enet-runner,xrdp = <&xrdp>;
+			enet-runner,bbh = <0>;
+			dma-coherent;
+
+			phy-mode = "internal";
+			fixed-link {
+				speed = <2500>;
+				full-duplex;
+			};
+		};
+
+		runner_unimac1: runner-unimac1 {
+			status = "disabled";
+			compatible = "brcm,bcm63158-enet-runner-unimac";
+			local-mac-address = [ 00 07 CB 00 00 FE ];
+			enet-runner,xrdp = <&xrdp>;
+			enet-runner,bbh = <1>;
+			dma-coherent;
+
+			phy-mode = "internal";
+			fixed-link {
+				speed = <2500>;
+				full-duplex;
+			};
+		};
+
+		runner_unimac2: runner-unimac2 {
+			status = "disabled";
+			compatible = "brcm,bcm63158-enet-runner-unimac";
+			local-mac-address = [ 00 07 CB 00 00 FE ];
+			enet-runner,xrdp = <&xrdp>;
+			enet-runner,bbh = <2>;
+			dma-coherent;
+
+			phy-mode = "internal";
+			fixed-link {
+				speed = <2500>;
+				full-duplex;
+			};
+		};
+
+		runner_xport0: runner-xport0 {
+			status = "disabled";
+			compatible = "brcm,bcm63158-enet-runner-xport";
+			reg = <0x0 0x00144000 0x0 0x100>,
+				<0x0 0x00138000 0x0 0x6fff>,
+				<0x0 0x00147800 0x0 0xe80>,
+				<0x0 0x00140000 0x0 0x3fff>;
+			reg-names = "wan_top", "xport", "xlif", "epon";
+			dma-coherent;
+
+			resets = <&pmc PMC_R_WAN_AE>;
+			reset-names = "wan_ae";
+
+			local-mac-address = [ 00 07 CB 00 00 FE ];
+			enet-runner,xrdp = <&xrdp>;
+			enet-runner,xport-pon-bbh = <3>;
+			enet-runner,xport-ae-bbh = <4>;
+
+			//phy-mode = "1000base-x";
+			phy-mode = "10gbase-r";
+			managed = "in-band-status";
+		};
+	};
+
+	ubus@ff800000 {
+                compatible = "simple-bus";
+                #address-cells = <2>;
+                #size-cells = <2>;
+                ranges = <0x0 0x0 0x0 0xff800000 0x0 0x62000>;
+
+		leds_top_syscon: system-controller@ff800800 {
+			compatible = "syscon", "simple-mfd";
+			reg = <0x0 0x800 0x0 0x100>;
+		};
+
+		sdhci: sdhci@ff810000 {
+			status = "disabled";
+			compatible = "brcm,bcm63xx-sdhci";
+			reg = <0x0 0x00010000 0x0 0x100>;
+			interrupts = <GIC_SPI SDIO_EMMC_SPI IRQ_TYPE_LEVEL_HIGH>;
+			no-1-8v;
+			bus-width = <8>;
+		};
+
+                arm_serial0: serial@ff812000 {
+                        compatible = "arm,pl011", "arm,primecell";
+                        reg = <0x0 0x12000 0x0 0x1000>;
+                        interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+                        clocks = <&uartclk>, <&uartclk>;
+                        clock-names = "uartclk", "apb_pclk";
+			status = "disabled";
+                };
+
+                arm_serial2: serial@ff814000 {
+                        compatible = "arm,pl011", "arm,primecell";
+                        reg = <0x0 0x14000 0x0 0x1000>;
+                        interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+                        clocks = <&uartclk>, <&uartclk>;
+                        clock-names = "uartclk", "apb_pclk";
+			status = "disabled";
+                };
+
+		timer: timer@400 {
+			compatible = "syscon", "brcm,bcm63158-timer";
+			reg = <0x0 0x400 0x0 0x94>,
+				<0x0 0x5a03c 0x0 0x4>;
+			reg-names = "timer", "top-reset-status";
+			interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		reboot {
+			compatible = "syscon-reboot";
+			regmap = <&timer>;
+			offset = <0x8c>;
+			mask = <1>;
+		};
+
+		pinctrl: pinctrl@500 {
+			compatible = "brcm,bcm63158-pinctrl";
+			reg = <0x0 0x500 0x0 0x60>,
+				<0x0 0x20 0x0 0x2c>;
+			reg-names = "gpio", "irq";
+
+			gpio-controller;
+			#gpio-cells = <2>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+
+			/* just for ref, they are hardcoded in driver too */
+			interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+
+			emmc_pins: emmc-pins-0 {
+				emmc-d0 {
+					pinmux = <BCM63158_GPIO_51__FUNC_NAND_DATA_0>;
+				};
+				emmc-d1 {
+					pinmux = <BCM63158_GPIO_52__FUNC_NAND_DATA_1>;
+				};
+				emmc-d2 {
+					pinmux = <BCM63158_GPIO_53__FUNC_NAND_DATA_2>;
+				};
+				emmc-d3 {
+					pinmux = <BCM63158_GPIO_54__FUNC_NAND_DATA_3>;
+				};
+				emmc-d4 {
+					pinmux = <BCM63158_GPIO_55__FUNC_NAND_DATA_4>;
+				};
+				emmc-d5 {
+					pinmux = <BCM63158_GPIO_56__FUNC_NAND_DATA_5>;
+				};
+				emmc-d6 {
+					pinmux = <BCM63158_GPIO_57__FUNC_NAND_DATA_6>;
+				};
+				emmc-d7 {
+					pinmux = <BCM63158_GPIO_58__FUNC_NAND_DATA_7>;
+				};
+				emmc-clk {
+					pinmux = <BCM63158_GPIO_62__FUNC_EMMC_CLK>;
+				};
+				emmc-cmd {
+					pinmux = <BCM63158_GPIO_63__FUNC_EMMC_CMD>;
+				};
+			};
+
+			spi_pins: spi-pins {
+				spi-clk {
+					pinmux = <BCM63158_GPIO_108__FUNC_SPIM_CLK>;
+				};
+				spi-mosi {
+					pinmux = <BCM63158_GPIO_109__FUNC_SPIM_MOSI>;
+				};
+				spi-miso {
+					pinmux = <BCM63158_GPIO_110__FUNC_SPIM_MISO>;
+				};
+
+				/*
+				 * board DTS will have to specify SPI
+				 * SS pins as required.
+				 */
+			};
+
+			i2c0_pins: i2c0-pins {
+				i2c-sda {
+					pinmux = <BCM63158_GPIO_24__FUNC_B_I2C_SDA_0>;
+				};
+				i2c-scl {
+					pinmux = <BCM63158_GPIO_25__FUNC_B_I2C_SCL_0>;
+				};
+			};
+
+			i2c1_pins: i2c1-pins {
+				i2c-sda {
+					pinmux = <BCM63158_GPIO_15__FUNC_B_I2C_SDA_1>;
+				};
+				i2c-scl {
+					pinmux = <BCM63158_GPIO_16__FUNC_B_I2C_SCL_1>;
+				};
+			};
+
+			pcie0_pins: pcie0-pins {
+				pcie-clk {
+					pinmux = <BCM63158_GPIO_113__FUNC_PCIE0a_CLKREQ_B>;
+				};
+				pcie-rst {
+					pinmux = <BCM63158_GPIO_114__FUNC_PCIE0a_RST_B>;
+				};
+			};
+
+			pcie1_pins: pcie1-pins {
+				pcie-clk {
+					pinmux = <BCM63158_GPIO_115__FUNC_PCIE1a_CLKREQ_B>;
+				};
+				pcie-rst {
+					pinmux = <BCM63158_GPIO_116__FUNC_PCIE1a_RST_B>;
+				};
+			};
+
+			pcie2_pins: pcie2-pins {
+				pcie-clk {
+					pinmux = <BCM63158_GPIO_117__FUNC_PCIE2a_CLKREQ_B>;
+				};
+				pcie-rst {
+					pinmux = <BCM63158_GPIO_118__FUNC_PCIE2a_RST_B>;
+				};
+			};
+
+			pcie3_pins: pcie3-pins {
+				pcie-clk {
+					pinmux = <BCM63158_GPIO_119__FUNC_PCIE3_CLKREQ_B>;
+				};
+				pcie-rst {
+					pinmux = <BCM63158_GPIO_120__FUNC_PCIE3_RST_B>;
+				};
+			};
+
+			pcm_pins: pcm-pins {
+				pcm-clk {
+					pinmux = <BCM63158_GPIO_44__FUNC_PCM_CLK>;
+				};
+				pcm-fsync {
+					pinmux = <BCM63158_GPIO_45__FUNC_PCM_FS>;
+				};
+				pcm-sdin {
+					pinmux = <BCM63158_GPIO_42__FUNC_PCM_SDIN>;
+				};
+				pcm-sdout {
+					pinmux = <BCM63158_GPIO_43__FUNC_PCM_SDOUT>;
+				};
+			};
+
+			hs_uart_pins: hs-uart-pins {
+				hs-uart-sout {
+					pinmux = <BCM63158_GPIO_06__FUNC_A_UART2_SOUT>;
+				};
+				hs-uart-sin {
+					pinmux = <BCM63158_GPIO_05__FUNC_A_UART2_SIN>;
+				};
+				hs-uart-cts {
+					pinmux = <BCM63158_GPIO_03__FUNC_A_UART2_CTS>;
+				};
+				hs-uart-rts {
+					pinmux = <BCM63158_GPIO_04__FUNC_A_UART2_RTS>;
+				};
+			};
+
+			usb01_pins: usb01-pins {
+				pwr0-en {
+					pinmux = <BCM63158_GPIO_122__FUNC_USB0a_PWRON>;
+				};
+				pwr0-fault {
+					pinmux = <BCM63158_GPIO_121__FUNC_USB0a_PWRFLT>;
+				};
+				pwr1-en {
+					pinmux = <BCM63158_GPIO_124__FUNC_USB1a_PWRON>;
+				};
+				pwr1-fault {
+					pinmux = <BCM63158_GPIO_123__FUNC_USB1a_PWRFLT>;
+				};
+			};
+
+			usb0_pins: usb0-pins {
+				pwr0-en {
+					pinmux = <BCM63158_GPIO_122__FUNC_USB0a_PWRON>;
+				};
+				pwr0-fault {
+					pinmux = <BCM63158_GPIO_121__FUNC_USB0a_PWRFLT>;
+				};
+			};
+
+			usb1_pins: usb1-pins {
+				pwr0-en {
+					pinmux = <BCM63158_GPIO_124__FUNC_USB1a_PWRON>; /* was PWRFLT: swapped vs usb01_pins */
+				};
+				pwr0-fault {
+					pinmux = <BCM63158_GPIO_123__FUNC_USB1a_PWRFLT>; /* was PWRON: swapped vs usb01_pins */
+				};
+			};
+
+			ld0_pins: ld0-pins {
+				ld0_pwr_up {
+					pinmux = <BCM63158_GPIO_32__FUNC_VDSL_CTRL0>;
+				};
+
+				ld0_din {
+					pinmux = <BCM63158_GPIO_33__FUNC_VDSL_CTRL_1>;
+				};
+
+				ld0_dclk {
+					pinmux = <BCM63158_GPIO_34__FUNC_VDSL_CTRL_2>;
+				};
+			};
+
+			ld1_pins: ld1-pins-0 {
+				ld1_pwr_up {
+					pinmux = <BCM63158_GPIO_35__FUNC_VDSL_CTRL_3>;
+				};
+
+				ld1_din {
+					pinmux = <BCM63158_GPIO_36__FUNC_VDSL_CTRL_4>;
+				};
+
+				ld1_dclk {
+					pinmux = <BCM63158_GPIO_37__FUNC_VDSL_CTRL_5>;
+				};
+			};
+
+			gphy01_link_act_leds: gphy01-link-act-leds {
+				gphy0_link_act_led {
+					pinmux = <BCM63158_GPIO_84__FUNC_B_LED_20>;
+				};
+				gphy1_link_act_led {
+					pinmux = <BCM63158_GPIO_85__FUNC_B_LED_21>;
+				};
+			};
+
+			sfp_rogue0_pins: sfp-rogue0-pins {
+				sfp_rogue0_in {
+					pinmux = <BCM63158_GPIO_34__FUNC_B_ROGUE_IN>;
+				};
+			};
+
+			sfp_rogue1_pins: sfp-rogue1-pins {
+				sfp_rogue1_in {
+					pinmux = <BCM63158_GPIO_40__FUNC_A_ROGUE_IN>;
+				};
+			};
+			sfp_rs0_gpio_pins: sfp-rs0-gpio {
+				sfp_rogue1_rs0 {
+					pinmux = <BCM63158_GPIO_40__FUNC_GPIO_40>;
+				};
+			};
+		};
+
+		hs_spim: spi@1000 {
+			status = "disabled";
+			compatible = "brcm,bcm6328-hsspi";
+			reg = <0x0 0x1000 0x0 0x600>;
+			interrupts = <GIC_SPI HS_SPI_SPI IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&spiclk>;
+			clock-names = "hsspi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		hs_uart: hs-uart@10400 {
+			status = "disabled";
+			compatible = "brcm,bcm63xx-hs-uart";
+			reg = <0x0 0x00010400 0x0 0x1e0>;
+			interrupts = <GIC_SPI HS_UART_SPI IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&uartclk>;
+		};
+
+		i2c_bsc0: i2c@2100 {
+			status = "disabled";
+			compatible = "brcm,brcmper-i2c";
+			reg = <0x0 0x2100 0x0 0x60>;
+			interrupts = <GIC_SPI BSC_I2C0_SPI IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		i2c_bsc1: i2c@5a800 {
+			status = "disabled";
+			compatible = "brcm,brcmper-i2c";
+			reg = <0x0 0x5a800 0x0 0x60>;
+			interrupts = <GIC_SPI BSC_I2C1_SPI IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		bcm_pcm: bcm_pcm@60000 {
+			status = "disabled";
+			compatible = "brcm,bcm63158-pcm";
+			reg = <0x0 0x60000 0x0 0x2000>;
+			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+                        interrupt-names = "pcm", "dma0", "dma1";
+		};
+
+		bcm63158_cpufreq {
+			compatible = "brcm,bcm63158-cpufreq";
+			pmc = <&pmc>;
+		};
+	};
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/bcm963158ref1d.dts linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/bcm963158ref1d.dts
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/bcm963158ref1d.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/bcm963158ref1d.dts	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,331 @@
+/*
+ * Broadcom BCM63158 Reference Board REF1 DTS
+ */
+
+/dts-v1/;
+
+#include "bcm63158.dtsi"
+
+/ {
+	compatible = "brcm,BCM963158REF1", "brcm,bcm63158";
+	model = "Broadcom BCM963158REF1";
+
+	chosen {
+		bootargs = "console=ttyAMA0,115200";
+		stdout-path = &arm_serial0;
+	};
+
+	reserved-memory {
+		ramoops@3fff0000 {
+			compatible = "ramoops";
+			/* RAM top - 64k */
+			reg = <0x0 0x3fff0000 0x0 (64 * 1024)>;
+			record-size = <(64 * 1024)>;
+			ecc-size = <16>;
+			no-dump-oops;
+		};
+	};
+
+	bcm963158ref1d-fbxgpio {
+		compatible = "fbx,fbxgpio";
+
+		wps-button {
+			gpio = <&pinctrl 41 0>;
+			input;
+		};
+		dsl0-link-led {
+			gpio = <&pinctrl 18 0>;
+			output-low;
+		};
+
+		sfp-ae-pwren {
+			gpio = <&pinctrl 3 0>;
+			output-low;
+		};
+		sfp-ae-rs0 {
+			gpio = <&pinctrl 40 0>;
+			input;
+		};
+		sfp-ae-rs1 {
+			gpio = <&pinctrl 12 0>;
+			output-low;
+		};
+
+		sfp-ae-presence {
+			gpio = <&pinctrl 9 0>;
+			input;
+		};
+		sfp-ae-rxlos {
+			gpio = <&pinctrl 8 0>;
+			input;
+		};
+
+		sfp-sgmii-presence {
+			gpio = <&pinctrl 20 0>;
+			input;
+		};
+		sfp-sgmii-rxlos {
+			gpio = <&pinctrl 21 0>;
+			input;
+		};
+	};
+
+	i2c0_gpio: i2c0-gpio {
+		compatible = "i2c-gpio";
+		gpios = <&pinctrl 24 0 /* sda */
+			 &pinctrl 25 0 /* scl */
+			>;
+		i2c-gpio,delay-us = <10>;	/* ~100 kHz */
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+
+	i2c1_gpio: i2c1-gpio {
+		compatible = "i2c-gpio";
+		gpios = <&pinctrl 15 0 /* sda */
+			 &pinctrl 16 0 /* scl */
+			>;
+		i2c-gpio,delay-us = <10>;	/* ~100 kHz */
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+};
+
+&arm_serial0 {
+	status = "okay";
+};
+
+&sf2 {
+	status = "okay";
+};
+
+&sf2_port0 {
+	status = "okay";
+	label = "swp1";
+};
+
+&sf2_port1 {
+	status = "okay";
+	label = "swp2";
+};
+
+&sf2_port2 {
+	status = "okay";
+	label = "swp3";
+};
+
+&sf2_port3 {
+	status = "okay";
+	label = "swp4";
+};
+
+&sf2_port4 {
+	status = "okay";
+	label = "swp5";
+};
+
+&sf2_port8 {
+	status = "okay";
+};
+
+&sf2_qphy0 {
+	status = "okay";
+};
+
+&sf2_qphy1 {
+	status = "okay";
+};
+
+&sf2_qphy2 {
+	status = "okay";
+};
+
+&sf2_qphy3 {
+	status = "okay";
+};
+
+&sf2_sphy {
+	status = "okay";
+};
+
+&systemport {
+	status = "okay";
+	fbxserial-mac-address = <0>;
+};
+
+&sdhci {
+	status = "okay";
+
+	pinctrl-0 = <&emmc_pins>;
+	pinctrl-names = "default";
+
+	partitions-main {
+		compatible = "fixed-partitions";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		disk-name = "mmcblk%d";
+
+		bank0@0 {
+			label = "bank0";
+			reg = /bits/64 <0 (32 * 1024 * 1024)>;
+			read-only;
+		};
+
+		bank1@0 {
+			label = "bank1";
+			reg = /bits/64 <(-1) (256 * 1024 * 1024)>;
+		};
+
+		nvram@0 {
+			label = "nvram";
+			reg = /bits/64 <(-1) (4 * 1024 * 1024)>;
+		};
+
+		config@0 {
+			label = "config";
+			reg = /bits/64 <(-1) (32 * 1024 * 1024)>;
+		};
+
+		new-bank0@0 {
+			label = "new_bank0";
+			reg = /bits/64 <(-1) (32 * 1024 * 1024)>;
+		};
+
+		userdata@0 {
+			label = "userdata";
+			reg = /bits/64 <(-1) (-1)>;
+		};
+	};
+
+
+	partitions-boot {
+		compatible = "fixed-partitions";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		disk-name = "mmcblk%dboot0";
+
+		cfe@0 {
+			label = "cfe";
+			reg = /bits/64 <0 (1 * 1024 * 1024)>;
+			read-only;
+		};
+
+		serial@0 {
+			label = "fbxserial";
+			reg = /bits/64 <(-1) (8 * 1024)>;
+			read-only;
+		};
+
+		fbxboot@0 {
+			label = "fbxboot";
+			reg = /bits/64 <(-1) (8 * 1024)>;
+			read-only;
+		};
+	};
+};
+
+&spi_pins {
+	spi-ss0 {
+		pinmux = <BCM63158_GPIO_111__FUNC_SPIM_SS0_B>;
+	};
+	spi-ss1 {
+		pinmux = <BCM63158_GPIO_112__FUNC_SPIM_SS1_B>;
+	};
+};
+
+&hs_spim {
+	status = "okay";
+	num-cs = <2>;
+	broadcom,dummy-cs = <2>;
+	pinctrl-0 = <&spi_pins>;
+	pinctrl-names = "default";
+	serial-flash@0 {
+		compatible = "m25p80";
+		reg = <0>;
+		spi-max-frequency = <(50 * 1000 * 1000)>;
+		label = "serial-flash";
+	};
+
+	/* TO TEST SLAC */
+	/*
+	spi-slac@1 {
+		compatible = "microsemi,le9641";
+		reg = <1>;
+		spi-max-frequency = <(1 * 1000 * 1000)>;
+	};
+	*/
+
+	/* TO TEST LCD  */
+	/*
+	ssd1320@1 {
+		compatible = "solomon,ssd1320";
+		reg = <1>;
+		spi-max-frequency = <(9 * 1000 * 1000)>;
+		ssd1320,width = <160>;
+		ssd1320,height = <100>;
+		ssd1320,segs-hw-skip = <0>;
+		ssd1320,coms-hw-skip = <30>;
+		ssd1320,rotate = <180>;
+		ssd1320,watchdog = <300>;
+		ssd1320,data-select-gpio = <&pinctrl 14 GPIO_ACTIVE_HIGH>;
+		ssd1320,reset-gpio = <&pinctrl 4 GPIO_ACTIVE_HIGH>;
+	};
+	*/
+};
+
+&pcie01 {
+	status = "okay";
+	pinctrl-0 = <&pcie0_pins>;
+	pinctrl-names = "default";
+};
+
+&pcie2 {
+	status = "okay";
+	pinctrl-0 = <&pcie2_pins>;
+	pinctrl-names = "default";
+};
+
+
+&pcie3 {
+	status = "okay";
+	pinctrl-0 = <&pcie3_pins>;
+	pinctrl-names = "default";
+};
+
+&xdsl_phy {
+	status = "okay";
+
+	pinctrl-0 = <&ld0_pins>, <&ld1_pins>;
+	pinctrl-names = "default";
+
+	afe-id-0 = <(BCM63XX_XDSLPHY_AFE_CHIP_CH0 |
+		   BCM63XX_XDSLPHY_AFE_LD_6304 |
+		   BCM63XX_XDSLPHY_AFE_FE_ANNEXA |
+		   BCM63XX_XDSLPHY_AFE_FE_REV_6304_REV_12_4_60 |
+		   BCM63XX_XDSLPHY_AFE_FE_RNC)>;
+
+	afe-id-1 = <(BCM63XX_XDSLPHY_AFE_CHIP_CH1 |
+		   BCM63XX_XDSLPHY_AFE_LD_6304 |
+		   BCM63XX_XDSLPHY_AFE_FE_ANNEXA |
+		   BCM63XX_XDSLPHY_AFE_FE_REV_6304_REV_12_4_60 |
+		   BCM63XX_XDSLPHY_AFE_FE_RNC)>;
+};
+
+/* TO TEST TELEPHONY */
+/*
+&bcm_pcm {
+	status = "okay";
+	pinctrl-0 = <&pcm_pins>;
+	pinctrl-names = "default";
+};
+*/
+
+&usb {
+	status = "okay";
+
+	pinctrl-0 = <&usb01_pins>;
+	pinctrl-names = "default";
+
+	brcm,pwren-low;
+	brcm,pwrflt-low;
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-00.dts linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-00.dts
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-00.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-00.dts	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,14 @@
+/*
+ * Freebox FBXGW8R Board DTS, board ID 0x00
+ */
+/dts-v1/;
+
+#include "fbxgw8r-common.dtsi"
+#include "fbxgw8r-common-nopmu.dtsi"
+#include "fbxgw8r-phy-aquantia.dtsi"
+
+/ {
+	compatible = "freebox,fbxgw8r-board-00", "freebox,fbxgw8r",
+		"brcm,bcm63158";
+	cfe-v2.4-work-around-padding;
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-01.dts linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-01.dts
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-01.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-01.dts	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,18 @@
+/*
+ * Freebox FBXGW8R Board DTS, board ID 0x01
+ *
+ * With Realtek PHY 2.5G phy replacing the Aquantia 2.5G PHY, at
+ * address 0x6 (protos) or 0x7 (final version).
+ */
+/dts-v1/;
+
+#include "fbxgw8r-common.dtsi"
+#include "fbxgw8r-common-nopmu.dtsi"
+#include "fbxgw8r-phy-realtek.dtsi"
+
+/ {
+	compatible = "freebox,fbxgw8r-board-01", "freebox,fbxgw8r",
+		"brcm,bcm63158";
+	cfe-v2.4-work-around-padding;
+	cfe-v2.4-work-around-padding-01;
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-02.dts linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-02.dts
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-02.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-02.dts	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,14 @@
+/*
+ * Freebox FBXGW8R Board DTS, board ID 0x02, same as 0x00 but with external
+ * PMU (only for power-btn/rtc & leds) and wifi 7
+ */
+/dts-v1/;
+
+#include "fbxgw8r-common.dtsi"
+#include "fbxgw8r-common-external-pmu.dtsi"
+#include "fbxgw8r-phy-aquantia.dtsi"
+
+/ {
+	compatible = "freebox,fbxgw8r-board-02", "freebox,fbxgw8r",
+		"brcm,bcm63158";
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-03.dts linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-03.dts
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-03.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-03.dts	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,14 @@
+/*
+ * Freebox FBXGW8R Board DTS, board ID 0x03, same as 0x01 but with external
+ * PMU (only for power-btn/rtc & leds) and wifi 7
+ */
+/dts-v1/;
+
+#include "fbxgw8r-common.dtsi"
+#include "fbxgw8r-common-external-pmu.dtsi"
+#include "fbxgw8r-phy-realtek.dtsi"
+
+/ {
+	compatible = "freebox,fbxgw8r-board-03", "freebox,fbxgw8r",
+		"brcm,bcm63158";
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-04.dts linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-04.dts
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-04.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-board-04.dts	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,16 @@
+/*
+ * Freebox FBXGW8R Board DTS, board ID 0x04
+ *
+ * With Realtek PHY 2.5G phy, DDR4 & fbxpmu, dual pci express, fbxpmu
+ * based keypad.
+ */
+/dts-v1/;
+
+#include "fbxgw8r-common.dtsi"
+#include "fbxgw8r-common-onboard-pmu.dtsi"
+#include "fbxgw8r-phy-realtek.dtsi"
+
+/ {
+	compatible = "freebox,fbxgw8r-board-04", "freebox,fbxgw8r",
+		"brcm,bcm63158";
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-common-external-pmu.dtsi linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-common-external-pmu.dtsi
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-common-external-pmu.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-common-external-pmu.dtsi	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,272 @@
+/*
+ * common dtsi file for fbxgw8r for boards with external PMU, wifi 7
+ */
+/ {
+	powerbtn {
+		compatible = "gpio-keys";
+		autorepeat = <0>;
+
+		powerbtn {
+			label = "power";
+			linux,code = <KEY_POWER>;
+			gpios = <&fbxpmu_gpio_expander 4 GPIO_ACTIVE_HIGH>;
+			debounce-interval = <50>;
+			linux,can-disable;
+		};
+	};
+
+	dgasp {
+		compatible = "misc,dgasp";
+		interrupt-parent = <&pinctrl>;
+		interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
+
+		toset-gpios = <&pinctrl 99 GPIO_ACTIVE_LOW>; /* w-enable-2 */
+	};
+};
+
+&i2c1_gpio {
+	adt7475@2e {
+		compatible = "adi,adt7475";
+		reg = <0x2e>;
+	};
+};
+
+&pinctrl {
+	gpio-line-names = "", /* 0 */
+			  "", /* 1 */
+			  "", /* 2 */
+			  "bt-rst", /* 3 */
+			  "", /* 4 */
+			  "", /* 5 */
+			  "", /* 6 */
+			  "", /* 7 */
+			  "", /* 8 */
+			  "wan-sfp-presence", /* 9 */
+			  "wan-sfp-rxlos", /* 10 */
+			  "wan-sfp-txfault", /* 11 */
+			  "wan-sfp-rs1", /* 12 */
+			  "", /* 13 */
+			  "wan-sfp-pwren", /* 14 */
+			  "", /* 15 */
+			  "", /* 16 */
+			  "", /* 17 */
+			  "", /* 18 */
+			  "i2c-int", /* 19 */
+			  "pmu-dyinggasp-int", /* 20 */
+			  "fan-int", /* 21 */
+			  "", /* 22 */
+			  "fxs-int", /* 23 */
+			  "", /* 24 */
+			  "", /* 25 */
+			  "phy25-int", /* 26 */
+			  "phy25-reset", /* 27 */
+			  "", /* 28 */
+			  "", /* 29 */
+			  "", /* 30 */
+			  "", /* 31 */
+			  "", /* 32 */
+			  "", /* 33 */
+			  "", /* 34 */
+			  "", /* 35 */
+			  "oled-rst", /* 36 */
+			  "pmu-int", /* 37 */
+			  "", /* 38 */
+			  "", /* 39 */
+			  "wan-sfp-rs0", /* 40 */
+			  "wan-sfp-pwrgood", /* 41 */
+			  "", /* 42 */
+			  "", /* 43 */
+			  "", /* 44 */
+			  "", /* 45 */
+			  "", /* 46 */
+			  "", /* 47 */
+			  "", /* 48 */
+			  "", /* 49 */
+			  "", /* 50 */
+			  "", /* 51 */
+			  "", /* 52 */
+			  "", /* 53 */
+			  "", /* 54 */
+			  "", /* 55 */
+			  "", /* 56 */
+			  "", /* 57 */
+			  "", /* 58 */
+			  "", /* 59 */
+			  "", /* 60 */
+			  "", /* 61 */
+			  "", /* 62 */
+			  "", /* 63 */
+			  "", /* 64 */
+			  "", /* 65 */
+			  "", /* 66 */
+			  "", /* 67 */
+			  "", /* 68 */
+			  "", /* 69 */
+			  "", /* 70 */
+			  "", /* 71 */
+			  "", /* 72 */
+			  "", /* 73 */
+			  "", /* 74 */
+			  "", /* 75 */
+			  "", /* 76 */
+			  "", /* 77 */
+			  "", /* 78 */
+			  "", /* 79 */
+			  "oled-data-select", /* 80 */
+			  "backlight-en", /* 81 */
+			  "led-white", /* 82 */
+			  "led-red", /* 83 */
+			  "", /* 84 */
+			  "", /* 85 */
+			  "keypad-left", /* 86 */
+			  "oled-vcc", /* 87 */
+			  "bt-rst", /* 88 */
+			  "ha-swd-clk", /* 89 */
+			  "ha-swd-io", /* 90 */
+			  "ha-rst", /* 91 */
+			  "", /* 92 */
+			  "", /* 93 */
+			  "", /* 94 */
+			  "", /* 95 */
+			  "keypad-up", /* 96 */
+			  "keypad-down", /* 97 */
+			  "w-enable-1", /* 98 */
+			  "w-enable-2", /* 99 */
+			  "", /* 100 */
+			  "", /* 101 */
+			  "keypad-right", /* 102 */
+			  "", /* 103 */
+			  "", /* 104 */
+			  "", /* 105 */
+			  "", /* 106 */
+			  "", /* 107 */
+			  "", /* 108 */
+			  "", /* 109 */
+			  "", /* 110 */
+			  "", /* 111 */
+			  "", /* 112 */
+			  "", /* 113 */
+			  "", /* 114 */
+			  "", /* 115 */
+			  "", /* 116 */
+			  "", /* 117 */
+			  "", /* 118 */
+			  "", /* 119 */
+			  "", /* 120 */
+			  "", /* 121 */
+			  "", /* 122 */
+			  "", /* 123 */
+			  "", /* 124 */
+			  "", /* 125 */
+			  "", /* 126 */
+			  "", /* 127 */
+			  "", /* 128 */
+			  "", /* 129 */
+			  ""; /* 130 */
+};
+
+&keypad {
+	keyup {
+		label = "key up";
+		linux,code = <KEY_UP>;
+		gpios = <&pinctrl 97 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keydown {
+		label = "key down";
+		linux,code = <KEY_DOWN>;
+		gpios = <&pinctrl 96 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keyright {
+		label = "key right";
+		linux,code = <KEY_RIGHT>;
+		gpios = <&pinctrl 102 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keyleft {
+		label = "key left";
+		linux,code = <KEY_LEFT>;
+		gpios = <&pinctrl 86 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+};
+
+&i2c1_gpio {
+	fbxpmu@3c {
+		compatible = "freebox,fbxgwr-pmu";
+		reg = <0x3c>;
+
+		fbxpmu_gpio_expander: fbxpmu@3c {
+			compatible = "freebox,fbxgwr-pmu-gpio";
+			interrupt-parent = <&pinctrl>;
+			interrupts = <37 IRQ_TYPE_LEVEL_LOW>;
+			gpio-controller;
+			ngpios = <24>;
+			#gpio-cells = <2>;
+			gpio-line-names = "", /* 0 */
+					  "", /* 1 */
+					  "", /* 2 */
+					  "", /* 3 */
+					  "power-button", /* 4 */
+					  "test-mode"; /* 5 */
+		};
+
+		led-controller {
+			compatible = "freebox,fbxgwr-pmu-led";
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			nleds = <3>;
+
+			led0@0 {
+				label = "green";
+				reg = <0x00>;
+			};
+
+			led1@1 {
+				label = "red";
+				reg = <0x01>;
+			};
+
+			led2@2 {
+				label = "blue";
+				reg = <0x02>;
+			};
+		};
+	};
+};
+
+&pcie01 {
+	status = "okay";
+	pinctrl-0 = <&pcie0_pins>;
+	pinctrl-names = "default";
+};
+
+&xdsl_phy {
+	status = "okay";
+
+	pinctrl-0 = <&ld0_pins>;
+	pinctrl-names = "default";
+
+	afe-id-0 = <(BCM63XX_XDSLPHY_AFE_CHIP_GFAST_CH0 |
+		   BCM63XX_XDSLPHY_AFE_LD_6303 |
+		   BCM63XX_XDSLPHY_AFE_FE_ANNEXA |
+		   BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_60 |
+		   BCM63XX_XDSLPHY_AFE_FE_RNC)>;
+};
+
+&xtm {
+	status = "okay";
+};
+
+&fbxgw8r_gpio {
+	w-disable-2 {
+		gpio = <&pinctrl 99 GPIO_ACTIVE_LOW>;
+		output-low;
+	};
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r-common-nopmu.dtsi linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-common-nopmu.dtsi
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r-common-nopmu.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-common-nopmu.dtsi	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,231 @@
+/*
+ * common dtsi file for fbxgw8r for boards without PMU
+ */
+/ {
+	leds {
+		compatible = "gpio-leds";
+		led0 {
+			gpios = <&pinctrl 82 GPIO_ACTIVE_HIGH>;
+			default-state = "on";
+			label = "white";
+		};
+		led1 {
+			gpios = <&pinctrl 83 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			label = "red";
+		};
+	};
+};
+
+&i2c1_gpio {
+	adt7475@2e {
+		compatible = "adi,adt7475";
+		reg = <0x2e>;
+	};
+
+	ld6710-fbx@68 {
+		compatible = "leadtrend,ld6710-fbx";
+		reg = <0x68>;
+	};
+};
+
+&pinctrl {
+	gpio-line-names = "", /* 0 */
+			  "", /* 1 */
+			  "", /* 2 */
+			  "bt-rst", /* 3 */
+			  "", /* 4 */
+			  "", /* 5 */
+			  "", /* 6 */
+			  "", /* 7 */
+			  "", /* 8 */
+			  "wan-sfp-presence", /* 9 */
+			  "wan-sfp-rxlos", /* 10 */
+			  "wan-sfp-txfault", /* 11 */
+			  "wan-sfp-rs1", /* 12 */
+			  "", /* 13 */
+			  "wan-sfp-pwren", /* 14 */
+			  "", /* 15 */
+			  "", /* 16 */
+			  "", /* 17 */
+			  "", /* 18 */
+			  "i2c-int", /* 19 */
+			  "poe-on", /* 20 */
+			  "fan-int", /* 21 */
+			  "", /* 22 */
+			  "fxs-int", /* 23 */
+			  "", /* 24 */
+			  "", /* 25 */
+			  "phy25-int", /* 26 */
+			  "phy25-reset", /* 27 */
+			  "", /* 28 */
+			  "", /* 29 */
+			  "", /* 30 */
+			  "", /* 31 */
+			  "", /* 32 */
+			  "", /* 33 */
+			  "", /* 34 */
+			  "", /* 35 */
+			  "oled-rst", /* 36 */
+			  "boot-eth", /* 37 */
+			  "", /* 38 */
+			  "", /* 39 */
+			  "wan-sfp-rs0", /* 40 */
+			  "wan-sfp-pwrgood", /* 41 */
+			  "", /* 42 */
+			  "", /* 43 */
+			  "", /* 44 */
+			  "", /* 45 */
+			  "", /* 46 */
+			  "", /* 47 */
+			  "", /* 48 */
+			  "", /* 49 */
+			  "", /* 50 */
+			  "", /* 51 */
+			  "", /* 52 */
+			  "", /* 53 */
+			  "", /* 54 */
+			  "", /* 55 */
+			  "", /* 56 */
+			  "", /* 57 */
+			  "", /* 58 */
+			  "", /* 59 */
+			  "", /* 60 */
+			  "", /* 61 */
+			  "", /* 62 */
+			  "", /* 63 */
+			  "", /* 64 */
+			  "", /* 65 */
+			  "", /* 66 */
+			  "", /* 67 */
+			  "", /* 68 */
+			  "", /* 69 */
+			  "", /* 70 */
+			  "", /* 71 */
+			  "", /* 72 */
+			  "", /* 73 */
+			  "", /* 74 */
+			  "", /* 75 */
+			  "", /* 76 */
+			  "", /* 77 */
+			  "", /* 78 */
+			  "", /* 79 */
+			  "oled-data-select", /* 80 */
+			  "backlight-en", /* 81 */
+			  "led-white", /* 82 */
+			  "led-red", /* 83 */
+			  "", /* 84 */
+			  "", /* 85 */
+			  "keypad-left", /* 86 */
+			  "oled-vcc", /* 87 */
+			  "bt-rst", /* 88 */
+			  "ha-swd-clk", /* 89 */
+			  "ha-swd-io", /* 90 */
+			  "ha-rst", /* 91 */
+			  "", /* 92 */
+			  "", /* 93 */
+			  "", /* 94 */
+			  "", /* 95 */
+			  "keypad-up", /* 96 */
+			  "keypad-down", /* 97 */
+			  "w-enable-1", /* 98 */
+			  "w-enable-2", /* 99 */
+			  "", /* 100 */
+			  "", /* 101 */
+			  "keypad-right", /* 102 */
+			  "", /* 103 */
+			  "", /* 104 */
+			  "", /* 105 */
+			  "", /* 106 */
+			  "", /* 107 */
+			  "", /* 108 */
+			  "", /* 109 */
+			  "", /* 110 */
+			  "", /* 111 */
+			  "", /* 112 */
+			  "", /* 113 */
+			  "", /* 114 */
+			  "", /* 115 */
+			  "", /* 116 */
+			  "", /* 117 */
+			  "", /* 118 */
+			  "", /* 119 */
+			  "", /* 120 */
+			  "", /* 121 */
+			  "", /* 122 */
+			  "", /* 123 */
+			  "", /* 124 */
+			  "", /* 125 */
+			  "", /* 126 */
+			  "", /* 127 */
+			  "", /* 128 */
+			  "", /* 129 */
+			  ""; /* 130 */
+};
+
+&fbxgw8r_gpio {
+	poe-on {
+		gpio = <&pinctrl 20 0>;
+		output-low;
+	};
+};
+
+&keypad {
+	keyup {
+		label = "key up";
+		linux,code = <KEY_UP>;
+		gpios = <&pinctrl 97 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keydown {
+		label = "key down";
+		linux,code = <KEY_DOWN>;
+		gpios = <&pinctrl 96 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keyright {
+		label = "key right";
+		linux,code = <KEY_RIGHT>;
+		gpios = <&pinctrl 102 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keyleft {
+		label = "key left";
+		linux,code = <KEY_LEFT>;
+		gpios = <&pinctrl 86 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+};
+
+&pcie0 {
+	status = "okay";
+	pinctrl-0 = <&pcie0_pins>;
+	pinctrl-names = "default";
+};
+
+&pcie1 {
+	status = "okay";
+	pinctrl-0 = <&pcie1_pins>;
+	pinctrl-names = "default";
+};
+
+&xdsl_phy {
+	status = "okay";
+
+	pinctrl-0 = <&ld0_pins>;
+	pinctrl-names = "default";
+
+	afe-id-0 = <(BCM63XX_XDSLPHY_AFE_CHIP_GFAST_CH0 |
+		   BCM63XX_XDSLPHY_AFE_LD_6303 |
+		   BCM63XX_XDSLPHY_AFE_FE_ANNEXA |
+		   BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_60 |
+		   BCM63XX_XDSLPHY_AFE_FE_RNC)>;
+};
+
+&xtm {
+	status = "okay";
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r-common-onboard-pmu.dtsi linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-common-onboard-pmu.dtsi
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r-common-onboard-pmu.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-common-onboard-pmu.dtsi	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,256 @@
+/*
+ * common dtsi file for fbxgw8r for boards with on-board PMU
+ */
+/ {
+	powerbtn {
+		compatible = "gpio-keys";
+		autorepeat = <0>;
+
+		powerbtn {
+			label = "power";
+			linux,code = <KEY_POWER>;
+			gpios = <&fbxpmu_gpio_expander 4 GPIO_ACTIVE_HIGH>;
+			debounce-interval = <50>;
+			linux,can-disable;
+		};
+	};
+
+	dgasp {
+		compatible = "misc,dgasp";
+		interrupt-parent = <&pinctrl>;
+		interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+		toset-gpios = <&pinctrl 99 GPIO_ACTIVE_LOW>; /* w-enable-2 */
+	};
+};
+
+&ramoops {
+	reg = <0x0 0x3fff0000 0x0 (64 * 1024)>;
+};
+
+&pinctrl {
+	gpio-line-names = "", /* 0 */
+			  "", /* 1 */
+			  "", /* 2 */
+			  "pmu-dyinggasp-int", /* 3 */
+			  "", /* 4 */
+			  "", /* 5 */
+			  "", /* 6 */
+			  "", /* 7 */
+			  "", /* 8 */
+			  "wan-sfp-presence", /* 9 */
+			  "wan-sfp-rxlos", /* 10 */
+			  "wan-sfp-txfault", /* 11 */
+			  "wan-sfp-rs1", /* 12 */
+			  "", /* 13 */
+			  "wan-sfp-pwren", /* 14 */
+			  "", /* 15 */
+			  "", /* 16 */
+			  "", /* 17 */
+			  "", /* 18 */
+			  "", /* 19 */
+			  "", /* 20 */
+			  "pmu-int", /* 21 */
+			  "", /* 22 */
+			  "fxs-int", /* 23 */
+			  "", /* 24 */
+			  "", /* 25 */
+			  "phy25-int", /* 26 */
+			  "phy25-reset", /* 27 */
+			  "", /* 28 */
+			  "", /* 29 */
+			  "", /* 30 */
+			  "", /* 31 */
+			  "", /* 32 */
+			  "", /* 33 */
+			  "", /* 34 */
+			  "", /* 35 */
+			  "oled-rst", /* 36 */
+			  "boot-eth", /* 37 */
+			  "", /* 38 */
+			  "", /* 39 */
+			  "wan-sfp-rs0", /* 40 */
+			  "wan-sfp-pwrgood", /* 41 */
+			  "", /* 42 */
+			  "", /* 43 */
+			  "", /* 44 */
+			  "", /* 45 */
+			  "", /* 46 */
+			  "", /* 47 */
+			  "", /* 48 */
+			  "", /* 49 */
+			  "", /* 50 */
+			  "", /* 51 */
+			  "", /* 52 */
+			  "", /* 53 */
+			  "", /* 54 */
+			  "", /* 55 */
+			  "", /* 56 */
+			  "", /* 57 */
+			  "", /* 58 */
+			  "", /* 59 */
+			  "", /* 60 */
+			  "", /* 61 */
+			  "", /* 62 */
+			  "", /* 63 */
+			  "", /* 64 */
+			  "", /* 65 */
+			  "", /* 66 */
+			  "", /* 67 */
+			  "", /* 68 */
+			  "", /* 69 */
+			  "", /* 70 */
+			  "", /* 71 */
+			  "", /* 72 */
+			  "", /* 73 */
+			  "", /* 74 */
+			  "", /* 75 */
+			  "", /* 76 */
+			  "", /* 77 */
+			  "", /* 78 */
+			  "", /* 79 */
+			  "oled-data-select", /* 80 */
+			  "", /* 81 */
+			  "", /* 82 */
+			  "", /* 83 */
+			  "", /* 84 */
+			  "", /* 85 */
+			  "", /* 86 */
+			  "oled-vcc", /* 87 */
+			  "", /* 88 */
+			  "", /* 89 */
+			  "", /* 90 */
+			  "", /* 91 */
+			  "", /* 92 */
+			  "", /* 93 */
+			  "", /* 94 */
+			  "", /* 95 */
+			  "", /* 96 */
+			  "", /* 97 */
+			  "", /* 98 */
+			  "w-enable-2", /* 99 */
+			  "", /* 100 */
+			  "", /* 101 */
+			  "", /* 102 */
+			  "", /* 103 */
+			  "", /* 104 */
+			  "", /* 105 */
+			  "", /* 106 */
+			  "", /* 107 */
+			  "", /* 108 */
+			  "", /* 109 */
+			  "", /* 110 */
+			  "", /* 111 */
+			  "", /* 112 */
+			  "", /* 113 */
+			  "", /* 114 */
+			  "", /* 115 */
+			  "", /* 116 */
+			  "", /* 117 */
+			  "", /* 118 */
+			  "", /* 119 */
+			  "", /* 120 */
+			  "", /* 121 */
+			  "", /* 122 */
+			  "", /* 123 */
+			  "", /* 124 */
+			  "", /* 125 */
+			  "", /* 126 */
+			  "", /* 127 */
+			  "", /* 128 */
+			  "", /* 129 */
+			  ""; /* 130 */
+};
+
+&dsl_reserved {
+	status = "disabled";
+};
+
+&i2c1_gpio {
+	fbxpmu@3c {
+		compatible = "freebox,fbxgwr-pmu";
+		reg = <0x3c>;
+
+		fbxpmu_gpio_expander: fbxpmu@3c {
+			compatible = "freebox,fbxgwr-pmu-gpio";
+			interrupt-parent = <&pinctrl>;
+			interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+			gpio-controller;
+			ngpios = <24>;
+			#gpio-cells = <2>;
+			gpio-line-names = "keypad-down", /* 0 */
+					  "keypad-up", /* 1 */
+					  "keypad-cancel", /* 2 */
+					  "keypad-ok", /* 3 */
+					  "power-button", /* 4 */
+					  "test-mode"; /* 5 */
+		};
+
+		led-controller {
+			compatible = "freebox,fbxgwr-pmu-led";
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			nleds = <3>;
+
+			led0@0 {
+				label = "green";
+				reg = <0x00>;
+			};
+
+			led1@1 {
+				label = "red";
+				reg = <0x01>;
+			};
+
+			led2@2 {
+				label = "blue";
+				reg = <0x02>;
+			};
+		};
+	};
+};
+
+&keypad {
+	keyup {
+		label = "key up";
+		linux,code = <KEY_UP>;
+		gpios = <&fbxpmu_gpio_expander 1 GPIO_ACTIVE_LOW>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keydown {
+		label = "key down";
+		linux,code = <KEY_DOWN>;
+		gpios = <&fbxpmu_gpio_expander 0 GPIO_ACTIVE_LOW>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keyright {
+		label = "key right";
+		linux,code = <KEY_RIGHT>;
+		gpios = <&fbxpmu_gpio_expander 3 GPIO_ACTIVE_LOW>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keyleft {
+		label = "key left";
+		linux,code = <KEY_LEFT>;
+		gpios = <&fbxpmu_gpio_expander 2 GPIO_ACTIVE_LOW>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+};
+
+&pcie01 {
+	status = "okay";
+	pinctrl-0 = <&pcie0_pins>;
+	pinctrl-names = "default";
+};
+
+&fbxgw8r_gpio {
+	w-disable-2 {
+		gpio = <&pinctrl 99 GPIO_ACTIVE_LOW>;
+		output-low;
+	};
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r-common.dtsi linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-common.dtsi
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r-common.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-common.dtsi	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,441 @@
+/*
+ * common dtsi file for fbxgw8r.
+ */
+#include <dt-bindings/input/linux-event-codes.h>
+#include "bcm63158.dtsi"
+
+#include <../../../include/generated/autoconf.h>
+
+/ {
+	model = "Freebox FBXGW8R";
+
+	chosen {
+		bootargs = "console=ttyAMA0,115200";
+		stdout-path = &arm_serial0;
+	};
+
+	reserved-memory {
+		ramoops: ramoops@1fff0000 {
+			compatible = "ramoops";
+			/* RAM top - 64k */
+			reg = <0x0 0x1fff0000 0x0 (64 * 1024)>;
+			record-size = <(64 * 1024)>;
+			ecc-size = <16>;
+			no-dump-oops;
+		};
+
+#ifdef CONFIG_ATH11K_QCN9074_FIXED_MEM_REGION
+		qca_pine_2G4: qca-pine-2G4 {
+			reg = <0x0 0x14000000 0x0 0x01A00000>;
+			no-map;
+		};
+		qca_pine_5G: qca-pine-5G {
+			reg = <0x0 0x15a00000 0x0 0x01A00000>;
+			no-map;
+		};
+#endif
+	};
+	fbxgw8r_gpio: fbxgw8r-gpio {
+		compatible = "fbx,fbxgpio";
+
+		wan-sfp-txfault {
+			gpio = <&pinctrl 11 0>;
+			input;
+		};
+		wan-sfp-pwren {
+			gpio = <&pinctrl 14 0>;
+			output-low;
+		};
+		wan-sfp-presence {
+			gpio = <&pinctrl 9 0>;
+			input;
+		};
+		wan-sfp-pwrgood {
+			gpio = <&pinctrl 41 0>;
+			input;
+		};
+		wan-sfp-rxlos {
+			gpio = <&pinctrl 10 0>;
+			input;
+		};
+		wan-sfp-rs1 {
+			gpio = <&pinctrl 12 0>;
+			output-high;
+		};
+		wan-sfp-rogue-in {
+			gpio = <&pinctrl 40 0>;
+			input;
+			no-claim;
+		};
+
+		boot-eth {
+			gpio = <&pinctrl 37 0>;
+			input;
+		};
+
+		ha-rst {
+			gpio = <&pinctrl 91 0>;
+			output-low;
+		};
+		backlight-en {
+			gpio = <&pinctrl 81 0>;
+			output-low;
+		};
+
+		board-id-0 {
+			gpio = <&pinctrl 35 0>;
+			input;
+		};
+		board-id-1 {
+			gpio = <&pinctrl 28 0>;
+			input;
+		};
+		board-id-2 {
+			gpio = <&pinctrl 29 0>;
+			input;
+		};
+		board-id-3 {
+			gpio = <&pinctrl 30 0>;
+			input;
+		};
+		board-id-4 {
+			gpio = <&pinctrl 31 0>;
+			input;
+		};
+		board-id-5 {
+			gpio = <&pinctrl 13 0>;
+			input;
+		};
+	};
+
+	keypad: keypad {
+		compatible = "gpio-keys";
+		autorepeat = <1>;
+	};
+
+	i2c0_gpio: i2c0-gpio {
+		compatible = "i2c-gpio";
+		gpios = <&pinctrl 24 0 /* sda */
+			 &pinctrl 25 0 /* scl */
+			>;
+		i2c-gpio,delay-us = <10>;	/* ~100 kHz */
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+
+	i2c1_gpio: i2c1-gpio {
+		compatible = "i2c-gpio";
+		gpios = <&pinctrl 15 0 /* sda */
+			 &pinctrl 16 0 /* scl */
+			>;
+		i2c-gpio,delay-us = <10>;	/* ~100 kHz */
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+
+	aliases {
+		i2c0 = &i2c0_gpio;
+		i2c1 = &i2c1_gpio;
+	};
+
+	qcom,diag@0 {
+		compatible = "qcom,diag";
+		status = "ok";
+	};
+};
+
+&pinctrl {
+	arm_serial2_pins: arm-serial2-pins-0 {
+		arm_serial2_sout {
+			pinmux = <BCM63158_GPIO_18__FUNC_C_UART3_SOUT>;
+		};
+		arm_serial2_sin {
+			pinmux = <BCM63158_GPIO_17__FUNC_C_UART3_SIN>;
+		};
+	};
+};
+
+&arm_serial0 {
+	status = "okay";
+};
+
+
+&arm_serial2 {
+	/* home automation */
+	status = "okay";
+	pinctrl-0 = <&arm_serial2_pins>;
+	pinctrl-names = "default";
+};
+
+&sf2 {
+	status = "okay";
+
+	pinctrl-0 = <&gphy01_link_act_leds>;
+	pinctrl-names = "default";
+
+	sf2,mdio {
+		reset-gpio = <&pinctrl 27 GPIO_ACTIVE_LOW>;
+		reset-delay-us = <100000>;
+		reset-post-delay-us = <100000>;
+		keep-broken-phy;
+	};
+};
+
+&sf2_port0 {
+	status = "okay";
+	label = "swp1";
+	sf2,led-link-act = <20>;
+};
+
+&sf2_port1 {
+	status = "okay";
+	label = "swp2";
+	sf2,led-link-act = <21>;
+	dsa,cpu-port = <&sf2_port7>;
+};
+
+&sf2_port5 {
+	status = "okay";
+};
+
+&sf2_port6 {
+	status = "okay";
+	xbar-in-port = <0>;
+	phy-handle = <&port6_phy>;
+	label = "swp3";
+	dsa,cpu-port = <&sf2_port5>;
+};
+
+&sf2_port7 {
+	status = "okay";
+};
+
+&sf2_port8 {
+	status = "okay";
+};
+
+&sf2_qphy0 {
+	status = "okay";
+};
+
+&sf2_qphy1 {
+	status = "okay";
+};
+
+&runner_unimac0 {
+	status = "okay";
+	fbxserial-mac-address = <0>;
+};
+
+&runner_unimac1 {
+	status = "okay";
+	fbxserial-mac-address = <0>;
+};
+
+&runner_unimac2 {
+	status = "okay";
+	fbxserial-mac-address = <0>;
+};
+
+&runner_xport0 {
+	status = "okay";
+	fbxserial-mac-address = <0>;
+	pinctrl-0 = <&sfp_rogue1_pins>;
+	pinctrl-1 = <&sfp_rs0_gpio_pins>;
+	pinctrl-names = "rogue1", "rs0";
+
+};
+
+&sdhci {
+	status = "okay";
+
+	pinctrl-0 = <&emmc_pins>;
+	pinctrl-names = "default";
+
+	user-ro-area = /bits/64 <0 (32 << 20)>;
+	boot-ro-area = /bits/64 <0 (4 << 20)>;
+
+	partitions-main {
+		compatible = "fixed-partitions";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		disk-name = "mmcblk%d";
+
+		bank0@0 {
+			label = "bank0";
+			reg = /bits/64 <0 (32 * 1024 * 1024)>;
+			read-only;
+		};
+
+		bank1@0 {
+			label = "bank1";
+			reg = /bits/64 <(-1) (256 * 1024 * 1024)>;
+		};
+
+		nvram@0 {
+			label = "nvram";
+			reg = /bits/64 <(-1) (4 * 1024 * 1024)>;
+		};
+
+		config@0 {
+			label = "config";
+			reg = /bits/64 <(-1) (32 * 1024 * 1024)>;
+		};
+
+		new-bank0@0 {
+			label = "new_bank0";
+			reg = /bits/64 <(-1) (32 * 1024 * 1024)>;
+		};
+
+                fbxmbr@0 {
+			label = "fbxmbr";
+			reg = /bits/64 <(-1) (4096)>;
+                };
+
+		fortknox@0 {
+			label = "fortknox";
+			reg = /bits/64 <(-1) (128 * 1024 * 1024)>;
+                };
+
+		userdata@0 {
+			label = "userdata";
+			reg = /bits/64 <(-1) (-1)>;
+                };
+
+	};
+
+
+	partitions-boot {
+		compatible = "fixed-partitions";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		disk-name = "mmcblk%dboot0";
+
+		cfe@0 {
+			label = "cfe0";
+			reg = /bits/64 <0 (256 * 1024)>;
+			read-only;
+		};
+		cfe@1 {
+			label = "cfe1";
+			reg = /bits/64 <(-1) (256 * 1024)>;
+			read-only;
+		};
+		cfe@2 {
+			label = "cfe2";
+			reg = /bits/64 <(-1) (256 * 1024)>;
+			read-only;
+		};
+
+		serial@0 {
+			label = "fbxserial";
+			reg = /bits/64 <(1024 * 1024) (8 * 1024)>;
+			read-only;
+		};
+
+		fbxboot@0 {
+			label = "fbxboot";
+			reg = /bits/64 <(-1) (8 * 1024)>;
+			read-only;
+		};
+
+		calibration@0 {
+			label = "calibration";
+			reg = /bits/64 <(-1) (64 * 1024)>;
+			read-only;
+		};
+	};
+};
+
+&spi_pins {
+	spi-ss0 {
+		pinmux = <BCM63158_GPIO_111__FUNC_SPIM_SS0_B>;
+	};
+	spi-ss1 {
+		pinmux = <BCM63158_GPIO_112__FUNC_SPIM_SS1_B>;
+	};
+};
+
+&hs_spim {
+	status = "okay";
+	num-cs = <2>;
+	broadcom,dummy-cs = <2>;
+	pinctrl-0 = <&spi_pins>;
+	pinctrl-names = "default";
+
+	ssd1320@0 {
+		compatible = "solomon,ssd1320";
+		reg = <0>;
+
+		spi-max-frequency = <(14 * 1000 * 1000)>;
+
+		/*
+		* display mapping info (when looking at it such as keypad
+		* is on the right):
+		*
+		* SEG used on x-axis
+		* COM used on y-axis
+		*
+		* top-left: COM159/SEG0
+		* bottom-right: COM0/SEG159
+		*
+		* visible area (160x80)
+		*  top-left: COM119/SEG0
+		*  bottom-right: COM40/SEG159
+		*
+		* SEG are mapped in alternate: SEG0, SEG80, SEG1, ...
+		*/
+		ssd1320,com-range = <40 119>;
+		ssd1320,seg-range = <0 159>;
+		ssd1320,com-reverse-dir;
+		ssd1320,seg-first-odd;
+
+		ssd1320,clk-divide-ratio = <0xb1>;
+		ssd1320,precharge-period = <0x42>;
+		ssd1320,vcom-deselect-level = <0x30>;
+		ssd1320,precharge-voltage = <0x10>;
+		ssd1320,iref = <0x10>;
+
+		ssd1320,display-enh-a = <0xd5>;
+		ssd1320,display-enh-b = <0x21>;
+
+		ssd1320,grayscale-table = <0x01 0x02 0x03 0x05 0x08 0x0b
+			0xe 0x12 0x17 0x1c 0x22 0x29 0x2f 0x36 0x3f>;
+
+		ssd1320,default-brightness = <0x9f>;
+		ssd1320,max-brightness = <0xff>;
+
+		ssd1320,watchdog = <300>;
+		ssd1320,vcc-gpio = <&pinctrl 87 GPIO_ACTIVE_HIGH>;
+		ssd1320,data-select-gpio = <&pinctrl 80 GPIO_ACTIVE_HIGH>;
+		ssd1320,reset-gpio = <&pinctrl 36 GPIO_ACTIVE_LOW>;
+	};
+
+	spi-slac@1 {
+		compatible = "microsemi,le9641";
+		reg = <1>;
+		spi-max-frequency = <(1 * 1000 * 1000)>;
+	};
+};
+
+&bcm_pcm {
+	status = "okay";
+	pinctrl-0 = <&pcm_pins>;
+	pinctrl-names = "default";
+};
+
+&usb {
+	status = "okay";
+
+	pinctrl-0 = <&usb1_pins>;
+	pinctrl-names = "default";
+
+	brcm,pwren-high;
+	brcm,pwrflt-low;
+};
+
+&memc {
+	// status = "disabled";
+	brcm,auto-sr-en;
+	brcm,auto-sr-thresh = <20>;
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r-phy-aquantia.dtsi linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-phy-aquantia.dtsi
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r-phy-aquantia.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-phy-aquantia.dtsi	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,12 @@
+&sf2 {
+	sf2,mdio {
+		/* aquantia PHY */
+		port6_phy: ethernet-phy@8 {
+			compatible = "ethernet-phy-ieee802.3-c45";
+			status = "okay";
+			reg = <8>;
+			eee-broken-2500t;
+			eee-broken-5000t;
+		};
+	};
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r-phy-realtek.dtsi linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-phy-realtek.dtsi
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r-phy-realtek.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r-phy-realtek.dtsi	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,10 @@
+&sf2 {
+	sf2,mdio {
+		/* realtek PHY */
+		port6_phy: ethernet-phy@8 {
+			compatible = "ethernet-phy-ieee802.3-c45";
+			status = "okay";
+			reg = <7>;
+		};
+	};
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r.dts linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r.dts
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r.dts	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,13 @@
+/*
+ * Freebox FBXGW8R Board DTS
+ */
+/dts-v1/;
+
+#include "fbxgw8r-common.dtsi"
+#include "fbxgw8r-common-nopmu.dtsi"
+#include "fbxgw8r-phy-aquantia.dtsi"
+
+/ {
+	compatible = "freebox,fbxgw8r-board-00", "freebox,fbxgw8r",
+		"brcm,bcm63158";
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r_pcie_pine_asmedia.dts linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r_pcie_pine_asmedia.dts
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r_pcie_pine_asmedia.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r_pcie_pine_asmedia.dts	2025-09-25 17:40:30.223340727 +0200
@@ -0,0 +1,60 @@
+/dts-v1/;
+/plugin/;
+
+/ {
+	compatible = "freebox,fbxgw8r";
+
+	fragment@1 {
+		target = <&pcie0>;
+		__overlay__ {
+			#address-cells = <3>;
+			#size-cells = <2>;
+			rc@0,0 {
+				#address-cells = <3>;
+				#size-cells = <2>;
+				reg = <0x0000 0x0 0x0 0x0 0x0>;
+				ep_pine6G {
+					reg = <0x0000 0x0 0x0 0x0 0x0>;
+					qcom,board_id = <0xa9>;
+				};
+			};
+		};
+	};
+
+
+	fragment@2 {
+		target = <&pcie1>;
+		__overlay__ {
+			#address-cells = <3>;
+			#size-cells = <2>;
+			rc@0,0 {
+				#address-cells = <3>;
+				#size-cells = <2>;
+				reg = <0x0000 0x0 0x0 0x0 0x0>;
+				us {
+					#address-cells = <3>;
+					#size-cells = <2>;
+					reg = <0x0000 0x0 0x0 0x0 0x0>;
+					ds1 {
+						#address-cells = <3>;
+						#size-cells = <2>;
+						reg = <0x1800 0x0 0x0 0x0 0x0>;
+						ep_pine2G4 {
+							reg = <0x0000 0x0 0x0 0x0 0x0>;
+							qcom,board_id = <0xa6>;
+						};
+					};
+					ds2 {
+						#address-cells = <3>;
+						#size-cells = <2>;
+						reg = <0x3800 0x0 0x0 0x0 0x0>;
+						ep_pine5G {
+							reg = <0x0000 0x0 0x0 0x0 0x0>;
+							qcom,board_id = <0xa3>;
+						};
+					};
+				};
+			};
+		};
+	};
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r_pcie_pine_dualband_noswitch.dts linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r_pcie_pine_dualband_noswitch.dts
--- linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx./fbxgw8r_pcie_pine_dualband_noswitch.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/broadcom/bcm63xx/fbxgw8r_pcie_pine_dualband_noswitch.dts	2025-09-25 17:40:30.223340727 +0200
@@ -0,0 +1,49 @@
+/dts-v1/;
+/plugin/;
+
+#include <../../../include/generated/autoconf.h>
+
+/ {
+	compatible = "freebox,fbxgw8r";
+
+	fragment@1 {
+		target = <&pcie0>;
+		__overlay__ {
+			#address-cells = <3>;
+			#size-cells = <2>;
+			rc@0,0 {
+				#address-cells = <3>;
+				#size-cells = <2>;
+				reg = <0x0000 0x0 0x0 0x0 0x0>;
+				ep_pine5G {
+					reg = <0x0000 0x0 0x0 0x0 0x0>;
+					qcom,board_id = <0xa3>;
+#ifdef CONFIG_ATH11K_QCN9074_FIXED_MEM_REGION
+					memory-region = <&qca_pine_5G>;
+#endif
+				};
+			};
+		};
+	};
+
+
+	fragment@2 {
+		target = <&pcie1>;
+		__overlay__ {
+			#address-cells = <3>;
+			#size-cells = <2>;
+			rc@0,0 {
+				#address-cells = <3>;
+				#size-cells = <2>;
+				reg = <0x0000 0x0 0x0 0x0 0x0>;
+				ep_pine2G4 {
+					reg = <0x0000 0x0 0x0 0x0 0x0>;
+					qcom,board_id = <0xa6>;
+#ifdef CONFIG_ATH11K_QCN9074_FIXED_MEM_REGION
+					memory-region = <&qca_pine_2G4>;
+#endif
+				};
+			};
+		};
+	};
+};
diff -Nruw linux-6.13.12-fbx/arch/arm64/boot/dts/cortina-access./Makefile linux-6.13.12-fbx/arch/arm64/boot/dts/cortina-access/Makefile
--- linux-6.13.12-fbx/arch/arm64/boot/dts/cortina-access./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/arch/arm64/boot/dts/cortina-access/Makefile	2025-09-25 17:40:30.227340746 +0200
@@ -0,0 +1,16 @@
+dtb-$(CONFIG_ARCH_CORTINA_ACCESS) += fbxgw3r-evb.dtb
+dtb-$(CONFIG_ARCH_CORTINA_ACCESS) += fbxgw3r-board-00.dtb
+dtb-$(CONFIG_ARCH_CORTINA_ACCESS) += ca8289-engboard.dtb
+dtb-$(CONFIG_ARCH_CORTINA_ACCESS) += ca8289-refboard.dtb
+
+always-$(CONFIG_ARCH_CORTINA_ACCESS)	+= fbxgw3r_dtbs
+clean-files				+= fbxgw3r_dtbs
+board-dtbs				=  \
+					fbxgw3r-evb.dtb \
+					fbxgw3r-board-00.dtb
+
+cmd_dtbs               = ./scripts/dtbs.sh $@ $^
+quiet_cmd_dtbs         = DTBS    $@
+
+$(obj)/fbxgw3r_dtbs: $(addprefix $(obj)/,$(board-dtbs))
+	$(call cmd,dtbs)
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/block/partitions/dt.c	2025-09-25 17:40:31.127345209 +0200
@@ -0,0 +1,204 @@
+#define PREFIX "dtparts"
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/ctype.h>
+#include "check.h"
+
+/**
+ * match_one - check whether a string matches a printf-like pattern
+ * @s: the string to examine for presence of the pattern
+ * @p: the string containing the pattern
+ *
+ * The pattern may contain directives: %s (non-empty string),
+ * %d/%u/%o/%x (signed / unsigned / octal / hex number), %% (a literal
+ * '%').  An optional decimal field width after '%' caps how many
+ * characters a %s directive may consume.  All other characters must
+ * match literally.  Returns 1 if @s matches @p entirely, 0 otherwise.
+ */
+static int match_one(char *s, const char *p)
+{
+	char *meta, *from, *to;
+
+	while (1) {
+		int len = -1;
+
+		/* no (more) directives: the remainder must match exactly */
+		meta = strchr(p, '%');
+		if (!meta)
+			return strcmp(p, s) == 0;
+
+		/* literal prefix before the next '%' must match */
+		if (strncmp(p, s, meta-p))
+			return 0;
+
+		s += meta - p;
+		p = meta + 1;
+
+		/* optional field width, e.g. "%4s" */
+		if (isdigit(*p))
+			len = simple_strtoul(p, (char **) &p, 10);
+		else if (*p == '%') {
+			/* "%%" consumes a literal '%' from @s */
+			if (*s++ != '%')
+				return 0;
+			p++;
+			continue;
+		}
+
+		from = s;
+		switch (*p++) {
+		case 's': {
+			size_t str_len = strlen(s);
+
+			if (str_len == 0)
+				return 0;
+			if (len == -1 || len > str_len)
+				len = str_len;
+			to = s + len;
+			break;
+		}
+		case 'd':
+			simple_strtol(s, &to, 0);
+			goto num;
+		case 'u':
+			simple_strtoul(s, &to, 0);
+			goto num;
+		case 'o':
+			simple_strtoul(s, &to, 8);
+			goto num;
+		case 'x':
+			simple_strtoul(s, &to, 16);
+
+		num:
+			/* a numeric directive must consume >= 1 character */
+			if (to == from)
+				return 0;
+			break;
+		default:
+			return 0;
+		}
+		/* resume literal matching after the matched directive */
+		s = to;
+	}
+}
+
+/*
+ * Return the OF node of the first device in @ddev's parent chain
+ * (starting with @ddev itself) that has one, or NULL when no device
+ * in the chain carries a device tree node.
+ */
+static struct device_node *find_first_parent_node(const struct device *ddev)
+{
+	const struct device *dev;
+
+	for (dev = ddev; dev; dev = dev->parent)
+		if (dev->of_node)
+			return dev->of_node;
+
+	return NULL;
+}
+
+/*
+ * dt_partition - parse the disk's partition table from the device tree.
+ * @state: block layer partition-parsing state for the disk.
+ *
+ * Walks up from the disk's device to the first ancestor owning an OF
+ * node, then looks for a "fixed-partitions" child whose optional
+ * "disk-name" pattern matches the disk name (a child without
+ * "disk-name" matches any disk).  Each subnode describes one
+ * partition: reg = <offset size> in bytes, plus optional "label" and
+ * "read-only" properties.  An all-ones offset means "right after the
+ * previous partition"; an all-ones size means "up to the end of the
+ * disk".
+ *
+ * Returns 1 when partitions were added, 0 when the matching node has
+ * no subnodes, -1 on error or when no matching node exists.
+ */
+int dt_partition(struct parsed_partitions *state)
+{
+	struct device *ddev = disk_to_dev(state->disk);
+	struct device_node *np, *part_node, *pp;
+	u64 disk_size, last_end;
+	int nr_parts, i;
+
+	/* find first parent device with a non-NULL device tree node */
+	np = find_first_parent_node(ddev);
+	if (!np)
+		return -1;
+
+	part_node = NULL;
+	for_each_child_of_node(np, pp) {
+		char diskname[BDEVNAME_SIZE];
+		const char *pattern;
+
+		if (!of_device_is_compatible(pp, "fixed-partitions"))
+			continue;
+
+		/* check whether the device name matches the pattern */
+		strscpy(diskname, state->disk->disk_name, sizeof (diskname));
+
+		if (of_property_read_string(pp, "disk-name", &pattern)) {
+			/* no "disk-name" property: matches any disk */
+			part_node = pp;
+			break;
+		}
+
+		if (match_one(diskname, pattern)) {
+			part_node = pp;
+			break;
+		}
+	}
+
+	if (!part_node)
+		return -1;
+
+	/* First count the subnodes */
+	nr_parts = 0;
+	for_each_child_of_node(part_node, pp)
+		nr_parts++;
+
+	if (nr_parts == 0) {
+		of_node_put(part_node);
+		return 0;
+	}
+
+	disk_size = get_capacity(state->disk) << 9;
+
+	last_end = 0;
+	i = 1;
+	for_each_child_of_node(part_node, pp) {
+		struct partition_meta_info *info;
+		char tmp[sizeof (info->volname) + 4];
+		const __be32 *reg;
+		const char *partname;
+		int a_cells, s_cells;
+		u64 size, offset;
+		int len;
+
+		reg = of_get_property(pp, "reg", &len);
+		if (!reg) {
+			pr_err("part %pOF (%pOF) missing reg property.\n",
+			       pp, np);
+			goto err_put;
+		}
+
+		a_cells = of_n_addr_cells(pp);
+		s_cells = of_n_size_cells(pp);
+		if (len / 4 != a_cells + s_cells) {
+			pr_err("ofpart partition %pOF (%pOF) "
+			       "error parsing reg property.\n",
+			       pp, np);
+			goto err_put;
+		}
+
+		partname = of_get_property(pp, "label", &len);
+		if (!partname)
+			partname = of_get_property(pp, "name", &len);
+		if (!partname)
+			/* defensive: never hand strscpy a NULL source */
+			partname = "";
+
+		if (i >= state->limit) {
+			pr_err("too many partitions\n");
+			goto err_put;
+		}
+
+		/* all-ones offset: append after the previous partition */
+		offset = of_read_number(reg, a_cells);
+		if (offset == (u64)-1)
+			offset = last_end;
+
+		/* all-ones size: extend to the end of the disk */
+		size = of_read_number(reg + a_cells, s_cells);
+		if (size == (u64)-1)
+			size = disk_size - offset;
+
+		last_end = offset + size;
+		put_partition(state, i, offset >> 9, size >> 9);
+
+		info = &state->parts[i].info;
+		strscpy(info->volname, partname, sizeof (info->volname));
+		state->parts[i].has_info = true;
+
+		if (!IS_ENABLED(CONFIG_OF_PARTITION_IGNORE_RO) &&
+		    of_get_property(pp, "read-only", &len))
+			state->parts[i].flags |= ADDPART_FLAG_READONLY;
+
+		snprintf(tmp, sizeof(tmp), "(%s/%s)",
+			 info->volname,
+			 (state->parts[i].flags & ADDPART_FLAG_READONLY) ?
+			 "ro" : "rw");
+		strlcat(state->pp_buf, tmp, PAGE_SIZE);
+
+		i++;
+	}
+
+	of_node_put(part_node);
+	strlcat(state->pp_buf, "\n", PAGE_SIZE);
+	return 1;
+
+err_put:
+	/* drop the references held on the current child node and on
+	 * the partitions node before bailing out */
+	of_node_put(pp);
+	of_node_put(part_node);
+	return -1;
+}
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/config	2025-09-29 14:43:11.651702952 +0200
@@ -0,0 +1,5099 @@
+#
+# Automatically generated file; DO NOT EDIT.
+# Linux/arm64 6.13.12 Kernel Configuration
+#
+CONFIG_CC_VERSION_TEXT="aarch64-linux-musl-gcc (freebox) 14.2.0"
+CONFIG_CC_IS_GCC=y
+CONFIG_GCC_VERSION=140200
+CONFIG_CLANG_VERSION=0
+CONFIG_AS_IS_GNU=y
+CONFIG_AS_VERSION=24300
+CONFIG_LD_IS_BFD=y
+CONFIG_LD_VERSION=24300
+CONFIG_LLD_VERSION=0
+CONFIG_RUSTC_VERSION=0
+CONFIG_RUSTC_LLVM_VERSION=0
+CONFIG_CC_CAN_LINK=y
+CONFIG_CC_CAN_LINK_STATIC=y
+CONFIG_CC_HAS_ASM_GOTO_OUTPUT=y
+CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT=y
+CONFIG_TOOLS_SUPPORT_RELR=y
+CONFIG_CC_HAS_ASM_INLINE=y
+CONFIG_CC_HAS_NO_PROFILE_FN_ATTR=y
+CONFIG_LD_CAN_USE_KEEP_IN_OVERLAY=y
+CONFIG_PAHOLE_VERSION=0
+CONFIG_IRQ_WORK=y
+CONFIG_BUILDTIME_TABLE_SORT=y
+CONFIG_THREAD_INFO_IN_TASK=y
+
+#
+# General setup
+#
+CONFIG_INIT_ENV_ARG_LIMIT=32
+CONFIG_CROSS_COMPILE="/opt/toolchains/aarch64-musl-1.2.5-gcc-14.2.0-binutils-2.43-mold-2.33.0-gdb-15.1-1/bin/aarch64-linux-musl-"
+# CONFIG_COMPILE_TEST is not set
+# CONFIG_WERROR is not set
+CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
+CONFIG_BUILD_SALT=""
+CONFIG_DEFAULT_INIT=""
+CONFIG_DEFAULT_HOSTNAME="(none)"
+CONFIG_SYSVIPC=y
+CONFIG_SYSVIPC_SYSCTL=y
+# CONFIG_POSIX_MQUEUE is not set
+# CONFIG_WATCH_QUEUE is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+# CONFIG_USELIB is not set
+CONFIG_AUDIT=y
+CONFIG_HAVE_ARCH_AUDITSYSCALL=y
+CONFIG_AUDITSYSCALL=y
+
+#
+# IRQ subsystem
+#
+CONFIG_GENERIC_IRQ_PROBE=y
+CONFIG_GENERIC_IRQ_SHOW=y
+CONFIG_GENERIC_IRQ_SHOW_LEVEL=y
+CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK=y
+CONFIG_HARDIRQS_SW_RESEND=y
+CONFIG_IRQ_DOMAIN=y
+CONFIG_IRQ_DOMAIN_HIERARCHY=y
+CONFIG_GENERIC_IRQ_IPI=y
+CONFIG_GENERIC_MSI_IRQ=y
+CONFIG_IRQ_FORCED_THREADING=y
+CONFIG_SPARSE_IRQ=y
+# CONFIG_GENERIC_IRQ_DEBUGFS is not set
+# end of IRQ subsystem
+
+CONFIG_GENERIC_TIME_VSYSCALL=y
+CONFIG_GENERIC_CLOCKEVENTS=y
+CONFIG_ARCH_HAS_TICK_BROADCAST=y
+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y
+CONFIG_HAVE_POSIX_CPU_TIMERS_TASK_WORK=y
+CONFIG_POSIX_CPU_TIMERS_TASK_WORK=y
+CONFIG_CONTEXT_TRACKING=y
+CONFIG_CONTEXT_TRACKING_IDLE=y
+
+#
+# Timers subsystem
+#
+CONFIG_TICK_ONESHOT=y
+CONFIG_HZ_PERIODIC=y
+# CONFIG_NO_HZ_IDLE is not set
+# CONFIG_NO_HZ_FULL is not set
+# CONFIG_NO_HZ is not set
+CONFIG_HIGH_RES_TIMERS=y
+# end of Timers subsystem
+
+CONFIG_BPF=y
+CONFIG_HAVE_EBPF_JIT=y
+CONFIG_ARCH_WANT_DEFAULT_BPF_JIT=y
+
+#
+# BPF subsystem
+#
+# CONFIG_BPF_SYSCALL is not set
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_DEFAULT_ON=y
+# end of BPF subsystem
+
+CONFIG_PREEMPT_NONE_BUILD=y
+CONFIG_PREEMPT_NONE=y
+# CONFIG_PREEMPT_VOLUNTARY is not set
+# CONFIG_PREEMPT is not set
+# CONFIG_PREEMPT_RT is not set
+CONFIG_PREEMPT_COUNT=y
+# CONFIG_PREEMPT_DYNAMIC is not set
+
+#
+# CPU/Task time and stats accounting
+#
+CONFIG_TICK_CPU_ACCOUNTING=y
+# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_SCHED_AVG_IRQ=y
+# CONFIG_BSD_PROCESS_ACCT is not set
+# CONFIG_TASKSTATS is not set
+# CONFIG_PSI is not set
+# end of CPU/Task time and stats accounting
+
+# CONFIG_CPU_ISOLATION is not set
+
+#
+# RCU Subsystem
+#
+CONFIG_TREE_RCU=y
+# CONFIG_RCU_EXPERT is not set
+CONFIG_TREE_SRCU=y
+CONFIG_RCU_STALL_COMMON=y
+CONFIG_RCU_NEED_SEGCBLIST=y
+# end of RCU Subsystem
+
+CONFIG_IKCONFIG=y
+# CONFIG_IKCONFIG_PROC is not set
+# CONFIG_IKHEADERS is not set
+CONFIG_LOG_BUF_SHIFT=18
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=12
+# CONFIG_PRINTK_INDEX is not set
+# CONFIG_FBX_DECRYPT_INITRD is not set
+CONFIG_GENERIC_SCHED_CLOCK=y
+
+#
+# Scheduler features
+#
+# end of Scheduler features
+
+CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
+CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH=y
+CONFIG_CC_HAS_INT128=y
+CONFIG_CC_IMPLICIT_FALLTHROUGH="-Wimplicit-fallthrough=5"
+CONFIG_GCC10_NO_ARRAY_BOUNDS=y
+CONFIG_CC_NO_ARRAY_BOUNDS=y
+CONFIG_GCC_NO_STRINGOP_OVERFLOW=y
+CONFIG_CC_NO_STRINGOP_OVERFLOW=y
+CONFIG_ARCH_SUPPORTS_INT128=y
+CONFIG_CGROUPS=y
+# CONFIG_CGROUP_FAVOR_DYNMODS is not set
+# CONFIG_MEMCG is not set
+# CONFIG_BLK_CGROUP is not set
+# CONFIG_CGROUP_SCHED is not set
+# CONFIG_CGROUP_PIDS is not set
+# CONFIG_CGROUP_RDMA is not set
+# CONFIG_CGROUP_FREEZER is not set
+# CONFIG_CPUSETS is not set
+# CONFIG_CGROUP_DEVICE is not set
+# CONFIG_CGROUP_CPUACCT is not set
+# CONFIG_CGROUP_PERF is not set
+# CONFIG_CGROUP_MISC is not set
+# CONFIG_CGROUP_DEBUG is not set
+CONFIG_NAMESPACES=y
+CONFIG_UTS_NS=y
+# CONFIG_TIME_NS is not set
+CONFIG_IPC_NS=y
+# CONFIG_USER_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_NET_NS=y
+# CONFIG_CHECKPOINT_RESTORE is not set
+# CONFIG_SCHED_AUTOGROUP is not set
+# CONFIG_RELAY is not set
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_SOURCE=""
+# CONFIG_INITRAMFS_FORCE is not set
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+# CONFIG_RD_ZSTD is not set
+# CONFIG_BOOT_CONFIG is not set
+# CONFIG_INITRAMFS_PRESERVE_MTIME is not set
+CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_LD_ORPHAN_WARN=y
+CONFIG_LD_ORPHAN_WARN_LEVEL="warn"
+CONFIG_SYSCTL=y
+CONFIG_SYSCTL_EXCEPTION_TRACE=y
+CONFIG_EXPERT=y
+CONFIG_MULTIUSER=y
+# CONFIG_SGETMASK_SYSCALL is not set
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_FHANDLE is not set
+CONFIG_POSIX_TIMERS=y
+CONFIG_PRINTK=y
+CONFIG_BUG=y
+CONFIG_ELF_CORE=y
+# CONFIG_BASE_SMALL is not set
+CONFIG_FUTEX=y
+CONFIG_FUTEX_PI=y
+CONFIG_EPOLL=y
+CONFIG_SIGNALFD=y
+CONFIG_TIMERFD=y
+CONFIG_EVENTFD=y
+CONFIG_SHMEM=y
+CONFIG_AIO=y
+# CONFIG_IO_URING is not set
+CONFIG_ADVISE_SYSCALLS=y
+CONFIG_MEMBARRIER=y
+# CONFIG_KCMP is not set
+# CONFIG_RSEQ is not set
+# CONFIG_CACHESTAT_SYSCALL is not set
+# CONFIG_PC104 is not set
+CONFIG_KALLSYMS=y
+# CONFIG_KALLSYMS_SELFTEST is not set
+# CONFIG_KALLSYMS_ALL is not set
+CONFIG_ARCH_HAS_MEMBARRIER_SYNC_CORE=y
+CONFIG_HAVE_PERF_EVENTS=y
+
+#
+# Kernel Performance Events And Counters
+#
+CONFIG_PERF_EVENTS=y
+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set
+# end of Kernel Performance Events And Counters
+
+# CONFIG_PROFILING is not set
+
+#
+# Kexec and crash features
+#
+# CONFIG_KEXEC_FILE is not set
+# end of Kexec and crash features
+# end of General setup
+
+CONFIG_ARM64=y
+CONFIG_RUSTC_SUPPORTS_ARM64=y
+CONFIG_GCC_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS=y
+CONFIG_64BIT=y
+CONFIG_MMU=y
+CONFIG_ARM64_CONT_PTE_SHIFT=4
+CONFIG_ARM64_CONT_PMD_SHIFT=4
+CONFIG_ARCH_MMAP_RND_BITS_MIN=18
+CONFIG_ARCH_MMAP_RND_BITS_MAX=24
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN=11
+CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX=16
+CONFIG_STACKTRACE_SUPPORT=y
+CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000
+CONFIG_LOCKDEP_SUPPORT=y
+CONFIG_GENERIC_BUG=y
+CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y
+CONFIG_GENERIC_HWEIGHT=y
+CONFIG_GENERIC_CSUM=y
+CONFIG_GENERIC_CALIBRATE_DELAY=y
+CONFIG_SMP=y
+CONFIG_KERNEL_MODE_NEON=y
+CONFIG_FIX_EARLYCON_MEM=y
+CONFIG_PGTABLE_LEVELS=3
+CONFIG_ARCH_SUPPORTS_UPROBES=y
+CONFIG_ARCH_PROC_KCORE_TEXT=y
+CONFIG_BUILTIN_RETURN_ADDRESS_STRIPS_PAC=y
+
+#
+# Platform selection
+#
+# CONFIG_ARCH_ACTIONS is not set
+# CONFIG_ARCH_AIROHA is not set
+# CONFIG_ARCH_SUNXI is not set
+# CONFIG_ARCH_ALPINE is not set
+# CONFIG_ARCH_APPLE is not set
+CONFIG_ARCH_BCM=y
+# CONFIG_ARCH_BCM2835 is not set
+# CONFIG_ARCH_BCM_IPROC is not set
+CONFIG_ARCH_BCMBCA=y
+# CONFIG_ARCH_BRCMSTB is not set
+CONFIG_ARCH_BCM63XX_SHARED_OSH=y
+# CONFIG_ARCH_BERLIN is not set
+# CONFIG_ARCH_BITMAIN is not set
+# CONFIG_ARCH_CORTINA_ACCESS is not set
+# CONFIG_ARCH_EXYNOS is not set
+# CONFIG_ARCH_SPARX5 is not set
+# CONFIG_ARCH_K3 is not set
+# CONFIG_ARCH_LG1K is not set
+# CONFIG_ARCH_HISI is not set
+# CONFIG_ARCH_KEEMBAY is not set
+# CONFIG_ARCH_MEDIATEK is not set
+# CONFIG_ARCH_MESON is not set
+# CONFIG_ARCH_MVEBU is not set
+# CONFIG_ARCH_NXP is not set
+# CONFIG_ARCH_MA35 is not set
+# CONFIG_ARCH_NPCM is not set
+# CONFIG_ARCH_PENSANDO is not set
+# CONFIG_ARCH_QCOM is not set
+# CONFIG_ARCH_REALTEK is not set
+# CONFIG_ARCH_RENESAS is not set
+# CONFIG_ARCH_ROCKCHIP is not set
+# CONFIG_ARCH_SEATTLE is not set
+# CONFIG_ARCH_INTEL_SOCFPGA is not set
+# CONFIG_ARCH_STM32 is not set
+# CONFIG_ARCH_SYNQUACER is not set
+# CONFIG_ARCH_TEGRA is not set
+# CONFIG_ARCH_SPRD is not set
+# CONFIG_ARCH_THUNDER is not set
+# CONFIG_ARCH_THUNDER2 is not set
+# CONFIG_ARCH_UNIPHIER is not set
+# CONFIG_ARCH_VEXPRESS is not set
+# CONFIG_ARCH_VISCONTI is not set
+# CONFIG_ARCH_XGENE is not set
+# CONFIG_ARCH_ZYNQMP is not set
+# end of Platform selection
+
+#
+# Kernel Features
+#
+
+#
+# ARM errata workarounds via the alternatives framework
+#
+# CONFIG_AMPERE_ERRATUM_AC03_CPU_38 is not set
+CONFIG_ARM64_WORKAROUND_CLEAN_CACHE=y
+CONFIG_ARM64_ERRATUM_826319=y
+CONFIG_ARM64_ERRATUM_827319=y
+CONFIG_ARM64_ERRATUM_824069=y
+CONFIG_ARM64_ERRATUM_819472=y
+# CONFIG_ARM64_ERRATUM_832075 is not set
+CONFIG_ARM64_ERRATUM_843419=y
+CONFIG_ARM64_LD_HAS_FIX_ERRATUM_843419=y
+# CONFIG_ARM64_ERRATUM_1024718 is not set
+# CONFIG_ARM64_ERRATUM_1165522 is not set
+# CONFIG_ARM64_ERRATUM_1319367 is not set
+# CONFIG_ARM64_ERRATUM_1530923 is not set
+# CONFIG_ARM64_ERRATUM_2441007 is not set
+# CONFIG_ARM64_ERRATUM_1286807 is not set
+# CONFIG_ARM64_ERRATUM_1463225 is not set
+# CONFIG_ARM64_ERRATUM_1542419 is not set
+# CONFIG_ARM64_ERRATUM_1508412 is not set
+# CONFIG_ARM64_ERRATUM_2051678 is not set
+# CONFIG_ARM64_ERRATUM_2077057 is not set
+# CONFIG_ARM64_ERRATUM_2658417 is not set
+# CONFIG_ARM64_ERRATUM_2054223 is not set
+# CONFIG_ARM64_ERRATUM_2067961 is not set
+# CONFIG_ARM64_ERRATUM_2441009 is not set
+# CONFIG_ARM64_ERRATUM_2645198 is not set
+# CONFIG_ARM64_ERRATUM_2966298 is not set
+# CONFIG_ARM64_ERRATUM_3117295 is not set
+# CONFIG_ARM64_ERRATUM_3194386 is not set
+# CONFIG_CAVIUM_ERRATUM_22375 is not set
+# CONFIG_CAVIUM_ERRATUM_23154 is not set
+# CONFIG_CAVIUM_ERRATUM_27456 is not set
+# CONFIG_CAVIUM_ERRATUM_30115 is not set
+# CONFIG_CAVIUM_TX2_ERRATUM_219 is not set
+# CONFIG_FUJITSU_ERRATUM_010001 is not set
+# CONFIG_HISILICON_ERRATUM_161600802 is not set
+# CONFIG_HISILICON_ERRATUM_162100801 is not set
+# CONFIG_QCOM_FALKOR_ERRATUM_1003 is not set
+# CONFIG_QCOM_FALKOR_ERRATUM_1009 is not set
+# CONFIG_QCOM_QDF2400_ERRATUM_0065 is not set
+# CONFIG_QCOM_FALKOR_ERRATUM_E1041 is not set
+# CONFIG_NVIDIA_CARMEL_CNP_ERRATUM is not set
+# CONFIG_ROCKCHIP_ERRATUM_3568002 is not set
+# CONFIG_ROCKCHIP_ERRATUM_3588001 is not set
+# CONFIG_SOCIONEXT_SYNQUACER_PREITS is not set
+# end of ARM errata workarounds via the alternatives framework
+
+CONFIG_ARM64_4K_PAGES=y
+# CONFIG_ARM64_16K_PAGES is not set
+# CONFIG_ARM64_64K_PAGES is not set
+CONFIG_ARM64_VA_BITS_39=y
+# CONFIG_ARM64_VA_BITS_48 is not set
+# CONFIG_ARM64_VA_BITS_52 is not set
+CONFIG_ARM64_VA_BITS=39
+CONFIG_ARM64_PA_BITS_48=y
+CONFIG_ARM64_PA_BITS=48
+# CONFIG_CPU_BIG_ENDIAN is not set
+CONFIG_CPU_LITTLE_ENDIAN=y
+# CONFIG_SCHED_MC is not set
+# CONFIG_SCHED_CLUSTER is not set
+# CONFIG_SCHED_SMT is not set
+CONFIG_NR_CPUS=4
+# CONFIG_HOTPLUG_CPU is not set
+# CONFIG_NUMA is not set
+CONFIG_HZ_100=y
+# CONFIG_HZ_250 is not set
+# CONFIG_HZ_300 is not set
+# CONFIG_HZ_1000 is not set
+CONFIG_HZ=100
+CONFIG_SCHED_HRTICK=y
+CONFIG_ARCH_SPARSEMEM_ENABLE=y
+CONFIG_HW_PERF_EVENTS=y
+CONFIG_CC_HAVE_SHADOW_CALL_STACK=y
+# CONFIG_PARAVIRT is not set
+# CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
+CONFIG_ARCH_SUPPORTS_KEXEC_FILE=y
+CONFIG_ARCH_SUPPORTS_KEXEC_SIG=y
+CONFIG_ARCH_SUPPORTS_KEXEC_IMAGE_VERIFY_SIG=y
+CONFIG_ARCH_DEFAULT_KEXEC_IMAGE_VERIFY_SIG=y
+CONFIG_ARCH_SUPPORTS_CRASH_DUMP=y
+CONFIG_ARCH_DEFAULT_CRASH_DUMP=y
+# CONFIG_XEN is not set
+CONFIG_ARCH_FORCE_MAX_ORDER=10
+# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
+# CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY is not set
+# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set
+# CONFIG_ARM64_SW_TTBR0_PAN is not set
+CONFIG_ARM64_TAGGED_ADDR_ABI=y
+# CONFIG_COMPAT is not set
+
+#
+# ARMv8.1 architectural features
+#
+CONFIG_ARM64_HW_AFDBM=y
+CONFIG_ARM64_PAN=y
+CONFIG_AS_HAS_LSE_ATOMICS=y
+# CONFIG_ARM64_USE_LSE_ATOMICS is not set
+# end of ARMv8.1 architectural features
+
+#
+# ARMv8.2 architectural features
+#
+CONFIG_AS_HAS_ARMV8_2=y
+CONFIG_AS_HAS_SHA3=y
+# CONFIG_ARM64_PMEM is not set
+# CONFIG_ARM64_RAS_EXTN is not set
+# CONFIG_ARM64_CNP is not set
+# end of ARMv8.2 architectural features
+
+#
+# ARMv8.3 architectural features
+#
+# CONFIG_ARM64_PTR_AUTH is not set
+CONFIG_CC_HAS_BRANCH_PROT_PAC_RET=y
+CONFIG_CC_HAS_SIGN_RETURN_ADDRESS=y
+CONFIG_AS_HAS_ARMV8_3=y
+CONFIG_AS_HAS_CFI_NEGATE_RA_STATE=y
+CONFIG_AS_HAS_LDAPR=y
+# end of ARMv8.3 architectural features
+
+#
+# ARMv8.4 architectural features
+#
+# CONFIG_ARM64_AMU_EXTN is not set
+CONFIG_AS_HAS_ARMV8_4=y
+# CONFIG_ARM64_TLB_RANGE is not set
+# end of ARMv8.4 architectural features
+
+#
+# ARMv8.5 architectural features
+#
+CONFIG_AS_HAS_ARMV8_5=y
+# CONFIG_ARM64_BTI is not set
+CONFIG_CC_HAS_BRANCH_PROT_PAC_RET_BTI=y
+# CONFIG_ARM64_E0PD is not set
+CONFIG_ARM64_AS_HAS_MTE=y
+# CONFIG_ARM64_MTE is not set
+# end of ARMv8.5 architectural features
+
+#
+# ARMv8.7 architectural features
+#
+# CONFIG_ARM64_EPAN is not set
+# end of ARMv8.7 architectural features
+
+CONFIG_AS_HAS_MOPS=y
+
+#
+# ARMv8.9 architectural features
+#
+# CONFIG_ARM64_POE is not set
+CONFIG_ARCH_PKEY_BITS=3
+# CONFIG_ARM64_HAFT is not set
+# end of ARMv8.9 architectural features
+
+#
+# v9.4 architectural features
+#
+# CONFIG_ARM64_GCS is not set
+# end of v9.4 architectural features
+
+# CONFIG_ARM64_SVE is not set
+# CONFIG_ARM64_PSEUDO_NMI is not set
+# CONFIG_RELOCATABLE is not set
+# CONFIG_RANDOMIZE_BASE is not set
+CONFIG_CC_HAVE_STACKPROTECTOR_SYSREG=y
+# end of Kernel Features
+
+#
+# Boot options
+#
+CONFIG_CMDLINE="console=ttyAMA0,115200 earlycon=pl011,mmio32,0xff812000 debug ip=:::::swp1:dhcp root=/dev/nfs"
+# CONFIG_CMDLINE_FROM_BOOTLOADER is not set
+CONFIG_CMDLINE_FORCE=y
+# CONFIG_EFI is not set
+# CONFIG_COMPRESSED_INSTALL is not set
+# end of Boot options
+
+#
+# Power management options
+#
+# CONFIG_SUSPEND is not set
+# CONFIG_PM is not set
+# CONFIG_ENERGY_MODEL is not set
+CONFIG_ARCH_SUSPEND_POSSIBLE=y
+# end of Power management options
+
+#
+# CPU Power Management
+#
+
+#
+# CPU Idle
+#
+# CONFIG_CPU_IDLE is not set
+# end of CPU Idle
+
+#
+# CPU Frequency scaling
+#
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
+# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+CONFIG_CPU_FREQ_GOV_USERSPACE=y
+# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set
+
+#
+# CPU frequency scaling drivers
+#
+CONFIG_CPUFREQ_DT=y
+# CONFIG_CPUFREQ_VIRT is not set
+CONFIG_CPUFREQ_DT_PLATDEV=y
+CONFIG_BCM63158_CPUFREQ=y
+# end of CPU Frequency scaling
+# end of CPU Power Management
+
+# CONFIG_VIRTUALIZATION is not set
+CONFIG_CPU_MITIGATIONS=y
+
+#
+# General architecture-dependent options
+#
+# CONFIG_KPROBES is not set
+# CONFIG_JUMP_LABEL is not set
+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y
+CONFIG_HAVE_IOREMAP_PROT=y
+CONFIG_HAVE_KPROBES=y
+CONFIG_HAVE_KRETPROBES=y
+CONFIG_HAVE_FUNCTION_ERROR_INJECTION=y
+CONFIG_HAVE_NMI=y
+CONFIG_TRACE_IRQFLAGS_SUPPORT=y
+CONFIG_TRACE_IRQFLAGS_NMI_SUPPORT=y
+CONFIG_HAVE_ARCH_TRACEHOOK=y
+CONFIG_HAVE_DMA_CONTIGUOUS=y
+CONFIG_GENERIC_SMP_IDLE_THREAD=y
+CONFIG_GENERIC_IDLE_POLL_SETUP=y
+CONFIG_ARCH_HAS_FORTIFY_SOURCE=y
+CONFIG_ARCH_HAS_KEEPINITRD=y
+CONFIG_ARCH_HAS_SET_MEMORY=y
+CONFIG_ARCH_HAS_SET_DIRECT_MAP=y
+CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST=y
+CONFIG_ARCH_WANTS_NO_INSTR=y
+CONFIG_HAVE_ASM_MODVERSIONS=y
+CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y
+CONFIG_HAVE_RSEQ=y
+CONFIG_HAVE_RUST=y
+CONFIG_HAVE_FUNCTION_ARG_ACCESS_API=y
+CONFIG_HAVE_HW_BREAKPOINT=y
+CONFIG_HAVE_PERF_REGS=y
+CONFIG_HAVE_PERF_USER_STACK_DUMP=y
+CONFIG_HAVE_ARCH_JUMP_LABEL=y
+CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y
+CONFIG_MMU_GATHER_TABLE_FREE=y
+CONFIG_MMU_GATHER_RCU_TABLE_FREE=y
+CONFIG_MMU_LAZY_TLB_REFCOUNT=y
+CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
+CONFIG_ARCH_HAS_NMI_SAFE_THIS_CPU_OPS=y
+CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
+CONFIG_HAVE_CMPXCHG_LOCAL=y
+CONFIG_HAVE_CMPXCHG_DOUBLE=y
+CONFIG_HAVE_ARCH_SECCOMP=y
+CONFIG_HAVE_ARCH_SECCOMP_FILTER=y
+CONFIG_SECCOMP=y
+CONFIG_SECCOMP_FILTER=y
+# CONFIG_SECCOMP_CACHE_DEBUG is not set
+CONFIG_HAVE_ARCH_STACKLEAK=y
+CONFIG_HAVE_STACKPROTECTOR=y
+# CONFIG_STACKPROTECTOR is not set
+CONFIG_ARCH_SUPPORTS_SHADOW_CALL_STACK=y
+# CONFIG_SHADOW_CALL_STACK is not set
+CONFIG_ARCH_SUPPORTS_LTO_CLANG=y
+CONFIG_ARCH_SUPPORTS_LTO_CLANG_THIN=y
+CONFIG_LTO_NONE=y
+CONFIG_ARCH_SUPPORTS_CFI_CLANG=y
+CONFIG_HAVE_CONTEXT_TRACKING_USER=y
+CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y
+CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y
+CONFIG_HAVE_MOVE_PUD=y
+CONFIG_HAVE_MOVE_PMD=y
+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y
+CONFIG_HAVE_ARCH_HUGE_VMAP=y
+CONFIG_HAVE_ARCH_HUGE_VMALLOC=y
+CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
+CONFIG_HAVE_MOD_ARCH_SPECIFIC=y
+CONFIG_MODULES_USE_ELF_RELA=y
+CONFIG_ARCH_WANTS_EXECMEM_LATE=y
+CONFIG_HAVE_SOFTIRQ_ON_OWN_STACK=y
+CONFIG_SOFTIRQ_ON_OWN_STACK=y
+CONFIG_ARCH_HAS_ELF_RANDOMIZE=y
+CONFIG_HAVE_ARCH_MMAP_RND_BITS=y
+CONFIG_ARCH_MMAP_RND_BITS=18
+CONFIG_HAVE_PAGE_SIZE_4KB=y
+CONFIG_PAGE_SIZE_4KB=y
+CONFIG_PAGE_SIZE_LESS_THAN_64KB=y
+CONFIG_PAGE_SIZE_LESS_THAN_256KB=y
+CONFIG_PAGE_SHIFT=12
+CONFIG_ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT=y
+CONFIG_CLONE_BACKWARDS=y
+# CONFIG_COMPAT_32BIT_TIME is not set
+CONFIG_ARCH_SUPPORTS_RT=y
+CONFIG_HAVE_ARCH_VMAP_STACK=y
+CONFIG_VMAP_STACK=y
+CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET=y
+# CONFIG_RANDOMIZE_KSTACK_OFFSET is not set
+CONFIG_ARCH_HAS_STRICT_KERNEL_RWX=y
+CONFIG_STRICT_KERNEL_RWX=y
+CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
+CONFIG_STRICT_MODULE_RWX=y
+CONFIG_HAVE_ARCH_COMPILER_H=y
+CONFIG_HAVE_ARCH_PREL32_RELOCATIONS=y
+# CONFIG_LOCK_EVENT_COUNTS is not set
+CONFIG_ARCH_HAS_MEM_ENCRYPT=y
+CONFIG_ARCH_HAS_CC_PLATFORM=y
+CONFIG_HAVE_PREEMPT_DYNAMIC=y
+CONFIG_HAVE_PREEMPT_DYNAMIC_KEY=y
+CONFIG_ARCH_WANT_LD_ORPHAN_WARN=y
+CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
+CONFIG_ARCH_SUPPORTS_PAGE_TABLE_CHECK=y
+CONFIG_ARCH_HAVE_TRACE_MMIO_ACCESS=y
+CONFIG_ARCH_HAS_HW_PTE_YOUNG=y
+CONFIG_ARCH_HAS_KERNEL_FPU_SUPPORT=y
+
+#
+# GCOV-based kernel profiling
+#
+# CONFIG_GCOV_KERNEL is not set
+CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y
+# end of GCOV-based kernel profiling
+
+CONFIG_HAVE_GCC_PLUGINS=y
+CONFIG_GCC_PLUGINS=y
+# CONFIG_GCC_PLUGIN_LATENT_ENTROPY is not set
+CONFIG_FUNCTION_ALIGNMENT_4B=y
+CONFIG_FUNCTION_ALIGNMENT=4
+CONFIG_CC_HAS_MIN_FUNCTION_ALIGNMENT=y
+CONFIG_CC_HAS_SANE_FUNCTION_ALIGNMENT=y
+# end of General architecture-dependent options
+
+CONFIG_RT_MUTEXES=y
+CONFIG_MODULES=y
+# CONFIG_MODULE_DEBUG is not set
+# CONFIG_MODULE_FORCE_LOAD is not set
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_MODULE_FORCE_UNLOAD is not set
+# CONFIG_MODULE_UNLOAD_TAINT_TRACKING is not set
+# CONFIG_MODVERSIONS is not set
+# CONFIG_MODULE_SRCVERSION_ALL is not set
+# CONFIG_MODULE_SIG is not set
+# CONFIG_MODULE_COMPRESS is not set
+# CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS is not set
+CONFIG_MODPROBE_PATH="/sbin/modprobe"
+CONFIG_TRIM_UNUSED_KSYMS=y
+CONFIG_UNUSED_KSYMS_WHITELIST=""
+CONFIG_UNUSED_KSYMS_WHITELIST_SYMS="dib7000p_attach"
+CONFIG_MODULES_TREE_LOOKUP=y
+CONFIG_BLOCK=y
+# CONFIG_BLOCK_LEGACY_AUTOLOAD is not set
+CONFIG_BLK_DEV_BSG_COMMON=y
+# CONFIG_BLK_DEV_BSGLIB is not set
+# CONFIG_BLK_DEV_INTEGRITY is not set
+CONFIG_BLK_DEV_WRITE_MOUNTED=y
+# CONFIG_BLK_DEV_ZONED is not set
+# CONFIG_BLK_WBT is not set
+CONFIG_BLK_DEBUG_FS=y
+# CONFIG_BLK_SED_OPAL is not set
+# CONFIG_BLK_INLINE_ENCRYPTION is not set
+
+#
+# Partition Types
+#
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_ACORN_PARTITION is not set
+# CONFIG_AIX_PARTITION is not set
+# CONFIG_OSF_PARTITION is not set
+# CONFIG_AMIGA_PARTITION is not set
+# CONFIG_ATARI_PARTITION is not set
+# CONFIG_MAC_PARTITION is not set
+CONFIG_MSDOS_PARTITION=y
+# CONFIG_BSD_DISKLABEL is not set
+# CONFIG_MINIX_SUBPARTITION is not set
+# CONFIG_SOLARIS_X86_PARTITION is not set
+# CONFIG_UNIXWARE_DISKLABEL is not set
+# CONFIG_LDM_PARTITION is not set
+# CONFIG_SGI_PARTITION is not set
+# CONFIG_ULTRIX_PARTITION is not set
+# CONFIG_SUN_PARTITION is not set
+# CONFIG_KARMA_PARTITION is not set
+CONFIG_EFI_PARTITION=y
+# CONFIG_SYSV68_PARTITION is not set
+# CONFIG_CMDLINE_PARTITION is not set
+CONFIG_OF_PARTITION=y
+# CONFIG_OF_PARTITION_IGNORE_RO is not set
+# end of Partition Types
+
+CONFIG_BLK_MQ_PCI=y
+CONFIG_BLOCK_HOLDER_DEPRECATED=y
+CONFIG_BLK_MQ_STACKING=y
+
+#
+# IO Schedulers
+#
+CONFIG_MQ_IOSCHED_DEADLINE=y
+CONFIG_MQ_IOSCHED_KYBER=y
+# CONFIG_IOSCHED_BFQ is not set
+# end of IO Schedulers
+
+CONFIG_ASN1=y
+CONFIG_ARCH_INLINE_SPIN_TRYLOCK=y
+CONFIG_ARCH_INLINE_SPIN_TRYLOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_LOCK=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_SPIN_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_INLINE_READ_LOCK=y
+CONFIG_ARCH_INLINE_READ_LOCK_BH=y
+CONFIG_ARCH_INLINE_READ_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_READ_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_READ_UNLOCK=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_READ_UNLOCK_IRQRESTORE=y
+CONFIG_ARCH_INLINE_WRITE_LOCK=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_BH=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_IRQ=y
+CONFIG_ARCH_INLINE_WRITE_LOCK_IRQSAVE=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_BH=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQ=y
+CONFIG_ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE=y
+CONFIG_UNINLINE_SPIN_UNLOCK=y
+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y
+CONFIG_MUTEX_SPIN_ON_OWNER=y
+CONFIG_RWSEM_SPIN_ON_OWNER=y
+CONFIG_LOCK_SPIN_ON_OWNER=y
+CONFIG_ARCH_USE_QUEUED_SPINLOCKS=y
+CONFIG_QUEUED_SPINLOCKS=y
+CONFIG_ARCH_USE_QUEUED_RWLOCKS=y
+CONFIG_QUEUED_RWLOCKS=y
+CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE=y
+CONFIG_ARCH_HAS_SYSCALL_WRAPPER=y
+
+#
+# Executable file formats
+#
+CONFIG_BINFMT_ELF=y
+CONFIG_ARCH_BINFMT_ELF_STATE=y
+CONFIG_ARCH_BINFMT_ELF_EXTRA_PHDRS=y
+CONFIG_ARCH_HAVE_ELF_PROT=y
+CONFIG_ARCH_USE_GNU_PROPERTY=y
+CONFIG_ELFCORE=y
+CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
+CONFIG_BINFMT_SCRIPT=y
+# CONFIG_BINFMT_MISC is not set
+CONFIG_COREDUMP=y
+# end of Executable file formats
+
+#
+# Memory Management options
+#
+# CONFIG_SWAP is not set
+
+#
+# Slab allocator options
+#
+CONFIG_SLUB=y
+# CONFIG_SLUB_TINY is not set
+CONFIG_SLAB_MERGE_DEFAULT=y
+# CONFIG_SLAB_FREELIST_RANDOM is not set
+# CONFIG_SLAB_FREELIST_HARDENED is not set
+# CONFIG_SLAB_BUCKETS is not set
+# CONFIG_SLUB_STATS is not set
+CONFIG_SLUB_CPU_PARTIAL=y
+# CONFIG_RANDOM_KMALLOC_CACHES is not set
+# end of Slab allocator options
+
+# CONFIG_SHUFFLE_PAGE_ALLOCATOR is not set
+CONFIG_PSEUDO_ASLR=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SPARSEMEM=y
+CONFIG_SPARSEMEM_EXTREME=y
+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y
+CONFIG_SPARSEMEM_VMEMMAP=y
+CONFIG_HAVE_GUP_FAST=y
+CONFIG_ARCH_KEEP_MEMBLOCK=y
+CONFIG_EXCLUSIVE_SYSTEM_RAM=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
+CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
+# CONFIG_MEMORY_HOTPLUG is not set
+CONFIG_ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE=y
+CONFIG_SPLIT_PTE_PTLOCKS=y
+CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
+CONFIG_SPLIT_PMD_PTLOCKS=y
+CONFIG_COMPACTION=y
+CONFIG_COMPACT_UNEVICTABLE_DEFAULT=1
+# CONFIG_PAGE_REPORTING is not set
+CONFIG_MIGRATION=y
+CONFIG_PCP_BATCH_SCALE_MAX=5
+CONFIG_PHYS_ADDR_T_64BIT=y
+CONFIG_KSM=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
+CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y
+CONFIG_PAGE_FRAG_CACHE_ORDER=1
+# CONFIG_MEMORY_FAILURE is not set
+CONFIG_ARCH_WANTS_THP_SWAP=y
+# CONFIG_TRANSPARENT_HUGEPAGE is not set
+# CONFIG_CMA is not set
+CONFIG_GENERIC_EARLY_IOREMAP=y
+# CONFIG_DEFERRED_STRUCT_PAGE_INIT is not set
+# CONFIG_IDLE_PAGE_TRACKING is not set
+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
+CONFIG_ARCH_HAS_CURRENT_STACK_POINTER=y
+CONFIG_ARCH_HAS_PTE_DEVMAP=y
+CONFIG_ARCH_HAS_ZONE_DMA_SET=y
+# CONFIG_ZONE_DMA is not set
+# CONFIG_ZONE_DMA32 is not set
+CONFIG_VM_EVENT_COUNTERS=y
+# CONFIG_PERCPU_STATS is not set
+# CONFIG_GUP_TEST is not set
+# CONFIG_DMAPOOL_TEST is not set
+CONFIG_ARCH_HAS_PTE_SPECIAL=y
+CONFIG_MEMFD_CREATE=y
+# CONFIG_SECRETMEM is not set
+# CONFIG_ANON_VMA_NAME is not set
+# CONFIG_USERFAULTFD is not set
+# CONFIG_LRU_GEN is not set
+CONFIG_ARCH_SUPPORTS_PER_VMA_LOCK=y
+CONFIG_PER_VMA_LOCK=y
+CONFIG_LOCK_MM_AND_FIND_VMA=y
+CONFIG_EXECMEM=y
+
+#
+# Data Access Monitoring
+#
+# CONFIG_DAMON is not set
+# end of Data Access Monitoring
+# end of Memory Management options
+
+CONFIG_NET=y
+# CONFIG_NET_PROMISC_MESSAGES is not set
+CONFIG_NET_INGRESS=y
+CONFIG_NET_EGRESS=y
+CONFIG_NET_XGRESS=y
+CONFIG_SKB_EXTENSIONS=y
+CONFIG_NET_DEVMEM=y
+
+#
+# Networking options
+#
+CONFIG_NETSKBPAD=64
+CONFIG_PACKET=y
+# CONFIG_PACKET_DIAG is not set
+CONFIG_UNIX=y
+CONFIG_AF_UNIX_OOB=y
+CONFIG_UNIX_ABSTRACT_IGNORE_NETNS=y
+# CONFIG_UNIX_DIAG is not set
+# CONFIG_TLS is not set
+CONFIG_XFRM=y
+CONFIG_XFRM_OFFLOAD=y
+CONFIG_XFRM_ALGO=y
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+# CONFIG_XFRM_SUB_POLICY is not set
+# CONFIG_XFRM_MIGRATE is not set
+# CONFIG_XFRM_STATISTICS is not set
+CONFIG_XFRM_AH=y
+CONFIG_XFRM_ESP=y
+CONFIG_NET_KEY=y
+# CONFIG_NET_KEY_MIGRATE is not set
+CONFIG_NET_HANDSHAKE=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_IP_FIB_TRIE_STATS is not set
+CONFIG_IP_MULTIPLE_TABLES=y
+# CONFIG_IP_ROUTE_MULTIPATH is not set
+# CONFIG_IP_ROUTE_VERBOSE is not set
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_IP_PNP_RARP is not set
+# CONFIG_NET_IPIP is not set
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IP_TUNNEL=y
+CONFIG_NET_IPGRE=m
+# CONFIG_NET_IPGRE_BROADCAST is not set
+# CONFIG_IP_MROUTE is not set
+# CONFIG_SYN_COOKIES is not set
+# CONFIG_NET_IPVTI is not set
+CONFIG_NET_UDP_TUNNEL=y
+# CONFIG_NET_FOU is not set
+# CONFIG_NET_FOU_IP_TUNNELS is not set
+CONFIG_INET_AH=y
+CONFIG_INET_ESP=y
+# CONFIG_INET_ESP_OFFLOAD is not set
+# CONFIG_INET_ESPINTCP is not set
+# CONFIG_INET_IPCOMP is not set
+CONFIG_INET_TABLE_PERTURB_ORDER=16
+CONFIG_INET_TUNNEL=y
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
+# CONFIG_INET_UDP_DIAG is not set
+# CONFIG_INET_RAW_DIAG is not set
+# CONFIG_INET_DIAG_DESTROY is not set
+# CONFIG_TCP_CONG_ADVANCED is not set
+CONFIG_TCP_CONG_CUBIC=y
+CONFIG_DEFAULT_TCP_CONG="cubic"
+# CONFIG_TCP_AO is not set
+# CONFIG_TCP_MD5SIG is not set
+CONFIG_IPV6=y
+# CONFIG_IPV6_ROUTER_PREF is not set
+# CONFIG_IPV6_OPTIMISTIC_DAD is not set
+CONFIG_INET6_AH=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_ESP_OFFLOAD=y
+# CONFIG_INET6_ESPINTCP is not set
+# CONFIG_INET6_IPCOMP is not set
+# CONFIG_IPV6_MIP6 is not set
+# CONFIG_IPV6_ILA is not set
+CONFIG_INET6_TUNNEL=y
+# CONFIG_IPV6_VTI is not set
+CONFIG_IPV6_SIT=y
+CONFIG_IPV6_SIT_6RD=y
+CONFIG_IPV6_NDISC_NODETYPE=y
+CONFIG_IPV6_TUNNEL=y
+# CONFIG_IPV6_GRE is not set
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+# CONFIG_IPV6_MROUTE is not set
+# CONFIG_IPV6_SEG6_LWTUNNEL is not set
+# CONFIG_IPV6_SEG6_HMAC is not set
+# CONFIG_IPV6_RPL_LWTUNNEL is not set
+# CONFIG_IPV6_IOAM6_LWTUNNEL is not set
+# CONFIG_MPTCP is not set
+# CONFIG_NETWORK_SECMARK is not set
+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set
+CONFIG_NETFILTER=y
+CONFIG_NETFILTER_ADVANCED=y
+# CONFIG_BRIDGE_NETFILTER is not set
+
+#
+# Core Netfilter Configuration
+#
+# CONFIG_NETFILTER_INGRESS is not set
+# CONFIG_NETFILTER_EGRESS is not set
+CONFIG_NETFILTER_NETLINK=y
+# CONFIG_NETFILTER_NETLINK_ACCT is not set
+# CONFIG_NETFILTER_NETLINK_QUEUE is not set
+# CONFIG_NETFILTER_NETLINK_LOG is not set
+# CONFIG_NETFILTER_NETLINK_OSF is not set
+CONFIG_NF_CONNTRACK=y
+# CONFIG_NF_LOG_SYSLOG is not set
+# CONFIG_NF_CONNTRACK_MARK is not set
+# CONFIG_NF_CONNTRACK_ZONES is not set
+CONFIG_NF_CONNTRACK_PROCFS=y
+# CONFIG_NF_CONNTRACK_EVENTS is not set
+# CONFIG_NF_CONNTRACK_TIMEOUT is not set
+# CONFIG_NF_CONNTRACK_TIMESTAMP is not set
+# CONFIG_NF_CONNTRACK_LABELS is not set
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_GRE=y
+CONFIG_NF_CT_PROTO_SCTP=y
+# CONFIG_NF_CT_PROTO_UDPLITE is not set
+# CONFIG_NF_CONNTRACK_AMANDA is not set
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=m
+CONFIG_NF_CONNTRACK_IRC=m
+# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set
+# CONFIG_NF_CONNTRACK_SNMP is not set
+CONFIG_NF_CONNTRACK_PPTP=m
+# CONFIG_NF_CONNTRACK_SANE is not set
+CONFIG_NF_CONNTRACK_SIP=m
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NF_NAT=y
+CONFIG_NF_NAT_FTP=y
+CONFIG_NF_NAT_IRC=m
+CONFIG_NF_NAT_SIP=m
+CONFIG_NF_NAT_TFTP=y
+CONFIG_NF_NAT_REDIRECT=y
+CONFIG_NF_NAT_MASQUERADE=y
+# CONFIG_NF_TABLES is not set
+CONFIG_NETFILTER_XTABLES=y
+
+#
+# Xtables combined modules
+#
+CONFIG_NETFILTER_XT_MARK=y
+# CONFIG_NETFILTER_XT_CONNMARK is not set
+
+#
+# Xtables targets
+#
+# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set
+# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set
+CONFIG_NETFILTER_XT_TARGET_DSCP=y
+# CONFIG_NETFILTER_XT_TARGET_HL is not set
+# CONFIG_NETFILTER_XT_TARGET_HMARK is not set
+# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set
+# CONFIG_NETFILTER_XT_TARGET_LED is not set
+# CONFIG_NETFILTER_XT_TARGET_LOG is not set
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_NAT=y
+# CONFIG_NETFILTER_XT_TARGET_NETMAP is not set
+# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set
+# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
+CONFIG_NETFILTER_XT_TARGET_REDIRECT=y
+CONFIG_NETFILTER_XT_TARGET_MASQUERADE=y
+# CONFIG_NETFILTER_XT_TARGET_TEE is not set
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set
+
+#
+# Xtables matches
+#
+# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_BPF is not set
+# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set
+# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set
+# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+# CONFIG_NETFILTER_XT_MATCH_CPU is not set
+CONFIG_NETFILTER_XT_MATCH_DCCP=y
+# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+# CONFIG_NETFILTER_XT_MATCH_ECN is not set
+# CONFIG_NETFILTER_XT_MATCH_ESP is not set
+# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set
+# CONFIG_NETFILTER_XT_MATCH_HELPER is not set
+# CONFIG_NETFILTER_XT_MATCH_HL is not set
+# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
+# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set
+# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set
+# CONFIG_NETFILTER_XT_MATCH_OSF is not set
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+# CONFIG_NETFILTER_XT_MATCH_POLICY is not set
+# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set
+# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set
+# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set
+# CONFIG_NETFILTER_XT_MATCH_REALM is not set
+# CONFIG_NETFILTER_XT_MATCH_RECENT is not set
+CONFIG_NETFILTER_XT_MATCH_SCTP=y
+# CONFIG_NETFILTER_XT_MATCH_SOCKET is not set
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set
+# CONFIG_NETFILTER_XT_MATCH_STRING is not set
+# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set
+# CONFIG_NETFILTER_XT_MATCH_TIME is not set
+# CONFIG_NETFILTER_XT_MATCH_U32 is not set
+# end of Core Netfilter Configuration
+
+# CONFIG_IP_SET is not set
+# CONFIG_IP_VS is not set
+
+#
+# IP: Netfilter Configuration
+#
+CONFIG_IP_FFN=y
+CONFIG_IP_FFN_PROCFS=y
+CONFIG_NF_DEFRAG_IPV4=y
+CONFIG_IP_NF_IPTABLES_LEGACY=y
+# CONFIG_NF_SOCKET_IPV4 is not set
+CONFIG_NF_TPROXY_IPV4=y
+# CONFIG_NF_DUP_IPV4 is not set
+# CONFIG_NF_LOG_ARP is not set
+# CONFIG_NF_LOG_IPV4 is not set
+CONFIG_NF_REJECT_IPV4=y
+CONFIG_NF_NAT_PPTP=m
+CONFIG_NF_NAT_H323=m
+CONFIG_IP_NF_IPTABLES=y
+# CONFIG_IP_NF_MATCH_AH is not set
+# CONFIG_IP_NF_MATCH_ECN is not set
+# CONFIG_IP_NF_MATCH_RPFILTER is not set
+# CONFIG_IP_NF_MATCH_TTL is not set
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+# CONFIG_IP_NF_TARGET_SYNPROXY is not set
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+# CONFIG_IP_NF_TARGET_NETMAP is not set
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+# CONFIG_IP_NF_TARGET_ECN is not set
+# CONFIG_IP_NF_TARGET_TTL is not set
+# CONFIG_IP_NF_RAW is not set
+# CONFIG_IP_NF_ARPTABLES is not set
+# CONFIG_IP_NF_ARPFILTER is not set
+# end of IP: Netfilter Configuration
+
+#
+# IPv6: Netfilter Configuration
+#
+CONFIG_IP6_NF_IPTABLES_LEGACY=y
+CONFIG_IPV6_FFN=y
+CONFIG_IPV6_FFN_PROCFS=y
+# CONFIG_NF_SOCKET_IPV6 is not set
+CONFIG_NF_TPROXY_IPV6=y
+# CONFIG_NF_DUP_IPV6 is not set
+CONFIG_NF_REJECT_IPV6=y
+# CONFIG_NF_LOG_IPV6 is not set
+CONFIG_IP6_NF_IPTABLES=y
+# CONFIG_IP6_NF_MATCH_AH is not set
+# CONFIG_IP6_NF_MATCH_EUI64 is not set
+# CONFIG_IP6_NF_MATCH_FRAG is not set
+# CONFIG_IP6_NF_MATCH_OPTS is not set
+# CONFIG_IP6_NF_MATCH_HL is not set
+# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set
+# CONFIG_IP6_NF_MATCH_MH is not set
+# CONFIG_IP6_NF_MATCH_RPFILTER is not set
+# CONFIG_IP6_NF_MATCH_RT is not set
+# CONFIG_IP6_NF_MATCH_SRH is not set
+# CONFIG_IP6_NF_TARGET_HL is not set
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+# CONFIG_IP6_NF_TARGET_SYNPROXY is not set
+CONFIG_IP6_NF_MANGLE=y
+# CONFIG_IP6_NF_RAW is not set
+CONFIG_IP6_NF_NAT=y
+CONFIG_IP6_NF_TARGET_MASQUERADE=y
+# CONFIG_IP6_NF_TARGET_NPT is not set
+# end of IPv6: Netfilter Configuration
+
+CONFIG_NF_DEFRAG_IPV6=y
+# CONFIG_NF_CONNTRACK_BRIDGE is not set
+# CONFIG_BRIDGE_NF_EBTABLES_LEGACY is not set
+# CONFIG_BRIDGE_NF_EBTABLES is not set
+# CONFIG_IP_DCCP is not set
+# CONFIG_IP_SCTP is not set
+# CONFIG_RDS is not set
+# CONFIG_TIPC is not set
+# CONFIG_ATM is not set
+# CONFIG_L2TP is not set
+CONFIG_FBXATM=y
+CONFIG_FBXATM_STACK=y
+# CONFIG_FBXATM_REMOTE_STUB is not set
+# CONFIG_FBXATM_REMOTE_DRIVER is not set
+CONFIG_FBXBRIDGE=y
+CONFIG_STP=y
+CONFIG_BRIDGE=y
+# CONFIG_BRIDGE_STATE_MESSAGES is not set
+# CONFIG_BRIDGE_IGMP_SNOOPING is not set
+# CONFIG_BRIDGE_VLAN_FILTERING is not set
+# CONFIG_BRIDGE_MRP is not set
+# CONFIG_BRIDGE_CFM is not set
+CONFIG_NET_DSA=y
+# CONFIG_NET_DSA_TAG_NONE is not set
+# CONFIG_NET_DSA_TAG_AR9331 is not set
+CONFIG_NET_DSA_TAG_BRCM_COMMON=y
+CONFIG_NET_DSA_TAG_BRCM=y
+# CONFIG_NET_DSA_TAG_BRCM_LEGACY is not set
+# CONFIG_NET_DSA_TAG_BRCM_PREPEND is not set
+# CONFIG_NET_DSA_TAG_HELLCREEK is not set
+CONFIG_NET_DSA_TAG_BRCM_FBX=y
+# CONFIG_NET_DSA_TAG_GSWIP is not set
+# CONFIG_NET_DSA_TAG_DSA is not set
+# CONFIG_NET_DSA_TAG_EDSA is not set
+# CONFIG_NET_DSA_TAG_MTK is not set
+# CONFIG_NET_DSA_TAG_KSZ is not set
+# CONFIG_NET_DSA_TAG_OCELOT is not set
+# CONFIG_NET_DSA_TAG_OCELOT_8021Q is not set
+# CONFIG_NET_DSA_TAG_QCA is not set
+# CONFIG_NET_DSA_TAG_RTL4_A is not set
+# CONFIG_NET_DSA_TAG_RTL8_4 is not set
+# CONFIG_NET_DSA_TAG_RZN1_A5PSW is not set
+# CONFIG_NET_DSA_TAG_LAN9303 is not set
+# CONFIG_NET_DSA_TAG_SJA1105 is not set
+# CONFIG_NET_DSA_TAG_TRAILER is not set
+# CONFIG_NET_DSA_TAG_VSC73XX_8021Q is not set
+# CONFIG_NET_DSA_TAG_XRS700X is not set
+CONFIG_VLAN_8021Q=y
+# CONFIG_VLAN_8021Q_GVRP is not set
+# CONFIG_VLAN_8021Q_MVRP is not set
+CONFIG_VLAN_FBX=y
+CONFIG_LLC=y
+# CONFIG_LLC2 is not set
+# CONFIG_ATALK is not set
+# CONFIG_X25 is not set
+# CONFIG_LAPB is not set
+# CONFIG_PHONET is not set
+# CONFIG_6LOWPAN is not set
+# CONFIG_IEEE802154 is not set
+CONFIG_NET_SCHED=y
+
+#
+# Queueing/Scheduling
+#
+# CONFIG_NET_SCH_HTB is not set
+# CONFIG_NET_SCH_HFSC is not set
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+# CONFIG_NET_SCH_RED is not set
+# CONFIG_NET_SCH_SFB is not set
+CONFIG_NET_SCH_SFQ=y
+# CONFIG_NET_SCH_TEQL is not set
+# CONFIG_NET_SCH_TBF is not set
+# CONFIG_NET_SCH_CBS is not set
+# CONFIG_NET_SCH_ETF is not set
+# CONFIG_NET_SCH_TAPRIO is not set
+# CONFIG_NET_SCH_GRED is not set
+# CONFIG_NET_SCH_NETEM is not set
+CONFIG_NET_SCH_DRR=y
+# CONFIG_NET_SCH_MQPRIO is not set
+# CONFIG_NET_SCH_SKBPRIO is not set
+# CONFIG_NET_SCH_CHOKE is not set
+# CONFIG_NET_SCH_QFQ is not set
+# CONFIG_NET_SCH_CODEL is not set
+CONFIG_NET_SCH_FQ_CODEL=y
+# CONFIG_NET_SCH_CAKE is not set
+# CONFIG_NET_SCH_FQ is not set
+# CONFIG_NET_SCH_HHF is not set
+# CONFIG_NET_SCH_PIE is not set
+CONFIG_NET_SCH_INGRESS=y
+# CONFIG_NET_SCH_PLUG is not set
+# CONFIG_NET_SCH_ETS is not set
+# CONFIG_NET_SCH_DEFAULT is not set
+
+#
+# Classification
+#
+CONFIG_NET_CLS=y
+# CONFIG_NET_CLS_BASIC is not set
+# CONFIG_NET_CLS_ROUTE4 is not set
+# CONFIG_NET_CLS_FW is not set
+CONFIG_NET_CLS_U32=y
+# CONFIG_CLS_U32_PERF is not set
+CONFIG_CLS_U32_MARK=y
+# CONFIG_NET_CLS_FLOW is not set
+# CONFIG_NET_CLS_CGROUP is not set
+# CONFIG_NET_CLS_BPF is not set
+# CONFIG_NET_CLS_FLOWER is not set
+# CONFIG_NET_CLS_MATCHALL is not set
+# CONFIG_NET_EMATCH is not set
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+# CONFIG_NET_ACT_GACT is not set
+# CONFIG_NET_ACT_MIRRED is not set
+# CONFIG_NET_ACT_SAMPLE is not set
+# CONFIG_NET_ACT_NAT is not set
+# CONFIG_NET_ACT_PEDIT is not set
+# CONFIG_NET_ACT_SIMP is not set
+CONFIG_NET_ACT_SKBEDIT=y
+# CONFIG_NET_ACT_CSUM is not set
+# CONFIG_NET_ACT_MPLS is not set
+# CONFIG_NET_ACT_VLAN is not set
+# CONFIG_NET_ACT_BPF is not set
+# CONFIG_NET_ACT_SKBMOD is not set
+# CONFIG_NET_ACT_IFE is not set
+# CONFIG_NET_ACT_TUNNEL_KEY is not set
+# CONFIG_NET_ACT_GATE is not set
+# CONFIG_NET_TC_SKB_EXT is not set
+CONFIG_NET_SCH_FIFO=y
+# CONFIG_DCB is not set
+# CONFIG_DNS_RESOLVER is not set
+CONFIG_BATMAN_ADV=y
+CONFIG_BATMAN_ADV_BATMAN_V=y
+# CONFIG_BATMAN_ADV_BLA is not set
+# CONFIG_BATMAN_ADV_DAT is not set
+# CONFIG_BATMAN_ADV_NC is not set
+# CONFIG_BATMAN_ADV_MCAST is not set
+# CONFIG_BATMAN_ADV_DEBUG is not set
+CONFIG_BATMAN_ADV_FBX=y
+CONFIG_BATMAN_ADV_FBX_MTU=y
+CONFIG_BATMAN_ADV_FBX_SLAP=y
+# CONFIG_BATMAN_ADV_FBX_PERIF_ROUTER is not set
+# CONFIG_OPENVSWITCH is not set
+# CONFIG_VSOCKETS is not set
+# CONFIG_NETLINK_DIAG is not set
+# CONFIG_MPLS is not set
+# CONFIG_NET_NSH is not set
+# CONFIG_HSR is not set
+CONFIG_NET_SWITCHDEV=y
+# CONFIG_NET_L3_MASTER_DEV is not set
+CONFIG_QRTR=y
+# CONFIG_QRTR_TUN is not set
+CONFIG_QRTR_MHI=y
+# CONFIG_NET_NCSI is not set
+# CONFIG_PCPU_DEV_REFCNT is not set
+CONFIG_MAX_SKB_FRAGS=17
+CONFIG_RPS=y
+CONFIG_RFS_ACCEL=y
+CONFIG_SOCK_RX_QUEUE_MAPPING=y
+CONFIG_XPS=y
+# CONFIG_CGROUP_NET_PRIO is not set
+# CONFIG_CGROUP_NET_CLASSID is not set
+CONFIG_NET_RX_BUSY_POLL=y
+CONFIG_BQL=y
+CONFIG_NET_FLOW_LIMIT=y
+
+#
+# Network testing
+#
+CONFIG_NET_PKTGEN=y
+# end of Network testing
+# end of Networking options
+
+# CONFIG_HAMRADIO is not set
+# CONFIG_CAN is not set
+# CONFIG_BT is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_AF_KCM is not set
+# CONFIG_MCTP is not set
+CONFIG_FIB_RULES=y
+CONFIG_WIRELESS=y
+CONFIG_CFG80211=y
+CONFIG_NL80211_TESTMODE=y
+# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+# CONFIG_CFG80211_REQUIRE_SIGNED_REGDB is not set
+# CONFIG_CFG80211_REG_CELLULAR_HINTS is not set
+# CONFIG_CFG80211_REG_RELAX_NO_IR is not set
+CONFIG_CFG80211_DEFAULT_PS=y
+CONFIG_CFG80211_DEBUGFS=y
+CONFIG_CFG80211_DFS_CACHE=y
+CONFIG_CFG80211_CRDA_SUPPORT=y
+# CONFIG_CFG80211_WEXT is not set
+# CONFIG_FBX80211 is not set
+CONFIG_MAC80211=y
+CONFIG_MAC80211_HAS_RC=y
+CONFIG_MAC80211_RC_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y
+CONFIG_MAC80211_RC_DEFAULT="minstrel_ht"
+CONFIG_MAC80211_MESH=y
+CONFIG_MAC80211_LEDS=y
+CONFIG_MAC80211_DEBUGFS=y
+# CONFIG_MAC80211_DEBUG_MENU is not set
+CONFIG_MAC80211_STA_HASH_MAX_SIZE=0
+# CONFIG_RFKILL is not set
+# CONFIG_NET_9P is not set
+# CONFIG_CAIF is not set
+# CONFIG_CEPH_LIB is not set
+# CONFIG_NFC is not set
+# CONFIG_PSAMPLE is not set
+# CONFIG_NET_IFE is not set
+# CONFIG_LWTUNNEL is not set
+CONFIG_DST_CACHE=y
+CONFIG_GRO_CELLS=y
+CONFIG_NET_SELFTESTS=y
+CONFIG_NET_DEVLINK=y
+CONFIG_PAGE_POOL=y
+CONFIG_PAGE_POOL_STATS=y
+# CONFIG_FAILOVER is not set
+# CONFIG_ETHTOOL_NETLINK is not set
+
+#
+# Device Drivers
+#
+CONFIG_ARM_AMBA=y
+CONFIG_HAVE_PCI=y
+CONFIG_GENERIC_PCI_IOMAP=y
+CONFIG_PCI=y
+CONFIG_PCI_DOMAINS=y
+CONFIG_PCI_DOMAINS_GENERIC=y
+CONFIG_PCI_SYSCALL=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIEAER=y
+# CONFIG_PCIEAER_INJECT is not set
+# CONFIG_PCIE_ECRC is not set
+# CONFIG_PCIEASPM is not set
+# CONFIG_PCIE_DPC is not set
+# CONFIG_PCIE_PTM is not set
+CONFIG_PCI_MSI=y
+CONFIG_PCI_QUIRKS=y
+# CONFIG_PCI_DEBUG is not set
+# CONFIG_PCI_STUB is not set
+# CONFIG_PCI_IOV is not set
+# CONFIG_PCI_NPEM is not set
+# CONFIG_PCI_PRI is not set
+# CONFIG_PCI_PASID is not set
+# CONFIG_PCIE_TPH is not set
+# CONFIG_PCI_DYNAMIC_OF_NODES is not set
+# CONFIG_PCIE_BUS_TUNE_OFF is not set
+CONFIG_PCIE_BUS_DEFAULT=y
+# CONFIG_PCIE_BUS_SAFE is not set
+# CONFIG_PCIE_BUS_PERFORMANCE is not set
+# CONFIG_PCIE_BUS_PEER2PEER is not set
+# CONFIG_VGA_ARB is not set
+# CONFIG_HOTPLUG_PCI is not set
+
+#
+# PCI controller drivers
+#
+# CONFIG_PCIE_ALTERA is not set
+# CONFIG_PCIE_BRCMSTB is not set
+# CONFIG_PCI_HOST_THUNDER_PEM is not set
+# CONFIG_PCI_HOST_THUNDER_ECAM is not set
+# CONFIG_PCI_FTPCI100 is not set
+# CONFIG_PCI_HOST_GENERIC is not set
+CONFIG_PCIE_BCM63XX=y
+# CONFIG_PCI_XGENE is not set
+# CONFIG_PCIE_XILINX is not set
+
+#
+# Cadence-based PCIe controllers
+#
+# CONFIG_PCIE_CADENCE_PLAT_HOST is not set
+# end of Cadence-based PCIe controllers
+
+#
+# DesignWare-based PCIe controllers
+#
+# CONFIG_PCIE_AL is not set
+# CONFIG_PCI_MESON is not set
+# CONFIG_PCI_HISI is not set
+# CONFIG_PCIE_KIRIN is not set
+# CONFIG_PCIE_DW_PLAT_HOST is not set
+# end of DesignWare-based PCIe controllers
+
+#
+# Mobiveil-based PCIe controllers
+#
+# end of Mobiveil-based PCIe controllers
+
+#
+# PLDA-based PCIe controllers
+#
+# CONFIG_PCIE_MICROCHIP_HOST is not set
+# end of PLDA-based PCIe controllers
+# end of PCI controller drivers
+
+#
+# PCI Endpoint
+#
+# CONFIG_PCI_ENDPOINT is not set
+# end of PCI Endpoint
+
+#
+# PCI switch controller drivers
+#
+# CONFIG_PCI_SW_SWITCHTEC is not set
+# end of PCI switch controller drivers
+
+# CONFIG_CXL_BUS is not set
+# CONFIG_PCCARD is not set
+# CONFIG_RAPIDIO is not set
+
+#
+# Generic Driver Options
+#
+# CONFIG_UEVENT_HELPER is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_DEVTMPFS_SAFE is not set
+CONFIG_STANDALONE=y
+CONFIG_PREVENT_FIRMWARE_BUILD=y
+
+#
+# Firmware loader
+#
+CONFIG_FW_LOADER=y
+CONFIG_FW_LOADER_PAGED_BUF=y
+CONFIG_FW_LOADER_SYSFS=y
+CONFIG_EXTRA_FIRMWARE="aquantia_phy/aqr112.uc xrdp/dsl_firmware/bcm63xx_dsl_runner.rpgm xrdp/enet_firmware/bcm63xx_enet_runner.rpgm"
+CONFIG_EXTRA_FIRMWARE_DIR="firmware"
+CONFIG_FW_LOADER_USER_HELPER=y
+CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+# CONFIG_FW_LOADER_COMPRESS is not set
+# CONFIG_FW_UPLOAD is not set
+# end of Firmware loader
+
+CONFIG_WANT_DEV_COREDUMP=y
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+# CONFIG_DEBUG_DRIVER is not set
+# CONFIG_DEBUG_DEVRES is not set
+# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set
+# CONFIG_TEST_ASYNC_DRIVER_PROBE is not set
+CONFIG_GENERIC_CPU_DEVICES=y
+CONFIG_GENERIC_CPU_AUTOPROBE=y
+CONFIG_GENERIC_CPU_VULNERABILITIES=y
+CONFIG_REGMAP=y
+CONFIG_REGMAP_I2C=y
+CONFIG_REGMAP_MMIO=y
+CONFIG_DMA_SHARED_BUFFER=y
+# CONFIG_DMA_FENCE_TRACE is not set
+CONFIG_GENERIC_ARCH_TOPOLOGY=y
+# CONFIG_FW_DEVLINK_SYNC_STATE_TIMEOUT is not set
+# end of Generic Driver Options
+
+#
+# Bus devices
+#
+# CONFIG_MOXTET is not set
+# CONFIG_VEXPRESS_CONFIG is not set
+CONFIG_MHI_BUS=y
+# CONFIG_MHI_BUS_DEBUG is not set
+# CONFIG_MHI_BUS_PCI_GENERIC is not set
+# CONFIG_MHI_BUS_EP is not set
+# end of Bus devices
+
+#
+# Cache Drivers
+#
+# end of Cache Drivers
+
+# CONFIG_CONNECTOR is not set
+
+#
+# Firmware Drivers
+#
+
+#
+# ARM System Control and Management Interface Protocol
+#
+# CONFIG_ARM_SCMI_PROTOCOL is not set
+# end of ARM System Control and Management Interface Protocol
+
+# CONFIG_FIRMWARE_MEMMAP is not set
+# CONFIG_FW_CFG_SYSFS is not set
+# CONFIG_ARM_FFA_TRANSPORT is not set
+# CONFIG_GOOGLE_FIRMWARE is not set
+CONFIG_ARM_PSCI_FW=y
+
+#
+# Qualcomm firmware drivers
+#
+# end of Qualcomm firmware drivers
+
+CONFIG_HAVE_ARM_SMCCC=y
+CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y
+# CONFIG_ARM_SMCCC_SOC_ID is not set
+
+#
+# Tegra firmware driver
+#
+# end of Tegra firmware driver
+# end of Firmware Drivers
+
+# CONFIG_GNSS is not set
+CONFIG_FREEBOX_PROCFS=y
+CONFIG_MTD=y
+# CONFIG_MTD_TESTS is not set
+CONFIG_MTD_ERASE_PRINTK=y
+
+#
+# Partition parsers
+#
+# CONFIG_MTD_BRCM_U_BOOT is not set
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_OF_PARTS=y
+# CONFIG_MTD_OF_PARTS_BCM4908 is not set
+# CONFIG_MTD_OF_PARTS_LINKSYS_NS is not set
+# CONFIG_MTD_OF_PARTS_IGNORE_RO is not set
+# CONFIG_MTD_AFS_PARTS is not set
+# CONFIG_MTD_REDBOOT_PARTS is not set
+# CONFIG_MTD_FBX6HD_PARTS is not set
+# end of Partition parsers
+
+#
+# User Modules And Translation Layers
+#
+CONFIG_MTD_BLKDEVS=y
+CONFIG_MTD_BLOCK=y
+
+#
+# Note that in some cases UBI block is preferred. See MTD_UBI_BLOCK.
+#
+# CONFIG_FTL is not set
+# CONFIG_NFTL is not set
+# CONFIG_INFTL is not set
+# CONFIG_RFD_FTL is not set
+# CONFIG_SSFDC is not set
+# CONFIG_SM_FTL is not set
+# CONFIG_MTD_OOPS is not set
+# CONFIG_MTD_PARTITIONED_MASTER is not set
+
+#
+# RAM/ROM/Flash chip drivers
+#
+# CONFIG_MTD_CFI is not set
+# CONFIG_MTD_JEDECPROBE is not set
+CONFIG_MTD_MAP_BANK_WIDTH_1=y
+CONFIG_MTD_MAP_BANK_WIDTH_2=y
+CONFIG_MTD_MAP_BANK_WIDTH_4=y
+CONFIG_MTD_CFI_I1=y
+CONFIG_MTD_CFI_I2=y
+# CONFIG_MTD_RAM is not set
+# CONFIG_MTD_ROM is not set
+# CONFIG_MTD_ABSENT is not set
+# end of RAM/ROM/Flash chip drivers
+
+#
+# Mapping drivers for chip access
+#
+# CONFIG_MTD_COMPLEX_MAPPINGS is not set
+# CONFIG_MTD_PLATRAM is not set
+# end of Mapping drivers for chip access
+
+#
+# Self-contained MTD device drivers
+#
+# CONFIG_MTD_PMC551 is not set
+# CONFIG_MTD_DATAFLASH is not set
+# CONFIG_MTD_MCHP23K256 is not set
+# CONFIG_MTD_MCHP48L640 is not set
+# CONFIG_MTD_SST25L is not set
+# CONFIG_MTD_SLRAM is not set
+# CONFIG_MTD_PHRAM is not set
+# CONFIG_MTD_MTDRAM is not set
+# CONFIG_MTD_BLOCK2MTD is not set
+
+#
+# Disk-On-Chip Device Drivers
+#
+# CONFIG_MTD_DOCG3 is not set
+# end of Self-contained MTD device drivers
+
+#
+# NAND
+#
+# CONFIG_MTD_ONENAND is not set
+# CONFIG_MTD_RAW_NAND is not set
+# CONFIG_MTD_SPI_NAND is not set
+
+#
+# ECC engine support
+#
+# CONFIG_MTD_NAND_ECC_SW_HAMMING is not set
+# CONFIG_MTD_NAND_ECC_SW_BCH is not set
+# CONFIG_MTD_NAND_ECC_MXIC is not set
+# end of ECC engine support
+# end of NAND
+
+#
+# LPDDR & LPDDR2 PCM memory drivers
+#
+# CONFIG_MTD_LPDDR is not set
+# end of LPDDR & LPDDR2 PCM memory drivers
+
+CONFIG_MTD_SPI_NOR=y
+# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set
+# CONFIG_MTD_SPI_NOR_SWP_DISABLE is not set
+CONFIG_MTD_SPI_NOR_SWP_DISABLE_ON_VOLATILE=y
+# CONFIG_MTD_SPI_NOR_SWP_KEEP is not set
+# CONFIG_MTD_UBI is not set
+# CONFIG_MTD_HYPERBUS is not set
+CONFIG_DTC=y
+CONFIG_OF=y
+# CONFIG_OF_UNITTEST is not set
+CONFIG_OF_DTB_BUILTIN_LIST=""
+CONFIG_OF_FLATTREE=y
+CONFIG_OF_EARLY_FLATTREE=y
+CONFIG_OF_KOBJ=y
+CONFIG_OF_DYNAMIC=y
+CONFIG_OF_ADDRESS=y
+CONFIG_OF_IRQ=y
+CONFIG_OF_RESERVED_MEM=y
+CONFIG_OF_RESOLVE=y
+CONFIG_OF_OVERLAY=y
+CONFIG_OF_CONFIGFS=y
+# CONFIG_PARPORT is not set
+CONFIG_BLK_DEV=y
+# CONFIG_BLK_DEV_NULL_BLK is not set
+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set
+# CONFIG_ZRAM is not set
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+# CONFIG_BLK_DEV_DRBD is not set
+# CONFIG_BLK_DEV_NBD is not set
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_COUNT=1
+CONFIG_BLK_DEV_RAM_SIZE=65536
+# CONFIG_CDROM_PKTCDVD is not set
+# CONFIG_ATA_OVER_ETH is not set
+# CONFIG_BLK_DEV_RBD is not set
+# CONFIG_BLK_DEV_UBLK is not set
+
+#
+# NVME Support
+#
+# CONFIG_BLK_DEV_NVME is not set
+# CONFIG_NVME_FC is not set
+# CONFIG_NVME_TCP is not set
+# CONFIG_NVME_TARGET is not set
+# end of NVME Support
+
+#
+# Misc devices
+#
+# CONFIG_AD525X_DPOT is not set
+# CONFIG_DUMMY_IRQ is not set
+# CONFIG_PHANTOM is not set
+# CONFIG_RPMB is not set
+# CONFIG_TIFM_CORE is not set
+# CONFIG_ICS932S401 is not set
+# CONFIG_ENCLOSURE_SERVICES is not set
+# CONFIG_HP_ILO is not set
+# CONFIG_APDS9802ALS is not set
+# CONFIG_ISL29003 is not set
+# CONFIG_ISL29020 is not set
+# CONFIG_SENSORS_TSL2550 is not set
+# CONFIG_SENSORS_BH1770 is not set
+# CONFIG_SENSORS_APDS990X is not set
+# CONFIG_HMC6352 is not set
+# CONFIG_DS1682 is not set
+# CONFIG_INTELCE_PIC16PMU is not set
+CONFIG_FBXSERIAL_OF=y
+# CONFIG_LATTICE_ECP3_CONFIG is not set
+# CONFIG_SRAM is not set
+# CONFIG_DW_XDATA_PCIE is not set
+# CONFIG_PCI_ENDPOINT_TEST is not set
+# CONFIG_XILINX_SDFEC is not set
+# CONFIG_OPEN_DICE is not set
+# CONFIG_VCPU_STALL_DETECTOR is not set
+# CONFIG_MCHP_LAN966X_PCI is not set
+CONFIG_DGASP=y
+# CONFIG_C2PORT is not set
+
+#
+# EEPROM support
+#
+CONFIG_EEPROM_AT24=m
+# CONFIG_EEPROM_AT25 is not set
+# CONFIG_EEPROM_MAX6875 is not set
+# CONFIG_EEPROM_93CX6 is not set
+# CONFIG_EEPROM_93XX46 is not set
+# CONFIG_EEPROM_IDT_89HPESX is not set
+# CONFIG_EEPROM_EE1004 is not set
+# CONFIG_EEPROM_EE1004_RAW is not set
+# end of EEPROM support
+
+# CONFIG_CB710_CORE is not set
+# CONFIG_SENSORS_LIS3_SPI is not set
+# CONFIG_SENSORS_LIS3_I2C is not set
+# CONFIG_ALTERA_STAPL is not set
+# CONFIG_VMWARE_VMCI is not set
+# CONFIG_GENWQE is not set
+# CONFIG_ECHO is not set
+# CONFIG_BCM_VK is not set
+# CONFIG_MISC_ALCOR_PCI is not set
+# CONFIG_MISC_RTSX_PCI is not set
+# CONFIG_MISC_RTSX_USB is not set
+# CONFIG_PVPANIC is not set
+# CONFIG_GP_PCI1XXXX is not set
+# CONFIG_KEBA_CP500 is not set
+
+#
+# RemoTI support
+#
+# end of RemoTI support
+
+#
+# HDMI CEC support
+#
+# CONFIG_HDMI_CEC is not set
+# end of HDMI CEC support
+# end of Misc devices
+
+#
+# SCSI device support
+#
+CONFIG_SCSI_MOD=y
+# CONFIG_RAID_ATTRS is not set
+CONFIG_SCSI_COMMON=y
+CONFIG_SCSI=y
+CONFIG_SCSI_DMA=y
+# CONFIG_SCSI_PROC_FS is not set
+
+#
+# SCSI support type (disk, tape, CD-ROM)
+#
+CONFIG_BLK_DEV_SD=y
+# CONFIG_CHR_DEV_ST is not set
+# CONFIG_BLK_DEV_SR is not set
+CONFIG_CHR_DEV_SG=y
+CONFIG_BLK_DEV_BSG=y
+# CONFIG_CHR_DEV_SCH is not set
+# CONFIG_SCSI_CONSTANTS is not set
+# CONFIG_SCSI_LOGGING is not set
+CONFIG_SCSI_SCAN_ASYNC=y
+
+#
+# SCSI Transports
+#
+# CONFIG_SCSI_SPI_ATTRS is not set
+# CONFIG_SCSI_FC_ATTRS is not set
+# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
+# CONFIG_SCSI_SAS_LIBSAS is not set
+# CONFIG_SCSI_SRP_ATTRS is not set
+# end of SCSI Transports
+
+# CONFIG_SCSI_LOWLEVEL is not set
+# CONFIG_SCSI_DH is not set
+# end of SCSI device support
+
+# CONFIG_ATA is not set
+CONFIG_MD=y
+# CONFIG_BLK_DEV_MD is not set
+# CONFIG_MD_BITMAP_FILE is not set
+# CONFIG_BCACHE is not set
+CONFIG_BLK_DEV_DM_BUILTIN=y
+CONFIG_BLK_DEV_DM=y
+# CONFIG_DM_DEBUG is not set
+# CONFIG_DM_UNSTRIPED is not set
+CONFIG_DM_CRYPT=y
+# CONFIG_DM_SNAPSHOT is not set
+# CONFIG_DM_THIN_PROVISIONING is not set
+# CONFIG_DM_CACHE is not set
+# CONFIG_DM_WRITECACHE is not set
+# CONFIG_DM_EBS is not set
+# CONFIG_DM_ERA is not set
+# CONFIG_DM_CLONE is not set
+# CONFIG_DM_MIRROR is not set
+# CONFIG_DM_RAID is not set
+# CONFIG_DM_ZERO is not set
+# CONFIG_DM_MULTIPATH is not set
+# CONFIG_DM_DELAY is not set
+# CONFIG_DM_DUST is not set
+# CONFIG_DM_INIT is not set
+# CONFIG_DM_UEVENT is not set
+# CONFIG_DM_FLAKEY is not set
+# CONFIG_DM_VERITY is not set
+# CONFIG_DM_SWITCH is not set
+# CONFIG_DM_LOG_WRITES is not set
+# CONFIG_DM_INTEGRITY is not set
+# CONFIG_DM_AUDIT is not set
+# CONFIG_DM_VDO is not set
+# CONFIG_TARGET_CORE is not set
+# CONFIG_FUSION is not set
+
+#
+# IEEE 1394 (FireWire) support
+#
+# CONFIG_FIREWIRE is not set
+# CONFIG_FIREWIRE_NOSY is not set
+# end of IEEE 1394 (FireWire) support
+
+CONFIG_NETDEVICES=y
+CONFIG_MII=y
+CONFIG_NET_CORE=y
+# CONFIG_BONDING is not set
+CONFIG_DUMMY=y
+CONFIG_WIREGUARD=y
+# CONFIG_WIREGUARD_DEBUG is not set
+# CONFIG_EQUALIZER is not set
+# CONFIG_NET_FC is not set
+# CONFIG_NET_TEAM is not set
+# CONFIG_MACVLAN is not set
+# CONFIG_IPVLAN is not set
+# CONFIG_VXLAN is not set
+# CONFIG_GENEVE is not set
+# CONFIG_BAREUDP is not set
+# CONFIG_GTP is not set
+# CONFIG_PFCP is not set
+# CONFIG_AMT is not set
+# CONFIG_MACSEC is not set
+# CONFIG_NETCONSOLE is not set
+CONFIG_TUN=y
+# CONFIG_TUN_VNET_CROSS_LE is not set
+CONFIG_VETH=y
+# CONFIG_NLMON is not set
+# CONFIG_MHI_NET is not set
+# CONFIG_ARCNET is not set
+
+#
+# Distributed Switch Architecture drivers
+#
+# CONFIG_B53 is not set
+# CONFIG_NET_DSA_BCM_SF2 is not set
+# CONFIG_NET_DSA_LOOP is not set
+# CONFIG_NET_DSA_LANTIQ_GSWIP is not set
+# CONFIG_NET_DSA_MT7530 is not set
+# CONFIG_NET_DSA_MV88E6060 is not set
+# CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON is not set
+# CONFIG_NET_DSA_MV88E6XXX is not set
+# CONFIG_NET_DSA_AR9331 is not set
+# CONFIG_NET_DSA_QCA8K is not set
+# CONFIG_NET_DSA_SJA1105 is not set
+# CONFIG_NET_DSA_XRS700X_I2C is not set
+# CONFIG_NET_DSA_XRS700X_MDIO is not set
+# CONFIG_NET_DSA_REALTEK is not set
+# CONFIG_NET_DSA_SMSC_LAN9303_I2C is not set
+# CONFIG_NET_DSA_SMSC_LAN9303_MDIO is not set
+# CONFIG_NET_DSA_VITESSE_VSC73XX_SPI is not set
+# CONFIG_NET_DSA_VITESSE_VSC73XX_PLATFORM is not set
+# end of Distributed Switch Architecture drivers
+
+CONFIG_ETHERNET=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_AGERE is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+# CONFIG_ALTERA_TSE is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
+# CONFIG_NET_VENDOR_AMD is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
+# CONFIG_NET_VENDOR_ARC is not set
+# CONFIG_NET_VENDOR_ASIX is not set
+# CONFIG_NET_VENDOR_ATHEROS is not set
+CONFIG_NET_VENDOR_BROADCOM=y
+# CONFIG_B44 is not set
+# CONFIG_BCM4908_ENET is not set
+# CONFIG_BCM63XX_ENET_RUNNER is not set
+CONFIG_BCM63158_SF2=y
+CONFIG_BCM63158_ENET_RUNNER=y
+CONFIG_BCM63158_ENET_RUNNER_FF=y
+# CONFIG_BCMGENET is not set
+# CONFIG_BNX2 is not set
+# CONFIG_CNIC is not set
+# CONFIG_TIGON3 is not set
+# CONFIG_BNX2X is not set
+# CONFIG_SYSTEMPORT is not set
+# CONFIG_BNXT is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
+# CONFIG_NET_VENDOR_CAVIUM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
+# CONFIG_NET_VENDOR_DAVICOM is not set
+# CONFIG_DNET is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_ENGLEDER is not set
+# CONFIG_NET_VENDOR_EZCHIP is not set
+# CONFIG_NET_VENDOR_FUNGIBLE is not set
+# CONFIG_NET_VENDOR_GOOGLE is not set
+# CONFIG_NET_VENDOR_HISILICON is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_JME is not set
+# CONFIG_NET_VENDOR_ADI is not set
+# CONFIG_NET_VENDOR_LITEX is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_META is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_MICROSEMI is not set
+# CONFIG_NET_VENDOR_MICROSOFT is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_FEALNX is not set
+# CONFIG_NET_VENDOR_NI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_ETHOC is not set
+# CONFIG_OA_TC6 is not set
+# CONFIG_NET_VENDOR_PACKET_ENGINES is not set
+CONFIG_NET_VENDOR_PENSANDO=y
+# CONFIG_IONIC is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_QUALCOMM is not set
+# CONFIG_NET_VENDOR_RDC is not set
+CONFIG_NET_VENDOR_REALTEK=y
+# CONFIG_8139CP is not set
+# CONFIG_8139TOO is not set
+CONFIG_R8169=m
+# CONFIG_RTASE is not set
+# CONFIG_NET_VENDOR_RENESAS is not set
+# CONFIG_NET_VENDOR_ROCKER is not set
+# CONFIG_NET_VENDOR_SAMSUNG is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_SOCIONEXT is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_VERTEXCOM is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WANGXUN is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_NET_VENDOR_XILINX is not set
+# CONFIG_FDDI is not set
+# CONFIG_HIPPI is not set
+CONFIG_PHYLINK=y
+CONFIG_PHYLIB=y
+CONFIG_SWPHY=y
+# CONFIG_LED_TRIGGER_PHY is not set
+CONFIG_PHYLIB_LEDS=y
+CONFIG_FIXED_PHY=y
+# CONFIG_SFP is not set
+
+#
+# MII PHY device drivers
+#
+# CONFIG_AIR_EN8811H_PHY is not set
+# CONFIG_AMD_PHY is not set
+# CONFIG_ADIN_PHY is not set
+# CONFIG_ADIN1100_PHY is not set
+CONFIG_AQUANTIA_PHY=y
+# CONFIG_AX88796B_PHY is not set
+CONFIG_BROADCOM_PHY=y
+# CONFIG_BCM54140_PHY is not set
+CONFIG_BCM7XXX_PHY=y
+# CONFIG_BCM84881_PHY is not set
+# CONFIG_BCM87XX_PHY is not set
+CONFIG_BCM_NET_PHYLIB=y
+# CONFIG_CICADA_PHY is not set
+# CONFIG_CORTINA_PHY is not set
+# CONFIG_DAVICOM_PHY is not set
+# CONFIG_ICPLUS_PHY is not set
+# CONFIG_LXT_PHY is not set
+# CONFIG_INTEL_XWAY_PHY is not set
+# CONFIG_LSI_ET1011C_PHY is not set
+# CONFIG_MARVELL_PHY is not set
+# CONFIG_MARVELL_10G_PHY is not set
+# CONFIG_MARVELL_88Q2XXX_PHY is not set
+# CONFIG_MARVELL_88X2222_PHY is not set
+# CONFIG_MAXLINEAR_GPHY is not set
+# CONFIG_MEDIATEK_GE_PHY is not set
+# CONFIG_MICREL_PHY is not set
+# CONFIG_MICROCHIP_T1S_PHY is not set
+# CONFIG_MICROCHIP_PHY is not set
+# CONFIG_MICROCHIP_T1_PHY is not set
+# CONFIG_MICROSEMI_PHY is not set
+# CONFIG_MOTORCOMM_PHY is not set
+# CONFIG_NATIONAL_PHY is not set
+# CONFIG_NXP_CBTX_PHY is not set
+# CONFIG_NXP_C45_TJA11XX_PHY is not set
+# CONFIG_NXP_TJA11XX_PHY is not set
+# CONFIG_NCN26000_PHY is not set
+# CONFIG_QCA83XX_PHY is not set
+# CONFIG_QCA808X_PHY is not set
+# CONFIG_QCA807X_PHY is not set
+# CONFIG_QCA8084_PHY is not set
+# CONFIG_QSEMI_PHY is not set
+CONFIG_REALTEK_PHY=y
+# CONFIG_RENESAS_PHY is not set
+# CONFIG_ROCKCHIP_PHY is not set
+# CONFIG_SMSC_PHY is not set
+# CONFIG_STE10XP is not set
+# CONFIG_TERANETICS_PHY is not set
+# CONFIG_DP83822_PHY is not set
+# CONFIG_DP83TC811_PHY is not set
+# CONFIG_DP83848_PHY is not set
+# CONFIG_DP83867_PHY is not set
+# CONFIG_DP83869_PHY is not set
+# CONFIG_DP83TD510_PHY is not set
+# CONFIG_DP83TG720_PHY is not set
+# CONFIG_VITESSE_PHY is not set
+# CONFIG_XILINX_GMII2RGMII is not set
+# CONFIG_MICREL_KS8995MA is not set
+CONFIG_MDIO_DEVICE=y
+CONFIG_MDIO_BUS=y
+CONFIG_FWNODE_MDIO=y
+CONFIG_OF_MDIO=y
+CONFIG_MDIO_DEVRES=y
+# CONFIG_MDIO_BITBANG is not set
+# CONFIG_MDIO_BCM_UNIMAC is not set
+# CONFIG_MDIO_HISI_FEMAC is not set
+# CONFIG_MDIO_MVUSB is not set
+# CONFIG_MDIO_MSCC_MIIM is not set
+# CONFIG_MDIO_OCTEON is not set
+# CONFIG_MDIO_IPQ4019 is not set
+# CONFIG_MDIO_IPQ8064 is not set
+# CONFIG_MDIO_THUNDER is not set
+
+#
+# MDIO Multiplexers
+#
+# CONFIG_MDIO_BUS_MUX_GPIO is not set
+# CONFIG_MDIO_BUS_MUX_MULTIPLEXER is not set
+# CONFIG_MDIO_BUS_MUX_MMIOREG is not set
+
+#
+# PCS device drivers
+#
+# CONFIG_PCS_XPCS is not set
+# end of PCS device drivers
+
+CONFIG_PPP=y
+# CONFIG_PPP_BSDCOMP is not set
+# CONFIG_PPP_DEFLATE is not set
+# CONFIG_PPP_FILTER is not set
+CONFIG_PPP_MPPE=y
+# CONFIG_PPP_MULTILINK is not set
+CONFIG_PPPOE=y
+# CONFIG_PPPOE_HASH_BITS_1 is not set
+# CONFIG_PPPOE_HASH_BITS_2 is not set
+CONFIG_PPPOE_HASH_BITS_4=y
+# CONFIG_PPPOE_HASH_BITS_8 is not set
+CONFIG_PPPOE_HASH_BITS=4
+CONFIG_PPTP=y
+# CONFIG_PPP_ASYNC is not set
+# CONFIG_PPP_SYNC_TTY is not set
+# CONFIG_SLIP is not set
+CONFIG_SLHC=y
+# CONFIG_USB_NET_DRIVERS is not set
+CONFIG_WLAN=y
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+CONFIG_ATH_COMMON=y
+CONFIG_WLAN_VENDOR_ATH=y
+CONFIG_ATH_DEBUG=y
+CONFIG_ATH_REG_IGNORE=y
+CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS=y
+CONFIG_ATH_REG_DYNAMIC_USER_CERT_TESTING=y
+# CONFIG_ATH5K is not set
+# CONFIG_ATH5K_PCI is not set
+CONFIG_ATH9K_HW=m
+CONFIG_ATH9K_COMMON=m
+CONFIG_ATH9K_COMMON_DEBUG=y
+CONFIG_ATH9K_BTCOEX_SUPPORT=y
+CONFIG_ATH9K=m
+CONFIG_ATH9K_PCI=y
+# CONFIG_ATH9K_AHB is not set
+CONFIG_ATH9K_DEBUGFS=y
+CONFIG_ATH9K_STATION_STATISTICS=y
+CONFIG_ATH9K_TX99=y
+# CONFIG_ATH9K_DFS_CERTIFIED is not set
+# CONFIG_ATH9K_DYNACK is not set
+# CONFIG_ATH9K_CHANNEL_CONTEXT is not set
+# CONFIG_ATH9K_PCOEM is not set
+# CONFIG_ATH9K_PCI_NO_EEPROM is not set
+# CONFIG_ATH9K_HTC is not set
+# CONFIG_ATH9K_HWRNG is not set
+# CONFIG_ATH9K_COMMON_SPECTRAL is not set
+# CONFIG_CARL9170 is not set
+# CONFIG_ATH6KL is not set
+# CONFIG_AR5523 is not set
+# CONFIG_WIL6210 is not set
+CONFIG_ATH10K=y
+CONFIG_ATH10K_CE=y
+CONFIG_ATH10K_PCI=m
+# CONFIG_ATH10K_AHB is not set
+# CONFIG_ATH10K_SDIO is not set
+# CONFIG_ATH10K_USB is not set
+CONFIG_ATH10K_DEBUG=y
+CONFIG_ATH10K_DEBUGFS=y
+CONFIG_ATH10K_LEDS=y
+# CONFIG_ATH10K_SPECTRAL is not set
+CONFIG_ATH10K_DFS_CERTIFIED=y
+# CONFIG_WCN36XX is not set
+CONFIG_ATH11K=y
+CONFIG_ATH11K_PCI=m
+CONFIG_ATH11K_DEBUG=y
+CONFIG_ATH11K_DEBUGFS=y
+CONFIG_ATH11K_SMALL_DP_RINGS=y
+# CONFIG_ATH11K_QCN9074_FIXED_MEM_REGION is not set
+CONFIG_ATH12K=m
+CONFIG_ATH12K_DEBUG=y
+CONFIG_ATH12K_DEBUGFS=y
+# CONFIG_ATH12K_COREDUMP is not set
+# CONFIG_ATH12K_MEM_PROFILE_DEFAULT is not set
+CONFIG_ATH12K_MEM_PROFILE_512M=y
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_MICROCHIP is not set
+# CONFIG_WLAN_VENDOR_PURELIFI is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+# CONFIG_WLAN_VENDOR_REALTEK is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_SILABS is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+# CONFIG_WLAN_VENDOR_QUANTENNA is not set
+# CONFIG_MAC80211_HWSIM is not set
+# CONFIG_VIRT_WIFI is not set
+# CONFIG_WAN is not set
+
+#
+# Wireless WAN
+#
+# CONFIG_WWAN is not set
+# end of Wireless WAN
+
+# CONFIG_VMXNET3 is not set
+# CONFIG_NETDEVSIM is not set
+# CONFIG_NET_FAILOVER is not set
+# CONFIG_ISDN is not set
+
+#
+# Input device support
+#
+CONFIG_INPUT=y
+CONFIG_INPUT_LEDS=y
+# CONFIG_INPUT_FF_MEMLESS is not set
+# CONFIG_INPUT_SPARSEKMAP is not set
+# CONFIG_INPUT_MATRIXKMAP is not set
+
+#
+# Userland interfaces
+#
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_JOYDEV is not set
+CONFIG_INPUT_EVDEV=y
+# CONFIG_INPUT_EVBUG is not set
+
+#
+# Input Device Drivers
+#
+CONFIG_INPUT_KEYBOARD=y
+# CONFIG_KEYBOARD_ADP5588 is not set
+# CONFIG_KEYBOARD_ADP5589 is not set
+# CONFIG_KEYBOARD_ATKBD is not set
+# CONFIG_KEYBOARD_QT1050 is not set
+# CONFIG_KEYBOARD_QT1070 is not set
+# CONFIG_KEYBOARD_QT2160 is not set
+# CONFIG_KEYBOARD_DLINK_DIR685 is not set
+# CONFIG_KEYBOARD_LKKBD is not set
+CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_GPIO_POLLED=y
+# CONFIG_KEYBOARD_TCA6416 is not set
+# CONFIG_KEYBOARD_TCA8418 is not set
+# CONFIG_KEYBOARD_MATRIX is not set
+# CONFIG_KEYBOARD_LM8323 is not set
+# CONFIG_KEYBOARD_LM8333 is not set
+# CONFIG_KEYBOARD_MAX7359 is not set
+# CONFIG_KEYBOARD_MPR121 is not set
+# CONFIG_KEYBOARD_NEWTON is not set
+# CONFIG_KEYBOARD_OPENCORES is not set
+# CONFIG_KEYBOARD_SAMSUNG is not set
+# CONFIG_KEYBOARD_STOWAWAY is not set
+# CONFIG_KEYBOARD_SUNKBD is not set
+# CONFIG_KEYBOARD_OMAP4 is not set
+# CONFIG_KEYBOARD_TM2_TOUCHKEY is not set
+# CONFIG_KEYBOARD_XTKBD is not set
+# CONFIG_KEYBOARD_CAP11XX is not set
+# CONFIG_KEYBOARD_BCM is not set
+# CONFIG_KEYBOARD_CYPRESS_SF is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_INPUT_JOYSTICK is not set
+# CONFIG_INPUT_TABLET is not set
+# CONFIG_INPUT_TOUCHSCREEN is not set
+CONFIG_INPUT_MISC=y
+# CONFIG_INPUT_AD714X is not set
+# CONFIG_INPUT_ATMEL_CAPTOUCH is not set
+# CONFIG_INPUT_BMA150 is not set
+# CONFIG_INPUT_E3X0_BUTTON is not set
+# CONFIG_INPUT_MMA8450 is not set
+# CONFIG_INPUT_GPIO_BEEPER is not set
+# CONFIG_INPUT_GPIO_DECODER is not set
+# CONFIG_INPUT_GPIO_VIBRA is not set
+# CONFIG_INPUT_ATI_REMOTE2 is not set
+# CONFIG_INPUT_KEYSPAN_REMOTE is not set
+# CONFIG_INPUT_KXTJ9 is not set
+# CONFIG_INPUT_POWERMATE is not set
+# CONFIG_INPUT_YEALINK is not set
+# CONFIG_INPUT_CM109 is not set
+# CONFIG_INPUT_UINPUT is not set
+# CONFIG_INPUT_PCF8574 is not set
+# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set
+# CONFIG_INPUT_DA7280_HAPTICS is not set
+# CONFIG_INPUT_ADXL34X is not set
+# CONFIG_INPUT_IMS_PCU is not set
+# CONFIG_INPUT_IQS269A is not set
+# CONFIG_INPUT_IQS626A is not set
+# CONFIG_INPUT_IQS7222 is not set
+# CONFIG_INPUT_CMA3000 is not set
+# CONFIG_INPUT_DRV260X_HAPTICS is not set
+# CONFIG_INPUT_DRV2665_HAPTICS is not set
+# CONFIG_INPUT_DRV2667_HAPTICS is not set
+# CONFIG_INPUT_SMSC_CAP1066 is not set
+# CONFIG_RMI4_CORE is not set
+
+#
+# Hardware I/O ports
+#
+# CONFIG_SERIO is not set
+# CONFIG_GAMEPORT is not set
+# end of Hardware I/O ports
+# end of Input device support
+
+#
+# Character devices
+#
+CONFIG_TTY=y
+CONFIG_VT=y
+CONFIG_CONSOLE_TRANSLATIONS=y
+CONFIG_VT_CONSOLE=y
+CONFIG_VT_HW_CONSOLE_BINDING=y
+CONFIG_UNIX98_PTYS=y
+CONFIG_LEGACY_PTYS=y
+CONFIG_LEGACY_PTY_COUNT=16
+# CONFIG_LEGACY_TIOCSTI is not set
+# CONFIG_LDISC_AUTOLOAD is not set
+
+#
+# Serial drivers
+#
+CONFIG_SERIAL_EARLYCON=y
+# CONFIG_SERIAL_8250 is not set
+
+#
+# Non-8250 serial port support
+#
+# CONFIG_SERIAL_AMBA_PL010 is not set
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+# CONFIG_SERIAL_EARLYCON_SEMIHOST is not set
+# CONFIG_SERIAL_MAX3100 is not set
+# CONFIG_SERIAL_MAX310X is not set
+# CONFIG_SERIAL_UARTLITE is not set
+CONFIG_SERIAL_CORE=y
+CONFIG_SERIAL_CORE_CONSOLE=y
+# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_SIFIVE is not set
+# CONFIG_SERIAL_SCCNXP is not set
+# CONFIG_SERIAL_SC16IS7XX is not set
+# CONFIG_SERIAL_BCM63XX is not set
+# CONFIG_SERIAL_ALTERA_JTAGUART is not set
+# CONFIG_SERIAL_ALTERA_UART is not set
+# CONFIG_SERIAL_XILINX_PS_UART is not set
+# CONFIG_SERIAL_ARC is not set
+# CONFIG_SERIAL_RP2 is not set
+# CONFIG_SERIAL_FSL_LPUART is not set
+# CONFIG_SERIAL_FSL_LINFLEXUART is not set
+# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set
+# CONFIG_SERIAL_SPRD is not set
+# end of Serial drivers
+
+# CONFIG_SERIAL_NONSTANDARD is not set
+# CONFIG_N_GSM is not set
+# CONFIG_NOZOMI is not set
+# CONFIG_NULL_TTY is not set
+# CONFIG_HVC_DCC is not set
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
+# CONFIG_TTY_PRINTK is not set
+# CONFIG_VIRTIO_CONSOLE is not set
+# CONFIG_IPMI_HANDLER is not set
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_TIMERIOMEM is not set
+# CONFIG_HW_RANDOM_BA431 is not set
+# CONFIG_HW_RANDOM_BCM2835 is not set
+# CONFIG_HW_RANDOM_BCM63XX is not set
+# CONFIG_HW_RANDOM_IPROC_RNG200 is not set
+# CONFIG_HW_RANDOM_OPTEE is not set
+# CONFIG_HW_RANDOM_CCTRNG is not set
+# CONFIG_HW_RANDOM_XIPHERA is not set
+# CONFIG_HW_RANDOM_ARM_SMCCC_TRNG is not set
+# CONFIG_HW_RANDOM_CN10K is not set
+# CONFIG_HW_RANDOM_CLP800 is not set
+# CONFIG_APPLICOM is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVPHYSMEM is not set
+# CONFIG_DEVPORT is not set
+# CONFIG_TCG_TPM is not set
+# CONFIG_XILLYBUS is not set
+# CONFIG_XILLYUSB is not set
+# end of Character devices
+
+#
+# Diag Support
+#
+# CONFIG_DIAG_CHAR is not set
+# end of Diag Support
+
+#
+# DIAG traffic over USB
+#
+# end of DIAG traffic over USB
+
+#
+# DIAG traffic over QRTR
+#
+# end of DIAG traffic over QRTR
+
+#
+# HSIC/SMUX support for DIAG
+#
+# end of HSIC/SMUX support for DIAG
+
+#
+# I2C support
+#
+CONFIG_I2C=y
+CONFIG_I2C_BOARDINFO=y
+CONFIG_I2C_CHARDEV=y
+CONFIG_I2C_MUX=y
+
+#
+# Multiplexer I2C Chip support
+#
+# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set
+CONFIG_I2C_MUX_GPIO=y
+# CONFIG_I2C_MUX_GPMUX is not set
+# CONFIG_I2C_MUX_LTC4306 is not set
+# CONFIG_I2C_MUX_PCA9541 is not set
+# CONFIG_I2C_MUX_PCA954x is not set
+# CONFIG_I2C_MUX_PINCTRL is not set
+# CONFIG_I2C_MUX_REG is not set
+# CONFIG_I2C_DEMUX_PINCTRL is not set
+# CONFIG_I2C_MUX_MLXCPLD is not set
+# end of Multiplexer I2C Chip support
+
+CONFIG_I2C_HELPER_AUTO=y
+CONFIG_I2C_ALGOBIT=y
+
+#
+# I2C Hardware Bus support
+#
+
+#
+# PC SMBus host controller drivers
+#
+# CONFIG_I2C_ALI1535 is not set
+# CONFIG_I2C_ALI1563 is not set
+# CONFIG_I2C_ALI15X3 is not set
+# CONFIG_I2C_AMD756 is not set
+# CONFIG_I2C_AMD8111 is not set
+# CONFIG_I2C_I801 is not set
+# CONFIG_I2C_ISCH is not set
+# CONFIG_I2C_PIIX4 is not set
+# CONFIG_I2C_NFORCE2 is not set
+# CONFIG_I2C_NVIDIA_GPU is not set
+# CONFIG_I2C_SIS5595 is not set
+# CONFIG_I2C_SIS630 is not set
+# CONFIG_I2C_SIS96X is not set
+# CONFIG_I2C_VIA is not set
+# CONFIG_I2C_VIAPRO is not set
+
+#
+# I2C system bus drivers (mostly embedded / system-on-chip)
+#
+CONFIG_I2C_BRCMSTB=y
+# CONFIG_I2C_CADENCE is not set
+# CONFIG_I2C_CBUS_GPIO is not set
+# CONFIG_I2C_DESIGNWARE_CORE is not set
+# CONFIG_I2C_EMEV2 is not set
+CONFIG_I2C_GPIO=y
+# CONFIG_I2C_GPIO_FAULT_INJECTOR is not set
+# CONFIG_I2C_HISI is not set
+# CONFIG_I2C_NOMADIK is not set
+# CONFIG_I2C_OCORES is not set
+# CONFIG_I2C_PCA_PLATFORM is not set
+# CONFIG_I2C_RK3X is not set
+# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_THUNDERX is not set
+# CONFIG_I2C_XILINX is not set
+
+#
+# External I2C/SMBus adapter drivers
+#
+# CONFIG_I2C_DIOLAN_U2C is not set
+# CONFIG_I2C_CP2615 is not set
+# CONFIG_I2C_PCI1XXXX is not set
+# CONFIG_I2C_ROBOTFUZZ_OSIF is not set
+# CONFIG_I2C_TAOS_EVM is not set
+# CONFIG_I2C_TINY_USB is not set
+
+#
+# Other I2C/SMBus bus drivers
+#
+# CONFIG_I2C_VIRTIO is not set
+# CONFIG_I2C_FBXGWR_PMU is not set
+# end of I2C Hardware Bus support
+
+# CONFIG_I2C_STUB is not set
+# CONFIG_I2C_SLAVE is not set
+# CONFIG_I2C_DEBUG_CORE is not set
+# CONFIG_I2C_DEBUG_ALGO is not set
+# CONFIG_I2C_DEBUG_BUS is not set
+# end of I2C support
+
+# CONFIG_I3C is not set
+CONFIG_SPI=y
+# CONFIG_SPI_DEBUG is not set
+CONFIG_SPI_MASTER=y
+CONFIG_SPI_MEM=y
+
+#
+# SPI Master Controller Drivers
+#
+# CONFIG_SPI_ALTERA is not set
+# CONFIG_SPI_AXI_SPI_ENGINE is not set
+CONFIG_SPI_BCM63XX_HSSPI=y
+# CONFIG_SPI_BCM_QSPI is not set
+# CONFIG_SPI_BCMBCA_HSSPI is not set
+# CONFIG_SPI_BITBANG is not set
+# CONFIG_SPI_CADENCE is not set
+# CONFIG_SPI_CADENCE_QUADSPI is not set
+# CONFIG_SPI_CADENCE_XSPI is not set
+# CONFIG_SPI_CH341 is not set
+# CONFIG_SPI_DESIGNWARE is not set
+# CONFIG_SPI_GPIO is not set
+# CONFIG_SPI_FSL_SPI is not set
+# CONFIG_SPI_MICROCHIP_CORE is not set
+# CONFIG_SPI_MICROCHIP_CORE_QSPI is not set
+# CONFIG_SPI_OC_TINY is not set
+# CONFIG_SPI_PCI1XXXX is not set
+# CONFIG_SPI_PL022 is not set
+# CONFIG_SPI_SC18IS602 is not set
+# CONFIG_SPI_SIFIVE is not set
+# CONFIG_SPI_SN_F_OSPI is not set
+# CONFIG_SPI_MXIC is not set
+# CONFIG_SPI_THUNDERX is not set
+# CONFIG_SPI_XCOMM is not set
+# CONFIG_SPI_XILINX is not set
+# CONFIG_SPI_ZYNQMP_GQSPI is not set
+# CONFIG_SPI_AMD is not set
+
+#
+# SPI Multiplexer support
+#
+# CONFIG_SPI_MUX is not set
+
+#
+# SPI Protocol Masters
+#
+# CONFIG_SPI_SPIDEV is not set
+# CONFIG_SPI_LOOPBACK_TEST is not set
+# CONFIG_SPI_TLE62X0 is not set
+# CONFIG_SPI_SLAVE is not set
+CONFIG_SPI_DYNAMIC=y
+# CONFIG_SPMI is not set
+# CONFIG_HSI is not set
+# CONFIG_PPS is not set
+
+#
+# PTP clock support
+#
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_PTP_1588_CLOCK_OPTIONAL=y
+
+#
+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks.
+#
+# end of PTP clock support
+
+CONFIG_PINCTRL=y
+CONFIG_PINMUX=y
+CONFIG_PINCONF=y
+CONFIG_GENERIC_PINCONF=y
+# CONFIG_DEBUG_PINCTRL is not set
+# CONFIG_PINCTRL_AW9523 is not set
+# CONFIG_PINCTRL_CY8C95X0 is not set
+# CONFIG_PINCTRL_MCP23S08 is not set
+# CONFIG_PINCTRL_MICROCHIP_SGPIO is not set
+# CONFIG_PINCTRL_OCELOT is not set
+# CONFIG_PINCTRL_SINGLE is not set
+# CONFIG_PINCTRL_STMFX is not set
+# CONFIG_PINCTRL_SX150X is not set
+# CONFIG_PINCTRL_BCM4908 is not set
+CONFIG_PINCTRL_BCM63138=y
+
+#
+# Renesas pinctrl drivers
+#
+# end of Renesas pinctrl drivers
+
+CONFIG_GPIOLIB=y
+CONFIG_GPIOLIB_FASTPATH_LIMIT=512
+CONFIG_OF_GPIO=y
+CONFIG_GPIOLIB_IRQCHIP=y
+# CONFIG_DEBUG_GPIO is not set
+CONFIG_GPIO_SYSFS=y
+CONFIG_GPIO_CDEV=y
+# CONFIG_GPIO_CDEV_V1 is not set
+
+#
+# Memory mapped GPIO drivers
+#
+# CONFIG_GPIO_74XX_MMIO is not set
+# CONFIG_GPIO_ALTERA is not set
+# CONFIG_GPIO_CADENCE is not set
+# CONFIG_GPIO_DWAPB is not set
+# CONFIG_GPIO_FTGPIO010 is not set
+# CONFIG_GPIO_GENERIC_PLATFORM is not set
+# CONFIG_GPIO_GRGPIO is not set
+# CONFIG_GPIO_HISI is not set
+# CONFIG_GPIO_HLWD is not set
+# CONFIG_GPIO_LOGICVC is not set
+# CONFIG_GPIO_MB86S7X is not set
+# CONFIG_GPIO_PL061 is not set
+# CONFIG_GPIO_POLARFIRE_SOC is not set
+# CONFIG_GPIO_SIFIVE is not set
+# CONFIG_GPIO_SYSCON is not set
+# CONFIG_GPIO_XGENE is not set
+# CONFIG_GPIO_XILINX is not set
+# CONFIG_GPIO_AMD_FCH is not set
+# end of Memory mapped GPIO drivers
+
+#
+# I2C GPIO expanders
+#
+# CONFIG_GPIO_ADNP is not set
+CONFIG_GPIO_FBXGWR_PMU=y
+# CONFIG_GPIO_FXL6408 is not set
+# CONFIG_GPIO_DS4520 is not set
+# CONFIG_GPIO_GW_PLD is not set
+# CONFIG_GPIO_MAX7300 is not set
+# CONFIG_GPIO_MAX732X is not set
+CONFIG_GPIO_PCA953X=y
+# CONFIG_GPIO_PCA953X_IRQ is not set
+# CONFIG_GPIO_PCA9570 is not set
+# CONFIG_GPIO_PCF857X is not set
+# CONFIG_GPIO_TPIC2810 is not set
+# end of I2C GPIO expanders
+
+#
+# MFD GPIO expanders
+#
+# end of MFD GPIO expanders
+
+#
+# PCI GPIO expanders
+#
+# CONFIG_GPIO_BT8XX is not set
+# CONFIG_GPIO_PCI_IDIO_16 is not set
+# CONFIG_GPIO_PCIE_IDIO_24 is not set
+# CONFIG_GPIO_RDC321X is not set
+# end of PCI GPIO expanders
+
+#
+# SPI GPIO expanders
+#
+# CONFIG_GPIO_74X164 is not set
+# CONFIG_GPIO_MAX3191X is not set
+# CONFIG_GPIO_MAX7301 is not set
+# CONFIG_GPIO_MC33880 is not set
+# CONFIG_GPIO_PISOSR is not set
+# CONFIG_GPIO_XRA1403 is not set
+# end of SPI GPIO expanders
+
+#
+# USB GPIO expanders
+#
+# CONFIG_GPIO_MPSSE is not set
+# end of USB GPIO expanders
+
+#
+# Virtual GPIO drivers
+#
+# CONFIG_GPIO_AGGREGATOR is not set
+# CONFIG_GPIO_LATCH is not set
+# CONFIG_GPIO_MOCKUP is not set
+# CONFIG_GPIO_SIM is not set
+# CONFIG_GPIOLIB_NONEXCLUSIVE_TEST is not set
+# end of Virtual GPIO drivers
+
+#
+# GPIO Debugging utilities
+#
+# CONFIG_GPIO_VIRTUSER is not set
+# end of GPIO Debugging utilities
+
+CONFIG_FREEBOX_GPIO=y
+CONFIG_FREEBOX_GPIO_DT=y
+# CONFIG_FREEBOX_JTAG is not set
+# CONFIG_W1 is not set
+CONFIG_POWER_RESET=y
+# CONFIG_POWER_RESET_GPIO is not set
+# CONFIG_POWER_RESET_GPIO_RESTART is not set
+# CONFIG_POWER_RESET_LTC2952 is not set
+# CONFIG_POWER_RESET_RESTART is not set
+# CONFIG_POWER_RESET_XGENE is not set
+CONFIG_POWER_RESET_SYSCON=y
+CONFIG_POWER_RESET_SYSCON_POWEROFF=y
+# CONFIG_SYSCON_REBOOT_MODE is not set
+# CONFIG_NVMEM_REBOOT_MODE is not set
+# CONFIG_POWER_RESET_SYSCON_REASON is not set
+# CONFIG_POWER_SEQUENCING is not set
+CONFIG_POWER_SUPPLY=y
+# CONFIG_POWER_SUPPLY_DEBUG is not set
+# CONFIG_POWER_SUPPLY_HWMON is not set
+# CONFIG_IP5XXX_POWER is not set
+# CONFIG_TEST_POWER is not set
+# CONFIG_CHARGER_ADP5061 is not set
+# CONFIG_BATTERY_CW2015 is not set
+# CONFIG_BATTERY_DS2780 is not set
+# CONFIG_BATTERY_DS2781 is not set
+# CONFIG_BATTERY_DS2782 is not set
+# CONFIG_BATTERY_SAMSUNG_SDI is not set
+# CONFIG_BATTERY_SBS is not set
+# CONFIG_CHARGER_SBS is not set
+# CONFIG_MANAGER_SBS is not set
+# CONFIG_BATTERY_BQ27XXX is not set
+# CONFIG_BATTERY_MAX17042 is not set
+# CONFIG_BATTERY_MAX1720X is not set
+# CONFIG_CHARGER_MAX8903 is not set
+# CONFIG_CHARGER_LP8727 is not set
+# CONFIG_CHARGER_GPIO is not set
+# CONFIG_CHARGER_LT3651 is not set
+# CONFIG_CHARGER_LTC4162L is not set
+# CONFIG_CHARGER_DETECTOR_MAX14656 is not set
+# CONFIG_CHARGER_MAX77976 is not set
+# CONFIG_CHARGER_BQ2415X is not set
+# CONFIG_CHARGER_BQ24257 is not set
+# CONFIG_CHARGER_BQ24735 is not set
+# CONFIG_CHARGER_BQ2515X is not set
+# CONFIG_CHARGER_BQ25890 is not set
+# CONFIG_CHARGER_BQ25980 is not set
+# CONFIG_CHARGER_BQ256XX is not set
+# CONFIG_BATTERY_GAUGE_LTC2941 is not set
+# CONFIG_BATTERY_GOLDFISH is not set
+# CONFIG_BATTERY_RT5033 is not set
+# CONFIG_CHARGER_RT9455 is not set
+# CONFIG_CHARGER_BD99954 is not set
+# CONFIG_BATTERY_UG3105 is not set
+# CONFIG_FUEL_GAUGE_MM8013 is not set
+CONFIG_HWMON=y
+CONFIG_HWMON_VID=y
+# CONFIG_HWMON_DEBUG_CHIP is not set
+
+#
+# Native drivers
+#
+# CONFIG_SENSORS_AD7314 is not set
+# CONFIG_SENSORS_AD7414 is not set
+# CONFIG_SENSORS_AD7418 is not set
+# CONFIG_SENSORS_ADM1025 is not set
+# CONFIG_SENSORS_ADM1026 is not set
+# CONFIG_SENSORS_ADM1029 is not set
+# CONFIG_SENSORS_ADM1031 is not set
+# CONFIG_SENSORS_ADM1177 is not set
+# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7310 is not set
+# CONFIG_SENSORS_ADT7410 is not set
+# CONFIG_SENSORS_ADT7411 is not set
+# CONFIG_SENSORS_ADT7462 is not set
+# CONFIG_SENSORS_ADT7470 is not set
+CONFIG_SENSORS_ADT7475=y
+# CONFIG_SENSORS_AHT10 is not set
+# CONFIG_SENSORS_AS370 is not set
+# CONFIG_SENSORS_ASC7621 is not set
+# CONFIG_SENSORS_ASUS_ROG_RYUJIN is not set
+# CONFIG_SENSORS_AXI_FAN_CONTROL is not set
+CONFIG_SENSORS_FBXGWR_PMU=y
+# CONFIG_SENSORS_PERICOM_PCIE is not set
+# CONFIG_SENSORS_ATXP1 is not set
+# CONFIG_SENSORS_CHIPCAP2 is not set
+# CONFIG_SENSORS_CORSAIR_CPRO is not set
+# CONFIG_SENSORS_CORSAIR_PSU is not set
+# CONFIG_SENSORS_DS620 is not set
+# CONFIG_SENSORS_DS1621 is not set
+# CONFIG_SENSORS_I5K_AMB is not set
+# CONFIG_SENSORS_F71805F is not set
+# CONFIG_SENSORS_F71882FG is not set
+# CONFIG_SENSORS_F75375S is not set
+# CONFIG_SENSORS_GL518SM is not set
+# CONFIG_SENSORS_GL520SM is not set
+# CONFIG_SENSORS_G760A is not set
+# CONFIG_SENSORS_G762 is not set
+# CONFIG_SENSORS_GPIO_FAN is not set
+# CONFIG_SENSORS_HIH6130 is not set
+# CONFIG_SENSORS_HS3001 is not set
+# CONFIG_SENSORS_ISL28022 is not set
+# CONFIG_SENSORS_IT87 is not set
+# CONFIG_SENSORS_JC42 is not set
+# CONFIG_SENSORS_POWERZ is not set
+# CONFIG_SENSORS_POWR1220 is not set
+# CONFIG_SENSORS_LINEAGE is not set
+# CONFIG_SENSORS_LTC2945 is not set
+# CONFIG_SENSORS_LTC2947_I2C is not set
+# CONFIG_SENSORS_LTC2947_SPI is not set
+# CONFIG_SENSORS_LTC2990 is not set
+# CONFIG_SENSORS_LTC2991 is not set
+# CONFIG_SENSORS_LTC2992 is not set
+# CONFIG_SENSORS_LTC4151 is not set
+# CONFIG_SENSORS_LTC4215 is not set
+# CONFIG_SENSORS_LTC4222 is not set
+# CONFIG_SENSORS_LTC4245 is not set
+# CONFIG_SENSORS_LTC4260 is not set
+# CONFIG_SENSORS_LTC4261 is not set
+# CONFIG_SENSORS_LTC4282 is not set
+# CONFIG_SENSORS_MAX1111 is not set
+# CONFIG_SENSORS_MAX127 is not set
+# CONFIG_SENSORS_MAX16065 is not set
+# CONFIG_SENSORS_MAX1619 is not set
+# CONFIG_SENSORS_MAX1668 is not set
+# CONFIG_SENSORS_MAX197 is not set
+# CONFIG_SENSORS_MAX31722 is not set
+# CONFIG_SENSORS_MAX31730 is not set
+# CONFIG_SENSORS_MAX31760 is not set
+# CONFIG_MAX31827 is not set
+# CONFIG_SENSORS_MAX6620 is not set
+# CONFIG_SENSORS_MAX6621 is not set
+# CONFIG_SENSORS_MAX6639 is not set
+# CONFIG_SENSORS_MAX6650 is not set
+# CONFIG_SENSORS_MAX6697 is not set
+# CONFIG_SENSORS_MAX31790 is not set
+# CONFIG_SENSORS_MC34VR500 is not set
+# CONFIG_SENSORS_MCP3021 is not set
+# CONFIG_SENSORS_TC654 is not set
+# CONFIG_SENSORS_TPS23861 is not set
+# CONFIG_SENSORS_MR75203 is not set
+# CONFIG_SENSORS_ADCXX is not set
+# CONFIG_SENSORS_LM63 is not set
+# CONFIG_SENSORS_LM70 is not set
+# CONFIG_SENSORS_LM73 is not set
+# CONFIG_SENSORS_LM75 is not set
+# CONFIG_SENSORS_LM77 is not set
+# CONFIG_SENSORS_LM78 is not set
+# CONFIG_SENSORS_LM80 is not set
+# CONFIG_SENSORS_LM83 is not set
+# CONFIG_SENSORS_LM85 is not set
+# CONFIG_SENSORS_LM87 is not set
+# CONFIG_SENSORS_LM90 is not set
+# CONFIG_SENSORS_LM92 is not set
+# CONFIG_SENSORS_LM93 is not set
+# CONFIG_SENSORS_LM95234 is not set
+# CONFIG_SENSORS_LM95241 is not set
+# CONFIG_SENSORS_LM95245 is not set
+# CONFIG_SENSORS_PC87360 is not set
+# CONFIG_SENSORS_PC87427 is not set
+# CONFIG_SENSORS_NCT6683 is not set
+# CONFIG_SENSORS_NCT6775 is not set
+# CONFIG_SENSORS_NCT6775_I2C is not set
+# CONFIG_SENSORS_NCT7363 is not set
+# CONFIG_SENSORS_NCT7802 is not set
+# CONFIG_SENSORS_NPCM7XX is not set
+# CONFIG_SENSORS_OCC_P8_I2C is not set
+# CONFIG_SENSORS_PCF8591 is not set
+# CONFIG_PMBUS is not set
+# CONFIG_SENSORS_PT5161L is not set
+# CONFIG_SENSORS_SBTSI is not set
+# CONFIG_SENSORS_SBRMI is not set
+# CONFIG_SENSORS_SHT15 is not set
+# CONFIG_SENSORS_SHT21 is not set
+# CONFIG_SENSORS_SHT3x is not set
+# CONFIG_SENSORS_SHT4x is not set
+# CONFIG_SENSORS_SHTC1 is not set
+# CONFIG_SENSORS_SIS5595 is not set
+# CONFIG_SENSORS_DME1737 is not set
+# CONFIG_SENSORS_EMC1403 is not set
+# CONFIG_SENSORS_EMC2103 is not set
+# CONFIG_SENSORS_EMC2305 is not set
+# CONFIG_SENSORS_EMC6W201 is not set
+# CONFIG_SENSORS_SMSC47M1 is not set
+# CONFIG_SENSORS_SMSC47M192 is not set
+# CONFIG_SENSORS_SMSC47B397 is not set
+# CONFIG_SENSORS_STTS751 is not set
+# CONFIG_SENSORS_ADC128D818 is not set
+# CONFIG_SENSORS_ADS7828 is not set
+# CONFIG_SENSORS_ADS7871 is not set
+# CONFIG_SENSORS_AMC6821 is not set
+# CONFIG_SENSORS_INA209 is not set
+# CONFIG_SENSORS_INA2XX is not set
+# CONFIG_SENSORS_INA238 is not set
+# CONFIG_SENSORS_INA3221 is not set
+# CONFIG_SENSORS_SPD5118 is not set
+# CONFIG_SENSORS_TC74 is not set
+# CONFIG_SENSORS_THMC50 is not set
+# CONFIG_SENSORS_TMP102 is not set
+# CONFIG_SENSORS_TMP103 is not set
+# CONFIG_SENSORS_TMP108 is not set
+# CONFIG_SENSORS_TMP401 is not set
+# CONFIG_SENSORS_TMP421 is not set
+# CONFIG_SENSORS_TMP464 is not set
+# CONFIG_SENSORS_TMP513 is not set
+# CONFIG_SENSORS_VIA686A is not set
+# CONFIG_SENSORS_VT1211 is not set
+# CONFIG_SENSORS_VT8231 is not set
+# CONFIG_SENSORS_W83773G is not set
+# CONFIG_SENSORS_W83781D is not set
+# CONFIG_SENSORS_W83791D is not set
+# CONFIG_SENSORS_W83792D is not set
+# CONFIG_SENSORS_W83793 is not set
+# CONFIG_SENSORS_W83795 is not set
+# CONFIG_SENSORS_W83L785TS is not set
+# CONFIG_SENSORS_W83L786NG is not set
+# CONFIG_SENSORS_W83627HF is not set
+# CONFIG_SENSORS_W83627EHF is not set
+CONFIG_SENSORS_LD6710_FBX=m
+# CONFIG_SENSORS_AP806 is not set
+CONFIG_THERMAL=y
+# CONFIG_THERMAL_NETLINK is not set
+CONFIG_THERMAL_STATISTICS=y
+# CONFIG_THERMAL_DEBUGFS is not set
+# CONFIG_THERMAL_CORE_TESTING is not set
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=0
+# CONFIG_THERMAL_HWMON is not set
+CONFIG_THERMAL_OF=y
+CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y
+# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set
+# CONFIG_THERMAL_DEFAULT_GOV_BANG_BANG is not set
+CONFIG_THERMAL_GOV_FAIR_SHARE=y
+CONFIG_THERMAL_GOV_STEP_WISE=y
+CONFIG_THERMAL_GOV_BANG_BANG=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+# CONFIG_CPU_THERMAL is not set
+# CONFIG_PCIE_THERMAL is not set
+# CONFIG_THERMAL_EMULATION is not set
+# CONFIG_THERMAL_MMIO is not set
+
+#
+# Broadcom thermal drivers
+#
+# end of Broadcom thermal drivers
+
+CONFIG_FREEBOX_WATCHDOG=y
+# CONFIG_FREEBOX_WATCHDOG_CHAR is not set
+CONFIG_FREEBOX_WATCHDOG_BCM63XX_OF=y
+# CONFIG_FREEBOX_WATCHDOG_FBXGWR_PMU is not set
+# CONFIG_WATCHDOG is not set
+CONFIG_SSB_POSSIBLE=y
+# CONFIG_SSB is not set
+CONFIG_BCMA_POSSIBLE=y
+# CONFIG_BCMA is not set
+
+#
+# Multifunction device drivers
+#
+CONFIG_MFD_CORE=y
+# CONFIG_MFD_ADP5585 is not set
+# CONFIG_MFD_ACT8945A is not set
+# CONFIG_MFD_AS3711 is not set
+# CONFIG_MFD_SMPRO is not set
+# CONFIG_MFD_AS3722 is not set
+# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_AAT2870_CORE is not set
+# CONFIG_MFD_ATMEL_FLEXCOM is not set
+# CONFIG_MFD_ATMEL_HLCDC is not set
+# CONFIG_MFD_BCM590XX is not set
+# CONFIG_MFD_BD9571MWV is not set
+# CONFIG_MFD_AXP20X_I2C is not set
+# CONFIG_MFD_CS42L43_I2C is not set
+# CONFIG_MFD_MADERA is not set
+# CONFIG_MFD_MAX5970 is not set
+# CONFIG_PMIC_DA903X is not set
+# CONFIG_MFD_DA9052_SPI is not set
+# CONFIG_MFD_DA9052_I2C is not set
+# CONFIG_MFD_DA9055 is not set
+# CONFIG_MFD_DA9062 is not set
+# CONFIG_MFD_DA9063 is not set
+# CONFIG_MFD_DA9150 is not set
+# CONFIG_MFD_DLN2 is not set
+# CONFIG_MFD_GATEWORKS_GSC is not set
+# CONFIG_MFD_MC13XXX_SPI is not set
+# CONFIG_MFD_MC13XXX_I2C is not set
+# CONFIG_MFD_MP2629 is not set
+# CONFIG_MFD_HI6421_PMIC is not set
+# CONFIG_LPC_ICH is not set
+# CONFIG_LPC_SCH is not set
+# CONFIG_MFD_IQS62X is not set
+# CONFIG_MFD_JANZ_CMODIO is not set
+# CONFIG_MFD_KEMPLD is not set
+# CONFIG_MFD_88PM800 is not set
+# CONFIG_MFD_88PM805 is not set
+# CONFIG_MFD_88PM860X is not set
+# CONFIG_MFD_88PM886_PMIC is not set
+# CONFIG_MFD_MAX14577 is not set
+# CONFIG_MFD_MAX77541 is not set
+# CONFIG_MFD_MAX77620 is not set
+# CONFIG_MFD_MAX77650 is not set
+# CONFIG_MFD_MAX77686 is not set
+# CONFIG_MFD_MAX77693 is not set
+# CONFIG_MFD_MAX77714 is not set
+# CONFIG_MFD_MAX77843 is not set
+# CONFIG_MFD_MAX8907 is not set
+# CONFIG_MFD_MAX8925 is not set
+# CONFIG_MFD_MAX8997 is not set
+# CONFIG_MFD_MAX8998 is not set
+# CONFIG_MFD_MT6360 is not set
+# CONFIG_MFD_MT6370 is not set
+# CONFIG_MFD_MT6397 is not set
+# CONFIG_MFD_MENF21BMC is not set
+# CONFIG_MFD_OCELOT is not set
+# CONFIG_EZX_PCAP is not set
+# CONFIG_MFD_CPCAP is not set
+# CONFIG_MFD_VIPERBOARD is not set
+# CONFIG_MFD_NTXEC is not set
+# CONFIG_MFD_RETU is not set
+# CONFIG_MFD_PCF50633 is not set
+# CONFIG_MFD_SY7636A is not set
+# CONFIG_MFD_RDC321X is not set
+# CONFIG_MFD_RT4831 is not set
+# CONFIG_MFD_RT5033 is not set
+# CONFIG_MFD_RT5120 is not set
+# CONFIG_MFD_RC5T583 is not set
+# CONFIG_MFD_RK8XX_I2C is not set
+# CONFIG_MFD_RK8XX_SPI is not set
+# CONFIG_MFD_RN5T618 is not set
+# CONFIG_MFD_SEC_CORE is not set
+# CONFIG_MFD_SI476X_CORE is not set
+# CONFIG_MFD_SM501 is not set
+# CONFIG_MFD_SKY81452 is not set
+# CONFIG_MFD_STMPE is not set
+CONFIG_MFD_SYSCON=y
+# CONFIG_MFD_LP3943 is not set
+# CONFIG_MFD_LP8788 is not set
+# CONFIG_MFD_TI_LMU is not set
+# CONFIG_MFD_PALMAS is not set
+# CONFIG_TPS6105X is not set
+# CONFIG_TPS65010 is not set
+# CONFIG_TPS6507X is not set
+# CONFIG_MFD_TPS65086 is not set
+# CONFIG_MFD_TPS65090 is not set
+# CONFIG_MFD_TPS65217 is not set
+# CONFIG_MFD_TI_LP873X is not set
+# CONFIG_MFD_TI_LP87565 is not set
+# CONFIG_MFD_TPS65218 is not set
+# CONFIG_MFD_TPS65219 is not set
+# CONFIG_MFD_TPS6586X is not set
+# CONFIG_MFD_TPS65910 is not set
+# CONFIG_MFD_TPS65912_I2C is not set
+# CONFIG_MFD_TPS65912_SPI is not set
+# CONFIG_MFD_TPS6594_I2C is not set
+# CONFIG_MFD_TPS6594_SPI is not set
+# CONFIG_TWL4030_CORE is not set
+# CONFIG_TWL6040_CORE is not set
+# CONFIG_MFD_WL1273_CORE is not set
+# CONFIG_MFD_LM3533 is not set
+# CONFIG_MFD_TC3589X is not set
+# CONFIG_MFD_TQMX86 is not set
+# CONFIG_MFD_VX855 is not set
+# CONFIG_MFD_LOCHNAGAR is not set
+# CONFIG_MFD_ARIZONA_I2C is not set
+# CONFIG_MFD_ARIZONA_SPI is not set
+# CONFIG_MFD_WM8400 is not set
+# CONFIG_MFD_WM831X_I2C is not set
+# CONFIG_MFD_WM831X_SPI is not set
+# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
+# CONFIG_MFD_ROHM_BD718XX is not set
+# CONFIG_MFD_ROHM_BD71828 is not set
+# CONFIG_MFD_ROHM_BD957XMUF is not set
+# CONFIG_MFD_ROHM_BD96801 is not set
+# CONFIG_MFD_STPMIC1 is not set
+# CONFIG_MFD_STMFX is not set
+CONFIG_MFD_FBXGWR_PMU=y
+# CONFIG_MFD_FBXGW7R_PANEL is not set
+# CONFIG_MFD_ATC260X_I2C is not set
+# CONFIG_MFD_QCOM_PM8008 is not set
+# CONFIG_MFD_CS40L50_I2C is not set
+# CONFIG_MFD_CS40L50_SPI is not set
+# CONFIG_RAVE_SP_CORE is not set
+# CONFIG_MFD_INTEL_M10_BMC_SPI is not set
+# CONFIG_MFD_RSMU_I2C is not set
+# CONFIG_MFD_RSMU_SPI is not set
+# end of Multifunction device drivers
+
+# CONFIG_REGULATOR is not set
+CONFIG_RC_CORE=y
+# CONFIG_LIRC is not set
+# CONFIG_RC_MAP is not set
+# CONFIG_RC_DECODERS is not set
+# CONFIG_RC_DEVICES is not set
+
+#
+# CEC support
+#
+# CONFIG_MEDIA_CEC_SUPPORT is not set
+# end of CEC support
+
+CONFIG_MEDIA_SUPPORT=y
+# CONFIG_MEDIA_SUPPORT_FILTER is not set
+# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
+
+#
+# Media device types
+#
+CONFIG_MEDIA_CAMERA_SUPPORT=y
+CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
+CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
+CONFIG_MEDIA_RADIO_SUPPORT=y
+CONFIG_MEDIA_SDR_SUPPORT=y
+CONFIG_MEDIA_PLATFORM_SUPPORT=y
+CONFIG_MEDIA_TEST_SUPPORT=y
+# end of Media device types
+
+#
+# Media core support
+#
+# CONFIG_VIDEO_DEV is not set
+# CONFIG_MEDIA_CONTROLLER is not set
+CONFIG_DVB_CORE=y
+# end of Media core support
+
+#
+# Digital TV options
+#
+# CONFIG_DVB_NET is not set
+CONFIG_DVB_MAX_ADAPTERS=8
+# CONFIG_DVB_DYNAMIC_MINORS is not set
+# CONFIG_DVB_DEMUX_SECTION_LOSS_LOG is not set
+# CONFIG_DVB_ULE_DEBUG is not set
+# end of Digital TV options
+
+#
+# Media drivers
+#
+
+#
+# Media drivers
+#
+CONFIG_MEDIA_USB_SUPPORT=y
+
+#
+# Webcam devices
+#
+
+#
+# Analog TV USB devices
+#
+
+#
+# Analog/digital TV USB devices
+#
+
+#
+# Digital TV USB devices
+#
+# CONFIG_DVB_AS102 is not set
+# CONFIG_DVB_B2C2_FLEXCOP_USB is not set
+CONFIG_DVB_USB_V2=y
+# CONFIG_DVB_USB_AF9015 is not set
+CONFIG_DVB_USB_AF9035=m
+# CONFIG_DVB_USB_ANYSEE is not set
+# CONFIG_DVB_USB_AU6610 is not set
+# CONFIG_DVB_USB_AZ6007 is not set
+# CONFIG_DVB_USB_CE6230 is not set
+# CONFIG_DVB_USB_DVBSKY is not set
+# CONFIG_DVB_USB_EC168 is not set
+# CONFIG_DVB_USB_GL861 is not set
+# CONFIG_DVB_USB_LME2510 is not set
+# CONFIG_DVB_USB_MXL111SF is not set
+# CONFIG_DVB_USB_RTL28XXU is not set
+# CONFIG_DVB_USB_ZD1301 is not set
+CONFIG_DVB_USB=y
+# CONFIG_DVB_USB_DEBUG is not set
+# CONFIG_DVB_USB_A800 is not set
+# CONFIG_DVB_USB_AF9005 is not set
+# CONFIG_DVB_USB_AZ6027 is not set
+# CONFIG_DVB_USB_CINERGY_T2 is not set
+# CONFIG_DVB_USB_CXUSB is not set
+CONFIG_DVB_USB_DIB0700=m
+# CONFIG_DVB_USB_DIBUSB_MB is not set
+# CONFIG_DVB_USB_DIBUSB_MC is not set
+# CONFIG_DVB_USB_DIGITV is not set
+# CONFIG_DVB_USB_DTT200U is not set
+# CONFIG_DVB_USB_DTV5100 is not set
+# CONFIG_DVB_USB_DW2102 is not set
+# CONFIG_DVB_USB_GP8PSK is not set
+# CONFIG_DVB_USB_M920X is not set
+# CONFIG_DVB_USB_NOVA_T_USB2 is not set
+# CONFIG_DVB_USB_OPERA1 is not set
+# CONFIG_DVB_USB_PCTV452E is not set
+# CONFIG_DVB_USB_TECHNISAT_USB2 is not set
+# CONFIG_DVB_USB_TTUSB2 is not set
+# CONFIG_DVB_USB_UMT_010 is not set
+# CONFIG_DVB_USB_VP702X is not set
+# CONFIG_DVB_USB_VP7045 is not set
+# CONFIG_SMS_USB_DRV is not set
+# CONFIG_DVB_TTUSB_BUDGET is not set
+# CONFIG_DVB_TTUSB_DEC is not set
+
+#
+# Webcam, TV (analog/digital) USB devices
+#
+
+#
+# Software defined radio USB devices
+#
+# CONFIG_MEDIA_PCI_SUPPORT is not set
+# CONFIG_MEDIA_PLATFORM_DRIVERS is not set
+
+#
+# MMC/SDIO DVB adapters
+#
+# CONFIG_SMS_SDIO_DRV is not set
+# CONFIG_DVB_TEST_DRIVERS is not set
+CONFIG_CYPRESS_FIRMWARE=y
+# end of Media drivers
+
+#
+# Media ancillary drivers
+#
+CONFIG_MEDIA_ATTACH=y
+CONFIG_MEDIA_TUNER=y
+
+#
+# Customize TV tuners
+#
+# CONFIG_MEDIA_TUNER_FC0011 is not set
+# CONFIG_MEDIA_TUNER_FC0012 is not set
+# CONFIG_MEDIA_TUNER_FC0013 is not set
+CONFIG_MEDIA_TUNER_IT913X=m
+# CONFIG_MEDIA_TUNER_M88RS6000T is not set
+# CONFIG_MEDIA_TUNER_MAX2165 is not set
+# CONFIG_MEDIA_TUNER_MC44S803 is not set
+# CONFIG_MEDIA_TUNER_MT2060 is not set
+# CONFIG_MEDIA_TUNER_MT2063 is not set
+# CONFIG_MEDIA_TUNER_MT20XX is not set
+# CONFIG_MEDIA_TUNER_MT2131 is not set
+# CONFIG_MEDIA_TUNER_MT2266 is not set
+# CONFIG_MEDIA_TUNER_MXL301RF is not set
+# CONFIG_MEDIA_TUNER_MXL5005S is not set
+# CONFIG_MEDIA_TUNER_MXL5007T is not set
+# CONFIG_MEDIA_TUNER_QM1D1B0004 is not set
+# CONFIG_MEDIA_TUNER_QM1D1C0042 is not set
+# CONFIG_MEDIA_TUNER_QT1010 is not set
+# CONFIG_MEDIA_TUNER_R820T is not set
+# CONFIG_MEDIA_TUNER_SI2157 is not set
+# CONFIG_MEDIA_TUNER_SIMPLE is not set
+# CONFIG_MEDIA_TUNER_TDA18212 is not set
+# CONFIG_MEDIA_TUNER_TDA18218 is not set
+# CONFIG_MEDIA_TUNER_TDA18250 is not set
+# CONFIG_MEDIA_TUNER_TDA18271 is not set
+# CONFIG_MEDIA_TUNER_TDA827X is not set
+# CONFIG_MEDIA_TUNER_TDA8290 is not set
+# CONFIG_MEDIA_TUNER_TDA9887 is not set
+# CONFIG_MEDIA_TUNER_TEA5761 is not set
+# CONFIG_MEDIA_TUNER_TEA5767 is not set
+# CONFIG_MEDIA_TUNER_TUA9001 is not set
+# CONFIG_MEDIA_TUNER_XC2028 is not set
+# CONFIG_MEDIA_TUNER_XC4000 is not set
+# CONFIG_MEDIA_TUNER_XC5000 is not set
+# end of Customize TV tuners
+
+#
+# Customise DVB Frontends
+#
+
+#
+# Multistandard (satellite) frontends
+#
+CONFIG_DVB_M88DS3103=m
+# CONFIG_DVB_MXL5XX is not set
+# CONFIG_DVB_STB0899 is not set
+# CONFIG_DVB_STB6100 is not set
+# CONFIG_DVB_STV090x is not set
+# CONFIG_DVB_STV0910 is not set
+# CONFIG_DVB_STV6110x is not set
+# CONFIG_DVB_STV6111 is not set
+
+#
+# Multistandard (cable + terrestrial) frontends
+#
+# CONFIG_DVB_DRXK is not set
+# CONFIG_DVB_MN88472 is not set
+# CONFIG_DVB_MN88473 is not set
+# CONFIG_DVB_SI2165 is not set
+# CONFIG_DVB_TDA18271C2DD is not set
+
+#
+# DVB-S (satellite) frontends
+#
+# CONFIG_DVB_CX24110 is not set
+# CONFIG_DVB_CX24116 is not set
+# CONFIG_DVB_CX24117 is not set
+# CONFIG_DVB_CX24120 is not set
+# CONFIG_DVB_CX24123 is not set
+# CONFIG_DVB_DS3000 is not set
+# CONFIG_DVB_MB86A16 is not set
+# CONFIG_DVB_MT312 is not set
+# CONFIG_DVB_S5H1420 is not set
+# CONFIG_DVB_SI21XX is not set
+# CONFIG_DVB_STB6000 is not set
+# CONFIG_DVB_STV0288 is not set
+# CONFIG_DVB_STV0299 is not set
+# CONFIG_DVB_STV0900 is not set
+# CONFIG_DVB_STV6110 is not set
+# CONFIG_DVB_TDA10071 is not set
+# CONFIG_DVB_TDA10086 is not set
+# CONFIG_DVB_TDA8083 is not set
+# CONFIG_DVB_TDA8261 is not set
+# CONFIG_DVB_TDA826X is not set
+# CONFIG_DVB_TS2020 is not set
+# CONFIG_DVB_TUA6100 is not set
+# CONFIG_DVB_TUNER_CX24113 is not set
+# CONFIG_DVB_TUNER_ITD1000 is not set
+# CONFIG_DVB_VES1X93 is not set
+# CONFIG_DVB_ZL10036 is not set
+# CONFIG_DVB_ZL10039 is not set
+
+#
+# DVB-T (terrestrial) frontends
+#
+CONFIG_DVB_AF9013=m
+# CONFIG_DVB_CX22700 is not set
+# CONFIG_DVB_CX22702 is not set
+# CONFIG_DVB_CXD2820R is not set
+# CONFIG_DVB_CXD2841ER is not set
+# CONFIG_DVB_DIB3000MB is not set
+# CONFIG_DVB_DIB3000MC is not set
+CONFIG_DVB_DIB7000M=m
+CONFIG_DVB_DIB7000P=m
+# CONFIG_DVB_DIB9000 is not set
+# CONFIG_DVB_DRXD is not set
+# CONFIG_DVB_EC100 is not set
+# CONFIG_DVB_L64781 is not set
+# CONFIG_DVB_MT352 is not set
+# CONFIG_DVB_NXT6000 is not set
+CONFIG_DVB_RTL2830=m
+CONFIG_DVB_RTL2832=m
+# CONFIG_DVB_S5H1432 is not set
+CONFIG_DVB_SI2168=m
+# CONFIG_DVB_SP887X is not set
+# CONFIG_DVB_STV0367 is not set
+# CONFIG_DVB_TDA10048 is not set
+# CONFIG_DVB_TDA1004X is not set
+# CONFIG_DVB_ZD1301_DEMOD is not set
+# CONFIG_DVB_ZL10353 is not set
+# CONFIG_DVB_CXD2880 is not set
+
+#
+# DVB-C (cable) frontends
+#
+# CONFIG_DVB_STV0297 is not set
+# CONFIG_DVB_TDA10021 is not set
+# CONFIG_DVB_TDA10023 is not set
+# CONFIG_DVB_VES1820 is not set
+
+#
+# ATSC (North American/Korean Terrestrial/Cable DTV) frontends
+#
+# CONFIG_DVB_AU8522_DTV is not set
+# CONFIG_DVB_BCM3510 is not set
+# CONFIG_DVB_LG2160 is not set
+# CONFIG_DVB_LGDT3305 is not set
+CONFIG_DVB_LGDT3306A=m
+# CONFIG_DVB_LGDT330X is not set
+# CONFIG_DVB_MXL692 is not set
+# CONFIG_DVB_NXT200X is not set
+# CONFIG_DVB_OR51132 is not set
+# CONFIG_DVB_OR51211 is not set
+# CONFIG_DVB_S5H1409 is not set
+# CONFIG_DVB_S5H1411 is not set
+
+#
+# ISDB-T (terrestrial) frontends
+#
+# CONFIG_DVB_DIB8000 is not set
+# CONFIG_DVB_MB86A20S is not set
+# CONFIG_DVB_S921 is not set
+
+#
+# ISDB-S (satellite) & ISDB-T (terrestrial) frontends
+#
+# CONFIG_DVB_MN88443X is not set
+# CONFIG_DVB_TC90522 is not set
+
+#
+# Digital terrestrial only tuners/PLL
+#
+# CONFIG_DVB_PLL is not set
+CONFIG_DVB_TUNER_DIB0070=m
+# CONFIG_DVB_TUNER_DIB0090 is not set
+
+#
+# SEC control devices for DVB-S
+#
+# CONFIG_DVB_A8293 is not set
+CONFIG_DVB_AF9033=m
+# CONFIG_DVB_ASCOT2E is not set
+# CONFIG_DVB_ATBM8830 is not set
+# CONFIG_DVB_HELENE is not set
+# CONFIG_DVB_HORUS3A is not set
+# CONFIG_DVB_ISL6405 is not set
+# CONFIG_DVB_ISL6421 is not set
+# CONFIG_DVB_ISL6423 is not set
+# CONFIG_DVB_IX2505V is not set
+# CONFIG_DVB_LGS8GL5 is not set
+# CONFIG_DVB_LGS8GXX is not set
+# CONFIG_DVB_LNBH25 is not set
+# CONFIG_DVB_LNBH29 is not set
+# CONFIG_DVB_LNBP21 is not set
+# CONFIG_DVB_LNBP22 is not set
+# CONFIG_DVB_M88RS2000 is not set
+# CONFIG_DVB_TDA665x is not set
+# CONFIG_DVB_DRX39XYJ is not set
+
+#
+# Common Interface (EN50221) controller drivers
+#
+# CONFIG_DVB_CXD2099 is not set
+# CONFIG_DVB_SP2 is not set
+# end of Customise DVB Frontends
+
+#
+# Tools to develop new frontends
+#
+# CONFIG_DVB_DUMMY_FE is not set
+# end of Media ancillary drivers
+
+#
+# Graphics support
+#
+CONFIG_VIDEO=y
+# CONFIG_AUXDISPLAY is not set
+# CONFIG_DRM is not set
+
+#
+# Frame buffer Devices
+#
+CONFIG_FB=y
+# CONFIG_FB_CIRRUS is not set
+# CONFIG_FB_PM2 is not set
+# CONFIG_FB_CYBER2000 is not set
+# CONFIG_FB_ASILIANT is not set
+# CONFIG_FB_IMSTT is not set
+# CONFIG_FB_OPENCORES is not set
+# CONFIG_FB_S1D13XXX is not set
+# CONFIG_FB_NVIDIA is not set
+# CONFIG_FB_RIVA is not set
+# CONFIG_FB_I740 is not set
+# CONFIG_FB_MATROX is not set
+# CONFIG_FB_RADEON is not set
+# CONFIG_FB_ATY128 is not set
+# CONFIG_FB_ATY is not set
+# CONFIG_FB_S3 is not set
+# CONFIG_FB_SAVAGE is not set
+# CONFIG_FB_SIS is not set
+# CONFIG_FB_NEOMAGIC is not set
+# CONFIG_FB_KYRO is not set
+# CONFIG_FB_3DFX is not set
+# CONFIG_FB_VOODOO1 is not set
+# CONFIG_FB_VT8623 is not set
+# CONFIG_FB_TRIDENT is not set
+# CONFIG_FB_ARK is not set
+# CONFIG_FB_PM3 is not set
+# CONFIG_FB_CARMINE is not set
+# CONFIG_FB_SMSCUFX is not set
+# CONFIG_FB_UDL is not set
+# CONFIG_FB_IBM_GXT4500 is not set
+# CONFIG_FB_VIRTUAL is not set
+# CONFIG_FB_METRONOME is not set
+# CONFIG_FB_MB862XX is not set
+# CONFIG_FB_SIMPLE is not set
+# CONFIG_FB_SSD1307 is not set
+# CONFIG_FB_SM712 is not set
+CONFIG_FB_SSD1320=y
+# CONFIG_FB_SSD1327 is not set
+CONFIG_FB_CORE=y
+CONFIG_FB_NOTIFY=y
+# CONFIG_FIRMWARE_EDID is not set
+CONFIG_FB_DEVICE=y
+CONFIG_FB_SYS_FILLRECT=y
+CONFIG_FB_SYS_COPYAREA=y
+CONFIG_FB_SYS_IMAGEBLIT=y
+# CONFIG_FB_FOREIGN_ENDIAN is not set
+CONFIG_FB_SYSMEM_FOPS=y
+CONFIG_FB_DEFERRED_IO=y
+CONFIG_FB_SYSMEM_HELPERS=y
+CONFIG_FB_SYSMEM_HELPERS_DEFERRED=y
+CONFIG_FB_BACKLIGHT=y
+# CONFIG_FB_MODE_HELPERS is not set
+# CONFIG_FB_TILEBLITTING is not set
+# end of Frame buffer Devices
+
+#
+# Backlight & LCD device support
+#
+# CONFIG_LCD_CLASS_DEVICE is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+# CONFIG_BACKLIGHT_KTD253 is not set
+# CONFIG_BACKLIGHT_KTD2801 is not set
+# CONFIG_BACKLIGHT_KTZ8866 is not set
+# CONFIG_BACKLIGHT_QCOM_WLED is not set
+# CONFIG_BACKLIGHT_ADP8860 is not set
+# CONFIG_BACKLIGHT_ADP8870 is not set
+# CONFIG_BACKLIGHT_LM3509 is not set
+# CONFIG_BACKLIGHT_LM3639 is not set
+# CONFIG_BACKLIGHT_GPIO is not set
+# CONFIG_BACKLIGHT_LV5207LP is not set
+# CONFIG_BACKLIGHT_BD6107 is not set
+# CONFIG_BACKLIGHT_ARCXCNN is not set
+# CONFIG_BACKLIGHT_LED is not set
+# end of Backlight & LCD device support
+
+#
+# Console display driver support
+#
+CONFIG_DUMMY_CONSOLE=y
+CONFIG_DUMMY_CONSOLE_COLUMNS=80
+CONFIG_DUMMY_CONSOLE_ROWS=25
+# CONFIG_FRAMEBUFFER_CONSOLE is not set
+# end of Console display driver support
+
+# CONFIG_LOGO is not set
+# end of Graphics support
+
+# CONFIG_SOUND is not set
+CONFIG_HID_SUPPORT=y
+CONFIG_HID=y
+# CONFIG_HID_BATTERY_STRENGTH is not set
+# CONFIG_HIDRAW is not set
+# CONFIG_UHID is not set
+# CONFIG_HID_GENERIC is not set
+
+#
+# Special HID drivers
+#
+# CONFIG_HID_A4TECH is not set
+# CONFIG_HID_ACRUX is not set
+# CONFIG_HID_APPLE is not set
+# CONFIG_HID_AUREAL is not set
+# CONFIG_HID_BELKIN is not set
+# CONFIG_HID_CHERRY is not set
+# CONFIG_HID_COUGAR is not set
+# CONFIG_HID_MACALLY is not set
+# CONFIG_HID_CMEDIA is not set
+# CONFIG_HID_CYPRESS is not set
+# CONFIG_HID_DRAGONRISE is not set
+# CONFIG_HID_EMS_FF is not set
+# CONFIG_HID_ELECOM is not set
+# CONFIG_HID_EVISION is not set
+# CONFIG_HID_EZKEY is not set
+# CONFIG_HID_GEMBIRD is not set
+# CONFIG_HID_GFRM is not set
+# CONFIG_HID_GLORIOUS is not set
+# CONFIG_HID_GOODIX_SPI is not set
+# CONFIG_HID_GOOGLE_STADIA_FF is not set
+# CONFIG_HID_VIVALDI is not set
+# CONFIG_HID_KEYTOUCH is not set
+# CONFIG_HID_KYE is not set
+# CONFIG_HID_WALTOP is not set
+# CONFIG_HID_VIEWSONIC is not set
+# CONFIG_HID_VRC2 is not set
+# CONFIG_HID_XIAOMI is not set
+# CONFIG_HID_GYRATION is not set
+# CONFIG_HID_ICADE is not set
+# CONFIG_HID_ITE is not set
+# CONFIG_HID_JABRA is not set
+# CONFIG_HID_TWINHAN is not set
+# CONFIG_HID_KENSINGTON is not set
+# CONFIG_HID_LCPOWER is not set
+# CONFIG_HID_LED is not set
+# CONFIG_HID_LENOVO is not set
+# CONFIG_HID_MAGICMOUSE is not set
+# CONFIG_HID_MALTRON is not set
+# CONFIG_HID_MAYFLASH is not set
+# CONFIG_HID_REDRAGON is not set
+# CONFIG_HID_MICROSOFT is not set
+# CONFIG_HID_MONTEREY is not set
+# CONFIG_HID_MULTITOUCH is not set
+# CONFIG_HID_NINTENDO is not set
+# CONFIG_HID_NTI is not set
+# CONFIG_HID_ORTEK is not set
+# CONFIG_HID_PANTHERLORD is not set
+# CONFIG_HID_PETALYNX is not set
+# CONFIG_HID_PICOLCD is not set
+# CONFIG_HID_PLANTRONICS is not set
+# CONFIG_HID_PXRC is not set
+# CONFIG_HID_RAZER is not set
+# CONFIG_HID_PRIMAX is not set
+# CONFIG_HID_SAITEK is not set
+# CONFIG_HID_SEMITEK is not set
+# CONFIG_HID_SPEEDLINK is not set
+# CONFIG_HID_STEAM is not set
+# CONFIG_HID_SUNPLUS is not set
+# CONFIG_HID_RMI is not set
+# CONFIG_HID_GREENASIA is not set
+# CONFIG_HID_SMARTJOYPLUS is not set
+# CONFIG_HID_TIVO is not set
+# CONFIG_HID_TOPSEED is not set
+# CONFIG_HID_TOPRE is not set
+# CONFIG_HID_THINGM is not set
+# CONFIG_HID_UDRAW_PS3 is not set
+# CONFIG_HID_WIIMOTE is not set
+# CONFIG_HID_XINMO is not set
+# CONFIG_HID_ZEROPLUS is not set
+# CONFIG_HID_ZYDACRON is not set
+# CONFIG_HID_SENSOR_HUB is not set
+# CONFIG_HID_ALPS is not set
+# end of Special HID drivers
+
+#
+# HID-BPF support
+#
+# end of HID-BPF support
+
+#
+# USB HID support
+#
+# CONFIG_USB_HID is not set
+# CONFIG_HID_PID is not set
+
+#
+# USB HID Boot Protocol drivers
+#
+# CONFIG_USB_KBD is not set
+# CONFIG_USB_MOUSE is not set
+# end of USB HID Boot Protocol drivers
+# end of USB HID support
+
+# CONFIG_I2C_HID is not set
+CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB_SUPPORT=y
+CONFIG_USB_COMMON=y
+# CONFIG_USB_LED_TRIG is not set
+# CONFIG_USB_ULPI_BUS is not set
+# CONFIG_USB_CONN_GPIO is not set
+CONFIG_USB_ARCH_HAS_HCD=y
+CONFIG_USB=y
+# CONFIG_USB_PCI is not set
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+
+#
+# Miscellaneous USB options
+#
+CONFIG_USB_DEFAULT_PERSIST=y
+# CONFIG_USB_FEW_INIT_RETRIES is not set
+# CONFIG_USB_DYNAMIC_MINORS is not set
+# CONFIG_USB_OTG_PRODUCTLIST is not set
+# CONFIG_USB_OTG_DISABLE_EXTERNAL_HUB is not set
+# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set
+CONFIG_USB_AUTOSUSPEND_DELAY=2
+CONFIG_USB_DEFAULT_AUTHORIZATION_MODE=1
+# CONFIG_USB_MON is not set
+
+#
+# USB Host Controller Drivers
+#
+# CONFIG_USB_C67X00_HCD is not set
+CONFIG_USB_XHCI_HCD=y
+# CONFIG_USB_XHCI_DBGCAP is not set
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_TT_NEWSCHED=y
+# CONFIG_USB_EHCI_FSL is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+# CONFIG_USB_OXU210HP_HCD is not set
+# CONFIG_USB_ISP116X_HCD is not set
+# CONFIG_USB_MAX3421_HCD is not set
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+# CONFIG_USB_SL811_HCD is not set
+# CONFIG_USB_R8A66597_HCD is not set
+CONFIG_USB_BCM63158=m
+# CONFIG_USB_HCD_TEST_MODE is not set
+
+#
+# USB Device Class drivers
+#
+CONFIG_USB_ACM=y
+CONFIG_USB_PRINTER=y
+# CONFIG_USB_WDM is not set
+# CONFIG_USB_TMC is not set
+
+#
+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may
+#
+
+#
+# also be needed; see USB_STORAGE Help for more info
+#
+CONFIG_USB_STORAGE=y
+# CONFIG_USB_STORAGE_DEBUG is not set
+# CONFIG_USB_STORAGE_REALTEK is not set
+# CONFIG_USB_STORAGE_DATAFAB is not set
+# CONFIG_USB_STORAGE_FREECOM is not set
+# CONFIG_USB_STORAGE_ISD200 is not set
+# CONFIG_USB_STORAGE_USBAT is not set
+# CONFIG_USB_STORAGE_SDDR09 is not set
+# CONFIG_USB_STORAGE_SDDR55 is not set
+# CONFIG_USB_STORAGE_JUMPSHOT is not set
+# CONFIG_USB_STORAGE_ALAUDA is not set
+# CONFIG_USB_STORAGE_ONETOUCH is not set
+# CONFIG_USB_STORAGE_KARMA is not set
+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set
+# CONFIG_USB_STORAGE_ENE_UB6250 is not set
+# CONFIG_USB_UAS is not set
+
+#
+# USB Imaging devices
+#
+# CONFIG_USB_MDC800 is not set
+# CONFIG_USB_MICROTEK is not set
+# CONFIG_USBIP_CORE is not set
+
+#
+# USB dual-mode controller drivers
+#
+# CONFIG_USB_CDNS_SUPPORT is not set
+# CONFIG_USB_MUSB_HDRC is not set
+# CONFIG_USB_DWC3 is not set
+# CONFIG_USB_DWC2 is not set
+# CONFIG_USB_CHIPIDEA is not set
+# CONFIG_USB_ISP1760 is not set
+
+#
+# USB port drivers
+#
+# CONFIG_USB_SERIAL is not set
+
+#
+# USB Miscellaneous drivers
+#
+# CONFIG_USB_EMI62 is not set
+# CONFIG_USB_EMI26 is not set
+# CONFIG_USB_ADUTUX is not set
+# CONFIG_USB_SEVSEG is not set
+# CONFIG_USB_LEGOTOWER is not set
+# CONFIG_USB_LCD is not set
+# CONFIG_USB_CYPRESS_CY7C63 is not set
+# CONFIG_USB_CYTHERM is not set
+# CONFIG_USB_IDMOUSE is not set
+# CONFIG_USB_APPLEDISPLAY is not set
+# CONFIG_APPLE_MFI_FASTCHARGE is not set
+# CONFIG_USB_SISUSBVGA is not set
+# CONFIG_USB_LD is not set
+# CONFIG_USB_TRANCEVIBRATOR is not set
+# CONFIG_USB_IOWARRIOR is not set
+# CONFIG_USB_TEST is not set
+# CONFIG_USB_EHSET_TEST_FIXTURE is not set
+# CONFIG_USB_ISIGHTFW is not set
+# CONFIG_USB_YUREX is not set
+# CONFIG_USB_EZUSB_FX2 is not set
+# CONFIG_USB_HUB_USB251XB is not set
+# CONFIG_USB_HSIC_USB3503 is not set
+# CONFIG_USB_HSIC_USB4604 is not set
+# CONFIG_USB_LINK_LAYER_TEST is not set
+# CONFIG_USB_CHAOSKEY is not set
+# CONFIG_USB_ONBOARD_DEV is not set
+
+#
+# USB Physical Layer drivers
+#
+# CONFIG_NOP_USB_XCEIV is not set
+# CONFIG_USB_GPIO_VBUS is not set
+# CONFIG_USB_ISP1301 is not set
+# CONFIG_USB_ULPI is not set
+# end of USB Physical Layer drivers
+
+# CONFIG_USB_GADGET is not set
+# CONFIG_TYPEC is not set
+# CONFIG_USB_ROLE_SWITCH is not set
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_BLOCK=y
+CONFIG_MMC_BLOCK_MINORS=8
+# CONFIG_SDIO_UART is not set
+# CONFIG_MMC_TEST is not set
+
+#
+# MMC/SD/SDIO Host Controller Drivers
+#
+# CONFIG_MMC_DEBUG is not set
+# CONFIG_MMC_ARMMMCI is not set
+CONFIG_MMC_SDHCI=y
+# CONFIG_MMC_SDHCI_PCI is not set
+CONFIG_MMC_SDHCI_PLTFM=y
+# CONFIG_MMC_SDHCI_OF_ARASAN is not set
+# CONFIG_MMC_SDHCI_OF_AT91 is not set
+# CONFIG_MMC_SDHCI_OF_DWCMSHC is not set
+# CONFIG_MMC_SDHCI_CADENCE is not set
+# CONFIG_MMC_SDHCI_F_SDH30 is not set
+# CONFIG_MMC_SDHCI_MILBEAUT is not set
+# CONFIG_MMC_TIFM_SD is not set
+# CONFIG_MMC_SPI is not set
+# CONFIG_MMC_CB710 is not set
+# CONFIG_MMC_VIA_SDMMC is not set
+# CONFIG_MMC_DW is not set
+# CONFIG_MMC_VUB300 is not set
+# CONFIG_MMC_USHC is not set
+# CONFIG_MMC_USDHI6ROL0 is not set
+CONFIG_MMC_CQHCI=y
+# CONFIG_MMC_HSQ is not set
+# CONFIG_MMC_TOSHIBA_PCI is not set
+# CONFIG_MMC_MTK is not set
+CONFIG_MMC_SDHCI_BRCMSTB=y
+# CONFIG_MMC_SDHCI_XENON is not set
+# CONFIG_SCSI_UFSHCD is not set
+# CONFIG_MEMSTICK is not set
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+# CONFIG_LEDS_CLASS_FLASH is not set
+# CONFIG_LEDS_CLASS_MULTICOLOR is not set
+# CONFIG_LEDS_BRIGHTNESS_HW_CHANGED is not set
+
+#
+# LED drivers
+#
+# CONFIG_LEDS_AN30259A is not set
+# CONFIG_LEDS_AW200XX is not set
+# CONFIG_LEDS_AW2013 is not set
+# CONFIG_LEDS_BCM6328 is not set
+# CONFIG_LEDS_BCM6358 is not set
+# CONFIG_LEDS_CR0014114 is not set
+# CONFIG_LEDS_EL15203000 is not set
+# CONFIG_LEDS_LM3530 is not set
+# CONFIG_LEDS_LM3532 is not set
+# CONFIG_LEDS_LM3642 is not set
+# CONFIG_LEDS_LM3692X is not set
+# CONFIG_LEDS_PCA9532 is not set
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_FBXGWR_PMU=y
+# CONFIG_LEDS_LP3944 is not set
+# CONFIG_LEDS_LP3952 is not set
+# CONFIG_LEDS_LP8860 is not set
+# CONFIG_LEDS_PCA955X is not set
+# CONFIG_LEDS_PCA963X is not set
+# CONFIG_LEDS_PCA995X is not set
+# CONFIG_LEDS_DAC124S085 is not set
+# CONFIG_LEDS_BD2606MVV is not set
+# CONFIG_LEDS_BD2802 is not set
+# CONFIG_LEDS_LT3593 is not set
+# CONFIG_LEDS_TCA6507 is not set
+# CONFIG_LEDS_TLC591XX is not set
+# CONFIG_LEDS_LM355x is not set
+# CONFIG_LEDS_IS31FL319X is not set
+# CONFIG_LEDS_IS31FL32XX is not set
+# CONFIG_LEDS_IS31FL3299 is not set
+
+#
+# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM)
+#
+# CONFIG_LEDS_BLINKM is not set
+# CONFIG_LEDS_SYSCON is not set
+# CONFIG_LEDS_MLXREG is not set
+# CONFIG_LEDS_USER is not set
+# CONFIG_LEDS_SPI_BYTE is not set
+# CONFIG_LEDS_LM3697 is not set
+# CONFIG_LEDS_LED1202 is not set
+# CONFIG_LEDS_BCM63138 is not set
+
+#
+# Flash and Torch LED drivers
+#
+
+#
+# RGB LED drivers
+#
+
+#
+# LED Triggers
+#
+CONFIG_LEDS_TRIGGERS=y
+# CONFIG_LEDS_TRIGGER_TIMER is not set
+# CONFIG_LEDS_TRIGGER_ONESHOT is not set
+# CONFIG_LEDS_TRIGGER_MTD is not set
+# CONFIG_LEDS_TRIGGER_HEARTBEAT is not set
+# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set
+# CONFIG_LEDS_TRIGGER_CPU is not set
+# CONFIG_LEDS_TRIGGER_ACTIVITY is not set
+# CONFIG_LEDS_TRIGGER_GPIO is not set
+# CONFIG_LEDS_TRIGGER_DEFAULT_ON is not set
+
+#
+# iptables trigger is under Netfilter config (LED target)
+#
+# CONFIG_LEDS_TRIGGER_TRANSIENT is not set
+# CONFIG_LEDS_TRIGGER_CAMERA is not set
+# CONFIG_LEDS_TRIGGER_PANIC is not set
+# CONFIG_LEDS_TRIGGER_NETDEV is not set
+# CONFIG_LEDS_TRIGGER_PATTERN is not set
+# CONFIG_LEDS_TRIGGER_TTY is not set
+# CONFIG_LEDS_TRIGGER_INPUT_EVENTS is not set
+
+#
+# Simple LED drivers
+#
+# CONFIG_ACCESSIBILITY is not set
+# CONFIG_INFINIBAND is not set
+CONFIG_EDAC_SUPPORT=y
+# CONFIG_EDAC is not set
+# CONFIG_RTC_CLASS is not set
+# CONFIG_DMADEVICES is not set
+
+#
+# DMABUF options
+#
+# CONFIG_SYNC_FILE is not set
+# CONFIG_UDMABUF is not set
+# CONFIG_DMABUF_MOVE_NOTIFY is not set
+# CONFIG_DMABUF_DEBUG is not set
+# CONFIG_DMABUF_SELFTESTS is not set
+# CONFIG_DMABUF_HEAPS is not set
+# CONFIG_DMABUF_SYSFS_STATS is not set
+# end of DMABUF options
+
+# CONFIG_UIO is not set
+# CONFIG_VFIO is not set
+# CONFIG_VIRT_DRIVERS is not set
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_VDPA is not set
+# CONFIG_VHOST_MENU is not set
+
+#
+# Microsoft Hyper-V guest support
+#
+# end of Microsoft Hyper-V guest support
+
+# CONFIG_GREYBUS is not set
+# CONFIG_COMEDI is not set
+# CONFIG_STAGING is not set
+# CONFIG_GOLDFISH is not set
+# CONFIG_CHROME_PLATFORMS is not set
+# CONFIG_MELLANOX_PLATFORM is not set
+# CONFIG_SURFACE_PLATFORMS is not set
+# CONFIG_ARM64_PLATFORM_DEVICES is not set
+# CONFIG_FBXGW7R_PLATFORM is not set
+CONFIG_HAVE_CLK=y
+CONFIG_HAVE_CLK_PREPARE=y
+CONFIG_COMMON_CLK=y
+
+#
+# Clock driver for ARM Reference designs
+#
+# CONFIG_CLK_ICST is not set
+# CONFIG_CLK_SP810 is not set
+# end of Clock driver for ARM Reference designs
+
+# CONFIG_LMK04832 is not set
+# CONFIG_COMMON_CLK_MAX9485 is not set
+# CONFIG_COMMON_CLK_SI5341 is not set
+# CONFIG_COMMON_CLK_SI5351 is not set
+# CONFIG_COMMON_CLK_SI514 is not set
+# CONFIG_COMMON_CLK_SI544 is not set
+# CONFIG_COMMON_CLK_SI570 is not set
+# CONFIG_COMMON_CLK_CDCE706 is not set
+# CONFIG_COMMON_CLK_CDCE925 is not set
+# CONFIG_COMMON_CLK_CS2000_CP is not set
+# CONFIG_COMMON_CLK_AXI_CLKGEN is not set
+# CONFIG_COMMON_CLK_XGENE is not set
+# CONFIG_COMMON_CLK_RS9_PCIE is not set
+# CONFIG_COMMON_CLK_SI521XX is not set
+# CONFIG_COMMON_CLK_VC3 is not set
+# CONFIG_COMMON_CLK_VC5 is not set
+# CONFIG_COMMON_CLK_VC7 is not set
+# CONFIG_COMMON_CLK_FIXED_MMIO is not set
+# CONFIG_CLK_BCM_63XX is not set
+# CONFIG_XILINX_VCU is not set
+# CONFIG_COMMON_CLK_XLNX_CLKWZRD is not set
+# CONFIG_HWSPINLOCK is not set
+
+#
+# Clock Source drivers
+#
+CONFIG_TIMER_OF=y
+CONFIG_TIMER_PROBE=y
+CONFIG_ARM_ARCH_TIMER=y
+CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y
+CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND=y
+CONFIG_FSL_ERRATUM_A008585=y
+CONFIG_HISILICON_ERRATUM_161010101=y
+CONFIG_ARM64_ERRATUM_858921=y
+# CONFIG_ARM_TIMER_SP804 is not set
+# end of Clock Source drivers
+
+# CONFIG_MAILBOX is not set
+# CONFIG_IOMMU_SUPPORT is not set
+
+#
+# Remoteproc drivers
+#
+# CONFIG_REMOTEPROC is not set
+# end of Remoteproc drivers
+
+#
+# Rpmsg drivers
+#
+# CONFIG_RPMSG_VIRTIO is not set
+# end of Rpmsg drivers
+
+#
+# SOC (System On Chip) specific Drivers
+#
+
+#
+# Amlogic SoC drivers
+#
+# end of Amlogic SoC drivers
+
+#
+# Broadcom SoC drivers
+#
+CONFIG_SOC_BCM63XX=y
+# CONFIG_SOC_BCM63XX_RDP is not set
+CONFIG_SOC_BCM63XX_XRDP=y
+# CONFIG_SOC_BCM63XX_XRDP_IOCTL is not set
+CONFIG_UBUS4_BCM63158=y
+CONFIG_SOC_MEMC_BCM63158=m
+CONFIG_PROCMON_BCM63158=y
+# end of Broadcom SoC drivers
+
+#
+# NXP/Freescale QorIQ SoC drivers
+#
+# CONFIG_QUICC_ENGINE is not set
+# end of NXP/Freescale QorIQ SoC drivers
+
+#
+# fujitsu SoC drivers
+#
+# end of fujitsu SoC drivers
+
+#
+# i.MX SoC drivers
+#
+# end of i.MX SoC drivers
+
+#
+# Enable LiteX SoC Builder specific drivers
+#
+# CONFIG_LITEX_SOC_CONTROLLER is not set
+# end of Enable LiteX SoC Builder specific drivers
+
+# CONFIG_WPCM450_SOC is not set
+
+#
+# Qualcomm SoC drivers
+#
+CONFIG_QCOM_QMI_HELPERS=y
+# CONFIG_QCOM_LICENSE_MANAGER_SIMPLE is not set
+# end of Qualcomm SoC drivers
+
+# CONFIG_SOC_TI is not set
+
+#
+# Xilinx SoC drivers
+#
+# end of Xilinx SoC drivers
+# end of SOC (System On Chip) specific Drivers
+
+#
+# PM Domains
+#
+
+#
+# Amlogic PM Domains
+#
+# end of Amlogic PM Domains
+
+#
+# Broadcom PM Domains
+#
+# CONFIG_BCM_PMB is not set
+# end of Broadcom PM Domains
+
+#
+# i.MX PM Domains
+#
+# end of i.MX PM Domains
+
+#
+# Qualcomm PM Domains
+#
+# end of Qualcomm PM Domains
+# end of PM Domains
+
+# CONFIG_PM_DEVFREQ is not set
+# CONFIG_EXTCON is not set
+# CONFIG_MEMORY is not set
+# CONFIG_IIO is not set
+# CONFIG_NTB is not set
+# CONFIG_PWM is not set
+
+#
+# IRQ chip support
+#
+CONFIG_IRQCHIP=y
+CONFIG_ARM_GIC=y
+CONFIG_ARM_GIC_MAX_NR=1
+CONFIG_ARM_GIC_V2M=y
+CONFIG_ARM_GIC_V3=y
+CONFIG_ARM_GIC_V3_ITS=y
+CONFIG_IRQ_MSI_LIB=y
+# CONFIG_AL_FIC is not set
+# CONFIG_XILINX_INTC is not set
+CONFIG_PARTITION_PERCPU=y
+# end of IRQ chip support
+
+# CONFIG_IPACK_BUS is not set
+CONFIG_RESET_CONTROLLER=y
+# CONFIG_RESET_GPIO is not set
+# CONFIG_RESET_SIMPLE is not set
+# CONFIG_RESET_TI_SYSCON is not set
+# CONFIG_RESET_TI_TPS380X is not set
+
+#
+# PHY Subsystem
+#
+CONFIG_GENERIC_PHY=y
+# CONFIG_PHY_CAN_TRANSCEIVER is not set
+# CONFIG_PHY_NXP_PTN3222 is not set
+CONFIG_XDSL_PHY_API=m
+
+#
+# PHY drivers for Broadcom platforms
+#
+# CONFIG_BCM_KONA_USB2_PHY is not set
+# CONFIG_PHY_BRCM_USB_63138 is not set
+# CONFIG_PHY_BRCM_SATA is not set
+# CONFIG_PHY_BRCM_USB is not set
+# end of PHY drivers for Broadcom platforms
+
+# CONFIG_PHY_CADENCE_TORRENT is not set
+# CONFIG_PHY_CADENCE_DPHY is not set
+# CONFIG_PHY_CADENCE_DPHY_RX is not set
+# CONFIG_PHY_CADENCE_SIERRA is not set
+# CONFIG_PHY_CADENCE_SALVO is not set
+# CONFIG_PHY_PXA_28NM_HSIC is not set
+# CONFIG_PHY_PXA_28NM_USB2 is not set
+# CONFIG_PHY_LAN966X_SERDES is not set
+# CONFIG_PHY_MAPPHONE_MDM6600 is not set
+# CONFIG_PHY_OCELOT_SERDES is not set
+# end of PHY Subsystem
+
+# CONFIG_POWERCAP is not set
+# CONFIG_MCB is not set
+
+#
+# Performance monitor support
+#
+# CONFIG_ARM_CCI_PMU is not set
+# CONFIG_ARM_CCN is not set
+# CONFIG_ARM_CMN is not set
+# CONFIG_ARM_NI is not set
+CONFIG_ARM_PMU=y
+# CONFIG_ARM_SMMU_V3_PMU is not set
+CONFIG_ARM_PMUV3=y
+# CONFIG_ARM_DSU_PMU is not set
+# CONFIG_ARM_SPE_PMU is not set
+# CONFIG_HISI_PCIE_PMU is not set
+# CONFIG_HNS3_PMU is not set
+# CONFIG_DWC_PCIE_PMU is not set
+# CONFIG_ARM_CORESIGHT_PMU_ARCH_SYSTEM_PMU is not set
+# end of Performance monitor support
+
+CONFIG_RAS=y
+# CONFIG_USB4 is not set
+
+#
+# Android
+#
+# CONFIG_ANDROID_BINDER_IPC is not set
+# end of Android
+
+# CONFIG_LIBNVDIMM is not set
+# CONFIG_DAX is not set
+CONFIG_NVMEM=y
+CONFIG_NVMEM_SYSFS=y
+CONFIG_NVMEM_LAYOUTS=y
+
+#
+# Layout Types
+#
+# CONFIG_NVMEM_LAYOUT_SL28_VPD is not set
+# CONFIG_NVMEM_LAYOUT_ONIE_TLV is not set
+# CONFIG_NVMEM_LAYOUT_U_BOOT_ENV is not set
+# end of Layout Types
+
+# CONFIG_NVMEM_IGNORE_RO is not set
+# CONFIG_NVMEM_RMEM is not set
+# CONFIG_NVMEM_IOMAP is not set
+# CONFIG_NVMEM_U_BOOT_ENV is not set
+
+#
+# HW tracing support
+#
+# CONFIG_STM is not set
+# CONFIG_INTEL_TH is not set
+# CONFIG_HISI_PTT is not set
+# end of HW tracing support
+
+# CONFIG_FPGA is not set
+# CONFIG_FSI is not set
+CONFIG_TEE=y
+CONFIG_OPTEE=y
+# CONFIG_OPTEE_INSECURE_LOAD_IMAGE is not set
+CONFIG_PM_OPP=y
+# CONFIG_SIOX is not set
+# CONFIG_SLIMBUS is not set
+# CONFIG_INTERCONNECT is not set
+# CONFIG_COUNTER is not set
+# CONFIG_MOST is not set
+# CONFIG_PECI is not set
+# CONFIG_HTE is not set
+# CONFIG_CDX_BUS is not set
+# end of Device Drivers
+
+#
+# File systems
+#
+CONFIG_DCACHE_WORD_ACCESS=y
+# CONFIG_VALIDATE_FS_PARSER is not set
+CONFIG_FS_IOMAP=y
+CONFIG_BUFFER_HEAD=y
+CONFIG_LEGACY_DIRECT_IO=y
+CONFIG_EXT2_FS=y
+# CONFIG_EXT2_FS_XATTR is not set
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_FS_POSIX_ACL is not set
+# CONFIG_EXT3_FS_SECURITY is not set
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_FS_POSIX_ACL is not set
+# CONFIG_EXT4_FS_SECURITY is not set
+# CONFIG_EXT4_DEBUG is not set
+CONFIG_JBD2=y
+# CONFIG_JBD2_DEBUG is not set
+CONFIG_FS_MBCACHE=y
+# CONFIG_JFS_FS is not set
+CONFIG_XFS_FS=y
+# CONFIG_XFS_SUPPORT_V4 is not set
+CONFIG_XFS_SUPPORT_ASCII_CI=y
+# CONFIG_XFS_QUOTA is not set
+# CONFIG_XFS_POSIX_ACL is not set
+# CONFIG_XFS_RT is not set
+# CONFIG_XFS_ONLINE_SCRUB is not set
+# CONFIG_XFS_WARN is not set
+# CONFIG_XFS_DEBUG is not set
+# CONFIG_GFS2_FS is not set
+# CONFIG_OCFS2_FS is not set
+# CONFIG_BTRFS_FS is not set
+# CONFIG_NILFS2_FS is not set
+# CONFIG_F2FS_FS is not set
+# CONFIG_BCACHEFS_FS is not set
+CONFIG_FS_POSIX_ACL=y
+CONFIG_EXPORTFS=y
+# CONFIG_EXPORTFS_BLOCK_OPS is not set
+CONFIG_FILE_LOCKING=y
+# CONFIG_FS_ENCRYPTION is not set
+# CONFIG_FS_VERITY is not set
+CONFIG_FSNOTIFY=y
+CONFIG_DNOTIFY=y
+CONFIG_INOTIFY_USER=y
+CONFIG_FANOTIFY=y
+# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set
+# CONFIG_QUOTA is not set
+# CONFIG_AUTOFS_FS is not set
+CONFIG_FUSE_FS=y
+# CONFIG_CUSE is not set
+# CONFIG_VIRTIO_FS is not set
+# CONFIG_FUSE_PASSTHROUGH is not set
+# CONFIG_OVERLAY_FS is not set
+
+#
+# Caches
+#
+# end of Caches
+
+#
+# CD-ROM/DVD Filesystems
+#
+# CONFIG_ISO9660_FS is not set
+# CONFIG_UDF_FS is not set
+# end of CD-ROM/DVD Filesystems
+
+#
+# DOS/FAT/EXFAT/NT Filesystems
+#
+CONFIG_FAT_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_DEFAULT_CODEPAGE=850
+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1"
+# CONFIG_FAT_DEFAULT_UTF8 is not set
+# CONFIG_EXFAT_FS is not set
+CONFIG_NTFS3_FS=y
+# CONFIG_NTFS3_64BIT_CLUSTER is not set
+# CONFIG_NTFS3_LZX_XPRESS is not set
+# CONFIG_NTFS3_FS_POSIX_ACL is not set
+CONFIG_NTFS_FS=y
+CONFIG_EXFAT_FS_FBX=y
+# end of DOS/FAT/EXFAT/NT Filesystems
+
+#
+# Pseudo filesystems
+#
+CONFIG_PROC_FS=y
+# CONFIG_PROC_KCORE is not set
+CONFIG_PROC_SYSCTL=y
+CONFIG_PROC_PAGE_MONITOR=y
+# CONFIG_PROC_CHILDREN is not set
+CONFIG_KERNFS=y
+CONFIG_SYSFS=y
+CONFIG_TMPFS=y
+# CONFIG_TMPFS_POSIX_ACL is not set
+# CONFIG_TMPFS_XATTR is not set
+# CONFIG_TMPFS_INODE64 is not set
+# CONFIG_TMPFS_QUOTA is not set
+CONFIG_ARCH_SUPPORTS_HUGETLBFS=y
+# CONFIG_HUGETLBFS is not set
+CONFIG_ARCH_HAS_GIGANTIC_PAGE=y
+CONFIG_CONFIGFS_FS=y
+# end of Pseudo filesystems
+
+CONFIG_MISC_FILESYSTEMS=y
+# CONFIG_ORANGEFS_FS is not set
+# CONFIG_ADFS_FS is not set
+# CONFIG_AFFS_FS is not set
+# CONFIG_ECRYPT_FS is not set
+CONFIG_HFS_FS=y
+CONFIG_HFSPLUS_FS=y
+# CONFIG_BEFS_FS is not set
+# CONFIG_BFS_FS is not set
+# CONFIG_EFS_FS is not set
+# CONFIG_JFFS2_FS is not set
+CONFIG_CRAMFS=y
+CONFIG_CRAMFS_BLOCKDEV=y
+CONFIG_CRAMFS_MTD=y
+CONFIG_SQUASHFS=y
+# CONFIG_SQUASHFS_FILE_CACHE is not set
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_CHOICE_DECOMP_BY_MOUNT is not set
+CONFIG_SQUASHFS_COMPILE_DECOMP_SINGLE=y
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI is not set
+# CONFIG_SQUASHFS_COMPILE_DECOMP_MULTI_PERCPU is not set
+# CONFIG_SQUASHFS_XATTR is not set
+# CONFIG_SQUASHFS_ZLIB is not set
+# CONFIG_SQUASHFS_LZ4 is not set
+# CONFIG_SQUASHFS_LZO is not set
+CONFIG_SQUASHFS_XZ=y
+# CONFIG_SQUASHFS_ZSTD is not set
+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set
+# CONFIG_SQUASHFS_EMBEDDED is not set
+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3
+# CONFIG_VXFS_FS is not set
+# CONFIG_MINIX_FS is not set
+# CONFIG_OMFS_FS is not set
+# CONFIG_HPFS_FS is not set
+# CONFIG_QNX4FS_FS is not set
+# CONFIG_QNX6FS_FS is not set
+# CONFIG_ROMFS_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_DEFAULT_KMSG_BYTES=10240
+# CONFIG_PSTORE_COMPRESS is not set
+# CONFIG_PSTORE_CONSOLE is not set
+# CONFIG_PSTORE_PMSG is not set
+CONFIG_PSTORE_RAM=y
+# CONFIG_PSTORE_BLK is not set
+# CONFIG_SYSV_FS is not set
+# CONFIG_UFS_FS is not set
+# CONFIG_EROFS_FS is not set
+CONFIG_NETWORK_FILESYSTEMS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V2=y
+CONFIG_NFS_V3=y
+# CONFIG_NFS_V3_ACL is not set
+# CONFIG_NFS_V4 is not set
+CONFIG_ROOT_NFS=y
+# CONFIG_NFS_FSCACHE is not set
+# CONFIG_NFS_DISABLE_UDP_SUPPORT is not set
+CONFIG_NFSD=y
+# CONFIG_NFSD_V2 is not set
+# CONFIG_NFSD_V3_ACL is not set
+CONFIG_NFSD_V4=y
+# CONFIG_NFSD_BLOCKLAYOUT is not set
+# CONFIG_NFSD_SCSILAYOUT is not set
+# CONFIG_NFSD_FLEXFILELAYOUT is not set
+# CONFIG_NFSD_LEGACY_CLIENT_TRACKING is not set
+CONFIG_GRACE_PERIOD=y
+CONFIG_LOCKD=y
+CONFIG_LOCKD_V4=y
+CONFIG_NFS_COMMON=y
+# CONFIG_NFS_LOCALIO is not set
+CONFIG_SUNRPC=y
+CONFIG_SUNRPC_GSS=y
+CONFIG_RPCSEC_GSS_KRB5=y
+# CONFIG_SUNRPC_DEBUG is not set
+# CONFIG_CEPH_FS is not set
+# CONFIG_CIFS is not set
+CONFIG_SMB_SERVER=y
+CONFIG_SMB_INSECURE_SERVER=y
+CONFIG_SMB_SERVER_CHECK_CAP_NET_ADMIN=y
+# CONFIG_SMB_SERVER_KERBEROS5 is not set
+CONFIG_SMBFS=y
+# CONFIG_CODA_FS is not set
+# CONFIG_AFS_FS is not set
+CONFIG_NLS=y
+CONFIG_NLS_DEFAULT="iso8859-1"
+CONFIG_NLS_CODEPAGE_437=y
+# CONFIG_NLS_CODEPAGE_737 is not set
+# CONFIG_NLS_CODEPAGE_775 is not set
+CONFIG_NLS_CODEPAGE_850=y
+# CONFIG_NLS_CODEPAGE_852 is not set
+# CONFIG_NLS_CODEPAGE_855 is not set
+# CONFIG_NLS_CODEPAGE_857 is not set
+# CONFIG_NLS_CODEPAGE_860 is not set
+# CONFIG_NLS_CODEPAGE_861 is not set
+# CONFIG_NLS_CODEPAGE_862 is not set
+# CONFIG_NLS_CODEPAGE_863 is not set
+# CONFIG_NLS_CODEPAGE_864 is not set
+# CONFIG_NLS_CODEPAGE_865 is not set
+# CONFIG_NLS_CODEPAGE_866 is not set
+# CONFIG_NLS_CODEPAGE_869 is not set
+# CONFIG_NLS_CODEPAGE_936 is not set
+# CONFIG_NLS_CODEPAGE_950 is not set
+# CONFIG_NLS_CODEPAGE_932 is not set
+# CONFIG_NLS_CODEPAGE_949 is not set
+# CONFIG_NLS_CODEPAGE_874 is not set
+# CONFIG_NLS_ISO8859_8 is not set
+# CONFIG_NLS_CODEPAGE_1250 is not set
+# CONFIG_NLS_CODEPAGE_1251 is not set
+# CONFIG_NLS_ASCII is not set
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+# CONFIG_NLS_ISO8859_3 is not set
+# CONFIG_NLS_ISO8859_4 is not set
+# CONFIG_NLS_ISO8859_5 is not set
+# CONFIG_NLS_ISO8859_6 is not set
+# CONFIG_NLS_ISO8859_7 is not set
+# CONFIG_NLS_ISO8859_9 is not set
+# CONFIG_NLS_ISO8859_13 is not set
+# CONFIG_NLS_ISO8859_14 is not set
+# CONFIG_NLS_ISO8859_15 is not set
+# CONFIG_NLS_KOI8_R is not set
+# CONFIG_NLS_KOI8_U is not set
+# CONFIG_NLS_MAC_ROMAN is not set
+# CONFIG_NLS_MAC_CELTIC is not set
+# CONFIG_NLS_MAC_CENTEURO is not set
+# CONFIG_NLS_MAC_CROATIAN is not set
+# CONFIG_NLS_MAC_CYRILLIC is not set
+# CONFIG_NLS_MAC_GAELIC is not set
+# CONFIG_NLS_MAC_GREEK is not set
+# CONFIG_NLS_MAC_ICELAND is not set
+# CONFIG_NLS_MAC_INUIT is not set
+# CONFIG_NLS_MAC_ROMANIAN is not set
+# CONFIG_NLS_MAC_TURKISH is not set
+CONFIG_NLS_UTF8=y
+CONFIG_NLS_UCS2_UTILS=y
+# CONFIG_DLM is not set
+# CONFIG_UNICODE is not set
+# end of File systems
+
+#
+# Security options
+#
+CONFIG_KEYS=y
+# CONFIG_KEYS_REQUEST_CACHE is not set
+# CONFIG_PERSISTENT_KEYRINGS is not set
+# CONFIG_BIG_KEYS is not set
+# CONFIG_TRUSTED_KEYS is not set
+# CONFIG_ENCRYPTED_KEYS is not set
+# CONFIG_KEY_DH_OPERATIONS is not set
+# CONFIG_SECURITY_DMESG_RESTRICT is not set
+CONFIG_PROC_MEM_ALWAYS_FORCE=y
+# CONFIG_PROC_MEM_FORCE_PTRACE is not set
+# CONFIG_PROC_MEM_NO_FORCE is not set
+# CONFIG_SECURITY is not set
+# CONFIG_SECURITYFS is not set
+# CONFIG_HARDENED_USERCOPY is not set
+# CONFIG_FORTIFY_SOURCE is not set
+# CONFIG_STATIC_USERMODEHELPER is not set
+CONFIG_DEFAULT_SECURITY_DAC=y
+CONFIG_LSM="yama,loadpin,safesetid,integrity"
+
+#
+# Kernel hardening options
+#
+
+#
+# Memory initialization
+#
+CONFIG_CC_HAS_AUTO_VAR_INIT_PATTERN=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO_BARE=y
+CONFIG_CC_HAS_AUTO_VAR_INIT_ZERO=y
+CONFIG_INIT_STACK_NONE=y
+# CONFIG_INIT_STACK_ALL_PATTERN is not set
+# CONFIG_INIT_STACK_ALL_ZERO is not set
+# CONFIG_GCC_PLUGIN_STACKLEAK is not set
+# CONFIG_INIT_ON_ALLOC_DEFAULT_ON is not set
+# CONFIG_INIT_ON_FREE_DEFAULT_ON is not set
+CONFIG_CC_HAS_ZERO_CALL_USED_REGS=y
+# CONFIG_ZERO_CALL_USED_REGS is not set
+# end of Memory initialization
+
+#
+# Hardening of kernel data structures
+#
+CONFIG_LIST_HARDENED=y
+# CONFIG_BUG_ON_DATA_CORRUPTION is not set
+# end of Hardening of kernel data structures
+
+CONFIG_RANDSTRUCT_NONE=y
+# CONFIG_RANDSTRUCT_FULL is not set
+# CONFIG_RANDSTRUCT_PERFORMANCE is not set
+# end of Kernel hardening options
+# end of Security options
+
+CONFIG_CRYPTO=y
+
+#
+# Crypto core or helper
+#
+CONFIG_CRYPTO_ALGAPI=y
+CONFIG_CRYPTO_ALGAPI2=y
+CONFIG_CRYPTO_AEAD=y
+CONFIG_CRYPTO_AEAD2=y
+CONFIG_CRYPTO_SIG=y
+CONFIG_CRYPTO_SIG2=y
+CONFIG_CRYPTO_SKCIPHER=y
+CONFIG_CRYPTO_SKCIPHER2=y
+CONFIG_CRYPTO_HASH=y
+CONFIG_CRYPTO_HASH2=y
+CONFIG_CRYPTO_RNG=y
+CONFIG_CRYPTO_RNG2=y
+CONFIG_CRYPTO_RNG_DEFAULT=y
+CONFIG_CRYPTO_AKCIPHER2=y
+CONFIG_CRYPTO_AKCIPHER=y
+CONFIG_CRYPTO_KPP2=y
+CONFIG_CRYPTO_KPP=y
+CONFIG_CRYPTO_ACOMP2=y
+CONFIG_CRYPTO_MANAGER=y
+CONFIG_CRYPTO_MANAGER2=y
+# CONFIG_CRYPTO_USER is not set
+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_NULL2=y
+# CONFIG_CRYPTO_PCRYPT is not set
+CONFIG_CRYPTO_CRYPTD=y
+CONFIG_CRYPTO_AUTHENC=y
+# CONFIG_CRYPTO_TEST is not set
+# end of Crypto core or helper
+
+#
+# Public-key cryptography
+#
+CONFIG_CRYPTO_RSA=y
+# CONFIG_CRYPTO_DH is not set
+CONFIG_CRYPTO_ECC=y
+CONFIG_CRYPTO_ECDH=y
+CONFIG_CRYPTO_ECDSA=y
+# CONFIG_CRYPTO_ECRDSA is not set
+# CONFIG_CRYPTO_CURVE25519 is not set
+# end of Public-key cryptography
+
+#
+# Block ciphers
+#
+CONFIG_CRYPTO_AES=y
+# CONFIG_CRYPTO_AES_TI is not set
+# CONFIG_CRYPTO_ARIA is not set
+# CONFIG_CRYPTO_BLOWFISH is not set
+# CONFIG_CRYPTO_CAMELLIA is not set
+# CONFIG_CRYPTO_CAST5 is not set
+# CONFIG_CRYPTO_CAST6 is not set
+CONFIG_CRYPTO_DES=y
+# CONFIG_CRYPTO_FCRYPT is not set
+# CONFIG_CRYPTO_SERPENT is not set
+# CONFIG_CRYPTO_SM4_GENERIC is not set
+# CONFIG_CRYPTO_TWOFISH is not set
+# end of Block ciphers
+
+#
+# Length-preserving ciphers and modes
+#
+# CONFIG_CRYPTO_ADIANTUM is not set
+CONFIG_CRYPTO_CHACHA20=y
+CONFIG_CRYPTO_CBC=y
+CONFIG_CRYPTO_CTR=y
+# CONFIG_CRYPTO_CTS is not set
+CONFIG_CRYPTO_ECB=y
+# CONFIG_CRYPTO_HCTR2 is not set
+# CONFIG_CRYPTO_KEYWRAP is not set
+# CONFIG_CRYPTO_LRW is not set
+# CONFIG_CRYPTO_PCBC is not set
+# CONFIG_CRYPTO_XTS is not set
+# end of Length-preserving ciphers and modes
+
+#
+# AEAD (authenticated encryption with associated data) ciphers
+#
+# CONFIG_CRYPTO_AEGIS128 is not set
+CONFIG_CRYPTO_CHACHA20POLY1305=y
+CONFIG_CRYPTO_CCM=y
+CONFIG_CRYPTO_GCM=y
+CONFIG_CRYPTO_GENIV=y
+CONFIG_CRYPTO_SEQIV=y
+CONFIG_CRYPTO_ECHAINIV=y
+CONFIG_CRYPTO_ESSIV=y
+# end of AEAD (authenticated encryption with associated data) ciphers
+
+#
+# Hashes, digests, and MACs
+#
+# CONFIG_CRYPTO_BLAKE2B is not set
+CONFIG_CRYPTO_CMAC=y
+CONFIG_CRYPTO_GHASH=y
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MICHAEL_MIC=y
+CONFIG_CRYPTO_POLY1305=y
+# CONFIG_CRYPTO_RMD160 is not set
+CONFIG_CRYPTO_SHA1=y
+CONFIG_CRYPTO_SHA256=y
+CONFIG_CRYPTO_SHA512=y
+CONFIG_CRYPTO_SHA3=y
+# CONFIG_CRYPTO_SM3_GENERIC is not set
+# CONFIG_CRYPTO_STREEBOG is not set
+# CONFIG_CRYPTO_VMAC is not set
+# CONFIG_CRYPTO_WP512 is not set
+# CONFIG_CRYPTO_XCBC is not set
+# CONFIG_CRYPTO_XXHASH is not set
+# end of Hashes, digests, and MACs
+
+#
+# CRCs (cyclic redundancy checks)
+#
+CONFIG_CRYPTO_CRC32C=y
+# CONFIG_CRYPTO_CRC32 is not set
+# CONFIG_CRYPTO_CRCT10DIF is not set
+# end of CRCs (cyclic redundancy checks)
+
+#
+# Compression
+#
+CONFIG_CRYPTO_DEFLATE=y
+CONFIG_CRYPTO_LZO=y
+# CONFIG_CRYPTO_842 is not set
+# CONFIG_CRYPTO_LZ4 is not set
+# CONFIG_CRYPTO_LZ4HC is not set
+# CONFIG_CRYPTO_ZSTD is not set
+# end of Compression
+
+#
+# Random number generation
+#
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+CONFIG_CRYPTO_DRBG_MENU=y
+CONFIG_CRYPTO_DRBG_HMAC=y
+# CONFIG_CRYPTO_DRBG_HASH is not set
+# CONFIG_CRYPTO_DRBG_CTR is not set
+CONFIG_CRYPTO_DRBG=y
+CONFIG_CRYPTO_JITTERENTROPY=y
+CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKS=64
+CONFIG_CRYPTO_JITTERENTROPY_MEMORY_BLOCKSIZE=32
+CONFIG_CRYPTO_JITTERENTROPY_OSR=1
+# end of Random number generation
+
+#
+# Userspace interface
+#
+# CONFIG_CRYPTO_USER_API_HASH is not set
+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set
+# CONFIG_CRYPTO_USER_API_RNG is not set
+# CONFIG_CRYPTO_USER_API_AEAD is not set
+# end of Userspace interface
+
+CONFIG_CRYPTO_HASH_INFO=y
+# CONFIG_CRYPTO_NHPOLY1305_NEON is not set
+CONFIG_CRYPTO_CHACHA20_NEON=y
+
+#
+# Accelerated Cryptographic Algorithms for CPU (arm64)
+#
+# CONFIG_CRYPTO_GHASH_ARM64_CE is not set
+CONFIG_CRYPTO_POLY1305_NEON=y
+CONFIG_CRYPTO_SHA1_ARM64_CE=y
+CONFIG_CRYPTO_SHA256_ARM64=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_SHA512_ARM64=y
+CONFIG_CRYPTO_SHA512_ARM64_CE=y
+# CONFIG_CRYPTO_SHA3_ARM64 is not set
+# CONFIG_CRYPTO_SM3_NEON is not set
+# CONFIG_CRYPTO_SM3_ARM64_CE is not set
+# CONFIG_CRYPTO_POLYVAL_ARM64_CE is not set
+CONFIG_CRYPTO_AES_ARM64=y
+CONFIG_CRYPTO_AES_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
+# CONFIG_CRYPTO_AES_ARM64_BS is not set
+# CONFIG_CRYPTO_SM4_ARM64_CE is not set
+# CONFIG_CRYPTO_SM4_ARM64_CE_BLK is not set
+# CONFIG_CRYPTO_SM4_ARM64_NEON_BLK is not set
+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
+# CONFIG_CRYPTO_SM4_ARM64_CE_CCM is not set
+# CONFIG_CRYPTO_SM4_ARM64_CE_GCM is not set
+# end of Accelerated Cryptographic Algorithms for CPU (arm64)
+
+# CONFIG_CRYPTO_HW is not set
+CONFIG_ASYMMETRIC_KEY_TYPE=y
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
+CONFIG_X509_CERTIFICATE_PARSER=y
+# CONFIG_PKCS8_PRIVATE_KEY_PARSER is not set
+CONFIG_PKCS7_MESSAGE_PARSER=y
+# CONFIG_FIPS_SIGNATURE_SELFTEST is not set
+
+#
+# Certificates for signature checking
+#
+CONFIG_SYSTEM_TRUSTED_KEYRING=y
+CONFIG_SYSTEM_TRUSTED_KEYS=""
+# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set
+# CONFIG_SECONDARY_TRUSTED_KEYRING is not set
+# CONFIG_SYSTEM_BLACKLIST_KEYRING is not set
+# end of Certificates for signature checking
+
+#
+# Library routines
+#
+# CONFIG_PACKING is not set
+CONFIG_BITREVERSE=y
+CONFIG_HAVE_ARCH_BITREVERSE=y
+CONFIG_GENERIC_STRNCPY_FROM_USER=y
+CONFIG_GENERIC_STRNLEN_USER=y
+CONFIG_GENERIC_NET_UTILS=y
+# CONFIG_CORDIC is not set
+# CONFIG_PRIME_NUMBERS is not set
+CONFIG_RATIONAL=y
+CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y
+CONFIG_ARCH_HAS_FAST_MULTIPLIER=y
+CONFIG_ARCH_USE_SYM_ANNOTATIONS=y
+# CONFIG_INDIRECT_PIO is not set
+
+#
+# Crypto library routines
+#
+CONFIG_CRYPTO_LIB_UTILS=y
+CONFIG_CRYPTO_LIB_AES=y
+CONFIG_CRYPTO_LIB_ARC4=y
+CONFIG_CRYPTO_LIB_GF128MUL=y
+CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC=y
+CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA=y
+CONFIG_CRYPTO_LIB_CHACHA_GENERIC=y
+CONFIG_CRYPTO_LIB_CHACHA=y
+CONFIG_CRYPTO_LIB_CURVE25519_GENERIC=y
+CONFIG_CRYPTO_LIB_CURVE25519=y
+CONFIG_CRYPTO_LIB_DES=y
+CONFIG_CRYPTO_LIB_POLY1305_RSIZE=9
+CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305=y
+CONFIG_CRYPTO_LIB_POLY1305_GENERIC=y
+CONFIG_CRYPTO_LIB_POLY1305=y
+CONFIG_CRYPTO_LIB_CHACHA20POLY1305=y
+CONFIG_CRYPTO_LIB_SHA1=y
+CONFIG_CRYPTO_LIB_SHA256=y
+# end of Crypto library routines
+
+# CONFIG_CRC_CCITT is not set
+CONFIG_CRC16=y
+# CONFIG_CRC_T10DIF is not set
+# CONFIG_CRC64_ROCKSOFT is not set
+CONFIG_CRC_ITU_T=y
+CONFIG_CRC32=y
+# CONFIG_CRC32_SELFTEST is not set
+CONFIG_CRC32_SLICEBY8=y
+# CONFIG_CRC32_SLICEBY4 is not set
+# CONFIG_CRC32_SARWATE is not set
+# CONFIG_CRC32_BIT is not set
+# CONFIG_CRC64 is not set
+# CONFIG_CRC4 is not set
+# CONFIG_CRC7 is not set
+CONFIG_LIBCRC32C=y
+# CONFIG_CRC8 is not set
+CONFIG_XXHASH=y
+CONFIG_AUDIT_GENERIC=y
+CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y
+# CONFIG_RANDOM32_SELFTEST is not set
+CONFIG_ZLIB_INFLATE=y
+CONFIG_ZLIB_DEFLATE=y
+CONFIG_LZO_COMPRESS=y
+CONFIG_LZO_DECOMPRESS=y
+CONFIG_XZ_DEC=y
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+CONFIG_XZ_DEC_ARM=y
+CONFIG_XZ_DEC_ARMTHUMB=y
+# CONFIG_XZ_DEC_ARM64 is not set
+# CONFIG_XZ_DEC_SPARC is not set
+# CONFIG_XZ_DEC_RISCV is not set
+# CONFIG_XZ_DEC_MICROLZMA is not set
+CONFIG_XZ_DEC_BCJ=y
+# CONFIG_XZ_DEC_TEST is not set
+CONFIG_GENERIC_ALLOCATOR=y
+CONFIG_REED_SOLOMON=y
+CONFIG_REED_SOLOMON_ENC8=y
+CONFIG_REED_SOLOMON_DEC8=y
+CONFIG_ASSOCIATIVE_ARRAY=y
+CONFIG_HAS_IOMEM=y
+CONFIG_HAS_IOPORT=y
+CONFIG_HAS_IOPORT_MAP=y
+CONFIG_HAS_DMA=y
+CONFIG_NEED_SG_DMA_LENGTH=y
+CONFIG_NEED_DMA_MAP_STATE=y
+CONFIG_ARCH_DMA_ADDR_T_64BIT=y
+CONFIG_DMA_DECLARE_COHERENT=y
+CONFIG_ARCH_HAS_SETUP_DMA_OPS=y
+CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE=y
+CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU=y
+CONFIG_ARCH_HAS_DMA_PREP_COHERENT=y
+CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED=y
+CONFIG_SWIOTLB=y
+# CONFIG_SWIOTLB_DYNAMIC is not set
+CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC=y
+CONFIG_DMA_NEED_SYNC=y
+# CONFIG_DMA_RESTRICTED_POOL is not set
+CONFIG_DMA_NONCOHERENT_MMAP=y
+CONFIG_DMA_COHERENT_POOL=y
+CONFIG_DMA_DIRECT_REMAP=y
+# CONFIG_DMA_API_DEBUG is not set
+# CONFIG_DMA_MAP_BENCHMARK is not set
+CONFIG_SGL_ALLOC=y
+CONFIG_CPU_RMAP=y
+CONFIG_DQL=y
+CONFIG_NLATTR=y
+CONFIG_CLZ_TAB=y
+# CONFIG_IRQ_POLL is not set
+CONFIG_MPILIB=y
+CONFIG_LIBFDT=y
+CONFIG_OID_REGISTRY=y
+CONFIG_HAVE_GENERIC_VDSO=y
+CONFIG_GENERIC_GETTIMEOFDAY=y
+CONFIG_GENERIC_VDSO_TIME_NS=y
+CONFIG_VDSO_GETRANDOM=y
+CONFIG_SG_POOL=y
+CONFIG_ARCH_STACKWALK=y
+CONFIG_STACKDEPOT=y
+CONFIG_STACKDEPOT_ALWAYS_INIT=y
+CONFIG_STACKDEPOT_MAX_FRAMES=64
+CONFIG_SBITMAP=y
+# CONFIG_LWQ_TEST is not set
+CONFIG_ARCH_HAS_FBXSERIAL=y
+CONFIG_FBXSERIAL=y
+# end of Library routines
+
+CONFIG_GENERIC_IOREMAP=y
+CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED=y
+
+#
+# Kernel hacking
+#
+
+#
+# printk and dmesg options
+#
+CONFIG_PRINTK_TIME=y
+# CONFIG_PRINTK_CALLER is not set
+# CONFIG_STACKTRACE_BUILD_ID is not set
+CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7
+CONFIG_CONSOLE_LOGLEVEL_QUIET=4
+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4
+# CONFIG_BOOT_PRINTK_DELAY is not set
+# CONFIG_DYNAMIC_DEBUG is not set
+# CONFIG_DYNAMIC_DEBUG_CORE is not set
+# CONFIG_SYMBOLIC_ERRNAME is not set
+CONFIG_DEBUG_BUGVERBOSE=y
+# end of printk and dmesg options
+
+CONFIG_DEBUG_KERNEL=y
+# CONFIG_DEBUG_MISC is not set
+
+#
+# Compile-time checks and compiler options
+#
+CONFIG_AS_HAS_NON_CONST_ULEB128=y
+CONFIG_DEBUG_INFO_NONE=y
+# CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT is not set
+# CONFIG_DEBUG_INFO_DWARF4 is not set
+# CONFIG_DEBUG_INFO_DWARF5 is not set
+CONFIG_FRAME_WARN=2048
+# CONFIG_STRIP_ASM_SYMS is not set
+# CONFIG_READABLE_ASM is not set
+# CONFIG_HEADERS_INSTALL is not set
+# CONFIG_DEBUG_SECTION_MISMATCH is not set
+CONFIG_SECTION_MISMATCH_WARN_ONLY=y
+# CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B is not set
+CONFIG_ARCH_WANT_FRAME_POINTERS=y
+CONFIG_FRAME_POINTER=y
+# CONFIG_VMLINUX_MAP is not set
+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set
+# end of Compile-time checks and compiler options
+
+#
+# Generic Kernel Debugging Instruments
+#
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1
+CONFIG_MAGIC_SYSRQ_SERIAL=y
+CONFIG_MAGIC_SYSRQ_SERIAL_SEQUENCE=""
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_FS_ALLOW_ALL=y
+# CONFIG_DEBUG_FS_DISALLOW_MOUNT is not set
+# CONFIG_DEBUG_FS_ALLOW_NONE is not set
+CONFIG_HAVE_ARCH_KGDB=y
+# CONFIG_KGDB is not set
+CONFIG_ARCH_HAS_UBSAN=y
+# CONFIG_UBSAN is not set
+CONFIG_HAVE_ARCH_KCSAN=y
+CONFIG_HAVE_KCSAN_COMPILER=y
+# CONFIG_KCSAN is not set
+# end of Generic Kernel Debugging Instruments
+
+#
+# Networking Debugging
+#
+# CONFIG_NET_DEV_REFCNT_TRACKER is not set
+# CONFIG_NET_NS_REFCNT_TRACKER is not set
+# CONFIG_DEBUG_NET is not set
+# CONFIG_DEBUG_NET_SMALL_RTNL is not set
+# end of Networking Debugging
+
+#
+# Memory Debugging
+#
+CONFIG_PAGE_EXTENSION=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT=y
+CONFIG_SLUB_DEBUG=y
+# CONFIG_SLUB_DEBUG_ON is not set
+# CONFIG_PAGE_OWNER is not set
+# CONFIG_PAGE_TABLE_CHECK is not set
+CONFIG_PAGE_POISONING=y
+# CONFIG_DEBUG_RODATA_TEST is not set
+CONFIG_ARCH_HAS_DEBUG_WX=y
+# CONFIG_DEBUG_WX is not set
+CONFIG_GENERIC_PTDUMP=y
+# CONFIG_PTDUMP_DEBUGFS is not set
+CONFIG_HAVE_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK=y
+CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE=16000
+# CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is not set
+# CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN is not set
+# CONFIG_PER_VMA_LOCK_STATS is not set
+CONFIG_DEBUG_OBJECTS=y
+CONFIG_DEBUG_OBJECTS_SELFTEST=y
+CONFIG_DEBUG_OBJECTS_FREE=y
+CONFIG_DEBUG_OBJECTS_TIMERS=y
+CONFIG_DEBUG_OBJECTS_WORK=y
+CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT=1
+# CONFIG_SHRINKER_DEBUG is not set
+# CONFIG_DEBUG_STACK_USAGE is not set
+# CONFIG_SCHED_STACK_END_CHECK is not set
+CONFIG_ARCH_HAS_DEBUG_VM_PGTABLE=y
+CONFIG_DEBUG_VM_IRQSOFF=y
+CONFIG_DEBUG_VM=y
+# CONFIG_DEBUG_VM_MAPLE_TREE is not set
+CONFIG_DEBUG_VM_RB=y
+CONFIG_DEBUG_VM_PGFLAGS=y
+# CONFIG_DEBUG_VM_PGTABLE is not set
+CONFIG_ARCH_HAS_DEBUG_VIRTUAL=y
+CONFIG_DEBUG_VIRTUAL=y
+CONFIG_DEBUG_MEMORY_INIT=y
+# CONFIG_DEBUG_PER_CPU_MAPS is not set
+# CONFIG_MEM_ALLOC_PROFILING is not set
+CONFIG_HAVE_ARCH_KASAN=y
+CONFIG_HAVE_ARCH_KASAN_SW_TAGS=y
+CONFIG_HAVE_ARCH_KASAN_VMALLOC=y
+CONFIG_CC_HAS_KASAN_GENERIC=y
+CONFIG_CC_HAS_KASAN_SW_TAGS=y
+CONFIG_CC_HAS_WORKING_NOSANITIZE_ADDRESS=y
+# CONFIG_KASAN is not set
+CONFIG_HAVE_ARCH_KFENCE=y
+# CONFIG_KFENCE is not set
+# end of Memory Debugging
+
+CONFIG_DEBUG_SHIRQ=y
+
+#
+# Debug Oops, Lockups and Hangs
+#
+# CONFIG_PANIC_ON_OOPS is not set
+CONFIG_PANIC_ON_OOPS_VALUE=0
+CONFIG_PANIC_TIMEOUT=10
+# CONFIG_SOFTLOCKUP_DETECTOR is not set
+CONFIG_HAVE_HARDLOCKUP_DETECTOR_BUDDY=y
+# CONFIG_HARDLOCKUP_DETECTOR is not set
+CONFIG_DETECT_HUNG_TASK=y
+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120
+CONFIG_BOOTPARAM_HUNG_TASK_PANIC=y
+# CONFIG_WQ_WATCHDOG is not set
+# CONFIG_WQ_CPU_INTENSIVE_REPORT is not set
+# CONFIG_TEST_LOCKUP is not set
+# end of Debug Oops, Lockups and Hangs
+
+#
+# Scheduler Debugging
+#
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_SCHEDSTATS is not set
+# end of Scheduler Debugging
+
+#
+# Lock Debugging (spinlocks, mutexes, etc...)
+#
+CONFIG_LOCK_DEBUGGING_SUPPORT=y
+# CONFIG_PROVE_LOCKING is not set
+# CONFIG_LOCK_STAT is not set
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set
+# CONFIG_DEBUG_RWSEMS is not set
+# CONFIG_DEBUG_LOCK_ALLOC is not set
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
+# CONFIG_LOCK_TORTURE_TEST is not set
+# CONFIG_WW_MUTEX_SELFTEST is not set
+# CONFIG_SCF_TORTURE_TEST is not set
+# CONFIG_CSD_LOCK_WAIT_DEBUG is not set
+# end of Lock Debugging (spinlocks, mutexes, etc...)
+
+# CONFIG_DEBUG_IRQFLAGS is not set
+CONFIG_STACKTRACE=y
+# CONFIG_WARN_ALL_UNSEEDED_RANDOM is not set
+# CONFIG_DEBUG_KOBJECT is not set
+# CONFIG_DEBUG_KOBJECT_RELEASE is not set
+
+#
+# Debug kernel data structures
+#
+CONFIG_DEBUG_LIST=y
+# CONFIG_DEBUG_PLIST is not set
+CONFIG_DEBUG_SG=y
+CONFIG_DEBUG_NOTIFIERS=y
+# CONFIG_DEBUG_MAPLE_TREE is not set
+# end of Debug kernel data structures
+
+#
+# RCU Debugging
+#
+# CONFIG_RCU_SCALE_TEST is not set
+# CONFIG_RCU_TORTURE_TEST is not set
+# CONFIG_RCU_REF_SCALE_TEST is not set
+CONFIG_RCU_CPU_STALL_TIMEOUT=21
+CONFIG_RCU_EXP_CPU_STALL_TIMEOUT=0
+# CONFIG_RCU_CPU_STALL_CPUTIME is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_RCU_EQS_DEBUG is not set
+# end of RCU Debugging
+
+# CONFIG_DEBUG_WQ_FORCE_RR_CPU is not set
+# CONFIG_LATENCYTOP is not set
+CONFIG_USER_STACKTRACE_SUPPORT=y
+CONFIG_HAVE_FUNCTION_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
+CONFIG_HAVE_FUNCTION_GRAPH_RETVAL=y
+CONFIG_HAVE_DYNAMIC_FTRACE=y
+CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS=y
+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y
+CONFIG_HAVE_C_RECORDMCOUNT=y
+CONFIG_TRACING_SUPPORT=y
+# CONFIG_FTRACE is not set
+# CONFIG_SAMPLES is not set
+CONFIG_HAVE_SAMPLE_FTRACE_DIRECT=y
+CONFIG_HAVE_SAMPLE_FTRACE_DIRECT_MULTI=y
+
+#
+# arm64 Debugging
+#
+# CONFIG_PID_IN_CONTEXTIDR is not set
+# CONFIG_ARM64_RELOC_TEST is not set
+# CONFIG_CORESIGHT is not set
+# end of arm64 Debugging
+
+#
+# Kernel Testing and Coverage
+#
+# CONFIG_KUNIT is not set
+# CONFIG_NOTIFIER_ERROR_INJECTION is not set
+# CONFIG_FAULT_INJECTION is not set
+CONFIG_ARCH_HAS_KCOV=y
+CONFIG_CC_HAS_SANCOV_TRACE_PC=y
+# CONFIG_KCOV is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
+CONFIG_ARCH_USE_MEMTEST=y
+CONFIG_MEMTEST=y
+# end of Kernel Testing and Coverage
+
+#
+# Rust hacking
+#
+# end of Rust hacking
+# end of Kernel hacking
diff -Nruw linux-6.13.12-fbx/drivers/char/diag./Kconfig linux-6.13.12-fbx/drivers/char/diag/Kconfig
--- linux-6.13.12-fbx/drivers/char/diag./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/char/diag/Kconfig	2025-09-25 17:40:31.347346300 +0200
@@ -0,0 +1,38 @@
+menu "Diag Support"
+
+config DIAG_CHAR
+	tristate "char driver interface and diag forwarding to/from modem"
+	select CRC_CCITT
+	help
+	 Char driver interface for diag user space and diag-forwarding to modem ARM and back.
+	 This enables diagchar for maemo usb gadget or android usb gadget based on config selected.
+endmenu
+
+menu "DIAG traffic over USB"
+
+config DIAG_OVER_USB
+	bool "Enable DIAG traffic to go over USB"
+	depends on DIAG_CHAR
+	help
+	 This feature helps segregate code required for DIAG traffic to go over USB.
+endmenu
+
+menu "DIAG traffic over QRTR"
+
+config DIAG_OVER_QRTR
+	bool "Enable DIAG traffic to go over QRTR"
+	depends on QRTR && DIAG_CHAR
+	default n
+	help
+	 This feature helps segregate code required for DIAG traffic to go over QRTR.
+endmenu
+
+menu "HSIC/SMUX support for DIAG"
+
+config DIAGFWD_BRIDGE_CODE
+	bool "Enable QSC/9K DIAG traffic over SMUX/HSIC"
+	depends on DIAG_CHAR
+	depends on USB_QCOM_DIAG_BRIDGE || MHI_BUS
+	help
+	 SMUX/HSIC Transport Layer for DIAG Router
+endmenu
diff -Nruw linux-6.13.12-fbx/drivers/char/diag./Makefile linux-6.13.12-fbx/drivers/char/diag/Makefile
--- linux-6.13.12-fbx/drivers/char/diag./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/char/diag/Makefile	2025-09-25 17:40:31.347346300 +0200
@@ -0,0 +1,21 @@
+obj-$(CONFIG_DIAG_CHAR) := diagchar.o
+obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_hsic.o
+obj-$(CONFIG_USB_QCOM_DIAG_BRIDGE) += diagfwd_smux.o
+diagchar-objs := diagchar_core.o diagchar_hdlc.o diagfwd.o diagfwd_peripheral.o diag_mux.o diag_memorydevice.o diag_usb.o diagmem.o diagfwd_cntl.o diag_dci.o diag_masks.o diag_debugfs.o
+# silence prototype warnings from this vendor code (ccflags-y is the
+ccflags-y += -Wno-missing-prototypes -Wno-missing-declarations
+
+ifdef CONFIG_DIAG_OVER_QRTR
+diagchar-objs += qcom_diagfwd_socket.o
+else
+diagchar-objs += diagfwd_socket.o
+endif
+
+ifdef CONFIG_DIAGFWD_BRIDGE_CODE
+diagchar-objs += diagfwd_bridge.o
+
+ifdef CONFIG_MHI_BUS
+diagchar-objs += diagfwd_mhi.o
+endif
+
+endif
diff -Nruw linux-6.13.12-fbx/drivers/clk/cortina./Kconfig linux-6.13.12-fbx/drivers/clk/cortina/Kconfig
--- linux-6.13.12-fbx/drivers/clk/cortina./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/clk/cortina/Kconfig	2025-09-25 17:40:31.383346479 +0200
@@ -0,0 +1,3 @@
+config CLK_CORTINA_VENUS
+	default ARCH_CORTINA_VENUS
+	bool
diff -Nruw linux-6.13.12-fbx/drivers/clk/cortina./Makefile linux-6.13.12-fbx/drivers/clk/cortina/Makefile
--- linux-6.13.12-fbx/drivers/clk/cortina./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/clk/cortina/Makefile	2025-09-25 17:40:31.387346499 +0200
@@ -0,0 +1 @@
+obj-$(CONFIG_CLK_CORTINA_VENUS)		+= clk-ca8289.o clk-mmc-phase.o
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/cpufreq/bcm63158-cpufreq.c	2025-09-25 17:40:31.487346994 +0200
@@ -0,0 +1,430 @@
+/*
+ * bcm63158-cpufreq.c for bcm63158-cpufreq
+ * Created by <nschichan@freebox.fr> on Fri Jun 26 16:07:36 2020
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+
+#include <linux/pmc-bcm63xx.h>
+
+struct bcm63158_cpufreq {
+	struct bcm63xx_pmc *pmc;
+	struct device *dev;
+	struct cpufreq_driver drv;
+	struct cpufreq_frequency_table *freq_table;
+};
+
+/*
+ * CPU frequency change on 63158
+ */
+#define BIU_PLL_RESET_REG			(0x4 << 2)
+#define  BIU_PLL_RESET_BYP_EN			(1 << 20)
+#define  BIU_PLL_RESET_PLL_RST			(1 << 3)
+
+#define BIU_PLL_PDIV_REG			(0x8 << 2)
+#define  BIU_PLL_PDIV_NDIV_OVERRIDE		(1 << 31)
+
+#define BIU_PLL_NDIV_REG			(0x7 << 2)
+#define  BIU_PLL_NDIV_NDIV_MASK			(0x3ff)
+
+#define BIU_PLL_POSTDIV_REG			(0xb << 2)
+#define  BIU_PLL_POSTDIV_DIV0_MASK		(0xff)
+#define  BIU_PLL_POSTDIV_DIV0_OVERRIDE		(1 << 15)
+
+#define BIU_PLL_STAT_REG			(0xf << 2)
+#define  BIU_PLL_STAT_PLL_LOCK			(1 << 31)
+
+static void biu_pll_bypass_enable(struct bcm63xx_pmc *pmc, bool en)
+{
+	u32 reg;
+
+	pmc_read_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_RESET_REG, &reg);
+
+	if (en)
+		reg |= BIU_PLL_RESET_BYP_EN;
+	else
+		reg &= ~BIU_PLL_RESET_BYP_EN;
+
+	pmc_write_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_RESET_REG, reg);
+}
+
+static bool biu_pll_get_bypass_enable(struct bcm63xx_pmc *pmc)
+{
+	u32 reg;
+
+	pmc_read_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_RESET_REG, &reg);
+	return !!(reg & BIU_PLL_RESET_BYP_EN);
+}
+
+static void biu_pll_reset_assert(struct bcm63xx_pmc *pmc, bool en)
+{
+	u32 reg;
+
+	pmc_read_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_RESET_REG, &reg);
+	if (en)
+		reg |= BIU_PLL_RESET_PLL_RST;
+	else
+		reg &= ~BIU_PLL_RESET_PLL_RST;
+	pmc_write_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_RESET_REG, reg);
+}
+
+static void biu_pll_set_ndiv_override(struct bcm63xx_pmc *pmc, bool en)
+{
+	u32 reg;
+
+	pmc_read_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_PDIV_REG, &reg);
+	if (en)
+		reg |= BIU_PLL_PDIV_NDIV_OVERRIDE;
+	else
+		reg &= ~BIU_PLL_PDIV_NDIV_OVERRIDE;
+	pmc_write_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_PDIV_REG, reg);
+}
+
+static u32 biu_pll_get_ndiv(struct bcm63xx_pmc *pmc)
+{
+	u32 reg;
+
+	pmc_read_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_NDIV_REG, &reg);
+	return reg & BIU_PLL_NDIV_NDIV_MASK;
+}
+
+static void biu_pll_set_ndiv(struct bcm63xx_pmc *pmc, u32 ndiv)
+{
+	u32 reg;
+
+	ndiv &= BIU_PLL_NDIV_NDIV_MASK;
+	pmc_read_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_NDIV_REG, &reg);
+	reg &= ~BIU_PLL_NDIV_NDIV_MASK;
+	reg |= ndiv;
+	pmc_write_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_NDIV_REG, reg);
+}
+
+static u32 biu_pll_get_mdiv(struct bcm63xx_pmc *pmc)
+{
+	u32 reg;
+
+	pmc_read_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_POSTDIV_REG,
+			       &reg);
+
+	return reg & BIU_PLL_POSTDIV_DIV0_MASK;
+}
+
+static void biu_pll_set_mdiv(struct bcm63xx_pmc *pmc, u8 mdiv)
+{
+	u32 reg;
+
+	if (mdiv == biu_pll_get_mdiv(pmc))
+		return ;
+
+	mdiv &= BIU_PLL_POSTDIV_DIV0_MASK;
+	pmc_read_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_POSTDIV_REG,
+			       &reg);
+	reg &= ~BIU_PLL_POSTDIV_DIV0_OVERRIDE;
+	reg &= ~BIU_PLL_POSTDIV_DIV0_MASK;
+	reg |= mdiv;
+	pmc_write_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_POSTDIV_REG,
+				reg);
+	usleep_range(1000, 1200);
+
+	reg |= BIU_PLL_POSTDIV_DIV0_OVERRIDE;
+	pmc_write_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_POSTDIV_REG,
+				reg);
+	usleep_range(10000, 12000);
+}
+
+static int biu_pll_wait_lock(struct bcm63xx_pmc *pmc)
+{
+	int tries = 1000;
+
+	do {
+		u32 reg;
+		pmc_read_bpcm_register(pmc, PMB_ADDR_BIU_PLL, BIU_PLL_STAT_REG,
+				       &reg);
+
+		if (reg & BIU_PLL_STAT_PLL_LOCK)
+			break;
+		usleep_range(1000, 2000);
+		--tries;
+	} while (tries > 0);
+
+	return tries > 0 ? 0 : -ETIMEDOUT;
+}
+
+/*
+ * unless bypass clock is enabled, cpu frequency in mhz is:
+ *
+ * 50 * ndiv / mdiv
+ *
+ * mdiv is usually 2, unless for CPU speeds higher than 1675 mhz, in
+ * which case it is 1.
+ */
+static int pmc_cpufreq_get(struct bcm63158_cpufreq *priv)
+{
+	struct bcm63xx_pmc *pmc = priv->pmc;
+	u32 ndiv;
+	u32 mdiv;
+
+	if (biu_pll_get_bypass_enable(pmc)) {
+		/*
+		 * 400 mhz slow clock cpu clocking is active.
+		 */
+		return 400;
+	}
+
+	ndiv = biu_pll_get_ndiv(pmc);
+	mdiv = biu_pll_get_mdiv(pmc);
+	return 50 * ndiv / mdiv;
+}
+
+/*
+ * this will change the CPU speed to the requested frequency.
+ *
+ * valid frequencies are: 400, then 550 to 1675 mhz, frequency is
+ * rounded down to the nearest 25mhz. Frequencies from 1700 to 2200
+ * mhz are possible, rounded down to the nearest 50 mhz.
+ *
+ * Frequencies from 1700 to 2200 Mhz are in the overclocking range and
+ * the CPU may not work reliably in that case.
+ */
+static int pmc_cpufreq_set(struct bcm63158_cpufreq *priv, unsigned int val)
+{
+	struct bcm63xx_pmc *pmc = priv->pmc;
+	u32 mdiv, ndiv;
+	int error;
+	u32 ndiv_lo, ndiv_hi;
+
+	val /= 1000;
+
+	if (val > 2200)
+		return -EINVAL;
+
+	if (val > 1675) {
+		mdiv = 1;
+		ndiv_lo = 34;
+		ndiv_hi = 44;
+	} else {
+		mdiv = 2;
+		ndiv_lo = 22;
+		ndiv_hi = 67;
+	}
+
+	ndiv = mdiv * val / 50;
+
+	if ((ndiv < ndiv_lo || ndiv > ndiv_hi) && val != 400) {
+		dev_warn(priv->dev, "%u: unsupported frequency.\n", val);
+		return -EINVAL;
+	}
+
+	/*
+	 * enabling bypass clock will clock the CPU at 400 mhz from a
+	 * clock that will remain stable while we configure the
+	 * divisors.
+	 */
+	biu_pll_bypass_enable(pmc, true);
+	biu_pll_reset_assert(pmc, true);
+
+	if (val == 400)
+		/*
+		 * just leave the system with the slow clock strap
+		 * frequency.
+		 */
+		return 0;
+
+	/*
+	 * set ndiv
+	 */
+	biu_pll_set_ndiv_override(pmc, true);
+	biu_pll_set_ndiv(pmc, ndiv);
+
+	/*
+	 * deassert PLL and wait for it to lock.
+	 */
+	biu_pll_reset_assert(pmc, false);
+	error = biu_pll_wait_lock(pmc);
+	if (error) {
+		/*
+		 * if the pll fails to lock it looks like the system
+		 * will be left in a usable state as long as we keep
+		 * the bypass enabled. cpu freq will be 400 Mhz, the
+		 * slow clock strap freq.
+		 */
+		dev_crit(priv->dev, "BIU pll failed to lock. "
+			 "leaving PLL bypass enabled (%u Mhz was wanted).\n",
+			 val);
+		return error;
+	}
+
+	/*
+	 * set post divider (mdiv)
+	 */
+	biu_pll_set_mdiv(pmc, mdiv);
+
+	/*
+	 * disable clock bypass, the cpu will after be clocked at the
+	 * requested frequency.
+	 */
+	biu_pll_bypass_enable(pmc, false);
+	return 0;
+}
+
+static int bcm63158_cpufreq_init(struct cpufreq_policy *policy)
+{
+	struct bcm63158_cpufreq *priv = cpufreq_get_driver_data();
+
+	policy->cpuinfo.max_freq = 1675000;
+	policy->cpuinfo.min_freq = 400000;
+	policy->cur = pmc_cpufreq_get(priv) * 1000;
+	policy->cpuinfo.transition_latency = 20 * 1000 * 1000; /* 20 msec */
+	policy->freq_table = priv->freq_table;
+	policy->driver_data = cpufreq_get_driver_data();
+
+	/*
+	 * All cores share the same clock and thus the same policy.
+	 */
+	cpumask_setall(policy->cpus);
+
+	return 0;
+}
+
+static int bcm63158_cpufreq_target_index(struct cpufreq_policy *policy,
+					 unsigned int index)
+{
+	struct bcm63158_cpufreq *priv = policy->driver_data;
+
+	pmc_cpufreq_set(priv, policy->freq_table[index].frequency);
+	return 0;
+}
+
+static int add_freq_entry(struct cpufreq_frequency_table *tbl,
+			  unsigned int freq_mhz, size_t *cur, size_t size)
+{
+	if (*cur >= size) {
+		pr_err("unable to add frequency %u to table: no room left.\n",
+		       freq_mhz);
+		return -ENOSPC;
+	}
+
+	tbl = &tbl[*cur];
+	tbl->frequency = freq_mhz ? (freq_mhz * 1000) : CPUFREQ_TABLE_END;
+	++(*cur);
+	return 0;
+}
+
+/*
+ * NOTE: the hardware could do 550 Mhz, but in some case the BIU PLL
+ * won't lock at that frequency. just don't advertise 550 Mhz in the
+ * frequency table.
+ */
+#define FREQ_LOW	575
+#define FREQ_HI		1575
+
+/*
+ * the hardware can work in 25 mhz frequency increments, but it's
+ * probably not needed that much to have this kind of granularity.
+ *
+ * Also the 45 possible frequencies make it that the stats/trans_table
+ * sysfs attribute cannot work due to its content being larger than 4k
+ * (the max for sysfs attributes).
+ *
+ * allow all frequencies between FREQ_LO and FREQ_HI in 100 mhz
+ * increment as a result.
+ */
+#define FREQ_INC	100
+
+static int bcm63158_cpufreq_build_table(struct bcm63158_cpufreq *priv)
+{
+	size_t nr_freqs;
+	size_t cur_freq;
+	unsigned int freq;
+	struct cpufreq_frequency_table *table;
+	int error;
+
+	nr_freqs = (FREQ_HI - FREQ_LOW) / FREQ_INC + 1;
+	nr_freqs += 2; /* account  for 400 mhz entry and end element */
+
+	table = devm_kzalloc(priv->dev, sizeof (*table) * nr_freqs,
+			      GFP_KERNEL);
+	if (!table)
+		return -ENOMEM;
+
+	cur_freq = 0;
+	error = add_freq_entry(table, 400, &cur_freq, nr_freqs);
+	if (error)
+		return error;
+
+	for (freq = FREQ_LOW; freq <= FREQ_HI; freq += FREQ_INC) {
+		error = add_freq_entry(table, freq, &cur_freq, nr_freqs);
+		if (error)
+			return error;
+	}
+
+	error = add_freq_entry(table, 0, &cur_freq, nr_freqs);
+	if (error)
+		return error;
+
+	priv->freq_table = table;
+	return 0;
+}
+
+static int bcm63158_cpufreq_probe(struct platform_device *pdev)
+{
+	struct bcm63158_cpufreq *priv;
+	int err;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	/* check the pmc handle, not priv: priv is a valid devm pointer */
+	priv->pmc = pmc_of_get(pdev->dev.of_node);
+	if (IS_ERR(priv->pmc))
+		return PTR_ERR(priv->pmc);
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	strscpy(priv->drv.name, "bcm63158-cpufreq", sizeof (priv->drv.name));
+	priv->drv.init = bcm63158_cpufreq_init;
+	priv->drv.verify = cpufreq_generic_frequency_table_verify;
+	priv->drv.target_index = bcm63158_cpufreq_target_index;
+	priv->drv.driver_data = priv;
+	priv->drv.flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK;
+	priv->dev = &pdev->dev;
+
+	err = bcm63158_cpufreq_build_table(priv);
+	if (err)
+		return err;
+
+	return cpufreq_register_driver(&priv->drv);
+}
+
+static void bcm63158_cpufreq_remove(struct platform_device *pdev)
+{
+	struct bcm63158_cpufreq *priv = dev_get_drvdata(&pdev->dev);
+
+	cpufreq_unregister_driver(&priv->drv);
+}
+
+static const struct of_device_id bcm63158_cpufreq_of_match[] = {
+	{ .compatible = "brcm,bcm63158-cpufreq" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, bcm63158_cpufreq_of_match);
+
+static struct platform_driver bcm63158_cpufreq_pdriver = {
+	.driver = {
+		.name = "bcm63158-cpufreq",
+		.of_match_table = bcm63158_cpufreq_of_match,
+	},
+	.probe = bcm63158_cpufreq_probe,
+	.remove = bcm63158_cpufreq_remove,
+};
+module_platform_driver(bcm63158_cpufreq_pdriver);
+
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("Broadcom BCM63158 SoC cpufreq driver.");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-6.13.12-fbx/drivers/fbxgpio./Kconfig linux-6.13.12-fbx/drivers/fbxgpio/Kconfig
--- linux-6.13.12-fbx/drivers/fbxgpio./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/fbxgpio/Kconfig	2025-09-25 17:40:31.611347609 +0200
@@ -0,0 +1,7 @@
+config FREEBOX_GPIO
+	tristate "Freebox GPIO control interface"
+	default n
+
+config FREEBOX_GPIO_DT
+	tristate "Freebox GPIO DT binding."
+	default n
diff -Nruw linux-6.13.12-fbx/drivers/fbxgpio./Makefile linux-6.13.12-fbx/drivers/fbxgpio/Makefile
--- linux-6.13.12-fbx/drivers/fbxgpio./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/fbxgpio/Makefile	2025-09-25 17:40:31.611347609 +0200
@@ -0,0 +1,2 @@
+obj-$(CONFIG_FREEBOX_GPIO)	+= fbxgpio_core.o
+obj-$(CONFIG_FREEBOX_GPIO_DT)	+= fbxgpio_dt.o
diff -Nruw linux-6.13.12-fbx/drivers/fbxgpio./fbxgpio_core.c linux-6.13.12-fbx/drivers/fbxgpio/fbxgpio_core.c
--- linux-6.13.12-fbx/drivers/fbxgpio./fbxgpio_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/fbxgpio/fbxgpio_core.c	2025-09-25 17:40:31.611347609 +0200
@@ -0,0 +1,336 @@
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/fbxgpio_core.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+
+#define PFX	"fbxgpio_core: "
+
+/* #define DEBUG */
+#ifdef DEBUG
+#define dprint(Fmt, Arg...)	printk(PFX Fmt, Arg)
+#else
+#define dprint(Fmt, Arg...)	do { } while (0)
+#endif
+
+static struct class *fbxgpio_class;
+
+/*
+ * retrieval of a struct fbxgpio_pin from a phandle in the device
+ * tree.
+ *
+ * can be removed when fbxjtag uses standard gpio library instead of
+ * fbxgpio
+ */
+struct fbxgpio_of_mach_data {
+	struct fbxgpio_pin *match;
+	struct device_node *np;
+};
+
+/*
+ * class_for_each_device() callback: stop iterating (return 1) when the
+ * pin bound to @dev was created from the device_node we are looking
+ * for, recording it in the match data.
+ */
+static int match_fbxgpio_of_node(struct device *dev, void *data)
+{
+	struct fbxgpio_of_mach_data *md = data;
+	struct fbxgpio_pin *pin = dev_get_drvdata(dev);
+
+	if (pin->of_node == md->np) {
+		md->match = pin;
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * resolve the phandle at @index of @np's property @propname to the
+ * struct fbxgpio_pin registered for that node.  Returns the pin or
+ * ERR_PTR(-ENOENT) when the property or the pin does not exist.
+ */
+struct fbxgpio_pin *fbxgpio_of_get(struct device_node *np, const char *propname,
+				   int index)
+{
+	struct fbxgpio_of_mach_data md;
+
+	/*
+	 * get the pin device_node.
+	 */
+	md.match = NULL;
+	md.np = of_parse_phandle(np, propname, index);
+	if (!md.np)
+		return ERR_PTR(-ENOENT);
+
+	/*
+	 * find the struct fbxgpio_pin behind that device_node.
+	 */
+	class_for_each_device(fbxgpio_class, NULL, &md,
+			      match_fbxgpio_of_node);
+
+	/*
+	 * bugfix: of_parse_phandle() returns the node with an elevated
+	 * refcount; it was only needed for pointer comparison above, so
+	 * drop the reference instead of leaking it.
+	 */
+	of_node_put(md.np);
+
+	return md.match ? md.match : ERR_PTR(-ENOENT);
+}
+EXPORT_SYMBOL(fbxgpio_of_get);
+
+/*
+ * can be removed when fbxjtag uses standard gpio library instead of
+ * fbxgpio
+ */
+/*
+ * drive output pin @p to @val (0/1) and cache the value in
+ * cur_dataout.  desc-based pins go through the gpiod API (polarity
+ * handled by gpiolib); legacy pins apply FBXGPIO_PIN_REVERSE_POL by
+ * hand.  Returns 0, or a negative errno if the desc request failed.
+ */
+int fbxgpio_set_data_out(struct fbxgpio_pin *p, int val)
+{
+	struct gpio_desc *desc;
+
+	/* cached value is updated even if the desc request below fails */
+	p->cur_dataout = val;
+
+	if (p->use_desc) {
+		desc = p->request_desc(p);
+		if (IS_ERR(desc))
+			return PTR_ERR(desc);
+		gpiod_set_value_cansleep(desc, val);
+		p->release_desc(p);
+	} else {
+		if (p->flags & FBXGPIO_PIN_REVERSE_POL)
+			val = 1 - val;
+		gpio_set_value_cansleep(p->pin_num, val);
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(fbxgpio_set_data_out);
+
+/*
+ * can be removed when fbxjtag uses standard gpio library instead of
+ * fbxgpio
+ */
+/*
+ * read the current input value of @p.  desc-based pins use the gpiod
+ * API (polarity handled by gpiolib); legacy pins apply
+ * FBXGPIO_PIN_REVERSE_POL by hand.  Returns 0/1, or a negative errno
+ * if the desc request failed.
+ */
+int fbxgpio_get_data_in(struct fbxgpio_pin *p)
+{
+	const struct gpio_desc *desc;
+	int val;
+
+	if (p->use_desc) {
+		desc = p->request_desc(p);
+		if (IS_ERR(desc))
+			return PTR_ERR(desc);
+
+		val = gpiod_get_value_cansleep(desc);
+		p->release_desc(p);
+	} else {
+		val = gpio_get_value_cansleep(p->pin_num);
+		if (p->flags & FBXGPIO_PIN_REVERSE_POL)
+			val = 1 - val;
+	}
+
+	return val;
+}
+
+EXPORT_SYMBOL(fbxgpio_get_data_in);
+
+/*
+ * show direction in for gpio associated with class_device dev.
+ */
+static ssize_t show_direction(struct device *dev,
+			      struct device_attribute *attr, char *buf)
+{
+	struct fbxgpio_pin *p;
+	int dir, ret = 0;
+
+	p = dev_get_drvdata(dev);
+
+	dir = p->direction;
+
+	switch (dir) {
+	case GPIO_DIR_IN:
+		ret += sprintf(buf, "input\n");
+		break;
+	case GPIO_DIR_OUT:
+	case GPIO_DIR_OUT_UNINITIALIZED:
+		/* not-yet-driven outputs are still reported as "output" */
+		ret += sprintf(buf, "output\n");
+		break;
+	default:
+		ret += sprintf(buf, "unknown\n");
+		break;
+	}
+	return ret;
+}
+
+/*
+ * show input data for input gpio pins.
+ */
+static ssize_t show_datain(struct device *dev,
+			   struct device_attribute *attr, char *buf)
+{
+	struct fbxgpio_pin *p;
+	int val;
+
+	p = dev_get_drvdata(dev);
+	/* data_in only makes sense for input pins */
+	if (p->direction != GPIO_DIR_IN)
+		return -EINVAL;
+
+	/*
+	 * NOTE(review): fbxgpio_get_data_in() can return a negative
+	 * errno for desc-based pins; that value would be printed as-is
+	 * here instead of being propagated -- confirm intended.
+	 */
+	val = fbxgpio_get_data_in(p);
+	return sprintf(buf, "%i\n", val);
+}
+
+/*
+ * show output data for output gpio pins.
+ */
+static ssize_t show_dataout(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	int val;
+	struct fbxgpio_pin *p;
+
+	p = dev_get_drvdata(dev);
+	/* only initialized outputs have a meaningful cached value */
+	if (p->direction != GPIO_DIR_OUT)
+		return -EINVAL;
+
+	/* report the cached value, the hardware is not read back */
+	val = p->cur_dataout;
+	return sprintf(buf, "%i\n", val);
+}
+
+/*
+ * store new dataout value for output gpio pins.
+ */
+/*
+ * accepts '0' or '1'; consumes exactly one character per call (returns
+ * 1) so that multi-character writes are processed byte by byte.
+ */
+static ssize_t store_dataout(struct device *dev,
+	    struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct fbxgpio_pin *p;
+	int val;
+
+	if (*buf == ' ' || *buf == '\t' || *buf == '\r' || *buf == '\n')
+		/* silently eat any spaces/tab/linefeed/carriagereturn */
+		return 1;
+
+	p = dev_get_drvdata(dev);
+
+	if (p->direction != GPIO_DIR_OUT &&
+	    p->direction != GPIO_DIR_OUT_UNINITIALIZED)
+		return -EINVAL;
+
+	switch (*buf) {
+	case '0':
+		val = 0;
+		break ;
+	case '1':
+		val = 1;
+		break ;
+	default:
+		return -EINVAL;
+	}
+
+	/* first write promotes an uninitialized output to a real one */
+	p->direction = GPIO_DIR_OUT;
+	/* NOTE(review): error return of fbxgpio_set_data_out() ignored */
+	fbxgpio_set_data_out(p, val);
+	return 1;
+}
+
+/*
+ * attribute list associated with each class device.
+ */
+static struct device_attribute gpio_attributes[] = {
+	__ATTR(direction, 0400, show_direction, NULL),
+	__ATTR(data_in,   0400, show_datain, NULL),
+	__ATTR(data_out,  0600, show_dataout, store_dataout),
+};
+
+/*
+ * create the class device and sysfs attribute files for @pin.
+ * Returns 0 on success; on failure everything created so far is torn
+ * down.
+ */
+static int fbxgpio_register_pin(struct platform_device *ppdev,
+				struct fbxgpio_pin *pin)
+{
+	struct device *dev;
+	int i, ret;
+
+	/* desc-based pins must provide both request/release callbacks */
+	if (pin->use_desc && (!pin->request_desc || !pin->release_desc))
+		return -EINVAL;
+
+	dprint("registering pin %s\n", pin->pin_name);
+
+	dev = device_create(fbxgpio_class, &ppdev->dev, 0, pin,
+			    "%s", pin->pin_name);
+	if (IS_ERR(dev))
+		return PTR_ERR(dev);
+
+	for (i = 0; i < ARRAY_SIZE(gpio_attributes); i++) {
+		ret = device_create_file(dev, &gpio_attributes[i]);
+		if (ret)
+			goto err_out;
+	}
+
+	pin->dev = dev;
+	return 0;
+
+err_out:
+	/*
+	 * bugfix: attribute i is the one that failed to be created,
+	 * only remove attributes [0, i), not i itself.
+	 */
+	while (--i >= 0)
+		device_remove_file(dev, &gpio_attributes[i]);
+	device_unregister(dev);
+	return ret;
+}
+
+/*
+ * tear down the sysfs attributes and class device created by
+ * fbxgpio_register_pin(); clears pin->dev first.
+ */
+static void fbxgpio_unregister_pin(struct fbxgpio_pin *pin)
+{
+	struct device *dev;
+	int i;
+
+	dprint("unregistering pin %s\n", pin->pin_name);
+	dev = pin->dev;
+	pin->dev = NULL;
+
+	for (i = 0; i < ARRAY_SIZE(gpio_attributes); i++)
+		device_remove_file(dev, &gpio_attributes[i]);
+	device_unregister(dev);
+}
+
+/*
+ * register every pin of the NULL-terminated platform_data array.
+ * bugfix: the driver core does not call remove() when probe fails, so
+ * unwind the pins already registered instead of leaking their class
+ * devices on a mid-array failure.
+ */
+static int fbxgpio_platform_probe(struct platform_device *pdev)
+{
+	struct fbxgpio_pin *p, *first;
+	int err = 0;
+
+	first = p = pdev->dev.platform_data;
+	while (p->pin_name) {
+		err = fbxgpio_register_pin(pdev, p);
+		if (err)
+			goto unwind;
+		++p;
+	}
+	return 0;
+
+unwind:
+	/* p points at the pin that failed (not registered) */
+	while (p-- != first)
+		fbxgpio_unregister_pin(p);
+	return err;
+}
+
+/*
+ * unregister every pin of the NULL-terminated platform_data array.
+ */
+static void fbxgpio_platform_remove(struct platform_device *pdev)
+{
+	struct fbxgpio_pin *p;
+
+	p = pdev->dev.platform_data;
+	while (p->pin_name) {
+		fbxgpio_unregister_pin(p);
+		++p;
+	}
+}
+
+/*
+ * bound by name ("fbxgpio") to platform devices created by board code
+ * or dynamically by the fbxgpio_dt driver.
+ */
+static struct platform_driver fbxgpio_platform_driver =
+{
+	.probe	= fbxgpio_platform_probe,
+	.remove	= fbxgpio_platform_remove,
+	.driver	= {
+		.name	= "fbxgpio",
+	}
+};
+
+/*
+ * create the fbxgpio class and register the platform driver; the
+ * class is destroyed again if driver registration fails.
+ */
+static int __init fbxgpio_init(void)
+{
+	int ret;
+
+	fbxgpio_class = class_create("fbxgpio");
+	if (IS_ERR(fbxgpio_class))
+		return PTR_ERR(fbxgpio_class);
+
+	ret = platform_driver_register(&fbxgpio_platform_driver);
+	if (ret) {
+		printk(KERN_ERR PFX "unable to register fbxgpio driver.\n");
+		class_destroy(fbxgpio_class);
+		return ret;
+	}
+	return 0;
+}
+
+/*
+ * module exit: mirror of fbxgpio_init(), reverse order.
+ */
+static void __exit fbxgpio_exit(void)
+{
+	platform_driver_unregister(&fbxgpio_platform_driver);
+	class_destroy(fbxgpio_class);
+}
+
+subsys_initcall(fbxgpio_init);
+module_exit(fbxgpio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nicolas.schichan@freebox.fr>");
diff -Nruw linux-6.13.12-fbx/drivers/fbxgpio./fbxgpio_dt.c linux-6.13.12-fbx/drivers/fbxgpio/fbxgpio_dt.c
--- linux-6.13.12-fbx/drivers/fbxgpio./fbxgpio_dt.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/fbxgpio/fbxgpio_dt.c	2025-09-25 17:40:31.611347609 +0200
@@ -0,0 +1,263 @@
+/*
+ * fbxgpio_dt.c for fbxgpio
+ * Created by <nschichan@freebox.fr> on Tue Aug  1 14:01:01 2017
+ */
+
+#include <linux/kernel.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/fbxgpio_core.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_gpio.h>
+
+static atomic_t last_id = ATOMIC_INIT(0);
+
+/*
+ * fbxgpio driver fetching gpios names and configuration from
+ * device-tree.
+ */
+
+struct fbxgpio_dt_priv {
+	struct fbxgpio_pin *pins;
+	unsigned int npins;
+
+	/* dynamically created platform_device for fbxgpio_core */
+	struct platform_device *top_pdev;
+};
+
+/*
+ * fetch the gpio_desc backing @pin.  Normally-claimed pins simply
+ * return the desc obtained at probe time.  "no-claim" pins first try a
+ * real request (remembered so release_desc_cb() can put it back); if
+ * the gpio is busy -- expected for no-claim -- fall back to a
+ * non-exclusive GPIOD_ASIS reference that is not stored and therefore
+ * never put.
+ */
+static struct gpio_desc *request_desc_cb(struct fbxgpio_pin *pin)
+{
+	struct gpio_desc *desc;
+	int ret;
+
+	if (!pin->dt.no_claim) {
+		/* was requested earlier */
+		return pin->dt.desc;
+	}
+
+	/* try to request it for real first */
+	desc = fwnode_gpiod_get_index(of_fwnode_handle(pin->of_node),
+				      NULL, 0, pin->dt.flags,
+				      pin->dt.pin_name);
+	if (!IS_ERR(desc)) {
+		/* we want to release it later */
+		pin->dt.desc = desc;
+		return desc;
+	}
+
+	ret = PTR_ERR(desc);
+	if (ret != -EBUSY)
+		return desc;
+
+	/* device is busy, which is expected for no-claim, just fetch
+	 * a "light" reference, which we won't need to put */
+	return fwnode_gpiod_get_index(of_fwnode_handle(pin->of_node),
+				      NULL, 0, GPIOD_ASIS |
+				      GPIOD_FLAGS_BIT_NONEXCLUSIVE,
+				      pin->dt.pin_name);
+}
+
+/*
+ * put back a desc obtained by request_desc_cb() for a "no-claim" pin;
+ * descs of normally-claimed pins are devm-managed and kept.
+ */
+static void release_desc_cb(struct fbxgpio_pin *pin)
+{
+	if (pin->dt.no_claim && pin->dt.desc) {
+		gpiod_put(pin->dt.desc);
+		pin->dt.desc = NULL;
+	}
+}
+
+/*
+ * fill an fbxgpio_pin with the configuration found in a device tree
+ * node.
+ *
+ * required properties are:
+ * - gpio: a phandle to a standard linux gpio.
+ *
+ * - the name of the node: the name of the gpio as it will appear under
+ *   /sys/class/fbxgpio/
+ *
+ * - <input>/<output-high>/<output-low>/<output-preserve>: how to declare
+ *   gpio and actually setup it unless no-claim is given
+ *
+ * - <no-claim>: just declare gpio, but don't request & setup it
+ */
+static int fbxgpio_dt_fill_gpio(struct platform_device *pdev,
+				struct device_node *np,
+				struct fbxgpio_pin *pin)
+{
+	enum gpiod_flags flags;
+	int error;
+
+	error = of_property_read_string(np, "name", &pin->pin_name);
+	if (error) {
+		dev_err(&pdev->dev, "gpio has no name.\n");
+		return error;
+	}
+
+	/* exactly one of the four direction properties must be set */
+	if (of_property_read_bool(np, "input")) {
+		pin->direction = GPIO_DIR_IN;
+		flags = GPIOD_IN;
+	} else if (of_property_read_bool(np, "output-low")) {
+		pin->direction = GPIO_DIR_OUT;
+		pin->cur_dataout = 0;
+		flags = GPIOD_OUT_LOW;
+	} else if (of_property_read_bool(np, "output-high")) {
+		pin->direction = GPIO_DIR_OUT;
+		pin->cur_dataout = 1;
+		flags = GPIOD_OUT_HIGH;
+	} else if (of_property_read_bool(np, "output-preserve")) {
+		pin->direction = GPIO_DIR_OUT_UNINITIALIZED;
+		flags = GPIOD_ASIS;
+	} else {
+		dev_err(&pdev->dev,
+			"no state specified for %s\n",
+			pin->pin_name);
+		return -EINVAL;
+	}
+
+	pin->use_desc = true;
+	pin->of_node = np;
+	pin->dt.flags = flags;
+	pin->request_desc = request_desc_cb;
+	pin->release_desc = release_desc_cb;
+	/* label passed to gpiod when requesting the line */
+	scnprintf(pin->dt.pin_name, sizeof (pin->dt.pin_name),
+		  "fbxgpio-dt/%s", pin->pin_name);
+
+	if (of_property_read_bool(np, "no-claim")) {
+		/* will be requested on demand */
+		pin->dt.no_claim = true;
+		return 0;
+	}
+
+	pin->dt.desc = devm_fwnode_gpiod_get(&pdev->dev, of_fwnode_handle(np),
+					     NULL, flags, pin->dt.pin_name);
+	if (IS_ERR(pin->dt.desc)) {
+		int ret = PTR_ERR(pin->dt.desc);
+
+		/* probe deferral is normal, do not spam the log for it */
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev,
+				"unable to get gpio desc for %s: %d.\n",
+				pin->pin_name, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * probe: count the available child nodes, fill a zero-terminated
+ * array of struct fbxgpio_pin from them, then hand the array to a
+ * dynamically created "fbxgpio" platform device (bound by
+ * fbxgpio_core) as platform data.
+ */
+static int fbxgpio_dt_probe(struct platform_device *pdev)
+{
+	struct fbxgpio_dt_priv *priv;
+	struct device_node *fbxgpio_node;
+	u32 cur_gpio;
+	int error = 0;
+	size_t priv_alloc_size;
+	int i;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	dev_set_drvdata(&pdev->dev, priv);
+
+	/*
+	 * first pass to get the number of struct fbxgpio_pin to
+	 * allocate.
+	 */
+	for_each_available_child_of_node(pdev->dev.of_node, fbxgpio_node) {
+		++priv->npins;
+	}
+
+	/*
+	 * allocate pins: use npins + 1 for zeroed end sentinel.
+	 */
+	priv_alloc_size = (priv->npins + 1) * sizeof (struct fbxgpio_pin);
+	priv->pins = devm_kzalloc(&pdev->dev, priv_alloc_size, GFP_KERNEL);
+	if (!priv->pins)
+		return -ENOMEM;
+
+	/*
+	 * second pass to fill the priv->pins array.
+	 */
+	cur_gpio = 0;
+	for_each_available_child_of_node(pdev->dev.of_node, fbxgpio_node) {
+		error = fbxgpio_dt_fill_gpio(pdev, fbxgpio_node,
+					     &priv->pins[cur_gpio]);
+		if (error)
+			return error;
+		++cur_gpio;
+	}
+
+	dev_info(&pdev->dev, "%u gpios.\n", priv->npins);
+
+	/*
+	 * create and register a platform device for fbxgpio_core;
+	 * atomic id keeps names unique across several fbxgpio-dt nodes.
+	 */
+	priv->top_pdev = platform_device_register_data(&pdev->dev,
+						       "fbxgpio",
+						       atomic_inc_return(&last_id),
+						       priv->pins,
+						       priv_alloc_size);
+
+	if (IS_ERR(priv->top_pdev)) {
+		dev_err(&pdev->dev, "unable to register fbxgpio platform "
+			"device: %ld\n", PTR_ERR(priv->top_pdev));
+		return PTR_ERR(priv->top_pdev);
+	}
+
+	/* debug summary of every pin parsed above */
+	for (i = 0; i < priv->npins; i++) {
+		struct fbxgpio_pin *pin = &priv->pins[i];
+
+		if (pin->direction == GPIO_DIR_OUT)
+			dev_dbg(&pdev->dev,
+				"%sgpio %s is output, default %d\n",
+				pin->dt.no_claim ? "unclaimed " : "",
+				pin->pin_name, pin->cur_dataout);
+		else if (pin->direction == GPIO_DIR_IN)
+			dev_dbg(&pdev->dev,
+				"%sgpio %s is input\n",
+				pin->dt.no_claim ? "unclaimed " : "",
+				pin->pin_name);
+		else
+			dev_dbg(&pdev->dev,
+				"%sgpio %s has output, value preserved\n",
+				pin->dt.no_claim ? "unclaimed " : "",
+				pin->pin_name);
+	}
+
+	return 0;
+}
+
+/*
+ * remove: unregister the dynamically created fbxgpio platform device;
+ * priv and the pins array are devm-managed.
+ */
+static void fbxgpio_dt_remove(struct platform_device *pdev)
+{
+	struct fbxgpio_dt_priv *priv = dev_get_drvdata(&pdev->dev);
+	platform_device_unregister(priv->top_pdev);
+}
+
+static const struct of_device_id fbxgpio_dt_of_match_table[] = {
+	{ .compatible = "fbx,fbxgpio" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, fbxgpio_dt_of_match_table);
+
+static struct platform_driver fbxgpio_dt_platform_driver = {
+	.probe		= fbxgpio_dt_probe,
+	.remove		= fbxgpio_dt_remove,
+	.driver		= {
+		.name		= "fbxgpio-dt",
+		.of_match_table	= fbxgpio_dt_of_match_table,
+	},
+};
+
+module_platform_driver(fbxgpio_dt_platform_driver);
+
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("DT Freebox GPIO Driver");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-6.13.12-fbx/drivers/fbxjtag./Kconfig linux-6.13.12-fbx/drivers/fbxjtag/Kconfig
--- linux-6.13.12-fbx/drivers/fbxjtag./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/fbxjtag/Kconfig	2025-09-25 17:40:31.611347609 +0200
@@ -0,0 +1,3 @@
+config FREEBOX_JTAG
+	tristate "Freebox JTAG control interface"
+	default n
diff -Nruw linux-6.13.12-fbx/drivers/fbxjtag./Makefile linux-6.13.12-fbx/drivers/fbxjtag/Makefile
--- linux-6.13.12-fbx/drivers/fbxjtag./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/fbxjtag/Makefile	2025-09-25 17:40:31.611347609 +0200
@@ -0,0 +1 @@
+obj-$(CONFIG_FREEBOX_JTAG)	+= fbxjtag.o
diff -Nruw linux-6.13.12-fbx/drivers/fbxprocfs./Kconfig linux-6.13.12-fbx/drivers/fbxprocfs/Kconfig
--- linux-6.13.12-fbx/drivers/fbxprocfs./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/fbxprocfs/Kconfig	2025-09-25 17:40:31.611347609 +0200
@@ -0,0 +1,2 @@
+config FREEBOX_PROCFS
+	tristate "Freebox procfs interface"
diff -Nruw linux-6.13.12-fbx/drivers/fbxprocfs./Makefile linux-6.13.12-fbx/drivers/fbxprocfs/Makefile
--- linux-6.13.12-fbx/drivers/fbxprocfs./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/fbxprocfs/Makefile	2025-09-25 17:40:31.611347609 +0200
@@ -0,0 +1 @@
+obj-$(CONFIG_FREEBOX_PROCFS) += fbxprocfs.o
diff -Nruw linux-6.13.12-fbx/drivers/fbxprocfs./fbxprocfs.c linux-6.13.12-fbx/drivers/fbxprocfs/fbxprocfs.c
--- linux-6.13.12-fbx/drivers/fbxprocfs./fbxprocfs.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/fbxprocfs/fbxprocfs.c	2025-09-25 17:40:31.611347609 +0200
@@ -0,0 +1,299 @@
+/*
+ * Freebox ProcFs interface
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/sizes.h>
+
+#include <linux/fbxprocfs.h>
+
+#define PFX	"fbxprocfs: "
+
+
+static struct list_head clients;
+static struct mutex clients_mutex;
+
+static struct proc_dir_entry *root;
+
+/*
+ * register  a  fbxprocfs client  with  given  dirname, caller  should
+ * consider returned struct opaque
+ */
+/*
+ * register a fbxprocfs client with given dirname; caller should treat
+ * the returned struct as opaque.  @dirname is stored by pointer and
+ * must outlive the client.  @owner is currently unused.
+ *
+ * NOTE(review): returns NULL both for a duplicate dirname and for an
+ * allocation/proc_mkdir failure -- callers cannot tell these apart.
+ */
+struct fbxprocfs_client *fbxprocfs_add_client(const char *dirname,
+					      struct module *owner)
+{
+	struct fbxprocfs_client *ret, *p;
+
+	ret = NULL;
+	mutex_lock(&clients_mutex);
+
+	/* check for duplicate */
+	list_for_each_entry(p, &clients, list) {
+		if (!strcmp(dirname, p->dirname))
+			goto out;
+	}
+
+	if (!(ret = kmalloc(sizeof (*ret), GFP_KERNEL))) {
+		printk(KERN_ERR PFX "kmalloc failed\n");
+		goto out;
+	}
+
+	/* try to create client directory */
+	if (!(ret->dir = proc_mkdir(dirname, root))) {
+		printk(KERN_ERR PFX "can't create %s dir\n", dirname);
+		kfree(ret);
+		ret = NULL;
+		goto out;
+	}
+
+	/* refcount starts at 1 for the client itself */
+	atomic_set(&ret->refcount, 1);
+	ret->dirname = dirname;
+	list_add(&ret->list, &clients);
+
+out:
+	mutex_unlock(&clients_mutex);
+	return ret;
+}
+
+/*
+ * unregister  a  fbxprocfs client, make sure usage count is zero
+ */
+/*
+ * unregister a fbxprocfs client.  Fails with -EBUSY while entries
+ * created through fbxprocfs_create_entries() are still registered
+ * (refcount above its base value of 1).
+ */
+int fbxprocfs_remove_client(struct fbxprocfs_client *client)
+{
+	int ret;
+
+	mutex_lock(&clients_mutex);
+
+	ret = 0;
+	if (atomic_read(&client->refcount) > 1) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	remove_proc_entry(client->dirname, root);
+	list_del(&client->list);
+	kfree(client);
+
+out:
+	mutex_unlock(&clients_mutex);
+	return ret;
+}
+
+/*
+ * remove given entries from client directory
+ */
+/*
+ * remove given entries from client directory, dropping one refcount
+ * per entry.  Always returns 0.  Also used as the error-unwind path of
+ * __create_entries().
+ */
+static int
+__remove_entries(struct fbxprocfs_client *client,
+		 const struct fbxprocfs_desc *ro_desc,
+		 const struct fbxprocfs_desc *rw_desc)
+{
+	int i;
+
+	for (i = 0; ro_desc && ro_desc[i].name; i++) {
+		remove_proc_entry(ro_desc[i].name, client->dir);
+		atomic_dec(&client->refcount);
+	}
+
+	for (i = 0; rw_desc && rw_desc[i].name; i++) {
+		remove_proc_entry(rw_desc[i].name, client->dir);
+		atomic_dec(&client->refcount);
+	}
+
+	return 0;
+}
+
+/*
+ * replacement for NULL rfunc.
+ */
+static int bad_rfunc(struct seq_file *m, void *ptr)
+{
+	return -EACCES;
+}
+
+/*
+ * fbxprocfs write path is now handled by seq_file code. this
+ * simplifies client code greatly.
+ */
+/*
+ * open: route reads through seq_file.  The descriptor's id is smuggled
+ * through as the seq_file private pointer; a missing rfunc yields
+ * -EACCES on read via bad_rfunc.
+ */
+static int fbxprocfs_open(struct inode *inode, struct file *file)
+{
+	const struct fbxprocfs_desc *desc = pde_data(inode);
+
+	return single_open(file, desc->rfunc ? desc->rfunc : bad_rfunc,
+			   (void*)desc->id);
+}
+
+/*
+ * no particular help from kernel in the write path, fetch user buffer
+ * in a kernel buffer and call write func.
+ */
+/*
+ * no particular help from kernel in the write path, fetch user buffer
+ * in a kernel buffer and call write func.
+ */
+static ssize_t fbxprocfs_write(struct file *file, const char __user *ubuf,
+			       size_t len, loff_t *off)
+{
+	/*
+	 * get fbxprocfs desc via the proc_dir_entry in file inode
+	 */
+	struct fbxprocfs_desc *d = pde_data(file_inode(file));
+	char *kbuf;
+	int ret;
+
+	/*
+	 * must have a wfunc callback.
+	 */
+	if (!d->wfunc)
+		return -EACCES;
+
+	/*
+	 * allow up to SZ_4K bytes to be written.
+	 */
+	if (len > SZ_4K)
+		return -EOVERFLOW;
+
+	/*
+	 * alloc and fetch kernel buffer containing user data.
+	 *
+	 * NOTE(review): kbuf is not NUL-terminated (a full 4K write
+	 * fills the whole buffer) -- wfunc implementations must rely on
+	 * the len argument, not on string termination; confirm callers.
+	 */
+	kbuf = kmalloc(SZ_4K, GFP_KERNEL);
+	if (!kbuf)
+		return -ENOMEM;
+
+	ret = -EFAULT;
+	if (copy_from_user(kbuf, ubuf, len))
+		goto kfree;
+
+	ret = d->wfunc(file, kbuf, len, (void*)d->id);
+
+kfree:
+	kfree(kbuf);
+	return ret;
+}
+
+/*
+ * fbxprocfs file operations, read stuff is handled by seq_file code.
+ */
+static const struct proc_ops fbxprocfs_fops = {
+	.proc_open	= fbxprocfs_open,
+	.proc_lseek	= seq_lseek,
+	.proc_read	= seq_read,
+	.proc_release	= single_release,
+	.proc_write	= fbxprocfs_write,
+};
+
+/*
+ * replaces create_proc_read_entry removed in latest kernels.
+ */
+static struct proc_dir_entry *__create_proc_read_entry(
+				       const struct fbxprocfs_desc *desc,
+				       struct proc_dir_entry *base)
+{
+	return proc_create_data(desc->name, 0, base, &fbxprocfs_fops,
+				(void*)desc);
+}
+
+/*
+ * replaces create_proc_entry removed in latest kernels.
+ */
+static struct proc_dir_entry *__create_proc_entry(
+					const struct fbxprocfs_desc *desc,
+					struct proc_dir_entry *base)
+{
+	return proc_create_data(desc->name, S_IFREG | S_IWUSR | S_IRUGO,
+				base, &fbxprocfs_fops, (void*)desc);
+}
+
+/*
+ * create given entries in client directory
+ */
+/*
+ * create given entries in client directory, taking one refcount per
+ * entry.  On failure everything is unwound and -1 is returned.
+ */
+static int
+__create_entries(struct fbxprocfs_client *client,
+		 const struct fbxprocfs_desc *ro_desc,
+		 const struct fbxprocfs_desc *rw_desc)
+{
+	struct proc_dir_entry	*proc;
+	int			i;
+
+	for (i = 0; ro_desc && ro_desc[i].name; i++) {
+		if (!(proc = __create_proc_read_entry(&ro_desc[i],
+						      client->dir))) {
+			printk(KERN_ERR PFX "can't create %s/%s entry\n",
+			       client->dirname, ro_desc[i].name);
+			goto err;
+		}
+		atomic_inc(&client->refcount);
+	}
+
+	for (i = 0; rw_desc && rw_desc[i].name; i++) {
+		if (!(proc = __create_proc_entry(&rw_desc[i], client->dir))) {
+			/*
+			 * bugfix: log the rw entry that failed; this
+			 * used to index ro_desc[] with the rw loop
+			 * counter (wrong name, possible overrun).
+			 */
+			printk(KERN_ERR PFX "can't create %s/%s entry\n",
+			       client->dirname, rw_desc[i].name);
+			goto err;
+		}
+		atomic_inc(&client->refcount);
+	}
+
+	return 0;
+
+err:
+	/*
+	 * NOTE(review): this also removes entries that were never
+	 * created (everything past the failure point) -- pre-existing
+	 * behavior, kept as-is.
+	 */
+	__remove_entries(client, ro_desc, rw_desc);
+	return -1;
+}
+
+/*
+ * exported entry point: thin wrapper around __create_entries().
+ */
+int
+fbxprocfs_create_entries(struct fbxprocfs_client *client,
+			 const struct fbxprocfs_desc *ro_desc,
+			 const struct fbxprocfs_desc *rw_desc)
+{
+	return __create_entries(client, ro_desc, rw_desc);
+}
+
+/*
+ * exported entry point: thin wrapper around __remove_entries().
+ */
+int
+fbxprocfs_remove_entries(struct fbxprocfs_client *client,
+			 const struct fbxprocfs_desc *ro_desc,
+			 const struct fbxprocfs_desc *rw_desc)
+{
+	return __remove_entries(client, ro_desc, rw_desc);
+}
+
+
+/*
+ * module init: set up client bookkeeping and create the top-level
+ * /proc/freebox directory all clients live under.
+ */
+static int __init
+fbxprocfs_init(void)
+{
+	INIT_LIST_HEAD(&clients);
+	mutex_init(&clients_mutex);
+
+	/* create freebox directory */
+	if (!(root = proc_mkdir("freebox", NULL))) {
+		printk(KERN_ERR PFX "can't create freebox/ dir\n");
+		return -EIO;
+	}
+	return 0;
+	}
+
+/*
+ * module exit: drop the top-level /proc/freebox directory.
+ */
+static void __exit
+fbxprocfs_exit(void)
+{
+	remove_proc_entry("freebox", NULL);
+}
+
+module_init(fbxprocfs_init);
+module_exit(fbxprocfs_exit);
+
+EXPORT_SYMBOL(fbxprocfs_create_entries);
+EXPORT_SYMBOL(fbxprocfs_remove_entries);
+EXPORT_SYMBOL(fbxprocfs_add_client);
+EXPORT_SYMBOL(fbxprocfs_remove_client);
+
+MODULE_LICENSE("GPL");
+MODULE_VERSION("1.0");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+
diff -Nruw linux-6.13.12-fbx/drivers/fbxwatchdog./Kconfig linux-6.13.12-fbx/drivers/fbxwatchdog/Kconfig
--- linux-6.13.12-fbx/drivers/fbxwatchdog./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/fbxwatchdog/Kconfig	2025-09-25 17:40:31.615347629 +0200
@@ -0,0 +1,28 @@
+menuconfig FREEBOX_WATCHDOG
+	tristate "Freebox Watchdog"
+	default n
+
+if FREEBOX_WATCHDOG
+
+config FREEBOX_WATCHDOG_CHAR
+	bool "Freebox Watchdog char device interface."
+	default n
+
+config FREEBOX_WATCHDOG_ORION
+	tristate "Marvell Orion support"
+	depends on PLAT_ORION
+
+config FREEBOX_WATCHDOG_BCM63XX
+	tristate "Broadcom 63xx Freebox Watchdog support"
+	depends on BCM63XX
+	default n
+
+config FREEBOX_WATCHDOG_BCM63XX_OF
+	tristate "Broadcom 63xx Freebox Watchdog support (generic)"
+	depends on OF && !FREEBOX_WATCHDOG_BCM63XX
+
+config FREEBOX_WATCHDOG_FBXGWR_PMU
+	tristate "Freebox PMU Watchdog support"
+	depends on MFD_FBXGWR_PMU
+
+endif
diff -Nruw linux-6.13.12-fbx/drivers/fbxwatchdog./Makefile linux-6.13.12-fbx/drivers/fbxwatchdog/Makefile
--- linux-6.13.12-fbx/drivers/fbxwatchdog./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/fbxwatchdog/Makefile	2025-09-25 17:40:31.615347629 +0200
@@ -0,0 +1,11 @@
+obj-$(CONFIG_FREEBOX_WATCHDOG) += fbxwatchdog.o
+
+fbxwatchdog-objs = fbxwatchdog_core.o
+ifeq ($(CONFIG_FREEBOX_WATCHDOG_CHAR),y)
+fbxwatchdog-objs += fbxwatchdog_char.o
+endif
+
+obj-$(CONFIG_FREEBOX_WATCHDOG_ORION)	+= fbxwatchdog_orion.o
+obj-$(CONFIG_FREEBOX_WATCHDOG_BCM63XX)	+= fbxwatchdog_bcm63xx.o
+obj-$(CONFIG_FREEBOX_WATCHDOG_BCM63XX_OF)	+= fbxwatchdog_bcm63xx_of.o
+obj-$(CONFIG_FREEBOX_WATCHDOG_FBXGWR_PMU)	+= fbxwatchdog_gwr_pmu.o
diff -Nruw linux-6.13.12-fbx/drivers/fbxwatchdog./fbxwatchdog.h linux-6.13.12-fbx/drivers/fbxwatchdog/fbxwatchdog.h
--- linux-6.13.12-fbx/drivers/fbxwatchdog./fbxwatchdog.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/fbxwatchdog/fbxwatchdog.h	2025-09-25 17:40:31.615347629 +0200
@@ -0,0 +1,49 @@
+#ifndef FBXWATCHDOG_H
+# define FBXWATCHDOG_H
+
+struct fbxwatchdog {
+	const char *name;
+	void *priv;
+
+	int enabled;
+	int countdown;
+	int countdown_min;
+
+	int (*wdt_init)(struct fbxwatchdog *wdt);
+	int (*wdt_cleanup)(struct fbxwatchdog *wdt);
+
+	/*
+	 * wdt_start and wdt_stop are called with wdt->lock held and irq
+	 * disabled.
+	 */
+	int (*wdt_start)(struct fbxwatchdog *wdt);
+	int (*wdt_stop)(struct fbxwatchdog *wdt);
+
+	/*
+	 * cb is called from interrupt/softirq context (depends on the
+	 * underlying driver/hardware).
+	 */
+	void (*cb)(struct fbxwatchdog *wdt);
+
+	struct timer_list timer;
+
+	struct device *dev;
+
+	/*
+	 * protect interrupt handlers & start/stop methods running in
+	 * thread context.
+	 */
+	spinlock_t	lock;
+	struct mutex	mutex;
+	bool		use_mutex;
+};
+
+int fbxwatchdog_register(struct fbxwatchdog *wdt);
+int fbxwatchdog_unregister(struct fbxwatchdog *wdt);
+
+#ifdef CONFIG_FREEBOX_WATCHDOG_CHAR
+int fbxwatchdog_char_add(struct fbxwatchdog *wdt);
+void fbxwatchdog_char_remove(struct fbxwatchdog *wdt);
+#endif
+
+#endif /* !FBXWATCHDOG_H */
diff -Nruw linux-6.13.12-fbx/drivers/fbxwatchdog./fbxwatchdog_bcm63xx_of.c linux-6.13.12-fbx/drivers/fbxwatchdog/fbxwatchdog_bcm63xx_of.c
--- linux-6.13.12-fbx/drivers/fbxwatchdog./fbxwatchdog_bcm63xx_of.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/fbxwatchdog/fbxwatchdog_bcm63xx_of.c	2025-09-25 17:40:31.615347629 +0200
@@ -0,0 +1,499 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/panic_notifier.h>
+
+#include "fbxwatchdog.h"
+
+enum bcm63xx_wdt_reg {
+	/* Watchdog default count register */
+	WDT_DEFVAL_REG,
+	/* Watchdog control register */
+	WDT_CTL_REG,
+	/* Watchdog reset length register */
+	WDT_RSTLEN_REG,
+};
+
+enum bcmtimer_chip_id {
+	WDT_6328,
+	WDT_63158,
+};
+
+static const u32 regs_offsets_6328[] = {
+	[WDT_DEFVAL_REG]	= 0x28,
+	[WDT_CTL_REG]		= 0x2c,
+	[WDT_RSTLEN_REG]	= 0x30,
+};
+
+static const u32 regs_offsets_63158[] = {
+	[WDT_DEFVAL_REG]	= 0xc0,
+	[WDT_CTL_REG]		= 0xc4,
+	[WDT_RSTLEN_REG]	= 0xc8,
+};
+
+#define PERF_TIMER64_RESET_REASON	0x48
+#define  PERF_TIMER64_RESET_REASON_WD0		(1 << 31)
+#define  PERF_TIMER64_RESET_REASON_WD1		(1 << 30)
+#define  PERF_TIMER64_RESET_REASON_USR_MASK	0xff
+#define  PERF_TIMER64_RESET_REASON_USR_PANPAN	'P'
+
+/* Watchdog control register constants */
+#define WDT_START_1			(0xff00)
+#define WDT_START_2			(0x00ff)
+#define WDT_STOP_1			(0xee00)
+#define WDT_STOP_2			(0x00ee)
+
+#define PFX "fbxwatchdog_bcm63xx: "
+
+struct bcm_priv {
+	void __iomem		*regs;
+	void __iomem		*reg_top_reset_status;
+	const u32		*regs_offsets;
+	int			irq;
+	enum bcmtimer_chip_id	chip_id;
+	struct platform_device	*pdev;
+
+	struct notifier_block	panic_notifier;
+	u32 timer64_reset_reason;
+	u32 top_reset_status;
+};
+
+#define nb_to_bcm_priv(nb)	container_of(nb, struct bcm_priv, \
+					     panic_notifier)
+
+/*
+ * io helpers to access mac registers
+ */
+static inline u32 wdt_readl(struct bcm_priv *priv, enum bcm63xx_wdt_reg reg)
+{
+	return readl(priv->regs + priv->regs_offsets[reg]);
+}
+
+static inline void wdt_writel(struct bcm_priv *priv, u32 val,
+			      enum bcm63xx_wdt_reg reg)
+{
+	writel(val, priv->regs + priv->regs_offsets[reg]);
+}
+
+/*
+ * IRQ handler, called when half the hw countdown is reached
+ */
+static irqreturn_t bcm63xx_wdt_irq(int irq, void *dev_id)
+{
+	struct fbxwatchdog *wdt = dev_id;
+	struct bcm_priv *priv;
+
+	priv = wdt->priv;
+	spin_lock(&wdt->lock);
+
+	if (!wdt->enabled) {
+		printk(KERN_CRIT "watchdog disabled but hw still running, stopping !\n");
+		wdt_writel(priv, WDT_STOP_1, WDT_CTL_REG);
+		wdt_writel(priv, WDT_STOP_2, WDT_CTL_REG);
+		goto out;
+	}
+
+	/* clear interrupt and reload */
+	wdt_writel(priv, WDT_START_1, WDT_CTL_REG);
+	wdt_writel(priv, WDT_START_2, WDT_CTL_REG);
+
+	if (wdt->cb)
+		wdt->cb(wdt);
+
+out:
+	spin_unlock(&wdt->lock);
+	return IRQ_HANDLED;
+}
+
+static int bcm63xx_wdt_init(struct fbxwatchdog *wdt)
+{
+	struct bcm_priv *priv = wdt->priv;
+	int ret;
+	u32 countdown;
+
+	ret = request_irq(priv->irq, bcm63xx_wdt_irq, 0,
+			  "fbxwatchdog_bcm63xx", wdt);
+	if (ret) {
+		printk(KERN_ERR PFX "request_irq failed: %d\n", ret);
+		return ret;
+	}
+
+	/* irq is triggered at half time, install a 1 sec watchdog,
+	 * that gives 2 irq/s (freq is 50Mhz) */
+	countdown = (50 * 1000 * 1000);
+	wdt_writel(priv, countdown, WDT_DEFVAL_REG);
+
+	return 0;
+}
+
+static int bcm63xx_wdt_cleanup(struct fbxwatchdog *wdt)
+{
+	struct bcm_priv *priv = wdt->priv;
+
+	free_irq(priv->irq, wdt);
+	return 0;
+}
+
+static int bcm63xx_wdt_start(struct fbxwatchdog *wdt)
+{
+	printk(KERN_INFO PFX "watchdog enabled\n");
+	wdt_writel(wdt->priv, WDT_START_1, WDT_CTL_REG);
+	wdt_writel(wdt->priv, WDT_START_2, WDT_CTL_REG);
+	return 0;
+}
+
+static int bcm63xx_wdt_stop(struct fbxwatchdog *wdt)
+{
+	wdt_writel(wdt->priv, WDT_STOP_1, WDT_CTL_REG);
+	wdt_writel(wdt->priv, WDT_STOP_2, WDT_CTL_REG);
+	printk(KERN_INFO PFX "watchdog disabled\n");
+	return 0;
+}
+
+struct reset_reason {
+	int bit;
+	char *reason;
+};
+
+/* file-local table: keep it static to avoid a global symbol */
+static struct reset_reason bcm63158_reasons[] = {
+	{ .bit = 31, .reason = "por-reset", },
+	{ .bit = 30, .reason = "hw-reset", },
+	{ .bit = 29, .reason = "sw-reset", },
+	{ .bit = 28, .reason = "pcie-reset", },
+};
+
+/*
+ * all potentially self clearing data in the registers is stored in
+ * the priv structure for further reuse in
+ * bcm63xx_wdt_show_reset_reason.
+ */
+static void bcm63xx_wdt_report_reset_status(struct fbxwatchdog *wdt)
+{
+	struct bcm_priv *priv = wdt->priv;
+
+	if (priv->chip_id != WDT_63158)
+		return ;
+
+	if (priv->reg_top_reset_status) {
+		size_t i;
+		u32 reg = readl(priv->reg_top_reset_status);
+
+		dev_info(&priv->pdev->dev, "%08x (TOP reset status)\n", reg);
+
+		for (i = 0; i < ARRAY_SIZE(bcm63158_reasons); ++i) {
+			struct reset_reason *r = &bcm63158_reasons[i];
+
+			dev_info(&priv->pdev->dev, " %s: %u\n",
+				 r->reason, !!(reg & (1 << r->bit)));
+		}
+		priv->top_reset_status = reg;
+	}
+
+	/*
+	 * NOTE: WD0 & WD1 fields in the PERF_TIMER64_RESET_REASON
+	 * register will self clear on first register read.
+	 */
+	priv->timer64_reset_reason = readl(priv->regs +
+					   PERF_TIMER64_RESET_REASON);
+
+	dev_info(&priv->pdev->dev, "%08x (PERF timer64 reset reason)",
+		 priv->timer64_reset_reason);
+	if (priv->timer64_reset_reason & PERF_TIMER64_RESET_REASON_WD0)
+		dev_info(&priv->pdev->dev, " wd0 (nominal reboot)\n");
+	if (priv->timer64_reset_reason & PERF_TIMER64_RESET_REASON_WD1)
+		dev_info(&priv->pdev->dev, " wd1 (abnormal reboot)\n");
+
+	if ((priv->timer64_reset_reason & PERF_TIMER64_RESET_REASON_USR_MASK) ==
+	    PERF_TIMER64_RESET_REASON_USR_PANPAN)
+		dev_info(&priv->pdev->dev, " a panic was in progress.\n");
+
+	/*
+	 * explicitely clear user defined bits in reset reason register.
+	 */
+	writel(0x0, priv->regs + PERF_TIMER64_RESET_REASON);
+}
+
+extern int panic_timeout;
+
+/*
+ * watchdog panic notifier. in the unlikely case of a panic, the
+ * watchdog refreshing via the interrupt will be out of order.
+ *
+ * This will result in the board resetting before panic_timeout can
+ * elapse.
+ *
+ * In that case rearm the watchdog so that it fires at panic_timeout +
+ * 1 second.
+ */
+static int bcm63xx_wdt_on_panic(struct notifier_block *nb,
+				unsigned long code, void *unused)
+{
+	struct bcm_priv *priv = nb_to_bcm_priv(nb);
+	static int in_panic;
+	u32 new_wdt_countdown = (panic_timeout + 1) * (50 * 1000 * 1000);
+
+	/*
+	 * avoid recursive calls.
+	 */
+	if (in_panic)
+		return NOTIFY_DONE;
+	in_panic = 1;
+
+
+	dev_info(&priv->pdev->dev, "rearming watchdog to expire in %u "
+		 "seconds.\n", panic_timeout + 1);
+
+	wdt_writel(priv, new_wdt_countdown, WDT_DEFVAL_REG);
+
+	wdt_writel(priv, WDT_START_1, WDT_CTL_REG);
+	wdt_writel(priv, WDT_START_2, WDT_CTL_REG);
+
+	if (priv->chip_id == WDT_63158)
+		/*
+		 * write PANPAN as user defined reason to the reset
+		 * reason register. In the unlikely event a crashzone
+		 * log cannot be read back, it will preserve the knowledge
+		 * that the board crashed due to a panic.
+		 */
+		writel(PERF_TIMER64_RESET_REASON_USR_PANPAN,
+		       priv->regs + PERF_TIMER64_RESET_REASON);
+
+	return NOTIFY_DONE;
+}
+
+static ssize_t bcm63xx_wdt_show_reset_reason(struct device *dev,
+					     struct device_attribute *attr,
+					     char *buf)
+{
+	size_t i;
+	struct fbxwatchdog *wdt = dev_get_drvdata(dev);
+	struct bcm_priv *priv = wdt->priv;
+	size_t retsize;
+
+	if (priv->chip_id != WDT_63158)
+		return -EOPNOTSUPP;
+
+	/*
+	 * no register access here, bcm63xx_wdt_report_reset_status()
+	 * will store them in priv, as most of them are self clear on
+	 * first read.
+	 */
+	*buf = '\0';
+
+	/*
+	 * first iterate on the top reset status reset reasons.
+	 */
+	for (i = 0; i < ARRAY_SIZE(bcm63158_reasons); ++i) {
+		const struct reset_reason *r = &bcm63158_reasons[i];
+
+		if (priv->top_reset_status & (1 << r->bit)) {
+			strcat(buf, r->reason);
+			strcat(buf, ",");
+		}
+	}
+
+	/*
+	 * watchdog status in PERF_TIMER64_RESET_REASON register. in
+	 * our context, wd0 is used to reboot the box normally. wd1
+	 * (managed by this driver) will reboot the board if something
+	 * goes wrong and the hardware watchdog isn't refreshed.
+	 *
+	 * in a nutshell:
+	 * wd0 -> normal boot, used by ATF via the PSCI reset call
+	 * wd1 -> boot due to a hardware watchdog trigger.
+	 */
+	if (priv->timer64_reset_reason & PERF_TIMER64_RESET_REASON_WD0)
+		strcat(buf, "wd0,");
+	if (priv->timer64_reset_reason & PERF_TIMER64_RESET_REASON_WD1)
+		strcat(buf, "wd1,watchdog,");
+
+	/*
+	 * user defined reboot bits in PERF_TIMER64_RESET_REASON_REG.
+	 */
+	if ((priv->timer64_reset_reason & PERF_TIMER64_RESET_REASON_USR_MASK) ==
+	    PERF_TIMER64_RESET_REASON_USR_PANPAN)
+		strcat(buf, "panic,");
+
+	/*
+	 * '\n'-terminate the string.
+	 */
+	retsize = strlen(buf);
+	if (retsize)
+		buf[retsize - 1] = '\n';
+
+	return strlen(buf);
+}
+
+static DEVICE_ATTR(reset_reason, 0444, bcm63xx_wdt_show_reset_reason, NULL);
+
+static struct attribute *bcm63xx_wdt_attrs[] = {
+	&dev_attr_reset_reason.attr,
+	NULL
+};
+
+static const struct attribute_group bcm63xx_wdt_attrs_group = {
+	.attrs = bcm63xx_wdt_attrs,
+};
+
+/*
+ *
+ */
+static int fbxwatchdog_bcm63xx_probe(struct platform_device *pdev)
+{
+	struct fbxwatchdog *wdt;
+	struct bcm_priv *priv;
+	struct resource *res;
+	int ret, irq_number;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->pdev = pdev;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "unable to get register core resource\n");
+		return -ENODEV;
+	}
+
+	priv->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(priv->regs)) {
+		dev_err(&pdev->dev, "unable to ioremap regs\n");
+		return PTR_ERR(priv->regs);
+	}
+
+	/*
+	 * ioremap optional reset cause register.
+	 */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (res) {
+		priv->reg_top_reset_status = devm_ioremap_resource(&pdev->dev,
+								   res);
+		if (IS_ERR(priv->reg_top_reset_status)) {
+			dev_err(&pdev->dev, "unable to ioremap TOP reset "
+				"status register.\n");
+			return PTR_ERR(priv->reg_top_reset_status);
+		}
+		dev_info(&pdev->dev, "TOP reset status register: %pR", res);
+	}
+
+	priv->chip_id = (unsigned long)of_device_get_match_data(&pdev->dev);
+	switch (priv->chip_id) {
+	case WDT_6328:
+		irq_number = 4;
+		priv->regs_offsets = regs_offsets_6328;
+		break;
+	case WDT_63158:
+		irq_number = 0;
+		priv->regs_offsets = regs_offsets_63158;
+		break;
+	default:
+		return -ENODEV;
+	}
+
+	priv->irq = platform_get_irq(pdev, irq_number);
+	if (priv->irq < 0) {
+		dev_err(&pdev->dev, "cannot get watchdog irq\n");
+		return priv->irq;
+	}
+
+	wdt = devm_kzalloc(&pdev->dev, sizeof (*wdt), GFP_KERNEL);
+	if (!wdt) {
+		printk(KERN_WARNING PFX "unable to allocate memory for "
+		       "watchdog.\n");
+		return -ENOMEM;
+	}
+
+	wdt->name = pdev->name;
+	wdt->priv = priv;
+	wdt->wdt_init = bcm63xx_wdt_init;
+	wdt->wdt_cleanup = bcm63xx_wdt_cleanup;
+	wdt->wdt_start = bcm63xx_wdt_start;
+	wdt->wdt_stop = bcm63xx_wdt_stop;
+
+	ret = fbxwatchdog_register(wdt);
+	if (ret) {
+		printk(KERN_WARNING PFX "unable to register watchdog %s.\n",
+		       wdt->name);
+		return ret;
+	}
+	bcm63xx_wdt_report_reset_status(wdt);
+
+	priv->panic_notifier.notifier_call = bcm63xx_wdt_on_panic;
+	atomic_notifier_chain_register(&panic_notifier_list,
+				       &priv->panic_notifier);
+
+	ret = sysfs_create_group(&wdt->dev->kobj, &bcm63xx_wdt_attrs_group);
+	if (ret) {
+		dev_info(&pdev->dev, "sysfs_create_group failed: %pe\n",
+			 ERR_PTR(ret));
+		goto err_unregister_notifier;
+	}
+
+	platform_set_drvdata(pdev, wdt);
+	return 0;
+
+err_unregister_notifier:
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &priv->panic_notifier);
+	fbxwatchdog_unregister(wdt);
+	return ret;
+}
+
+/*
+ *
+ */
+static void fbxwatchdog_bcm63xx_remove(struct platform_device *pdev)
+{
+	struct fbxwatchdog *wdt;
+	struct bcm_priv *priv;
+
+	wdt = platform_get_drvdata(pdev);
+	if (!wdt)
+		return;
+	priv = wdt->priv;
+
+	sysfs_remove_group(&wdt->dev->kobj, &bcm63xx_wdt_attrs_group);
+	atomic_notifier_chain_unregister(&panic_notifier_list,
+					 &priv->panic_notifier);
+	fbxwatchdog_unregister(wdt);
+}
+
+static const struct of_device_id bcmtimer_of_table[] = {
+	{ .compatible = "brcm,bcm6328-timer", .data = (void *)WDT_6328},
+	{ .compatible = "brcm,bcm63158-timer", .data = (void *)WDT_63158 },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, bcmtimer_of_table);
+
+struct platform_driver fbxwatchdog_bcm63xx_driver = {
+	.probe		= fbxwatchdog_bcm63xx_probe,
+	.remove		= fbxwatchdog_bcm63xx_remove,
+	.driver		= {
+		.name	= "bcm63xx_wdt",
+		.of_match_table	= of_match_ptr(bcmtimer_of_table),
+	},
+};
+
+static int __init fbxwatchdog_bcm63xx_of_init(void)
+{
+	/* propagate registration failure instead of ignoring it */
+	return platform_driver_register(&fbxwatchdog_bcm63xx_driver);
+}
+
+static void __exit fbxwatchdog_bcm63xx_of_exit(void)
+{
+	platform_driver_unregister(&fbxwatchdog_bcm63xx_driver);
+}
+
+module_init(fbxwatchdog_bcm63xx_of_init);
+module_exit(fbxwatchdog_bcm63xx_of_exit);
+
+MODULE_AUTHOR("Maxime Bizon");
+MODULE_LICENSE("GPL");
diff -Nruw linux-6.13.12-fbx/drivers/fbxwatchdog./fbxwatchdog_core.c linux-6.13.12-fbx/drivers/fbxwatchdog/fbxwatchdog_core.c
--- linux-6.13.12-fbx/drivers/fbxwatchdog./fbxwatchdog_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/fbxwatchdog/fbxwatchdog_core.c	2025-09-25 17:40:31.615347629 +0200
@@ -0,0 +1,317 @@
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/device.h>
+#include <linux/reboot.h>
+#include <linux/timer.h>
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+
+#include "fbxwatchdog.h"
+
+#define SOFTTIMER_FREQ	(HZ / 10)
+
+#define PFX "fbxwatchdog: "
+
+static struct class *fbxwatchdog_class;
+
+static void wdt_lock(struct fbxwatchdog *wdt, unsigned long *flags)
+{
+	if (wdt->use_mutex)
+		mutex_lock(&wdt->mutex);
+	else
+		spin_lock_irqsave(&wdt->lock, *flags);
+}
+
+static void wdt_unlock(struct fbxwatchdog *wdt, unsigned long *flags)
+{
+	if (wdt->use_mutex)
+		mutex_unlock(&wdt->mutex);
+	else
+		spin_unlock_irqrestore(&wdt->lock, *flags);
+}
+
+static ssize_t
+show_enabled(struct device *dev,
+	     struct device_attribute *attr, char *buf)
+{
+	struct fbxwatchdog *wdt;
+
+	wdt = dev_get_drvdata(dev);
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%i\n", wdt->enabled);
+}
+
+/*
+ * start/stop watchdog depending on the value of the first character
+ * of buf. set countdown_min to a sane value.
+ */
+static ssize_t
+store_enabled(struct device *dev,
+	      struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct fbxwatchdog *wdt;
+	unsigned long flags;
+
+	wdt = dev_get_drvdata(dev);
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	if (size == 0)
+		return 0;
+
+
+	wdt_lock(wdt, &flags);
+	switch (*buf) {
+	case '0':
+		if (wdt->enabled) {
+			wdt->enabled = 0;
+			wdt->wdt_stop(wdt);
+		}
+		break;
+
+	case '1':
+		if (!wdt->enabled) {
+			wdt->enabled = 1;
+			wdt->wdt_start(wdt);
+			wdt->countdown_min = INT_MAX;
+		}
+		break;
+
+	default:
+		break;
+	}
+	wdt_unlock(wdt, &flags);
+
+	return size;
+}
+
+static ssize_t
+show_countdown(struct device *dev,
+	       struct device_attribute *attr, char *buf)
+{
+	struct fbxwatchdog *wdt;
+
+	wdt = dev_get_drvdata(dev);
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%i\n", wdt->countdown);
+}
+
+/*
+ * update watchdog countdown with the userland value given in buf.
+ */
+static ssize_t
+store_countdown(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t size)
+{
+	struct fbxwatchdog *wdt;
+	int countdown;
+	char *ptr;
+
+	wdt = dev_get_drvdata(dev);
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	if (size == 0)
+		return 0;
+
+	ptr = kzalloc(size + 1, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+	strscpy(ptr, buf, size + 1);
+
+	countdown = simple_strtoul(ptr, NULL, 10);
+	wdt->countdown = countdown;
+	kfree(ptr);
+
+	return size;
+}
+
+static ssize_t
+show_countdown_min(struct device *dev,
+		   struct device_attribute *attr, char *buf)
+{
+	struct fbxwatchdog *wdt;
+
+	wdt = dev_get_drvdata(dev);
+	if (!wdt) {
+		printk(KERN_DEBUG "ignoring request to dead watchdog.\n");
+		return -ENODEV;
+	}
+
+	return snprintf(buf, PAGE_SIZE, "%i\n", wdt->countdown_min);
+}
+
+static struct device_attribute wdt_attributes[] = {
+	__ATTR(enabled, 0600, show_enabled, store_enabled),
+	__ATTR(countdown, 0600, show_countdown, store_countdown),
+	__ATTR(countdown_min, 0400, show_countdown_min, NULL),
+};
+
+/*
+ * software timer callback: decrement countdown and update
+ * countdown_min if needed. this is called 10 times per second.
+ */
+static void fbxwatchdog_timer_cb(struct timer_list *t)
+{
+	struct fbxwatchdog *wdt = from_timer(wdt, t, timer);
+
+	if (wdt->enabled) {
+		wdt->countdown -= jiffies_to_msecs(SOFTTIMER_FREQ);
+		if (wdt->countdown < wdt->countdown_min)
+			wdt->countdown_min = wdt->countdown;
+	}
+
+	wdt->timer.expires = jiffies + SOFTTIMER_FREQ;
+	add_timer(&wdt->timer);
+}
+
+/*
+ * called from the half-life interrupt handler; panic if the countdown
+ * is too low (i.e. userland has not refreshed the countdown before it
+ * reached 0).
+ */
+static void fbxwatchdog_halflife_cb(struct fbxwatchdog *wdt)
+{
+	if (wdt->countdown <= 0) {
+		wdt->wdt_stop(wdt);
+		panic("software fbxwatchdog triggered");
+	}
+}
+
+/*
+ * register a new watchdog device.
+ */
+int fbxwatchdog_register(struct fbxwatchdog *wdt)
+{
+	struct device *dev;
+	int i = 0, err = 0;
+
+	if (wdt == NULL)
+		return -EFAULT;
+
+	printk(KERN_INFO PFX "registering watchdog %s\n", wdt->name);
+
+	dev = device_create(fbxwatchdog_class, NULL, 0, wdt, "%s", wdt->name);
+	if (IS_ERR(dev)) {
+		printk(KERN_ERR PFX "unable to allocate device.\n");
+		err = PTR_ERR(dev);
+		goto out_error;
+	}
+	wdt->dev = dev;
+
+	for (i = 0; i < ARRAY_SIZE(wdt_attributes); i++) {
+		err = device_create_file(dev, &wdt_attributes[i]);
+		if (err)
+			goto out_error;
+	}
+
+	/* start countdown soft timer */
+	timer_setup(&wdt->timer, fbxwatchdog_timer_cb, 0);
+	wdt->timer.expires = jiffies + SOFTTIMER_FREQ;
+	add_timer(&wdt->timer);
+
+	if (wdt->use_mutex)
+		mutex_init(&wdt->mutex);
+	else
+		spin_lock_init(&wdt->lock);
+
+	wdt->cb = fbxwatchdog_halflife_cb;
+	err = wdt->wdt_init(wdt);
+	if (err) {
+		printk(KERN_ERR PFX "unable to do low level init of "
+		       "watchdog %s.\n", wdt->name);
+		goto out_del_timer;
+	}
+
+#ifdef CONFIG_FREEBOX_WATCHDOG_CHAR
+	err = fbxwatchdog_char_add(wdt);
+	if (err) {
+		printk(KERN_ERR PFX "unable to add %s to the fbxwatchdog char "
+		       "device interface.\n", wdt->name);
+		goto out_wdt_cleanup;
+	}
+#endif
+
+	return 0;
+
+#ifdef CONFIG_FREEBOX_WATCHDOG_CHAR
+out_wdt_cleanup:
+	wdt->wdt_cleanup(wdt);
+#endif
+
+out_del_timer:
+	del_timer_sync(&wdt->timer);
+out_error:
+	if (wdt->dev) {
+		while (i-- > 0)
+			device_remove_file(dev, &wdt_attributes[i]);
+		device_unregister(dev);
+	}
+	return err;
+}
+
+int fbxwatchdog_unregister(struct fbxwatchdog *wdt)
+{
+	int i;
+
+	printk(KERN_INFO PFX "unregistering watchdog %s\n", wdt->name);
+
+	if (wdt->enabled) {
+		unsigned long flags;
+
+		printk(KERN_WARNING "removing enabled watchdog.\n");
+		wdt_lock(wdt, &flags);
+		wdt->wdt_stop(wdt);
+		wdt_unlock(wdt, &flags);
+	}
+
+#ifdef CONFIG_FREEBOX_WATCHDOG_CHAR
+	fbxwatchdog_char_remove(wdt);
+#endif
+	wdt->wdt_cleanup(wdt);
+	del_timer_sync(&wdt->timer);
+	for (i = 0; i < ARRAY_SIZE(wdt_attributes); i++)
+		device_remove_file(wdt->dev, &wdt_attributes[i]);
+	device_unregister(wdt->dev);
+	wdt->dev = NULL;
+	return 0;
+}
+
+static int __init fbxwatchdog_init(void)
+{
+	printk(KERN_INFO PFX "2007, Freebox SA.\n");
+	fbxwatchdog_class = class_create("fbxwatchdog");
+	if (IS_ERR(fbxwatchdog_class))
+		return PTR_ERR(fbxwatchdog_class);
+	return 0;
+}
+
+static void __exit fbxwatchdog_exit(void)
+{
+	class_destroy(fbxwatchdog_class);
+}
+
+
+EXPORT_SYMBOL_GPL(fbxwatchdog_register);
+EXPORT_SYMBOL_GPL(fbxwatchdog_unregister);
+
+module_init(fbxwatchdog_init);
+module_exit(fbxwatchdog_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("Freebox Watchdog Core - www.freebox.fr");
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/gpio/gpio-fbxgwr-pmu.c	2025-09-25 17:40:31.663347867 +0200
@@ -0,0 +1,453 @@
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/mfd/fbxgwr_pmu.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/interrupt.h>
+
+#define PMU_MAX_GPIOS		32
+#define PMU_BANK_SZ		8
+
+/*
+ * private context
+ */
+struct fbxgwr_pmu_gpio {
+	struct regmap		*regmap;
+	struct gpio_chip	chip;
+
+	int			irq;
+	struct mutex		irq_lock;
+
+	DECLARE_BITMAP(irq_enabled, PMU_MAX_GPIOS);
+	DECLARE_BITMAP(irq_enabled_new, PMU_MAX_GPIOS);
+};
+
+/*
+ * local functions
+ */
+static int fbxgwr_pmu_gpio_get_direction(struct gpio_chip *chip,
+					 unsigned int offset);
+static int fbxgwr_pmu_gpio_direction_input(struct gpio_chip *chip,
+					   unsigned int offset);
+static int fbxgwr_pmu_gpio_direction_output(struct gpio_chip *chip,
+					    unsigned int offset, int value);
+static int fbxgwr_pmu_gpio_get(struct gpio_chip *chip, unsigned int offset);
+static void fbxgwr_pmu_gpio_set(struct gpio_chip *chip, unsigned int offset,
+				int value);
+
+static const struct gpio_chip fbxgwr_pmu_gpio_chip = {
+	.label			= "fbxgwr_pmu_gpio",
+	.owner			= THIS_MODULE,
+	.get_direction		= fbxgwr_pmu_gpio_get_direction,
+	.direction_input	= fbxgwr_pmu_gpio_direction_input,
+	.direction_output	= fbxgwr_pmu_gpio_direction_output,
+	.get			= fbxgwr_pmu_gpio_get,
+	.set			= fbxgwr_pmu_gpio_set,
+	.base			= -1,
+	.can_sleep		= 1,
+};
+
+static int fbxgwr_pmu_gpio_get_direction(struct gpio_chip *chip,
+					 unsigned int offset)
+{
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(chip);
+	u8 dirreg, bit;
+	u32 reg_val;
+	int ret;
+
+	dirreg = PMU_REG_GPIO_DIR_GET_0 + offset / PMU_BANK_SZ;
+	bit = BIT(offset % PMU_BANK_SZ);
+
+	ret = regmap_read(priv->regmap, dirreg, &reg_val);
+	if (ret)
+		return ret;
+
+	if (!(reg_val & bit))
+		return GPIO_LINE_DIRECTION_IN;
+
+	return GPIO_LINE_DIRECTION_OUT;
+}
+
+static int fbxgwr_pmu_gpio_direction_input(struct gpio_chip *chip,
+					   unsigned int offset)
+{
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(chip);
+	u8 dirreg, bit;
+
+	dirreg = PMU_REG_GPIO_DIR_CLR_0 + offset / PMU_BANK_SZ;
+	bit = BIT(offset % PMU_BANK_SZ);
+
+	return regmap_write(priv->regmap, dirreg, bit);
+}
+
+static int fbxgwr_pmu_gpio_direction_output(struct gpio_chip *chip,
+					    unsigned int offset, int value)
+{
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(chip);
+	u8 dirreg, outreg, bit;
+	int ret;
+
+	bit = BIT(offset % PMU_BANK_SZ);
+
+	if (value)
+		outreg = PMU_REG_GPIO_OUT_SET_0 + offset / PMU_BANK_SZ;
+	else
+		outreg = PMU_REG_GPIO_OUT_CLR_0 + offset / PMU_BANK_SZ;
+
+	ret = regmap_write(priv->regmap, outreg, bit);
+	if (ret)
+		return ret;
+
+	dirreg = PMU_REG_GPIO_DIR_SET_0 + offset / PMU_BANK_SZ;
+
+	return regmap_write(priv->regmap, dirreg, bit);
+}
+
+static int fbxgwr_pmu_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(chip);
+	u8 inreg, bit;
+	u32 reg_val;
+	int ret;
+
+	inreg = PMU_REG_GPIO_IN_0 + offset / PMU_BANK_SZ;
+	bit = BIT(offset % PMU_BANK_SZ);
+
+	ret = regmap_read(priv->regmap, inreg, &reg_val);
+	if (ret)
+		return ret;
+
+	return !!(reg_val & bit);
+}
+
+static void fbxgwr_pmu_gpio_set(struct gpio_chip *chip, unsigned int offset,
+				int value)
+{
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(chip);
+	u8 outreg, bit;
+
+	bit = BIT(offset % PMU_BANK_SZ);
+
+	if (value)
+		outreg = PMU_REG_GPIO_OUT_SET_0 + offset / PMU_BANK_SZ;
+	else
+		outreg = PMU_REG_GPIO_OUT_CLR_0 + offset / PMU_BANK_SZ;
+
+	regmap_write(priv->regmap, outreg, bit);
+}
+
+static void fbxgwr_pmu_irq_enable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(gc);
+
+	/* delay manipulation of registers to bus_sync_unlock()
+	 * callback */
+	gpiochip_enable_irq(gc, irqd_to_hwirq(d));
+	set_bit(d->hwirq, priv->irq_enabled_new);
+}
+
+static void fbxgwr_pmu_irq_disable(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(gc);
+
+	/* delay manipulation of registers to bus_sync_unlock()
+	 * callback */
+	clear_bit(d->hwirq, priv->irq_enabled_new);
+	gpiochip_disable_irq(gc, irqd_to_hwirq(d));
+}
+
+static void fbxgwr_pmu_irq_bus_lock(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(gc);
+
+	mutex_lock(&priv->irq_lock);
+	bitmap_copy(priv->irq_enabled_new, priv->irq_enabled, PMU_MAX_GPIOS);
+}
+
+static int do_irqcmd(struct fbxgwr_pmu_gpio *priv,
+		     u32 cmd, u32 nr)
+{
+	int ret, i;
+	u32 val;
+
+	regmap_write(priv->regmap, PMU_REG_GPIO_IRQ_CMD_NR, nr);
+	regmap_write(priv->regmap, PMU_REG_GPIO_IRQ_CMD, cmd);
+
+	for (i = 0; i < 100; i++) {
+		ret = regmap_read(priv->regmap, PMU_REG_GPIO_IRQ_CMD_STAT,
+				  &val);
+		if (ret) {
+			dev_err(priv->chip.parent, "regmap read: %d\n", ret);
+			return ret;
+		}
+
+		if (val & PMU_GPIOIRQCMD_RES_BUSY) {
+			msleep(1);
+			continue;
+		}
+
+		if ((val & PMU_GPIOIRQCMD_RES_SUCCESS))
+			return 0;
+
+		dev_err(priv->chip.parent, "failed to start/stop interrupt\n");
+		return -EIO;
+	}
+
+	return -ETIMEDOUT;	/* still busy after 100 polls */
+}
+
+static void fbxgwr_pmu_irq_bus_sync_unlock(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(gc);
+	size_t i;
+
+	/* update enabled mask */
+	if (!bitmap_equal(priv->irq_enabled, priv->irq_enabled_new,
+			  PMU_MAX_GPIOS)) {
+		unsigned int p;
+		DECLARE_BITMAP(irq_changed, PMU_MAX_GPIOS);
+
+		bitmap_xor(irq_changed,
+			   priv->irq_enabled, priv->irq_enabled_new,
+			   PMU_MAX_GPIOS);
+
+		for_each_set_bit(p, irq_changed, PMU_MAX_GPIOS) {
+			unsigned int cmd;
+
+			if (test_bit(p, priv->irq_enabled_new))
+				cmd = PMU_GPIOIRQCMD_ENABLE;
+			else
+				cmd = PMU_GPIOIRQCMD_DISABLE;
+			do_irqcmd(priv, cmd, p);
+		}
+
+		/* update changed mask */
+		for (i = 0; i < PMU_MAX_GPIOS / PMU_BANK_SZ; i++) {
+			u8 mask;
+
+			mask = bitmap_get_value8(priv->irq_enabled_new, i * 8);
+			regmap_write(priv->regmap,
+				     PMU_REG_GPIO_IRQ_MASK_BASE + i,
+				     mask);
+		}
+
+		bitmap_copy(priv->irq_enabled,
+			    priv->irq_enabled_new, PMU_MAX_GPIOS);
+	}
+
+	mutex_unlock(&priv->irq_lock);
+}
+
+static irqreturn_t fbxgwr_pmu_irq_handler(int irq, void *devid)
+{
+	struct fbxgwr_pmu_gpio *priv = devid;
+	DECLARE_BITMAP(pending, PMU_MAX_GPIOS);
+	irqreturn_t irqret = IRQ_NONE;
+	unsigned int p;
+	size_t i;
+
+	bitmap_zero(pending, PMU_MAX_GPIOS);
+
+	for (i = 0; i < PMU_MAX_GPIOS / PMU_BANK_SZ; i++) {
+		u32 bank_pending;
+		u8 mask;
+		int ret;
+
+		mask = bitmap_get_value8(priv->irq_enabled, i * 8);
+		if (!mask)
+			continue;
+
+		ret = regmap_read(priv->regmap, PMU_REG_GPIO_IRQ_STAT_BASE + i,
+				  &bank_pending);
+		if (ret)
+			break;
+
+		if (!bank_pending)
+			continue;
+
+		irqret = IRQ_HANDLED;
+
+		/* clear interrupt status */
+		ret = regmap_write(priv->regmap,
+				   PMU_REG_GPIO_IRQ_STAT_BASE + i,
+				   bank_pending);
+		if (ret)
+			break;
+
+		bitmap_set_value8(pending, bank_pending, i * 8);
+	}
+
+	for_each_set_bit(p, pending, PMU_MAX_GPIOS) {
+		int child_irq;
+		child_irq = irq_find_mapping(priv->chip.irq.domain, p);
+		handle_nested_irq(child_irq);
+	}
+
+	return irqret;
+}
+
+static int fbxgwr_pmu_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+
+	/* we only support both edges */
+	if ((type & IRQ_TYPE_EDGE_BOTH) != IRQ_TYPE_EDGE_BOTH) {
+		dev_err(gc->parent, "irq %d: unsupported type %d\n",
+			d->irq, type);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void fbxgwr_pmu_init_irq_valid_mask(struct gpio_chip *gc,
+					   unsigned long *valid_mask,
+					   unsigned int ngpios)
+{
+	struct fbxgwr_pmu_gpio *priv = gpiochip_get_data(gc);
+	DECLARE_BITMAP(irq_valid, PMU_MAX_GPIOS);
+	unsigned int i;
+
+	for (i = 0; i < PMU_MAX_GPIOS / PMU_BANK_SZ; i++) {
+		u32 val;
+		int ret;
+
+		ret = regmap_read(priv->regmap, PMU_REG_GPIO_IRQ_CAP_BASE + i,
+				  &val);
+		if (ret) {
+			dev_err(gc->parent, "regmap read failed: %d\n", ret);
+			return;
+		}
+
+		bitmap_set_value8(irq_valid, val, i * 8);
+	}
+
+	bitmap_copy(valid_mask, irq_valid, ngpios);
+}
+
+static const struct irq_chip fbxgwr_pmu_irq_chip = {
+	.name			= "fbxgwr_pmu_gpio",
+	.irq_bus_lock		= fbxgwr_pmu_irq_bus_lock,
+	.irq_bus_sync_unlock	= fbxgwr_pmu_irq_bus_sync_unlock,
+	.irq_set_type		= fbxgwr_pmu_irq_set_type,
+	.irq_enable		= fbxgwr_pmu_irq_enable,
+	.irq_disable		= fbxgwr_pmu_irq_disable,
+	.flags			= IRQCHIP_IMMUTABLE,
+	 GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+static int fbxgwr_pmu_gpio_irq_setup(struct platform_device *pdev,
+				     struct fbxgwr_pmu_gpio *priv)
+{
+	struct gpio_irq_chip *girq;
+	u32 val;
+	int ret;
+
+	if (!priv->irq)
+		return 0;
+
+	ret = regmap_read(priv->regmap, PMU_REG_FW_CAPABILITIES, &val);
+	if (ret)
+		return -EIO;
+
+	if (!(val & PMU_FW_CAP_GPIO_IRQ))
+		return 0;
+
+	mutex_init(&priv->irq_lock);
+
+	ret = devm_request_threaded_irq(&pdev->dev, priv->irq,
+					NULL, fbxgwr_pmu_irq_handler,
+					IRQF_ONESHOT |
+					IRQF_SHARED |
+					IRQF_TRIGGER_LOW,
+					dev_name(&pdev->dev), priv);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to request irq %d: %d\n", priv->irq, ret);
+		return ret;
+	}
+
+	girq = &priv->chip.irq;
+
+	gpio_irq_chip_set_chip(girq, &fbxgwr_pmu_irq_chip);
+	girq->parent_handler = NULL;
+	girq->num_parents = 0;
+	girq->parents = NULL;
+	girq->default_type = IRQ_TYPE_NONE;
+	girq->handler = handle_bad_irq;
+	girq->init_valid_mask = fbxgwr_pmu_init_irq_valid_mask;
+	girq->threaded = true;
+
+	return 0;
+}
+
+static int fbxgwr_pmu_gpio_probe(struct platform_device *pdev)
+{
+	struct fbxgwr_pmu_gpio *priv;
+	u32 ngpios;
+	int ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(struct fbxgwr_pmu_gpio),
+			     GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!priv->regmap)
+		return -ENODEV;
+
+	priv->irq = platform_get_irq_optional(pdev, 0);
+	if (priv->irq < 0)
+		return priv->irq;
+
+	priv->chip = fbxgwr_pmu_gpio_chip;
+	priv->chip.parent = &pdev->dev;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios);
+	if (ret) {
+		dev_err(&pdev->dev, "missing ngpios property in DT\n");
+		return ret;
+	}
+
+	/* reset io expander internal state, in case something else
+	 * enabled any irq before */
+	ret = regmap_write(priv->regmap, PMU_REG_GPIO_REINIT, 1);
+	if (ret)
+		return -EIO;
+
+	priv->chip.ngpio = ngpios;
+
+	ret = fbxgwr_pmu_gpio_irq_setup(pdev, priv);
+	if (ret)
+		return ret;
+
+	ret = devm_gpiochip_add_data(&pdev->dev, &priv->chip, priv);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static const struct of_device_id fbxgwr_pmu_gpio_of_id[] = {
+	{ .compatible = "freebox,fbxgwr-pmu-gpio", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, fbxgwr_pmu_gpio_of_id);
+
+static struct platform_driver fbxgwr_pmu_gpio_driver = {
+	.probe		= fbxgwr_pmu_gpio_probe,
+	.driver = {
+		.name		= "fbxgwr-pmu-gpio",
+		.of_match_table	= fbxgwr_pmu_gpio_of_id,
+	},
+};
+
+module_platform_driver(fbxgwr_pmu_gpio_driver);
+
+MODULE_AUTHOR("Marios Makassikis");
+MODULE_LICENSE("GPL");
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/hwmon/fbxgwr_pmu_hwmon.c	2025-09-25 17:40:32.663352826 +0200
@@ -0,0 +1,281 @@
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/mfd/fbxgwr_pmu.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/platform_device.h>
+
+struct fbxgwr_pmu_hwmon {
+	struct regmap	*regmap;
+	u8		 *in_map[hwmon_max];
+};
+
+/* integer power of ten: returns 10^x (x is small, no overflow guard) */
+static inline int __pow10(u8 x)
+{
+	int result = 1;
+	u8 i;
+
+	for (i = 0; i < x; i++)
+		result *= 10;
+
+	return result;
+}
+
+/*
+ * hwmon .read callback.
+ *
+ * Each input channel occupies 3 consecutive registers starting at
+ * PMU_REG_IN_BASE + 3 * index: a flags byte (type, signedness, scale),
+ * then the value low byte, then the value high byte.  The per-type
+ * channel index is translated to a hardware index via priv->in_map[],
+ * which was built at probe time.
+ */
+static int fbxgwr_pmu_hwmon_read(struct device *dev,
+				 enum hwmon_sensor_types type,
+				 u32 attr, int channel, long *val)
+{
+	struct fbxgwr_pmu_hwmon *priv = dev_get_drvdata(dev);
+	u32 reg_val;
+	int ret;
+
+	switch (type) {
+	case hwmon_in:
+	case hwmon_curr:
+	case hwmon_fan:
+	case hwmon_temp:
+	case hwmon_power:
+	{
+		int in_off = priv->in_map[type][channel] * 3;
+		u32 high, low;
+		u16 val16;
+		int div;
+
+		/* flags byte: signedness and scale of this channel */
+		ret = regmap_read(priv->regmap,
+				  PMU_REG_IN_BASE + in_off + 0,
+				  &reg_val);
+		if (ret)
+			return ret;
+
+		ret = regmap_read(priv->regmap,
+				  PMU_REG_IN_BASE + in_off + 1,
+				  &low);
+		if (ret)
+			return ret;
+
+		ret = regmap_read(priv->regmap,
+				  PMU_REG_IN_BASE + in_off + 2,
+				  &high);
+		if (ret)
+			return ret;
+
+		/* reassemble the 16-bit little-endian raw value */
+		val16 = (high << 8) | low;
+		if (reg_val & PMU_IN_SIGNED_MASK)
+			*val = sign_extend32(val16, 15);
+		else
+			*val = val16;
+
+		/* NOTE(review): the "divider" field is applied as a
+		 * power-of-ten MULTIPLIER here — confirm against the
+		 * PMU firmware register spec that this is intended */
+		div = (reg_val & PMU_IN_DIVIDER_MASK) >> PMU_IN_DIVIDER_SHIFT;
+		if (div)
+			*val *= __pow10(div);
+
+		break;
+	}
+
+	case hwmon_pwm:
+		/* PWM duty cycle registers are one byte per fan */
+		ret = regmap_read(priv->regmap, PMU_REG_FAN_PWM_BASE + channel,
+				  &reg_val);
+		if (ret)
+			return ret;
+		*val = reg_val;
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * hwmon .write callback: only the fan PWM duty-cycle registers are
+ * writable; the requested value is clamped to the 8-bit register range.
+ */
+static int fbxgwr_pmu_hwmon_write(struct device *dev,
+				  enum hwmon_sensor_types type,
+				  u32 attr, int channel, long val)
+{
+	struct fbxgwr_pmu_hwmon *priv = dev_get_drvdata(dev);
+
+	if (type != hwmon_pwm)
+		return -EOPNOTSUPP;
+
+	return regmap_write(priv->regmap, PMU_REG_FAN_PWM_BASE + channel,
+			    clamp_val(val, 0, 255));
+}
+
+/* sensor channels are read-only, PWM outputs are read-write */
+static umode_t fbxgwr_pmu_hwmon_is_visible(const void *_data,
+					   enum hwmon_sensor_types type,
+					   u32 attr, int channel)
+{
+	if (type == hwmon_pwm)
+		return 0644;
+
+	if (type == hwmon_in || type == hwmon_curr || type == hwmon_fan ||
+	    type == hwmon_temp || type == hwmon_power)
+		return 0444;
+
+	return 0;
+}
+
+static const struct hwmon_ops fbxgwr_pmu_hwmon_ops = {
+	.is_visible = fbxgwr_pmu_hwmon_is_visible,
+	.read = fbxgwr_pmu_hwmon_read,
+	.write = fbxgwr_pmu_hwmon_write,
+};
+
+/*
+ * Per-type channel attribute flags: every channel exposes a plain
+ * *_input attribute.  Read-only table, so keep it in rodata.
+ */
+static const u32 hwmon_attributes[hwmon_max] = {
+	[hwmon_temp] = HWMON_T_INPUT,
+	[hwmon_in] = HWMON_I_INPUT,
+	[hwmon_curr] = HWMON_C_INPUT,
+	[hwmon_power] = HWMON_P_INPUT,
+	[hwmon_pwm] = HWMON_PWM_INPUT,
+	[hwmon_fan] = HWMON_F_INPUT,
+};
+
+/*
+ * Probe: enumerate the PMU's input channels from hardware and build
+ * the hwmon_chip_info channel descriptors dynamically.
+ *
+ * Phase 1: read each channel's type byte, count channels per hwmon
+ *          type and record the hw index in in_map[type][].
+ * Phase 2: read the fan PWM count.
+ * Phase 3: allocate one hwmon_channel_info per used type (plus a
+ *          NULL-terminated pointer array) and register the device.
+ */
+static int fbxgwr_pmu_hwmon_probe(struct platform_device *pdev)
+{
+	struct fbxgwr_pmu_hwmon *hwmon;
+	struct device *hwmon_dev;
+	enum hwmon_sensor_types type;
+	u32 fan_count, in_reg_count, in_count, cinfo_count, i;
+	int ret, nr_count[hwmon_max] = {0};
+	const struct hwmon_channel_info **ptr_cinfos;
+	struct hwmon_channel_info *cinfos;
+	struct hwmon_chip_info pmu_hwmon_chip_info = {
+		.ops = &fbxgwr_pmu_hwmon_ops,
+	};
+
+	hwmon = devm_kzalloc(&pdev->dev, sizeof(struct fbxgwr_pmu_hwmon),
+			     GFP_KERNEL);
+	if (!hwmon)
+		return -ENOMEM;
+
+	/* registers are owned by the parent MFD device */
+	hwmon->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!hwmon->regmap)
+		return -ENODEV;
+
+	/* enumerate hardware channels and build channel infos
+	 * dynamically */
+	ret = regmap_read(hwmon->regmap, PMU_REG_IN_COUNT, &in_reg_count);
+	if (ret)
+		return ret;
+
+	/* get actual sensor type from hardware */
+	in_count = 0;
+	for (i = 0; i < in_reg_count; i++) {
+		u32 in_type;
+
+		/* first of the 3 per-channel registers holds the type */
+		ret = regmap_read(hwmon->regmap, PMU_REG_IN_BASE + i * 3,
+				  &in_type);
+		if (ret)
+			return ret;
+
+		in_type &= PMU_IN_TYPE_MASK;
+		switch (in_type) {
+		case PMU_IN_TYPE_UNUSED:
+			continue;
+		case PMU_IN_TYPE_CURRENT:
+			type = hwmon_curr;
+			break;
+		case PMU_IN_TYPE_VOLTAGE:
+			type = hwmon_in;
+			break;
+		case PMU_IN_TYPE_POWER:
+			type = hwmon_power;
+			break;
+		case PMU_IN_TYPE_TEMPERATURE:
+			type = hwmon_temp;
+			break;
+		case PMU_IN_TYPE_FAN_INPUT:
+			type = hwmon_fan;
+			break;
+		default:
+			dev_warn(&pdev->dev, "unknown in-type, "
+				 "assume voltage\n");
+			type = hwmon_in;
+			break;
+		}
+
+		/* lazily allocate the hw-index map for this type;
+		 * in_reg_count entries is a safe upper bound */
+		if (!hwmon->in_map[type]) {
+			hwmon->in_map[type] = devm_kcalloc(&pdev->dev,
+							   in_reg_count,
+							   sizeof (u8),
+							   GFP_KERNEL);
+			if (!hwmon->in_map[type])
+				return -ENOMEM;
+		}
+
+		/* per-type channel N maps to hardware channel i */
+		hwmon->in_map[type][nr_count[type]] = i;
+		nr_count[type]++;
+		in_count++;
+	}
+
+	ret = regmap_read(hwmon->regmap, PMU_REG_FAN_PWM_COUNT, &fan_count);
+	if (ret)
+		return ret;
+	nr_count[hwmon_pwm] = fan_count;
+
+	/* finally allocate channel info */
+	cinfo_count = fan_count + in_count;
+
+	cinfos = devm_kcalloc(&pdev->dev, cinfo_count,
+			      sizeof (*cinfos), GFP_KERNEL);
+	if (!cinfos)
+		return -ENOMEM;
+
+	/* +1 for the NULL terminator expected by the hwmon core */
+	ptr_cinfos = devm_kcalloc(&pdev->dev, cinfo_count + 1,
+				  sizeof (*ptr_cinfos), GFP_KERNEL);
+	if (!ptr_cinfos)
+		return -ENOMEM;
+
+	pmu_hwmon_chip_info.info = ptr_cinfos;
+
+	for (type = 0; type < hwmon_max; type++) {
+		u32 *cfg;
+
+		if (!nr_count[type])
+			continue;
+
+		/* zero-terminated config array, one word per channel */
+		cfg = devm_kcalloc(&pdev->dev, nr_count[type] + 1,
+				   sizeof (*cfg), GFP_KERNEL);
+		if (!cfg)
+			return -ENOMEM;
+
+		for (i = 0; i < nr_count[type]; i++)
+			cfg[i] = hwmon_attributes[type];
+
+		cinfos->type = type;
+		cinfos->config = cfg;
+		*ptr_cinfos++ = cinfos++;
+	}
+
+	hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev,
+							 "fbxgwr_pmu_hwmon",
+							 hwmon,
+							 &pmu_hwmon_chip_info,
+							 NULL);
+	return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct platform_driver fbxgwr_pmu_hwmon_driver = {
+	.probe		= fbxgwr_pmu_hwmon_probe,
+	.driver = {
+		.name	= "fbxgwr-pmu-hwmon",
+	},
+};
+
+module_platform_driver(fbxgwr_pmu_hwmon_driver);
+
+MODULE_AUTHOR("Marios Makassikis");
+MODULE_LICENSE("GPL");
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/hwmon/ld6710-fbx.c	2025-09-25 17:40:32.671352866 +0200
@@ -0,0 +1,340 @@
+/*
+ * ld6710-fbx.c for ld6710-fbx
+ * Created by <nschichan@freebox.fr> on Wed Sep 25 15:01:56 2019
+ */
+
+/*
+ * Driver for LD6710 power deliverance with freebox specific
+ * firmware. The power supply temperature report on the I2C register
+ * space is a feature of the ROMed firmware on the chip, which depends
+ * on the OEM.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/hwmon.h>
+
+#define LD6710_CHIPVER	0x00
+#define LD6710_FWVER	0x01
+
+#define LD6710_SINK_CURRENT		0x10
+#define LD6710_SINK_CURRENT_MAX		0x11
+
+#define LD6710_SINK_TEMP		0x20
+#define LD6710_SINK_TEMP_TURNOFF	0x21
+#define LD6710_SINK_TEMP_TURNON		0x22
+
+#define LD6710_SINK_STATUS		0x30
+#define  SINK_STATUS_OTP		(1 << 0)
+#define  SINK_STATUS_OCP		(1 << 1)
+#define  SINK_STATUS_OVP		(1 << 2)
+
+struct ld6710_priv {
+	struct device *hwmon_dev;
+	struct i2c_client *client;
+	struct mutex mutex;
+};
+
+/*
+ * Read one byte register over SMBus.  On I2C failure, log and return
+ * 0xff — callers (e.g. the probe version check) treat 0xff as
+ * "invalid / not present" rather than propagating an errno.
+ */
+static int ld6710_read(struct ld6710_priv *priv, u8 addr)
+{
+	int ret;
+
+	ret = i2c_smbus_read_byte_data(priv->client, addr);
+	if (ret < 0) {
+		dev_err(&priv->client->dev, "i2c read error at address %02x\n",
+			addr);
+		return 0xff;
+	}
+	return ret;
+}
+
+/*
+ * Write one byte register over SMBus.  Best-effort: failures are
+ * logged (mirroring ld6710_read()) but not propagated, since the
+ * sysfs store callers have no recovery path anyway.
+ */
+static void ld6710_write(struct ld6710_priv *priv, u8 addr, u8 value)
+{
+	int ret;
+
+	ret = i2c_smbus_write_byte_data(priv->client, addr, value);
+	if (ret < 0)
+		dev_err(&priv->client->dev,
+			"i2c write error at address %02x\n", addr);
+}
+
+/* fetch the driver state attached to the i2c client behind @dev */
+static struct ld6710_priv *to_ld6710_priv(struct device *dev)
+{
+	return i2c_get_clientdata(to_i2c_client(dev));
+}
+
+/*
+ * chip / fw
+ */
+static ssize_t version_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct ld6710_priv *priv = to_ld6710_priv(dev);
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	u32 v;
+
+	mutex_lock(&priv->mutex);
+	v = ld6710_read(priv, sattr->nr);
+	mutex_unlock(&priv->mutex);
+
+	return sprintf(buf, "0x%02x\n", v);
+}
+
+static SENSOR_DEVICE_ATTR_2_RO(chipver, version, LD6710_CHIPVER, 0);
+static SENSOR_DEVICE_ATTR_2_RO(fwver, version, LD6710_FWVER, 0);
+
+static struct attribute *ld6710_ver_attrs[] = {
+	&sensor_dev_attr_chipver.dev_attr.attr,
+	&sensor_dev_attr_fwver.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ld6710_ver_group = {
+	.attrs = ld6710_ver_attrs,
+};
+
+/*
+ * sink current (mA)
+ */
+static ssize_t current_show(struct device *dev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct ld6710_priv *priv = to_ld6710_priv(dev);
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	u32 v;
+
+	mutex_lock(&priv->mutex);
+	v = ld6710_read(priv, sattr->nr) * 100;
+	mutex_unlock(&priv->mutex);
+
+	return sprintf(buf, "%d\n", v);
+}
+
+/*
+ * Store a current limit in mA; the hardware register has a 100 mA
+ * granularity and is one byte wide.
+ */
+static ssize_t current_store(struct device *dev,
+			     struct device_attribute *attr,
+			     const char *buf, size_t count)
+{
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	struct ld6710_priv *priv = to_ld6710_priv(dev);
+	unsigned long ma;
+
+	if (kstrtoul(buf, 10, &ma))
+		return -EINVAL;
+
+	ma /= 100;
+	if (ma > 255)
+		return -EINVAL;
+
+	mutex_lock(&priv->mutex);
+	ld6710_write(priv, sattr->nr, ma);
+	mutex_unlock(&priv->mutex);
+
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR_2_RO(sink_current, current, LD6710_SINK_CURRENT, 0);
+static SENSOR_DEVICE_ATTR_2_RO(in1_input, current, LD6710_SINK_CURRENT, 0);
+static SENSOR_DEVICE_ATTR_2_RW(sink_current_max, current,
+			       LD6710_SINK_CURRENT_MAX, 0);
+
+static struct attribute *ld6710_sink_current_attrs[] = {
+	&sensor_dev_attr_in1_input.dev_attr.attr,
+	&sensor_dev_attr_sink_current.dev_attr.attr,
+	&sensor_dev_attr_sink_current_max.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ld6710_sink_current_group = {
+	.attrs = ld6710_sink_current_attrs,
+};
+
+/*
+ * sink temperature (1/1000th degree)
+ */
+static ssize_t temperature_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct ld6710_priv *priv = to_ld6710_priv(dev);
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	u32 v;
+
+	mutex_lock(&priv->mutex);
+	v = ld6710_read(priv, sattr->nr) * 1000;
+	mutex_unlock(&priv->mutex);
+
+	return sprintf(buf, "%d\n", v);
+}
+
+static ssize_t temperature_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t count)
+{
+	struct ld6710_priv *priv = to_ld6710_priv(dev);
+	struct sensor_device_attribute_2 *sattr = to_sensor_dev_attr_2(attr);
+	unsigned long val;
+
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	val /= 1000;
+	if (val > 255)
+		return -EINVAL;
+
+	mutex_lock(&priv->mutex);
+	ld6710_write(priv, sattr->nr, val);
+	mutex_unlock(&priv->mutex);
+
+	return count;
+}
+
+static SENSOR_DEVICE_ATTR_2_RO(temp1_input, temperature, LD6710_SINK_TEMP, 0);
+static SENSOR_DEVICE_ATTR_2_RW(temp1_turnoff, temperature,
+			       LD6710_SINK_TEMP_TURNOFF, 0);
+static SENSOR_DEVICE_ATTR_2_RW(temp1_turnon, temperature,
+			       LD6710_SINK_TEMP_TURNON, 0);
+
+
+static struct attribute *ld6710_sink_temp_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_turnoff.dev_attr.attr,
+	&sensor_dev_attr_temp1_turnon.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ld6710_sink_temp_group = {
+	.attrs = ld6710_sink_temp_attrs,
+};
+
+/*
+ * status
+ */
+static ssize_t status_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	return version_show(dev, attr, buf);
+}
+
+static SENSOR_DEVICE_ATTR_2_RO(status, status, LD6710_SINK_STATUS, 0);
+
+static struct attribute *ld6710_status_attrs[] = {
+	&sensor_dev_attr_status.dev_attr.attr,
+	NULL,
+};
+
+static const struct attribute_group ld6710_status_group = {
+	.attrs = ld6710_status_attrs,
+};
+
+
+static void ld6710_fbx_remove_files(struct i2c_client *client)
+{
+	sysfs_remove_group(&client->dev.kobj, &ld6710_ver_group);
+	sysfs_remove_group(&client->dev.kobj, &ld6710_sink_current_group);
+	sysfs_remove_group(&client->dev.kobj, &ld6710_sink_temp_group);
+	sysfs_remove_group(&client->dev.kobj, &ld6710_status_group);
+}
+
+/*
+ * Probe: verify the chip answers with sane version registers, create
+ * the sysfs attribute groups and register the hwmon device.
+ *
+ * Error unwinding is staged: only the groups that were actually
+ * created are removed (removing a group that was never created makes
+ * sysfs_remove_group() emit a warning).
+ */
+static int ld6710_fbx_probe(struct i2c_client *client)
+{
+	struct ld6710_priv *priv;
+	u8 chipver, fwver;
+	int error;
+
+	dev_info(&client->dev, "probe\n");
+
+	priv = devm_kzalloc(&client->dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->client = client;
+	mutex_init(&priv->mutex);
+	i2c_set_clientdata(client, priv);
+
+	/*
+	 * read chipver and fwver and check that they look sane.
+	 * ld6710_read() returns 0xff on I2C failure, so 0xff also
+	 * covers "device not responding".
+	 */
+	chipver = ld6710_read(priv, LD6710_CHIPVER);
+	fwver = ld6710_read(priv, LD6710_FWVER);
+	if (chipver == 0xff || fwver == 0xff) {
+		dev_err(&client->dev, "invalid chip version or firmware "
+			"version.\n");
+		return -ENXIO;
+	}
+
+	dev_info(&client->dev, "LD6710 chip %02x, fw %02x\n",
+		 chipver, fwver);
+
+	/*
+	 * create attributes
+	 */
+	error = sysfs_create_group(&client->dev.kobj, &ld6710_ver_group);
+	if (error)
+		return error;
+
+	error = sysfs_create_group(&client->dev.kobj,
+				   &ld6710_sink_current_group);
+	if (error)
+		goto remove_ver;
+
+	error = sysfs_create_group(&client->dev.kobj, &ld6710_sink_temp_group);
+	if (error)
+		goto remove_current;
+
+	error = sysfs_create_group(&client->dev.kobj, &ld6710_status_group);
+	if (error)
+		goto remove_temp;
+
+	/*
+	 * register hwmon device.
+	 */
+	priv->hwmon_dev = hwmon_device_register(&client->dev);
+	if (IS_ERR(priv->hwmon_dev)) {
+		dev_err(&client->dev, "unable to register hwmon device.\n");
+		error = PTR_ERR(priv->hwmon_dev);
+		goto remove_status;
+	}
+
+	return 0;
+
+remove_status:
+	sysfs_remove_group(&client->dev.kobj, &ld6710_status_group);
+remove_temp:
+	sysfs_remove_group(&client->dev.kobj, &ld6710_sink_temp_group);
+remove_current:
+	sysfs_remove_group(&client->dev.kobj, &ld6710_sink_current_group);
+remove_ver:
+	sysfs_remove_group(&client->dev.kobj, &ld6710_ver_group);
+	return error;
+}
+
+static void ld6710_fbx_remove(struct i2c_client *client)
+{
+	struct ld6710_priv *priv = i2c_get_clientdata(client);
+
+	dev_info(&client->dev, "remove\n");
+
+	hwmon_device_unregister(priv->hwmon_dev);
+	ld6710_fbx_remove_files(priv->client);
+}
+
+static const struct of_device_id ld6710_fbx_of_match[] = {
+	{ .compatible	= "leadtrend,ld6710-fbx" },
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ld6710_fbx_of_match);
+
+
+static const unsigned short ld6710_addrs[] = { 0x68, /* maybe some others ? */
+					       I2C_CLIENT_END };
+
+static struct i2c_driver ld6710_fbx_driver = {
+	.class		= I2C_CLASS_HWMON,
+	.driver = {
+		.name	= "ld6710_fbx",
+		.of_match_table = of_match_ptr(ld6710_fbx_of_match),
+	},
+	.probe		= ld6710_fbx_probe,
+	.remove		= ld6710_fbx_remove,
+	.address_list	= ld6710_addrs,
+};
+
+module_i2c_driver(ld6710_fbx_driver);
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/leds/leds-fbxgwr-pmu.c	2025-09-25 17:40:33.087354928 +0200
@@ -0,0 +1,118 @@
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/mfd/fbxgwr_pmu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+
+struct fbxgwr_pmu_led;
+
+struct pmu_led {
+	struct led_classdev	ldev;
+	u32			reg;
+	struct fbxgwr_pmu_led	*priv;
+};
+
+struct fbxgwr_pmu_led {
+	struct regmap		*regmap;
+	struct pmu_led		*leds;
+	size_t			nleds;
+};
+
+/* push the requested brightness straight to the PMU PWM register */
+static int fbxgwr_pmu_led_brightness_set(struct led_classdev *led_cdev,
+					 enum led_brightness brightness)
+{
+	struct pmu_led *led;
+
+	led = container_of(led_cdev, struct pmu_led, ldev);
+	return regmap_write(led->priv->regmap, led->reg, brightness);
+}
+
+/*
+ * Probe: one LED per DT child node.  Each child must carry a "reg"
+ * property below "nleds"; the initial brightness is read back from
+ * the PMU so the LED state survives a driver reload.
+ *
+ * Fixes vs. original: every error exit inside for_each_child_of_node()
+ * now drops the child node reference (of_node_put), and the leds[]
+ * array can no longer overflow when the DT has more children than
+ * "nleds" advertises.
+ */
+static int fbxgwr_pmu_led_probe(struct platform_device *pdev)
+{
+	struct device_node *node, *child;
+	struct fbxgwr_pmu_led *priv;
+	struct pmu_led *led;
+	u32 nleds;
+	int ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(struct fbxgwr_pmu_led),
+			    GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	/* registers are owned by the parent MFD device */
+	priv->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+	if (!priv->regmap)
+		return -ENODEV;
+
+	ret = of_property_read_u32(pdev->dev.of_node, "nleds", &nleds);
+	if (ret) {
+		dev_err(&pdev->dev, "missing nleds property in DT\n");
+		return ret;
+	}
+
+	led = devm_kcalloc(&pdev->dev, nleds, sizeof(struct pmu_led),
+			   GFP_KERNEL);
+	if (!led)
+		return -ENOMEM;
+
+	priv->nleds = nleds;
+	priv->leds = led;
+
+	node = pdev->dev.of_node;
+
+	for_each_child_of_node(node, child) {
+		u32 reg;
+
+		/* more children than advertised would overflow leds[] */
+		if (led - priv->leds >= nleds) {
+			of_node_put(child);
+			return -EINVAL;
+		}
+
+		led->ldev.name =
+			of_get_property(child, "label", NULL) ?: child->name;
+		led->ldev.flags = 0;
+		led->ldev.brightness_set_blocking =
+				fbxgwr_pmu_led_brightness_set;
+		led->ldev.max_brightness = LED_FULL;
+
+		ret = of_property_read_u32(child, "reg", &reg);
+		if (ret || reg >= nleds) {
+			of_node_put(child);
+			return -EINVAL;
+		}
+
+		led->reg = PMU_REG_LED0_PWM + reg;
+
+		/* seed classdev state with the current hw brightness */
+		ret = regmap_read(priv->regmap, led->reg,
+				  &led->ldev.brightness);
+		if (ret) {
+			of_node_put(child);
+			return ret;
+		}
+
+		ret = devm_led_classdev_register(&pdev->dev, &led->ldev);
+		if (ret) {
+			of_node_put(child);
+			return ret;
+		}
+
+		led->priv = priv;
+
+		led++;
+	}
+
+	return 0;
+}
+
+static const struct of_device_id fbxgwr_pmu_led_of_id[] = {
+	{ .compatible = "freebox,fbxgwr-pmu-led", },
+	{}
+};
+MODULE_DEVICE_TABLE(of, fbxgwr_pmu_led_of_id);
+
+static struct platform_driver fbxgwr_pmu_led_driver = {
+	.probe		= fbxgwr_pmu_led_probe,
+	.driver = {
+		.name	= "fbxgwr-pmu-led",
+		.of_match_table = fbxgwr_pmu_led_of_id,
+	},
+};
+
+module_platform_driver(fbxgwr_pmu_led_driver);
+
+MODULE_AUTHOR("Marios Makassikis");
+MODULE_LICENSE("GPL");
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/mfd/fbxgwr-pmu.c	2025-09-25 17:40:33.359356277 +0200
@@ -0,0 +1,675 @@
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+
+#include <linux/i2c.h>
+#include <linux/mfd/core.h>
+#include <linux/mfd/fbxgwr_pmu.h>
+#include <linux/regmap.h>
+#include <linux/module.h>
+
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/input.h>
+#include <linux/gpio.h>
+#include <linux/gpio/driver.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+
+/*
+ * private context
+ */
+struct fbxgwr_pmu {
+	struct i2c_client 	*i2c_client;
+	struct regmap		*regmap;
+	u32			board_id;
+	u32			app_ver_rev;
+	u32			api_major;
+	u32			api_minor;
+};
+
+/* XXX dummy func to disable regmap caching */
+static bool volatile_reg(struct device *dev, unsigned int reg)
+{
+	return true;
+}
+
+static const struct regmap_config fbxgwr_pmu_regmap_config = {
+	.reg_bits = 8,
+	.val_bits = 8,
+	.max_register = 0xff,
+
+	.volatile_reg = volatile_reg,
+};
+
+static const struct mfd_cell fbxgwr_pmu_devs[] = {
+	MFD_CELL_OF("fbxgwr-pmu-gpio", NULL,
+		    NULL, 0, 0, "freebox,fbxgwr-pmu-gpio"),
+	MFD_CELL_OF("fbxgwr-pmu-led", NULL,
+		    NULL, 0, 0, "freebox,fbxgwr-pmu-led"),
+	MFD_CELL_NAME("fbxgwr-pmu-hwmon"),
+	MFD_CELL_OF("fbxgwr-pmu-watchdog", NULL,
+		    NULL, 0, 0, "freebox,fbxgwr-pmu-watchdog"),
+	MFD_CELL_OF("fbxgwr-pmu-i2c", NULL,
+		    NULL, 0, 0, "freebox,fbxgwr-pmu-i2c"),
+};
+
+static ssize_t pmu_board_id_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", priv->board_id);
+}
+
+static ssize_t pmu_app_version_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u.%u\n",
+		       priv->app_ver_rev >> 16,
+		       priv->app_ver_rev & 0xffff);
+}
+
+static ssize_t pmu_app_iversion_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", priv->app_ver_rev);
+}
+
+static ssize_t pmu_api_major_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", priv->api_major);
+}
+
+static ssize_t pmu_api_minor_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+
+	return sprintf(buf, "%u\n", priv->api_minor);
+}
+
+static ssize_t pmu_test_mode_show(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	int ret;
+	u32 val;
+
+	ret = regmap_read(priv->regmap, PMU_REG_TEST_MODE, &val);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "0x%x\n", val);
+}
+
+static ssize_t pmu_cur_app_bank_show(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	int ret;
+	u32 val;
+
+	ret = regmap_read(priv->regmap, PMU_REG_CUR_APP_BANK, &val);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "%u\n", val);
+}
+
+static ssize_t pmu_fw_capabilities_show(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	size_t len;
+	u32 val;
+	int ret;
+
+	ret = regmap_read(priv->regmap, PMU_REG_FW_CAPABILITIES, &val);
+	if (ret)
+		return -EIO;
+
+	buf[0] = 0;
+	len = 0;
+	if (val & PMU_FW_CAP_FWUPGRADE)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "fw_upgrade ");
+	if (val & PMU_FW_CAP_BANK_SWITCH)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "bank_switch ");
+	if (val & PMU_FW_CAP_RTC)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "rtc ");
+	if (val & PMU_FW_CAP_STANDBY)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "standby ");
+	if (val & PMU_FW_CAP_WDT)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "watchdog ");
+	if (val & PMU_FW_CAP_I2C_PROXY)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "i2c-proxy ");
+	if (val & PMU_FW_CAP_GPIO_IRQ)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "gpio-irq");
+
+	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+	return len;
+}
+
+static ssize_t pmu_rtc_reg_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf,
+				int base)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	u32 val[4], rtc;
+	int ret, i;
+
+	for (i = 0; i < 4; i++) {
+		ret = regmap_read(priv->regmap, base + i, val + i);
+		if (ret)
+			return -EIO;
+	}
+
+	rtc = (val[3] << 24) | (val[2] << 16) | (val[1] << 8) | val[0];
+	return sprintf(buf, "%u\n", rtc);
+}
+
+static ssize_t pmu_rtc_show(struct device *dev,
+			    struct device_attribute *attr,
+			    char *buf)
+{
+	return pmu_rtc_reg_show(dev, attr, buf, PMU_REG_RTC_VALUE_0);
+}
+
+static ssize_t pmu_rtc_cmp_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	return pmu_rtc_reg_show(dev, attr, buf, PMU_REG_RTC_CMP_VALUE_0);
+}
+
+/*
+ * Store the 32-bit RTC compare value, little-endian, one byte
+ * register at a time.
+ *
+ * Fixes vs. original: the old code returned an *uninitialized* `ret`
+ * when the parsed value was negative, and stored simple_strtoul()'s
+ * unsigned result in a signed int.  kstrtou32() both validates the
+ * input and keeps the full unsigned range.
+ */
+static ssize_t pmu_rtc_cmp_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf, size_t len)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	u32 rtc;
+	int ret, i;
+
+	ret = kstrtou32(buf, 10, &rtc);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < 4; i++) {
+		ret = regmap_write(priv->regmap, PMU_REG_RTC_CMP_VALUE_0 + i,
+				   (rtc >> (i * 8)) & 0xff);
+		if (ret)
+			return -EIO;
+	}
+	return len;
+}
+
+/*
+ * Write any value > 0 to trigger a full board reset.  A magic value
+ * must be written to the register to guard against stray writes.
+ */
+static ssize_t pmu_board_reset_store(struct device *dev,
+				     struct device_attribute *attr,
+				     const char *buf, size_t len)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	int val, ret;
+
+	/* NOTE(review): simple_strtoul() never returns negative and
+	 * silently accepts garbage as 0 — consider kstrtouint() */
+	val = simple_strtoul(buf, NULL, 10);
+	if (val < 0)
+		return val;
+	if (val > 0) {
+		ret = regmap_write(priv->regmap, PMU_REG_BOARD_RESET,
+				   PMU_RESET_MAGIC);
+		if (ret)
+			return -EIO;
+	}
+	return len;
+}
+
+/*
+ * Write any value > 0 to make the PMU simulate a dying-gasp event
+ * (test hook, magic-value protected).
+ */
+static ssize_t pmu_fake_dgasp_store(struct device *dev,
+				    struct device_attribute *attr,
+				    const char *buf, size_t len)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	int val, ret;
+
+	val = simple_strtoul(buf, NULL, 10);
+	if (val < 0)
+		return val;
+	if (val > 0) {
+		ret = regmap_write(priv->regmap, PMU_REG_FAKE_DGASP,
+				   PMU_FAKE_DGASP_MAGIC);
+		if (ret)
+			return -EIO;
+	}
+	return len;
+}
+
+/*
+ * Write any value > 0 to put the board into standby.  The wake-reason
+ * mask is cleared first so the next boot reports a fresh reason.
+ */
+static ssize_t pmu_enter_standby_store(struct device *dev,
+				       struct device_attribute *attr,
+				       const char *buf, size_t len)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	int val, ret;
+
+	val = simple_strtoul(buf, NULL, 10);
+	if (val < 0)
+		return val;
+	if (val > 0) {
+		/* clear wake reason */
+		ret = regmap_write(priv->regmap, PMU_REG_WAKE_REASON_MASK,
+				   0xff);
+		if (ret)
+			return -EIO;
+		ret = regmap_write(priv->regmap, PMU_REG_ENTER_STANDBY,
+				   PMU_STANDBY_MAGIC);
+		if (ret)
+			return -EIO;
+	}
+	return len;
+}
+
+static ssize_t pmu_wake_pon_interval_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	int ret;
+	u32 val;
+
+	ret = regmap_read(priv->regmap, PMU_REG_WAKE_PON_INTERVAL, &val);
+	if (ret)
+		return -EIO;
+
+	return sprintf(buf, "%u\n", val);
+}
+
+/*
+ * Store the PON wake polling interval.
+ *
+ * Fixes vs. original: simple_strtoul() can never yield a negative
+ * value, so the old `val < 0` bail-out and the redundant `val >= 0`
+ * guard were dead code; kstrtouint() also rejects malformed input
+ * instead of silently treating it as 0.
+ */
+static ssize_t pmu_wake_pon_interval_store(struct device *dev,
+					   struct device_attribute *attr,
+					   const char *buf, size_t len)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	unsigned int val;
+	int ret;
+
+	ret = kstrtouint(buf, 10, &val);
+	if (ret)
+		return ret;
+
+	ret = regmap_write(priv->regmap, PMU_REG_WAKE_PON_INTERVAL, val);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+static ssize_t pmu_wake_src_enabled_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	size_t len;
+	u32 val;
+	int ret;
+
+	ret = regmap_read(priv->regmap, PMU_REG_WAKE_SRC_MASK, &val);
+	if (ret)
+		return -EIO;
+
+	buf[0] = 0;
+	len = 0;
+	if (val & PMU_WAKE_R_RTC_MASK)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "rtc ");
+	if (val & PMU_WAKE_R_PWRBTN_MASK)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "button ");
+	if (val & PMU_WAKE_R_WAKEPON_MASK)
+		len += scnprintf(buf + len, PAGE_SIZE - len, "pon ");
+
+	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
+	return len;
+}
+
+static ssize_t pmu_wake_src_enabled_store(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t len)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	int ret, i, argc;
+	char **args;
+	u32 val;
+
+	args = argv_split(GFP_KERNEL, buf, &argc);
+	if (args == NULL)
+		return -ENOMEM;
+
+	val = 0;
+	for (i = 0; i < argc; i++) {
+		if (!strcmp(args[i], "rtc"))
+			val |= PMU_WAKE_R_RTC_MASK;
+		else if (!strcmp(args[i], "button"))
+			val |= PMU_WAKE_R_PWRBTN_MASK;
+		else if (!strcmp(args[i], "pon"))
+			val |= PMU_WAKE_R_WAKEPON_MASK;
+		else {
+			argv_free(args);
+			return -EINVAL;
+		}
+	}
+
+	argv_free(args);
+	ret = regmap_write(priv->regmap, PMU_REG_WAKE_SRC_MASK, val);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+static ssize_t pmu_wake_src_available_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	return sprintf(buf, "rtc button pon\n");
+}
+
+
+static ssize_t pmu_last_wake_reason_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	const char *reason;
+	u32 val;
+	int ret;
+
+	ret = regmap_read(priv->regmap, PMU_REG_WAKE_REASON_MASK, &val);
+	if (ret)
+		return -EIO;
+
+	reason = "unknown";
+	if (!val)
+		reason = "power-on-reset";
+	else if (val & PMU_WAKE_R_RTC_MASK)
+		reason = "rtc";
+	else if (val & PMU_WAKE_R_PWRBTN_MASK)
+		reason = "button";
+	else if (val & PMU_WAKE_R_WAKEPON_MASK)
+		reason = "pon";
+	else if (val & PMU_WAKE_R_WDT_RST_MASK)
+		reason = "wdt-reset";
+	else if (val & PMU_WAKE_R_SOC_RST_MASK)
+		reason = "soc-reset";
+
+	return sprintf(buf, "%s\n", reason);
+}
+
+static ssize_t pmu_last_wake_reason_store(struct device *dev,
+					  struct device_attribute *attr,
+					  const char *buf, size_t len)
+{
+	int ret;
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+
+	ret = regmap_write(priv->regmap, PMU_REG_WAKE_REASON_MASK, ~0);
+	if (ret)
+		return -EIO;
+
+	return len;
+}
+
+static ssize_t pmu_mcu_reboot_reason_show(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	const char *reason;
+	u32 val;
+	int ret;
+
+	ret = regmap_read(priv->regmap, PMU_REG_PMU_RESET_REASON, &val);
+	if (ret)
+		return -EIO;
+
+	switch (val) {
+	case PMU_RESET_R_POR:
+		reason = "power-on-reset";
+		break;
+	case PMU_RESET_R_SWRESET:
+		reason = "swreset";
+		break;
+	case PMU_RESET_R_VDROP:
+		reason = "vdrop";
+		break;
+	case PMU_RESET_R_HWRESET:
+		reason = "hwreset";
+		break;
+	case PMU_RESET_R_WATCHDOG:
+		reason = "watchdog";
+		break;
+	case PMU_RESET_R_BUS_ERROR:
+		reason = "bus_error";
+		break;
+	case PMU_RESET_R_SRAM_PARITY:
+		reason = "sram_parity";
+		break;
+	case PMU_RESET_R_BOOTSTRAP:
+		reason = "bootstrap";
+		break;
+	default:
+		reason = "unknown";
+		break;
+	}
+
+	return sprintf(buf, "%s\n", reason);
+}
+
+/* report which USB-C CC line is active ("cc1", "cc2" or "unknown") */
+static ssize_t pmu_cc_polarity_show(struct device *dev,
+				    struct device_attribute *attr,
+				    char *buf)
+{
+	struct fbxgwr_pmu *priv = dev_get_drvdata(dev);
+	const char *polarity = "unknown";
+	u32 val;
+
+	if (regmap_read(priv->regmap, PMU_REG_CC_POLARITY, &val))
+		return -EIO;
+
+	if (val == PMU_CC_POLARITY_CC1)
+		polarity = "cc1";
+	else if (val == PMU_CC_POLARITY_CC2)
+		polarity = "cc2";
+
+	return sprintf(buf, "%s\n", polarity);
+}
+
+static DEVICE_ATTR_RO(pmu_board_id);
+static DEVICE_ATTR_RO(pmu_app_iversion);
+static DEVICE_ATTR_RO(pmu_app_version);
+static DEVICE_ATTR_RO(pmu_api_major);
+static DEVICE_ATTR_RO(pmu_api_minor);
+static DEVICE_ATTR_RO(pmu_test_mode);
+static DEVICE_ATTR_RO(pmu_cur_app_bank);
+static DEVICE_ATTR_RO(pmu_fw_capabilities);
+static DEVICE_ATTR_RO(pmu_rtc);
+static DEVICE_ATTR_RW(pmu_rtc_cmp);
+static DEVICE_ATTR_WO(pmu_board_reset);
+static DEVICE_ATTR_WO(pmu_enter_standby);
+static DEVICE_ATTR_WO(pmu_fake_dgasp);
+static DEVICE_ATTR_RO(pmu_wake_src_available);
+static DEVICE_ATTR_RW(pmu_wake_src_enabled);
+static DEVICE_ATTR_RW(pmu_wake_pon_interval);
+static DEVICE_ATTR_RW(pmu_last_wake_reason);
+static DEVICE_ATTR_RO(pmu_mcu_reboot_reason);
+static DEVICE_ATTR_RO(pmu_cc_polarity);
+
+static struct attribute *sysfs_attrs_ctrl[] = {
+	&dev_attr_pmu_board_id.attr,
+	&dev_attr_pmu_app_iversion.attr,
+	&dev_attr_pmu_app_version.attr,
+	&dev_attr_pmu_api_major.attr,
+	&dev_attr_pmu_api_minor.attr,
+	&dev_attr_pmu_test_mode.attr,
+	&dev_attr_pmu_cur_app_bank.attr,
+	&dev_attr_pmu_fw_capabilities.attr,
+	&dev_attr_pmu_rtc.attr,
+	&dev_attr_pmu_rtc_cmp.attr,
+	&dev_attr_pmu_board_reset.attr,
+	&dev_attr_pmu_enter_standby.attr,
+	&dev_attr_pmu_fake_dgasp.attr,
+	&dev_attr_pmu_wake_src_available.attr,
+	&dev_attr_pmu_wake_src_enabled.attr,
+	&dev_attr_pmu_wake_pon_interval.attr,
+	&dev_attr_pmu_last_wake_reason.attr,
+	&dev_attr_pmu_mcu_reboot_reason.attr,
+	&dev_attr_pmu_cc_polarity.attr,
+	NULL,
+};
+
+static const struct attribute_group pmu_attribute_group[] = {
+	{ .attrs = sysfs_attrs_ctrl },
+};
+
+/*
+ * Probe the PMU over I2C: validate the magic registers, cache the
+ * firmware/API version registers, expose the sysfs control group and
+ * spawn the MFD sub-devices (gpio, led, hwmon, watchdog, i2c proxy).
+ *
+ * Fix vs. original: the sysfs attribute group is now removed when
+ * devm_mfd_add_devices() fails — it used to leak on that path
+ * (it is only torn down in .remove otherwise).
+ */
+static int fbxgwr_pmu_i2c_probe(struct i2c_client *i2c)
+{
+	struct fbxgwr_pmu *priv;
+	u32 magic0, magic1, val;
+	int ret;
+
+	priv = devm_kzalloc(&i2c->dev, sizeof(struct fbxgwr_pmu), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	i2c_set_clientdata(i2c, priv);
+	priv->i2c_client = i2c;
+	priv->regmap = devm_regmap_init_i2c(i2c, &fbxgwr_pmu_regmap_config);
+	if (IS_ERR(priv->regmap))
+		return PTR_ERR(priv->regmap);
+
+	/* read magic */
+	ret = regmap_read(priv->regmap, PMU_REG_MAGIC0, &magic0);
+	if (ret)
+		return ret;
+
+	ret = regmap_read(priv->regmap, PMU_REG_MAGIC1, &magic1);
+	if (ret)
+		return ret;
+
+	if (magic0 != PMU_MAGIC0_VAL || magic1 != PMU_MAGIC1_VAL) {
+		dev_err(&i2c->dev, "invalid magic\n");
+		return -EINVAL;
+	}
+
+	/* assemble 32-bit version.revision from four 8-bit registers:
+	 * revision in the low half, version in the high half */
+	ret = regmap_read(priv->regmap, PMU_REG_APP_REVISION_LO, &val);
+	if (ret)
+		return -EIO;
+	priv->app_ver_rev = (val << 0);
+
+	ret = regmap_read(priv->regmap, PMU_REG_APP_REVISION_HI, &val);
+	if (ret)
+		return -EIO;
+	priv->app_ver_rev |= (val << 8);
+
+	ret = regmap_read(priv->regmap, PMU_REG_APP_VERSION_LO, &val);
+	if (ret)
+		return -EIO;
+	priv->app_ver_rev |= (val << 16);
+
+	ret = regmap_read(priv->regmap, PMU_REG_APP_VERSION_HI, &val);
+	if (ret)
+		return -EIO;
+	priv->app_ver_rev |= (val << 24);
+
+	ret = regmap_read(priv->regmap, PMU_REG_API_MAJOR, &priv->api_major);
+	if (ret)
+		return -EIO;
+
+	ret = regmap_read(priv->regmap, PMU_REG_API_MINOR, &priv->api_minor);
+	if (ret)
+		return -EIO;
+
+	ret = regmap_read(priv->regmap, PMU_REG_BOARD_ID, &priv->board_id);
+	if (ret)
+		return -EIO;
+
+	ret = sysfs_create_group(&i2c->dev.kobj, pmu_attribute_group);
+	if (ret < 0) {
+		dev_err(&i2c->dev, "Sysfs registration failed\n");
+		return ret;
+	}
+
+	ret = devm_mfd_add_devices(&i2c->dev, PLATFORM_DEVID_NONE,
+				   fbxgwr_pmu_devs,
+				   ARRAY_SIZE(fbxgwr_pmu_devs), NULL, 0, 0);
+	if (ret) {
+		dev_err(&i2c->dev, "failed to register subdevices\n");
+		sysfs_remove_group(&i2c->dev.kobj, pmu_attribute_group);
+		return ret;
+	}
+
+	dev_info(&i2c->dev, "Freebox PMU driver (fwver:%d.%d api:%d.%d)\n",
+		 priv->app_ver_rev >> 16, priv->app_ver_rev & 0xffff,
+		 priv->api_major, priv->api_minor);
+
+	return 0;
+}
+
+static void fbxgwr_pmu_i2c_remove(struct i2c_client *i2c)
+{
+	sysfs_remove_group(&i2c->dev.kobj, pmu_attribute_group);
+}
+/* DT match table; static (file-local) and exported for module autoload */
+static const struct of_device_id fbxgwr_pmu_match[] = {
+	{ .compatible = "freebox,fbxgwr-pmu" },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, fbxgwr_pmu_match);
+
+static struct i2c_driver fbxgwr_pmu_driver = {
+	.driver = {
+		.name = "fbxgwr_pmu",
+		.of_match_table = of_match_ptr(fbxgwr_pmu_match),
+	},
+	.probe = fbxgwr_pmu_i2c_probe,
+	.remove = fbxgwr_pmu_i2c_remove,
+};
+
+/*
+ * Module init.  Fix vs. original: propagate the i2c_add_driver()
+ * error instead of always returning 0 — returning success on failure
+ * left a module loaded with no registered driver.
+ */
+static int __init fbxgwr_pmu_i2c_init(void)
+{
+	int ret;
+
+	ret = i2c_add_driver(&fbxgwr_pmu_driver);
+	if (ret != 0)
+		pr_err("Failed to register Freebox PMU driver: %d\n", ret);
+
+	return ret;
+}
+
+static void __exit fbxgwr_pmu_i2c_exit(void)
+{
+	i2c_del_driver(&fbxgwr_pmu_driver);
+}
+
+module_init(fbxgwr_pmu_i2c_init);
+module_exit(fbxgwr_pmu_i2c_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marios Makassikis");
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/misc/dgasp.c	2025-09-25 17:40:33.387356416 +0200
@@ -0,0 +1,206 @@
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/property.h>
+#include <linux/dgasp.h>
+
+struct {
+	struct kthread_worker *worker;
+	struct kthread_work dgasp_work;
+	struct device *dev;
+} g;
+
+ATOMIC_NOTIFIER_HEAD(dgasp_notifier_list);
+
+static DEFINE_PER_CPU(spinlock_t, dgasp_cpu_lock);
+
+static void other_cpu_dgasp_func(void *info)
+{
+	spinlock_t *lock = &per_cpu(dgasp_cpu_lock, smp_processor_id());
+
+	local_irq_disable();
+	spin_lock(lock);
+	while (1)
+		cpu_relax();
+}
+
+static DEFINE_PER_CPU(call_single_data_t, dgasp_csd) =
+	CSD_INIT(other_cpu_dgasp_func, NULL);
+
+/*
+ *
+ */
+static void dgasp_work_func(struct kthread_work *work)
+{
+	struct gpio_descs *toset_gpiod;
+	struct gpio_descs *toreset_gpiod;
+	unsigned int i;
+	int old_level;
+
+	/* request all GPIOs now */
+	old_level = console_loglevel;
+	console_loglevel = CONSOLE_LOGLEVEL_SILENT;
+	toset_gpiod = gpiod_get_array_optional(g.dev, "toset",
+					       GPIOD_ASIS |
+					       GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+	console_loglevel = old_level;
+	if (IS_ERR(toset_gpiod)) {
+		printk(KERN_CRIT "dgasp: failed to get toset gpios\n");
+		toset_gpiod = NULL;
+	}
+
+	console_loglevel = CONSOLE_LOGLEVEL_SILENT;
+	toreset_gpiod = gpiod_get_array_optional(g.dev, "toreset",
+						 GPIOD_ASIS |
+						 GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+	if (IS_ERR(toreset_gpiod)) {
+		printk(KERN_CRIT "dgasp: failed to get toreset gpios\n");
+		toreset_gpiod = NULL;
+	}
+	console_loglevel = old_level;
+
+	/* neutralize other CPUs ASAP */
+	for_each_online_cpu(i) {
+		spinlock_t *lock = &per_cpu(dgasp_cpu_lock, i);
+		spin_lock_init(lock);
+	}
+
+	for_each_online_cpu(i) {
+		call_single_data_t *csd = &per_cpu(dgasp_csd, i);
+		if (i != smp_processor_id())
+			smp_call_function_single_async(i, csd);
+	}
+
+	/* wait for other CPUs to be dead */
+	for_each_online_cpu(i) {
+		spinlock_t *lock = &per_cpu(dgasp_cpu_lock, i);
+
+		if (i == smp_processor_id())
+			continue;
+		while (!spin_is_locked(lock))
+			;
+	}
+
+	/* disable interrupts on this CPU */
+	local_irq_disable();
+
+	/*  now that other CPUs cannot trigger any panic/error, toggle
+	 *  all GPIOs (toggling PCI reset could have caused unexpected
+	 *  exceptions otherwise) */
+	if (toset_gpiod) {
+		for (i = 0; i < toset_gpiod->ndescs; i++)
+			gpiod_direction_output(toset_gpiod->desc[i], 1);
+	}
+	if (toreset_gpiod) {
+		for (i = 0; i < toreset_gpiod->ndescs; i++)
+			gpiod_direction_output(toreset_gpiod->desc[i], 0);
+	}
+
+	/* call notifiers */
+	atomic_notifier_call_chain(&dgasp_notifier_list, 0, NULL);
+
+	/* wait to die */
+	printk("dying gasp sent\n");
+	while (1)
+		cpu_relax();
+}
+
+/*
+ *
+ */
+static irqreturn_t dgasp_irq(int irq, void *data)
+{
+	kthread_queue_work(g.worker, &g.dgasp_work);
+	disable_irq_nosync(irq);
+	return IRQ_HANDLED;
+}
+
+/*
+ *
+ */
+static int dgasp_probe(struct platform_device *pdev)
+{
+	struct kthread_worker *worker;
+	struct gpio_descs *gpiod;
+	int ret, irq;
+
+	printk("dgasp probe\n");
+
+	irq = platform_get_irq(pdev, 0);
+	if (irq < 0)
+		return dev_err_probe(&pdev->dev, irq,
+				     "failed to get dgasp interrupt\n");
+
+	/* sanity check on all gpios so we detect any error now */
+	gpiod = gpiod_get_array_optional(&pdev->dev, "toset",
+					 GPIOD_ASIS |
+					 GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+	if (IS_ERR(gpiod))
+		return dev_err_probe(&pdev->dev, PTR_ERR(gpiod),
+				     "failed to get dgasp toset gpios");
+	if (gpiod)
+		gpiod_put_array(gpiod);
+
+	gpiod = gpiod_get_array_optional(&pdev->dev, "toreset",
+					 GPIOD_ASIS |
+					 GPIOD_FLAGS_BIT_NONEXCLUSIVE);
+	if (IS_ERR(gpiod))
+		return dev_err_probe(&pdev->dev, PTR_ERR(gpiod),
+				     "failed to get dgasp toreset gpios");
+	if (gpiod)
+		gpiod_put_array(gpiod);
+
+	worker = kthread_create_worker(0, "dgasp-worker");
+	if (IS_ERR(worker))
+		return dev_err_probe(&pdev->dev, PTR_ERR(worker),
+				     "failed to create worker\n");
+	sched_set_fifo_low(worker->task);
+
+	g.worker = worker;
+	kthread_init_work(&g.dgasp_work, dgasp_work_func);
+	g.dev = &pdev->dev;
+
+	ret = devm_request_irq(&pdev->dev, irq, dgasp_irq,
+			       IRQF_ONESHOT | IRQF_NO_THREAD,
+			       "dgasp", NULL);
+	if (ret < 0) {
+		kthread_destroy_worker(worker);
+		return dev_err_probe(&pdev->dev, ret, "request irq failed\n");
+	}
+
+	return 0;
+}
+
+/*
+ *
+ */
+static void dgasp_remove(struct platform_device *pdev)
+{
+}
+
+static const struct of_device_id id_table_dgasp[] = {
+	{ .compatible = "misc,dgasp" },
+	{}
+};
+
+MODULE_DEVICE_TABLE(of, id_table_dgasp);
+
+static struct platform_driver dgasp_driver = {
+	.probe = dgasp_probe,
+	.remove = dgasp_remove,
+	.driver = {
+		.name = "dgasp",
+		.of_match_table = id_table_dgasp,
+	},
+};
+
+module_platform_driver(dgasp_driver);
+
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_DESCRIPTION("Driver for dying gasp");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/misc/fbxserial_of.c	2025-09-25 17:40:33.391356436 +0200
@@ -0,0 +1,221 @@
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/fbxserial.h>
+#include <linux/random.h>
+#include <linux/nvmem-consumer.h>
+#include <linux/platform_device.h>
+
+static struct fbx_serial serial_info;
+static bool serial_info_ready;
+static bool serial_info_read;
+
+const struct fbx_serial *arch_get_fbxserial(void)
+{
+	if (!serial_info_ready)
+		return NULL;
+	if (!serial_info_read) {
+		printk(KERN_WARNING "warning: no serialinfo found, using default\n");
+		fbxserial_set_default(&serial_info);
+		serial_info_read = true;
+	}
+	return &serial_info;
+}
+
+EXPORT_SYMBOL(arch_get_fbxserial);
+
+/*
+ * read fbxserial from a nvmem cell
+ */
+static int fbxserial_nvmem_probe(struct platform_device *pdev)
+{
+	struct nvmem_cell *cell;
+	size_t len;
+	void *phex, *p;
+	int ret;
+
+	cell = devm_nvmem_cell_get(&pdev->dev, "fbxserialinfo");
+	if (IS_ERR(cell)) {
+		if (PTR_ERR(cell) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "missing fbxserialinfo cell\n");
+		serial_info_ready = true;
+		return PTR_ERR(cell);
+	}
+
+	serial_info_ready = true;
+
+	/* cell content is in hex */
+	phex = nvmem_cell_read(cell, &len);
+	if (IS_ERR(phex))
+		return PTR_ERR(phex);
+
+	if (len / 2 < FBXSERIAL_MIN_SIZE) {
+		dev_err(&pdev->dev, "too small fbxserial\n");
+		kfree(phex);
+		return -EINVAL;
+	}
+
+	p = kmalloc(len / 2, GFP_KERNEL);
+	if (!p) {
+		kfree(phex);
+		return -ENOMEM;
+	}
+
+	ret = hex2bin(p, phex, len / 2);
+	kfree(phex);
+
+	if (ret) {
+		dev_err(&pdev->dev, "invalid fbxserialinfo hex data\n");
+		kfree(p);
+		return ret;
+	}
+
+	fbxserialinfo_read(p, &serial_info);
+	serial_info_read = true;
+	add_device_randomness(&serial_info, sizeof (serial_info));
+	kfree(p);
+
+	return 0;
+}
+
+static const struct of_device_id fbxserial_nvmem_dt_ids[] = {
+	{ .compatible = "fbx,fbxserial-nvmem-hex" },
+	{}
+};
+
+static struct platform_driver fbxserial_nvmem_driver = {
+	.driver = {
+		.name = "fbxserial_nvmem",
+		.of_match_table = fbxserial_nvmem_dt_ids,
+	},
+	.probe = fbxserial_nvmem_probe,
+};
+
+static int of_get_mac_addr(struct device_node *np, const char *name, u8 *addr)
+{
+	struct property *pp = of_find_property(np, name, NULL);
+
+	if (pp && pp->length == 6) {
+		memcpy(addr, pp->value, 6);
+		return 0;
+	}
+	return -ENODEV;
+}
+
+/*
+ * read fixed fbxserial from DT nodes
+ */
+static __init int fbxserial_of_fixed_read(struct device_node *np)
+{
+	struct fbx_serial *s;
+	const char *manufacturer, *bundle;
+	u32 val;
+	int ret;
+
+	serial_info_ready = true;
+
+	s = &serial_info;
+
+	ret = 0;
+	ret |= of_property_read_u32(np, "fbx,fbxserial-type", &val);
+	s->type = cpu_to_be16(val);
+	ret |= of_property_read_u32(np, "fbx,fbxserial-version", &val);
+	s->version = val;
+	ret |= of_property_read_string(np,
+				       "fbx,fbxserial-manufacturer",
+				       &manufacturer);
+	ret |= of_property_read_u32(np, "fbx,fbxserial-year", &val);
+	s->year = cpu_to_be16(val);
+	ret |= of_property_read_u32(np, "fbx,fbxserial-week", &val);
+	s->week = val;
+	ret |= of_property_read_u32(np, "fbx,fbxserial-number", &val);
+	s->number = cpu_to_be32(val);
+	ret |= of_get_mac_addr(np,
+			       "fbx,fbxserial-mac-addr-base",
+			       s->mac_addr_base);
+	ret |= of_property_read_u32(np, "fbx,fbxserial-mac-count", &val);
+	s->mac_count = val;
+
+	if (ret) {
+		printk(KERN_ERR "failed to get some fixed serial properties "
+		       "from DT: %d\n", ret);
+		return ret;
+	}
+
+	s->manufacturer = manufacturer[0];
+
+	ret = of_property_read_string(np,
+				      "fbx,fbxserial-bundle", &bundle);
+	if (ret < 0) {
+		printk(KERN_ERR "failed to get fixed serial bundle "
+		       "from DT: %d\n", ret);
+		return ret;
+	}
+
+	s->extinfo_count = cpu_to_be32(1);
+	s->extinfos[0].type = cpu_to_be32(EXTINFO_TYPE_EXTDEV);
+	s->extinfos[0].u.extdev.type = cpu_to_be32(EXTDEV_TYPE_BUNDLE);
+	strscpy(s->extinfos[0].u.extdev.serial, bundle,
+		sizeof (s->extinfos[0].u.extdev.serial));
+
+	s->magic = FBXSERIAL_MAGIC;
+	s->struct_version = FBXSERIAL_VERSION;
+	s->len = sizeof (serial_info);
+
+	serial_info_read = true;
+	add_device_randomness(&serial_info, sizeof (serial_info));
+
+	return 0;
+}
+
+/*
+ * read fbxserial from DT chosen tag
+ */
+static __init int fbxserial_of_chosen_read(void)
+{
+	struct device_node *np;
+	const void *fbxserial_data;
+	int len;
+
+	serial_info_ready = true;
+
+	np = of_find_node_by_path("/chosen");
+	if (!np)
+		return 0;
+
+	fbxserial_data = of_get_property(np, "fbx,serialinfo", &len);
+	if (!fbxserial_data)
+		return 0;
+
+	fbxserialinfo_read(fbxserial_data, &serial_info);
+	serial_info_read = true;
+	add_device_randomness(&serial_info, sizeof (serial_info));
+
+	return 0;
+}
+
+/*
+ * read fbxserial from DT
+ */
+static __init int fbxserial_of_read(void)
+{
+	struct device_node *np;
+
+	/* check which fbxserial of method DT has specified */
+	np = of_find_node_by_path("/fbxserial");
+	if (np) {
+		if (of_property_present(np, "fbx,fbxserial-use-fixed"))
+			return fbxserial_of_fixed_read(np);
+
+		if (of_property_present(np, "fbx,fbxserial-use-chosen"))
+			return fbxserial_of_chosen_read();
+
+		if (of_property_present(np, "fbx,fbxserial-use-nvmem"))
+			return platform_driver_register(&fbxserial_nvmem_driver);
+		printk(KERN_WARNING "no fbxserial read method specified\n");
+	}
+
+	/* default to chosen method */
+	return fbxserial_of_chosen_read();
+}
+
+arch_initcall(fbxserial_of_read);
diff -Nruw linux-6.13.12-fbx/drivers/misc/hdmi-cec./Kconfig linux-6.13.12-fbx/drivers/misc/hdmi-cec/Kconfig
--- linux-6.13.12-fbx/drivers/misc/hdmi-cec./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/misc/hdmi-cec/Kconfig	2025-09-25 17:40:33.395356456 +0200
@@ -0,0 +1,15 @@
+menu "HDMI CEC support"
+
+config HDMI_CEC
+	tristate "HDMI CEC (Consumer Electronics Control) support"
+	help
+	   HDMI Consumer Electronics Control support.
+
+config HDMI_CEC_REMOTI
+	tristate "RemoTI CEC driver"
+	depends on HDMI_CEC
+	select REMOTI
+	help
+	   HDMI CEC driver using RemoTI IPCs.
+
+endmenu
diff -Nruw linux-6.13.12-fbx/drivers/misc/hdmi-cec./Makefile linux-6.13.12-fbx/drivers/misc/hdmi-cec/Makefile
--- linux-6.13.12-fbx/drivers/misc/hdmi-cec./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/misc/hdmi-cec/Makefile	2025-09-25 17:40:33.395356456 +0200
@@ -0,0 +1,6 @@
+obj-$(CONFIG_HDMI_CEC)		+= hdmi-cec.o
+hdmi-cec-objs			+= core.o dev.o
+
+# drivers
+obj-$(CONFIG_HDMI_CEC_REMOTI)	+= remoti-cec.o
+remoti-cec-objs			:= remoti.o
diff -Nruw linux-6.13.12-fbx/drivers/misc/remoti./Kconfig linux-6.13.12-fbx/drivers/misc/remoti/Kconfig
--- linux-6.13.12-fbx/drivers/misc/remoti./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/misc/remoti/Kconfig	2025-09-25 17:40:33.407356515 +0200
@@ -0,0 +1,26 @@
+menu "RemoTI support"
+
+config REMOTI
+	tristate "RemoTI support"
+	depends on FBX6HD
+	help
+	  Texas Instruments RemoTI stack.
+
+config REMOTI_LEDS
+	tristate "RemoTI LEDS support"
+	depends on REMOTI
+	depends on LEDS_CLASS
+	help
+	  RemoTI LEDS class driver support.
+
+config REMOTI_GPIO
+	tristate "RemoTI gpio support"
+	depends on REMOTI
+	help
+	  gpiochip driver for the RemoTI RNP
+
+config REMOTI_USER
+	tristate "RemoTI userspace access"
+	depends on REMOTI
+
+endmenu
diff -Nruw linux-6.13.12-fbx/drivers/misc/remoti./Makefile linux-6.13.12-fbx/drivers/misc/remoti/Makefile
--- linux-6.13.12-fbx/drivers/misc/remoti./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/misc/remoti/Makefile	2025-09-25 17:40:33.407356515 +0200
@@ -0,0 +1,9 @@
+obj-$(CONFIG_REMOTI)		+= remoti.o
+obj-$(CONFIG_REMOTI_GPIO)	+= remoti-gpio.o
+obj-$(CONFIG_REMOTI_LEDS)	+= remoti-leds.o
+obj-$(CONFIG_REMOTI_USER)	+= remoti-user.o
+
+remoti-objs			:= core.o core-sysfs.o
+remoti-gpio-objs		:= gpio.o
+remoti-leds-objs		:= leds.o
+remoti-user-objs		:= user.o
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./Makefile linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/Makefile
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/Makefile	2025-09-25 17:40:33.551357229 +0200
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BCM63158_SF2) 		+= sf2/
+obj-$(CONFIG_BCM63158_ENET_RUNNER) 	+= enet/
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/Makefile linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/Makefile
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/Makefile	2025-09-25 17:40:33.551357229 +0200
@@ -0,0 +1,11 @@
+obj-$(CONFIG_BCM63158_ENET_RUNNER) 	+= bcm63158_enet_runner.o
+
+bcm63158_enet_runner-y	:= \
+	ethtool.o \
+	main.o \
+	port_unimac.o \
+	port_xport.o \
+	port_xport_serdes.o \
+	port_xport_epon.o \
+	port_xport_epon_dbg.o \
+	port_xport_xlmac.o
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/bcm63158_enet_runner.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/bcm63158_enet_runner.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/bcm63158_enet_runner.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/bcm63158_enet_runner.h	2025-09-25 17:40:33.551357229 +0200
@@ -0,0 +1,292 @@
+#ifndef BCM63158_ENET_RUNNER_H_
+#define BCM63158_ENET_RUNNER_H_
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_net.h>
+#include <linux/reset.h>
+#include <linux/phy.h>
+#include <linux/phylink.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <net/dsa.h>
+
+#include <linux/soc/bcm63xx_xrdp_api.h>
+#include "enet_fw_defs.h"
+
+#define BCM_ENET_RUNNER_MAX_RXQ		1
+#define BCM_ENET_RUNNER_MAX_TXQ		2
+
+enum bcm_runner_port_type {
+	BCM_RUNNER_PORT_UNIMAC,
+	BCM_RUNNER_PORT_XPORT,
+};
+
+/*
+ * note: RDP has a maximum MTU hardcoded, prevent setting mac to a
+ * value higher than it has
+ */
+#define BCM_RUNNER_MAC_MAX_MTU		2048
+
+/*
+ * ethtool private flags
+ */
+#define PRIV_FLAGS_FORCE_LBE_OE_BIT		0
+#define PRIV_FLAGS_FORCE_LBE_OE_VAL_BIT		1
+#define PRIV_FLAGS_ST_SYNC_OK_BIT		2
+
+struct bcm_runner_ethtool_stat {
+	char		stat_string[ETH_GSTRING_LEN];
+	unsigned int	size;
+	unsigned int	offset;
+	unsigned int	reg;
+	unsigned int	type;
+};
+
+struct rx_queue {
+	int			index;
+	unsigned int		ring_size;
+	unsigned int		rx_curr_desc;
+	unsigned int		rx_desc_area_size;
+
+	struct rx_desc		*rx_desc_area;
+	dma_addr_t		rx_desc_dma;
+	void			**frags;
+};
+
+struct tx_queue_pdata {
+	/* keep SKB & len separated because if we
+	 * transmit a clone, then len may change and
+	 * during reclaim we must unmap using same len
+	 * that was mapped */
+	void			*data;
+	unsigned int		len;
+};
+
+struct tx_queue {
+	/*
+	 * "hot" fields first
+	 */
+	spinlock_t		tx_lock;
+	unsigned int		ring_size;
+	unsigned int		tx_cur_desc;
+	unsigned int		tx_dirty_desc;
+	struct tx_desc		*tx_desc_area;
+	struct tx_queue_pdata	*tx_desc_pdata;
+
+	int			index;
+	unsigned int		tx_desc_area_size;
+	dma_addr_t		tx_desc_dma;
+
+	uint8_t			use_dsa:1;
+	uint8_t			dsa_port:4;
+	uint8_t			dsa_queue:3;
+	uint8_t			dsa_imp_port:4;
+};
+
+/*
+ * per port/mode operations
+ */
+struct bcm_enet_runner_priv;
+
+struct bcm_enet_mode_ops {
+	const char	*name;
+
+	/* called *before* init */
+	u32		(*get_bbh_id)(void *port_priv);
+
+	/* called only before netdevice is registered or when netdevice
+	 * is down */
+	void		*(*init)(void *port_priv,
+				 const struct bcm_xrdp_enet_params *);
+	void		(*release)(void *mode_priv);
+
+	/* called on netdevice stop, after runner & netdev queue are
+	 * stopped, no start() operation, first phylink_mac_config()
+	 * calls act the start() operation */
+	void		(*stop)(void *mode_priv);
+
+	/* set interface mtu */
+	void		(*mtu_set)(void *mode_priv, unsigned int size);
+
+	/* check if sending this packet type is allowed */
+	bool		(*can_send)(void *mode_priv, unsigned int protocol);
+
+	/* stats update */
+	void		(*stats_update)(void *mode_priv,
+					struct net_device_stats *);
+
+	/* mib operation */
+	const struct bcm_runner_ethtool_stat *mib_estat;
+	size_t		mib_estat_count;
+	void		(*mib_update)(void *mode_priv);
+	void		*(*mib_get_data)(void *mode_priv);
+
+	/* get/set private on netdevice */
+	u32		(*get_priv_flags)(void *mode_priv);
+	int		(*set_priv_flags)(void *mode_priv, u32 flags);
+
+	/* ethtool epon params callback */
+	int		(*get_epon_param)(void *mode_priv,
+					  struct ethtool_epon_param *);
+	int		(*set_epon_param)(void *mode_priv,
+					  const struct ethtool_epon_param *);
+
+	/* dgasp operation */
+	bool		dgasp_supported;
+	int		(*dgasp_gen_data)(void *mode_priv,
+					  u8 *buf, size_t buf_len);
+
+	/*
+	 * phylink callback
+	 */
+	void		(*phylink_mac_config)(void *mode_priv,
+					      unsigned int pl_mode,
+					      const struct phylink_link_state *);
+
+	int		(*phylink_pcs_config)(void *mode_priv,
+					      unsigned int mode,
+					      phy_interface_t interface,
+					      const unsigned long *advertising);
+	void		(*phylink_link_down)(void *mode_priv,
+					     unsigned int pl_mode,
+					     phy_interface_t interface);
+
+	void		(*phylink_link_up)(void *mode_priv,
+					   unsigned int plmode,
+					   phy_interface_t interface,
+					   int speed, int duplex,
+					   struct phy_device *phy);
+	int		(*phylink_pcs_get_state)(void *mode_priv,
+						 struct phylink_link_state *);
+	void		(*phylink_pcs_an_restart)(void *mode_priv);
+};
+
+struct bcm_enet_port_ops {
+	const struct bcm_enet_mode_ops	*modes[4];
+	size_t				mode_count;
+
+	/* phylink validate for this port */
+	void		(*phylink_validate)(unsigned long *supported,
+					    struct phylink_link_state *state);
+	int		pcs_poll;
+
+	/* select correct mode for given interface */
+	int		(*mode_select)(phy_interface_t interface);
+
+	/* return list of supported interfaces */
+	void		(*get_supported_interfaces)(unsigned long *);
+
+	/* return mac capabilities  */
+	void		(*get_mac_capabilities)(unsigned long *);
+
+	/* called once at modprobe/rmmod */
+	void		*(*init)(struct bcm_enet_runner_priv *);
+	void		(*release)(void *port_priv);
+
+	void		(*pcs_shutdown)(void *port_priv);
+};
+
+struct bcm_dsa_port {
+	struct net_device		*slave_netdev;
+	unsigned int			imp_port;
+	unsigned int			port;
+	struct list_head		next;
+};
+
+struct queue_info {
+	char				irq_name[32];
+	cpumask_t			irq_affinity_mask;
+	struct irq_affinity_notify	affinity_notifier;
+};
+
+struct bcm_enet_runner_priv {
+	/*
+	 * valid after probe
+	 */
+	struct net_device		*netdev;
+	struct bcm_xrdp_priv		*xrdp;
+	struct phylink			*phylink;
+	struct phylink_config		phylink_config;
+	struct phylink_pcs		phylink_pcs;
+	struct platform_device		*pdev;
+
+	const struct bcm_enet_port_ops	*port_ops;
+	void				*port_priv;
+	enum bcm_runner_port_type	port_type;
+
+	bool				reset_scheduled;
+	struct delayed_work		reset_link_work;
+
+	struct queue_info		rxq_info[BCM_ENET_RUNNER_MAX_RXQ];
+	struct queue_info		txq_info[BCM_ENET_RUNNER_MAX_TXQ];
+
+	struct notifier_block		netdev_notifier;
+	bool				netdev_notifier_registered;
+	struct list_head		dsa_ports;
+	struct notifier_block		dgasp_nb;
+
+	/*
+	 * valid after netdevice is opened
+	 */
+	unsigned int			pkt_size;
+	unsigned int			frag_size;
+
+	/*
+	 * valid when valid mode is selected
+	 */
+	const struct bcm_enet_mode_ops	*mode_ops;
+	void				*mode_priv;
+	struct bcm_xrdp_enet_params	xrdp_params;
+
+	struct napi_struct		napi;
+	u32				irq_mask;
+	u32				work_todo;
+	u32				work_batch;
+
+	struct rx_queue			rxq[BCM_ENET_RUNNER_MAX_RXQ];
+	unsigned int			rxq_size;
+
+	struct tx_queue			txq[BCM_ENET_RUNNER_MAX_TXQ];
+	u8				txq_port_map[DSA_MAX_PORTS];
+	unsigned int			txq_count;
+	unsigned int			txq_size;
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	unsigned int			ff_hw_id;
+	struct tx_queue			*ff_txq;
+	struct task_struct		*ff_reclaim_thread;
+#endif
+};
+
+extern struct dentry *bcm63158_dbg_root;
+
+extern const size_t bcm_runner_fw_estat_count;
+extern const struct bcm_runner_ethtool_stat bcm_runner_fw_estat[];
+u64 bcm_runner_fw_read_estat(struct bcm_enet_runner_priv *priv, int idx);
+void bcm_runner_fw_stop_tx(struct bcm_enet_runner_priv *priv);
+bool bcm_runner_fw_tx_is_stopped(struct bcm_enet_runner_priv *priv);
+void bcm_runner_fw_tx_stop_wait(struct bcm_enet_runner_priv *priv);
+bool bcm_runner_fw_bbh_is_empty(struct bcm_enet_runner_priv *priv);
+
+int bcm_enet_runner_toggle_mode(struct bcm_enet_runner_priv *priv,
+				unsigned int new_mode_idx);
+
+void bcm_enet_runner_schedule_reset(struct bcm_enet_runner_priv *priv,
+				    unsigned int delay_ms);
+void bcm_enet_runner_unschedule_reset(struct bcm_enet_runner_priv *priv);
+
+
+extern const struct ethtool_ops bcm_runner_ethtool_ops;
+extern const struct bcm_enet_port_ops port_unimac_ops;
+extern const struct bcm_enet_port_ops port_xport_ops;
+
+#endif /* BCM63158_ENET_RUNNER_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/enet_fw_defs.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/enet_fw_defs.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/enet_fw_defs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/enet_fw_defs.h	2025-09-25 17:40:33.551357229 +0200
@@ -0,0 +1,220 @@
+#ifndef ENET_FW_DEFS_H_
+#define ENET_FW_DEFS_H_
+
+#define RX_FQM_BURST				8
+#define RX_FQM_SIZE				(RX_FQM_BURST * 3)
+
+//
+// RX global "registers" area, 16 bytes needed
+//
+#define RX_CONTROL_REG				0x0
+#define RX_CONTROL_RX_EN_SHIFT			0
+#define RX_CONTROL_RX_EN_MASK			(1 << RX_CONTROL_RX_EN_SHIFT)
+#define RX_CONTROL_RX_EN_F_OFFSET		RX_CONTROL_RX_EN_SHIFT
+#define RX_CONTROL_RX_EN_F_WIDTH		1
+#define RX_DESC_ADDRESS_REG			0x4
+#define RX_DESC_COUNT_REG			0x8
+#define RX_IRQ_MASK_REG				0xc
+
+//
+// RX Free Queue Manager "registers" area, follows global, 32 + RX_FQM_SCRATCH_AREA size needed
+//
+#define RX_FQM_STATUS_REG			0x10
+#define RX_FQM_STATUS_RX_EN_SHIFT		0
+#define RX_FQM_STATUS_RX_EN_MASK		(1 << RX_FQM_STATUS_RX_EN_SHIFT)
+#define RX_FQM_STATUS_RX_EN_F_OFFSET		RX_FQM_STATUS_RX_EN_SHIFT
+#define RX_FQM_STATUS_RX_EN_F_WIDTH		1
+
+#define RX_FQM_STAT_DBG_TASK_CALL_REG		0x14
+#define RX_FQM_STAT_DBG_CPU_RING_IDX_REG	0x18
+#define RX_FQM_STAT_DBG_FULL_REG		0x1c
+#define RX_FQM_STAT_DBG_HOST_NOBUF_REG		0x20
+
+#define RX_FQM_HEAD_IDX_REG			0x24 /* 8 bits */
+#define RX_FQM_TAIL_IDX_REG			0x25 /* 8 bits */
+
+#define RX_FQM_SCRATCH_AREA			0x30 /* RX_FQM_SIZE * CPU_RX_DESC_LEN */
+
+//
+// RX xfer "registers" area, starts after global regs
+//
+#define RX_IF_REGS_BASE_OFF			0x100
+#define RX_IF_REGS_PERIF_SIZE			0x50
+
+#define RX_XF_STATUS_REG			0x0
+#define RX_XF_STATUS_RX_EN_SHIFT		0
+#define RX_XF_STATUS_RX_EN_MASK			(1 << RX_XF_STATUS_RX_EN_SHIFT)
+#define RX_XF_STATUS_RX_EN_F_OFFSET		RX_XF_STATUS_RX_EN_SHIFT
+#define RX_XF_STATUS_RX_EN_F_WIDTH		1
+
+#define RX_XF_STAT_RX_CNT_PKT_REG		0x04
+#define RX_XF_STAT_RX_CNT_DROP_NOBUF_REG	0x08
+#define RX_XF_STAT_RX_CNT_DROP_RXDIS_REG	0x0c
+#define RX_XF_STAT_RX_CNT_DROP_RXERR_REG	0x10
+
+#define RX_XF_STAT_DBG_TASK_CALL_REG		0x14
+#define RX_XF_STAT_DBG_LAST_PD0_REG		0x18
+#define RX_XF_STAT_DBG_LAST_PD1_REG		0x1c
+#define RX_XF_STAT_DBG_LAST_PD2_REG		0x20
+#define RX_XF_STAT_DBG_LAST_PD3_REG		0x24
+#define RX_XF_STAT_DBG_LAST_SN_REG		0x28
+#define RX_XF_STAT_DBG_LAST_BN_REG		0x2c
+#define RX_XF_STAT_DBG_LAST_PLEN_REG		0x30
+#define RX_XF_STAT_DBG_LAST_CPUDESC_IDX_REG	0x34
+#define RX_XF_STAT_DBG_INVALID_PD_CNT_REG	0x38
+#define RX_XF_SCRATCH_CPU_WBACK_DESC		0x3c /* 4 bytes needed */
+#define RX_XF_SCRATCH_CPU_DESC			0x40 /* CPU_RX_DESC_LEN space needed */
+#define RX_XF_SCRATCH_SBPM_REPLY		0x48 /* 8 bytes needed */
+
+//
+// CPU RX descriptor
+//
+#define CPU_RX_DESC_LEN				8
+#define CPU_RX_DESC_LEN_LOG2			3
+
+#define CPU_RX_DESC0_LEN_SHIFT			0
+#define CPU_RX_DESC0_LEN_MASK			(0xffff << CPU_RX_DESC0_LEN_SHIFT)
+#define CPU_RX_DESC0_LEN_F_OFFSET		CPU_RX_DESC0_LEN_SHIFT
+#define CPU_RX_DESC0_LEN_F_WIDTH		16
+
+// internal field
+#define CPU_RX_DESC0_ABS_IDX_SHIFT		16
+#define CPU_RX_DESC0_ABS_IDX_MASK		(0x3fff << CPU_RX_DESC0_ABS_IDX_SHIFT)
+#define CPU_RX_DESC0_ABS_IDX_F_OFFSET		CPU_RX_DESC0_ABS_IDX_SHIFT
+#define CPU_RX_DESC0_ABS_IDX_F_WIDTH		14
+
+#define CPU_RX_DESC0_HW_OWNED_SHIFT		31
+#define CPU_RX_DESC0_HW_OWNED_MASK		(1 << CPU_RX_DESC0_HW_OWNED_SHIFT)
+#define CPU_RX_DESC0_HW_OWNED_F_OFFSET		CPU_RX_DESC0_HW_OWNED_SHIFT
+#define CPU_RX_DESC0_HW_OWNED_F_WIDTH		1
+
+#define CPU_RX_DESC1_OFFSET			4
+
+#ifdef __KERNEL__
+struct rx_desc {
+	__be32	flags_len;
+	__be32	address;
+};
+#endif
+
+
+//
+// TX "registers" area, 512 bytes needed
+//
+
+/* fix TX_SCRATCH_SENT_DESC_QIDX if you change this */
+#define TXQ_MAX_COUNT				2
+#define TX_DESC_READ_BURST			8
+#define TX_PD_SEND_BURST			4
+
+/* global scoped */
+#define TX_CONTROL_REG				0x0	/* 32 bits */
+#define TX_CONTROL_TX_EN_SHIFT			0
+#define TX_CONTROL_TX_EN_MASK			(1 << TX_CONTROL_TX_EN_SHIFT)
+#define TX_CONTROL_TX_EN_F_OFFSET		TX_CONTROL_TX_EN_SHIFT
+#define TX_CONTROL_TX_EN_F_WIDTH		1
+
+#define TX_STATUS_REG				0x4	/* 32 bits */
+#define TX_STATUS_TX_EN_SHIFT			0
+#define TX_STATUS_TX_EN_MASK			(1 << TX_STATUS_TX_EN_SHIFT)
+#define TX_STATUS_TX_EN_F_OFFSET		TX_STATUS_TX_EN_SHIFT
+#define TX_STATUS_TX_EN_F_WIDTH			1
+
+#define TX_BBH_PD_QUEUE_SIZE_REG		0x8	/* 16 bits, must be power of 2 - 1, min value is 3 */
+#define TX_BBH_MDU_QUEUE_ADDR_REG		0xa	/* 16 bits */
+#define TX_BBH_BB_ID_REG			0xc	/* 8 bits */
+#define TX_EPON_REPORTING_REG			0xd	/* 8 bits */
+
+#define TX_STAT_CNT_TX_DISABLED_REG		0x10	/* 32 bits */
+
+#define TX_STAT_DBG_TASK_CALL_REG		0x14	/* 32 bits */
+#define TX_STAT_DBG_FIFO_FULL_REG		0x18	/* 32 bits */
+#define TX_STAT_DBG_MDU_FW_RECLAIM_IDX_REG	0x1c	/* 32 bits */
+#define TX_STAT_DBG_MDU_FW_PUSH_IDX_REG		0x20	/* 32 bits */
+
+#define TX_SCRATCH_MAX1				0x24
+#define TX_SCRATCH_MAX2				0x28
+#define TX_SCRATCH_MAX3				0x2c
+
+/* used for dma & temp */
+#define TX_SCRATCH_SENT_DESC_QIDX		0x30	/* 256 bits: 1 bit for each desc, XXX: works for 2 TX queues only  */
+#define TX_SCRATCH_ZERO				0x50	/* 4 bytes needed */
+#define TX_SCRATCH_ACB_STAT_BUF			0x60	/* 8 bytes needed */
+
+/* per-queue scoped */
+#define TXQ_REGS_BASE_OFF			0x80
+#define TXQ_REGS_PERQ_SIZE			0x80
+#define TXQ_REGS_PERQ_SIZE_LOG2			7	/* == 128 (0x80) */
+
+#define TXQ_OFF_DESC_ADDRESS_REG		0x0	/* 32 bits, DDR queue base address */
+#define TXQ_OFF_DESC_COUNT_REG			0x4	/* 32 bits, must be power of 2, max 2^15 */
+#define TXQ_OFF_IRQ_MASK_REG			0x8	/* 32 bits */
+#define TXQ_OFF_ACB_ENABLED_REG			0xc	/* 8 bits */
+#define TXQ_OFF_ACB_QIDX_REG			0xd	/* 8 bits */
+#define TXQ_OFF_ACB_CONTROL_REG			0xe	/* 16 bits */
+#define TXQ_OFF_TX_DESC_IDX			0x10	/* 8 bits */
+#define TXQ_OFF_TX_DESC_CNT			0x11	/* 8 bits */
+#define TXQ_OFF_RING_PUSH_IDX_REG		0x12	/* 16 bits */
+#define TXQ_OFF_RING_RECLAIM_IDX_REG		0x14	/* 16 bits */
+#define TXQ_OFF_STAT_CNT_PKT_SENT_REG		0x18	/* 32 bits */
+#define TXQ_OFF_STAT_CNT_PKT_RECLAIMED_REG	0x1c	/* 32 bits */
+#define TXQ_OFF_STAT_CNT_ACB_QFULL		0x20	/* 32 bits */
+#define TXQ_OFF_ACB_TX_BUF			0x30	/* 16 bytes needed */
+#define TXQ_OFF_TX_DESCS_BUF			0x40	/* CPU_TX_DESC_LEN * TX_DESC_READ_BURST size needed */
+
+
+/*
+ * ACB control field format
+ */
+#define TXQ_ACBCTRL_EGRESS_QUEUE_SHIFT		0
+#define TXQ_ACBCTRL_EGRESS_PORT_SHIFT		3
+#define TXQ_ACBCTRL_IMP_PORT_SHIFT		6
+
+#ifdef __KERNEL__
+static inline unsigned int enet_fw_imp_port_map(unsigned int p)
+{
+	switch (p) {
+	case 5:
+		return 1;
+	case 7:
+		return 2;
+	case 8:
+		return 0;
+	}
+	WARN(1, "unknown imp port %d", p);
+	return 0;
+}
+#endif
+
+/*
+ * runner enet tx descriptor
+ */
+#define CPU_TX_DESC_LEN				8
+#define CPU_TX_DESC_LEN_LOG2			3
+
+#define CPU_TX_DESC0_LEN_SHIFT			0
+#define CPU_TX_DESC0_LEN_MASK			(0xffff << CPU_TX_DESC0_LEN_SHIFT)
+#define CPU_TX_DESC0_LEN_F_OFFSET		CPU_TX_DESC0_LEN_SHIFT
+#define CPU_TX_DESC0_LEN_F_WIDTH		16
+
+#define CPU_TX_DESC0_HW_OWNED_SHIFT		31
+#define CPU_TX_DESC0_HW_OWNED_MASK		(1 << CPU_TX_DESC0_HW_OWNED_SHIFT)
+#define CPU_TX_DESC0_HW_OWNED_F_OFFSET		CPU_TX_DESC0_HW_OWNED_SHIFT
+#define CPU_TX_DESC0_HW_OWNED_F_WIDTH		1
+
+/* internal fw use */
+#define CPU_TX_DESC0_ACB_DONE_SHIFT		30
+#define CPU_TX_DESC0_ACB_DONE_MASK		(1 << CPU_TX_DESC0_ACB_DONE_SHIFT)
+#define CPU_TX_DESC0_ACB_DONE_F_OFFSET		CPU_TX_DESC0_ACB_DONE_SHIFT
+#define CPU_TX_DESC0_ACB_DONE_F_WIDTH		1
+
+#define CPU_TX_DESC1_OFFSET			4
+
+#ifdef __KERNEL__
+struct tx_desc {
+	__be32	flags_len;
+	__be32	address;
+};
+#endif
+
+#endif /* !ENET_FW_DEFS_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/ethtool.c linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/ethtool.c
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/ethtool.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/ethtool.c	2025-09-25 17:40:33.551357229 +0200
@@ -0,0 +1,269 @@
+#include "bcm63158_enet_runner.h"
+
+static char bcm_enet_runner_driver_name[] = "bcm63158_enet_runner";
+static char bcm_enet_runner_driver_version[] = "1.0";
+
+/*
+ * ethtool callbacks
+ */
+/* ethtool -i: report static driver identification strings */
+static void bcm_runner_get_drvinfo(struct net_device *netdev,
+				   struct ethtool_drvinfo *drvinfo)
+{
+	strscpy(drvinfo->bus_info, "bcm63xx", sizeof(drvinfo->bus_info));
+	strscpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
+	strscpy(drvinfo->version, bcm_enet_runner_driver_version,
+		sizeof(drvinfo->version));
+	strscpy(drvinfo->driver, bcm_enet_runner_driver_name,
+		sizeof(drvinfo->driver));
+}
+
+/* wrapper struct so each name occupies exactly ETH_GSTRING_LEN bytes,
+ * allowing a single memcpy of the whole array in get_strings() */
+struct ethtool_priv_flags_strings {
+	const char string[ETH_GSTRING_LEN];
+};
+
+/* NOTE(review): order presumably matches the flag bits interpreted by
+ * mode_ops->{get,set}_priv_flags — verify against the mode backends */
+static const struct ethtool_priv_flags_strings bcm_runner_priv_flags_strings[] = {
+	{ .string = "force-lbe-output" },
+	{ .string = "force-lbe-output-value" },
+	{ .string = "st-sync-ok" },
+};
+
+/*
+ * Number of strings in the requested ethtool string set; -EINVAL for
+ * unsupported sets.
+ */
+static int bcm_runner_get_sset_count(struct net_device *netdev,
+				     int string_set)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(netdev);
+
+	if (string_set == ETH_SS_PRIV_FLAGS)
+		return ARRAY_SIZE(bcm_runner_priv_flags_strings);
+
+	if (string_set != ETH_SS_STATS)
+		return -EINVAL;
+
+	/* MIB stats only exist once a mode backend is attached */
+	if (!priv->mode_ops)
+		return 0;
+
+	return priv->mode_ops->mib_estat_count + bcm_runner_fw_estat_count;
+}
+
+/*
+ * Fill @data with the string table for @stringset.
+ *
+ * For ETH_SS_STATS the layout is: mode-specific MIB stat names first,
+ * then the common firmware stat names — get_ethtool_stats() fills
+ * values in the same order.  Strings are ETH_GSTRING_LEN apart.
+ */
+static void bcm_runner_get_strings(struct net_device *netdev,
+				   u32 stringset, u8 *data)
+{
+	struct bcm_enet_runner_priv *priv;
+	int i, offset;
+
+	priv = netdev_priv(netdev);
+	switch (stringset) {
+	case ETH_SS_STATS:
+	{
+		const struct bcm_runner_ethtool_stat *estat;
+		size_t estat_count;
+
+		/* no mode attached: sset_count reported 0, nothing to copy */
+		if (!priv->mode_ops)
+			return;
+
+		estat = priv->mode_ops->mib_estat;
+		estat_count = priv->mode_ops->mib_estat_count;
+
+		offset = 0;
+		for (i = 0; i < estat_count; i++) {
+			memcpy(data + (i + offset) * ETH_GSTRING_LEN,
+			       estat[i].stat_string,
+			       ETH_GSTRING_LEN);
+		}
+		offset += estat_count;
+		for (i = 0; i < bcm_runner_fw_estat_count; i++) {
+			memcpy(data + (i + offset) * ETH_GSTRING_LEN,
+			       bcm_runner_fw_estat[i].stat_string,
+			       ETH_GSTRING_LEN);
+		}
+		break;
+	}
+
+	case ETH_SS_PRIV_FLAGS:
+		/* array layout matches one name per ETH_GSTRING_LEN slot */
+		memcpy(data, bcm_runner_priv_flags_strings,
+		       sizeof (bcm_runner_priv_flags_strings));
+		break;
+	}
+}
+
+
+/*
+ * Fill @data with stat values, in the same order as the names emitted
+ * by get_strings(): mode MIB stats first (refreshed via mib_update),
+ * then the common firmware counters.
+ */
+static void bcm_runner_get_ethtool_stats(struct net_device *netdev,
+					 struct ethtool_stats *stats,
+					 u64 *data)
+{
+	struct bcm_enet_runner_priv *priv;
+	size_t i, offset;
+
+	priv = netdev_priv(netdev);
+
+	offset = 0;
+	if (priv->mode_ops) {
+		const struct bcm_runner_ethtool_stat *estat;
+		size_t estat_count;
+		void *mib_data;
+
+		estat = priv->mode_ops->mib_estat;
+		estat_count = priv->mode_ops->mib_estat_count;
+
+		/* refresh the MIB snapshot, then read fields out of it
+		 * by byte offset/size as described by the estat table */
+		priv->mode_ops->mib_update(priv->mode_priv);
+		mib_data = priv->mode_ops->mib_get_data(priv->mode_priv);
+
+		for (i = 0; i < estat_count; i++) {
+			const struct bcm_runner_ethtool_stat *s;
+			char *p;
+
+			s = &estat[i];
+			p = (char *)mib_data + s->offset;
+			data[offset + i] = (s->size == sizeof(u64)) ?
+				*(u64 *)p : *(u32 *)p;
+		}
+		offset += estat_count;
+	}
+
+	for (i = 0; i < bcm_runner_fw_estat_count; i++)
+		data[offset + i] = bcm_runner_fw_read_estat(priv, i);
+}
+
+/* restart autonegotiation — delegated to phylink */
+static int bcm_runner_nway_reset(struct net_device *dev)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+	return phylink_ethtool_nway_reset(priv->phylink);
+}
+
+/* read link settings — delegated to phylink */
+static int
+bcm_runner_get_link_ksettings(struct net_device *dev,
+			      struct ethtool_link_ksettings *cmd)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+	return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
+/* change link settings — delegated to phylink */
+static int
+bcm_runner_set_link_ksettings(struct net_device *dev,
+			      const struct ethtool_link_ksettings *cmd)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+	return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
+/*
+ * Set driver private flags; forwarded to the active mode backend.
+ * -ENETDOWN when no mode is attached, -EOPNOTSUPP when the mode
+ * has no set_priv_flags hook.
+ */
+static int
+bcm_runner_set_priv_flags(struct net_device *dev, u32 flags)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+
+	if (!priv->mode_ops)
+		return -ENETDOWN;
+
+	if (!priv->mode_ops->set_priv_flags)
+		return -EOPNOTSUPP;
+
+	return priv->mode_ops->set_priv_flags(priv->mode_priv, flags);
+}
+
+/*
+ * Read driver private flags from the mode backend; 0 when no mode
+ * is attached or the mode has no get_priv_flags hook.
+ */
+static u32
+bcm_runner_get_priv_flags(struct net_device *dev)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+
+	if (!priv->mode_ops || !priv->mode_ops->get_priv_flags)
+		return 0;
+
+	return priv->mode_ops->get_priv_flags(priv->mode_priv);
+}
+
+/*
+ * Read EPON parameters from the mode backend.  -ENETDOWN when no
+ * mode is attached, -EOPNOTSUPP when the mode has no hook.
+ */
+static int
+bcm_runner_get_epon_param(struct net_device *dev,
+			  struct ethtool_epon_param *param)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+
+	if (!priv->mode_ops)
+		return -ENETDOWN;
+
+	if (!priv->mode_ops->get_epon_param)
+		return -EOPNOTSUPP;
+
+	return priv->mode_ops->get_epon_param(priv->mode_priv, param);
+}
+
+/*
+ * Write EPON parameters through the mode backend; same error
+ * conventions as bcm_runner_get_epon_param().
+ */
+static int
+bcm_runner_set_epon_param(struct net_device *dev,
+			  const struct ethtool_epon_param *param)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+
+	if (!priv->mode_ops)
+		return -ENETDOWN;
+
+	if (!priv->mode_ops->set_epon_param)
+		return -EOPNOTSUPP;
+
+	return priv->mode_ops->set_epon_param(priv->mode_priv, param);
+}
+
+/*
+ * ethtool -g: report current and maximum rx/tx ring sizes.
+ * Maximums are a fixed driver limit of 4096 descriptors.
+ */
+static void
+bcm_runner_get_ringparam(struct net_device *dev,
+			 struct ethtool_ringparam *er,
+			 struct kernel_ethtool_ringparam *ker,
+			 struct netlink_ext_ack *extack)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+
+	er->rx_max_pending = 4096;
+	er->tx_max_pending = 4096;
+
+	er->rx_pending = priv->rxq_size;
+	er->tx_pending = priv->txq_size;
+}
+
+/*
+ * ethtool -G: set rx/tx ring sizes.  Mini/jumbo rings are not
+ * supported and the interface must be down; the new sizes take
+ * effect on the next open.
+ */
+static int
+bcm_runner_set_ringparam(struct net_device *dev,
+			 struct ethtool_ringparam *er,
+			 struct kernel_ethtool_ringparam *ker,
+			 struct netlink_ext_ack *extack)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+
+	if (er->rx_mini_pending || er->rx_jumbo_pending)
+		return -EINVAL;
+
+	/* enforce the limits advertised by get_ringparam(); the
+	 * original accepted 0 or oversized rings silently */
+	if (!er->rx_pending || er->rx_pending > 4096 ||
+	    !er->tx_pending || er->tx_pending > 4096)
+		return -EINVAL;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	priv->rxq_size = er->rx_pending;
+	priv->txq_size = er->tx_pending;
+
+	return 0;
+}
+
+/* expose the phylink instance to the ethtool core */
+static struct phylink *
+bcm_runner_get_phylink(struct net_device *dev)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+	return priv->phylink;
+}
+
+/* ethtool entry points for the runner enet netdevs */
+const struct ethtool_ops bcm_runner_ethtool_ops = {
+	.get_drvinfo		= bcm_runner_get_drvinfo,
+	.get_ethtool_stats	= bcm_runner_get_ethtool_stats,
+	.get_link		= ethtool_op_get_link,
+	.get_sset_count		= bcm_runner_get_sset_count,
+	.get_strings		= bcm_runner_get_strings,
+	.get_priv_flags		= bcm_runner_get_priv_flags,
+	.set_priv_flags		= bcm_runner_set_priv_flags,
+
+	.get_epon_param		= bcm_runner_get_epon_param,
+	.set_epon_param		= bcm_runner_set_epon_param,
+
+	.nway_reset		= bcm_runner_nway_reset,
+	.get_link_ksettings	= bcm_runner_get_link_ksettings,
+	.set_link_ksettings	= bcm_runner_set_link_ksettings,
+
+	.get_ringparam		= bcm_runner_get_ringparam,
+	.set_ringparam		= bcm_runner_set_ringparam,
+
+	.get_phylink		= bcm_runner_get_phylink,
+};
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/main.c linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/main.c
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/main.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/main.c	2025-09-25 17:40:33.555357249 +0200
@@ -0,0 +1,4803 @@
+#include <linux/rtnetlink.h>
+#include <linux/dsa/brcm.h>
+#include <linux/dgasp.h>
+#include <net/sock.h>
+#include <net/dsa.h>
+#include "bcm63158_enet_runner.h"
+#include "enet_fw_defs.h"
+
+struct dentry *bcm63158_dbg_root;
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/arp.h>
+#include <net/ip_ffn.h>
+#include <net/ip_tunnels.h>
+#include <net/ip6_ffn.h>
+#include <net/ip6_route.h>
+#include <net/ip6_tunnel.h>
+#include <linux/inet.h>
+
+#include "../../../net/bridge/br_private.h"
+#include "../../../net/fbxbridge/fbxbr_private.h"
+#include "../../../net/dsa/user.h"
+#endif
+
+#define RX_OFFSET		(ALIGN(NET_SKB_PAD, SMP_CACHE_BYTES) + 2)
+
+
+/* Ingress and egress opcodes */
+#define BRCM_OPCODE_SHIFT	5
+#define BRCM_OPCODE_MASK	0x7
+
+/* Ingress fields */
+/* 1st byte in the tag */
+#define BRCM_IG_TC_SHIFT	2
+#define BRCM_IG_TC_MASK		0x7
+/* 2nd byte in the tag */
+#define BRCM_IG_TE_MASK		0x3
+#define BRCM_IG_TS_SHIFT	7
+/* 3rd byte in the tag */
+#define BRCM_IG_DSTMAP2_MASK	1
+#define BRCM_IG_DSTMAP1_MASK	0xff
+
+/*
+ * for FW dev (driver won't write to fw area)
+ */
+#undef NO_FW_IO
+
+/*
+ * discard all rx traffic and re-arm descriptor (for benchmark)
+ */
+#undef DBG_RX_DISCARD_ALL
+
+/*
+ * for dev (debug print on rx/tx)
+ */
+#undef RX_DBG_PRINT
+#undef TX_DBG_PRINT
+#undef FFTX_DBG_PRINT
+
+#ifdef RX_DBG_PRINT
+#define rxdbg(...)	printk(__VA_ARGS__)
+#else
+#define rxdbg(...)
+#endif
+
+#ifdef TX_DBG_PRINT
+#define txdbg(...)	printk(__VA_ARGS__)
+#else
+#define txdbg(...)
+#endif
+
+#ifdef FFTX_DBG_PRINT
+#define fftxdbg(...)	printk(__VA_ARGS__)
+#else
+#define fftxdbg(...)
+#endif
+
+/*
+ * io accessors, RX global regs
+ */
+/* read a byte from the RX firmware register area */
+static inline u8 fw_rx_reg_readb(struct bcm_enet_runner_priv *priv, u32 offset)
+{
+#ifdef NO_FW_IO
+	/* fw area untouched in NO_FW_IO dev builds: reads yield 0 */
+	return 0;
+#else
+	return ioread8(priv->xrdp_params.rx_regs + offset);
+#endif
+}
+
+/* read a big-endian 16-bit RX firmware register */
+static inline u16 fw_rx_reg_readh(struct bcm_enet_runner_priv *priv, u32 offset)
+{
+#ifdef NO_FW_IO
+	return 0;
+#else
+	return ioread16be(priv->xrdp_params.rx_regs + offset);
+#endif
+}
+
+/* read a big-endian 32-bit RX firmware register */
+static inline u32 fw_rx_reg_readl(struct bcm_enet_runner_priv *priv, u32 offset)
+{
+#ifdef NO_FW_IO
+	return 0;
+#else
+	return ioread32be(priv->xrdp_params.rx_regs + offset);
+#endif
+}
+
+/* write a big-endian 32-bit RX firmware register (no-op in NO_FW_IO) */
+static inline void fw_rx_reg_writel(struct bcm_enet_runner_priv *priv, u32 val, u32 offset)
+{
+#ifndef NO_FW_IO
+	iowrite32be(val, priv->xrdp_params.rx_regs + offset);
+#endif
+}
+
+/* offset of register @reg of RX interface @xf_id inside the RX area */
+static inline u32 fw_rx_xf_off(u32 xf_id, u32 reg)
+{
+	return RX_IF_REGS_BASE_OFF + xf_id * RX_IF_REGS_PERIF_SIZE + reg;
+}
+
+/*
+ * io accessors, TX global regs
+ */
+/* read a byte from the TX firmware register area */
+static inline u8 fw_tx_reg_readb(struct bcm_enet_runner_priv *priv, u32 offset)
+{
+#ifdef NO_FW_IO
+	/* fw area untouched in NO_FW_IO dev builds: reads yield 0 */
+	return 0;
+#else
+	return ioread8(priv->xrdp_params.tx_regs + offset);
+#endif
+}
+
+/* read a big-endian 16-bit TX firmware register */
+static inline u16 fw_tx_reg_readh(struct bcm_enet_runner_priv *priv, u32 offset)
+{
+#ifdef NO_FW_IO
+	return 0;
+#else
+	return ioread16be(priv->xrdp_params.tx_regs + offset);
+#endif
+}
+
+/* read a big-endian 32-bit TX firmware register */
+static inline u32 fw_tx_reg_readl(struct bcm_enet_runner_priv *priv, u32 offset)
+{
+#ifdef NO_FW_IO
+	return 0;
+#else
+	return ioread32be(priv->xrdp_params.tx_regs + offset);
+#endif
+}
+
+/* offset of per-queue register @reg of tx queue @qidx */
+static inline u32 fw_txqoff(u32 qidx, u32 reg)
+{
+	return TXQ_REGS_BASE_OFF + qidx * TXQ_REGS_PERQ_SIZE + reg;
+}
+
+/* write a big-endian 32-bit TX firmware register (no-op in NO_FW_IO) */
+static inline void fw_tx_reg_writel(struct bcm_enet_runner_priv *priv, u32 val, u32 offset)
+{
+#ifndef NO_FW_IO
+	iowrite32be(val, priv->xrdp_params.tx_regs + offset);
+#endif
+}
+
+/* write a big-endian 16-bit TX firmware register (no-op in NO_FW_IO) */
+static inline void fw_tx_reg_writeh(struct bcm_enet_runner_priv *priv, u16 val, u32 offset)
+{
+#ifndef NO_FW_IO
+	iowrite16be(val, priv->xrdp_params.tx_regs + offset);
+#endif
+}
+
+/* write a byte to the TX firmware register area (no-op in NO_FW_IO) */
+static inline void fw_tx_reg_writeb(struct bcm_enet_runner_priv *priv, u8 val, u32 offset)
+{
+#ifndef NO_FW_IO
+	iowrite8(val, priv->xrdp_params.tx_regs + offset);
+#endif
+}
+
+/*
+ * FF declarations
+ */
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+struct ff_dev_desc {
+	bool			is_hardware;
+	const char		*netdev_name;
+	const char		*bridge_name;
+};
+
+struct ff_dev_ctx {
+	__be16			vlan_id;
+	uint8_t			is_hardware:1;
+	uint8_t			hw_id:2;
+	uint8_t			active:1;
+	uint8_t			use_dsa:1;
+	uint8_t			dsa_port:3;
+	u8			hwaddr[6];
+
+	struct net_device	*netdev;
+	struct net_device	*br_netdev;
+	struct net_bridge_port	*br_port;
+	struct fbxbr_port	*fbxbr_port;
+	struct net_device	*real_netdev;
+};
+
+struct ff_tun_ctx {
+	struct net_device	*netdev;
+	u16			mtu;
+	u8			active:1;
+
+	/* sit parameters */
+	union ff_tun_params {
+		struct {
+			u32		src;
+			u32		s6rd_prefix;
+			u32		s6rd_pmask;
+			u8		s6rd_plen;
+		} sit;
+
+		struct {
+			/* map parameters */
+			u32		ipv4_prefix;
+			u32		ipv4_pmask;
+			u8		ipv4_plen;
+			u8		ipv6_plen;
+			struct in6_addr	src;
+			struct in6_addr	br;
+
+			u64		ipv6_prefix;
+			u32		ea_addr_mask;
+			u16		ea_port_mask;
+			u8		psid_len;
+			u8		ea_lshift;
+		} map;
+	} u;
+
+	/* configured by userspace, cannot be infered from tunnel
+	 * netdev parameters */
+	u32			sit_6rd_br;
+};
+
+enum {
+	FF_HWDEV_ID_UNIMAC0,
+	FF_HWDEV_ID_UNIMAC1,
+	FF_HWDEV_ID_UNIMAC2,
+	FF_HWDEV_ID_FTTH,
+
+	FF_HWDEV_ID_LAST = FF_HWDEV_ID_FTTH,
+};
+
+enum {
+	FF_DEV_WLAN1,
+	FF_DEV_SWP1,
+	FF_DEV_SWP2,
+	FF_DEV_SWP3,
+	FF_DEV_WAN,
+	FF_DEV_LANWAN0,
+	FF_DEV_LANWAN1,
+
+	FF_DEV_LAST = FF_DEV_LANWAN1,
+	FF_DEV_LAN_LAST = FF_DEV_SWP3,
+};
+
+struct ff_ctx {
+	struct ff_dev_ctx		devs[FF_DEV_LAST + 1];
+	struct ff_tun_ctx		tun;
+	u32				jiffies;
+	struct bcm_enet_runner_priv	*ports_by_hw_id[FF_HWDEV_ID_LAST + 1];
+
+	struct ff_dev_desc	devs_desc[FF_DEV_LAST + 1];
+	int			wan_active_dev;
+	char			tun_netdev_name[IFNAMSIZ];
+	char			wan_netdev_name[IFNAMSIZ];
+};
+
+static struct notifier_block ff_notifier;
+static DEFINE_MUTEX(ff_notifier_mutex);
+static bool ff_enabled;
+
+static struct ff_ctx ff = {
+	.devs_desc = {
+		[FF_DEV_WLAN1] = {
+			.is_hardware		= false,
+			.netdev_name		= "wlan1",
+			.bridge_name		= "br0",
+		},
+
+		[FF_DEV_SWP1] = {
+			.is_hardware		= true,
+			.netdev_name		= "swp1",
+			.bridge_name		= "br0",
+		},
+
+		[FF_DEV_SWP2] = {
+			.is_hardware		= true,
+			.netdev_name		= "swp2",
+			.bridge_name		= "br0",
+		},
+
+		[FF_DEV_SWP3] = {
+			.is_hardware		= true,
+			.netdev_name		= "swp3",
+			.bridge_name		= "br0",
+		},
+
+		[FF_DEV_WAN] = {
+			.is_hardware		= true,
+			.netdev_name		= "ftthpub0",
+		},
+
+		[FF_DEV_LANWAN0] = {
+			.is_hardware		= true,
+			.netdev_name		= "lanwanpub0",
+		},
+
+		[FF_DEV_LANWAN1] = {
+			.is_hardware		= true,
+			.netdev_name		= "lanwanpub1",
+		},
+	},
+};
+
+/* true when ff dev slot @idx is one of the WAN-facing devices */
+static bool ff_idx_is_wan(size_t idx)
+{
+	return (idx == FF_DEV_WAN ||
+		idx == FF_DEV_LANWAN0 ||
+		idx == FF_DEV_LANWAN1);
+
+}
+
+static DEFINE_PER_CPU(spinlock_t, ff_plock);
+
+/*
+ * ff lock
+ */
+/* take the current cpu's ff lock (fast path, softirq context) */
+static void ff_lock_this_cpu(void)
+{
+	spinlock_t *lock = &per_cpu(ff_plock, raw_smp_processor_id());
+	spin_lock(lock);
+}
+
+/* release the current cpu's ff lock */
+static void ff_unlock_this_cpu(void)
+{
+	spinlock_t *lock = &per_cpu(ff_plock, raw_smp_processor_id());
+	spin_unlock(lock);
+}
+
+/* take @cpu's ff lock from process context (bh disabled) */
+static void ff_lock_cpu_bh(int cpu)
+{
+	spinlock_t *lock = &per_cpu(ff_plock, cpu);
+	spin_lock_bh(lock);
+}
+
+/* release @cpu's ff lock taken by ff_lock_cpu_bh() */
+static void ff_unlock_cpu_bh(int cpu)
+{
+	spinlock_t *lock = &per_cpu(ff_plock, cpu);
+	spin_unlock_bh(lock);
+}
+
+/* exclude the ff fast path on every cpu (config updates) */
+static void ff_lock_all_cpu_bh(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		ff_lock_cpu_bh(cpu);
+}
+
+/* release all per-cpu ff locks taken by ff_lock_all_cpu_bh() */
+static void ff_unlock_all_cpu_bh(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		ff_unlock_cpu_bh(cpu);
+}
+
+
+/*
+ *
+ */
+/*
+ * True when advancing tx_cur_desc by one (with wrap-around) would
+ * collide with tx_dirty_desc, i.e. the tx ring has no free slot.
+ */
+static bool __ff_tx_queue_full(struct tx_queue *txq)
+{
+	unsigned int next = txq->tx_cur_desc + 1;
+
+	if (unlikely(next >= txq->ring_size))
+		next = 0;
+
+	return next == txq->tx_dirty_desc;
+}
+
+/*
+ *
+ */
+/*
+ * True when the oldest in-flight descriptor has been released by
+ * hardware (HW_OWNED cleared) and can be reclaimed.  False when the
+ * ring is empty or hardware still owns the dirty descriptor.
+ */
+static bool __ff_tx_queue_can_reclaim(struct tx_queue *txq)
+{
+	struct tx_desc *desc;
+	unsigned int dirty_desc;
+	u32 flags_len;
+
+	dirty_desc = txq->tx_dirty_desc;
+	if (dirty_desc == txq->tx_cur_desc)
+		return false;
+
+	desc = &txq->tx_desc_area[dirty_desc];
+	flags_len = be32_to_cpu(desc->flags_len);
+
+	if ((flags_len & CPU_TX_DESC0_HW_OWNED_MASK))
+		return false;
+
+	return true;
+}
+
+/*
+ * Try to reclaim one transmitted fragment from @priv's ff tx queue
+ * for reuse as an rx buffer.
+ *
+ * Returns the fragment if one was completed AND its size equals
+ * @needed_frag_size; otherwise NULL (a wrong-sized fragment is
+ * freed, not returned).  Takes the queue tx_lock internally.
+ */
+static void *ff_tx_queue_frag_reclaim(struct bcm_enet_runner_priv *priv,
+				      unsigned int needed_frag_size)
+{
+	struct tx_desc *desc;
+	struct tx_queue *txq = priv->ff_txq;
+	unsigned int dirty_desc, next_desc;
+	void *frag;
+	unsigned int frag_size;
+
+	spin_lock(&txq->tx_lock);
+	if (!__ff_tx_queue_can_reclaim(txq)) {
+		spin_unlock(&txq->tx_lock);
+		return NULL;
+	}
+
+	/* detach the fragment and advance the dirty pointer under lock */
+	dirty_desc = txq->tx_dirty_desc;
+	desc = &txq->tx_desc_area[dirty_desc];
+	frag = txq->tx_desc_pdata[dirty_desc].data;
+	frag_size = txq->tx_desc_pdata[dirty_desc].len;
+	txq->tx_desc_pdata[dirty_desc].data = NULL;
+
+	next_desc = dirty_desc + 1;
+	if (unlikely(next_desc >= txq->ring_size))
+		next_desc = 0;
+	txq->tx_dirty_desc = next_desc;
+	spin_unlock(&txq->tx_lock);
+
+	if (needed_frag_size != frag_size) {
+		fftxdbg("ffrecl[%s/q%u]: desc_idx:%u, bad size:%u != %u\n",
+			priv->netdev->name,
+			txq->index,
+			dirty_desc,
+			frag_size,
+			needed_frag_size);
+		skb_free_frag(frag);
+		return NULL;
+	}
+
+	fftxdbg("ffrecl[%s/q%u]: desc_idx:%u frag:%pS size:%u\n",
+		priv->netdev->name,
+		txq->index,
+		dirty_desc,
+		frag,
+		frag_size);
+
+	return frag;
+}
+
+/*
+ * Reclaim a sent fragment of size @pkt_size from any hardware port
+ * this rx device may forward to, preferring @ff_peek_first_dev_idx's
+ * port, then the other plausible egress ports (LAN ports for a WAN
+ * ingress, the active WAN for a LAN ingress).
+ *
+ * Returns a reusable fragment or NULL.  Called with the per-cpu ff
+ * lock held.
+ */
+static void *ff_reclaim_any_sent_frag(unsigned int pkt_size,
+				      unsigned int rx_ff_dev_idx,
+				      int ff_peek_first_dev_idx)
+{
+	struct bcm_enet_runner_priv *oport;
+	unsigned int hw_id, idx_todo, hwid_done;
+	void *frag;
+	size_t i;
+
+	BUG_ON(!ff.devs[ff_peek_first_dev_idx].is_hardware);
+	hw_id = ff.devs[ff_peek_first_dev_idx].hw_id;
+	oport = ff.ports_by_hw_id[hw_id];
+	if (likely(oport)) {
+		frag = ff_tx_queue_frag_reclaim(oport, pkt_size);
+		if (frag)
+			return frag;
+	}
+
+	/*
+	 * failed, try other tx queues this port can ff packets to
+	 */
+	if (ff_idx_is_wan(rx_ff_dev_idx)) {
+		idx_todo = ((1 << FF_DEV_SWP1) |
+			    (1 << FF_DEV_SWP2) |
+			    (1 << FF_DEV_SWP3));
+	} else if (ff.wan_active_dev != -1)
+		idx_todo = (1 << ff.wan_active_dev);
+	else
+		idx_todo = 0;
+
+	/* the preferred port was already tried above; track visited
+	 * hw ids so ports shared by several ff devs are probed once */
+	idx_todo &= ~(1 << ff_peek_first_dev_idx);
+	hwid_done = (1 << hw_id);
+
+	for (i = 0; idx_todo && i < FF_DEV_LAST + 1; i++) {
+		if (!(idx_todo & (1 << i)))
+			continue;
+
+		BUG_ON(!ff.devs[i].is_hardware);
+		hw_id = ff.devs[i].hw_id;
+		if (hwid_done & (1 << hw_id))
+			continue;
+
+		oport = ff.ports_by_hw_id[hw_id];
+		if (unlikely(!oport))
+			continue;
+
+		frag = ff_tx_queue_frag_reclaim(oport, pkt_size);
+		if (frag)
+			return frag;
+
+		hwid_done |= (1 << hw_id);
+	}
+	return NULL;
+}
+
+#endif
+
+/*
+ * Arm rx descriptor @desc_idx of @rxq with a fresh buffer.
+ *
+ * @unmap: dma-unmap the buffer currently referenced by the
+ *	descriptor before replacing it
+ * @forced_frag: recycled fragment to install instead of allocating
+ *	a new one (FF fast path); it is NOT freed here on mapping
+ *	failure — the caller keeps ownership
+ *
+ * Returns 0 on success, -ENOMEM on allocation or mapping failure.
+ */
+static int rxq_refill_desc(struct bcm_enet_runner_priv *priv,
+			   struct rx_queue *rxq,
+			   int desc_idx,
+			   bool unmap,
+			   void *forced_frag)
+{
+	struct rx_desc *desc;
+	void *frag;
+	bool frag_allocated = true;
+	dma_addr_t addr;
+	u32 val;
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	if (forced_frag) {
+		frag = forced_frag;
+		frag_allocated = false;
+	}
+	else
+#endif
+		frag = napi_alloc_frag_align(priv->frag_size, 16);
+
+	if (!frag)
+		return -ENOMEM;
+
+	desc = &rxq->rx_desc_area[desc_idx];
+	if (unmap) {
+		addr = be32_to_cpu(desc->address);
+		dma_unmap_single(priv->netdev->dev.parent,
+				 addr,
+				 priv->pkt_size,
+				 DMA_FROM_DEVICE);
+	}
+
+	/* RX_OFFSET keeps headroom for the stack in front of the data */
+	addr = dma_map_single(priv->netdev->dev.parent,
+			      frag + RX_OFFSET,
+			      priv->pkt_size,
+			      DMA_FROM_DEVICE);
+	if (unlikely(dma_mapping_error(priv->netdev->dev.parent, addr))) {
+		printk("dma_map_single failed\n");
+		if (frag_allocated) {
+			rxq->frags[desc_idx] = NULL;
+			skb_free_frag(frag);
+		}
+		return -ENOMEM;
+	}
+
+	rxdbg("rxq_refill_desc: idx:%u addr:0x%08llx\n",
+	      desc_idx, addr);
+
+	rxq->frags[desc_idx] = frag;
+	desc->address = cpu_to_be32(addr);
+	/* address must be visible before ownership passes to hardware */
+	wmb();
+	val = CPU_RX_DESC0_HW_OWNED_MASK |
+		(priv->pkt_size << CPU_RX_DESC0_LEN_SHIFT);
+	desc->flags_len = cpu_to_be32(val);
+	return 0;
+}
+
+/*
+ * FF stuff
+ */
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+/*
+ * Build a network-order IPv4 netmask from prefix length @len.
+ *
+ * len == 0 is handled explicitly (a 32-bit shift is undefined
+ * behaviour), and the shift uses an unsigned constant so len == 1
+ * does not shift into the sign bit of a signed int.
+ */
+static inline u32 ff_gen_netmask(u8 len)
+{
+	if (!len)
+		return 0;
+	return htonl(~((1u << (32 - len)) - 1));
+}
+
+/*
+ * Commit new tunnel fast-forward parameters, or deactivate tunnel
+ * ff when @active is false.  No-op when nothing changed.  Called
+ * with all per-cpu ff locks held.
+ */
+static void __ff_tun_set_params(bool active,
+				unsigned int mtu,
+				const union ff_tun_params *tp)
+{
+	if (!active) {
+		if (!ff.tun.active)
+			return;
+
+		printk(KERN_DEBUG "ff: tunnel now NOT active\n");
+		ff.tun.active = 0;
+		return;
+	}
+
+	/* already active with identical parameters: nothing to do */
+	if (ff.tun.active) {
+		if (ff.tun.mtu == mtu && !memcmp(tp, &ff.tun.u, sizeof (*tp)))
+			return;
+	}
+
+	ff.tun.mtu = mtu;
+	memcpy(&ff.tun.u, tp, sizeof (*tp));
+
+	if (!ff.tun.active)
+		printk(KERN_DEBUG "ff: tunnel now active\n");
+	else
+		printk(KERN_DEBUG "ff: tunnel params updated\n");
+
+	ff.tun.active = true;
+}
+
+/*
+ * Re-derive tunnel fast-forward parameters from the captured tunnel
+ * netdev (sit/6rd or ip6_tnl/MAP-E) and the active WAN device, then
+ * activate or deactivate tunnel ff accordingly via
+ * __ff_tun_set_params().  Called with all per-cpu ff locks held.
+ */
+static void __ff_tun_read_params(void)
+{
+	union ff_tun_params tp;
+	const struct ff_dev_ctx *wan_ff_dev;
+
+	if (!ff.tun.netdev)
+		return;
+
+	/* no WAN: tunnel ff cannot run */
+	if (ff.wan_active_dev == -1) {
+		__ff_tun_set_params(false, 0, NULL);
+		return;
+	}
+
+	wan_ff_dev = &ff.devs[ff.wan_active_dev];
+
+	memset(&tp, 0, sizeof (tp));
+
+	if (ff.tun.netdev->type == ARPHRD_SIT) {
+		const struct ip_tunnel *tun = netdev_priv(ff.tun.netdev);
+		const struct ip_tunnel_6rd_parm *ip6rd = &tun->ip6rd;
+
+		/* only 6rd prefixes of 1..32 bits are handled */
+		if (!ip6rd->prefixlen || ip6rd->prefixlen > 32) {
+			printk(KERN_DEBUG "ff: unsupported 6rd plen\n");
+			__ff_tun_set_params(false, 0, NULL);
+			return;
+		}
+
+		/* encapsulated packet must still fit in the WAN mtu */
+		if (ff.tun.netdev->mtu + sizeof (struct iphdr) > wan_ff_dev->netdev->mtu) {
+			printk(KERN_DEBUG "ff: WAN mtu too "
+			       "small for tunnel (%u => %u)\n",
+			       ff.tun.netdev->mtu, wan_ff_dev->netdev->mtu);
+			__ff_tun_set_params(false, 0, NULL);
+			return;
+		}
+
+		tp.sit.src = tun->parms.iph.saddr;
+		tp.sit.s6rd_prefix = ip6rd->prefix.s6_addr32[0];
+		tp.sit.s6rd_pmask = ff_gen_netmask(ip6rd->prefixlen);
+		tp.sit.s6rd_plen = ip6rd->prefixlen;
+		__ff_tun_set_params(true, ff.tun.netdev->mtu, &tp);
+		return;
+	}
+
+	if (ff.tun.netdev->type == ARPHRD_TUNNEL6) {
+		const struct ip6_tnl *t = netdev_priv(ff.tun.netdev);
+		const struct __ip6_tnl_parm *prm = &t->parms;
+		const struct __ip6_tnl_fmr *fmr;
+
+		if (ff.tun.netdev->mtu + sizeof (struct ipv6hdr) >
+		    wan_ff_dev->netdev->mtu) {
+			printk(KERN_DEBUG "ff: WAN mtu too "
+			       "small for tunnel (%u => %u)\n",
+			       ff.tun.netdev->mtu, wan_ff_dev->netdev->mtu);
+			__ff_tun_set_params(false, 0, NULL);
+			return;
+		}
+
+		tp.map.src = prm->laddr;
+		tp.map.br = prm->raddr;
+
+		/* no FMR: plain ip6 tunnel, no MAP-E rule to program */
+		fmr = prm->fmrs;
+		if (!fmr) {
+			tp.map.ipv4_prefix = 0;
+			__ff_tun_set_params(true, ff.tun.netdev->mtu, &tp);
+			return;
+		}
+
+		/* only the first FMR is used; EA bits must fit in the
+		 * second 32-bit word of the ipv6 prefix */
+		if (fmr->ip6_prefix_len < 32 ||
+		    (fmr->ip6_prefix_len + 32 - fmr->ip4_prefix_len > 64)) {
+			printk(KERN_DEBUG "ff: unsupp MAP-E: eabits "
+			       "span 32 bits\n");
+			__ff_tun_set_params(false, 0, NULL);
+			return;
+		}
+
+		if (fmr->offset) {
+			printk(KERN_DEBUG "ff: unsupp MAP-E: non zero "
+			       "PSID offset\n");
+			__ff_tun_set_params(false, 0, NULL);
+			return;
+		}
+
+		tp.map.ipv4_prefix = fmr->ip4_prefix.s_addr;
+		tp.map.ipv4_pmask = ff_gen_netmask(fmr->ip4_prefix_len);
+		tp.map.ipv4_plen = fmr->ip4_prefix_len;
+		tp.map.ipv6_plen = fmr->ip6_prefix_len;
+		memcpy(&tp.map.ipv6_prefix, &fmr->ip6_prefix, 8);
+
+		/* split the EA bits between address suffix and PSID */
+		tp.map.ea_addr_mask = ~ff_gen_netmask(fmr->ip4_prefix_len);
+		if (fmr->ea_len <= 32 - fmr->ip4_prefix_len) {
+			/* v4 prefix or full IP */
+			u32 addr_bits;
+
+			addr_bits = fmr->ip4_prefix_len + fmr->ea_len;
+			if (addr_bits != 32)
+				tp.map.ea_addr_mask &= ff_gen_netmask(addr_bits);
+			tp.map.psid_len = 0;
+		} else {
+			u8 psid_len;
+
+			psid_len = fmr->ea_len - (32 - fmr->ip4_prefix_len);
+			tp.map.psid_len = psid_len;
+			tp.map.ea_port_mask = ff_gen_netmask(psid_len);
+		}
+
+		tp.map.ea_lshift = 32 - (fmr->ip6_prefix_len - 32) -
+			fmr->ea_len;
+
+		__ff_tun_set_params(true, ff.tun.netdev->mtu, &tp);
+		return;
+	}
+}
+
+/*
+ * Grab a reference on the configured tunnel netdev (if it exists,
+ * is a supported tunnel type and is up) and read its parameters.
+ * Called with all per-cpu ff locks held.
+ */
+static void __ff_tun_capture(void)
+{
+	struct net_device *dev;
+
+	if (ff.tun.netdev) {
+		printk(KERN_ERR "ff: error: tun already registered\n");
+		return;
+	}
+
+	dev = dev_get_by_name(&init_net, ff.tun_netdev_name);
+	if (!dev)
+		return;
+
+	/* dev_get_by_name took a reference: it must be dropped on
+	 * every early exit.  The original code leaked it on the type
+	 * check and called dev_put() on ff.tun.netdev — NULL at this
+	 * point — in the !IFF_UP case, leaking that reference too. */
+	if (dev->type != ARPHRD_SIT && dev->type != ARPHRD_TUNNEL6) {
+		dev_put(dev);
+		return;
+	}
+
+	if (!(dev->flags & IFF_UP)) {
+		dev_put(dev);
+		return;
+	}
+
+	ff.tun.netdev = dev;
+	__ff_tun_read_params();
+	printk(KERN_INFO "ff: tun dev grabbed\n");
+}
+
+/*
+ * Drop the reference on the captured tunnel netdev, if any, and
+ * forget it.  Called with all per-cpu ff locks held.
+ */
+static void __ff_tun_release(void)
+{
+	if (!ff.tun.netdev)
+		return;
+
+	dev_put(ff.tun.netdev);
+	ff.tun.netdev = NULL;
+	printk(KERN_INFO "ff: tun dev released\n");
+}
+
+/*
+ * Netdev notifier handling for the tunnel device: capture on UP,
+ * re-read parameters on CHANGE/CHANGEMTU, release on down/unregister.
+ * Serializes against the ff fast path by taking every per-cpu lock.
+ */
+static void ff_notifier_event_tunnel(struct net_device *dev,
+				     unsigned long event)
+{
+	ff_lock_all_cpu_bh();
+
+	switch (event) {
+	case NETDEV_UP:
+		if (!ff.tun.netdev)
+			__ff_tun_capture();
+		break;
+
+	case NETDEV_CHANGE:
+	case NETDEV_CHANGEMTU:
+		if (ff.tun.netdev == dev)
+			__ff_tun_read_params();
+		break;
+
+	case NETDEV_GOING_DOWN:
+	case NETDEV_DOWN:
+	case NETDEV_UNREGISTER:
+		if (ff.tun.netdev == dev)
+			__ff_tun_release();
+		break;
+	}
+
+	ff_unlock_all_cpu_bh();
+}
+
+/*
+ * Resolve the bridge (std bridge or fbxbridge) @ff_dev->netdev is
+ * enslaved to and cache the port pointers in @ff_dev; for a std
+ * bridge the bridge MAC and netdev are also recorded.
+ *
+ * Returns 0 when the device sits on an UP bridge named
+ * @bridge_name, 1 otherwise.
+ */
+static int ff_dev_resolve_bridge(struct ff_dev_ctx *ff_dev,
+				 const char *bridge_name)
+{
+	bool ok = false;
+
+	rcu_read_lock();
+
+	if (netif_is_bridge_port(ff_dev->netdev)) {
+		struct net_bridge_port *br_port;
+		struct net_bridge *br;
+
+		ff_dev->fbxbr_port = NULL;
+		br_port = br_port_get_rcu(ff_dev->netdev);
+		if (!br_port) {
+			ff_dev->br_port = NULL;
+			goto done;
+		}
+
+		br = br_port->br;
+		if (!br) {
+			ff_dev->br_port = NULL;
+			goto done;
+		}
+
+		/* only the configured bridge counts */
+		if (strcmp(br->dev->name, bridge_name)) {
+			ff_dev->br_port = NULL;
+			goto done;
+		}
+
+		ff_dev->br_port = br_port;
+		if (br->dev->flags & IFF_UP) {
+			memcpy(ff_dev->hwaddr, br->dev->dev_addr, 6);
+			ff_dev->br_netdev = br->dev;
+			ok = true;
+		}
+	}
+
+	if (netif_is_fbxbridge_port(ff_dev->netdev)) {
+		struct fbxbr_port *fbxbr_port;
+		struct fbxbr *fbxbr;
+
+		ff_dev->br_port = NULL;
+		fbxbr_port = fbxbr_port_get_rcu(ff_dev->netdev);
+		if (!fbxbr_port) {
+			ff_dev->fbxbr_port = NULL;
+			goto done;
+		}
+
+		fbxbr = fbxbr_port->br;
+		if (strcmp(fbxbr->dev->name, bridge_name)) {
+			ff_dev->fbxbr_port = NULL;
+			goto done;
+		}
+
+		/* reuse the pointer looked up above instead of the
+		 * original's redundant second fbxbr_port_get_rcu() call */
+		ff_dev->fbxbr_port = fbxbr_port;
+		if (fbxbr->dev->flags & IFF_UP)
+			ok = true;
+	}
+
+done:
+	rcu_read_unlock();
+	return ok ? 0 : 1;
+}
+
+/*
+ * Report whether the bridge this ff_dev was resolved against is
+ * administratively up.  False when the device is not bridged.
+ */
+static bool ff_dev_bridge_is_up(struct ff_dev_ctx *ff_dev)
+{
+	if (ff_dev->br_port) {
+		struct net_bridge *br = ff_dev->br_port->br;
+
+		return br ? !!(br->dev->flags & IFF_UP) : false;
+	}
+
+	if (ff_dev->fbxbr_port)
+		return !!(ff_dev->fbxbr_port->br->dev->flags & IFF_UP);
+
+	return false;
+}
+
+/*
+ * Pick the active WAN ff device: the first active WAN slot whose
+ * netdev name matches the userspace-configured wan_netdev_name.
+ * Updates ff.wan_active_dev and re-derives tunnel parameters.
+ * Called with all per-cpu ff locks held.
+ */
+static void __ff_select_active_wan(void)
+{
+	size_t idx;
+	int matching;
+
+	matching = -1;
+	for (idx = 0; idx < FF_DEV_LAST + 1; idx++) {
+		if (!ff_idx_is_wan(idx))
+			continue;
+		if (!ff.devs[idx].active)
+			continue;
+		if (strcmp(ff.devs[idx].netdev->name, ff.wan_netdev_name))
+			continue;
+		matching = (int)idx;
+		break;
+	}
+
+	if (ff.wan_active_dev != -1) {
+		if (matching == -1) {
+			printk(KERN_INFO "ff: no more selected wan\n");
+			ff.wan_active_dev = -1;
+		} else if (matching != ff.wan_active_dev) {
+			printk(KERN_INFO "ff: selected wan changed\n");
+			ff.wan_active_dev = matching;
+		}
+
+	} else if (matching != -1) {
+		printk(KERN_INFO "ff: selected wan now %s\n",
+		       ff.wan_netdev_name);
+		ff.wan_active_dev = matching;
+	}
+	/* tunnel viability depends on the selected WAN (mtu, presence) */
+	__ff_tun_read_params();
+}
+
+/*
+ * Mark @ff_dev usable by the fast path; re-evaluate the active WAN
+ * when the slot is WAN-facing.  Takes all per-cpu ff locks.
+ */
+static void ff_dev_mark_active(struct ff_dev_ctx *ff_dev, size_t dev_idx)
+{
+	ff_lock_all_cpu_bh();
+	ff_dev->active = true;
+
+	if (ff_idx_is_wan(dev_idx))
+		__ff_select_active_wan();
+
+	ff_unlock_all_cpu_bh();
+	printk(KERN_INFO "ff: ff_dev %s: now active\n", ff_dev->netdev->name);
+}
+
+/*
+ * Remove @ff_dev from the fast path; re-evaluate the active WAN
+ * when the slot is WAN-facing.  Logs only on an actual transition.
+ */
+static void ff_dev_mark_inactive(struct ff_dev_ctx *ff_dev, size_t dev_idx)
+{
+	bool was_active;
+
+	ff_lock_all_cpu_bh();
+	was_active = ff_dev->active;
+	ff_dev->active = false;
+
+	if (ff_idx_is_wan(dev_idx))
+		__ff_select_active_wan();
+
+	ff_unlock_all_cpu_bh();
+
+	if (was_active)
+		printk(KERN_INFO "ff: ff_dev %s: now inactive\n", ff_dev->netdev->name);
+}
+
+/*
+ * Netdev notifier handling for one ff device slot @dev_idx.
+ *
+ * On UP/CHANGE: unwrap vlan and DSA layers to find the real (conduit)
+ * netdev, match it against a hardware port when the slot requires one,
+ * take a reference on @netdev, resolve the bridge if configured, and
+ * mark the slot active.  On down/unregister: mark inactive and drop
+ * all cached references.  Called under ff_notifier_mutex.
+ */
+static void ff_notifier_event_dev(struct net_device *netdev,
+				  unsigned long event,
+				  unsigned int dev_idx)
+{
+	const struct ff_dev_desc *desc = &ff.devs_desc[dev_idx];
+	struct ff_dev_ctx *ff_dev = &ff.devs[dev_idx];
+	struct net_device *real_netdev = netdev;
+	size_t i;
+
+	switch (event) {
+	case NETDEV_UP:
+	case NETDEV_CHANGE:
+	{
+		struct dsa_port *dsap = NULL;
+		bool found;
+
+		if (ff_dev->active) {
+			/* ignore up event while already active */
+			return;
+		}
+
+		/* peel a vlan layer first, then a DSA user layer */
+		if (is_vlan_dev(netdev)) {
+			ff_dev->vlan_id = ntohs(vlan_dev_vlan_id(netdev));
+			real_netdev = vlan_dev_upper_dev(netdev);
+		} else
+			ff_dev->vlan_id = 0;
+
+		if (dsa_user_dev_check(real_netdev)) {
+			dsap = dsa_user_to_port(real_netdev);
+			real_netdev = dsa_user_to_conduit(real_netdev);
+		}
+
+		if (!(netdev->flags & IFF_UP))
+			return;
+
+		/* the underlying device must be up as well */
+		if (real_netdev != netdev && !(real_netdev->flags & IFF_UP))
+			return;
+
+		/* does this device matches one hardware port */
+		if (!desc->is_hardware)
+			ff_dev->is_hardware = 0;
+		else {
+			found = false;
+			for (i = 0; i < ARRAY_SIZE(ff.ports_by_hw_id); i++) {
+				struct bcm_enet_runner_priv *port;
+
+				port = ff.ports_by_hw_id[i];
+
+				if (!port)
+					continue;
+				if (port->netdev != real_netdev)
+					continue;
+
+				found = true;
+				break;
+			}
+
+			if (!found)
+				return;
+
+			if (dsap) {
+				ff_dev->use_dsa = 1;
+				ff_dev->dsa_port = dsap->index;
+			}
+			/* 'i' is the matching hw id from the loop above */
+			ff_dev->hw_id = i;
+			ff_dev->is_hardware = 1;
+		}
+
+		/* replace any stale reference before holding the new one */
+		if (ff_dev->netdev)
+			dev_put(ff_dev->netdev);
+
+		ff_dev->netdev = netdev;
+		dev_hold(netdev);
+		ff_dev->real_netdev = real_netdev;
+		memcpy(ff_dev->hwaddr, netdev->dev_addr, 6);
+
+		/* resolve bridge */
+		if (desc->bridge_name) {
+			if (ff_dev_resolve_bridge(ff_dev, desc->bridge_name))
+				return;
+		}
+
+		ff_dev_mark_active(ff_dev, dev_idx);
+		break;
+	}
+
+	case NETDEV_CHANGEUPPER:
+		/* bridge membership may have changed either way */
+		if (!desc->bridge_name || !ff_dev->netdev)
+			return;
+
+		if (!ff_dev->active) {
+			if (!ff_dev_resolve_bridge(ff_dev, desc->bridge_name))
+				ff_dev_mark_active(ff_dev, dev_idx);
+		} else {
+			if (!ff_dev_bridge_is_up(ff_dev))
+				ff_dev_mark_inactive(ff_dev, dev_idx);
+		}
+		break;
+
+	case NETDEV_GOING_DOWN:
+	case NETDEV_DOWN:
+	case NETDEV_UNREGISTER:
+		if (!ff_dev->netdev)
+			return;
+
+		ff_dev_mark_inactive(ff_dev, dev_idx);
+
+		/* remove all references */
+		dev_put(netdev);
+		ff_dev->netdev = NULL;
+		ff_dev->real_netdev = NULL;
+		ff_dev->br_port = NULL;
+		ff_dev->fbxbr_port = NULL;
+		ff_dev->br_netdev = NULL;
+		break;
+	}
+}
+
+/*
+ * Dispatch one netdev notifier event: match the device against the
+ * tunnel name, the configured ff device names, bridge masters (which
+ * affect every bridged slot), and finally the cached real (conduit)
+ * netdevs.  Serialized by ff_notifier_mutex.
+ */
+static int ff_notifier_event(struct net_device *dev, unsigned long event)
+{
+	size_t i;
+
+	mutex_lock(&ff_notifier_mutex);
+
+	/*
+	 * check for tun match
+	 */
+	if (!strcmp(dev->name, ff.tun_netdev_name)) {
+		ff_notifier_event_tunnel(dev, event);
+		mutex_unlock(&ff_notifier_mutex);
+		return 0;
+	}
+
+	/*
+	 * check for dev match
+	 */
+	for (i = 0; i < ARRAY_SIZE(ff.devs_desc); i++) {
+		if (!strcmp(dev->name, ff.devs_desc[i].netdev_name)) {
+			ff_notifier_event_dev(dev, event, i);
+			mutex_unlock(&ff_notifier_mutex);
+			return 0;
+		}
+	}
+
+	/*
+	 * check for bridge/fbxbridge match
+	 *
+	 * bridge can change up/down status, but lower netdev will not get
+	 * CHANGE_UPPER
+	 */
+	if (netif_is_bridge_master(dev) || netif_is_fbxbridge_master(dev)) {
+		size_t i;
+
+		for (i = 0; i < ARRAY_SIZE(ff.devs_desc); i++) {
+			struct ff_dev_ctx *ff_dev = &ff.devs[i];
+			const char *bridge_name = ff.devs_desc[i].bridge_name;
+
+			if (!bridge_name)
+				continue;
+
+			if (!ff_dev->netdev)
+				continue;
+
+			/* re-evaluate each bridged slot's active state */
+			if (!ff_dev->active) {
+				if (!ff_dev_resolve_bridge(ff_dev,
+							   bridge_name))
+					ff_dev_mark_active(ff_dev, i);
+			} else {
+				if (!ff_dev_bridge_is_up(ff_dev))
+					ff_dev_mark_inactive(ff_dev, i);
+			}
+		}
+	}
+
+	/*
+	 * check for real_dev match
+	 */
+	for (i = 0; i < ARRAY_SIZE(ff.devs); i++) {
+		if (dev == ff.devs[i].real_netdev)
+			ff_notifier_event_dev(ff.devs[i].netdev, event, i);
+	}
+
+	mutex_unlock(&ff_notifier_mutex);
+	return 0;
+}
+
+/*
+ * Raw notifier callback: only events from init_net are considered;
+ * everything else is delegated to ff_notifier_event().
+ */
+static int ff_notifier_event_cb(struct notifier_block *this,
+				unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	if (!net_eq(dev_net(dev), &init_net))
+		return 0;
+
+	ff_notifier_event(dev, event);
+	return 0;
+}
+
+/*
+ * ipv4 forward cache private data
+ */
+struct ff_priv {
+	struct in6_addr		tun_dest_ip6;
+	struct dst_entry	*tun_dst;
+};
+
/* Drop the dst reference held by the cached tunnel route. */
static void ff_priv_release(const struct ff_priv *priv)
{
	dst_release(priv->tun_dst);
}
+
/*
 * Destructor callback installed on entries whose private area holds a
 * struct ff_priv; also serves as the ownership marker checked by the
 * *_get_ro_priv()/*_get_rw_priv() helpers.
 */
static void ff_priv_destructor_cb(void *data)
{
	ff_priv_release((const struct ff_priv *)data);
}
+
+static const struct ff_priv *ffn_get_ro_priv(const struct ffn_lookup_entry *e)
+{
+	if (e->manip.priv_destructor != ff_priv_destructor_cb)
+		return NULL;
+
+	return (const struct ff_priv *)e->manip.ffn_priv_area;
+}
+
+static struct ff_priv *ffn_get_rw_priv(struct ffn_lookup_entry *e)
+{
+	BUILD_BUG_ON(sizeof (e->manip.ffn_priv_area) <
+		     sizeof (struct ff_priv));
+
+	if (e->manip.priv_destructor &&
+	    e->manip.priv_destructor != ff_priv_destructor_cb)
+		return NULL;
+
+	return (struct ff_priv *)e->manip.ffn_priv_area;
+}
+
+static const struct ff_priv *fwc_get_ro_priv(const struct fbxbr_fwcache *fwc)
+{
+	if (fwc->priv_destructor != ff_priv_destructor_cb)
+		return NULL;
+
+	return (const struct ff_priv *)fwc->priv_area;
+}
+
+static struct ff_priv *fwc_get_rw_priv(const struct fbxbr_fwcache *fwc)
+{
+	BUILD_BUG_ON(sizeof (fwc->priv_area) < sizeof (struct ff_priv));
+
+	if (fwc->priv_destructor &&
+	    fwc->priv_destructor != ff_priv_destructor_cb)
+		return NULL;
+
+	return (struct ff_priv *)fwc->priv_area;
+}
+
+/*
+ * ipv6 forward cache private data
+ */
+struct ff6_priv {
+	u32			tun_dest_ip;
+	struct dst_entry	*tun_dst;
+};
+
/* Drop the dst reference held by the cached tunnel route. */
static void ff6_priv_release(const struct ff6_priv *priv)
{
	dst_release(priv->tun_dst);
}
+
/*
 * Destructor callback installed on FFN6 entries whose private area
 * holds a struct ff6_priv; also serves as the ownership marker
 * checked by ffn6_get_ro_priv()/ffn6_get_rw_priv().
 */
static void ff6_priv_destructor_cb(void *data)
{
	ff6_priv_release((const struct ff6_priv *)data);
}
+
+static const struct ff6_priv *ffn6_get_ro_priv(const struct ffn6_lookup_entry *e6)
+{
+	if (e6->manip.priv_destructor != ff6_priv_destructor_cb)
+		return NULL;
+
+	return (const struct ff6_priv *)e6->manip.ffn_priv_area;
+}
+
+static struct ff6_priv *ffn6_get_rw_priv(struct ffn6_lookup_entry *e6)
+{
+	BUILD_BUG_ON(sizeof (e6->manip.ffn_priv_area) <
+		     sizeof (struct ff6_priv));
+
+	if (e6->manip.priv_destructor &&
+	    e6->manip.priv_destructor != ff6_priv_destructor_cb)
+		return NULL;
+
+	return (struct ff6_priv *)e6->manip.ffn_priv_area;
+}
+
+
+/*
+ *
+ */
+static u32 ff_tun_extract_6rd_addr(const struct in6_addr *d)
+{
+	u32 a1, a2;
+
+	a1 = ntohl(d->s6_addr32[0] & ~ff.tun.u.sit.s6rd_pmask);
+	a1 <<= ff.tun.u.sit.s6rd_plen;
+
+	a2 = ntohl(d->s6_addr32[1] & ff.tun.u.sit.s6rd_pmask);
+	a2 >>= (32 - ff.tun.u.sit.s6rd_plen);
+	return htonl(a1 | a2);
+}
+
+/*
+ *
+ */
+static void ff_tun_gen_mape_addr(u32 addr, u16 port, struct in6_addr *dest)
+{
+	u32 eabits;
+	u16 psid;
+
+	eabits = ntohl(addr & ff.tun.u.map.ea_addr_mask) << ff.tun.u.map.psid_len;
+	psid = 0;
+	if (ff.tun.u.map.psid_len) {
+		psid = ntohs(port & ff.tun.u.map.ea_port_mask) >>
+			(16 - ff.tun.u.map.psid_len);
+		eabits |= psid;
+	}
+
+	memcpy(dest, &ff.tun.u.map.ipv6_prefix, 8);
+	dest->s6_addr32[1] |= htonl(eabits << ff.tun.u.map.ea_lshift);
+
+	dest->s6_addr32[2] = htonl(ntohl(addr) >> 16);
+	dest->s6_addr32[3] = htonl((ntohl(addr) << 16) | psid);
+}
+
+/*
+ * broadcom DSA
+ */
+#define BRCM_TAG_LEN	4
+
+#define BRCM_OPCODE_SHIFT	5
+#define BRCM_OPCODE_MASK	0x7
+
+struct ff_pkt_info {
+	__be16	vlan_id;
+	u8	is_dsa:1;
+	u8	dsa_port:3;
+	u8	is_ipv4:1;
+	u8	l3_hdr_offset;
+	u16	l3_plen;
+};
+
+/* 2nd byte in the tag */
+#define BRCM_EG_CID_MASK	0xff
+
+/* 3rd byte in the tag */
+#define BRCM_EG_RC_MASK		0xff
+#define  BRCM_EG_RC_RSVD	(3 << 6)
+#define  BRCM_EG_RC_EXCEPTION	(1 << 5)
+#define  BRCM_EG_RC_PROT_SNOOP	(1 << 4)
+#define  BRCM_EG_RC_PROT_TERM	(1 << 3)
+#define  BRCM_EG_RC_SWITCH	(1 << 2)
+#define  BRCM_EG_RC_MAC_LEARN	(1 << 1)
+#define  BRCM_EG_RC_MIRROR	(1 << 0)
+#define BRCM_EG_TC_SHIFT	5
+#define BRCM_EG_TC_MASK		0x7
+#define BRCM_EG_PID_MASK	0x1f
+
+/*
+ *
+ */
+static bool ff_send(struct bcm_enet_runner_priv *priv,
+		    struct tx_queue *txq,
+		    u32 dma_buf_addr,
+		    void *frag,
+		    u32 frag_size,
+		    unsigned int send_len,
+		    int extra_pad_len)
+{
+	const struct bcm_xrdp_enet_params *params = &priv->xrdp_params;
+	struct tx_desc *tx_desc;
+	unsigned int cur_desc, next_desc;
+	u32 flags_len;
+
+	/* pad small packets (add more for DSA packets) */
+	if (send_len < 60 + extra_pad_len)
+		send_len = 60 + extra_pad_len;
+
+	dma_sync_single_for_device(priv->netdev->dev.parent,
+				   dma_buf_addr,
+				   send_len,
+				   DMA_TO_DEVICE);
+
+	spin_lock(&txq->tx_lock);
+
+	/* make sure we have room */
+	cur_desc = txq->tx_cur_desc;
+	next_desc = cur_desc + 1;
+	if (unlikely(next_desc >= txq->ring_size))
+		next_desc = 0;
+
+	if (WARN_ON(unlikely(next_desc == txq->tx_dirty_desc))) {
+		spin_unlock(&txq->tx_lock);
+		return 1;
+	}
+
+	txq->tx_desc_pdata[cur_desc].data = frag;
+	txq->tx_desc_pdata[cur_desc].len = frag_size;
+
+	/* point to the next available desc */
+	txq->tx_cur_desc = next_desc;
+
+	/* update descriptor index */
+	flags_len = CPU_TX_DESC0_HW_OWNED_MASK |
+		(send_len << CPU_TX_DESC0_LEN_SHIFT);
+
+	fftxdbg("ffxmit[%s/q%u]: desc_idx:%u frag:%pS len:%u size:%u\n",
+		priv->netdev->name,
+		txq->index,
+		cur_desc, frag, send_len, frag_size);
+
+	tx_desc = &txq->tx_desc_area[cur_desc];
+	tx_desc->address = cpu_to_be32(dma_buf_addr);
+	wmb();
+	tx_desc->flags_len = cpu_to_be32(flags_len);
+	spin_unlock(&txq->tx_lock);
+
+	bcm_xrdp_api_wakeup(priv->xrdp,
+			    params->tx_core_id,
+			    params->txq_wakeup_thread[0]);
+
+	return 0;
+}
+
+/*
+ *
+ */
+static int ff_parse_packet(struct bcm_enet_runner_priv *port,
+			   struct ff_pkt_info *info,
+			   const void *frag,
+			   size_t offset,
+			   size_t eth_len)
+{
+	const struct ethhdr *eth;
+	const uint16_t *proto;
+
+	eth = (const struct ethhdr *)((uint8_t *)frag + offset);
+
+	/* extract DSA info */
+	if (netdev_uses_dsa(port->netdev)) {
+		const u8 *brcm_tag;
+
+		brcm_tag = (const u8 *)&eth->h_proto;
+
+		if (unlikely((brcm_tag[0] >> BRCM_OPCODE_SHIFT) & BRCM_OPCODE_MASK))
+			return 1;
+
+		if (unlikely(brcm_tag[2] & BRCM_EG_RC_RSVD))
+			return 1;
+
+		info->is_dsa = 1;
+		info->dsa_port = brcm_tag[3] & BRCM_EG_PID_MASK;
+		proto = (const uint16_t *)(brcm_tag + BRCM_TAG_LEN);
+		eth_len -= BRCM_TAG_LEN;
+	} else
+		proto = (const uint16_t *)&eth->h_proto;
+
+	if (*proto == htons(ETH_P_8021Q)) {
+		const struct vlan_hdr *vhdr;
+
+		vhdr = (const struct vlan_hdr *)(proto + 1);
+		info->vlan_id = vhdr->h_vlan_TCI;
+
+		proto = (const uint16_t *)&vhdr->h_vlan_encapsulated_proto;
+		info->l3_hdr_offset = (const void *)(vhdr + 1) - (const void *)eth;
+		info->l3_plen = eth_len - VLAN_ETH_HLEN;
+	} else {
+		info->vlan_id = 0;
+		info->l3_hdr_offset = (const void *)(proto + 1) - (const void *)eth;
+		info->l3_plen = eth_len - ETH_HLEN;
+	}
+
+	if (*proto == htons(ETH_P_IP)) {
+		if (info->l3_plen < sizeof (struct iphdr))
+			return 1;
+		info->is_ipv4 = 1;
+		return 0;
+	}
+
+	info->is_ipv4 = 0;
+	if (*proto == htons(ETH_P_IPV6)) {
+		if (info->l3_plen < sizeof (struct ipv6hdr))
+			return 1;
+		return 0;
+	}
+
+	return 1;
+}
+
/* how a forwarded packet leaves the box */
enum ff_xmit_mode {
	FF_XMIT_IPV4,		/* plain IPv4 */
	FF_XMIT_IPV6,		/* plain IPv6 */
	FF_XMIT_IPV6_IN_IPV4,	/* 6rd: IPv6 encapsulated in IPv4 */
	FF_XMIT_IPV4_IN_IPV6,	/* MAP-E: IPv4 encapsulated in IPv6 */
};
+
+/*
+ *
+ */
+static bool ff_receive(struct bcm_enet_runner_priv *rx_port,
+		       struct rx_queue *rxq,
+		       unsigned int rx_desc_idx,
+		       void *frag,
+		       size_t frag_size,
+		       size_t offset, size_t eth_len)
+{
+	struct rx_desc *rx_desc;
+	struct ff_pkt_info pinfo;
+	struct ethhdr *eth;
+	struct bcm_enet_runner_priv *tx_port;
+	struct net_device_stats *tx_hw_stats;
+	struct net_device *last_rx_dev, *next_tx_dev;
+	struct ffn_lookup_entry *e = NULL;
+	struct ffn6_lookup_entry *e6 = NULL;
+	struct nf_conn *ct = NULL;
+	enum ff_xmit_mode xmit_mode;
+	const struct in6_addr *tun_v6_pdest = NULL;
+	struct net_device *tx_dev;
+	struct ff_dev_ctx *tx_ff_dev, *rx_ff_dev;
+	u32 tun_v4_dest = 0;
+	u8 dest_hw[6];
+	u32 buf_addr;
+	unsigned int timeout;
+	void *l2_hdr, *l3_hdr, *l4_hdr;
+	bool l3_is_ipv4, l4_is_tcp;
+	u16 proto;
+	u16 *pproto;
+	size_t i, rx_ff_dev_idx, tx_ff_dev_idx;
+	bool parsed;
+	void *new_frag;
+
+	/* make sure we have headroom for the worst case scenario */
+	BUILD_BUG_ON(NET_SKB_PAD <
+		     (sizeof (struct ipv6hdr) + VLAN_HLEN + BRCM_TAG_LEN));
+
+	if (!ff_enabled)
+		return false;
+
+	if (eth_len < ETH_HLEN)
+		return false;
+
+	/* locate rx ff device */
+	parsed = false;
+	for (i = 0; i < ARRAY_SIZE(ff.devs); i++) {
+		if (!ff.devs[i].active)
+			continue;
+
+		if (ff.devs[i].hw_id != rx_port->ff_hw_id)
+			continue;
+
+		/* candidate, fully parse packet */
+		if (!parsed &&
+		    ff_parse_packet(rx_port, &pinfo, frag, offset, eth_len))
+			return false;
+
+		parsed = true;
+
+		/* make sure this is the right device */
+		if (ff.devs[i].use_dsa) {
+			if (!pinfo.is_dsa)
+				continue;
+
+			if (ff.devs[i].dsa_port != pinfo.dsa_port)
+				continue;
+		}
+
+		if (ff.devs[i].vlan_id != pinfo.vlan_id)
+			continue;
+
+		/* device match! */
+		break;
+	}
+
+	if (i == ARRAY_SIZE(ff.devs))
+		return false;
+
+	rx_ff_dev_idx = i;
+	rx_ff_dev = &ff.devs[rx_ff_dev_idx];
+	last_rx_dev = rx_ff_dev->netdev;
+
+	/* find opposing device */
+	if (ff_idx_is_wan(rx_ff_dev_idx)) {
+		if (!ff.devs[FF_DEV_LAN_LAST].active)
+			return false;
+
+		/* XXX: to get bridge/fbxbridge device, assume to be
+		 * the same on all devices, real tx dev not yet
+		 * known */
+		if (!ff.devs[FF_DEV_LAN_LAST].fbxbr_port)
+			tx_dev = ff.devs[FF_DEV_LAN_LAST].br_netdev;
+		else
+			tx_dev = ff.devs[FF_DEV_LAN_LAST].netdev;
+	} else {
+		if (ff.wan_active_dev != -1)
+			tx_dev = ff.devs[ff.wan_active_dev].netdev;
+		else
+			return false;
+	}
+
+	if (WARN_ON(!tx_dev))
+		return false;
+
+	/* make sure packet is for our mac address */
+	eth = (struct ethhdr *)((uint8_t *)frag + offset);
+	if (memcmp(eth->h_dest, ff.devs[i].hwaddr, 6))
+		return false;
+
+	l3_is_ipv4 = pinfo.is_ipv4;
+	l3_hdr = (u8 *)eth + pinfo.l3_hdr_offset;
+
+	if (l3_is_ipv4) {
+		struct iphdr *iph;
+		struct fbxbr_fwcache *fwc;
+		struct fbxbr *fbxbr = NULL;
+		struct fbxbr_port *fbxbr_fwd_port = NULL;
+		u16 sport, dport;
+		u8 ip_proto;
+
+handle_ipv4:
+		iph = (struct iphdr *)l3_hdr;
+
+		/* lookup IP ffn entry */
+		if (iph->ihl > 5 || (iph->frag_off & htons(IP_MF | IP_OFFSET)))
+			return false;
+
+		if (iph->ttl <= 1)
+			return false;
+
+		ip_proto = iph->protocol;
+		if (ip_proto == IPPROTO_TCP) {
+			struct tcphdr *tcph;
+
+			if (pinfo.l3_plen < sizeof (*iph) + sizeof (*tcph))
+				return false;
+
+			tcph = (struct tcphdr *)((u8 *)iph + 20);
+			if (tcph->fin ||
+			    tcph->syn ||
+			    tcph->rst ||
+			    !tcph->ack) {
+				return false;
+			}
+
+			sport = tcph->source;
+			dport = tcph->dest;
+			l4_hdr = tcph;
+			l4_is_tcp = true;
+
+		} else if (ip_proto == IPPROTO_UDP) {
+			struct udphdr *udph;
+
+			if (pinfo.l3_plen < sizeof (*iph) + sizeof (*udph))
+				return false;
+
+			udph = (struct udphdr *)((u8 *)iph + 20);
+			sport = udph->source;
+			dport = udph->dest;
+			l4_hdr = udph;
+			l4_is_tcp = false;
+
+		} else if (ip_proto == IPPROTO_IPV6) {
+			struct ipv6hdr *ip6hdr;
+			u32 ip6rd_daddr;
+			bool src_pfx_match;
+
+			if (!ff.tun.active)
+				return false;
+
+			/* must be for us */
+			if (iph->daddr != ff.tun.u.sit.src)
+				return false;
+
+			/* check len */
+			if (pinfo.l3_plen < sizeof (struct iphdr) +
+			    sizeof (struct ipv6hdr))
+				return false;
+
+			ip6hdr = (struct ipv6hdr *)(iph + 1);
+
+			/* must belong to 6rd prefix */
+			if ((ip6hdr->daddr.s6_addr32[0] &
+			     ff.tun.u.sit.s6rd_pmask) != ff.tun.u.sit.s6rd_prefix)
+				return false;
+
+			/* 6rd address */
+			ip6rd_daddr = ff_tun_extract_6rd_addr(&ip6hdr->daddr);
+			if (ip6rd_daddr != ff.tun.u.sit.src)
+				return false;
+
+			/* check for spoofing here */
+			if ((ip6hdr->saddr.s6_addr32[0] &
+			     ff.tun.u.sit.s6rd_pmask) == ff.tun.u.sit.s6rd_prefix)
+				src_pfx_match = true;
+			else
+				src_pfx_match = false;
+
+			/* if src prefix is the 6rd prefix, packet
+			 * should come from corresponding IPv4
+			 * source */
+			if (src_pfx_match) {
+				u32 addr;
+
+				addr = ff_tun_extract_6rd_addr(&ip6hdr->saddr);
+				if (addr != iph->saddr)
+					return false;
+			} else {
+				/* only accept packet from configured
+				 * border relay */
+				if (!ff.tun.sit_6rd_br ||
+				    iph->saddr != ff.tun.sit_6rd_br)
+					return false;
+			}
+
+			l3_hdr = ip6hdr;
+			pinfo.l3_plen -= 20;
+			l3_is_ipv4 = false;
+			goto handle_ipv6;
+
+		} else
+			return false;
+
+		if (netif_is_fbxbridge_port(last_rx_dev)) {
+			struct fbxbr_fwcache_key k;
+			struct fbxbr_port *p;
+			u32 hash;
+
+			p = fbxbr_port_get_rcu(last_rx_dev);
+			fbxbr = p->br;
+
+			if (p->is_wan) {
+				k.wan_ip = iph->saddr;
+				k.lan_ip = iph->daddr;
+				k.wan_port = sport;
+				k.lan_port = dport;
+				fbxbr_fwd_port = fbxbr->lan_port;
+			} else {
+				k.lan_ip = iph->saddr;
+				k.wan_ip = iph->daddr;
+				k.lan_port = sport;
+				k.wan_port = dport;
+				fbxbr_fwd_port = fbxbr->wan_port;
+			}
+			k.is_tcp = l4_is_tcp;
+
+			if (!unlikely(fbxbr_fwd_port))
+				return false;
+
+			hash = fbxbr_fwcache_hash(&k);
+			fwc = __fbxbr_fwcache_lookup_rcu(p->br, hash, &k);
+			if (!fwc)
+				return false;
+
+			next_tx_dev = fbxbr_fwd_port->dev;
+			e = NULL;
+		} else {
+			struct ffn_lookup_key k;
+
+			k.sip = iph->saddr;
+			k.dip = iph->daddr;
+			k.sport = sport;
+			k.dport = dport;
+			k.is_tcp = l4_is_tcp;
+			e = __ffn_get_rcu(&k);
+			if (!e)
+				return false;
+
+			if (e->manip.dst->obsolete > 0)
+				return false;
+
+			ct = e->manip.ct;
+
+			/* only fast forward TCP connections in established state */
+			if (l4_is_tcp &&
+			    ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+				return false;
+
+			next_tx_dev = e->manip.dst->dev;
+			fwc = NULL;
+		}
+
+		/* find out if the packet is to be sent as-is or
+		 * tunneled */
+		if (ff.tun.netdev && next_tx_dev == ff.tun.netdev) {
+			const struct ff_priv *ff_priv;
+			struct ff_priv *ff_wpriv;
+			struct dst_entry *v6_dst;
+			const struct in6_addr *pdest, *nexthop;
+			struct in6_addr dest;
+			struct rt6_info *rt6;
+			struct neighbour *neigh;
+
+			/* IPv4 tunneled into MAP-E device */
+			if (!ff.tun.active) {
+				return false;
+			}
+
+			if (pinfo.l3_plen > ff.tun.mtu)
+				return false;
+
+			/* lookup ipv6 route cache */
+			if (e)
+				ff_priv = ffn_get_ro_priv(e);
+			else
+				ff_priv = fwc_get_ro_priv(fwc);
+
+			if (ff_priv) {
+				if (ff_priv->tun_dst->obsolete < 0) {
+					/* valid route found */
+					v6_dst = ff_priv->tun_dst;
+					pdest = &ff_priv->tun_dest_ip6;
+					goto cached_ipv6_route;
+				}
+
+				ff_priv_release(ff_priv);
+				if (e)
+					e->manip.priv_destructor = NULL;
+				else
+					fwc->priv_destructor = NULL;
+			}
+
+			/* cache miss, compute IPv6 destination */
+			if (ff.tun.u.map.ipv4_prefix &&
+			    (iph->daddr & ff.tun.u.map.ipv4_pmask) ==
+			    ff.tun.u.map.ipv4_prefix) {
+				/* compute dest using FMR */
+				ff_tun_gen_mape_addr(iph->daddr, dport, &dest);
+				pdest = &dest;
+			} else {
+				/* next hop is BR */
+				pdest = &ff.tun.u.map.br;
+			}
+
+			/* v6 route lookup */
+			rt6 = rt6_lookup(&init_net, pdest, NULL, 0, NULL, 0);
+			if (!rt6)
+				return false;
+
+			if (e)
+				ff_wpriv = ffn_get_rw_priv(e);
+			else
+				ff_wpriv = fwc_get_rw_priv(fwc);
+			if (!ff_wpriv)
+				return false;
+
+			/* cache this inside FFN private area */
+			ff_wpriv->tun_dst = (struct dst_entry *)rt6;
+			memcpy(&ff_wpriv->tun_dest_ip6, pdest, 16);
+			if (e)
+				e->manip.priv_destructor = ff_priv_destructor_cb;
+			else
+				fwc->priv_destructor = ff_priv_destructor_cb;
+			ff_priv = ff_wpriv;
+
+			v6_dst = (struct dst_entry *)rt6;
+
+cached_ipv6_route:
+			if (v6_dst->dev != tx_dev) {
+				return false;
+			}
+
+			/* is the neighboor ready ? */
+			rt6 = (struct rt6_info *)v6_dst;
+			nexthop = rt6_nexthop(rt6, (struct in6_addr *)pdest);
+			if (!nexthop) {
+				return false;
+			}
+
+			neigh = __ipv6_neigh_lookup_noref(tx_dev, nexthop);
+			if (!neigh || !(neigh->nud_state & NUD_VALID))
+				return false;
+			memcpy(dest_hw, neigh->ha, 6);
+
+			xmit_mode = FF_XMIT_IPV4_IN_IPV6;
+			tun_v6_pdest = &ff_priv->tun_dest_ip6;
+
+		} else if (next_tx_dev == tx_dev) {
+			struct neighbour *neigh;
+			const struct rtable *rt;
+
+			/* is the neighboor ready ? */
+			if (e) {
+				u32 nexthop;
+
+				rt = (const struct rtable *)e->manip.dst;
+				nexthop = (__force u32)rt_nexthop(rt,
+							   e->manip.new_dip);
+				neigh = __ipv4_neigh_lookup_noref(tx_dev,
+								  nexthop);
+				if (!neigh || !(neigh->nud_state & NUD_VALID))
+					return false;
+
+				memcpy(dest_hw, neigh->ha, 6);
+			} else {
+				if (!fbxbr_fwd_port->is_wan) {
+					if (!fbxbr->have_hw_addr)
+						return false;
+					memcpy(dest_hw, fbxbr->lan_hwaddr, 6);
+				} else {
+					__be32 nh;
+
+					nh = iph->daddr;
+					if ((nh & fbxbr->wan_netmask) !=
+					    (fbxbr->wan_ipaddr &
+					     fbxbr->wan_netmask)) {
+						rt = fbxbr_fwd_port->rt;
+						if (!rt ||
+						    rt->dst.obsolete > 0)
+							return false;
+
+						nh = rt_nexthop(rt, nh);
+					}
+
+					neigh = __ipv4_neigh_lookup_noref(
+						tx_dev, nh);
+					if (!neigh ||
+					    !(neigh->nud_state & NUD_VALID))
+						return false;
+
+					memcpy(dest_hw, neigh->ha, 6);
+				}
+			}
+
+			xmit_mode = FF_XMIT_IPV4;
+		} else
+			return false;
+
+	} else {
+		struct ipv6hdr *ip6hdr;
+		struct ffn6_lookup_key k;
+		u16 sport, dport;
+		u8 ip_proto;
+
+handle_ipv6:
+		ip6hdr = (struct ipv6hdr *)l3_hdr;
+
+		if (ip6hdr->hop_limit <= 1 || !ip6hdr->payload_len)
+			return false;
+
+		if (ntohs(ip6hdr->payload_len) > pinfo.l3_plen)
+			return false;
+
+		ip_proto = ip6hdr->nexthdr;
+
+		if (ip_proto == IPPROTO_TCP) {
+			struct tcphdr *tcph;
+
+			if (pinfo.l3_plen < sizeof (*ip6hdr) + sizeof (*tcph))
+				return false;
+
+			tcph = (struct tcphdr *)((u8 *)ip6hdr +
+						 sizeof (*ip6hdr));
+
+			if (tcph->fin ||
+			    tcph->syn ||
+			    tcph->rst ||
+			    !tcph->ack) {
+				return false;
+			}
+
+			sport = tcph->source;
+			dport = tcph->dest;
+			l4_hdr = tcph;
+			l4_is_tcp = true;
+
+		} else if (ip_proto == IPPROTO_UDP) {
+			struct udphdr *udph;
+
+			if (pinfo.l3_plen < sizeof (*ip6hdr) + sizeof (*udph))
+				return false;
+
+			udph = (struct udphdr *)((u8 *)ip6hdr +
+						 sizeof (*ip6hdr));
+			sport = udph->source;
+			dport = udph->dest;
+			l4_hdr = udph;
+			l4_is_tcp = false;
+
+		} else if (ip_proto == IPPROTO_IPIP) {
+			struct iphdr *iph;
+
+			if (!ff.tun.active)
+				return false;
+
+			/* must be for us */
+			if (memcmp(&ip6hdr->daddr, &ff.tun.u.map.src, 16))
+				return false;
+
+			/* check len */
+			if (pinfo.l3_plen < sizeof (struct iphdr) +
+			    sizeof (struct ipv6hdr))
+				return false;
+
+			iph = (struct iphdr *)(ip6hdr + 1);
+
+			/* does it come from BR ? */
+			if (memcmp(&ip6hdr->saddr, &ff.tun.u.map.br, 16)) {
+				struct in6_addr exp_src_addr;
+
+				/* no, check FMR for spoofing */
+				if (!ff.tun.u.map.ipv4_prefix)
+					return false;
+
+				/* check up to PSID to reduce lookup
+				 * depth */
+				ff_tun_gen_mape_addr(iph->saddr, 0,
+						     &exp_src_addr);
+				if (!ipv6_prefix_equal(&ip6hdr->saddr,
+						       &exp_src_addr,
+						       ff.tun.u.map.ipv6_plen +
+						       ff.tun.u.map.ipv4_plen))
+					return false;
+			}
+
+			last_rx_dev = ff.tun.netdev;
+			if (!last_rx_dev)
+				return false;
+
+			l3_hdr = iph;
+			pinfo.l3_plen -= sizeof (*ip6hdr);
+			l3_is_ipv4 = true;
+			goto handle_ipv4;
+
+		} else
+			return false;
+
+		k.sip = ip6hdr->saddr.s6_addr32;
+		k.dip = ip6hdr->daddr.s6_addr32;
+		k.sport = sport;
+		k.dport = dport;
+		k.is_tcp = l4_is_tcp;
+		e6 = __ffn6_get_rcu(&k);
+		if (!e6) {
+			return false;
+		}
+
+		if (e6->manip.dst->obsolete > 0) {
+			return false;
+		}
+
+		ct = e6->manip.ct;
+
+		/* only fast forward TCP connections in established state */
+		if (l4_is_tcp &&
+		    ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED) {
+			return false;
+		}
+
+		/* find out if the packet is to be sent as-is or
+		 * tunneled */
+		if (ff.tun.netdev && e6->manip.dst->dev == ff.tun.netdev) {
+			const struct ff6_priv *ff6_priv;
+			struct ff6_priv *ff6_wpriv;
+			struct dst_entry *v4_dst;
+			struct flowi4 fl4;
+			struct rtable *rt;
+			struct neighbour *neigh;
+			u32 dest, nexthop;
+
+			/* IPv6 tunneled into SIT device using 6rd */
+			if (!ff.tun.active) {
+				return false;
+			}
+
+			if (pinfo.l3_plen > ff.tun.mtu)
+				return false;
+
+			/* lookup ipv4 route cache */
+			ff6_priv = ffn6_get_ro_priv(e6);
+			if (ff6_priv) {
+				if (!ff6_priv->tun_dst->obsolete) {
+					/* valid route found */
+					v4_dst = ff6_priv->tun_dst;
+					dest = ff6_priv->tun_dest_ip;
+					goto cached_ipv4_route;
+				}
+
+				ff6_priv_release(ff6_priv);
+				e6->manip.priv_destructor = NULL;
+			}
+
+			/* cache miss, compute IPv4 destination */
+			if ((ip6hdr->daddr.s6_addr32[0] &
+			     ff.tun.u.sit.s6rd_pmask) == ff.tun.u.sit.s6rd_prefix) {
+				/* next hop via prefix */
+				dest = ff_tun_extract_6rd_addr(&ip6hdr->daddr);
+			} else {
+				const struct in6_addr *nh6;
+				struct rt6_info *rt6;
+
+				/* next hop via route */
+				rt6 = (struct rt6_info *)e6->manip.dst;
+				nh6 = rt6_nexthop(rt6,
+				      (struct in6_addr *)e6->manip.new_dip);
+				if (!nh6) {
+					return false;
+				}
+
+				/* should be a v4 mapped */
+				if (nh6->s6_addr32[0] != 0 ||
+				    nh6->s6_addr32[1] != 0 ||
+				    nh6->s6_addr32[2] != 0) {
+					return false;
+				}
+
+				dest = nh6->s6_addr32[3];
+			}
+
+			/* v4 route lookup */
+			rt = ip_route_output_ports(&init_net, &fl4, NULL,
+						   dest, ff.tun.u.sit.src,
+						   0, 0,
+						   IPPROTO_IPV6, 0,
+						   0);
+			if (IS_ERR(rt) ||
+			    rt->rt_type != RTN_UNICAST)
+				return false;
+
+			ff6_wpriv = ffn6_get_rw_priv(e6);
+			if (!ff6_wpriv)
+				return false;
+
+			/* cache this inside FFN private area */
+			ff6_wpriv->tun_dst = (struct dst_entry *)rt;
+			ff6_wpriv->tun_dest_ip = dest;
+			e6->manip.priv_destructor = ff6_priv_destructor_cb;
+
+			v4_dst = (struct dst_entry *)rt;
+			ff6_priv = ff6_wpriv;
+
+cached_ipv4_route:
+			if (v4_dst->dev != tx_dev) {
+				return false;
+			}
+
+			/* is the neighboor ready ? */
+			rt = (struct rtable *)v4_dst;
+			nexthop = (__force u32)rt_nexthop(rt, dest);
+			neigh = __ipv4_neigh_lookup_noref(tx_dev, nexthop);
+			if (!neigh || !(neigh->nud_state & NUD_VALID))
+				return false;
+			memcpy(dest_hw, neigh->ha, 6);
+
+			tun_v4_dest = dest;
+			xmit_mode = FF_XMIT_IPV6_IN_IPV4;
+
+		} else if (e6->manip.dst->dev == tx_dev) {
+			const struct in6_addr *nexthop;
+			struct rt6_info *rt6;
+			struct neighbour *neigh;
+
+			/* is the neighboor ready ? */
+			rt6 = (struct rt6_info *)e6->manip.dst;
+
+			nexthop = rt6_nexthop(rt6,
+				      (struct in6_addr *)e6->manip.new_dip);
+			if (!nexthop)
+				return false;
+
+			neigh = __ipv6_neigh_lookup_noref(tx_dev, nexthop);
+			if (!neigh || !(neigh->nud_state & NUD_VALID))
+				return false;
+			memcpy(dest_hw, neigh->ha, 6);
+
+			xmit_mode = FF_XMIT_IPV6;
+		} else
+			return false;
+	}
+
+	/* compute outgoing device */
+	if (!ff_idx_is_wan(rx_ff_dev_idx)) {
+		tx_ff_dev = &ff.devs[ff.wan_active_dev];
+		tx_ff_dev_idx = ff.wan_active_dev;
+
+	} else if (ff.devs[FF_DEV_LAN_LAST].br_port) {
+		struct net_bridge_port *br_port;
+		struct net_bridge_fdb_entry *fdb;
+
+		/* XXX get reference to bridge using last lan port */
+		br_port = ff.devs[FF_DEV_LAN_LAST].br_port;
+		fdb = br_fdb_find_rcu(br_port->br, dest_hw, 0);
+		if (!fdb)
+			return false;
+
+		tx_ff_dev = NULL;
+		for (i = 0; i < ARRAY_SIZE(ff.devs); i++) {
+			if (!ff.devs[i].active)
+				continue;
+			if (ff.devs[i].br_port == fdb->dst) {
+				tx_ff_dev = &ff.devs[i];
+				break;
+			}
+		}
+
+		if (!tx_ff_dev) {
+			return false;
+		}
+
+		tx_ff_dev_idx = i;
+
+	} else if (ff.devs[FF_DEV_LAN_LAST].fbxbr_port) {
+		tx_ff_dev = &ff.devs[FF_DEV_LAN_LAST];
+		tx_ff_dev_idx = FF_DEV_LAN_LAST;
+	} else
+		return false;
+
+	/* update rx statistics */
+	if (!ff_idx_is_wan(rx_ff_dev_idx) && rx_ff_dev->br_port) {
+		struct net_bridge *br;
+		struct net_bridge_port *p;
+
+		/* packet comes from a bridge, make sure we are
+		 * allowed to ingress it */
+		p = rx_ff_dev->br_port;
+		if (p->state != BR_STATE_FORWARDING)
+			return false;
+
+		/* refresh FDB entry for this source */
+		br = netdev_priv(rx_ff_dev->br_netdev);
+		if (!br_fdb_update_only(br, p, eth->h_source))
+			return false;
+
+		dev_sw_netstats_rx_add(br->dev, eth_len);
+	}
+
+	if (rx_ff_dev->vlan_id) {
+		struct vlan_dev_priv *vlan = vlan_dev_priv(rx_ff_dev->netdev);
+		struct vlan_pcpu_stats *stats;
+		stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
+		u64_stats_inc(&stats->rx_packets);
+		u64_stats_add(&stats->rx_bytes, eth_len);
+	} else {
+		rx_ff_dev->netdev->stats.rx_packets++;
+		rx_ff_dev->netdev->stats.rx_bytes += eth_len;
+	}
+
+	rx_desc = &rxq->rx_desc_area[rx_desc_idx];
+
+	/* do we have room in the tx queue ? */
+	if (tx_ff_dev->is_hardware) {
+		tx_port = ff.ports_by_hw_id[tx_ff_dev->hw_id];
+
+		/* XXX: no lock, so this might return wrong result, or
+		 * the queue might become full after (if another cpu
+		 * is pushing to the same tx queue. We take the lock
+		 * later to ensure it won't happen */
+		if (__ff_tx_queue_full(tx_port->ff_txq) &&
+		    !__ff_tx_queue_can_reclaim(tx_port->ff_txq)) {
+			/* just rearm descriptor and fake success */
+			u32 flags;
+			flags = CPU_RX_DESC0_HW_OWNED_MASK |
+				(rx_port->pkt_size << CPU_RX_DESC0_LEN_SHIFT);
+			rx_desc->flags_len = cpu_to_be32(flags);
+			return true;
+		}
+	} else
+		tx_port = NULL;
+
+	/* can we allocate a new fragment to replace the descriptor we
+	 * are about to use ? */
+	if (tx_ff_dev->is_hardware) {
+		/* remember RX desc hw address before we reload it and
+		 * point if back to frag hw address */
+		buf_addr = be32_to_cpu(rx_desc->address);
+		buf_addr -= offset;
+		new_frag = ff_reclaim_any_sent_frag(rx_port->pkt_size,
+						    rx_ff_dev_idx,
+						    tx_ff_dev_idx);
+	} else {
+		buf_addr = 0;
+		new_frag = NULL;
+	}
+
+	if (rxq_refill_desc(rx_port, rxq, rx_desc_idx, true, new_frag)) {
+		/* just rearm descriptor and fake success */
+		u32 flags;
+		flags = CPU_RX_DESC0_HW_OWNED_MASK |
+			(rx_port->pkt_size << CPU_RX_DESC0_LEN_SHIFT);
+		rx_desc->flags_len = cpu_to_be32(flags);
+		return true;
+	}
+
+	if (ct && l4_is_tcp) {
+		/* don't try to track window anymore on this
+		 * connection */
+		ct->proto.tcp.no_window_track = 1;
+	}
+
+	/* alter l3 & l4 content if needed (routing only) */
+	if (l3_is_ipv4 && e) {
+		struct iphdr *iph = (struct iphdr *)l3_hdr;
+
+		if (e->manip.alter) {
+			if (l4_is_tcp) {
+				struct tcphdr *tcph = (struct tcphdr *)l4_hdr;
+				tcph->source = e->manip.new_sport;
+				tcph->dest = e->manip.new_dport;
+				tcph->check = csum16_sub(tcph->check,
+						 e->manip.l4_adjustment);
+			} else {
+				struct udphdr *udph = (struct udphdr *)l4_hdr;
+				udph->source = e->manip.new_sport;
+				udph->dest = e->manip.new_dport;
+				if (udph->check) {
+					u16 tcheck;
+
+					tcheck = csum16_sub(udph->check,
+						    e->manip.l4_adjustment);
+					udph->check = tcheck ? tcheck : 0xffff;
+				}
+			}
+
+			iph->saddr = e->manip.new_sip;
+			iph->daddr = e->manip.new_dip;
+		}
+
+		iph->ttl--;
+		iph->check = csum16_sub(iph->check,
+					e->manip.ip_adjustment);
+
+	} else if (!l3_is_ipv4 && e6) {
+		struct ipv6hdr *ip6hdr = (struct ipv6hdr *)l3_hdr;
+
+		if (e6->manip.alter) {
+			if (l4_is_tcp) {
+				struct tcphdr *tcph = (struct tcphdr *)l4_hdr;
+				tcph->source = e6->manip.new_sport;
+				tcph->dest = e6->manip.new_dport;
+				tcph->check = csum16_sub(tcph->check,
+							 e6->manip.adjustment);
+			} else {
+				struct udphdr *udph = (struct udphdr *)l4_hdr;
+				udph->source = e6->manip.new_sport;
+				udph->dest = e6->manip.new_dport;
+
+				if (udph->check) {
+					u16 tcheck;
+
+					tcheck = csum16_sub(udph->check,
+						    e6->manip.adjustment);
+					udph->check = tcheck ? tcheck : 0xffff;
+				}
+			}
+
+			memcpy(ip6hdr->saddr.s6_addr32, e6->manip.new_sip, 16);
+			memcpy(ip6hdr->daddr.s6_addr32, e6->manip.new_dip, 16);
+		}
+
+		ip6hdr->hop_limit--;
+	}
+
+	/* packet is ready to xmit */
+	switch (xmit_mode) {
+	case FF_XMIT_IPV4:
+		proto = ETH_P_IP;
+		break;
+
+	case FF_XMIT_IPV6:
+		proto = ETH_P_IPV6;
+		break;
+
+	case FF_XMIT_IPV6_IN_IPV4:
+	{
+		struct iphdr *tun_hdr;
+		/* prepend IPv4 */
+		tun_hdr = (struct iphdr *)((u8 *)l3_hdr - sizeof (*tun_hdr));
+		tun_hdr->ihl = 5;
+		tun_hdr->version = 4;
+		tun_hdr->tos = 0;
+		tun_hdr->tot_len = htons(pinfo.l3_plen + sizeof (*tun_hdr));
+		tun_hdr->id = 0;
+		tun_hdr->frag_off = 0;
+		tun_hdr->check = 0;
+		tun_hdr->ttl = 64;
+		tun_hdr->protocol = IPPROTO_IPV6;
+		tun_hdr->saddr = ff.tun.u.sit.src;
+		tun_hdr->daddr = tun_v4_dest;
+		tun_hdr->check = ip_fast_csum((u8 *)tun_hdr, 5);
+
+		l3_hdr = (u8 *)tun_hdr;
+		pinfo.l3_plen += sizeof (*tun_hdr);
+
+		proto = ETH_P_IP;
+		break;
+	}
+
+	case FF_XMIT_IPV4_IN_IPV6:
+	{
+		struct ipv6hdr *tun_6hdr;
+
+		/* prepend IPv6 */
+		tun_6hdr = (struct ipv6hdr *)((u8 *)l3_hdr - sizeof (*tun_6hdr));
+		tun_6hdr->version = 6;
+		tun_6hdr->priority = 0;
+		memset(tun_6hdr->flow_lbl, 0, sizeof (tun_6hdr->flow_lbl));
+		tun_6hdr->payload_len = htons(pinfo.l3_plen);
+		tun_6hdr->nexthdr = IPPROTO_IPIP;
+		tun_6hdr->hop_limit = 64;
+		tun_6hdr->saddr = ff.tun.u.map.src;
+		tun_6hdr->daddr = *tun_v6_pdest;
+
+		l3_hdr = (u8 *)tun_6hdr;
+		pinfo.l3_plen += sizeof (*tun_6hdr);
+
+		proto = ETH_P_IPV6;
+		break;
+	}
+	}
+
+	/* add vlan header if any */
+	l2_hdr = l3_hdr;
+	if (tx_ff_dev->vlan_id) {
+		struct vlan_hdr *vhdr;
+
+		l2_hdr -= VLAN_HLEN;
+		vhdr = (struct vlan_hdr *)l2_hdr;
+		vhdr->h_vlan_TCI = tx_ff_dev->vlan_id;
+		vhdr->h_vlan_encapsulated_proto = htons(proto);
+		proto = ETH_P_8021Q;
+	}
+
+	/* add protocol */
+	l2_hdr -= sizeof (*pproto);
+	pproto = (u16 *)l2_hdr;
+	*pproto = htons(proto);
+
+	/* add DSA header if any */
+	if (tx_ff_dev->use_dsa) {
+		u8 *brcm_tag;
+
+		l2_hdr -= BRCM_TAG_LEN;
+		brcm_tag = (u8 *)l2_hdr;
+		brcm_tag[0] = (1 << BRCM_OPCODE_SHIFT);
+		brcm_tag[1] = 0;
+		brcm_tag[2] = 0;
+		if (tx_ff_dev->dsa_port == 8)
+			brcm_tag[2] = (1 << 0);
+		brcm_tag[3] = (1 << tx_ff_dev->dsa_port);
+	}
+
+	/* finally add eth dst/src */
+	l2_hdr -= ETH_ALEN * 2;
+	eth = (struct ethhdr *)l2_hdr;
+	memcpy(eth->h_dest, dest_hw, 6);
+	memcpy(eth->h_source, tx_ff_dev->hwaddr, 6);
+
+	/* compute final len */
+	eth_len = pinfo.l3_plen + (l3_hdr - l2_hdr);
+
+	/* refresh conntrack */
+	if (ct) {
+		if (l4_is_tcp)
+			timeout = HZ * 3600 * 24 * 5;
+		else
+			timeout = HZ * 180;
+
+		if (ct->timeout - ff.jiffies < timeout - 10 * HZ) {
+			unsigned long newtime = ff.jiffies + timeout;
+			ct->timeout = newtime;
+		}
+	}
+
+	if (tx_ff_dev->is_hardware) {
+		if (ff_send(tx_port,
+			    tx_port->ff_txq,
+			    buf_addr + (void *)eth - frag,
+			    frag,
+			    rx_port->pkt_size,
+			    eth_len,
+			    tx_ff_dev->use_dsa ? BRCM_TAG_LEN : 0)) {
+			skb_free_frag(frag);
+			return true;
+		}
+
+		if (tx_ff_dev->vlan_id) {
+			struct vlan_dev_priv *vlan = vlan_dev_priv(tx_ff_dev->netdev);
+			struct vlan_pcpu_stats *stats;
+			stats = this_cpu_ptr(vlan->vlan_pcpu_stats);
+			u64_stats_inc(&stats->tx_packets);
+			u64_stats_add(&stats->tx_bytes, eth_len);
+		} else {
+			tx_ff_dev->netdev->stats.tx_packets++;
+			tx_ff_dev->netdev->stats.tx_bytes += eth_len;
+		}
+
+		if (tx_port->netdev != tx_ff_dev->netdev) {
+			tx_hw_stats = &tx_port->netdev->stats;
+			tx_hw_stats->tx_bytes += eth_len;
+			tx_hw_stats->tx_packets++;
+		}
+	} else {
+		struct sk_buff *skb;
+
+		skb = build_skb(frag, frag_size > PAGE_SIZE ? 0 : frag_size);
+		if (!skb) {
+			skb_free_frag(frag);
+			return true;
+		}
+
+		skb_reserve(skb, (void *)eth - frag);
+		skb_put(skb, eth_len);
+		skb->protocol = eth->h_proto;
+		skb_set_network_header(skb, l3_hdr - l2_hdr);
+		skb->dev = tx_ff_dev->netdev;
+		dev_queue_xmit(skb);
+	}
+
+	if (!ff_idx_is_wan(tx_ff_dev_idx) && tx_ff_dev->br_port) {
+		struct net_bridge *br;
+		br = netdev_priv(tx_ff_dev->br_netdev);
+
+		dev_sw_netstats_tx_add(br->dev, 1, eth_len);
+	}
+
+	return true;
+}
+
+/*
+ * sysfs "ff_enabled" read handler: report the current fastpath
+ * enable state as a decimal value.
+ */
+static ssize_t ff_show_enabled(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	/* sysfs_emit() is the bounds-checked helper recommended over
+	 * raw sprintf() for sysfs show callbacks */
+	return sysfs_emit(buf, "%u\n", ff_enabled);
+}
+
+/*
+ * sysfs "ff_enabled" write handler: parse a decimal value and switch
+ * the fastpath enable state, logging actual transitions only.
+ */
+static ssize_t ff_store_enabled(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	unsigned long val;
+
+	/* only a plain decimal integer is accepted */
+	if (kstrtoul(buf, 10, &val))
+		return -EINVAL;
+
+	/* nothing to do when the state does not change */
+	if (val == ff_enabled)
+		return len;
+
+	ff_enabled = val;
+	printk(KERN_NOTICE "ff: fastpath now %s\n",
+	       ff_enabled ? "enabled" : "disabled");
+	return len;
+}
+
+/* rw sysfs knob "ff_enabled" (0644) */
+static struct device_attribute dev_attr_ff =
+	__ATTR(ff_enabled, 0644, ff_show_enabled, ff_store_enabled);
+
+/*
+ * sysfs "ff_tun_dev" read handler: report the configured tunnel
+ * netdevice name.
+ */
+static ssize_t ff_show_tun_dev(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	/* sysfs_emit() bounds the write to the sysfs page buffer */
+	return sysfs_emit(buf, "%s\n", ff.tun_netdev_name);
+}
+
+/*
+ * sysfs "ff_tun_dev" write handler: change (or clear, on an empty
+ * write) the tunnel netdevice name and re-capture the device under
+ * the all-cpu fastpath lock.
+ */
+static ssize_t ff_store_tun_dev(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	if (!len || buf[0] == '\n') {
+		ff.tun_netdev_name[0] = 0;
+		ff_lock_all_cpu_bh();
+		__ff_tun_release();
+		ff_unlock_all_cpu_bh();
+		printk(KERN_NOTICE "ff: tun dev unset\n");
+		return len;
+	}
+
+	ff_lock_all_cpu_bh();
+	__ff_tun_release();
+	/*
+	 * strscpy() bounds the copy to the destination size and always
+	 * NUL terminates; the previous strncpy(..., len) could overflow
+	 * ff.tun_netdev_name for a long input and left the buffer
+	 * unterminated when the source filled it (strncpy() does not
+	 * append a NUL in that case).
+	 */
+	strscpy(ff.tun_netdev_name, buf, sizeof(ff.tun_netdev_name));
+	strim(ff.tun_netdev_name);
+	printk(KERN_NOTICE "ff: tun dev set to %s\n", ff.tun_netdev_name);
+	__ff_tun_capture();
+	ff_unlock_all_cpu_bh();
+	return len;
+}
+
+/* rw sysfs knob "ff_tun_dev" (0644) */
+static struct device_attribute dev_attr_tun =
+	__ATTR(ff_tun_dev, 0644, ff_show_tun_dev, ff_store_tun_dev);
+
+
+/*
+ * sysfs "ff_tun_sit_6rd_br" read handler: report the 6rd border
+ * relay IPv4 address in dotted-quad form.
+ */
+static ssize_t ff_show_tun_6rd_br(struct device *dev,
+				  struct device_attribute *attr,
+				  char *buf)
+{
+	/* sysfs_emit() bounds the write to the sysfs page buffer */
+	return sysfs_emit(buf, "%pI4\n", &ff.tun.sit_6rd_br);
+}
+
+/*
+ * sysfs "ff_tun_sit_6rd_br" write handler: set the 6rd border relay
+ * IPv4 address from a dotted-quad string; an empty write clears it.
+ */
+static ssize_t ff_store_tun_6rd_br(struct device *dev,
+				   struct device_attribute *attr,
+				   const char *buf, size_t len)
+{
+	/* empty input resets the border relay address */
+	if (len == 0 || *buf == '\n') {
+		ff.tun.sit_6rd_br = 0;
+		return len;
+	}
+
+	ff.tun.sit_6rd_br = in_aton(buf);
+	printk(KERN_NOTICE "ff: tun border relay set to %pI4\n",
+	       &ff.tun.sit_6rd_br);
+	return len;
+}
+
+/* rw sysfs knob "ff_tun_sit_6rd_br" (0644) */
+static struct device_attribute dev_attr_tun_6rd_br =
+	__ATTR(ff_tun_sit_6rd_br, 0644, ff_show_tun_6rd_br,
+	       ff_store_tun_6rd_br);
+
+/*
+ * sysfs "ff_wan_dev" read handler: report the requested wan
+ * netdevice name.
+ */
+static ssize_t ff_show_wan_dev(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	/* sysfs_emit() bounds the write to the sysfs page buffer */
+	return sysfs_emit(buf, "%s\n", ff.wan_netdev_name);
+}
+
+/*
+ * sysfs "ff_wan_dev" write handler: change (or clear, on an empty
+ * write) the requested wan device name and re-run active wan
+ * selection under the all-cpu fastpath lock.
+ */
+static ssize_t ff_store_wan_dev(struct device *dev,
+				struct device_attribute *attr,
+				const char *buf, size_t len)
+{
+	if (!len || buf[0] == '\n') {
+		ff.wan_netdev_name[0] = 0;
+		ff_lock_all_cpu_bh();
+		__ff_select_active_wan();
+		ff_unlock_all_cpu_bh();
+		printk(KERN_NOTICE "ff: requested wan dev unset\n");
+		return len;
+	}
+
+	ff_lock_all_cpu_bh();
+	/* first drop the current selection, then pick the new name */
+	ff.wan_netdev_name[0] = 0;
+	__ff_select_active_wan();
+	/*
+	 * strscpy() bounds the copy to the destination size and always
+	 * NUL terminates; the previous strncpy(..., len) could overflow
+	 * ff.wan_netdev_name for a long input and left the buffer
+	 * unterminated when the source filled it.
+	 */
+	strscpy(ff.wan_netdev_name, buf, sizeof(ff.wan_netdev_name));
+	strim(ff.wan_netdev_name);
+	printk(KERN_NOTICE "ff: requested wan dev set to %s\n",
+	       ff.wan_netdev_name);
+	__ff_select_active_wan();
+	ff_unlock_all_cpu_bh();
+	return len;
+}
+
+/* rw sysfs knob "ff_wan_dev" (0644) */
+static struct device_attribute dev_attr_wan =
+	__ATTR(ff_wan_dev, 0644, ff_show_wan_dev, ff_store_wan_dev);
+
+/*
+ * one-time fastpath initialization: set up the per-cpu locks and
+ * publish the sysfs control files on @dev.
+ */
+static void ff_init(struct device *dev)
+{
+	static bool done;
+	int err = 0;
+	int i;
+
+	/* may be reached from several probe paths, only run once */
+	if (done)
+		return;
+
+	for_each_possible_cpu(i) {
+		spinlock_t *lock = &per_cpu(ff_plock, i);
+		spin_lock_init(lock);
+	}
+
+	/* -1 means "no active wan device selected yet" */
+	ff.wan_active_dev = -1;
+
+	/* device_create_file() is __must_check: collect failures and
+	 * warn once, the driver stays functional without the knobs */
+	err |= device_create_file(dev, &dev_attr_ff);
+	err |= device_create_file(dev, &dev_attr_wan);
+	err |= device_create_file(dev, &dev_attr_tun);
+	err |= device_create_file(dev, &dev_attr_tun_6rd_br);
+	if (err)
+		dev_warn(dev, "failed to create some ff sysfs files\n");
+
+	printk(KERN_DEBUG "ff_init\n");
+	done = true;
+}
+#endif
+
+
+/*
+ * free all rx buffers and the rx descriptor ring of @rxq.
+ *
+ * Safe to call on a partially initialized queue: every resource is
+ * checked before being released.
+ */
+static void rxq_deinit(struct bcm_enet_runner_priv *priv, struct rx_queue *rxq)
+{
+	int i;
+
+	for (i = 0; i < rxq->ring_size; i++) {
+		struct rx_desc *desc;
+		dma_addr_t addr;
+
+		/* skip slots that never got a buffer */
+		if (!rxq->frags || !rxq->frags[i])
+			continue;
+
+		desc = &rxq->rx_desc_area[i];
+		addr = be32_to_cpu(desc->address);
+		dma_unmap_single(priv->netdev->dev.parent,
+				 addr,
+				 priv->pkt_size,
+				 DMA_FROM_DEVICE);
+		skb_free_frag(rxq->frags[i]);
+	}
+
+	if (rxq->rx_desc_area)
+		dma_free_coherent(priv->netdev->dev.parent,
+				  rxq->rx_desc_area_size,
+				  rxq->rx_desc_area,
+				  rxq->rx_desc_dma);
+	/* kfree(NULL) is a no-op, no need to test first */
+	kfree(rxq->frags);
+}
+
+/*
+ * allocate and fill the rx descriptor ring of queue @index.
+ *
+ * Returns 0 on success or -ENOMEM, in which case everything that was
+ * allocated has already been released again.
+ */
+static int rxq_init(struct bcm_enet_runner_priv *priv, int index)
+{
+	struct rx_queue *rxq = priv->rxq + index;
+	int size;
+	int i;
+
+	memset(rxq, 0, sizeof (*rxq));
+	rxq->index = index;
+	rxq->ring_size = priv->rxq_size;
+
+	size = rxq->ring_size * sizeof (struct rx_desc);
+	rxq->rx_desc_area_size = size;
+	rxq->rx_desc_area = dma_alloc_coherent(priv->netdev->dev.parent,
+					       size, &rxq->rx_desc_dma,
+					       GFP_KERNEL);
+
+	if (rxq->rx_desc_area == NULL) {
+		netdev_err(priv->netdev,
+			   "can't allocate rx ring (%d bytes)\n", size);
+		goto out;
+	}
+
+	memset(rxq->rx_desc_area, 0, size);
+
+	/* kcalloc() checks the count * size multiplication for
+	 * overflow, unlike the open-coded kzalloc(a * b) */
+	rxq->frags = kcalloc(rxq->ring_size, sizeof (*rxq->frags),
+			     GFP_KERNEL);
+	if (!rxq->frags) {
+		netdev_err(priv->netdev, "can't allocate rx frags\n");
+		goto out;
+	}
+
+	for (i = 0; i < rxq->ring_size; i++) {
+		int ret;
+
+		/* refill with bh disabled, as done on the rx path */
+		local_bh_disable();
+		ret = rxq_refill_desc(priv, rxq, i, false, NULL);
+		local_bh_enable();
+		if (ret)
+			goto out;
+	}
+
+	return 0;
+
+out:
+	rxq_deinit(priv, rxq);
+	return -ENOMEM;
+}
+
+/*
+ * NAPI rx processing for one queue: consume up to @budget completed
+ * rx descriptors.  Each packet is first offered to the fastpath
+ * (ff_receive) when compiled in; otherwise (or when the fastpath
+ * declines) an skb is built around the rx fragment and pushed into
+ * the regular network stack.
+ *
+ * Returns the number of descriptors processed (<= @budget).
+ */
+static int bcm_runner_do_rx(struct bcm_enet_runner_priv *priv,
+			    struct rx_queue *rxq, int budget)
+{
+	struct net_device_stats *stats = &priv->netdev->stats;
+	int rx_done;
+	u32 rcvd_pkts = 0;
+	u32 rcvd_bytes = 0;
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	/* serialize with other users of this cpu's fastpath state */
+	ff_lock_this_cpu();
+#endif
+
+	rx_done = 0;
+	while (rx_done < budget) {
+		struct rx_desc *rx_desc;
+		struct sk_buff *skb;
+		void *frag;
+		u32 flags;
+		unsigned int idx, pkt_len;
+		int ret;
+
+		idx = rxq->rx_curr_desc;
+		rx_desc = &rxq->rx_desc_area[idx];
+
+		/* do not read descriptor content ahead of the
+		 * ownership check below */
+		rmb();
+
+		flags = be32_to_cpu(rx_desc->flags_len);
+
+		rxdbg("bcm_runner_do_rx: idx:%u flags:0x%08x\n",
+		      idx, flags);
+
+		/* stop at the first descriptor still owned by hw */
+		if ((flags & CPU_RX_DESC0_HW_OWNED_MASK))
+			break;
+
+		pkt_len = (flags & CPU_RX_DESC0_LEN_MASK) >>
+			CPU_RX_DESC0_LEN_SHIFT;
+		/* hardware length includes the ethernet FCS */
+		pkt_len -= ETH_FCS_LEN;
+		frag = rxq->frags[idx];
+
+		rxq->rx_curr_desc++;
+		rx_done++;
+		if (rxq->rx_curr_desc == rxq->ring_size)
+			rxq->rx_curr_desc = 0;
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+		/* fastpath first: on a nonzero return the fragment is
+		 * assumed to have been taken over by the fastpath (it
+		 * is neither freed nor refilled here) */
+		rcu_read_lock();
+		ret = ff_receive(priv,
+				 rxq,
+				 idx,
+				 frag,
+				 priv->frag_size,
+				 RX_OFFSET,
+				 pkt_len);
+		rcu_read_unlock();
+		if (ret) {
+			rcvd_pkts++;
+			rcvd_bytes += pkt_len;
+			continue;
+		}
+#endif
+
+#ifdef DBG_RX_DISCARD_ALL
+		/* re-arm with old buffer */
+		flags = CPU_RX_DESC0_HW_OWNED_MASK |
+			(priv->pkt_size << CPU_RX_DESC0_LEN_SHIFT);
+		rx_desc->flags_len = cpu_to_be32(flags);
+		rcvd_pkts++;
+		rcvd_bytes += pkt_len;
+		continue;
+#endif
+
+		/* give the descriptor a fresh buffer before handing
+		 * the current fragment to the stack */
+		ret = rxq_refill_desc(priv, rxq, idx, true, NULL);
+		if (ret) {
+			netdev_err(priv->netdev, "oom while refill\n");
+			stats->rx_packets++;
+			stats->rx_dropped++;
+
+			/* re-arm with old buffer */
+			flags = CPU_RX_DESC0_HW_OWNED_MASK |
+				(priv->pkt_size << CPU_RX_DESC0_LEN_SHIFT);
+			rx_desc->flags_len = cpu_to_be32(flags);
+			continue;
+		}
+
+		/* descriptor is re-armed now */
+		skb = build_skb(frag,
+				priv->frag_size > PAGE_SIZE ?
+				0 : priv->frag_size);
+		if (!skb) {
+			skb_free_frag(frag);
+			stats->rx_dropped++;
+			continue;
+		}
+
+		skb_reserve(skb, RX_OFFSET);
+		skb_put(skb, pkt_len);
+
+		rcvd_pkts++;
+		rcvd_bytes += pkt_len;
+		skb->protocol = eth_type_trans(skb, priv->netdev);
+		netif_receive_skb(skb);
+	}
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	ff_unlock_this_cpu();
+#endif
+
+	if (rcvd_pkts) {
+		stats->rx_packets += rcvd_pkts;
+		stats->rx_bytes += rcvd_bytes;
+	}
+
+	return rx_done;
+}
+
+/*
+ * try to or force reclaim of transmitted buffers
+ *
+ * Releases at most one descriptor from the head of @txq; all callers
+ * hold txq->tx_lock.  Returns 1 when a descriptor was released, 0
+ * when the ring is empty or the head is still owned by hardware (the
+ * ownership check is bypassed when @force is set, e.g. at teardown).
+ */
+static int __bcm_runner_tx_reclaim_one(struct bcm_enet_runner_priv *priv,
+				       struct net_device *dev,
+				       struct tx_queue *txq,
+				       int force)
+{
+	struct tx_desc *desc;
+	void *pdata_ptr;
+	dma_addr_t address;
+	u32 flags_len;
+	unsigned int dirty_desc;
+	unsigned int pdata_len;
+
+	/* dirty == cur means nothing is in flight */
+	if (txq->tx_dirty_desc == txq->tx_cur_desc) {
+		txdbg("bcm_runner_tx_reclaim[q%d]: reach end of desc to reclaim\n",
+		      txq->index);
+		return 0;
+	}
+
+	dirty_desc = txq->tx_dirty_desc;
+	desc = &txq->tx_desc_area[dirty_desc];
+	flags_len = be32_to_cpu(desc->flags_len);
+
+	if (!force && (flags_len & CPU_TX_DESC0_HW_OWNED_MASK)) {
+		txdbg("bcm_runner_tx_reclaim[q%d]: tx desc %u owned by hw\n",
+		      txq->index, dirty_desc);
+		return 0;
+	}
+
+	/* ensure other field of the descriptor were not read before
+	 * we checked ownership */
+	rmb();
+
+	pdata_ptr = txq->tx_desc_pdata[dirty_desc].data;
+	pdata_len = txq->tx_desc_pdata[dirty_desc].len;
+	txq->tx_desc_pdata[dirty_desc].data = NULL;
+
+	txdbg("tx_reclaim[q%u]: dirty_desc:%u skb:%pS => free\n",
+	      txq->index, dirty_desc, pdata_ptr);
+
+	address = be32_to_cpu(desc->address);
+
+	dirty_desc++;
+	if (unlikely(dirty_desc >= txq->ring_size))
+		dirty_desc = 0;
+
+	txq->tx_dirty_desc = dirty_desc;
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	/* fastpath queue entries are page fragments, freed directly.
+	 * NOTE(review): no dma_unmap here, unlike the skb path below —
+	 * presumably handled by the fastpath tx setup; confirm */
+	if (txq == priv->ff_txq)
+		skb_free_frag(pdata_ptr);
+	else
+#endif
+	{
+		dma_unmap_single(dev->dev.parent,
+				 address, pdata_len,
+				 DMA_TO_DEVICE);
+		dev_kfree_skb((struct sk_buff *)pdata_ptr);
+	}
+	return 1;
+}
+
+/*
+ * try to or force reclaim of transmitted buffers
+ *
+ * Releases up to @budget sent descriptors of @txq, then — unless
+ * @force or nothing was released — wakes the matching netdev tx
+ * queue if it was stopped and ring space is available again.
+ * Returns the number of descriptors released.
+ */
+static int bcm_runner_tx_reclaim(struct bcm_enet_runner_priv *priv,
+				 struct net_device *dev,
+				 struct tx_queue *txq,
+				 int budget,
+				 int force)
+{
+	struct netdev_queue *netdev_txq;
+	unsigned int cur_desc, next_desc;
+	int released;
+
+	txdbg("bcm_runner_tx_reclaim[q%d]: budget:%u\n",  txq->index, budget);
+
+	released = 0;
+	while (released < budget) {
+		int done;
+
+		/* We run in a bh and fight against start_xmit, which
+		 * is called with bh disabled */
+		spin_lock(&txq->tx_lock);
+		done = __bcm_runner_tx_reclaim_one(priv, dev, txq, force);
+		spin_unlock(&txq->tx_lock);
+		if (!done)
+			break;
+
+		released++;
+	}
+
+	/* no wake-up needed when forcing (teardown) or idle */
+	if (force || !released)
+		goto end;
+
+	netdev_txq = netdev_get_tx_queue(dev, txq->index);
+	if (!netif_tx_queue_stopped(netdev_txq))
+		goto end;
+
+	/* recheck in case xmit already filled all available space */
+	spin_lock(&txq->tx_lock);
+	cur_desc = txq->tx_cur_desc;
+	next_desc = cur_desc + 1;
+	if (unlikely(next_desc >= txq->ring_size))
+		next_desc = 0;
+
+	if (next_desc != txq->tx_dirty_desc)
+		netif_tx_wake_queue(netdev_txq);
+	spin_unlock(&txq->tx_lock);
+
+end:
+	return released;
+}
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+/*
+ * low priority kthread reclaiming buffers sent on the fastpath tx
+ * queue: that queue is excluded from the tx-done irq handling (see
+ * bcm_runner_mode_start), so it is polled every 10ms, releasing as
+ * many completed descriptors as possible each round.
+ */
+static int ff_reclaim_threadfn(void *data)
+{
+	struct bcm_enet_runner_priv *priv = data;
+
+	set_user_nice(current, MAX_NICE);
+
+	while (!kthread_should_stop()) {
+		while (!kthread_should_stop()) {
+			int done = 0;
+
+			/* trylock only: never spin against the hot
+			 * xmit/reclaim paths */
+			if (spin_trylock_bh(&priv->ff_txq->tx_lock)) {
+				done = __bcm_runner_tx_reclaim_one(
+					priv,
+					priv->netdev,
+					priv->ff_txq,
+					0);
+				spin_unlock_bh(&priv->ff_txq->tx_lock);
+			}
+
+			/* nothing reclaimed: sleep until next round */
+			if (!done)
+				break;
+			schedule();
+		}
+		msleep(10);
+	}
+
+	return 0;
+}
+#endif
+
+/*
+ * dying gasp notifier: build and synchronously transmit one last
+ * frame on tx queue 0 while power is failing.
+ *
+ * this is called with interrupts disabled, and all CPUs beside the
+ * current one are also blocked in a loop with interrupt disable, so
+ * no locks are taken and busy-waiting is the only option.
+ */
+static int bcm_enet_runner_dgasp_handler(struct notifier_block *nb,
+					 unsigned long action, void *data)
+{
+	const struct bcm_xrdp_enet_params *params;
+	struct bcm_enet_runner_priv *priv;
+	struct tx_queue *txq;
+	struct ethhdr *eth;
+	unsigned int cur_desc, next_desc;
+	struct tx_desc *tx_desc;
+	dma_addr_t address;
+	u32 flags_len;
+	u8 *buf;
+	int dlen;
+
+	priv = container_of(nb, struct bcm_enet_runner_priv, dgasp_nb);
+	if (!netif_carrier_ok(priv->netdev))
+		return 0;
+
+	buf = kmalloc(priv->pkt_size, GFP_ATOMIC);
+	if (!buf) {
+		printk("failed to alloc dgasp data\n");
+		return 0;
+	}
+
+	/* let the current mode generate the frame content */
+	dlen = priv->mode_ops->dgasp_gen_data(priv->mode_priv,
+					      buf, priv->pkt_size);
+	if (dlen < 0) {
+		printk("failed to gen dgasp data\n");
+		kfree(buf);
+		return 0;
+	}
+
+	eth = (struct ethhdr *)buf;
+
+	/* respect the mode's ethertype filter, as in start_xmit */
+	if (unlikely((priv->mode_ops->can_send &&
+		      !priv->mode_ops->can_send(priv->mode_priv,
+						ntohs(eth->h_proto))))) {
+		kfree(buf);
+		return 0;
+	}
+
+	params = &priv->xrdp_params;
+	txq = &priv->txq[0];
+
+	/* pad small packets */
+	if (dlen < 60) {
+		memset(buf + dlen, 0, 60 - dlen);
+		dlen = 60;
+	}
+
+	/* map buffer */
+	address = dma_map_single(priv->netdev->dev.parent,
+				 buf,
+				 dlen,
+				 DMA_TO_DEVICE);
+
+	if (unlikely(dma_mapping_error(priv->netdev->dev.parent, address))) {
+		printk("dma_map_single failed\n");
+		kfree(buf);
+		return 0;
+	}
+
+	/* make sure we have room */
+	cur_desc = txq->tx_cur_desc;
+	next_desc = cur_desc + 1;
+	if (next_desc >= txq->ring_size)
+		next_desc = 0;
+
+	if (next_desc == txq->tx_dirty_desc) {
+		/* queue is full, poll until the dirty desc is sent;
+		 * we cannot sleep or take interrupts here */
+		tx_desc = &txq->tx_desc_area[txq->tx_dirty_desc];
+		while (1) {
+			flags_len = be32_to_cpu(tx_desc->flags_len);
+			if (!(flags_len & CPU_TX_DESC0_HW_OWNED_MASK))
+				break;
+		}
+	}
+
+	/* update descriptor index */
+	flags_len = CPU_TX_DESC0_HW_OWNED_MASK |
+		(dlen << CPU_TX_DESC0_LEN_SHIFT);
+
+	tx_desc = &txq->tx_desc_area[cur_desc];
+	tx_desc->address = cpu_to_be32(address);
+	/* publish the buffer address before granting hw ownership */
+	wmb();
+	tx_desc->flags_len = cpu_to_be32(flags_len);
+
+	/* kick tx dma */
+	bcm_xrdp_api_wakeup(priv->xrdp,
+			    params->tx_core_id,
+			    params->txq_wakeup_thread[0]);
+
+	/* NOTE(review): buf is neither unmapped nor freed after the
+	 * handoff to hardware — presumably acceptable since the
+	 * system is powering off; confirm */
+	return 0;
+}
+
+/*
+ * read and acknowledge the pending irq status bits belonging to this
+ * device, returning the collected work mask (0 when idle).
+ */
+static u32 collect_work(struct bcm_enet_runner_priv *priv)
+{
+	const struct bcm_xrdp_enet_params *params = &priv->xrdp_params;
+	u32 pending;
+
+	/* the status register is shared: keep only our bits */
+	pending = bcm_xrdp_api_irq_read_status(priv->xrdp,
+					       params->rx_core_id);
+	pending &= priv->irq_mask;
+	if (!pending)
+		return 0;
+
+	/* ack the consumed bits, then read back (presumably to flush
+	 * the posted write — matches the original code) */
+	bcm_xrdp_api_irq_write_status(priv->xrdp, params->rx_core_id,
+				      pending);
+	(void)bcm_xrdp_api_irq_read_status(priv->xrdp,
+					   params->rx_core_id);
+	return pending;
+}
+
+/*
+ * NAPI poll callback: collect pending irq status bits ("work") and
+ * service tx reclaim and rx in interleaved batches of at most 16
+ * descriptors until @budget is exhausted or no work remains; when
+ * everything is drained, complete NAPI and re-enable interrupts.
+ */
+static int bcm_runner_poll(struct napi_struct *napi, int budget)
+{
+	struct bcm_enet_runner_priv *priv;
+	struct net_device *dev;
+	struct bcm_xrdp_enet_params *params;
+	int work_done;
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	/* refresh the time base used by the fastpath conntrack update */
+	ff.jiffies = nfct_time_stamp;
+#endif
+
+	priv = container_of(napi, struct bcm_enet_runner_priv, napi);
+	params = &priv->xrdp_params;
+	dev = priv->netdev;
+
+	work_done = 0;
+
+	txdbg("bcm_runner_poll: enter budget:%d work_todo:%08x work_batch:%08x\n",
+	      budget, priv->work_todo, priv->work_batch);
+
+	/* work_todo holds the status bits believed active across
+	 * polls; work_batch the bits left in the current pass */
+	while (work_done < budget) {
+		int small_budget, done, i;
+
+		/* collect work todo */
+		if (!priv->work_batch) {
+			if (!priv->work_todo) {
+				priv->work_todo = collect_work(priv);
+				txdbg("bcm_runner_poll: colloect work_todo:%08x\n",
+					 priv->work_todo);
+				if (!priv->work_todo)
+					break;
+			}
+
+			priv->work_batch = priv->work_todo;
+		}
+
+		txdbg("bcm_runner_poll: loop work_todo:%08x work_batch:%08x\n",
+			 priv->work_todo, priv->work_batch);
+
+		/* small batches keep tx and rx fairly interleaved */
+		small_budget = budget - work_done;
+		if (small_budget > 16)
+			small_budget = 16;
+
+		for (i = 0; i < dev->real_num_tx_queues; i++) {
+			if (!(priv->work_batch & params->tx_done_irq_mask[i]))
+				continue;
+
+			txdbg("bcm_runner_poll: tx done work for queue %u\n", i);
+			priv->work_batch &= ~params->tx_done_irq_mask[i];
+
+			/* reclaim sent skb */
+			done = bcm_runner_tx_reclaim(priv, dev,
+						     &priv->txq[i],
+						     small_budget, 0);
+
+			txdbg("bcm_runner_poll: tx reclaim done => %u\n", done);
+			/* if we reclaimed everything, clear the work bit */
+			if (done < small_budget)
+				priv->work_todo &= ~params->tx_done_irq_mask[i];
+			small_budget -= done;
+			work_done += done;
+		}
+
+		if (!small_budget)
+			continue;
+
+		for (i = 0; i < dev->real_num_rx_queues; i++) {
+			if (!(priv->work_batch & params->rx_irq_mask[i]))
+				continue;
+
+			txdbg("bcm_runner_poll: rx done work for queue %u\n", i);
+			priv->work_batch &= ~params->rx_irq_mask[i];
+
+			/* do rx */
+			done = bcm_runner_do_rx(priv, &priv->rxq[i],
+						small_budget);
+
+			/* if we reclaimed everything, clear the work bit */
+			if (done < small_budget)
+				priv->work_todo &= ~params->rx_irq_mask[i];
+
+			small_budget -= done;
+			work_done += done;
+		}
+	}
+
+	if (work_done < budget && !priv->work_todo) {
+		txdbg("bcm_runner_poll: all work done\n");
+
+		/* no more packet in rx/tx queue, remove device from
+		 * poll queue */
+		napi_complete_done(napi, work_done);
+
+		/* restore rx/tx interrupt */
+		bcm_xrdp_api_irq_mask_set(priv->xrdp,
+					  params->rx_core_id, priv->irq_mask);
+	}
+
+	return work_done;
+}
+
+/*
+ * release all pending tx buffers and the tx descriptor ring of @txq.
+ *
+ * Safe to call on a partially initialized queue.
+ */
+static void txq_deinit(struct bcm_enet_runner_priv *priv, struct tx_queue *txq)
+{
+	unsigned int i;
+
+	for (i = 0; i < txq->ring_size; i++) {
+		struct tx_desc *desc;
+		dma_addr_t addr;
+		void *pdata_ptr;
+		unsigned int pdata_len;
+
+		if (!txq->tx_desc_pdata)
+			continue;
+
+		/* skip slots with nothing in flight */
+		pdata_ptr = txq->tx_desc_pdata[i].data;
+		if (!pdata_ptr)
+			continue;
+
+		pdata_len = txq->tx_desc_pdata[i].len;
+		desc = &txq->tx_desc_area[i];
+		addr = be32_to_cpu(desc->address);
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+		/* fastpath entries are page fragments, not mapped skbs */
+		if (txq == priv->ff_txq)
+			skb_free_frag(pdata_ptr);
+		else
+#endif
+		{
+			dma_unmap_single(priv->netdev->dev.parent,
+					 addr, pdata_len,
+					 DMA_TO_DEVICE);
+			dev_kfree_skb((struct sk_buff *)pdata_ptr);
+		}
+
+	}
+
+	if (txq->tx_desc_area)
+		dma_free_coherent(priv->netdev->dev.parent,
+				  txq->tx_desc_area_size,
+				  txq->tx_desc_area,
+				  txq->tx_desc_dma);
+	/* kfree(NULL) is a no-op, no need to test first */
+	kfree(txq->tx_desc_pdata);
+}
+
+/*
+ * allocate the tx descriptor ring and per-slot bookkeeping array of
+ * queue @index.
+ *
+ * Returns 0 on success or -ENOMEM, in which case everything that was
+ * allocated has already been released again.
+ */
+static int txq_init(struct bcm_enet_runner_priv *priv, int index)
+{
+	struct tx_queue *txq = priv->txq + index;
+	int size;
+
+	memset(txq, 0, sizeof (*txq));
+	spin_lock_init(&txq->tx_lock);
+	txq->index = index;
+	txq->ring_size = priv->txq_size;
+
+	size = txq->ring_size * sizeof (struct tx_desc);
+	txq->tx_desc_area_size = size;
+	txq->tx_desc_area = dma_alloc_coherent(priv->netdev->dev.parent,
+					       size, &txq->tx_desc_dma,
+					       GFP_KERNEL);
+
+	if (txq->tx_desc_area == NULL) {
+		netdev_err(priv->netdev,
+			   "can't allocate tx ring (%d bytes)\n", size);
+		goto out;
+	}
+
+	memset(txq->tx_desc_area, 0, size);
+
+	/* kcalloc() checks the count * size multiplication for
+	 * overflow, unlike the open-coded kzalloc(a * b) */
+	txq->tx_desc_pdata = kcalloc(txq->ring_size,
+				     sizeof (*txq->tx_desc_pdata),
+				     GFP_KERNEL);
+	if (!txq->tx_desc_pdata) {
+		netdev_err(priv->netdev, "can't allocate tx skbs ring\n");
+		goto out;
+	}
+
+	return 0;
+
+out:
+	txq_deinit(priv, txq);
+	return -ENOMEM;
+}
+
+/*
+ * ndo_select_queue: map a DSA-tagged skb to the tx queue configured
+ * for its egress switch port; everything else (and unmapped ports)
+ * goes through the kernel's default queue selection.
+ */
+static u16 bcm_runner_select_queue(struct net_device *dev, struct sk_buff *skb,
+				   struct net_device *sb_dev)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+	u16 mapping;
+	u8 txq_index;
+
+	if (!netdev_uses_dsa(dev))
+		return netdev_pick_tx(dev, skb, NULL);
+
+	/* the DSA tagging layer encoded the egress port in the
+	 * queue mapping */
+	mapping = skb_get_queue_mapping(skb);
+	txq_index = priv->txq_port_map[BRCM_TAG_GET_PORT(mapping)];
+
+	/* fall back to the default picker for out of range indices */
+	if (unlikely(txq_index >= priv->txq_count))
+		return netdev_pick_tx(dev, skb, NULL);
+
+	return txq_index;
+}
+
+/*
+ * ndo_start_xmit: queue one skb on its selected tx ring and kick the
+ * firmware.
+ *
+ * Short frames are padded to the 60 byte ethernet minimum (copying
+ * the skb when tailroom is missing).  Returns NETDEV_TX_OK in every
+ * case except a failed padding copy, where NETDEV_TX_BUSY asks the
+ * stack to requeue the untouched skb.
+ */
+static int bcm_runner_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+	struct bcm_xrdp_enet_params *params = &priv->xrdp_params;
+	struct tx_queue *txq;
+	struct netdev_queue *netdev_txq;
+	struct tx_desc *desc;
+	unsigned int cur_desc, next_desc;
+	dma_addr_t address;
+	u32 flags_len;
+	u16 queue;
+
+	/* some modes filter which ethertypes may be sent */
+	if (unlikely((priv->mode_ops->can_send &&
+		      !priv->mode_ops->can_send(priv->mode_priv,
+						ntohs(skb->protocol))))) {
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	queue = skb_get_queue_mapping(skb);
+	netdev_txq = netdev_get_tx_queue(dev, queue);
+	txq = &priv->txq[queue];
+
+	if (params->tx_need_batch)
+		sk_pacing_shift_update(skb->sk, 6);
+
+	/* pad small packets */
+	if (skb->len < 60) {
+		int needed = 60 - skb->len;
+
+		if (unlikely(skb_tailroom(skb) < needed)) {
+			struct sk_buff *nskb;
+
+			nskb = skb_copy_expand(skb, 0, needed, GFP_ATOMIC);
+			if (!nskb)
+				return NETDEV_TX_BUSY;
+
+			dev_kfree_skb(skb);
+			skb = nskb;
+		}
+		/* the return value (start of the padding area) is not
+		 * needed; the previous code kept it in an unused local */
+		skb_put_zero(skb, needed);
+	}
+
+	/* map buffer */
+	address = dma_map_single(dev->dev.parent,
+				 skb->data,
+				 skb->len,
+				 DMA_TO_DEVICE);
+
+	if (unlikely(dma_mapping_error(dev->dev.parent, address))) {
+		netdev_err(dev, "dma_map_single failed\n");
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* lock against tx reclaim */
+	spin_lock(&txq->tx_lock);
+
+	/* make sure we have room */
+	cur_desc = txq->tx_cur_desc;
+	next_desc = cur_desc + 1;
+	if (unlikely(next_desc >= txq->ring_size))
+		next_desc = 0;
+
+	txdbg("runnerxmit[q%u]: cur_desc:%u skb:%pS len:%u\n",
+		 txq->index,
+		 cur_desc, skb, skb->len);
+
+	if (unlikely(next_desc == txq->tx_dirty_desc)) {
+		/* queue was full */
+		netif_tx_stop_queue(netdev_txq);
+		spin_unlock(&txq->tx_lock);
+
+		netdev_err(dev, "tx queue full unexpected\n");
+		dma_unmap_single(dev->dev.parent,
+				 address,
+				 skb->len,
+				 DMA_TO_DEVICE);
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* point to the next available desc */
+	desc = &txq->tx_desc_area[cur_desc];
+	txq->tx_desc_pdata[cur_desc].data = skb;
+	txq->tx_desc_pdata[cur_desc].len = skb->len;
+
+	/* update descriptor index */
+	cur_desc = next_desc;
+	txq->tx_cur_desc = cur_desc;
+	next_desc = cur_desc + 1;
+	if (unlikely(next_desc >= txq->ring_size))
+		next_desc = 0;
+
+	/* check if queue is now full */
+	if (unlikely(next_desc == txq->tx_dirty_desc)) {
+		txdbg("runnerxmit[q%u]: queue now full (dirty_desc %u)\n",
+			 txq->index,
+			 txq->tx_dirty_desc);
+		netif_tx_stop_queue(netdev_txq);
+	}
+
+	/* fill current descriptor: publish the buffer address before
+	 * transferring ownership to hardware */
+	flags_len = CPU_TX_DESC0_HW_OWNED_MASK |
+		(skb->len << CPU_TX_DESC0_LEN_SHIFT);
+
+	desc->address = cpu_to_be32(address);
+	wmb();
+	desc->flags_len = cpu_to_be32(flags_len);
+	wmb();
+
+	spin_unlock(&txq->tx_lock);
+
+	/* kick tx dma */
+	bcm_xrdp_api_wakeup(priv->xrdp,
+			    params->tx_core_id,
+			    params->txq_wakeup_thread[0]);
+
+	dev->stats.tx_bytes += skb->len;
+	dev->stats.tx_packets++;
+	return NETDEV_TX_OK;
+}
+
+/*
+ * rx/tx interrupt handler: when a status bit we own is pending, mask
+ * our interrupts and hand the work to NAPI.
+ */
+static irqreturn_t bcm_enet_runner_isr(int irq, void *dev_id)
+{
+	struct bcm_enet_runner_priv *priv = dev_id;
+	struct bcm_xrdp_enet_params *params = &priv->xrdp_params;
+	u32 pending;
+
+	/* the status register is shared: check for our bits */
+	pending = bcm_xrdp_api_irq_read_status(priv->xrdp,
+					       params->rx_core_id);
+	txdbg("bcm_enet_runner_isr: val:%08x\n", pending);
+	if (!(pending & priv->irq_mask))
+		return IRQ_NONE;
+
+	/* mask until the poll loop has drained the queues */
+	bcm_xrdp_api_irq_mask_clear(priv->xrdp,
+				    params->rx_core_id, priv->irq_mask);
+	napi_schedule(&priv->napi);
+	return IRQ_HANDLED;
+}
+
+/*
+ * irq affinity change callback: remember the mask chosen from
+ * userspace so it can be re-applied later.
+ */
+static void
+bcm_enet_runner_irq_affinity_notify(struct irq_affinity_notify *notify,
+				    const cpumask_t *mask)
+{
+	struct queue_info *qinfo;
+
+	qinfo = container_of(notify, struct queue_info, affinity_notifier);
+	cpumask_copy(&qinfo->irq_affinity_mask, mask);
+}
+
+/*
+ * release callback for irq affinity notifier
+ *
+ * Intentionally empty: the notifier is embedded in struct queue_info
+ * whose lifetime is managed elsewhere, but a release callback is
+ * still provided for irq_set_affinity_notifier().
+ */
+static void
+bcm_enet_runner_irq_affinity_release(struct kref *ref)
+{
+}
+
+/*
+ * bring up the datapath for @interface: select and initialize the
+ * matching mode, allocate rx/tx rings, request irqs, program the
+ * firmware registers and start rx/tx.
+ *
+ * Called from phylink major reconfig, the netdevice ifup path or
+ * reset_link; on failure everything set up so far is torn down.
+ */
+static int bcm_runner_mode_start(struct bcm_enet_runner_priv *priv,
+				 phy_interface_t interface)
+{
+	struct net_device *dev = priv->netdev;
+	const struct bcm_enet_mode_ops *mode_ops;
+	struct bcm_xrdp_enet_params params;
+	unsigned int txq_with_irq_count;
+	struct bcm_dsa_port *dp;
+	void *mode_priv;
+	int rxq_ready = 0, txq_ready = 0;
+	int rxq_irq_ready = 0, txq_irq_ready = 0;
+	int ret, i, first_queue, mode_idx;
+	u32 bbh_id, val;
+
+	/*
+	 * select the correct mode for the given interface
+	 */
+	mode_idx = priv->port_ops->mode_select(interface);
+	if (WARN_ON(mode_idx < 0)) {
+		/* should not happen since the interface was
+		 * validated in phylink_validate() */
+		return -ENOTSUPP;
+	}
+
+	if (WARN_ON((unsigned)mode_idx >= priv->port_ops->mode_count))
+               return -EINVAL;
+
+	mode_ops = priv->port_ops->modes[mode_idx];
+	if (priv->port_ops->mode_count > 1)
+		netdev_info(dev, "switching to mode %s\n",
+			    mode_ops->name);
+
+	bbh_id = mode_ops->get_bbh_id(priv->port_priv);
+	ret = bcm_xrdp_api_get_enet_params(priv->xrdp, bbh_id, &params);
+	if (ret < 0) {
+		netdev_err(dev,
+			   "failed to get rdp params for bbh %u\n",  bbh_id);
+		return ret;
+	}
+
+	/* sanity check on parameters */
+	if (WARN_ON(params.rx_queue_count > ARRAY_SIZE(priv->rxq)))
+		return -EINVAL;
+
+	if (WARN_ON(params.tx_queue_count > ARRAY_SIZE(priv->txq)))
+		return -EINVAL;
+
+	/* do actual mode init */
+	mode_priv = mode_ops->init(priv->port_priv, &params);
+	if (IS_ERR(mode_priv)) {
+		netdev_err(dev, "failed to switch mode: %ld\n",
+			   PTR_ERR(mode_priv));
+		return PTR_ERR(mode_priv);
+	}
+
+	mode_ops->mtu_set(mode_priv, priv->pkt_size);
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	/* pick the fastpath hardware device id from the port type */
+	switch (priv->port_type) {
+	case BCM_RUNNER_PORT_UNIMAC:
+		priv->ff_hw_id = FF_HWDEV_ID_UNIMAC0 + bbh_id;
+		break;
+	case BCM_RUNNER_PORT_XPORT:
+		priv->ff_hw_id = FF_HWDEV_ID_FTTH;
+		break;
+	default:
+		/* NOTE(review): this early return skips
+		 * mode_ops->release(mode_priv), unlike the common
+		 * "goto out" error path below — looks like a leak of
+		 * the freshly initialized mode; confirm */
+		WARN(1, "unknown port type");
+		return -EINVAL;
+	}
+#endif
+
+	/* cap rxq & txq count to the hardware capabilities */
+	ret = netif_set_real_num_rx_queues(dev, params.rx_queue_count);
+	if (ret)
+		goto out;
+
+	priv->txq_count = params.tx_queue_count;
+	txq_with_irq_count = priv->txq_count;
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	/* the last hardware tx queue is reserved for the fastpath and
+	 * handled without a tx-done irq (see ff_reclaim_threadfn) */
+	txq_with_irq_count--;
+	/* NOTE(review): on this failure path ret still holds 0 from
+	 * the last successful call, so the function would report
+	 * success — should probably set a negative errno before the
+	 * goto; confirm */
+	if (WARN_ON(!txq_with_irq_count))
+		goto out;
+#endif
+
+	ret = netif_set_real_num_tx_queues(dev, txq_with_irq_count);
+	if (ret)
+		goto out;
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	/* created now, woken up only once everything is started */
+	priv->ff_reclaim_thread = kthread_create(ff_reclaim_threadfn,
+						 priv, "ff_reclaim_%s",
+						 dev->name);
+	if (IS_ERR(priv->ff_reclaim_thread)) {
+		ret = PTR_ERR(priv->ff_reclaim_thread);
+		priv->ff_reclaim_thread = NULL;
+		goto out;
+	}
+#endif
+
+	/* allocate rx rings */
+	for (i = 0; i < dev->real_num_rx_queues; i++) {
+		ret = rxq_init(priv, i);
+		if (ret)
+			goto out;
+		rxq_ready++;
+	}
+
+	/* allocate tx rings */
+	for (i = 0; i < priv->txq_count; i++) {
+		ret = txq_init(priv, i);
+		if (ret)
+			goto out;
+		txq_ready++;
+	}
+
+	/*
+	 * configure txq DSA port queue mapping
+	 *
+	 * for now, use 1 queue per port
+	 */
+	i = 0;
+	list_for_each_entry(dp, &priv->dsa_ports, next) {
+		struct tx_queue *txq;
+
+		if (i == dev->real_num_tx_queues) {
+			netdev_warn(dev, "too many DSA ports "
+				    "vs tx queue, ACB flow control "
+				    "will not work correctly\n");
+			break;
+		}
+
+		txq = &priv->txq[i];
+		txq->use_dsa = true;
+		txq->dsa_port = dp->port;
+		txq->dsa_imp_port = dp->imp_port;
+		txq->dsa_queue = 0;
+		netif_set_real_num_tx_queues(dp->slave_netdev, 1);
+		priv->txq_port_map[dp->port] = i;
+		i++;
+	}
+
+	/* assign same dsa data to remaining queues, but the kernel
+	 * should not use them */
+	first_queue = i;
+	if (first_queue) {
+		struct tx_queue *prev_txq = &priv->txq[first_queue - 1];
+
+		for (i = first_queue; i < priv->txq_count; i++) {
+			struct tx_queue *txq = &priv->txq[i];
+			txq->use_dsa = prev_txq->use_dsa;
+			txq->dsa_port = prev_txq->dsa_port;
+			txq->dsa_imp_port = prev_txq->dsa_imp_port;
+			txq->dsa_queue = prev_txq->dsa_queue;
+		}
+	}
+
+	/* interrupt clear */
+	priv->irq_mask = 0;
+	for (i = 0; i < dev->real_num_rx_queues; i++)
+		priv->irq_mask |= params.rx_irq_mask[i];
+	for (i = 0; i < dev->real_num_tx_queues; i++)
+		priv->irq_mask |= params.tx_done_irq_mask[i];
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	/* the fastpath owns the last tx queue */
+	priv->ff_txq = &priv->txq[priv->txq_count - 1];
+#endif
+
+	bcm_xrdp_api_irq_write_status(priv->xrdp,
+				      params.rx_core_id, priv->irq_mask);
+
+	/* request rx irqs and install affinity notifiers */
+	for (i = 0; i < dev->real_num_rx_queues; i++) {
+		struct queue_info *rxq_info = &priv->rxq_info[i];
+
+		scnprintf(rxq_info->irq_name,
+			  sizeof (rxq_info->irq_name),
+			  "%s-rx%u",
+			  dev->name, i);
+
+		ret = request_irq(params.rx_irq[i],
+				  bcm_enet_runner_isr,
+				  0, rxq_info->irq_name, priv);
+		if (ret) {
+			netdev_err(dev, "request_irq failed\n");
+			goto out;
+		}
+
+		rxq_info->affinity_notifier.notify =
+			bcm_enet_runner_irq_affinity_notify;
+		rxq_info->affinity_notifier.release =
+			bcm_enet_runner_irq_affinity_release;
+		irq_set_affinity_notifier(params.rx_irq[i],
+					  &rxq_info->affinity_notifier);
+		irq_set_affinity_hint(params.rx_irq[i],
+				      &rxq_info->irq_affinity_mask);
+		rxq_irq_ready++;
+	}
+
+	/* request tx irqs and install affinity notifiers */
+	for (i = 0; i < dev->real_num_tx_queues; i++) {
+		struct queue_info *txq_info = &priv->txq_info[i];
+
+		scnprintf(txq_info->irq_name,
+			  sizeof (txq_info->irq_name),
+			  "%s-tx%u",
+			  dev->name, i);
+
+		ret = request_irq(params.tx_irq[i],
+				  bcm_enet_runner_isr,
+				  0, txq_info->irq_name, priv);
+		if (ret) {
+			netdev_err(dev, "request_irq failed\n");
+			goto out;
+		}
+
+		txq_info->affinity_notifier.notify =
+			bcm_enet_runner_irq_affinity_notify;
+		txq_info->affinity_notifier.release =
+			bcm_enet_runner_irq_affinity_release;
+		irq_set_affinity_notifier(params.tx_irq[i],
+					  &txq_info->affinity_notifier);
+		irq_set_affinity_hint(params.tx_irq[i],
+				      &txq_info->irq_affinity_mask);
+		txq_irq_ready++;
+	}
+
+	/* all set, start rx/tx */
+	memcpy(&priv->xrdp_params, &params, sizeof (priv->xrdp_params));
+	priv->mode_ops = mode_ops;
+	priv->mode_priv = mode_priv;
+
+	/* setup firmware */
+	for (i = 0; i < dev->real_num_rx_queues; i++) {
+		struct rx_queue *rxq = &priv->rxq[i];
+
+		/* assign rx queue pointers & size */
+		fw_rx_reg_writel(priv, rxq->rx_desc_dma, RX_DESC_ADDRESS_REG);
+		fw_rx_reg_writel(priv, rxq->ring_size, RX_DESC_COUNT_REG);
+		fw_rx_reg_writel(priv, params.rx_irq_mask[i],
+				 RX_IRQ_MASK_REG);
+	}
+
+	/* setup TX */
+	fw_tx_reg_writeb(priv, params.tx_bbh_bbid,
+			 TX_BBH_BB_ID_REG);
+	fw_tx_reg_writeb(priv, params.tx_need_reporting,
+			 TX_EPON_REPORTING_REG);
+	fw_tx_reg_writeh(priv, params.tx_bbh_pd_queue_size,
+			 TX_BBH_PD_QUEUE_SIZE_REG);
+	fw_tx_reg_writeh(priv, params.tx_bbh_mdu_addr,
+			 TX_BBH_MDU_QUEUE_ADDR_REG);
+
+	for (i = 0; i < priv->txq_count; i++) {
+		struct tx_queue *txq = &priv->txq[i];
+		u16 acb_control;
+		unsigned int imp_port_mapped;
+
+		/* assign tx queue pointers & size */
+		fw_tx_reg_writel(priv, txq->tx_desc_dma,
+				 fw_txqoff(i, TXQ_OFF_DESC_ADDRESS_REG));
+		fw_tx_reg_writel(priv, txq->ring_size,
+				 fw_txqoff(i, TXQ_OFF_DESC_COUNT_REG));
+		fw_tx_reg_writel(priv, params.tx_done_irq_mask[i],
+				 fw_txqoff(i, TXQ_OFF_IRQ_MASK_REG));
+
+
+		if (!txq->use_dsa)
+			continue;
+
+		/* ACB flow control setup for DSA-mapped queues */
+		fw_tx_reg_writeb(priv, txq->use_dsa ? 1 : 0,
+				 fw_txqoff(i, TXQ_OFF_ACB_ENABLED_REG));
+		fw_tx_reg_writeb(priv, txq->dsa_port * 8 + txq->dsa_queue,
+				 fw_txqoff(i, TXQ_OFF_ACB_QIDX_REG));
+
+		imp_port_mapped = enet_fw_imp_port_map(txq->dsa_imp_port);
+		acb_control =
+			(imp_port_mapped << TXQ_ACBCTRL_IMP_PORT_SHIFT) |
+			(txq->dsa_port << TXQ_ACBCTRL_EGRESS_PORT_SHIFT) |
+			(txq->dsa_queue << TXQ_ACBCTRL_EGRESS_QUEUE_SHIFT);
+		fw_tx_reg_writeh(priv, acb_control,
+				 fw_txqoff(i, TXQ_OFF_ACB_CONTROL_REG));
+	}
+
+	/* unmask interrupt */
+	bcm_xrdp_api_irq_mask_set(priv->xrdp,
+				  params.rx_core_id, priv->irq_mask);
+	napi_enable(&priv->napi);
+
+	/* start firmware RX operation */
+	val = fw_rx_reg_readl(priv, RX_CONTROL_REG);
+	val |= RX_CONTROL_RX_EN_MASK;
+	fw_rx_reg_writel(priv, val, RX_CONTROL_REG);
+
+	/* kick FQM */
+	bcm_xrdp_api_wakeup(priv->xrdp,
+			    params.rx_core_id,
+			    params.rxq_fqm_wakeup_thread);
+
+	/* start firmware TX operation */
+	val = fw_tx_reg_readl(priv, TX_CONTROL_REG);
+	val |= TX_CONTROL_TX_EN_MASK;
+	fw_tx_reg_writel(priv, val, TX_CONTROL_REG);
+
+	netif_tx_start_all_queues(dev);
+
+	/* register dgasp notifier */
+	if (priv->mode_ops->dgasp_supported)
+		dgasp_notifier_chain_register(&priv->dgasp_nb);
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	/* ff can reference this while looking for opposite device */
+	ff_lock_all_cpu_bh();
+	WARN_ON(ff.ports_by_hw_id[priv->ff_hw_id] != NULL);
+	ff.ports_by_hw_id[priv->ff_hw_id] = priv;
+	ff_unlock_all_cpu_bh();
+
+	wake_up_process(priv->ff_reclaim_thread);
+
+	/*
+	 * this can be called from:
+	 *  1) phylink major reconfig
+	 *  2) netdevice ifup path
+	 *  3) reset_link_path
+	 *
+	 * the netdevice may either be up or not at this point
+	 */
+	ff_notifier_event(priv->netdev, NETDEV_CHANGE);
+#endif
+	return 0;
+
+out:
+	/* unwind exactly what was brought up so far */
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	if (priv->ff_reclaim_thread) {
+		kthread_stop(priv->ff_reclaim_thread);
+		priv->ff_reclaim_thread = NULL;
+	}
+#endif
+	for (i = 0; i < rxq_irq_ready; i++) {
+		irq_set_affinity_notifier(params.rx_irq[i], NULL);
+		irq_set_affinity_hint(params.rx_irq[i], NULL);
+		free_irq(params.rx_irq[i], priv);
+	}
+	for (i = 0; i < txq_irq_ready; i++) {
+		irq_set_affinity_notifier(params.tx_irq[i], NULL);
+		irq_set_affinity_hint(params.tx_irq[i], NULL);
+		free_irq(params.tx_irq[i], priv);
+	}
+
+	for (i = 0; i < rxq_ready; i++)
+		rxq_deinit(priv, priv->rxq + i);
+	for (i = 0; i < txq_ready; i++)
+		txq_deinit(priv, priv->txq + i);
+
+	mode_ops->release(mode_priv);
+	return ret;
+}
+
+/*
+ * Ask the XRDP whether the BBH TX queue used by this port has fully
+ * drained.
+ */
+bool bcm_runner_fw_bbh_is_empty(struct bcm_enet_runner_priv *priv)
+{
+	struct bcm_xrdp_enet_params *p = &priv->xrdp_params;
+	u32 bbh;
+
+	bbh = priv->mode_ops->get_bbh_id(priv->port_priv);
+	return bcm_xrdp_api_bbh_txq_is_empty(priv->xrdp, bbh,
+					     p->tx_bbh_queue_id);
+}
+
+/*
+ * Clear the firmware TX enable bit and kick the TX thread so it
+ * notices the request.
+ */
+void bcm_runner_fw_stop_tx(struct bcm_enet_runner_priv *priv)
+{
+	struct bcm_xrdp_enet_params *p = &priv->xrdp_params;
+	u32 ctrl;
+
+	ctrl = fw_tx_reg_readl(priv, TX_CONTROL_REG);
+	ctrl &= ~TX_CONTROL_TX_EN_MASK;
+	fw_tx_reg_writel(priv, ctrl, TX_CONTROL_REG);
+
+	bcm_xrdp_api_wakeup(priv->xrdp, p->tx_core_id,
+			    p->txq_wakeup_thread[0]);
+}
+
+/*
+ * Report whether the firmware has acknowledged the TX stop request
+ * (TX enable bit cleared in the status register).
+ */
+bool bcm_runner_fw_tx_is_stopped(struct bcm_enet_runner_priv *priv)
+{
+	return !(fw_tx_reg_readl(priv, TX_STATUS_REG) &
+		 TX_STATUS_TX_EN_MASK);
+}
+
+/*
+ * Stop firmware TX and poll (up to ~1-2 seconds) until the firmware
+ * reports TX disabled; log an error if it never does.
+ */
+void bcm_runner_fw_tx_stop_wait(struct bcm_enet_runner_priv *priv)
+{
+	unsigned int i;
+	bool stopped = false;
+
+	bcm_runner_fw_stop_tx(priv);
+	for (i = 0; i < 1000; i++) {
+		stopped = bcm_runner_fw_tx_is_stopped(priv);
+		if (stopped)
+			break;
+		usleep_range(1000, 2000);
+	}
+
+	if (!stopped)
+		netdev_err(priv->netdev, "failed to stop TX DMA\n");
+}
+
+/*
+ * Tear down a running mode: stop NAPI/TX, stop the firmware RX/TX
+ * machinery, release the mode, reclaim TX buffers, free IRQs and
+ * descriptor rings.  Mirrors (in reverse) the setup done in
+ * bcm_runner_mode_start().
+ */
+static void bcm_runner_mode_uninit(struct bcm_enet_runner_priv *priv)
+{
+	struct net_device *dev = priv->netdev;
+	struct bcm_xrdp_enet_params *params = &priv->xrdp_params;
+	unsigned int i, xfid;
+	u32 val;
+
+	/* unregister from dgasp notifier */
+	if (priv->mode_ops->dgasp_supported)
+		dgasp_notifier_chain_unregister(&priv->dgasp_nb);
+
+	/* prevent poll() from being called */
+	napi_disable(&priv->napi);
+
+	/* mask all interrupts */
+	bcm_xrdp_api_irq_mask_clear(priv->xrdp,
+				    params->rx_core_id, priv->irq_mask);
+
+	/* prevent hardxmit from being called */
+	netif_tx_disable(dev);
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	if (priv->ff_reclaim_thread) {
+		kthread_stop(priv->ff_reclaim_thread);
+		priv->ff_reclaim_thread = NULL;
+	}
+
+	/* remove ourselves from the fast-forward port table */
+	ff_lock_all_cpu_bh();
+	ff.ports_by_hw_id[priv->ff_hw_id] = NULL;
+	ff_unlock_all_cpu_bh();
+
+	/*
+	 * this can be called from:
+	 *  1) phylink major reconfig
+	 *  2) netdevice ifdown path
+	 *  3) reset_link_path
+	 *
+	 * it may have already been sent when coming from 2), but
+	 * there is no harm in doing it again
+	 */
+	ff_notifier_event(priv->netdev, NETDEV_GOING_DOWN);
+#endif
+
+	/* stop firmware RX operation & wait for RX enable status to
+	 * clear */
+	val = fw_rx_reg_readl(priv, RX_CONTROL_REG);
+	val &= ~RX_CONTROL_RX_EN_MASK;
+	fw_rx_reg_writel(priv, val, RX_CONTROL_REG);
+
+	/* wait for all XF first */
+	for (xfid = 0; xfid < ARRAY_SIZE(params->rxq_xf_wakeup_thread);
+	     xfid++) {
+		/* kick the XF thread so it observes the cleared enable
+		 * bit, then poll (up to ~1-2s) for it to acknowledge */
+		bcm_xrdp_api_wakeup(priv->xrdp,
+				    params->rx_core_id,
+				    params->rxq_xf_wakeup_thread[xfid]);
+
+		for (i = 0; i < 1000; i++) {
+			val = fw_rx_reg_readl(priv,
+					      fw_rx_xf_off(xfid, RX_XF_STATUS_REG));
+			if (!(val & RX_XF_STATUS_RX_EN_MASK))
+				break;
+
+			usleep_range(1000, 2000);
+		}
+
+		if ((val & RX_XF_STATUS_RX_EN_MASK))
+			netdev_err(dev, "failed to stop RX for xf %u", xfid);
+	}
+
+	/* wait for FQM */
+	bcm_xrdp_api_wakeup(priv->xrdp,
+			    params->rx_core_id,
+			    params->rxq_fqm_wakeup_thread);
+
+	for (i = 0; i < 1000; i++) {
+		val = fw_rx_reg_readl(priv, RX_FQM_STATUS_REG);
+		if (!(val & RX_FQM_STATUS_RX_EN_MASK))
+			break;
+
+		usleep_range(1000, 2000);
+	}
+
+	if ((val & RX_FQM_STATUS_RX_EN_MASK))
+		netdev_err(dev, "failed to stop RX for FQM");
+
+	priv->mode_ops->stop(priv->mode_priv);
+	priv->mode_ops->release(priv->mode_priv);
+	priv->mode_ops = NULL;
+	priv->mode_priv = NULL;
+
+	/* force reclaim of all tx buffers now that firmware does not
+	 * use them  */
+	local_bh_disable();
+	for (i = 0; i < priv->txq_count; i++)
+		bcm_runner_tx_reclaim(priv, dev, &priv->txq[i], INT_MAX, 1);
+	local_bh_enable();
+
+	/* release IRQs and their affinity hooks */
+	for (i = 0; i < dev->real_num_rx_queues; i++) {
+		irq_set_affinity_notifier(params->rx_irq[i], NULL);
+		irq_set_affinity_hint(params->rx_irq[i], NULL);
+		free_irq(params->rx_irq[i], priv);
+	}
+	/* NOTE(review): iterates real_num_tx_queues while reclaim and
+	 * txq_deinit below use txq_count -- confirm these always match */
+	for (i = 0; i < dev->real_num_tx_queues; i++) {
+		irq_set_affinity_notifier(params->tx_irq[i], NULL);
+		irq_set_affinity_hint(params->tx_irq[i], NULL);
+		free_irq(params->tx_irq[i], priv);
+	}
+
+	/* free descriptor rings */
+	for (i = 0; i < dev->real_num_rx_queues; i++)
+		rxq_deinit(priv, priv->rxq + i);
+	for (i = 0; i < priv->txq_count; i++)
+		txq_deinit(priv, priv->txq + i);
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	priv->ff_txq = NULL;
+#endif
+	priv->reset_scheduled = false;
+}
+
+/*
+ * ndo_set_mac_address: change the interface's mac address.
+ *
+ * Only allowed while the device is down.
+ */
+static int bcm_runner_set_mac_address(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr = p;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	/* reject multicast / all-zero addresses */
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	dev_addr_mod(dev, 0, addr->sa_data, ETH_ALEN);
+	return 0;
+}
+
+/*
+ * ndo_get_stats: return device statistics.
+ */
+static struct net_device_stats *
+bcm_runner_get_stats(struct net_device *dev)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+
+	/* refresh error counters only visible in the hardware MIB,
+	 * when the device is up and the active mode provides a
+	 * stats callback */
+	if (netif_running(dev) &&
+	    priv->mode_ops && priv->mode_ops->stats_update)
+		priv->mode_ops->stats_update(priv->mode_priv, &dev->stats);
+
+	return &dev->stats;
+}
+
+/*
+ * called by phylink before a major config, give us a chance to return
+ * an error if some initialization fails
+ */
+static int phylink_mac_prepare(struct phylink_config *config,
+			       unsigned int pl_mode,
+			       phy_interface_t interface)
+{
+	struct bcm_enet_runner_priv *priv =
+		netdev_priv(to_net_dev(config->dev));
+
+	/* tear down any previously running mode before switching */
+	if (priv->mode_ops)
+		bcm_runner_mode_uninit(priv);
+
+	return bcm_runner_mode_start(priv, interface);
+}
+
+/*
+ * phylink callback: we always expose our single embedded PCS.
+ */
+static struct phylink_pcs *phylink_select_pcs(struct phylink_config *config,
+					      phy_interface_t interface)
+{
+	struct bcm_enet_runner_priv *priv =
+		netdev_priv(to_net_dev(config->dev));
+
+	return &priv->phylink_pcs;
+}
+
+/*
+ * called by phylink to apply new pcs config, with rtnl held
+ *
+ * only called when netdevice is up
+ */
+static int phylink_pcs_config(struct phylink_pcs *pcs,
+			      unsigned int mode,
+			      phy_interface_t interface,
+			      const unsigned long *advertising,
+			      bool permit_pause_to_mac)
+{
+	struct bcm_enet_runner_priv *priv =
+		container_of(pcs, struct bcm_enet_runner_priv, phylink_pcs);
+
+	/* optional per-mode hook */
+	if (priv->mode_ops && priv->mode_ops->phylink_pcs_config)
+		return priv->mode_ops->phylink_pcs_config(priv->mode_priv,
+							  mode, interface,
+							  advertising);
+
+	return 0;
+}
+
+
+/*
+ * called by phylink to apply new mac config, with rtnl held
+ *
+ * only called when netdevice is up
+ */
+static void phylink_mac_config(struct phylink_config *config,
+			       unsigned int pl_mode,
+			       const struct phylink_link_state *state)
+{
+	struct bcm_enet_runner_priv *priv =
+		netdev_priv(to_net_dev(config->dev));
+
+	/* optional per-mode hook */
+	if (priv->mode_ops && priv->mode_ops->phylink_mac_config)
+		priv->mode_ops->phylink_mac_config(priv->mode_priv,
+						   pl_mode, state);
+}
+
+/*
+ * phylink callback: link came up, forward the resolved link
+ * parameters to the active mode (if any).
+ */
+static void phylink_mac_link_up(struct phylink_config *config,
+				struct phy_device *phy,
+				unsigned int mode,
+				phy_interface_t interface,
+				int speed, int duplex,
+				bool tx_fc, bool rx_fc)
+{
+	struct bcm_enet_runner_priv *priv =
+		netdev_priv(to_net_dev(config->dev));
+
+	if (priv->mode_ops)
+		priv->mode_ops->phylink_link_up(priv->mode_priv, mode,
+						interface, speed, duplex,
+						phy);
+}
+
+/*
+ * phylink callback: link went down, notify the active mode (if any).
+ */
+static void phylink_mac_link_down(struct phylink_config *config,
+				  unsigned int mode,
+				  phy_interface_t interface)
+{
+	struct bcm_enet_runner_priv *priv =
+		netdev_priv(to_net_dev(config->dev));
+
+	if (priv->mode_ops)
+		priv->mode_ops->phylink_link_down(priv->mode_priv, mode,
+						  interface);
+}
+
+/*
+ * phylink callback, only for inband autoneg
+ */
+static void phylink_pcs_an_restart(struct phylink_pcs *pcs)
+{
+	struct bcm_enet_runner_priv *priv =
+		container_of(pcs, struct bcm_enet_runner_priv, phylink_pcs);
+
+	/* optional per-mode hook */
+	if (priv->mode_ops && priv->mode_ops->phylink_pcs_an_restart)
+		priv->mode_ops->phylink_pcs_an_restart(priv->mode_priv);
+}
+
+/*
+ * phylink callback, only for inband autoneg
+ */
+static void phylink_pcs_get_state(struct phylink_pcs *pcs,
+				  struct phylink_link_state *state)
+{
+	struct bcm_enet_runner_priv *priv =
+		container_of(pcs, struct bcm_enet_runner_priv, phylink_pcs);
+
+	/* optional per-mode hook */
+	if (priv->mode_ops && priv->mode_ops->phylink_pcs_get_state)
+		priv->mode_ops->phylink_pcs_get_state(priv->mode_priv, state);
+}
+
+/*
+ * Recompute the RX packet and fragment sizes from the current MTU.
+ */
+static void recalc_frag_size(struct bcm_enet_runner_priv *priv)
+{
+	unsigned int size;
+
+	/* ethernet header (14 bytes) plus room for up to two VLAN
+	 * tags (4 bytes each) */
+	priv->pkt_size = priv->netdev->mtu + ETH_HLEN + 2 * 4;
+
+	/* leave RX_OFFSET headroom before the packet data (room to
+	 * align data to cache size after reserving) */
+	size = priv->pkt_size + RX_OFFSET;
+
+	/* per build_skb() requirement: align the data area and append
+	 * the shared info */
+	priv->frag_size = SKB_DATA_ALIGN(size) +
+		SKB_DATA_ALIGN(sizeof (struct skb_shared_info));
+}
+
+/*
+ * netdevice open callback
+ */
+static int bcm_runner_open(struct net_device *dev)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+
+	/* not much to do here: we have to wait for phylink to decide
+	 * the interface type before we know which mode to use, only
+	 * then can everything be initialized (see phylink_mac_prepare) */
+	recalc_frag_size(priv);
+	phylink_start(priv->phylink);
+	return 0;
+}
+
+/*
+ * netdevice stop callback
+ */
+static int bcm_runner_stop(struct net_device *dev)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+
+	/* tear down the active mode first, if one was started */
+	if (priv->mode_ops)
+		bcm_runner_mode_uninit(priv);
+
+	phylink_stop(priv->phylink);
+
+	/* optional per-port hook */
+	if (priv->port_ops->pcs_shutdown)
+		priv->port_ops->pcs_shutdown(priv->port_priv);
+
+	return 0;
+}
+
+/*
+ * ndo_change_mtu: change the interface's MTU.
+ *
+ * The RX buffer size depends on the MTU (see recalc_frag_size()), so
+ * a running interface is fully stopped and reopened around the change.
+ */
+static int bcm_runner_change_mtu(struct net_device *dev,
+				 int new_mtu)
+{
+	struct bcm_enet_runner_priv *priv = netdev_priv(dev);
+	bool running = netif_running(dev);
+	int ret = 0;
+
+	if (running) {
+		ret = bcm_runner_stop(dev);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* pair with the core's lockless READ_ONCE() readers of dev->mtu */
+	WRITE_ONCE(dev->mtu, new_mtu);
+
+	if (running)
+		ret = bcm_runner_open(priv->netdev);
+
+	return ret;
+}
+
+/* MAC-level callbacks handed to phylink_create() */
+const struct phylink_mac_ops bcm_enet_runner_phylink_ops = {
+	.mac_prepare		= phylink_mac_prepare,
+	.mac_config		= phylink_mac_config,
+	.mac_link_up		= phylink_mac_link_up,
+	.mac_link_down		= phylink_mac_link_down,
+	.mac_select_pcs		= phylink_select_pcs,
+};
+
+/* PCS callbacks, attached to priv->phylink_pcs at probe time */
+const struct phylink_pcs_ops bcm_enet_runner_phylink_pcs_ops = {
+	.pcs_get_state		= phylink_pcs_get_state,
+	.pcs_an_restart		= phylink_pcs_an_restart,
+	.pcs_config		= phylink_pcs_config,
+};
+
+/* netdevice operations */
+const struct net_device_ops bcm_runner_ops = {
+	.ndo_open		= bcm_runner_open,
+	.ndo_stop		= bcm_runner_stop,
+	.ndo_start_xmit		= bcm_runner_start_xmit,
+	.ndo_set_mac_address	= bcm_runner_set_mac_address,
+	.ndo_get_stats		= bcm_runner_get_stats,
+	.ndo_change_mtu		= bcm_runner_change_mtu,
+	.ndo_select_queue	= bcm_runner_select_queue,
+};
+
+/*
+ * Delayed work performing a scheduled link reset: fully stop and
+ * reopen the interface (see bcm_enet_runner_schedule_reset()).
+ */
+static void reset_link_work(struct work_struct *w)
+{
+	struct delayed_work *dwork = to_delayed_work(w);
+	struct bcm_enet_runner_priv *priv;
+
+	priv = container_of(dwork, struct bcm_enet_runner_priv,
+			    reset_link_work);
+
+	/* the reset may have been cancelled (reset_scheduled cleared)
+	 * or the device brought down since scheduling; rtnl protects
+	 * against concurrent ifup/ifdown */
+	rtnl_lock();
+	if (!priv->reset_scheduled || !netif_running(priv->netdev)) {
+		rtnl_unlock();
+		return;
+	}
+
+	priv->reset_scheduled = false;
+	netdev_info(priv->netdev, "resetting link\n");
+	bcm_runner_stop(priv->netdev);
+	if (bcm_runner_open(priv->netdev))
+		netdev_err(priv->netdev,  "failed to reset link\n");
+	rtnl_unlock();
+}
+
+/*
+ * Schedule a full link reset (stop + reopen) after delay_ms
+ * milliseconds; the work is done by reset_link_work().
+ */
+void bcm_enet_runner_schedule_reset(struct bcm_enet_runner_priv *priv,
+				    unsigned int delay_ms)
+{
+	unsigned long delay = msecs_to_jiffies(delay_ms);
+
+	priv->reset_scheduled = true;
+	schedule_delayed_work(&priv->reset_link_work, delay);
+}
+
+/*
+ * Cancel a pending link reset: reset_link_work() checks this flag
+ * under rtnl and bails out when it is cleared.
+ */
+void bcm_enet_runner_unschedule_reset(struct bcm_enet_runner_priv *priv)
+{
+	priv->reset_scheduled = false;
+}
+
+/*
+ * runner mib
+ *
+ * Each GEN_RUNNER_* macro expands to the tail of a
+ * bcm_runner_ethtool_stat initializer.  Judging from
+ * bcm_runner_fw_read_estat(), the fields are: access size in bytes
+ * (4/2/1), a zero, the register offset, and the register window
+ * selector (1 = RX regs, 2 = TX regs) -- TODO confirm against
+ * struct bcm_runner_ethtool_stat.
+ */
+#define GEN_RUNNER_RX_STAT(reg)	4, 0, reg, 1
+#define GEN_RUNNER_RX_XF_STAT(xfid, reg)	\
+	GEN_RUNNER_RX_STAT(RX_IF_REGS_BASE_OFF + xfid * RX_IF_REGS_PERIF_SIZE + reg)
+
+#define GEN_RUNNER_TX_STAT(reg)	4, 0, reg, 2
+#define GEN_RUNNER_TXQ_STAT(q, reg)	\
+	GEN_RUNNER_TX_STAT(TXQ_REGS_BASE_OFF + (q) * TXQ_REGS_PERQ_SIZE + reg)
+
+#define GEN_RUNNER_RX_STAT16(reg)	2, 0, reg, 1
+#define GEN_RUNNER_RX_XF_STAT16(xfid, reg) \
+	GEN_RUNNER_RX_STAT16(RX_IF_REGS_BASE_OFF + xfid * RX_IF_REGS_PERIF_SIZE + reg)
+
+#define GEN_RUNNER_TX_STAT16(reg)	2, 0, reg, 2
+#define GEN_RUNNER_TXQ_STAT16(q, reg)	\
+	GEN_RUNNER_TX_STAT16(TXQ_REGS_BASE_OFF + (q) * TXQ_REGS_PERQ_SIZE + reg)
+
+#define GEN_RUNNER_RX_STAT8(reg)	1, 0, reg, 1
+#define GEN_RUNNER_RX_XF_STAT8(xfid, reg)					\
+	GEN_RUNNER_RX_STAT8(RX_IF_REGS_BASE_OFF + xfid * RX_IF_REGS_PERIF_SIZE + reg)
+
+#define GEN_RUNNER_TX_STAT8(reg)	1, 0, reg, 2
+#define GEN_RUNNER_TXQ_STAT8(q, reg)	\
+	GEN_RUNNER_TX_STAT8(TXQ_REGS_BASE_OFF + (q) * TXQ_REGS_PERQ_SIZE + reg)
+
+/*
+ * Firmware RX/TX register and debug counter dump, read by index
+ * through bcm_runner_fw_read_estat() (presumably exposed via ethtool
+ * statistics -- see bcm_runner_ethtool_ops).
+ */
+const struct bcm_runner_ethtool_stat bcm_runner_fw_estat[] = {
+	{ "rnr_rx_control", GEN_RUNNER_RX_STAT(RX_CONTROL_REG) },
+	{ "rnr_rx_desc_addr", GEN_RUNNER_RX_STAT(RX_DESC_ADDRESS_REG) },
+	{ "rnr_rx_desc_size", GEN_RUNNER_RX_STAT(RX_DESC_COUNT_REG) },
+	{ "rnr_rx_irq_mask", GEN_RUNNER_RX_STAT(RX_IRQ_MASK_REG) },
+
+	{ "rnr_rx_fqm_status", GEN_RUNNER_RX_STAT(RX_FQM_STATUS_REG) },
+	{ "rnr_rx_fqm_dbg_task_call", GEN_RUNNER_RX_STAT(RX_FQM_STAT_DBG_TASK_CALL_REG) },
+	{ "rnr_rx_fqm_dbg_cpu_ring_idx", GEN_RUNNER_RX_STAT(RX_FQM_STAT_DBG_CPU_RING_IDX_REG) },
+	{ "rnr_rx_fqm_dbg_full", GEN_RUNNER_RX_STAT(RX_FQM_STAT_DBG_FULL_REG) },
+	{ "rnr_rx_fqm_dbg_host_nobuf", GEN_RUNNER_RX_STAT(RX_FQM_STAT_DBG_HOST_NOBUF_REG) },
+	{ "rnr_rx_fqm_head_idx", GEN_RUNNER_RX_STAT8(RX_FQM_HEAD_IDX_REG) },
+	{ "rnr_rx_fqm_tail_idx", GEN_RUNNER_RX_STAT8(RX_FQM_TAIL_IDX_REG) },
+
+	{ "rnr_rx_xf0_status", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STATUS_REG) },
+	{ "rnr_rx_xf0_cnt_pkt_count", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STAT_RX_CNT_PKT_REG) },
+	{ "rnr_rx_xf0_cnt_drop_nobuf", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STAT_RX_CNT_DROP_NOBUF_REG) },
+	{ "rnr_rx_xf0_cnt_drop_rxdis", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STAT_RX_CNT_DROP_RXDIS_REG) },
+	{ "rnr_rx_xf0_cnt_drop_rxerr", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STAT_RX_CNT_DROP_RXERR_REG) },
+	{ "rnr_rx_xf0_dbg_task_call", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STAT_DBG_TASK_CALL_REG) },
+	{ "rnr_rx_xf0_dbg_last_pd0", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STAT_DBG_LAST_PD0_REG) },
+	{ "rnr_rx_xf0_dbg_last_pd1", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STAT_DBG_LAST_PD1_REG) },
+	{ "rnr_rx_xf0_dbg_last_pd2", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STAT_DBG_LAST_PD2_REG) },
+	{ "rnr_rx_xf0_dbg_last_pd3", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STAT_DBG_LAST_PD3_REG) },
+	{ "rnr_rx_xf0_dbg_last_sn", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STAT_DBG_LAST_SN_REG) },
+	{ "rnr_rx_xf0_dbg_last_bn", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STAT_DBG_LAST_BN_REG) },
+	{ "rnr_rx_xf0_dbg_last_plen", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STAT_DBG_LAST_PLEN_REG) },
+	{ "rnr_rx_xf0_dbg_last_cpudesc_idx", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STAT_DBG_LAST_CPUDESC_IDX_REG) },
+	{ "rnr_rx_xf0_dbg_cnt_invalid_pd", GEN_RUNNER_RX_XF_STAT(0, RX_XF_STAT_DBG_INVALID_PD_CNT_REG) },
+
+	{ "rnr_rx_xf1_status", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STATUS_REG) },
+	{ "rnr_rx_xf1_cnt_pkt_count", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STAT_RX_CNT_PKT_REG) },
+	{ "rnr_rx_xf1_cnt_drop_nobuf", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STAT_RX_CNT_DROP_NOBUF_REG) },
+	{ "rnr_rx_xf1_cnt_drop_rxdis", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STAT_RX_CNT_DROP_RXDIS_REG) },
+	{ "rnr_rx_xf1_cnt_drop_rxerr", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STAT_RX_CNT_DROP_RXERR_REG) },
+	{ "rnr_rx_xf1_dbg_task_call", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STAT_DBG_TASK_CALL_REG) },
+	{ "rnr_rx_xf1_dbg_last_pd0", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STAT_DBG_LAST_PD0_REG) },
+	{ "rnr_rx_xf1_dbg_last_pd1", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STAT_DBG_LAST_PD1_REG) },
+	{ "rnr_rx_xf1_dbg_last_pd2", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STAT_DBG_LAST_PD2_REG) },
+	{ "rnr_rx_xf1_dbg_last_pd3", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STAT_DBG_LAST_PD3_REG) },
+	{ "rnr_rx_xf1_dbg_last_sn", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STAT_DBG_LAST_SN_REG) },
+	{ "rnr_rx_xf1_dbg_last_bn", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STAT_DBG_LAST_BN_REG) },
+	{ "rnr_rx_xf1_dbg_last_plen", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STAT_DBG_LAST_PLEN_REG) },
+	{ "rnr_rx_xf1_dbg_last_cpudesc_idx", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STAT_DBG_LAST_CPUDESC_IDX_REG) },
+	{ "rnr_rx_xf1_dbg_cnt_invalid_pd", GEN_RUNNER_RX_XF_STAT(1, RX_XF_STAT_DBG_INVALID_PD_CNT_REG) },
+
+	{ "rnr_rx_xf2_status", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STATUS_REG) },
+	{ "rnr_rx_xf2_cnt_pkt_count", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STAT_RX_CNT_PKT_REG) },
+	{ "rnr_rx_xf2_cnt_drop_nobuf", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STAT_RX_CNT_DROP_NOBUF_REG) },
+	{ "rnr_rx_xf2_cnt_drop_rxdis", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STAT_RX_CNT_DROP_RXDIS_REG) },
+	{ "rnr_rx_xf2_cnt_drop_rxerr", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STAT_RX_CNT_DROP_RXERR_REG) },
+	{ "rnr_rx_xf2_dbg_task_call", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STAT_DBG_TASK_CALL_REG) },
+	{ "rnr_rx_xf2_dbg_last_pd0", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STAT_DBG_LAST_PD0_REG) },
+	{ "rnr_rx_xf2_dbg_last_pd1", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STAT_DBG_LAST_PD1_REG) },
+	{ "rnr_rx_xf2_dbg_last_pd2", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STAT_DBG_LAST_PD2_REG) },
+	{ "rnr_rx_xf2_dbg_last_pd3", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STAT_DBG_LAST_PD3_REG) },
+	{ "rnr_rx_xf2_dbg_last_sn", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STAT_DBG_LAST_SN_REG) },
+	{ "rnr_rx_xf2_dbg_last_bn", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STAT_DBG_LAST_BN_REG) },
+	{ "rnr_rx_xf2_dbg_last_plen", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STAT_DBG_LAST_PLEN_REG) },
+	{ "rnr_rx_xf2_dbg_last_cpudesc_idx", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STAT_DBG_LAST_CPUDESC_IDX_REG) },
+	{ "rnr_rx_xf2_dbg_cnt_invalid_pd", GEN_RUNNER_RX_XF_STAT(2, RX_XF_STAT_DBG_INVALID_PD_CNT_REG) },
+
+	{ "rnr_tx_status", GEN_RUNNER_TX_STAT(TX_STATUS_REG) },
+	{ "rnr_tx_control", GEN_RUNNER_TX_STAT(TX_CONTROL_REG) },
+	{ "rnr_tx_bbh_id", GEN_RUNNER_TX_STAT(TX_BBH_BB_ID_REG) },
+	{ "rnr_tx_pd_queue_size", GEN_RUNNER_TX_STAT(TX_BBH_PD_QUEUE_SIZE_REG) },
+	{ "rnr_tx_cnt_drop_txdis", GEN_RUNNER_TX_STAT(TX_STAT_CNT_TX_DISABLED_REG) },
+	{ "rnr_tx_dbg_task_call", GEN_RUNNER_TX_STAT(TX_STAT_DBG_TASK_CALL_REG) },
+	{ "rnr_tx_dbg_hw_fifo_full", GEN_RUNNER_TX_STAT(TX_STAT_DBG_FIFO_FULL_REG) },
+	{ "rnr_tx_dbg_mdu_reclaim_idx", GEN_RUNNER_TX_STAT(TX_STAT_DBG_MDU_FW_RECLAIM_IDX_REG) },
+	{ "rnr_tx_dbg_mdu_push_idx", GEN_RUNNER_TX_STAT(TX_STAT_DBG_MDU_FW_PUSH_IDX_REG) },
+
+	{ "rnr_txq0_desc_addr", GEN_RUNNER_TXQ_STAT(0, TXQ_OFF_DESC_ADDRESS_REG) },
+	{ "rnr_txq0_desc_count", GEN_RUNNER_TXQ_STAT(0, TXQ_OFF_DESC_COUNT_REG) },
+	{ "rnr_txq0_irq_mask", GEN_RUNNER_TXQ_STAT(0, TXQ_OFF_IRQ_MASK_REG) },
+	{ "rnr_txq0_tx_idx", GEN_RUNNER_TXQ_STAT8(0, TXQ_OFF_TX_DESC_IDX) },
+	{ "rnr_txq0_tx_cnt", GEN_RUNNER_TXQ_STAT8(0, TXQ_OFF_TX_DESC_CNT) },
+	{ "rnr_txq0_cnt_pkt", GEN_RUNNER_TXQ_STAT(0, TXQ_OFF_STAT_CNT_PKT_SENT_REG) },
+	{ "rnr_txq0_cnt_reclaimed", GEN_RUNNER_TXQ_STAT(0, TXQ_OFF_STAT_CNT_PKT_RECLAIMED_REG) },
+	{ "rnr_txq0_ring_push_idx", GEN_RUNNER_TXQ_STAT16(0, TXQ_OFF_RING_PUSH_IDX_REG) },
+	{ "rnr_txq0_ring_reclaim_idx", GEN_RUNNER_TXQ_STAT16(0, TXQ_OFF_RING_RECLAIM_IDX_REG) },
+	{ "rnr_txq0_acb_qfull", GEN_RUNNER_TXQ_STAT(0, TXQ_OFF_STAT_CNT_ACB_QFULL) },
+
+	{ "rnr_txq1_desc_addr", GEN_RUNNER_TXQ_STAT(1, TXQ_OFF_DESC_ADDRESS_REG) },
+	{ "rnr_txq1_desc_count", GEN_RUNNER_TXQ_STAT(1, TXQ_OFF_DESC_COUNT_REG) },
+	{ "rnr_txq1_irq_mask", GEN_RUNNER_TXQ_STAT(1, TXQ_OFF_IRQ_MASK_REG) },
+	{ "rnr_txq1_tx_idx", GEN_RUNNER_TXQ_STAT8(1, TXQ_OFF_TX_DESC_IDX) },
+	{ "rnr_txq1_tx_cnt", GEN_RUNNER_TXQ_STAT8(1, TXQ_OFF_TX_DESC_CNT) },
+	{ "rnr_txq1_cnt_pkt", GEN_RUNNER_TXQ_STAT(1, TXQ_OFF_STAT_CNT_PKT_SENT_REG) },
+	{ "rnr_txq1_cnt_reclaimed", GEN_RUNNER_TXQ_STAT(1, TXQ_OFF_STAT_CNT_PKT_RECLAIMED_REG) },
+	{ "rnr_txq1_ring_push_idx", GEN_RUNNER_TXQ_STAT16(1, TXQ_OFF_RING_PUSH_IDX_REG) },
+	{ "rnr_txq1_ring_reclaim_idx", GEN_RUNNER_TXQ_STAT16(1, TXQ_OFF_RING_RECLAIM_IDX_REG) },
+	{ "rnr_txq1_acb_qfull", GEN_RUNNER_TXQ_STAT(1, TXQ_OFF_STAT_CNT_ACB_QFULL) },
+};
+
+/* number of entries above, used to bounds-check the index */
+const size_t bcm_runner_fw_estat_count = ARRAY_SIZE(bcm_runner_fw_estat);
+
+/*
+ * Read one firmware statistic, selected by its index into
+ * bcm_runner_fw_estat[].
+ *
+ * Returns 0 when no mode is active (firmware registers not usable)
+ * or when the index is out of range.  The entry's type selects the
+ * RX (1) or TX (2) register window, its size the access width in
+ * bytes (1/2/4).
+ */
+u64 bcm_runner_fw_read_estat(struct bcm_enet_runner_priv *priv, int idx)
+{
+	const struct bcm_runner_ethtool_stat *s;
+
+	if (!priv->mode_ops)
+		return 0;
+
+	/* an out-of-range index is a driver bug, but not one worth
+	 * crashing the kernel for (was BUG_ON) */
+	if (WARN_ON_ONCE(idx >= bcm_runner_fw_estat_count))
+		return 0;
+	s = &bcm_runner_fw_estat[idx];
+
+	switch (s->type) {
+	case 1:
+		switch (s->size) {
+		case 1:
+			return fw_rx_reg_readb(priv, s->reg);
+		case 2:
+			return fw_rx_reg_readh(priv, s->reg);
+		case 4:
+		default:
+			return fw_rx_reg_readl(priv, s->reg);
+		}
+	case 2:
+		switch (s->size) {
+		case 1:
+			return fw_tx_reg_readb(priv, s->reg);
+		case 2:
+			return fw_tx_reg_readh(priv, s->reg);
+		case 4:
+		default:
+			return fw_tx_reg_readl(priv, s->reg);
+		}
+	}
+	return 0;
+}
+
+/*
+ * callback when a new DSA slave device is using this dev as master
+ *
+ * Record the slave's switch port / CPU port so TX queues can later
+ * be mapped to it.  Only ports on switch 0 are tracked.
+ */
+static int
+bcm_enet_runner_dsa_port_reg(struct bcm_enet_runner_priv *priv,
+			     struct net_device *slave_dev)
+{
+	struct dsa_port *dp = dsa_port_from_netdev(slave_dev);
+	struct bcm_dsa_port *bdp;
+
+	if (dp->ds->index)
+		return 0;
+
+	/* fix: was sizeof (*dp), i.e. the size of the wrong structure
+	 * (struct dsa_port instead of struct bcm_dsa_port) */
+	bdp = kmalloc(sizeof (*bdp), GFP_ATOMIC);
+	if (!bdp)
+		return 0;
+
+	bdp->port = dp->index;
+	bdp->imp_port = dp->cpu_dp ? dp->cpu_dp->index : 0;
+	bdp->slave_netdev = slave_dev;
+	list_add(&bdp->next, &priv->dsa_ports);
+	/* resolve queue mapping at device open time, when actual
+	 * number of queue is known */
+	return 0;
+}
+
+/*
+ * callback when a DSA slave device is detached from this master:
+ * drop the matching entry from priv->dsa_ports.
+ */
+static int
+bcm_enet_runner_dsa_port_remove(struct bcm_enet_runner_priv *priv,
+				struct net_device *slave_dev)
+{
+	struct bcm_dsa_port *bdp, *tmp;
+
+	list_for_each_entry_safe(bdp, tmp, &priv->dsa_ports, next) {
+		if (bdp->slave_netdev != slave_dev)
+			continue;
+		list_del(&bdp->next);
+		kfree(bdp);
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * DSA slave device change notifier
+ *
+ * Tracks DSA user ports being linked to / unlinked from this device
+ * so per-port data can be kept in priv->dsa_ports.
+ */
+static int bcm_enet_runner_netdev_notifier(struct notifier_block *nb,
+					unsigned long event, void *ptr)
+{
+	int ret = NOTIFY_DONE;
+	/* only meaningful for NETDEV_CHANGEUPPER; dereferenced only in
+	 * that case below */
+	struct netdev_notifier_changeupper_info *info = ptr;
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+	struct bcm_enet_runner_priv *priv;
+
+	/* one notifier instance per device: ignore other netdevices */
+	priv = container_of(nb, struct bcm_enet_runner_priv, netdev_notifier);
+	if (priv->netdev != dev)
+		return NOTIFY_DONE;
+
+	switch (event) {
+	case NETDEV_CHANGEUPPER:
+		if (dev->netdev_ops != &bcm_runner_ops)
+			return NOTIFY_DONE;
+
+		/* only interested in DSA user (slave) uppers */
+		if (!dsa_user_dev_check(info->upper_dev))
+			return NOTIFY_DONE;
+
+		if (info->linking)
+			ret = bcm_enet_runner_dsa_port_reg(priv,
+							   info->upper_dev);
+		else
+			ret = bcm_enet_runner_dsa_port_remove(priv,
+							      info->upper_dev);
+		break;
+	}
+
+	return notifier_from_errno(ret);
+}
+
+/*
+ * platform driver probe: resolve the XRDP companion device and MAC
+ * address from the device tree, allocate the netdevice, set up
+ * phylink and the netdev notifier, then register the netdevice.
+ */
+static int bcm_enet_runner_probe(struct platform_device *pdev)
+{
+	struct bcm_enet_runner_priv *priv;
+	struct device_node *xrdp_node;
+	struct platform_device *xrdp_pdev;
+	struct bcm_xrdp_priv *xrdp_priv;
+	struct net_device *netdev;
+	enum bcm_runner_port_type port_type;
+	u8 macaddr[ETH_ALEN];
+	phy_interface_t phy_mode;
+	int ret, err;
+	size_t i;
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	ff_init(&pdev->dev);
+#endif
+
+	ret = of_get_phy_mode(pdev->dev.of_node, &phy_mode);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "incorrect phy-mode\n");
+		return -ENODEV;
+	}
+
+	/* resolve the XRDP companion device through its phandle */
+	xrdp_node = of_parse_phandle(pdev->dev.of_node, "enet-runner,xrdp", 0);
+	if (!xrdp_node) {
+		dev_err(&pdev->dev, "failed to find XRDP node\n");
+		return -ENODEV;
+	}
+
+	/* NOTE(review): of_find_device_by_node() takes a reference on
+	 * xrdp_pdev that is never dropped -- confirm whether a
+	 * put_device() is needed */
+	xrdp_pdev = of_find_device_by_node(xrdp_node);
+	of_node_put(xrdp_node);
+	if (!xrdp_pdev) {
+		dev_err(&pdev->dev, "failed to find XRDP device\n");
+		return -ENODEV;
+	}
+
+	/* drvdata is set by the XRDP driver once fully probed */
+	xrdp_priv = platform_get_drvdata(xrdp_pdev);
+	if (!xrdp_priv) {
+		dev_dbg(&pdev->dev, "XRDP not yet initialized\n");
+		return -EPROBE_DEFER;
+	}
+
+	err = of_get_mac_address(pdev->dev.of_node, macaddr);
+	if (err) {
+		dev_err(&pdev->dev,
+			"failed to find address node\n");
+		return err;
+	}
+
+	netdev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof (*priv),
+					 BCM_ENET_RUNNER_MAX_TXQ,
+					 BCM_ENET_RUNNER_MAX_RXQ);
+	if (!netdev)
+		return -ENOMEM;
+
+	priv = netdev_priv(netdev);
+	priv->netdev = netdev;
+	priv->pdev = pdev;
+	priv->xrdp = xrdp_priv;
+	priv->rxq_size = 512;
+	priv->txq_size = 512;
+	INIT_DELAYED_WORK(&priv->reset_link_work, reset_link_work);
+	INIT_LIST_HEAD(&priv->dsa_ports);
+	/* default IRQ affinity: any CPU */
+	for (i = 0; i < ARRAY_SIZE(priv->rxq_info); i++)
+		cpumask_copy(&priv->rxq_info[i].irq_affinity_mask,
+			     cpu_all_mask);
+	for (i = 0; i < ARRAY_SIZE(priv->txq_info); i++)
+		cpumask_copy(&priv->txq_info[i].irq_affinity_mask,
+			     cpu_all_mask);
+
+	/* select the port frontend from the OF match data */
+	port_type = (uintptr_t)of_device_get_match_data(&pdev->dev);
+	switch (port_type) {
+	case BCM_RUNNER_PORT_UNIMAC:
+		priv->port_ops = &port_unimac_ops;
+		break;
+	case BCM_RUNNER_PORT_XPORT:
+		priv->port_ops = &port_xport_ops;
+		break;
+	default:
+		return -EINVAL;
+	}
+	priv->port_type = port_type;
+
+	priv->port_priv = priv->port_ops->init(priv);
+	if (IS_ERR(priv->port_priv))
+		return PTR_ERR(priv->port_priv);
+
+	/* register netdevice */
+	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
+	netdev->max_mtu = BCM_RUNNER_MAC_MAX_MTU;
+	netdev->netdev_ops = &bcm_runner_ops;
+	netdev->ethtool_ops = &bcm_runner_ethtool_ops;
+	dev_addr_mod(netdev, 0, macaddr, ETH_ALEN);
+	SET_NETDEV_DEV(netdev, &pdev->dev);
+	netif_napi_add_weight(netdev, &priv->napi, bcm_runner_poll, 16);
+
+	priv->phylink_pcs.ops = &bcm_enet_runner_phylink_pcs_ops;
+	priv->phylink_pcs.poll = priv->port_ops->pcs_poll;
+
+	priv->phylink_config.dev = &netdev->dev;
+	priv->phylink_config.type = PHYLINK_NETDEV;
+	priv->port_ops->get_mac_capabilities(
+		&priv->phylink_config.mac_capabilities);
+	priv->port_ops->get_supported_interfaces(
+		priv->phylink_config.supported_interfaces);
+
+	priv->phylink = phylink_create(&priv->phylink_config,
+				       pdev->dev.fwnode,
+				       phy_mode,
+				       &bcm_enet_runner_phylink_ops);
+	if (IS_ERR(priv->phylink))
+		return PTR_ERR(priv->phylink);
+
+	ret = phylink_of_phy_connect(priv->phylink, pdev->dev.of_node, 0);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to connect to PHY\n");
+		goto out;
+	}
+
+	priv->netdev_notifier.notifier_call = bcm_enet_runner_netdev_notifier;
+	ret = register_netdevice_notifier(&priv->netdev_notifier);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register DSA notifier\n");
+		goto out;
+	}
+	priv->netdev_notifier_registered = true;
+	priv->dgasp_nb.notifier_call = bcm_enet_runner_dgasp_handler;
+
+	ret = register_netdev(netdev);
+	if (ret)
+		goto out;
+
+	platform_set_drvdata(pdev, priv);
+	return 0;
+
+out:
+	/* NOTE(review): port_ops->init() is not undone here and the
+	 * PHY is not disconnected before phylink_destroy() -- confirm
+	 * whether cleanup hooks are missing */
+	if (priv->netdev_notifier_registered)
+		unregister_netdevice_notifier(&priv->netdev_notifier);
+	if (priv->phylink)
+		phylink_destroy(priv->phylink);
+	return ret;
+}
+
+/*
+ * platform driver remove: undo probe in reverse order.
+ */
+static void bcm_enet_runner_remove(struct platform_device *pdev)
+{
+	struct bcm_enet_runner_priv *priv = platform_get_drvdata(pdev);
+
+	unregister_netdev(priv->netdev);
+	unregister_netdevice_notifier(&priv->netdev_notifier);
+	phylink_destroy(priv->phylink);
+}
+
+/* match data selects the port frontend type (unimac vs xport),
+ * consumed in bcm_enet_runner_probe() */
+static const struct of_device_id bcm63158_enet_runner_of_match[] = {
+	{ .compatible = "brcm,bcm63158-enet-runner-unimac",
+	  .data = (void *)BCM_RUNNER_PORT_UNIMAC },
+	{ .compatible = "brcm,bcm63158-enet-runner-xport" ,
+	  .data = (void *)BCM_RUNNER_PORT_XPORT },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, bcm63158_enet_runner_of_match);
+
+/*
+ * platform driver glue, registered from bcm63158_enet_runner_init()
+ */
+struct platform_driver bcm63158_enet_runner_driver = {
+	.probe	= bcm_enet_runner_probe,
+	.remove	= bcm_enet_runner_remove,
+	.driver	= {
+		.name		= "bcm63158_enet_runner",
+		.of_match_table = bcm63158_enet_runner_of_match,
+		.owner		= THIS_MODULE,
+	},
+};
+
+/*
+ * module init: create the debugfs root, hook the fast-forward
+ * notifier (if enabled) and register the platform driver.
+ */
+static int __init bcm63158_enet_runner_init(void)
+{
+	/*
+	 * debugfs_create_dir() never returns NULL: it returns an
+	 * ERR_PTR() on failure, which the debugfs API transparently
+	 * accepts in later calls, so no error checking is needed (the
+	 * previous !NULL check could never trigger as intended).
+	 */
+	bcm63158_dbg_root = debugfs_create_dir("bcm63158_enet_runner", NULL);
+
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	ff_notifier.notifier_call = ff_notifier_event_cb;
+	register_netdevice_notifier(&ff_notifier);
+#endif
+	return platform_driver_register(&bcm63158_enet_runner_driver);
+}
+
+/*
+ * module exit: tear down in reverse order of
+ * bcm63158_enet_runner_init(); the debugfs tree is removed last.
+ */
+static void __exit bcm63158_enet_runner_exit(void)
+{
+#ifdef CONFIG_BCM63158_ENET_RUNNER_FF
+	unregister_netdevice_notifier(&ff_notifier);
+#endif
+	platform_driver_unregister(&bcm63158_enet_runner_driver);
+	debugfs_remove_recursive(bcm63158_dbg_root);
+}
+
+
+module_init(bcm63158_enet_runner_init);
+module_exit(bcm63158_enet_runner_exit);
+
+MODULE_DESCRIPTION("BCM63158 ethernet runner driver");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_LICENSE("GPL");
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/port_unimac.c linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_unimac.c
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/port_unimac.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_unimac.c	2025-09-25 17:40:33.555357249 +0200
@@ -0,0 +1,391 @@
+#include "port_unimac.h"
+
+/* expands to the size and offset of field @m inside struct unimac_mib */
+#define GEN_MIB_STAT(m)					\
+	sizeof(((struct unimac_mib *)0)->m),	\
+		offsetof(struct unimac_mib, m)
+
+/*
+ * ethtool statistics descriptors: name shown to ethtool, the
+ * size/offset of the mirror field in struct unimac_mib, and the
+ * hardware MIB register the value is read from.
+ */
+static const struct bcm_runner_ethtool_stat unimac_mib_estat[] = {
+	{ "rx_64", GEN_MIB_STAT(rx_64), UNIMAC_MIB_GR64_REG, },
+	{ "rx_65_127", GEN_MIB_STAT(rx_65_127), UNIMAC_MIB_GR127_REG, },
+	{ "rx_128_255", GEN_MIB_STAT(rx_128_255), UNIMAC_MIB_GR255_REG, },
+	{ "rx_256_511", GEN_MIB_STAT(rx_256_511), UNIMAC_MIB_GR511_REG, },
+	{ "rx_512_1023", GEN_MIB_STAT(rx_512_1023), UNIMAC_MIB_GR1023_REG, },
+	{ "rx_1024_1518", GEN_MIB_STAT(rx_1024_1518), UNIMAC_MIB_GR1518_REG, },
+	{ "rx_1519_1522_vlan", GEN_MIB_STAT(rx_1519_1522_vlan), UNIMAC_MIB_GRMGV_REG, },
+	{ "rx_1519_2047", GEN_MIB_STAT(rx_1519_2047), UNIMAC_MIB_GR2047_REG, },
+	{ "rx_2048_4095", GEN_MIB_STAT(rx_2048_4095), UNIMAC_MIB_GR4095_REG, },
+	{ "rx_4096_9216", GEN_MIB_STAT(rx_4096_9216), UNIMAC_MIB_GR9216_REG, },
+	{ "rx_all_pkts", GEN_MIB_STAT(rx_all_pkts), UNIMAC_MIB_GRPKT_REG, },
+	{ "rx_all_octets", GEN_MIB_STAT(rx_all_octets), UNIMAC_MIB_GRBYT_REG, },
+	{ "rx_mult", GEN_MIB_STAT(rx_mult), UNIMAC_MIB_GRMCA_REG, },
+	{ "rx_brdcast", GEN_MIB_STAT(rx_brdcast), UNIMAC_MIB_GRBCA_REG, },
+	{ "rx_crc", GEN_MIB_STAT(rx_crc), UNIMAC_MIB_GRFCS_REG, },
+	{ "rx_cntrl", GEN_MIB_STAT(rx_cntrl), UNIMAC_MIB_GRXCF_REG, },
+	{ "rx_pause", GEN_MIB_STAT(rx_pause), UNIMAC_MIB_GRXPF_REG, },
+	{ "rx_und", GEN_MIB_STAT(rx_und), UNIMAC_MIB_GRXUO_REG, },
+	{ "rx_align", GEN_MIB_STAT(rx_align), UNIMAC_MIB_GRALN_REG, },
+	{ "rx_frame_len", GEN_MIB_STAT(rx_frame_len), UNIMAC_MIB_GRFLR_REG, },
+	{ "rx_code", GEN_MIB_STAT(rx_code), UNIMAC_MIB_GRCDE_REG, },
+	{ "rx_carrier", GEN_MIB_STAT(rx_carrier), UNIMAC_MIB_GRFCR_REG, },
+	{ "rx_oversize", GEN_MIB_STAT(rx_oversize), UNIMAC_MIB_GROVR_REG, },
+	{ "rx_jabber", GEN_MIB_STAT(rx_jabber), UNIMAC_MIB_GRJBR_REG, },
+	{ "rx_too_big", GEN_MIB_STAT(rx_too_big), UNIMAC_MIB_GRMTUE_REG, },
+	{ "rx_gd_pkts", GEN_MIB_STAT(rx_gd_pkts), UNIMAC_MIB_GRPOK_REG, },
+	{ "rx_unicast", GEN_MIB_STAT(rx_unicast), UNIMAC_MIB_GRUC_REG, },
+	{ "rx_ppp", GEN_MIB_STAT(rx_ppp), UNIMAC_MIB_GRPPP_REG, },
+	{ "rx_crc_match", GEN_MIB_STAT(rx_crc_match), UNIMAC_MIB_GRCRC_REG, },
+	{ "tx_64", GEN_MIB_STAT(tx_64), UNIMAC_MIB_TR64_REG, },
+	{ "tx_65_127", GEN_MIB_STAT(tx_65_127), UNIMAC_MIB_TR127_REG, },
+	{ "tx_128_255", GEN_MIB_STAT(tx_128_255), UNIMAC_MIB_TR255_REG, },
+	{ "tx_256_511", GEN_MIB_STAT(tx_256_511), UNIMAC_MIB_TR511_REG, },
+	{ "tx_512_1023", GEN_MIB_STAT(tx_512_1023), UNIMAC_MIB_TR1023_REG, },
+	{ "tx_1024_1518", GEN_MIB_STAT(tx_1024_1518), UNIMAC_MIB_TR1518_REG, },
+	{ "tx_1519_1522_vlan", GEN_MIB_STAT(tx_1519_1522_vlan), UNIMAC_MIB_TRMGV_REG, },
+	{ "tx_1523_2047", GEN_MIB_STAT(tx_1523_2047), UNIMAC_MIB_TR2047_REG, },
+	{ "tx_2048_4095", GEN_MIB_STAT(tx_2048_4095), UNIMAC_MIB_TR4095_REG, },
+	{ "tx_4096_9216", GEN_MIB_STAT(tx_4096_9216), UNIMAC_MIB_TR9216_REG, },
+	{ "tx_all_pkts", GEN_MIB_STAT(tx_all_pkts), UNIMAC_MIB_GTPKT_REG, },
+	{ "tx_mult", GEN_MIB_STAT(tx_mult), UNIMAC_MIB_GTMCA_REG, },
+	{ "tx_brdcast", GEN_MIB_STAT(tx_brdcast), UNIMAC_MIB_GTBCA_REG, },
+	{ "tx_pause", GEN_MIB_STAT(tx_pause), UNIMAC_MIB_GTXPF_REG, },
+	{ "tx_control", GEN_MIB_STAT(tx_control), UNIMAC_MIB_GTXCF_REG, },
+	{ "tx_fcs", GEN_MIB_STAT(tx_fcs), UNIMAC_MIB_GTFCS_REG, },
+	{ "tx_oversize", GEN_MIB_STAT(tx_oversize), UNIMAC_MIB_GTOVR_REG, },
+	{ "tx_defer", GEN_MIB_STAT(tx_defer), UNIMAC_MIB_GTDRF_REG, },
+	{ "tx_ex_defer", GEN_MIB_STAT(tx_ex_defer), UNIMAC_MIB_GTEDF_REG, },
+	{ "tx_1_col", GEN_MIB_STAT(tx_1_col), UNIMAC_MIB_GTSCL_REG, },
+	{ "tx_m_col", GEN_MIB_STAT(tx_m_col), UNIMAC_MIB_GTMCL_REG, },
+	{ "tx_late_col", GEN_MIB_STAT(tx_late_col), UNIMAC_MIB_GTLCL_REG, },
+	{ "tx_ex_col", GEN_MIB_STAT(tx_ex_col), UNIMAC_MIB_GTXCL_REG, },
+	{ "tx_frag", GEN_MIB_STAT(tx_frag), UNIMAC_MIB_GTFRG_REG, },
+	{ "tx_col", GEN_MIB_STAT(tx_col), UNIMAC_MIB_GTNCL_REG, },
+	{ "tx_jabber", GEN_MIB_STAT(tx_jabber), UNIMAC_MIB_GTJBR_REG, },
+	{ "tx_gd_octets", GEN_MIB_STAT(tx_gd_octets), UNIMAC_MIB_GTBYT_REG, },
+	{ "tx_gd_pkts", GEN_MIB_STAT(tx_gd_pkts), UNIMAC_MIB_GTPOK_REG, },
+	{ "tx_unicast", GEN_MIB_STAT(tx_unicast), UNIMAC_MIB_GTUC_REG, },
+	{ "rx_runt0_pkts", GEN_MIB_STAT(rx_runt0_pkts), UNIMAC_MIB_RRPKT_REG, },
+	{ "rx_runt1_pkts", GEN_MIB_STAT(rx_runt1_pkts), UNIMAC_MIB_RRUND_REG, },
+	{ "rx_runt2_pkts", GEN_MIB_STAT(rx_runt2_pkts), UNIMAC_MIB_RRFRG_REG, },
+	{ "rx_runt2_bytes", GEN_MIB_STAT(rx_runt2_bytes), UNIMAC_MIB_RRBYT_REG, },
+};
+
+/*
+ * fill the error fields of the kernel net_device_stats from the
+ * unimac MIB counters.  Values are read straight from the hardware
+ * registers (assumes the MIB is not clear-on-read — TODO confirm
+ * against the MIB configuration done elsewhere).
+ */
+static void mode_unimac_stats_update(void *mode_priv,
+				    struct net_device_stats *s)
+{
+	struct unimac_priv *port = mode_priv;
+	u32 val;
+
+	/* FCS errors */
+	val = mac_mib_reg_readl(port, UNIMAC_MIB_GRFCS_REG);
+	s->rx_crc_errors = val;
+
+	/* frame length errors */
+	val = mac_mib_reg_readl(port, UNIMAC_MIB_GRFLR_REG);
+	s->rx_length_errors = val;
+
+	/* rx_errors = code errors + jabber + alignment errors */
+	val = mac_mib_reg_readl(port, UNIMAC_MIB_GRCDE_REG);
+	val += mac_mib_reg_readl(port, UNIMAC_MIB_GRJBR_REG);
+	val += mac_mib_reg_readl(port, UNIMAC_MIB_GRALN_REG);
+	s->rx_errors = val;
+}
+
+
+/*
+ * read mib data
+ *
+ * copy every hardware counter described in unimac_mib_estat into the
+ * matching field of the software mirror in port->mib.
+ */
+static void mode_unimac_mib_update(void *mode_priv)
+{
+	struct unimac_priv *port = mode_priv;
+	const struct bcm_runner_ethtool_stat *stat;
+
+	for (stat = unimac_mib_estat;
+	     stat != unimac_mib_estat + ARRAY_SIZE(unimac_mib_estat);
+	     stat++) {
+		u32 raw = mac_mib_reg_readl(port, stat->reg);
+		char *dest = (char *)&port->mib + stat->offset;
+
+		/* a few counters are mirrored into 64-bit fields */
+		if (stat->size == sizeof(u64))
+			*(u64 *)dest = raw;
+		else
+			*(u32 *)dest = raw;
+	}
+}
+
+/*
+ * return the driver's cached MIB mirror for this port.
+ */
+static void *mode_unimac_mib_get_data(void *mode_priv)
+{
+	struct unimac_priv *up = mode_priv;
+
+	return &up->mib;
+}
+
+
+/*
+ * reset unimac
+ *
+ * pulse the software reset bit in the unimac command register.
+ */
+static void unimac_reset(struct unimac_priv *port)
+{
+	u32 val;
+
+	val = mac_cfg_reg_readl(port, UNIMAC_CFG_CMD_REG);
+	val |= UNIMAC_CFG_CMD_SW_RESET_MASK;
+	mac_cfg_reg_writel(port, UNIMAC_CFG_CMD_REG, val);
+
+	/*
+	 * hold reset for ~1ms; usleep_range() is preferred over
+	 * msleep(1), which can sleep up to ~20ms on a HZ=100 kernel
+	 * (see Documentation/timers/timers-howto.rst)
+	 */
+	usleep_range(1000, 2000);
+
+	val = mac_cfg_reg_readl(port, UNIMAC_CFG_CMD_REG);
+	val &= ~UNIMAC_CFG_CMD_SW_RESET_MASK;
+	mac_cfg_reg_writel(port, UNIMAC_CFG_CMD_REG, val);
+}
+
+/*
+ * setup unimac for given speed
+ *
+ * also unconditionally enables control-frame passthrough and
+ * promiscuous mode, and routes the MAC through GMII direct mode.
+ * For a speed not listed below the speed field is simply left
+ * cleared (presumably the slowest setting — TODO confirm against
+ * the register spec).
+ */
+static void unimac_setup(struct unimac_priv *port, unsigned int speed)
+{
+	u32 val;
+
+	/* set correct speed  */
+	val = mac_cfg_reg_readl(port, UNIMAC_CFG_CMD_REG);
+	val |= UNIMAC_CFG_CMD_CTRL_FRM_EN_MASK;
+	val |= UNIMAC_CFG_CMD_PROMISC_EN_MASK;
+	val &= ~UNIMAC_CFG_CMD_SPEED_MASK;
+	switch (speed) {
+	case SPEED_10:
+		val |= UNIMAC_CFG_CMD_SPEED_10 << UNIMAC_CFG_CMD_SPEED_SHIFT;
+		break;
+	case SPEED_100:
+		val |= UNIMAC_CFG_CMD_SPEED_100 << UNIMAC_CFG_CMD_SPEED_SHIFT;
+		break;
+	case SPEED_1000:
+		val |= UNIMAC_CFG_CMD_SPEED_1000 << UNIMAC_CFG_CMD_SPEED_SHIFT;
+		break;
+	case SPEED_2500:
+		val |= UNIMAC_CFG_CMD_SPEED_2500 << UNIMAC_CFG_CMD_SPEED_SHIFT;
+		break;
+	}
+	mac_cfg_reg_writel(port, UNIMAC_CFG_CMD_REG, val);
+
+	/* NOTE(review): "MASk" spelling comes from the regs header */
+	val = mac_misc_reg_readl(port, UNIMAC_MISC_CFG_REG);
+	val |= UNIMAC_MISC_CFG_GMII_DIRECT_MASk;
+	mac_misc_reg_writel(port, UNIMAC_MISC_CFG_REG, val);
+}
+
+/*
+ * enable unimac rx/tx
+ */
+static void unimac_enable(struct unimac_priv *port)
+{
+	u32 cmd;
+
+	cmd = mac_cfg_reg_readl(port, UNIMAC_CFG_CMD_REG);
+	cmd |= UNIMAC_CFG_CMD_RX_EN_MASK | UNIMAC_CFG_CMD_TX_EN_MASK;
+	mac_cfg_reg_writel(port, UNIMAC_CFG_CMD_REG, cmd);
+}
+
+/*
+ * disable unimac rx/tx
+ *
+ * sequence: leave GMII direct mode first, then clear the rx/tx
+ * enables, finally soft-reset the MAC to flush its state.
+ */
+static void unimac_disable(struct unimac_priv *port)
+{
+	u32 val;
+
+	val = mac_misc_reg_readl(port, UNIMAC_MISC_CFG_REG);
+	val &= ~UNIMAC_MISC_CFG_GMII_DIRECT_MASk;
+	mac_misc_reg_writel(port, UNIMAC_MISC_CFG_REG, val);
+
+	val = mac_cfg_reg_readl(port, UNIMAC_CFG_CMD_REG);
+	val &= ~UNIMAC_CFG_CMD_RX_EN_MASK;
+	val &= ~UNIMAC_CFG_CMD_TX_EN_MASK;
+	mac_cfg_reg_writel(port, UNIMAC_CFG_CMD_REG, val);
+
+	unimac_reset(port);
+}
+
+/*
+ * program the maximum frame size (FCS included) into the three
+ * max-size registers the hardware exposes; per the notes below only
+ * UNIMAC_CFG_FRM_LEN_REG was observed to have an effect on rx.
+ */
+static void mode_unimac_mtu_set(void *mode_priv, unsigned int size)
+{
+	struct unimac_priv *port = mode_priv;
+	u32 val;
+
+	/* (from vendor BSP init) seems to have no effect on rx */
+	mac_cfg_reg_writel(port, UNIMAC_CFG_RX_MAX_PKT_SIZE_REG, size);
+
+	/* this will truncate any bigger RX packet to size (size
+	 * includes FCS), and increment rx_too_big MIB counter. TX
+	 * path effect not tested */
+	mac_cfg_reg_writel(port, UNIMAC_CFG_FRM_LEN_REG, size);
+
+	/* (from vendor BSP init) seems to have no effect on rx */
+	val = mac_misc_reg_readl(port, UNIMAC_MISC_EXT_CFG1_REG);
+	val &= ~UNIMAC_MISC_EXT_CFG1_MAX_PKT_SIZE_MASK;
+	val |= size;
+	mac_misc_reg_writel(port, UNIMAC_MISC_EXT_CFG1_REG, val);
+}
+
+/*
+ * phylink link-up callback: program the MAC for the negotiated
+ * speed, then enable rx/tx.
+ */
+static void mode_phylink_link_up(void *mode_priv,
+				 unsigned int pl_mode,
+				 phy_interface_t interface,
+				 int speed,
+				 int duplex,
+				 struct phy_device *phy)
+{
+	struct unimac_priv *up = mode_priv;
+
+	unimac_setup(up, speed);
+	unimac_enable(up);
+}
+
+/*
+ * phylink link-down callback: stop the MAC.
+ */
+static void mode_phylink_link_down(void *mode_priv,
+				   unsigned int pl_mode,
+				   phy_interface_t interface)
+{
+	struct unimac_priv *up = mode_priv;
+
+	unimac_disable(up);
+}
+
+/*
+ * mode init hook: remember the unimac register base for this port.
+ */
+static void *mode_unimac_init(void *port_priv,
+			      const struct bcm_xrdp_enet_params *params)
+{
+	struct unimac_priv *up = port_priv;
+
+	up->regs = params->mac_regs;
+	return up;
+}
+
+/*
+ * mode stop hook: wait for the runner firmware tx path to drain.
+ */
+static void mode_unimac_stop(void *mode_priv)
+{
+	struct unimac_priv *up = mode_priv;
+
+	bcm_runner_fw_tx_stop_wait(up->priv);
+}
+
+/*
+ * return the bbh id read from the device tree at port init time.
+ */
+static u32 mode_get_bbh_id(void *port_priv)
+{
+	struct unimac_priv *up = port_priv;
+
+	return up->bbh_id;
+}
+
+/*
+ * mode release hook: intentionally empty, mode_unimac_init() only
+ * stored a register pointer and allocated nothing.
+ */
+static void mode_unimac_release(void *mode_priv)
+{
+}
+
+/*
+ * ops for the single unimac mode ("gmii"), referenced from
+ * port_unimac_ops.modes[] below.
+ */
+const struct bcm_enet_mode_ops unimac_mode_ops = {
+	.name			= "gmii",
+
+	.init			= mode_unimac_init,
+	.stop			= mode_unimac_stop,
+	.release		= mode_unimac_release,
+	.get_bbh_id		= mode_get_bbh_id,
+	.mtu_set		= mode_unimac_mtu_set,
+	.stats_update		= mode_unimac_stats_update,
+
+	/* mib operation */
+	.mib_estat		= unimac_mib_estat,
+	.mib_estat_count	= ARRAY_SIZE(unimac_mib_estat),
+	.mib_update		= mode_unimac_mib_update,
+	.mib_get_data		= mode_unimac_mib_get_data,
+
+	/*
+	 * phylink callback
+	 */
+	.phylink_link_down	= mode_phylink_link_down,
+	.phylink_link_up	= mode_phylink_link_up,
+};
+
+/*
+ * port init hook for the unimac port: allocate per-port state and
+ * read the "enet-runner,bbh" DT property.
+ *
+ * returns the new port state or an ERR_PTR on failure; the
+ * allocation is devm managed.
+ */
+static void *unimac_port_init(struct bcm_enet_runner_priv *priv)
+{
+	struct unimac_priv *port;
+	int ret;
+
+	port = devm_kzalloc(&priv->pdev->dev, sizeof(*port), GFP_KERNEL);
+	if (!port) {
+		/* fix: allocation failure is -ENOMEM, not -ENODEV */
+		return ERR_PTR(-ENOMEM);
+	}
+
+	port->priv = priv;
+
+	ret = of_property_read_u32(priv->pdev->dev.of_node,
+				   "enet-runner,bbh",
+				   &port->bbh_id);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return port;
+}
+
+/*
+ * free the per-port state allocated by unimac_port_init().
+ */
+static void unimac_port_release(void *data)
+{
+	struct unimac_priv *up = data;
+
+	devm_kfree(&up->priv->pdev->dev, up);
+}
+
+/*
+ * map a phy interface mode to an index in port_unimac_ops.modes[];
+ * only the single gmii mode (index 0) exists.
+ */
+static int unimac_mode_select(phy_interface_t interface)
+{
+	if (interface == PHY_INTERFACE_MODE_GMII ||
+	    interface == PHY_INTERFACE_MODE_INTERNAL)
+		return 0;
+
+	return -ENOTSUPP;
+}
+
+/*
+ * report the phy interface modes this port can drive.
+ */
+static void unimac_get_supported_interfaces(unsigned long *modes)
+{
+	__set_bit(PHY_INTERFACE_MODE_INTERNAL, modes);
+	__set_bit(PHY_INTERFACE_MODE_GMII, modes);
+}
+
+/*
+ * report the MAC speed/duplex capabilities to phylink.
+ */
+static void unimac_get_mac_capabilities(unsigned long *caps)
+{
+	unsigned long mask = MAC_10 | MAC_100;
+
+	mask |= MAC_1000 | MAC_2500FD;
+	*caps = mask;
+}
+
+/*
+ * port-level ops for the unimac port: exactly one mode ("gmii").
+ */
+const struct bcm_enet_port_ops port_unimac_ops = {
+	.modes				= {
+		&unimac_mode_ops,
+	},
+	.mode_count			= 1,
+
+	.init				= unimac_port_init,
+	.release			= unimac_port_release,
+	.mode_select			= unimac_mode_select,
+	.get_supported_interfaces	= unimac_get_supported_interfaces,
+	.get_mac_capabilities		= unimac_get_mac_capabilities,
+};
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_unimac.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_unimac.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_unimac.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_unimac.h	2025-09-25 17:40:33.555357249 +0200
@@ -0,0 +1,119 @@
+#ifndef PORT_UNIMAC_H_
+#define PORT_UNIMAC_H_
+
+#include "bcm63158_enet_runner.h"
+#include "regs/unimac_regs.h"
+
+/*
+ * unimac MIB
+ *
+ * software mirror of the hardware MIB counters; field order/size
+ * is tied to the GEN_MIB_STAT() entries in unimac_mib_estat.
+ */
+struct unimac_mib {
+	/* rx size-bucket counters */
+	u32			rx_64;
+	u32			rx_65_127;
+	u32			rx_128_255;
+	u32			rx_256_511;
+	u32			rx_512_1023;
+	u32			rx_1024_1518;
+	u32			rx_1519_1522_vlan;
+	u32			rx_1519_2047;
+	u32			rx_2048_4095;
+	u32			rx_4096_9216;
+
+	/* rx totals and error counters */
+	u32			rx_all_pkts;
+	u64			rx_all_octets;
+	u32			rx_mult;
+	u32			rx_brdcast;
+	u32			rx_crc;
+	u32			rx_cntrl;
+	u32			rx_pause;
+	u32			rx_und;
+	u32			rx_align;
+	u32			rx_frame_len;
+	u32			rx_code;
+	u32			rx_carrier;
+	u32			rx_oversize;
+	u32			rx_jabber;
+	u32			rx_too_big;
+
+	u32			rx_gd_pkts;
+	u32			rx_unicast;
+	u32			rx_ppp;
+	u32			rx_crc_match;
+
+	/* tx size-bucket counters */
+	u32			tx_64;
+	u32			tx_65_127;
+	u32			tx_128_255;
+	u32			tx_256_511;
+	u32			tx_512_1023;
+	u32			tx_1024_1518;
+	u32			tx_1519_1522_vlan;
+	u32			tx_1523_2047;
+	u32			tx_2048_4095;
+	u32			tx_4096_9216;
+
+	/* tx totals and error counters */
+	u32			tx_all_pkts;
+	u32			tx_mult;
+	u32			tx_brdcast;
+	u32			tx_pause;
+	u32			tx_control;
+	u32			tx_fcs;
+	u32			tx_oversize;
+	u32			tx_defer;
+	u32			tx_ex_defer;
+	u32			tx_1_col;
+	u32			tx_m_col;
+	u32			tx_late_col;
+	u32			tx_ex_col;
+	u32			tx_frag;
+	u32			tx_col;
+	u32			tx_jabber;
+
+	u64			tx_gd_octets;
+	u32			tx_gd_pkts;
+	u32			tx_unicast;
+
+	/* runt counters */
+	u32			rx_runt0_pkts;
+	u32			rx_runt1_pkts;
+	u32			rx_runt2_pkts;
+	u32			rx_runt2_bytes;
+};
+
+/* per-port state for a unimac port */
+struct unimac_priv {
+	/* back pointer to the owning enet-runner device */
+	struct bcm_enet_runner_priv	*priv;
+	/* unimac register base, set in mode_unimac_init() */
+	void __iomem			*regs;
+	/* software mirror of the hardware MIB counters */
+	struct unimac_mib		mib;
+	/* BBH index from the "enet-runner,bbh" DT property */
+	u32				bbh_id;
+};
+
+/*
+ * io accessors
+ *
+ * each helper addresses one of the three per-port unimac register
+ * windows (cfg / misc / mib), selected by the port's bbh id.
+ */
+static inline u32 mac_cfg_reg_readl(struct unimac_priv *port, u32 offset)
+{
+	return ioread32(port->regs + UNIMAC_CFG_OFFSET(port->bbh_id) + offset);
+}
+
+static inline void mac_cfg_reg_writel(struct unimac_priv *port, u32 offset,
+			       u32 val)
+{
+	iowrite32(val, port->regs + UNIMAC_CFG_OFFSET(port->bbh_id) + offset);
+}
+
+static inline u32 mac_misc_reg_readl(struct unimac_priv *port, u32 offset)
+{
+	return ioread32(port->regs + UNIMAC_MISC_OFFSET(port->bbh_id) + offset);
+}
+
+static inline void mac_misc_reg_writel(struct unimac_priv *port, u32 offset,
+				u32 val)
+{
+	iowrite32(val, port->regs + UNIMAC_MISC_OFFSET(port->bbh_id) + offset);
+}
+
+/* the MIB window is read-only from the driver's point of view */
+static inline u32 mac_mib_reg_readl(struct unimac_priv *port, u32 offset)
+{
+	return ioread32(port->regs + UNIMAC_MIB_OFFSET(port->bbh_id) + offset);
+}
+
+
+#endif /* PORT_UNIMAC_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport.c linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport.c
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport.c	2025-09-25 17:40:33.555357249 +0200
@@ -0,0 +1,271 @@
+#include "port_xport.h"
+
+/*
+ * fetch pinctrl for rogue1_in related stuff for xport, and select the
+ * rogue1 state.
+ *
+ * the pinctrl handle is acquired lazily on first use and cached in
+ * port->pinctrl; it is released again on any error below.
+ */
+static int xport_set_pinctrl_rogue1(struct xport_priv *port)
+{
+	int err = 0;
+	struct device *dev = &port->priv->pdev->dev;
+	struct pinctrl_state *pins_rogue1;
+
+	if (port->pinctrl) {
+		dev_dbg(dev, "we already have a pinctrl for this port?!\n");
+		goto lookup_state;
+	}
+
+	port->pinctrl = devm_pinctrl_get(dev);
+	if (IS_ERR(port->pinctrl)) {
+		/*
+		 * fix: capture the error code *before* clearing the
+		 * pointer; the previous code returned
+		 * PTR_ERR(NULL) == 0 and thus reported success on
+		 * failure.
+		 */
+		err = PTR_ERR(port->pinctrl);
+		port->pinctrl = NULL;
+		return err;
+	}
+
+lookup_state:
+	pins_rogue1 = pinctrl_lookup_state(port->pinctrl, "rogue1");
+	if (IS_ERR(pins_rogue1)) {
+		dev_err(dev, "unable to get rogue1 pinctrl state.\n");
+		err = PTR_ERR(pins_rogue1);
+		goto err_put_pinctrl;
+	}
+
+	err = pinctrl_select_state(port->pinctrl, pins_rogue1);
+	if (err) {
+		dev_err(dev, "unable to set xport pinctrl state to 'rogue1'\n");
+		goto err_put_pinctrl;
+	}
+
+	return 0;
+
+err_put_pinctrl:
+	devm_pinctrl_put(port->pinctrl);
+	port->pinctrl = NULL;
+	return err;
+}
+
+/*
+ * just release the pinctrl for the port, we do not otherwise need to
+ * manipulate the rs0 gpio here, and the testbed needs this gpio to
+ * test the rogue_in function. releasing the pinctrl will make it
+ * accessible via fbxgpio.
+ */
+static int xport_set_pinctrl_rs0(struct xport_priv *port)
+{
+	struct pinctrl *p = port->pinctrl;
+
+	port->pinctrl = NULL;
+	if (p)
+		devm_pinctrl_put(p);
+
+	return 0;
+}
+
+/*
+ * switch pinctrl state on xport, called from port_xport_epon or
+ * port_xport_xlmac code, when switching the port mode.
+ */
+int xport_switch_pinctrl(struct xport_priv *port, int which)
+{
+	struct device *dev = &port->priv->pdev->dev;
+	int err;
+
+	if (which == XPORT_PIN_ROGUE1) {
+		err = xport_set_pinctrl_rogue1(port);
+	} else if (which == XPORT_PIN_RS0) {
+		err = xport_set_pinctrl_rs0(port);
+	} else {
+		dev_warn(dev, "invalid xport pin ID %d\n", which);
+		return -EINVAL;
+	}
+
+	if (err) {
+		dev_err(dev, "unable to change xport pinctrl state.\n");
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * map one named MEM resource and record it in port->regs[idx] /
+ * port->regs_size[idx]; returns the mapping or an ERR_PTR.
+ */
+static void __iomem *xport_map_resource(struct bcm_enet_runner_priv *priv,
+					struct xport_priv *port,
+					const char *name,
+					unsigned int idx)
+{
+	struct resource *res;
+	void __iomem *base;
+
+	res = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, name);
+	if (!res) {
+		dev_err(&priv->pdev->dev,
+			"unable to get %s resource\n", name);
+		return ERR_PTR(-ENODEV);
+	}
+
+	base = devm_ioremap_resource(&priv->pdev->dev, res);
+	if (IS_ERR(base)) {
+		dev_err(&priv->pdev->dev,
+			"unable to ioremap %s regs\n", name);
+		return base;
+	}
+
+	port->regs[idx] = base;
+	port->regs_size[idx] = resource_size(res);
+	return base;
+}
+
+/*
+ * port init hook for the xport port: read the bbh DT properties, map
+ * the four register windows and fetch the wan_ae reset control.
+ *
+ * returns the new port state or an ERR_PTR on failure; everything is
+ * devm managed, no manual unwind needed.
+ */
+static void *xport_init(struct bcm_enet_runner_priv *priv)
+{
+	/* names indexed by their slot in port->regs[] */
+	static const char * const reg_names[] = {
+		"wan_top", "xport", "xlif", "epon",
+	};
+	struct reset_control *wan_ae_rst;
+	struct xport_priv *port;
+	unsigned int i;
+	int ret;
+
+	port = devm_kzalloc(&priv->pdev->dev, sizeof(*port), GFP_KERNEL);
+	if (!port) {
+		/* fix: allocation failure is -ENOMEM, not -ENODEV */
+		return ERR_PTR(-ENOMEM);
+	}
+
+	port->priv = priv;
+
+	ret = of_property_read_u32(priv->pdev->dev.of_node,
+				   "enet-runner,xport-ae-bbh",
+				   &port->ae_bbh_id);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = of_property_read_u32(priv->pdev->dev.of_node,
+				   "enet-runner,xport-pon-bbh",
+				   &port->pon_bbh_id);
+	if (ret)
+		return ERR_PTR(ret);
+
+	for (i = 0; i < sizeof(reg_names) / sizeof(reg_names[0]); i++) {
+		void __iomem *base;
+
+		base = xport_map_resource(priv, port, reg_names[i], i);
+		if (IS_ERR(base))
+			return base;
+	}
+
+	wan_ae_rst = devm_reset_control_get(&priv->pdev->dev, "wan_ae");
+	if (IS_ERR(wan_ae_rst)) {
+		dev_err(&priv->pdev->dev, "missing wan_ae reset control: %ld\n",
+			PTR_ERR(wan_ae_rst));
+		/*
+		 * fix: propagate the real error instead of -ENODEV so
+		 * -EPROBE_DEFER from the reset controller still works
+		 */
+		return ERR_CAST(wan_ae_rst);
+	}
+	port->wan_ae_rst = wan_ae_rst;
+
+	/*
+	 * default to forcing LBE to 1, this is connected to TX
+	 * disabled so it will disable TX by default
+	 *
+	 * this is only applied by AE mode, the PON mode will disable
+	 * it
+	 */
+	port->lbe_force = true;
+	port->lbe_force_value = true;
+
+	return port;
+}
+
+/*
+ * pcs shutdown hook: power down the serdes.
+ */
+static void xport_pcs_shutdown(void *data)
+{
+	struct xport_priv *xp = data;
+
+	xport_serdes_shutdown(xp);
+}
+
+/*
+ * free the per-port state allocated by xport_init().
+ */
+static void xport_release(void *data)
+{
+	struct xport_priv *xp = data;
+
+	devm_kfree(&xp->priv->pdev->dev, xp);
+}
+
+/*
+ * map a phy interface mode to an index in port_xport_ops.modes[]:
+ * 0 = xlmac mode, 1 = epon mode (see the modes array below);
+ * anything else is unsupported.
+ */
+static int xport_mode_select(phy_interface_t interface)
+{
+	switch (interface) {
+	case PHY_INTERFACE_MODE_1000BASEX:
+	case PHY_INTERFACE_MODE_2500BASEX:
+	case PHY_INTERFACE_MODE_SGMII:
+	case PHY_INTERFACE_MODE_10GBASER:
+		return 0;
+	case PHY_INTERFACE_MODE_10000_1000_BASEPRX_U:
+		return 1;
+	default:
+		return -ENOTSUPP;
+	}
+}
+
+/*
+ * report the phy interface modes this port can drive.
+ */
+static void xport_get_supported_interfaces(unsigned long *modes)
+{
+	static const phy_interface_t ifaces[] = {
+		PHY_INTERFACE_MODE_1000BASEX,
+		PHY_INTERFACE_MODE_2500BASEX,
+		PHY_INTERFACE_MODE_SGMII,
+		PHY_INTERFACE_MODE_10GBASER,
+		PHY_INTERFACE_MODE_10000_1000_BASEPRX_U,
+	};
+	unsigned int i;
+
+	for (i = 0; i < sizeof(ifaces) / sizeof(ifaces[0]); i++)
+		__set_bit(ifaces[i], modes);
+}
+
+/*
+ * report the MAC speed/duplex capabilities to phylink.
+ */
+static void xport_get_mac_capabilities(unsigned long *caps)
+{
+	unsigned long mask = MAC_1000FD;
+
+	mask |= MAC_2500FD | MAC_10000FD;
+	*caps = mask;
+}
+
+/*
+ * port-level ops for the xport port: two modes, xlmac (index 0) and
+ * epon (index 1), matching the indexes returned by
+ * xport_mode_select().
+ */
+const struct bcm_enet_port_ops port_xport_ops = {
+	.modes				= {
+		&xport_xlmac_mode_ops,
+		&xport_epon_mode_ops,
+	},
+	.mode_count			= 2,
+
+	.init				= xport_init,
+	.release			= xport_release,
+	.pcs_poll			= 1,
+	.pcs_shutdown			= xport_pcs_shutdown,
+	.mode_select			= xport_mode_select,
+	.get_supported_interfaces	= xport_get_supported_interfaces,
+	.get_mac_capabilities		= xport_get_mac_capabilities,
+};
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport.h	2025-09-25 17:40:33.555357249 +0200
@@ -0,0 +1,120 @@
+#ifndef PORT_XPORT_H_
+#define PORT_XPORT_H_
+
+#include "bcm63158_enet_runner.h"
+#include <linux/io.h>
+#include <linux/reset.h>
+#include <linux/pinctrl/consumer.h>
+
+/* per-port state for the xport port */
+struct xport_priv {
+	/* back pointer to the owning enet-runner device */
+	struct bcm_enet_runner_priv	*priv;
+
+	/* register windows, indexed wan_top/xport/xlif/epon (see
+	 * xport_init()) */
+	void __iomem			*regs[4];
+	u32				regs_size[4];
+	struct reset_control		*wan_ae_rst;
+	/* bbh ids from DT: "enet-runner,xport-ae-bbh" /
+	 * "enet-runner,xport-pon-bbh" */
+	u32				ae_bbh_id;
+	u32				pon_bbh_id;
+
+	/* LBE force state; defaults to forced-high (TX disabled),
+	 * only honoured in AE mode */
+	bool				lbe_force;
+	bool				lbe_force_value;
+
+	/* acquired lazily by xport_set_pinctrl_rogue1(), released by
+	 * xport_set_pinctrl_rs0() */
+	struct pinctrl *pinctrl;
+};
+
+/*
+ * serdes utils
+ *
+ * tuning values programmed into the wan serdes by
+ * xport_serdes_set_params(); field names mirror the serdes register
+ * fields (presumably defined in regs/serdes_regs.h — semantics not
+ * visible here).
+ */
+struct serdes_params {
+	u32 tx_pll_vco_div4;
+	u32 tx_pll_vco_div2;
+	u32 rx_pll_vco_div4;
+	u32 rx_pll_vco_div2;
+	u32 rx_pll_id;
+	u32 tx_pll_id;
+
+	u32 tx_pll_force_kvh_bw;
+	u32 rx_pll_force_kvh_bw;
+	u32 tx_pll_kvh_force;
+	u32 rx_pll_kvh_force;
+
+	u32 tx_pll_2rx_bw;
+	u32 rx_pll_2rx_bw;
+
+	u32 rx_pll_fracn_div;
+	u32 rx_pll_fracn_ndiv;
+
+	u32 tx_pll_fracn_div;
+	u32 tx_pll_fracn_ndiv;
+
+	u32 tx_pll_fracn_sel;
+	u32 rx_pll_fracn_sel;
+
+	u32 rx_pll_ditheren;
+	u32 tx_pll_ditheren;
+
+	u32 rx_pll_mode;
+	u32 tx_pll_mode;
+
+	u32 rx_tx_rate_ratio;
+
+	u32 rx_pon_mac_ctrl;
+	u32 tx_pon_mac_ctrl;
+	u32 tx_sync_e_ctrl;
+
+	u32 rx_osr_mode;
+	u32 tx_osr_mode;
+
+	/* optional tuning steps, applied only when the flag is set */
+	bool do_rx_pi_spacing;
+	u32 clk90_offset;
+	u32 p1_offset;
+
+	bool do_pll_charge_pump;
+	bool do_pll_charge_pump_10g;
+	bool do_vga_rf;
+	bool do_sigdetect;
+	bool do_ae;
+	u32 dsc_a_cdr_control_2;
+
+	bool serdes_ae_full_rate;
+	bool serdes_ae_20b_width;
+};
+
+/* MISC3 register settings, applied by xport_serdes_update_m3_params() */
+struct serdes_misc3_params {
+	u32 misc3_if_select;
+	u32 misc3_laser_mode;
+	bool misc3_sgmii;
+};
+
+int xport_serdes_set_params(struct xport_priv *port,
+			    const struct serdes_params *params,
+			    const struct serdes_misc3_params *m3params);
+void xport_serdes_update_m3_params(struct xport_priv *port,
+				   const struct serdes_misc3_params *m3params);
+int xport_serdes_pcs_read_reg(struct xport_priv *port,
+			      u16 lane, u16 address);
+int xport_serdes_pcs_write_reg(struct xport_priv *port,
+			       u16 lane, u16 address,
+			       u16 wrdata, u16 mask);
+
+void xport_serdes_lbe_force_enable(struct xport_priv *port);
+
+void xport_serdes_lbe_force_disable(struct xport_priv *port);
+
+void xport_serdes_lbe_dont_force(struct xport_priv *port);
+
+void xport_serdes_lbe_get_forced_state(struct xport_priv *port,
+				       bool *force, bool *forced_value);
+
+void xport_serdes_shutdown(struct xport_priv *port);
+
+
+extern const struct bcm_enet_mode_ops xport_xlmac_mode_ops;
+extern const struct bcm_enet_mode_ops xport_epon_mode_ops;
+
+/* pin states selectable through xport_switch_pinctrl() */
+enum {
+	XPORT_PIN_ROGUE1,
+	XPORT_PIN_RS0,
+};
+int xport_switch_pinctrl(struct xport_priv *port, int which);
+
+#endif /* PORT_XPORT_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_epon.c linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_epon.c
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_epon.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_epon.c	2025-09-25 17:40:33.555357249 +0200
@@ -0,0 +1,3201 @@
+#include "port_xport_epon.h"
+#include "regs/serdes_regs.h"
+
+/*
+ * maximum number of LLID supported by hardware
+ */
+#define BCM_LLID_COUNT			32
+#define BCM_L2_COUNT			BCM_LLID_COUNT
+
+/*
+ * maximum number of pending grants per LLID for hardware
+ */
+#define BCM_MAX_PENDING_GRANTS		16
+
+/*
+ * estimated time from packet xmit to actual hardware tx, used during
+ * discovery to avoid scheduling packet in the past
+ */
+#define SOFTWARE_TX_LATENCY_TQ		USEC_TO_TQ(20)
+
+/*
+ * llid index we force, to ease debugging
+ */
+#define USER_LLID_IDX			0
+#define BROADCAST_LLID_IDX		31
+
+/*
+ * default burst cap
+ */
+#define DEFAULT_BURST_CAP		1024
+
+/*
+ * standard MPCP frame destination address
+ */
+static const u8 mpcp_frame_da[6] = { 0x01, 0x80, 0xc2, 0x00, 0x00, 0x01 };
+
+enum epon_stat_type {
+	EPON_STAT_XPCS_RX,	/* 32-bit XPCS RX hardware counter */
+	EPON_STAT_XPCS_RX64,	/* 64-bit (lo/hi pair) XPCS RX counter */
+	EPON_STAT_XIF,		/* XIF block counter */
+	EPON_STAT_LIF,		/* LIF block counter */
+	EPON_STAT_EPN,		/* EPN direct-register counter */
+	EPON_STAT_EPN_RAM,	/* EPN per-LLID counter RAM (encoded address) */
+	EPON_STAT_EPN_L1_ACC,	/* EPN L1 accumulator counter */
+	EPON_STAT_LOCAL,	/* software-maintained counter, no register */
+};
+
+#define GEN_MIB_STAT(m)					\
+	sizeof(((struct eponmac_mib *)0)->m),	\
+		offsetof(struct eponmac_mib, m)
+/* GEN_MIB_STAT expands to the "size, offset" initializer pair for field m */
+#define GEN_EPN_RAM_STAT(m, llid, port, ram_off)		\
+	#m,							\
+		GEN_MIB_STAT(m),				\
+		((llid << 24) | (port << 16) | ram_off),	\
+		EPON_STAT_EPN_RAM, /* reg field packs llid/port/ram offset */
+/* full initializer for an EPN counter-RAM stat entry */
+#define GEN_LOCAL_STAT(m)					\
+	#m,							\
+		GEN_MIB_STAT(m),				\
+		0,						\
+		EPON_STAT_LOCAL, /* software counter: no register address */
+
+const struct bcm_runner_ethtool_stat epon_mib_estat[] = { /* ethtool stat table: name, size, offset, reg, type */
+	{ "xpcs_rx_framer_misbrst", GEN_MIB_STAT(xpcs_rx_framer_misbrst),
+	  XPCSRX_RX_FRAMER_MISBRST_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_framer_bd_err", GEN_MIB_STAT(xpcs_rx_framer_bd_err),
+	  XPCSRX_RX_FRAMER_BD_ERR_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_64b66b_ipg_det", GEN_MIB_STAT(xpcs_rx_64b66b_ipg_det),
+	  XPCSRX_RX_64B66B_IPG_DET_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_fec_nque_in", GEN_MIB_STAT(xpcs_rx_fec_nque_in),
+	  XPCSRX_RX_FEC_NQUE_IN_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_fec_nque_out", GEN_MIB_STAT(xpcs_rx_fec_nque_out),
+	  XPCSRX_RX_FEC_NQUE_OUT_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_idle_start", GEN_MIB_STAT(xpcs_rx_idle_start),
+	  XPCSRX_RX_IDLE_START_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_idle_stop", GEN_MIB_STAT(xpcs_rx_idle_stop),
+	  XPCSRX_RX_IDLE_STOP_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_fec_cw_fail", GEN_MIB_STAT(xpcs_rx_fec_cw_fail),
+	  XPCSRX_RX_FEC_CW_FAIL_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_fec_cw_tot", GEN_MIB_STAT(xpcs_rx_fec_cw_tot),
+	  XPCSRX_RX_FEC_CW_TOT_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_fec_correct", GEN_MIB_STAT(xpcs_rx_fec_correct),
+	  XPCSRX_RX_FEC_CORRECT_CNT_LO_REG, EPON_STAT_XPCS_RX64, },
+	{ "xpcs_rx_fec_ones_cor", GEN_MIB_STAT(xpcs_rx_fec_ones_cor),
+	  XPCSRX_RX_FEC_ONES_COR_CNT_LO_REG, EPON_STAT_XPCS_RX64, },
+	{ "xpcs_rx_fec_zeros_cor", GEN_MIB_STAT(xpcs_rx_fec_zeros_cor),
+	  XPCSRX_RX_FEC_ZEROS_COR_CNT_LO_REG, EPON_STAT_XPCS_RX64, },
+	{ "xpcs_rx_64b66b_fail", GEN_MIB_STAT(xpcs_rx_64b66b_fail),
+	  XPCSRX_RX_64B66B_FAIL_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_frmr_bad_sh", GEN_MIB_STAT(xpcs_rx_frmr_bad_sh),
+	  XPCSRX_RX_FRMR_BAD_SH_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_psudo", GEN_MIB_STAT(xpcs_rx_psudo),
+	  XPCSRX_RX_PSUDO_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_prbs", GEN_MIB_STAT(xpcs_rx_prbs),
+	  XPCSRX_RX_PRBS_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_64b66b_start", GEN_MIB_STAT(xpcs_rx_64b66b_start),
+	  XPCSRX_RX_64B66B_START_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_idle_good_pkt", GEN_MIB_STAT(xpcs_rx_idle_good_pkt),
+	  XPCSRX_RX_IDLE_GOOD_PKT_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_idle_err_pkt", GEN_MIB_STAT(xpcs_rx_idle_err_pkt),
+	  XPCSRX_RX_IDLE_ERR_PKT_CNT_REG, EPON_STAT_XPCS_RX, },
+	{ "xpcs_rx_64b66b_stop", GEN_MIB_STAT(xpcs_rx_64b66b_stop),
+	  XPCSRX_RX_64B66B_STOP_CNT_REG, EPON_STAT_XPCS_RX, },
+	/* XIF block counters */
+	{ "xif_pmc_frame_rx", GEN_MIB_STAT(xif_pmc_frame_rx),
+	  XIF_PMC_FRAME_RX_CNT_REG, EPON_STAT_XIF, },
+	{ "xif_pmc_byte_rx", GEN_MIB_STAT(xif_pmc_byte_rx),
+	  XIF_PMC_BYTE_RX_CNT_REG, EPON_STAT_XIF, },
+	{ "xif_pmc_runt_rx", GEN_MIB_STAT(xif_pmc_runt_rx),
+	  XIF_PMC_RUNT_RX_CNT_REG, EPON_STAT_XIF, },
+	{ "xif_pmc_cw_err_rx", GEN_MIB_STAT(xif_pmc_cw_err_rx),
+	  XIF_PMC_CW_ERR_RX_CNT_REG, EPON_STAT_XIF, },
+	{ "xif_pmc_crc8_err_rx", GEN_MIB_STAT(xif_pmc_crc8_err_rx),
+	  XIF_PMC_CRC8_ERR_RX_CNT_REG, EPON_STAT_XIF, },
+	{ "xif_xpn_data_frm_tx", GEN_MIB_STAT(xif_xpn_data_frm),
+	  XIF_XPN_DATA_FRM_CNT_REG, EPON_STAT_XIF, },
+	{ "xif_xpn_data_byte_tx", GEN_MIB_STAT(xif_xpn_data_byte),
+	  XIF_XPN_DATA_BYTE_CNT_REG, EPON_STAT_XIF, },
+	{ "xif_xpn_mpcp_frm_tx", GEN_MIB_STAT(xif_xpn_mpcp_frm),
+	  XIF_XPN_MPCP_FRM_CNT_REG, EPON_STAT_XIF, },
+	{ "xif_xpn_oam_frm_tx", GEN_MIB_STAT(xif_xpn_oam_frm),
+	  XIF_XPN_OAM_FRM_CNT_REG, EPON_STAT_XIF, },
+	{ "xif_xpn_oam_byte_tx", GEN_MIB_STAT(xif_xpn_oam_byte),
+	  XIF_XPN_OAM_BYTE_CNT_REG, EPON_STAT_XIF, },
+	{ "xif_xpn_oversize_frm_tx", GEN_MIB_STAT(xif_xpn_oversize_frm),
+	  XIF_XPN_OVERSIZE_FRM_CNT_REG, EPON_STAT_XIF, },
+	{ "xif_sec_abort_frm_rx", GEN_MIB_STAT(xif_sec_abort_frm),
+	  XIF_SEC_ABORT_FRM_CNT_REG, EPON_STAT_XIF, },
+	{ "xif_pmc_tx_neg_event", GEN_MIB_STAT(xif_pmc_tx_neg_event),
+	  XIF_PMC_TX_NEG_EVENT_CNT_REG, EPON_STAT_XIF, },
+	{ "xif_xpn_idle_pkt", GEN_MIB_STAT(xif_xpn_idle_pkt),
+	  XIF_XPN_IDLE_PKT_CNT_REG, EPON_STAT_XIF, },
+	/* LIF block counters */
+	{ "lif_rx_line_code_err_cnt", GEN_MIB_STAT(lif_rx_line_code_err_cnt),
+	  LIF_RX_LINE_CODE_ERR_CNT_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_mpcp_frm", GEN_MIB_STAT(lif_rx_agg_mpcp_frm),
+	  LIF_RX_AGG_MPCP_FRM_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_good_frm", GEN_MIB_STAT(lif_rx_agg_good_frm),
+	  LIF_RX_AGG_GOOD_FRM_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_good_byte", GEN_MIB_STAT(lif_rx_agg_good_byte),
+	  LIF_RX_AGG_GOOD_BYTE_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_undersz_frm", GEN_MIB_STAT(lif_rx_agg_undersz_frm),
+	  LIF_RX_AGG_UNDERSZ_FRM_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_oversz_frm", GEN_MIB_STAT(lif_rx_agg_oversz_frm),
+	  LIF_RX_AGG_OVERSZ_FRM_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_crc8_frm", GEN_MIB_STAT(lif_rx_agg_crc8_frm),
+	  LIF_RX_AGG_CRC8_FRM_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_fec_frm", GEN_MIB_STAT(lif_rx_agg_fec_frm),
+	  LIF_RX_AGG_FEC_FRM_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_fec_byte", GEN_MIB_STAT(lif_rx_agg_fec_byte),
+	  LIF_RX_AGG_FEC_BYTE_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_fec_exc_err_frm",
+	  GEN_MIB_STAT(lif_rx_agg_fec_exc_err_frm),
+	  LIF_RX_AGG_FEC_EXC_ERR_FRM_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_nonfec_good_frm",
+	  GEN_MIB_STAT(lif_rx_agg_nonfec_good_frm),
+	  LIF_RX_AGG_NONFEC_GOOD_FRM_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_nonfec_good_byte",
+	  GEN_MIB_STAT(lif_rx_agg_nonfec_good_byte),
+	  LIF_RX_AGG_NONFEC_GOOD_BYTE_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_err_bytes", GEN_MIB_STAT(lif_rx_agg_err_bytes),
+	  LIF_RX_AGG_ERR_BYTES_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_err_zeroes", GEN_MIB_STAT(lif_rx_agg_err_zeroes),
+	  LIF_RX_AGG_ERR_ZEROES_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_no_err_blks", GEN_MIB_STAT(lif_rx_agg_no_err_blks),
+	  LIF_RX_AGG_NO_ERR_BLKS_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_cor_blks", GEN_MIB_STAT(lif_rx_agg_cor_blks),
+	  LIF_RX_AGG_COR_BLKS_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_uncor_blks", GEN_MIB_STAT(lif_rx_agg_uncor_blks),
+	  LIF_RX_AGG_UNCOR_BLKS_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_err_ones", GEN_MIB_STAT(lif_rx_agg_err_ones),
+	  LIF_RX_AGG_ERR_ONES_REG, EPON_STAT_LIF, },
+	{ "lif_rx_agg_err_frm", GEN_MIB_STAT(lif_rx_agg_err_frm),
+	  LIF_RX_AGG_ERR_FRM_REG, EPON_STAT_LIF, },
+	{ "lif_tx_pkt_cnt", GEN_MIB_STAT(lif_tx_pkt_cnt),
+	  LIF_TX_PKT_CNT_REG, EPON_STAT_LIF, },
+	{ "lif_tx_byte_cnt", GEN_MIB_STAT(lif_tx_byte_cnt),
+	  LIF_TX_BYTE_CNT_REG, EPON_STAT_LIF, },
+	{ "lif_tx_non_fec_pkt_cnt", GEN_MIB_STAT(lif_tx_non_fec_pkt_cnt),
+	  LIF_TX_NON_FEC_PKT_CNT_REG, EPON_STAT_LIF, },
+	{ "lif_tx_non_fec_byte_cnt", GEN_MIB_STAT(lif_tx_non_fec_byte_cnt),
+	  LIF_TX_NON_FEC_BYTE_CNT_REG, EPON_STAT_LIF, },
+	{ "lif_tx_fec_pkt_cnt", GEN_MIB_STAT(lif_tx_fec_pkt_cnt),
+	  LIF_TX_FEC_PKT_CNT_REG, EPON_STAT_LIF, },
+	{ "lif_tx_fec_byte_cnt", GEN_MIB_STAT(lif_tx_fec_byte_cnt),
+	  LIF_TX_FEC_BYTE_CNT_REG, EPON_STAT_LIF, },
+	{ "lif_tx_fec_blk_cnt", GEN_MIB_STAT(lif_tx_fec_blk_cnt),
+	  LIF_TX_FEC_BLK_CNT_REG, EPON_STAT_LIF, },
+	{ "lif_tx_mpcp_pkt_cnt", GEN_MIB_STAT(lif_tx_mpcp_pkt_cnt),
+	  LIF_TX_MPCP_PKT_CNT_REG, EPON_STAT_LIF, },
+	/* EPN per-LLID RX counters for user LLID 0 (counter RAM port 0) */
+	{ GEN_EPN_RAM_STAT(epn00_rx_bytes, 0, 0, 0) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_fcs, 0, 0, 1) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_oam, 0, 0, 2) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_gate, 0, 0, 3) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_64, 0, 0, 4) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_65_127, 0, 0, 5) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_128_255, 0, 0, 6) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_256_511, 0, 0, 7) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_512_1023, 0, 0, 8) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_1024_1518, 0, 0, 9) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_1519_2047, 0, 0, 10) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_2048_4095, 0, 0, 11) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_4096_9216, 0, 0, 12) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_gt_9216, 0, 0, 13) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_oversize, 0, 0, 14) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_bcast, 0, 0, 15) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_mcast, 0, 0, 16) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_unicast, 0, 0, 17) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_undersized, 0, 0, 18) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_oam_bytes, 0, 0, 19) },
+	{ GEN_EPN_RAM_STAT(epn00_rx_register, 0, 0, 20) },
+	/* EPN per-LLID TX counters for user LLID 0 (counter RAM port 1) */
+	{ GEN_EPN_RAM_STAT(epn00_tx_bytes, 0, 1, 0) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_oam, 0, 1, 1) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_report, 0, 1, 2) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_64, 0, 1, 3) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_65_127, 0, 1, 4) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_128_255, 0, 1, 5) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_256_511, 0, 1, 6) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_512_1023, 0, 1, 7) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_1024_1518, 0, 1, 8) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_1519_2047, 0, 1, 9) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_2048_4095, 0, 1, 10) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_4096_9216, 0, 1, 11) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_gt_9216, 0, 1, 12) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_oam_bytes, 0, 1, 13) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_bcast, 0, 1, 14) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_mcast, 0, 1, 15) },
+	{ GEN_EPN_RAM_STAT(epn00_tx_unicast, 0, 1, 16) },
+	{ "epn00_unused_tq", GEN_MIB_STAT(epn00_unused_tq),
+	  0, EPN_UNUSED_TQ_CNTx_0_7_REG(0), }, /* NOTE(review): reg/type fields look swapped vs. every other entry — expected (EPN_UNUSED_TQ_CNTx_0_7_REG(0), EPON_STAT_EPN)? confirm */
+	{ "epn00_l1_acc_bytes", GEN_MIB_STAT(epn00_l1_acc_bytes),
+	  0, EPON_STAT_EPN_L1_ACC, },
+	/* EPN RX counters for LLID indexes 24-31 */
+	{ GEN_EPN_RAM_STAT(epn24_rx_bytes, 24, 0, 0) },
+	{ GEN_EPN_RAM_STAT(epn24_rx_fcs, 24, 0, 1) },
+	{ GEN_EPN_RAM_STAT(epn24_rx_bcast, 24, 0, 15) },
+	{ GEN_EPN_RAM_STAT(epn24_rx_mcast, 24, 0, 16) },
+	{ GEN_EPN_RAM_STAT(epn25_rx_bytes, 25, 0, 0) },
+	{ GEN_EPN_RAM_STAT(epn25_rx_fcs, 25, 0, 1) },
+	{ GEN_EPN_RAM_STAT(epn25_rx_bcast, 25, 0, 15) },
+	{ GEN_EPN_RAM_STAT(epn25_rx_mcast, 25, 0, 16) },
+	{ GEN_EPN_RAM_STAT(epn26_rx_bytes, 26, 0, 0) },
+	{ GEN_EPN_RAM_STAT(epn26_rx_fcs, 26, 0, 1) },
+	{ GEN_EPN_RAM_STAT(epn26_rx_bcast, 26, 0, 15) },
+	{ GEN_EPN_RAM_STAT(epn26_rx_mcast, 26, 0, 16) },
+	{ GEN_EPN_RAM_STAT(epn27_rx_bytes, 27, 0, 0) },
+	{ GEN_EPN_RAM_STAT(epn27_rx_fcs, 27, 0, 1) },
+	{ GEN_EPN_RAM_STAT(epn27_rx_bcast, 27, 0, 15) },
+	{ GEN_EPN_RAM_STAT(epn27_rx_mcast, 27, 0, 16) },
+	{ GEN_EPN_RAM_STAT(epn28_rx_bytes, 28, 0, 0) },
+	{ GEN_EPN_RAM_STAT(epn28_rx_fcs, 28, 0, 1) },
+	{ GEN_EPN_RAM_STAT(epn28_rx_bcast, 28, 0, 15) },
+	{ GEN_EPN_RAM_STAT(epn28_rx_mcast, 28, 0, 16) },
+	{ GEN_EPN_RAM_STAT(epn29_rx_bytes, 29, 0, 0) },
+	{ GEN_EPN_RAM_STAT(epn29_rx_fcs, 29, 0, 1) },
+	{ GEN_EPN_RAM_STAT(epn29_rx_bcast, 29, 0, 15) },
+	{ GEN_EPN_RAM_STAT(epn29_rx_mcast, 29, 0, 16) },
+	{ GEN_EPN_RAM_STAT(epn30_rx_bytes, 30, 0, 0) },
+	{ GEN_EPN_RAM_STAT(epn30_rx_fcs, 30, 0, 1) },
+	{ GEN_EPN_RAM_STAT(epn30_rx_bcast, 30, 0, 15) },
+	{ GEN_EPN_RAM_STAT(epn30_rx_mcast, 30, 0, 16) },
+	{ GEN_EPN_RAM_STAT(epn31_rx_bytes, 31, 0, 0) },
+	{ GEN_EPN_RAM_STAT(epn31_rx_fcs, 31, 0, 1) },
+	{ GEN_EPN_RAM_STAT(epn31_rx_bcast, 31, 0, 15) },
+	{ GEN_EPN_RAM_STAT(epn31_rx_mcast, 31, 0, 16) },
+	/* EPN unmapped-frame counters (frames with no LLID match) */
+	{ "epn_unmap_big", GEN_MIB_STAT(epn_unmap_big),
+	  EPN_UNMAP_BIG_CNT_REG, EPON_STAT_EPN, },
+	{ "epn_unmap_frame", GEN_MIB_STAT(epn_unmap_frame),
+	  EPN_UNMAP_FRAME_CNT_REG, EPON_STAT_EPN, },
+	{ "epn_unmap_fcs", GEN_MIB_STAT(epn_unmap_fcs),
+	  EPN_UNMAP_FCS_CNT_REG, EPON_STAT_EPN, },
+	{ "epn_unmap_gate", GEN_MIB_STAT(epn_unmap_gate),
+	  EPN_UNMAP_GATE_CNT_REG, EPON_STAT_EPN, },
+	{ "epn_unmap_oam", GEN_MIB_STAT(epn_unmap_oam),
+	  EPN_UNMAP_OAM_CNT_REG, EPON_STAT_EPN, },
+	{ "epn_unmap_small", GEN_MIB_STAT(epn_unmap_small),
+	  EPN_UNMAP_SMALL_CNT_REG, EPON_STAT_EPN, },
+	/* software MPCP registration state-machine counters */
+	{ GEN_LOCAL_STAT(reg_mpcp_rx) },
+	{ GEN_LOCAL_STAT(reg_mpcp_rx_invalid) },
+	{ GEN_LOCAL_STAT(reg_mpcp_rx_unk_opcode) },
+	{ GEN_LOCAL_STAT(reg_mpcp_rx_disc) },
+	{ GEN_LOCAL_STAT(reg_mpcp_rx_disc_info_mismatch) },
+	{ GEN_LOCAL_STAT(reg_mpcp_rx_disc_late) },
+	{ GEN_LOCAL_STAT(reg_mpcp_rx_disc_last_slot) },
+	{ GEN_LOCAL_STAT(reg_mpcp_rx_reg_for_other) },
+	{ GEN_LOCAL_STAT(reg_mpcp_rx_reg_unk_flag) },
+	{ GEN_LOCAL_STAT(reg_mpcp_rx_reg_dereg) },
+	{ GEN_LOCAL_STAT(reg_mpcp_rx_reg_nack) },
+	{ GEN_LOCAL_STAT(reg_mpcp_rx_reg_timeout) },
+	{ GEN_LOCAL_STAT(reg_mpcp_rx_other_err) },
+	{ GEN_LOCAL_STAT(reg_mpcp_tx_reg_req) },
+	{ GEN_LOCAL_STAT(reg_mpcp_tx_reg_ack) },
+};
+
+/*
+ * serdes PLL/CDR configuration for 10G downstream / 1G upstream EPON
+ */
+static const struct serdes_params serdes_params_10g_1g = {
+	.tx_pll_vco_div2	= 0x0,
+	.rx_pll_vco_div2	= 0x0,
+	.tx_pll_vco_div4	= 0x0,
+	.rx_pll_vco_div4	= 0x0,
+	.rx_pll_id		= serdes_PLL_1,	/* rx and tx use separate PLLs */
+	.tx_pll_id		= serdes_PLL_0,
+
+	.tx_pll_force_kvh_bw	= 0x1,
+	.rx_pll_force_kvh_bw	= 0x1,
+	.tx_pll_kvh_force	= 0x1,
+	.rx_pll_kvh_force	= 0x1,
+
+	.tx_pll_2rx_bw		= 0x0,
+	.rx_pll_2rx_bw		= 0x0,
+
+	.tx_pll_fracn_sel	= 0x1,	/* fractional-N mode on both PLLs */
+	.rx_pll_fracn_sel	= 0x1,
+
+	.tx_pll_ditheren	= 0x1,
+	.rx_pll_ditheren	= 0x1,
+
+	.rx_pll_fracn_div	= 0x10000,
+	.rx_pll_fracn_ndiv	= 0x0ce,
+
+	.tx_pll_fracn_div	= 0x00000,
+	.tx_pll_fracn_ndiv	= 0x0c8,
+
+	.rx_pll_mode		= 0x5,
+	.tx_pll_mode		= 0x5,
+
+	.rx_tx_rate_ratio	= 0x5,	/* 10G down vs 1G up rate ratio code */
+
+	.rx_pon_mac_ctrl	= 0x7,
+	.tx_pon_mac_ctrl	= 0x0,
+	.tx_sync_e_ctrl		= 0x0,
+
+	.rx_osr_mode		= 0x0,
+	.tx_osr_mode		= 0x7,	/* tx oversampled for the 1G upstream */
+
+	.do_rx_pi_spacing	= true,
+	.clk90_offset		= 32,
+	.p1_offset		= 0,
+	.dsc_a_cdr_control_2	= 0x030,
+
+	.do_pll_charge_pump	= true,
+	.do_pll_charge_pump_10g	= true,
+	.do_vga_rf		= true,
+	.do_ae			= false,
+	.do_sigdetect		= false,
+	.serdes_ae_full_rate	= false,
+	.serdes_ae_20b_width	= false,
+};
+
+static const struct serdes_misc3_params serdes_m3params_10g_1g = { /* MISC3 setup for 10G/1G EPON */
+	.misc3_if_select	= 5,
+	.misc3_laser_mode	= 0,
+};	/* .misc3_sgmii left false: SGMII mode disabled */
+
+/*
+ * return the low 32 bits of a 6-byte MAC address (bytes 2..5, big endian)
+ */
+static u32 extract_mac_addr_lo(const u8 *addr)
+{
+	return ((u32)addr[2] << 24) |
+		((u32)addr[3] << 16) |
+		((u32)addr[4] << 8) |
+		((u32)addr[5]);
+}
+
+/*
+ * return the high 16 bits of a 6-byte MAC address (bytes 0..1, big endian)
+ */
+static u32 extract_mac_addr_hi(const u8 *addr)
+{
+	return ((u32)addr[0] << 8) | addr[1];
+}
+
+/*
+ * size in bytes of an MPCP REGISTER_REQ frame on the wire;
+ * include_preambles adds preamble, FCS and 1G inter-packet gap overhead
+ */
+static u32 get_register_req_data_size(bool include_preambles)
+{
+	size_t len = 0;
+
+	if (include_preambles)
+		len += PREAMBLE_LEN_BYTES;
+	/* ethernet payload cannot be smaller than 60 or it has to be
+	 * padded */
+	len += max_t(size_t,
+		     ETH_HLEN +
+		     sizeof (struct mpcp_hdr) +
+		     sizeof (struct mpcp_register_req),
+		     ETH_ZLEN);
+	if (include_preambles) {
+		len += ETH_FCS_LEN;
+		len += IPG_BYTES_1G;
+	}
+	return len;
+}
+
+/*
+ * on-wire duration of a full REGISTER_REQ frame in MPCP time quanta;
+ * only the 1G upstream case is implemented
+ */
+static u32 get_register_req_data_duration(struct xport_epon_priv *mode)
+{
+	if (mode->up_speed == 1000)
+		return BYTES_TO_TQ_1G(get_register_req_data_size(true));
+	else {
+		/* FIXME: implement */
+		BUG();	/* no return needed: BUG() does not return */
+	}
+}
+
+/*
+ * broadcast LLID value to listen on, chosen by downstream speed
+ */
+static u16 get_broadcast_llid(struct xport_epon_priv *mode)
+{
+	if (mode->down_speed == 10000)
+		return BROADCAST_LLID_10G;
+	else
+		return BROADCAST_LLID_1G;
+}
+
+/*
+ * build the MPCP discovery-info capability bits matching our upstream
+ * speed; when mask is non-NULL, clear the corresponding bits in it so
+ * the caller can tell which bits this function decided
+ */
+static u16 get_disc_info_caps(struct xport_epon_priv *mode, u16 *mask)
+{
+	u16 value = 0;
+
+	/* OLT must support our upstream speed */
+	if (mode->up_speed == 1000) {
+		if (mask)
+			*mask &= ~MPCP_DISCINFO_1G_CAP;
+		value |= MPCP_DISCINFO_1G_CAP;
+	} else {
+		if (mask)
+			*mask &= ~MPCP_DISCINFO_10G_CAP;
+		value |= MPCP_DISCINFO_10G_CAP;
+	}
+
+	/* only use discovery window for our upstream speed */
+	if (mode->up_speed == 1000) {
+		if (mask)
+			*mask &= ~MPCP_DISCINFO_1G_WINDOW;
+		value |= MPCP_DISCINFO_1G_WINDOW;
+	} else {
+		if (mask)
+			*mask &= ~MPCP_DISCINFO_10G_WINDOW;
+		value |= MPCP_DISCINFO_10G_WINDOW;
+	}
+	return value;
+}
+
+/*
+ * assert (active=true) or release (active=false) the reset lines
+ * selected by mask in the EPON top-level reset register
+ */
+static void epon_reset_modules(struct xport_epon_priv *mode,
+			       bool active, u32 mask)
+{
+	u32 val;
+
+	val = epon_top_reg_readl(mode, EPON_TOP_RESET_REG);
+	/* reset is active low */
+	if (!active)
+		val |= mask;
+	else
+		val &= ~mask;
+	epon_top_reg_writel(mode, EPON_TOP_RESET_REG, val);
+}
+
+/*
+ * bring the 10G XPCS RX block out of reset, wait for its idle-RAM
+ * init, enable the framer with FEC on, and clear all latched
+ * interrupt status bits; returns 0 on success, 1 on RAM-init timeout
+ */
+static int xpcs_rx_init(struct xport_epon_priv *mode)
+{
+	u32 val;
+	size_t i;
+
+	/* release xpcs rx reset */
+	epon_reset_modules(mode, false, RESET_XPCSRXRST_N_MASK);
+	msleep(20);
+
+	val = epon_xpcsrx_reg_readl(mode, XPCSRX_RX_RST_REG);
+	val |= RX_RST_CFGXPCSRXCLK161RSTN_MASK;	/* release 161MHz clock domain reset */
+	epon_xpcsrx_reg_writel(mode, XPCSRX_RX_RST_REG, val);
+	udelay(10);
+
+	/* poll until ready */
+	for (i = 0; i < 100; i++) {
+		val = epon_xpcsrx_reg_readl(mode, XPCSRX_RX_RAM_ECC_INT_STAT_REG);
+		if (val & RX_RAM_ECC_INT_STAT_INTRXIDLERAMINITDONE_MASK)
+			break;
+		msleep(1);
+	}
+
+	if (!(val & RX_RAM_ECC_INT_STAT_INTRXIDLERAMINITDONE_MASK)) {
+		netdev_err(mode->port->priv->netdev,
+			   "xpcsrx RAM init failed\n");
+		return 1;
+	}
+
+	/* start with FEC enabled */
+	val = RX_FRAMER_CTL_CFGXPCSRXFRMREN_MASK |
+		RX_FRAMER_CTL_CFGXPCSRXFRAMEFEC_MASK |
+		RX_FRAMER_CTL_CFGXPCSRXFRMREBDVLDEN_MASK |
+		RX_FRAMER_CTL_CFGXPCSRXFRMRSPULKEN_MASK;
+	epon_xpcsrx_reg_writel(mode, XPCSRX_RX_FRAMER_CTL_REG, val);
+
+	val = RX_FEC_CTL_CFGXPCSRXFECEN_MASK |
+		RX_FEC_CTL_CFGXPCSRXFECIDLEINS_MASK |
+		RX_FEC_CTL_CFGXPCSRXFECFAILBLKSH0_MASK;
+	epon_xpcsrx_reg_writel(mode, XPCSRX_RX_FEC_CTL_REG, val);
+
+	/* write-1-to-clear every latched interrupt status bit */
+	val = RX_INT_STAT_INTRXIDLEDAJIT_MASK |
+		RX_INT_STAT_INTRXFRMRMISBRST_MASK |
+		RX_INT_STAT_INTRXIDLESOPEOPGAPBIG_MASK |
+		RX_INT_STAT_INTRXIDLEFRCINS_MASK |
+		RX_INT_STAT_INTRX64B66BMINIPGERR_MASK |
+		RX_INT_STAT_INTRXFECNQUECNTNEQ_MASK |
+		RX_INT_STAT_INTRXIDLEFIFOUNDRUN_MASK |
+		RX_INT_STAT_INTRXIDLEFIFOOVRRUN_MASK |
+		RX_INT_STAT_INTRXFECHIGHCOR_MASK |
+		RX_INT_STAT_INTRXFECDECSTOPONERR_MASK |
+		RX_INT_STAT_INTRXFECDECPASS_MASK |
+		RX_INT_STAT_INTRXSTATFRMRHIGHBER_MASK |
+		RX_INT_STAT_INTRXFRMREXITBYSP_MASK |
+		RX_INT_STAT_INTRXFRMRBADSHMAX_MASK |
+		RX_INT_STAT_INTRXDSCRAMBURSTSEQOUT_MASK |
+		RX_INT_STAT_INTRXTESTPSUDOLOCK_MASK |
+		RX_INT_STAT_INTRXTESTPSUDOTYPE_MASK |
+		RX_INT_STAT_INTRXTESTPSUDOERR_MASK |
+		RX_INT_STAT_INTRXTESTPRBSLOCK_MASK |
+		RX_INT_STAT_INTRXTESTPRBSERR_MASK |
+		RX_INT_STAT_INTRXFECPSISTDECFAIL_MASK |
+		RX_INT_STAT_INTRXFRAMERBADSH_MASK |
+		RX_INT_STAT_INTRXFRAMERCWLOSS_MASK |
+		RX_INT_STAT_INTRXFRAMERCWLOCK_MASK |
+		RX_INT_STAT_INTRXFECDECFAIL_MASK |
+		RX_INT_STAT_INTRX64B66BDECERR_MASK |
+		RX_INT_STAT_INTRXFRMRNOLOCKLOS_MASK |
+		RX_INT_STAT_INTRXFRMRROGUE_MASK |
+		RX_INT_STAT_INT_REGS_ERR_MASK;
+	epon_xpcsrx_reg_writel(mode, XPCSRX_RX_INT_STAT_REG, val);
+
+	return 0;
+}
+
+/*
+ * program LLID value and enable bit for one LLID index in the XIF
+ * (10G path) LLID table
+ */
+static void xif_set_llid(struct xport_epon_priv *mode,
+			 unsigned int llid_idx,
+			 unsigned int llid,
+			 bool enabled)
+{
+	u32 val;
+
+	val = epon_xif_reg_readl(mode, XIF_LLIDx_0_31_REG(llid_idx));
+	val &= ~XIF_LLIDx_0_31_CFGONULLID0_MASK;
+	val |= llid << XIF_LLIDx_0_31_CFGONULLID0_SHIFT;
+	if (enabled)
+		val |= (1 << (16 + XIF_LLIDx_0_31_CFGONULLID0_SHIFT));	/* enable bit sits 16 above the llid field */
+	else
+		val &= ~(1 << (16 + XIF_LLIDx_0_31_CFGONULLID0_SHIFT));
+	epon_xif_reg_writel(mode, XIF_LLIDx_0_31_REG(llid_idx), val);
+}
+
+/*
+ * read the current local MPCP time counter from the XIF block
+ */
+static u32 xif_get_local_mpcp_time(struct xport_epon_priv *mode)
+{
+	return epon_xif_reg_readl(mode, XIF_MPCP_TIME_REG);
+}
+
+/*
+ * issue a write command on the XIF indirect data port (the data
+ * itself is staged in XIF_PORT_DATA_REG(n) beforehand) and wait for
+ * the BUSY bit to clear; returns 0 on success, 1 on timeout
+ */
+static int xif_data_port_write(struct xport_epon_priv *mode,
+			       unsigned int port,
+			       u32 addr)
+{
+	u32 val;
+	size_t i;
+
+	val = (addr << XIF_PORT_COMMAND_PORTADDRESS_SHIFT) |
+		(1 << XIF_PORT_COMMAND_PORTOPCODE_SHIFT) |	/* opcode 1: write */
+		(port << XIF_PORT_COMMAND_PORTSELECT_SHIFT);
+
+	epon_xif_reg_writel(mode, XIF_PORT_COMMAND_REG, val);
+
+	/* poll until the command completes (BUSY clears), ~500us max */
+	for (i = 0; i < 100; i++) {
+		val = epon_xif_reg_readl(mode, XIF_PORT_COMMAND_REG);
+		if (!(val & XIF_PORT_COMMAND_DATAPORTBUSY_MASK))
+			break;
+		udelay(5);
+	}
+
+	if ((val & XIF_PORT_COMMAND_DATAPORTBUSY_MASK)) {
+		netdev_err(mode->port->priv->netdev,
+			   "xif data port busy does not clear\n");
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * load a 128-bit downstream decryption key + 64-bit SCI into the XIF
+ * security engine for (llid_idx, key_idx) and enable rx decryption;
+ * key_idx selects one of the two per-LLID key slots
+ */
+static void xif_set_down_key(struct xport_epon_priv *mode,
+			     unsigned int llid_idx,
+			     unsigned int key_idx,
+			     const uint8_t key[16],
+			     const uint8_t sci[8])
+{
+	u32 val;
+
+	BUG_ON(key_idx > 1);
+
+	/* make sure decryption is enabled */
+	val = epon_xif_reg_readl(mode, XIF_CTL_REG);
+	val &= ~XIF_CTL_RXENCRYPTMODE_MASK;
+	val |= XIF_CTL_RXENCRYPTEN_MASK;
+	epon_xif_reg_writel(mode, XIF_CTL_REG, val);
+
+	/* load the key, 4 bytes per data register, highest word first */
+	memcpy(&val, key + 0, 4);
+	val = cpu_to_be32(val);
+	epon_xif_reg_writel(mode, XIF_PORT_DATA_REG(3), val);
+	memcpy(&val, key + 4, 4);
+	val = cpu_to_be32(val);
+	epon_xif_reg_writel(mode, XIF_PORT_DATA_REG(2), val);
+	memcpy(&val, key + 8, 4);
+	val = cpu_to_be32(val);
+	epon_xif_reg_writel(mode, XIF_PORT_DATA_REG(1), val);
+	memcpy(&val, key + 12, 4);
+	val = cpu_to_be32(val);
+	epon_xif_reg_writel(mode, XIF_PORT_DATA_REG(0), val);
+
+	/* then the 8-byte SCI */
+	memcpy(&val, sci, 4);
+	val = cpu_to_be32(val);
+	epon_xif_reg_writel(mode, XIF_PORT_DATA_REG(5), val);
+	memcpy(&val, sci + 4, 4);
+	val = cpu_to_be32(val);
+	epon_xif_reg_writel(mode, XIF_PORT_DATA_REG(4), val);
+
+	/* enable decrypt for this key */
+	val = 1;
+	epon_xif_reg_writel(mode, XIF_PORT_DATA_REG(7), val);
+
+	val = (llid_idx << 1) | key_idx;	/* table index: llid * 2 + key slot */
+	/* port 0 => rx key */
+	xif_data_port_write(mode, 0, val);
+}
+
+/*
+ * report, for one LLID index, whether the last received frame was
+ * encrypted and which key slot (0/1) it used
+ */
+static void
+xif_get_llid_last_rx_encrypt(struct xport_epon_priv *mode,
+			     unsigned int llid_idx,
+			     bool *last_encrypted,
+			     unsigned int *last_encrypted_key_id)
+{
+	u32 val;
+
+	val = epon_xif_reg_readl(mode, XIF_SECRX_ENCRYPT_REG);
+	*last_encrypted = (val & (1 << llid_idx));
+
+	val = epon_xif_reg_readl(mode, XIF_SECRX_KEYNUM_REG);
+	*last_encrypted_key_id = (val & (1 << llid_idx)) ? 1 : 0;
+}
+
+/*
+ * bring the XIF (10G interface) block out of reset, wait for its RAM
+ * init, configure CRC8 checking / oversize threshold, clear pending
+ * interrupts and init the 10G XPCS RX when downstream is 10G;
+ * returns 0 on success, 1 on failure
+ */
+static int xif_init(struct xport_epon_priv *mode)
+{
+	u32 val;
+	size_t i;
+
+	/* release xif reset */
+	epon_reset_modules(mode, false, RESET_XIFRST_N_MASK);
+	msleep(20);
+
+	/* xif control, RAM init */
+	val = XIF_CTL_XIFDTPORTRSTN_MASK;
+	epon_xif_reg_writel(mode, XIF_CTL_REG, val);
+	udelay(10);
+
+	/* wait up to ~1s for the not-ready flag to drop */
+	for (i = 0; i < 1000; i++) {
+		val = epon_xif_reg_readl(mode, XIF_CTL_REG);
+		if (!(val & XIF_CTL_XIFNOTRDY_MASK))
+			break;
+		msleep(1);
+	}
+
+	if ((val & XIF_CTL_XIFNOTRDY_MASK)) {
+		netdev_err(mode->port->priv->netdev,
+			   "xif RAM init failed\n");
+		return 1;
+	}
+
+	val |= XIF_CTL_CFGPMCRXENCRC8CHK_MASK;	/* check CRC8 on rx */
+	val |= XIF_CTL_PMCRXRSTN_MASK;
+	val |= XIF_CTL_SECRXRSTN_MASK;
+	epon_xif_reg_writel(mode, XIF_CTL_REG, val);
+
+	/* oversize */
+	val = epon_xif_reg_readl(mode, XIF_XPN_OVERSIZE_THRESH_REG);
+	val &= ~XIF_XPN_OVERSIZE_THRESH_CFGXPNOVRSZTHRESH_MASK;
+	val |= 10000 << XIF_XPN_OVERSIZE_THRESH_CFGXPNOVRSZTHRESH_SHIFT;
+	epon_xif_reg_writel(mode, XIF_XPN_OVERSIZE_THRESH_REG, val);
+
+	/* interrupt status, write-1-to-clear */
+	val = XIF_INT_STATUS_SECRXRPLYPRTCTABRTINT_MASK |
+		XIF_INT_STATUS_SECTXPKTNUMMAXINT_MASK |
+		XIF_INT_STATUS_TSFULLUPDINT_MASK |
+		XIF_INT_STATUS_TXHANGINT_MASK |
+		XIF_INT_STATUS_NEGTIMEINT_MASK |
+		XIF_INT_STATUS_PMCTSJTTRINT_MASK |
+		XIF_INT_STATUS_SECRXOUTFFOVRFLWINT_MASK;
+	epon_xif_reg_writel(mode, XIF_INT_STATUS_REG, val);
+
+	if (mode->down_speed == 10000) {
+		if (xpcs_rx_init(mode))
+			return 1;
+	}
+
+	if (mode->up_speed == 10000) {
+		/* FIXME: implement */
+		BUG();
+	}
+
+	return 0;
+}
+
+/*
+ * program LLID value and enable bit for one LLID index in the LIF
+ * (1G path) LLID table; register layout is split in banks of 8
+ */
+static void lif_set_llid(struct xport_epon_priv *mode,
+			 unsigned int llid_idx,
+			 unsigned int llid,
+			 bool enabled)
+{
+	u32 val;
+	u32 reg;
+
+	switch (llid_idx) {
+	case 0 ... 7:
+		reg = LIF_LLIDx_0_7_REG(llid_idx);
+		break;
+	/* NOTE(review): indexes 8..15 are not handled and hit BUG() —
+	 * confirm whether LIF_LLIDx_8_15 exists or the gap is intended */
+	case 16 ... 23:
+		reg = LIF_LLIDx_16_23_REG(llid_idx);
+		break;
+	case 24 ... 31:
+		reg = LIF_LLIDx_24_31_REG(llid_idx);
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	val = epon_lif_reg_readl(mode, reg);
+	val &= ~LIF_LLIDx_0_7_CFGLLID0_MASK;
+	val |= llid << LIF_LLIDx_0_7_CFGLLID0_SHIFT;
+	if (enabled)
+		val |= (1 << (16 + LIF_LLIDx_0_7_CFGLLID0_SHIFT));	/* enable bit sits 16 above the llid field */
+	else
+		val &= ~(1 << (16 + LIF_LLIDx_0_7_CFGLLID0_SHIFT));
+	epon_lif_reg_writel(mode, reg, val);
+}
+
+/*
+ * enable (normal_op=true) or disable grant-driven laser operation in
+ * the LIF block
+ */
+static void lif_set_laser_normal_op(struct xport_epon_priv *mode,
+				    bool normal_op)
+{
+	u32 val;
+
+	val = epon_lif_reg_readl(mode, LIF_PON_CONTROL_REG);
+	if (normal_op)
+		val |= LIF_PON_CONTROL_CFLASEREN_MASK;
+	else
+		val &= ~LIF_PON_CONTROL_CFLASEREN_MASK;
+	epon_lif_reg_writel(mode, LIF_PON_CONTROL_REG, val);
+}
+
+/*
+ * program idle time (in time quanta) inserted before a burst (front)
+ * and the laser-off lead time before end of grant (back), and align
+ * the upstream MPCP timestamp to the start of the DA
+ */
+static void lif_set_idle_time(struct xport_epon_priv *mode,
+			      unsigned int front,
+			      unsigned int back)
+{
+	u32 val;
+
+	/* 'front' idle time before sending (for non FEC mode)  */
+	val = front << LIF_LASER_OFF_IDLE_CFTXINITIDLE_SHIFT;
+	/* turn off the laser 'back' tq before end of grant */
+	val |= (1 << 7) | (back << LIF_LASER_OFF_IDLE_CFTXLASEROFFDELTA_SHIFT);
+	epon_lif_reg_writel(mode, LIF_LASER_OFF_IDLE_REG, val);
+
+	/* 'front' idle time before sending (for FEC mode)  */
+	val = front << LIF_FEC_INIT_IDLE_CFTXFECINITIDLE_SHIFT;
+	epon_lif_reg_writel(mode, LIF_FEC_INIT_IDLE_REG, val);
+
+	/* fixup MPCP offset so that it corresponds to first bytes of
+	 * DA */
+	val = front + BYTES_TO_TQ_1G(PREAMBLE_LEN_BYTES);
+	epon_lif_reg_writel(mode, LIF_SEC_UP_MPCP_OFFSET_REG, val);
+}
+
+/*
+ * bring the LIF (1G interface) block out of reset and configure it:
+ * laser polarity, interop timings, FEC, security engine, tx enable,
+ * interrupt clearing and laser-on-time monitoring;
+ * returns 0 on success
+ */
+static int lif_init(struct xport_epon_priv *mode)
+{
+	u32 val;
+
+	/* release lif reset */
+	epon_reset_modules(mode, false, RESET_LIFRST_N_MASK);
+	msleep(20);
+
+	/* pon control */
+	val = LIF_PON_CONTROL_CFGRXDATABITFLIP_MASK |
+		LIF_PON_CONTROL_CFPPSCLKRBC_MASK;
+	if (!mode->laser_active_hi) {
+		/* XXX: this is not a typo, polarity is reversed */
+		val |= LIF_PON_CONTROL_CFTXLASERONACTHI_MASK;
+	}
+	epon_lif_reg_writel(mode, LIF_PON_CONTROL_REG, val);
+
+	/* interop control */
+	val = (2 << LIF_PON_INTER_OP_CONTROL_CFTXIPGCNT_SHIFT) |
+		(6 << LIF_PON_INTER_OP_CONTROL_CFTXPIPEDELAY_SHIFT);
+	epon_lif_reg_writel(mode, LIF_PON_INTER_OP_CONTROL_REG, val);
+
+	/* fec control, globally enable FEC, still needs to be enabled
+	 * per llid */
+	val = LIF_FEC_CONTROL_CFFECTXENABLE_MASK |
+		LIF_FEC_CONTROL_CFFECTXFECPERLLID_MASK |
+		LIF_FEC_CONTROL_CFFECRXENABLE_MASK;
+	epon_lif_reg_writel(mode, LIF_FEC_CONTROL_REG, val);
+
+	/* security control */
+	val = epon_lif_reg_readl(mode, LIF_SEC_CONTROL_REG);
+	val |= LIF_SEC_CONTROL_SECDNRSTN_PRE_MASK;
+	val |= LIF_SEC_CONTROL_SECUPRSTN_PRE_MASK;
+	epon_lif_reg_writel(mode, LIF_SEC_CONTROL_REG, val);
+
+	/* pon control, enable tx only */
+	val = epon_lif_reg_readl(mode, LIF_PON_CONTROL_REG);
+	val |= LIF_PON_CONTROL_LIFTXRSTN_PRE_MASK;
+	val |= LIF_PON_CONTROL_LIFTXEN_MASK;
+	epon_lif_reg_writel(mode, LIF_PON_CONTROL_REG, val);
+
+	/* clear all interrupts, write-1-to-clear */
+	val = LIF_INT_STATUS_INT_SOP_SFEC_IPG_VIOLATION_MASK |
+		LIF_INT_STATUS_LASERONMAX_MASK |
+		LIF_INT_STATUS_LASEROFF_MASK |
+		LIF_INT_STATUS_SECDNREPLAYPROTCTABORT_MASK |
+		LIF_INT_STATUS_SECUPPKTNUMOVERFLOW_MASK |
+		LIF_INT_STATUS_INTLASEROFFDURBURST_MASK |
+		LIF_INT_STATUS_INTRXBERTHRESHEXC_MASK |
+		LIF_INT_STATUS_INTFECRXFECRECVSTATUS_MASK |
+		LIF_INT_STATUS_INTFECRXCORERRFIFOFULLSTATUS_MASK |
+		LIF_INT_STATUS_INTFECRXCORERRFIFOUNEXPEMPTY_MASK |
+		LIF_INT_STATUS_INTFECBUFPOPEMPTYPUSH_MASK |
+		LIF_INT_STATUS_INTFECBUFPOPEMPTYNOPUSH_MASK |
+		LIF_INT_STATUS_INTFECBUFPUSHFULL_MASK |
+		LIF_INT_STATUS_INTUPTIMEFULLUPDSTAT_MASK |
+		LIF_INT_STATUS_INTFROUTOFALIGNSTAT_MASK |
+		LIF_INT_STATUS_INTGRNTSTARTTIMELAGSTAT_MASK |
+		LIF_INT_STATUS_INTABORTRXFRMSTAT_MASK |
+		LIF_INT_STATUS_INTNORXCLKSTAT_MASK |
+		LIF_INT_STATUS_INTRXMAXLENERRSTAT_MASK |
+		LIF_INT_STATUS_INTRXERRAFTALIGNSTAT_MASK |
+		LIF_INT_STATUS_INTRXSYNCHACQSTAT_MASK |
+		LIF_INT_STATUS_INTRXOUTOFSYNCHSTAT_MASK;
+	epon_lif_reg_writel(mode, LIF_INT_STATUS_REG, val);
+
+	/* enable laser monitoring interrupt */
+	val = LIF_INT_STATUS_LASERONMAX_MASK;
+	epon_lif_reg_writel(mode, LIF_INT_MASK_REG, val);
+
+	/* setup laser monitoring threshold, 250ms of continuous laser on */
+	val = USEC_TO_TQ(250 * 1000);
+	epon_lif_reg_writel(mode, LIF_LSR_MON_A_MAX_THR_REG, val);
+
+	/* enable laser monitoring */
+	val = LIF_LSR_MON_A_CTRL_PBILASERMONRSTA_N_PRE_MASK;
+	if (mode->laser_active_hi)
+		val |= LIF_LSR_MON_A_CTRL_CFGLSRMONACTHI_MASK;
+	epon_lif_reg_writel(mode, LIF_LSR_MON_A_CTRL_REG, val);
+
+	if (mode->down_speed == 1000) {
+		/* FIXME: implement */
+		BUG();	/* NOTE(review): 1G downstream rx path not implemented yet */
+	}
+
+	return 0;
+}
+
+/*
+ * program burst timing overhead (laser on + sync in front, laser off
+ * at the back, all in time quanta) into the interface block and the
+ * EPN grant-overhead registers; for_discovery selects the discovery
+ * registers instead of the normal-operation ones
+ */
+static void epon_hw_set_idle_time(struct xport_epon_priv *mode,
+				  unsigned int laser_on_time,
+				  unsigned int sync_time,
+				  unsigned int laser_off_time,
+				  bool for_discovery)
+{
+	unsigned int front, back, total;
+	unsigned int timestamp_off;
+	u32 val;
+
+	/*
+	 * sanity check, don't go below what hardware can do
+	 */
+	if (laser_on_time < DEF_LASER_ON_TIME)
+		laser_on_time = DEF_LASER_ON_TIME;
+	if (laser_off_time < DEF_LASER_OFF_TIME)
+		laser_off_time = DEF_LASER_OFF_TIME;
+
+	front = sync_time + laser_on_time;
+	back = laser_off_time;
+	total = front + back;
+
+	switch (mode->up_speed) {
+	case 1000:
+		lif_set_idle_time(mode, front, back);
+		break;
+	case 10000:
+		/* FIXME: implement */
+		BUG();
+		break;
+	}
+
+	timestamp_off = BYTES_TO_TQ_1G(PREAMBLE_LEN_BYTES) + front;	/* NOTE(review): computed but never used — should the writes below use timestamp_off instead of front? confirm */
+	val = front << UP_TIME_STAMP_OFF_TIMESTAMPOFFSETFEC_SHIFT;
+	val |= front << UP_TIME_STAMP_OFF_TIMESTAMPOFFSET_SHIFT;
+	epon_epn_reg_writel(mode, EPN_UP_TIME_STAMP_OFF_REG, val);
+
+	if (for_discovery) {
+		val = total << DISC_GRANT_OVR_HD_DISCGNTOVRHD_SHIFT;
+		epon_epn_reg_writel(mode, EPN_DISC_GRANT_OVR_HD_REG, val);
+
+		/* note: bcm driver accounts FEC here */
+		val = (total + get_register_req_data_duration(mode)) <<
+			DN_DISCOVERY_SIZE_CFGDISCSIZE_SHIFT;
+		epon_epn_reg_writel(mode, EPN_DN_DISCOVERY_SIZE_REG, val);
+	} else {
+		val = (total << GRANT_OVR_HD_GNTOVRHDFEC_SHIFT) |
+			(total << GRANT_OVR_HD_GNTOVRHD_SHIFT);
+		epon_epn_reg_writel(mode, EPN_GRANT_OVR_HD_REG, val);
+	}
+}
+
+/*
+ * set laser on/off + sync time to use in normal operation
+ */
+static void epon_hw_set_normal_idle_time(struct xport_epon_priv *mode,
+					 unsigned int laser_on_time,
+					 unsigned int sync_time,
+					 unsigned int laser_off_time)
+{
+	epon_hw_set_idle_time(mode,
+			      laser_on_time,
+			      sync_time,
+			      laser_off_time,
+			      false);	/* for_discovery = false */
+}
+
+/*
+ * set laser on/off + sync time to use during discovery
+ */
+static void epon_hw_set_disc_idle_time(struct xport_epon_priv *mode,
+				       unsigned int laser_on_time,
+				       unsigned int sync_time,
+				       unsigned int laser_off_time)
+{
+	epon_hw_set_idle_time(mode,
+			      laser_on_time,
+			      sync_time,
+			      laser_off_time,
+			      true);	/* for_discovery = true */
+}
+
+/*
+ * carve the EPN L2 queue RAM into 32 contiguous fixed-size queues by
+ * programming each queue's start/end addresses (in 4-byte words)
+ */
+static void epn_init_l2_sizes(struct xport_epon_priv *mode)
+{
+	/* FIXME: this depends on reporting type */
+	const u32 l2_size = 824;	/* bytes per queue */
+	u32 start_addr;
+	size_t i;
+
+	start_addr = 0;
+	for (i = 0; i < 8; i++) {
+		u32 end_addr, val;
+
+		end_addr = start_addr + (l2_size >> 2);	/* size in words */
+		val = (end_addr << TX_L2S_QUE_CONFIGx_0_7_CFGL2SQUEEND0_SHIFT) |
+			(start_addr << TX_L2S_QUE_CONFIGx_0_7_CFGL2SQUESTART0_SHIFT);
+		epon_epn_reg_writel(mode,
+				    EPN_TX_L2S_QUE_CONFIGx_0_7_REG(i),
+				    val);
+		start_addr = end_addr + 1;
+	}
+
+	/* queues 8..31 use a second register bank with its own layout */
+	for (i = 8; i < 32; i++) {
+		u32 end_addr, val;
+
+		end_addr = start_addr + (l2_size >> 2);
+		val = (end_addr << TX_L2S_QUE_CONFIGx_8_31_CFGL2SQUEEND8_SHIFT) |
+			(start_addr << TX_L2S_QUE_CONFIGx_8_31_CFGL2SQUESTART8_SHIFT);
+		epon_epn_reg_writel(mode,
+				    EPN_TX_L2S_QUE_CONFIGx_8_31_REG(i),
+				    val);
+		start_addr = end_addr + 1;
+	}
+}
+
+/*
+ * drive the laser enable through the block matching the upstream
+ * rate; only the 1G (LIF) path is implemented so far.
+ */
+static void epon_hw_set_laser_normal_op(struct xport_epon_priv *mode,
+					bool normal_op)
+{
+	if (mode->up_speed != 1000) {
+		/* FIXME: implement */
+		BUG();
+		return;
+	}
+	lif_set_laser_normal_op(mode, normal_op);
+}
+
+/*
+ * report loss-of-signal state; a latched LOS interrupt is
+ * acknowledged by writing the bit back before reporting it. Only
+ * the 10G (XPCS) receive path is handled.
+ */
+static bool epon_hw_get_los(struct xport_epon_priv *mode)
+{
+	u32 val;
+
+	if (mode->down_speed != 10000)
+		BUG();
+
+	val = epon_xpcsrx_reg_readl(mode, XPCSRX_RX_INT_STAT_REG);
+	if (!(val & RX_INT_STAT_INTRXFRAMERCWLOSS_MASK))
+		return false;
+
+	/* clear interrupt */
+	epon_xpcsrx_reg_writel(mode, XPCSRX_RX_INT_STAT_REG,
+			       RX_INT_STAT_INTRXFRAMERCWLOSS_MASK);
+	return true;
+}
+
+/*
+ * program one LLID entry in whichever MAC blocks are active for the
+ * configured rates (XIF when either direction runs at 10G, LIF when
+ * either direction runs at 1G).
+ */
+static void epon_hw_set_llid(struct xport_epon_priv *mode,
+			     unsigned int llid_idx,
+			     u16 llid,
+			     bool enabled)
+{
+	bool has_10g = (mode->up_speed == 10000 ||
+			mode->down_speed == 10000);
+	bool has_1g = (mode->up_speed == 1000 ||
+		       mode->down_speed == 1000);
+
+	if (has_10g)
+		xif_set_llid(mode, llid_idx, llid, enabled);
+	if (has_1g)
+		lif_set_llid(mode, llid_idx, llid, enabled);
+}
+
+/*
+ * allow or block delivery of GATE frames for the given LLID index
+ * (read-modify-write of the per-LLID bit in EPN_PASS_GATES).
+ */
+static void epon_hw_pass_gates_frames(struct xport_epon_priv *mode,
+				      unsigned int llid_idx,
+				      bool pass)
+{
+	u32 bit = 1 << llid_idx;
+	u32 val = epon_epn_reg_readl(mode, EPN_PASS_GATES_REG);
+
+	if (pass)
+		val |= bit;
+	else
+		val &= ~bit;
+	epon_epn_reg_writel(mode, EPN_PASS_GATES_REG, val);
+}
+
+/*
+ * fetch the current local MPCP time counter; only the 10G (XIF)
+ * source is wired up for now.
+ */
+static u32 epon_hw_get_local_mpcp_time(struct xport_epon_priv *mode)
+{
+	if (mode->down_speed != 10000) {
+		/* implement for LIF */
+		BUG();
+		return 0;
+	}
+	return xif_get_local_mpcp_time(mode);
+}
+
+/*
+ * load a downstream decryption key and SCI for an LLID; 10G (XIF)
+ * path only.
+ */
+static void epon_hw_set_down_key(struct xport_epon_priv *mode,
+				 unsigned int llid_idx,
+				 unsigned int key_idx,
+				 const uint8_t key[16],
+				 const uint8_t sci[8])
+{
+	if (mode->down_speed != 10000) {
+		/* implement for LIF */
+		BUG();
+		return;
+	}
+	xif_set_down_key(mode, llid_idx, key_idx, key, sci);
+}
+
+/*
+ * query whether the last frame received on an LLID was encrypted
+ * and with which key index; 10G (XIF) path only.
+ */
+static void
+epon_hw_get_llid_last_rx_encrypt(struct xport_epon_priv *mode,
+				 unsigned int llid_idx,
+				 bool *last_encrypted,
+				 unsigned int *last_encrypted_key_id)
+{
+	if (mode->down_speed != 10000) {
+		/* implement for LIF */
+		BUG();
+		return;
+	}
+	xif_get_llid_last_rx_encrypt(mode, llid_idx, last_encrypted,
+				     last_encrypted_key_id);
+}
+
+/*
+ * read one word from an internal EPN statistics RAM through the
+ * indirect data port.
+ *
+ * Returns 0 on success with *ret_value filled, 1 on timeout (busy
+ * bit stuck), in which case *ret_value is poisoned with 0xdeadbeef.
+ */
+static int epon_hw_data_port_read(struct xport_epon_priv *mode,
+				  unsigned int port,
+				  unsigned int ram_offset,
+				  u32 *ret_value)
+{
+	u32 val;
+	size_t i;
+
+	epon_epn_reg_writel(mode, EPN_DATA_PORT_ADDR_REG, ram_offset);
+
+	val = (port << DATA_PORT_COMMAND_DPORTSELECT_SHIFT);
+	epon_epn_reg_writel(mode, EPN_DATA_PORT_COMMAND_REG, val);
+
+	/* poll (up to 100 * 5us) until the busy bit clears; the
+	 * previous code broke out of the loop as soon as busy was
+	 * *set*, which made any in-progress access hit the "busy
+	 * does not clear" error path */
+	for (i = 0; i < 100; i++) {
+		val = epon_epn_reg_readl(mode, EPN_DATA_PORT_COMMAND_REG);
+		if (!(val & DATA_PORT_COMMAND_DPORTBUSY_MASK))
+			break;
+		udelay(5);
+	}
+
+	if ((val & DATA_PORT_COMMAND_DPORTBUSY_MASK)) {
+		netdev_err(mode->port->priv->netdev,
+			   "data port busy does not clear\n");
+		*ret_value = 0xdeadbeef;
+		return 1;
+	}
+
+	*ret_value = epon_epn_reg_readl(mode, EPN_DATA_PORT_DATA_0_REG);
+	return 0;
+}
+
+/*
+ * drop the RX mapping for an LLID index by programming a disabled,
+ * zeroed entry.
+ */
+static void epon_hw_link_remove_llid(struct xport_epon_priv *mode,
+				     unsigned int llid_idx)
+{
+	epon_hw_set_llid(mode, llid_idx, 0, false);
+}
+
+/*
+ * (re)program an LLID index with a new enabled LLID value.
+ */
+static void epon_hw_link_update_llid(struct xport_epon_priv *mode,
+				     unsigned int llid_idx,
+				     unsigned int new_llid)
+{
+	/* FIXME: should reset grant FIFOs and maybe do other stuff */
+	epon_hw_set_llid(mode, llid_idx, new_llid, true);
+}
+
+/*
+ * enable upstream transmission for one LLID index: release the L2
+ * report and L1 accumulator FIFO resets, enable upstream processing
+ * and wait for the hardware feedback bit to confirm, then release
+ * the grant FIFO reset and enable grant processing.  The register
+ * ordering follows the reverse of epon_hw_link_stop_tx().
+ */
+static void epon_hw_link_start_tx(struct xport_epon_priv *mode,
+				  unsigned int llid_idx)
+{
+	u32 val;
+	size_t i;
+
+	/* unreset L2 & L1 fifo */
+	val = epon_epn_reg_readl(mode, EPN_RESET_L2_RPT_FIFO_REG);
+	val &= ~(1 << llid_idx);
+	epon_epn_reg_writel(mode, EPN_RESET_L2_RPT_FIFO_REG, val);
+
+	val = epon_epn_reg_readl(mode, EPN_RESET_L1_ACCUMULATOR_REG);
+	val &= ~(1 << llid_idx);
+	epon_epn_reg_writel(mode, EPN_RESET_L1_ACCUMULATOR_REG, val);
+
+	/* start upstream processing */
+	val = epon_epn_reg_readl(mode, EPN_ENABLE_UPSTREAM_REG);
+	val |= (1 << llid_idx);
+	epon_epn_reg_writel(mode, EPN_ENABLE_UPSTREAM_REG, val);
+
+	/* monitor feedback: poll (up to 100 * 5us) for the hardware
+	 * to mirror the enable bit in the feedback register */
+	for (i = 0; i < 100; i++) {
+		val = epon_epn_reg_readl(mode, EPN_ENABLE_UPSTREAM_FB_REG);
+		if (val & (1 << llid_idx))
+			break;
+		udelay(5);
+	}
+
+	if (!(val & (1 << llid_idx))) {
+		/* not fatal here: report and carry on */
+		netdev_err(mode->port->priv->netdev,
+			   "upstream feedback wont toggle for llid %u\n",
+			   llid_idx);
+	}
+
+	/* unreset grant fifo */
+	val = epon_epn_reg_readl(mode, EPN_RESET_GNT_FIFO_REG);
+	val &= ~(1 << llid_idx);
+	epon_epn_reg_writel(mode, EPN_RESET_GNT_FIFO_REG, val);
+
+	/* start processing grants */
+	val = epon_epn_reg_readl(mode, EPN_ENABLE_GRANTS_REG);
+	val |= (1 << llid_idx);
+	epon_epn_reg_writel(mode, EPN_ENABLE_GRANTS_REG, val);
+}
+
+/*
+ * true when the given L2 report queue has no pending data.
+ */
+static bool epon_hw_l2_queue_is_empty(struct xport_epon_priv *mode,
+				      unsigned int l2_idx)
+{
+	u32 val = epon_epn_reg_readl(mode, EPN_TX_L2S_QUE_EMPTY_REG);
+
+	return !!(val & (1 << l2_idx));
+}
+
+/*
+ * disable upstream transmission for one LLID index and drain it:
+ * stop grant and upstream processing, flush the L2 report FIFO
+ * until both the L2 queue and the BBH report empty, then put the
+ * L1/L2/grant FIFOs back into reset.
+ */
+static void epon_hw_link_stop_tx(struct xport_epon_priv *mode,
+				 unsigned int llid_idx)
+{
+	u32 val;
+	size_t i, j;
+
+	/* stop processing grants */
+	val = epon_epn_reg_readl(mode, EPN_ENABLE_GRANTS_REG);
+	val &= ~(1 << llid_idx);
+	epon_epn_reg_writel(mode, EPN_ENABLE_GRANTS_REG, val);
+
+	/* stop upstream processing */
+	val = epon_epn_reg_readl(mode, EPN_ENABLE_UPSTREAM_REG);
+	val &= ~(1 << llid_idx);
+	epon_epn_reg_writel(mode, EPN_ENABLE_UPSTREAM_REG, val);
+
+	/* monitor feedback: poll (up to 100 * 5us) for the hardware
+	 * to drop the bit in the feedback register */
+	for (i = 0; i < 100; i++) {
+		val = epon_epn_reg_readl(mode, EPN_ENABLE_UPSTREAM_FB_REG);
+		if (!(val & (1 << llid_idx)))
+			break;
+		udelay(5);
+	}
+
+	if ((val & (1 << llid_idx))) {
+		netdev_err(mode->port->priv->netdev,
+			   "upstream feedback wont clear for llid %u\n",
+			   llid_idx);
+	}
+
+	/* outer loop: flush repeatedly until both the L2 queue and
+	 * the BBH are observed empty (or we give up after 100 tries) */
+	for (i = 0; i < 100; i++) {
+		/* flush l2 FIFO until its empty */
+		val = llid_idx << L2S_FLUSH_CONFIG_CFGFLUSHL2SSEL_SHIFT;
+		epon_epn_reg_writel(mode,
+				    EPN_L2S_FLUSH_CONFIG_REG,
+				    val);
+		/* queue select is written first, then the enable bit
+		 * is set to start the flush */
+		val |= L2S_FLUSH_CONFIG_CFGFLUSHL2SEN_MASK;
+		epon_epn_reg_writel(mode,
+				    EPN_L2S_FLUSH_CONFIG_REG,
+				    val);
+
+		/* wait for the done bit to raise */
+		for (j = 0; j < 100; j++) {
+			val = epon_epn_reg_readl(mode,
+						 EPN_L2S_FLUSH_CONFIG_REG);
+			if (val & L2S_FLUSH_CONFIG_FLUSHL2SDONE_MASK)
+				break;
+			udelay(5);
+		}
+
+		if (!(val & L2S_FLUSH_CONFIG_FLUSHL2SDONE_MASK)) {
+			netdev_err(mode->port->priv->netdev,
+				   "L2 flush not done %u\n", llid_idx);
+			break;
+		}
+
+		/* clear the enable bit and wait for done to drop */
+		val = llid_idx << L2S_FLUSH_CONFIG_CFGFLUSHL2SSEL_SHIFT;
+		epon_epn_reg_writel(mode,
+				    EPN_L2S_FLUSH_CONFIG_REG,
+				    val);
+		for (j = 0; j < 100; j++) {
+			val = epon_epn_reg_readl(mode,
+						 EPN_L2S_FLUSH_CONFIG_REG);
+			if (!(val & L2S_FLUSH_CONFIG_FLUSHL2SDONE_MASK))
+				break;
+			udelay(5);
+		}
+
+		if ((val & L2S_FLUSH_CONFIG_FLUSHL2SDONE_MASK)) {
+			netdev_err(mode->port->priv->netdev,
+				   "L2 flush does not stop %u\n", llid_idx);
+			break;
+		}
+
+		udelay(100);
+
+		/* stop once everything has drained */
+		if (epon_hw_l2_queue_is_empty(mode, llid_idx) &&
+		    bcm_runner_fw_bbh_is_empty(mode->port->priv))
+			break;
+	}
+
+	if (!epon_hw_l2_queue_is_empty(mode, llid_idx))
+		netdev_err(mode->port->priv->netdev,
+			   "L2 queue %u not empty after reset\n", llid_idx);
+
+	if (!bcm_runner_fw_bbh_is_empty(mode->port->priv))
+		netdev_err(mode->port->priv->netdev,
+			   "BBH queue %u not empty after reset\n", llid_idx);
+
+	/* reset L2 & L1 fifo */
+	val = epon_epn_reg_readl(mode, EPN_RESET_L1_ACCUMULATOR_REG);
+	val |= (1 << llid_idx);
+	epon_epn_reg_writel(mode, EPN_RESET_L1_ACCUMULATOR_REG, val);
+
+	val = epon_epn_reg_readl(mode, EPN_RESET_L2_RPT_FIFO_REG);
+	val |= (1 << llid_idx);
+	epon_epn_reg_writel(mode, EPN_RESET_L2_RPT_FIFO_REG, val);
+
+	/* reset grant fifo */
+	val = epon_epn_reg_readl(mode, EPN_RESET_GNT_FIFO_REG);
+	val |= (1 << llid_idx);
+	epon_epn_reg_writel(mode, EPN_RESET_GNT_FIFO_REG, val);
+
+}
+
+/*
+ * program the upstream burst cap: bounds the maximum grant size and,
+ * on 1G upstream, the per-queue burst cap register (queue 0 only).
+ */
+static void epon_hw_set_burst_cap(struct xport_epon_priv *mode,
+				  unsigned int burst_cap)
+{
+	u32 val;
+
+	/* update max grant size */
+	val = epon_epn_reg_readl(mode, EPN_MAX_GNT_SIZE_REG);
+	val &= ~MAX_GNT_SIZE_MAXGNTSIZE_MASK;
+	val |= burst_cap << MAX_GNT_SIZE_MAXGNTSIZE_SHIFT;
+	epon_epn_reg_writel(mode, EPN_MAX_GNT_SIZE_REG, val);
+
+	if (mode->up_speed != 1000) {
+		/* FIXME: implement */
+		BUG();
+		return;
+	}
+	epon_epn_reg_writel(mode, EPN_BURST_CAPx_0_7_REG(0), burst_cap);
+}
+
+/*
+ * one-time bring-up of the ePON hardware: reset all sub-blocks,
+ * program EPN defaults, set the MPCP/ONU mac addresses, then
+ * initialize the rate-specific LIF/XIF blocks and the default
+ * burst cap.
+ */
+static void epon_hw_init(struct xport_epon_priv *mode)
+{
+	size_t i;
+	u32 val;
+
+	/* sanity check */
+	val = epon_top_reg_readl(mode, EPON_TOP_SCRATCH_REG);
+	if (val != 0x1baddad) {
+		netdev_err(mode->port->priv->netdev,
+			   "epon module not functional\n");
+		return;
+	}
+
+	/* put all ePON blocks in reset */
+	epon_reset_modules(mode, true,
+			   RESET_EPNRST_N_MASK |
+			   RESET_LIFRST_N_MASK |
+			   RESET_NCORST_N_MASK |
+			   RESET_CLKPRGRST_N_MASK |
+			   RESET_TODRST_N_MASK |
+			   RESET_XIFRST_N_MASK |
+			   RESET_XPCSTXRST_N_MASK |
+			   RESET_XPCSRXRST_N_MASK);
+	msleep(20);
+
+	/* clear all epon top interrupts */
+	val = INTERRUPT_INT_1PPS_MASK |
+		INTERRUPT_INT_XPCS_TX_MASK |
+		INTERRUPT_INT_XPCS_RX_MASK |
+		INTERRUPT_INT_XIF_MASK |
+		INTERRUPT_INT_NCO_MASK |
+		INTERRUPT_INT_LIF_MASK |
+		INTERRUPT_INT_EPN_MASK;
+	epon_top_reg_writel(mode, EPON_TOP_INTERRUPT_REG, val);
+
+	/*
+	 * set epon top rate (10G bits stay clear for 1G operation)
+	 */
+	val = 0;
+	switch (mode->down_speed) {
+	case 1000:
+		break;
+	case 10000:
+		val |= CONTROL_CFGTENGIGDNS_MASK;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	switch (mode->up_speed) {
+	case 1000:
+		break;
+	case 10000:
+		val |= CONTROL_CFGTENGIGPONUP_MASK;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	epon_top_reg_writel(mode, EPON_TOP_CONTROL_REG, val);
+
+	/* release epn reset */
+	epon_reset_modules(mode, false, RESET_EPNRST_N_MASK);
+	msleep(20);
+
+	/* epn control 0: enable rx/tx paths, drop frames for
+	 * unmapped LLIDs, regenerate upstream FCS */
+	val = CONTROL_0_PRVDROPUNMAPPPEDLLID_MASK |
+		CONTROL_0_CFGREPLACEUPFCS_MASK |
+		CONTROL_0_CFGAPPENDUPFCS_MASK |
+		CONTROL_0_DRXRST_PRE_N_MASK |
+		CONTROL_0_DRXEN_MASK |
+		CONTROL_0_UTXRST_PRE_N_MASK |
+		CONTROL_0_UTXEN_MASK;
+	val &= ~CONTROL_0_RPTSELECT_MASK;
+	val |= (1 << CONTROL_0_RPTSELECT_SHIFT);
+	epon_epn_reg_writel(mode, EPN_CONTROL_0_REG, val);
+
+	/* max grant size */
+	val = epon_epn_reg_readl(mode, EPN_MAX_GNT_SIZE_REG);
+	val &= ~MAX_GNT_SIZE_MAXGNTSIZE_MASK;
+	val |= 20000 << MAX_GNT_SIZE_MAXGNTSIZE_SHIFT;
+	epon_epn_reg_writel(mode, EPN_MAX_GNT_SIZE_REG, val);
+
+	/* time stamp diff */
+	val = epon_epn_reg_readl(mode, EPN_TIME_STAMP_DIFF_REG);
+	val &= ~TIME_STAMP_DIFF_TIMESTAMPDIFFDELTA_MASK;
+	val |= 0x40UL << TIME_STAMP_DIFF_TIMESTAMPDIFFDELTA_SHIFT;
+	epon_epn_reg_writel(mode, EPN_TIME_STAMP_DIFF_REG, val);
+
+	/* ack any pending main interrupt */
+	epon_epn_reg_writel(mode, EPN_MAIN_INT_STATUS_REG, 0xffffffff);
+
+	/* grant time start delta */
+	val = epon_epn_reg_readl(mode, EPN_GNT_TIME_START_DELTA_REG);
+	val &= ~GNT_TIME_START_DELTA_GNTSTARTTIMEDELTA_MASK;
+	val |= 0x3e8UL << GNT_TIME_START_DELTA_GNTSTARTTIMEDELTA_SHIFT;
+	epon_epn_reg_writel(mode, EPN_GNT_TIME_START_DELTA_REG, val);
+
+	/* grant start time margin */
+	val = epon_epn_reg_readl(mode, EPN_DN_RD_GNT_MARGIN_REG);
+	val &= ~DN_RD_GNT_MARGIN_RDGNTSTARTMARGIN_MASK;
+	val |= 0x3ffUL << DN_RD_GNT_MARGIN_RDGNTSTARTMARGIN_SHIFT;
+	epon_epn_reg_writel(mode, EPN_DN_RD_GNT_MARGIN_REG, val);
+
+	/* misalign threshold */
+	val = epon_epn_reg_readl(mode, EPN_DN_GNT_MISALIGN_THR_REG);
+	val &= ~DN_GNT_MISALIGN_THR_PRVUNUSEDGNTTHRESHOLD_MASK;
+	val |= 0 << DN_GNT_MISALIGN_THR_PRVUNUSEDGNTTHRESHOLD_SHIFT;
+	val &= ~DN_GNT_MISALIGN_THR_GNTMISALIGNTHRESH_MASK;
+	val |= 2 << DN_GNT_MISALIGN_THR_GNTMISALIGNTHRESH_SHIFT;
+	epon_epn_reg_writel(mode, EPN_DN_GNT_MISALIGN_THR_REG, val);
+
+	/* misalign pause */
+	val = epon_epn_reg_readl(mode, EPN_DN_GNT_MISALIGN_PAUSE_REG);
+	val &= ~DN_GNT_MISALIGN_PAUSE_GNTMISALIGNPAUSE_MASK;
+	val |= 300 << DN_GNT_MISALIGN_PAUSE_GNTMISALIGNPAUSE_SHIFT;
+	epon_epn_reg_writel(mode, EPN_DN_GNT_MISALIGN_PAUSE_REG, val);
+
+	/* grant interval */
+	val = epon_epn_reg_readl(mode, EPN_GNT_INTERVAL_REG);
+	val &= ~GNT_INTERVAL_GNTINTERVAL_MASK;
+	/* 1 second in 262us units */
+	val |= (1000000 / 262) << GNT_INTERVAL_GNTINTERVAL_SHIFT;
+	epon_epn_reg_writel(mode, EPN_GNT_INTERVAL_REG, val);
+
+	/* report byte length */
+	val = epon_epn_reg_readl(mode, EPN_REPORT_BYTE_LENGTH_REG);
+	val &= ~REPORT_BYTE_LENGTH_PRVRPTBYTELEN_MASK;
+	val |= 84 << REPORT_BYTE_LENGTH_PRVRPTBYTELEN_SHIFT;
+	epon_epn_reg_writel(mode, EPN_REPORT_BYTE_LENGTH_REG, val);
+
+	/* minimum grant setup */
+	val = epon_epn_reg_readl(mode, EPN_MINIMUM_GRANT_SETUP_REG);
+	val &= ~MINIMUM_GRANT_SETUP_CFGMINGRANTSETUP_MASK;
+	val |= 0x64UL << MINIMUM_GRANT_SETUP_CFGMINGRANTSETUP_SHIFT;
+	epon_epn_reg_writel(mode, EPN_MINIMUM_GRANT_SETUP_REG, val);
+
+	/* spare register */
+	val = SPARE_CTL_ECOJIRA758ENABLE_MASK;
+	epon_epn_reg_writel(mode, EPN_SPARE_CTL_REG, val);
+
+	/* put all l1/l2 queue in reset */
+	epon_epn_reg_writel(mode, EPN_RESET_L1_ACCUMULATOR_REG, 0xffffffffUL);
+	epon_epn_reg_writel(mode, EPN_RESET_L2_RPT_FIFO_REG, 0xffffffffUL);
+
+	/* disable grant processing on all LLIDs */
+	epon_epn_reg_writel(mode, EPN_ENABLE_GRANTS_REG, 0);
+
+	/* keep grant fifo in reset */
+	epon_epn_reg_writel(mode, EPN_RESET_GNT_FIFO_REG, 0xffffffffUL);
+
+	/* stop upstream processing */
+	epon_epn_reg_writel(mode, EPN_ENABLE_UPSTREAM_REG, 0);
+
+	/* monitor feedback */
+	for (i = 0; i < 100; i++) {
+		val = epon_epn_reg_readl(mode, EPN_ENABLE_UPSTREAM_FB_REG);
+		if (val == 0)
+			break;
+		udelay(5);
+	}
+
+	if (val)
+		netdev_err(mode->port->priv->netdev,
+			   "failed to disable upstream on all llid\n");
+
+	/* epn control 1 */
+	val = CONTROL_1_CFGSTALEGNTCHK_MASK |
+		CONTROL_1_DISABLEDISCSCALE_MASK |
+		CONTROL_1_CLRONRD_MASK |
+		0;
+	epon_epn_reg_writel(mode, EPN_CONTROL_1_REG, val);
+
+	/* setup discovery filter so we only accept discovery gates
+	 * for the correct upstream speed */
+	if (mode->down_speed == 10000) {
+		u16 mask, value;
+
+		/* default to not care */
+		mask = 0xffff;
+		value = get_disc_info_caps(mode, &mask);
+
+		val = (mask << DISCOVERY_FILTER_PRVDISCINFOMASK_SHIFT) |
+			(value << DISCOVERY_FILTER_PRVDISCINFOVALUE_SHIFT);
+		epon_epn_reg_writel(mode, EPN_DISCOVERY_FILTER_REG, val);
+	}
+
+	/* setup DA mac address for report frames, use standard mpcp
+	 * multicast */
+	epon_epn_reg_writel(mode, EPN_OLT_MAC_ADDR_HI_REG,
+			    extract_mac_addr_hi(mpcp_frame_da));
+	epon_epn_reg_writel(mode, EPN_OLT_MAC_ADDR_LO_REG,
+			    extract_mac_addr_lo(mpcp_frame_da));
+
+	/* setup our mac address on every LLID, this only matters when
+	 * multiple TX LLIDs are used since they must all differ */
+	for (i = 0; i < 8; i++) {
+		epon_epn_reg_writel(mode,
+				    EPN_ONU_MAC_ADDRx_0_7_LO_REG(i),
+				    extract_mac_addr_lo(mode->mac_addr));
+		epon_epn_reg_writel(mode,
+				    EPN_ONU_MAC_ADDRx_0_7_HI_REG(i),
+				    extract_mac_addr_hi(mode->mac_addr));
+	}
+	for (i = 8; i < 32; i++) {
+		epon_epn_reg_writel(mode,
+				    EPN_ONU_MAC_ADDRx_8_31_LO_REG(i),
+				    extract_mac_addr_lo(mode->mac_addr));
+		/* bugfix: the high half was previously written to the
+		 * LO register, clobbering the low half and leaving
+		 * the high half unprogrammed for LLIDs 8-31 */
+		epon_epn_reg_writel(mode,
+				    EPN_ONU_MAC_ADDRx_8_31_HI_REG(i),
+				    extract_mac_addr_hi(mode->mac_addr));
+	}
+
+	if (mode->up_speed == 1000 || mode->down_speed == 1000) {
+		if (lif_init(mode))
+			return;
+	}
+
+	if (mode->down_speed == 10000 || mode->up_speed == 10000) {
+		if (xif_init(mode))
+			return;
+	}
+
+	epon_hw_set_laser_normal_op(mode, true);
+
+	/* set default reporting mode using only one queue (set before
+	 * in control_0), clear all multi-prio bits */
+	val = epon_epn_reg_readl(mode, EPN_MULTI_PRI_CFG_0_REG);
+	val &= ~MULTI_PRI_CFG_0_CFGRPTMULTIPRI0_MASK;
+	val &= ~MULTI_PRI_CFG_0_CFGRPTSWAPQS0_MASK;
+	val &= ~MULTI_PRI_CFG_0_CFGRPTGNTSOUTST0_MASK;
+	val &= ~MULTI_PRI_CFG_0_CFGSHAREDL2_MASK;
+	val &= ~MULTI_PRI_CFG_0_CFGSHAREDBURSTCAP_MASK;
+	epon_epn_reg_writel(mode, EPN_MULTI_PRI_CFG_0_REG, val);
+
+	/* setup timer to trigger an interrupt if we don't get grants
+	 * on a llid, (unit is 262 us) */
+	epon_epn_reg_writel(mode,
+			    EPN_GNT_INTERVAL_REG,
+			    (10 * 1000 * 1000) / 262);
+
+	/* setup L2 sizes */
+	epn_init_l2_sizes(mode);
+
+	for (i = 0; i < 8; i++) {
+		/* same as typo in bcm code */
+		bcm_xrdp_api_pon_flow_id_set(mode->port->priv->xrdp,
+					     i, 1);
+	}
+
+	epon_hw_set_burst_cap(mode, DEFAULT_BURST_CAP);
+}
+
+/*
+ * refresh the software MIB from the hardware counter sources.
+ *
+ * Each epon_mib_estat entry describes where a counter lives (XPCS
+ * RX / XIF / LIF / EPN register, EPN L1 accumulator or EPN stat
+ * RAM); the value read is either added to (incr) or stored into
+ * the u64 field at s->offset inside mode->mib.
+ */
+static void mode_epon_mib_update(void *mode_priv)
+{
+	struct xport_epon_priv *mode = mode_priv;
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(epon_mib_estat); i++) {
+		const struct bcm_runner_ethtool_stat *s;
+		u64 val;
+		bool incr;
+		char *p;
+
+		s = &epon_mib_estat[i];
+		val = 0;
+		incr = false;
+		switch (s->type) {
+		case EPON_STAT_XPCS_RX:
+			val = epon_xpcsrx_reg_readl(mode, s->reg);
+			incr = true;
+			break;
+		case EPON_STAT_XPCS_RX64:
+			/* 64 bit counter split over two consecutive
+			 * 32 bit registers, low word first */
+			val = epon_xpcsrx_reg_readl(mode, s->reg);
+			val |= (u64)epon_xpcsrx_reg_readl(mode,
+							  s->reg + 0x4) << 32;
+			incr = true;
+			break;
+		case EPON_STAT_XIF:
+			val = epon_xif_reg_readl(mode, s->reg);
+			incr = true;
+			break;
+		case EPON_STAT_LIF:
+			val = epon_lif_reg_readl(mode, s->reg);
+			incr = true;
+			break;
+		case EPON_STAT_EPN:
+			val = epon_epn_reg_readl(mode, s->reg);
+			incr = true;
+			break;
+		case EPON_STAT_EPN_L1_ACC:
+		{
+			/* s->reg holds the L1 accumulator index; the
+			 * read is a level, so it is stored rather
+			 * than accumulated (incr stays false) */
+			u32 val32;
+			unsigned int l1 = s->reg;
+
+			val32 = (l1 << L1_ACCUMULATOR_SEL_CFGL1SUVASIZESEL_SHIFT) |
+				(l1 << L1_ACCUMULATOR_SEL_CFGL1SSVASIZESEL_SHIFT);
+			epon_epn_reg_writel(mode, EPN_L1_ACCUMULATOR_SEL_REG,
+					    val32);
+			val = epon_epn_reg_readl(mode, EPN_L1_SVA_BYTES_REG);
+			incr = false;
+			break;
+		}
+		case EPON_STAT_EPN_RAM:
+		{
+			/* s->reg encodes llid (bits 31-24), data port
+			 * (bits 23-16) and stat index (bits 15-0) */
+			unsigned int llid, port, stat, ram_offset;
+			u32 val32;
+
+			llid = (s->reg >> 24) & 0xff;
+			port = (s->reg >> 16) & 0xff;
+			stat = (s->reg) & 0xffff;
+
+			/* per-LLID stride differs between the two
+			 * stat RAMs (21 vs 17 words per LLID) */
+			ram_offset = 0;
+			switch (port) {
+			case 0:
+				ram_offset = (llid * 21) + stat;
+				break;
+			case 1:
+				ram_offset = (llid * 17) + stat;
+				break;
+			}
+
+			epon_hw_data_port_read(mode, port, ram_offset, &val32);
+			val = val32;
+			incr = true;
+			break;
+		}
+		case EPON_STAT_LOCAL:
+			/* presumably maintained by software elsewhere;
+			 * nothing to read here */
+			continue;
+		}
+
+		/* fold the value into the mib structure */
+		p = (char *)&mode->mib + s->offset;
+		if (incr)
+			*(u64 *)p += val;
+		else
+			*(u64 *)p = val;
+	}
+}
+
+/*
+ * expose the per-mode MIB block to the generic ethtool code.
+ */
+static void *mode_epon_mib_get_data(void *mode_priv)
+{
+	struct xport_epon_priv *mode = mode_priv;
+
+	return &mode->mib;
+}
+
+/*
+ * propagate the netdev MTU to the EPN maximum frame size register.
+ */
+static void mode_epon_mtu_set(void *mode_priv, unsigned int size)
+{
+	struct xport_epon_priv *mode = mode_priv;
+
+	epon_epn_reg_writel(mode, EPN_MAX_FRAME_SIZE_REG, size);
+}
+
+/*
+ * common helper for link bring-up: make sure RX is mapped to the
+ * link's current LLID, then optionally kick upstream TX.
+ */
+static void __epon_link_start(struct xport_epon_priv *mode,
+			      struct epon_link *link,
+			      bool start_tx)
+{
+	if (!link->rx_enabled) {
+		epon_hw_link_update_llid(mode, link->idx, link->llid);
+		link->rx_enabled = true;
+	}
+
+	if (!start_tx)
+		return;
+
+	epon_hw_link_start_tx(mode, link->idx);
+	link->tx_enabled = true;
+}
+
+/*
+ * switch the link to a new LLID value and reprogram the hardware
+ * mapping for its index.
+ */
+static void epon_link_update_llid(struct xport_epon_priv *mode,
+				  struct epon_link *link,
+				  unsigned int llid)
+{
+	link->llid = llid;
+	epon_hw_link_update_llid(mode, link->idx, llid);
+}
+
+/*
+ * bring the link up in receive-only mode on the given LLID.
+ */
+static void epon_link_start_rx_only(struct xport_epon_priv *mode,
+				    struct epon_link *link,
+				    unsigned int llid)
+{
+	link->llid = llid;
+	__epon_link_start(mode, link, false);
+}
+
+/*
+ * bring the link up in both directions (RX mapping + upstream TX)
+ * on the given LLID.
+ */
+static void epon_link_start_bidir(struct xport_epon_priv *mode,
+				  struct epon_link *link,
+				  unsigned int llid)
+{
+	link->llid = llid;
+	__epon_link_start(mode, link, true);
+}
+
+/*
+ * toggle GATE frame delivery for this link's LLID index.
+ */
+static void epon_link_pass_gates_frame(struct xport_epon_priv *mode,
+				       struct epon_link *link,
+				       bool pass)
+{
+	epon_hw_pass_gates_frames(mode, link->idx, pass);
+}
+
+
+/*
+ * full link teardown: stop upstream TX if running, remove the RX
+ * LLID mapping if mapped, then stop passing GATE frames up.
+ */
+static void epon_link_stop(struct xport_epon_priv *mode,
+			   struct epon_link *link)
+{
+	if (link->tx_enabled) {
+		epon_hw_link_stop_tx(mode, link->idx);
+		link->tx_enabled = false;
+	}
+	if (link->rx_enabled) {
+		epon_hw_link_remove_llid(mode, link->idx);
+		link->rx_enabled = false;
+	}
+	epon_link_pass_gates_frame(mode, link, false);
+}
+
+/*
+ * look up a link by its LLID value; returns NULL when no entry on
+ * the list matches.
+ */
+static struct epon_link *epon_link_find(struct xport_epon_priv *mode,
+					unsigned int llid)
+{
+	struct epon_link *ret = NULL;
+	struct epon_link *link;
+
+	mutex_lock(&mode->links_lock);
+	list_for_each_entry(link, &mode->links_list, next) {
+		if (link->llid != llid)
+			continue;
+		ret = link;
+		break;
+	}
+	mutex_unlock(&mode->links_lock);
+	return ret;
+}
+
+/*
+ * allocate a link entry and reserve an LLID index, either the
+ * caller-supplied one (force_idx >= 0) or the lowest free index.
+ *
+ * Returns NULL when no index is free, the forced index is already
+ * taken, or memory allocation fails.
+ */
+static struct epon_link *epon_link_alloc(struct xport_epon_priv *mode,
+					 int force_idx)
+{
+	struct epon_link *link = NULL;
+	unsigned int idx;
+
+	mutex_lock(&mode->links_lock);
+
+	if (!mode->links_free)
+		goto end;
+
+	/* choose llid idx */
+	if (force_idx != -1) {
+		if (!(mode->links_free & (1ULL << force_idx))) {
+			WARN(1, "forced llid %d is not free\n", force_idx);
+			goto end;
+		}
+		idx = force_idx;
+	} else {
+		/* NOTE(review): links_free is manipulated with 1ULL
+		 * shifts; if it is a 64 bit mask, ffs() only scans
+		 * the low 32 bits — confirm its type */
+		idx = ffs(mode->links_free) - 1;
+	}
+
+	/* bugfix: the original dereferenced a failed allocation */
+	link = kzalloc(sizeof (*link), GFP_KERNEL);
+	if (!link)
+		goto end;
+
+	link->idx = idx;
+	mode->links_free &= ~(1ULL << idx);
+	list_add_tail(&link->next, &mode->links_list);
+
+end:
+	mutex_unlock(&mode->links_lock);
+	return link;
+}
+
+/*
+ * unlink and free a link entry, returning its LLID index to the
+ * free pool; caller holds links_lock and must have stopped the
+ * link first (both enable flags clear).
+ */
+static void __epon_link_release(struct xport_epon_priv *mode,
+				struct epon_link *link)
+{
+	u64 idx_bit = 1ULL << link->idx;
+
+	WARN_ON(link->tx_enabled || link->rx_enabled);
+	list_del(&link->next);
+	kfree(link);
+	mode->links_free |= idx_bit;
+}
+
+/*
+ * locked wrapper around __epon_link_release().
+ */
+static void epon_link_release(struct xport_epon_priv *mode,
+			      struct epon_link *link)
+{
+	mutex_lock(&mode->links_lock);
+	__epon_link_release(mode, link);
+	mutex_unlock(&mode->links_lock);
+}
+
+/*
+ * ask the manager for a delayed reset, doubling the backoff delay
+ * each invocation up to a 5 second ceiling.
+ */
+static void epon_schedule_reset_backoff(struct xport_epon_priv *mode)
+{
+	unsigned int delay_ms = mode->epon_reset_duration_ms;
+
+	netdev_info(mode->port->priv->netdev,
+		    "scheduling reset in %u ms\n", delay_ms);
+	bcm_enet_runner_schedule_reset(mode->port->priv, delay_ms);
+
+	delay_ms *= 2;
+	if (delay_ms > 5000)
+		delay_ms = 5000;
+	mode->epon_reset_duration_ms = delay_ms;
+}
+
+/*
+ * MPCP frame receive handler (registered via packet_type): runs the
+ * discovery / registration state machine.
+ *
+ * Frames are always consumed here (returns NET_RX_DROP); replies
+ * (register_req, register_ack) are sent through the regular xmit
+ * path with hardware-updated timestamps.
+ */
+static int mpcp_rcv_handler(struct sk_buff *skb,
+			    struct net_device *dev,
+			    struct packet_type *pt,
+			    struct net_device *orig_dev)
+{
+	struct xport_epon_priv *mode = pt->af_packet_priv;
+	struct mpcp_hdr *hdr;
+	struct sk_buff *nskb = NULL;
+
+	/*
+	 * do nothing until global link status is correct
+	 *
+	 * not necessary per-se, but this avoids processing frames
+	 * when the link just came up, or when it's down and we are
+	 * waiting for manager to reset
+	 */
+	if (mode->glob_link_state != EPON_GLINK_UP)
+		goto drop;
+
+	if (!pskb_may_pull(skb, sizeof (struct mpcp_hdr))) {
+		mode->mib.reg_mpcp_rx_invalid++;
+		goto drop;
+	}
+
+	hdr = (struct mpcp_hdr *)skb->data;
+	switch (be16_to_cpu(hdr->opcode)) {
+	case MPCP_OPCODE_GATE:
+	case MPCP_OPCODE_REGISTER:
+	case MPCP_OPCODE_REGISTER_ACK:
+		mode->mib.reg_mpcp_rx++;
+		break;
+	default:
+		mode->mib.reg_mpcp_rx_unk_opcode++;
+		goto drop;
+	}
+
+	switch (mode->reg_state) {
+	case EPON_REG_WAIT_DISCOVERY:
+	{
+		struct mpcp_disc_gate *disc;
+		struct mpcp_disc_gate10g *disc_10g;
+		struct mpcp_hdr *rhdr;
+		struct mpcp_register_req *reg_req;
+		size_t disc_size, reg_req_size, to_pad;
+		u32 now, disc_start, disc_remain, disc_end, reg_req_duration;
+		u32 sync_time, rnd_value, window_len;
+		u32 slot, slot_count, slot_skip;
+		u32 val;
+
+		/* only discovery GATEs are relevant in this state */
+		if (be16_to_cpu(hdr->opcode) != MPCP_OPCODE_GATE)
+			goto drop;
+
+		if (mode->down_speed == 10000)
+			disc_size = sizeof (*disc_10g);
+		else
+			disc_size = sizeof (*disc);
+
+		if (!pskb_may_pull(skb, sizeof (*hdr) + disc_size)) {
+			mode->mib.reg_mpcp_rx_invalid++;
+			goto drop;
+		}
+
+		disc = (struct mpcp_disc_gate *)(hdr + 1);
+		if (!(disc->nb_grants_flags & MPCP_GATE_F_IS_DISC))
+			goto drop;
+
+		if (mode->down_speed == 1000) {
+			sync_time = be16_to_cpu(disc->sync_time);
+		} else {
+			u16 caps = get_disc_info_caps(mode, NULL);
+
+			/* 10G discovery info must advertise all the
+			 * capabilities we require */
+			disc_10g = (struct mpcp_disc_gate10g *)(disc);
+			if ((be16_to_cpu(disc_10g->disc_info) & caps) != caps) {
+				mode->mib.reg_mpcp_rx_disc_info_mismatch++;
+				goto drop;
+			}
+
+			sync_time = be16_to_cpu(disc_10g->sync_time);
+		}
+
+		mode->mib.reg_mpcp_rx_disc++;
+
+		/* if first frame, then just capture synctime */
+		if (!mode->reg_cfg.valid_sync_time ||
+		    mode->reg_cfg.sync_time != sync_time) {
+			mode->reg_cfg.valid_sync_time = true;
+			mode->reg_cfg.sync_time = sync_time;
+			epon_hw_set_disc_idle_time(mode,
+						   DEF_LASER_ON_TIME,
+						   mode->reg_cfg.sync_time,
+						   DEF_LASER_OFF_TIME);
+			goto drop;
+		}
+
+		rnd_value = get_random_u32();
+
+		/*
+		 * allocate reply skb now, do all costly operation
+		 * before calculating the remaining time
+		 */
+		reg_req_size = get_register_req_data_size(false);
+		nskb = dev_alloc_skb(reg_req_size);
+		if (!nskb) {
+			mode->mib.reg_mpcp_rx_other_err++;
+			goto drop;
+		}
+
+		skb_reset_network_header(nskb);
+		rhdr = skb_put(nskb, sizeof (*rhdr) + sizeof (*reg_req));
+		rhdr->opcode = cpu_to_be16(MPCP_OPCODE_REGISTER_REQ);
+		/* timestamp will be updated by hardware */
+		rhdr->timestamp = cpu_to_be32(0xdeadbeef);
+		reg_req = (struct mpcp_register_req *)(rhdr + 1);
+		reg_req->flags = MPCP_REGREQ_F_REGISTER;
+		reg_req->pending_grants = BCM_MAX_PENDING_GRANTS;
+		reg_req->disc_info = cpu_to_be16(get_disc_info_caps(mode,
+								    NULL));
+		reg_req->laser_on = DEF_LASER_ON_TIME;
+		reg_req->laser_off = DEF_LASER_OFF_TIME;
+		/* pad remaining bytes with zero */
+		to_pad = reg_req_size - ETH_HLEN - nskb->len;
+		memset(skb_put(nskb, to_pad), 0, to_pad);
+
+		nskb->dev = mode->port->priv->netdev;
+		nskb->protocol = htons(ETH_P_PAUSE);
+		dev_hard_header(nskb, nskb->dev, ETH_P_PAUSE,
+				mpcp_frame_da, NULL, 0);
+
+		/*
+		 * reply packet is ready, check time left we have to
+		 * reply
+		 */
+		now = epon_hw_get_local_mpcp_time(mode);
+		window_len = be16_to_cpu(disc->length);
+		disc_start = be32_to_cpu(disc->start_time);
+		disc_end = disc_start + window_len;
+		if (now >= disc_end) {
+			/* really late */
+			mode->mib.reg_mpcp_rx_disc_late++;
+			goto drop;
+		}
+
+		/* make sure we have time to send the frame before end
+		 * of discovery */
+		disc_remain = disc_end - now;
+		reg_req_duration = get_register_req_data_duration(mode) +
+			DEF_LASER_ON_TIME +
+			DEF_LASER_OFF_TIME +
+			mode->reg_cfg.sync_time;
+
+		/* if remaining slack (time for us to schedule tx) is
+		 * too small, dont even try.
+		 *
+		 * bugfix: slack is disc_remain - reg_req_duration;
+		 * the previous reg_req_duration - disc_remain
+		 * underflowed once the first clause passed, making
+		 * this test a no-op */
+		if (disc_remain < reg_req_duration ||
+		    disc_remain - reg_req_duration < SOFTWARE_TX_LATENCY_TQ) {
+			mode->mib.reg_mpcp_rx_disc_late++;
+			goto drop;
+		}
+
+		/*
+		 * Take a random offset inside the transmit window.
+		 *
+		 * Since everyone else has the same reg_req_duration
+		 * value, divide window into equal slots so we reduce
+		 * chance of colliding (of course, only works if
+		 * everyone does the same...)
+		 */
+		slot_count = window_len / reg_req_duration;
+		slot_skip = 0;
+
+		/* don't use slots that are already late */
+		if (now + SOFTWARE_TX_LATENCY_TQ > disc_start)
+			slot_skip = DIV_ROUND_UP(now + SOFTWARE_TX_LATENCY_TQ -
+						 disc_start,
+						 reg_req_duration);
+
+		/* robustness: avoid modulo by zero when no usable
+		 * slot remains */
+		if (slot_skip >= slot_count) {
+			mode->mib.reg_mpcp_rx_disc_late++;
+			goto drop;
+		}
+
+		slot = slot_skip + (rnd_value % (slot_count - slot_skip));
+		mode->mib.reg_mpcp_rx_disc_last_slot = slot;
+
+		/* program hardware to skip this number of tq before
+		 * sending register_req */
+		val = (slot * reg_req_duration) <<
+			DN_DISCOVERY_SEED_CFGDISCSEED_SHIFT;
+		epon_epn_reg_writel(mode, EPN_DN_DISCOVERY_SEED_REG, val);
+
+		/* finally send packet */
+		dev_queue_xmit(nskb);
+		nskb = NULL;
+		mode->mib.reg_mpcp_tx_reg_req++;
+
+#if 0
+		printk("DISC_WINDOW: [%u - %u], slot: count:%u "
+		       "skip:%d chosen:%d\n",
+		       be32_to_cpu(disc->start_time) - now,
+		       disc_remain,
+		       slot_count,
+		       slot_skip,
+		       slot);
+#endif
+		mode->reg_state = EPON_REG_WAIT_REGISTER;
+		mode->reg_state_last_change = jiffies;
+		break;
+	}
+
+	case EPON_REG_WAIT_REGISTER:
+	{
+		struct mpcp_register *reg;
+		struct mpcp_register10g *reg_10g;
+		struct mpcp_register_ack *reg_ack;
+		struct mpcp_hdr *rhdr;
+		size_t reg_size, reg_ack_size, to_pad;
+
+		if (be16_to_cpu(hdr->opcode) != MPCP_OPCODE_REGISTER)
+			goto drop;
+
+		if (mode->down_speed == 10000)
+			reg_size = sizeof (*reg_10g);
+		else
+			reg_size = sizeof (*reg);
+
+		if (!pskb_may_pull(skb, sizeof (*hdr) + reg_size)) {
+			mode->mib.reg_mpcp_rx_invalid++;
+			goto drop;
+		}
+
+		/* the frame is sent to us on the broadcast LLID, we
+		 * have to make sure it's addressed to us only */
+		if (skb->pkt_type != PACKET_HOST) {
+			mode->mib.reg_mpcp_rx_reg_for_other++;
+			goto drop;
+		}
+
+		reg = (struct mpcp_register *)(hdr + 1);
+		switch (reg->flags) {
+		case MPCP_REG_F_REREGISTER:
+		case MPCP_REG_F_DEREGISTER:
+			/* go back to previous state */
+			mode->reg_state = EPON_REG_WAIT_DISCOVERY;
+			mode->reg_state_last_change = jiffies;
+			mode->mib.reg_mpcp_rx_reg_dereg++;
+			goto drop;
+
+		case MPCP_REG_F_NACK:
+			/* go back to previous state, might backoff
+			 * here */
+			mode->reg_state = EPON_REG_WAIT_DISCOVERY;
+			mode->reg_state_last_change = jiffies;
+			mode->mib.reg_mpcp_rx_reg_nack++;
+			goto drop;
+
+		case MPCP_REG_F_ACK:
+			break;
+
+		default:
+			mode->mib.reg_mpcp_rx_reg_unk_flag++;
+			goto drop;
+		}
+
+		/* registration success, update LLID with assigned one */
+		mode->reg_cfg.assigned_llid = be16_to_cpu(reg->assigned_port);
+
+		/* filter gates from now */
+		epon_link_pass_gates_frame(mode, mode->user_link, false);
+
+		/* now filter the new llid */
+		epon_link_update_llid(mode,
+				      mode->user_link,
+				      mode->reg_cfg.assigned_llid);
+
+		/* capture and apply the OLT parameters */
+		mode->reg_cfg.sync_time = be16_to_cpu(reg->sync_time);
+		if (mode->down_speed == 10000) {
+			reg_10g = (struct mpcp_register10g *)(reg);
+			mode->reg_cfg.laser_on_time = reg_10g->target_laser_on;
+			mode->reg_cfg.laser_off_time = reg_10g->target_laser_off;
+		} else {
+			mode->reg_cfg.laser_on_time = DEF_LASER_ON_TIME;
+			mode->reg_cfg.laser_off_time = DEF_LASER_OFF_TIME;
+		}
+
+		epon_hw_set_normal_idle_time(mode,
+					     mode->reg_cfg.laser_on_time,
+					     mode->reg_cfg.sync_time,
+					     mode->reg_cfg.laser_off_time);
+
+		/* send registration ACK */
+		reg_ack_size = max_t(size_t,
+				     ETH_HLEN +
+				     sizeof (struct mpcp_hdr) +
+				     sizeof (struct mpcp_register_ack),
+				     ETH_ZLEN);
+		nskb = dev_alloc_skb(reg_ack_size);
+		if (!nskb) {
+			mode->mib.reg_mpcp_rx_other_err++;
+			goto drop;
+		}
+
+		skb_reset_network_header(nskb);
+		rhdr = skb_put(nskb, sizeof (*rhdr) + sizeof (*reg_ack));
+		rhdr->opcode = cpu_to_be16(MPCP_OPCODE_REGISTER_ACK);
+		/* timestamp will be updated by hardware */
+		rhdr->timestamp = cpu_to_be32(0xdeadbeef);
+		reg_ack = (struct mpcp_register_ack *)(rhdr + 1);
+		reg_ack->flags = MPCP_REGACK_F_ACK;
+		reg_ack->echoed_assigned_port =
+			cpu_to_be16(mode->reg_cfg.assigned_llid);
+		reg_ack->echoed_sync_time =
+			cpu_to_be16(mode->reg_cfg.sync_time);
+		/* pad remaining bytes with zero */
+		to_pad = reg_ack_size - ETH_HLEN - nskb->len;
+		memset(skb_put(nskb, to_pad), 0, to_pad);
+
+		nskb->dev = mode->port->priv->netdev;
+		nskb->protocol = htons(ETH_P_PAUSE);
+		dev_hard_header(nskb, nskb->dev, ETH_P_PAUSE,
+				mpcp_frame_da, NULL, 0);
+
+		dev_queue_xmit(nskb);
+		nskb = NULL;
+		mode->mib.reg_mpcp_tx_reg_ack++;
+
+		/* clear some IRQs that link monitoring will be
+		 * checking from now */
+		epon_epn_reg_writel(mode,
+				    EPN_MAIN_INT_STATUS_REG,
+				    MAIN_INT_STATUS_INTDNOUTOFORDER_MASK |
+				    MAIN_INT_STATUS_INTDNTIMENOTINSYNC_MASK);
+		epon_epn_reg_writel(mode,
+				    EPN_GNT_INTV_INT_STATUS_REG,
+				    ~0);
+
+		netdev_info(mode->port->priv->netdev,
+			    "MPCP registration complete, llid 0x%04x\n",
+			    mode->reg_cfg.assigned_llid);
+		mode->reg_state = EPON_REG_COMPLETE;
+		mode->reg_state_last_change = jiffies;
+		break;
+	}
+
+	case EPON_REG_COMPLETE:
+	{
+		struct mpcp_register *reg;
+		struct mpcp_register10g *reg_10g;
+		size_t reg_size;
+
+		if (be16_to_cpu(hdr->opcode) != MPCP_OPCODE_REGISTER)
+			goto drop;
+
+		if (mode->down_speed == 10000)
+			reg_size = sizeof (*reg_10g);
+		else
+			reg_size = sizeof (*reg);
+
+		if (!pskb_may_pull(skb, sizeof (*hdr) + reg_size)) {
+			mode->mib.reg_mpcp_rx_invalid++;
+			goto drop;
+		}
+
+		if (skb->pkt_type != PACKET_HOST) {
+			mode->mib.reg_mpcp_rx_reg_for_other++;
+			goto drop;
+		}
+
+		/* only deregistration is meaningful once registered */
+		reg = (struct mpcp_register *)(hdr + 1);
+		switch (reg->flags) {
+		case MPCP_REG_F_REREGISTER:
+		case MPCP_REG_F_DEREGISTER:
+			netdev_info(mode->port->priv->netdev,
+				    "received MPCP deregister\n");
+			mode->reg_state = EPON_REG_FAILED;
+			mode->reg_state_last_change = jiffies;
+			phylink_mac_change(mode->port->priv->phylink, false);
+			epon_schedule_reset_backoff(mode);
+			goto drop;
+
+		default:
+			goto drop;
+		}
+		break;
+	}
+
+	case EPON_REG_FAILED:
+		goto drop;
+	}
+
+drop:
+	if (nskb)
+		kfree_skb(nskb);
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+/*
+ * re-arm the periodic global link monitoring work
+ */
+static void glob_link_work_resched(struct xport_epon_priv *mode)
+{
+	unsigned long delay = HZ / 5;
+
+	/* poll the link state again in ~200ms */
+	queue_delayed_work(mode->epon_wq, &mode->glob_link_work, delay);
+}
+
+/*
+ * EPN "main" interrupt status bits checked by the link monitoring
+ * work.  Entries flagged .fatal trigger a link reset when pending;
+ * entries flagged .ignore_when_not_reg are expected while MPCP
+ * registration is not complete and are only reported afterwards.
+ * Designated initializers are used so the two trailing booleans
+ * cannot be mixed up positionally.
+ */
+static struct {
+	u32		mask;
+	const char	*name;
+	bool		fatal;
+	bool		ignore_when_not_reg;
+} epn_irqs[] = {
+	{ .mask = MAIN_INT_STATUS_INTBBHUPFRABORT_MASK,
+	  .name = "BBHUPFRABORT" },
+	{ .mask = MAIN_INT_STATUS_INTCOL2SBURSTCAPOVERFLOWPRES_MASK,
+	  .name = "COL2SBURSTCAPOVERFLOWPRES" },
+	{ .mask = MAIN_INT_STATUS_INTCOEMPTYRPT_MASK,
+	  .name = "COEMPTYRPT" },
+	{ .mask = MAIN_INT_STATUS_INTCODRXERRABORTPRES_MASK,
+	  .name = "CODRXERRABORTPRES" },
+	{ .mask = MAIN_INT_STATUS_INTL2SFIFOOVERRUN_MASK,
+	  .name = "L2SFIFOOVERRUN", .fatal = true },
+	{ .mask = MAIN_INT_STATUS_INTCO1588TSINT_MASK,
+	  .name = "CO1588TSINT" },
+	{ .mask = MAIN_INT_STATUS_INTCODELSTALEGNT_MASK,
+	  .name = "CODELSTALEGNT" },
+	{ .mask = MAIN_INT_STATUS_INTCOGNTNONPOLL_MASK,
+	  .name = "COGNTNONPOLL" },
+	{ .mask = MAIN_INT_STATUS_INTCOGNTMISALIGN_MASK,
+	  .name = "COGNTMISALIGN" },
+	{ .mask = MAIN_INT_STATUS_INTCOGNTTOOFAR_MASK,
+	  .name = "COGNTTOOFAR" },
+	{ .mask = MAIN_INT_STATUS_INTCOGNTINTERVAL_MASK,
+	  .name = "COGNTINTERVAL" },
+	{ .mask = MAIN_INT_STATUS_INTCOGNTMISSABORT_MASK,
+	  .name = "COGNTMISSABORT" },
+	{ .mask = MAIN_INT_STATUS_INTCOGNTFULLABORT_MASK,
+	  .name = "COGNTFULLABORT" },
+	{ .mask = MAIN_INT_STATUS_INTBADUPFRLEN_MASK,
+	  .name = "BADUPFRLEN", .fatal = true },
+	{ .mask = MAIN_INT_STATUS_INTUPTARDYPACKET_MASK,
+	  .name = "UPTARDYPACKET" },
+	{ .mask = MAIN_INT_STATUS_INTBIFIFOOVERRUN_MASK,
+	  .name = "BIFIFOOVERRUN", .fatal = true },
+	{ .mask = MAIN_INT_STATUS_INTDNSTATSOVERRUN_MASK,
+	  .name = "DNSTATSOVERRUN" },
+	{ .mask = MAIN_INT_STATUS_INTUPSTATSOVERRUN_MASK,
+	  .name = "UPSTATSOVERRUN" },
+	{ .mask = MAIN_INT_STATUS_INTDNOUTOFORDER_MASK,
+	  .name = "DNOUTOFORDER", .ignore_when_not_reg = true },
+	{ .mask = MAIN_INT_STATUS_INTTRUANTBBHHALT_MASK,
+	  .name = "TRUANTBBHHALT", .fatal = true },
+	{ .mask = MAIN_INT_STATUS_INTCOBBHUPSFAULT_MASK,
+	  .name = "COBBHUPSFAULT", .fatal = true },
+	{ .mask = MAIN_INT_STATUS_INTDNTIMENOTINSYNC_MASK,
+	  .name = "DNTIMENOTINSYNC", .fatal = true,
+	  .ignore_when_not_reg = true },
+};
+
+/*
+ * Read and acknowledge the pending EPN interrupts; return true when
+ * a fatal condition was seen and the link must be reset.  Called
+ * periodically from glob_link_work().
+ */
+static bool epon_has_fatal_pending_irq(struct xport_epon_priv *mode)
+{
+	u32 val;
+	size_t i;
+	bool fatal;
+	bool reg_done;
+
+	/* make sure we still receive grant */
+	if (mode->reg_state == EPON_REG_COMPLETE) {
+		val = epon_epn_reg_readl(mode, EPN_GNT_INTV_INT_STATUS_REG);
+		/* NOTE(review): bit 0 presumably matches the user link
+		 * LLID index -- confirm against the register layout */
+		if ((val & (1 << 0))) {
+			netdev_err(mode->port->priv->netdev,
+				   "not receiving any grant on user "
+				   "llid anymore\n");
+			return true;
+		}
+	}
+
+	val = epon_epn_reg_readl(mode, EPN_MAIN_INT_STATUS_REG);
+	if (!val)
+		return false;
+
+	reg_done = (mode->reg_state == EPON_REG_COMPLETE);
+	fatal = false;
+	for (i = 0; i < ARRAY_SIZE(epn_irqs); i++) {
+		if (!(epn_irqs[i].mask & val))
+			continue;
+
+		/* some IRQs are expected while not registered yet */
+		if (epn_irqs[i].ignore_when_not_reg && !reg_done)
+			continue;
+
+		netdev_err(mode->port->priv->netdev,
+			   "%slink IRQ pending: %s\n",
+			   epn_irqs[i].fatal ? "fatal " : "",
+			   epn_irqs[i].name);
+
+		fatal |= epn_irqs[i].fatal;
+	}
+
+	/* acknowledge the status bits we just reported */
+	epon_epn_reg_writel(mode, EPN_MAIN_INT_STATUS_REG, val);
+	return fatal;
+}
+
+/*
+ * Periodic (HZ/5) link monitoring work: checks laser monitoring
+ * errors, loss of signal, fatal EPN IRQs and stalled MPCP
+ * registration, and drives the EPON_GLINK_* state machine.
+ */
+static void glob_link_work(struct work_struct *w)
+{
+	struct delayed_work *dwork = to_delayed_work(w);
+	struct xport_epon_priv *mode;
+	bool link;
+
+	mode = container_of(dwork, struct xport_epon_priv, glob_link_work);
+
+	/* check for laser monitoring error */
+	switch (mode->up_speed) {
+	case 1000:
+	{
+		u32 val = epon_lif_reg_readl(mode, LIF_INT_STATUS_REG);
+		if (val & LIF_INT_STATUS_LASERONMAX_MASK) {
+			netdev_err(mode->port->priv->netdev,
+				   "laser monitoring error\n");
+			mode->lasermon_event_count++;
+			/* acknowledge the event in the LIF block */
+			epon_lif_reg_writel(mode,
+					    LIF_INT_STATUS_REG,
+					    LIF_INT_STATUS_LASERONMAX_MASK);
+		}
+		break;
+	}
+	case 10000:
+		/* FIXME implement */
+		break;
+	}
+
+
+	switch (mode->glob_link_state) {
+	case EPON_GLINK_DOWN:
+		/* wait for loss-of-signal to clear before declaring
+		 * the global link up */
+		link = !epon_hw_get_los(mode);
+		if (!link) {
+			glob_link_work_resched(mode);
+			return;
+		}
+
+		mode->glob_link_state = EPON_GLINK_UP;
+		phylink_mac_change(mode->port->priv->phylink, true);
+		glob_link_work_resched(mode);
+		break;
+
+	case EPON_GLINK_UP:
+		link = !epon_hw_get_los(mode);
+		if (!link) {
+			netdev_info(mode->port->priv->netdev,
+				    "PCS link down\n");
+			mode->glob_link_state = EPON_GLINK_FAILED;
+			phylink_mac_change(mode->port->priv->phylink, false);
+			bcm_enet_runner_schedule_reset(mode->port->priv, 0);
+			return;
+		}
+
+		if (epon_has_fatal_pending_irq(mode)) {
+			netdev_err(mode->port->priv->netdev,
+				   "fatal IRQ, will reset\n");
+			mode->glob_link_state = EPON_GLINK_FAILED;
+			phylink_mac_change(mode->port->priv->phylink, false);
+			epon_schedule_reset_backoff(mode);
+			return;
+		}
+
+		/* registration stuck in WAIT_REGISTER for 5s: go back
+		 * to discovery a few times, then hard reset */
+		if (mode->reg_state == EPON_REG_WAIT_REGISTER &&
+		    time_after(jiffies,
+			       mode->reg_state_last_change + HZ * 5)) {
+			mode->reg_state = EPON_REG_WAIT_DISCOVERY;
+			mode->reg_state_last_change = jiffies;
+			mode->mib.reg_mpcp_rx_reg_timeout++;
+			if (mode->mib.reg_mpcp_rx_reg_timeout < 5)
+				netdev_info(mode->port->priv->netdev,
+					    "restart stalled registration\n");
+			else {
+				netdev_err(mode->port->priv->netdev,
+					   "registration stalled for too "
+					   "long, hard reset\n");
+				epon_schedule_reset_backoff(mode);
+				return;
+			}
+		}
+
+		glob_link_work_resched(mode);
+		break;
+
+	case EPON_GLINK_FAILED:
+		/* a reset has been scheduled, nothing more to do */
+		break;
+	}
+}
+
+/*
+ * Bring up the EPON MAC: allocate the static (user + broadcast)
+ * links, initialize the hardware and start the registration/link
+ * monitoring works.  Called with epon_lock held.  Returns 0 on
+ * success, non-zero on failure (caller then does not mark epon
+ * started).
+ */
+static int __epon_start(struct xport_epon_priv *mode,
+			unsigned int down_speed, unsigned int up_speed)
+{
+	xport_switch_pinctrl(mode->port, XPORT_PIN_ROGUE1);
+
+	/* only 10G down / 1G up is supported for now */
+	switch (down_speed) {
+	case 10000:
+		break;
+	default:
+		BUG();
+		return -ENOTSUPP;
+	}
+
+	switch (up_speed) {
+	case 1000:
+		break;
+	default:
+		BUG();
+		return -ENOTSUPP;
+	}
+
+	BUG_ON(mode->glob_link_state != EPON_GLINK_DOWN);
+	BUG_ON(mode->reg_state != EPON_REG_WAIT_DISCOVERY);
+	mode->reg_state_last_change = jiffies;
+	mode->user_cfg.burst_cap = DEFAULT_BURST_CAP;
+
+	mode->down_speed = down_speed;
+	mode->up_speed = up_speed;
+	mode->start_count++;
+
+	/* create static links we need */
+	mode->user_link = epon_link_alloc(mode, USER_LLID_IDX);
+	if (!mode->user_link)
+		return 1;
+
+	mode->bcast_link = epon_link_alloc(mode, BROADCAST_LLID_IDX);
+	if (!mode->bcast_link) {
+		/* release the user link allocated above (previous code
+		 * released the NULL broadcast link here, leaking the
+		 * user link) */
+		epon_link_release(mode, mode->user_link);
+		mode->user_link = NULL;
+		return 1;
+	}
+
+	/* low level hardware init */
+	xport_serdes_set_params(mode->port, &serdes_params_10g_1g,
+				&serdes_m3params_10g_1g);
+	epon_hw_init(mode);
+
+	/* start link work & enable registration: RX only on the
+	 * broadcast LLID, bidirectional on the user link, and let
+	 * gate frames through for the registration state machine */
+	epon_link_start_rx_only(mode,
+				mode->bcast_link,
+				get_broadcast_llid(mode));
+	epon_link_start_bidir(mode,
+			      mode->user_link,
+			      get_broadcast_llid(mode));
+	epon_link_pass_gates_frame(mode,
+				   mode->user_link,
+				   true);
+
+	dev_add_pack(&mode->reg_tap);
+	glob_link_work_resched(mode);
+	return 0;
+}
+
+/*
+ * Tear down a started EPON instance: stop the works and MPCP tap,
+ * drop carrier, disable the laser, stop all links and reset the
+ * software state.  Called with epon_lock held.  The ordering is
+ * deliberate: nothing may touch the hardware anymore once the links
+ * are being stopped.
+ */
+static void __epon_stop(struct xport_epon_priv *mode)
+{
+	struct epon_link *link;
+
+	/* stop */
+	cancel_delayed_work_sync(&mode->glob_link_work);
+	dev_remove_pack(&mode->reg_tap);
+	flush_workqueue(mode->epon_wq);
+
+	/* make sure carrier is off */
+	phylink_mac_change(mode->port->priv->phylink, false);
+	netif_carrier_off(mode->port->priv->netdev);
+
+	/* try to avoid any spurious TX while we reset */
+	epon_hw_set_laser_normal_op(mode, false);
+
+	/* NOTE: netdevice queues are stopped */
+	bcm_runner_fw_stop_tx(mode->port->priv);
+
+	/* nothing is touching hardware now, stop all links */
+	mutex_lock(&mode->links_lock);
+	list_for_each_entry(link, &mode->links_list, next)
+		epon_link_stop(mode, link);
+	mutex_unlock(&mode->links_lock);
+
+	if (!bcm_runner_fw_tx_is_stopped(mode->port->priv))
+		netdev_err(mode->port->priv->netdev,
+			   "failed to stop TX runner DMA\n");
+
+	/*
+	 * reset state
+	 */
+	mode->glob_link_state = EPON_GLINK_DOWN;
+	mode->reg_state = EPON_REG_WAIT_DISCOVERY;
+	memset(&mode->reg_cfg, 0, sizeof (mode->reg_cfg));
+	memset(&mode->user_cfg, 0, sizeof (mode->user_cfg));
+
+	epon_link_release(mode, mode->bcast_link);
+	mode->bcast_link = NULL;
+	epon_link_release(mode, mode->user_link);
+	mode->user_link = NULL;
+	mode->down_speed = 0;
+	mode->up_speed = 0;
+}
+
+/*
+ * Create an RX-only link for the given multicast LLID and remember
+ * it in the links_mcast bitmap.  Called with epon_lock held.
+ */
+static int __epon_add_mcast_llid(struct xport_epon_priv *mode,
+				 unsigned int llid)
+{
+	struct epon_link *mclink;
+
+	/* refuse duplicates */
+	if (epon_link_find(mode, llid))
+		return -EEXIST;
+
+	mclink = epon_link_alloc(mode, -1);
+	if (!mclink)
+		return -ENOSPC;
+
+	epon_link_start_rx_only(mode, mclink, llid);
+	mode->links_mcast |= (1 << mclink->idx);
+	return 0;
+}
+
+/*
+ * Stop and release the link created for the given multicast LLID.
+ * Called with epon_lock held.  Returns 0 or -ENOENT when no such
+ * link exists.
+ */
+static int __epon_del_mcast_llid(struct xport_epon_priv *mode,
+				 unsigned int llid)
+{
+	struct epon_link *link;
+
+	link = epon_link_find(mode, llid);
+	if (!link)
+		return -ENOENT;
+
+	/* clear the bitmap bit before releasing: link must not be
+	 * dereferenced after epon_link_release() may have freed it */
+	mode->links_mcast &= ~(1 << link->idx);
+	epon_link_stop(mode, link);
+	epon_link_release(mode, link);
+	return 0;
+}
+
+/*
+ * Stop and release every link flagged in the links_mcast bitmap.
+ * Called with epon_lock held.
+ */
+static void __epon_del_all_mcast_llid(struct xport_epon_priv *mode)
+{
+	struct epon_link *cur, *n;
+
+	mutex_lock(&mode->links_lock);
+	list_for_each_entry_safe(cur, n, &mode->links_list, next) {
+		if (!(mode->links_mcast & (1 << cur->idx)))
+			continue;
+		epon_link_stop(mode, cur);
+		__epon_link_release(mode, cur);
+	}
+	mutex_unlock(&mode->links_lock);
+	mode->links_mcast = 0;
+}
+
+/*
+ * Start EPON operation, guarding against double start.  Returns 0 on
+ * success, -EBUSY when already started, or __epon_start()'s error.
+ */
+static int epon_start(struct xport_epon_priv *mode,
+		      unsigned int down_speed, unsigned int up_speed)
+{
+	int err = -EBUSY;
+
+	bcm_enet_runner_unschedule_reset(mode->port->priv);
+
+	mutex_lock(&mode->epon_lock);
+	if (WARN_ON(mode->epon_started)) {
+		netdev_err(mode->port->priv->netdev,
+			   "tried to start epon twice");
+	} else {
+		err = __epon_start(mode, down_speed, up_speed);
+		if (!err)
+			mode->epon_started = true;
+	}
+	mutex_unlock(&mode->epon_lock);
+
+	return err;
+}
+
+/*
+ * Stop EPON operation (if started), clear the statistics and all
+ * multicast links, and cancel any pending reset.
+ */
+static void epon_stop(struct xport_epon_priv *mode)
+{
+	mutex_lock(&mode->epon_lock);
+	if (mode->epon_started) {
+		__epon_stop(mode);
+		mode->epon_started = false;
+	}
+	/* forget accumulated statistics and multicast links */
+	memset(&mode->mib, 0, sizeof(mode->mib));
+	__epon_del_all_mcast_llid(mode);
+	mutex_unlock(&mode->epon_lock);
+
+	bcm_enet_runner_unschedule_reset(mode->port->priv);
+}
+
+/*
+ * ethtool get_epon_param: report the current EPON status to
+ * userspace (discovery/registration state, assigned LLID, encryption
+ * configuration and last downstream RX encryption state).
+ * NOTE(review): change_count / lasermon_event_count and the
+ * epon_started test are read before epon_lock is taken -- confirm
+ * that a stale snapshot is acceptable here.
+ */
+static int mode_get_epon_param(void *mode_priv,
+			       struct ethtool_epon_param *param)
+{
+	struct xport_epon_priv *mode = mode_priv;
+	bool last_encrypted;
+	unsigned int last_encrypted_key_id;
+
+	memset(param, 0, sizeof (*param));
+	param->change_count = mode->start_count;
+	param->lasermon_event_count = mode->lasermon_event_count;
+
+	/* nothing more to report when epon is not running */
+	if (!mode->epon_started)
+		return 0;
+
+	mutex_lock(&mode->epon_lock);
+	param->burst_cap = mode->user_cfg.burst_cap;
+	param->discovery_rx = (mode->mib.reg_mpcp_rx_disc > 0);
+	param->registered = (mode->reg_state == EPON_REG_COMPLETE);
+	if (!param->registered) {
+		mutex_unlock(&mode->epon_lock);
+		return 0;
+	}
+
+	param->llid = mode->reg_cfg.assigned_llid;
+	param->down_encrypt = mode->user_cfg.down_enc_enabled;
+
+	memcpy(param->key_sci, mode->user_cfg.key_sci,
+	       sizeof (param->key_sci));
+	memcpy(param->down_key0, mode->user_cfg.down_key0,
+	       sizeof (param->down_key0));
+	memcpy(param->down_key1, mode->user_cfg.down_key1,
+	       sizeof (param->down_key1));
+
+	/* ask hardware whether the last received frame was encrypted
+	 * and with which key index */
+	epon_hw_get_llid_last_rx_encrypt(mode,
+					 mode->user_link->idx,
+					 &last_encrypted,
+					 &last_encrypted_key_id);
+	param->down_last_rx_encrypted = last_encrypted;
+	param->down_last_rx_key_id = last_encrypted_key_id;
+
+	mutex_unlock(&mode->epon_lock);
+	return 0;
+}
+
+/*
+ * ethtool set_epon_param: apply EPON configuration from userspace
+ * (encryption keys, burst cap, multicast LLIDs, link restart).
+ * Returns 0 or a negative errno.
+ */
+static int mode_set_epon_param(void *mode_priv,
+			       const struct ethtool_epon_param *param)
+{
+	struct xport_epon_priv *mode = mode_priv;
+	int ret;
+
+	if (!mode->epon_started)
+		return -ENETDOWN;
+
+	switch (param->cmd) {
+	case ETHTOOL_SEPON_KEYS:
+	{
+		u8 *d;
+		const u8 *s;
+
+		/* keys_update_id selects which of the two downstream
+		 * keys is being updated */
+		if (param->keys_update_id == 0) {
+			s = param->down_key0;
+			d = mode->user_cfg.down_key0;
+		} else if (param->keys_update_id == 1) {
+			s = param->down_key1;
+			d = mode->user_cfg.down_key1;
+		} else
+			return -EINVAL;
+
+		/* update the cached user_cfg copy under epon_lock as
+		 * well so it cannot race with __epon_stop() clearing
+		 * user_cfg */
+		mutex_lock(&mode->epon_lock);
+		memcpy(d, s, sizeof (mode->user_cfg.down_key0));
+		memcpy(mode->user_cfg.key_sci, param->key_sci,
+		       sizeof (mode->user_cfg.key_sci));
+		epon_hw_set_down_key(mode,
+				     mode->user_link->idx,
+				     param->keys_update_id,
+				     s,
+				     param->key_sci);
+		mutex_unlock(&mode->epon_lock);
+		break;
+	}
+	case ETHTOOL_SEPON_ENCRYPT:
+		mode->user_cfg.down_enc_enabled = param->down_encrypt;
+		break;
+	case ETHTOOL_SEPON_BURST:
+		mutex_lock(&mode->epon_lock);
+		mode->user_cfg.burst_cap = param->burst_cap;
+		epon_hw_set_burst_cap(mode, mode->user_cfg.burst_cap);
+		mutex_unlock(&mode->epon_lock);
+		break;
+	case ETHTOOL_SEPON_RESTART:
+		netdev_info(mode->port->priv->netdev,
+			    "restart link request from userspace\n");
+		bcm_enet_runner_schedule_reset(mode->port->priv, 0);
+		break;
+	case ETHTOOL_SEPON_ADD_MCLLID:
+		mutex_lock(&mode->epon_lock);
+		ret = __epon_add_mcast_llid(mode, param->mcast_llid);
+		mutex_unlock(&mode->epon_lock);
+		return ret;
+	case ETHTOOL_SEPON_DEL_MCLLID:
+		mutex_lock(&mode->epon_lock);
+		ret = __epon_del_mcast_llid(mode, param->mcast_llid);
+		mutex_unlock(&mode->epon_lock);
+		return ret;
+	case ETHTOOL_SEPON_CLR_MCLLID:
+		mutex_lock(&mode->epon_lock);
+		__epon_del_all_mcast_llid(mode);
+		mutex_unlock(&mode->epon_lock);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/*
+ * phylink PCS config callback: start EPON operation when the
+ * requested interface is the supported 10G-down / 1G-up EPON mode.
+ */
+static int mode_phylink_pcs_config(void *mode_priv,
+				   unsigned int pl_mode,
+				   phy_interface_t interface,
+				   const unsigned long *advertising)
+{
+	struct xport_epon_priv *mode = mode_priv;
+
+	if (interface != PHY_INTERFACE_MODE_10000_1000_BASEPRX_U)
+		BUG();
+
+	epon_start(mode, 10000, 1000);
+	return 0;
+}
+
+/*
+ * called for each packet during netdevice xmit: MPCP/PAUSE frames
+ * may always go out, anything else only after registration has
+ * completed.
+ */
+static bool mode_epon_can_send(void *mode_priv, unsigned int protocol)
+{
+	struct xport_epon_priv *mode = mode_priv;
+
+	if (protocol == ETH_P_PAUSE)
+		return true;
+
+	return mode->reg_state == EPON_REG_COMPLETE;
+}
+
+/*
+ * phylink mac link-up callback; intentionally empty, link handling
+ * is done by the internal EPON state machine.
+ */
+static void mode_phylink_link_up(void *mode_priv,
+				 unsigned int pl_mode,
+				 phy_interface_t interface,
+				 int speed, int duplex,
+				 struct phy_device *phy)
+{
+	/* not used */
+}
+
+/*
+ * phylink mac link-down callback; intentionally empty, link handling
+ * is done by the internal EPON state machine.
+ */
+static void mode_phylink_link_down(void *mode_priv,
+				   unsigned int pl_mode,
+				   phy_interface_t interface)
+{
+	/* not used */
+}
+
+/*
+ * phylink PCS autoneg restart callback; there is no autonegotiation
+ * to restart on this link.
+ */
+static void mode_phylink_pcs_an_restart(void *mode_priv)
+{
+	/* not supported */
+}
+
+/*
+ * phylink PCS state callback: report link up only while the global
+ * link is up and registration has not failed (link is faked down
+ * after a failed registration so phylink notices the failure).
+ */
+static int mode_phylink_pcs_get_state(void *mode_priv,
+				      struct phylink_link_state *state)
+{
+	struct xport_epon_priv *mode = mode_priv;
+	bool up;
+
+	up = (mode->glob_link_state == EPON_GLINK_UP) &&
+		(mode->reg_state != EPON_REG_FAILED);
+
+	state->link = up;
+	if (up) {
+		state->an_complete = 1;
+		state->speed = 10000;
+		state->duplex = 1;
+	}
+
+	return 0;
+}
+
+/*
+ * called when the netdevice is stopped or before a phylink interface
+ * change; tears down EPON operation.
+ */
+static void mode_epon_stop(void *mode_priv)
+{
+	struct xport_epon_priv *mode = mode_priv;
+
+	epon_stop(mode);
+}
+
+/*
+ * called when netdevice is started or after phylink interface
+ * change; allocates and initializes the per-port EPON mode context.
+ * Returns the context or an ERR_PTR() on allocation failure.
+ */
+static void *mode_epon_init(void *port_priv,
+			    const struct bcm_xrdp_enet_params *params)
+{
+	struct xport_priv *port = port_priv;
+	struct xport_epon_priv *mode;
+	char name[64];
+	size_t i;
+
+	mode = kzalloc(sizeof (*mode), GFP_KERNEL);
+	if (!mode)
+		return ERR_PTR(-ENOMEM);
+
+	mode->port = port;
+	INIT_DELAYED_WORK(&mode->glob_link_work, glob_link_work);
+	mutex_init(&mode->links_lock);
+	INIT_LIST_HEAD(&mode->links_list);
+	/* reserve the fixed indexes for the user & broadcast links */
+	mode->links_free = (1 << USER_LLID_IDX) | (1 << BROADCAST_LLID_IDX);
+	/* ethtool mib only reports llid 24 to 30 rx bytes, don't use
+	 * other llid */
+	for (i = 24; i < 31; i++)
+		mode->links_free |= (1 << i);
+
+	mutex_init(&mode->epon_lock);
+	mode->epon_reset_duration_ms = 100;
+
+	/* capture mac address */
+	memcpy(mode->mac_addr,
+	       mode->port->priv->netdev->dev_addr,
+	       ETH_ALEN);
+
+	/* set default config */
+	mode->laser_active_hi = true;
+
+	/* dedicated workqueue for the link/registration works */
+	scnprintf(name, sizeof (name), "%s-epon", port->priv->netdev->name);
+	mode->epon_wq = create_singlethread_workqueue(name);
+	if (!mode->epon_wq) {
+		kfree(mode);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* setup MPCP TAP (registered later by __epon_start()) */
+	mode->reg_tap.type = cpu_to_be16(ETH_P_PAUSE);
+	mode->reg_tap.func = mpcp_rcv_handler;
+	mode->reg_tap.af_packet_priv = mode;
+	mode->reg_tap.ignore_outgoing = true;
+	mode->reg_tap.dev = port->priv->netdev;
+
+	mode_epon_dbg_init(mode);
+	return mode;
+}
+
+/*
+ * called when netdevice is stopped or after phylink interface
+ * change; frees everything allocated by mode_epon_init().
+ */
+static void mode_epon_release(void *mode_priv)
+{
+	struct xport_epon_priv *mode = mode_priv;
+
+	mode_epon_dbg_release(mode);
+	destroy_workqueue(mode->epon_wq);
+	kfree(mode);
+}
+
+/*
+ * return the BBH identifier used for the PON port
+ */
+static u32 mode_get_bbh_id(void *port_priv)
+{
+	const struct xport_priv *port = port_priv;
+
+	return port->pon_bbh_id;
+}
+
+/*
+ * OAM Event Notification PDU payload used for the dying-gasp frame.
+ * NOTE(review): the u16 fields are written in host byte order; the
+ * only user sets them all to 0 so it does not currently matter, but
+ * they should become __be16 if ever set to non-zero values.
+ */
+struct oam_event_notif_pdu {
+	u16	sequence;		/* event sequence number */
+	u8	tlv_type;		/* TLV type (0xfe = org specific) */
+	u8	len;			/* TLV length */
+	u8	oui[3];			/* organization unique identifier */
+	u8	code;			/* event code */
+	u8	raised;			/* event raised / cleared */
+	u16	object_type;
+	u16	object_instance;
+} __attribute__((packed));
+
+/*
+ * Build the dying-gasp frame sent on power failure: a slow-protocol
+ * OAM Event Notification PDU carrying the DPoE OUI and the "power
+ * failure" event code.  Returns the generated frame length, or
+ * -EINVAL when buf is too small.
+ */
+static int
+mode_epon_dgasp_gen_data(void *mode_priv, u8 *buf, size_t buf_len)
+{
+	const u8 dpoe_oui[3] = { 0x00, 0x10, 0x00 };
+	struct xport_epon_priv *mode = mode_priv;
+	struct ethhdr *eth;
+	struct oam_event_notif_pdu *pdu;
+	u8 *p;
+
+	/* ethernet header + 4 OAM header bytes + event PDU */
+	if (buf_len < sizeof (struct ethhdr) + 4 + sizeof (*pdu))
+		return -EINVAL;
+
+	p = buf;
+	eth = (struct ethhdr *)p;
+	/* slow protocols multicast destination address */
+	memcpy(eth->h_dest, "\x01\x80\xc2\x00\x00\x02", 6);
+	memcpy(eth->h_source, mode->mac_addr, 6);
+	eth->h_proto = cpu_to_be16(ETH_P_SLOW);
+	p += sizeof (*eth);
+
+	/* SLOW_PROTO_SUBTYPE_OAM */
+	*p++ = 0x3;
+
+	/* OAM proto flags (16 bits), local+remote stable + dgasp */
+	*p++ = 0x00;
+	*p++ = 0x52;
+
+	 /* Event Notification PDU */
+	*p++ = 0x01;
+
+	pdu = (struct oam_event_notif_pdu *)p;
+	pdu->sequence = 0;
+	pdu->tlv_type = 0xfe; /* org specific */
+	pdu->len = sizeof (*pdu);
+	memcpy(pdu->oui, &dpoe_oui, 3);
+	pdu->code = 0x41; /* power failure */
+	pdu->raised = 1;
+	pdu->object_type = 0;
+	pdu->object_instance = 0;
+	p += sizeof (*pdu);
+
+	/* total length of the generated frame */
+	return p - buf;
+}
+
+/* EPON mode operations plugged into the generic enet port layer */
+const struct bcm_enet_mode_ops xport_epon_mode_ops = {
+	.name			= "EPON",
+
+	.init			= mode_epon_init,
+	.release		= mode_epon_release,
+	.can_send		= mode_epon_can_send,
+
+	.stop			= mode_epon_stop,
+	.get_bbh_id		= mode_get_bbh_id,
+	.mtu_set		= mode_epon_mtu_set,
+
+	/* dying-gasp support */
+	.dgasp_supported	= true,
+	.dgasp_gen_data		= mode_epon_dgasp_gen_data,
+
+	/* ethtool operations */
+	.get_epon_param		= mode_get_epon_param,
+	.set_epon_param		= mode_set_epon_param,
+
+	/* mib operation */
+	.mib_estat		= epon_mib_estat,
+	.mib_estat_count	= ARRAY_SIZE(epon_mib_estat),
+	.mib_update		= mode_epon_mib_update,
+	.mib_get_data		= mode_epon_mib_get_data,
+
+	/*
+	 * phylink callback
+	 */
+	.phylink_link_down	= mode_phylink_link_down,
+	.phylink_link_up	= mode_phylink_link_up,
+	.phylink_pcs_config	= mode_phylink_pcs_config,
+	.phylink_pcs_get_state	= mode_phylink_pcs_get_state,
+	.phylink_pcs_an_restart	= mode_phylink_pcs_an_restart,
+};
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/port_xport_epon.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_epon.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/port_xport_epon.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_epon.h	2025-09-25 17:40:33.555357249 +0200
@@ -0,0 +1,490 @@
+#ifndef PORT_XPORT_EPON_H_
+#define PORT_XPORT_EPON_H_
+
+#include <linux/list.h>
+
+#include "port_xport.h"
+
+#include "regs/epon_epon_top.h"
+#include "regs/epon_epn.h"
+#include "regs/epon_epn_onu_mac_addr.h"
+#include "regs/epon_epn_tx_l1s_shp.h"
+#include "regs/epon_epon_top.h"
+#include "regs/epon_lif.h"
+#include "regs/epon_nco_addr.h"
+#include "regs/epon_xif.h"
+#include "regs/epon_xpcsrx.h"
+#include "regs/epon_xpcstx.h"
+
+/*
+ * EPON definitions
+ */
+#define BROADCAST_LLID_1G		0x7fff
+#define BROADCAST_LLID_10G		0x7ffe
+
+/* one MPCP time quantum (TQ) is 16 ns; at 1G, 2 bytes per TQ */
+#define NSEC_TO_TQ(x)			((x) / 16)
+#define USEC_TO_TQ(x)			(((x) * 1000) / 16)
+#define TQ_TO_NSEC(x)			((x) * 16)
+#define BYTES_TO_TQ_1G(x)		((x) / 2)
+#define PREAMBLE_LEN_BYTES		8
+#define IPG_BYTES_1G			12
+
+/* default laser on/off time per specification */
+#define DEF_LASER_ON_TIME		32
+#define DEF_LASER_OFF_TIME		32
+
+/*
+ * MPCP protocol (MAC control) opcodes
+ */
+enum {
+	MPCP_OPCODE_PAUSE		= 1,
+	MPCP_OPCODE_GATE		= 2,
+	MPCP_OPCODE_REPORT		= 3,
+	MPCP_OPCODE_REGISTER_REQ	= 4,
+	MPCP_OPCODE_REGISTER		= 5,
+	MPCP_OPCODE_REGISTER_ACK	= 6,
+};
+
+/* common MPCP frame header: opcode + 32-bit MPCP timestamp */
+struct mpcp_hdr {
+	__be16	opcode;
+	__be32	timestamp;
+} __packed;
+
+/* one grant inside a GATE frame: start time and length */
+struct mpcp_grant_hdr {
+	__be32			start_time;
+	__be16			len;
+} __packed;
+
+/* GATE frame payload: grant count/flags followed by the grants.
+ * grants[] is a C99 flexible array member (zero-length arrays are
+ * deprecated in the kernel); sizeof() is unchanged. */
+struct mpcp_gate {
+	u8			nb_grants_flags;
+	struct mpcp_grant_hdr	grants[];
+} __packed;
+
+#define MPCP_GATE_F_IS_DISC	(1 << 3)
+
+/* discovery GATE payload (1G variant) */
+struct mpcp_disc_gate {
+	u8			nb_grants_flags;
+	__be32			start_time;
+	__be16			length;
+	__be16			sync_time;
+} __packed;
+
+/* discovery information bits (capability / window flags) */
+enum {
+	MPCP_DISCINFO_1G_CAP		= (1 << 0),
+	MPCP_DISCINFO_10G_CAP		= (1 << 1),
+
+	MPCP_DISCINFO_1G_WINDOW		= (1 << 4),
+	MPCP_DISCINFO_10G_WINDOW	= (1 << 5),
+};
+
+/* discovery GATE payload (10G variant, adds disc_info) */
+struct mpcp_disc_gate10g {
+	u8			nb_grants_flags;
+	__be32			start_time;
+	__be16			length;
+	__be16			sync_time;
+	__be16			disc_info;
+} __packed;
+
+/* REGISTER_REQ flags */
+enum {
+	MPCP_REGREQ_F_REGISTER		= 1,
+	MPCP_REGREQ_F_DEREGISTER	= 3,
+};
+
+/* REGISTER_REQ payload */
+struct mpcp_register_req {
+	u8			flags;
+	u8			pending_grants;
+	/* remaining fields only in 10G, but since framed is zero
+	 * padded, it does not matter if they are present or not, a 1G
+	 * OLT won't make a difference between those and padding */
+	__be16			disc_info;
+	u8			laser_on;
+	u8			laser_off;
+} __packed;
+
+
+struct mpcp_register {
+	__be16			assigned_port;
+	u8			flags;
+	__be16			sync_time;
+	u8			echoed_pending_grants;
+} __packed;
+
+struct mpcp_register10g {
+	__be16			assigned_port;
+	u8			flags;
+	__be16			sync_time;
+	u8			echoed_pending_grants;
+	u8			target_laser_on;
+	u8			target_laser_off;
+} __packed;
+
+
+enum {
+	MPCP_REGACK_F_NACK	= 0,
+	MPCP_REGACK_F_ACK	= 1,
+};
+
+struct mpcp_register_ack {
+	u8			flags;
+	__be16			echoed_assigned_port;
+	__be16			echoed_sync_time;
+} __packed;
+
+
+/*
+ * eponmac MIB: software accumulators for the statistics counters of
+ * the XPCS-RX, XIF, LIF and EPN hardware blocks, plus the software
+ * counters of the MPCP registration state machine.
+ */
+struct eponmac_mib {
+	/*
+	 * XPCS RX
+	 */
+	u64	xpcs_rx_framer_misbrst;
+	u64	xpcs_rx_framer_bd_err;
+	u64	xpcs_rx_64b66b_ipg_det;
+	u64	xpcs_rx_fec_nque_in;
+	u64	xpcs_rx_fec_nque_out;
+	u64	xpcs_rx_idle_start;
+	u64	xpcs_rx_idle_stop;
+	u64	xpcs_rx_fec_cw_fail;
+	u64	xpcs_rx_fec_cw_tot;
+	u64	xpcs_rx_fec_correct;
+	u64	xpcs_rx_fec_ones_cor;
+	u64	xpcs_rx_fec_zeros_cor;
+	u64	xpcs_rx_64b66b_fail;
+	u64	xpcs_rx_frmr_bad_sh;
+	u64	xpcs_rx_psudo;
+	u64	xpcs_rx_prbs;
+	u64	xpcs_rx_64b66b_start;
+	u64	xpcs_rx_idle_good_pkt;
+	u64	xpcs_rx_idle_err_pkt;
+	u64	xpcs_rx_64b66b_stop;
+
+	/*
+	 * XIF
+	 */
+	u64	xif_pmc_frame_rx;
+	u64	xif_pmc_byte_rx;
+	u64	xif_pmc_runt_rx;
+	u64	xif_pmc_cw_err_rx;
+	u64	xif_pmc_crc8_err_rx;
+	u64	xif_xpn_data_frm;
+	u64	xif_xpn_data_byte;
+	u64	xif_xpn_mpcp_frm;
+	u64	xif_xpn_oam_frm;
+	u64	xif_xpn_oam_byte;
+	u64	xif_xpn_oversize_frm;
+	u64	xif_sec_abort_frm;
+	u64	xif_pmc_tx_neg_event;
+	u64	xif_xpn_idle_pkt;
+
+	/*
+	 * LIF
+	 */
+	u64	lif_rx_line_code_err_cnt;
+	u64	lif_rx_agg_mpcp_frm;
+	u64	lif_rx_agg_good_frm;
+	u64	lif_rx_agg_good_byte;
+	u64	lif_rx_agg_undersz_frm;
+	u64	lif_rx_agg_oversz_frm;
+	u64	lif_rx_agg_crc8_frm;
+	u64	lif_rx_agg_fec_frm;
+	u64	lif_rx_agg_fec_byte;
+	u64	lif_rx_agg_fec_exc_err_frm;
+	u64	lif_rx_agg_nonfec_good_frm;
+	u64	lif_rx_agg_nonfec_good_byte;
+	u64	lif_rx_agg_err_bytes;
+	u64	lif_rx_agg_err_zeroes;
+	u64	lif_rx_agg_no_err_blks;
+	u64	lif_rx_agg_cor_blks;
+	u64	lif_rx_agg_uncor_blks;
+	u64	lif_rx_agg_err_ones;
+	u64	lif_rx_agg_err_frm;
+	u64	lif_tx_pkt_cnt;
+	u64	lif_tx_byte_cnt;
+	u64	lif_tx_non_fec_pkt_cnt;
+	u64	lif_tx_non_fec_byte_cnt;
+	u64	lif_tx_fec_pkt_cnt;
+	u64	lif_tx_fec_byte_cnt;
+	u64	lif_tx_fec_blk_cnt;
+	u64	lif_tx_mpcp_pkt_cnt;
+
+	/*
+	 * EPN RAM (per-LLID counters, index 0 = user link)
+	 */
+	u64	epn00_rx_bytes;
+	u64	epn00_rx_fcs;
+	u64	epn00_rx_oam;
+	u64	epn00_rx_gate;
+	u64	epn00_rx_64;
+	u64	epn00_rx_65_127;
+	u64	epn00_rx_128_255;
+	u64	epn00_rx_256_511;
+	u64	epn00_rx_512_1023;
+	u64	epn00_rx_1024_1518;
+	u64	epn00_rx_1519_2047;
+	u64	epn00_rx_2048_4095;
+	u64	epn00_rx_4096_9216;
+	u64	epn00_rx_gt_9216;
+	u64	epn00_rx_oversize;
+	u64	epn00_rx_bcast;
+	u64	epn00_rx_mcast;
+	u64	epn00_rx_unicast;
+	u64	epn00_rx_undersized;
+	u64	epn00_rx_oam_bytes;
+	u64	epn00_rx_register;
+	u64	epn00_tx_bytes;
+	u64	epn00_tx_oam;
+	u64	epn00_tx_report;
+	u64	epn00_tx_64;
+	u64	epn00_tx_65_127;
+	u64	epn00_tx_128_255;
+	u64	epn00_tx_256_511;
+	u64	epn00_tx_512_1023;
+	u64	epn00_tx_1024_1518;
+	u64	epn00_tx_1519_2047;
+	u64	epn00_tx_2048_4095;
+	u64	epn00_tx_4096_9216;
+	u64	epn00_tx_gt_9216;
+	u64	epn00_tx_oam_bytes;
+	u64	epn00_tx_bcast;
+	u64	epn00_tx_mcast;
+	u64	epn00_tx_unicast;
+
+	/* only used for mcast/bcast */
+	u64	epn24_rx_bytes;
+	u64	epn24_rx_fcs;
+	u64	epn24_rx_bcast;
+	u64	epn24_rx_mcast;
+	u64	epn25_rx_bytes;
+	u64	epn25_rx_fcs;
+	u64	epn25_rx_bcast;
+	u64	epn25_rx_mcast;
+	u64	epn26_rx_bytes;
+	u64	epn26_rx_fcs;
+	u64	epn26_rx_bcast;
+	u64	epn26_rx_mcast;
+	u64	epn27_rx_bytes;
+	u64	epn27_rx_fcs;
+	u64	epn27_rx_bcast;
+	u64	epn27_rx_mcast;
+	u64	epn28_rx_bytes;
+	u64	epn28_rx_fcs;
+	u64	epn28_rx_bcast;
+	u64	epn28_rx_mcast;
+	u64	epn29_rx_bytes;
+	u64	epn29_rx_fcs;
+	u64	epn29_rx_bcast;
+	u64	epn29_rx_mcast;
+	u64	epn30_rx_bytes;
+	u64	epn30_rx_fcs;
+	u64	epn30_rx_bcast;
+	u64	epn30_rx_mcast;
+	u64	epn31_rx_bytes;
+	u64	epn31_rx_fcs;
+	u64	epn31_rx_bcast;
+	u64	epn31_rx_mcast;
+
+	/*
+	 * EPN reg
+	 */
+	u64	epn00_l1_acc_bytes;
+	u64	epn00_unused_tq;
+	u64	epn_unmap_big;
+	u64	epn_unmap_frame;
+	u64	epn_unmap_fcs;
+	u64	epn_unmap_gate;
+	u64	epn_unmap_oam;
+	u64	epn_unmap_small;
+
+	/*
+	 * registration (software counters)
+	 */
+	u64	reg_mpcp_rx;
+	u64	reg_mpcp_rx_invalid;
+	u64	reg_mpcp_rx_unk_opcode;
+	u64	reg_mpcp_rx_disc;
+	u64	reg_mpcp_rx_disc_info_mismatch;
+	u64	reg_mpcp_rx_disc_late;
+	u64	reg_mpcp_rx_disc_last_slot;
+	u64	reg_mpcp_rx_reg_for_other;
+	u64	reg_mpcp_rx_reg_unk_flag;
+	u64	reg_mpcp_rx_reg_dereg;
+	u64	reg_mpcp_rx_reg_nack;
+	u64	reg_mpcp_rx_reg_timeout;
+	u64	reg_mpcp_rx_other_err;
+	u64	reg_mpcp_tx_reg_req;
+	u64	reg_mpcp_tx_reg_ack;
+	u64	reg_fsm_state;
+};
+
+/* one active LLID: hardware index, LLID value, enabled directions */
+struct epon_link {
+	unsigned int			idx;
+	unsigned int			llid;
+	bool				rx_enabled;
+	bool				tx_enabled;
+	struct list_head		next;
+};
+
+/* parameters captured from the OLT during MPCP registration */
+struct epon_reg_config {
+	unsigned int			laser_on_time;
+	unsigned int			laser_off_time;
+
+	bool				valid_sync_time;
+	unsigned int			sync_time;
+
+	u16				assigned_llid;
+};
+
+/* configuration pushed from userspace through ethtool */
+struct epon_user_config {
+	u32				burst_cap;
+	bool				down_enc_enabled;
+	u8				key_sci[8];
+	u8				down_key0[16];
+	u8				down_key1[16];
+};
+
+/*
+ * global link status (PCS + time sync)
+ */
+enum epon_global_link_state {
+	EPON_GLINK_DOWN,
+	EPON_GLINK_UP,
+	EPON_GLINK_FAILED,
+};
+
+/* MPCP registration state machine states */
+enum epon_registration_state {
+	EPON_REG_WAIT_DISCOVERY,
+	EPON_REG_WAIT_REGISTER,
+	EPON_REG_COMPLETE,
+	EPON_REG_FAILED,
+};
+
+/* per-port EPON mode context, allocated by mode_epon_init() */
+struct xport_epon_priv {
+	struct xport_priv		*port;
+	struct dentry			*regs_dbg;
+	struct eponmac_mib		mib;
+	struct workqueue_struct		*epon_wq;
+	struct packet_type		reg_tap;
+
+	/*
+	 * pon configuration, cannot be changed while epon is started
+	 */
+	u8				mac_addr[6];
+	bool				laser_active_hi;
+
+	/*
+	 * link list, empty when epon is not started
+	 */
+	struct mutex			links_lock;
+	struct list_head		links_list;
+	u32				links_free;
+	u32				links_mcast;
+
+	/* serializes start/stop and ethtool configuration */
+	struct mutex			epon_lock;
+	bool				epon_started;
+	unsigned int			epon_reset_duration_ms;
+
+	/*
+	 * current epon state
+	 */
+	unsigned int			down_speed;
+	unsigned int			up_speed;
+	u32				start_count;
+	unsigned int			lasermon_event_count;
+	struct epon_link		*user_link;
+	struct epon_link		*bcast_link;
+	struct delayed_work		glob_link_work;
+	enum epon_global_link_state	glob_link_state;
+	enum epon_registration_state	reg_state;
+	unsigned long			reg_state_last_change;
+	struct epon_reg_config		reg_cfg;
+	struct epon_user_config		user_cfg;
+};
+
+/*
+ * io accessors for the EPON register blocks; all blocks live in the
+ * port's regs[3] mapping at fixed offsets.  The spurious "return" in
+ * front of the void iowrite32() calls was dropped: returning an
+ * expression from a void function is not valid ISO C.
+ */
+static inline u32 epon_top_reg_readl(struct xport_epon_priv *mode,
+				     u32 offset)
+{
+	return ioread32(mode->port->regs[3] + EPON_TOP_OFFSET_0 + offset);
+}
+
+static inline void epon_top_reg_writel(struct xport_epon_priv *mode,
+				       u32 offset, u32 val)
+{
+	iowrite32(val, mode->port->regs[3] +
+		  EPON_TOP_OFFSET_0 + offset);
+}
+
+static inline u32 epon_epn_reg_readl(struct xport_epon_priv *mode,
+				     u32 offset)
+{
+	return ioread32(mode->port->regs[3] + EPN_OFFSET_0 + offset);
+}
+
+static inline void epon_epn_reg_writel(struct xport_epon_priv *mode,
+				       u32 offset, u32 val)
+{
+	iowrite32(val, mode->port->regs[3] + EPN_OFFSET_0 + offset);
+}
+
+static inline u32 epon_xif_reg_readl(struct xport_epon_priv *mode,
+				     u32 offset)
+{
+	return ioread32(mode->port->regs[3] + XIF_OFFSET_0 + offset);
+}
+
+static inline void epon_xif_reg_writel(struct xport_epon_priv *mode,
+				       u32 offset, u32 val)
+{
+	iowrite32(val, mode->port->regs[3] + XIF_OFFSET_0 + offset);
+}
+
+static inline u32 epon_lif_reg_readl(struct xport_epon_priv *mode,
+				     u32 offset)
+{
+	return ioread32(mode->port->regs[3] + LIF_OFFSET_0 + offset);
+}
+
+static inline void epon_lif_reg_writel(struct xport_epon_priv *mode,
+				       u32 offset, u32 val)
+{
+	iowrite32(val, mode->port->regs[3] + LIF_OFFSET_0 + offset);
+}
+
+static inline u32 epon_xpcsrx_reg_readl(struct xport_epon_priv *mode,
+					u32 offset)
+{
+	return ioread32(mode->port->regs[3] + XPCSRX_OFFSET_0 + offset);
+}
+
+static inline void epon_xpcsrx_reg_writel(struct xport_epon_priv *mode,
+					  u32 offset, u32 val)
+{
+	iowrite32(val, mode->port->regs[3] + XPCSRX_OFFSET_0 + offset);
+}
+
+static inline u32 epon_xpcstx_reg_readl(struct xport_epon_priv *mode,
+					u32 offset)
+{
+	return ioread32(mode->port->regs[3] + XPCSTX_OFFSET_0 + offset);
+}
+
+static inline void epon_xpcstx_reg_writel(struct xport_epon_priv *mode,
+					  u32 offset, u32 val)
+{
+	iowrite32(val, mode->port->regs[3] + XPCSTX_OFFSET_0 + offset);
+}
+
+void mode_epon_dbg_init(struct xport_epon_priv *mode);
+void mode_epon_dbg_release(struct xport_epon_priv *mode);
+
+#endif /* PORT_XPORT_EPON_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/port_xport_epon_dbg.c linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_epon_dbg.c
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/port_xport_epon_dbg.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_epon_dbg.c	2025-09-25 17:40:33.555357249 +0200
@@ -0,0 +1,537 @@
+#include "port_xport_epon.h"
+
+/*
+ * Register block selector for entries of the dump table below:
+ * tells the show routine which accessor (and thus which base
+ * offset within regs[3]) to use when reading a register.
+ */
+enum {
+	REG_XPCS_RX,
+	REG_XIF,
+	REG_LIF,
+	REG_EPN,
+};
+
+/*
+ * One row of the debugfs register dump table: a printable name,
+ * the register offset within its block, and the block type used
+ * to select the matching read accessor.
+ */
+struct reg_desc {
+	const char	*name;
+	u32		offset;
+	unsigned int	type;
+};
+
+/*
+ * Registers exported through the debugfs "epon_regs" file, grouped
+ * by block (EPN, LIF, XIF).  Entry order is the order they are
+ * printed in.
+ */
+static const struct reg_desc epon_regs[] = {
+	/*
+	 * EPN
+	 */
+	{ "EPN_CONTROL_0", EPN_CONTROL_0_REG, REG_EPN, },
+	{ "EPN_CONTROL_1", EPN_CONTROL_1_REG, REG_EPN, },
+	{ "EPN_ENABLE_GRANTS", EPN_ENABLE_GRANTS_REG, REG_EPN, },
+	{ "EPN_DROP_DISC_GATES", EPN_DROP_DISC_GATES_REG, REG_EPN, },
+	{ "EPN_DIS_FCS_CHK", EPN_DIS_FCS_CHK_REG, REG_EPN, },
+	{ "EPN_PASS_GATES", EPN_PASS_GATES_REG, REG_EPN, },
+	{ "EPN_CFG_MISALGN_FB", EPN_CFG_MISALGN_FB_REG, REG_EPN, },
+	{ "EPN_DISCOVERY_FILTER", EPN_DISCOVERY_FILTER_REG, REG_EPN, },
+	{ "EPN_MINIMUM_GRANT_SETUP", EPN_MINIMUM_GRANT_SETUP_REG, REG_EPN, },
+	{ "EPN_RESET_GNT_FIFO", EPN_RESET_GNT_FIFO_REG, REG_EPN, },
+	{ "EPN_RESET_L1_ACCUMULATOR", EPN_RESET_L1_ACCUMULATOR_REG, REG_EPN, },
+	{ "EPN_L1_ACCUMULATOR_SEL", EPN_L1_ACCUMULATOR_SEL_REG, REG_EPN, },
+	{ "EPN_L1_SVA_BYTES", EPN_L1_SVA_BYTES_REG, REG_EPN, },
+	{ "EPN_L1_UVA_BYTES", EPN_L1_UVA_BYTES_REG, REG_EPN, },
+	{ "EPN_L1_SVA_OVERFLOW", EPN_L1_SVA_OVERFLOW_REG, REG_EPN, },
+	{ "EPN_L1_UVA_OVERFLOW", EPN_L1_UVA_OVERFLOW_REG, REG_EPN, },
+	{ "EPN_RESET_RPT_PRI", EPN_RESET_RPT_PRI_REG, REG_EPN, },
+	{ "EPN_RESET_L2_RPT_FIFO", EPN_RESET_L2_RPT_FIFO_REG, REG_EPN, },
+	{ "EPN_ENABLE_UPSTREAM", EPN_ENABLE_UPSTREAM_REG, REG_EPN, },
+	{ "EPN_ENABLE_UPSTREAM_FB", EPN_ENABLE_UPSTREAM_FB_REG, REG_EPN, },
+	{ "EPN_ENABLE_UPSTREAM_FEC", EPN_ENABLE_UPSTREAM_FEC_REG, REG_EPN, },
+	{ "EPN_REPORT_BYTE_LENGTH", EPN_REPORT_BYTE_LENGTH_REG, REG_EPN, },
+	{ "EPN_MAIN_INT_STATUS", EPN_MAIN_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_GNT_FULL_INT_STATUS", EPN_GNT_FULL_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_GNT_FULL_INT_MASK", EPN_GNT_FULL_INT_MASK_REG, REG_EPN, },
+	{ "EPN_GNT_MISS_INT_STATUS", EPN_GNT_MISS_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_GNT_MISS_INT_MASK", EPN_GNT_MISS_INT_MASK_REG, REG_EPN, },
+	{ "EPN_DISC_RX_INT_STATUS", EPN_DISC_RX_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_DISC_RX_INT_MASK", EPN_DISC_RX_INT_MASK_REG, REG_EPN, },
+	{ "EPN_GNT_INTV_INT_STATUS", EPN_GNT_INTV_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_GNT_INTV_INT_MASK", EPN_GNT_INTV_INT_MASK_REG, REG_EPN, },
+	{ "EPN_GNT_FAR_INT_STATUS", EPN_GNT_FAR_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_GNT_FAR_INT_MASK", EPN_GNT_FAR_INT_MASK_REG, REG_EPN, },
+	{ "EPN_GNT_MISALGN_INT_STATUS", EPN_GNT_MISALGN_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_GNT_MISALGN_INT_MASK", EPN_GNT_MISALGN_INT_MASK_REG, REG_EPN, },
+	{ "EPN_NP_GNT_INT_STATUS", EPN_NP_GNT_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_NP_GNT_INT_MASK", EPN_NP_GNT_INT_MASK_REG, REG_EPN, },
+	{ "EPN_DEL_STALE_INT_STATUS", EPN_DEL_STALE_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_DEL_STALE_INT_MASK", EPN_DEL_STALE_INT_MASK_REG, REG_EPN, },
+	{ "EPN_GNT_PRES_INT_STATUS", EPN_GNT_PRES_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_GNT_PRES_INT_MASK", EPN_GNT_PRES_INT_MASK_REG, REG_EPN, },
+	{ "EPN_RPT_PRES_INT_STATUS", EPN_RPT_PRES_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_RPT_PRES_INT_MASK", EPN_RPT_PRES_INT_MASK_REG, REG_EPN, },
+	{ "EPN_DRX_ABORT_INT_STATUS", EPN_DRX_ABORT_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_DRX_ABORT_INT_MASK", EPN_DRX_ABORT_INT_MASK_REG, REG_EPN, },
+	{ "EPN_EMPTY_RPT_INT_STATUS", EPN_EMPTY_RPT_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_EMPTY_RPT_INT_MASK", EPN_EMPTY_RPT_INT_MASK_REG, REG_EPN, },
+	{ "EPN_BCAP_OVERFLOW_INT_STATUS", EPN_BCAP_OVERFLOW_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_BCAP_OVERFLOW_INT_MASK", EPN_BCAP_OVERFLOW_INT_MASK_REG, REG_EPN, },
+	{ "EPN_BBH_DNS_FAULT_INT_STATUS", EPN_BBH_DNS_FAULT_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_BBH_DNS_FAULT_INT_MASK", EPN_BBH_DNS_FAULT_INT_MASK_REG, REG_EPN, },
+	{ "EPN_BBH_UPS_FAULT_INT_STATUS", EPN_BBH_UPS_FAULT_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_BBH_UPS_FAULT_INT_MASK", EPN_BBH_UPS_FAULT_INT_MASK_REG, REG_EPN, },
+	{ "EPN_BBH_UPS_ABORT_INT_STATUS", EPN_BBH_UPS_ABORT_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_BBH_UPS_ABORT_INT_MASK", EPN_BBH_UPS_ABORT_INT_MASK_REG, REG_EPN, },
+	{ "EPN_MAIN_INT_MASK", EPN_MAIN_INT_MASK_REG, REG_EPN, },
+	{ "EPN_MAX_GNT_SIZE", EPN_MAX_GNT_SIZE_REG, REG_EPN, },
+	{ "EPN_MAX_FRAME_SIZE", EPN_MAX_FRAME_SIZE_REG, REG_EPN, },
+	{ "EPN_GRANT_OVR_HD", EPN_GRANT_OVR_HD_REG, REG_EPN, },
+	{ "EPN_POLL_SIZE", EPN_POLL_SIZE_REG, REG_EPN, },
+	{ "EPN_DN_RD_GNT_MARGIN", EPN_DN_RD_GNT_MARGIN_REG, REG_EPN, },
+	{ "EPN_GNT_TIME_START_DELTA", EPN_GNT_TIME_START_DELTA_REG, REG_EPN, },
+	{ "EPN_TIME_STAMP_DIFF", EPN_TIME_STAMP_DIFF_REG, REG_EPN, },
+	{ "EPN_UP_TIME_STAMP_OFF", EPN_UP_TIME_STAMP_OFF_REG, REG_EPN, },
+	{ "EPN_GNT_INTERVAL", EPN_GNT_INTERVAL_REG, REG_EPN, },
+	{ "EPN_DN_GNT_MISALIGN_THR", EPN_DN_GNT_MISALIGN_THR_REG, REG_EPN, },
+	{ "EPN_DN_GNT_MISALIGN_PAUSE", EPN_DN_GNT_MISALIGN_PAUSE_REG, REG_EPN, },
+	{ "EPN_NON_POLL_INTV", EPN_NON_POLL_INTV_REG, REG_EPN, },
+	{ "EPN_FORCE_FCS_ERR", EPN_FORCE_FCS_ERR_REG, REG_EPN, },
+	{ "EPN_GRANT_OVERLAP_LIMIT", EPN_GRANT_OVERLAP_LIMIT_REG, REG_EPN, },
+	{ "EPN_AES_CONFIGURATION_0", EPN_AES_CFG_0_REG, REG_EPN, },
+	{ "EPN_DISC_GRANT_OVR_HD", EPN_DISC_GRANT_OVR_HD_REG, REG_EPN, },
+	{ "EPN_DN_DISCOVERY_SEED", EPN_DN_DISCOVERY_SEED_REG, REG_EPN, },
+	{ "EPN_DN_DISCOVERY_INC", EPN_DN_DISCOVERY_INC_REG, REG_EPN, },
+	{ "EPN_DN_DISCOVERY_SIZE", EPN_DN_DISCOVERY_SIZE_REG, REG_EPN, },
+	{ "EPN_FEC_IPG_LENGTH", EPN_FEC_IPG_LENGTH_REG, REG_EPN, },
+	{ "EPN_FAKE_REPORT_VALUE_EN", EPN_FAKE_REPORT_VALUE_EN_REG, REG_EPN, },
+	{ "EPN_FAKE_REPORT_VALUE", EPN_FAKE_REPORT_VALUE_REG, REG_EPN, },
+	{ "EPN_BURST_CAP_0", EPN_BURST_CAPx_0_7_REG(0), REG_EPN, },
+	{ "EPN_BURST_CAP_1", EPN_BURST_CAPx_0_7_REG(1), REG_EPN, },
+	{ "EPN_BURST_CAP_2", EPN_BURST_CAPx_0_7_REG(2), REG_EPN, },
+	{ "EPN_BURST_CAP_3", EPN_BURST_CAPx_0_7_REG(3), REG_EPN, },
+	{ "EPN_BURST_CAP_4", EPN_BURST_CAPx_0_7_REG(4), REG_EPN, },
+	{ "EPN_BURST_CAP_5", EPN_BURST_CAPx_0_7_REG(5), REG_EPN, },
+	{ "EPN_BURST_CAP_6", EPN_BURST_CAPx_0_7_REG(6), REG_EPN, },
+	{ "EPN_BURST_CAP_7", EPN_BURST_CAPx_0_7_REG(7), REG_EPN, },
+
+	{ "EPN_QUEUE_LLID_MAP_0", EPN_QUEUE_LLID_MAPx_0_7_REG(0), REG_EPN, },
+	{ "EPN_QUEUE_LLID_MAP_1", EPN_QUEUE_LLID_MAPx_0_7_REG(1), REG_EPN, },
+	{ "EPN_QUEUE_LLID_MAP_2", EPN_QUEUE_LLID_MAPx_0_7_REG(2), REG_EPN, },
+	{ "EPN_QUEUE_LLID_MAP_3", EPN_QUEUE_LLID_MAPx_0_7_REG(3), REG_EPN, },
+	{ "EPN_QUEUE_LLID_MAP_4", EPN_QUEUE_LLID_MAPx_0_7_REG(4), REG_EPN, },
+	{ "EPN_QUEUE_LLID_MAP_5", EPN_QUEUE_LLID_MAPx_0_7_REG(5), REG_EPN, },
+	{ "EPN_QUEUE_LLID_MAP_6", EPN_QUEUE_LLID_MAPx_0_7_REG(6), REG_EPN, },
+	{ "EPN_QUEUE_LLID_MAP_7", EPN_QUEUE_LLID_MAPx_0_7_REG(7), REG_EPN, },
+
+	{ "EPN_VALID_OPCODE_MAP", EPN_VALID_OPCODE_MAP_REG, REG_EPN, },
+	{ "EPN_UP_PACKET_TX_MARGIN", EPN_UP_PACKET_TX_MARGIN_REG, REG_EPN, },
+	{ "EPN_MULTI_PRI_CFG_0", EPN_MULTI_PRI_CFG_0_REG, REG_EPN, },
+	{ "EPN_SHARED_BCAP_OVRFLOW", EPN_SHARED_BCAP_OVRFLOW_REG, REG_EPN, },
+	{ "EPN_FORCED_REPORT_EN", EPN_FORCED_REPORT_EN_REG, REG_EPN, },
+	{ "EPN_FORCED_REPORT_MAX_INTERVAL", EPN_FORCED_REPORT_MAX_INTERVAL_REG, REG_EPN, },
+	{ "EPN_L2S_FLUSH_CONFIG", EPN_L2S_FLUSH_CONFIG_REG, REG_EPN, },
+	{ "EPN_DATA_PORT_COMMAND", EPN_DATA_PORT_COMMAND_REG, REG_EPN, },
+	{ "EPN_DATA_PORT_ADDRESS", EPN_DATA_PORT_ADDR_REG, REG_EPN, },
+	{ "EPN_DATA_PORT_DATA_0", EPN_DATA_PORT_DATA_0_REG, REG_EPN, },
+	{ "EPN_UNMAP_BIG_CNT", EPN_UNMAP_BIG_CNT_REG, REG_EPN, },
+	{ "EPN_UNMAP_FRAME_CNT", EPN_UNMAP_FRAME_CNT_REG, REG_EPN, },
+	{ "EPN_UNMAP_FCS_CNT", EPN_UNMAP_FCS_CNT_REG, REG_EPN, },
+	{ "EPN_UNMAP_GATE_CNT", EPN_UNMAP_GATE_CNT_REG, REG_EPN, },
+	{ "EPN_UNMAP_OAM_CNT", EPN_UNMAP_OAM_CNT_REG, REG_EPN, },
+	{ "EPN_UNMAP_SMALL_CNT", EPN_UNMAP_SMALL_CNT_REG, REG_EPN, },
+	{ "EPN_FIF_DEQUEUE_EVENT_CNT", EPN_FIF_DEQUEUE_EVENT_CNT_REG, REG_EPN, },
+	{ "EPN_UNUSED_TQ_CNT0", EPN_UNUSED_TQ_CNTx_0_7_REG(0), REG_EPN, },
+	{ "EPN_UNUSED_TQ_CNT1", EPN_UNUSED_TQ_CNTx_0_7_REG(1), REG_EPN, },
+	{ "EPN_UNUSED_TQ_CNT2", EPN_UNUSED_TQ_CNTx_0_7_REG(2), REG_EPN, },
+	{ "EPN_UNUSED_TQ_CNT3", EPN_UNUSED_TQ_CNTx_0_7_REG(3), REG_EPN, },
+	{ "EPN_UNUSED_TQ_CNT4", EPN_UNUSED_TQ_CNTx_0_7_REG(4), REG_EPN, },
+	{ "EPN_UNUSED_TQ_CNT5", EPN_UNUSED_TQ_CNTx_0_7_REG(5), REG_EPN, },
+	{ "EPN_UNUSED_TQ_CNT6", EPN_UNUSED_TQ_CNTx_0_7_REG(6), REG_EPN, },
+	{ "EPN_UNUSED_TQ_CNT7", EPN_UNUSED_TQ_CNTx_0_7_REG(7), REG_EPN, },
+
+	{ "EPN_BBH_UP_FAULT_HALT_EN", EPN_BBH_UP_FAULT_HALT_EN_REG, REG_EPN, },
+	{ "EPN_BBH_UP_TARDY_HALT_EN", EPN_BBH_UP_TARDY_HALT_EN_REG, REG_EPN, },
+	{ "EPN_DEBUG_STATUS_0", EPN_DEBUG_STATUS_0_REG, REG_EPN, },
+	{ "EPN_DEBUG_STATUS_1", EPN_DEBUG_STATUS_1_REG, REG_EPN, },
+	{ "EPN_DEBUG_L2S_PTR_SEL", EPN_DEBUG_L2S_PTR_SEL_REG, REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_LO_0", EPN_ONU_MAC_ADDRx_0_7_LO_REG(0), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_HI_0", EPN_ONU_MAC_ADDRx_0_7_HI_REG(0), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_LO_1", EPN_ONU_MAC_ADDRx_0_7_LO_REG(1), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_HI_1", EPN_ONU_MAC_ADDRx_0_7_HI_REG(1), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_LO_2", EPN_ONU_MAC_ADDRx_0_7_LO_REG(2), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_HI_2", EPN_ONU_MAC_ADDRx_0_7_HI_REG(2), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_LO_3", EPN_ONU_MAC_ADDRx_0_7_LO_REG(3), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_HI_3", EPN_ONU_MAC_ADDRx_0_7_HI_REG(3), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_LO_4", EPN_ONU_MAC_ADDRx_0_7_LO_REG(4), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_HI_4", EPN_ONU_MAC_ADDRx_0_7_HI_REG(4), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_LO_5", EPN_ONU_MAC_ADDRx_0_7_LO_REG(5), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_HI_5", EPN_ONU_MAC_ADDRx_0_7_HI_REG(5), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_LO_6", EPN_ONU_MAC_ADDRx_0_7_LO_REG(6), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_HI_6", EPN_ONU_MAC_ADDRx_0_7_HI_REG(6), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_LO_7", EPN_ONU_MAC_ADDRx_0_7_LO_REG(7), REG_EPN, },
+	{ "EPN_ONU_MAC_ADDR_HI_7", EPN_ONU_MAC_ADDRx_0_7_HI_REG(7), REG_EPN, },
+	{ "EPN_OLT_MAC_ADDR_LO", EPN_OLT_MAC_ADDR_LO_REG, REG_EPN, },
+	{ "EPN_OLT_MAC_ADDR_HI", EPN_OLT_MAC_ADDR_HI_REG, REG_EPN, },
+
+	{ "EPN_TX_L1S_SHP_CONFIG_0", EPN_TX_L1S_SHP_CONFIGx_0_7_REG(0), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_EN_0", EPN_TX_L1S_SHP_QUE_ENx_0_7_REG(0), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_CONFIG_1", EPN_TX_L1S_SHP_CONFIGx_0_7_REG(1), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_EN_1", EPN_TX_L1S_SHP_QUE_ENx_0_7_REG(1), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_CONFIG_2", EPN_TX_L1S_SHP_CONFIGx_0_7_REG(2), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_EN_2", EPN_TX_L1S_SHP_QUE_ENx_0_7_REG(2), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_CONFIG_3", EPN_TX_L1S_SHP_CONFIGx_0_7_REG(3), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_EN_3", EPN_TX_L1S_SHP_QUE_ENx_0_7_REG(3), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_CONFIG_4", EPN_TX_L1S_SHP_CONFIGx_0_7_REG(4), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_EN_4", EPN_TX_L1S_SHP_QUE_ENx_0_7_REG(4), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_CONFIG_5", EPN_TX_L1S_SHP_CONFIGx_0_7_REG(5), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_EN_5", EPN_TX_L1S_SHP_QUE_ENx_0_7_REG(5), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_CONFIG_6", EPN_TX_L1S_SHP_CONFIGx_0_7_REG(6), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_EN_6", EPN_TX_L1S_SHP_QUE_ENx_0_7_REG(6), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_CONFIG_7", EPN_TX_L1S_SHP_CONFIGx_0_7_REG(7), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_EN_7", EPN_TX_L1S_SHP_QUE_ENx_0_7_REG(7), REG_EPN, },
+
+	{ "EPN_TX_L1S_SHP_DQU_EMPTY", EPN_TX_L1S_SHP_DQU_EMPTY_REG, REG_EPN, },
+	{ "EPN_TX_L1S_UNSHAPED_EMPTY", EPN_TX_L1S_UNSHAPED_EMPTY_REG, REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_MASK_0", EPN_TX_L1S_SHP_QUE_MASKx_0_7_REG(0), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_MASK_1", EPN_TX_L1S_SHP_QUE_MASKx_0_7_REG(1), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_MASK_2", EPN_TX_L1S_SHP_QUE_MASKx_0_7_REG(2), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_MASK_3", EPN_TX_L1S_SHP_QUE_MASKx_0_7_REG(3), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_MASK_4", EPN_TX_L1S_SHP_QUE_MASKx_0_7_REG(4), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_MASK_5", EPN_TX_L1S_SHP_QUE_MASKx_0_7_REG(5), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_MASK_6", EPN_TX_L1S_SHP_QUE_MASKx_0_7_REG(6), REG_EPN, },
+	{ "EPN_TX_L1S_SHP_QUE_MASK_7", EPN_TX_L1S_SHP_QUE_MASKx_0_7_REG(7), REG_EPN, },
+
+	{ "EPN_TX_L2S_QUE_CONFIG_0", EPN_TX_L2S_QUE_CONFIGx_0_7_REG(0), REG_EPN, },
+	{ "EPN_TX_L2S_QUE_CONFIG_1", EPN_TX_L2S_QUE_CONFIGx_0_7_REG(1), REG_EPN, },
+	{ "EPN_TX_L2S_QUE_CONFIG_2", EPN_TX_L2S_QUE_CONFIGx_0_7_REG(2), REG_EPN, },
+	{ "EPN_TX_L2S_QUE_CONFIG_3", EPN_TX_L2S_QUE_CONFIGx_0_7_REG(3), REG_EPN, },
+	{ "EPN_TX_L2S_QUE_CONFIG_4", EPN_TX_L2S_QUE_CONFIGx_0_7_REG(4), REG_EPN, },
+	{ "EPN_TX_L2S_QUE_CONFIG_5", EPN_TX_L2S_QUE_CONFIGx_0_7_REG(5), REG_EPN, },
+	{ "EPN_TX_L2S_QUE_CONFIG_6", EPN_TX_L2S_QUE_CONFIGx_0_7_REG(6), REG_EPN, },
+	{ "EPN_TX_L2S_QUE_CONFIG_7", EPN_TX_L2S_QUE_CONFIGx_0_7_REG(7), REG_EPN, },
+
+	{ "EPN_TX_L2S_QUE_EMPTY", EPN_TX_L2S_QUE_EMPTY_REG, REG_EPN, },
+	{ "EPN_TX_L2S_QUE_FULL", EPN_TX_L2S_QUE_FULL_REG, REG_EPN, },
+	{ "EPN_TX_L2S_QUE_STOPPED", EPN_TX_L2S_QUE_STOPPED_REG, REG_EPN, },
+
+	{ "EPN_TX_CTC_BURST_LIMIT_0", EPN_TX_CTC_BURST_LIMITx_0_7_REG(0), REG_EPN, },
+	{ "EPN_TX_CTC_BURST_LIMIT_1", EPN_TX_CTC_BURST_LIMITx_0_7_REG(1), REG_EPN, },
+	{ "EPN_TX_CTC_BURST_LIMIT_2", EPN_TX_CTC_BURST_LIMITx_0_7_REG(2), REG_EPN, },
+	{ "EPN_TX_CTC_BURST_LIMIT_3", EPN_TX_CTC_BURST_LIMITx_0_7_REG(3), REG_EPN, },
+	{ "EPN_TX_CTC_BURST_LIMIT_4", EPN_TX_CTC_BURST_LIMITx_0_7_REG(4), REG_EPN, },
+	{ "EPN_TX_CTC_BURST_LIMIT_5", EPN_TX_CTC_BURST_LIMITx_0_7_REG(5), REG_EPN, },
+	{ "EPN_TX_CTC_BURST_LIMIT_6", EPN_TX_CTC_BURST_LIMITx_0_7_REG(6), REG_EPN, },
+	{ "EPN_TX_CTC_BURST_LIMIT_7", EPN_TX_CTC_BURST_LIMITx_0_7_REG(7), REG_EPN, },
+
+	{ "EPN_BBH_MAX_OUTSTANDING_TARDY_PACKETS", EPN_BBH_MAX_OUTSTANDING_TARDY_PACKETS_REG, REG_EPN, },
+	{ "EPN_MIN_REPORT_VALUE_DIFFERENCE", EPN_MIN_REPORT_VALUE_DIFFERENCE_REG, REG_EPN, },
+	{ "EPN_BBH_STATUS_FIFO_OVERFLOW", EPN_BBH_STATUS_FIFO_OVERFLOW_REG, REG_EPN, },
+	{ "EPN_SPARE_CTL", EPN_SPARE_CTL_REG, REG_EPN, },
+	{ "EPN_TS_SYNC_OFFSET", EPN_TS_SYNC_OFFSET_REG, REG_EPN, },
+	{ "EPN_DN_TS_OFFSET", EPN_DN_TS_OFFSET_REG, REG_EPN, },
+	{ "EPN_UP_TS_OFFSET_LO", EPN_UP_TS_OFFSET_LO_REG, REG_EPN, },
+	{ "EPN_UP_TS_OFFSET_HI", EPN_UP_TS_OFFSET_HI_REG, REG_EPN, },
+	{ "EPN_TWO_STEP_TS_CTL", EPN_TWO_STEP_TS_CTL_REG, REG_EPN, },
+	{ "EPN_TWO_STEP_TS_VALUE_LO", EPN_TWO_STEP_TS_VALUE_LO_REG, REG_EPN, },
+	{ "EPN_TWO_STEP_TS_VALUE_HI", EPN_TWO_STEP_TS_VALUE_HI_REG, REG_EPN, },
+	{ "EPN_1588_TIMESTAMP_INT_STATUS", EPN_1588_TIMESTAMP_INT_STATUS_REG, REG_EPN, },
+	{ "EPN_1588_TIMESTAMP_INT_MASK", EPN_1588_TIMESTAMP_INT_MASK_REG, REG_EPN, },
+	{ "EPN_UP_PACKET_FETCH_MARGIN", EPN_UP_PACKET_FETCH_MARGIN_REG, REG_EPN, },
+	{ "EPN_DN_1588_TIMESTAMP", EPN_DN_1588_TIMESTAMP_REG, REG_EPN, },
+	{ "EPN_PERSISTENT_REPORT_CFG", EPN_PERSISTENT_REPORT_CFG_REG, REG_EPN, },
+	{ "EPN_PERSISTENT_REPORT_ENABLES", EPN_PERSISTENT_REPORT_ENABLES_REG, REG_EPN, },
+	{ "EPN_PERSISTENT_REPORT_REQUEST_SIZE", EPN_PERSISTENT_REPORT_REQUEST_SIZE_REG, REG_EPN, },
+	{ "EPN_AES_CONFIGURATION_1", EPN_AES_CFG_1_REG, REG_EPN, },
+
+	/*
+	 * LIF
+	 */
+	{ "LIF_PON_CONTROL", LIF_PON_CONTROL_REG, REG_LIF, },
+	{ "LIF_PON_INTER_OP_CONTROL", LIF_PON_INTER_OP_CONTROL_REG, REG_LIF, },
+	{ "LIF_FEC_CONTROL", LIF_FEC_CONTROL_REG, REG_LIF, },
+	{ "LIF_SEC_CONTROL", LIF_SEC_CONTROL_REG, REG_LIF, },
+	{ "LIF_MACSEC", LIF_MACSEC_REG, REG_LIF, },
+	{ "LIF_INT_STATUS", LIF_INT_STATUS_REG, REG_LIF, },
+	{ "LIF_INT_MASK", LIF_INT_MASK_REG, REG_LIF, },
+	{ "LIF_DATA_PORT_COMMAND", LIF_DATA_PORT_COMMAND_REG, REG_LIF, },
+
+	{ "LIF_LLID_0", LIF_LLIDx_0_7_REG(0), REG_LIF, },
+	{ "LIF_LLID_1", LIF_LLIDx_0_7_REG(1), REG_LIF, },
+	{ "LIF_LLID_2", LIF_LLIDx_0_7_REG(2), REG_LIF, },
+	{ "LIF_LLID_3", LIF_LLIDx_0_7_REG(3), REG_LIF, },
+	{ "LIF_LLID_4", LIF_LLIDx_0_7_REG(4), REG_LIF, },
+	{ "LIF_LLID_5", LIF_LLIDx_0_7_REG(5), REG_LIF, },
+	{ "LIF_LLID_6", LIF_LLIDx_0_7_REG(6), REG_LIF, },
+	{ "LIF_LLID_7", LIF_LLIDx_0_7_REG(7), REG_LIF, },
+	{ "LIF_LLID_16", LIF_LLIDx_16_23_REG(0), REG_LIF, },
+	{ "LIF_LLID_17", LIF_LLIDx_16_23_REG(1), REG_LIF, },
+	{ "LIF_LLID_18", LIF_LLIDx_16_23_REG(2), REG_LIF, },
+	{ "LIF_LLID_19", LIF_LLIDx_16_23_REG(3), REG_LIF, },
+	{ "LIF_LLID_20", LIF_LLIDx_16_23_REG(4), REG_LIF, },
+	{ "LIF_LLID_21", LIF_LLIDx_16_23_REG(5), REG_LIF, },
+	{ "LIF_LLID_22", LIF_LLIDx_16_23_REG(6), REG_LIF, },
+	{ "LIF_LLID_23", LIF_LLIDx_16_23_REG(7), REG_LIF, },
+
+	{ "LIF_TIME_REF_CNT", LIF_TIME_REF_CNT_REG, REG_LIF, },
+	{ "LIF_TIMESTAMP_UPD_PER", LIF_TIMESTAMP_UPD_PER_REG, REG_LIF, },
+	{ "LIF_TP_TIME", LIF_TP_TIME_REG, REG_LIF, },
+	{ "LIF_MPCP_TIME", LIF_MPCP_TIME_REG, REG_LIF, },
+	{ "LIF_MAXLEN_CTR", LIF_MAXLEN_CTR_REG, REG_LIF, },
+	{ "LIF_LASER_ON_DELTA", LIF_LASER_ON_DELTA_REG, REG_LIF, },
+	{ "LIF_LASER_OFF_IDLE", LIF_LASER_OFF_IDLE_REG, REG_LIF, },
+	{ "LIF_FEC_INIT_IDLE", LIF_FEC_INIT_IDLE_REG, REG_LIF, },
+	{ "LIF_FEC_ERR_ALLOW", LIF_FEC_ERR_ALLOW_REG, REG_LIF, },
+	{ "LIF_SEC_KEY_SEL", LIF_SEC_KEY_SEL_REG, REG_LIF, },
+	{ "LIF_DN_ENCRYPT_STAT", LIF_DN_ENCRYPT_STAT_REG, REG_LIF, },
+	{ "LIF_SEC_UP_KEY_STAT", LIF_SEC_UP_KEY_STAT_REG, REG_LIF, },
+	{ "LIF_SEC_UP_ENCRYPT_STAT", LIF_SEC_UP_ENCRYPT_STAT_REG, REG_LIF, },
+	{ "LIF_SEC_UP_MPCP_OFFSET", LIF_SEC_UP_MPCP_OFFSET_REG, REG_LIF, },
+	{ "LIF_FEC_PER_LLID", LIF_FEC_PER_LLID_REG, REG_LIF, },
+	{ "LIF_RX_LINE_CODE_ERR_CNT", LIF_RX_LINE_CODE_ERR_CNT_REG, REG_LIF, },
+	{ "LIF_RX_AGG_MPCP_FRM", LIF_RX_AGG_MPCP_FRM_REG, REG_LIF, },
+	{ "LIF_RX_AGG_GOOD_FRM", LIF_RX_AGG_GOOD_FRM_REG, REG_LIF, },
+	{ "LIF_RX_AGG_GOOD_BYTE", LIF_RX_AGG_GOOD_BYTE_REG, REG_LIF, },
+	{ "LIF_RX_AGG_UNDERSZ_FRM", LIF_RX_AGG_UNDERSZ_FRM_REG, REG_LIF, },
+	{ "LIF_RX_AGG_OVERSZ_FRM", LIF_RX_AGG_OVERSZ_FRM_REG, REG_LIF, },
+	{ "LIF_RX_AGG_CRC8_FRM", LIF_RX_AGG_CRC8_FRM_REG, REG_LIF, },
+	{ "LIF_RX_AGG_FEC_FRM", LIF_RX_AGG_FEC_FRM_REG, REG_LIF, },
+	{ "LIF_RX_AGG_FEC_BYTE", LIF_RX_AGG_FEC_BYTE_REG, REG_LIF, },
+	{ "LIF_RX_AGG_FEC_EXC_ERR_FRM", LIF_RX_AGG_FEC_EXC_ERR_FRM_REG, REG_LIF, },
+	{ "LIF_RX_AGG_NONFEC_GOOD_FRM", LIF_RX_AGG_NONFEC_GOOD_FRM_REG, REG_LIF, },
+	{ "LIF_RX_AGG_NONFEC_GOOD_BYTE", LIF_RX_AGG_NONFEC_GOOD_BYTE_REG, REG_LIF, },
+	{ "LIF_RX_AGG_ERR_BYTES", LIF_RX_AGG_ERR_BYTES_REG, REG_LIF, },
+	{ "LIF_RX_AGG_ERR_ZEROES", LIF_RX_AGG_ERR_ZEROES_REG, REG_LIF, },
+	{ "LIF_RX_AGG_NO_ERR_BLKS", LIF_RX_AGG_NO_ERR_BLKS_REG, REG_LIF, },
+	{ "LIF_RX_AGG_COR_BLKS", LIF_RX_AGG_COR_BLKS_REG, REG_LIF, },
+	{ "LIF_RX_AGG_UNCOR_BLKS", LIF_RX_AGG_UNCOR_BLKS_REG, REG_LIF, },
+	{ "LIF_RX_AGG_ERR_ONES", LIF_RX_AGG_ERR_ONES_REG, REG_LIF, },
+	{ "LIF_RX_AGG_ERR_FRM", LIF_RX_AGG_ERR_FRM_REG, REG_LIF, },
+	{ "LIF_TX_PKT_CNT", LIF_TX_PKT_CNT_REG, REG_LIF, },
+	{ "LIF_TX_BYTE_CNT", LIF_TX_BYTE_CNT_REG, REG_LIF, },
+	{ "LIF_TX_NON_FEC_PKT_CNT", LIF_TX_NON_FEC_PKT_CNT_REG, REG_LIF, },
+	{ "LIF_TX_NON_FEC_BYTE_CNT", LIF_TX_NON_FEC_BYTE_CNT_REG, REG_LIF, },
+	{ "LIF_TX_FEC_PKT_CNT", LIF_TX_FEC_PKT_CNT_REG, REG_LIF, },
+	{ "LIF_TX_FEC_BYTE_CNT", LIF_TX_FEC_BYTE_CNT_REG, REG_LIF, },
+	{ "LIF_TX_FEC_BLK_CNT", LIF_TX_FEC_BLK_CNT_REG, REG_LIF, },
+	{ "LIF_TX_MPCP_PKT_CNT", LIF_TX_MPCP_PKT_CNT_REG, REG_LIF, },
+	{ "LIF_DEBUG_TX_DATA_PKT_CNT", LIF_DEBUG_TX_DATA_PKT_CNT_REG, REG_LIF, },
+	{ "LIF_FEC_LLID_STATUS", LIF_FEC_LLID_STATUS_REG, REG_LIF, },
+	{ "LIF_SEC_RX_TEK_IG_IV_LLID", LIF_SEC_RX_TEK_IG_IV_LLID_REG, REG_LIF, },
+	{ "LIF_PON_BER_INTERV_THRESH", LIF_PON_BER_INTERV_THRESH_REG, REG_LIF, },
+	{ "LIF_LSR_MON_A_CTRL", LIF_LSR_MON_A_CTRL_REG, REG_LIF, },
+	{ "LIF_LSR_MON_A_MAX_THR", LIF_LSR_MON_A_MAX_THR_REG, REG_LIF, },
+	{ "LIF_LSR_MON_A_BST_LEN", LIF_LSR_MON_A_BST_LEN_REG, REG_LIF, },
+	{ "LIF_LSR_MON_A_BST_CNT", LIF_LSR_MON_A_BST_CNT_REG, REG_LIF, },
+	{ "LIF_DEBUG_PON_SM", LIF_DEBUG_PON_SM_REG, REG_LIF, },
+	{ "LIF_DEBUG_FEC_SM", LIF_DEBUG_FEC_SM_REG, REG_LIF, },
+	{ "LIF_AE_PKTNUM_WINDOW", LIF_AE_PKTNUM_WINDOW_REG, REG_LIF, },
+	{ "LIF_AE_PKTNUM_THRESH", LIF_AE_PKTNUM_THRESH_REG, REG_LIF, },
+	{ "LIF_AE_PKTNUM_STAT", LIF_AE_PKTNUM_STAT_REG, REG_LIF, },
+
+	{ "LIF_LLID_8", LIF_LLIDx_8_15_REG(0), REG_LIF, },
+	{ "LIF_LLID_9", LIF_LLIDx_8_15_REG(1), REG_LIF, },
+	{ "LIF_LLID_10", LIF_LLIDx_8_15_REG(2), REG_LIF, },
+	{ "LIF_LLID_11", LIF_LLIDx_8_15_REG(3), REG_LIF, },
+	{ "LIF_LLID_12", LIF_LLIDx_8_15_REG(4), REG_LIF, },
+	{ "LIF_LLID_13", LIF_LLIDx_8_15_REG(5), REG_LIF, },
+	{ "LIF_LLID_14", LIF_LLIDx_8_15_REG(6), REG_LIF, },
+	{ "LIF_LLID_15", LIF_LLIDx_8_15_REG(7), REG_LIF, },
+
+	{ "LIF_LLID_24", LIF_LLIDx_24_31_REG(0), REG_LIF, },
+	{ "LIF_LLID_25", LIF_LLIDx_24_31_REG(1), REG_LIF, },
+	{ "LIF_LLID_26", LIF_LLIDx_24_31_REG(2), REG_LIF, },
+	{ "LIF_LLID_27", LIF_LLIDx_24_31_REG(3), REG_LIF, },
+	{ "LIF_LLID_28", LIF_LLIDx_24_31_REG(4), REG_LIF, },
+	{ "LIF_LLID_29", LIF_LLIDx_24_31_REG(5), REG_LIF, },
+	{ "LIF_LLID_30", LIF_LLIDx_24_31_REG(6), REG_LIF, },
+	{ "LIF_LLID_31", LIF_LLIDx_24_31_REG(7), REG_LIF, },
+
+	{ "LIF_VLAN_TYPE", LIF_VLAN_TYPE_REG, REG_LIF, },
+	{ "LIF_P2P_AE_SCI_EN", LIF_P2P_AE_SCI_EN_REG, REG_LIF, },
+
+	{ "LIF_P2P_AE_SCI_LO_0", LIF_P2P_AE_SCI_LOx_REG(0), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_0", LIF_P2P_AE_SCI_HIx_REG(0), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_1", LIF_P2P_AE_SCI_LOx_REG(1), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_1", LIF_P2P_AE_SCI_HIx_REG(1), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_2", LIF_P2P_AE_SCI_LOx_REG(2), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_2", LIF_P2P_AE_SCI_HIx_REG(2), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_3", LIF_P2P_AE_SCI_LOx_REG(3), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_3", LIF_P2P_AE_SCI_HIx_REG(3), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_4", LIF_P2P_AE_SCI_LOx_REG(4), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_4", LIF_P2P_AE_SCI_HIx_REG(4), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_5", LIF_P2P_AE_SCI_LOx_REG(5), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_5", LIF_P2P_AE_SCI_HIx_REG(5), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_6", LIF_P2P_AE_SCI_LOx_REG(6), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_6", LIF_P2P_AE_SCI_HIx_REG(6), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_7", LIF_P2P_AE_SCI_LOx_REG(7), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_7", LIF_P2P_AE_SCI_HIx_REG(7), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_8", LIF_P2P_AE_SCI_LOx_REG(8), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_8", LIF_P2P_AE_SCI_HIx_REG(8), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_9", LIF_P2P_AE_SCI_LOx_REG(9), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_9", LIF_P2P_AE_SCI_HIx_REG(9), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_10", LIF_P2P_AE_SCI_LOx_REG(10), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_10", LIF_P2P_AE_SCI_HIx_REG(10), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_11", LIF_P2P_AE_SCI_LOx_REG(11), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_11", LIF_P2P_AE_SCI_HIx_REG(11), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_12", LIF_P2P_AE_SCI_LOx_REG(12), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_12", LIF_P2P_AE_SCI_HIx_REG(12), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_13", LIF_P2P_AE_SCI_LOx_REG(13), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_13", LIF_P2P_AE_SCI_HIx_REG(13), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_14", LIF_P2P_AE_SCI_LOx_REG(14), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_14", LIF_P2P_AE_SCI_HIx_REG(14), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_LO_15", LIF_P2P_AE_SCI_LOx_REG(15), REG_LIF, },
+	{ "LIF_P2P_AE_SCI_HI_15", LIF_P2P_AE_SCI_HIx_REG(15), REG_LIF, },
+
+	{ "LIF_SEC_UP_KEY_STAT_1", LIF_SEC_UP_KEY_STAT_1_REG, REG_LIF, },
+	{ "LIF_SEC_KEY_SEL_1", LIF_SEC_KEY_SEL_1_REG, REG_LIF, },
+	{ "LIF_PON_SEC_TX_PLAINTXT_AE_PAD_CONTROL", LIF_PON_SEC_TX_PLAINTXT_AE_PAD_CONTROL_REG, REG_LIF, },
+	{ "LIF_P2P_AUTONEG_CONTROL", LIF_P2P_AUTONEG_CONTROL_REG, REG_LIF, },
+	{ "LIF_P2P_AUTONEG_STATUS", LIF_P2P_AUTONEG_STATUS_REG, REG_LIF, },
+
+	/*
+	 * XIF
+	 */
+	{ "XIF_CTL", XIF_CTL_REG, REG_XIF, },
+	{ "XIF_INT_STATUS", XIF_INT_STATUS_REG, REG_XIF, },
+	{ "XIF_INT_MASK", XIF_INT_MASK_REG, REG_XIF, },
+	{ "XIF_PORT_DATA_REG_0", XIF_PORT_DATA_REG(0), REG_XIF, },
+	{ "XIF_PORT_DATA_REG_1", XIF_PORT_DATA_REG(1), REG_XIF, },
+	{ "XIF_PORT_DATA_REG_2", XIF_PORT_DATA_REG(2), REG_XIF, },
+	{ "XIF_PORT_DATA_REG_3", XIF_PORT_DATA_REG(3), REG_XIF, },
+	{ "XIF_PORT_DATA_REG_4", XIF_PORT_DATA_REG(4), REG_XIF, },
+	{ "XIF_PORT_DATA_REG_5", XIF_PORT_DATA_REG(5), REG_XIF, },
+	{ "XIF_PORT_DATA_REG_6", XIF_PORT_DATA_REG(6), REG_XIF, },
+	{ "XIF_PORT_DATA_REG_7", XIF_PORT_DATA_REG(7), REG_XIF, },
+	{ "XIF_MACSEC", XIF_MACSEC_REG, REG_XIF, },
+	{ "XIF_XPN_XMT_OFFSET", XIF_XPN_XMT_OFFSET_REG, REG_XIF, },
+	{ "XIF_XPN_TIMESTAMP_OFFSET", XIF_XPN_TIMESTAMP_OFFSET_REG, REG_XIF, },
+	{ "XIF_XPN_PKTGEN_CTL", XIF_XPN_PKTGEN_CTL_REG, REG_XIF, },
+	{ "XIF_XPN_PKTGEN_LLID", XIF_XPN_PKTGEN_LLID_REG, REG_XIF, },
+	{ "XIF_XPN_PKTGEN_PKT_CNT", XIF_XPN_PKTGEN_PKT_CNT_REG, REG_XIF, },
+	{ "XIF_XPN_PKTGEN_PKT_SIZE", XIF_XPN_PKTGEN_PKT_SIZE_REG, REG_XIF, },
+	{ "XIF_XPN_PKTGEN_IPG", XIF_XPN_PKTGEN_IPG_REG, REG_XIF, },
+	{ "XIF_TS_JITTER_THRESH", XIF_TS_JITTER_THRESH_REG, REG_XIF, },
+	{ "XIF_TS_UPDATE", XIF_TS_UPDATE_REG, REG_XIF, },
+	{ "XIF_GNT_OVERHEAD", XIF_GNT_OVERHEAD_REG, REG_XIF, },
+	{ "XIF_DISCOVER_OVERHEAD", XIF_DISCOVER_OVERHEAD_REG, REG_XIF, },
+	{ "XIF_DISCOVER_INFO", XIF_DISCOVER_INFO_REG, REG_XIF, },
+	{ "XIF_XPN_OVERSIZE_THRESH", XIF_XPN_OVERSIZE_THRESH_REG, REG_XIF, },
+	{ "XIF_SECRX_KEYNUM", XIF_SECRX_KEYNUM_REG, REG_XIF, },
+	{ "XIF_SECRX_ENCRYPT", XIF_SECRX_ENCRYPT_REG, REG_XIF, },
+	{ "XIF_PMC_FRAME_RX_CNT", XIF_PMC_FRAME_RX_CNT_REG, REG_XIF, },
+	{ "XIF_PMC_BYTE_RX_CNT", XIF_PMC_BYTE_RX_CNT_REG, REG_XIF, },
+	{ "XIF_PMC_RUNT_RX_CNT", XIF_PMC_RUNT_RX_CNT_REG, REG_XIF, },
+	{ "XIF_PMC_CW_ERR_RX_CNT", XIF_PMC_CW_ERR_RX_CNT_REG, REG_XIF, },
+	{ "XIF_PMC_CRC8_ERR_RX_CNT", XIF_PMC_CRC8_ERR_RX_CNT_REG, REG_XIF, },
+	{ "XIF_XPN_DATA_FRM_CNT", XIF_XPN_DATA_FRM_CNT_REG, REG_XIF, },
+	{ "XIF_XPN_DATA_BYTE_CNT", XIF_XPN_DATA_BYTE_CNT_REG, REG_XIF, },
+	{ "XIF_XPN_MPCP_FRM_CNT", XIF_XPN_MPCP_FRM_CNT_REG, REG_XIF, },
+	{ "XIF_XPN_OAM_FRM_CNT", XIF_XPN_OAM_FRM_CNT_REG, REG_XIF, },
+	{ "XIF_XPN_OAM_BYTE_CNT", XIF_XPN_OAM_BYTE_CNT_REG, REG_XIF, },
+	{ "XIF_XPN_OVERSIZE_FRM_CNT", XIF_XPN_OVERSIZE_FRM_CNT_REG, REG_XIF, },
+	{ "XIF_SEC_ABORT_FRM_CNT", XIF_SEC_ABORT_FRM_CNT_REG, REG_XIF, },
+	{ "XIF_PMC_TX_NEG_EVENT_CNT", XIF_PMC_TX_NEG_EVENT_CNT_REG, REG_XIF, },
+	{ "XIF_XPN_IDLE_PKT_CNT", XIF_XPN_IDLE_PKT_CNT_REG, REG_XIF, },
+	{ "XIF_MAX_MPCP_UPDATE", XIF_MAX_MPCP_UPDATE_REG, REG_XIF, },
+	{ "XIF_IPG_INSERTION", XIF_IPG_INSERTION_REG, REG_XIF, },
+	{ "XIF_TRANSPORT_TIME", XIF_TRANSPORT_TIME_REG, REG_XIF, },
+	{ "XIF_MPCP_TIME", XIF_MPCP_TIME_REG, REG_XIF, },
+	{ "XIF_OVERLAP_GNT_OH", XIF_OVERLAP_GNT_OH_REG, REG_XIF, },
+	{ "XIF_MAC_MODE", XIF_MAC_MODE_REG, REG_XIF, },
+	{ "XIF_PMCTX_CTL", XIF_PMCTX_CTL_REG, REG_XIF, },
+	{ "XIF_SEC_CTL", XIF_SEC_CTL_REG, REG_XIF, },
+	{ "XIF_AE_PKTNUM_WINDOW", XIF_AE_PKTNUM_WINDOW_REG, REG_XIF, },
+	{ "XIF_AE_PKTNUM_THRESH", XIF_AE_PKTNUM_THRESH_REG, REG_XIF, },
+	{ "XIF_SECTX_KEYNUM", XIF_SECTX_KEYNUM_REG, REG_XIF, },
+	{ "XIF_SECTX_ENCRYPT", XIF_SECTX_ENCRYPT_REG, REG_XIF, },
+	{ "XIF_AE_PKTNUM_STAT", XIF_AE_PKTNUM_STAT_REG, REG_XIF, },
+	{ "XIF_MPCP_UPDATE", XIF_MPCP_UPDATE_REG, REG_XIF, },
+	{ "XIF_BURST_PRELAUNCH_OFFSET", XIF_BURST_PRELAUNCH_OFFSET_REG, REG_XIF, },
+	{ "XIF_VLAN_TYPE", XIF_VLAN_TYPE_REG, REG_XIF, },
+	{ "XIF_P2P_AE_SCI_EN", XIF_P2P_AE_SCI_EN_REG, REG_XIF, },
+	{ "XIF_SECTX_KEYNUM_1", XIF_SECTX_KEYNUM_1_REG, REG_XIF, },
+	{ "XIF_SECRX_KEYNUM_1", XIF_SECRX_KEYNUM_1_REG, REG_XIF, },
+};
+
+/*
+ * regs dump functions
+ */
+/* seq_file start: hand back the position pointer as the iterator
+ * while *pos still indexes a valid epon_regs[] entry. */
+static void *regs_dump_seq_start(struct seq_file *s, loff_t *pos)
+{
+	if (*pos >= ARRAY_SIZE(epon_regs))
+		return NULL;
+
+	return pos;
+}
+
+/* seq_file next: advance to the following epon_regs[] entry, or
+ * return NULL when the table is exhausted. */
+static void *regs_dump_seq_next(struct seq_file *s,
+				       void __always_unused *v,
+				       loff_t *pos)
+{
+	(*pos)++;
+	if (*pos >= ARRAY_SIZE(epon_regs))
+		return NULL;
+
+	return pos;
+}
+
+/* seq_file stop: iteration holds no resources, nothing to release. */
+static void regs_dump_seq_stop(struct seq_file __always_unused *s,
+			       void __always_unused *v)
+{
+}
+
+/*
+ * seq_file show: read the register described by the current
+ * epon_regs[] entry through the accessor matching its block type
+ * and print one "NAME <tab> 0xVALUE" line.
+ */
+static int regs_dump_seq_show(struct seq_file *s, void *v)
+{
+	struct xport_epon_priv *mode = s->private;
+	const struct reg_desc *rdesc;
+	int i = *(loff_t *)v;
+	u32 val;
+
+	rdesc = &epon_regs[i];
+	switch (rdesc->type) {
+	case REG_EPN:
+		val = epon_epn_reg_readl(mode, rdesc->offset);
+		break;
+	case REG_LIF:
+		val = epon_lif_reg_readl(mode, rdesc->offset);
+		break;
+	case REG_XIF:
+		val = epon_xif_reg_readl(mode, rdesc->offset);
+		break;
+	case REG_XPCS_RX:
+		/* the enum defines this block type, so handle it instead
+		 * of crashing in the default BUG() if an entry uses it */
+		val = epon_xpcsrx_reg_readl(mode, rdesc->offset);
+		break;
+	default:
+		/* unknown block type: the table is corrupt */
+		BUG();
+		break;
+	}
+
+	seq_printf(s, "%-40s\t0x%08x\n",
+		   rdesc->name, val);
+
+	return 0;
+}
+
+/* seq_file iterator walking the epon_regs[] dump table */
+static const struct seq_operations regs_dump_seq_ops = {
+	.start = regs_dump_seq_start,
+	.next  = regs_dump_seq_next,
+	.stop  = regs_dump_seq_stop,
+	.show  = regs_dump_seq_show,
+};
+
+/*
+ * Open handler for the "epon_regs" debugfs file: start a seq_file
+ * session and stash the private mode pointer (set at file creation
+ * via inode->i_private) so the show routine can reach the hardware.
+ */
+static int regs_dump_open(struct inode *inode, struct file *filep)
+{
+	struct xport_epon_priv *mode = inode->i_private;
+	struct seq_file *seq;
+	int ret;
+
+	ret = seq_open(filep, &regs_dump_seq_ops);
+	if (ret)
+		return ret;
+
+	seq = filep->private_data;
+	seq->private = mode;
+	return 0;
+}
+
+/* file_operations for the read-only "epon_regs" debugfs file */
+static const struct file_operations regs_dump_fops = {
+	.owner   = THIS_MODULE,
+	.open    = regs_dump_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * Create the read-only "epon_regs" debugfs file that dumps the
+ * EPN/LIF/XIF registers.  The handle is stored in mode->regs_dbg
+ * for later removal by mode_epon_dbg_release().
+ */
+void mode_epon_dbg_init(struct xport_epon_priv *mode)
+{
+	/* the file name is a constant: pass the literal directly
+	 * instead of snprintf()-ing it into a stack buffer */
+	mode->regs_dbg = debugfs_create_file("epon_regs", 0400,
+					     bcm63158_dbg_root,
+					     mode,
+					     &regs_dump_fops);
+}
+
+/*
+ * Remove the debugfs file created by mode_epon_dbg_init().
+ * debugfs_remove() is a no-op on NULL (and on error pointers),
+ * so no explicit check is needed before calling it.
+ */
+void mode_epon_dbg_release(struct xport_epon_priv *mode)
+{
+	debugfs_remove(mode->regs_dbg);
+}
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/port_xport_serdes.c linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_serdes.c
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/port_xport_serdes.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_serdes.c	2025-09-25 17:40:33.555357249 +0200
@@ -0,0 +1,1686 @@
+#include "port_xport.h"
+#include "regs/wan_top.h"
+#include "regs/serdes_regs.h"
+
+/* define DUMP_SERDES_IO to trace every WAN_TOP/serdes register access */
+#undef DUMP_SERDES_IO
+
+#ifdef DUMP_SERDES_IO
+/* one register entry in the debug dump/trace tables below */
+struct reg_dump_desc {
+	const char	*name;		/* human readable register name */
+	unsigned int	reg;		/* register offset/address */
+	bool		skip_dump;	/* never trace accesses to this reg */
+	bool		skip_consec;	/* suppress trace of repeated consecutive reads */
+};
+
+/*
+ * WAN_TOP registers traced when DUMP_SERDES_IO is defined.  Entries
+ * with skip_dump set (the PMI_LP_* mailbox registers) are excluded
+ * from the access trace because they are polled in tight loops.
+ * File-local: declared static to avoid exporting the symbol.
+ */
+static const struct reg_dump_desc wan_top_regs[] = {
+	{ "SCRATCH_REG",
+	  WAN_TOP_SCRATCH_REG, },
+	{ "RESET_REG",
+	  WAN_TOP_RESET_REG, },
+	{ "GPON_GEARBOX_0_REG",
+	  WAN_TOP_GPON_GEARBOX_0_REG, },
+	{ "GPON_PATTERN_CFG1_REG",
+	  WAN_TOP_GPON_PATTERN_CFG1_REG, },
+	{ "GPON_PATTERN_CFG2_REG",
+	  WAN_TOP_GPON_PATTERN_CFG2_REG, },
+	{ "GPON_GEARBOX_2_REG",
+	  WAN_TOP_GPON_GEARBOX_2_REG, },
+	{ "EARLY_TXEN_TXEN_REG",
+	  WAN_TOP_EARLY_TXEN_TXEN_REG, },
+	{ "RESCAL_AL_CFG_REG",
+	  WAN_TOP_RESCAL_AL_CFG_REG, },
+	{ "RESCAL_STATUS_0_REG",
+	  WAN_TOP_RESCAL_STATUS_0_REG, },
+	{ "RESCAL_STATUS_1_REG",
+	  WAN_TOP_RESCAL_STATUS_1_REG, },
+	{ "MISC_0_REG",
+	  WAN_TOP_MISC_0_REG, },
+	{ "MISC_1_REG",
+	  WAN_TOP_MISC_1_REG, },
+	{ "MISC_2_REG",
+	  WAN_TOP_MISC_2_REG, },
+	{ "MISC_3_REG",
+	  WAN_TOP_MISC_3_REG, },
+	{ "MISC_4_REG",
+	  WAN_TOP_MISC_4_REG, },
+	{ "SERDES_PLL_CTL_REG",
+	  WAN_TOP_SERDES_PLL_CTL_REG, },
+	{ "SERDES_TEMP_CTL_REG",
+	  WAN_TOP_SERDES_TEMP_CTL_REG, },
+	{ "SERDES_PRAM_CTL_REG",
+	  WAN_TOP_SERDES_PRAM_CTL_REG, },
+	{ "SERDES_PRAM_CTL_2_REG",
+	  WAN_TOP_SERDES_PRAM_CTL_2_REG, },
+	{ "SERDES_PRAM_CTL_3_REG",
+	  WAN_TOP_SERDES_PRAM_CTL_3_REG, },
+	{ "PMI_LP_0_REG",
+	  WAN_TOP_PMI_LP_0_REG, true },
+	{ "PMI_LP_1_REG",
+	  WAN_TOP_PMI_LP_1_REG, true },
+	{ "PMI_LP_2_REG",
+	  WAN_TOP_PMI_LP_2_REG, true },
+	{ "PMI_LP_3_REG",
+	  WAN_TOP_PMI_LP_3_REG, true },
+	{ "PMI_LP_4_REG",
+	  WAN_TOP_PMI_LP_4_REG, true },
+	{ "TOD_CONFIG_0_REG",
+	  WAN_TOP_TOD_CONFIG_0_REG, },
+	{ "TOD_CONFIG_1_REG",
+	  WAN_TOP_TOD_CONFIG_1_REG, },
+	{ "TOD_CONFIG_2_REG",
+	  WAN_TOP_TOD_CONFIG_2_REG, },
+	{ "TOD_CONFIG_3_REG",
+	  WAN_TOP_TOD_CONFIG_3_REG, },
+	{ "TOD_CONFIG_4_REG",
+	  WAN_TOP_TOD_CONFIG_4_REG, },
+	{ "TOD_CONFIG_5_REG",
+	  WAN_TOP_TOD_CONFIG_5_REG, },
+	{ "TOD_TS48_MSB_REG",
+	  WAN_TOP_TOD_TS48_MSB_REG, },
+	{ "TOD_TS48_LSB_REG",
+	  WAN_TOP_TOD_TS48_LSB_REG, },
+	{ "TOD_TS64_MSB_REG",
+	  WAN_TOP_TOD_TS64_MSB_REG, },
+	{ "TOD_TS64_LSB_REG",
+	  WAN_TOP_TOD_TS64_LSB_REG, },
+	{ "TOD_STATUS_0_REG",
+	  WAN_TOP_TOD_STATUS_0_REG, },
+	{ "TOD_STATUS_1_REG",
+	  WAN_TOP_TOD_STATUS_1_REG, },
+	{ "SERDES_STATUS_REG",
+	  WAN_TOP_SERDES_STATUS_REG, },
+	{ "INT_STATUS_REG",
+	  WAN_TOP_INT_STATUS_REG, },
+	{ "INT_MASK_REG",
+	  WAN_TOP_INT_MASK_REG, },
+	{ "CLK_DEJITTER_SAMPLING_CTL_0_REG",
+	  WAN_TOP_CLK_DEJITTER_SAMPLING_CTL_0_REG, },
+	{ "CLK_DEJITTER_SAMPLING_CTL_1_REG",
+	  WAN_TOP_CLK_DEJITTER_SAMPLING_CTL_1_REG, },
+	{ "CLK_SAMPLE_COUNTER_REG",
+	  WAN_TOP_CLK_SAMPLE_COUNTER_REG, },
+	{ "SYNCE_PLL_CONFIG_REG",
+	  WAN_TOP_SYNCE_PLL_CONFIG_REG, },
+	{ "OSR_CONTROL_REG",
+	  WAN_TOP_OSR_CONTROL_REG, },
+	{ "GPON_GEARBOX_STATUS_GEARBOX_STATUS_REG",
+	  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_STATUS_REG, },
+	{ "GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_REG",
+	  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_REG, },
+	{ "GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_1_REG",
+	  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_1_REG, },
+	{ "GPON_GEARBOX_STATUS_GEARBOX_PRBS_STATUS_0_REG",
+	  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_STATUS_0_REG, },
+	{ "GPON_GEARBOX_STATUS_GEARBOX_PRBS_STATUS_1_REG",
+	  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_STATUS_1_REG, },
+	{ "AE_GEARBOX_CONTROL_0_REG",
+	  WAN_TOP_AE_GEARBOX_CONTROL_0_REG, },
+	{ "VOLTAGE_REGULATOR_DIVIDER_IDER_REG",
+	  WAN_TOP_VOLTAGE_REGULATOR_DIVIDER_IDER_REG, },
+	{ "CLOCK_SYNC_CONFIG_REG",
+	  WAN_TOP_CLOCK_SYNC_CONFIG_REG, },
+	{ "AEPCS_IEEE_REGID_REG",
+	  WAN_TOP_AEPCS_IEEE_REGID_REG, },
+	{ "FORCE_LBE_CONTROL_REG",
+	  WAN_TOP_FORCE_LBE_CONTROL_REG, },
+	{ "NGPON_GEARBOX_RX_CTL_0_REG",
+	  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_REG, },
+	{ "NGPON_GEARBOX_RX_CTL_1_REG",
+	  WAN_TOP_NGPON_GEARBOX_RX_CTL_1_REG, },
+	{ "NGPON_GEARBOX_RX_CTL_2_REG",
+	  WAN_TOP_NGPON_GEARBOX_RX_CTL_2_REG, },
+	{ "NGPON_GEARBOX_RX_CTL_3_REG",
+	  WAN_TOP_NGPON_GEARBOX_RX_CTL_3_REG, },
+	{ "NGPON_GEARBOX_TX_CTL_REG",
+	  WAN_TOP_NGPON_GEARBOX_TX_CTL_REG, },
+	{ "NGPON_GEARBOX_STATUS_REG",
+	  WAN_TOP_NGPON_GEARBOX_STATUS_REG, },
+	{ "EPON_10G_GEARBOX_REG",
+	  WAN_TOP_EPON_10G_GEARBOX_REG, },
+};
+
+/*
+ * Serdes (PMD) registers known to the DUMP_SERDES_IO trace; accesses
+ * to addresses not in this table trigger a BUG_ON in the accessors.
+ * File-local: declared static to avoid exporting the symbol.
+ */
+static const struct reg_dump_desc serdes_regs[] = {
+	{ "DSC_A_cdr_control_0", DSC_A_cdr_control_0 },
+	{ "DSC_A_cdr_control_1", DSC_A_cdr_control_1 },
+	{ "DSC_A_cdr_control_2", DSC_A_cdr_control_2 },
+	{ "DSC_A_rx_pi_control", DSC_A_rx_pi_control },
+	{ "DSC_A_cdr_status_integ_reg", DSC_A_cdr_status_integ_reg },
+	{ "DSC_A_cdr_status_phase_error", DSC_A_cdr_status_phase_error },
+	{ "DSC_A_rx_pi_cnt_bin_d", DSC_A_rx_pi_cnt_bin_d },
+	{ "DSC_A_rx_pi_cnt_bin_p", DSC_A_rx_pi_cnt_bin_p },
+	{ "DSC_A_rx_pi_cnt_bin_m", DSC_A_rx_pi_cnt_bin_m },
+	{ "DSC_A_rx_pi_diff_bin", DSC_A_rx_pi_diff_bin },
+	{ "DSC_A_trnsum_cntl_5", DSC_A_trnsum_cntl_5 },
+	{ "DSC_A_dsc_uc_ctrl", DSC_A_dsc_uc_ctrl },
+	{ "DSC_A_dsc_scratch", DSC_A_dsc_scratch },
+	{ "DSC_B_dsc_sm_ctrl_0", DSC_B_dsc_sm_ctrl_0 },
+	{ "DSC_B_dsc_sm_ctrl_1", DSC_B_dsc_sm_ctrl_1 },
+	{ "DSC_B_dsc_sm_ctrl_2", DSC_B_dsc_sm_ctrl_2 },
+	{ "DSC_B_dsc_sm_ctrl_3", DSC_B_dsc_sm_ctrl_3 },
+	{ "DSC_B_dsc_sm_ctrl_4", DSC_B_dsc_sm_ctrl_4 },
+	{ "DSC_B_dsc_sm_ctrl_5", DSC_B_dsc_sm_ctrl_5 },
+	{ "DSC_B_dsc_sm_ctrl_6", DSC_B_dsc_sm_ctrl_6 },
+	{ "DSC_B_dsc_sm_ctrl_7", DSC_B_dsc_sm_ctrl_7 },
+	{ "DSC_B_dsc_sm_ctrl_8", DSC_B_dsc_sm_ctrl_8 },
+	{ "DSC_B_dsc_sm_ctrl_9", DSC_B_dsc_sm_ctrl_9 },
+	{ "DSC_B_dsc_sm_status_dsc_lock", DSC_B_dsc_sm_status_dsc_lock },
+	{ "DSC_B_dsc_sm_status_dsc_state_one_hot", DSC_B_dsc_sm_status_dsc_state_one_hot },
+	{ "DSC_B_dsc_sm_status_dsc_state_eee_one_hot", DSC_B_dsc_sm_status_dsc_state_eee_one_hot },
+	{ "DSC_B_dsc_sm_status_restart", DSC_B_dsc_sm_status_restart },
+	{ "DSC_B_dsc_sm_status_dsc_state", DSC_B_dsc_sm_status_dsc_state },
+	{ "DSC_C_dfe_common_ctl", DSC_C_dfe_common_ctl },
+	{ "DSC_C_dfe_1_ctl", DSC_C_dfe_1_ctl },
+	{ "DSC_C_dfe_1_pat_ctl", DSC_C_dfe_1_pat_ctl },
+	{ "DSC_C_dfe_2_ctl", DSC_C_dfe_2_ctl },
+	{ "DSC_C_dfe_2_pat_ctl", DSC_C_dfe_2_pat_ctl },
+	{ "DSC_C_dfe_3_ctl", DSC_C_dfe_3_ctl },
+	{ "DSC_C_dfe_3_pat_ctl", DSC_C_dfe_3_pat_ctl },
+	{ "DSC_C_dfe_4_ctl", DSC_C_dfe_4_ctl },
+	{ "DSC_C_dfe_4_pat_ctl", DSC_C_dfe_4_pat_ctl },
+	{ "DSC_C_dfe_5_ctl", DSC_C_dfe_5_ctl },
+	{ "DSC_C_dfe_5_pat_ctl", DSC_C_dfe_5_pat_ctl },
+	{ "DSC_C_dfe_vga_override", DSC_C_dfe_vga_override },
+	{ "DSC_C_vga_ctl", DSC_C_vga_ctl },
+	{ "DSC_C_vga_pat_eyediag_ctl", DSC_C_vga_pat_eyediag_ctl },
+	{ "DSC_C_p1_frac_offs_ctl", DSC_C_p1_frac_offs_ctl },
+	{ "DSC_D_trnsum_ctl_1", DSC_D_trnsum_ctl_1 },
+	{ "DSC_D_trnsum_ctl_2", DSC_D_trnsum_ctl_2 },
+	{ "DSC_D_trnsum_ctl_3", DSC_D_trnsum_ctl_3 },
+	{ "DSC_D_trnsum_ctl_4", DSC_D_trnsum_ctl_4 },
+	{ "DSC_D_trnsum_sts_1", DSC_D_trnsum_sts_1 },
+	{ "DSC_D_trnsum_sts_2", DSC_D_trnsum_sts_2 },
+	{ "DSC_D_trnsum_sts_3", DSC_D_trnsum_sts_3 },
+	{ "DSC_D_trnsum_sts_4", DSC_D_trnsum_sts_4 },
+	{ "DSC_D_trnsum_sts_5", DSC_D_trnsum_sts_5 },
+	{ "DSC_D_trnsum_sts_6", DSC_D_trnsum_sts_6 },
+	{ "DSC_D_vga_p1eyediag_sts", DSC_D_vga_p1eyediag_sts },
+	{ "DSC_D_dfe_1_sts", DSC_D_dfe_1_sts },
+	{ "DSC_D_dfe_2_sts", DSC_D_dfe_2_sts },
+	{ "DSC_D_dfe_3_4_5_sts", DSC_D_dfe_3_4_5_sts },
+	{ "DSC_D_vga_tap_bin", DSC_D_vga_tap_bin },
+	{ "DSC_E_dsc_e_ctrl", DSC_E_dsc_e_ctrl },
+	{ "DSC_E_dsc_e_pf_ctrl", DSC_E_dsc_e_pf_ctrl },
+	{ "DSC_E_dsc_e_pf2_lowp_ctrl", DSC_E_dsc_e_pf2_lowp_ctrl },
+	{ "DSC_E_dsc_e_offset_adj_data_odd", DSC_E_dsc_e_offset_adj_data_odd },
+	{ "DSC_E_dsc_e_offset_adj_data_even", DSC_E_dsc_e_offset_adj_data_even },
+	{ "DSC_E_dsc_e_offset_adj_p1_odd", DSC_E_dsc_e_offset_adj_p1_odd },
+	{ "DSC_E_dsc_e_offset_adj_p1_even", DSC_E_dsc_e_offset_adj_p1_even },
+	{ "DSC_E_dsc_e_offset_adj_m1_odd", DSC_E_dsc_e_offset_adj_m1_odd },
+	{ "DSC_E_dsc_e_offset_adj_m1_even", DSC_E_dsc_e_offset_adj_m1_even },
+	{ "DSC_E_dsc_e_dc_offset", DSC_E_dsc_e_dc_offset },
+	{ "DSC_F_ONU10G_looptiming_ctrl_0", DSC_F_ONU10G_looptiming_ctrl_0 },
+	{ "TX_PI_LBE_tx_pi_control_0", TX_PI_LBE_tx_pi_control_0 },
+	{ "TX_PI_LBE_tx_pi_control_1", TX_PI_LBE_tx_pi_control_1 },
+	{ "TX_PI_LBE_tx_pi_control_2", TX_PI_LBE_tx_pi_control_2 },
+	{ "TX_PI_LBE_tx_pi_control_3", TX_PI_LBE_tx_pi_control_3 },
+	{ "TX_PI_LBE_tx_pi_control_4", TX_PI_LBE_tx_pi_control_4 },
+	{ "TX_PI_LBE_tx_pi_control_6", TX_PI_LBE_tx_pi_control_6 },
+	{ "TX_PI_LBE_tx_pi_status_0", TX_PI_LBE_tx_pi_status_0 },
+	{ "TX_PI_LBE_tx_pi_status_1", TX_PI_LBE_tx_pi_status_1 },
+	{ "TX_PI_LBE_tx_pi_status_2", TX_PI_LBE_tx_pi_status_2 },
+	{ "TX_PI_LBE_tx_pi_status_3", TX_PI_LBE_tx_pi_status_3 },
+	{ "TX_PI_LBE_tx_lbe_control_0", TX_PI_LBE_tx_lbe_control_0 },
+	{ "CKRST_CTRL_OSR_MODE_CONTROL", CKRST_CTRL_OSR_MODE_CONTROL },
+	{ "CKRST_CTRL_LANE_CLK_RESET_N_POWERDOWN_CONTROL", CKRST_CTRL_LANE_CLK_RESET_N_POWERDOWN_CONTROL },
+	{ "CKRST_CTRL_LANE_AFE_RESET_PWRDWN_CONTROL_CONTROL", CKRST_CTRL_LANE_AFE_RESET_PWRDWN_CONTROL_CONTROL },
+	{ "CKRST_CTRL_LANE_RESET_N_PWRDN_PIN_KILL_CONTROL", CKRST_CTRL_LANE_RESET_N_PWRDN_PIN_KILL_CONTROL },
+	{ "CKRST_CTRL_LANE_DEBUG_RESET_CONTROL", CKRST_CTRL_LANE_DEBUG_RESET_CONTROL },
+	{ "CKRST_CTRL_UC_ACK_LANE_CONTROL", CKRST_CTRL_UC_ACK_LANE_CONTROL },
+	{ "CKRST_CTRL_LANE_REG_RESET_OCCURRED_CONTROL", CKRST_CTRL_LANE_REG_RESET_OCCURRED_CONTROL },
+	{ "CKRST_CTRL_CLOCK_N_RESET_DEBUG_CONTROL", CKRST_CTRL_CLOCK_N_RESET_DEBUG_CONTROL },
+	{ "CKRST_CTRL_PMD_LANE_MODE_STATUS", CKRST_CTRL_PMD_LANE_MODE_STATUS },
+	{ "CKRST_CTRL_LANE_DP_RESET_STATE_STATUS", CKRST_CTRL_LANE_DP_RESET_STATE_STATUS },
+	{ "CKRST_CTRL_LN_MASK", CKRST_CTRL_LN_MASK },
+	{ "CKRST_CTRL_OSR_MODE_STATUS", CKRST_CTRL_OSR_MODE_STATUS },
+	{ "CKRST_CTRL_AFE_RESET_PWRDN_OSR_MODE_PIN_STATUS", CKRST_CTRL_AFE_RESET_PWRDN_OSR_MODE_PIN_STATUS },
+	{ "CKRST_CTRL_PLL_SELECT_CONTROL", CKRST_CTRL_PLL_SELECT_CONTROL },
+	{ "CKRST_CTRL_LN_S_RSTB_CONTROL", CKRST_CTRL_LN_S_RSTB_CONTROL },
+	{ "AMS_RX_RX_CONTROL_0", AMS_RX_RX_CONTROL_0 },
+	{ "AMS_RX_RX_CONTROL_1", AMS_RX_RX_CONTROL_1 },
+	{ "AMS_RX_RX_CONTROL_2", AMS_RX_RX_CONTROL_2 },
+	{ "AMS_RX_RX_CONTROL_3", AMS_RX_RX_CONTROL_3 },
+	{ "AMS_RX_RX_CONTROL_4", AMS_RX_RX_CONTROL_4 },
+	{ "AMS_RX_RX_INTCTRL", AMS_RX_RX_INTCTRL },
+	{ "AMS_RX_RX_STATUS", AMS_RX_RX_STATUS },
+	{ "AMS_TX_TX_CONTROL_0", AMS_TX_TX_CONTROL_0 },
+	{ "AMS_TX_TX_CONTROL_1", AMS_TX_TX_CONTROL_1 },
+	{ "AMS_TX_TX_CONTROL_2", AMS_TX_TX_CONTROL_2 },
+	{ "AMS_TX_TX_INTCTRL", AMS_TX_TX_INTCTRL },
+	{ "AMS_TX_TX_STATUS", AMS_TX_TX_STATUS },
+	{ "AMS_COM_PLL_CONTROL_0", AMS_COM_PLL_CONTROL_0 },
+	{ "AMS_COM_PLL_CONTROL_1", AMS_COM_PLL_CONTROL_1 },
+	{ "AMS_COM_PLL_CONTROL_2", AMS_COM_PLL_CONTROL_2 },
+	{ "AMS_COM_PLL_CONTROL_3", AMS_COM_PLL_CONTROL_3 },
+	{ "AMS_COM_PLL_CONTROL_4", AMS_COM_PLL_CONTROL_4 },
+	{ "AMS_COM_PLL_CONTROL_5", AMS_COM_PLL_CONTROL_5 },
+	{ "AMS_COM_PLL_CONTROL_6", AMS_COM_PLL_CONTROL_6 },
+	{ "AMS_COM_PLL_CONTROL_7", AMS_COM_PLL_CONTROL_7 },
+	{ "AMS_COM_PLL_CONTROL_8", AMS_COM_PLL_CONTROL_8 },
+	{ "AMS_COM_PLL_INTCTRL", AMS_COM_PLL_INTCTRL },
+	{ "AMS_COM_PLL_STATUS", AMS_COM_PLL_STATUS },
+	{ "SIGDET_SIGDET_CTRL_0", SIGDET_SIGDET_CTRL_0 },
+	{ "SIGDET_SIGDET_CTRL_1", SIGDET_SIGDET_CTRL_1 },
+	{ "SIGDET_SIGDET_CTRL_2", SIGDET_SIGDET_CTRL_2 },
+	{ "SIGDET_SIGDET_CTRL_3", SIGDET_SIGDET_CTRL_3 },
+	{ "SIGDET_SIGDET_STATUS_0", SIGDET_SIGDET_STATUS_0 },
+	{ "TLB_RX_prbs_chk_cnt_config", TLB_RX_prbs_chk_cnt_config },
+	{ "TLB_RX_prbs_chk_config", TLB_RX_prbs_chk_config },
+	{ "TLB_RX_dig_lpbk_config", TLB_RX_dig_lpbk_config },
+	{ "TLB_RX_tlb_rx_misc_config", TLB_RX_tlb_rx_misc_config },
+	{ "TLB_RX_prbs_chk_en_timer_control", TLB_RX_prbs_chk_en_timer_control },
+	{ "TLB_RX_dig_lpbk_pd_status", TLB_RX_dig_lpbk_pd_status },
+	{ "TLB_RX_prbs_chk_lock_status", TLB_RX_prbs_chk_lock_status },
+	{ "TLB_RX_prbs_chk_err_cnt_msb_status", TLB_RX_prbs_chk_err_cnt_msb_status },
+	{ "TLB_RX_prbs_chk_err_cnt_lsb_status", TLB_RX_prbs_chk_err_cnt_lsb_status },
+	{ "TLB_RX_pmd_rx_lock_status", TLB_RX_pmd_rx_lock_status },
+	{ "TLB_TX_patt_gen_config", TLB_TX_patt_gen_config },
+	{ "TLB_TX_prbs_gen_config", TLB_TX_prbs_gen_config },
+	{ "TLB_TX_rmt_lpbk_config", TLB_TX_rmt_lpbk_config },
+	{ "TLB_TX_tlb_tx_misc_config", TLB_TX_tlb_tx_misc_config },
+	{ "TLB_TX_tx_pi_loop_timing_config", TLB_TX_tx_pi_loop_timing_config },
+	{ "TLB_TX_rmt_lpbk_pd_status", TLB_TX_rmt_lpbk_pd_status },
+	{ "DIG_COM_REVID0", DIG_COM_REVID0 },
+	{ "DIG_COM_RESET_CONTROL_PMD", DIG_COM_RESET_CONTROL_PMD },
+	{ "DIG_COM_RESET_CONTROL_CORE_DP", DIG_COM_RESET_CONTROL_CORE_DP },
+	{ "DIG_COM_DEBUG_CONTROL", DIG_COM_DEBUG_CONTROL },
+	{ "DIG_COM_TOP_USER_CONTROL_0", DIG_COM_TOP_USER_CONTROL_0 },
+	{ "DIG_COM_CORE_REG_RESET_OCCURRED_CONTROL", DIG_COM_CORE_REG_RESET_OCCURRED_CONTROL },
+	{ "DIG_COM_RST_SEQ_TIMER_CONTROL", DIG_COM_RST_SEQ_TIMER_CONTROL },
+	{ "DIG_COM_CORE_DP_RESET_STATE_STATUS", DIG_COM_CORE_DP_RESET_STATE_STATUS },
+	{ "DIG_COM_REVID1", DIG_COM_REVID1 },
+	{ "DIG_COM_REVID2", DIG_COM_REVID2 },
+	{ "PATT_GEN_patt_gen_seq_0", PATT_GEN_patt_gen_seq_0 },
+	{ "PATT_GEN_patt_gen_seq_1", PATT_GEN_patt_gen_seq_1 },
+	{ "PATT_GEN_patt_gen_seq_2", PATT_GEN_patt_gen_seq_2 },
+	{ "PATT_GEN_patt_gen_seq_3", PATT_GEN_patt_gen_seq_3 },
+	{ "PATT_GEN_patt_gen_seq_4", PATT_GEN_patt_gen_seq_4 },
+	{ "PATT_GEN_patt_gen_seq_5", PATT_GEN_patt_gen_seq_5 },
+	{ "PATT_GEN_patt_gen_seq_6", PATT_GEN_patt_gen_seq_6 },
+	{ "PATT_GEN_patt_gen_seq_7", PATT_GEN_patt_gen_seq_7 },
+	{ "PATT_GEN_patt_gen_seq_8", PATT_GEN_patt_gen_seq_8 },
+	{ "PATT_GEN_patt_gen_seq_9", PATT_GEN_patt_gen_seq_9 },
+	{ "PATT_GEN_patt_gen_seq_10", PATT_GEN_patt_gen_seq_10 },
+	{ "PATT_GEN_patt_gen_seq_11", PATT_GEN_patt_gen_seq_11 },
+	{ "PATT_GEN_patt_gen_seq_12", PATT_GEN_patt_gen_seq_12 },
+	{ "PATT_GEN_patt_gen_seq_13", PATT_GEN_patt_gen_seq_13 },
+	{ "PATT_GEN_patt_gen_seq_14", PATT_GEN_patt_gen_seq_14 },
+	{ "TX_FED_txfir_control1", TX_FED_txfir_control1 },
+	{ "TX_FED_txfir_control2", TX_FED_txfir_control2 },
+	{ "TX_FED_txfir_control3", TX_FED_txfir_control3 },
+	{ "TX_FED_txfir_status1", TX_FED_txfir_status1 },
+	{ "TX_FED_txfir_status2", TX_FED_txfir_status2 },
+	{ "TX_FED_txfir_status3", TX_FED_txfir_status3 },
+	{ "TX_FED_txfir_status4", TX_FED_txfir_status4 },
+	{ "TX_FED_micro_control", TX_FED_micro_control },
+	{ "TX_FED_misc_control1", TX_FED_misc_control1 },
+	{ "TX_FED_txfir_control4", TX_FED_txfir_control4 },
+	{ "TX_FED_misc_status0", TX_FED_misc_status0 },
+	{ "PLL_CAL_COM_CTL_0", PLL_CAL_COM_CTL_0 },
+	{ "PLL_CAL_COM_CTL_1", PLL_CAL_COM_CTL_1 },
+	{ "PLL_CAL_COM_CTL_2", PLL_CAL_COM_CTL_2 },
+	{ "PLL_CAL_COM_CTL_3", PLL_CAL_COM_CTL_3 },
+	{ "PLL_CAL_COM_CTL_4", PLL_CAL_COM_CTL_4 },
+	{ "PLL_CAL_COM_CTL_5", PLL_CAL_COM_CTL_5 },
+	{ "PLL_CAL_COM_CTL_6", PLL_CAL_COM_CTL_6 },
+	{ "PLL_CAL_COM_CTL_7", PLL_CAL_COM_CTL_7 },
+	{ "PLL_CAL_COM_CTL_STATUS_0", PLL_CAL_COM_CTL_STATUS_0, false, false },
+	{ "PLL_CAL_COM_CTL_STATUS_1", PLL_CAL_COM_CTL_STATUS_1, false, false },
+	{ "TXCOM_CL72_tap_preset_control", TXCOM_CL72_tap_preset_control },
+	{ "TXCOM_CL72_debug_1_register", TXCOM_CL72_debug_1_register },
+	{ "CORE_PLL_COM_PMD_CORE_MODE_STATUS", CORE_PLL_COM_PMD_CORE_MODE_STATUS },
+	{ "CORE_PLL_COM_RESET_CONTROL_PLL_DP", CORE_PLL_COM_RESET_CONTROL_PLL_DP },
+	{ "CORE_PLL_COM_TOP_USER_CONTROL", CORE_PLL_COM_TOP_USER_CONTROL },
+	{ "CORE_PLL_COM_UC_ACK_CORE_CONTROL", CORE_PLL_COM_UC_ACK_CORE_CONTROL },
+	{ "CORE_PLL_COM_PLL_DP_RESET_STATE_STATUS", CORE_PLL_COM_PLL_DP_RESET_STATE_STATUS },
+	{ "CORE_PLL_COM_CORE_PLL_COM_STATUS_2", CORE_PLL_COM_CORE_PLL_COM_STATUS_2 },
+	{ "MICRO_A_ramword", MICRO_A_ramword },
+	{ "MICRO_A_address", MICRO_A_address },
+	{ "MICRO_A_command", MICRO_A_command },
+	{ "MICRO_A_ram_wrdata", MICRO_A_ram_wrdata },
+	{ "MICRO_A_ram_rddata", MICRO_A_ram_rddata },
+	{ "MICRO_A_download_status", MICRO_A_download_status },
+	{ "MICRO_A_sfr_status", MICRO_A_sfr_status },
+	{ "MICRO_A_mdio_uc_mailbox_msw", MICRO_A_mdio_uc_mailbox_msw },
+	{ "MICRO_A_mdio_uc_mailbox_lsw", MICRO_A_mdio_uc_mailbox_lsw },
+	{ "MICRO_A_uc_mdio_mailbox_lsw", MICRO_A_uc_mdio_mailbox_lsw },
+	{ "MICRO_A_command2", MICRO_A_command2 },
+	{ "MICRO_A_uc_mdio_mailbox_msw", MICRO_A_uc_mdio_mailbox_msw },
+	{ "MICRO_A_command3", MICRO_A_command3 },
+	{ "MICRO_A_command4", MICRO_A_command4 },
+	{ "MICRO_A_temperature_status", MICRO_A_temperature_status },
+	{ "MICRO_B_program_ram_control1", MICRO_B_program_ram_control1 },
+	{ "MICRO_B_dataram_control1", MICRO_B_dataram_control1 },
+	{ "MICRO_B_iram_control1", MICRO_B_iram_control1 },
+	{ "MDIO_MMDSEL_AER_COM_mdio_maskdata", MDIO_MMDSEL_AER_COM_mdio_maskdata },
+	{ "MDIO_MMDSEL_AER_COM_mdio_brcst_port_addr", MDIO_MMDSEL_AER_COM_mdio_brcst_port_addr },
+	{ "MDIO_MMDSEL_AER_COM_mdio_mmd_select", MDIO_MMDSEL_AER_COM_mdio_mmd_select },
+	{ "MDIO_MMDSEL_AER_COM_mdio_aer", MDIO_MMDSEL_AER_COM_mdio_aer },
+	{ "MDIO_BLK_ADDR_BLK_ADDR", MDIO_BLK_ADDR_BLK_ADDR },
+};
+
+/*
+ * Look up a WAN_TOP register offset in the trace table; NULL if the
+ * offset is unknown.
+ */
+static const struct reg_dump_desc *find_wan_top_reg(unsigned int reg)
+{
+	const struct reg_dump_desc *d = wan_top_regs;
+	const struct reg_dump_desc *end = d + ARRAY_SIZE(wan_top_regs);
+
+	for (; d < end; d++) {
+		if (d->reg == reg)
+			return d;
+	}
+	return NULL;
+}
+
+/*
+ * Look up a serdes register address in the trace table; NULL if the
+ * address is unknown.
+ */
+static const struct reg_dump_desc *find_serdes_reg(unsigned int reg)
+{
+	const struct reg_dump_desc *d = serdes_regs;
+	const struct reg_dump_desc *end = d + ARRAY_SIZE(serdes_regs);
+
+	for (; d < end; d++) {
+		if (d->reg == reg)
+			return d;
+	}
+	return NULL;
+}
+
+/*
+ * Dump all WAN_TOP registers from the trace table to the kernel log.
+ *
+ * NOTE(review): calls wan_top_readl(), which is defined later in this
+ * file with no forward declaration -- when DUMP_SERDES_IO is enabled
+ * this relies on the compiler accepting the forward reference; verify
+ * it still builds with the define on.
+ */
+static void dump_wan_top_regs(struct xport_priv *port)
+{
+	size_t i;
+	for (i = 0; i < ARRAY_SIZE(wan_top_regs); i++) {
+		printk("%30s: %08x\n",
+		       wan_top_regs[i].name,
+		       wan_top_readl(port, wan_top_regs[i].reg));
+	}
+}
+#endif
+
+/*
+ * Format a lane selector as "PLL_x/Ly" for trace output.
+ *
+ * Returns a pointer to a static buffer: the result is overwritten by
+ * the next call and the function is not reentrant/thread-safe -- it is
+ * only meant for the DUMP_SERDES_IO debug path.
+ */
+static __maybe_unused const char *get_serdes_lane(unsigned int lane)
+{
+	static char buf[64];
+
+	buf[0] = 0;
+	if (lane & serdes_PLL_1)
+		strcat(buf, "PLL_1/");
+	else
+		strcat(buf, "PLL_0/");
+
+	switch ((lane & LANE_BRDCST)) {
+	case LANE_0:
+		strcat(buf, "L0");
+		break;
+	case LANE_1:
+		strcat(buf, "L1");
+		break;
+	case LANE_2:
+		strcat(buf, "L2");
+		break;
+	case LANE_3:
+		strcat(buf, "L3");
+		break;
+	case LANE_BRDCST:
+		strcat(buf, "LBCAST");
+		break;
+	default:
+		strcat(buf, "LUNKN");
+		break;
+	}
+	return buf;
+}
+
+/*
+ * Raw WAN_TOP register read, without the DUMP_SERDES_IO tracing done
+ * by wan_top_readl().  regs[0]/regs_size[0] is the WAN_TOP window.
+ */
+static u32 __wan_top_readl(struct xport_priv *port, u32 reg)
+{
+	BUG_ON(reg >= port->regs_size[0]);
+	return readl(port->regs[0] + reg);
+}
+
+/*
+ * WAN_TOP register read.  With DUMP_SERDES_IO defined, the access is
+ * checked against the wan_top_regs[] table (unknown offsets BUG) and
+ * logged unless the entry is marked skip_dump.
+ */
+static u32 wan_top_readl(struct xport_priv *port, u32 reg)
+{
+#ifdef DUMP_SERDES_IO
+	const struct reg_dump_desc *d;
+#endif
+	u32 val;
+
+#ifdef DUMP_SERDES_IO
+	d = find_wan_top_reg(reg);
+	BUG_ON(!d);
+#endif
+
+	BUG_ON(reg >= port->regs_size[0]);
+	val = readl(port->regs[0] + reg);
+#ifdef DUMP_SERDES_IO
+	if (!d->skip_dump)
+		printk("wan_top_readl: %s => %x\n", d->name, val);
+#endif
+	return val;
+}
+
+/*
+ * WAN_TOP register write with read-back verification: the register is
+ * re-read after the write and a WARN is emitted if the value does not
+ * stick.  With DUMP_SERDES_IO defined, the access is also checked
+ * against wan_top_regs[] and logged with the previous value.
+ *
+ * NOTE(review): the read-back WARN assumes every register written here
+ * reads back what was written -- confirm no self-clearing/write-only
+ * bits are written through this path.
+ */
+static void wan_top_writel(struct xport_priv *port, u32 reg, u32 val)
+{
+	u32 reread;
+#ifdef DUMP_SERDES_IO
+	u32 before;
+	const struct reg_dump_desc *d;
+#endif
+
+	BUG_ON(reg >= port->regs_size[0]);
+
+#ifdef DUMP_SERDES_IO
+	d = find_wan_top_reg(reg);
+	BUG_ON(!d);
+	before = __wan_top_readl(port, reg);
+#endif
+	writel(val, port->regs[0] + reg);
+
+#ifdef DUMP_SERDES_IO
+	if (!d->skip_dump)
+		printk("wan_top_write: %s <= %x (prev %x)\n",
+		       d->name, val, before);
+#endif
+
+	reread = __wan_top_readl(port, reg);
+	if (reread != val)
+		WARN(1, "failed to reread from reg:%u %08x != %08x\n",
+		     reg, val, reread);
+}
+
+/*
+ * Read-modify-write helper: clear the given bits in a WAN_TOP register.
+ */
+static void wan_top_clear(struct xport_priv *port, u32 reg, u32 mask)
+{
+	wan_top_writel(port, reg, wan_top_readl(port, reg) & ~mask);
+}
+
+/*
+ * Read-modify-write helper: set the given bits in a WAN_TOP register.
+ */
+static void wan_top_set(struct xport_priv *port, u32 reg, u32 mask)
+{
+	wan_top_writel(port, reg, wan_top_readl(port, reg) | mask);
+}
+
+/*
+ * Wait (up to ~1ms) for the PMI mailbox ACK bit in status_reg.
+ *
+ * Returns 0 with the read data stored in *ret_val on success, 2 when
+ * the mailbox reported an error, 1 on timeout.
+ */
+static int serdes_op_wait(struct xport_priv *port,
+			  u32 status_reg, u32 *ret_val)
+{
+	unsigned int tries = 1000;
+
+	while (tries--) {
+		u32 status = wan_top_readl(port, status_reg);
+
+		if (status & WAN_TOP_PMI_LP_3_PMI_LP_ACK_MASK) {
+			if (status & WAN_TOP_PMI_LP_3_PMI_LP_ERR_MASK)
+				return 2;
+
+			*ret_val = status & WAN_TOP_PMI_LP_3_PMI_LP_RDDATA_MASK;
+			return 0;
+		}
+
+		udelay(1);
+	}
+
+	return 1;
+}
+
+/*
+ * Read one serdes register through the WAN_TOP PMI mailbox.
+ *
+ * The lane selector is extended with the device id (DEVID_1 for the
+ * "misc"/PMD space, DEVID_0 for the PCS space), the address is latched
+ * via PMI_LP_1, the transaction started via PMI_LP_0, and completion
+ * polled on PMI_LP_3 (misc) or PMI_LP_4 (pcs).
+ *
+ * Returns the 16-bit register value (as a non-negative int) or -1 on
+ * mailbox error/timeout.  @no_dump suppresses the DUMP_SERDES_IO trace
+ * (used by the write path's internal before/after reads).
+ */
+static int __serdes_read_reg(struct xport_priv *port,
+			     u16 lane, u16 address,
+			     bool is_misc,
+			     bool no_dump)
+{
+	u32 val, status_reg;
+	int ret, wait_ret;
+#ifdef DUMP_SERDES_IO
+	const struct reg_dump_desc *d;
+	static u32 last_reg;
+
+	d = find_serdes_reg(address);
+	BUG_ON(!d);
+#endif
+
+	if (is_misc)
+		lane |= DEVID_1;
+	else
+		lane |= DEVID_0;
+	val = (lane << 16) | address;
+	wan_top_writel(port, WAN_TOP_PMI_LP_1_REG, val);
+
+	if (is_misc)
+		val = WAN_TOP_PMI_LP_0_MISC_EN_MASK;
+	else
+		val = WAN_TOP_PMI_LP_0_PCS_EN_MASK;
+	wan_top_writel(port, WAN_TOP_PMI_LP_0_REG, val);
+
+	if (is_misc)
+		status_reg = WAN_TOP_PMI_LP_3_REG;
+	else
+		status_reg = WAN_TOP_PMI_LP_4_REG;
+
+	wait_ret = serdes_op_wait(port, status_reg, &ret);
+	/* always release the mailbox enable, even on failure */
+	wan_top_writel(port, WAN_TOP_PMI_LP_0_REG, 0);
+	udelay(5);
+
+	if (wait_ret) {
+		/* FIXME */
+		/* netdev_err(port->priv->netdev, */
+		/* 	   "serdes reg read failed: " */
+		/* 	   "ret:%d tgt:%s lane:%x address:%x\n", */
+		/* 	   wait_ret, */
+		/* 	   is_misc ? "misc" : "pcs", */
+		/* 	   lane, address); */
+		return -1;
+	}
+
+#ifdef DUMP_SERDES_IO
+	/* skip_consec collapses repeated reads of the same lane/address */
+	if (!no_dump && !d->skip_dump) {
+		if (!d->skip_consec ||
+		    last_reg != ((lane << 16) | address)) {
+			/* printk("serdes_readl: %s %s <= %04x\n", */
+			/*        get_serdes_lane(lane), d->name, ret); */
+			printk("serdes_readl: %04x@%04x <= %04x\n",
+			       lane, address, ret);
+		}
+		last_reg = (lane << 16) | address;
+	}
+#endif
+
+	return ret;
+}
+
+/*
+ * Write one serdes register through the WAN_TOP PMI mailbox.
+ *
+ * Only the bits set in @mask are updated: the inverted mask is passed
+ * in PMI_LP_2 alongside the write data, so masked-out bits keep their
+ * value.  Device id selection, mailbox sequencing and completion
+ * polling mirror __serdes_read_reg().
+ *
+ * Returns 0 on success, -1 on mailbox error/timeout.
+ */
+static int __serdes_write_reg(struct xport_priv *port,
+			      u16 lane, u16 address,
+			      u16 wrdata, u16 mask,
+			      bool is_misc)
+{
+	u32 val, status_reg;
+	u16 nmask = ~mask;
+	int ret, wait_ret;
+#ifdef DUMP_SERDES_IO
+	const struct reg_dump_desc *d;
+	int old, reread;
+#endif
+
+	if (is_misc)
+		lane |= DEVID_1;
+	else
+		lane |= DEVID_0;
+
+#ifdef DUMP_SERDES_IO
+	d = find_serdes_reg(address);
+	BUG_ON(!d);
+	/* capture the previous value for the trace below */
+	old = __serdes_read_reg(port, lane, address, is_misc, true);
+#endif
+
+	val = (lane << 16) | address;
+	wan_top_writel(port, WAN_TOP_PMI_LP_1_REG, val);
+
+	val = (wrdata << WAN_TOP_PMI_LP_2_PMI_LP_WRDATA_SHIFT);
+	val |= (nmask << WAN_TOP_PMI_LP_2_PMI_LP_MASKDATA_SHIFT);
+	wan_top_writel(port, WAN_TOP_PMI_LP_2_REG, val);
+
+	if (is_misc)
+		val = WAN_TOP_PMI_LP_0_MISC_EN_MASK;
+	else
+		val = WAN_TOP_PMI_LP_0_PCS_EN_MASK;
+	val |= WAN_TOP_PMI_LP_0_WRITE_MASK;
+	wan_top_writel(port, WAN_TOP_PMI_LP_0_REG, val);
+	udelay(5);
+
+	if (is_misc)
+		status_reg = WAN_TOP_PMI_LP_3_REG;
+	else
+		status_reg = WAN_TOP_PMI_LP_4_REG;
+
+	wait_ret = serdes_op_wait(port, status_reg, &ret);
+	/* always release the mailbox enable, even on failure */
+	wan_top_writel(port, WAN_TOP_PMI_LP_0_REG, 0);
+	udelay(5);
+
+	if (wait_ret) {
+		netdev_err(port->priv->netdev,
+			   "serdes reg write failed: ret:%d tgt:%s lane:%x "
+			   "address:%x val/mask:%04x/%04x\n",
+			   wait_ret, is_misc ? "misc" : "pcs",
+			   lane, address, wrdata, mask);
+		return -1;
+	}
+
+#ifdef DUMP_SERDES_IO
+	reread = __serdes_read_reg(port, lane, address, is_misc, true);
+	if (!d->skip_dump) {
+		/* printk("serdes_write: %s %s <= %04x / %04x\n", */
+		/*        get_serdes_lane(lane), d->name, wrdata, mask); */
+		printk("serdes_write: %04x@%04x (%04x/%04x): %04x -> %04x\n",
+		       lane, address, wrdata, mask, old, reread);
+	}
+#endif
+	return 0;
+}
+
+/*
+ * Read a register from the serdes "misc" (DEVID_1) space.  Returns the
+ * 16-bit value or -1 on error.
+ */
+static int serdes_misc_read_reg(struct xport_priv *port,
+				u16 lane, u16 address)
+{
+	return __serdes_read_reg(port, lane, address, true, false);
+}
+
+/*
+ * Read a "misc" space register and WARN if it does not hold the
+ * expected value.  Returns 0 on match, negative on read failure, -1 on
+ * mismatch.
+ */
+static int serdes_misc_check_reg(struct xport_priv *port,
+				 u16 lane, u16 address, u16 exp_value)
+{
+	int ret;
+
+	ret = serdes_misc_read_reg(port, lane, address);
+	if (ret < 0)
+		return ret;
+
+	if ((u16)ret != exp_value) {
+		/* both values are 16 bit, print both with %04x */
+		WARN(1, "serdes check reg failed: address:%04x %04x != %04x\n",
+		     address, ret, exp_value);
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Masked write to a register in the serdes "misc" (DEVID_1) space.
+ * Returns 0 on success, -1 on error.
+ */
+static int serdes_misc_write_reg(struct xport_priv *port,
+				 u16 lane, u16 address,
+				 u16 wrdata, u16 mask)
+{
+	return __serdes_write_reg(port, lane, address, wrdata, mask, true);
+}
+
+/*
+ * Masked write to a "misc" space register followed by a read-back
+ * check against exp_value.  Returns 0 on success, negative on write
+ * failure or mismatch.
+ */
+static int serdes_misc_write_check_reg(struct xport_priv *port,
+				       u16 lane, u16 address,
+				       u16 wrdata, u16 mask,
+				       u16 exp_value)
+{
+	int err;
+
+	err = __serdes_write_reg(port, lane, address, wrdata, mask, true);
+	if (err < 0)
+		return err;
+
+	return serdes_misc_check_reg(port, lane, address, exp_value);
+}
+
+/*
+ * Read a register from the serdes PCS (DEVID_0) space.  Returns the
+ * 16-bit value or -1 on error.  Exported to the rest of the driver.
+ */
+int xport_serdes_pcs_read_reg(struct xport_priv *port,
+			      u16 lane, u16 address)
+{
+	return __serdes_read_reg(port, lane, address, false, false);
+}
+
+/*
+ * Masked write to a register in the serdes PCS (DEVID_0) space.
+ * Returns 0 on success, -1 on error.  Exported to the rest of the
+ * driver.
+ */
+int xport_serdes_pcs_write_reg(struct xport_priv *port,
+			       u16 lane, u16 address,
+			       u16 wrdata, u16 mask)
+{
+	return __serdes_write_reg(port, lane, address, wrdata, mask, false);
+}
+
+/*
+ * Poll PLL_CAL_COM_CTL_STATUS_0 of the given PLL until it reports
+ * lock.  Returns 0 on lock, 1 on read error or ~1ms timeout.
+ */
+static int serdes_poll_pll_lock(struct xport_priv *port,
+				unsigned int pll_id)
+{
+	int ret;
+	size_t i;
+
+	for (i = 0; i < 1000; i++) {
+		ret = serdes_misc_read_reg(port, pll_id,
+					   PLL_CAL_COM_CTL_STATUS_0);
+		if (ret < 0)
+			return 1;
+
+		/* bit 8: PLL lock -- TODO confirm against serdes datasheet */
+		if (ret & 0x0100)
+			return 0;
+
+		udelay(1);
+	}
+
+	netdev_err(port->priv->netdev, "PLL %d lock timeout\n", pll_id);
+	return 1;
+}
+
+/*
+ * Poll the lane 0 DSC state machine lock bit (up to ~10ms).  Returns 0
+ * on lock, 1 on read error or timeout.
+ */
+static int serdes_poll_dsc_lock(struct xport_priv *port)
+{
+	size_t attempt = 0;
+
+	do {
+		int val = serdes_misc_read_reg(port, LANE_0,
+					       DSC_B_dsc_sm_status_dsc_lock);
+
+		if (val < 0)
+			return 1;
+		if (val & 0x1)
+			return 0;
+
+		udelay(1);
+	} while (++attempt < 10000);
+
+	netdev_err(port->priv->netdev, "DSC lock timeout\n");
+	return 1;
+}
+
+/*
+ * Poll the lane 0 PMD receive lock status (up to ~10ms).  Returns 0 on
+ * lock, 1 on read error or timeout.
+ */
+static int serdes_poll_pmd_rx_lock(struct xport_priv *port)
+{
+	size_t attempt = 0;
+
+	do {
+		int val = serdes_misc_read_reg(port, LANE_0,
+					       TLB_RX_pmd_rx_lock_status);
+
+		if (val < 0)
+			return 1;
+		if (val & 0x1)
+			return 0;
+
+		udelay(1);
+	} while (++attempt < 10000);
+
+	netdev_err(port->priv->netdev, "PMD rx lock timeout\n");
+	return 1;
+}
+
+/*
+ * Poll the WAN_TOP serdes status register until the lock bit of the
+ * requested PLL is set (up to ~10ms).  Returns 0 on lock, 1 on
+ * timeout.
+ */
+static int wan_top_poll_pll_lock(struct xport_priv *port,
+				 unsigned int pll_id)
+{
+	u32 val, mask;
+	size_t i;
+
+	/* the lock bit to test is loop invariant, select it once */
+	if (pll_id == serdes_PLL_0)
+		mask = WAN_TOP_SERDES_STATUS_PMD_PLL0_LOCK_MASK;
+	else
+		mask = WAN_TOP_SERDES_STATUS_PMD_PLL1_LOCK_MASK;
+
+	for (i = 0; i < 10000; i++) {
+		val = wan_top_readl(port, WAN_TOP_SERDES_STATUS_REG);
+		if (val & mask)
+			return 0;
+
+		udelay(1);
+	}
+
+	netdev_err(port->priv->netdev, "WAN TOP PLL %d lock timeout\n",
+		   pll_id);
+	return 1;
+}
+
+/*
+ * Configure laser burst enable (LBE) forcing.
+ *
+ * @force false: clear all force/output-enable bits, the LBE pin
+ * follows the hardware again.  @force true: drive the pin to
+ * @forced_value regardless of hardware state.
+ */
+static void __serdes_lbe_op(struct xport_priv *port,
+			    bool force, bool forced_value)
+{
+	u32 val;
+
+	val = wan_top_readl(port, WAN_TOP_FORCE_LBE_CONTROL_REG);
+
+	if (!force) {
+		val &= ~WAN_TOP_FORCE_LBE_CONTROL_OE_MASK;
+		val &= ~WAN_TOP_FORCE_LBE_CONTROL_OE_VALUE_MASK;
+		val &= ~WAN_TOP_FORCE_LBE_CONTROL_VALUE_MASK;
+		val &= ~WAN_TOP_FORCE_LBE_CONTROL_MASK;
+	} else {
+		val |= WAN_TOP_FORCE_LBE_CONTROL_OE_VALUE_MASK;
+		val |= WAN_TOP_FORCE_LBE_CONTROL_OE_MASK;
+		val |= WAN_TOP_FORCE_LBE_CONTROL_MASK;
+
+		if (!forced_value)
+			val &= ~WAN_TOP_FORCE_LBE_CONTROL_VALUE_MASK;
+		else
+			val |= WAN_TOP_FORCE_LBE_CONTROL_VALUE_MASK;
+	}
+
+	wan_top_writel(port, WAN_TOP_FORCE_LBE_CONTROL_REG, val);
+}
+
+/*
+ * Force the LBE pin active (laser on).
+ */
+void xport_serdes_lbe_force_enable(struct xport_priv *port)
+{
+	__serdes_lbe_op(port, true, true);
+}
+
+/*
+ * Force the LBE pin inactive (laser off).
+ */
+void xport_serdes_lbe_force_disable(struct xport_priv *port)
+{
+	__serdes_lbe_op(port, true, false);
+}
+
+/*
+ * Stop forcing the LBE pin; it follows the hardware again.
+ */
+void xport_serdes_lbe_dont_force(struct xport_priv *port)
+{
+	__serdes_lbe_op(port, false, false);
+}
+
+/*
+ * Report the current LBE forcing state.  *force is derived from the
+ * OE_VALUE bit (set together with OE/CONTROL by __serdes_lbe_op()),
+ * *forced_value from the VALUE bit.
+ */
+void xport_serdes_lbe_get_forced_state(struct xport_priv *port,
+				       bool *force, bool *forced_value)
+{
+	u32 val;
+
+	val = wan_top_readl(port, WAN_TOP_FORCE_LBE_CONTROL_REG);
+	*force = (val & WAN_TOP_FORCE_LBE_CONTROL_OE_VALUE_MASK);
+	*forced_value = (val & WAN_TOP_FORCE_LBE_CONTROL_VALUE_MASK);
+}
+
+/*
+ * Move the M1 slicer phase interpolator so its offset from the data
+ * (D) location equals desired_m1_d_offset (modulo 64 PI steps).
+ *
+ * DSC_A_rx_pi_cnt_bin_m holds the current M1 location in bits 6:0 and
+ * the D location in bits 14:8.  The magic rx_pi_control values select
+ * the slicer and step direction -- taken from vendor reference code,
+ * TODO confirm against the serdes programming guide.
+ */
+static void set_clk90_offset(struct xport_priv *port,
+			     uint8_t desired_m1_d_offset)
+{
+	u8 steps, count, d_location, now_m1_location, next_m1_location;
+	u16 rd_serdes;
+
+	desired_m1_d_offset = desired_m1_d_offset % 64;
+	rd_serdes = serdes_misc_read_reg(port, LANE_0, DSC_A_rx_pi_cnt_bin_m);
+	now_m1_location = (rd_serdes & 0x7f);
+	d_location = (rd_serdes & 0x7f00) >> 8;
+	next_m1_location = d_location + desired_m1_d_offset;
+
+	/* calculate number of movement steps and direction */
+	if (next_m1_location >= now_m1_location) {
+		steps = next_m1_location - now_m1_location;
+		serdes_misc_write_reg(port, LANE_0,
+				      DSC_A_rx_pi_control,
+				      0x4401,
+				      0x74ff);
+	} else {
+		steps = now_m1_location - next_m1_location;
+		serdes_misc_write_reg(port, LANE_0,
+				      DSC_A_rx_pi_control,
+				      0x4001,
+				      0x74ff);
+	}
+
+	/* move the Slicer(P) the required steps  */
+	for (count = 0; count < steps; count++)
+		serdes_misc_write_reg(port, LANE_0,
+				      DSC_A_rx_pi_control,
+				      0x0200,
+				      0x0200);
+
+	rd_serdes = serdes_misc_read_reg(port, LANE_0, DSC_A_rx_pi_cnt_bin_m);
+#if 0
+	d_location       = (rd_serdes & 0x7f00) >> 8;
+	now_m1_location  = (rd_serdes & 0x7f);
+	printk("0xd009 = 0x%04x\n", rd_serdes);
+	printk("d_location = %d\n", d_location);
+	printk("now_m1_location = %d\n", now_m1_location);
+	printk("Done Slicer-M1-Adjustment\n");
+#endif
+}
+
+/*
+ * Move the P slicer phase interpolator so its offset from the data (D)
+ * location equals desired_p_d_offset (modulo 64 PI steps).  Mirror of
+ * set_clk90_offset() for the P slicer: DSC_A_rx_pi_cnt_bin_d holds the
+ * D location in bits 6:0 and the P location in bits 14:8.  The magic
+ * rx_pi_control values come from vendor reference code -- TODO confirm
+ * against the serdes programming guide.
+ *
+ * (Also fixes space-before-tab indentation on the else branch.)
+ */
+static void set_clkp1_offset(struct xport_priv *port,
+			     u8 desired_p_d_offset)
+{
+	u8 step, count, d_location, now_p_location, next_p_location;
+	u16 rd_serdes;
+
+	desired_p_d_offset = desired_p_d_offset % 64;
+	rd_serdes = serdes_misc_read_reg(port, LANE_0,
+					 DSC_A_rx_pi_cnt_bin_d);
+	d_location = (rd_serdes & 0x7f);
+	now_p_location = (rd_serdes & 0x7f00) >> 8;
+	next_p_location = d_location + desired_p_d_offset;
+
+	/* calculate number of movement steps and direction */
+	if (next_p_location >= now_p_location) {
+		step = next_p_location - now_p_location;
+		serdes_misc_write_reg(port, LANE_0,
+				      DSC_A_rx_pi_control,
+				      0x2401,
+				      0x74ff);
+	} else {
+		step = now_p_location - next_p_location;
+		serdes_misc_write_reg(port, LANE_0,
+				      DSC_A_rx_pi_control,
+				      0x2001,
+				      0x74ff);
+	}
+
+	/* move the Slicer(P) the required steps */
+	for (count = 0; count < step; count++)
+		serdes_misc_write_reg(port, LANE_0,
+				      DSC_A_rx_pi_control,
+				      0x0200,
+				      0x0200);
+
+	rd_serdes = serdes_misc_read_reg(port, LANE_0, DSC_A_rx_pi_cnt_bin_d);
+#if 0
+	d_location = (rd_serdes & 0x7f);
+	now_p_location = (rd_serdes & 0x7f00) >> 8;
+	printk("0xd007 = 0x%04x\n", rd_serdes);
+	printk("d_location = %d\n", d_location);
+	printk("now_p_location = %d\n", now_p_location);
+	printk("Done Slicer-P-Adjustment\n");
+#endif
+}
+
+/*
+ * Program both slicer phase interpolator offsets (M1 and P relative to
+ * D) in one sequence.  Bit 0x0800 of rx_pi_control is set before and
+ * the 0x7800 control field cleared after the adjustment -- presumably
+ * a manual-override enable window; TODO confirm with the datasheet.
+ */
+static void rx_pi_spacing(struct xport_priv *port,
+			  u8 desired_m1_d_offset,
+			  u8 desired_p_d_offset)
+{
+	serdes_misc_write_reg(port, LANE_0,
+			      DSC_A_rx_pi_control,
+			      0x0800,
+			      0x0800);
+	set_clk90_offset(port, desired_m1_d_offset);
+	set_clkp1_offset(port, desired_p_d_offset);
+	serdes_misc_write_reg(port, LANE_0,
+			      DSC_A_rx_pi_control,
+			      0x0000,
+			      0x7800);
+}
+
+/*
+ * Assert (write 0) or deassert (write all RSTB bits) the PMD resets in
+ * WAN_TOP_MISC_2.
+ */
+static void xport_serdes_reset_ctl(struct xport_priv *port,
+				   bool do_assert)
+{
+	/* all reset lines released at once (from bcm code) */
+	u32 deassert_val = WAN_TOP_MISC_2_PMD_POR_H_RSTB_MASK |
+		WAN_TOP_MISC_2_PMD_CORE_1_DP_H_RSTB_MASK |
+		WAN_TOP_MISC_2_PMD_LN_DP_H_RSTB_MASK |
+		WAN_TOP_MISC_2_PMD_LN_H_RSTB_MASK |
+		WAN_TOP_MISC_2_PMD_CORE_0_DP_H_RSTB_MASK;
+
+	wan_top_writel(port, WAN_TOP_MISC_2_REG,
+		       do_assert ? 0 : deassert_val);
+}
+
+/*
+ *
+ */
+int xport_serdes_set_params(struct xport_priv *port,
+			    const struct serdes_params *params,
+			    const struct serdes_misc3_params *m3params)
+{
+	u32 val;
+	u16 wr_data, wr_mask;
+
+	/*
+	 * init WAN TOP block
+	 */
+	xport_serdes_reset_ctl(port, true);
+	udelay(10);
+	xport_serdes_reset_ctl(port, false);
+	udelay(10);
+
+	/* rescal reset */
+	/* FIXME: first two lines from xgae driver only */
+	/* val = WAN_TOP_RESCAL_AL_CFG_WAN_RESCAL_PWRDN_MASK; */
+	/* wan_top_writel(port, WAN_TOP_RESCAL_AL_CFG_REG, val); */
+	val = WAN_TOP_RESCAL_AL_CFG_WAN_RESCAL_RSTB_MASK;
+	wan_top_writel(port, WAN_TOP_RESCAL_AL_CFG_REG, val);
+	udelay(100);
+	val = wan_top_readl(port, WAN_TOP_RESCAL_STATUS_0_REG);
+	if (!(val & WAN_TOP_RESCAL_STATUS_0_WAN_RESCAL_DONE_MASK)) {
+		netdev_err(port->priv->netdev, "rescal status is not done\n");
+		return 1;
+	}
+	if (!(val & WAN_TOP_RESCAL_STATUS_0_WAN_RESCAL_VALID_MASK)) {
+		netdev_err(port->priv->netdev, "rescal status is not valid\n");
+		return 1;
+	}
+
+	/* make sure we reset forced state of LBE */
+	xport_serdes_lbe_dont_force(port);
+
+	/* test scratch registers */
+	wan_top_writel(port, WAN_TOP_SCRATCH_REG, 0x01234567);
+	wan_top_writel(port, WAN_TOP_SCRATCH_REG, 0x89abcdef);
+
+	/* assert all resets */
+	wan_top_writel(port, WAN_TOP_MISC_2_REG, 0);
+	udelay(5);
+
+	/* reset all remaining registers (xgae driver) */
+	wan_top_writel(port, WAN_TOP_MISC_0_REG, 0);
+	wan_top_writel(port, WAN_TOP_MISC_1_REG, 0);
+	wan_top_writel(port, WAN_TOP_MISC_2_REG, 0);
+	wan_top_writel(port, WAN_TOP_MISC_3_REG, 0);
+	wan_top_writel(port, WAN_TOP_SERDES_PLL_CTL_REG, 0);
+	wan_top_writel(port, WAN_TOP_SERDES_PRAM_CTL_REG, 0);
+	wan_top_writel(port, WAN_TOP_OSR_CONTROL_REG, 0);
+	udelay(5);
+
+	/* set correct refclk (LCREF 50Mhz refclock)  */
+	val = WAN_TOP_SERDES_PLL_CTL_CFG_PLL0_REFIN_EN_MASK |
+		WAN_TOP_SERDES_PLL_CTL_CFG_PLL1_REFIN_EN_MASK |
+		WAN_TOP_SERDES_PLL_CTL_CFG_PLL0_LCREF_SEL_MASK;
+	wan_top_writel(port, WAN_TOP_SERDES_PLL_CTL_REG, val);
+
+	/* wait for clock to be stable */
+	mdelay(10);
+
+	/* de-assert resets */
+	val = WAN_TOP_MISC_2_PMD_POR_H_RSTB_MASK |
+		WAN_TOP_MISC_2_PMD_CORE_1_DP_H_RSTB_MASK |
+		WAN_TOP_MISC_2_PMD_LN_DP_H_RSTB_MASK |
+		WAN_TOP_MISC_2_PMD_LN_H_RSTB_MASK |
+		WAN_TOP_MISC_2_PMD_CORE_0_DP_H_RSTB_MASK;
+	wan_top_writel(port, WAN_TOP_MISC_2_REG, val);
+
+	/* disable laser while we change config */
+	wan_top_clear(port, WAN_TOP_MISC_3_REG, WAN_TOP_MISC_3_LASER_OE_MASK);
+
+	/* set oversample mode */
+	val = wan_top_readl(port, WAN_TOP_MISC_2_REG);
+	val &= ~WAN_TOP_MISC_2_PMD_RX_OSR_MODE_MASK;
+	val &= ~WAN_TOP_MISC_2_PMD_TX_OSR_MODE_MASK;
+	val |= params->rx_osr_mode << WAN_TOP_MISC_2_PMD_RX_OSR_MODE_SHIFT;
+	val |= params->tx_osr_mode << WAN_TOP_MISC_2_PMD_TX_OSR_MODE_SHIFT;
+	/* FIXME: this is AE_25 in PON driver, dunno what it is*/
+//	val |= (1 << 24);
+	wan_top_writel(port, WAN_TOP_MISC_2_REG, val);
+
+	/* set correct interface */
+	val = wan_top_readl(port, WAN_TOP_MISC_3_REG);
+	val |= m3params->misc3_if_select << WAN_TOP_MISC_3_WAN_IFSELECT_SHIFT;
+	val |= m3params->misc3_laser_mode << WAN_TOP_MISC_3_LASER_MODE_SHIFT;
+	if (m3params->misc3_sgmii)
+		val |= WAN_TOP_MISC_3_SGMII_MODE_MASK;
+	wan_top_writel(port, WAN_TOP_MISC_3_REG, val);
+
+	/* set epon gearbox configuration */
+	if (!params->do_ae) {
+		val = wan_top_readl(port, WAN_TOP_EPON_10G_GEARBOX_REG);
+		val |= WAN_TOP_EPON_10G_GEARBOX_RX_CGEN_RSTN_MASK;
+		val |= WAN_TOP_EPON_10G_GEARBOX_TX_CGEN_RSTN_MASK;
+		val |= WAN_TOP_EPON_10G_GEARBOX_RX_GBOX_RSTN_MASK;
+		val |= WAN_TOP_EPON_10G_GEARBOX_TX_GBOX_RSTN_MASK;
+		val |= WAN_TOP_EPON_10G_GEARBOX_CLK_EN_MASK;
+		wan_top_writel(port, WAN_TOP_EPON_10G_GEARBOX_REG, val);
+		udelay(10);
+	}
+
+	/* set OSR gearbox */
+	val = wan_top_readl(port, WAN_TOP_OSR_CONTROL_REG);
+	val &= ~WAN_TOP_OSR_CFG_GPON_RX_CLK_MASK;
+	val |= (2 << WAN_TOP_OSR_CFG_GPON_RX_CLK_SHIFT);
+	wan_top_writel(port, WAN_TOP_OSR_CONTROL_REG, val);
+
+	/*
+	 * now init serdes
+	 */
+
+	/* sanity check */
+	serdes_misc_write_check_reg(port, LANE_BRDCST, DIG_COM_REVID0,
+				    0x0000, 0xffff, 0x42e5);
+	serdes_misc_write_check_reg(port, LANE_BRDCST, DIG_COM_REVID1,
+				    0x0000, 0xffff, 0x1034);
+	serdes_misc_write_check_reg(port, LANE_BRDCST, DIG_COM_REVID2,
+				    0x0000, 0xffff, 0x0000);
+	serdes_misc_write_check_reg(port, LANE_BRDCST,
+				    MDIO_MMDSEL_AER_COM_mdio_maskdata,
+				    0x0000, 0xffff, 0x0000);
+	serdes_misc_write_check_reg(port, LANE_BRDCST,
+				    MDIO_MMDSEL_AER_COM_mdio_maskdata,
+				    0xaaaa, 0xffff, 0xaaaa);
+	serdes_misc_write_check_reg(port, LANE_BRDCST,
+				    MDIO_MMDSEL_AER_COM_mdio_maskdata,
+				    0x5555, 0xffff, 0x5555);
+
+	/* make sure PLL0 is used for tx and PLL1 for rx in dual pll
+	 * mode */
+	BUG_ON(params->tx_pll_id > params->rx_pll_id);
+	BUG_ON(params->tx_pll_id == params->rx_pll_id &&
+	       params->tx_pll_id != serdes_PLL_0);
+
+	/* power up needed PLL */
+	serdes_misc_write_reg(port, LANE_0 | params->tx_pll_id,
+			      AMS_COM_PLL_INTCTRL,
+			      0,
+			      (1 << 2));
+
+	if (params->rx_pll_id != params->tx_pll_id) {
+		serdes_misc_write_reg(port, LANE_0 | params->rx_pll_id,
+				      AMS_COM_PLL_INTCTRL,
+				      0,
+				      (1 << 2));
+	} else {
+		/* make sure second pll is disabled */
+		serdes_misc_write_reg(port, LANE_0 | serdes_PLL_1,
+				      AMS_COM_PLL_INTCTRL,
+				      (1 << 2),
+				      (1 << 2));
+	}
+
+	/* select PLL for tx clock (0 => PLL0, 1 => PLL1)  */
+	serdes_misc_write_reg(port, LANE_BRDCST,
+			      CKRST_CTRL_PLL_SELECT_CONTROL,
+			      (params->tx_pll_id == serdes_PLL_1) ? (1 << 0) : 0,
+			      (1 << 0));
+
+	/* select PLL for rx clock (0 => PLL0, 1 => PLL1)  */
+	serdes_misc_write_reg(port, LANE_BRDCST,
+			      CKRST_CTRL_PLL_SELECT_CONTROL,
+			      (params->rx_pll_id == serdes_PLL_1) ? (1 << 1) : 0,
+			      (1 << 1));
+
+	// TX AMS_COM_PLL_CONTROL_1
+	wr_data = (((params->tx_pll_vco_div4 << 7) & (0x0001 << 7)) |
+		   ((params->tx_pll_vco_div2 << 6) & (0x0001 << 6)));
+	wr_mask = (0x0001 << 7) |
+		(0x0001 << 6);
+	serdes_misc_write_reg(port, params->tx_pll_id,
+			      AMS_COM_PLL_CONTROL_1,
+			      wr_data,
+			      wr_mask);
+
+	// TX AMS_COM_PLL_CONTROL_4
+	wr_data = (((params->tx_pll_force_kvh_bw << 14) & (0x0001 << 14)) |
+		   ((params->tx_pll_kvh_force << 12) & (0x0003 << 12)) |
+		   ((params->tx_pll_2rx_bw << 8) & (0x0003 << 8)));
+	wr_mask = (0x0001 << 14) |
+		(0x0003 << 12) |
+		(0x0003 << 8);
+	serdes_misc_write_reg(port,
+			      params->tx_pll_id,
+			      AMS_COM_PLL_CONTROL_4,
+			      wr_data,
+			      wr_mask);
+
+	// TX AMS_COM_PLL_CONTROL_7
+	wr_data = (((params->tx_pll_fracn_div & 0xffff) << 0) & (0xffff << 0));
+	wr_mask = (0xffff << 0);
+	serdes_misc_write_reg(port, params->tx_pll_id,
+			      AMS_COM_PLL_CONTROL_7,
+			      wr_data,
+			      wr_mask);
+
+	// TX AMS_COM_PLL_CONTROL_8
+	wr_data = (((params->tx_pll_fracn_sel << 15) & (0x0001 << 15)) |
+		   ((params->tx_pll_ditheren << 14) & (0x0001 << 14)) |
+		   ((params->tx_pll_fracn_ndiv << 4) & (0x03ff << 4)) |
+		   ((((params->tx_pll_fracn_div & 0x30000) >> 16) << 0) & (0x0003 << 0)));
+	wr_mask = (0x0001 << 15) |
+		(0x0001 << 14) |
+		(0x03ff << 4) |
+		(0x0003 << 0);
+	serdes_misc_write_reg(port, params->tx_pll_id,
+			      AMS_COM_PLL_CONTROL_8,
+			      wr_data,
+			      wr_mask);
+
+	// TX PLL_CAL_COM_CTL_7
+	wr_data = ((params->tx_pll_mode << 0) & (0x000f << 0));
+	wr_mask = (0x000f << 0);
+	serdes_misc_write_reg(port, params->tx_pll_id,
+			      PLL_CAL_COM_CTL_7,
+			      wr_data,
+			      wr_mask);
+
+	if (params->rx_pll_id != params->tx_pll_id) {
+		// RX AMS_COM_PLL_CONTROL_1
+		wr_data = (((params->rx_pll_vco_div4 << 7) & (0x0001 << 7)) |
+			   ((params->rx_pll_vco_div2 << 6) & (0x0001 << 6)));
+		wr_mask = (0x0001 << 7) |
+			(0x0001 << 6);
+		serdes_misc_write_reg(port, params->rx_pll_id,
+				      AMS_COM_PLL_CONTROL_1,
+				      wr_data,
+				      wr_mask);
+
+		// RX AMS_COM_PLL_CONTROL_4
+		wr_data = (((params->rx_pll_force_kvh_bw << 14) & (0x0001 << 14)) |
+			   ((params->rx_pll_kvh_force << 12) & (0x0003 << 12)) |
+			   ((params->rx_pll_2rx_bw << 8) & (0x0003 << 8)));
+		wr_mask = (0x0001 << 14) |
+			(0x0003 << 12) |
+			(0x0003 << 8);
+		serdes_misc_write_reg(port, params->rx_pll_id,
+				      AMS_COM_PLL_CONTROL_4,
+				      wr_data,
+				      wr_mask);
+
+		// RX AMS_COM_PLL_CONTROL_7
+		wr_data = (((params->rx_pll_fracn_div & 0xffff) << 0) & (0xffff << 0));
+		wr_mask = (0xffff << 0);
+		serdes_misc_write_reg(port, params->rx_pll_id,
+				      AMS_COM_PLL_CONTROL_7,
+				      wr_data,
+				      wr_mask);
+
+		// RX AMS_COM_PLL_CONTROL_8
+		wr_data = (((params->rx_pll_fracn_sel << 15) & (0x0001 << 15)) |
+			   ((params->rx_pll_ditheren << 14) & (0x0001 << 14)) |
+			   ((params->rx_pll_fracn_ndiv << 4) & (0x03ff << 4)) |
+			   ((((params->rx_pll_fracn_div & 0x30000) >> 16) << 0) & (0x0003 << 0)));
+		wr_mask = (0x0001 << 15) |
+			(0x0001 << 14) |
+			(0x03ff << 4) |
+			(0x0003 << 0);
+		serdes_misc_write_reg(port, params->rx_pll_id,
+				      AMS_COM_PLL_CONTROL_8,
+				      wr_data,
+				      wr_mask);
+
+		// RX PLL_CAL_COM_CTL_7
+		wr_data = ((params->rx_pll_mode << 0) & (0x000f << 0));
+		wr_mask  = (0x000f << 0);
+		serdes_misc_write_reg(port, params->rx_pll_id,
+				      PLL_CAL_COM_CTL_7,
+				      wr_data,
+				      wr_mask);
+	}
+
+	if (params->do_pll_charge_pump) {
+		/* from PON driver only */
+		// pll_iqp [04:01] = 0x5 = default
+		serdes_misc_write_reg(port, params->rx_pll_id,
+				      AMS_COM_PLL_CONTROL_2,
+				      0x000a, 0x001e);
+	}
+
+	if (params->do_pll_charge_pump_10g) {
+		/* from PON driver only */
+		// 0xD0B2[bit0]=en_HRz<1> =1,  0xD0B0[bit11]=en_HRz<0>=1  -->>> en_HRz = 6[kOhm]
+		serdes_misc_write_reg(port, params->tx_pll_id,
+				      AMS_COM_PLL_CONTROL_2,
+				      0x0001,
+				      0x0001);
+		serdes_misc_write_reg(port, params->tx_pll_id,
+				      AMS_COM_PLL_CONTROL_0,
+				      0x0800,
+				      0x0800);
+
+		if (params->rx_pll_id != params->tx_pll_id) {
+			serdes_misc_write_reg(port, params->rx_pll_id,
+					      AMS_COM_PLL_CONTROL_2,
+					      0x0001,
+					      0x0001);
+			serdes_misc_write_reg(port, params->rx_pll_id,
+					      AMS_COM_PLL_CONTROL_0,
+					      0x0800,
+					      0x0800);
+		}
+
+		// set PLL-Current-ChargePump=ipq[bit4:1]= 0x0 -->>> 50[uA]
+		serdes_misc_write_reg(port, params->tx_pll_id,
+				      AMS_COM_PLL_CONTROL_2,
+				      0x0000,
+				      0x001e);
+
+		if (params->rx_pll_id != params->tx_pll_id) {
+			serdes_misc_write_reg(port, params->rx_pll_id,
+					      AMS_COM_PLL_CONTROL_2,
+					      0x0000,
+					      0x001e);
+		}
+	}
+
+	/* from PON driver only */
+	//*  #Looptiming Control
+	//#(*) DSC_F_ONU10G_looptiming_ctrl_0.
+	// RX line rate to TX line rate ratio
+	// 000: 1 to 1   001: 1.25 to 1  010: 2 to 1
+	// 011: 4 to 1   100: 5 to 1     101: 8.25 to 1
+	serdes_misc_write_reg(port, serdes_PLL_0,
+			      DSC_F_ONU10G_looptiming_ctrl_0,
+			      params->rx_tx_rate_ratio,
+			      0x0007);
+
+	/* from PON driver only */
+	//#(5.a)  RX=OS4 and TX=OS8 // address 0xD080 = 16'hc074
+	//  # OSR Mode Value: 0= OSx1, 1=OSx2, 4=OSX4, 7=OSX8, A =OSx16 (revB0)
+	//  # rx_osr_mode_frc     = 1
+	//  # rx_osr_mode_frc_val = mode dependent
+	//  # tx_osr_mode_frc     = 1
+	//  # tx_osr_mode_frc_val = mode dependent
+	wr_data = (((params->tx_osr_mode << 4) | (0x0001 << 15)) |
+		   (params->rx_osr_mode | (0x0001 << 14)));
+	serdes_misc_write_reg(port, serdes_PLL_0,
+			      CKRST_CTRL_OSR_MODE_CONTROL,
+			      wr_data,
+			      0xffff);
+
+	/* from PON driver, value different in xgae and init is done
+	 * later */
+	/* #(*) TX Phase Interpolator Control 0 (0xD070)    */
+	serdes_misc_write_reg(port, LANE_BRDCST,
+			      TX_PI_LBE_tx_pi_control_0,
+			      0x2003, /* 0x2000 in xgae driver */
+			      0xffff);
+
+	/*
+	 * CDR programming
+	 */
+	 // cdr_freq_en=1, cdr_integ_sat_sel=0, cdr_freq_override_en=0, cdr_phase_sat_ctrl=1
+	serdes_misc_write_reg(port, LANE_BRDCST,
+			      DSC_A_cdr_control_0,
+			      0x0005,
+			      0x7ff7);
+
+	/* Configure DSC_A_cdr_control_2 */
+	serdes_misc_write_reg(port, LANE_BRDCST,
+			      DSC_A_cdr_control_2,
+			      params->dsc_a_cdr_control_2,
+			      0x1ff3);
+
+	/* Configure DSC_B_dsc_sm_ctrl_7 */
+	serdes_misc_write_reg(port, LANE_BRDCST,
+			      DSC_B_dsc_sm_ctrl_7,
+			      0x0000,
+			      0xffff);
+
+	/* Configure DSC_A_cdr_control_1 */
+	serdes_misc_write_reg(port, LANE_BRDCST,
+			      DSC_A_cdr_control_1,
+			      0x0690,
+			      0xffff);
+
+	/*Configure DSC_B_dsc_sm_ctrl_8 */
+	serdes_misc_write_reg(port, LANE_BRDCST,
+			      DSC_B_dsc_sm_ctrl_8,
+			      0x0010,
+			      0xcfff);
+
+	/* from PON driver only */
+	if (params->do_vga_rf) {
+		// (*)  Analog = VGA, and PF programming
+		serdes_misc_write_reg(port, serdes_PLL_0,
+				      DSC_E_dsc_e_pf_ctrl,
+				      0x0007,
+				      0x000F); // _set_rx_pf_main(7);
+
+		serdes_misc_write_reg(port, serdes_PLL_0,
+				      DSC_E_dsc_e_pf2_lowp_ctrl,
+				      0x0003,
+				      0x0007); // _set_rx_pf2(3);
+
+		serdes_misc_write_reg(port, serdes_PLL_0,
+				      DSC_C_dfe_vga_override,
+				      0x0000,
+				      0x3E00); // _set_rx_vga(32);
+
+		serdes_misc_write_reg(port, serdes_PLL_0,
+				      DSC_C_dfe_vga_override,
+				      0x0100,
+				      0x01FF);
+
+		serdes_misc_write_reg(port, serdes_PLL_0,
+				      DSC_C_dfe_vga_override,
+				      0x8000,
+				      0x8000);
+	}
+
+	/*
+	 * (#11) RX-&-TX PON_MAC_CLK Division Control and SYNC_E_CLK
+	 */
+	// TX AMS_TX_TX_CONTROL_1  [ 0xD0A1 ]
+	wr_data = (((params->tx_pon_mac_ctrl << 4) & (0x0007 << 4)) |
+		   ((params->tx_sync_e_ctrl << 1) & (0x0007 << 1)));
+	wr_mask  = (0x0007 << 4) |
+		(0x0007 << 1);
+	serdes_misc_write_reg(port, LANE_BRDCST,
+			      AMS_TX_TX_CONTROL_1,
+			      wr_data,
+			      wr_mask);
+
+	// TX AMS_RX_RX_CONTROL_2 [ 0xD092 ]
+	wr_data = (((params->rx_pon_mac_ctrl << 0) & (0x0007 << 0)));
+	wr_mask = (0x0007 << 0);
+	serdes_misc_write_reg(port, LANE_BRDCST,
+			      AMS_RX_RX_CONTROL_2,
+			      wr_data,
+			      wr_mask);
+
+	/*
+	 *  Enable PLL's.  De-assert core_dp_s_rstb --> will start the
+	 *  PLL calibration.
+	 *
+	 */
+
+	/* afe_s_pll_pwrdn=1, core_dp_s_rs=0 */
+	serdes_misc_write_reg(port, params->tx_pll_id,
+			      CORE_PLL_COM_TOP_USER_CONTROL,
+			      0x4000,
+			      0x6000);
+	if (params->tx_pll_id != params->rx_pll_id)
+		serdes_misc_write_reg(port, params->rx_pll_id,
+				      CORE_PLL_COM_TOP_USER_CONTROL,
+				      0x4000,
+				      0x6000);
+	udelay(1);
+
+	/* afe_s_pll_pwrdn=0, core_dp_s_rs=1 */
+	serdes_misc_write_reg(port, params->tx_pll_id,
+			      CORE_PLL_COM_TOP_USER_CONTROL,
+			      0x2000,
+			      0x6000);
+	if (params->tx_pll_id != params->rx_pll_id)
+		serdes_misc_write_reg(port, params->rx_pll_id,
+				      CORE_PLL_COM_TOP_USER_CONTROL,
+				      0x2000,
+				      0x6000);
+
+	/* poll for PLL lock */
+	if (serdes_poll_pll_lock(port, params->tx_pll_id))
+		return 1;
+
+	if (params->tx_pll_id != params->rx_pll_id) {
+		if (serdes_poll_pll_lock(port, params->rx_pll_id))
+			return 1;
+	}
+
+	/* also poll for PLL lock in wan top block */
+	if (wan_top_poll_pll_lock(port, params->tx_pll_id))
+		return 1;
+
+	if (params->tx_pll_id != params->rx_pll_id) {
+		if (wan_top_poll_pll_lock(port, params->rx_pll_id))
+			return 1;
+	}
+
+	/*
+	 * #(11) De-assert ln_dp_s_rstb
+	 */
+
+	/* note: LANE_BRDCST in xgae driver only */
+	// ln_dp_s_rstb = 1 = serdes data-path out of reset  (0xd081)
+	serdes_misc_write_reg(port, LANE_BRDCST,
+			      CKRST_CTRL_LANE_CLK_RESET_N_POWERDOWN_CONTROL,
+			      0x0002,
+			      0x0002);
+
+	/* note: LANE_BRDCST in xgae driver only */
+	serdes_misc_write_reg(port, LANE_BRDCST,
+			      AMS_TX_TX_CONTROL_0,
+			      0x0000,
+			      0x00c0);
+
+	/*
+	 * xgae driver configures sigdetect, pon driver disable it
+	 */
+	if (params->do_sigdetect) {
+		serdes_misc_write_reg(port, LANE_0,
+				      DSC_B_dsc_sm_ctrl_0,
+				      0x0008,
+				      0xffff);
+		serdes_misc_write_reg(port, LANE_0,
+				      SIGDET_SIGDET_CTRL_3,
+				      0x0023,
+				      0xffff);
+		serdes_misc_write_reg(port, LANE_0,
+				      SIGDET_SIGDET_CTRL_1,
+				      0xa00a,
+				      0);
+	} else {
+		/* Ignore SigDetect:  0xd010[09] = ignore_sigdet =1 */
+		serdes_misc_write_reg(port, LANE_0,
+				      DSC_B_dsc_sm_ctrl_0,
+				      0x0200,
+				      0x0200);
+
+		// 0xD0C1 bits 6, 5 and 0
+		// energy_detect_frc_val -> 1
+		// energy_detect_frc -> 1
+		// afe_signal_detect_dis -> 1
+		serdes_misc_write_reg(port, LANE_0,
+				      SIGDET_SIGDET_CTRL_1,
+				      0x0061,
+				      0x0061);
+
+
+		//   0xD0C2 bits 0-2 and 4-6:
+		//   los_thresh -> 0
+		//   signal_detect_thresh -> 0
+		serdes_misc_write_reg(port, LANE_0,
+				      SIGDET_SIGDET_CTRL_2,
+				      0x0000,
+				      0x0077);
+
+		//  0xD0C3 bits 6-7:
+		// analog_sd_override -> 1
+		serdes_misc_write_reg(port, LANE_0,
+				      SIGDET_SIGDET_CTRL_3,
+				      0x0040,
+				      0x00C0);
+	}
+
+	/* check DSC lock */
+	if (serdes_poll_dsc_lock(port))
+		return 1;
+
+	/* check PMD rx lock */
+	if (serdes_poll_pmd_rx_lock(port))
+		return 1;
+
+	// (*) PLL PPM Adjustment for RX=10G modes
+	/* FIXME, only done in PON driver and 10G_10G mode, which is
+	 * not yet implemented here */
+
+	/*  Adjust RX PI intial location  */
+	if (params->do_rx_pi_spacing)
+		rx_pi_spacing(port, params->clk90_offset, params->p1_offset);
+
+	/* set AE 10G gearbox */
+	val = 0;
+	if (params->serdes_ae_full_rate)
+		val |= WAN_TOP_AE_GEARBOX_CONTROL_0_CR_FULL_RATE_MODE_MASK;
+	if (params->serdes_ae_20b_width)
+		val |= WAN_TOP_AE_GEARBOX_CONTROL_0_CR_WIDTH_MODE_MASK;
+	wan_top_writel(port, WAN_TOP_AE_GEARBOX_CONTROL_0_REG, val);
+
+	/* reset WAN AE */
+	if (params->do_ae)
+		reset_control_reset(port->wan_ae_rst);
+
+	/* reset WAN TOP pcs */
+	wan_top_clear(port, WAN_TOP_RESET_REG, WAN_TOP_RESET_CFG_PCS_RESET_N_MASK);
+	msleep(10);
+	wan_top_set(port, WAN_TOP_RESET_REG, WAN_TOP_RESET_CFG_PCS_RESET_N_MASK);
+	msleep(1);
+
+	if (!params->do_ae) {
+		/* FIXME: bcm pon driver has early TX config for pon
+		 * 1G and optics BCM_I2C_PON_OPTICS_TYPE_PMD, which
+		 * does not seem to apply to us */
+	}
+
+	/* enable laser now that config is done */
+	wan_top_set(port, WAN_TOP_MISC_3_REG, WAN_TOP_MISC_3_LASER_OE_MASK);
+
+	return 0;
+}
+
+void xport_serdes_update_m3_params(struct xport_priv *port,
+				  const struct serdes_misc3_params *m3params)
+{
+	u32 val;
+
+	val = wan_top_readl(port, WAN_TOP_MISC_3_REG);
+	val &= ~WAN_TOP_MISC_3_WAN_IFSELECT_MASK;
+	val |= m3params->misc3_if_select << WAN_TOP_MISC_3_WAN_IFSELECT_SHIFT;
+	wan_top_writel(port, WAN_TOP_MISC_3_REG, val);
+
+}
+
+void xport_serdes_shutdown(struct xport_priv *port)
+{
+	xport_serdes_reset_ctl(port, true);
+}
+
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_xlmac.c linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_xlmac.c
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_xlmac.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_xlmac.c	2025-09-25 17:40:33.555357249 +0200
@@ -0,0 +1,1234 @@
+#include "port_xport_xlmac.h"
+#include "regs/serdes_regs.h"
+
+#define GEN_MIB_STAT(m)					\
+	sizeof(((struct xlmac_mib *)0)->m),	\
+		offsetof(struct xlmac_mib, m)
+
+const struct bcm_runner_ethtool_stat xlmac_mib_estat[] = {
+	{ "rx_64", GEN_MIB_STAT(rx_64), XPORT_MIB_CORE_GRX64_REG, },
+	{ "rx_127", GEN_MIB_STAT(rx_127), XPORT_MIB_CORE_GRX127_REG, },
+	{ "rx_255", GEN_MIB_STAT(rx_255), XPORT_MIB_CORE_GRX255_REG, },
+	{ "rx_511", GEN_MIB_STAT(rx_511), XPORT_MIB_CORE_GRX511_REG, },
+	{ "rx_1023", GEN_MIB_STAT(rx_1023), XPORT_MIB_CORE_GRX1023_REG, },
+	{ "rx_1518", GEN_MIB_STAT(rx_1518), XPORT_MIB_CORE_GRX1518_REG, },
+	{ "rx_1522", GEN_MIB_STAT(rx_1522), XPORT_MIB_CORE_GRX1522_REG, },
+	{ "rx_2047", GEN_MIB_STAT(rx_2047), XPORT_MIB_CORE_GRX2047_REG, },
+	{ "rx_4095", GEN_MIB_STAT(rx_4095), XPORT_MIB_CORE_GRX4095_REG, },
+	{ "rx_9216", GEN_MIB_STAT(rx_9216), XPORT_MIB_CORE_GRX9216_REG, },
+	{ "rx_16383", GEN_MIB_STAT(rx_16383), XPORT_MIB_CORE_GRX16383_REG, },
+	{ "rx_all_pkt", GEN_MIB_STAT(rx_pkt), XPORT_MIB_CORE_GRXPKT_REG, },
+	{ "rx_unicast", GEN_MIB_STAT(rx_uca), XPORT_MIB_CORE_GRXUCA_REG, },
+	{ "rx_multicast", GEN_MIB_STAT(rx_mca), XPORT_MIB_CORE_GRXMCA_REG, },
+	{ "rx_bcast", GEN_MIB_STAT(rx_bca), XPORT_MIB_CORE_GRXBCA_REG, },
+	{ "rx_fcs", GEN_MIB_STAT(rx_fcs), XPORT_MIB_CORE_GRXFCS_REG, },
+	{ "rx_control", GEN_MIB_STAT(rx_cf), XPORT_MIB_CORE_GRXCF_REG, },
+	{ "rx_pause", GEN_MIB_STAT(rx_pf), XPORT_MIB_CORE_GRXPF_REG, },
+	{ "rx_pfc", GEN_MIB_STAT(rx_pp), XPORT_MIB_CORE_GRXPP_REG, },
+	{ "rx_unsupp_opcode", GEN_MIB_STAT(rx_uo), XPORT_MIB_CORE_GRXUO_REG, },
+	{ "rx_unsupp_da", GEN_MIB_STAT(rx_uda), XPORT_MIB_CORE_GRXUDA_REG, },
+	{ "rx_wrong_sa", GEN_MIB_STAT(rx_wsa), XPORT_MIB_CORE_GRXWSA_REG, },
+	{ "rx_align", GEN_MIB_STAT(rx_aln), XPORT_MIB_CORE_GRXALN_REG, },
+	{ "rx_length", GEN_MIB_STAT(rx_flr), XPORT_MIB_CORE_GRXFLR_REG, },
+	{ "rx_code_err", GEN_MIB_STAT(rx_frerr), XPORT_MIB_CORE_GRXFRERR_REG, },
+	{ "rx_false_carrier", GEN_MIB_STAT(rx_fcr), XPORT_MIB_CORE_GRXFCR_REG, },
+	{ "rx_oversize", GEN_MIB_STAT(rx_ovr), XPORT_MIB_CORE_GRXOVR_REG, },
+	{ "rx_jabber", GEN_MIB_STAT(rx_jbr), XPORT_MIB_CORE_GRXJBR_REG, },
+	{ "rx_bad_mtu", GEN_MIB_STAT(rx_mtue), XPORT_MIB_CORE_GRXMTUE_REG, },
+	{ "rx_matched_crc", GEN_MIB_STAT(rx_mcrc), XPORT_MIB_CORE_GRXMCRC_REG, },
+	{ "rx_promisc", GEN_MIB_STAT(rx_prm), XPORT_MIB_CORE_GRXPRM_REG, },
+	{ "rx_vlan", GEN_MIB_STAT(rx_vln), XPORT_MIB_CORE_GRXVLN_REG, },
+	{ "rx_dvlan", GEN_MIB_STAT(rx_dvln), XPORT_MIB_CORE_GRXDVLN_REG, },
+	{ "rx_trunc", GEN_MIB_STAT(rx_trfu), XPORT_MIB_CORE_GRXTRFU_REG, },
+	{ "rx_good", GEN_MIB_STAT(rx_pok), XPORT_MIB_CORE_GRXPOK_REG, },
+	{ "rx_pfcoff0", GEN_MIB_STAT(rx_pfcoff0), XPORT_MIB_CORE_GRXPFCOFF0_REG, },
+	{ "rx_pfcoff1", GEN_MIB_STAT(rx_pfcoff1), XPORT_MIB_CORE_GRXPFCOFF1_REG, },
+	{ "rx_pfcoff2", GEN_MIB_STAT(rx_pfcoff2), XPORT_MIB_CORE_GRXPFCOFF2_REG, },
+	{ "rx_pfcoff3", GEN_MIB_STAT(rx_pfcoff3), XPORT_MIB_CORE_GRXPFCOFF3_REG, },
+	{ "rx_pfcoff4", GEN_MIB_STAT(rx_pfcoff4), XPORT_MIB_CORE_GRXPFCOFF4_REG, },
+	{ "rx_pfcoff5", GEN_MIB_STAT(rx_pfcoff5), XPORT_MIB_CORE_GRXPFCOFF5_REG, },
+	{ "rx_pfcoff6", GEN_MIB_STAT(rx_pfcoff6), XPORT_MIB_CORE_GRXPFCOFF6_REG, },
+	{ "rx_pfcoff7", GEN_MIB_STAT(rx_pfcoff7), XPORT_MIB_CORE_GRXPFCOFF7_REG, },
+	{ "rx_pfcp0", GEN_MIB_STAT(rx_pfcp0), XPORT_MIB_CORE_GRXPFCP0_REG, },
+	{ "rx_pfcp1", GEN_MIB_STAT(rx_pfcp1), XPORT_MIB_CORE_GRXPFCP1_REG, },
+	{ "rx_pfcp2", GEN_MIB_STAT(rx_pfcp2), XPORT_MIB_CORE_GRXPFCP2_REG, },
+	{ "rx_pfcp3", GEN_MIB_STAT(rx_pfcp3), XPORT_MIB_CORE_GRXPFCP3_REG, },
+	{ "rx_pfcp4", GEN_MIB_STAT(rx_pfcp4), XPORT_MIB_CORE_GRXPFCP4_REG, },
+	{ "rx_pfcp5", GEN_MIB_STAT(rx_pfcp5), XPORT_MIB_CORE_GRXPFCP5_REG, },
+	{ "rx_pfcp6", GEN_MIB_STAT(rx_pfcp6), XPORT_MIB_CORE_GRXPFCP6_REG, },
+	{ "rx_pfcp7", GEN_MIB_STAT(rx_pfcp7), XPORT_MIB_CORE_GRXPFCP7_REG, },
+	{ "rx_schcrc", GEN_MIB_STAT(rx_schcrc), XPORT_MIB_CORE_GRXSCHCRC_REG, },
+	{ "rx_bytes", GEN_MIB_STAT(rx_byt), XPORT_MIB_CORE_GRXBYT_REG, },
+	{ "rx_runt", GEN_MIB_STAT(rx_rpkt), XPORT_MIB_CORE_GRXRPKT_REG, },
+	{ "rx_undersize", GEN_MIB_STAT(rx_und), XPORT_MIB_CORE_GRXUND_REG, },
+	{ "rx_frag", GEN_MIB_STAT(rx_frg), XPORT_MIB_CORE_GRXFRG_REG, },
+	{ "rx_runt_bytes", GEN_MIB_STAT(rx_rbyt), XPORT_MIB_CORE_GRXRBYT_REG, },
+	{ "tx_64", GEN_MIB_STAT(tx_64), XPORT_MIB_CORE_GTX64_REG, },
+	{ "tx_127", GEN_MIB_STAT(tx_127), XPORT_MIB_CORE_GTX127_REG, },
+	{ "tx_255", GEN_MIB_STAT(tx_255), XPORT_MIB_CORE_GTX255_REG, },
+	{ "tx_511", GEN_MIB_STAT(tx_511), XPORT_MIB_CORE_GTX511_REG, },
+	{ "tx_1023", GEN_MIB_STAT(tx_1023), XPORT_MIB_CORE_GTX1023_REG, },
+	{ "tx_1518", GEN_MIB_STAT(tx_1518), XPORT_MIB_CORE_GTX1518_REG, },
+	{ "tx_1522", GEN_MIB_STAT(tx_1522), XPORT_MIB_CORE_GTX1522_REG, },
+	{ "tx_2047", GEN_MIB_STAT(tx_2047), XPORT_MIB_CORE_GTX2047_REG, },
+	{ "tx_4095", GEN_MIB_STAT(tx_4095), XPORT_MIB_CORE_GTX4095_REG, },
+	{ "tx_9216", GEN_MIB_STAT(tx_9216), XPORT_MIB_CORE_GTX9216_REG, },
+	{ "tx_16383", GEN_MIB_STAT(tx_16383), XPORT_MIB_CORE_GTX16383_REG, },
+	{ "tx_good", GEN_MIB_STAT(tx_pok), XPORT_MIB_CORE_GTXPOK_REG, },
+	{ "tx_all_pkt", GEN_MIB_STAT(tx_pkt), XPORT_MIB_CORE_GTXPKT_REG, },
+	{ "tx_unicast", GEN_MIB_STAT(tx_uca), XPORT_MIB_CORE_GTXUCA_REG, },
+	{ "tx_multicast", GEN_MIB_STAT(tx_mca), XPORT_MIB_CORE_GTXMCA_REG, },
+	{ "tx_bcast", GEN_MIB_STAT(tx_bca), XPORT_MIB_CORE_GTXBCA_REG, },
+	{ "tx_pause", GEN_MIB_STAT(tx_pf), XPORT_MIB_CORE_GTXPF_REG, },
+	{ "tx_pfc", GEN_MIB_STAT(tx_pfc), XPORT_MIB_CORE_GTXPFC_REG, },
+	{ "tx_jabber", GEN_MIB_STAT(tx_jbr), XPORT_MIB_CORE_GTXJBR_REG, },
+	{ "tx_fcs", GEN_MIB_STAT(tx_fcs), XPORT_MIB_CORE_GTXFCS_REG, },
+	{ "tx_control", GEN_MIB_STAT(tx_cf), XPORT_MIB_CORE_GTXCF_REG, },
+	{ "tx_oversize", GEN_MIB_STAT(tx_ovr), XPORT_MIB_CORE_GTXOVR_REG, },
+	{ "tx_defer", GEN_MIB_STAT(tx_dfr), XPORT_MIB_CORE_GTXDFR_REG, },
+	{ "tx_multi_defer", GEN_MIB_STAT(tx_edf), XPORT_MIB_CORE_GTXEDF_REG, },
+	{ "tx_col", GEN_MIB_STAT(tx_scl), XPORT_MIB_CORE_GTXSCL_REG, },
+	{ "tx_multi_col", GEN_MIB_STAT(tx_mcl), XPORT_MIB_CORE_GTXMCL_REG, },
+	{ "tx_late_col", GEN_MIB_STAT(tx_lcl), XPORT_MIB_CORE_GTXLCL_REG, },
+	{ "tx_excess_col", GEN_MIB_STAT(tx_xcl), XPORT_MIB_CORE_GTXXCL_REG, },
+	{ "tx_fragment", GEN_MIB_STAT(tx_frg), XPORT_MIB_CORE_GTXFRG_REG, },
+	{ "tx_err", GEN_MIB_STAT(tx_err), XPORT_MIB_CORE_GTXERR_REG, },
+	{ "tx_vlan", GEN_MIB_STAT(tx_vln), XPORT_MIB_CORE_GTXVLN_REG, },
+	{ "tx_dvlan", GEN_MIB_STAT(tx_dvln), XPORT_MIB_CORE_GTXDVLN_REG, },
+	{ "tx_runt", GEN_MIB_STAT(tx_rpkt), XPORT_MIB_CORE_GTXRPKT_REG, },
+	{ "tx_underrun", GEN_MIB_STAT(tx_ufl), XPORT_MIB_CORE_GTXUFL_REG, },
+	{ "tx_pfcp0", GEN_MIB_STAT(tx_pfcp0), XPORT_MIB_CORE_GTXPFCP0_REG, },
+	{ "tx_pfcp1", GEN_MIB_STAT(tx_pfcp1), XPORT_MIB_CORE_GTXPFCP1_REG, },
+	{ "tx_pfcp2", GEN_MIB_STAT(tx_pfcp2), XPORT_MIB_CORE_GTXPFCP2_REG, },
+	{ "tx_pfcp3", GEN_MIB_STAT(tx_pfcp3), XPORT_MIB_CORE_GTXPFCP3_REG, },
+	{ "tx_pfcp4", GEN_MIB_STAT(tx_pfcp4), XPORT_MIB_CORE_GTXPFCP4_REG, },
+	{ "tx_pfcp5", GEN_MIB_STAT(tx_pfcp5), XPORT_MIB_CORE_GTXPFCP5_REG, },
+	{ "tx_pfcp6", GEN_MIB_STAT(tx_pfcp6), XPORT_MIB_CORE_GTXPFCP6_REG, },
+	{ "tx_pfcp7", GEN_MIB_STAT(tx_pfcp7), XPORT_MIB_CORE_GTXPFCP7_REG, },
+	{ "tx_tot_col", GEN_MIB_STAT(tx_ncl), XPORT_MIB_CORE_GTXNCL_REG, },
+	{ "tx_bytes", GEN_MIB_STAT(tx_byt), XPORT_MIB_CORE_GTXBYT_REG, },
+	{ "rx_lpi", GEN_MIB_STAT(rx_lpi), XPORT_MIB_CORE_GRXLPI_REG, },
+	{ "rx_dlpi", GEN_MIB_STAT(rx_dlpi), XPORT_MIB_CORE_GRXDLPI_REG, },
+	{ "tx_lpi", GEN_MIB_STAT(tx_lpi), XPORT_MIB_CORE_GTXLPI_REG, },
+	{ "tx_dlpi", GEN_MIB_STAT(tx_dlpi), XPORT_MIB_CORE_GTXDLPI_REG, },
+	{ "rx_ptllfc", GEN_MIB_STAT(rx_ptllfc), XPORT_MIB_CORE_GRXPTLLFC_REG, },
+	{ "rx_ltllfc", GEN_MIB_STAT(rx_ltllfc), XPORT_MIB_CORE_GRXLTLLFC_REG, },
+	{ "rx_llfcfcs", GEN_MIB_STAT(rx_llfcfcs), XPORT_MIB_CORE_GRXLLFCFCS_REG, },
+	{ "tx_ltllfc", GEN_MIB_STAT(tx_ltllfc), XPORT_MIB_CORE_GTXLTLLFC_REG, },
+};
+
+/*
+ *
+ */
+static void mode_xlmac_stats_update(void *mode_priv,
+				    struct net_device_stats *s)
+{
+	struct xport_xlmac_priv *mode = mode_priv;
+	u64 val;
+
+	val = xport_mib_core_readl(mode, XPORT_MIB_CORE_GRXFCS_REG);
+	s->rx_crc_errors = val;
+
+	val = xport_mib_core_readl(mode, XPORT_MIB_CORE_GRXFLR_REG);
+	s->rx_length_errors = val;
+
+	val = xport_mib_core_readl(mode, XPORT_MIB_CORE_GRXFRERR_REG);
+	val += xport_mib_core_readl(mode, XPORT_MIB_CORE_GRXJBR_REG);
+	val += xport_mib_core_readl(mode, XPORT_MIB_CORE_GRXALN_REG);
+	s->rx_errors = val;
+}
+
+/*
+ *
+ */
+static void mode_xlmac_mib_update(void *mode_priv)
+{
+	struct xport_xlmac_priv *mode = mode_priv;
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(xlmac_mib_estat); i++) {
+		const struct bcm_runner_ethtool_stat *s;
+		u64 val;
+		char *p;
+
+		s = &xlmac_mib_estat[i];
+		val = xport_mib_core_readl(mode, s->reg);
+		p = (char *)&mode->mib + s->offset;
+		*(u64 *)p = val;
+	}
+}
+
+/*
+ *
+ */
+static void *mode_xlmac_mib_get_data(void *mode_priv)
+{
+	struct xport_xlmac_priv *mode = mode_priv;
+	return &mode->mib;
+}
+
+/*
+ *
+ */
+static void mode_xlmac_mtu_set(void *mode_priv, unsigned int size)
+{
+	struct xport_xlmac_priv *mode = mode_priv;
+
+	xport_xlmcore_reg_writel(mode, XPORT_XLMAC_CORE_RX_MAX_SIZE_REG,
+				 size);
+}
+
+/*
+ *
+ */
+static void xlmac_setup(struct xport_xlmac_priv *mode, unsigned int speed)
+{
+	u64 val64;
+	u32 val;
+
+	/* release XLIF credits */
+	val = xlif_reg_readl(mode, XLIF_TX_IF_SET_CREDITS_REG(mode->xlmac_id));
+	val &= ~XLIF_TX_IF_SET_CREDITS_EN_MASK;
+	xlif_reg_writel(mode, XLIF_TX_IF_SET_CREDITS_REG(mode->xlmac_id), val);
+
+	/* Enable 2.5G/10G AE PFC_STATS_EN for Hardware work around */
+	val64 = xport_xlmcore_reg_readl(mode, XPORT_XLMAC_CORE_PFC_CTRL_REG);
+	val64 |= PFC_CTRL_PFC_STATS_EN_MASK;
+	xport_xlmcore_reg_writel(mode, XPORT_XLMAC_CORE_PFC_CTRL_REG, val64);
+
+	val = xport_reg_readl(mode, XPORT_REG_XPORT_CNTRL_1_REG);
+	/* FIXME: should be set if we ever use more than 1 port */
+	val &= ~XPORT_CNTRL_1_REG_MSBUS_CLK_SEL_MASK;
+	val &= ~XPORT_CNTRL_1_REG_TIMEOUT_RST_DISABLE_MASK;
+	if (speed == 10000)
+		val |= XPORT_CNTRL_1_REG_P0_MODE_MASK;
+	else
+		val &= ~XPORT_CNTRL_1_REG_P0_MODE_MASK;
+	xport_reg_writel(mode, XPORT_REG_XPORT_CNTRL_1_REG, val);
+
+	val64 = xport_xlmcore_reg_readl(mode, XPORT_XLMAC_CORE_TX_CTRL_REG);
+	val64 |= TX_CTRL_PAD_EN_MASK;
+	val64 &= ~TX_CTRL_CRC_MODE_MASK;
+	/* XLMAC_TX_CTRL_CRC_MODE_PER_PKT == 3 */
+	val64 |= (3 << TX_CTRL_CRC_MODE_SHIFT); /* FIXME: set it back to 2 */
+	val64 &= ~TX_CTRL_TX_THRESHOLD_MASK;
+	val64 |= (2ULL << TX_CTRL_TX_THRESHOLD_SHIFT);
+	xport_xlmcore_reg_writel(mode, XPORT_XLMAC_CORE_TX_CTRL_REG, val64);
+
+	val64 = xport_xlmcore_reg_readl(mode, XPORT_XLMAC_CORE_RX_CTRL_REG);
+	/* CRC validated by BBH, so keep it */
+	val64 &= ~RX_CTRL_STRIP_CRC_MASK;
+	xport_xlmcore_reg_writel(mode, XPORT_XLMAC_CORE_RX_CTRL_REG, val64);
+
+	val64 = xport_xlmcore_reg_readl(mode, XPORT_XLMAC_CORE_CTRL_REG);
+	val64 &= ~CTRL_EXTENDED_HIG2_EN_MASK;
+	xport_xlmcore_reg_writel(mode, XPORT_XLMAC_CORE_CTRL_REG, val64);
+
+	val64 = xport_xlmcore_reg_readl(mode, XPORT_XLMAC_CORE_MODE_REG);
+	val64 &= ~MODE_SPEED_MODE_MASK;
+	switch (speed) {
+	case 10:
+		val64 |= 0 << MODE_SPEED_MODE_SHIFT;
+		break;
+	case 100:
+		val64 |= 1 << MODE_SPEED_MODE_SHIFT;
+		break;
+	case 1000:
+		val64 |= 2 << MODE_SPEED_MODE_SHIFT;
+		break;
+	case 2500:
+		val64 |= 3 << MODE_SPEED_MODE_SHIFT;
+		break;
+	case 10000:
+		val64 |= 4 << MODE_SPEED_MODE_SHIFT;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	xport_xlmcore_reg_writel(mode, XPORT_XLMAC_CORE_MODE_REG, val64);
+
+	/* release xlmac reset */
+	val64 = xport_xlmcore_reg_readl(mode, XPORT_XLMAC_CORE_CTRL_REG);
+	val64 &= ~CTRL_SOFT_RESET_MASK;
+	xport_xlmcore_reg_writel(mode, XPORT_XLMAC_CORE_CTRL_REG, val64);
+
+	/* setup mbus in work conserving mode, clear weight of unused
+	 * ports */
+	val = xport_mab_reg_readl(mode, XPORT_MAB_TX_WRR_CTRL_REG);
+	val |= TX_WRR_CTRL_ARB_MODE_MASK;
+	val &= ~TX_WRR_CTRL_P2_WEIGHT_MASK;
+	val &= ~TX_WRR_CTRL_P3_WEIGHT_MASK;
+	xport_mab_reg_writel(mode, XPORT_MAB_TX_WRR_CTRL_REG, val);
+
+	/* release msbus reset */
+	val = xport_mab_reg_readl(mode, XPORT_MAB_CNTRL_REG);
+	val &= ~(1 << (CNTRL_GMII_TX_RST_SHIFT + mode->xlmac_id));
+	val &= ~(1 << (CNTRL_GMII_RX_RST_SHIFT + mode->xlmac_id));
+	if (mode->xlmac_id == 0) {
+		val &= ~CNTRL_XGMII_TX_RST_MASK;
+		val &= ~CNTRL_XGMII_RX_RST_MASK;
+	}
+	xport_mab_reg_writel(mode, XPORT_MAB_CNTRL_REG, val);
+}
+
+/*
+ * Put the xlmac into reset: assert the msbus (MAB) per-port resets,
+ * set the xlmac core soft-reset bit, then reload the XLIF tx credits.
+ * The MAC stays quiesced until the resets are released again
+ * (see xlmac_setup()).
+ */
+static void xlmac_reset(struct xport_xlmac_priv *mode)
+{
+	u64 val64;
+	u32 val;
+
+	/*
+	 * msbus reset: per-port GMII rx/tx resets; xlmac 0 also owns
+	 * the XGMII resets
+	 */
+	val = xport_mab_reg_readl(mode, XPORT_MAB_CNTRL_REG);
+	val |= 1 << (CNTRL_GMII_TX_RST_SHIFT + mode->xlmac_id);
+	val |= 1 << (CNTRL_GMII_RX_RST_SHIFT + mode->xlmac_id);
+
+	if (mode->xlmac_id == 0) {
+		val |= CNTRL_XGMII_TX_RST_MASK;
+		val |= CNTRL_XGMII_RX_RST_MASK;
+	}
+	xport_mab_reg_writel(mode, XPORT_MAB_CNTRL_REG, val);
+
+	/*
+	 * xlmac reset (soft reset bit in the 64-bit core control reg)
+	 */
+	val64 = xport_xlmcore_reg_readl(mode, XPORT_XLMAC_CORE_CTRL_REG);
+	val64 |= CTRL_SOFT_RESET_MASK;
+	xport_xlmcore_reg_writel(mode, XPORT_XLMAC_CORE_CTRL_REG, val64);
+
+	/* reset XLIF credits */
+	xlif_reg_writel(mode, XLIF_TX_IF_SET_CREDITS_REG(mode->xlmac_id),
+			XLIF_TX_IF_SET_CREDITS_EN_MASK);
+}
+
+/*
+ * Enable the xlmac datapath by setting the rx/tx enable bits in the
+ * core control register.  Caller is expected to have configured the
+ * MAC first (xlmac_setup()).
+ */
+static void xlmac_enable(struct xport_xlmac_priv *mode)
+{
+	u64 val64;
+
+	val64 = xport_xlmcore_reg_readl(mode, XPORT_XLMAC_CORE_CTRL_REG);
+	val64 |= CTRL_TX_EN_MASK;
+	val64 |= CTRL_RX_EN_MASK;
+	xport_xlmcore_reg_writel(mode, XPORT_XLMAC_CORE_CTRL_REG, val64);
+}
+
+/*
+ * Disable the xlmac by putting it back into reset; the short delay
+ * lets any in-flight traffic drain before the caller reconfigures.
+ */
+static void xlmac_disable(struct xport_xlmac_priv *mode)
+{
+	xlmac_reset(mode);
+	mdelay(1);
+}
+
+/*
+ * One-time XLIF (xlmac interface) setup for this port: enable the tx
+ * interface, clear the underrun port enable, program the tx threshold
+ * and un-disable the rx interface.
+ */
+static void xlif_init(struct xport_xlmac_priv *mode)
+{
+	u32 val;
+
+	/* clear both tx-disable bits (with and without credits) */
+	val = xlif_reg_readl(mode, XLIF_TX_IF_IF_ENABLE_REG(mode->xlmac_id));
+	val &= ~XLIF_TX_IF_IF_ENABLE_DISABLE_WITH_CREDITS_MASK;
+	val &= ~XLIF_TX_IF_IF_ENABLE_DISABLE_WO_CREDITS_MASK;
+	xlif_reg_writel(mode, XLIF_TX_IF_IF_ENABLE_REG(mode->xlmac_id), val);
+
+	val = xlif_reg_readl(mode,
+			     XLIF_TX_IF_URUN_PORT_ENABLE_REG(mode->xlmac_id));
+	val &= ~XLIF_TX_IF_URUN_PORT_ENABLE_ENABLE_MASK;
+	xlif_reg_writel(mode, XLIF_TX_IF_URUN_PORT_ENABLE_REG(mode->xlmac_id),
+			val);
+
+	val = xlif_reg_readl(mode, XLIF_TX_IF_TX_THRESHOLD_REG(mode->xlmac_id));
+	val &= ~XLIF_TX_IF_TX_THRESHOLD_VALUE_MASK;
+	/* value from bcm code */
+	val |= (0xc << XLIF_TX_IF_TX_THRESHOLD_VALUE_SHIFT);
+	xlif_reg_writel(mode, XLIF_TX_IF_TX_THRESHOLD_REG(mode->xlmac_id), val);
+
+	/* un-disable the rx interface */
+	val = xlif_reg_readl(mode, XLIF_RX_IF_IF_DIS_REG(mode->xlmac_id));
+	val &= ~XLIF_RX_IF_IF_DIS_DISABLE_MASK;
+	xlif_reg_writel(mode, XLIF_RX_IF_IF_DIS_REG(mode->xlmac_id), val);
+}
+
+/*
+ * Configure the serdes PCS for 1000base-X / 2500base-X fiber mode:
+ * select fiber (non-SGMII) operation, disable parallel detection,
+ * program the 1000base-X advertisement word from @advertising and
+ * start autoneg when it is requested.
+ *
+ * Fix: several lines were indented with spaces instead of tabs
+ * (kernel coding style / checkpatch error); no functional change.
+ */
+static void serdes_setup_pcs_1000basex(struct xport_xlmac_priv *mode,
+				       const unsigned long *advertising)
+{
+	int val;
+
+	/* setup serdes in fiber mode */
+	val = xport_serdes_pcs_read_reg(mode->port, LANE_BRDCST,
+					SerdesDigital_Control1000X1);
+	val |= SerdesDigital_FibreSgmiiModeFibre;
+	val &= ~(SerdesDigital_SigDetEn |
+		 SerdesDigital_InvertSigDet);
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST,
+				   SerdesDigital_Control1000X1,
+				   val, 0xffff);
+
+	val = xport_serdes_pcs_read_reg(mode->port, LANE_BRDCST,
+					SerdesDigital_Control1000X2);
+	/* disable feature that tries to detect autoneg mismatch and
+	 * auto-disable autoneg if it sees that remote is not sending
+	 * autoneg codewords */
+	val &= ~SerdesDigital_EnableParallelDetection;
+
+	/* enable feature that report link as down when it receives a
+	 * valid 8b/10b signal, but only autoneg codeword */
+	val |= SerdesDigital_DisableFalseLink;
+
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST,
+				   SerdesDigital_Control1000X2,
+				   val, 0xff);
+
+	/* setup autoneg: we only advertise full duplex, plus pause */
+	val = 0;
+	if (phylink_test(advertising, Autoneg)) {
+		val = ADVERTISE_1000XFULL;
+		if (phylink_test(advertising, Pause))
+			val |= ADVERTISE_1000XPAUSE;
+		if (phylink_test(advertising, Asym_Pause))
+			val |= ADVERTISE_1000XPSE_ASYM;
+	}
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST, MII_ADVERTISE,
+				   val, 0xffff);
+
+	val = xport_serdes_pcs_read_reg(mode->port, LANE_BRDCST, MII_BMCR);
+	val |= BMCR_SPEED1000 | BMCR_FULLDPLX;
+	val &= ~BMCR_ISOLATE;
+	if (phylink_test(advertising, Autoneg))
+		val |= BMCR_ANENABLE | BMCR_ANRESTART;
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST, MII_BMCR,
+				   val, 0xffff);
+}
+
+/*
+ * Configure the serdes PCS for SGMII mode and (re)start autoneg.
+ *
+ * Fix: two lines were indented with spaces instead of tabs (kernel
+ * coding style / checkpatch error); no functional change.
+ */
+static void serdes_setup_pcs_sgmii(struct xport_xlmac_priv *mode)
+{
+	int val;
+
+	/* setup serdes in sgmii mode */
+	val = xport_serdes_pcs_read_reg(mode->port, LANE_BRDCST,
+					SerdesDigital_Control1000X1);
+	val &= ~(SerdesDigital_FibreSgmiiModeFibre |
+		 SerdesDigital_SigDetEn |
+		 SerdesDigital_InvertSigDet);
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST,
+				   SerdesDigital_Control1000X1,
+				   val, 0xffff);
+
+	val = xport_serdes_pcs_read_reg(mode->port, LANE_BRDCST,
+					SerdesDigital_Control1000X2);
+	/* disable feature that tries to detect autoneg mismatch and
+	 * auto-disable autoneg if it sees that remote is not sending
+	 * autoneg codewords */
+	val &= ~SerdesDigital_EnableParallelDetection;
+
+	val |= SerdesDigital_AutoNegoFastTimer;
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST,
+				   SerdesDigital_Control1000X2,
+				   val, 0xff);
+
+	/* enable autoneg; speed/duplex will be resolved from the
+	 * SGMII autoneg result in pcs_get_state_sgmii() */
+	val = xport_serdes_pcs_read_reg(mode->port, LANE_BRDCST, MII_BMCR);
+	val &= ~(BMCR_SPEED1000 | BMCR_SPEED100 | BMCR_FULLDPLX);
+	val |= BMCR_ANENABLE | BMCR_ANRESTART;
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST, MII_BMCR,
+				   val, 0xffff);
+}
+
+/*
+ * Configure the serdes PCS for 10Gbase-R: fiber mode, autoneg off
+ * (no clause 37 autoneg on 10Gbase-R), then a vendor-provided
+ * register init sequence, ending with a pulse of the rx66 fifo
+ * pointer soft-reset.
+ *
+ * Fix: space-indented lines converted to tabs and stray spaces
+ * before commas removed (checkpatch); no functional change.
+ */
+static void serdes_setup_pcs_10g(struct xport_xlmac_priv *mode)
+{
+	int val;
+
+	/* setup serdes in fiber mode */
+	val = xport_serdes_pcs_read_reg(mode->port, LANE_BRDCST,
+					SerdesDigital_Control1000X1);
+	val |= SerdesDigital_FibreSgmiiModeFibre;
+	val &= ~(SerdesDigital_SigDetEn |
+		 SerdesDigital_InvertSigDet);
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST,
+				   SerdesDigital_Control1000X1,
+				   val, 0xffff);
+
+	/* no autoneg on 10Gbase-R */
+	val = xport_serdes_pcs_read_reg(mode->port, LANE_BRDCST, MII_BMCR);
+	val &= ~BMCR_ANENABLE;
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST, MII_BMCR,
+				   val, 0xffff);
+
+	/* init from BCM (opaque vendor values) */
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST, XGXSBLK0_XGXSCTRL,
+				   0x260f, 0xffff);
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST, XGXSBLK1_LANECTRL0,
+				   0x1011, 0xffff);
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST,
+				   SerdesDigital_misc1,
+				   0x6015, 0xffff);
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST,
+				   Digital5_parDetINDControl1,
+				   0x5015, 0xffff);
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST,
+				   Digital5_parDetINDControl2,
+				   0x0008, 0xffff);
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST, Digital5_Misc7,
+				   0x0008, 0xffff);
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST, Digital5_Misc6,
+				   0x2a00, 0xffff);
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST, Digital4_Misc3,
+				   0x8188, 0xffff);
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST, Digital4_Misc4,
+				   0x6000, 0xffff);
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST, tx66_Control,
+				   0x4041, 0xffff);
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST, tx66_Control,
+				   0x4001, 0xffff);
+	/* pulse the rx66 fifo pointer soft reset */
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST,
+				   rx66b1_rx66b1_Control1,
+				   rfifo_ptr_sw_rst, 0xffff);
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST,
+				   rx66b1_rx66b1_Control1,
+				   0, 0xffff);
+}
+
+/*
+ * Restart clause 37 autoneg by setting BMCR_ANRESTART; silently
+ * bails out if the PCS register read fails.
+ *
+ * Fix: space-indented lines converted to tabs (checkpatch); no
+ * functional change.
+ */
+static void serdes_pcs_restart_aneg(struct xport_xlmac_priv *mode)
+{
+	int val;
+
+	val = xport_serdes_pcs_read_reg(mode->port, LANE_BRDCST, MII_BMCR);
+	if (val < 0)
+		return;
+	val |= BMCR_ANRESTART;
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST,
+				   MII_BMCR, val, 0xffff);
+}
+
+/*
+ * Report 1G and/or 10G PCS sync status.  Either output pointer may
+ * be NULL to skip that check.
+ *
+ * The 1G check is debounced over three reads: the first read is
+ * discarded, and sync is only reported good if sync_ok stays set and
+ * sync_failed stays clear on both remaining reads (see comment in
+ * the loop body about 10GEPON false positives).
+ */
+static void serdes_pcs_get_sync_ok(struct xport_xlmac_priv *mode,
+				   bool *sync_ok_1g,
+				   bool *sync_ok_10g)
+{
+	size_t i;
+	int ret;
+
+	/* loop condition also guards against sync_ok_1g == NULL */
+	for (i = 0; sync_ok_1g && i < 3; i++) {
+		*sync_ok_1g = false;
+
+		ret = xport_serdes_pcs_read_reg(mode->port, LANE_0,
+						SerdesDigital_Status1000X2);
+		if (ret < 0) {
+			netdev_err(mode->port->priv->netdev,
+				   "failed to read phy status\n");
+			return;
+		}
+
+		if (i == 0) {
+			/* ignore first read */
+			continue;
+		}
+
+		if (!(ret & Status1000X2_sync_ok))
+			break;
+
+		if ((ret & Status1000X2_sync_failed))
+			break;
+
+		/*
+		 * on 10GEPON link, sync_ok can go to 1 briefly
+		 *
+		 * to avoid false detection, sync failed must stay 0
+		 * and sync ok must stay one for two iterations
+		 */
+		/* NOTE(review): the sync_ok_1g test below is redundant,
+		 * the loop condition already guarantees it is non-NULL */
+		if (i == 2 && sync_ok_1g)
+			*sync_ok_1g = true;
+	}
+
+	if (sync_ok_10g) {
+		*sync_ok_10g = false;
+		ret = xport_serdes_pcs_read_reg(mode->port, LANE_0,
+						SerdesDigital_rx66_Status);
+		if (ret < 0) {
+			netdev_err(mode->port->priv->netdev,
+				   "failed to read phy status\n");
+			return;
+		}
+
+		/* bit 3: 10G (66b) block sync — TODO confirm against
+		 * register documentation */
+		*sync_ok_10g = !!(ret & (1 << 3));
+	}
+}
+
+/* serdes PLL/oversampling settings used for 1G-class links
+ * (1000base-X and SGMII); values come from the BCM reference code */
+static const struct serdes_params serdes_params_1g = {
+	.tx_pll_vco_div2	= 0,
+	.tx_pll_vco_div4	= 0,
+	.rx_pll_id		= serdes_PLL_0,
+	.tx_pll_id		= serdes_PLL_0,
+
+	.tx_pll_force_kvh_bw	= 0x1,
+	.tx_pll_kvh_force	= 0,
+
+	.tx_pll_2rx_bw		= 0,
+
+	.tx_pll_fracn_sel	= 0x1,
+
+	.tx_pll_ditheren	= 0x1,
+
+	.tx_pll_fracn_div	= 0x00000,
+	.tx_pll_fracn_ndiv	= 0x0c8,
+
+	.tx_pll_mode		= 0x5,
+
+	.rx_tx_rate_ratio	= 0,
+
+	.rx_pon_mac_ctrl	= 0,
+	.tx_pon_mac_ctrl	= 0,
+	.tx_sync_e_ctrl		= 0,
+
+	.rx_osr_mode		= 0x7,
+	.tx_osr_mode		= 0x7,
+
+	.do_rx_pi_spacing	= false,
+	.clk90_offset		= 0,
+	.p1_offset		= 0,
+	.dsc_a_cdr_control_2	= 0x00f0,
+
+	.do_pll_charge_pump	= false,
+	.do_pll_charge_pump_10g	= false,
+	.do_vga_rf		= false,
+	.do_sigdetect		= false,
+	.do_ae			= true,
+	.serdes_ae_full_rate	= true,
+	.serdes_ae_20b_width	= false,
+};
+
+/* serdes PLL/oversampling settings for 2500base-X; differs from the
+ * 1G set in ndiv, pon_mac/sync_e controls and osr mode */
+static const struct serdes_params serdes_params_2d5g = {
+	.tx_pll_vco_div2	= 0,
+	.tx_pll_vco_div4	= 0,
+	.rx_pll_id		= serdes_PLL_0,
+	.tx_pll_id		= serdes_PLL_0,
+
+	.tx_pll_force_kvh_bw	= 0x1,
+	.tx_pll_kvh_force	= 0,
+
+	.tx_pll_2rx_bw		= 0,
+
+	.tx_pll_fracn_sel	= 0x1,
+
+	.tx_pll_ditheren	= 0x1,
+
+	.tx_pll_fracn_div	= 0x00000,
+	.tx_pll_fracn_ndiv	= 0x0fa,
+
+	.tx_pll_mode		= 0x5,
+
+	.rx_tx_rate_ratio	= 0,
+
+	.rx_pon_mac_ctrl	= 0x5,
+	.tx_pon_mac_ctrl	= 0x5,
+	.tx_sync_e_ctrl		= 0x1,
+
+	.rx_osr_mode		= 0x4,
+	.tx_osr_mode		= 0x4,
+
+	.do_rx_pi_spacing	= false,
+	.clk90_offset		= 0,
+	.p1_offset		= 0,
+	.dsc_a_cdr_control_2	= 0x00f0,
+
+	.do_pll_charge_pump	= false,
+	.do_pll_charge_pump_10g	= false,
+	.do_vga_rf		= false,
+	.do_sigdetect		= false,
+	.do_ae			= true,
+	.serdes_ae_full_rate	= true,
+	.serdes_ae_20b_width	= false,
+};
+
+/* misc3 interface-select settings per link mode; if_select values
+ * come from the BCM reference code (exact encoding not documented
+ * here — presumably selects the PCS datapath width/rate) */
+static const struct serdes_misc3_params serdes_m3params_1g_1000basex = {
+	.misc3_if_select	= 1,
+	.misc3_laser_mode	= 0,
+};
+
+static const struct serdes_misc3_params serdes_m3params_1g_2500basex = {
+	.misc3_if_select	= 2,
+	.misc3_laser_mode	= 0,
+};
+
+static const struct serdes_misc3_params serdes_m3params_1g_sgmii = {
+	.misc3_if_select	= 1,
+	.misc3_laser_mode	= 0,
+	.misc3_sgmii		= 1,
+};
+
+static const struct serdes_misc3_params serdes_m3params_100m_sgmii = {
+	.misc3_if_select	= 0,
+	.misc3_laser_mode	= 0,
+	.misc3_sgmii		= 1,
+};
+
+/* serdes PLL/oversampling settings for 10Gbase-R (full rate,
+ * 20-bit datapath) */
+static const struct serdes_params serdes_params_10g = {
+	.tx_pll_vco_div2	= 0,
+	.tx_pll_vco_div4	= 0,
+	.rx_pll_id		= serdes_PLL_0,
+	.tx_pll_id		= serdes_PLL_0,
+
+	.tx_pll_force_kvh_bw	= 0x1,
+	.tx_pll_kvh_force	= 0,
+
+	.tx_pll_2rx_bw		= 0,
+
+	.tx_pll_fracn_sel	= 0x1,
+
+	.tx_pll_ditheren	= 0x1,
+
+	.tx_pll_fracn_div	= 0x10000,
+	.tx_pll_fracn_ndiv	= 0x0ce,
+
+	.tx_pll_mode		= 0x2,
+
+	.rx_tx_rate_ratio	= 0,
+
+	.rx_pon_mac_ctrl	= 0x3,
+	.tx_pon_mac_ctrl	= 0x3,
+	.tx_sync_e_ctrl		= 0x7,
+
+	.rx_osr_mode		= 0x0,
+	.tx_osr_mode		= 0x0,
+
+	.do_rx_pi_spacing	= false,
+	.clk90_offset		= 0,
+	.p1_offset		= 0,
+	.dsc_a_cdr_control_2	= 0x00c0, /* 0x30 in pon driver */
+
+	.do_pll_charge_pump	= false,
+	.do_pll_charge_pump_10g	= false,
+	.do_vga_rf		= false,
+	.do_sigdetect		= false,
+	.do_ae			= true,
+	.serdes_ae_full_rate	= true,
+	.serdes_ae_20b_width	= true,
+};
+
+static const struct serdes_misc3_params serdes_m3params_10g = {
+	.misc3_if_select	= 3,
+	.misc3_laser_mode	= 0,
+};
+
+/*
+ * phylink pcs_config callback: quiesce the MAC, reprogram the serdes
+ * PLL/PCS for the requested @interface and set up autoneg from
+ * @advertising.  Only 1000base-X, SGMII, 2500base-X and 10Gbase-R
+ * are supported (anything else is a driver bug, hence BUG()).
+ *
+ * Fix: add the missing break after serdes_setup_pcs_10g() — the case
+ * fell through into default (harmless, but an implicit-fallthrough
+ * warning in kernel builds and a trap for future edits).
+ */
+static int mode_phylink_pcs_config(void *mode_priv,
+				   unsigned int pl_mode,
+				   phy_interface_t interface,
+				   const unsigned long *advertising)
+{
+	struct xport_xlmac_priv *mode = mode_priv;
+	const struct serdes_params *serdes_params;
+	const struct serdes_misc3_params *serdes_m3params;
+	int val;
+
+	xlmac_disable(mode);
+	xport_switch_pinctrl(mode->port, XPORT_PIN_RS0);
+
+	mode->interface = interface;
+	mode->aneg_error_reported = false;
+
+	/*
+	 * choose correct serdes params
+	 */
+	switch (interface) {
+	case PHY_INTERFACE_MODE_1000BASEX:
+		serdes_params = &serdes_params_1g;
+		serdes_m3params = &serdes_m3params_1g_1000basex;
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
+		serdes_params = &serdes_params_1g;
+		serdes_m3params = &serdes_m3params_1g_sgmii;
+		break;
+	case PHY_INTERFACE_MODE_2500BASEX:
+		serdes_params = &serdes_params_2d5g;
+		serdes_m3params = &serdes_m3params_1g_2500basex;
+		break;
+	case PHY_INTERFACE_MODE_10GBASER:
+		serdes_params = &serdes_params_10g;
+		serdes_m3params = &serdes_m3params_10g;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	xport_serdes_set_params(mode->port, serdes_params, serdes_m3params);
+
+	/*
+	 * restore LBE forced value
+	 */
+	if (!mode->port->lbe_force)
+		xport_serdes_lbe_dont_force(mode->port);
+	else if (mode->port->lbe_force_value)
+		xport_serdes_lbe_force_enable(mode->port);
+	else
+		xport_serdes_lbe_force_disable(mode->port);
+
+	/* from BCM AE code */
+	val = xport_serdes_pcs_read_reg(mode->port, LANE_BRDCST,
+					CL49_UserB0_Control);
+	val &= ~CL49_fast_lock_cya;
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST,
+				   CL49_UserB0_Control,
+				   val, 0xffff);
+
+	/* from BCM AE code */
+	val = xport_serdes_pcs_read_reg(mode->port, LANE_BRDCST,
+					XgxsBlk10_tx_pi_control4);
+	val |= tx_pi_sm_enable_override_value;
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST,
+				   XgxsBlk10_tx_pi_control4,
+				   val, 0xffff);
+
+	switch (interface) {
+	case PHY_INTERFACE_MODE_1000BASEX:
+	case PHY_INTERFACE_MODE_2500BASEX:
+		serdes_setup_pcs_1000basex(mode, advertising);
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
+		serdes_setup_pcs_sgmii(mode);
+		break;
+	case PHY_INTERFACE_MODE_10GBASER:
+		serdes_setup_pcs_10g(mode);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * phylink mac_link_up callback: program the xlmac for the resolved
+ * @speed and enable rx/tx.  @duplex and @phy are unused (the MAC is
+ * configured full-duplex only, see xlmac_setup()).
+ */
+static void mode_phylink_link_up(void *mode_priv,
+				 unsigned int pl_mode,
+				 phy_interface_t interface,
+				 int speed, int duplex,
+				 struct phy_device *phy)
+{
+	struct xport_xlmac_priv *mode = mode_priv;
+
+	xlmac_setup(mode, speed);
+	xlmac_enable(mode);
+}
+
+/*
+ * phylink mac_link_down callback: put the xlmac back into reset.
+ */
+static void mode_phylink_link_down(void *mode_priv,
+				   unsigned int pl_mode,
+				   phy_interface_t interface)
+{
+	struct xport_xlmac_priv *mode = mode_priv;
+	xlmac_disable(mode);
+}
+
+/*
+ * phylink pcs_an_restart callback: restart autoneg and re-arm the
+ * one-shot autoneg error message.
+ */
+static void mode_phylink_pcs_an_restart(void *mode_priv)
+{
+	struct xport_xlmac_priv *mode = mode_priv;
+
+	mode->aneg_error_reported = false;
+	serdes_pcs_restart_aneg(mode);
+}
+
+/*
+ * Fill @state for 1000base-X / 2500base-X links: report raw link when
+ * autoneg is off, otherwise resolve the clause 37 autoneg result
+ * (full duplex only) and pause bits, logging an autoneg mismatch
+ * once per negotiation.
+ *
+ * Fix: fd_bit was unconditionally assigned and then immediately
+ * reassigned in both branches of the following if/else — drop the
+ * dead store.
+ */
+static void pcs_get_state_1000basex(struct xport_xlmac_priv *mode,
+				    struct phylink_link_state *state)
+{
+	bool link, an_complete;
+	bool tx_pause, rx_pause, aneg_failed;
+	int fd_bit;
+	int lpa, ret;
+
+	state->link = 0;
+
+	ret = xport_serdes_pcs_read_reg(mode->port, LANE_0,
+					SerdesDigital_Status1000X1);
+	if (ret < 0) {
+		netdev_err(mode->port->priv->netdev,
+			   "failed to read phy status\n");
+		return;
+	}
+
+	link = (ret & Status1000X1_link);
+	if (!phylink_test(state->advertising, Autoneg)) {
+		/* no autoneg: report raw link state at fixed speed */
+		state->link = link;
+		if (link) {
+			if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+				state->speed = SPEED_2500;
+			else
+				state->speed = SPEED_1000;
+			state->duplex = DUPLEX_FULL;
+		}
+		return;
+	}
+
+	ret = xport_serdes_pcs_read_reg(mode->port, LANE_0, MII_BMSR);
+	if (ret < 0) {
+		netdev_err(mode->port->priv->netdev,
+			   "failed to read phy status\n");
+		return;
+	}
+	an_complete = (ret & BMSR_ANEGCOMPLETE);
+
+	lpa = xport_serdes_pcs_read_reg(mode->port, LANE_0, MII_LPA);
+	if (lpa < 0) {
+		netdev_err(mode->port->priv->netdev,
+			   "failed to read phy status\n");
+		return;
+	}
+
+	state->an_complete = an_complete;
+
+	if (!state->an_complete) {
+		state->link = 0;
+		return;
+	}
+
+	/* resolve autoneg result, we only support full duplex */
+	if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+		fd_bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT;
+	else
+		fd_bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT;
+	mii_lpa_mod_linkmode_x(state->lp_advertising, lpa, fd_bit);
+
+	/* check for autoneg failure on duplex */
+	aneg_failed = false;
+
+	if (!linkmode_test_bit(fd_bit, state->advertising) ||
+	    !linkmode_test_bit(fd_bit, state->lp_advertising))
+		aneg_failed = true;
+
+	linkmode_resolve_pause(state->advertising,
+			       state->lp_advertising,
+			       &tx_pause, &rx_pause);
+
+	if (tx_pause)
+		state->pause |= MLO_PAUSE_TX;
+	if (rx_pause)
+		state->pause |= MLO_PAUSE_RX;
+
+	if (aneg_failed) {
+		/* report only once until the next aneg restart */
+		if (!mode->aneg_error_reported) {
+			netdev_err(mode->port->priv->netdev,
+				   "autoneg error (lpa 0x%x)", lpa);
+			mode->aneg_error_reported = true;
+		}
+		return;
+	}
+
+	state->link = link;
+	if (link) {
+		if (state->interface == PHY_INTERFACE_MODE_2500BASEX)
+			state->speed = SPEED_2500;
+		else
+			state->speed = SPEED_1000;
+		state->duplex = DUPLEX_FULL;
+	}
+}
+
+/*
+ * Fill @state for SGMII links from the SGMII autoneg result, then
+ * (once per link-up) reprogram the serdes for the negotiated speed.
+ *
+ * Fixes:
+ *  - clear BMCR_SPEED100 (not BMCR_SPEED10, which is 0x0000 in
+ *    linux/mii.h and thus a no-op) before setting the new speed, so
+ *    a stale 100M speed selection cannot survive;
+ *  - stray extra space in indentation removed.
+ */
+static void pcs_get_state_sgmii(struct xport_xlmac_priv *mode,
+				struct phylink_link_state *state)
+{
+	bool an_complete;
+	int lpa, ret, val;
+
+	state->link = 0;
+
+	ret = xport_serdes_pcs_read_reg(mode->port, LANE_0, MII_BMSR);
+	if (ret < 0) {
+		netdev_err(mode->port->priv->netdev,
+			   "failed to read phy status\n");
+		return;
+	}
+	an_complete = (ret & BMSR_ANEGCOMPLETE);
+
+	lpa = xport_serdes_pcs_read_reg(mode->port, LANE_0, MII_LPA);
+	if (lpa < 0) {
+		netdev_err(mode->port->priv->netdev,
+			   "failed to read phy status\n");
+		return;
+	}
+
+	if (!(lpa & LPA_SGMII)) {
+		/* remote is not an SGMII PHY */
+		return;
+	}
+
+	state->link = !!(lpa & LPA_SGMII_LINK);
+	if (!state->link) {
+		mode->sgmii_config_set = false;
+		return;
+	}
+
+	state->duplex = (lpa & LPA_SGMII_FULL_DUPLEX) ?
+		DUPLEX_FULL : DUPLEX_HALF;
+
+	switch (lpa & LPA_SGMII_SPD_MASK) {
+	case LPA_SGMII_1000:
+		state->speed = SPEED_1000;
+		break;
+	case LPA_SGMII_100:
+		//state->speed = SPEED_100;
+
+		/* for unknown reason, only 1Gbit/s is working,
+		 * revisit later */
+		state->link = 0;
+		break;
+	default:
+		/* other speed not supported */
+		state->link = 0;
+		return;
+	}
+
+	/* reprogram serdes for the negotiated speed only once */
+	if (mode->sgmii_config_set)
+		return;
+
+	switch (state->speed) {
+	case SPEED_100:
+		xport_serdes_update_m3_params(mode->port,
+					      &serdes_m3params_100m_sgmii);
+		break;
+	case SPEED_1000:
+		xport_serdes_update_m3_params(mode->port,
+					      &serdes_m3params_1g_sgmii);
+		break;
+	}
+	val = xport_serdes_pcs_read_reg(mode->port, LANE_BRDCST, MII_BMCR);
+	val &= ~(BMCR_SPEED1000 | BMCR_SPEED100);
+	val |= BMCR_FULLDPLX;
+
+	switch (state->speed) {
+	case SPEED_100:
+		val |= BMCR_SPEED100;
+		break;
+	case SPEED_1000:
+		val |= BMCR_SPEED1000;
+		break;
+	}
+
+	xport_serdes_pcs_write_reg(mode->port, LANE_BRDCST, MII_BMCR,
+				   val, 0xffff);
+	mode->sgmii_config_set = true;
+}
+
+/*
+ * Fill @state for 10Gbase-R links: no autoneg, link comes straight
+ * from the XGXS status register; speed/duplex are fixed.
+ */
+static void pcs_get_state_10gbaser(struct xport_xlmac_priv *mode,
+				   struct phylink_link_state *state)
+{
+	int ret;
+
+	state->link = 0;
+	ret = xport_serdes_pcs_read_reg(mode->port, LANE_0,
+					XGXSBLK4_xgxsStatus1);
+	if (ret < 0) {
+		netdev_err(mode->port->priv->netdev,
+			   "failed to read phy status\n");
+		return;
+	}
+
+	state->link = !!(ret & XgxsStatus1_LinkStat);
+	if (state->link) {
+		state->speed = SPEED_10000;
+		state->duplex = DUPLEX_FULL;
+	}
+}
+
+/*
+ * phylink pcs_get_state callback: dispatch to the per-interface
+ * state reader.  The interface was validated in pcs_config, so an
+ * unknown value here is a driver bug (WARN, not BUG).
+ */
+static int mode_phylink_pcs_get_state(void *mode_priv,
+				      struct phylink_link_state *state)
+{
+	struct xport_xlmac_priv *mode = mode_priv;
+
+	switch (state->interface) {
+	case PHY_INTERFACE_MODE_1000BASEX:
+	case PHY_INTERFACE_MODE_2500BASEX:
+		pcs_get_state_1000basex(mode, state);
+		break;
+	case PHY_INTERFACE_MODE_SGMII:
+		pcs_get_state_sgmii(mode, state);
+		break;
+	case PHY_INTERFACE_MODE_10GBASER:
+		pcs_get_state_10gbaser(mode, state);
+		break;
+	default:
+		WARN(1, "invalid interface");
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * ethtool private flags getter: reports the LBE force state and a
+ * per-mode "serdes sync ok" status bit (1G debounced sync for the
+ * 1G-class interfaces, 66b sync for 10Gbase-R).
+ */
+static u32 mode_get_priv_flags(void *mode_priv)
+{
+	struct xport_xlmac_priv *mode = mode_priv;
+	bool sync_ok_1g, sync_ok_10g;
+	u32 val;
+
+	val = (mode->port->lbe_force ? 1 : 0) <<
+		PRIV_FLAGS_FORCE_LBE_OE_BIT;
+	val |= (mode->port->lbe_force_value ? 1 : 0) <<
+		PRIV_FLAGS_FORCE_LBE_OE_VAL_BIT;
+
+	serdes_pcs_get_sync_ok(mode, &sync_ok_1g, &sync_ok_10g);
+
+	switch (mode->interface) {
+	case PHY_INTERFACE_MODE_1000BASEX:
+	case PHY_INTERFACE_MODE_2500BASEX:
+	case PHY_INTERFACE_MODE_SGMII:
+		if (sync_ok_1g)
+			val |= 1 << PRIV_FLAGS_ST_SYNC_OK_BIT;
+		break;
+	case PHY_INTERFACE_MODE_10GBASER:
+		if (sync_ok_10g)
+			val |= 1 << PRIV_FLAGS_ST_SYNC_OK_BIT;
+		break;
+	default:
+		break;
+	}
+
+	return val;
+}
+
+/*
+ * ethtool private flags setter: applies the LBE (laser burst enable)
+ * force/force-value flags to the serdes and caches them on the port
+ * so pcs_config can restore them after a reconfiguration.
+ */
+static int mode_set_priv_flags(void *mode_priv, u32 flags)
+{
+	struct xport_xlmac_priv *mode = mode_priv;
+
+	if (!(flags & (1 << PRIV_FLAGS_FORCE_LBE_OE_BIT))) {
+		mode->port->lbe_force = false;
+		xport_serdes_lbe_dont_force(mode->port);
+	} else {
+		mode->port->lbe_force = true;
+		if (flags & (1 << PRIV_FLAGS_FORCE_LBE_OE_VAL_BIT)) {
+			mode->port->lbe_force_value = true;
+			xport_serdes_lbe_force_enable(mode->port);
+		} else {
+			mode->port->lbe_force_value = false;
+			xport_serdes_lbe_force_disable(mode->port);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * called when netdevice is stopped or after phylink interface change:
+ * wait for the runner firmware to stop transmitting on this port.
+ */
+static void mode_xlmac_stop(void *mode_priv)
+{
+	struct xport_xlmac_priv *mode = mode_priv;
+
+	bcm_runner_fw_tx_stop_wait(mode->port->priv);
+}
+
+/*
+ * called when netdevice is started or after phylink interface change:
+ * allocate the per-mode context, bring up the XLIF and leave the
+ * xlmac in reset until phylink reports link up.
+ *
+ * Returns the mode context, or ERR_PTR(-ENOMEM).
+ */
+static void *mode_xlmac_init(void *port_priv,
+			     const struct bcm_xrdp_enet_params *params)
+{
+	struct xport_priv *port = port_priv;
+	struct xport_xlmac_priv *mode;
+
+	mode = kzalloc(sizeof (*mode), GFP_KERNEL);
+	if (!mode)
+		return ERR_PTR(-ENOMEM);
+
+	mode->port = port;
+
+	xlif_init(mode);
+	xlmac_reset(mode);
+
+	return mode;
+}
+
+/*
+ * called when netdevice is stopped or after phylink interface change:
+ * free the context allocated by mode_xlmac_init().
+ */
+static void mode_xlmac_release(void *mode_priv)
+{
+	struct xport_xlmac_priv *mode = mode_priv;
+	kfree(mode);
+}
+
+/*
+ * Return the BBH id used for this port in AE (active ethernet) mode.
+ */
+static u32 mode_get_bbh_id(void *port_priv)
+{
+	struct xport_priv *port = port_priv;
+	return port->ae_bbh_id;
+}
+
+/* mode operations for the XPORT in AE (active ethernet) mode; the
+ * mtu/stats/mib callbacks referenced here are defined earlier in
+ * this file */
+const struct bcm_enet_mode_ops xport_xlmac_mode_ops = {
+	.name			= "AE",
+
+	.init			= mode_xlmac_init,
+	.release		= mode_xlmac_release,
+
+	.stop			= mode_xlmac_stop,
+	.get_bbh_id		= mode_get_bbh_id,
+	.mtu_set		= mode_xlmac_mtu_set,
+	.stats_update		= mode_xlmac_stats_update,
+
+	/* mib operation */
+	.mib_estat		= xlmac_mib_estat,
+	.mib_estat_count	= ARRAY_SIZE(xlmac_mib_estat),
+	.mib_update		= mode_xlmac_mib_update,
+	.mib_get_data		= mode_xlmac_mib_get_data,
+
+	/* get/set private on netdevice */
+	.get_priv_flags		= mode_get_priv_flags,
+	.set_priv_flags		= mode_set_priv_flags,
+
+	/*
+	 * phylink callback
+	 */
+	.phylink_link_down	= mode_phylink_link_down,
+	.phylink_link_up	= mode_phylink_link_up,
+	.phylink_pcs_config	= mode_phylink_pcs_config,
+	.phylink_pcs_get_state	= mode_phylink_pcs_get_state,
+	.phylink_pcs_an_restart	= mode_phylink_pcs_an_restart,
+};
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/port_xport_xlmac.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_xlmac.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/port_xport_xlmac.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/port_xport_xlmac.h	2025-09-25 17:40:33.555357249 +0200
@@ -0,0 +1,255 @@
+#ifndef PORT_XPORT_XLMAC_H_
+#define PORT_XPORT_XLMAC_H_
+
+#include "port_xport.h"
+
+#include "regs/xport_xlmac_core.h"
+#include "regs/xport_xlmac_reg.h"
+#include "regs/xport_mib_core.h"
+#include "regs/xport_reg.h"
+#include "regs/xport_mib_reg.h"
+#include "regs/xport_mab.h"
+#include "regs/xlif.h"
+
+/*
+ * Software copy of the xlmac hardware MIB counters; field names
+ * mirror the hardware counter names (rx_64..rx_16383 and
+ * tx_64..tx_16383 are frame-size histogram buckets).
+ */
+struct xlmac_mib {
+	/* rx frame-size histogram */
+	u64		rx_64;
+	u64		rx_127;
+	u64		rx_255;
+	u64		rx_511;
+	u64		rx_1023;
+	u64		rx_1518;
+	u64		rx_1522;
+	u64		rx_2047;
+	u64		rx_4095;
+	u64		rx_9216;
+	u64		rx_16383;
+	/* rx counters */
+	u64		rx_pkt;
+	u64		rx_uca;
+	u64		rx_mca;
+	u64		rx_bca;
+	u64		rx_fcs;
+	u64		rx_cf;
+	u64		rx_pf;
+	u64		rx_pp;
+	u64		rx_uo;
+	u64		rx_uda;
+	u64		rx_wsa;
+	u64		rx_aln;
+	u64		rx_flr;
+	u64		rx_frerr;
+	u64		rx_fcr;
+	u64		rx_ovr;
+	u64		rx_jbr;
+	u64		rx_mtue;
+	u64		rx_mcrc;
+	u64		rx_prm;
+	u64		rx_vln;
+	u64		rx_dvln;
+	u64		rx_trfu;
+	u64		rx_pok;
+	u64		rx_pfcoff0;
+	u64		rx_pfcoff1;
+	u64		rx_pfcoff2;
+	u64		rx_pfcoff3;
+	u64		rx_pfcoff4;
+	u64		rx_pfcoff5;
+	u64		rx_pfcoff6;
+	u64		rx_pfcoff7;
+	u64		rx_pfcp0;
+	u64		rx_pfcp1;
+	u64		rx_pfcp2;
+	u64		rx_pfcp3;
+	u64		rx_pfcp4;
+	u64		rx_pfcp5;
+	u64		rx_pfcp6;
+	u64		rx_pfcp7;
+	u64		rx_schcrc;
+	u64		rx_byt;
+	u64		rx_rpkt;
+	u64		rx_und;
+	u64		rx_frg;
+	u64		rx_rbyt;
+	/* tx frame-size histogram */
+	u64		tx_64;
+	u64		tx_127;
+	u64		tx_255;
+	u64		tx_511;
+	u64		tx_1023;
+	u64		tx_1518;
+	u64		tx_1522;
+	u64		tx_2047;
+	u64		tx_4095;
+	u64		tx_9216;
+	u64		tx_16383;
+	/* tx counters */
+	u64		tx_pok;
+	u64		tx_pkt;
+	u64		tx_uca;
+	u64		tx_mca;
+	u64		tx_bca;
+	u64		tx_pf;
+	u64		tx_pfc;
+	u64		tx_jbr;
+	u64		tx_fcs;
+	u64		tx_cf;
+	u64		tx_ovr;
+	u64		tx_dfr;
+	u64		tx_edf;
+	u64		tx_scl;
+	u64		tx_mcl;
+	u64		tx_lcl;
+	u64		tx_xcl;
+	u64		tx_frg;
+	u64		tx_err;
+	u64		tx_vln;
+	u64		tx_dvln;
+	u64		tx_rpkt;
+	u64		tx_ufl;
+	u64		tx_pfcp0;
+	u64		tx_pfcp1;
+	u64		tx_pfcp2;
+	u64		tx_pfcp3;
+	u64		tx_pfcp4;
+	u64		tx_pfcp5;
+	u64		tx_pfcp6;
+	u64		tx_pfcp7;
+	u64		tx_ncl;
+	u64		tx_byt;
+	/* EEE / LPI and link-level flow control counters */
+	u64		rx_lpi;
+	u64		rx_dlpi;
+	u64		tx_lpi;
+	u64		tx_dlpi;
+	u64		rx_ptllfc;
+	u64		rx_ltllfc;
+	u64		rx_llfcfcs;
+	u64		tx_ltllfc;
+};
+
+/* per-port context for AE (xlmac) mode */
+struct xport_xlmac_priv {
+	/* back pointer to the owning xport port */
+	struct xport_priv		*port;
+
+	/* index of this xlmac within the xport */
+	unsigned int			xlmac_id;
+	/* software copy of the hardware MIB counters */
+	struct xlmac_mib		mib;
+
+	/* current phylink interface mode */
+	phy_interface_t			interface;
+	/* one-shot latch for the autoneg mismatch error message */
+	bool				aneg_error_reported;
+
+	/* SGMII speed currently programmed into the serdes */
+	int				sgmii_serdes_speed;
+	/* true once the serdes was reprogrammed for the negotiated
+	 * SGMII speed (cleared on link down) */
+	bool				sgmii_config_set;
+};
+
+
+/*
+ * io accessors
+ */
+/* read a 32-bit XLIF register (regs[2] is the XLIF window) */
+static inline u32 xlif_reg_readl(struct xport_xlmac_priv *mode,
+			  u32 offset)
+{
+	return ioread32(mode->port->regs[2] + offset);
+}
+
+/* write a 32-bit XLIF register (regs[2] is the XLIF window).
+ * Fix: dropped the bogus "return" of a void expression (C forbids
+ * returning an expression from a void function). */
+static inline void xlif_reg_writel(struct xport_xlmac_priv *mode,
+				   u32 offset, u32 val)
+{
+	iowrite32(val, mode->port->regs[2] + offset);
+}
+
+/* read a 32-bit XPORT_XLMAC_REG register (regs[1] window) */
+static inline u32 xport_xlmreg_reg_readl(struct xport_xlmac_priv *mode,
+					 u32 offset)
+{
+	return ioread32(mode->port->regs[1] +
+			XPORT_XLMAC_REG_OFFSET_0 + offset);
+}
+
+/* write a 32-bit XPORT_XLMAC_REG register (regs[1] window) */
+static inline void xport_xlmreg_reg_writel(struct xport_xlmac_priv *mode,
+					   u32 offset, u32 val)
+{
+	iowrite32(val, mode->port->regs[1] +
+		  XPORT_XLMAC_REG_OFFSET_0 + offset);
+}
+
+/*
+ * Read a 64-bit xlmac core register through the 32-bit indirect
+ * access scheme: reading the low word presumably latches the high
+ * word into the DIR_ACC_DATA_READ holding register, which is then
+ * read separately (order matters — TODO confirm against xport docs).
+ */
+static inline u64 xport_xlmcore_reg_readl(struct xport_xlmac_priv *mode,
+					  u32 offset)
+{
+	u32 val32, hold;
+	u64 ret;
+
+	/* indirect access */
+	val32 = ioread32(mode->port->regs[1] +
+			 XPORT_XLMAC_CORE_OFFSET(mode->xlmac_id) + offset);
+	hold = xport_xlmreg_reg_readl(mode,
+				      XPORT_XLMAC_REG_DIR_ACC_DATA_READ_REG);
+
+	ret = (((u64)hold) << 32) | val32;
+	return ret;
+}
+
+/*
+ * Write a 64-bit xlmac core register through the 32-bit indirect
+ * access scheme: the high word is staged in the DIR_ACC_DATA_WRITE
+ * holding register first, then the low-word write commits both.
+ *
+ * Fix: write the explicitly computed val32 instead of passing the
+ * u64 val and relying on implicit truncation (val32 was computed
+ * but unused — set-but-unused warning under W=1).  No behavior
+ * change.
+ */
+static inline void xport_xlmcore_reg_writel(struct xport_xlmac_priv *mode,
+					    u32 offset, u64 val)
+{
+	u32 val32, hold;
+
+	val32 = val & 0xffffffff;
+	hold = val >> 32;
+
+	xport_xlmreg_reg_writel(mode,
+				XPORT_XLMAC_REG_DIR_ACC_DATA_WRITE_REG,
+				hold);
+	iowrite32(val32, mode->port->regs[1] +
+		  XPORT_XLMAC_CORE_OFFSET(mode->xlmac_id) + offset);
+}
+
+/* read a 32-bit XPORT_REG register (regs[1] window) */
+static inline u32 xport_reg_readl(struct xport_xlmac_priv *mode,
+				  u32 offset)
+{
+	return ioread32(mode->port->regs[1] +
+			XPORT_REG_OFFSET_0 + offset);
+}
+
+/* write a 32-bit XPORT_REG register (regs[1] window) */
+static inline void xport_reg_writel(struct xport_xlmac_priv *mode,
+				    u32 offset, u32 val)
+{
+	iowrite32(val, mode->port->regs[1] +
+		  XPORT_REG_OFFSET_0 + offset);
+}
+
+/* read a 32-bit XPORT MIB register (regs[1] window) */
+static inline u32 xport_mib_reg_readl(struct xport_xlmac_priv *mode,
+				      u32 offset)
+{
+	return ioread32(mode->port->regs[1] +
+			XPORT_MIB_REG_OFFSET_0 + offset);
+}
+
+/* write a 32-bit XPORT MIB register (regs[1] window) */
+static inline void xport_mib_reg_writel(struct xport_xlmac_priv *mode,
+					u32 offset, u32 val)
+{
+	iowrite32(val, mode->port->regs[1] + XPORT_MIB_REG_OFFSET_0 + offset);
+}
+
+/*
+ * Read a 64-bit MIB core counter through the same 32-bit indirect
+ * access scheme as xport_xlmcore_reg_readl() (low word read first,
+ * high word from the holding register).
+ */
+static inline u64 xport_mib_core_readl(struct xport_xlmac_priv *mode,
+				       u32 offset)
+{
+	u32 val32, hold;
+	u64 ret;
+
+	/* indirect access */
+	val32 = ioread32(mode->port->regs[1] +
+			 XPORT_MIB_CORE_OFFSET(mode->xlmac_id) + offset);
+	hold = xport_mib_reg_readl(mode,
+				   XPORT_MIB_REG_DIR_ACC_DATA_READ_REG);
+
+	ret = (((u64)hold) << 32) | val32;
+	return ret;
+}
+
+/* read a 32-bit MAB (msbus adaptation block) register */
+static inline u32 xport_mab_reg_readl(struct xport_xlmac_priv *mode,
+				      u32 offset)
+{
+	return ioread32(mode->port->regs[1] + XPORT_MAB_OFFSET_0 + offset);
+}
+
+/* write a 32-bit MAB (msbus adaptation block) register */
+static inline void xport_mab_reg_writel(struct xport_xlmac_priv *mode,
+					u32 offset, u32 val)
+{
+	iowrite32(val, mode->port->regs[1] + XPORT_MAB_OFFSET_0 + offset);
+}
+
+#endif /* PORT_XPORT_XLMAC_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_epn.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_epn.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_epn.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_epn.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,3456 @@
+#ifndef EPON_EPN_H_
+#define EPON_EPN_H_
+
+/* relative to epon */
+#define EPN_OFFSET_0			0x1000
+
+/*
+ * Register <EPN_CONTROL_0>
+ *
+ * This register controls and configures the modules in the EPON block.
+ */
+#define EPN_CONTROL_0_REG		0x0
+
+/*
+ * Enable IEEE 1588 packet timestamping, applicable only in point-to-point
+ * mode.
+*/
+#define  CONTROL_0_CFGEN1588TS_MASK	0x80000000
+
+/*
+ * Replaces FCS of upstream packet, resulting in no change in
+ * packet's length.
+ * Set cfgAppendUpFcs/cfgReplaceUpFcs to 0 for pass-through.
+*/
+#define  CONTROL_0_CFGREPLACEUPFCS_MASK	0x40000000
+
+/*
+ * Appends FCS to upstream packet, resulting in increase of packet's length
+ * +4 bytes.
+ * Set cfgAppendUpFcs/cfgReplaceUpFcs to 0 for pass-through.
+*/
+#define  CONTROL_0_CFGAPPENDUPFCS_MASK	0x20000000
+
+/*
+ * Drops Single Copy Broadcast packets that are unmapped by the LIF.
+ * 0:
+ * Ignore LLID bit 15.
+ * 1:
+ * Drop packets that have LLID bit 15 set and have LLID index bit 5 set.
+*/
+#define  CONTROL_0_CFGDROPSCB_MASK	0x10000000
+
+/*
+ * Ignores the first queue set limits.
+ * This is only applied whenThreshold First Queue Service Discipline is
+ * enabled.
+ * 0:
+ * Transmit packets in the order they were last reported in thefirst queue
+ * set.
+ * 1:
+ * Transmit packets in the order they were last reported in theun-capped
+ * queue set.
+*/
+#define  CONTROL_0_MODUNCAPPEDREPORTLIMIT_MASK	0x8000000
+
+/*
+ * Enables Threshold First Queue Service Discipline.
+ * This is onlyapplied to multi-priority reporting mode.
+ * 0:
+ * Strict priority.
+ * The highest priority packet is transmitted.
+ * This mode 'pulls' late-arriving/un-reported high priority packetsahead
+ * of previously reported lower priority packets.
+ * 1:
+ * Transmit packets in the order they were last reported.
+*/
+#define  CONTROL_0_MODMPQUESETFIRST_MASK	0x4000000
+
+/*
+ * Propagates the ONU's local MPCP time by replacing the last fourbytes of
+ * a GATE frame with the MPCP time the GATE arrived at theONU.
+ * This only applies to downstream gate messages passed from theEpn to the
+ * BBH.
+ * 0:
+ * Do not propagate the local MPCP time1:
+ * Propagate the local MPCP time
+*/
+#define  CONTROL_0_PRVLOCALMPCPPROPAGATION_MASK	0x1000000
+
+/*
+ * Allows accumulator 3 to be prefetched before accumulator 0 isemptied and
+ * the accumulator shift occurs.
+ * This eliminates the racestarting when the accumulators shift and their
+ * values are reported(i.
+ * e.
+ * accumulator 3 will fully represent the current queue state).
+ * 0:
+ * Do not prefetch accumulator 31:
+ * Prefetch accumulator 3
+*/
+#define  CONTROL_0_PRVTEKMODEPREFETCH_MASK	0x800000
+
+/*
+ * Causes non-zero accumulator values to be incremented before
+ * being reported upstream.
+ * 0:
+ * No accumulator values are incremented1:
+ * Non-zero accumulator values are incremented
+*/
+#define  CONTROL_0_PRVINCNONZEROACCUM_MASK	0x200000
+
+/*
+ * Disables FCS checking of un-mapped frames.
+ * This is intended to beused when passing unmapped frames to a UNI port.
+ * 0:
+ * All FCS errored un-mapped frames are discarded1:
+ * All un-mapped frames are passed to a UNI port
+*/
+#define  CONTROL_0_PRVNOUNMAPPPEDFCS_MASK	0x100000
+
+/*
+ * Causes discovery gates for empty queues to be discarded.
+ * 0:
+ * All discovery gates are processed1:
+ * Discovery gates for empty queues are discarded
+*/
+#define  CONTROL_0_PRVSUPRESSDISCEN_MASK	0x80000
+
+/*
+ * Overrides the value in EPON Downstream Max Size Frame register.
+ * 0:
+ * Use Downstream Max Size Frame register value1:
+ * The maximum frame size for non-VLAN frames is 1518 and themaximum size
+ * for VLAN-tagged frames is 1522
+*/
+#define  CONTROL_0_CFGVLANMAX_MASK	0x40000
+
+/*
+ * Determines which types of upstream frames are affected when
+ * forcingupstream FCS errors (as configured in EPON Force FCS Error
+ * register)0:
+ * Force FCS errors on all upstream frames.
+ * 1:
+ * Force FCS errors on user data frames only (not REPORT orprocessor
+ * frames).
+*/
+#define  CONTROL_0_FCSERRONLYDATAFR_MASK	0x20000
+
+/*
+ * Determines handling of traffic not mapped to a provisioned LLID0:
+ * Forward unmapped packets1:
+ * Drop unmapped packets
+*/
+#define  CONTROL_0_PRVDROPUNMAPPPEDLLID_MASK	0x1000
+
+/*
+ * Controls LLID mode bit suppression0:
+ * LLID mode enabled1:
+ * Suppress LLID mode by masking bit-15 of the LLID
+*/
+#define  CONTROL_0_PRVSUPPRESSLLIDMODEBIT_MASK	0x800
+
+/*
+ * Discovery gate destination address filter enable.
+ * 0:
+ * Always ignore discovery gate DA value1:
+ * Process discovery gate only if the following criteria are met.
+ * Please keep in mind the functionality provided by the legacy
+ * "DropDiscovery Gate" controls.
+ * No discovery gate will be processed if thediscovery gate's LLID index
+ * has been provisioned to "Drop DiscoveryGates".
+ * Also, only the first 8 LLID index values are eligible fordiscovery gate
+ * processing.
+ * Discovery gates with any other LLID indexvalues received from the LIF
+ * will not be processed.
+ * When this feature is disabled the discovery DA is ignored.
+ * When thisfeature is enabled there are four possible scenarios:
+ * (1) Received broadcast LLID (0x7FFF) and a unicast (not broadcastand not
+ * multicast) DA.
+ * Discovery gate is processed if one of the provisioned ONT addressesmust
+ * match the discovery gate's DA.
+ * (2) Received broadcast LLID (0x7FFF) and a broadcast DA.
+ * Discovery gate is processed.
+ * i.
+ * e.
+ * , the discovery gate's DA isignored.
+ * (3) Received a non-broadcast LLID and a unicast DA.
+ * Discovery gate is processed if the discovery gate's LLID
+ * indexprovisioned ONT address matches its DA.
+ * (4) Received a non-broadcast LLID and a broadcast DA.
+ * Discovery gate is processed.
+ * i.
+ * e.
+ * , the discovery gate's DA isignored.
+ * Please remember the "Drop Discovery Gate" control takes precedenceover
+ * all other configuration options
+*/
+#define  CONTROL_0_MODDISCOVERYDAFILTEREN_MASK	0x400
+
+/*
+ * Selects the number of Queue Sets generated for all LLID Indexes0:
+ * Dual queue set (default)1:
+ * Multi queue set ('Teknovus-style')Others:
+ * Reserved
+*/
+#define  CONTROL_0_RPTSELECT_SHIFT	8
+#define  CONTROL_0_RPTSELECT_MASK	0x300
+
+/*
+ * Disables Shaped Virtual Accumulator backpressure of BBH queue
+ * statusinterface.
+ * 0:
+ * SVA normal operation.
+ * Allows shapers to backpressure the BBHqueue status interface.
+ * 1:
+ * Disables Shaped Virtual Accumulator backpressure of BBH queuestatus
+ * interface.
+ * .
+*/
+#define  CONTROL_0_PRVDISABLESVAQUESTATUSBP_MASK	0x80
+
+/*
+ * Places the upstream transmitter (UTX) in loopback mode.
+ * 0:
+ * UTX normal operation1:
+ * UTX is in loopback mode.
+ * This setting is also used for Point toPoint mode (in conjunction with
+ * settings in the LIF Controlregister).
+*/
+#define  CONTROL_0_UTXLOOPBACK_MASK	0x40
+
+/*
+ * UTX Enable bit.
+ * 0:
+ * Disable the UTX block1:
+ * Enable UTX operation
+*/
+#define  CONTROL_0_UTXEN_MASK		0x20
+
+/*
+ * Reset the EPN upstream transmitter (UTX) logic.
+ * Asserting (activelow) this bit resets all UTX state machines and
+ * pointers.
+ * Note thatthis does not reset the UTX configuration registers.
+ * 0:
+ * Hold the UTX in reset1:
+ * Normal UTX operation
+*/
+#define  CONTROL_0_UTXRST_PRE_N_MASK	0x10
+
+/*
+ * Prevents any downstream traffic from being sent to the BBH.
+ * Thiscontrol is only applied between downstream packets.
+ * So, it can betoggled any time the drxEn bit is set.
+ * 0:
+ * Normal operation.
+ * 1:
+ * No data is sent to BBH.
+*/
+#define  CONTROL_0_CFGDISABLEDNS_MASK	0x8
+
+/*
+ * Places the downstream receiver (DRX) in loopback mode.
+ * The loopbackmode disables the EPON processing in EPN's downstream data
+ * path.
+ * Itdoes not 'loopback' any data.
+ * 0:
+ * DRX normal operation1:
+ * DRX is in loopback mode
+*/
+#define  CONTROL_0_DRXLOOPBACK_MASK	0x4
+
+/*
+ * DRX Enable bit.
+ * 0:
+ * Disable the DRX block1:
+ * Enable DRX operation
+*/
+#define  CONTROL_0_DRXEN_MASK		0x2
+
+/*
+ * Reset the EPN downstream receiver (DRX) logic.
+ * Asserting (activelow) this bit resets all DRX state machines and
+ * pointers.
+ * Note thatthis does not reset the DRX configuration registers.
+ * 0:
+ * Hold the DRX in reset1:
+ * Normal DRX operation
+*/
+#define  CONTROL_0_DRXRST_PRE_N_MASK	0x1
+
+
+/*
+ * Register <EPN_CONTROL_1>
+ *
+ * This register controls and configures the modules in the EPN block.
+ */
+#define EPN_CONTROL_1_REG		0x4
+
+/*
+ * When this bit is set, the EPON MAC will tag completely tardyupstream
+ * packets as an idle packet by replacingthe SOF transfer type with 5'd23.
+ * The XIF will detect the idlepacket SOF tag and replace the entire packet
+ * with idles.
+ * In the case of 1G the 5'd23 SOF is converted to 3'd6 to get past
+ * theTimeStamp logic and then converted to 3'd5 so the LIFcan detect the
+ * idle packet SOF tag and replace the entire packetwith idles.
+ * Partially tardy packets are still faked and their FCS valuesreplaced
+ * with zeros.
+ * Do not enable this feature for AE or P2Poperating modes.
+ * Default value is 0
+*/
+#define  CONTROL_1_CFGIDLEPACKETTXENABLE_MASK	0x800000
+
+/*
+ * When this bit is set, the EPON MAC will not dither the MPCPcorrection
+ * values sent to the LIF.
+ * Default value is 0
+*/
+#define  CONTROL_1_CFGDISABLEMPCPCORRECTIONDITHERING_MASK	0x400000
+
+/*
+ * Enables adjustment of "destructively overlapped" grants.
+ * Destructively overlapped grants are grants that overlap by more thanthe
+ * provisioned grant overhead (Lon + Loff + Sync time).
+ * 0:
+ * Disable adjustment of destructively overlapped grants.
+ * Destructively overlapped grants are dropped and the
+ * GrantMisaligninterrupt sets.
+ * 1:
+ * Destructively overlapped grants are adjusted to limit overlap tothe
+ * grant overhead value (which maximizes the useful length of theearlier
+ * grant).
+*/
+#define  CONTROL_1_PRVOVERLAPPEDGNTENABLE_MASK	0x200000
+
+/* Reset the grant misalignment hardware */
+#define  CONTROL_1_RSTMISALIGNTHR_MASK	0x100000
+
+/*
+ * When this bit is set, the EPON MAC will check the grant FIFOs forstale
+ * grants, and delete them.
+ * A stale grant has a grant start timethat is smaller (earlier) than the
+ * local ONU time.
+ * Default value is1
+*/
+#define  CONTROL_1_CFGSTALEGNTCHK_MASK	0x40000
+
+/*
+ * Global upstream FEC enable.
+ * When this bit is set, the EPON MAC willtake into account FEC overhead
+ * when generating report frames andfilling grants.
+ * Please note that the per-LLID index bit must also beset.
+*/
+#define  CONTROL_1_FECRPTEN_MASK	0x20000
+
+/*
+ * Enables an alternate scheme in the L1-to-L2 strict-priorityscheduler.
+ * This alternate scheme is useful only when bothcfgSharedBurstCap and
+ * cfgSharedL2 are set ("TK3715 CTCcompatibility" mode).
+ * Note:
+ * Support for this bit begins in Revision B0.
+ * 0:
+ * Default scheme.
+ * 1:
+ * Alternate mode.
+ * Use only in multi-priority mode whencfgSharedBurstCap is set.
+*/
+#define  CONTROL_1_CFGL1L2TRUESTRICT_MASK	0x80
+
+/*
+ * Sets the number of priorities for multi-priority mode.
+ * The 24available L2s queues are sequentially mapped to the
+ * prioritieswithin each LLID index.
+ * For example in mode "01":
+ * L2s queue 0 ismapped to LLID index 0 priority 0; L2s queue 1 is mapped
+ * to LLIDindex 0 priority 1; L2s queue 2 is mapped to LLID index 0
+ * priority2; L2s queue 3 is mapped to LLID index 1 priority 0; and so on.
+ * 00:
+ * Multi-priority mode disabled.
+ * 01:
+ * Eight LLID indexes with 3 priorities each.
+ * 10:
+ * Six LLID indexes with 4 priorities each.
+ * 11:
+ * Three LLID indexes with 8-priorities each.
+*/
+#define  CONTROL_1_CFGCTCRPT_SHIFT	5
+#define  CONTROL_1_CFGCTCRPT_MASK	0x60
+
+/*
+ * Disables incremental (+1, -1) correction of local downstream MPCPtime.
+ * When set, MPCP time is updated only when the differencebetween the local
+ * MPCP time and the timestamp received in an MPCPDUis different by greater
+ * than the EPN Time Stamp Differentialregister value.
+*/
+#define  CONTROL_1_CFGTSCORRDIS_MASK	0x10
+
+/* When set the ONU will ignore the force report bit on discoveryframes. */
+#define  CONTROL_1_CFGNODISCRPT_MASK	0x8
+
+/*
+ * When this bit is set, the start time offset for a discovery responsewill
+ * be equal to the discovery seed.
+ * Units of the offset are 16-bittimes.
+*/
+#define  CONTROL_1_DISABLEDISCSCALE_MASK	0x4
+
+/* All statistics RAM reads will clear the read location. */
+#define  CONTROL_1_CLRONRD_MASK		0x2
+
+
+/*
+ * Register <EPN_ENABLE_GRANTS>
+ *
+ * This register allows per-LLID control over whether the EPON MAC
+ * acceptsgrants from the OLT.
+ */
+#define EPN_ENABLE_GRANTS_REG		0x8
+
+/*
+ * Enable Grants on LLID Index 0.
+ * Reset default is 1.
+*/
+#define  ENABLE_GRANTS_ENGNTx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_DROP_DISC_GATES>
+ *
+ * This register allows per-LLID control over whether the EPON MACprocesses
+ * Discovery Gates.
+ */
+#define EPN_DROP_DISC_GATES_REG		0xc
+
+/* Discard Discovery Gates on LLID Index 0. */
+#define  DROP_DISC_GATES_SINKDISCGATESx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_DIS_FCS_CHK>
+ *
+ * This register allows per-LLID control over Ethernet frame CRC checkingin
+ * the EPON downstream block.
+ */
+#define EPN_DIS_FCS_CHK_REG		0x10
+
+/* Do not check FCS on downstream frames received on LLID index 0 */
+#define  DIS_FCS_CHK_DISABLEFCSCHKx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_PASS_GATES>
+ *
+ * This register allows per-LLID control over whether the EPON MAC
+ * passesdownstream gate frames to the BBH.
+ */
+#define EPN_PASS_GATES_REG		0x14
+
+/* If set, downstream gate frames will be passed to the BBH for LLID 0 */
+#define  PASS_GATES_PASSGATELLIDx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_CFG_MISALGN_FB>
+ *
+ * This register allows per-LLID control over grant misalignment
+ * checkingand feedback.
+ */
+#define EPN_CFG_MISALGN_FB_REG		0x18
+
+/*
+ * 0:
+ * Ignore misalignment condition1:
+ * Enable grant misalignment detection on LLID Index 0.
+ * Whendetected, EPON MAC will temporarily report empty queue status(REPORT
+ * frame) on the LLID Index.
+*/
+#define  CFG_MISALGN_FB_CFGMISALIGNFEEDBACKx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_DISCOVERY_FILTER>
+ *
+ * The 10G Discovery Information field is 16 bits right after the Synctime
+ * in the Disc Gate.
+ * The 10G discovery gate filter consistes of twobit-fields:
+ * a 16-bit Disc Info Value and 16-bit Disc Info Mask.
+ * For allbits in Disc Info Value whose corresponding Mask bits are clear
+ * (notmasked), the bits in Disc Info Value must exactly match
+ * thecorresponding bits in the GATE Discovery Information field.
+ * If theydon't match, drop the GATE (nothing is added into the Grant
+ * FIFO).
+ * Ifthey match, the Start Time and Length are added to the Grant FIFO.
+ */
+#define EPN_DISCOVERY_FILTER_REG	0x1c
+
+/*
+ * Any mask bit that is set will exclude its corresponding bit from
+ * theabove comparison (i.
+ * e.
+ * set mask bits are considered "don't care" bits).
+*/
+#define  DISCOVERY_FILTER_PRVDISCINFOMASK_SHIFT	16
+#define  DISCOVERY_FILTER_PRVDISCINFOMASK_MASK	0xffff0000
+
+/* The value to match */
+#define  DISCOVERY_FILTER_PRVDISCINFOVALUE_SHIFT	0
+#define  DISCOVERY_FILTER_PRVDISCINFOVALUE_MASK	0xffff
+
+
+/*
+ * Register <EPN_MINIMUM_GRANT_SETUP>
+ *
+ * The EPN requires a minimum amount of time to process each grant.
+ * Thisprocessing time includes the time for the BBH to fetch a packet
+ * andsetup time for theupstream data to be processed by the LIF/XIF.
+ * It is possible forbackpressure generated by the BBH and LIF/XIF to stall
+ * the grantprocessing beyond the time required to process the grant.
+ * Any grants that are not processed this many TimeQuanta before it
+ * GrantStart Time will be aborted and a grant miss-abort interrupt will
+ * begenerated.
+ */
+#define EPN_MINIMUM_GRANT_SETUP_REG	0x20
+
+/*
+ * Minimum amount of grant processing time required to guarantee
+ * theupstream data will be transmitted.
+ * The units are EPON TimeQuanta(16 nS).
+*/
+#define  MINIMUM_GRANT_SETUP_CFGMINGRANTSETUP_SHIFT	0
+#define  MINIMUM_GRANT_SETUP_CFGMINGRANTSETUP_MASK	0xffff
+
+
+/*
+ * Register <EPN_RESET_GNT_FIFO>
+ *
+ * This register allows resetting of the Grant FIFOs on a per-LLID basis.
+ */
+#define EPN_RESET_GNT_FIFO_REG		0x24
+
+/* Resets the read and write pointers for grant FIFO 0. */
+#define  RESET_GNT_FIFO_RSTGNTFIFOx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_RESET_L1_ACCUMULATOR>
+ *
+ * This register allows resetting of the L1 accumulators.
+ */
+#define EPN_RESET_L1_ACCUMULATOR_REG	0x28
+
+/* Set the respective bit(s) to reset L1 accumulator(s). */
+#define  RESET_L1_ACCUMULATOR_CFGL1SCLRACUM_SHIFT	0
+#define  RESET_L1_ACCUMULATOR_CFGL1SCLRACUM_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_L1_ACCUMULATOR_SEL>
+ *
+ * This register selects which virtual accumulator sizes are reported.
+ */
+#define EPN_L1_ACCUMULATOR_SEL_REG	0x2c
+
+/* Selects which L1S Un-shaped Virtual Accumulator size will bereported. */
+#define  L1_ACCUMULATOR_SEL_CFGL1SUVASIZESEL_SHIFT	5
+#define  L1_ACCUMULATOR_SEL_CFGL1SUVASIZESEL_MASK	0x3e0
+
+/* Selects which L1S Shaped Virtual Accumulator size will be reported. */
+#define  L1_ACCUMULATOR_SEL_CFGL1SSVASIZESEL_SHIFT	0
+#define  L1_ACCUMULATOR_SEL_CFGL1SSVASIZESEL_MASK	0x1f
+
+
+/*
+ * Register <EPN_L1_SVA_BYTES> - read-only
+ *
+ * Signed number of bytes in the selected L1S Shaped Virtual Accumulator
+ */
+#define EPN_L1_SVA_BYTES_REG		0x30
+
+/*
+ * Signed number of bytes in the selected L1S Shaped VirtualAccumulator.
+ * Bit-29 is the sign bit.
+ * A negative number indicates the Runner/BBH created a rounding error. Bit-28
+ * can be considered an overflow indication.
+ * Bits 27-0 are the actual number of bytes.
+*/
+#define  L1_SVA_BYTES_L1SSVASIZE_SHIFT	0
+#define  L1_SVA_BYTES_L1SSVASIZE_MASK	0x3fffffff
+
+
+/*
+ * Register <EPN_L1_UVA_BYTES> - read-only
+ *
+ * Signed number of bytes in the selected L1S Un-shaped VirtualAccumulator
+ */
+#define EPN_L1_UVA_BYTES_REG		0x34
+
+/*
+ * Signed number of bytes in the selected L1S Un-shaped VirtualAccumulator.
+ * Bit-29 is the sign bit.
+ * A negative number indicates the Runner/BBH created a rounding error. Bit-28
+ * can be considered an overflow indication.
+ * Bits 27-0 are the actual number of bytes.
+*/
+#define  L1_UVA_BYTES_L1SUVASIZE_SHIFT	0
+#define  L1_UVA_BYTES_L1SUVASIZE_MASK	0x3fffffff
+
+
+/*
+ * Register <EPN_L1_SVA_OVERFLOW> - read-only
+ *
+ * Indicates which SVAs have overflowed
+ */
+#define EPN_L1_SVA_OVERFLOW_REG		0x38
+
+/*
+ * Indicates which SVAs have overflowed.
+ * The overflow can only becorrected by reset.
+*/
+#define  L1_SVA_OVERFLOW_L1SSVAOVERFLOW_SHIFT	0
+#define  L1_SVA_OVERFLOW_L1SSVAOVERFLOW_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_L1_UVA_OVERFLOW> - read-only
+ *
+ * Indicates which UVAs have overflowed
+ */
+#define EPN_L1_UVA_OVERFLOW_REG		0x3c
+
+/*
+ * Indicates which UVAs have overflowed.
+ * The overflow can only becorrected by reset.
+*/
+#define  L1_UVA_OVERFLOW_L1SUVAOVERFLOW_SHIFT	0
+#define  L1_UVA_OVERFLOW_L1SUVAOVERFLOW_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_RESET_RPT_PRI>
+ *
+ * This register allows real-time forcing the per-priority report valuesto
+ * zero.
+ * This applies only to multi-priority reporting modes (CTC,NTT).
+ * Note:
+ * These bits are used for debug only.
+ */
+#define EPN_RESET_RPT_PRI_REG		0x40
+
+/* Force priority 0 report values to zero. */
+#define  RESET_RPT_PRI_NULLRPTPRIx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_RESET_L2_RPT_FIFO>
+ *
+ * This register allows resetting of the L2 Report FIFO pointers.
+ * Thecorresponding L2 accumulators are also cleared.
+ */
+#define EPN_RESET_L2_RPT_FIFO_REG	0x44
+
+/* Set the respective bit(s) to reset L2 FIFO(s). */
+#define  RESET_L2_RPT_FIFO_CFGL2SCLRQUE_SHIFT	0
+#define  RESET_L2_RPT_FIFO_CFGL2SCLRQUE_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_ENABLE_UPSTREAM>
+ *
+ * This register allows per-LLID enabling of upstream traffic.
+ * Disablingthe upstream on a particular LLID Index means that:
+ * 1.
+ * REPORT frames sent upstream on the LLID Index will report NOdata2.
+ * Grants on the LLID Index will be acted upon (the laser willturn on and
+ * any requested REPORT frame will be sent), but no userframes will be
+ * pulled from FIF and sent upstream.
+ */
+#define EPN_ENABLE_UPSTREAM_REG		0x48
+
+/* Set the respective bit(s) to enable the upstream LLID(s). */
+#define  ENABLE_UPSTREAM_CFGENABLEUPSTREAMREG_SHIFT	0
+#define  ENABLE_UPSTREAM_CFGENABLEUPSTREAMREG_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_ENABLE_UPSTREAM_FB> - read-only
+ *
+ * Feedback register to indicate pending/complete changes in the EPNEnable
+ * Upstream register.
+ * A non-zero result from a bitwise XOR betweenthis register and EPN Enable
+ * Upstream indicates that a new valuewritten to EPN Enable Upstream has
+ * not yet taken effect.
+ */
+#define EPN_ENABLE_UPSTREAM_FB_REG	0x4c
+
+/*
+ * Indicates the operational state of the upstream LLIDs.
+ * SeeEPN_ENABLE_UPSTREAM register description for details.
+*/
+#define  ENABLE_UPSTREAM_FB_CFGENABLEUPSTREAMFEEDBACK_SHIFT	0
+#define  ENABLE_UPSTREAM_FB_CFGENABLEUPSTREAMFEEDBACK_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_ENABLE_UPSTREAM_FEC>
+ *
+ * Per-LLID index based upstream FEC enable.
+ * Set the bit corresponding tothe LLID index to enable FEC overhead to be
+ * added to the packet lengthadjustment.
+ * Please note that the global FEC enable in Control register1 must also be
+ * set.
+ * For 10G upstream operation, per-LLID FEC enable isnot supported; set all
+ * of these bits for FEC operation, and clear allof them for non-FEC.
+ */
+#define EPN_ENABLE_UPSTREAM_FEC_REG	0x50
+
+/* Set the respective bit(s) to enable upstream FEC for LLID(s). */
+#define  ENABLE_UPSTREAM_FEC_CFGENABLEUPSTREAMFEC_SHIFT	0
+#define  ENABLE_UPSTREAM_FEC_CFGENABLEUPSTREAMFEC_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_REPORT_BYTE_LENGTH>
+ *
+ * The number of bytes of 10G upstream payload that should be reserved fora
+ * piggy-back report.
+ * Note that if the "force report" is not set then this register is
+ * notused.
+ * Also, this value must be increased by 16-bytes for FEC-lessupstream 10G
+ * mode.
+ * The extra bytes are required to compensate for the10G upstream
+ * "scrambler sync pattern".
+ */
+#define EPN_REPORT_BYTE_LENGTH_REG	0x54
+
+/* Number of bytes reserved for upstream report. */
+#define  REPORT_BYTE_LENGTH_PRVRPTBYTELEN_SHIFT	0
+#define  REPORT_BYTE_LENGTH_PRVRPTBYTELEN_MASK	0xff
+
+
+/*
+ * Register <EPN_MAIN_INT_STATUS>
+ *
+ * This register contains interrupt status for the EPON module.
+ * These bitsare sticky; to clear a bit, write 1 to it.
+ * The Data Port Busy Interrupt indicates whether or not the data port
+ * isfree to do another RAM access.
+ * The Grant Full interrupt indicates that a grant was aborted due to
+ * itsgrant FIFO being full.
+ * The Missed Grant interrupt indicates that a grant missed its time
+ * totransmit and was aborted.
+ * This occurs from the MPCP time havingexceeded the start time when the
+ * grant is to be scheduled.
+ * (Note thatgrant start time is adjusted by the Grant Start Time Delta
+ * register forthis calculation)The Grant Interval interrupt indicates that
+ * an LLID is not receivinggates quickly enough.
+ * If the amount of time elapsed since receiving aGate frame exceeds a
+ * programmed value the interrupt will be asserted.
+ * See the EPON LLID Grant Interval register.
+ * The Discovery Gate interrupt indicates that a discovery gate
+ * wasreceived.
+ * The Local Time Not Synced interrupt is used to indicate that the
+ * ONU'slocal time is out of sync with EPON time.
+ * The time a MPCPDU framesarrives is compared with the value of its time
+ * stamp.
+ * If thisdifference is greater than value specified by the Time
+ * StampDifferential register the interrupt will assert.
+ * The Local Time Synced interrupt is used to indicate that the ONU'slocal
+ * time is in sync with the OLT EPON time.
+ * The time a MPCPDU framesarrives is compared with the value of its time
+ * stamp.
+ * If thisdifference is less than or equal to the value specified by the
+ * TimeStamp Differential register the interrupt will assert.
+ */
+#define EPN_MAIN_INT_STATUS_REG		0x58
+
+/*
+ * Indicates the Runner/BBH aborted an upstream frame transfer.
+ * Pleasereference the Runner/BBH documentation for a list of events
+ * thatwill cause Runner/BBH to abort packets.
+*/
+#define  MAIN_INT_STATUS_INTBBHUPFRABORT_MASK	0x80000000
+
+/*
+ * Coalesced per-L2 burst cap overflow event indicator.
+ * This istriggered when the burst cap is dynamically resized below
+ * respectiveL2 accumulator's value.
+*/
+#define  MAIN_INT_STATUS_INTCOL2SBURSTCAPOVERFLOWPRES_MASK	0x40000000
+
+/*
+ * Coalesced Empty Report interrupt.
+ * One or more LLID indexes has transmitted a report in which all time
+ * quanta values were zero.
+ * SeeEPON Empty Report Interrupt Status for per-LLID Index interruptbits.
+*/
+#define  MAIN_INT_STATUS_INTCOEMPTYRPT_MASK	0x20000000
+
+/*
+ * The Drx detected an error that required the frame to be aborted.
+ * Culpable errors are FCS, oversize-frame, or undersize-frame.
+ * Note:
+ * The intDrxErrorAbortMask will prevent this 'coalesced bit frombeing set.
+ * This is in contrast to the 'individual' bits (0x41b)still being set even
+ * if the interrupt is masked.
+*/
+#define  MAIN_INT_STATUS_INTCODRXERRABORTPRES_MASK	0x10000000
+
+/*
+ * The Level 2 structure FIFO has overflowed.
+ * A frame length has beenlost.
+ * The Runner/BBH and EPN must be reset to recover from this.
+*/
+#define  MAIN_INT_STATUS_INTL2SFIFOOVERRUN_MASK	0x8000000
+
+/*
+ * Coalesced 1588 timestamp interrupt.
+ * See EPN_1588_TIMESTAMP_INT_STATUS for the interrupts.
+*/
+#define  MAIN_INT_STATUS_INTCO1588TSINT_MASK	0x4000000
+
+/*
+ * Coalesced Report FIFO non-empty interrupt.
+ * One or more LLID indiceshas a frame length present in its report FIFO.
+ * See EPON ReportPresent Interrupt Status for per-LLID Index interrupt
+ * bits.
+*/
+#define  MAIN_INT_STATUS_INTCORPTPRES_MASK	0x2000000
+
+/*
+ * Coalesced Grant Ready interrupt.
+ * One or more LLID indexes has agrant present in its Grant RAM.
+ * See EPON Grant Present InterruptStatus for per-LLID Index interrupt
+ * bits.
+*/
+#define  MAIN_INT_STATUS_INTCOGNTPRES_MASK	0x1000000
+
+/*
+ * Coalesced stale grant delete interrupt.
+ * One or more LLID indexes deleted a stale grant from its grant RAM.
+ * See EPON Deleted StaleGrant Interrupt Status for per-LLID Index
+ * interrupt bits.
+*/
+#define  MAIN_INT_STATUS_INTCODELSTALEGNT_MASK	0x800000
+
+/*
+ * Coalesced grant non-poll interrupt.
+ * One or more LLID indexesexceeded the Non-poll grant interval.
+ * See EPON Non-Poll GrantInterrupt Status for per-LLID Index interrupt
+ * bits.
+*/
+#define  MAIN_INT_STATUS_INTCOGNTNONPOLL_MASK	0x400000
+
+/*
+ * Coalesced grant misalign interrupt.
+ * One or more LLID indexesreceived a grant that was not aligned on frame
+ * boundaries.
+ * See EPONGrant Misalign Interrupt Status for per-LLID Index interrupt
+ * bits.
+*/
+#define  MAIN_INT_STATUS_INTCOGNTMISALIGN_MASK	0x200000
+
+/*
+ * Coalesced grant too far abort interrupt.
+ * One or more LLID indexesreceived a grant for greater than 34 seconds
+ * into the future.
+ * SeeEPON Grant Too Far Interrupt Status for per-LLID Index interruptbits.
+ * Note:
+ * This interrupt can set during registration of the first LLIDIndex
+ * (before the local MPCP clock is synchronized to the OLT).
+ * Firmware should check and clear this interrupt while registering
+ * thefirst link.
+*/
+#define  MAIN_INT_STATUS_INTCOGNTTOOFAR_MASK	0x100000
+
+/*
+ * Coalesced grant interval interrupt.
+ * One or more LLID indexes is notreceiving gates fast enough.
+ * See EPON Grant Interval InterruptStatus for per-LLID Index interrupt
+ * bits.
+*/
+#define  MAIN_INT_STATUS_INTCOGNTINTERVAL_MASK	0x80000
+
+/*
+ * Coalesced Discovery Gate received interrupt.
+ * One or more LLIDindexes received a Discovery Gate.
+ * See EPON Discovery Gate InterruptStatus for per-LLID Index interrupt
+ * bits.
+*/
+#define  MAIN_INT_STATUS_INTCOGNTDISCOVERY_MASK	0x40000
+
+/*
+ * Coalesced grant miss abort interrupt.
+ * One or more LLID indexesaborted a grant because it missed its slot time
+ * to transmit.
+ * SeeEPON Grant Miss Interrupt Status for per-LLID Index interrupt bits.
+*/
+#define  MAIN_INT_STATUS_INTCOGNTMISSABORT_MASK	0x20000
+
+/*
+ * Coalesced grant full abort interrupt.
+ * One or more LLID indexesaborted a grant due to its grant FIFO being
+ * full.
+ * See EPON GrantFull Interrupt Status for per-LLID Index interrupt bits.
+*/
+#define  MAIN_INT_STATUS_INTCOGNTFULLABORT_MASK	0x10000
+
+/*
+ * [FATAL] The EPN received an upstream frame whose length did notmatch the
+ * expected frame length.
+ * This is a fatal event.
+ * The entiredata path must be reset to recover from this event.
+*/
+#define  MAIN_INT_STATUS_INTBADUPFRLEN_MASK	0x8000
+
+/*
+ * The Runner/BBH upstream data path failed to deliver upstream data intime
+ * to meet the upPacketTxMargin requirement.
+*/
+#define  MAIN_INT_STATUS_INTUPTARDYPACKET_MASK	0x4000
+
+/* Report frame has been transmitted by EPON MAC. */
+#define  MAIN_INT_STATUS_INTUPRPTFRXMT_MASK	0x2000
+
+/*
+ * [FATAL] The burst information FIFO over ran.
+ * This is a fatal eventand requires the entire device to be reset and
+ * re-initialized.
+*/
+#define  MAIN_INT_STATUS_INTBIFIFOOVERRUN_MASK	0x1000
+
+/*
+ * A grant passed to the Upstream transmitter has size greater thanthat
+ * defined EPON Max Grant Size register.
+*/
+#define  MAIN_INT_STATUS_INTBURSTGNTTOOBIG_MASK	0x800
+
+/*
+ * A grant written into EPON grant RAM has size greater than thatdefined
+ * EPON Max Grant Size register.
+*/
+#define  MAIN_INT_STATUS_INTWRGNTTOOBIG_MASK	0x400
+
+/*
+ * A grant received by EPON MAC has size greater than that defined inthe
+ * EPON Max Grant Size register.
+*/
+#define  MAIN_INT_STATUS_INTRCVGNTTOOBIG_MASK	0x200
+
+/*
+ * EPON block cannot accumulate statistics quickly enough to count
+ * runtframes.
+ * Bursts of frames less than 20 bytes in size will cause thisinterrupt.
+*/
+#define  MAIN_INT_STATUS_INTDNSTATSOVERRUN_MASK	0x100
+
+/* EPON block was not able to process an upstream transmission event. */
+#define  MAIN_INT_STATUS_INTUPSTATSOVERRUN_MASK	0x80
+
+/* An out of order grant was received */
+#define  MAIN_INT_STATUS_INTDNOUTOFORDER_MASK	0x40
+
+/*
+ * [FATAL] Fatal Event.
+ * The Runner/BBH upstream data path stoppeddelivering packets.
+ * All upstream traffic for all LLID indexes hasbeen halted.
+ * Any upstream grants received are terminated with emptyreports (if
+ * requested).
+ * The only way to recover from this fatalevent is to reset the entire
+ * upstream data path.
+ * Check the EPN Fatal Upstream Fault Interrupt Status register to seewhich
+ * LLID(s) experienced the fault.
+*/
+#define  MAIN_INT_STATUS_INTTRUANTBBHHALT_MASK	0x20
+
+/*
+ * Grant length is less than overhead.
+ * Possible configuration error.
+*/
+#define  MAIN_INT_STATUS_INTUPINVLDGNTLEN_MASK	0x10
+
+/*
+ * [FATAL] Coalesced per-LLID index Runner/BBH fatal upstream deliveryfault
+ * indicator.
+ * Runner/BBH has lost coherency with the EPN.
+ * Thefour trigger events are:
+ * 1.
+ * Runner/BBH aborted a packet2.
+ * Runner/BBH transferred a packet shorter than was requested3.
+ * Runner/BBH transferred a packet longer than was requested4.
+ * Runner/BBH stopped transferring packets (as indicated
+ * byintTruantBbhHalt, below)Check the EPN Fatal Upstream Fault Interrupt
+ * Status register to seewhich LLID(s) experienced the fault.
+*/
+#define  MAIN_INT_STATUS_INTCOBBHUPSFAULT_MASK	0x8
+
+/* ONU timer is in sync */
+#define  MAIN_INT_STATUS_INTDNTIMEINSYNC_MASK	0x4
+
+/* ONU timer is out of sync */
+#define  MAIN_INT_STATUS_INTDNTIMENOTINSYNC_MASK	0x2
+
+/*
+ * EPON Data Port is ready.
+ * 0: Data Port is busy
+ * 1: Data Port is ready
+*/
+#define  MAIN_INT_STATUS_INTDPORTRDY_MASK	0x1
+
+
+/*
+ * Register <EPN_GNT_FULL_INT_STATUS>
+ *
+ * This register contains interrupt status for the EPON module.
+ * These bitsare sticky; to clear a bit, write 1 to it.
+ */
+#define EPN_GNT_FULL_INT_STATUS_REG	0x5c
+
+/* LLID index 0 aborted a grant due to its grant FIFO being full. */
+#define  GNT_FULL_INT_STATUS_INTDNGNTFULLABORTx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_GNT_FULL_INT_MASK>
+ *
+ * This register contains interrupt mask for the EPON module.
+ */
+#define EPN_GNT_FULL_INT_MASK_REG	0x60
+
+/*
+ * Mask LLID index 0 aborted a grant due to its grant FIFO being
+ * full interrupt.
+*/
+#define  GNT_FULL_INT_MASK_MASKINTDNGNTFULLABORTx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_GNT_MISS_INT_STATUS>
+ *
+ * This register contains interrupt status for the EPON module.
+ * These bitsare sticky; to clear a bit, write 1 to it.
+ */
+#define EPN_GNT_MISS_INT_STATUS_REG	0x64
+
+/* LLID index 0 aborted a grant because it missed its slot time to transmit. */
+#define  GNT_MISS_INT_STATUS_INTDNGNTMISSABORTx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_GNT_MISS_INT_MASK>
+ *
+ * This register contains interrupt mask for the EPON module.
+ */
+#define EPN_GNT_MISS_INT_MASK_REG	0x68
+
+/*
+ * Mask LLID index 0 aborted a grant because it missed its slot time
+ * to transmit interrupt.
+*/
+#define  GNT_MISS_INT_MASK_MASKINTDNGNTMISSABORTx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_DISC_RX_INT_STATUS>
+ *
+ * This register contains interrupt status for the EPON module.
+ * These bitsare sticky; to clear a bit, write 1 to it.
+ */
+#define EPN_DISC_RX_INT_STATUS_REG	0x6c
+
+/* LLID index 0 received a discovery gate */
+#define  DISC_RX_INT_STATUS_INTDNGNTDISCOVERYx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_DISC_RX_INT_MASK>
+ *
+ * This register contains interrupt mask for the EPON module.
+ */
+#define EPN_DISC_RX_INT_MASK_REG	0x70
+
+/* Mask LLID index 0 received a discovery gate interrupt. */
+#define  DISC_RX_INT_MASK_MASKINTDNGNTDISCOVERYx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_GNT_INTV_INT_STATUS>
+ *
+ * This register contains interrupt status for the EPON module.
+ * These bitsare sticky; to clear a bit, write 1 to it.
+ */
+#define EPN_GNT_INTV_INT_STATUS_REG	0x74
+
+/*
+ * LLID index 0 failed to receive a GATE within a time period defined by
+ * the EPN Grant Interval register.
+*/
+#define  GNT_INTV_INT_STATUS_INTDNGNTINTERVALx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_GNT_INTV_INT_MASK>
+ *
+ * This register contains interrupt mask for the EPON module.
+ */
+#define EPN_GNT_INTV_INT_MASK_REG	0x78
+
+/*
+ * Mask LLID index 0 failed to receive a GATE within a time period defined
+ * by the EPN Grant Interval register interrupt.
+*/
+#define  GNT_INTV_INT_MASK_MASKINTDNGNTINTERVALx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_GNT_FAR_INT_STATUS>
+ *
+ * This register contains interrupt status for the EPON module.
+ * These bitsare sticky; to clear a bit, write 1 to it.
+ */
+#define EPN_GNT_FAR_INT_STATUS_REG	0x7c
+
+/*
+ * LLID index 0 received (and aborted) a grant with a start time greater
+ * than 34 sec in the future
+*/
+#define  GNT_FAR_INT_STATUS_INTDNGNTTOOFARx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_GNT_FAR_INT_MASK>
+ *
+ * This register contains interrupt mask for the EPON module.
+ */
+#define EPN_GNT_FAR_INT_MASK_REG	0x80
+
+/*
+ * Mask LLID index 0 received (and aborted) a grant with a start
+ * time greater than 34 sec in the future interrupt
+*/
+#define  GNT_FAR_INT_MASK_MASKDNGNTTOOFARx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_GNT_MISALGN_INT_STATUS>
+ *
+ * This register contains interrupt status for the EPON module.
+ * These bitsare sticky; to clear a bit, write 1 to it.
+ */
+#define EPN_GNT_MISALGN_INT_STATUS_REG	0x84
+
+/* LLID index 0 received a misaligned grant */
+#define  GNT_MISALGN_INT_STATUS_INTDNGNTMISALIGNx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_GNT_MISALGN_INT_MASK>
+ *
+ * This register contains interrupt mask for the EPON module.
+ */
+#define EPN_GNT_MISALGN_INT_MASK_REG	0x88
+
+/* Mask LLID index 0 received a misaligned grant interrupt */
+#define  GNT_MISALGN_INT_MASK_MASKINTDNGNTMISALIGNx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_NP_GNT_INT_STATUS>
+ *
+ * This register contains interrupt status for the EPON module.
+ * These bitsare sticky; to clear a bit, write 1 to it.
+ */
+#define EPN_NP_GNT_INT_STATUS_REG	0x8c
+
+/* Non poll grant interval exceeded on LLID Index 0 */
+#define  NP_GNT_INT_STATUS_INTDNGNTNONPOLLx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_NP_GNT_INT_MASK>
+ *
+ * This register contains interrupt mask for the EPON module.
+ */
+#define EPN_NP_GNT_INT_MASK_REG		0x90
+
+/* Non poll grant interval exceeded on LLID Index 0 interrupt mask */
+#define  NP_GNT_INT_MASK_MASKDNGNTNONPOLLx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_DEL_STALE_INT_STATUS>
+ *
+ * This register contains interrupt status for the EPON module.
+ * These bitsare sticky; to clear a bit, write 1 to it.
+ */
+#define EPN_DEL_STALE_INT_STATUS_REG	0x94
+
+/* Stale grant deleted from LLID Index 0 grant RAM. */
+#define  DEL_STALE_INT_STATUS_INTDELSTALEGNTx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_DEL_STALE_INT_MASK>
+ *
+ * Interrupt mask for EPN_DEL_STALE_INT_STATUS
+ */
+#define EPN_DEL_STALE_INT_MASK_REG	0x98
+
+/* Stale grant deleted from LLID Index 0 grant RAM interrupt mask. */
+#define  DEL_STALE_INT_MASK_MASKINTDELSTALEGNTx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_GNT_PRES_INT_STATUS>
+ *
+ * This register contains interrupt status for the EPON module.
+ * These bitsare sticky; to clear a bit, write a 1 to it.
+ */
+#define EPN_GNT_PRES_INT_STATUS_REG	0x9c
+
+/* Grant present in LLID Index 0 grant RAM */
+#define  GNT_PRES_INT_STATUS_INTDNGNTRDYx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_GNT_PRES_INT_MASK>
+ *
+ * This register contains interrupt mask for the EPON module.
+ */
+#define EPN_GNT_PRES_INT_MASK_REG	0xa0
+
+/* Grant present in LLID Index 0 grant RAM interrupt mask */
+#define  GNT_PRES_INT_MASK_MASKDNGNTRDYx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_RPT_PRES_INT_STATUS>
+ *
+ * This register contains interrupt status for the EPON module.
+ * These bitsare sticky; to clear a bit, write 1 to it.
+ */
+#define EPN_RPT_PRES_INT_STATUS_REG	0xa4
+
+/* Frame length present in LLID Index 0 report FIFO */
+#define  RPT_PRES_INT_STATUS_INTUPRPTFIFOx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_RPT_PRES_INT_MASK>
+ *
+ * This register contains interrupt mask for the EPON module.
+ */
+#define EPN_RPT_PRES_INT_MASK_REG	0xa8
+
+/* Frame length present in LLID Index 0 report FIFO interrupt mask */
+#define  RPT_PRES_INT_MASK_MASKINTUPRPTFIFOx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_DRX_ABORT_INT_STATUS>
+ *
+ * This register contains interrupt status for the Drx error abort events.
+ * These bits are sticky; to clear a bit, write 1 to it.
+ */
+#define EPN_DRX_ABORT_INT_STATUS_REG	0xac
+
+/*
+ * The Drx detected an error that required an LLID Index 0-31
+ * (bitwise) frame be aborted in the RDP
+*/
+#define  DRX_ABORT_INT_STATUS_INTDRXERRABORT_SHIFT	0
+#define  DRX_ABORT_INT_STATUS_INTDRXERRABORT_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_DRX_ABORT_INT_MASK>
+ *
+ * This register contains interrupt mask for the Drx error abort events.
+ */
+#define EPN_DRX_ABORT_INT_MASK_REG	0xb0
+
+/*
+ * Mask the Drx detected an error that required an LLID Index 0-31 (bitwise)
+ * frame be aborted in the RDP interrupt.
+*/
+#define  DRX_ABORT_INT_MASK_MASKINTDRXERRABORT_SHIFT	0
+#define  DRX_ABORT_INT_MASK_MASKINTDRXERRABORT_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_EMPTY_RPT_INT_STATUS>
+ *
+ * This register contains interrupt status for the EPON module's empty
+ * report transmission.
+ * Any time the EPON module sends a report upstream and all the report
+ * values are zero, the bit corresponding to the LLID index will be set.
+ * These bits are sticky; to clear a bit, write 1 to it.
+ */
+#define EPN_EMPTY_RPT_INT_STATUS_REG	0xb4
+
+/* Time quanta values present in LLID Index 0 report were all zero. */
+#define  EMPTY_RPT_INT_STATUS_INTEMPTYRPTx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_EMPTY_RPT_INT_MASK>
+ *
+ * This register contains interrupt mask for the EPON module's empty report
+ * transmission.
+ * Any time the EPON module sends a report upstream and all the report
+ * values are zero, the bit corresponding to the LLID index will be set.
+ */
+#define EPN_EMPTY_RPT_INT_MASK_REG	0xb8
+
+/*
+ * Mask time quanta values present in LLID Index 0 report were all
+ * zero interrupt.
+*/
+#define  EMPTY_RPT_INT_MASK_MASKINTEMPTYRPTx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_BCAP_OVERFLOW_INT_STATUS>
+ *
+ * This register contains interrupt status indicating when the
+ * L2 accumulators exceed their burst-cap values.
+ * These bits are sticky; to clear a bit, write 1 to it.
+ */
+#define EPN_BCAP_OVERFLOW_INT_STATUS_REG	0xbc
+
+/*
+ * Indicates that the L2 accumulator 0-31 (bitwise) has exceeded its burst
+ * cap value.
+*/
+#define  BCAP_OVERFLOW_INT_STATUS_INTL2SBURSTCAPOVERFLOW_SHIFT	0
+#define  BCAP_OVERFLOW_INT_STATUS_INTL2SBURSTCAPOVERFLOW_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_BCAP_OVERFLOW_INT_MASK>
+ *
+ * This register contains interrupt mask indicating when the L2 accumulators
+ * exceed their burst-cap values.
+ */
+#define EPN_BCAP_OVERFLOW_INT_MASK_REG	0xc0
+
+/*
+ * Mask interrupt indicating that the L2 accumulator 0-31 (bitwise) has
+ * exceeded its burst cap value.
+*/
+#define  BCAP_OVERFLOW_INT_MASK_MASKINTL2SBURSTCAPOVERFLOW_SHIFT	0
+#define  BCAP_OVERFLOW_INT_MASK_MASKINTL2SBURSTCAPOVERFLOW_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_BBH_DNS_FAULT_INT_STATUS>
+ *
+ */
+#define EPN_BBH_DNS_FAULT_INT_STATUS_REG	0xc4
+
+/*
+ * Indicates the downstream BBH interface failed to transfer the downstream
+ * data fast enough.
+ * This occurs when the Epn dropped a downstream packet (sent abort).
+*/
+#define  BBH_DNS_FAULT_INT_STATUS_INTBBHDNSOVERFLOW_MASK	0x1
+
+
+/*
+ * Register <EPN_BBH_DNS_FAULT_INT_MASK>
+ *
+ */
+#define EPN_BBH_DNS_FAULT_INT_MASK_REG	0xc8
+
+/* Mask downstream BBH data path overflow interrupt. */
+#define  BBH_DNS_FAULT_INT_MASK_MASKINTBBHDNSOVERFLOW_MASK	0x1
+
+
+/*
+ * Register <EPN_BBH_UPS_FAULT_INT_STATUS>
+ *
+ */
+#define EPN_BBH_UPS_FAULT_INT_STATUS_REG	0xcc
+
+/*
+ * Indicates upstream LLID index 0 has lost coherency with the Runner/BBH.
+ * This condition can be recovered by resetting the data path associated
+ * with the LLID index.
+ * Note:
+ * Do not clear these interrupts until the LLID index data path has been
+ * reset or the LLID index's upstream traffic has been disabled using 'EPN
+ * Enable Upstream' register.
+*/
+#define  BBH_UPS_FAULT_INT_STATUS_INTBBHUPSFAULTx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_BBH_UPS_FAULT_INT_MASK>
+ *
+ */
+#define EPN_BBH_UPS_FAULT_INT_MASK_REG	0xd0
+
+/*
+ * Mask upstream LLID index 0 has lost coherency with the
+ * Runner/BBH interrupt.
+*/
+#define  BBH_UPS_FAULT_INT_MASK_MASKINTBBHUPSFAULTx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_BBH_UPS_ABORT_INT_STATUS>
+ *
+ */
+#define EPN_BBH_UPS_ABORT_INT_STATUS_REG	0xd4
+
+/*
+ * [FATAL] This bit indicates that BBH aborted an upstream packet at a time
+ * it was considered tardy by EPN.
+ * It is valid only if bit 31 of register 0x4b0 (fatalTardyBbhAbortEn) is
+ * set.
+ * 0: BBH has not aborted a tardy upstream packet.
+ * 1: BBH has aborted a tardy upstream packet.
+ *    All upstream data traffic has been disabled.
+ *    EPN must be reset to recover from this condition.
+*/
+#define  BBH_UPS_ABORT_INT_STATUS_TARDYBBHABORT_MASK	0x1
+
+
+/*
+ * Register <EPN_BBH_UPS_ABORT_INT_MASK>
+ *
+ */
+#define EPN_BBH_UPS_ABORT_INT_MASK_REG	0xd8
+
+/*
+ * Mask BBH aborted an upstream packet at a time it was considered tardy by
+ * EPN interrupt.
+*/
+#define  BBH_UPS_ABORT_INT_MASK_MASKTARDYBBHABORT_MASK	0x1
+
+
+/*
+ * Register <EPN_MAIN_INT_MASK>
+ *
+ */
+#define EPN_MAIN_INT_MASK_REG		0xdc
+
+#define  MAIN_INT_MASK_BBHUPFRABORTMASK_MASK	0x80000000
+#define  MAIN_INT_MASK_INTL2SBURSTCAPOVERFLOWMASK_MASK	0x40000000
+#define  MAIN_INT_MASK_INTCOEMPTYRPTMASK_MASK	0x20000000
+#define  MAIN_INT_MASK_INTDRXERRABORTMASK_MASK	0x10000000
+#define  MAIN_INT_MASK_INTL2SFIFOOVERRUNMASK_MASK	0x8000000
+#define  MAIN_INT_MASK_INTCO1588TSMASK_MASK	0x4000000
+#define  MAIN_INT_MASK_INTCORPTPRESMASK_MASK	0x2000000
+#define  MAIN_INT_MASK_INTCOGNTPRESMASK_MASK	0x1000000
+#define  MAIN_INT_MASK_INTCODELSTALEGNTMASK_MASK	0x800000
+#define  MAIN_INT_MASK_INTCOGNTNONPOLLMASK_MASK	0x400000
+#define  MAIN_INT_MASK_INTCOGNTMISALIGNMASK_MASK	0x200000
+#define  MAIN_INT_MASK_INTCOGNTTOOFARMASK_MASK	0x100000
+#define  MAIN_INT_MASK_INTCOGNTINTERVALMASK_MASK	0x80000
+#define  MAIN_INT_MASK_INTCOGNTDISCOVERYMASK_MASK	0x40000
+#define  MAIN_INT_MASK_INTCOGNTMISSABORTMASK_MASK	0x20000
+#define  MAIN_INT_MASK_INTCOGNTFULLABORTMASK_MASK	0x10000
+#define  MAIN_INT_MASK_BADUPFRLENMASK_MASK	0x8000
+#define  MAIN_INT_MASK_UPTARDYPACKETMASK_MASK	0x4000
+#define  MAIN_INT_MASK_UPRPTFRXMTMASK_MASK	0x2000
+#define  MAIN_INT_MASK_INTBIFIFOOVERRUNMASK_MASK	0x1000
+#define  MAIN_INT_MASK_BURSTGNTTOOBIGMASK_MASK	0x800
+#define  MAIN_INT_MASK_WRGNTTOOBIGMASK_MASK	0x400
+#define  MAIN_INT_MASK_RCVGNTTOOBIGMASK_MASK	0x200
+#define  MAIN_INT_MASK_DNSTATSOVERRUNMASK_MASK	0x100
+#define  MAIN_INT_MASK_INTUPSTATSOVERRUNMASK_MASK	0x80
+#define  MAIN_INT_MASK_DNOUTOFORDERMASK_MASK	0x40
+#define  MAIN_INT_MASK_TRUANTBBHHALTMASK_MASK	0x20
+#define  MAIN_INT_MASK_UPINVLDGNTLENMASK_MASK	0x10
+#define  MAIN_INT_MASK_INTCOBBHUPSFAULTMASK_MASK	0x8
+#define  MAIN_INT_MASK_DNTIMEINSYNCMASK_MASK	0x4
+#define  MAIN_INT_MASK_DNTIMENOTINSYNCMASK_MASK	0x2
+#define  MAIN_INT_MASK_DPORTRDYMASK_MASK	0x1
+
+/*
+ * Register <EPN_MAX_GNT_SIZE>
+ *
+ * The Maximum Grant Size register sets the threshold for the three
+ * Grant Too Big interrupts (in the EPN Main Interrupt Status register).
+ */
+#define EPN_MAX_GNT_SIZE_REG		0xe0
+
+/*
+ * Sets the Grant Size threshold for the three Grant Too Big interrupts.
+ * Units are TQ.
+*/
+#define  MAX_GNT_SIZE_MAXGNTSIZE_SHIFT	0
+#define  MAX_GNT_SIZE_MAXGNTSIZE_MASK	0xffff
+
+
+/*
+ * Register <EPN_MAX_FRAME_SIZE>
+ *
+ * Provisions the maximum allowable downstream frame size.
+ * The reset default is 1536.
+ * This register is overridden by the cfgVlanMaxSize bit.
+ * The maximum allowable value for this register is 2000 in 1G/2G mode
+ * and 10000 in 10G mode.
+ */
+#define EPN_MAX_FRAME_SIZE_REG		0xe4
+
+/*
+ * Maximum allowable downstream frame size.
+ * Frames larger than this value are discarded.
+*/
+#define  MAX_FRAME_SIZE_CFGMAXFRAMESIZE_SHIFT	0
+#define  MAX_FRAME_SIZE_CFGMAXFRAMESIZE_MASK	0x3fff
+
+
+/*
+ * Register <EPN_GRANT_OVR_HD>
+ *
+ * Defines how much of the grant length is consumed by laser on time, laser
+ * off time, and idle (sync) time.
+ * This value is subtracted from the grant length and the remainder is used
+ * to fill frames from FIF queues into the upstream burst.
+ * This register is used in both 1G and 10G upstream modes.
+ * The units are in TQ.
+ * Reset default is 0.
+ * NOTE:
+ * In 10G mode the Xif requires 2 extra TimeQuanta for "Eob".
+ * Also, in FECless 10G mode the Epn's "Report Byte Length" must have an
+ * extra 16-bytes added to its value to account for 10G "scrambler sync
+ * time".
+ */
+#define EPN_GRANT_OVR_HD_REG		0xe8
+
+/*
+ * 1G upstream mode -> Grant length consumed by overhead when FEC is
+ * enabled.
+ * 10G upstream mode -> Not used.
+*/
+#define  GRANT_OVR_HD_GNTOVRHDFEC_SHIFT	16
+#define  GRANT_OVR_HD_GNTOVRHDFEC_MASK	0xffff0000
+
+/*
+ * 1G upstream mode -> Grant length consumed by overhead when FEC is
+ * disabled.
+ * 10G upstream mode -> Grant length consumed by overhead.
+ * Used for both FEC and FEC-less modes.
+*/
+#define  GRANT_OVR_HD_GNTOVRHD_SHIFT	0
+#define  GRANT_OVR_HD_GNTOVRHD_MASK	0xffff
+
+
+/*
+ * Register <EPN_POLL_SIZE>
+ *
+ * Sets the size of polling grants for the purpose of generating the
+ * dnGntNonPoll interrupts.
+ * If a received grant's length, less EPN Grant Length Overhead, is less
+ * than or equal to the poll size, the grant is considered a poll and resets
+ * the poll grant interval timer for the LLID Index.
+ * Reset default is 64 decimal.
+ */
+#define EPN_POLL_SIZE_REG		0xec
+
+/*
+ * Size of polling grants when FEC is enabled.
+ * Units are TQ.
+ * Defaults to 64.
+*/
+#define  POLL_SIZE_POLLSIZEFEC_SHIFT	16
+#define  POLL_SIZE_POLLSIZEFEC_MASK	0xffff0000
+
+/*
+ * Size of polling grants when FEC is disabled.
+ * Units are TQ.
+ * Defaults to 64.
+*/
+#define  POLL_SIZE_POLLSIZE_SHIFT	0
+#define  POLL_SIZE_POLLSIZE_MASK	0xffff
+
+
+/*
+ * Register <EPN_DN_RD_GNT_MARGIN>
+ *
+ * This register determines how far in advance of the Grant Start Time that
+ * grants are considered for removal from the DRX Grant FIFO.
+ * Once a grant is chosen (the various LLID Indexes compete for the next
+ * burst slot - the Index with a grant that is within Read Grant Margin
+ * and closest to its Grant Start Time wins), it is popped from its grant
+ * FIFO and held until it meets the Grant Start Time Delta criteria
+ * (see below).
+ * The units of this register are TQ.
+ * The reset default value is 256 decimal.
+ */
+#define EPN_DN_RD_GNT_MARGIN_REG	0xf0
+
+/*
+ * How far in advance of Grant Start Time to consider a grant for removal
+ * from the grant FIFO.
+*/
+#define  DN_RD_GNT_MARGIN_RDGNTSTARTMARGIN_SHIFT	0
+#define  DN_RD_GNT_MARGIN_RDGNTSTARTMARGIN_MASK	0xffff
+
+
+/*
+ * Register <EPN_GNT_TIME_START_DELTA>
+ *
+ * This value determines how far in advance of the Grant Start Time that the
+ * next selected grant (already extracted from the Grant FIFO) will be
+ * handed to the EPN UTX (upstream transmit) logic and start to pre-fetch
+ * frames.
+ * The units of this register are TQ.
+ * The reset default value is 640 decimal.
+ */
+#define EPN_GNT_TIME_START_DELTA_REG	0xf4
+
+/*
+ * This value determines how far in advance of the Grant Start Time that the
+ * next selected grant (already extracted from the Grant FIFO) will be
+ * handed to the EPN UTX (upstream transmit) logic and start to pre-fetch
+ * frames.
+ * The units of this register are TQ.
+ * The reset default value is 640 decimal.
+*/
+#define  GNT_TIME_START_DELTA_GNTSTARTTIMEDELTA_SHIFT	0
+#define  GNT_TIME_START_DELTA_GNTSTARTTIMEDELTA_MASK	0xffff
+
+
+/*
+ * Register <EPN_TIME_STAMP_DIFF>
+ *
+ * This register sets a threshold for
+ * LocalTimeInSync/LocalTimeNotSync interrupts, and for local time reference
+ * updates.
+ * When the difference between the EPON MAC's local time and an MPCPDU's
+ * timestamp exceeds this value the LocalTimeNotSync interrupt is asserted.
+ * The units of this register are TQ.
+ * Reset default is 10 decimal.
+ */
+#define EPN_TIME_STAMP_DIFF_REG		0xf8
+
+/*
+ * Threshold for local time reference updates and related interrupts.
+ * Reset default is 10 decimal.
+*/
+#define  TIME_STAMP_DIFF_TIMESTAMPDIFFDELTA_SHIFT	0
+#define  TIME_STAMP_DIFF_TIMESTAMPDIFFDELTA_MASK	0xffff
+
+
+/*
+ * Register <EPN_UP_TIME_STAMP_OFF>
+ *
+ * This register helps determine the value for the Timestamp field inserted
+ * into REPORT and Processor frames.
+ * This value specifies an offset from the Grant Start Time of the upstream
+ * burst.
+ * The value programmed here will be roughly equivalent to the sum of
+ * Laser-On time plus IDLE time plus Preamble time.
+ * The goal is for the Timestamp inserted into a frame to match the MPCP
+ * time at which the first byte of the frame's Destination Address is
+ * transmitted.
+ */
+#define EPN_UP_TIME_STAMP_OFF_REG	0xfc
+
+/*
+ * Offset from Grant Start Time to use as the Timestamp field in REPORTs
+ * and processor-sent packets when FEC is enabled.
+ * Only used for 1G modes.
+ * Units are TQ.
+*/
+#define  UP_TIME_STAMP_OFF_TIMESTAMPOFFSETFEC_SHIFT	16
+#define  UP_TIME_STAMP_OFF_TIMESTAMPOFFSETFEC_MASK	0xffff0000
+
+/*
+ * Offset from Grant Start Time to use as the Timestamp field in REPORTs
+ * and processor-sent packets when FEC is disabled.
+ * Used for both 1G no FEC and all 10G modes.
+ * Units are TQ.
+*/
+#define  UP_TIME_STAMP_OFF_TIMESTAMPOFFSET_SHIFT	0
+#define  UP_TIME_STAMP_OFF_TIMESTAMPOFFSET_MASK	0xffff
+
+
+/*
+ * Register <EPN_GNT_INTERVAL>
+ *
+ * This register specifies the maximum allowed time between GATE messages
+ * received on an LLID.
+ * If the time elapsed is greater than the specified value, the Gate
+ * Interval interrupt asserts for that LLID Index.
+ * The units of this register are 262 us.
+ * The maximum interval is ~17 seconds.
+ */
+#define EPN_GNT_INTERVAL_REG		0x100
+
+/* Grant interval */
+#define  GNT_INTERVAL_GNTINTERVAL_SHIFT	0
+#define  GNT_INTERVAL_GNTINTERVAL_MASK	0xffff
+
+
+/*
+ * Register <EPN_DN_GNT_MISALIGN_THR>
+ *
+ * Sets the threshold for misalignment detection and handling.
+ * A grant misalignment condition is detected by the ONU whenever a grant to
+ * the ONU cannot be used efficiently (due to the grant size not aligning to
+ * even frame boundaries).
+ * When cfgGntMisalignX bits are set, the ONU uses this register to
+ * determine a misalignment condition and to take corrective action.
+ * prvUnusedGntThresh determines how many unused TQ there must be in a given
+ * grant in order for it to be considered misaligned.
+ * gntMisalignThresh indicates how many consecutive misaligned grants must
+ * be received in order to trigger handling of this condition.
+ * The ONU handles the misaligned condition by temporarily reporting 0 ("no
+ * data") in that LLID's REPORT frames.
+ */
+#define EPN_DN_GNT_MISALIGN_THR_REG	0x104
+
+/*
+ * Sets the minimum number of unused time quanta in a grant required in
+ * order for it to be considered misaligned.
+*/
+#define  DN_GNT_MISALIGN_THR_PRVUNUSEDGNTTHRESHOLD_SHIFT	16
+#define  DN_GNT_MISALIGN_THR_PRVUNUSEDGNTTHRESHOLD_MASK	0xffff0000
+
+/*
+ * Sets the number of misaligned grants needed to trigger misalignment
+ * handling.
+ * The value set here is one fewer than the desired number of consecutive
+ * misaligned grants, i.e. setting a value of 2 means that three misaligned
+ * grants in a row will trigger misalignment handling.
+*/
+#define  DN_GNT_MISALIGN_THR_GNTMISALIGNTHRESH_SHIFT	0
+#define  DN_GNT_MISALIGN_THR_GNTMISALIGNTHRESH_MASK	0x3ff
+
+
+/*
+ * Register <EPN_DN_GNT_MISALIGN_PAUSE>
+ *
+ * Indicates for how long after the misalignment condition is detected that
+ * the LLID Index's reporting will be "paused".
+ * This is achieved through reporting queue report values of zero.
+ * Units are 1 us.
+ */
+#define EPN_DN_GNT_MISALIGN_PAUSE_REG	0x108
+
+/* How long to stall reporting of queue status. */
+#define  DN_GNT_MISALIGN_PAUSE_GNTMISALIGNPAUSE_SHIFT	0
+#define  DN_GNT_MISALIGN_PAUSE_GNTMISALIGNPAUSE_MASK	0xffff
+
+
+/*
+ * Register <EPN_NON_POLL_INTV>
+ *
+ * Defines the amount of time required for triggering the non poll grant
+ * interrupts.
+ */
+#define EPN_NON_POLL_INTV_REG		0x10c
+
+/*
+ * If the amount of time since the last non poll grant exceeds this value,
+ * the respective LLID's interrupt will assert.
+ * Units of 65 us.
+*/
+#define  NON_POLL_INTV_NONPOLLGNTINTV_SHIFT	0
+#define  NON_POLL_INTV_NONPOLLGNTINTV_MASK	0xffff
+
+
+/*
+ * Register <EPN_FORCE_FCS_ERR>
+ *
+ * Forces upstream FCS errors on the selected LLID(s).
+ */
+#define EPN_FORCE_FCS_ERR_REG		0x110
+
+/* Force bad FCS for frames transmitting out of LLID 0 */
+#define  FORCE_FCS_ERR_FORCEFCSERRx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_GRANT_OVERLAP_LIMIT>
+ *
+ * Defines how much overlap is allowed between consecutive grants to the
+ * same ONU.
+ * Effectively this register defines how much of the grant overhead the Epn
+ * should attempt to recover when processing overlapped grants.
+ * Normally this register is provisioned with the same value as in the EPN
+ * Grant Overhead Length register.
+ * However, if the Xif or Lif require extra overhead not associated with
+ * Lon/Sync/Loff, then this register value must be smaller than the EPN
+ * Grant Overhead Length register value.
+ * This register is used in both 1G and 10G upstream modes.
+ * The units are in TQ.
+ * Reset default is 0.
+ * NOTE:
+ * In 10G upstream mode this register must be provisioned to match the value
+ * written to the XIF Overlapping Grant Overhead register (0x0364).
+ * The proper value is calculated as:
+ * EPN Grant Overlap Limit = Lon + Sync Time + Loff - 1 (start of burst) - 1
+ * (Idle Sync) - 3 (XIF Overlapping Grant Overhead).
+ */
+#define EPN_GRANT_OVERLAP_LIMIT_REG	0x114
+
+/*
+ * Number of time quanta by which two consecutive grants are allowed to
+ * overlap
+*/
+#define  GRANT_OVERLAP_LIMIT_PRVGRANTOVERLAPLIMIT_SHIFT	0
+#define  GRANT_OVERLAP_LIMIT_PRVGRANTOVERLAPLIMIT_MASK	0xffff
+
+
+/*
+ * Register <EPN_AES_CONFIGURATION_0>
+ *
+ * Allows control over reporting the extra per-packet overhead associated
+ * with 802.1AE encryption.
+ * The AES overhead compensation logic supports two overhead modes:
+ * implicit SCI and explicit SCI.
+ * The implicit SCI mode increases the per-packet overhead by 24 bytes.
+ * Explicit SCI mode increases the per-packet overhead by 32 bytes.
+ */
+#define EPN_AES_CFG_0_REG		0x118
+
+/*
+ * LLID index 0 AES overhead mode.
+ * 0: Implicit SCI AES overhead mode.
+ * 1: Explicit SCI AES overhead mode.
+ * ..
+ * LLID index 15 AES overhead mode.
+ * 30: Implicit SCI AES overhead mode.
+ * 31: Explicit SCI AES overhead mode.
+*/
+#define  AES_CFG_0_PRVUPSTREAMAESMODE_0_SHIFT	0
+#define  AES_CFG_0_PRVUPSTREAMAESMODE_0_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_DISC_GRANT_OVR_HD>
+ *
+ * The amount of overhead used in discovery gates.
+ */
+#define EPN_DISC_GRANT_OVR_HD_REG	0x11c
+
+/*
+ * This defines the amount of overhead used for Discovery gates.
+ * Units are 16-bit words.
+*/
+#define  DISC_GRANT_OVR_HD_DISCGNTOVRHD_SHIFT	0
+#define  DISC_GRANT_OVR_HD_DISCGNTOVRHD_MASK	0xffff
+
+
+/*
+ * Register <EPN_DN_DISCOVERY_SEED>
+ *
+ * Serves as the seed for generating the random offset during discovery
+ * gates.
+ * When this register is written the discovery random offset becomes this
+ * value
+ */
+#define EPN_DN_DISCOVERY_SEED_REG	0x120
+
+/*
+ * Specifies basis for generating the discovery offset.
+ * Units are TQ.
+*/
+#define  DN_DISCOVERY_SEED_CFGDISCSEED_SHIFT	0
+#define  DN_DISCOVERY_SEED_CFGDISCSEED_MASK	0xfff
+
+
+/*
+ * Register <EPN_DN_DISCOVERY_INC>
+ *
+ * Sets the amount by which the discovery random offset is incremented.
+ */
+#define EPN_DN_DISCOVERY_INC_REG	0x124
+
+/* Units are TQ */
+#define  DN_DISCOVERY_INC_CFGDISCINC_SHIFT	0
+#define  DN_DISCOVERY_INC_CFGDISCINC_MASK	0x3ff
+
+
+/*
+ * Register <EPN_DN_DISCOVERY_SIZE>
+ *
+ * Size of the grant for responses to Discovery Gates.
+ * When a Discovery Gate is received from the PON, EPN substitutes this
+ * value into the grant length as the grant goes into the Grant FIFO.
+ * Normally, the value set in this register will be 42 greater than what is
+ * set in EPN Discovery Gate Overhead.
+ */
+#define EPN_DN_DISCOVERY_SIZE_REG	0x128
+
+/*
+ * Size of response to discovery gate.
+ * Units are TQ.
+*/
+#define  DN_DISCOVERY_SIZE_CFGDISCSIZE_SHIFT	0
+#define  DN_DISCOVERY_SIZE_CFGDISCSIZE_MASK	0xffff
+
+
+/*
+ * Register <EPN_FEC_IPG_LENGTH>
+ *
+ * Specifies the IPG and REPORT frame sizes used in computing reported
+ * values.
+ * cfgRptLen and cfgFecRptLen are also used to qualify grant lengths and
+ * generate the intInvGntLength interrupt.
+ * Note the operating modes in which each of these fields is used:
+ * cfgFecIpgLength - Used only in 1G upstream FEC.
+ * Note 8-bytes of overhead are built in to the Epn's 1G upstream FEC
+ * calculations.
+ * cfgFecRptLen - Used only in 1G upstream, for LLIDs which are FEC-enabled.
+ * cfgRptLen - Used in 1G upstream for non-FEC LLIDs.
+ * Also used in 10G upstream, whether or not FEC is enabled (FEC is global
+ * at 10G upstream).
+ */
+#define EPN_FEC_IPG_LENGTH_REG		0x12c
+
+/*
+ * 10G and 1G upstream:
+ * The number of the bytes in the sum of IPG + PREAMBLE.
+ * This value must be programmed before the upstream is enabled.
+ * It must not be modified while the upstream is active.
+ * Default is 20.
+ * Units are bytes.
+*/
+#define  FEC_IPG_LENGTH_MODIPGPREAMBLEBYTES_SHIFT	24
+#define  FEC_IPG_LENGTH_MODIPGPREAMBLEBYTES_MASK	0x1f000000
+
+/*
+ * 1G upstream non-FEC:
+ * The length of the REPORT Frame + IPG + PREAMBLE.
+ * Used for non-FEC LLIDs.
+ * Use default value of 42.
+ * 10G upstream FEC:
+ * The length of the Report Frame + IPG + PREAMBLE + FEC.
+ * Set to 13.
+ * 10G upstream non-FEC:
+ * The length of the Report Frame + IPG + PREAMBLE.
+ * Set to 5.
+ * Defaults to 42 for 1G non-FEC.
+ * Units are TQ.
+*/
+#define  FEC_IPG_LENGTH_CFGRPTLEN_SHIFT	16
+#define  FEC_IPG_LENGTH_CFGRPTLEN_MASK	0xff0000
+
+/*
+ * The length of the REPORT Frame + IPG + PREAMBLE + FEC.
+ * Used only for 1G FEC upstream operation.
+ * Default is 58 for 1G upstream FEC.
+ * Units are TQ.
+*/
+#define  FEC_IPG_LENGTH_CFGFECRPTLENGTH_SHIFT	8
+#define  FEC_IPG_LENGTH_CFGFECRPTLENGTH_MASK	0xff00
+
+/*
+ * Length of IPG to be used in 1G upstream FEC computations.
+ * There are 8-bytes of overhead built into the Epn's 1G upstream
+ * FEC calculations.
+ * This means that if this register is set to zero then the Epn will add
+ * 8-bytes of overhead to each packet transmitted upstream.
+ * The size of the 1G upstream FEC IPG+preamble used by the Epn is
+ * (cfgFecIpgLength + 8).
+ * The smallest supported FEC IPG+preamble value is 8.
+ * To use the LIF's short preamble (7-byte minimum) capability; set the
+ * LIF's IPG value to 1 and set the Epn's cfgFecIpgLength to 0.
+ * Default 10.
+ * Units are bytes.
+*/
+#define  FEC_IPG_LENGTH_CFGFECIPGLENGTH_SHIFT	0
+#define  FEC_IPG_LENGTH_CFGFECIPGLENGTH_MASK	0xff
+
+
+/*
+ * Register <EPN_FAKE_REPORT_VALUE_EN>
+ *
+ * Enables a mode in which an LLID Index falsely reports that it has data,
+ * even when it does not.
+ * This mode is enabled per-LLID Index.
+ * Enabling this mode for an LLID Index causes it to report the value set in
+ * EPN Fake Report Value, regardless of any actual data that is in its
+ * associated queue(s).
+ */
+#define EPN_FAKE_REPORT_VALUE_EN_REG	0x130
+
+/*
+ * Per-LLID Index bits for enabling fake reporting.
+ * Setting a bit will cause that Index to send fake reports (when requested
+ * by a Force Report grant) with the value specified in EPN Fake Report
+ * Value.
+*/
+#define  FAKE_REPORT_VALUE_EN_FAKEREPORTVALUEEN_SHIFT	0
+#define  FAKE_REPORT_VALUE_EN_FAKEREPORTVALUEEN_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_FAKE_REPORT_VALUE>
+ *
+ * Specifies the value sent in fake reports.
+ */
+#define EPN_FAKE_REPORT_VALUE_REG	0x134
+
+/*
+ * The value to be used when sending fake reports.
+ * This value is in units of bytes.
+ * (This value is converted to TQ as necessary for the configured upstream
+ * data rate.)
+*/
+#define  FAKE_REPORT_VALUE_FAKEREPORTVALUE_SHIFT	0
+#define  FAKE_REPORT_VALUE_FAKEREPORTVALUE_MASK	0x1fffff
+
+
+/*
+ * Register <EPN_VALID_OPCODE_MAP>
+ *
+ * Specifies which of the first 16 MPCP opcode values should have their MPCP
+ * time overwritten.
+ * Opcode values 0 and 1 are always disabled.
+ * The 14 MPCP opcode values in the range of 2-15 can be enabled by setting
+ * the corresponding bit.
+ * The reset default value is 0x0058.
+ */
+#define EPN_VALID_OPCODE_MAP_REG	0x178
+
+/*
+ * The 14 MPCP opcode values in the range of 2-15 can be enabled by setting
+ * the corresponding bit.
+ * Note:
+ * Opcode values 0 and 1 must always be disabled!
+*/
+#define  VALID_OPCODE_MAP_PRVVALIDMPCPOPCODES_SHIFT	0
+#define  VALID_OPCODE_MAP_PRVVALIDMPCPOPCODES_MASK	0xffff
+
+
+/*
+ * Register <EPN_UP_PACKET_TX_MARGIN>
+ *
+ * Specifies the setup time margin for upstream data transfers to LIF.
+ * This margin is used to police the arrival time of data from the
+ * Runner/BBH upstream data path.
+ * If BBH fails to deliver (any part of) a packet in time, EPON inserts a
+ * "fake packet" into the upstream burst to substitute for the
+ * late-delivered packet.
+ * When the actual packet is eventually delivered from BBH, EPON discards
+ * it.
+ * Fake packets are zero-padded and contain a guaranteed-bad FCS.
+ * The upPacketTxMargin value needs to be larger than the upstream data path
+ * latency.
+ * The upstream data path latency is composed of LIF data path delay + MPCP
+ * time jitter + EPN data path delay (upTimeStampOff + 42 + 1 + 10).
+ * Please note that EPN's 10 TQ upstream data path latency includes 3 TQ for
+ * the LIF to respond to valid data on the upstream EPN-to-LIF interface,
+ * and LIF's upstream data path latency includes the provisioned
+ * upTimeStampOff value.
+ * This is because the LIF does not process a grant until it receives data
+ * from the EPN.
+ * So, the initial packet data must arrive 42 time quanta before the Grant
+ * Start Time.
+ */
+#define EPN_UP_PACKET_TX_MARGIN_REG	0x17c
+
+/*
+ * Minimum upstream data setup time for LIF/XIF.
+ * Units are TQ.
+*/
+#define  UP_PACKET_TX_MARGIN_UPPACKETTXMARGIN_SHIFT	0
+#define  UP_PACKET_TX_MARGIN_UPPACKETTXMARGIN_MASK	0xffff
+
+
+/*
+ * Register <EPN_MULTI_PRI_CFG_0>
+ *
+ * This register configures Multi-Priority reporting for all LLID indices
+ */
+#define EPN_MULTI_PRI_CFG_0_REG		0x180
+
+/*
+ * Enables deficit accounting in the CTC scheduler.
+ * Applies only when the CTC scheduler is configured for weighted
+ * round-robin operation.
+*/
+#define  MULTI_PRI_CFG_0_CFGCTCSCHDEFICITEN_MASK	0x1000
+
+/*
+ * Determines an internal burst cap value for any priority/L2 which has its
+ * burst cap set to zero.
+ * The internal burst cap value is used by the L1-to-L2 packet transfer
+ * logic.
+ * 0:
+ * Use an internal burst cap value of zero.
+ * (Do not use.)
+ * 1:
+ * Use an internal burst cap value of 2 KB.
+ * Reset default.
+ * 2:
+ * Use a "max value" internal burst cap:
+ * 128KB for 1G upstream.
+ * 3:
+ * Reserved (do not use)
+*/
+#define  MULTI_PRI_CFG_0_PRVZEROBURSTCAPOVERRIDEMODE_SHIFT	8
+#define  MULTI_PRI_CFG_0_PRVZEROBURSTCAPOVERRIDEMODE_MASK	0x300
+
+/*
+ * For the burst-cap limited queue set, configures whether each
+ * queue report/priority uses a separate L2 queue, or a shared one.
+ * Setting this bit, along with cfgSharedBurstCap, configures EPN into
+ * a TK3715-compatible reporting mode.
+ * Note:
+ * Using a shared L2 queue effectively disables the CTC output scheduler,
+ * since packets from all priorities are merged into a single pipe and
+ * scheduled (in strict priority) upon entry to the L2 queue.
+ * 0:
+ * Each priority level uses its own dedicated L2 queue.
+ * 1:
+ * All priority levels use a single, shared L2 queue
+*/
+#define  MULTI_PRI_CFG_0_CFGSHAREDL2_MASK	0x40
+
+/*
+ * For the burst-cap limited queue set, configures whether each
+ * queue report/priority is individually limited to the Burst Cap, or
+ * whether the sum of all queue report values is limited to the Burst Cap.
+ * 0:
+ * Limit each queue report value to a Burst Cap.
+ * Each queue report/priority uses the Burst Cap corresponding to its L2
+ * FIFO.
+ * This bit has effect only when cfgRptMultiPri is 1.
+ * Note:
+ * Setting this bit, along with cfgSharedL2, configures EPN into a
+ * TK3715-compatible reporting mode.
+ * 1:
+ * Limit the sum of the queue reports/priorities to the Burst Cap value.
+ * (Use the Burst Cap value corresponding to the lowest L2 FIFO assigned to
+ * the LLID index).
+ * Note:
+ * The following offsets must be included in the burst cap value calculation
+ * when "shared burst cap mode" is enabled.
+ * Decrease the desired burst cap value by 21 bytes in 1G non-FEC mode, 193
+ * bytes in 1G FEC mode.
+*/
+#define  MULTI_PRI_CFG_0_CFGSHAREDBURSTCAP_MASK	0x20
+
+/*
+ * Configures whether Multi-Priority REPORT frames include already-granted
+ * frames.
+ * 0:
+ * Report frames do NOT include frames that have already been granted.
+ * This is the setting for NTT operation.
+ * 1:
+ * Report frames DO include frames that have already been granted (but not
+ * yet sent upstream).
+ * This bit has effect only when cfgRptMultiPri is 1.
+*/
+#define  MULTI_PRI_CFG_0_CFGRPTGNTSOUTST0_MASK	0x10
+
+/*
+ * Determines the order within a queue set in which priorities are reported
+ * on.
+ * 0:
+ * Priorities are reported low to high.
+ * 1:
+ * Priorities are reported high to low.
+ * This bit has effect only when cfgRptMultiPri0 is 1.
+*/
+#define  MULTI_PRI_CFG_0_CFGRPTHIPRIFIRST0_MASK	0x8
+
+/*
+ * Configure order of queue sets.
+ * 0:
+ * The first queue set reports the full packet buffer queue depths; the
+ * second queue set reports up to the T/Burst Cap 0 threshold.
+ * 1:
+ * Swap queue sets in Multi-Priority REPORT frames.
+ * The first queue set reports up to the T/Burst Cap 0 threshold; the second
+ * queue set reports the full packet buffer queue depths.
+ * This bit has effect only when cfgRptMultiPri0 is 1.
+*/
+#define  MULTI_PRI_CFG_0_CFGRPTSWAPQS0_MASK	0x4
+
+/*
+ * Configure Multi-Priority mode.
+ * 0:
+ * Disable Multi-Priority mode.
+ * The reporting style is selected by the Report Select bit (bit 8 in the EPON
+ * Control 0 register).
+ * 1:
+ * Configured for Multi-Priority reporting mode.
+*/
+#define  MULTI_PRI_CFG_0_CFGRPTMULTIPRI0_MASK	0x1
+
+
+/*
+ * Register <EPN_SHARED_BCAP_OVRFLOW> - read-only
+ *
+ */
+#define EPN_SHARED_BCAP_OVRFLOW_REG	0x184
+
+#define  SHARED_BCAP_OVRFLOW_SHAREDBURSTCAPOVERFLOW_SHIFT	0
+#define  SHARED_BCAP_OVRFLOW_SHAREDBURSTCAPOVERFLOW_MASK	0x7ff
+
+/*
+ * Register <EPN_FORCED_REPORT_EN>
+ *
+ * Option to force an upstream report for an LLID Index that has
+ * not received an upstream grant with the "force report" bit set in more
+ * than 50 mS.
+ * This mode is enabled per-LLID Index.
+ * Discovery gates will not have their "force report" bits set.
+ * This bit should not be enabled unless the LLID index is registered with
+ * the OLT.
+ */
+#define EPN_FORCED_REPORT_EN_REG	0x188
+
+/*
+ * Per-LLID Index bits for enabling forced reporting.
+ * Setting a bit will cause that Index to send a report as part of the first
+ * upstream burst that occurs more than the number of mS provisioned
+ * in "cfgMaxReportInterval" after the last report was sent.
+*/
+#define  FORCED_REPORT_EN_CFGFORCEREPORTEN_SHIFT	0
+#define  FORCED_REPORT_EN_CFGFORCEREPORTEN_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_FORCED_REPORT_MAX_INTERVAL>
+ *
+ * Option to force an upstream report for an LLID Index that has
+ * not received an upstream grant with the "force report" bit set in more
+ * than 50 mS.
+ * This mode is enabled per-LLID Index.
+ * Discovery gates will not have their "force report" bits set.
+ * This bit should not be enabled unless the LLID index is registered with
+ * the OLT.
+ */
+#define EPN_FORCED_REPORT_MAX_INTERVAL_REG	0x18c
+
+/* The number of mS after the last report was sent to force the report. */
+#define  FORCED_REPORT_MAX_INTERVAL_CFGMAXREPORTINTERVAL_SHIFT	0
+#define  FORCED_REPORT_MAX_INTERVAL_CFGMAXREPORTINTERVAL_MASK	0x3f
+
+
+/*
+ * Register <EPN_L2S_FLUSH_CONFIG>
+ *
+ * Provides a mechanism to flush an L2 queue.
+ * The associated LLID index must be upstream disabled (including waiting
+ * for the upstream LLID index enable feedback confirmation) before starting
+ * the flush mechanism.
+ * Once an LLID index has been disabled, the L2 queues that comprise it may
+ * be flushed one at a time via this register.
+ * A flush is started by writing the cfgFlushL2sSel field and setting
+ * the cfgFlushL2sEn bit.
+ * The flushL2sDone bit will be set by the hardware when the selected L2
+ * queue has been flushed.
+ * Note:
+ * Do not change the cfgFlushL2sSel value unless both "flushL2sDone" and
+ * "cfgFlushL2sEn" are low.
+ */
+#define EPN_L2S_FLUSH_CONFIG_REG	0x190
+
+/*
+ * Enables the selected L2 queue to be flushed.
+ * This configuration bit must be forced low and "flushL2sDone" must be
+ * read back as low before "cfgFlushL2sSel" is written.
+ * 0:
+ * L2 queue flush is disabled
+ * 1: L2 queue flush is enabled
+*/
+#define  L2S_FLUSH_CONFIG_CFGFLUSHL2SEN_MASK	0x80000000
+
+/*
+ * This bit is set when the selected L2 queue has no more packets to flush.
+ * This bit will always be zero when the enable bit is zero.
+*/
+#define  L2S_FLUSH_CONFIG_FLUSHL2SDONE_MASK	0x40000000
+
+/*
+ * Selects the L2 queue that will be flushed when the enable is set.
+ * Do not write this register unless both "flushL2sDone" and "cfgFlushL2sEn"
+ * are low.
+*/
+#define  L2S_FLUSH_CONFIG_CFGFLUSHL2SSEL_SHIFT	0
+#define  L2S_FLUSH_CONFIG_CFGFLUSHL2SSEL_MASK	0x1f
+
+
+/*
+ * Register <EPN_DATA_PORT_COMMAND>
+ *
+ * This set of registers allows processor access to RAMs controlled by
+ * the modules in the EPON block.
+ * Data Port Control indicates whether a read or write access is occurring.
+ * Data Port Select indicates which RAM is being accessed.
+ * Writing to the Data Port Command register (offset 0) initiates the memory
+ * access.
+ * The processor may only access the Downstream Statistics RAM and Upstream
+ * Statistics RAM during run time.
+ * Accessing the L2 queue RAM may cause the EPON to fail in an unknown and
+ * random way.
+ * The flow for a write operation is as follows.
+ * 1.
+ * Check if the Data Port Interrupt is ready.
+ * 2.
+ * Update the Data Port Address register.
+ * 3.
+ * Update the Data Port Data register.
+ * 4. Update the Data Port Command register.
+ * Write a "1" to the Data Port Control and the RAM's index into the Data
+ * Port Select.
+ * 5.
+ * The operation is completed when the Data Port Interrupt is ready again.
+ * The flow for a read operation is as follows.
+ * 1.
+ * Check if the Data Port Interrupt is ready.
+ * 2.
+ * Update the Data Port Address register.
+ * 3.
+ * Update the Data Port Command register.
+ * Write a "0" to the Data Port Control and the RAM's index into the Data
+ * Port Select.
+ * 4.
+ * Check to see if the Data Port Interrupt is ready.
+ * 5.
+ * Read the Data Port Data register to get the operation's results.
+ * RAM Name RAM Size AutoInit?Downstream Statistics 672 x 32 YesUpstream
+ * Statistics 1088 x 32 YesL2 Queue RAM 16384 x 22 NoThe Downstream
+ * Statistics RAMs accumulate statistics for 32 LLIDs.
+ * Each LLID occupies 21 RAM offsets.
+ * Downstream Statistics RAM definition is as follows:
+ * LLID Index RAM Locations0 0 - 201 21 - 412 42 - 62.
+ * .n (up to 32) (n*21) - (n*21)+20RAM Offset Description0 Total bytes
+ * received1 FCS Errors2 OAM frames received3 GATE frames received4 64 byte
+ * frames received5 65 - 127 byte frames received6 128 - 255 byte frames
+ * received7 256 - 511 byte frames received8 512 - 1023 byte frames
+ * received9 1024 - 1518 byte frames received10 1519 - 2047 byte frames
+ * received11 2048 - 4095 byte frames received12 4096 - 9216 byte frames
+ * received13 Greater than 9216 byte frames received14 Oversized frames
+ * received15 Broadcast frames received16 Multicast frames received17
+ * Unicast frames received18 Undersized frames received19 OAM bytes
+ * received20 Register frames receivedThe Upstream Statistics RAMs
+ * accumulate statistics for 32 LLIDs.
+ * Each LLID occupies 17 RAM offsets.
+ * It is logically divided into 64 segments.
+ * The lower 32 segments contain the normal upstream statistics for each of
+ * the 32 LLID Indexes.
+ * The upper 32 segments are used to report the "fake packet" statistics.
+ * Fake packets are inserted into upstream bursts to substitute for packets
+ * that are delivered late from the Runner/BBH subsystem.
+ * Fake packets are zero-padded and contain a guaranteed-bad FCS.
+ * Note that a faked packet can cause broadcast and multicast packets to be
+ * reported as unicast packets, depending on whether the Runner/BBH upstream
+ * data delivery failure occurred before the DA data was delivered.
+ * LLID Index RAM Locations0 0 - 161 17 - 332 34 - 50.
+ * .n (up to 32) (n*17) - (n*17)+16Fake packet 0 544 - 560Fake packet 1 561
+ * - 577Fake packet 2 578 - 594.
+ * .Fake packet n [544+(n*17)] - [544+(n*17)+16]RAM Offset Description0
+ * Total bytes sent1 OAM frames sent2 REPORT frames sent3 64 byte frames
+ * sent4 65 - 127 byte frames sent5 128 - 255 byte frames sent6 256 - 511
+ * byte frames sent7 512 - 1023 byte frames sent8 1024 - 1518 byte frames
+ * sent9 1519 - 2047 byte frames sent10 2048 - 4095 byte frames sent11 4096
+ * - 9216 byte frames sent12 Greater than 9216 byte frames sent13 OAM bytes
+ * sent14 Broadcast frames sent15 Multicast frames sent16 Unicast frames
+ * sentNotes:
+ * Total bytes sent do not include OAM or Report frames.
+ * The various frame "bucket" statistics do not include OAM or
+ * Report frames.
+ * Oversized frames are frames that are greater than the value specified by
+ * the EPON Max Frame Size register.
+ * The exception is when cfgVlanMax mode is used.
+ * In this case an oversized condition occurs when a frame is greater than
+ * 1522 bytes when a VLAN tag is present, otherwise greater than 1518 bytes.
+ * An undersized frame condition occurs when the received frame is less than
+ * 64 bytes in length and has a valid FCS.
+ * The Unused Words counts the number of words in a grant that the LLID
+ * was not able to send upstream traffic on.
+ * If the LLID received a grant for 2K words, but it has only 1K words of
+ * data, the unused word count will increase by 1K.
+ * In most cases this condition should not happen.
+ * It indicates that the OLT is granting inefficiently.
+ * There could be a problem with the ONU's EPON overhead setting
+ * being incorrect.
+ */
+#define EPN_DATA_PORT_COMMAND_REG	0x194
+
+/*
+ * Indicates access to RAM is in progress.
+ * 0:
+ * Data port is ready to accept a command1:
+ * Data port is busy
+*/
+#define  DATA_PORT_COMMAND_DPORTBUSY_MASK	0x80000000
+
+/*
+ * Selects RAM to be accessed:
+ * 0: Downstream statistics RAM
+ * 1: Upstream statistics RAM
+ * 2: Reserved
+ * 3: L2 Queue RAM
+ * 4-31: Reserved
+*/
+#define  DATA_PORT_COMMAND_DPORTSELECT_SHIFT	4
+#define  DATA_PORT_COMMAND_DPORTSELECT_MASK	0x1f0
+
+/*
+ * Indicates data port operation:
+ * 0: Do a RAM read operation
+ * 1: Do a RAM write operation
+*/
+#define  DATA_PORT_COMMAND_DPORTCONTROL_MASK	0x1
+
+
+/*
+ * Register <EPN_DATA_PORT_ADDRESS>
+ *
+ */
+#define EPN_DATA_PORT_ADDR_REG		0x198
+
+/* Address for RAM accesses. */
+#define  DATA_PORT_ADDR_DPORTADDR_SHIFT	0
+#define  DATA_PORT_ADDR_DPORTADDR_MASK	0x3fff
+
+
+/*
+ * Register <EPN_DATA_PORT_DATA_0>
+ *
+ */
+#define EPN_DATA_PORT_DATA_0_REG	0x19c
+
+/* Low-order data dword for data port accesses */
+#define  DATA_PORT_DATA_0_DPORTDATA0_SHIFT	0
+#define  DATA_PORT_DATA_0_DPORTDATA0_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_UNMAP_BIG_CNT>
+ *
+ * Statistics register for traffic sent on unmapped LLIDs.
+ * This register saturates at maximum value and self-clears when read.
+ */
+#define EPN_UNMAP_BIG_CNT_REG		0x1a0
+
+/* Counts illegally large frames. */
+#define  UNMAP_BIG_CNT_UNMAPBIGERRCNT_SHIFT	0
+#define  UNMAP_BIG_CNT_UNMAPBIGERRCNT_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_UNMAP_FRAME_CNT>
+ *
+ * Statistics register for traffic sent on unmapped LLIDs.
+ * This register saturates at maximum value and self-clears when read.
+ */
+#define EPN_UNMAP_FRAME_CNT_REG		0x1a4
+
+/* Counts valid frames that are not Gates or OAM frames. */
+#define  UNMAP_FRAME_CNT_UNMAPFRCNT_SHIFT	0
+#define  UNMAP_FRAME_CNT_UNMAPFRCNT_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_UNMAP_FCS_CNT>
+ *
+ * Statistics register for traffic sent on unmapped LLIDs.
+ * This register saturates at maximum value and self-clears when read.
+ */
+#define EPN_UNMAP_FCS_CNT_REG		0x1a8
+
+/* Counts frame with FCS errors. */
+#define  UNMAP_FCS_CNT_UNMAPFCSERRCNT_SHIFT	0
+#define  UNMAP_FCS_CNT_UNMAPFCSERRCNT_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_UNMAP_GATE_CNT>
+ *
+ * Statistics register for traffic sent on unmapped LLIDs.
+ * This register saturates at maximum value and self-clears when read.
+ */
+#define EPN_UNMAP_GATE_CNT_REG		0x1ac
+
+/* Counts un-mapped gate frames. */
+#define  UNMAP_GATE_CNT_UNMAPGATECNT_SHIFT	0
+#define  UNMAP_GATE_CNT_UNMAPGATECNT_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_UNMAP_OAM_CNT>
+ *
+ * Statistics register for traffic sent on unmapped LLIDs.
+ * This register saturates at maximum value and self-clears when read.
+ */
+#define EPN_UNMAP_OAM_CNT_REG		0x1b0
+
+/* Counts un-mapped OAM frames. */
+#define  UNMAP_OAM_CNT_UNMAPOAMCNT_SHIFT	0
+#define  UNMAP_OAM_CNT_UNMAPOAMCNT_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_UNMAP_SMALL_CNT>
+ *
+ * Statistics register for traffic sent on unmapped LLIDs.
+ * This register saturates at maximum value and self-clears when read.
+ */
+#define EPN_UNMAP_SMALL_CNT_REG		0x1b4
+
+/* Counts illegally small frames. */
+#define  UNMAP_SMALL_CNT_UNMAPSMALLERRCNT_SHIFT	0
+#define  UNMAP_SMALL_CNT_UNMAPSMALLERRCNT_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_FIF_DEQUEUE_EVENT_CNT>
+ *
+ */
+#define EPN_FIF_DEQUEUE_EVENT_CNT_REG	0x1b8
+
+/* Debug only! */
+#define  FIF_DEQUEUE_EVENT_CNT_FIFDEQUEUEEVENTCNT_SHIFT	0
+#define  FIF_DEQUEUE_EVENT_CNT_FIFDEQUEUEEVENTCNT_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_BBH_UP_FAULT_HALT_EN>
+ *
+ * Per-LLID index Runner/BBH upstream fault halt enable.
+ * This allows the default fatal upstream fault halt behavior to be
+ * disabled.
+ * The reset default value is enabled (all ones).
+ * This register should only be used for debug.
+ * All the bits in this register must be set during normal operation.
+ */
+#define EPN_BBH_UP_FAULT_HALT_EN_REG	0x1dc
+
+/*
+ * Per-LLID Index Runner/BBH upstream fault halt enable.
+ * 0:
+ * Do not disable upstream data traffic.
+ * 1:
+ * Disable upstream data traffic for LLID indexes the Runner/BBHfaulted on.
+*/
+#define  BBH_UP_FAULT_HALT_EN_BBHUPSFAULTHALTEN_SHIFT	0
+#define  BBH_UP_FAULT_HALT_EN_BBHUPSFAULTHALTEN_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_BBH_UP_TARDY_HALT_EN>
+ *
+ * Per-LLID index Runner/BBH upstream fault halt enable.
+ * This allows the default fatal upstream fault halt behavior to be
+ * disabled.
+ * The reset default value is enabled (all ones).
+ * This register should only be used for debug.
+ * All the bits in this register must be set during normal operation.
+ */
+#define EPN_BBH_UP_TARDY_HALT_EN_REG	0x1e0
+
+/*
+ * Disable all upstream data traffic when the BBH aborts a tardy packet.
+ * 0:
+ * Do not disable upstream data traffic on tardy/abort
+ * 1: Disable all upstream data traffic when the BBH aborts a tardy packet.
+*/
+#define  BBH_UP_TARDY_HALT_EN_FATALTARDYBBHABORTEN_MASK	0x1
+
+
+/*
+ * Register <EPN_DEBUG_STATUS_0> - read-only
+ *
+ * This register contains the real time status bits to aid debugging theEPN
+ * module.
+ */
+#define EPN_DEBUG_STATUS_0_REG		0x1e4
+
+/* Indicates which LLID's report FIFO is full */
+#define  DEBUG_STATUS_0_L2SQUEFULLDEBUG_SHIFT	8
+#define  DEBUG_STATUS_0_L2SQUEFULLDEBUG_MASK	0xff00
+
+#define  DEBUG_STATUS_0_DNDLUFULL_MASK	0x10
+/* SEC and EPN downstream */
+#define  DEBUG_STATUS_0_DNSECFULL_MASK	0x8
+
+/* SEC and EPN upstream interface FIFO is full */
+#define  DEBUG_STATUS_0_EPNLIFFIFOFULL_MASK	0x4
+
+
+/*
+ * Register <EPN_DEBUG_STATUS_1> - read-only
+ *
+ * This register contains the real time status bits to aid debugging theEPN
+ * module.
+ */
+#define EPN_DEBUG_STATUS_1_REG		0x1e8
+
+/* LLID Index 0 has grant(s) pending */
+#define  DEBUG_STATUS_1_GNTRDYx_MASK(x)	(1 << ((x) + 1))
+
+
+/*
+ * Register <EPN_DEBUG_L2S_PTR_SEL>
+ *
+ */
+#define EPN_DEBUG_L2S_PTR_SEL_REG	0x1ec
+
+#define  DEBUG_L2S_PTR_SEL_CFGL2SDEBUGPTRSEL_SHIFT	16
+#define  DEBUG_L2S_PTR_SEL_CFGL2SDEBUGPTRSEL_MASK	0x70000
+#define  DEBUG_L2S_PTR_SEL_L2SDEBUGPTRSTATE_SHIFT	0
+#define  DEBUG_L2S_PTR_SEL_L2SDEBUGPTRSTATE_MASK	0x7fff
+
+/*
+ * Register <EPN_OLT_MAC_ADDR_LO>
+ *
+ * This register stores a MAC address for the OLT.
+ * This address isinserted as the DA in REPORT frames sent upstream.
+ */
+#define EPN_OLT_MAC_ADDR_LO_REG		0x230
+
+/* OLT MAC Address */
+#define  OLT_MAC_ADDR_LO_OLTADDRLO_SHIFT	0
+#define  OLT_MAC_ADDR_LO_OLTADDRLO_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_OLT_MAC_ADDR_HI>
+ *
+ * This register stores a MAC address for the OLT.
+ * This address isinserted as the DA in REPORT frames sent upstream.
+ */
+#define EPN_OLT_MAC_ADDR_HI_REG		0x234
+
+/* OLT MAC Address */
+#define  OLT_MAC_ADDR_HI_OLTADDRHI_SHIFT	0
+#define  OLT_MAC_ADDR_HI_OLTADDRHI_MASK	0xffff
+
+
+/*
+ * Register <EPN_TX_L1S_SHP_DQU_EMPTY> - read-only
+ *
+ * Indicates empty status of L1 shaped queues
+ */
+#define EPN_TX_L1S_SHP_DQU_EMPTY_REG	0x278
+
+/*
+ * Each bit in this register contains the status of the respective queue:
+ * 0: L1 accumulator is not empty
+ * 1: L1 accumulator is empty
+*/
+#define  TX_L1S_SHP_DQU_EMPTY_L1SDQUQUEEMPTY_SHIFT	0
+#define  TX_L1S_SHP_DQU_EMPTY_L1SDQUQUEEMPTY_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_TX_L1S_UNSHAPED_EMPTY> - read-only
+ *
+ * Indicates status of L1 unshaped-empty accumulators
+ */
+#define EPN_TX_L1S_UNSHAPED_EMPTY_REG	0x27c
+
+/*
+ * Each bit in this register contains the status of the
+ * respective accumulator:
+ * 0: L1 unshaped accumulator is not empty
+ * 1: L1 unshaped accumulator is empty
+*/
+#define  TX_L1S_UNSHAPED_EMPTY_L1SUNSHAPEDQUEEMPTY_SHIFT	0
+#define  TX_L1S_UNSHAPED_EMPTY_L1SUNSHAPEDQUEEMPTY_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_TX_L2S_QUE_EMPTY> - read-only
+ *
+ * L2 queue empty status
+ */
+#define EPN_TX_L2S_QUE_EMPTY_REG	0x2c0
+
+/*
+ * Each bit in this register contains the status of the respective queue:
+ * 0: L2 queue is not empty
+ * 1: L2 queue is empty
+*/
+#define  TX_L2S_QUE_EMPTY_L2SQUEEMPTY_SHIFT	0
+#define  TX_L2S_QUE_EMPTY_L2SQUEEMPTY_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_TX_L2S_QUE_FULL> - read-only
+ *
+ * L2 queue full status
+ */
+#define EPN_TX_L2S_QUE_FULL_REG		0x2c4
+
+/*
+ * Each bit in this register contains the status of the respective queue:
+ * 0: L2 queue is not full
+ * 1: L2 queue is full
+*/
+#define  TX_L2S_QUE_FULL_L2SQUEFULL_SHIFT	0
+#define  TX_L2S_QUE_FULL_L2SQUEFULL_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_TX_L2S_QUE_STOPPED> - read-only
+ *
+ * L2 queue stopped status
+ */
+#define EPN_TX_L2S_QUE_STOPPED_REG	0x2c8
+
+/*
+ * Each bit in this register contains the status of the respective queue:
+ * 0: L2 queue is not stopped
+ * 1: L2 queue is stopped
+*/
+#define  TX_L2S_QUE_STOPPED_L2SSTOPPEDQUEUES_SHIFT	0
+#define  TX_L2S_QUE_STOPPED_L2SSTOPPEDQUEUES_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_BBH_MAX_OUTSTANDING_TARDY_PACKETS>
+ *
+ * Every time the BBH fails to deliver a packet in time to be
+ * transmitted upstream it is placed in failed state and the Epn increments
+ * a tardy packet counter.
+ * Every time the BBH delivers a packet while it is in the failed state the
+ * tardy packet counter is decremented.
+ * The BBH will be taken out of the failed state when the tardy packet
+ * counter reaches 0.
+ * If the tardy packet counter value exceeds the value of this register; then
+ * the BBH becomes truant and the Epn stops upstream traffic until the Epn
+ * is reset.
+ * It is expected the BBH, runner and Epn will be reset at the same time.
+ */
+#define EPN_BBH_MAX_OUTSTANDING_TARDY_PACKETS_REG	0x2ec
+
+/*
+ * Maximum number of outstanding tardy packets the BBH can accumulate
+ * at one time.
+*/
+#define  BBH_MAX_OUTSTANDING_TARDY_PACKETS_CFGMAXOUTSTANDINGTARDYPACKETS_SHIFT	0
+#define  BBH_MAX_OUTSTANDING_TARDY_PACKETS_CFGMAXOUTSTANDINGTARDYPACKETS_MASK	0x3ff
+
+
+/*
+ * Register <EPN_MIN_REPORT_VALUE_DIFFERENCE>
+ *
+ * The Virtual Thresholds are determined from the smaller of the queue
+ * set shaper and the accumulated queue length.
+ * The Virtual Thresholds must also prevent pathological values from being
+ * reported.
+ * The reported value should be at least one maximum frame length greater
+ * than the previous queue set threshold or else it will be reported as
+ * zero.
+ * This prevents a head of line blocking issue if a large frame is at the
+ * head of the queue and also prevents reporting illegally short
+ * grant lengths.
+ * Every time the BBH fails to deliver a packet in time to be transmitted
+ * upstream
+ */
+#define EPN_MIN_REPORT_VALUE_DIFFERENCE_REG	0x2f0
+
+/* The smallest allowable difference between adjacent non-zero queue sets. */
+#define  MIN_REPORT_VALUE_DIFFERENCE_PRVMINREPORTDIFF_SHIFT	0
+#define  MIN_REPORT_VALUE_DIFFERENCE_PRVMINREPORTDIFF_MASK	0x3fff
+
+
+/*
+ * Register <EPN_BBH_STATUS_FIFO_OVERFLOW> - read-only
+ *
+ * Indicates which BBH queue status interface event FIFOs have overflowed.
+ * Note:
+ * These bits are used for debug only.
+ */
+#define EPN_BBH_STATUS_FIFO_OVERFLOW_REG	0x2f4
+
+/*
+ * Indicates which BBH queue status interface event FIFOs have overflowed.
+ * The overflow can only be corrected by reset.
+*/
+#define  BBH_STATUS_FIFO_OVERFLOW_UTXBBHSTATUSFIFOOVERFLOW_SHIFT	0
+#define  BBH_STATUS_FIFO_OVERFLOW_UTXBBHSTATUSFIFOOVERFLOW_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_SPARE_CTL>
+ *
+ * Spare RW bits
+ */
+#define EPN_SPARE_CTL_REG		0x2f8
+
+/* Spare RW bits */
+#define  SPARE_CTL_CFGEPNSPARE_SHIFT	2
+#define  SPARE_CTL_CFGEPNSPARE_MASK	0xfffffffc
+
+/*
+ * Set this bit to enable store and forward upstream AE operation.
+ * Only set this bit when operating in Point-to-Point or ActiveEthernet
+ * modes.
+*/
+#define  SPARE_CTL_ECOUTXSNFENABLE_MASK	0x2
+
+/*
+ * Set this bit to prevent transmitting when the LLID's shapers areempty.
+ * This will preventthe ONU from using excess bandwidth granted to it by
+ * the OLT.
+ * Do not set this bit when operating in Point-to-Point or ActiveEthernet
+ * modes.
+*/
+#define  SPARE_CTL_ECOJIRA758ENABLE_MASK	0x1
+
+
+/*
+ * Register <EPN_TS_SYNC_OFFSET>
+ *
+ * Timestamp synchronizer offset.
+ */
+#define EPN_TS_SYNC_OFFSET_REG		0x2fc
+
+/*
+ * Provides lowest 6 bits of timestamp synchronizer, from 250 MHzdomain to
+ * 125 MHz.
+*/
+#define  TS_SYNC_OFFSET_CFGTSSYNCOFFSET_SHIFT	0
+#define  TS_SYNC_OFFSET_CFGTSSYNCOFFSET_MASK	0x3f
+
+
+/*
+ * Register <EPN_DN_TS_OFFSET>
+ *
+ * Downstream timestamp offset.
+ */
+#define EPN_DN_TS_OFFSET_REG		0x300
+
+/* Provides signed offset for downstream packet timestamping. */
+#define  DN_TS_OFFSET_CFGDNTSOFFSET_SHIFT	0
+#define  DN_TS_OFFSET_CFGDNTSOFFSET_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_UP_TS_OFFSET_LO>
+ *
+ * Upstream timestamp offset, lower 32 bits.
+ */
+#define EPN_UP_TS_OFFSET_LO_REG		0x304
+
+/* Provides signed offset for upstream packet timestamping. */
+#define  UP_TS_OFFSET_LO_CFGUPTSOFFSET_LO_SHIFT	0
+#define  UP_TS_OFFSET_LO_CFGUPTSOFFSET_LO_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_UP_TS_OFFSET_HI>
+ *
+ * Upstream timestamp offset, upper 16 bits.
+ */
+#define EPN_UP_TS_OFFSET_HI_REG		0x308
+
+/* Provides signed offset for upstream packet timestamping. */
+#define  UP_TS_OFFSET_HI_CFGUPTSOFFSET_HI_SHIFT	0
+#define  UP_TS_OFFSET_HI_CFGUPTSOFFSET_HI_MASK	0xffff
+
+
+/*
+ * Register <EPN_TWO_STEP_TS_CTL>
+ *
+ * Provides control for the reading of two step timestamp FIFO, 4
+ * entriesdeep.
+ */
+#define EPN_TWO_STEP_TS_CTL_REG		0x30c
+
+/*
+ * Provides the reading of the two step timestamp FIFO.
+ * A write valueof 1 will advance the FIFO to the next entry.
+ * The 48-bits value isprovided by
+ * registersEPN_TWO_STEP_TS_VALUE_HI/EPN_TWO_STEP_TS_VALUE_LO.
+*/
+#define  TWO_STEP_TS_CTL_TWOSTEPFFRD_MASK	0x80000000
+
+/* Indicates the number of entries in the two step timestamp FIFO. */
+#define  TWO_STEP_TS_CTL_TWOSTEPFFENTRIES_SHIFT	0
+#define  TWO_STEP_TS_CTL_TWOSTEPFFENTRIES_MASK	0x7
+
+
+/*
+ * Register <EPN_TWO_STEP_TS_VALUE_LO> - read-only
+ *
+ * Lower 32-bits of 48-bits timestamp value, applicable for two
+ * steptimestamping.
+ */
+#define EPN_TWO_STEP_TS_VALUE_LO_REG	0x310
+
+/* Lower 32-bits of two-step timestamp value for IEEE 1588timestamping. */
+#define  TWO_STEP_TS_VALUE_LO_TWOSTEPTIMESTAMP_LO_SHIFT	0
+#define  TWO_STEP_TS_VALUE_LO_TWOSTEPTIMESTAMP_LO_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_TWO_STEP_TS_VALUE_HI> - read-only
+ *
+ * Upper 16-bits of 48-bits timestamp value, applicable for two
+ * steptimestamping.
+ */
+#define EPN_TWO_STEP_TS_VALUE_HI_REG	0x314
+
+/* Upper 16-bits of two-step timestamp value for IEEE 1588timestamping. */
+#define  TWO_STEP_TS_VALUE_HI_TWOSTEPTIMESTAMP_HI_SHIFT	0
+#define  TWO_STEP_TS_VALUE_HI_TWOSTEPTIMESTAMP_HI_MASK	0xffff
+
+
+/*
+ * Register <EPN_1588_TIMESTAMP_INT_STATUS>
+ *
+ * This register contains interrupt status for 1588 timestamp.
+ * These bitsare sticky; to clear a bit, write 1 to it.
+ */
+#define EPN_1588_TIMESTAMP_INT_STATUS_REG	0x318
+
+/*
+ * Indicated 1588 timestamp packet was aborted due to illegal checksumor
+ * timestamp offsets.
+*/
+#define  EPN_1588_TIMESTAMP_INT_STATUS_INT1588PKTABORT_MASK	0x2
+
+/*
+ * Indicates timestamp in two step FIFO is available for reading.
+ * The48-bits value is provided by
+ * registersEPN_TWO_STEP_TS_VALUE_HI/EPN_TWO_STEP_TS_VALUE_LO.
+*/
+#define  EPN_1588_TIMESTAMP_INT_STATUS_INT1588TWOSTEPFFINT_MASK	0x1
+
+
+/*
+ * Register <EPN_1588_TIMESTAMP_INT_MASK>
+ *
+ * This register contains interrupt mask for 1588 timestamp interrupts.
+ */
+#define EPN_1588_TIMESTAMP_INT_MASK_REG	0x31c
+
+/* Mask 1588 timestamp packet abort. */
+#define  EPN_1588_TIMESTAMP_INT_MASK_TS1588PKTABORT_MASK_MASK	0x2
+
+/* Mask two step FIFO interrupt. */
+#define  EPN_1588_TIMESTAMP_INT_MASK_TS1588TWOSTEPFF_MASK_MASK	0x1
+
+
+/*
+ * Register <EPN_UP_PACKET_FETCH_MARGIN>
+ *
+ * Specifies the setup time margin for the BBH to fetch upstream data
+ * to transfer to EPN.
+ * This margin MUST be used when the Epn is provisioned to delay the
+ * burst termination while waiting for the Runner/BBH upstream queue status
+ * to be updated.
+ * (see prvBbhQueStatDelay) The Power-On Reset default value of 0 will
+ * disable this delay.
+ * The units are in Time Quanta (TQ = 16 nS).
+ * The estimated DDR latency is 2 [uS].
+ * So, the minimum non-zero value for this register is 223 [TQ].
+ */
+#define EPN_UP_PACKET_FETCH_MARGIN_REG	0x320
+
+/*
+ * Minimum BBH upstream packet request latency.
+ * Units are TQ.
+*/
+#define  UP_PACKET_FETCH_MARGIN_UPPACKETFETCHMARGIN_SHIFT	0
+#define  UP_PACKET_FETCH_MARGIN_UPPACKETFETCHMARGIN_MASK	0xffff
+
+
+/*
+ * Register <EPN_DN_1588_TIMESTAMP> - read-only
+ *
+ * Provides real time indication of 1588 downstream timestamp.
+ * A changein value indicates downstream traffic to BBH.
+ */
+#define EPN_DN_1588_TIMESTAMP_REG	0x324
+
+/* 32-bits timestamp value of downstream packet. */
+#define  DN_1588_TIMESTAMP_DN_1588_TS_SHIFT	0
+#define  DN_1588_TIMESTAMP_DN_1588_TS_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_PERSISTENT_REPORT_CFG>
+ *
+ * Specifies how long reports should persist.
+ */
+#define EPN_PERSISTENT_REPORT_CFG_REG	0x328
+
+/*
+ * How long report persistence lasts.
+ * (How many report persistence timer ticks after the last non-zero report
+ * should empty reports be replaced with a persistent report.)
+ * Units are report persistence timer ticks.
+*/
+#define  PERSISTENT_REPORT_CFG_CFGPERSRPTDURATION_SHIFT	16
+#define  PERSISTENT_REPORT_CFG_CFGPERSRPTDURATION_MASK	0x3ff0000
+
+/*
+ * How many clock cycles are in each report persistence timer tick.
+ * The 125 MHz core clock rate requires 125 clocks per 1 uS tick.
+ * Units are core clock cycles.
+*/
+#define  PERSISTENT_REPORT_CFG_CFGPERSRPTTICKSIZE_SHIFT	0
+#define  PERSISTENT_REPORT_CFG_CFGPERSRPTTICKSIZE_MASK	0xffff
+
+
+/*
+ * Register <EPN_PERSISTENT_REPORT_ENABLES>
+ *
+ * Per LLID enable for persistent reporting.
+ */
+#define EPN_PERSISTENT_REPORT_ENABLES_REG	0x32c
+
+/*
+ * Per LLID enable for persistent reporting.
+ * Set the bit to enablepersistent reporting.
+ * 0:
+ * Disable persistent reporting.
+ * 1:
+ * Enable persistent reporting.
+*/
+#define  PERSISTENT_REPORT_ENABLES_CFGPERSRPTENABLE_SHIFT	0
+#define  PERSISTENT_REPORT_ENABLES_CFGPERSRPTENABLE_MASK	0xffffffff
+
+
+/*
+ * Register <EPN_PERSISTENT_REPORT_REQUEST_SIZE>
+ *
+ * How many Time Quanta the persistent report should request.
+ */
+#define EPN_PERSISTENT_REPORT_REQUEST_SIZE_REG	0x330
+
+/*
+ * How many Time Quanta the persistent report should request.
+ * Smallervalues waste less upstream bandwidth.
+ * Units are Time Quanta.
+*/
+#define  PERSISTENT_REPORT_REQUEST_SIZE_CFGPERSRPTREQTQ_SHIFT	0
+#define  PERSISTENT_REPORT_REQUEST_SIZE_CFGPERSRPTREQTQ_MASK	0xffff
+
+
+/*
+ * Register <EPN_AES_CONFIGURATION_1>
+ *
+ * Allows control over reporting the extra per-packet overhead
+ * associatedwith 802.
+ * 1AE encryption.
+ * The AES overhead compensation logic supportstwo overhead modes:
+ * implicit SCI and explicit SCI.
+ * The implicit SCImode increases the per-packet overhead by 24 bytes.
+ * Explicit SCI modeincreases the per-packet overhead by 32 bytes.
+ */
+#define EPN_AES_CFG_1_REG		0x334
+
+/*
+ * LLID index 16 AES overhead mode.
+ * 0:
+ * Implicit SCI AES overhead mode.
+ * 1:
+ * Explicit SCI AES overhead mode.
+ * ..
+ * LLID index 31 AES overhead mode.
+ * 30:
+ * Implicit SCI AES overhead mode.
+ * 31:
+ * Explicit SCI AES overhead mode.
+*/
+#define  AES_CFG_1_PRVUPSTREAMAESMODE_1_SHIFT	0
+#define  AES_CFG_1_PRVUPSTREAMAESMODE_1_MASK	0xffffffff
+
+
+/*
+ * Registers <EPN_BURST_CAP_0> - <x> is [ 0 => 8 ]
+ *
+ * These registers limit how much data can be reported in REPORT frames.
+ * The EPON MAC reports data (that is sitting in the FIFO Queues) inbetween
+ * 1 and 4 "chunks".
+ * The size of these chunks is determined by theBurst Cap settings here.
+ * The units for these registers are 1 TQ for 1Gupstream mode.
+ * The units are one-tenth of a TQ for 10G upstream mode.
+ * To provision 4 TQ in 1G upstream mode, write a value of 4.
+ * To provision4 TQ in 10G upstream mode write a value of 40.
+ * Note that when register0x400 bit 21 is set that the provisioned value
+ * must be one time quantumless than the maximum value that should be
+ * reported to the OLT.
+ * When configured in shared burst cap mode, the burst cap value must
+ * bereduced by the following amounts (relative to the threshold/token
+ * sizevalue sent from the OLT)Mode Reduction Amount
+ * (bytes)---------------- -----------------------------------1G non-FEC 21
+ * + number of CTC priority levels1G FEC 193 + number of CTC priority
+ * levels10G 20 + number of CTC priority levels10G implicit SCI 44 + number
+ * of CTC priority levels10G explicit SCI 52 + number of CTC priority
+ * levels
+ */
+#define EPN_BURST_CAPx_0_7_REG(x)	(0x138 + (x) * 0x4)
+
+/*
+ * Defines maximum size of a report on LLID index 0 in Tek mode and onL2 0
+ * in multi-priority mode.
+*/
+#define  BURST_CAPx_0_7_BURSTCAP0_SHIFT	0
+#define  BURST_CAPx_0_7_BURSTCAP0_MASK	0xfffff
+
+
+/*
+ * Registers <EPN_QUEUE_LLID_MAP_0> - <x> is [ 0 => 8 ]
+ *
+ */
+#define EPN_QUEUE_LLID_MAPx_0_7_REG(x)	(0x158 + (x) * 0x4)
+
+/* Selects the L2 FIFO to which Queue 0 is mapped. */
+#define  QUEUE_LLID_MAPx_0_7_QUELLIDMAP0_SHIFT	0
+#define  QUEUE_LLID_MAPx_0_7_QUELLIDMAP0_MASK	0x1f
+
+
+/*
+ * Registers <EPN_UNUSED_TQ_CNT0> - <x> is [ 0 => 8 ]
+ *
+ * Statistics register that accumulates the number of upstream unused
+ * timequanta for an upstream LLID.
+ * The register saturates at maximum value.
+ * The register will clear upon read.
+ */
+#define EPN_UNUSED_TQ_CNTx_0_7_REG(x)	(0x1bc + (x) * 0x4)
+
+/* The number of unused time quanta for LLID 0. */
+#define  UNUSED_TQ_CNTx_0_7_UNUSEDTQCNT0_SHIFT	0
+#define  UNUSED_TQ_CNTx_0_7_UNUSEDTQCNT0_MASK	0xffffffff
+
+
+/*
+ * Registers <EPN_TX_L1S_SHP_QUE_MASK_0> - <x> is [ 0 => 8 ]
+ *
+ * This register allows the effects of the upstream shaper 0 to be masked.
+ * When multiple priority queues point to a common shaping bucket, thelow
+ * priority traffic can use all the credits prior to a high priorityframe
+ * arriving.
+ * The high priority would then incur additional latencywaiting for credits
+ * to accrue.
+ * This adds undesirable delay to the datapath.
+ * This feature separates the queues that apply credit to a shaperand those
+ * that are masked by the shaper.
+ * The Shaping function has aninput that credits the shaping bucket when a
+ * frame is sent.
+ * It also hasan output that masks a queue from being eligible to transmit
+ * when theshaping rate is violated.
+ * In this case the low priority queue will bothcredit and be masked by the
+ * shaper as typically done.
+ * The high priorityon the other hand will credit the shaper, but not be
+ * limited by it.
+ * Theshaper will then be allowed to go into a deficit.
+ * In most cases thehigh priority is a small portion of the total
+ * bandwidth.
+ * In some caseshowever it may be desired to limit the high priority
+ * traffic with adifferent shaper.
+ */
+#define EPN_TX_L1S_SHP_QUE_MASKx_0_7_REG(x)	(0x280 + (x) * 0x4)
+
+/*
+ * Each mask bit in this register controls the effect of shaper 0 onthe
+ * respective queue0:
+ * Shaper 0 can affect this queue1:
+ * Shaper 0 does not affect this queue
+*/
+#define  TX_L1S_SHP_QUE_MASKx_0_7_CFGSHPMASK0_SHIFT	0
+#define  TX_L1S_SHP_QUE_MASKx_0_7_CFGSHPMASK0_MASK	0xffffffff
+
+
+/*
+ * Registers <EPN_TX_L2S_QUE_CONFIG_0> - <x> is [ 0 => 8 ]
+ *
+ * This registers configures the base addresses of the L2 structure 0.
+ * Internally the least significant two bits are treated as constants:
+ * Thetwo least significant start address bits are 0 and the two
+ * leastsignificant end address bits are 1.
+ * Therefore the queue base addressescannot be read directly from the
+ * register contents without performingsome mental gymnastics.
+ * Try to picture a 2-bit left shift and stuffoperation.
+ * The base address granularity is 4 entries.
+ * It is criticalthat the number of entries allocated for each FIFO be
+ * sufficient toaccommodate the worst case number of frames that can be
+ * contained inthe associated Fif queue.
+ * A general rule-of-thumb would be to dividethe number of bytes in the
+ * respective Fif queue by 80.
+ * The quotient+1will be the minimum number of entries that should be
+ * allocated.
+ * Do notforget to round up to the nearest 4-entry quanta.
+ * If the L2 is beingsized to match the burst cap (instead of the entire
+ * Fif queue size) donot forget to multiply the quotient+1 by four (or by 5
+ * ifprvTekModePrefetch is set) when operating in Teknovus mode.
+ * There arefour burst cap values reported in Teknovus modeNote:
+ * This register cannot be programmed "on-the-fly".
+ * The queuestart/end address values should be changed only when its
+ * associatedclear L2 report FIFO" bit is set.
+ */
+#define EPN_TX_L2S_QUE_CONFIGx_0_7_REG(x)	(0x2a0 + (x) * 0x4)
+
+/* Queue 0 End address */
+#define  TX_L2S_QUE_CONFIGx_0_7_CFGL2SQUEEND0_SHIFT	16
+#define  TX_L2S_QUE_CONFIGx_0_7_CFGL2SQUEEND0_MASK	0xfff0000
+
+/* Queue 0 Start address */
+#define  TX_L2S_QUE_CONFIGx_0_7_CFGL2SQUESTART0_SHIFT	0
+#define  TX_L2S_QUE_CONFIGx_0_7_CFGL2SQUESTART0_MASK	0xfff
+
+
+/*
+ * Registers <EPN_TX_CTC_BURST_LIMIT_0> - <x> is [ 0 => 8 ]
+ *
+ * This register configures the maximum number of bytes that L2 queue 0can
+ * transmit during any given round.
+ * A round ends when all L2 queueshave reached their respective burst limit
+ * or there is no more data totransmit.
+ * Note that setting a burst limit to zero enables therespective L2 queue
+ * to transmit in strict priority.
+ * Also, any burstlimits that are set to 1 will cause those L2 queues to
+ * transmit inround-robin" fashion.
+ * It is possible to allocate bandwidth as apercentage.
+ * Simply multiply the desired percentage by 2000 bytes (themaximum frame
+ * length) and write that value in the associated burstlimit register.
+ * Example bandwidth sharing configuration:
+ * Priority Burst Limit Description-------- ----------- ------------0 0
+ * High priority unlimited bandwidth1 0 Low priority unlimited bandwidth2 1
+ * Equal priority UNI 1 unlimited bandwidth3 1 Equal priority UNI 2
+ * unlimited bandwidth4 1 Equal priority UNI 3 unlimited bandwidth5 1 Equal
+ * priority UNI 4 unlimited bandwidth6 0 High priority best effort
+ * bandwidth7 0 Low priority best effort bandwidthPriority 0 is for the
+ * highest priority traffic.
+ * i.
+ * e.
+ * , ManagementtrafficPriority 1 is for real-time traffic.
+ * i.
+ * e.
+ * , VOIPPriorities 2, 3, 4 and 5 equally share bandwidth, i.
+ * e.
+ * Premium businesstraffic.
+ * Priorities 6 and 7 provide two classes of best effort traffic, i.
+ * e.
+ * twoclasses of consumer trafficPlease note that the EPN shaper is the
+ * mechanism used to limit theamount of traffic.
+ */
+#define EPN_TX_CTC_BURST_LIMITx_0_7_REG(x)	(0x2cc + (x) * 0x4)
+
+/* L2 queue 0 CTC mode burst limit */
+#define  TX_CTC_BURST_LIMITx_0_7_PRVBURSTLIMIT0_SHIFT	0
+#define  TX_CTC_BURST_LIMITx_0_7_PRVBURSTLIMIT0_MASK	0x3ffff
+
+
+/*
+ * Registers <EPN_BURST_CAP_8> - <x> is [ 0 => 24 ]
+ *
+ * These registers limit how much data can be reported in REPORT frames.
+ * The EPON MAC reports data (that is sitting in the FIFO Queues) inbetween
+ * 1 and 4 "chunks".
+ * The size of these chunks is determined by theBurst Cap settings here.
+ * The units for these registers are 1 TQ for 1Gupstream mode.
+ * The units are one-tenth of a TQ for 10G upstream mode.
+ * To provision 4 TQ in 1G upstream mode, write a value of 4.
+ * To provision4 TQ in 10G upstream mode write a value of 40.
+ * Note that when register0x400 bit 21 is set that the provisioned value
+ * must be one time quantumless than the maximum value that should be
+ * reported to the OLT.
+ * When configured in shared burst cap mode, the burst cap value must
+ * bereduced by the following amounts (relative to the threshold/token
+ * sizevalue sent from the OLT)Mode Reduction Amount
+ * (bytes)---------------- -----------------------------------1G non-FEC 21
+ * + number of CTC priority levels1G FEC 193 + number of CTC priority
+ * levels10G 20 + number of CTC priority levels10G implicit SCI 44 + number
+ * of CTC priority levels10G explicit SCI 52 + number of CTC priority
+ * levels
+ */
+#define EPN_BURST_CAPx_8_31_REG(x)	(0x338 + (x) * 0x4)
+
+/*
+ * Defines maximum size of a report on LLID index 8 in Tek mode and onL2 8
+ * in multi-priority mode.
+*/
+#define  BURST_CAPx_8_31_BURSTCAP8_SHIFT	0
+#define  BURST_CAPx_8_31_BURSTCAP8_MASK	0xfffff
+
+
+/*
+ * Registers <EPN_QUEUE_LLID_MAP_8> - <x> is [ 0 => 24 ]
+ *
+ */
+#define EPN_QUEUE_LLID_MAPx_8_31_REG(x)	(0x398 + (x) * 0x4)
+
+/* Selects the L2 FIFO to which Queue 8 is mapped. */
+#define  QUEUE_LLID_MAPx_8_31_QUELLIDMAP8_SHIFT	0
+#define  QUEUE_LLID_MAPx_8_31_QUELLIDMAP8_MASK	0x1f
+
+
+/*
+ * Registers <EPN_UNUSED_TQ_CNT8> - <x> is [ 0 => 24 ]
+ *
+ * Statistics register that accumulates the number of upstream unused
+ * timequanta for an upstream LLID.
+ * The register saturates at maximum value.
+ * The register will clear upon read.
+ */
+#define EPN_UNUSED_TQ_CNTx_8_31_REG(x)	(0x3f8 + (x) * 0x4)
+
+/* The number of unused time quanta for LLID 8. */
+#define  UNUSED_TQ_CNTx_8_31_UNUSEDTQCNT8_SHIFT	0
+#define  UNUSED_TQ_CNTx_8_31_UNUSEDTQCNT8_MASK	0xffffffff
+
+
+/*
+ * Registers <EPN_TX_L1S_SHP_QUE_MASK_8> - <x> is [ 0 => 24 ]
+ *
+ * This register allows the effects of the upstream shaper 8 to be masked.
+ * When multiple priority queues point to a common shaping bucket, thelow
+ * priority traffic can use all the credits prior to a high priorityframe
+ * arriving.
+ * The high priority would then incur additional latencywaiting for credits
+ * to accrue.
+ * This adds undesirable delay to the datapath.
+ * This feature separates the queues that apply credit to a shaperand those
+ * that are masked by the shaper.
+ * The Shaping function has aninput that credits the shaping bucket when a
+ * frame is sent.
+ * It also hasan output that masks a queue from being eligible to transmit
+ * when theshaping rate is violated.
+ * In this case the low priority queue will bothcredit and be masked by the
+ * shaper as typically done.
+ * The high priorityon the other hand will credit the shaper, but not be
+ * limited by it.
+ * Theshaper will then be allowed to go into a deficit.
+ * In most cases thehigh priority is a small portion of the total
+ * bandwidth.
+ * In some caseshowever it may be desired to limit the high priority
+ * traffic with adifferent shaper.
+ */
+#define EPN_TX_L1S_SHP_QUE_MASKx_8_31_REG(x)	(0x5d8 + (x) * 0x4)
+
+/*
+ * Each mask bit in this register controls the effect of shaper 8 onthe
+ * respective queue0:
+ * Shaper 8 can affect this queue1:
+ * Shaper 8 does not affect this queue
+*/
+#define  TX_L1S_SHP_QUE_MASKx_8_31_CFGSHPMASK8_SHIFT	0
+#define  TX_L1S_SHP_QUE_MASKx_8_31_CFGSHPMASK8_MASK	0xffffffff
+
+
+/*
+ * Registers <EPN_TX_L2S_QUE_CONFIG_8> - <x> is [ 0 => 24 ]
+ *
+ * This registers configures the base addresses of the L2 structure 8.
+ * Internally the least significant two bits are treated as constants:
+ * Thetwo least significant start address bits are 0 and the two
+ * leastsignificant end address bits are 1.
+ * Therefore the queue base addressescannot be read directly from the
+ * register contents without performingsome mental gymnastics.
+ * Try to picture a 2-bit left shift and stuffoperation.
+ * The base address granularity is 4 entries.
+ * It is criticalthat the number of entries allocated for each FIFO be
+ * sufficient toaccommodate the worst case number of frames that can be
+ * contained inthe associated Fif queue.
+ * A general rule-of-thumb would be to dividethe number of bytes in the
+ * respective Fif queue by 80.
+ * The quotient+1will be the minimum number of entries that should be
+ * allocated.
+ * Do notforget to round up to the nearest 4-entry quanta.
+ * If the L2 is beingsized to match the burst cap (instead of the entire
+ * Fif queue size) donot forget to multiply the quotient+1 by four (or by 5
+ * ifprvTekModePrefetch is set) when operating in Teknovus mode.
+ * There arefour burst cap values reported in Teknovus modeNote:
+ * This register cannot be programmed "on-the-fly".
+ * The queuestart/end address values should be changed only when its
+ * associatedclear L2 report FIFO" bit is set.
+ */
+#define EPN_TX_L2S_QUE_CONFIGx_8_31_REG(x)	(0x638 + (x) * 0x4)
+
+/* Queue 8 End address */
+#define  TX_L2S_QUE_CONFIGx_8_31_CFGL2SQUEEND8_SHIFT	16
+#define  TX_L2S_QUE_CONFIGx_8_31_CFGL2SQUEEND8_MASK	0xfff0000
+
+/* Queue 8 Start address */
+#define  TX_L2S_QUE_CONFIGx_8_31_CFGL2SQUESTART8_SHIFT	0
+#define  TX_L2S_QUE_CONFIGx_8_31_CFGL2SQUESTART8_MASK	0xfff
+
+
+/*
+ * Registers <EPN_TX_CTC_BURST_LIMIT_8> - <x> is [ 0 => 24 ]
+ *
+ * This register configures the maximum number of bytes that L2 queue 8can
+ * transmit during any given round.
+ * A round ends when all L2 queueshave reached their respective burst limit
+ * or there is no more data totransmit.
+ * Note that setting a burst limit to zero enables therespective L2 queue
+ * to transmit in strict priority.
+ * Also, any burstlimits that are set to 1 will cause those L2 queues to
+ * transmit inround-robin" fashion.
+ * It is possible to allocate bandwidth as apercentage.
+ * Simply multiply the desired percentage by 2000 bytes (themaximum frame
+ * length) and write that value in the associated burstlimit register.
+ * Example bandwidth sharing configuration:
+ * Priority Burst Limit Description-------- ----------- ------------0 0
+ * High priority unlimited bandwidth1 0 Low priority unlimited bandwidth2 1
+ * Equal priority UNI 1 unlimited bandwidth3 1 Equal priority UNI 2
+ * unlimited bandwidth4 1 Equal priority UNI 3 unlimited bandwidth5 1 Equal
+ * priority UNI 4 unlimited bandwidth6 0 High priority best effort
+ * bandwidth7 0 Low priority best effort bandwidthPriority 0 is for the
+ * highest priority traffic.
+ * i.
+ * e.
+ * , ManagementtrafficPriority 1 is for real-time traffic.
+ * i.
+ * e.
+ * , VOIPPriorities 2, 3, 4 and 5 equally share bandwidth, i.
+ * e.
+ * Premium businesstraffic.
+ * Priorities 6 and 7 provide two classes of best effort traffic, i.
+ * e.
+ * twoclasses of consumer trafficPlease note that the EPN shaper is the
+ * mechanism used to limit theamount of traffic.
+ */
+#define EPN_TX_CTC_BURST_LIMITx_8_31_REG(x)	(0x698 + (x) * 0x4)
+
+/* L2 queue 8 CTC mode burst limit */
+#define  TX_CTC_BURST_LIMITx_8_31_PRVBURSTLIMIT8_SHIFT	0
+#define  TX_CTC_BURST_LIMITx_8_31_PRVBURSTLIMIT8_MASK	0x3ffff
+
+
+/*
+ * Registers <EPN_10G_ABC_SIZE0> - <x> is [ 0 => 48 ]
+ *
+ * How many additional Time Quanta the queue sets should use in 10G mode.
+ * This feature is intended to be used inSIEPON mode.
+ */
+#define EPN_10G_ABC_SIZEx_REG(x)	(0x6f8 + (x) * 0x4)
+
+/*
+ * How many additional Time Quanta LLID 0 queue set 2 should use in
+ * 10Gmode.
+ * If all three ABC registers are non-zero; this LLID's effective burstcap
+ * will be:
+ * burstCap0 + cfgAddBurstCap0_1 + cfgAddBurstCap0_2 +cfgAddBurstCap0_3.
+ * Units are Time Quanta.
+*/
+#define  EPN_10G_ABC_SIZEx_CFGADDBURSTCAP0_2_SHIFT	16
+#define  EPN_10G_ABC_SIZEx_CFGADDBURSTCAP0_2_MASK	0xffff0000
+
+/*
+ * How many additional Time Quanta LLID 0 queue set 1 should use in
+ * 10Gmode.
+ * If all three ABC registers are non-zero; this LLID's effective burstcap
+ * will be:
+ * burstCap0 + cfgAddBurstCap0_1 + cfgAddBurstCap0_2 +cfgAddBurstCap0_3.
+ * Units are Time Quanta.
+*/
+#define  EPN_10G_ABC_SIZEx_CFGADDBURSTCAP0_1_SHIFT	0
+#define  EPN_10G_ABC_SIZEx_CFGADDBURSTCAP0_1_MASK	0xffff
+
+
+#endif /* ! EPON_EPN_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_epn_onu_mac_addr.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_epn_onu_mac_addr.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_epn_onu_mac_addr.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_epn_onu_mac_addr.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,22 @@
+#ifndef EPON_EPN_ONU_MAC_ADDR_H_
+#define EPON_EPN_ONU_MAC_ADDR_H_
+
+/* relative to epon */
+
+/*
+ * Register <EPN_ONU_MAC_ADDR_0_LO>
+ *
+ * These registers store a MAC address for each bidirectional ONU LLID.
+ * These addresses are inserted as the SA in REPORT frames sent upstream.
+ * Note:
+ * ONU MAC Address registers 8 through 23 are used only in
+ * loopbackoperation.
+ */
+
+#define EPN_ONU_MAC_ADDRx_0_7_LO_REG(x)		(0x1f0 + (x) * 8)
+#define EPN_ONU_MAC_ADDRx_0_7_HI_REG(x)		(0x1f4 + (x) * 8)
+
+#define EPN_ONU_MAC_ADDRx_8_31_LO_REG(x)	(0x458 + (x) * 8)
+#define EPN_ONU_MAC_ADDRx_8_31_HI_REG(x)	(0x45c + (x) * 8)
+
+#endif /* ! EPON_EPN_ONU_MAC_ADDR_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_epn_tx_l1s_shp.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_epn_tx_l1s_shp.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_epn_tx_l1s_shp.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_epn_tx_l1s_shp.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,53 @@
+#ifndef EPON_EPN_TX_L1S_SHP_H_
+#define EPON_EPN_TX_L1S_SHP_H_
+
+/* relative to epon */
+
+/*
+ * Register <EPN_TX_L1S_SHP_CONFIG_0>
+ *
+ * This register configures upstream shaper i.
+ * There are 32 shapersavailable for shaping upstream traffic.
+ * The cfgShpEn bits are bit wiseselects for enabling shaping of the
+ * associated queues.
+ * Each shaper canhave shaping enabled on select queues or on all of the
+ * queues.
+ * If thebit is set, the corresponding shaper will be used for shaping
+ * creditsand control of its associated queue.
+ * The cfgShpRate and cfgMaxBstSizedefine the shaping.
+ * The cfgShpRate value represents the number of bytesthat are added to the
+ * shaper's byte credit accumulator each clockcycle.
+ * Given a 125 MHz clock-cycle; the cfgShpRate is in units of 2^-19Gbps
+ * (~1907.
+ * 34863 bps).
+ * The maximum burst size is in units of 256bytes.
+ */
+#define EPN_TX_L1S_SHP_CONFIGx_0_7_REG(x)	(0x238 + (x) * 8)
+
+/* Shaper i Rate */
+#define  CONFIG_CFGSHPRATE_SHIFT	8
+#define  CONFIG_CFGSHPRATE_MASK		0x7fffff00
+
+/* Shaper i Maximum Burst Size */
+#define  CONFIG_CFGSHPBSTSIZE_SHIFT	0
+#define  CONFIG_CFGSHPBSTSIZE_MASK	0xff
+
+
+/*
+ * Register <EPN_TX_L1S_SHP_QUE_EN_31>
+ *
+ * The bit wise selects for enabling shaping of the associated queues.
+ * Each shaper can have shaping enabled on select queues or on all of
+ * thequeues.
+ * If the bit is set, the corresponding shaper will be used forshaping
+ * credits and control of its associated queue.
+ */
+#define EPN_TX_L1S_SHP_QUE_ENx_0_7_REG(x)	(0x23c + (x) * 8)
+#define EPN_TX_L1S_SHP_QUE_EN_REG	0x4
+
+/* Set the bit(s) corresponding to the queue(s) this shaper should police. */
+#define  QUE_EN_CFGSHPEN_SHIFT		0
+#define  QUE_EN_CFGSHPEN_MASK		0xffffffff
+
+
+#endif /* ! EPON_EPN_TX_L1S_SHP_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_epon_top.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_epon_top.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_epon_top.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_epon_top.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,306 @@
+#ifndef EPON_EPON_TOP_H_
+#define EPON_EPON_TOP_H_
+
+/* relative to epon */
+#define EPON_TOP_OFFSET_0		0x0
+
+/*
+ * Register <EPON_TOP_SCRATCH>
+ *
+ * Register used for testing read and write access into epon_top block.
+ */
+#define EPON_TOP_SCRATCH_REG		0x0
+
+/* Scratch pad. */
+#define  SCRATCH_SCRATCH_SHIFT		0
+#define  SCRATCH_SCRATCH_MASK		0xffffffff
+
+
+/*
+ * Register <EPON_TOP_RESET>
+ *
+ */
+#define EPON_TOP_RESET_REG		0x4
+
+/* Active low reset for XPcsRx module. */
+#define  RESET_XPCSRXRST_N_MASK		0x80
+
+/* Active low reset for XPcsTx module. */
+#define  RESET_XPCSTXRST_N_MASK		0x40
+
+/* Active low reset for XIF module. */
+#define  RESET_XIFRST_N_MASK		0x20
+
+/* Active low reset for Tod module. */
+#define  RESET_TODRST_N_MASK		0x10
+
+/* Active low reset for ClkPrgSwch module. */
+#define  RESET_CLKPRGRST_N_MASK		0x8
+
+/* Active low reset for Nco module. */
+#define  RESET_NCORST_N_MASK		0x4
+
+/* Active low reset for Lif module. */
+#define  RESET_LIFRST_N_MASK		0x2
+
+/* Active low reset for Epn module. */
+#define  RESET_EPNRST_N_MASK		0x1
+
+
+/*
+ * Register <EPON_TOP_INTERRUPT>
+ *
+ * Top level interrupts for all EPON blocks.
+ */
+#define EPON_TOP_INTERRUPT_REG		0x8
+
+/* Interrupt from 1 pps input. */
+#define  INTERRUPT_INT_1PPS_MASK	0x80
+
+/* Interrupt from XPcsTx module. */
+#define  INTERRUPT_INT_XPCS_TX_MASK	0x40
+
+/* Interrupt from XPcsRx module. */
+#define  INTERRUPT_INT_XPCS_RX_MASK	0x20
+
+/* Interrupt from XIF module. */
+#define  INTERRUPT_INT_XIF_MASK		0x10
+
+/* Interrupt from NCO module. */
+#define  INTERRUPT_INT_NCO_MASK		0x8
+
+/* Interrupt from LIF module. */
+#define  INTERRUPT_INT_LIF_MASK		0x4
+
+/* Interrupt from EPN module. */
+#define  INTERRUPT_INT_EPN_MASK		0x1
+
+
+/*
+ * Register <EPON_TOP_INTERRUPT_MASK>
+ *
+ * Top level interrupts for all EPON blocks.
+ * For any bit, a value of 1will enable the interrupt, and a value of 0
+ * will mask the interrupt.
+ * By default, all interrupts are masked.
+ */
+#define EPON_TOP_INTERRUPT_MASK_REG	0xc
+
+/* Mask for interrupt from 1 pps input. */
+#define  INTERRUPT_MASK_INT_1PPS_MASK_MASK	0x80
+
+/* Mask for interrupt from XPcsTx module. */
+#define  INTERRUPT_MASK_INT_XPCS_TX_MASK_MASK	0x40
+
+/* Mask for interrupt from XPcsRx module. */
+#define  INTERRUPT_MASK_INT_XPCS_RX_MASK_MASK	0x20
+
+/* Mask for interrupt from XIF module. */
+#define  INTERRUPT_MASK_INT_XIF_MASK_MASK	0x10
+
+/* Mask for interrupt from NCO module. */
+#define  INTERRUPT_MASK_INT_NCO_MASK_MASK	0x8
+
+/* Mask for interrupt from LIF module. */
+#define  INTERRUPT_MASK_INT_LIF_MASK_MASK	0x4
+
+/* Mask for interrupt from EPN module. */
+#define  INTERRUPT_MASK_INT_EPN_MASK_MASK	0x1
+
+
+/*
+ * Register <EPON_TOP_CONTROL>
+ *
+ * High level configuration for the EPON block.
+ */
+#define EPON_TOP_CONTROL_REG		0x10
+
+/*
+ * 0:1G downstream mode
+ * 1:2G downstream mode
+*/
+#define  CONTROL_CFGTWOGIGPONDNS_MASK	0x4
+
+/*
+ * 0:
+ * 1G upstream mode 1:
+ * 10G upstream mode
+*/
+#define  CONTROL_CFGTENGIGPONUP_MASK	0x2
+
+/*
+ * 0:
+ * 1G downstream mode 1:
+ * 10G downstream mode
+*/
+#define  CONTROL_CFGTENGIGDNS_MASK	0x1
+
+
+/*
+ * Register <ONE_PPS_MPCP_OFFSET>
+ *
+ * High level configuration for the EPON block.
+ */
+#define EPON_TOP_ONE_PPS_MPCP_OFFSET_REG	0x14
+
+/* Provides additional offset to MPCP sampling due to 1 pps input assertion. */
+#define  ONE_PPS_MPCP_OFFSET_CFG_1PPS_MPCP_OFFSET_SHIFT	0
+#define  ONE_PPS_MPCP_OFFSET_CFG_1PPS_MPCP_OFFSET_MASK	0xffffffff
+
+
+/*
+ * Register <ONE_PPS_CAPTURED_MPCP_TIME> - read-only
+ *
+ * High level configuration for the EPON block.
+ */
+#define EPON_TOP_ONE_PPS_CAPTURED_MPCP_TIME_REG	0x18
+
+/* Captured MPCP time due to 1 pps input assertion. */
+#define  ONE_PPS_CAPTURED_MPCP_TIME_CAPTURE_1PPS_MPCP_TIME_SHIFT	0
+#define  ONE_PPS_CAPTURED_MPCP_TIME_CAPTURE_1PPS_MPCP_TIME_MASK	0xffffffff
+
+
+/*
+ * Register <EPON_TOP_TOD_CONFIG>
+ *
+ * Register used for 48-bit timestamp Time Of Day (TOD) configuration.
+ */
+#define EPON_TOP_TOD_CONFIG_REG		0x1c
+
+/*
+ * When this bit is set, hardware will update the internal
+ * nanosecondcounter, cfg_tod_ns[31:
+ * 0], when the local MPCP time equalscfg_tod_mpcp[31:
+ * 0].
+ * Software should set this bit and wait untilhardware clears it before
+ * setting it again.
+*/
+#define  TOD_CONFIG_CFG_TOD_LOAD_NS_MASK	0x80000000
+
+/*
+ * When this bit is set, hardware will latch the internal ts48, ns,
+ * andseconds counters.
+ * Software should set this bit and wait untilhardware clears it before
+ * setting it again.
+ * Once hardware hascleared the bit, the timers are available to be read.
+*/
+#define  TOD_CONFIG_CFG_TOD_READ_MASK	0x800000
+
+/*
+ * Select the block to read the timers from.
+ * 0:
+ * Reserved.
+ * 1:
+ * 1G EPON.
+ * 2:
+ * 10G EPON.
+ * 3:
+ * AE.
+ * This field should not be changed whilecfg_tod_read is set.
+*/
+#define  TOD_CONFIG_CFG_TOD_READ_SEL_SHIFT	21
+#define  TOD_CONFIG_CFG_TOD_READ_SEL_MASK	0x600000
+
+/*
+ * Allow 1PPS pulse to clear the counter if set.
+ * If not set, the 1PPSpulse will have no effect on the TS48.
+*/
+#define  TOD_CONFIG_CFG_TOD_PPS_CLEAR_MASK	0x100000
+
+/*
+ * The rising edge will be latched, and cfg_tod_seconds will be loadedon
+ * the next 1PPS pulse or when the next second rolls over.
+*/
+#define  TOD_CONFIG_CFG_TOD_LOAD_MASK	0x80000
+
+/* Number of seconds to be loaded. */
+#define  TOD_CONFIG_CFG_TOD_SECONDS_SHIFT	0
+#define  TOD_CONFIG_CFG_TOD_SECONDS_MASK	0x7ffff
+
+
+/*
+ * Register <EPON_TOP_TOD_NS>
+ *
+ * Register used to load nanosecond counter.
+ */
+#define EPON_TOP_TOD_NS_REG		0x20
+
+/*
+ * Value to be loaded when the MPCP time reaches cfg_tod_mpcp.
+ * Thisfield should not be updated while cfg_tod_load_ns is set.
+*/
+#define  TOD_NS_CFG_TOD_NS_SHIFT	0
+#define  TOD_NS_CFG_TOD_NS_MASK		0xffffffff
+
+
+/*
+ * Register <EPON_TOP_TOD_MPCP>
+ *
+ * Register used to hold MPCP value that will be used to determine whenthe
+ * nanosecond counter is updated.
+ * This field should not be updatedwhile cfg_tod_load_ns is set.
+ */
+#define EPON_TOP_TOD_MPCP_REG		0x24
+
+/* MPCP value to wait for before loading cfg_tod_ns. */
+#define  TOD_MPCP_CFG_TOD_MPCP_SHIFT	0
+#define  TOD_MPCP_CFG_TOD_MPCP_MASK	0xffffffff
+
+
+/*
+ * Register <EPON_TOP_TS48_MSB> - read-only
+ *
+ * Register used for 48-bit timestamp Time Of Day (TOD) read back
+ * fromEPON/AE block.
+ */
+#define EPON_TOP_TS48_MSB_REG		0x28
+
+/* Upper 16-bits of TS48. */
+#define  TS48_MSB_TS48_EPON_READ_MSB_SHIFT	0
+#define  TS48_MSB_TS48_EPON_READ_MSB_MASK	0xffff
+
+
+/*
+ * Register <EPON_TOP_TS48_LSB> - read-only
+ *
+ * Register used for 48-bit timestamp Time Of Day (TOD) read back
+ * fromEPON/AE block.
+ */
+#define EPON_TOP_TS48_LSB_REG		0x2c
+
+/* Lower 32-bits of TS48. */
+#define  TS48_LSB_TS48_EPON_READ_LSB_SHIFT	0
+#define  TS48_LSB_TS48_EPON_READ_LSB_MASK	0xffffffff
+
+
+/*
+ * Register <EPON_TOP_TSEC> - read-only
+ *
+ * Register used for seconds read back from EPON/AE block.
+ */
+#define EPON_TOP_TSEC_REG		0x30
+
+/*
+ * Seconds[18:
+ * 0].
+*/
+#define  TSEC_TSEC_EPON_READ_SHIFT	0
+#define  TSEC_TSEC_EPON_READ_MASK	0x7ffff
+
+
+/*
+ * Register <EPON_TOP_TNS_EPON> - read-only
+ *
+ * Register used for nanoseconds read back from EPON/AE block.
+ */
+#define EPON_TOP_TNS_EPON_REG		0x34
+
+/*
+ * Nanoseconds[31:
+ * 0].
+*/
+#define  TNS_EPON_TNS_EPON_READ_SHIFT	0
+#define  TNS_EPON_TNS_EPON_READ_MASK	0xffffffff
+
+
+#endif /* ! EPON_EPON_TOP_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_lif.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_lif.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_lif.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_lif.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,2489 @@
+#ifndef EPON_LIF_H_
+#define EPON_LIF_H_
+
+/* relative to epon */
+#define LIF_OFFSET_0			0x1800
+
+/*
+ * Register <LIF_PON_CONTROL>
+ *
+ * This register controls and configures the LIF PON module.
+ * Configuration bits dealing directly with the operational mode, laser
+ * control, and loopback are here.
+ */
+#define LIF_PON_CONTROL_REG		0x0
+
+/* Enable adaptive short preamble for P2P mode. */
+#define  LIF_PON_CONTROL_CFP2PMODEADAPTIVESHORTPRE_MASK	0x40000000
+
+/* Disable runt packet filtering. */
+#define  LIF_PON_CONTROL_CFGDISRUNTFILTER_MASK	0x20000000
+
+/*
+ * Maximum number of allowable comma errors before LOS is asserted.
+ * 0:
+ * Functionality Disabled
+*/
+#define  LIF_PON_CONTROL_CFMAXCOMMAERRCNT_SHIFT	25
+#define  LIF_PON_CONTROL_CFMAXCOMMAERRCNT_MASK	0x1e000000
+
+/*
+ * Selects between three 802.
+ * 3 synchronization state machines:
+ * 0:
+ * 802.
+ * 3ah-65 FEC (single code word traversal)1:
+ * 802.
+ * 3-36 non FEC (single code word traversal)2:
+ * Legacy (based on FEC, but traverses state machine two code words at a
+ * time).
+ * 3:
+ * Reserved
+*/
+#define  LIF_PON_CONTROL_CFSYNCSMSELECT_SHIFT	23
+#define  LIF_PON_CONTROL_CFSYNCSMSELECT_MASK	0x1800000
+
+/*
+ * Force aborts any NonFec frames in the PON module.
+ * This abort occurs before the FecRx module; therefore any frames dropped
+ * at this point will not have statistics tabulated.
+ * 0:
+ * Disabled1:
+ * Enabled
+*/
+#define  LIF_PON_CONTROL_CFPONRXFORCENONFECABORT_MASK	0x400000
+
+/*
+ * Force aborts any FEC frames in the PON module.
+ * This abort occurs before the FecRx module; therefore any frames dropped
+ * at this point will not have statistics tabulated.
+ * 0:
+ * Disabled1:
+ * Enabled
+*/
+#define  LIF_PON_CONTROL_CFPONRXFORCEFECABORT_MASK	0x200000
+
+/*
+ * Controls the order of the 10B/20B sent from the SERDES to the LIF module.
+ * 0:
+ * Receive data is unflipped.
+ * 1:
+ * Receive data is flipped.
+ * Default :
+ * 1
+*/
+#define  LIF_PON_CONTROL_CFGRXDATABITFLIP_MASK	0x100000
+
+/*
+ * Enable upstream padding of MPCP/OAM in an 802.
+ * 1ae encrypted link to include security overhead.
+*/
+#define  LIF_PON_CONTROL_CFGENMPCPOAMPAD_MASK	0x40000
+
+/* Enable IDLE packet support to prevent upstream underrun. */
+#define  LIF_PON_CONTROL_CFGENTXIDLEPKT_MASK	0x20000
+
+/*
+ * Lock synchronization state indefinitely.
+ * This bit must be used in conjunction with cfEnableExtendSync.
+*/
+#define  LIF_PON_CONTROL_CFENABLESOFTWARESYNCHOLD_MASK	0x10000
+
+/*
+ * Extend synchronization state.
+ * This can be used to improve FEC gain.
+*/
+#define  LIF_PON_CONTROL_CFENABLEEXTENDSYNC_MASK	0x8000
+
+/*
+ * Allow alignment state machine to achieve code word lock in three idles,
+ * as opposed to four.
+*/
+#define  LIF_PON_CONTROL_CFENABLEQUICKSYNC_MASK	0x4000
+
+/*
+ * No function in this release of EPON.
+ * Default:
+ * 1
+*/
+#define  LIF_PON_CONTROL_CFPPSEN_MASK	0x2000
+
+/*
+ * 0:
+ * 1PPS is not aligned to the 10MHz clock.
+ * 1:
+ * 1PPS is aligned to the positive edge of the 10MHz clock.
+ * Default:
+ * 1
+*/
+#define  LIF_PON_CONTROL_CFPPSCLKRBC_MASK	0x1000
+
+/*
+ * 0:
+ * Disable Loop Back from Downstream to Upstream in the LIF.
+ * 1:
+ * Enable Loop Back from Downstream to Upstream in the LIF.
+*/
+#define  LIF_PON_CONTROL_CFRX2TXLPBACK_MASK	0x800
+
+/*
+ * 0:
+ * Disable Loop Back from Upstream to Downstream in the LIF.
+ * 1:
+ * Enable Loop Back from Upstream to Downstream in the LIF.
+ * Note:
+ * Due to a bug in BCM6838A0, RX stats will not tabulatecorrectly while in
+ * this loopback mode.
+ * TX stats should be usedinstead.
+*/
+#define  LIF_PON_CONTROL_CFTX2RXLPBACK_MASK	0x400
+
+/*
+ * 0:
+ * IDLE words continue to transmit while laser is turned off.
+ * 1:
+ * Transmitted data bus is de-asserted to zero when laser is turned off.
+*/
+#define  LIF_PON_CONTROL_CFTXDATAENDURLON_MASK	0x200
+
+/*
+ * 0:
+ * LIF sends standard EPON Preamble.
+ * 1:
+ * LIF sends Ethernet Preamble for Point-to-Point operation.
+*/
+#define  LIF_PON_CONTROL_CFP2PMODE_MASK	0x100
+
+/*
+ * 0:
+ * Send Standard P2P Ethernet Preamble.
+ * 1:
+ * Send Short (7 byte) P2P Ethernet Preamble.
+ * This bit must be set with cfP2PMode if full line rate is desired with odd
+ * sized frames.
+ * If this bit is not set in P2P mode, only even sized frames are capable of
+ * line rate.
+ * The link partner must also be short preamble receive capable.
+*/
+#define  LIF_PON_CONTROL_CFP2PSHORTPRE_MASK	0x80
+
+/*
+ * The output enable control for the 1Gbps upstream laser control (TXEN)
+ * pin.
+ * 0:
+ * 1G laser control pin is tri-stated1:
+ * 1G laser control is driven by the LIF module.
+*/
+#define  LIF_PON_CONTROL_CFLASEREN_MASK	0x40
+
+/*
+ * 0:
+ * Laser is turned on at grant start time.
+ * 1:
+ * Laser is turned on continuously.
+*/
+#define  LIF_PON_CONTROL_CFTXLASERON_MASK	0x20
+
+/*
+ * 0:
+ * Configures the Laser On signal as active low.
+ * 1:
+ * Configures the Laser On signal as active high.
+*/
+#define  LIF_PON_CONTROL_CFTXLASERONACTHI_MASK	0x10
+
+/*
+ * Resets the transmit side of the LIF.
+ * 0:
+ * Reset LIF TX.
+ * 1:
+ * Normal operation.
+*/
+#define  LIF_PON_CONTROL_LIFTXRSTN_PRE_MASK	0x8
+
+/*
+ * Resets the receive side of the LIF.
+ * 0:
+ * Reset LIF RX.
+ * 1:
+ * Normal operation.
+*/
+#define  LIF_PON_CONTROL_LIFRXRSTN_PRE_MASK	0x4
+
+/*
+ * Enables LIF TX for operation.
+ * 0:
+ * Disable the external interface to and from the LIF TX.
+ * 1:
+ * Enable the external interface to and from the LIF TX.
+*/
+#define  LIF_PON_CONTROL_LIFTXEN_MASK	0x2
+
+/*
+ * Enables LIF RX for operation.
+ * 0:
+ * Disable the external interface to and from the LIF RX.
+ * 1:
+ * Enable the external interface to and from the LIF RX.
+*/
+#define  LIF_PON_CONTROL_LIFRXEN_MASK	0x1
+
+
+/*
+ * Register <LIF_PON_INTER_OP_CONTROL>
+ *
+ * This register controls and configures the LIF PON module
+ * specifically dealing with interoperability.
+ */
+#define LIF_PON_INTER_OP_CONTROL_REG	0x4
+
+/*
+ * Units are in code group pairs (two 10b code groups).
+ * 0:
+ * Disable Ipg Filter (Legacy Behavior)1-3:
+ * DO NOT USE, Illegal Values, allows faster than line rate operation.
+ * 4-15:
+ * Allow 4 to 15 code group pairs to elapse after a frame before becoming
+ * receptive to a SOP or SFEC again.
+ * Default:
+ * 5 code group pairs or "10 bytes of IPG".
+ * Per spec, "12 bytes of IPG" is line rate, but the default setting allows
+ * for a rate that is slightly faster than line rate for tolerance purposes.
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFIPGFILTER_SHIFT	27
+#define  LIF_PON_INTER_OP_CONTROL_CFIPGFILTER_MASK	0x78000000
+
+/*
+ * Allows for control of logic which blocks laser enable based on condition
+ * of downstream sync.
+ * 0:
+ * Allow for los to block laser enable.
+ * 1:
+ * Laser will toggle regardless of state of downstream code group sync
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFDISABLELOSLASERBLOCK_MASK	0x4000000
+
+/*
+ * All unmapped LLIDs will be redirected and mapped to Index 0.
+ * 0:
+ * Unmapped LLIDs will appear to be unmapped to EPN.
+ * 1:
+ * Unmapped LLIDs will appear on Index 0 to EPN.
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFGLLIDPROMISCUOUSMODE_MASK	0x2000000
+
+/*
+ * Masks MSB of 16 bit raw LLID for Index translation.
+ * 0:
+ * Don't mask, look at full 16 bits.
+ * 1:
+ * Mask bit[15], map based on [14:
+ * 0].
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFGLLIDMODMSK_MASK	0x1000000
+
+/*
+ * Allows SFEC to consume 4 bytes of IPG per standard.
+ * 0:
+ * 4 Additional bytes of IPG will be added for SFEC.
+ * 1:
+ * SFEC will carve 4 bytes out of existing IPG.
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFUSEFECIPG_MASK	0x800000
+
+/*
+ * Enable inverted CRC-8 checking.
+ * 0:
+ * Disable CRC-8 checking.
+ * Packets with inverted CRC-8 are discarded.
+ * 1:
+ * Enable CRC-8 checking.
+ * Packets with inverted CRC-8 are considered valid.
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFRXCRC8INVCHK_MASK	0x400000
+
+/*
+ * 0:
+ * CRC-8 is checked from LSB to MSB in the downstream direction.
+ * 1:
+ * CRC-8 is checked from MSB to LSB in the downstream direction.
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFRXCRC8BITSWAP_MASK	0x200000
+
+/*
+ * 0:
+ * CRC-8 is checked by shifting data from MSB to LSB in the downstream
+ * direction.
+ * 1:
+ * CRC-8 is checked by shifting data from LSB to MSB in the downstream
+ * direction.
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFRXCRC8MSB2LSB_MASK	0x100000
+
+/*
+ * 0:
+ * Enable Crc-8 checking.
+ * 1:
+ * Disable Crc-8 checking.
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFRXCRC8DISABLE_MASK	0x80000
+
+/*
+ * 0:
+ * Bit 15 of LLID in the upstream path is zero.
+ * 1:
+ * Bit 15 of LLID in the upstream path is set.
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFTXLLIDBIT15SET_MASK	0x20000
+
+/*
+ * 0:
+ * Transmit correct Crc-81:
+ * Transmit inverted Crc-8 on a per packet basis.
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFTXCRC8INV_MASK	0x10000
+
+/*
+ * 0:
+ * Transmit correct Crc-81:
+ * Transmit bad Crc-8
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFTXCRC8BAD_MASK	0x8000
+
+/*
+ * 0:
+ * Generated upstream Crc-8 byte is transmitted from Lsb to Msb.
+ * 1:
+ * Generated upstream Crc-8 byte is transmitted from Msb to Lsb.
+ * Note:
+ * This feature is added to give the ONU more flexibility of transmitting
+ * the Crc-8 byte.
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFTXCRC8BITSWAP_MASK	0x4000
+
+/*
+ * 0:
+ * Generate upstream Crc-8 by shifting data from Lsb to Msb.
+ * 1:
+ * Generate upstream Crc-8 by shifting data from Msb to Lsb.
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFTXCRC8MSB2LSB_MASK	0x2000
+
+/*
+ * 0:
+ * Normal operation.
+ * 1:
+ * Enable the LIF module to transmit short pre-amble in the upstream path.
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFTXSHORTPRE_MASK	0x1000
+
+/*
+ * LIF upstream IPG counter.
+ * Each unit represents one time quanta or 16ns.
+ * Default:
+ * 2
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFTXIPGCNT_SHIFT	8
+#define  LIF_PON_INTER_OP_CONTROL_CFTXIPGCNT_MASK	0xf00
+
+/*
+ * 0:
+ * Normal OperationGreater than 0:
+ * This bit field is used for testing with Panasonic transceivers.
+ * The pattern of 10'b10_1010_1010 (2AA) will transmit at the beginning of
+ * every burst.
+ * The number of transmitted words is programmable based on the configuration
+ * of cfInitIdle in register 0x10d.
+ * The amount transmitted is always a portion of cfInitIdle up to a maximum
+ * of 16.
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFTXAASYNCLEN_SHIFT	4
+#define  LIF_PON_INTER_OP_CONTROL_CFTXAASYNCLEN_MASK	0xf0
+
+/*
+ * Pipeline delay to be added on to laser on time and laser off time.
+ * Default:
+ * 6
+*/
+#define  LIF_PON_INTER_OP_CONTROL_CFTXPIPEDELAY_SHIFT	0
+#define  LIF_PON_INTER_OP_CONTROL_CFTXPIPEDELAY_MASK	0xf
+
+
+/*
+ * Register <LIF_FEC_CONTROL>
+ *
+ * This register controls and configures the LIF FEC sub-module block.
+ * Configuration bits dealing directly with forward error correction
+ * are here.
+ */
+#define LIF_FEC_CONTROL_REG		0x8
+
+/*
+ * 0:
+ * Uncorrectable frames are aborted.
+ * 1:
+ * Uncorrectable frames are forwarded.
+*/
+#define  LIF_FEC_CONTROL_CFFECRXERRORPROP_MASK	0x20
+
+/*
+ * Aborts non-FEC frames after the FecRx module.
+ * Statistics will still be tabulated for frames aborted through this
+ * manner.
+ * 0:
+ * Non-FEC frames are forwarded.
+ * 1:
+ * Non-FEC frames are aborted.
+*/
+#define  LIF_FEC_CONTROL_CFFECRXFORCENONFECABORT_MASK	0x10
+
+/*
+ * Aborts FEC frames after the FecRx module.
+ * Statistics will still be tabulated for frames aborted through this
+ * manner.
+ * 0:
+ * FEC frames are forwarded.
+ * 1:
+ * FEC frames are aborted.
+*/
+#define  LIF_FEC_CONTROL_CFFECRXFORCEFECABORT_MASK	0x8
+
+/*
+ * Enable FEC on Receiver0:
+ * FEC Disabled1:
+ * FEC Enabled
+*/
+#define  LIF_FEC_CONTROL_CFFECRXENABLE_MASK	0x4
+
+/*
+ * Enables per LLID Fec Parity Generation.
+ * This bit, cfFecTxEn, and the corresponding llid enable bit
+ * cfFecTxFecLlidEn# below must be set.
+ * 0:
+ * Disabled1:
+ * Enabled
+*/
+#define  LIF_FEC_CONTROL_CFFECTXFECPERLLID_MASK	0x2
+
+/*
+ * Enable FEC on Transmitter0:
+ * FEC Disabled1:
+ * FEC Enabled
+*/
+#define  LIF_FEC_CONTROL_CFFECTXENABLE_MASK	0x1
+
+
+/*
+ * Register <LIF_SEC_CONTROL>
+ *
+ * This register controls and configures the LIF SEC sub-module block.
+ * Configuration bits dealing directly with security encryption are here.
+ */
+#define LIF_SEC_CONTROL_REG		0xc
+
+/* Disable MPCP encryption. */
+#define  LIF_SEC_CONTROL_CFGDISMPCPENCRYPT_MASK	0x80000000
+
+/* Disable OAM encryption. */
+#define  LIF_SEC_CONTROL_CFGDISOAMENCRYPT_MASK	0x40000000
+
+/* Enables downstream security short length support. */
+#define  LIF_SEC_CONTROL_CFGSECENSHORTLEN_MASK	0x20000000
+
+/* Enables downstream security packet number rollover. */
+#define  LIF_SEC_CONTROL_CFGSECDNENPKTNUMRLOVR_MASK	0x10000000
+
+/* Enables upstream security packet number rollover. */
+#define  LIF_SEC_CONTROL_CFGSECUPENPKTNUMRLOVR_MASK	0x8000000
+
+/* Enables replay protection on RX security. */
+#define  LIF_SEC_CONTROL_CFGENAEREPLAYPRCT_MASK	0x4000000
+
+/* Enables legacy RCC priority encoding mode for EPON encryption. */
+#define  LIF_SEC_CONTROL_CFGENLEGACYRCC_MASK	0x2000000
+
+/*
+ * Enables fake AES mode in the upstream for FPGA testing.
+ * 0:
+ * Normal operation.
+ * 1:
+ * Enable fake AES.
+*/
+#define  LIF_SEC_CONTROL_ENFAKEUPAES_MASK	0x1000000
+
+/*
+ * Enables fake AES mode in the downstream for FPGA testing.
+ * 0:
+ * Normal operation.
+ * 1:
+ * Enable fake AES.
+*/
+#define  LIF_SEC_CONTROL_ENFAKEDNAES_MASK	0x800000
+
+/*
+ * FEC IPG Len used by SEC to support certain security modes.
+ * Default:
+ * 0xa
+*/
+#define  LIF_SEC_CONTROL_CFGFECIPGLEN_SHIFT	13
+#define  LIF_SEC_CONTROL_CFGFECIPGLEN_MASK	0x1fe000
+
+/*
+ * Disable downstream DA/SA encryption.
+ * 0:
+ * Enable DA/SA encryption1:
+ * Disable DA/SA encryption
+*/
+#define  LIF_SEC_CONTROL_DISDNDASAENCRPT_MASK	0x1000
+
+/*
+ * 0:
+ * Single Churning encryption (do not use)1:
+ * Triple Churning encryptionThis bit matters only when dnEncryptScheme is
+ * set to "CEPON".
+ * Default:
+ * 1
+*/
+#define  LIF_SEC_CONTROL_ENTRIPLECHURN_MASK	0x800
+
+/*
+ * 0:
+ * In EPON mode, all packets must be either encrypted or non-encrypted; a
+ * mixture of both is not allowed.
+ * The turning on/off of encryption can be initiated only by the OLT.
+ * 1:
+ * In EPON mode, mixing of encrypted and non-encrypted packets is allowed
+ * for a particular LLID.
+ * Default:
+ * 1
+*/
+#define  LIF_SEC_CONTROL_ENEPNMIXENCRYPT_MASK	0x400
+
+/*
+ * Disable upstream DA/SA encryption.
+ * 0:
+ * Enable DA/SA encryption1:
+ * Disable DA/SA encryption
+*/
+#define  LIF_SEC_CONTROL_DISUPDASAENCRPT_MASK	0x200
+
+/*
+ * Defines the upstream security decryption scheme.
+ * 0:
+ * Teknovus encryption1:
+ * Reserved2:
+ * EPON encryption
+*/
+#define  LIF_SEC_CONTROL_SECUPENCRYPTSCHEME_SHIFT	7
+#define  LIF_SEC_CONTROL_SECUPENCRYPTSCHEME_MASK	0x180
+
+/*
+ * Defines the downstream security decryption scheme.
+ * 0:
+ * Teknovus encryption1:
+ * Reserved2:
+ * EPON encryption3:
+ * CEPON encryption4:
+ * Zero-overhead encryption5:
+ * AE encryption
+*/
+#define  LIF_SEC_CONTROL_SECDNENCRYPTSCHEME_SHIFT	4
+#define  LIF_SEC_CONTROL_SECDNENCRYPTSCHEME_MASK	0x70
+
+/*
+ * Resets upstream SEC.
+ * 0:
+ * Reset UP SEC.
+ * 1:
+ * Normal operation
+*/
+#define  LIF_SEC_CONTROL_SECUPRSTN_PRE_MASK	0x8
+
+/*
+ * Resets downstream SEC.
+ * 0:
+ * Reset DN SEC.
+ * 1:
+ * Normal operation.
+*/
+#define  LIF_SEC_CONTROL_SECDNRSTN_PRE_MASK	0x4
+
+/*
+ * Global enable for upstream encryption0:
+ * Disable upstream encryption.
+ * 1:
+ * Enable upstream encryption.
+*/
+#define  LIF_SEC_CONTROL_SECENUP_MASK	0x2
+
+/*
+ * Global enable for downstream decryption0:
+ * Disable downstream decryption.
+ * 1:
+ * Enable downstream decryption.
+*/
+#define  LIF_SEC_CONTROL_SECENDN_MASK	0x1
+
+
+/*
+ * Register <LIF_MACSEC>
+ *
+ * This register specifies the 802.
+ * 1ae MacSec Ethertype to be inserted into the packet.
+ */
+#define LIF_MACSEC_REG			0x10
+
+/* Defines the MacSec Ethertype. */
+#define  LIF_MACSEC_CFGMACSECETHERTYPE_SHIFT	0
+#define  LIF_MACSEC_CFGMACSECETHERTYPE_MASK	0xffff
+
+
+/*
+ * Register <LIF_INT_STATUS>
+ *
+ * This register contains interrupt status for LIF modules.
+ * These bits are sticky; to clear a bit, write 1 to it.
+ */
+#define LIF_INT_STATUS_REG		0x14
+
+/*
+ * [NON-FATAL] Indicates that an SOP or SFEC was detected in an IPG window
+ * in excess of what was provisioned in cfIpgFilter.
+ * Please see cfIpgFilter for more details.
+*/
+#define  LIF_INT_STATUS_INT_SOP_SFEC_IPG_VIOLATION_MASK	0x200000
+
+/*
+ * Indicates laser enable on time exceeded the maximum threshold, as defined
+ * by register LIF_LSR_MON_A_MAX_THR.
+*/
+#define  LIF_INT_STATUS_LASERONMAX_MASK	0x100000
+
+/* Indicates laser enable deassertion. */
+#define  LIF_INT_STATUS_LASEROFF_MASK	0x80000
+
+/*
+ * [NON-FATAL] Applicable only in 802.
+ * 1ae security.
+ * Indicates the received packet was aborted due to replay protection.
+*/
+#define  LIF_INT_STATUS_SECDNREPLAYPROTCTABORT_MASK	0x40000
+
+/*
+ * [NON-FATAL] Applicable only in 802.
+ * 1ae security.
+ * Indicates the transmit packet number exceeded the maximum threshold and
+ * is about to overflow.
+ * Threshold is programmed in register LIF_AE_PKTNUM_THRESH.
+*/
+#define  LIF_INT_STATUS_SECUPPKTNUMOVERFLOW_MASK	0x20000
+
+/*
+ * [NON-FATAL] Laser was turned off in the middle of a burst.
+ * This usually indicates misconfiguration which results in EPN
+ * "overstuffing" a burst.
+ * Note:
+ * This interrupt will fire while in P2P or in DN2UP Loopback mode.
+ * S/W is to mask this bit during those modes.
+ * A fix may be introduced into later revisions of chip to fix this cosmetic
+ * issue (FLEXIPON-138).
+*/
+#define  LIF_INT_STATUS_INTLASEROFFDURBURST_MASK	0x10000
+
+/*
+ * [NON-FATAL]Line code error threshold was exceeded.
+ * Program with LIF RX BER Threshold and Interval register (0x1b4)
+*/
+#define  LIF_INT_STATUS_INTRXBERTHRESHEXC_MASK	0x8000
+
+/* The LIF detected a FEC receive frame. */
+#define  LIF_INT_STATUS_INTFECRXFECRECVSTATUS_MASK	0x4000
+
+/*
+ * [FATAL] Error location FIFO in Corrector logic has overflowed; some data
+ * blocks will go uncorrected.
+ * This is considered a fatal interrupt because this introduces FEC block
+ * level inconsistencies, which may cause the correction of the wrong
+ * blocks.
+*/
+#define  LIF_INT_STATUS_INTFECRXCORERRFIFOFULLSTATUS_MASK	0x2000
+
+/*
+ * Error location FIFO in Corrector logic has gone empty before finishing a
+ * FEC frame.
+ * This is considered a fatal interrupt because this introduces FEC block
+ * level inconsistencies, which may cause the correction of the wrong
+ * blocks.
+*/
+#define  LIF_INT_STATUS_INTFECRXCORERRFIFOUNEXPEMPTY_MASK	0x1000
+
+/*
+ * [FATAL] Data was popped from an empty FEC Buffer pipeline, which
+ * simultaneously was having data pushed into it from the FEC
+ * Buffer SRAM.
+*/
+#define  LIF_INT_STATUS_INTFECBUFPOPEMPTYPUSH_MASK	0x800
+
+/*
+ * [FATAL] Data was popped from an empty FEC Buffer pipeline, while
+ * simultaneously no data was being pushed into the pipeline from
+ * the FEC Buffer SRAM.
+*/
+#define  LIF_INT_STATUS_INTFECBUFPOPEMPTYNOPUSH_MASK	0x400
+
+/*
+ * [FATAL] Data was read from FEC Buffer SRAM and pushed into a full FEC
+ * Buffer pipeline.
+ * This is fatal.
+ * Write a one to clear this bit.
+*/
+#define  LIF_INT_STATUS_INTFECBUFPUSHFULL_MASK	0x200
+
+/*
+ * [NON-FATAL] Gate frame's MPCP timestamp vastly different than current
+ * MPCP time.
+ * Triggered a full timestamp update.
+*/
+#define  LIF_INT_STATUS_INTUPTIMEFULLUPDSTAT_MASK	0x100
+
+/*
+ * [FATAL] LIF detects a frame from Epn Utx that should be first frame of
+ * the burst but it is not.
+*/
+#define  LIF_INT_STATUS_INTFROUTOFALIGNSTAT_MASK	0x80
+
+/*
+ * [FATAL] LIF detects a grant start time that is less than its
+ * current timer.
+*/
+#define  LIF_INT_STATUS_INTGRNTSTARTTIMELAGSTAT_MASK	0x40
+
+/* [NON-FATAL] LIF had to abort frames due to misalignment. */
+#define  LIF_INT_STATUS_INTABORTRXFRMSTAT_MASK	0x20
+
+/* [NON-FATAL] LIF detects an idle condition for received clock. */
+#define  LIF_INT_STATUS_INTNORXCLKSTAT_MASK	0x10
+
+/*
+ * [NON-FATAL] Lif detects a runaway frame.
+ * LIF could not detect an end of frame character for 64K clocks.
+*/
+#define  LIF_INT_STATUS_INTRXMAXLENERRSTAT_MASK	0x8
+
+/*
+ * [NON-FATAL] LIF detects code error after it has successfully acquired
+ * sync.
+*/
+#define  LIF_INT_STATUS_INTRXERRAFTALIGNSTAT_MASK	0x4
+
+/* [NON-FATAL] LIF has successfully acquired sync. */
+#define  LIF_INT_STATUS_INTRXSYNCHACQSTAT_MASK	0x2
+
+/*
+ * [NON-FATAL] LIF is out of sync.
+ * This condition indicates that the LIF could not align to IDLE characters
+ * or it detects code errors.
+*/
+#define  LIF_INT_STATUS_INTRXOUTOFSYNCHSTAT_MASK	0x1
+
+
+/*
+ * Register <LIF_INT_MASK>
+ *
+ * This register contains interrupt masks for LIF modules.
+ */
+#define LIF_INT_MASK_REG		0x18
+
+/*
+ * [NON-FATAL]Mask for int_sop_sfec_ipg_violation interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INT_SOP_SFEC_IPG_VIOLATION_MASK_MASK	0x200000
+
+/* Mask. */
+#define  LIF_INT_MASK_LASERONMAXMASK_MASK	0x100000
+
+/* Mask. */
+#define  LIF_INT_MASK_LASEROFFMASK_MASK	0x80000
+
+/* Mask for replay protection abort interrupt */
+#define  LIF_INT_MASK_SECDNREPLAYPROTCTABORTMSK_MASK	0x40000
+
+/* Mask for packet number overflow interrupt */
+#define  LIF_INT_MASK_SECUPPKTNUMOVERFLOWMSK_MASK	0x20000
+
+/*
+ * Mask for laserOffDurBurstMask interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTLASEROFFDURBURSTMASK_MASK	0x10000
+
+/*
+ * Mask for rxBerThreshExc interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTRXBERTHRESHEXCMASK_MASK	0x8000
+
+/*
+ * Mask for fecRxFrmRecv interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTFECRXFECRECVMASK_MASK	0x4000
+
+/*
+ * Mask for fecCorrErrFifFullMask interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTFECRXCORERRFIFOFULLMASK_MASK	0x2000
+
+/*
+ * Mask for FecRxCorErrFifoUnExpEmpty interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTFECRXCORERRFIFOUNEXPEMPTYMASK_MASK	0x1000
+
+/*
+ * Mask for fecBufPopEmptyPush interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTFECBUFPOPEMPTYPUSHMASK_MASK	0x800
+
+/*
+ * Mask for fecBufPopEmptyNoPush interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTFECBUFPOPEMPTYNOPUSHMASK_MASK	0x400
+
+/*
+ * Mask for fecBufPushFull interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTFECBUFPUSHFULLMASK_MASK	0x200
+
+/*
+ * Mask for upTimeFullUpdStat interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTUPTIMEFULLUPDMASK_MASK	0x100
+
+/*
+ * Mask for frmOutOfAlignStat interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTFROUTOFALIGNMASK_MASK	0x80
+
+/*
+ * Mask for grntStartTimeLagStat interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTGRNTSTARTTIMELAGMASK_MASK	0x40
+
+/*
+ * Mask for rxFrmAbortStat interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTABORTRXFRMMASK_MASK	0x20
+
+/*
+ * Mask for noRxClkStat interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTNORXCLKMASK_MASK	0x10
+
+/*
+ * Mask for noRxClkStat interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTRXMAXLENERRMASK_MASK	0x8
+
+/*
+ * Mask for rxErrAftAlignStat interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTRXERRAFTALIGNMASK_MASK	0x4
+
+/*
+ * Mask for rxSynchAcqStat interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTRXSYNCHACQMASK_MASK	0x2
+
+/*
+ * Mask for rxOutOfSyncStat interrupt.
+ * 0:
+ * Disabled -- don't toggle interrupt pin.
+ * 1:
+ * Enabled -- toggle interrupt pin.
+ * Default :
+ * 1
+*/
+#define  LIF_INT_MASK_INTRXOUTOFSYNCHMASK_MASK	0x1
+
+
+/*
+ * Register <LIF_DATA_PORT_COMMAND>
+ *
+ * This set of registers allows reading the LIF per-LLID statistics.
+ * It also allows general access to the LIF internal RAMs.
+ * All RAMs are initialized to zero upon hardware reset.
+ * The following describes dataport read/write sequences:
+ * Write cycle:
+ * 1) Write the address of the RAM entry to Offset 1.
+ * 2) Write data to be written to Offsets 2 through 5 (as required for the
+ * given RAM's width).
+ * 3) Write to Offset 0 to select the RAM and to indicate Write cycle.
+ * 4) Poll on Offset 0 until the dataPortBusy bit is cleared.
+ * Read cycle:
+ * 1) Write the address of the RAM entry to Offset 1.
+ * 2) Write to Offset 0 to select the RAM and to indicate Read cycle.
+ * 3) Poll on Offset 0 until the dataPortBusy bit is cleared.
+ * 4) Read Offsets 2 through 5 (as needed according to the width of the
+ * accessed RAM) to retrieve the RAM read data.
+ * The following paragraphs describe the port data format for each RAM:
+ * Statistics Downstream Per LLID - Stores the statistics information
+ * for each downstream LLID (32 bidirectional).
+ * The memory is auto-initialized within the hardware, and can be
+ * accessed as soon as the module is brought out of reset.
+ * The statistics are cleared on read.
+ * portData0[31:
+ * 0] - Desired statisticLLID Index RAM LocationsBidirectional 0 0 -
+ * 15Bidirectional 1 16 - 31.
+ * ..
+ * Bidirectional 30 480-495Bidirectional 31 496-511Table:
+ * RAM Offset Description Remarks0 Bidir Downstream Good Frames Received
+ * Not Aborted Prior toFEC1 Bidir Downstream Good Bytes Received Not
+ * Aborted Prior toFEC2 Bidir Downstream Oversized Frames As defined by
+ * LIFSanitizer Register (0x134)3 Bidir Downstream NonFEC Good Frames
+ * Received NonFEC Framesregardless of FEC Enable4 Bidir Downstream NonFEC
+ * Good Bytes Received NonFEC Bytesregardless of FEC Enable5 Bidir
+ * Downstream FEC Good Frames Received Will incrementeven if FEC is
+ * disabled.
+ * 6 Bidir Downstream FEC Good Bytes Received Will incrementeven if FEC is
+ * disabled.
+ * 7 Bidir Downstream FEC Frames Exceeded Error Threshold8 Bidir Downstream
+ * FEC Data Blocks No Errors9 Bidir Downstream FEC Data Blocks Corrected10
+ * Bidir Downstream FEC Data Blocks Uncorrected11 Bidir Downstream FEC Data
+ * Corrected Bytes12 Bidir Downstream FEC Data Corrected Zeroes13 Bidir
+ * Downstream FEC Data Corrected Ones14 Bidir Downstream Undersized
+ * Frames15 Bidir Downstream Errored FramesSEC Downstream Key RAM -
+ * specifies the security key.
+ * When writing tothe key RAM, the mode and encryption key MUST be
+ * specified.
+ * All keysmust be written to EVEN offsets.
+ * For LLID X, the corresponding RAMoffset for the even key is X*2, while
+ * the odd key is (X*2) + 1.
+ * CEPON encryption mode:
+ * portData0 - Specifies input to 1st churning key generation.
+ * Bits [15:
+ * 0] specify P16 - P1; and bits [23:
+ * 16] specify X8 - X1.
+ * portData1 - Specifies input to 2nd churning key generation.
+ * The input is byte shift of 1st churning key.
+ * Bits [7:
+ * 0]specify P16 - P8; bits [15:
+ * 8] specify X8 - X1; andbits [23:
+ * 16] specify P7 - P1.
+ * portData2 - Specifies input to 3rd churning key generation.
+ * The input is byte shift of 2nd churning key.
+ * Bits [7:
+ * 0] specify X8 -X1; and bits [23:
+ * 8] specify P16 - P1.
+ * portData3 - Specifies P-input to churning function.
+ * Bits [15:
+ * 0]specify P16 - P1.
+ * Same data as portData0.
+ * Broadcom and NTT encryption modesportData0 - Specifies bits [31:
+ * 0] of the 128-bits Encryption key.
+ * portData1 - Specifies bits [63:
+ * 32] of the 128-bits Encryption key.
+ * portData2 - Specifies bits [95:
+ * 64] of the 128-bits Encryption key.
+ * portData3 - Specifies bits [127:
+ * 96] of the 128-bits Encryption key.
+ * 802.
+ * 1ae encryption modesportData0 - Specifies bits [31:
+ * 0] of the 128-bits Encryption key.
+ * portData1 - Specifies bits [63:
+ * 32] of the 128-bits Encryption key.
+ * portData2 - Specifies bits [95:
+ * 64] of the 128-bits Encryption key.
+ * portData3 - Specifies bits [127:
+ * 96] of the 128-bits Encryption key.
+ * portData4 - Specifies bits [63:
+ * 0] of the 64-bits implicit SCIportData5 - Specifies bits [127:
+ * 64] of the 64-bits implicit SCIportData6 - Specifies bits [31:
+ * 64] of the initial packet number forreplay protection.
+ * portData7[9] - encryption enable.
+ * When cleared, 802.
+ * 1ae packets willpass through undecrypted.
+ * Zero-overhead encryption modes. portData0 - Specifies bits [31:
+ * 0] of the 128-bits Encryption key.
+ * portData1 - Specifies bits [63:
+ * 32] of the 128-bits Encryption key.
+ * portData2 - Specifies bits [95:
+ * 64] of the 128-bits Encryption key.
+ * portData3 - Specifies bits [127:
+ * 96] of the 128-bits Encryption key.
+ * portData4 - Specifies bits [63:
+ * 0] of the 64-bits implicit SCIportData5 - Specifies bits [127:
+ * 64] of the 64-bits implicit SCIportData6 - Specifies bits [31:
+ * 64] of the initial packet number forreplay protection.
+ * SEC Upstream Key RAM - specifies the security key.
+ * When writing to thekey RAM, the mode and encryption key MUST be
+ * specified.
+ * Each LLIDoccupies 2 entries (N and N+1), ie.
+ * LLID Index 0 corresponds to entries0 and 1, while LLID Index 1
+ * corresponds to entries 2 and 3.
+ * Controlinformation via portData4 will need to be conveyed to indicate to
+ * theSEC Receiver (OLT 1G RX) which key (0/1) will need to be loaded.
+ * Broadcom and NTT encryption modesportData0 - Specifies bits [31:
+ * 0] of the 128-bits Encryption key.
+ * portData1 - Specifies bits [63:
+ * 32] of the 128-bits Encryption key.
+ * portData2 - Specifies bits [95:
+ * 64] of the 128-bits Encryption key.
+ * portData3 - Specifies bits [127:
+ * 96] of the 128-bits Encryption key.
+ * portData7[8] - key number.
+ * portData7[9] - encryption enable.
+ * 802.
+ * 1ae encryption modesportData0 - Specifies bits [31:
+ * 0] of the 128-bits Encryption key.
+ * portData1 - Specifies bits [63:
+ * 32] of the 128-bits Encryption key.
+ * portData2 - Specifies bits [95:
+ * 64] of the 128-bits Encryption key.
+ * portData3 - Specifies bits [127:
+ * 96] of the 128-bits Encryption key.
+ * portData4 - Specifies bits [63:
+ * 0] of the 64-bits explicit SCI.
+ * portData5 - Specifies bits [127:
+ * 64] of the 64-bits explicit SCI.
+ * portData6 - Specifies bits [31:
+ * 64] of the initial packet number.
+ * portData7[7:
+ * 0] - TCI[7:
+ * 0]TCI[1:
+ * 0] - key number.
+ * In PON mode, only TCI[0] is utilized foreven/odd key.
+ * Must match key number specified in portData7[8].
+ * In P2Pmode, TCI[1:
+ * 0]specifies 1 of 4 keys.
+ * TCI[3:
+ * 2] - encryption mode bits :
+ * TCI[3] - E encryption bit; TCI[2] - C change bit.
+ * - E=0; C=0 :
+ * Authentication only.
+ * Data is not encrypted.
+ * Only ICV is inserted at the end of packet.
+ * - E=0; C=1 :
+ * Reserved.
+ * - E=1; C=0 :
+ * Reserved.
+ * - E=1; C=1 :
+ * Encryption/authentication.
+ * Data is encrypted and ICV inserted.
+ * TCI[4] - single copy broadcast.
+ * Set to 0.
+ * TCI[5] - SC specifies whether SecTag's SCI is implicit(0)/explicit(1).
+ * TCI[6] - ES end station byte.
+ * Set to 0.
+ * TCI[7] - V version number.
+ * Set to 0.
+ * portData7[8] - key number.
+ * portData7[9] - encryption enable.
+ * Zero-overhead encryption modes: portData0 - Specifies bits [31:
+ * 0] of the 128-bits Encryption key.
+ * portData1 - Specifies bits [63:
+ * 32] of the 128-bits Encryption key.
+ * portData2 - Specifies bits [95:
+ * 64] of the 128-bits Encryption key.
+ * portData3 - Specifies bits [127:
+ * 96] of the 128-bits Encryption key.
+ * portData4 - Specifies bits [63:
+ * 0] of the 64-bits implicit SCI.
+ * portData5 - Specifies bits [127:
+ * 64] of the 64-bits implicit SCI.
+ * portData6 - Specifies bits [31:
+ * 64] of the initial packet number.
+ * portData7[8] - key number.
+ * portData7[9] - encryption enable.
+ */
+#define LIF_DATA_PORT_COMMAND_REG	0x1c
+
+/*
+ * Indicates access to RAM is in progress.
+ * 0:
+ * Data port is ready to accept a command; 1:
+ * Data port is busy
+*/
+#define  LIF_DATA_PORT_COMMAND_DATA_PORT_BUSY_MASK	0x80000000
+
+/*
+ * 0 = Previous Data Port Operation Successful; 1 = Previous Data Port
+ * Operation Failed
+*/
+#define  LIF_DATA_PORT_COMMAND_DATA_PORT_ERROR_MASK	0x40000000
+
+/*
+ * Selects the internal RAM for access:
+ * 0:
+ * Downstream Statistics (per-LLID) (256 x 32 bits); 1:
+ * SEC Downstream Key (64 x 225 bits); 2:
+ * FEC Downstream Data (2048 x 20 bits)*; 3:
+ * SEC Upstream Key (32 x 234 bits); 5:
+ * FEC Downstream Partial Syndrome (16 x 136 bits)*; 6:
+ * FEC Downstream Full Syndrome (16 x 136 bits)*; 7:
+ * FEC Upstream Parity (16 x 128)*. See LIF Data Port Data register for bit
+ * descriptions of these RAMs.
+ * * Module level resets must be active to access these rams.
+*/
+#define  LIF_DATA_PORT_COMMAND_RAM_SELECT_SHIFT	24
+#define  LIF_DATA_PORT_COMMAND_RAM_SELECT_MASK	0x3f000000
+
+/*
+ * Specifies RAM read or write operation.
+ * 0:
+ * Read; 1:
+ * Write; 2-255:
+ * NO OP
+*/
+#define  LIF_DATA_PORT_COMMAND_DATA_PORT_OP_CODE_SHIFT	16
+#define  LIF_DATA_PORT_COMMAND_DATA_PORT_OP_CODE_MASK	0xff0000
+
+/*
+ * Specifies the RAM address for the port operation.
+ * Note:
+ * This field is also used by the LIF memory initialization logic, so it has
+ * a non-zero value after reset.
+ * Default:
+ * 0xffff
+*/
+#define  LIF_DATA_PORT_COMMAND_DATA_PORT_ADDR_SHIFT	0
+#define  LIF_DATA_PORT_COMMAND_DATA_PORT_ADDR_MASK	0xffff
+
+
+/*
+ * Registers <LIF_DATA_PORT_DATA> - <x> is [ 0 => 7 ]
+ *
+ */
+#define LIF_DATA_PORT_DATA_REG(x)	(0x20 + (x) * 0x4)
+
+/*
+ * For write operations, the data to be written to the RAM location.
+ * For read operations, the data read back from the RAM location.
+*/
+#define  LIF_DATA_PORT_DATA_PBIPORTDATA_SHIFT	0
+#define  LIF_DATA_PORT_DATA_PBIPORTDATA_MASK	0xffffffff
+
+
+/*
+ * Registers <LIF_LLID_0> - <x> is [ 0 => 8 ]
+ *
+ * Provides configuration for LLID mapping.
+ * LIF supports 32 bidirectional LLIDs.
+ */
+#define LIF_LLIDx_0_7_REG(x)		(0x40 + (x) * 0x4)
+
+/*
+ * [15:
+ * 0]:
+ * Specifies LLID Index lookup value. [16]:
+ * Enable LLID; 0:
+ * Disable LLID; 1:
+ * Enable LLID. In upstream P2P, 802.
+ * 1ae mode, bit[11:
+ * 0] provides lookup with VLAN'sVID to index 0.
+ * In downstream P2P, 802.
+ * 1ae mode, registersLIF_P2P_AE_SCI_LO[0:
+ * 15]/LIF_P2P_AE_SCI_HI[0:
+ * 15]provide lookup with packet's explicit SCI to an index.
+*/
+#define  LIF_LLIDx_0_7_CFGLLID0_SHIFT	0
+#define  LIF_LLIDx_0_7_CFGLLID0_MASK	0x1ffff
+
+
+/*
+ * Registers <LIF_LLID_16> - <x> is [ 0 => 8 ]
+ *
+ * Provides configuration for LLID mapping.
+ * LIF supports 32 bidirectional LLIDs.
+ */
+#define LIF_LLIDx_16_23_REG(x)		(0x60 + (x) * 0x4)
+
+/*
+ * [15:
+ * 0]:
+ * Specifies LLID Index lookup value. [16]:
+ * Enable LLID; 0:
+ * Disable LLID; 1:
+ * Enable LLID. In upstream P2P, 802.
+ * 1ae mode, bit[11:
+ * 0] provides lookup with VLAN'sVID to index 16.
+*/
+#define  LIF_LLIDx_16_23_CFGLLID16_SHIFT	0
+#define  LIF_LLIDx_16_23_CFGLLID16_MASK	0x1ffff
+
+
+/*
+ * Register <LIF_TIME_REF_CNT>
+ *
+ * This register provides programmable parameters for dynamic updates tothe
+ * MPCP timer.
+ */
+#define LIF_TIME_REF_CNT_REG		0x80
+
+/*
+ * If the (absolute) difference between the timestamp received in a GATE
+ * message and the MPCP timer is larger than this value, then a "full update"
+ * occurs:
+ * the downstream timestamp is transferred into the MPCP timer.
+ * Default:
+ * 0x20
+*/
+#define  LIF_TIME_REF_CNT_CFFULLUPDATEVALUE_SHIFT	16
+#define  LIF_TIME_REF_CNT_CFFULLUPDATEVALUE_MASK	0xff0000
+
+/*
+ * If the difference between the timestamp received in a GATE message and
+ * the MPCP timer is negative AND larger than this value (but not larger
+ * than cfFullUpdate) then the MPCP timer is held for one TQ (effectively
+ * decrementing it).
+ * Default:
+ * 0x2
+*/
+#define  LIF_TIME_REF_CNT_CFMAXNEGVALUE_SHIFT	8
+#define  LIF_TIME_REF_CNT_CFMAXNEGVALUE_MASK	0xff00
+
+/*
+ * If the difference between the timestamp received in a GATE message and
+ * the MPCP timer is positive AND larger than this value (but not larger
+ * than cfFullUpdate) then the MPCP timer is incremented by two TQ.
+ * Default:
+ * 0x4
+*/
+#define  LIF_TIME_REF_CNT_CFMAXPOSVALUE_SHIFT	0
+#define  LIF_TIME_REF_CNT_CFMAXPOSVALUE_MASK	0xff
+
+
+/*
+ * Register <LIF_TIMESTAMP_UPD_PER>
+ *
+ * This register provides the LIF the ability to filter MPCP
+ * time corrections when the EPON MAC requests them too frequently.
+ * This register specifies a time period after an update during which the
+ * LIF will ignore MPCP time corrections from EPN.
+ */
+#define LIF_TIMESTAMP_UPD_PER_REG	0x84
+
+/*
+ * Time period after an MPCP time correction during which LIF
+ * ignores further MPCP time corrections.
+ * The units are TQ.
+*/
+#define  LIF_TIMESTAMP_UPD_PER_CFTIMESTAMPUPDPER_SHIFT	0
+#define  LIF_TIMESTAMP_UPD_PER_CFTIMESTAMPUPDPER_MASK	0xffff
+
+
+/*
+ * Register <LIF_TP_TIME>
+ *
+ * The one pulse per second signal is asserted when the local MPCP
+ * time increments past the value programmed in cfTransportTime.
+ * Software must update this register once per second.
+ */
+#define LIF_TP_TIME_REG			0x88
+
+/* MPCP time at which the pulse per second output will be asserted. */
+#define  LIF_TP_TIME_CFTRANSPORTTIME_SHIFT	0
+#define  LIF_TP_TIME_CFTRANSPORTTIME_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_MPCP_TIME> - read-only
+ *
+ * Provides the receive MPCP time of the most recently received
+ * downstream packet.
+ * It is updated only when a downstream packet is received.
+ */
+#define LIF_MPCP_TIME_REG		0x8c
+
+/* Provides the least significant 32 bits of the receive time. */
+#define  LIF_MPCP_TIME_LTMPCPTIME_SHIFT	0
+#define  LIF_MPCP_TIME_LTMPCPTIME_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_MAXLEN_CTR>
+ *
+ * Max Length setting for the receive sanitizer.
+ * The sanitizer ensures that frames entering latter LIF receive logic are
+ * truncated to a usable size.
+ */
+#define LIF_MAXLEN_CTR_REG		0x90
+
+/*
+ * Approximately the maximum number of double words which the frame
+ * can contain before being truncated by the LIF Sanitizer circuit.
+ * The default of 1005 corresponds to a frame length of 2000 bytes.
+ * Default:
+ * 1005
+*/
+#define  LIF_MAXLEN_CTR_CFRXMAXFRAMELENGTH_SHIFT	0
+#define  LIF_MAXLEN_CTR_CFRXMAXFRAMELENGTH_MASK	0x3fff
+
+
+/*
+ * Register <LIF_LASER_ON_DELTA>
+ *
+ * Specifies an offset, before or after the grant start time, to turn on the
+ * laser.
+ * Units are double-octet words.
+ */
+#define LIF_LASER_ON_DELTA_REG		0x94
+
+/*
+ * [11:
+ * 0] Offset (+/-) from Grant Start time to turn laser on[12] 0:
+ * Positive value:
+ * turn on laser after the grant start time.
+ * 1:
+ * Negative value:
+ * turn on laser prior to the grant start time
+*/
+#define  LIF_LASER_ON_DELTA_CFTXLASERONDELTA_SHIFT	0
+#define  LIF_LASER_ON_DELTA_CFTXLASERONDELTA_MASK	0x1fff
+
+
+/*
+ * Register <LIF_LASER_OFF_IDLE>
+ *
+ * Defines when to turn the laser off and the number of IDLE characters
+ * to transmit at the beginning of a grant of nonFEC frames.
+ * Units are double-octet words (TQ).
+ */
+#define LIF_LASER_OFF_IDLE_REG		0x98
+
+/*
+ * A period during which the LIF transmits idle characters before
+ * the transmission burst of non-FEC frames.
+*/
+#define  LIF_LASER_OFF_IDLE_CFTXINITIDLE_SHIFT	16
+#define  LIF_LASER_OFF_IDLE_CFTXINITIDLE_MASK	0xffff0000
+
+/*
+ * [6:
+ * 0] Offset (+/-) from Grant End time at which to turn laser off[7]:
+ * 0:
+ * Positive value:
+ * turn off laser after end of grant slot.
+ * 1:
+ * Negative value:
+ * turn off laser before end of grant slot.
+*/
+#define  LIF_LASER_OFF_IDLE_CFTXLASEROFFDELTA_SHIFT	0
+#define  LIF_LASER_OFF_IDLE_CFTXLASEROFFDELTA_MASK	0xff
+
+
+/*
+ * Register <LIF_FEC_INIT_IDLE>
+ *
+ * Defines the number of IDLE characters to transmit at the beginning of
+ * a grant of FEC frames.
+ * Units are double-octet words (TQ).
+ */
+#define LIF_FEC_INIT_IDLE_REG		0x9c
+
+/*
+ * A period during which the LIF transmits idle characters before
+ * the transmission burst of FEC frames.
+*/
+#define  LIF_FEC_INIT_IDLE_CFTXFECINITIDLE_SHIFT	0
+#define  LIF_FEC_INIT_IDLE_CFTXFECINITIDLE_MASK	0xffff
+
+
+/*
+ * Register <LIF_FEC_ERR_ALLOW>
+ *
+ * Allowed hamming distance between received SFEC/TFEC and referencevalue.
+ */
+#define LIF_FEC_ERR_ALLOW_REG		0xa0
+
+/*
+ * The number of bit error allow for TFEC detection.
+ * Default :
+ * 0x5
+*/
+#define  LIF_FEC_ERR_ALLOW_CFRXTFECBITERRALLOW_SHIFT	4
+#define  LIF_FEC_ERR_ALLOW_CFRXTFECBITERRALLOW_MASK	0xf0
+
+/*
+ * The number of bit error allow for SFEC detection.
+ * Default :
+ * 0x5
+*/
+#define  LIF_FEC_ERR_ALLOW_CFRXSFECBITERRALLOW_SHIFT	0
+#define  LIF_FEC_ERR_ALLOW_CFRXSFECBITERRALLOW_MASK	0xf
+
+
+/*
+ * Register <LIF_SEC_KEY_SEL> - read-only
+ *
+ * This register is a read-only status of the last downstream key
+ * selected on the 32 downstream LLIDs.
+ * This register allows for software to detect a key switchover or to
+ * determine the current downstream key.
+ */
+#define LIF_SEC_KEY_SEL_REG		0xa4
+
+/*
+ * [31:
+ * 0] Key select status for bidirectional LLIDs; bitwise encodedper LLID
+ * index
+*/
+#define  LIF_SEC_KEY_SEL_KEYSEL_SHIFT	0
+#define  LIF_SEC_KEY_SEL_KEYSEL_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_DN_ENCRYPT_STAT>
+ *
+ * Provides per-LLID status of downstream encryption.
+ * Clear a bit (disable encryption on an LLID) by writing 1 to it.
+ */
+#define LIF_DN_ENCRYPT_STAT_REG		0xa8
+
+/*
+ * Provides the status of the current encryption mode for each LLID.
+ * In EPON mode (and with bit enEpnMixEncryption set in the LIF
+ * Control Register) encryption for an LLID can be disabled only by writing
+ * "1" to the appropriate bit in this register.
+ * 0:
+ * Encryption disabled; 1:
+ * Encryption enabled. Bitwise encoded per LLID index.
+*/
+#define  LIF_DN_ENCRYPT_STAT_ENENCRYPT_SHIFT	0
+#define  LIF_DN_ENCRYPT_STAT_ENENCRYPT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_SEC_UP_KEY_STAT> - read-only
+ *
+ * Provides per-LLID status of upstream security key.
+ */
+#define LIF_SEC_UP_KEY_STAT_REG		0xac
+
+/*
+ * Provides current active key for upstream LLIDs.
+ * Bitwise encoded per LLID index.
+*/
+#define  LIF_SEC_UP_KEY_STAT_KEYUPSEL_SHIFT	0
+#define  LIF_SEC_UP_KEY_STAT_KEYUPSEL_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_SEC_UP_ENCRYPT_STAT> - read-only
+ *
+ * Provides per-LLID status of upstream security.
+ */
+#define LIF_SEC_UP_ENCRYPT_STAT_REG	0xb0
+
+/*
+ * Provides per-LLID status of the current upstream encryption mode.
+ * 0:
+ * Encryption disabled; 1:
+ * Encryption enabled. Bitwise encoded per LLID index.
+*/
+#define  LIF_SEC_UP_ENCRYPT_STAT_ENUPENCRYPT_SHIFT	0
+#define  LIF_SEC_UP_ENCRYPT_STAT_ENUPENCRYPT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_SEC_UP_MPCP_OFFSET>
+ *
+ * Provides MPCP correction for EPON encryption.
+ */
+#define LIF_SEC_UP_MPCP_OFFSET_REG	0xb4
+
+/* Provides MPCP offset correction. */
+#define  LIF_SEC_UP_MPCP_OFFSET_SECUPMPCPOFFSET_SHIFT	0
+#define  LIF_SEC_UP_MPCP_OFFSET_SECUPMPCPOFFSET_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_FEC_PER_LLID>
+ *
+ * Provides upstream per LLID FEC enabling.
+ */
+#define LIF_FEC_PER_LLID_REG		0xb8
+
+/* Per-LLID FEC Enable for LLID 0-31. */
+#define  LIF_FEC_PER_LLID_CFFECTXFECENLLID_SHIFT	0
+#define  LIF_FEC_PER_LLID_CFFECTXFECENLLID_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_LINE_CODE_ERR_CNT>
+ *
+ * Aggregate statistics for the LIF receive channel.
+ * These registerssaturate at their maximum and clear when read.
+ * Note:
+ * These registers are also writable for test/diagnostics purposes.
+ */
+#define LIF_RX_LINE_CODE_ERR_CNT_REG	0xbc
+
+/* Counter value. */
+#define  LIF_RX_LINE_CODE_ERR_CNT_RXLINECODEERRCNT_SHIFT	0
+#define  LIF_RX_LINE_CODE_ERR_CNT_RXLINECODEERRCNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_MPCP_FRM>
+ *
+ */
+#define LIF_RX_AGG_MPCP_FRM_REG		0xc0
+
+/* Counter value. */
+#define  LIF_RX_AGG_MPCP_FRM_RXAGGMPCPCNT_SHIFT	0
+#define  LIF_RX_AGG_MPCP_FRM_RXAGGMPCPCNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_GOOD_FRM>
+ *
+ */
+#define LIF_RX_AGG_GOOD_FRM_REG		0xc4
+
+/* Counter value. */
+#define  LIF_RX_AGG_GOOD_FRM_RXAGGGOODCNT_SHIFT	0
+#define  LIF_RX_AGG_GOOD_FRM_RXAGGGOODCNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_GOOD_BYTE>
+ *
+ */
+#define LIF_RX_AGG_GOOD_BYTE_REG	0xc8
+
+/* Counter value. */
+#define  LIF_RX_AGG_GOOD_BYTE_RXAGGGOODBYTESCNT_SHIFT	0
+#define  LIF_RX_AGG_GOOD_BYTE_RXAGGGOODBYTESCNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_UNDERSZ_FRM>
+ *
+ */
+#define LIF_RX_AGG_UNDERSZ_FRM_REG	0xcc
+
+/* Counter value. */
+#define  LIF_RX_AGG_UNDERSZ_FRM_RXAGGUNDERSZCNT_SHIFT	0
+#define  LIF_RX_AGG_UNDERSZ_FRM_RXAGGUNDERSZCNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_OVERSZ_FRM>
+ *
+ */
+#define LIF_RX_AGG_OVERSZ_FRM_REG	0xd0
+
+/* Counter value. */
+#define  LIF_RX_AGG_OVERSZ_FRM_RXAGGOVERSZCNT_SHIFT	0
+#define  LIF_RX_AGG_OVERSZ_FRM_RXAGGOVERSZCNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_CRC8_FRM>
+ *
+ */
+#define LIF_RX_AGG_CRC8_FRM_REG		0xd4
+
+/* Counter value. */
+#define  LIF_RX_AGG_CRC8_FRM_RXAGGCRC8ERRCNT_SHIFT	0
+#define  LIF_RX_AGG_CRC8_FRM_RXAGGCRC8ERRCNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_FEC_FRM>
+ *
+ */
+#define LIF_RX_AGG_FEC_FRM_REG		0xd8
+
+/* Counter value. */
+#define  LIF_RX_AGG_FEC_FRM_RXAGGFEC_SHIFT	0
+#define  LIF_RX_AGG_FEC_FRM_RXAGGFEC_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_FEC_BYTE>
+ *
+ */
+#define LIF_RX_AGG_FEC_BYTE_REG		0xdc
+
+/* Counter value. */
+#define  LIF_RX_AGG_FEC_BYTE_RXAGGFECBYTES_SHIFT	0
+#define  LIF_RX_AGG_FEC_BYTE_RXAGGFECBYTES_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_FEC_EXC_ERR_FRM>
+ *
+ */
+#define LIF_RX_AGG_FEC_EXC_ERR_FRM_REG	0xe0
+
+/* Counter value. */
+#define  LIF_RX_AGG_FEC_EXC_ERR_FRM_RXAGGFECEXCEEDERRS_SHIFT	0
+#define  LIF_RX_AGG_FEC_EXC_ERR_FRM_RXAGGFECEXCEEDERRS_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_NONFEC_GOOD_FRM>
+ *
+ */
+#define LIF_RX_AGG_NONFEC_GOOD_FRM_REG	0xe4
+
+/* Counter value. */
+#define  LIF_RX_AGG_NONFEC_GOOD_FRM_RXAGGNONFECGOOD_SHIFT	0
+#define  LIF_RX_AGG_NONFEC_GOOD_FRM_RXAGGNONFECGOOD_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_NONFEC_GOOD_BYTE>
+ *
+ */
+#define LIF_RX_AGG_NONFEC_GOOD_BYTE_REG	0xe8
+
+/* Counter value. */
+#define  LIF_RX_AGG_NONFEC_GOOD_BYTE_RXAGGNONFECGOODBYTES_SHIFT	0
+#define  LIF_RX_AGG_NONFEC_GOOD_BYTE_RXAGGNONFECGOODBYTES_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_ERR_BYTES>
+ *
+ */
+#define LIF_RX_AGG_ERR_BYTES_REG	0xec
+
+/* Counter value. */
+#define  LIF_RX_AGG_ERR_BYTES_RXAGGERRBYTES_SHIFT	0
+#define  LIF_RX_AGG_ERR_BYTES_RXAGGERRBYTES_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_ERR_ZEROES>
+ *
+ */
+#define LIF_RX_AGG_ERR_ZEROES_REG	0xf0
+
+/* Counter value. */
+#define  LIF_RX_AGG_ERR_ZEROES_RXAGGERRZEROES_SHIFT	0
+#define  LIF_RX_AGG_ERR_ZEROES_RXAGGERRZEROES_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_NO_ERR_BLKS>
+ *
+ */
+#define LIF_RX_AGG_NO_ERR_BLKS_REG	0xf4
+
+/* Counter value. */
+#define  LIF_RX_AGG_NO_ERR_BLKS_RXAGGNOERRBLKS_SHIFT	0
+#define  LIF_RX_AGG_NO_ERR_BLKS_RXAGGNOERRBLKS_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_COR_BLKS>
+ *
+ */
+#define LIF_RX_AGG_COR_BLKS_REG		0xf8
+
+/* Counter value. */
+#define  LIF_RX_AGG_COR_BLKS_RXAGGCORRBLKS_SHIFT	0
+#define  LIF_RX_AGG_COR_BLKS_RXAGGCORRBLKS_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_UNCOR_BLKS>
+ *
+ */
+#define LIF_RX_AGG_UNCOR_BLKS_REG	0xfc
+
+/* Counter value. */
+#define  LIF_RX_AGG_UNCOR_BLKS_RXAGGUNCORRBLKS_SHIFT	0
+#define  LIF_RX_AGG_UNCOR_BLKS_RXAGGUNCORRBLKS_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_ERR_ONES>
+ *
+ */
+#define LIF_RX_AGG_ERR_ONES_REG		0x100
+
+/* Counter value. */
+#define  LIF_RX_AGG_ERR_ONES_RXAGGERRONES_SHIFT	0
+#define  LIF_RX_AGG_ERR_ONES_RXAGGERRONES_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_RX_AGG_ERR_FRM>
+ *
+ */
+#define LIF_RX_AGG_ERR_FRM_REG		0x104
+
+/* Counter value. */
+#define  LIF_RX_AGG_ERR_FRM_RXAGGERROREDCNT_SHIFT	0
+#define  LIF_RX_AGG_ERR_FRM_RXAGGERROREDCNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_TX_PKT_CNT>
+ *
+ * Aggregate statistics for the LIF transmit channel.
+ * These registerssaturate at their maximum value and clear when read.
+ * Note:
+ * These registers are also writable for test/diagnostics purposes.
+ */
+#define LIF_TX_PKT_CNT_REG		0x108
+
+/* Counter value. */
+#define  LIF_TX_PKT_CNT_TXFRAMECNT_SHIFT	0
+#define  LIF_TX_PKT_CNT_TXFRAMECNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_TX_BYTE_CNT>
+ *
+ */
+#define LIF_TX_BYTE_CNT_REG		0x10c
+
+/* Counter value. */
+#define  LIF_TX_BYTE_CNT_TXBYTECNT_SHIFT	0
+#define  LIF_TX_BYTE_CNT_TXBYTECNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_TX_NON_FEC_PKT_CNT>
+ *
+ */
+#define LIF_TX_NON_FEC_PKT_CNT_REG	0x110
+
+/* Counter value. */
+#define  LIF_TX_NON_FEC_PKT_CNT_TXNONFECFRAMECNT_SHIFT	0
+#define  LIF_TX_NON_FEC_PKT_CNT_TXNONFECFRAMECNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_TX_NON_FEC_BYTE_CNT>
+ *
+ */
+#define LIF_TX_NON_FEC_BYTE_CNT_REG	0x114
+
+/* Counter value. */
+#define  LIF_TX_NON_FEC_BYTE_CNT_TXNONFECBYTECNT_SHIFT	0
+#define  LIF_TX_NON_FEC_BYTE_CNT_TXNONFECBYTECNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_TX_FEC_PKT_CNT>
+ *
+ */
+#define LIF_TX_FEC_PKT_CNT_REG		0x118
+
+/* Counter value. */
+#define  LIF_TX_FEC_PKT_CNT_TXFECFRAMECNT_SHIFT	0
+#define  LIF_TX_FEC_PKT_CNT_TXFECFRAMECNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_TX_FEC_BYTE_CNT>
+ *
+ * Count of FEC bytes transmitted by Line-Coder.
+ */
+#define LIF_TX_FEC_BYTE_CNT_REG		0x11c
+
+/* Counter value. */
+#define  LIF_TX_FEC_BYTE_CNT_TXFECBYTECNT_SHIFT	0
+#define  LIF_TX_FEC_BYTE_CNT_TXFECBYTECNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_TX_FEC_BLK_CNT>
+ *
+ */
+#define LIF_TX_FEC_BLK_CNT_REG		0x120
+
+/* Counter value. */
+#define  LIF_TX_FEC_BLK_CNT_TXFECBLKSCNT_SHIFT	0
+#define  LIF_TX_FEC_BLK_CNT_TXFECBLKSCNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_TX_MPCP_PKT_CNT>
+ *
+ */
+#define LIF_TX_MPCP_PKT_CNT_REG		0x124
+
+/* Counter value. */
+#define  LIF_TX_MPCP_PKT_CNT_TXMPCPFRAMECNT_SHIFT	0
+#define  LIF_TX_MPCP_PKT_CNT_TXMPCPFRAMECNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_DEBUG_TX_DATA_PKT_CNT>
+ *
+ * Count of transmitted frames.
+ * For debug.
+ */
+#define LIF_DEBUG_TX_DATA_PKT_CNT_REG	0x128
+
+/* Counter value. */
+#define  LIF_DEBUG_TX_DATA_PKT_CNT_TXDATAFRAMECNT_SHIFT	0
+#define  LIF_DEBUG_TX_DATA_PKT_CNT_TXDATAFRAMECNT_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_FEC_LLID_STATUS>
+ *
+ * Provides sticky status of which LLIDs have received FEC-encoded frames.
+ * Status is provided bitwise per-LLID; each status bit can be cleared
+ * bywriting 1 to it.
+ */
+#define LIF_FEC_LLID_STATUS_REG		0x12c
+
+/*
+ * [31:
+ * 0] stkyFecFecvLlid Per-LLID FEC receive status for bidirectional LLIDs.
+ * 0:
+ * No FEC frames detected; 1:
+ * FEC frame reception detected
+*/
+#define  LIF_FEC_LLID_STATUS_STKYFECREVCLLIDBMSK_SHIFT	0
+#define  LIF_FEC_LLID_STATUS_STKYFECREVCLLIDBMSK_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_SEC_RX_TEK_IG_IV_LLID>
+ *
+ * Provides a programmable LLID field that will allow for the initialization
+ * vector (in TEK mode only) belonging to non-Raman frames to be preserved
+ * across Raman frames.
+ */
+#define LIF_SEC_RX_TEK_IG_IV_LLID_REG	0x130
+
+/*
+ * [15:
+ * 0] cfIgIvNullLlid Program with the 16 bit LLID of the Raman generated
+ * random frames.
+ * [16] cfIgIvNullLlidEn Enable Ignore LLID functionality.
+*/
+#define  LIF_SEC_RX_TEK_IG_IV_LLID_CFIGIVNULLLLID_SHIFT	0
+#define  LIF_SEC_RX_TEK_IG_IV_LLID_CFIGIVNULLLLID_MASK	0x1ffff
+
+
+/*
+ * Register <LIF_PON_BER_INTERV_THRESH>
+ *
+ * Provides control for determining when to assert an interrupt when a
+ * defined number of programmable errors are observed within a defined
+ * window of time.
+ * These parameters directly control the behavior of intRxBerThreshExc.
+ */
+#define LIF_PON_BER_INTERV_THRESH_REG	0x134
+
+/*
+ * Programmable interval of time.
+ * Units are in 16 ns increments, for a maximum of 1 ms.
+*/
+#define  LIF_PON_BER_INTERV_THRESH_CFRXLIFBERINTERVAL_SHIFT	15
+#define  LIF_PON_BER_INTERV_THRESH_CFRXLIFBERINTERVAL_MASK	0xffff8000
+
+/*
+ * Programmable error threshold.
+ * Maximum number of errors seen within a programmed interval.
+*/
+#define  LIF_PON_BER_INTERV_THRESH_CFRXLIFBERTHRESHLD_SHIFT	2
+#define  LIF_PON_BER_INTERV_THRESH_CFRXLIFBERTHRESHLD_MASK	0x7ffc
+
+/*
+ * 0:
+ * Disabled1:
+ * Count Line Code Errors2:
+ * Count Corrected Symbols3:
+ * Count Uncorrectable Blocks (9 symbol errors)
+*/
+#define  LIF_PON_BER_INTERV_THRESH_CFRXLIFBERCNTRL_SHIFT	0
+#define  LIF_PON_BER_INTERV_THRESH_CFRXLIFBERCNTRL_MASK	0x3
+
+
+/*
+ * Register <LIF_LSR_MON_A_CTRL>
+ *
+ * Provides control over the laser monitor.
+ */
+#define LIF_LSR_MON_A_CTRL_REG		0x138
+
+/* Provides status of laser enable, directly from the I/O pin input stage. */
+#define  LIF_LSR_MON_A_CTRL_IOPBILASERENS1A_MASK	0x20
+
+/*
+ * Laser monitor polarity.
+ * 0 - active low; 1 - active high.
+*/
+#define  LIF_LSR_MON_A_CTRL_CFGLSRMONACTHI_MASK	0x10
+
+/*
+ * Main reset for laser monitor.
+ * 0:
+ * Reset1:
+ * Normal operation
+*/
+#define  LIF_LSR_MON_A_CTRL_PBILASERMONRSTA_N_PRE_MASK	0x1
+
+
+/*
+ * Register <LIF_LSR_MON_A_MAX_THR>
+ *
+ * Defines a threshold for the laserOnMaxInt interrupt.
+ * laserOnMaxInt asserts when the laser enable signal stays active for a
+ * time greater than or equal to the laserOnMaxThresh setting.
+ * laserOnMaxThresh is expressed in units of TQ.
+ * However, be aware that the laser monitor operates from the core epnClk125
+ * clock, so there may be some inaccuracy if the transmitter is running
+ * loop-timed (i.
+ * e.
+ * from the recovered receive clock).
+ * In addition, the laser-on time can jitter by 8 ns (1/2 TQ) even for
+ * non-loop timed applications as the laser monitor on-time counter may not
+ * be TQ-aligned so an off-by-one error will occur half the time.
+ */
+#define LIF_LSR_MON_A_MAX_THR_REG	0x13c
+
+/*
+ * Specifies the threshold for laserOnMaxInt.
+ * Units are TQ.
+ * Default:
+ * 0x80ffff
+*/
+#define  LIF_LSR_MON_A_MAX_THR_CFGLASERMONMAXA_SHIFT	0
+#define  LIF_LSR_MON_A_MAX_THR_CFGLASERMONMAXA_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_LSR_MON_A_BST_LEN> - read-only
+ *
+ * Indicates the laser-on time of the burst that set laserOffInt (i.
+ * e.
+ * the first burst which ended while laserOffInt was clear).
+ * Value is latched upon assertion of laserOffInt.
+ */
+#define LIF_LSR_MON_A_BST_LEN_REG	0x140
+
+/* Indicates length of most recent burst, in TQ. */
+#define  LIF_LSR_MON_A_BST_LEN_LASERONTIMEA_SHIFT	0
+#define  LIF_LSR_MON_A_BST_LEN_LASERONTIMEA_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_LSR_MON_A_BST_CNT> - read-only
+ *
+ * Counts the number of bursts (laser-off events) since the last
+ * Laser Monitor reset or since the last read of this register.
+ * This register clears when read.
+ */
+#define LIF_LSR_MON_A_BST_CNT_REG	0x144
+
+/*
+ * This value increments on the negating edge of laser enable.
+ * Saturates at 0xffffffff; and clears on read.
+*/
+#define  LIF_LSR_MON_A_BST_CNT_LASERMONBRSTCNTA_SHIFT	0
+#define  LIF_LSR_MON_A_BST_CNT_LASERMONBRSTCNTA_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_DEBUG_PON_SM> - read-only
+ *
+ * Provides status of state machines in the PON receive side of the LIF.
+ */
+#define LIF_DEBUG_PON_SM_REG		0x148
+
+/* 10B Alignment State Machine States */
+#define  LIF_DEBUG_PON_SM_ALIGNCSQQ_SHIFT	8
+#define  LIF_DEBUG_PON_SM_ALIGNCSQQ_MASK	0x3f00
+
+/* 8B State Machine States */
+#define  LIF_DEBUG_PON_SM_RXFECIFCSQQ_SHIFT	0
+#define  LIF_DEBUG_PON_SM_RXFECIFCSQQ_MASK	0x1f
+
+
+/*
+ * Register <LIF_DEBUG_FEC_SM> - read-only
+ *
+ * Provides status of state machines in the FEC receive side of the LIF.
+ */
+#define LIF_DEBUG_FEC_SM_REG		0x14c
+
+/* FEC Receive Syndrome States */
+#define  LIF_DEBUG_FEC_SM_RXSYNCSQQ_SHIFT	16
+#define  LIF_DEBUG_FEC_SM_RXSYNCSQQ_MASK	0x1f0000
+
+/* FEC Receive Corrector States */
+#define  LIF_DEBUG_FEC_SM_RXCORCS_SHIFT	8
+#define  LIF_DEBUG_FEC_SM_RXCORCS_MASK	0x300
+
+/* FEC Receive Output States */
+#define  LIF_DEBUG_FEC_SM_FECRXOUTCS_SHIFT	0
+#define  LIF_DEBUG_FEC_SM_FECRXOUTCS_MASK	0x1f
+
+
+/*
+ * Register <LIF_AE_PKTNUM_WINDOW>
+ *
+ * Provides the tolerance for packet number reception in replay
+ * protection mode.
+ * Only applicable in 802.
+ * 1ae security mode.
+ */
+#define LIF_AE_PKTNUM_WINDOW_REG	0x150
+
+/*
+ * In replay protection, the packet number is checked against the expected
+ * packet number.
+ * If it is greater than or equal to, the packet will be accepted.
+ * Otherwise, it will be discarded.
+ * This register provides the tolerance by subtracting the current expected
+ * packet number by this amount.
+*/
+#define  LIF_AE_PKTNUM_WINDOW_CFGAEPKTNUMWND_SHIFT	0
+#define  LIF_AE_PKTNUM_WINDOW_CFGAEPKTNUMWND_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_AE_PKTNUM_THRESH>
+ *
+ * Provides the threshold to warn of impending packet number rollover
+ * on transmit.
+ */
+#define LIF_AE_PKTNUM_THRESH_REG	0x154
+
+/* Defines the maximum packet number rollover. */
+#define  LIF_AE_PKTNUM_THRESH_CFGPKTNUMMAXTHRESH_SHIFT	0
+#define  LIF_AE_PKTNUM_THRESH_CFGPKTNUMMAXTHRESH_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_AE_PKTNUM_STAT> - read-only
+ *
+ * Provides the status of packet number.
+ */
+#define LIF_AE_PKTNUM_STAT_REG		0x158
+
+/*
+ * Provides the LLID index whose packet number exceeded the maximum packet
+ * number threshold.
+*/
+#define  LIF_AE_PKTNUM_STAT_SECUPINDXWTPKTNUMMAX_SHIFT	16
+#define  LIF_AE_PKTNUM_STAT_SECUPINDXWTPKTNUMMAX_MASK	0x1f0000
+
+/* Provides the LLID index that was aborted due to replay protection. */
+#define  LIF_AE_PKTNUM_STAT_SECDNINDXWTPKTNUMABORT_SHIFT	0
+#define  LIF_AE_PKTNUM_STAT_SECDNINDXWTPKTNUMABORT_MASK	0x1f
+
+
+/*
+ * Registers <LIF_LLID_8> - <x> is [ 0 => 8 ]
+ *
+ * Provides configuration for LLID mapping.
+ * LIF supports 32 bidirectional LLIDs.
+ */
+#define LIF_LLIDx_8_15_REG(x)		(0x15c + (x) * 0x4)
+
+/*
+ * [15:
+ * 0]:
+ * Specifies LLID Index lookup value. [16]:
+ * Enable LLID; 0:
+ * Disable LLID; 1:
+ * Enable LLID. In upstream P2P, 802.
+ * 1ae mode, bit[11:
+ * 0] provides lookup with VLAN'sVID to index 8.
+ * In downstream P2P, 802.
+ * 1ae mode, registersLIF_P2P_AE_SCI_LO[0:
+ * 15]/LIF_P2P_AE_SCI_HI[0:
+ * 15]provide lookup with packet's explicit SCI to an index.
+*/
+#define  LIF_LLIDx_8_15_CFGLLID8_SHIFT	0
+#define  LIF_LLIDx_8_15_CFGLLID8_MASK	0x1ffff
+
+
+/*
+ * Registers <LIF_LLID_24> - <x> is [ 0 => 8 ]
+ *
+ * Provides configuration for LLID mapping.
+ * LIF supports 32 bidirectional LLIDs.
+ */
+#define LIF_LLIDx_24_31_REG(x)		(0x17c + (x) * 0x4)
+
+/*
+ * [15:
+ * 0]:
+ * Specifies LLID Index lookup value. [16]:
+ * Enable LLID; 0:
+ * Disable LLID; 1:
+ * Enable LLID. In upstream P2P, 802.
+ * 1ae mode, bit[11:
+ * 0] provides lookup with VLAN'sVID to index 24.
+*/
+#define  LIF_LLIDx_24_31_CFGLLID24_SHIFT	0
+#define  LIF_LLIDx_24_31_CFGLLID24_MASK	0x1ffff
+
+
+/*
+ * Register <LIF_VLAN_TYPE>
+ *
+ * Provides a programmable VLAN type identifier for upstream P2P traffic.
+ */
+#define LIF_VLAN_TYPE_REG		0x19c
+
+/* Defines a VLAN type, in addition to 0x8100. */
+#define  LIF_VLAN_TYPE_CFGVLANTYPE_SHIFT	0
+#define  LIF_VLAN_TYPE_CFGVLANTYPE_MASK	0xffff
+
+
+/*
+ * Register <LIF_P2P_AE_SCI_EN>
+ *
+ * Enables SCI lookup for 802.
+ * 1ae, P2P downstream traffic.
+ */
+#define LIF_P2P_AE_SCI_EN_REG		0x1a0
+
+/*
+ * Enables SCI lookup, via LIF_P2P_AE_SCI_LO_[0:
+ * 15]/LIF_P2P_AE_SCI_HI[0:
+ * 15] registers.
+ * Each bit corresponds to index 0 - 15.
+*/
+#define  LIF_P2P_AE_SCI_EN_CFGP2PSCIEN_SHIFT	0
+#define  LIF_P2P_AE_SCI_EN_CFGP2PSCIEN_MASK	0xffff
+
+
+/*
+ * Registers <LIF_P2P_AE_SCI_LO_0> - <x> is [ 0 => 16 ]
+ *
+ * Provides SCI lookup for 802.
+ * 1ae, P2P downstream traffic.
+ */
+#define LIF_P2P_AE_SCI_LOx_REG(x)	(0x1a4 + (x) * 0x8)
+
+/*
+ * Defines the lower 32-bits lookup value of SCI to index 0.
+ * If implicit SCI mode, index defaults to what was mapped by LIF_LLID_[0:
+ * 15] with value 0x5555.
+*/
+#define  LIF_P2P_AE_SCI_LOx_CFGP2PSCI_LO_0_SHIFT	0
+#define  LIF_P2P_AE_SCI_LOx_CFGP2PSCI_LO_0_MASK	0xffffffff
+
+
+/*
+ * Registers <LIF_P2P_AE_SCI_HI_0> - <x> is [ 0 => 16 ]
+ *
+ * Provides SCI lookup for 802.
+ * 1ae, P2P downstream traffic.
+ */
+#define LIF_P2P_AE_SCI_HIx_REG(x)	(0x1a8 + (x) * 0x8)
+
+/*
+ * Defines the upper 32-bits lookup value of SCI to index 0.
+ * If implicit SCI mode, index defaults to what was mapped by LIF_LLID_[0:
+ * 15] with value 0x5555.
+*/
+#define  LIF_P2P_AE_SCI_HIx_CFGP2PSCI_HI_0_SHIFT	0
+#define  LIF_P2P_AE_SCI_HIx_CFGP2PSCI_HI_0_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_SEC_UP_KEY_STAT_1> - read-only
+ *
+ * Provides addition per-LLID status of upstream security key for 802.
+ * 1ae P2P.
+ */
+#define LIF_SEC_UP_KEY_STAT_1_REG	0x224
+
+/*
+ * In 802.
+ * 1ae P2P mode, the number of key supported per LLID is 4.
+ * This register provides the upper bit of the 2-bits key number.
+ * The lower bit is provided by LIF_SEC_UP_KEY_STAT.
+*/
+#define  LIF_SEC_UP_KEY_STAT_1_KEYUPSEL_HI_SHIFT	0
+#define  LIF_SEC_UP_KEY_STAT_1_KEYUPSEL_HI_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_SEC_KEY_SEL_1> - read-only
+ *
+ * Provides addition per-LLID status of downstream security key for802.
+ * 1ae P2P.
+ */
+#define LIF_SEC_KEY_SEL_1_REG		0x228
+
+/*
+ * In 802.
+ * 1ae P2P mode, the number of key supported per LLID is 4.
+ * This register provides the upper bit of the 2-bits key number.
+ * The lower bit is provided by LIF_SEC_KEY_SEL.
+*/
+#define  LIF_SEC_KEY_SEL_1_KEYSEL_HI_SHIFT	0
+#define  LIF_SEC_KEY_SEL_1_KEYSEL_HI_MASK	0xffffffff
+
+
+/*
+ * Register <LIF_PON_SEC_TX_PLAINTXT_AE_PAD_CONTROL>
+ *
+ */
+#define LIF_PON_SEC_TX_PLAINTXT_AE_PAD_CONTROL_REG	0x22c
+
+/*
+ * Used to pad plain text frames in 802.
+ * 1AE encryption mode with a fixed number of IPG equivalent to the AE
+ * overhead.
+ * Overhead B is equivalent to 16TQ or 32 bytes of preceding IPG for
+ * explicit SCI.
+*/
+#define  LIF_PON_SEC_TX_PLAINTXT_AE_PAD_CONTROL_CF_PLAINTXT_OH_B_IDLE_PAD_SHIFT	6
+#define  LIF_PON_SEC_TX_PLAINTXT_AE_PAD_CONTROL_CF_PLAINTXT_OH_B_IDLE_PAD_MASK	0xfc0
+
+/*
+ * Used to pad plain text frames in 802.
+ * 1AE encryption mode with a fixed number of IPG equivalent to the AE
+ * overhead.
+ * Overhead A is equivalent to 12TQ or 24 bytes of preceding IPG for
+ * implicit SCI.
+*/
+#define  LIF_PON_SEC_TX_PLAINTXT_AE_PAD_CONTROL_CF_PLAINTXT_OH_A_IDLE_PAD_SHIFT	0
+#define  LIF_PON_SEC_TX_PLAINTXT_AE_PAD_CONTROL_CF_PLAINTXT_OH_A_IDLE_PAD_MASK	0x3f
+
+
+/*
+ * Register <LIF_P2P_AUTONEG_CONTROL>
+ *
+ * Autonegotiation Configuration
+ */
+#define LIF_P2P_AUTONEG_CONTROL_REG	0x230
+
+/*
+ * Link Timer to allow link partner time to process current state before
+ * advancing to next state. HW Default is about 33.
+ * 5 ms. S/W should set to 0x0fff for 2.
+ * 0ms timer for SGMII Style Formatting
+*/
+#define  LIF_P2P_AUTONEG_CONTROL_CF_AUTONEG_LINKTIMER_SHIFT	16
+#define  LIF_P2P_AUTONEG_CONTROL_CF_AUTONEG_LINKTIMER_MASK	0xffff0000
+
+/*
+ * Mode Select for Auto Neg: 0 = CL37 style (33.5 ms link timer);
+ * 1 = SGMII style (2.0 ms link timer).  See the LP ability and
+ * advertisement registers for the format.
+ */
+#define  LIF_P2P_AUTONEG_CONTROL_CF_AUTONEG_MODE_SEL_MASK	0x4
+
+/*
+ * 0 = Restart disabled / completed; 1 = trigger restart.
+ * H/W will clear to 0 when the restart occurs.
+ */
+#define  LIF_P2P_AUTONEG_CONTROL_CF_AUTONEG_RESTART_MASK	0x2
+
+/* 0 = Disable autonegotiation; 1 = enable autonegotiation. */
+#define  LIF_P2P_AUTONEG_CONTROL_CF_AUTONEG_EN_MASK	0x1
+
+
+/*
+ * Register <LIF_P2P_AUTONEG_STATUS> - read-only
+ *
+ * Autonegotiation Status
+ */
+#define LIF_P2P_AUTONEG_STATUS_REG	0x234
+
+/*
+ * 0 = Remote fault not detected from link partner; 1 = remote
+ * fault detected from link partner.  Only updates after the AN
+ * process.  Will reset upon restart.
+ */
+#define  LIF_P2P_AUTONEG_STATUS_AN_LP_REMOTE_FAULT_MASK	0x4
+
+/*
+ * 0 = No sync after AN complete; 1 = sync after AN complete.
+ * Will only update after the AN process.
+ * Will reset upon restart.
+ */
+#define  LIF_P2P_AUTONEG_STATUS_AN_SYNC_STATUS_MASK	0x2
+
+/*
+ * 0 = Autoneg not complete; 1 = autoneg completed.
+ * Will only update after the AN process.
+ * Will reset upon restart.
+ */
+#define  LIF_P2P_AUTONEG_STATUS_AN_COMPLETE_MASK	0x1
+
+
+/*
+ * Register <LIF_P2P_AUTONEG_ABILITY_CONFIG_REG>
+ *
+ * Autonegotiation Ability / Config Register of this device
+ */
+#define LIF_P2P_AUTONEG_ABILITY_CONFIG_REG_REG	0x238
+
+/*
+ * Defines the Autonegotiation Ability / Config Register of this
+ * device.
+ *
+ * cf_autoneg_mode_sel = 0 (CL37), bits:
+ * [15]    NP, Next Page
+ * [14]    ACK (H/W overwrite)
+ * [13]    RF2, Remote Fault
+ * [12]    RF1, Remote Fault
+ * [11:9]  Reserved
+ * [8]     PS2, Asymmetric Pause
+ * [7]     PS1, Symmetric Pause
+ * [6]     HD, Half Duplex
+ * [5]     FD, Full Duplex
+ * [4:0]   Reserved
+ *
+ * cf_autoneg_mode_sel = 1 (SGMII), bits:
+ * [15]    Link State (HW controlled): 1 = Link Up, 0 = Link Down
+ * [14]    ACK (H/W overwrite)
+ * [13]    Reserved
+ * [12]    Duplex mode: 1 = Full Duplex, 0 = Half Duplex
+ * [11:10] 11 = Reserved, 10 = 1000Mbps, 01 = 100Mbps, 00 = 10Mbps
+ * [9:1]   Reserved
+ * [0]     Set to 1 per SGMII Spec (S/W must set)
+ *
+ */
+#define  LIF_P2P_AUTONEG_ABILITY_CONFIG_REG_CF_LIF_P2P_AE_AUTONEG_CONFIG_ABILITY_SHIFT	0
+#define  LIF_P2P_AUTONEG_ABILITY_CONFIG_REG_CF_LIF_P2P_AE_AUTONEG_CONFIG_ABILITY_MASK	0xffff
+
+
+/*
+ * Register <LIF_P2P_AUTONEG_LINK_PARTNER_ABILITY_CONFIG_READ> - read-only
+ *
+ * Autonegotiation Ability / Config Register of Link Partner
+ */
+#define LIF_P2P_AUTONEG_LINK_PARTNER_ABILITY_CONFIG_READ_REG	0x23c
+
+/*
+ * Defines the Autonegotiation Ability / Config Register of the
+ * LINK PARTNER.  Only updates when AN is complete.
+ *
+ * cf_autoneg_mode_sel = 0 (CL37), bits:
+ * [15]    NP, Next Page
+ * [14]    ACK (H/W overwrite)
+ * [13]    RF2, Remote Fault
+ * [12]    RF1, Remote Fault
+ * [11:9]  Reserved
+ * [8]     PS2, Asymmetric Pause
+ * [7]     PS1, Symmetric Pause
+ * [6]     HD, Half Duplex
+ * [5]     FD, Full Duplex
+ * [4:0]   Reserved
+ *
+ * cf_autoneg_mode_sel = 1 (SGMII), bits:
+ * [15]    Link State (HW controlled): 1 = Link Up, 0 = Link Down
+ * [14]    ACK (H/W overwrite)
+ * [13]    Reserved
+ * [12]    Duplex mode: 1 = Full Duplex, 0 = Half Duplex
+ * [11:10] 11 = Reserved, 10 = 1000Mbps, 01 = 100Mbps, 00 = 10Mbps
+ * [9:1]   Reserved
+ * [0]     Set to 1 per SGMII Spec (S/W must set)
+ *
+ */
+#define  LIF_P2P_AUTONEG_LINK_PARTNER_ABILITY_CONFIG_READ_CF_LIF_P2P_AE_AUTONEG_LP_ABILITY_READ_SHIFT	0
+#define  LIF_P2P_AUTONEG_LINK_PARTNER_ABILITY_CONFIG_READ_CF_LIF_P2P_AE_AUTONEG_LP_ABILITY_READ_MASK	0xffff
+
+
+#endif /* ! EPON_LIF_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_nco_addr.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_nco_addr.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_nco_addr.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_nco_addr.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,309 @@
+#ifndef EPON_NCO_ADDR_H_
+#define EPON_NCO_ADDR_H_
+
+/* relative to epon */
+#define NCO_ADDR_OFFSET_0		0x2000
+
+/*
+ * Register <ADDR_NCO_CFG>
+ *
+ * This register is used to provision the Numerically ControlledOscillator
+ * (NCO) function.
+ */
+#define NCO_ADDR_NCO_CFG_REG		0x0
+
+/*
+ * Bypass the programmable duty cycle when cfgSrcOut is set to
+ * 1 or 2 (direct Lif/Xif pass through).
+ *
+ * Default: 1
+ */
+#define  NCO_CFG_CFGBYPASS_MASK		0x80
+
+/*
+ * Selects the 10MHz output source:
+ * 0: NCO drives the 10MHz output
+ * 1: Lif 10MHz drives the 10MHz output
+ * 2: Reserved
+ * 3: Reserved
+ */
+#define  NCO_CFG_CFGSRCOUT10MHZ_SHIFT	5
+#define  NCO_CFG_CFGSRCOUT10MHZ_MASK	0x60
+
+/*
+ * Selects the one PPS output source:
+ * 0: NCO drives the one PPS output
+ * 1: lifPpsSig drives the one PPS output
+ * 2: xifPpsSig drives the one PPS output
+ * 3: Output is zero
+ * The output source should be set to the Lif or Xif input until the
+ * NCO converges.  Only then should the NCO output be selected as the
+ * one PPS source.
+ */
+#define  NCO_CFG_CFGSRCOUT_SHIFT	3
+#define  NCO_CFG_CFGSRCOUT_MASK		0x18
+
+/*
+ * Selects the input reference source:
+ *
+ * 0: NCO "free runs" at the 125 MHz core clock.
+ * 1: NCO tracks lifPpsSig.
+ * 2: NCO tracks xifPpsSig.
+ * 3: Reserved.
+ *
+ */
+#define  NCO_CFG_CFGSRCIN_SHIFT		1
+#define  NCO_CFG_CFGSRCIN_MASK		0x6
+
+/*
+ * Set this bit to reset the NCO logic.
+ * Note: this does not reset the NCO configuration registers.
+ *
+ * 0: Normal NCO operation
+ * 1: Hold the NCO logic in reset
+ *
+ * Default: 1
+ */
+#define  NCO_CFG_CFGNCOCLR_MASK		0x1
+
+
+/*
+ * Register <ADDR_NCO_INT>
+ *
+ * This register is used to detect the presence and synchronization lockof
+ * the 1PPS signals.
+ * These bits are sticky; to clear a bit, write 1 to it.
+ */
+#define NCO_ADDR_NCO_INT_REG		0x4
+
+/*
+ * The NCO has not detected a 1pps input edge within +/-1us of the
+ * NCOgenerated edge.
+*/
+#define  NCO_INT_INTNONCOSYNC_MASK	0x4
+
+/* No edges of the Xif 1PPS signal have been detected over 2 NCOperiods. */
+#define  NCO_INT_INTNOXIFPPS_MASK	0x2
+
+/* No edges of the Lif 1PPS signal have been detected over 2 NCOperiods. */
+#define  NCO_INT_INTNOLIFPPS_MASK	0x1
+
+
+/*
+ * Register <ADDR_NCO_MSK>
+ *
+ * This register is used to mask NCO interrupts.
+ */
+#define NCO_ADDR_NCO_MSK_REG		0x8
+
+/* Mask for intNoNcoSync */
+#define  NCO_MSK_INTNONCOSYNCMASK_MASK	0x4
+
+/* Mask for intNoXifPps */
+#define  NCO_MSK_INTNOXIFPPSMASK_MASK	0x2
+
+/* Mask for intNoLifPps */
+#define  NCO_MSK_INTNOLIFPPSMASK_MASK	0x1
+
+
+/*
+ * Register <ADDR_NCO_1PPS_PERIOD>
+ *
+ * This register is used to provision the NCO's one pulse per second(1PPS)
+ * period.
+ */
+#define NCO_ADDR_NCO_1PPS_PERIOD_REG	0xc
+
+/*
+ * This register sets the period of the 1PPS signal, in units of 100ns.
+ * Default:
+ * 10,000,000
+*/
+#define  NCO_1PPS_PERIOD_CFG1PPSPERIOD_SHIFT	0
+#define  NCO_1PPS_PERIOD_CFG1PPSPERIOD_MASK	0xffffff
+
+
+/*
+ * Register <ADDR_NCO_8KHZ_PERIOD>
+ *
+ * This register is used to provision the NCO's 8 KHz period.
+ */
+#define NCO_ADDR_NCO_8KHZ_PERIOD_REG	0x10
+
+/*
+ * This register sets the period of the 8 KHz signal, in units of 100ns.
+ * Default:
+ * 1,250
+*/
+#define  NCO_8KHZ_PERIOD_CFG8KHZPERIOD_SHIFT	0
+#define  NCO_8KHZ_PERIOD_CFG8KHZPERIOD_MASK	0xffffff
+
+
+/*
+ * Register <ADDR_NCO_CENTER_FREQUENCY>
+ *
+ * This register is used to provision the NCO's initial period
+ * integralvalue.
+ * The reset default is calculated as (8nS/100nS)*(2^32) => 343,597,394
+ */
+#define NCO_ADDR_NCO_CENTER_FREQUENCY_REG	0x14
+
+/*
+ * Initial NCO period integralDefault is 343,597,394.
+ * The NCO Period Count register may be readto align the input frequency
+ * with the default to speed up lockingtime.
+*/
+#define  NCO_CENTER_FREQUENCY_CFGNCODEFAULT_SHIFT	0
+#define  NCO_CENTER_FREQUENCY_CFGNCODEFAULT_MASK	0xffffffff
+
+
+/*
+ * Register <ADDR_NCO_INT_GAIN>
+ *
+ * This register is used to provision the NCO's integral gain value.
+ * The provisioned value must be within the range of 15ppm.
+ * The value is in 0.
+ * 001 ppb units
+ */
+#define NCO_ADDR_NCO_INT_GAIN_REG	0x18
+
+/*
+ * NCO integral gain in number of 0.
+ * 001 ppb quanta.
+ * The gain may be increased for faster convergence, and then decreasedfor
+ * increased accuracy and holdover.
+ * Default is 0x400.
+*/
+#define  NCO_INT_GAIN_CFGNCOGAIN_SHIFT	0
+#define  NCO_INT_GAIN_CFGNCOGAIN_MASK	0xffff
+
+
+/*
+ * Register <ADDR_NCO_PRO_GAIN>
+ *
+ * This register is used to provision the NCO's proportional gain
+ * value.
+ * The value is in 0.2 ppb units.
+ */
+#define NCO_ADDR_NCO_PRO_GAIN_REG	0x1c
+
+/*
+ * NCO proportional gain in number of 0.
+ * 2 ppb quanta.
+ * The gain may be increased for faster convergence, and then decreasedfor
+ * increased accuracy and holdover.
+ * Default is 0x400
+*/
+#define  NCO_PRO_GAIN_CFGNCOPROPGAIN_SHIFT	0
+#define  NCO_PRO_GAIN_CFGNCOPROPGAIN_MASK	0xffff
+
+
+/*
+ * Register <ADDR_NCO_CNT> - read-only
+ *
+ * The value in this register is the NCO's current period integral.
+ * The value is in xxx units.
+ * (This value reflects the relationship between the accuracy of
+ * thereference clock frequency and the accuracy of the 125 MHz core
+ * clockfrequency.
+ * The closer this value is to the ideal value calculated for the
+ * NCOInitial Period Integral Value in register 0x0c5,the closer the core
+ * 125 MHz frequency error matches the referenceclock's frequency error.
+ * )Note:
+ * Once the system has locked to a valid downstream reference andreached
+ * steady state,the value in this register can be transferred to the "NCO
+ * InitialPeriod Integral Value" in register 0x0c5.
+ * This will ensure that the NCO's "hold-over" frequency will closelymatch
+ * the reference frequency.
+ */
+#define NCO_ADDR_NCO_CNT_REG		0x20
+
+/* Current NCO period integral value. */
+#define  NCO_CNT_NCOCNT_SHIFT		0
+#define  NCO_CNT_NCOCNT_MASK		0xffffffff
+
+
+/*
+ * Register <ADDR_NCO_1PPS_HALF>
+ *
+ * This register is used to set the NCO's 1PPS duty cycle.
+ * The provisioned value represents the "high time" of the 1PPS signal
+ * andis in 100 nS units.
+ */
+#define NCO_ADDR_NCO_1PPS_HALF_REG	0x24
+
+/*
+ * This register sets the portion of the 1PPS period that the signal
+ * ishigh.
+ * This value should be set to the duty cycle high %% times
+ * thecfg1ppsPeriod (e.
+ * g.
+ * 10%% * 10,000,000 = 1,000,000).
+ * Default is 5,000,000 (50%% duty cycle).
+*/
+#define  NCO_1PPS_HALF_CFG1PPSHALFPERIOD_SHIFT	0
+#define  NCO_1PPS_HALF_CFG1PPSHALFPERIOD_MASK	0xffffff
+
+
+/*
+ * Register <ADDR_NCO_8KHZ_HALF>
+ *
+ * This register is used to provision the NCO's 8 KHz duty cycle.
+ * The provisioned value represents the "high time" of the 8 KHz signaland
+ * is in 100 nS units.
+ */
+#define NCO_ADDR_NCO_8KHZ_HALF_REG	0x28
+
+/*
+ * This register sets the portion of the 8 KHz period that the signalis
+ * high.
+ * Reset default is 625 (50/50 duty cycle).
+*/
+#define  NCO_8KHZ_HALF_CFG8KHZHALFPERIOD_SHIFT	0
+#define  NCO_8KHZ_HALF_CFG8KHZHALFPERIOD_MASK	0xffffff
+
+
+/*
+ * Register <ADDR_NCO_PERIOD_CNT> - read-only
+ *
+ * This register is used to measure the incoming clock period in terms
+ * ofthe local oscillator.
+ */
+#define NCO_ADDR_NCO_PERIOD_CNT_REG	0x2c
+
+/*
+ * The number of 8ns clocks in one input period.
+ * This register willread zero until the first period is complete.
+ * It will be updated on each subsequent period.
+ * This value may beused to program the NCO default period for faster lock
+ * time.
+ * For a 1PPS, the cfgNcoDefault should equal 343,597,394 *125M/periodCnt.
+*/
+#define  NCO_PERIOD_CNT_PERIODCNT_SHIFT	0
+#define  NCO_PERIOD_CNT_PERIODCNT_MASK	0xffffffff
+
+
+/*
+ * Register <ADDR_NCO_PHS_ERR_CNT> - read-only
+ *
+ * This register is used to measure the incoming clock phase error in 8
+ * nsunits.
+ */
+#define NCO_ADDR_NCO_PHS_ERR_CNT_REG	0x30
+
+/*
+ * The number of 8 ns clocks between the ppsNco rising edge and the LIFPPS
+ * input signal rising edge.
+ * This is an up/down counter:
+ * LIF PPS leading represents a positiveerror, and ppsNco input leading
+ * represents a negative error.
+ * The error will saturate at 0x7ff on a positive error and 0x800 on
+ * anegative error.
+ * This register is updated for each rising edge sample following thefirst
+ * negative edge of ppsNco at start up.
+*/
+#define  NCO_PHS_ERR_CNT_NCOPHSERR_SHIFT	0
+#define  NCO_PHS_ERR_CNT_NCOPHSERR_MASK	0xfff
+
+
+#endif /* ! EPON_NCO_ADDR_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_xif.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_xif.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_xif.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_xif.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,1190 @@
+#ifndef EPON_XIF_H_
+#define EPON_XIF_H_
+
+/* relative to epon */
+#define XIF_OFFSET_0			0x2800
+
+/*
+ * Register <XIF_CTL>
+ *
+ * XIF control register.
+ */
+#define XIF_CTL_REG			0x0
+
+/*
+ * Global downstream receive encryption enable :
+ * 0 - Normal operation;1 - Enable encryption.
+*/
+#define  XIF_CTL_RXENCRYPTEN_MASK	0x80000000
+
+/*
+ * Disable DA/SA downstream decryption :
+ * 0 - Normal operation; 1 -Disable DA/SA decryption.
+*/
+#define  XIF_CTL_CFGDISRXDASAENCRPT_MASK	0x40000000
+
+/*
+ * Downstream receive security mode:
+ * 0 - Zero overhead;
+ * 2 - 802.1ae; 3 - 3Churn.
+ */
+#define  XIF_CTL_RXENCRYPTMODE_SHIFT	28
+#define  XIF_CTL_RXENCRYPTMODE_MASK	0x30000000
+
+/*
+ * Global upstream transmit encryption enable :
+ * 0 - Normal operation;1 - Enable encryption.
+*/
+#define  XIF_CTL_TXENCRYPTEN_MASK	0x8000000
+
+/*
+ * Disable DA/SA upstream encryption :
+ * 0 - Normal operation; 1 -Disable DA/SA encryption.
+*/
+#define  XIF_CTL_CFGDISTXDASAENCRPT_MASK	0x4000000
+
+/*
+ * Upstream transmit security mode:
+ * 0 - Zero overhead;
+ * 2 - 802.1ae.
+ */
+#define  XIF_CTL_TXENCRYPTMODE_SHIFT	24
+#define  XIF_CTL_TXENCRYPTMODE_MASK	0x3000000
+
+/*
+ * Masks the MSB of the 16-bit raw LLID when translating an LLID
+ * to an index.
+ *
+ * 0: Don't mask, look at the full 16 bits.
+ * 1: Mask bit [15], map based on bits [14:0].
+ *
+ */
+#define  XIF_CTL_CFGLLIDMODEMSK_MASK	0x400000
+
+/*
+ * Enable bad upstream FCS generation :
+ * 0 - Normal operation; 1 -Enable bad FCS generation of 0's.
+*/
+#define  XIF_CTL_CFGXPNBADCRC32_MASK	0x200000
+
+/*
+ * Disable Discovery Info field :
+ * 0 - Normal operation; 1 - DisableDISCOVERY info.
+*/
+#define  XIF_CTL_CFGDISDISCINFO_MASK	0x100000
+
+/*
+ * Enable PMC loopback :
+ * 0 - Normal operation; 1 - Loopback.
+ * NOTAPPLICABLE in ONU since Tx/Rx clocks are not the same clock.
+*/
+#define  XIF_CTL_CFGPMCTX2RXLPBK_MASK	0x80000
+
+/*
+ * Enable upstream bad CRC8 transmission :
+ * 0 - Normal operation; 1 -Enable bad CRC8 generation.
+*/
+#define  XIF_CTL_CFGPMCTXENCRC8BAD_MASK	0x40000
+
+/*
+ * Enable point-2-point mode for downstream and upstream:
+ * 0 - PON mode.
+ * Upstream's preamble will be of type 0x55_55_d5_55.
+ * Downstream expects the same preamble type, else the packet will be aborted.
+ * CRC8 checking is configurable by bit "cfgPmcRxEnCrc8Chk".
+ * 1 - P2P mode.
+ * Upstream's preamble will be of type 0x55_55_55_55.
+ * Downstream expects the same preamble type, else the packet will be aborted.
+ * CRC8 checking will be disabled.
+ */
+#define  XIF_CTL_CFGENP2P_MASK		0x20000
+
+/*
+ * All unmapped LLIDs will be redirected and mapped to Index 0.
+ *
+ * 0: Unmapped LLIDs will appear to be unmapped to EPN.
+ * 1: Unmapped LLIDs will appear on Index 0 to EPN.
+ */
+#define  XIF_CTL_CFGLLIDPROMISCUOUSMODE_MASK	0x10000
+
+/* Enable IDLE packet support to prevent upstream underrun. */
+#define  XIF_CTL_CFGENIDLEPKTSUP_MASK	0x8000
+
+/*
+ * Enable PMC-RX checking of CRC8 :
+ * 0 - Disable; 1 - Enable.
+*/
+#define  XIF_CTL_CFGPMCRXENCRC8CHK_MASK	0x4000
+
+/* Enable 1st IDLE packet in a burst to be converted to IDLEs. */
+#define  XIF_CTL_CFGEN1STIDLEPKTCONVERT_MASK	0x2000
+
+/*
+ * Enable upstream FEC :
+ * 0 - no FEC.
+ * 1 - FEC.
+*/
+#define  XIF_CTL_CFGFECEN_MASK		0x1000
+
+/* Enable legacy receive timestamp update. */
+#define  XIF_CTL_CFGLEGACYRCVTSUPD_MASK	0x800
+
+/*
+ * Enable FCS pass through :
+ * 0 - Modify packet's FCS; 1 - Pass throughwith no FCS modification.
+ * Feature is only supported in A0.
+*/
+#define  XIF_CTL_CFGXPNENCRCPASSTHRU_MASK	0x400
+
+/*
+ * Debug function to disable timestamp modification of MPCP packet.
+ * 0 -Normal Operation; 1 - Disable timestamp modification.
+*/
+#define  XIF_CTL_CFGXPNDISTIMESTAMPMOD_MASK	0x200
+
+/*
+ * XIF not ready indication due to RAM init :
+ * 1 - Not ready.
+ * 0 - Readyfor operation.
+ * All RAMs are initialized to 0's.
+*/
+#define  XIF_CTL_XIFNOTRDY_MASK		0x100
+
+/*
+ * Active low reset for RAM data port.
+ * RAM init starts upondeassertion.
+ * Bit xifNotRdy is to be polled for completion
+*/
+#define  XIF_CTL_XIFDTPORTRSTN_MASK	0x80
+
+/*
+ * Reset control for transmit XPN module.
+ * 0 - Reset.
+ * 1 - NormalOperation.
+*/
+#define  XIF_CTL_XPNTXRSTN_MASK		0x40
+
+/*
+ * Reset control for transmit PMC module.
+ * 0 - Reset.
+ * 1 - NormalOperation.
+*/
+#define  XIF_CTL_PMCTXRSTN_MASK		0x20
+
+/*
+ * Reset control for transmit security module.
+ * 0 - Reset.
+ * 1 - NormalOperation.
+*/
+#define  XIF_CTL_SECTXRSTN_MASK		0x10
+
+/*
+ * Disable OAM encryption.
+ * 0 - Normal Operation.
+ * 1 - Disable MPCPencryption.
+*/
+#define  XIF_CTL_CFGDISTXOAMENCRPT_MASK	0x8
+
+/*
+ * Disable MPCP encryption.
+ * 0 - Normal Operation.
+ * 1 - Disable MPCPencryption.
+*/
+#define  XIF_CTL_CFGDISTXMPCPENCRPT_MASK	0x4
+
+/*
+ * Reset control for receive PMC module.
+ * 0 - Reset.
+ * 1 - NormalOperation.
+*/
+#define  XIF_CTL_PMCRXRSTN_MASK		0x2
+
+/*
+ * Reset control for receive security module.
+ * 0 - Reset.
+ * 1 - NormalOperation.
+*/
+#define  XIF_CTL_SECRXRSTN_MASK		0x1
+
+
+/*
+ * Register <XIF_INT_STATUS>
+ *
+ * Interrupts.
+ */
+#define XIF_INT_STATUS_REG		0x4
+
+/*
+ * [NON-FATAL] Applicable only in 802.
+ * 1ae security.
+ * Indicates thereceived packet was aborted due to replay protection.
+*/
+#define  XIF_INT_STATUS_SECRXRPLYPRTCTABRTINT_MASK	0x80
+
+/*
+ * [NON-FATAL] Applicable only in 802.
+ * 1ae security.
+ * Indicates thetransmit packet number exceeded the maximum threshold and
+ * about tooverflow.
+ * Threshold is programmed in register XIF_AE_PKTNUM_THRESH.
+*/
+#define  XIF_INT_STATUS_SECTXPKTNUMMAXINT_MASK	0x40
+
+/*
+ * Indicates full MPCP timestamp update due to value greater thanthreshold
+ * programmed into cfgTsFullUpdThr in register XIF_TS_UPDATE.
+*/
+#define  XIF_INT_STATUS_TSFULLUPDINT_MASK	0x10
+
+/* [FATAL] Indicates request to transmit never got serviced. */
+#define  XIF_INT_STATUS_TXHANGINT_MASK	0x8
+
+/*
+ * [FATAL] Indicates scheduled transmit time is negative, relative tothe
+ * current MPCP time.
+*/
+#define  XIF_INT_STATUS_NEGTIMEINT_MASK	0x4
+
+/*
+ * [NON-FATAL] Indicates the magnitude of the MPCP timestamp
+ * updatedexceeded the value programmed into XIF_TS_JITTER_THRESH register.
+*/
+#define  XIF_INT_STATUS_PMCTSJTTRINT_MASK	0x2
+
+/* [FATAL] Indicates SEC-RX output FIFO overflowed. */
+#define  XIF_INT_STATUS_SECRXOUTFFOVRFLWINT_MASK	0x1
+
+
+/*
+ * Register <XIF_INT_MASK>
+ *
+ * Interrupt masks, active low :
+ * 0 - mask interrupt; 1 - enable interrupt.
+ */
+#define XIF_INT_MASK_REG		0x8
+
+/* Interrupt mask, active low. */
+#define  XIF_INT_MASK_MSKSECRXREPLAYPROTCTABORT_MASK	0x80
+
+/* Interrupt mask, active low. */
+#define  XIF_INT_MASK_MSKPKTNUMTHRESHINT_MASK	0x40
+
+/* Interrupt mask, active low. */
+#define  XIF_INT_MASK_MSKTSFULLUPDINT_MASK	0x10
+
+/* Interrupt mask, active low. */
+#define  XIF_INT_MASK_MSKTXHANGINT_MASK	0x8
+
+/* Interrupt mask, active low. */
+#define  XIF_INT_MASK_MSKNEGTIMEINT_MASK	0x4
+
+/* Interrupt mask, active low. */
+#define  XIF_INT_MASK_MSKPMCTSJTTRINT_MASK	0x2
+
+/* Interrupt mask, active low. */
+#define  XIF_INT_MASK_MSKSECRXOUTFFINT_MASK	0x1
+
+
+/*
+ * Register <XIF_PORT_COMMAND>
+ *
+ * Provides dataPort read/write access to various RAMs.
+ */
+#define XIF_PORT_COMMAND_REG		0xc
+
+/*
+ * Indicates dataPort access is in progress.
+ * Bit must be clearedbefore the next dataPort access can be issued.
+*/
+#define  XIF_PORT_COMMAND_DATAPORTBUSY_MASK	0x80000000
+
+/*
+ * Selects the RAM for access :
+ * 0 - RX key; 2 - TX key; 4 - RX IV; 5 -TX IV.
+*/
+#define  XIF_PORT_COMMAND_PORTSELECT_SHIFT	24
+#define  XIF_PORT_COMMAND_PORTSELECT_MASK	0x3f000000
+
+/*
+ * Indicates write access :
+ * 0 - read; 1 - write.
+*/
+#define  XIF_PORT_COMMAND_PORTOPCODE_SHIFT	16
+#define  XIF_PORT_COMMAND_PORTOPCODE_MASK	0xff0000
+
+/* Specifies the RAM address for access. */
+#define  XIF_PORT_COMMAND_PORTADDRESS_SHIFT	0
+#define  XIF_PORT_COMMAND_PORTADDRESS_MASK	0xffff
+
+
+/*
+ * Registers <XIF_PORT_DATA> - <x> is [ 0 => 7 ]
+ *
+ * Stores the pre-write data for writing; and the post-read data
+ * forreading.
+ */
+#define XIF_PORT_DATA_REG(x)		(0x14 + (x) * 0x4)
+
+/*
+ * TX/RX SEC key RAM, key[31:
+ * 0].
+*/
+#define  XIF_PORT_DATA_PORTDATA_SHIFT	0
+#define  XIF_PORT_DATA_PORTDATA_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_MACSEC>
+ *
+ * This register specifies the 802.
+ * 1ae MacSec Ethertype to be insertedinto the packet.
+ */
+#define XIF_MACSEC_REG			0x34
+
+/* Defines the MacSec Ethertype. */
+#define  XIF_MACSEC_CFGMACSECETHERTYPE_SHIFT	0
+#define  XIF_MACSEC_CFGMACSECETHERTYPE_MASK	0xffff
+
+
+/*
+ * Register <XIF_XPN_XMT_OFFSET>
+ *
+ * Specifies the transmit offset, relative to the current MPCP.
+ */
+#define XIF_XPN_XMT_OFFSET_REG		0x38
+
+/*
+ * Specifies the transmit offset, to account for the delay throughSEC-TX
+ * and PMC-TX.
+*/
+#define  XIF_XPN_XMT_OFFSET_CFGXPNXMTOFFSET_SHIFT	0
+#define  XIF_XPN_XMT_OFFSET_CFGXPNXMTOFFSET_MASK	0xffff
+
+
+/*
+ * Register <XIF_XPN_TIMESTAMP_OFFSET>
+ *
+ * Specifies the offset to add to MPCP's timestamp.
+ */
+#define XIF_XPN_TIMESTAMP_OFFSET_REG	0x3c
+
+/* Debug function to add the offset to the regenerated MPCP's timestamp. */
+#define  XIF_XPN_TIMESTAMP_OFFSET_CFGXPNMPCPTSOFFSET_SHIFT	0
+#define  XIF_XPN_TIMESTAMP_OFFSET_CFGXPNMPCPTSOFFSET_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_XPN_PKTGEN_CTL>
+ *
+ * This register controls Xif's packet generator.
+ * When enabled the packetgenerator's frames will be inserted in place of
+ * the normal downstreamdata.
+ */
+#define XIF_XPN_PKTGEN_CTL_REG		0x40
+
+/* Burst size, in TQ units. */
+#define  XIF_XPN_PKTGEN_CTL_CFGONUBURSTSIZE_SHIFT	16
+#define  XIF_XPN_PKTGEN_CTL_CFGONUBURSTSIZE_MASK	0xffff0000
+
+/* Enable back-2-back grants for overlap testing. */
+#define  XIF_XPN_PKTGEN_CTL_CFGENBCK2BCKPKTGEN_MASK	0x20
+
+/* Enable all MPCP packet generation. */
+#define  XIF_XPN_PKTGEN_CTL_CFGENALLMPCPPKTGEN_MASK	0x10
+
+/* Starts packet generator. */
+#define  XIF_XPN_PKTGEN_CTL_CFGXPNSTARTPKTGEN_MASK	0x2
+
+/* Enables packet generator. */
+#define  XIF_XPN_PKTGEN_CTL_CFGXPNENPKTGEN_MASK	0x1
+
+
+/*
+ * Register <XIF_XPN_PKTGEN_LLID>
+ *
+ * Specifies the packet generation LLID for index 0 and 1.
+ */
+#define XIF_XPN_PKTGEN_LLID_REG		0x44
+
+/* LLID for index 1. */
+#define  XIF_XPN_PKTGEN_LLID_CFGXPNPKTGENLLID1_SHIFT	16
+#define  XIF_XPN_PKTGEN_LLID_CFGXPNPKTGENLLID1_MASK	0xffff0000
+
+/* LLID for index 0. */
+#define  XIF_XPN_PKTGEN_LLID_CFGXPNPKTGENLLID0_SHIFT	0
+#define  XIF_XPN_PKTGEN_LLID_CFGXPNPKTGENLLID0_MASK	0xffff
+
+
+/*
+ * Register <XIF_XPN_PKTGEN_PKT_CNT>
+ *
+ * Specifies the number of packet to transmit.
+ */
+#define XIF_XPN_PKTGEN_PKT_CNT_REG	0x48
+
+/*
+ * Burst mode generation :
+ * 0 - continuous; 1 - burst mode as defined bycfgXpnPktGenBurstSize.
+*/
+#define  XIF_XPN_PKTGEN_PKT_CNT_CFGXPNPKTGENBURSTMODE_MASK	0x80000000
+
+/* Number of packets to transmit. */
+#define  XIF_XPN_PKTGEN_PKT_CNT_CFGXPNPKTGENBURSTSIZE_SHIFT	0
+#define  XIF_XPN_PKTGEN_PKT_CNT_CFGXPNPKTGENBURSTSIZE_MASK	0xffffff
+
+
+/*
+ * Register <XIF_XPN_PKTGEN_PKT_SIZE>
+ *
+ * Specifies the size of each packet.
+ */
+#define XIF_XPN_PKTGEN_PKT_SIZE_REG	0x4c
+
+/*
+ * Size mode :
+ * 0 - fixed packet size, defined by cfgXpnPktGenSizeStart;1 - increment
+ * packet size, from cfgXpnPktGenSizeStart tocfgXpnPktGenSizeEnd.
+*/
+#define  XIF_XPN_PKTGEN_PKT_SIZE_CFGXPNPKTGENSIZEINCR_MASK	0x80000000
+
+/* Indicates the ending size. */
+#define  XIF_XPN_PKTGEN_PKT_SIZE_CFGXPNPKTGENSIZEEND_SHIFT	16
+#define  XIF_XPN_PKTGEN_PKT_SIZE_CFGXPNPKTGENSIZEEND_MASK	0xfff0000
+
+/* Indicates the starting size. */
+#define  XIF_XPN_PKTGEN_PKT_SIZE_CFGXPNPKTGENSIZESTART_SHIFT	0
+#define  XIF_XPN_PKTGEN_PKT_SIZE_CFGXPNPKTGENSIZESTART_MASK	0xfff
+
+
+/*
+ * Register <XIF_XPN_PKTGEN_IPG>
+ *
+ * IPG insertion for packet generator.
+ */
+#define XIF_XPN_PKTGEN_IPG_REG		0x50
+
+/* IPG insertion for back-2-back grants. */
+#define  XIF_XPN_PKTGEN_IPG_CFGXPNPKTGENBCK2BCKIPG_SHIFT	16
+#define  XIF_XPN_PKTGEN_IPG_CFGXPNPKTGENBCK2BCKIPG_MASK	0xffff0000
+
+/* IPG insertion in between packets. */
+#define  XIF_XPN_PKTGEN_IPG_CFGXPNPKTGENIPG_SHIFT	0
+#define  XIF_XPN_PKTGEN_IPG_CFGXPNPKTGENIPG_MASK	0xffff
+
+
+/*
+ * Register <XIF_TS_JITTER_THRESH>
+ *
+ * Specifies the threshold to generate pmcRxTsJitter interrupt.
+ */
+#define XIF_TS_JITTER_THRESH_REG	0x54
+
+/*
+ * Defines the value to generate jitter interrupt when timestamp
+ * updateexceeds this threshold.
+*/
+#define  XIF_TS_JITTER_THRESH_CFGTSJTTRTHRESH_SHIFT	0
+#define  XIF_TS_JITTER_THRESH_CFGTSJTTRTHRESH_MASK	0x7fffffff
+
+
+/*
+ * Register <XIF_TS_UPDATE>
+ *
+ * Provides timestamp update control.
+ */
+#define XIF_TS_UPDATE_REG		0x58
+
+/*
+ * Defines the full update threshold.
+ * Timestamp update is done in 1 TQincrement.
+ * If update is equal to or greater than threshold, fullupdate will result.
+*/
+#define  XIF_TS_UPDATE_CFGTSFULLUPDTHR_SHIFT	16
+#define  XIF_TS_UPDATE_CFGTSFULLUPDTHR_MASK	0xffff0000
+
+/*
+ * Provides auto timestamp update for debugging.
+ * This is to test fortimestamp jitter.
+*/
+#define  XIF_TS_UPDATE_CFGENAUTOTSUPD_MASK	0x8000
+
+/* Defines the period between MPCP timestamp update. */
+#define  XIF_TS_UPDATE_CFGTSUPDPER_SHIFT	0
+#define  XIF_TS_UPDATE_CFGTSUPDPER_MASK	0xff
+
+
+/*
+ * Register <XIF_GNT_OVERHEAD>
+ *
+ * Specifies the burst overhead for normal grant.
+ */
+#define XIF_GNT_OVERHEAD_REG		0x5c
+
+/* Burst overhead of laser_on + sync_time. */
+#define  XIF_GNT_OVERHEAD_CFGGNTOH_SHIFT	0
+#define  XIF_GNT_OVERHEAD_CFGGNTOH_MASK	0xffff
+
+
+/*
+ * Register <XIF_DISCOVER_OVERHEAD>
+ *
+ * Specifies the burst overhead for discovery grant.
+ */
+#define XIF_DISCOVER_OVERHEAD_REG	0x60
+
+/* Burst overhead of laser_on + sync_time. */
+#define  XIF_DISCOVER_OVERHEAD_CFGDISCOH_SHIFT	0
+#define  XIF_DISCOVER_OVERHEAD_CFGDISCOH_MASK	0xffff
+
+
+/*
+ * Register <XIF_DISCOVER_INFO>
+ *
+ * Specifies the discovery information field.
+ */
+#define XIF_DISCOVER_INFO_REG		0x64
+
+/*
+ * Defines the discovery info field :
+ * 0 - upstream 1G; 1 - upstream10G; 4 - open 1G window; 5 - open 10G
+ * window.
+*/
+#define  XIF_DISCOVER_INFO_CFGDISCINFOFLD_SHIFT	0
+#define  XIF_DISCOVER_INFO_CFGDISCINFOFLD_MASK	0xffff
+
+
+/*
+ * Register <XIF_XPN_OVERSIZE_THRESH>
+ *
+ * Specifies the oversize threshold to increment oversize stat.
+ */
+#define XIF_XPN_OVERSIZE_THRESH_REG	0x68
+
+/*
+ * Increments oversize stat when packet's size is greater than or equalto
+ * threshold.
+*/
+#define  XIF_XPN_OVERSIZE_THRESH_CFGXPNOVRSZTHRESH_SHIFT	0
+#define  XIF_XPN_OVERSIZE_THRESH_CFGXPNOVRSZTHRESH_MASK	0x3fff
+
+
+/*
+ * Register <XIF_SECRX_KEYNUM> - read-only
+ *
+ * Provides downstream encryption key number stat, per LLID.
+ */
+#define XIF_SECRX_KEYNUM_REG		0x6c
+
+/* Key number stat. */
+#define  XIF_SECRX_KEYNUM_KEYSTATRX_SHIFT	0
+#define  XIF_SECRX_KEYNUM_KEYSTATRX_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_SECRX_ENCRYPT> - read-only
+ *
+ * Provides downstream encryption stat, per LLID.
+ */
+#define XIF_SECRX_ENCRYPT_REG		0x70
+
+/* Encryption stat. */
+#define  XIF_SECRX_ENCRYPT_ENCRSTATRX_SHIFT	0
+#define  XIF_SECRX_ENCRYPT_ENCRSTATRX_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_PMC_FRAME_RX_CNT> - read-only
+ *
+ * PMC-RX receive frame count stat.
+ */
+#define XIF_PMC_FRAME_RX_CNT_REG	0x78
+
+/*
+ * Frame count stat.
+ * Peg at max value.
+*/
+#define  XIF_PMC_FRAME_RX_CNT_PMCRXFRAMECNT_SHIFT	0
+#define  XIF_PMC_FRAME_RX_CNT_PMCRXFRAMECNT_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_PMC_BYTE_RX_CNT> - read-only
+ *
+ * PMC-RX byte count stat.
+ */
+#define XIF_PMC_BYTE_RX_CNT_REG		0x7c
+
+/*
+ * Byte count stat.
+ * Peg at max value.
+*/
+#define  XIF_PMC_BYTE_RX_CNT_PMCRXBYTECNT_SHIFT	0
+#define  XIF_PMC_BYTE_RX_CNT_PMCRXBYTECNT_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_PMC_RUNT_RX_CNT> - read-only
+ *
+ * PMC-RX runt count stat.
+ */
+#define XIF_PMC_RUNT_RX_CNT_REG		0x80
+
+/*
+ * Runt count stat.
+ * Peg at max value.
+*/
+#define  XIF_PMC_RUNT_RX_CNT_PMCRXRUNTCNT_SHIFT	0
+#define  XIF_PMC_RUNT_RX_CNT_PMCRXRUNTCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_PMC_CW_ERR_RX_CNT> - read-only
+ *
+ * PMC-RX code word error stat.
+ */
+#define XIF_PMC_CW_ERR_RX_CNT_REG	0x84
+
+/*
+ * Codeword error stat.
+ * Peg at max value.
+*/
+#define  XIF_PMC_CW_ERR_RX_CNT_PMCRXCWERRCNT_SHIFT	0
+#define  XIF_PMC_CW_ERR_RX_CNT_PMCRXCWERRCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_PMC_CRC8_ERR_RX_CNT> - read-only
+ *
+ * PMC-RX crc8 error stat.
+ */
+#define XIF_PMC_CRC8_ERR_RX_CNT_REG	0x88
+
+/*
+ * CRC-8 error stat.
+ * Peg at max value.
+*/
+#define  XIF_PMC_CRC8_ERR_RX_CNT_PMCRXCRC8ERRCNT_SHIFT	0
+#define  XIF_PMC_CRC8_ERR_RX_CNT_PMCRXCRC8ERRCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_XPN_DATA_FRM_CNT> - read-only
+ *
+ * XPN transmit data frame count.
+ */
+#define XIF_XPN_DATA_FRM_CNT_REG	0x8c
+
+/*
+ * Data frame count stat, excluding MPCP/OAM.
+ * Peg at max.
+*/
+#define  XIF_XPN_DATA_FRM_CNT_XPNDTFRAMECNT_SHIFT	0
+#define  XIF_XPN_DATA_FRM_CNT_XPNDTFRAMECNT_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_XPN_DATA_BYTE_CNT> - read-only
+ *
+ * XPN transmit data byte count.
+ */
+#define XIF_XPN_DATA_BYTE_CNT_REG	0x90
+
+/*
+ * Data byte count stat, excluding MPCP/OAM.
+ * Peg at max.
+*/
+#define  XIF_XPN_DATA_BYTE_CNT_XPNDTBYTECNT_SHIFT	0
+#define  XIF_XPN_DATA_BYTE_CNT_XPNDTBYTECNT_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_XPN_MPCP_FRM_CNT> - read-only
+ *
+ * XPN transmit MPCP frame count.
+ */
+#define XIF_XPN_MPCP_FRM_CNT_REG	0x94
+
+/*
+ * MPCP frame count stat.
+ * Peg at max.
+*/
+#define  XIF_XPN_MPCP_FRM_CNT_XPNMPCPFRAMECNT_SHIFT	0
+#define  XIF_XPN_MPCP_FRM_CNT_XPNMPCPFRAMECNT_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_XPN_OAM_FRM_CNT> - read-only
+ *
+ * XPN transmit OAM frame count.
+ */
+#define XIF_XPN_OAM_FRM_CNT_REG		0x98
+
+/*
+ * OAM frame count stat.
+ * Peg at max.
+ */
+#define  XIF_XPN_OAM_FRM_CNT_XPNOAMFRAMECNT_SHIFT	0
+#define  XIF_XPN_OAM_FRM_CNT_XPNOAMFRAMECNT_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_XPN_OAM_BYTE_CNT> - read-only
+ *
+ * XPN transmit OAM byte count.
+ */
+#define XIF_XPN_OAM_BYTE_CNT_REG	0x9c
+
+/*
+ * OAM byte count stat.
+ * Peg at max.
+*/
+#define  XIF_XPN_OAM_BYTE_CNT_XPNOAMBYTECNT_SHIFT	0
+#define  XIF_XPN_OAM_BYTE_CNT_XPNOAMBYTECNT_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_XPN_OVERSIZE_FRM_CNT> - read-only
+ *
+ * XPN transmit oversize frame stat.
+ */
+#define XIF_XPN_OVERSIZE_FRM_CNT_REG	0xa0
+
+/* Oversize frame, as defined by XIF_XPN_OVERSIZE_THRESH register. */
+#define  XIF_XPN_OVERSIZE_FRM_CNT_XPNDTOVERSIZECNT_SHIFT	0
+#define  XIF_XPN_OVERSIZE_FRM_CNT_XPNDTOVERSIZECNT_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_SEC_ABORT_FRM_CNT> - read-only
+ *
+ * SEC-RX abort frame stat.
+ */
+#define XIF_SEC_ABORT_FRM_CNT_REG	0xa4
+
+/*
+ * Abort frame stat.
+ * Peg at max.
+*/
+#define  XIF_SEC_ABORT_FRM_CNT_SECRXABORTFRMCNT_SHIFT	0
+#define  XIF_SEC_ABORT_FRM_CNT_SECRXABORTFRMCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_PMC_TX_NEG_EVENT_CNT> - read-only
+ *
+ * PMC-TX negative transmit time event.
+ */
+#define XIF_PMC_TX_NEG_EVENT_CNT_REG	0xa8
+
+/*
+ * Negative event count stat.
+ * Peg at max value.
+*/
+#define  XIF_PMC_TX_NEG_EVENT_CNT_PMCTXNEGEVENTCNT_SHIFT	0
+#define  XIF_PMC_TX_NEG_EVENT_CNT_PMCTXNEGEVENTCNT_MASK	0xff
+
+
+/*
+ * Register <XIF_XPN_IDLE_PKT_CNT> - read-only
+ *
+ * Idle packet count.
+ */
+#define XIF_XPN_IDLE_PKT_CNT_REG	0xac
+
+/*
+ * Idle packet count stat.
+ * Peg at max value.
+*/
+#define  XIF_XPN_IDLE_PKT_CNT_XPNIDLEFRAMECNT_SHIFT	0
+#define  XIF_XPN_IDLE_PKT_CNT_XPNIDLEFRAMECNT_MASK	0xffff
+
+
+/*
+ * Registers <XIF_LLID_0> - <x> is [ 0 => 32 ]
+ *
+ * Configures LLID index 0 translation.
+ */
+#define XIF_LLIDx_0_31_REG(x)		(0xc0 + (x) * 0x4)
+
+/*
+ * Defines the 16-bits LLID for index 0 :
+ * [15:
+ * 0] - LLID; [16] - enableLLID.
+ * In upstream P2P, 802.
+ * 1ae mode, bit[11:
+ * 0] provides lookup withVLAN's VID to index 0.
+ * In downstream P2P, 802.
+ * 1ae mode, registersXIF_P2P_AE_SCI_LO[0:
+ * 15]/XIF_P2P_AE_SCI_HI[0:
+ * 15] provide lookup withpacket's explicit SCI to an index.
+*/
+#define  XIF_LLIDx_0_31_CFGONULLID0_SHIFT	0
+#define  XIF_LLIDx_0_31_CFGONULLID0_MASK	0x1ffff
+
+
+/*
+ * Register <XIF_MAX_MPCP_UPDATE>
+ *
+ * Specifies the maximum MPCP update.
+ */
+#define XIF_MAX_MPCP_UPDATE_REG		0x140
+
+/* Maximum MPCP update value. */
+#define  XIF_MAX_MPCP_UPDATE_CFGMAXPOSMPCPUPD_SHIFT	0
+#define  XIF_MAX_MPCP_UPDATE_CFGMAXPOSMPCPUPD_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_IPG_INSERTION>
+ *
+ * Specifies the IPG insertion between packets.
+ */
+#define XIF_IPG_INSERTION_REG		0x144
+
+/*
+ * Enable short IPG insertion, average of 8 bytes.
+ * Should only be enabled in FEC mode.
+ * Otherwise, an average of 12 bytes is inserted.
+*/
+#define  XIF_IPG_INSERTION_CFGSHORTIPG_MASK	0x200
+
+/* Debug function to enable IPG insertion. */
+#define  XIF_IPG_INSERTION_CFGINSERTIPG_MASK	0x100
+
+/*
+ * Configure the number of IPG word (2 bytes) to insert.
+ * Only valid when cfgInsertIpg is asserted.
+*/
+#define  XIF_IPG_INSERTION_CFGIPGWORD_SHIFT	0
+#define  XIF_IPG_INSERTION_CFGIPGWORD_MASK	0x7f
+
+
+/*
+ * Register <XIF_TRANSPORT_TIME>
+ *
+ * Specifies the MPCP time to generate a one pulse per second (PPS)signal.
+ */
+#define XIF_TRANSPORT_TIME_REG		0x148
+
+/* PPS is generated when the current MPCP is equal to the programmedvalue. */
+#define  XIF_TRANSPORT_TIME_CFTRANSPORTTIME_SHIFT	0
+#define  XIF_TRANSPORT_TIME_CFTRANSPORTTIME_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_MPCP_TIME> - read-only
+ *
+ * Provides the current MPCP time.
+ */
+#define XIF_MPCP_TIME_REG		0x14c
+
+/* Current MPCP time. */
+#define  XIF_MPCP_TIME_CURMPCPTS_SHIFT	0
+#define  XIF_MPCP_TIME_CURMPCPTS_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_OVERLAP_GNT_OH>
+ *
+ * Provides the overhead for overlapping grant.
+ */
+#define XIF_OVERLAP_GNT_OH_REG		0x150
+
+/* Provides the amount the laser_on time and laser_off time mayoverlap. */
+#define  XIF_OVERLAP_GNT_OH_CFGOVRLPOH_SHIFT	0
+#define  XIF_OVERLAP_GNT_OH_CFGOVRLPOH_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_MAC_MODE>
+ *
+ * Specifies the MAC mode of operation.
+ */
+#define XIF_MAC_MODE_REG		0x154
+
+/*
+ * Enable point-2-point transmission without grant.
+ * Must also set bitcfgEnP2P.
+*/
+#define  XIF_MAC_MODE_CFGENNOGNTXMT_MASK	0x2
+
+
+/*
+ * Register <XIF_PMCTX_CTL>
+ *
+ * Provides the control for PMC.
+ */
+#define XIF_PMCTX_CTL_REG		0x158
+
+/*
+ * Define the MPCP update period.
+ * A value of 0xff disables update.
+*/
+#define  XIF_PMCTX_CTL_CFGMPCPUPDPERIOD_SHIFT	16
+#define  XIF_PMCTX_CTL_CFGMPCPUPDPERIOD_MASK	0xff0000
+
+/*
+ * Disable the requirement of 4 IDLEs preceding the start character to
+ * consider a packet valid.
+*/
+#define  XIF_PMCTX_CTL_CFGDIS4IDLEB4STARTCHAR_MASK	0x20
+
+/* Enable upstream IDLE discard */
+#define  XIF_PMCTX_CTL_CFGENIDLEDSCRD_MASK	0x10
+
+/*
+ * Selects the source of transmit MPCP time :
+ * 0 - RX; 1 - TX.
+*/
+#define  XIF_PMCTX_CTL_CFGSELTXPONTIME_MASK	0x8
+
+/* Enable continuous MPCP update. */
+#define  XIF_PMCTX_CTL_CFGMPCPCONTUPD_MASK	0x4
+
+/*
+ * Enable the restriction of positive MPCP update, limited by the
+ * cfgMaxPosMpcpUpd value set in register XIF_MAX_MPCP_UPDATE.
+*/
+#define  XIF_PMCTX_CTL_CFGENMAXMPCPUPD_MASK	0x2
+
+/*
+ * Enable the discard of packets with negative scheduled transmit
+ * time, relative to the current MPCP.
+*/
+#define  XIF_PMCTX_CTL_CFGENNEGTIMEABORT_MASK	0x1
+
+
+/*
+ * Register <XIF_SEC_CTL>
+ *
+ * Provides control for security.
+ */
+#define XIF_SEC_CTL_REG			0x15c
+
+/*
+ * [A0 BUG] - HWBCM6858-457: Enables downstream short length support.
+ * This feature cannot be supported in A0.
+ * The workaround would be to disable this feature by clearing the bit.
+*/
+#define  XIF_SEC_CTL_CFGSECRXENSHORTLEN_MASK	0x40
+
+/* Enable fake AES on TX security. */
+#define  XIF_SEC_CTL_CFGENSECTXFAKEAES_MASK	0x20
+
+/* Enable fake AES on RX security. */
+#define  XIF_SEC_CTL_CFGENSECRXFAKEAES_MASK	0x10
+
+/* Enables packet number rollover on receive. */
+#define  XIF_SEC_CTL_CFGSECRXENPKTNUMRLOVR_MASK	0x8
+
+/* Enables packet number rollover on transmit. */
+#define  XIF_SEC_CTL_CFGSECTXENPKTNUMRLOVR_MASK	0x2
+
+/* Enables replay protection on RX security. */
+#define  XIF_SEC_CTL_CFGENAEREPLAYPRCT_MASK	0x1
+
+
+/*
+ * Register <XIF_AE_PKTNUM_WINDOW>
+ *
+ * Provides the tolerance for packet number reception in replay
+ * protectionmode.
+ * Only applicable in 802.
+ * 1ae security mode.
+ */
+#define XIF_AE_PKTNUM_WINDOW_REG	0x160
+
+/*
+ * In replay protection, the packet number is checked against the expected
+ * packet number.
+ * If it is greater than or equal to, the packet will be accepted.
+ * Otherwise, it will be discarded.
+ * This register provides the tolerance by subtracting the current expected
+ * packet number by this amount.
+*/
+#define  XIF_AE_PKTNUM_WINDOW_CFGAEPKTNUMWND_SHIFT	0
+#define  XIF_AE_PKTNUM_WINDOW_CFGAEPKTNUMWND_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_AE_PKTNUM_THRESH>
+ *
+ * Provides the threshold to warn of impending packet number rollover
+ * ontransmit.
+ */
+#define XIF_AE_PKTNUM_THRESH_REG	0x164
+
+/* Defines the threshold of impending packet number rollover. */
+#define  XIF_AE_PKTNUM_THRESH_CFGPKTNUMMAXTHRESH_SHIFT	0
+#define  XIF_AE_PKTNUM_THRESH_CFGPKTNUMMAXTHRESH_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_SECTX_KEYNUM> - read-only
+ *
+ * Provides upstream encryption key number stat, per LLID.
+ */
+#define XIF_SECTX_KEYNUM_REG		0x168
+
+/* KeyNumber stat */
+#define  XIF_SECTX_KEYNUM_KEYSTATTX_SHIFT	0
+#define  XIF_SECTX_KEYNUM_KEYSTATTX_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_SECTX_ENCRYPT> - read-only
+ *
+ * Provides upstream encryption stat, per LLID.
+ */
+#define XIF_SECTX_ENCRYPT_REG		0x16c
+
+/* Encryption stat. */
+#define  XIF_SECTX_ENCRYPT_ENCRSTATTX_SHIFT	0
+#define  XIF_SECTX_ENCRYPT_ENCRSTATTX_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_AE_PKTNUM_STAT> - read-only
+ *
+ * Provides packet number status.
+ */
+#define XIF_AE_PKTNUM_STAT_REG		0x170
+
+/*
+ * Provides the LLID index whose packet number exceeded the maximum packet
+ * number threshold.
+*/
+#define  XIF_AE_PKTNUM_STAT_SECTXINDXWTPKTNUMMAX_SHIFT	16
+#define  XIF_AE_PKTNUM_STAT_SECTXINDXWTPKTNUMMAX_MASK	0x1f0000
+
+/* Provides the LLID index that was aborted due to replay protection. */
+#define  XIF_AE_PKTNUM_STAT_SECRXINDXWTPKTNUMABORT_SHIFT	0
+#define  XIF_AE_PKTNUM_STAT_SECRXINDXWTPKTNUMABORT_MASK	0x1f
+
+
+/*
+ * Register <XIF_MPCP_UPDATE> - read-only
+ *
+ * Debug register showing time between MPCP updates.
+ */
+#define XIF_MPCP_UPDATE_REG		0x174
+
+/* Time between MPCP updates. */
+#define  XIF_MPCP_UPDATE_MPCPUPDPERIOD_SHIFT	0
+#define  XIF_MPCP_UPDATE_MPCPUPDPERIOD_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_BURST_PRELAUNCH_OFFSET>
+ *
+ * Provides prelaunch time of burst data from ONU, relative to the
+ * grant-start-time.
+ */
+#define XIF_BURST_PRELAUNCH_OFFSET_REG	0x178
+
+/* Defines the prelaunch time of burst data, in unit of TQ. */
+#define  XIF_BURST_PRELAUNCH_OFFSET_CFGBURSTPRELAUNCHOFFSET_SHIFT	0
+#define  XIF_BURST_PRELAUNCH_OFFSET_CFGBURSTPRELAUNCHOFFSET_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_VLAN_TYPE>
+ *
+ * Provides a programmable VLAN type identifier for upstream P2P traffic.
+ */
+#define XIF_VLAN_TYPE_REG		0x17c
+
+/* Defines a VLAN type, in addition to 0x8100. */
+#define  XIF_VLAN_TYPE_CFGVLANTYPE_SHIFT	0
+#define  XIF_VLAN_TYPE_CFGVLANTYPE_MASK	0xffff
+
+
+/*
+ * Register <XIF_P2P_AE_SCI_EN>
+ *
+ * Enables SCI lookup for 802.
+ * 1ae, P2P downstream traffic.
+ */
+#define XIF_P2P_AE_SCI_EN_REG		0x180
+
+/*
+ * Enables SCI lookup, via XIF_P2P_AE_SCI_LO[0:
+ * 15]/XIF_P2P_AE_SCI_HI[0:
+ * 15] registers.
+ * Each bit corresponds to index 0 - 15.
+*/
+#define  XIF_P2P_AE_SCI_EN_CFGP2PSCIEN_SHIFT	0
+#define  XIF_P2P_AE_SCI_EN_CFGP2PSCIEN_MASK	0xffff
+
+
+/*
+ * Registers <XIF_P2P_AE_SCI_LO_0> - <x> is [ 0 => 16 ]
+ *
+ * Provides SCI lookup for 802.
+ * 1ae, P2P downstream traffic.
+ */
+#define XIF_P2P_AE_SCI_LOx_REG(x)	(0x184 + (x) * 0x8)
+
+/*
+ * Defines the lower 32-bits lookup value of SCI to index 0.
+ * If implicit SCI mode, index defaults to what was mapped by XIF_LLID_[0:
+ * 15] with value 0x5555.
+*/
+#define  XIF_P2P_AE_SCI_LOx_CFGP2PSCI_LO_0_SHIFT	0
+#define  XIF_P2P_AE_SCI_LOx_CFGP2PSCI_LO_0_MASK	0xffffffff
+
+
+/*
+ * Registers <XIF_P2P_AE_SCI_HI_0> - <x> is [ 0 => 16 ]
+ *
+ * Provides SCI lookup for 802.
+ * 1ae, P2P downstream traffic.
+ */
+#define XIF_P2P_AE_SCI_HIx_REG(x)	(0x188 + (x) * 0x8)
+
+/*
+ * Defines the upper 32-bits lookup value of SCI to index 0.
+ * If implicit SCI mode, index defaults to what was mapped by XIF_LLID_[0:
+ * 15] with value 0x5555.
+*/
+#define  XIF_P2P_AE_SCI_HIx_CFGP2PSCI_HI_0_SHIFT	0
+#define  XIF_P2P_AE_SCI_HIx_CFGP2PSCI_HI_0_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_SECTX_KEYNUM_1> - read-only
+ *
+ * Provides additional per-LLID status of upstream security key for 802.
+ * 1ae P2P.
+ */
+#define XIF_SECTX_KEYNUM_1_REG		0x204
+
+/*
+ * In 802.
+ * 1ae P2P mode, the number of key supported per LLID is 4.
+ * This register provides the upper bit of the 2-bits key number.
+ * The lower bit is provided by XIF_SECTX_KEYNUM.
+*/
+#define  XIF_SECTX_KEYNUM_1_KEYSTATTX_HI_SHIFT	0
+#define  XIF_SECTX_KEYNUM_1_KEYSTATTX_HI_MASK	0xffffffff
+
+
+/*
+ * Register <XIF_SECRX_KEYNUM_1> - read-only
+ *
+ * Provides additional per-LLID status of downstream security key for 802.
+ * 1ae P2P.
+ */
+#define XIF_SECRX_KEYNUM_1_REG		0x208
+
+/*
+ * In 802.
+ * 1ae P2P mode, the number of key supported per LLID is 4.
+ * This register provides the upper bit of the 2-bits key number.
+ * The lower bit is provided by XIF_SECRX_KEYNUM.
+*/
+#define  XIF_SECRX_KEYNUM_1_KEYSTATRX_HI_SHIFT	0
+#define  XIF_SECRX_KEYNUM_1_KEYSTATRX_HI_MASK	0xffffffff
+
+
+#endif /* ! EPON_XIF_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_xpcsrx.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_xpcsrx.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_xpcsrx.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_xpcsrx.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,1623 @@
+#ifndef EPON_XPCSRX_H_
+#define EPON_XPCSRX_H_
+
+/* relative to epon */
+#define XPCSRX_OFFSET_0			0x3000
+
+/*
+ * Register <XPCSRX_RST>
+ *
+ * Provides reset for submodules within XPCS RX and XSBI.
+ */
+#define XPCSRX_RX_RST_REG		0x0
+
+/* Active low reset for 161 MHz domain in XPCS RX. */
+#define  RX_RST_CFGXPCSRXCLK161RSTN_MASK	0x1
+
+
+/*
+ * Register <XPCSRX_INT_STAT>
+ *
+ * Interrupt status for XPcsRx module.
+ */
+#define XPCSRX_RX_INT_STAT_REG		0x4
+
+/* DA jitter detected. */
+#define  RX_INT_STAT_INTRXIDLEDAJIT_MASK	0x40000000
+
+/* Missing burst detected. */
+#define  RX_INT_STAT_INTRXFRMRMISBRST_MASK	0x20000000
+
+/* Over size packet detected. */
+#define  RX_INT_STAT_INTRXIDLESOPEOPGAPBIG_MASK	0x10000000
+
+/*
+ * No idle insert opportunity in 2000 bytes.
+ * Idle insert was forced.
+*/
+#define  RX_INT_STAT_INTRXIDLEFRCINS_MASK	0x8000000
+
+/* Min IPG error detected. */
+#define  RX_INT_STAT_INTRX64B66BMINIPGERR_MASK	0x4000000
+
+/* FEC CW store/forward enqueue input and output count not equal. */
+#define  RX_INT_STAT_INTRXFECNQUECNTNEQ_MASK	0x2000000
+
+/*
+ * Idle insert FIFO under run.
+ * Fatal
+*/
+#define  RX_INT_STAT_INTRXIDLEFIFOUNDRUN_MASK	0x1000000
+
+/*
+ * Idle insert FIFO over run.
+ * Fatal
+*/
+#define  RX_INT_STAT_INTRXIDLEFIFOOVRRUN_MASK	0x800000
+
+/*
+ * FEC high correction alarm.
+ * High FEC correction occurred over cfgXPcsRxFecCorIntval.
+*/
+#define  RX_INT_STAT_INTRXFECHIGHCOR_MASK	0x400000
+
+/* FEC CW decode fail and FEC decoder is frozen per cfgXPcsRxFecStopOnErr */
+#define  RX_INT_STAT_INTRXFECDECSTOPONERR_MASK	0x80000
+
+/* FEC CW decode passed. */
+#define  RX_INT_STAT_INTRXFECDECPASS_MASK	0x40000
+
+/* Framer has high BER. */
+#define  RX_INT_STAT_INTRXSTATFRMRHIGHBER_MASK	0x20000
+
+/* Framer exited by hitting max count on SP. */
+#define  RX_INT_STAT_INTRXFRMREXITBYSP_MASK	0x10000
+
+/* Framer hit bad SH max count. */
+#define  RX_INT_STAT_INTRXFRMRBADSHMAX_MASK	0x8000
+
+/* Burst sequence out of order. */
+#define  RX_INT_STAT_INTRXDSCRAMBURSTSEQOUT_MASK	0x4000
+
+/* Test pattern pseudo lock. */
+#define  RX_INT_STAT_INTRXTESTPSUDOLOCK_MASK	0x2000
+
+/* Test pattern pseudo type. */
+#define  RX_INT_STAT_INTRXTESTPSUDOTYPE_MASK	0x1000
+
+/* Test pattern pseudo error. */
+#define  RX_INT_STAT_INTRXTESTPSUDOERR_MASK	0x800
+
+/* Test pattern PRBS lock. */
+#define  RX_INT_STAT_INTRXTESTPRBSLOCK_MASK	0x400
+
+/* Test pattern PRBS error. */
+#define  RX_INT_STAT_INTRXTESTPRBSERR_MASK	0x200
+
+/* Three consecutive failed FEC CW decode. */
+#define  RX_INT_STAT_INTRXFECPSISTDECFAIL_MASK	0x100
+
+/* Framer detected bad SH while in lock. */
+#define  RX_INT_STAT_INTRXFRAMERBADSH_MASK	0x80
+
+/* Framer went into loss. */
+#define  RX_INT_STAT_INTRXFRAMERCWLOSS_MASK	0x40
+
+/* Framer went into lock. */
+#define  RX_INT_STAT_INTRXFRAMERCWLOCK_MASK	0x20
+
+/* FEC CW decode failed. */
+#define  RX_INT_STAT_INTRXFECDECFAIL_MASK	0x10
+
+/* 66b to 64b decode error has occurred. */
+#define  RX_INT_STAT_INTRX64B66BDECERR_MASK	0x8
+
+/* No frmrCwLk before the no lock timer expired. */
+#define  RX_INT_STAT_INTRXFRMRNOLOCKLOS_MASK	0x4
+
+/* SP count hit max in ranging but no lock */
+#define  RX_INT_STAT_INTRXFRMRROGUE_MASK	0x2
+
+/* register access error has occured. */
+#define  RX_INT_STAT_INT_REGS_ERR_MASK	0x1
+
+
+/*
+ * Register <XPCSRX_INT_MSK>
+ *
+ * Interrupt mask for XPcsRx module.
+ */
+#define XPCSRX_RX_INT_MSK_REG		0x8
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXIDLEDAJIT_MASK	0x40000000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXFRMRMISBRST_MASK	0x20000000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXIDLESOPEOPGAPBIG_MASK	0x10000000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXIDLEFRCINS_MASK	0x8000000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRX64B66BMINIPGERR_MASK	0x4000000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXFECNQUECNTNEQ_MASK	0x2000000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXIDLEFIFOUNDRUN_MASK	0x1000000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXIDLEFIFOOVRRUN_MASK	0x800000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXFECHIGHCOR_MASK	0x400000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXFECDECSTOPONERR_MASK	0x80000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXFECDECPASS_MASK	0x40000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXSTATFRMRHIGHBER_MASK	0x20000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXFRMREXITBYSP_MASK	0x10000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXFRMRBADSHMAX_MASK	0x8000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXDSCRAMBURSTSEQOUT_MASK	0x4000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXTESTPSUDOLOCK_MASK	0x2000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXTESTPSUDOTYPE_MASK	0x1000
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXTESTPSUDOERR_MASK	0x800
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXTESTPRBSLOCK_MASK	0x400
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXTESTPRBSERR_MASK	0x200
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXFECPSISTDECFAIL_MASK	0x100
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXFRAMERBADSH_MASK	0x80
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXFRAMERCWLOSS_MASK	0x40
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXFRAMERCWLOCK_MASK	0x20
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXFECDECFAIL_MASK	0x10
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRX64B66BDECERR_MASK	0x8
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXFRMRNOLOCKLOS_MASK	0x4
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSKRXFRMRROGUE_MASK	0x2
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK_MSK_INT_REGS_ERR_MASK	0x1
+
+
+/*
+ * Register <XPCSRX_FRAMER_CTL>
+ *
+ * Provides control over framer in XPCS RX.
+ */
+#define XPCSRX_RX_FRAMER_CTL_REG	0xc
+
+/*
+ * 1 = block loading of alignMux0Sel when spLkCntMax achieved.
+ * This was the ECO for PIONEER B2.
+*/
+#define  RX_FRAMER_CTL_CFGXPCSRXFRMRFRCEARLYALIGN_MASK	0x8000
+
+/*
+ * 0 = use framing mode for mux select = alignMux0SelQ for bdEbdAlignedData.
+ * 1 = use framing mode for mux select = alignMux0SelQQ for bdEbdAlignedData.
+*/
+#define  RX_FRAMER_CTL_CFGXPCSRXFRMRMODEA_MASK	0x4000
+
+/*
+ * Allows framer to not require any space between EBD and BD after framing
+ * once during overlapping grants.
+ * cfgXPcsRxFrmrOverlapGntEn must be set to use this bit.
+*/
+#define  RX_FRAMER_CTL_CFGXPCSRXFRMROVERLAPBDEBDZERO_MASK	0x2000
+
+/*
+ * Allows framer to not require spLkCntMax (look for BD without preceding
+ * SP) after framing once during overlapping grants.
+*/
+#define  RX_FRAMER_CTL_CFGXPCSRXFRMROVERLAPGNTEN_MASK	0x1000
+
+/*
+ * Enable for burst mode framing using old alignment
+ * (xsbiPcsRxFifoVld instead of xsbiPcsRxFifoVldQ) for alignMuxSelQ and
+ * alignMuxSelQQ.
+*/
+#define  RX_FRAMER_CTL_CFGXPCSRXFRAMEBURSTOLDALIGN_MASK	0x200
+
+/*
+ * 0 - Use falling edge of SP count max and no framer lock to count missing
+ * burst.
+ * 1 - Use unassigned strobe for detection of missing burst.
+*/
+#define  RX_FRAMER_CTL_CFGXPCSRXFRMRMISBRSTTYPE_MASK	0x100
+
+/* In burst mode only look at EBD at the end of codewords. */
+#define  RX_FRAMER_CTL_CFGXPCSRXFRMREBDVLDEN_MASK	0x80
+
+/* FPGA only. */
+#define  RX_FRAMER_CTL_CFGXPCSRXFRMRBDCNTEN_MASK	0x40
+
+/* Allows framer to lose lock from 16 bad SH in 62 blocks for burst mode. */
+#define  RX_FRAMER_CTL_CFGXPCSRXFRMRBURSTBADSHEN_MASK	0x20
+
+/* Allows framer to lose lock from detection of sync pattern. */
+#define  RX_FRAMER_CTL_CFGXPCSRXFRMRSPULKEN_MASK	0x10
+
+/* Enable for burst mode framing. */
+#define  RX_FRAMER_CTL_CFGXPCSRXFRAMEBURST_MASK	0x8
+
+/* Enable for framer. */
+#define  RX_FRAMER_CTL_CFGXPCSRXFRMREN_MASK	0x4
+
+/* Allows for ignoring of FEC persist decode fail in clause 76 framing. */
+#define  RX_FRAMER_CTL_CFGXPCSRXFRMRBLKFECFAIL_MASK	0x2
+
+/* Enable for FEC framing. */
+#define  RX_FRAMER_CTL_CFGXPCSRXFRAMEFEC_MASK	0x1
+
+
+/*
+ * Register <XPCSRX_FEC_CTL>
+ *
+ * Provides control over FEC in XPCS RX.
+ */
+#define XPCSRX_RX_FEC_CTL_REG		0x10
+
+/*
+ * Freezes FEC decoder from writing and reading FEC RAM when an error is
+ * decoded.
+ * No recovery afterwards; requires that XPCS RX be reset.
+*/
+#define  RX_FEC_CTL_CFGXPCSRXFECSTOPONERR_MASK	0x400
+
+/*
+ * Tells FEC stats engine to count CW enqueued instead of total FEC
+ * decoded CW.
+*/
+#define  RX_FEC_CTL_CFGXPCSRXFECCNTNQUECW_MASK	0x200
+
+/* Reset the store and forward FIFO. */
+#define  RX_FEC_CTL_CFGXPCSRXFECNQUERST_MASK	0x100
+
+/*
+ * 0 - Count every bit correction for the FEC CW.
+ * 1 - Count only a single correction per 8 bits.
+ * This only affects the corrected ones and corrected zero stats.
+ * The total corrected is not affected by this control.
+*/
+#define  RX_FEC_CTL_CFGXPCSRXFECONEZEROMODE_MASK	0x80
+
+/* Stop FEC decoder from making corrections. */
+#define  RX_FEC_CTL_CFGXPCSRXFECBLKCORRECT_MASK	0x40
+
+/* Replace all FEC CW with IEEE test CW. */
+#define  RX_FEC_CTL_CFGXPCSRXFECNQUETESTPAT_MASK	0x20
+
+/*
+ * 0 - Do not blank out SH for failed FEC CW decodes.
+ * 1 - Blank out SH for failed FEC CW.
+ * CW will pass as /E.
+*/
+#define  RX_FEC_CTL_CFGXPCSRXFECFAILBLKSH0_MASK	0x10
+
+/* Enable stripping of FEC parity for FEC decode bypass. */
+#define  RX_FEC_CTL_CFGXPCSRXFECSTRIP_MASK	0x8
+
+/* Enable FEC decode bypass. */
+#define  RX_FEC_CTL_CFGXPCSRXFECBYPAS_MASK	0x4
+
+/* Enable idle insert to replace FEC parity lost in FEC decode. */
+#define  RX_FEC_CTL_CFGXPCSRXFECIDLEINS_MASK	0x2
+
+/* Enable FEC decode. */
+#define  RX_FEC_CTL_CFGXPCSRXFECEN_MASK	0x1
+
+
+/*
+ * Register <XPCSRX_DSCRAM_CTL>
+ *
+ * Provides control over descrambler in XPCS RX.
+ */
+#define XPCSRX_RX_DSCRAM_CTL_REG	0x14
+
+/* Enable descrambler bypass. */
+#define  RX_DSCRAM_CTL_CFGXPCSRXDSCRAMBYPAS_MASK	0x1
+
+
+/*
+ * Register <XPCSRX_64B66B_CTL>
+ *
+ * Provides control over 64b/66b decoder in XPCS RX.
+ */
+#define XPCSRX_RX_64B66B_CTL_REG	0x18
+
+/* Default to T7 to T4 IPG violation det. */
+#define  RX_64B66B_CTL_CFGXPCSRX64B66BTMASK1_SHIFT	24
+#define  RX_64B66B_CTL_CFGXPCSRX64B66BTMASK1_MASK	0xff000000
+
+/* Default to T7 to T4 IPG violation det. */
+#define  RX_64B66B_CTL_CFGXPCSRX64B66BTMASK0_SHIFT	16
+#define  RX_64B66B_CTL_CFGXPCSRX64B66BTMASK0_MASK	0xff0000
+
+/* Default to S0 IPG violation det. */
+#define  RX_64B66B_CTL_CFGXPCSRX64B66BSMASK1_SHIFT	12
+#define  RX_64B66B_CTL_CFGXPCSRX64B66BSMASK1_MASK	0x3000
+
+/* Default to S0 IPG violation det. */
+#define  RX_64B66B_CTL_CFGXPCSRX64B66BSMASK0_SHIFT	8
+#define  RX_64B66B_CTL_CFGXPCSRX64B66BSMASK0_MASK	0x300
+
+/* Compare S one pipe behind T. */
+#define  RX_64B66B_CTL_CFGXPCSRX64B66BTDLAY_SHIFT	4
+#define  RX_64B66B_CTL_CFGXPCSRX64B66BTDLAY_MASK	0x30
+
+/* Enable 64B/66B decoder bypass. */
+#define  RX_64B66B_CTL_CFGXPCSRX64B66BDECBYPAS_MASK	0x1
+
+
+/*
+ * Register <XPCSRX_TEST_CTL>
+ *
+ * Provides control over test circuits in XPCS RX.
+ */
+#define XPCSRX_RX_TEST_CTL_REG		0x1c
+
+/* Enable PRBS test pattern detector. */
+#define  RX_TEST_CTL_CFGXPCSRXTESTPRBSDETEN_MASK	0x2
+
+/* Enable pseudo test pattern detector. */
+#define  RX_TEST_CTL_CFGXPCSRXTESTPSUDODETEN_MASK	0x1
+
+
+/*
+ * Register <XPCSRX_IDLE_RD_TIMER_DLY>
+ *
+ * Sets the delay time to start read of burst from idle insert FIFO.
+ */
+#define XPCSRX_RX_IDLE_RD_TIMER_DLY_REG	0x20
+
+/*
+ * The delay time to start read of burst (default is 8'd60 ticks).
+ * Sets the delay to 8'd232 for 10K MTU.
+*/
+#define  RX_IDLE_RD_TIMER_DLY_CFGXPCSRXIDLERDDELAYTIMERMAX_SHIFT	0
+#define  RX_IDLE_RD_TIMER_DLY_CFGXPCSRXIDLERDDELAYTIMERMAX_MASK	0xff
+
+
+/*
+ * Register <XPCSRX_IDLE_GAP_SIZ_MAX>
+ *
+ * Sets the size for over size frames based on delta between SOP and EOP.
+ */
+#define XPCSRX_RX_IDLE_GAP_SIZ_MAX_REG	0x24
+
+/*
+ * Max size in blocks without an idle insert opportunity before idle
+ * insert is forced.
+*/
+#define  RX_IDLE_GAP_SIZ_MAX_CFGXPCSRXIDLEOVRSIZMAX_SHIFT	16
+#define  RX_IDLE_GAP_SIZ_MAX_CFGXPCSRXIDLEOVRSIZMAX_MASK	0x7ff0000
+
+/* Max distance in blocks between SOP and EOP. */
+#define  RX_IDLE_GAP_SIZ_MAX_CFGXPCSRXIDLESOPEOPGAP_SHIFT	0
+#define  RX_IDLE_GAP_SIZ_MAX_CFGXPCSRXIDLESOPEOPGAP_MASK	0xffff
+
+
+/*
+ * Register <XPCSRX_FRAMER_LK_MAX>
+ *
+ * Sets the delay for indicating lock to FEC decode circuit.
+ * For FEC modes use the default of 8'd280.
+ * For FEC bypass modes use the default of 8'd26.
+ */
+#define XPCSRX_RX_FRAMER_LK_MAX_REG	0x28
+
+/* Max delay for framer to indicate to FEC circuit lock has been achieved. */
+#define  RX_FRAMER_LK_MAX_CFGXPCSRXFRMRCWLKTIMERMAX_SHIFT	0
+#define  RX_FRAMER_LK_MAX_CFGXPCSRXFRMRCWLKTIMERMAX_MASK	0x1ff
+
+
+/*
+ * Register <XPCSRX_FRAMER_UNLK_MAX>
+ *
+ * Sets the delay for indicating unlock to FEC decode circuit.
+ * For FEC modes use the default of 8'd280.
+ * For FEC bypass modes use the default of 8'd26.
+ */
+#define XPCSRX_RX_FRAMER_UNLK_MAX_REG	0x2c
+
+/* Max delay for framer to indicate to FEC circuit unlock has occurred. */
+#define  RX_FRAMER_UNLK_MAX_CFGXPCSRXFRMRCWUNLKTIMERMAX_SHIFT	0
+#define  RX_FRAMER_UNLK_MAX_CFGXPCSRXFRMRCWUNLKTIMERMAX_MASK	0x1ff
+
+
+/*
+ * Register <XPCSRX_FRAMER_BD_SH>
+ *
+ * Sets the SH value for the BD for the framer.
+ */
+#define XPCSRX_RX_FRAMER_BD_SH_REG	0x30
+
+/* BD sync header. */
+#define  RX_FRAMER_BD_SH_CFGXPCSRXOLTBDSH_SHIFT	0
+#define  RX_FRAMER_BD_SH_CFGXPCSRXOLTBDSH_MASK	0x3
+
+
+/*
+ * Register <XPCSRX_FRAMER_BD_LO>
+ *
+ * Sets the low 32 bit value for the BD for the framer.
+ */
+#define XPCSRX_RX_FRAMER_BD_LO_REG	0x34
+
+/* Low 32 bit value for the BD. */
+#define  RX_FRAMER_BD_LO_CFGXPCSRXOLTBDLO_SHIFT	0
+#define  RX_FRAMER_BD_LO_CFGXPCSRXOLTBDLO_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FRAMER_BD_HI>
+ *
+ * Sets the high 32 bit value for the BD for the framer.
+ */
+#define XPCSRX_RX_FRAMER_BD_HI_REG	0x38
+
+/* High 32 bit value for the BD. */
+#define  RX_FRAMER_BD_HI_CFGXPCSRXOLTBDHI_SHIFT	0
+#define  RX_FRAMER_BD_HI_CFGXPCSRXOLTBDHI_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FRAMER_EBD_SH>
+ *
+ * Sets the SH value for the EBD for the framer.
+ */
+#define XPCSRX_RX_FRAMER_EBD_SH_REG	0x3c
+
+/* EBD sync header. */
+#define  RX_FRAMER_EBD_SH_CFGXPCSRXOLTEBDSH_SHIFT	0
+#define  RX_FRAMER_EBD_SH_CFGXPCSRXOLTEBDSH_MASK	0x3
+
+
+/*
+ * Register <XPCSRX_FRAMER_EBD_LO>
+ *
+ * Sets the low 32 bit value for the EBD for the framer.
+ */
+#define XPCSRX_RX_FRAMER_EBD_LO_REG	0x40
+
+/* Low 32 bit value for the EBD. */
+#define  RX_FRAMER_EBD_LO_CFGXPCSRXOLTEBDLO_SHIFT	0
+#define  RX_FRAMER_EBD_LO_CFGXPCSRXOLTEBDLO_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FRAMER_EBD_HI>
+ *
+ * Sets the high 32 bit value for the EBD for the framer.
+ */
+#define XPCSRX_RX_FRAMER_EBD_HI_REG	0x44
+
+/* High 32 bit value for the EBD. */
+#define  RX_FRAMER_EBD_HI_CFGXPCSRXOLTEBDHI_SHIFT	0
+#define  RX_FRAMER_EBD_HI_CFGXPCSRXOLTEBDHI_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_STATUS> - read-only
+ *
+ * Raw value for interrupt status.
+ */
+#define XPCSRX_RX_STATUS_REG		0x48
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXIDLEDAJIT_MASK	0x40000000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXFRMRMISBRST_MASK	0x20000000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXIDLESOPEOPGAPBIG_MASK	0x10000000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXIDLEFRCINS_MASK	0x8000000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRX64B66BMINIPGERR_MASK	0x4000000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXFECNQUECNTNEQ_MASK	0x2000000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXIDLEFIFOUNDRUN_MASK	0x1000000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXIDLEFIFOOVRRUN_MASK	0x800000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXFECHIGHCOR_MASK	0x400000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXFECDECPASS_MASK	0x40000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXSTATFRMRHIGHBER_MASK	0x20000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXFRMREXITBYSP_MASK	0x10000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXFRMRBADSHMAX_MASK	0x8000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXDSCRAMBURSTSEQOUT_MASK	0x4000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXTESTPSUDOLOCK_MASK	0x2000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXTESTPSUDOTYPE_MASK	0x1000
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXTESTPSUDOERR_MASK	0x800
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXTESTPRBSLOCK_MASK	0x400
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXTESTPRBSERR_MASK	0x200
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXFECPSISTDECFAIL_MASK	0x100
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXFRAMERBADSH_MASK	0x80
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXFRAMERCWLOSS_MASK	0x40
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXFRAMERCWLOCK_MASK	0x20
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXFECDECFAIL_MASK	0x10
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRX64B66BDECERR_MASK	0x8
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXFRMRNOLOCKLOS_MASK	0x4
+
+/* Raw status on interrupts. */
+#define  RX_STATUS_STATRXFRMRROGUE_MASK	0x2
+
+
+/*
+ * Register <XPCSRX_FRAMER_LK_ULK_MAX>
+ *
+ * Sets number of SP detected in order to validate lock or unlock.
+ */
+#define XPCSRX_RX_FRAMER_LK_ULK_MAX_REG	0x4c
+
+/* The number of consecutive SP before a BD required to gain lock. */
+#define  RX_FRAMER_LK_ULK_MAX_CFGXPCSRXFRMRSPLKMAX_SHIFT	16
+#define  RX_FRAMER_LK_ULK_MAX_CFGXPCSRXFRMRSPLKMAX_MASK	0x1fff0000
+
+/* The number of consecutive SP required to lose lock. */
+#define  RX_FRAMER_LK_ULK_MAX_CFGXPCSRXFRMRSPULKMAX_SHIFT	0
+#define  RX_FRAMER_LK_ULK_MAX_CFGXPCSRXFRMRSPULKMAX_MASK	0x1fff
+
+
+/*
+ * Register <XPCSRX_FRAMER_SP_SH>
+ *
+ * Sets the SH value for the SP for the framer.
+ */
+#define XPCSRX_RX_FRAMER_SP_SH_REG	0x50
+
+/* The SH value for SP. */
+#define  RX_FRAMER_SP_SH_CFGXPCSRXOLTSPSH_SHIFT	0
+#define  RX_FRAMER_SP_SH_CFGXPCSRXOLTSPSH_MASK	0x3
+
+
+/*
+ * Register <XPCSRX_FRAMER_SP_LO>
+ *
+ * Sets the low 32 bit value for the SP for the framer.
+ */
+#define XPCSRX_RX_FRAMER_SP_LO_REG	0x54
+
+/* The lowest 32 bit value for the SP. */
+#define  RX_FRAMER_SP_LO_CFGXPCSRXOLTSPLO_SHIFT	0
+#define  RX_FRAMER_SP_LO_CFGXPCSRXOLTSPLO_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FRAMER_SP_HI>
+ *
+ * Sets the high 32 bit value for the SP for the framer.
+ */
+#define XPCSRX_RX_FRAMER_SP_HI_REG	0x58
+
+/* The highest 32 bit value for the SP. */
+#define  RX_FRAMER_SP_HI_CFGXPCSRXOLTSPHI_SHIFT	0
+#define  RX_FRAMER_SP_HI_CFGXPCSRXOLTSPHI_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FRAMER_STATE> - read-only
+ *
+ * The current value of framer state machine.
+ */
+#define XPCSRX_RX_FRAMER_STATE_REG	0x5c
+
+/* The state of the framer state machine. */
+#define  RX_FRAMER_STATE_XPCSRXFRMRSTATE_SHIFT	0
+#define  RX_FRAMER_STATE_XPCSRXFRMRSTATE_MASK	0xf
+
+
+/*
+ * Register <XPCSRX_FRAMER_BD_EBD_HAM>
+ *
+ * Sets the hamming distance for SP
+ */
+#define XPCSRX_RX_FRAMER_BD_EBD_HAM_REG	0x60
+
+/* Hamming distance for SP. */
+#define  RX_FRAMER_BD_EBD_HAM_CFGXPCSRXFRMRSPHAM_SHIFT	8
+#define  RX_FRAMER_BD_EBD_HAM_CFGXPCSRXFRMRSPHAM_MASK	0xf00
+
+/* Hamming distance for EBD. */
+#define  RX_FRAMER_BD_EBD_HAM_CFGXPCSRXFRMREBDHAM_SHIFT	4
+#define  RX_FRAMER_BD_EBD_HAM_CFGXPCSRXFRMREBDHAM_MASK	0xf0
+
+/* Hamming distance for BD. */
+#define  RX_FRAMER_BD_EBD_HAM_CFGXPCSRXFRMRBDHAM_SHIFT	0
+#define  RX_FRAMER_BD_EBD_HAM_CFGXPCSRXFRMRBDHAM_MASK	0xf
+
+
+/*
+ * Register <XPCSRX_FRAMER_MISBRST_CNT>
+ *
+ * The count of the possible missing bursts that were detected.
+ * This is based on SP detect and SP loss with no BD found.
+ */
+#define XPCSRX_RX_FRAMER_MISBRST_CNT_REG	0x64
+
+/* Count of possible missed bursts. */
+#define  RX_FRAMER_MISBRST_CNT_RXFRMRMISBRSTCNT_SHIFT	0
+#define  RX_FRAMER_MISBRST_CNT_RXFRMRMISBRSTCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FRAMER_BD_ERR> - read-only
+ *
+ * The number of bit errors found in the last BD when lock was declared.
+ */
+#define XPCSRX_RX_FRAMER_BD_ERR_REG	0x68
+
+/* A count of the errors in BD when it was found. */
+#define  RX_FRAMER_BD_ERR_XPCSRXSTATFRMRBDERR_SHIFT	0
+#define  RX_FRAMER_BD_ERR_XPCSRXSTATFRMRBDERR_MASK	0xf
+
+
+/*
+ * Register <XPCSRX_FRAMER_ROGUE_CTL>
+ *
+ * Config for LOS based on no lock during a time interval
+ */
+#define XPCSRX_RX_FRAMER_ROGUE_CTL_REG	0x6c
+
+/*
+ * 0 - Rogue detection is disabled.
+ * 1 - Rogue detection is enabled.
+*/
+#define  RX_FRAMER_ROGUE_CTL_CFGXPCSRXFRMRROGUEEN_MASK	0x80000000
+
+/*
+ * If SP count hits the threshold and the ranging window ends
+ * without a lock, then a rogue detection alarm is set.
+*/
+#define  RX_FRAMER_ROGUE_CTL_CFGXPCSRXFRMRROGUESPTRESH_SHIFT	0
+#define  RX_FRAMER_ROGUE_CTL_CFGXPCSRXFRMRROGUESPTRESH_MASK	0xffff
+
+
+/*
+ * Register <XPCSRX_FRAMER_NOLOCK_CTL>
+ *
+ * Config for LOS based on no lock during a time interval
+ */
+#define XPCSRX_RX_FRAMER_NOLOCK_CTL_REG	0x70
+
+/*
+ * 0 - No lock LOS detection is disabled.
+ * 1 - No lock LOS detection is enabled.
+*/
+#define  RX_FRAMER_NOLOCK_CTL_CFGXPCSRXFRMRNOLOCKLOSEN_MASK	0x80000000
+
+/*
+ * Interval for LOS based on no lock found during this time.
+ * These are 6.206 ns steps with a default
+ * of 1 ms.
+ * The counter is 24 bits with a max interval of 104 ms.
+*/
+#define  RX_FRAMER_NOLOCK_CTL_CFGXPCSRXFRMRNOLOCKLOSINTVAL_SHIFT	0
+#define  RX_FRAMER_NOLOCK_CTL_CFGXPCSRXFRMRNOLOCKLOSINTVAL_MASK	0xffffff
+
+
+/*
+ * Register <XPCSRX_64B66B_IPG_DET_CNT>
+ *
+ * Min IPG violation detection count.
+ */
+#define XPCSRX_RX_64B66B_IPG_DET_CNT_REG	0x74
+
+/*
+ * This is the number of times that a relationship between the EOP
+ * and SOP is found.
+ * The default is set to detect min-IPG violations.
+*/
+#define  RX_64B66B_IPG_DET_CNT_RX64B66BIPGDETCNT_SHIFT	0
+#define  RX_64B66B_IPG_DET_CNT_RX64B66BIPGDETCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FEC_NQUE_IN_CNT>
+ *
+ * Counts the number of FEC CW written to the store/forward enqueue FIFO.
+ */
+#define XPCSRX_RX_FEC_NQUE_IN_CNT_REG	0x78
+
+/* The number of FEC CW written into the store/forward FIFO. */
+#define  RX_FEC_NQUE_IN_CNT_RXFECNQUEINCNT_SHIFT	0
+#define  RX_FEC_NQUE_IN_CNT_RXFECNQUEINCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FEC_NQUE_OUT_CNT>
+ *
+ * Counts the number of FEC codewords read from the store/forward
+ * enqueue FIFO.
+ */
+#define XPCSRX_RX_FEC_NQUE_OUT_CNT_REG	0x7c
+
+/* The number of FEC CW read from the store/forward FIFO. */
+#define  RX_FEC_NQUE_OUT_CNT_RXFECNQUEOUTCNT_SHIFT	0
+#define  RX_FEC_NQUE_OUT_CNT_RXFECNQUEOUTCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_IDLE_START_CNT>
+ *
+ * Counts the number of SOP detected in the IDLE insert circuit.
+ */
+#define XPCSRX_RX_IDLE_START_CNT_REG	0x80
+
+/* The number of SOP that the idle insertion logic detected. */
+#define  RX_IDLE_START_CNT_RXIDLESTARTCNT_SHIFT	0
+#define  RX_IDLE_START_CNT_RXIDLESTARTCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_IDLE_STOP_CNT>
+ *
+ * Counts the number of EOP detected in the IDLE insert circuit.
+ */
+#define XPCSRX_RX_IDLE_STOP_CNT_REG	0x84
+
+/* The number of EOP that the idle insertion logic detected. */
+#define  RX_IDLE_STOP_CNT_RXIDLESTOPCNT_SHIFT	0
+#define  RX_IDLE_STOP_CNT_RXIDLESTOPCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FEC_COR_INTVAL>
+ *
+ * Time interval setting (in 6.2 ns quanta) over which the number
+ * of FEC corrections is counted.
+ * Used for creating high FEC correction alarm.
+ */
+#define XPCSRX_RX_FEC_COR_INTVAL_REG	0x88
+
+/* Number of 161 MHz clock periods for the BER interval (default is 1 ms). */
+#define  RX_FEC_COR_INTVAL_CFGXPCSRXFECCORINTVAL_SHIFT	0
+#define  RX_FEC_COR_INTVAL_CFGXPCSRXFECCORINTVAL_MASK	0xffffff
+
+
+/*
+ * Register <XPCSRX_FEC_COR_TRESH>
+ *
+ * The threshold on the number of FEC corrections made over a timer interval
+ * that will cause the FEC high correction alarm.
+ */
+#define XPCSRX_RX_FEC_COR_TRESH_REG	0x8c
+
+/*
+ * Number of FEC corrections made over a given time interval that
+ * will trigger the FEC high correction alarm (default = 0 = off).
+*/
+#define  RX_FEC_COR_TRESH_CFGXPCSRXFECCORTRESH_SHIFT	0
+#define  RX_FEC_COR_TRESH_CFGXPCSRXFECCORTRESH_MASK	0xffffff
+
+
+/*
+ * Register <XPCSRX_FEC_CW_FAIL_CNT>
+ *
+ * Count the number of uncorrectable FEC CW that have been received.
+ */
+#define XPCSRX_RX_FEC_CW_FAIL_CNT_REG	0x90
+
+/* The number of uncorrectable FEC CW that have been received. */
+#define  RX_FEC_CW_FAIL_CNT_RXFECDECCWFAILCNT_SHIFT	0
+#define  RX_FEC_CW_FAIL_CNT_RXFECDECCWFAILCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FEC_CW_TOT_CNT>
+ *
+ * Count the number of total FEC CW that have been received.
+ */
+#define XPCSRX_RX_FEC_CW_TOT_CNT_REG	0x94
+
+/* The number of total FEC CW that have been received. */
+#define  RX_FEC_CW_TOT_CNT_RXFECDECCWTOTCNT_SHIFT	0
+#define  RX_FEC_CW_TOT_CNT_RXFECDECCWTOTCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FEC_CORRECT_CNT_LOWER>
+ *
+ * The number of corrections made in the FEC CW that have been received.
+ * Lower 32 bits of a 39 bit stat.
+ * Read this location before reading the upper 7 bits at
+ * XPCSRX_FEC_CORRECT_CNT_UPPER.
+ */
+#define XPCSRX_RX_FEC_CORRECT_CNT_LO_REG	0x98
+
+/* Lower 32 bits of number of bits that the FEC corrected. */
+#define  RX_FEC_CORRECT_CNT_LO_RXFECDECERRCORCNTLOWER_SHIFT	0
+#define  RX_FEC_CORRECT_CNT_LO_RXFECDECERRCORCNTLOWER_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FEC_CORRECT_CNT_UPPER> - read-only
+ *
+ * The number of corrections made in the FEC CW that have been received.
+ * Upper 7 bits of a 39 bit stat.
+ * Bit 8 of this register represents overflow of the 39 bit stat.
+ * Read this location after reading the lower 32 bits at
+ * XPCSRX_FEC_CORRECT_CNT_LOWER.
+ */
+#define XPCSRX_RX_FEC_CORRECT_CNT_HI_REG	0x9c
+
+/* Upper 7 bits of the number of bits that the FEC corrected. */
+#define  RX_FEC_CORRECT_CNT_HI_RXFECDECERRCORCNTUPPER_SHIFT	0
+#define  RX_FEC_CORRECT_CNT_HI_RXFECDECERRCORCNTUPPER_MASK	0xff
+
+
+/*
+ * Register <XPCSRX_FEC_CORRECT_CNT_SHADOW>
+ *
+ * The number of corrections made in the FEC CW that have been received.
+ * Upper 7 bits of a 39 bit stat.
+ * Bit 8 of this register represents overflow of the 39 bit stat.
+ * This is a HW shadow for the upper bits DO NOT USE.
+ */
+#define XPCSRX_RX_FEC_CORRECT_CNT_SHADOW_REG	0xa0
+
+/* Upper 7 bits of the number of bits that the FEC corrected. */
+#define  RX_FEC_CORRECT_CNT_SHADOW_RXFECDECERRCORCNTSHADOW_SHIFT	0
+#define  RX_FEC_CORRECT_CNT_SHADOW_RXFECDECERRCORCNTSHADOW_MASK	0xff
+
+
+/*
+ * Register <XPCSRX_FEC_ONES_COR_CNT_LOWER>
+ *
+ * The number of corrections made to ones in the FEC CW that have
+ * been received.
+ * Lower 32 bits of a 39 bit stat.
+ * Read this location before reading the upper 7 bits at
+ * XPCSRX_FEC_CORRECT_CNT_UPPER.
+ * NOTE:
+ * Covers from start of CW through the first 40 bits of the parity.
+ */
+#define XPCSRX_RX_FEC_ONES_COR_CNT_LO_REG	0xa4
+
+/* The number of ones that are corrected by the FEC decoder. */
+#define  RX_FEC_ONES_COR_CNT_LO_RXFECDECONESCORCNTLOWER_SHIFT	0
+#define  RX_FEC_ONES_COR_CNT_LO_RXFECDECONESCORCNTLOWER_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FEC_ONES_COR_CNT_UPPER> - read-only
+ *
+ * The number of corrections made to ones in the FEC CW that have
+ * been received.
+ * Upper 7 bits of a 39 bit stat.
+ * Bit 8 of this register represents overflow of the 39 bit stat.
+ * Read this location after reading the lower 32 bits at
+ * XPCSRX_FEC_CORRECT_CNT_LOWER.
+ * NOTE:
+ * Covers from start of CW through the first 40 bits of the parity.
+ */
+#define XPCSRX_RX_FEC_ONES_COR_CNT_HI_REG	0xa8
+
+/* The number of ones that are corrected by the FEC decoder. */
+#define  RX_FEC_ONES_COR_CNT_HI_RXFECDECONESCORCNTUPPER_SHIFT	0
+#define  RX_FEC_ONES_COR_CNT_HI_RXFECDECONESCORCNTUPPER_MASK	0xff
+
+
+/*
+ * Register <XPCSRX_FEC_ONES_COR_CNT_SHADOW>
+ *
+ * The number of corrections made to ones in the FEC CW that have
+ * been received.
+ * Upper 7 bits of a 39 bit stat.
+ * Bit 8 of this register represents overflow of the 39 bit stat.
+ * This is a HW shadow for the upper bits DO NOT USE.
+ * NOTE:
+ * Covers from start of CW through the first 40 bits of the parity.
+ */
+#define XPCSRX_RX_FEC_ONES_COR_CNT_SHADOW_REG	0xac
+
+/* The number of ones that are corrected by the FEC decoder. */
+#define  RX_FEC_ONES_COR_CNT_SHADOW_RXFECDECONESCORCNTSHADOW_SHIFT	0
+#define  RX_FEC_ONES_COR_CNT_SHADOW_RXFECDECONESCORCNTSHADOW_MASK	0xff
+
+
+/*
+ * Register <XPCSRX_FEC_ZEROS_COR_CNT_LOWER>
+ *
+ * The number of corrections made to zeros in the FEC CW that have
+ * been received.
+ * Lower 32 bits of a 39 bit stat.
+ * Read this location before reading the upper 7 bits at
+ * XPCSRX_FEC_CORRECT_CNT_UPPER.
+ * NOTE:
+ * Covers from start of CW through the first 40 bits of the parity.
+ */
+#define XPCSRX_RX_FEC_ZEROS_COR_CNT_LO_REG	0xb0
+
+/* The number of zeros that are corrected by the FEC decoder. */
+#define  RX_FEC_ZEROS_COR_CNT_LO_RXFECDECZEROSCORCNTLOWER_SHIFT	0
+#define  RX_FEC_ZEROS_COR_CNT_LO_RXFECDECZEROSCORCNTLOWER_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FEC_ZEROS_COR_CNT_UPPER> - read-only
+ *
+ * The number of corrections made to zeros in the FEC CW that have
+ * been received.
+ * Upper 7 bits of a 39 bit stat.
+ * Bit 8 of this register represents overflow of the 39 bit stat.
+ * Read this location after reading the lower 32 bits at
+ * XPCSRX_FEC_CORRECT_CNT_LOWER.
+ * NOTE:
+ * Covers from start of CW through the first 40 bits of the parity.
+ */
+#define XPCSRX_RX_FEC_ZEROS_COR_CNT_HI_REG	0xb4
+
+/* The number of zeros that are corrected by the FEC decoder. */
+#define  RX_FEC_ZEROS_COR_CNT_HI_RXFECDECZEROSCORCNTUPPER_SHIFT	0
+#define  RX_FEC_ZEROS_COR_CNT_HI_RXFECDECZEROSCORCNTUPPER_MASK	0xff
+
+
+/*
+ * Register <XPCSRX_FEC_ZEROS_COR_CNT_SHADOW>
+ *
+ * The number of corrections made to zeros in the FEC CW that have
+ * been received.
+ * Upper 7 bits of a 39 bit stat.
+ * Bit 8 of this register represents overflow of the 39 bit stat.
+ * This is a HW shadow for the upper bits DO NOT USE.
+ * NOTE:
+ * Covers from start of CW through the first 40 bits of the parity.
+ */
+#define XPCSRX_RX_FEC_ZEROS_COR_CNT_SHADOW_REG	0xb8
+
+/* The number of zeros that are corrected by the FEC decoder. */
+#define  RX_FEC_ZEROS_COR_CNT_SHADOW_RXFECDECZEROSCORCNTSHADOW_SHIFT	0
+#define  RX_FEC_ZEROS_COR_CNT_SHADOW_RXFECDECZEROSCORCNTSHADOW_MASK	0xff
+
+
+/*
+ * Register <XPCSRX_FEC_STOP_ON_ERR_READ_POINTER> - read-only
+ *
+ * Captures the write and read pointer for the FEC decoder when a
+ * fail decode occurs.
+ */
+#define XPCSRX_RX_FEC_STOP_ON_ERR_READ_POINTER_REG	0xbc
+
+/*
+ * Captures the read pointer for the FEC decoder when a fail decode happens.
+ * This is for the feature that freezes the FEC decoder when decode fails.
+ * Requires cfgXPcsRxFecStopOnErr = 1.
+*/
+#define  RX_FEC_STOP_ON_ERR_READ_POINTER_RXFECSTOPONERRRDPTR_SHIFT	8
+#define  RX_FEC_STOP_ON_ERR_READ_POINTER_RXFECSTOPONERRRDPTR_MASK	0xff00
+
+/*
+ * Captures the write pointer for the FEC decoder when a fail
+ * decode happens.
+ * This is for the feature that freezes the FEC decoder when decode fails.
+ * Requires cfgXPcsRxFecStopOnErr = 1.
+*/
+#define  RX_FEC_STOP_ON_ERR_READ_POINTER_RXFECSTOPONERRWRPTR_SHIFT	0
+#define  RX_FEC_STOP_ON_ERR_READ_POINTER_RXFECSTOPONERRWRPTR_MASK	0xff
+
+
+/*
+ * Register <XPCSRX_FEC_STOP_ON_ERR_BURST_LOCATION> - read-only
+ *
+ * Captures the location within the burst where the stop on error occurred
+ */
+#define XPCSRX_RX_FEC_STOP_ON_ERR_BURST_LOCATION_REG	0xc0
+
+/*
+ * Captures the location within the burst where the stop on error occurred.
+ * This is in ticks of 161 clocks.
+ * Requires cfgXPcsRxFecStopOnErr = 1.
+*/
+#define  RX_FEC_STOP_ON_ERR_BURST_LOCATION_RXFECSTOPONERRBRSTLOC_SHIFT	0
+#define  RX_FEC_STOP_ON_ERR_BURST_LOCATION_RXFECSTOPONERRBRSTLOC_MASK	0xffffff
+
+
+/*
+ * Register <XPCSRX_64B66B_FAIL_CNT>
+ *
+ * Count the number of 64b/66b decode errors.
+ */
+#define XPCSRX_RX_64B66B_FAIL_CNT_REG	0xc4
+
+/* The number of 64b/66b decode errors. */
+#define  RX_64B66B_FAIL_CNT_RX64B66BDECERRCNT_SHIFT	0
+#define  RX_64B66B_FAIL_CNT_RX64B66BDECERRCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_FRMR_BAD_SH_CNT>
+ *
+ * Count the number of bad SH during CW lock.
+ */
+#define XPCSRX_RX_FRMR_BAD_SH_CNT_REG	0xc8
+
+/* The number bad SH while in CW lock. */
+#define  RX_FRMR_BAD_SH_CNT_RXFRMRBADSHCNT_SHIFT	0
+#define  RX_FRMR_BAD_SH_CNT_RXFRMRBADSHCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_PSUDO_CNT>
+ *
+ * Count the number of errors in test pattern 2 while in pattern lock.
+ */
+#define XPCSRX_RX_PSUDO_CNT_REG		0xcc
+
+/* The number of errors in test pattern 2 while in pattern lock. */
+#define  RX_PSUDO_CNT_RXTESTPSUDOERRCNT_SHIFT	0
+#define  RX_PSUDO_CNT_RXTESTPSUDOERRCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_PRBS_CNT>
+ *
+ * Count the number of errors in test pattern PRBS-31 while in pattern lock.
+ */
+#define XPCSRX_RX_PRBS_CNT_REG		0xd0
+
+/* The number of errors in test pattern PRBS-31 while in pattern lock. */
+#define  RX_PRBS_CNT_RXTESTPRBSERRCNT_SHIFT	0
+#define  RX_PRBS_CNT_RXTESTPRBSERRCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_BER_INTVAL>
+ *
+ * The interval over which SH BER is measured in the framer.
+ * Used to generate the xPcsRxStatFrmrHighBer alarm.
+ */
+#define XPCSRX_RX_BER_INTVAL_REG	0xd4
+
+/*
+ * Number of 161 MHz clock periods for the interval over which SH BER
+ * is counted in the framer (default is 256 us).
+*/
+#define  RX_BER_INTVAL_CFGXPCSRXFRMRBERINTVAL_SHIFT	0
+#define  RX_BER_INTVAL_CFGXPCSRXFRMRBERINTVAL_MASK	0xffffff
+
+
+/*
+ * Register <XPCSRX_BER_TRESH>
+ *
+ * The threshold on the number of bit errors made over a timer interval that
+ * will cause the high BER alarm.
+ */
+#define XPCSRX_RX_BER_TRESH_REG		0xd8
+
+/*
+ * Number of SH errors permitted over the BER interval.
+ * (default = 0 = off).
+*/
+#define  RX_BER_TRESH_CFGXPCSRXFRMRBERTRESH_SHIFT	0
+#define  RX_BER_TRESH_CFGXPCSRXFRMRBERTRESH_MASK	0x1ff
+
+
+/*
+ * Register <XPCSRX_64B66B_START_CNT>
+ *
+ * Count the number of SOP detected in the 64b/66b decoder.
+ */
+#define XPCSRX_RX_64B66B_START_CNT_REG	0xdc
+
+/* The number of SOP that the 64b/66b decoder found. */
+#define  RX_64B66B_START_CNT_RX64B66BDECSTARTCNT_SHIFT	0
+#define  RX_64B66B_START_CNT_RX64B66BDECSTARTCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_IDLE_GOOD_PKT_CNT>
+ *
+ * Count the number of good packets detected in the idle insertion logic.
+ * Based on finding SOP followed by EOP.
+ */
+#define XPCSRX_RX_IDLE_GOOD_PKT_CNT_REG	0xe0
+
+/* The number of good packets found by the idle insertion logic. */
+#define  RX_IDLE_GOOD_PKT_CNT_RXIDLEGOODPKTCNT_SHIFT	0
+#define  RX_IDLE_GOOD_PKT_CNT_RXIDLEGOODPKTCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_IDLE_ERR_PKT_CNT>
+ *
+ * Count the number of bad packets detected in the idle insertion logic.
+ */
+#define XPCSRX_RX_IDLE_ERR_PKT_CNT_REG	0xe4
+
+/* The number of errored packets found by the idle insertion logic. */
+#define  RX_IDLE_ERR_PKT_CNT_RXIDLEERRPKTCNT_SHIFT	0
+#define  RX_IDLE_ERR_PKT_CNT_RXIDLEERRPKTCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_64B66B_STOP_CNT>
+ *
+ * Count the number of EOP detected in the 64b/66b decoder.
+ */
+#define XPCSRX_RX_64B66B_STOP_CNT_REG	0xe8
+
+/* The number of EOP that the 64b/66b decoder found. */
+#define  RX_64B66B_STOP_CNT_RX64B66BDECSTOPCNT_SHIFT	0
+#define  RX_64B66B_STOP_CNT_RX64B66BDECSTOPCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_BURST_OUT_ODR_CNT>
+ *
+ * Test mode used in FPGA only.
+ * Count the number of bursts received out of order.
+ */
+#define XPCSRX_RX_BURST_OUT_ODR_CNT_REG	0xec
+
+/* The number of times the received burst sequence number was out of order. */
+#define  RX_BURST_OUT_ODR_CNT_RXBURSTSEQOUTOFORDERCNT_SHIFT	0
+#define  RX_BURST_OUT_ODR_CNT_RXBURSTSEQOUTOFORDERCNT_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_IDLE_DA_JIT_DLY> - read-only
+ *
+ * Gives delay values for two different DA through the IDLE insert process.
+ */
+#define XPCSRX_RX_IDLE_DA_JIT_DLY_REG	0xf0
+
+/* Previous delay for DA through the idle insert process. */
+#define  RX_IDLE_DA_JIT_DLY_RXIDLELASTDACNT_SHIFT	16
+#define  RX_IDLE_DA_JIT_DLY_RXIDLELASTDACNT_MASK	0x1ff0000
+
+/* Current delay for DA through the idle insert process. */
+#define  RX_IDLE_DA_JIT_DLY_RXIDLEDACNT_SHIFT	0
+#define  RX_IDLE_DA_JIT_DLY_RXIDLEDACNT_MASK	0x1ff
+
+
+/*
+ * Register <XPCSRX_DPORT_CTL>
+ *
+ * Provides data port access to all XPCS RX RAMS.
+ * The data port functions as a means to access RAMs by way of
+ * indirection.
+ * The address, commands and busy status are located in this
+ * register (XPCSRX_DPORT_CTL).
+ * The data to be read or written are accessed at:
+ * XPCSRX_DPORT_DATA0, XPCSRX_DPORT_DATA1, XPCSRX_DPORT_DATA2,
+ * XPCSRX_DPORT_DATA3 and XPCSRX_DPORT_DATA4.
+ * The control to allow this data port to access data path RAMs is
+ * located at:
+ * XPCSRX_DPORT_ACC.
+ * The control to disable clear on read for the XPCS RX stats RAM
+ * is located at:
+ * XPCSRX_DPORT_FEC_STATS_CTL.
+ * The capture FIFO RAM, FEC decode RAM, FEC enqueue RAM and idle
+ * insert RAM are data path RAMs.
+ * In order to access them a select must be set.
+ * This disables the data path and allows the data port to have
+ * access.
+ * The XPCS RX will not function properly under these conditions.
+ * Accesses to these RAMs by this data port are for test only.
+ * The FEC decode stats RAM also has a control bit associated with
+ * it.
+ * This control bit is not associated with permitting access to
+ * this RAM.
+ * Accesses to this RAM by this data port are intended for both the
+ * normal mode of operation and testing.
+ * This bit is to disable the clear on read function that occurs
+ * with normal read accesses to this RAM by this data port.
+ * CONTROLS:
+ * To access the capture FIFO RAM, FEC decode RAM, FEC enqueue RAM
+ * or idle insert RAM, set the data port select bit in
+ * XPCSRX_DPORT_ACC.
+ * To access the FEC decode stats RAM without clear on read, set
+ * the disable bit in XPCSRX_DPORT_FEC_STATS_CTL.
+ * Do not set this in normal operation.
+ * It is intended that statistics are clear on read.
+ * For writes, set the write values in the data registers.
+ * Set the address, RAM select and control fields in this register.
+ * Poll the busy bit in this register.
+ * Once the busy bit clears the data port has completed the command.
+ * For reads, read the data register after the busy bit clears.
+ * Statistics and data field descriptions for the RAMs:
+ * RAM          | Type | Size    | ecc field | data field
+ * -------------+------+---------+-----------+-----------
+ * Capture FIFO | PD   | 256x80  | [79:72]   | [71:0]
+ * FEC enqueue  | RF   | 32x80   | [79:72]   | [71:0]
+ * FEC decode   | PD   | 256x80  | [79:72]   | [71:0]
+ * FEC stats    | SP   | 128x151 | [150:142] | [141:0]
+ * Idle insert  | PD   | 256x82  | [81:73]   | [73:0]
+ * Data field description:
+ * Capture FIFO: [71:64] XPCS RX control, [63:0] XPCS RX data.
+ * FEC enqueue: [71:0] aligned 66b data geared up to 72b.
+ * FEC decode: [71:0] aligned 66b data geared up to 72b.
+ * FEC stats RAM: [141:103] ones corrected statistics, [102:64]
+ * zeros corrected statistics, [63:32] code words decode fails
+ * statistics, [31:0] code words total statistics.
+ * Idle insert: [73:72] number of invalid times between valid,
+ * [71:68] control indication, [69:36] four bytes of 8b data,
+ * [35:32] control indication, [31:0] four bytes of 8b data.
+ */
+#define XPCSRX_RX_DPORT_CTL_REG		0xf4
+
+/* Data port busy. */
+#define  RX_DPORT_CTL_XPCSRXDPBUSY_MASK	0x80000000
+
+/* Data port error (always 0 for XPCS RX). */
+#define  RX_DPORT_CTL_XPCSRXDPERR_MASK	0x40000000
+
+/* Data port command (0 = read and 1 = write). */
+#define  RX_DPORT_CTL_CFGXPCSRXDPCTL_SHIFT	20
+#define  RX_DPORT_CTL_CFGXPCSRXDPCTL_MASK	0xff00000
+
+/*
+ * Data port RAM select :
+ * 0 = capture FIFO RAM, 1 = FEC decode RAM, 2 = FEC stats RAM,
+ * 3 = FEC enqueue RAM, 4 = idle insert RAM
+*/
+#define  RX_DPORT_CTL_CFGXPCSRXDPRAMSEL_SHIFT	16
+#define  RX_DPORT_CTL_CFGXPCSRXDPRAMSEL_MASK	0xf0000
+
+/*
+ * Data port address.
+ * NOTE:
+ * all 16 bits are available, but the largest RAM in XPCS RX
+ * (XPcsRxCapFifoRam)
+ * is x256.
+*/
+#define  RX_DPORT_CTL_CFGXPCSRXDPADDR_SHIFT	0
+#define  RX_DPORT_CTL_CFGXPCSRXDPADDR_MASK	0xffff
+
+
+/*
+ * Register <XPCSRX_DPORT_DATA0>
+ *
+ * Readback register and write register for data port accesses.
+ */
+#define XPCSRX_RX_DPORT_DATA0_REG	0xf8
+
+/* Data port data bits 31 to 0. */
+#define  RX_DPORT_DATA0_XPCSRXDPDATA0_SHIFT	0
+#define  RX_DPORT_DATA0_XPCSRXDPDATA0_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_DPORT_DATA1>
+ *
+ * Readback register and write register for data port accesses.
+ */
+#define XPCSRX_RX_DPORT_DATA1_REG	0xfc
+
+/* Data port data bits 63 to 32. */
+#define  RX_DPORT_DATA1_XPCSRXDPDATA1_SHIFT	0
+#define  RX_DPORT_DATA1_XPCSRXDPDATA1_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_DPORT_DATA2>
+ *
+ * Readback register and write register for data port accesses.
+ */
+#define XPCSRX_RX_DPORT_DATA2_REG	0x100
+
+/* Data port data bits 95 to 64. */
+#define  RX_DPORT_DATA2_XPCSRXDPDATA2_SHIFT	0
+#define  RX_DPORT_DATA2_XPCSRXDPDATA2_MASK	0xffffffff
+
+
+/*
+ * Register <XPCSRX_DPORT_ACC>
+ *
+ * Provides data port access to all XPCS RX RAMS.
+ */
+#define XPCSRX_RX_DPORT_ACC_REG		0x104
+
+/* Disable data path and selects data port for RAM access. */
+#define  RX_DPORT_ACC_CFGXPCSRXIDLERAMDPSEL_MASK	0x8
+
+/* Disable data path and selects data port for RAM access. */
+#define  RX_DPORT_ACC_CFGXPCSRXFECDECRAMDPSEL_MASK	0x4
+
+/* Disable data path and selects data port for RAM access. */
+#define  RX_DPORT_ACC_CFGXPCSRXFECNQUERAMDPSEL_MASK	0x2
+
+
+/*
+ * Register <XPCSRX_RAM_ECC_INT_STAT>
+ *
+ * Interrupt status for XPcsRx RAMs ECC.
+ */
+#define XPCSRX_RX_RAM_ECC_INT_STAT_REG	0x108
+
+/* Idle insert FIFO RAM init done interrupt. */
+#define  RX_RAM_ECC_INT_STAT_INTRXIDLERAMINITDONE_MASK	0x10
+
+/* FEC enqueue FIFO RAM init done interrupt. */
+#define  RX_RAM_ECC_INT_STAT_INTRXFECNQUERAMINITDONE_MASK	0x8
+
+/* FEC decode FIFO RAM init done interrupt. */
+#define  RX_RAM_ECC_INT_STAT_INTRXFECDECRAMINITDONE_MASK	0x2
+
+
+/*
+ * Register <XPCSRX_RAM_ECC_INT_MSK>
+ *
+ * Interrupt mask for XPcsRx RAMs ECC.
+ */
+#define XPCSRX_RX_RAM_ECC_INT_MSK_REG	0x10c
+
+/* Idle insert FIFO RAM init done interrupt mask. */
+#define  RX_RAM_ECC_INT_MSK_MSKRXIDLERAMINITDONE_MASK	0x10
+
+/* FEC enqueue FIFO RAM init done interrupt mask. */
+#define  RX_RAM_ECC_INT_MSK_MSKRXFECNQUERAMINITDONE_MASK	0x8
+
+/* FEC decode FIFO RAM init done interrupt mask. */
+#define  RX_RAM_ECC_INT_MSK_MSKRXFECDECRAMINITDONE_MASK	0x2
+
+
+/*
+ * Register <XPCSRX_DFT_TESTMODE>
+ *
+ * DFT test mode for PD RAMs
+ */
+#define XPCSRX_RX_DFT_TESTMODE_REG	0x110
+
+/* DFT test mode for PD RAMs */
+#define  RX_DFT_TESTMODE_TM_PD_SHIFT	0
+#define  RX_DFT_TESTMODE_TM_PD_MASK	0x3ff
+
+
+/*
+ * Register <XPCSRX_RAM_POWER_PDA_CTL0>
+ *
+ * Control register to selectively power one or more rowblocks of the
+ * memory to achieve improved power reduction.
+ * There is one bit per rowblock for the specific RAM.
+ * All array contents are lost for the rowblocks that are powered down.
+ * The rowblocks in operational mode will be available for read/write
+ * and data retention.
+ * 1 = power down, 0 = operational.
+ * NOTE:
+ * When powering up, do NOT power up more than one array in one RAM at
+ * a time. It may damage the RAM.
+ */
+#define XPCSRX_RX_RAM_POWER_PDA_CTL0_REG	0x114
+
+/* Array power down control for FEC decode RAM */
+#define  RX_RAM_POWER_PDA_CTL0_CFGXPCSRXIDLERAMPDA_MASK	0x8
+
+/* Array power down control for FEC decode RAM */
+#define  RX_RAM_POWER_PDA_CTL0_CFGXPCSRXFECDECRAMPDA_MASK	0x2
+
+
+/*
+ * Register <XPCSRX_INT_STAT1>
+ *
+ * More Interrupt status for XPcsRx module.
+ */
+#define XPCSRX_RX_INT_STAT1_REG		0x118
+
+/* trailing start at the end of burst */
+#define  RX_INT_STAT1_INTRX64B66BTRAILSTART_MASK	0x8
+
+/* two stops in a row detected */
+#define  RX_INT_STAT1_INTRX64B66BTWOSTOP_MASK	0x4
+
+/* two starts in a row detected */
+#define  RX_INT_STAT1_INTRX64B66BTWOSTART_MASK	0x2
+
+/* leading stop at the start of burst */
+#define  RX_INT_STAT1_INTRX64B66BLEADSTOP_MASK	0x1
+
+
+/*
+ * Register <XPCSRX_INT_MSK1>
+ *
+ * More Interrupt mask for XPcsRx module.
+ */
+#define XPCSRX_RX_INT_MSK1_REG		0x11c
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK1_MSKRX64B66BTRAILSTART_MASK	0x8
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK1_MSKRX64B66BTWOSTOP_MASK	0x4
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK1_MSKRX64B66BTWOSTART_MASK	0x2
+
+/*
+ * 0 - Interrupt is masked.
+ * 1 - Interrupt is unmasked.
+*/
+#define  RX_INT_MSK1_MSKRX64B66BLEADSTOP_MASK	0x1
+
+
+/*
+ * Register <XPCSRX_SPARE_CTL>
+ *
+ * Spare RW bits
+ */
+#define XPCSRX_RX_SPARE_CTL_REG		0x120
+
+/* Spare RW bits */
+#define  RX_SPARE_CTL_CFGXPCSRXSPARE_SHIFT	0
+#define  RX_SPARE_CTL_CFGXPCSRXSPARE_MASK	0xffffffff
+
+
+#endif /* ! EPON_XPCSRX_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_xpcstx.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_xpcstx.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/epon_xpcstx.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/epon_xpcstx.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,454 @@
+#ifndef EPON_XPCSTX_H_
+#define EPON_XPCSTX_H_
+
+/* relative to epon */
+#define XPCSTX_OFFSET_0			0x3800
+
+/*
+ * Register <XPCS_TX_CONTROL>
+ *
+ * XPCS-TX control register.
+ */
+#define XPCSTX_TX_CONTROL_REG		0x0
+
+/* Enable remote fault detection. */
+#define  TX_CONTROL_CFGENRMTFAULTDET125_MASK	0x8000
+
+/* Enable the laser enable tri-state output. */
+#define  TX_CONTROL_CFGLSRTRISTATEEN125_MASK	0x4000
+
+/*
+ * Debug function has been deprecated.
+ * It is now utilized to enable IDLE packet support :
+ * 0 - enable; 1 - disable.
+*/
+#define  TX_CONTROL_CFGENSEQNUM125_MASK	0x2000
+
+/*
+ * Enable the scrambler to run continuously.
+ * 0 - Only during burst; 1 - Continuously.
+*/
+#define  TX_CONTROL_CFGENSCRMBCONT125_MASK	0x800
+
+/*
+ * Laser on polarity :
+ * 0 - laser on active low; 1 - laser on active high.
+*/
+#define  TX_CONTROL_CFGLSRENACTHI125_MASK	0x400
+
+/* Enable laser on always. */
+#define  TX_CONTROL_CFGENLSRALWAYS125_MASK	0x200
+
+/* Enable laser until the end-of-grant, non-strict IEEE mode. */
+#define  TX_CONTROL_CFGENLSRTILENDSLOT125_MASK	0x100
+
+/* Flip Gearbox's byte output to SERDES. */
+#define  TX_CONTROL_CFGTXOUTBYTEFLIP125_MASK	0x80
+
+/*
+ * Enable transmit Gearbox's output.
+ * 0 - disable transmit; 1 - enable transmit.
+*/
+#define  TX_CONTROL_CFGENTXOUT125_MASK	0x40
+
+/*
+ * Enable transmit scrambler.
+ * 0 - disable scrambler; 1 - enable scrambler.
+*/
+#define  TX_CONTROL_CFGENTXSCRB125_MASK	0x20
+
+/*
+ * Enables upstream FEC :
+ * 0 - nonFEC; 1 - FEC.
+*/
+#define  TX_CONTROL_CFGENTXFEC125_MASK	0x10
+
+/* Indicates XPCS-TX not ready for operation. */
+#define  TX_CONTROL_PCSTXNOTRDY_MASK	0x8
+
+/*
+ * Active low reset for RAM data port.
+ * RAM init starts upon deassertion.
+ * Bit pcstxNotRdy is to be polled for completion.
+*/
+#define  TX_CONTROL_PCSTXDTPORTRSTN_MASK	0x2
+
+/*
+ * Reset control for XPCS-TX module.
+ * 0 - Reset.
+ * 1 - Normal Operation.
+*/
+#define  TX_CONTROL_PCSTXRSTN_MASK	0x1
+
+
+/*
+ * Register <XPCS_TX_INT_STAT>
+ *
+ */
+#define XPCSTX_TX_INT_STAT_REG		0x4
+
+/*
+ * Indicates laser enable on time exceeded the maximum threshold, as
+ * defined by register XPCS_TX_LASER_MONITOR_MAX_THRESH.
+*/
+#define  TX_INT_STAT_LASERONMAX_MASK	0x80
+
+/* Indicates laser enable deassertion. */
+#define  TX_INT_STAT_LASEROFF_MASK	0x40
+
+/* [FATAL] Indicates scheduled time lagged current MPCP time. */
+#define  TX_INT_STAT_GRANTLAGERR_MASK	0x20
+
+/* [NON-FATAL] Indicates back-2-back grants. */
+#define  TX_INT_STAT_BACK2BACKGNT_MASK	0x8
+
+/* [FATAL] FEC transmit FIFO underrun. */
+#define  TX_INT_STAT_FECUNDERRUN_MASK	0x4
+
+/* [FATAL] Gearbox underrun. */
+#define  TX_INT_STAT_GEARBOXUNDERRUN_MASK	0x2
+
+/* [NON-FATAL] Indicates grant slot is too short for transfer. */
+#define  TX_INT_STAT_GNTTOOSHORT_MASK	0x1
+
+
+/*
+ * Register <XPCS_TX_INT_MASK>
+ *
+ * Interrupt masks, active low :
+ * 0 - mask interrupt; 1 - enable interrupt.
+ */
+#define XPCSTX_TX_INT_MASK_REG		0x8
+
+/* Interrupt mask, active low. */
+#define  TX_INT_MASK_LASERONMAXMASK_MASK	0x80
+
+/* Interrupt mask, active low. */
+#define  TX_INT_MASK_LASEROFFMASK_MASK	0x40
+
+/* Interrupt mask, active low. */
+#define  TX_INT_MASK_GRANTLAGERRMSK_MASK	0x20
+
+/* Interrupt mask, active low. */
+#define  TX_INT_MASK_BACK2BCKGNTMSK_MASK	0x8
+
+/* Interrupt mask, active low. */
+#define  TX_INT_MASK_FECUNDERRUNMSK_MASK	0x4
+
+/* Interrupt mask, active low. */
+#define  TX_INT_MASK_GEARBOXUNDERRUNMSK_MASK	0x2
+
+/* Interrupt mask, active low. */
+#define  TX_INT_MASK_GNTTOOSHORTMSK_MASK	0x1
+
+
+/*
+ * Register <XPCS_TX_PORT_COMMAND>
+ *
+ * Provides dataPort read/write access to various RAMs.
+ */
+#define XPCSTX_TX_PORT_COMMAND_REG	0xc
+
+/*
+ * Indicates dataPort is in progress.
+ * Bit must be cleared before the next dataPort access can be issued.
+*/
+#define  TX_PORT_COMMAND_DATAPORTBUSY_MASK	0x80000000
+
+/*
+ * Selects the RAM for access :
+ * 0 - FEC vector RAM.
+*/
+#define  TX_PORT_COMMAND_PORTSELECT_SHIFT	24
+#define  TX_PORT_COMMAND_PORTSELECT_MASK	0x3f000000
+
+/*
+ * Indicates write access :
+ * 0 - read; 1 - write.
+*/
+#define  TX_PORT_COMMAND_PORTOPCODE_SHIFT	16
+#define  TX_PORT_COMMAND_PORTOPCODE_MASK	0xff0000
+
+/* Specifies the RAM address for access. */
+#define  TX_PORT_COMMAND_PORTADDRESS_SHIFT	0
+#define  TX_PORT_COMMAND_PORTADDRESS_MASK	0xffff
+
+
+/*
+ * Register <XPCS_TX_DATA_PORT_0>
+ *
+ * Stores the pre-write data for writing; and the post-read data
+ * for reading.
+ */
+#define XPCSTX_TX_DATA_PORT_0_REG	0x10
+
+/*
+ * XPCSTX_VEC_RAM[31:
+ * 0].
+*/
+#define  TX_DATA_PORT_0_PORTDATA0_SHIFT	0
+#define  TX_DATA_PORT_0_PORTDATA0_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_DATA_PORT_1>
+ *
+ * Stores the pre-write data for writing; and the post-read data
+ * for reading.
+ */
+#define XPCSTX_TX_DATA_PORT_1_REG	0x14
+
+/*
+ * XPCSTX_VEC_RAM[63:
+ * 32].
+*/
+#define  TX_DATA_PORT_1_PORTDATA1_SHIFT	0
+#define  TX_DATA_PORT_1_PORTDATA1_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_DATA_PORT_2>
+ *
+ * Stores the pre-write data for writing; and the post-read data
+ * for reading.
+ */
+#define XPCSTX_TX_DATA_PORT_2_REG	0x18
+
+/*
+ * [1:
+ * 0] - XPCSTX_VEC_RAM[66:
+ * 65].
+*/
+#define  TX_DATA_PORT_2_PORTDATA2_SHIFT	0
+#define  TX_DATA_PORT_2_PORTDATA2_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_SYNC_PATT_CWORD_LO>
+ *
+ * Specifies the sync pattern codeword.
+ */
+#define XPCSTX_TX_SYNC_PATT_CWORD_LO_REG	0x1c
+
+/* Defines the low order sync pattern codeword. */
+#define  TX_SYNC_PATT_CWORD_LO_CFGSYNCPATCWL_SHIFT	0
+#define  TX_SYNC_PATT_CWORD_LO_CFGSYNCPATCWL_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_SYNC_PATT_CWORD_HI>
+ *
+ * Specifies the sync pattern codeword.
+ */
+#define XPCSTX_TX_SYNC_PATT_CWORD_HI_REG	0x20
+
+/* Defines the high order sync pattern codeword. */
+#define  TX_SYNC_PATT_CWORD_HI_CFGSYNCPATCWH_SHIFT	0
+#define  TX_SYNC_PATT_CWORD_HI_CFGSYNCPATCWH_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_START_BURST_DEL_CWORD_LO>
+ *
+ * Specifies the start-of-burst delimiter codeword.
+ */
+#define XPCSTX_TX_START_BURST_DEL_CWORD_LO_REG	0x24
+
+/* Defines the low order start-of-burst delimiter codeword. */
+#define  TX_START_BURST_DEL_CWORD_LO_CFGSTRTBRSTDLMTRCWL_SHIFT	0
+#define  TX_START_BURST_DEL_CWORD_LO_CFGSTRTBRSTDLMTRCWL_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_START_BURST_DEL_CWORD_HI>
+ *
+ * Specifies the start-of-burst delimiter codeword.
+ */
+#define XPCSTX_TX_START_BURST_DEL_CWORD_HI_REG	0x28
+
+/* Defines the high order start-of-burst delimiter codeword. */
+#define  TX_START_BURST_DEL_CWORD_HI_CFGSTRTBRSTDLMTRCWH_SHIFT	0
+#define  TX_START_BURST_DEL_CWORD_HI_CFGSTRTBRSTDLMTRCWH_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_END_BURST_DEL_CWORD_LO>
+ *
+ * Specifies the end-of-burst terminating codeword.
+ */
+#define XPCSTX_TX_END_BURST_DEL_CWORD_LO_REG	0x2c
+
+/* Defines the low order terminating codeword. */
+#define  TX_END_BURST_DEL_CWORD_LO_CFGENDBRSTDLMTRCWL_SHIFT	0
+#define  TX_END_BURST_DEL_CWORD_LO_CFGENDBRSTDLMTRCWL_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_END_BURST_DEL_CWORD_HI>
+ *
+ * Specifies the end-of-burst terminating codeword.
+ */
+#define XPCSTX_TX_END_BURST_DEL_CWORD_HI_REG	0x30
+
+/* Defines the high order terminating codeword. */
+#define  TX_END_BURST_DEL_CWORD_HI_CFGENDBRSTDLMTRCWH_SHIFT	0
+#define  TX_END_BURST_DEL_CWORD_HI_CFGENDBRSTDLMTRCWH_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_IDLE_CWORD_LO>
+ *
+ * Specifies the IDLE codeword.
+ */
+#define XPCSTX_TX_IDLE_CWORD_LO_REG	0x34
+
+/* Defines the low order codeword. */
+#define  TX_IDLE_CWORD_LO_CFGIDLECWL_SHIFT	0
+#define  TX_IDLE_CWORD_LO_CFGIDLECWL_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_IDLE_CWORD_HI>
+ *
+ * Specifies the IDLE codeword.
+ */
+#define XPCSTX_TX_IDLE_CWORD_HI_REG	0x38
+
+/* Defines the high order codeword. */
+#define  TX_IDLE_CWORD_HI_CFGIDLECWH_SHIFT	0
+#define  TX_IDLE_CWORD_HI_CFGIDLECWH_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_BURST_PATT_CWORD_LO>
+ *
+ * Specifies the codeword in between burst.
+ */
+#define XPCSTX_TX_BURST_PATT_CWORD_LO_REG	0x3c
+
+/* Defines the low order burst delimiter codeword. */
+#define  TX_BURST_PATT_CWORD_LO_CFGBURSTPATCWL_SHIFT	0
+#define  TX_BURST_PATT_CWORD_LO_CFGBURSTPATCWL_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_BURST_PATT_CWORD_HI>
+ *
+ * Specifies the codeword in between burst.
+ */
+#define XPCSTX_TX_BURST_PATT_CWORD_HI_REG	0x40
+
+/* Defines the high order burst delimiter codeword. */
+#define  TX_BURST_PATT_CWORD_HI_CFGBURSTPATCWH_SHIFT	0
+#define  TX_BURST_PATT_CWORD_HI_CFGBURSTPATCWH_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_LASER_TIME>
+ *
+ * Provides control for the laser enable.
+ */
+#define XPCSTX_TX_LASER_TIME_REG	0x44
+
+/*
+ * Specifies the laser pipeline delay, in unit of
+ * 6.2 ns (161 MHz clock).
+*/
+#define  TX_LASER_TIME_CFGLASERPIPE125_SHIFT	16
+#define  TX_LASER_TIME_CFGLASERPIPE125_MASK	0x3f0000
+
+/*
+ * Specifies the laser off delay to the actual laser off time,
+ * value X.
+ * Resulting delay = INT(X*2.5)*6.2 ns,
+ * based on 161 MHz clock.
+*/
+#define  TX_LASER_TIME_CFGLASEROFFDLYTQ125_SHIFT	8
+#define  TX_LASER_TIME_CFGLASEROFFDLYTQ125_MASK	0xf00
+
+/*
+ * Specifies the laser enable delay to the actual laser on time,
+ * value X.
+ * Resulting delay = INT(X*2.5)*6.2 ns,
+ * based on 161 MHz clock.
+*/
+#define  TX_LASER_TIME_CFGLASERONDLYTQ125_SHIFT	0
+#define  TX_LASER_TIME_CFGLASERONDLYTQ125_MASK	0xf
+
+
+/*
+ * Register <XPCS_TX_MAC_MODE>
+ *
+ * Specifies the MAC mode of operation.
+ */
+#define XPCSTX_TX_MAC_MODE_REG		0x48
+
+/* Enable point-to-point transmission without grant. */
+#define  TX_MAC_MODE_CFGENNOGNTXMT125_MASK	0x2
+
+
+/*
+ * Register <XPCS_TX_LASER_MONITOR_CTL>
+ *
+ * Provides control for laser monitor.
+ */
+#define XPCSTX_TX_LASER_MONITOR_CTL_REG	0x4c
+
+/* Status of laser enable, directly from I/O pin. */
+#define  TX_LASER_MONITOR_CTL_LASERENSTATUS_MASK	0x20
+
+/*
+ * Laser monitor polarity.
+ * 0 - active low; 1 - active high.
+*/
+#define  TX_LASER_MONITOR_CTL_CFGLSRMONACTHI_MASK	0x10
+
+/*
+ * Laser monitor reset.
+ * 0 - Reset; 1 - Normal operation.
+*/
+#define  TX_LASER_MONITOR_CTL_LASERMONRSTN_MASK	0x1
+
+
+/*
+ * Register <XPCS_TX_LASER_MONITOR_MAX_THRESH>
+ *
+ * Specifies maximum threshold of laser_on assertion before an
+ * interrupt is generated.
+ */
+#define XPCSTX_TX_LASER_MONITOR_MAX_THRESH_REG	0x50
+
+/* Maximum assertion threshold, in unit of TQ. */
+#define  TX_LASER_MONITOR_MAX_THRESH_CFGLSRMONMAXTQ_SHIFT	0
+#define  TX_LASER_MONITOR_MAX_THRESH_CFGLSRMONMAXTQ_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_LASER_MONITOR_BURST_LEN> - read-only
+ *
+ * Indicates the burst length of current grant, in unit of TQ (16 ns).
+ */
+#define XPCSTX_TX_LASER_MONITOR_BURST_LEN_REG	0x54
+
+/* Indicates the laser-on time of the burst that set the laserOff interrupt. */
+#define  TX_LASER_MONITOR_BURST_LEN_LASERONLENGTH_SHIFT	0
+#define  TX_LASER_MONITOR_BURST_LEN_LASERONLENGTH_MASK	0xffffffff
+
+
+/*
+ * Register <XPCS_TX_LASER_MONITOR_BURST_COUNT> - read-only
+ *
+ * Counts the number of burst.
+ * Clear on read.
+ */
+#define XPCSTX_TX_LASER_MONITOR_BURST_COUNT_REG	0x58
+
+/*
+ * This value increments on the deassertion edge of laser enable.
+ * Pegs at the max value of 0xFFFFFFFF.
+*/
+#define  TX_LASER_MONITOR_BURST_COUNT_BURSTCNT_SHIFT	0
+#define  TX_LASER_MONITOR_BURST_COUNT_BURSTCNT_MASK	0xffffffff
+
+
+#endif /* ! EPON_XPCSTX_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/serdes_regs.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/serdes_regs.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/serdes_regs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/serdes_regs.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,294 @@
+/***************************************************************************
+// Serdes registers in full chip address map
+ ***************************************************************************/
+#define    PCS_ID       0x0000
+
+#define    DEVID_0      0x0000
+#define    DEVID_1      0x0800
+
+#define    serdes_PLL_0    0x0000
+#define    serdes_PLL_1    0x0100
+
+#define    LANE_0       0x0000
+#define    LANE_1       0x0001
+#define    LANE_2       0x0002
+#define    LANE_3       0x0003
+#define    LANE_BRDCST  0x00FF
+#define    LANE_01      0x0200
+#define    LANE_23      0x0201
+
+#define DSC_A_cdr_control_0 0xD001
+#define DSC_A_cdr_control_1 0xD002
+#define DSC_A_cdr_control_2 0xD003
+#define DSC_A_rx_pi_control 0xD004
+#define DSC_A_cdr_status_integ_reg 0xD005
+#define DSC_A_cdr_status_phase_error 0xD006
+#define DSC_A_rx_pi_cnt_bin_d 0xD007
+#define DSC_A_rx_pi_cnt_bin_p 0xD008
+#define DSC_A_rx_pi_cnt_bin_m 0xD009
+#define DSC_A_rx_pi_diff_bin 0xD00A
+#define DSC_A_trnsum_cntl_5 0xD00B
+#define DSC_A_dsc_uc_ctrl 0xD00D
+#define DSC_A_dsc_scratch 0xD00E
+#define DSC_B_dsc_sm_ctrl_0 0xD010
+#define DSC_B_dsc_sm_ctrl_1 0xD011
+#define DSC_B_dsc_sm_ctrl_2 0xD012
+#define DSC_B_dsc_sm_ctrl_3 0xD013
+#define DSC_B_dsc_sm_ctrl_4 0xD014
+#define DSC_B_dsc_sm_ctrl_5 0xD015
+#define DSC_B_dsc_sm_ctrl_6 0xD016
+#define DSC_B_dsc_sm_ctrl_7 0xD017
+#define DSC_B_dsc_sm_ctrl_8 0xD018
+#define DSC_B_dsc_sm_ctrl_9 0xD019
+#define DSC_B_dsc_sm_status_dsc_lock 0xD01A
+#define DSC_B_dsc_sm_status_dsc_state_one_hot 0xD01B
+#define DSC_B_dsc_sm_status_dsc_state_eee_one_hot 0xD01C
+#define DSC_B_dsc_sm_status_restart 0xD01D
+#define DSC_B_dsc_sm_status_dsc_state 0xD01E
+#define DSC_C_dfe_common_ctl 0xD020
+#define DSC_C_dfe_1_ctl 0xD021
+#define DSC_C_dfe_1_pat_ctl 0xD022
+#define DSC_C_dfe_2_ctl 0xD023
+#define DSC_C_dfe_2_pat_ctl 0xD024
+#define DSC_C_dfe_3_ctl 0xD025
+#define DSC_C_dfe_3_pat_ctl 0xD026
+#define DSC_C_dfe_4_ctl 0xD027
+#define DSC_C_dfe_4_pat_ctl 0xD028
+#define DSC_C_dfe_5_ctl 0xD029
+#define DSC_C_dfe_5_pat_ctl 0xD02A
+#define DSC_C_dfe_vga_override 0xD02B
+#define DSC_C_vga_ctl 0xD02C
+#define DSC_C_vga_pat_eyediag_ctl 0xD02D
+#define DSC_C_p1_frac_offs_ctl 0xD02E
+#define DSC_D_trnsum_ctl_1 0xD030
+#define DSC_D_trnsum_ctl_2 0xD031
+#define DSC_D_trnsum_ctl_3 0xD032
+#define DSC_D_trnsum_ctl_4 0xD033
+#define DSC_D_trnsum_sts_1 0xD034
+#define DSC_D_trnsum_sts_2 0xD035
+#define DSC_D_trnsum_sts_3 0xD036
+#define DSC_D_trnsum_sts_4 0xD037
+#define DSC_D_trnsum_sts_5 0xD038
+#define DSC_D_trnsum_sts_6 0xD039
+#define DSC_D_vga_p1eyediag_sts 0xD03A
+#define DSC_D_dfe_1_sts 0xD03B
+#define DSC_D_dfe_2_sts 0xD03C
+#define DSC_D_dfe_3_4_5_sts 0xD03D
+#define DSC_D_vga_tap_bin 0xD03E
+#define DSC_E_dsc_e_ctrl 0xD040
+#define DSC_E_dsc_e_pf_ctrl 0xD041
+#define DSC_E_dsc_e_pf2_lowp_ctrl 0xD042
+#define DSC_E_dsc_e_offset_adj_data_odd 0xD043
+#define DSC_E_dsc_e_offset_adj_data_even 0xD044
+#define DSC_E_dsc_e_offset_adj_p1_odd 0xD045
+#define DSC_E_dsc_e_offset_adj_p1_even 0xD046
+#define DSC_E_dsc_e_offset_adj_m1_odd 0xD047
+#define DSC_E_dsc_e_offset_adj_m1_even 0xD048
+#define DSC_E_dsc_e_dc_offset 0xD049
+#define DSC_F_ONU10G_looptiming_ctrl_0 0xD050
+#define TX_PI_LBE_tx_pi_control_0 0xD070
+#define TX_PI_LBE_tx_pi_control_1 0xD071
+#define TX_PI_LBE_tx_pi_control_2 0xD072
+#define TX_PI_LBE_tx_pi_control_3 0xD073
+#define TX_PI_LBE_tx_pi_control_4 0xD074
+#define TX_PI_LBE_tx_pi_control_6 0xD076
+#define TX_PI_LBE_tx_pi_status_0 0xD078
+#define TX_PI_LBE_tx_pi_status_1 0xD079
+#define TX_PI_LBE_tx_pi_status_2 0xD07A
+#define TX_PI_LBE_tx_pi_status_3 0xD07B
+#define TX_PI_LBE_tx_lbe_control_0 0xD07C
+#define CKRST_CTRL_OSR_MODE_CONTROL 0xD080
+#define CKRST_CTRL_LANE_CLK_RESET_N_POWERDOWN_CONTROL 0xD081
+#define CKRST_CTRL_LANE_AFE_RESET_PWRDWN_CONTROL_CONTROL 0xD082
+#define CKRST_CTRL_LANE_RESET_N_PWRDN_PIN_KILL_CONTROL 0xD083
+#define CKRST_CTRL_LANE_DEBUG_RESET_CONTROL 0xD084
+#define CKRST_CTRL_UC_ACK_LANE_CONTROL 0xD085
+#define CKRST_CTRL_LANE_REG_RESET_OCCURRED_CONTROL 0xD086
+#define CKRST_CTRL_CLOCK_N_RESET_DEBUG_CONTROL 0xD087
+#define CKRST_CTRL_PMD_LANE_MODE_STATUS 0xD088
+#define CKRST_CTRL_LANE_DP_RESET_STATE_STATUS 0xD089
+#define CKRST_CTRL_LN_MASK 0xD08A
+#define CKRST_CTRL_OSR_MODE_STATUS 0xD08B
+#define CKRST_CTRL_AFE_RESET_PWRDN_OSR_MODE_PIN_STATUS 0xD08C
+#define CKRST_CTRL_PLL_SELECT_CONTROL 0xD08D
+#define CKRST_CTRL_LN_S_RSTB_CONTROL 0xD08E
+#define AMS_RX_RX_CONTROL_0 0xD090
+#define AMS_RX_RX_CONTROL_1 0xD091
+#define AMS_RX_RX_CONTROL_2 0xD092
+#define AMS_RX_RX_CONTROL_3 0xD093
+#define AMS_RX_RX_CONTROL_4 0xD094
+#define AMS_RX_RX_INTCTRL 0xD098
+#define AMS_RX_RX_STATUS 0xD099
+#define AMS_TX_TX_CONTROL_0 0xD0A0
+#define AMS_TX_TX_CONTROL_1 0xD0A1
+#define AMS_TX_TX_CONTROL_2 0xD0A2
+#define AMS_TX_TX_INTCTRL 0xD0A8
+#define AMS_TX_TX_STATUS 0xD0A9
+#define AMS_COM_PLL_CONTROL_0 0xD0B0
+#define AMS_COM_PLL_CONTROL_1 0xD0B1
+#define AMS_COM_PLL_CONTROL_2 0xD0B2
+#define AMS_COM_PLL_CONTROL_3 0xD0B3
+#define AMS_COM_PLL_CONTROL_4 0xD0B4
+#define AMS_COM_PLL_CONTROL_5 0xD0B5
+#define AMS_COM_PLL_CONTROL_6 0xD0B6
+#define AMS_COM_PLL_CONTROL_7 0xD0B7
+#define AMS_COM_PLL_CONTROL_8 0xD0B8
+#define AMS_COM_PLL_INTCTRL 0xD0B9
+#define AMS_COM_PLL_STATUS 0xD0BA
+#define SIGDET_SIGDET_CTRL_0 0xD0C0
+#define SIGDET_SIGDET_CTRL_1 0xD0C1
+#define SIGDET_SIGDET_CTRL_2 0xD0C2
+#define SIGDET_SIGDET_CTRL_3 0xD0C3
+#define SIGDET_SIGDET_STATUS_0 0xD0C8
+#define TLB_RX_prbs_chk_cnt_config 0xD0D0
+#define TLB_RX_prbs_chk_config 0xD0D1
+#define TLB_RX_dig_lpbk_config 0xD0D2
+#define TLB_RX_tlb_rx_misc_config 0xD0D3
+#define TLB_RX_prbs_chk_en_timer_control 0xD0D4
+#define TLB_RX_dig_lpbk_pd_status 0xD0D8
+#define TLB_RX_prbs_chk_lock_status 0xD0D9
+#define TLB_RX_prbs_chk_err_cnt_msb_status 0xD0DA
+#define TLB_RX_prbs_chk_err_cnt_lsb_status 0xD0DB
+#define TLB_RX_pmd_rx_lock_status 0xD0DC
+#define TLB_TX_patt_gen_config 0xD0E0
+#define TLB_TX_prbs_gen_config 0xD0E1
+#define TLB_TX_rmt_lpbk_config 0xD0E2
+#define TLB_TX_tlb_tx_misc_config 0xD0E3
+#define TLB_TX_tx_pi_loop_timing_config 0xD0E4
+#define TLB_TX_rmt_lpbk_pd_status 0xD0E8
+#define DIG_COM_REVID0 0xD0F0
+#define DIG_COM_RESET_CONTROL_PMD 0xD0F1
+#define DIG_COM_RESET_CONTROL_CORE_DP 0xD0F2
+#define DIG_COM_DEBUG_CONTROL 0xD0F3
+#define DIG_COM_TOP_USER_CONTROL_0 0xD0F4
+#define DIG_COM_CORE_REG_RESET_OCCURRED_CONTROL 0xD0F6
+#define DIG_COM_RST_SEQ_TIMER_CONTROL 0xD0F7
+#define DIG_COM_CORE_DP_RESET_STATE_STATUS 0xD0F8
+#define DIG_COM_REVID1 0xD0FA
+#define DIG_COM_REVID2 0xD0FE
+#define PATT_GEN_patt_gen_seq_0 0xD100
+#define PATT_GEN_patt_gen_seq_1 0xD101
+#define PATT_GEN_patt_gen_seq_2 0xD102
+#define PATT_GEN_patt_gen_seq_3 0xD103
+#define PATT_GEN_patt_gen_seq_4 0xD104
+#define PATT_GEN_patt_gen_seq_5 0xD105
+#define PATT_GEN_patt_gen_seq_6 0xD106
+#define PATT_GEN_patt_gen_seq_7 0xD107
+#define PATT_GEN_patt_gen_seq_8 0xD108
+#define PATT_GEN_patt_gen_seq_9 0xD109
+#define PATT_GEN_patt_gen_seq_10 0xD10A
+#define PATT_GEN_patt_gen_seq_11 0xD10B
+#define PATT_GEN_patt_gen_seq_12 0xD10C
+#define PATT_GEN_patt_gen_seq_13 0xD10D
+#define PATT_GEN_patt_gen_seq_14 0xD10E
+#define TX_FED_txfir_control1 0xD110
+#define TX_FED_txfir_control2 0xD111
+#define TX_FED_txfir_control3 0xD112
+#define TX_FED_txfir_status1 0xD113
+#define TX_FED_txfir_status2 0xD114
+#define TX_FED_txfir_status3 0xD115
+#define TX_FED_txfir_status4 0xD116
+#define TX_FED_micro_control 0xD117
+#define TX_FED_misc_control1 0xD118
+#define TX_FED_txfir_control4 0xD119
+#define TX_FED_misc_status0 0xD11B
+#define PLL_CAL_COM_CTL_0 0xD120
+#define PLL_CAL_COM_CTL_1 0xD121
+#define PLL_CAL_COM_CTL_2 0xD122
+#define PLL_CAL_COM_CTL_3 0xD123
+#define PLL_CAL_COM_CTL_4 0xD124
+#define PLL_CAL_COM_CTL_5 0xD125
+#define PLL_CAL_COM_CTL_6 0xD126
+#define PLL_CAL_COM_CTL_7 0xD127
+#define PLL_CAL_COM_CTL_STATUS_0 0xD128
+#define PLL_CAL_COM_CTL_STATUS_1 0xD129
+#define TXCOM_CL72_tap_preset_control 0xD132
+#define TXCOM_CL72_debug_1_register 0xD133
+#define CORE_PLL_COM_PMD_CORE_MODE_STATUS 0xD150
+#define CORE_PLL_COM_RESET_CONTROL_PLL_DP 0xD152
+#define CORE_PLL_COM_TOP_USER_CONTROL 0xD154
+#define CORE_PLL_COM_UC_ACK_CORE_CONTROL 0xD155
+#define CORE_PLL_COM_PLL_DP_RESET_STATE_STATUS 0xD158
+#define CORE_PLL_COM_CORE_PLL_COM_STATUS_2 0xD159
+#define MICRO_A_ramword 0xD200
+#define MICRO_A_address 0xD201
+#define MICRO_A_command 0xD202
+#define MICRO_A_ram_wrdata 0xD203
+#define MICRO_A_ram_rddata 0xD204
+#define MICRO_A_download_status 0xD205
+#define MICRO_A_sfr_status 0xD206
+#define MICRO_A_mdio_uc_mailbox_msw 0xD207
+#define MICRO_A_mdio_uc_mailbox_lsw 0xD208
+#define MICRO_A_uc_mdio_mailbox_lsw 0xD209
+#define MICRO_A_command2 0xD20A
+#define MICRO_A_uc_mdio_mailbox_msw 0xD20B
+#define MICRO_A_command3 0xD20C
+#define MICRO_A_command4 0xD20D
+#define MICRO_A_temperature_status 0xD20E
+#define MICRO_B_program_ram_control1 0xD210
+#define MICRO_B_dataram_control1 0xD214
+#define MICRO_B_iram_control1 0xD218
+#define MDIO_MMDSEL_AER_COM_mdio_maskdata 0xFFDB
+#define MDIO_MMDSEL_AER_COM_mdio_brcst_port_addr 0xFFDC
+#define MDIO_MMDSEL_AER_COM_mdio_mmd_select 0xFFDD
+#define MDIO_MMDSEL_AER_COM_mdio_aer 0xFFDE
+#define MDIO_BLK_ADDR_BLK_ADDR 0xFFDF
+
+#define XGXSBLK0_XGXSCTRL 0x8000
+#define XGXSBLK1_LANECTRL0 0x8015
+#define XGXSBLk1_LANECTRL1 0x8016
+
+#define XgxsBlk10_tx_pi_control4 0x00008190
+    #define tx_pi_sm_enable_override_value  (1<<14) // RXSM Status 0x8366 Read & Clear control
+
+#define SerdesDigital_misc1 0x8308
+#define Digital4_Misc3      0x833c
+#define Digital4_Misc4 0x833d
+#define Digital5_parDetINDControl1 0x8347
+#define Digital5_parDetINDControl2 0x8348
+#define Digital5_Misc7 0x8349
+#define Digital5_Misc6 0x8345
+#define tx66_Control 0x83b0
+#define FX100_Control3 0x8402
+#define ieee0Blk_MIICntl 0x0000
+#define FX100_Control1 0x8400
+#define rx66b1_rx66b1_Control1 0x8441
+    #define rfifo_ptr_sw_rst    (1<<0)
+
+#define XGXSBLK4_xgxsStatus1 0x00008122
+#define XgxsStatus1_LinkStat    (1<<9)
+#define XgxsStatus1_Speed_Mask  (0xf)
+#define XgxsStatus1_Speed_10G   (0x6)
+
+#define SerdesDigital_Control1000X1		0x8300
+#define SerdesDigital_SgmiiMasterMode		(1<<5)
+#define SerdesDigital_SgmiiAutoMode		(1<<4)
+#define SerdesDigital_InvertSigDet		(1<<3)
+#define SerdesDigital_SigDetEn			(1<<2)
+#define SerdesDigital_FibreSgmiiModeFibre	(1<<0)
+
+#define SerdesDigital_Control1000X2		0x8301
+#define SerdesDigital_AutoNegoFastTimer (1<<6)
+#define SerdesDigital_DisableRemoteFaultSending (1<<4)
+#define SerdesDigital_FilterForceLink       (1<<2)
+#define SerdesDigital_DisableFalseLink     (1<<1)
+#define SerdesDigital_EnableParallelDetection (1<<0)
+
+#define SerdesDigital_Control1000X3 0x8302
+
+#define SerdesDigital_Status1000X1 0x8304
+#define Status1000X1_Sgmii_Mode (1 << 0)
+#define Status1000X1_link (1 << 1)
+#define Status1000X1_FullDuplex (1 << 0)
+#define Status1000X1_SpeedShift  3
+#define Status1000X1_SpeedMask  (0x3 << 3)
+#define Status1000X1_LinkChanged (1 << 7)
+
+#define SerdesDigital_Status1000X2 0x8305
+#define Status1000X2_sync_failed (1 << 11)
+#define Status1000X2_sync_ok (1 << 10)
+
+#define SerdesDigital_rx66_Status 0x83c1
+
+#define CL49_UserB0_Control     0x8368
+    #define CL49_fast_lock_cya  (1<<5)
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/unimac_regs.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/unimac_regs.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/unimac_regs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/unimac_regs.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,482 @@
+#ifndef UNIMAC_REGS_H_
+#define UNIMAC_REGS_H_
+
+#define UNIMAC_CFG_OFFSET(mac)			(0x0000 + (mac) * 0x1000)
+#define UNIMAC_MIB_OFFSET(mac)			(0x8000 + (mac) * 0x400)
+#define UNIMAC_MISC_OFFSET(mac)			(0xa000 + (mac) * 0x400)
+
+/*
+ * configuration registers
+ */
+
+/* UniMAC Dummy Register */
+#define UNIMAC_CFG_UMAC_DUMMY_REG		(0x0000)
+
+/* UniMAC Half Duplex Backpressure Control Register */
+#define UNIMAC_CFG_HD_BKP_CNTL_REG		(0x0004)
+
+/* UniMAC Command Register */
+#define UNIMAC_CFG_CMD_REG			(0x0008)
+
+/*
+ * 1: The MAC transmit function is enabled. 0: The MAC transmit
+ * function is disabled.  The enable works on packet boundary meaning
+ * that only on the assertion of the bit during every SOP.  Reset
+ * value is 0x0.
+ */
+#define UNIMAC_CFG_CMD_TX_EN_SHIFT		0
+#define UNIMAC_CFG_CMD_TX_EN_MASK		(1 << UNIMAC_CFG_CMD_TX_EN_SHIFT)
+
+/*
+ * 1: The MAC receive function is enabled. 0: The MAC receive function
+ * is disabled.  The enable works on packet boundary meaning that only
+ * on the assertion on the bit during every 0->1 transition of rx_dv.
+ * Reset value is 0x0.
+ */
+#define UNIMAC_CFG_CMD_RX_EN_SHIFT		1
+#define UNIMAC_CFG_CMD_RX_EN_MASK		(1 << UNIMAC_CFG_CMD_RX_EN_SHIFT)
+
+
+/*
+ * 00: 10Mbps, 01: 100Mbps, 10: 1000Mbps, 11: 2500Mbps
+ * Reset value is 0x2.
+ */
+#define UNIMAC_CFG_CMD_SPEED_SHIFT		2
+#define UNIMAC_CFG_CMD_SPEED_MASK		(0x3 << UNIMAC_CFG_CMD_SPEED_SHIFT)
+#define UNIMAC_CFG_CMD_SPEED_10			0
+#define UNIMAC_CFG_CMD_SPEED_100		1
+#define UNIMAC_CFG_CMD_SPEED_1000		2
+#define UNIMAC_CFG_CMD_SPEED_2500		3
+
+/*
+ * 1: All frames are received without Unicast address filtering.
+ * Reset value is 0x1.
+ */
+#define UNIMAC_CFG_CMD_PROMISC_EN_SHIFT		4
+#define UNIMAC_CFG_CMD_PROMISC_EN_MASK		(1 << UNIMAC_CFG_CMD_PROMISC_EN_SHIFT)
+
+/*
+ * 1: Padding is removed along with crc field before the frame is sent to
+ * the user application.
+ *
+ * 0: No padding is removed by the MAC.
+ *
+ * Reset value is 0x0.
+ */
+#define UNIMAC_CFG_CMD_PAD_EN_SHIFT		5
+#define UNIMAC_CFG_CMD_PAD_EN_MASK              (1 << UNIMAC_CFG_CMD_PAD_EN_SHIFT)
+
+/*
+ * 1: The CRC field of received frames is transmitted to the user application.
+ * 0: The CRC field is stripped from the frame.
+ *
+ * Reset value is 0x1.
+ */
+#define UNIMAC_CFG_CMD_CRC_FWD_SHIFT		6
+#define UNIMAC_CFG_CMD_CRC_FWD_MASK		(1 << UNIMAC_CFG_CMD_CRC_FWD_SHIFT)
+
+/*
+ * 1: PAUSE frames are forwarded to the user application.
+ * 0: The PAUSE frames are terminated and discarded in the MAC.
+ *
+ * Reset value is 0x1.
+ */
+#define UNIMAC_CFG_CMD_PAUSE_FWD_SHIFT		7
+#define UNIMAC_CFG_CMD_PAUSE_FWD_MASK		(1 << UNIMAC_CFG_CMD_PAUSE_FWD_SHIFT)
+
+/*
+ * 1: Receive PAUSE frames are ignored by the MAC.
+ *
+ * 0: The transmit process is stopped for the amount of time specified
+ * in the pause quanta received within the PAUSE frame.
+ *
+ * Reset value is 0x0.
+ */
+#define UNIMAC_CFG_CMD_RX_PAUSE_IGN_SHIFT	8
+#define UNIMAC_CFG_CMD_RX_PAUSE_IGN_MASK	(1 << UNIMAC_CFG_CMD_RX_PAUSE_IGN_SHIFT)
+
+/*
+ * 1: The MAC overwrites the source MAC address with a programmed MAC
+ * address in register MAC_0 and MAC_1.
+ *
+ * 0: Not modify the source address received from the transmit
+ * application client.
+ *
+ * Reset value is 0x0.
+ */
+#define UNIMAC_CFG_CMD_TX_ADDR_INSERT_SHIFT	9
+#define UNIMAC_CFG_CMD_TX_ADDR_INSERT_MASK	(1 << UNIMAC_CFG_CMD_TX_ADDR_INSERT_SHIFT)
+
+/*
+ * Half-duplex enable; ignored when RTH_SPEED[1]==1, gigabit mode.
+ *  1: half duplex
+ *  0: full duplex
+ *  Reset value is 0x0.
+ */
+#define UNIMAC_CFG_CMD_HD_ENA_SHIFT		10
+#define UNIMAC_CFG_CMD_HD_ENA_MASK		(1 << UNIMAC_CFG_CMD_HD_ENA_SHIFT)
+
+/*
+ * 1: RX and RX engines are put in reset.
+ * 0: come out of SW reset.
+ * Reset value is 0x0.
+ */
+#define UNIMAC_CFG_CMD_SW_RESET_SHIFT		13
+#define UNIMAC_CFG_CMD_SW_RESET_MASK		(1 << UNIMAC_CFG_CMD_SW_RESET_SHIFT)
+
+/*
+ * Enable GMII/MII loopback
+ * 1: Loopback enabled.
+ * 0: Normal operation.
+ * Reset value is 0x0.
+ */
+#define UNIMAC_CFG_CMD_LCL_LOOP_EN_SHIFT	15
+#define UNIMAC_CFG_CMD_LCL_LOOP_EN_MASK		(1 << UNIMAC_CFG_CMD_LCL_LOOP_EN_SHIFT)
+
+/*
+ * Enable/Disable auto-configuration.
+ * 1: Enable
+ * 0: Disable
+ * Reset value is 0x0
+ */
+#define UNIMAC_CFG_CMD_ENA_EXT_EN_SHIFT		22
+#define UNIMAC_CFG_CMD_ENA_EXT_EN_MASK		(1 << UNIMAC_CFG_CMD_ENA_EXT_EN_SHIFT)
+
+/*
+ * MAC control frame enable.
+ *
+ * 1: MAC control frames with opcode other than 0x0001 are accepted
+ * and forwarded to the client interface.
+ *
+ * 0: MAC control frames with opcode other than 0x0000 and 0x0001 are
+ * silently discarded.
+ *
+ * Reset value is 0x0.
+ */
+#define UNIMAC_CFG_CMD_CTRL_FRM_EN_SHIFT	23
+#define UNIMAC_CFG_CMD_CTRL_FRM_EN_MASK		(1 << UNIMAC_CFG_CMD_CTRL_FRM_EN_SHIFT)
+
+/*
+ * Payload length check.
+ * 0: Check payload length with Length/Type field.
+ * 1: Check disabled.
+ * Reset value is 0x1.
+ */
+#define UNIMAC_CFG_CMD_DIS_LEN_CHECK_SHIFT	26
+#define UNIMAC_CFG_CMD_DIS_LEN_CHECK_IGN_MASK	(1 << UNIMAC_CFG_CMD_DIS_LEN_CHECK_SHIFT)
+
+/*
+ * Enable remote loopback at the fifo system side.
+ * 0: Normal operation
+ *
+ * Reset value is 0x0.
+ */
+#define UNIMAC_CFG_CMD_RMT_LOOP_EN_SHIFT	25
+#define UNIMAC_CFG_CMD_RMT_LOOP_EN_MASK		(1 << UNIMAC_CFG_CMD_RMT_LOOP_EN_SHIFT)
+
+/*
+ * Enable extract/insert of EFM headers.
+ * Reset value is 0x0.
+ */
+#define UNIMAC_CFG_CMD_PRBL_EN_SHIFT		27
+#define UNIMAC_CFG_CMD_PRBL_EN_MASK		(1 << UNIMAC_CFG_CMD_PRBL_EN_SHIFT)
+
+/*
+ * Ignore TX PAUSE frame transmit request.
+ * Reset value is 0x0.
+ */
+#define UNIMAC_CFG_CMD_TX_PAUSE_IGN_SHIFT	28
+#define UNIMAC_CFG_CMD_TX_PAUSE_IGN_MASK	(1 << UNIMAC_CFG_CMD_TX_PAUSE_IGN_SHIFT)
+
+/*
+ * This mode only works in auto-config mode:
+ *
+ * 0: After auto-config, TX_ENA and RX_ENA bits are set to 1.
+ *
+ * 1: After auto-config, TX_ENA and RX_ENA bits are set to 0, meaning
+ * SW will have to come in and enable TX and RX.
+ *
+ * Reset value is 0x0.
+ */
+#define UNIMAC_CFG_CMD_TXRX_EN_SHIFT		29
+#define UNIMAC_CFG_CMD_TXRX_EN_MASK		(1 << UNIMAC_CFG_CMD_TXRX_EN_SHIFT)
+
+/*
+ * 1: Disable RX side RUNT filtering.
+ * 0: Enable RUNT filtering.
+ * Reset value is 0x0.
+ */
+#define UNIMAC_CFG_CMD_RUNT_FILT_DIS_SHIFT	30
+#define UNIMAC_CFG_CMD_RUNT_FILT_DIS_MASK	(1 << UNIMAC_CFG_CMD_RUNT_FILT_DIS_SHIFT)
+
+
+/* UniMAC MAC address first 4 bytes */
+#define UNIMAC_CFG_MAC0_REG			(0x000c)
+
+/* UniMAC MAC address 2 last bytes */
+#define UNIMAC_CFG_MAC1_REG			(0x0010)
+
+/* UniMAC Frame Length */
+#define UNIMAC_CFG_FRM_LEN_REG			(0x0014)
+
+/* UniMAC Pause Quanta */
+#define UNIMAC_CFG_PAUSE_QUNAT_REG		(0x0018)
+
+/* UniMAC EFM Preamble Length */
+#define UNIMAC_CFG_SFD_OFFSET_REG		(0x0040)
+
+/* UniMAC Mode */
+#define UNIMAC_CFG_MODE_REG			(0x0044)
+
+/* UniMAC Preamble Outer TAG 0 */
+#define UNIMAC_CFG_FRM_TAG0_REG			(0x0048)
+
+/* UniMAC Preamble Outer TAG 1 */
+#define UNIMAC_CFG_FRM_TAG1_REG			(0x004c)
+
+/* UniMAC Inter Packet Gap */
+#define UNIMAC_CFG_TX_IPG_LEN_REG		(0x005c)
+
+/* UniMAC Energy Efficient Ethernet Control */
+#define UNIMAC_CFG_EEE_CTRL_REG			(0x0064)
+
+/* EEE LPI timer */
+#define UNIMAC_CFG_EEE_LPI_TIMER_REG		(0x0068)
+
+/* EEE wakeup timer */
+#define UNIMAC_CFG_EEE_WAKE_TIMER_REG		(0x006c)
+
+/* UniMAC Energy Efficient Ethernet Ref Clock Speed */
+#define UNIMAC_CFG_EEE_REF_COUNT_REG		(0x0070)
+
+/* MII EEE LPI timer */
+#define UNIMAC_CFG_EEE_MII_LPI_TIMER_REG	(0x0068)
+
+/* GMII EEE LPI timer */
+#define UNIMAC_CFG_EEE_GMII_LPI_TIMER_REG	(0x006c)
+
+/* MII EEE wakeup timer */
+#define UNIMAC_CFG_EEE_MII_WAKE_TIMER_REG	(0x0080)
+
+/* GMII EEE wakeup timer */
+#define UNIMAC_CFG_EEE_GMII_WAKE_TIMER_REG	(0x0084)
+
+/* UniMAC Repetitive Pause Control in TX direction */
+#define UNIMAC_CFG_PAUSE_CNTRL_REG		(0x0330)
+
+/* UniMAC RX MAX packet Size Register */
+#define UNIMAC_CFG_RX_MAX_PKT_SIZE_REG		(0x0608)
+
+
+/*
+ * MIB registers
+ */
+
+/* Receive 64B Frame Counter */
+#define UNIMAC_MIB_GR64_REG			(0x0000)
+
+/* Receive 65B to 127B Frame Counter */
+#define UNIMAC_MIB_GR127_REG			(0x0004)
+
+/* Receive 128B to 255B Frame Counter */
+#define UNIMAC_MIB_GR255_REG			(0x0008)
+
+/* Receive 256B to 511B Frame Counter */
+#define UNIMAC_MIB_GR511_REG			(0x000c)
+
+/* Receive 512B to 1023B Frame Counter */
+#define UNIMAC_MIB_GR1023_REG			(0x0010)
+
+/* Receive 1024B to 1518B Frame Counter */
+#define UNIMAC_MIB_GR1518_REG			(0x0014)
+
+/* Receive 1519B to 1522B Good VLAN Frame Counter */
+#define UNIMAC_MIB_GRMGV_REG			(0x0018)
+
+/* Receive 1519B to 2047B Frame Counter */
+#define UNIMAC_MIB_GR2047_REG			(0x001c)
+
+/* Receive 2048B to 4095B Frame Counter */
+#define UNIMAC_MIB_GR4095_REG			(0x0020)
+
+/* Receive 4096B to 9216B Frame Counter */
+#define UNIMAC_MIB_GR9216_REG			(0x0024)
+
+/* Receive Packet Counter */
+#define UNIMAC_MIB_GRPKT_REG			(0x0028)
+
+/* Receive Byte Counter */
+#define UNIMAC_MIB_GRBYT_REG			(0x002c)
+
+/* Receive Multicast Frame Counter */
+#define UNIMAC_MIB_GRMCA_REG			(0x0030)
+
+/* Receive Broadcast Frame Counter */
+#define UNIMAC_MIB_GRBCA_REG			(0x0034)
+
+/* Receive FCS Error Counter */
+#define UNIMAC_MIB_GRFCS_REG			(0x0038)
+
+/* Receive Control Frame Packet Counter */
+#define UNIMAC_MIB_GRXCF_REG			(0x003c)
+
+/* Receive Pause Frame Packet Counter */
+#define UNIMAC_MIB_GRXPF_REG			(0x0040)
+
+/* Receive Unknown OP Code Packet Counter */
+#define UNIMAC_MIB_GRXUO_REG			(0x0044)
+
+/* Receive Alignment Error Counter */
+#define UNIMAC_MIB_GRALN_REG			(0x0048)
+
+/* Receive Frame Length Out Of Range Counter */
+#define UNIMAC_MIB_GRFLR_REG			(0x004c)
+
+/* Receive Code Error Packet Counter */
+#define UNIMAC_MIB_GRCDE_REG			(0x0050)
+
+/* Receive Carrier Sense Error Packet Counter */
+#define UNIMAC_MIB_GRFCR_REG			(0x0054)
+
+/* Receive Oversize Packet Counter */
+#define UNIMAC_MIB_GROVR_REG			(0x0058)
+
+/* Receive Jabber Counter */
+#define UNIMAC_MIB_GRJBR_REG			(0x005c)
+
+/* Receive MTU Error Packet Counter */
+#define UNIMAC_MIB_GRMTUE_REG			(0x0060)
+
+/* Receive Good Packet Counter */
+#define UNIMAC_MIB_GRPOK_REG			(0x0064)
+
+/* Receive Unicast Packet Counter */
+#define UNIMAC_MIB_GRUC_REG			(0x0068)
+
+/* Receive PPP Packet Counter */
+#define UNIMAC_MIB_GRPPP_REG			(0x006c)
+
+/* Receive CRC Match Packet Counter */
+#define UNIMAC_MIB_GRCRC_REG			(0x0070)
+
+/* Transmit 64B Frame Counter */
+#define UNIMAC_MIB_TR64_REG			(0x0080)
+
+/* Transmit 65B to 127B Frame Counter */
+#define UNIMAC_MIB_TR127_REG			(0x0084)
+
+/* Transmit 128B to 255B Frame Counter */
+#define UNIMAC_MIB_TR255_REG			(0x0088)
+
+/* Transmit 256B to 511B Frame Counter */
+#define UNIMAC_MIB_TR511_REG			(0x008c)
+
+/* Transmit 512B to 1023B Frame Counter */
+#define UNIMAC_MIB_TR1023_REG			(0x0090)
+
+/* Transmit 1024B to 1518B Frame Counter */
+#define UNIMAC_MIB_TR1518_REG			(0x0094)
+
+/* Transmit 1519B to 1522B Good VLAN Frame Counter */
+#define UNIMAC_MIB_TRMGV_REG			(0x0098)
+
+/* Transmit 1519B to 2047B Frame Counter */
+#define UNIMAC_MIB_TR2047_REG			(0x009c)
+
+/* Transmit 2048B to 4095B Frame Counter */
+#define UNIMAC_MIB_TR4095_REG			(0x00a0)
+
+/* Transmit 4096B to 9216B Frame Counter */
+#define UNIMAC_MIB_TR9216_REG			(0x00a4)
+
+/* Transmit Packet Counter */
+#define UNIMAC_MIB_GTPKT_REG			(0x00a8)
+
+/* Transmit Multicast Packet Counter */
+#define UNIMAC_MIB_GTMCA_REG			(0x00ac)
+
+/* Transmit Broadcast Packet Counter */
+#define UNIMAC_MIB_GTBCA_REG			(0x00b0)
+
+/* Transmit Pause Frame Packet Counter */
+#define UNIMAC_MIB_GTXPF_REG			(0x00b4)
+
+/* Transmit Control Frame Packet Counter */
+#define UNIMAC_MIB_GTXCF_REG			(0x00b8)
+
+/* Transmit FCS Error Counter */
+#define UNIMAC_MIB_GTFCS_REG			(0x00bc)
+
+/* Transmit Oversize Packet Counter */
+#define UNIMAC_MIB_GTOVR_REG			(0x00c0)
+
+/* Transmit Deferral Packet Counter */
+#define UNIMAC_MIB_GTDRF_REG			(0x00c4)
+
+/* Transmit Excessive Deferral Packet Counter */
+#define UNIMAC_MIB_GTEDF_REG			(0x00c8)
+
+/* Transmit Single Collision Packet Counter */
+#define UNIMAC_MIB_GTSCL_REG			(0x00cc)
+
+/* Transmit Multiple Collision Packet Counter */
+#define UNIMAC_MIB_GTMCL_REG			(0x00d0)
+
+/* Transmit Late Collision Packet Counter */
+#define UNIMAC_MIB_GTLCL_REG			(0x00d4)
+
+/* Transmit Excessive Collision Packet Counter */
+#define UNIMAC_MIB_GTXCL_REG			(0x00d8)
+
+/* Transmit Fragments Packet Counter */
+#define UNIMAC_MIB_GTFRG_REG			(0x00dc)
+
+/* Transmit Total Collision Counter */
+#define UNIMAC_MIB_GTNCL_REG			(0x00e0)
+
+/* Transmit Jabber Counter */
+#define UNIMAC_MIB_GTJBR_REG			(0x00e4)
+
+/* Transmit Byte Counter */
+#define UNIMAC_MIB_GTBYT_REG			(0x00e8)
+
+/* Transmit Good Packet Counter */
+#define UNIMAC_MIB_GTPOK_REG			(0x00ec)
+
+/* Transmit Unicast Packet Counter */
+#define UNIMAC_MIB_GTUC_REG			(0x00f0)
+
+/* Receive RUNT Packet Counter */
+#define UNIMAC_MIB_RRPKT_REG			(0x0100)
+
+/* Receive RUNT Packet And Contain A Valid FCS */
+#define UNIMAC_MIB_RRUND_REG			(0x0104)
+
+/* Receive RUNT Packet And Contain Invalid FCS or Alignment Error */
+#define UNIMAC_MIB_RRFRG_REG			(0x0108)
+
+/* Receive RUNT Packet Byte Counter */
+#define UNIMAC_MIB_RRBYT_REG			(0x010c)
+
+/* MIB Control Register */
+#define UNIMAC_MIB_MIB_CNTRL_REG		(0x0180)
+
+
+/*
+ * MISC registers
+ */
+
+/* UNIMAC_CFG Register */
+#define UNIMAC_MISC_CFG_REG			(0x0000)
+#define UNIMAC_MISC_CFG_GMII_DIRECT_SHIFT	0
+#define UNIMAC_MISC_CFG_GMII_DIRECT_MASk	(1 << UNIMAC_MISC_CFG_GMII_DIRECT_SHIFT)
+
+/* UNIMAC_EXT_CFG1 Register */
+#define UNIMAC_MISC_EXT_CFG1_REG		(0x0004)
+#define UNIMAC_MISC_EXT_CFG1_MAX_PKT_SIZE_SHIFT	0
+#define UNIMAC_MISC_EXT_CFG1_MAX_PKT_SIZE_MASK	(0x3fff << UNIMAC_MISC_EXT_CFG1_MAX_PKT_SIZE_SHIFT)
+
+/* UNIMAC_EXT_CFG2 Register */
+#define UNIMAC_MISC_EXT_CFG2_REG		(0x0008)
+
+#endif /* !UNIMAC_REGS_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/wan_top.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/wan_top.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/wan_top.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/wan_top.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,1923 @@
+#ifndef WAN_TOP_H_
+#define WAN_TOP_H_
+
+/* relative to core */
+#define SCRATCH_OFFSET_0		0x0
+
+/*
+ * Register <WAN_TOP_SCRATCH>
+ *
+ * Register used for testing read and write access into wan_top block.
+ */
+#define WAN_TOP_SCRATCH_REG		0x0
+
+/* Scratch pad. */
+#define  WAN_TOP_SCRATCH_SCRATCH_SHIFT	0
+#define  WAN_TOP_SCRATCH_SCRATCH_MASK	0xffffffff
+
+
+/*
+ * Register <WAN_TOP_RESET>
+ *
+ * Various resets to be applied within wan_top.
+ */
+#define WAN_TOP_RESET_REG		0x4
+
+/*
+ * Active low PCS reset.
+ * Set to 1 for normal operation
+*/
+#define  WAN_TOP_RESET_CFG_PCS_RESET_N_MASK	0x1
+
+
+/*
+ * Register <GPON_GEARBOX_0>
+ *
+ * Configuration for the GPON gearbox.
+ */
+#define WAN_TOP_GPON_GEARBOX_0_REG	0x8
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_PTG_STATUS2_SEL_MASK	0x40000000
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_PTG_STATUS1_SEL_MASK	0x20000000
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_STATUS_SEL_MASK	0x10000000
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_0_TXLBE_BIT_ORDER_MASK	0x8000000
+
+/*
+ * This field changes the bit order of the 16-bit Rx data exiting theRx
+ * FIFO to GPON MAC.
+ * 0:
+ * No changes1:
+ * Rx data is reversed from [15:
+ * 0] to [0:
+ * 15]
+*/
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_0_RX_16BIT_ORDER_MASK	0x4000000
+
+/*
+ * This field changes the bit order of the 8-bit Tx data enteringthe Tx
+ * FIFO.
+ * 0:
+ * No changes1:
+ * Tx data is reversed from [7:
+ * 0] to [0:
+ * 7]
+*/
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_0_TX_8BIT_ORDER_MASK	0x2000000
+
+/*
+ * This field changes the bit order of the 16-bit Tx data exitingthe Tx
+ * FIFO to ONU2G PMD.
+ * 0:
+ * Bit 0 is transmitted first1:
+ * Bit 19 is transmitted first
+*/
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_0_TX_16BIT_ORDER_MASK	0x1000000
+
+/*
+ * Minimum distance allowed between the Tx FIFO write and readpointers.
+ * The TXFIFO_DRIFTED status bit is asserted ifTX_POINTER_DISTANCE goes
+ * below this minimum value.
+*/
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_0_TX_POINTER_DISTANCE_MIN_SHIFT	16
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_0_TX_POINTER_DISTANCE_MIN_MASK	0x1f0000
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_0_FIFO_CFG_0_ASYM_LOOPBACK_MASK	0x2000
+
+/*
+ * Maximum distance allowed between the Tx FIFO write and readpointers.
+ * The TXFIFO_DRIFTED status bit is asserted ifTX_POINTER_DISTANCE goes
+ * above this maximum value.
+*/
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_0_TX_POINTER_DISTANCE_MAX_SHIFT	8
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_0_TX_POINTER_DISTANCE_MAX_MASK	0x1f00
+
+/* This bit enables logically inversion of every Tx bit. */
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_0_TX_BIT_INV_MASK	0x80
+
+/*
+ * Delay Tx FIFO write pointer by 1 location (8 Tx bits).
+ * The pointeris adjusted on every 0 to 1 transition in this register
+ * field.
+*/
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_0_TX_WR_PTR_DLY_MASK	0x40
+
+/*
+ * Advance Tx FIFO write pointer by 1 location (8 Tx bits).
+ * Thepointer is adjusted on every 0 to 1 transition in this registerfield.
+*/
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_0_TX_WR_PTR_ADV_MASK	0x20
+
+/* if 1, the TXFIFO_COLLISION status bit resets to 0. */
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_0_CLEAR_TXFIFO_COLLISION_MASK	0x10
+
+/*
+ * If 1, the output of Rx FIFO is looped back to the input of Tx FIFO.
+ * In this case, the SATA PHY Tx data rate is the same as the Rx datarate
+ * regardless of whether Gen2 or Gen3 is selected.
+*/
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_0_LOOPBACK_RX_MASK	0x8
+
+/* If 1, the TXFIFO_DRIFTED status bit resets to 0. */
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_0_CLEAR_TXFIFO_DRIFTED_MASK	0x4
+
+/* If 1, the Tx FIFO goes into reset. */
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_SW_RESET_TXFIFO_RESET_MASK	0x2
+
+/* If 1, The Tx Pattern Generator goes into reset. */
+#define  WAN_TOP_GPON_GEARBOX_0_TOP_WAN_MISC_GPON_GEARBOX_SW_RESET_TXPG_RESET_MASK	0x1
+
+
+/*
+ * Register <GPON_PATTERN_CFG1>
+ *
+ * The GPON Gearbox has a pattern generator to be used for laser
+ * burstenable calibration and during SATA PHY characterization/testing.
+ */
+#define WAN_TOP_GPON_PATTERN_CFG1_REG	0xc
+
+/* 8-bit pattern to be placed between Tx bursts when PG_MODE is 1. */
+#define  WAN_TOP_GPON_PATTERN_CFG1_TOP_WAN_MISC_GPON_GEARBOX_PATTERN_CFG1_FILLER_SHIFT	24
+#define  WAN_TOP_GPON_PATTERN_CFG1_TOP_WAN_MISC_GPON_GEARBOX_PATTERN_CFG1_FILLER_MASK	0xff000000
+
+/*
+ * 8-bit pattern to be placed after the HEADER byte in every Tx burst
+ * when PG_MODE is 1.
+*/
+#define  WAN_TOP_GPON_PATTERN_CFG1_TOP_WAN_MISC_GPON_GEARBOX_PATTERN_CFG1_PAYLOAD_SHIFT	16
+#define  WAN_TOP_GPON_PATTERN_CFG1_TOP_WAN_MISC_GPON_GEARBOX_PATTERN_CFG1_PAYLOAD_MASK	0xff0000
+
+/* 8-bit pattern to be placed at the start of every Tx burst when PG_MODE is 1. */
+#define  WAN_TOP_GPON_PATTERN_CFG1_TOP_WAN_MISC_GPON_GEARBOX_PATTERN_CFG1_HEADER_SHIFT	8
+#define  WAN_TOP_GPON_PATTERN_CFG1_TOP_WAN_MISC_GPON_GEARBOX_PATTERN_CFG1_HEADER_MASK	0xff00
+
+/*
+ * Pattern generator modes:
+ * 0:
+ * Pattern generator disabled.
+ * GPON MAC has control of Tx outputand burst enable.
+ * 1:
+ * Generate repetitive Tx bursts.
+ * Each burst consists of 1 headerbyte and1 or more payload bytes.
+ * Filler bytes are placed between Txbursts.
+ * 2:
+ * Reserved3:
+ * Reserved4:
+ * Generate PRBS7 pattern5:
+ * Generate PRBS15 pattern6:
+ * Generate PRBS23 pattern7:
+ * Generate PRBS31 patternMode 0 is for GPON normal operation.
+ * Mode 1 is for laser burstenable calibration.
+*/
+#define  WAN_TOP_GPON_PATTERN_CFG1_TOP_WAN_MISC_GPON_GEARBOX_PATTERN_CFG1_PG_MODE_SHIFT	0
+#define  WAN_TOP_GPON_PATTERN_CFG1_TOP_WAN_MISC_GPON_GEARBOX_PATTERN_CFG1_PG_MODE_MASK	0x7
+
+
+/*
+ * Register <GPON_PATTERN_CFG2>
+ *
+ * The GPON Gearbox has a pattern generator to be used for laser
+ * burstenable calibration and during SATA PHY characterization/testing.
+ */
+#define WAN_TOP_GPON_PATTERN_CFG2_REG	0x10
+
+/* Number of filler bytes to be placed between Tx bursts when PG_MODEis 1. */
+#define  WAN_TOP_GPON_PATTERN_CFG2_TOP_WAN_MISC_GPON_GEARBOX_PATTERN_CFG2_GAP_SIZE_SHIFT	8
+#define  WAN_TOP_GPON_PATTERN_CFG2_TOP_WAN_MISC_GPON_GEARBOX_PATTERN_CFG2_GAP_SIZE_MASK	0xff00
+
+/* Total length of Tx burst in bytes when PG_MODE is 1. */
+#define  WAN_TOP_GPON_PATTERN_CFG2_TOP_WAN_MISC_GPON_GEARBOX_PATTERN_CFG2_BURST_SIZE_SHIFT	0
+#define  WAN_TOP_GPON_PATTERN_CFG2_TOP_WAN_MISC_GPON_GEARBOX_PATTERN_CFG2_BURST_SIZE_MASK	0xff
+
+
+/*
+ * Register <GPON_GEARBOX_2>
+ *
+ * Configuration for the GPON gearbox.
+ */
+#define WAN_TOP_GPON_GEARBOX_2_REG	0x14
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_2_TOP_WAN_MISC_GPON_GEARBOX_CONFIG_BURST_DELAY_CYC_SHIFT	24
+#define  WAN_TOP_GPON_GEARBOX_2_TOP_WAN_MISC_GPON_GEARBOX_CONFIG_BURST_DELAY_CYC_MASK	0xf000000
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_2_TOP_WAN_MISC_GPON_GEARBOX_TX_VLD_DELAY_CYC_SHIFT	21
+#define  WAN_TOP_GPON_GEARBOX_2_TOP_WAN_MISC_GPON_GEARBOX_TX_VLD_DELAY_CYC_MASK	0xe00000
+
+/*
+ * Initial value to be loaded into Tx FIFO write pointer whenTXFIFO_RESET
+ * is asserted.
+ * Legal values are 0 to 19.
+*/
+#define  WAN_TOP_GPON_GEARBOX_2_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_1_TX_WR_POINTER_SHIFT	16
+#define  WAN_TOP_GPON_GEARBOX_2_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_1_TX_WR_POINTER_MASK	0x1f0000
+
+/*
+ * Initial value to be loaded into Tx FIFO read pointer when TXFIFO_RESET is
+ * asserted.
+ * Legal values are 0 to 31.
+*/
+#define  WAN_TOP_GPON_GEARBOX_2_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_1_TX_RD_POINTER_SHIFT	8
+#define  WAN_TOP_GPON_GEARBOX_2_TOP_WAN_MISC_GPON_GEARBOX_FIFO_CFG_1_TX_RD_POINTER_MASK	0x1f00
+
+
+/*
+ * Register <EARLY_TXEN>
+ *
+ * EARLY_TXEN_CFG Register valid for (EPON & GPON mode).
+ */
+#define WAN_TOP_EARLY_TXEN_TXEN_REG	0x18
+
+/*
+ * Early TXEN Enable Logic Bypass.
+ * 0 = NO_BYPASS
+*/
+#define  WAN_TOP_EARLY_TXEN_TXEN_TOP_WAN_MISC_EARLY_TXEN_CFG_EARLY_TXEN_BYPASS_MASK	0x4000000
+
+/*
+ * Mac TXEN input polarity.
+ * 0 = ACTIVE_LOW
+*/
+#define  WAN_TOP_EARLY_TXEN_TXEN_TOP_WAN_MISC_EARLY_TXEN_CFG_INPUT_TXEN_POLARITY_MASK	0x2000000
+
+/*
+ * Mac TXEN output polarity.
+ * 0 = ACTIVE_LOW
+*/
+#define  WAN_TOP_EARLY_TXEN_TXEN_TOP_WAN_MISC_EARLY_TXEN_CFG_OUTPUT_TXEN_POLARITY_MASK	0x1000000
+
+/* Early TXEN Toff Time */
+#define  WAN_TOP_EARLY_TXEN_TXEN_TOP_WAN_MISC_EARLY_TXEN_CFG_TOFF_TIME_SHIFT	16
+#define  WAN_TOP_EARLY_TXEN_TXEN_TOP_WAN_MISC_EARLY_TXEN_CFG_TOFF_TIME_MASK	0xff0000
+
+/* Early TXEN Setup Time */
+#define  WAN_TOP_EARLY_TXEN_TXEN_TOP_WAN_MISC_EARLY_TXEN_CFG_SETUP_TIME_SHIFT	8
+#define  WAN_TOP_EARLY_TXEN_TXEN_TOP_WAN_MISC_EARLY_TXEN_CFG_SETUP_TIME_MASK	0xff00
+
+/* Early TXEN Hold Time */
+#define  WAN_TOP_EARLY_TXEN_TXEN_TOP_WAN_MISC_EARLY_TXEN_CFG_HOLD_TIME_SHIFT	0
+#define  WAN_TOP_EARLY_TXEN_TXEN_TOP_WAN_MISC_EARLY_TXEN_CFG_HOLD_TIME_MASK	0xff
+
+
+/*
+ * Register <WAN_TOP_RESCAL_CFG>
+ *
+ * Register used for configuring the RESCAL.
+ */
+#define WAN_TOP_RESCAL_AL_CFG_REG	0x1c
+
+/* Connects to i_rstb. */
+#define  WAN_TOP_RESCAL_AL_CFG_WAN_RESCAL_RSTB_MASK	0x8000
+
+/* Connects to i_diag_on. */
+#define  WAN_TOP_RESCAL_AL_CFG_WAN_RESCAL_DIAG_ON_MASK	0x4000
+
+/* Connects to i_pwrdn. */
+#define  WAN_TOP_RESCAL_AL_CFG_WAN_RESCAL_PWRDN_MASK	0x2000
+
+/* Connects to i_rescal_ctrl. */
+#define  WAN_TOP_RESCAL_AL_CFG_WAN_RESCAL_CTRL_SHIFT	0
+#define  WAN_TOP_RESCAL_AL_CFG_WAN_RESCAL_CTRL_MASK	0x1fff
+
+
+/*
+ * Register <WAN_TOP_RESCAL_STATUS_0> - read-only
+ *
+ * Register used for reading RESCAL status.
+ */
+#define WAN_TOP_RESCAL_STATUS_0_REG	0x20
+
+/* Connects to o_done. */
+#define  WAN_TOP_RESCAL_STATUS_0_WAN_RESCAL_DONE_MASK	0x4000000
+
+/* Connects to o_pon. */
+#define  WAN_TOP_RESCAL_STATUS_0_WAN_RESCAL_PON_SHIFT	22
+#define  WAN_TOP_RESCAL_STATUS_0_WAN_RESCAL_PON_MASK	0x3c00000
+
+/* Connects to o_prev_comp_cnt. */
+#define  WAN_TOP_RESCAL_STATUS_0_WAN_RESCAL_PREV_COMP_CNT_SHIFT	18
+#define  WAN_TOP_RESCAL_STATUS_0_WAN_RESCAL_PREV_COMP_CNT_MASK	0x3c0000
+
+/* Connects to o_rescal_ctrl_dfs. */
+#define  WAN_TOP_RESCAL_STATUS_0_WAN_RESCAL_CTRL_DFS_SHIFT	5
+#define  WAN_TOP_RESCAL_STATUS_0_WAN_RESCAL_CTRL_DFS_MASK	0x3ffe0
+
+/* Connects to o_rescal_state. */
+#define  WAN_TOP_RESCAL_STATUS_0_WAN_RESCAL_STATE_SHIFT	2
+#define  WAN_TOP_RESCAL_STATUS_0_WAN_RESCAL_STATE_MASK	0x1c
+
+/* Connects to o_rescalcomp. */
+#define  WAN_TOP_RESCAL_STATUS_0_WAN_RESCAL_COMP_MASK	0x2
+
+/* Connects to o_valid. */
+#define  WAN_TOP_RESCAL_STATUS_0_WAN_RESCAL_VALID_MASK	0x1
+
+
+/*
+ * Register <WAN_TOP_RESCAL_STATUS_1> - read-only
+ *
+ * Register used for reading RESCAL status.
+ */
+#define WAN_TOP_RESCAL_STATUS_1_REG	0x24
+
+/* Connects to o_curr_comp_cnt. */
+#define  WAN_TOP_RESCAL_STATUS_1_WAN_RESCAL_CURR_COMP_CNT_SHIFT	0
+#define  WAN_TOP_RESCAL_STATUS_1_WAN_RESCAL_CURR_COMP_CNT_MASK	0x3f
+
+
+/*
+ * Register <WAN_TOP_MISC_0>
+ *
+ * Register used for wan_top configuration.
+ */
+#define WAN_TOP_MISC_0_REG		0x28
+
+/*
+ * Reserved mode bus for lane.
+ * Mode bus for lane used by the PCS tocommunicate lane info to PMD.
+ * This bus should only be written towhen the lane is in reset since the
+ * firmware will only read thisafter coming out of reset.
+ * This signal will be latched to a lanebased register during core_dp_rstb.
+ * Asynchronous signal to the PMD
+*/
+#define  WAN_TOP_MISC_0_PMD_LANE_MODE_SHIFT	16
+#define  WAN_TOP_MISC_0_PMD_LANE_MODE_MASK	0xffff0000
+
+/*
+ * 0:
+ * All other modes 1:
+ * Explicit 2.
+ * 5G Full Rate Serdes Mode
+*/
+#define  WAN_TOP_MISC_0_EPON_GBOX_AE_2P5_FULL_RATE_MODE_MASK	0x1000
+
+/*
+ * 0:
+ * 10b mode, 1G Operation 1:
+ * 20b mode, 2G Operation
+*/
+#define  WAN_TOP_MISC_0_EPON_GBOX_PON_RX_WIDTH_MODE_MASK	0x800
+
+/*
+ * Strap input for selecting the port address to decode on the
+ * mdiotransaction.
+*/
+#define  WAN_TOP_MISC_0_ONU2G_PHYA_SHIFT	6
+#define  WAN_TOP_MISC_0_ONU2G_PHYA_MASK	0x7c0
+
+/* Load value in cr_xgwan_top_wan_misc_epon_tx_fifo_off. */
+#define  WAN_TOP_MISC_0_EPON_TX_FIFO_OFF_LD_MASK	0x20
+
+/*
+ * Debug bit that disables the 32-bit preamble to allow mdio frames torun
+ * at 2x speed.
+*/
+#define  WAN_TOP_MISC_0_MDIO_FAST_MODE_MASK	0x10
+
+/*
+ * MDIO transaction indicator needs to be asserted in a configurationwhere
+ * an external mdio controller is trying to access the internalPMD
+ * registers directly.
+*/
+#define  WAN_TOP_MISC_0_MDIO_MODE_MASK	0x8
+
+/* Not used in this chip. */
+#define  WAN_TOP_MISC_0_REFOUT_EN_MASK	0x4
+
+/* Not used in this chip. */
+#define  WAN_TOP_MISC_0_REFIN_EN_MASK	0x2
+
+
+/*
+ * Register <WAN_TOP_MISC_1>
+ *
+ * Register used for wan_top configuration.
+ */
+#define WAN_TOP_MISC_1_REG		0x2c
+
+/*
+ * Reserved mode bus for the entire core.
+ * Mode bus for core used bythe PCS to communicate core info to PMD .
+ * This bus should only bewritten to when the core is in reset since the
+ * firmware will onlyread this after coming out of reset.
+ * This signal will be latched toa core register during core_dp_rstb.
+ * Asynchronous signal to the PMD
+*/
+#define  WAN_TOP_MISC_1_PMD_CORE_1_MODE_SHIFT	16
+#define  WAN_TOP_MISC_1_PMD_CORE_1_MODE_MASK	0xffff0000
+
+/*
+ * Reserved mode bus for the entire core.
+ * Mode bus for core used bythe PCS to communicate core info to PMD .
+ * This bus should only bewritten to when the core is in reset since the
+ * firmware will onlyread this after coming out of reset.
+ * This signal will be latched toa core register during core_dp_rstb.
+ * Asynchronous signal to the PMD
+*/
+#define  WAN_TOP_MISC_1_PMD_CORE_0_MODE_SHIFT	0
+#define  WAN_TOP_MISC_1_PMD_CORE_0_MODE_MASK	0xffff
+
+
+/*
+ * Register <WAN_TOP_MISC_2>
+ *
+ * Register used for wan_top configuration.
+ */
+#define WAN_TOP_MISC_2_REG		0x30
+
+/*
+ * Oversample mode for rx lane.
+ * Asynchronous signal to the PMD.
+ * 0 -OSR1.
+ * 1 - OSR2.
+*/
+#define  WAN_TOP_MISC_2_PMD_RX_OSR_MODE_SHIFT	20
+#define  WAN_TOP_MISC_2_PMD_RX_OSR_MODE_MASK	0xf00000
+
+/*
+ * EEE tx mode function for lane.
+ * Asynchronous signal to the PMD.
+*/
+#define  WAN_TOP_MISC_2_PMD_TX_MODE_SHIFT	16
+#define  WAN_TOP_MISC_2_PMD_TX_MODE_MASK	0x30000
+
+/*
+ * Oversample mode for tx lane.
+ * Asynchronous signal to the PMD.
+ * 0 -OSR1.
+ * 1 - OSR2.
+*/
+#define  WAN_TOP_MISC_2_PMD_TX_OSR_MODE_SHIFT	12
+#define  WAN_TOP_MISC_2_PMD_TX_OSR_MODE_MASK	0xf000
+
+/* Pmd_tx_disable is asserted to squelch the transmit signal for lane. */
+#define  WAN_TOP_MISC_2_PMD_TX_DISABLE_MASK	0x200
+
+/*
+ * Lane RX power down.
+ * Minimum assertion time:
+ * 25 comclk period.
+*/
+#define  WAN_TOP_MISC_2_PMD_LN_RX_H_PWRDN_MASK	0x100
+
+/*
+ * Lane TX power down.
+ * Minimum assertion time:
+ * 25 comclk period.
+*/
+#define  WAN_TOP_MISC_2_PMD_LN_TX_H_PWRDN_MASK	0x80
+
+/*
+ * External Loss of signal.
+ * LOS = 1.
+ * Signal presence = 0.
+*/
+#define  WAN_TOP_MISC_2_PMD_EXT_LOS_MASK	0x40
+
+/*
+ * PMD main reset, resets registers, data path for entire coreincluding all
+ * lanes.
+ * Active Low.
+ * Minimum assertion time:
+ * 25 comclkperiod.
+*/
+#define  WAN_TOP_MISC_2_PMD_POR_H_RSTB_MASK	0x20
+
+/*
+ * Core reset for datapath for all lanes and corresponding PLL.
+ * Doesnot reset registers.
+ * Active Low.
+ * Minimum assertion time:
+ * 25 comclkperiod.
+*/
+#define  WAN_TOP_MISC_2_PMD_CORE_1_DP_H_RSTB_MASK	0x10
+
+/*
+ * Core reset for datapath for all lanes and corresponding PLL.
+ * Doesnot reset registers.
+ * Active Low.
+ * Minimum assertion time:
+ * 25 comclkperiod.
+*/
+#define  WAN_TOP_MISC_2_PMD_CORE_0_DP_H_RSTB_MASK	0x8
+
+/*
+ * Lane reset registers and data path.
+ * Active Low.
+ * Minimum assertiontime:
+ * 25 comclk period.
+*/
+#define  WAN_TOP_MISC_2_PMD_LN_H_RSTB_MASK	0x4
+
+/*
+ * Lane datapath reset, does not reset registers.
+ * Active Low.
+ * Minimumassertion time:
+ * 25 comclk period.
+*/
+#define  WAN_TOP_MISC_2_PMD_LN_DP_H_RSTB_MASK	0x2
+
+/*
+ * EEE rx mode function for lane.
+ * Asynchronous signal to the PMD
+*/
+#define  WAN_TOP_MISC_2_PMD_RX_MODE_MASK	0x1
+
+
+/*
+ * Register <WAN_TOP_MISC_3>
+ *
+ * Register used for wan_top configuration.
+ */
+#define WAN_TOP_MISC_3_REG		0x34
+
+/* TXFIFO OFF LOAD signal for EPON's gearbox. */
+#define  WAN_TOP_MISC_3_EPON_TX_FIFO_OFF_SHIFT	24
+#define  WAN_TOP_MISC_3_EPON_TX_FIFO_OFF_MASK	0x3f000000
+
+/*
+ * This field selects the block that the 40-bit debug bus comesfrom.
+ * 0:
+ * GPON1:
+ * XPORT_0 BBH RX2:
+ * XPORT_0 BBH TX3:
+ * XPORT_1 BBH RX4:
+ * XPORT_1 BBH TX5:
+ * SERDES6:
+ * XLIF7:
+ * GPON BBH RX8:
+ * GPON BBH TX9:
+ * SAR BBH RX10:
+ * SAR BBH TX11:
+ * XPORT12:
+ * SAR
+*/
+#define  WAN_TOP_MISC_3_WAN_DEBUG_SEL_SHIFT	12
+#define  WAN_TOP_MISC_3_WAN_DEBUG_SEL_MASK	0x1f000
+
+/* Allows the bypassing of the NTR double sync and pulse generator. */
+#define  WAN_TOP_MISC_3_CFG_NTR_PERIPH_PULSE_BYPASS_MASK	0x800
+
+/* Allows the bypassing of the NTR double sync and pulse generator. */
+#define  WAN_TOP_MISC_3_CFG_NTR_GPIO_PULSE_BYPASS_MASK	0x400
+
+/*
+ * Selects the source of NTR pulse :
+ * 0 - programmable clock; 1 - GPON;2 - GPIO, 3 - NCO.
+*/
+#define  WAN_TOP_MISC_3_CFG_NTR_SRC_SHIFT	8
+#define  WAN_TOP_MISC_3_CFG_NTR_SRC_MASK	0x300
+
+/*
+ * 0:
+ * Output enable for laser is disabled.
+ * 1:
+ * Output enable for laseris enabled.
+*/
+#define  WAN_TOP_MISC_3_LASER_OE_MASK	0x80
+
+/*
+ * Selects between various speed AE modes and GPON mode 0:
+ * 0: 100M AE
+ * 1: 1G AE
+ * 2: 5G AE
+ * 3: 10G AE
+ * 4: GPON
+ * 5-7: Reserved
+*/
+#define  WAN_TOP_MISC_3_WAN_IFSELECT_SHIFT	4
+#define  WAN_TOP_MISC_3_WAN_IFSELECT_MASK	0x70
+
+/*
+ * Bit 0 selects the speed, and bit 1 selects the technology.
+ * 2:
+ * GPON;0,1,3 :
+ * Disable laser.
+*/
+#define  WAN_TOP_MISC_3_LASER_MODE_SHIFT	1
+#define  WAN_TOP_MISC_3_LASER_MODE_MASK	0xe
+
+/*
+ * enable SGMII mode
+ */
+#define  WAN_TOP_MISC_3_SGMII_MODE_MASK		(1 << 17)
+
+/*
+ * 0:
+ * Normal operation.
+ * 1:
+ * Invert laser enable.
+*/
+#define  WAN_TOP_MISC_3_LASER_INVERT_MASK	0x80000000
+
+/* REB going to WAN memories. */
+#define  WAN_TOP_MISC_3_MEM_REB_MASK	0x1
+
+
+/*
+ * Register <WAN_TOP_MISC_4>
+ *
+ * Register used for wan_top configuration.
+ */
+#define WAN_TOP_MISC_4_REG		0xf4
+
+/* Specifies the width of NTR pulse, in unit of 4 ns. */
+#define  WAN_TOP_MISC_4_CFG_NTR_PULSE_WIDTH_SHIFT	16
+#define  WAN_TOP_MISC_4_CFG_NTR_PULSE_WIDTH_MASK	0xffff0000
+
+
+/*
+ * Register <SERDES_PLL_CTL>
+ *
+ * Register used for low configuration of PLL clocks.
+ */
+#define WAN_TOP_SERDES_PLL_CTL_REG	0x38
+
+/*
+ * 0 - select pll1_lcref.
+ * 1 - select pll0_lcref.
+*/
+#define  WAN_TOP_SERDES_PLL_CTL_CFG_PLL1_LCREF_SEL_MASK	0x400
+
+/*
+ * Enables SERDES to drive the pll1_refout pin.
+ * 0 - output is hiZ.
+ * 1- output is pad_pll1_refclk.
+*/
+#define  WAN_TOP_SERDES_PLL_CTL_CFG_PLL1_REFOUT_EN_MASK	0x200
+
+/*
+ * Reference select.
+ * 0 - select pad_pll1_refclkp/n.
+ * 1 - selectpll1_lcrefp/n.
+*/
+#define  WAN_TOP_SERDES_PLL_CTL_CFG_PLL1_REFIN_EN_MASK	0x100
+
+/*
+ * 0 - select pll0_lcref.
+ * 1 - select pll1_lcref.
+*/
+#define  WAN_TOP_SERDES_PLL_CTL_CFG_PLL0_LCREF_SEL_MASK	0x4
+
+/*
+ * Enables SERDES to drive the pll0_refout pin.
+ * 0 - output is hiZ.
+ * 1- output is pad_pll0_refclk.
+*/
+#define  WAN_TOP_SERDES_PLL_CTL_CFG_PLL0_REFOUT_EN_MASK	0x2
+
+/*
+ * Reference select.
+ * 0 - select pad_pll0_refclkp/n.
+ * 1 - selectpll0_lcrefp/n.
+*/
+#define  WAN_TOP_SERDES_PLL_CTL_CFG_PLL0_REFIN_EN_MASK	0x1
+
+
+/*
+ * Register <SERDES_TEMP_CTL> - read-only
+ *
+ * Register used for temperature read.
+ */
+#define WAN_TOP_SERDES_TEMP_CTL_REG	0x3c
+
+/*
+ * 10-bit temperature data.
+ * Please refer to TMON documentation for howto convert this value to a
+ * useful number.
+*/
+#define  WAN_TOP_SERDES_TEMP_CTL_WAN_TEMPERATURE_DATA_SHIFT	0
+#define  WAN_TOP_SERDES_TEMP_CTL_WAN_TEMPERATURE_DATA_MASK	0x3ff
+
+
+/*
+ * Register <SERDES_PRAM_CTL>
+ *
+ * Register used for PRAM control.
+ */
+#define WAN_TOP_SERDES_PRAM_CTL_REG	0x40
+
+/*
+ * Perform pRAM operation.
+ * This field is only valid for the B0 orbeyond.
+ * Software sets and hardware clears this bit.
+ * Do not writeto this register if this bit is set.
+*/
+#define  WAN_TOP_SERDES_PRAM_CTL_CFG_PRAM_GO_MASK	0x80000000
+
+/* Program RAM write strobe. */
+#define  WAN_TOP_SERDES_PRAM_CTL_CFG_PRAM_WE_MASK	0x4000000
+
+/*
+ * Program RAM chip select.
+ * This field is only valid for the A0 versionof the chip.
+*/
+#define  WAN_TOP_SERDES_PRAM_CTL_CFG_PRAM_CS_MASK	0x2000000
+
+/*
+ * Ability to support parallel bus interface to access program RAM.
+ * 0- not supported.
+ * 1 - supported.
+ * This field is only valid for the A0version of the chip.
+*/
+#define  WAN_TOP_SERDES_PRAM_CTL_CFG_PRAM_ABILITY_MASK	0x1000000
+
+/*
+ * Deprecated.
+ * Use the data field in SERDES_PRAM_CTL_2/3.
+*/
+#define  WAN_TOP_SERDES_PRAM_CTL_CFG_PRAM_DATAIN_SHIFT	16
+#define  WAN_TOP_SERDES_PRAM_CTL_CFG_PRAM_DATAIN_MASK	0xff0000
+
+/* Program RAM address. */
+#define  WAN_TOP_SERDES_PRAM_CTL_CFG_PRAM_ADDR_SHIFT	0
+#define  WAN_TOP_SERDES_PRAM_CTL_CFG_PRAM_ADDR_MASK	0xffff
+
+
+/*
+ * Register <SERDES_PRAM_CTL_2>
+ *
+ * Register used for PRAM control.
+ */
+#define WAN_TOP_SERDES_PRAM_CTL_2_REG	0x44
+
+/*
+ * Bits [31:
+ * 0] of the 64-bit pRAM write data interface.
+*/
+#define  WAN_TOP_SERDES_PRAM_CTL_2_CFG_PRAM_DATAIN_0_SHIFT	0
+#define  WAN_TOP_SERDES_PRAM_CTL_2_CFG_PRAM_DATAIN_0_MASK	0xffffffff
+
+
+/*
+ * Register <SERDES_PRAM_CTL_3>
+ *
+ * Register used for PRAM control.
+ */
+#define WAN_TOP_SERDES_PRAM_CTL_3_REG	0x48
+
+/*
+ * Bits [63:
+ * 32] of the 64-bit pRAM write data interface.
+*/
+#define  WAN_TOP_SERDES_PRAM_CTL_3_CFG_PRAM_DATAIN_1_SHIFT	0
+#define  WAN_TOP_SERDES_PRAM_CTL_3_CFG_PRAM_DATAIN_1_MASK	0xffffffff
+
+
+/*
+ * Register <WAN_TOP_PMI_LP_0>
+ *
+ * Register used for low priority configuration.
+ */
+#define WAN_TOP_PMI_LP_0_REG		0x4c
+
+/*
+ * Transaction enable control from master.
+ * This is treated asasynchronous to the PCS.
+ * The bus master should wait forpcs_pmi_lp_ack to be deasserted before
+ * pcs_pmi_lp_en is asserted.
+ * The bus master should then wait for pcs_pmi_lp_ack to be
+ * assertedindicating that the transaction is complete before it
+ * deassertspcs_pmi_lp_en.
+*/
+#define  WAN_TOP_PMI_LP_0_PCS_EN_MASK	0x4
+
+/*
+ * Transaction enable control from master.
+ * This is treated asasynchronous to the rmic.
+ * The bus master should wait for pmi_lp_ackto be deasserted before
+ * pmi_lp_en is asserted.
+ * The bus master shouldthen wait for pmi_lp_ack to be asserted indicating
+ * that thetransaction is complete before it deasserts pmi_lp_en.
+*/
+#define  WAN_TOP_PMI_LP_0_MISC_EN_MASK	0x2
+
+/*
+ * Read/Write control from master.
+ * 1-write, 0-read.
+ * This should beasserted before or with the pmi_lp_en and should be driven
+ * until thenext transaction.
+*/
+#define  WAN_TOP_PMI_LP_0_WRITE_MASK	0x1
+
+
+/*
+ * Register <WAN_TOP_PMI_LP_1>
+ *
+ * Register used for low priority configuration.
+ */
+#define WAN_TOP_PMI_LP_1_REG		0x50
+
+/*
+ * 32-bit address driven by master for read or write transaction.
+ * Thisshould be asserted before or with the pmi_lp_en and should be
+ * drivenuntil the next transaction
+*/
+#define  WAN_TOP_PMI_LP_1_PMI_LP_ADDR_SHIFT	0
+#define  WAN_TOP_PMI_LP_1_PMI_LP_ADDR_MASK	0xffffffff
+
+
+/*
+ * Register <WAN_TOP_PMI_LP_2>
+ *
+ * Register used for low priority configuration.
+ */
+#define WAN_TOP_PMI_LP_2_REG		0x54
+
+/*
+ * 16-bit data bus driven by master for write transaction.
+ * This shouldbe driven before or with the pmi_lp_en and should be driven
+ * untilthe next transaction.
+*/
+#define  WAN_TOP_PMI_LP_2_PMI_LP_WRDATA_SHIFT	16
+#define  WAN_TOP_PMI_LP_2_PMI_LP_WRDATA_MASK	0xffff0000
+
+/*
+ * 16-bit mask bus driven by master for write transaction.
+ * 0 means nomask (wrdata bit is written to register), 1 means mask (wrdata
+ * bitis ignored).
+ * This bus has no affect during a read operation.
+ * Thisshould be asserted before or with the pmi_lp_en and should be
+ * drivenuntil the next transaction.
+*/
+#define  WAN_TOP_PMI_LP_2_PMI_LP_MASKDATA_SHIFT	0
+#define  WAN_TOP_PMI_LP_2_PMI_LP_MASKDATA_MASK	0xffff
+
+
+/*
+ * Register <WAN_TOP_PMI_LP_3> - read-only
+ *
+ * Register used for low priority read back.
+ */
+#define WAN_TOP_PMI_LP_3_REG		0x58
+
+/*
+ * Error response from RMIC slave indicating an address error whichmeans
+ * that either the block address does not exist or that the deviddid not
+ * match the strap value.
+ * The ack signal indicates that thetransaction is complete and the error
+ * signal indicates that therewas an address error with this transaction.
+ * This signal is assertedalong with the ack signal and should be treated
+ * an asynchronoussignal the same way as the ack signal.
+*/
+#define  WAN_TOP_PMI_LP_3_PMI_LP_ERR_MASK	0x20000
+
+/*
+ * Ack response back from the RMIC slave indicating that the write orread
+ * transaction is complete.
+ * This signal is driven in the registersblocks clock domain and should be
+ * treated as an asynchronous inputby the master.
+*/
+#define  WAN_TOP_PMI_LP_3_PMI_LP_ACK_MASK	0x10000
+
+/*
+ * 16-bit data bus driven RMIC slave during a read transaction.
+ * Thisdata is latched in the register clock domain but this data
+ * isguaranteed to be stable by the end of the read transaction so thisdoes
+ * not have to metastabilized.
+*/
+#define  WAN_TOP_PMI_LP_3_PMI_LP_RDDATA_SHIFT	0
+#define  WAN_TOP_PMI_LP_3_PMI_LP_RDDATA_MASK	0xffff
+
+
+/*
+ * Register <WAN_TOP_PMI_LP_4> - read-only
+ *
+ * Register used for PCS low priority read back.
+ */
+#define WAN_TOP_PMI_LP_4_REG		0x5c
+
+/*
+ * Error response from PCS slave indicating an address error whichmeans
+ * that either the block address does not exist or that the deviddid not
+ * match the strap value.
+ * The ack signal indicates that thetransaction is complete and the error
+ * signal indicates that therewas an address error with this transaction.
+ * This signal is assertedalong with the ack signal and should be treated
+ * an asynchronoussignal the same way as the ack signal.
+*/
+#define  WAN_TOP_PMI_LP_4_PCS_PMI_LP_ERR_MASK	0x20000
+
+/*
+ * Ack response back from the PCS slave indicating that the write orread
+ * transaction is complete.
+ * This signal is driven in the registersblocks clock domain and should be
+ * treated as an asynchronous inputby the master.
+*/
+#define  WAN_TOP_PMI_LP_4_PCS_PMI_LP_ACK_MASK	0x10000
+
+/*
+ * 16-bit data bus driven PCS slave during a read transaction.
+ * Thisdata is latched in the register clock domain but this data
+ * isguaranteed to be stable by the end of the read transaction so thisdoes
+ * not have to metastabilized.
+*/
+#define  WAN_TOP_PMI_LP_4_PCS_PMI_LP_RDDATA_SHIFT	0
+#define  WAN_TOP_PMI_LP_4_PCS_PMI_LP_RDDATA_MASK	0xffff
+
+
+/*
+ * Register <WAN_TOP_TOD_CONFIG_0>
+ *
+ * Register used for 48-bit timestamp Time Of Day (TOD) configuration.
+ */
+#define WAN_TOP_TOD_CONFIG_0_REG	0x60
+
+/*
+ * Indicates TOD read is in progress.
+ * Deassertive value indicatesvalid values at WAN_TOD_TS48/WAN_TOD_TS64
+ * registers.
+*/
+#define  WAN_TOP_TOD_CONFIG_0_TOD_READ_BUSY_MASK	0x400000
+
+/*
+ * 0:
+ * New mode.
+ * Transfer TS48 using FIFO.
+ * 1:
+ * Legacy mode.
+ * Transferupper TS48 bits between clock domains.
+*/
+#define  WAN_TOP_TOD_CONFIG_0_CFG_TS48_PRE_SYNC_FIFO_DISABLE_MASK	0x200000
+
+/* Number of clock ticks between consecutive writes to the TS48 FIFO. */
+#define  WAN_TOP_TOD_CONFIG_0_CFG_TS48_PRE_SYNC_FIFO_LOAD_RATE_SHIFT	16
+#define  WAN_TOP_TOD_CONFIG_0_CFG_TS48_PRE_SYNC_FIFO_LOAD_RATE_MASK	0x1f0000
+
+/*
+ * Allows 1PPS pulse to load cfg_tod_1pps_ns_offset into nanosecondcounter.
+ * If not set, the 1PPS pulse will have no effect on theTS48.
+*/
+#define  WAN_TOP_TOD_CONFIG_0_CFG_TOD_PPS_CLEAR_MASK	0x8000
+
+/*
+ * Arm the reading of the TS48/TS64 timestamps.
+ * Values are valid atthe deassertion of tod_read_busy
+*/
+#define  WAN_TOP_TOD_CONFIG_0_CFG_TOD_READ_MASK	0x4000
+
+/*
+ * The TS48 offset value.
+ * In legacy, GPON mode (cfg_ts48_pre_sync_fifo_disable = 1), therising
+ * edge ofTS48/TS64's bit[9] loads cfg_ts48_offset[8:
+ * 0] into the lower 9 bitsof the synchronized TS48/TS64.
+ * In the new mode, the timestamp is transfer to the 250 MHz clockdomain
+ * via an asynchronousFIFO.
+ * The offset is added to the output of the FIFO.
+ * Thecfg_ts48_offset[8] is the signbit, allowing +/- adjustment to the
+ * timestamp value.
+ * It is signextended to make theoffset 48-bits.
+ * In AE mode, the offset is added to the current TS48 value andloading it
+ * back into AE TS48.
+ * Loading is accomplished by setting the cfg_tod_load_ts48_offset bit.
+ * The sign extension ofcfg_ts48_offset[8] also applies.
+*/
+#define  WAN_TOP_TOD_CONFIG_0_CFG_TS48_OFFSET_SHIFT	4
+#define  WAN_TOP_TOD_CONFIG_0_CFG_TS48_OFFSET_MASK	0x3ff0
+
+/*
+ * This field selects the MAC that the timestamp comes from.
+ * 2:
+ * GPON4:
+ * Active Ethernet0,1,3,5,6,7:
+ * Reserved
+*/
+#define  WAN_TOP_TOD_CONFIG_0_CFG_TS48_MAC_SELECT_SHIFT	0
+#define  WAN_TOP_TOD_CONFIG_0_CFG_TS48_MAC_SELECT_MASK	0x7
+
+
+/*
+ * Register <WAN_TOP_TOD_CONFIG_1>
+ *
+ * Register used for 48-bit timestamp Time Of Day (TOD) configuration.
+ */
+#define WAN_TOP_TOD_CONFIG_1_REG	0x64
+
+/*
+ * The rising edge will load the cfg_ts48_offset into AE TS48, subjectto a
+ * lockout window of 1us before and after rollover.
+*/
+#define  WAN_TOP_TOD_CONFIG_1_CFG_TOD_LOAD_TS48_OFFSET_MASK	0x100000
+
+/*
+ * The rising edge will be latched, and cfg_tod_seconds will be loadedinto
+ * AE TS48 on the next 1PPS pulse or when the next second rollsover.
+*/
+#define  WAN_TOP_TOD_CONFIG_1_CFG_TOD_LOAD_MASK	0x80000
+
+/* Number of seconds to be loaded into AE TS48. */
+#define  WAN_TOP_TOD_CONFIG_1_CFG_TOD_SECONDS_SHIFT	0
+#define  WAN_TOP_TOD_CONFIG_1_CFG_TOD_SECONDS_MASK	0x7ffff
+
+
+/*
+ * Register <WAN_TOP_TOD_CONFIG_2>
+ *
+ * Register used for 16-bit timestamp configuration.
+ */
+#define WAN_TOP_TOD_CONFIG_2_REG	0x68
+
+/*
+ * The TS48 offset value for TX clock timestamp.
+ * In legacy mode (cfg_ts48_pre_sync_fifo_disable = 1), the rising edgeof
+ * cfg_ts48_offset[9]loads the lower 9 bits into the 16-bits timestamp.
+ * In the new mode,the timestamp istransfer to the 250 MHz clock domain via
+ * an asynchronous FIFO.
+ * Theoffset is added to theoutput of the FIFO.
+ * The cfg_ts48_offset[8] is the sign bit,allowing +/- adjustment tothe
+ * timestamp value.
+ * It is sign extended to make the offset 48-bits.
+*/
+#define  WAN_TOP_TOD_CONFIG_2_CFG_TX_OFFSET_SHIFT	16
+#define  WAN_TOP_TOD_CONFIG_2_CFG_TX_OFFSET_MASK	0x3ff0000
+
+/*
+ * The TS48 offset value for RX clock timestamp.
+ * In legacy mode (cfg_ts48_pre_sync_fifo_disable = 1), the rising edgeof
+ * cfg_ts48_offset[9]loads the lower 9 bits into the 16-bits timestamp.
+ * In the new mode,the timestamp istransfer to the 250 MHz clock domain via
+ * an asynchronous FIFO.
+ * Theoffset is added to theoutput of the FIFO.
+ * The cfg_ts48_offset[8] is the sign bit,allowing +/- adjustment tothe
+ * timestamp value.
+ * It is sign extended to make the offset 48-bits.
+*/
+#define  WAN_TOP_TOD_CONFIG_2_CFG_RX_OFFSET_SHIFT	0
+#define  WAN_TOP_TOD_CONFIG_2_CFG_RX_OFFSET_MASK	0x3ff
+
+
+/*
+ * Register <WAN_TOP_TOD_CONFIG_3>
+ *
+ * Register used for 16-bit timestamp configuration.
+ */
+#define WAN_TOP_TOD_CONFIG_3_REG	0x6c
+
+/*
+ * The TS48 offset value for the RX clock timestamp.
+ * In legacy mode (cfg_ts48_pre_sync_fifo_disable = 1), the rising edge of
+ * cfg_ts48_offset[9] loads the lower 9 bits into the 16-bit timestamp.
+ * In the new mode, the timestamp is transferred to the 250 MHz clock
+ * domain via an asynchronous FIFO.
+ * The offset is added to the output of the FIFO.
+ * The cfg_ts48_offset[8] is the sign bit, allowing +/- adjustment to the
+ * timestamp value.
+ * It is sign extended to make the offset 48-bits.
+*/
+#define  WAN_TOP_TOD_CONFIG_3_CFG_REF_OFFSET_SHIFT	0
+#define  WAN_TOP_TOD_CONFIG_3_CFG_REF_OFFSET_MASK	0x3ff
+
+
+/*
+ * Register <WAN_TOP_TOD_CONFIG_4>
+ *
+ * Offset for 1pps loading.
+ */
+#define WAN_TOP_TOD_CONFIG_4_REG	0x70
+
+/*
+ * Value to be loaded into the nanosecond counter by the 1PPS pulse,
+ * provided cfg_tod_pps_clear is set.
+*/
+#define  WAN_TOP_TOD_CONFIG_4_CFG_TOD_1PPS_NS_OFFSET_SHIFT	0
+#define  WAN_TOP_TOD_CONFIG_4_CFG_TOD_1PPS_NS_OFFSET_MASK	0x3fffffff
+
+
+/*
+ * Register <WAN_TOP_TOD_CONFIG_5>
+ *
+ * Debug register, used for loading the TOD nanosecond counter for
+ * rollover testing.
+ */
+#define WAN_TOP_TOD_CONFIG_5_REG	0x74
+
+/*
+ * Rising edge immediately loads cfg_tod_ns_offset into the nanosecond
+ * counter.
+ * This is mainly utilized for debugging.
+*/
+#define  WAN_TOP_TOD_CONFIG_5_CFG_TOD_LOAD_NS_OFFSET_MASK	0x40000000
+
+/*
+ * Value to be loaded into nanosecond counter.
+ * The rollover value is at 0x3B9ACA00.
+*/
+#define  WAN_TOP_TOD_CONFIG_5_CFG_TOD_NS_OFFSET_SHIFT	0
+#define  WAN_TOP_TOD_CONFIG_5_CFG_TOD_NS_OFFSET_MASK	0x3fffffff
+
+
+/*
+ * Register <WAN_TOD_TS48_MSB> - read-only
+ *
+ * Register used for 48-bit timestamp Time Of Day (TOD) read back.
+ */
+#define WAN_TOP_TOD_TS48_MSB_REG	0x78
+
+/* Upper 16-bits of TS48. */
+#define  WAN_TOP_TOD_TS48_MSB_TS48_WAN_READ_MSB_SHIFT	0
+#define  WAN_TOP_TOD_TS48_MSB_TS48_WAN_READ_MSB_MASK	0xffff
+
+
+/*
+ * Register <WAN_TOD_TS48_LSB> - read-only
+ *
+ * Register used for 48-bit timestamp Time Of Day (TOD) read back.
+ */
+#define WAN_TOP_TOD_TS48_LSB_REG	0x7c
+
+/* Lower 32-bits of TS48. */
+#define  WAN_TOP_TOD_TS48_LSB_TS48_WAN_READ_LSB_SHIFT	0
+#define  WAN_TOP_TOD_TS48_LSB_TS48_WAN_READ_LSB_MASK	0xffffffff
+
+
+/*
+ * Register <WAN_TOD_TS64_MSB> - read-only
+ *
+ * Register used for 64-bit timestamp Time Of Day (TOD) read back.
+ */
+#define WAN_TOP_TOD_TS64_MSB_REG	0x80
+
+/*
+ * Upper value of TS64 :
+ * AE - second = ts64_wan_read_msb[18:0]
+ * GPON - second[33:2] = ts64_wan_read_msb[31:0]
+*/
+#define  WAN_TOP_TOD_TS64_MSB_TS64_WAN_READ_MSB_SHIFT	0
+#define  WAN_TOP_TOD_TS64_MSB_TS64_WAN_READ_MSB_MASK	0xffffffff
+
+
+/*
+ * Register <WAN_TOD_TS64_LSB> - read-only
+ *
+ * Register used for 64-bit timestamp Time Of Day (TOD) read back.
+ */
+#define WAN_TOP_TOD_TS64_LSB_REG	0x84
+
+/*
+ * Lower value of TS64 :
+ * AE - nanosecond = ts64_wan_read_lsb[31:0]
+ * GPON - second[1:0] = ts64_wan_read_lsb[31:30];
+ * nanosecond = ts64_wan_read_lsb[29:0];
+*/
+#define  WAN_TOP_TOD_TS64_LSB_TS64_WAN_READ_LSB_SHIFT	0
+#define  WAN_TOP_TOD_TS64_LSB_TS64_WAN_READ_LSB_MASK	0xffffffff
+
+
+/*
+ * Register <WAN_TOP_TOD_STATUS_0> - read-only
+ *
+ * Register used for 16-bit timestamp read back.
+ */
+#define WAN_TOP_TOD_STATUS_0_REG	0x88
+
+/* REF clock timestamp. */
+#define  WAN_TOP_TOD_STATUS_0_TS16_REF_SYNCE_READ_SHIFT	0
+#define  WAN_TOP_TOD_STATUS_0_TS16_REF_SYNCE_READ_MASK	0xffff
+
+
+/*
+ * Register <WAN_TOP_TOD_STATUS_1> - read-only
+ *
+ * Register used for 16-bit timestamp read back.
+ */
+#define WAN_TOP_TOD_STATUS_1_REG	0x8c
+
+/* TX MAC clock timestamp. */
+#define  WAN_TOP_TOD_STATUS_1_TS16_MAC_TX_READ_SHIFT	16
+#define  WAN_TOP_TOD_STATUS_1_TS16_MAC_TX_READ_MASK	0xffff0000
+
+/* RX MAC clock timestamp. */
+#define  WAN_TOP_TOD_STATUS_1_TS16_MAC_RX_READ_SHIFT	0
+#define  WAN_TOP_TOD_STATUS_1_TS16_MAC_RX_READ_MASK	0xffff
+
+
+/*
+ * Register <WAN_TOP_SERDES_STATUS> - read-only
+ *
+ * Register used for various WAN status bits.
+ */
+#define WAN_TOP_SERDES_STATUS_REG	0x90
+
+/* Assertion of this signal indicates that the pll has achieved lock. */
+#define  WAN_TOP_SERDES_STATUS_PMD_PLL1_LOCK_MASK	0x400
+
+/*
+ * If set, the SERDES is attempting to enable the laser.
+ * The actual state of the laser also depends on the laser output enable.
+*/
+#define  WAN_TOP_SERDES_STATUS_O_LASER_BURST_EN_MASK	0x200
+
+/*
+ * Error response from the RMIC slave indicating an address error, which
+ * means that either the block address does not exist or that the devid
+ * did not match the strap value.
+ * The ack signal indicates that the transaction is complete and the
+ * error signal indicates that there was an address error with this
+ * transaction.
+ * This signal is asserted along with the ack signal and should be
+ * treated as an asynchronous signal the same way as the ack signal.
+*/
+#define  WAN_TOP_SERDES_STATUS_PMI_LP_ERROR_MASK	0x100
+
+/*
+ * Ack response back from the RMIC slave indicating that the write or
+ * read transaction is complete.
+ * This signal is driven in the register block's clock domain and should
+ * be treated as an asynchronous input by the master.
+*/
+#define  WAN_TOP_SERDES_STATUS_PMI_LP_ACKNOWLEDGE_MASK	0x80
+
+/*
+ * Signal detect status from the analog.
+ * This signal is not related to any interface clock or data validity.
+*/
+#define  WAN_TOP_SERDES_STATUS_PMD_SIGNAL_DETECT_0_MASK	0x40
+
+/* EEE energy detect. */
+#define  WAN_TOP_SERDES_STATUS_PMD_ENERGY_DETECT_0_MASK	0x20
+
+/*
+ * Receive PMD lock.
+ * When this signal is low, the receiver is acquiring lock.
+ * During this period, the phase of the receive clock and alignment of
+ * data are not reliable.
+*/
+#define  WAN_TOP_SERDES_STATUS_PMD_RX_LOCK_0_MASK	0x10
+
+/* Transmit clock valid. */
+#define  WAN_TOP_SERDES_STATUS_PMD_TX_CLK_VLD_MASK	0x8
+
+/* Receive clock valid. */
+#define  WAN_TOP_SERDES_STATUS_PMD_RX_CLK_VLD_0_MASK	0x4
+
+/* Assertion of this signal indicates that the pll has not achieved lock. */
+#define  WAN_TOP_SERDES_STATUS_PMD_RX_LOCK_0_INVERT_MASK	0x2
+
+/* Assertion of this signal indicates that the pll has achieved lock. */
+#define  WAN_TOP_SERDES_STATUS_PMD_PLL0_LOCK_MASK	0x1
+
+
+/*
+ * Register <WAN_INT_STATUS>
+ *
+ * Interrupts.
+ */
+#define WAN_TOP_INT_STATUS_REG		0x98
+
+/*
+ * Indicates the sampling of the clock counter, as specified by the
+ * cfg_pll_smpl_prd sampling period.
+*/
+#define  WAN_TOP_INT_STATUS_CLK_SAMPLE_INT_MASK	0x1
+
+
+/*
+ * Register <WAN_INT_MASK>
+ *
+ * Interrupt masks.
+ */
+#define WAN_TOP_INT_MASK_REG		0x9c
+
+/* Interrupt mask, active low. */
+#define  WAN_TOP_INT_MASK_MSK_CLK_SAMPLE_INT_MASK	0x1
+
+
+/*
+ * Register <WAN_CLK_DEJITTER_SAMPLING_CTL_0>
+ *
+ * Clock dejittering control register.
+ */
+#define WAN_TOP_CLK_DEJITTER_SAMPLING_CTL_0_REG	0xa0
+
+/*
+ * Specifies the sampling period of the sampling counters, running in the
+ * following domains :
+ * PON_SERDES :
+ * 10G - 515.625 MHz; 2.5G - 312.5 MHz; 1G - 125 MHz; 100FX - 25 MHz;
+ * GPON - 155.5 MHz.
+ * LAN_SERDES :
+ * 156.25 MHz
+ * SGPHY :
+ * 25 MHz
+ * DSL :
+ * 35.328 MHz
+ * The sampling counter should be set around 100 ms.
+ * The unit is in each clock domain's period.
+ * Hence, sampling period = X value/frequency.
+*/
+#define  WAN_TOP_CLK_DEJITTER_SAMPLING_CTL_0_CFG_PLL_SMPL_PRD_SHIFT	0
+#define  WAN_TOP_CLK_DEJITTER_SAMPLING_CTL_0_CFG_PLL_SMPL_PRD_MASK	0x3ffffff
+
+
+/*
+ * Register <WAN_CLK_DEJITTER_SAMPLING_CTL_1>
+ *
+ * Clock dejittering control register.
+ */
+#define WAN_TOP_CLK_DEJITTER_SAMPLING_CTL_1_REG	0xa4
+
+/* Enable PBI write to SyncE_PLL integer/fractional dividers. */
+#define  WAN_TOP_CLK_DEJITTER_SAMPLING_CTL_1_CFG_EN_PBI_WR_2_SYNCE_PLL_MASK	0x10
+
+/*
+ * Specifies the source of the sample pulse generator :
+ * 0 - PON_SERDES; 1 - LAN_SERDES; 2 - SGPHY; 3 - DSL; 4 - NTR.
+ * This samples the counter running in SyncE_PLL's 250 MHz clock domain.
+*/
+#define  WAN_TOP_CLK_DEJITTER_SAMPLING_CTL_1_CFG_CLK_SMPL_SRC_SHIFT	0
+#define  WAN_TOP_CLK_DEJITTER_SAMPLING_CTL_1_CFG_CLK_SMPL_SRC_MASK	0x7
+
+
+/*
+ * Register <WAN_CLK_SAMPLE_COUNTER> - read-only
+ *
+ * Clock counter sample register.
+ */
+#define WAN_TOP_CLK_SAMPLE_COUNTER_REG	0xa8
+
+/*
+ * Sample clock counter value of SyncE_PLL's dejittering counter.
+ * Value should be read upon receiving the clk_sample_int interrupt.
+ * Difference in time is obtained by subtracting the current value from
+ * the previous one.
+ * The value will be different for the different sampling sources.
+*/
+#define  WAN_TOP_CLK_SAMPLE_COUNTER_PBI_CLK_CNT_SMPL_SHIFT	0
+#define  WAN_TOP_CLK_SAMPLE_COUNTER_PBI_CLK_CNT_SMPL_MASK	0xffffffff
+
+
+/*
+ * Register <WAN_SYNCE_PLL_CONFIG>
+ *
+ * Specifies the SyncE_PLL integer/fractional dividers, applicable
+ * when cfg_en_pbi_wr_2_syncE_pll is set.
+ */
+#define WAN_TOP_SYNCE_PLL_CONFIG_REG	0xac
+
+/* Integer divider. */
+#define  WAN_TOP_SYNCE_PLL_CONFIG_CFG_SYNCE_PLL_NDIV_INT_SHIFT	24
+#define  WAN_TOP_SYNCE_PLL_CONFIG_CFG_SYNCE_PLL_NDIV_INT_MASK	0xff000000
+
+/* Fractional divider. */
+#define  WAN_TOP_SYNCE_PLL_CONFIG_CFG_SYNCE_PLL_NDIV_FRAC_SHIFT	0
+#define  WAN_TOP_SYNCE_PLL_CONFIG_CFG_SYNCE_PLL_NDIV_FRAC_MASK	0xffffff
+
+
+/*
+ * Register <WAN_TOP_OSR_CONTROL>
+ *
+ * Register used to control the oversample mode of the SERDES gearboxes.
+ */
+#define WAN_TOP_OSR_CONTROL_REG	0xb4
+
+/* TBD */
+#define  WAN_TOP_OSR_TXLBE_SER_ORDER_MASK	0x80
+
+/* TBD */
+#define  WAN_TOP_OSR_TXLBE_SER_INIT_VAL_SHIFT	4
+#define  WAN_TOP_OSR_TXLBE_SER_INIT_VAL_MASK	0x70
+
+/* TBD */
+#define  WAN_TOP_OSR_TXLBE_SER_EN_MASK	0x8
+
+/*
+ * 0:
+ * New oversample mode.
+ * 1:
+ * Legacy mode.
+*/
+#define  WAN_TOP_OSR_TXFIFO_RD_LEGACY_MODE_MASK	0x4
+
+/*
+ * 0:
+ * Select div2 clock.
+ * 1:
+ * Select div4 clock.
+ * 2:
+ * Select legacy mode clocking.
+ * 3:
+ * Reserved.
+*/
+#define  WAN_TOP_OSR_CFG_GPON_RX_CLK_SHIFT	0
+#define  WAN_TOP_OSR_CFG_GPON_RX_CLK_MASK	0x3
+
+
+/*
+ * Register <WAN_TOP_GPON_GEARBOX_STATUS> - read-only
+ *
+ * Register used for various WAN status bits.
+ */
+#define WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_STATUS_REG	0x94
+
+/*
+ * Status indication based on status_sel signals.
+ * If gpon_gearbox_fifo_status_sel is high, this status will be
+ * gpon_gearbox_fifo_status.
+ * If gpon_gearbox_ptg_status1_sel is high, this status will be
+ * gpon_gearbox_ptg_status1.
+ * If gpon_gearbox_ptg_status2_sel is high, this status will be
+ * gpon_gearbox_ptg_status2.
+*/
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_STATUS_CR_RD_DATA_CLX_SHIFT	0
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_STATUS_CR_RD_DATA_CLX_MASK	0xffffffff
+
+
+/*
+ * Register <WAN_TOP_GPON_GEARBOX_PRBS_CONTROL_0>
+ *
+ * Register used to control the GPON gearbox PRBS checker.
+ */
+#define WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_REG	0xb0
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_GPON_GEARBOX_RG_PRBS_CHK_CTRL_0_SIG_PRBS_STATUS_CLR_MASK	0x2000000
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_GPON_GEARBOX_RG_PRBS_CHK_CTRL_0_INV_MASK	0x1000000
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_GPON_GEARBOX_RG_PRBS_CHK_CTRL_0_OOL_CNT_SHIFT	19
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_GPON_GEARBOX_RG_PRBS_CHK_CTRL_0_OOL_CNT_MASK	0xf80000
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_GPON_GEARBOX_RG_PRBS_CHK_CTRL_0_LOCK_CNT_SHIFT	14
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_GPON_GEARBOX_RG_PRBS_CHK_CTRL_0_LOCK_CNT_MASK	0x7c000
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_GPON_GEARBOX_RG_PRBS_CHK_CTRL_0_ERR_CNT_BURST_MODE_MASK	0x2000
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_GPON_GEARBOX_RG_PRBS_CHK_CTRL_0_MODE_SEL_SHIFT	10
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_GPON_GEARBOX_RG_PRBS_CHK_CTRL_0_MODE_SEL_MASK	0x1c00
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_GPON_GEARBOX_RG_PRBS_CHK_CTRL_0_EN_TIMEOUT_SHIFT	2
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_GPON_GEARBOX_RG_PRBS_CHK_CTRL_0_EN_TIMEOUT_MASK	0x7c
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_GPON_GEARBOX_RG_PRBS_CHK_CTRL_0_EN_TIMER_MODE_SHIFT	0
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_0_GPON_GEARBOX_RG_PRBS_CHK_CTRL_0_EN_TIMER_MODE_MASK	0x3
+
+
+/*
+ * Register <WAN_TOP_GPON_GEARBOX_PRBS_CONTROL_1>
+ *
+ * Register used to control the GPON gearbox PRBS checker.
+ */
+#define WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_1_REG	0xb8
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_1_GPON_GEARBOX_RG_PRBS_CHK_CTRL_1_EN_MASK	0x80000000
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_1_GPON_GEARBOX_RG_PRBS_CHK_CTRL_1_MODE_SHIFT	29
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_1_GPON_GEARBOX_RG_PRBS_CHK_CTRL_1_MODE_MASK	0x60000000
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_1_GPON_GEARBOX_RG_PRBS_CHK_CTRL_1_TIMER_VAL_SHIFT	0
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_CONTROL_1_GPON_GEARBOX_RG_PRBS_CHK_CTRL_1_TIMER_VAL_MASK	0xfffff
+
+
+/*
+ * Register <WAN_TOP_GPON_GEARBOX_PRBS_STATUS_0> - read-only
+ *
+ * Register used to monitor the GPON gearbox PRBS checker.
+ */
+#define WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_STATUS_0_REG	0xbc
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_STATUS_0_GPON_GEARBOX_PRBS_STAT_0_VECTOR_SHIFT	0
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_STATUS_0_GPON_GEARBOX_PRBS_STAT_0_VECTOR_MASK	0xffffffff
+
+
+/*
+ * Register <WAN_TOP_GPON_GEARBOX_PRBS_STATUS_1> - read-only
+ *
+ * Register used to monitor the GPON gearbox PRBS checker.
+ */
+#define WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_STATUS_1_REG	0xc0
+
+/* TBD */
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_STATUS_1_GPON_GEARBOX_PRBS_STAT_1_VECTOR_SHIFT	0
+#define  WAN_TOP_GPON_GEARBOX_STATUS_GEARBOX_PRBS_STATUS_1_GPON_GEARBOX_PRBS_STAT_1_VECTOR_MASK	0x3
+
+
+/*
+ * Register <WAN_TOP_AE_GEARBOX_CONTROL_0>
+ *
+ * Register used to control AE Gearbox
+ */
+#define WAN_TOP_AE_GEARBOX_CONTROL_0_REG	0xc4
+
+/*
+ * Set to 1 then 0 to load initial offset value determined
+ * by cr_wan_top_ae_gearbox_tx_fifo_offset reg
+*/
+#define  WAN_TOP_AE_GEARBOX_CONTROL_0_CR_TX_FIFO_OFFSET_SHIFT	4
+#define  WAN_TOP_AE_GEARBOX_CONTROL_0_CR_TX_FIFO_OFFSET_MASK	0x3f0
+
+/*
+ * Set to 1 then 0 to load initial offset value determined
+ * by cr_wan_top_ae_gearbox_tx_fifo_offset reg
+*/
+#define  WAN_TOP_AE_GEARBOX_CONTROL_0_CR_TX_FIFO_OFFSET_LD_MASK	0x4
+
+/*
+ * 0 = compatible with sub rate serdes mode
+ * 1 = compatible with full rate serdes mode.
+ * 100FX - sub rate only.
+ * 1G - sub/full rate
+ * 2.5G - sub/full rate
+ * 10G - full rate only.
+*/
+#define  WAN_TOP_AE_GEARBOX_CONTROL_0_CR_FULL_RATE_MODE_MASK	0x2
+
+/*
+ * 0 = 10b mode
+ * 1 = 20b mode
+ * 100FX - 10b mode
+ * 1G - 10b mode
+ * 2.5G - 10b mode
+ * 10G - 20b mode
+*/
+#define  WAN_TOP_AE_GEARBOX_CONTROL_0_CR_WIDTH_MODE_MASK	0x1
+
+
+/*
+ * Register <WAN_VOLTAGE_REGULATOR_DIVIDER>
+ *
+ * Provides the divider for the voltage regulator sync output.
+ */
+#define WAN_TOP_VOLTAGE_REGULATOR_DIVIDER_IDER_REG	0xc8
+
+/* Allows the bypassing of the N divider. */
+#define  WAN_TOP_VOLTAGEULATOR_DIVIDER_IDER_REG_CFG_VREG_CLK_BYPASS_MASK	0x200
+
+/*
+ * Specifies the clock source of the voltage regulator sync output :
+ * 1 - VDSL PHY; 0 - 50 MHz XTAL clock.
+*/
+#define  WAN_TOP_VOLTAGEULATOR_DIVIDER_IDER_REG_CFG_VREG_CLK_SRC_MASK	0x100
+
+/*
+ * N divider value of voltage regulator sync output.
+ * Assertive 1 value will be INT(N/2); and assertive 0, N - INT(N/2).
+*/
+#define  WAN_TOP_VOLTAGEULATOR_DIVIDER_IDER_REG_CFG_VREG_DIV_SHIFT	0
+#define  WAN_TOP_VOLTAGEULATOR_DIVIDER_IDER_REG_CFG_VREG_DIV_MASK	0xff
+
+
+/*
+ * Register <WAN_CLOCK_SYNC_CONFIG>
+ *
+ * Provides the configuration for clock syncing.
+ */
+#define WAN_TOP_CLOCK_SYNC_CONFIG_REG	0xcc
+
+/*
+ * Output enable for 1PPS output to GPIO, applicable only
+ * if cfg_gpio_1pps_src is cleared.
+*/
+#define  WAN_TOP_CLOCK_SYNC_CONFIG_CFG_GPIO_1PPS_OEB_MASK	0x4
+
+/*
+ * Selects the source of 1PPS output to GPIO :
+ * 0 - NCO; 1 - Switch.
+*/
+#define  WAN_TOP_CLOCK_SYNC_CONFIG_CFG_GPIO_1PPS_SRC_MASK	0x2
+
+/*
+ * Selects the source of sync pulse output to switch :
+ * 0 - 1PPS; 1 -recovered PHY/SerDes clock.
+*/
+#define  WAN_TOP_CLOCK_SYNC_CONFIG_CFG_SWITCH_SYNCIN_SRC_MASK	0x1
+
+
+/*
+ * Register <WAN_AEPCS_IEEE_REGID>
+ *
+ * Provides the configuration for AE PCS IEEE device ID register.
+ */
+#define WAN_TOP_AEPCS_IEEE_REGID_REG	0xd0
+
+/* The AE PCS IEEE device ID. */
+#define  WAN_TOP_AEPCS_IEEEID_REG_CFG_AEPCS_IEEE_REGID_SHIFT	0
+#define  WAN_TOP_AEPCS_IEEEID_REG_CFG_AEPCS_IEEE_REGID_MASK	0xffffffff
+
+
+/*
+ * Register <WAN_TOP_FORCE_LBE_CONTROL>
+ *
+ * Register used to force the laser burst enable (LBE) and LBE output
+ * enable signals.
+ */
+#define WAN_TOP_FORCE_LBE_CONTROL_REG	0xd4
+
+/*
+ * This field is only used when cfg_force_lbe_oe is set.
+ * This signal is then inverted prior to connecting to the OEB pin.
+ * 0:
+ * LBE output enable is set to 0.
+ * 1:
+ * LBE output enable is set to 1.
+*/
+#define  WAN_TOP_FORCE_LBE_CONTROL_OE_VALUE_MASK	0x8
+
+/*
+ * 0:
+ * The MAC and cr_xgwan_top_wan_misc_wan_cfg_laser_oe control the LBE
+ * output enable signal.
+ * 1:
+ * The LBE output enable signal is forced to cfg_force_lbe_oe_value.
+*/
+#define  WAN_TOP_FORCE_LBE_CONTROL_OE_MASK	0x4
+
+/*
+ * This field is only used when cfg_force_lbe is set.
+ * 0:
+ * LBE is set to 0.
+ * 1:
+ * LBE is set to 1.
+*/
+#define  WAN_TOP_FORCE_LBE_CONTROL_VALUE_MASK	0x2
+
+/*
+ * 0:
+ * The MAC controls the LBE signal.
+ * 1:
+ * The LBE signal is forced to cfg_force_lbe_value.
+*/
+#define  WAN_TOP_FORCE_LBE_CONTROL_MASK	0x1
+
+
+/*
+ * Register <NGPON_GEARBOX_RX_CTL_0>
+ *
+ * Configuration for the NGPON gearbox.
+ */
+#define WAN_TOP_NGPON_GEARBOX_RX_CTL_0_REG	0xd8
+
+/* Value for RX output FIFO read pointer. */
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXFIFORDPTR_SHIFT	24
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXFIFORDPTR_MASK	0xf000000
+
+/* Disable pointer auto-load going into lock for output FIFO. */
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXPTRAUTOLDDIS_MASK	0x800000
+
+/* Number of bad KChar to go out of lock. */
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXMAXBADK_SHIFT	20
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXMAXBADK_MASK	0x700000
+
+/*
+ * Use only K28.5 for framing.
+*/
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXFRMK28ONLY_MASK	0x80000
+
+/* Number of good KChar in a row for lock. */
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXMAXGOODK_SHIFT	16
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXMAXGOODK_MASK	0x70000
+
+/* Force 10b framer to go to HUNT state on rising edge. */
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXFRCHUNT_MASK	0x8000
+
+/* Bitwise flip 32b output data. */
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXOUTDATAFLIP_MASK	0x4000
+
+/* Force mux select to value in cfNGponGboxRxFrcMuxVal. */
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXFRCMUXSEL_MASK	0x2000
+
+/*
+ * Value that will be forced to mux select when cfNGponGboxRxFrcMuxSel is
+ * asserted.
+*/
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXFRCMUXVAL_SHIFT	8
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXFRCMUXVAL_MASK	0x1f00
+
+/* Bitwise flip RX 20b gearbox data. */
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRX20BDATAFLIP_MASK	0x80
+
+/* Bitwise flip RX 16b data from SERDES. */
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXSERDATAFLIP_MASK	0x40
+
+/* Bitwise invert RX 16b data from SERDES. */
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXSERDATAINV_MASK	0x20
+
+/*
+ * Load value for FIFO read pointer.
+ * Write pointer will be loaded to 0.
+*/
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXFIFOPTRLD_MASK	0x10
+
+/* When set, synchronization will be held indefinitely. */
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXSWSYNCHOLD_MASK	0x8
+
+/*
+ * 0:
+ * 8B/10B decoder mode operating at 777 MHz.
+ * 1:
+ * Pass through mode operating at 622 MHz.
+*/
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXMODE_MASK	0x4
+
+/* Synchronous enable for RX gearbox. */
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRXEN_MASK	0x2
+
+/* Asynchronous, active-low, software reset for gearbox. */
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_0_CFNGPONGBOXRSTN_MASK	0x1
+
+
+/*
+ * Register <NGPON_GEARBOX_RX_CTL_1>
+ *
+ * Configuration for the NGPON gearbox.
+ */
+#define WAN_TOP_NGPON_GEARBOX_RX_CTL_1_REG	0xdc
+
+/* Max counter for 125 us timer. */
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_1_CFNGPONGBOXRXMAXTIMERCNT_SHIFT	0
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_1_CFNGPONGBOXRXMAXTIMERCNT_MASK	0x1ffff
+
+
+/*
+ * Register <NGPON_GEARBOX_RX_CTL_2>
+ *
+ * Configuration for the NGPON gearbox.
+ */
+#define WAN_TOP_NGPON_GEARBOX_RX_CTL_2_REG	0xe0
+
+/*
+ * RD+ K28.5 pattern.
+*/
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_2_CFNGPONGBOXRXK28D5RDP_SHIFT	16
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_2_CFNGPONGBOXRXK28D5RDP_MASK	0x3ff0000
+
+/*
+ * RD- K28.5 pattern.
+*/
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_2_CFNGPONGBOXRXK28D5RDN_SHIFT	0
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_2_CFNGPONGBOXRXK28D5RDN_MASK	0x3ff
+
+
+/*
+ * Register <NGPON_GEARBOX_RX_CTL_3>
+ *
+ * Configuration for the NGPON gearbox.
+ */
+#define WAN_TOP_NGPON_GEARBOX_RX_CTL_3_REG	0xe4
+
+/*
+ * RD+ D5.7 pattern.
+*/
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_3_CFNGPONGBOXRXD5D7RDP_SHIFT	16
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_3_CFNGPONGBOXRXD5D7RDP_MASK	0x3ff0000
+
+/*
+ * RD- D5.7 pattern.
+*/
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_3_CFNGPONGBOXRXD5D7RDN_SHIFT	0
+#define  WAN_TOP_NGPON_GEARBOX_RX_CTL_3_CFNGPONGBOXRXD5D7RDN_MASK	0x3ff
+
+
+/*
+ * Register <NGPON_GEARBOX_TX_CTL>
+ *
+ * Configuration for the NGPON gearbox.
+ */
+#define WAN_TOP_NGPON_GEARBOX_TX_CTL_REG	0xe8
+
+/*
+ * Value for TX data FIFO read pointer.
+ * Steps of 2 x txClk (622 MHz), jumps of 32 bits.
+*/
+#define  WAN_TOP_NGPON_GEARBOX_TX_CTL_CFNGPONGBOXTXFIFODATARDPTR_SHIFT	16
+#define  WAN_TOP_NGPON_GEARBOX_TX_CTL_CFNGPONGBOXTXFIFODATARDPTR_MASK	0xf0000
+
+/*
+ * Value for TX valid FIFO offset.
+ * Steps of txClk (622 MHz), jumps of 16 bits.
+ * 1 to 15 are advances of valid vs data, valid comes out ahead.
+ * 1=1 clock, 2=2 clocks, 3=3 clocks.
+ * ..
+ * 31 to 16 are advances of valid vs data, valid comes out behind.
+ * 31=1 clock, 30=2 clocks, 29=3 clocks.
+ * ..
+*/
+#define  WAN_TOP_NGPON_GEARBOX_TX_CTL_CFNGPONGBOXTXFIFOVLDOFF_SHIFT	8
+#define  WAN_TOP_NGPON_GEARBOX_TX_CTL_CFNGPONGBOXTXFIFOVLDOFF_MASK	0x1f00
+
+/* Flip TX data valid endian on 32b input. */
+#define  WAN_TOP_NGPON_GEARBOX_TX_CTL_CFNGPONGBOXTXSERVLDFLIP_MASK	0x40
+
+/* Flip TX data endian on 32b input. */
+#define  WAN_TOP_NGPON_GEARBOX_TX_CTL_CFNGPONGBOXTXSERDATAFLIP_MASK	0x20
+
+/* Bitwise invert TX 4b valid to SERDES. */
+#define  WAN_TOP_NGPON_GEARBOX_TX_CTL_CFNGPONGBOXTXSERVLDINV_MASK	0x10
+
+/* Bitwise invert TX 16b data to SERDES. */
+#define  WAN_TOP_NGPON_GEARBOX_TX_CTL_CFNGPONGBOXTXSERDATAINV_MASK	0x8
+
+/*
+ * Load only the offset for TX valid FIFO pointer.
+ * This is an offset from the data read pointer.
+*/
+#define  WAN_TOP_NGPON_GEARBOX_TX_CTL_CFNGPONGBOXTXFIFOVLDPTRLD_MASK	0x4
+
+/*
+ * Load value for TX data FIFO read pointer and valid read pointer
+ * offset.
+ * Data/valid write will be loaded to 0.
+*/
+#define  WAN_TOP_NGPON_GEARBOX_TX_CTL_CFNGPONGBOXTXFIFOPTRLD_MASK	0x2
+
+/* Synchronous enable for TX gearbox. */
+#define  WAN_TOP_NGPON_GEARBOX_TX_CTL_CFNGPONGBOXTXEN_MASK	0x1
+
+
+/*
+ * Register <NGPON_GEARBOX_STATUS>
+ *
+ * Status for the NGPON gearbox.
+ */
+#define WAN_TOP_NGPON_GEARBOX_STATUS_REG	0xec
+
+/* Pointer collision. */
+#define  WAN_TOP_NGPON_GEARBOX_STATUS_NGPONTXGBOXFIFOVLDPTRCOL_MASK	0x4000000
+
+/* Framer state. */
+#define  WAN_TOP_NGPON_GEARBOX_STATUS_NGPONRXGBOXSTATE_SHIFT	24
+#define  WAN_TOP_NGPON_GEARBOX_STATUS_NGPONRXGBOXSTATE_MASK	0x3000000
+
+/* Pointer collision. */
+#define  WAN_TOP_NGPON_GEARBOX_STATUS_NGPONTXGBOXFIFODATAPTRCOL_MASK	0x800000
+
+/* Number of KChar. */
+#define  WAN_TOP_NGPON_GEARBOX_STATUS_NGPONRXGBOXKCNT_SHIFT	20
+#define  WAN_TOP_NGPON_GEARBOX_STATUS_NGPONRXGBOXKCNT_MASK	0x700000
+
+/* Pointer delta. */
+#define  WAN_TOP_NGPON_GEARBOX_STATUS_NGPONRXGBOXFIFOPTRDELTA_SHIFT	16
+#define  WAN_TOP_NGPON_GEARBOX_STATUS_NGPONRXGBOXFIFOPTRDELTA_MASK	0xf0000
+
+/* 10b sync acquired. */
+#define  WAN_TOP_NGPON_GEARBOX_STATUS_NGPONRXGBOXSYNCACQ_MASK	0x8000
+
+/* FIFO pointer collision. */
+#define  WAN_TOP_NGPON_GEARBOX_STATUS_NGPONRXGBOXFIFOPTRCOL_MASK	0x4000
+
+/* Line errors. */
+#define  WAN_TOP_NGPON_GEARBOX_STATUS_NGPONRXGBOXCODEERRCNTSTAT_SHIFT	0
+#define  WAN_TOP_NGPON_GEARBOX_STATUS_NGPONRXGBOXCODEERRCNTSTAT_MASK	0x3fff
+
+
+/*
+ * Register <EPON_10G_GEARBOX>
+ *
+ * Configuration for the 10G EPON gearbox.
+ */
+#define WAN_TOP_EPON_10G_GEARBOX_REG	0xf0
+
+/* TBD */
+#define  WAN_TOP_EPON_10G_GEARBOX_RX_CGEN_RSTN_MASK	0x1
+
+/* TBD */
+#define  WAN_TOP_EPON_10G_GEARBOX_TX_CGEN_RSTN_MASK	0x2
+
+/* TBD */
+#define  WAN_TOP_EPON_10G_GEARBOX_RX_GBOX_RSTN_MASK	0x4
+
+/* TBD */
+#define  WAN_TOP_EPON_10G_GEARBOX_TX_GBOX_RSTN_MASK	0x8
+
+/* TBD */
+#define  WAN_TOP_EPON_10G_GEARBOX_CLK_EN_MASK	0x10
+
+/* TBD */
+#define  WAN_TOP_EPON_10G_GEARBOX_RX_DATA_END_MASK	0x20
+
+/* TBD */
+#define  WAN_TOP_EPON_10G_GEARBOX_TX2RX_LOOP_EN_MASK	0x40
+
+/* TBD */
+#define  WAN_TOP_EPON_10G_GEARBOX_TX_FIFO_OFF_LD_MASK	0x80
+
+/* TBD */
+#define  WAN_TOP_EPON_10G_GEARBOX_TX_FIFO_OFF_SHIFT	8
+#define  WAN_TOP_EPON_10G_GEARBOX_TX_FIFO_OFF_MASK	0x700
+
+/* Reserved */
+#define  WAN_TOP_EPON_10G_GEARBOX_R0_SHIFT	11
+#define  WAN_TOP_EPON_10G_GEARBOX_R0_MASK	0xfffff800
+
+
+#endif /* ! WAN_TOP_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/xlif.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/xlif.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/xlif.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/xlif.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,209 @@
+#ifndef XLIF_H_
+#define XLIF_H_
+
+/*
+ * Register <INTERFACE_DISABLE>
+ *
+ * Interface_Disable
+ */
+#define XLIF_RX_IF_IF_DIS_REG(x)	(0x0 + (x) * 0x200)
+
+/* Disable */
+#define  XLIF_RX_IF_IF_DIS_DISABLE_MASK	0x1
+
+
+/*
+ * Register <OVRFLOW_FLAG> - read-only
+ *
+ * Indicate an overflow event (data valid while FIFO is full).
+ * read clear
+ */
+#define XLIF_RX_IF_OFLW_FLAG_REG	0x4
+
+/* Overflow */
+#define  XLIF_RX_IF_OFLW_FLAG_OFLW_MASK	0x1
+
+
+/*
+ * Register <PROTOCOL_ERR> - read-only
+ *
+ * Indicate RX protocol Error.
+ * read clear
+ */
+#define XLIF_RX_IF_ERR_FLAG_REG		0x8
+
+/* Error */
+#define  XLIF_RX_IF_ERR_FLAG_ERR_MASK	0x1
+
+
+/*
+ * Register <INDICATIONS> - read-only
+ *
+ * eee indications from the XLMAC interface
+ */
+#define XLIF_EEE_IND_REG(r)		(0x78 + (r) * 0x200)
+
+/* lpi_rx_detect */
+#define  XLIF_EEE_IND_LPI_RX_DETECT_MASK	0x1
+
+/* lpi_tx_detect */
+#define  XLIF_EEE_IND_LPI_TX_DETECT_MASK	0x10
+
+
+/*
+ * Register <COSMAP_EN> - read-only
+ *
+ * .
+ */
+#define XLIF_RX_FLOW_CONTROL_COSMAP_EN_REG(r)	(0x20 + (r) * 0x200)
+
+/* PFC_EN */
+#define  XLIF_RX_FLOW_CONTROL_COSMAP_EN_PFC_EN_MASK	0x1
+
+/* LLFC_en */
+#define  XLIF_RX_FLOW_CONTROL_COSMAP_EN_LLFC_EN_MASK	0x10
+
+
+/*
+ * Register <COSMAP> - read-only
+ *
+ * .
+ */
+#define XLIF_RX_FLOW_CONTROL_COSMAP_REG(r)	(0x24 + (r) * 0x200)
+
+/* value */
+#define  XLIF_RX_FLOW_CONTROL_COSMAP_VALUE_SHIFT	0
+#define  XLIF_RX_FLOW_CONTROL_COSMAP_VALUE_MASK	0xffff
+
+
+/*
+ * Register <INTERFACE_ENABLE>
+ *
+ * Interface_Enable
+ */
+#define XLIF_TX_IF_IF_ENABLE_REG(r)	(0x40 + (r) * 0x200)
+
+/* Disable_With_Credits */
+#define  XLIF_TX_IF_IF_ENABLE_DISABLE_WITH_CREDITS_MASK	0x1
+
+/* Disable_WO_Credits */
+#define  XLIF_TX_IF_IF_ENABLE_DISABLE_WO_CREDITS_MASK	0x2
+
+
+/*
+ * Register <READ_CREDITS> - read-only
+ *
+ * Read_Credits
+ */
+#define XLIF_TX_IF_READ_CREDITS_REG(r)	(0x44 + (r) * 0x200)
+
+/* Value */
+#define  XLIF_TX_IF_READ_CREDITS_VALUE_SHIFT	0
+#define  XLIF_TX_IF_READ_CREDITS_VALUE_MASK	0x3ff
+
+
+/*
+ * Register <SET_CREDITS>
+ *
+ * Set_Credits
+ * The enable bit and the new value can be set together.
+ * Then, the enable bit must be turned off, while the new value remains
+ * stable.
+ */
+#define XLIF_TX_IF_SET_CREDITS_REG(r)	(0x48 + (r) * 0x200)
+
+/* Value */
+#define  XLIF_TX_IF_SET_CREDITS_VALUE_SHIFT	0
+#define  XLIF_TX_IF_SET_CREDITS_VALUE_MASK	0x3ff
+
+/* enable */
+#define  XLIF_TX_IF_SET_CREDITS_EN_MASK	0x1000
+
+
+/*
+ * Register <OUTPUTS_CONTROL>
+ *
+ * Control the values of several output signals on the XRDP -> XLMAC
+ * interface.
+ */
+#define XLIF_TX_IF_OUT_CTRL_REG(r)	(0x4c + (r) * 0x200)
+
+/* mac_txerr */
+#define  XLIF_TX_IF_OUT_CTRL_MAC_TXERR_MASK	0x1
+
+/* mac_txcrcerr */
+#define  XLIF_TX_IF_OUT_CTRL_MAC_TXCRCERR_MASK	0x2
+
+/* mac_txosts_sinext */
+#define  XLIF_TX_IF_OUT_CTRL_MAC_TXOSTS_SINEXT_MASK	0x4
+
+/* mac_txcrcmode */
+#define  XLIF_TX_IF_OUT_CTRL_MAC_TXCRCMODE_SHIFT	4
+#define  XLIF_TX_IF_OUT_CTRL_MAC_TXCRCMODE_MASK	0x30
+
+
+/*
+ * Register <UNDERRUN_PROTECTION_ENABLE>
+ *
+ * Underrun_Protection_Enable
+ */
+#define XLIF_TX_IF_URUN_PORT_ENABLE_REG(r)	(0x50 + (r) * 0x200)
+
+/* Enable */
+#define  XLIF_TX_IF_URUN_PORT_ENABLE_ENABLE_MASK	0x1
+
+
+/*
+ * Register <TX_THRESHOLD>
+ *
+ * TX threshold for the TX CDC FIFO in units of 128 bit.
+ * The TX CDC FIFO depth is 16 entries.
+ */
+#define XLIF_TX_IF_TX_THRESHOLD_REG(r)	(0x54 + (r) * 0x200)
+
+/* Value */
+#define  XLIF_TX_IF_TX_THRESHOLD_VALUE_SHIFT	0
+#define  XLIF_TX_IF_TX_THRESHOLD_VALUE_MASK	0xf
+
+
+/*
+ * Register <COSMAP_EN_STATUS> - read-only
+ *
+ * cosmap_en indications from the XLMAC Interface
+ */
+#define XLIF_TX_FLOW_CONTROL_COSMAP_EN_STAT_REG(r)	(0x60 + (r) * 0x200)
+
+/* PFC_EN */
+#define  XLIF_TX_FLOW_CONTROL_COSMAP_EN_STAT_PFC_EN_MASK	0x1
+
+/* LLFC_en */
+#define  XLIF_TX_FLOW_CONTROL_COSMAP_EN_STAT_LLFC_EN_MASK	0x10
+
+
+/*
+ * Register <COSMAP_STATUS> - read-only
+ *
+ * cosmap_status from the XLMAC Interface
+ */
+#define XLIF_TX_FLOW_CONTROL_COSMAP_STAT_REG(r)	(0x64 + (r) * 0x200)
+
+/* value */
+#define  XLIF_TX_FLOW_CONTROL_COSMAP_STAT_VALUE_SHIFT	0
+#define  XLIF_TX_FLOW_CONTROL_COSMAP_STAT_VALUE_MASK	0xffff
+
+
+/*
+ * Register <INDICATIONS> - read-only
+ *
+ * indications from the XLMAC IF
+ */
+#define XLIF_Q_OFF_IND_REG(r)		(0x7c + (r) * 0x200)
+
+/* Q_OFF */
+#define  XLIF_Q_OFF_IND_Q_OFF_SHIFT	0
+#define  XLIF_Q_OFF_IND_Q_OFF_MASK	0xff
+
+/* Failover_on */
+#define  XLIF_Q_OFF_IND_FAILOVER_ON_MASK	0x100
+
+
+#endif /* ! XLIF_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/xport_mab.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/xport_mab.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/xport_mab.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/xport_mab.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,164 @@
+#ifndef WAN_TOPXPORT_MAB_H_
+#define WAN_TOPXPORT_MAB_H_
+
+/* relative to core */
+#define XPORT_MAB_OFFSET_0		0x3300
+
+/*
+ * Register <MSBUS Adaptation Control>
+ *
+ */
+#define XPORT_MAB_CNTRL_REG		0x0
+
+/*
+ * When this bit is set asynchronous RX and TX FIFOs are reset for a port
+ * when the link goes down that is when the local fault is detected.
+*/
+#define  CNTRL_LINK_DOWN_RST_EN_MASK	0x10000
+
+/*
+ * When set resets 10G Port 0 asynchronous TX FIFO and associated logic
+ * (such as credit logic).
+*/
+#define  CNTRL_XGMII_TX_RST_MASK	0x1000
+
+/*
+ * When a bit in this vector is set it resets corresponding port (Port 3-0)
+ * asynchronous TX FIFO and associated logic (such as credit logic and byte
+ * slicers).
+*/
+#define  CNTRL_GMII_TX_RST_SHIFT	8
+#define  CNTRL_GMII_TX_RST_MASK		0xf00
+
+/* When set resets 10G Port 0 asynchronous RX FIFO and associated logic. */
+#define  CNTRL_XGMII_RX_RST_MASK	0x10
+
+/*
+ * When a bit in this vector is set it resets corresponding port (Port 3-0)
+ * asynchronous RX FIFO and associated logic (such as byte packers).
+*/
+#define  CNTRL_GMII_RX_RST_SHIFT	0
+#define  CNTRL_GMII_RX_RST_MASK		0xf
+
+
+/*
+ * Register <MSBUS Adaptation TX WRR Control>
+ *
+ */
+#define XPORT_MAB_TX_WRR_CTRL_REG	0x4
+
+/*
+ * Arbiter Mode 1'b0 - Fixed Mode.
+ * TDM slots allocated regardless of the port activity.
+ * 1'b1 - Work-Conserving Mode.
+ * TDM slots allocation is affected by the port activity.
+*/
+#define  TX_WRR_CTRL_ARB_MODE_MASK	0x10000
+
+/* P3 weight expressed in TDM time slots. */
+#define  TX_WRR_CTRL_P3_WEIGHT_SHIFT	12
+#define  TX_WRR_CTRL_P3_WEIGHT_MASK	0xf000
+
+/* P2 weight expressed in TDM time slots. */
+#define  TX_WRR_CTRL_P2_WEIGHT_SHIFT	8
+#define  TX_WRR_CTRL_P2_WEIGHT_MASK	0xf00
+
+/* P1 weight expressed in TDM time slots. */
+#define  TX_WRR_CTRL_P1_WEIGHT_SHIFT	4
+#define  TX_WRR_CTRL_P1_WEIGHT_MASK	0xf0
+
+/*
+ * P0 weight expressed in TDM time slots.
+ * Note:
+ * Arbitration weights should not be changed from their default values due
+ * to XLMAC implementation specifics.
+ * In 4-port mode MSBUS clock should be set to 4*MAX_PORT_RATE/64.
+*/
+#define  TX_WRR_CTRL_P0_WEIGHT_SHIFT	0
+#define  TX_WRR_CTRL_P0_WEIGHT_MASK	0xf
+
+
+/*
+ * Register <MSBUS Adaptation TX Threshold>
+ *
+ */
+#define XPORT_MAB_TX_THRESHOLD_REG	0x8
+
+/*
+ * XGMII0 (P0) asynchronous TX FIFO read depth at which packet dequeue
+ * starts.
+*/
+#define  TX_THRESHOLD_XGMII0_TX_THRESHOLD_SHIFT	16
+#define  TX_THRESHOLD_XGMII0_TX_THRESHOLD_MASK	0xf0000
+
+/* GMII P3 asynchronous TX FIFO read depth at which packet dequeue starts. */
+#define  TX_THRESHOLD_GMII3_TX_THRESHOLD_SHIFT	12
+#define  TX_THRESHOLD_GMII3_TX_THRESHOLD_MASK	0xf000
+
+/* GMII P2 asynchronous TX FIFO read depth at which packet dequeue starts. */
+#define  TX_THRESHOLD_GMII2_TX_THRESHOLD_SHIFT	8
+#define  TX_THRESHOLD_GMII2_TX_THRESHOLD_MASK	0xf00
+
+/* GMII P1 asynchronous TX FIFO read depth at which packet dequeue starts. */
+#define  TX_THRESHOLD_GMII1_TX_THRESHOLD_SHIFT	4
+#define  TX_THRESHOLD_GMII1_TX_THRESHOLD_MASK	0xf0
+
+/* GMII P0 asynchronous TX FIFO read depth at which packet dequeue starts. */
+#define  TX_THRESHOLD_GMII0_TX_THRESHOLD_SHIFT	0
+#define  TX_THRESHOLD_GMII0_TX_THRESHOLD_MASK	0xf
+
+
+/*
+ * Register <MSBUS Adaptation Link down TX Data>
+ *
+ */
+#define XPORT_MAB_LINK_DOWN_TX_DATA_REG	0xc
+
+/*
+ * When LINK_DOWN_RST_EN = 1 and link is down content of this register is
+ * sent to serdes over XGMII interface.
+ * In GMII mode 0 is sent.
+*/
+#define  LINK_DOWN_TX_DATA_TXCTL_MASK	0x100
+
+/*
+ * When LINK_DOWN_RST_EN = 1 and link is down content of this register is
+ * sent to serdes over XGMII interface.
+ * In GMII mode 0's are sent.
+*/
+#define  LINK_DOWN_TX_DATA_TXD_SHIFT	0
+#define  LINK_DOWN_TX_DATA_TXD_MASK	0xff
+
+
+/*
+ * Register <MSBUS Adaptation Status> - read-only
+ *
+ */
+#define XPORT_MAB_STATUS_REG		0x10
+
+/* 10G Port 0 asynchronous RX FIFO over-run status. */
+#define  STATUS_XGMII_RX_AFIFO_OVERRUN_MASK	0x8000
+
+/* Port 3-0 asynchronous RX FIFO over-run status. */
+#define  STATUS_GMII_RX_AFIFO_OVERRUN_VECT_SHIFT	11
+#define  STATUS_GMII_RX_AFIFO_OVERRUN_VECT_MASK	0x7800
+
+/* 10G Port 0 TX frame under-run status. */
+#define  STATUS_XGMII_TX_FRM_UNDERRUN_MASK	0x400
+
+/* 10G Port 0 TX credits under-run status. */
+#define  STATUS_XGMII_OUTSTANDING_CREDITS_CNT_UNDERRUN_MASK	0x200
+
+/* Port 3-0 TX credits under-run status. */
+#define  STATUS_GMII_OUTSTANDING_CREDITS_CNT_UNDERRUN_VECT_SHIFT	5
+#define  STATUS_GMII_OUTSTANDING_CREDITS_CNT_UNDERRUN_VECT_MASK	0x1e0
+
+/* 10G Port 0 asynchronous TX FIFO over-run status. */
+#define  STATUS_XGMII_TX_AFIFO_OVERRUN_MASK	0x10
+
+/* Port 3-0 asynchronous TX FIFO over-run status. */
+#define  STATUS_GMII_TX_AFIFO_OVERRUN_VECT_SHIFT	0
+#define  STATUS_GMII_TX_AFIFO_OVERRUN_VECT_MASK	0xf
+
+
+#endif /* ! WAN_TOPXPORT_MAB_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/xport_mib_core.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/xport_mib_core.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/xport_mib_core.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/xport_mib_core.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,117 @@
+#ifndef WAN_TOPXPORT_MIB_CORE_H_
+#define WAN_TOPXPORT_MIB_CORE_H_
+
+/* relative to core */
+#define XPORT_MIB_CORE_OFFSET(x)	(0x1000 + (x) * 0x400)
+
+#define XPORT_MIB_CORE_GRX64_REG	0x0
+#define XPORT_MIB_CORE_GRX127_REG	0x8
+#define XPORT_MIB_CORE_GRX255_REG	0x10
+#define XPORT_MIB_CORE_GRX511_REG	0x18
+#define XPORT_MIB_CORE_GRX1023_REG	0x20
+#define XPORT_MIB_CORE_GRX1518_REG	0x28
+#define XPORT_MIB_CORE_GRX1522_REG	0x30
+#define XPORT_MIB_CORE_GRX2047_REG	0x38
+#define XPORT_MIB_CORE_GRX4095_REG	0x40
+#define XPORT_MIB_CORE_GRX9216_REG	0x48
+#define XPORT_MIB_CORE_GRX16383_REG	0x50
+#define XPORT_MIB_CORE_GRXPKT_REG	0x58
+#define XPORT_MIB_CORE_GRXUCA_REG	0x60
+#define XPORT_MIB_CORE_GRXMCA_REG	0x68
+#define XPORT_MIB_CORE_GRXBCA_REG	0x70
+#define XPORT_MIB_CORE_GRXFCS_REG	0x78
+#define XPORT_MIB_CORE_GRXCF_REG	0x80
+#define XPORT_MIB_CORE_GRXPF_REG	0x88
+#define XPORT_MIB_CORE_GRXPP_REG	0x90
+#define XPORT_MIB_CORE_GRXUO_REG	0x98
+#define XPORT_MIB_CORE_GRXUDA_REG	0xa0
+#define XPORT_MIB_CORE_GRXWSA_REG	0xa8
+#define XPORT_MIB_CORE_GRXALN_REG	0xb0
+#define XPORT_MIB_CORE_GRXFLR_REG	0xb8
+#define XPORT_MIB_CORE_GRXFRERR_REG	0xc0
+#define XPORT_MIB_CORE_GRXFCR_REG	0xc8
+#define XPORT_MIB_CORE_GRXOVR_REG	0xd0
+#define XPORT_MIB_CORE_GRXJBR_REG	0xd8
+#define XPORT_MIB_CORE_GRXMTUE_REG	0xe0
+#define XPORT_MIB_CORE_GRXMCRC_REG	0xe8
+#define XPORT_MIB_CORE_GRXPRM_REG	0xf0
+#define XPORT_MIB_CORE_GRXVLN_REG	0xf8
+#define XPORT_MIB_CORE_GRXDVLN_REG	0x100
+#define XPORT_MIB_CORE_GRXTRFU_REG	0x108
+#define XPORT_MIB_CORE_GRXPOK_REG	0x110
+#define XPORT_MIB_CORE_GRXPFCOFF0_REG	0x118
+#define XPORT_MIB_CORE_GRXPFCOFF1_REG	0x120
+#define XPORT_MIB_CORE_GRXPFCOFF2_REG	0x128
+#define XPORT_MIB_CORE_GRXPFCOFF3_REG	0x130
+#define XPORT_MIB_CORE_GRXPFCOFF4_REG	0x138
+#define XPORT_MIB_CORE_GRXPFCOFF5_REG	0x140
+#define XPORT_MIB_CORE_GRXPFCOFF6_REG	0x148
+#define XPORT_MIB_CORE_GRXPFCOFF7_REG	0x150
+#define XPORT_MIB_CORE_GRXPFCP0_REG	0x158
+#define XPORT_MIB_CORE_GRXPFCP1_REG	0x160
+#define XPORT_MIB_CORE_GRXPFCP2_REG	0x168
+#define XPORT_MIB_CORE_GRXPFCP3_REG	0x170
+#define XPORT_MIB_CORE_GRXPFCP4_REG	0x178
+#define XPORT_MIB_CORE_GRXPFCP5_REG	0x180
+#define XPORT_MIB_CORE_GRXPFCP6_REG	0x188
+#define XPORT_MIB_CORE_GRXPFCP7_REG	0x190
+#define XPORT_MIB_CORE_GRXSCHCRC_REG	0x198
+#define XPORT_MIB_CORE_GRXBYT_REG	0x1a0
+#define XPORT_MIB_CORE_GRXRPKT_REG	0x1a8
+#define XPORT_MIB_CORE_GRXUND_REG	0x1b0
+#define XPORT_MIB_CORE_GRXFRG_REG	0x1b8
+#define XPORT_MIB_CORE_GRXRBYT_REG	0x1c0
+#define XPORT_MIB_CORE_GTX64_REG	0x1c8
+#define XPORT_MIB_CORE_GTX127_REG	0x1d0
+#define XPORT_MIB_CORE_GTX255_REG	0x1d8
+#define XPORT_MIB_CORE_GTX511_REG	0x1e0
+#define XPORT_MIB_CORE_GTX1023_REG	0x1e8
+#define XPORT_MIB_CORE_GTX1518_REG	0x1f0
+#define XPORT_MIB_CORE_GTX1522_REG	0x1f8
+#define XPORT_MIB_CORE_GTX2047_REG	0x200
+#define XPORT_MIB_CORE_GTX4095_REG	0x208
+#define XPORT_MIB_CORE_GTX9216_REG	0x210
+#define XPORT_MIB_CORE_GTX16383_REG	0x218
+#define XPORT_MIB_CORE_GTXPOK_REG	0x220
+#define XPORT_MIB_CORE_GTXPKT_REG	0x228
+#define XPORT_MIB_CORE_GTXUCA_REG	0x230
+#define XPORT_MIB_CORE_GTXMCA_REG	0x238
+#define XPORT_MIB_CORE_GTXBCA_REG	0x240
+#define XPORT_MIB_CORE_GTXPF_REG	0x248
+#define XPORT_MIB_CORE_GTXPFC_REG	0x250
+#define XPORT_MIB_CORE_GTXJBR_REG	0x258
+#define XPORT_MIB_CORE_GTXFCS_REG	0x260
+#define XPORT_MIB_CORE_GTXCF_REG	0x268
+#define XPORT_MIB_CORE_GTXOVR_REG	0x270
+#define XPORT_MIB_CORE_GTXDFR_REG	0x278
+#define XPORT_MIB_CORE_GTXEDF_REG	0x280
+#define XPORT_MIB_CORE_GTXSCL_REG	0x288
+#define XPORT_MIB_CORE_GTXMCL_REG	0x290
+#define XPORT_MIB_CORE_GTXLCL_REG	0x298
+#define XPORT_MIB_CORE_GTXXCL_REG	0x2a0
+#define XPORT_MIB_CORE_GTXFRG_REG	0x2a8
+#define XPORT_MIB_CORE_GTXERR_REG	0x2b0
+#define XPORT_MIB_CORE_GTXVLN_REG	0x2b8
+#define XPORT_MIB_CORE_GTXDVLN_REG	0x2c0
+#define XPORT_MIB_CORE_GTXRPKT_REG	0x2c8
+#define XPORT_MIB_CORE_GTXUFL_REG	0x2d0
+#define XPORT_MIB_CORE_GTXPFCP0_REG	0x2d8
+#define XPORT_MIB_CORE_GTXPFCP1_REG	0x2e0
+#define XPORT_MIB_CORE_GTXPFCP2_REG	0x2e8
+#define XPORT_MIB_CORE_GTXPFCP3_REG	0x2f0
+#define XPORT_MIB_CORE_GTXPFCP4_REG	0x2f8
+#define XPORT_MIB_CORE_GTXPFCP5_REG	0x300
+#define XPORT_MIB_CORE_GTXPFCP6_REG	0x308
+#define XPORT_MIB_CORE_GTXPFCP7_REG	0x310
+#define XPORT_MIB_CORE_GTXNCL_REG	0x318
+#define XPORT_MIB_CORE_GTXBYT_REG	0x320
+#define XPORT_MIB_CORE_GRXLPI_REG	0x328
+#define XPORT_MIB_CORE_GRXDLPI_REG	0x330
+#define XPORT_MIB_CORE_GTXLPI_REG	0x338
+#define XPORT_MIB_CORE_GTXDLPI_REG	0x340
+#define XPORT_MIB_CORE_GRXPTLLFC_REG	0x348
+#define XPORT_MIB_CORE_GRXLTLLFC_REG	0x350
+#define XPORT_MIB_CORE_GRXLLFCFCS_REG	0x358
+#define XPORT_MIB_CORE_GTXLTLLFC_REG	0x360
+
+#endif /* ! WAN_TOPXPORT_MIB_CORE_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/xport_mib_reg.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/xport_mib_reg.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/xport_mib_reg.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/xport_mib_reg.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,706 @@
+#ifndef WAN_TOPXPORT_MIB_REG_H_
+#define WAN_TOPXPORT_MIB_REG_H_
+
+/* relative to core */
+#define XPORT_MIB_REG_OFFSET_0		0x3100
+
+/*
+ * Register <MIB 32-bit Direct Access Data Write>
+ *
+ */
+#define XPORT_MIB_REG_DIR_ACC_DATA_WRITE_REG	0x0
+
+/*
+ * Direct register access data write register, bits [63:
+ * 32].
+ * Used only for 64-bit register accesses.
+*/
+#define  XPORT_MIB_DIR_ACC_DATA_WRITE_REG_WRITE_DATA_SHIFT	0
+#define  XPORT_MIB_DIR_ACC_DATA_WRITE_REG_WRITE_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <MIB 32-bit Direct Access Data Read>
+ *
+ */
+#define XPORT_MIB_REG_DIR_ACC_DATA_READ_REG	0x4
+
+/*
+ * Direct register access data read register, bits [63:
+ * 32].
+ * Used only for 64-bit register accesses.
+*/
+#define  XPORT_MIB_DIR_ACC_DATA_READ_REG_READ_DATA_SHIFT	0
+#define  XPORT_MIB_DIR_ACC_DATA_READ_REG_READ_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <MIB Indirect Access Address>
+ *
+ */
+#define XPORT_MIB_REG_INDIR_ACC_ADDR_0_REG	0x8
+
+/*
+ * Transaction Status.
+ * When transaction completes (START_BUSY = 0 after it was set to 1) and
+ * this bit is set it indicates that register transaction completed with
+ * error.
+*/
+#define  XPORT_MIB_INDIR_ACC_ADDR_0_REG_ERR_MASK	0x1000
+
+/*
+ * START_BUSY, Self-clearing.
+ * CPU writes this bit to 1 in order to initiate indirect register
+ * read/write transaction.
+ * When transaction completes hardware clears this bit.
+*/
+#define  XPORT_MIB_INDIR_ACC_ADDR_0_REG_START_BUSY_MASK	0x800
+
+/*
+ * Register transaction:
+ * 0 :
+ * Register Write.
+ * 1 :
+ * Register Read.
+*/
+#define  XPORT_MIB_INDIR_ACC_ADDR_0_REG_R_W_MASK	0x400
+
+/* Register Port ID. */
+#define  XPORT_MIB_INDIR_ACC_ADDR_0_REG_REG_PORT_ID_SHIFT	8
+#define  XPORT_MIB_INDIR_ACC_ADDR_0_REG_REG_PORT_ID_MASK	0x300
+
+/*
+ * Register offset.
+ * Note:
+ * Bit 7 is ignored by HW.
+ * Write it as 0.
+*/
+#define  XPORT_MIB_INDIR_ACC_ADDR_0_REG_REG_OFFSET_SHIFT	0
+#define  XPORT_MIB_INDIR_ACC_ADDR_0_REG_REG_OFFSET_MASK	0xff
+
+
+/*
+ * Register <MIB Indirect Access Data Low>
+ *
+ */
+#define XPORT_MIB_REG_INDIR_ACC_DATA_LOW_0_REG	0xc
+
+/*
+ * Indirect register access data register, bits [31:
+ * 0].
+*/
+#define  XPORT_MIB_INDIR_ACC_DATA_LOW_0_REG_DATA_LOW_SHIFT	0
+#define  XPORT_MIB_INDIR_ACC_DATA_LOW_0_REG_DATA_LOW_MASK	0xffffffff
+
+
+/*
+ * Register <MIB Indirect Access Data High>
+ *
+ */
+#define XPORT_MIB_REG_INDIR_ACC_DATA_HIGH_0_REG	0x10
+
+/*
+ * Indirect register access data register, bits [63:
+ * 32].
+ * Used only for 64-bit register accesses.
+*/
+#define  XPORT_MIB_INDIR_ACC_DATA_HIGH_0_REG_DATA_HIGH_SHIFT	0
+#define  XPORT_MIB_INDIR_ACC_DATA_HIGH_0_REG_DATA_HIGH_MASK	0xffffffff
+
+
+/*
+ * Register <MIB Indirect Access Address>
+ *
+ */
+#define XPORT_MIB_REG_INDIR_ACC_ADDR_1_REG	0x14
+
+/*
+ * Transaction Status.
+ * When transaction completes (START_BUSY = 0 after it was set to 1) and
+ * this bit is set it indicates that register transaction completed with
+ * error.
+*/
+#define  XPORT_MIB_INDIR_ACC_ADDR_1_REG_ERR_MASK	0x1000
+
+/*
+ * START_BUSY, Self-clearing.
+ * CPU writes this bit to 1 in order to initiate indirect register
+ * read/write transaction.
+ * When transaction completes hardware clears this bit.
+*/
+#define  XPORT_MIB_INDIR_ACC_ADDR_1_REG_START_BUSY_MASK	0x800
+
+/*
+ * Register transaction:
+ * 0 :
+ * Register Write.
+ * 1 :
+ * Register Read.
+*/
+#define  XPORT_MIB_INDIR_ACC_ADDR_1_REG_R_W_MASK	0x400
+
+/* Register Port ID. */
+#define  XPORT_MIB_INDIR_ACC_ADDR_1_REG_REG_PORT_ID_SHIFT	8
+#define  XPORT_MIB_INDIR_ACC_ADDR_1_REG_REG_PORT_ID_MASK	0x300
+
+/*
+ * Register offset.
+ * Note:
+ * Bit 7 is ignored by HW.
+ * Write it as 0.
+*/
+#define  XPORT_MIB_INDIR_ACC_ADDR_1_REG_REG_OFFSET_SHIFT	0
+#define  XPORT_MIB_INDIR_ACC_ADDR_1_REG_REG_OFFSET_MASK	0xff
+
+
+/*
+ * Register <MIB Indirect Access Data Low>
+ *
+ */
+#define XPORT_MIB_REG_INDIR_ACC_DATA_LOW_1_REG	0x18
+
+/*
+ * Indirect register access data register, bits [31:
+ * 0].
+*/
+#define  XPORT_MIB_INDIR_ACC_DATA_LOW_1_REG_DATA_LOW_SHIFT	0
+#define  XPORT_MIB_INDIR_ACC_DATA_LOW_1_REG_DATA_LOW_MASK	0xffffffff
+
+
+/*
+ * Register <MIB Indirect Access Data High>
+ *
+ */
+#define XPORT_MIB_REG_INDIR_ACC_DATA_HIGH_1_REG	0x1c
+
+/*
+ * Indirect register access data register, bits [63:
+ * 32].
+ * Used only for 64-bit register accesses.
+*/
+#define  XPORT_MIB_INDIR_ACC_DATA_HIGH_1_REG_DATA_HIGH_SHIFT	0
+#define  XPORT_MIB_INDIR_ACC_DATA_HIGH_1_REG_DATA_HIGH_MASK	0xffffffff
+
+
+/*
+ * Register <MIB Control>
+ *
+ */
+#define XPORT_MIB_REG_CNTRL_REG		0x20
+
+/*
+ * RX and TX EEE Duration Counter Behavior 0 :
+ * Counter behavior is asymmetric mode (100Base-TX, for example).
+ * 1 :
+ * Counter behavior is symmetric mode (1000Base-T, for example).
+*/
+#define  XPORT_MIB_CNTRL_REG_EEE_CNT_MODE_SHIFT	12
+#define  XPORT_MIB_CNTRL_REG_EEE_CNT_MODE_MASK	0xf000
+
+/*
+ * When a bit in this vector is set corresponding XLMAC port statistic
+ * counters saturate at their respective maximum values.
+*/
+#define  XPORT_MIB_CNTRL_REG_SATURATE_EN_SHIFT	8
+#define  XPORT_MIB_CNTRL_REG_SATURATE_EN_MASK	0xf00
+
+/*
+ * When a bit in this vector is set corresponding XLMAC port statistic
+ * counters are clear-on-read.
+*/
+#define  XPORT_MIB_CNTRL_REG_COR_EN_SHIFT	4
+#define  XPORT_MIB_CNTRL_REG_COR_EN_MASK	0xf0
+
+/*
+ * When a bit in this vector is set corresponding XLMAC port statistic
+ * counters are reset.
+*/
+#define  XPORT_MIB_CNTRL_REG_CNT_RST_SHIFT	0
+#define  XPORT_MIB_CNTRL_REG_CNT_RST_MASK	0xf
+
+
+/*
+ * Register <MIB EEE Pulse Duration Control>
+ *
+ */
+#define XPORT_MIB_REG_EEE_PULSE_DURATION_CNTRL_REG	0x24
+
+/*
+ * Timer to generate 10us pulse based on 25MHz refclk.
+ * Using LFSR to count up to 250 value.
+*/
+#define  XPORT_MIB_EEE_PULSE_DURATION_CNTRL_REG_CNT_SHIFT	0
+#define  XPORT_MIB_EEE_PULSE_DURATION_CNTRL_REG_CNT_MASK	0xff
+
+
+/*
+ * Register <MIB Max Packet Size>
+ *
+ */
+#define XPORT_MIB_REG_GPORT0_MAX_PKT_SIZE_REG	0x28
+
+/*
+ * Maximum Packet Size, defaults to 1518B.
+ * Packets over this size are counted by MIB as oversized.
+*/
+#define  XPORT_MIB_GPORT0_MAX_PKT_SIZE_REG_MAX_PKT_SIZE_SHIFT	0
+#define  XPORT_MIB_GPORT0_MAX_PKT_SIZE_REG_MAX_PKT_SIZE_MASK	0x3fff
+
+
+/*
+ * Register <MIB Max Packet Size>
+ *
+ */
+#define XPORT_MIB_REG_GPORT1_MAX_PKT_SIZE_REG	0x2c
+
+/*
+ * Maximum Packet Size, defaults to 1518B.
+ * Packets over this size are counted by MIB as oversized.
+*/
+#define  XPORT_MIB_GPORT1_MAX_PKT_SIZE_REG_MAX_PKT_SIZE_SHIFT	0
+#define  XPORT_MIB_GPORT1_MAX_PKT_SIZE_REG_MAX_PKT_SIZE_MASK	0x3fff
+
+
+/*
+ * Register <MIB Max Packet Size>
+ *
+ */
+#define XPORT_MIB_REG_GPORT2_MAX_PKT_SIZE_REG	0x30
+
+/*
+ * Maximum Packet Size, defaults to 1518B.
+ * Packets over this size are counted by MIB as oversized.
+*/
+#define  XPORT_MIB_GPORT2_MAX_PKT_SIZE_REG_MAX_PKT_SIZE_SHIFT	0
+#define  XPORT_MIB_GPORT2_MAX_PKT_SIZE_REG_MAX_PKT_SIZE_MASK	0x3fff
+
+
+/*
+ * Register <MIB Max Packet Size>
+ *
+ */
+#define XPORT_MIB_REG_GPORT3_MAX_PKT_SIZE_REG	0x34
+
+/*
+ * Maximum Packet Size, defaults to 1518B.
+ * Packets over this size are counted by MIB as oversized.
+*/
+#define  XPORT_MIB_GPORT3_MAX_PKT_SIZE_REG_MAX_PKT_SIZE_SHIFT	0
+#define  XPORT_MIB_GPORT3_MAX_PKT_SIZE_REG_MAX_PKT_SIZE_MASK	0x3fff
+
+
+/*
+ * Register <MIB ECC Control>
+ *
+ */
+#define XPORT_MIB_REG_ECC_CNTRL_REG	0x38
+
+/* ECC enable for Tx MIB memories. */
+#define  XPORT_MIB_ECC_CNTRL_REG_TX_MIB_ECC_EN_MASK	0x2
+
+/* ECC enable for Rx MIB memories. */
+#define  XPORT_MIB_ECC_CNTRL_REG_RX_MIB_ECC_EN_MASK	0x1
+
+
+/*
+ * Register <MIB Force Single Bit ECC Error>
+ *
+ */
+#define XPORT_MIB_REG_FORCE_SB_ECC_ERR_REG	0x3c
+
+/*
+ * Self-clearing.
+ * Force Tx MIB memory instance 3 single bit ECC error.
+ * Do not assert together with force double bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_SB_ECC_ERR_REG_FORCE_TX_MEM3_SERR_MASK	0x100
+
+/*
+ * Self-clearing.
+ * Force Tx MIB memory instance 2 single bit ECC error.
+ * Do not assert together with force double bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_SB_ECC_ERR_REG_FORCE_TX_MEM2_SERR_MASK	0x80
+
+/*
+ * Self-clearing.
+ * Force Tx MIB memory instance 1 single bit ECC error.
+ * Do not assert together with force double bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_SB_ECC_ERR_REG_FORCE_TX_MEM1_SERR_MASK	0x40
+
+/*
+ * Self-clearing.
+ * Force Tx MIB memory instance 0 single bit ECC error.
+ * Do not assert together with force double bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_SB_ECC_ERR_REG_FORCE_TX_MEM0_SERR_MASK	0x20
+
+/*
+ * Self-clearing.
+ * Force Rx MIB memory instance 4 single bit ECC error.
+ * Do not assert together with force double bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_SB_ECC_ERR_REG_FORCE_RX_MEM4_SERR_MASK	0x10
+
+/*
+ * Self-clearing.
+ * Force Rx MIB memory instance 3 single bit ECC error.
+ * Do not assert together with force double bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_SB_ECC_ERR_REG_FORCE_RX_MEM3_SERR_MASK	0x8
+
+/*
+ * Self-clearing.
+ * Force Rx MIB memory instance 2 single bit ECC error.
+ * Do not assert together with force double bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_SB_ECC_ERR_REG_FORCE_RX_MEM2_SERR_MASK	0x4
+
+/*
+ * Self-clearing.
+ * Force Rx MIB memory instance 1 single bit ECC error.
+ * Do not assert together with force double bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_SB_ECC_ERR_REG_FORCE_RX_MEM1_SERR_MASK	0x2
+
+/*
+ * Self-clearing.
+ * Force Rx MIB memory instance 0 single bit ECC error.
+ * Do not assert together with force double bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_SB_ECC_ERR_REG_FORCE_RX_MEM0_SERR_MASK	0x1
+
+
+/*
+ * Register <MIB Force Double Bit ECC Error>
+ *
+ */
+#define XPORT_MIB_REG_FORCE_DB_ECC_ERR_REG	0x40
+
+/*
+ * Self-clearing.
+ * Force Tx MIB memory instance 3 double bit ECC error.
+ * Do not assert together with force single bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_DB_ECC_ERR_REG_FORCE_TX_MEM3_DERR_MASK	0x100
+
+/*
+ * Self-clearing.
+ * Force Tx MIB memory instance 2 double bit ECC error.
+ * Do not assert together with force single bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_DB_ECC_ERR_REG_FORCE_TX_MEM2_DERR_MASK	0x80
+
+/*
+ * Self-clearing.
+ * Force Tx MIB memory instance 1 double bit ECC error.
+ * Do not assert together with force single bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_DB_ECC_ERR_REG_FORCE_TX_MEM1_DERR_MASK	0x40
+
+/*
+ * Self-clearing.
+ * Force Tx MIB memory instance 0 double bit ECC error.
+ * Do not assert together with force single bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_DB_ECC_ERR_REG_FORCE_TX_MEM0_DERR_MASK	0x20
+
+/*
+ * Self-clearing.
+ * Force Rx MIB memory instance 4 double bit ECC error.
+ * Do not assert together with force single bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_DB_ECC_ERR_REG_FORCE_RX_MEM4_DERR_MASK	0x10
+
+/*
+ * Self-clearing.
+ * Force Rx MIB memory instance 3 double bit ECC error.
+ * Do not assert together with force single bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_DB_ECC_ERR_REG_FORCE_RX_MEM3_DERR_MASK	0x8
+
+/*
+ * Self-clearing.
+ * Force Rx MIB memory instance 2 double bit ECC error.
+ * Do not assert together with force single bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_DB_ECC_ERR_REG_FORCE_RX_MEM2_DERR_MASK	0x4
+
+/*
+ * Self-clearing.
+ * Force Rx MIB memory instance 1 double bit ECC error.
+ * Do not assert together with force single bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_DB_ECC_ERR_REG_FORCE_RX_MEM1_DERR_MASK	0x2
+
+/*
+ * Self-clearing.
+ * Force Rx MIB memory instance 0 double bit ECC error.
+ * Do not assert together with force single bit ECC error.
+*/
+#define  XPORT_MIB_FORCE_DB_ECC_ERR_REG_FORCE_RX_MEM0_DERR_MASK	0x1
+
+
+/*
+ * Register <MIB RX MEM0 ECC Status>
+ *
+ */
+#define XPORT_MIB_REG_RX_MEM0_ECC_STATUS_REG	0x44
+
+/*
+ * First memory address in which single bit error or double bit error is
+ * detected.
+*/
+#define  XPORT_MIB_RX_MEM0_ECC_STATUS_REG_MEM_ADDR_SHIFT	3
+#define  XPORT_MIB_RX_MEM0_ECC_STATUS_REG_MEM_ADDR_MASK	0x78
+
+/* Double Bit Error indicates an uncorrectable error occurred. */
+#define  XPORT_MIB_RX_MEM0_ECC_STATUS_REG_DOUBLE_BIT_ECC_ERR_MASK	0x4
+
+/*
+ * Indicates more than one single bit error or double bit error are
+ * detected.
+*/
+#define  XPORT_MIB_RX_MEM0_ECC_STATUS_REG_MULTI_ECC_ERR_MASK	0x2
+
+/*
+ * Single Bit Error (correctable) or Double Bit Error (Uncorrectable)
+ * occurred.
+*/
+#define  XPORT_MIB_RX_MEM0_ECC_STATUS_REG_ECC_ERR_MASK	0x1
+
+
+/*
+ * Register <MIB RX MEM1 ECC Status>
+ *
+ */
+#define XPORT_MIB_REG_RX_MEM1_ECC_STATUS_REG	0x48
+
+/*
+ * First memory address in which single bit error or double bit error is
+ * detected.
+*/
+#define  XPORT_MIB_RX_MEM1_ECC_STATUS_REG_MEM_ADDR_SHIFT	3
+#define  XPORT_MIB_RX_MEM1_ECC_STATUS_REG_MEM_ADDR_MASK	0x78
+
+/* Double Bit Error indicates an uncorrectable error occurred. */
+#define  XPORT_MIB_RX_MEM1_ECC_STATUS_REG_DOUBLE_BIT_ECC_ERR_MASK	0x4
+
+/*
+ * Indicates more than one single bit error or double bit error are
+ * detected.
+*/
+#define  XPORT_MIB_RX_MEM1_ECC_STATUS_REG_MULTI_ECC_ERR_MASK	0x2
+
+/*
+ * Single Bit Error (correctable) or Double Bit Error (Uncorrectable)
+ * occurred.
+*/
+#define  XPORT_MIB_RX_MEM1_ECC_STATUS_REG_ECC_ERR_MASK	0x1
+
+
+/*
+ * Register <MIB RX MEM2 ECC Status>
+ *
+ */
+#define XPORT_MIB_REG_RX_MEM2_ECC_STATUS_REG	0x4c
+
+/*
+ * First memory address in which single bit error or double bit error is
+ * detected.
+*/
+#define  XPORT_MIB_RX_MEM2_ECC_STATUS_REG_MEM_ADDR_SHIFT	3
+#define  XPORT_MIB_RX_MEM2_ECC_STATUS_REG_MEM_ADDR_MASK	0x78
+
+/* Double Bit Error indicates an uncorrectable error occurred. */
+#define  XPORT_MIB_RX_MEM2_ECC_STATUS_REG_DOUBLE_BIT_ECC_ERR_MASK	0x4
+
+/*
+ * Indicates more than one single bit error or double bit error are
+ * detected.
+*/
+#define  XPORT_MIB_RX_MEM2_ECC_STATUS_REG_MULTI_ECC_ERR_MASK	0x2
+
+/*
+ * Single Bit Error (correctable) or Double Bit Error (Uncorrectable)
+ * occurred.
+*/
+#define  XPORT_MIB_RX_MEM2_ECC_STATUS_REG_ECC_ERR_MASK	0x1
+
+
+/*
+ * Register <MIB RX MEM3 ECC Status>
+ *
+ */
+#define XPORT_MIB_REG_RX_MEM3_ECC_STATUS_REG	0x50
+
+/*
+ * First memory address in which single bit error or double bit error is
+ * detected.
+*/
+#define  XPORT_MIB_RX_MEM3_ECC_STATUS_REG_MEM_ADDR_SHIFT	3
+#define  XPORT_MIB_RX_MEM3_ECC_STATUS_REG_MEM_ADDR_MASK	0x78
+
+/* Double Bit Error indicates an uncorrectable error occurred. */
+#define  XPORT_MIB_RX_MEM3_ECC_STATUS_REG_DOUBLE_BIT_ECC_ERR_MASK	0x4
+
+/*
+ * Indicates more than one single bit error or double bit error are
+ * detected.
+*/
+#define  XPORT_MIB_RX_MEM3_ECC_STATUS_REG_MULTI_ECC_ERR_MASK	0x2
+
+/*
+ * Single Bit Error (correctable) or Double Bit Error (Uncorrectable)
+ * occurred.
+*/
+#define  XPORT_MIB_RX_MEM3_ECC_STATUS_REG_ECC_ERR_MASK	0x1
+
+
+/*
+ * Register <MIB RX MEM4 ECC Status>
+ *
+ */
+#define XPORT_MIB_REG_RX_MEM4_ECC_STATUS_REG	0x54
+
+/*
+ * First memory address in which single bit error or double bit error is
+ * detected.
+*/
+#define  XPORT_MIB_RX_MEM4_ECC_STATUS_REG_MEM_ADDR_SHIFT	3
+#define  XPORT_MIB_RX_MEM4_ECC_STATUS_REG_MEM_ADDR_MASK	0x78
+
+/* Double Bit Error indicates an uncorrectable error occurred. */
+#define  XPORT_MIB_RX_MEM4_ECC_STATUS_REG_DOUBLE_BIT_ECC_ERR_MASK	0x4
+
+/*
+ * Indicates more than one single bit error or double bit error are
+ * detected.
+*/
+#define  XPORT_MIB_RX_MEM4_ECC_STATUS_REG_MULTI_ECC_ERR_MASK	0x2
+
+/*
+ * Single Bit Error (correctable) or Double Bit Error (Uncorrectable)
+ * occurred.
+*/
+#define  XPORT_MIB_RX_MEM4_ECC_STATUS_REG_ECC_ERR_MASK	0x1
+
+
+/*
+ * Register <MIB TX MEM0 ECC Status>
+ *
+ */
+#define XPORT_MIB_REG_TX_MEM0_ECC_STATUS_REG	0x58
+
+/*
+ * First memory address in which single bit error or double bit error is
+ * detected.
+*/
+#define  XPORT_MIB_TX_MEM0_ECC_STATUS_REG_MEM_ADDR_SHIFT	3
+#define  XPORT_MIB_TX_MEM0_ECC_STATUS_REG_MEM_ADDR_MASK	0x78
+
+/* Double Bit Error indicates an uncorrectable error occurred. */
+#define  XPORT_MIB_TX_MEM0_ECC_STATUS_REG_DOUBLE_BIT_ECC_ERR_MASK	0x4
+
+/*
+ * Indicates more than one single bit error or double bit error are
+ * detected.
+*/
+#define  XPORT_MIB_TX_MEM0_ECC_STATUS_REG_MULTI_ECC_ERR_MASK	0x2
+
+/*
+ * Single Bit Error (correctable) or Double Bit Error (Uncorrectable)
+ * occurred.
+*/
+#define  XPORT_MIB_TX_MEM0_ECC_STATUS_REG_ECC_ERR_MASK	0x1
+
+
+/*
+ * Register <MIB TX MEM1 ECC Status>
+ *
+ */
+#define XPORT_MIB_REG_TX_MEM1_ECC_STATUS_REG	0x5c
+
+/*
+ * First memory address in which single bit error or double bit error is
+ * detected.
+*/
+#define  XPORT_MIB_TX_MEM1_ECC_STATUS_REG_MEM_ADDR_SHIFT	3
+#define  XPORT_MIB_TX_MEM1_ECC_STATUS_REG_MEM_ADDR_MASK	0x78
+
+/* Double Bit Error indicates an uncorrectable error occurred. */
+#define  XPORT_MIB_TX_MEM1_ECC_STATUS_REG_DOUBLE_BIT_ECC_ERR_MASK	0x4
+
+/*
+ * Indicates more than one single bit error or double bit error are
+ * detected.
+*/
+#define  XPORT_MIB_TX_MEM1_ECC_STATUS_REG_MULTI_ECC_ERR_MASK	0x2
+
+/*
+ * Single Bit Error (correctable) or Double Bit Error (Uncorrectable)
+ * occurred.
+*/
+#define  XPORT_MIB_TX_MEM1_ECC_STATUS_REG_ECC_ERR_MASK	0x1
+
+
+/*
+ * Register <MIB TX MEM2 ECC Status>
+ *
+ */
+#define XPORT_MIB_REG_TX_MEM2_ECC_STATUS_REG	0x60
+
+/*
+ * First memory address in which single bit error or double bit error is
+ * detected.
+*/
+#define  XPORT_MIB_TX_MEM2_ECC_STATUS_REG_MEM_ADDR_SHIFT	3
+#define  XPORT_MIB_TX_MEM2_ECC_STATUS_REG_MEM_ADDR_MASK	0x78
+
+/* Double Bit Error indicates an uncorrectable error occurred. */
+#define  XPORT_MIB_TX_MEM2_ECC_STATUS_REG_DOUBLE_BIT_ECC_ERR_MASK	0x4
+
+/*
+ * Indicates more than one single bit error or double bit error are
+ * detected.
+*/
+#define  XPORT_MIB_TX_MEM2_ECC_STATUS_REG_MULTI_ECC_ERR_MASK	0x2
+
+/*
+ * Single Bit Error (correctable) or Double Bit Error (Uncorrectable)
+ * occurred.
+*/
+#define  XPORT_MIB_TX_MEM2_ECC_STATUS_REG_ECC_ERR_MASK	0x1
+
+
+/*
+ * Register <MIB TX MEM3 ECC Status>
+ *
+ */
+#define XPORT_MIB_REG_TX_MEM3_ECC_STATUS_REG	0x64
+
+/*
+ * First memory address in which single bit error or double bit error is
+ * detected.
+*/
+#define  XPORT_MIB_TX_MEM3_ECC_STATUS_REG_MEM_ADDR_SHIFT	3
+#define  XPORT_MIB_TX_MEM3_ECC_STATUS_REG_MEM_ADDR_MASK	0x78
+
+/* Double Bit Error indicates an uncorrectable error occurred. */
+#define  XPORT_MIB_TX_MEM3_ECC_STATUS_REG_DOUBLE_BIT_ECC_ERR_MASK	0x4
+
+/*
+ * Indicates more than one single bit error or double bit error are
+ * detected.
+*/
+#define  XPORT_MIB_TX_MEM3_ECC_STATUS_REG_MULTI_ECC_ERR_MASK	0x2
+
+/*
+ * Single Bit Error (correctable) or Double Bit Error (Uncorrectable)
+ * occurred.
+*/
+#define  XPORT_MIB_TX_MEM3_ECC_STATUS_REG_ECC_ERR_MASK	0x1
+
+
+#endif /* ! WAN_TOPXPORT_MIB_REG_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/xport_reg.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/xport_reg.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/xport_reg.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/xport_reg.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,842 @@
+#ifndef WAN_TOPXPORT_REG_H_
+#define WAN_TOPXPORT_REG_H_
+
+/* relative to core */
+#define XPORT_REG_OFFSET_0		0x2004
+
+/*
+ * Register <XPORT Revision Control> - read-only
+ *
+ */
+#define XPORT_REG_XPORT_REVISION_REG	0x0
+
+/* XPORT revision. */
+#define  XPORT_XPORT_REVISION_REG_XPORT_REV_SHIFT	0
+#define  XPORT_XPORT_REVISION_REG_XPORT_REV_MASK	0xffffff
+
+
+/*
+ * Register <LED PWM Control>
+ *
+ */
+#define XPORT_REG_LED_PWM_CNTRL_REG	0x68
+
+/* When set LED intensity can be controlled using PWM. */
+#define  XPORT_LED_PWM_CNTRL_REG_PWM_ENABLE_MASK	0x1
+
+
+/*
+ * Register <LED Intensity Control>
+ *
+ */
+#define XPORT_REG_LED_INTENSITY_CNTRL_REG	0x6c
+
+/*
+ * LED_ON_TIME PWM modulated ON (low) time.
+ * LED_ON_LOW and LED_ON_HIGH determine PWM duty cycle for the LED
+ * intensity.
+ * Expressed in 50us units.
+*/
+#define  XPORT_LED_INTENSITY_CNTRL_REG_LED_ON_LOW_SHIFT	16
+#define  XPORT_LED_INTENSITY_CNTRL_REG_LED_ON_LOW_MASK	0xffff0000
+
+/*
+ * LED_ON_TIME PWM modulated OFF (high) time.
+ * LED_ON_LOW and LED_ON_HIGH determine PWM duty cycle for the LED
+ * intensity.
+ * Expressed in 50us units.
+*/
+#define  XPORT_LED_INTENSITY_CNTRL_REG_LED_ON_HIGH_SHIFT	0
+#define  XPORT_LED_INTENSITY_CNTRL_REG_LED_ON_HIGH_MASK	0xffff
+
+
+/*
+ * Register <LED 0 Control>
+ *
+ */
+#define XPORT_REG_LED_0_CNTRL_REG	0x70
+
+/*
+ * When this bit is set MAC/PHY provided link indication is overridden
+ * using lnk_status_ovrd.
+*/
+#define  XPORT_LED_0_CNTRL_REG_LNK_OVRD_EN_MASK	0x8000
+
+/*
+ * When this bit is set MAC/PHY provided speed indications are overridden
+ * using led_spd_ovrd[2:
+ * 0].
+*/
+#define  XPORT_LED_0_CNTRL_REG_SPD_OVRD_EN_MASK	0x4000
+
+/*
+ * Link status override.
+ * Used only for LED.
+*/
+#define  XPORT_LED_0_CNTRL_REG_LNK_STATUS_OVRD_MASK	0x2000
+
+/*
+ * LED speed override.
+ * Default encoding is:
+ * 000 :
+ * 10Mb/s.
+ * 001 :
+ * 100Mb/s.
+ * 010 :
+ * 1000Mb/s.
+ * 011 :
+ * 2.
+ * 5Gb/s.
+ * 100 :
+ * 10Gb/s or higher.
+ * 101 :
+ * Custom speed 1.
+ * 110 :
+ * Custom speed 2.
+ * 111 :
+ * no-link.
+ * Using this register LED speeds can be encoded in any way that suits
+ * customer application.
+*/
+#define  XPORT_LED_0_CNTRL_REG_LED_SPD_OVRD_SHIFT	10
+#define  XPORT_LED_0_CNTRL_REG_LED_SPD_OVRD_MASK	0x1c00
+
+/*
+ * When set to 1'b1 inverts polarity of the activity signal that is used
+ * for ACT_LED.
+*/
+#define  XPORT_LED_0_CNTRL_REG_ACT_LED_POL_SEL_MASK	0x200
+
+/*
+ * When set to 1'b1 inverts polarity of the activity signal that is used
+ * for SPDLNK_LED[2].
+ * Applicable only when the activity drives this LED.
+*/
+#define  XPORT_LED_0_CNTRL_REG_SPDLNK_LED2_ACT_POL_SEL_MASK	0x100
+
+/*
+ * When set to 1'b1 inverts polarity of the activity signal that is used
+ * for SPDLNK_LED[1].
+ * Applicable only when the activity drives this LED.
+*/
+#define  XPORT_LED_0_CNTRL_REG_SPDLNK_LED1_ACT_POL_SEL_MASK	0x80
+
+/*
+ * When set to 1'b1 inverts polarity of the activity signal that is used
+ * for SPDLNK_LED[0].
+ * Applicable only when the activity drives this LED.
+*/
+#define  XPORT_LED_0_CNTRL_REG_SPDLNK_LED0_ACT_POL_SEL_MASK	0x40
+
+/*
+ * Selects source of activity for ACT_LED.
+ * For encoding see description for spdlnk_led0_act_sel.
+*/
+#define  XPORT_LED_0_CNTRL_REG_ACT_LED_ACT_SEL_MASK	0x20
+
+/*
+ * Selects source of activity for SPDLNK_LED[2].
+ * For encoding see description for spdlnk_led0_act_sel.
+*/
+#define  XPORT_LED_0_CNTRL_REG_SPDLNK_LED2_ACT_SEL_MASK	0x10
+
+/*
+ * Selects source of activity for SPDLNK_LED[1].
+ * For encoding see description for spdlnk_led0_act_sel.
+*/
+#define  XPORT_LED_0_CNTRL_REG_SPDLNK_LED1_ACT_SEL_MASK	0x8
+
+/*
+ * Selects source of the activity for SPDLNK_LED[0]:
+ * 0 :
+ * LED is 0 when link is up and blinks when there is activity.
+ * 1 :
+ * LED is 1 and blinks when there is activity.
+*/
+#define  XPORT_LED_0_CNTRL_REG_SPDLNK_LED0_ACT_SEL_MASK	0x4
+
+/* Enables TX_SOP event to contribute to the activity. */
+#define  XPORT_LED_0_CNTRL_REG_TX_ACT_EN_MASK	0x2
+
+/* Enables RX_SOP event to contribute to the activity. */
+#define  XPORT_LED_0_CNTRL_REG_RX_ACT_EN_MASK	0x1
+
+
+/*
+ * Register <LED 0 Link And Speed Encoding Selection>
+ *
+ */
+#define XPORT_REG_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG	0x74
+
+/*
+ * Reserved SPDLNK_LED_SEL[2:
+ * 0] encoding select.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_RSVD_SEL_SPD_ENCODE_2_SHIFT	21
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_RSVD_SEL_SPD_ENCODE_2_MASK	0xe00000
+
+/*
+ * Reserved SPDLNK_LED_SEL[2:
+ * 0] encoding select.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_RSVD_SEL_SPD_ENCODE_1_SHIFT	18
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_RSVD_SEL_SPD_ENCODE_1_MASK	0x1c0000
+
+/*
+ * SPDLNK_LED_SEL[2:
+ * 0] encoding for 10Gb/s and higher link speed.
+ * When SPDLNK_LED_SEL[x] = 1'b0, SPDLNK_LED[x] is driven by bits [17:
+ * 0] of Link and Speed Encoding Register.
+ * When SPDLNK_LED_SEL[x] = 1'b1, SPDLNK_LED[x] is driven by the
+ * activity.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_10G_ENCODE_SHIFT	15
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_10G_ENCODE_MASK	0x38000
+
+/*
+ * SPDLNK_LED_SEL[2:
+ * 0] encoding for 2500Mb/s link speed.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_2500M_ENCODE_SHIFT	12
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_2500M_ENCODE_MASK	0x7000
+
+/*
+ * SPDLNK_LED_SEL[2:
+ * 0] encoding for 1000Mb/s link speed.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_1000M_ENCODE_SHIFT	9
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_1000M_ENCODE_MASK	0xe00
+
+/*
+ * SPDLNK_LED_SEL[2:
+ * 0] encoding for 100Mb/s link speed.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_100M_ENCODE_SHIFT	6
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_100M_ENCODE_MASK	0x1c0
+
+/*
+ * SPDLNK_LED_SEL[2:
+ * 0] encoding for 10Mb/s link speed.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_10M_ENCODE_SHIFT	3
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_10M_ENCODE_MASK	0x38
+
+/*
+ * SPDLNK_LED_SEL[2:
+ * 0] encoding for the no-link state.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_NO_LINK_ENCODE_SHIFT	0
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_NO_LINK_ENCODE_MASK	0x7
+
+
+/*
+ * Register <LED 0 Link And Speed Encoding>
+ *
+ */
+#define XPORT_REG_LED_0_LINK_AND_SPEED_ENCODING_REG	0x78
+
+/*
+ * Reserved SPDLNK_LED_SEL[2:
+ * 0] encoding.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_RSVD_SPD_ENCODE_2_SHIFT	21
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_RSVD_SPD_ENCODE_2_MASK	0xe00000
+
+/*
+ * Reserved SPDLNK_LED_SEL[2:
+ * 0] encoding.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_RSVD_SPD_ENCODE_1_SHIFT	18
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_RSVD_SPD_ENCODE_1_MASK	0x1c0000
+
+/*
+ * SPDLNK_LED[2:
+ * 0] encoding for 10Gb/s and higher link speed.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_M10G_ENCODE_SHIFT	15
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_M10G_ENCODE_MASK	0x38000
+
+/*
+ * SPDLNK_LED[2:
+ * 0] encoding for 2.
+ * 5Gb/s link speed.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_M2500_ENCODE_SHIFT	12
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_M2500_ENCODE_MASK	0x7000
+
+/*
+ * SPDLNK_LED[2:
+ * 0] encoding for 1Gb/s link speed.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_M1000_ENCODE_SHIFT	9
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_M1000_ENCODE_MASK	0xe00
+
+/*
+ * SPDLNK_LED[2:
+ * 0] encoding for 100Mb/s link speed.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_M100_ENCODE_SHIFT	6
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_M100_ENCODE_MASK	0x1c0
+
+/*
+ * SPDLNK_LED[2:
+ * 0] encoding for 10Mb/s link speed.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_M10_ENCODE_SHIFT	3
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_M10_ENCODE_MASK	0x38
+
+/*
+ * SPDLNK_LED[2:
+ * 0] encoding for the no-link state.
+*/
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_NO_LINK_ENCODE_SHIFT	0
+#define  XPORT_LED_0_LINK_AND_SPEED_ENCODING_REG_NO_LINK_ENCODE_MASK	0x7
+
+
+/*
+ * Register <LED 1 Control>
+ *
+ */
+#define XPORT_REG_LED_1_CNTRL_REG	0x7c
+
+/*
+ * When this bit is set MAC/PHY provided link indication is overridden
+ * using lnk_status_ovrd.
+*/
+#define  XPORT_LED_1_CNTRL_REG_LNK_OVRD_EN_MASK	0x8000
+
+/*
+ * When this bit is set MAC/PHY provided speed indications are overridden
+ * using led_spd_ovrd[2:
+ * 0].
+*/
+#define  XPORT_LED_1_CNTRL_REG_SPD_OVRD_EN_MASK	0x4000
+
+/*
+ * Link status override.
+ * Used only for LED.
+*/
+#define  XPORT_LED_1_CNTRL_REG_LNK_STATUS_OVRD_MASK	0x2000
+
+/*
+ * LED speed override.
+ * Default encoding is:
+ * 000 :
+ * 10Mb/s.
+ * 001 :
+ * 100Mb/s.
+ * 010 :
+ * 1000Mb/s.
+ * 011 :
+ * 2.
+ * 5Gb/s.
+ * 100 :
+ * 10Gb/s or higher.
+ * 101 :
+ * Custom speed 1.
+ * 110 :
+ * Custom speed 2.
+ * 111 :
+ * no-link.
+ * Using this register LED speeds can be encoded in any way that suits
+ * customer application.
+*/
+#define  XPORT_LED_1_CNTRL_REG_LED_SPD_OVRD_SHIFT	10
+#define  XPORT_LED_1_CNTRL_REG_LED_SPD_OVRD_MASK	0x1c00
+
+/*
+ * When set to 1'b1 inverts polarity of the activity signal that is used
+ * for ACT_LED.
+*/
+#define  XPORT_LED_1_CNTRL_REG_ACT_LED_POL_SEL_MASK	0x200
+
+/*
+ * When set to 1'b1 inverts polarity of the activity signal that is used
+ * for SPDLNK_LED[2].
+ * Applicable only when the activity drives this LED.
+*/
+#define  XPORT_LED_1_CNTRL_REG_SPDLNK_LED2_ACT_POL_SEL_MASK	0x100
+
+/*
+ * When set to 1'b1 inverts polarity of the activity signal that is used
+ * for SPDLNK_LED[1].
+ * Applicable only when the activity drives this LED.
+*/
+#define  XPORT_LED_1_CNTRL_REG_SPDLNK_LED1_ACT_POL_SEL_MASK	0x80
+
+/*
+ * When set to 1'b1 inverts polarity of the activity signal that is used
+ * for SPDLNK_LED[0].
+ * Applicable only when the activity drives this LED.
+*/
+#define  XPORT_LED_1_CNTRL_REG_SPDLNK_LED0_ACT_POL_SEL_MASK	0x40
+
+/*
+ * Selects source of activity for ACT_LED.
+ * For encoding see description for spdlnk_led0_act_sel.
+*/
+#define  XPORT_LED_1_CNTRL_REG_ACT_LED_ACT_SEL_MASK	0x20
+
+/*
+ * Selects source of activity for SPDLNK_LED[2].
+ * For encoding see description for spdlnk_led0_act_sel.
+*/
+#define  XPORT_LED_1_CNTRL_REG_SPDLNK_LED2_ACT_SEL_MASK	0x10
+
+/*
+ * Selects source of activity for SPDLNK_LED[1].
+ * For encoding see description for spdlnk_led0_act_sel.
+*/
+#define  XPORT_LED_1_CNTRL_REG_SPDLNK_LED1_ACT_SEL_MASK	0x8
+
+/*
+ * Selects source of the activity for SPDLNK_LED[0]:
+ * 0 :
+ * LED is 0 when link is up and blinks when there is activity.
+ * 1 :
+ * LED is 1 and blinks when there is activity.
+*/
+#define  XPORT_LED_1_CNTRL_REG_SPDLNK_LED0_ACT_SEL_MASK	0x4
+
+/* Enables TX_SOP event to contribute to the activity. */
+#define  XPORT_LED_1_CNTRL_REG_TX_ACT_EN_MASK	0x2
+
+/* Enables RX_SOP event to contribute to the activity. */
+#define  XPORT_LED_1_CNTRL_REG_RX_ACT_EN_MASK	0x1
+
+
+/*
+ * Register <LED 1 Link And Speed Encoding Selection>
+ *
+ */
+#define XPORT_REG_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG	0x80
+
+/*
+ * Reserved SPDLNK_LED_SEL[2:
+ * 0] encoding select.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_RSVD_SEL_SPD_ENCODE_2_SHIFT	21
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_RSVD_SEL_SPD_ENCODE_2_MASK	0xe00000
+
+/*
+ * Reserved SPDLNK_LED_SEL[2:
+ * 0] encoding select.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_RSVD_SEL_SPD_ENCODE_1_SHIFT	18
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_RSVD_SEL_SPD_ENCODE_1_MASK	0x1c0000
+
+/*
+ * SPDLNK_LED_SEL[2:
+ * 0] encoding for 10Gb/s and higher link speed.
+ * When SPDLNK_LED_SEL[x] = 1'b0, SPDLNK_LED[x] is driven by bits [17:
+ * 0] of Link and Speed Encoding Register.
+ * When SPDLNK_LED_SEL[x] = 1'b1, SPDLNK_LED[x] is driven by the
+ * activity.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_10G_ENCODE_SHIFT	15
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_10G_ENCODE_MASK	0x38000
+
+/*
+ * SPDLNK_LED_SEL[2:
+ * 0] encoding for 2500Mb/s link speed.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_2500M_ENCODE_SHIFT	12
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_2500M_ENCODE_MASK	0x7000
+
+/*
+ * SPDLNK_LED_SEL[2:
+ * 0] encoding for 1000Mb/s link speed.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_1000M_ENCODE_SHIFT	9
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_1000M_ENCODE_MASK	0xe00
+
+/*
+ * SPDLNK_LED_SEL[2:
+ * 0] encoding for 100Mb/s link speed.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_100M_ENCODE_SHIFT	6
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_100M_ENCODE_MASK	0x1c0
+
+/*
+ * SPDLNK_LED_SEL[2:
+ * 0] encoding for 10Mb/s link speed.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_10M_ENCODE_SHIFT	3
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_10M_ENCODE_MASK	0x38
+
+/*
+ * SPDLNK_LED_SEL[2:
+ * 0] encoding for the no-link state.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_NO_LINK_ENCODE_SHIFT	0
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_SEL_REG_SEL_NO_LINK_ENCODE_MASK	0x7
+
+
+/*
+ * Register <LED 1 Link And Speed Encoding>
+ *
+ */
+#define XPORT_REG_LED_1_LINK_AND_SPEED_ENCODING_REG	0x84
+
+/*
+ * Reserved SPDLNK_LED_SEL[2:
+ * 0] encoding.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_RSVD_SPD_ENCODE_2_SHIFT	21
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_RSVD_SPD_ENCODE_2_MASK	0xe00000
+
+/*
+ * Reserved SPDLNK_LED_SEL[2:
+ * 0] encoding.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_RSVD_SPD_ENCODE_1_SHIFT	18
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_RSVD_SPD_ENCODE_1_MASK	0x1c0000
+
+/*
+ * SPDLNK_LED[2:
+ * 0] encoding for 10Gb/s and higher link speed.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_M10G_ENCODE_SHIFT	15
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_M10G_ENCODE_MASK	0x38000
+
+/*
+ * SPDLNK_LED[2:
+ * 0] encoding for 2.
+ * 5Gb/s link speed.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_M2500_ENCODE_SHIFT	12
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_M2500_ENCODE_MASK	0x7000
+
+/*
+ * SPDLNK_LED[2:
+ * 0] encoding for 1Gb/s link speed.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_M1000_ENCODE_SHIFT	9
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_M1000_ENCODE_MASK	0xe00
+
+/*
+ * SPDLNK_LED[2:
+ * 0] encoding for 100Mb/s link speed.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_M100_ENCODE_SHIFT	6
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_M100_ENCODE_MASK	0x1c0
+
+/*
+ * SPDLNK_LED[2:
+ * 0] encoding for 10Mb/s link speed.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_M10_ENCODE_SHIFT	3
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_M10_ENCODE_MASK	0x38
+
+/*
+ * SPDLNK_LED[2:
+ * 0] encoding for the no-link state.
+*/
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_NO_LINK_ENCODE_SHIFT	0
+#define  XPORT_LED_1_LINK_AND_SPEED_ENCODING_REG_NO_LINK_ENCODE_MASK	0x7
+
+
+/*
+ * Register <LED Blink Rate Control>
+ *
+ */
+#define XPORT_REG_LED_BLINK_RATE_CNTRL_REG	0xd0
+
+/*
+ * Led ON time.
+ * Expressed in 50us units.
+*/
+#define  XPORT_LED_BLINK_RATE_CNTRL_REG_LED_ON_TIME_SHIFT	16
+#define  XPORT_LED_BLINK_RATE_CNTRL_REG_LED_ON_TIME_MASK	0xffff0000
+
+/*
+ * Led OFF time.
+ * Expressed in 50us units.
+*/
+#define  XPORT_LED_BLINK_RATE_CNTRL_REG_LED_OFF_TIME_SHIFT	0
+#define  XPORT_LED_BLINK_RATE_CNTRL_REG_LED_OFF_TIME_MASK	0xffff
+
+
+/*
+ * Register <LED Serial Control>
+ *
+ */
+#define XPORT_REG_LED_SERIAL_CNTRL_REG	0xd4
+
+/*
+ * Indicates number of LED signals per port that are shifted out:
+ * 11 :
+ * 4 LEDs per port mode (SPDLNK_LED[2:
+ * 0] and ACT_LED).
+ * 10 :
+ * 3 LEDs per port mode (SPDLNK_LED[2:
+ * 0]).
+ * 01 :
+ * 3 LEDs per port mode (SPDLNK_LED[1:
+ * 0] and ACT_LED).
+ * 00 :
+ * 2 LEDs per port mode (SPDLNK_LED[1:
+ * 0])
+*/
+#define  XPORT_LED_SERIAL_CNTRL_REG_SMODE_SHIFT	23
+#define  XPORT_LED_SERIAL_CNTRL_REG_SMODE_MASK	0x1800000
+
+/*
+ * Indicates SLED_CLK frequency.
+ * 0 :
+ * SLED_CLK is 6.
+ * 25Mhz.
+ * 1 :
+ * SLED_CLK is 3.
+ * 125Mhz.
+*/
+#define  XPORT_LED_SERIAL_CNTRL_REG_SLED_CLK_FREQUENCY_MASK	0x400000
+
+/*
+ * When this bit is 1'b1 serial LED clock (SCLK) polarity is inverted.
+ * Used with shift registers that trigger on the falling edge.
+*/
+#define  XPORT_LED_SERIAL_CNTRL_REG_SLED_CLK_POL_MASK	0x200000
+
+/*
+ * Serial LED refresh period.
+ * Expressed in 5ms units.
+ * Value of 0 means 32x5ms period.
+*/
+#define  XPORT_LED_SERIAL_CNTRL_REG_REFRESH_PERIOD_SHIFT	16
+#define  XPORT_LED_SERIAL_CNTRL_REG_REFRESH_PERIOD_MASK	0x1f0000
+
+/*
+ * When the corresponding bit is set, port LEDs are shifted out.
+ * When all bits are cleared, serial LED interface is disabled.
+*/
+#define  XPORT_LED_SERIAL_CNTRL_REG_PORT_EN_SHIFT	0
+#define  XPORT_LED_SERIAL_CNTRL_REG_PORT_EN_MASK	0xffff
+
+
+/*
+ * Register <Refresh Period Control>
+ *
+ */
+#define XPORT_REG_REFRESH_PERIOD_CNTRL_REG	0xd8
+
+/*
+ * This register is used only for debug purposes.
+ * It controls REFRESH_PERIOD time unit that is based on 25MHz clock.
+ * default is 5 ms.
+*/
+#define  XPORT_REFRESH_PERIOD_CNTRL_REG_REFRESH_PERIOD_CNT_SHIFT	0
+#define  XPORT_REFRESH_PERIOD_CNTRL_REG_REFRESH_PERIOD_CNT_MASK	0xffffff
+
+
+/*
+ * Register <Aggregate LED Control>
+ *
+ */
+#define XPORT_REG_AGGREGATE_LED_CNTRL_REG	0xdc
+
+/*
+ * When set to 1'b1 inverts polarity of the link signal that is used for
+ * aggregate LNK_LED.
+*/
+#define  XPORT_AGGREGATE_LED_CNTRL_REG_LNK_POL_SEL_MASK	0x40000
+
+/*
+ * When set to 1'b1 inverts polarity of the activity signal that is used
+ * for aggregate ACT_LED.
+*/
+#define  XPORT_AGGREGATE_LED_CNTRL_REG_ACT_POL_SEL_MASK	0x20000
+
+/*
+ * Selects behavior for aggregate ACT_LED.
+ * Encoded as:
+ * 0 :
+ * LED is 0 when aggregate link is up and blinks when there is activity.
+ * LED is 1 when aggregate link is down.
+ * 1 :
+ * LED is 1 and blinks when there is activity, regardless of the aggregate
+ * link status.
+*/
+#define  XPORT_AGGREGATE_LED_CNTRL_REG_ACT_SEL_MASK	0x10000
+
+/*
+ * When the corresponding bit is set, port LEDs are included in aggregate
+ * LED signals.
+ * When all bits are cleared, aggregate LED interface is disabled.
+*/
+#define  XPORT_AGGREGATE_LED_CNTRL_REG_PORT_EN_SHIFT	0
+#define  XPORT_AGGREGATE_LED_CNTRL_REG_PORT_EN_MASK	0xffff
+
+
+/*
+ * Register <Aggregate LED Blink Rate Control>
+ *
+ */
+#define XPORT_REG_AGGREGATE_LED_BLINK_RATE_CNTRL_REG	0xe0
+
+/*
+ * Led ON time.
+ * Expressed in 50us units.
+*/
+#define  XPORT_AGGREGATE_LED_BLINK_RATE_CNTRL_REG_LED_ON_TIME_SHIFT	16
+#define  XPORT_AGGREGATE_LED_BLINK_RATE_CNTRL_REG_LED_ON_TIME_MASK	0xffff0000
+
+/*
+ * Led OFF time.
+ * Expressed in 50us units.
+*/
+#define  XPORT_AGGREGATE_LED_BLINK_RATE_CNTRL_REG_LED_OFF_TIME_SHIFT	0
+#define  XPORT_AGGREGATE_LED_BLINK_RATE_CNTRL_REG_LED_OFF_TIME_MASK	0xffff
+
+
+/*
+ * Register <Spare Control>
+ *
+ */
+#define XPORT_REG_SPARE_CNTRL_REG	0xe4
+
+/*
+ * Spare register.
+ * Reserved for future use.
+*/
+#define  XPORT_SPARE_CNTRL_REG_SPARE_REG_SHIFT	0
+#define  XPORT_SPARE_CNTRL_REG_SPARE_REG_MASK	0xffffffff
+
+
+/*
+ * Register <XPORT Control 1>
+ *
+ */
+#define XPORT_REG_XPORT_CNTRL_1_REG	0x1fc
+
+/*
+ * MSBUS clock select.
+ * 0 :
+ * 500MHz.
+ * 1 :
+ * 644.
+ * 53125MHz.
+ * 644.
+ * 53125MHz clock should be used ONLY when 10G AE and SGMII serdes are
+ * simultaneously connected to XRDP.
+*/
+#define  XPORT_CNTRL_1_REG_MSBUS_CLK_SEL_MASK	0x1000
+
+/*
+ * selects port driving WAN LED0 set.
+ * 0 :
+ * P0 drives LEDs.
+ * 1 :
+ * P1 drives LEDs.
+*/
+#define  XPORT_CNTRL_1_REG_WAN_LED0_SEL_MASK	0x800
+
+/*
+ * When this bit is set, XPORT internal register bus bridges are not
+ * automatically reset/reinitialized when the UBUS slave port times out.
+*/
+#define  XPORT_CNTRL_1_REG_TIMEOUT_RST_DISABLE_MASK	0x400
+
+/*
+ * P0 Mode:
+ * 0 :
+ * P0 operates in GMII mode.
+ * 1 :
+ * P0 operates in XGMII mode.
+*/
+#define  XPORT_CNTRL_1_REG_P0_MODE_MASK	0x100
+
+
+/*
+ * Register <Crossbar Status> - read-only
+ *
+ */
+#define XPORT_REG_CROSSBAR_STATUS_REG	0x200
+
+/*
+ * When set indicates that full-duplex link is established.
+ * Half-duplex is not supported and indicates erroneous link.
+*/
+#define  XPORT_CROSSBAR_STATUS_REG_FULL_DUPLEX_MASK	0x80
+
+/* When set indicates that TX PAUSE is negotiated. */
+#define  XPORT_CROSSBAR_STATUS_REG_PAUSE_TX_MASK	0x40
+
+/* When set indicates that RX PAUSE is negotiated. */
+#define  XPORT_CROSSBAR_STATUS_REG_PAUSE_RX_MASK	0x20
+
+/*
+ * When set indicate that link is 2.
+ * 5Gb.
+*/
+#define  XPORT_CROSSBAR_STATUS_REG_SPEED_2500_MASK	0x10
+
+/* When set indicate that link is 1Gb. */
+#define  XPORT_CROSSBAR_STATUS_REG_SPEED_1000_MASK	0x8
+
+/* When set indicate that link is 100Mb. */
+#define  XPORT_CROSSBAR_STATUS_REG_SPEED_100_MASK	0x4
+
+/* When set indicate that link is 10Mb. */
+#define  XPORT_CROSSBAR_STATUS_REG_SPEED_10_MASK	0x2
+
+/*
+ * Link Status.
+ * When 1 indicates that link is up for the selected PHY/RGMII.
+*/
+#define  XPORT_CROSSBAR_STATUS_REG_LINK_STATUS_MASK	0x1
+
+
+/*
+ * Register <PON AE SERDES Status> - read-only
+ *
+ */
+#define XPORT_REG_PON_AE_SERDES_STATUS_REG	0x204
+
+/* When 0 indicates presence of the optical module. */
+#define  XPORT_PON_AE_SERDES_STATUS_REG_MOD_DEF0_MASK	0x40
+
+/*
+ * Non-filtered signal detect (or loss of signal) from the pin as provided
+ * by the external optical module.
+ * Please consult used optical module datasheet for polarity.
+ * NVRAM bit that indicates expected polarity is recommended.
+*/
+#define  XPORT_PON_AE_SERDES_STATUS_REG_EXT_SIG_DET_MASK	0x20
+
+/*
+ * PLL1 Lock.
+ * When 1'b1, indicates that single SERDES PLL1 is locked.
+ * Only one of PLLs (PLL0 or PLL1) is active at any time, depending on the
+ * operational mode.
+*/
+#define  XPORT_PON_AE_SERDES_STATUS_REG_PLL1_LOCK_MASK	0x10
+
+/*
+ * PLL0 Lock.
+ * When 1'b1, indicates that single SERDES PLL0 is locked.
+ * Only one of PLLs (PLL0 or PLL1) is active at any time, depending on the
+ * operational mode.
+*/
+#define  XPORT_PON_AE_SERDES_STATUS_REG_PLL0_LOCK_MASK	0x8
+
+/*
+ * Link Status.
+ * When 1'b1, indicates that link is up.
+*/
+#define  XPORT_PON_AE_SERDES_STATUS_REG_LINK_STATUS_MASK	0x4
+
+/*
+ * CDR Lock.
+ * When 1'b1, indicates that CDR is locked.
+*/
+#define  XPORT_PON_AE_SERDES_STATUS_REG_CDR_LOCK_MASK	0x2
+
+/*
+ * Filtered Rx Signal Detect.
+ * When 1'b1 indicates presence of the signal on Rx pins.
+*/
+#define  XPORT_PON_AE_SERDES_STATUS_REG_RX_SIGDET_MASK	0x1
+
+
+#endif /* ! WAN_TOPXPORT_REG_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/xport_xlmac_core.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/xport_xlmac_core.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/xport_xlmac_core.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/xport_xlmac_core.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,2010 @@
+#ifndef WAN_TOPXPORT_XLMAC_CORE_H_
+#define WAN_TOPXPORT_XLMAC_CORE_H_
+
+/* relative to core */
+#define XPORT_XLMAC_CORE_OFFSET(x)	(0x0 + (x) * 0x400)
+
+/*
+ * Register <MAC control for XLMAC0/port0 (LPORT port0)>
+ *
+ * MAC control.
+ */
+#define XPORT_XLMAC_CORE_CTRL_REG	0x0
+
+/*
+ * Extended Higig 2 header is also known as sirius header.
+ * Setting this bit to 0 will disable parsing for the extended header
+ * bit(5th bit of 8th header byte) in HG2 header and hence all the Higig 2
+ * packets will be treated as normal Higig2 packets irrespective of
+ * extended header bit value.
+ * Default value of this field is 1 which will enable parsing extended
+ * header bit in every Higig 2 header.
+*/
+#define  CTRL_EXTENDED_HIG2_EN_MASK	0x4000
+
+/*
+ * This configuration chooses between link status indication from software
+ * (SW_LINK_STATUS) or the hardware link status (hw_link_status)indication
+ * from the TSC.
+ * If reset, it selects the software link status
+*/
+#define  CTRL_LINK_STATUS_SELECT_MASK	0x2000
+
+/*
+ * Link status indication from Software.
+ * If set, indicates that link is active.
+*/
+#define  CTRL_SW_LINK_STATUS_MASK	0x1000
+
+/*
+ * If set, this will override the one column idle/sequence ordered set
+ * check before SOP in XGMII mode - effectively supporting reception of
+ * packets with 1 byte IPG in XGMII mode
+*/
+#define  CTRL_XGMII_IPG_CHECK_DISABLE_MASK	0x800
+
+/*
+ * Resets the RS layer functionality - Fault detection and related
+ * responses are disabled and IDLEs are sent on line
+*/
+#define  CTRL_RS_SOFT_RESET_MASK	0x400
+
+/* Reserved */
+#define  CTRL_RSVD_5_MASK		0x200
+
+/*
+ * If set, during the local loopback mode, the transmit packets are also
+ * sent to the transmit line interface, apart from the loopback operation
+*/
+#define  CTRL_LOCAL_LPBK_LEAK_ENB_MASK	0x100
+
+/* Reserved */
+#define  CTRL_RSVD_4_MASK		0x80
+
+/*
+ * If set, disables the corresponding port logic and status registers only.
+ * Packet data and flow control logic is disabled.
+ * Fault handling is active and the MAC will continue to respond to credits
+ * from TSC.
+ * When the soft reset is cleared MAC will issue a fresh set of credits to
+ * EP in transmit side.
+*/
+#define  CTRL_SOFT_RESET_MASK		0x40
+
+/*
+ * If set, enable LAG Failover.
+ * This bit has priority over LOCAL_LPBK.
+ * The lag failover kicks in when the link status selected by
+ * LINK_STATUS_SELECT transitions from 1 to 0.
+ * TSC clock and TSC credits must be active for lag failover.
+*/
+#define  CTRL_LAG_FAILOVER_EN_MASK	0x20
+
+/*
+ * If set, XLMAC will move from lag failover state to normal operation.
+ * This bit should be set after link is up.
+*/
+#define  CTRL_REMOVE_FAILOVER_LPBK_MASK	0x10
+
+/* Reserved */
+#define  CTRL_RSVD_1_MASK		0x8
+
+/*
+ * If set, enables local loopback from TX to RX.
+ * This loopback is on the line side after clock domain crossing - from the
+ * last TX pipeline stage to the first RX pipeline stage.
+ * Hence, TSC clock and TSC credits must be active for loopback.
+ * LAG_FAILOVER_EN should be disabled for this bit to work.
+*/
+#define  CTRL_LOCAL_LPBK_MASK		0x4
+
+/* If set, enables MAC receive datapath and flowcontrol logic. */
+#define  CTRL_RX_EN_MASK		0x2
+
+/*
+ * If set, enables MAC transmit datapath and flowcontrol logic.
+ * When disabled, MAC will respond to TSC credits with IDLE codewords.
+*/
+#define  CTRL_TX_EN_MASK		0x1
+
+
+/*
+ * Register <XLMAC Mode for XLMAC0/port0 (LPORT port0)>
+ *
+ * XLMAC Mode register
+ */
+#define XPORT_XLMAC_CORE_MODE_REG	0x8
+
+/* Port Speed, used for LED indications and internal buffer sizing. */
+#define  MODE_SPEED_MODE_SHIFT		4
+#define  MODE_SPEED_MODE_MASK		0x70
+
+/* If set, excludes the SOP byte for CRC calculation in HIGIG modes */
+#define  MODE_NO_SOP_FOR_CRC_HG_MASK	0x8
+
+/* Packet Header mode. */
+#define  MODE_HDR_MODE_SHIFT		0
+#define  MODE_HDR_MODE_MASK		0x7
+
+
+/*
+ * Register <Spare reg 0 for ECO for XLMAC0/port0 (LPORT port0)>
+ *
+ * Spare reg 0 for ECO
+ */
+#define XPORT_XLMAC_CORE_SPARE0_REG	0x10
+
+/* SPARE REGISTER 0 */
+#define  SPARE0_RSVD_SHIFT		0
+#define  SPARE0_RSVD_MASK		0xffffffff
+
+
+/*
+ * Register <Spare reg 1 for ECO for XLMAC0/port0 (LPORT port0)>
+ *
+ * Spare reg 1 for ECO
+ */
+#define XPORT_XLMAC_CORE_SPARE1_REG	0x18
+
+/* SPARE REGISTER 1 */
+#define  SPARE1_RSVD_SHIFT		0
+#define  SPARE1_RSVD_MASK		0x3
+
+
+/*
+ * Register <Transmit control for XLMAC0/port0 (LPORT port0)>
+ *
+ * Transmit control.
+ * This XLMAC core register is 42 bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_TX_CTRL) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_TX_CTRL_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_TX_CTRL_REG	0x20
+
+/*
+ * Indicates the number of 16-byte cells that are buffered per packet in
+ * the Tx CDC FIFO, before starting transmission of the packet on the line
+ * side.
+ * This setting is useful to prevent underflow issues if the EP logic pumps
+ * in data at port rate, rather than bursting at full rate.
+ * This mode will increase the overall latency.
+ * In quad port mode, this field should be set >= 1 and <= 4 for each port.
+ * In single port mode, this field should be set >= 1 and <= 16 for the
+ * four lane port (port0).
+ * In dual port mode, this field should be set >= 1 and <= 8 for each two
+ * lane port (port0 and port2).
+ * In tri1/tri2, this field should be set >= 1 and <= 4 for each single
+ * lane port, and >= 1 and <= 8 for the two lane port.
+*/
+#define  TX_CTRL_TX_THRESHOLD_SHIFT	38
+#define  TX_CTRL_TX_THRESHOLD_MASK	0x3c000000000
+
+/*
+ * If set, MAC accepts packets from the EP but does not write to the CDC
+ * FIFO and discards them on the core side without updating the statistics.
+*/
+#define  TX_CTRL_EP_DISCARD_MASK	0x2000000000
+
+/*
+ * Number of preamble bytes for transmit IEEE packets, this value should
+ * include the K.
+ * SOP & SFD character as well
+*/
+#define  TX_CTRL_TX_PREAMBLE_LENGTH_SHIFT	33
+#define  TX_CTRL_TX_PREAMBLE_LENGTH_MASK	0x1e00000000
+
+/*
+ * Number of bytes to transmit before adding THROT_NUM bytes to the IPG.
+ * This configuration is used for WAN IPG throttling.
+ * Refer MAC specs for more details.
+*/
+#define  TX_CTRL_THROT_DENOM_SHIFT	25
+#define  TX_CTRL_THROT_DENOM_MASK	0x1fe000000
+
+/*
+ * Number of bytes of extra IPG added whenever THROT_DENOM bytes have been
+ * transmitted.
+ * This configuration is used for WAN IPG throttling.
+ * Refer MAC specs for more details.
+*/
+#define  TX_CTRL_THROT_NUM_SHIFT	19
+#define  TX_CTRL_THROT_NUM_MASK		0x1f80000
+
+/*
+ * Average interpacket gap.
+ * Must be programmed >= 8.
+ * Per packet IPG will vary based on DIC for 10G+ speeds.
+*/
+#define  TX_CTRL_AVERAGE_IPG_SHIFT	12
+#define  TX_CTRL_AVERAGE_IPG_MASK	0x7f000
+
+/*
+ * If padding is enabled, packets smaller than PAD_THRESHOLD are padded to
+ * this size.
+ * This must be set to a value >= 17 (decimal)
+*/
+#define  TX_CTRL_PAD_THRESHOLD_SHIFT	5
+#define  TX_CTRL_PAD_THRESHOLD_MASK	0xfe0
+
+/* If set, enable XLMAC to pad packets smaller than PAD_THRESHOLD on the Tx */
+#define  TX_CTRL_PAD_EN_MASK		0x10
+
+/*
+ * If reset, MAC forces the first byte of a packet to be /S/ character
+ * (0xFB) irrespective of incoming EP data at SOP location in HIGIG modes
+*/
+#define  TX_CTRL_TX_ANY_START_MASK	0x8
+
+/*
+ * If set, MAC accepts packets from the EP and discards them on the line
+ * side.
+ * The statistics are updated.
+*/
+#define  TX_CTRL_DISCARD_MASK		0x4
+
+/* CRC mode for Transmit Side */
+#define  TX_CTRL_CRC_MODE_SHIFT		0
+#define  TX_CTRL_CRC_MODE_MASK		0x3
+
+
+/*
+ * Register <Transmit Source Address for XLMAC0/port0 (LPORT port0)>
+ *
+ * Transmit Source Address.
+ * This XLMAC core register is 48 bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_TX_MAC_SA) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_TX_MAC_SA_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_TX_MAC_SA_REG	0x28
+
+/* Source Address for PAUSE/PFC packets generated by the MAC */
+#define  TX_MAC_SA_CTRL_SA_SHIFT	0
+#define  TX_MAC_SA_CTRL_SA_MASK		0xffffffffffff
+
+
+/*
+ * Register <Receive control for XLMAC0/port0 (LPORT port0)>
+ *
+ * Receive control.
+ */
+#define XPORT_XLMAC_CORE_RX_CTRL_REG	0x30
+
+/*
+ * This configuration is used to pass or drop pfc packets when
+ * pfc_ether_type is not equal to 0x8808.
+ * If set, PFC frames are passed to system side.
+ * Otherwise, PFC frames are dropped in XLMAC.
+ * This configuration is used in Rx CDC mode only.
+*/
+#define  RX_CTRL_RX_PASS_PFC_MASK	0x8000
+
+/*
+ * If set, PAUSE frames are passed to system side.
+ * Otherwise, PAUSE frames are dropped in XLMAC. This configuration is used
+ * in Rx CDC mode only.
+*/
+#define  RX_CTRL_RX_PASS_PAUSE_MASK	0x4000
+
+/*
+ * This configuration is used to drop or pass all control frames (with
+ * ether type 0x8808) except pause packets.
+ * If set, all control frames are passed to system side.
+ * Otherwise, control frames (including pfc frames with ether type 0x8808)
+ * are dropped in XLMAC.
+ * This configuration is used in Rx CDC mode only.
+*/
+#define  RX_CTRL_RX_PASS_CTRL_MASK	0x2000
+
+/* Reserved */
+#define  RX_CTRL_RSVD_3_MASK		0x1000
+
+/* Reserved */
+#define  RX_CTRL_RSVD_2_MASK		0x800
+
+/*
+ * The runt threshold, below which the packets are dropped (CDC mode) or
+ * marked as runt (Low latency mode).
+ * Should be programmed < 96 bytes (decimal)
+*/
+#define  RX_CTRL_RUNT_THRESHOLD_SHIFT	4
+#define  RX_CTRL_RUNT_THRESHOLD_MASK	0x7f0
+
+/*
+ * If set, MAC checks for IEEE Ethernet preamble - K.
+ * SOP + 6 "0x55" preamble bytes + "0xD5" SFD character - if this sequence
+ * is missing it is treated as an errored packet
+*/
+#define  RX_CTRL_STRICT_PREAMBLE_MASK	0x8
+
+/* If set, CRC is stripped from the received packet */
+#define  RX_CTRL_STRIP_CRC_MASK		0x4
+
+/* If set, MAC allows any undefined control character to start a packet */
+#define  RX_CTRL_RX_ANY_START_MASK	0x2
+
+/* Reserved */
+#define  RX_CTRL_RSVD_1_MASK		0x1
+
+
+/*
+ * Register <Receive source address for XLMAC0/port0 (LPORT port0)>
+ *
+ * Receive source address.
+ * This XLMAC core register is 48 bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_RX_MAC_SA) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_RX_MAC_SA_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_RX_MAC_SA_REG	0x38
+
+/*
+ * Source Address recognized for MAC control packets in addition to the
+ * standard 0x0180C2000001
+*/
+#define  RX_MAC_SA_RX_SA_SHIFT		0
+#define  RX_MAC_SA_RX_SA_MASK		0xffffffffffff
+
+
+/*
+ * Register <Receive maximum packet size for XLMAC0/port0 (LPORT port0)>
+ *
+ * Receive maximum packet size.
+ */
+#define XPORT_XLMAC_CORE_RX_MAX_SIZE_REG	0x40
+
+/*
+ * Maximum packet size in receive direction, exclusive of preamble & CRC in
+ * strip mode.
+ * Packets greater than this size are truncated to this value.
+*/
+#define  RX_MAX_SIZE_RX_MAX_SIZE_SHIFT	0
+#define  RX_MAX_SIZE_RX_MAX_SIZE_MASK	0x3fff
+
+
+/*
+ * Register <Inner and Outer VLAN tag fields for XLMAC0/port0 (LPORT port0)>
+ *
+ * Inner and Outer VLAN tag fields This XLMAC core register is 34 bits wide
+ * in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_RX_VLAN_TAG) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ */
+#define XPORT_XLMAC_CORE_RX_VLAN_TAG_REG	0x48
+
+/* If set, MAC enables VLAN tag detection using the OUTER_VLAN_TAG */
+#define  RX_VLAN_TAG_OUTER_VLAN_TAG_ENABLE_MASK	0x200000000
+
+/* If set, MAC enables VLAN tag detection using the INNER_VLAN_TAG */
+#define  RX_VLAN_TAG_INNER_VLAN_TAG_ENABLE_MASK	0x100000000
+
+/* TPID field for Outer VLAN tag */
+#define  RX_VLAN_TAG_OUTER_VLAN_TAG_SHIFT	16
+#define  RX_VLAN_TAG_OUTER_VLAN_TAG_MASK	0xffff0000
+
+/* TPID field for Inner VLAN tag */
+#define  RX_VLAN_TAG_INNER_VLAN_TAG_SHIFT	0
+#define  RX_VLAN_TAG_INNER_VLAN_TAG_MASK	0xffff
+
+
+/*
+ * Register <Control for LSS (ordered set) messages for XLMAC0/port0 (LPORT port0)>
+ *
+ * Control for LSS (ordered set) messages
+ */
+#define XPORT_XLMAC_CORE_RX_LSS_CTRL_REG	0x50
+
+/*
+ * If set, the Receive Pause, PFC & LLFC timers are reset whenever the link
+ * status is down, or local or remote faults are received.
+*/
+#define  RX_LSS_CTRL_RESET_FLOW_CONTROL_TIMERS_ON_LINK_DOWN_MASK	0x80
+
+/*
+ * This bit determines the way MAC handles data during link interruption
+ * state, if LINK_INTERRUPTION_DISABLE is reset.
+ * If set, during link interruption state, MAC drops transmit-data
+ * (statistics are updated) and sends IDLEs on the wire.
+ * If reset, transmit data is stalled in the internal FIFO under link
+ * interruption state.
+*/
+#define  RX_LSS_CTRL_DROP_TX_DATA_ON_LINK_INTERRUPT_MASK	0x40
+
+/*
+ * This bit determines the way MAC handles data during remote fault state,
+ * if REMOTE_FAULT_DISABLE is reset.
+ * If set, during remote fault state, MAC drops transmit-data (statistics
+ * are updated) and sends IDLEs on the wire.
+ * If reset, transmit data is stalled in the internal FIFO under remote
+ * fault state.
+*/
+#define  RX_LSS_CTRL_DROP_TX_DATA_ON_REMOTE_FAULT_MASK	0x20
+
+/*
+ * This bit determines the way MAC handles data during local fault state,
+ * if LOCAL_FAULT_DISABLE is reset.
+ * If set, during local fault state, MAC drops transmit-data (statistics
+ * are updated) and sends remote faults on the wire.
+ * If reset, transmit data is stalled in the internal FIFO under local
+ * fault state.
+*/
+#define  RX_LSS_CTRL_DROP_TX_DATA_ON_LOCAL_FAULT_MASK	0x10
+
+/*
+ * This bit determines the transmit response during link interruption
+ * state.
+ * The LINK_INTERRUPTION_STATUS bit is always updated irrespective of this
+ * configuration.
+ * If set, MAC will continue to transmit data irrespective of
+ * LINK_INTERRUPTION_STATUS.
+ * If reset, MAC transmit behavior is governed by
+ * DROP_TX_DATA_ON_LINK_INTERRUPT configuration.
+*/
+#define  RX_LSS_CTRL_LINK_INTERRUPTION_DISABLE_MASK	0x8
+
+/*
+ * If set, the transmit fault responses are determined from input pins
+ * rather than internal receive status.
+ * In this mode, input fault from pins (from a peer MAC) is directly
+ * relayed on the transmit side of this MAC.
+ * See specification document for more details.
+*/
+#define  RX_LSS_CTRL_USE_EXTERNAL_FAULTS_FOR_TX_MASK	0x4
+
+/*
+ * This bit determines the transmit response during remote fault state.
+ * The REMOTE_FAULT_STATUS bit is always updated irrespective of this
+ * configuration.
+ * If set, MAC will continue to transmit data irrespective of
+ * REMOTE_FAULT_STATUS.
+ * If reset, MAC transmit behavior is governed by
+ * DROP_TX_DATA_ON_REMOTE_FAULT configuration.
+*/
+#define  RX_LSS_CTRL_REMOTE_FAULT_DISABLE_MASK	0x2
+
+/*
+ * This bit determines the transmit response during local fault state.
+ * The LOCAL_FAULT_STATUS bit is always updated irrespective of this
+ * configuration.
+ * If set, MAC will continue to transmit data irrespective of
+ * LOCAL_FAULT_STATUS.
+ * If reset, MAC transmit behavior is governed by
+ * DROP_TX_DATA_ON_LOCAL_FAULT configuration.
+*/
+#define  RX_LSS_CTRL_LOCAL_FAULT_DISABLE_MASK	0x1
+
+
+/*
+ * Register <Status for RS layer. These bits are sticky by nature, and can be cleared by writing to the clear register for XLMAC0/port0 (LPORT port0)> - read-only
+ *
+ * Status for RS layer.
+ * These bits are sticky by nature, and can be cleared by writing to the
+ * clear register
+ */
+#define XPORT_XLMAC_CORE_RX_LSS_STATUS_REG	0x58
+
+/*
+ * True when link interruption state is detected as per RS layer state
+ * machine.
+ * Sticky bit is cleared by CLEAR_LINK_INTERRUPTION_STATUS.
+*/
+#define  RX_LSS_STATUS_LINK_INTERRUPTION_STATUS_MASK	0x4
+
+/*
+ * True when remote fault state is detected as per RS layer state machine.
+ * Sticky bit is cleared by CLEAR_REMOTE_FAULT_STATUS.
+*/
+#define  RX_LSS_STATUS_REMOTE_FAULT_STATUS_MASK	0x2
+
+/*
+ * True when local fault state is detected as per RS layer state machine.
+ * Sticky bit is cleared by CLEAR_LOCAL_FAULT_STATUS
+*/
+#define  RX_LSS_STATUS_LOCAL_FAULT_STATUS_MASK	0x1
+
+
+/*
+ * Register <Clear the XLMAC_RX_LSS_STATUS, used for resetting the sticky status bits for XLMAC0/port0 (LPORT port0)>
+ *
+ * Clear the XLMAC_RX_LSS_STATUS register, used for resetting the sticky
+ * status bits
+ */
+#define XPORT_XLMAC_CORE_CLEAR_RX_LSS_STATUS_REG	0x60
+
+/*
+ * A rising edge on this register bit (0->1), clears the sticky
+ * LINK_INTERRUPTION_STATUS bit
+*/
+#define  CLEAR_RX_LSS_STATUS_CLEAR_LINK_INTERRUPTION_STATUS_MASK	0x4
+
+/*
+ * A rising edge on this register bit (0->1), clears the sticky
+ * REMOTE_FAULT_STATUS bit
+*/
+#define  CLEAR_RX_LSS_STATUS_CLEAR_REMOTE_FAULT_STATUS_MASK	0x2
+
+/*
+ * A rising edge on this register bit (0->1), clears the sticky
+ * LOCAL_FAULT_STATUS bit
+*/
+#define  CLEAR_RX_LSS_STATUS_CLEAR_LOCAL_FAULT_STATUS_MASK	0x1
+
+
+/*
+ * Register <PAUSE controlfor XLMAC0/port0 (LPORT port0)>
+ *
+ * PAUSE control register This XLMAC core register is 37 bits wide in
+ * hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_PAUSE_CTRL) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_PAUSE_CTRL_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_PAUSE_CTRL_REG	0x68
+
+/*
+ * Pause time value sent in the timer field for XOFF state (unit is 512
+ * bit-times)
+*/
+#define  PAUSE_CTRL_PAUSE_XOFF_TIMER_SHIFT	21
+#define  PAUSE_CTRL_PAUSE_XOFF_TIMER_MASK	0x1fffe00000
+
+/* Reserved */
+#define  PAUSE_CTRL_RSVD_2_MASK		0x100000
+
+/* Reserved */
+#define  PAUSE_CTRL_RSVD_1_MASK		0x80000
+
+/*
+ * When set, enables detection of pause frames in the receive direction and
+ * pause/resume the transmit data path
+*/
+#define  PAUSE_CTRL_RX_PAUSE_EN_MASK	0x40000
+
+/*
+ * When set, enables the transmission of pause frames whenever there is a
+ * transition on txbkp input to MAC from MMU
+*/
+#define  PAUSE_CTRL_TX_PAUSE_EN_MASK	0x20000
+
+/*
+ * When set, enables the periodic re-generation of XOFF pause frames based
+ * on the interval specified in PAUSE_REFRESH_TIMER
+*/
+#define  PAUSE_CTRL_PAUSE_REFRESH_EN_MASK	0x10000
+
+/*
+ * This field specifies the interval at which pause frames are re-generated
+ * during XOFF state, provided PAUSE_REFRESH_EN is set (unit is 512
+ * bit-times)
+*/
+#define  PAUSE_CTRL_PAUSE_REFRESH_TIMER_SHIFT	0
+#define  PAUSE_CTRL_PAUSE_REFRESH_TIMER_MASK	0xffff
+
+
+/*
+ * Register <PFC controlfor XLMAC0/port0 (LPORT port0)>
+ *
+ * PFC control register This XLMAC core register is 38 bits wide in
+ * hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_PFC_CTRL) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_PFC_CTRL_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_PFC_CTRL_REG	0x70
+
+/* When set, enables the transmission of PFC frames */
+#define  PFC_CTRL_TX_PFC_EN_MASK	0x2000000000
+
+/*
+ * When set, enables detection of PFC frames in the receive direction and
+ * generation of COSMAPs to MMU based on incoming timer values
+*/
+#define  PFC_CTRL_RX_PFC_EN_MASK	0x1000000000
+
+/*
+ * When set, enables the generation of receive and transmit PFC events into
+ * the corresponding statistics vectors (RSV and TSV)
+*/
+#define  PFC_CTRL_PFC_STATS_EN_MASK	0x800000000
+
+/* Reserved */
+#define  PFC_CTRL_RSVD_MASK		0x400000000
+
+/*
+ * When set, forces the MAC to generate an XON indication to the MMU for
+ * all classes of service in the receive direction
+*/
+#define  PFC_CTRL_FORCE_PFC_XON_MASK	0x200000000
+
+/*
+ * When set, enables the periodic re-generation of PFC frames based on the
+ * interval specified in PFC_REFRESH_TIMER
+*/
+#define  PFC_CTRL_PFC_REFRESH_EN_MASK	0x100000000
+
+/*
+ * Pause time value sent in the timer field for classes in XOFF state (unit
+ * is 512 bit-times)
+*/
+#define  PFC_CTRL_PFC_XOFF_TIMER_SHIFT	16
+#define  PFC_CTRL_PFC_XOFF_TIMER_MASK	0xffff0000
+
+/*
+ * This field specifies the interval at which PFC frames are re-generated
+ * for a class of service in XOFF state, provided PFC_REFRESH_EN is set
+ * (unit is 512 bit-times)
+*/
+#define  PFC_CTRL_PFC_REFRESH_TIMER_SHIFT	0
+#define  PFC_CTRL_PFC_REFRESH_TIMER_MASK	0xffff
+
+
+/*
+ * Register <PFC Ethertype for XLMAC0/port0 (LPORT port0)>
+ *
+ * PFC Ethertype
+ */
+#define XPORT_XLMAC_CORE_PFC_TYPE_REG	0x78
+
+/*
+ * This field is used in the ETHERTYPE field of the PFC frame that is
+ * generated and transmitted by the MAC and also used for detection in the
+ * receive direction
+*/
+#define  PFC_TYPE_PFC_ETH_TYPE_SHIFT	0
+#define  PFC_TYPE_PFC_ETH_TYPE_MASK	0xffff
+
+
+/*
+ * Register <PFC Opcode for XLMAC0/port0 (LPORT port0)>
+ *
+ * PFC Opcode
+ */
+#define XPORT_XLMAC_CORE_PFC_OPCODE_REG	0x80
+
+/*
+ * This field is used in the OPCODE field of the PFC frame that is
+ * generated and transmitted by the MAC and also used for detection in the
+ * receive direction
+*/
+#define  PFC_OPCODE_PFC_OPCODE_SHIFT	0
+#define  PFC_OPCODE_PFC_OPCODE_MASK	0xffff
+
+
+/*
+ * Register <PFC Destination Address for XLMAC0/port0 (LPORT port0)>
+ *
+ * PFC Destination Address.
+ * This XLMAC core register is 48 bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_PFC_DA) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_PFC_DA_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_PFC_DA_REG	0x88
+
+/*
+ * This field is used in the destination-address field of the PFC frame
+ * that is generated and transmitted by the MAC and also used for detection
+ * in the receive direction
+*/
+#define  PFC_DA_PFC_MACDA_SHIFT		0
+#define  PFC_DA_PFC_MACDA_MASK		0xffffffffffff
+
+
+/*
+ * Register <LLFC Controlfor XLMAC0/port0 (LPORT port0)>
+ *
+ * LLFC Control Register
+ */
+#define XPORT_XLMAC_CORE_LLFC_CTRL_REG	0x90
+
+/*
+ * This field indicates the minimum Inter Message Gap that is enforced by
+ * the MAC between 2 LLFC messages in the transmit direction (unit is 1
+ * credit)
+*/
+#define  LLFC_CTRL_LLFC_IMG_SHIFT	6
+#define  LLFC_CTRL_LLFC_IMG_MASK	0x3fc0
+
+/* When set, LLFC CRC computation does not include the SOM character */
+#define  LLFC_CTRL_NO_SOM_FOR_CRC_LLFC_MASK	0x20
+
+/*
+ * When set, disables the CRC check for LLFC messages in the receive
+ * direction
+*/
+#define  LLFC_CTRL_LLFC_CRC_IGNORE_MASK	0x10
+
+/*
+ * When LLFC_IN_IPG_ONLY is reset, the mode of transmission of LLFC
+ * messages is controlled by this bit depending upon whether the LLFC
+ * message is XON or XOFF When LLFC_CUT_THROUGH_MODE is reset, all LLFC
+ * messages are transmitted pre-emptively (within a packet) When
+ * LLFC_CUT_THROUGH_MODE is set, only XOFF LLFC messages are transmitted
+ * pre-emptively, XON LLFC messages are transmitted during IPG
+*/
+#define  LLFC_CTRL_LLFC_CUT_THROUGH_MODE_MASK	0x8
+
+/*
+ * When set, all LLFC messages are transmitted during IPG When reset, the
+ * mode of insertion of LLFC messages is controlled by
+ * LLFC_CUT_THROUGH_MODE
+*/
+#define  LLFC_CTRL_LLFC_IN_IPG_ONLY_MASK	0x4
+
+/*
+ * When set, enables processing of LLFC frames in the receive direction and
+ * generation of COSMAPs to MMU
+*/
+#define  LLFC_CTRL_RX_LLFC_EN_MASK	0x2
+
+/*
+ * When set, enables the generation and transmission of LLFC frames in the
+ * transmit direction
+*/
+#define  LLFC_CTRL_TX_LLFC_EN_MASK	0x1
+
+
+/*
+ * Register <Programmable TX LLFC Message fields for XLMAC0/port0 (LPORT port0)>
+ *
+ * Programmable TX LLFC Message fields.
+ */
+#define XPORT_XLMAC_CORE_TX_LLFC_MSG_FIELDS_REG	0x98
+
+/*
+ * Pause time value sent in the XOFF_TIME field of the outgoing LLFC
+ * message
+*/
+#define  TX_LLFC_MSG_FIELDS_LLFC_XOFF_TIME_SHIFT	12
+#define  TX_LLFC_MSG_FIELDS_LLFC_XOFF_TIME_MASK	0xffff000
+
+/*
+ * This field is used in the FC_OBJ_LOGICAL field of the outgoing LLFC
+ * message
+*/
+#define  TX_LLFC_MSG_FIELDS_TX_LLFC_FC_OBJ_LOGICAL_SHIFT	8
+#define  TX_LLFC_MSG_FIELDS_TX_LLFC_FC_OBJ_LOGICAL_MASK	0xf00
+
+/*
+ * This field is used in the MSG_TYPE_LOGICAL field of the outgoing LLFC
+ * message
+*/
+#define  TX_LLFC_MSG_FIELDS_TX_LLFC_MSG_TYPE_LOGICAL_SHIFT	0
+#define  TX_LLFC_MSG_FIELDS_TX_LLFC_MSG_TYPE_LOGICAL_MASK	0xff
+
+
+/*
+ * Register <Programmable RX LLFC Message fields for XLMAC0/port0 (LPORT port0)>
+ *
+ * Programmable RX LLFC Message fields
+ */
+#define XPORT_XLMAC_CORE_RX_LLFC_MSG_FIELDS_REG	0xa0
+
+/*
+ * This value is compared against the FC_OBJ_PHYSICAL field of an incoming
+ * LLFC message in order to decode the message
+*/
+#define  RX_LLFC_MSG_FIELDS_RX_LLFC_FC_OBJ_PHYSICAL_SHIFT	20
+#define  RX_LLFC_MSG_FIELDS_RX_LLFC_FC_OBJ_PHYSICAL_MASK	0xf00000
+
+/*
+ * This value is compared against the MSG_TYPE_PHYSICAL field of an
+ * incoming LLFC message in order to decode the message
+*/
+#define  RX_LLFC_MSG_FIELDS_RX_LLFC_MSG_TYPE_PHYSICAL_SHIFT	12
+#define  RX_LLFC_MSG_FIELDS_RX_LLFC_MSG_TYPE_PHYSICAL_MASK	0xff000
+
+/*
+ * This value is compared against the FC_OBJ_LOGICAL field of an incoming
+ * LLFC message in order to decode the message
+*/
+#define  RX_LLFC_MSG_FIELDS_RX_LLFC_FC_OBJ_LOGICAL_SHIFT	8
+#define  RX_LLFC_MSG_FIELDS_RX_LLFC_FC_OBJ_LOGICAL_MASK	0xf00
+
+/*
+ * This value is compared against the MSG_TYPE_LOGICAL field of an incoming
+ * LLFC message in order to decode the message
+*/
+#define  RX_LLFC_MSG_FIELDS_RX_LLFC_MSG_TYPE_LOGICAL_SHIFT	0
+#define  RX_LLFC_MSG_FIELDS_RX_LLFC_MSG_TYPE_LOGICAL_MASK	0xff
+
+
+/*
+ * Register <The TimeStamp value of the Tx two-step packets for XLMAC0/port0 (LPORT port0)> - read-only
+ *
+ * The TimeStamp value of the Tx two-step packets.
+ * This XLMAC core register is 49 bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_TX_TIMESTAMP_FIFO_DATA) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ */
+#define XPORT_XLMAC_CORE_TX_TIMESTAMP_FIFO_DATA_REG	0xa8
+
+/* Active high qualifier for the TimeStamp & SEQUENCE_ID fields. */
+#define  TX_TIMESTAMP_FIFO_DATA_TS_ENTRY_VALID_MASK	0x1000000000000
+
+/*
+ * The Sequence Identifier extracted from the Timesync packet based on the
+ * header offset
+*/
+#define  TX_TIMESTAMP_FIFO_DATA_SEQUENCE_ID_SHIFT	32
+#define  TX_TIMESTAMP_FIFO_DATA_SEQUENCE_ID_MASK	0xffff00000000
+
+/* The TimeStamp value of the Tx two-step enabled packet. */
+#define  TX_TIMESTAMP_FIFO_DATA_TIME_STAMP_SHIFT	0
+#define  TX_TIMESTAMP_FIFO_DATA_TIME_STAMP_MASK	0xffffffff
+
+
+/*
+ * Register <Tx TimeStamp FIFO Status for XLMAC0/port0 (LPORT port0)> - read-only
+ *
+ * Tx TimeStamp FIFO Status.
+ */
+#define XPORT_XLMAC_CORE_TX_TIMESTAMP_FIFO_STATUS_REG	0xb0
+
+/*
+ * Number of TX time stamps currently buffered in TX Time Stamp FIFO.
+ * A valid entry is popped out whenever XLMAC_TX_TIMESTAMP_FIFO_DATA is
+ * read
+*/
+#define  TX_TIMESTAMP_FIFO_STATUS_ENTRY_COUNT_SHIFT	0
+#define  TX_TIMESTAMP_FIFO_STATUS_ENTRY_COUNT_MASK	0x7
+
+
+/*
+ * Register <FIFO status. These bits (except LINK_STATUS) are sticky by nature, and can be cleared by writing to the clear register. for XLMAC0/port0 (LPORT port0)> - read-only
+ *
+ * FIFO status register.
+ * These bits (except LINK_STATUS) are sticky by nature, and can be cleared
+ * by writing to the clear register.
+ */
+#define XPORT_XLMAC_CORE_FIFO_STATUS_REG	0xb8
+
+/*
+ * This bit indicates the link status used by XLMAC EEE and lag-failover
+ * state machines.
+ * This reflects the live status of the link as seen by the MAC.
+ * If set, indicates that link is active.
+*/
+#define  FIFO_STATUS_LINK_STATUS_MASK	0x100
+
+/* If set, indicates RX packet fifo overflow */
+#define  FIFO_STATUS_RX_PKT_OVERFLOW_MASK	0x80
+
+/* If set, indicates overflow occurred in TX two-step Time Stamp FIFO */
+#define  FIFO_STATUS_TX_TS_FIFO_OVERFLOW_MASK	0x40
+
+/* If set, indicates TX LLFC message fifo overflow */
+#define  FIFO_STATUS_TX_LLFC_MSG_OVERFLOW_MASK	0x20
+
+/* Reserved */
+#define  FIFO_STATUS_RSVD_2_MASK	0x10
+
+/* If set, indicates tx packet fifo overflow */
+#define  FIFO_STATUS_TX_PKT_OVERFLOW_MASK	0x8
+
+/* If set, indicates tx packet fifo underflow */
+#define  FIFO_STATUS_TX_PKT_UNDERFLOW_MASK	0x4
+
+/* If set, indicates rx message fifo overflow */
+#define  FIFO_STATUS_RX_MSG_OVERFLOW_MASK	0x2
+
+/* Reserved */
+#define  FIFO_STATUS_RSVD_1_MASK	0x1
+
+
+/*
+ * Register <Clear XLMAC_FIFO_STATUS, used for resetting the sticky status bits for XLMAC0/port0 (LPORT port0)>
+ *
+ * Clear XLMAC_FIFO_STATUS register, used for resetting the sticky status
+ * bits
+ */
+#define XPORT_XLMAC_CORE_CLEAR_FIFO_STATUS_REG	0xc0
+
+/*
+ * A rising edge on this register bit (0->1), clears the sticky
+ * RX_PKT_OVERFLOW status bit.
+*/
+#define  CLEAR_FIFO_STATUS_CLEAR_RX_PKT_OVERFLOW_MASK	0x80
+
+/*
+ * A rising edge on this register bit (0->1), clears the sticky
+ * TX_TS_FIFO_OVERFLOW status bit.
+*/
+#define  CLEAR_FIFO_STATUS_CLEAR_TX_TS_FIFO_OVERFLOW_MASK	0x40
+
+/*
+ * A rising edge on this register bit (0->1), clears the sticky
+ * TX_LLFC_MSG_OVERFLOW status bit.
+*/
+#define  CLEAR_FIFO_STATUS_CLEAR_TX_LLFC_MSG_OVERFLOW_MASK	0x20
+
+/* Reserved */
+#define  CLEAR_FIFO_STATUS_RSVD_2_MASK	0x10
+
+/*
+ * A rising edge on this register bit (0->1), clears the sticky
+ * TX_PKT_OVERFLOW status bit.
+*/
+#define  CLEAR_FIFO_STATUS_CLEAR_TX_PKT_OVERFLOW_MASK	0x8
+
+/*
+ * A rising edge on this register bit (0->1), clears the sticky
+ * TX_PKT_UNDERFLOW status bit
+*/
+#define  CLEAR_FIFO_STATUS_CLEAR_TX_PKT_UNDERFLOW_MASK	0x4
+
+/*
+ * A rising edge on this register bit (0->1), clears the sticky
+ * RX_MSG_OVERFLOW status bit
+*/
+#define  CLEAR_FIFO_STATUS_CLEAR_RX_MSG_OVERFLOW_MASK	0x2
+
+/* Reserved */
+#define  CLEAR_FIFO_STATUS_RSVD_1_MASK	0x1
+
+
+/*
+ * Register <Lag Failover Status for XLMAC0/port0 (LPORT port0)> - read-only
+ *
+ * Lag Failover Status.
+ */
+#define XPORT_XLMAC_CORE_LAG_FAILOVER_STATUS_REG	0xc8
+
+/* Reserved */
+#define  LAG_FAILOVER_STATUS_RSVD_MASK	0x2
+
+/* Set when XLMAC is in lag failover state */
+#define  LAG_FAILOVER_STATUS_LAG_FAILOVER_LOOPBACK_MASK	0x1
+
+
+/*
+ * Register <for EEE Control  for XLMAC0/port0 (LPORT port0)>
+ *
+ * Register for EEE Control
+ */
+#define XPORT_XLMAC_CORE_EEE_CTRL_REG	0xd0
+
+/* Reserved */
+#define  EEE_CTRL_RSVD_MASK		0x2
+
+/*
+ * When set, enables EEE state machine in the transmit direction and LPI
+ * detection/prediction in the receive direction
+*/
+#define  EEE_CTRL_EEE_EN_MASK		0x1
+
+
+/*
+ * Register <EEE Timers for XLMAC0/port0 (LPORT port0)>
+ *
+ * EEE Timers This XLMAC core register is 64 bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_EEE_TIMERS) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_EEE_TIMERS_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_EEE_TIMERS_REG	0xd8
+
+/*
+ * This field controls clock divider used to generate ~1us reference pulses
+ * used by EEE timers.
+ * It specifies integer number of clock cycles for 1us reference using
+ * tsc_clk
+*/
+#define  EEE_TIMERS_EEE_REF_COUNT_SHIFT	48
+#define  EEE_TIMERS_EEE_REF_COUNT_MASK	0xffff000000000000
+
+/*
+ * This is the duration for which MAC must wait to go back to ACTIVE state
+ * from LPI state when it receives packet/flow-control frames for
+ * transmission.
+ * Unit is micro seconds
+*/
+#define  EEE_TIMERS_EEE_WAKE_TIMER_SHIFT	32
+#define  EEE_TIMERS_EEE_WAKE_TIMER_MASK	0xffff00000000
+
+/*
+ * This is the duration for which the MAC must wait in EMPTY state before
+ * transitioning to LPI state.
+ * Unit is micro seconds
+*/
+#define  EEE_TIMERS_EEE_DELAY_ENTRY_TIMER_SHIFT	0
+#define  EEE_TIMERS_EEE_DELAY_ENTRY_TIMER_MASK	0xffffffff
+
+
+/*
+ * Register <EEE One Second Link Status Timer for XLMAC0/port0 (LPORT port0)>
+ *
+ * EEE One Second Link Status Timer
+ */
+#define XPORT_XLMAC_CORE_EEE_1_SEC_LINK_STATUS_TIMER_REG	0xe0
+
+/*
+ * This is the duration for which EEE FSM must wait when Link status
+ * becomes active before transitioning to ACTIVE state.
+ * Unit is micro seconds
+*/
+#define  EEE_1_SEC_LINK_STATUS_TIMER_ONE_SECOND_TIMER_SHIFT	0
+#define  EEE_1_SEC_LINK_STATUS_TIMER_ONE_SECOND_TIMER_MASK	0xffffff
+
+
+/*
+ * Register <HiGig2 and HiGig+ header- MS bytes for XLMAC0/port0 (LPORT port0)>
+ *
+ * HiGig2 and HiGig+ header register - MS bytes This XLMAC core register is
+ * 64 bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_HIGIG_HDR_0) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_HIGIG_HDR_0_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_HIGIG_HDR_0_REG	0xe8
+
+/*
+ * In HiGig2 mode, this register contains bits 127:
+ * 64 of 16-byte HiGig2 header.
+ * In HiGig+ mode, bits 31:
+ * 0 of this register contains bits 95:
+ * 64 of 12-byte HiGig+ header.
+ * This field is used for constructing the module header for HiGig2/HiGig+
+ * pause and PFC frames in the transmit direction.
+*/
+#define  HIGIG_HDR_0_HIGIG_HDR_0_SHIFT	0
+#define  HIGIG_HDR_0_HIGIG_HDR_0_MASK	0xffffffffffffffff
+
+
+/*
+ * Register <HiGig2 and HiGig+ header- LS bytes for XLMAC0/port0 (LPORT port0)>
+ *
+ * HiGig2 and HiGig+ header register - LS bytes This XLMAC core register is
+ * 64 bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_HIGIG_HDR_1) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_HIGIG_HDR_1_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_HIGIG_HDR_1_REG	0xf0
+
+/*
+ * In HiGig2 mode, this register contains bits 63:
+ * 0 of 16-byte HiGig2 header.
+ * In HiGig+ mode, this register contains bits 63:
+ * 0 of 12-byte HiGig+ header.
+ * This field is used for constructing the module header for HiGig2/HiGig+
+ * pause and PFC frames in the transmit direction.
+*/
+#define  HIGIG_HDR_1_HIGIG_HDR_1_SHIFT	0
+#define  HIGIG_HDR_1_HIGIG_HDR_1_MASK	0xffffffffffffffff
+
+
+/*
+ * Register <MAC EEE control in GMII mode for XLMAC0/port0 (LPORT port0)>
+ *
+ * MAC EEE control in GMII mode.
+ */
+#define XPORT_XLMAC_CORE_GMII_EEE_CTRL_REG	0xf8
+
+/* When set, enables LPI prediction */
+#define  GMII_EEE_CTRL_GMII_LPI_PREDICT_MODE_EN_MASK	0x10000
+
+/*
+ * If GMII_LPI_PREDICT_MODE_EN is set then this field defines the number of
+ * IDLEs to be received before allowing LPIs to be sent to Link Partner
+*/
+#define  GMII_EEE_CTRL_GMII_LPI_PREDICT_THRESHOLD_SHIFT	0
+#define  GMII_EEE_CTRL_GMII_LPI_PREDICT_THRESHOLD_MASK	0xffff
+
+
+/*
+ * Register <Timestamp Adjust - Refer specification document for more details for XLMAC0/port0 (LPORT port0)>
+ *
+ * Timestamp Adjust register.
+ * Refer specification document for more details
+ */
+#define XPORT_XLMAC_CORE_TIMESTAMP_ADJUST_REG	0x100
+
+/*
+ * When set, indicates that the checksum offset is referenced by input port
+ * checksumoffset, else checksum offset is referenced by txtsoffset
+*/
+#define  TIMESTAMP_ADJUST_TS_USE_CS_OFFSET_MASK	0x8000
+
+/*
+ * This is an unsigned value to account for synchronization delay of TS
+ * timer from TS clk to TSC_CLK domain.
+ * Unit is 1ns.
+ * The latency is
+ * [2.5 TSC_CLK period + 1 TS_CLK period].
+*/
+#define  TIMESTAMP_ADJUST_TS_TSTS_ADJUST_SHIFT	9
+#define  TIMESTAMP_ADJUST_TS_TSTS_ADJUST_MASK	0x7e00
+
+/*
+ * This is a signed value which is 2s complement added to synchronized
+ * timestamp to account for MAC pipeline delay in OSTS.
+ * Unit is 1ns The latency is [6 TSC_CLK period + 1 TS_CLK period ].
+*/
+#define  TIMESTAMP_ADJUST_TS_OSTS_ADJUST_SHIFT	0
+#define  TIMESTAMP_ADJUST_TS_OSTS_ADJUST_MASK	0x1ff
+
+
+/*
+ * Register <Timestamp Byte Adjust - Refer specification document for more details for XLMAC0/port0 (LPORT port0)>
+ *
+ * Timestamp Byte Adjust register.
+ * Refer specification document for more details
+ */
+#define XPORT_XLMAC_CORE_TIMESTAMP_BYTE_ADJUST_REG	0x108
+
+/*
+ * When set, enables byte based adjustment for receive timestamp capture.
+ * This should be enabled in GMII/MII modes only.
+*/
+#define  TIMESTAMP_BYTE_ADJUST_RX_TIMER_BYTE_ADJUST_EN_MASK	0x200000
+
+/*
+ * This is a per byte unsigned value which is subtracted from sampled
+ * timestamp to account for timestamp jitter due to wider MSBUS interface.
+ * Unit is 1ns
+*/
+#define  TIMESTAMP_BYTE_ADJUST_RX_TIMER_BYTE_ADJUST_SHIFT	11
+#define  TIMESTAMP_BYTE_ADJUST_RX_TIMER_BYTE_ADJUST_MASK	0x1ff800
+
+/*
+ * When set, enables byte based adjustment for transmit timestamp capture
+ * (OSTS and TSTS).
+ * This should be enabled in GMII/MII modes only.
+*/
+#define  TIMESTAMP_BYTE_ADJUST_TX_TIMER_BYTE_ADJUST_EN_MASK	0x400
+
+/*
+ * This is a per byte unsigned value which is added to sampled timestamp to
+ * account for timestamp jitter due to wider MSBUS interface.
+ * Unit is 1ns
+*/
+#define  TIMESTAMP_BYTE_ADJUST_TX_TIMER_BYTE_ADJUST_SHIFT	0
+#define  TIMESTAMP_BYTE_ADJUST_TX_TIMER_BYTE_ADJUST_MASK	0x3ff
+
+
+/*
+ * Register <Tx CRC corrupt control for XLMAC0/port0 (LPORT port0)>
+ *
+ * Tx CRC corrupt control register This XLMAC core register is 35 bits wide
+ * in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_TX_CRC_CORRUPT_CTRL) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_TX_CRC_CORRUPT_CTRL_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_TX_CRC_CORRUPT_CTRL_REG	0x110
+
+/*
+ * Programmable CRC value used to corrupt the Tx CRC.
+ * The computed CRC is replaced by this programmed CRC value based on
+ * TX_CRC_CORRUPTION_MODE
+*/
+#define  TX_CRC_CORRUPT_CTRL_PROG_TX_CRC_SHIFT	3
+#define  TX_CRC_CORRUPT_CTRL_PROG_TX_CRC_MASK	0x7fffffff8
+
+/*
+ * When set, the computed CRC is replaced with PROG_TX_CRC, else computed
+ * CRC is inverted
+*/
+#define  TX_CRC_CORRUPT_CTRL_TX_CRC_CORRUPTION_MODE_MASK	0x4
+
+/*
+ * When set, MAC enables the CRC corruption on the transmitted packets.
+ * Mode of corruption is determined by TX_CRC_CORRUPTION_MODE
+*/
+#define  TX_CRC_CORRUPT_CTRL_TX_CRC_CORRUPT_EN_MASK	0x2
+
+/*
+ * When set, this bit causes packets with TXERR to corrupt the CRC of the
+ * packet when it is transmitted.
+ * When reset, packets with TXERR are transmitted with /E/ termination
+ * character (/T/ is not enforced); packet CRC is unaffected
+*/
+#define  TX_CRC_CORRUPT_CTRL_TX_ERR_CORRUPTS_CRC_MASK	0x1
+
+
+/*
+ * Register <Transmit E2EFC/E2ECC control for XLMAC0/port0 (LPORT port0)>
+ *
+ * Transmit E2EFC/E2ECC control register
+ */
+#define XPORT_XLMAC_CORE_E2E_CTRL_REG	0x118
+
+/*
+ * When set, dual modid is enabled for E2EFC (Only 32 ports IBP is sent
+ * out).
+ * When reset, single modid is enabled for E2EFC (64 ports IBP is sent)
+*/
+#define  E2E_CTRL_E2EFC_DUAL_MODID_EN_MASK	0x10
+
+/*
+ * When set, legacy E2ECC stage2 loading enabled (single stage2 buffer for
+ * all ports).
+ * When reset, new E2ECC stage2 loading enabled (per port stage2 buffer)
+*/
+#define  E2E_CTRL_E2ECC_LEGACY_IMP_EN_MASK	0x8
+
+/*
+ * When set, dual modid is enabled for E2ECC.
+ * When reset, single modid is enabled for E2ECC
+*/
+#define  E2E_CTRL_E2ECC_DUAL_MODID_EN_MASK	0x4
+
+/*
+ * When set, E2ECC/FC frames are not transmitted during pause state.
+ * When reset, E2ECC/FC frames are transmitted even during pause state
+ * similar to other flow control frames.
+*/
+#define  E2E_CTRL_HONOR_PAUSE_FOR_E2E_MASK	0x2
+
+/* When set, MAC enables E2EFC/E2ECC frame generation and transmission. */
+#define  E2E_CTRL_E2E_ENABLE_MASK	0x1
+
+
+/*
+ * Register <E2ECC module header- MS bytes for XLMAC0/port0 (LPORT port0)>
+ *
+ * E2ECC module header register - MS bytes This XLMAC core register is 64
+ * bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_E2ECC_MODULE_HDR_0) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_E2ECC_MODULE_HDR_0_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_E2ECC_MODULE_HDR_0_REG	0x120
+
+/*
+ * In HiGig2 mode, this register contains bits 127:
+ * 64 of 16-byte HiGig2 header.
+ * In HiGig+ mode, this register contains bits 95:
+ * 32 of 12-byte HiGig+ header.
+ * This field is used for constructing the module header for HiGig2/HiGig+
+ * E2ECC frames in the transmit direction.
+*/
+#define  E2ECC_MODULE_HDR_0_E2ECC_MODULE_HDR_0_SHIFT	0
+#define  E2ECC_MODULE_HDR_0_E2ECC_MODULE_HDR_0_MASK	0xffffffffffffffff
+
+
+/*
+ * Register <E2ECC module header- LS bytes for XLMAC0/port0 (LPORT port0)>
+ *
+ * E2ECC module header register - LS bytes This XLMAC core register is 64
+ * bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_E2ECC_MODULE_HDR_1) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_E2ECC_MODULE_HDR_1_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_E2ECC_MODULE_HDR_1_REG	0x128
+
+/*
+ * In HiGig2 mode, this register contains bits 63:
+ * 0 of 16-byte HiGig2 header.
+ * In HiGig+ mode, bits 63:
+ * 32 of this register contains bits 31:
+ * 0 of 12-byte HiGig+ header.
+ * This field is used for constructing the module header for HiGig2/HiGig+
+ * E2ECC frames in the transmit direction.
+*/
+#define  E2ECC_MODULE_HDR_1_E2ECC_MODULE_HDR_1_SHIFT	0
+#define  E2ECC_MODULE_HDR_1_E2ECC_MODULE_HDR_1_MASK	0xffffffffffffffff
+
+
+/*
+ * Register <E2ECC Ethernet header- MS bytes for XLMAC0/port0 (LPORT port0)>
+ *
+ * E2ECC Ethernet header register - MS bytes This XLMAC core register is 64
+ * bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_E2ECC_DATA_HDR_0) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_E2ECC_DATA_HDR_0_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_E2ECC_DATA_HDR_0_REG	0x130
+
+/*
+ * This register contains bits 127:
+ * 64 of 16-byte IEEE header (DA + SA + Length/Type + Opcode).
+ * This field is used for constructing the Ethernet header for
+ * HiGig2/HiGig+ E2ECC frames in the transmit direction.
+*/
+#define  E2ECC_DATA_HDR_0_E2ECC_DATA_HDR_0_SHIFT	0
+#define  E2ECC_DATA_HDR_0_E2ECC_DATA_HDR_0_MASK	0xffffffffffffffff
+
+
+/*
+ * Register <E2ECC Ethernet header- LS bytes for XLMAC0/port0 (LPORT port0)>
+ *
+ * E2ECC Ethernet header register - LS bytes This XLMAC core register is 64
+ * bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_E2ECC_DATA_HDR_1) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_E2ECC_DATA_HDR_1_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_E2ECC_DATA_HDR_1_REG	0x138
+
+/*
+ * This register contains bits 63:
+ * 0 of 16-byte IEEE header (DA + SA + Length/Type + Opcode).
+ * This field is used for constructing the Ethernet header for
+ * HiGig2/HiGig+ E2ECC frames in the transmit direction.
+*/
+#define  E2ECC_DATA_HDR_1_E2ECC_DATA_HDR_1_SHIFT	0
+#define  E2ECC_DATA_HDR_1_E2ECC_DATA_HDR_1_MASK	0xffffffffffffffff
+
+
+/*
+ * Register <E2EFC module header- MS bytes for XLMAC0/port0 (LPORT port0)>
+ *
+ * E2EFC module header register - MS bytes This XLMAC core register is 64
+ * bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_E2EFC_MODULE_HDR_0) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_E2EFC_MODULE_HDR_0_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_E2EFC_MODULE_HDR_0_REG	0x140
+
+/*
+ * In HiGig2 mode, this register contains bits 127:
+ * 64 of 16-byte HiGig2 header.
+ * In HiGig+ mode, this register contains bits 95:
+ * 32 of 12-byte HiGig+ header.
+ * This field is used for constructing the module header for HiGig2/HiGig+
+ * E2EFC frames in the transmit direction.
+*/
+#define  E2EFC_MODULE_HDR_0_E2EFC_MODULE_HDR_0_SHIFT	0
+#define  E2EFC_MODULE_HDR_0_E2EFC_MODULE_HDR_0_MASK	0xffffffffffffffff
+
+
+/*
+ * Register <E2EFC module header- LS bytes for XLMAC0/port0 (LPORT port0)>
+ *
+ * E2EFC module header register - LS bytes This XLMAC core register is 64
+ * bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_E2EFC_MODULE_HDR_1) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_E2EFC_MODULE_HDR_1_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_E2EFC_MODULE_HDR_1_REG	0x148
+
+/*
+ * In HiGig2 mode, this register contains bits 63:
+ * 0 of 16-byte HiGig2 header.
+ * In HiGig+ mode, bits 63:
+ * 32 of this register contains bits 31:
+ * 0 of 12-byte HiGig+ header.
+ * This field is used for constructing the module header for HiGig2/HiGig+
+ * E2EFC frames in the transmit direction.
+*/
+#define  E2EFC_MODULE_HDR_1_E2EFC_MODULE_HDR_1_SHIFT	0
+#define  E2EFC_MODULE_HDR_1_E2EFC_MODULE_HDR_1_MASK	0xffffffffffffffff
+
+
+/*
+ * Register <E2EFC Ethernet header- MS bytes for XLMAC0/port0 (LPORT port0)>
+ *
+ * E2EFC Ethernet header register - MS bytes This XLMAC core register is 64
+ * bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_E2EFC_DATA_HDR_0) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_E2EFC_DATA_HDR_0_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_E2EFC_DATA_HDR_0_REG	0x150
+
+/*
+ * This register contains bits 127:
+ * 64 of 16-byte IEEE header (DA + SA + Length/Type + Opcode).
+ * This field is used for constructing the Ethernet header for
+ * HiGig2/HiGig+ E2EFC frames in the transmit direction.
+*/
+#define  E2EFC_DATA_HDR_0_E2EFC_DATA_HDR_0_SHIFT	0
+#define  E2EFC_DATA_HDR_0_E2EFC_DATA_HDR_0_MASK	0xffffffffffffffff
+
+
+/*
+ * Register <E2EFC Ethernet header- LS bytes for XLMAC0/port0 (LPORT port0)>
+ *
+ * E2EFC Ethernet header register - LS bytes This XLMAC core register is 64
+ * bits wide in hardware.
+ * LPORT register reads and writes are however 32 bits per transaction.
+ * When reading from this address, higher XLMAC register bits are copied to
+ * XLMAC0 32-bit Direct Access Data Read Register, and can subsequently be
+ * obtained by reading from that register.
+ * Similarly, when writing to this address, higher XLMAC register bits are
+ * taken from XLMAC0 32-bit Direct Access Data Write Register, so it is
+ * important to ensure proper value for the higher bits is present in that
+ * register prior to writing to this (.
+ * ..
+ * XLMAC_E2EFC_DATA_HDR_1) register.
+ * Alternatively, Indirect Access mechanism can be used to access XLMAC
+ * core registers (see XLMACx Indirect Access registers).
+ * NOTE:
+ * THIS REGISTER HAS AN ALTERNATIVE/OVERLAY FIELD LAYOUT VIEW - see
+ * Type_XLMAC_E2EFC_DATA_HDR_1_OVERLAY in lport_xlmac_core_regtypes.
+ * rdb
+ */
+#define XPORT_XLMAC_CORE_E2EFC_DATA_HDR_1_REG	0x158
+
+/*
+ * This register contains bits 63:
+ * 0 of 16-byte IEEE header (DA + SA + Length/Type + Opcode).
+ * This field is used for constructing the Ethernet header for
+ * HiGig2/HiGig+ E2EFC frames in the transmit direction.
+*/
+#define  E2EFC_DATA_HDR_1_E2EFC_DATA_HDR_1_SHIFT	0
+#define  E2EFC_DATA_HDR_1_E2EFC_DATA_HDR_1_MASK	0xffffffffffffffff
+
+
+/*
+ * Register <XLMAC TX FIFO Cell Count for XLMAC0/port0 (LPORT port0)> - read-only
+ *
+ * XLMAC TX FIFO Cell Count register
+ */
+#define XPORT_XLMAC_CORE_TXFIFO_CELL_CNT_REG	0x160
+
+/*
+ * Number of cell counts in XLMAC TX FIFO.
+ * Should range from 0 to 32 for XLMAC core in single port mode, or 0 to 16
+ * if XLMAC core is in dual port mode, or 0 to 8 if XLMAC core is in quad
+ * port mode during traffic.
+ * This should reset to 0 after the traffic has stopped for all port modes.
+*/
+#define  TXFIFO_CELL_CNT_CELL_CNT_SHIFT	0
+#define  TXFIFO_CELL_CNT_CELL_CNT_MASK	0x3f
+
+
+/*
+ * Register <XLMAC TX FIFO Cell Request Count for XLMAC0/port0 (LPORT port0)> - read-only
+ *
+ * XLMAC TX FIFO Cell Request Count Register
+ */
+#define XPORT_XLMAC_CORE_TXFIFO_CELL_REQ_CNT_REG	0x168
+
+/*
+ * Number of cell requests made to Egress Pipeline.
+ * Should range from 0 to 32 for XLMAC core in single port mode, or 0 to 16
+ * if XLMAC core is in dual port mode, or 0 to 8 if XLMAC core is in quad
+ * port mode during traffic.
+ * This should saturate at the maximum value for the corresponding port
+ * mode after traffic has stopped.
+*/
+#define  TXFIFO_CELL_REQ_CNT_REQ_CNT_SHIFT	0
+#define  TXFIFO_CELL_REQ_CNT_REQ_CNT_MASK	0x3f
+
+
+/*
+ * Register <Memory Control for XLMAC0/port0 (LPORT port0)>
+ *
+ * Memory Control register
+ */
+#define XPORT_XLMAC_CORE_MEM_CTRL_REG	0x170
+
+/* Test mode configuration of Tx CDC Memory */
+#define  MEM_CTRL_TX_CDC_MEM_CTRL_TM_SHIFT	12
+#define  MEM_CTRL_TX_CDC_MEM_CTRL_TM_MASK	0xfff000
+
+/* Test mode configuration of Rx CDC Memory. */
+#define  MEM_CTRL_RX_CDC_MEM_CTRL_TM_SHIFT	0
+#define  MEM_CTRL_RX_CDC_MEM_CTRL_TM_MASK	0xfff
+
+
+/*
+ * Register <XLMAC memories ECC control for XLMAC0/port0 (LPORT port0)>
+ *
+ * XLMAC memories ECC control register
+ */
+#define XPORT_XLMAC_CORE_ECC_CTRL_REG	0x178
+
+/* When set, MAC enables Tx CDC memory ECC logic */
+#define  ECC_CTRL_TX_CDC_ECC_CTRL_EN_MASK	0x2
+
+/* When set, MAC enables Rx CDC memory ECC logic */
+#define  ECC_CTRL_RX_CDC_ECC_CTRL_EN_MASK	0x1
+
+
+/*
+ * Register <XLMAC memories double bit error control for XLMAC0/port0 (LPORT port0)>
+ *
+ * XLMAC memories double bit error control register
+ */
+#define XPORT_XLMAC_CORE_ECC_FORCE_DOUBLE_BIT_ERR_REG	0x180
+
+/*
+ * Tx CDC memory force double bit error enable.
+ * The LSB 2 bits will be inverted at the next memory read.
+ * This should never be asserted simultaneously with
+ * TX_CDC_FORCE_SINGLE_BIT_ERR.
+ * In order to inject double bit error again, this bit needs to be written
+ * to 0 before being re-written to 1.
+*/
+#define  ECC_FORCE_DOUBLE_BIT_ERR_TX_CDC_FORCE_DOUBLE_BIT_ERR_MASK	0x2
+
+/*
+ * Rx CDC memory force double bit error enable.
+ * The LSB 2 bits will be inverted at the next memory read.
+ * This should never be asserted simultaneously with
+ * RX_CDC_FORCE_SINGLE_BIT_ERR.
+ * In order to inject double bit error again, this bit needs to be written
+ * to 0 before being re-written to 1.
+*/
+#define  ECC_FORCE_DOUBLE_BIT_ERR_RX_CDC_FORCE_DOUBLE_BIT_ERR_MASK	0x1
+
+
+/*
+ * Register <XLMAC memories single bit error control for XLMAC0/port0 (LPORT port0)>
+ *
+ * XLMAC memories single bit error control register
+ */
+#define XPORT_XLMAC_CORE_ECC_FORCE_SINGLE_BIT_ERR_REG	0x188
+
+/*
+ * Tx CDC memory force single bit error enable.
+ * The LSB 1 bit will be inverted at the next memory read.
+ * This should never be asserted simultaneously with
+ * TX_CDC_FORCE_DOUBLE_BIT_ERR.
+ * In order to inject single bit error again, this bit needs to be written
+ * to 0 before being re-written to 1.
+*/
+#define  ECC_FORCE_SINGLE_BIT_ERR_TX_CDC_FORCE_SINGLE_BIT_ERR_MASK	0x2
+
+/*
+ * Rx CDC memory force single bit error enable.
+ * The LSB 1 bit will be inverted at the next memory read.
+ * This should never be asserted simultaneously with
+ * RX_CDC_FORCE_DOUBLE_BIT_ERR.
+ * In order to inject single bit error again, this bit needs to be written
+ * to 0 before being re-written to 1.
+*/
+#define  ECC_FORCE_SINGLE_BIT_ERR_RX_CDC_FORCE_SINGLE_BIT_ERR_MASK	0x1
+
+
+/*
+ * Register <Rx CDC memory ECC status. These bits are sticky by nature, and can be cleared by writing to the clear register, for XLMAC0/port0 (LPORT port0)> - read-only
+ *
+ * Rx CDC memory ECC status register.
+ * These bits are sticky by nature, and can be cleared by writing to the
+ * clear register.
+ */
+#define XPORT_XLMAC_CORE_RX_CDC_ECC_STATUS_REG	0x190
+
+/*
+ * This status bit indicates a double bit error occurred in the Rx CDC
+ * memory
+*/
+#define  RX_CDC_ECC_STATUS_RX_CDC_DOUBLE_BIT_ERR_MASK	0x2
+
+/*
+ * This status bit indicates a single bit error occurred in the Rx CDC
+ * memory
+*/
+#define  RX_CDC_ECC_STATUS_RX_CDC_SINGLE_BIT_ERR_MASK	0x1
+
+
+/*
+ * Register <Tx CDC memory ECC status. These bits are sticky by nature, and can be cleared by writing to the clear register, for XLMAC0/port0 (LPORT port0)> - read-only
+ *
+ * Tx CDC memory ECC status register.
+ * These bits are sticky by nature, and can be cleared by writing to the
+ * clear register.
+ */
+#define XPORT_XLMAC_CORE_TX_CDC_ECC_STATUS_REG	0x198
+
+/*
+ * This status bit indicates a double bit error occurred in the Tx CDC
+ * memory
+*/
+#define  TX_CDC_ECC_STATUS_TX_CDC_DOUBLE_BIT_ERR_MASK	0x2
+
+/*
+ * This status bit indicates a single bit error occurred in the Tx CDC
+ * memory
+*/
+#define  TX_CDC_ECC_STATUS_TX_CDC_SINGLE_BIT_ERR_MASK	0x1
+
+
+/*
+ * Register <Clear ECC status, used to reset the sticky status bits for XLMAC0/port0 (LPORT port0)>
+ *
+ * Clear ECC status register, used to reset the sticky status bits
+ */
+#define XPORT_XLMAC_CORE_CLEAR_ECC_STATUS_REG	0x1a0
+
+/*
+ * A rising edge on this register bit (0->1), clears the sticky
+ * TX_CDC_DOUBLE_BIT_ERR status bit
+*/
+#define  CLEAR_ECC_STATUS_CLEAR_TX_CDC_DOUBLE_BIT_ERR_MASK	0x8
+
+/*
+ * A rising edge on this register bit (0->1), clears the sticky
+ * TX_CDC_SINGLE_BIT_ERR status bit
+*/
+#define  CLEAR_ECC_STATUS_CLEAR_TX_CDC_SINGLE_BIT_ERR_MASK	0x4
+
+/*
+ * A rising edge on this register bit (0->1), clears the sticky
+ * RX_CDC_DOUBLE_BIT_ERR status bit
+*/
+#define  CLEAR_ECC_STATUS_CLEAR_RX_CDC_DOUBLE_BIT_ERR_MASK	0x2
+
+/*
+ * A rising edge on this register bit (0->1), clears the sticky
+ * RX_CDC_SINGLE_BIT_ERR status bit
+*/
+#define  CLEAR_ECC_STATUS_CLEAR_RX_CDC_SINGLE_BIT_ERR_MASK	0x1
+
+
+/*
+ * Register <XLMAC interrupt status for XLMAC0/port0 (LPORT port0)> - read-only
+ *
+ * XLMAC interrupt status register
+ */
+#define XPORT_XLMAC_CORE_INTR_STATUS_REG	0x1a8
+
+/* Active high qualifier for the TimeStamp & SEQUENCE_ID fields. */
+#define  INTR_STATUS_SUM_TS_ENTRY_VALID_MASK	0x2000
+
+/*
+ * True when link interruption state is detected as per RS layer state
+ * machine.
+ * Sticky bit is cleared by CLEAR_LINK_INTERRUPTION_STATUS.
+*/
+#define  INTR_STATUS_SUM_LINK_INTERRUPTION_STATUS_MASK	0x1000
+
+/*
+ * True when remote fault state is detected as per RS layer state machine.
+ * Sticky bit is cleared by CLEAR_REMOTE_FAULT_STATUS.
+*/
+#define  INTR_STATUS_SUM_REMOTE_FAULT_STATUS_MASK	0x800
+
+/*
+ * True when local fault state is detected as per RS layer state machine.
+ * Sticky bit is cleared by CLEAR_LOCAL_FAULT_STATUS
+*/
+#define  INTR_STATUS_SUM_LOCAL_FAULT_STATUS_MASK	0x400
+
+/*
+ * This status bit indicates a double bit error occurred in the Rx CDC
+ * memory
+*/
+#define  INTR_STATUS_SUM_RX_CDC_DOUBLE_BIT_ERR_MASK	0x200
+
+/*
+ * This status bit indicates a single bit error occurred in the Rx CDC
+ * memory
+*/
+#define  INTR_STATUS_SUM_RX_CDC_SINGLE_BIT_ERR_MASK	0x100
+
+/*
+ * This status bit indicates a double bit error occurred in the Tx CDC
+ * memory
+*/
+#define  INTR_STATUS_SUM_TX_CDC_DOUBLE_BIT_ERR_MASK	0x80
+
+/*
+ * This status bit indicates a single bit error occurred in the Tx CDC
+ * memory
+*/
+#define  INTR_STATUS_SUM_TX_CDC_SINGLE_BIT_ERR_MASK	0x40
+
+/* If set, indicates rx message fifo overflow */
+#define  INTR_STATUS_SUM_RX_MSG_OVERFLOW_MASK	0x20
+
+/* If set, indicates RX packet fifo overflow */
+#define  INTR_STATUS_SUM_RX_PKT_OVERFLOW_MASK	0x10
+
+/* If set, indicates overflow occurred in TX two-step Time Stamp FIFO */
+#define  INTR_STATUS_SUM_TX_TS_FIFO_OVERFLOW_MASK	0x8
+
+/* If set, indicates TX LLFC message fifo overflow */
+#define  INTR_STATUS_SUM_TX_LLFC_MSG_OVERFLOW_MASK	0x4
+
+/* If set, indicates tx packet fifo overflow */
+#define  INTR_STATUS_SUM_TX_PKT_OVERFLOW_MASK	0x2
+
+/* If set, indicates tx packet fifo underflow */
+#define  INTR_STATUS_SUM_TX_PKT_UNDERFLOW_MASK	0x1
+
+
+/*
+ * Register <XLMAC interrupt enable for XLMAC0/port0 (LPORT port0)>
+ *
+ * XLMAC interrupt enable register
+ */
+#define XPORT_XLMAC_CORE_INTR_ENABLE_REG	0x1b0
+
+/* If set, SUM_TS_ENTRY_VALID can set mac interrupt. */
+#define  INTR_ENABLE_EN_TS_ENTRY_VALID_MASK	0x2000
+
+/* If set, SUM_LINK_INTERRUPTION_STATUS can set mac interrupt. */
+#define  INTR_ENABLE_EN_LINK_INTERRUPTION_STATUS_MASK	0x1000
+
+/* If set, SUM_REMOTE_FAULT_STATUS can set mac interrupt. */
+#define  INTR_ENABLE_EN_REMOTE_FAULT_STATUS_MASK	0x800
+
+/* If set, SUM_LOCAL_FAULT_STATUS can set mac interrupt. */
+#define  INTR_ENABLE_EN_LOCAL_FAULT_STATUS_MASK	0x400
+
+/* If set, SUM_RX_CDC_DOUBLE_BIT_ERR can set mac interrupt. */
+#define  INTR_ENABLE_EN_RX_CDC_DOUBLE_BIT_ERR_MASK	0x200
+
+/* If set, SUM_RX_CDC_SINGLE_BIT_ERR can set mac interrupt. */
+#define  INTR_ENABLE_EN_RX_CDC_SINGLE_BIT_ERR_MASK	0x100
+
+/* If set, SUM_TX_CDC_DOUBLE_BIT_ERR can set mac interrupt. */
+#define  INTR_ENABLE_EN_TX_CDC_DOUBLE_BIT_ERR_MASK	0x80
+
+/* If set, SUM_TX_CDC_SINGLE_BIT_ERR can set mac interrupt. */
+#define  INTR_ENABLE_EN_TX_CDC_SINGLE_BIT_ERR_MASK	0x40
+
+/* If set, SUM_RX_MSG_OVERFLOW can set mac interrupt. */
+#define  INTR_ENABLE_EN_RX_MSG_OVERFLOW_MASK	0x20
+
+/* If set, SUM_RX_PKT_OVERFLOW can set mac interrupt. */
+#define  INTR_ENABLE_EN_RX_PKT_OVERFLOW_MASK	0x10
+
+/* If set, SUM_TX_TS_FIFO_OVERFLOW can set mac interrupt. */
+#define  INTR_ENABLE_EN_TX_TS_FIFO_OVERFLOW_MASK	0x8
+
+/* If set, SUM_TX_LLFC_MSG_OVERFLOW can set mac interrupt. */
+#define  INTR_ENABLE_EN_TX_LLFC_MSG_OVERFLOW_MASK	0x4
+
+/* If set, SUM_TX_PKT_OVERFLOW can set mac interrupt. */
+#define  INTR_ENABLE_EN_TX_PKT_OVERFLOW_MASK	0x2
+
+/* If set, SUM_TX_PKT_UNDERFLOW can set mac interrupt. */
+#define  INTR_ENABLE_EN_TX_PKT_UNDERFLOW_MASK	0x1
+
+
+/*
+ * Register <Version ID for XLMAC0/port0 (LPORT port0)> - read-only
+ *
+ * Version ID register
+ */
+#define XPORT_XLMAC_CORE_VERSION_ID_REG	0x1b8
+
+/* XLMAC IP Version ID - corresponds to RTL/DV label */
+#define  VERSION_ID_XLMAC_VERSION_SHIFT	0
+#define  VERSION_ID_XLMAC_VERSION_MASK	0xffff
+
+
+#endif /* ! WAN_TOPXPORT_XLMAC_CORE_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/xport_xlmac_reg.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/xport_xlmac_reg.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./enet/regs/xport_xlmac_reg.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/enet/regs/xport_xlmac_reg.h	2025-09-25 17:40:33.559357269 +0200
@@ -0,0 +1,429 @@
+#ifndef WAN_TOPXPORT_XLMAC_REG_H_
+#define WAN_TOPXPORT_XLMAC_REG_H_
+
+/* relative to core */
+#define XPORT_XLMAC_REG_OFFSET_0	0x3000
+
+/*
+ * Register <XLMAC 32-bit Direct Access Data Write>
+ *
+ */
+#define XPORT_XLMAC_REG_DIR_ACC_DATA_WRITE_REG	0x0
+
+/*
+ * Direct register access data write register, bits [63:
+ * 32].
+ * Used only for 64-bit register accesses.
+*/
+#define  XPORT_XLMAC_DIR_ACC_DATA_WRITE_REG_WRITE_DATA_SHIFT	0
+#define  XPORT_XLMAC_DIR_ACC_DATA_WRITE_REG_WRITE_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <XLMAC 32-bit Direct Access Data Read>
+ *
+ */
+#define XPORT_XLMAC_REG_DIR_ACC_DATA_READ_REG	0x4
+
+/*
+ * Direct register access data read register, bits [63:
+ * 32].
+ * Used only for 64-bit register accesses.
+*/
+#define  XPORT_XLMAC_DIR_ACC_DATA_READ_REG_READ_DATA_SHIFT	0
+#define  XPORT_XLMAC_DIR_ACC_DATA_READ_REG_READ_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <XLMAC Indirect Access Address>
+ *
+ */
+#define XPORT_XLMAC_REG_INDIR_ACC_ADDR_0_REG	0x8
+
+/*
+ * Transaction Status.
+ * When transaction completes (START_BUSY = 0 after it was set to 1) and
+ * this bit is set it indicates that register transaction completed with
+ * error.
+*/
+#define  XPORT_XLMAC_INDIR_ACC_ADDR_0_REG_ERR_MASK	0x1000
+
+/*
+ * START_BUSY, Self-clearing.
+ * CPU writes this bit to 1 in order to initiate indirect register
+ * read/write transaction.
+ * When transaction completes hardware clears this bit.
+*/
+#define  XPORT_XLMAC_INDIR_ACC_ADDR_0_REG_START_BUSY_MASK	0x800
+
+/*
+ * Register transaction:
+ * 0 :
+ * Register Write.
+ * '1 :
+ * Register Read.
+*/
+#define  XPORT_XLMAC_INDIR_ACC_ADDR_0_REG_R_W_MASK	0x400
+
+/* Register Port ID. */
+#define  XPORT_XLMAC_INDIR_ACC_ADDR_0_REG_REG_PORT_ID_SHIFT	8
+#define  XPORT_XLMAC_INDIR_ACC_ADDR_0_REG_REG_PORT_ID_MASK	0x300
+
+/*
+ * Register offset.
+ * Note:
+ * Bit 7 is ignored by HW.
+ * Write it as 0.
+*/
+#define  XPORT_XLMAC_INDIR_ACC_ADDR_0_REG_REG_OFFSET_SHIFT	0
+#define  XPORT_XLMAC_INDIR_ACC_ADDR_0_REG_REG_OFFSET_MASK	0xff
+
+
+/*
+ * Register <XLMAC Indirect Access Data Low>
+ *
+ */
+#define XPORT_XLMAC_REG_INDIR_ACC_DATA_LOW_0_REG	0xc
+
+/*
+ * Indirect register access data register, bits [31:
+ * 0].
+*/
+#define  XPORT_XLMAC_INDIR_ACC_DATA_LOW_0_REG_DATA_LOW_SHIFT	0
+#define  XPORT_XLMAC_INDIR_ACC_DATA_LOW_0_REG_DATA_LOW_MASK	0xffffffff
+
+
+/*
+ * Register <XLMAC Indirect Access Data High>
+ *
+ */
+#define XPORT_XLMAC_REG_INDIR_ACC_DATA_HIGH_0_REG	0x10
+
+/*
+ * Indirect register access data register, bits [63:
+ * 32].
+ * Used only for 64-bit register accesses.
+*/
+#define  XPORT_XLMAC_INDIR_ACC_DATA_HIGH_0_REG_DATA_HIGH_SHIFT	0
+#define  XPORT_XLMAC_INDIR_ACC_DATA_HIGH_0_REG_DATA_HIGH_MASK	0xffffffff
+
+
+/*
+ * Register <XLMAC Indirect Access Address>
+ *
+ */
+#define XPORT_XLMAC_REG_INDIR_ACC_ADDR_1_REG	0x14
+
+/*
+ * Transaction Status.
+ * When transaction completes (START_BUSY = 0 after it was set to 1) and
+ * this bit is set it indicates that register transaction completed with
+ * error.
+*/
+#define  XPORT_XLMAC_INDIR_ACC_ADDR_1_REG_ERR_MASK	0x1000
+
+/*
+ * START_BUSY, Self-clearing.
+ * CPU writes this bit to 1 in order to initiate indirect register
+ * read/write transaction.
+ * When transaction completes hardware clears this bit.
+*/
+#define  XPORT_XLMAC_INDIR_ACC_ADDR_1_REG_START_BUSY_MASK	0x800
+
+/*
+ * Register transaction:
+ * 0 :
+ * Register Write.
+ * '1 :
+ * Register Read.
+*/
+#define  XPORT_XLMAC_INDIR_ACC_ADDR_1_REG_R_W_MASK	0x400
+
+/* Register Port ID. */
+#define  XPORT_XLMAC_INDIR_ACC_ADDR_1_REG_REG_PORT_ID_SHIFT	8
+#define  XPORT_XLMAC_INDIR_ACC_ADDR_1_REG_REG_PORT_ID_MASK	0x300
+
+/*
+ * Register offset.
+ * Note:
+ * Bit 7 is ignored by HW.
+ * Write it as 0.
+*/
+#define  XPORT_XLMAC_INDIR_ACC_ADDR_1_REG_REG_OFFSET_SHIFT	0
+#define  XPORT_XLMAC_INDIR_ACC_ADDR_1_REG_REG_OFFSET_MASK	0xff
+
+
+/*
+ * Register <XLMAC Indirect Access Data Low>
+ *
+ */
+#define XPORT_XLMAC_REG_INDIR_ACC_DATA_LOW_1_REG	0x18
+
+/*
+ * Indirect register access data register, bits [31:
+ * 0].
+*/
+#define  XPORT_XLMAC_INDIR_ACC_DATA_LOW_1_REG_DATA_LOW_SHIFT	0
+#define  XPORT_XLMAC_INDIR_ACC_DATA_LOW_1_REG_DATA_LOW_MASK	0xffffffff
+
+
+/*
+ * Register <XLMAC Indirect Access Data High>
+ *
+ */
+#define XPORT_XLMAC_REG_INDIR_ACC_DATA_HIGH_1_REG	0x1c
+
+/*
+ * Indirect register access data register, bits [63:
+ * 32].
+ * Used only for 64-bit register accesses.
+*/
+#define  XPORT_XLMAC_INDIR_ACC_DATA_HIGH_1_REG_DATA_HIGH_SHIFT	0
+#define  XPORT_XLMAC_INDIR_ACC_DATA_HIGH_1_REG_DATA_HIGH_MASK	0xffffffff
+
+
+/*
+ * Register <XLMAC Configure>
+ *
+ */
+#define XPORT_XLMAC_REG_CONFIG_REG	0x20
+
+/* Active high XLMAC hard reset. */
+#define  XPORT_XLMAC_CONFIG_REG_XLMAC_RESET_MASK	0x200
+
+/*
+ * When set, Rx CDC FIFO read TDM order has same port for 2 consecutive
+ * cycles.
+ * This is a strap input for the MAC core and should be changed only while
+ * hard reset is asserted.
+*/
+#define  XPORT_XLMAC_CONFIG_REG_RX_DUAL_CYCLE_TDM_EN_MASK	0x100
+
+/*
+ * When set, RX CDC FIFO read TDM generation order for quad mode is
+ * 0,2,1,3.
+ * Otherwise, it is 0,1,2,3.
+ * This is a strap input for the MAC core and should be changed only while
+ * hard reset is asserted.
+*/
+#define  XPORT_XLMAC_CONFIG_REG_RX_NON_LINEAR_QUAD_TDM_EN_MASK	0x80
+
+/*
+ * Enables non-linear TDM generation on the receive system interface, based
+ * on data availability in Rx FIFOs.
+ * 0 :
+ * Flex TDM Enabled.
+ * 1 :
+ * Flex TDM Disabled.
+*/
+#define  XPORT_XLMAC_CONFIG_REG_RX_FLEX_TDM_ENABLE_MASK	0x40
+
+/*
+ * Number of ports supported by XLMAC.
+ * 000 :
+ * Quad Port.
+ * All ports are used.
+ * 001 :
+ * Tri-Port.
+ * Ports 0, 1 and 2 are used.
+ * 010 :
+ * Tri-Port.
+ * Ports 0, 2 and 3 are used.
+ * 011 :
+ * Dual Port.
+ * Port 0 and 2 are used.
+ * 1xx :
+ * Single Port.
+ * Port 0 is used.
+ * Note:
+ * Valid combinations for 63158 are single Port (P0 active) or Quad Port
+ * (P0 and/or P1 active).
+*/
+#define  XPORT_XLMAC_CONFIG_REG_MAC_MODE_SHIFT	3
+#define  XPORT_XLMAC_CONFIG_REG_MAC_MODE_MASK	0x38
+
+/*
+ * OSTS time-stamping disable.
+ * 0 :
+ * OSTS Enabled.
+ * 1 :
+ * OSTS Disabled.
+*/
+#define  XPORT_XLMAC_CONFIG_REG_OSTS_TIMER_DISABLE_MASK	0x4
+
+/*
+ * Bypasses transmit OSTS functionality.
+ * When set, reduces Tx path latency.
+ * 0 :
+ * Do not bypass transmit OSTS function.
+ * 1 :
+ * Bypass transmit OSTS function.
+ * XLMAC must be reset for this bit to take effect.
+*/
+#define  XPORT_XLMAC_CONFIG_REG_BYPASS_OSTS_MASK	0x2
+
+/*
+ * 1588 Egress Time-stamping mode.
+ * 0 :
+ * Legacy, sign extended 32-bit timestamp mode.
+ * 1 :
+ * 48-bit timestamp mode.
+ * XLMAC must be reset for this bit to take effect.
+*/
+#define  XPORT_XLMAC_CONFIG_REG_EGR_1588_TIMESTAMPING_MODE_MASK	0x1
+
+
+/*
+ * Register <XLMAC Interrupt Check>
+ *
+ */
+#define XPORT_XLMAC_REG_INTERRUPT_CHECK_REG	0x24
+
+/*
+ * Each bit of this field corresponds to one XLMAC port.
+ * SW should write 1 to the corresponding bit(s) of this field any time
+ * XLMAC interrupt is in use and all events obtained by reading XLMAC
+ * status register are serviced and corresponding statuses cleared.
+ * Prevents XLMAC interrupt race condition.
+*/
+#define  XPORT_XLMAC_INTERRUPT_CHECK_REG_XLMAC_INTR_CHECK_SHIFT	0
+#define  XPORT_XLMAC_INTERRUPT_CHECK_REG_XLMAC_INTR_CHECK_MASK	0xf
+
+
+/*
+ * Register <XLMAC Port 0 RXERR Mask>
+ *
+ */
+#define XPORT_XLMAC_REG_PORT_0_RXERR_MASK_REG	0x28
+
+/*
+ * The RXERR will be set if both the mask bit & the corresponding
+ * statistics bit in RSV[37:
+ * 16] are set.
+ * RSV[23] which indicates good packet received is excluded from generating
+ * RXERR.
+*/
+#define  XPORT_XLMAC_PORT_0_RXERR_MASK_REG_RSV_ERR_MASK_SHIFT	0
+#define  XPORT_XLMAC_PORT_0_RXERR_MASK_REG_RSV_ERR_MASK_MASK	0x3fffff
+
+
+/*
+ * Register <XLMAC Port 1 RXERR Mask>
+ *
+ */
+#define XPORT_XLMAC_REG_PORT_1_RXERR_MASK_REG	0x2c
+
+/*
+ * The RXERR will be set if both the mask bit & the corresponding
+ * statistics bit in RSV[37:
+ * 16] are set.
+ * RSV[23] which indicates good packet received is excluded from generating
+ * RXERR.
+*/
+#define  XPORT_XLMAC_PORT_1_RXERR_MASK_REG_RSV_ERR_MASK_SHIFT	0
+#define  XPORT_XLMAC_PORT_1_RXERR_MASK_REG_RSV_ERR_MASK_MASK	0x3fffff
+
+
+/*
+ * Register <XLMAC Port 2 RXERR Mask>
+ *
+ */
+#define XPORT_XLMAC_REG_PORT_2_RXERR_MASK_REG	0x30
+
+/*
+ * The RXERR will be set if both the mask bit & the corresponding
+ * statistics bit in RSV[37:
+ * 16] are set.
+ * RSV[23] which indicates good packet received is excluded from generating
+ * RXERR.
+*/
+#define  XPORT_XLMAC_PORT_2_RXERR_MASK_REG_RSV_ERR_MASK_SHIFT	0
+#define  XPORT_XLMAC_PORT_2_RXERR_MASK_REG_RSV_ERR_MASK_MASK	0x3fffff
+
+
+/*
+ * Register <XLMAC Port 3 RXERR Mask>
+ *
+ */
+#define XPORT_XLMAC_REG_PORT_3_RXERR_MASK_REG	0x34
+
+/*
+ * The RXERR will be set if both the mask bit & the corresponding
+ * statistics bit in RSV[37:
+ * 16] are set.
+ * RSV[23] which indicates good packet received is excluded from generating
+ * RXERR.
+*/
+#define  XPORT_XLMAC_PORT_3_RXERR_MASK_REG_RSV_ERR_MASK_SHIFT	0
+#define  XPORT_XLMAC_PORT_3_RXERR_MASK_REG_RSV_ERR_MASK_MASK	0x3fffff
+
+
+/*
+ * Register <XLMAC Remote Loopback Control>
+ *
+ */
+#define XPORT_XLMAC_REG_RMT_LPBK_CNTRL_REG	0x40
+
+/*
+ * Remote loopback logic starts reading packet data from the loopback FIFO
+ * only when at least READ_THRESHOLD entries are available in the FIFO.
+ * Used to prevent XLMAC TX underflow.
+*/
+#define  XPORT_XLMAC_RMT_LPBK_CNTRL_REG_READ_THRESHOLD_SHIFT	8
+#define  XPORT_XLMAC_RMT_LPBK_CNTRL_REG_READ_THRESHOLD_MASK	0x700
+
+/*
+ * TX PORT_ID[1:
+ * 0].
+ * Valid only when TX_PORT_SEL = 1.
+*/
+#define  XPORT_XLMAC_RMT_LPBK_CNTRL_REG_TX_PORT_ID_SHIFT	6
+#define  XPORT_XLMAC_RMT_LPBK_CNTRL_REG_TX_PORT_ID_MASK	0xc0
+
+/*
+ * When set TX PORT_ID[1:
+ * 0] comes from this registers.
+ * When cleared TX PORT_ID[1:
+ * 0] equals RX PORT_ID[1:
+ * 0].
+ * TX PORT_ID[1:
+ * 0] is used by remote loopback logic to monitor EP credits and to
+ * indicate outgoing XLMAC port.
+*/
+#define  XPORT_XLMAC_RMT_LPBK_CNTRL_REG_TX_PORT_SEL_MASK	0x20
+
+/*
+ * When set RXERR is propagated to TXERR.
+ * When cleared TXERR = 0.
+*/
+#define  XPORT_XLMAC_RMT_LPBK_CNTRL_REG_RXERR_EN_MASK	0x10
+
+/* When set CRC is corrupted for the outgoing packet. */
+#define  XPORT_XLMAC_RMT_LPBK_CNTRL_REG_TX_CRC_ERR_MASK	0x8
+
+/*
+ * TX CRC Mode.
+ * Encoded as:
+ * 00 :
+ * CRC Append.
+ * 01 :
+ * CRC Forward.
+ * 10 :
+ * CRC Replace.
+ * 11 :
+ * Reserved.
+ * CRC Append mode should be enabled only if XLMAC is programmed to strip
+ * off CRC.
+*/
+#define  XPORT_XLMAC_RMT_LPBK_CNTRL_REG_TX_CRC_MODE_SHIFT	1
+#define  XPORT_XLMAC_RMT_LPBK_CNTRL_REG_TX_CRC_MODE_MASK	0x6
+
+/*
+ * When set enables XLMAC Remote (RX to TX) loopback.
+ * XLMAC must be kept in reset while remote loopback is being enabled and
+ * released from the reset thereafter.
+*/
+#define  XPORT_XLMAC_RMT_LPBK_CNTRL_REG_RMT_LOOPBACK_EN_MASK	0x1
+
+
+#endif /* ! WAN_TOPXPORT_XLMAC_REG_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/Makefile linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/Makefile
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/Makefile	2025-09-25 17:40:33.563357289 +0200
@@ -0,0 +1,4 @@
+obj-$(CONFIG_BCM63158_SF2) 		+= bcm63158_sf2.o
+
+bcm63158_sf2-y 				+= sf2_main.o sf2_fdb.o
+bcm63158_sf2-$(CONFIG_DEBUG_FS) 	+= sf2_debug.o
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/leds_top_regs.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/leds_top_regs.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/leds_top_regs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/leds_top_regs.h	2025-09-25 17:40:33.563357289 +0200
@@ -0,0 +1,8 @@
+#ifndef LEDS_TOP_REGS_H_
+# define LEDS_TOP_REGS_H_
+
+#define LEDS_TOP_FLASH_RATE_REG(_x)	(0x10 + (_x) * 4)
+#define LEDS_TOP_BRIGHTNESS_REG(_x)	(0x20 + (_x) * 4)
+#define LEDS_TOP_POLARITY_REG		(0xc0)
+
+#endif /* !LEDS_TOP_REGS_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/serdes_regs.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/serdes_regs.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/serdes_regs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/serdes_regs.h	2025-09-25 17:40:33.563357289 +0200
@@ -0,0 +1,7 @@
+
+#define BRCM_MIIEXT_BANK            0x1f
+# define BRCM_MIIEXT_BANK_MASK       0xfff0
+# define BRCM_MIIEXT_ADDR_RANGE      0xffe0
+# define BRCM_MIIEXT_DEF_BANK        0x8000
+#define BRCM_MIIEXT_OFFSET          0x10
+# define BRCM_MIIEXT_OFF_MASK    0xf
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/sf2_debug.c linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/sf2_debug.c
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/sf2_debug.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/sf2_debug.c	2025-09-25 17:40:33.563357289 +0200
@@ -0,0 +1,336 @@
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include "sf2_priv.h"
+#include "sf2_regs.h"
+
+static struct dentry *dbg_root;
+
+/*
+ * queues stats functions
+ */
+static void *queues_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return (*pos < SF2_PORT_COUNT) ? pos : NULL;
+}
+
+static void *queues_seq_next(struct seq_file *s,
+				       void __always_unused *v,
+				       loff_t *pos)
+{
+	return (++(*pos) < SF2_PORT_COUNT) ? pos : NULL;
+}
+
+static void queues_seq_stop(struct seq_file __always_unused *s,
+			       void __always_unused *v)
+{
+}
+
+static int queues_seq_show(struct seq_file *s, void *arg)
+{
+	struct bcm_sf2_priv *priv = s->private;
+	int i = *(loff_t *)arg;
+	size_t queue;
+	u32 cur[SF2_NUM_EGRESS_QUEUES], peak[SF2_NUM_EGRESS_QUEUES];
+	u32 v;
+
+	if (!i) {
+		v = sw_core_readl(priv, SF2_CORE_FC_TOTAL_PEAK_COUNT);
+		seq_printf(s, "%-20s: %u\n", "total peak", v);
+		v = sw_core_readl(priv, SF2_CORE_FC_TOTAL_USED_COUNT);
+		seq_printf(s, "%-20s: %u\n", "total used", v);
+		seq_printf(s, "%-20s:\n", "usage per port & queue (cur/peak)");
+	}
+
+	sw_core_writel(priv, i, SF2_CORE_FC_DIAG_CTRL);
+
+	/* clear latched peak values */
+	(void)sw_core_readl(priv, SF2_CORE_FC_PEAK_RX);
+	for (queue = 0; queue < SF2_NUM_EGRESS_QUEUES; queue++)
+		(void)sw_core_readl(priv, SF2_CORE_FC_QUEUE_PEAK_COUNT(queue));
+
+	/* give some time for peak values to adjust */
+	msleep(10);
+
+	for (queue = 0; queue < SF2_NUM_EGRESS_QUEUES; queue++)
+		cur[queue] = sw_core_readl(priv, SF2_CORE_FC_QUEUE_CUR_COUNT(queue));
+
+	for (queue = 0; queue < SF2_NUM_EGRESS_QUEUES; queue++) {
+		peak[queue] = sw_core_readw(priv, SF2_CORE_FC_QUEUE_PEAK_COUNT(queue));
+	}
+
+	seq_printf(s, "port[%u]: ", i);
+	for (queue = 0; queue < SF2_NUM_EGRESS_QUEUES; queue++) {
+		seq_printf(s, "%3d/%-3d ", cur[queue], peak[queue]);
+	}
+
+	v = sw_core_readw(priv, SF2_CORE_FC_PEAK_RX);
+	seq_printf(s, "[%3d]\n", v);
+	return 0;
+}
+
+/*
+ * dump acb state
+ */
+static void *acb_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return (*pos < SF2_PORT_COUNT - 1) ? pos : NULL;
+}
+
+static void *acb_seq_next(struct seq_file *s,
+			  void __always_unused *v,
+			  loff_t *pos)
+{
+	return (++(*pos) < SF2_PORT_COUNT - 1) ? pos : NULL;
+}
+
+static void acb_seq_stop(struct seq_file __always_unused *s,
+			       void __always_unused *v)
+{
+}
+
+static int acb_seq_show(struct seq_file *s, void *arg)
+{
+	struct bcm_sf2_priv *priv = s->private;
+	int i = *(loff_t *)arg;
+	size_t queue;
+
+	seq_printf(s, "port[%u]: ", i);
+	for (queue = 0; queue < SF2_NUM_EGRESS_QUEUES; queue++) {
+		u32 v;
+
+		v = sw_acb_readl(priv, SF2_ACB_QINFLIGHT_REG(i, queue));
+		seq_printf(s, "%3d ", v);
+	}
+	seq_printf(s, "\n");
+	return 0;
+}
+
+static const struct seq_operations queues_seq_ops = {
+	.start = queues_seq_start,
+	.next  = queues_seq_next,
+	.stop  = queues_seq_stop,
+	.show  = queues_seq_show,
+};
+
+static const struct seq_operations acb_seq_ops = {
+	.start = acb_seq_start,
+	.next  = acb_seq_next,
+	.stop  = acb_seq_stop,
+	.show  = acb_seq_show,
+};
+
+static int queues_open(struct inode *inode, struct file *filep)
+{
+	struct reg_dump_priv *rpriv = inode->i_private;
+	int ret;
+
+	ret = seq_open(filep, &queues_seq_ops);
+	if (ret)
+		return ret;
+
+	((struct seq_file *)filep->private_data)->private = rpriv;
+	return 0;
+}
+
+static int acb_open(struct inode *inode, struct file *filep)
+{
+	struct reg_dump_priv *rpriv = inode->i_private;
+	int ret;
+
+	ret = seq_open(filep, &acb_seq_ops);
+	if (ret)
+		return ret;
+
+	((struct seq_file *)filep->private_data)->private = rpriv;
+	return 0;
+}
+
+static const struct file_operations queues_fops = {
+	.owner   = THIS_MODULE,
+	.open    = queues_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+static const struct file_operations acb_fops = {
+	.owner   = THIS_MODULE,
+	.open    = acb_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * Dump ARL state
+ */
+
+/* Show ARL table */
+static void *arl_show_seq_start(struct seq_file *s, loff_t *pos)
+{
+	return (*pos == 0) ? pos : NULL;
+}
+
+static void *arl_show_seq_next(struct seq_file *s,
+			  void __always_unused *v,
+			  loff_t *pos)
+{
+	return NULL;
+}
+
+static void arl_show_seq_stop(struct seq_file __always_unused *s,
+			       void __always_unused *v)
+{
+}
+
+static int arl_dump_entry(struct bcm_sf2_priv *priv,
+			  const struct sf2_arl_entry *ent,
+			  void *data)
+{
+	struct seq_file *s = (struct seq_file *)data;
+	seq_printf(s, "Mac %pM Vid: %u Port: %u Valid: %u Age: %u Static %u\n",
+		   ent->mac, ent->vid, ent->port, ent->is_valid,
+		   ent->is_age, ent->is_static);
+	return 0;
+}
+
+static int arl_show_seq_show(struct seq_file *s, void *arg)
+{
+	struct bcm_sf2_priv *priv = s->private;
+
+	seq_printf(s, "ACL: \n");
+	sf2_arl_for_each(priv, arl_dump_entry, (void *)s);
+	return 0;
+}
+
+static const struct seq_operations arl_show_seq_ops = {
+	.start = arl_show_seq_start,
+	.next  = arl_show_seq_next,
+	.stop  = arl_show_seq_stop,
+	.show  = arl_show_seq_show,
+};
+
+static int arl_show_open(struct inode *inode, struct file *filep)
+{
+	struct reg_dump_priv *rpriv = inode->i_private;
+	int ret;
+
+	ret = seq_open(filep, &arl_show_seq_ops);
+	if (ret)
+		return ret;
+
+	((struct seq_file *)filep->private_data)->private = rpriv;
+	return 0;
+}
+
+static const struct file_operations arl_show_fops = {
+	.owner   = THIS_MODULE,
+	.open    = arl_show_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/* Clear ARL entry */
+#define ARL_CLEAR_FILE_SZ 32
+struct arl_clear_file {
+	struct bcm_sf2_priv *priv;
+	char buf[ARL_CLEAR_FILE_SZ];
+	struct mutex lock;
+};
+
+static ssize_t arl_clear_parse(struct file *file, const char __user *buf,
+			       size_t len, loff_t *ppos)
+{
+	struct arl_clear_file *fdata = file->private_data;
+	struct bcm_sf2_priv *priv = fdata->priv;
+	size_t size;
+	int ret;
+	u16 vid;
+	u8 mac[ETH_ALEN];
+
+	ret = mutex_lock_interruptible(&fdata->lock);
+	if (ret)
+		return ret;
+
+	ret = -EFAULT;
+	size = min(sizeof(fdata->buf) - 1, len);
+	if (copy_from_user(fdata->buf, buf, size))
+		goto out;
+
+	fdata->buf[size] = '\0';
+	ret = sscanf(fdata->buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hu",
+		     &mac[0], &mac[1], &mac[2], &mac[3], &mac[4], &mac[5],
+		     &vid);
+	if (ret != 7) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	dev_info(&priv->pdev->dev, "Clearing mac %pM vid %u\n", mac, vid);
+	sf2_arl_delete(priv, mac, vid);
+	ret = len;
+out:
+	mutex_unlock(&fdata->lock);
+	return ret;
+}
+
+static int arl_clear_open(struct inode *inode, struct file *filep)
+{
+	struct arl_clear_file *fdata;
+	struct bcm_sf2_priv *priv = inode->i_private;
+
+	fdata = devm_kzalloc(&priv->pdev->dev, sizeof(*fdata), GFP_KERNEL);
+	if (fdata == NULL)
+		return -ENOMEM;
+
+	fdata->priv = priv;
+	mutex_init(&fdata->lock);
+	filep->private_data = fdata;
+	return nonseekable_open(inode, filep);
+}
+
+static int arl_clear_release(struct inode *inode, struct file *file)
+{
+	struct bcm_sf2_priv *priv = inode->i_private;
+	devm_kfree(&priv->pdev->dev, file->private_data);
+	return 0;
+}
+
+static const struct file_operations arl_clear_fops = {
+	.owner		= THIS_MODULE,
+	.open		= arl_clear_open,
+	.write		= arl_clear_parse,
+	.llseek		= noop_llseek,
+	.release	= arl_clear_release,
+};
+
+/*
+ *
+ */
+void bcm_sf2_dbg_init(struct bcm_sf2_priv *priv)
+{
+	struct dentry *dir;
+	dbg_root = debugfs_create_dir("bcm63158_sf2", NULL);
+	if (IS_ERR_OR_NULL(dbg_root))
+		return;
+
+	debugfs_create_file("queues", 0400, dbg_root, priv, &queues_fops);
+	debugfs_create_file("acb", 0400, dbg_root, priv, &acb_fops);
+
+	dir = debugfs_create_dir("arl", dbg_root);
+	if (IS_ERR_OR_NULL(dir))
+		return;
+	debugfs_create_file("show", 0400, dir, priv, &arl_show_fops);
+	debugfs_create_file("clear", 0200, dir, priv, &arl_clear_fops);
+}
+
+/*
+ *
+ */
+void bcm_sf2_dbg_exit(void)
+{
+	if (!IS_ERR_OR_NULL(dbg_root))
+		debugfs_remove_recursive(dbg_root);
+	dbg_root = NULL;
+}
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/sf2_fdb.c linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/sf2_fdb.c
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/sf2_fdb.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/sf2_fdb.c	2025-09-25 17:40:33.563357289 +0200
@@ -0,0 +1,595 @@
+#include <linux/module.h>
+#include <linux/hashtable.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <net/dsa.h>
+
+#include "../../../net/bridge/br_private.h"
+#include "sf2_priv.h"
+#include "sf2_regs.h"
+
+#define SF2_FDB_POLL_DELAY_MS 1500 /* 1.5sec */
+
+struct sf2_fdb_entry {
+	struct net_device *port;
+	struct hlist_node hlist;
+	struct list_head next;
+	u16 vid;
+	u8 mac[ETH_ALEN];
+};
+
+/**
+ * Get FDB hashtable key from MAC address
+ *
+ * @mac: MAC address to compute the key from
+ *
+ * @return: The computed key
+ */
+static inline u64 sf2_mac_to_hkey(const u8 *mac)
+{
+	u64 key = 0;
+	ether_addr_copy((u8 *)&key, mac);
+	return key;
+}
+
+/**
+ * Get FDB hashtable key from ARL info
+ *
+ * @ent: ARL info to compute the key from
+ *
+ * @return: The computed key
+ */
+static inline u64 sf2_arl_entry_to_hkey(const struct sf2_arl_entry *ent)
+{
+	return sf2_mac_to_hkey(ent->mac);
+}
+
+/**
+ * Create a FDB entry from ARL info
+ *
+ * @priv: Common SF2 private object
+ * @ent: ARL info to create the FDB entry from
+ *
+ * @return: The newly created FDB entry on success, error pointer otherwise
+ */
+static struct sf2_fdb_entry *
+sf2_fdb_add_entry(struct bcm_sf2_priv *priv, const struct sf2_arl_entry *ent)
+{
+	struct sf2_fdb_entry *fe;
+	u64 key;
+
+	lockdep_assert_held(&priv->fdb.lock);
+
+	fe = devm_kmalloc(&priv->pdev->dev, sizeof(*fe), GFP_KERNEL);
+	if (!fe)
+		return ERR_PTR(-ENOMEM);
+	ether_addr_copy(fe->mac, ent->mac);
+	fe->vid = ent->vid;
+	key = sf2_arl_entry_to_hkey(ent);
+	fe->port = dsa_to_port(priv->ds, ent->port)->user;
+	hash_add(priv->fdb.arl_hash, &fe->hlist, key);
+	list_add(&fe->next, &priv->fdb.cache);
+	++priv->fdb.nr_entries;
+	return fe;
+}
+
+/**
+ * Cleanup a FDB entry
+ *
+ * @priv: Common SF2 private object
+ * @fe: FDB entry to clean
+ */
+static void sf2_fdb_del_entry(struct bcm_sf2_priv *priv,
+			      struct sf2_fdb_entry *fe)
+{
+	lockdep_assert_held(&priv->fdb.lock);
+
+	--priv->fdb.nr_entries;
+	hash_del(&fe->hlist);
+	list_del(&fe->next);
+	devm_kfree(&priv->pdev->dev, fe);
+}
+
+struct sf2_vlan_notify {
+	enum switchdev_notifier_type type;
+	struct switchdev_notifier_fdb_info *info;
+};
+
+/**
+ * Notify switchdev subsystem to add/remove FDB entry for a specific vlan
+ * subdevice.
+ *
+ * @dev: VLAN subdevice
+ * @vid: VLAN ID of the subdevice
+ * @arg: FDB information
+ */
+static int _sf2_fdb_notify_vlan(struct net_device *dev, int vid, void *arg)
+{
+	struct sf2_vlan_notify *vn = (struct sf2_vlan_notify *)arg;
+
+	call_switchdev_notifiers(vn->type, dev, &vn->info->info, NULL);
+	return 0;
+}
+
+/**
+ * Notify switchdev subsystem to add/remove FDB entry.
+ *
+ * @priv: Common SF2 private structure
+ * @fe: FDB entry information to add to the bridge
+ * @added: True if the FDB should be added, false otherwise
+ */
+static void sf2_fdb_notify(struct bcm_sf2_priv *priv,
+			   struct sf2_fdb_entry *fe, bool added)
+{
+	struct switchdev_notifier_fdb_info info = {
+		.addr = fe->mac,
+		.vid = fe->vid,
+		.offloaded = added,
+	};
+	struct sf2_vlan_notify vnotify = {
+		.info = &info,
+	};
+	enum switchdev_notifier_type type;
+
+	type = added ? SWITCHDEV_FDB_ADD_TO_BRIDGE :
+			SWITCHDEV_FDB_DEL_TO_BRIDGE;
+	call_switchdev_notifiers(type, fe->port, &info.info, NULL);
+	vnotify.type = type;
+
+	/*
+	 * XXX This is hackish, vlan are not supported yet, but considering a
+	 * setup where vlan is configured but any MAC learnt in one port could
+	 * not be learnt in other port regardless of the packet vlan, vid
+	 * interface could be used.
+	 *
+	 * Update the fdb for such sub interfaces.
+	 */
+	vlan_for_each(fe->port, _sf2_fdb_notify_vlan, &vnotify);
+}
+
+struct sf2_arl_fdb_lookup {
+	struct bcm_sf2_priv *priv;
+	const struct sf2_arl_entry *ent;
+	struct net_bridge_fdb_entry *fdb;
+};
+
+/**
+ * Find if there is already a permanent FDB entry corresponding to an ARL one
+ * in the master bridge of a specific vlan sub interface
+ *
+ * @dev: Interface to check if a FDB entry matches in its master bridge
+ * @vid: Interface vlan id (not used)
+ * @arg: Holds ARL entry information to match and filled in with found FDB if
+ * any
+ *
+ * @return: 0 if no FDB has been found, 1 otherwise
+ */
+static int _sf2_arl_find_permanent_fdb_rcu(struct net_device *dev, int vid,
+					   void *arg)
+{
+	struct net_device *bdev = netdev_master_upper_dev_get(dev);
+	struct sf2_arl_fdb_lookup *afl = (struct sf2_arl_fdb_lookup *)arg;
+	struct net_bridge_fdb_entry *fdb;
+	struct net_bridge *br;
+
+	if (!bdev || !netif_is_bridge_master(bdev))
+		return 0;
+
+	br = netdev_priv(bdev);
+	fdb = br_fdb_find_rcu(br, afl->ent->mac, afl->ent->vid);
+	if (!fdb || (!test_bit(BR_FDB_ADDED_BY_USER, &fdb->flags) &&
+		     !test_bit(BR_FDB_LOCAL, &fdb->flags)))
+		return 0;
+
+	afl->fdb = fdb;
+	return 1;
+}
+
+/**
+ * Find if there is already a permanent (user added or local) FDB entry
+ * corresponding to an ARL one in any SF2 port's bridges
+ *
+ * @priv: Common SF2 private object
+ * @ent: ARL entry to search for a corresponding FDB entry
+ *
+ * @return: Found FDB entry if any, NULL pointer otherwise
+ */
+static struct net_bridge_fdb_entry *
+sf2_arl_find_permanent_fdb_rcu(struct bcm_sf2_priv *priv,
+			       const struct sf2_arl_entry *ent)
+{
+	struct net_device *port = dsa_to_port(priv->ds, ent->port)->user;
+	struct sf2_arl_fdb_lookup afl = {
+		.priv = priv,
+		.ent = ent,
+		.fdb = NULL,
+	};
+
+	_sf2_arl_find_permanent_fdb_rcu(port, 0, &afl);
+	if (afl.fdb)
+		goto out;
+
+	vlan_for_each(port, _sf2_arl_find_permanent_fdb_rcu, &afl);
+out:
+	return afl.fdb;
+}
+
+/**
+ * Synchronize a specific ARL entry with FDB table. This is supposed to be
+ * called for each ARL entries, at the end of this loop, data should point to
+ * the actual number of FDB entries that should be purged.
+ *
+ * @priv: Common SF2 private structure
+ * @ent: ARL entry to convert and add into FDB table
+ * @data: Updated with the number of FDB entry to remove
+ *
+ * @return: 0
+ */
+static int sf2_arl_sync_entry(struct bcm_sf2_priv *priv,
+			      const struct sf2_arl_entry *ent,
+			      void *data)
+{
+	unsigned int *nr_purge = (unsigned int *)data;
+	struct bcm_sf2_fdb *fdb = &priv->fdb;
+	struct net_bridge_fdb_entry *bfe;
+	struct net_device *port, *rdev;
+	const struct dsa_port *dp;
+	struct sf2_fdb_entry *fe;
+	u64 key;
+
+	lockdep_assert_held(&fdb->lock);
+
+	if (!ent->is_valid || (ent->port >= SF2_PORT_COUNT))
+		goto out;
+
+	dp = dsa_to_port(priv->ds, ent->port);
+	if ((dp->type != DSA_PORT_TYPE_USER) &&
+	    (dp->type != DSA_PORT_TYPE_DSA))
+		goto out;
+
+	port = dp->user;
+
+	rcu_read_lock();
+	bfe = sf2_arl_find_permanent_fdb_rcu(priv, ent);
+	if (bfe) {
+		rdev = (bfe->dst) ? bfe->dst->dev : NULL;
+		if (rdev && is_vlan_dev(rdev))
+			rdev = vlan_dev_real_dev(rdev);
+		/*
+		 * Only remove ARL entry if the permanent FDB is for another
+		 * port
+		 */
+		if (rdev != port) {
+			dev_dbg(&priv->pdev->dev,
+				"Removing ARL entry %pM for permanent FDB\n",
+				ent->mac);
+			sf2_arl_delete(priv, ent->mac, ent->vid);
+		}
+		rcu_read_unlock();
+		goto out;
+	}
+	rcu_read_unlock();
+
+	key = sf2_arl_entry_to_hkey(ent);
+	hash_for_each_possible(fdb->arl_hash, fe, hlist, key) {
+		if (ether_addr_equal(ent->mac, fe->mac) &&
+		    (ent->vid == fe->vid)) {
+			/* Move hit entries to the head of the cache list so
+			 * that all unhit fdb entries end up at the cache tail */
+			list_move(&fe->next, &fdb->cache);
+			if (!(*nr_purge)) {
+				dev_err(&priv->pdev->dev,
+					"FDB cache in weird state");
+				goto out;
+			}
+			*nr_purge = *nr_purge - 1;
+			/* update source port */
+			if (fe->port != port) {
+				fe->port = port;
+				sf2_fdb_notify(priv, fe, true);
+			}
+			goto out;
+		}
+	}
+
+	/* This is a new entry sync our fdb cache and notify switchdev */
+	fe = sf2_fdb_add_entry(priv, ent);
+	if (IS_ERR(fe)) {
+		dev_err(&priv->pdev->dev, "Cannot create fdb cache entry");
+		goto out;
+	}
+	sf2_fdb_notify(priv, fe, true);
+
+out:
+	return 0;
+}
+
+/**
+ * ARL/FDB synchronization worker, called regularly to poll for ARL table
+ * modifications and synchronize the FDB accordingly.
+ *
+ * @work: work structure
+ */
+static void sf2_fdb_bookkeeping(struct work_struct *work)
+{
+	struct bcm_sf2_priv *priv = container_of(work, struct bcm_sf2_priv,
+						 fdb.poll_wk.work);
+	struct bcm_sf2_fdb *fdb = &priv->fdb;
+	struct sf2_fdb_entry *fe;
+	unsigned int i, nr_purge = fdb->nr_entries;
+	u32 lrn = sw_core_readl(priv, SF2_CORE_TOTAL_SA_LRN_CNTR);
+
+	rtnl_lock();
+
+	if (!lrn)
+		goto out;
+
+	mutex_lock(&fdb->lock);
+
+	/* Reset SA learn limit */
+	sw_core_writel(priv, SF2_CORE_TOTAL_SA_LRN_CNTR_RST_MASK,
+			SF2_CORE_SA_LRN_CNTR_RST);
+
+	/*
+	 * Insert new sf2 entries and sort the cache in such a way that entries
+	 * that are not in ARL table anymore will be at the end of this cache.
+	 */
+	sf2_arl_for_each(priv, sf2_arl_sync_entry, &nr_purge);
+
+	/*
+	 * Purge the first entries which are the ones that are not in sf2's ARL
+	 * table anymore
+	 */
+	for (i = 0; i < nr_purge; ++i) {
+		if (list_empty(&fdb->cache)) {
+			dev_err(&priv->pdev->dev, "FDB cache in weird state");
+			break;
+		}
+		fe = list_last_entry(&fdb->cache, struct sf2_fdb_entry, next);
+		sf2_fdb_notify(priv, fe, false);
+		sf2_fdb_del_entry(priv, fe);
+	}
+
+	mutex_unlock(&fdb->lock);
+out:
+	queue_delayed_work(fdb->poll_wq, &fdb->poll_wk, fdb->poll_delay);
+	rtnl_unlock();
+}
+
+/**
+ * Retrieve a SF2 port object from its net_device pointer
+ *
+ * @priv: Common SF2 object
+ * @dev: net_device object to find SF2 port from
+ *
+ * @return: The SF2 port if found, NULL pointer otherwise.
+ */
+static inline struct sf2_port *
+sf2_find_port(struct bcm_sf2_priv *priv, struct net_device *dev)
+{
+	const struct dsa_port *dp;
+	size_t i;
+	for (i = 0; i < SF2_PORT_COUNT; ++i) {
+		dp = dsa_to_port(priv->ds, i);
+		if (dp->user == dev)
+			return &priv->ports[dp->index];
+	}
+	return NULL;
+}
+
+/**
+ * Mirror ARL table according to FDB event worker, called on new FDB event,
+ * remove an ARL entry if an FDB has been moved from an SF2's port to another
+ * one not belonging to the switch
+ *
+ * @work: Scheduled work data
+ */
+static void sf2_fdb_update(struct work_struct *work)
+{
+	struct sf2_switchdev_ev_work *wk =
+			container_of(work, struct sf2_switchdev_ev_work, work);
+	struct bcm_sf2_priv *priv = wk->priv;
+	struct net_device *dev = wk->dev;
+	struct net_device *rdev = dev;
+	struct sf2_fdb_entry *fe;
+	bool found = false;
+	u64 key;
+
+	rtnl_lock();
+
+	key = sf2_mac_to_hkey(wk->info.addr);
+
+	if (is_vlan_dev(rdev))
+		rdev = vlan_dev_real_dev(rdev);
+
+	mutex_lock(&priv->fdb.lock);
+	hash_for_each_possible(priv->fdb.arl_hash, fe, hlist, key) {
+		if (ether_addr_equal(wk->info.addr, fe->mac) &&
+		    (wk->info.vid == fe->vid)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found)
+		goto out;
+
+	/* We are updating the same entry */
+	if (unlikely(rdev == fe->port)) {
+		/*
+		 * If FDB is added_by_user, we should stop synchronizing it with
+		 * ARL
+		 */
+		if (wk->info.added_by_user)
+			sf2_fdb_del_entry(priv, fe);
+		goto out;
+	}
+
+	if (unlikely(sf2_find_port(priv, rdev))) {
+		dev_warn(&priv->pdev->dev,
+			 "FDB is on invalid sf2 port %s should be %s",
+			 dev_name(&dev->dev), dev_name(&fe->port->dev));
+		goto out;
+	}
+
+	/* TODO check for same bridge */
+
+	dev_dbg(&priv->pdev->dev, "Removing ARL for %pM on port %s",
+		wk->info.addr, dev_name(&fe->port->dev));
+	/* Remove other for other vlan */
+	sf2_fdb_notify(priv, fe, false);
+	sf2_arl_delete(priv, wk->info.addr, wk->info.vid);
+	sf2_fdb_del_entry(priv, fe);
+
+out:
+	mutex_unlock(&priv->fdb.lock);
+	rtnl_unlock();
+	/*
+	 * atomic_set_release is not needed, as release semantic is done by
+	 * previous mutex_unlock() and rtnl_unlock()
+	 */
+	atomic_set(&wk->free, 1);
+	dev_put(dev);
+}
+
+/**
+ * Switchdev event handler callback
+ *
+ * May run in atomic context: grab a free preallocated event object,
+ * copy the FDB info into it and defer the actual processing to
+ * sf2_fdb_update() on the ordered update workqueue.
+ *
+ * @nb: Switchdev notifier object
+ * @event: Switchdev event ID
+ * @ptr: Event data
+ *
+ * @return: NOTIFY_DONE on success, NOTIFY_BAD otherwise
+ */
+static int sf2_switchdev_event(struct notifier_block *nb,
+			       unsigned long event, void *ptr)
+{
+	struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+	struct switchdev_notifier_fdb_info *fdb_info;
+	struct sf2_switchdev_ev_work *wk = NULL;
+	struct bcm_sf2_priv *priv;
+	int err = NOTIFY_DONE;
+	u8 *addr_buf;
+	size_t i;
+
+	if (event != SWITCHDEV_FDB_ADD_TO_DEVICE)
+		goto out;
+
+	priv = container_of(nb, struct bcm_sf2_priv, fdb.switchdev_notifier);
+	fdb_info = container_of(ptr, struct switchdev_notifier_fdb_info, info);
+	err = NOTIFY_BAD;
+
+	/* Find a usable switchdev event object and take reference on it */
+	for (i = 0; i < ARRAY_SIZE(priv->fdb.sd_ev); i++) {
+		if (atomic_cmpxchg(&priv->fdb.sd_ev[i].free, 1, 0)) {
+			/*
+			 * atomic_cmpxchg_acquire is not needed as the acquire
+			 * semantic is done by queue_work()
+			 */
+			wk = &priv->fdb.sd_ev[i];
+			break;
+		}
+	}
+	if (!wk) {
+		dev_err(&priv->pdev->dev,
+			"No free switchdev event object found");
+		goto out;
+	}
+	wk->priv = priv;
+	wk->dev = dev;
+	/*
+	 * Preserve the preallocated MAC buffer across the struct copy: the
+	 * memcpy() below overwrites wk->info.addr with fdb_info->addr, a
+	 * pointer that is only valid for the duration of this notifier
+	 * call.  Without restoring our own buffer, the deferred
+	 * sf2_fdb_update() would dereference a stale pointer.
+	 */
+	addr_buf = (u8 *)wk->info.addr;
+	memcpy(&wk->info, fdb_info, sizeof(wk->info));
+	wk->info.addr = addr_buf;
+	ether_addr_copy(addr_buf, fdb_info->addr);
+
+	/* Do not free dev in the meantime between now and sf2_fdb_update */
+	dev_hold(dev);
+	INIT_WORK(&wk->work, sf2_fdb_update);
+	queue_work(priv->fdb.update_wq, &wk->work);
+
+	err = NOTIFY_DONE;
+out:
+	return err;
+}
+
+/**
+ * Init fdb management structure and start ARL polling worker
+ *
+ * @priv: Common SF2 private structure to initialize fdb from
+ * @return: 0 on success, negative number otherwise
+ */
+int sf2_fdb_init(struct bcm_sf2_priv *priv)
+{
+	struct bcm_sf2_fdb *fdb = &priv->fdb;
+	struct device *dev = &priv->pdev->dev;
+	size_t i;
+
+	fdb->poll_wq = alloc_workqueue("%s-fdb-polling", 0, 0, dev_name(dev));
+	if (!fdb->poll_wq) {
+		dev_err(&priv->pdev->dev, "Cannot allocate poll workqueue\n");
+		goto err;
+	}
+
+	/* ordered: FDB events must be processed in arrival order */
+	fdb->update_wq = alloc_ordered_workqueue("%s-fdb-update", 0,
+						 dev_name(dev));
+	if (fdb->update_wq == NULL) {
+		dev_err(dev, "cannot create switchdev workqueue\n");
+		goto update_err;
+	}
+
+	/* preallocate one MAC buffer per event slot; slot marked free */
+	for(i = 0; i < ARRAY_SIZE(fdb->sd_ev); ++i) {
+		fdb->sd_ev[i].info.addr = devm_kmalloc(dev, ETH_ALEN,
+						       GFP_KERNEL);
+		if (!fdb->sd_ev[i].info.addr)
+			goto undo_sd_ev;
+		atomic_set(&fdb->sd_ev[i].free, 1);
+	}
+
+	hash_init(fdb->arl_hash);
+	INIT_LIST_HEAD(&fdb->cache);
+	mutex_init(&fdb->lock);
+
+	/* enable SA learn accounting on all LAN ports */
+	sw_core_writel(priv, gen_lan_port_mask(priv), SF2_CORE_SA_LIMIT_ENABLE);
+	INIT_DELAYED_WORK(&fdb->poll_wk, sf2_fdb_bookkeeping);
+	fdb->poll_delay = msecs_to_jiffies(SF2_FDB_POLL_DELAY_MS);
+
+	fdb->switchdev_notifier.notifier_call = sf2_switchdev_event;
+	/* NOTE(review): return value is ignored here -- confirm acceptable */
+	register_switchdev_notifier(&fdb->switchdev_notifier);
+
+	queue_delayed_work(fdb->poll_wq, &fdb->poll_wk, fdb->poll_delay);
+	return 0;
+
+undo_sd_ev:
+	/* free the i buffers successfully allocated so far */
+	for(;i > 0; --i)
+		devm_kfree(dev, fdb->sd_ev[i - 1].info.addr);
+	destroy_workqueue(fdb->update_wq);
+update_err:
+	destroy_workqueue(fdb->poll_wq);
+err:
+	return -ENOMEM;
+}
+
+/**
+ * Cleanup fdb management structure and stop ARL polling worker
+ *
+ * @priv: Common SF2 private structure to cleanup fdb from
+ */
+void sf2_fdb_exit(struct bcm_sf2_priv *priv)
+{
+	struct bcm_sf2_fdb *fdb = &priv->fdb;
+	struct sf2_fdb_entry *fe;
+	struct hlist_node *tmp;
+	size_t i;
+	int bkt;
+
+	/* stop feeding new events to the update workqueue */
+	unregister_switchdev_notifier(&fdb->switchdev_notifier);
+
+	/*
+	 * Drain both workqueues *before* releasing the per-event MAC
+	 * buffers: a still-queued sf2_fdb_update() dereferences
+	 * sd_ev[i].info.addr, so freeing first would be a use-after-free.
+	 */
+	destroy_workqueue(fdb->poll_wq);
+	destroy_workqueue(fdb->update_wq);
+
+	for (i = 0; i < ARRAY_SIZE(fdb->sd_ev); ++i)
+		devm_kfree(&priv->pdev->dev, fdb->sd_ev[i].info.addr);
+
+	/* drop whatever is left in the ARL mirror cache */
+	hash_for_each_safe(fdb->arl_hash, bkt, tmp, fe, hlist) {
+		hash_del(&fe->hlist);
+		sf2_fdb_del_entry(priv, fe);
+	}
+}
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/sf2_main.c linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/sf2_main.c
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/sf2_main.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/sf2_main.c	2025-09-25 17:40:33.563357289 +0200
@@ -0,0 +1,2708 @@
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/reset.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/mfd/syscon.h>
+#include <linux/if_bridge.h>
+#include <linux/etherdevice.h>
+#include <net/dsa.h>
+#include "sf2_priv.h"
+#include "sf2_regs.h"
+#include "serdes_regs.h"
+#include "leds_top_regs.h"
+
+/* static wiring of the 63158 switch: array index == SF2 port number */
+static const struct sf2_port_hw_desc
+bcm63158_port_descs[SF2_PORT_COUNT] = {
+	[0] = {
+		.port_type	= SF2_PORT_T_QUAD_GPHY,
+		.quad_gphy_port	= 0,
+	},
+
+	[1] = {
+		.port_type	= SF2_PORT_T_QUAD_GPHY,
+		.quad_gphy_port	= 1,
+	},
+
+	[2] = {
+		.port_type	= SF2_PORT_T_QUAD_GPHY,
+		.quad_gphy_port	= 2,
+	},
+
+	[3] = {
+		.port_type	= SF2_PORT_T_XBAR_MUX1,
+	},
+
+	[4] = {
+		.port_type	= SF2_PORT_T_XBAR_4X3,
+		.xbar_out_port	= 0,
+	},
+
+	[5] = {
+		.port_type	= SF2_PORT_T_UNIMAC,
+	},
+
+	[6] = {
+		.port_type	= SF2_PORT_T_XBAR_4X3,
+		.xbar_out_port	= 1,
+	},
+
+	[7] = {
+		.port_type	= SF2_PORT_T_UNIMAC,
+	},
+
+	/* port 8 is the IMP (CPU-facing) port, behind crossbar mux2 */
+	[8] = {
+		.port_type	= SF2_PORT_T_XBAR_MUX2,
+		.is_imp		= true,
+	},
+};
+
+/* crossbar input ports (serdes/gphy/rgmii) and their external names */
+static const struct sf2_port_hw_desc
+bcm63158_xbar_port_descs[SF2_XBAR_IN_PORT_COUNT] = {
+	[0] = {
+		.port_type	= SF2_PORT_T_SERDES,
+	},
+
+	[1] = {
+		.port_type	= SF2_PORT_T_SINGLE_GPHY,
+	},
+
+	[2] = {
+		.port_type	= SF2_PORT_T_RGMII,
+	},
+
+	[3] = {
+		.port_type	= SF2_PORT_T_RGMII,
+	},
+};
+
+static const char *
+bcm63158_xbar_in_port_names[SF2_XBAR_IN_PORT_COUNT] = {
+	[0] = "P9",
+	[1] = "P10",
+	[2] = "P11",
+	[3] = "P12",
+};
+
+/* crossbar outputs: switch ports 4 and 6, plus the WAN path */
+static const char *
+bcm63158_xbar_out_port_names[SF2_XBAR_OUT_PORT_COUNT] = {
+	[0] = "P4",
+	[1] = "P6",
+	[2] = "WAN",
+};
+
+/* mux1 inputs feeding switch port 3 (see mux1_out_port_name) */
+static const struct sf2_port_hw_desc
+bcm63158_mux1_port_descs[2] = {
+	[0] = {
+		.port_type	= SF2_PORT_T_QUAD_GPHY,
+		.quad_gphy_port	= 3,
+	},
+
+	[1] = {
+		.port_type	= SF2_PORT_T_RGMII,
+	},
+};
+
+static const char *
+mux1_in_port_names[2] = {
+	"P14",
+	"P13",
+};
+
+static const char *mux1_out_port_name = "P3";
+
+/* mux2 inputs feeding the IMP port 8 (see mux2_out_port_name) */
+static const struct sf2_port_hw_desc
+bcm63158_mux2_port_descs[2] = {
+	[0] = {
+		.port_type	= SF2_PORT_T_SYSPORT,
+	},
+
+	[1] = {
+		.port_type	= SF2_PORT_T_UNIMAC,
+	},
+};
+
+static const char *
+mux2_in_port_names[2] = {
+	"system_port",
+	"unimac",
+};
+
+static const char *mux2_out_port_name = "P8";
+
+
+
+/*
+ * LEDS top helpers for led flash rate, brightness and polarity.
+ */
+
+/*
+ * 4 registers, with 8 leds handled on each registers. 3 bits rate
+ * fields every 4 bits.
+ */
+/* rate: 3-bit flash-rate code, written into the led's 4-bit slot */
+static void leds_top_set_flash_rate(struct bcm_sf2_priv *priv, int led,
+				    u8 rate)
+{
+	int reg_off = led / 8;
+	int reg_shift = (led % 8) * 4;
+	u32 reg;
+
+	/* read-modify-write of this led's nibble only */
+	regmap_read(priv->leds_top_regmap, LEDS_TOP_FLASH_RATE_REG(reg_off),
+		    &reg);
+	reg &= ~(0x7 << reg_shift);
+	reg |= (rate << reg_shift);
+	regmap_write(priv->leds_top_regmap, LEDS_TOP_FLASH_RATE_REG(reg_off),
+		     reg);
+}
+
+/*
+ * 4 registers, with 8 leds handled on each registers. 4 bits
+ * brightness fields every 4 bits.
+ */
+/* brightness: 4-bit level, written into the led's 4-bit slot */
+static void leds_top_set_brightness(struct bcm_sf2_priv *priv, int led,
+				    u8 brightness)
+{
+	int reg_off = led / 8;
+	int reg_shift = (led % 8) * 4;
+	u32 reg;
+
+	/* read-modify-write of this led's nibble only */
+	regmap_read(priv->leds_top_regmap, LEDS_TOP_BRIGHTNESS_REG(reg_off),
+		    &reg);
+	reg &= ~(0xf << reg_shift);
+	reg |= (brightness << reg_shift);
+	regmap_write(priv->leds_top_regmap, LEDS_TOP_BRIGHTNESS_REG(reg_off),
+		     reg);
+}
+
+/*
+ * 1 register, with 32 leds handled on a signel register, 1 bit
+ * polarity fields.
+ */
+/* polarity: one bit per led in the single 32-led polarity register */
+static void leds_top_set_polarity(struct bcm_sf2_priv *priv, int led,
+				  int polarity)
+{
+	u32 reg;
+
+	regmap_read(priv->leds_top_regmap, LEDS_TOP_POLARITY_REG, &reg);
+	if (polarity)
+		reg |= (1 << led);
+	else
+		reg &= ~(1 << led);
+	regmap_write(priv->leds_top_regmap, LEDS_TOP_POLARITY_REG, reg);
+}
+
+/*
+ * only handle link/activity leds.
+ */
+static void sf2_port_leds_control(struct bcm_sf2_priv *priv,
+				  struct sf2_port *port, bool enable)
+{
+	/* negative led_link_act presumably means no led wired -- skip */
+	if (!port->used || port->cfg.led_link_act < 0)
+		return;
+
+	/*
+	 * LEDS TOP setup
+	 */
+	leds_top_set_flash_rate(priv, port->cfg.led_link_act, 0x0);
+	leds_top_set_brightness(priv, port->cfg.led_link_act,
+				enable ? 0x8 : 0x0);
+	leds_top_set_polarity(priv, port->cfg.led_link_act, 0x1);
+
+	/*
+	 * SWITCH_REG setup
+	 */
+	if (enable)
+		sw_reg_writel(priv, LED_CTRL_TX_ACT_EN | LED_CTRL_RX_ACT_EN,
+			      SF2_REG_LED_CTRL(port->id));
+	else
+		sw_reg_writel(priv, 0x0, SF2_REG_LED_CTRL(port->id));
+}
+
+/*
+ * enable/disable a switch port: TX/RX enable, STP state and leds
+ */
+static void sf2_port_control(struct bcm_sf2_priv *priv, int port, bool enable)
+{
+	u32 reg;
+
+	if (priv->ports[port].enabled == enable)
+		return;
+
+	if (enable)
+		dev_info(&priv->pdev->dev, "enabling port %d\n", port);
+	else
+		dev_info(&priv->pdev->dev, "disabling port %d\n", port);
+
+	if (bcm63158_port_descs[port].is_imp) {
+		/* NOTE(review): this is a no-op read/write-back pair */
+		reg = sw_core_readl(priv, SF2_CORE_IMP_CTL(port));
+		/* FIXME */
+		sw_core_writel(priv, reg, SF2_CORE_IMP_CTL(port));
+	} else {
+		reg = sw_core_readl(priv, SF2_CORE_PCTL(port));
+		if (enable) {
+			/* enable TX/RX and take the port out of STP blocking */
+			reg &= ~(PCTL_TXDIS | PCTL_RXDIS);
+			reg &= ~(PCTL_STP_MASK);
+			reg |= PCTL_STP_NONE;
+		} else {
+			reg |= (PCTL_TXDIS | PCTL_RXDIS);
+		}
+		sw_core_writel(priv, reg, SF2_CORE_PCTL(port));
+	}
+
+	sf2_port_leds_control(priv, &priv->ports[port], enable);
+
+	priv->ports[port].enabled = enable;
+}
+
+
+/*
+ * mdio bus accessors
+ */
+/* busy-wait for the pending MDIO transfer: 0 on idle, -ETIMEDOUT else */
+static int __mdio_wait(struct bcm_sf2_priv *priv)
+{
+	u32 tries;
+
+	/* at lowest clock speed (500khz), about 64 bits per transfer
+	 * (32 bits preamble) => 128 us total */
+	tries = 150;
+	while (tries) {
+		u32 cmd_reg;
+
+		cmd_reg = sw_mdio_readl(priv, SF2_MDIO_CMD);
+		if ((cmd_reg & CMD_BUSY) == 0)
+			break;
+
+		--tries;
+		udelay(1);
+	}
+
+	if (!tries)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/*
+ * issue the clause-45 ADDRESS cycle (latch @reg into @dev_addr of
+ * @port_addr) before a C45 read/write data cycle
+ */
+static int sf2_mii_c45_addr(struct bcm_sf2_priv *priv,
+			    int port_addr, int dev_addr, int reg)
+{
+	u32 cmd_reg;
+	int error;
+
+	cmd_reg = OPCODE_C45_ADDR |
+		CMD_PHY_ADDR(port_addr) |
+		CMD_REG_ADDR(dev_addr) |
+		CMD_DATA(reg);
+	sw_mdio_writel(priv, cmd_reg, SF2_MDIO_CMD);
+
+	/* setting CMD_BUSY starts the transfer */
+	cmd_reg = sw_mdio_readl(priv, SF2_MDIO_CMD);
+	cmd_reg |= CMD_BUSY;
+	sw_mdio_writel(priv, cmd_reg, SF2_MDIO_CMD);
+
+	error = __mdio_wait(priv);
+	if (error)
+		return error;
+
+	return 0;
+}
+
+/*
+ * clause-22 MDIO read (mii_bus .read callback): returns the 16-bit
+ * register value or a negative errno
+ */
+static int sf2_mii_read(struct mii_bus *bus, int addr, int reg)
+{
+	struct bcm_sf2_priv *priv = bus->priv;
+	u32 cmd_reg, cfg_reg;
+	int error;
+	u16 value;
+
+	/* switch the controller to clause-22 framing */
+	cfg_reg = sw_mdio_readl(priv, SF2_MDIO_CFG);
+	cfg_reg |= MDIO_CLAUSE_22_MASK;
+	sw_mdio_writel(priv, cfg_reg, SF2_MDIO_CFG);
+
+	cmd_reg = OPCODE_C22_READ |
+		CMD_PHY_ADDR(addr) |
+		CMD_REG_ADDR(reg);
+
+	sw_mdio_writel(priv, cmd_reg, SF2_MDIO_CMD);
+
+	/* setting CMD_BUSY starts the transfer */
+	cmd_reg = sw_mdio_readl(priv, SF2_MDIO_CMD);
+	cmd_reg |= CMD_BUSY;
+	sw_mdio_writel(priv, cmd_reg, SF2_MDIO_CMD);
+
+	error = __mdio_wait(priv);
+
+	if (error) {
+		dev_err(&priv->pdev->dev,
+			"errno%d waiting for MDIO phy@%d.\n", error, addr);
+		return error;
+	}
+
+	value = sw_mdio_readl(priv, SF2_MDIO_CMD) & CMD_REG_DATA_MASK;
+	/* printk("sw_mii_read: => value: %x\n", value); */
+	return value;
+}
+
+/*
+ * clause-45 MDIO read (mii_bus .read_c45 callback): ADDRESS cycle then
+ * READ cycle; returns the 16-bit value or a negative errno
+ */
+static int sf2_mii_read_c45(struct mii_bus *bus, int addr, int dev_addr, int reg)
+{
+	struct bcm_sf2_priv *priv = bus->priv;
+	u32 cmd_reg, cfg_reg;
+	int error;
+	u16 value;
+	u16 port_addr = addr;
+
+	/* switch the controller to clause-45 framing */
+	cfg_reg = sw_mdio_readl(priv, SF2_MDIO_CFG);
+		cfg_reg &= ~MDIO_CLAUSE_22_MASK;
+	sw_mdio_writel(priv, cfg_reg, SF2_MDIO_CFG);
+
+	/* printk("sw_mii_c45_read: port_addr:%u dev_addr:%u reg:%u\n", */
+	/*        port_addr, dev_addr, reg); */
+
+	error = sf2_mii_c45_addr(priv, port_addr, dev_addr, reg);
+	if (error)
+		return error;
+
+	cmd_reg = OPCODE_C45_READ |
+		CMD_PHY_ADDR(port_addr) |
+		CMD_REG_ADDR(dev_addr);
+	sw_mdio_writel(priv, cmd_reg, SF2_MDIO_CMD);
+
+	/* setting CMD_BUSY starts the transfer */
+	cmd_reg = sw_mdio_readl(priv, SF2_MDIO_CMD);
+	cmd_reg |= CMD_BUSY;
+	sw_mdio_writel(priv, cmd_reg, SF2_MDIO_CMD);
+
+	error = __mdio_wait(priv);
+
+	if (error) {
+		dev_err(&priv->pdev->dev,
+			"errno%d waiting for MDIO phy@%d.\n", error, addr);
+		return error;
+	}
+
+	value = sw_mdio_readl(priv, SF2_MDIO_CMD) & CMD_REG_DATA_MASK;
+	/* printk("sw_mii_read: => value: %x\n", value); */
+	return value;
+}
+
+/*
+ * clause-22 MDIO write (mii_bus .write callback): 0 or negative errno
+ */
+static int sf2_mii_write(struct mii_bus *bus,
+			 int addr, int reg, u16 value)
+{
+	struct bcm_sf2_priv *priv = bus->priv;
+	u32 cmd_reg, cfg_reg;
+	int error;
+
+	/* switch the controller to clause-22 framing */
+	cfg_reg = sw_mdio_readl(priv, SF2_MDIO_CFG);
+	cfg_reg |= MDIO_CLAUSE_22_MASK;
+	sw_mdio_writel(priv, cfg_reg, SF2_MDIO_CFG);
+
+	cmd_reg = OPCODE_C22_WRITE |
+		CMD_PHY_ADDR(addr) |
+		CMD_REG_ADDR(reg) |
+		CMD_DATA(value);
+	sw_mdio_writel(priv, cmd_reg, SF2_MDIO_CMD);
+
+	/* setting CMD_BUSY starts the transfer */
+	cmd_reg = sw_mdio_readl(priv, SF2_MDIO_CMD);
+	cmd_reg |= CMD_BUSY;
+	sw_mdio_writel(priv, cmd_reg, SF2_MDIO_CMD);
+
+	error = __mdio_wait(priv);
+	if (error)
+		return error;
+	return 0;
+}
+
+/*
+ * clause-45 MDIO write (mii_bus .write_c45 callback): ADDRESS cycle
+ * then WRITE cycle; 0 or negative errno
+ */
+static int sf2_mii_write_c45(struct mii_bus *bus,
+			     int addr, int dev_addr, int reg, u16 value)
+{
+	struct bcm_sf2_priv *priv = bus->priv;
+	u32 cmd_reg, cfg_reg;
+	int error;
+	u16 port_addr = addr;
+
+	/* switch the controller to clause-45 framing */
+	cfg_reg = sw_mdio_readl(priv, SF2_MDIO_CFG);
+		cfg_reg &= ~MDIO_CLAUSE_22_MASK;
+	sw_mdio_writel(priv, cfg_reg, SF2_MDIO_CFG);
+
+
+	/* printk("sw_mii_c45_writ: port_addr:%u dev_addr:%u reg:0x%x val:0x%x\n", */
+	/*        port_addr, dev_addr, reg, value); */
+
+	error = sf2_mii_c45_addr(priv, port_addr, dev_addr, reg);
+	if (error)
+		return error;
+
+	cmd_reg = OPCODE_C45_WRITE |
+		CMD_PHY_ADDR(port_addr) |
+		CMD_REG_ADDR(dev_addr) |
+		CMD_DATA(value);
+	sw_mdio_writel(priv, cmd_reg, SF2_MDIO_CMD);
+
+	/* setting CMD_BUSY starts the transfer */
+	cmd_reg = sw_mdio_readl(priv, SF2_MDIO_CMD);
+	cmd_reg |= CMD_BUSY;
+	sw_mdio_writel(priv, cmd_reg, SF2_MDIO_CMD);
+
+	error = __mdio_wait(priv);
+	if (error)
+		return error;
+	return 0;
+}
+
+/*
+ * mii_bus .reset callback: perform one dummy read per enabled PHY
+ * block so subsequent accesses find the bus in a known state
+ */
+static int sf2_mii_bus_reset(struct mii_bus *bus)
+{
+	struct bcm_sf2_priv *priv = bus->priv;
+
+	/* do a dummy mdio transfer */
+	if (priv->qphy_en_mask)
+		(void)mdiobus_read(bus, priv->config.qphy_base_id, MII_BMSR);
+	if (priv->sphy_en_mask)
+		(void)mdiobus_read(bus, priv->config.sphy_phy_id, MII_BMSR);
+	if (priv->serdes_en_mask)
+		(void)mdiobus_read(bus, priv->config.serdes_phy_id, MII_BMSR);
+
+        return 0;
+}
+
+/*
+ * powerup workaround needed for 63158, from refsw, initial problem
+ * not know nor reproduced during testing
+ */
+/* sequence taken from Broadcom reference software; keep order and delays */
+static void gphy_init_power_war(struct bcm_sf2_priv *priv)
+{
+	u32 reg;
+
+	/* assert both reset */
+	reg = sw_reg_readl(priv, SF2_REG_QPHY_CTRL);
+	reg |= QPHY_CTRL_PHY_RESET;
+	sw_reg_writel(priv, reg, SF2_REG_QPHY_CTRL);
+
+	reg = sw_reg_readl(priv, SF2_REG_SPHY_CTRL);
+	reg |= SPHY_CTRL_PHY_RESET;
+	sw_reg_writel(priv, reg, SF2_REG_SPHY_CTRL);
+	msleep(25);
+
+	/* magic register */
+	sw_reg_writel(priv, 1, SF2_REG_PHY_TEST);
+
+	/* powerup */
+	reg = sw_reg_readl(priv, SF2_REG_QPHY_CTRL);
+	reg &= ~(QPHY_CTRL_IDDQ_BIAS |
+		 QPHY_CTRL_IDDQ_GLOBAL_PWR |
+		 QPHY_CTRL_EXT_PWR_DOWN_ALL);
+	sw_reg_writel(priv, reg, SF2_REG_QPHY_CTRL);
+
+	reg = sw_reg_readl(priv, SF2_REG_SPHY_CTRL);
+	reg &= ~(SPHY_CTRL_IDDQ_BIAS |
+		 SPHY_CTRL_IDDQ_GLOBAL_PWR |
+		 SPHY_CTRL_EXT_PWR_DOWN);
+	sw_reg_writel(priv, reg, SF2_REG_SPHY_CTRL);
+	msleep(25);
+
+	/* powerdown */
+	reg = sw_reg_readl(priv, SF2_REG_QPHY_CTRL);
+	reg |= (QPHY_CTRL_IDDQ_BIAS |
+		QPHY_CTRL_IDDQ_GLOBAL_PWR |
+		QPHY_CTRL_EXT_PWR_DOWN_ALL);
+	sw_reg_writel(priv, reg, SF2_REG_QPHY_CTRL);
+
+	reg = sw_reg_readl(priv, SF2_REG_SPHY_CTRL);
+	reg |= (SPHY_CTRL_IDDQ_BIAS |
+		SPHY_CTRL_IDDQ_GLOBAL_PWR |
+		SPHY_CTRL_EXT_PWR_DOWN);
+	sw_reg_writel(priv, reg, SF2_REG_SPHY_CTRL);
+	msleep(25);
+
+	/* deassert both reset */
+	reg = sw_reg_readl(priv, SF2_REG_QPHY_CTRL);
+	reg &= ~QPHY_CTRL_PHY_RESET;
+	sw_reg_writel(priv, reg, SF2_REG_QPHY_CTRL);
+
+	reg = sw_reg_readl(priv, SF2_REG_SPHY_CTRL);
+	reg &= ~SPHY_CTRL_PHY_RESET;
+	sw_reg_writel(priv, reg, SF2_REG_SPHY_CTRL);
+	msleep(25);
+
+	sw_reg_writel(priv, 0, SF2_REG_PHY_TEST);
+}
+
+/*
+ * QUAD GPHY block init
+ */
+/* @enabled_port_mask: bitmask of used PHYs within the quad block */
+static void quad_gphy_block_init(struct bcm_sf2_priv *priv,
+				 unsigned int enabled_port_mask,
+				 unsigned int base_mdio_address)
+{
+	u32 reg;
+
+	reg = sw_reg_readl(priv, SF2_REG_QPHY_CTRL);
+
+	/* set base mdio address */
+	reg &= ~QPHY_CTRL_PHY_BASE_ADDR_MASK;
+	reg |= base_mdio_address << QPHY_CTRL_PHY_BASE_ADDR_SHIFT;
+
+	if (!enabled_port_mask) {
+		/* power down */
+		reg |= QPHY_CTRL_IDDQ_BIAS |
+			QPHY_CTRL_EXT_PWR_DOWN_ALL |
+			QPHY_CTRL_IDDQ_GLOBAL_PWR |
+			QPHY_CTRL_CLK_25_DISABLE;
+	} else {
+		reg &= ~(QPHY_CTRL_IDDQ_BIAS |
+			 QPHY_CTRL_IDDQ_GLOBAL_PWR |
+			 QPHY_CTRL_EXT_PWR_DOWN_ALL);
+
+		/* powerdown unused port */
+		reg |= (~enabled_port_mask << QPHY_CTRL_EXT_PWR_DOWN_SHIFT) &
+			QPHY_CTRL_EXT_PWR_DOWN_ALL;
+	}
+
+	/* toggle reset */
+	reg |= QPHY_CTRL_PHY_RESET;
+	sw_reg_writel(priv, reg, SF2_REG_QPHY_CTRL);
+	msleep(1);
+
+	/* de-assert reset */
+	reg = sw_reg_readl(priv, SF2_REG_QPHY_CTRL);
+	reg &= ~QPHY_CTRL_PHY_RESET;
+	sw_reg_writel(priv, reg, SF2_REG_QPHY_CTRL);
+	msleep(1);
+}
+
+/*
+ * single GPHY block init
+ */
+/* @used: power the block up when true, fully down otherwise */
+static void single_gphy_block_init(struct bcm_sf2_priv *priv,
+				   bool used,
+				   unsigned int mdio_address)
+{
+	u32 reg;
+
+	reg = sw_reg_readl(priv, SF2_REG_SPHY_CTRL);
+
+	/* set mdio address */
+	reg &= ~SPHY_CTRL_PHY_BASE_ADDR_MASK;
+	reg |= mdio_address << SPHY_CTRL_PHY_BASE_ADDR_SHIFT;
+
+	if (!used) {
+		reg |= SPHY_CTRL_IDDQ_BIAS |
+			SPHY_CTRL_EXT_PWR_DOWN |
+			SPHY_CTRL_IDDQ_GLOBAL_PWR |
+			SPHY_CTRL_CLK_25_DISABLE;
+	} else {
+		reg &= ~(SPHY_CTRL_IDDQ_BIAS |
+			 SPHY_CTRL_IDDQ_GLOBAL_PWR |
+			 SPHY_CTRL_EXT_PWR_DOWN);
+	}
+
+	/* toggle reset */
+	reg |= SPHY_CTRL_PHY_RESET;
+	sw_reg_writel(priv, reg, SF2_REG_SPHY_CTRL);
+	msleep(1);
+
+	/* de-assert reset */
+	reg = sw_reg_readl(priv, SF2_REG_SPHY_CTRL);
+	reg &= ~SPHY_CTRL_PHY_RESET;
+	sw_reg_writel(priv, reg, SF2_REG_SPHY_CTRL);
+	msleep(1);
+}
+
+/*
+ * serdes phy block init
+ */
+/*
+ * serdes PHY block init: program signal-detect polarity and MDIO
+ * address, then (when @used) release powerdown and reset in sequence
+ */
+static void serdes_phy_block_init(struct bcm_sf2_priv *priv,
+				  bool used,
+				  unsigned int mdio_address,
+				  bool invert_signal_detect)
+{
+	u32 reg;
+
+	/* configure signal detect logic */
+	reg = sw_reg_readl(priv, SF2_REG_SSRD_APD_CTRL);
+	if (invert_signal_detect)
+		reg |= SSRD_APD_CTRL_INV_SD;
+	else
+		reg &= ~SSRD_APD_CTRL_INV_SD;
+	/*
+	 * Write the polarity back: the original code computed this value
+	 * and then discarded it (reg was immediately reloaded below), so
+	 * invert_signal_detect had no effect.
+	 */
+	sw_reg_writel(priv, reg, SF2_REG_SSRD_APD_CTRL);
+
+	/*
+	 * complete reset and powerdown
+	 */
+	reg = sw_reg_readl(priv, SF2_REG_SSRD_CTRL);
+	reg |= SSRD_CTRL_IDDQ_EN |
+		SSRD_CTRL_PDOWN_EN |
+		SSRD_CTRL_RESET_PLL |
+		SSRD_CTRL_RESET_MDIO |
+		SSRD_CTRL_RESET_SERDES;
+	reg &= ~SSRD_CTRL_PHY_BASE_ADDR_MASK;
+	reg |= mdio_address << SSRD_CTRL_PHY_BASE_ADDR_SHIFT;
+	sw_reg_writel(priv, reg, SF2_REG_SSRD_CTRL);
+	msleep(4);
+
+	/* unused serdes stays powered down and in reset */
+	if (!used)
+		return;
+
+	/*
+	 * clear powerdown bits.
+	 */
+	reg = sw_reg_readl(priv, SF2_REG_SSRD_CTRL);
+	reg &= ~(SSRD_CTRL_IDDQ_EN |
+		 SSRD_CTRL_PDOWN_EN);
+	sw_reg_writel(priv, reg, SF2_REG_SSRD_CTRL);
+	msleep(4);
+
+	/*
+	 * clear reset bits
+	 */
+	reg = sw_reg_readl(priv, SF2_REG_SSRD_CTRL);
+	reg &= ~(SSRD_CTRL_RESET_PLL |
+		 SSRD_CTRL_RESET_MDIO |
+		 SSRD_CTRL_RESET_SERDES);
+	sw_reg_writel(priv, reg, SF2_REG_SSRD_CTRL);
+	msleep(4);
+}
+
+/*
+ * decode the raw ARL mac_vid / fwd_entry register words into an
+ * sf2_arl_entry
+ */
+static void sf2_arl_to_entry(struct sf2_arl_entry *ent,
+			     u64 mac_vid, u32 fwd_entry)
+{
+	memset(ent, 0, sizeof(*ent));
+	ent->port = fwd_entry & ARLTBL_DATA_PORT_ID_MASK;
+	ent->is_valid = !!(fwd_entry & ARLTBL_VALID);
+	ent->is_age = !!(fwd_entry & ARLTBL_AGE);
+	ent->is_static = !!(fwd_entry & ARLTBL_STATIC);
+	u64_to_ether_addr(mac_vid, ent->mac);
+	ent->vid = mac_vid >> ARLTBL_VID_S;
+}
+
+/*
+ * wait for an ARL search step to either finish (STDN cleared) or
+ * produce a valid result (VLID set); -ETIMEDOUT otherwise
+ */
+static int sf2_arl_search_wait(struct bcm_sf2_priv *priv)
+{
+	unsigned int timeout = 1000;
+	/*
+	 * NOTE(review): reg is u8, so only bits 7:0 of SRCH_CTRL are
+	 * observed -- assumes STDN/VLID live in the low byte; confirm
+	 * against the register layout.
+	 */
+	u8 reg;
+
+	do {
+		reg = sw_core_readl(priv, SF2_CORE_ARL_SRCH_CTRL);
+
+		if (!(reg & ARL_SRCH_STDN))
+			return 0;
+
+		if (reg & ARL_SRCH_VLID)
+			return 0;
+
+		usleep_range(50, 100);
+	} while (timeout--);
+
+	return -ETIMEDOUT;
+}
+
+/*
+ * read search result slot @idx (0 or 1) and decode it into @ent
+ */
+static void sf2_arl_search_rd(struct bcm_sf2_priv *priv, u8 idx,
+			      struct sf2_arl_entry *ent)
+{
+	u64 mac_vid;
+	u32 fwd_entry;
+
+	mac_vid = sw_core_readll(priv, SF2_CORE_ARL_SRCH_RSTx_MACVID(idx));
+	fwd_entry = sw_core_readl(priv, SF2_CORE_ARL_SRCH_RSTx(idx));
+	sf2_arl_to_entry(ent, mac_vid, fwd_entry);
+}
+
+/*
+ * wait for a pending ARL read/write operation (START self-clearing);
+ * 0 on completion, -ETIMEDOUT after ~10-20ms
+ */
+static int sf2_arl_op_wait(struct bcm_sf2_priv *priv)
+{
+	unsigned int timeout = 10;
+	u8 reg;
+
+	do {
+		reg = sw_core_readl(priv, SF2_CORE_ARL_RWCTL);
+		if (!(reg & SF2_ARL_START))
+			return 0;
+
+		usleep_range(1000, 2000);
+	} while (timeout--);
+
+	dev_warn(&priv->pdev->dev, "timeout waiting for ARL to finish: 0x%02x\n", reg);
+	return -ETIMEDOUT;
+}
+
+/*
+ * kick an ARL table access (read when !@is_write) and wait for it
+ */
+static int sf2_arl_rw_op(struct bcm_sf2_priv *priv, bool is_write)
+{
+	u8 reg;
+
+	reg = sw_core_readl(priv, SF2_CORE_ARL_RWCTL);
+	reg |= SF2_ARL_START;
+	if (is_write)
+		reg &= ~SF2_ARL_READ;
+	else
+		reg |= SF2_ARL_READ;
+
+	sw_core_writel(priv, reg, SF2_CORE_ARL_RWCTL);
+	return sf2_arl_op_wait(priv);
+}
+
+/*
+ * invalidate all ARL forward entries for @ent's MAC (read-modify-write
+ * of the 4-way bin).  Matches the sf2_arl_for_each() callback
+ * signature; @data is unused.
+ *
+ * NOTE(review): ent->vid is ignored -- the VID index is forced to 0,
+ * consistent with the "no VLAN support yet" comment in sf2_arl_delete().
+ */
+static int sf2_arl_del_entry(struct bcm_sf2_priv *priv,
+			     const struct sf2_arl_entry *ent, void *data)
+{
+	u64 mac;
+	int ret, i;
+
+	/* Convert the array into a 64-bit MAC */
+	mac = ether_addr_to_u64(ent->mac);
+
+	/* Perform a read for the given MAC and VID */
+	sw_core_writell(priv, mac, SF2_CORE_ARL_MAC_IDX);
+	sw_core_writell(priv, 0, SF2_CORE_ARL_VID_IDX);
+
+	/* Issue a read operation for this MAC */
+	ret = sf2_arl_rw_op(priv, false);
+	if (ret)
+		goto out;
+
+	/* make all fwd entries invalid */
+	for (i = 0; i < 4; i++)
+		sw_core_writell(priv, 0, SF2_CORE_ARL_FWDENTRY(i));
+
+	/* write */
+	ret = sf2_arl_rw_op(priv, true);
+
+out:
+	if (ret)
+		dev_err(&priv->pdev->dev, "failed to delete mac from ARL\n");
+	return ret;
+}
+
+/*
+ * walk the whole ARL table via the hardware search engine: up to 1024
+ * search steps, two result slots per step.  @fn returning non-zero
+ * aborts the walk.
+ */
+static void
+_sf2_arl_for_each(struct bcm_sf2_priv *priv,
+		  int (*fn)(struct bcm_sf2_priv *, const struct sf2_arl_entry *,
+			    void *),
+		  void *data)
+{
+	unsigned int count;
+	int ret;
+	u8 reg;
+
+	/* Start search operation */
+	count = 0;
+	reg = ARL_SRCH_STDN;
+	sw_core_writel(priv, reg, SF2_CORE_ARL_SRCH_CTRL);
+
+	do {
+		int i;
+
+		ret = sf2_arl_search_wait(priv);
+		if (ret)
+			return;
+
+		for (i = 0; i < 2; i++) {
+			struct sf2_arl_entry ent;
+
+			sf2_arl_search_rd(priv, i, &ent);
+			if (!ent.is_valid)
+				continue;
+
+			if (fn(priv, &ent, data))
+				return;
+
+		}
+
+	} while (count++ < 1024);
+}
+
+/* public wrapper around _sf2_arl_for_each() */
+void
+sf2_arl_for_each(struct bcm_sf2_priv *priv,
+		 int (*fn)(struct bcm_sf2_priv *, const struct sf2_arl_entry *,
+		           void *),
+		 void *data)
+{
+	return _sf2_arl_for_each(priv, fn, data);
+}
+
+/*
+ * remove every entry currently present in the ARL table
+ */
+static void sf2_arl_clear(struct bcm_sf2_priv *priv)
+{
+	sf2_arl_for_each(priv, sf2_arl_del_entry, NULL);
+}
+
+/*
+ * remove the ARL entry for @mac; @vid is currently ignored (see below)
+ */
+int sf2_arl_delete(struct bcm_sf2_priv *priv, const u8 *mac, u16 vid)
+{
+	struct sf2_arl_entry ent = {
+		.vid = 0, /* Force vid to 0, as there is no VLAN support yet */
+	};
+
+	memcpy(ent.mac, mac, sizeof(ent.mac));
+	sf2_arl_del_entry(priv, &ent, NULL);
+	return 0;
+}
+
+/*
+ * one-time switch hardware init: reset, ARL flush, crossbar mapping,
+ * PHY block bring-up, MDIO clock and global switch mode
+ */
+static void sf2_hw_init(struct bcm_sf2_priv *priv)
+{
+	const struct sf2_config *config = &priv->config;
+	u32 reg;
+	int loop, i;
+
+	/*
+	 * power up block
+	 *
+	 * NOTE: the bootloader leave the SF2 enabled, doing a
+	 * powerdown is not supported and can result in the switch not
+	 * going out of reset (external abort accessing the registers).
+	 *
+	 * we just deassert reset in case bootloader did not
+	 */
+	reset_control_deassert(priv->rst);
+	msleep(10);
+
+	/*
+	 * soft reset switch (no effect on 63158 :/)
+	 */
+	sw_core_writel(priv,
+		       SF2_SOFTWARE_RESET |
+		       SF2_EN_SW_RST,
+		       SF2_CORE_WATCHDOG_CTRL);
+	for (loop = 0; loop < 10000; loop++) {
+		reg = sw_core_readl(priv, SF2_CORE_WATCHDOG_CTRL);
+		if (!(reg & SF2_SOFTWARE_RESET))
+			break;
+		udelay(100);
+	}
+	if (loop == 10000)
+		printk("SF2 soft reset failed\n");
+	msleep(1);
+
+	/* make sure forwarding is disabled */
+	reg = sw_core_readl(priv, SF2_CORE_SWMODE);
+	reg &= ~SWMODE_FWD_EN;
+	sw_core_writel(priv, reg, SF2_CORE_SWMODE);
+
+	/* reset does not work on 63158, restore some registers
+	 * changed by CFE */
+	for (i = 0; i < SF2_PORT_COUNT; i++) {
+		u32 reg;
+
+		/* default port-based vlan mask: all ports allowed */
+		reg = (1 << SF2_PORT_COUNT) - 1;
+		sw_core_writel(priv, reg, SF2_CORE_Px_VLAN_CTL(i));
+	}
+
+	/* since reset is not working, handle clear the ARL */
+	sf2_arl_clear(priv);
+
+	/* setup crossbar mapping */
+	reg = sw_reg_readl(priv, SF2_REG_XBAR_CTRL);
+	reg &= ~(XBAR_PORT_MASK << XBAR_P4_SHIFT);
+	reg &= ~(XBAR_PORT_MASK << XBAR_P6_SHIFT);
+	reg &= ~(XBAR_PORT_MASK << XBAR_WAN_SHIFT);
+	reg &= ~XBAR_MUX1_MASK;
+	reg &= ~XBAR_MUX2_MASK;
+
+	reg |= priv->xbar_mapping[0] << XBAR_P4_SHIFT;
+	reg |= priv->xbar_mapping[1] << XBAR_P6_SHIFT;
+	reg |= priv->xbar_mapping[2] << XBAR_WAN_SHIFT;
+	if (priv->mux1_mapping)
+		reg |= XBAR_MUX1_MASK;
+	if (priv->mux2_mapping)
+		reg |= XBAR_MUX2_MASK;
+	sw_reg_writel(priv, reg, SF2_REG_XBAR_CTRL);
+
+	/*
+	 * QPHY/SPHY has a powerup workaround on 63158
+	 */
+	gphy_init_power_war(priv);
+
+	/*
+	 * init QPHY/SPHY/serdes PHY block
+	 */
+	quad_gphy_block_init(priv, priv->qphy_en_mask,
+			     config->qphy_base_id);
+	single_gphy_block_init(priv, priv->sphy_en_mask != 0,
+			       config->sphy_phy_id);
+	serdes_phy_block_init(priv, priv->serdes_en_mask != 0,
+			      config->serdes_phy_id,
+			      config->serdes_inv_sd);
+
+	/*
+	 * set MDIO frequency to ~9.6Mhz
+	 * freq = (250Mhz / 2 * (DIV + 1))
+	 *
+	 * FIXME: should be in device tree
+	 */
+	reg = sw_mdio_readl(priv, SF2_MDIO_CFG);
+	reg &= ~MDIO_CLK_DIV_MASK;
+	reg |= (12 << MDIO_CLK_DIV_SHIFT);
+	sw_mdio_writel(priv, reg, SF2_MDIO_CFG);
+
+        /* Wait until hardware enable the ports, or we will kill the
+	 * hardware */
+	for (i = 0; i < SF2_PORT_COUNT; i++) {
+		for (loop = 0; loop < 10000; loop++) {
+			reg = sw_core_readl(priv, SF2_CORE_PCTL(i));
+			if (!(reg & PCTL_RXDIS))
+				break;
+			udelay(100);
+		}
+		if (loop == 10000)
+			printk("SF2 disable port %u failed\n", i);
+	}
+
+	/* allow "mini jumbo" on all ports */
+	reg = sw_core_readl(priv, SF2_CORE_MIB_GD_FM_MAX_SIZE);
+	reg &= ~SF2_GDM_FM_MAX_SIZE_MASK;
+	reg |= 2100;
+	sw_core_writel(priv, reg, SF2_CORE_MIB_GD_FM_MAX_SIZE);
+
+	/* reset MIB */
+	reg = sw_core_readl(priv, SF2_CORE_GLOBAL_MGMT_CFG);
+	reg |= GLOBAL_MGMT_RST_MIB_MASK;
+	sw_core_writel(priv, reg, SF2_CORE_GLOBAL_MGMT_CFG);
+	reg &= ~GLOBAL_MGMT_RST_MIB_MASK;
+	sw_core_writel(priv, reg, SF2_CORE_GLOBAL_MGMT_CFG);
+
+	/* enable imp port to work as a normal port */
+	reg = sw_core_readl(priv, SF2_CORE_CTRL);
+	reg &= 0xffb0;
+	sw_core_writel(priv,
+		       SF2_CTRL_MII2_VOL_SEL |
+		       SF2_CTRL_MII_DUMB_FWD_EN,
+		       SF2_CORE_CTRL);
+
+	/* enable unmanaged forwarding */
+	reg = sw_core_readl(priv, SF2_CORE_SWMODE);
+	reg |= SWMODE_FWD_EN;
+	reg &= ~(SWMODE_FWD_MANAGED | SWMODE_RETRY_LIMIT_DIS);
+	sw_core_writel(priv, reg, SF2_CORE_SWMODE);
+}
+
+/*
+ * internal serdes PHY access
+ */
+/* clause-22 read of the serdes PHY at its configured MDIO address */
+static int serdes_phy_read(struct bcm_sf2_priv *priv, u32 reg)
+{
+	return mdiobus_read(priv->mii_bus, priv->config.serdes_phy_id, reg);
+}
+
+/* clause-22 write of the serdes PHY at its configured MDIO address */
+static int serdes_phy_write(struct bcm_sf2_priv *priv, u32 reg, u16 value)
+{
+	return mdiobus_write(priv->mii_bus, priv->config.serdes_phy_id,
+			     reg, value);
+}
+
+/*
+ * internal serdes PHY read from extended registers
+ */
+/*
+ * registers >= 0x20 live in banked extended space: select the bank,
+ * read at the windowed offset, then restore the default bank.
+ * NOTE(review): error |= mixes negative errnos bitwise -- the result
+ * is only meaningful as "< 0 means failure", not as a specific errno.
+ */
+static __maybe_unused int serdes_ephy_read(struct bcm_sf2_priv *priv, int reg)
+{
+	uint32_t bank;
+	uint32_t offset;
+	int val;
+	int error;
+
+	if (reg < 0x20)
+		return serdes_phy_read(priv, reg);
+
+	bank = reg & BRCM_MIIEXT_BANK_MASK;
+	offset = (reg & BRCM_MIIEXT_OFF_MASK) + BRCM_MIIEXT_OFFSET;
+
+	error = serdes_phy_write(priv, BRCM_MIIEXT_BANK, bank);
+	val = serdes_phy_read(priv, offset);
+	if (val < 0)
+		error = val;
+
+	error |= serdes_phy_write(priv, BRCM_MIIEXT_BANK, BRCM_MIIEXT_DEF_BANK);
+        return (error < 0) ? error : val;
+}
+
+/*
+ * internal serdes PHY write to extended registers
+ */
+/* banked counterpart of serdes_ephy_read(); < 0 on any failure */
+static int serdes_ephy_write(struct bcm_sf2_priv *priv, int reg, u16 value)
+{
+        uint32_t bank;
+        uint32_t offset;
+        int error;
+
+        if (reg < 0x20)
+                return serdes_phy_write(priv, reg, value);
+
+        bank = reg & BRCM_MIIEXT_BANK_MASK;
+        offset = (reg & BRCM_MIIEXT_OFF_MASK) + BRCM_MIIEXT_OFFSET;
+
+        error = serdes_phy_write(priv, BRCM_MIIEXT_BANK, bank);
+        error |= serdes_phy_write(priv, offset, value);
+        error |= serdes_phy_write(priv, BRCM_MIIEXT_BANK, BRCM_MIIEXT_DEF_BANK);
+        return error;
+}
+
+/*
+ * internal serdes PHY bulk write to extended registers
+ */
+/*
+ * @vals: flat {register, value} pairs; @count counts u16 elements
+ * (hence the i += 2 stride), so it must be even
+ */
+static int serdes_ephy_write_array(struct bcm_sf2_priv *priv,
+				   const u16 *vals, size_t count)
+{
+	size_t i;
+
+	for (i = 0; i < count; i += 2) {
+                int ret = serdes_ephy_write(priv, vals[i], vals[i + 1]);
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
+
+/*
+ * reset internal serdes PHY
+ */
+/*
+ * set BMCR_RESET and poll (up to 100 x 10ms) for the PHY to clear it.
+ * NOTE(review): returns 0 even if the retries are exhausted with
+ * BMCR_RESET still set -- confirm whether that is intentional.
+ */
+static int serdes_reset(struct bcm_sf2_priv *priv)
+{
+	int retries = 100;
+	int ret;
+
+	ret = serdes_phy_read(priv, MII_BMCR);
+	if (ret < 0)
+		return ret;
+
+	ret |= BMCR_RESET;
+	ret = serdes_phy_write(priv, MII_BMCR, ret);
+	if (ret < 0)
+		return ret;
+
+	do {
+		ret = serdes_phy_read(priv, MII_BMCR);
+		if (ret < 0)
+			return ret;
+
+		msleep(10);
+	} while (ret & BMCR_RESET && --retries);
+
+	return 0;
+}
+
+/*
+ * restart aneg on internal serdes PHY
+ */
+/* set BMCR_ANRESTART to kick off a new autonegotiation round */
+static int serdes_aneg_restart(struct bcm_sf2_priv *priv)
+{
+	int ret;
+
+	ret = serdes_phy_read(priv, MII_BMCR);
+	if (ret < 0)
+		return ret;
+
+	ret |= BMCR_ANRESTART;
+
+	ret = serdes_phy_write(priv, MII_BMCR, ret);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * serdes PLL setup table: per its name, 50MHz reference clock with a
+ * 6.25GHz VCO.  Flat (register, value) pairs consumed by
+ * serdes_ephy_write_array().
+ */
+static const u16 serdesRef50mVco6p25[] = {
+	0x8000, 0x0c2f,
+	0x8308, 0xc000,
+	0x8050, 0x5740,
+	0x8051, 0x01d0,
+	0x8052, 0x19f0,
+	0x8053, 0xaab0,
+	0x8054, 0x8821,
+	0x8055, 0x0044,
+	0x8056, 0x8000,
+	0x8057, 0x0872,
+	0x8058, 0x0000,
+
+	0x8106, 0x0020,
+	0x8054, 0x8021,
+	0x8054, 0x8821,
+};
+
+/* (register, value) pairs forcing the serdes into 2.5G fiber mode */
+static const u16 serdesSet2p5GFiber[] = {
+	0x0010, 0x0C2F,	/* disable pll start sequencer */
+	0x8300, 0x0149,	/* enable fiber mode, signal_detect_en, invert signal detect(b2) */
+	0x8308, 0xC010,	/* Force 2.5G Fiber, enable 50MHz refclk */
+	0x834a, 0x0001,	/* Set os2 mode */
+	0x0000, 0x0140,	/* disable AN, set 1G mode */
+	0x0010, 0x2C2F,	/* enable pll start sequencer */
+};
+
+#if 0
+/* dead code: 1G forced-fiber setup, currently unused; kept for
+ * reference only -- consider removing if it stays unused */
+static u16 serdesSet1GForcedFiber[] = {
+	0x0010, 0x0c2f,     /* disable pll start sequencer */
+	0x8300, 0x0109,     /* Force Invert Signal Polarity */
+	0x8473, 0x1251,
+	0x834a, 0x0003,
+	0x0000, 0x0140,
+	0x0010, 0x2c2f,     /* enable pll start sequencer */
+};
+#endif
+
+/* (register, value) pairs for 1G fiber with auto-detect; made const
+ * for consistency with the other tables and with the
+ * serdes_ephy_write_array() prototype (const u16 *) */
+static const u16 serdesSet1GFiber[] = {
+	0x0010, 0x0c2f,     /* disable pll start sequencer */
+	0x8300, 0x0149,     /* Force Auto Detect, Invert Signal Polarity */
+	0x8473, 0x1251,
+	0x834a, 0x0003,
+	0x0000, 0x1140,
+	0x0010, 0x2c2f,     /* enable pll start sequencer */
+};
+
+/* (register, value) pairs forcing 100M SGMII mode; made const for
+ * consistency with the other tables and the const u16 * consumer */
+static const u16 serdesSet100MForcedSGMII[] = {
+	0x0010, 0x0c2f,     /* disable pll start sequencer */
+	0x8300, 0x0100,
+	0x8301, 0x0007,
+	0x8473, 0x1251,
+	0x834a, 0x0003,
+	0x0000, 0x2100,
+	0x0010, 0x2c2f,     /* enable pll start sequencer */
+};
+
+/*
+ * configure the serdes for 1000base-X (also used for SGMII at 1G):
+ * reset, program the PLL, then the 1G fiber table, restart aneg.
+ * Removed the dead commented-out printk.
+ */
+static void serdes_set_1000basex(struct bcm_sf2_priv *priv)
+{
+	serdes_reset(priv);
+	serdes_ephy_write_array(priv, serdesRef50mVco6p25,
+				ARRAY_SIZE(serdesRef50mVco6p25));
+	msleep(1);
+	serdes_ephy_write_array(priv, serdesSet1GFiber,
+				ARRAY_SIZE(serdesSet1GFiber));
+	serdes_aneg_restart(priv);
+}
+
+/*
+ * configure the serdes for 2500base-X: reset, program the PLL, then
+ * the forced 2.5G fiber table (no aneg restart -- the table disables
+ * AN).  Removed the dead commented-out printk.
+ */
+static void serdes_set_2500basex(struct bcm_sf2_priv *priv)
+{
+	serdes_reset(priv);
+	serdes_ephy_write_array(priv, serdesRef50mVco6p25,
+				ARRAY_SIZE(serdesRef50mVco6p25));
+	msleep(1);
+	serdes_ephy_write_array(priv, serdesSet2p5GFiber,
+				ARRAY_SIZE(serdesSet2p5GFiber));
+}
+
+/*
+ * configure the serdes for forced 100M SGMII.  Removed the dead
+ * commented-out printk.
+ */
+static void serdes_set_sgmii_100(struct bcm_sf2_priv *priv)
+{
+	serdes_reset(priv);
+	msleep(1);
+	serdes_ephy_write_array(priv, serdesSet100MForcedSGMII,
+				ARRAY_SIZE(serdesSet100MForcedSGMII));
+	serdes_aneg_restart(priv);
+}
+
+/*
+ * dsa callback: report the MAC capabilities and supported PHY
+ * interface modes of a port to phylink, derived from the port's
+ * hardware descriptor
+ */
+static void sf2_dsa_phylink_get_caps(struct dsa_switch *ds, int port_id,
+				     struct phylink_config *config)
+{
+	struct bcm_sf2_priv *priv = ds->priv;
+	struct sf2_port *port = &priv->ports[port_id];
+	unsigned long *interfaces = config->supported_interfaces;
+	unsigned long caps = MAC_ASYM_PAUSE | MAC_SYM_PAUSE;
+
+	switch (port->pdesc->port_type) {
+	case SF2_PORT_T_QUAD_GPHY:
+	case SF2_PORT_T_SINGLE_GPHY:
+		caps |= MAC_10 | MAC_100 | MAC_1000FD;
+		__set_bit(PHY_INTERFACE_MODE_GMII, interfaces);
+		break;
+
+	case SF2_PORT_T_RGMII:
+		caps |= MAC_10 | MAC_100 | MAC_1000FD;
+		phy_interface_set_rgmii(interfaces);
+		break;
+
+	case SF2_PORT_T_UNIMAC:
+	case SF2_PORT_T_SYSPORT:
+		/* internal cpu-facing ports, gigabit and above only */
+		caps |= MAC_1000 | MAC_2500FD;
+		__set_bit(PHY_INTERFACE_MODE_INTERNAL, interfaces);
+		break;
+
+	case SF2_PORT_T_SERDES:
+		caps |= MAC_100 | MAC_1000FD | MAC_2500FD;
+		__set_bit(PHY_INTERFACE_MODE_1000BASEX, interfaces);
+		__set_bit(PHY_INTERFACE_MODE_SGMII, interfaces);
+		__set_bit(PHY_INTERFACE_MODE_2500BASEX, interfaces);
+		break;
+
+	default:
+		WARN(1, "invalid hw desc");
+	}
+
+	config->mac_capabilities = caps;
+
+	pr_debug("sf2_dsa_phylink_get_caps: port%d caps %08lx\n", port_id,
+		 config->mac_capabilities);
+}
+
+/*
+ * enable/disable EEE LPI request generation by hardware for given port
+ * (one enable bit per port in the EEE control register)
+ */
+static void sf2_setup_port_eee(struct bcm_sf2_priv *priv,
+			       struct sf2_port *port, bool enable)
+{
+	u32 val = sw_core_readl(priv, SF2_CORE_EEE_EN_CTRL_REG);
+
+	if (enable)
+		val |= BIT(port->id);
+	else
+		val &= ~BIT(port->id);
+
+	sw_core_writel(priv, val, SF2_CORE_EEE_EN_CTRL_REG);
+}
+
+/*
+ * phylink mac_config callback: intentionally empty, all MAC state is
+ * programmed from the mac_link_up callback instead
+ */
+static void sf2_phylink_mac_config(struct phylink_config *config,
+				   unsigned int mode,
+				   const struct phylink_link_state *state)
+{
+	/* unused */
+}
+
+/*
+ * phylink mac_link_up callback: the link on this port resolved;
+ * program the switch port state (link, speed, duplex, pause) to
+ * match, and reconfigure the serdes lane for serdes ports
+ */
+static void sf2_phylink_mac_link_up(struct phylink_config *config,
+				    struct phy_device *phy,
+				    unsigned int mode,
+				    phy_interface_t interface,
+				    int speed, int duplex,
+				    bool tx_pause, bool rx_pause)
+{
+	struct dsa_port *dp = dsa_phylink_to_port(config);
+	int port_id = dp->index;
+	struct bcm_sf2_priv *priv = dp->ds->priv;
+	struct sf2_port *port = &priv->ports[port_id];
+	bool eee_active;
+
+	pr_debug("sf2_phylink_mac_link_up port:%d\n", port->id);
+
+	switch (port->pdesc->port_type) {
+	case SF2_PORT_T_QUAD_GPHY:
+	case SF2_PORT_T_SINGLE_GPHY:
+		WARN_ON(mode != MLO_AN_PHY);
+
+		/* should not happen because we check this in validate */
+		WARN_ON(interface != PHY_INTERFACE_MODE_GMII);
+
+		/* for internal phys, local mac config is adjusted
+		 * automatically, nothing to do */
+
+		/* EEE is only usable when the PHY negotiated it */
+		if (mode == MLO_AN_PHY && phy)
+			eee_active = (phy_init_eee(phy, 0) >= 0);
+		else
+			eee_active = false;
+
+		sf2_setup_port_eee(priv, port,
+				   eee_active && port->tx_lpi_enabled);
+		return;
+
+	case SF2_PORT_T_UNIMAC:
+	case SF2_PORT_T_SYSPORT:
+	case SF2_PORT_T_RGMII:
+		WARN_ON(mode != MLO_AN_PHY && mode != MLO_AN_FIXED);
+
+		/* should not happen because we check this in validate */
+		/* NOTE(review): get_caps advertises RGMII interface modes
+		 * for SF2_PORT_T_RGMII ports, but only INTERNAL/GMII are
+		 * accepted here -- confirm RGMII links should not warn */
+		WARN_ON(interface != PHY_INTERFACE_MODE_INTERNAL &&
+			interface != PHY_INTERFACE_MODE_GMII);
+
+		if (bcm63158_port_descs[port->id].is_imp) {
+			/* IMP port: force link state through the
+			 * dedicated IMP status override register */
+			u32 reg;
+
+			reg = sw_core_readl(priv, SF2_CORE_STS_OV_IMP_STATE);
+			reg |= STS_OV_IMP_LINK_MASK |
+				STS_OV_IMP_MII_SW_OVER_MASK;
+
+			if (duplex)
+				reg |= STS_OV_IMP_FULL_DUPLEX_MASK;
+			else
+				reg &= ~STS_OV_IMP_FULL_DUPLEX_MASK;
+
+			reg &= ~STS_OV_IMP_SPEED_ALL_MASK;
+			switch (speed) {
+			case 10:
+				reg |= STS_OV_IMP_SPEED_10_MASK;
+				break;
+			case 100:
+				reg |= STS_OV_IMP_SPEED_100_MASK;
+				break;
+			case 1000:
+			case 2500:
+				/* no 2.5G encoding in this field; the
+				 * actual 2.5G selection for cpu ports is
+				 * done via SW_CTRL below */
+				reg |= STS_OV_IMP_SPEED_1000_MASK;
+				break;
+			}
+
+			if (rx_pause)
+				reg |= STS_OV_IMP_RX_FLOW_CTL_MASK;
+			else
+				reg &= ~STS_OV_IMP_RX_FLOW_CTL_MASK;
+
+			if (tx_pause)
+				reg |= STS_OV_IMP_TX_FLOW_CTL_MASK;
+			else
+				reg &= ~STS_OV_IMP_TX_FLOW_CTL_MASK;
+
+			sw_core_writel(priv, reg, SF2_CORE_STS_OV_IMP_STATE);
+		} else {
+			/* regular port: same override, per-port register */
+			u32 reg;
+
+			reg = sw_core_readl(priv,
+					    SF2_CORE_STS_OV_Px_STATE(port->id));
+
+			reg |= STS_OV_Px_LINK_MASK |
+				STS_OV_Px_SW_OVER_MASK;
+
+			if (duplex)
+				reg |= STS_OV_Px_FULL_DUPLEX_MASK;
+			else
+				reg &= ~STS_OV_Px_FULL_DUPLEX_MASK;
+
+			reg &= ~STS_OV_Px_SPEED_ALL_MASK;
+			switch (speed) {
+			case 10:
+				reg |= STS_OV_Px_SPEED_10_MASK;
+				break;
+			case 100:
+				reg |= STS_OV_Px_SPEED_100_MASK;
+				break;
+			case 1000:
+			case 2500:
+				reg |= STS_OV_Px_SPEED_1000_MASK;
+				break;
+			}
+
+			if (rx_pause)
+				reg |= STS_OV_Px_RX_FLOW_CTL_MASK;
+			else
+				reg &= ~STS_OV_Px_RX_FLOW_CTL_MASK;
+
+			if (tx_pause)
+				reg |= STS_OV_Px_TX_FLOW_CTL_MASK;
+			else
+				reg &= ~STS_OV_Px_TX_FLOW_CTL_MASK;
+
+			sw_core_writel(priv, reg,
+				       SF2_CORE_STS_OV_Px_STATE(port->id));
+		}
+
+		/*
+		 * setup high speed IMP ports
+		 */
+		if (port->pdesc->port_type == SF2_PORT_T_UNIMAC ||
+		    port->pdesc->port_type == SF2_PORT_T_SYSPORT) {
+			u32 mask, spd, reg;
+
+			/* 1G vs 2.5G for cpu ports 5/7/8 is selected in
+			 * the switch control register */
+			switch (port_id) {
+			case 5:
+				mask = SW_CTRL_P5_SPEED_MASK;
+				if (speed == 2500)
+					spd = SW_CTRL_P5_SPEED_2_5G;
+				else
+					spd = SW_CTRL_P5_SPEED_1G;
+				break;
+			case 7:
+				mask = SW_CTRL_P7_SPEED_MASK;
+				if (speed == 2500)
+					spd = SW_CTRL_P7_SPEED_2_5G;
+				else
+					spd = SW_CTRL_P7_SPEED_1G;
+				break;
+			case 8:
+				mask = SW_CTRL_P8_SPEED_MASK;
+				if (speed == 2500)
+					spd = SW_CTRL_P8_SPEED_2_5G;
+				else
+					spd = SW_CTRL_P8_SPEED_1G;
+				break;
+
+			default:
+				WARN(1, "unknown cpu port");
+				mask = 0;
+				spd = 0;
+				break;
+			}
+
+			reg = sw_reg_readl(priv, SF2_REG_SW_CTRL);
+			reg &= ~mask;
+			reg |= spd;
+			sw_reg_writel(priv, reg, SF2_REG_SW_CTRL);
+		}
+		break;
+
+	case SF2_PORT_T_SERDES:
+		/* reprogram the serdes lane for the resolved mode */
+		switch (interface) {
+		case PHY_INTERFACE_MODE_1000BASEX:
+			serdes_set_1000basex(priv);
+			break;
+
+		case PHY_INTERFACE_MODE_SGMII:
+			/* SGMII at 1G reuses the 1000base-X setup */
+			if (speed == 100)
+				serdes_set_sgmii_100(priv);
+			else
+				serdes_set_1000basex(priv);
+			break;
+
+		case PHY_INTERFACE_MODE_2500BASEX:
+			serdes_set_2500basex(priv);
+			break;
+
+		default:
+			/* should not happen because we check this in
+			 * validate */
+			WARN(1, "unexpected interface mode");
+			break;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+}
+
+/*
+ * phylink mac_link_down callback: only logs; no hardware state is
+ * changed on link loss
+ */
+static void sf2_phylink_mac_link_down(struct phylink_config *config,
+				      unsigned int mode,
+				      phy_interface_t interface)
+{
+	struct dsa_port *dp = dsa_phylink_to_port(config);
+	struct bcm_sf2_priv *priv = dp->ds->priv;
+	struct sf2_port *port = &priv->ports[dp->index];
+
+	pr_debug("sf2_phylink_mac_link_down port:%d\n", port->id);
+}
+
+/*
+ * dsa port_enable callback: power the port on
+ */
+static int sf2_dsa_port_setup(struct dsa_switch *ds, int port,
+			      struct phy_device *phy)
+{
+	struct bcm_sf2_priv *priv = ds->priv;
+
+	pr_debug("sf2_dsa_port_setup port:%d", port);
+	sf2_port_control(priv, port, true);
+	return 0;
+}
+
+/*
+ * dsa port_disable callback: power the port off
+ */
+static void sf2_dsa_port_disable(struct dsa_switch *ds, int port)
+{
+	struct bcm_sf2_priv *priv = ds->priv;
+
+	pr_debug("sf2_dsa_port_disable port:%d", port);
+	sf2_port_control(priv, port, false);
+}
+
+/*
+ * dsa setup callback: resolve per-port cpu port assignment, register
+ * the mdio bus, program switch-wide forwarding/tagging/QoS/ACB state
+ * and start fdb monitoring.
+ *
+ * Bugfix: the original error labels were swapped -- a failed
+ * of_mdiobus_register() jumped to a path calling sf2_fdb_exit() on a
+ * never-initialized fdb and mdiobus_unregister() on a
+ * never-registered bus.  Unwind order is now correct.
+ */
+static int sf2_dsa_setup(struct dsa_switch *ds)
+{
+	struct bcm_sf2_priv *priv = ds->priv;
+	unsigned int i;
+	u32 lan_ports, cpu_ports, reg, reg2;
+	int ret;
+	struct dsa_port *dp;
+
+	pr_debug("dsa_setup");
+
+	cpu_ports = gen_cpu_port_mask(priv);
+	lan_ports = gen_lan_port_mask(priv);
+
+	/* resolve which cpu port to use for each port */
+	list_for_each_entry(dp, &ds->dst->ports, list) {
+		switch (dp->type) {
+		case DSA_PORT_TYPE_UNUSED:
+			continue;
+
+		case DSA_PORT_TYPE_CPU:
+			if (!(cpu_ports & (1 << dp->index))) {
+				dev_err(&priv->pdev->dev,
+					"cpu port config mismatch\n");
+				return -EINVAL;
+			}
+			continue;
+
+		default:
+		{
+			int cpu_index;
+
+			if (WARN_ON(!dp->cpu_dp))
+				return -EINVAL;
+
+			cpu_index = dp->cpu_dp->index;
+
+			if (cpu_index >= SF2_PORT_COUNT ||
+			    !(cpu_ports & (1 << cpu_index))) {
+				dev_err(&priv->pdev->dev,
+					"port %u points to invalid "
+					"cpu port %u\n", dp->index, cpu_index);
+				return -EINVAL;
+			}
+
+			priv->ports[dp->index].cfg.cpu_port = cpu_index;
+			break;
+		}
+		}
+	}
+
+	sf2_hw_init(priv);
+
+	ret = of_mdiobus_register(priv->mii_bus, priv->mdio_np);
+	if (ret) {
+		dev_err(&priv->pdev->dev, "failed to register mdio bus");
+		/* nothing to unwind yet */
+		return ret;
+	}
+
+	/* make sure all lan ports can only talk to cpu port */
+	for (i = 0; i < SF2_PORT_COUNT; i++)  {
+		if (!(lan_ports & (1 << i)))
+			continue;
+
+		sw_core_writel(priv,
+			       (1 << i) |
+			       (1 << priv->ports[i].cfg.cpu_port),
+			       SF2_CORE_Px_VLAN_CTL(i));
+	}
+
+	/* enable brcm tag on all cpu ports */
+	reg = reg2 = 0;
+	for (i = 0; i < SF2_PORT_COUNT; i++)  {
+		if (!(cpu_ports & (1 << i)))
+			continue;
+
+		switch (i) {
+		case 8:
+			reg |= SF2_BRCMTAG_P8;
+			break;
+		case 5:
+			reg |= SF2_BRCMTAG_P5;
+			break;
+		case 7:
+			reg |= SF2_BRCMTAG_P7;
+			break;
+		default:
+			reg2 |= SF2_BRCMTAG2_Px(i);
+			break;
+		}
+	}
+
+	sw_core_writel(priv, reg, SF2_CORE_BRCMTAG_CTRL);
+	sw_core_writel(priv, reg2, SF2_CORE_BRCMTAG2_CTRL);
+
+	/* enable brcmtag priority to TC (queue) mapping on cpu ports */
+	for (i = 0; i < SF2_PORT_COUNT; i++)  {
+		unsigned int q;
+
+		if (!(cpu_ports & (1 << i)))
+			continue;
+
+		/* map 1:1 between prio number and queue id */
+		reg = sw_core_readl(priv, SF2_CORE_PORT_TC2_QOS_MAP_PORT(i));
+		for (q = 0; q < SF2_NUM_EGRESS_QUEUES; q++)
+			reg |= q << (PRT_TO_QID_SHIFT * q);
+		sw_core_writel(priv, reg, SF2_CORE_PORT_TC2_QOS_MAP_PORT(i));
+	}
+
+	/* configure & enable ACB: disable it first, set per-queue
+	 * xoff thresholds on lan ports, flush, then re-enable */
+	reg = sw_acb_readl(priv, SF2_ACB_CONTROL_REG);
+	reg &= ~ACB_CONTROL_EN_MASK;
+	reg &= ~ACB_CONTROL_FLUSHQ_MASK;
+	sw_acb_writel(priv, reg, SF2_ACB_CONTROL_REG);
+
+	for (i = 0; i < SF2_PORT_COUNT; i++)  {
+		size_t q;
+
+		if ((cpu_ports & (1 << i)))
+			continue;
+
+		for (q = 0; q < SF2_NUM_EGRESS_QUEUES; q++)  {
+			reg = sw_acb_readl(priv, SF2_ACB_QCFG_REG(i, q));
+			reg &= ~ACB_QCFG_XOFF_THRESH_MASK;
+			reg |= 24 << ACB_QCFG_XOFF_THRESH_SHIFT;
+			sw_acb_writel(priv, reg, SF2_ACB_QCFG_REG(i, q));
+		}
+	}
+
+	reg = sw_acb_readl(priv, SF2_ACB_CONTROL_REG);
+	reg |= ACB_CONTROL_FLUSHQ_MASK;
+	sw_acb_writel(priv, reg, SF2_ACB_CONTROL_REG);
+	reg &= ~ACB_CONTROL_FLUSHQ_MASK;
+	reg |= ACB_CONTROL_ALG2_MASK;
+	reg |= ACB_CONTROL_EN_MASK;
+	sw_acb_writel(priv, reg, SF2_ACB_CONTROL_REG);
+
+	/*
+	 * disable learning on cpu ports
+	 *
+	 * in case there are multiple CPU ports, the mac address from
+	 * the bridge devices will keep flapping between them, since
+	 * we can't install a mac address on multiple ports in the ATU
+	 * we disable learning on those ports.
+	 *
+	 * this means all traffic coming from lan ports that should go
+	 * to the CPU will be flooded by the switch, to avoid that we
+	 * also change the default flood map registers to the switch
+	 * only flood unknown unicast/multicast to the CPU port, which
+	 * will then flood back the packets to the lan ports if
+	 * needed.
+	 *
+	 * there does not seem to be a way to prevent the switch from
+	 * flooding broadcast, so to prevent duplicate flooding, we
+	 * mark those packets as already offloaded in the BRCM tag
+	 * code
+	 */
+	sw_core_writel(priv, cpu_ports, SF2_CORE_DIS_LEARN);
+	sw_core_writel(priv, cpu_ports, SF2_CORE_UFL_FWD_MAP);
+	sw_core_writel(priv, cpu_ports, SF2_CORE_MFL_FWD_MAP);
+	sw_core_writel(priv, cpu_ports, SF2_CORE_IPMC_FWD_MAP);
+
+	reg = sw_core_readl(priv, SF2_CORE_NEW_CTRL);
+	reg |= SF2_NEW_CTRL_MC_FWD_EN |
+		SF2_NEW_CTRL_UC_FWD_EN;
+	sw_core_writel(priv, reg, SF2_CORE_NEW_CTRL);
+
+	ret = sf2_fdb_init(priv);
+	if (ret) {
+		dev_err(&priv->pdev->dev, "failed to init fdb monitoring");
+		goto out_mdio;
+	}
+
+	return 0;
+
+out_mdio:
+	/* only the mdio bus was registered at this point */
+	mdiobus_unregister(priv->mii_bus);
+	return ret;
+}
+
+/*
+ * dsa teardown callback: undo what sf2_dsa_setup() started, in
+ * reverse order
+ */
+static void sf2_dsa_teardown(struct dsa_switch *ds)
+{
+	struct bcm_sf2_priv *priv = ds->priv;
+
+	pr_debug("dsa_teardown");
+
+	sf2_fdb_exit(priv);
+	mdiobus_unregister(priv->mii_bus);
+}
+
+/*
+ * dsa callback: this switch always uses the Broadcom tag (fbx
+ * variant), regardless of port or requested protocol
+ */
+static enum dsa_tag_protocol sf2_dsa_get_tag_protocol(struct dsa_switch *ds,
+						      int port_id,
+						      enum dsa_tag_protocol mp)
+{
+	return DSA_TAG_PROTO_BRCM_FBX;
+}
+
+/* one hardware MIB counter description */
+struct sf2_mib_desc {
+	u8 size;	/* counter width in bytes (4 or 8) */
+	u8 offset;	/* offset from the port MIB base; multiplied by
+			 * 8 when the counter is read */
+	const char *name;	/* ethtool statistic name */
+};
+
+/* MIB counter table for the per-port hardware counter block; order
+ * defines the ethtool stats order */
+static const struct sf2_mib_desc sf2_mibs[] = {
+	{ 8, 0x00, "TxOctets" },
+	{ 4, 0x10, "TxBroadcastPkts" },
+	{ 4, 0x14, "TxMulticastPkts" },
+	{ 4, 0x18, "TxUnicastPKts" },
+	{ 4, 0x08, "TxDropPkts" },
+	{ 4, 0x1c, "TxCollisions" },
+	{ 4, 0x20, "TxSingleCollision" },
+	{ 4, 0x24, "TxMultipleCollision" },
+	{ 4, 0x28, "TxDeferredCollision" },
+	{ 4, 0x2c, "TxLateCollision" },
+	{ 4, 0x30, "TxExcessiveCollision" },
+	{ 4, 0x34, "TxFrameInDiscard" },
+	{ 4, 0x38, "TxPausePkts" },
+	{ 4, 0xd0, "TxPkts64Octets" },
+	{ 4, 0xd4, "TxPkts65to127Octets" },
+	{ 4, 0xd8, "TxPkts128to255Octets" },
+	{ 4, 0xdc, "TxPkts256to511Octets" },
+	{ 4, 0xe0, "TxPkts512to1023Octets" },
+	{ 4, 0xe4, "TxPkts1024toMaxPktOctets" },
+	{ 4, 0x0c, "TxQPKTQ0" },
+	{ 4, 0x3c, "TxQPKTQ1" },
+	{ 4, 0x40, "TxQPKTQ2" },
+	{ 4, 0x44, "TxQPKTQ3" },
+	{ 4, 0x48, "TxQPKTQ4" },
+	{ 4, 0x4c, "TxQPKTQ5" },
+	{ 4, 0xc8, "TxQPKTQ6" },
+	{ 4, 0xcc, "TxQPKTQ7" },
+	{ 8, 0x50, "RxOctets" },
+	{ 4, 0x58, "RxUndersizePkts" },
+	{ 4, 0x5c, "RxPausePkts" },
+	{ 4, 0x60, "RxPkts64Octets" },
+	{ 4, 0x64, "RxPkts65to127Octets" },
+	{ 4, 0x68, "RxPkts128to255Octets" },
+	{ 4, 0x6c, "RxPkts256to511Octets" },
+	{ 4, 0x70, "RxPkts512to1023Octets" },
+	{ 4, 0x74, "RxPkts1024toMaxPktsOctets" },
+	{ 4, 0x78, "RxOversizePkts" },
+	{ 4, 0x7c, "RxJabbers" },
+	{ 4, 0x80, "RxAlignmentErrors" },
+	{ 4, 0x84, "RxFCSErrors" },
+	{ 8, 0x88, "RxGoodOctets" },
+	{ 4, 0x90, "RxDropPkts" },
+	{ 4, 0x94, "RxUnicastPkts" },
+	{ 4, 0x98, "RxMulticastPkts" },
+	{ 4, 0x9c, "RxBroadcastPkts" },
+	{ 4, 0xa0, "RxSAChanges" },
+	{ 4, 0xa4, "RxFragments" },
+	{ 4, 0xa8, "RxJumboPkt" },
+	{ 4, 0xac, "RxSymblErr" },
+	{ 4, 0xc0, "RxDiscard" },
+	{ 4, 0xb0, "InRangeErrCount" },
+	{ 4, 0xb4, "OutRangeErrCount" },
+	{ 4, 0xb8, "EEELpiEvent" },
+	{ 4, 0xbc, "EEELpiDuration" },
+};
+
+#define SF2_MIBS_SIZE	ARRAY_SIZE(sf2_mibs)
+
+/*
+ * dsa callback: number of ethtool statistics for the requested
+ * string set (switch MIBs or PHY stats)
+ */
+static int sf2_dsa_get_sset_count(struct dsa_switch *ds, int port, int sset)
+{
+	struct bcm_sf2_priv *priv = ds->priv;
+	struct phy_device *phydev;
+
+	if (sset == ETH_SS_STATS)
+		return SF2_MIBS_SIZE;
+
+	if (sset == ETH_SS_PHY_STATS) {
+		phydev = mdiobus_get_phy(priv->mii_bus, port);
+		return phydev ? phy_ethtool_get_sset_count(phydev) : 0;
+	}
+
+	return 0;
+}
+
+/*
+ * dsa callback: fill the ethtool string table for the requested
+ * string set
+ */
+static void sf2_dsa_get_strings(struct dsa_switch *ds, int port,
+				u32 sset, uint8_t *data)
+{
+	struct bcm_sf2_priv *priv = ds->priv;
+	struct phy_device *phydev;
+	unsigned int i;
+
+	if (sset == ETH_SS_STATS) {
+		for (i = 0; i < SF2_MIBS_SIZE; i++)
+			strscpy(data + i * ETH_GSTRING_LEN,
+				sf2_mibs[i].name, ETH_GSTRING_LEN);
+		return;
+	}
+
+	if (sset == ETH_SS_PHY_STATS) {
+		phydev = mdiobus_get_phy(priv->mii_bus, port);
+		if (phydev)
+			phy_ethtool_get_strings(phydev, data);
+	}
+}
+
+/*
+ * dsa callback: read every hardware MIB counter of @port into @data,
+ * in sf2_mibs[] order
+ */
+static void sf2_dsa_get_ethtool_stats(struct dsa_switch *ds, int port,
+				      uint64_t *data)
+{
+	struct bcm_sf2_priv *priv = ds->priv;
+	u32 base = SF2_CORE_Px_MIB(port);
+	unsigned int i;
+
+	for (i = 0; i < SF2_MIBS_SIZE; i++) {
+		const struct sf2_mib_desc *m = &sf2_mibs[i];
+		u32 addr = base + m->offset * 8;
+
+		/* octet counters are 64 bits wide, use the wide read */
+		if (m->size == 8)
+			data[i] = sw_core_readll(priv, addr);
+		else
+			data[i] = sw_core_readl(priv, addr);
+	}
+}
+
+/*
+ * dsa callback: fetch PHY-level ethtool statistics through the
+ * phy attached to @port, if any
+ */
+static void sf2_dsa_get_ethtool_phy_stats(struct dsa_switch *ds, int port,
+					  uint64_t *data)
+{
+	struct bcm_sf2_priv *priv = ds->priv;
+	struct phy_device *phydev = mdiobus_get_phy(priv->mii_bus, port);
+
+	if (phydev)
+		phy_ethtool_get_stats(phydev, NULL, data);
+}
+
+/*
+ * rebuild PBVLAN maps when @port joins bridge @br: allow traffic
+ * between @port, every other port already offloading @br, and the
+ * port's cpu port
+ */
+static int sf2_dsa_port_join_pxvlan(struct dsa_switch *ds, int port,
+				    const struct dsa_bridge *br)
+{
+	const struct dsa_port *dp = dsa_to_port(ds, port);
+	struct bcm_sf2_priv *priv = ds->priv;
+	unsigned int i;
+	u32 map;
+
+	if (dp->type == DSA_PORT_TYPE_CPU)
+		return 0;
+
+	/* start with ourselves and our cpu port, then add each bridge
+	 * member -- and add us to that member's own map */
+	map = BIT(port) | BIT(priv->ports[port].cfg.cpu_port);
+
+	for (i = 0; i < SF2_PORT_COUNT; i++)  {
+		u32 other;
+
+		if (i == port)
+			continue;
+
+		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), br))
+			continue;
+
+		other = sw_core_readl(priv, SF2_CORE_Px_VLAN_CTL(i));
+		sw_core_writel(priv, other | BIT(port),
+			       SF2_CORE_Px_VLAN_CTL(i));
+
+		map |= BIT(i);
+	}
+
+	sw_core_writel(priv, map, SF2_CORE_Px_VLAN_CTL(port));
+
+	return 0;
+}
+
+/*
+ * dsa callback: a port joined a bridge, update the PBVLAN maps
+ */
+static int sf2_dsa_port_bridge_join(struct dsa_switch *ds, int port,
+				    struct dsa_bridge br,
+				    bool *tx_fwd_offload,
+				    struct netlink_ext_ack *extack)
+{
+	pr_debug("sf2_dsa_port_bridge_join: port:%d\n", port);
+
+	return sf2_dsa_port_join_pxvlan(ds, port, &br);
+}
+
+/*
+ * rebuild PBVLAN maps when @port leaves bridge @br: drop @port from
+ * every member's map and shrink @port's own map back to just itself
+ * and its cpu port
+ */
+static void sf2_dsa_port_leave_pxvlan(struct dsa_switch *ds, int port,
+				      const struct dsa_bridge *br)
+{
+	const struct dsa_port *dp = dsa_to_port(ds, port);
+	struct bcm_sf2_priv *priv = ds->priv;
+	unsigned int i;
+
+	if (dp->type == DSA_PORT_TYPE_CPU)
+		return;
+
+	for (i = 0; i < SF2_PORT_COUNT; i++)  {
+		u32 other;
+
+		if (i == port)
+			continue;
+
+		if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), br))
+			continue;
+
+		other = sw_core_readl(priv, SF2_CORE_Px_VLAN_CTL(i));
+		sw_core_writel(priv, other & ~BIT(port),
+			       SF2_CORE_Px_VLAN_CTL(i));
+	}
+
+	/* restore port pbvlan to its cpu port */
+	sw_core_writel(priv,
+		       BIT(port) | BIT(priv->ports[port].cfg.cpu_port),
+		       SF2_CORE_Px_VLAN_CTL(port));
+}
+
+/*
+ * dsa callback: a port left a bridge; restore its PBVLAN map and
+ * take it out of STP control
+ */
+static void sf2_dsa_port_bridge_leave(struct dsa_switch *ds, int port,
+				      struct dsa_bridge br)
+{
+	struct bcm_sf2_priv *priv = ds->priv;
+	u32 val;
+
+	pr_debug("sf2_dsa_port_bridge_leave: port:%d\n", port);
+
+	sf2_dsa_port_leave_pxvlan(ds, port, &br);
+
+	val = sw_core_readl(priv, SF2_CORE_PCTL(port));
+	val = (val & ~PCTL_STP_MASK) | PCTL_STP_NONE;
+	sw_core_writel(priv, val, SF2_CORE_PCTL(port));
+}
+
+/*
+ * dsa callback: apply a bridge STP state to the port control
+ * register, adjusting the PBVLAN maps to match (only forwarding
+ * ports participate in the bridge maps), then flush the ARL
+ */
+static void sf2_dsa_port_stp_state_set(struct dsa_switch *ds, int port_id,
+				       u8 state)
+{
+	struct bcm_sf2_priv *priv = ds->priv;
+	const struct dsa_port *port = dsa_to_port(ds, port_id);
+	u32 hw_state;
+	u32 reg, new;
+
+	/* STP states only make sense for bridged ports */
+	if (!port->bridge)
+		return;
+
+	switch (state) {
+	case BR_STATE_DISABLED:
+		hw_state = PCTL_STP_DISABLE;
+		break;
+	case BR_STATE_LISTENING:
+		hw_state = PCTL_STP_LISTEN;
+		break;
+	case BR_STATE_LEARNING:
+		hw_state = PCTL_STP_LEARN;
+		break;
+	case BR_STATE_FORWARDING:
+		hw_state = PCTL_STP_FORWARD;
+		break;
+	case BR_STATE_BLOCKING:
+		hw_state = PCTL_STP_BLOCKED;
+		break;
+	default:
+		dev_err(ds->dev, "invalid STP state: %d\n", state);
+		return;
+	}
+
+	/* nothing to do when the hardware already matches -- this also
+	 * skips the pxvlan/ARL churn below */
+	reg = sw_core_readl(priv, SF2_CORE_PCTL(port_id));
+	new = reg & ~PCTL_STP_MASK;
+	new |= hw_state;
+	if (new == reg)
+		return;
+
+	/* keep the PBVLAN maps in sync with the forwarding state */
+	if (state != BR_STATE_FORWARDING)
+		sf2_dsa_port_leave_pxvlan(ds, port_id, port->bridge);
+	else
+		sf2_dsa_port_join_pxvlan(ds, port_id, port->bridge);
+
+	dev_dbg(ds->dev, "Set STP state of port %d: %d\n", port_id, state);
+	sw_core_writel(priv, new, SF2_CORE_PCTL(port_id));
+	/* drop stale learned addresses after a topology change */
+	sf2_arl_clear(priv);
+}
+
+/*
+ * dsa callback: delete a static fdb entry from the ARL
+ */
+static int sf2_dsa_fdb_del(struct dsa_switch *ds, int port,
+			   const unsigned char *addr, u16 vid,
+			   struct dsa_db db)
+{
+	struct bcm_sf2_priv *priv = ds->priv;
+
+	/* vid is ignored: the ARL is programmed without vlan support,
+	 * so entries are always removed with vlanid 0 */
+	sf2_arl_delete(priv, addr, 0);
+	return 0;
+}
+
+/*
+ * fdb dump helper: forward one ARL entry to the dsa dump callback,
+ * but only when the entry is valid and belongs to @port
+ */
+static int sf2_fdb_copy(int port, const struct sf2_arl_entry *ent,
+			dsa_fdb_dump_cb_t *cb, void *data)
+{
+	if (!ent->is_valid || ent->port != port)
+		return 0;
+
+	return cb(ent->mac, ent->vid, ent->is_static, data);
+}
+
+/*
+ * dsa callback: MTU changes need no hardware programming here;
+ * accept any requested value (upper bound comes from
+ * sf2_dsa_get_max_mtu)
+ */
+static int sf2_dsa_change_mtu(struct dsa_switch *ds, int port, int mtu)
+{
+	return 0;
+}
+
+/*
+ * dsa callback: no switch-imposed MTU limit below the generic
+ * ethernet maximum
+ */
+static int sf2_dsa_get_max_mtu(struct dsa_switch *ds, int port)
+{
+	return ETH_MAX_MTU;
+}
+
+/*
+ * dsa/ethtool set_eee callback: remember the requested tx LPI state
+ * and program the sleep timers; enabling the hardware LPI request is
+ * deferred to the next link-up event
+ */
+static int sf2_dsa_set_mac_eee(struct dsa_switch *ds, int port_id,
+			       struct ethtool_keee *e)
+{
+	struct bcm_sf2_priv *priv = ds->priv;
+	struct sf2_port *port = &priv->ports[port_id];
+
+	/* record requested state, request to enable will be applied
+	 * at link change through phylink callback (cannot access
+	 * phylink eee_active from here) */
+	port->tx_lpi_enabled = e->tx_lpi_enabled;
+
+	/* disabling can take effect immediately */
+	if (!e->tx_lpi_enabled)
+		sf2_setup_port_eee(priv, port, false);
+
+	/* program the LPI sleep timer for both link speeds */
+	sw_core_writel(priv, e->tx_lpi_timer,
+		       SF2_CORE_Px_EEE_SLEEP_TMR_1G_REG(port_id));
+	sw_core_writel(priv, e->tx_lpi_timer,
+		       SF2_CORE_Px_EEE_SLEEP_TMR_100M_REG(port_id));
+
+	return 0;
+}
+
+/*
+ * dsa/ethtool get_eee callback: report the recorded tx LPI state and
+ * the 1G sleep timer currently programmed in hardware
+ */
+static int sf2_dsa_get_mac_eee(struct dsa_switch *ds, int port_id,
+			       struct ethtool_keee *e)
+{
+	struct bcm_sf2_priv *priv = ds->priv;
+
+	/* other eee_ fields are filled by dsa */
+	e->tx_lpi_enabled = priv->ports[port_id].tx_lpi_enabled;
+	e->tx_lpi_timer =
+		sw_core_readl(priv, SF2_CORE_Px_EEE_SLEEP_TMR_1G_REG(port_id));
+	return 0;
+}
+
+/*
+ * phylink MAC operations for all sf2 ports
+ */
+static const struct phylink_mac_ops bcm_sf2_phylink_mac_ops = {
+	.mac_config		= sf2_phylink_mac_config,
+	.mac_link_down		= sf2_phylink_mac_link_down,
+	.mac_link_up		= sf2_phylink_mac_link_up,
+};
+
+/*
+ * dsa switch operations implemented by this driver
+ */
+static const struct dsa_switch_ops bcm_sf2_dsa_ops = {
+	.get_tag_protocol	= sf2_dsa_get_tag_protocol,
+	.setup			= sf2_dsa_setup,
+	.teardown		= sf2_dsa_teardown,
+	.port_enable		= sf2_dsa_port_setup,
+	.port_disable		= sf2_dsa_port_disable,
+	.phylink_get_caps	= sf2_dsa_phylink_get_caps,
+	.get_strings		= sf2_dsa_get_strings,
+	.get_ethtool_stats	= sf2_dsa_get_ethtool_stats,
+	.get_sset_count		= sf2_dsa_get_sset_count,
+	.get_ethtool_phy_stats	= sf2_dsa_get_ethtool_phy_stats,
+	.port_bridge_join	= sf2_dsa_port_bridge_join,
+	.port_bridge_leave	= sf2_dsa_port_bridge_leave,
+	.port_stp_state_set	= sf2_dsa_port_stp_state_set,
+	.port_fdb_del		= sf2_dsa_fdb_del,
+	.port_max_mtu		= sf2_dsa_get_max_mtu,
+	.port_change_mtu	= sf2_dsa_change_mtu,
+	.set_mac_eee		= sf2_dsa_set_mac_eee,
+	.get_mac_eee		= sf2_dsa_get_mac_eee,
+};
+
+/*
+ * make sure every xbar output has some input assigned: outputs left
+ * unconfigured (-1) get an arbitrary still-unused input
+ */
+static void fixup_xbar_config(struct bcm_sf2_priv *priv)
+{
+	int used[SF2_XBAR_OUT_PORT_COUNT];
+	size_t out, in;
+
+	memset(used, 0, sizeof (used));
+
+	/* mark every input already claimed by an output */
+	for (out = 0; out < ARRAY_SIZE(priv->xbar_mapping); out++) {
+		if (priv->xbar_mapping[out] != -1)
+			used[priv->xbar_mapping[out]] = 1;
+	}
+
+	/* hand the remaining inputs out to unassigned outputs */
+	for (out = 0; out < ARRAY_SIZE(priv->xbar_mapping); out++) {
+		if (priv->xbar_mapping[out] != -1)
+			continue;
+
+		for (in = 0; in < ARRAY_SIZE(used); in++) {
+			if (used[in])
+				continue;
+
+			priv->xbar_mapping[out] = in;
+			used[in] = 1;
+			break;
+		}
+	}
+}
+
+/*
+ * parse the driver configuration from the device tree, then derive
+ * the internal port descriptors, PHY enable masks and xbar/mux
+ * routing from it
+ *
+ * NOTE(review): the device_node references returned by
+ * of_get_child_by_name() (wan_port_np, ports_np) are never released
+ * with of_node_put(), and the early returns inside the
+ * for_each_available_child_of_node() loop leak the port_np child
+ * reference -- confirm and fix refcounting
+ */
+static int of_read_sf2_config(struct platform_device *pdev,
+			      struct bcm_sf2_priv *priv)
+{
+	struct sf2_config *config = &priv->config;
+	bool xbar_in_used[SF2_XBAR_IN_PORT_COUNT];
+	struct device_node *ports_np, *port_np, *wan_port_np;
+	int i, ret;
+	u32 val;
+
+	/*
+	 * fill config from device tree
+	 */
+	memset(config, 0, sizeof (*config));
+
+	/* all three phy id properties are mandatory */
+	ret = of_property_read_u32(pdev->dev.of_node,
+				   "sf2,qphy-base-id",
+				   &config->qphy_base_id);
+	ret |= of_property_read_u32(pdev->dev.of_node,
+				   "sf2,sphy-phy-id",
+				    &config->sphy_phy_id);
+	ret |= of_property_read_u32(pdev->dev.of_node,
+				    "sf2,serdes-phy-id",
+				    &config->serdes_phy_id);
+	if (ret) {
+		dev_err(&pdev->dev, "missing phy id properties");
+		return ret;
+	}
+
+	wan_port_np = of_get_child_by_name(pdev->dev.of_node,
+					"sf2,wan-port-config");
+	if (!wan_port_np) {
+		dev_err(&pdev->dev, "missing sf2,wan-port-config property");
+		return -ENODEV;
+	}
+
+	/* the wan port, when enabled, is fed through an xbar input */
+	priv->wan_port.used = of_device_is_available(wan_port_np);
+	if (priv->wan_port.used) {
+		ret = of_property_read_u32(wan_port_np, "xbar-in-port", &val);
+		if (ret) {
+			dev_err(&pdev->dev, "missing xbar-in-port "
+				"property for wan port");
+			return -ENODEV;
+		}
+		priv->wan_port.cfg.xbar_in_port = val;
+	}
+
+	ports_np = of_get_child_by_name(pdev->dev.of_node, "ports");
+	if (!ports_np) {
+		dev_err(&pdev->dev, "missing ports property");
+		return -ENODEV;
+	}
+
+	priv->leds_top_regmap =
+		syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "leds-top");
+	if (IS_ERR(priv->leds_top_regmap))
+		return PTR_ERR(priv->leds_top_regmap);
+
+	/* per-port subnodes: read led/cpu-port/xbar/mux routing */
+	for_each_available_child_of_node(ports_np, port_np) {
+		struct sf2_port *port;
+		struct sf2_port_config *pcfg;
+		const struct sf2_port_hw_desc *pdesc;
+		u32 port_id;
+
+		ret = of_property_read_u32(port_np, "reg", &port_id);
+		if (ret)
+                        return ret;
+
+		if (port_id >= SF2_PORT_COUNT)
+                        return -EINVAL;
+
+		port = &priv->ports[port_id];
+		port->id = port_id;
+		pcfg = &port->cfg;
+		pcfg->cpu_port = -1;
+
+		pdesc = &bcm63158_port_descs[port_id];
+
+		port->used = of_device_is_available(port_np);
+		if (!port->used)
+			continue;
+
+		if (!of_property_read_u32(port_np, "sf2,led-link-act", &val))
+			pcfg->led_link_act = val;
+		else
+			pcfg->led_link_act = -1;
+
+		if (!of_property_read_u32(port_np, "sf2,cpu-port", &val)) {
+			if (val >= SF2_PORT_COUNT) {
+				dev_err(&pdev->dev, "bad sf2,cpu-port value "
+					"property for port %d", port_id);
+				return -EINVAL;
+			}
+			pcfg->cpu_port = val;
+		}
+
+		/* xbar/mux-fed ports need to know their input */
+		switch (pdesc->port_type) {
+		case SF2_PORT_T_XBAR_4X3:
+			if (of_property_read_u32(port_np, "xbar-in-port", &val)) {
+				dev_err(&pdev->dev, "missing xbar-in-port "
+					"property for port %d", port_id);
+				return -ENODEV;
+			}
+			pcfg->xbar_in_port = val;
+			break;
+
+		case SF2_PORT_T_XBAR_MUX1:
+			if (of_property_read_u32(port_np, "mux1-in-port", &val)) {
+				dev_err(&pdev->dev, "missing mux1-in-port "
+					"property for port %d", port_id);
+				return -ENODEV;
+			}
+			pcfg->mux1_in_port = val;
+			break;
+
+		case SF2_PORT_T_XBAR_MUX2:
+			if (of_property_read_u32(port_np, "mux2-in-port", &val)) {
+				dev_err(&pdev->dev, "missing mux2-in-port "
+					"property for port %d", port_id);
+				return -ENODEV;
+			}
+			pcfg->mux2_in_port = val;
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	/*
+	 * check & compute various mappings from config
+	 */
+	priv->qphy_en_mask = 0;
+	priv->sphy_en_mask = 0;
+	priv->serdes_en_mask = 0;
+	memset(xbar_in_used, 0, sizeof (xbar_in_used));
+
+	for (i = 0; i < ARRAY_SIZE(priv->xbar_mapping); i++)
+		priv->xbar_mapping[i] = -1;
+
+	/* first round to resolve real port */
+	for (i = 0; i < SF2_PORT_COUNT; i++) {
+		struct sf2_port *port = &priv->ports[i];
+		const struct sf2_port_config *pcfg = &port->cfg;
+		const struct sf2_port_hw_desc *pdesc = &bcm63158_port_descs[i];
+
+		if (!port->used)
+			continue;
+
+		port->tx_lpi_enabled = true;
+		switch (pdesc->port_type) {
+		case SF2_PORT_T_UNEXISTING:
+			dev_err(&pdev->dev,
+				"configured to use non-existing port %u\n", i);
+			return -EINVAL;
+
+		case SF2_PORT_T_RGMII:
+		case SF2_PORT_T_UNIMAC:
+		case SF2_PORT_T_SYSPORT:
+		case SF2_PORT_T_QUAD_GPHY:
+		case SF2_PORT_T_SINGLE_GPHY:
+		case SF2_PORT_T_SERDES:
+			/* direct ports: the static descriptor applies */
+			port->pdesc = pdesc;
+			break;
+
+		case SF2_PORT_T_XBAR_MUX1:
+		{
+			int mux_in;
+
+			mux_in = pcfg->mux1_in_port;
+
+			if (mux_in < 0 || mux_in > 1) {
+				dev_err(&pdev->dev,
+					"port %d use non-existing "
+					"mux in port %u\n", i, mux_in);
+				return -EINVAL;
+			}
+
+			/* muxed port: take the selected input's descriptor */
+			priv->mux1_mapping = mux_in;
+			port->pdesc = &bcm63158_mux1_port_descs[mux_in];
+			break;
+		}
+
+		case SF2_PORT_T_XBAR_MUX2:
+		{
+			int mux_in;
+
+			mux_in = pcfg->mux2_in_port;
+
+			if (mux_in < 0 || mux_in > 1) {
+				dev_err(&pdev->dev,
+					"port %d use non-existing "
+					"mux in port %u\n", i, mux_in);
+				return -EINVAL;
+			}
+
+			priv->mux2_mapping = mux_in;
+			port->pdesc = &bcm63158_mux2_port_descs[mux_in];
+			break;
+		}
+
+		case SF2_PORT_T_XBAR_4X3:
+		{
+			int xbar_in;
+
+			xbar_in = pcfg->xbar_in_port;
+			if (xbar_in < 0 ||
+			    xbar_in >= ARRAY_SIZE(bcm63158_xbar_port_descs)) {
+				dev_err(&pdev->dev,
+					"port %d use non-existing "
+					"xbar in port %u\n", i, xbar_in);
+				return -EINVAL;
+			}
+
+			/* each xbar input can feed only one output */
+			if (xbar_in_used[xbar_in]) {
+				dev_err(&pdev->dev,
+					"port %d use already used "
+					"xbar in port %u\n", i, xbar_in);
+				return -EINVAL;
+			}
+
+			priv->xbar_mapping[pdesc->xbar_out_port] = xbar_in;
+			port->pdesc = &bcm63158_xbar_port_descs[xbar_in];
+			break;
+		}
+		}
+	}
+
+	/* second round: build the PHY enable masks from the resolved
+	 * descriptors */
+	for (i = 0; i < SF2_PORT_COUNT; i++) {
+		struct sf2_port *port = &priv->ports[i];
+		const struct sf2_port_hw_desc *pdesc = port->pdesc;
+
+		if (!pdesc)
+			continue;
+
+		switch (pdesc->port_type) {
+		case SF2_PORT_T_UNIMAC:
+		case SF2_PORT_T_SYSPORT:
+			break;
+
+		case SF2_PORT_T_QUAD_GPHY:
+			WARN_ON(priv->qphy_en_mask &
+				(1 << pdesc->quad_gphy_port));
+			priv->qphy_en_mask |= (1 << pdesc->quad_gphy_port);
+			break;
+
+		case SF2_PORT_T_SINGLE_GPHY:
+			WARN_ON(priv->sphy_en_mask != 0);
+			priv->sphy_en_mask |= 1;
+			break;
+
+		case SF2_PORT_T_SERDES:
+			WARN_ON(priv->serdes_en_mask != 0);
+			priv->serdes_en_mask |= 1;
+			break;
+
+		default:
+			break;
+		}
+	}
+
+	/* resolve the wan port routing (always xbar output 2) and
+	 * account its PHY in the enable masks too */
+	if (priv->wan_port.used) {
+		const struct sf2_port_hw_desc *xbar_in_pdesc;
+		int xbar_in;
+
+		xbar_in = priv->wan_port.cfg.xbar_in_port;
+		if (xbar_in < 0 ||
+		    xbar_in >= ARRAY_SIZE(bcm63158_xbar_port_descs)) {
+			dev_err(&pdev->dev,
+				"wan port use non-existing "
+				"xbar in port %u\n", xbar_in);
+			return -EINVAL;
+		}
+
+		if (xbar_in_used[xbar_in]) {
+			dev_err(&pdev->dev,
+				"wan port use already used "
+				"xbar in port %u\n", xbar_in);
+			return -EINVAL;
+		}
+
+		priv->xbar_mapping[2] = xbar_in;
+		xbar_in_pdesc = &bcm63158_xbar_port_descs[xbar_in];
+		priv->wan_port.pdesc = xbar_in_pdesc;
+
+		switch (xbar_in_pdesc->port_type) {
+		case SF2_PORT_T_RGMII:
+			break;
+
+		case SF2_PORT_T_UNIMAC:
+		case SF2_PORT_T_SYSPORT:
+			return -EINVAL;
+
+		case SF2_PORT_T_QUAD_GPHY:
+			WARN_ON(priv->qphy_en_mask &
+				(1 << xbar_in_pdesc->quad_gphy_port));
+			priv->qphy_en_mask |=
+				(1 << xbar_in_pdesc->quad_gphy_port);
+			break;
+		case SF2_PORT_T_SINGLE_GPHY:
+			WARN_ON(priv->sphy_en_mask != 0);
+			priv->sphy_en_mask |= 1;
+			break;
+		case SF2_PORT_T_SERDES:
+			WARN_ON(priv->serdes_en_mask != 0);
+			priv->serdes_en_mask |= 1;
+			break;
+		default:
+			WARN(1, "invalid hw desc");
+			return -EINVAL;
+		}
+	}
+
+	fixup_xbar_config(priv);
+
+	return 0;
+}
+
+/*
+ * Log (at debug level) the resolved crossbar and mux routing: one
+ * line per XBAR output, then one line per mux.
+ */
+static void dump_sf2_mapping(struct bcm_sf2_priv *priv)
+{
+	struct device *dev = &priv->pdev->dev;
+	size_t out;
+
+	for (out = 0; out < ARRAY_SIZE(priv->xbar_mapping); out++)
+		dev_dbg(dev, "XBAR mapping: %s => %s\n",
+			bcm63158_xbar_in_port_names[priv->xbar_mapping[out]],
+			bcm63158_xbar_out_port_names[out]);
+
+	dev_dbg(dev, "MUX1 mapping: %s => %s\n",
+		mux1_in_port_names[priv->mux1_mapping], mux1_out_port_name);
+	dev_dbg(dev, "MUX2 mapping: %s => %s\n",
+		mux2_in_port_names[priv->mux2_mapping], mux2_out_port_name);
+}
+
+
+/*
+ * Reserve & remap the four named register regions ("core", "reg",
+ * "mdio", "acb") declared in the platform device resources.
+ *
+ * Returns 0 on success, -ENODEV when a resource is missing, or the
+ * error encoded by devm_ioremap_resource() on mapping failure.
+ */
+static int remap_regs(struct bcm_sf2_priv *priv)
+{
+	static const char *regs_name[] = {
+		"core",
+		"reg",
+		"mdio",
+		"acb",
+	};
+	struct platform_device *pdev = priv->pdev;
+	struct resource *res;
+	void __iomem *addr[ARRAY_SIZE(regs_name)];
+	size_t i;
+
+	for (i = 0; i < ARRAY_SIZE(regs_name); i++) {
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   regs_name[i]);
+		if (!res) {
+			dev_err(&pdev->dev, "unable to get %s register "
+				"resource.\n", regs_name[i]);
+			return -ENODEV;
+		}
+
+		/*
+		 * devm_ioremap_resource() never returns NULL: failures
+		 * come back as ERR_PTR() (and are already logged by the
+		 * helper), so check with IS_ERR() and propagate.
+		 */
+		addr[i] = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(addr[i]))
+			return PTR_ERR(addr[i]);
+	}
+
+	priv->regs_core = addr[0];
+	priv->regs_reg = addr[1];
+	priv->regs_mdio = addr[2];
+	priv->regs_acb = addr[3];
+
+	return 0;
+}
+
+/*
+ * Platform probe: parse the DT configuration, map registers, grab the
+ * reset control, describe the switch-internal MDIO bus and register
+ * the DSA switch.
+ */
+static int bcm_sf2_probe(struct platform_device *pdev)
+{
+	struct bcm_sf2_priv *priv;
+	struct reset_control *rst;
+	struct mii_bus *bus;
+	struct dsa_switch *ds;
+	int ret;
+
+	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->pdev = pdev;
+
+	ret = of_read_sf2_config(pdev, priv);
+	if (ret) {
+		dev_err(&pdev->dev, "invalid device tree config\n");
+		return ret;
+	}
+
+	ret = remap_regs(priv);
+	if (ret)
+		return ret;
+
+	rst = devm_reset_control_get(&pdev->dev, "sf2");
+	if (IS_ERR(rst)) {
+		if (PTR_ERR(rst) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "missing sf2 reset control\n");
+		return PTR_ERR(rst);
+	}
+	priv->rst = rst;
+
+	priv->mii_bus = devm_mdiobus_alloc(&pdev->dev);
+	if (!priv->mii_bus)
+		return -ENOMEM;
+
+	/*
+	 * describe the internal MDIO bus (registered later through the
+	 * DSA framework)
+	 */
+	bus = priv->mii_bus;
+	bus->priv = priv;
+	bus->name = "bcm63158_sf2 MII bus";
+	bus->parent = &pdev->dev;
+	bus->read = sf2_mii_read;
+	bus->read_c45 = sf2_mii_read_c45;
+	bus->write = sf2_mii_write;
+	bus->write_c45 = sf2_mii_write_c45;
+	bus->reset = sf2_mii_bus_reset;
+	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio", pdev->name);
+
+	/* of_get_child_by_name() takes a reference: drop it on error */
+	priv->mdio_np = of_get_child_by_name(pdev->dev.of_node, "sf2,mdio");
+	if (!priv->mdio_np) {
+		dev_err(&pdev->dev, "missing sf2,mdio node\n");
+		return -ENODEV;
+	}
+
+	ds = devm_kzalloc(&pdev->dev, sizeof(*ds), GFP_KERNEL);
+	if (!ds) {
+		ret = -ENOMEM;
+		goto err_put_node;
+	}
+
+	ds->priv = priv;
+	ds->num_ports = SF2_PORT_COUNT;
+	ds->dev = &pdev->dev;
+	ds->ops = &bcm_sf2_dsa_ops;
+	ds->phylink_mac_ops = &bcm_sf2_phylink_mac_ops;
+	priv->ds = ds;
+
+	ret = dsa_register_switch(ds);
+	if (ret) {
+		if (ret != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "dsa register switch failed: %d\n",
+				ret);
+		goto err_put_node;
+	}
+
+	/* all set */
+	dump_sf2_mapping(priv);
+	platform_set_drvdata(pdev, priv);
+	bcm_sf2_dbg_init(priv);
+	return 0;
+
+err_put_node:
+	of_node_put(priv->mdio_np);
+	return ret;
+}
+
+/*
+ * Platform remove: tear down debugfs, unregister the DSA switch and
+ * release the "sf2,mdio" node reference taken at probe time.
+ */
+static void bcm_sf2_remove(struct platform_device *pdev)
+{
+	struct bcm_sf2_priv *priv;
+
+	bcm_sf2_dbg_exit();
+	priv = platform_get_drvdata(pdev);
+	dsa_unregister_switch(priv->ds);
+	/* balance the of_get_child_by_name() from probe */
+	of_node_put(priv->mdio_np);
+	platform_set_drvdata(pdev, NULL);
+}
+
+static const struct of_device_id bcm63158_sf2_of_match[] = {
+	{ .compatible = "brcm,bcm63158-sf2" },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, bcm63158_sf2_of_match);
+
+/*
+ * platform driver glue.
+ *
+ * NOTE(review): the explicit .owner assignment is redundant --
+ * platform_driver_register() already sets it from THIS_MODULE;
+ * harmless, left as-is.
+ */
+struct platform_driver bcm63158_sf2_driver = {
+	.probe	= bcm_sf2_probe,
+	.remove	= bcm_sf2_remove,
+	.driver	= {
+		.name		= "bcm63158_sf2",
+		.of_match_table = bcm63158_sf2_of_match,
+		.owner		= THIS_MODULE,
+	},
+};
+
+module_platform_driver(bcm63158_sf2_driver);
+
+MODULE_DESCRIPTION("BCM63158 SF2 driver");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_LICENSE("GPL");
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/sf2_priv.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/sf2_priv.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/sf2_priv.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/sf2_priv.h	2025-09-25 17:40:33.563357289 +0200
@@ -0,0 +1,275 @@
+#ifndef SF2_PRIV_H_
+#define SF2_PRIV_H_
+
+#include <linux/kernel.h>
+#include <linux/regmap.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+#include <net/switchdev.h>
+
+/*
+ * hardware description of SF2 switch ports & crossbar for 63158 SOC
+ */
+#define SF2_PORT_COUNT		9
+#define SF2_XBAR_IN_PORT_COUNT	4
+#define SF2_XBAR_OUT_PORT_COUNT	3
+#define SF2_NUM_EGRESS_QUEUES	8
+
+/* physical flavour of a switch port / crossbar attachment */
+enum sf2_port_type {
+	SF2_PORT_T_UNEXISTING,
+	SF2_PORT_T_RGMII,
+	SF2_PORT_T_QUAD_GPHY,
+	SF2_PORT_T_SINGLE_GPHY,
+	SF2_PORT_T_SERDES,
+	SF2_PORT_T_UNIMAC,
+	SF2_PORT_T_SYSPORT,
+
+	/* crossbar/mux pseudo types, replaced by the selected input's
+	 * descriptor during config parsing */
+	SF2_PORT_T_XBAR_4X3,
+	SF2_PORT_T_XBAR_MUX1,
+	SF2_PORT_T_XBAR_MUX2,
+};
+
+/* static hardware description of one port */
+struct sf2_port_hw_desc {
+	enum sf2_port_type	port_type;
+	int			quad_gphy_port;	/* index within the quad GPHY */
+	int			xbar_out_port;	/* XBAR output this port sits on */
+	bool			is_imp;		/* IMP (management) port */
+};
+
+/*
+ * device tree extracted configuration, per port
+ */
+struct sf2_port_config {
+	int		xbar_in_port;	/* XBAR input wired to this port */
+	int		mux1_in_port;	/* MUX1 input selector (0 or 1) */
+	int		mux2_in_port;	/* MUX2 input selector (0 or 1) */
+	int		cpu_port;
+	int		led_link_act;
+};
+
+/* device tree extracted configuration, switch-global */
+struct sf2_config {
+	/* base mdio address to use for QUAD gphy (will use id => id + 3) */
+	unsigned int	qphy_base_id;
+
+	/* mdio address to use for single gphy */
+	unsigned int	sphy_phy_id;
+
+	/* mdio address to use for serdes phy */
+	unsigned int	serdes_phy_id;
+
+	/* use inverted logic for serdes signal detect */
+	bool		serdes_inv_sd;
+};
+
+/* runtime state of one switch port */
+struct sf2_port {
+	const struct sf2_port_hw_desc	*pdesc;	/* NULL when port not described */
+	unsigned int		id;
+        struct sf2_port_config	cfg;
+	bool			used;
+	bool			enabled;
+	bool			tx_lpi_enabled;
+};
+
+/* deferred switchdev FDB event, handled from a work item */
+struct sf2_switchdev_ev_work {
+	struct bcm_sf2_priv			*priv;
+	struct net_device			*dev;
+	struct work_struct			work;
+	struct switchdev_notifier_fdb_info	info;
+	/* presumably marks the pool slot free -- verify in fdb code */
+	atomic_t				free;
+};
+
+#define SF2_SWITCHDEV_EVENT_POOL_SZ 32
+/* FDB bookkeeping: ARL polling, caching and switchdev plumbing */
+struct bcm_sf2_fdb {
+	struct workqueue_struct		*poll_wq;
+	struct workqueue_struct		*update_wq;
+	struct notifier_block		switchdev_notifier;
+	struct delayed_work		poll_wk;
+	DECLARE_HASHTABLE(arl_hash, 8);
+	struct list_head		cache;
+	struct mutex			lock;
+	struct sf2_switchdev_ev_work	sd_ev[SF2_SWITCHDEV_EVENT_POOL_SZ];
+	unsigned long			poll_delay;
+	unsigned int			nr_entries;
+};
+
+/* main driver state, one instance per SF2 switch */
+struct bcm_sf2_priv {
+	/* mapped register blocks, filled by remap_regs() */
+	void __iomem		*regs_core;
+	void __iomem		*regs_reg;
+	void __iomem		*regs_mdio;
+	void __iomem		*regs_acb;
+	struct regmap		*leds_top_regmap;
+
+	struct platform_device	*pdev;
+	struct reset_control	*rst;
+	struct mii_bus		*mii_bus;
+	struct device_node	*mdio_np;	/* "sf2,mdio" DT child node */
+	struct dsa_switch	*ds;
+
+	/* extract DT configuration */
+	struct sf2_config	config;
+
+	/* logical state extracted from config */
+	unsigned int		qphy_en_mask;	/* quad GPHY ports in use */
+	unsigned int		sphy_en_mask;	/* single GPHY in use (0/1) */
+	unsigned int		serdes_en_mask;	/* serdes in use (0/1) */
+	int			xbar_mapping[SF2_XBAR_OUT_PORT_COUNT];	/* XBAR out => in */
+	int			mux1_mapping;	/* selected MUX1 input */
+	int			mux2_mapping;	/* selected MUX2 input */
+
+	struct sf2_port		ports[SF2_PORT_COUNT];
+	struct sf2_port		wan_port;
+
+	/* FDB book keeping */
+	struct bcm_sf2_fdb	fdb;
+};
+
+/*
+ * Build a bitmask of used ports. With @cpu true, select the CPU-facing
+ * (UNIMAC/SYSPORT) ports; otherwise select every other used port.
+ */
+static inline u32 gen_port_mask(struct bcm_sf2_priv *priv, bool cpu)
+{
+	u32 mask = 0;
+	int idx;
+
+	for (idx = 0; idx < SF2_PORT_COUNT; idx++) {
+		const struct sf2_port *p = &priv->ports[idx];
+		bool is_cpu;
+
+		if (!p->used)
+			continue;
+
+		is_cpu = (p->pdesc->port_type == SF2_PORT_T_UNIMAC ||
+			  p->pdesc->port_type == SF2_PORT_T_SYSPORT);
+		if (is_cpu == cpu)
+			mask |= (1u << idx);
+	}
+	return mask;
+}
+
+/* mask of used CPU-facing (UNIMAC/SYSPORT) ports */
+static inline u32 gen_cpu_port_mask(struct bcm_sf2_priv *priv)
+{
+	return gen_port_mask(priv, true);
+}
+
+/* mask of used external (LAN-facing) ports */
+static inline u32 gen_lan_port_mask(struct bcm_sf2_priv *priv)
+{
+	return gen_port_mask(priv, false);
+}
+
+/*
+ * FDB monitoring functions
+ */
+int sf2_fdb_init(struct bcm_sf2_priv *priv);
+void sf2_fdb_exit(struct bcm_sf2_priv *priv);
+
+/*
+ * ARL functions
+ */
+
+/* decoded ARL (address resolution) table entry */
+struct sf2_arl_entry {
+	u8 port;
+	u8 mac[ETH_ALEN];
+	u16 vid;
+	u8 is_valid:1;
+	u8 is_age:1;
+	u8 is_static:1;
+};
+
+/* iterate ARL entries, invoking @fn with @data on each one */
+void
+sf2_arl_for_each(struct bcm_sf2_priv *priv,
+		 int (*fn)(struct bcm_sf2_priv *, const struct sf2_arl_entry *,
+		           void *),
+		 void *data);
+
+/* delete the ARL entry for @mac/@vid */
+int sf2_arl_delete(struct bcm_sf2_priv *priv, const u8 *mac, u16 vid);
+
+/*
+ * mmio register accessors, one family per register block
+ * (core / reg / mdio / acb)
+ */
+static inline u16 sw_core_readw(struct bcm_sf2_priv *priv, u32 off)
+{
+	return ioread16(priv->regs_core + off);
+}
+
+static inline u32 sw_core_readl(struct bcm_sf2_priv *priv, u32 off)
+{
+	return ioread32(priv->regs_core + off);
+}
+
+static inline void sw_core_writel(struct bcm_sf2_priv *priv, u32 val, u32 off)
+{
+	iowrite32(val, priv->regs_core + off);
+}
+
+static inline void sw_core_writell(struct bcm_sf2_priv *priv, u64 val, u32 off)
+{
+	iowrite64(val, priv->regs_core + off);
+}
+
+static inline u64 sw_core_readll(struct bcm_sf2_priv *priv, u32 off)
+{
+	return ioread64(priv->regs_core + off);
+}
+
+static inline u32 sw_reg_readl(struct bcm_sf2_priv *priv, u32 off)
+{
+	return ioread32(priv->regs_reg + off);
+}
+
+static inline void sw_reg_writel(struct bcm_sf2_priv *priv, u32 val, u32 off)
+{
+	iowrite32(val, priv->regs_reg + off);
+}
+
+static inline u32 sw_mdio_readl(struct bcm_sf2_priv *priv, u32 off)
+{
+	return ioread32(priv->regs_mdio + off);
+}
+
+static inline void sw_mdio_writel(struct bcm_sf2_priv *priv, u32 val, u32 off)
+{
+	iowrite32(val, priv->regs_mdio + off);
+}
+
+static inline u32 sw_acb_readl(struct bcm_sf2_priv *priv, u32 off)
+{
+	return ioread32(priv->regs_acb + off);
+}
+
+static inline void sw_acb_writel(struct bcm_sf2_priv *priv, u32 val, u32 off)
+{
+	iowrite32(val, priv->regs_acb + off);
+}
+
+/* debugfs hooks: compiled out to empty stubs without CONFIG_DEBUG_FS */
+#ifdef CONFIG_DEBUG_FS
+void bcm_sf2_dbg_init(struct bcm_sf2_priv *priv);
+void bcm_sf2_dbg_exit(void);
+#else
+static inline void bcm_sf2_dbg_init(struct bcm_sf2_priv *priv) {}
+static inline void bcm_sf2_dbg_exit(void) {}
+#endif
+
+#endif /* SF2_PRIV_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/sf2_regs.h linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/sf2_regs.h
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158./sf2/sf2_regs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63158/sf2/sf2_regs.h	2025-09-25 17:40:33.563357289 +0200
@@ -0,0 +1,341 @@
+#ifndef SF2_REGS_H_
+#define SF2_REGS_H_
+
+/*
+ * apply to bcm63158
+ */
+
+/* ARL Table MAC/VID Entry N Registers (64 bit)
+ *
+ * BCM5325 and BCM5365 share most definitions below
+ */
+#define   ARLTBL_MAC_MASK		0xffffffffffffULL
+#define   ARLTBL_VID_S			48
+#define   ARLTBL_VID_MASK_25		0xff
+#define   ARLTBL_VID_MASK		0xfff
+#define   ARLTBL_DATA_PORT_ID_S_25	48
+#define   ARLTBL_DATA_PORT_ID_MASK_25	0xf
+#define   ARLTBL_AGE_25			BIT(61)
+#define   ARLTBL_STATIC_25		BIT(62)
+#define   ARLTBL_VALID_25		BIT(63)
+
+/* ARL Table Data Entry N Registers (32 bit) */
+#define   ARLTBL_DATA_PORT_ID_MASK	0x1ff
+#define   ARLTBL_TC(tc)			((3 & tc) << 11)
+#define   ARLTBL_AGE			BIT(14)
+#define   ARLTBL_STATIC			BIT(15)
+#define   ARLTBL_VALID			BIT(16)
+
+/*
+ * CORE register block
+ */
+
+/*
+ * control (page 0x0)
+ */
+#define SF2_CORE_PCTL(__p)	(__p * 8)
+# define PCTL_TXDIS		(1 << 0)
+# define PCTL_RXDIS		(1 << 1)
+# define PCTL_STP_NONE		(0 << 5)
+# define PCTL_STP_DISABLE	(1 << 5)
+# define PCTL_STP_BLOCKED	(2 << 5)
+# define PCTL_STP_LISTEN	(3 << 5)
+# define PCTL_STP_LEARN		(4 << 5)
+# define PCTL_STP_FORWARD	(5 << 5)
+# define PCTL_STP_MASK		(7 << 5)
+
+/*
+ * NOTE(review): the __p argument is accepted but unused -- a single
+ * offset is returned regardless of port; confirm against the 63158
+ * documentation whether per-port IMP control registers exist.
+ */
+#define SF2_CORE_IMP_CTL(__p)	0x40
+#define IMP_CTL_RX_BCAST_EN	(1 << 2)
+#define IMP_CTL_RX_MCAST_EN	(1 << 3)
+#define IMP_CTL_RX_UCAST_EN	(1 << 4)
+
+#define SF2_CORE_SWMODE		0x58
+# define SWMODE_FWD_MANAGED	(1 << 0)
+# define SWMODE_FWD_EN		(1 << 1)
+# define SWMODE_RETRY_LIMIT_DIS	(1 << 2)
+
+#define SF2_CORE_NEW_CTRL	0x108
+#define  SF2_NEW_CTRL_MC_FWD_EN	(1 << 7)
+#define  SF2_NEW_CTRL_UC_FWD_EN	(1 << 6)
+
+#define SF2_CORE_CTRL		0x110
+# define SF2_CTRL_MII_DUMB_FWD_EN	0x40
+# define SF2_CTRL_MII2_VOL_SEL		0x02
+
+#define SF2_CORE_UFL_FWD_MAP	0x190
+
+#define SF2_CORE_MFL_FWD_MAP	0x1a0
+
+#define SF2_CORE_IPMC_FWD_MAP	0x1b0
+
+#define SF2_CORE_DIS_LEARN	0x1e0
+
+#define SF2_CORE_WATCHDOG_CTRL	0x3c8
+# define SF2_SOFTWARE_RESET	(1 << 7)
+# define SF2_EN_CHIP_RST	(1 << 6)
+# define SF2_EN_SW_RST		(1 << 4)
+
+/*
+ * management (page 0x2)
+ */
+#define SF2_CORE_GLOBAL_MGMT_CFG	0x1000
+#define GLOBAL_MGMT_RST_MIB_MASK	(1 << 0)
+
+#define SF2_CORE_BRCMTAG_CTRL	0x1018
+# define SF2_BRCMTAG_P8		(1 << 0)
+# define SF2_BRCMTAG_P5		(1 << 1)
+# define SF2_BRCMTAG_P7		(1 << 2)
+
+#define SF2_CORE_BRCMTAG2_CTRL	0x1050
+# define SF2_BRCMTAG2_Px(x)	(1 << (x)) /* for P[012346] */
+
+/*
+ * ARL access (page 0x5)
+ */
+#define SF2_CORE_ARL_RWCTL	(0x2800)
+# define SF2_ARL_START		(1 << 7)
+# define SF2_ARL_READ		(1 << 0)
+
+#define SF2_CORE_ARL_MAC_IDX	(0x2810)
+#define SF2_CORE_ARL_VID_IDX	(0x2840)
+
+#define SF2_CORE_ARL_MACENTRY(x)	(0x2880 + (x) * 0x80)
+#define SF2_CORE_ARL_FWDENTRY(x)	(0x28c0 + (x) * 0x80)
+
+#define SF2_CORE_ARL_SRCH_CTRL	0x2a80
+#define   ARL_SRCH_VLID		BIT(0)
+#define   ARL_SRCH_STDN		BIT(7)
+
+#define SF2_CORE_ARL_SRCH_ADR	0x2a88
+
+#define SF2_CORE_ARL_SRCH_RSTx_MACVID(x)	(0x2b00 + (x) * 0x80)
+
+#define SF2_CORE_ARL_SRCH_RSTx(x)		(0x2b40 + (x) * 0x80)
+
+/*
+ * flow control (page 0x0a)
+ */
+#define SF2_CORE_FC_DIAG_CTRL		(0x5000)
+#define SF2_CORE_FC_QUEUE_CUR_COUNT(x)	(SF2_CORE_FC_DIAG_CTRL + 0x30 * 8 + (x) * 16)
+#define SF2_CORE_FC_QUEUE_PEAK_COUNT(x)	(SF2_CORE_FC_DIAG_CTRL + 0x40 * 8 + (x) * 16)
+#define SF2_CORE_FC_TOTAL_PEAK_COUNT	(SF2_CORE_FC_DIAG_CTRL + 0x50 * 8)
+#define SF2_CORE_FC_TOTAL_USED_COUNT	(SF2_CORE_FC_DIAG_CTRL + 0x52 * 8)
+
+#define SF2_CORE_FC_PEAK_RX		(SF2_CORE_FC_DIAG_CTRL + 0x54 * 8)
+
+/*
+ * lan threshold regs (page 0x0b)
+ */
+#define SF2_CORE_LAN_THRESH		(0x5800)
+#define LAN_THRESH_TXQ_RESERVED(q)	(SF2_CORE_LAN_THRESH + 0x00 * 16 + (q) * 16)
+#define LAN_THRESH_TXQ_HYST(q)		(SF2_CORE_LAN_THRESH + 0x10 * 16 + (q) * 16)
+#define LAN_THRESH_TXQ_PAUSE(q)		(SF2_CORE_LAN_THRESH + 0x20 * 16 + (q) * 16)
+#define LAN_THRESH_TXQ_DROP(q)		(SF2_CORE_LAN_THRESH + 0x30 * 16 + (q) * 16)
+#define LAN_THRESH_TXQ_TOT_HYST(q)	(SF2_CORE_LAN_THRESH + 0x40 * 16 + (q) * 16)
+#define LAN_THRESH_TXQ_TOT_PAUSE(q)	(SF2_CORE_LAN_THRESH + 0x50 * 16 + (q) * 16)
+#define LAN_THRESH_TXQ_TOT_DROP(q)	(SF2_CORE_LAN_THRESH + 0x60 * 16 + (q) * 16)
+
+/*
+ * mib (page 0x20 => 0x28)
+ */
+#define SF2_CORE_Px_MIB(x)	(0x10000 + (x) * 0x800)
+
+/*
+ * QOS (page 0x30)
+ */
+#define SF2_CORE_PORT_TC2_QOS_MAP_PORT(x)	(0x18380 + (x) * 0x20)
+#define PRT_TO_QID_SHIFT			3
+
+/*
+ * port based vlan (page 0x31)
+ */
+#define SF2_CORE_Px_VLAN_CTL(x)		(0x18800 + (x) * 0x10)
+
+#define SF2_CORE_JUMBO_PORT_MASK_REG	0x20008
+# define  SF2_JUMBO_EN_10_100_MASK	(1 << 24)
+# define  SF2_JUMBO_FM_PORT_MASK	(0x1ff)
+
+#define SF2_CORE_MIB_GD_FM_MAX_SIZE	0x20028
+# define SF2_GDM_FM_MAX_SIZE_MASK	(0x3fff << 0)
+
+#define SF2_CORE_SA_LIMIT_ENABLE	0x22800
+#define SF2_CORE_SA_LRN_CNTR_RST	0x22810
+# define SF2_CORE_TOTAL_SA_LRN_CNTR_RST_MASK (BIT(15))
+#define SF2_CORE_TOTAL_SA_LIMIT_CTL	0x22880
+#define SF2_CORE_SA_LIMIT_CTL_PORT(x)   (0x22890 + (x) * 0x10)
+#define SF2_CORE_TOTAL_SA_LRN_CNTR	0x22980
+#define SF2_CORE_SA_LRN_CNTR_PORT(x)	(0x22990 + (x) * 0x10)
+
+/*
+ * EEE registers
+ */
+#define SF2_CORE_EEE_EN_CTRL_REG	(0x49000)
+#define SF2_CORE_Px_EEE_SLEEP_TMR_1G_REG(x)	(0x49080 + (x) * 0x20)
+#define SF2_CORE_Px_EEE_SLEEP_TMR_100M_REG(x)	(0x491a0 + (x) * 0x20)
+
+/* for P0-P7 */
+#define SF2_CORE_STS_OV_Px_STATE(x)	(0x72000 + (x) * 0x10)
+# define STS_OV_Px_LINK_MASK		(1 << 0)
+# define STS_OV_Px_FULL_DUPLEX_MASK	(1 << 1)
+# define STS_OV_Px_SPEED_10_MASK	(0x0 << 2)
+# define STS_OV_Px_SPEED_100_MASK	(0x1 << 2)
+# define STS_OV_Px_SPEED_1000_MASK	(0x2 << 2)
+# define STS_OV_Px_SPEED_ALL_MASK	(0x3 << 2)
+# define STS_OV_Px_RX_FLOW_CTL_MASK	(1 << 4)
+# define STS_OV_Px_TX_FLOW_CTL_MASK	(1 << 5)
+# define STS_OV_Px_SW_OVER_MASK		(1 << 6)
+
+/* for P8 only */
+#define SF2_CORE_STS_OV_IMP_STATE	(0x72080)
+# define STS_OV_IMP_LINK_MASK		(1 << 0)
+# define STS_OV_IMP_FULL_DUPLEX_MASK	(1 << 1)
+# define STS_OV_IMP_SPEED_10_MASK	(0x0 << 2)
+# define STS_OV_IMP_SPEED_100_MASK	(0x1 << 2)
+# define STS_OV_IMP_SPEED_1000_MASK	(0x2 << 2)
+# define STS_OV_IMP_SPEED_ALL_MASK	(0x3 << 2)
+# define STS_OV_IMP_RX_FLOW_CTL_MASK	(1 << 4)
+# define STS_OV_IMP_TX_FLOW_CTL_MASK	(1 << 5)
+# define STS_OV_IMP_MII_SW_OVER_MASK	(1 << 7)
+
+/*
+ * REG register block.
+ */
+#define SF2_REG_SW_CTRL			0x0
+#define SW_CTRL_P5_SPEED_MASK		(0x3 << 9)
+#define SW_CTRL_P5_SPEED_1G		(0x0 << 9)
+#define SW_CTRL_P5_SPEED_2G		(0x1 << 9)
+#define SW_CTRL_P5_SPEED_2_5G		(0x2 << 9)
+#define SW_CTRL_P5_SPEED_3G		(0x3 << 9)
+#define SW_CTRL_P7_SPEED_MASK		(0x3 << 5)
+#define SW_CTRL_P7_SPEED_1G		(0x0 << 5)
+#define SW_CTRL_P7_SPEED_2G		(0x1 << 5)
+#define SW_CTRL_P7_SPEED_2_5G		(0x2 << 5)
+#define SW_CTRL_P7_SPEED_3G		(0x3 << 5)
+#define SW_CTRL_P8_SPEED_MASK		(0x3 << 3)
+#define SW_CTRL_P8_SPEED_1G		(0x0 << 3)
+#define SW_CTRL_P8_SPEED_2G		(0x1 << 3)
+#define SW_CTRL_P8_SPEED_2_5G		(0x2 << 3)
+#define SW_CTRL_P8_SPEED_3G		(0x3 << 3)
+
+#define SF2_REG_PHY_TEST		0x18
+
+#define SF2_REG_QPHY_CTRL		0x1c
+# define QPHY_CTRL_IDDQ_BIAS		(1 << 0)
+# define QPHY_CTRL_EXT_PWR_DOWN_SHIFT	1
+# define QPHY_CTRL_EXT_PWR_DOWN(__p)	(1 << ((__p) + 1))
+# define QPHY_CTRL_EXT_PWR_DOWN_ALL	(0xf << QPHY_CTRL_EXT_PWR_DOWN_SHIFT)
+# define QPHY_CTRL_FORCE_DLL_EN		(1 << 5)
+# define QPHY_CTRL_IDDQ_GLOBAL_PWR	(1 << 6)
+# define QPHY_CTRL_CLK_25_DISABLE	(1 << 7)
+# define QPHY_CTRL_PHY_RESET		(1 << 8)
+# define QPHY_CTRL_PHY_BASE_ADDR_SHIFT	(12)
+# define QPHY_CTRL_PHY_BASE_ADDR_MASK	(0x1f << QPHY_CTRL_PHY_BASE_ADDR_SHIFT)
+
+#define SF2_REG_SPHY_CTRL		0x24
+# define SPHY_CTRL_IDDQ_BIAS		(1 << 0)
+# define SPHY_CTRL_EXT_PWR_DOWN		(1 << 1)
+# define SPHY_CTRL_FORCE_DLL_EN		(1 << 2)
+# define SPHY_CTRL_IDDQ_GLOBAL_PWR	(1 << 3)
+# define SPHY_CTRL_CLK_25_DISABLE	(1 << 4)
+# define SPHY_CTRL_PHY_RESET		(1 << 5)
+# define SPHY_CTRL_PHY_BASE_ADDR_SHIFT	(8)
+# define SPHY_CTRL_PHY_BASE_ADDR_MASK	(0x1f << SPHY_CTRL_PHY_BASE_ADDR_SHIFT)
+
+#define SF2_REG_LED_CTRL(__p)		(0x40 + 0xc * (__p))
+# define LED_CTRL_RX_ACT_EN		(1 << 0)
+# define LED_CTRL_TX_ACT_EN		(1 << 1)
+# define LED_CTRL_ACT_SEL		(1 << 5)
+
+#define SF2_REG_XBAR_CTRL		0xcc
+
+enum {
+	E_XBAR_MUX_SERDES		= 0x0,
+	E_XBAR_MUX_SGPHY		= 0x1,
+	E_XBAR_MUX_RGMII1		= 0x2,
+	E_XBAR_MUX_RGMII2		= 0x3,
+	E_XBAR_MUX_QGPHY		= 0x4,
+};
+
+#define XBAR_PORT_MASK			0x3
+#define XBAR_WAN_SHIFT			6
+#define XBAR_MUX2_MASK			(1 << 5)
+#define XBAR_MUX1_MASK			(1 << 4)
+#define XBAR_P6_SHIFT			2
+#define XBAR_P4_SHIFT			0
+
+
+#define SF2_REG_SSRD_CTRL		0x424
+# define SSRD_CTRL_IDDQ_EN		(1 << 0)
+# define SSRD_CTRL_PDOWN_EN		(1 << 1)
+# define SSRD_CTRL_RESET_PLL		(1 << 3)
+# define SSRD_CTRL_RESET_MDIO		(1 << 4)
+# define SSRD_CTRL_RESET_SERDES		(1 << 5)
+# define SSRD_CTRL_PHY_BASE_ADDR_SHIFT	8
+# define SSRD_CTRL_PHY_BASE_ADDR_MASK	(0xf << SSRD_CTRL_PHY_BASE_ADDR_SHIFT)
+
+
+#define SF2_REG_SSRD_STAT		0x428
+#define SSRD_STAT_LINK_UP		(1 << 0)
+#define SSRD_STAT_RX_SIGDET		(1 << 1)
+#define SSRD_STAT_RX_BITALIGN		(1 << 2)
+#define SSRD_STAT_RX_SGMII_MODE		(1 << 3)
+#define SSRD_STAT_RX_SYNC_STATUS	(1 << 4)
+#define SSRD_STAT_RX_PLL_LOCK		(1 << 5)
+#define SSRD_STAT_RX_DEBOUNCED_SIGDET	(1 << 6)
+
+
+#define SF2_REG_SSRD_APD_CTRL		0x42c
+# define SSRD_APD_CTRL_INV_SD		(1 << 3)
+
+/*
+ * MDIO block
+ */
+#define SF2_MDIO_CMD 0x0
+
+# define CMD_BUSY		(1 << 29)
+# define CMD_LIFA		(1 << 28)
+
+# define CMD_OPCODE_SHIFT	26
+# define CMD_OPCODE_MASK	(3 << CMD_OPCODE_SHIFT)
+# define OPCODE_C45_ADDR	(0 << CMD_OPCODE_SHIFT)
+# define OPCODE_C22_WRITE	(1 << CMD_OPCODE_SHIFT)
+/*
+ * NOTE(review): C22 and C45 writes share opcode 1 -- typical for
+ * unified MDIO controllers, where a C45 access is distinguished by
+ * the preceding address cycle (opcode 0); confirm with the datasheet.
+ */
+# define OPCODE_C45_WRITE	(1 << CMD_OPCODE_SHIFT)
+# define OPCODE_C22_READ	(2 << CMD_OPCODE_SHIFT)
+# define OPCODE_C45_READ	(3 << CMD_OPCODE_SHIFT)
+
+# define CMD_PHY_ADDR(__phy)	((__phy & 0x1f) << 21)
+# define CMD_REG_ADDR(__reg)	((__reg & 0x1f) << 16)
+
+# define CMD_DATA(__data)	((__data & 0xffff) << 0)
+# define CMD_REG_DATA_MASK	0xffff
+
+#define SF2_MDIO_CFG		0x4
+#define MDIO_CLAUSE_22_MASK	(1 << 0)
+#define MDIO_CLK_DIV_SHIFT	4
+#define MDIO_CLK_DIV_MASK	(0xff << MDIO_CLK_DIV_SHIFT)
+
+
+/*
+ * ACB block
+ */
+#define SF2_ACB_CONTROL_REG		0x0
+#define ACB_CONTROL_EN_MASK		(1 << 0)
+#define ACB_CONTROL_ALG2_MASK		(1 << 1)
+#define ACB_CONTROL_FLUSHQ_SHIFT	2
+#define ACB_CONTROL_FLUSHQ_MASK		(0x7 << ACB_CONTROL_FLUSHQ_SHIFT)
+#define ACB_CONTROL_EOP_DELAY_SHIFT	5
+#define ACB_CONTROL_EOP_DELAY_MASK	(0xff << ACB_CONTROL_EOP_DELAY_SHIFT)
+
+#define SF2_ACB_XON_TRESH_REG		0x4
+#define XON_TRESH_XON_BUFS_SHIFT	0
+#define XON_TRESH_XON_BUFS_MASK		(0x7ff << XON_TRESH_XON_BUFS_SHIFT)
+#define XON_TRESH_TOTAL_XON_BUFS_SHIFT	11
+#define XON_TRESH_TOTAL_XON_BUFS_MASK	(0x7ff << XON_TRESH_TOTAL_XON_BUFS_SHIFT)
+
+#define SF2_ACB_QCFG_REG(p,q)		(0x8 + (p) * 8 * 4 + (q) * 4)
+#define ACB_QCFG_XOFF_THRESH_SHIFT	0
+#define ACB_QCFG_XOFF_THRESH_MASK	(0x7ff << ACB_QCFG_XOFF_THRESH_SHIFT)
+
+#define SF2_ACB_QINFLIGHT_REG(p,q)	(0x108 + ((p) * 8 * 4) + (q) * 4)
+
+#endif /* !SF2_REGS_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63xx_enet_runner./Makefile linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63xx_enet_runner/Makefile
--- linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63xx_enet_runner./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/broadcom/bcm63xx_enet_runner/Makefile	2025-09-25 17:40:33.567357309 +0200
@@ -0,0 +1,5 @@
+obj-$(CONFIG_BCM63XX_ENET_RUNNER) 	+= bcm63xx_enet_runner_mod.o
+obj-$(CONFIG_BCM63XX_ENET_RUNNER) 	+= bcm63xx_sf2.o
+
+bcm63xx_enet_runner_mod-y 			+= bcm63xx_enet_runner.o
+bcm63xx_enet_runner_mod-$(CONFIG_DEBUG_FS) 	+= bcm63xx_enet_runner_debug.o
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/cortina/network_engine./Makefile linux-6.13.12-fbx/drivers/net/ethernet/cortina/network_engine/Makefile
--- linux-6.13.12-fbx/drivers/net/ethernet/cortina/network_engine./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/cortina/network_engine/Makefile	2025-09-25 17:40:33.631357626 +0200
@@ -0,0 +1,10 @@
+obj-$(CONFIG_CORTINA_NETWORK_ENGINE) += cortina_network_engine.o
+
+cortina_network_engine-objs += main.o ethtool.o
+cortina_network_engine-objs += port.o port_ge.o port_epon.o
+cortina_network_engine-objs += forward_engine.o dma_lso.o traffic_manager.o queue_manager.o
+cortina_network_engine-objs += ca8289.o
+cortina_network_engine-objs += cortina.o cortina_accessers.o
+
+# debugfs support is optional; the symbol is a bool, so this appends to the -y list
+cortina_network_engine-$(CONFIG_CORTINA_NETWORK_ENGINE_DEBUGFS) += debugfs.o
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/qualcomm/ipq95xx./Kconfig linux-6.13.12-fbx/drivers/net/ethernet/qualcomm/ipq95xx/Kconfig
--- linux-6.13.12-fbx/drivers/net/ethernet/qualcomm/ipq95xx./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/qualcomm/ipq95xx/Kconfig	2025-09-25 17:40:33.931359114 +0200
@@ -0,0 +1,22 @@
+config IPQ95XX_ESS
+	tristate "IPQ95XX ESS driver (edma+ppe)"
+	select NET_SWITCHDEV
+	select PHYLINK
+	select MII
+	help
+	  Support for the Ethernet subsystem (EDMA packet engine and
+	  PPE switch) found on Qualcomm IPQ95xx SoCs.
+
+config IPQ95XX_FBX_FF
+	bool "fastpath support for freebox boards"
+	depends on IPQ95XX_ESS
+	select IP_FFN
+	select IPV6_FFN
+	select IPV6_SIT_6RD
+	select BRIDGE
+	select FBXBRIDGE
+
+config IPQ95XX_FBX_FF_BRNAME
+	string "bridge netdev name"
+	depends on IPQ95XX_ESS
+	default "br0"
diff -Nruw linux-6.13.12-fbx/drivers/net/ethernet/qualcomm/ipq95xx./Makefile linux-6.13.12-fbx/drivers/net/ethernet/qualcomm/ipq95xx/Makefile
--- linux-6.13.12-fbx/drivers/net/ethernet/qualcomm/ipq95xx./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/ethernet/qualcomm/ipq95xx/Makefile	2025-09-25 17:40:33.931359114 +0200
@@ -0,0 +1,13 @@
+obj-$(CONFIG_IPQ95XX_ESS) 	+= ipq95xx_ess.o
+
+ipq95xx_ess-objs		+= \
+				clocks.o \
+				debug.o \
+				hwdesc.o \
+				fdb.o \
+				ipo.o \
+				main.o \
+				port.o \
+				port_ethtool.o \
+				port_phylink.o \
+				uniphy.o
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/realtek-hwmon.c	2025-09-25 17:40:34.063359768 +0200
@@ -0,0 +1,138 @@
+/*
+ * realtek-hwmon.c for realtek-hwmon
+ * Created by <nschichan@freebox.fr> on Mon Mar  8 17:05:09 2021
+ */
+
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/ctype.h>
+#include <linux/hwmon.h>
+
+#include "realtek.h"
+
+#define RTL8221B_TSRR_REG		0xbd84
+#define RTL8221B_TSRR_TSOUT_SYNC_MASK	(0x3ffL)
+#define RTL8221B_TSRR_TSOUT_SYNC_SIGN	(0x200)
+
+#if IS_REACHABLE(CONFIG_HWMON)
+
+/*
+ * Only the temperature input attribute is exposed, read-only;
+ * everything else is hidden.
+ */
+static umode_t realtek_hwmon_is_visible(const void *data,
+					enum hwmon_sensor_types type,
+					u32 attr, int channel)
+{
+	if (type == hwmon_temp && attr == hwmon_temp_input)
+		return 0444;
+	return 0;
+}
+
+/*
+ * Read the PHY temperature into *value (millidegrees Celsius).
+ *
+ * The TSRR register holds a 10-bit two's-complement reading; the raw
+ * unit appears to be 0.5 degC per LSB given the * 500 scaling --
+ * TODO confirm against the RTL8221B datasheet.
+ */
+static int realtek_hwmon_get(struct phy_device *phydev, long *value)
+{
+	int reg;
+
+	reg = phy_read_mmd(phydev, MDIO_MMD_VEND2, RTL8221B_TSRR_REG);
+	if (reg < 0)
+		return reg;
+
+	reg &= RTL8221B_TSRR_TSOUT_SYNC_MASK;
+
+	/* sign-extend the 10-bit field when the sign bit is set */
+	*value = (reg & RTL8221B_TSRR_TSOUT_SYNC_SIGN) ?
+		(reg | ~RTL8221B_TSRR_TSOUT_SYNC_MASK) : reg;
+	*value *= 500;
+
+	return 0;
+}
+
+/*
+ * hwmon read callback: dispatch temperature-input reads to the
+ * getter, reject everything else.
+ */
+static int realtek_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+			      u32 attr, int channel, long *value)
+{
+	if (type == hwmon_temp && attr == hwmon_temp_input)
+		return realtek_hwmon_get(dev_get_drvdata(dev), value);
+	return -ENOTSUPP;
+}
+
+static const struct hwmon_ops realtek_hwmon_ops = {
+	.is_visible = realtek_hwmon_is_visible,
+	.read = realtek_hwmon_read,
+};
+
+/* single temperature channel, input attribute only */
+static u32 realtek_hwmon_temp_config[] = {
+	HWMON_T_INPUT,
+	0,
+};
+
+static const struct hwmon_channel_info realtek_hwmon_temp = {
+	.type = hwmon_temp,
+	.config = realtek_hwmon_temp_config,
+};
+
+/* chip-level channel: register a thermal zone for the sensor */
+static u32 realtek_hwmon_chip_config[] = {
+	HWMON_C_REGISTER_TZ,
+	0,
+};
+
+static const struct hwmon_channel_info realtek_hwmon_chip = {
+	.type = hwmon_chip,
+	.config = realtek_hwmon_chip_config,
+};
+
+
+/* NULL-terminated channel list passed to the hwmon core */
+static const struct hwmon_channel_info *realtek_hwmon_info[] = {
+	&realtek_hwmon_chip,
+	&realtek_hwmon_temp,
+	NULL,
+};
+
+static const struct hwmon_chip_info realtek_hwmon_chip_info = {
+	.ops = &realtek_hwmon_ops,
+	.info = realtek_hwmon_info,
+};
+
+/*
+ * Register the hwmon device for this PHY. The name is derived from
+ * dev_name(), keeping only alphanumerics, '.' and ':' (other
+ * characters are stripped, as the hwmon core rejects some of them).
+ */
+int realtek_hwmon_probe(struct phy_device *phydev)
+{
+	struct device *dev = &phydev->mdio.dev;
+	struct device *hdev;
+	char *name, *dst, *src;
+
+	name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
+	if (!name)
+		return -ENOMEM;
+
+	/* compact the string in place, dropping unwanted characters */
+	dst = name;
+	for (src = name; *src; src++) {
+		if (isalnum(*src) || *src == '.' || *src == ':')
+			*dst++ = *src;
+	}
+	*dst = '\0';
+
+	hdev = devm_hwmon_device_register_with_info(dev, name, phydev,
+						    &realtek_hwmon_chip_info,
+						    NULL);
+	return PTR_ERR_OR_ZERO(hdev);
+}
+#endif
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/net/phy/realtek.h	2025-09-25 17:40:34.063359768 +0200
@@ -0,0 +1,32 @@
+/*
+ * realtek.h for realtek-hwmon
+ * Created by <nschichan@freebox.fr> on Mon Mar  8 17:05:57 2021
+ */
+
+#pragma once
+
+#if IS_REACHABLE(CONFIG_HWMON)
+
+/* register the PHY temperature sensor with the hwmon core */
+int realtek_hwmon_probe(struct phy_device *phydev);
+
+#else
+
+/* no-op stub when hwmon support is not reachable */
+static inline int realtek_hwmon_probe(struct phy_device *phydev)
+{
+	return 0;
+}
+
+#endif
+
+#ifdef CONFIG_ARCH_CORTINA_VENUS
+
+/* cortina-specific probe step, implemented elsewhere */
+int realtek_cortina_probe(struct phy_device *phydev);
+
+#else
+
+/* no-op stub on non-cortina platforms */
+static inline int realtek_cortina_probe(struct phy_device *phydev)
+{
+	return 0;
+}
+
+#endif
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/debugfs_sta.c	2025-07-01 14:10:42.724046268 +0200
@@ -0,0 +1,337 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/vmalloc.h>
+
+#include "debugfs_sta.h"
+#include "core.h"
+#include "peer.h"
+#include "debug.h"
+#include "debugfs_htt_stats.h"
+#include "debugfs.h"
+
+/*
+ * Append per-rate RX statistics (EHT/HE/VHT/HT/legacy MCS counters,
+ * NSS, GI and bandwidth histograms, then the per bw/gi/nss rate table)
+ * to @buf starting at @offset.  Returns the number of bytes appended.
+ *
+ * NOTE(review): @he_rates_avail is not referenced in this body — confirm
+ * whether callers still need to pass it.
+ */
+static
+u32 ath12k_dbg_sta_dump_rate_stats(u8 *buf, u32 offset, const int size,
+				   bool he_rates_avail,
+				   const struct ath12k_rx_peer_rate_stats *stats)
+{
+	static const char *legacy_rate_str[HAL_RX_MAX_NUM_LEGACY_RATES] = {
+					"1 Mbps", "2 Mbps", "5.5 Mbps", "6 Mbps",
+					"9 Mbps", "11 Mbps", "12 Mbps", "18 Mbps",
+					"24 Mbps", "36 Mbps", "48 Mbps", "54 Mbps"};
+	u8 max_bw = HAL_RX_BW_MAX, max_gi = HAL_RX_GI_MAX, max_mcs = HAL_RX_MAX_NSS;
+	int mcs = 0, bw = 0, nss = 0, gi = 0, bw_num = 0;
+	u32 i, len = offset, max = max_bw * max_gi * max_mcs;
+	bool found;
+
+	len += scnprintf(buf + len, size - len, "\nEHT stats:\n");
+	for (i = 0; i <= HAL_RX_MAX_MCS_BE; i++)
+		len += scnprintf(buf + len, size - len,
+				   "MCS %d: %llu%s", i, stats->be_mcs_count[i],
+				   (i + 1) % 8 ? "\t" : "\n");
+
+	len += scnprintf(buf + len, size - len, "\nHE stats:\n");
+	for (i = 0; i <= HAL_RX_MAX_MCS_HE; i++)
+		len += scnprintf(buf + len, size - len,
+				   "MCS %d: %llu%s", i, stats->he_mcs_count[i],
+				   (i + 1) % 6 ? "\t" : "\n");
+
+	len += scnprintf(buf + len, size - len, "\nVHT stats:\n");
+	for (i = 0; i <= HAL_RX_MAX_MCS_VHT; i++)
+		len += scnprintf(buf + len, size - len,
+				   "MCS %d: %llu%s", i, stats->vht_mcs_count[i],
+				   (i + 1) % 5 ? "\t" : "\n");
+
+	len += scnprintf(buf + len, size - len, "\nHT stats:\n");
+	for (i = 0; i <= HAL_RX_MAX_MCS_HT; i++)
+		len += scnprintf(buf + len, size - len,
+				   "MCS %d: %llu%s", i, stats->ht_mcs_count[i],
+				   (i + 1) % 8 ? "\t" : "\n");
+
+	len += scnprintf(buf + len, size - len, "\nLegacy stats:\n");
+	for (i = 0; i < HAL_RX_MAX_NUM_LEGACY_RATES; i++)
+		len += scnprintf(buf + len, size - len,
+				   "%s: %llu%s", legacy_rate_str[i],
+				   stats->legacy_count[i],
+				   (i + 1) % 4 ? "\t" : "\n");
+
+	len += scnprintf(buf + len, size - len, "\nNSS stats:\n");
+	for (i = 0; i < HAL_RX_MAX_NSS; i++)
+		len += scnprintf(buf + len, size - len,
+				   "%dx%d: %llu ", i + 1, i + 1,
+				   stats->nss_count[i]);
+
+	len += scnprintf(buf + len, size - len,
+			  "\n\nGI: 0.8 us %llu 0.4 us %llu 1.6 us %llu 3.2 us %llu\n",
+			  stats->gi_count[0],
+			  stats->gi_count[1],
+			  stats->gi_count[2],
+			  stats->gi_count[3]);
+
+	len += scnprintf(buf + len, size - len,
+			   "BW: 20 MHz %llu 40 MHz %llu 80 MHz %llu 160 MHz %llu 320 MHz %llu\n",
+			   stats->bw_count[0],
+			   stats->bw_count[1],
+			   stats->bw_count[2],
+			   stats->bw_count[3],
+			   stats->bw_count[4]);
+
+	/* Walk every (bw, gi, nss) combination; the three indices are
+	 * advanced odometer-style at skip_report below, so a combination
+	 * with no traffic is skipped without printing a header line.
+	 */
+	for (i = 0; i < max; i++) {
+		found = false;
+
+		for (mcs = 0; mcs <= HAL_RX_MAX_MCS_HT; mcs++) {
+			if (stats->rx_rate[bw][gi][nss][mcs]) {
+				found = true;
+				break;
+			}
+		}
+
+		if (!found)
+			goto skip_report;
+
+		switch (bw) {
+		case HAL_RX_BW_20MHZ:
+			bw_num = 20;
+			break;
+		case HAL_RX_BW_40MHZ:
+			bw_num = 40;
+			break;
+		case HAL_RX_BW_80MHZ:
+			bw_num = 80;
+			break;
+		case HAL_RX_BW_160MHZ:
+			bw_num = 160;
+			break;
+		case HAL_RX_BW_320MHZ:
+			bw_num = 320;
+			break;
+		}
+
+		len += scnprintf(buf + len, size - len, "\n%d Mhz gi %d us %dx%d : ",
+				 bw_num, gi, nss + 1, nss + 1);
+
+		for (mcs = 0; mcs <= HAL_RX_MAX_MCS_HT; mcs++) {
+			if (stats->rx_rate[bw][gi][nss][mcs])
+				len += scnprintf(buf + len, size - len,
+						 " %d:%llu", mcs,
+						 stats->rx_rate[bw][gi][nss][mcs]);
+		}
+
+skip_report:
+		/* Advance nss fastest, then gi, then bw. */
+		if (nss++ >= max_mcs - 1) {
+			nss = 0;
+			if (gi++ >= max_gi - 1) {
+				gi = 0;
+				if (bw < max_bw - 1)
+					bw++;
+			}
+		}
+	}
+
+	len += scnprintf(buf + len, size - len, "\n");
+
+	return len - offset;
+}
+
+/* debugfs "rx_stats" read handler: format the peer RX statistics of the
+ * link STA backing @file into a text buffer and copy it to user space.
+ * Returns bytes copied or a negative errno.
+ */
+static ssize_t ath12k_dbg_sta_dump_rx_stats(struct file *file,
+					    char __user *user_buf,
+					    size_t count, loff_t *ppos)
+{
+	struct ieee80211_link_sta *link_sta = file->private_data;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta);
+	const int size = ATH12K_STA_RX_STATS_BUF_SIZE;
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_rx_peer_stats *rx_stats;
+	struct ath12k_link_sta *arsta;
+	u8 link_id = link_sta->link_id;
+	int len = 0, i, ret = 0;
+	bool he_rates_avail;
+	struct ath12k *ar;
+
+	/* The wiphy lock keeps the link -> arsta mapping stable. */
+	wiphy_lock(ah->hw->wiphy);
+
+	if (!(BIT(link_id) & ahsta->links_map)) {
+		wiphy_unlock(ah->hw->wiphy);
+		return -ENOENT;
+	}
+
+	arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+	if (!arsta || !arsta->arvif->ar) {
+		wiphy_unlock(ah->hw->wiphy);
+		return -ENOENT;
+	}
+
+	ar = arsta->arvif->ar;
+
+	u8 *buf __free(kfree) = kzalloc(size, GFP_KERNEL);
+	if (!buf) {
+		/* Allocation failure is -ENOMEM, not -ENOENT. */
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/* base_lock protects arsta->rx_stats against concurrent updates. */
+	spin_lock_bh(&ar->ab->base_lock);
+
+	rx_stats = arsta->rx_stats;
+	if (!rx_stats) {
+		ret = -ENOENT;
+		goto unlock;
+	}
+
+	len += scnprintf(buf + len, size - len, "RX peer stats:\n\n");
+	len += scnprintf(buf + len, size - len, "Num of MSDUs: %llu\n",
+			 rx_stats->num_msdu);
+	len += scnprintf(buf + len, size - len, "Num of MSDUs with TCP L4: %llu\n",
+			 rx_stats->tcp_msdu_count);
+	len += scnprintf(buf + len, size - len, "Num of MSDUs with UDP L4: %llu\n",
+			 rx_stats->udp_msdu_count);
+	len += scnprintf(buf + len, size - len, "Num of other MSDUs: %llu\n",
+			 rx_stats->other_msdu_count);
+	len += scnprintf(buf + len, size - len, "Num of MSDUs part of AMPDU: %llu\n",
+			 rx_stats->ampdu_msdu_count);
+	len += scnprintf(buf + len, size - len, "Num of MSDUs not part of AMPDU: %llu\n",
+			 rx_stats->non_ampdu_msdu_count);
+	len += scnprintf(buf + len, size - len, "Num of MSDUs using STBC: %llu\n",
+			 rx_stats->stbc_count);
+	len += scnprintf(buf + len, size - len, "Num of MSDUs beamformed: %llu\n",
+			 rx_stats->beamformed_count);
+	len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS ok: %llu\n",
+			 rx_stats->num_mpdu_fcs_ok);
+	len += scnprintf(buf + len, size - len, "Num of MPDUs with FCS error: %llu\n",
+			 rx_stats->num_mpdu_fcs_err);
+
+	he_rates_avail = (rx_stats->pream_cnt[HAL_RX_PREAMBLE_11AX] > 1) ? true : false;
+
+	len += scnprintf(buf + len, size - len,
+			 "preamble: 11A %llu 11B %llu 11N %llu 11AC %llu 11AX %llu 11BE %llu\n",
+			 rx_stats->pream_cnt[0], rx_stats->pream_cnt[1],
+			 rx_stats->pream_cnt[2], rx_stats->pream_cnt[3],
+			 rx_stats->pream_cnt[4], rx_stats->pream_cnt[6]);
+	len += scnprintf(buf + len, size - len,
+			 "reception type: SU %llu MU_MIMO %llu MU_OFDMA %llu MU_OFDMA_MIMO %llu\n",
+			 rx_stats->reception_type[0], rx_stats->reception_type[1],
+			 rx_stats->reception_type[2], rx_stats->reception_type[3]);
+
+	len += scnprintf(buf + len, size - len, "TID(0-15) Legacy TID(16):");
+	for (i = 0; i <= IEEE80211_NUM_TIDS; i++)
+		len += scnprintf(buf + len, size - len, "%llu ", rx_stats->tid_count[i]);
+
+	len += scnprintf(buf + len, size - len, "\nRX Duration:%llu\n",
+			 rx_stats->rx_duration);
+
+	len += scnprintf(buf + len, size - len,
+			 "\nDCM: %llu\nRU26:  %llu\nRU52:  %llu\nRU106: %llu\nRU242: %llu\nRU484: %llu\nRU996: %llu\nRU996x2: %llu\n",
+			 rx_stats->dcm_count, rx_stats->ru_alloc_cnt[0],
+			 rx_stats->ru_alloc_cnt[1], rx_stats->ru_alloc_cnt[2],
+			 rx_stats->ru_alloc_cnt[3], rx_stats->ru_alloc_cnt[4],
+			 rx_stats->ru_alloc_cnt[5], rx_stats->ru_alloc_cnt[6]);
+
+	len += scnprintf(buf + len, size - len, "\nRX success packet stats:\n");
+	len += ath12k_dbg_sta_dump_rate_stats(buf, len, size, he_rates_avail,
+					      &rx_stats->pkt_stats);
+
+	len += scnprintf(buf + len, size - len, "\n");
+
+	len += scnprintf(buf + len, size - len, "\nRX success byte stats:\n");
+	len += ath12k_dbg_sta_dump_rate_stats(buf, len, size, he_rates_avail,
+					      &rx_stats->byte_stats);
+
+unlock:
+	spin_unlock_bh(&ar->ab->base_lock);
+
+	if (len)
+		ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+out:
+	wiphy_unlock(ah->hw->wiphy);
+	return ret;
+}
+
+/* Read-only (0400) debugfs file operations for "rx_stats". */
+static const struct file_operations fops_rx_stats = {
+	.read = ath12k_dbg_sta_dump_rx_stats,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* debugfs "reset_rx_stats" write handler: a truthy write clears the
+ * peer RX statistics of the link STA backing @file.  Returns @count on
+ * success or a negative errno.
+ */
+static ssize_t ath12k_dbg_sta_reset_rx_stats(struct file *file,
+					     const char __user *buf,
+					     size_t count, loff_t *ppos)
+{
+	struct ieee80211_link_sta *link_sta = file->private_data;
+	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(link_sta->sta);
+	struct ath12k_hw *ah = ahsta->ahvif->ah;
+	struct ath12k_rx_peer_stats *rx_stats;
+	struct ath12k_link_sta *arsta;
+	u8 link_id = link_sta->link_id;
+	struct ath12k *ar;
+	bool reset;
+	int ret;
+
+	ret = kstrtobool_from_user(buf, count, &reset);
+	if (ret)
+		return ret;
+
+	/* Only "1"/"y"-style input triggers a reset. */
+	if (!reset)
+		return -EINVAL;
+
+	/* The wiphy lock keeps the link -> arsta mapping stable. */
+	wiphy_lock(ah->hw->wiphy);
+
+	if (!(BIT(link_id) & ahsta->links_map)) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	arsta = wiphy_dereference(ah->hw->wiphy, ahsta->link[link_id]);
+	if (!arsta || !arsta->arvif->ar) {
+		ret = -ENOENT;
+		goto out;
+	}
+
+	ar = arsta->arvif->ar;
+
+	/* base_lock protects rx_stats against concurrent updates. */
+	spin_lock_bh(&ar->ab->base_lock);
+
+	rx_stats = arsta->rx_stats;
+	if (!rx_stats) {
+		spin_unlock_bh(&ar->ab->base_lock);
+		ret = -ENOENT;
+		goto out;
+	}
+
+	memset(rx_stats, 0, sizeof(*rx_stats));
+	spin_unlock_bh(&ar->ab->base_lock);
+
+	ret = count;
+out:
+	wiphy_unlock(ah->hw->wiphy);
+	return ret;
+}
+
+/* Write-only (0200) debugfs file operations for "reset_rx_stats". */
+static const struct file_operations fops_reset_rx_stats = {
+	.write = ath12k_dbg_sta_reset_rx_stats,
+	.open = simple_open,
+	.owner = THIS_MODULE,
+	.llseek = default_llseek,
+};
+
+/* Create the per-link-STA debugfs files ("rx_stats", "reset_rx_stats")
+ * under @dir.  Entries are only useful when extended RX stats
+ * collection is enabled on the radio serving this link.
+ */
+void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_link_sta *link_sta,
+				    struct dentry *dir)
+{
+	struct ath12k *ar;
+
+	lockdep_assert_wiphy(hw->wiphy);
+
+	ar = ath12k_get_ar_by_vif(hw, vif, link_sta->link_id);
+	if (!ar || !ath12k_debugfs_is_extd_rx_stats_enabled(ar))
+		return;
+
+	debugfs_create_file("rx_stats", 0400, dir, link_sta,
+			    &fops_rx_stats);
+	debugfs_create_file("reset_rx_stats", 0200, dir, link_sta,
+			    &fops_reset_rx_stats);
+}
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/debugfs_sta.h	2025-07-01 14:10:42.724046268 +0200
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2024-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH12K_DEBUGFS_STA_H_
+#define _ATH12K_DEBUGFS_STA_H_
+
+#include <net/mac80211.h>
+
+#include "core.h"
+
+/* Size of the text buffer used to format one peer's RX statistics. */
+#define ATH12K_STA_RX_STATS_BUF_SIZE		(1024 * 16)
+
+#ifdef CONFIG_ATH12K_DEBUGFS
+
+/* Create per-link-STA debugfs files (rx_stats, reset_rx_stats) in @dir. */
+void ath12k_debugfs_link_sta_op_add(struct ieee80211_hw *hw,
+				    struct ieee80211_vif *vif,
+				    struct ieee80211_link_sta *link_sta,
+				    struct dentry *dir);
+
+#endif /* CONFIG_ATH12K_DEBUGFS */
+
+#endif /* _ATH12K_DEBUGFS_STA_H_ */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/testmode.c	2025-09-25 17:40:34.163360264 +0200
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "testmode.h"
+#include <net/netlink.h>
+#include "debug.h"
+#include "wmi.h"
+#include "hw.h"
+#include "core.h"
+#include "hif.h"
+#include "testmode_i.h"
+
+/* Layout of the FTM segment header "segmentinfo" word: low nibble is
+ * the current segment index, high nibble the total segment count.
+ */
+#define ATH12K_FTM_SEGHDR_CURRENT_SEQ		GENMASK(3, 0)
+#define ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS	GENMASK(7, 4)
+
+/* netlink attribute policy for testmode commands (attrs in testmode_i.h) */
+static const struct nla_policy ath12k_tm_policy[ATH_TM_ATTR_MAX + 1] = {
+	[ATH_TM_ATTR_CMD]		= { .type = NLA_U32 },
+	[ATH_TM_ATTR_DATA]		= { .type = NLA_BINARY,
+					    .len = ATH_TM_DATA_MAX_LEN },
+	[ATH_TM_ATTR_WMI_CMDID]		= { .type = NLA_U32 },
+	[ATH_TM_ATTR_VERSION_MAJOR]	= { .type = NLA_U32 },
+	[ATH_TM_ATTR_VERSION_MINOR]	= { .type = NLA_U32 },
+	[ATH_TM_ATTR_FWLOG]		= { .type = NLA_BINARY,
+					    .len = 2048 },
+	[ATH_TM_ATTR_DUAL_MAC]		= { .type = NLA_U8 },
+};
+
+/* Push a firmware log chunk to user space as a cfg80211 testmode event
+ * (ATH_TM_ATTR_FWLOG).  On dual-MAC devices the radio count is attached
+ * as ATH_TM_ATTR_DUAL_MAC.  Silently drops the chunk if no radio is up
+ * or memory is unavailable.
+ */
+void ath12k_fwlog_write(struct ath12k_base *ab, u8 *data, int len)
+{
+	struct ath12k_pdev *pdev;
+	struct ath12k *ar = NULL;
+	struct sk_buff *msg;
+	int err, i;
+
+	/* Any active radio will do: the event skb only needs a wiphy. */
+	for (i = 0; i < ab->num_radios; i++) {
+		pdev = &ab->pdevs[i];
+		if (pdev && pdev->ar) {
+			ar = pdev->ar;
+			break;
+		}
+	}
+	if (!ar)
+		return;
+
+	msg = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy, len,
+						GFP_ATOMIC);
+	if (!msg) {
+		ath12k_warn(ab, "failed to allocate skb for fwlog event\n");
+		return;
+	}
+
+	err = nla_put(msg, ATH_TM_ATTR_FWLOG, len, data);
+	if (err) {
+		ath12k_warn(ab, "failed to put fwlog wmi event to nl: %d\n",
+			    err);
+		kfree_skb(msg);
+		return;
+	}
+
+	if (ab->num_radios == 2) {
+		err = nla_put_u8(msg, ATH_TM_ATTR_DUAL_MAC, ab->num_radios);
+		if (err) {
+			ath12k_warn(ab, "failed to put dual mac wmi event to nl: %d\n",
+				    err);
+			kfree_skb(msg);
+			return;
+		}
+	}
+
+	cfg80211_testmode_event(msg, GFP_ATOMIC);
+}
+
+/* Return the first radio currently in factory-test-mode state, or NULL
+ * if none is.
+ */
+static struct ath12k *ath12k_tm_get_ar(struct ath12k_base *ab)
+{
+	int i;
+
+	for (i = 0; i < ab->num_radios; i++) {
+		struct ath12k *ar = ab->pdevs[i].ar;
+
+		if (ar && ar->ah->state == ATH12K_HW_STATE_TM)
+			return ar;
+	}
+
+	return NULL;
+}
+
+/* Forward an unsegmented WMI event to user space as a testmode event
+ * carrying ATH_TM_CMD_WMI, the WMI command id and the raw payload.
+ */
+void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id,
+				     struct sk_buff *skb)
+{
+	struct sk_buff *nl_skb;
+	struct ath12k *ar;
+
+	ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+		   "testmode event wmi cmd_id %d skb length %d\n",
+		   cmd_id, skb->len);
+
+	ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
+
+	ar = ath12k_tm_get_ar(ab);
+	if (!ar) {
+		ath12k_warn(ab, "testmode event not handled due to invalid pdev\n");
+		return;
+	}
+
+	spin_lock_bh(&ar->data_lock);
+
+	nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy,
+						   2 * nla_total_size(sizeof(u32)) +
+						   nla_total_size(skb->len),
+						   GFP_ATOMIC);
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!nl_skb) {
+		ath12k_warn(ab,
+			    "failed to allocate skb for unsegmented testmode wmi event\n");
+		return;
+	}
+
+	if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD, ATH_TM_CMD_WMI) ||
+	    nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+	    nla_put(nl_skb, ATH_TM_ATTR_DATA, skb->len, skb->data)) {
+		ath12k_warn(ab, "failed to populate testmode unsegmented event\n");
+		kfree_skb(nl_skb);
+		return;
+	}
+
+	cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+/* Reassemble a (possibly segmented) FTM WMI event into
+ * ab->ftm_event_obj.eventdata and, once the final segment arrives,
+ * forward the complete payload to user space as an ATH_TM_CMD_WMI_FTM
+ * testmode event.
+ */
+void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
+			     const struct ath12k_wmi_ftm_event *ftm_msg,
+			     u16 length)
+{
+	struct sk_buff *nl_skb;
+	struct ath12k *ar;
+	u32 data_pos, pdev_id;
+	u16 datalen;
+	u8 total_segments, current_seq;
+	u8 const *buf_pos;
+
+	ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+		   "testmode event wmi cmd_id %d ftm event msg %pK datalen %d\n",
+		   cmd_id, ftm_msg, length);
+	ath12k_dbg_dump(ab, ATH12K_DBG_TESTMODE, NULL, "", ftm_msg, length);
+	pdev_id = DP_HW2SW_MACID(le32_to_cpu(ftm_msg->seg_hdr.pdev_id));
+
+	if (pdev_id >= ab->num_radios) {
+		ath12k_warn(ab, "testmode event not handled due to invalid pdev id\n");
+		return;
+	}
+
+	ar = ab->pdevs[pdev_id].ar;
+
+	if (!ar) {
+		ath12k_warn(ab, "testmode event not handled due to absence of pdev\n");
+		return;
+	}
+
+	current_seq = le32_get_bits(ftm_msg->seg_hdr.segmentinfo,
+				    ATH12K_FTM_SEGHDR_CURRENT_SEQ);
+	total_segments = le32_get_bits(ftm_msg->seg_hdr.segmentinfo,
+				       ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS);
+	datalen = length - (sizeof(struct ath12k_wmi_ftm_seg_hdr_params));
+	buf_pos = ftm_msg->data;
+
+	/* First segment resets the reassembly state. */
+	if (current_seq == 0) {
+		ab->ftm_event_obj.expected_seq = 0;
+		ab->ftm_event_obj.data_pos = 0;
+	}
+
+	data_pos = ab->ftm_event_obj.data_pos;
+
+	if ((data_pos + datalen) > ATH_FTM_EVENT_MAX_BUF_LENGTH) {
+		ath12k_warn(ab,
+			    "Invalid event length date_pos[%d] datalen[%d]\n",
+			    data_pos, datalen);
+		return;
+	}
+
+	memcpy(&ab->ftm_event_obj.eventdata[data_pos], buf_pos, datalen);
+	data_pos += datalen;
+
+	/* Still waiting for more segments: stash the progress and return. */
+	if (++ab->ftm_event_obj.expected_seq != total_segments) {
+		ab->ftm_event_obj.data_pos = data_pos;
+		ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+			   "partial data received current_seq[%d], total_seg[%d]\n",
+			    current_seq, total_segments);
+		return;
+	}
+
+	ath12k_dbg(ab, ATH12K_DBG_TESTMODE,
+		   "total data length[%d] = [%d]\n",
+		   data_pos, ftm_msg->seg_hdr.len);
+
+	spin_lock_bh(&ar->data_lock);
+	nl_skb = cfg80211_testmode_alloc_event_skb(ar->ah->hw->wiphy,
+						   2 * nla_total_size(sizeof(u32)) +
+						   nla_total_size(data_pos),
+						   GFP_ATOMIC);
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!nl_skb) {
+		ath12k_warn(ab,
+			    "failed to allocate skb for testmode wmi event\n");
+		return;
+	}
+
+	if (nla_put_u32(nl_skb, ATH_TM_ATTR_CMD,
+			ATH_TM_CMD_WMI_FTM) ||
+	    nla_put_u32(nl_skb, ATH_TM_ATTR_WMI_CMDID, cmd_id) ||
+	    nla_put(nl_skb, ATH_TM_ATTR_DATA, data_pos,
+		    &ab->ftm_event_obj.eventdata[0])) {
+		ath12k_warn(ab, "failed to populate testmode event");
+		kfree_skb(nl_skb);
+		return;
+	}
+
+	cfg80211_testmode_event(nl_skb, GFP_ATOMIC);
+}
+
+/* ATH_TM_CMD_GET_VERSION handler: reply with the testmode interface
+ * major/minor version so user space can check compatibility.
+ */
+static int ath12k_tm_cmd_get_version(struct ath12k *ar, struct nlattr *tb[])
+{
+	struct sk_buff *skb;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+		   "testmode cmd get version_major %d version_minor %d\n",
+		   ATH_TESTMODE_VERSION_MAJOR,
+		   ATH_TESTMODE_VERSION_MINOR);
+
+	spin_lock_bh(&ar->data_lock);
+	skb = cfg80211_testmode_alloc_reply_skb(ar->ah->hw->wiphy,
+						2 * nla_total_size(sizeof(u32)));
+	spin_unlock_bh(&ar->data_lock);
+
+	if (!skb)
+		return -ENOMEM;
+
+	if (nla_put_u32(skb, ATH_TM_ATTR_VERSION_MAJOR,
+			ATH_TESTMODE_VERSION_MAJOR) ||
+	    nla_put_u32(skb, ATH_TM_ATTR_VERSION_MINOR,
+			ATH_TESTMODE_VERSION_MINOR)) {
+		kfree_skb(skb);
+		return -ENOBUFS;
+	}
+
+	return cfg80211_testmode_reply(skb);
+}
+
+/* ATH_TM_CMD_WMI_FTM handler: split the user supplied FTM payload into
+ * MAX_WMI_UTF_LEN sized chunks and send each as a WMI_PDEV_UTF_CMDID
+ * command carrying a segment header (total count / current index).
+ * Only valid while the radio is in factory test mode.
+ */
+static int ath12k_tm_cmd_process_ftm(struct ath12k *ar, struct nlattr *tb[])
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct sk_buff *skb;
+	struct ath12k_wmi_ftm_cmd *ftm_cmd;
+	int ret = 0;
+	void *buf;
+	size_t aligned_len;
+	u32 cmd_id, buf_len;
+	u16 chunk_len, total_bytes, num_segments;
+	u8 segnumber = 0, *bufpos;
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE, "ah->state  %d\n", ar->ah->state);
+	if (ar->ah->state != ATH12K_HW_STATE_TM)
+		return -ENETDOWN;
+
+	if (!tb[ATH_TM_ATTR_DATA])
+		return -EINVAL;
+
+	buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+	buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
+	cmd_id = WMI_PDEV_UTF_CMDID;
+	ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+		   "testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
+		   cmd_id, buf, buf_len);
+	ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len);
+	bufpos = buf;
+	total_bytes = buf_len;
+	num_segments = total_bytes / MAX_WMI_UTF_LEN;
+
+	/* Account for a final, partially filled segment. */
+	if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
+		num_segments++;
+
+	while (buf_len) {
+		if (buf_len > MAX_WMI_UTF_LEN)
+			chunk_len = MAX_WMI_UTF_LEN;    /* MAX message */
+		else
+			chunk_len = buf_len;
+
+		skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, (chunk_len +
+					sizeof(struct ath12k_wmi_ftm_cmd)));
+
+		if (!skb)
+			return -ENOMEM;
+
+		ftm_cmd = (struct ath12k_wmi_ftm_cmd *)skb->data;
+		aligned_len  = chunk_len + sizeof(struct ath12k_wmi_ftm_seg_hdr_params);
+		ftm_cmd->tlv_header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_BYTE, aligned_len);
+		ftm_cmd->seg_hdr.len = cpu_to_le32(total_bytes);
+		ftm_cmd->seg_hdr.msgref = cpu_to_le32(ar->ftm_msgref);
+		ftm_cmd->seg_hdr.segmentinfo =
+			le32_encode_bits(num_segments,
+					 ATH12K_FTM_SEGHDR_TOTAL_SEGMENTS) |
+			le32_encode_bits(segnumber,
+					 ATH12K_FTM_SEGHDR_CURRENT_SEQ);
+		ftm_cmd->seg_hdr.pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+		segnumber++;
+		memcpy(&ftm_cmd->data, bufpos, chunk_len);
+		ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id);
+
+		if (ret) {
+			ath12k_warn(ar->ab, "ftm wmi command fail: %d\n", ret);
+			kfree_skb(skb);
+			return ret;
+		}
+
+		buf_len -= chunk_len;
+		bufpos += chunk_len;
+	}
+
+	/* One msgref per user command, shared by all its segments. */
+	++ar->ftm_msgref;
+	return ret;
+}
+
+/* ATH_TM_CMD_TESTMODE_START handler: move an OFF radio into factory
+ * test mode and allocate the FTM event reassembly buffer.
+ */
+static int ath12k_tm_cmd_testmode_start(struct ath12k *ar, struct nlattr *tb[])
+{
+	struct ath12k_base *ab = ar->ab;
+
+	if (ar->ah->state == ATH12K_HW_STATE_TM)
+		return -EALREADY;
+
+	/* Only an OFF radio may enter factory test mode. */
+	if (ar->ah->state != ATH12K_HW_STATE_OFF)
+		return -EBUSY;
+
+	ab->ftm_event_obj.eventdata = kzalloc(ATH_FTM_EVENT_MAX_BUF_LENGTH,
+					      GFP_KERNEL);
+	if (!ab->ftm_event_obj.eventdata)
+		return -ENOMEM;
+
+	ar->ftm_msgref = 0;
+	ar->ah->state = ATH12K_HW_STATE_TM;
+
+	return 0;
+}
+
+/* ATH_TM_CMD_WMI handler: forward a raw WMI command from user space to
+ * the firmware.  For WMI_TAG_PDEV_SET_PARAM_CMD payloads the pdev id is
+ * patched to this radio's before sending.
+ */
+static int ath12k_tm_cmd_wmi(struct ath12k *ar, struct nlattr *tb[])
+{
+	struct ath12k_wmi_pdev *wmi = ar->wmi;
+	struct sk_buff *skb;
+	struct wmi_pdev_set_param_cmd *cmd;
+	int ret = 0, tag;
+	void *buf;
+	u32 cmd_id, buf_len;
+
+	if (!tb[ATH_TM_ATTR_DATA])
+		return -EINVAL;
+
+	if (!tb[ATH_TM_ATTR_WMI_CMDID])
+		return -EINVAL;
+
+	buf = nla_data(tb[ATH_TM_ATTR_DATA]);
+	buf_len = nla_len(tb[ATH_TM_ATTR_DATA]);
+
+	if (!buf_len) {
+		ath12k_warn(ar->ab, "No data present in testmode command\n");
+		return -EINVAL;
+	}
+
+	cmd_id = nla_get_u32(tb[ATH_TM_ATTR_WMI_CMDID]);
+
+	/* Peek at the TLV tag to detect pdev-set-param payloads. */
+	cmd = buf;
+	tag = le32_get_bits(cmd->tlv_header, WMI_TLV_TAG);
+
+	if (tag == WMI_TAG_PDEV_SET_PARAM_CMD)
+		cmd->pdev_id = cpu_to_le32(ar->pdev->pdev_id);
+
+	ath12k_dbg(ar->ab, ATH12K_DBG_TESTMODE,
+		   "testmode cmd wmi cmd_id %d  buf length %d\n",
+		   cmd_id, buf_len);
+
+	ath12k_dbg_dump(ar->ab, ATH12K_DBG_TESTMODE, NULL, "", buf, buf_len);
+
+	skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
+
+	if (!skb)
+		return -ENOMEM;
+
+	memcpy(skb->data, buf, buf_len);
+
+	ret = ath12k_wmi_cmd_send(wmi, skb, cmd_id);
+	if (ret) {
+		dev_kfree_skb(skb);
+		ath12k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n",
+			    ret);
+	}
+
+	return ret;
+}
+
+/* cfg80211 testmode entry point: parse the netlink attributes and
+ * dispatch on ATH_TM_ATTR_CMD.  Called with the wiphy mutex held.
+ */
+int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  void *data, int len)
+{
+	struct ath12k_hw *ah = hw->priv;
+	struct ath12k *ar = NULL;
+	struct nlattr *tb[ATH_TM_ATTR_MAX + 1];
+	struct ath12k_base *ab;
+	struct wiphy *wiphy = hw->wiphy;
+	int ret;
+
+	lockdep_assert_held(&wiphy->mtx);
+
+	ret = nla_parse(tb, ATH_TM_ATTR_MAX, data, len, ath12k_tm_policy,
+			NULL);
+	if (ret)
+		return ret;
+
+	if (!tb[ATH_TM_ATTR_CMD])
+		return -EINVAL;
+
+	/* TODO: have to handle ar for MLO case */
+	if (ah->num_radio)
+		ar = ah->radio;
+
+	if (!ar)
+		return -EINVAL;
+
+	ab = ar->ab;
+	switch (nla_get_u32(tb[ATH_TM_ATTR_CMD])) {
+	case ATH_TM_CMD_WMI:
+		return ath12k_tm_cmd_wmi(ar, tb);
+	case ATH_TM_CMD_TESTMODE_START:
+		return ath12k_tm_cmd_testmode_start(ar, tb);
+	case ATH_TM_CMD_GET_VERSION:
+		return ath12k_tm_cmd_get_version(ar, tb);
+	case ATH_TM_CMD_WMI_FTM:
+		/* Mark segmented FTM mode so event handling reassembles. */
+		set_bit(ATH12K_FLAG_FTM_SEGMENTED, &ab->dev_flags);
+		return ath12k_tm_cmd_process_ftm(ar, tb);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/testmode.h	2025-09-25 17:40:34.163360264 +0200
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include "core.h"
+#include "hif.h"
+
+#ifdef CONFIG_NL80211_TESTMODE
+
+/* Forward an unsegmented WMI event to user space. */
+void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id,
+				     struct sk_buff *skb);
+/* Reassemble segmented FTM events and forward the full payload. */
+void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
+			     const struct ath12k_wmi_ftm_event *ftm_msg,
+			     u16 length);
+/* cfg80211 testmode command entry point. */
+int ath12k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+		  void *data, int len);
+/* Publish a firmware log chunk as a testmode event. */
+void ath12k_fwlog_write(struct ath12k_base *ab, u8 *data, int len);
+#else
+
+/* Testmode disabled: events are dropped and commands succeed as no-ops. */
+static inline void ath12k_tm_wmi_event_unsegmented(struct ath12k_base *ab, u32 cmd_id,
+						   struct sk_buff *skb)
+{
+}
+
+static inline void ath12k_tm_process_event(struct ath12k_base *ab, u32 cmd_id,
+					   const struct ath12k_wmi_ftm_event *msg,
+					   u16 length)
+{
+}
+
+static inline int ath12k_tm_cmd(struct ieee80211_hw *hw,
+				struct ieee80211_vif *vif,
+				void *data, int len)
+{
+	return 0;
+}
+
+static inline void ath12k_fwlog_write(struct ath12k_base *ab, u8 *data, int len)
+{
+
+}
+#endif
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/testmode_i.h	2025-09-25 17:40:34.163360264 +0200
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+/* "API" level of the ath testmode interface. Bump it after every
+ * incompatible interface change.
+ */
+#define ATH_TESTMODE_VERSION_MAJOR 1
+
+/* Bump this after every _compatible_ interface change, for example
+ * addition of a new command or an attribute.
+ */
+#define ATH_TESTMODE_VERSION_MINOR 0
+
+/* Maximum payload of ATH_TM_ATTR_DATA in one netlink message. */
+#define ATH_TM_DATA_MAX_LEN		5000
+/* Size of the buffer used to reassemble segmented FTM events. */
+#define ATH_FTM_EVENT_MAX_BUF_LENGTH 	2048
+
+/* Netlink attributes carried by testmode commands and events. */
+enum ath_tm_attr {
+	__ATH_TM_ATTR_INVALID		= 0,
+	ATH_TM_ATTR_CMD			= 1,
+	ATH_TM_ATTR_DATA		= 2,
+	ATH_TM_ATTR_WMI_CMDID		= 3,
+	ATH_TM_ATTR_VERSION_MAJOR	= 4,
+	ATH_TM_ATTR_VERSION_MINOR	= 5,
+	ATH_TM_ATTR_WMI_OP_VERSION	= 6,
+	ATH_TM_ATTR_FWLOG		= 7,
+	/* NOTE(review): value 8 is skipped — confirm this matches the
+	 * user-space tool's attribute numbering.
+	 */
+	ATH_TM_ATTR_DUAL_MAC		= 9,
+
+	/* keep last */
+	__ATH_TM_ATTR_AFTER_LAST,
+	ATH_TM_ATTR_MAX			= __ATH_TM_ATTR_AFTER_LAST - 1,
+};
+
+/* All ath testmode interface commands specified in
+ * ATH_TM_ATTR_CMD
+ */
+enum ath_tm_cmd {
+	/* Returns the supported ath testmode interface version in
+	 * ATH_TM_ATTR_VERSION. Always guaranteed to work. User space
+	 * uses this to verify it's using the correct version of the
+	 * testmode interface
+	 */
+	ATH_TM_CMD_GET_VERSION = 0,
+
+	/* Set ar state to test mode. */
+	ATH_TM_CMD_TESTMODE_START = 1,
+
+	/* Set ar state back into OFF state. */
+	ATH_TM_CMD_TESTMODE_STOP = 2,
+
+	/* The command used to transmit a WMI command to the firmware and
+	 * the event to receive WMI events from the firmware. Without
+	 * struct wmi_cmd_hdr header, only the WMI payload. Command id is
+	 * provided with ATH_TM_ATTR_WMI_CMDID and payload in
+	 * ATH_TM_ATTR_DATA.
+	 */
+	ATH_TM_CMD_WMI = 3,
+
+	/* The command used to transmit a FTM WMI command to the firmware
+	 * and the event to receive WMI events from the firmware. The data
+	 * received only contain the payload. Need to add the tlv
+	 * header and send the cmd to fw with commandid WMI_PDEV_UTF_CMDID.
+	 */
+	ATH_TM_CMD_WMI_FTM = 4,
+};
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/thermal.c	2025-09-25 17:40:34.163360264 +0200
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: BSD-3-Clause-Clear
+/*
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+#include <linux/thermal.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include "core.h"
+#include "debug.h"
+
+/* hwmon temp1_input show handler: trigger a WMI pdev temperature read
+ * and wait (up to ATH12K_THERMAL_SYNC_TIMEOUT_HZ) for the firmware
+ * reply.  Returns bytes written to @buf or a negative errno.
+ */
+static ssize_t ath12k_thermal_show_temp(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct ath12k *ar = dev_get_drvdata(dev);
+	int ret;
+	unsigned long time_left;
+
+	wiphy_lock(ath12k_ar_to_hw(ar)->wiphy);
+
+	/* Can't get temperature when the card is off, unless in FTM mode */
+	if (!ath12k_ftm_mode && ar->ah->state != ATH12K_HW_STATE_ON) {
+		ret = -ENETDOWN;
+		goto out;
+	}
+
+	reinit_completion(&ar->thermal.wmi_sync);
+	ret = ath12k_wmi_send_pdev_temperature_cmd(ar);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to read temperature %d\n", ret);
+		goto out;
+	}
+
+	/* Firmware is gone: the completion would never fire. */
+	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) {
+		ret = -ESHUTDOWN;
+		goto out;
+	}
+
+	time_left = wait_for_completion_timeout(&ar->thermal.wmi_sync,
+						ATH12K_THERMAL_SYNC_TIMEOUT_HZ);
+	if (!time_left) {
+		ath12k_warn(ar->ab, "failed to synchronize thermal read\n");
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	/* display in millidegree Celsius */
+	ret = snprintf(buf, PAGE_SIZE, "%d\n", ar->thermal.temperature * 1000);
+
+out:
+	wiphy_unlock(ath12k_ar_to_hw(ar)->wiphy);
+	return ret;
+}
+
+/* WMI pdev temperature event handler: record the reading and wake the
+ * waiter in ath12k_thermal_show_temp().
+ */
+void ath12k_thermal_event_temperature(struct ath12k *ar, int temperature)
+{
+	ar->thermal.temperature = temperature;
+	complete(&ar->thermal.wmi_sync);
+}
+
+/* temp1_input: read-only, reported in millidegree Celsius. */
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, ath12k_thermal_show_temp,
+			  NULL, 0);
+
+static struct attribute *ath12k_hwmon_attrs[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(ath12k_hwmon);
+
+/* Register one hwmon device per active radio.  The devices are
+ * devm-managed against the wiphy device, so there is nothing to undo
+ * on the error path.  Returns 0 or a negative errno.
+ */
+int ath12k_thermal_register(struct ath12k_base *sc)
+{
+	struct device *hwmon_dev;
+	struct ath12k_pdev *pdev;
+	struct ath12k *ar;
+	int i;
+
+	for (i = 0; i < sc->num_radios; i++) {
+		pdev = &sc->pdevs[i];
+		ar = pdev->ar;
+		if (!ar)
+			continue;
+
+		hwmon_dev = devm_hwmon_device_register_with_groups(
+					&ath12k_ar_to_hw(ar)->wiphy->dev,
+					"ath12k_hwmon", ar,
+					ath12k_hwmon_groups);
+		if (IS_ERR(hwmon_dev)) {
+			ath12k_err(ar->ab,
+				   "failed to register hwmon device: %ld\n",
+				   PTR_ERR(hwmon_dev));
+			return PTR_ERR(hwmon_dev);
+		}
+	}
+
+	return 0;
+}
+
+/* Nothing to do: the hwmon devices registered above are devm-managed. */
+void ath12k_thermal_unregister(struct ath12k_base *sc)
+{
+}
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/net/wireless/ath/ath12k/thermal.h	2025-09-25 17:40:34.163360264 +0200
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _ATH12K_THERMAL_
+#define _ATH12K_THERMAL_
+
+/* How long to wait for the firmware temperature reply. */
+#define ATH12K_THERMAL_SYNC_TIMEOUT_HZ (5 * HZ)
+
+/* Per-radio thermal state. */
+struct ath12k_thermal {
+	/* signalled when a WMI temperature event arrives */
+	struct completion wmi_sync;
+	/* last reported reading, degree Celsius */
+	int temperature;
+};
+
+#if IS_REACHABLE(CONFIG_THERMAL)
+
+int ath12k_thermal_register(struct ath12k_base *sc);
+void ath12k_thermal_unregister(struct ath12k_base *sc);
+void ath12k_thermal_event_temperature(struct ath12k *ar, int temperature);
+
+#else
+
+/* CONFIG_THERMAL unreachable: thermal support compiles out to no-ops. */
+static inline int ath12k_thermal_register(struct ath12k_base *sc)
+{
+	return 0;
+}
+
+static inline void ath12k_thermal_unregister(struct ath12k_base *sc)
+{
+}
+
+static inline void ath12k_thermal_event_temperature(struct ath12k *ar,
+						    int temperature)
+{
+}
+
+#endif
+#endif /* _ATH12K_THERMAL_ */
diff -Nruw linux-6.13.12-fbx/drivers/net/wireless/marvell/mwl8k_new./Makefile linux-6.13.12-fbx/drivers/net/wireless/marvell/mwl8k_new/Makefile
--- linux-6.13.12-fbx/drivers/net/wireless/marvell/mwl8k_new./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/net/wireless/marvell/mwl8k_new/Makefile	2025-09-25 17:40:34.303360958 +0200
@@ -0,0 +1,12 @@
+# Kbuild fragment for the mwl8k_new driver.
+# debugfs.o is linked in only when CONFIG_DEBUG_FS is enabled.
+mwl8k_new-$(CONFIG_DEBUG_FS) += debugfs.o
+mwl8k_new-y += fw.o
+mwl8k_new-y += main.o
+mwl8k_new-y += utils.o
+
+# svc_* objects — presumably firmware service channels (console,
+# DMA test, virtual tty); confirm against the driver sources.
+mwl8k_new-y += svc_console.o
+mwl8k_new-y += svc_dma_test.o
+mwl8k_new-y += svc_vtty.o
+
+mwl8k_new-y += wifi_core.o
+
+# Composite module object built from the per-file lists above.
+obj-$(CONFIG_MWL8K_NEW)	+= mwl8k_new.o
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/of/configfs.c	2025-09-25 17:40:34.503361950 +0200
@@ -0,0 +1,287 @@
+/*
+ * Configfs entries for device-tree
+ *
+ * Copyright (C) 2013 - Pantelis Antoniou <panto@antoniou-consulting.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/ctype.h>
+#include <linux/cpu.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/proc_fs.h>
+#include <linux/configfs.h>
+#include <linux/types.h>
+#include <linux/stat.h>
+#include <linux/limits.h>
+#include <linux/file.h>
+#include <linux/vmalloc.h>
+#include <linux/firmware.h>
+#include <linux/sizes.h>
+
+#include "of_private.h"
+
+/* one overlay, i.e. one directory created under "overlays" */
+struct cfs_overlay_item {
+	struct config_item	item;
+
+	char			path[PATH_MAX];	/* firmware path, "" until set */
+
+	const struct firmware	*fw;	/* held while path-loaded overlay applied */
+	struct device_node	*overlay;
+	int			ov_id;	/* overlay changeset id, -1 = not applied */
+
+	void			*dtbo;	/* private copy written via "dtbo" attr */
+	int			dtbo_size;	/* 0 while no dtbo has been written */
+};
+
+/* NULL-safe container_of() from the generic config_item */
+static inline struct cfs_overlay_item *to_cfs_overlay_item(
+		struct config_item *item)
+{
+	return item ? container_of(item, struct cfs_overlay_item, item) : NULL;
+}
+
+/*
+ * "path" attribute read: report the firmware path this overlay was
+ * loaded from (empty string until one has been stored).
+ */
+static ssize_t cfs_overlay_item_path_show(struct config_item *item,
+		char *page)
+{
+	struct cfs_overlay_item *ov;
+
+	ov = to_cfs_overlay_item(item);
+
+	return sprintf(page, "%s\n", ov->path);
+}
+
+/*
+ * "path" attribute write: remember the firmware path, load the blob
+ * with request_firmware() and apply it as a DT overlay.  Can only be
+ * done once per item and is mutually exclusive with the "dtbo"
+ * binary attribute.
+ */
+static ssize_t cfs_overlay_item_path_store(struct config_item *item,
+		const char *page, size_t count)
+{
+	struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+	const char *p = page;
+	char *s;
+	int err;
+
+	/* if it's set do not allow changes */
+	if (overlay->path[0] != '\0' || overlay->dtbo_size > 0)
+		return -EPERM;
+
+	/* copy to path buffer (and make sure it's always zero terminated).
+	 * NOTE(review): snprintf returns the would-be length, so on
+	 * truncation the returned count overstates what was stored.
+	 */
+	count = snprintf(overlay->path, sizeof(overlay->path) - 1, "%s", p);
+	overlay->path[sizeof(overlay->path) - 1] = '\0';
+
+	/* strip trailing newlines */
+	s = overlay->path + strlen(overlay->path);
+	while (s > overlay->path && *--s == '\n')
+		*s = '\0';
+
+	pr_debug("%s: path is '%s'\n", __func__, overlay->path);
+
+	err = request_firmware(&overlay->fw, overlay->path, NULL);
+	if (err != 0)
+		goto out_err;
+
+	/* fw stays held while applied; released in cfs_overlay_release() */
+	err = of_overlay_fdt_apply((void *)overlay->fw->data,
+				   overlay->fw->size,
+				   &overlay->ov_id,
+				   NULL);
+	if (err != 0)
+		goto out_err;
+
+	return count;
+
+out_err:
+
+	release_firmware(overlay->fw);
+	overlay->fw = NULL;
+
+	/* clear the path so a later write may retry */
+	overlay->path[0] = '\0';
+	return err;
+}
+
+/* "status" attribute read: "applied" once an overlay id was assigned */
+static ssize_t cfs_overlay_item_status_show(struct config_item *item,
+		char *page)
+{
+	struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+	return sprintf(page, "%s\n",
+			overlay->ov_id >= 0 ? "applied" : "unapplied");
+}
+
+CONFIGFS_ATTR(cfs_overlay_item_, path);
+CONFIGFS_ATTR_RO(cfs_overlay_item_, status);
+
+/* text attributes exposed by each overlay item */
+static struct configfs_attribute *cfs_overlay_attrs[] = {
+	&cfs_overlay_item_attr_path,
+	&cfs_overlay_item_attr_status,
+	NULL,
+};
+
+ssize_t cfs_overlay_item_dtbo_read(struct config_item *item,
+				   void *buf, size_t max_count);
+
+/*
+ * "dtbo" binary attribute read: return the stored blob size, copying
+ * the data only when a buffer is supplied (buf == NULL is the size
+ * query pass of the configfs bin-attr protocol).
+ */
+ssize_t cfs_overlay_item_dtbo_read(struct config_item *item,
+		void *buf, size_t max_count)
+{
+	struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+	pr_debug("%s: buf=%p max_count=%zu\n", __func__,
+			buf, max_count);
+
+	/* nothing stored yet */
+	if (overlay->dtbo == NULL)
+		return 0;
+
+	/* copy if buffer provided */
+	if (buf != NULL) {
+		/* the buffer must be large enough */
+		if (overlay->dtbo_size > max_count)
+			return -ENOSPC;
+
+		memcpy(buf, overlay->dtbo, overlay->dtbo_size);
+	}
+
+	return overlay->dtbo_size;
+}
+
+ssize_t cfs_overlay_item_dtbo_write(struct config_item *item,
+				    const void *buf, size_t count);
+
+/*
+ * "dtbo" binary attribute write: keep a private copy of the blob and
+ * apply it as a DT overlay.  Can only be done once per item and is
+ * mutually exclusive with the "path" attribute.
+ */
+ssize_t cfs_overlay_item_dtbo_write(struct config_item *item,
+		const void *buf, size_t count)
+{
+	struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+	int err;
+
+	/* if it's set do not allow changes */
+	if (overlay->path[0] != '\0' || overlay->dtbo_size > 0)
+		return -EPERM;
+
+	/* copy the contents */
+	overlay->dtbo = kmemdup(buf, count, GFP_KERNEL);
+	if (overlay->dtbo == NULL)
+		return -ENOMEM;
+
+	overlay->dtbo_size = count;
+
+	/*
+	 * apply the copied blob.  The previous code dereferenced
+	 * overlay->fw->data here, but no firmware is ever requested on
+	 * this path so overlay->fw is NULL — that was a guaranteed
+	 * NULL-pointer dereference.
+	 */
+	err = of_overlay_fdt_apply(overlay->dtbo,
+				   overlay->dtbo_size,
+				   &overlay->ov_id,
+				   NULL);
+	if (err != 0)
+		goto out_err;
+
+	return count;
+
+out_err:
+	kfree(overlay->dtbo);
+	overlay->dtbo = NULL;
+	overlay->dtbo_size = 0;
+
+	return err;
+}
+
+/* binary "dtbo" attribute, capped at 1 MiB */
+CONFIGFS_BIN_ATTR(cfs_overlay_item_, dtbo, NULL, SZ_1M);
+
+static struct configfs_bin_attribute *cfs_overlay_bin_attrs[] = {
+	&cfs_overlay_item_attr_dtbo,
+	NULL,
+};
+
+/* item destructor: undo the overlay and drop every held resource */
+static void cfs_overlay_release(struct config_item *item)
+{
+	struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+	if (overlay->ov_id >= 0)
+		of_overlay_remove(&overlay->ov_id);
+	if (overlay->fw)
+		release_firmware(overlay->fw);
+	/* kfree with NULL is safe */
+	kfree(overlay->dtbo);
+	kfree(overlay);
+}
+
+static struct configfs_item_operations cfs_overlay_item_ops = {
+	.release	= cfs_overlay_release,
+};
+
+static struct config_item_type cfs_overlay_type = {
+	.ct_item_ops	= &cfs_overlay_item_ops,
+	.ct_attrs	= cfs_overlay_attrs,
+	.ct_bin_attrs	= cfs_overlay_bin_attrs,
+	.ct_owner	= THIS_MODULE,
+};
+
+/* mkdir under "overlays": allocate a fresh, not-yet-applied item */
+static struct config_item *cfs_overlay_group_make_item(
+		struct config_group *group, const char *name)
+{
+	struct cfs_overlay_item *overlay;
+
+	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+	if (!overlay)
+		return ERR_PTR(-ENOMEM);
+	/* -1 = no overlay applied yet (see status_show/release) */
+	overlay->ov_id = -1;
+
+	config_item_init_type_name(&overlay->item, name, &cfs_overlay_type);
+	return &overlay->item;
+}
+
+/* rmdir: drop our reference; release() does the actual teardown */
+static void cfs_overlay_group_drop_item(struct config_group *group,
+		struct config_item *item)
+{
+	struct cfs_overlay_item *overlay = to_cfs_overlay_item(item);
+
+	config_item_put(&overlay->item);
+}
+
+/* "overlays" group: users may create/remove overlay items here */
+static struct configfs_group_operations overlays_ops = {
+	.make_item	= cfs_overlay_group_make_item,
+	.drop_item	= cfs_overlay_group_drop_item,
+};
+
+static struct config_item_type overlays_type = {
+	.ct_group_ops   = &overlays_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+static struct configfs_group_operations of_cfs_ops = {
+	/* empty - we don't allow anything to be created */
+};
+
+static struct config_item_type of_cfs_type = {
+	.ct_group_ops   = &of_cfs_ops,
+	.ct_owner       = THIS_MODULE,
+};
+
+struct config_group of_cfs_overlay_group;
+
+/* root of the subsystem: /sys/kernel/config/device-tree */
+static struct configfs_subsystem of_cfs_subsys = {
+	.su_group = {
+		.cg_item = {
+			.ci_namebuf = "device-tree",
+			.ci_type = &of_cfs_type,
+		},
+	},
+	.su_mutex = __MUTEX_INITIALIZER(of_cfs_subsys.su_mutex),
+};
+
+/* register the "device-tree" configfs subsystem with its default
+ * "overlays" group; late_initcall so configfs itself is up first */
+static int __init of_cfs_init(void)
+{
+	int ret;
+
+	pr_info("%s\n", __func__);
+
+	config_group_init(&of_cfs_subsys.su_group);
+	config_group_init_type_name(&of_cfs_overlay_group, "overlays",
+			&overlays_type);
+	configfs_add_default_group(&of_cfs_overlay_group,
+			&of_cfs_subsys.su_group);
+
+	ret = configfs_register_subsystem(&of_cfs_subsys);
+	if (ret != 0) {
+		pr_err("%s: failed to register subsys\n", __func__);
+		goto out;
+	}
+	pr_info("%s: OK\n", __func__);
+out:
+	return ret;
+}
+late_initcall(of_cfs_init);
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/pci/controller/pcie-bcm63xx.c	2025-09-25 17:40:34.527362069 +0200
@@ -0,0 +1,1433 @@
+/*
+ * pcie-bcm63xx.c for pcie-bcm63x
+ * Created by <nschichan@freebox.fr> on Wed Jun  5 14:08:58 2019
+ */
+
+/*
+ * inspired by pcie-bcm63xx.c from broadcom refsw release 5.02L.07-test9
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/ubus4.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/bcm63xx-procmon.h>
+#include <linux/msi.h>
+
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_irq.h>
+
+#include <dt-bindings/brcm,bcm63xx-pcie.h>
+
+#include "pcie-bcm63xx.h"
+#include "../pci.h"
+
+/* per-port settings read from the device tree */
+struct port_config {
+	u32 speed;	/* requested gen, PCIE_SPEED_DEFAULT = core default */
+	u32 num_lanes;
+	u32 ssc;	/* non-zero: enable spread-spectrum clocking */
+	u64 dram_size;	/* from the brcm,dram node, used for BAR sizing */
+};
+
+#define MSI_MAP_SIZE	32
+
+/*
+ * offsets of the root complex PCI configuration space registers we
+ * want to save when asserting reset and reiniting link via sysfs.
+ *
+ * probably not exhaustive, but should be enough to preserve the
+ * content bit for bit of the configuration space between resets for
+ * now.
+ */
+static const u32 rc_config_space_backup_offsets[] = {
+	0x004,
+	0x03c,
+	0x0b4,
+	0x0c8,
+	0x12c,
+};
+
+/* driver state, one instance per PCIe port */
+struct bcm63xx_pcie {
+	struct device *dev;
+	struct pci_host_bridge *host;
+
+	unsigned long regs_phys;
+	void __iomem *regs;
+
+	bool ep_reset_asserted;
+
+	int irq;
+	struct resource io_res;
+	resource_size_t io_offset;
+	struct resource mem_res;
+	resource_size_t mem_offset;
+	struct resource bus_range;
+
+	u32 core_speed;		/* max gen supported by the core */
+	struct port_config pcfg;
+	u32 misc_revision;
+	int rcal_1um_vert_value;	/* from procmon; < 0 = invalid */
+
+	int msi_irq;
+	struct mutex msi_lock;	/* protects msi_used bitmap */
+	struct irq_domain *msi_domain;
+	DECLARE_BITMAP(msi_used, MSI_MAP_SIZE);
+
+	u32 rc_config_space_backup[ARRAY_SIZE(rc_config_space_backup_offsets)];
+};
+
+/* NOTE(review): looks stale — struct bcm63xx_pcie has no "chip"
+ * member, so this macro cannot compile if ever used; confirm and drop. */
+#define to_bcm63xx_pcie(chip)	container_of(chip, struct bcm63xx_pcie, chip)
+
+/*
+ * make it so that resource_size() return 0.
+ */
+static inline void make_invalid_resource(struct resource *r)
+{
+	r->start = 0;
+	r->end = -1;
+}
+
+/* MMIO write with debug trace */
+static inline void bcm_pcie_writel(u32 val, struct bcm63xx_pcie *pcie,
+				  u32 offset)
+{
+	dev_dbg(pcie->dev, "bcm_pcie_writel: value %08x offset %08x\n",
+		val, offset);
+	writel(val, pcie->regs + offset);
+}
+
+/* MMIO read with debug trace */
+static inline u32 bcm_pcie_readl(struct bcm63xx_pcie *pcie, u32 offset)
+{
+	u32 val = readl(pcie->regs + offset);
+
+	dev_dbg(pcie->dev, "bcm_pcie_readl: value %08x offset %08x\n",
+		val, offset);
+	return val;
+}
+
+/*
+ * PCIe phy indirect read/write helpers.
+ */
+#define PCIE_PHY_TRIES 100
+
+/*
+ * Read a phy register through the indirect PHY_ADDR / PHY_RD_DATA
+ * window.  Fills *out_data and returns 0 on success, -ETIMEDOUT when
+ * the DONE bit never shows up.
+ */
+static int bcm_pcie_phy_read(struct bcm63xx_pcie *pcie, u32 phyaddr, u32 regaddr,
+			     u32 *out_data)
+{
+	u32 reg;
+	int tries = PCIE_PHY_TRIES;
+
+	reg = PHY_ADDR_READ |
+		PHY_ADDR_PHY(phyaddr) |
+		PHY_ADDR_REG(regaddr);
+	bcm_pcie_writel(reg, pcie, BCM63XX_PCIE_PHY_ADDR);
+	udelay(1000);
+
+	do {
+		u32 v = bcm_pcie_readl(pcie, BCM63XX_PCIE_PHY_RD_DATA);
+
+		if (v & PHY_RD_DATA_DONE) {
+			*out_data = v & PHY_RD_DATA_MASK;
+			return 0;
+		}
+		udelay(10);
+		--tries;
+	} while (tries > 0);
+	/*
+	 * the condition above was "tries < PCIE_PHY_TRIES", which is
+	 * true for every decremented value of the counter, so the loop
+	 * never terminated and the timeout below was unreachable.
+	 */
+
+	dev_err(pcie->dev, "bcm_pcie_phy_read: timedout reading from "
+		"phy %x reg %x.\n",
+		phyaddr, regaddr);
+	return -ETIMEDOUT;
+}
+
+/*
+ * Write a phy register through the indirect PHY_ADDR / PHY_WR_DATA
+ * window.  Returns 0 on success, -ETIMEDOUT otherwise.
+ *
+ * NOTE(review): success is declared while PHY_WR_DATA_TRIG is still
+ * set; hardware usually clears a trigger bit on completion — verify
+ * the polarity against the reference sources.
+ */
+static int bcm_pcie_phy_write(struct bcm63xx_pcie *pcie, u32 phyaddr,
+			      u32 regaddr, u32 data)
+{
+	u32 reg;
+	int tries = PCIE_PHY_TRIES;
+
+	reg = PHY_ADDR_WRITE |
+		PHY_ADDR_PHY(phyaddr) |
+		PHY_ADDR_REG(regaddr);
+	bcm_pcie_writel(reg, pcie, BCM63XX_PCIE_PHY_ADDR);
+	udelay(1000);
+
+	reg = (data & PHY_WR_DATA_MASK) | PHY_WR_DATA_TRIG;
+	bcm_pcie_writel(reg, pcie, BCM63XX_PCIE_PHY_WR_DATA);
+
+	do {
+		reg = bcm_pcie_readl(pcie, BCM63XX_PCIE_PHY_WR_DATA);
+		if (reg & PHY_WR_DATA_TRIG)
+			return 0;
+		--tries;
+	} while (tries > 0);
+	/*
+	 * as in bcm_pcie_phy_read(), the original condition
+	 * "tries < PCIE_PHY_TRIES" made this loop spin forever on a
+	 * hardware timeout.
+	 */
+
+	dev_err(pcie->dev, "bcm_pcie_phy_write: timedout writing from "
+		"phy %x reg %x.\n",
+		phyaddr, regaddr);
+	return -ETIMEDOUT;
+}
+
+/*
+ * UBUS configuration: apply credits, clear the congestion threshold
+ * and set up port remapping on the given UBUS4 master.
+ */
+static int bcm63xx_pcie_ubus_config(struct ubus4_master *m)
+{
+	ubus_master_apply_credits(m);
+	ubus_master_set_congestion_threshold(m, 0);
+	ubus_master_remap_port(m);
+	return 0;
+}
+
+/*
+ * fetch various resources from device tree: the "ranges" windows
+ * (MEM and IO) and the optional bus range (defaults to 0-255).
+ */
+static int bcm63xx_pcie_parse_ranges(struct bcm63xx_pcie *pcie)
+{
+	struct of_pci_range_parser parser;
+	struct of_pci_range range;
+	int error;
+
+	error = of_pci_range_parser_init(&parser, pcie->dev->of_node);
+	if (error) {
+		dev_err(pcie->dev, "of_pci_range_parser_init failed: %d\n",
+			error);
+		return error;
+	}
+
+	/* last range of each type wins if the DT lists several */
+	for_each_of_pci_range(&parser, &range){
+		struct resource res;
+
+		error = of_pci_range_to_resource(&range, pcie->dev->of_node,
+					       &res);
+		if (error)
+			return error;
+
+		switch (range.flags & IORESOURCE_TYPE_BITS) {
+		case IORESOURCE_MEM:
+			pcie->mem_res = res;
+			pcie->mem_offset = res.start - range.pci_addr;
+			break;
+		case IORESOURCE_IO:
+			pcie->io_offset = res.start - range.pci_addr;
+			pcie->io_res = res;
+			break;
+		}
+	}
+
+	error = of_pci_parse_bus_range(pcie->dev->of_node, &pcie->bus_range);
+	if (error) {
+		dev_warn(pcie->dev, "unable to parse bus range. using "
+			 "0-255.\n");
+		pcie->bus_range.flags = IORESOURCE_BUS;
+		pcie->bus_range.start = 0x0;
+		pcie->bus_range.end = 0xff;
+	}
+
+	dev_dbg(pcie->dev, "IO resource: %pR\n", &pcie->io_res);
+	dev_dbg(pcie->dev, "MEM resource: %pR\n", &pcie->mem_res);
+	dev_dbg(pcie->dev, "BUS resource: %pR\n", &pcie->bus_range);
+
+	return 0;
+}
+
+/*
+ * Resolve the "brcm,dram" phandle and return the DRAM size (second
+ * u64 of the referenced node's "reg" property) in *out_dram_size.
+ */
+static int get_soc_dram_size(struct bcm63xx_pcie *pcie, u64 *out_dram_size)
+{
+	struct of_phandle_args args;
+	int err;
+	u64 reg[2];
+
+	err = of_parse_phandle_with_args(pcie->dev->of_node, "brcm,dram",
+					 0, 0, &args);
+	if (err) {
+		dev_err(pcie->dev, "unable to parse brcm,dram phandle.\n");
+		return err;
+	}
+
+	err = of_property_read_u64_array(args.np, "reg", reg, 2);
+	/*
+	 * of_parse_phandle_with_args() returns args.np with an elevated
+	 * refcount; the original code leaked that reference.
+	 */
+	of_node_put(args.np);
+	if (err) {
+		dev_err(pcie->dev, "unable to read reg from dram node: %d.\n",
+			err);
+		return err;
+	}
+
+	*out_dram_size = reg[1];
+	return 0;
+}
+
+/*
+ * SoC specific DT configuration. speed can be overridden here, or
+ * left at 0 (use SoC default supported speed).
+ *
+ * SSC might be of interest for the hardware folks and CE
+ * certification.
+ *
+ * omitted from here:
+ * - phy low power mode select.
+ * - keep PCIe core powered on on link down during init.
+ * - error logging activation.
+ */
+static int bcm63xx_pcie_parse_dt_port_config(struct bcm63xx_pcie *pcie)
+{
+	int error;
+
+	/* all brcm,* knobs are optional and have sane defaults */
+	error = of_property_read_u32(pcie->dev->of_node, "brcm,speed",
+				     &pcie->pcfg.speed);
+	if (error)
+		pcie->pcfg.speed = PCIE_SPEED_DEFAULT;
+
+	error = of_property_read_u32(pcie->dev->of_node, "brcm,num-lanes",
+				     &pcie->pcfg.num_lanes);
+	if (error)
+		pcie->pcfg.num_lanes = 1;
+
+	error = of_property_read_u32(pcie->dev->of_node, "brcm,ssc",
+				     &pcie->pcfg.ssc);
+	if (error)
+		pcie->pcfg.ssc = 0;
+
+	error = get_soc_dram_size(pcie, &pcie->pcfg.dram_size);
+	if (error)
+		/*
+		 * this is mandatory.
+		 */
+		return error;
+
+	dev_dbg(pcie->dev, "speed: %d, num-lanes: %d, ssc: %d.\n",
+		pcie->pcfg.speed, pcie->pcfg.num_lanes, pcie->pcfg.ssc);
+	return 0;
+}
+
+/*
+ * read vendor/device id, get chip revision from PCIe misc registers.
+ * Also clamps the DT-requested lane count and speed to what the core
+ * reports it supports.
+ */
+static int bcm63xx_pcie_init(struct bcm63xx_pcie *pcie)
+{
+	u32 reg;
+	u32 link_width;
+
+	/* sanity check: dead value means the register bus is broken */
+	reg = bcm_pcie_readl(pcie, BCM63XX_PCIE_VENDOR_ID);
+	if (reg == 0xdeaddead) {
+		dev_err(pcie->dev, "dead read from PCIe register, is the "
+			"PCIe cable bad?\n");
+		return -ENXIO;
+	}
+
+	dev_dbg(pcie->dev, "vendor %04x, device %04x\n",
+		VENDOR_ID_VENDOR(reg), VENDOR_ID_DEVICE(reg));
+
+	reg = bcm_pcie_readl(pcie, BCM63XX_PCIE_LINK_CAP);
+	link_width = LINK_CAP_WIDTH(reg);
+	pcie->core_speed = LINK_CAP_SPEED(reg);
+
+	dev_dbg(pcie->dev, "link speed: 0x%x, width 0x%x\n", pcie->core_speed,
+		link_width);
+
+	if (pcie->pcfg.num_lanes > link_width) {
+		dev_info(pcie->dev, "limiting num-lanes to %d\n", link_width);
+		pcie->pcfg.num_lanes = link_width;
+	}
+
+	if (pcie->pcfg.speed > pcie->core_speed) {
+		dev_info(pcie->dev, "limiting speed to gen%d\n",
+			 pcie->core_speed);
+		pcie->pcfg.speed = pcie->core_speed;
+	}
+
+	pcie->misc_revision = bcm_pcie_readl(pcie, BCM63XX_PCIE_MISC_REVISION);
+	dev_dbg(pcie->dev, "misc revision: 0x%04x\n", pcie->misc_revision);
+
+	if (pcie->misc_revision < 0x320) {
+		/*
+		 * chip revisions below 0x320 need access to the MISC
+		 * register block to reset the PCIe core, which we do
+		 * not need to support on the bcm63158.
+		 *
+		 * revisit later if needed.
+		 */
+		dev_err(pcie->dev, "chip revision 0x%04x unsupported. "
+			"can't reset PCIe chip.\n", pcie->misc_revision);
+		return -ENOTSUPP;
+	}
+	return 0;
+}
+
+/* GEN2 phy setup: only SSC parameters need programming on bcm63158 */
+static int bcm63xx_pcie_phy_configure_gen2(struct bcm63xx_pcie *pcie)
+{
+	dev_info(pcie->dev, "configuring for GEN2 speed.\n");
+
+	/*
+	 * nothing to do for bcm63158, work arounds for chip revision
+	 * below 0x303 (everybody needs a 0x303) are fixed on chip
+	 * revision 0x320 (available on bcm63158).
+	 *
+	 * we only need to configure SSC if required.
+	 */
+	if (!pcie->pcfg.ssc)
+		return 0;
+
+	if (pcie->misc_revision < 0x303) {
+		/*
+		 * SSC Parameters
+		 * Workaround (for early gen2 cards):
+		 * Block 0x1100, Register 0xA = 0xea3c
+		 * Block 0x1100, Register 0xB = 0x04e7
+		 * Block 0x1100, Register 0xC = 0x0039
+		 *-Block 0x1100 fixed in 63148A0, 63381B0, 63138B0
+		 * but ok to write anyway
+		 */
+		bcm_pcie_phy_write(pcie, 0, 0x1f, 0x1100);
+		bcm_pcie_phy_write(pcie, 0, 0x0a, 0xea3c);
+		bcm_pcie_phy_write(pcie, 0, 0x0b, 0x04e7);
+		bcm_pcie_phy_write(pcie, 0, 0x0c, 0x0039);
+	}
+
+	/*
+	 * SSC Parameters
+	 * Block 0x2200, Register 5 = 0x5044
+	 *   // VCO parameters for fractional mode, -175ppm
+	 * Block 0x2200, Register 6 = 0xfef1
+	 *   // VCO parameters for fractional mode, -175ppm
+	 * Block 0x2200, Register 7 = 0xe818
+	 *   // VCO parameters for fractional mode, -175ppm
+	 *
+	 * Notes:
+	 * - Only need to apply those fixes when enabling Spread
+	 *   Spectrum Clocking (SSC), which would likely be a flash
+	 *   option
+	 */
+	bcm_pcie_phy_write(pcie, 0, 0x1f, 0x2200);
+	bcm_pcie_phy_write(pcie, 0, 0x05, 0x5044);
+	bcm_pcie_phy_write(pcie, 0, 0x06, 0xfef1);
+	bcm_pcie_phy_write(pcie, 0, 0x07, 0xe818);
+
+	return 0;
+}
+
+/* GEN3 phy setup: placeholder, only fails when SSC is requested */
+static int bcm63xx_pcie_phy_configure_gen3(struct bcm63xx_pcie *pcie)
+{
+	dev_info(pcie->dev, "configuring for GEN3 speed.\n");
+
+	/*
+	 * nothing to do for bcm63158, work arounds for chip revision
+	 * below 0x320 are fixed on chip revision 0x322 (available on
+	 * bcm63158). chip revision 0x320 seems to be found only on
+	 * bcm63158 A0 (which we do not use).
+	 *
+	 * we only need to enable SSC if required.
+	 */
+
+	if (!pcie->pcfg.ssc)
+		return 0;
+
+	/*
+	 * no GEN3 pcie cores on 63158, revisit later if needed.
+	 */
+	return -ENOTSUPP;
+}
+
+/*
+ * resistor calibration configuration: this is fetched from the
+ * bcm(63158) procmon driver, and programmed to the PCIe hardware
+ * here.
+ */
+static int bcm63xx_pcie_gen12_rescal_set(struct bcm63xx_pcie *pcie)
+{
+	u32 data;
+
+	/* rcal_1um_vert_value < 0 means procmon gave us nothing usable */
+	if (pcie->rcal_1um_vert_value < 0) {
+		dev_warn(pcie->dev, "invalid rcal 1um vert value %d, "
+			 "rescal unchanged.\n", pcie->rcal_1um_vert_value);
+		return 0;
+	}
+
+	dev_info(pcie->dev, "bcm63xx_pcie_gen12_rescal_set: 0x%x\n",
+		pcie->rcal_1um_vert_value);
+
+	/*
+	 * Rcal Calibration Timers
+	 *	 Block 0x1000, Register 1, bit 4(enable), and 3:0 (value)
+	 */
+	bcm_pcie_phy_write(pcie, 0, 0x1f, 0x1000);
+	bcm_pcie_phy_read(pcie, 0, 1, &data);
+	data = ((data & 0xffe0) | (pcie->rcal_1um_vert_value & 0xf) |
+		(1 << 4)); /* enable */
+	bcm_pcie_phy_write(pcie, 0, 1, data);
+
+	return 0;
+}
+
+/* dispatch rescal programming by core generation (no GEN3 support) */
+static int bcm63xx_pcie_rescal_set(struct bcm63xx_pcie *pcie)
+{
+	int error = 0;
+
+	switch (pcie->core_speed) {
+	case PCIE_SPEED_GEN3:
+		return -ENOTSUPP;
+	default:
+		error = bcm63xx_pcie_gen12_rescal_set(pcie);
+		break;
+	}
+
+	if (error)
+		return error;
+	return 0;
+}
+
+/* program rescal then the per-generation phy parameters */
+static int bcm63xx_pcie_phy_config(struct bcm63xx_pcie *pcie)
+{
+	int error = 0;
+
+	error = bcm63xx_pcie_rescal_set(pcie);
+	if (error)
+		return error;
+
+	/* PCIE_SPEED_DEFAULT falls through with error == 0 (no case) */
+	switch (pcie->core_speed) {
+	case PCIE_SPEED_GEN1:
+		/* nothing to do for gen1/default speed */
+		break;
+	case PCIE_SPEED_GEN2:
+		error = bcm63xx_pcie_phy_configure_gen2(pcie);
+		break;
+	case PCIE_SPEED_GEN3:
+		error = bcm63xx_pcie_phy_configure_gen3(pcie);
+		break;
+	}
+
+	if (error) {
+		dev_err(pcie->dev, "unable to configure PCIe phy for GEN%d\n",
+			pcie->core_speed);
+		return error;
+	}
+
+	return 0;
+}
+
+/* toggle SSC on a GEN2 phy and poll (max ~40ms) until it takes effect */
+static int bcm63xx_pcie_phy_enable_ssc_gen2(struct bcm63xx_pcie *pcie,
+					    bool enable)
+{
+	u32 data;
+	int timeout = 40;
+
+	dev_info(pcie->dev, "%sabling SSC on GEN2 phy.\n",
+		 enable ? "en" : "dis");
+
+	/*
+	 * SSC disabled when PCIe core comes out of reset to allow PLL
+	 * sync to happen write sscControl0 register
+	 * ssc_mode_enable_ovrd & ssc_mode_enable_ovrd_val
+	 */
+	bcm_pcie_phy_write(pcie, 0, 0x1f, 0x1100);
+	bcm_pcie_phy_read(pcie, 0, 0x02, &data);
+	if (enable)
+		data |= 0xc000;     /* bit 15:14 11'b to enable SSC */
+	else
+		data &= ~0xc000;    /* bit 15:14 00'b to disable SSC */
+	bcm_pcie_phy_write(pcie, 0, 0x02, data);
+
+	/*
+	 * TODO: Check the status to see if SSC is set or not
+	 */
+	while (timeout) {
+		bcm_pcie_phy_read(pcie, 0, 0x01, &data);
+
+		/*
+		 * bit 10 reflects state of SSC ? Then what to make of
+		 * the TODO comment above ?
+		 */
+		if (!!(data & (1 << 10)) == enable)
+			break;
+		--timeout;
+		udelay(1000);
+	}
+
+	if (!timeout)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+/* GEN3 SSC: unimplemented, no GEN3 cores on the supported SoC */
+static int bcm63xx_pcie_phy_enable_ssc_gen3(struct bcm63xx_pcie *pcie,
+					    bool enable)
+{
+	dev_info(pcie->dev, "%sabling SSC on GEN3 phy.\n",
+		 enable ? "en" : "dis");
+
+	/*
+	 * no GEN3 pcie cores on 63158, revisit later if needed.
+	 */
+	return -ENOTSUPP;
+}
+
+/* toggle SSC if configured; no-op when brcm,ssc is unset or on GEN1 */
+static int bcm63xx_pcie_phy_enable_ssc(struct bcm63xx_pcie *pcie, bool enable)
+{
+	int error = 0;
+
+	if (!pcie->pcfg.ssc)
+		return 0;
+
+	switch (pcie->core_speed) {
+	case PCIE_SPEED_DEFAULT:
+	case PCIE_SPEED_GEN1:
+		break;
+	case PCIE_SPEED_GEN2:
+		error = bcm63xx_pcie_phy_enable_ssc_gen2(pcie, enable);
+		break;
+	case PCIE_SPEED_GEN3:
+		error = bcm63xx_pcie_phy_enable_ssc_gen3(pcie, enable);
+		break;
+	}
+
+	if (error) {
+		dev_err(pcie->dev, "unable to %sable SSC: %d\n",
+			enable ? "en" : "dis", error);
+		return error;
+	}
+	return 0;
+}
+
+/* drive the core reset bit in MISC_CTRL (active low: RESETn) */
+static void __bcm63xx_pcie_set_reset(struct bcm63xx_pcie *pcie, bool assert)
+{
+	u32 reg;
+
+	reg = bcm_pcie_readl(pcie, BCM63XX_PCIE_MISC_CTRL);
+	if (assert)
+		reg &= ~MISC_CTRL_PCIE_RESETn;
+	else
+		reg |= MISC_CTRL_PCIE_RESETn;
+	bcm_pcie_writel(reg, pcie, BCM63XX_PCIE_MISC_CTRL);
+}
+
+static inline void bcm63xx_pcie_assert_reset(struct bcm63xx_pcie *pcie)
+{
+	__bcm63xx_pcie_set_reset(pcie, true);
+}
+
+static inline void bcm63xx_pcie_deassert_reset(struct bcm63xx_pcie *pcie)
+{
+	__bcm63xx_pcie_set_reset(pcie, false);
+
+}
+
+/* program a DT-requested speed into both link cap and control2 regs */
+static int bcm63xx_pcie_core_set_speed(struct bcm63xx_pcie *pcie)
+{
+	u32 reg;
+
+	/* default: leave the core's own speed selection alone */
+	if (pcie->pcfg.speed == PCIE_SPEED_DEFAULT)
+		return 0;
+
+	reg = bcm_pcie_readl(pcie, BCM63XX_PCIE_PRIV1_LINK_CAP);
+	reg &= ~PRIV1_LINK_CAP_SPEED_MASK;
+	reg |= PRIV1_LINK_CAP_SPEED(pcie->pcfg.speed);
+	bcm_pcie_writel(reg, pcie, BCM63XX_PCIE_PRIV1_LINK_CAP);
+
+	reg = bcm_pcie_readl(pcie, BCM63XX_PCIE_LINK_STATUS_CONTROL2);
+	reg &= ~LINK_STATUS_CONTROL2_SPEED_MASK;
+	reg |= LINK_STATUS_CONTROL2_SPEED(pcie->pcfg.speed);
+	bcm_pcie_writel(reg, pcie, BCM63XX_PCIE_LINK_STATUS_CONTROL2);
+
+	mdelay(10);
+
+	return 0;
+}
+
+/*
+ * reset PCIe core, configure phy, and release core reset.
+ */
+static int bcm63xx_pcie_reset(struct bcm63xx_pcie *pcie)
+{
+	int error;
+
+	bcm63xx_pcie_assert_reset(pcie);
+
+	error = bcm63xx_pcie_phy_config(pcie);
+	if (error)
+		return error;
+
+	/*
+	 * disable SSC for now, enable it after link up.
+	 */
+	error = bcm63xx_pcie_phy_enable_ssc(pcie, false);
+	if (error)
+		return error;
+
+	/*
+	 * TODO: enable phy low power mode here ?
+	 */
+
+	error = bcm63xx_pcie_core_set_speed(pcie);
+	if (error)
+		return error;
+
+	/* NOTE(review): errors above leave reset asserted — intended? */
+	bcm63xx_pcie_deassert_reset(pcie);
+	mdelay(500);
+
+	return 0;
+}
+
+/* true when the data-link layer reports the link as up */
+static bool bcm63xx_pcie_check_link_up(struct bcm63xx_pcie *pcie)
+{
+	u32 v = bcm_pcie_readl(pcie, BCM63XX_PCIE_DL_STATUS);
+
+	return !!(v & DL_STATUS_LINK_UP);
+}
+
+/* hand the parsed IO/MEM/bus windows over to the host bridge */
+static void bcm63xx_pcie_add_resources(struct bcm63xx_pcie *pcie)
+{
+	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+
+	if (resource_size(&pcie->io_res)) {
+		/* NOTE(review): dev_err + "IO@#!" looks like leftover
+		 * debug output — should probably be dev_dbg. */
+		dev_err(pcie->dev, "adding resource %pR as IO@#!\n",
+			&pcie->io_res);
+		pci_add_resource(&bridge->windows, &pcie->io_res);
+	}
+	pci_add_resource(&bridge->windows, &pcie->mem_res);
+	pci_add_resource(&bridge->windows, &pcie->bus_range);
+}
+
+/*
+ * one-time core configuration after reset: legacy INTx unmasking,
+ * outgoing MEM window, incoming DDR BAR sizing, class code, misc
+ * performance knobs and UBUS behaviour.  Register programming order
+ * follows the vendor reference code.
+ */
+static int bcm63xx_pcie_core_config(struct bcm63xx_pcie *pcie)
+{
+	u32 reg;
+	u32 size, size_val;
+
+	/*
+	 * clear legacy interrupt.
+	 */
+	bcm_pcie_writel(CPU_INTR1_PCIE_INTD_CPU_INTR |
+			CPU_INTR1_PCIE_INTC_CPU_INTR |
+			CPU_INTR1_PCIE_INTB_CPU_INTR |
+			CPU_INTR1_PCIE_INTA_CPU_INTR, pcie,
+			BCM63XX_PCIE_CPU_INTR1_MASK_CLEAR);
+
+	/*
+	 * configure MEM window
+	 */
+	reg = WIN_LIMIT_END(pcie->mem_res.end) |
+		WIN_LIMIT_BASE(pcie->mem_res.start);
+	bcm_pcie_writel(reg, pcie, BCM63XX_PCIE_MISC_WIN_LIMIT(0));
+
+	reg = WIN_LO_BASE_ADDR(pcie->mem_res.start);
+	bcm_pcie_writel(reg, pcie, BCM63XX_PCIE_MISC_WIN_LO(0));
+	bcm_pcie_writel(0x0, pcie, BCM63XX_PCIE_MISC_WIN_HI(0));
+
+	/*
+	 * incoming DDR memory configuration (BAR1)
+	 *
+	 * FIXME: are BAR2 and BAR3 configuration needed ? Or are
+	 * BAR1/BAR2/BAR3 poor naming for DDR chip selects ?
+	 */
+	/* encode dram_size as a log2-style value of 64k units */
+	size = pcie->pcfg.dram_size >> 16; /* in 64k units */
+	size_val = 0;
+	while (size) {
+		++size_val;
+		size >>= 1;
+	}
+	bcm_pcie_writel(size_val, pcie, BCM63XX_PCIE_MISC_BAR_CONFIG_LO(0));
+
+	bcm_pcie_writel(UBUS_BAR_ACCESS_EN, pcie,
+			BCM63XX_PCIE_MISC_UBUS_BAR_REMAP(0));
+
+	/*
+	 * setup PCI class code.
+	 */
+	reg = bcm_pcie_readl(pcie, BCM63XX_PCIE_PRIV1_ID_VAL3);
+	reg &= PRIV1_ID_VAL3_REV_ID_MASK;
+	reg |= PCI_CLASS_BRIDGE_PCI << 8;
+	bcm_pcie_writel(reg, pcie, BCM63XX_PCIE_PRIV1_ID_VAL3);
+
+	/*
+	 * disable data bus error for enumeration
+	 */
+	reg = bcm_pcie_readl(pcie, BCM63XX_PCIE_MISC_MISC_CTRL);
+	reg |=  MISC_CTRL_CFG_READ_UR_MODE;
+
+	/*
+	 * misc performance settings
+	 */
+	reg |= (MISC_CTRL_MAX_BURST_SIZE_128B | MISC_CTRL_PCIE_IN_WR_COMBINE |
+		MISC_CTRL_PCIE_RCB_MPS_MODE | MISC_CTRL_PCIE_RCB_64B_MODE);
+
+	/* burst alignment encoding differs per misc revision */
+	reg &= ~misc_ctrl_burst_align_mask(pcie->misc_revision);
+	if (pcie->misc_revision >= 0x320)
+		reg |= misc_ctrl_burst_align(pcie->misc_revision, 4);
+	else if (pcie->misc_revision == 0x310)
+		reg |= misc_ctrl_burst_align(pcie->misc_revision, 3);
+	else
+		reg |= misc_ctrl_burst_align(pcie->misc_revision, 1);
+	if (pcie->misc_revision == 0x310) {
+		/*
+		 * workaround for UBUS4 Logic Bug in this revision
+		 * Limit the max burst to 64B
+		 */
+	    reg &= ~MISC_CTRL_MAX_BURST_SIZE_MASK;
+	    reg |= MISC_CTRL_MAX_BURST_SIZE_64B;
+	}
+
+	bcm_pcie_writel(reg, pcie, BCM63XX_PCIE_MISC_MISC_CTRL);
+
+	/*
+	 * wait for UBUS reply for burst writes
+	 */
+	reg = bcm_pcie_readl(pcie, BCM63XX_PCIE_MISC_UBUS_CTRL);
+	reg |= MISC_UBUS_CTRL_UBUS_WR_WITH_REPLY;
+	reg |= MISC_UBUS_CTRL_UBUS_REPLY_ERR_DIS;
+	reg |= MISC_UBUS_CTRL_UBUS_PCIE_REPLY_DECERR_DIS;
+	bcm_pcie_writel(reg, pcie, BCM63XX_PCIE_MISC_UBUS_CTRL);
+
+	/* link is configured now; turn SSC on if requested */
+	bcm63xx_pcie_phy_enable_ssc(pcie, true);
+
+	return 0;
+}
+
+/*
+ * Map a (bus, devfn, offset) triple to a config-space address: the
+ * root bridge's config space sits at the start of the register area;
+ * everything else goes through the indexed EXT_CFG window.
+ */
+static
+void __iomem *bcm63xx_pcie_map_bus(struct pci_bus *the_bus, unsigned int devfn,
+				   int where)
+{
+	struct pci_host_bridge *host = pci_find_host_bridge(the_bus);
+	struct bcm63xx_pcie *pcie = pci_host_bridge_priv(host);
+	unsigned int bus = the_bus->number;
+	unsigned int dev = PCI_SLOT(devfn), fun = PCI_FUNC(devfn);
+
+	if (pci_is_root_bus(the_bus) && dev > 0)
+		/*
+		 * device #0 access only on the root bus.
+		 */
+		return NULL;
+
+	if (pci_is_root_bus(the_bus))
+		/*
+		 * root bus/bridge configuration space is mapped at
+		 * the start of the PCIe register area.
+		 */
+		return pcie->regs + where;
+
+	/* select the target BDF in the indexed config window */
+	bcm_pcie_writel(EXT_CFG_INDEX_BUS_NUM(bus) |
+			EXT_CFG_INDEX_DEV_NUM(dev) |
+			EXT_CFG_INDEX_FUN_NUM(fun), pcie,
+			BCM63XX_PCIE_EXT_CFG_INDEX);
+
+	return pcie->regs + BCM63XX_PCIE_EXT_CONF_SPACE + where;
+}
+
+struct pci_ops bcm63xx_pcie_ops = {
+	.map_bus	= bcm63xx_pcie_map_bus,
+	.read		= pci_generic_config_read,
+	.write		= pci_generic_config_write,
+};
+
+/*
+ * top IRQ chip: do what is needed for parent chip (the bottom one),
+ * and also invoke PCI layer mask/unmask where applicable.
+ */
+static void bcm63xx_pcie_msi_top_irq_ack(struct irq_data *d)
+{
+	irq_chip_ack_parent(d);
+}
+
+static void bcm63xx_pcie_msi_top_irq_mask(struct irq_data *d)
+{
+	pci_msi_mask_irq(d);
+	irq_chip_mask_parent(d);
+}
+
+static void bcm63xx_pcie_msi_top_irq_unmask(struct irq_data *d)
+{
+	pci_msi_unmask_irq(d);
+	irq_chip_unmask_parent(d);
+}
+
+/* NOTE(review): trailing space in the chip name — intentional? */
+struct irq_chip bcm63xx_pcie_msi_top_irq_chip = {
+	.name		= "PCIe MSI ",
+	.irq_ack	= bcm63xx_pcie_msi_top_irq_ack,
+	.irq_enable	= bcm63xx_pcie_msi_top_irq_unmask,
+	.irq_disable	= bcm63xx_pcie_msi_top_irq_mask,
+	.irq_unmask	= bcm63xx_pcie_msi_top_irq_unmask,
+	.irq_mask	= bcm63xx_pcie_msi_top_irq_mask,
+};
+
+/*
+ * bottom IRQ chip: program the PCIE controller registers as needed.
+ *
+ * NOTE: bcm63xx has a direct CLEAR/SET register, so this avoids the
+ * need of a spinlock, unlike pcie-rcar-host.c
+ */
+static void bcm63xx_pcie_msi_bottom_irq_ack(struct irq_data *d)
+{
+	struct bcm63xx_pcie *pcie = irq_data_get_irq_chip_data(d);
+
+	bcm_pcie_writel(BIT(d->hwirq), pcie, BCM63XX_PCIE_MSI_CLEAR);
+}
+
+static void bcm63xx_pcie_msi_bottom_irq_mask(struct irq_data *d)
+{
+	struct bcm63xx_pcie *pcie = irq_data_get_irq_chip_data(d);
+
+	bcm_pcie_writel(BIT(d->hwirq), pcie, BCM63XX_PCIE_MSI_MASK_SET);
+}
+
+static void bcm63xx_pcie_msi_bottom_irq_unmask(struct irq_data *d)
+{
+	struct bcm63xx_pcie *pcie = irq_data_get_irq_chip_data(d);
+
+	bcm_pcie_writel(BIT(d->hwirq), pcie, BCM63XX_PCIE_MSI_MASK_CLEAR);
+}
+
+/* MSI vectors cannot be steered to a specific CPU on this hardware */
+static int bcm63xx_pcie_msi_set_affinity(struct irq_data *d,
+					 const struct cpumask *mask,
+					 bool force)
+{
+	return -EINVAL;
+}
+
+/* MSI message: fixed doorbell address, vector number in the data */
+static void bcm63xx_pcie_compose_msi_msg(struct irq_data *d,
+					 struct msi_msg *msg)
+{
+	msg->address_lo = BCM63XX_PCIE_MSI_ADDR_MAGIC;
+	msg->address_hi = 0;
+	msg->data = (BCM63XX_PCIE_MSI_DATA_MAGIC | d->hwirq);
+}
+
+struct irq_chip bcm63xx_pcie_msi_bottom_irq_chip = {
+	.name			= "BCM63XX MSI",
+	.irq_ack		= bcm63xx_pcie_msi_bottom_irq_ack,
+	.irq_mask		= bcm63xx_pcie_msi_bottom_irq_mask,
+	.irq_unmask		= bcm63xx_pcie_msi_bottom_irq_unmask,
+	.irq_set_affinity	= bcm63xx_pcie_msi_set_affinity,
+	.irq_compose_msi_msg	= bcm63xx_pcie_compose_msi_msg,
+};
+
+/*
+ * Allocate nr_irqs contiguous MSI vectors from the 32-entry map
+ * (power-of-two aligned, as multi-MSI requires).  Returns 0 or
+ * -ENOSPC when no suitable free region exists.
+ */
+static int bcm63xx_pcie_msi_domain_alloc(struct irq_domain *domain,
+					 unsigned int virq,
+					 unsigned int nr_irqs, void *args)
+{
+	struct bcm63xx_pcie *pcie = domain->host_data;
+	int hwirq;
+	int i;
+
+	mutex_lock(&pcie->msi_lock);
+	hwirq = bitmap_find_free_region(pcie->msi_used, MSI_MAP_SIZE,
+					order_base_2(nr_irqs));
+	mutex_unlock(&pcie->msi_lock);
+
+	if (hwirq < 0) {
+		/* was missing the trailing newline and used %d for an
+		 * unsigned argument */
+		dev_err(pcie->dev, "unable to satisfy MSI request of %u MSI "
+			"irqs.\n", nr_irqs);
+		return -ENOSPC;
+	}
+
+	for (i = 0; i < nr_irqs; ++i) {
+		irq_domain_set_info(domain, virq + i, hwirq + i,
+				    &bcm63xx_pcie_msi_bottom_irq_chip,
+				    domain->host_data,
+				    handle_edge_irq, NULL, NULL);
+	}
+
+	return 0;
+}
+
+/* return the vectors of virq's region to the allocation bitmap */
+static void bcm63xx_pcie_msi_domain_free(struct irq_domain *domain,
+					 unsigned int virq,
+					 unsigned int nr_irqs)
+{
+	struct bcm63xx_pcie *pcie = domain->host_data;
+	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
+
+	mutex_lock(&pcie->msi_lock);
+	bitmap_release_region(pcie->msi_used, d->hwirq, order_base_2(nr_irqs));
+	mutex_unlock(&pcie->msi_lock);
+}
+
+
+struct irq_domain_ops bcm63xx_pcie_msi_domain_ops = {
+	.alloc = bcm63xx_pcie_msi_domain_alloc,
+	.free = bcm63xx_pcie_msi_domain_free,
+};
+
+/* top-level MSI domain info; multi-MSI is supported */
+static struct msi_domain_info bcm63xx_pcie_msi_info = {
+	.flags  = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+		   MSI_FLAG_MULTI_PCI_MSI),
+	.chip   = &bcm63xx_pcie_msi_top_irq_chip,
+};
+
+/*
+ * MSI interrupt handler: drain BCM63XX_PCIE_MSI_STATUS and dispatch
+ * every pending vector to its mapped Linux interrupt.
+ */
+static irqreturn_t bcm63xx_pcie_msi_irq(int irq, void *dev_id)
+{
+	struct bcm63xx_pcie *pcie = dev_id;
+	unsigned long stat;
+
+	stat = bcm_pcie_readl(pcie, BCM63XX_PCIE_MSI_STATUS);
+	if (!stat)
+		return IRQ_NONE;
+
+	while (stat) {
+		u32 index = find_first_bit(&stat, MSI_MAP_SIZE);
+		/* renamed from "irq", which shadowed the parameter */
+		unsigned int virq;
+
+		/* 1UL: "1 << 31" would shift into a signed int's sign bit */
+		stat &= ~(1UL << index);
+		virq = irq_find_mapping(pcie->msi_domain->parent, index);
+		if (virq)
+			generic_handle_irq(virq);
+		else
+			dev_err(pcie->dev, "unexpected MSI: %u\n", index);
+	}
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Set up MSI support: create the two-level IRQ domain hierarchy
+ * (linear hwirq domain with a PCI/MSI domain on top), install the
+ * MSI handler, and program the controller's MSI match address/data
+ * registers.  Returns 0 or a negative errno.
+ */
+static int bcm63xx_pcie_msi_setup(struct bcm63xx_pcie *pcie)
+{
+	int err;
+	struct irq_domain *parent;
+	struct fwnode_handle *fwnode = dev_fwnode(pcie->dev);
+	mutex_init(&pcie->msi_lock);
+
+	parent = irq_domain_create_linear(fwnode,
+					  MSI_MAP_SIZE,
+					  &bcm63xx_pcie_msi_domain_ops,
+					  pcie);
+	if (!parent) {
+		dev_err(pcie->dev, "unable to create parent IRQ domain.\n");
+		return -ENOMEM;
+	}
+	irq_domain_update_bus_token(parent, DOMAIN_BUS_NEXUS);
+
+	pcie->msi_domain = pci_msi_create_irq_domain(fwnode,
+						     &bcm63xx_pcie_msi_info,
+						     parent);
+	if (!pcie->msi_domain) {
+		dev_err(pcie->dev, "unable to create MSI domain.\n");
+		err = -ENOMEM;
+		goto err_remove_parent;
+	}
+
+	/* IRQF_SHARED: the "intr" line carries more than just MSIs */
+	err = devm_request_irq(pcie->dev, pcie->msi_irq, bcm63xx_pcie_msi_irq,
+			       IRQF_SHARED, "msi_irq", pcie);
+	if (err) {
+		dev_err(pcie->dev, "unable to request PCIe MSI IRQ.\n");
+		goto err_remove_msi_domain;
+	}
+
+	/*
+	 * Program the Root Complex Registers for matching address hi
+	 * and low.
+	 *
+	 * The address should be unique with in the
+	 * downstream/upstream BAR mapping
+	 */
+	bcm_pcie_writel(BCM63XX_PCIE_MSI_ADDR_MAGIC |
+			MSI_BAR_CONFIG_LO_ENABLE_MASK,
+			pcie,
+			BCM63XX_PCIE_MSI_BAR_CONFIG_LO);
+
+	bcm_pcie_writel(0,
+			pcie,
+			BCM63XX_PCIE_MSI_BAR_CONFIG_HI);
+
+	/*
+	 * Program the RC registers for matching data pattern
+	 */
+	bcm_pcie_writel(BCM63XX_PCIE_MSI_DATA_MAGIC |
+			MSI_DATA_CONFIG_MATCH_MASK,
+			pcie,
+			BCM63XX_PCIE_MSI_DATA_CONFIG);
+
+	/*
+	 * Clear all MSI interrupts initially
+	 */
+	bcm_pcie_writel(0xffffffff, pcie, BCM63XX_PCIE_MSI_CLEAR);
+
+	/*
+	 * disable all available MSI vectors
+	 */
+	bcm_pcie_writel(0xffffffff, pcie, BCM63XX_PCIE_MSI_MASK_SET);
+
+
+	/*
+	 * Enable MSI interrupt at L1 Intr1 controller
+	 */
+	bcm_pcie_writel(CPU_INTR1_PCIE_MSI_INTR_CPU_INTR, pcie,
+			BCM63XX_PCIE_CPU_INTR1_MASK_CLEAR);
+
+	return 0;
+
+err_remove_msi_domain:
+	irq_domain_remove(pcie->msi_domain);
+err_remove_parent:
+	irq_domain_remove(parent);
+	return err;
+}
+
+/*
+ * Unfortunately, whenever the link goes down, the root complex PCI
+ * Configuration register space gets reset to its default values, and
+ * ignores writes to it as well.
+ *
+ * The most visible consequence is that whenever the link comes up,
+ * the root complex has its MEM_SPACE bit disabled so access to the
+ * endpoint memory will fail.
+ *
+ * There are also other less visible effects of this (AER
+ * configuration, legacy interrupt configuration, ...)
+ *
+ * To work around this, backup and restore a few selected registers
+ * from the rootcomplex PCI configuration space, whenever we assert /
+ * deassert the PERST# signal.
+ */
+/* snapshot the selected RC config-space registers (see comment above) */
+static void bcm63xx_pcie_backup_rc_config_space(struct bcm63xx_pcie *pcie)
+{
+	size_t n;
+
+	for (n = 0; n < ARRAY_SIZE(pcie->rc_config_space_backup); n++)
+		pcie->rc_config_space_backup[n] =
+			readl(pcie->regs + rc_config_space_backup_offsets[n]);
+}
+
+/* write back the RC config-space registers previously backed up */
+static void bcm63xx_pcie_restore_rc_config_space(struct bcm63xx_pcie *pcie)
+{
+	size_t n;
+
+	for (n = 0; n < ARRAY_SIZE(pcie->rc_config_space_backup); n++)
+		writel(pcie->rc_config_space_backup[n],
+		       pcie->regs + rc_config_space_backup_offsets[n]);
+}
+
+/*
+ * Deassert PERST#, and wait a bit for the link to come back up.  If
+ * the link is seen back up, restore RC PCI configuration space
+ * registers.
+ */
+static ssize_t bcm63xx_pcie_store_reinit_link(struct device *dev,
+					      struct device_attribute *attr,
+					      const char *buf, size_t count)
+{
+	u32 reg;
+	/* fixed: stray double semicolon in the original */
+	struct bcm63xx_pcie *pcie = dev_get_drvdata(dev);
+
+	/* only act on a write starting with '1' */
+	if (!count)
+		goto done;
+
+	if (buf[0] != '1')
+		goto done;
+
+	if (bcm63xx_pcie_check_link_up(pcie)) {
+		dev_info(pcie->dev, "link is already up, naught to do.\n");
+		return count;
+	}
+
+	dev_dbg(dev, "PERST reset deassert.\n");
+	reg = bcm_pcie_readl(pcie, BCM63XX_PCIE_MISC_CTRL);
+	reg |= MISC_CTRL_PCIE_RESETn;
+	bcm_pcie_writel(reg, pcie, BCM63XX_PCIE_MISC_CTRL);
+	msleep(100);	/* give the link time to retrain */
+
+	if (!bcm63xx_pcie_check_link_up(pcie)) {
+		dev_err(pcie->dev, "PCIe link: DOWN.\n");
+		return -ENXIO;
+	} else {
+		u32 link_status = bcm_pcie_readl(pcie,
+						 BCM63XX_PCIE_LINK_STATUS);
+		dev_info(pcie->dev, "PCIe link: UP, GEN%d, %d lane(s).\n",
+			LINK_STATUS_SPEED(link_status),
+			LINK_STATUS_WIDTH(link_status));
+
+		/* RC config space was reset while the link was down */
+		bcm63xx_pcie_restore_rc_config_space(pcie);
+	}
+
+	pcie->ep_reset_asserted = false;
+
+done:
+	return count;
+}
+
+static DEVICE_ATTR(reinit_link, 0200, NULL, bcm63xx_pcie_store_reinit_link);
+
+/*
+ * report current link state seen by the root complex.
+ */
+static ssize_t bcm63xx_pcie_show_link(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct bcm63xx_pcie *pcie = dev_get_drvdata(dev);
+
+	/* sysfs_emit() is the preferred, bounds-checked way to fill buf */
+	return sysfs_emit(buf, "%c\n",
+			  bcm63xx_pcie_check_link_up(pcie) ? '1' : '0');
+}
+
+static DEVICE_ATTR(link, 0400, bcm63xx_pcie_show_link, NULL);
+
+/*
+ * Assert PERST#, after saving the important RC PCI configuration space bits.
+ */
+static ssize_t bcm63xx_pcie_store_ep_reset_assert(struct device *dev,
+						  struct device_attribute *attr,
+						  const char *buf, size_t count)
+{
+	struct bcm63xx_pcie *pcie = dev_get_drvdata(dev);
+	u32 reg;
+
+	/* only act on a write starting with '1' */
+	if (!count)
+		goto done;
+
+	if (buf[0] != '1')
+		goto done;
+
+	if (pcie->ep_reset_asserted) {
+		dev_info(dev, "EP already in reset.\n");
+		goto done;
+	}
+
+	dev_dbg(dev, "PERST reset assert.\n");
+	/* save RC config space: it resets while the link is down */
+	bcm63xx_pcie_backup_rc_config_space(pcie);
+
+	reg = bcm_pcie_readl(pcie, BCM63XX_PCIE_MISC_CTRL);
+	reg &= ~MISC_CTRL_PCIE_RESETn;
+	bcm_pcie_writel(reg, pcie, BCM63XX_PCIE_MISC_CTRL);
+
+	pcie->ep_reset_asserted = true;
+done:
+	return count;
+}
+
+static DEVICE_ATTR(ep_reset_assert, 0200, NULL,
+		   bcm63xx_pcie_store_ep_reset_assert);
+
+/*
+ * reset assertion status
+ */
+static ssize_t bcm63xx_pcie_show_ep_reset_asserted(struct device *dev,
+						struct device_attribute *attr,
+						char *buf)
+{
+	struct bcm63xx_pcie *pcie = dev_get_drvdata(dev);
+
+	/* sysfs_emit() is the preferred, bounds-checked way to fill buf */
+	return sysfs_emit(buf, "%c\n", pcie->ep_reset_asserted ? '1' : '0');
+}
+
+static DEVICE_ATTR(ep_reset_asserted, 0400, bcm63xx_pcie_show_ep_reset_asserted,
+		   NULL);
+
+
+/* sysfs attributes exported on the platform device */
+static const struct attribute *bcm63xx_pcie_sysfs_attributes[] = {
+	&dev_attr_reinit_link.attr,
+	&dev_attr_link.attr,
+	&dev_attr_ep_reset_assert.attr,
+	&dev_attr_ep_reset_asserted.attr,
+	NULL,
+};
+
+/*
+ * create attributes for PCIe link & EP reset GPIO assert control.
+ */
+static int bcm63xx_pcie_init_sysfs(struct bcm63xx_pcie *pcie)
+{
+	/* NOTE(review): never removed — the driver has no .remove yet */
+	return sysfs_create_files(&pcie->dev->kobj,
+				  bcm63xx_pcie_sysfs_attributes);
+}
+
+/*
+ * Probe: acquire reset/UBUS/rcal resources, map registers, bring the
+ * link up, set up MSI, scan the root bus and expose the sysfs knobs.
+ */
+static int bcm63xx_pcie_probe(struct platform_device *pdev)
+{
+	struct reset_control *reset;
+	struct ubus4_master *master;
+	int error;
+	struct bcm63xx_pcie *pcie;
+	struct pci_host_bridge *host;
+	struct resource *res;
+	struct pci_bus *child;
+	int rcal_1um_vert_value;
+
+	dev_dbg(&pdev->dev, "probe.\n");
+
+	reset = devm_reset_control_get(&pdev->dev, "pcie0");
+	if (IS_ERR(reset)) {
+		if (PTR_ERR(reset) != -EPROBE_DEFER)
+			dev_err(&pdev->dev, "missing pcie0 reset control\n");
+		return PTR_ERR(reset);
+	}
+
+	master = ubus4_master_of_get(pdev->dev.of_node);
+	if (IS_ERR(master)) {
+		dev_err(&pdev->dev, "unable to get UBUS master: %ld.\n",
+			PTR_ERR(master));
+		return PTR_ERR(master);
+	}
+
+	rcal_1um_vert_value = procmon_get_rcal(pdev->dev.of_node);
+	if (rcal_1um_vert_value == -EPROBE_DEFER)
+		/*
+		 * should this be fatal for non EPROBE_DEFER cases?
+		 */
+		return rcal_1um_vert_value;
+
+	host = devm_pci_alloc_host_bridge(&pdev->dev, sizeof (*pcie));
+	if (!host)
+		return -ENOMEM;
+
+	pcie = pci_host_bridge_priv(host);
+	pcie->rcal_1um_vert_value = rcal_1um_vert_value;
+
+	/* ranges are filled in later by bcm63xx_pcie_parse_ranges() */
+	make_invalid_resource(&pcie->mem_res);
+	make_invalid_resource(&pcie->io_res);
+	make_invalid_resource(&pcie->bus_range);
+
+	pcie->msi_irq = platform_get_irq_byname(pdev, "intr");
+	if (pcie->msi_irq < 0)
+		return pcie->msi_irq;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "unable to get PCIe registers resource.\n");
+		return -ENOENT;
+	}
+	dev_dbg(&pdev->dev, "PCIe registers: %pR\n", res);
+
+	pcie->dev = &pdev->dev;
+	pcie->regs_phys = res->start;
+
+	dev_set_drvdata(&pdev->dev, pcie);
+
+	error = bcm63xx_pcie_parse_ranges(pcie);
+	if (error)
+		return error;
+
+	host->busnr = pcie->bus_range.start;
+	host->dev.parent = &pdev->dev;
+	host->ops = &bcm63xx_pcie_ops;
+	host->map_irq = of_irq_parse_and_map_pci;
+	host->swizzle_irq = pci_common_swizzle;
+
+	pcie->irq = irq_of_parse_and_map(pcie->dev->of_node, 0);
+	if (!pcie->irq) {
+		dev_err(pcie->dev, "unable to get IRQ.\n");
+		/* can only assume it's not there :( */
+		return -ENOENT;
+	}
+
+	error = bcm63xx_pcie_parse_dt_port_config(pcie);
+	if (error)
+		return error;
+
+	/* power up block */
+	error = reset_control_deassert(reset);
+	if (error) {
+		dev_err(&pdev->dev, "unable to deassert reset.\n");
+		return error;
+	}
+	mdelay(10);
+
+	error = bcm63xx_pcie_ubus_config(master);
+	if (error) {
+		dev_err(&pdev->dev, "unable to configure PCIe UBUS master.\n");
+		return error;
+	}
+
+	/*
+	 * devm_ioremap_resource() returns an ERR_PTR(), never NULL:
+	 * the original "if (!pcie->regs)" test could never catch a
+	 * failure.  Use IS_ERR()/PTR_ERR() instead.
+	 */
+	pcie->regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pcie->regs)) {
+		dev_err(&pdev->dev, "unable to ioremap registers.\n");
+		return PTR_ERR(pcie->regs);
+	}
+
+	error = bcm63xx_pcie_init(pcie);
+	if (error)
+		return error;
+
+	error = bcm63xx_pcie_reset(pcie);
+	if (error)
+		return error;
+
+	if (!bcm63xx_pcie_check_link_up(pcie)) {
+		dev_err(pcie->dev, "PCIe link: DOWN.\n");
+		return -ENXIO;
+	} else {
+		u32 link_status = bcm_pcie_readl(pcie,
+						 BCM63XX_PCIE_LINK_STATUS);
+		dev_info(pcie->dev, "PCIe link: UP, GEN%d, %d lane(s).\n",
+			LINK_STATUS_SPEED(link_status),
+			LINK_STATUS_WIDTH(link_status));
+	}
+
+	bcm63xx_pcie_add_resources(pcie);
+
+	error = bcm63xx_pcie_core_config(pcie);
+	if (error)
+		return error;
+
+	error = bcm63xx_pcie_msi_setup(pcie);
+	if (error)
+		return error;
+
+	error = pci_scan_root_bus_bridge(host);
+	if (error) {
+		dev_err(&pdev->dev, "unable to scan root bus bridge: %d\n",
+			error);
+		return error;
+	}
+
+	pci_bus_size_bridges(host->bus);
+	pci_bus_assign_resources(host->bus);
+	list_for_each_entry(child, &host->bus->children, node)
+		pcie_bus_configure_settings(child);
+
+	pci_bus_add_devices(host->bus);
+
+	/* dead "#if 0" pci_create_root_bus() code removed */
+	return bcm63xx_pcie_init_sysfs(pcie);
+}
+
+static const struct of_device_id bcm63xx_pcie_match[] = {
+	{ .compatible = "brcm,bcm63xx-pcie" },
+	{},
+};
+/*
+ * Fixed: the original referenced the non-existent symbol
+ * "bcm63xx_pcie_of_match", breaking the modular build.
+ */
+MODULE_DEVICE_TABLE(of, bcm63xx_pcie_match);
+
+struct platform_driver bcm63xx_pcie_driver = {
+	.probe		= bcm63xx_pcie_probe,
+	/*
+	 * FIXME: no .remove yet — the device cannot be unbound
+	 * cleanly (sysfs files, domains and bus stay behind).
+	 */
+	.driver		= {
+		.name		= "bcm63xx-pcie",
+		/* .owner is set by module_platform_driver() */
+		.of_match_table	= bcm63xx_pcie_match,
+	},
+};
+
+module_platform_driver(bcm63xx_pcie_driver);
+
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("Broadcom BCM63XX SoCs PCIe root complex driver.");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/pci/controller/pcie-bcm63xx.h	2025-09-25 17:40:34.531362089 +0200
@@ -0,0 +1,131 @@
+/*
+ * pcie-bcm63xx.h for pcie-bcm63xx
+ * Created by <nschichan@freebox.fr> on Mon Jun 10 16:54:54 2019
+ */
+
+#pragma once
+
+#define _F(_v, _s, _m)			(((_v) & (_m)) << (_s))
+
+/*
+ * TYPE 1 RC configuration registers
+ */
+#define BCM63XX_PCIE_VENDOR_ID			0x0000
+#define  VENDOR_ID_VENDOR(x)			((x) & 0xffff)
+#define  VENDOR_ID_DEVICE(x)			(((x) >> 16) & 0xffff)
+
+#define BCM63XX_PCIE_LINK_CAP			0x00b8
+#define  LINK_CAP_SPEED(x)			((x) & 0xf)
+#define  LINK_CAP_WIDTH(x)			(((x) >> 4) & 0x3f)
+
+#define BCM63XX_PCIE_LINK_STATUS		0x00bc
+#define  LINK_STATUS_WIDTH(x)			(((x) >> 20) & 0x3f)
+#define  LINK_STATUS_SPEED(x)			(((x) >> 16) & 0xf)
+
+#define BCM63XX_PCIE_LINK_STATUS_CONTROL2	0x00dc
+#define  LINK_STATUS_CONTROL2_SPEED_MASK	_F(0x3, 0, 0x3)
+#define  LINK_STATUS_CONTROL2_SPEED(x)		_F(x, 0, 0x3)
+
+#define BCM63XX_PCIE_PRIV1_LINK_CAP		(0x0428 + 0xb4)
+#define  PRIV1_LINK_CAP_SPEED_MASK		_F(0x3, 0, 0x3)
+#define  PRIV1_LINK_CAP_SPEED(x)		_F(x, 0, 0x3)
+
+#define BCM63XX_PCIE_PRIV1_ID_VAL3		(0x428 + 0x14)
+#define  PRIV1_ID_VAL3_REV_ID_MASK		(0xff << 24)
+
+#define BCM63XX_PCIE_PHY_ADDR			0x1100
+#define  PHY_ADDR_READ				(1 << 20)
+#define  PHY_ADDR_WRITE				(0 << 20)
+#define  PHY_ADDR_PHY(x)			_F(x, 16, 0xf)
+#define  PHY_ADDR_REG(x)			_F(x, 0x0, 0x1f)
+
+#define BCM63XX_PCIE_DL_STATUS			0x1048
+#define  DL_STATUS_LINK_UP			(0x2000)
+
+#define BCM63XX_PCIE_PHY_WR_DATA		0x1104
+#define  PHY_WR_DATA_TRIG			(1 << 31)
+#define  PHY_WR_DATA_MASK			0xffff
+#define BCM63XX_PCIE_PHY_RD_DATA		0x1108
+#define  PHY_RD_DATA_DONE			(1 << 31)
+#define  PHY_RD_DATA_MASK			0xffff
+
+#define BCM63XX_PCIE_MISC_MISC_CTRL		(0x4008)
+#define  MISC_CTRL_MAX_BURST_SIZE_64B		(0 << 20)
+#define  MISC_CTRL_MAX_BURST_SIZE_128B		(1 << 20)
+#define  MISC_CTRL_MAX_BURST_SIZE_MASK		(3 << 20)
+/*
+ * The burst-align field of MISC_MISC_CTRL moved with controller
+ * revision: rev >= 0x310 uses a 3-bit field at bit 17, older
+ * revisions a single bit at 19.
+ */
+static inline u32 misc_ctrl_burst_align_mask(u32 rev)
+{
+	return rev >= 0x310 ? (7 << 17) : (1 << 19);
+}
+/* encode burst-align value 'b' at the revision-dependent position */
+static inline u32 misc_ctrl_burst_align(u32 rev, u32 b)
+{
+	return rev >= 0x310 ? (b << 17) : (b << 19);
+}
+#define  MISC_CTRL_CFG_READ_UR_MODE		 (1 << 13)
+#define  MISC_CTRL_PCIE_IN_WR_COMBINE	       (1 << 11)
+#define  MISC_CTRL_PCIE_RCB_MPS_MODE		(1 << 10)
+#define  MISC_CTRL_PCIE_RCB_64B_MODE		(1 << 7)
+
+#define BCM63XX_PCIE_MISC_WIN_LO(win)		(0x400c + (win) * 8)
+#define  WIN_LO_BASE_ADDR(x)			((x) & 0xfff00000)
+#define BCM63XX_PCIE_MISC_WIN_HI(win)		(0x4010 + (win) * 8)
+
+#define BCM63XX_PCIE_MISC_BAR_CONFIG_LO(bar)	(0x402c + (bar) * 8)
+
+#define BCM63XX_PCIE_MISC_UBUS_CTRL		(0x40a4)
+#define  MISC_UBUS_CTRL_UBUS_PCIE_REPLY_DECERR_DIS (1 << 19)
+#define  MISC_UBUS_CTRL_UBUS_WR_WITH_REPLY	(1 << 14)
+#define  MISC_UBUS_CTRL_UBUS_REPLY_ERR_DIS	(1 << 13)
+
+#define BCM63XX_PCIE_MISC_UBUS_BAR_REMAP(bar)	(0x40ac + (bar) * 8)
+#define  UBUS_BAR_ACCESS_EN			(1 << 0)
+
+#define BCM63XX_PCIE_MISC_CTRL			0x4064
+#define  MISC_CTRL_PCIE_RESETn			(1 << 2)
+
+#define BCM63XX_PCIE_MISC_REVISION		0x406c
+
+#define BCM63XX_PCIE_MISC_WIN
+#define BCM63XX_PCIE_MISC_WIN_LIMIT(win)	(0x4070 + (win) * 4)
+#define  WIN_LIMIT_END(x)			((x) & 0xfff00000)
+#define  WIN_LIMIT_BASE(x)			(((x) >> 16) & 0xffff0)
+
+#define BCM63XX_PCIE_EXT_CONF_SPACE		0x8000
+
+#define BCM63XX_PCIE_EXT_CFG_INDEX		0x9000
+#define  EXT_CFG_INDEX_BUS_NUM(x)		_F(x, 20, 0xff)
+#define  EXT_CFG_INDEX_DEV_NUM(x)		_F(x, 15, 0x1f)
+#define  EXT_CFG_INDEX_FUN_NUM(x)		_F(x, 12, 0x7)
+
+#define BCM63XX_PCIE_CPU_INTR1_MASK_CLEAR	0x940c
+#define  CPU_INTR1_PCIE_INTA_CPU_INTR		(1 << 1)
+#define  CPU_INTR1_PCIE_INTB_CPU_INTR		(1 << 2)
+#define  CPU_INTR1_PCIE_INTC_CPU_INTR		(1 << 3)
+#define  CPU_INTR1_PCIE_INTD_CPU_INTR		(1 << 4)
+#define  CPU_INTR1_PCIE_INTR_CPU_INTR		(1 << 5)
+#define  CPU_INTR1_PCIE_NMI_CPU_INTR		(1 << 6)
+#define  CPU_INTR1_PCIE_UBUS_CPU_INTR		(1 << 7)
+#define  CPU_INTR1_PCIE_MSI_INTR_CPU_INTR	(1 << 9)
+
+#define BCM63XX_PCIE_MSI_DATA_MAGIC		0x0000BCA0
+#define BCM63XX_PCIE_MSI_ADDR_MAGIC		0xFFFFFFFC
+
+#define BCM63XX_PCIE_MSI_BASE			0x4500
+
+#define BCM63XX_PCIE_MSI_STATUS			(0x4500)
+#define BCM63XX_PCIE_MSI_CLEAR			(0x4508)
+#define BCM63XX_PCIE_MSI_MASK_SET		(0x4510)
+#define BCM63XX_PCIE_MSI_MASK_CLEAR		(0x4514)
+
+/* #define BCM63XX_PCIE_MSI_MSI_INTR_MASK		0xFFFFFFFF */
+/* #define BCM63XX_PCIE_MSI_MSI_INTR_SHIFT		0 */
+
+
+#define BCM63XX_PCIE_MSI_BAR_CONFIG_LO		(0x4044)
+#define  MSI_BAR_CONFIG_LO_MATCH_ADDR_MASK	0xfffffffc
+#define  MSI_BAR_CONFIG_LO_ENABLE_MASK		(1 << 0)
+
+#define BCM63XX_PCIE_MSI_BAR_CONFIG_HI		(0x4048)
+
+#define BCM63XX_PCIE_MSI_DATA_CONFIG		(0x404C)
+#define  MSI_DATA_CONFIG_MATCH_MASK		0xffe00000
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/phy/xdsl_phy_api.c	2025-09-25 17:40:34.591362386 +0200
@@ -0,0 +1,205 @@
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <linux/kref.h>
+#include <linux/xdsl_phy_api.h>
+
+static DEFINE_MUTEX(phy_device_list_mutex);
+static LIST_HEAD(phy_device_list);
+
+/*
+ * Find a registered PHY by OF node (when non-NULL) or by numeric id.
+ * Caller must hold phy_device_list_mutex.
+ */
+static struct xdsl_phy *__phy_lookup(struct device_node *node,
+				     unsigned int id)
+{
+	struct xdsl_phy *pd;
+
+	/* fixed: the loop line was indented with spaces, not a tab */
+	list_for_each_entry(pd, &phy_device_list, next) {
+		if (node) {
+			if (pd->of_node == node)
+				return pd;
+		} else {
+			if (pd->id == id)
+				return pd;
+		}
+	}
+	return NULL;
+}
+
+/*
+ * Attach to a registered xDSL PHY, looked up by OF node (if non-NULL)
+ * or by numeric id.  Returns ERR_PTR(-EPROBE_DEFER) when the PHY is
+ * not yet registered and ERR_PTR(-EBUSY) when it is already claimed.
+ * change_cb (if any) is delivered after xdsl_phy_start().
+ */
+struct xdsl_phy *xdsl_phy_attach(struct device_node *node,
+				 unsigned int id,
+				 void (*change_cb)(struct xdsl_phy *,
+						  void *),
+				 void *change_priv)
+{
+	struct xdsl_phy *phy_dev;
+
+	if (!node && !id)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&phy_device_list_mutex);
+
+	phy_dev = __phy_lookup(node, id);
+
+	if (!phy_dev) {
+		mutex_unlock(&phy_device_list_mutex);
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	if (phy_dev->in_use) {
+		mutex_unlock(&phy_device_list_mutex);
+		return ERR_PTR(-EBUSY);
+	}
+
+	/*
+	 * Pin the provider module; fail the attach if it is going
+	 * away (the original ignored the try_module_get() result).
+	 */
+	if (!try_module_get(phy_dev->owner)) {
+		mutex_unlock(&phy_device_list_mutex);
+		return ERR_PTR(-ENODEV);
+	}
+
+	phy_dev->in_use = true;
+	phy_dev->started = false;
+	phy_dev->change_cb = change_cb;
+	phy_dev->change_priv = change_priv;
+
+	mutex_unlock(&phy_device_list_mutex);
+
+	return phy_dev;
+}
+EXPORT_SYMBOL(xdsl_phy_attach);
+
+/*
+ * Deferred "initial state" notification: lets xdsl_phy_start() return
+ * before the consumer's change_cb is invoked.
+ */
+static void initial_change_work_func(struct work_struct *work)
+{
+	struct xdsl_phy *phy_dev = container_of(work,
+						struct xdsl_phy,
+						initial_change_work);
+
+	/* bail out if the consumer detached or stopped in the meantime */
+	mutex_lock(&phy_dev->lock);
+	if (!phy_dev->in_use || !phy_dev->change_cb || !phy_dev->started) {
+		mutex_unlock(&phy_dev->lock);
+		return;
+	}
+
+	phy_dev->initial_change_pending = false;
+	phy_dev->change_cb(phy_dev, phy_dev->change_priv);
+	mutex_unlock(&phy_dev->lock);
+}
+
+/*
+ * Mark the PHY as started and schedule delivery of the initial link
+ * state to the consumer callback.
+ */
+void xdsl_phy_start(struct xdsl_phy *phy_dev)
+{
+	mutex_lock(&phy_dev->lock);
+	phy_dev->initial_change_pending = true;
+	phy_dev->started = true;
+	schedule_work(&phy_dev->initial_change_work);
+	mutex_unlock(&phy_dev->lock);
+}
+EXPORT_SYMBOL(xdsl_phy_start);
+
+/* mark the PHY stopped: change callbacks are no longer delivered */
+void xdsl_phy_stop(struct xdsl_phy *phy_dev)
+{
+	mutex_lock(&phy_dev->lock);
+	phy_dev->started = false;
+	mutex_unlock(&phy_dev->lock);
+}
+EXPORT_SYMBOL(xdsl_phy_stop);
+
+/*
+ * Release a PHY obtained with xdsl_phy_attach().
+ *
+ * Fixes vs. original:
+ *  - cancel_work_sync() was called with phy_dev->lock held while the
+ *    work function also takes that lock, a potential deadlock.  The
+ *    state is now cleared under the lock first, then the work is
+ *    cancelled outside it (the work bails out once in_use is false).
+ *  - drop the module reference taken by xdsl_phy_attach().
+ */
+void xdsl_phy_detach(struct xdsl_phy *phy_dev)
+{
+	WARN_ON(!phy_dev->in_use);
+
+	mutex_lock(&phy_dev->lock);
+	phy_dev->in_use = false;
+	phy_dev->started = false;
+	phy_dev->initial_change_pending = false;
+	phy_dev->change_cb = NULL;
+	phy_dev->change_priv = NULL;
+	mutex_unlock(&phy_dev->lock);
+
+	cancel_work_sync(&phy_dev->initial_change_work);
+
+	module_put(phy_dev->owner);
+}
+EXPORT_SYMBOL(xdsl_phy_detach);
+
+/*
+ * Called by the PHY driver on a link state change; forwards to the
+ * consumer callback if one is attached and started.
+ *
+ * Fix vs. original: cancel_work_sync() was invoked with phy_dev->lock
+ * held while initial_change_work_func() takes the same lock, which
+ * can deadlock.  Cancel the pending initial notification before
+ * taking the lock instead — this explicit notification supersedes it.
+ */
+void xdsl_phy_device_notify_change(struct xdsl_phy *phy_dev)
+{
+	cancel_work_sync(&phy_dev->initial_change_work);
+
+	mutex_lock(&phy_dev->lock);
+	if (!phy_dev->in_use || !phy_dev->change_cb || !phy_dev->started) {
+		mutex_unlock(&phy_dev->lock);
+		return;
+	}
+
+	phy_dev->initial_change_pending = false;
+	phy_dev->change_cb(phy_dev, phy_dev->change_priv);
+	mutex_unlock(&phy_dev->lock);
+}
+EXPORT_SYMBOL(xdsl_phy_device_notify_change);
+
+/*
+ * Register a PHY provided by a driver.  ops->get_status and owner are
+ * mandatory.  Fails with -EEXIST when a PHY with the same OF node /
+ * id is already registered.
+ */
+int xdsl_phy_device_register(struct xdsl_phy *phy_dev)
+{
+	if (!phy_dev->ops ||
+	    !phy_dev->ops->get_status ||
+	    !phy_dev->owner)
+		return -EINVAL;
+
+	mutex_lock(&phy_device_list_mutex);
+
+	if (__phy_lookup(phy_dev->of_node, phy_dev->id)) {
+		mutex_unlock(&phy_device_list_mutex);
+		return -EEXIST;
+	}
+
+	/* initialize consumer-facing state before exposing on the list */
+	mutex_init(&phy_dev->lock);
+	mutex_init(&phy_dev->ops_lock);
+	phy_dev->in_use = false;
+	phy_dev->started = false;
+	INIT_WORK(&phy_dev->initial_change_work, initial_change_work_func);
+	phy_dev->initial_change_pending = false;
+	phy_dev->change_cb = NULL;
+	phy_dev->change_priv = NULL;
+
+	list_add(&phy_dev->next, &phy_device_list);
+	mutex_unlock(&phy_device_list_mutex);
+
+	return 0;
+}
+
+EXPORT_SYMBOL(xdsl_phy_device_register);
+
+/*
+ * Remove a PHY from the registry.  Must not be called while a
+ * consumer is still attached (in_use).
+ */
+void xdsl_phy_device_unregister(struct xdsl_phy *phy_dev)
+{
+	if (WARN_ON(phy_dev->in_use))
+		return;
+
+	/* no consumer left, so nothing can re-queue the work */
+	cancel_work_sync(&phy_dev->initial_change_work);
+	mutex_lock(&phy_device_list_mutex);
+	list_del(&phy_dev->next);
+	mutex_unlock(&phy_device_list_mutex);
+}
+
+EXPORT_SYMBOL(xdsl_phy_device_unregister);
+
+MODULE_LICENSE("GPL");
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/pinctrl/bcm/pinctrl-bcm63138.c	2025-09-25 17:40:34.599362426 +0200
@@ -0,0 +1,1005 @@
+/*
+ * Driver for Broadcom BCM63138 pinctrl
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/gpio/driver.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pinctrl/machine.h>
+#include <linux/pinctrl/pinconf.h>
+#include <linux/pinctrl/pinctrl.h>
+#include <linux/pinctrl/pinmux.h>
+#include <linux/pinctrl/pinconf-generic.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/seq_file.h>
+
+#include <dt-bindings/pinctrl/bcm63138-pinfunc.h>
+#include <dt-bindings/pinctrl/bcm63158-pinfunc.h>
+
+#include "../pinctrl-utils.h"
+
+/* parenthesize macro arguments so expression arguments stay safe */
+#define GPIO_DIR_REG(cfg, gpio)		(((gpio) / 32) * 4 + (cfg)->gpio_dir_offset)
+#define GPIO_VAL_REG(cfg, gpio)		(((gpio) / 32) * 4 + (cfg)->gpio_val_offset)
+#define BCM_MAX_GPIOS			160
+
+
+/*
+ * registers relative to cfg->testport_offset.
+ */
+#define GPIO_TPORT_DATA_MSB_REG(cfg)	(cfg->testport_offset + 0x00)
+#define GPIO_TPORT_DATA_LSB_REG(cfg)	(cfg->testport_offset + 0x04)
+#define TPORT_DATA_PIN_SHIFT		0
+#define TPORT_DATA_FUNC_SHIFT		12
+#define TPORT_DATA_FUNC_GPIO		5
+
+#define GPIO_TPORT_CMD_REG(cfg)		(cfg->testport_offset + 0x08)
+#define TPORT_CMD_LOAD			0x21
+
+/*
+ * register definition for extirq
+ */
+#define EXT_IRQ_MAX_COUNT		8
+
+#define EXTIRQ_CFG_LVL_STICKY_MASK(x)	(1 << (0 + (x)))
+#define EXTIRQ_CFG_SENSE_HI_MASK(x)	(1 << (8 + (x)))
+#define EXTIRQ_CFG_EDGE_BOTH_MASK(x)	(1 << (16 + (x)))
+#define EXTIRQ_CFG_LEVEL_MASK(x)	(1 << (24 + (x)))
+
+struct bcm63138_pinctrl_group {
+	const char		*name;
+	unsigned long		config;
+	unsigned		pin;
+};
+
+struct bcm63xx_pinctrl_soc_config;
+
+struct bcm63138_pinctrl {
+	struct device			*dev;
+	void __iomem			*base;
+
+	struct pinctrl_desc		desc;
+	struct pinctrl_dev		*pctl_dev;
+
+	struct gpio_chip		gpio_chip;
+	struct pinctrl_gpio_range	*range;
+
+	struct bcm63138_pinctrl_group	*groups;
+	unsigned			ngroups;
+	const char			**grp_names;
+	spinlock_t			lock;
+
+	void __iomem			*irq_base;
+	int				ext_irq_map[EXT_IRQ_MAX_COUNT];
+	u8				ext_irq_revmap[BCM_MAX_GPIOS];
+	struct irq_fwspec		ext_irq_parents[EXT_IRQ_MAX_COUNT];
+
+	const struct bcm63xx_pinctrl_soc_config *cfg;
+};
+
+static const char * const bcm63138_pin_functions[] = {
+	"mux0",
+	"mux1",
+	"mux2",
+	"mux3",
+	"mux4",
+	"mux5",
+	"mux6",
+};
+
+struct bcm63xx_desc_function {
+	const char *name;
+	const unsigned char num;
+};
+
+struct bcm63xx_desc_pin {
+	struct pinctrl_pin_desc pin;
+	const struct bcm63xx_desc_function *functions;
+};
+
+struct bcm63xx_pinctrl_soc_config {
+	const struct bcm63xx_desc_pin *pin_descs;
+	size_t nr_pin_descs;
+
+	/* from base gpio */
+	u32 gpio_dir_offset;
+	u32 gpio_val_offset;
+	u32 testport_offset;
+
+	/* from base irq */
+	unsigned int ext_irq_count;
+	u32 ext_irq_ctrl_offset;
+	u32 ext_irq_status_offset;
+	u32 ext_irq_clear_offset;
+	u32 ext_irq_mask_offset;
+	u32 ext_irq_mask_set_offset;
+	u32 ext_irq_mask_clear_offset;
+	u32 ext_irq_mux_sel0_offset;
+	u32 ext_irq_mux_sel1_offset;
+};
+
+#define BCM63XX_PIN(_pin, ...)					\
+	{							\
+		.pin = _pin,					\
+		.functions = (struct bcm63xx_desc_function[]){	\
+			__VA_ARGS__, { } },			\
+	}
+
+#define BCM63XX_FUNCTION(_num, _name)				\
+	{							\
+		.num = _num,					\
+		.name = _name,					\
+	}
+
+#include "pindesc-bcm63138.h"
+#include "pindesc-bcm63158.h"
+
+/* per-SoC register layout: BCM63138 (no dedicated ext-irq block here) */
+struct bcm63xx_pinctrl_soc_config bcm63138_soc_config = {
+	.pin_descs		= bcm63138_desc_pins,
+	.nr_pin_descs		= ARRAY_SIZE(bcm63138_desc_pins),
+
+	.gpio_dir_offset	= 0x00,
+	.gpio_val_offset	= 0x20,
+	.testport_offset	= 0x54,
+};
+
+/* per-SoC register layout: BCM63158, including its external-irq block */
+struct bcm63xx_pinctrl_soc_config bcm63158_soc_config = {
+	.pin_descs		= bcm63158_desc_pins,
+	.nr_pin_descs		= ARRAY_SIZE(bcm63158_desc_pins),
+
+	.gpio_dir_offset	= 0x00,
+	.gpio_val_offset	= 0x20,
+	.testport_offset	= 0x54,
+
+	.ext_irq_count		= 8,
+	.ext_irq_ctrl_offset	= 0x00,
+	.ext_irq_status_offset	= 0x04,
+	.ext_irq_clear_offset	= 0x0c,
+	.ext_irq_mask_offset	= 0x10,
+	.ext_irq_mask_set_offset	= 0x14,
+	.ext_irq_mask_clear_offset	= 0x18,
+	.ext_irq_mux_sel0_offset	= 0x24,
+	.ext_irq_mux_sel1_offset	= 0x28,
+};
+
+/* Pinctrl functions */
+
+/* groups are 1:1 with pins; a linear scan is fine at this size */
+static struct bcm63138_pinctrl_group *
+bcm63138_pctrl_find_group_by_pin(struct bcm63138_pinctrl *pctl, u32 pin)
+{
+	struct bcm63138_pinctrl_group *grp;
+	int idx;
+
+	for (idx = 0; idx < pctl->ngroups; idx++) {
+		grp = &pctl->groups[idx];
+		if (grp->pin == pin)
+			return grp;
+	}
+
+	return NULL;
+}
+
+/*
+ * Check that mux function 'fnum' exists for pin 'pin_num' in the
+ * per-SoC pin description table.
+ */
+static bool bcm63138_pctrl_is_function_valid(struct bcm63138_pinctrl *pctl,
+		u32 pin_num, u32 fnum)
+{
+	int i;
+
+	for (i = 0; i < pctl->cfg->nr_pin_descs; i++) {
+		const struct bcm63xx_desc_pin *pin = &pctl->cfg->pin_descs[i];
+		const struct bcm63xx_desc_function *func = pin->functions;
+
+		if (pin->pin.number != pin_num)
+			continue;
+
+		/* function lists are terminated by an all-zero entry */
+		while (func && func->name) {
+			if (func->num == fnum)
+				return true;
+			func++;
+		}
+
+		break;
+	}
+
+	return false;
+}
+
+/*
+ * Append one PIN_MAP_TYPE_MUX_GROUP entry for (pin, fnum) to *map.
+ * The function is validated before the map entry is touched, so an
+ * invalid function no longer leaves a half-written entry behind (the
+ * original filled in type/group first and bailed out in the middle).
+ */
+static int
+bcm63138_pctrl_dt_node_to_map_func(struct bcm63138_pinctrl *pctl,
+				   u32 pin, u32 fnum,
+				   struct bcm63138_pinctrl_group *grp,
+				   struct pinctrl_map **map,
+				   unsigned *reserved_maps,
+				   unsigned *num_maps)
+{
+	if (*num_maps == *reserved_maps)
+		return -ENOSPC;
+
+	if (!bcm63138_pctrl_is_function_valid(pctl, pin, fnum)) {
+		dev_err(pctl->dev, "invalid function %d on pin %d .\n",
+				fnum, pin);
+		return -EINVAL;
+	}
+
+	(*map)[*num_maps].type = PIN_MAP_TYPE_MUX_GROUP;
+	(*map)[*num_maps].data.mux.group = grp->name;
+	(*map)[*num_maps].data.mux.function = bcm63138_pin_functions[fnum];
+	(*num_maps)++;
+
+	return 0;
+}
+
+/* pinctrl_ops: number of pin groups (one group per pin) */
+static int bcm63138_pctrl_get_groups_count(struct pinctrl_dev *pctldev)
+{
+	struct bcm63138_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	return priv->ngroups;
+}
+
+/* pinctrl_ops: name of the given group */
+static const char *bcm63138_pctrl_get_group_name(struct pinctrl_dev *pctldev,
+					      unsigned group)
+{
+	struct bcm63138_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	return priv->groups[group].name;
+}
+
+/* pinctrl_ops: each group contains exactly one pin */
+static int bcm63138_pctrl_get_group_pins(struct pinctrl_dev *pctldev,
+				      unsigned group,
+				      const unsigned **pins,
+				      unsigned *num_pins)
+{
+	struct bcm63138_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+	/* point directly at the group's pin field; lives as long as pctl */
+	*pins = (unsigned *)&pctl->groups[group].pin;
+	*num_pins = 1;
+	return 0;
+}
+
+/*
+ * Translate one pin-config child node into pinctrl map entries.
+ * Only the "pinmux" property is handled; each u32 entry encodes a
+ * pin number and mux function (BCM63138_GET_PIN_NO/FUNC).
+ */
+static int bcm63138_pctrl_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+					    struct device_node *node,
+					    struct pinctrl_map **map,
+					    unsigned *reserved_maps,
+					    unsigned *num_maps)
+{
+	struct bcm63138_pinctrl *pctl;
+	struct bcm63138_pinctrl_group *grp;
+	struct property *pins;
+	u32 pinfunc, pin, func;
+	unsigned reserve = 0;
+	int num_pins, num_funcs, maps_per_pin, i, err;
+
+	pctl = pinctrl_dev_get_drvdata(pctldev);
+
+	pins = of_find_property(node, "pinmux", NULL);
+	if (!pins) {
+		dev_err(pctl->dev, "missing pins property in node %s .\n",
+				node->name);
+		return -EINVAL;
+	}
+
+	/*
+	 * NOTE(review): maps_per_pin ends up as 2 whenever num_pins >= 1,
+	 * so 'reserve' asks for twice the entries actually emitted (one
+	 * MUX_GROUP entry per pin).  Harmless over-allocation — confirm
+	 * whether config maps were also intended here.
+	 */
+	num_pins = pins->length / sizeof(u32);
+	num_funcs = num_pins;
+	maps_per_pin = 0;
+
+	if (num_funcs)
+		maps_per_pin++;
+
+	if (num_pins >= 1)
+		maps_per_pin++;
+
+	if (!num_pins || !maps_per_pin)
+		return -EINVAL;
+
+	reserve = num_pins * maps_per_pin;
+
+	err = pinctrl_utils_reserve_map(pctldev, map,
+					reserved_maps, num_maps, reserve);
+	if (err)
+		return err;
+
+	for (i = 0; i < num_pins; i++) {
+		err = of_property_read_u32_index(node, "pinmux",
+						 i, &pinfunc);
+		if (err)
+			return err;
+
+		pin = BCM63138_GET_PIN_NO(pinfunc);
+		func = BCM63138_GET_PIN_FUNC(pinfunc);
+
+		if (!bcm63138_pctrl_is_function_valid(pctl, pin, func)) {
+			dev_err(pctl->dev, "invalid function.\n");
+			return -EINVAL;
+		}
+
+		grp = bcm63138_pctrl_find_group_by_pin(pctl, pin);
+		if (!grp) {
+			dev_err(pctl->dev, "unable to match pin %d to group\n",
+				pin);
+			return -EINVAL;
+		}
+
+		err = bcm63138_pctrl_dt_node_to_map_func(pctl, pin, func, grp, map,
+							 reserved_maps, num_maps);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/*
+ * pinctrl_ops .dt_node_to_map: build the map from all children of
+ * np_config.
+ *
+ * Fix vs. original: for_each_child_of_node() holds a reference on the
+ * current child; returning from the loop body without of_node_put()
+ * leaked that reference on error.
+ */
+static int bcm63138_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
+				 struct device_node *np_config,
+				 struct pinctrl_map **map, unsigned *num_maps)
+{
+	struct device_node *np;
+	unsigned reserved_maps;
+	int ret;
+
+	*map = NULL;
+	*num_maps = 0;
+	reserved_maps = 0;
+
+	for_each_child_of_node(np_config, np) {
+		ret = bcm63138_pctrl_dt_subnode_to_map(pctldev, np, map,
+				&reserved_maps, num_maps);
+		if (ret < 0) {
+			of_node_put(np);
+			pinctrl_utils_free_map(pctldev, *map, *num_maps);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/* pinctrl core callbacks (DT parsing + group introspection) */
+static const struct pinctrl_ops bcm63138_pctrl_ops = {
+	.dt_node_to_map		= bcm63138_pctrl_dt_node_to_map,
+	.dt_free_map		= pinctrl_utils_free_map,
+	.get_groups_count	= bcm63138_pctrl_get_groups_count,
+	.get_group_name		= bcm63138_pctrl_get_group_name,
+	.get_group_pins		= bcm63138_pctrl_get_group_pins,
+};
+
+/* Pinmux functions */
+
+/* pinmux_ops: number of selectable mux functions (fixed table) */
+static int bcm63138_pmx_get_funcs_cnt(struct pinctrl_dev *pctldev)
+{
+	return ARRAY_SIZE(bcm63138_pin_functions);
+}
+
+/* pinmux_ops: name for a function selector ("mux0" .. "mux6") */
+static const char *bcm63138_pmx_get_func_name(struct pinctrl_dev *pctldev,
+					      unsigned selector)
+{
+	return bcm63138_pin_functions[selector];
+}
+
+/* pinmux_ops: every function may be requested on every group */
+static int bcm63138_pmx_get_func_groups(struct pinctrl_dev *pctldev,
+				     unsigned function,
+				     const char * const **groups,
+				     unsigned * const num_groups)
+{
+	struct bcm63138_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+
+	*groups = priv->grp_names;
+	*num_groups = priv->ngroups;
+
+	return 0;
+}
+
+/*
+ * Program the mux function for one pin through the test-port
+ * interface: write the pin/function word to the DATA registers,
+ * then issue TPORT_CMD_LOAD to commit it.
+ */
+static void set_mux(struct bcm63138_pinctrl *pctl,
+		    unsigned int pin,
+		    unsigned int function)
+{
+	dev_dbg(pctl->dev, "setting mux for pin%d function%d\n",
+		 pin, function);
+
+	iowrite32(0,
+		  pctl->base + GPIO_TPORT_DATA_MSB_REG(pctl->cfg));
+
+	iowrite32(pin << TPORT_DATA_PIN_SHIFT |
+		  function << TPORT_DATA_FUNC_SHIFT,
+		  pctl->base + GPIO_TPORT_DATA_LSB_REG(pctl->cfg));
+
+	iowrite32(TPORT_CMD_LOAD,
+		  pctl->base + GPIO_TPORT_CMD_REG(pctl->cfg));
+}
+
+/* pinmux_ops .set_mux: route 'function' onto the (single) pin of 'group' */
+static int bcm63138_pmx_set_mux(struct pinctrl_dev *pctldev,
+				unsigned function,
+				unsigned group)
+{
+	struct bcm63138_pinctrl *priv = pinctrl_dev_get_drvdata(pctldev);
+	struct bcm63138_pinctrl_group *grp = priv->groups + group;
+
+	if (!bcm63138_pctrl_is_function_valid(priv, grp->pin, function)) {
+		dev_err(priv->dev, "invalid function %d on group %d .\n",
+			function, group);
+		return -EINVAL;
+	}
+
+	set_mux(priv, grp->pin, function);
+	return 0;
+}
+
+/* When a pin is requested as GPIO, switch its mux to the GPIO function. */
+static int bcm63138_pmx_gpio_request_enable(struct pinctrl_dev *pctldev,
+					    struct pinctrl_gpio_range *range,
+					    unsigned offset)
+{
+	struct bcm63138_pinctrl *pctl = pinctrl_dev_get_drvdata(pctldev);
+	set_mux(pctl, offset, TPORT_DATA_FUNC_GPIO);
+	return 0;
+}
+
+/*
+ * pinmux_ops; .strict makes the core refuse a GPIO request on a pin
+ * already owned by a function (and vice versa).
+ */
+static const struct pinmux_ops bcm63138_pmx_ops = {
+	.get_functions_count	= bcm63138_pmx_get_funcs_cnt,
+	.get_function_name	= bcm63138_pmx_get_func_name,
+	.get_function_groups	= bcm63138_pmx_get_func_groups,
+	.set_mux		= bcm63138_pmx_set_mux,
+	.gpio_request_enable	= bcm63138_pmx_gpio_request_enable,
+	.strict			= true,
+};
+
+/*
+ * GPIO functions
+ */
+
+/* gpio_chip.get: read the line level from the bank's VAL register. */
+static int bcm63138_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+	struct bcm63138_pinctrl *pctl = gpiochip_get_data(chip);
+	u32 val;
+
+	val = ioread32(pctl->base + GPIO_VAL_REG(pctl->cfg, offset));
+	return !!(val & (1 << (offset % 32)));
+}
+
+/*
+ * Read-modify-write the output bit of GPIO @offset in its VAL
+ * register.  Callers (bcm63138_gpio_set, direction_output) hold
+ * pctl->lock around this RMW.
+ */
+static void __bcm63138_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct bcm63138_pinctrl *pctl = gpiochip_get_data(chip);
+	u32 val;
+
+	val = ioread32(pctl->base + GPIO_VAL_REG(pctl->cfg, offset));
+	if (value)
+		val |= (1 << (offset % 32));
+	else
+		val &= ~(1 << (offset % 32));
+	iowrite32(val, pctl->base + GPIO_VAL_REG(pctl->cfg, offset));
+}
+
+/* gpio_chip.set: locked wrapper around __bcm63138_gpio_set(). */
+static void bcm63138_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+	struct bcm63138_pinctrl *pctl = gpiochip_get_data(chip);
+	unsigned long flags;
+
+	spin_lock_irqsave(&pctl->lock, flags);
+
+	__bcm63138_gpio_set(chip, offset, value);
+
+	spin_unlock_irqrestore(&pctl->lock, flags);
+}
+
+/* gpio_chip.direction_input: clear the DIR bit (0 = input). */
+static int bcm63138_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+	struct bcm63138_pinctrl *pctl = gpiochip_get_data(chip);
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pctl->lock, flags);
+
+	val = ioread32(pctl->base + GPIO_DIR_REG(pctl->cfg, offset));
+	val &= ~(1 << (offset % 32));
+	iowrite32(val, pctl->base + GPIO_DIR_REG(pctl->cfg, offset));
+
+	spin_unlock_irqrestore(&pctl->lock, flags);
+	return 0;
+}
+
+/*
+ * gpio_chip.direction_output: the output value is programmed before
+ * the DIR bit flips to output (presumably so the pin never drives a
+ * stale level), all under pctl->lock.
+ */
+static int bcm63138_gpio_direction_output(struct gpio_chip *chip,
+					  unsigned offset, int value)
+{
+	struct bcm63138_pinctrl *pctl = gpiochip_get_data(chip);
+	u32 val;
+	unsigned long flags;
+
+	spin_lock_irqsave(&pctl->lock, flags);
+
+	__bcm63138_gpio_set(chip, offset, value);
+
+	val = ioread32(pctl->base + GPIO_DIR_REG(pctl->cfg, offset));
+	val |= (1 << (offset % 32));
+	iowrite32(val, pctl->base + GPIO_DIR_REG(pctl->cfg, offset));
+
+	spin_unlock_irqrestore(&pctl->lock, flags);
+	return 0;
+}
+
+/*
+ * gpio_chip.get_direction: DIR bit set means output (return 0), clear
+ * means input (return 1), per the gpiolib in/out convention.
+ */
+static int bcm63138_gpio_get_direction(struct gpio_chip *chip, unsigned int offset)
+{
+	struct bcm63138_pinctrl *pctl = gpiochip_get_data(chip);
+	u32 val;
+
+	val = ioread32(pctl->base + GPIO_DIR_REG(pctl->cfg, offset));
+	return (val & (1 << (offset % 32))) ? 0 : 1;
+}
+
+/*
+ * Build the pinctrl group state from the per-SoC pin descriptors:
+ * one group per pin, with the group named after the pin.  Both the
+ * group array and the flat name array (for get_function_groups) are
+ * devm-allocated.  Returns 0 or -ENOMEM.
+ */
+static int bcm63138_pctrl_build_state(struct platform_device *pdev)
+{
+	struct bcm63138_pinctrl *pctl = platform_get_drvdata(pdev);
+	int i;
+
+	pctl->ngroups = pctl->cfg->nr_pin_descs;
+
+	/* Allocate groups */
+	pctl->groups = devm_kcalloc(&pdev->dev, pctl->ngroups,
+				    sizeof(*pctl->groups), GFP_KERNEL);
+	if (!pctl->groups)
+		return -ENOMEM;
+
+	/* We assume that one pin is one group, use pin name as group name. */
+	pctl->grp_names = devm_kcalloc(&pdev->dev, pctl->ngroups,
+				       sizeof(*pctl->grp_names), GFP_KERNEL);
+	if (!pctl->grp_names)
+		return -ENOMEM;
+
+	for (i = 0; i < pctl->cfg->nr_pin_descs; i++) {
+		const struct bcm63xx_desc_pin *pin = &pctl->cfg->pin_descs[i];
+		struct bcm63138_pinctrl_group *group = &pctl->groups[i];
+
+		group->name = pin->pin.name;
+		group->pin = pin->pin.number;
+		pctl->grp_names[i] = pin->pin.name;
+	}
+
+	return 0;
+}
+
+/*
+ * irq_chip.irq_ack: clear the latched status of the external-IRQ line
+ * backing this GPIO, then EOI the parent interrupt if one is attached.
+ */
+static void bcm63138_gpio_irq_ack(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct bcm63138_pinctrl *pctl = gpiochip_get_data(gc);
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	/* hwirq is the GPIO number; translate to its ext-IRQ slot */
+	unsigned int ext_irq = pctl->ext_irq_revmap[hwirq];
+
+	writel(1 << ext_irq,
+	       pctl->irq_base + pctl->cfg->ext_irq_clear_offset);
+	if (d->parent_data->chip)
+		irq_chip_eoi_parent(d);
+}
+
+/*
+ * irq_chip.irq_mask: mask at the parent first, then drop this line's
+ * bit from the external-IRQ enable mask and tell gpiolib the line is
+ * no longer usable as an IRQ.
+ */
+static void bcm63138_gpio_irq_mask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct bcm63138_pinctrl *pctl = gpiochip_get_data(gc);
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	/* hwirq is the GPIO number; translate to its ext-IRQ slot */
+	unsigned int ext_irq = pctl->ext_irq_revmap[hwirq];
+
+	irq_chip_mask_parent(d);
+	writel(1 << ext_irq,
+	       pctl->irq_base + pctl->cfg->ext_irq_mask_clear_offset);
+	gpiochip_disable_irq(gc, irqd_to_hwirq(d));
+}
+
+/*
+ * irq_chip.irq_unmask: mirror of irq_mask — re-enable on the gpiolib
+ * side, set the line's bit in the external-IRQ enable mask, and
+ * finally unmask the parent when one is attached.
+ */
+static void bcm63138_gpio_irq_unmask(struct irq_data *d)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct bcm63138_pinctrl *pctl = gpiochip_get_data(gc);
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	/* hwirq is the GPIO number; translate to its ext-IRQ slot */
+	unsigned int ext_irq = pctl->ext_irq_revmap[hwirq];
+
+	gpiochip_enable_irq(gc, irqd_to_hwirq(d));
+	writel(1 << ext_irq,
+	       pctl->irq_base + pctl->cfg->ext_irq_mask_set_offset);
+	if (d->parent_data->chip)
+		irq_chip_unmask_parent(d);
+}
+
+/*
+ * irq_chip.irq_set_type: translate the requested IRQ_TYPE_* into the
+ * level/polarity/both-edge bits of the external-IRQ control register,
+ * route the GPIO to its ext-IRQ slot through the two 4-bit mux-select
+ * registers, and install the matching level/edge flow handler.  The
+ * line is masked while being reconfigured and any stale pending state
+ * is cleared before unmasking.  The parent is always programmed as
+ * level-high, matching the parent_type advertised in
+ * bcm63138_gpio_irq_child_to_parent_hwirq().
+ */
+static int bcm63138_gpio_irq_set_type(struct irq_data *d, unsigned int type)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+	struct bcm63138_pinctrl *pctl = gpiochip_get_data(gc);
+	unsigned long flags;
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
+	/* hwirq is the GPIO number; ext_irq is its allocated slot */
+	unsigned int ext_irq = pctl->ext_irq_revmap[hwirq];
+	unsigned int irq_bit = (1 << ext_irq);
+	bool is_level, rising_or_high, dual_edge;
+	u32 val;
+
+	if (WARN_ON(ext_irq >= pctl->cfg->ext_irq_count))
+		return -EINVAL;
+
+	/* decode IRQ_TYPE_* into the three hardware knobs */
+	is_level = false;
+	rising_or_high = false;
+	dual_edge = false;
+
+	switch (type & IRQ_TYPE_SENSE_MASK) {
+	case IRQ_TYPE_EDGE_RISING:
+		rising_or_high = true;
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		break;
+	case IRQ_TYPE_EDGE_BOTH:
+		dual_edge = true;
+		break;
+	case IRQ_TYPE_LEVEL_HIGH:
+		is_level = true;
+		rising_or_high = true;
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		is_level = true;
+		break;
+	default:
+		dev_err(pctl->dev, "invalid GPIO IRQ type 0x%x\n", type);
+		return -EINVAL;
+	}
+
+	spin_lock_irqsave(&pctl->lock, flags);
+
+	/* mask it */
+	writel(irq_bit,
+	       pctl->irq_base + pctl->cfg->ext_irq_mask_set_offset);
+
+	/* setup */
+	val = readl(pctl->irq_base + pctl->cfg->ext_irq_ctrl_offset);
+	val &= ~EXTIRQ_CFG_LVL_STICKY_MASK(ext_irq);
+	val &= ~EXTIRQ_CFG_SENSE_HI_MASK(ext_irq);
+	val &= ~EXTIRQ_CFG_EDGE_BOTH_MASK(ext_irq);
+
+	if (is_level)
+		val |= EXTIRQ_CFG_LEVEL_MASK(ext_irq);
+	else
+		val &= ~EXTIRQ_CFG_LEVEL_MASK(ext_irq);
+
+	if (rising_or_high)
+		val |= EXTIRQ_CFG_SENSE_HI_MASK(ext_irq);
+	else
+		val &= ~EXTIRQ_CFG_SENSE_HI_MASK(ext_irq);
+
+	if (dual_edge)
+		val |= EXTIRQ_CFG_EDGE_BOTH_MASK(ext_irq);
+	else
+		val &= ~EXTIRQ_CFG_EDGE_BOTH_MASK(ext_irq);
+
+	writel(val, pctl->irq_base + pctl->cfg->ext_irq_ctrl_offset);
+
+	/* setup mux to correct gpio */
+	/* 4-bit fields per slot: sel0 holds hwirq[3:0], sel1 hwirq[7:4] */
+	val = readl(pctl->irq_base + pctl->cfg->ext_irq_mux_sel0_offset);
+	val &= ~(0xf << (ext_irq * 4));
+	val |= (hwirq & 0xf) << (ext_irq * 4);
+	writel(val, pctl->irq_base + pctl->cfg->ext_irq_mux_sel0_offset);
+
+	val = readl(pctl->irq_base + pctl->cfg->ext_irq_mux_sel1_offset);
+	val &= ~(0xf << (ext_irq * 4));
+	val |= (hwirq >> 4) << (ext_irq * 4);
+	writel(val, pctl->irq_base + pctl->cfg->ext_irq_mux_sel1_offset);
+
+	if (is_level)
+		irq_set_handler_locked(d, handle_level_irq);
+	else
+		irq_set_handler_locked(d, handle_edge_irq);
+
+	/* clear pending if any */
+	writel(irq_bit,
+	       pctl->irq_base + pctl->cfg->ext_irq_clear_offset);
+
+	/* unmask */
+	writel(irq_bit,
+	       pctl->irq_base + pctl->cfg->ext_irq_mask_clear_offset);
+
+	spin_unlock_irqrestore(&pctl->lock, flags);
+
+	return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+/*
+ * irq_chip.irq_print_chip: show the parent device name in
+ * /proc/interrupts for this chip's lines.
+ */
+static void
+bcm63138_gpio_irq_print_chip(struct irq_data *data, struct seq_file *p)
+{
+	struct gpio_chip *gc = irq_data_get_irq_chip_data(data);
+
+	/*
+	 * Use seq_puts(): the device name is not a trusted format
+	 * string, so it must not be passed as the format argument of
+	 * seq_printf() (a '%' in the name would be interpreted).
+	 */
+	seq_puts(p, dev_name(gc->parent));
+}
+
+/*
+ * Immutable irq_chip shared by all GPIO interrupt lines;
+ * GPIOCHIP_IRQ_RESOURCE_HELPERS supplies the request/release resource
+ * callbacks required together with IRQCHIP_IMMUTABLE.
+ */
+static const struct irq_chip bcm63138_gpio_irq_chip = {
+	.irq_ack = bcm63138_gpio_irq_ack,
+	.irq_mask = bcm63138_gpio_irq_mask,
+	.irq_unmask = bcm63138_gpio_irq_unmask,
+	.irq_set_type = bcm63138_gpio_irq_set_type,
+	.irq_print_chip = bcm63138_gpio_irq_print_chip,
+	.flags = IRQCHIP_IMMUTABLE,
+	GPIOCHIP_IRQ_RESOURCE_HELPERS,
+};
+
+/*
+ * Hierarchical-domain hook: allocate a free external-IRQ slot for GPIO
+ * @child and record the mapping in both directions (slot -> GPIO in
+ * ext_irq_map[], GPIO -> slot in ext_irq_revmap[]).  Returns -ENOSPC
+ * when every ext-IRQ line is already in use.  Note that *parent is the
+ * slot index, not a GIC hwirq — see the comment below.
+ */
+static int
+bcm63138_gpio_irq_child_to_parent_hwirq(struct gpio_chip *gc,
+					unsigned int child,
+					unsigned int child_type,
+					unsigned int *parent,
+					unsigned int *parent_type)
+{
+	struct bcm63138_pinctrl *pctl = gpiochip_get_data(gc);
+	unsigned long flags;
+	unsigned int i;
+	int ext_irq;
+
+	/* allocate an external irq number */
+	spin_lock_irqsave(&pctl->lock, flags);
+	ext_irq = -1;
+	for (i = 0; i < pctl->cfg->ext_irq_count; i++) {
+		if (pctl->ext_irq_map[i] == -1) {
+			pctl->ext_irq_map[i] = child;
+			pctl->ext_irq_revmap[child] = i;
+			ext_irq = i;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&pctl->lock, flags);
+
+	if (ext_irq < 0)
+		return -ENOSPC;
+
+	*parent_type = IRQ_TYPE_LEVEL_HIGH;
+	/* actually return the index in ext_irq_parents[] so that
+	 * populate_parent_alloc_arg() callback returns the correct
+	 * fwargs to lookup in GIC */
+	*parent = ext_irq;
+
+	return 0;
+}
+
+/*
+ * Translate the slot index produced by child_to_parent_hwirq() into
+ * the parent fwspec captured from DT in bcm63138_gpio_irq_init().
+ */
+static int
+bcm63138_gpio_irq_populate_parent_fwspec(struct gpio_chip *gc,
+					 union gpio_irq_fwspec *gfwspec,
+					 unsigned int parent_hwirq,
+					 unsigned int parent_type)
+{
+	struct bcm63138_pinctrl *pctl = gpiochip_get_data(gc);
+	struct irq_fwspec *fwspec = &gfwspec->fwspec;
+
+	*fwspec = pctl->ext_irq_parents[parent_hwirq];
+	return 0;
+}
+
+/*
+ * Child-domain .free hook: release the external-IRQ slot that
+ * bcm63138_gpio_irq_child_to_parent_hwirq() allocated for this GPIO,
+ * then run the common irq_domain teardown.
+ */
+static void
+bcm63138_gpio_irq_domain_free(struct irq_domain *domain, unsigned int virq,
+			      unsigned int nr_irqs)
+{
+	struct irq_data *d;
+	unsigned long flags;
+
+	d = irq_domain_get_irq_data(domain, virq);
+	if (d) {
+		struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+		struct bcm63138_pinctrl *pctl = gpiochip_get_data(gc);
+		/*
+		 * hwirq is the GPIO number; translate it through
+		 * ext_irq_revmap[] (as every other irq op here does)
+		 * before indexing ext_irq_map[], which only has
+		 * ext_irq_count slots.  Indexing with the raw GPIO
+		 * hwirq wrote out of bounds for GPIOs >= ext_irq_count.
+		 */
+		unsigned int ext_irq = pctl->ext_irq_revmap[irqd_to_hwirq(d)];
+
+		spin_lock_irqsave(&pctl->lock, flags);
+		if (ext_irq < pctl->cfg->ext_irq_count)
+			pctl->ext_irq_map[ext_irq] = -1;
+		spin_unlock_irqrestore(&pctl->lock, flags);
+	}
+
+	irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
+
+/*
+ * Optional GPIO interrupt support: capture the parent fwspec of each
+ * external-IRQ line from DT, map the ext-IRQ register block and hook
+ * the gpio_irq_chip up as a hierarchical child of the parent domain.
+ * Returns 0 (without registering an irqchip) when the SoC config has
+ * no ext-IRQ lines or the DT node carries no interrupts.
+ */
+static int bcm63138_gpio_irq_init(struct platform_device *pdev,
+				  struct bcm63138_pinctrl *pctl)
+{
+	struct gpio_chip *chip = &pctl->gpio_chip;
+	struct gpio_irq_chip *girq = &chip->irq;
+	struct irq_domain *parent_domain;
+	struct device_node *irq_parent;
+	struct resource *res;
+	unsigned int i;
+	int irq, ret;
+
+	if (!pctl->cfg->ext_irq_count)
+		return 0;
+
+	/* if device tree does not include any interrupt, don't
+	 * register irq chip */
+	irq = platform_get_irq_optional(pdev, 0);
+	if (irq < 0)
+		return 0;
+
+	/* record each ext-IRQ line's parent fwspec for
+	 * populate_parent_alloc_arg() */
+	for (i = 0; i < pctl->cfg->ext_irq_count; i++) {
+		struct of_phandle_args map;
+
+		/* use the outer ret (the old inner declaration shadowed it) */
+		ret = of_irq_parse_one(pdev->dev.of_node, i, &map);
+		if (ret) {
+			dev_err(&pdev->dev, "irq %d missing\n", i);
+			return ret;
+		}
+
+		of_phandle_args_to_fwspec(pdev->dev.of_node,
+					  map.args, map.args_count,
+					  &pctl->ext_irq_parents[i]);
+	}
+
+	BUG_ON(pctl->cfg->ext_irq_count > EXT_IRQ_MAX_COUNT);
+	BUG_ON(pctl->cfg->nr_pin_descs > BCM_MAX_GPIOS);
+
+	/* all ext-IRQ slots start out unallocated */
+	for (i = 0; i < pctl->cfg->ext_irq_count; i++)
+		pctl->ext_irq_map[i] = -1;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!res) {
+		dev_err(&pdev->dev, "could not get irq memory\n");
+		return -ENODEV;
+	}
+
+	pctl->irq_base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pctl->irq_base)) {
+		/*
+		 * Report the actual mapping error; the previous code
+		 * printed the then-uninitialized local "ret" here.
+		 */
+		ret = PTR_ERR(pctl->irq_base);
+		dev_err(&pdev->dev, "could not remap irq memory: %d\n", ret);
+		return ret;
+	}
+
+	irq_parent = of_irq_find_parent(pdev->dev.of_node);
+	if (!irq_parent) {
+		dev_err(&pdev->dev, "No IRQ parent node\n");
+		return -ENODEV;
+	}
+
+	parent_domain = irq_find_host(irq_parent);
+	of_node_put(irq_parent);
+	if (!parent_domain) {
+		dev_err(&pdev->dev, "No IRQ parent domain\n");
+		return -ENODEV;
+	}
+
+	/* setup mux to invalid gpios */
+	writel(0xffffffff,
+	       pctl->irq_base + pctl->cfg->ext_irq_mux_sel0_offset);
+	writel(0xffffffff,
+	       pctl->irq_base + pctl->cfg->ext_irq_mux_sel1_offset);
+
+	gpio_irq_chip_set_chip(girq, &bcm63138_gpio_irq_chip);
+	girq->fwnode = dev_fwnode(&pdev->dev);
+	girq->parent_domain = parent_domain;
+	girq->child_to_parent_hwirq = bcm63138_gpio_irq_child_to_parent_hwirq;
+	girq->populate_parent_alloc_arg =
+		bcm63138_gpio_irq_populate_parent_fwspec;
+	girq->child_irq_domain_ops.free = bcm63138_gpio_irq_domain_free;
+
+	return 0;
+}
+
+/* .data supplies the per-SoC config used throughout via pctl->cfg */
+static const struct of_device_id bcm63138_pinctrl_match[] = {
+	{ .compatible = "brcm,bcm63138-pinctrl", .data = &bcm63138_soc_config },
+	{ .compatible = "brcm,bcm63158-pinctrl", .data = &bcm63158_soc_config },
+	{}
+};
+
+/*
+ * Probe: map the GPIO register block, build the pin/group state,
+ * register the pinctrl device, then the GPIO chip (with optional IRQ
+ * support) and finally attach the 1:1 GPIO <-> pin range.  All
+ * allocations and registrations are devm-managed.
+ */
+static int bcm63138_pinctrl_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct bcm63138_pinctrl *pctl;
+	struct resource *res_mem;
+	struct pinctrl_pin_desc *pins;
+	struct gpio_chip *gpio_chip;
+	unsigned int i;
+	int ret;
+	const struct of_device_id *match;
+
+	match = of_match_device(bcm63138_pinctrl_match, &pdev->dev);
+	if (!match)
+		return -EINVAL;
+
+	pctl = devm_kzalloc(dev, sizeof(*pctl), GFP_KERNEL);
+	if (!pctl)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, pctl);
+	pctl->dev = dev;
+	pctl->cfg = match->data;
+
+	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res_mem) {
+		dev_err(dev, "could not get IO memory\n");
+		return -ENODEV;
+	}
+
+	ret = bcm63138_pctrl_build_state(pdev);
+	if (ret) {
+		dev_err(dev, "build state failed: %d\n", ret);
+		/* propagate the real error (was flattened to -EINVAL) */
+		return ret;
+	}
+
+	pctl->base = devm_ioremap_resource(dev, res_mem);
+	if (IS_ERR(pctl->base))
+		return PTR_ERR(pctl->base);
+
+	/* prepare actual pins from desc */
+	pins = devm_kcalloc(&pdev->dev, pctl->cfg->nr_pin_descs,
+			    sizeof(*pins), GFP_KERNEL);
+	if (!pins)
+		return -ENOMEM;
+
+	for (i = 0; i < pctl->cfg->nr_pin_descs; i++)
+		pins[i] = pctl->cfg->pin_descs[i].pin;
+
+	spin_lock_init(&pctl->lock);
+	pctl->desc.name = dev_name(&pdev->dev);
+	pctl->desc.owner = THIS_MODULE;
+	pctl->desc.pins = pins;
+	pctl->desc.npins = pctl->cfg->nr_pin_descs;
+	pctl->desc.pctlops = &bcm63138_pctrl_ops;
+	pctl->desc.pmxops = &bcm63138_pmx_ops;
+
+	pctl->pctl_dev = devm_pinctrl_register(dev, &pctl->desc, pctl);
+	if (IS_ERR(pctl->pctl_dev))
+		return PTR_ERR(pctl->pctl_dev);
+
+	/* add gpio chip */
+	gpio_chip = &pctl->gpio_chip;
+	gpio_chip->base = -1;
+	gpio_chip->label = "bcm63138-gpio";
+	gpio_chip->request = gpiochip_generic_request;
+	gpio_chip->free = gpiochip_generic_free;
+	gpio_chip->get = bcm63138_gpio_get;
+	gpio_chip->set = bcm63138_gpio_set;
+	gpio_chip->direction_input = bcm63138_gpio_direction_input;
+	gpio_chip->direction_output = bcm63138_gpio_direction_output;
+	gpio_chip->get_direction = bcm63138_gpio_get_direction;
+	gpio_chip->ngpio = pctl->cfg->nr_pin_descs;
+	gpio_chip->fwnode = dev_fwnode(dev);
+	gpio_chip->parent = dev;
+
+	/* must be configured before the chip is registered */
+	ret = bcm63138_gpio_irq_init(pdev, pctl);
+	if (ret)
+		return ret;
+
+	ret = devm_gpiochip_add_data(dev, gpio_chip, pctl);
+	if (ret)
+		return ret;
+
+	ret = gpiochip_add_pin_range(gpio_chip, pctl->desc.name,
+				     0, 0, pctl->cfg->nr_pin_descs);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * Built-in only (no module unload path); suppress_bind_attrs blocks
+ * manual unbind via sysfs since the pin controller cannot go away.
+ */
+static struct platform_driver bcm63138_pinctrl_driver = {
+	.probe = bcm63138_pinctrl_probe,
+	.driver = {
+		.name = "bcm63138_pinctrl",
+		.of_match_table = bcm63138_pinctrl_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+builtin_platform_driver(bcm63138_pinctrl_driver);
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/pinctrl/bcm/pindesc-bcm63138.h	2025-09-25 17:40:34.599362426 +0200
@@ -0,0 +1,790 @@
+static const struct bcm63xx_desc_pin bcm63138_desc_pins[] = {
+	BCM63XX_PIN(
+		PINCTRL_PIN(0, "GPIO_00"),
+		BCM63XX_FUNCTION(1, "SER_LED_DATA"),
+		BCM63XX_FUNCTION(4, "LED_00"),
+		BCM63XX_FUNCTION(5, "GPIO_00")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(1, "GPIO_01"),
+		BCM63XX_FUNCTION(1, "SER_LED_CLK"),
+		BCM63XX_FUNCTION(4, "LED_01"),
+		BCM63XX_FUNCTION(5, "GPIO_01")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(2, "GPIO_02"),
+		BCM63XX_FUNCTION(1, "SER_LED_MASK"),
+		BCM63XX_FUNCTION(4, "LED_02"),
+		BCM63XX_FUNCTION(5, "GPIO_02")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(3, "GPIO_03"),
+		BCM63XX_FUNCTION(1, "UART2_CTS"),
+		BCM63XX_FUNCTION(2, "NTR_PULSE_IN_0"),
+		BCM63XX_FUNCTION(3, "MOCA_GPIO_0"),
+		BCM63XX_FUNCTION(4, "LED_03"),
+		BCM63XX_FUNCTION(5, "GPIO_03")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(4, "GPIO_04"),
+		BCM63XX_FUNCTION(1, "UART2_RTS"),
+		BCM63XX_FUNCTION(2, "NTR_PULSE_OUT_0"),
+		BCM63XX_FUNCTION(3, "MOCA_GPIO_1"),
+		BCM63XX_FUNCTION(4, "LED_04"),
+		BCM63XX_FUNCTION(5, "GPIO_04")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(5, "GPIO_05"),
+		BCM63XX_FUNCTION(1, "UART2_SIN"),
+		BCM63XX_FUNCTION(3, "MOCA_GPIO_2"),
+		BCM63XX_FUNCTION(4, "LED_05"),
+		BCM63XX_FUNCTION(5, "GPIO_05")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(6, "GPIO_06"),
+		BCM63XX_FUNCTION(1, "UART2_SOUT"),
+		BCM63XX_FUNCTION(3, "MOCA_GPIO_3"),
+		BCM63XX_FUNCTION(4, "LED_06"),
+		BCM63XX_FUNCTION(5, "GPIO_06")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(7, "GPIO_07"),
+		BCM63XX_FUNCTION(1, "SPIM_SS5_B"),
+		BCM63XX_FUNCTION(2, "NTR_PULSE_OUT_1"),
+		BCM63XX_FUNCTION(3, "MOCA_GPIO_4"),
+		BCM63XX_FUNCTION(4, "LED_07"),
+		BCM63XX_FUNCTION(5, "GPIO_07")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(8, "GPIO_08"),
+		BCM63XX_FUNCTION(1, "SPIM_SS4_B"),
+		BCM63XX_FUNCTION(3, "MOCA_GPIO_5"),
+		BCM63XX_FUNCTION(4, "LED_08"),
+		BCM63XX_FUNCTION(5, "GPIO_08")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(9, "GPIO_09"),
+		BCM63XX_FUNCTION(1, "SPIM_SS3_B"),
+		BCM63XX_FUNCTION(2, "LD1_DIN"),
+		BCM63XX_FUNCTION(4, "LED_09"),
+		BCM63XX_FUNCTION(5, "GPIO_09")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(10, "GPIO_10"),
+		BCM63XX_FUNCTION(1, "SPIM_SS2_B"),
+		BCM63XX_FUNCTION(2, "LD1_DCLK"),
+		BCM63XX_FUNCTION(4, "LED_10"),
+		BCM63XX_FUNCTION(5, "GPIO_10")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(11, "GPIO_11"),
+		BCM63XX_FUNCTION(3, "MOCA_GPIO_6"),
+		BCM63XX_FUNCTION(4, "LED_11"),
+		BCM63XX_FUNCTION(5, "GPIO_11")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(12, "GPIO_12"),
+		BCM63XX_FUNCTION(1, "NTR_PULSE_IN"),
+		BCM63XX_FUNCTION(3, "MOCA_GPIO_7"),
+		BCM63XX_FUNCTION(4, "LED_12"),
+		BCM63XX_FUNCTION(5, "GPIO_12")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(13, "GPIO_13"),
+		BCM63XX_FUNCTION(1, "NTR_PULSE_OUT_0"),
+		BCM63XX_FUNCTION(3, "MOCA_GPIO_8"),
+		BCM63XX_FUNCTION(4, "LED_13"),
+		BCM63XX_FUNCTION(5, "GPIO_13")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(14, "GPIO_14"),
+		BCM63XX_FUNCTION(3, "MOCA_GPIO_9"),
+		BCM63XX_FUNCTION(4, "LED_14"),
+		BCM63XX_FUNCTION(5, "GPIO_14")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(15, "GPIO_15"),
+		BCM63XX_FUNCTION(4, "LED_15"),
+		BCM63XX_FUNCTION(5, "GPIO_15")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(16, "GPIO_16"),
+		BCM63XX_FUNCTION(3, "DECT_PD_0"),
+		BCM63XX_FUNCTION(4, "LED_16"),
+		BCM63XX_FUNCTION(5, "GPIO_16")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(17, "GPIO_17"),
+		BCM63XX_FUNCTION(3, "DECT_PD_1"),
+		BCM63XX_FUNCTION(4, "LED_17"),
+		BCM63XX_FUNCTION(5, "GPIO_17")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(18, "GPIO_18"),
+		BCM63XX_FUNCTION(1, "VREG_CLK"),
+		BCM63XX_FUNCTION(4, "LED_18"),
+		BCM63XX_FUNCTION(5, "GPIO_18")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(19, "GPIO_19"),
+		BCM63XX_FUNCTION(4, "LED_19"),
+		BCM63XX_FUNCTION(5, "GPIO_19")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(20, "GPIO_20"),
+		BCM63XX_FUNCTION(2, "UART2_CTS"),
+		BCM63XX_FUNCTION(4, "LED_20"),
+		BCM63XX_FUNCTION(5, "GPIO_20")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(21, "GPIO_21"),
+		BCM63XX_FUNCTION(2, "UART2_RTS"),
+		BCM63XX_FUNCTION(4, "LED_21"),
+		BCM63XX_FUNCTION(5, "GPIO_21")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(22, "GPIO_22"),
+		BCM63XX_FUNCTION(2, "UART2_SIN"),
+		BCM63XX_FUNCTION(4, "LED_22"),
+		BCM63XX_FUNCTION(5, "GPIO_22")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(23, "GPIO_23"),
+		BCM63XX_FUNCTION(2, "UART2_SOUT"),
+		BCM63XX_FUNCTION(4, "LED_23"),
+		BCM63XX_FUNCTION(5, "GPIO_23")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(24, "GPIO_24"),
+		BCM63XX_FUNCTION(1, "NTR_PULSE_OUT_1"),
+		BCM63XX_FUNCTION(3, "I2C_SDA"),
+		BCM63XX_FUNCTION(4, "LED_24"),
+		BCM63XX_FUNCTION(5, "GPIO_24")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(25, "GPIO_25"),
+		BCM63XX_FUNCTION(1, "SPIM_SS2_B"),
+		BCM63XX_FUNCTION(2, "NTR_PULSE_IN"),
+		BCM63XX_FUNCTION(3, "I2C_SCL"),
+		BCM63XX_FUNCTION(4, "LED_25"),
+		BCM63XX_FUNCTION(5, "GPIO_25")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(26, "GPIO_26"),
+		BCM63XX_FUNCTION(1, "SPIM_SS3_B"),
+		BCM63XX_FUNCTION(2, "NTR_PULSE_OUT_0"),
+		BCM63XX_FUNCTION(3, "NTR_PULSE_IN"),
+		BCM63XX_FUNCTION(4, "LED_26"),
+		BCM63XX_FUNCTION(5, "GPIO_26")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(27, "GPIO_27"),
+		BCM63XX_FUNCTION(1, "SPIM_SS4_B"),
+		BCM63XX_FUNCTION(2, "NTR_PULSE_OUT_1"),
+		BCM63XX_FUNCTION(3, "UART2_SIN"),
+		BCM63XX_FUNCTION(4, "LED_27"),
+		BCM63XX_FUNCTION(5, "GPIO_27")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(28, "GPIO_28"),
+		BCM63XX_FUNCTION(1, "SPIM_SS5_B"),
+		BCM63XX_FUNCTION(2, "AE_LOS"),
+		BCM63XX_FUNCTION(3, "UART2_SOUT"),
+		BCM63XX_FUNCTION(4, "LED_28"),
+		BCM63XX_FUNCTION(5, "GPIO_28")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(29, "GPIO_29"),
+		BCM63XX_FUNCTION(1, "SER_LED_DATA"),
+		BCM63XX_FUNCTION(4, "LED_29"),
+		BCM63XX_FUNCTION(5, "GPIO_29")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(30, "GPIO_30"),
+		BCM63XX_FUNCTION(1, "SER_LED_CLK"),
+		BCM63XX_FUNCTION(4, "LED_30"),
+		BCM63XX_FUNCTION(5, "GPIO_30")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(31, "GPIO_31"),
+		BCM63XX_FUNCTION(1, "SER_LED_MASK"),
+		BCM63XX_FUNCTION(4, "LED_31"),
+		BCM63XX_FUNCTION(5, "GPIO_31")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(32, "GPIO_32"),
+		BCM63XX_FUNCTION(1, "EXT_IRQ_0"),
+		BCM63XX_FUNCTION(5, "GPIO_32")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(33, "GPIO_33"),
+		BCM63XX_FUNCTION(1, "EXT_IRQ_1"),
+		BCM63XX_FUNCTION(5, "GPIO_33")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(34, "GPIO_34"),
+		BCM63XX_FUNCTION(1, "EXT_IRQ_2"),
+		BCM63XX_FUNCTION(5, "GPIO_34")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(35, "GPIO_35"),
+		BCM63XX_FUNCTION(1, "EXT_IRQ_3"),
+		BCM63XX_FUNCTION(2, "SYS_IRQ_OUT"),
+		BCM63XX_FUNCTION(5, "GPIO_35")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(36, "GPIO_36"),
+		BCM63XX_FUNCTION(1, "EXT_IRQ_4"),
+		BCM63XX_FUNCTION(2, "AE_LOS"),
+		BCM63XX_FUNCTION(5, "GPIO_36")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(37, "GPIO_37"),
+		BCM63XX_FUNCTION(1, "EXT_IRQ_5"),
+		BCM63XX_FUNCTION(2, "VREG_CLK"),
+		BCM63XX_FUNCTION(5, "GPIO_37")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(38, "GPIO_38"),
+		BCM63XX_FUNCTION(3, "NAND_CE_B"),
+		BCM63XX_FUNCTION(5, "GPIO_38")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(39, "GPIO_39"),
+		BCM63XX_FUNCTION(3, "NAND_RE_B"),
+		BCM63XX_FUNCTION(5, "GPIO_39")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(40, "GPIO_40"),
+		BCM63XX_FUNCTION(3, "NAND_RB_B"),
+		BCM63XX_FUNCTION(5, "GPIO_40")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(41, "GPIO_41"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_00"),
+		BCM63XX_FUNCTION(5, "GPIO_41")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(42, "GPIO_42"),
+		BCM63XX_FUNCTION(1, "DECT_PD_0"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_01"),
+		BCM63XX_FUNCTION(5, "GPIO_42")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(43, "GPIO_43"),
+		BCM63XX_FUNCTION(1, "DECT_PD_1"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_02"),
+		BCM63XX_FUNCTION(5, "GPIO_43")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(44, "GPIO_44"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_03"),
+		BCM63XX_FUNCTION(5, "GPIO_44")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(45, "GPIO_45"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_04"),
+		BCM63XX_FUNCTION(5, "GPIO_45")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(46, "GPIO_46"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_05"),
+		BCM63XX_FUNCTION(5, "GPIO_46")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(47, "GPIO_47"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_06"),
+		BCM63XX_FUNCTION(5, "GPIO_47")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(48, "GPIO_48"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_07"),
+		BCM63XX_FUNCTION(5, "GPIO_48")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(49, "GPIO_49"),
+		BCM63XX_FUNCTION(3, "NAND_ALE"),
+		BCM63XX_FUNCTION(5, "GPIO_49")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(50, "GPIO_50"),
+		BCM63XX_FUNCTION(3, "NAND_WE_B"),
+		BCM63XX_FUNCTION(5, "GPIO_50")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(51, "GPIO_51"),
+		BCM63XX_FUNCTION(3, "NAND_CLE"),
+		BCM63XX_FUNCTION(5, "GPIO_51")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(52, "GPIO_52"),
+		BCM63XX_FUNCTION(1, "LD0_PWRUP"),
+		BCM63XX_FUNCTION(2, "I2C_SDA"),
+		BCM63XX_FUNCTION(5, "GPIO_52")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(53, "GPIO_53"),
+		BCM63XX_FUNCTION(1, "LD0_DIN"),
+		BCM63XX_FUNCTION(2, "I2C_SCL"),
+		BCM63XX_FUNCTION(5, "GPIO_53")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(54, "GPIO_54"),
+		BCM63XX_FUNCTION(1, "LD1_PWRUP"),
+		BCM63XX_FUNCTION(5, "GPIO_54")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(55, "GPIO_55"),
+		BCM63XX_FUNCTION(1, "LD0_DCLK"),
+		BCM63XX_FUNCTION(5, "GPIO_55")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(56, "GPIO_56"),
+		BCM63XX_FUNCTION(1, "PCM_SDIN"),
+		BCM63XX_FUNCTION(5, "GPIO_56")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(57, "GPIO_57"),
+		BCM63XX_FUNCTION(1, "PCM_SDOUT"),
+		BCM63XX_FUNCTION(5, "GPIO_57")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(58, "GPIO_58"),
+		BCM63XX_FUNCTION(1, "PCM_CLK"),
+		BCM63XX_FUNCTION(5, "GPIO_58")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(59, "GPIO_59"),
+		BCM63XX_FUNCTION(1, "PCM_FS"),
+		BCM63XX_FUNCTION(5, "GPIO_59")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(60, "MII1_COL"),
+		BCM63XX_FUNCTION(1, "MII1_COL"),
+		BCM63XX_FUNCTION(5, "GPIO_60")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(61, "MII1_CRS"),
+		BCM63XX_FUNCTION(1, "MII1_CRS"),
+		BCM63XX_FUNCTION(5, "GPIO_61")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(62, "MII1_RXCLK"),
+		BCM63XX_FUNCTION(1, "MII1_RXCLK"),
+		BCM63XX_FUNCTION(5, "GPIO_62")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(63, "MII1_RXER"),
+		BCM63XX_FUNCTION(1, "MII1_RXER"),
+		BCM63XX_FUNCTION(5, "GPIO_63")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(64, "MII1_RXDV"),
+		BCM63XX_FUNCTION(1, "MII1_RXDV"),
+		BCM63XX_FUNCTION(5, "GPIO_64")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(65, "MII_RXD_00"),
+		BCM63XX_FUNCTION(1, "MII_RXD_00"),
+		BCM63XX_FUNCTION(5, "GPIO_65")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(66, "MII_RXD_01"),
+		BCM63XX_FUNCTION(1, "MII_RXD_01"),
+		BCM63XX_FUNCTION(5, "GPIO_66")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(67, "MII_RXD_02"),
+		BCM63XX_FUNCTION(1, "MII_RXD_02"),
+		BCM63XX_FUNCTION(5, "GPIO_67")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(68, "MII_RXD_03"),
+		BCM63XX_FUNCTION(1, "MII_RXD_03"),
+		BCM63XX_FUNCTION(5, "GPIO_68")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(69, "MII_TXCLK"),
+		BCM63XX_FUNCTION(1, "MII_TXCLK"),
+		BCM63XX_FUNCTION(5, "GPIO_69")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(70, "MII_TXEN"),
+		BCM63XX_FUNCTION(1, "MII_TXEN"),
+		BCM63XX_FUNCTION(5, "GPIO_70")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(71, "MII_TXER"),
+		BCM63XX_FUNCTION(1, "MII_TXER"),
+		BCM63XX_FUNCTION(5, "GPIO_71")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(72, "MII_TXD_00"),
+		BCM63XX_FUNCTION(1, "MII_TXD_00"),
+		BCM63XX_FUNCTION(5, "GPIO_72")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(73, "MII_TXD_01"),
+		BCM63XX_FUNCTION(1, "MII_TXD_01"),
+		BCM63XX_FUNCTION(5, "GPIO_73")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(74, "MII_TXD_02"),
+		BCM63XX_FUNCTION(1, "MII_TXD_02"),
+		BCM63XX_FUNCTION(5, "GPIO_74")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(75, "MII_TXD_03"),
+		BCM63XX_FUNCTION(1, "MII_TXD_03"),
+		BCM63XX_FUNCTION(5, "GPIO_75")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(76, "RGMII1_RXCLK"),
+		BCM63XX_FUNCTION(1, "RGMII1_RXCLK"),
+		BCM63XX_FUNCTION(5, "GPIO_76")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(77, "RGMII1_RXCTL"),
+		BCM63XX_FUNCTION(1, "RGMII1_RXCTL"),
+		BCM63XX_FUNCTION(5, "GPIO_77")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(78, "RGMII1_RXD_00"),
+		BCM63XX_FUNCTION(1, "RGMII1_RXD_00"),
+		BCM63XX_FUNCTION(5, "GPIO_78")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(79, "RGMII1_RXD_01"),
+		BCM63XX_FUNCTION(1, "RGMII1_RXD_01"),
+		BCM63XX_FUNCTION(5, "GPIO_79")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(80, "RGMII1_RXD_02"),
+		BCM63XX_FUNCTION(1, "RGMII1_RXD_02"),
+		BCM63XX_FUNCTION(5, "GPIO_80")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(81, "RGMII1_RXD_03"),
+		BCM63XX_FUNCTION(1, "RGMII1_RXD_03"),
+		BCM63XX_FUNCTION(5, "GPIO_81")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(82, "RGMII1_TXCLK"),
+		BCM63XX_FUNCTION(1, "RGMII1_TXCLK"),
+		BCM63XX_FUNCTION(5, "GPIO_82")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(83, "RGMII1_TXCTL"),
+		BCM63XX_FUNCTION(1, "RGMII1_TXCTL"),
+		BCM63XX_FUNCTION(5, "GPIO_83")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(84, "RGMII1_TXD_00"),
+		BCM63XX_FUNCTION(1, "RGMII1_TXD_00"),
+		BCM63XX_FUNCTION(5, "GPIO_84")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(85, "RGMII1_TXD_01"),
+		BCM63XX_FUNCTION(1, "RGMII1_TXD_01"),
+		BCM63XX_FUNCTION(5, "GPIO_85")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(86, "RGMII1_TXD_02"),
+		BCM63XX_FUNCTION(1, "RGMII1_TXD_02"),
+		BCM63XX_FUNCTION(5, "GPIO_86")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(87, "RGMII1_TXD_03"),
+		BCM63XX_FUNCTION(1, "RGMII1_TXD_03"),
+		BCM63XX_FUNCTION(5, "GPIO_87")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(88, "RGMII2_RXCLK"),
+		BCM63XX_FUNCTION(1, "RGMII2_RXCLK"),
+		BCM63XX_FUNCTION(5, "GPIO_88")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(89, "RGMII2_RXCTL"),
+		BCM63XX_FUNCTION(1, "RGMII2_RXCTL"),
+		BCM63XX_FUNCTION(5, "GPIO_89")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(90, "RGMII2_RXD_00"),
+		BCM63XX_FUNCTION(1, "RGMII2_RXD_00"),
+		BCM63XX_FUNCTION(5, "GPIO_90")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(91, "RGMII2_RXD_01"),
+		BCM63XX_FUNCTION(1, "RGMII2_RXD_01"),
+		BCM63XX_FUNCTION(5, "GPIO_91")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(92, "RGMII2_RXD_02"),
+		BCM63XX_FUNCTION(1, "RGMII2_RXD_02"),
+		BCM63XX_FUNCTION(5, "GPIO_92")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(93, "RGMII2_RXD_03"),
+		BCM63XX_FUNCTION(1, "RGMII2_RXD_03"),
+		BCM63XX_FUNCTION(5, "GPIO_93")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(94, "RGMII2_TXCLK"),
+		BCM63XX_FUNCTION(1, "RGMII2_TXCLK"),
+		BCM63XX_FUNCTION(5, "GPIO_94")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(95, "RGMII2_TXCTL"),
+		BCM63XX_FUNCTION(1, "RGMII2_TXCTL"),
+		BCM63XX_FUNCTION(5, "GPIO_95")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(96, "RGMII2_TXD_00"),
+		BCM63XX_FUNCTION(1, "RGMII2_TXD_00"),
+		BCM63XX_FUNCTION(5, "GPIO_96")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(97, "RGMII2_TXD_01"),
+		BCM63XX_FUNCTION(1, "RGMII2_TXD_01"),
+		BCM63XX_FUNCTION(5, "GPIO_97")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(98, "RGMII2_TXD_02"),
+		BCM63XX_FUNCTION(1, "RGMII2_TXD_02"),
+		BCM63XX_FUNCTION(5, "GPIO_98")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(99, "RGMII2_TXD_03"),
+		BCM63XX_FUNCTION(1, "RGMII2_TXD_03"),
+		BCM63XX_FUNCTION(5, "GPIO_99")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(100, "RGMII3_RXCLK"),
+		BCM63XX_FUNCTION(1, "RGMII3_RXCLK"),
+		BCM63XX_FUNCTION(4, "LED_00"),
+		BCM63XX_FUNCTION(5, "GPIO_100")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(101, "RGMII3_RXCTL"),
+		BCM63XX_FUNCTION(1, "RGMII3_RXCTL"),
+		BCM63XX_FUNCTION(4, "LED_01"),
+		BCM63XX_FUNCTION(5, "GPIO_101")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(102, "RGMII3_RXD_00"),
+		BCM63XX_FUNCTION(1, "RGMII3_RXD_00"),
+		BCM63XX_FUNCTION(4, "LED_02"),
+		BCM63XX_FUNCTION(5, "GPIO_102")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(103, "RGMII3_RXD_01"),
+		BCM63XX_FUNCTION(1, "RGMII3_RXD_01"),
+		BCM63XX_FUNCTION(4, "LED_03"),
+		BCM63XX_FUNCTION(5, "GPIO_103")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(104, "RGMII3_RXD_02"),
+		BCM63XX_FUNCTION(1, "RGMII3_RXD_02"),
+		BCM63XX_FUNCTION(4, "LED_04"),
+		BCM63XX_FUNCTION(5, "GPIO_104")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(105, "RGMII3_RXD_03"),
+		BCM63XX_FUNCTION(1, "RGMII3_RXD_03"),
+		BCM63XX_FUNCTION(4, "LED_05"),
+		BCM63XX_FUNCTION(5, "GPIO_105")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(106, "RGMII3_TXCLK"),
+		BCM63XX_FUNCTION(1, "RGMII3_TXCLK"),
+		BCM63XX_FUNCTION(4, "LED_06"),
+		BCM63XX_FUNCTION(5, "GPIO_106")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(107, "RGMII3_TXCTL"),
+		BCM63XX_FUNCTION(1, "RGMII3_TXCTL"),
+		BCM63XX_FUNCTION(4, "LED_07"),
+		BCM63XX_FUNCTION(5, "GPIO_107")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(108, "RGMII3_TXD_00"),
+		BCM63XX_FUNCTION(1, "RGMII3_TXD_00"),
+		BCM63XX_FUNCTION(4, "LED_08"),
+		BCM63XX_FUNCTION(5, "GPIO_108"),
+		BCM63XX_FUNCTION(6, "LED_20")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(109, "RGMII3_TXD_01"),
+		BCM63XX_FUNCTION(1, "RGMII3_TXD_01"),
+		BCM63XX_FUNCTION(4, "LED_09"),
+		BCM63XX_FUNCTION(5, "GPIO_109"),
+		BCM63XX_FUNCTION(6, "LED_21")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(110, "RGMII3_TXD_02"),
+		BCM63XX_FUNCTION(1, "RGMII3_TXD_02"),
+		BCM63XX_FUNCTION(4, "LED_10"),
+		BCM63XX_FUNCTION(5, "GPIO_110")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(111, "RGMII3_TXD_03"),
+		BCM63XX_FUNCTION(1, "RGMII3_TXD_03"),
+		BCM63XX_FUNCTION(4, "LED_11"),
+		BCM63XX_FUNCTION(5, "GPIO_111")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(112, "RGMII_MDC"),
+		BCM63XX_FUNCTION(1, "RGMII_MDC"),
+		BCM63XX_FUNCTION(5, "GPIO_112")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(113, "RGMII_MDIO"),
+		BCM63XX_FUNCTION(1, "RGMII_MDIO"),
+		BCM63XX_FUNCTION(5, "GPIO_113")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(114, "BMU_AC_EN"),
+		BCM63XX_FUNCTION(1, "BMU_AC_EN"),
+		BCM63XX_FUNCTION(5, "GPIO_114")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(115, "BMU_DIS_CTRL"),
+		BCM63XX_FUNCTION(1, "BMU_DIS_CTRL"),
+		BCM63XX_FUNCTION(5, "GPIO_115")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(116, "BMU_ENA"),
+		BCM63XX_FUNCTION(1, "BMU_ENA"),
+		BCM63XX_FUNCTION(5, "GPIO_116")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(117, "BMU_ENB"),
+		BCM63XX_FUNCTION(1, "BMU_ENB"),
+		BCM63XX_FUNCTION(2, "I2C_SDA"),
+		BCM63XX_FUNCTION(5, "GPIO_117")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(118, "BMU_OWA"),
+		BCM63XX_FUNCTION(1, "BMU_OWA"),
+		BCM63XX_FUNCTION(5, "GPIO_118")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(119, "BMU_OWB"),
+		BCM63XX_FUNCTION(1, "BMU_OWB"),
+		BCM63XX_FUNCTION(2, "I2C_SCL"),
+		BCM63XX_FUNCTION(5, "GPIO_119")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(120, "BMU_PWM_OUT"),
+		BCM63XX_FUNCTION(1, "BMU_PWM_OUT"),
+		BCM63XX_FUNCTION(5, "GPIO_120")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(121, "UART0_SIN"),
+		BCM63XX_FUNCTION(1, "UART0_SIN"),
+		BCM63XX_FUNCTION(5, "GPIO_121")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(122, "UART0_SOUT"),
+		BCM63XX_FUNCTION(1, "UART0_SOUT"),
+		BCM63XX_FUNCTION(5, "GPIO_122")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(123, "SPI_CLK"),
+		BCM63XX_FUNCTION(0, "SPI_CLK"),
+		BCM63XX_FUNCTION(5, "GPIO_123")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(124, "SPI_MOSI"),
+		BCM63XX_FUNCTION(0, "SPI_MOSI"),
+		BCM63XX_FUNCTION(5, "GPIO_124")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(125, "SPI_MISO"),
+		BCM63XX_FUNCTION(0, "SPI_MISO"),
+		BCM63XX_FUNCTION(1, "SPI_MISO"),
+		BCM63XX_FUNCTION(5, "GPIO_125")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(126, "SPI_SSB0"),
+		BCM63XX_FUNCTION(0, "SPI_SSB0"),
+		BCM63XX_FUNCTION(1, "SPI_SSB0"),
+		BCM63XX_FUNCTION(5, "GPIO_126")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(127, "SPI_SSB1"),
+		BCM63XX_FUNCTION(0, "SPI_SSB1"),
+		BCM63XX_FUNCTION(1, "SPI_SSB1"),
+		BCM63XX_FUNCTION(5, "GPIO_127")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(128, "PCIE0_CLKREQ_B"),
+		BCM63XX_FUNCTION(0, "PCIE0_CLKREQ_B"),
+		BCM63XX_FUNCTION(5, "GPIO_128")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(129, "PCIE0_RST_B"),
+		BCM63XX_FUNCTION(0, "PCIE0_RST_B"),
+		BCM63XX_FUNCTION(5, "GPIO_129")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(130, "PCIE1_CLKREQ_B"),
+		BCM63XX_FUNCTION(0, "PCIE1_CLKREQ_B"),
+		BCM63XX_FUNCTION(5, "GPIO_130")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(131, "PCIE1_RST_B"),
+		BCM63XX_FUNCTION(0, "PCIE1_RST_B"),
+		BCM63XX_FUNCTION(5, "GPIO_131")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(132, "USB0_PWRFLT"),
+		BCM63XX_FUNCTION(1, "USB0_PWRFLT"),
+		BCM63XX_FUNCTION(5, "GPIO_132")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(133, "USB0_PWRON"),
+		BCM63XX_FUNCTION(1, "USB0_PWRON"),
+		BCM63XX_FUNCTION(5, "GPIO_133")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(134, "USB1_PWRFLT"),
+		BCM63XX_FUNCTION(1, "USB1_PWRFLT"),
+		BCM63XX_FUNCTION(5, "GPIO_134")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(135, "USB1_PWRON"),
+		BCM63XX_FUNCTION(1, "USB1_PWRON"),
+		BCM63XX_FUNCTION(5, "GPIO_135")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(136, "RESET_OUT_B"),
+		BCM63XX_FUNCTION(0, "RESET_OUT_B"),
+		BCM63XX_FUNCTION(5, "GPIO_136")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(137, "DECT_RDI"),
+		BCM63XX_FUNCTION(1, "DECT_RDI"),
+		BCM63XX_FUNCTION(5, "GPIO_137")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(138, "DECT_BTDO"),
+		BCM63XX_FUNCTION(1, "DECT_BTDO"),
+		BCM63XX_FUNCTION(5, "GPIO_138")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(139, "DECT_MWR_LE"),
+		BCM63XX_FUNCTION(1, "DECT_MWR_LE"),
+		BCM63XX_FUNCTION(5, "GPIO_139")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(140, "DECT_MWR_SK"),
+		BCM63XX_FUNCTION(1, "DECT_MWR_SK"),
+		BCM63XX_FUNCTION(5, "GPIO_140")
+		),
+	BCM63XX_PIN(
+		PINCTRL_PIN(141, "DECT_MWR_SIO"),
+		BCM63XX_FUNCTION(1, "DECT_MWR_SIO"),
+		BCM63XX_FUNCTION(5, "GPIO_141")
+		),
+};
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/pinctrl/bcm/pindesc-bcm63158.h	2025-09-25 17:40:34.603362446 +0200
@@ -0,0 +1,759 @@
+static const struct bcm63xx_desc_pin bcm63158_desc_pins[] = {
+	BCM63XX_PIN(
+		PINCTRL_PIN(0, "GPIO_00"),
+		BCM63XX_FUNCTION(1, "A_SER_LED_DATA"),
+		BCM63XX_FUNCTION(4, "A_LED_00"),
+		BCM63XX_FUNCTION(5, "GPIO_00")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(1, "GPIO_01"),
+		BCM63XX_FUNCTION(1, "A_SER_LED_CLK"),
+		BCM63XX_FUNCTION(4, "A_LED_01"),
+		BCM63XX_FUNCTION(5, "GPIO_01")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(2, "GPIO_02"),
+		BCM63XX_FUNCTION(1, "A_SER_LED_MASK"),
+		BCM63XX_FUNCTION(4, "A_LED_02"),
+		BCM63XX_FUNCTION(5, "GPIO_02")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(3, "GPIO_03"),
+		BCM63XX_FUNCTION(1, "A_UART2_CTS"),
+		BCM63XX_FUNCTION(2, "B_PPS_IN"),
+		BCM63XX_FUNCTION(4, "A_LED_03"),
+		BCM63XX_FUNCTION(5, "GPIO_03")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(4, "GPIO_04"),
+		BCM63XX_FUNCTION(1, "A_UART2_RTS"),
+		BCM63XX_FUNCTION(2, "B_PPS_OUT"),
+		BCM63XX_FUNCTION(4, "A_LED_04"),
+		BCM63XX_FUNCTION(5, "GPIO_04")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(5, "GPIO_05"),
+		BCM63XX_FUNCTION(1, "A_UART2_SIN"),
+		BCM63XX_FUNCTION(4, "A_LED_05"),
+		BCM63XX_FUNCTION(5, "GPIO_05")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(6, "GPIO_06"),
+		BCM63XX_FUNCTION(1, "A_UART2_SOUT"),
+		BCM63XX_FUNCTION(4, "A_LED_06"),
+		BCM63XX_FUNCTION(5, "GPIO_06")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(7, "GPIO_07"),
+		BCM63XX_FUNCTION(1, "A_SPIM_SS5_B"),
+		BCM63XX_FUNCTION(2, "B_NTR_OUT"),
+		BCM63XX_FUNCTION(4, "A_LED_07"),
+		BCM63XX_FUNCTION(5, "GPIO_07"),
+		BCM63XX_FUNCTION(6, "B_NTR_IN")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(8, "GPIO_08"),
+		BCM63XX_FUNCTION(1, "A_SPIM_SS4_B"),
+		BCM63XX_FUNCTION(4, "A_LED_08"),
+		BCM63XX_FUNCTION(5, "GPIO_08")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(9, "GPIO_09"),
+		BCM63XX_FUNCTION(1, "A_SPIM_SS3_B"),
+		BCM63XX_FUNCTION(3, "B_USBD_ID"),
+		BCM63XX_FUNCTION(4, "A_LED_09"),
+		BCM63XX_FUNCTION(5, "GPIO_09"),
+		BCM63XX_FUNCTION(6, "A_AE_SERDES_MOD_DEF0")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(10, "GPIO_10"),
+		BCM63XX_FUNCTION(1, "A_SPIM_SS2_B"),
+		BCM63XX_FUNCTION(2, "A_PMD_EXT_LOS"),
+		BCM63XX_FUNCTION(3, "B_USBD_VBUS_PRESENT"),
+		BCM63XX_FUNCTION(4, "A_LED_10"),
+		BCM63XX_FUNCTION(5, "GPIO_10"),
+		BCM63XX_FUNCTION(6, "A_AE_FIBER_DETECT")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(11, "GPIO_11"),
+		BCM63XX_FUNCTION(2, "A_I2C_SDA_0"),
+		BCM63XX_FUNCTION(4, "A_LED_11"),
+		BCM63XX_FUNCTION(5, "GPIO_11")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(12, "GPIO_12"),
+		BCM63XX_FUNCTION(1, "A_PPS_IN"),
+		BCM63XX_FUNCTION(2, "A_I2C_SCL_0"),
+		BCM63XX_FUNCTION(4, "A_LED_12"),
+		BCM63XX_FUNCTION(5, "GPIO_12"),
+		BCM63XX_FUNCTION(6, "C_SGMII_SERDES_MOD_DEF0")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(13, "GPIO_13"),
+		BCM63XX_FUNCTION(1, "A_PPS_OUT"),
+		BCM63XX_FUNCTION(4, "A_LED_13"),
+		BCM63XX_FUNCTION(5, "GPIO_13")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(14, "GPIO_14"),
+		BCM63XX_FUNCTION(1, "A_NTR_OUT"),
+		BCM63XX_FUNCTION(2, "I2S_RX_SDATA"),
+		BCM63XX_FUNCTION(4, "A_LED_14"),
+		BCM63XX_FUNCTION(5, "GPIO_14"),
+		BCM63XX_FUNCTION(6, "A_NTR_IN")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(15, "GPIO_15"),
+		BCM63XX_FUNCTION(2, "SW_SPIS_CLK"),
+		BCM63XX_FUNCTION(4, "A_LED_15"),
+		BCM63XX_FUNCTION(5, "GPIO_15"),
+		BCM63XX_FUNCTION(6, "B_I2C_SDA_1")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(16, "GPIO_16"),
+		BCM63XX_FUNCTION(2, "SW_SPIS_SS_B"),
+		BCM63XX_FUNCTION(4, "A_LED_16"),
+		BCM63XX_FUNCTION(5, "GPIO_16"),
+		BCM63XX_FUNCTION(6, "B_I2C_SCL_1")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(17, "GPIO_17"),
+		BCM63XX_FUNCTION(2, "SW_SPIS_MISO"),
+		BCM63XX_FUNCTION(4, "A_LED_17"),
+		BCM63XX_FUNCTION(5, "GPIO_17"),
+		BCM63XX_FUNCTION(6, "C_UART3_SIN")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(18, "GPIO_18"),
+		BCM63XX_FUNCTION(2, "SW_SPIS_MOSI"),
+		BCM63XX_FUNCTION(4, "A_LED_18"),
+		BCM63XX_FUNCTION(5, "GPIO_18"),
+		BCM63XX_FUNCTION(6, "C_UART3_SOUT")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(19, "GPIO_19"),
+		BCM63XX_FUNCTION(2, "VREG_SYNC"),
+		BCM63XX_FUNCTION(4, "A_LED_19"),
+		BCM63XX_FUNCTION(5, "GPIO_19"),
+		BCM63XX_FUNCTION(6, "A_SGMII_FIBER_DETECT")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(20, "GPIO_20"),
+		BCM63XX_FUNCTION(1, "SPIS_CLK"),
+		BCM63XX_FUNCTION(2, "B_UART2_CTS"),
+		BCM63XX_FUNCTION(3, "B_UART3_SIN"),
+		BCM63XX_FUNCTION(4, "A_LED_20"),
+		BCM63XX_FUNCTION(5, "GPIO_20"),
+		BCM63XX_FUNCTION(6, "A_SGMII_SERDES_MOD_DEF0")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(21, "GPIO_21"),
+		BCM63XX_FUNCTION(1, "SPIS_SS_B"),
+		BCM63XX_FUNCTION(2, "B_UART2_RTS"),
+		BCM63XX_FUNCTION(3, "B_UART3_SOUT"),
+		BCM63XX_FUNCTION(4, "A_LED_21"),
+		BCM63XX_FUNCTION(5, "GPIO_21"),
+		BCM63XX_FUNCTION(6, "C_SGMII_FIBER_DETECT")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(22, "GPIO_22"),
+		BCM63XX_FUNCTION(1, "SPIS_MISO"),
+		BCM63XX_FUNCTION(2, "B_UART2_SOUT"),
+		BCM63XX_FUNCTION(4, "A_LED_22"),
+		BCM63XX_FUNCTION(5, "GPIO_22")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(23, "GPIO_23"),
+		BCM63XX_FUNCTION(1, "SPIS_MOSI"),
+		BCM63XX_FUNCTION(2, "B_UART2_SIN"),
+		BCM63XX_FUNCTION(4, "A_LED_23"),
+		BCM63XX_FUNCTION(5, "GPIO_23")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(24, "GPIO_24"),
+		BCM63XX_FUNCTION(2, "B_UART1_SOUT"),
+		BCM63XX_FUNCTION(3, "B_I2C_SDA_0"),
+		BCM63XX_FUNCTION(4, "A_LED_24"),
+		BCM63XX_FUNCTION(5, "GPIO_24")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(25, "GPIO_25"),
+		BCM63XX_FUNCTION(1, "B_SPIM_SS2_B"),
+		BCM63XX_FUNCTION(2, "B_UART1_SIN"),
+		BCM63XX_FUNCTION(3, "B_I2C_SCL_0"),
+		BCM63XX_FUNCTION(4, "A_LED_25"),
+		BCM63XX_FUNCTION(5, "GPIO_25")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(26, "GPIO_26"),
+		BCM63XX_FUNCTION(1, "B_SPIM_SS3_B"),
+		BCM63XX_FUNCTION(2, "A_I2C_SDA_1"),
+		BCM63XX_FUNCTION(3, "A_UART3_SIN"),
+		BCM63XX_FUNCTION(4, "A_LED_26"),
+		BCM63XX_FUNCTION(5, "GPIO_26")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(27, "GPIO_27"),
+		BCM63XX_FUNCTION(1, "B_SPIM_SS4_B"),
+		BCM63XX_FUNCTION(2, "A_I2C_SCL_1"),
+		BCM63XX_FUNCTION(3, "A_UART3_SOUT"),
+		BCM63XX_FUNCTION(4, "A_LED_27"),
+		BCM63XX_FUNCTION(5, "GPIO_27")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(28, "GPIO_28"),
+		BCM63XX_FUNCTION(1, "B_SPIM_SS5_B"),
+		BCM63XX_FUNCTION(2, "I2S_MCLK"),
+		BCM63XX_FUNCTION(4, "A_LED_28"),
+		BCM63XX_FUNCTION(5, "GPIO_28")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(29, "GPIO_29"),
+		BCM63XX_FUNCTION(1, "B_SER_LED_DATA"),
+		BCM63XX_FUNCTION(2, "I2S_LRCK"),
+		BCM63XX_FUNCTION(4, "A_LED_29"),
+		BCM63XX_FUNCTION(5, "GPIO_29")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(30, "GPIO_30"),
+		BCM63XX_FUNCTION(1, "B_SER_LED_CLK"),
+		BCM63XX_FUNCTION(2, "I2S_SCLK"),
+		BCM63XX_FUNCTION(4, "A_LED_30"),
+		BCM63XX_FUNCTION(5, "GPIO_30")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(31, "GPIO_31"),
+		BCM63XX_FUNCTION(1, "B_SER_LED_MASK"),
+		BCM63XX_FUNCTION(2, "I2S_TX_SDATA"),
+		BCM63XX_FUNCTION(4, "A_LED_31"),
+		BCM63XX_FUNCTION(5, "GPIO_31")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(32, "GPIO_32"),
+		BCM63XX_FUNCTION(2, "VDSL_CTRL0"),
+		BCM63XX_FUNCTION(5, "GPIO_32")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(33, "GPIO_33"),
+		BCM63XX_FUNCTION(2, "VDSL_CTRL_1"),
+		BCM63XX_FUNCTION(3, "B_WAN_EARLY_TXEN"),
+		BCM63XX_FUNCTION(5, "GPIO_33")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(34, "GPIO_34"),
+		BCM63XX_FUNCTION(2, "VDSL_CTRL_2"),
+		BCM63XX_FUNCTION(3, "B_ROGUE_IN"),
+		BCM63XX_FUNCTION(5, "GPIO_34")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(35, "GPIO_35"),
+		BCM63XX_FUNCTION(2, "VDSL_CTRL_3"),
+		BCM63XX_FUNCTION(3, "B_SGMII_FIBER_DETECT"),
+		BCM63XX_FUNCTION(5, "GPIO_35")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(36, "GPIO_36"),
+		BCM63XX_FUNCTION(2, "VDSL_CTRL_4"),
+		BCM63XX_FUNCTION(3, "B_SGMII_SERDES_MOD_DEF0"),
+		BCM63XX_FUNCTION(5, "GPIO_36")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(37, "GPIO_37"),
+		BCM63XX_FUNCTION(1, "B_PMD_EXT_LOS"),
+		BCM63XX_FUNCTION(2, "VDSL_CTRL_5"),
+		BCM63XX_FUNCTION(3, "B_AE_FIBER_DETECT"),
+		BCM63XX_FUNCTION(5, "GPIO_37")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(38, "GPIO_38"),
+		BCM63XX_FUNCTION(2, "B_VREG_SYNC"),
+		BCM63XX_FUNCTION(3, "B_AE_SERDES_MOD_DEF0"),
+		BCM63XX_FUNCTION(5, "GPIO_38")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(39, "GPIO_39"),
+		BCM63XX_FUNCTION(2, "A_WAN_EARLY_TXEN"),
+		BCM63XX_FUNCTION(5, "GPIO_39")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(40, "GPIO_40"),
+		BCM63XX_FUNCTION(2, "A_ROGUE_IN"),
+		BCM63XX_FUNCTION(5, "GPIO_40")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(41, "GPIO_41"),
+		BCM63XX_FUNCTION(2, "SYS_IRQ_OUT"),
+		BCM63XX_FUNCTION(3, "C_WAN_EARLY_TXEN"),
+		BCM63XX_FUNCTION(5, "GPIO_41")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(42, "GPIO_42"),
+		BCM63XX_FUNCTION(1, "PCM_SDIN"),
+		BCM63XX_FUNCTION(4, "A_UART1_SIN"),
+		BCM63XX_FUNCTION(5, "GPIO_42")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(43, "GPIO_43"),
+		BCM63XX_FUNCTION(1, "PCM_SDOUT"),
+		BCM63XX_FUNCTION(4, "A_UART1_SOUT"),
+		BCM63XX_FUNCTION(5, "GPIO_43")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(44, "GPIO_44"),
+		BCM63XX_FUNCTION(1, "PCM_CLK"),
+		BCM63XX_FUNCTION(4, "A_USBD_VBUS_PRESENT"),
+		BCM63XX_FUNCTION(5, "GPIO_44")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(45, "GPIO_45"),
+		BCM63XX_FUNCTION(1, "PCM_FS"),
+		BCM63XX_FUNCTION(4, "A_USBD_ID"),
+		BCM63XX_FUNCTION(5, "GPIO_45")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(46, "GPIO_46"),
+		BCM63XX_FUNCTION(2, "C_VREG_SYNC"),
+		BCM63XX_FUNCTION(5, "GPIO_46")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(47, "GPIO_47"),
+		BCM63XX_FUNCTION(3, "NAND_WP"),
+		BCM63XX_FUNCTION(5, "GPIO_47")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(48, "GPIO_48"),
+		BCM63XX_FUNCTION(3, "NAND_CE_B"),
+		BCM63XX_FUNCTION(5, "GPIO_48")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(49, "GPIO_49"),
+		BCM63XX_FUNCTION(3, "NAND_RE_B"),
+		BCM63XX_FUNCTION(5, "GPIO_49")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(50, "GPIO_50"),
+		BCM63XX_FUNCTION(3, "NAND_RB_B"),
+		BCM63XX_FUNCTION(5, "GPIO_50")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(51, "GPIO_51"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_0"),
+		BCM63XX_FUNCTION(5, "GPIO_51")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(52, "GPIO_52"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_1"),
+		BCM63XX_FUNCTION(5, "GPIO_52")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(53, "GPIO_53"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_2"),
+		BCM63XX_FUNCTION(5, "GPIO_53")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(54, "GPIO_54"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_3"),
+		BCM63XX_FUNCTION(5, "GPIO_54")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(55, "GPIO_55"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_4"),
+		BCM63XX_FUNCTION(5, "GPIO_55")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(56, "GPIO_56"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_5"),
+		BCM63XX_FUNCTION(5, "GPIO_56")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(57, "GPIO_57"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_6"),
+		BCM63XX_FUNCTION(5, "GPIO_57")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(58, "GPIO_58"),
+		BCM63XX_FUNCTION(3, "NAND_DATA_7"),
+		BCM63XX_FUNCTION(5, "GPIO_58")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(59, "GPIO_59"),
+		BCM63XX_FUNCTION(3, "NAND_ALE"),
+		BCM63XX_FUNCTION(5, "GPIO_59")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(60, "GPIO_60"),
+		BCM63XX_FUNCTION(3, "NAND_WE_B"),
+		BCM63XX_FUNCTION(5, "GPIO_60")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(61, "GPIO_61"),
+		BCM63XX_FUNCTION(3, "NAND_CLE"),
+		BCM63XX_FUNCTION(5, "GPIO_61")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(62, "GPIO_62"),
+		BCM63XX_FUNCTION(2, "NAND_CE2_B"),
+		BCM63XX_FUNCTION(3, "EMMC_CLK"),
+		BCM63XX_FUNCTION(5, "GPIO_62")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(63, "GPIO_63"),
+		BCM63XX_FUNCTION(2, "NAND_CE1_B"),
+		BCM63XX_FUNCTION(3, "EMMC_CMD"),
+		BCM63XX_FUNCTION(5, "GPIO_63")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(64, "GPIO_64"),
+		BCM63XX_FUNCTION(1, "RGMII0_RXCLK"),
+		BCM63XX_FUNCTION(5, "GPIO_64"),
+		BCM63XX_FUNCTION(6, "B_LED_00")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(65, "GPIO_65"),
+		BCM63XX_FUNCTION(5, "GPIO_65"),
+		BCM63XX_FUNCTION(6, "B_LED_01")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(66, "GPIO_66"),
+		BCM63XX_FUNCTION(1, "RGMII0_RXCTL"),
+		BCM63XX_FUNCTION(5, "GPIO_66"),
+		BCM63XX_FUNCTION(6, "B_LED_02")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(67, "GPIO_67"),
+		BCM63XX_FUNCTION(1, "RGMII0_RXD_0"),
+		BCM63XX_FUNCTION(5, "GPIO_67"),
+		BCM63XX_FUNCTION(6, "B_LED_03")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(68, "GPIO_68"),
+		BCM63XX_FUNCTION(1, "RGMII0_RXD_1"),
+		BCM63XX_FUNCTION(5, "GPIO_68"),
+		BCM63XX_FUNCTION(6, "B_LED_04")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(69, "GPIO_69"),
+		BCM63XX_FUNCTION(1, "RGMII0_RXD_2"),
+		BCM63XX_FUNCTION(5, "GPIO_69"),
+		BCM63XX_FUNCTION(6, "B_LED_05")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(70, "GPIO_70"),
+		BCM63XX_FUNCTION(1, "RGMII0_RXD_3"),
+		BCM63XX_FUNCTION(5, "GPIO_70"),
+		BCM63XX_FUNCTION(6, "B_LED_06")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(71, "GPIO_71"),
+		BCM63XX_FUNCTION(1, "RGMII0_TXCLK"),
+		BCM63XX_FUNCTION(5, "GPIO_71"),
+		BCM63XX_FUNCTION(6, "B_LED_07")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(72, "GPIO_72"),
+		BCM63XX_FUNCTION(1, "RGMII0_TXCTL"),
+		BCM63XX_FUNCTION(5, "GPIO_72"),
+		BCM63XX_FUNCTION(6, "B_LED_08")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(73, "GPIO_73"),
+		BCM63XX_FUNCTION(5, "GPIO_73"),
+		BCM63XX_FUNCTION(6, "B_LED_09")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(74, "GPIO_74"),
+		BCM63XX_FUNCTION(1, "RGMII0_TXD_0"),
+		BCM63XX_FUNCTION(5, "GPIO_74"),
+		BCM63XX_FUNCTION(6, "B_LED_10")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(75, "GPIO_75"),
+		BCM63XX_FUNCTION(1, "RGMII0_TXD_1"),
+		BCM63XX_FUNCTION(5, "GPIO_75"),
+		BCM63XX_FUNCTION(6, "B_LED_11")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(76, "GPIO_76"),
+		BCM63XX_FUNCTION(1, "RGMII0_TXD_2"),
+		BCM63XX_FUNCTION(5, "GPIO_76"),
+		BCM63XX_FUNCTION(6, "B_LED_12")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(77, "GPIO_77"),
+		BCM63XX_FUNCTION(1, "RGMII0_TXD_3"),
+		BCM63XX_FUNCTION(5, "GPIO_77"),
+		BCM63XX_FUNCTION(6, "B_LED_13")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(78, "GPIO_78"),
+		BCM63XX_FUNCTION(5, "GPIO_78"),
+		BCM63XX_FUNCTION(6, "B_LED_14")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(79, "GPIO_79"),
+		BCM63XX_FUNCTION(5, "GPIO_79"),
+		BCM63XX_FUNCTION(6, "B_LED_15")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(80, "GPIO_80"),
+		BCM63XX_FUNCTION(1, "RGMII1_RXCLK"),
+		BCM63XX_FUNCTION(5, "GPIO_80"),
+		BCM63XX_FUNCTION(6, "B_LED_16")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(81, "GPIO_81"),
+		BCM63XX_FUNCTION(1, "RGMII1_RXCTL"),
+		BCM63XX_FUNCTION(5, "GPIO_81"),
+		BCM63XX_FUNCTION(6, "B_LED_17")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(82, "GPIO_82"),
+		BCM63XX_FUNCTION(1, "RGMII1_RXD_0"),
+		BCM63XX_FUNCTION(5, "GPIO_82"),
+		BCM63XX_FUNCTION(6, "B_LED_18")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(83, "GPIO_83"),
+		BCM63XX_FUNCTION(1, "RGMII1_RXD_1"),
+		BCM63XX_FUNCTION(5, "GPIO_83"),
+		BCM63XX_FUNCTION(6, "B_LED_19")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(84, "GPIO_84"),
+		BCM63XX_FUNCTION(1, "RGMII1_RXD_2"),
+		BCM63XX_FUNCTION(5, "GPIO_84"),
+		BCM63XX_FUNCTION(6, "B_LED_20")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(85, "GPIO_85"),
+		BCM63XX_FUNCTION(1, "RGMII1_RXD_3"),
+		BCM63XX_FUNCTION(5, "GPIO_85"),
+		BCM63XX_FUNCTION(6, "B_LED_21")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(86, "GPIO_86"),
+		BCM63XX_FUNCTION(1, "RGMII1_TXCLK"),
+		BCM63XX_FUNCTION(5, "GPIO_86"),
+		BCM63XX_FUNCTION(6, "B_LED_22")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(87, "GPIO_87"),
+		BCM63XX_FUNCTION(1, "RGMII1_TXCTL"),
+		BCM63XX_FUNCTION(5, "GPIO_87"),
+		BCM63XX_FUNCTION(6, "B_LED_23")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(88, "GPIO_88"),
+		BCM63XX_FUNCTION(1, "RGMII1_TXD_0"),
+		BCM63XX_FUNCTION(5, "GPIO_88"),
+		BCM63XX_FUNCTION(6, "B_LED_24")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(89, "GPIO_89"),
+		BCM63XX_FUNCTION(1, "RGMII1_TXD_1"),
+		BCM63XX_FUNCTION(5, "GPIO_89"),
+		BCM63XX_FUNCTION(6, "B_LED_25")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(90, "GPIO_90"),
+		BCM63XX_FUNCTION(1, "RGMII1_TXD_2"),
+		BCM63XX_FUNCTION(5, "GPIO_90"),
+		BCM63XX_FUNCTION(6, "B_LED_26")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(91, "GPIO_91"),
+		BCM63XX_FUNCTION(1, "RGMII1_TXD_3"),
+		BCM63XX_FUNCTION(5, "GPIO_91"),
+		BCM63XX_FUNCTION(6, "B_LED_27")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(92, "GPIO_92"),
+		BCM63XX_FUNCTION(1, "RGMII2_RXCLK"),
+		BCM63XX_FUNCTION(5, "GPIO_92"),
+		BCM63XX_FUNCTION(6, "B_LED_28")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(93, "GPIO_93"),
+		BCM63XX_FUNCTION(1, "RGMII2_RXCTL"),
+		BCM63XX_FUNCTION(5, "GPIO_93"),
+		BCM63XX_FUNCTION(6, "B_LED_29")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(94, "GPIO_94"),
+		BCM63XX_FUNCTION(1, "RGMII2_RXD_0"),
+		BCM63XX_FUNCTION(5, "GPIO_94"),
+		BCM63XX_FUNCTION(6, "B_LED_30")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(95, "GPIO_95"),
+		BCM63XX_FUNCTION(1, "RGMII2_RXD_1"),
+		BCM63XX_FUNCTION(5, "GPIO_95"),
+		BCM63XX_FUNCTION(6, "B_LED_31")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(96, "GPIO_96"),
+		BCM63XX_FUNCTION(1, "RGMII2_RXD_2"),
+		BCM63XX_FUNCTION(5, "GPIO_96")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(97, "GPIO_97"),
+		BCM63XX_FUNCTION(1, "RGMII2_RXD_3"),
+		BCM63XX_FUNCTION(5, "GPIO_97")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(98, "GPIO_98"),
+		BCM63XX_FUNCTION(1, "RGMII2_TXCLK"),
+		BCM63XX_FUNCTION(5, "GPIO_98")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(99, "GPIO_99"),
+		BCM63XX_FUNCTION(1, "RGMII2_TXCTL"),
+		BCM63XX_FUNCTION(5, "GPIO_99")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(100, "GPIO_100"),
+		BCM63XX_FUNCTION(1, "RGMII2_TXD_0"),
+		BCM63XX_FUNCTION(5, "GPIO_100")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(101, "GPIO_101"),
+		BCM63XX_FUNCTION(1, "RGMII2_TXD_1"),
+		BCM63XX_FUNCTION(5, "GPIO_101")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(102, "GPIO_102"),
+		BCM63XX_FUNCTION(1, "RGMII2_TXD_2"),
+		BCM63XX_FUNCTION(5, "GPIO_102")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(103, "GPIO_103"),
+		BCM63XX_FUNCTION(1, "RGMII2_TXD_3"),
+		BCM63XX_FUNCTION(5, "GPIO_103")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(104, "GPIO_104"),
+		BCM63XX_FUNCTION(1, "RGMII_MDC"),
+		BCM63XX_FUNCTION(5, "GPIO_104")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(105, "GPIO_105"),
+		BCM63XX_FUNCTION(1, "RGMII_MDIO"),
+		BCM63XX_FUNCTION(5, "GPIO_105")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(106, "GPIO_106"),
+		BCM63XX_FUNCTION(1, "UART0_SDIN"),
+		BCM63XX_FUNCTION(5, "GPIO_106")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(107, "GPIO_107"),
+		BCM63XX_FUNCTION(1, "UART0_SDOUT"),
+		BCM63XX_FUNCTION(5, "GPIO_107")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(108, "GPIO_108"),
+		BCM63XX_FUNCTION(0, "SPIM_CLK"),
+		BCM63XX_FUNCTION(5, "GPIO_108")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(109, "GPIO_109"),
+		BCM63XX_FUNCTION(0, "SPIM_MOSI"),
+		BCM63XX_FUNCTION(5, "GPIO_109")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(110, "GPIO_110"),
+		BCM63XX_FUNCTION(0, "SPIM_MISO"),
+		BCM63XX_FUNCTION(5, "GPIO_110")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(111, "GPIO_111"),
+		BCM63XX_FUNCTION(0, "SPIM_SS0_B"),
+		BCM63XX_FUNCTION(5, "GPIO_111")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(112, "GPIO_112"),
+		BCM63XX_FUNCTION(0, "SPIM_SS1_B"),
+		BCM63XX_FUNCTION(5, "GPIO_112")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(113, "GPIO_113"),
+		BCM63XX_FUNCTION(1, "PCIE0a_CLKREQ_B"),
+		BCM63XX_FUNCTION(2, "PCIE2b_CLKREQ_B"),
+		BCM63XX_FUNCTION(3, "PCIE1c_CLKREQ_B"),
+		BCM63XX_FUNCTION(5, "GPIO_113")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(114, "GPIO_114"),
+		BCM63XX_FUNCTION(1, "PCIE0a_RST_B"),
+		BCM63XX_FUNCTION(2, "PCIE2b_RST_B"),
+		BCM63XX_FUNCTION(3, "PCIE1c_RST_B"),
+		BCM63XX_FUNCTION(5, "GPIO_114")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(115, "GPIO_115"),
+		BCM63XX_FUNCTION(1, "PCIE1a_CLKREQ_B"),
+		BCM63XX_FUNCTION(2, "PCIE0b_CLKREQ_B"),
+		BCM63XX_FUNCTION(3, "PCIE2c_CLKREQ_B"),
+		BCM63XX_FUNCTION(5, "GPIO_115")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(116, "GPIO_116"),
+		BCM63XX_FUNCTION(1, "PCIE1a_RST_B"),
+		BCM63XX_FUNCTION(2, "PCIE0b_RST_B"),
+		BCM63XX_FUNCTION(3, "PCIE2c_RST_B"),
+		BCM63XX_FUNCTION(5, "GPIO_116")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(117, "GPIO_117"),
+		BCM63XX_FUNCTION(1, "PCIE2a_CLKREQ_B"),
+		BCM63XX_FUNCTION(2, "PCIE1b_CLKREQ_B"),
+		BCM63XX_FUNCTION(3, "PCIE0c_CLKREQ_B"),
+		BCM63XX_FUNCTION(5, "GPIO_117")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(118, "GPIO_118"),
+		BCM63XX_FUNCTION(1, "PCIE2a_RST_B"),
+		BCM63XX_FUNCTION(2, "PCIE1b_RST_B"),
+		BCM63XX_FUNCTION(3, "PCIE0c_RST_B"),
+		BCM63XX_FUNCTION(5, "GPIO_118")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(119, "GPIO_119"),
+		BCM63XX_FUNCTION(1, "PCIE3_CLKREQ_B"),
+		BCM63XX_FUNCTION(5, "GPIO_119")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(120, "GPIO_120"),
+		BCM63XX_FUNCTION(0, "PCIE3_RST_B"),
+		BCM63XX_FUNCTION(5, "GPIO_120")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(121, "GPIO_121"),
+		BCM63XX_FUNCTION(1, "USB0a_PWRFLT"),
+		BCM63XX_FUNCTION(2, "USB1b_PWRFLT"),
+		BCM63XX_FUNCTION(5, "GPIO_121")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(122, "GPIO_122"),
+		BCM63XX_FUNCTION(1, "USB0a_PWRON"),
+		BCM63XX_FUNCTION(2, "USB1b_PWRON"),
+		BCM63XX_FUNCTION(5, "GPIO_122")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(123, "GPIO_123"),
+		BCM63XX_FUNCTION(1, "USB1a_PWRFLT"),
+		BCM63XX_FUNCTION(2, "USB0b_PWRFLT"),
+		BCM63XX_FUNCTION(5, "GPIO_123")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(124, "GPIO_124"),
+		BCM63XX_FUNCTION(1, "USB1a_PWRON"),
+		BCM63XX_FUNCTION(2, "USB0b_PWRON"),
+		BCM63XX_FUNCTION(5, "GPIO_124")
+	),
+	BCM63XX_PIN(
+		PINCTRL_PIN(125, "GPIO_125"),
+		BCM63XX_FUNCTION(0, "RESET_OUT_B"),
+		BCM63XX_FUNCTION(5, "GPIO_125")
+	),
+};
diff -Nruw linux-6.13.12-fbx/drivers/pinctrl/cortina./Makefile linux-6.13.12-fbx/drivers/pinctrl/cortina/Makefile
--- linux-6.13.12-fbx/drivers/pinctrl/cortina./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/pinctrl/cortina/Makefile	2025-09-25 17:40:34.603362446 +0200
@@ -0,0 +1,5 @@
+obj-y += pinctrl-cortina.o
+
+# pins definitions
+obj-y += pinctrl-ca8271-pins.o \
+	 pinctrl-ca8289-pins.o
diff -Nruw linux-6.13.12-fbx/drivers/platform/fbxgw7r./Kconfig linux-6.13.12-fbx/drivers/platform/fbxgw7r/Kconfig
--- linux-6.13.12-fbx/drivers/platform/fbxgw7r./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/platform/fbxgw7r/Kconfig	2025-09-25 17:40:34.671362783 +0200
@@ -0,0 +1,6 @@
+config FBXGW7R_PLATFORM
+	bool "Freebox Gateway V7 specific drivers"
+
+config FBXGW7R_SWITCH
+	bool "Freebox Gateway V7 in kernel switch init code."
+	depends on FBXGW7R_PLATFORM
diff -Nruw linux-6.13.12-fbx/drivers/platform/fbxgw7r./Makefile linux-6.13.12-fbx/drivers/platform/fbxgw7r/Makefile
--- linux-6.13.12-fbx/drivers/platform/fbxgw7r./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/platform/fbxgw7r/Makefile	2025-09-25 17:40:34.671362783 +0200
@@ -0,0 +1 @@
+obj-$(CONFIG_FBXGW7R_SWITCH)	+= fbxgw7r-switch.o
diff -Nruw linux-6.13.12-fbx/drivers/platform/intelce./Kconfig linux-6.13.12-fbx/drivers/platform/intelce/Kconfig
--- linux-6.13.12-fbx/drivers/platform/intelce./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/platform/intelce/Kconfig	2025-09-25 17:40:34.675362803 +0200
@@ -0,0 +1,18 @@
+#
+# IntelCE devices configuration
+#
+
+menu "IntelCE devices"
+
+config INTELCE_GPIO
+	tristate "GPIO support"
+	select GPIOLIB
+	help
+	  IntelCE 3100/4100 GPIO support.
+
+config INTELCE_DFX
+	tristate "DFX reporting support"
+	help
+	  IntelCE 3100/4100 DFX fuse reporting support.
+
+endmenu
diff -Nruw linux-6.13.12-fbx/drivers/platform/intelce./Makefile linux-6.13.12-fbx/drivers/platform/intelce/Makefile
--- linux-6.13.12-fbx/drivers/platform/intelce./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/platform/intelce/Makefile	2025-09-25 17:40:34.675362803 +0200
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTELCE_GPIO)	+= gpio-intelce.o
+obj-$(CONFIG_INTELCE_DFX)	+= dfx.o
diff -Nruw linux-6.13.12-fbx/drivers/platform/ipq./Kconfig linux-6.13.12-fbx/drivers/platform/ipq/Kconfig
--- linux-6.13.12-fbx/drivers/platform/ipq./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/platform/ipq/Kconfig	2025-09-25 17:40:34.675362803 +0200
@@ -0,0 +1,19 @@
+
+menuconfig QCOM_IPQ_PLATFORM
+	bool "Qualcomm IPQ Platform Specific Device Drivers"
+	default y
+	depends on ARCH_QCOM || COMPILE_TEST
+	help
+	  Say Y here to get to see options for device drivers for
+	  various Qualcomm IPQ platforms.  This option alone does not
+	  add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped
+	  and disabled.
+
+if QCOM_IPQ_PLATFORM
+
+config IPQ_SEC_UPGRADE
+	bool "Qualcomm IPQ sec-upgrade driver."
+
+endif
diff -Nruw linux-6.13.12-fbx/drivers/platform/ipq./Makefile linux-6.13.12-fbx/drivers/platform/ipq/Makefile
--- linux-6.13.12-fbx/drivers/platform/ipq./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/platform/ipq/Makefile	2025-09-25 17:40:34.675362803 +0200
@@ -0,0 +1 @@
+obj-$(CONFIG_IPQ_SEC_UPGRADE)	+= sec-upgrade.o
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./Kconfig linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/Kconfig
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/Kconfig	2025-09-25 17:40:35.059364707 +0200
@@ -0,0 +1,29 @@
+# SPDX-License-Identifier: GPL-2.0-only
+if SOC_BCM63XX
+
+config SOC_BCM63XX_RDP
+	bool "rdp subsystem"
+	depends on ARCH_BCMBCA || COMPILE_TEST
+
+config SOC_BCM63XX_XRDP
+	tristate "xrdp subsystem"
+	depends on ARCH_BCMBCA || COMPILE_TEST
+	select UBUS4_BCM63158
+
+config SOC_BCM63XX_XRDP_IOCTL
+	bool "ioctl interface"
+	depends on SOC_BCM63XX_XRDP
+
+config UBUS4_BCM63158
+	bool "Broadcom 63158 UBUS4 driver"
+	depends on ARCH_BCMBCA || COMPILE_TEST
+
+config SOC_MEMC_BCM63158
+	tristate "Broadcom 63158 MEMC driver"
+	depends on ARCH_BCMBCA || COMPILE_TEST
+
+config PROCMON_BCM63158
+	bool "Broadcom 63158 PROCMON driver"
+	depends on ARCH_BCMBCA || COMPILE_TEST
+
+endif # SOC_BCM63XX
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./Makefile linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/Makefile
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/Makefile	2025-09-25 17:40:35.059364707 +0200
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ARCH_BCMBCA) += pmc.o
+obj-$(CONFIG_SOC_BCM63XX_RDP) += rdp/
+obj-$(CONFIG_SOC_BCM63XX_XRDP) += xrdp/
+obj-$(CONFIG_UBUS4_BCM63158)	+= ubus4-bcm63158.o
+obj-$(CONFIG_PROCMON_BCM63158)	+= procmon-bcm63158.o
+obj-$(CONFIG_SOC_MEMC_BCM63158)	+= memc-bcm63158.o
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./bpcm_defs.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/bpcm_defs.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./bpcm_defs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/bpcm_defs.h	2025-09-25 17:40:35.063364727 +0200
@@ -0,0 +1,76 @@
+#ifndef BPCM_DEFS_H_
+#define BPCM_DEFS_H_
+
+/*
+ * BPCM (Block Power Control Module) registers definitions
+ */
+#define _F(value, shift, mask)			(((value) & mask) << shift)
+#define BPCM_SR_CONTROL_REG			0x28
+
+#define BPCM_ZONE_REG(z)			(0x40 + 0x10 * (z))
+#define  BPCM_ZONE_MANUAL_CLK_EN		(1 << 0)
+#define  BPCM_ZONE_MANUAL_RESET_CTL		(1 << 1)
+#define  BPCM_ZONE_FREQ_SCALE_USED		(1 << 2)
+#define  BPCM_ZONE_DPG_CAPABLE			(1 << 3)
+#define  BPCM_ZONE_MANUAL_MEM_PWR(x)		_F(x, 4, 0x3)
+#define  BPCM_ZONE_MANUAL_MEM_PWR_MASK		_F(0x3, 4, 0x3)
+#define  BPCM_ZONE_MANUAL_ISO_CTL		(1 << 6)
+#define  BPCM_ZONE_MANUAL_CTL			(1 << 7)
+#define  BPCM_ZONE_DPG_CTL_EN			(1 << 8)
+#define  BPCM_ZONE_PWR_DN_REQ			(1 << 9)
+#define  BPCM_ZONE_PWR_UP_REQ			(1 << 10)
+#define  BPCM_ZONE_MEM_PWR_CTL_EN		(1 << 11)
+#define  BPCM_ZONE_BLK_RESET_ASSERT		(1 << 12)
+#define  BPCM_ZONE_PWR_CNTL_STATE(x)		_F(x, 19, 0x1f)
+#define  BPCM_ZONE_PWR_CNTL_STATE_MASK		_F(0x1f, 19, 0x1f)
+#define  BPCM_ZONE_FREQ_SCALAR_DYN_SEL		(1 << 24)
+#define  BPCM_ZONE_PWR_OFF_STATE		(1 << 25)
+#define  BPCM_ZONE_PWR_ON_STATE			(1 << 26)
+#define  BPCM_ZONE_PWR_GOOD			(1 << 27)
+#define  BPCM_ZONE_DPG_PWR_STATE		(1 << 28)
+#define  BPCM_ZONE_MEM_PWR_STATE		(1 << 29)
+#define  BPCM_ZONE_ISO_STATE			(1 << 30)
+#define  BPCM_ZONE_RESET_STATE			(1 << 31)
+
+#define BPCM_ARM_CONTROL_REG		0x30
+#define BPCM_ARM_PWR_CONTROL_BASE_REG	0x34
+#define BPCM_ARM_PWR_CONTROL_REG(x)	(BPCM_ARM_PWR_CONTROL_BASE_REG + (x) * 0x4)
+#define BPCM_ARM_NEON_L2_REG		0x3c
+
+
+/* ARM Control register definitions */
+#define CORE_PWR_CTRL_SHIFT	0
+#define CORE_PWR_CTRL_MASK	0x3
+#define PLL_PWR_ON		BIT(8)
+#define PLL_LDO_PWR_ON		BIT(9)
+#define PLL_CLAMP_ON		BIT(10)
+#define CPU_RESET_N(x)		BIT(13 + (x))
+#define NEON_RESET_N		BIT(15)
+#define PWR_CTRL_STATUS_SHIFT	28
+#define PWR_CTRL_STATUS_MASK	0x3
+#define PWR_DOWN_SHIFT		30
+#define PWR_DOWN_MASK		0x3
+
+/* CPU Power control register definitions */
+#define MEM_PWR_OK		BIT(0)
+#define MEM_PWR_ON		BIT(1)
+#define MEM_CLAMP_ON		BIT(2)
+#define MEM_PWR_OK_STATUS	BIT(4)
+#define MEM_PWR_ON_STATUS	BIT(5)
+#define MEM_PDA_SHIFT		8
+#define MEM_PDA_MASK		0xf
+#define  MEM_PDA_CPU_MASK	0x1
+#define  MEM_PDA_NEON_MASK	0xf
+#define CLAMP_ON		BIT(15)
+#define PWR_OK_SHIFT		16
+#define PWR_OK_MASK		0xf
+#define PWR_ON_SHIFT		20
+#define  PWR_CPU_MASK		0x03
+#define  PWR_NEON_MASK		0x01
+#define PWR_ON_MASK		0xf
+#define PWR_OK_STATUS_SHIFT	24
+#define PWR_OK_STATUS_MASK	0xf
+#define PWR_ON_STATUS_SHIFT	28
+#define PWR_ON_STATUS_MASK	0xf
+
+#endif /* ! BPCM_DEFS_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/memc-bcm63158.c linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/memc-bcm63158.c
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/memc-bcm63158.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/memc-bcm63158.c	2025-09-25 17:40:35.063364727 +0200
@@ -0,0 +1,304 @@
+/*
+ * memc-bcm63158.c for memc-bcm63158
+ * Created by <nschichan@freebox.fr> on Mon Apr 20 18:06:35 2020
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/debugfs.h>
+
+#define MEMC_IDLE_PAD_CONTROL		0x20034
+#define MEMC_IDLE_PAD_EN0		0x20038
+#define MEMC_IDLE_PAD_EN1		0x2003c
+
+#define MEMC_CLK_MHZ			533
+#define MEMC_IDLE_THRESH_USEC		20
+
+#define MEMC_AUTO_SR_REG		0x0025c
+#define  MEMC_AUTO_SR_EN		(1 << 31)
+#define  MEMC_AUTO_SR_THRESH_MASK	0x7fffffff
+#define  MEMC_AUTO_SR_THRESH_VAL(v)	((v) & MEMC_AUTO_SR_THRESH_MASK)
+
+#define MEMC_GLOBAL_CONFIG_REG		0x00004
+#define  MEMC_GLOBAL_CONFIG_SLOW_CLK_EN	(1 << 26)
+
+struct bcm63158_memc_priv {
+	void __iomem *regs;
+	struct resource regs_res;
+	struct platform_device *pdev;
+
+	u32 sr_idle_thresh;
+	bool auto_sr_en;
+
+	struct mutex mutex;
+
+	struct dentry *debugfs_dir;
+};
+
+static inline u32 memc_readl(struct bcm63158_memc_priv *priv, u32 off)
+{
+	u32 ret;
+
+	ret = readl(priv->regs + off);
+	dev_dbg(&priv->pdev->dev, "memc_readl(): @%08llx: %08x",
+		priv->regs_res.start + off, ret);
+	return ret;
+}
+
+static inline void memc_writel(u32 v, struct bcm63158_memc_priv *priv, u32 off)
+{
+	dev_dbg(&priv->pdev->dev, "memc_writel(): @%08llx: %08x",
+		priv->regs_res.start + off, v);
+	writel(v, priv->regs + off);
+}
+
+/*
+ * same init as in the refsw.
+ */
+static void bcm63158_memc_idle_pad_init(struct bcm63158_memc_priv *priv)
+{
+	mutex_lock(&priv->mutex);
+
+	memc_writel(0xe, priv, MEMC_IDLE_PAD_CONTROL);
+	memc_writel(0x6df, priv, MEMC_IDLE_PAD_EN0);
+	memc_writel(0x3fffff, priv, MEMC_IDLE_PAD_EN1);
+
+	mutex_unlock(&priv->mutex);
+}
+
+/*
+ * set idle time in usec after which the MEMC will go automatically in
+ * self-refresh mode.
+ */
+static int bcm63158_memc_auto_sr_thresh_set(struct bcm63158_memc_priv *priv,
+					    u32 v)
+{
+	u32 reg;
+
+	v *= MEMC_CLK_MHZ;
+	if ((v & MEMC_AUTO_SR_THRESH_MASK) != v)
+		return -EINVAL;
+
+	mutex_lock(&priv->mutex);
+
+	reg = memc_readl(priv, MEMC_AUTO_SR_REG);
+	reg &= ~MEMC_AUTO_SR_THRESH_MASK;
+	reg |= MEMC_AUTO_SR_THRESH_VAL(v);
+	memc_writel(reg, priv, MEMC_AUTO_SR_REG);
+
+	mutex_unlock(&priv->mutex);
+	return 0;
+}
+
+/*
+ * enable or disable automatic self refresh
+ */
+static void bcm63158_memc_auto_sr_enable(struct bcm63158_memc_priv *priv,
+					bool en)
+{
+	u32 reg;
+
+	mutex_lock(&priv->mutex);
+
+	reg = memc_readl(priv, MEMC_AUTO_SR_REG);
+	if (en)
+		reg |= MEMC_AUTO_SR_EN;
+	else
+		reg &= ~MEMC_AUTO_SR_EN;
+	memc_writel(reg, priv, MEMC_AUTO_SR_REG);
+
+	mutex_unlock(&priv->mutex);
+}
+
+/*
+ * same as refsw.
+ */
+static void bcm63158_memc_enable_slow_clock(struct bcm63158_memc_priv *priv)
+{
+	u32 reg;
+
+	mutex_lock(&priv->mutex);
+
+	reg = memc_readl(priv, MEMC_GLOBAL_CONFIG_REG);
+	reg |= MEMC_GLOBAL_CONFIG_SLOW_CLK_EN;
+	memc_writel(reg, priv, MEMC_GLOBAL_CONFIG_REG);
+
+	mutex_unlock(&priv->mutex);
+}
+
+/*
+ * auto-sr-en debugfs stuff
+ */
+static int bcm63158_memc_autosr_en_read(void *_priv, u64 *val)
+{
+	struct bcm63158_memc_priv *priv = _priv;
+
+	*val =  !!(memc_readl(priv, MEMC_AUTO_SR_REG) & MEMC_AUTO_SR_EN);
+	return 0;
+}
+
+static int bcm63158_memc_autosr_en_write(void *_priv, u64 val)
+{
+	struct bcm63158_memc_priv *priv = _priv;
+
+	bcm63158_memc_auto_sr_enable(priv, !!val);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(memc_autosr_en_fops, bcm63158_memc_autosr_en_read,
+			bcm63158_memc_autosr_en_write, "%llu\n");
+
+/*
+ * auto-sr-thresh debugfs stuff
+ */
+static int bcm63158_memc_autosr_thresh_read(void *_priv, u64 *val)
+{
+	struct bcm63158_memc_priv *priv = _priv;
+
+	*val =  (memc_readl(priv, MEMC_AUTO_SR_REG) & MEMC_AUTO_SR_THRESH_MASK)
+		/ MEMC_CLK_MHZ;
+	return 0;
+}
+
+static int bcm63158_memc_autosr_thresh_write(void *_priv, u64 val)
+{
+	struct bcm63158_memc_priv *priv = _priv;
+
+	return bcm63158_memc_auto_sr_thresh_set(priv, val);
+}
+
+
+DEFINE_SIMPLE_ATTRIBUTE(memc_autosr_thresh_fops,
+			bcm63158_memc_autosr_thresh_read,
+			bcm63158_memc_autosr_thresh_write, "%llu\n");
+
+/*
+ * create debugfs entries, always report success, as debugfs support
+ * may not be compiled in.
+ */
+static int bcm63158_memc_create_debugfs(struct bcm63158_memc_priv *priv)
+{
+	priv->debugfs_dir = debugfs_create_dir("bcm63158-memc", NULL);
+	if (IS_ERR(priv->debugfs_dir)) {
+		dev_warn(&priv->pdev->dev, "unable to create debugfs "
+			 "directory");
+		priv->debugfs_dir = NULL;
+		return 0;
+	}
+
+	debugfs_create_file("auto-sr-en", S_IWUSR | S_IRUSR,
+			    priv->debugfs_dir, priv,
+			    &memc_autosr_en_fops);
+	debugfs_create_file("auto-sr-thresh", S_IWUSR | S_IRUSR,
+			    priv->debugfs_dir, priv,
+			    &memc_autosr_thresh_fops);
+
+	return 0;
+}
+
+/*
+ * remove debugfs entries
+ */
+static int bcm63158_memc_remove_debugfs(struct bcm63158_memc_priv *priv)
+{
+	if (priv->debugfs_dir)
+		debugfs_remove_recursive(priv->debugfs_dir);
+
+	return 0;
+}
+
+
+static int bcm63158_memc_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	struct bcm63158_memc_priv *priv;
+
+	dev_dbg(&pdev->dev, "probe");
+
+	priv = devm_kzalloc(&pdev->dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(&pdev->dev, "unable to get resource for MEMC "
+			"registers.");
+		return -ENXIO;
+	}
+	priv->regs_res = *res;
+
+	priv->regs = devm_ioremap_resource(&pdev->dev, &priv->regs_res);
+	if (IS_ERR(priv->regs)) {
+		dev_err(&pdev->dev, "unable to ioremap MEMC registers");
+		return PTR_ERR(priv->regs);
+	}
+
+	mutex_init(&priv->mutex);
+	priv->pdev = pdev;
+
+	/*
+	 * default values, possibly overridden with OF
+	 */
+	priv->sr_idle_thresh = MEMC_IDLE_THRESH_USEC;
+	priv->auto_sr_en = false;
+
+	of_property_read_u32(pdev->dev.of_node, "brcm,auto-sr-thresh",
+			     &priv->sr_idle_thresh);
+	priv->auto_sr_en = of_property_read_bool(pdev->dev.of_node,
+						 "brcm,auto-sr-en");
+
+	dev_info(&pdev->dev, "auto-sr-thresh: %d", priv->sr_idle_thresh);
+	dev_info(&pdev->dev, "auto-sr-en: %d", priv->auto_sr_en);
+
+	/*
+	 * init
+	 */
+	bcm63158_memc_idle_pad_init(priv);
+	bcm63158_memc_enable_slow_clock(priv);
+
+	/*
+	 * set default values or OF values.
+	 */
+	bcm63158_memc_auto_sr_thresh_set(priv, priv->sr_idle_thresh);
+	bcm63158_memc_auto_sr_enable(priv, priv->auto_sr_en);
+
+	/*
+	 * create debugfs attributes
+	 */
+	bcm63158_memc_create_debugfs(priv);
+	dev_set_drvdata(&pdev->dev, priv);
+
+	return 0;
+}
+
+static void bcm63158_memc_remove(struct platform_device *pdev)
+{
+	struct bcm63158_memc_priv *priv = dev_get_drvdata(&pdev->dev);
+
+	dev_dbg(&pdev->dev, "remove");
+	bcm63158_memc_remove_debugfs(priv);
+}
+
+static const struct of_device_id bcm63158_memc_of_match[] = {
+	{ .compatible = "brcm,bcm63158-memc" },
+	{},
+};
+
+static struct platform_driver bcm63158_memc_driver = {
+	.probe = bcm63158_memc_probe,
+	.remove = bcm63158_memc_remove,
+	.driver = {
+		.name	= "bcm63158-memc",
+		.owner = THIS_MODULE,
+		.of_match_table = bcm63158_memc_of_match,
+	}
+};
+
+module_platform_driver(bcm63158_memc_driver);
+
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("Broadcom BCM63158 SoC MEMC driver.");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/pmc.c linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/pmc.c
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/pmc.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/pmc.c	2025-09-25 17:40:35.063364727 +0200
@@ -0,0 +1,2242 @@
+/*
+ * drivers/soc/bcm63xx/pmc.c
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "bcm63xx-pmc: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/pmc-bcm63xx.h>
+#include <linux/debugfs.h>
+
+#include <dt-bindings/reset/brcm,bcm63xx-pmc.h>
+#include <soc/bcm63xx/pmc.h>
+
+#include "bpcm_defs.h"
+
+#define VERBOSE_PMC
+
+/*
+ * PMC registers offsets
+ */
+#define PMC_CTRL_REG			(0x1000)
+#define PMC_HOST_MBOX_IN_REG		(0x1028)
+#define PMC_HOST_MBOX_OUT_REG		(0x102c)
+#define PMC_ADDR2_WIN_MASK_REG		(0x1078)
+#define PMC_ADDR2_WIN_BASEIN_REG	(0x107c)
+#define PMC_ADDR2_WIN_BASEOUT_REG	(0x1080)
+#define PMC_TIM2CTRL_REG		(0x10b4)
+#define PMC_TIM2CNT_REG			(0x10b8)
+
+#define PMC_DQM_REQ_REG(__x)		(0x1c00 + 4 * __x)
+#define PMC_DQM_RPL_REG(__x)		(0x1c10 + 4 * __x)
+#define PMC_QSTATUS			(0x1f00)
+#define PMC_DQM_STS_REG			(0x1820)
+#define PMC_DQM_STS_NOTEMPTY		(1 << 1)
+
+enum {
+	PMC_RSTATE_EXECUTING_BOOTROM,
+        PMC_RSTATE_WAITING_BMU_COMPLETE,
+        PMC_RSTATE_AVS_COMPLETE_WAITING_FOR_IMAGE,
+        PMC_RSTATE_AUTHENTICATING_IMAGE,
+        PMC_RSTATE_AUTHENTICATION_FAILED,
+        PMC_RSTATE_RESERVED,
+        PMC_RSTATE_FATAL_ERROR,
+        PMC_RSTATE_RUNNING
+};
+
+/*
+ * list of known PMB devices & their respective "addresses"
+ */
+#define PMB_BUS_ID_SHIFT	8
+
+enum pmc_bus_id {
+	PMB_BUS_SF2,
+	PMB_BUS_SAR,
+	PMB_BUS_AIP,
+	PMB_BUS_AFEPLL,
+	PMB_BUS_RDP,
+	PMB_BUS_RDPPLL,
+	PMB_BUS_USB30_2X,
+};
+
+struct pmc_addr_info {
+	bool		valid;
+	unsigned int	dev;
+	unsigned int	bus_id;
+};
+
+static const struct pmc_addr_info bcm63138_pmc_addr_info[PMB_ADDR_LAST] = {
+	[PMB_ADDR_SF2]		= { true, 1, 1 },
+	[PMB_ADDR_AIP]		= { true, 4, 0 },
+	[PMB_ADDR_SAR]		= { true, 6, 1 },
+	[PMB_ADDR_RDP]		= { true, 7, 1 },
+	[PMB_ADDR_RDPPLL]	= { true, 11, 1 },
+	[PMB_ADDR_USB30_2X]	= { true, 17, 1 },
+	[PMB_ADDR_VDSL3_MIPS]	= { true, 21, 0 },
+	[PMB_ADDR_VDSL3_CORE]	= { true, 22, 0 },
+	[PMB_ADDR_VDSL3_CORE]	= { true, 22, 0 },
+	[PMB_ADDR_AFEPLL]	= { true, 23, 0 },
+};
+
+static const struct pmc_addr_info bcm63158_pmc_addr_info[PMB_ADDR_LAST] = {
+	[PMB_ADDR_SYSTEMPORT]	= { true, 0, 0 },
+	[PMB_ADDR_SF2]		= { true, 0, 1 },
+	[PMB_ADDR_PCIE0]	= { true, 8, 0 },
+	[PMB_ADDR_PCIE1]	= { true, 9, 0 },
+	[PMB_ADDR_PCIE2]	= { true, 10, 0 },
+	[PMB_ADDR_PCIE3]	= { true, 12, 1 },
+	[PMB_ADDR_USB30_2X]	= { true, 13, 1 },
+	[PMB_ADDR_WAN]		= { true, 15, 1 },
+	[PMB_ADDR_XRDP]		= { true, 16, 1 },
+	[PMB_ADDR_XRDP_RC0]	= { true, 17, 1 },
+	[PMB_ADDR_XRDP_RC1]	= { true, 18, 1 },
+	[PMB_ADDR_XRDP_RC2]	= { true, 19, 1 },
+	[PMB_ADDR_XRDP_RC3]	= { true, 20, 1 },
+	[PMB_ADDR_XRDP_RC4]	= { true, 21, 1 },
+	[PMB_ADDR_XRDP_RC5]	= { true, 22, 1 },
+	[PMB_ADDR_VDSL3_CORE]	= { true, 23, 0 },
+	[PMB_ADDR_VDSL3_PMB]	= { true, 24, 0 },
+	[PMB_ADDR_AFEPLL]	= { true, 26, 0 },
+	[PMB_ADDR_BIU_PLL]	= { true, 38, 0 },
+};
+
+/*
+ * list of known PMC commands
+ */
+enum {
+	PMC_CMD_RESERVED = 0,
+	PMC_CMD_GET_DEV_PRESENCE,
+	PMC_CMD_GET_SW_STRAP,
+	PMC_CMD_GET_HW_REV,
+	PMC_CMD_GET_NUM_ZONES,
+	PMC_CMD_PING,
+	PMC_CMD_GET_NEXT_LOG_ENTRY,
+	PMC_CMD_GET_RMON_AND_SIGMA,
+	PMC_CMD_SET_CLOCK_HIGH_GEAR,
+	PMC_CMD_SET_CLOCK_LOW_GEAR,
+	PMC_CMD_SET_CLOCK_GEAR,
+
+	PMC_CMD_READ_BPCM_REG,
+	PMC_CMD_READ_ZONE_REG,
+	PMC_CMD_WRITE_BPCM_REG,
+	PMC_CMD_WRITE_ZONE_REG,
+	PMC_CMD_SET_RUN_STATE,
+	PMC_CMD_SET_POWER_STATE,
+	PMC_CMD_SHUTDOWN_ALLOWED,
+	PMC_CMD_GET_SELECT0,
+	PMC_CMD_GET_SELECT3,
+	PMC_CMD_GET_AVS_DISABLE_STATE,
+
+	PMC_CMD_GET_PVT,
+	PMC_CMD_POWER_DEV_ONOFF,
+	PMC_CMD_POWER_ZONE_ONOFF,
+	PMC_CMD_RESET_DEVICE,
+	PMC_CMD_RESET_ZONE,
+	PMC_CMD_ALLOCATE_G2UDQM,
+	PMC_CMD_QSM_AVAILABLE,
+	PMC_CMD_REVISION,
+	PMC_CMD_REGISTER_CMD_HANDLER,
+	PMC_CMD_FIND_UNUSED_COMMAND,
+
+	PMC_CMD_LOCK_CMD_TABLE,
+	PMC_CMD_JUMPAPP,
+	PMC_CMD_STALL,
+	PMC_CMD_CLOSEAVS,
+};
+
+struct pmc_command {
+	struct {
+		union {
+			struct {
+				u32 cmd_id: 8;
+				u32 error: 8;
+				u32 msgid: 8;
+				u32 srcport: 8;
+			};
+			u32 all;
+		};
+	} word0;
+
+	struct {
+		union {
+			struct {
+				u32 zone_idx: 10;
+				u32 dev_addr: 10;
+				u32 island: 4;
+				u32 log_num: 8;
+			};
+			u32 all;
+		};
+	} word1;
+
+	union {
+
+		struct {
+			u32 params[2];
+		} generic_params;
+
+		struct {
+			u32 word2;
+			u32 word3;
+		} command_response;
+
+		struct {
+			u8 reserved[2];
+			u8 restore;
+			u8 state;
+
+			u32 unused;
+		} command_power;
+
+		struct {
+			u16 margin_mv_slow;
+			u16 max_mv;
+			u16 margin_mv_fast;
+			u16 min_mv;
+		} close_avs_63158;
+	};
+};
+
+struct bcm63xx_pmc {
+	struct device	*dev;
+	u32		soc_id;
+	void __iomem	*base;
+	unsigned int	req_sequnum;
+	struct mutex	pmc_lock;
+	struct reset_controller_dev rcdev;
+
+	bool init_done;
+	struct dentry *debugfs_dir;
+
+	u16 avs_margin_ff;
+	u16 avs_margin_ss;
+};
+
+#define to_pmc_reset_priv(p) \
+	container_of((p), struct bcm63xx_pmc, rcdev)
+
+static struct bcm63xx_pmc *pmc = &(struct bcm63xx_pmc) {};
+
+static int __init pmc_create_debugfs(struct bcm63xx_pmc *pmc);
+
+/*
+ *
+ */
+static int __pmc_send_command(struct bcm63xx_pmc *priv,
+			      struct pmc_command *cmd,
+			      struct pmc_command *rsp)
+{
+	u32 tries = 50000;
+
+	priv->req_sequnum = (priv->req_sequnum + 1) & 0xff;
+	cmd->word0.msgid = priv->req_sequnum;
+
+	/*
+	 * write request command to PMC registers.
+	 */
+	writel(cmd->word0.all, priv->base + PMC_DQM_REQ_REG(0));
+	writel(cmd->word1.all, priv->base + PMC_DQM_REQ_REG(1));
+	writel(cmd->generic_params.params[0], priv->base + PMC_DQM_REQ_REG(2));
+	writel(cmd->generic_params.params[1], priv->base + PMC_DQM_REQ_REG(3));
+
+	/*
+	 * wait for PMC to reply.
+	 */
+	while (tries) {
+		u32 sts;
+
+		udelay(10);
+		sts = readl(priv->base + PMC_DQM_STS_REG);
+		if ((sts & PMC_DQM_STS_NOTEMPTY))
+			goto finished;
+		--tries;
+	}
+
+	/* timeout */
+	dev_err(priv->dev, "PMC command %d (seq %d)failed.\n",
+		cmd->word0.cmd_id, priv->req_sequnum);
+	return -ETIMEDOUT;
+
+finished:
+	memset(rsp, 0, sizeof (*rsp));
+	rsp->word0.all = readl(priv->base + PMC_DQM_RPL_REG(0));
+	rsp->word1.all = readl(priv->base + PMC_DQM_RPL_REG(1));
+	rsp->generic_params.params[0] = readl(priv->base + PMC_DQM_RPL_REG(2));
+	rsp->generic_params.params[1] = readl(priv->base + PMC_DQM_RPL_REG(3));
+
+	if (rsp->word0.msgid == priv->req_sequnum) {
+		/*
+		 * PMC replied to correct command, look into error of
+		 * the reply.
+		 */
+		if (rsp->word0.error) {
+			dev_info(priv->dev, "PMC reported error: %d\n",
+				 rsp->word0.error);
+			return -EIO;
+		}
+	} else {
+		/*
+		 * PMC is drunk ?
+		 */
+		dev_err(priv->dev, "PMC reported completion cmd seq %d "
+			"(PMC drunk?)\n", rsp->word0.msgid);
+		return -EILSEQ;
+	}
+
+	return 0;
+}
+
+/*
+ *
+ */
+static int pmc_send_command(struct bcm63xx_pmc *priv, struct pmc_command *cmd,
+			    struct pmc_command *rsp)
+{
+	int ret;
+
+	mutex_lock(&priv->pmc_lock);
+	ret = __pmc_send_command(priv, cmd, rsp);
+	mutex_unlock(&priv->pmc_lock);
+	return ret;
+}
+
+/*
+ *
+ */
+static u32 pmc_get_dev_addr(struct bcm63xx_pmc *priv, enum pmc_addr_id addr_id)
+{
+	const struct pmc_addr_info *info;
+
+	switch (priv->soc_id) {
+	case 0x63138:
+		info = bcm63138_pmc_addr_info;
+		break;
+	case 0x63158:
+		info = bcm63158_pmc_addr_info;
+		break;
+	default:
+		WARN(1, "missing addr info for this soc id");
+		return 0;
+	}
+
+	BUG_ON(!info[addr_id].valid);
+	return info[addr_id].dev | (info[addr_id].bus_id << PMB_BUS_ID_SHIFT);
+}
+
+/*
+ *
+ */
+static int pmc_power_on_cmd(struct bcm63xx_pmc *priv,
+			    enum pmc_addr_id addr_id)
+{
+	struct pmc_command cmd, rsp;
+	int error;
+
+	memset(&cmd, 0, sizeof (cmd));
+
+	cmd.word0.cmd_id = PMC_CMD_POWER_DEV_ONOFF;
+	cmd.word1.dev_addr = pmc_get_dev_addr(priv, addr_id);
+	cmd.word1.zone_idx = 0;
+	cmd.command_power.state = 1;
+
+	error = pmc_send_command(priv, &cmd, &rsp);
+	if (error)
+		return error;
+
+	return 0;
+}
+
+/*
+ *
+ */
+static int pmc_power_off_cmd(struct bcm63xx_pmc *priv,
+			     enum pmc_addr_id addr_id)
+{
+	struct pmc_command cmd, rsp;
+	int error;
+
+	memset(&cmd, 0, sizeof (cmd));
+
+	cmd.word0.cmd_id = PMC_CMD_POWER_DEV_ONOFF;
+	cmd.word1.dev_addr = pmc_get_dev_addr(priv, addr_id);
+	cmd.word1.zone_idx = 0;
+	cmd.command_power.state = 0;
+
+	error = pmc_send_command(priv, &cmd, &rsp);
+	if (error)
+		return error;
+
+	return 0;
+}
+
+/*
+ *
+ */
+int pmc_read_bpcm_register(struct bcm63xx_pmc *priv,
+			   enum pmc_addr_id addr_id,
+			   u32 word_offset, u32 *value)
+{
+	struct pmc_command cmd, rsp;
+	int error;
+
+	memset(&cmd, 0, sizeof (cmd));
+
+	cmd.word0.cmd_id = PMC_CMD_READ_BPCM_REG;
+	cmd.word1.dev_addr = pmc_get_dev_addr(priv, addr_id);
+	cmd.word1.zone_idx = 0;
+
+	cmd.generic_params.params[0] = word_offset >> 2;
+	cmd.generic_params.params[1] = 0;
+
+	error = pmc_send_command(priv, &cmd, &rsp);
+	if (error)
+		return error;
+
+	*value = rsp.command_response.word2;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pmc_read_bpcm_register);
+
+/*
+ *
+ */
+static int pmc_get_avs_state(struct bcm63xx_pmc *priv, int island, bool *state)
+{
+	struct pmc_command cmd, rsp;
+	int error;
+
+	memset(&cmd, 0, sizeof (cmd));
+
+	cmd.word0.cmd_id = PMC_CMD_GET_AVS_DISABLE_STATE;
+	cmd.word1.island = island;
+
+	error = pmc_send_command(priv, &cmd, &rsp);
+	if (error)
+		return error;
+
+	*state = !rsp.command_response.word2;
+	return 0;
+}
+
+/*
+ *
+ */
+static int pmc_close_avs(struct bcm63xx_pmc *pmc)
+{
+	struct pmc_command cmd, rsp;
+	int error;
+
+	/*
+	 * PMC_CMD_CLOSEAVS command format is different on non 63158
+	 * SoCs.
+	 */
+	if (pmc->soc_id != 0x63158)
+		return -ENXIO;
+
+	pr_info("PMC: close AVS with %u mv (SS silicon) and "
+		"%u mv (FF silicon)\n", pmc->avs_margin_ss,
+		pmc->avs_margin_ff);
+
+	memset(&cmd, 0, sizeof (cmd));
+
+	cmd.word0.cmd_id = PMC_CMD_CLOSEAVS;
+
+	/*
+	 * NOTE: keep min_mv and max_mv to 0 to let the firmware use
+	 * the default values for minimum and maximum AVS tension.
+	 */
+	cmd.close_avs_63158.margin_mv_fast = pmc->avs_margin_ff;
+	cmd.close_avs_63158.margin_mv_slow = pmc->avs_margin_ss;
+
+	error = pmc_send_command(pmc, &cmd, &rsp);
+	if (error) {
+		pr_err("PMC close AVS failed: %d\n", error);
+		return error;
+	}
+
+	return 0;
+}
+
+/*
+ *
+ */
+static int pmc_get_rmon(struct bcm63xx_pmc *priv, u32 *v)
+{
+	struct pmc_command cmd, rsp;
+	int error;
+
+	memset(&cmd, 0, sizeof (cmd));
+	cmd.word0.cmd_id = PMC_CMD_GET_RMON_AND_SIGMA;
+
+	error = pmc_send_command(priv, &cmd, &rsp);
+	if (error)
+		return error;
+
+	*v = rsp.command_response.word3;
+	return 0;
+}
+
+/*
+ *
+ */
+int pmc_write_bpcm_register(struct bcm63xx_pmc *priv,
+			    enum pmc_addr_id addr_id,
+			    u32 word_offset, u32 value)
+{
+	struct pmc_command cmd, rsp;
+	int error;
+
+	memset(&cmd, 0, sizeof (cmd));
+
+	cmd.word0.cmd_id = PMC_CMD_WRITE_BPCM_REG;
+	cmd.word1.dev_addr = pmc_get_dev_addr(priv, addr_id);
+	cmd.word1.zone_idx = 0;
+	cmd.generic_params.params[0] = word_offset >> 2;
+	cmd.generic_params.params[1] = value;
+
+	error = pmc_send_command(priv, &cmd, &rsp);
+	if (error)
+		return error;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(pmc_write_bpcm_register);
+
+/* Perform a value write, then spin until the value shifted by shift
+ * is seen, masked with mask and is different from cond.
+ */
+static int pmc_wr_rd_bpcm_mask(struct bcm63xx_pmc *priv,
+			       enum pmc_addr_id addr_id,
+			       u32 word_offset, u32 *val,
+			       u32 shift, u32 mask, u32 cond)
+{
+	int ret;
+	int limit = 1000;
+	u32 dev_addr = pmc_get_dev_addr(priv, addr_id);
+
+	ret = pmc_write_bpcm_register(priv, dev_addr,
+				      word_offset, *val);
+	if (ret)
+		return ret;
+
+	do {
+		ret = pmc_read_bpcm_register(priv, dev_addr,
+					     word_offset, val);
+		if (ret)
+			return ret;
+
+		udelay(10);
+
+		if (--limit < 0) {
+			ret = -ETIMEDOUT;
+			break;
+		}
+
+	} while (((*val >> shift) & mask) != cond);
+
+	return ret;
+}
+
+/*
+ * control cpu reset/power
+ */
+static int pmc_cpu_power_on(struct bcm63xx_pmc *priv, unsigned int cpu)
+{
+	u32 ctrl, val;
+	int ret;
+
+	/* Check if the CPU is already on and save the ARM_CONTROL register
+	 * value since we will use it later for CPU de-assert once done with
+	 * the CPU-specific power sequence
+	 */
+	ret = pmc_read_bpcm_register(priv, PMB_ADDR_AIP,
+				     BPCM_ARM_CONTROL_REG, &ctrl);
+	if (ret)
+		goto out;
+
+	if (ctrl & CPU_RESET_N(cpu)) {
+		pr_info("CPU%d is already powered on, reset it\n", cpu);
+
+		ctrl &= ~CPU_RESET_N(cpu);
+		ret = pmc_write_bpcm_register(priv, PMB_ADDR_AIP,
+					      BPCM_ARM_CONTROL_REG, ctrl);
+		mdelay(1);
+		ctrl |= CPU_RESET_N(cpu);
+		ret = pmc_write_bpcm_register(priv, PMB_ADDR_AIP,
+					      BPCM_ARM_CONTROL_REG, ctrl);
+		ret = 0;
+		goto out;
+	}
+
+	/* Power on CPU */
+	ret = pmc_read_bpcm_register(priv, PMB_ADDR_AIP,
+				     BPCM_ARM_PWR_CONTROL_REG(cpu), &val);
+	if (ret)
+		goto out;
+
+	val |= (PWR_CPU_MASK << PWR_ON_SHIFT);
+
+	ret = pmc_wr_rd_bpcm_mask(priv, PMB_ADDR_AIP,
+				  BPCM_ARM_PWR_CONTROL_REG(cpu), &val,
+				  PWR_ON_STATUS_SHIFT,
+				  PWR_CPU_MASK, PWR_CPU_MASK);
+	if (ret)
+		goto out;
+
+	val |= (PWR_CPU_MASK << PWR_OK_SHIFT);
+
+	ret = pmc_wr_rd_bpcm_mask(priv, PMB_ADDR_AIP,
+				  BPCM_ARM_PWR_CONTROL_REG(cpu), &val,
+				  PWR_OK_STATUS_SHIFT,
+				  PWR_CPU_MASK, PWR_CPU_MASK);
+	if (ret)
+		goto out;
+
+	val &= ~CLAMP_ON;
+
+	ret = pmc_write_bpcm_register(priv, PMB_ADDR_AIP,
+				      BPCM_ARM_PWR_CONTROL_REG(cpu), val);
+	if (ret)
+		goto out;
+
+	/* Power on CPU<N> RAM */
+	val &= ~(MEM_PDA_MASK << MEM_PDA_SHIFT);
+
+	ret = pmc_write_bpcm_register(priv, PMB_ADDR_AIP,
+				      BPCM_ARM_PWR_CONTROL_REG(cpu), val);
+	if (ret)
+		goto out;
+
+	val |= MEM_PWR_ON;
+
+	ret = pmc_wr_rd_bpcm_mask(priv, PMB_ADDR_AIP,
+				  BPCM_ARM_PWR_CONTROL_REG(cpu),
+				  &val,
+				  0, MEM_PWR_ON_STATUS, MEM_PWR_ON_STATUS);
+	if (ret)
+		goto out;
+
+	val |= MEM_PWR_OK;
+
+	ret = pmc_wr_rd_bpcm_mask(priv, PMB_ADDR_AIP,
+				  BPCM_ARM_PWR_CONTROL_REG(cpu),
+				  &val,
+				  0, MEM_PWR_OK_STATUS, MEM_PWR_OK_STATUS);
+	if (ret)
+		goto out;
+
+	val &= ~MEM_CLAMP_ON;
+
+	ret = pmc_write_bpcm_register(priv, PMB_ADDR_AIP,
+				      BPCM_ARM_PWR_CONTROL_REG(cpu), val);
+	if (ret)
+		goto out;
+
+	/* De-assert CPU reset */
+	ctrl |= CPU_RESET_N(cpu);
+
+	ret = pmc_write_bpcm_register(priv, PMB_ADDR_AIP,
+				      BPCM_ARM_CONTROL_REG, ctrl);
+
+out:
+	return ret;
+}
+
+#if 0
+static int pmc_power_off_zone(struct bcm63xx_pmc *priv,
+			      enum pmc_addr_id addr_id, int zone)
+{
+	u32 zone_reg;
+	int err;
+
+	err = pmc_read_bpcm_register(priv, addr_id, BPCM_ZONE_REG(zone),
+				     &zone_reg);
+	if (err)
+		return err;
+
+	zone_reg &= ~BPCM_ZONE_PWR_UP_REQ;
+	zone_reg |= BPCM_ZONE_PWR_DN_REQ;
+
+	err = pmc_write_bpcm_register(priv, addr_id, BPCM_ZONE_REG(zone),
+				      zone_reg);
+	if (err)
+		return err;
+
+	return 0;
+}
+#endif
+
+static int pmc_power_on_zone(struct bcm63xx_pmc *priv, enum pmc_addr_id addr_id,
+			     int zone)
+{
+	u32 zone_reg;
+	int err;
+
+	err = pmc_read_bpcm_register(priv, addr_id, BPCM_ZONE_REG(zone),
+				     &zone_reg);
+	if (err)
+		return err;
+
+	zone_reg &= ~BPCM_ZONE_PWR_DN_REQ;
+	zone_reg |= BPCM_ZONE_PWR_UP_REQ;
+	zone_reg |= BPCM_ZONE_DPG_CTL_EN;
+	zone_reg |= BPCM_ZONE_MEM_PWR_CTL_EN;
+	zone_reg |= BPCM_ZONE_BLK_RESET_ASSERT;
+
+	err = pmc_write_bpcm_register(priv, addr_id, BPCM_ZONE_REG(zone),
+				      zone_reg);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/*
+ * control StarFighter2 (switch) power
+ */
+static int pmc_sf2_power_on(struct bcm63xx_pmc *priv)
+{
+	/* FIXME: SF2 GMII CLOCK HERE ? */
+	return pmc_power_on_cmd(priv, PMB_ADDR_SF2);
+}
+
+static int pmc_sf2_power_off(struct bcm63xx_pmc *priv)
+{
+	return pmc_power_off_cmd(priv, PMB_ADDR_SF2);
+}
+
+/*
+ * control SAR power
+ */
+# define PMB_SAR_SR_CONTROL		0x28
+#  define PMB_SAR_SR_CONTROL_RESET	0xffffff01
+#  define PMB_SAR_SR_CONTROL_UNRESET	0xffffff00
+
+static int pmc_sar_soft_reset(struct bcm63xx_pmc *priv)
+{
+	int ret;
+
+	ret = pmc_write_bpcm_register(priv, PMB_ADDR_SAR,
+				      PMB_SAR_SR_CONTROL,
+				      PMB_SAR_SR_CONTROL_RESET);
+	if (ret)
+		return ret;
+
+	ret = pmc_write_bpcm_register(priv, PMB_ADDR_SAR,
+				      PMB_SAR_SR_CONTROL,
+				      PMB_SAR_SR_CONTROL_UNRESET);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int pmc_sar_power_on(struct bcm63xx_pmc *priv)
+{
+	int ret;
+
+	ret = pmc_power_on_cmd(priv, PMB_ADDR_SAR);
+	if (ret)
+		return ret;
+
+	return pmc_sar_soft_reset(priv);
+}
+
+static int pmc_sar_power_off(struct bcm63xx_pmc *priv)
+{
+	return pmc_power_off_cmd(priv, PMB_ADDR_SAR);
+}
+
+/*
+ * control USB host power
+ */
+static int pmc_usbh_power_on(struct bcm63xx_pmc *priv)
+{
+	return pmc_power_on_cmd(priv, PMB_ADDR_USB30_2X);
+}
+
+static int pmc_usbh_power_off(struct bcm63xx_pmc *priv)
+{
+	return pmc_power_off_cmd(priv, PMB_ADDR_USB30_2X);
+}
+
+/*
+ * control Runner Data Path power
+ */
+# define RDP_PLL_RESETS         0x10
+#  define RDP_PLL_RESET_BIT0    (1 << 0)
+#  define RDP_PLL_RESET_BIT1    (1 << 1)
+
+# define RDP_PLL_NDIV           0x1c
+#  define NDIV_INT_MASK         0x3ff
+
+# define RDP_PLL_PDIV           0x20
+#  define PDIV_MASK             0x7
+
+# define RDP_PLL_CH01_CFG       0x2c
+#  define CH01_MDIV0_MASK       0xff
+
+# define RDP_PLL_STATUS         0x3c
+#  define RDP_PLL_STATUS_READY  (1 << 31)
+
+# define PMB_RDP_SR_CONTROL             0x28
+#  define PMB_RDP_SR_CONTROL_UNRESET    0xffffffff
+#  define PMB_RDP_SR_CONTROL_RESET      0x0
+
+static int pmc_rdp_pll_init(struct bcm63xx_pmc *priv)
+{
+	unsigned int tries = 100;
+	int ret;
+	u32 reg;
+
+        /*
+         * powerdown PLLs.
+         */
+        ret = pmc_write_bpcm_register(priv,
+					PMB_ADDR_RDPPLL,
+					RDP_PLL_RESETS, 0x0);
+        if (ret)
+                return ret;
+
+        /*
+         * release first stage reset.
+         */
+        ret = pmc_read_bpcm_register(priv,
+				     PMB_ADDR_RDPPLL,
+				     RDP_PLL_RESETS, &reg);
+        if (ret)
+                return ret;
+
+        reg |= RDP_PLL_RESET_BIT0;
+        ret = pmc_write_bpcm_register(priv,
+				      PMB_ADDR_RDPPLL,
+				      RDP_PLL_RESETS, reg);
+        if (ret)
+                return ret;
+
+        /*
+         * wait for PLL ready bit.
+         */
+        do {
+                ret = pmc_read_bpcm_register(priv,
+					     PMB_ADDR_RDPPLL,
+					     RDP_PLL_STATUS,
+					     &reg);
+                if (ret)
+                        return ret;
+        } while (--tries && !(reg & RDP_PLL_STATUS_READY));
+
+        if (!tries) {
+                dev_err(priv->dev, "timedout waiting for RDP PLL ready.\n");
+                return -ETIMEDOUT;
+        }
+
+        /*
+         * release second stage reset.
+         */
+        ret = pmc_read_bpcm_register(priv,
+				     PMB_ADDR_RDPPLL,
+				     RDP_PLL_RESETS, &reg);
+        if (ret)
+                return ret;
+
+        reg |= RDP_PLL_RESET_BIT1;
+
+        ret = pmc_write_bpcm_register(priv,
+				      PMB_ADDR_RDPPLL,
+				      RDP_PLL_RESETS, reg);
+        if (ret)
+                return ret;
+
+	return 0;
+}
+
+static int pmc_rdp_power_on(struct bcm63xx_pmc *priv)
+{
+	int ret;
+
+	ret = pmc_rdp_pll_init(priv);
+	if (ret)
+		return ret;
+
+	/*
+	 * hold RDP into reset
+         */
+	ret = pmc_write_bpcm_register(priv, PMB_ADDR_RDP,
+				      PMB_RDP_SR_CONTROL,
+				      PMB_RDP_SR_CONTROL_RESET);
+	if (ret)
+		return ret;
+
+	ret = pmc_power_off_cmd(priv, PMB_ADDR_RDP);
+	if (ret)
+		return ret;
+
+	ret = pmc_power_on_cmd(priv, PMB_ADDR_RDP);
+	if (ret)
+		return ret;
+
+	/*
+	 * hold RDP into reset
+         */
+	ret = pmc_write_bpcm_register(priv, PMB_ADDR_RDP,
+				      PMB_RDP_SR_CONTROL,
+				      PMB_RDP_SR_CONTROL_UNRESET);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int pmc_rdp_power_off(struct bcm63xx_pmc *priv)
+{
+	return 0;
+}
+
+/*
+ * XRDP power control
+ */
+static const enum pmc_addr_id xrdp_blocks[] = {
+	PMB_ADDR_XRDP,
+	PMB_ADDR_XRDP_RC0,
+	PMB_ADDR_XRDP_RC1,
+	PMB_ADDR_XRDP_RC2,
+	PMB_ADDR_XRDP_RC3,
+	PMB_ADDR_XRDP_RC4,
+	PMB_ADDR_XRDP_RC5,
+};
+
+/*
+ * Power up the XRDP core and all of its register clusters: power-cycle
+ * every entry of xrdp_blocks[] (all off, then all on), then hold every
+ * block in reset before releasing them all.
+ *
+ * Returns 0 on success or a negative errno from the first failing step.
+ */
+static int pmc_xrdp_power_on(struct bcm63xx_pmc *priv)
+{
+	size_t i;
+	int ret;
+
+	/* power-cycle: every block off first, then every block on */
+	for (i = 0; i < ARRAY_SIZE(xrdp_blocks); i++) {
+		ret = pmc_power_off_cmd(priv, xrdp_blocks[i]);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(xrdp_blocks); i++) {
+		ret = pmc_power_on_cmd(priv, xrdp_blocks[i]);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * hold & release reset (all blocks held before any is released)
+	 */
+	for (i = 0; i < ARRAY_SIZE(xrdp_blocks); i++) {
+		ret = pmc_write_bpcm_register(priv, xrdp_blocks[i],
+					      PMB_RDP_SR_CONTROL,
+					      PMB_RDP_SR_CONTROL_RESET);
+		if (ret)
+			return ret;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(xrdp_blocks); i++) {
+		ret = pmc_write_bpcm_register(priv, xrdp_blocks[i],
+					      PMB_RDP_SR_CONTROL,
+					      PMB_RDP_SR_CONTROL_UNRESET);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * XRDP power down: currently a no-op, always reports success.
+ */
+static int pmc_xrdp_power_off(struct bcm63xx_pmc *priv)
+{
+	return 0;
+}
+
+/*
+ * power on single lane PCI express port.
+ *
+ * @addr_id: PMB device address of the port (one of PMB_ADDR_PCIE0..3).
+ *
+ * Powers zone 0 of the port, then writes 0 to its BPCM SR control
+ * register.  Returns 0 on success or a negative errno.
+ */
+static int pmc_pcieX_power_on(struct bcm63xx_pmc *priv,
+			      enum pmc_addr_id addr_id)
+{
+	int err;
+
+	dev_dbg(priv->dev, "pcie power on address id %d.\n", addr_id);
+
+	err = pmc_power_on_zone(priv, addr_id, 0);
+	if (err)
+		return err;
+
+	/* clear SR control once the zone is up */
+	err = pmc_write_bpcm_register(priv, addr_id,
+				      BPCM_SR_CONTROL_REG, 0x0);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/*
+ * power on dual lane PCI express port, lanes 0-1
+ *
+ * Both lane zones are powered, then the SR control registers are
+ * sequenced so the pair comes up strapped for dual lane operation.
+ * Returns 0 on success or a negative errno from the first failing step.
+ */
+static int pmc_pcie01_power_on(struct bcm63xx_pmc *priv)
+{
+	int err;
+
+	dev_dbg(priv->dev, "pcie01 power on.\n");
+
+	/*
+	 * power on first lane
+	 */
+	err = pmc_power_on_zone(priv, PMB_ADDR_PCIE0, 0);
+	if (err)
+		return err;
+
+	/*
+	 * power on second lane
+	 */
+	err = pmc_power_on_zone(priv, PMB_ADDR_PCIE1, 0);
+	if (err)
+		return err;
+
+	/*
+	 * write all 0s in second lane SR control.
+	 */
+	err = pmc_write_bpcm_register(priv, PMB_ADDR_PCIE1,
+				      BPCM_SR_CONTROL_REG, 0x0);
+	if (err)
+		return err;
+
+	/*
+	 * write 0xff to reset SR control (presumably asserts all soft
+	 * resets on lane 0 — confirm against BPCM documentation), then
+	 * let it settle before strapping
+	 */
+	err = pmc_write_bpcm_register(priv, PMB_ADDR_PCIE0,
+				      BPCM_SR_CONTROL_REG, 0xff);
+	if (err)
+		return err;
+	mdelay(10);
+
+	/*
+	 * bit7: 1 - Strap override for dual lane support
+	 * bit6: Strap value, 0 - dual lane, 1 - single lane
+	 */
+	err = pmc_write_bpcm_register(priv, PMB_ADDR_PCIE0,
+				      BPCM_SR_CONTROL_REG, 0x80);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/*
+ * Power down a single lane PCI express port.  The hardware power-off
+ * sequence is not implemented, so this always returns -ENOTSUPP.
+ *
+ * @addr_id: PMB device address of the port (one of PMB_ADDR_PCIE0..3).
+ */
+static int pmc_pcieX_power_off(struct bcm63xx_pmc *priv,
+			       enum pmc_addr_id addr_id)
+{
+	/* report the port actually requested, not always "pcie0" */
+	dev_info(priv->dev, "pcie power off address id %d: not supported.\n",
+		 addr_id);
+	return -ENOTSUPP;
+}
+/*
+ * Power down the dual lane PCI express port.  Not implemented; always
+ * returns -ENOTSUPP.
+ */
+static int pmc_pcie01_power_off(struct bcm63xx_pmc *priv)
+{
+	dev_info(priv->dev, "pcie01 power off.\n");
+	return -ENOTSUPP;
+}
+
+/*
+ * Map a PCIe reset line index (PMC_R_PCIEx) to the matching PMB
+ * device address.  Any other index is a driver bug.
+ */
+static inline enum pmc_addr_id reset_to_pmc_addr_id(unsigned long reset)
+{
+	if (reset == PMC_R_PCIE0)
+		return PMB_ADDR_PCIE0;
+	if (reset == PMC_R_PCIE1)
+		return PMB_ADDR_PCIE1;
+	if (reset == PMC_R_PCIE2)
+		return PMB_ADDR_PCIE2;
+	if (reset == PMC_R_PCIE3)
+		return PMB_ADDR_PCIE3;
+	BUG();
+}
+
+/*
+ * WAN_AE reset
+ *
+ * Bits 12-15 gate soft reset of the WAN active-ethernet RX clock
+ * domains (names taken from the bit definitions below).
+ *
+ * NOTE(review): pmc_wan_ae_soft_reset() uses BPCM_SR_CONTROL_REG, not
+ * the PMB_WAN_AE_SR_CONTROL offset defined here — this 0x28 define
+ * appears unused in this file; confirm it matches BPCM_SR_CONTROL_REG.
+ */
+#define PMB_WAN_AE_SR_CONTROL				0x28
+# define PMB_WAN_AE_SR_CTRL_RX_RCLK16_SW_RESET_MASK	(1 << 12)
+# define PMB_WAN_AE_SR_CTRL_RX_RBC125_SW_RESET_MASK	(1 << 13)
+# define PMB_WAN_AE_SR_CTRL_RX_TCLK16_SW_RESET_MASK	(1 << 14)
+# define PMB_WAN_AE_SR_CTRL_RX_CLK125_SW_RESET_MASK	(1 << 15)
+
+/*
+ * Pulse the WAN active-ethernet clock soft resets: read-modify-write
+ * the WAN BPCM SR control register to set the four SW_RESET bits, hold
+ * for 5us, then clear them again.
+ *
+ * Returns 0 on success or a negative errno from the BPCM accessors.
+ */
+static int pmc_wan_ae_soft_reset(struct bcm63xx_pmc *priv)
+{
+	u32 val;
+	int ret;
+
+	ret = pmc_read_bpcm_register(priv,
+				     PMB_ADDR_WAN,
+				     BPCM_SR_CONTROL_REG, &val);
+	if (ret)
+		return ret;
+
+	/* assert all four clock-domain soft resets */
+	val |= PMB_WAN_AE_SR_CTRL_RX_RCLK16_SW_RESET_MASK |
+		PMB_WAN_AE_SR_CTRL_RX_RBC125_SW_RESET_MASK |
+		PMB_WAN_AE_SR_CTRL_RX_TCLK16_SW_RESET_MASK |
+		PMB_WAN_AE_SR_CTRL_RX_CLK125_SW_RESET_MASK;
+
+	ret = pmc_write_bpcm_register(priv,
+				      PMB_ADDR_WAN,
+				      BPCM_SR_CONTROL_REG,
+				      val);
+	if (ret)
+		return ret;
+
+	/* minimum reset hold time */
+	udelay(5);
+
+	/* release the resets again */
+	val &= ~(PMB_WAN_AE_SR_CTRL_RX_RCLK16_SW_RESET_MASK |
+		 PMB_WAN_AE_SR_CTRL_RX_RBC125_SW_RESET_MASK |
+		 PMB_WAN_AE_SR_CTRL_RX_TCLK16_SW_RESET_MASK |
+		 PMB_WAN_AE_SR_CTRL_RX_CLK125_SW_RESET_MASK);
+
+	ret = pmc_write_bpcm_register(priv,
+				      PMB_ADDR_WAN,
+				      BPCM_SR_CONTROL_REG,
+				      val);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+/*
+ * reset controller ops
+ */
+
+/*
+ * Dispatch one reset-controller request for line @idx.
+ *
+ * @action: 0 = deassert (power the block on),
+ *          1 = assert (power the block off),
+ *          2 = pulse (self-clearing soft reset).
+ *
+ * Returns 0 on success, -ENODEV for an unknown line, -ENOTSUPP when
+ * the line does not implement the requested action, or the error of
+ * the underlying power/reset helper.
+ */
+static int pmc_reset_op_action(struct bcm63xx_pmc *priv,
+			       unsigned long idx,
+			       unsigned int action)
+{
+	switch (idx) {
+	case PMC_R_CPU1:
+		/* secondary CPU can only be powered on, never off */
+		if (action == 0)
+			return pmc_cpu_power_on(priv, 1);
+		break;
+
+	case PMC_R_RDP:
+		switch (action) {
+		case 0:
+			return pmc_rdp_power_on(priv);
+		case 1:
+			return pmc_rdp_power_off(priv);
+		default:
+			break;
+		}
+		break;
+
+	case PMC_R_XRDP:
+		switch (action) {
+		case 0:
+			return pmc_xrdp_power_on(priv);
+		case 1:
+			return pmc_xrdp_power_off(priv);
+		default:
+			break;
+		}
+		break;
+
+	case PMC_R_SF2:
+		switch (action) {
+		case 0:
+			return pmc_sf2_power_on(priv);
+		case 1:
+			return pmc_sf2_power_off(priv);
+		default:
+			break;
+		}
+		break;
+
+	case PMC_R_SAR:
+		/* SAR is the only block also supporting a reset pulse */
+		switch (action) {
+		case 0:
+			return pmc_sar_power_on(priv);
+		case 1:
+			return pmc_sar_power_off(priv);
+		case 2:
+			return pmc_sar_soft_reset(priv);
+		default:
+			break;
+		}
+		break;
+
+	case PMC_R_USBH:
+		switch (action) {
+		case 0:
+			return pmc_usbh_power_on(priv);
+		case 1:
+			return pmc_usbh_power_off(priv);
+		default:
+			break;
+		}
+		break;
+
+	case PMC_R_PCIE0:
+	case PMC_R_PCIE1:
+	case PMC_R_PCIE2:
+	case PMC_R_PCIE3:
+		switch (action) {
+		case 0:
+			return pmc_pcieX_power_on(priv,
+						  reset_to_pmc_addr_id(idx));
+		case 1:
+			return pmc_pcieX_power_off(priv,
+						   reset_to_pmc_addr_id(idx));
+		}
+		break;
+
+	case PMC_R_PCIE01:
+		switch (action) {
+		case 0:
+			return pmc_pcie01_power_on(priv);
+		case 1:
+			return pmc_pcie01_power_off(priv);
+		}
+		break;
+
+	case PMC_R_WAN_AE:
+		/* WAN_AE only supports the reset pulse */
+		switch (action) {
+		case 2:
+			return pmc_wan_ae_soft_reset(priv);
+		default:
+			break;
+		}
+		break;
+
+	default:
+		/* add the missing trailing newline on the log message */
+		dev_err(priv->dev, "unknown reset idx %lu\n", idx);
+		return -ENODEV;
+	}
+
+	dev_err(priv->dev, "unhandled action %u for id %lu\n", action, idx);
+	return -ENOTSUPP;
+}
+
+/* reset_control_ops .assert hook: action 1 = power off / hold reset */
+static int pmc_assert_op_callback(struct reset_controller_dev *rcdev,
+				  unsigned long idx)
+{
+	struct bcm63xx_pmc *priv = to_pmc_reset_priv(rcdev);
+
+	return pmc_reset_op_action(priv, idx, 1);
+}
+
+/* reset_control_ops .deassert hook: action 0 = power on / release */
+static int pmc_deassert_op_callback(struct reset_controller_dev *rcdev,
+				    unsigned long idx)
+{
+	struct bcm63xx_pmc *priv = to_pmc_reset_priv(rcdev);
+
+	return pmc_reset_op_action(priv, idx, 0);
+}
+
+/* reset_control_ops .reset hook: action 2 = self-clearing pulse */
+static int pmc_reset_op_callback(struct reset_controller_dev *rcdev,
+				 unsigned long idx)
+{
+	struct bcm63xx_pmc *priv = to_pmc_reset_priv(rcdev);
+
+	return pmc_reset_op_action(priv, idx, 2);
+}
+
+/* reset_control_ops glue: assert/deassert/reset map to actions 1/0/2 */
+static const struct reset_control_ops pmc_reset_controller_ops = {
+	.assert		= pmc_assert_op_callback,
+	.deassert	= pmc_deassert_op_callback,
+	.reset		= pmc_reset_op_callback,
+};
+
+/*
+ * hwmon info
+ */
+
+/*
+ * Read one PVT (process/voltage/temperature) monitor channel.
+ *
+ * @island: PVT island index, placed in command word1.
+ * @sel:    channel select, forwarded as the first generic parameter.
+ * @value:  on success, receives word2 of the PMC response (raw reading).
+ *
+ * Returns 0 on success or a negative errno from pmc_send_command().
+ */
+static int pmc_get_pvt(struct bcm63xx_pmc *priv,
+		       unsigned int island,
+		       unsigned int sel,
+		       unsigned int *value)
+{
+	struct pmc_command cmd, rsp;
+	int error;
+
+	memset(&cmd, 0, sizeof (cmd));
+	cmd.word0.cmd_id = PMC_CMD_GET_PVT;
+	cmd.word1.dev_addr = 0;
+	cmd.word1.zone_idx = 0;
+	cmd.word1.island = island;
+	cmd.generic_params.params[0] = sel;
+
+	error = pmc_send_command(priv, &cmd, &rsp);
+	if (error)
+		return error;
+
+	*value = rsp.command_response.word2;
+	return 0;
+}
+
+/*
+ * sysfs "revision" attribute: query the PMC firmware and print
+ * "<revision>-<change>" (response word3 and word2 respectively).
+ */
+static ssize_t pmc_revision_show(struct device *dev,
+				 struct device_attribute *attr,
+				 char *buf)
+{
+	struct bcm63xx_pmc *priv = dev_get_drvdata(dev);
+	unsigned int revision, change;
+	struct pmc_command cmd, rsp;
+	int error;
+
+	memset(&cmd, 0, sizeof (cmd));
+	cmd.word0.cmd_id = PMC_CMD_REVISION;
+
+	error = pmc_send_command(priv, &cmd, &rsp);
+	if (error)
+		return error;
+
+	change = rsp.command_response.word2;
+	revision = rsp.command_response.word3;
+
+	/* sysfs_emit() is the documented way to format sysfs output */
+	return sysfs_emit(buf, "%x-%d\n", revision, change);
+}
+
+static SENSOR_DEVICE_ATTR(revision, 0444, pmc_revision_show, NULL, 0);
+
+/*
+ * hwmon temp1_input show handler: read PVT island 0, channel 0 and
+ * apply the linear raw-to-temperature conversion (presumably yielding
+ * millidegrees C per the hwmon temp*_input convention — TODO confirm
+ * against the PVT documentation).
+ */
+static ssize_t pmc_thermal_show_temp(struct device *dev,
+				     struct device_attribute *attr,
+				     char *buf)
+{
+	struct bcm63xx_pmc *priv = dev_get_drvdata(dev);
+	unsigned int value, temp;
+	int error;
+
+	error = pmc_get_pvt(priv, 0, 0, &value);
+	if (error)
+		return error;
+
+	temp = (41004000 - 48705 * value) / 100;
+	/* sysfs_emit() is the documented way to format sysfs output */
+	return sysfs_emit(buf, "%u\n", temp);
+}
+
+static SENSOR_DEVICE_ATTR(temp1_input, 0444, pmc_thermal_show_temp,
+			  NULL, 0);
+
+/*
+ * hwmon in*_input show handler.
+ *
+ * The sensor attribute index packs the PVT island in bits 15:8 and the
+ * PVT channel select in bits 7:0.  The raw reading is scaled with a
+ * per-channel divider; the result is presumably millivolts (hwmon
+ * in*_input convention) — TODO confirm against the PVT documentation.
+ */
+static ssize_t pmc_thermal_show_in(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct bcm63xx_pmc *priv = dev_get_drvdata(dev);
+	unsigned int value, in, reg, island;
+	int sel = to_sensor_dev_attr(attr)->index;
+	int divider;
+	int error;
+
+	island = (sel >> 8) & 0xff;
+	reg = sel & 0xff;
+
+	error = pmc_get_pvt(priv, island, reg, &value);
+	if (error)
+		return error;
+
+	/* per-channel scaling divider */
+	switch (reg) {
+	case 1:
+	case 2:
+		divider = 10;
+		break;
+	case 3:
+	case 4:
+		divider = 7;
+		break;
+	case 5:
+		divider = 4;
+		break;
+	case 6:
+		divider = 2;
+		break;
+	default:
+		divider = 1;
+		break;
+	}
+
+	in = (8800 * value) / (divider * 1024);
+	/* sysfs_emit() is the documented way to format sysfs output */
+	return sysfs_emit(buf, "%u\n", in);
+}
+
+/* V0.85_0 */
+static SENSOR_DEVICE_ATTR(in1_input, 0444, pmc_thermal_show_in, NULL, 0x0001);
+
+/* V0.85_1 */
+static SENSOR_DEVICE_ATTR(in2_input, 0444, pmc_thermal_show_in, NULL, 0x0002);
+
+/* Vin (island 0) */
+static SENSOR_DEVICE_ATTR(in3_input, 0444, pmc_thermal_show_in, NULL, 0x0003);
+
+/* V1.0 */
+static SENSOR_DEVICE_ATTR(in4_input, 0444, pmc_thermal_show_in, NULL, 0x0004);
+
+/* V1.8 */
+static SENSOR_DEVICE_ATTR(in5_input, 0444, pmc_thermal_show_in, NULL, 0x0005);
+
+/* V3.3 */
+static SENSOR_DEVICE_ATTR(in6_input, 0444, pmc_thermal_show_in, NULL, 0x0006);
+
+/* Vin (island 1) */
+static SENSOR_DEVICE_ATTR(in9_input, 0444, pmc_thermal_show_in, NULL, 0x0103);
+
+/*
+ * AFE PLL
+ */
+
+/*
+ * Dump one raw AFE PLL BPCM register; the attribute index is the
+ * register offset to read.
+ */
+static ssize_t pmc_show_afepll(struct device *dev,
+			       struct device_attribute *attr,
+			       char *buf)
+{
+	struct bcm63xx_pmc *priv = dev_get_drvdata(dev);
+	unsigned int offset = to_sensor_dev_attr(attr)->index;
+	int ret;
+	u32 val;
+
+	ret = pmc_read_bpcm_register(priv, PMB_ADDR_AFEPLL, offset, &val);
+	if (ret)
+		return ret;
+
+	/* sysfs_emit() is the documented way to format sysfs output */
+	return sysfs_emit(buf, "0x%x\n", val);
+}
+
+/*  vdsl pll */
+static SENSOR_DEVICE_ATTR(afepll_rosc_control_input,
+			  0444, pmc_show_afepll, NULL, 0x10);
+
+static SENSOR_DEVICE_ATTR(afepll_rosc_count_input,
+			  0444, pmc_show_afepll, NULL, 0x1c);
+
+static SENSOR_DEVICE_ATTR(afepll_sr_control_input,
+			  0444, pmc_show_afepll, NULL, 0x28);
+
+/* all hwmon attributes exported by the PMC device */
+static struct attribute *pmc_hwmon_attrs[] = {
+	&sensor_dev_attr_revision.dev_attr.attr,
+
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_in1_input.dev_attr.attr,
+	&sensor_dev_attr_in2_input.dev_attr.attr,
+	&sensor_dev_attr_in3_input.dev_attr.attr,
+	&sensor_dev_attr_in4_input.dev_attr.attr,
+	&sensor_dev_attr_in5_input.dev_attr.attr,
+	&sensor_dev_attr_in6_input.dev_attr.attr,
+	&sensor_dev_attr_in9_input.dev_attr.attr,
+
+	&sensor_dev_attr_afepll_rosc_control_input.dev_attr.attr,
+	&sensor_dev_attr_afepll_rosc_count_input.dev_attr.attr,
+	&sensor_dev_attr_afepll_sr_control_input.dev_attr.attr,
+	NULL,
+};
+/* generates pmc_hwmon_groups for hwmon registration */
+ATTRIBUTE_GROUPS(pmc_hwmon);
+
+
+/*
+ * called by SMP ops early in the boot process
+ *
+ * Power up secondary CPU @cpu through the PMC (only cpu 0 or 1 is
+ * accepted).  Relies on the global 'pmc' state set up by early init;
+ * returns -ENODEV (with a WARN) if that has not happened yet.
+ */
+int bcm63xx_pmc_cpu_power_on(unsigned int cpu)
+{
+	/* make sure early init has been called */
+	if (WARN_ON(!pmc->base))
+		return -ENODEV;
+
+	/* only cpu indices 0 and 1 exist here */
+	if (WARN_ON(cpu > 1))
+		return -ENODEV;
+
+	return pmc_cpu_power_on(pmc, cpu);
+}
+
+/*
+ * load PMC firmware overlay if needed
+ *
+ * called with PMC lock held
+ *
+ * Currently nothing to do; kept as a hook for SoCs that need a
+ * firmware overlay, always succeeds.
+ */
+static int __pmc_init(struct bcm63xx_pmc *pmc)
+{
+	return 0;
+}
+
+/*
+ * Platform probe: replace the early fixed mapping with a devm-managed
+ * one, finish PMC init, then register the reset controller and the
+ * hwmon device.
+ */
+static int bcm63xx_pmc_probe(struct platform_device *pdev)
+{
+	struct reset_controller_dev *rcdev;
+	struct resource *res;
+	struct device *hwmon_dev;
+	void __iomem *base;
+	int ret;
+
+	/*
+	 * Early initialisation should have configured an initial
+	 * register mapping and setup the soc data pointer. If these
+	 * are not valid then something went badly wrong!
+	 */
+	if (WARN_ON(!pmc->base))
+		return -ENODEV;
+
+	/* take over the memory region from the early initialization */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	pmc->dev = &pdev->dev;
+
+	/* switch to newly reserved base address & init PMC */
+	mutex_lock(&pmc->pmc_lock);
+	iounmap(pmc->base);
+	pmc->base = base;
+
+	ret = __pmc_init(pmc);
+	if (ret) {
+		/*
+		 * 'base' is devm-managed: calling iounmap() on it here
+		 * would make devres unmap it a second time when probe
+		 * fails.  Just drop the pointer so later callers hit
+		 * the WARN_ON above.
+		 */
+		pmc->base = NULL;
+		mutex_unlock(&pmc->pmc_lock);
+		return ret;
+	}
+	mutex_unlock(&pmc->pmc_lock);
+
+	/* register as reset controller */
+	rcdev = &pmc->rcdev;
+	rcdev->ops = &pmc_reset_controller_ops;
+	rcdev->of_node = pdev->dev.of_node;
+	rcdev->of_reset_n_cells = 1;
+	rcdev->nr_resets = PMC_R_LAST;
+
+	ret = devm_reset_controller_register(&pdev->dev, rcdev);
+	if (ret) {
+		/* use the device-aware helper and terminate the line */
+		dev_err(&pdev->dev,
+			"failed to register reset controller: %d\n", ret);
+		return ret;
+	}
+
+	/* register as hwmon */
+	if (IS_ENABLED(CONFIG_HWMON)) {
+		hwmon_dev =
+			devm_hwmon_device_register_with_groups(&pdev->dev, "pmc",
+							       pmc,
+							       pmc_hwmon_groups);
+		if (IS_ERR(hwmon_dev))
+			return PTR_ERR(hwmon_dev);
+	}
+
+	pmc_create_debugfs(pmc);
+
+	pmc->init_done = true;
+	return 0;
+}
+
+/*
+ * .data presumably carries the SoC id later compared against the PMC
+ * firmware revision (see pmc_check_revision) — confirm against the
+ * early-init code that sets soc_id.
+ */
+static const struct of_device_id bcm63xx_pmc_match[] = {
+	{ .compatible = "brcm,bcm63138-pmc", .data = (void *)0x63138 },
+	{ .compatible = "brcm,bcm63158-pmc", .data = (void *)0x63158 },
+	{ }
+};
+
+/*
+ * Built-in only driver (no module unload); unbinding via sysfs is
+ * suppressed since the PMC state cannot be torn down.
+ */
+static struct platform_driver bcm63xx_pmc_driver = {
+	.driver = {
+		.name = "bcm63xx-pmc",
+		.suppress_bind_attrs = true,
+		.of_match_table = bcm63xx_pmc_match,
+	},
+	.probe = bcm63xx_pmc_probe,
+};
+
+builtin_platform_driver(bcm63xx_pmc_driver);
+
+/*
+ * fetch PMC firmware revision and check that it matches SOC id
+ *
+ * Returns 0 when response word2 equals priv->soc_id, -ENODEV on a
+ * mismatch, or the error from pmc_send_command().
+ */
+static int pmc_check_revision(struct bcm63xx_pmc *priv)
+{
+	struct pmc_command cmd, rsp;
+	int ret;
+
+	memset(&cmd, 0, sizeof (cmd));
+
+	cmd.word0.cmd_id = PMC_CMD_REVISION;
+	cmd.word1.dev_addr = 0;
+
+	ret = pmc_send_command(priv, &cmd, &rsp);
+	if (ret) {
+		pr_err("failed to retrieve PMC revision\n");
+		return ret;
+	}
+
+	if (rsp.command_response.word2 != priv->soc_id) {
+		pr_err("PMC revision mismatch (0x%08x != 0x%08x)\n",
+		       rsp.command_response.word2, priv->soc_id);
+		return -ENODEV;
+	}
+
+	/* add the missing trailing newline so the debug message is not
+	 * merged with the next printk */
+	pr_debug("PMC firmware revision: 0x%08x\n",
+		 rsp.command_response.word3);
+	return 0;
+}
+
+/*
+ * additional code to load into PMC
+ */
+#define PMC_IMAGE_ALIGN		(1 << 13)
+
+static const __attribute__((aligned(PMC_IMAGE_ALIGN))) u8 pmc_63138_image[] = {
+	0x27, 0xbd, 0xff, 0x10, 0x8f, 0x82, 0x06, 0x00, 0xaf, 0xb1, 0x00, 0xcc,
+	0xaf, 0xb0, 0x00, 0xc8, 0x00, 0xa0, 0x88, 0x21, 0xaf, 0xbf, 0x00, 0xec,
+	0xaf, 0xbe, 0x00, 0xe8, 0xaf, 0xb7, 0x00, 0xe4, 0xaf, 0xb6, 0x00, 0xe0,
+	0xaf, 0xb5, 0x00, 0xdc, 0xaf, 0xb4, 0x00, 0xd8, 0xaf, 0xb3, 0x00, 0xd4,
+	0xaf, 0xb2, 0x00, 0xd0, 0x00, 0x80, 0x80, 0x21, 0x00, 0x40, 0xf8, 0x09,
+	0x00, 0xa0, 0x20, 0x21, 0x8e, 0x22, 0x00, 0x08, 0xaf, 0x82, 0x01, 0x84,
+	0x94, 0x42, 0x00, 0x00, 0x14, 0x40, 0x01, 0x99, 0x8f, 0xbf, 0x00, 0xec,
+	0x8f, 0x82, 0x06, 0x70, 0x02, 0x20, 0x20, 0x21, 0x00, 0x40, 0xf8, 0x09,
+	0x02, 0x20, 0x28, 0x21, 0x3c, 0x02, 0x00, 0x06, 0x8e, 0x23, 0x00, 0x08,
+	0x24, 0x42, 0x31, 0x38, 0x14, 0x62, 0x01, 0x90, 0x8f, 0xbf, 0x00, 0xec,
+	0x96, 0x22, 0x00, 0x0e, 0x34, 0x43, 0x10, 0x00, 0x30, 0x42, 0x00, 0xff,
+	0xa7, 0x83, 0x01, 0x80, 0x2c, 0x43, 0x00, 0x45, 0x8f, 0x82, 0x01, 0x84,
+	0x24, 0x44, 0xff, 0xfc, 0x00, 0x83, 0x10, 0x0b, 0x8e, 0x03, 0x00, 0x04,
+	0x10, 0x60, 0x00, 0x08, 0xaf, 0x82, 0x01, 0x88, 0x8c, 0x42, 0x00, 0x48,
+	0x00, 0x40, 0xf8, 0x09, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x82, 0x01, 0x84,
+	0x94, 0x42, 0x00, 0x04, 0x0b, 0xf0, 0x41, 0xac, 0xae, 0x02, 0x00, 0x04,
+	0x3c, 0x03, 0xb6, 0x00, 0x8c, 0x64, 0x10, 0x30, 0x24, 0x05, 0xff, 0xfd,
+	0x00, 0x85, 0x20, 0x24, 0xac, 0x64, 0x10, 0x30, 0x3c, 0x03, 0x9f, 0xc1,
+	0x24, 0x63, 0x07, 0x24, 0x8e, 0x12, 0x00, 0x08, 0x8e, 0x14, 0x00, 0x0c,
+	0xaf, 0x83, 0x06, 0x1c, 0x3c, 0x03, 0x9f, 0xc1, 0x24, 0x63, 0x07, 0x0c,
+	0xaf, 0x83, 0x06, 0x70, 0x3c, 0x03, 0x9f, 0xc1, 0x24, 0x63, 0x0f, 0xb4,
+	0xaf, 0x83, 0x06, 0x88, 0x3c, 0x03, 0x9f, 0xc1, 0x24, 0x63, 0x0a, 0xe8,
+	0xac, 0x43, 0x00, 0x2c, 0x0f, 0xf0, 0x41, 0xb8, 0x3c, 0x04, 0x00, 0x05,
+	0x3c, 0x02, 0xb6, 0x08, 0xac, 0x40, 0x00, 0x50, 0xac, 0x40, 0x00, 0x54,
+	0xac, 0x40, 0x00, 0x58, 0xac, 0x40, 0x00, 0x5c, 0xac, 0x40, 0x01, 0x0c,
+	0x24, 0x10, 0x18, 0x00, 0x00, 0x00, 0xb8, 0x21, 0x24, 0x53, 0x00, 0x50,
+	0x24, 0x16, 0xfc, 0x7f, 0x3c, 0x15, 0xb6, 0x08, 0x24, 0x46, 0x00, 0x5c,
+	0x00, 0x17, 0x11, 0xc0, 0x02, 0x16, 0x80, 0x24, 0x02, 0x02, 0x80, 0x25,
+	0xae, 0xb0, 0x01, 0x08, 0x8f, 0x82, 0x01, 0x88, 0x00, 0x00, 0x20, 0x21,
+	0x8c, 0x42, 0x00, 0x38, 0x24, 0x05, 0x04, 0x00, 0x00, 0x40, 0xf8, 0x09,
+	0xaf, 0xa6, 0x00, 0xc0, 0x26, 0xe3, 0x00, 0x01, 0x30, 0x63, 0x00, 0x07,
+	0x00, 0x03, 0x19, 0xc0, 0x02, 0x16, 0x80, 0x24, 0x02, 0x03, 0x80, 0x25,
+	0xae, 0xb0, 0x01, 0x08, 0x00, 0x02, 0xf4, 0x00, 0x8f, 0x82, 0x01, 0x88,
+	0x00, 0x00, 0x20, 0x21, 0x8c, 0x42, 0x00, 0x38, 0x00, 0x40, 0xf8, 0x09,
+	0x24, 0x05, 0x04, 0x00, 0x8f, 0xa6, 0x00, 0xc0, 0x26, 0x73, 0x00, 0x04,
+	0x03, 0xc2, 0x10, 0x25, 0x26, 0xf7, 0x00, 0x02, 0xae, 0x62, 0xff, 0xfc,
+	0x16, 0x66, 0xff, 0xe4, 0x32, 0xf7, 0x00, 0x07, 0x3c, 0x02, 0xb6, 0x08,
+	0x36, 0x10, 0x03, 0x80, 0x3c, 0x03, 0xa0, 0x00, 0xac, 0x50, 0x01, 0x08,
+	0x34, 0x63, 0xcf, 0x08, 0x3c, 0x02, 0xb6, 0x00, 0xac, 0x43, 0x10, 0x20,
+	0x3c, 0x03, 0xb6, 0x00, 0x8c, 0x62, 0x10, 0x20, 0x04, 0x40, 0xff, 0xfe,
+	0x24, 0x73, 0x10, 0x00, 0x8f, 0x82, 0x01, 0x88, 0x24, 0x05, 0x04, 0x00,
+	0x8c, 0x42, 0x00, 0x38, 0x00, 0x40, 0xf8, 0x09, 0x00, 0x00, 0x20, 0x21,
+	0x3c, 0x03, 0x00, 0x01, 0x00, 0x43, 0x10, 0x25, 0x3c, 0x10, 0xb6, 0x08,
+	0xae, 0x02, 0x00, 0x5c, 0x0f, 0xf0, 0x41, 0xb8, 0x3c, 0x04, 0x00, 0x06,
+	0x24, 0x02, 0x10, 0x00, 0xae, 0x00, 0x01, 0x0c, 0xae, 0x02, 0x01, 0x08,
+	0x8f, 0x82, 0x01, 0x88, 0x00, 0x00, 0x20, 0x21, 0x8c, 0x42, 0x00, 0x38,
+	0x00, 0x40, 0xf8, 0x09, 0x24, 0x05, 0x00, 0x80, 0x24, 0x03, 0x03, 0x70,
+	0x70, 0x43, 0x10, 0x02, 0x24, 0x15, 0x04, 0x00, 0x00, 0x55, 0x00, 0x1a,
+	0x24, 0x03, 0x05, 0x91, 0x24, 0x04, 0x03, 0xe8, 0x00, 0x00, 0x10, 0x12,
+	0x00, 0x62, 0x10, 0x23, 0x70, 0x44, 0x10, 0x02, 0x24, 0x04, 0x04, 0xaa,
+	0x00, 0x44, 0x00, 0x1a, 0x8f, 0x83, 0x01, 0x84, 0x00, 0x00, 0x20, 0x12,
+	0x30, 0x82, 0xff, 0xff, 0xa4, 0x62, 0x00, 0x04, 0x24, 0x42, 0xfe, 0x49,
+	0x2c, 0x42, 0x01, 0xa4, 0x10, 0x40, 0x00, 0x17, 0x30, 0x84, 0xff, 0xff,
+	0x0f, 0xf0, 0x41, 0xea, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x02, 0xa0, 0x00,
+	0x24, 0x42, 0x0b, 0xb8, 0xae, 0x15, 0x01, 0x08, 0x3c, 0x04, 0xb6, 0x00,
+	0xae, 0x62, 0x00, 0x20, 0x8c, 0x82, 0x10, 0x20, 0x04, 0x40, 0xff, 0xfe,
+	0x24, 0x83, 0x10, 0x00, 0x3c, 0x02, 0xb6, 0x08, 0x24, 0x04, 0x0c, 0x00,
+	0xac, 0x44, 0x01, 0x08, 0x3c, 0x02, 0xa0, 0x00, 0x24, 0x42, 0x0b, 0xb8,
+	0xac, 0x62, 0x00, 0x20, 0x3c, 0x03, 0xb6, 0x00, 0x8c, 0x62, 0x10, 0x20,
+	0x04, 0x40, 0xff, 0xfe, 0x8f, 0x82, 0x01, 0x84, 0x0b, 0xf0, 0x40, 0xbe,
+	0x94, 0x42, 0x00, 0x00, 0x3c, 0x02, 0x80, 0x06, 0x0f, 0xf0, 0x41, 0xb8,
+	0x00, 0x82, 0x20, 0x25, 0x8f, 0x82, 0x01, 0x88, 0x8c, 0x42, 0x00, 0x48,
+	0x00, 0x40, 0xf8, 0x09, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x82, 0x01, 0x84,
+	0x94, 0x42, 0x00, 0x00, 0x54, 0x40, 0x00, 0xc5, 0x02, 0x40, 0x20, 0x21,
+	0x0f, 0xf0, 0x41, 0xb8, 0x3c, 0x04, 0x00, 0x07, 0x3c, 0x02, 0xb6, 0x08,
+	0x94, 0x53, 0x01, 0x16, 0x0f, 0xf0, 0x41, 0xbe, 0x3c, 0x15, 0x00, 0x10,
+	0x30, 0x44, 0xff, 0xff, 0x00, 0x95, 0x20, 0x25, 0x32, 0x73, 0xff, 0xff,
+	0x0f, 0xf0, 0x41, 0xb8, 0x00, 0x40, 0x80, 0x21, 0x0f, 0xf0, 0x41, 0xea,
+	0x26, 0x64, 0xff, 0xc0, 0x0f, 0xf0, 0x41, 0xbe, 0x00, 0x00, 0x00, 0x00,
+	0x30, 0x44, 0xff, 0xff, 0x00, 0x40, 0xb0, 0x21, 0x0f, 0xf0, 0x41, 0xb8,
+	0x00, 0x95, 0x20, 0x25, 0x02, 0x16, 0x20, 0x23, 0x28, 0x82, 0x00, 0x08,
+	0x10, 0x40, 0x00, 0x09, 0x3c, 0x05, 0x00, 0x09, 0x0f, 0xf0, 0x41, 0xb8,
+	0x3c, 0x04, 0x80, 0x07, 0x8f, 0x82, 0x01, 0x88, 0x8c, 0x42, 0x00, 0x48,
+	0x00, 0x40, 0xf8, 0x09, 0x00, 0x00, 0x00, 0x00, 0x0b, 0xf0, 0x41, 0x84,
+	0x02, 0x40, 0x20, 0x21, 0x34, 0xa5, 0xc4, 0x00, 0x00, 0xa4, 0x00, 0x1a,
+	0x24, 0x03, 0x03, 0x2e, 0x00, 0x70, 0x18, 0x23, 0x24, 0x10, 0x27, 0x10,
+	0x24, 0x16, 0x03, 0x2e, 0x00, 0x00, 0x10, 0x12, 0x70, 0x62, 0x10, 0x02,
+	0x00, 0x50, 0x00, 0x1a, 0x00, 0x00, 0x80, 0x12, 0x02, 0x13, 0x80, 0x21,
+	0x0f, 0xf0, 0x41, 0xea, 0x02, 0x00, 0x20, 0x21, 0x0f, 0xf0, 0x41, 0xbe,
+	0x00, 0x00, 0x00, 0x00, 0x30, 0x44, 0xff, 0xff, 0x00, 0x95, 0x20, 0x25,
+	0xaf, 0xa2, 0x00, 0xc0, 0x0f, 0xf0, 0x41, 0xb8, 0x3c, 0x15, 0x00, 0x07,
+	0x8f, 0xa2, 0x00, 0xc0, 0x26, 0xb5, 0x00, 0x01, 0x28, 0x43, 0x03, 0x2e,
+	0x50, 0x60, 0x00, 0x07, 0x28, 0x42, 0x03, 0x33, 0x02, 0xc2, 0x10, 0x23,
+	0x28, 0x42, 0x00, 0x05, 0x10, 0x40, 0x00, 0xa3, 0x3c, 0x04, 0x00, 0x07,
+	0x0b, 0xf0, 0x41, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x40, 0x00, 0xa9,
+	0x3c, 0x04, 0x00, 0x07, 0x0f, 0xf0, 0x41, 0xb8, 0x24, 0x84, 0x00, 0x02,
+	0x8f, 0x82, 0x01, 0x88, 0x24, 0x05, 0x04, 0x00, 0x8c, 0x42, 0x00, 0x38,
+	0x00, 0x40, 0xf8, 0x09, 0x00, 0x00, 0x20, 0x21, 0x3c, 0x03, 0xff, 0xff,
+	0x24, 0x63, 0x41, 0xbf, 0x70, 0x43, 0x20, 0x02, 0x3c, 0x1e, 0x02, 0x71,
+	0x37, 0xde, 0xab, 0xe0, 0x00, 0x9e, 0x10, 0x21, 0x24, 0x1e, 0x00, 0x64,
+	0x00, 0x5e, 0x00, 0x1a, 0x24, 0x1e, 0x03, 0xe8, 0x27, 0xa4, 0x00, 0x10,
+	0x00, 0x00, 0x80, 0x21, 0x00, 0x00, 0xa8, 0x21, 0x00, 0x00, 0x10, 0x12,
+	0x00, 0x5e, 0x00, 0x1a, 0x0f, 0xf0, 0x42, 0x1d, 0x00, 0x00, 0xf0, 0x12,
+	0x27, 0xa4, 0x00, 0x10, 0x00, 0x00, 0x18, 0x21, 0x00, 0x00, 0x40, 0x21,
+	0x00, 0x00, 0x28, 0x21, 0x00, 0xa2, 0x30, 0x2a, 0x50, 0xc0, 0x00, 0x0f,
+	0x01, 0x15, 0x00, 0x1a, 0x94, 0x86, 0x00, 0x00, 0x2c, 0xc7, 0x00, 0x0b,
+	0x54, 0xe0, 0x00, 0x04, 0x94, 0x86, 0x00, 0x02, 0x01, 0x06, 0x40, 0x21,
+	0x26, 0xb5, 0x00, 0x01, 0x94, 0x86, 0x00, 0x02, 0x2c, 0xc7, 0x00, 0x0b,
+	0x14, 0xe0, 0x00, 0x03, 0x24, 0xa5, 0x00, 0x01, 0x00, 0x66, 0x18, 0x21,
+	0x26, 0x10, 0x00, 0x01, 0x0b, 0xf0, 0x41, 0x1b, 0x24, 0x84, 0x00, 0x04,
+	0x3c, 0x16, 0x00, 0x07, 0x26, 0xc4, 0x00, 0x03, 0x00, 0x00, 0xa8, 0x12,
+	0x00, 0x70, 0x00, 0x1a, 0x0f, 0xf0, 0x41, 0xb8, 0x00, 0x00, 0x80, 0x12,
+	0x34, 0x08, 0xc3, 0x50, 0x72, 0x08, 0x18, 0x02, 0x34, 0x07, 0xff, 0xff,
+	0x73, 0xde, 0x48, 0x02, 0x00, 0x67, 0x00, 0x1a, 0x3c, 0x04, 0xff, 0xfe,
+	0x34, 0x84, 0xbe, 0x3e, 0x24, 0x02, 0xf9, 0xcd, 0x24, 0x0a, 0x03, 0xe8,
+	0x3c, 0x05, 0xff, 0xf4, 0x24, 0xa5, 0x4f, 0x6c, 0x3c, 0x10, 0x00, 0x03,
+	0x36, 0x10, 0xb0, 0x88, 0x3c, 0x06, 0x00, 0x22, 0x24, 0xc6, 0x57, 0x04,
+	0x00, 0x00, 0x18, 0x12, 0x03, 0xc4, 0x00, 0x18, 0x71, 0x22, 0x00, 0x00,
+	0x3c, 0x02, 0x00, 0x3a, 0x00, 0x00, 0x20, 0x12, 0x24, 0x42, 0x20, 0xdc,
+	0x00, 0x8a, 0x00, 0x1a, 0x00, 0x00, 0x20, 0x12, 0x00, 0x82, 0x20, 0x21,
+	0x00, 0x8a, 0x00, 0x1a, 0x3c, 0x02, 0x00, 0x02, 0x24, 0x42, 0x6e, 0x44,
+	0x00, 0x00, 0x20, 0x12, 0x00, 0x64, 0x18, 0x23, 0x70, 0x6a, 0x18, 0x02,
+	0x03, 0xc5, 0x00, 0x18, 0x24, 0x04, 0xef, 0x14, 0x71, 0x24, 0x00, 0x00,
+	0x24, 0x04, 0x27, 0x10, 0x00, 0x00, 0x28, 0x12, 0x00, 0xa4, 0x00, 0x1a,
+	0x00, 0x00, 0x28, 0x12, 0x00, 0xb0, 0x28, 0x21, 0x00, 0xaa, 0x00, 0x1a,
+	0x00, 0x00, 0x28, 0x12, 0x00, 0x65, 0x00, 0x1a, 0x24, 0x03, 0xfb, 0xb3,
+	0x00, 0x00, 0xb8, 0x12, 0x72, 0xa8, 0x40, 0x02, 0x01, 0x07, 0x00, 0x1a,
+	0x3c, 0x07, 0x00, 0x05, 0x34, 0xe7, 0x9d, 0x80, 0x00, 0x00, 0x40, 0x12,
+	0x03, 0xc7, 0x00, 0x18, 0x71, 0x23, 0x00, 0x00, 0x00, 0x00, 0x38, 0x12,
+	0x00, 0xea, 0x00, 0x1a, 0x00, 0x00, 0x38, 0x12, 0x00, 0xe6, 0x38, 0x21,
+	0x00, 0xea, 0x00, 0x1a, 0x3c, 0x06, 0xff, 0xfc, 0x24, 0xc6, 0x62, 0x38,
+	0x00, 0x00, 0x38, 0x12, 0x01, 0x07, 0x40, 0x23, 0x71, 0x0a, 0x38, 0x02,
+	0x03, 0xc6, 0x00, 0x18, 0x24, 0x06, 0x0e, 0x48, 0x71, 0x26, 0x00, 0x00,
+	0x00, 0x00, 0x30, 0x12, 0x00, 0xc4, 0x00, 0x1a, 0x26, 0xc4, 0x00, 0x04,
+	0x00, 0x00, 0x30, 0x12, 0x00, 0xc2, 0x10, 0x21, 0x00, 0x4a, 0x00, 0x1a,
+	0x00, 0x00, 0x10, 0x12, 0x00, 0xe2, 0x00, 0x1a, 0x0f, 0xf0, 0x41, 0xb8,
+	0x00, 0x00, 0x80, 0x12, 0x02, 0xf0, 0x28, 0x21, 0x24, 0x17, 0x00, 0x02,
+	0x00, 0xb7, 0x00, 0x1a, 0x8f, 0x82, 0x01, 0x84, 0x02, 0x60, 0x20, 0x21,
+	0x00, 0x00, 0x28, 0x12, 0x0f, 0xf0, 0x41, 0xea, 0xa4, 0x45, 0x00, 0x06,
+	0x02, 0x40, 0x20, 0x21, 0x0f, 0xf0, 0x42, 0xf5, 0x02, 0x80, 0x28, 0x21,
+	0x8f, 0x82, 0x01, 0x84, 0x94, 0x42, 0x00, 0x00, 0x10, 0x40, 0x00, 0x08,
+	0x8f, 0x82, 0x01, 0x84, 0x0f, 0xf0, 0x41, 0xb8, 0x3c, 0x04, 0x80, 0x09,
+	0x8f, 0x82, 0x01, 0x88, 0x8c, 0x42, 0x00, 0x48, 0x00, 0x40, 0xf8, 0x09,
+	0x00, 0x00, 0x00, 0x00, 0x8f, 0x82, 0x01, 0x84, 0x8f, 0x83, 0x01, 0x88,
+	0x94, 0x42, 0x00, 0x06, 0x00, 0x12, 0x94, 0x00, 0xae, 0x22, 0x00, 0x04,
+	0x94, 0x62, 0x00, 0x20, 0x94, 0x63, 0x00, 0x22, 0x00, 0x02, 0x14, 0x00,
+	0x32, 0x94, 0xff, 0xff, 0x00, 0x43, 0x10, 0x25, 0x02, 0x54, 0x90, 0x25,
+	0xae, 0x22, 0x00, 0x08, 0x0b, 0xf0, 0x41, 0xac, 0xae, 0x32, 0x00, 0x0c,
+	0x0f, 0xf0, 0x41, 0xb8, 0x02, 0xa0, 0x20, 0x21, 0x24, 0x02, 0xff, 0xff,
+	0x02, 0x02, 0x80, 0x23, 0x0f, 0xf0, 0x41, 0xea, 0x02, 0x00, 0x20, 0x21,
+	0x0f, 0xf0, 0x41, 0xbe, 0x00, 0x00, 0x00, 0x00, 0x0b, 0xf0, 0x40, 0xf6,
+	0x28, 0x43, 0x03, 0x2e, 0x0f, 0xf0, 0x41, 0xb8, 0x02, 0xa0, 0x20, 0x21,
+	0x0b, 0xf0, 0x41, 0xa1, 0x24, 0x02, 0x00, 0x01, 0x8f, 0xbf, 0x00, 0xec,
+	0x8f, 0xbe, 0x00, 0xe8, 0x8f, 0xb7, 0x00, 0xe4, 0x8f, 0xb6, 0x00, 0xe0,
+	0x8f, 0xb5, 0x00, 0xdc, 0x8f, 0xb4, 0x00, 0xd8, 0x8f, 0xb3, 0x00, 0xd4,
+	0x8f, 0xb2, 0x00, 0xd0, 0x8f, 0xb1, 0x00, 0xcc, 0x8f, 0xb0, 0x00, 0xc8,
+	0x03, 0xe0, 0x00, 0x08, 0x27, 0xbd, 0x00, 0xf0, 0x3c, 0x02, 0xb6, 0x00,
+	0x8c, 0x43, 0x10, 0x84, 0x04, 0x63, 0x00, 0x01, 0xac, 0x44, 0x10, 0x84,
+	0x03, 0xe0, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x8f, 0x82, 0x01, 0x88,
+	0x24, 0x04, 0x00, 0x03, 0x8c, 0x59, 0x00, 0x38, 0x03, 0x20, 0x00, 0x08,
+	0x24, 0x05, 0x00, 0x20, 0x3c, 0x02, 0x00, 0x06, 0x24, 0x42, 0x31, 0x38,
+	0xac, 0xa2, 0x00, 0x08, 0x97, 0x82, 0x01, 0x80, 0x03, 0xe0, 0x00, 0x08,
+	0xac, 0xa2, 0x00, 0x0c, 0x3c, 0x03, 0xb6, 0x08, 0x94, 0x68, 0x00, 0x5e,
+	0x24, 0x67, 0x00, 0x50, 0x00, 0x00, 0x10, 0x21, 0x24, 0x63, 0x00, 0x5c,
+	0x8c, 0xe4, 0x00, 0x00, 0x24, 0xe7, 0x00, 0x04, 0x30, 0x86, 0xff, 0xff,
+	0x00, 0x04, 0x24, 0x02, 0x00, 0xc4, 0x20, 0x21, 0x00, 0x04, 0x20, 0x42,
+	0x14, 0xe3, 0xff, 0xf9, 0x00, 0x44, 0x10, 0x21, 0x24, 0x03, 0x00, 0x03,
+	0x00, 0x43, 0x00, 0x1b, 0x24, 0x02, 0x03, 0xe8, 0x00, 0x00, 0x18, 0x12,
+	0x70, 0x62, 0x10, 0x02, 0x8f, 0x83, 0x01, 0x84, 0x00, 0x48, 0x00, 0x1b,
+	0x84, 0x63, 0x00, 0x06, 0x00, 0x03, 0x1c, 0x00, 0x00, 0x00, 0x10, 0x12,
+	0x30, 0x42, 0xff, 0xff, 0x00, 0x43, 0x10, 0x25, 0x8f, 0x83, 0x01, 0x88,
+	0xac, 0xa2, 0x00, 0x08, 0x94, 0x62, 0x00, 0x22, 0x94, 0x63, 0x00, 0x20,
+	0x00, 0x02, 0x14, 0x00, 0x00, 0x43, 0x10, 0x25, 0x03, 0xe0, 0x00, 0x08,
+	0xac, 0xa2, 0x00, 0x0c, 0x28, 0x83, 0x03, 0xce, 0x10, 0x60, 0x00, 0x05,
+	0x00, 0x80, 0x10, 0x21, 0x28, 0x84, 0x00, 0x32, 0x24, 0x03, 0x00, 0x32,
+	0x0b, 0xf0, 0x41, 0xf2, 0x00, 0x64, 0x10, 0x0b, 0x24, 0x02, 0x03, 0xcd,
+	0x30, 0x43, 0xff, 0xff, 0x3c, 0x04, 0xb6, 0x08, 0xa4, 0x83, 0x01, 0x16,
+	0x3c, 0x04, 0xa0, 0x00, 0x24, 0x84, 0x01, 0xf4, 0x3c, 0x03, 0xb6, 0x00,
+	0xac, 0x64, 0x10, 0x20, 0x3c, 0x04, 0xb6, 0x00, 0x8c, 0x83, 0x10, 0x20,
+	0x04, 0x60, 0xff, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe0, 0x00, 0x08,
+	0x00, 0x00, 0x00, 0x00, 0x27, 0xbd, 0xff, 0xe0, 0x3c, 0x02, 0xb6, 0x08,
+	0xaf, 0xb0, 0x00, 0x14, 0xaf, 0xbf, 0x00, 0x1c, 0xaf, 0xb1, 0x00, 0x18,
+	0x00, 0x80, 0x80, 0x21, 0x94, 0x51, 0x01, 0x16, 0x0f, 0xf0, 0x41, 0xbe,
+	0x32, 0x31, 0xff, 0xff, 0x24, 0x43, 0xfd, 0x2b, 0x2c, 0x63, 0x00, 0x69,
+	0x14, 0x60, 0x00, 0x08, 0x02, 0x11, 0x20, 0x21, 0x2c, 0x43, 0x02, 0xd4,
+	0x14, 0x60, 0x00, 0x05, 0x26, 0x24, 0x00, 0x02, 0x2c, 0x42, 0x03, 0x3f,
+	0x14, 0x40, 0x00, 0x08, 0x8f, 0xbf, 0x00, 0x1c, 0x26, 0x24, 0xff, 0xfe,
+	0x10, 0x91, 0x00, 0x05, 0x8f, 0xbf, 0x00, 0x1c, 0x8f, 0xb1, 0x00, 0x18,
+	0x8f, 0xb0, 0x00, 0x14, 0x0b, 0xf0, 0x41, 0xea, 0x27, 0xbd, 0x00, 0x20,
+	0x8f, 0xb1, 0x00, 0x18, 0x8f, 0xb0, 0x00, 0x14, 0x03, 0xe0, 0x00, 0x08,
+	0x27, 0xbd, 0x00, 0x20, 0x8f, 0x82, 0x01, 0x88, 0x27, 0xbd, 0xff, 0xc8,
+	0xaf, 0xb5, 0x00, 0x24, 0x8c, 0x55, 0x00, 0x0c, 0xaf, 0xb7, 0x00, 0x2c,
+	0xaf, 0xb6, 0x00, 0x28, 0xaf, 0xb4, 0x00, 0x20, 0xaf, 0xb3, 0x00, 0x1c,
+	0xaf, 0xb2, 0x00, 0x18, 0xaf, 0xb1, 0x00, 0x14, 0xaf, 0xb0, 0x00, 0x10,
+	0xaf, 0xbf, 0x00, 0x34, 0xaf, 0xbe, 0x00, 0x30, 0x00, 0x80, 0x80, 0x21,
+	0x00, 0x00, 0x90, 0x21, 0x3c, 0x13, 0x00, 0x1c, 0x3c, 0x16, 0x00, 0x14,
+	0x3c, 0x11, 0xb6, 0x08, 0x24, 0x17, 0x00, 0x08, 0x24, 0x14, 0xff, 0xc0,
+	0x00, 0x12, 0x20, 0x80, 0x02, 0xa4, 0x20, 0x21, 0x8c, 0x82, 0x00, 0x00,
+	0x00, 0x53, 0x28, 0x24, 0x10, 0xb3, 0x00, 0x35, 0x00, 0x02, 0x1c, 0x82,
+	0x30, 0x63, 0x00, 0x07, 0x28, 0x63, 0x00, 0x04, 0xa6, 0x00, 0x00, 0x02,
+	0x10, 0x60, 0x00, 0x11, 0xa6, 0x00, 0x00, 0x00, 0x3c, 0x03, 0x9e, 0x00,
+	0x00, 0x43, 0x10, 0x24, 0x3c, 0x03, 0x80, 0x00, 0x54, 0x43, 0x00, 0x28,
+	0x26, 0x52, 0x00, 0x01, 0x8f, 0x82, 0x01, 0x88, 0x8c, 0x42, 0x00, 0x50,
+	0x00, 0x40, 0xf8, 0x09, 0x24, 0x05, 0x00, 0x1c, 0x8f, 0x83, 0x01, 0x84,
+	0x94, 0x63, 0x00, 0x02, 0x54, 0x60, 0x00, 0x20, 0x26, 0x52, 0x00, 0x01,
+	0x00, 0x02, 0x1c, 0x02, 0x0b, 0xf0, 0x42, 0x66, 0xa6, 0x03, 0x00, 0x00,
+	0x54, 0xb6, 0x00, 0x1b, 0x26, 0x52, 0x00, 0x01, 0xae, 0x37, 0x00, 0x24,
+	0xae, 0x20, 0x00, 0x30, 0x8e, 0x22, 0x00, 0x20, 0x34, 0x04, 0xff, 0xff,
+	0x00, 0x54, 0x10, 0x24, 0x34, 0x42, 0x00, 0x03, 0xae, 0x22, 0x00, 0x20,
+	0x8f, 0x82, 0x01, 0x88, 0x8c, 0x42, 0x00, 0x34, 0x00, 0x40, 0xf8, 0x09,
+	0x00, 0x00, 0x28, 0x21, 0xa6, 0x02, 0x00, 0x00, 0x24, 0x02, 0x00, 0x02,
+	0xae, 0x22, 0x00, 0x24, 0xae, 0x20, 0x00, 0x30, 0x8e, 0x22, 0x00, 0x20,
+	0x34, 0x04, 0xff, 0xff, 0x00, 0x54, 0x10, 0x24, 0x34, 0x42, 0x00, 0x01,
+	0xae, 0x22, 0x00, 0x20, 0x8f, 0x82, 0x01, 0x88, 0x8c, 0x42, 0x00, 0x34,
+	0x00, 0x40, 0xf8, 0x09, 0x00, 0x00, 0x28, 0x21, 0xa6, 0x02, 0x00, 0x02,
+	0x26, 0x52, 0x00, 0x01, 0x24, 0x02, 0x00, 0x2b, 0x16, 0x42, 0xff, 0xc7,
+	0x26, 0x10, 0x00, 0x04, 0x8f, 0xbf, 0x00, 0x34, 0x02, 0x40, 0x10, 0x21,
+	0x8f, 0xbe, 0x00, 0x30, 0x8f, 0xb7, 0x00, 0x2c, 0x8f, 0xb6, 0x00, 0x28,
+	0x8f, 0xb5, 0x00, 0x24, 0x8f, 0xb4, 0x00, 0x20, 0x8f, 0xb3, 0x00, 0x1c,
+	0x8f, 0xb2, 0x00, 0x18, 0x8f, 0xb1, 0x00, 0x14, 0x8f, 0xb0, 0x00, 0x10,
+	0x03, 0xe0, 0x00, 0x08, 0x27, 0xbd, 0x00, 0x38, 0x8f, 0x82, 0x01, 0x88,
+	0x27, 0xbd, 0xff, 0x30, 0xaf, 0xb2, 0x00, 0xc8, 0x8c, 0x52, 0x00, 0x0c,
+	0x24, 0x02, 0xff, 0xff, 0xaf, 0xb1, 0x00, 0xc4, 0xaf, 0xb0, 0x00, 0xc0,
+	0xaf, 0xbf, 0x00, 0xcc, 0x00, 0x80, 0x80, 0x21, 0xa4, 0x82, 0x00, 0x00,
+	0xa4, 0x82, 0x00, 0x02, 0xa4, 0xa0, 0x00, 0x00, 0xa4, 0xa0, 0x00, 0x02,
+	0x27, 0xa4, 0x00, 0x10, 0x0f, 0xf0, 0x42, 0x1d, 0x00, 0xa0, 0x88, 0x21,
+	0x27, 0xa2, 0x00, 0x10, 0x27, 0xa6, 0x00, 0xbc, 0x3c, 0x05, 0x00, 0x1c,
+	0x3c, 0x07, 0x00, 0x14, 0x8e, 0x43, 0x00, 0x00, 0x00, 0xa3, 0x20, 0x24,
+	0x10, 0x85, 0x00, 0x26, 0x8f, 0xbf, 0x00, 0xcc, 0x00, 0x03, 0x1c, 0x82,
+	0x30, 0x63, 0x00, 0x07, 0x28, 0x63, 0x00, 0x04, 0x10, 0x60, 0x00, 0x13,
+	0x00, 0x00, 0x00, 0x00, 0x94, 0x43, 0x00, 0x00, 0x2c, 0x64, 0x00, 0x0b,
+	0x54, 0x80, 0x00, 0x06, 0x94, 0x43, 0x00, 0x02, 0x96, 0x04, 0x00, 0x00,
+	0x00, 0x64, 0x20, 0x2b, 0x54, 0x80, 0x00, 0x01, 0xa6, 0x03, 0x00, 0x00,
+	0x94, 0x43, 0x00, 0x02, 0x2c, 0x64, 0x00, 0x0b, 0x54, 0x80, 0x00, 0x12,
+	0x24, 0x42, 0x00, 0x04, 0x96, 0x04, 0x00, 0x02, 0x00, 0x64, 0x20, 0x2b,
+	0x54, 0x80, 0x00, 0x0d, 0xa6, 0x03, 0x00, 0x02, 0x0b, 0xf0, 0x42, 0xb2,
+	0x24, 0x42, 0x00, 0x04, 0x54, 0x87, 0x00, 0x0a, 0x24, 0x42, 0x00, 0x04,
+	0x94, 0x43, 0x00, 0x00, 0x2c, 0x64, 0x00, 0x0b, 0x50, 0x80, 0x00, 0x01,
+	0xa6, 0x23, 0x00, 0x00, 0x94, 0x43, 0x00, 0x02, 0x2c, 0x64, 0x00, 0x0b,
+	0x50, 0x80, 0x00, 0x01, 0xa6, 0x23, 0x00, 0x02, 0x24, 0x42, 0x00, 0x04,
+	0x14, 0x46, 0xff, 0xd9, 0x26, 0x52, 0x00, 0x04, 0x8f, 0xbf, 0x00, 0xcc,
+	0x8f, 0xb2, 0x00, 0xc8, 0x8f, 0xb1, 0x00, 0xc4, 0x8f, 0xb0, 0x00, 0xc0,
+	0x03, 0xe0, 0x00, 0x08, 0x27, 0xbd, 0x00, 0xd0, 0x8f, 0x82, 0x01, 0x84,
+	0x27, 0xbd, 0xff, 0xe0, 0x94, 0x42, 0x00, 0x00, 0x14, 0x40, 0x00, 0x34,
+	0xaf, 0xbf, 0x00, 0x1c, 0x27, 0xa4, 0x00, 0x14, 0x0f, 0xf0, 0x42, 0x78,
+	0x27, 0xa5, 0x00, 0x10, 0x8f, 0x82, 0x01, 0x88, 0x97, 0xa4, 0x00, 0x14,
+	0x94, 0x43, 0x00, 0x10, 0x00, 0x83, 0x18, 0x2b, 0x14, 0x60, 0x00, 0x0f,
+	0x97, 0xa9, 0x00, 0x16, 0x94, 0x43, 0x00, 0x12, 0x01, 0x23, 0x18, 0x2b,
+	0x54, 0x60, 0x00, 0x0c, 0x3c, 0x04, 0x00, 0x0f, 0x97, 0xa8, 0x00, 0x10,
+	0x94, 0x46, 0x00, 0x18, 0x01, 0x06, 0x30, 0x2b, 0x54, 0xc0, 0x00, 0x07,
+	0x3c, 0x04, 0x00, 0x0f, 0x97, 0xa7, 0x00, 0x12, 0x94, 0x46, 0x00, 0x1a,
+	0x00, 0xe6, 0x30, 0x2b, 0x50, 0xc0, 0x00, 0x06, 0x94, 0x45, 0x00, 0x14,
+	0x3c, 0x04, 0x00, 0x0f, 0x0f, 0xf0, 0x41, 0xb8, 0x24, 0x84, 0x00, 0x01,
+	0x0b, 0xf0, 0x42, 0xed, 0x24, 0x04, 0x00, 0x04, 0x00, 0xa4, 0x28, 0x2b,
+	0x10, 0xa0, 0x00, 0x13, 0x3c, 0x04, 0x00, 0x0f, 0x94, 0x44, 0x00, 0x16,
+	0x00, 0x89, 0x20, 0x2b, 0x50, 0x80, 0x00, 0x0f, 0x3c, 0x04, 0x00, 0x0f,
+	0x94, 0x43, 0x00, 0x1c, 0x00, 0x68, 0x18, 0x2b, 0x10, 0x60, 0x00, 0x0b,
+	0x3c, 0x04, 0x00, 0x0f, 0x94, 0x42, 0x00, 0x1e, 0x00, 0x47, 0x10, 0x2b,
+	0x10, 0x40, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x0f, 0xf0, 0x41, 0xb8,
+	0x24, 0x84, 0x00, 0x02, 0x24, 0x04, 0xff, 0xff, 0x0f, 0xf0, 0x41, 0xff,
+	0x00, 0x00, 0x00, 0x00, 0x3c, 0x04, 0x00, 0x0f, 0x0f, 0xf0, 0x41, 0xb8,
+	0x24, 0x84, 0x00, 0x03, 0x8f, 0xbf, 0x00, 0x1c, 0x03, 0xe0, 0x00, 0x08,
+	0x27, 0xbd, 0x00, 0x20, 0x27, 0xbd, 0xff, 0xa8, 0xaf, 0xb0, 0x00, 0x30,
+	0x8f, 0x90, 0x01, 0x84, 0xaf, 0xbf, 0x00, 0x54, 0x96, 0x02, 0x00, 0x00,
+	0xaf, 0xbe, 0x00, 0x50, 0xaf, 0xb7, 0x00, 0x4c, 0xaf, 0xb6, 0x00, 0x48,
+	0xaf, 0xb5, 0x00, 0x44, 0xaf, 0xb4, 0x00, 0x40, 0xaf, 0xb3, 0x00, 0x3c,
+	0xaf, 0xb2, 0x00, 0x38, 0xaf, 0xb1, 0x00, 0x34, 0x10, 0x40, 0x00, 0x04,
+	0xaf, 0xa5, 0x00, 0x5c, 0x24, 0x02, 0x00, 0x04, 0x0b, 0xf0, 0x43, 0xe1,
+	0xa6, 0x02, 0x00, 0x02, 0x00, 0x80, 0xa8, 0x21, 0x0f, 0xf0, 0x41, 0xb8,
+	0x3c, 0x04, 0x00, 0x08, 0x96, 0x04, 0x00, 0x04, 0x0f, 0xf0, 0x41, 0xea,
+	0x24, 0x10, 0x22, 0x60, 0x8f, 0xa3, 0x00, 0x5c, 0x00, 0x40, 0xf0, 0x21,
+	0x24, 0x51, 0xff, 0xc0, 0x24, 0x02, 0x1c, 0x00, 0x70, 0x62, 0x90, 0x02,
+	0x24, 0x03, 0x03, 0xe8, 0x02, 0x50, 0x00, 0x1a, 0x24, 0x13, 0x02, 0xd4,
+	0x00, 0x00, 0x90, 0x12, 0x72, 0xa2, 0x10, 0x02, 0x02, 0x72, 0x98, 0x23,
+	0x00, 0x50, 0x00, 0x1a, 0xaf, 0xb3, 0x00, 0x28, 0x00, 0x00, 0x98, 0x21,
+	0x00, 0x00, 0x10, 0x12, 0x00, 0x52, 0x20, 0x23, 0x70, 0x83, 0x28, 0x02,
+	0x02, 0x42, 0x10, 0x23, 0x24, 0x42, 0x00, 0x6a, 0xaf, 0xa5, 0x00, 0x20,
+	0xaf, 0xa2, 0x00, 0x24, 0x3c, 0x02, 0x00, 0x08, 0x26, 0x76, 0x00, 0x01,
+	0x0f, 0xf0, 0x41, 0xb8, 0x02, 0xc2, 0x20, 0x25, 0x0f, 0xf0, 0x41, 0xea,
+	0x03, 0xc0, 0x20, 0x21, 0x0f, 0xf0, 0x41, 0xbe, 0x00, 0x00, 0x00, 0x00,
+	0x27, 0xa5, 0x00, 0x14, 0x27, 0xa4, 0x00, 0x1c, 0x0f, 0xf0, 0x42, 0x78,
+	0x00, 0x40, 0xb8, 0x21, 0x0f, 0xf0, 0x41, 0xea, 0x02, 0x20, 0x20, 0x21,
+	0x0f, 0xf0, 0x41, 0xbe, 0x00, 0x00, 0x00, 0x00, 0x02, 0xe2, 0xa0, 0x23,
+	0x2a, 0x83, 0x00, 0x08, 0x00, 0x40, 0x80, 0x21, 0x14, 0x60, 0x00, 0x58,
+	0x00, 0x00, 0x30, 0x21, 0x27, 0xa4, 0x00, 0x18, 0x0f, 0xf0, 0x42, 0x78,
+	0x27, 0xa5, 0x00, 0x10, 0x97, 0xa3, 0x00, 0x18, 0x2c, 0x64, 0x00, 0x0b,
+	0x14, 0x80, 0x00, 0x12, 0x00, 0x00, 0x30, 0x21, 0x97, 0xa5, 0x00, 0x1c,
+	0x00, 0xa3, 0x28, 0x23, 0x28, 0xa4, 0x00, 0x33, 0x14, 0x80, 0x00, 0x0e,
+	0x97, 0xa4, 0x00, 0x1a, 0x24, 0x04, 0x07, 0x0a, 0x24, 0x06, 0x03, 0xe8,
+	0x00, 0x83, 0x18, 0x23, 0x70, 0x66, 0x18, 0x02, 0x70, 0xa6, 0x30, 0x02,
+	0x00, 0xd4, 0x00, 0x1a, 0x00, 0x00, 0x30, 0x12, 0x00, 0x66, 0x00, 0x1a,
+	0x00, 0x00, 0x30, 0x12, 0x00, 0xd0, 0x30, 0x21, 0x28, 0xc3, 0x00, 0x00,
+	0x00, 0x03, 0x30, 0x0b, 0x97, 0xa4, 0x00, 0x1a, 0x2c, 0x83, 0x00, 0x0b,
+	0x54, 0x60, 0x00, 0x13, 0x97, 0xa4, 0x00, 0x10, 0x97, 0xa3, 0x00, 0x1e,
+	0x00, 0x64, 0x38, 0x23, 0x28, 0xe3, 0x00, 0x33, 0x54, 0x60, 0x00, 0x0e,
+	0x97, 0xa4, 0x00, 0x10, 0x24, 0x05, 0x0d, 0x49, 0x24, 0x03, 0x03, 0xe8,
+	0x00, 0xa4, 0x20, 0x23, 0x70, 0x83, 0x20, 0x02, 0x70, 0xe3, 0x18, 0x02,
+	0x00, 0x74, 0x00, 0x1a, 0x00, 0x00, 0x18, 0x12, 0x00, 0x83, 0x00, 0x1a,
+	0x00, 0x00, 0x18, 0x12, 0x00, 0x70, 0x18, 0x21, 0x00, 0xc3, 0x20, 0x2a,
+	0x00, 0x64, 0x30, 0x0b, 0x97, 0xa4, 0x00, 0x10, 0x2c, 0x83, 0x00, 0x0b,
+	0x14, 0x60, 0x00, 0x13, 0x97, 0xa5, 0x00, 0x12, 0x97, 0xa3, 0x00, 0x14,
+	0x00, 0x64, 0x38, 0x23, 0x28, 0xe3, 0x00, 0x33, 0x14, 0x60, 0x00, 0x0f,
+	0x2c, 0xa3, 0x00, 0x0b, 0x24, 0x05, 0x07, 0x0a, 0x24, 0x03, 0x03, 0xe8,
+	0x00, 0xa4, 0x20, 0x23, 0x70, 0x83, 0x20, 0x02, 0x70, 0xe3, 0x18, 0x02,
+	0x00, 0x74, 0x00, 0x1a, 0x00, 0x00, 0x18, 0x12, 0x00, 0x83, 0x00, 0x1a,
+	0x00, 0x00, 0x18, 0x12, 0x00, 0x70, 0x18, 0x21, 0x00, 0xc3, 0x20, 0x2a,
+	0x00, 0x64, 0x30, 0x0b, 0x97, 0xa5, 0x00, 0x12, 0x2c, 0xa3, 0x00, 0x0b,
+	0x14, 0x60, 0x00, 0x13, 0x3c, 0x02, 0x80, 0x00, 0x97, 0xa4, 0x00, 0x16,
+	0x00, 0x85, 0x38, 0x23, 0x28, 0xe3, 0x00, 0x33, 0x14, 0x60, 0x00, 0x0f,
+	0x02, 0x62, 0x50, 0x21, 0x24, 0x04, 0x0d, 0x49, 0x00, 0x85, 0x28, 0x23,
+	0x24, 0x04, 0x03, 0xe8, 0x70, 0xa4, 0x28, 0x02, 0x70, 0xe4, 0x20, 0x02,
+	0x00, 0x94, 0x00, 0x1a, 0x00, 0x00, 0x18, 0x12, 0x00, 0xa3, 0x00, 0x1a,
+	0x00, 0x00, 0x18, 0x12, 0x00, 0x70, 0x18, 0x21, 0x00, 0xc3, 0x10, 0x2a,
+	0x00, 0x62, 0x30, 0x0b, 0x3c, 0x02, 0x80, 0x00, 0x02, 0x62, 0x50, 0x21,
+	0x24, 0x02, 0x22, 0x60, 0x70, 0xc2, 0x10, 0x02, 0x24, 0x03, 0x1c, 0x00,
+	0x00, 0x43, 0x00, 0x1a, 0x8f, 0x90, 0x01, 0x88, 0x00, 0x0a, 0x50, 0x40,
+	0x02, 0x0a, 0x50, 0x21, 0x00, 0x00, 0x10, 0x12, 0x14, 0xc0, 0x00, 0x08,
+	0xa5, 0x42, 0x00, 0x20, 0x0f, 0xf0, 0x41, 0xb8, 0x3c, 0x04, 0x80, 0x08,
+	0x8e, 0x02, 0x00, 0x48, 0x00, 0x40, 0xf8, 0x09, 0x00, 0x00, 0x00, 0x00,
+	0x0b, 0xf0, 0x43, 0xe2, 0x8f, 0xbf, 0x00, 0x54, 0x8f, 0xa4, 0x00, 0x20,
+	0x8f, 0xa5, 0x00, 0x24, 0x24, 0x07, 0x03, 0xe8, 0x00, 0x85, 0x00, 0x1a,
+	0x00, 0x00, 0x10, 0x12, 0x70, 0x46, 0x18, 0x02, 0x00, 0x67, 0x00, 0x1a,
+	0x8f, 0xa3, 0x00, 0x28, 0x00, 0x00, 0x38, 0x12, 0x70, 0x43, 0x10, 0x02,
+	0x24, 0x03, 0xfc, 0x18, 0x00, 0x43, 0x00, 0x1a, 0x00, 0x00, 0x10, 0x12,
+	0x02, 0x42, 0x10, 0x21, 0x00, 0xe2, 0x10, 0x21, 0x00, 0xc2, 0x38, 0x21,
+	0x28, 0xe2, 0x02, 0xd4, 0x14, 0x40, 0x00, 0x05, 0x24, 0x13, 0x02, 0xd4,
+	0x24, 0x03, 0x03, 0x3e, 0x28, 0xe6, 0x03, 0x3f, 0x00, 0x60, 0x98, 0x21,
+	0x00, 0xe6, 0x98, 0x0b, 0x3c, 0x04, 0x00, 0x12, 0x0f, 0xf0, 0x41, 0xb8,
+	0x02, 0x64, 0x20, 0x25, 0x03, 0xd1, 0x10, 0x23, 0x24, 0x03, 0x27, 0x10,
+	0x70, 0x43, 0x10, 0x02, 0x02, 0x77, 0x30, 0x23, 0x00, 0x54, 0x00, 0x1a,
+	0x00, 0x00, 0x40, 0x12, 0x70, 0xc8, 0xb8, 0x02, 0x02, 0xe3, 0x00, 0x1a,
+	0x00, 0x00, 0xb8, 0x12, 0x02, 0xfe, 0xb8, 0x21, 0x03, 0xd7, 0x10, 0x2b,
+	0x02, 0xe2, 0xf0, 0x0b, 0x26, 0xe2, 0x00, 0x20, 0x00, 0x51, 0x18, 0x2b,
+	0x00, 0x43, 0x88, 0x0b, 0x24, 0x02, 0x00, 0x02, 0x16, 0xc2, 0xff, 0x56,
+	0x24, 0x13, 0x00, 0x01, 0x3c, 0x04, 0x00, 0x08, 0x0f, 0xf0, 0x41, 0xb8,
+	0x24, 0x84, 0x00, 0x06, 0x0f, 0xf0, 0x41, 0xea, 0x02, 0xe0, 0x20, 0x21,
+	0x8f, 0xa3, 0x00, 0x5c, 0x26, 0x05, 0x00, 0x18, 0xa6, 0x03, 0x00, 0x26,
+	0x26, 0x04, 0x00, 0x10, 0x0f, 0xf0, 0x42, 0x78, 0xa6, 0x15, 0x00, 0x24,
+	0x3c, 0x02, 0xb6, 0x08, 0x94, 0x44, 0x01, 0x16, 0x30, 0x84, 0xff, 0xff,
+	0x0f, 0xf0, 0x41, 0xea, 0x24, 0x84, 0x00, 0x10, 0x8f, 0x85, 0x01, 0x88,
+	0x24, 0xa4, 0x00, 0x14, 0x0f, 0xf0, 0x42, 0x78, 0x24, 0xa5, 0x00, 0x1c,
+	0x8f, 0xbf, 0x00, 0x54, 0x8f, 0xbe, 0x00, 0x50, 0x8f, 0xb7, 0x00, 0x4c,
+	0x8f, 0xb6, 0x00, 0x48, 0x8f, 0xb5, 0x00, 0x44, 0x8f, 0xb4, 0x00, 0x40,
+	0x8f, 0xb3, 0x00, 0x3c, 0x8f, 0xb2, 0x00, 0x38, 0x8f, 0xb1, 0x00, 0x34,
+	0x8f, 0xb0, 0x00, 0x30, 0x03, 0xe0, 0x00, 0x08, 0x27, 0xbd, 0x00, 0x58,
+	0x00, 0x80, 0x10, 0x21, 0x8c, 0x45, 0x00, 0x0c, 0x0b, 0xf0, 0x42, 0xf5,
+	0x8c, 0x84, 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+	0x00, 0x00, 0x00, 0x00
+};
+
+static int __init pmc_load_plugin_bcm63138(struct bcm63xx_pmc *pmc, u32 state)
+{
+	unsigned long image_link_addr = 0x9fc10000;
+	const void *image_virt_addr = pmc_63138_image;
+	struct pmc_command cmd, rsp;
+	int ret;
+
+	if (state != PMC_RSTATE_AVS_COMPLETE_WAITING_FOR_IMAGE) {
+		WARN(1, "expected AVS_COMPLETE_WAITING_FOR_IMAGE state");
+		return 0;
+	}
+
+	writel(~(PMC_IMAGE_ALIGN - 1),
+	       pmc->base + PMC_ADDR2_WIN_MASK_REG);
+	writel(image_link_addr & 0x1fffffff,
+	       pmc->base + PMC_ADDR2_WIN_BASEIN_REG);
+	writel(virt_to_phys(image_virt_addr),
+	       pmc->base + PMC_ADDR2_WIN_BASEOUT_REG);
+
+	memset(&cmd, 0, sizeof (cmd));
+	cmd.word0.cmd_id = PMC_CMD_REGISTER_CMD_HANDLER;
+	cmd.generic_params.params[0] = 96;
+	cmd.generic_params.params[1] = image_link_addr;
+
+	ret = pmc_send_command(pmc, &cmd, &rsp);
+	if (ret) {
+		pr_err("failed to register command handler\n");
+		return ret;
+	}
+
+	memset(&cmd, 0, sizeof (cmd));
+	cmd.word0.cmd_id = 96;
+	cmd.generic_params.params[0] = 75;
+	cmd.generic_params.params[1] = 75;
+
+	ret = pmc_send_command(pmc, &cmd, &rsp);
+	if (ret) {
+		pr_err("failed to send cmd 96\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static const char *pmc_run_state_str[] = {
+	[PMC_RSTATE_EXECUTING_BOOTROM] = "EXECUTING_BOOTROM",
+	[PMC_RSTATE_WAITING_BMU_COMPLETE] = "WAITING_BMU_COMPLETE",
+	[PMC_RSTATE_AVS_COMPLETE_WAITING_FOR_IMAGE] =
+	"AVS_COMPLETE_WAITING_FOR_IMAGE",
+	[PMC_RSTATE_AUTHENTICATING_IMAGE] = "AUTHENTICATING_IMAGE",
+	[PMC_RSTATE_AUTHENTICATION_FAILED] = "AUTHENTICATION_FAILED",
+	[PMC_RSTATE_RESERVED] = "RESERVED",
+	[PMC_RSTATE_FATAL_ERROR] = "FATAL_ERROR",
+	[PMC_RSTATE_RUNNING] = "RUNNING",
+};
+
+static int __init pmc_load_plugin(struct bcm63xx_pmc *pmc)
+{
+	u32 val;
+
+	val = readl(pmc->base + PMC_HOST_MBOX_IN_REG);
+	val = (val >> 24) & 0x7;
+	pr_info("PMC state is '%s'\n", pmc_run_state_str[val]);
+
+	switch (pmc->soc_id) {
+	case 0x63138:
+		return pmc_load_plugin_bcm63138(pmc, val);
+	case 0x63158:
+		return 0;
+	default:
+		WARN(1, "no load plugin defined");
+		return 0;
+	}
+}
+
+static int pmc_avs_state_read(void *priv, u64 *val)
+{
+	struct bcm63xx_pmc *pmc = priv;
+	int error;
+	bool state;
+
+	error = pmc_get_avs_state(pmc, 0, &state);
+	if (error)
+		return error;
+
+	*val = state ? 1 : 0;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pmc_avs_state_fops,
+			pmc_avs_state_read,
+			NULL, "%llu\n");
+
+static int pmc_pred_voltage_read(void *priv, u64 *val)
+{
+	struct bcm63xx_pmc *pmc = priv;
+	int error;
+	u32 v;
+
+	error = pmc_get_rmon(pmc, &v);
+	if (error)
+		return error;
+
+	*val = (v >> 16) & 0xffff;
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pmc_pred_voltage_fops,
+			pmc_pred_voltage_read,
+			NULL, "%lld\n");
+
+static int pmc_avs_margin_ss_read(void *priv, u64 *val)
+{
+	struct bcm63xx_pmc *pmc = priv;
+
+	*val = pmc->avs_margin_ss;
+	return 0;
+}
+
+static int pmc_avs_margin_ss_write(void *priv, u64 val)
+{
+	struct bcm63xx_pmc *pmc = priv;
+
+	if (val > 0xff)
+		return -EINVAL;
+
+	pmc->avs_margin_ss = val;
+
+	if (pmc->avs_margin_ff && pmc->avs_margin_ss)
+		return pmc_close_avs(pmc);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pmc_avs_margin_ss_fops,
+			pmc_avs_margin_ss_read,
+			pmc_avs_margin_ss_write,
+			"%lld\n");
+
+static int pmc_avs_margin_ff_read(void *priv, u64 *val)
+{
+	struct bcm63xx_pmc *pmc = priv;
+
+	*val = pmc->avs_margin_ff;
+	return 0;
+}
+
+static int pmc_avs_margin_ff_write(void *priv, u64 val)
+{
+	struct bcm63xx_pmc *pmc = priv;
+
+	if (val > 0xff)
+		return -EINVAL;
+
+	pmc->avs_margin_ff = val;
+
+	if (pmc->avs_margin_ff && pmc->avs_margin_ss)
+		return pmc_close_avs(pmc);
+	return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pmc_avs_margin_ff_fops,
+			pmc_avs_margin_ff_read,
+			pmc_avs_margin_ff_write,
+			"%lld\n");
+
+static int __init pmc_create_debugfs(struct bcm63xx_pmc *pmc)
+{
+	pmc->debugfs_dir = debugfs_create_dir("bcm63xx-pmc", NULL);
+	if (IS_ERR(pmc->debugfs_dir)) {
+		dev_warn(pmc->dev, "unable to create debugfs directory\n");
+		pmc->debugfs_dir = NULL;
+		return 0;
+	}
+
+	debugfs_create_file("avs-state", S_IRUSR, pmc->debugfs_dir,
+			    pmc, &pmc_avs_state_fops);
+	debugfs_create_file("avs-pred-voltage", S_IRUSR, pmc->debugfs_dir,
+			    pmc, &pmc_pred_voltage_fops);
+	debugfs_create_file("avs-margin-ss", S_IRUSR | S_IWUSR,
+			    pmc->debugfs_dir, pmc, &pmc_avs_margin_ss_fops);
+	debugfs_create_file("avs-margin-ff", S_IRUSR | S_IWUSR,
+			    pmc->debugfs_dir, pmc, &pmc_avs_margin_ff_fops);
+	return 0;
+}
+
+/*
+ * Early initialization to allow access to registers in the very early boot
+ * process (mostly for SMP bootup)
+ */
+static int __init bcm63xx_pmc_early_init(void)
+{
+	const struct of_device_id *match;
+	struct device_node *np;
+	struct resource regs;
+	int ret;
+	bool avs_state;
+
+	np = of_find_matching_node_and_match(NULL, bcm63xx_pmc_match, &match);
+	if (!np) {
+		pr_warn("DT node not found, PMC disabled\n");
+		return 0;
+	}
+
+	if (of_address_to_resource(np, 0, &regs) < 0) {
+		pr_err("failed to get PMC registers\n");
+		of_node_put(np);
+		return -ENXIO;
+	}
+
+	of_node_put(np);
+
+	mutex_init(&pmc->pmc_lock);
+	pmc->soc_id = (u32)(uintptr_t)match->data;
+	pmc->base = ioremap(regs.start, resource_size(&regs));
+	if (!pmc->base) {
+		pr_err("failed to map PMC registers\n");
+		return -ENXIO;
+	}
+
+	pmc_load_plugin(pmc);
+	ret = pmc_check_revision(pmc);
+	if (ret)
+		goto fail;
+
+	ret = pmc_get_avs_state(pmc, 0, &avs_state);
+	if (ret)
+		pr_err("PMC: unable to get AVS state.\n");
+	else
+		pr_info("PMC: AVS state is %sabled\n",
+			avs_state ? "en" : "dis");
+
+	return 0;
+
+fail:
+	pr_err("PMC init failed\n");
+	iounmap(pmc->base);
+	pmc->base = NULL;
+	return ret;
+}
+
+early_initcall(bcm63xx_pmc_early_init);
+
+
+/*
+ * special wrappers for bcm_adsl
+ */
+static enum pmc_addr_id pmc_find_dev_addr_id(struct bcm63xx_pmc *priv,
+					     unsigned int dev_addr)
+{
+	const struct pmc_addr_info *info;
+	size_t i;
+
+	switch (priv->soc_id) {
+	case 0x63138:
+		info = bcm63138_pmc_addr_info;
+		break;
+	case 0x63158:
+		info = bcm63158_pmc_addr_info;
+		break;
+	default:
+		WARN(1, "missing addr info for this soc id");
+		return 0;
+	}
+
+	for (i = 0; i < PMB_ADDR_LAST; i++) {
+		unsigned int addr;
+
+		if (!info[i].valid)
+			continue;
+
+		addr = (info[i].dev | (info[i].bus_id << PMB_BUS_ID_SHIFT));
+		if (addr == dev_addr)
+			return i;
+	}
+
+	printk(KERN_ERR "cannot find dev addr id for dev_addr:0x%x\n", dev_addr);
+	BUG();
+}
+
+int PowerOnDevice(int devAddr);
+int PowerOnDevice(int devAddr)
+{
+	enum pmc_addr_id id = pmc_find_dev_addr_id(pmc, devAddr);
+	return pmc_power_on_cmd(pmc, id);
+}
+EXPORT_SYMBOL(PowerOnDevice);
+
+int PowerOffDevice(int devAddr);
+int PowerOffDevice(int devAddr)
+{
+	enum pmc_addr_id id = pmc_find_dev_addr_id(pmc, devAddr);
+	return pmc_power_off_cmd(pmc, id);
+}
+EXPORT_SYMBOL(PowerOffDevice);
+
+int ReadBPCMRegister(int devAddr, int wordOffset, u32 *value);
+int ReadBPCMRegister(int devAddr, int wordOffset, u32 *value)
+{
+	enum pmc_addr_id id = pmc_find_dev_addr_id(pmc, devAddr);
+	int ret;
+
+	ret = pmc_read_bpcm_register(pmc, id, wordOffset << 2, value);
+	return ret;
+}
+
+EXPORT_SYMBOL(ReadBPCMRegister);
+
+int WriteBPCMRegister(int devAddr, int wordOffset, u32 value);
+int WriteBPCMRegister(int devAddr, int wordOffset, u32 value)
+{
+	enum pmc_addr_id id = pmc_find_dev_addr_id(pmc, devAddr);
+	return pmc_write_bpcm_register(pmc, id, wordOffset << 2, value);
+}
+
+EXPORT_SYMBOL(WriteBPCMRegister);
+
+struct bcm63xx_pmc *pmc_of_get(struct device_node *np)
+{
+	return pmc->init_done ? pmc : ERR_PTR(-EPROBE_DEFER);
+}
+EXPORT_SYMBOL_GPL(pmc_of_get);
diff -Nruw linux-6.13.12/drivers/soc/bcm/bcm63xx/procmon-bcm63158.c linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/procmon-bcm63158.c
--- linux-6.13.12/drivers/soc/bcm/bcm63xx/procmon-bcm63158.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/procmon-bcm63158.c	2025-09-25 17:40:35.063364727 +0200
@@ -0,0 +1,171 @@
+/*
+ * procmon-bcm63158.c for procmon-bcm63158
+ * Created by <nschichan@freebox.fr> on Thu Oct  3 19:14:44 2019
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/io.h>
+#include <linux/bcm63xx-procmon.h>
+
+#include <dt-bindings/soc/broadcom,bcm63158-procmon.h>
+
+struct bcm63158_procmon_priv {
+	void __iomem *regs;
+	struct device *dev;
+	struct list_head list;
+};
+
+
+static DEFINE_MUTEX(procmon_list_mutex);
+static LIST_HEAD(procmon_list);
+
+static int bcm63158_procmon_probe(struct platform_device *pdev)
+{
+	struct bcm63158_procmon_priv *priv;
+
+	dev_info(&pdev->dev, "probe.\n");
+
+	priv = devm_kzalloc(&pdev->dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	priv->dev = &pdev->dev;
+	dev_set_drvdata(&pdev->dev, priv);
+
+	priv->regs = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(priv->regs))
+		return PTR_ERR(priv->regs);
+
+	mutex_lock(&procmon_list_mutex);
+	list_add_tail(&priv->list, &procmon_list);
+	mutex_unlock(&procmon_list_mutex);
+
+	return 0;
+}
+
+static const struct of_device_id bcm63158_procmon_match[] = {
+	{ .compatible = "brcm,bcm63158-procmon" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, bcm63158_procmon_match);
+
+struct platform_driver bcm63158_procmon_driver = {
+	.probe		= bcm63158_procmon_probe,
+	.remove		= NULL, /* FIXME*/
+	.driver		= {
+		.name		= "bcm63158-procmon",
+		.owner		= THIS_MODULE,
+		.of_match_table	= bcm63158_procmon_match,
+	},
+};
+builtin_platform_driver(bcm63158_procmon_driver);
+
+#define PROCMON_MISC_REG(i)		(0x50 + (i) * 4)
+
+#define PROCMON_EXT_REG			(0x50 + 0xc)
+# define EXT_REG_RMON_SATURATED_VALUE	0x3a0
+# define EXT_REG_RMON_VALUE_MASK	0xffff
+# define EXT_REG_RMON_VALID		(1 << 16)
+
+/*
+ * this is the meaty part of this driver. calculation method comes
+ * directly from broadcom refsw.
+ */
+static int bcm63158_procmon_calc_rcal(struct bcm63158_procmon_priv *priv,
+				      int resistor)
+{
+	u32 res_int, res_ext;
+	u32 ext_reg;
+	int ratio, ratio1, ret;
+
+	dev_dbg(priv->dev, "getting rcal setting for resistor %d\n", resistor);
+
+	/*
+	 * make sure the resistor selection is valid
+	 */
+	if (resistor < RCAL_0P25UM_HORZ || resistor > RCAL_1UM_VERT)
+		return -EINVAL;
+
+	ext_reg = readl(priv->regs + PROCMON_EXT_REG);
+	if ((ext_reg & EXT_REG_RMON_VALID) == 0) {
+		dev_err(priv->dev, "resistor data is not collected by the "
+			"PMC.\n");
+		return -EIO;
+	}
+
+	if ((ext_reg & EXT_REG_RMON_VALUE_MASK) >
+	    EXT_REG_RMON_SATURATED_VALUE) {
+		dev_err(priv->dev, "ext reg is saturated.\n");
+		return -EIO;
+	}
+
+	res_ext = ext_reg & EXT_REG_RMON_VALUE_MASK;
+	res_int = readl(priv->regs + PROCMON_MISC_REG(resistor >> 1));
+	if (resistor & 1)
+		res_int >>= 16;
+
+	/*
+	 * Ratio = CLAMP((INT) (128.0 * V(REXT)/V(RINT)), 0, 255)
+	 */
+	ratio = (128 * res_ext) / res_int;
+	if (ratio > 255)
+		ratio = 255;
+
+	/*
+	 * Ratio1 = CLAMP(128 - (Ratio - 128) * 4, 0, 255)
+	 */
+	ratio1 = 128 - (ratio - 128) * 4;
+	if (ratio1 < 0)
+		ratio1 = 0;
+	if (ratio1 > 255)
+		ratio1 = 255;
+
+	/*
+	 * convert to 4 bit rcal setting value
+	 */
+	ret = (ratio1 >> 4) & 0xf;
+
+	dev_dbg(priv->dev, "getrcal for resistor%d, int %u, ext %u, "
+		"ratio %d ratio1 %d, rcal %d\n",
+		resistor, res_int, res_ext, ratio, ratio1, ret);
+
+	return ret;
+}
+
+int procmon_get_rcal(struct device_node *np)
+{
+	int err;
+	struct of_phandle_args args;
+	struct bcm63158_procmon_priv *priv;
+
+	err = of_parse_phandle_with_args(np, "procmon", "#procmon-cells",
+					 0, &args);
+	if (err) {
+		pr_err("unable to parse procmon phandle.\n");
+		return err;
+	}
+
+	if (args.args_count != 1)
+		return -EINVAL;
+
+	mutex_lock(&procmon_list_mutex);
+	list_for_each_entry (priv, &procmon_list, list) {
+		if (priv->dev->of_node == args.np) {
+			mutex_unlock(&procmon_list_mutex);
+			goto found;
+		}
+	}
+	mutex_unlock(&procmon_list_mutex);
+	return -EPROBE_DEFER;
+
+found:
+	return bcm63158_procmon_calc_rcal(priv, args.args[0]);
+}
+EXPORT_SYMBOL(procmon_get_rcal);
+
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("Broadcom BCM63158 SoC PROCMON driver.");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-6.13.12/drivers/soc/bcm/bcm63xx/rdp/Makefile linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/rdp/Makefile
--- linux-6.13.12/drivers/soc/bcm/bcm63xx/rdp/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/rdp/Makefile	2025-09-25 17:40:35.063364727 +0200
@@ -0,0 +1,9 @@
+obj-y += rdp_drv.o
+
+rdp_drv-y += \
+	rdp.o \
+	rdp_api.o \
+	rdp_io.o \
+	rdp_ioctl.o
+
+rdp_drv-$(CONFIG_DEBUG_FS) += rdp_debug.o
diff -Nruw linux-6.13.12/drivers/soc/bcm/bcm63xx/ubus4-bcm63158.c linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/ubus4-bcm63158.c
--- linux-6.13.12/drivers/soc/bcm/bcm63xx/ubus4-bcm63158.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/ubus4-bcm63158.c	2025-09-25 17:40:35.067364747 +0200
@@ -0,0 +1,875 @@
+/*
+ * ubus4-bcm63158.c for bcm63158-soc
+ * Created by <nschichan@freebox.fr> on Fri Jun  7 14:30:40 2019
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/ubus4.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <dt-bindings/brcm,bcm63158-ubus.h>
+
+/*
+ * UBUS coherency port configuration registes.
+ */
+#define UBUS_COHERENCY_LUT(x)			(0x00 + 4 * (x))
+#define UBUS_COHERENCY_QUEUE_DEPTH(x)		(0x80 + 4 * (x))
+#define UBUS_COHERENCY_CBS_THRESH(x)		(0x90 + 4 * (x))
+#define UBUS_COHERENCY_CIR_INCR(x)		(0xb0 + 4 * (x))
+#define UBUS_COHERENCY_REF_CNT(x)		(0xc0 + 4 * (x))
+#define UBUS_COHERENCY_MAX_BONUS(x)		(0xd0 + 4 * (x))
+
+static inline void get_lut_queue_id_params(int id, u32 *offset, u32 *shift,
+					   u32 *mask)
+{
+	*offset = id / 8;
+	*shift = (id % 8) * 4;
+	*mask = 0xf;
+}
+
+static inline void get_queue_depth_params(int id, u32 *offset, u32 *shift,
+					  u32 *mask)
+{
+	*offset = id / 4;
+	*shift = (id % 4) * 8;
+	*mask = 0xff;
+}
+
+static inline void get_cbs_thresh_params(int id, u32 *offset, u32 *shift,
+					 u32 *mask)
+{
+	*offset = id / 2;
+	*shift = (id % 2) * 16;
+	*mask = 0xffff;
+}
+
+static inline void get_cir_incr_params(int id, u32 *offset, u32 *shift,
+				       u32 *mask)
+{
+	*offset = id / 4;
+	*shift = (id % 4) * 8;
+	*mask = 0xff;
+}
+
+static inline void get_ref_cnt_params(int id, u32 *offset, u32 *shift,
+				       u32 *mask)
+{
+	*offset = id / 4;
+	*shift = (id % 4) * 8;
+	*mask = 0xff;
+}
+
+static inline void get_max_bonus_params(int id, u32 *offset, u32 *shift,
+					u32 *mask)
+{
+	*offset = id / 8;
+	*shift = (id % 8) * 4;
+	*mask = 0x7;
+}
+
+#define UBUS_LUT_COUNT				32
+#define UBUS_QUEUE_DEPTH_COUNT			4
+#define UBUS_CBS_THRESH_COUNT			8
+#define UBUS_CIR_INCR_COUNT			4
+#define UBUS_REF_CNT_COUNT			4
+#define UBUS_MAX_BONUS_COUNT			2
+
+
+#define UBUS_COHERENCY_WLU_SCRPID(x)		(0xd8 + 4 * (x))
+/*
+ * ubus masters bas addresses
+ */
+#define UBUS_PORT_PCIE0_OFFSET       0x00C000
+#define UBUS_PORT_DSLCPU_OFFSET      0x01C000
+#define UBUS_PORT_B53_OFFSET         0x020000
+#define UBUS_PORT_PMC_OFFSET         0x02C000
+#define UBUS_PORT_PER_OFFSET         0x034000
+#define UBUS_PORT_PER_DMA_OFFSET     0x03C000
+#define UBUS_PORT_SWH_OFFSET         0x048000
+#define UBUS_PORT_SPU_OFFSET         0x050000
+#define UBUS_PORT_DSL_OFFSET         0x05C000
+#define UBUS_PORT_PCIE2_OFFSET       0x064000
+#define UBUS_PORT_PCIE3_OFFSET       0x06C000
+#define UBUS_PORT_USB_OFFSET         0x074000
+#define UBUS_PORT_DMA0_OFFSET        0x47C000
+#define UBUS_PORT_DMA1_OFFSET        0x480000
+#define UBUS_PORT_RQ0_OFFSET         0x498000
+#define UBUS_PORT_NATC_OFFSET        0x4B8000
+#define UBUS_PORT_DQM_OFFSET         0x4BC000
+#define UBUS_PORT_QM_OFFSET          0x4C4000
+
+
+static DEFINE_MUTEX(ubus_list_mutex);
+static LIST_HEAD(ubus_list);
+
+struct ubus_credit {
+	u32 port_id;
+	u32 credit;
+};
+
+struct ubus_master_desc {
+	unsigned long regs_offset;
+	u32 port_id;
+	struct ubus_credit *credits;
+};
+
+#define UBUS_MASTER_DESC(_reg_offset, _port_id, ...)	\
+	{						\
+		.regs_offset = _reg_offset,		\
+		.port_id = _port_id,			\
+		.credits = (struct ubus_credit[]){	\
+		__VA_ARGS__, { } },			\
+	}
+
+#define UBUS_CREDIT(_port_id, _credit)		\
+	{ .port_id = _port_id, .credit = _credit }
+
+/*
+ * locations of each masters control registers, and SoC specific
+ * credits tables.
+ */
+struct ubus_master_desc bcm63158_masters[] = {
+	UBUS_MASTER_DESC(UBUS_PORT_B53_OFFSET, UBUS_PORT_ID_BIU,
+			 UBUS_CREDIT(UBUS_PORT_ID_MEMC, 3),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYS, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_PSRAM, 8),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYSXRDP, 1)),
+	UBUS_MASTER_DESC(UBUS_PORT_PER_OFFSET, UBUS_PORT_ID_PER,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYS, 1)),
+	UBUS_MASTER_DESC(UBUS_PORT_USB_OFFSET, UBUS_PORT_ID_USB,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 2),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYS, 1)),
+	UBUS_MASTER_DESC(UBUS_PORT_SPU_OFFSET, 	UBUS_PORT_ID_SPU,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 5),
+			 UBUS_CREDIT(UBUS_PORT_ID_PER, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYS, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_DSL, 4),
+			 UBUS_CREDIT(UBUS_PORT_ID_PSRAM, 8)),
+	UBUS_MASTER_DESC(UBUS_PORT_DSL_OFFSET, UBUS_PORT_ID_DSL,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_PER, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_WAN, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYS, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_DSL, 1)),
+	UBUS_MASTER_DESC(UBUS_PORT_PER_DMA_OFFSET, UBUS_PORT_ID_PERDMA,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 5),
+			 UBUS_CREDIT(UBUS_PORT_ID_PER, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYS, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_DSL, 4),
+			 UBUS_CREDIT(UBUS_PORT_ID_PSRAM, 8)),
+	UBUS_MASTER_DESC(UBUS_PORT_PCIE0_OFFSET, UBUS_PORT_ID_PCIE0,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 4),
+			 UBUS_CREDIT(UBUS_PORT_ID_PCIE0, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_PER, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYS, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_FPM, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_VPB, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYSXRDP, 1)),
+	UBUS_MASTER_DESC(UBUS_PORT_PCIE2_OFFSET, UBUS_PORT_ID_PCIE2,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 3),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYS, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_FPM, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_VPB, 1)),
+	UBUS_MASTER_DESC(UBUS_PORT_PCIE3_OFFSET, UBUS_PORT_ID_PCIE3,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 6),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYS, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_FPM, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_VPB, 1)),
+	UBUS_MASTER_DESC(UBUS_PORT_DSLCPU_OFFSET, UBUS_PORT_ID_DSLCPU,
+			 UBUS_CREDIT(UBUS_PORT_ID_MEMC, 8),
+			 UBUS_CREDIT(UBUS_PORT_ID_PER, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_WAN, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYS, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_DSL, 1)),
+	UBUS_MASTER_DESC(UBUS_PORT_PMC_OFFSET, UBUS_PORT_ID_PMC,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_MEMC, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_USB, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_PCIE0, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_PCIE3, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_PCIE2, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_PER, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_PMC, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_WAN, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYS, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_SWH, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_SPU, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_DSL, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_QM, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_FPM, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_VPB, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_PSRAM, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYSXRDP, 1)),
+	UBUS_MASTER_DESC(UBUS_PORT_SWH_OFFSET, UBUS_PORT_ID_SWH,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 8),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYS, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_DSL, 8),
+			 UBUS_CREDIT(UBUS_PORT_ID_PSRAM, 8)),
+	UBUS_MASTER_DESC(UBUS_PORT_QM_OFFSET, UBUS_PORT_ID_QM,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 16),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYSXRDP, 1)),
+	UBUS_MASTER_DESC(UBUS_PORT_DQM_OFFSET, UBUS_PORT_ID_DQM,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_FPM, 2),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYSXRDP, 1)),
+	UBUS_MASTER_DESC(UBUS_PORT_DMA0_OFFSET, UBUS_PORT_ID_DMA0,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 8),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYSXRDP, 1)),
+	UBUS_MASTER_DESC(UBUS_PORT_NATC_OFFSET, UBUS_PORT_ID_NATC,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 2),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYSXRDP, 1)),
+	UBUS_MASTER_DESC(UBUS_PORT_RQ0_OFFSET, UBUS_PORT_ID_RQ0,
+			 UBUS_CREDIT(UBUS_PORT_ID_BIU, 11),
+			 UBUS_CREDIT(UBUS_PORT_ID_USB, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_PCIE0, 2),
+			 UBUS_CREDIT(UBUS_PORT_ID_PCIE3, 2),
+			 UBUS_CREDIT(UBUS_PORT_ID_PCIE2, 2),
+			 UBUS_CREDIT(UBUS_PORT_ID_PER, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_WAN, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_SWH, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_SPU, 1),
+			 UBUS_CREDIT(UBUS_PORT_ID_QM, 10),
+			 UBUS_CREDIT(UBUS_PORT_ID_FPM, 2),
+			 UBUS_CREDIT(UBUS_PORT_ID_VPB, 2),
+			 UBUS_CREDIT(UBUS_PORT_ID_PSRAM, 10),
+			 UBUS_CREDIT(UBUS_PORT_ID_SYSXRDP, 1)),
+};
+
+struct biu_config {
+	const char *blk_name;
+	u32 mstr_node;
+	u32 src_pid;
+	u32 queue_id;
+	u32 depth;
+	u32 ref_cnt[4];
+	u32 cir;
+	u32 cbs;
+	u32 bonus;
+
+};
+
+enum {
+	E_DDR_2133_x32,
+	E_DDR_2133_x16,
+	E_DDR_1600_x32,
+	E_DDR_1600_x16,
+};
+
+#define DDR_SPEED_800_10_10_10 6
+#define DDR_SPEED_800_11_11_11 7
+
+#define DDR_SPEED(mcb)		((mcb) & 0x1f)
+#define DDR_DEV_WIDTH(mcb)	(((mcb) >> 5) & 0x7)
+
+#define DDR_WIDTH_x16	0
+#define DDR_WIDTH_x32	1
+#define DDR_WIDTH_x8	2
+
+
+static u32 get_ddr_refcnt_index(u32 mcb)
+{
+	u32 speed = DDR_SPEED(mcb);
+	u32 width = DDR_DEV_WIDTH(mcb);
+
+	if (speed == DDR_SPEED_800_11_11_11 ||
+	    speed == DDR_SPEED_800_10_10_10) {
+		switch (width) {
+		case DDR_WIDTH_x16:
+			return E_DDR_1600_x16;
+		case DDR_WIDTH_x32:
+			return E_DDR_1600_x32;
+		default:
+			BUG();
+		}
+	} else {
+		switch (width) {
+		case DDR_WIDTH_x16:
+			return E_DDR_2133_x16;
+		case DDR_WIDTH_x32:
+			return E_DDR_2133_x32;
+		default:
+			BUG();
+		}
+	}
+	BUG();
+	return 0xaa55aa55;
+}
+
+static struct biu_config biu_config_bcm63158[32 + 1] = {
+	/* queue depth will be set based on ubus credits */
+	[UBUS_PORT_ID_PER]      = {
+		.blk_name = "PCM",
+		.src_pid = 3,
+		.queue_id = 7,
+		.cir = 2,
+		.cbs = 128,
+		.bonus = 0,
+		.ref_cnt = {12, 18, 22, 40 },
+		.mstr_node = UBUS_PORT_ID_PER
+	},
+	[UBUS_PORT_ID_USB]      = {
+		.blk_name = "USB",
+		.src_pid = 4,
+		.queue_id = 6,
+		.cir = 3,
+		.cbs = 128,
+		.bonus = 0,
+		.ref_cnt = {38, 71, 90, 161},
+		.mstr_node = UBUS_PORT_ID_USB
+	},
+	[UBUS_PORT_ID_SPU]      = {
+		.blk_name = "SPU",
+		.src_pid = 5,
+		.queue_id = 9,
+		.cir = 1,
+		.cbs = 1,
+		.bonus = 0,
+		.ref_cnt = {38, 71, 90, 161},
+		.mstr_node = UBUS_PORT_ID_SPU
+	},
+	[UBUS_PORT_ID_PERDMA]   = {
+		.blk_name = "M2M",
+		.src_pid = 7,
+		.queue_id = 8,
+		.cir = 1,
+		.cbs = 1,
+		.bonus = 0,
+		.ref_cnt = {38, 71, 90, 161},
+		.mstr_node = UBUS_PORT_ID_PERDMA
+	},
+	[UBUS_PORT_ID_PCIE0]    = {
+		.blk_name = "PCIe0",
+		.src_pid = 8,
+		.queue_id = 3,
+		.cir = 6,
+		.cbs = 128,
+		.bonus = 0,
+		.ref_cnt = {38, 71, 90, 161},
+		.mstr_node = UBUS_PORT_ID_PCIE0
+	},
+	[UBUS_PORT_ID_PCIE2]    = {
+		.blk_name = "PCIe2",
+		.src_pid = 9,
+		.queue_id = 4,
+		.cir = 3,
+		.cbs = 128,
+		.bonus = 0,
+		.ref_cnt = {38, 71, 90, 161},
+		.mstr_node = UBUS_PORT_ID_PCIE2
+	},
+	[UBUS_PORT_ID_PCIE3]    = {
+		.blk_name = "PCIe3",
+		.src_pid = 10,
+		.queue_id = 5,
+		.cir = 9,
+		.cbs = 256,
+		.bonus = 0,
+		.ref_cnt = {38, 71, 90, 161},
+		.mstr_node = UBUS_PORT_ID_PCIE3
+	},
+	[UBUS_PORT_ID_QM]       = {
+		.blk_name = "QM",
+		.src_pid = 22,
+		.queue_id = 10,
+		.cir = 38,
+		.cbs = 512,
+		.bonus = 4,
+		.ref_cnt = {38, 71, 90, 161},
+		.mstr_node = UBUS_PORT_ID_QM
+	},
+	[UBUS_PORT_ID_DQM]      = {
+		.blk_name = "QM_DQM",
+		.src_pid = 23,
+		.queue_id = 1,
+		.cir = 5,
+		.cbs = 256,
+		.bonus = 4,
+		.ref_cnt = {6,  9,  11, 20 },
+		.mstr_node = UBUS_PORT_ID_DQM
+	},
+	[UBUS_PORT_ID_DMA0]     = {
+		.blk_name = "DMA0",
+		.src_pid = 24,
+		.queue_id = 11,
+		.cir = 39,
+		.cbs = 128,
+		.bonus = 4,
+		.ref_cnt = {38, 71, 90, 161},
+		.mstr_node = UBUS_PORT_ID_DMA0
+	},
+	[UBUS_PORT_ID_NATC]     = {
+		.blk_name = "NAT$",
+		.src_pid = 26,
+		.queue_id = 2,
+		.cir = 6,
+		.cbs = 512,
+		.bonus = 4,
+		.ref_cnt = {21, 36, 45, 81 },
+		.mstr_node = UBUS_PORT_ID_NATC
+	},
+	[UBUS_PORT_ID_RQ0]      = {
+		.blk_name = "RNR",
+		.src_pid = 32,
+		.queue_id = 0,
+		.cir = 14,
+		.cbs = 256,
+		.bonus = 4,
+		.ref_cnt = {12, 18, 22, 40 },
+		.mstr_node = UBUS_PORT_ID_RQ0
+	},
+};
+
+#define UBUS_MASTER_TOKEN_REG(port)	(0x400 + (port) * 4)
+
+#define UBUS_CONG_THRESHOLD_REG		(0xc)
+
+#define UBUS_MASTER_DECODE_CONTROL	(0x600)
+#define  UBUS_MASTER_DECODE_CONTROL_CONFIG_SEL_MASK	(0x3 << 4)
+#define  UBUS_MASTER_DECODE_CONTROL_CONFIG_SEL_DEF	(0x0 << 4)
+#define  UBUS_MASTER_DECODE_CONTROL_CONFIG_SEL_INPUT	(0x1 << 4)
+#define  UBUS_MASTER_DECODE_CONTROL_CONFIG_SEL_CFG_REG	(0x2 << 4)
+
+#define UBUS_MASTER_DECODE_CACHE_CONFIG	(0x604)
+
+
+struct ubus4_bcm63158 {
+	struct device *dev;
+	struct list_head list;
+
+	struct resource regs;
+	struct ubus4_master *masters;
+	size_t nr_masters;
+
+	unsigned long coherency_control_regs_phys;
+	void __iomem *coherency_control_regs;
+	u32 ddr_mcb;
+};
+
+struct ubus4_master {
+	struct ubus4_bcm63158 *ubus;
+	unsigned long regs_phys;
+	void __iomem *regs;
+	u32 port_id;
+
+	const struct ubus_credit *credits;
+	size_t nr_credits;
+};
+
+static inline u32 ubus_master_readl(struct ubus4_master *m, u32 off)
+{
+	u32 ret;
+
+	ret = readl(m->regs + off);
+	dev_dbg(m->ubus->dev, "read %08x at %08lx\n", ret, m->regs_phys + off);
+	return ret;
+}
+
+static inline void ubus_master_writel(u32 val, struct ubus4_master *m, u32 off)
+{
+	dev_dbg(m->ubus->dev, "write %08x at %08lx\n", val,
+		 m->regs_phys + off);
+	writel(val, m->regs + off);
+}
+
+static inline void ubus_cohcfg_writel(u32 val, struct ubus4_bcm63158 *ubus,
+				      u32 off)
+{
+	dev_dbg(ubus->dev, "coherency config write %08x at %08lx\n",
+		 val, ubus->coherency_control_regs_phys + off);
+	writel(val, ubus->coherency_control_regs + off);
+}
+
+static inline u32 ubus_cohcfg_readl(struct ubus4_bcm63158 *ubus, u32 off)
+{
+	u32 ret;
+	ret = readl(ubus->coherency_control_regs + off);
+	dev_dbg(ubus->dev, "coherency config read %08x at %08lx\n",
+		 ret, ubus->coherency_control_regs_phys + off);
+	return ret;
+}
+
+static struct ubus4_master *ubus_get_master(struct ubus4_bcm63158 *ubus,
+					    u32 port_id)
+{
+	size_t i;
+	for (i = 0; i < ubus->nr_masters; ++i)
+		if (ubus->masters[i].port_id == port_id)
+			return &ubus->masters[i];
+	return NULL;
+}
+
+void ubus_master_apply_credits(struct ubus4_master *m)
+{
+	size_t i;
+
+	for (i = 0; i < m->nr_credits; ++i) {
+		const struct ubus_credit *c = &m->credits[i];
+
+		ubus_master_writel(c->credit, m,
+				   UBUS_MASTER_TOKEN_REG(c->port_id));
+	}
+}
+EXPORT_SYMBOL(ubus_master_apply_credits);
+
+void ubus_master_set_congestion_threshold(struct ubus4_master *m, u32 v)
+{
+	ubus_master_writel(v, m, UBUS_CONG_THRESHOLD_REG);
+}
+EXPORT_SYMBOL(ubus_master_set_congestion_threshold);
+
+void ubus_master_remap_port(struct ubus4_master *m)
+{
+	u32 v;
+
+	ubus_master_writel(0x1, m, UBUS_MASTER_DECODE_CACHE_CONFIG);
+
+	/*
+	 * on bcm63158, only master cache control configuration is
+	 * needed.
+	 */
+	v = ubus_master_readl(m, UBUS_MASTER_DECODE_CONTROL);
+	v &= ~UBUS_MASTER_DECODE_CONTROL_CONFIG_SEL_MASK;
+	v |= UBUS_MASTER_DECODE_CONTROL_CONFIG_SEL_CFG_REG;
+	ubus_master_writel(v, m, UBUS_MASTER_DECODE_CONTROL);
+}
+EXPORT_SYMBOL(ubus_master_remap_port);
+
+static int ubus_setup_master(struct ubus4_bcm63158 *ubus,
+			     struct ubus4_master *the,
+			     struct ubus_master_desc *desc)
+{
+	struct resource res;
+
+	the->port_id = desc->port_id;
+	the->credits = desc->credits;
+	the->nr_credits = 0;
+	while (the->credits[the->nr_credits].port_id &&
+	       the->credits[the->nr_credits].credit)
+		++the->nr_credits;
+
+	dev_dbg(ubus->dev, "master %d, credits %zu\n",
+		the->port_id, the->nr_credits);
+
+	res.start = ubus->regs.start + desc->regs_offset;
+	res.end = res.start + 0x1000 - 1;
+	res.name = "ubus master register";
+	res.flags = ubus->regs.flags;
+	res.desc = ubus->regs.desc;
+	res.parent = NULL;
+
+	if (!resource_contains(&ubus->regs, &res)) {
+		dev_err(ubus->dev, "registers for master %d are outside main "
+			"register space.\n", the->port_id);
+		return -EINVAL;
+	}
+
+	the->ubus = ubus;
+	the->regs_phys = res.start;
+	the->regs = devm_ioremap_resource(ubus->dev, &res);
+	if (IS_ERR(the->regs))
+		return PTR_ERR(the->regs);
+
+	dev_dbg(ubus->dev, "master%d registers %pR\n", the->port_id, &res);
+	return 0;
+}
+
+static void ubus_enable_all_wlu_srcpid(struct ubus4_bcm63158 *ubus)
+{
+	int i;
+
+	for (i = 0; i < 8; ++i) {
+		ubus_cohcfg_readl(ubus, UBUS_COHERENCY_WLU_SCRPID(i));
+		ubus_cohcfg_writel(0xffffffff, ubus,
+				   UBUS_COHERENCY_WLU_SCRPID(i));
+	}
+}
+
+
+static void ubus_reset_biu_cfg(struct ubus4_bcm63158 *ubus)
+{
+	int i;
+
+	for (i = 0; i < UBUS_LUT_COUNT; ++i)
+		ubus_cohcfg_writel(0x0, ubus, UBUS_COHERENCY_LUT(i));
+	for (i = 0; i < UBUS_QUEUE_DEPTH_COUNT; ++i)
+		ubus_cohcfg_writel(0x0, ubus, UBUS_COHERENCY_QUEUE_DEPTH(i));
+	for (i = 0; i < UBUS_CBS_THRESH_COUNT; ++i)
+		ubus_cohcfg_writel(0x0, ubus, UBUS_COHERENCY_CBS_THRESH(i));
+	for (i = 0; i < UBUS_CIR_INCR_COUNT; ++i)
+		ubus_cohcfg_writel(0x0, ubus, UBUS_COHERENCY_CIR_INCR(i));
+	for (i = 0; i < UBUS_REF_CNT_COUNT; ++i)
+		ubus_cohcfg_writel(0x0, ubus, UBUS_COHERENCY_REF_CNT(i));
+	for (i = 0; i < UBUS_MAX_BONUS_COUNT; ++i)
+		ubus_cohcfg_writel(0x0, ubus, UBUS_COHERENCY_MAX_BONUS(i));
+}
+
+static void ubus_configure_biu(struct ubus4_bcm63158 *ubus,
+			       struct biu_config *biu_cfg)
+{
+	u32 offset, shift, mask;
+	u32 reg;
+	u32 ddr_refcount = get_ddr_refcnt_index(ubus->ddr_mcb);
+
+	dev_dbg(ubus->dev, "configuring BIU for %s\n", biu_cfg->blk_name);
+
+	/*
+	 * assign queue ID
+	 */
+	get_lut_queue_id_params(biu_cfg->src_pid, &offset, &shift, &mask);
+	reg = ubus_cohcfg_readl(ubus, UBUS_COHERENCY_LUT(offset));
+	reg &= ~(mask << shift);
+	reg |= (biu_cfg->queue_id << shift);
+	ubus_cohcfg_writel(reg, ubus, UBUS_COHERENCY_LUT(offset));
+
+	/*
+	 * assign queue depth
+	 */
+	get_queue_depth_params(biu_cfg->queue_id, &offset, &shift, &mask);
+	reg = ubus_cohcfg_readl(ubus, UBUS_COHERENCY_QUEUE_DEPTH(offset));
+	reg &= ~(mask << shift);
+	reg |= (biu_cfg->depth << shift);
+	ubus_cohcfg_writel(reg, ubus, UBUS_COHERENCY_QUEUE_DEPTH(offset));
+
+	/*
+	 * assign CBS threshold.
+	 */
+	get_cbs_thresh_params(biu_cfg->queue_id, &offset, &shift, &mask);
+	reg = ubus_cohcfg_readl(ubus, UBUS_COHERENCY_CBS_THRESH(offset));
+	reg &= ~(mask << shift);
+	reg |= (biu_cfg->cbs << shift);
+	ubus_cohcfg_writel(reg, ubus, UBUS_COHERENCY_CBS_THRESH(offset));
+
+	/*
+	 * assign CIR increment
+	 */
+	get_cir_incr_params(biu_cfg->queue_id, &offset, &shift, &mask);
+	reg = ubus_cohcfg_readl(ubus, UBUS_COHERENCY_CIR_INCR(offset));
+	reg &= ~(mask << shift);
+	reg |= (biu_cfg->cir << shift);
+	ubus_cohcfg_writel(reg, ubus, UBUS_COHERENCY_CIR_INCR(offset));
+
+	/*
+	 * assign ref count.
+	 */
+	get_ref_cnt_params(biu_cfg->queue_id, &offset, &shift, &mask);
+	reg = ubus_cohcfg_readl(ubus, UBUS_COHERENCY_REF_CNT(offset));
+	reg &= ~(mask << shift);
+	reg |= (biu_cfg->ref_cnt[ddr_refcount] << shift);
+	ubus_cohcfg_writel(reg, ubus, UBUS_COHERENCY_REF_CNT(offset));
+
+	/*
+	 * assign max bonus
+	 */
+	get_max_bonus_params(biu_cfg->queue_id, &offset, &shift, &mask);
+	reg = ubus_cohcfg_readl(ubus, UBUS_COHERENCY_MAX_BONUS(offset));
+	reg &= ~(mask << shift);
+	reg |= (biu_cfg->bonus << shift);
+	ubus_cohcfg_writel(reg, ubus, UBUS_COHERENCY_MAX_BONUS(offset));
+}
+
+static void ubus_calc_queue_depth_from_credits(struct ubus4_bcm63158 *ubus,
+					       struct biu_config *biu_configs,
+					       size_t nr_biu_configs)
+{
+	size_t i;
+	u32 total_depth = 0;
+
+	for (i = 0; i < nr_biu_configs; ++i) {
+		struct biu_config *cfg = &biu_configs[i];
+		struct ubus4_master *m;
+		size_t j;
+
+		if (!cfg->blk_name)
+			continue ;
+
+		m = ubus_get_master(ubus, cfg->mstr_node);
+		if (!m)
+			continue ;
+
+		for (j = 0; j < m->nr_credits; ++j)
+			if (m->credits[j].port_id == UBUS_PORT_ID_BIU)
+				cfg->depth = m->credits[j].credit;
+	}
+
+	for (i = 0; i < nr_biu_configs; ++i) {
+		struct biu_config *cfg = &biu_configs[i];
+
+		total_depth += cfg->depth;
+	}
+	if (total_depth > 64)
+		/*
+		 * NOTE: the refsw invokes BUG() here which is a bit
+		 * heavy handed.
+		 */
+		dev_warn(ubus->dev, "total depth %u is greater than 64.\n",
+			 total_depth);
+	else
+		dev_info(ubus->dev, "total depth %u ok.\n",
+			 total_depth);
+}
+
+static int ubus_get_ddr_mcb(struct ubus4_bcm63158 *ubus, u32 *mcb)
+{
+	struct of_phandle_args args;
+	int err;
+
+	err = of_parse_phandle_with_args(ubus->dev->of_node, "brcm,dram",
+					 0, 0, &args);
+	if (err)
+		return err;
+
+	err = of_property_read_u32(args.np, "brcm,ddr-mcb", mcb);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int bcm63158_ubus4_probe(struct platform_device *pdev)
+{
+	struct ubus4_bcm63158 *ubus;
+	struct resource *res_regs, *coh_regs;
+	int err;
+	size_t i;
+
+	dev_dbg(&pdev->dev, "probe.\n");
+
+	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res_regs) {
+		dev_err(&pdev->dev, "unable to get registers resource.\n");
+		return -ENOENT;
+	}
+
+	coh_regs = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	if (!coh_regs) {
+		dev_err(&pdev->dev, "unable to get coherency control registers "
+			"resource.\n");
+		return -ENOENT;
+	}
+
+	dev_info(&pdev->dev, "registers: %pR\n", res_regs);
+
+
+	ubus = devm_kzalloc(&pdev->dev, sizeof (*ubus), GFP_KERNEL);
+	if (!ubus)
+		return -ENOMEM;
+
+	dev_info(&pdev->dev, "coherency control registers: %pR\n",
+		 coh_regs);
+	ubus->coherency_control_regs_phys = coh_regs->start;
+	ubus->coherency_control_regs = devm_ioremap_resource(&pdev->dev,
+							     coh_regs);
+	if (IS_ERR(ubus->coherency_control_regs))
+		return PTR_ERR(ubus->coherency_control_regs);
+
+	ubus->dev = &pdev->dev;
+	ubus->regs = *res_regs;
+
+	err = ubus_get_ddr_mcb(ubus, &ubus->ddr_mcb);
+	if (err) {
+		dev_err(&pdev->dev, "unable to get DDR mcb from device "
+			"tree.\n");
+		return err;
+	}
+
+	ubus->nr_masters = ARRAY_SIZE(bcm63158_masters);
+	ubus->masters = devm_kzalloc(&pdev->dev,
+			     sizeof (*ubus->masters) * ubus->nr_masters,
+			     GFP_KERNEL);
+	if (!ubus->masters)
+		return -ENOMEM;
+
+
+	for (i = 0; i < ubus->nr_masters; ++i) {
+		err = ubus_setup_master(ubus,
+					&ubus->masters[i],
+					&bcm63158_masters[i]);
+		if (err)
+			return err;
+	}
+
+	for (i = 0; i < ubus->nr_masters; ++i) {
+		ubus_master_apply_credits(&ubus->masters[i]);
+	}
+	for (i = 0; i <  ubus->nr_masters; ++i) {
+		ubus_master_remap_port(&ubus->masters[i]);
+	}
+	ubus_enable_all_wlu_srcpid(ubus);
+	ubus_reset_biu_cfg(ubus);
+	ubus_calc_queue_depth_from_credits(ubus, biu_config_bcm63158,
+					   ARRAY_SIZE(biu_config_bcm63158));
+
+	for (i = 0; i < ARRAY_SIZE(biu_config_bcm63158); ++i) {
+		if (biu_config_bcm63158[i].blk_name)
+			ubus_configure_biu(ubus, &biu_config_bcm63158[i]);
+	}
+
+	mutex_lock(&ubus_list_mutex);
+	list_add_tail(&ubus->list, &ubus_list);
+	mutex_unlock(&ubus_list_mutex);
+
+	return 0;
+}
+
+static const struct of_device_id bcm63158_ubus4_match[] = {
+	{ .compatible = "brcm,bcm63158-ubus4" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, bcm63158_ubus4_match);
+
+struct platform_driver bcm63158_ubus4_driver = {
+	.probe		= bcm63158_ubus4_probe,
+	.remove		= NULL, /* FIXME*/
+	.driver		= {
+		.name		= "bcm63158-ubus4",
+		.owner		= THIS_MODULE,
+		.of_match_table	= bcm63158_ubus4_match,
+	},
+};
+
+builtin_platform_driver(bcm63158_ubus4_driver);
+
+struct ubus4_master *ubus4_master_of_get_index(struct device_node *np,
+					       int index)
+{
+	int err;
+	struct of_phandle_args args;
+	struct ubus4_bcm63158 *ubus;
+
+	err = of_parse_phandle_with_args(np, "ubus", "#ubus-cells", index,
+					 &args);
+	if (err) {
+		pr_err("unable to parse ubus phandle.\n");
+		return ERR_PTR(err);
+	}
+
+	if (args.args_count != 1)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&ubus_list_mutex);
+	list_for_each_entry (ubus, &ubus_list, list) {
+		if (ubus->dev->of_node == args.np) {
+			mutex_unlock(&ubus_list_mutex);
+			goto found;
+		}
+	}
+	mutex_unlock(&ubus_list_mutex);
+	return ERR_PTR(-EPROBE_DEFER);
+
+found:
+	return ubus_get_master(ubus, args.args[0]);
+}
+EXPORT_SYMBOL(ubus4_master_of_get_index);
+
+struct ubus4_master *ubus4_master_of_get(struct device_node *np)
+{
+	return ubus4_master_of_get_index(np, 0);
+}
+EXPORT_SYMBOL(ubus4_master_of_get);
+
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
+MODULE_DESCRIPTION("Broadcom BCM63158 SoC UBUS driver.");
+MODULE_LICENSE("GPL v2");
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/Makefile linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/Makefile
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/Makefile	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,8 @@
+obj-$(CONFIG_SOC_BCM63XX_XRDP) += xrdp_drv.o
+
+xrdp_drv-y += \
+	xrdp.o \
+	xrdp_api.o
+
+xrdp_drv-$(CONFIG_SOC_BCM63XX_XRDP_IOCTL) += xrdp_ioctl.o
+xrdp_drv-$(CONFIG_DEBUG_FS) += xrdp_debug.o
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs.h	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,91 @@
+#ifndef XRDP_REGS_H_
+#define XRDP_REGS_H_
+
+enum {
+	BB_ID_FIRST			= 0,
+	BB_ID_RNR0			= 0, /* 0x0 */
+	BB_ID_RNR1			= 1, /* 0x1 */
+	BB_ID_RNR2			= 2, /* 0x2 */
+	BB_ID_RNR3			= 3, /* 0x3 */
+	BB_ID_RNR4			= 4, /* 0x4 */
+	BB_ID_RNR5			= 5, /* 0x5 */
+	BB_ID_CNPL			= 17, /* 0x11 */
+	BB_ID_DISPATCHER_REORDER	= 18, /* 0x12 */
+	BB_ID_DMA0			= 19, /* 0x13 */
+	BB_ID_SDMA0			= 21, /* 0x15 */
+	BB_ID_SDMA1			= 22, /* 0x16 */
+	BB_ID_FPM			= 23, /* 0x17 */
+	BB_ID_HASH			= 24, /* 0x18 */
+	BB_ID_NATC			= 25, /* 0x19 */
+	BB_ID_QM_CP_SDMA		= 26, /* 0x1a */
+	BB_ID_QM_RNR_GRID		= 27, /* 0x1b */
+	BB_ID_QM_BBHTX			= 28, /* 0x1c */
+	BB_ID_QM_TOP			= 29, /* 0x1d */
+	BB_ID_QM_CP_MACHINE		= 30, /* 0x1e */
+	BB_ID_RX_BBH_0			= 31, /* 0x1f */
+	BB_ID_TX_LAN			= 32, /* 0x20 */
+	BB_ID_RX_BBH_1			= 33, /* 0x21 */
+	BB_ID_RX_BBH_2			= 35, /* 0x23 */
+	BB_ID_ACB			= 47, /* 0x2f */
+	BB_ID_SBPM			= 48, /* 0x30 */
+	BB_ID_TCAM_0			= 49, /* 0x31 */
+	BB_ID_RX_PON			= 50, /* 0x32 */
+	BB_ID_TX_PON			= 51, /* 0x33 */
+	BB_ID_TX_PON_STAT		= 52, /* 0x34 */
+	BB_ID_RX_DSL			= 53, /* 0x35 */
+	BB_ID_TX_DSL			= 54, /* 0x36 */
+	BB_ID_TX_DSL_STAT		= 55, /* 0x37 */
+	BB_ID_RX_10G			= 56, /* 0x38 */
+	BB_ID_TX_10G			= 57, /* 0x39 */
+	BB_ID_RX_2P5			= 58, /* 0x3a */
+	BB_ID_TX_2P5			= 59, /* 0x3b */
+	BB_ID_LAST			= 59
+};
+
+#define BB_MSG_RNR_TO_BBH_TX_QUEUE_SHIFT	6
+
+
+enum xrdp_regs_area {
+	XRDP_AREA_CORE,
+	XRDP_AREA_WAN_TOP,
+};
+
+#define RNR_SRAM_OFFSET(x)		(0x00c00000 + 0x20000 * (x))
+#define RNR_SRAM_SIZE			(16 * 1024)
+
+#define RNR_INST_OFFSET(x)		(0x00c10000 + 0x20000 * (x))
+#define RNR_INST_SIZE			32768
+
+#define RNR_CNXT_OFFSET(x)		(0x00c18000 + 0x20000 * (x))
+#define RNR_CNXT_SIZE			1536
+
+#define RNR_PRED_OFFSET(x)		(0x00c1c000 + 0x20000 * (x))
+#define RNR_PRED_SIZE			2048
+
+#define RDP_PSRAM_OFFSET		(0x600000)
+#define RDP_PSRAM_SIZE			(256 * 1024)
+
+/* relative to core */
+#define UNIMAC_OFFSET			(0xda0000)
+
+#include "xrdp_regs_acb_if.h"
+#include "xrdp_regs_bac_if.h"
+#include "xrdp_regs_bbh_rx.h"
+#include "xrdp_regs_bbh_tx.h"
+#include "xrdp_regs_cnpl.h"
+#include "xrdp_regs_dma.h"
+#include "xrdp_regs_dqm.h"
+#include "xrdp_regs_dsptchr.h"
+#include "xrdp_regs_fpm.h"
+#include "xrdp_regs_hash.h"
+#include "xrdp_regs_natc.h"
+#include "xrdp_regs_psram.h"
+#include "xrdp_regs_qm.h"
+#include "xrdp_regs_rnr_quad.h"
+#include "xrdp_regs_rnr_regs.h"
+#include "xrdp_regs_sbpm.h"
+#include "xrdp_regs_tcam.h"
+#include "xrdp_regs_ubus_mstr.h"
+#include "xrdp_regs_ubus_slv.h"
+
+#endif /* XRDP_REGS_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_acb_if.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_acb_if.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_acb_if.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_acb_if.h	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,131 @@
+#ifndef XRDP_REGS_ACB_IF_H_
+#define XRDP_REGS_ACB_IF_H_
+
+/* relative to core */
+#define ACB_IF_OFFSET_0			0xe50800
+
+/*
+ * Register <CONFIG0>
+ *
+ * misc configs 0
+ */
+#define ACB_IF_ACBIF_BLOCK_ACBIF_CONFIG_CONF0	0x0
+
+/* add to the len of each packet 4B of crc */
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_CONFIG_CONF0_CRC_ADD_MASK	0x1
+
+/*
+ * location byte for the valid bit in the result(last bit in that byte):
+ * 0:
+ * bit 7.
+ * ..
+ * 7:
+ * bit 63
+*/
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_CONFIG_CONF0_VAL_LOC_SHIFT	4
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_CONFIG_CONF0_VAL_LOC_MASK	0x70
+
+
+/*
+ * Registers <CMD_TYPE_CNTR> - <x> is [ 0 => 2 ] - read-only
+ *
+ * Number of commands that were processed for each command type (order -
+ * intend, sent, stat).
+ */
+#define ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_CMD_TYPE(x)	(0x100 + (x) * 0x4)
+
+/* value */
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_CMD_TYPE_VAL_SHIFT	0
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_CMD_TYPE_VAL_MASK	0xffffffff
+
+
+/*
+ * Registers <CMD_IMP_CNTR> - <x> is [ 0 => 2 ] - read-only
+ *
+ * Number of commands that were processed for each IMP(0,1,2).
+ * e.
+ */
+#define ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_CMD_IMP(x)	(0x110 + (x) * 0x4)
+
+/* value */
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_CMD_IMP_VAL_SHIFT	0
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_CMD_IMP_VAL_MASK	0xffffffff
+
+
+/*
+ * Registers <AGG_CNTR> - <x> is [ 0 => 1 ] - read-only
+ *
+ * Number of commands (for each of - intend, sent) that were for aggregated
+ * packets.
+ */
+#define ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_AGG(x)	(0x120 + (x) * 0x4)
+
+/* value */
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_AGG_VAL_SHIFT	0
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_AGG_VAL_MASK	0xffffffff
+
+
+/*
+ * Registers <BUFS_NUM_CNTR> - <x> is [ 0 => 1 ] - read-only
+ *
+ * Number of buffers that were counted for each command(order - intend,
+ * sent).
+ */
+#define ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_BUFFS(x)	(0x130 + (x) * 0x4)
+
+/* value */
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_BUFFS_VAL_SHIFT	0
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_BUFFS_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <GENERAL_CONFIG>
+ *
+ * bits rd_clr and wrap for the pm counters(above)
+ */
+#define ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_GEN_CFG	0x150
+
+/* read clear bit */
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_GEN_CFG_RD_CLR_MASK	0x1
+
+/* wrap bit */
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_GEN_CFG_WRAP_MASK	0x2
+
+
+/*
+ * Register <DBG_MUX_SEL>
+ *
+ * selects the debug vector
+ */
+#define ACB_IF_ACBIF_BLOCK_ACBIF_DEBUG_DBGSEL	0x200
+
+/* selects the debug vector */
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_DEBUG_DBGSEL_VS_SHIFT	0
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_DEBUG_DBGSEL_VS_MASK	0x7f
+
+
+/*
+ * Register <DBG_BUS> - read-only
+ *
+ * the debug bus
+ */
+#define ACB_IF_ACBIF_BLOCK_ACBIF_DEBUG_DBGBUS	0x204
+
+/* value */
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_DEBUG_DBGBUS_VAL_SHIFT	0
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_DEBUG_DBGBUS_VAL_MASK	0x1fffff
+
+
+/*
+ * Registers <STATUS> - <x> is [ 0 => 1 ] - read-only
+ *
+ * status register (msb, lsb)
+ */
+#define ACB_IF_ACBIF_BLOCK_ACBIF_DEBUG_STAT(x)	(0x240 + (x) * 0x4)
+
+/* value */
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_DEBUG_STAT_VAL_SHIFT	0
+#define  ACB_IF_ACBIF_BLOCK_ACBIF_DEBUG_STAT_VAL_MASK	0xffffffff
+
+
+#endif /* ! XRDP_REGS_ACB_IF_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_bac_if.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_bac_if.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_bac_if.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_bac_if.h	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,285 @@
+#ifndef XRDP_REGS_BAC_IF_H_
+#define XRDP_REGS_BAC_IF_H_
+
+/* relative to core */
+#define BAC_IF_OFFSET(x)		(0xe40000 + (x) * 0x1000)
+
+/*
+ * Register <RSLT_FIFO_FULL_THR>
+ *
+ * FULL threshold of result fifo for rdy indication to engine:
+ * If there are less words than thr left - there will be !rdy indication to
+ * engine, even if there is antry empty, and result will not be pushed into
+ * fifo.
+ * - NOT USED ANYMORE!
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_CFGS_RSLT_F_FULL_THR	0x0
+
+/* threshold */
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_RSLT_F_FULL_THR_THR_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_RSLT_F_FULL_THR_THR_MASK	0xf
+
+
+/*
+ * Register <DEC_ROUTE_OVERIDE>
+ *
+ * route override info for the route address decoder
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_CFGS_DEC_ROUT_OVRIDE	0x4
+
+/* en override route address */
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_DEC_ROUT_OVRIDE_EN_MASK	0x1
+
+/* id to override route address */
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_DEC_ROUT_OVRIDE_ID_SHIFT	4
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_DEC_ROUT_OVRIDE_ID_MASK	0x3f0
+
+/* addr to override route address */
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_DEC_ROUT_OVRIDE_ADDR_SHIFT	16
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_DEC_ROUT_OVRIDE_ADDR_MASK	0x3ff0000
+
+
+/*
+ * Register <CLOCK_GATE_CONTROL>
+ *
+ * Clock Gate control register including timer config and bypass control
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_CFGS_CLK_GATE_CNTRL	0xc
+
+/*
+ * If set to 1b1 will disable the clock gate logic such to always enable
+ * the clock
+*/
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_CLK_GATE_CNTRL_BYPASS_CLK_GATE_MASK	0x1
+
+/*
+ * For how long should the clock stay active once all conditions for clock
+ * disable are met.
+*/
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_CLK_GATE_CNTRL_TIMER_VAL_SHIFT	8
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_CLK_GATE_CNTRL_TIMER_VAL_MASK	0xff00
+
+/*
+ * Enables the keep alive logic which will periodically enable the clock to
+ * assure that no deadlock of clock being removed completely will occur
+*/
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_EN_MASK	0x10000
+
+/*
+ * If the KEEP alive option is enabled the field will determine for how
+ * many cycles should the clock be active
+*/
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_SHIFT	20
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_MASK	0x700000
+
+/*
+ * If the KEEP alive option is enabled this field will determine for how
+ * many cycles should the clock be disabled (minus the
+ * KEEP_ALIVE_INTERVAL)So KEEP_ALIVE_CYCLE must be larger than
+ * KEEP_ALIVE_INTERVAL.
+*/
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_SHIFT	24
+#define  BAC_IF_BACIF_BLOCK_BACIF_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_MASK	0xff000000
+
+
+/*
+ * Registers <INGRS_FIFO> - <x> is [ 0 => 127 ] - read-only
+ *
+ * ingress fifo debug
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_FIFOS_INGFIFO(x)	(0x100 + (x) * 0x4)
+
+/* lower 31b of entry */
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_INGFIFO_ENTRY_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_INGFIFO_ENTRY_MASK	0x7fffffff
+
+/* valid bit of entry */
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_INGFIFO_VAL_MASK	0x80000000
+
+
+/*
+ * Registers <CMD_FIFO> - <x> is [ 0 => 31 ] - read-only
+ *
+ * cmd fifo debug
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_FIFOS_CMDFIFO(x)	(0x500 + (x) * 0x4)
+
+/* lower 31b of entry */
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_CMDFIFO_ENTRY_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_CMDFIFO_ENTRY_MASK	0x7fffffff
+
+/* valid bit of entry */
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_CMDFIFO_VAL_MASK	0x80000000
+
+
+/*
+ * Registers <RSLT_FIFO> - <x> is [ 0 => 31 ] - read-only
+ *
+ * result fifo debug
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_FIFOS_RSLTFIFO(x)	(0x600 + (x) * 0x4)
+
+/* lower 31b of entry */
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_RSLTFIFO_ENTRY_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_RSLTFIFO_ENTRY_MASK	0x7fffffff
+
+/* valid bit of entry */
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_RSLTFIFO_VAL_MASK	0x80000000
+
+
+/*
+ * Registers <EGRS_FIFO> - <x> is [ 0 => 7 ] - read-only
+ *
+ * egress fifo debug
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_FIFOS_EGFIFO(x)	(0x700 + (x) * 0x4)
+
+/* lower 31b of entry */
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_EGFIFO_ENTRY_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_EGFIFO_ENTRY_MASK	0x7fffffff
+
+/* valid bit of entry */
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_EGFIFO_VAL_MASK	0x80000000
+
+
+/*
+ * Registers <PRLY_PARAMS_ARR_FIFO> - <x> is [ 0 => 7 ] - read-only
+ *
+ * reply params array debug
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_FIFOS_RPPRMARR(x)	(0x800 + (x) * 0x4)
+
+/* lower 31b of entry */
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_RPPRMARR_ENTRY_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_RPPRMARR_ENTRY_MASK	0x7fffffff
+
+/* valid bit of entry */
+#define  BAC_IF_BACIF_BLOCK_BACIF_FIFOS_RPPRMARR_VAL_MASK	0x80000000
+
+
+/*
+ * Register <ING_F_CNTR> - read-only
+ *
+ * number of bb transactions that enter the ingress fifo of accl_if
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ING_F_CNT	0xc00
+
+/* value of cntr */
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ING_F_CNT_CNTR_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ING_F_CNT_CNTR_MASK	0xffffffff
+
+
+/*
+ * Register <CMD_F_CNTR> - read-only
+ *
+ * number of commands (eob) that enter the command fifo of accl_if
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_CMD_F_CNT	0xc04
+
+/* value of cntr */
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_CMD_F_CNT_CNTR_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_CMD_F_CNT_CNTR_MASK	0xffffffff
+
+
+/*
+ * Register <ENG_CMD_CNTR> - read-only
+ *
+ * number of commands (eob) that enter the engine from the accl_if
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ENG_CMD_CNT	0xc08
+
+/* value of cntr */
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ENG_CMD_CNT_CNTR_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ENG_CMD_CNT_CNTR_MASK	0xffffffff
+
+
+/*
+ * Register <ENG_RSLT_CNTR> - read-only
+ *
+ * number of results (eob) that enter the result fifo of accl_if from the
+ * engine
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ENG_RSLT_CNT	0xc10
+
+/* value of cntr */
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ENG_RSLT_CNT_CNTR_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ENG_RSLT_CNT_CNTR_MASK	0xffffffff
+
+
+/*
+ * Register <RSLT_F_CNTR> - read-only
+ *
+ * number of results (eob) that leave the result fifo of accl_if
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_RSLT_F_CNT	0xc14
+
+/* value of cntr */
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_RSLT_F_CNT_CNTR_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_RSLT_F_CNT_CNTR_MASK	0xffffffff
+
+
+/*
+ * Register <EGR_F_CNTR> - read-only
+ *
+ * number of bb transactions that leave the egress fifo of accl_if
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_EGR_F_CNT	0xc18
+
+/* value of cntr */
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_EGR_F_CNT_CNTR_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_EGR_F_CNT_CNTR_MASK	0xffffffff
+
+
+/*
+ * Register <ERR_CMD_LONG_CNTR> - read-only
+ *
+ * number of commands that entered and were longer than the max command
+ * size for the accelerator configured in HW parameter
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ERR_CMDLNG_C	0xc30
+
+/* value of cntr */
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ERR_CMDLNG_C_CNTR_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ERR_CMDLNG_C_CNTR_MASK	0xffffffff
+
+
+/*
+ * Register <ERR_PARAMS_OVERFLOW_CNTR> - read-only
+ *
+ * reply params array is full (no free entries), and a new command has
+ * arrived
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ERR_PARAMS_OF_C	0xc34
+
+/* value of cntr */
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ERR_PARAMS_OF_C_CNTR_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ERR_PARAMS_OF_C_CNTR_MASK	0xffffffff
+
+
+/*
+ * Register <ERR_PARAMS_UNDERFLOW_CNTR> - read-only
+ *
+ * reply params array is empty, and a new result has arrived
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ERR_PARAMS_UF_C	0xc38
+
+/* value of cntr */
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ERR_PARAMS_UF_C_CNTR_SHIFT	0
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_ERR_PARAMS_UF_C_CNTR_MASK	0xffffffff
+
+
+/*
+ * Register <GENERAL_CONFIG>
+ *
+ * bits rd_clr and wrap for the counters
+ */
+#define BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_GEN_CFG	0xcfc
+
+/* read clear bit */
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_GEN_CFG_RD_CLR_MASK	0x1
+
+/* read clear bit */
+#define  BAC_IF_BACIF_BLOCK_BACIF_PM_COUNTERS_GEN_CFG_WRAP_MASK	0x2
+
+
+#endif /* ! XRDP_REGS_BAC_IF_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_bbh_rx.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_bbh_rx.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_bbh_rx.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_bbh_rx.h	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,1387 @@
+#ifndef XRDP_REGS_BBH_RX_H_
+#define XRDP_REGS_BBH_RX_H_
+
+/* relative to core */
+#define BBH_RX_OFFSET_0			0xd94000
+
+/* relative to core */
+#define BBH_RX_OFFSET_1			0xd94400
+
+/* relative to core */
+#define BBH_RX_OFFSET_2			0xd94800
+
+/* relative to wan_top */
+#define BBH_RX_OFFSET_3			0x8000
+
+/* relative to wan_top */
+#define BBH_RX_OFFSET_4			0x8400
+
+/* relative to wan_top */
+#define BBH_RX_OFFSET_5			0x8800
+
+/* relative to wan_top */
+#define BBH_RX_OFFSET_6			0x8c00
+
+/*
+ * Register <BROADBUS_CONFIGURATION>
+ *
+ * Each BBH unit has its own position on the BB tree.
+ * The BB defines the Route address for the specific unit.
+ */
+#define BBH_RX_GENERAL_CFG_BBCFG	0x0
+
+/*
+ * SDMA BB ID.
+ * This ID defines the BB ID of the SDMA that the BBH communicates with.
+*/
+#define  GENERAL_CFG_BBCFG_SDMABBID_SHIFT	0
+#define  GENERAL_CFG_BBCFG_SDMABBID_MASK	0x3f
+
+/*
+ * Dispatcher BB ID.
+ * This ID defines the BB ID of the Dispatcher that the BBH communicates
+ * with.
+*/
+#define  GENERAL_CFG_BBCFG_DISPBBID_SHIFT	8
+#define  GENERAL_CFG_BBCFG_DISPBBID_MASK	0x3f00
+
+/*
+ * SBPM BB ID.
+ * This ID defines the BB ID of the SBPM that the BBH communicates with.
+*/
+#define  GENERAL_CFG_BBCFG_SBPMBBID_SHIFT	16
+#define  GENERAL_CFG_BBCFG_SBPMBBID_MASK	0x3f0000
+
+
+/*
+ * Register <DISPATCHER_FLOW>
+ *
+ * For every reassembled packet in the PSRAM the BBH writes a packet
+ * descriptor (PD) into the Dispatcher.
+ * The PDs are arranged using a link list in the Dispatcher.
+ * The Dispatcher has 32 virtual queues (ingress queues) and the BBH may be
+ * assigned to each of the 32 virtual queues of the Dispatcher.
+ * This register defines the virtual queue for normal and exclusive packets.
+ */
+#define BBH_RX_GENERAL_CFG_DISPVIQ	0x4
+
+/* Defines the Dispatchers Virtual Ingress Queue for normal packets */
+#define  GENERAL_CFG_DISPVIQ_NORMALVIQ_SHIFT	0
+#define  GENERAL_CFG_DISPVIQ_NORMALVIQ_MASK	0x1f
+
+/* Defines the Dispatchers Virtual Ingress Queue for exclusive packets */
+#define  GENERAL_CFG_DISPVIQ_EXCLVIQ_SHIFT	8
+#define  GENERAL_CFG_DISPVIQ_EXCLVIQ_MASK	0x1f00
+
+
+/*
+ * Register <PATTERN_RECOGNITION_DATA_LSB>
+ *
+ * The BBH may direct a packet into the Dispatchers exclusive VIQ (Virtual
+ * Ingress Queue) according to a match in the pattern recognition.
+ */
+#define BBH_RX_GENERAL_CFG_PATTERNDATALSB	0x8
+
+/*
+ * Pattern Data[31:
+ * 0]
+*/
+#define  GENERAL_CFG_PATTERNDATALSB_PATTERNDATALSB_SHIFT	0
+#define  GENERAL_CFG_PATTERNDATALSB_PATTERNDATALSB_MASK	0xffffffff
+
+
+/*
+ * Register <PATTERN_RECOGNITION_DATA_MSB>
+ *
+ * The BBH may direct a packet into the Dispatchers exclusive VIQ (Virtual
+ * Ingress Queue) according to a match in the pattern recognition.
+ */
+#define BBH_RX_GENERAL_CFG_PATTERNDATAMSB	0xc
+
+/*
+ * Pattern Data[63:
+ * 32]
+*/
+#define  GENERAL_CFG_PATTERNDATAMSB_PATTERNDATAMSB_SHIFT	0
+#define  GENERAL_CFG_PATTERNDATAMSB_PATTERNDATAMSB_MASK	0xffffffff
+
+
+/*
+ * Register <PATTERN_RECOGNITION_MASK_LSB>
+ *
+ * The BBH may direct a packet into the Dispatchers exclusive VIQ (Virtual
+ * Ingress Queue) according to a match in the pattern recognition.
+ */
+#define BBH_RX_GENERAL_CFG_PATTERNMASKLSB	0x10
+
+/*
+ * Pattern mask[31:
+ * 0]
+*/
+#define  GENERAL_CFG_PATTERNMASKLSB_PATTERNMASKLSB_SHIFT	0
+#define  GENERAL_CFG_PATTERNMASKLSB_PATTERNMASKLSB_MASK	0xffffffff
+
+
+/*
+ * Register <PATTERN_RECOGNITION_MASK_MSB>
+ *
+ * The BBH may direct a packet into the Dispatchers exclusive VIQ (Virtual
+ * Ingress Queue) according to a match in the pattern recognition.
+ */
+#define BBH_RX_GENERAL_CFG_PATTERNMASKMSB	0x14
+
+/*
+ * Pattern Mask[63:
+ * 32]
+*/
+#define  GENERAL_CFG_PATTERNMASKMSB_PATTERNMASKMSB_SHIFT	0
+#define  GENERAL_CFG_PATTERNMASKMSB_PATTERNMASKMSB_MASK	0xffffffff
+
+
+/*
+ * Register <EXCLUSIVE_QUEUE_CFG>
+ *
+ * The BBH may direct a packet into the Dispatchers exclusive VIQ (Virtual
+ * Ingress Queue) according to special packet types
+ * (e.g.
+ * pause).
+ * This register enables this function.
+ */
+#define BBH_RX_GENERAL_CFG_EXCLQCFG	0x18
+
+/* Direct this packet type to Exclusive VIQ in the Dispatcher */
+#define  GENERAL_CFG_EXCLQCFG_PLOAMEN_MASK	0x1
+
+/* Direct this packet type to Exclusive VIQ in the Dispatcher */
+#define  GENERAL_CFG_EXCLQCFG_PRI3EN_MASK	0x2
+
+/* Direct this packet type to Exclusive VIQ in the Dispatcher */
+#define  GENERAL_CFG_EXCLQCFG_PAUSEEN_MASK	0x4
+
+/* Direct this packet type to Exclusive VIQ in the Dispatcher */
+#define  GENERAL_CFG_EXCLQCFG_PFCEN_MASK	0x8
+
+/* Direct this packet type to Exclusive VIQ in the Dispatcher */
+#define  GENERAL_CFG_EXCLQCFG_CTRLEN_MASK	0x10
+
+/* Direct this packet type to Exclusive VIQ in the Dispatcher */
+#define  GENERAL_CFG_EXCLQCFG_MULTEN_MASK	0x20
+
+/*
+ * Defines the pattern recognition offset within the packet.
+ * Offset is 8 bytes resolution
+*/
+#define  GENERAL_CFG_EXCLQCFG_PATTENOFFSET_SHIFT	8
+#define  GENERAL_CFG_EXCLQCFG_PATTENOFFSET_MASK	0xf00
+
+/* Must be enabled if pattern recognition is used */
+#define  GENERAL_CFG_EXCLQCFG_PATTERNEN_MASK	0x10000
+
+/* Must be enabled if Exclusive VIQ is used */
+#define  GENERAL_CFG_EXCLQCFG_EXCEN_MASK	0x100000
+
+
+/*
+ * Register <SDMA_ADDRESS_CONFIGURATION>
+ *
+ * The BBH reassembles the incoming data in the SRAM.
+ * The Data is written into the SRAM using the SDMA.
+ * The data is organized in a configurable number of chunks of 128 bytes.
+ * The BBH arranges the written data in the SDMA in these chunks.
+ * It arranges the data in a predefined address space in the SDMA memory
+ * and manages the chunks in a cyclic FIFO style.
+ * For every write chunk the BBH writes a write descriptor.
+ * The write descriptors are arranged in a predefined space in the SDMA
+ * memory and managed in a cyclic FIFO style as well.
+ * This register defines the Data and descriptors base addresses.
+ */
+#define BBH_RX_GENERAL_CFG_SDMAADDR	0x1c
+
+/*
+ * The Data FIFO base address within the SDMA address space.
+ * The address is in chunk resolution (128 bytes).
+ * The value should be identical to the relevant configuration in the SDMA.
+*/
+#define  GENERAL_CFG_SDMAADDR_DATABASE_SHIFT	0
+#define  GENERAL_CFG_SDMAADDR_DATABASE_MASK	0x3f
+
+/*
+ * The Descriptor FIFO base address within the SDMA address space.
+ * The address is in chunk descriptor resolution (8 bytes).
+ * The value should be identical to the relevant configuration in the SDMA.
+*/
+#define  GENERAL_CFG_SDMAADDR_DESCBASE_SHIFT	8
+#define  GENERAL_CFG_SDMAADDR_DESCBASE_MASK	0x3f00
+
+
+/*
+ * Register <SDMA_CONFIGURATION>
+ *
+ * The BBH reassembles the incoming data in the SRAM.
+ * The Data is written into the SRAM using the SDMA.
+ * The data is organized in a configurable number of chunks of 128 bytes.
+ * The BBH arranges the written data in the SDMA in these chunks.
+ * It arranges the data in a predefined address space in the SDMA memory
+ * and manages the chunks in a cyclic FIFO style.
+ * For every write chunk the BBH writes a write descriptor.
+ * The write descriptors are arranged in a predefined space in the SDMA
+ * memory and managed in a cyclic FIFO style as well.
+ * The BBH handles the congestion over the SDMA write chunks according to 2
+ * priorities (low + high, exclusive).
+ * This field defines the number of occupied write chunks for dropping
+ * normal or high priority packets.
+ * If the number of occupied chunk is lower than this threshold, then all
+ * packets are passed.
+ * If the number of occupied chunk is equal or higher than this threshold,
+ * then only exclusive priority packets are passed.
+ * This register defines the Data and descriptors FIFO sizes and the
+ * exclusive threshold.
+ */
+#define BBH_RX_GENERAL_CFG_SDMACFG	0x20
+
+/* Defines the size of the Chunk descriptors FIFO in the DMA. */
+#define  GENERAL_CFG_SDMACFG_NUMOFCD_SHIFT	0
+#define  GENERAL_CFG_SDMACFG_NUMOFCD_MASK	0x7f
+
+/*
+ * This field defines the number of occupied write chunks for dropping
+ * normal or high priority packets.
+*/
+#define  GENERAL_CFG_SDMACFG_EXCLTH_SHIFT	8
+#define  GENERAL_CFG_SDMACFG_EXCLTH_MASK	0x7f00
+
+/*
+ * BBH has two methods to keep coherency:
+ * 1. Write reply for last chunk only
+ * 2. Write reply for each chunk
+ * 1 - enables the first method
+ * 0 - enables the second method
+*/
+#define  GENERAL_CFG_SDMACFG_COHERENCYEN_MASK	0x10000
+
+
+/*
+ * Register <MINIMUM_PACKET_SIZE>
+ *
+ * There are 4 global configuration for Minimum packet size.
+ * Each flow can get one out of these 4 global configurations.
+ * Packets shorter than this threshold will be discarded.
+ */
+#define BBH_RX_GENERAL_CFG_MINPKT0	0x24
+
+/* Packets shorter than this threshold will be discarded. */
+#define  GENERAL_CFG_MINPKT0_MINPKT0_SHIFT	0
+#define  GENERAL_CFG_MINPKT0_MINPKT0_MASK	0xff
+
+/* Packets shorter than this threshold will be discarded. */
+#define  GENERAL_CFG_MINPKT0_MINPKT1_SHIFT	8
+#define  GENERAL_CFG_MINPKT0_MINPKT1_MASK	0xff00
+
+/* Packets shorter than this threshold will be discarded. */
+#define  GENERAL_CFG_MINPKT0_MINPKT2_SHIFT	16
+#define  GENERAL_CFG_MINPKT0_MINPKT2_MASK	0xff0000
+
+/* Packets shorter than this threshold will be discarded. */
+#define  GENERAL_CFG_MINPKT0_MINPKT3_SHIFT	24
+#define  GENERAL_CFG_MINPKT0_MINPKT3_MASK	0xff000000
+
+
+/*
+ * Register <MAXIMUM_PACKET_SIZE_0>
+ *
+ * There are 4 global configuration for Maximum packet size.
+ * Each flow can get one out of these 4 global configurations.
+ * Packets longer than this threshold will be discarded.
+ */
+#define BBH_RX_GENERAL_CFG_MAXPKT0	0x28
+
+/* Packets longer than this threshold will be discarded. */
+#define  GENERAL_CFG_MAXPKT0_MAXPKT0_SHIFT	0
+#define  GENERAL_CFG_MAXPKT0_MAXPKT0_MASK	0x3fff
+
+/* Packets longer than this threshold will be discarded. */
+#define  GENERAL_CFG_MAXPKT0_MAXPKT1_SHIFT	16
+#define  GENERAL_CFG_MAXPKT0_MAXPKT1_MASK	0x3fff0000
+
+
+/*
+ * Register <MAXIMUM_PACKET_SIZE_1>
+ *
+ * There are 4 global configuration for Maximum packet size.
+ * Each flow can get one out of these 4 global configurations.
+ * Packets longer than this threshold will be discarded.
+ */
+#define BBH_RX_GENERAL_CFG_MAXPKT1	0x2c
+
+/* Packets longer than this threshold will be discarded. */
+#define  GENERAL_CFG_MAXPKT1_MAXPKT2_SHIFT	0
+#define  GENERAL_CFG_MAXPKT1_MAXPKT2_MASK	0x3fff
+
+/* Packets longer than this threshold will be discarded. */
+#define  GENERAL_CFG_MAXPKT1_MAXPKT3_SHIFT	16
+#define  GENERAL_CFG_MAXPKT1_MAXPKT3_MASK	0x3fff0000
+
+
+/*
+ * Register <SOP_OFFSET>
+ *
+ * The BBH writes the packets into the PSRAM.
+ * The start of data offset is configurable.
+ * This register defines the SOP (start of packet) offset.
+ */
+#define BBH_RX_GENERAL_CFG_SOPOFFSET	0x30
+
+/*
+ * The SOP offset in bytes.
+ * Allowed values:
+ * 0-127.
+ * This value should match the relevant configuration in the Runner block.
+*/
+#define  GENERAL_CFG_SOPOFFSET_SOPOFFSET_SHIFT	0
+#define  GENERAL_CFG_SOPOFFSET_SOPOFFSET_MASK	0x7f
+
+
+/*
+ * Register <FLOW_CONTROL_CONFIGURATION>
+ *
+ * The BBH manages a flow control indication towards the Ethernet MAC
+ * according to BB messages from the FW.
+ * Each FW command will assert the flow control indication towards the
+ * Ethernet MAC and will trigger a timer.
+ * When the timer expires, the BBH will de-assert the flow control
+ * indication.
+ * This register also disable BBH packet drop due to no space in the SDMA,
+ * SBPM or Dispatcher.
+ */
+#define BBH_RX_GENERAL_CFG_FLOWCTRL	0x34
+
+/*
+ * Timer value before de-asserting the flow control indication.
+ * The duration of the time is determined according to the BBH clock
+ * frequency.
+*/
+#define  GENERAL_CFG_FLOWCTRL_TIMER_SHIFT	0
+#define  GENERAL_CFG_FLOWCTRL_TIMER_MASK	0xffffff
+
+/* Disable dropping packets due to no space in the Dispatcher. */
+#define  GENERAL_CFG_FLOWCTRL_DISPDROPDIS_MASK	0x1000000
+
+/* Disable dropping packets due to no space in the SDMA. */
+#define  GENERAL_CFG_FLOWCTRL_SDMADROPDIS_MASK	0x2000000
+
+/* Disable dropping packets due to no space in the SBPM. */
+#define  GENERAL_CFG_FLOWCTRL_SBPMDROPDIS_MASK	0x4000000
+
+/* Asserting this bit will force a flow control indication towards the MAC */
+#define  GENERAL_CFG_FLOWCTRL_FCFORCE_MASK	0x10000000
+
+
+/*
+ * Register <CRC_OMIT_DISABLE>
+ *
+ * The BBH omits the 4 CRC bytes of the packet for all packets except
+ * PLOAMs and OMCI (marked as exclusive priority).
+ * The configuration will disable this functionality.
+ */
+#define BBH_RX_GENERAL_CFG_CRCOMITDIS	0x38
+
+/* Disable CRC omitting. */
+#define  GENERAL_CFG_CRCOMITDIS_CRCOMITDIS_MASK	0x1
+
+
+/*
+ * Register <BBH_ENABLE>
+ *
+ * Controls the BBH enable configuration
+ */
+#define BBH_RX_GENERAL_CFG_ENABLE	0x3c
+
+/*
+ * When de-asserted, the BBH will not read new fragment/packet from the
+ * MAC.
+ * The BBH will Gracefully enable/disable (on fragment boundary for
+ * N/X/GPON/2 and on packet boundary for the rest)
+*/
+#define  GENERAL_CFG_ENABLE_PKTEN_MASK	0x1
+
+/* When de-asserted, the BBH will not pre-fetch SBPM buffers */
+#define  GENERAL_CFG_ENABLE_SBPMEN_MASK	0x2
+
+
+/*
+ * Register <G999_1_ENABLE>
+ *
+ * When asserted, G999.1 fragments are received by the BBH.
+ * The BBH will pass the G999.1 header in the PD instead of
+ * the 1588 time-stamp.
+ *
+ */
+#define BBH_RX_GENERAL_CFG_G9991EN	0x40
+
+/*
+ * Enable G999.
+ * 1
+*/
+#define  GENERAL_CFG_G9991EN_ENABLE_MASK	0x1
+
+/*
+ * Enable G999.
+ * 1 transfer of bytes 4-7 instead of bytes 0-3
+*/
+#define  GENERAL_CFG_G9991EN_BYTES4_7ENABLE_MASK	0x2
+
+
+/*
+ * Register <PER_FLOW_THRESHOLD>
+ *
+ * The DS has 256 flows.
+ * Minimum packet size (2 bits) and Maximum packet size (2 bits) are
+ * configured per flow.
+ * Flows 0-31 will have full configurations.
+ * Flows 32-X and flows (X+1)-255 will have global set of configurations.
+ * X is configurable.
+ * This register defines X.
+ */
+#define BBH_RX_GENERAL_CFG_PERFLOWTH	0x44
+
+/*
+ * According to this threshold:
+ * Flows 32 - th will have set 0 configurations.
+ * Flows (th+1) - 255 will have set 1 configurations.
+*/
+#define  GENERAL_CFG_PERFLOWTH_FLOWTH_SHIFT	0
+#define  GENERAL_CFG_PERFLOWTH_FLOWTH_MASK	0xff
+
+
+/*
+ * Register <PER_FLOW_SETS>
+ *
+ * The DS has 256 flows.
+ * Minimum packet size (2 bits) and Maximum packet size (2 bits) are
+ * configured per flow.
+ * Flows 0-31 will have full configurations.
+ * Flows 32-X and flows (X+1)-255 will have global set of configurations.
+ * X is configurable.
+ * This register defines the configurations sets.
+ */
+#define BBH_RX_GENERAL_CFG_PERFLOWSETS	0x48
+
+/*
+ * Set 0 of the general configuration.
+ * Selects between 4 global minimum packet size.
+*/
+#define  GENERAL_CFG_PERFLOWSETS_MINPKTSEL0_SHIFT	0
+#define  GENERAL_CFG_PERFLOWSETS_MINPKTSEL0_MASK	0x3
+
+/*
+ * Set 0 of the general configuration.
+ * Selects between 4 global maximum packet size.
+*/
+#define  GENERAL_CFG_PERFLOWSETS_MAXPKTSEL0_SHIFT	2
+#define  GENERAL_CFG_PERFLOWSETS_MAXPKTSEL0_MASK	0xc
+
+/*
+ * Set 1 of the general configuration.
+ * Selects between 4 global minimum packet size.
+*/
+#define  GENERAL_CFG_PERFLOWSETS_MINPKTSEL1_SHIFT	4
+#define  GENERAL_CFG_PERFLOWSETS_MINPKTSEL1_MASK	0x30
+
+/*
+ * Set 1 of the general configuration.
+ * Selects between 4 global maximum packet size.
+*/
+#define  GENERAL_CFG_PERFLOWSETS_MAXPKTSEL1_SHIFT	6
+#define  GENERAL_CFG_PERFLOWSETS_MAXPKTSEL1_MASK	0xc0
+
+
+/*
+ * Register <MINIMUM_PACKET_SELECT_0>
+ *
+ * The DS has 256 flows.
+ * Minimum packet size (2 bits) and Maximum packet size (2 bits) are
+ * configured per flow.
+ * Flows 0-31 will have full configurations.
+ * Flows 32-X and flows (X+1)-255 will have global set of configurations.
+ * X is configurable.
+ * This register defines the minimum packet size for flows 0-15.
+ */
+#define BBH_RX_GENERAL_CFG_MINPKTSEL0	0x50
+
+/*
+ * Selects one of the 4 global configurations for minimum packet size.
+ * Bits {2n, 2n+1} refers to flow n.
+*/
+#define  GENERAL_CFG_MINPKTSEL0_MINPKTSEL_SHIFT	0
+#define  GENERAL_CFG_MINPKTSEL0_MINPKTSEL_MASK	0xffffffff
+
+
+/*
+ * Register <MINIMUM_PACKET_SELECT_1>
+ *
+ * The DS has 256 flows.
+ * Minimum packet size (2 bits) and Maximum packet size (2 bits) are
+ * configured per flow.
+ * Flows 0-31 will have full configurations.
+ * Flows 32-X and flows (X+1)-255 will have global set of configurations.
+ * X is configurable.
+ * This register defines the minimum packet size for flows 16-31.
+ */
+#define BBH_RX_GENERAL_CFG_MINPKTSEL1	0x54
+
+/*
+ * Selects one of the 4 global configurations for minimum packet size.
+ * Bits {2n, 2n+1} refers to flow n+16.
+*/
+#define  GENERAL_CFG_MINPKTSEL1_MINPKTSEL_SHIFT	0
+#define  GENERAL_CFG_MINPKTSEL1_MINPKTSEL_MASK	0xffffffff
+
+
+/*
+ * Register <MAXIMUM_PACKET_SELECT_0>
+ *
+ * The DS has 256 flows.
+ * Minimum packet size (2 bits) and Maximum packet size (2 bits) are
+ * configured per flow.
+ * Flows 0-31 will have full configurations.
+ * Flows 32-X and flows (X+1)-255 will have global set of configurations.
+ * X is configurable.
+ * This register defines the maximum packet size for flows 0-15.
+ */
+#define BBH_RX_GENERAL_CFG_MAXPKTSEL0	0x58
+
+/*
+ * Selects one of the 4 global configurations for maximum packet size.
+ * Bits {2n, 2n+1} refers to flow n.
+*/
+#define  GENERAL_CFG_MAXPKTSEL0_MAXPKTSEL_SHIFT	0
+#define  GENERAL_CFG_MAXPKTSEL0_MAXPKTSEL_MASK	0xffffffff
+
+
+/*
+ * Register <MAXIMUM_PACKET_SELECT_1>
+ *
+ * The DS has 256 flows.
+ * Minimum packet size (2 bits) and Maximum packet size (2 bits) are
+ * configured per flow.
+ * Flows 0-31 will have full configurations.
+ * Flows 32-X and flows (X+1)-255 will have global set of configurations.
+ * X is configurable.
+ * This register defines the maximum packet size for flows 16-31.
+ */
+#define BBH_RX_GENERAL_CFG_MAXPKTSEL1	0x5c
+
+/*
+ * Selects one of the 4 global configurations for maximum packet size.
+ * Bits {2n, 2n+1} refers to flow n+16.
+*/
+#define  GENERAL_CFG_MAXPKTSEL1_MAXPKTSEL_SHIFT	0
+#define  GENERAL_CFG_MAXPKTSEL1_MAXPKTSEL_MASK	0xffffffff
+
+
+/*
+ * Register <MAC_MODE>
+ *
+ * When the BBH functions as a PON BBH, this bit selects between N/X/GPON/2
+ * and 10G/EPON functionality
+ */
+#define BBH_RX_GENERAL_CFG_MACMODE	0x60
+
+/*
+ * Relevant for PON BBH only.
+ * Distinguish between GPON (GPON, XGPON, NGPON2) to EPON (EPON, 10GEPON):
+ * 0:
+ * N/X/GPON/21:
+ * 10G/EPON
+*/
+#define  GENERAL_CFG_MACMODE_MACMODE_MASK	0x1
+
+/*
+ * Relevant for GPON BBH only.
+ * Distinguish between GPON and XGPON (XGPON, NGPON2):
+ * 0:
+ * GPON1:
+ * N/X/GPON/2
+*/
+#define  GENERAL_CFG_MACMODE_GPONMODE_MASK	0x2
+
+/*
+ * Relevant for VDSL BBH only.
+ * Distinguish between VDSL and non VDSL:
+ * 0:
+ * Non VDSL1:
+ * VDSL
+*/
+#define  GENERAL_CFG_MACMODE_MACVDSL_MASK	0x4
+
+
+/*
+ * Register <SBPM_CFG>
+ *
+ * Configure max on the fly requests to SBPM
+ */
+#define BBH_RX_GENERAL_CFG_SBPMCFG	0x64
+
+/* Configure max on the fly requests to SBPM */
+#define  GENERAL_CFG_SBPMCFG_MAXREQ_SHIFT	0
+#define  GENERAL_CFG_SBPMCFG_MAXREQ_MASK	0xf
+
+
+/*
+ * Register <RX_RESET_COMMAND>
+ *
+ * This register enables reset of internal units (for WA purposes).
+ */
+#define BBH_RX_GENERAL_CFG_RXRSTRST	0x68
+
+/*
+ * Writing 1 to this register will reset the input buffer.
+ * For a reset operation the SW should assert and then de-assert this bit.
+*/
+#define  GENERAL_CFG_RXRSTRST_INBUFRST_MASK	0x1
+
+/*
+ * Writing 1 to this register will reset the Burst buffer.
+ * For a reset operation the SW should assert and then de-assert this bit.
+*/
+#define  GENERAL_CFG_RXRSTRST_BURSTBUFRST_MASK	0x2
+
+/*
+ * Writing 1 to this register will reset the ingress context.
+ * For a reset operation the SW should assert and then de-assert this bit.
+*/
+#define  GENERAL_CFG_RXRSTRST_INGRESSCNTXT_MASK	0x4
+
+/*
+ * Writing 1 to this register will reset the IH buffer enable.
+ * For a reset operation the SW should assert and then de-assert this bit.
+*/
+#define  GENERAL_CFG_RXRSTRST_CMDFIFORST_MASK	0x8
+
+/*
+ * Writing 1 to this register will reset the SBPM FIFO.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  GENERAL_CFG_RXRSTRST_SBPMFIFORST_MASK	0x10
+
+/*
+ * Writing 1 to this register will reset the coherency FIFO.
+ * For a reset operation the SW should assert and then de-assert this bit.
+*/
+#define  GENERAL_CFG_RXRSTRST_COHERENCYFIFORST_MASK	0x20
+
+/*
+ * Writing 1 to this register will reset the reassembly context table.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  GENERAL_CFG_RXRSTRST_CNTXTRST_MASK	0x40
+
+/*
+ * Writing 1 to this register will reset the SDMA write pointer.
+ * For a reset operation the SW should assert and then de-assert this bit.
+*/
+#define  GENERAL_CFG_RXRSTRST_SDMARST_MASK	0x80
+
+
+/*
+ * Register <RX_DEBUG_SELECT>
+ *
+ * Selects one out of 10 possible debug vectors
+ */
+#define BBH_RX_GENERAL_CFG_RXDBGSEL	0x6c
+
+/* Selects one out of 10 possible debug vectors */
+#define  GENERAL_CFG_RXDBGSEL_RXDBGSEL_SHIFT	0
+#define  GENERAL_CFG_RXDBGSEL_RXDBGSEL_MASK	0xf
+
+
+/*
+ * Register <BBH_RX_RADDR_DECODER>
+ *
+ * This register enables changing the route address for a specified BB ID
+ */
+#define BBH_RX_GENERAL_CFG_BBHRX_RADDR_DECODER	0x70
+
+/* This field contains the users BB id for override */
+#define  GENERAL_CFG_BBHRX_RADDR_DECODER_ID_2OVERWR_SHIFT	0
+#define  GENERAL_CFG_BBHRX_RADDR_DECODER_ID_2OVERWR_MASK	0x3f
+
+/* The new RA */
+#define  GENERAL_CFG_BBHRX_RADDR_DECODER_OVERWR_RA_SHIFT	8
+#define  GENERAL_CFG_BBHRX_RADDR_DECODER_OVERWR_RA_MASK	0x3ff00
+
+/* the overwr mechanism will be used only if this bit is active (1). */
+#define  GENERAL_CFG_BBHRX_RADDR_DECODER_OVERWR_EN_MASK	0x1000000
+
+
+/*
+ * Register <NON_ETHERNET_FLOW>
+ *
+ * There is an option to disable CRC error counting for this flow.
+ */
+#define BBH_RX_GENERAL_CFG_NONETH	0x74
+
+/* Non Ethernet flow ID */
+#define  GENERAL_CFG_NONETH_FLOWID_SHIFT	0
+#define  GENERAL_CFG_NONETH_FLOWID_MASK	0xff
+
+/* When asserted, CRC errors will not be counted for that flow. */
+#define  GENERAL_CFG_NONETH_ENABLE_MASK	0x100
+
+
+/*
+ * Register <CLOCK_GATE_CONTROL>
+ *
+ * Clock Gate control register including timer config and bypass control
+ */
+#define BBH_RX_GENERAL_CFG_CLK_GATE_CNTRL	0x78
+
+/*
+ * If set to 1b1 will disable the clock gate logic such to always enable
+ * the clock
+*/
+#define  GENERAL_CFG_CLK_GATE_CNTRL_BYPASS_CLK_GATE_MASK	0x1
+
+/*
+ * For how long should the clock stay active once all conditions for clock
+ * disable are met.
+*/
+#define  GENERAL_CFG_CLK_GATE_CNTRL_TIMER_VAL_SHIFT	8
+#define  GENERAL_CFG_CLK_GATE_CNTRL_TIMER_VAL_MASK	0xff00
+
+/*
+ * Enables the keep alive logic which will periodically enable the clock to
+ * assure that no deadlock of clock being removed completely will occur
+*/
+#define  GENERAL_CFG_CLK_GATE_CNTRL_KEEP_ALIVE_EN_MASK	0x10000
+
+/*
+ * If the KEEP alive option is enabled the field will determine for how
+ * many cycles should the clock be active
+*/
+#define  GENERAL_CFG_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_SHIFT	20
+#define  GENERAL_CFG_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_MASK	0x700000
+
+/*
+ * If the KEEP alive option is enabled this field will determine for how
+ * many cycles should the clock be disabled (minus the
+ * KEEP_ALIVE_INTERVAL), so KEEP_ALIVE_CYCLE must be larger than
+ * KEEP_ALIVE_INTERVAL.
+*/
+#define  GENERAL_CFG_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_SHIFT	24
+#define  GENERAL_CFG_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_MASK	0xff000000
+
+
+/*
+ * Register <INCOMING_PACKETS> - read-only
+ *
+ * This counter counts the number of incoming good packets.
+ * It counts the packets from all flows together.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_INPKT	0x100
+
+/* This counter counts the number of incoming good packets. */
+#define  PM_COUNTERS_INPKT_INPKT_SHIFT	0
+#define  PM_COUNTERS_INPKT_INPKT_MASK	0xffffffff
+
+
+/*
+ * Register <THIRD_FLOW_ERROR> - read-only
+ *
+ * This counter counts the packets drop due to Third flow error.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_THIRDFLOW	0x104
+
+/* PM counter value. */
+#define  PM_COUNTERS_THIRDFLOW_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_THIRDFLOW_PMVALUE_MASK	0xffffffff
+
+
+/*
+ * Register <SOP_AFTER_SOP_ERROR> - read-only
+ *
+ * This counter counts the packets drop due to SOP after SOP error.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_SOPASOP	0x108
+
+/* PM counter value. */
+#define  PM_COUNTERS_SOPASOP_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_SOPASOP_PMVALUE_MASK	0xffffffff
+
+
+/*
+ * Register <TOO_SHORT_ERROR> - read-only
+ *
+ * This counter counts the packets drop due to Too short error.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_TOOSHORT	0x10c
+
+/* PM counter value. */
+#define  PM_COUNTERS_TOOSHORT_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_TOOSHORT_PMVALUE_MASK	0xffffffff
+
+
+/*
+ * Register <TOO_LONG_ERROR> - read-only
+ *
+ * This counter counts the packets drop due to Too long error.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_TOOLONG	0x110
+
+/* PM counter value. */
+#define  PM_COUNTERS_TOOLONG_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_TOOLONG_PMVALUE_MASK	0xffffffff
+
+
+/*
+ * Register <CRC_ERROR> - read-only
+ *
+ * This counter counts the packets drop due to CRC error.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_CRCERROR	0x114
+
+/* PM counter value. */
+#define  PM_COUNTERS_CRCERROR_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_CRCERROR_PMVALUE_MASK	0xffffffff
+
+
+/*
+ * Register <ENCRYPTION_ERROR> - read-only
+ *
+ * This counter counts the packets drop due to XGPON encryption error.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_ENCRYPTERROR	0x118
+
+/* PM counter value. */
+#define  PM_COUNTERS_ENCRYPTERROR_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_ENCRYPTERROR_PMVALUE_MASK	0xffffffff
+
+
+/*
+ * Register <DISPATCHER_CONGESTION_ERROR> - read-only
+ *
+ * This counter counts the packets drop due to Dispatcher congestion error.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_DISPCONG	0x11c
+
+/* PM counter value. */
+#define  PM_COUNTERS_DISPCONG_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_DISPCONG_PMVALUE_MASK	0xffffffff
+
+
+/*
+ * Register <NO_SBPM_SBN_ERROR> - read-only
+ *
+ * This counter counts the packets drop due to NO SBPM SBN error.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_NOSBPMSBN	0x124
+
+/* PM counter value. */
+#define  PM_COUNTERS_NOSBPMSBN_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_NOSBPMSBN_PMVALUE_MASK	0xffffffff
+
+
+/*
+ * Register <NO_SDMA_CD_ERROR> - read-only
+ *
+ * This counter counts the packets drop due to No SDMA CD error.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_NOSDMACD	0x12c
+
+/* PM counter value. */
+#define  PM_COUNTERS_NOSDMACD_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_NOSDMACD_PMVALUE_MASK	0xffffffff
+
+
+/*
+ * Register <INCOMING_PLOAM> - read-only
+ *
+ * This counter counts the number of incoming good PLOAMs.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_INPLOAM	0x130
+
+/* This counter counts the number of incoming PLOAMs. */
+#define  PM_COUNTERS_INPLOAM_INPLOAM_SHIFT	0
+#define  PM_COUNTERS_INPLOAM_INPLOAM_MASK	0xffffffff
+
+
+/*
+ * Register <CRC_PLOAM_ERROR> - read-only
+ *
+ * This counter counts the PLOAMs drop due to CRC error.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_CRCERRORPLOAM	0x134
+
+/* PM counter value. */
+#define  PM_COUNTERS_CRCERRORPLOAM_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_CRCERRORPLOAM_PMVALUE_MASK	0xffffffff
+
+
+/*
+ * Register <DISPATCHER_CONGESTION_PLOAM_ERROR> - read-only
+ *
+ * This counter counts the packets drop due to Dispatcher congestion error
+ * for PLOAM.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_DISPCONGPLOAM	0x138
+
+/* PM counter value. */
+#define  PM_COUNTERS_DISPCONGPLOAM_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_DISPCONGPLOAM_PMVALUE_MASK	0xffffffff
+
+
+/*
+ * Register <NO_SBPM_SBN_PLOAM_ERROR> - read-only
+ *
+ * This counter counts the PLOAMs drop due to No SBPM SBN error.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_NOSBPMSBNPLOAM	0x13c
+
+/* PM counter value. */
+#define  PM_COUNTERS_NOSBPMSBNPLOAM_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_NOSBPMSBNPLOAM_PMVALUE_MASK	0xffffffff
+
+
+/*
+ * Register <NO_SDMA_CD_PLOAM_ERROR> - read-only
+ *
+ * This counter counts the packets drop due to No SDMA CD error for PLOAMs.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_NOSDMACDPLOAM	0x140
+
+/* PM counter value. */
+#define  PM_COUNTERS_NOSDMACDPLOAM_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_NOSDMACDPLOAM_PMVALUE_MASK	0xffffffff
+
+
+/*
+ * Register <EPON_TYPE_ERROR> - read-only
+ *
+ * This counter counts the events of EPON type sequence which is wrong,
+ * meaning no sop after header, or sop/header in the middle of packet
+ * (before eop).
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_EPONTYPERROR	0x144
+
+/* PM counter value. */
+#define  PM_COUNTERS_EPONTYPERROR_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_EPONTYPERROR_PMVALUE_MASK	0xffffffff
+
+
+/*
+ * Register <RUNT_ERROR> - read-only
+ *
+ * This counter counts the number of RUNT packets received from the XLMAC.
+ * This counter is cleared when read and freezes when reaches the maximum
+ * value.
+ */
+#define BBH_RX_PM_COUNTERS_RUNTERROR	0x148
+
+/* PM counter value. */
+#define  PM_COUNTERS_RUNTERROR_PMVALUE_SHIFT	0
+#define  PM_COUNTERS_RUNTERROR_PMVALUE_MASK	0xffff
+
+
+/*
+ * Register <CONTEXT_0_LSB> - read-only
+ *
+ * In the case of GPON peripheral, DS flows may arrive interleaved.
+ * The BBH supports parallel reassembly of up to two interleaved flows (out
+ * of 256).
+ * For the reassembly process the BBH stores a double flow context.
+ */
+#define BBH_RX_DEBUG_CNTXTX0LSB		0x200
+
+/*
+ * In reassembly.
+ * Not relevant for Ethernet.
+*/
+#define  DEBUG_CNTXTX0LSB_INREASS_MASK	0x1
+
+/* Flow ID */
+#define  DEBUG_CNTXTX0LSB_FLOWID_SHIFT	8
+#define  DEBUG_CNTXTX0LSB_FLOWID_MASK	0xff00
+
+/* Current offset */
+#define  DEBUG_CNTXTX0LSB_CUROFFSET_SHIFT	16
+#define  DEBUG_CNTXTX0LSB_CUROFFSET_MASK	0x3fff0000
+
+
+/*
+ * Register <CONTEXT_0_MSB> - read-only
+ *
+ * In the case of GPON peripheral, DS flows may arrive interleaved.
+ * The BBH supports parallel reassembly of up to two interleaved flows (out
+ * of 256).
+ * For the reassembly process the BBH stores a double flow context.
+ */
+#define BBH_RX_DEBUG_CNTXTX0MSB		0x204
+
+/* Current BN */
+#define  DEBUG_CNTXTX0MSB_CURBN_SHIFT	0
+#define  DEBUG_CNTXTX0MSB_CURBN_MASK	0x1fff
+
+/* First BN */
+#define  DEBUG_CNTXTX0MSB_FIRSTBN_SHIFT	16
+#define  DEBUG_CNTXTX0MSB_FIRSTBN_MASK	0x7fff0000
+
+
+/*
+ * Register <CONTEXT_1_LSB> - read-only
+ *
+ * In the case of GPON peripheral, DS flows may arrive interleaved.
+ * The BBH supports parallel reassembly of up to two interleaved flows (out
+ * of 256).
+ * For the reassembly process the BBH stores a double flow context.
+ */
+#define BBH_RX_DEBUG_CNTXTX1LSB		0x208
+
+/*
+ * In reassembly.
+ * Not relevant for Ethernet.
+*/
+#define  DEBUG_CNTXTX1LSB_INREASS_MASK	0x1
+
+/* Flow ID */
+#define  DEBUG_CNTXTX1LSB_FLOWID_SHIFT	8
+#define  DEBUG_CNTXTX1LSB_FLOWID_MASK	0xff00
+
+/* Current offset */
+#define  DEBUG_CNTXTX1LSB_CUROFFSET_SHIFT	16
+#define  DEBUG_CNTXTX1LSB_CUROFFSET_MASK	0x3fff0000
+
+
+/*
+ * Register <CONTEXT_1_MSB> - read-only
+ *
+ * In the case of GPON peripheral, DS flows may arrive interleaved.
+ * The BBH supports parallel reassembly of up to two interleaved flows (out
+ * of 256).
+ * For the reassembly process the BBH stores a double flow context.
+ */
+#define BBH_RX_DEBUG_CNTXTX1MSB		0x20c
+
+/* Current BN */
+#define  DEBUG_CNTXTX1MSB_CURBN_SHIFT	0
+#define  DEBUG_CNTXTX1MSB_CURBN_MASK	0x1fff
+
+/* First BN */
+#define  DEBUG_CNTXTX1MSB_FIRSTBN_SHIFT	16
+#define  DEBUG_CNTXTX1MSB_FIRSTBN_MASK	0x7fff0000
+
+
+/*
+ * Register <INGRESS_CONTEXT_0> - read-only
+ *
+ * In the case of GPON peripheral, DS flows may arrive interleaved.
+ * The BBH supports parallel reassembly of up to two interleaved flows (out
+ * of 256).
+ * For the reassembly process the BBH stores a double flow context.
+ */
+#define BBH_RX_DEBUG_CNTXTX0INGRESS	0x210
+
+/*
+ * In reassembly.
+ * Not relevant for Ethernet.
+*/
+#define  DEBUG_CNTXTX0INGRESS_INREASS_MASK	0x1
+
+/* SOP */
+#define  DEBUG_CNTXTX0INGRESS_SOP_MASK	0x10
+
+/* Priority */
+#define  DEBUG_CNTXTX0INGRESS_PRIORITY_SHIFT	6
+#define  DEBUG_CNTXTX0INGRESS_PRIORITY_MASK	0xc0
+
+/* Flow ID */
+#define  DEBUG_CNTXTX0INGRESS_FLOWID_SHIFT	8
+#define  DEBUG_CNTXTX0INGRESS_FLOWID_MASK	0xff00
+
+/* Current offset */
+#define  DEBUG_CNTXTX0INGRESS_CUROFFSET_SHIFT	16
+#define  DEBUG_CNTXTX0INGRESS_CUROFFSET_MASK	0x3fff0000
+
+
+/*
+ * Register <INGRESS_CONTEXT_1> - read-only
+ *
+ * In the case of GPON peripheral, DS flows may arrive interleaved.
+ * The BBH supports parallel reassembly of up to two interleaved flows (out
+ * of 256).
+ * For the reassembly process the BBH stores a double flow context.
+ */
+#define BBH_RX_DEBUG_CNTXTX1INGRESS	0x214
+
+/*
+ * In reassembly.
+ * Not relevant for Ethernet.
+*/
+#define  DEBUG_CNTXTX1INGRESS_INREASS_MASK	0x1
+
+/* SOP */
+#define  DEBUG_CNTXTX1INGRESS_SOP_MASK	0x10
+
+/* Priority */
+#define  DEBUG_CNTXTX1INGRESS_PRIORITY_SHIFT	6
+#define  DEBUG_CNTXTX1INGRESS_PRIORITY_MASK	0xc0
+
+/* Flow ID */
+#define  DEBUG_CNTXTX1INGRESS_FLOWID_SHIFT	8
+#define  DEBUG_CNTXTX1INGRESS_FLOWID_MASK	0xff00
+
+/* Current offset */
+#define  DEBUG_CNTXTX1INGRESS_CUROFFSET_SHIFT	16
+#define  DEBUG_CNTXTX1INGRESS_CUROFFSET_MASK	0x3fff0000
+
+
+/*
+ * Register <INPUT_BUF_USED_WORDS> - read-only
+ *
+ * Input buf used words
+ */
+#define BBH_RX_DEBUG_IBUW		0x218
+
+/* Used words */
+#define  DEBUG_IBUW_UW_SHIFT		0
+#define  DEBUG_IBUW_UW_MASK		0x7
+
+
+/*
+ * Register <BURST_BUF_USED_WORDS> - read-only
+ *
+ * Burst buf used words
+ */
+#define BBH_RX_DEBUG_BBUW		0x21c
+
+/* Used words */
+#define  DEBUG_BBUW_UW_SHIFT		0
+#define  DEBUG_BBUW_UW_MASK		0xf
+
+
+/*
+ * Register <COHERENCY_FIFO_USED_WORDS> - read-only
+ *
+ * Coherency FIFO used words
+ */
+#define BBH_RX_DEBUG_CFUW		0x220
+
+/* Used words */
+#define  DEBUG_CFUW_UW_SHIFT		0
+#define  DEBUG_CFUW_UW_MASK		0x3f
+
+
+/*
+ * Register <ACK_COUNTERS> - read-only
+ *
+ * The register reflects 2 ACK counters:
+ * SDMA, CONNECT
+ */
+#define BBH_RX_DEBUG_ACKCNT		0x224
+
+/* SDMA ACK counter */
+#define  DEBUG_ACKCNT_SDMA_SHIFT	0
+#define  DEBUG_ACKCNT_SDMA_MASK		0x1f
+
+/* Connect ACK counter */
+#define  DEBUG_ACKCNT_CONNECT_SHIFT	8
+#define  DEBUG_ACKCNT_CONNECT_MASK	0x1f00
+
+
+/*
+ * Register <COHERENCY_COUNTERS> - read-only
+ *
+ * The register reflects 2 pending coherency counters:
+ * Normal, Exclusive
+ */
+#define BBH_RX_DEBUG_COHERENCYCNT	0x228
+
+/* Normal */
+#define  DEBUG_COHERENCYCNT_NORMAL_SHIFT	0
+#define  DEBUG_COHERENCYCNT_NORMAL_MASK	0x1f
+
+/* Exclusive */
+#define  DEBUG_COHERENCYCNT_EXCLUSIVE_SHIFT	8
+#define  DEBUG_COHERENCYCNT_EXCLUSIVE_MASK	0x1f00
+
+
+/*
+ * Register <DEBUG_VECTOR> - read-only
+ *
+ * selected debug vector
+ */
+#define BBH_RX_DEBUG_DBGVEC		0x22c
+
+/* selected debug vector */
+#define  DEBUG_DBGVEC_DBGVEC_SHIFT	0
+#define  DEBUG_DBGVEC_DBGVEC_MASK	0x1fffff
+
+
+/*
+ * Register <UPLOAD_FIFO_USED_WORDS> - read-only
+ *
+ * Upload FIFO used words
+ */
+#define BBH_RX_DEBUG_UFUW		0x230
+
+/* Used words */
+#define  DEBUG_UFUW_UW_SHIFT		0
+#define  DEBUG_UFUW_UW_MASK		0x7
+
+
+/*
+ * Register <CREDIT_COUNTERS> - read-only
+ *
+ * This register holds 2 credit counters:
+ * Normal, Exclusive
+ */
+#define BBH_RX_DEBUG_CREDITCNT		0x234
+
+/* Normal */
+#define  DEBUG_CREDITCNT_NORMAL_SHIFT	0
+#define  DEBUG_CREDITCNT_NORMAL_MASK	0x1f
+
+/* Exclusive */
+#define  DEBUG_CREDITCNT_EXCLUSIVE_SHIFT	8
+#define  DEBUG_CREDITCNT_EXCLUSIVE_MASK	0x1f00
+
+
+/*
+ * Register <USED_SDMA_CD_CNT> - read-only
+ *
+ * Number of used SDMA CDs
+ */
+#define BBH_RX_DEBUG_SDMACNT		0x238
+
+/* Used CDs */
+#define  DEBUG_SDMACNT_UCD_SHIFT	0
+#define  DEBUG_SDMACNT_UCD_MASK		0x7f
+
+
+/*
+ * Register <CMD_FIFO_USED_WORDS> - read-only
+ *
+ * CMD FIFO used words
+ */
+#define BBH_RX_DEBUG_CMFUW		0x23c
+
+/* Used words */
+#define  DEBUG_CMFUW_UW_SHIFT		0
+#define  DEBUG_CMFUW_UW_MASK		0x7
+
+
+/*
+ * Registers <SRAM_BN_FIFO> - <x> is [ 0 => 15 ] - read-only
+ *
+ * The BBH RX hold a FIFO with 16 BN.
+ */
+#define BBH_RX_DEBUG_SBNFIFO(x)		(0x240 + (x) * 0x4)
+
+/* BN */
+#define  DEBUG_SBNFIFO_BNENTRY_SHIFT	0
+#define  DEBUG_SBNFIFO_BNENTRY_MASK	0x3fff
+
+/* SBN is Valid */
+#define  DEBUG_SBNFIFO_VALID_MASK	0x10000
+
+
+/*
+ * Registers <CMD_FIFO> - <x> is [ 0 => 3 ] - read-only
+ *
+ * The BBH RX hold a FIFO with 8 command.
+ */
+#define BBH_RX_DEBUG_CMDFIFO(x)		(0x280 + (x) * 0x4)
+
+/* CMD */
+#define  DEBUG_CMDFIFO_CMDENTRY_SHIFT	0
+#define  DEBUG_CMDFIFO_CMDENTRY_MASK	0xffffffff
+
+
+/*
+ * Registers <SRAM_BN_RECYCLE_FIFO> - <x> is [ 0 => 1 ] - read-only
+ *
+ * The BBH RX hold a recycle FIFO with up to 2 BN.
+ */
+#define BBH_RX_DEBUG_SBNRECYCLEFIFO(x)	(0x290 + (x) * 0x4)
+
+/* BN */
+#define  DEBUG_SBNRECYCLEFIFO_BNENTRY_SHIFT	0
+#define  DEBUG_SBNRECYCLEFIFO_BNENTRY_MASK	0x3fff
+
+/* SBN is Valid */
+#define  DEBUG_SBNRECYCLEFIFO_VALID_MASK	0x10000
+
+
+/*
+ * Register <COHERENCY_COUNTERS_METHOD2> - read-only
+ *
+ * Read of 4 coherency counters:
+ * CD CMD sent (1 per flow), EOP ACK received (1 per flow)
+ */
+#define BBH_RX_DEBUG_COHERENCYCNT2	0x2a0
+
+/* CD sent */
+#define  DEBUG_COHERENCYCNT2_CDSENT_SHIFT	0
+#define  DEBUG_COHERENCYCNT2_CDSENT_MASK	0x7f
+
+/* EOP ACK received */
+#define  DEBUG_COHERENCYCNT2_ACKRECEIVED_SHIFT	8
+#define  DEBUG_COHERENCYCNT2_ACKRECEIVED_MASK	0x7f00
+
+
+/*
+ * Register <SPECIAL_DROP_STATUS>
+ *
+ * Information of the following:
+ * - Dispatcher drop due to coherency FIFO full; - SDMA drop due to coherency
+ * method 2 counters over 63 (dec)
+ */
+#define BBH_RX_DEBUG_DROPSTATUS		0x2a4
+
+/*
+ * Dispatcher drop due to coherency FIFO full.
+ * Writing 1 to this bit clears it
+*/
+#define  DEBUG_DROPSTATUS_DISPSTATUS_MASK	0x1
+
+/*
+ * SDMA drop due to coherency method 2 counters over 63 (dec).
+ * Writing 1 to this bit clears it
+*/
+#define  DEBUG_DROPSTATUS_SDMASTATUS_MASK	0x2
+
+
+#endif /* ! XRDP_REGS_BBH_RX_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_bbh_tx.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_bbh_tx.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_bbh_tx.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_bbh_tx.h	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,2073 @@
+#ifndef XRDP_REGS_BBH_TX_H_
+#define XRDP_REGS_BBH_TX_H_
+
+/* relative to core */
+#define BBH_TX_OFFSET_0			0xd90000
+
+/* relative to wan_top */
+#define BBH_TX_OFFSET_1			0x0
+
+/* relative to wan_top */
+#define BBH_TX_OFFSET_2			0x2000
+
+/* relative to wan_top */
+#define BBH_TX_OFFSET_3			0x4000
+
+/* relative to wan_top */
+#define BBH_TX_OFFSET_4			0x6000
+
+/*
+ * Register <MAC_TYPE>
+ *
+ * The BBH supports working with different MAC types.
+ * Each MAC requires different interface and features.
+ * This register defines the type of MAC the BBH works with.
+ */
+#define BBH_TX_COMMON_CFGS_MACTYPE	0x0
+
+/* MAC type */
+#define  COMMON_CFGS_MACTYPE_TYPE_SHIFT	0
+#define  COMMON_CFGS_MACTYPE_TYPE_MASK	0x7
+
+
+/*
+ * Register <BB_CFG_1>
+ *
+ * Each BBH unit has its own position on the BB tree.
+ * This position defines the Route address when approaching the Runner,
+ * S/DMA or S/BPM.
+ * The route is determined by a dedicated generic logic which uses the
+ * source id of the destination.
+ */
+#define BBH_TX_COMMON_CFGS_BBCFG_1_TX	0x4
+
+/*
+ * source id.
+ * This id is used to determine the route to the module.
+*/
+#define  COMMON_CFGS_BBCFG_1_TX_DMASRC_SHIFT	0
+#define  COMMON_CFGS_BBCFG_1_TX_DMASRC_MASK	0x3f
+
+/*
+ * source id.
+ * This id is used to determine the route to the module.
+*/
+#define  COMMON_CFGS_BBCFG_1_TX_SDMASRC_SHIFT	8
+#define  COMMON_CFGS_BBCFG_1_TX_SDMASRC_MASK	0x3f00
+
+/*
+ * source id.
+ * This id is used to determine the route to the module.
+*/
+#define  COMMON_CFGS_BBCFG_1_TX_SBPMSRC_SHIFT	16
+#define  COMMON_CFGS_BBCFG_1_TX_SBPMSRC_MASK	0x3f0000
+
+/*
+ * source id.
+ * This id is used to determine the route to the module.
+*/
+#define  COMMON_CFGS_BBCFG_1_TX_FPMSRC_SHIFT	24
+#define  COMMON_CFGS_BBCFG_1_TX_FPMSRC_MASK	0x3f000000
+
+
+/*
+ * Register <BB_CFG_2>
+ *
+ * Each BBH unit has its own position on the BB tree.
+ * This position defines the Route address when approaching the Runner,
+ * S/DMA or S/BPM.
+ * The route is determined by a dedicated generic logic which uses the
+ * source id of the destination.
+ */
+#define BBH_TX_COMMON_CFGS_BBCFG_2_TX	0x8
+
+/*
+ * source id.
+ * This id is used to determine the route to the 1st (out of possible 2
+ * runners) which are responsible for sending PDs.
+*/
+#define  COMMON_CFGS_BBCFG_2_TX_PDRNR0SRC_SHIFT	0
+#define  COMMON_CFGS_BBCFG_2_TX_PDRNR0SRC_MASK	0x3f
+
+/*
+ * source id.
+ * This id is used to determine the route to the 2nd (out of possible 2
+ * runners) which are responsible for sending PDs.
+*/
+#define  COMMON_CFGS_BBCFG_2_TX_PDRNR1SRC_SHIFT	8
+#define  COMMON_CFGS_BBCFG_2_TX_PDRNR1SRC_MASK	0x3f00
+
+/*
+ * source id.
+ * This id is used to determine the route to the Runner that is responsible
+ * for sending status messages (WAN only).
+*/
+#define  COMMON_CFGS_BBCFG_2_TX_STSRNRSRC_SHIFT	16
+#define  COMMON_CFGS_BBCFG_2_TX_STSRNRSRC_MASK	0x3f0000
+
+/*
+ * source id.
+ * This id is used to determine the route to the Runner which is
+ * responsible for sending DBR/Ghost messages (WAN only).
+*/
+#define  COMMON_CFGS_BBCFG_2_TX_MSGRNRSRC_SHIFT	24
+#define  COMMON_CFGS_BBCFG_2_TX_MSGRNRSRC_MASK	0x3f000000
+
+
+/*
+ * Register <RD_ADDR_CFG>
+ *
+ * Configurations for determining the address to read from the DDR/PSRAm
+ */
+#define BBH_TX_COMMON_CFGS_DDRCFG_TX	0xc
+
+/* The data is arranged in the DDR in a fixed size buffers. */
+#define  COMMON_CFGS_DDRCFG_TX_BUFSIZE_SHIFT	0
+#define  COMMON_CFGS_DDRCFG_TX_BUFSIZE_MASK	0x7
+
+/* The packet offset byte resolution. */
+#define  COMMON_CFGS_DDRCFG_TX_BYTERESUL_MASK	0x8
+
+/* Static offset in 8-bytes resolution for non aggregated packets in DDR */
+#define  COMMON_CFGS_DDRCFG_TX_DDRTXOFFSET_SHIFT	4
+#define  COMMON_CFGS_DDRCFG_TX_DDRTXOFFSET_MASK	0x1ff0
+
+/*
+ * The size of the HN (Header number) in bytes.
+ * The BBH decides between size 0 and size 1 according to a bit in the PD
+*/
+#define  COMMON_CFGS_DDRCFG_TX_HNSIZE0_SHIFT	16
+#define  COMMON_CFGS_DDRCFG_TX_HNSIZE0_MASK	0x7f0000
+
+/*
+ * The size of the HN (Header number) in bytes.
+ * The BBH decides between size 0 and size 1 according to a bit in the PD
+*/
+#define  COMMON_CFGS_DDRCFG_TX_HNSIZE1_SHIFT	24
+#define  COMMON_CFGS_DDRCFG_TX_HNSIZE1_MASK	0x7f000000
+
+
+/*
+ * Registers <PD_RNR_CFG_1> - <x> is [ 0 => 1 ]
+ *
+ * Queue index address:
+ * The BBH requests a Packet descriptor from the Runner.
+ * The BBH writes the queue number in a predefined address at the Runner
+ * SRAM.
+ * The message serves also as a wake-up request to the Runner.
+ * This register defines the queue index address within the Runner address
+ * space.
+ * SKB address:
+ * When the packet is transmitted from absolute address, then, instead of
+ * releasing the BN, the BBH writes a 6 bits read counter into the Runner
+ * SRAM.
+ * It writes it into a pre-defined address + TCONT_NUM (for Ethernet
+ * TCONT_NUM = 0).
+ * This register defines the SKB free base address within the Runner.
+ *
+ * Note:
+ * all addresses are in 8 byte resolution.
+ * As the Runner memory is limited to 12 bits address, use the 12 lsb bits.
+ */
+#define BBH_TX_COMMON_CFGS_RNRCFG_1(x)	(0x10 + (x) * 0x4)
+
+/*
+ * Defines the TCONT address within the Runner address space.
+ * The address is in 8 bytes resolution.
+*/
+#define  COMMON_CFGS_RNRCFG_1_TCONTADDR_SHIFT	0
+#define  COMMON_CFGS_RNRCFG_1_TCONTADDR_MASK	0xffff
+
+/*
+ * Defines the SKB free address within the Runner address space.
+ * The address is in 8-bytes resolution.
+*/
+#define  COMMON_CFGS_RNRCFG_1_SKBADDR_SHIFT	16
+#define  COMMON_CFGS_RNRCFG_1_SKBADDR_MASK	0xffff0000
+
+
+/*
+ * Registers <PD_RNR_CFG_2> - <x> is [ 0 => 1 ]
+ *
+ * PD transfer process:
+ * -The Runner won't ACK the BBH; therefore the BBH won't wake the TX task.
+ * -The Runner will push the PDs into the BBH (without any wakeup from the
+ * BBH).
+ * -Each time that the BBH reads a PD from the PD FIFO, it will write the
+ * read pointer into a pre-defined address in the Runner.
+ * The pointer is 6 bits width (one bit larger than needed to distinguish
+ * between full and empty).
+ * -The Runner should manage the congestion over the PD FIFO (in the BBH)
+ * by reading the BBH read pointer prior to each PD write.
+ * -PD drop should be done by the Runner only.
+ * The BBH will drop PD when the FIFO is full and will count each drop.
+ * The BBH won't release the BN in this case.
+ * -There will be a full threshold, which can be smaller than the actual
+ * size of the FIFO.
+ * When the BBH will move from full to not full state, the BBH will wakeup
+ * the Runner.
+ * Note:
+ * all addresses are in 8 byte resolution.
+ * As the Runner memory is limited to 12 bits address, use the 12 lsb bits.
+ */
+#define BBH_TX_COMMON_CFGS_RNRCFG_2(x)	(0x18 + (x) * 0x4)
+
+/*
+ * This field defines the address in the Runner memory space to which the
+ * read pointer is written.
+ * The address is in 8-bytes resolution.
+*/
+#define  COMMON_CFGS_RNRCFG_2_PTRADDR_SHIFT	0
+#define  COMMON_CFGS_RNRCFG_2_PTRADDR_MASK	0xffff
+
+/* The number of the task that is responsible for sending PDs to the BBH */
+#define  COMMON_CFGS_RNRCFG_2_TASK_SHIFT	16
+#define  COMMON_CFGS_RNRCFG_2_TASK_MASK	0xf0000
+
+
+/*
+ * Register <DMA_CFG>
+ *
+ * The BBH reads the packet data from the DDR in chunks (with a maximal
+ * size of 128 bytes).
+ * For each chunk the BBH writes a read request (descriptor) into the DMA
+ * memory space.
+ * The read descriptors are arranged in a predefined space in the DMA
+ * memory and managed in a cyclic FIFO style.
+ * A special configuration limits the maximum number of read requests.
+ */
+#define BBH_TX_COMMON_CFGS_DMACFG_TX	0x20
+
+/*
+ * Defines the base address of the read request FIFO within the DMA address
+ * space.
+ * The value should be identical to the relevant configuration in the DMA.
+*/
+#define  COMMON_CFGS_DMACFG_TX_DESCBASE_SHIFT	0
+#define  COMMON_CFGS_DMACFG_TX_DESCBASE_MASK	0x3f
+
+/* The size of the BBH read requests FIFO inside the DMA */
+#define  COMMON_CFGS_DMACFG_TX_DESCSIZE_SHIFT	6
+#define  COMMON_CFGS_DMACFG_TX_DESCSIZE_MASK	0xfc0
+
+/* Defines the maximum allowed number of on-the-fly read requests. */
+#define  COMMON_CFGS_DMACFG_TX_MAXREQ_SHIFT	16
+#define  COMMON_CFGS_DMACFG_TX_MAXREQ_MASK	0x3f0000
+
+/*
+ * When asserted, this bit forces urgent priority on the EPON read requests
+ * towards the DMA (relevant only for EPON BBH)
+*/
+#define  COMMON_CFGS_DMACFG_TX_EPNURGNT_MASK	0x1000000
+
+/*
+ * When asserted, this bit forces urgent priority on read requests of a
+ * jumbo packet (>2K)
+*/
+#define  COMMON_CFGS_DMACFG_TX_JUMBOURGNT_MASK	0x2000000
+
+
+/*
+ * Register <SDMA_CFG>
+ *
+ * The BBH reads the packet data from the PSRAM in chunks (with a maximal
+ * size of 128 bytes).
+ * For each chunk the BBH writes a read request (descriptor) into the SDMA
+ * memory space.
+ * The read descriptors are arranged in a predefined space in the SDMA
+ * memory and managed in a cyclic FIFO style.
+ * A special configuration limits the maximum number of read requests.
+ */
+#define BBH_TX_COMMON_CFGS_SDMACFG_TX	0x24
+
+/*
+ * Defines the base address of the read request FIFO within the DMA address
+ * space.
+ * The value should be identical to the relevant configuration in the DMA.
+*/
+#define  COMMON_CFGS_SDMACFG_TX_DESCBASE_SHIFT	0
+#define  COMMON_CFGS_SDMACFG_TX_DESCBASE_MASK	0x3f
+
+/* The size of the BBH read requests FIFO inside the DMA */
+#define  COMMON_CFGS_SDMACFG_TX_DESCSIZE_SHIFT	6
+#define  COMMON_CFGS_SDMACFG_TX_DESCSIZE_MASK	0xfc0
+
+/* Defines the maximum allowed number of on-the-fly read requests. */
+#define  COMMON_CFGS_SDMACFG_TX_MAXREQ_SHIFT	16
+#define  COMMON_CFGS_SDMACFG_TX_MAXREQ_MASK	0x3f0000
+
+/*
+ * When asserted, this bit forces urgent priority on the EPON read requests
+ * towards the DMA (relevant only for EPON BBH)
+*/
+#define  COMMON_CFGS_SDMACFG_TX_EPNURGNT_MASK	0x1000000
+
+/*
+ * When asserted, this bit forces urgent priority on Jumbo packets (>2k)
+ * read requests
+*/
+#define  COMMON_CFGS_SDMACFG_TX_JUMBOURGNT_MASK	0x2000000
+
+
+/*
+ * Register <SBPM_CFG>
+ *
+ * When packet transmission is done, the BBH releases the SBPM buffers.
+ * This register defines which release command is used:
+ * 1. Normal free with context
+ * 2. Special free with context
+ * 3. free without context
+ *
+ */
+#define BBH_TX_COMMON_CFGS_SBPMCFG	0x28
+
+/* When this bit is enabled, the BBH will use free without context command. */
+#define  COMMON_CFGS_SBPMCFG_FREENOCNTXT_MASK	0x1
+
+/*
+ * When this bit is enabled, the BBH will use special free with context
+ * command.
+ * This bit is relevant only if free without context_en is configured to 0.
+*/
+#define  COMMON_CFGS_SBPMCFG_SPECIALFREE_MASK	0x2
+
+/* maximum number of pending on the fly get next commands */
+#define  COMMON_CFGS_SBPMCFG_MAXGN_SHIFT	8
+#define  COMMON_CFGS_SBPMCFG_MAXGN_MASK	0x1f00
+
+
+/*
+ * Registers <DDR_TM_BASE_LOW> - <x> is [ 0 => 1 ]
+ *
+ * The BBH calculate the DDR physical address according to the Buffer
+ * number and buffer size and then adds the DDR TM base.
+ * The DDR TM address space is divided to two - coherent and non coherent.
+ * The first register in this array defines the base address of the non
+ * coherent space and the second is for the coherent.
+ * The value of this register should match the relevant registers value in
+ * the BBH RX, QM and the Runner.
+ */
+#define BBH_TX_COMMON_CFGS_DDRTMBASEL(x)	(0x2c + (x) * 0x4)
+
+/*
+ * DDR TM base.
+ * The address is in bytes resolution.
+ * The address should be aligned to 128 bytes.
+*/
+#define  COMMON_CFGS_DDRTMBASEL_DDRTMBASE_SHIFT	0
+#define  COMMON_CFGS_DDRTMBASEL_DDRTMBASE_MASK	0xffffffff
+
+
+/*
+ * Registers <DDR_TM_BASE_HIGH> - <x> is [ 0 => 1 ]
+ *
+ * The BBH calculate the DDR physical address according to the Buffer
+ * number and buffer size and then adds the DDR TM base.
+ * The DDR TM address space is divided to two - coherent and non coherent.
+ * The first register in this array defines the base address of the non
+ * coherent space and the second is for the coherent.
+ * The value of this register should match the relevant registers value in
+ * the BBH RX, QM and the Runner.
+ */
+#define BBH_TX_COMMON_CFGS_DDRTMBASEH(x)	(0x34 + (x) * 0x4)
+
+/* MSB of DDR TM base. */
+#define  COMMON_CFGS_DDRTMBASEH_DDRTMBASE_SHIFT	0
+#define  COMMON_CFGS_DDRTMBASEH_DDRTMBASE_MASK	0xff
+
+
+/*
+ * Register <DATA_FIFO_CTRL>
+ *
+ * The BBH orders data both from DDR and PSRAM.
+ * The returned data is stored in two FIFOs for reordering.
+ * The two FIFOs are implemented in a single RAM.
+ * This register defines the division of the RAM to two FIFOs.
+ */
+#define BBH_TX_COMMON_CFGS_DFIFOCTRL	0x3c
+
+/*
+ * The size of the PSRAM data FIFO in 8 bytes resolution.
+ * The BBH uses this information for determining the amount of data that
+ * can be ordered from the PSRAM.
+*/
+#define  COMMON_CFGS_DFIFOCTRL_PSRAMSIZE_SHIFT	0
+#define  COMMON_CFGS_DFIFOCTRL_PSRAMSIZE_MASK	0x3ff
+
+/*
+ * The size of the DDR data FIFO in 8 bytes resolution.
+ * The BBH uses this information for determining the amount of data that
+ * can be ordered from the DDR.
+*/
+#define  COMMON_CFGS_DFIFOCTRL_DDRSIZE_SHIFT	10
+#define  COMMON_CFGS_DFIFOCTRL_DDRSIZE_MASK	0xffc00
+
+/*
+ * the base address of the PSRAM data FIFO in 8 bytes resolution.
+ * The DDR data FIFO base address is always 0.
+ * In case the whole RAM is to be dedicated to PSRAM data, the base should
+ * be 0 as well, and the DDR FIFO size should be configured to 0.
+*/
+#define  COMMON_CFGS_DFIFOCTRL_PSRAMBASE_SHIFT	20
+#define  COMMON_CFGS_DFIFOCTRL_PSRAMBASE_MASK	0x3ff00000
+
+
+/*
+ * Register <ARB_CFG>
+ *
+ * configurations related to different arbitration processes (ordering PDs,
+ * ordering data)
+ */
+#define BBH_TX_COMMON_CFGS_ARB_CFG	0x40
+
+/*
+ * this configuration determines whether to give high priority to a current
+ * transmitting queue or not.
+*/
+#define  COMMON_CFGS_ARB_CFG_HIGHTRXQ_MASK	0x1
+
+
+/*
+ * Register <BB_ROUTE_OVERRIDE>
+ *
+ * override configuration for the route of one of the peripherals
+ * (DMA/SDMA/FPM/SBPM/Runners)
+ */
+#define BBH_TX_COMMON_CFGS_BBROUTE	0x44
+
+/* route address */
+#define  COMMON_CFGS_BBROUTE_ROUTE_SHIFT	0
+#define  COMMON_CFGS_BBROUTE_ROUTE_MASK	0x3ff
+
+/* destination source id */
+#define  COMMON_CFGS_BBROUTE_DEST_SHIFT	10
+#define  COMMON_CFGS_BBROUTE_DEST_MASK	0xfc00
+
+/* enable */
+#define  COMMON_CFGS_BBROUTE_EN_MASK	0x10000
+
+
+/*
+ * Registers <Q_TO_RNR> - <x> is [ 0 => 19 ]
+ *
+ * configuration which queue is managed by each of the two runners.
+ * Each register in this array configures 2 queues.
+ */
+#define BBH_TX_COMMON_CFGS_Q2RNR(x)	(0x48 + (x) * 0x4)
+
+/* Q0 configuration */
+#define  COMMON_CFGS_Q2RNR_Q0_MASK	0x1
+
+/* Q1 configuration */
+#define  COMMON_CFGS_Q2RNR_Q1_MASK	0x2
+
+
+/*
+ * Register <PER_Q_TASK>
+ *
+ * which task in the runner to wake-up when requesting a PD for a certain
+ * q.
+ * This register holds the task number of the first 8 queues.
+ * For queues 8-40 (if they exist) the task that will be waking is the one
+ * appearing in the PD_RNR_CFG regs, depending on which runner this queue
+ * is associated with.
+ */
+#define BBH_TX_COMMON_CFGS_PERQTASK	0xa0
+
+#define  COMMON_CFGS_PERQTASK_TASKx_SHIFT(x)	(4 * (x))
+#define  COMMON_CFGS_PERQTASK_TASKx_MASK(x)	(0xf << (4 * (x)))
+
+
+/*
+ * Register <TX_RESET_COMMAND>
+ *
+ * This register enables reset of internal units (for possible WA
+ * purposes).
+ */
+#define BBH_TX_COMMON_CFGS_TXRSTCMD	0xb0
+
+/*
+ * Writing 1 to this register will reset the segmentation context table.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  COMMON_CFGS_TXRSTCMD_CNTXTRST_MASK	0x1
+
+/*
+ * Writing 1 to this register will reset the PDs FIFOs.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  COMMON_CFGS_TXRSTCMD_PDFIFORST_MASK	0x2
+
+/*
+ * Writing 1 to this register will reset the DMA write pointer.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  COMMON_CFGS_TXRSTCMD_DMAPTRRST_MASK	0x4
+
+/*
+ * Writing 1 to this register will reset the SDMA write pointer.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+ * This register is relevant only for Ethernet.
+*/
+#define  COMMON_CFGS_TXRSTCMD_SDMAPTRRST_MASK	0x8
+
+/*
+ * Writing 1 to this register will reset the BPM FIFO.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  COMMON_CFGS_TXRSTCMD_BPMFIFORST_MASK	0x10
+
+/*
+ * Writing 1 to this register will reset the SBPM FIFO.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+ * This register is relevant only for Ethernet.
+*/
+#define  COMMON_CFGS_TXRSTCMD_SBPMFIFORST_MASK	0x20
+
+/*
+ * Writing 1 to this register will reset the order keeper FIFO.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+ * This register is relevant only for Ethernet.
+*/
+#define  COMMON_CFGS_TXRSTCMD_OKFIFORST_MASK	0x40
+
+/*
+ * Writing 1 to this register will reset the DDR data FIFO.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+ * This register is relevant only for Ethernet.
+*/
+#define  COMMON_CFGS_TXRSTCMD_DDRFIFORST_MASK	0x80
+
+/*
+ * Writing 1 to this register will reset the SRAM data FIFO.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+ * This register is relevant only for Ethernet.
+*/
+#define  COMMON_CFGS_TXRSTCMD_SRAMFIFORST_MASK	0x100
+
+/*
+ * Writing 1 to this register will reset the SKB pointers.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  COMMON_CFGS_TXRSTCMD_SKBPTRRST_MASK	0x200
+
+/*
+ * Writing 1 to this register will reset the EPON status FIFOs (per queue
+ * 32 fifos).
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  COMMON_CFGS_TXRSTCMD_STSFIFORST_MASK	0x400
+
+/*
+ * Writing 1 to this register will reset the EPON request FIFO (8 entries
+ * FIFO that holds the packet requests from the EPON MAC).
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  COMMON_CFGS_TXRSTCMD_REQFIFORST_MASK	0x800
+
+/*
+ * Writing 1 to this register will reset the EPON/GPON MSG FIFO. The reset is
+ * done immediately.
+ * Reading this register will always return 0.
+*/
+#define  COMMON_CFGS_TXRSTCMD_MSGFIFORST_MASK	0x1000
+
+/*
+ * Writing 1 to this register will reset the GET NEXT FIFOs. The reset is
+ * done immediately.
+ * Reading this register will always return 0.
+*/
+#define  COMMON_CFGS_TXRSTCMD_GNXTFIFORST_MASK	0x2000
+
+/*
+ * Writing 1 to this register will reset the FIRST BN FIFOs. The reset is
+ * done immediately.
+ * Reading this register will always return 0.
+*/
+#define  COMMON_CFGS_TXRSTCMD_FBNFIFORST_MASK	0x4000
+
+
+/*
+ * Register <DEBUG_SELECT>
+ *
+ * This register selects 1 of 8 debug vectors.
+ * The selected vector is reflected to DBGOUTREG.
+ */
+#define BBH_TX_COMMON_CFGS_DBGSEL	0xb4
+
+/*
+ * This register selects 1 of 8 debug vectors.
+ * The selected vector is reflected to DBGOUTREG.
+*/
+#define  COMMON_CFGS_DBGSEL_DBGSEL_SHIFT	0
+#define  COMMON_CFGS_DBGSEL_DBGSEL_MASK	0x1f
+
+
+/*
+ * Register <CLOCK_GATE_CONTROL>
+ *
+ * Clock Gate control register including timer config and bypass control
+ */
+#define BBH_TX_COMMON_CFGS_CLK_GATE_CNTRL	0xb8
+
+/*
+ * If set to 1b1 will disable the clock gate logic such to always enable
+ * the clock
+*/
+#define  COMMON_CFGS_CLK_GATE_CNTRL_BYPASS_CLK_GATE_MASK	0x1
+
+/*
+ * For how long should the clock stay active once all conditions for clock
+ * disable are met.
+*/
+#define  COMMON_CFGS_CLK_GATE_CNTRL_TIMER_VAL_SHIFT	8
+#define  COMMON_CFGS_CLK_GATE_CNTRL_TIMER_VAL_MASK	0xff00
+
+/*
+ * Enables the keep alive logic which will periodically enable the clock to
+ * assure that no deadlock of clock being removed completely will occur
+*/
+#define  COMMON_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_EN_MASK	0x10000
+
+/*
+ * If the KEEP alive option is enabled the field will determine for how
+ * many cycles should the clock be active
+*/
+#define  COMMON_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_SHIFT	20
+#define  COMMON_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_MASK	0x700000
+
+/*
+ * If the KEEP alive option is enabled this field will determine for how
+ * many cycles should the clock be disabled (minus the
+ * KEEP_ALIVE_INTERVAL). So KEEP_ALIVE_CYCLE must be larger than
+ * KEEP_ALIVE_INTERVAL.
+*/
+#define  COMMON_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_SHIFT	24
+#define  COMMON_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_MASK	0xff000000
+
+
+/*
+ * Register <GENERAL_PURPOSE_Register>
+ *
+ * general purpose register
+ */
+#define BBH_TX_COMMON_CFGS_GPR		0xbc
+
+/* general purpose register */
+#define  COMMON_CFGS_GPR_GPR_SHIFT	0
+#define  COMMON_CFGS_GPR_GPR_MASK	0xffffffff
+
+
+/*
+ * Registers <PD_FIFO_BASE> - <x> is [ 0 => 19 ]
+ *
+ * The BBH manages 40 queues for GPON or 32 queues for EPON (1 for each
+ * TCONT/LLID).
+ * For each queue it manages a PD FIFO.
+ * A total of 256 PDs are available for all queues.
+ * For each Queue the SW configures the base and the size within these 256
+ * PDs.
+ * The size of the Status FIFO, 1st BN FIFO and get-next FIFO is the same
+ * as the size of the PD FIFO of each queue.
+ * each register in this array defines the PD FIFO base of 2 queues.
+ */
+#define BBH_TX_WAN_CFGS_PDBASE(x)	(0x100 + (x) * 0x4)
+
+/* The base of PD FIFO for queue 0. */
+#define  WAN_CFGS_PDBASE_FIFOBASE0_SHIFT	0
+#define  WAN_CFGS_PDBASE_FIFOBASE0_MASK	0x1ff
+
+/* The base of PD FIFO for queue 1. */
+#define  WAN_CFGS_PDBASE_FIFOBASE1_SHIFT	16
+#define  WAN_CFGS_PDBASE_FIFOBASE1_MASK	0x1ff0000
+
+
+/*
+ * Registers <PD_FIFO_SIZE> - <x> is [ 0 => 19 ]
+ *
+ * The BBH manages 40 queues for GPON and 32 queues for EPON (FIFO per
+ * TCONT/LLID).
+ * For each queue it manages a PD FIFO.
+ * A total of 256 PDs are available for all queues.
+ * For each Queue the SW configures the base and the size within these.
+ * each register in this array defines the PD FIFO size of 2 queues.
+ */
+#define BBH_TX_WAN_CFGS_PDSIZE(x)	(0x150 + (x) * 0x4)
+
+/*
+ * The size of PD FIFO for queue 0.
+ * A value of n refers to n+1.
+ * For GPON, the max value is 0x7. For EPON, the max value is 0xf.
+*/
+#define  WAN_CFGS_PDSIZE_FIFOSIZE0_SHIFT	0
+#define  WAN_CFGS_PDSIZE_FIFOSIZE0_MASK	0x1ff
+
+/*
+ * The size of PD FIFO for queue 1.
+ * A value of n refers to n+1.
+*/
+#define  WAN_CFGS_PDSIZE_FIFOSIZE1_SHIFT	16
+#define  WAN_CFGS_PDSIZE_FIFOSIZE1_MASK	0x1ff0000
+
+
+/*
+ * Registers <PD_WKUP_THRESH> - <x> is [ 0 => 19 ]
+ *
+ * When a FIFO occupancy is above this wakeup threshold, the BBH will not
+ * wake-up the Runner for sending a new PD.
+ * This threshold does not represent the actual size of the FIFO.
+ * If a PD will arrive from the Runner when the FIFO is above the
+ * threshold, it will not be dropped unless the FIFO is actually full.
+ * Each register defines the threshold of 2 queues.
+ */
+#define BBH_TX_WAN_CFGS_PDWKUPH(x)	(0x200 + (x) * 0x4)
+
+/*
+ * The wakeup threshold of the PD FIFO for queue 0.
+ * A value of n refers to n+1.
+ * Relevant only for EPON BBH.
+*/
+#define  WAN_CFGS_PDWKUPH_WKUPTHRESH0_SHIFT	0
+#define  WAN_CFGS_PDWKUPH_WKUPTHRESH0_MASK	0xff
+
+/*
+ * The wakeup threshold of the PD FIFO for queue 1.
+ * A value of n refers to n+1.
+ * Relevant only for EPON BBH.
+*/
+#define  WAN_CFGS_PDWKUPH_WKUPTHRESH1_SHIFT	16
+#define  WAN_CFGS_PDWKUPH_WKUPTHRESH1_MASK	0xff0000
+
+
+/*
+ * Registers <PD_BYTES_THRESHOLD> - <x> is [ 0 => 19 ]
+ *
+ * The BBH requests PDs from the Runner and maintains a pre-fetch PDs FIFO.
+ * The PDs pre fetch is limited either by the PD FIFO configurable size or
+ * according to the total number of bytes (deducting bytes already
+ * requested/transmitted) for preventing HOL.
+ * Full configuration for the first 8 TCONT and one configuration for the
+ * rest (TCONTs 8-39).
+ * Each register in this array defines the threshold of 2 queues.
+ */
+#define BBH_TX_WAN_CFGS_PD_BYTE_TH(x)	(0x250 + (x) * 0x4)
+
+/*
+ * Defines the number of bytes for PDs pre fetch limited according to the
+ * total number of bytes.
+ * The value is in 8-bytes resolution.
+*/
+#define  WAN_CFGS_PD_BYTE_TH_PDLIMIT0_SHIFT	0
+#define  WAN_CFGS_PD_BYTE_TH_PDLIMIT0_MASK	0xffff
+
+/*
+ * Defines the number of bytes for PDs pre fetch limited according to the
+ * total number of bytes.
+ * The value is in 8-bytes resolution.
+*/
+#define  WAN_CFGS_PD_BYTE_TH_PDLIMIT1_SHIFT	16
+#define  WAN_CFGS_PD_BYTE_TH_PDLIMIT1_MASK	0xffff0000
+
+
+/*
+ * Register <PD_BYTES_THRESHOLD_EN>
+ *
+ * The BBH requests PDs from the Runner and maintains a pre-fetch PDs FIFO.
+ * The PDs pre fetch is limited either by the PD FIFO configurable size or
+ * according to the total number of bytes (deducting bytes already
+ * requested/transmitted) for preventing HOL.
+ * Full configuration for the first 8 TCONT and one configuration per group
+ * of 8 TCONTs for the rest.
+ */
+#define BBH_TX_WAN_CFGS_PD_BYTE_TH_EN	0x300
+
+/*
+ * This bit enables the above feature (PDs pre fetch limited according to
+ * the total number of bytes).
+*/
+#define  WAN_CFGS_PD_BYTE_TH_EN_PDLIMITEN_MASK	0x1
+
+
+/*
+ * Register <PD_EMPTY_THRESHOLD>
+ *
+ * The BBH manages 32 queues for EPON (FIFO per LLID).
+ * For each queue it manages a PD FIFO.
+ * Usually, the BBH orders PDs from the Runner in RR between all queues.
+ * In EPON BBH, if a FIFO occupancy is below this threshold, the queue will
+ * have higher priority in PD ordering arbitration (with RR between all the
+ * empty queues).
+ * This configuration is global for all queues.
+ * Relevant only for EPON BBH.
+ */
+#define BBH_TX_WAN_CFGS_PDEMPTY		0x304
+
+/*
+ * EPON PD FIFO empty threshold.
+ * A queue whose PD FIFO occupancy is below this threshold will have
+ * high priority in PD ordering arbitration.
+*/
+#define  WAN_CFGS_PDEMPTY_EMPTY_SHIFT	0
+#define  WAN_CFGS_PDEMPTY_EMPTY_MASK	0xff
+
+
+/*
+ * Registers <STS_RNR_CFG_1> - <x> is [ 0 => 1 ]
+ *
+ * Queue index address:
+ * The BBH requests a Packet descriptor from the Runner.
+ * The BBH writes the queue number in a predefined address at the Runner
+ * SRAM.
+ * The message serves also as a wake-up request to the Runner.
+ * This register defines the queue index address within the Runner address
+ * space.
+ * SKB address:
+ * When the packet is transmitted from absolute address, then, instead of
+ * releasing the BN, the BBH writes a 6 bits read counter into the Runner
+ * SRAM.
+ * It writes it into a pre-defined address + TCONT_NUM (for Ethernet
+ * TCONT_NUM = 0).
+ * This register defines the SKB free base address within the Runner
+ * address.
+ * Note:
+ * all addresses are in 8 byte resolution.
+ * As the Runner memory is limited to 12 bits address, use the 12 lsb bits.
+ */
+#define BBH_TX_WAN_CFGS_STSRNRCFG_1(x)	(0x310 + (x) * 0x4)
+
+/*
+ * Defines the TCONT address within the Runner address space.
+ * The address is in 8 bytes resolution.
+*/
+#define  WAN_CFGS_STSRNRCFG_1_TCONTADDR_SHIFT	0
+#define  WAN_CFGS_STSRNRCFG_1_TCONTADDR_MASK	0xffff
+
+
+/*
+ * Registers <STS_RNR_CFG_2> - <x> is [ 0 => 1 ]
+ *
+ * PD transfer process:
+ * -The Runner wont ACK the BBH; therefore the BBH wont wake the TX task.
+ * -The Runner will push the PDs into the BBH (without any wakeup from the
+ * BBH).
+ * -Each time that the BBH reads a PD from the PD FIFO, it will write the
+ * read pointer into a pre-defined address in the Runner.
+ * The pointer is 6 bits width (one bit larger than needed to distinguish
+ * between full and empty).
+ * -The Runner should manage the congestion over the PD FIFO (in the BBH)
+ * by reading the BBH read pointer prior to each PD write.
+ * -PD drop should be done by the Runner only.
+ * The BBH will drop PD when the FIFO is full and will count each drop.
+ * The BBH wont release the BN in this case.
+ * -There will be a full threshold, which can be smaller than the actual
+ * size of the FIFO.
+ * When the BBH will move from full to not full state, the BBH will wakeup
+ * the Runner.
+ * Note:
+ * all addresses are in 8 byte resolution.
+ * As the Runner memory is limited to 12 bits address, use the 12 lsb bits.
+ */
+#define BBH_TX_WAN_CFGS_STSRNRCFG_2(x)	(0x320 + (x) * 0x4)
+
+/*
+ * This field defines the address in the Runner memory space to which the
+ * read pointer is written.
+ * The address is in 8-bytes resolution.
+*/
+#define  WAN_CFGS_STSRNRCFG_2_PTRADDR_SHIFT	0
+#define  WAN_CFGS_STSRNRCFG_2_PTRADDR_MASK	0xffff
+
+/* The number of the task that is responsible for sending PDs to the BBH */
+#define  WAN_CFGS_STSRNRCFG_2_TASK_SHIFT	16
+#define  WAN_CFGS_STSRNRCFG_2_TASK_MASK	0xf0000
+
+
+/*
+ * Registers <MSG_RNR_CFG_1> - <x> is [ 0 => 1 ]
+ *
+ * Queue index address:
+ * The BBH requests a Packet descriptor from the Runner.
+ * The BBH writes the queue number in a predefined address at the Runner
+ * SRAM.
+ * The message serves also as a wake-up request to the Runner.
+ * This register defines the queue index address within the Runner address
+ * space.
+ * SKB address:
+ * When the packet is transmitted from absolute address, then, instead of
+ * releasing the BN, the BBH writes a 6 bits read counter into the Runner
+ * SRAM.
+ * It writes it into a pre-defined address + TCONT_NUM (for Ethernet
+ * TCONT_NUM = 0).
+ * This register defines the SKB free base address within the Runner
+ * address.
+ * Note:
+ * all addresses are in 8 byte resolution.
+ * As the Runner memory is limited to 12 bits address, use the 12 lsb bits.
+ */
+#define BBH_TX_WAN_CFGS_MSGRNRCFG_1(x)	(0x330 + (x) * 0x4)
+
+/*
+ * Defines the TCONT address within the Runner address space.
+ * The address is in 8 bytes resolution.
+*/
+#define  WAN_CFGS_MSGRNRCFG_1_TCONTADDR_SHIFT	0
+#define  WAN_CFGS_MSGRNRCFG_1_TCONTADDR_MASK	0xffff
+
+
+/*
+ * Registers <MSG_RNR_CFG_2> - <x> is [ 0 => 1 ]
+ *
+ * PD transfer process:
+ * -The Runner wont ACK the BBH; therefore the BBH wont wake the TX task.
+ * -The Runner will push the PDs into the BBH (without any wakeup from the
+ * BBH).
+ * -Each time that the BBH reads a PD from the PD FIFO, it will write the
+ * read pointer into a pre-defined address in the Runner.
+ * The pointer is 6 bits width (one bit larger than needed to distinguish
+ * between full and empty).
+ * -The Runner should manage the congestion over the PD FIFO (in the BBH)
+ * by reading the BBH read pointer prior to each PD write.
+ * -PD drop should be done by the Runner only.
+ * The BBH will drop PD when the FIFO is full and will count each drop.
+ * The BBH wont release the BN in this case.
+ * -There will be a full threshold, which can be smaller than the actual
+ * size of the FIFO.
+ * When the BBH will move from full to not full state, the BBH will wakeup
+ * the Runner.
+ * Note:
+ * all addresses are in 8 byte resolution.
+ * As the Runner memory is limited to 12 bits address, use the 12 lsb bits.
+ */
+#define BBH_TX_WAN_CFGS_MSGRNRCFG_2(x)	(0x340 + (x) * 0x4)
+
+/*
+ * This field defines the address in the Runner memory space to which the
+ * read pointer is written.
+ * The address is in 8-bytes resolution.
+*/
+#define  WAN_CFGS_MSGRNRCFG_2_PTRADDR_SHIFT	0
+#define  WAN_CFGS_MSGRNRCFG_2_PTRADDR_MASK	0xffff
+
+/* The number of the task that is responsible for sending PDs to the BBH */
+#define  WAN_CFGS_MSGRNRCFG_2_TASK_SHIFT	16
+#define  WAN_CFGS_MSGRNRCFG_2_TASK_MASK	0xf0000
+
+
+/*
+ * Register <EPN_CFG>
+ *
+ * Configurations related to EPON MAC.
+ */
+#define BBH_TX_WAN_CFGS_EPNCFG		0x350
+
+/*
+ * In case of fatal length error - a mismatch between the request message
+ * from MAC and its relevant PD from Runner - the BBH can stop performing
+ * or continue regardless of the error.
+ * The error is also reflected to the SW in a counter.
+*/
+#define  WAN_CFGS_EPNCFG_STPLENERR_MASK	0x1
+
+/*
+ * configures the width of the comparison of the packet length.
+ * The length field in the EPON request interface is 11 bit, while it is 14
+ * bit in the pd.
+ * If this bit is 0, then the comparison of the length will be between the
+ * 11 bit of the interface and the 11 lsb bits of the pd.
+ * If this bit is 1, the comparison will be done between the 11 bits of the
+ * interface, concatenated with 3 zeros and the 14 bits of the pd
+*/
+#define  WAN_CFGS_EPNCFG_CMP_WIDTH_MASK	0x2
+
+/*
+ * determines whether the BBH will consider the sts_full vector state when
+ * pushing STS messages to the MAC or not.
+ * The status fifos inside the MAC should never go full as they are mirror
+ * of the BBH PD FIFOs, but in cases where the MAC design behaves different
+ * than expected, we want the BBH to be able to operate as in 1G EPON mode
+*/
+#define  WAN_CFGS_EPNCFG_CONSIDERFULL_MASK	0x4
+
+/*
+ * configuration whether to add 4 bytes per packet to the length received
+ * in the status message from the Runner so the MAC would know the actual
+ * length to be transmitted.
+*/
+#define  WAN_CFGS_EPNCFG_ADDCRC_MASK	0x8
+
+
+/*
+ * Register <FLOW2PORT>
+ *
+ * interface for SW to access the flow id to port-id table
+ */
+#define BBH_TX_WAN_CFGS_FLOW2PORT	0x354
+
+/*
+ * write data.
+ * 15:0 - port-id - default is 0x0000
+ * 16 - regenerate CRC - enabled by default
+ * 17 - enc enable - disabled by default
+*/
+#define  WAN_CFGS_FLOW2PORT_WDATA_SHIFT	0
+#define  WAN_CFGS_FLOW2PORT_WDATA_MASK	0x3ffff
+
+/* address */
+#define  WAN_CFGS_FLOW2PORT_A_SHIFT	18
+#define  WAN_CFGS_FLOW2PORT_A_MASK	0x3fc0000
+
+/* rd/wr cmd */
+#define  WAN_CFGS_FLOW2PORT_CMD_MASK	0x4000000
+
+
+/*
+ * Register <TS>
+ *
+ * The BBH is responsible for indicating the EPON MAC that the current
+ * packet that is being transmitted is a 1588 packet.
+ * The BBH gets the 1588 parameters in the PD and forward it to the MAC.
+ * This register is used to enable this feature.
+ */
+#define BBH_TX_WAN_CFGS_TS		0x358
+
+/* 1588 enable */
+#define  WAN_CFGS_TS_EN_MASK		0x1
+
+
+/*
+ * Register <DSL_MAXWLEN>
+ *
+ * VDSL max word len. Relevant only for VDSL BBH
+ */
+#define BBH_TX_WAN_CFGS_MAXWLEN		0x360
+
+/* VDSL max word len */
+#define  WAN_CFGS_MAXWLEN_MAXWLEN_SHIFT	0
+#define  WAN_CFGS_MAXWLEN_MAXWLEN_MASK	0xffff
+
+
+/*
+ * Register <DSL_FLUSH>
+ *
+ * VDSL Flush indication. Relevant only for VDSL BBH
+ */
+#define BBH_TX_WAN_CFGS_FLUSH		0x364
+
+/* VDSL flush */
+#define  WAN_CFGS_FLUSH_FLUSH_SHIFT	0
+#define  WAN_CFGS_FLUSH_FLUSH_MASK	0xffff
+
+/* soft reset */
+#define  WAN_CFGS_FLUSH_SRST_N_MASK	0x80000000
+
+
+/*
+ * Register <PD_FIFO_BASE>
+ *
+ * The BBH manages 40 queues for GPON or 32 queues for EPON (1 for each
+ * TCONT/LLID).
+ * For each queue it manages a PD FIFO.
+ * A total of 256 PDs are available for all queues.
+ * For each Queue the SW configures the base and the size within these 256
+ * PDs.
+ * The size of the 1st BN FIFO and get-next FIFO is the same as the size of
+ * the PD FIFO of each queue.
+ * each register in this array defines the PD FIFO base of 2 queues.
+ */
+#define BBH_TX_LAN_CFGS_PDBASE		0x400
+
+/* The base of PD FIFO for queue 0. */
+#define  LAN_CFGS_PDBASE_FIFOBASE0_SHIFT	0
+#define  LAN_CFGS_PDBASE_FIFOBASE0_MASK	0x1ff
+
+/* The base of PD FIFO for queue 1. */
+#define  LAN_CFGS_PDBASE_FIFOBASE1_SHIFT	16
+#define  LAN_CFGS_PDBASE_FIFOBASE1_MASK	0x1ff0000
+
+
+/*
+ * Register <PD_FIFO_SIZE>
+ *
+ * The BBH manages 40 queues for GPON and 32 queues for EPON (FIFO per
+ * TCONT/LLID).
+ * For each queue it manages a PD FIFO.
+ * A total of 256 PDs are available for all queues.
+ * For each Queue the SW configures the base and the size within these.
+ * each register in this array defines the PD FIFO size of 2 queues.
+ */
+#define BBH_TX_LAN_CFGS_PDSIZE		0x450
+
+/*
+ * The size of PD FIFO for queue 0.
+ * A value of n refers to n+1.
+ * For GPON, the max value is 0x7. For EPON, the max value is 0xf
+*/
+#define  LAN_CFGS_PDSIZE_FIFOSIZE0_SHIFT	0
+#define  LAN_CFGS_PDSIZE_FIFOSIZE0_MASK	0x1ff
+
+/*
+ * The size of PD FIFO for queue 1.
+ * A value of n refers to n+1.
+*/
+#define  LAN_CFGS_PDSIZE_FIFOSIZE1_SHIFT	16
+#define  LAN_CFGS_PDSIZE_FIFOSIZE1_MASK	0x1ff0000
+
+
+/*
+ * Register <PD_WKUP_THRESH>
+ *
+ * When a FIFO occupancy is above this wakeup threshold, the BBH will not
+ * wake-up the Runner for sending a new PD.
+ * This threshold does not represent the actual size of the FIFO.
+ * If a PD will arrive from the Runner when the FIFO is above the
+ * threshold, it will not be dropped unless the FIFO is actually full.
+ * Each register defines the threshold of 2 queues.
+ */
+#define BBH_TX_LAN_CFGS_PDWKUPH		0x500
+
+/*
+ * The wakeup threshold of the PD FIFO for queue 0.
+ * A value of n refers to n+1.
+ * Relevant only for EPON BBH.
+*/
+#define  LAN_CFGS_PDWKUPH_WKUPTHRESH0_SHIFT	0
+#define  LAN_CFGS_PDWKUPH_WKUPTHRESH0_MASK	0xff
+
+/*
+ * The wakeup threshold of the PD FIFO for queue 1.
+ * A value of n refers to n+1.
+ * Relevant only for EPON BBH.
+*/
+#define  LAN_CFGS_PDWKUPH_WKUPTHRESH1_SHIFT	8
+#define  LAN_CFGS_PDWKUPH_WKUPTHRESH1_MASK	0xff00
+
+
+/*
+ * Register <PD_BYTES_THRESHOLD>
+ *
+ * The BBH requests PDs from the Runner and maintains a pre-fetch PDs FIFO.
+ * The PDs pre fetch is limited either by the PD FIFO configurable size or
+ * according to the total number of bytes (deducting bytes already
+ * requested/transmitted) for preventing HOL.
+ * Full configuration for the first 8 TCONT and one configuration for the
+ * rest (TCONTs 8-39).
+ * Each register in this array defines the threshold of 2 queues.
+ */
+#define BBH_TX_LAN_CFGS_PD_BYTE_TH	0x550
+
+/*
+ * Defines the number of bytes for PDs pre fetch limited according to the
+ * total number of bytes.
+ * The value is in 8-bytes resolution.
+*/
+#define  LAN_CFGS_PD_BYTE_TH_PDLIMIT0_SHIFT	0
+#define  LAN_CFGS_PD_BYTE_TH_PDLIMIT0_MASK	0xffff
+
+/*
+ * Defines the number of bytes for PDs pre fetch limited according to the
+ * total number of bytes.
+ * The value is in 8-bytes resolution.
+*/
+#define  LAN_CFGS_PD_BYTE_TH_PDLIMIT1_SHIFT	16
+#define  LAN_CFGS_PD_BYTE_TH_PDLIMIT1_MASK	0xffff0000
+
+
+/*
+ * Register <PD_BYTES_THRESHOLD_EN>
+ *
+ * The BBH requests PDs from the Runner and maintains a pre-fetch PDs FIFO.
+ * The PDs pre fetch is limited either by the PD FIFO configurable size or
+ * according to the total number of bytes (deducting bytes already
+ * requested/transmitted) for preventing HOL.
+ * Full configuration for the first 8 TCONT and one configuration per group
+ * of 8 TCONTs for the rest.
+ */
+#define BBH_TX_LAN_CFGS_PD_BYTE_TH_EN	0x600
+
+/*
+ * This bit enables the above feature (PDs pre fetch limited according to
+ * the total number of bytes).
+*/
+#define  LAN_CFGS_PD_BYTE_TH_EN_PDLIMITEN_MASK	0x1
+
+
+/*
+ * Register <PD_EMPTY_THRESHOLD>
+ *
+ * The BBH manages 32 queues for EPON (FIFO per LLID).
+ * For each queue it manages a PD FIFO.
+ * Usually, the BBH orders PDs from the Runner in RR between all queues.
+ * In EPON BBH, if a FIFO occupancy is below this threshold, the queue will
+ * have higher priority in PD ordering arbitration (with RR between all the
+ * empty queues).
+ * This configuration is global for all queues.
+ * Relevant only for EPON BBH.
+ */
+#define BBH_TX_LAN_CFGS_PDEMPTY		0x604
+
+/*
+ * EPON PD FIFO empty threshold.
+ * A queue whose PD FIFO occupancy is below this threshold will have
+ * high priority in PD ordering arbitration.
+*/
+#define  LAN_CFGS_PDEMPTY_EMPTY_SHIFT	0
+#define  LAN_CFGS_PDEMPTY_EMPTY_MASK	0xff
+
+
+/*
+ * Register <TX_THRESHOLD>
+ *
+ * Transmit threshold in 8 bytes resolution.
+ * The BBH TX will not start to transmit data towards the XLMAC until the
+ * amount of data in the TX FIFO is larger than the threshold or if there
+ * is a complete packet in the FIFO.
+ */
+#define BBH_TX_LAN_CFGS_TXTHRESH	0x608
+
+/* DDR Transmit threshold in 8 bytes resolution */
+#define  LAN_CFGS_TXTHRESH_DDRTHRESH_SHIFT	0
+#define  LAN_CFGS_TXTHRESH_DDRTHRESH_MASK	0x1ff
+
+/* SRAM Transmit threshold in 8 bytes resolution */
+#define  LAN_CFGS_TXTHRESH_SRAMTHRESH_SHIFT	16
+#define  LAN_CFGS_TXTHRESH_SRAMTHRESH_MASK	0x1ff0000
+
+
+/*
+ * Register <EEE>
+ *
+ * The BBH is responsible for indicating the XLMAC that no traffic is about
+ * to arrive so the XLMAC may try to enter power saving mode.
+ * This register is used to enable this feature.
+ */
+#define BBH_TX_LAN_CFGS_EEE		0x60c
+
+/* enable bit */
+#define  LAN_CFGS_EEE_EN_MASK		0x1
+
+
+/*
+ * Register <TS>
+ *
+ * The BBH is responsible for indicating the XLMAC that it should
+ * calculate a timestamp for the current packet that is being transmitted.
+ * The BBH gets the timestamping parameters in the PD and forward it to the
+ * XLMAC.
+ * This register is used to enable this feature.
+ */
+#define BBH_TX_LAN_CFGS_TS		0x610
+
+/* enable bit */
+#define  LAN_CFGS_TS_EN_MASK		0x1
+
+
+/*
+ * Registers <PD_FIFO_BASE> - <x> is [ 0 => 3 ]
+ *
+ * The BBH manages 6 queues.
+ * Each queue is dedicated to one MAC interface.
+ * A total of 48 PDs are available for all queues.
+ * For each Queue the SW configures the base and the size within these 48
+ * PDs.
+ * The size of the 1st BN FIFO and get-next FIFO is the same as the size of
+ * the PD FIFO of each queue.
+ * each register in this array defines the PD FIFO base of 2 queues.
+ */
+#define BBH_TX_UNIFIED_CFGS_PDBASE(x)	(0x700 + (x) * 0x4)
+
+/* The base of PD FIFO for queue 0. */
+#define  UNIFIED_CFGS_PDBASE_FIFOBASE0_SHIFT	0
+#define  UNIFIED_CFGS_PDBASE_FIFOBASE0_MASK	0x1ff
+
+/* The base of PD FIFO for queue 1. */
+#define  UNIFIED_CFGS_PDBASE_FIFOBASE1_SHIFT	16
+#define  UNIFIED_CFGS_PDBASE_FIFOBASE1_MASK	0x1ff0000
+
+
+/*
+ * Registers <PD_FIFO_SIZE> - <x> is [ 0 => 3 ]
+ *
+ * The BBH manages 6 queues.
+ * Each queue is dedicated to one MAC interface.
+ * A total of 48 PDs are available for all queues.
+ * For each Queue the SW configures the base and the size within these 48
+ * PDs.
+ * each register in this array defines the PD FIFO size of 2 queues.
+ */
+#define BBH_TX_UNIFIED_CFGS_PDSIZE(x)	(0x750 + (x) * 0x4)
+
+/*
+ * The size of PD FIFO for queue 0.
+ * A value of n refers to n+1.
+ * For GPON, the max value is 0x7. For EPON, the max value is 0xf
+*/
+#define  UNIFIED_CFGS_PDSIZE_FIFOSIZE0_SHIFT	0
+#define  UNIFIED_CFGS_PDSIZE_FIFOSIZE0_MASK	0x1ff
+
+/*
+ * The size of PD FIFO for queue 1.
+ * A value of n refers to n+1.
+*/
+#define  UNIFIED_CFGS_PDSIZE_FIFOSIZE1_SHIFT	16
+#define  UNIFIED_CFGS_PDSIZE_FIFOSIZE1_MASK	0x1ff0000
+
+
+/*
+ * Registers <PD_WKUP_THRESH> - <x> is [ 0 => 3 ]
+ *
+ * When a FIFO occupancy is above this wakeup threshold, the BBH will not
+ * wake-up the Runner for sending a new PD.
+ * This threshold does not represent the actual size of the FIFO.
+ * If a PD will arrive from the Runner when the FIFO is above the
+ * threshold, it will not be dropped unless the FIFO is actually full.
+ * Each register defines the threshold of 2 queues.
+ */
+#define BBH_TX_UNIFIED_CFGS_PDWKUPH(x)	(0x800 + (x) * 0x4)
+
+/*
+ * The wakeup threshold of the PD FIFO for queue 0.
+ * A value of n refers to n+1.
+ * Relevant only for EPON BBH.
+*/
+#define  UNIFIED_CFGS_PDWKUPH_WKUPTHRESH0_SHIFT	0
+#define  UNIFIED_CFGS_PDWKUPH_WKUPTHRESH0_MASK	0xff
+
+/*
+ * The wakeup threshold of the PD FIFO for queue 1.
+ * A value of n refers to n+1.
+ * Relevant only for EPON BBH.
+*/
+#define  UNIFIED_CFGS_PDWKUPH_WKUPTHRESH1_SHIFT	8
+#define  UNIFIED_CFGS_PDWKUPH_WKUPTHRESH1_MASK	0xff00
+
+
+/*
+ * Registers <PD_BYTES_THRESHOLD> - <x> is [ 0 => 3 ]
+ *
+ * The BBH requests PDs from the Runner and maintains a pre-fetch PDs FIFO.
+ * The PDs pre fetch is limited either by the PD FIFO configurable size or
+ * according to the total number of bytes (deducting bytes already
+ * requested/transmitted) for preventing HOL.
+ * Full configuration for the first 8 TCONT and one configuration for the
+ * rest (TCONTs 8-39).
+ * Each register in this array defines the threshold of 2 queues.
+ */
+#define BBH_TX_UNIFIED_CFGS_PD_BYTE_TH(x)	(0x850 + (x) * 0x4)
+
+/*
+ * Defines the number of bytes for PDs pre fetch limited according to the
+ * total number of bytes.
+ * The value is in 8-bytes resolution.
+*/
+#define  UNIFIED_CFGS_PD_BYTE_TH_PDLIMIT0_SHIFT	0
+#define  UNIFIED_CFGS_PD_BYTE_TH_PDLIMIT0_MASK	0xffff
+
+/*
+ * Defines the number of bytes for PDs pre fetch limited according to the
+ * total number of bytes.
+ * The value is in 8-bytes resolution.
+*/
+#define  UNIFIED_CFGS_PD_BYTE_TH_PDLIMIT1_SHIFT	16
+#define  UNIFIED_CFGS_PD_BYTE_TH_PDLIMIT1_MASK	0xffff0000
+
+
+/*
+ * Register <PD_BYTES_THRESHOLD_EN>
+ *
+ * The BBH requests PDs from the Runner and maintains a pre-fetch PDs FIFO.
+ * The PDs pre fetch is limited either by the PD FIFO configurable size or
+ * according to the total number of bytes (deducting bytes already
+ * requested/transmitted) for preventing HOL.
+ * Full configuration for the first 8 TCONT and one configuration per group
+ * of 8 TCONTs for the rest.
+ */
+#define BBH_TX_UNIFIED_CFGS_PD_BYTE_TH_EN	0x900
+
+/*
+ * This bit enables the above feature (PDs pre fetch limited according to
+ * the total number of bytes).
+*/
+#define  UNIFIED_CFGS_PD_BYTE_TH_EN_PDLIMITEN_MASK	0x1
+
+
+/*
+ * Register <PD_EMPTY_THRESHOLD>
+ *
+ * The BBH manages 32 queues for EPON (FIFO per LLID).
+ * For each queue it manages a PD FIFO.
+ * Usually, the BBH orders PDs from the Runner in RR between all queues.
+ * In EPON BBH, if a FIFO occupancy is below this threshold, the queue will
+ * have higher priority in PD ordering arbitration (with RR between all the
+ * empty queues).
+ * This configuration is global for all queues.
+ * Relevant only for EPON BBH.
+ */
+#define BBH_TX_UNIFIED_CFGS_PDEMPTY	0x904
+
+/*
+ * EPON PD FIFO empty threshold.
+ * A queue whose PD FIFO occupancy is below this threshold will have
+ * high priority in PD ordering arbitration.
+*/
+#define  UNIFIED_CFGS_PDEMPTY_EMPTY_SHIFT	0
+#define  UNIFIED_CFGS_PDEMPTY_EMPTY_MASK	0xff
+
+
+/*
+ * Register <GLOBAL_TX_THRESHOLD>
+ *
+ * Transmit threshold in 8 bytes resolution.
+ * The BBH TX will not start to transmit data towards the XLMAC until the
+ * amount of data in the TX FIFO is larger than the threshold or if there
+ * is a complete packet in the FIFO.
+ * This threshold is used by the non unified BBH.
+ * for unified BBH it should be set to 0.
+ */
+#define BBH_TX_UNIFIED_CFGS_GTXTHRESH	0x908
+
+/* DDR Transmit threshold in 8 bytes resolution */
+#define  UNIFIED_CFGS_GTXTHRESH_DDRTHRESH_SHIFT	0
+#define  UNIFIED_CFGS_GTXTHRESH_DDRTHRESH_MASK	0x1ff
+
+/* SRAM Transmit threshold in 8 bytes resolution */
+#define  UNIFIED_CFGS_GTXTHRESH_SRAMTHRESH_SHIFT	16
+#define  UNIFIED_CFGS_GTXTHRESH_SRAMTHRESH_MASK	0x1ff0000
+
+
+/*
+ * Register <EEE>
+ *
+ * The BBH is responsible for indicating the XLMAC that no traffic is about
+ * to arrive so the XLMAC may try to enter power saving mode.
+ * This register is used to enable this feature per MAC
+ */
+#define BBH_TX_UNIFIED_CFGS_EEE		0x90c
+
+/* enable bit */
+#define  UNIFIED_CFGS_EEE_EN_SHIFT	0
+#define  UNIFIED_CFGS_EEE_EN_MASK	0xff
+
+
+/*
+ * Register <TS>
+ *
+ * The BBH is responsible for indicating the XLMAC that it should
+ * calculate a timestamp for the current packet that is being transmitted.
+ * The BBH gets the timestamping parameters in the PD and forward it to the
+ * XLMAC.
+ * This register is used to enable this feature per MAC
+ */
+#define BBH_TX_UNIFIED_CFGS_TS		0x910
+
+/* enable bit */
+#define  UNIFIED_CFGS_TS_EN_SHIFT	0
+#define  UNIFIED_CFGS_TS_EN_MASK	0xff
+
+
+/*
+ * Registers <FE_FIFO_BASE> - <x> is [ 0 => 3 ]
+ *
+ * The BBH manages 40 queues for GPON or 32 queues for EPON (1 for each
+ * TCONT/LLID).
+ * For each queue it manages a PD FIFO.
+ * A total of 256 PDs are available for all queues.
+ * For each Queue the SW configures the base and the size within these 256
+ * PDs.
+ * The size of the 1st BN FIFO and get-next FIFO is the same as the size of
+ * the PD FIFO of each queue.
+ * each register in this array defines the PD FIFO base of 2 queues.
+ */
+#define BBH_TX_UNIFIED_CFGS_FEBASE(x)	(0x920 + (x) * 0x4)
+
+/* The base of FE FIFO for queue 0. */
+#define  UNIFIED_CFGS_FEBASE_FIFOBASE0_SHIFT	0
+#define  UNIFIED_CFGS_FEBASE_FIFOBASE0_MASK	0x7ff
+
+/* The base of FE FIFO for queue 1. */
+#define  UNIFIED_CFGS_FEBASE_FIFOBASE1_SHIFT	16
+#define  UNIFIED_CFGS_FEBASE_FIFOBASE1_MASK	0x7ff0000
+
+
+/*
+ * Registers <FE_FIFO_SIZE> - <x> is [ 0 => 3 ]
+ *
+ * The BBH manages 40 queues for GPON and 32 queues for EPON (FIFO per
+ * TCONT/LLID).
+ * For each queue it manages a PD FIFO.
+ * A total of 256 PDs are available for all queues.
+ * For each Queue the SW configures the base and the size within these.
+ * each register in this array defines the PD FIFO size of 2 queues.
+ */
+#define BBH_TX_UNIFIED_CFGS_FESIZE(x)	(0x940 + (x) * 0x4)
+
+/*
+ * The size of FE FIFO for queue 0.
+ * A value of n refers to n+1.
+*/
+#define  UNIFIED_CFGS_FESIZE_FIFOSIZE0_SHIFT	0
+#define  UNIFIED_CFGS_FESIZE_FIFOSIZE0_MASK	0x7ff
+
+/*
+ * The size of FE FIFO for queue 1.
+ * A value of n refers to n+1.
+*/
+#define  UNIFIED_CFGS_FESIZE_FIFOSIZE1_SHIFT	16
+#define  UNIFIED_CFGS_FESIZE_FIFOSIZE1_MASK	0x7ff0000
+
+
+/*
+ * Registers <FE_PD_FIFO_BASE> - <x> is [ 0 => 3 ]
+ *
+ * The BBH manages 40 queues for GPON or 32 queues for EPON (1 for each
+ * TCONT/LLID).
+ * For each queue it manages a PD FIFO.
+ * A total of 256 PDs are available for all queues.
+ * For each Queue the SW configures the base and the size within these 256
+ * PDs.
+ * The size of the 1st BN FIFO and get-next FIFO is the same as the size of
+ * the PD FIFO of each queue.
+ * each register in this array defines the PD FIFO base of 2 queues.
+ */
+#define BBH_TX_UNIFIED_CFGS_FEPDBASE(x)	(0x960 + (x) * 0x4)
+
+/* The base of FE PD FIFO for queue 0. */
+#define  UNIFIED_CFGS_FEPDBASE_FIFOBASE0_SHIFT	0
+#define  UNIFIED_CFGS_FEPDBASE_FIFOBASE0_MASK	0xff
+
+/* The base of FE PD FIFO for queue 1. */
+#define  UNIFIED_CFGS_FEPDBASE_FIFOBASE1_SHIFT	16
+#define  UNIFIED_CFGS_FEPDBASE_FIFOBASE1_MASK	0xff0000
+
+
+/*
+ * Registers <FE_PD_FIFO_SIZE> - <x> is [ 0 => 3 ]
+ *
+ * The BBH manages 40 queues for GPON and 32 queues for EPON (FIFO per
+ * TCONT/LLID).
+ * For each queue it manages a PD FIFO.
+ * A total of 256 PDs are available for all queues.
+ * For each Queue the SW configures the base and the size within these.
+ * each register in this array defines the PD FIFO size of 2 queues.
+ */
+#define BBH_TX_UNIFIED_CFGS_FEPDSIZE(x)	(0x980 + (x) * 0x4)
+
+/*
+ * The size of FE PD FIFO for queue 0.
+ * A value of n refers to n+1.
+*/
+#define  UNIFIED_CFGS_FEPDSIZE_FIFOSIZE0_SHIFT	0
+#define  UNIFIED_CFGS_FEPDSIZE_FIFOSIZE0_MASK	0xff
+
+/*
+ * The size of FE PD FIFO for queue 1.
+ * A value of n refers to n+1.
+*/
+#define  UNIFIED_CFGS_FEPDSIZE_FIFOSIZE1_SHIFT	16
+#define  UNIFIED_CFGS_FEPDSIZE_FIFOSIZE1_MASK	0xff0000
+
+
+/*
+ * Registers <TX_RR_WEIGHT> - <x> is [ 0 => 3 ]
+ *
+ * The unified BBH TX serves multiple MACs.
+ * The TX arbitration between these MACs is WRR.
+ * This register array determines the weight of each MAC.
+ * Each register in the array represents 2 MACs.
+ */
+#define BBH_TX_UNIFIED_CFGS_TXWRR(x)	(0x9a0 + (x) * 0x4)
+
+/* weight of MAC0 */
+#define  UNIFIED_CFGS_TXWRR_W0_SHIFT	0
+#define  UNIFIED_CFGS_TXWRR_W0_MASK	0xf
+
+/* weight of MAC1 */
+#define  UNIFIED_CFGS_TXWRR_W1_SHIFT	16
+#define  UNIFIED_CFGS_TXWRR_W1_MASK	0xf0000
+
+
+/*
+ * Registers <TX_THRESHOLD> - <x> is [ 0 => 3 ]
+ *
+ * Transmit threshold in 8 bytes resolution.
+ * The BBH TX will not start to transmit data towards the MAC until the
+ * amount of data in the TX FIFO is larger than the threshold or if there
+ * is a complete packet in the FIFO.
+ */
+#define BBH_TX_UNIFIED_CFGS_TXTHRESH(x)	(0x9e0 + (x) * 0x4)
+
+/* Transmit threshold in 8 bytes resolution for mac 0 */
+#define  UNIFIED_CFGS_TXTHRESH_THRESH0_SHIFT	0
+#define  UNIFIED_CFGS_TXTHRESH_THRESH0_MASK	0x1ff
+
+/* Transmit threshold in 8 bytes resolution for MAC1 */
+#define  UNIFIED_CFGS_TXTHRESH_THRESH1_SHIFT	16
+#define  UNIFIED_CFGS_TXTHRESH_THRESH1_MASK	0x1ff0000
+
+
+/*
+ * Register <SRAM_PD_COUNTER> - read-only
+ *
+ * This counter counts the number of received PD for packets to be
+ * transmitted from the SRAM.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_SRAMPD	0xa00
+
+/*
+ * This counter counts the number of packets which were transmitted from
+ * the SRAM.
+*/
+#define  DEBUG_COUNTERS_SRAMPD_SRAMPD_SHIFT	0
+#define  DEBUG_COUNTERS_SRAMPD_SRAMPD_MASK	0xffffffff
+
+
+/*
+ * Register <DDR_PD_COUNTER> - read-only
+ *
+ * This counter counts the number of received PDs for packets to be
+ * transmitted from the DDR.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_DDRPD	0xa04
+
+/*
+ * This counter counts the number of packets which were transmitted from
+ * the DDR.
+*/
+#define  DEBUG_COUNTERS_DDRPD_DDRPD_SHIFT	0
+#define  DEBUG_COUNTERS_DDRPD_DDRPD_MASK	0xffffffff
+
+
+/*
+ * Register <PD_DROP_COUNTER> - read-only
+ *
+ * This counter counts the number of PDs which were dropped due to PD FIFO
+ * full.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_PDDROP	0xa08
+
+/*
+ * This counter counts the number of PDs which were dropped due to PD FIFO
+ * full.
+*/
+#define  DEBUG_COUNTERS_PDDROP_PDDROP_SHIFT	0
+#define  DEBUG_COUNTERS_PDDROP_PDDROP_MASK	0xffff
+
+
+/*
+ * Register <STS_COUNTER> - read-only
+ *
+ * This counter counts the number of STS messages which were received from
+ * Runner.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_STSCNT	0xa10
+
+/* This counter counts the number of received status messages. */
+#define  DEBUG_COUNTERS_STSCNT_STSCNT_SHIFT	0
+#define  DEBUG_COUNTERS_STSCNT_STSCNT_MASK	0xffffffff
+
+
+/*
+ * Register <STS_DROP_COUNTER> - read-only
+ *
+ * This counter counts the number of STS which were dropped due to PD FIFO
+ * full.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_STSDROP	0xa14
+
+/*
+ * This counter counts the number of STS which were dropped due to PD FIFO
+ * full.
+*/
+#define  DEBUG_COUNTERS_STSDROP_STSDROP_SHIFT	0
+#define  DEBUG_COUNTERS_STSDROP_STSDROP_MASK	0xffff
+
+
+/*
+ * Register <MSG_COUNTER> - read-only
+ *
+ * This counter counts the number of MSG (DBR/Ghost) messages which were
+ * received from Runner.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_MSGCNT	0xa18
+
+/* This counter counts the number of received DBR/ghost messages. */
+#define  DEBUG_COUNTERS_MSGCNT_MSGCNT_SHIFT	0
+#define  DEBUG_COUNTERS_MSGCNT_MSGCNT_MASK	0xffffffff
+
+
+/*
+ * Register <MSG_DROP_COUNTER> - read-only
+ *
+ * This counter counts the number of MSG which were dropped due to PD FIFO
+ * full.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_MSGDROP	0xa1c
+
+/*
+ * This counter counts the number of MSG which were dropped due to PD FIFO
+ * full.
+*/
+#define  DEBUG_COUNTERS_MSGDROP_MSGDROP_SHIFT	0
+#define  DEBUG_COUNTERS_MSGDROP_MSGDROP_MASK	0xffff
+
+
+/*
+ * Register <GET_NEXT_IS_NULL_COUNTER> - read-only
+ *
+ * This counter counts the number of Get next responses with a null BN.
+ * It counts the packets for all TCONTs together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ * This counter is relevant for Ethernet only.
+ */
+#define BBH_TX_DEBUG_COUNTERS_GETNEXTNULL	0xa20
+
+/* This counter counts the number of Get next responses with a null BN. */
+#define  DEBUG_COUNTERS_GETNEXTNULL_GETNEXTNULL_SHIFT	0
+#define  DEBUG_COUNTERS_GETNEXTNULL_GETNEXTNULL_MASK	0xffff
+
+
+/*
+ * Register <FLUSHED_PACKETS_COUNTER> - read-only
+ *
+ * This counter counts the number of packets that were flushed (bn was
+ * released without sending the data to the EPON MAC) due to flush request.
+ * The counter is global for all queues.
+ * The counter is read clear.
+ */
+#define BBH_TX_DEBUG_COUNTERS_FLUSHPKTS	0xa24
+
+/* This counter counts the number of flushed packets */
+#define  DEBUG_COUNTERS_FLUSHPKTS_FLSHPKTS_SHIFT	0
+#define  DEBUG_COUNTERS_FLUSHPKTS_FLSHPKTS_MASK	0xffff
+
+
+/*
+ * Register <REQ_LENGTH_ERROR_COUNTER> - read-only
+ *
+ * This counter counts the number of times a length error (mismatch between
+ * a request from the MAC and a PD from the Runner) occurred.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_LENERR	0xa28
+
+/* This counter counts the number of times a length error occurred */
+#define  DEBUG_COUNTERS_LENERR_LENERR_SHIFT	0
+#define  DEBUG_COUNTERS_LENERR_LENERR_MASK	0xffff
+
+
+/*
+ * Register <AGGREGATION_LENGTH_ERROR_COUNTER> - read-only
+ *
+ * This counter counts aggregation length error events.
+ * If one or more of the packets in an aggregated PD is shorter than 60
+ * bytes, this counter will be incremented by 1.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_AGGRLENERR	0xa2c
+
+/*
+ * This counter counts the number of times an aggregation length error
+ * occurred
+*/
+#define  DEBUG_COUNTERS_AGGRLENERR_AGGRLENERR_SHIFT	0
+#define  DEBUG_COUNTERS_AGGRLENERR_AGGRLENERR_MASK	0xffff
+
+
+/*
+ * Register <SRAM_PKT_COUNTER> - read-only
+ *
+ * This counter counts the number of received packets to be transmitted
+ * from the SRAM.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_SRAMPKT	0xa30
+
+/*
+ * This counter counts the number of packets which were transmitted from
+ * the SRAM.
+*/
+#define  DEBUG_COUNTERS_SRAMPKT_SRAMPKT_SHIFT	0
+#define  DEBUG_COUNTERS_SRAMPKT_SRAMPKT_MASK	0xffffffff
+
+
+/*
+ * Register <DDR_PKT_COUNTER> - read-only
+ *
+ * This counter counts the number of received packets to be transmitted
+ * from the DDR.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_DDRPKT	0xa34
+
+/*
+ * This counter counts the number of packets which were transmitted from
+ * the DDR.
+*/
+#define  DEBUG_COUNTERS_DDRPKT_DDRPKT_SHIFT	0
+#define  DEBUG_COUNTERS_DDRPKT_DDRPKT_MASK	0xffffffff
+
+
+/*
+ * Register <SRAM_BYTE_COUNTER> - read-only
+ *
+ * This counter counts the number of transmitted bytes from the SRAM.
+ * It counts the bytes for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_SRAMBYTE	0xa38
+
+/* This counter counts the number of transmitted bytes from the SRAM. */
+#define  DEBUG_COUNTERS_SRAMBYTE_SRAMBYTE_SHIFT	0
+#define  DEBUG_COUNTERS_SRAMBYTE_SRAMBYTE_MASK	0xffffffff
+
+
+/*
+ * Register <DDR_BYTE_COUNTER> - read-only
+ *
+ * This counter counts the number of transmitted bytes from the DDR.
+ * It counts the bytes for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_DDRBYTE	0xa3c
+
+/* This counter counts the number of transmitted bytes from the DDR. */
+#define  DEBUG_COUNTERS_DDRBYTE_DDRBYTE_SHIFT	0
+#define  DEBUG_COUNTERS_DDRBYTE_DDRBYTE_MASK	0xffffffff
+
+
+/*
+ * Register <SW_RD_EN>
+ *
+ * writing to this register creates a rd_en pulse to the selected array the
+ * SW wants to access.
+ * Each bit in the register represents one of the arrays the SW can access.
+ * The address inside the array is determined in the previous register
+ * (sw_rd_address).
+ * When writing to this register the SW should assert only one bit.
+ * If more than one is asserted, The HW will return the value read from the
+ * lsb selected array.
+ */
+#define BBH_TX_DEBUG_COUNTERS_SWRDEN	0xa40
+
+/* rd from the PD FIFO */
+#define  DEBUG_COUNTERS_SWRDEN_PDSEL_MASK	0x1
+
+/* rd from the PD valid array */
+#define  DEBUG_COUNTERS_SWRDEN_PDVSEL_MASK	0x2
+
+/* rd from the PD empty array */
+#define  DEBUG_COUNTERS_SWRDEN_PDEMPTYSEL_MASK	0x4
+
+/* rd from the PD Full array */
+#define  DEBUG_COUNTERS_SWRDEN_PDFULLSEL_MASK	0x8
+
+/* rd from the PD below empty array */
+#define  DEBUG_COUNTERS_SWRDEN_PDBEMPTYSEL_MASK	0x10
+
+/* rd from the PD full for wakeup empty array */
+#define  DEBUG_COUNTERS_SWRDEN_PDFFWKPSEL_MASK	0x20
+
+/* rd from the first BN array */
+#define  DEBUG_COUNTERS_SWRDEN_FBNSEL_MASK	0x40
+
+/* rd from the first BN valid array */
+#define  DEBUG_COUNTERS_SWRDEN_FBNVSEL_MASK	0x80
+
+/* rd from the first BN empty array */
+#define  DEBUG_COUNTERS_SWRDEN_FBNEMPTYSEL_MASK	0x100
+
+/* rd from the first BN full array */
+#define  DEBUG_COUNTERS_SWRDEN_FBNFULLSEL_MASK	0x200
+
+/* rd from the first Get Next array */
+#define  DEBUG_COUNTERS_SWRDEN_GETNEXTSEL_MASK	0x400
+
+/* rd from the get_next valid array */
+#define  DEBUG_COUNTERS_SWRDEN_GETNEXTVSEL_MASK	0x800
+
+/* rd from the get next empty array */
+#define  DEBUG_COUNTERS_SWRDEN_GETNEXTEMPTYSEL_MASK	0x1000
+
+/* rd from the get next full array */
+#define  DEBUG_COUNTERS_SWRDEN_GETNEXTFULLSEL_MASK	0x2000
+
+/* rd from the gpon context array */
+#define  DEBUG_COUNTERS_SWRDEN_GPNCNTXTSEL_MASK	0x4000
+
+/* rd from the BPM FIFO */
+#define  DEBUG_COUNTERS_SWRDEN_BPMSEL_MASK	0x8000
+
+/* rd from the BPM FLUSH FIFO */
+#define  DEBUG_COUNTERS_SWRDEN_BPMFSEL_MASK	0x10000
+
+/* rd from the SBPM FIFO */
+#define  DEBUG_COUNTERS_SWRDEN_SBPMSEL_MASK	0x20000
+
+/* rd from the SBPM FLUSH FIFO */
+#define  DEBUG_COUNTERS_SWRDEN_SBPMFSEL_MASK	0x40000
+
+/* rd from the STS FIFO */
+#define  DEBUG_COUNTERS_SWRDEN_STSSEL_MASK	0x80000
+
+/* rd from the STS valid array */
+#define  DEBUG_COUNTERS_SWRDEN_STSVSEL_MASK	0x100000
+
+/* rd from the STS empty array */
+#define  DEBUG_COUNTERS_SWRDEN_STSEMPTYSEL_MASK	0x200000
+
+/* rd from the STS Full array */
+#define  DEBUG_COUNTERS_SWRDEN_STSFULLSEL_MASK	0x400000
+
+/* rd from the STS below empty array */
+#define  DEBUG_COUNTERS_SWRDEN_STSBEMPTYSEL_MASK	0x800000
+
+/* rd from the STS full for wakeup empty array */
+#define  DEBUG_COUNTERS_SWRDEN_STSFFWKPSEL_MASK	0x1000000
+
+/* rd from the MSG FIFO */
+#define  DEBUG_COUNTERS_SWRDEN_MSGSEL_MASK	0x2000000
+
+/* rd from the msg valid array */
+#define  DEBUG_COUNTERS_SWRDEN_MSGVSEL_MASK	0x4000000
+
+/* rd from the epon request FIFO */
+#define  DEBUG_COUNTERS_SWRDEN_EPNREQSEL_MASK	0x8000000
+
+/* rd from the DATA FIFO (SRAM and DDR) */
+#define  DEBUG_COUNTERS_SWRDEN_DATASEL_MASK	0x10000000
+
+/* rd from the reorder FIFO */
+#define  DEBUG_COUNTERS_SWRDEN_REORDERSEL_MASK	0x20000000
+
+/* rd from the Timestamp Info FIFO */
+#define  DEBUG_COUNTERS_SWRDEN_TSINFOSEL_MASK	0x40000000
+
+/* rd from the MAC TX FIFO. */
+#define  DEBUG_COUNTERS_SWRDEN_MACTXSEL_MASK	0x80000000
+
+
+/*
+ * Register <SW_RD_ADDR>
+ *
+ * the address inside the array the SW wants to read
+ */
+#define BBH_TX_DEBUG_COUNTERS_SWRDADDR	0xa44
+
+/* The address inside the array the sw wants to read */
+#define  DEBUG_COUNTERS_SWRDADDR_RDADDR_SHIFT	0
+#define  DEBUG_COUNTERS_SWRDADDR_RDADDR_MASK	0x7ff
+
+
+/*
+ * Register <SW_RD_DATA> - read-only
+ *
+ * indirect memories and arrays read data
+ */
+#define BBH_TX_DEBUG_COUNTERS_SWRDDATA	0xa48
+
+/* data */
+#define  DEBUG_COUNTERS_SWRDDATA_DATA_SHIFT	0
+#define  DEBUG_COUNTERS_SWRDDATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <UNIFIED_PKT_COUNTER> - <x> is [ 0 => 7 ] - read-only
+ *
+ * This counter array counts the number of transmitted packets through each
+ * interface in the unified BBH.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_UNIFIEDPKT(x)	(0xa50 + (x) * 0x4)
+
+/* This counter counts the number of transmitted bytes from the DDR. */
+#define  DEBUG_COUNTERS_UNIFIEDPKT_DDRBYTE_SHIFT	0
+#define  DEBUG_COUNTERS_UNIFIEDPKT_DDRBYTE_MASK	0xffffffff
+
+
+/*
+ * Registers <UNIFIED_BYTE_COUNTER> - <x> is [ 0 => 7 ] - read-only
+ *
+ * This counter array counts the number of transmitted bytes through each
+ * interface in the unified BBH.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define BBH_TX_DEBUG_COUNTERS_UNIFIEDBYTE(x)	(0xa70 + (x) * 0x4)
+
+/* This counter counts the number of transmitted bytes from the DDR. */
+#define  DEBUG_COUNTERS_UNIFIEDBYTE_DDRBYTE_SHIFT	0
+#define  DEBUG_COUNTERS_UNIFIEDBYTE_DDRBYTE_MASK	0xffffffff
+
+
+/*
+ * Registers <DEBUG_OUT_REG> - <x> is [ 0 => 31 ] - read-only
+ *
+ * an array including all the debug vectors of the BBH TX.
+ * entries 30 and 31 are DSL debug.
+ */
+#define BBH_TX_DEBUG_COUNTERS_DBGOUTREG(x)	(0xa90 + (x) * 0x4)
+
+/* Selected debug vector. */
+#define  DEBUG_COUNTERS_DBGOUTREG_DBGVEC_SHIFT	0
+#define  DEBUG_COUNTERS_DBGOUTREG_DBGVEC_MASK	0xffffffff
+
+
+/*
+ * Registers <IN_SEGMENTATION> - <x> is [ 0 => 1 ] - read-only
+ *
+ * 40 bit vector in which each bit represents if the segmentation SM is
+ * currently handling a PD of a certain TCONT.
+ * first address is for TCONTS [31:
+ * 0]second is for TCONTS [39:
+ * 32]
+ */
+#define BBH_TX_DEBUG_COUNTERS_IN_SEGMENTATION(x)	(0xb20 + (x) * 0x4)
+
+/* in_segmentation indication */
+#define  DEBUG_COUNTERS_IN_SEGMENTATION_IN_SEGMENTATION_SHIFT	0
+#define  DEBUG_COUNTERS_IN_SEGMENTATION_IN_SEGMENTATION_MASK	0xffffffff
+
+
+#endif /* ! XRDP_REGS_BBH_TX_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_cnpl.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_cnpl.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_cnpl.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_cnpl.h	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,405 @@
+#ifndef XRDP_REGS_CNPL_H_
+#define XRDP_REGS_CNPL_H_
+
+/* relative to core */
+#define CNPL_OFFSET_0			0xe48000
+
+/*
+ * Registers <MEM_ENTRY> - <x> is [ 0 => 3071 ]
+ *
+ * mem_entry
+ */
+#define CNPL_MEMORY_DATA(x)		(0x0 + (x) * 0x4)
+
+/* data */
+#define  MEMORY_DATA_DATA_SHIFT		0
+#define  MEMORY_DATA_DATA_MASK		0xffffffff
+
+
+/*
+ * Registers <CNT_LOC_PROFILE> - <x> is [ 0 => 15 ]
+ *
+ * location profiles
+ */
+#define CNPL_COUNTERS_CFGS_CN_LOC_PROF(x)	(0x4000 + (x) * 0x4)
+
+/*
+ * counters group base address (in 8B resolution:
+ * 0 is 0x0, 1 is 0x8, .
+ * ..
+ * )
+*/
+#define  COUNTERS_CFGS_CN_LOC_PROF_BA_SHIFT	0
+#define  COUNTERS_CFGS_CN_LOC_PROF_BA_MASK	0x7ff
+
+/*
+ * number of bytes that will hold each sub-counter.
+ * 0:
+ * 1B1:
+ * 2B2:
+ * 4B
+*/
+#define  COUNTERS_CFGS_CN_LOC_PROF_CN0_BYTS_SHIFT	11
+#define  COUNTERS_CFGS_CN_LOC_PROF_CN0_BYTS_MASK	0x1800
+
+/*
+ * 1:
+ * each counter of the group is double sub-cntr0:
+ * each counter of the group is single
+*/
+#define  COUNTERS_CFGS_CN_LOC_PROF_DOUBLLE_MASK	0x2000
+
+/*
+ * 1:
+ * wrap at max value0:
+ * freeze at max value
+*/
+#define  COUNTERS_CFGS_CN_LOC_PROF_WRAP_MASK	0x4000
+
+/*
+ * 1:
+ * read clear when reading0:
+ * read no-clear when reading
+*/
+#define  COUNTERS_CFGS_CN_LOC_PROF_CLR_MASK	0x8000
+
+
+/*
+ * Registers <PL_LOC_PROFILE0> - <x> is [ 0 => 1 ]
+ *
+ * 1st reg for location profiles
+ */
+#define CNPL_POLICERS_CFGS_PL_LOC_PROF0(x)	(0x4100 + (x) * 0x4)
+
+/*
+ * buckets base address(in 8B resolution:
+ * 0 is 0x0, 1 is 0x8, .
+ * ..
+ * )
+*/
+#define  POLICERS_CFGS_PL_LOC_PROF0_BK_BA_SHIFT	0
+#define  POLICERS_CFGS_PL_LOC_PROF0_BK_BA_MASK	0x7ff
+
+/*
+ * params base address(in 8B resolution:
+ * 0 is 0x0, 1 is 0x8, .
+ * ..
+ * )
+*/
+#define  POLICERS_CFGS_PL_LOC_PROF0_PA_BA_SHIFT	11
+#define  POLICERS_CFGS_PL_LOC_PROF0_PA_BA_MASK	0x3ff800
+
+/*
+ * 1:
+ * each policer is dual bucket0:
+ * each policer is single bucket
+*/
+#define  POLICERS_CFGS_PL_LOC_PROF0_DOUBLLE_MASK	0x400000
+
+
+/*
+ * Registers <PL_LOC_PROFILE1> - <x> is [ 0 => 1 ]
+ *
+ * 2nd reg for location profiles
+ */
+#define CNPL_POLICERS_CFGS_PL_LOC_PROF1(x)	(0x4108 + (x) * 0x4)
+
+/* Index of 1st policer of the group. */
+#define  POLICERS_CFGS_PL_LOC_PROF1_PL_ST_SHIFT	0
+#define  POLICERS_CFGS_PL_LOC_PROF1_PL_ST_MASK	0xff
+
+/* Index of last policer of the group. */
+#define  POLICERS_CFGS_PL_LOC_PROF1_PL_END_SHIFT	8
+#define  POLICERS_CFGS_PL_LOC_PROF1_PL_END_MASK	0xff00
+
+
+/*
+ * Registers <PL_CALC_TYPE> - <x> is [ 0 => 2 ]
+ *
+ * calculation type register.
+ * 0:
+ * green, yellow, red1:
+ * red, yellow, green
+ */
+#define CNPL_POLICERS_CFGS_PL_CALC_TYPE(x)	(0x4110 + (x) * 0x4)
+
+/* 32b vector for 32 policers */
+#define  POLICERS_CFGS_PL_CALC_TYPE_VEC_SHIFT	0
+#define  POLICERS_CFGS_PL_CALC_TYPE_VEC_MASK	0xffffffff
+
+
+/*
+ * Register <PL_PERIODIC_UPDATE>
+ *
+ * periodic update parameters
+ */
+#define CNPL_POLICERS_CFGS_PER_UP	0x4120
+
+/*
+ * period in 8k cycles quanta (16.
+ * 384us for 500MHz)
+*/
+#define  POLICERS_CFGS_PER_UP_N_SHIFT	0
+#define  POLICERS_CFGS_PER_UP_N_MASK	0xff
+
+/*
+ * 1:
+ * enable periodic update0:
+ * disable periodic update
+*/
+#define  POLICERS_CFGS_PER_UP_EN_MASK	0x100
+
+
+/*
+ * Register <ARBITER_PARAM>
+ *
+ * arbiter sw priorities
+ */
+#define CNPL_MISC_ARB_PRM		0x4200
+
+/*
+ * 0:
+ * fixed lower1:
+ * rr with fw (default)2:
+ * fixed higher
+*/
+#define  MISC_ARB_PRM_SW_PRIO_SHIFT	0
+#define  MISC_ARB_PRM_SW_PRIO_MASK	0x3
+
+
+/*
+ * Register <COMMAND>
+ *
+ * command register
+ */
+#define CNPL_SW_IF_SW_CMD		0x4300
+
+/* value of register */
+#define  SW_IF_SW_CMD_VAL_SHIFT		0
+#define  SW_IF_SW_CMD_VAL_MASK		0xffffffff
+
+
+/*
+ * Register <STATUS> - read-only
+ *
+ * status register
+ */
+#define CNPL_SW_IF_SW_STAT		0x4304
+
+/*
+ * 0:
+ * DONE (ready)1:
+ * PROC(not ready)
+*/
+#define  SW_IF_SW_STAT_CN_RD_ST_MASK	0x1
+
+/*
+ * 0:
+ * DONE (ready)1:
+ * PROC(not ready)
+*/
+#define  SW_IF_SW_STAT_PL_PLC_ST_MASK	0x2
+
+/*
+ * 0:
+ * DONE (ready)1:
+ * PROC(not ready)
+*/
+#define  SW_IF_SW_STAT_PL_RD_ST_MASK	0x4
+
+
+/*
+ * Register <PL_RSLT> - read-only
+ *
+ * rdata register - policer command result
+ */
+#define CNPL_SW_IF_SW_PL_RSLT		0x4310
+
+/* red, yellow, green, non-active */
+#define  SW_IF_SW_PL_RSLT_COL_SHIFT	30
+#define  SW_IF_SW_PL_RSLT_COL_MASK	0xc0000000
+
+
+/*
+ * Registers <PL_RDX> - <x> is [ 0 => 1 ] - read-only
+ *
+ * rdata register - policer read command result.
+ * 2 register for 2 buckets.
+ * If the group has only one bucket per policer - the policers are returned
+ * in the registers as a full line:
+ * the even policers are in reg0 (0,2,4,.
+ * .), and the odd are in reg1.
+ */
+#define CNPL_SW_IF_SW_PL_RD(x)		(0x4314 + (x) * 0x4)
+
+/* value of read data */
+#define  SW_IF_SW_PL_RD_RD_SHIFT	0
+#define  SW_IF_SW_PL_RD_RD_MASK		0xffffffff
+
+
+/*
+ * Registers <CNT_RDX> - <x> is [ 0 => 7 ] - read-only
+ *
+ * rdata register - counters read command result.
+ * 8 register for 32B batch.
+ * In read of single counter (burst size=1) the output will be in reg0 (the
+ * 32b where the counter is).
+ * In read of burst of counters, the counters are returned in the registers
+ * as a full line:
+ * addr[2:
+ * 0]=0 section of line in reg0,2,4,6 and the addr[2:
+ * 0]=4 are in reg1,3,5,7 (this means that if the start of burst is at
+ * addr[2:
+ * 0]=4 section of line, the wanted output should be from reg1).
+ */
+#define CNPL_SW_IF_SW_CNT_RD(x)		(0x4320 + (x) * 0x4)
+
+/* value of read data */
+#define  SW_IF_SW_CNT_RD_RD_SHIFT	0
+#define  SW_IF_SW_CNT_RD_RD_MASK	0xffffffff
+
+
+/*
+ * Registers <ENG_CMDS_CNTR> - <x> is [ 0 => 10 ] - read-only
+ *
+ * Number of commands that were processed by the engine.
+ */
+#define CNPL_PM_COUNTERS_ENG_CMDS(x)	(0x4400 + (x) * 0x4)
+
+/* value */
+#define  PM_COUNTERS_ENG_CMDS_VAL_SHIFT	0
+#define  PM_COUNTERS_ENG_CMDS_VAL_MASK	0xffffffff
+
+
+/*
+ * Registers <CMD_WAITS_CNTR> - <x> is [ 0 => 1 ] - read-only
+ *
+ * Number of wait cycles that the command waited until there was an idle
+ * engine.
+ */
+#define CNPL_PM_COUNTERS_CMD_WAIT(x)	(0x4440 + (x) * 0x4)
+
+/* value */
+#define  PM_COUNTERS_CMD_WAIT_VAL_SHIFT	0
+#define  PM_COUNTERS_CMD_WAIT_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <TOT_CYC_CNTR> - read-only
+ *
+ * Number of cycles from last read clear
+ */
+#define CNPL_PM_COUNTERS_TOT_CYC	0x4450
+
+/* value */
+#define  PM_COUNTERS_TOT_CYC_VAL_SHIFT	0
+#define  PM_COUNTERS_TOT_CYC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <GNT_CYC_CNTR> - read-only
+ *
+ * Number of cycles that there was gnt from main arbiter
+ */
+#define CNPL_PM_COUNTERS_GNT_CYC	0x4454
+
+/* value */
+#define  PM_COUNTERS_GNT_CYC_VAL_SHIFT	0
+#define  PM_COUNTERS_GNT_CYC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <ARB_CYC_CNTR> - read-only
+ *
+ * Number of cycles that there was gnt with request of more than one agent
+ */
+#define CNPL_PM_COUNTERS_ARB_CYC	0x4458
+
+/* value */
+#define  PM_COUNTERS_ARB_CYC_VAL_SHIFT	0
+#define  PM_COUNTERS_ARB_CYC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <POL_UP_ERR_CNTR> - read-only
+ *
+ * errors in policer update:
+ * the update period finished, and not all policers have been updated yet.
+ */
+#define CNPL_PM_COUNTERS_PL_UP_ERR	0x4460
+
+/* value */
+#define  PM_COUNTERS_PL_UP_ERR_VAL_SHIFT	0
+#define  PM_COUNTERS_PL_UP_ERR_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <GENERAL_CONFIG>
+ *
+ * bits rd_clr and wrap for the pm counters(above)
+ */
+#define CNPL_PM_COUNTERS_GEN_CFG	0x44fc
+
+/* read clear bit */
+#define  PM_COUNTERS_GEN_CFG_RD_CLR_MASK	0x1
+
+/* read clear bit */
+#define  PM_COUNTERS_GEN_CFG_WRAP_MASK	0x2
+
+
+/*
+ * Register <DBG_MUX_SEL>
+ *
+ * selects the debug vector
+ */
+#define CNPL_DEBUG_DBGSEL		0x4500
+
+/* selects the debug vector */
+#define  DEBUG_DBGSEL_VS_SHIFT		0
+#define  DEBUG_DBGSEL_VS_MASK		0x7f
+
+
+/*
+ * Register <DBG_BUS> - read-only
+ *
+ * the debug bus
+ */
+#define CNPL_DEBUG_DBGBUS		0x4504
+
+/* debug vector */
+#define  DEBUG_DBGBUS_VB_SHIFT		0
+#define  DEBUG_DBGBUS_VB_MASK		0x1fffff
+
+
+/*
+ * Register <REQUEST_VECTOR> - read-only
+ *
+ * vector of all the requests of the clients (tx fifo not empty)
+ */
+#define CNPL_DEBUG_REQ_VEC		0x4508
+
+/* still more commands for arbitration */
+#define  DEBUG_REQ_VEC_REQ_SHIFT	0
+#define  DEBUG_REQ_VEC_REQ_MASK		0x7f
+
+
+/*
+ * Register <POLICER_UPDATE_STATUS> - read-only
+ *
+ * which counter is updated, and where are we in the period cycle
+ */
+#define CNPL_DEBUG_POL_UP_ST		0x4510
+
+/* number of iteration we are(each represent 8192 cycles) */
+#define  DEBUG_POL_UP_ST_ITR_NUM_SHIFT	0
+#define  DEBUG_POL_UP_ST_ITR_NUM_MASK	0xff
+
+/*
+ * number of policer now updated.
+ * (80 means we finished updated of all policers for this period)
+*/
+#define  DEBUG_POL_UP_ST_POL_NUM_SHIFT	8
+#define  DEBUG_POL_UP_ST_POL_NUM_MASK	0xff00
+
+
+#endif /* ! XRDP_REGS_CNPL_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_dma.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_dma.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_dma.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_dma.h	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,404 @@
+#ifndef XRDP_REGS_DMA_H_
+#define XRDP_REGS_DMA_H_
+
+/* relative to core */
+#define DMA_OFFSET(x)			(0xd98800 + (x) * 0x400)
+
+/*
+ * Register <BB_ROUTE_OVERRIDE>
+ *
+ * Broadbus route override
+ */
+#define DMA_CONFIG_BBROUTEOVRD		0x0
+
+/* destination ID */
+#define  CONFIG_BBROUTEOVRD_DEST_SHIFT	0
+#define  CONFIG_BBROUTEOVRD_DEST_MASK	0x3f
+
+/* the route to be used (override the default route) */
+#define  CONFIG_BBROUTEOVRD_ROUTE_SHIFT	8
+#define  CONFIG_BBROUTEOVRD_ROUTE_MASK	0x3ff00
+
+/* override enable */
+#define  CONFIG_BBROUTEOVRD_OVRD_MASK	0x1000000
+
+
+/*
+ * Registers <NUM_OF_WRITE_REQ> - <x> is [ 0 => 7 ]
+ *
+ * This array of registers defines the memory allocation for the
+ * peripherals, for upstream.
+ * The allocation is of number of 128byte buffers out of the total 48
+ * buffers for both sdma and dma.
+ * The allocation is done by defining a only the number of allocated
+ * buffers.
+ * base address is calculated by HW, when base of peripheral 0 is 0.
+ * Note that the memory allocation should not contain wrap around.
+ * The number of allocated CDs is the same of data buffers.
+ */
+#define DMA_CONFIG_NUM_OF_WRITES(x)	(0x4 + (x) * 0x4)
+
+/* the number of 128bytes buffers allocated to the peripheral. */
+#define  CONFIG_NUM_OF_WRITES_NUMOFBUFF_SHIFT	0
+#define  CONFIG_NUM_OF_WRITES_NUMOFBUFF_MASK	0x3f
+
+
+/*
+ * Registers <NUM_OF_READ_REQ> - <x> is [ 0 => 7 ]
+ *
+ * This array of registers controls the number of read requests of each
+ * peripheral within the read requests RAM.
+ * total of 64 requests are divided between peripherals.
+ * Base address of peripheral 0 is 0, base of peripheral 1 is 0 +
+ * periph0_num_of_read_requests and so on.
+ */
+#define DMA_CONFIG_NUM_OF_READS(x)	(0x24 + (x) * 0x4)
+
+/* number of read requests */
+#define  CONFIG_NUM_OF_READS_RR_NUM_SHIFT	0
+#define  CONFIG_NUM_OF_READS_RR_NUM_MASK	0x3f
+
+
+/*
+ * Registers <URGENT_THRESHOLDS> - <x> is [ 0 => 7 ]
+ *
+ * the in/out of urgent thresholds mark the number of write requests in the
+ * queue in which the peripherals priority is changed.
+ * The two thresholds should create hysteresis.
+ * The moving into urgent threshold must always be greater than the moving
+ * out of urgent threshold.
+ */
+#define DMA_CONFIG_U_THRESH(x)		(0x44 + (x) * 0x4)
+
+/* moving into urgent threshold */
+#define  CONFIG_U_THRESH_INTO_U_SHIFT	0
+#define  CONFIG_U_THRESH_INTO_U_MASK	0x3f
+
+/* moving out of urgent threshold */
+#define  CONFIG_U_THRESH_OUT_OF_U_SHIFT	8
+#define  CONFIG_U_THRESH_OUT_OF_U_MASK	0x3f00
+
+
+/*
+ * Registers <STRICT_PRIORITY> - <x> is [ 0 => 7 ]
+ *
+ * The arbitration between the requests of the different peripherals is
+ * done in two stages:
+ * 1.
+ * Strict priority - chooses the peripherals with the highest priority
+ * among all peripherals who have a request pending.
+ * 2.
+ * Weighted Round-Robin between all peripherals with the same priority.
+ * This array of registers allow configuration of the priority of each
+ * peripheral (both rx and tx) in the following manner:
+ * There are 4 levels of priorities, when each bit in the register
+ * represents a different level of priority.
+ * One should assert the relevant bit according to the desired priority
+ * -For the lowest - 0001For the highest - 1000
+ */
+#define DMA_CONFIG_PRI(x)		(0x64 + (x) * 0x4)
+
+/* priority of rx side (upload) of the peripheral */
+#define  CONFIG_PRI_RXPRI_SHIFT		0
+#define  CONFIG_PRI_RXPRI_MASK		0xf
+
+/* priority of tx side (download) of the peripheral */
+#define  CONFIG_PRI_TXPRI_SHIFT		4
+#define  CONFIG_PRI_TXPRI_MASK		0xf0
+
+
+/*
+ * Registers <BB_SOURCE_DMA_PERIPH> - <x> is [ 0 => 7 ]
+ *
+ * Broadbus source address of the DMA peripherals.
+ * Register per peripheral (rx and tx).
+ * The source is used to determine the route address to the different
+ * peripherals.
+ */
+#define DMA_CONFIG_PERIPH_SOURCE(x)	(0x84 + (x) * 0x4)
+
+/* bb source of rx side (upload) of the peripheral */
+#define  CONFIG_PERIPH_SOURCE_RXSOURCE_SHIFT	0
+#define  CONFIG_PERIPH_SOURCE_RXSOURCE_MASK	0x3f
+
+/* bb source of tx side (download) of the peripheral */
+#define  CONFIG_PERIPH_SOURCE_TXSOURCE_SHIFT	8
+#define  CONFIG_PERIPH_SOURCE_TXSOURCE_MASK	0x3f00
+
+
+/*
+ * Registers <WEIGHT_OF_ROUND_ROBIN> - <x> is [ 0 => 7 ]
+ *
+ * The second phase of the arbitration between requests is weighted round
+ * robin between requests of peripherals with the same priority.
+ * This array of registers allows configuration of the weight of each
+ * peripheral (rx and tx).
+ * The actual weight will be weight + 1, meaning configuration of 0 is
+ * actual weight of 1.
+ */
+#define DMA_CONFIG_WEIGHT(x)		(0xa4 + (x) * 0x4)
+
+/* weight of rx side (upload) of the peripheral */
+#define  CONFIG_WEIGHT_RXWEIGHT_SHIFT	0
+#define  CONFIG_WEIGHT_RXWEIGHT_MASK	0x7
+
+/* weight of tx side (download) of the peripheral */
+#define  CONFIG_WEIGHT_TXWEIGHT_SHIFT	8
+#define  CONFIG_WEIGHT_TXWEIGHT_MASK	0x700
+
+
+/*
+ * Register <POINTERS_RESET>
+ *
+ * Resets the pointers of the peripherals FIFOs within the DMA.
+ * Bit per peripheral side (rx and tx).
+ * For rx side resets the data and CD FIFOs.
+ * For tx side resets the read requests FIFO.
+ */
+#define DMA_CONFIG_PTRRST		0xd0
+
+/*
+ * vector in which each bit represents a peripheral.
+ * LSB represent RX peripherals and MSB represent TX peripherals.
+ * When asserted, the relevant FIFOS of the selected peripheral will be
+ * reset to zero
+*/
+#define  CONFIG_PTRRST_RSTVEC_SHIFT	0
+#define  CONFIG_PTRRST_RSTVEC_MASK	0xffff
+
+
+/*
+ * Register <MAX_ON_THE_FLY>
+ *
+ * max number of on the fly read commands the DMA may issue to DDR before
+ * receiving any data.
+ */
+#define DMA_CONFIG_MAX_OTF		0xd4
+
+/* max on the fly */
+#define  CONFIG_MAX_OTF_MAX_SHIFT	0
+#define  CONFIG_MAX_OTF_MAX_MASK	0x3f
+
+
+/*
+ * Register <CLOCK_GATE_CONTROL>
+ *
+ * Clock Gate control register including timer config and bypass control
+ */
+#define DMA_CONFIG_CLK_GATE_CNTRL	0xd8
+
+/*
+ * If set to 1b1 will disable the clock gate logic such to always enable
+ * the clock
+*/
+#define  CONFIG_CLK_GATE_CNTRL_BYPASS_CLK_GATE_MASK	0x1
+
+/*
+ * For how long should the clock stay active once all conditions for clock
+ * disable are met.
+*/
+#define  CONFIG_CLK_GATE_CNTRL_TIMER_VAL_SHIFT	8
+#define  CONFIG_CLK_GATE_CNTRL_TIMER_VAL_MASK	0xff00
+
+/*
+ * Enables the keep alive logic which will periodically enable the clock to
+ * assure that no deadlock of clock being removed completely will occur
+*/
+#define  CONFIG_CLK_GATE_CNTRL_KEEP_ALIVE_EN_MASK	0x10000
+
+/*
+ * If the KEEP alive option is enabled the field will determine for how
+ * many cycles should the clock be active
+*/
+#define  CONFIG_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_SHIFT	20
+#define  CONFIG_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_MASK	0x700000
+
+/*
+ * If the KEEP alive option is enabled this field will determine for how
+ * many cycles should the clock be disabled (minus the
+ * KEEP_ALIVE_INTERVAL)So KEEP_ALIVE_CYCLE must be larger than
+ * KEEP_ALIVE_INTERVAL.
+*/
+#define  CONFIG_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_SHIFT	24
+#define  CONFIG_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_MASK	0xff000000
+
+
+/*
+ * Register <DBG_SEL>
+ *
+ * debug bus select
+ */
+#define DMA_CONFIG_DBG_SEL		0xe0
+
+/* select */
+#define  CONFIG_DBG_SEL_DBGSEL_SHIFT	0
+#define  CONFIG_DBG_SEL_DBGSEL_MASK	0x3
+
+
+/*
+ * Register <NOT_EMPTY_VECTOR> - read-only
+ *
+ * Each peripheral is represented in a bit on the not empty vector.
+ * LSB is for rx peripherals, MSB for tx peripherals.
+ * If the bit is asserted, the requests queue of the relevant peripheral is
+ * not empty.
+ * The not empty vector is used by the DMA scheduler to determine which
+ * peripheral is the next to be served.
+ */
+#define DMA_DEBUG_NEMPTY		0x100
+
+/* indication of the queue state */
+#define  DEBUG_NEMPTY_NEMPTY_SHIFT	0
+#define  DEBUG_NEMPTY_NEMPTY_MASK	0xffff
+
+
+/*
+ * Register <URGENT_VECTOR> - read-only
+ *
+ * Each peripheral is represented by a bit on the urgent vector.
+ * 8 LSB are rx peripherals, 8 MSB are tx peripherals.
+ * If the bit is asserted, the requests queue of the relevant peripheral is
+ * in urgent state.
+ * The urgent vector is used by the DMA scheduler to determine which
+ * peripheral is the next to be served.
+ */
+#define DMA_DEBUG_URGNT			0x104
+
+/* indication whether the queue is in urgent state or not */
+#define  DEBUG_URGNT_URGNT_SHIFT	0
+#define  DEBUG_URGNT_URGNT_MASK		0xffff
+
+
+/*
+ * Register <SELECTED_SOURCE_NUM> - read-only
+ *
+ * The decision of the dma scheduler and the next peripheral to be served,
+ * represented by its source address
+ */
+#define DMA_DEBUG_SELSRC		0x108
+
+/* the next peripheral to be served by the dma */
+#define  DEBUG_SELSRC_SEL_SRC_SHIFT	0
+#define  DEBUG_SELSRC_SEL_SRC_MASK	0x3f
+
+
+/*
+ * Registers <REQUEST_COUNTERS_RX> - <x> is [ 0 => 7 ] - read-only
+ *
+ * the number of write requests currently pending for each rx peripheral.
+ */
+#define DMA_DEBUG_REQ_CNT_RX(x)		(0x110 + (x) * 0x4)
+
+/* the number of pending write requests */
+#define  DEBUG_REQ_CNT_RX_REQ_CNT_SHIFT	0
+#define  DEBUG_REQ_CNT_RX_REQ_CNT_MASK	0x3f
+
+
+/*
+ * Registers <REQUEST_COUNTERS_TX> - <x> is [ 0 => 7 ] - read-only
+ *
+ * the number of read requests currently pending for each TX peripheral.
+ */
+#define DMA_DEBUG_REQ_CNT_TX(x)		(0x130 + (x) * 0x4)
+
+/* the number of pending read requests */
+#define  DEBUG_REQ_CNT_TX_REQ_CNT_SHIFT	0
+#define  DEBUG_REQ_CNT_TX_REQ_CNT_MASK	0x3f
+
+
+/*
+ * Registers <ACC_REQUEST_COUNTERS_RX> - <x> is [ 0 => 7 ] - read-only
+ *
+ * the accumulated number of write requests served so far for each
+ * peripheral.
+ * Wrap around on max value, not read clear.
+ */
+#define DMA_DEBUG_REQ_CNT_RX_ACC(x)	(0x150 + (x) * 0x4)
+
+/* the number of pending write requests */
+#define  DEBUG_REQ_CNT_RX_ACC_REQ_CNT_SHIFT	0
+#define  DEBUG_REQ_CNT_RX_ACC_REQ_CNT_MASK	0xffffffff
+
+
+/*
+ * Registers <ACC_REQUEST_COUNTERS_TX> - <x> is [ 0 => 7 ] - read-only
+ *
+ * the accumulated number of read requests served so far for each
+ * peripheral.
+ * Wrap around on max value, not read clear.
+ */
+#define DMA_DEBUG_REQ_CNT_TX_ACC(x)	(0x170 + (x) * 0x4)
+
+/* the number of pending write requests */
+#define  DEBUG_REQ_CNT_TX_ACC_REQ_CNT_SHIFT	0
+#define  DEBUG_REQ_CNT_TX_ACC_REQ_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <RAM_ADDRES>
+ *
+ * the address and cs of the ram the user wishes to read using the indirect
+ * access read mechanism.
+ */
+#define DMA_DEBUG_RDADD			0x200
+
+/* address within the ram */
+#define  DEBUG_RDADD_ADDRESS_SHIFT	0
+#define  DEBUG_RDADD_ADDRESS_MASK	0x3ff
+
+/* chip select for write data ram */
+#define  DEBUG_RDADD_DATACS_MASK	0x10000
+
+/* chip select for chunk descriptors ram */
+#define  DEBUG_RDADD_CDCS_MASK		0x20000
+
+/* chip select for read requests ram */
+#define  DEBUG_RDADD_RRCS_MASK		0x40000
+
+
+/*
+ * Register <INDIRECT_READ_REQUEST_VALID>
+ *
+ * After determining the address and cs, the user should assert this bit
+ * for indicating that the address and cs are valid.
+ */
+#define DMA_DEBUG_RDVALID		0x204
+
+/* indirect read request is valid */
+#define  DEBUG_RDVALID_VALID_MASK	0x1
+
+
+/*
+ * Registers <INDIRECT_READ_DATA> - <x> is [ 0 => 3 ] - read-only
+ *
+ * The returned read data from the selected RAM.
+ * Array of 4 registers (128 bits total).
+ * The width of the different memories is as follows:
+ * write data - 128 bitschunk descriptors - 36 bitsread requests - 42
+ * bitsread data - 64 bitsThe the memories with width smaller than 128, the
+ * data will appear in the first registers of the array, for example:
+ * data from the cd RAM will appear in - {reg1[5:
+ * 0], reg0[31:
+ * 0]}.
+ */
+#define DMA_DEBUG_RDDATA(x)		(0x208 + (x) * 0x4)
+
+/* read data from ram */
+#define  DEBUG_RDDATA_DATA_SHIFT	0
+#define  DEBUG_RDDATA_DATA_MASK		0xffffffff
+
+
+/*
+ * Register <READ_DATA_READY> - read-only
+ *
+ * When asserted, indicates that the data in the previous array is valid.
+ * Will remain asserted until the user deasserts the valid bit in register
+ * RDVALID.
+ */
+#define DMA_DEBUG_RDDATARDY		0x218
+
+/* read data ready */
+#define  DEBUG_RDDATARDY_READY_MASK	0x1
+
+
+#endif /* ! XRDP_REGS_DMA_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_dqm.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_dqm.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_dqm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_dqm.h	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,424 @@
+#ifndef XRDP_REGS_DQM_H_
+#define XRDP_REGS_DQM_H_
+
+/* relative to core */
+#define DQM_OFFSET_0			0x180034
+
+/*
+ * Register <DQMOL Max Entries in WORDS>
+ *
+ * Maximum number of entries in words for all the queues.
+ */
+#define DQM_MAX_ENTRIES_WORDS		0x0
+
+/*
+ * Represents the maximum number of entries the queue can hold (in words).
+ * This is a global setting.
+*/
+#define  MAX_ENTRIES_WORDS_MAX_SHIFT	0
+#define  MAX_ENTRIES_WORDS_MAX_MASK	0x7ffff
+
+
+/*
+ * Register <DQMOL FPM Address>
+ *
+ * FPM Address Register
+ */
+#define DQM_FPM_ADDR			0x18
+
+/*
+ * This is the FPM address to be used by components in this module.
+ * The same address is used to alloc and free a token in the FPM.
+*/
+#define  FPM_ADDR_FPMADDRESS_SHIFT	0
+#define  FPM_ADDR_FPMADDRESS_MASK	0xffffffff
+
+
+/*
+ * Register <DQMOL IRQ Status>
+ *
+ * DQMOL Interrupt Status Register.
+ */
+#define DQM_IRQ_STS			0x1c
+
+/*
+ * DQMOL Pushing a Full Queue IRQ Status (RW1C).
+ * This is a sticky high bit and needs to be cleared by writing to it.
+*/
+#define  IRQ_STS_PUSHFULLQ_MASK		0x2
+
+/*
+ * DQMOL Popping an Empty Queue IRQ Status (RW1C).
+ * This is a sticky high bit and needs to be cleared by writing to it.
+*/
+#define  IRQ_STS_POPEMPTYQ_MASK		0x1
+
+
+/*
+ * Register <DQMOL IRQ Mask>
+ *
+ * DQMOL Interrupt Mask Register.
+ */
+#define DQM_IRQ_MSK			0x20
+
+/* DQMOL Pushing a Full Queue IRQ Mask */
+#define  IRQ_MSK_PUSHFULLQ_MASK		0x2
+
+/* DQMOL Popping an Empty Queue IRQ Mask */
+#define  IRQ_MSK_POPEMPTYQ_MASK		0x1
+
+
+/*
+ * Register <DQMOL Token Buffer Size>
+ *
+ * Token buffer size.
+ */
+#define DQM_BUF_SIZE			0x24
+
+/*
+ * Buffer Size.
+ * This is an encoded value.
+ * 0 => 256 byte buffer, 1 => 512 byte buffer, 2 => 1024 byte buffer, 3 =>
+ * 2048 byte buffer.
+*/
+#define  BUF_SIZE_POOL_0_SIZE_SHIFT	0
+#define  BUF_SIZE_POOL_0_SIZE_MASK	0x3
+
+
+/*
+ * Register <DQMOL Token Buffer Base>
+ *
+ * Token buffer base address
+ */
+#define DQM_BUF_BASE			0x28
+
+/*
+ * Buffer base address for bits[39:
+ * 8].
+ * Address bits [7:
+ * 0] is always assumed to be 0.
+*/
+#define  BUF_BASE_BASE_SHIFT		0
+#define  BUF_BASE_BASE_MASK		0xffffffff
+
+
+/*
+ * Register <DQMOL Token Used>
+ *
+ * Shows the number of tokens used by DQMOL
+ */
+#define DQM_TOKENS_USED			0x30
+
+/*
+ * Represents the current number of tokens used by the queue data
+ * structure.
+ * This count does not include tokens that are prefetched.
+*/
+#define  TOKENS_USED_COUNT_SHIFT	0
+#define  TOKENS_USED_COUNT_MASK		0xffffffff
+
+
+/*
+ * Register <DQMOL Num Pushed Count>
+ *
+ * counter for number of pushed transactions
+ */
+#define DQM_NUM_PUSHED			0x34
+
+/* Represents the current number of pushed transaction across all queues */
+#define  NUM_PUSHED_COUNT_SHIFT		0
+#define  NUM_PUSHED_COUNT_MASK		0xffffffff
+
+
+/*
+ * Register <DQMOL Num Popped Count>
+ *
+ * counter for number of popped transactions
+ */
+#define DQM_NUM_POPPED			0x38
+
+/* Represents the current number of popped transaction across all queues */
+#define  NUM_POPPED_COUNT_SHIFT		0
+#define  NUM_POPPED_COUNT_MASK		0xffffffff
+
+
+/*
+ * Register <DQMOL Diag Readback>
+ *
+ * MUX Select for Diags
+ */
+#define DQM_DIAG_SEL			0x3c
+
+/* MUX Select for routing diag data to the Diag Data Register */
+#define  DIAG_SEL_SEL_SHIFT		0
+#define  DIAG_SEL_SEL_MASK		0xff
+
+
+/*
+ * Register <DQMOL Diag Data> - read-only
+ *
+ */
+#define DQM_DIAG_DATA			0x40
+
+/* data presented as diag readback data. */
+#define  DIAG_DATA_DATA_SHIFT		0
+#define  DIAG_DATA_DATA_MASK		0xffffffff
+
+
+/*
+ * Register <DQMOL IRQ Test>
+ *
+ * DQMOL Interrupt Test Register.
+ */
+#define DQM_IRQ_TST			0x44
+
+/* Test the PushFullQ irq */
+#define  IRQ_TST_PUSHFULLQTST_MASK	0x2
+
+/* Test the PopEmptyQ irq */
+#define  IRQ_TST_POPEMPTYQTST_MASK	0x1
+
+
+/*
+ * Register <DQMOL Token FIFO Status> - read-only
+ *
+ * content from prefetch token fifo
+ */
+#define DQM_TOKEN_FIFO_STATUS		0x48
+
+/* token fifo full */
+#define  TOKEN_FIFO_STATUS_FULL_MASK	0x20000
+
+/* token fifo empty */
+#define  TOKEN_FIFO_STATUS_EMPTY_MASK	0x10000
+
+/* token fifo depth count */
+#define  TOKEN_FIFO_STATUS_LEVEL_SHIFT	8
+#define  TOKEN_FIFO_STATUS_LEVEL_MASK	0x1f00
+
+/* token fifo read pointer */
+#define  TOKEN_FIFO_STATUS_RD_LOC_SHIFT	0
+#define  TOKEN_FIFO_STATUS_RD_LOC_MASK	0xf
+
+
+/*
+ * Registers <DQMOL TokenFifo> - <x> is [ 0 => 15 ] - read-only
+ *
+ * content from prefetch token fifo
+ */
+#define DQM_TOKEN_FIFO(x)		(0x4c + (x) * 0x4)
+
+/* Token value read from the token fifo */
+#define  TOKEN_FIFO_TOKEN_SHIFT		0
+#define  TOKEN_FIFO_TOKEN_MASK		0xffffffff
+
+
+/*
+ * Register <DQMOL Num Popped No Commit Count>
+ *
+ * counter for number of popped with no commit transactions
+ */
+#define DQM_NUM_POPPED_NO_COMMIT	0x8c
+
+/*
+ * Represents the current number of popped with no-commit transaction
+ * across all queues
+*/
+#define  NUM_POPPED_NO_COMMIT_COUNT_SHIFT	0
+#define  NUM_POPPED_NO_COMMIT_COUNT_MASK	0xffffffff
+
+
+/*
+ * Registers <Queue Status> - <x> is [ 0 => 159 ] - read-only
+ *
+ * Amount of unused token space available in the queue.
+ * This register is available on the DSPRAM read bus.
+ */
+#define DQM_STATUS(x)			(0x7cc + (x) * 0x4)
+
+/* Queue data for the current Line is stored locally in the QSM. */
+#define  STATUS_CURR_LINE_DATA_IS_LOCAL_MASK	0x80000000
+
+/* Queue data for the next Line is stored locally in the QSM. */
+#define  STATUS_NEXT_LINE_DATA_IS_LOCAL_MASK	0x40000000
+
+/* Queue Available Unused Token Space (in words). */
+#define  STATUS_Q_AVL_TKN_SPACE_SHIFT	0
+#define  STATUS_Q_AVL_TKN_SPACE_MASK	0x7ffff
+
+
+/*
+ * Registers <Queue Head Pointer> - <x> is [ 0 => 159 ] - read-only
+ *
+ */
+#define DQM_HEAD_PTR(x)			(0xfcc + (x) * 0x8)
+
+/*
+ * Queue Head Pointer (in words).
+ * This is a read-only field and will reset to 0 whenever CNTRL_CFGB is
+ * programmed
+*/
+#define  HEAD_PTR_Q_HEAD_PTR_SHIFT	0
+#define  HEAD_PTR_Q_HEAD_PTR_MASK	0xfffffff
+
+
+/*
+ * Registers <Queue Tail Pointer> - <x> is [ 0 => 159 ] - read-only
+ *
+ */
+#define DQM_TAIL_PTR(x)			(0xfd0 + (x) * 0x8)
+
+/*
+ * Queue Tail Pointer (in words).
+ * This is a read-only field and will reset to 0 whenever CNTRL_CFGB is
+ * programmed
+*/
+#define  TAIL_PTR_Q_TAIL_PTR_SHIFT	0
+#define  TAIL_PTR_Q_TAIL_PTR_MASK	0xfffffff
+
+
+/*
+ * Registers <Queue Size> - <x> is [ 0 => 159 ] - read-only
+ *
+ * Number of token space available in Queue
+ */
+#define DQM_DQMOL_SIZE(x)		(0x1fcc + (x) * 0x20)
+
+/* Maximum number of entries allotted to the queue before it's full */
+#define  DQMOL_SIZE_MAX_ENTRIES_SHIFT	4
+#define  DQMOL_SIZE_MAX_ENTRIES_MASK	0x7ffff0
+
+/*
+ * When set, this puts the DQM OL queue into legacy DQM mode, there's no
+ * offloading of data.
+ * All queue data are stored in the QSM memory.
+*/
+#define  DQMOL_SIZE_Q_DISABLE_OFFLOAD_MASK	0x8
+
+/*
+ * Queue Token Size (in words).
+ * This is a base-0 value.
+ * A value of 0 means the token is 1 word long.
+ * A value of 1 means the token is 2 words long.
+ * This maxes out at a value of 3 to mean that a token is 4 words long.
+*/
+#define  DQMOL_SIZE_Q_TKN_SIZE_SHIFT	0
+#define  DQMOL_SIZE_Q_TKN_SIZE_MASK	0x3
+
+
+/*
+ * Registers <Queue Config A> - <x> is [ 0 => 159 ] - read-only
+ *
+ * Starting queue address and size of memory space
+ */
+#define DQM_DQMOL_CFGA(x)		(0x1fd0 + (x) * 0x20)
+
+/*
+ * Queue Memory Size (in words).
+ * It is required that the Queue Memory Size be whole multiple of the
+ * QUEUE_x_CNTRL_SIZE.
+ * Q_TKN_SIZE.
+ * For example, if Q_TKN_SIZE == 2 (which represents a 3 word token), then
+ * the Queue Memory Size must be 3, 6, 9, 12, etc.
+*/
+#define  DQMOL_CFGA_Q_SIZE_SHIFT	16
+#define  DQMOL_CFGA_Q_SIZE_MASK		0xffff0000
+
+/*
+ * Queue Start Address (word addr).
+ * The hardware takes this word address and adds the base address of the
+ * Queue Shared Memory (0x4000 byte addr) to form the physical address for
+ * the Queue.
+*/
+#define  DQMOL_CFGA_Q_START_ADDR_SHIFT	0
+#define  DQMOL_CFGA_Q_START_ADDR_MASK	0xffff
+
+
+/*
+ * Registers <Queue Config B> - <x> is [ 0 => 159 ]
+ *
+ * Number of tokens and low watermark setting
+ */
+#define DQM_DQMOL_CFGB(x)		(0x1fd4 + (x) * 0x20)
+
+/* When set, the DQMOL is enabled and ready for use. */
+#define  DQMOL_CFGB_ENABLE_MASK		0x80000000
+
+
+/*
+ * Registers <Queue Push Token> - <x> is [ 0 => 159 ]
+ *
+ * Current Token Register
+ */
+#define DQM_DQMOL_PUSHTOKEN(x)		(0x1fdc + (x) * 0x20)
+
+/*
+ * Queue Token.
+ * This is the current token the offload hardware is using for this queue.
+*/
+#define  DQMOL_PUSHTOKEN_TOKEN_SHIFT	0
+#define  DQMOL_PUSHTOKEN_TOKEN_MASK	0xffffffff
+
+
+/*
+ * Registers <Queue Next Push Token> - <x> is [ 0 => 159 ]
+ *
+ * Current Token Register
+ */
+#define DQM_DQMOL_PUSHTOKENNEXT(x)	(0x1fe0 + (x) * 0x20)
+
+/*
+ * Queue Token.
+ * This is the current token the offload hardware is using for this queue.
+*/
+#define  DQMOL_PUSHTOKENNEXT_TOKEN_SHIFT	0
+#define  DQMOL_PUSHTOKENNEXT_TOKEN_MASK	0xffffffff
+
+
+/*
+ * Registers <Queue Pop Token> - <x> is [ 0 => 159 ]
+ *
+ * Current Token Register
+ */
+#define DQM_DQMOL_POPTOKEN(x)		(0x1fe4 + (x) * 0x20)
+
+/*
+ * Queue Token.
+ * This is the current token the offload hardware is using for this queue.
+*/
+#define  DQMOL_POPTOKEN_TOKEN_SHIFT	0
+#define  DQMOL_POPTOKEN_TOKEN_MASK	0xffffffff
+
+
+/*
+ * Registers <Queue Next Pop Token> - <x> is [ 0 => 159 ]
+ *
+ * Current Token Register
+ */
+#define DQM_DQMOL_POPTOKENNEXT(x)	(0x1fe8 + (x) * 0x20)
+
+/*
+ * Queue Token.
+ * This is the current token the offload hardware is using for this queue.
+*/
+#define  DQMOL_POPTOKENNEXT_TOKEN_SHIFT	0
+#define  DQMOL_POPTOKENNEXT_TOKEN_MASK	0xffffffff
+
+
+/*
+ * Registers <QSM Shared memory space.> - <x> is [ 0 => 15360 ]
+ *
+ * Note that the UTP has 48KB of memory space.
+ * With DFAP/GFAP/DTP, there are only 16KB of shared memory space.
+ * The entire memory space is carved out here as a placeholder in the
+ * DFAP/GFAP/DTP's case.
+ * QSM memory
+ */
+#define DQM_QueueSharedMem_qsmdata(x)	(0x1ffcc + (x) * 0x4)
+
+/* data */
+#define  QueueSharedMem_qsmdata_DATA_SHIFT	0
+#define  QueueSharedMem_qsmdata_DATA_MASK	0xffffffff
+
+
+#endif /* ! XRDP_REGS_DQM_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_dsptchr.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_dsptchr.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_dsptchr.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_dsptchr.h	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,2288 @@
+#ifndef XRDP_REGS_DSPTCHR_H_
+#define XRDP_REGS_DSPTCHR_H_
+
+/* relative to core */
+#define DSPTCHR_OFFSET_0		0xd80000
+
+/*
+ * Register <DISPATCHER_REORDER_EN>
+ *
+ * Enable of dispatcher reorder
+ */
+#define DSPTCHR_REORDER_CFG_DSPTCHR_REORDR_CFG	0x0
+
+/* Enable dispatcher reorder block */
+#define  REORDER_CFG_DSPTCHR_REORDR_CFG_EN_MASK	0x1
+
+/* Dispatcher reorder block is RDY */
+#define  REORDER_CFG_DSPTCHR_REORDR_CFG_RDY_MASK	0x100
+
+/*
+ * Enables parallel operation of Re-Order scheduler to Re-Order SM.
+ * Reduces Re-Order cycle from 16 clocks to 7.
+*/
+#define  REORDER_CFG_DSPTCHR_REORDR_CFG_REORDR_PAR_MOD_MASK	0x10000
+
+/* Enable per Q Egress congestion monitoring */
+#define  REORDER_CFG_DSPTCHR_REORDR_CFG_PER_Q_EGRS_CONGST_EN_MASK	0x20000
+
+/*
+ * Enables Enhanced performance mode of Dispatcher Load balancing and
+ * Dispatcher SM.
+ * This allows dispatch of a PD to the RNR every 11 clocks instead of
+ * every 14 clocks.
+*/
+#define  REORDER_CFG_DSPTCHR_REORDR_CFG_DSPTCHR_PER_ENH_POD_MASK	0x40000
+
+
+/*
+ * Register <VIRTUAL_Q_EN>
+ *
+ * Enable control for each VIQ/VEQ
+ */
+#define DSPTCHR_REORDER_CFG_VQ_EN	0x4
+
+/* Enable Virtual Q control - 32 bit vector. */
+#define  REORDER_CFG_VQ_EN_EN_SHIFT	0
+#define  REORDER_CFG_VQ_EN_EN_MASK	0xffffffff
+
+
+/*
+ * Register <BROADBUS_CONFIG>
+ *
+ * Allow override of a specific BB destination with a new Route ADDR
+ */
+#define DSPTCHR_REORDER_CFG_BB_CFG	0x8
+
+/* Source ID - Dispatcher */
+#define  REORDER_CFG_BB_CFG_SRC_ID_SHIFT	0
+#define  REORDER_CFG_BB_CFG_SRC_ID_MASK	0x3f
+
+/* Enable dispatcher reorder block */
+#define  REORDER_CFG_BB_CFG_DST_ID_OVRIDE_SHIFT	8
+#define  REORDER_CFG_BB_CFG_DST_ID_OVRIDE_MASK	0x3f00
+
+/* Use this route address instead of pre-configured */
+#define  REORDER_CFG_BB_CFG_ROUTE_OVRIDE_SHIFT	16
+#define  REORDER_CFG_BB_CFG_ROUTE_OVRIDE_MASK	0x3ff0000
+
+/* Enable dispatcher reorder block */
+#define  REORDER_CFG_BB_CFG_OVRIDE_EN_MASK	0x10000000
+
+
+/*
+ * Register <CLOCK_GATE_CONTROL>
+ *
+ * Clock Gate control register including timer config and bypass control
+ */
+#define DSPTCHR_REORDER_CFG_CLK_GATE_CNTRL	0xc
+
+/*
+ * If set to 1b1 will disable the clock gate logic such to always enable
+ * the clock
+*/
+#define  REORDER_CFG_CLK_GATE_CNTRL_BYPASS_CLK_GATE_MASK	0x1
+
+/*
+ * For how long should the clock stay active once all conditions for clock
+ * disable are met.
+*/
+#define  REORDER_CFG_CLK_GATE_CNTRL_TIMER_VAL_SHIFT	8
+#define  REORDER_CFG_CLK_GATE_CNTRL_TIMER_VAL_MASK	0xff00
+
+/*
+ * Enables the keep alive logic which will periodically enable the clock to
+ * assure that no deadlock of clock being removed completely will occur
+*/
+#define  REORDER_CFG_CLK_GATE_CNTRL_KEEP_ALIVE_EN_MASK	0x10000
+
+/*
+ * If the KEEP alive option is enabled the field will determine for how
+ * many cycles should the clock be active
+*/
+#define  REORDER_CFG_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_SHIFT	20
+#define  REORDER_CFG_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_MASK	0x700000
+
+/*
+ * If the KEEP alive option is enabled this field will determine for how
+ * many cycles should the clock be disabled (minus the
+ * KEEP_ALIVE_INTERVAL)So KEEP_ALIVE_CYCLE must be larger than
+ * KEEP_ALIVE_INTERVAL.
+*/
+#define  REORDER_CFG_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_SHIFT	24
+#define  REORDER_CFG_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_MASK	0xff000000
+
+
+/*
+ * Registers <INGRESS_CONGESTION_THRESHOLD> - <x> is [ 0 => 31 ]
+ *
+ * Ingress Queues congestion state.
+ */
+#define DSPTCHR_CONGESTION_INGRS_CONGSTN(x)	(0x80 + (x) * 0x4)
+
+/* First Level congestion threshold. */
+#define  CONGESTION_INGRS_CONGSTN_FRST_LVL_SHIFT	0
+#define  CONGESTION_INGRS_CONGSTN_FRST_LVL_MASK	0xfff
+
+/* Second Level congestion threshold. */
+#define  CONGESTION_INGRS_CONGSTN_SCND_LVL_SHIFT	12
+#define  CONGESTION_INGRS_CONGSTN_SCND_LVL_MASK	0xfff000
+
+/*
+ * Hysteresis value at which to stop the congestion indication.
+ * Once reaching a congestion level, only after crossing (threshold_level
+ * - HYST_TRSH) will the congestion indication be removed
+*/
+#define  CONGESTION_INGRS_CONGSTN_HYST_THRS_SHIFT	24
+#define  CONGESTION_INGRS_CONGSTN_HYST_THRS_MASK	0xff000000
+
+
+/*
+ * Registers <EGRESS_CONGESTION_THRESHOLD> - <x> is [ 0 => 31 ]
+ *
+ * Egress Queues congestion state per Q.
+ */
+#define DSPTCHR_CONGESTION_EGRS_CONGSTN(x)	(0x100 + (x) * 0x4)
+
+/* First Level congestion threshold. */
+#define  CONGESTION_EGRS_CONGSTN_FRST_LVL_SHIFT	0
+#define  CONGESTION_EGRS_CONGSTN_FRST_LVL_MASK	0xfff
+
+/* Second Level congestion threshold. */
+#define  CONGESTION_EGRS_CONGSTN_SCND_LVL_SHIFT	12
+#define  CONGESTION_EGRS_CONGSTN_SCND_LVL_MASK	0xfff000
+
+/*
+ * Hysteresis value at which to stop the congestion indication.
+ * Once reaching a congestion level, only after crossing (threshold_level
+ * - HYST_TRSH) will the congestion indication be removed
+*/
+#define  CONGESTION_EGRS_CONGSTN_HYST_THRS_SHIFT	24
+#define  CONGESTION_EGRS_CONGSTN_HYST_THRS_MASK	0xff000000
+
+
+/*
+ * Register <TOTAL_EGRESS_CONGESTION_THRESHOLD>
+ *
+ * Egress congestion states (Total Count)
+ */
+#define DSPTCHR_CONGESTION_TOTAL_EGRS_CONGSTN	0x180
+
+/* First Level congestion threshold. */
+#define  CONGESTION_TOTAL_EGRS_CONGSTN_FRST_LVL_SHIFT	0
+#define  CONGESTION_TOTAL_EGRS_CONGSTN_FRST_LVL_MASK	0xfff
+
+/* Second Level congestion threshold. */
+#define  CONGESTION_TOTAL_EGRS_CONGSTN_SCND_LVL_SHIFT	12
+#define  CONGESTION_TOTAL_EGRS_CONGSTN_SCND_LVL_MASK	0xfff000
+
+/*
+ * Hysteresis value at which to stop the congestion indication.
+ * Once reaching a congestion level, only after crossing (threshold_level
+ * - HYST_TRSH) will the congestion indication be removed
+*/
+#define  CONGESTION_TOTAL_EGRS_CONGSTN_HYST_THRS_SHIFT	24
+#define  CONGESTION_TOTAL_EGRS_CONGSTN_HYST_THRS_MASK	0xff000000
+
+
+/*
+ * Register <GLOBAL_CONGESTION_THRESHOLD>
+ *
+ * Congestion levels of FLL state.
+ * Once no more BDs are available, a congestion indication will be raised
+ * on all PDs.
+ */
+#define DSPTCHR_CONGESTION_GLBL_CONGSTN	0x184
+
+/* First Level congestion threshold. */
+#define  CONGESTION_GLBL_CONGSTN_FRST_LVL_SHIFT	0
+#define  CONGESTION_GLBL_CONGSTN_FRST_LVL_MASK	0xfff
+
+/* Second Level congestion threshold. */
+#define  CONGESTION_GLBL_CONGSTN_SCND_LVL_SHIFT	12
+#define  CONGESTION_GLBL_CONGSTN_SCND_LVL_MASK	0xfff000
+
+/*
+ * Hysteresis value at which to stop the congestion indication.
+ * Once reaching a congestion level, only after crossing (threshold_level
+ * - HYST_TRSH) will the congestion indication be removed
+*/
+#define  CONGESTION_GLBL_CONGSTN_HYST_THRS_SHIFT	24
+#define  CONGESTION_GLBL_CONGSTN_HYST_THRS_MASK	0xff000000
+
+
+/*
+ * Register <CONGESTION_STATUS> - read-only
+ *
+ * This register reflects the current congestion levels in the dispatcher.
+ */
+#define DSPTCHR_CONGESTION_CONGSTN_STATUS	0x188
+
+/* Global congestion levels (according to FLL buffer availability) */
+#define  CONGESTION_CONGSTN_STATUS_GLBL_CONGSTN_SHIFT	0
+#define  CONGESTION_CONGSTN_STATUS_GLBL_CONGSTN_MASK	0x3
+
+/* Global Egress congestion levels */
+#define  CONGESTION_CONGSTN_STATUS_GLBL_EGRS_CONGSTN_SHIFT	8
+#define  CONGESTION_CONGSTN_STATUS_GLBL_EGRS_CONGSTN_MASK	0x300
+
+/* SBPM congestion levels according to SBPM messages */
+#define  CONGESTION_CONGSTN_STATUS_SBPM_CONGSTN_SHIFT	16
+#define  CONGESTION_CONGSTN_STATUS_SBPM_CONGSTN_MASK	0x30000
+
+
+/*
+ * Register <PER_Q_LOW_INGRESS_CONGESTION_STATUS> - read-only
+ *
+ * Note that this vector is only updated during the dispatch stage
+ */
+#define DSPTCHR_CONGESTION_PER_Q_INGRS_CONGSTN_LOW	0x18c
+
+/* 1 - Passed Threshold0 - Did not pass threshold */
+#define  CONGESTION_PER_Q_INGRS_CONGSTN_LOW_CONGSTN_STATE_SHIFT	0
+#define  CONGESTION_PER_Q_INGRS_CONGSTN_LOW_CONGSTN_STATE_MASK	0xffffffff
+
+
+/*
+ * Register <PER_Q_HIGH_INGRESS_CONGESTION_STATUS> - read-only
+ *
+ * Note that this vector is only updated during the dispatch stage
+ */
+#define DSPTCHR_CONGESTION_PER_Q_INGRS_CONGSTN_HIGH	0x190
+
+/* 1 - Passed Threshold0 - Did not pass threshold */
+#define  CONGESTION_PER_Q_INGRS_CONGSTN_HIGH_CONGSTN_STATE_SHIFT	0
+#define  CONGESTION_PER_Q_INGRS_CONGSTN_HIGH_CONGSTN_STATE_MASK	0xffffffff
+
+
+/*
+ * Register <PER_Q_LOW_EGRESS_CONGESTION_STATUS> - read-only
+ *
+ * Note that this vector is only updated during the dispatch stage
+ */
+#define DSPTCHR_CONGESTION_PER_Q_EGRS_CONGSTN_LOW	0x194
+
+/* 1 - Passed Threshold0 - Did not pass threshold */
+#define  CONGESTION_PER_Q_EGRS_CONGSTN_LOW_CONGSTN_STATE_SHIFT	0
+#define  CONGESTION_PER_Q_EGRS_CONGSTN_LOW_CONGSTN_STATE_MASK	0xffffffff
+
+
+/*
+ * Register <PER_Q_HIGH_EGRESS_CONGESTION_STATUS> - read-only
+ *
+ * Note that this vector is only updated during the dispatch stage
+ */
+#define DSPTCHR_CONGESTION_PER_Q_EGRS_CONGSTN_HIGH	0x198
+
+/* 1 - Passed Threshold0 - Did not pass threshold */
+#define  CONGESTION_PER_Q_EGRS_CONGSTN_HIGH_CONGSTN_STATE_SHIFT	0
+#define  CONGESTION_PER_Q_EGRS_CONGSTN_HIGH_CONGSTN_STATE_MASK	0xffffffff
+
+
+/*
+ * Registers <QUEUE_INGRS_SIZE> - <x> is [ 0 => 31 ]
+ *
+ * Q Ingress size
+ */
+#define DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(x)	(0x280 + (x) * 0x4)
+
+/* Common number of buffers allocated to this Q. */
+#define  INGRS_QUEUES_Q_INGRS_SIZE_CMN_CNT_SHIFT	0
+#define  INGRS_QUEUES_Q_INGRS_SIZE_CMN_CNT_MASK	0x3ff
+
+
+/*
+ * Registers <QUEUE_INGRS_LIMITS> - <x> is [ 0 => 31 ]
+ *
+ * Q Ingress Limits
+ */
+#define DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(x)	(0x300 + (x) * 0x4)
+
+/*
+ * Maximum number of buffers allowed to be allocated to the specific VIQ
+ * from the common Pool
+*/
+#define  INGRS_QUEUES_Q_INGRS_LIMITS_CMN_MAX_SHIFT	0
+#define  INGRS_QUEUES_Q_INGRS_LIMITS_CMN_MAX_MASK	0x3ff
+
+/*
+ * Maximum number of buffers allowed to be allocated to the specific VIQ
+ * from the guaranteed Pool
+*/
+#define  INGRS_QUEUES_Q_INGRS_LIMITS_GURNTD_MAX_SHIFT	10
+#define  INGRS_QUEUES_Q_INGRS_LIMITS_GURNTD_MAX_MASK	0xffc00
+
+/*
+ * Holds the value of the accumulated credits.
+ * This is sent to the BBH/RNR.
+ * BBH disregards the value.
+ * RNR uses it to calculate the amount of available credits.
+*/
+#define  INGRS_QUEUES_Q_INGRS_LIMITS_CREDIT_CNT_SHIFT	20
+#define  INGRS_QUEUES_Q_INGRS_LIMITS_CREDIT_CNT_MASK	0xfff00000
+
+
+/*
+ * Registers <QUEUE_INGRS_COHERENCY> - <x> is [ 0 => 31 ]
+ *
+ * Q Coherency counter
+ */
+#define DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(x)	(0x380 + (x) * 0x4)
+
+/*
+ * Coherency counter value.
+ * BBH sends a coherency message per PD.
+ * Coherency messages are counted and only if there is at least 1 coherency
+ * message can a PD be forwarded to the RNR for processing.
+*/
+#define  INGRS_QUEUES_Q_INGRS_COHRENCY_CHRNCY_CNT_SHIFT	0
+#define  INGRS_QUEUES_Q_INGRS_COHRENCY_CHRNCY_CNT_MASK	0x3ff
+
+/*
+ * Enable coherency counting.
+ * In case RNR is allocated to a specific VIQ it will not send coherency
+ * messages so there is no need to take them into consideration during PD
+ * dispatch
+*/
+#define  INGRS_QUEUES_Q_INGRS_COHRENCY_CHRNCY_EN_MASK	0x400
+
+/* Reserve */
+#define  INGRS_QUEUES_Q_INGRS_COHRENCY_RSRV_SHIFT	11
+#define  INGRS_QUEUES_Q_INGRS_COHRENCY_RSRV_MASK	0xfffff800
+
+
+/*
+ * Registers <CREDIT_CONFIGURATION> - <x> is [ 0 => 31 ]
+ *
+ * Configuration for each Q including BB_ID, Target address, valid
+ */
+#define DSPTCHR_QUEUE_MAPPING_CRDT_CFG(x)	(0x400 + (x) * 0x4)
+
+/*
+ * BroadBus ID:
+ * To which BroadBus agent (RNR/BBH) is the current Q associated with
+*/
+#define  QUEUE_MAPPING_CRDT_CFG_BB_ID_SHIFT	0
+#define  QUEUE_MAPPING_CRDT_CFG_BB_ID_MASK	0xff
+
+/*
+ * Target address within the BB agent where the credit message should be
+ * written to.
+ * In case of RNR:
+ * 27:16 - Ram address
+ * 31:28 - Task number to wakeup
+*/
+#define  QUEUE_MAPPING_CRDT_CFG_TRGT_ADD_SHIFT	16
+#define  QUEUE_MAPPING_CRDT_CFG_TRGT_ADD_MASK	0xffff0000
+
+#define QUEUE_MAPPING_CRDT_CFG_TRGT_ADD_NORMAL	2
+#define QUEUE_MAPPING_CRDT_CFG_TRGT_ADD_EXCL	2
+
+/*
+ * Registers <DISPATCH_ADDRESS> - <x> is [ 0 => 15 ]
+ *
+ * Dispatched address will be calculated ADD= BASE_ADD + (TASK_NUM x OFFSET)
+ *
+ */
+#define DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(x)	(0x480 + (x) * 0x4)
+
+/* Base address within each RNR, 8 bytes unit (1 == 0x8) */
+#define  QUEUE_MAPPING_PD_DSPTCH_ADD_BASE_ADD_SHIFT	0
+#define  QUEUE_MAPPING_PD_DSPTCH_ADD_BASE_ADD_MASK	0xffff
+
+/*
+ * OFFSET address, in conjunction with base address for each task there
+ * will be a different address to where to send the PDADD = BASE_ADD +
+ * (OFFSET_ADD x TASK)PD size is 128bits
+ *
+ * 8 bytes unit
+*/
+#define  QUEUE_MAPPING_PD_DSPTCH_ADD_OFFSET_ADD_SHIFT	16
+#define  QUEUE_MAPPING_PD_DSPTCH_ADD_OFFSET_ADD_MASK	0xffff0000
+
+
+/*
+ * Register <Q_DESTINATION>
+ *
+ * What is the destination of each VIQ.
+ * to Dispatcher and from there to Processing RNR or Reorder and from there
+ * to the QM
+ */
+#define DSPTCHR_QUEUE_MAPPING_Q_DEST	0x4c0
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q0_MASK	0x1
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q1_MASK	0x2
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q2_MASK	0x4
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q3_MASK	0x8
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q4_MASK	0x10
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q5_MASK	0x20
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q6_MASK	0x40
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q7_MASK	0x80
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q8_MASK	0x100
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q9_MASK	0x200
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q10_MASK	0x400
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q11_MASK	0x800
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q12_MASK	0x1000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q13_MASK	0x2000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q14_MASK	0x4000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q15_MASK	0x8000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q16_MASK	0x10000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q17_MASK	0x20000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q18_MASK	0x40000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q19_MASK	0x80000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q20_MASK	0x100000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q21_MASK	0x200000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q22_MASK	0x400000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q23_MASK	0x800000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q24_MASK	0x1000000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q25_MASK	0x2000000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q26_MASK	0x4000000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q27_MASK	0x8000000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q28_MASK	0x10000000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q29_MASK	0x20000000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q30_MASK	0x40000000
+
+/* 0- Dispatcher1- Reorder */
+#define  QUEUE_MAPPING_Q_DEST_Q31_MASK	0x80000000
+
+
+/*
+ * Register <COMMON_POOL_LIMIT>
+ *
+ * common pool max size
+ */
+#define DSPTCHR_POOL_SIZES_CMN_POOL_LMT	0x4d0
+
+/* MAX number of buffers allowed in the pool */
+#define  POOL_SIZES_CMN_POOL_LMT_POOL_LMT_SHIFT	0
+#define  POOL_SIZES_CMN_POOL_LMT_POOL_LMT_MASK	0x3ff
+
+
+/*
+ * Register <COMMON_POOL_SIZE>
+ *
+ * common pool size
+ */
+#define DSPTCHR_POOL_SIZES_CMN_POOL_SIZE	0x4d4
+
+/* Number of buffers currently in the pool */
+#define  POOL_SIZES_CMN_POOL_SIZE_POOL_SIZE_SHIFT	0
+#define  POOL_SIZES_CMN_POOL_SIZE_POOL_SIZE_MASK	0x3ff
+
+
+/*
+ * Register <GUARANTEED_POOL_LIMIT>
+ *
+ * Guaranteed pool max size
+ */
+#define DSPTCHR_POOL_SIZES_GRNTED_POOL_LMT	0x4d8
+
+/* MAX number of buffers allowed in the pool */
+#define  POOL_SIZES_GRNTED_POOL_LMT_POOL_LMT_SHIFT	0
+#define  POOL_SIZES_GRNTED_POOL_LMT_POOL_LMT_MASK	0x3ff
+
+
+/*
+ * Register <GUARANTEED_POOL_SIZE>
+ *
+ * Guaranteed pool size
+ */
+#define DSPTCHR_POOL_SIZES_GRNTED_POOL_SIZE	0x4dc
+
+/* Number of buffers currently in the pool */
+#define  POOL_SIZES_GRNTED_POOL_SIZE_POOL_SIZE_SHIFT	0
+#define  POOL_SIZES_GRNTED_POOL_SIZE_POOL_SIZE_MASK	0x3ff
+
+
+/*
+ * Register <MULTI_CAST_POOL_LIMIT>
+ *
+ * Multi Cast pool max size
+ */
+#define DSPTCHR_POOL_SIZES_MULTI_CST_POOL_LMT	0x4e0
+
+/* MAX number of buffers allowed in the pool */
+#define  POOL_SIZES_MULTI_CST_POOL_LMT_POOL_LMT_SHIFT	0
+#define  POOL_SIZES_MULTI_CST_POOL_LMT_POOL_LMT_MASK	0x3ff
+
+
+/*
+ * Register <MULTI_CAST_POOL_SIZE>
+ *
+ * Multi Cast pool size
+ */
+#define DSPTCHR_POOL_SIZES_MULTI_CST_POOL_SIZE	0x4e4
+
+/* Number of buffers currently in the pool */
+#define  POOL_SIZES_MULTI_CST_POOL_SIZE_POOL_SIZE_SHIFT	0
+#define  POOL_SIZES_MULTI_CST_POOL_SIZE_POOL_SIZE_MASK	0x3ff
+
+
+/*
+ * Register <RNR_POOL_LIMIT>
+ *
+ * This counter counts the amount of buffers taken by the runner for
+ * MultiCast purposes (or any other use that requires adding new PDs to a
+ * Virtual Egress Queue - VEQ)
+ */
+#define DSPTCHR_POOL_SIZES_RNR_POOL_LMT	0x4e8
+
+/* MAX number of buffers allowed in the pool */
+#define  POOL_SIZES_RNR_POOL_LMT_POOL_LMT_SHIFT	0
+#define  POOL_SIZES_RNR_POOL_LMT_POOL_LMT_MASK	0x3ff
+
+
+/*
+ * Register <RNR_POOL_SIZE>
+ *
+ * This counter counts the amount of buffers taken by the runner for
+ * MultiCast purposes (or any other use that requires adding new PDs to a
+ * Virtual Egress Queue - VEQ)
+ */
+#define DSPTCHR_POOL_SIZES_RNR_POOL_SIZE	0x4ec
+
+/* Number of buffers currently in the pool */
+#define  POOL_SIZES_RNR_POOL_SIZE_POOL_SIZE_SHIFT	0
+#define  POOL_SIZES_RNR_POOL_SIZE_POOL_SIZE_MASK	0x3ff
+
+
+/*
+ * Register <PROCESSING_POOL_SIZE>
+ *
+ * This counter counts how many buffers are currently being handled by all
+ * RNRs
+ */
+#define DSPTCHR_POOL_SIZES_PRCSSING_POOL_SIZE	0x4f0
+
+/* Number of buffers currently in the pool */
+#define  POOL_SIZES_PRCSSING_POOL_SIZE_POOL_SIZE_SHIFT	0
+#define  POOL_SIZES_PRCSSING_POOL_SIZE_POOL_SIZE_MASK	0x3ff
+
+
+/*
+ * Registers <TASK_MASK> - <x> is [ 0 => 63 ]
+ *
+ * Address 0 -> 255:224
+ * Address 4 -> 223:192
+ * Address 8 -> 191:160
+ * Address C -> 159:128
+ * Address 10 -> 127:96
+ * Address 14 -> 95:64
+ * Address 18 -> 63:32
+ * Address 1C -> 31:0
+ *
+ * 8 RG x 8 Regs per RG = 64 registers
+ */
+#define DSPTCHR_MASK_MSK_TSK_255_0(x)	(0x500 + (x) * 0x4)
+
+/* MASK */
+#define  MASK_MSK_TSK_255_0_MASK_SHIFT	0
+#define  MASK_MSK_TSK_255_0_MASK_MASK	0xffffffff
+
+
+/*
+ * Registers <QUEUE_MASK> - <x> is [ 0 => 7 ]
+ *
+ * Queue Mask:
+ * Per RNR group holds a vector of which tasks are related to the group
+ */
+#define DSPTCHR_MASK_MSK_Q(x)		(0x600 + (x) * 0x4)
+
+/* MASK */
+#define  MASK_MSK_Q_MASK_SHIFT		0
+#define  MASK_MSK_Q_MASK_MASK		0xffffffff
+
+
+/*
+ * Register <DELAY_Q>
+ *
+ * Describes which VEQ are part of the Delay Q group.
+ */
+#define DSPTCHR_MASK_DLY_Q		0x620
+
+/* MASK */
+#define  MASK_DLY_Q_MASK_SHIFT		0
+#define  MASK_DLY_Q_MASK_MASK		0xffffffff
+
+
+/*
+ * Register <NON_DELAY_Q>
+ *
+ * Describes which VEQ are part of the Non-Delay Q group.
+ */
+#define DSPTCHR_MASK_NON_DLY_Q		0x624
+
+/* MASK */
+#define  MASK_NON_DLY_Q_MASK_SHIFT	0
+#define  MASK_NON_DLY_Q_MASK_MASK	0xffffffff
+
+
+/*
+ * Register <EGRESS_QM_DELAY_CREDIT>
+ *
+ * These registers hold the available credit for the Re-Order to sent PDs
+ * to the QM via Delay Q.
+ */
+#define DSPTCHR_EGRS_QUEUES_EGRS_DLY_QM_CRDT	0x630
+
+/*
+ * The amount of free credits the re-order can utilize to send PDs to the
+ * QM
+*/
+#define  EGRS_QUEUES_EGRS_DLY_QM_CRDT_DLY_CRDT_SHIFT	0
+#define  EGRS_QUEUES_EGRS_DLY_QM_CRDT_DLY_CRDT_MASK	0xff
+
+
+/*
+ * Register <EGRESS_QM_NON_DELAY_CREDIT>
+ *
+ * These registers hold the available credit for the Re-Order to sent PDs
+ * to the QM via Non-Delay Q.
+ */
+#define DSPTCHR_EGRS_QUEUES_EGRS_NON_DLY_QM_CRDT	0x634
+
+/*
+ * The amount of free credits the re-order can utilize to send PDs to the
+ * QM
+*/
+#define  EGRS_QUEUES_EGRS_NON_DLY_QM_CRDT_NON_DLY_CRDT_SHIFT	0
+#define  EGRS_QUEUES_EGRS_NON_DLY_QM_CRDT_NON_DLY_CRDT_MASK	0xff
+
+
+/*
+ * Register <TOTAL_EGRESS_SIZE>
+ *
+ * Size of all egress queues.
+ * affected from PDs sent to dispatch and from multicast connect
+ */
+#define DSPTCHR_EGRS_QUEUES_TOTAL_Q_EGRS_SIZE	0x638
+
+/*
+ * Accumulates all buffers that are marked as egress (after dispatch and
+ * before sending to QM)
+*/
+#define  EGRS_QUEUES_TOTAL_Q_EGRS_SIZE_TOTAL_EGRS_SIZE_SHIFT	0
+#define  EGRS_QUEUES_TOTAL_Q_EGRS_SIZE_TOTAL_EGRS_SIZE_MASK	0x3ff
+
+
+/*
+ * Registers <Q_EGRESS_SIZE> - <x> is [ 0 => 31 ] - read-only
+ *
+ * Size of all egress queues.
+ * affected from PDs sent to dispatch and from multicast connect
+ */
+#define DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(x)	(0x680 + (x) * 0x4)
+
+/*
+ * Accumulates all buffers that are marked as egress (after dispatch and
+ * before sending to QM)
+*/
+#define  EGRS_QUEUES_PER_Q_EGRS_SIZE_Q_EGRS_SIZE_SHIFT	0
+#define  EGRS_QUEUES_PER_Q_EGRS_SIZE_Q_EGRS_SIZE_MASK	0x3ff
+
+
+/*
+ * Register <WAKEUP_REQUEST>
+ *
+ * Bit per queue, wakeup request from RNR to a specific Q.
+ * Once a wakeup request message is sent to dsptchr it will be latched
+ * until the amount of credits pass a threshold
+ */
+#define DSPTCHR_WAKEUP_CONTROL_WKUP_REQ	0x770
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q0_MASK	0x1
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q1_MASK	0x2
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q2_MASK	0x4
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q3_MASK	0x8
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q4_MASK	0x10
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q5_MASK	0x20
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q6_MASK	0x40
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q7_MASK	0x80
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q8_MASK	0x100
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q9_MASK	0x200
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q10_MASK	0x400
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q11_MASK	0x800
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q12_MASK	0x1000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q13_MASK	0x2000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q14_MASK	0x4000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q15_MASK	0x8000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q16_MASK	0x10000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q17_MASK	0x20000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q18_MASK	0x40000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q19_MASK	0x80000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q20_MASK	0x100000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q21_MASK	0x200000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q22_MASK	0x400000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q23_MASK	0x800000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q24_MASK	0x1000000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q25_MASK	0x2000000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q26_MASK	0x4000000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q27_MASK	0x8000000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q28_MASK	0x10000000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q29_MASK	0x20000000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q30_MASK	0x40000000
+
+/* wakeup request pending */
+#define  WAKEUP_CONTROL_WKUP_REQ_Q31_MASK	0x80000000
+
+
+/*
+ * Register <WAKEUP_THRESHOLD>
+ *
+ * Wakeup Thresholds in which to indicate RNR
+ */
+#define DSPTCHR_WAKEUP_CONTROL_WKUP_THRSHLD	0x774
+
+/*
+ * Wakeup threshold.
+ * Once number of Guaranteed buffer count crosses the threshold and there
+ * is a pending wakeup request, the dispatcher will issue a wakeup message
+ * to the appropriate runner according to a predefined address configuration
+*/
+#define  WAKEUP_CONTROL_WKUP_THRSHLD_WKUP_THRSHLD_SHIFT	0
+#define  WAKEUP_CONTROL_WKUP_THRSHLD_WKUP_THRSHLD_MASK	0x3ff
+
+
+/*
+ * Registers <SCHEDULING_Q_INFO> - <x> is [ 0 => 31 ]
+ *
+ * DWRR info per Q.
+ * including amount of credits per Q.
+ * If Q has below zero credits and Quantum size
+ */
+#define DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(x)	(0x780 + (x) * 0x4)
+
+/*
+ * available credits in bytes.
+ * Q will not be permitted to dispatch PDs if credit levels are below zero
+*/
+#define  DISPTCH_SCHEDULING_DWRR_INFO_Q_CRDT_SHIFT	0
+#define  DISPTCH_SCHEDULING_DWRR_INFO_Q_CRDT_MASK	0xfffff
+
+/*
+ * Bit will be enabled if credit levels are below zero.
+ * 2's complement
+*/
+#define  DISPTCH_SCHEDULING_DWRR_INFO_NGTV_MASK	0x100000
+
+/*
+ * Quantum size.
+ * Should be configured according to Q rate.
+ * in Bytes
+*/
+#define  DISPTCH_SCHEDULING_DWRR_INFO_QUNTUM_SHIFT	21
+#define  DISPTCH_SCHEDULING_DWRR_INFO_QUNTUM_MASK	0xffe00000
+
+
+/*
+ * Register <VALID_QUEUES>
+ *
+ * Queues with credits above zero.
+ * This will allow for the Q to participate in the scheduling round
+ */
+#define DSPTCHR_DISPTCH_SCHEDULING_VLD_CRDT	0x800
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q0_MASK	0x1
+
+/* Valid Credits. */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q1_MASK	0x2
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q2_MASK	0x4
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q3_MASK	0x8
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q4_MASK	0x10
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q5_MASK	0x20
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q6_MASK	0x40
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q7_MASK	0x80
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q8_MASK	0x100
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q9_MASK	0x200
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q10_MASK	0x400
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q11_MASK	0x800
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q12_MASK	0x1000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q13_MASK	0x2000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q14_MASK	0x4000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q15_MASK	0x8000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q16_MASK	0x10000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q17_MASK	0x20000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q18_MASK	0x40000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q19_MASK	0x80000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q20_MASK	0x100000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q21_MASK	0x200000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q22_MASK	0x400000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q23_MASK	0x800000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q24_MASK	0x1000000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q25_MASK	0x2000000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q26_MASK	0x4000000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q27_MASK	0x8000000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q28_MASK	0x10000000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q29_MASK	0x20000000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q30_MASK	0x40000000
+
+/* Valid Credits */
+#define  DISPTCH_SCHEDULING_VLD_CRDT_Q31_MASK	0x80000000
+
+
+/*
+ * Register <LB_CONFIG>
+ *
+ * Selects which Load Balancing mechanism to use
+ */
+#define DSPTCHR_LOAD_BALANCING_LB_CFG	0x850
+
+/* RoundRobin = 0, StrictPriority = 1 */
+#define  LOAD_BALANCING_LB_CFG_LB_MODE_MASK	0x1
+
+/*
+ * Configures the threshold at which the LB mechanism activates a new
+ * RNR
+*/
+#define  LOAD_BALANCING_LB_CFG_SP_THRSHLD_SHIFT	8
+#define  LOAD_BALANCING_LB_CFG_SP_THRSHLD_MASK	0x1f00
+
+
+/*
+ * Register <FREE_TASKS_RNR_0_1>
+ *
+ * Each bit indicates if the Task is Free for dispatch:
+ * Tasks 0..15 belong to RNR 0
+ * Tasks 16..31 belong to RNR 1
+ */
+#define DSPTCHR_LOAD_BALANCING_FREE_TASK_0_1	0x860
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_0_1_RNR0_SHIFT	0
+#define  LOAD_BALANCING_FREE_TASK_0_1_RNR0_MASK	0xffff
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_0_1_RNR1_SHIFT	16
+#define  LOAD_BALANCING_FREE_TASK_0_1_RNR1_MASK	0xffff0000
+
+
+/*
+ * Register <FREE_TASKS_RNR_2_3>
+ *
+ * Each bit indicates if the Task is Free for dispatch:
+ * Tasks 0..15 belong to RNR 2
+ * Tasks 16..31 belong to RNR 3
+ */
+#define DSPTCHR_LOAD_BALANCING_FREE_TASK_2_3	0x864
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_2_3_RNR2_SHIFT	0
+#define  LOAD_BALANCING_FREE_TASK_2_3_RNR2_MASK	0xffff
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_2_3_RNR3_SHIFT	16
+#define  LOAD_BALANCING_FREE_TASK_2_3_RNR3_MASK	0xffff0000
+
+
+/*
+ * Register <FREE_TASKS_RNR_4_5>
+ *
+ * Each bit indicates if the Task is Free for dispatch:
+ * Tasks 0..15 belong to RNR 4
+ * Tasks 16..31 belong to RNR 5
+ */
+#define DSPTCHR_LOAD_BALANCING_FREE_TASK_4_5	0x868
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_4_5_RNR4_SHIFT	0
+#define  LOAD_BALANCING_FREE_TASK_4_5_RNR4_MASK	0xffff
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_4_5_RNR5_SHIFT	16
+#define  LOAD_BALANCING_FREE_TASK_4_5_RNR5_MASK	0xffff0000
+
+
+/*
+ * Register <FREE_TASKS_RNR_6_7>
+ *
+ * Each bit indicates if the Task is Free for dispatch:
+ * Tasks 0..15 belong to RNR 6
+ * Tasks 16..31 belong to RNR 7
+ */
+#define DSPTCHR_LOAD_BALANCING_FREE_TASK_6_7	0x86c
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_6_7_RNR6_SHIFT	0
+#define  LOAD_BALANCING_FREE_TASK_6_7_RNR6_MASK	0xffff
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_6_7_RNR7_SHIFT	16
+#define  LOAD_BALANCING_FREE_TASK_6_7_RNR7_MASK	0xffff0000
+
+
+/*
+ * Register <FREE_TASKS_RNR_8_9>
+ *
+ * Each bit indicates if the Task is Free for dispatch:
+ * Tasks 0..15 belong to RNR 8
+ * Tasks 16..31 belong to RNR 9
+ */
+#define DSPTCHR_LOAD_BALANCING_FREE_TASK_8_9	0x870
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_8_9_RNR8_SHIFT	0
+#define  LOAD_BALANCING_FREE_TASK_8_9_RNR8_MASK	0xffff
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_8_9_RNR9_SHIFT	16
+#define  LOAD_BALANCING_FREE_TASK_8_9_RNR9_MASK	0xffff0000
+
+
+/*
+ * Register <FREE_TASKS_RNR_10_11>
+ *
+ * Each bit indicates if the Task is Free for dispatch:
+ * Tasks 0..15 belong to RNR 10
+ * Tasks 16..31 belong to RNR 11
+ */
+#define DSPTCHR_LOAD_BALANCING_FREE_TASK_10_11	0x874
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_10_11_RNR10_SHIFT	0
+#define  LOAD_BALANCING_FREE_TASK_10_11_RNR10_MASK	0xffff
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_10_11_RNR11_SHIFT	16
+#define  LOAD_BALANCING_FREE_TASK_10_11_RNR11_MASK	0xffff0000
+
+
+/*
+ * Register <FREE_TASKS_RNR_12_13>
+ *
+ * Each bit indicates if the Task is Free for dispatch:
+ * Tasks 0..15 belong to RNR 12
+ * Tasks 16..31 belong to RNR 13
+ */
+#define DSPTCHR_LOAD_BALANCING_FREE_TASK_12_13	0x878
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_12_13_RNR12_SHIFT	0
+#define  LOAD_BALANCING_FREE_TASK_12_13_RNR12_MASK	0xffff
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_12_13_RNR13_SHIFT	16
+#define  LOAD_BALANCING_FREE_TASK_12_13_RNR13_MASK	0xffff0000
+
+
+/*
+ * Register <FREE_TASKS_RNR_14_15>
+ *
+ * Each bit indicates if the Task is Free for dispatch:
+ * Tasks 0..15 belong to RNR 14
+ * Tasks 16..31 belong to RNR 15
+ */
+#define DSPTCHR_LOAD_BALANCING_FREE_TASK_14_15	0x87c
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_14_15_RNR14_SHIFT	0
+#define  LOAD_BALANCING_FREE_TASK_14_15_RNR14_MASK	0xffff
+
+/* Each bit indicates which task is Free for dispatch */
+#define  LOAD_BALANCING_FREE_TASK_14_15_RNR15_SHIFT	16
+#define  LOAD_BALANCING_FREE_TASK_14_15_RNR15_MASK	0xffff0000
+
+
+/*
+ * Registers <TASK_TO_RG_MAPPING> - <x> is [ 0 => 31 ]
+ *
+ * This ram is used to map each task to which group does it belong to.
+ */
+#define DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(x)	(0x900 + (x) * 0x4)
+
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSKx_SHIFT(x)	((x) * 3)
+
+/*
+ * Can be Task 0/8/16...
+*/
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK0_SHIFT	0
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK0_MASK	0x7
+
+/*
+ * Can be Task 1/9/17...
+*/
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK1_SHIFT	3
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK1_MASK	0x38
+
+/*
+ * Can be Task 2/10/18...
+*/
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK2_SHIFT	6
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK2_MASK	0x1c0
+
+/*
+ * Can be Task 3/11/19...
+*/
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK3_SHIFT	9
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK3_MASK	0xe00
+
+/*
+ * Can be Task 4/12/20...
+*/
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK4_SHIFT	12
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK4_MASK	0x7000
+
+/*
+ * Can be Task 5/13/21...
+*/
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK5_SHIFT	15
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK5_MASK	0x38000
+
+/*
+ * Can be Task 6/14/22...
+*/
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK6_SHIFT	18
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK6_MASK	0x1c0000
+
+/*
+ * Can be Task 7/15/23...
+*/
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK7_SHIFT	21
+#define  LOAD_BALANCING_TSK_TO_RG_MAPPING_TSK7_MASK	0xe00000
+
+
+/*
+ * Register <RG_AVAILABLE_TASK_0_3>
+ *
+ * Available tasks in all runners related to a RNR Group.
+ * In case value is zero there are no tasks available for this RNR Group
+ * for dispatch hence it should be excluded from the next RNR Group
+ * selection
+ */
+#define DSPTCHR_LOAD_BALANCING_RG_AVLABL_TSK_0_3	0x980
+
+#define  LOAD_BALANCING_RG_AVLABL_TSK_0_3_TSK_CNT_RG_x_SHIFT(x)	((x) * 8)
+
+/* Counts the number of available (free) tasks in a RNR Group */
+#define  LOAD_BALANCING_RG_AVLABL_TSK_0_3_TSK_CNT_RG_0_SHIFT	0
+#define  LOAD_BALANCING_RG_AVLABL_TSK_0_3_TSK_CNT_RG_0_MASK	0xff
+
+/* Counts the number of available (free) tasks in a RNR Group */
+#define  LOAD_BALANCING_RG_AVLABL_TSK_0_3_TSK_CNT_RG_1_SHIFT	8
+#define  LOAD_BALANCING_RG_AVLABL_TSK_0_3_TSK_CNT_RG_1_MASK	0xff00
+
+/* Counts the number of available (free) tasks in a RNR Group */
+#define  LOAD_BALANCING_RG_AVLABL_TSK_0_3_TSK_CNT_RG_2_SHIFT	16
+#define  LOAD_BALANCING_RG_AVLABL_TSK_0_3_TSK_CNT_RG_2_MASK	0xff0000
+
+/* Counts the number of available (free) tasks in a RNR Group */
+#define  LOAD_BALANCING_RG_AVLABL_TSK_0_3_TSK_CNT_RG_3_SHIFT	24
+#define  LOAD_BALANCING_RG_AVLABL_TSK_0_3_TSK_CNT_RG_3_MASK	0xff000000
+
+
+/*
+ * Register <RG_AVAILABLE_TASK_4_7>
+ *
+ * Available tasks in all runners related to a RNR Group.
+ * In case value is zero there are no tasks available for this RNR Group
+ * for dispatch hence it should be excluded from the next RNR Group
+ * selection
+ */
+#define DSPTCHR_LOAD_BALANCING_RG_AVLABL_TSK_4_7	0x984
+
+/* Counts the number of available (free) tasks in a RNR Group */
+#define  LOAD_BALANCING_RG_AVLABL_TSK_4_7_TSK_CNT_RG_4_SHIFT	0
+#define  LOAD_BALANCING_RG_AVLABL_TSK_4_7_TSK_CNT_RG_4_MASK	0xff
+
+/* Counts the number of available (free) tasks in a RNR Group */
+#define  LOAD_BALANCING_RG_AVLABL_TSK_4_7_TSK_CNT_RG_5_SHIFT	8
+#define  LOAD_BALANCING_RG_AVLABL_TSK_4_7_TSK_CNT_RG_5_MASK	0xff00
+
+/* Counts the number of available (free) tasks in a RNR Group */
+#define  LOAD_BALANCING_RG_AVLABL_TSK_4_7_TSK_CNT_RG_6_SHIFT	16
+#define  LOAD_BALANCING_RG_AVLABL_TSK_4_7_TSK_CNT_RG_6_MASK	0xff0000
+
+/* Counts the number of available (free) tasks in a RNR Group */
+#define  LOAD_BALANCING_RG_AVLABL_TSK_4_7_TSK_CNT_RG_7_SHIFT	24
+#define  LOAD_BALANCING_RG_AVLABL_TSK_4_7_TSK_CNT_RG_7_MASK	0xff000000
+
+
+/*
+ * Register <INTERRUPT_STATUS_Register>
+ *
+ * This register contains the current active TM interrupts.
+ * Each asserted bit represents an active interrupt source.
+ * The interrupt remains active until the software clears it by writing 1
+ * to the corresponding bit.
+ */
+#define DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_0_ISR	0x990
+
+/* Buffer returned to FLL */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_0_ISR_FLL_RETURN_BUF_MASK	0x1
+
+/* Drop PD counted */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_0_ISR_FLL_CNT_DRP_MASK	0x2
+
+/* Unknown message entered the dispatcher */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_0_ISR_UNKNWN_MSG_MASK	0x4
+
+/*
+ * Number of buffers returned to FLL exceeds the pre-defined allocated
+ * buffer amount (due to linked list bug)
+*/
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_0_ISR_FLL_OVERFLOW_MASK	0x8
+
+/*
+ * Number of buffers returned to FLL decreased under zero and reached a
+ * negative amount (due to linked list bug)
+*/
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_0_ISR_FLL_NEG_MASK	0x10
+
+
+/*
+ * Register <INTERRUPT_STATUS_MASKED_Register> - read-only
+ *
+ * This register provides only the enabled interrupts for each of the
+ * interrupt sources depicted in the ISR register.
+ */
+#define DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_0_ISM	0x994
+
+/* Status Masked of corresponding interrupt source in the ISR */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_0_ISM_ISM_SHIFT	0
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_0_ISM_ISM_MASK	0xffffffff
+
+
+/*
+ * Register <INTERRUPT_ENABLE_Register>
+ *
+ * This register provides an enable mask for each of the interrupt sources
+ * depicted in the ISR register.
+ */
+#define DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_0_IER	0x998
+
+/*
+ * Each bit in the mask controls the corresponding interrupt source in the
+ * IER
+*/
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_0_IER_IEM_SHIFT	0
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_0_IER_IEM_MASK	0xffffffff
+
+
+/*
+ * Register <INTERRUPT_TEST_Register>
+ *
+ * This register enables testing by simulating interrupt sources.
+ * When the software sets a bit in the ITR, the corresponding bit in the
+ * ISR shows an active interrupt.
+ * The interrupt remains active until software clears the bit in the ITR
+ */
+#define DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_0_ITR	0x99c
+
+/* Each bit in the mask tests the corresponding interrupt source in the ISR */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_0_ITR_IST_SHIFT	0
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_0_ITR_IST_MASK	0xffffffff
+
+
+/*
+ * Register <INTERRUPT_STATUS_Register>
+ *
+ * This register contains the current active TM interrupts.
+ * Each asserted bit represents an active interrupt source.
+ * The interrupt remains active until the software clears it by writing 1
+ * to the corresponding bit.
+ */
+#define DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR	0x9a0
+
+/* New Entry added to Destination queue 0 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST0_INT_MASK	0x1
+
+/* New Entry added to Destination queue 1 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST1_INT_MASK	0x2
+
+/* New Entry added to Destination queue 2 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST2_INT_MASK	0x4
+
+/* New Entry added to Destination queue 3 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST3_INT_MASK	0x8
+
+/* New Entry added to Destination queue 4 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST4_INT_MASK	0x10
+
+/* New Entry added to Destination queue 5 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST5_INT_MASK	0x20
+
+/* New Entry added to Destination queue 6 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST6_INT_MASK	0x40
+
+/* New Entry added to Destination queue 7 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST7_INT_MASK	0x80
+
+/* New Entry added to Destination queue 8 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST8_INT_MASK	0x100
+
+/* New Entry added to Destination queue 9 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST9_INT_MASK	0x200
+
+/* New Entry added to Destination queue 10 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST10_INT_MASK	0x400
+
+/* New Entry added to Destination queue 11 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST11_INT_MASK	0x800
+
+/* New Entry added to Destination queue 12 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST12_INT_MASK	0x1000
+
+/* New Entry added to Destination queue 13 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST13_INT_MASK	0x2000
+
+/* New Entry added to Destination queue 14 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST14_INT_MASK	0x4000
+
+/* New Entry added to Destination queue 15 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST15_INT_MASK	0x8000
+
+/* New Entry added to Destination queue 16 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST16_INT_MASK	0x10000
+
+/* New Entry added to Destination queue 17 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST17_INT_MASK	0x20000
+
+/* New Entry added to Destination queue 18 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST18_INT_MASK	0x40000
+
+/* New Entry added to Destination queue 19 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST19_INT_MASK	0x80000
+
+/* New Entry added to Destination queue 20 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST20_INT_MASK	0x100000
+
+/* New Entry added to Destination queue 21 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST21_INT_MASK	0x200000
+
+/* New Entry added to Destination queue 22 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST22_INT_MASK	0x400000
+
+/* New Entry added to Destination queue 23 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST23_INT_MASK	0x800000
+
+/* New Entry added to Destination queue 24 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST24_INT_MASK	0x1000000
+
+/* New Entry added to Destination queue 25 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST25_INT_MASK	0x2000000
+
+/* New Entry added to Destination queue 26 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST26_INT_MASK	0x4000000
+
+/* New Entry added to Destination queue 27 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST27_INT_MASK	0x8000000
+
+/* New Entry added to Destination queue 28 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST28_INT_MASK	0x10000000
+
+/* New Entry added to Destination queue 29 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST29_INT_MASK	0x20000000
+
+/* New Entry added to Destination queue 30 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST30_INT_MASK	0x40000000
+
+/* New Entry added to Destination queue 31 */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR_QDEST31_INT_MASK	0x80000000
+
+
+/*
+ * Register <INTERRUPT_STATUS_MASKED_Register> - read-only
+ *
+ * This register provides only the enabled interrupts for each of the
+ * interrupt sources depicted in the ISR register.
+ */
+#define DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISM	0x9a4
+
+/* Status Masked of corresponding interrupt source in the ISR */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISM_ISM_SHIFT	0
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISM_ISM_MASK	0xffffffff
+
+
+/*
+ * Register <INTERRUPT_ENABLE_Register>
+ *
+ * This register provides an enable mask for each of the interrupt sources
+ * depicted in the ISR register.
+ */
+#define DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_1_IER	0x9a8
+
+/*
+ * Each bit in the mask controls the corresponding interrupt source in the
+ * IER
+*/
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_IER_IEM_SHIFT	0
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_IER_IEM_MASK	0xffffffff
+
+
+/*
+ * Register <INTERRUPT_TEST_Register>
+ *
+ * This register enables testing by simulating interrupt sources.
+ * When the software sets a bit in the ITR, the corresponding bit in the
+ * ISR shows an active interrupt.
+ * The interrupt remains active until software clears the bit in the ITR
+ */
+#define DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_1_ITR	0x9ac
+
+/* Each bit in the mask tests the corresponding interrupt source in the ISR */
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ITR_IST_SHIFT	0
+#define  DSPTCHER_REORDR_TOP_INTR_CTRL_1_ITR_IST_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_BYPASS_CONTROL>
+ *
+ * Debug Bypass control
+ */
+#define DSPTCHR_DEBUG_DBG_BYPSS_CNTRL	0x9b0
+
+/* Enable bypass mode */
+#define  DEBUG_DBG_BYPSS_CNTRL_EN_BYP_MASK	0x1
+
+/* What BBID to use for NON_DELAY Q when in Bypass mode */
+#define  DEBUG_DBG_BYPSS_CNTRL_BBID_NON_DLY_SHIFT	8
+#define  DEBUG_DBG_BYPSS_CNTRL_BBID_NON_DLY_MASK	0xff00
+
+/* What BBID to use for DELAY Q when in Bypass mode */
+#define  DEBUG_DBG_BYPSS_CNTRL_BBID_DLY_SHIFT	16
+#define  DEBUG_DBG_BYPSS_CNTRL_BBID_DLY_MASK	0xff0000
+
+
+/*
+ * Register <TASK_COUNTER_0_7>
+ *
+ * Counts the amount of active Tasks in RNR
+ */
+#define DSPTCHR_DEBUG_GLBL_TSK_CNT_0_7	0x9b4
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_0_SHIFT	0
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_0_MASK	0xf
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_1_SHIFT	4
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_1_MASK	0xf0
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_2_SHIFT	8
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_2_MASK	0xf00
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_3_SHIFT	12
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_3_MASK	0xf000
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_4_SHIFT	16
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_4_MASK	0xf0000
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_5_SHIFT	20
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_5_MASK	0xf00000
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_6_SHIFT	24
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_6_MASK	0xf000000
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_7_SHIFT	28
+#define  DEBUG_GLBL_TSK_CNT_0_7_TSK_CNT_RNR_7_MASK	0xf0000000
+
+
+/*
+ * Register <TASK_COUNTER_8_15>
+ *
+ * Counts the amount of active Tasks in RNR
+ */
+#define DSPTCHR_DEBUG_GLBL_TSK_CNT_8_15	0x9b8
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_8_SHIFT	0
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_8_MASK	0xf
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_9_SHIFT	4
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_9_MASK	0xf0
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_10_SHIFT	8
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_10_MASK	0xf00
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_11_SHIFT	12
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_11_MASK	0xf000
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_12_SHIFT	16
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_12_MASK	0xf0000
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_13_SHIFT	20
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_13_MASK	0xf00000
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_14_SHIFT	24
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_14_MASK	0xf000000
+
+/* Counts the number of active tasks */
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_15_SHIFT	28
+#define  DEBUG_GLBL_TSK_CNT_8_15_TSK_CNT_RNR_15_MASK	0xf0000000
+
+
+/*
+ * Register <DEBUG_BUS_CONTROL>
+ *
+ * Debug bus control: selects which vector to output to the top level
+ */
+#define DSPTCHR_DEBUG_DBG_BUS_CNTRL	0x9bc
+
+/* Selects which vector to output */
+#define  DEBUG_DBG_BUS_CNTRL_DBG_SEL_SHIFT	0
+#define  DEBUG_DBG_BUS_CNTRL_DBG_SEL_MASK	0x1f
+
+
+/*
+ * Register <DEBUG_VEC_0> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_0		0x9c0
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_0_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_0_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_1> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_1		0x9c4
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_1_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_1_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_2> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_2		0x9c8
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_2_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_2_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_3> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_3		0x9cc
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_3_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_3_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_4> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_4		0x9d0
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_4_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_4_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_5> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_5		0x9d4
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_5_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_5_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_6> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_6		0x9d8
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_6_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_6_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_7> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_7		0x9dc
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_7_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_7_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_8> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_8		0x9e0
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_8_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_8_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_9> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_9		0x9e4
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_9_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_9_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_10> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_10	0x9e8
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_10_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_10_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_11> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_11	0x9ec
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_11_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_11_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_12> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_12	0x9f0
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_12_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_12_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_13> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_13	0x9f4
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_13_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_13_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_14> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_14	0x9f8
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_14_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_14_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_15> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_15	0x9fc
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_15_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_15_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_16> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_16	0xa00
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_16_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_16_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_17> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_17	0xa04
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_17_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_17_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_18> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_18	0xa08
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_18_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_18_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_19> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_19	0xa0c
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_19_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_19_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_20> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_20	0xa10
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_20_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_20_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_21> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_21	0xa14
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_21_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_21_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_22> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_22	0xa18
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_22_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_22_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_VEC_23> - read-only
+ *
+ * Debug vector mapped to registers
+ */
+#define DSPTCHR_DEBUG_DBG_VEC_23	0xa1c
+
+/* Debug bus vector value */
+#define  DEBUG_DBG_VEC_23_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_DBG_VEC_23_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_STATISTICS_CONTROL>
+ *
+ * Controls which information to log
+ */
+#define DSPTCHR_DEBUG_STATISTICS_DBG_STTSTCS_CTRL	0xa70
+
+/* Selects mode to log */
+#define  DEBUG_STATISTICS_DBG_STTSTCS_CTRL_DBG_MODE_SHIFT	0
+#define  DEBUG_STATISTICS_DBG_STTSTCS_CTRL_DBG_MODE_MASK	0x3
+
+/* Enable statistics */
+#define  DEBUG_STATISTICS_DBG_STTSTCS_CTRL_EN_CNTRS_MASK	0x100
+
+/* Clears all counters */
+#define  DEBUG_STATISTICS_DBG_STTSTCS_CTRL_CLR_CNTRS_MASK	0x200
+
+/* Selects RNR to log */
+#define  DEBUG_STATISTICS_DBG_STTSTCS_CTRL_DBG_RNR_SEL_SHIFT	16
+#define  DEBUG_STATISTICS_DBG_STTSTCS_CTRL_DBG_RNR_SEL_MASK	0xf0000
+
+
+/*
+ * Registers <DEBUG_COUNT> - <x> is [ 0 => 31 ] - read-only
+ *
+ * Debug counter
+ */
+#define DSPTCHR_DEBUG_STATISTICS_DBG_CNT(x)	(0xa80 + (x) * 0x4)
+
+/* Debug bus vector value */
+#define  DEBUG_STATISTICS_DBG_CNT_DBG_VEC_VAL_SHIFT	0
+#define  DEBUG_STATISTICS_DBG_CNT_DBG_VEC_VAL_MASK	0xffffffff
+
+
+/*
+ * Registers <HEAD> - <x> is [ 0 => 31 ]
+ *
+ * Pointer to the first BD in the link list of this queue.
+ */
+#define DSPTCHR_QDES_HEAD(x)		(0x2000 + (x) * 0x20)
+
+/* Pointer to the first BD in the link list of this queue. */
+#define  QDES_HEAD_HEAD_SHIFT		0
+#define  QDES_HEAD_HEAD_MASK		0xffffffff
+
+
+/*
+ * Registers <BFOUT> - <x> is [ 0 => 31 ]
+ *
+ * 32 bit wrap around counter.
+ * Counts number of packets that left this queue since start of queue
+ * activity.
+ */
+#define DSPTCHR_QDES_BFOUT(x)		(0x2004 + (x) * 0x20)
+
+/*
+ * 32 bit wrap around counter.
+ * Counts number of packets that left this queue since start of queue
+ * activity.
+*/
+#define  QDES_BFOUT_BFOUT_SHIFT		0
+#define  QDES_BFOUT_BFOUT_MASK		0xffffffff
+
+
+/*
+ * Registers <BUFIN> - <x> is [ 0 => 31 ]
+ *
+ * 32 bit wrap around counter.
+ * Counts number of packets that entered this queue since start of queue
+ * activity.
+ */
+#define DSPTCHR_QDES_BUFIN(x)		(0x2008 + (x) * 0x20)
+
+/*
+ * 32 bit wrap around counter.
+ * Counts number of packets that entered this queue since start of queue
+ * activity.
+*/
+#define  QDES_BUFIN_BUFIN_SHIFT		0
+#define  QDES_BUFIN_BUFIN_MASK		0xffffffff
+
+
+/*
+ * Registers <TAIL> - <x> is [ 0 => 31 ]
+ *
+ * Pointer to the last BD in the linked list of this queue.
+ */
+#define DSPTCHR_QDES_TAIL(x)		(0x200c + (x) * 0x20)
+
+/* Pointer to the last BD in the linked list of this queue. */
+#define  QDES_TAIL_TAIL_SHIFT		0
+#define  QDES_TAIL_TAIL_MASK		0xffffffff
+
+
+/*
+ * Registers <FBDNULL> - <x> is [ 0 => 31 ]
+ *
+ * If this bit is set then the first BD attached to this Q is a null BD.
+ * In this case, its Data Pointer field is not valid, but its Next BD
+ * pointer field is valid.
+ * When it is set, the NullBD field for this queue is not valid.
+ */
+#define DSPTCHR_QDES_FBDNULL(x)		(0x2010 + (x) * 0x20)
+
+/*
+ * If this bit is set then the first BD attached to this Q is a null BD.
+ * In this case, its Data Pointer field is not valid, but its Next BD
+ * pointer field is valid.
+ * When it is set, the NullBD field for this queue is not valid.
+*/
+#define  QDES_FBDNULL_FBDNULL_MASK	0x1
+
+
+/*
+ * Registers <NULLBD> - <x> is [ 0 => 31 ]
+ *
+ * 32 bits index of a Null BD that belongs to this queue.
+ * Both the data buffer pointer and the next BD field are non valid.
+ * The pointer defines a memory allocation for a BD that might be used or
+ * not.
+ */
+#define DSPTCHR_QDES_NULLBD(x)		(0x2014 + (x) * 0x20)
+
+/*
+ * 32 bits index of a Null BD that belongs to this queue.
+ * Both the data buffer pointer and the next BD field are non valid.
+ * The pointer defines a memory allocation for a BD that might be used or
+ * not.
+*/
+#define  QDES_NULLBD_NULLBD_SHIFT	0
+#define  QDES_NULLBD_NULLBD_MASK	0xffffffff
+
+
+/*
+ * Registers <BUFAVAIL> - <x> is [ 0 => 31 ] - read-only
+ *
+ * number of entries available in queue.
+ * bufin - bfout
+ */
+#define DSPTCHR_QDES_BUFAVAIL(x)	(0x2018 + (x) * 0x20)
+
+/*
+ * number of entries available in queue.
+ * bufin - bfout
+*/
+#define  QDES_BUFAVAIL_BUFAVAIL_SHIFT	0
+#define  QDES_BUFAVAIL_BUFAVAIL_MASK	0xffffffff
+
+
+/*
+ * Registers <QUEUE_HEAD> - <x> is [ 0 => 31 ]
+ *
+ * Q Head Buffer, Used for the dispatching logic
+ */
+#define DSPTCHR_QDES_REG_Q_HEAD(x)	(0x2600 + (x) * 0x4)
+
+/* Q HEAD */
+#define  QDES_REG_Q_HEAD_HEAD_SHIFT	0
+#define  QDES_REG_Q_HEAD_HEAD_MASK	0x3ff
+
+
+/*
+ * Register <VIQ_HEAD_VALID>
+ *
+ * This register will hold, for each VIQ, whether the Head of the Q is
+ * valid or not.
+ * These Queues are for Dispatch
+ */
+#define DSPTCHR_QDES_REG_VIQ_HEAD_VLD	0x2680
+
+/*
+ * Q head valid.
+ * Each bit indicates for a specific VIQ if the head is valid or not
+*/
+#define  QDES_REG_VIQ_HEAD_VLD_VIQ_HEAD_VLD_SHIFT	0
+#define  QDES_REG_VIQ_HEAD_VLD_VIQ_HEAD_VLD_MASK	0xffffffff
+
+
+/*
+ * Register <VIQ_COHERENCY_VALID>
+ *
+ * This register will hold for each VIQ if the Coherency counter is larger
+ * than zero.
+ */
+#define DSPTCHR_QDES_REG_VIQ_CHRNCY_VLD	0x2684
+
+/*
+ * Q Coherency counter is valid.
+ * Each bit indicates for a specific VIQ if there is more than one
+ * coherency message for that Q.
+ * meaning the head of the VIQ can be dispatched
+*/
+#define  QDES_REG_VIQ_CHRNCY_VLD_CHRNCY_VLD_SHIFT	0
+#define  QDES_REG_VIQ_CHRNCY_VLD_CHRNCY_VLD_MASK	0xffffffff
+
+
+/*
+ * Register <VEQ_HEAD_VALID>
+ *
+ * This register will hold, for each VEQ, whether the Head of the Q is
+ * valid or not. These Queues are for ReOrder
+ */
+#define DSPTCHR_QDES_REG_VEQ_HEAD_VLD	0x2688
+
+/*
+ * Q head valid.
+ * Each bit indicates for a specific VIQ if the head is valid or not
+*/
+#define  QDES_REG_VEQ_HEAD_VLD_VIQ_HEAD_VLD_SHIFT	0
+#define  QDES_REG_VEQ_HEAD_VLD_VIQ_HEAD_VLD_MASK	0xffffffff
+
+
+/*
+ * Register <QDES_BUF_AVAIL_CONTROL>
+ *
+ * Today's implementation does not require that the QDES available
+ * buffer be different than zero,
+ * so this register controls whether or not it should affect popping
+ * from the QDES
+ */
+#define DSPTCHR_QDES_REG_QDES_BUF_AVL_CNTRL	0x268c
+
+/* Should buf_avail in the QDES affect popping from head of linked list */
+#define  QDES_REG_QDES_BUF_AVL_CNTRL_USE_BUF_AVL_MASK	0x1
+
+/* Should buf_avail in the QDES affect popping from head of linked list */
+#define  QDES_REG_QDES_BUF_AVL_CNTRL_DEC_BUFOUT_WHEN_MLTCST_MASK	0x2
+
+
+/*
+ * Register <HEAD>
+ *
+ * Pointer to the first BD in the link list of this queue.
+ */
+#define DSPTCHR_FLLDES_HEAD		0x2700
+
+/* Pointer to the first BD in the link list of this queue. */
+#define  FLLDES_HEAD_HEAD_SHIFT		0
+#define  FLLDES_HEAD_HEAD_MASK		0xffffffff
+
+
+/*
+ * Register <BFOUT>
+ *
+ * 32 bit wrap around counter.
+ * Counts number of entries that left this queue since start of queue
+ * activity.
+ */
+#define DSPTCHR_FLLDES_BFOUT		0x2704
+
+/*
+ * 32 bit wrap around counter.
+ * Counts number of entries that left this queue since start of queue
+ * activity.
+*/
+#define  FLLDES_BFOUT_COUNT_SHIFT	0
+#define  FLLDES_BFOUT_COUNT_MASK	0xffffffff
+
+
+/*
+ * Register <BFIN>
+ *
+ * 32 bit wrap around counter.
+ * Counts number of entries that entered this queue since start of queue
+ * activity.
+ */
+#define DSPTCHR_FLLDES_BFIN		0x2708
+
+/*
+ * 32 bit wrap around counter.
+ * Counts number of entries that entered this queue since start of queue
+ * activity.
+*/
+#define  FLLDES_BFIN_BFIN_SHIFT		0
+#define  FLLDES_BFIN_BFIN_MASK		0xffffffff
+
+
+/*
+ * Register <TAIL>
+ *
+ * Pointer to the last BD in the linked list of this queue.
+ */
+#define DSPTCHR_FLLDES_TAIL		0x270c
+
+/* Pointer to the last BD in the linked list of this queue. */
+#define  FLLDES_TAIL_TAIL_SHIFT		0
+#define  FLLDES_TAIL_TAIL_MASK		0xffffffff
+
+
+/*
+ * Register <FLLDROP>
+ *
+ * 32 bit counter that counts the number of packets arrived when there is
+ * no free BD in the FLL.
+ */
+#define DSPTCHR_FLLDES_FLLDROP		0x2710
+
+/*
+ * 32 bit counter that counts the number of packets arrived when there is
+ * no free BD in the FLL.
+*/
+#define  FLLDES_FLLDROP_DRPCNT_SHIFT	0
+#define  FLLDES_FLLDROP_DRPCNT_MASK	0xffffffff
+
+
+/*
+ * Register <LTINT>
+ *
+ * Low threshold Interrupt.
+ * When number of bytes reach this level, then an interrupt is generated to
+ * the Host.
+ */
+#define DSPTCHR_FLLDES_LTINT		0x2714
+
+/*
+ * Low threshold Interrupt.
+ * When number of bytes reach this level, then an interrupt is generated to
+ * the Host.
+*/
+#define  FLLDES_LTINT_MINBUF_SHIFT	0
+#define  FLLDES_LTINT_MINBUF_MASK	0xffffffff
+
+
+/*
+ * Register <BUFAVAIL> - read-only
+ *
+ * number of entries available in queue.
+ * bufin - bfout
+ */
+#define DSPTCHR_FLLDES_BUFAVAIL		0x2720
+
+/*
+ * number of entries available in queue.
+ * bufin - bfout
+*/
+#define  FLLDES_BUFAVAIL_BUFAVAIL_SHIFT	0
+#define  FLLDES_BUFAVAIL_BUFAVAIL_MASK	0xffffffff
+
+
+/*
+ * Register <FREEMIN> - read-only
+ *
+ * Save the MIN size of free BD in the system that has been recorded during
+ * work.
+ */
+#define DSPTCHR_FLLDES_FREEMIN		0x2724
+
+/* minimum value of free BD recorded */
+#define  FLLDES_FREEMIN_FREEMIN_SHIFT	0
+#define  FLLDES_FREEMIN_FREEMIN_MASK	0xffffffff
+
+
+/*
+ * Registers <BD> - <x> is [ 0 => 1023 ]
+ *
+ * This Memory holds the Buffer Descriptor (BD) entries.
+ */
+#define DSPTCHR_BDRAM_DATA(x)		(0x3000 + (x) * 0x4)
+
+/* Data Buffer entry */
+#define  BDRAM_DATA_DATA_SHIFT		2
+#define  BDRAM_DATA_DATA_MASK		0xffc
+
+
+/*
+ * Registers <PDRAM> - <x> is [ 0 => 4095 ]
+ *
+ * This memory holds the Packet descriptors.
+ */
+#define DSPTCHR_PDRAM_DATA(x)		(0x4000 + (x) * 0x4)
+
+/* Data Buffer entry */
+#define  PDRAM_DATA_DATA_SHIFT		0
+#define  PDRAM_DATA_DATA_MASK		0xffffffff
+
+
+#endif /* ! XRDP_REGS_DSPTCHR_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_fpm.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_fpm.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_fpm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_fpm.h	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,1952 @@
+#ifndef XRDP_REGS_FPM_H_
+#define XRDP_REGS_FPM_H_
+
+/* relative to core */
+#define FPM_OFFSET_0			0xa00000
+
+/*
+ * Register <FPM Control>
+ *
+ */
+#define FPM_FPM_CTL			0x0
+
+/*
+ * Test port mux control bits used to drive test signals from different
+ * submodules.
+*/
+#define  FPM_CTL_TP_MUX_CNTRL_SHIFT	27
+#define  FPM_CTL_TP_MUX_CNTRL_MASK	0xf8000000
+
+/*
+ * Disabling check for index memory corrupt during alloc/free/mcast
+ * updates.
+ * This should be used for debug purposes only 0 = Enable memory corruption
+ * check (normal operation) 1 = Disable memory corruption check
+*/
+#define  FPM_CTL_MEM_CORRUPT_CHECK_DISABLE_MASK	0x2000000
+
+/*
+ * Stop loading allocation fifo/cache with new tokens.
+ * This is should be used for debug purposes only 0 = Enable loading new
+ * tokens (normal operation) 1 = Disable loading new tokens
+*/
+#define  FPM_CTL_STOP_ALLOC_CACHE_LOAD_MASK	0x1000000
+
+/* Enable POOL2 token allocation / deallocation 0 = Disabled 1 = Enabled */
+#define  FPM_CTL_POOL2_ENABLE_MASK	0x20000
+
+/* Enable POOL1 token allocation / deallocation 0 = Disabled 1 = Enabled */
+#define  FPM_CTL_POOL1_ENABLE_MASK	0x10000
+
+/*
+ * Set to 1 to hold the FPM Broadbus interface in reset.
+ * This is useful for maintaining a known state on that interface when
+ * Runner is powered down.
+*/
+#define  FPM_CTL_FPM_BB_SOFT_RESET_MASK	0x4000
+
+/*
+ * Clear memory - Initialize all bits of the usage index array memory to
+ * zero's This is a self clearing bit.
+ * Once software writes a 1'b1 to enable, hardware initializes the memory
+ * and resets this bit back to 1'b0 at completion of initialization.
+ * Software can poll this bit and check for a value a zero that indicates
+ * initialization completion status
+*/
+#define  FPM_CTL_INIT_MEM_MASK		0x10
+
+/*
+ * Clear memory - Initialize all bits of the usage index array memory to
+ * zero's This is a self clearing bit.
+ * Once software writes a 1'b1 to enable, hardware initializes the memory
+ * and resets this bit back to 1'b0 at completion of initialization.
+ * Software can poll this bit and check for a value a zero that indicates
+ * initialization completion status
+*/
+#define  FPM_CTL_INIT_MEM_POOL2_MASK	0x8
+
+
+/*
+ * Register <FPM Configuration>
+ *
+ */
+#define FPM_FPM_CFG1			0x4
+
+/*
+ * Index memory search method (For more info refer to FPM architecture wiki
+ * page) 0 = Method 1 1 = Method 2
+*/
+#define  FPM_CFG1_POOL1_SEARCH_MODE_MASK	0x1
+
+
+/*
+ * Register <FPM Configuration>
+ *
+ */
+#define FPM_FPM_WEIGHT			0x8
+
+/* Weight assigned to each free to pool for DDR1 */
+#define  FPM_WEIGHT_DDR1_FREE_WEIGHT_SHIFT	24
+#define  FPM_WEIGHT_DDR1_FREE_WEIGHT_MASK	0xff000000
+
+/* Weight assigned to each alloc from pool for DDR1 */
+#define  FPM_WEIGHT_DDR1_ALLOC_WEIGHT_SHIFT	16
+#define  FPM_WEIGHT_DDR1_ALLOC_WEIGHT_MASK	0xff0000
+
+/* Weight assigned to each free to pool for DDR0 */
+#define  FPM_WEIGHT_DDR0_FREE_WEIGHT_SHIFT	8
+#define  FPM_WEIGHT_DDR0_FREE_WEIGHT_MASK	0xff00
+
+/* Weight assigned to each alloc from pool for DDR0 */
+#define  FPM_WEIGHT_DDR0_ALLOC_WEIGHT_SHIFT	0
+#define  FPM_WEIGHT_DDR0_ALLOC_WEIGHT_MASK	0xff
+
+
+/*
+ * Register <FPM_BB Configuration>
+ *
+ */
+#define FPM_FPM_BB_CFG			0xc
+
+/*
+ * Select pool/DDR to be used when FPM_BB allocates tokens 11 = reserved 10
+ * = allocate from both pools 01 = pool1/DDR1 00 = pool0/DDR0
+*/
+#define  FPM_BB_CFG_BB_DDR_SEL_SHIFT	0
+#define  FPM_BB_CFG_BB_DDR_SEL_MASK	0x3
+
+
+/*
+ * Register <POOL2 Interrupt Mask>
+ *
+ * Mask bits are active high and are disabled by default.
+ * Software enables desired bits as necessary
+ */
+#define FPM_POOL1_INTR_MSK		0x10
+
+/* Expired token recovered interrupt mask. */
+#define  POOL1_INTR_MSK_EXPIRED_TOKEN_RECOV_MSK_MASK	0x4000
+
+/* Expired token detect interrupt mask. */
+#define  POOL1_INTR_MSK_EXPIRED_TOKEN_DET_MSK_MASK	0x2000
+
+/* Illegal token request interrupt mask. */
+#define  POOL1_INTR_MSK_ILLEGAL_ALLOC_REQUEST_MSK_MASK	0x1000
+
+/* Illegal/un-implemented register/memory space access interrupt mask. */
+#define  POOL1_INTR_MSK_ILLEGAL_ADDRESS_ACCESS_MSK_MASK	0x800
+
+/* XON_STATE interrupt mask. */
+#define  POOL1_INTR_MSK_XON_MSK_MASK	0x400
+
+/* XOFF_STATE interrupt mask. */
+#define  POOL1_INTR_MSK_XOFF_MSK_MASK	0x200
+
+/* Index Memory corrupt interrupt mask. */
+#define  POOL1_INTR_MSK_MEMORY_CORRUPT_MSK_MASK	0x100
+
+/* Free or Mcast update on disabled pool interrupt mask . */
+#define  POOL1_INTR_MSK_POOL_DIS_FREE_MULTI_MSK_MASK	0x80
+
+/* Token multi-cast value update request with index out-of-range. */
+#define  POOL1_INTR_MSK_MULTI_TOKEN_INDEX_OUT_OF_RANGE_MSK_MASK	0x40
+
+/* Token multi-cast value update request with invalid token. */
+#define  POOL1_INTR_MSK_MULTI_TOKEN_NO_VALID_MSK_MASK	0x20
+
+/* De-allocation token request with index out-of-range. */
+#define  POOL1_INTR_MSK_FREE_TOKEN_INDEX_OUT_OF_RANGE_MSK_MASK	0x10
+
+/* De-allocation token request with invalid token. */
+#define  POOL1_INTR_MSK_FREE_TOKEN_NO_VALID_MSK_MASK	0x8
+
+/* Usage Index Pool is fully allocated interrupt mask. */
+#define  POOL1_INTR_MSK_POOL_FULL_MSK_MASK	0x4
+
+/* De-Allocation FIFO Full Interrupt mask. */
+#define  POOL1_INTR_MSK_FREE_FIFO_FULL_MSK_MASK	0x2
+
+/* Allocation FIFO Full Interrupt mask. */
+#define  POOL1_INTR_MSK_ALLOC_FIFO_FULL_MSK_MASK	0x1
+
+
+/*
+ * Register <POOL2 Interrupt Status>
+ *
+ * Interrupt bits are active high.
+ * When a bit in this register is set to 1 and the corresponding bit in
+ * interrupt mask register is set to 1, interrupt to CPU will occur.
+ * When set (1), interrupts bits can be cleared (0) by writing a 1 to the
+ * desired bit.
+ */
+#define FPM_POOL1_INTR_STS		0x14
+
+/*
+ * Expired token recovered interrupt.
+ * This is set when an expired token has been recovered and returned to pool
+ * as an available token.
+*/
+#define  POOL1_INTR_STS_EXPIRED_TOKEN_RECOV_STS_MASK	0x4000
+
+/*
+ * Expired token detect interrupt.
+ * This is set when the token recovery logic detects a token that has been
+ * held for the entire duration of the aging timer.
+*/
+#define  POOL1_INTR_STS_EXPIRED_TOKEN_DET_STS_MASK	0x2000
+
+/*
+ * Illegal token request interrupt.
+ * This will be active when the pool is disabled, there is a request for a
+ * new token and the alloc fifo for the selected token size is empty.
+ * Along with interrupt being sent an error reply packet will be sent out
+ * with o_ubus_error_out asserted.
+*/
+#define  POOL1_INTR_STS_ILLEGAL_ALLOC_REQUEST_STS_MASK	0x1000
+
+/*
+ * Illegal/un-implemented register/memory space access interrupt.
+ * This will be active when there is an attempt to read from an
+ * unimplemented register or memory space.
+ * Along with interrupt being sent an error reply packet will be sent out
+ * with o_ubus_error_out asserted.
+*/
+#define  POOL1_INTR_STS_ILLEGAL_ADDRESS_ACCESS_STS_MASK	0x800
+
+/*
+ * Number of available tokens is greater than or equal to XON_THRESHOLD
+ * value in XON/XOFF Threshold configuration register.
+ * This is a functional status bit, not an error status bit.
+ * Using this information FPM generates "backpressure" output signal that
+ * is used by other UBUS client logics to throttle its operation.
+ * For example, UNIMAC logic can use "backpressure" signal to transfer
+ * "PAUSE" Ethernet flow control packets to throttle incoming frames on
+ * Ethernet interface.
+*/
+#define  POOL1_INTR_STS_XON_STATE_STS_MASK	0x400
+
+/*
+ * Number of available tokens is less than or equal to XOFF_THRESHOLD value
+ * in XON/XOFF Threshold configuration register.
+ * This is a functional status bit, not an error status bit.
+ * Using this information FPM generates "backpressure" output signal that
+ * is used by other UBUS client logics to throttle its operation.
+ * For example, UNIMAC logic can use "backpressure" signal to transfer
+ * "PAUSE" Ethernet flow control packets to throttle incoming frames on
+ * Ethernet interface.
+*/
+#define  POOL1_INTR_STS_XOFF_STATE_STS_MASK	0x200
+
+/*
+ * Index Memory is corrupted.
+ * During updates of the usage array, token manager checks if the use count
+ * and search tree value in the array has a legal value.
+ * If the use count or search tree value is not correct before updating,
+ * logic generates an error and interrupt.
+ * As long as the interrupt is active no more valid tokens will be
+ * allocated because this is a catastrophic error.
+ * Following are the two error conditions that are checked - 1.
+ * During search for a free token, a particular token use count value
+ * indicates it is allocated (use count is greater than 0), but
+ * corresponding upper level search tree value indicates the token is still
+ * available (with bit value of 1'b0, instead of 1'b1).
+ * This is an error.
+ * 2.
+ * During search for a free token, a particular token use count value
+ * indicates that it is free (use count is 0), but corresponding upper
+ * level search tree value indicates the token is not available (with bit
+ * value of 1'b1, instead of 1'b0).
+ * This is an error.
+*/
+#define  POOL1_INTR_STS_MEMORY_CORRUPT_STS_MASK	0x100
+
+/*
+ * Free or Mcast update on disabled pool interrupt.
+ * This bit goes active when a free or multi-cast request is received and
+ * FPM is not enabled, i.
+ * e.
+ * , pool enable bit in FPM control register is not set to 1'b1.
+*/
+#define  POOL1_INTR_STS_POOL_DIS_FREE_MULTI_STS_MASK	0x80
+
+/*
+ * Token multi-cast value update request with index out-of-range Interrupt.
+ * This is set when the token index is not aligned to the pool size.
+ * This is determined by examining the pool select field (bits[29:
+ * 28]) and the 3 lsbs of the token index (bits[14:
+ * 12]).
+ * There is no associated count for this error.
+ * Note:
+ * this error is not checked if auto_pool_en is set.
+ * The auto_pool_en bit is always set when using the new token format
+ * without a pool select field.
+*/
+#define  POOL1_INTR_STS_MULTI_TOKEN_INDEX_OUT_OF_RANGE_STS_MASK	0x40
+
+/*
+ * Token multi-cast value update request with invalid token Interrupt.
+ * Invalid multi-cast token is determined when one or more the following
+ * conditions are met - 1.
+ * Incoming multi-cast request token has valid bit (bit[31]) set to 1'b0 2.
+ * Incoming multi-cast request token index is not aligned to the pool size
+ * indicated by the pool select field (bits[29:
+ * 28]) 3.
+ * Incoming multi-cast request token has use count field (bit[6:
+ * 0]) set to zero 4.
+ * Incoming multi-cast request token entry in the usage array indicates it
+ * is not an allocated token, i.
+ * e.
+ * , associated use count value for this count in the usage array is zero
+ * 5.
+ * After updating the use count value, the new use count value exceeds 0x7E
+ * Note:
+ * item 2 is not checked if auto_pool_en is set.
+ * The auto_pool_en bit is always set when using the new token format
+ * without a pool select field.
+*/
+#define  POOL1_INTR_STS_MULTI_TOKEN_NO_VALID_STS_MASK	0x20
+
+/*
+ * De-allocation token request with index out-of-range Interrupt.
+ * Free token index out of range is determined when one or more of the
+ * following conditions are met - 1.
+ * Incoming free request token index is not aligned to the pool size
+ * indicated by the pool select field (bits[29:
+ * 28]) 2.
+ * The buffer size indicated by the size field (bits[11:
+ * 0]) is greater than the size of the allocated token.
+ * There is no associated count for this error.
+ * Note:
+ * item 1 is not checked if auto_pool_en is set.
+ * The auto_pool_en bit is always set when using the new token format
+ * without a pool select field.
+*/
+#define  POOL1_INTR_STS_FREE_TOKEN_INDEX_OUT_OF_RANGE_STS_MASK	0x10
+
+/*
+ * De-allocation token request with invalid token Interrupt.
+ * Invalid free token is determined when one or more the following
+ * conditions are met - 1.
+ * Incoming free request token has valid bit (bit[31]) set to 1'b0 2.
+ * Incoming free request token index is not aligned to the pool size
+ * indicated by the pool select field (bits[29:
+ * 28]) 3.
+ * Incoming free request token entry in the usage array indicates it is not
+ * an allocated token, i.
+ * e.
+ * , associated use count value for this count in the usage array is zero
+ * Note:
+ * item 2 is not checked if auto_pool_en is set.
+ * The auto_pool_en bit is always set when using the new token format
+ * without a pool select field.
+*/
+#define  POOL1_INTR_STS_FREE_TOKEN_NO_VALID_STS_MASK	0x8
+
+/*
+ * Usage Index Pool is fully allocated interrupt.
+ * This is a functional status bit, not an error status bit.
+ * This indicates that token pool is fully allocated and there are no free
+ * tokens available.
+ * This bit will be active (high) as long as there no free tokens available
+ * to allocate.
+ * This bit is intended to be used for debug purpose only.
+*/
+#define  POOL1_INTR_STS_POOL_FULL_STS_MASK	0x4
+
+/*
+ * De-Allocation FIFO Full Interrupt.
+ * This is a functional status bit, not an error status bit.
+ * This indicates that de-allocation FIFO is full with tokens needs to be
+ * freed and will be active (high) as long as FIFO is full.
+ * This status is intended to be used for debug purpose only.
+*/
+#define  POOL1_INTR_STS_FREE_FIFO_FULL_STS_MASK	0x2
+
+/*
+ * Allocation FIFO Full Interrupt.
+ * This is a functional status bit, not an error status bit.
+ * This indicates that allocation FIFO is full with new tokens to be
+ * allocated and will be active (high) as long as FIFO is full.
+ * This status is intended to be used for debug purpose only.
+*/
+#define  POOL1_INTR_STS_ALLOC_FIFO_FULL_STS_MASK	0x1
+
+
+/*
+ * Register <POOL2 Stall FPM mask>
+ *
+ * Software sets desired stall bits that upon corresponding active
+ * interrupt status will stall FPM from new allocation, de-allocation, and
+ * mcast update process.
+ * Listed below are the supported interrupt statuses 1.
+ * Invalid free token (bit[3] of interrupt status register 0x14) 2.
+ * Invalid free token with index out-of-range (bit[4] of interrupt status
+ * register 0x14) 3.
+ * Invalid mcast token (bit[5] of interrupt status register 0x14) 4.
+ * Invalid mcast token with index out-of-range (bit[6] of interrupt status
+ * register 0x14) 5.
+ * Memory corrupt status (bit[8] of interrupt status register 0x14) When
+ * state machine is stalled, registers and memory can still be accessed.
+ * Any new token allocation request will be serviced with valid tokens (if
+ * available in alloc cache) and invalid tokens (if alloc cache is empty).
+ * Any new de-allocation/mcast update requests will be either stored in
+ * de-allocation fifo (if there is space in free fifo) or dropped (if free
+ * fifo is full).
+ * Bit locations in this register match the location of corresponding
+ * interrupt status bits in register 0x14.
+ * To un-stall (enable) state machine interrupt status bits (in register
+ * 0x14) corresponding to these mask bits should be cleared.
+ * Stall mask bits are active high and are disabled by default.
+ * This is for debug purposes only.
+ */
+#define FPM_POOL1_STALL_MSK		0x18
+
+/* Stall FPM on Index Memory corrupt interrupt status. */
+#define  POOL1_STALL_MSK_MEMORY_CORRUPT_STALL_MSK_MASK	0x100
+
+/*
+ * Stall FPM on Token multi-cast value update request with index
+ * out-of-range interrupt status.
+*/
+#define  POOL1_STALL_MSK_MULTI_TOKEN_INDEX_OUT_OF_RANGE_STALL_MSK_MASK	0x40
+
+/*
+ * Stall FPM on Token multi-cast value update request with invalid token
+ * interrupt status.
+*/
+#define  POOL1_STALL_MSK_MULTI_TOKEN_NO_VALID_STALL_MSK_MASK	0x20
+
+/*
+ * Stall FPM on De-allocation token request with index out-of-range
+ * interrupt status.
+*/
+#define  POOL1_STALL_MSK_FREE_TOKEN_INDEX_OUT_OF_RANGE_STALL_MSK_MASK	0x10
+
+/*
+ * Stall FPM on De-allocation token request with invalid token interrupt
+ * status.
+*/
+#define  POOL1_STALL_MSK_FREE_TOKEN_NO_VALID_STALL_MSK_MASK	0x8
+
+
+/*
+ * Register <POOL2 Interrupt Mask>
+ *
+ * Mask bits are active high and are disabled by default.
+ * Software enables desired bits as necessary
+ */
+#define FPM_POOL2_INTR_MSK		0x1c
+
+/* Expired token recovered interrupt mask. */
+#define  POOL2_INTR_MSK_EXPIRED_TOKEN_RECOV_MSK_MASK	0x4000
+
+/* Expired token detect interrupt mask. */
+#define  POOL2_INTR_MSK_EXPIRED_TOKEN_DET_MSK_MASK	0x2000
+
+/* Illegal token request interrupt mask. */
+#define  POOL2_INTR_MSK_ILLEGAL_ALLOC_REQUEST_MSK_MASK	0x1000
+
+/* Illegal/un-implemented register/memory space access interrupt mask. */
+#define  POOL2_INTR_MSK_ILLEGAL_ADDRESS_ACCESS_MSK_MASK	0x800
+
+/* XON_STATE interrupt mask. */
+#define  POOL2_INTR_MSK_XON_MSK_MASK	0x400
+
+/* XOFF_STATE interrupt mask. */
+#define  POOL2_INTR_MSK_XOFF_MSK_MASK	0x200
+
+/* Index Memory corrupt interrupt mask. */
+#define  POOL2_INTR_MSK_MEMORY_CORRUPT_MSK_MASK	0x100
+
+/* Free or Mcast update on disabled pool interrupt mask . */
+#define  POOL2_INTR_MSK_POOL_DIS_FREE_MULTI_MSK_MASK	0x80
+
+/* Token multi-cast value update request with index out-of-range. */
+#define  POOL2_INTR_MSK_MULTI_TOKEN_INDEX_OUT_OF_RANGE_MSK_MASK	0x40
+
+/* Token multi-cast value update request with invalid token. */
+#define  POOL2_INTR_MSK_MULTI_TOKEN_NO_VALID_MSK_MASK	0x20
+
+/* De-allocation token request with index out-of-range. */
+#define  POOL2_INTR_MSK_FREE_TOKEN_INDEX_OUT_OF_RANGE_MSK_MASK	0x10
+
+/* De-allocation token request with invalid token. */
+#define  POOL2_INTR_MSK_FREE_TOKEN_NO_VALID_MSK_MASK	0x8
+
+/* Usage Index Pool is fully allocated interrupt mask. */
+#define  POOL2_INTR_MSK_POOL_FULL_MSK_MASK	0x4
+
+/* De-Allocation FIFO Full Interrupt mask. */
+#define  POOL2_INTR_MSK_FREE_FIFO_FULL_MSK_MASK	0x2
+
+/* Allocation FIFO Full Interrupt mask. */
+#define  POOL2_INTR_MSK_ALLOC_FIFO_FULL_MSK_MASK	0x1
+
+
+/*
+ * Register <POOL2 Interrupt Status>
+ *
+ * Interrupt bits are active high.
+ * When a bit in this register is set to 1 and the corresponding bit in
+ * interrupt mask register is set to 1, interrupt to CPU will occur.
+ * When set (1), interrupts bits can be cleared (0) by writing a 1 to the
+ * desired bit.
+ */
+#define FPM_POOL2_INTR_STS		0x20
+
+/*
+ * Expired token recovered interrupt.
+ * This is set when an expired token has been recovered and returned to pool
+ * as an available token.
+*/
+#define  POOL2_INTR_STS_EXPIRED_TOKEN_RECOV_STS_MASK	0x4000
+
+/*
+ * Expired token detect interrupt.
+ * This is set when the token recovery logic detects a token that has been
+ * held for the entire duration of the aging timer.
+*/
+#define  POOL2_INTR_STS_EXPIRED_TOKEN_DET_STS_MASK	0x2000
+
+/*
+ * Illegal token request interrupt.
+ * This will be active when the pool is disabled, there is a request for a
+ * new token and the alloc fifo for the selected token size is empty.
+ * Along with interrupt being sent an error reply packet will be sent out
+ * with o_ubus_error_out asserted.
+*/
+#define  POOL2_INTR_STS_ILLEGAL_ALLOC_REQUEST_STS_MASK	0x1000
+
+/*
+ * Illegal/un-implemented register/memory space access interrupt.
+ * This will be active when there is an attempt to read from an
+ * unimplemented register or memory space.
+ * Along with interrupt being sent an error reply packet will be sent out
+ * with o_ubus_error_out asserted.
+*/
+#define  POOL2_INTR_STS_ILLEGAL_ADDRESS_ACCESS_STS_MASK	0x800
+
+/*
+ * Number of available tokens is greater than or equal to XON_THRESHOLD
+ * value in XON/XOFF Threshold configuration register.
+ * This is a functional status bit, not an error status bit.
+ * Using this information FPM generates "backpressure" output signal that
+ * is used by other UBUS client logics to throttle its operation.
+ * For example, UNIMAC logic can use "backpressure" signal to transfer
+ * "PAUSE" Ethernet flow control packets to throttle incoming frames on
+ * Ethernet interface.
+*/
+#define  POOL2_INTR_STS_XON_STATE_STS_MASK	0x400
+
+/*
+ * Number of available tokens is less than or equal to XOFF_THRESHOLD value
+ * in XON/XOFF Threshold configuration register.
+ * This is a functional status bit, not an error status bit.
+ * Using this information FPM generates "backpressure" output signal that
+ * is used by other UBUS client logics to throttle its operation.
+ * For example, UNIMAC logic can use "backpressure" signal to transfer
+ * "PAUSE" Ethernet flow control packets to throttle incoming frames on
+ * Ethernet interface.
+*/
+#define  POOL2_INTR_STS_XOFF_STATE_STS_MASK	0x200
+
+/*
+ * Index Memory is corrupted.
+ * During updates of the usage array, token manager checks if the use count
+ * and search tree value in the array has a legal value.
+ * If the use count or search tree value is not correct before updating,
+ * logic generates an error and interrupt.
+ * As long as the interrupt is active no more valid tokens will be
+ * allocated because this is a catastrophic error.
+ * Following are the two error conditions that are checked - 1.
+ * During search for a free token, a particular token use count value
+ * indicates it is allocated (use count is greater than 0), but
+ * corresponding upper level search tree value indicates the token is still
+ * available (with bit value of 1'b0, instead of 1'b1).
+ * This is an error.
+ * 2.
+ * During search for a free token, a particular token use count value
+ * indicates that it is free (use count is 0), but corresponding upper
+ * level search tree value indicates the token is not available (with bit
+ * value of 1'b1, instead of 1'b0).
+ * This is an error.
+*/
+#define  POOL2_INTR_STS_MEMORY_CORRUPT_STS_MASK	0x100
+
+/*
+ * Free or Mcast update on disabled pool interrupt.
+ * This bit goes active when a free or multi-cast request is received and
+ * FPM is not enabled, i.
+ * e.
+ * , pool enable bit in FPM control register is not set to 1'b1.
+*/
+#define  POOL2_INTR_STS_POOL_DIS_FREE_MULTI_STS_MASK	0x80
+
+/*
+ * Token multi-cast value update request with index out-of-range Interrupt.
+ * This is set when the token index is not aligned to the pool size.
+ * This is determined by examining the pool select field (bits[29:
+ * 28]) and the 3 lsbs of the token index (bits[14:
+ * 12]).
+ * There is no associated count for this error.
+ * Note:
+ * this error is not checked if auto_pool_en is set.
+ * The auto_pool_en bit is always set when using the new token format
+ * without a pool select field.
+*/
+#define  POOL2_INTR_STS_MULTI_TOKEN_INDEX_OUT_OF_RANGE_STS_MASK	0x40
+
+/*
+ * Token multi-cast value update request with invalid token Interrupt.
+ * Invalid multi-cast token is determined when one or more the following
+ * conditions are met - 1.
+ * Incoming multi-cast request token has valid bit (bit[31]) set to 1'b0 2.
+ * Incoming multi-cast request token index is not aligned to the pool size
+ * indicated by the pool select field (bits[29:
+ * 28]) 3.
+ * Incoming multi-cast request token has use count field (bit[6:
+ * 0]) set to zero 4.
+ * Incoming multi-cast request token entry in the usage array indicates it
+ * is not an allocated token, i.
+ * e.
+ * , associated use count value for this count in the usage array is zero
+ * 5.
+ * After updating the use count value, the new use count value exceeds 0x7E
+ * Note:
+ * item 2 is not checked if auto_pool_en is set.
+ * The auto_pool_en bit is always set when using the new token format
+ * without a pool select field.
+*/
+#define  POOL2_INTR_STS_MULTI_TOKEN_NO_VALID_STS_MASK	0x20
+
+/*
+ * De-allocation token request with index out-of-range Interrupt.
+ * Free token index out of range is determined when one or more of the
+ * following conditions are met - 1.
+ * Incoming free request token index is not aligned to the pool size
+ * indicated by the pool select field (bits[29:
+ * 28]) 2.
+ * The buffer size indicated by the size field (bits[11:
+ * 0]) is greater than the size of the allocated token.
+ * There is no associated count for this error.
+ * Note:
+ * item 1 is not checked if auto_pool_en is set.
+ * The auto_pool_en bit is always set when using the new token format
+ * without a pool select field.
+*/
+#define  POOL2_INTR_STS_FREE_TOKEN_INDEX_OUT_OF_RANGE_STS_MASK	0x10
+
+/*
+ * De-allocation token request with invalid token Interrupt.
+ * Invalid free token is determined when one or more the following
+ * conditions are met - 1.
+ * Incoming free request token has valid bit (bit[31]) set to 1'b0 2.
+ * Incoming free request token index is not aligned to the pool size
+ * indicated by the pool select field (bits[29:
+ * 28]) 3.
+ * Incoming free request token entry in the usage array indicates it is not
+ * an allocated token, i.
+ * e.
+ * , associated use count value for this count in the usage array is zero
+ * Note:
+ * item 2 is not checked if auto_pool_en is set.
+ * The auto_pool_en bit is always set when using the new token format
+ * without a pool select field.
+*/
+#define  POOL2_INTR_STS_FREE_TOKEN_NO_VALID_STS_MASK	0x8
+
+/*
+ * Usage Index Pool is fully allocated interrupt.
+ * This is a functional status bit, not an error status bit.
+ * This indicates that token pool is fully allocated and there are no free
+ * tokens available.
+ * This bit will be active (high) as long as there are no free tokens
+ * available to allocate.
+ * This bit is intended to be used for debug purpose only.
+*/
+#define  POOL2_INTR_STS_POOL_FULL_STS_MASK	0x4
+
+/*
+ * De-Allocation FIFO Full Interrupt.
+ * This is a functional status bit, not an error status bit.
+ * This indicates that de-allocation FIFO is full with tokens needs to be
+ * freed and will be active (high) as long as FIFO is full.
+ * This status is intended to be used for debug purpose only.
+*/
+#define  POOL2_INTR_STS_FREE_FIFO_FULL_STS_MASK	0x2
+
+/*
+ * Allocation FIFO Full Interrupt.
+ * This is a functional status bit, not an error status bit.
+ * This indicates that allocation FIFO is full with new tokens to be
+ * allocated and will be active (high) as long as FIFO is full.
+ * This status is intended to be used for debug purpose only.
+*/
+#define  POOL2_INTR_STS_ALLOC_FIFO_FULL_STS_MASK	0x1
+
+
+/*
+ * Register <POOL2 Stall FPM mask>
+ *
+ * Software sets desired stall bits that upon corresponding active
+ * interrupt status will stall FPM from new allocation, de-allocation, and
+ * mcast update process.
+ * Listed below are the supported interrupt statuses 1.
+ * Invalid free token (bit[3] of interrupt status register 0x14) 2.
+ * Invalid free token with index out-of-range (bit[4] of interrupt status
+ * register 0x14) 3.
+ * Invalid mcast token (bit[5] of interrupt status register 0x14) 4.
+ * Invalid mcast token with index out-of-range (bit[6] of interrupt status
+ * register 0x14) 5.
+ * Memory corrupt status (bit[8] of interrupt status register 0x14) When
+ * state machine is stalled, registers and memory can still be accessed.
+ * Any new token allocation request will be serviced with valid tokens (if
+ * available in alloc cache) and invalid tokens (if alloc cache is empty).
+ * Any new de-allocation/mcast update requests will be either stored in
+ * de-allocation fifo (if there is space in free fifo) or dropped (if free
+ * fifo is full).
+ * Bit locations in this register match the location of the corresponding
+ * interrupt status bits in register 0x14.
+ * To un-stall (enable) state machine interrupt status bits (in register
+ * 0x14) corresponding to these mask bits should be cleared.
+ * Stall mask bits are active high and are disabled by default.
+ * This is for debug purposes only.
+ */
+#define FPM_POOL2_STALL_MSK		0x24
+
+/* Stall FPM on Index Memory corrupt interrupt status. */
+#define  POOL2_STALL_MSK_MEMORY_CORRUPT_STALL_MSK_MASK	0x100
+
+/*
+ * Stall FPM on Token multi-cast value update request with index
+ * out-of-range interrupt status.
+*/
+#define  POOL2_STALL_MSK_MULTI_TOKEN_INDEX_OUT_OF_RANGE_STALL_MSK_MASK	0x40
+
+/*
+ * Stall FPM on Token multi-cast value update request with invalid token
+ * interrupt status.
+*/
+#define  POOL2_STALL_MSK_MULTI_TOKEN_NO_VALID_STALL_MSK_MASK	0x20
+
+/*
+ * Stall FPM on De-allocation token request with index out-of-range
+ * interrupt status.
+*/
+#define  POOL2_STALL_MSK_FREE_TOKEN_INDEX_OUT_OF_RANGE_STALL_MSK_MASK	0x10
+
+/*
+ * Stall FPM on De-allocation token request with invalid token interrupt
+ * status.
+*/
+#define  POOL2_STALL_MSK_FREE_TOKEN_NO_VALID_STALL_MSK_MASK	0x8
+
+
+/*
+ * Register <POOL1 Configuration1>
+ *
+ */
+#define FPM_POOL1_CFG1			0x40
+
+/*
+ * Selects the size of the buffer to be used in the pool.
+ * All buffers must be the same size.
+ * 0 - 512 byte buffers 1 - 256 byte buffers all other values - reserved
+*/
+#define  POOL1_CFG1_FPM_BUF_SIZE_SHIFT	24
+#define  POOL1_CFG1_FPM_BUF_SIZE_MASK	0x7000000
+
+
+/*
+ * Register <POOL1 Configuration2>
+ *
+ * This register sets the physical base address of this memory.
+ * The memory block should be the number of buffers times the buffer size.
+ * This is mainly used for multi-pool memory configuration.
+ * NOTE:
+ * POOL_BASE_ADDRESS[7:
+ * 2] and reserved[1:
+ * 0] field must be written with 0x00 in the BCM3382 because
+ * its token-to-address converter assumes the buffers start on a 2kB
+ * boundary.
+ */
+#define FPM_POOL1_CFG2			0x44
+
+/*
+ * Buffer base address.
+ * 7:
+ * 2 must be 0x00.
+*/
+#define  POOL1_CFG2_POOL_BASE_ADDRESS_SHIFT	2
+#define  POOL1_CFG2_POOL_BASE_ADDRESS_MASK	0xfffffffc
+
+
+/*
+ * Register <POOL1 Configuration3>
+ *
+ * This register sets the physical base address of this memory.
+ * The memory block should be the number of buffers times the buffer size.
+ * This is mainly used for multi-pool memory configuration.
+ */
+#define FPM_POOL1_CFG3			0x48
+
+/*
+ * Buffer base address.
+ * 7:
+ * 2 must be 0x00.
+*/
+#define  POOL1_CFG3_POOL_BASE_ADDRESS_POOL2_SHIFT	2
+#define  POOL1_CFG3_POOL_BASE_ADDRESS_POOL2_MASK	0xfffffffc
+
+
+/*
+ * Register <POOL1 Status1> - read-only
+ *
+ * This read only register allows software to read the count of free pool
+ * overflows and underflows.
+ * An overflow condition occurs when the pool is empty, i.e.,
+ * no tokens are allocated and a free/mcast request is encountered.
+ * An underflow condition occurs when the pool is full, i.e.,
+ * there are no free tokens and an allocation request is encountered.
+ * When the counter values reaches maximum count, it will hold the max
+ * value and not increment the count value unless it is cleared.
+ * Any write to this register will clear both counters.
+ */
+#define FPM_POOL1_STAT1			0x50
+
+/* Free Pool overflow count */
+#define  POOL1_STAT1_OVRFL_SHIFT	16
+#define  POOL1_STAT1_OVRFL_MASK		0xffff0000
+
+/* Free Pool underflow count */
+#define  POOL1_STAT1_UNDRFL_SHIFT	0
+#define  POOL1_STAT1_UNDRFL_MASK	0xffff
+
+
+/*
+ * Register <POOL1 Status2> - read-only
+ *
+ * This read only register provide status of index memory, alloc & free
+ * cache/fifos.
+ * These are real time statuses and bits are not sticky.
+ * Write to any bits will have no effect.
+ */
+#define FPM_POOL1_STAT2			0x54
+
+/*
+ * POOL is full. This indicates that all tokens have been allocated and
+ * there are no free tokens available.
+ * This bit will be active as long as the usage array is fully allocated.
+*/
+#define  POOL1_STAT2_POOL_FULL_MASK	0x80000000
+
+/* FREE_FIFO is full. */
+#define  POOL1_STAT2_FREE_FIFO_FULL_MASK	0x20000000
+
+/* FREE_FIFO is empty */
+#define  POOL1_STAT2_FREE_FIFO_EMPTY_MASK	0x10000000
+
+/* ALLOC_FIFO is full */
+#define  POOL1_STAT2_ALLOC_FIFO_FULL_MASK	0x8000000
+
+/* ALLOC_FIFO is empty. */
+#define  POOL1_STAT2_ALLOC_FIFO_EMPTY_MASK	0x4000000
+
+/*
+ * Count of tokens available for allocation.
+ * This provides a count of number of free tokens that available for
+ * allocation in the usage array.
+ * This value is updated instantaneously as tokens are allocated or freed
+ * from the array.
+*/
+#define  POOL1_STAT2_NUM_OF_TOKENS_AVAILABLE_SHIFT	0
+#define  POOL1_STAT2_NUM_OF_TOKENS_AVAILABLE_MASK	0x3ffff
+
+
+/*
+ * Register <POOL1 Status3> - read-only
+ *
+ * This read only register allows software to read the count of free token
+ * requests with in-valid tokens When the counter values reaches maximum
+ * count, it will hold the max value and not increment the count value
+ * unless it is cleared.
+ * Any write to this register will clear count value.
+ */
+#define FPM_POOL1_STAT3			0x58
+
+/*
+ * Count of de-allocate token requests with invalid tokens.
+ * For more information on conditions under which this counter is
+ * incremented, refer to POOL1_INTR_STS register (offset 0x14) bit[3]
+ * explanation in this document.
+*/
+#define  POOL1_STAT3_NUM_OF_NOT_VALID_TOKEN_FREES_SHIFT	0
+#define  POOL1_STAT3_NUM_OF_NOT_VALID_TOKEN_FREES_MASK	0x3ffff
+
+
+/*
+ * Register <POOL1 Status4> - read-only
+ *
+ * This read only register allows software to read the count of multi-cast
+ * token update requests with in-valid tokens.
+ * When the counter values reaches maximum count, it will hold the max
+ * value and not increment the count value unless it is cleared.
+ * Any write to this register will clear count value.
+ */
+#define FPM_POOL1_STAT4			0x5c
+
+/*
+ * Count of multi-cast token update requests with either valid bit not set,
+ * For more information on conditions under which this counter is
+ * incremented, refer to POOL1_INTR_STS register (offset 0x14) bit[5]
+ * explanation in this document.
+*/
+#define  POOL1_STAT4_NUM_OF_NOT_VALID_TOKEN_MULTI_SHIFT	0
+#define  POOL1_STAT4_NUM_OF_NOT_VALID_TOKEN_MULTI_MASK	0x3ffff
+
+
+/*
+ * Register <POOL1 Status5> - read-only
+ *
+ * This read only register allows software to read the alloc token that
+ * causes memory corrupt interrupt (intr[8]) to go active.
+ * This is for debug purposes only.
+ * Any write to this register will clear token value (makes all bits zero).
+ */
+#define FPM_POOL1_STAT5			0x60
+
+/*
+ * This bit provides status of the token in bits[30:
+ * 0] of this register 0 = New token is not captured 1 = New token is
+ * captured
+*/
+#define  POOL1_STAT5_MEM_CORRUPT_STS_RELATED_ALLOC_TOKEN_VALID_MASK	0x80000000
+
+/*
+ * Token that causes memory corrupt interrupt active.
+ * If there are multiple tokens that causes this error, only the first one
+ * is captured.
+ * To capture successive tokens that causes the error this register should
+ * be cleared by writing any random value, in addition, memory corrupt
+ * status bit (bit[8]) in interrupt status register 0x14 should be cleared.
+ * Bitmap for these bits is shown below (reserved bits are zeros) Bit[30] -
+ * Reserved Bit[29:
+ * 12] - Token Bit[11:
+ * 0] - Buffer size in bytes
+*/
+#define  POOL1_STAT5_MEM_CORRUPT_STS_RELATED_ALLOC_TOKEN_SHIFT	0
+#define  POOL1_STAT5_MEM_CORRUPT_STS_RELATED_ALLOC_TOKEN_MASK	0x7fffffff
+
+
+/*
+ * Register <POOL1 Status6> - read-only
+ *
+ * This read only register allows software to read the free token that
+ * causes invalid free request or free token with index out-of-range
+ * interrupts (intr[3] or intr[4]) to go active.
+ * This is for debug purposes only.
+ * Any write to this register will clear token value (makes all bits zero).
+ */
+#define FPM_POOL1_STAT6			0x64
+
+/*
+ * This bit provides status of the token in bits[30:
+ * 0] of this register 0 = New token is not captured 1 = New token is
+ * captured
+*/
+#define  POOL1_STAT6_INVALID_FREE_TOKEN_VALID_MASK	0x80000000
+
+/*
+ * Token that causes intr[3] or intr[4] active.
+ * If there are multiple tokens that causes this error, only the first one
+ * is captured.
+ * To capture successive tokens that causes the error this register should
+ * be cleared by writing any random value.
+ * Bitmap for these bits is shown below (reserved bits are either zeros or
+ * can reflect the length of the packet associated with the freed token)
+ * Bit[30] - Reserved Bit[29:
+ * 12] - Token Bit[11:
+ * 0] - Reserved
+*/
+#define  POOL1_STAT6_INVALID_FREE_TOKEN_SHIFT	0
+#define  POOL1_STAT6_INVALID_FREE_TOKEN_MASK	0x7fffffff
+
+
+/*
+ * Register <POOL1 Status7> - read-only
+ *
+ * This read only register allows software to read the multi-cast token
+ * that causes invalid mcast request or mcast token with index out-of-range
+ * interrupts (intr[5] or intr[6]) to go active.
+ * This is for debug purposes only.
+ * Any write to this register will clear token value (makes all bits zero).
+ */
+#define FPM_POOL1_STAT7			0x68
+
+/*
+ * This bit provides status of the token in bits[30:
+ * 0] of this register 0 = New token is not captured 1 = New token is
+ * captured
+*/
+#define  POOL1_STAT7_INVALID_MCAST_TOKEN_VALID_MASK	0x80000000
+
+/*
+ * Token that causes intr[5] or intr[6] active.
+ * If there are multiple tokens that causes this error, only the first one
+ * is captured.
+ * To capture successive tokens that causes the error this register should
+ * be cleared by writing any random value.
+ * Bitmap for these bits is shown below (reserved bits are zeros) Bit[30] -
+ * Reserved Bit[29:
+ * 12] - Token Bit[11] - Mcast update type (refer to register 0x224[11])
+ * Bit[10:
+ * 7] - Reserved Bit[6:
+ * 0] - Mcast value
+*/
+#define  POOL1_STAT7_INVALID_MCAST_TOKEN_SHIFT	0
+#define  POOL1_STAT7_INVALID_MCAST_TOKEN_MASK	0x7fffffff
+
+
+/*
+ * Register <POOL1 Status8> - read-only
+ *
+ * This register allows software to read the lowest value the
+ * NUM_OF_TOKENS_AVAILABLE count reached since the last time it was
+ * cleared.
+ * Any write to this register will reset the value back to the maximum
+ * number of tokens (0x10000)
+ */
+#define FPM_POOL1_STAT8			0x6c
+
+/* Lowest value the NUM_OF_TOKENS_AVAIL count has reached. */
+#define  POOL1_STAT8_TOKENS_AVAILABLE_LOW_WTMK_SHIFT	0
+#define  POOL1_STAT8_TOKENS_AVAILABLE_LOW_WTMK_MASK	0x3ffff
+
+
+/*
+ * Register <POOL2 Status1> - read-only
+ *
+ * This read only register allows software to read the count of free pool
+ * overflows and underflows.
+ * An overflow condition occurs when the pool is empty, i.e.,
+ * no tokens are allocated and a free/mcast request is encountered.
+ * An underflow condition occurs when the pool is full, i.e.,
+ * there are no free tokens and an allocation request is encountered.
+ * When the counter values reaches maximum count, it will hold the max
+ * value and not increment the count value unless it is cleared.
+ * Any write to this register will clear both counters.
+ */
+#define FPM_POOL2_STAT1			0x70
+
+/* Free Pool overflow count */
+#define  POOL2_STAT1_OVRFL_SHIFT	16
+#define  POOL2_STAT1_OVRFL_MASK		0xffff0000
+
+/* Free Pool underflow count */
+#define  POOL2_STAT1_UNDRFL_SHIFT	0
+#define  POOL2_STAT1_UNDRFL_MASK	0xffff
+
+
+/*
+ * Register <POOL2 Status2> - read-only
+ *
+ * This read only register provide status of index memory, alloc & free
+ * cache/fifos.
+ * These are real time statuses and bits are not sticky.
+ * Write to any bits will have no effect.
+ */
+#define FPM_POOL2_STAT2			0x74
+
+/*
+ * POOL is full. This indicates that all tokens have been allocated and
+ * there are no free tokens available.
+ * This bit will be active as long as the usage array is fully allocated.
+*/
+#define  POOL2_STAT2_POOL_FULL_MASK	0x80000000
+
+/* FREE_FIFO is full. */
+#define  POOL2_STAT2_FREE_FIFO_FULL_MASK	0x20000000
+
+/* FREE_FIFO is empty */
+#define  POOL2_STAT2_FREE_FIFO_EMPTY_MASK	0x10000000
+
+/* ALLOC_FIFO is full */
+#define  POOL2_STAT2_ALLOC_FIFO_FULL_MASK	0x8000000
+
+/* ALLOC_FIFO is empty. */
+#define  POOL2_STAT2_ALLOC_FIFO_EMPTY_MASK	0x4000000
+
+/*
+ * Count of tokens available for allocation.
+ * This provides a count of number of free tokens that available for
+ * allocation in the usage array.
+ * This value is updated instantaneously as tokens are allocated or freed
+ * from the array.
+*/
+#define  POOL2_STAT2_NUM_OF_TOKENS_AVAILABLE_SHIFT	0
+#define  POOL2_STAT2_NUM_OF_TOKENS_AVAILABLE_MASK	0x3ffff
+
+
+/*
+ * Register <POOL2 Status3> - read-only
+ *
+ * This read only register allows software to read the count of free token
+ * requests with in-valid tokens When the counter values reaches maximum
+ * count, it will hold the max value and not increment the count value
+ * unless it is cleared.
+ * Any write to this register will clear count value.
+ */
+#define FPM_POOL2_STAT3			0x78
+
+/*
+ * Count of de-allocate token requests with invalid tokens.
+ * For more information on conditions under which this counter is
+ * incremented, refer to POOL1_INTR_STS register (offset 0x14) bit[3]
+ * explanation in this document.
+*/
+#define  POOL2_STAT3_NUM_OF_NOT_VALID_TOKEN_FREES_SHIFT	0
+#define  POOL2_STAT3_NUM_OF_NOT_VALID_TOKEN_FREES_MASK	0x3ffff
+
+
+/*
+ * Register <POOL2 Status4> - read-only
+ *
+ * This read only register allows software to read the count of multi-cast
+ * token update requests with in-valid tokens.
+ * When the counter values reaches maximum count, it will hold the max
+ * value and not increment the count value unless it is cleared.
+ * Any write to this register will clear count value.
+ */
+#define FPM_POOL2_STAT4			0x7c
+
+/*
+ * Count of multi-cast token update requests with either valid bit not set,
+ * For more information on conditions under which this counter is
+ * incremented, refer to POOL1_INTR_STS register (offset 0x14) bit[5]
+ * explanation in this document.
+*/
+#define  POOL2_STAT4_NUM_OF_NOT_VALID_TOKEN_MULTI_SHIFT	0
+#define  POOL2_STAT4_NUM_OF_NOT_VALID_TOKEN_MULTI_MASK	0x3ffff
+
+
+/*
+ * Register <POOL2 Status5> - read-only
+ *
+ * This read only register allows software to read the alloc token that
+ * causes memory corrupt interrupt (intr[8]) to go active.
+ * This is for debug purposes only.
+ * Any write to this register will clear token value (makes all bits zero).
+ */
+#define FPM_POOL2_STAT5			0x80
+
+/*
+ * This bit provides status of the token in bits[30:
+ * 0] of this register 0 = New token is not captured 1 = New token is
+ * captured
+*/
+#define  POOL2_STAT5_MEM_CORRUPT_STS_RELATED_ALLOC_TOKEN_VALID_MASK	0x80000000
+
+/*
+ * Token that causes memory corrupt interrupt active.
+ * If there are multiple tokens that causes this error, only the first one
+ * is captured.
+ * To capture successive tokens that causes the error this register should
+ * be cleared by writing any random value, in addition, memory corrupt
+ * status bit (bit[8]) in interrupt status register 0x14 should be cleared.
+ * Bitmap for these bits is shown below (reserved bits are zeros) Bit[30] -
+ * Reserved Bit[29:
+ * 12] - Token Bit[11:
+ * 0] - Buffer size in bytes
+*/
+#define  POOL2_STAT5_MEM_CORRUPT_STS_RELATED_ALLOC_TOKEN_SHIFT	0
+#define  POOL2_STAT5_MEM_CORRUPT_STS_RELATED_ALLOC_TOKEN_MASK	0x7fffffff
+
+
+/*
+ * Register <POOL2 Status6> - read-only
+ *
+ * This read only register allows software to read the free token that
+ * causes invalid free request or free token with index out-of-range
+ * interrupts (intr[3] or intr[4]) to go active.
+ * This is for debug purposes only.
+ * Any write to this register will clear token value (makes all bits zero).
+ */
+#define FPM_POOL2_STAT6			0x84
+
+/*
+ * This bit provides status of the token in bits[30:
+ * 0] of this register 0 = New token is not captured 1 = New token is
+ * captured
+*/
+#define  POOL2_STAT6_INVALID_FREE_TOKEN_VALID_MASK	0x80000000
+
+/*
+ * Token that causes intr[3] or intr[4] active.
+ * If there are multiple tokens that causes this error, only the first one
+ * is captured.
+ * To capture successive tokens that causes the error this register should
+ * be cleared by writing any random value.
+ * Bitmap for these bits is shown below (reserved bits are either zeros or
+ * can reflect the length of the packet associated with the freed token)
+ * Bit[30] - Reserved Bit[29:
+ * 12] - Token Bit[11:
+ * 0] - Reserved
+*/
+#define  POOL2_STAT6_INVALID_FREE_TOKEN_SHIFT	0
+#define  POOL2_STAT6_INVALID_FREE_TOKEN_MASK	0x7fffffff
+
+
+/*
+ * Register <POOL2 Status7> - read-only
+ *
+ * This read only register allows software to read the multi-cast token
+ * that causes invalid mcast request or mcast token with index out-of-range
+ * interrupts (intr[5] or intr[6]) to go active.
+ * This is for debug purposes only.
+ * Any write to this register will clear token value (makes all bits zero).
+ */
+#define FPM_POOL2_STAT7			0x88
+
+/*
+ * This bit provides status of the token in bits[30:
+ * 0] of this register 0 = New token is not captured 1 = New token is
+ * captured
+*/
+#define  POOL2_STAT7_INVALID_MCAST_TOKEN_VALID_MASK	0x80000000
+
+/*
+ * Token that causes intr[5] or intr[6] active.
+ * If there are multiple tokens that causes this error, only the first one
+ * is captured.
+ * To capture successive tokens that causes the error this register should
+ * be cleared by writing any random value.
+ * Bitmap for these bits is shown below (reserved bits are zeros) Bit[30] -
+ * Reserved Bit[29:
+ * 12] - Token Bit[11] - Mcast update type (refer to register 0x224[11])
+ * Bit[10:
+ * 7] - Reserved Bit[6:
+ * 0] - Mcast value
+*/
+#define  POOL2_STAT7_INVALID_MCAST_TOKEN_SHIFT	0
+#define  POOL2_STAT7_INVALID_MCAST_TOKEN_MASK	0x7fffffff
+
+
+/*
+ * Register <POOL2 Status8> - read-only
+ *
+ * This register allows software to read the lowest value the
+ * NUM_OF_TOKENS_AVAILABLE count reached since the last time it was
+ * cleared.
+ * Any write to this register will reset the value back to the maximum
+ * number of tokens (0x10000)
+ */
+#define FPM_POOL2_STAT8			0x8c
+
+/* Lowest value the NUM_OF_TOKENS_AVAIL count has reached. */
+#define  POOL2_STAT8_TOKENS_AVAILABLE_LOW_WTMK_SHIFT	0
+#define  POOL2_STAT8_TOKENS_AVAILABLE_LOW_WTMK_MASK	0x3ffff
+
+
+/*
+ * Register <POOL1 XON/XOFF Threshold Configuration>
+ *
+ */
+#define FPM_POOL1_XON_XOFF_CFG		0xc0
+
+/* XON Threshold value */
+#define  POOL1_XON_XOFF_CFG_XON_THRESHOLD_SHIFT	16
+#define  POOL1_XON_XOFF_CFG_XON_THRESHOLD_MASK	0xffff0000
+
+/* XOFF Threshold value */
+#define  POOL1_XON_XOFF_CFG_XOFF_THRESHOLD_SHIFT	0
+#define  POOL1_XON_XOFF_CFG_XOFF_THRESHOLD_MASK	0xffff
+
+
+/*
+ * Register <FPM_NOT_EMPTY Threshold Configuration>
+ *
+ */
+#define FPM_FPM_NOT_EMPTY_CFG		0xd0
+
+/* Threshold value for reasserting pool_not_empty to FPM_BB */
+#define  FPM_NOT_EMPTY_CFG_NOT_EMPTY_THRESHOLD_SHIFT	0
+#define  FPM_NOT_EMPTY_CFG_NOT_EMPTY_THRESHOLD_MASK	0x3f
+
+
+/*
+ * Register <Back door Memory Access Control>
+ *
+ */
+#define FPM_MEM_CTL			0x100
+
+/*
+ * Write control bit for Usage index array memory.
+ * This is a self clearing bit, cleared by hardware to zero once memory
+ * write is complete.
+ * Software can write more locations if the bit value is zero
+*/
+#define  MEM_CTL_MEM_WR_MASK		0x80000000
+
+/*
+ * Read control bit for Usage index array memory.
+ * This is a self clearing bit, cleared by hardware to zero once memory
+ * read is complete.
+ * Software can read more locations if the bit value is zero
+*/
+#define  MEM_CTL_MEM_RD_MASK		0x40000000
+
+/*
+ * 2'b00 = Reserved 2'b01 = FPM Memory 2'b10 = Reserved 2'b11 = When memory
+ * is enabled, bit[31]=1, this value will allow a write to
+ * NUM_OF_TOKENS_AVAILABLE field [17:
+ * 0] in POOL1_STAT2 register (offset 0x54).
+ * This should be used for debug purposes only
+*/
+#define  MEM_CTL_MEM_SEL_SHIFT		28
+#define  MEM_CTL_MEM_SEL_MASK		0x30000000
+
+/* Memory address for write/read location This is DWord aligned address */
+#define  MEM_CTL_MEM_ADDR_SHIFT		2
+#define  MEM_CTL_MEM_ADDR_MASK		0x3fffc
+
+
+/*
+ * Register <Back door Memory Data1>
+ *
+ */
+#define FPM_MEM_DATA1			0x104
+
+/*
+ * Memory Data 1 This contains the lower 32 bits (bits[31:
+ * 0]) of 32/64 bit data
+*/
+#define  MEM_DATA1_MEM_DATA1_SHIFT	0
+#define  MEM_DATA1_MEM_DATA1_MASK	0xffffffff
+
+
+/*
+ * Register <Back door Memory Data2>
+ *
+ */
+#define FPM_MEM_DATA2			0x108
+
+/*
+ * Memory Data 2 This contains the upper 32 bits (bits[63:
+ * 32]) of 64 bit data.
+ * The value in this register should be ignored during 32 bit access
+*/
+#define  MEM_DATA2_MEM_DATA2_SHIFT	0
+#define  MEM_DATA2_MEM_DATA2_MASK	0xffffffff
+
+
+/*
+ * Register <Token Recovery Control>
+ *
+ */
+#define FPM_TOKEN_RECOVER_CTL		0x130
+
+/*
+ * This is a self-clearing bit.
+ * Write a 1 to the bit to reset the RECOVERED_TOKEN_COUNT to 0.
+*/
+#define  TOKEN_RECOVER_CTL_CLR_RECOVERED_TOKEN_COUNT_MASK	0x40
+
+/*
+ * This is a self-clearing bit.
+ * Write a 1 to the bit to reset the EXPIRED_TOKEN_COUNT to 0.
+*/
+#define  TOKEN_RECOVER_CTL_CLR_EXPIRED_TOKEN_COUNT_MASK	0x20
+
+/*
+ * Non-automated token recovery.
+ * This bit can be used when automatic token return is not enabled.
+ * When software gets an interrupt indicating that the token recovery
+ * process has detected expired tokens, it can set this bit to force the
+ * expired tokens to be reclaimed.
+ * 1 = Enabled 0 = Disabled
+*/
+#define  TOKEN_RECOVER_CTL_FORCE_TOKEN_RECLAIM_MASK	0x10
+
+/*
+ * Enable automatic return of marked tokens to the freepool 1 = Enabled 0 =
+ * Disabled
+*/
+#define  TOKEN_RECOVER_CTL_TOKEN_RECLAIM_ENA_MASK	0x8
+
+/*
+ * Enable remarking of tokens for multiple passes through the token
+ * recovery process.
+ * The mark bit is set on all tokens on the first pass through the loop.
+ * When this bit is set, the mark bits will be set again on all subsequent
+ * passes through the loop.
+ * It is anticipated that this bit will always be set when token recovery
+ * is enabled.
+ * It is provided as a potential debug tool.
+ * 1 = Enabled 0 = Disabled
+*/
+#define  TOKEN_RECOVER_CTL_TOKEN_REMARK_ENA_MASK	0x4
+
+/*
+ * If token recovery is enabled, the single-pass control will indicate
+ * whether the hardware should perform just one iteration of the token
+ * recovery process or will continuously loop through the token recovery
+ * process.
+ * 1 = Single pass 0 = Auto repeat
+*/
+#define  TOKEN_RECOVER_CTL_SINGLE_PASS_ENA_MASK	0x2
+
+/* Token recovery enable 1 = Enabled 0 = Disabled */
+#define  TOKEN_RECOVER_CTL_TOKEN_RECOVER_ENA_MASK	0x1
+
+
+/*
+ * Register <Short Aging Timer>
+ *
+ */
+#define FPM_SHORT_AGING_TIMER		0x134
+
+/* Aging timer used in token recovery */
+#define  SHORT_AGING_TIMER_TIMER_SHIFT	0
+#define  SHORT_AGING_TIMER_TIMER_MASK	0xffffffff
+
+
+/*
+ * Register <Long Aging Timer>
+ *
+ */
+#define FPM_LONG_AGING_TIMER		0x138
+
+/* Aging timer used in token recovery */
+#define  LONG_AGING_TIMER_TIMER_SHIFT	0
+#define  LONG_AGING_TIMER_TIMER_MASK	0xffffffff
+
+
+/*
+ * Register <Token Cache Recycle Timer>
+ *
+ */
+#define FPM_CACHE_RECYCLE_TIMER		0x13c
+
+/*
+ * Timer used in token recovery logic.
+ * Upon expiration of timer, one token from the allocate cache will be
+ * freed.
+ * Over time, all cached tokens will be recycled back to the freepool.
+ * This will prevent the cached tokens from being aged out by the token
+ * recovery logic.
+ * This timer should be set to a value so that all tokens can be recycled
+ * before the aging timer expires.
+*/
+#define  CACHE_RECYCLE_TIMER_RECYCLE_TIMER_SHIFT	0
+#define  CACHE_RECYCLE_TIMER_RECYCLE_TIMER_MASK	0xffff
+
+
+/*
+ * Register <Expired Token Count> - read-only
+ *
+ */
+#define FPM_EXPIRED_TOKEN_COUNT_POOL1	0x140
+
+/*
+ * Cumulative count of the number of expired tokens detected in the token
+ * recovery process.
+ * The count can be cleared by setting the CLR_EXPIRED_TOKEN_COUNT in the
+ * TOKEN_RECOVER_CTL register
+*/
+#define  EXPIRED_TOKEN_COUNT_POOL1_COUNT_SHIFT	0
+#define  EXPIRED_TOKEN_COUNT_POOL1_COUNT_MASK	0xffffffff
+
+
+/*
+ * Register <Recovered Token Count> - read-only
+ *
+ */
+#define FPM_RECOVERED_TOKEN_COUNT_POOL1	0x144
+
+/*
+ * Cumulative count of the number of expired tokens that were freed in the
+ * token recovery process.
+ * The count can be cleared by setting the CLR_RECOVERED_TOKEN_COUNT in the
+ * TOKEN_RECOVER_CTL register
+*/
+#define  RECOVERED_TOKEN_COUNT_POOL1_COUNT_SHIFT	0
+#define  RECOVERED_TOKEN_COUNT_POOL1_COUNT_MASK	0xffffffff
+
+
+/*
+ * Register <Expired Token Count> - read-only
+ *
+ */
+#define FPM_EXPIRED_TOKEN_COUNT_POOL2	0x148
+
+/*
+ * Cumulative count of the number of expired tokens detected in the token
+ * recovery process.
+ * The count can be cleared by setting the CLR_EXPIRED_TOKEN_COUNT in the
+ * TOKEN_RECOVER_CTL register
+*/
+#define  EXPIRED_TOKEN_COUNT_POOL2_COUNT_SHIFT	0
+#define  EXPIRED_TOKEN_COUNT_POOL2_COUNT_MASK	0xffffffff
+
+
+/*
+ * Register <Recovered Token Count> - read-only
+ *
+ */
+#define FPM_RECOVERED_TOKEN_COUNT_POOL2	0x14c
+
+/*
+ * Cumulative count of the number of expired tokens that were freed in the
+ * token recovery process.
+ * The count can be cleared by setting the CLR_RECOVERED_TOKEN_COUNT in the
+ * TOKEN_RECOVER_CTL register
+*/
+#define  RECOVERED_TOKEN_COUNT_POOL2_COUNT_SHIFT	0
+#define  RECOVERED_TOKEN_COUNT_POOL2_COUNT_MASK	0xffffffff
+
+
+/*
+ * Register <Token Recovery Start/End Range>
+ *
+ */
+#define FPM_TOKEN_RECOVER_START_END_POOL1	0x150
+
+/* Start of token index range to be used when performing token recovery. */
+#define  TOKEN_RECOVER_START_END_POOL1_START_INDEX_SHIFT	16
+#define  TOKEN_RECOVER_START_END_POOL1_START_INDEX_MASK	0xfff0000
+
+/* End of token index range to be used when performing token recovery. */
+#define  TOKEN_RECOVER_START_END_POOL1_END_INDEX_SHIFT	0
+#define  TOKEN_RECOVER_START_END_POOL1_END_INDEX_MASK	0xfff
+
+
+/*
+ * Register <Token Recovery Start/End Range>
+ *
+ */
+#define FPM_TOKEN_RECOVER_START_END_POOL2	0x154
+
+/* Start of token index range to be used when performing token recovery. */
+#define  TOKEN_RECOVER_START_END_POOL2_START_INDEX_SHIFT	16
+#define  TOKEN_RECOVER_START_END_POOL2_START_INDEX_MASK	0xfff0000
+
+/* End of token index range to be used when performing token recovery. */
+#define  TOKEN_RECOVER_START_END_POOL2_END_INDEX_SHIFT	0
+#define  TOKEN_RECOVER_START_END_POOL2_END_INDEX_MASK	0xfff
+
+
+/*
+ * Register <POOL1 Allocation & De-allocation/Free Management>
+ *
+ * The free pool FIFO contains pointers to the buffers in the pool.
+ * To allocate a buffer from the pool, read token from this port.
+ * To de-allocate/free a buffer to the pool , write the token of the buffer
+ * to this port.
+ * After reset, software must initialize the FIFO.
+ * The buffer size is given in the control register above.
+ * All buffers must be of the same size and contiguous.
+ */
+#define FPM_POOL1_ALLOC_DEALLOC		0x400
+
+/*
+ * Valid Token Indicator 0:
+ * No buffers available 1:
+ * A valid token index is provided.
+ * If a token is de-allocated/freed without this bit set that causes an
+ * error and the token will be ignored, error counter in register offset
+ * 0xB8 will be incremented.
+*/
+#define  POOL1_ALLOC_DEALLOC_TOKEN_VALID_MASK	0x80000000
+
+/*
+ * DDR Identifier 0:
+ * DDR0 1:
+ * DDR1
+*/
+#define  POOL1_ALLOC_DEALLOC_DDR_MASK	0x20000000
+
+/* Buffer Index Pointer */
+#define  POOL1_ALLOC_DEALLOC_TOKEN_INDEX_SHIFT	12
+#define  POOL1_ALLOC_DEALLOC_TOKEN_INDEX_MASK	0x1ffff000
+
+/* Buffer length or packet size in bytes */
+#define  POOL1_ALLOC_DEALLOC_TOKEN_SIZE_SHIFT	0
+#define  POOL1_ALLOC_DEALLOC_TOKEN_SIZE_MASK	0xfff
+
+
+/*
+ * Register <POOL2 Allocation & De-allocation/Free Management>
+ *
+ * The free pool FIFO contains pointers to the buffers in the pool.
+ * To allocate a buffer from the pool, read token from this port.
+ * To de-allocate/free a buffer to the pool , write the token of the buffer
+ * to this port.
+ * After reset, software must initialize the FIFO.
+ * The buffer size is given in the control register above.
+ * All buffers must be of the same size and contiguous.
+ */
+#define FPM_POOL2_ALLOC_DEALLOC		0x408
+
+/*
+ * Valid Token Indicator 0:
+ * No buffers available 1:
+ * A valid token index is provided.
+ * If a token is de-allocated/freed without this bit set that causes an
+ * error and the token will be ignored, error counter in register offset
+ * 0xB8 will be incremented.
+*/
+#define  POOL2_ALLOC_DEALLOC_TOKEN_VALID_MASK	0x80000000
+
+/*
+ * DDR Identifier 0:
+ * DDR0 1:
+ * DDR1
+*/
+#define  POOL2_ALLOC_DEALLOC_DDR_MASK	0x20000000
+
+/* Buffer Index Pointer */
+#define  POOL2_ALLOC_DEALLOC_TOKEN_INDEX_SHIFT	12
+#define  POOL2_ALLOC_DEALLOC_TOKEN_INDEX_MASK	0x1ffff000
+
+/* Buffer length or packet size in bytes */
+#define  POOL2_ALLOC_DEALLOC_TOKEN_SIZE_SHIFT	0
+#define  POOL2_ALLOC_DEALLOC_TOKEN_SIZE_MASK	0xfff
+
+
+/*
+ * Register <POOL3 Allocation & De-allocation/Free Management>
+ *
+ * The free pool FIFO contains pointers to the buffers in the pool.
+ * To allocate a buffer from the pool, read token from this port.
+ * To de-allocate/free a buffer to the pool , write the token of the buffer
+ * to this port.
+ * After reset, software must initialize the FIFO.
+ * The buffer size is given in the control register above.
+ * All buffers must be of the same size and contiguous.
+ */
+#define FPM_POOL3_ALLOC_DEALLOC		0x410
+
+/*
+ * Valid Token Indicator 0:
+ * No buffers available 1:
+ * A valid token index is provided.
+ * If a token is de-allocated/freed without this bit set that causes an
+ * error and the token will be ignored, error counter in register offset
+ * 0xB8 will be incremented.
+*/
+#define  POOL3_ALLOC_DEALLOC_TOKEN_VALID_MASK	0x80000000
+
+/*
+ * DDR Identifier 0:
+ * DDR0 1:
+ * DDR1
+*/
+#define  POOL3_ALLOC_DEALLOC_DDR_MASK	0x20000000
+
+/* Buffer Index Pointer */
+#define  POOL3_ALLOC_DEALLOC_TOKEN_INDEX_SHIFT	12
+#define  POOL3_ALLOC_DEALLOC_TOKEN_INDEX_MASK	0x1ffff000
+
+/* Buffer length or packet size in bytes */
+#define  POOL3_ALLOC_DEALLOC_TOKEN_SIZE_SHIFT	0
+#define  POOL3_ALLOC_DEALLOC_TOKEN_SIZE_MASK	0xfff
+
+
+/*
+ * Register <POOL4 Allocation & De-allocation/Free Management>
+ *
+ * The free pool FIFO contains pointers to the buffers in the pool.
+ * To allocate a buffer from the pool, read token from this port.
+ * To de-allocate/free a buffer to the pool , write the token of the buffer
+ * to this port.
+ * After reset, software must initialize the FIFO.
+ * The buffer size is given in the control register above.
+ * All buffers must be of the same size and contiguous.
+ */
+#define FPM_POOL4_ALLOC_DEALLOC		0x418
+
+/*
+ * Valid Token Indicator 0:
+ * No buffers available 1:
+ * A valid token index is provided.
+ * If a token is de-allocated/freed without this bit set that causes an
+ * error and the token will be ignored, error counter in register offset
+ * 0xB8 will be incremented.
+*/
+#define  POOL4_ALLOC_DEALLOC_TOKEN_VALID_MASK	0x80000000
+
+/*
+ * DDR Identifier 0:
+ * DDR0 1:
+ * DDR1
+*/
+#define  POOL4_ALLOC_DEALLOC_DDR_MASK	0x20000000
+
+/* Buffer Index Pointer */
+#define  POOL4_ALLOC_DEALLOC_TOKEN_INDEX_SHIFT	12
+#define  POOL4_ALLOC_DEALLOC_TOKEN_INDEX_MASK	0x1ffff000
+
+/* Buffer length or packet size in bytes */
+#define  POOL4_ALLOC_DEALLOC_TOKEN_SIZE_SHIFT	0
+#define  POOL4_ALLOC_DEALLOC_TOKEN_SIZE_MASK	0xfff
+
+
+/*
+ * Register <Spare for future use>
+ *
+ */
+#define FPM_SPARE			0x420
+
+#define  SPARE_SPARE_BITS_SHIFT		0
+#define  SPARE_SPARE_BITS_MASK		0xffffffff
+
+/*
+ * Register <Multi-cast Token Update Control>
+ *
+ * Update/Modify the multi-cast value of the token
+ */
+#define FPM_POOL_MULTI			0x424
+
+/*
+ * Valid Token Indicator 0:
+ * No buffers available 1:
+ * A valid token index is provided.
+ * If a token multi-cast value is updated without this bit set, that causes
+ * an error and the token will be ignored, error counter in register offset
+ * 0xBC will be incremented.
+*/
+#define  POOL_MULTI_TOKEN_VALID_MASK	0x80000000
+
+/*
+ * DDR Identifier 0:
+ * DDR0 1:
+ * DDR1
+*/
+#define  POOL_MULTI_DDR_MASK		0x20000000
+
+/* Buffer Index Pointer */
+#define  POOL_MULTI_TOKEN_INDEX_SHIFT	12
+#define  POOL_MULTI_TOKEN_INDEX_MASK	0x1ffff000
+
+/*
+ * 1'b0 - Count value is replaced with new value in bits[6:
+ * 0] 1'b1 - Count value is incremented by value in bits[6:
+ * 0]
+*/
+#define  POOL_MULTI_UPDATE_TYPE_MASK	0x800
+
+/* New Multi-cast Value */
+#define  POOL_MULTI_TOKEN_MULTI_SHIFT	0
+#define  POOL_MULTI_TOKEN_MULTI_MASK	0x7f
+
+
+/*
+ * Register <FPM_BB_FORCE>
+ *
+ * Write this register to force FPM_BB transaction
+ */
+#define FPM_FPM_BB_FORCE		0x30000
+
+/* Write 1 to force BB transaction */
+#define  FPM_BB_FORCE_FORCE_MASK	0x1
+
+
+/*
+ * Register <FPM_BB_FORCED_CTRL>
+ *
+ * Control to be sent on forced transaction
+ */
+#define FPM_FPM_BB_FORCED_CTRL		0x30004
+
+/* Forced control */
+#define  FPM_BB_FORCED_CTRL_CTRL_SHIFT	0
+#define  FPM_BB_FORCED_CTRL_CTRL_MASK	0xfff
+
+
+/*
+ * Register <FPM_BB_FORCED_ADDR>
+ *
+ * Address to be sent on forced transaction
+ */
+#define FPM_FPM_BB_FORCED_ADDR		0x30008
+
+/* Forced TA address */
+#define  FPM_BB_FORCED_ADDR_TA_ADDR_SHIFT	0
+#define  FPM_BB_FORCED_ADDR_TA_ADDR_MASK	0xffff
+
+/* Forced destination address */
+#define  FPM_BB_FORCED_ADDR_DEST_ADDR_SHIFT	16
+#define  FPM_BB_FORCED_ADDR_DEST_ADDR_MASK	0x3f0000
+
+
+/*
+ * Register <FPM_BB_FORCED_DATA>
+ *
+ * Data to be sent on forced transaction
+ */
+#define FPM_FPM_BB_FORCED_DATA		0x3000c
+
+/* Forced data */
+#define  FPM_BB_FORCED_DATA_DATA_SHIFT	0
+#define  FPM_BB_FORCED_DATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <FPM_BB_DECODE_CFG>
+ *
+ * set configuration for BB decoder
+ */
+#define FPM_FPM_BB_DECODE_CFG		0x30010
+
+/* destination id */
+#define  FPM_BB_DECODE_CFG_DEST_ID_SHIFT	0
+#define  FPM_BB_DECODE_CFG_DEST_ID_MASK	0x3f
+
+/* Enable override */
+#define  FPM_BB_DECODE_CFG_OVERRIDE_EN_MASK	0x40
+
+/* route address */
+#define  FPM_BB_DECODE_CFG_ROUTE_ADDR_SHIFT	7
+#define  FPM_BB_DECODE_CFG_ROUTE_ADDR_MASK	0x1ff80
+
+
+/*
+ * Register <FPM_BB_DBG_CFG>
+ *
+ * Set SW addr to read FPM_BB FIFOs
+ */
+#define FPM_FPM_BB_DBG_CFG		0x30014
+
+/* SW address for reading FPM BB RXFIFO */
+#define  FPM_BB_DBG_CFG_RXFIFO_SW_ADDR_SHIFT	0
+#define  FPM_BB_DBG_CFG_RXFIFO_SW_ADDR_MASK	0xf
+
+/* SW address for reading FPM BB TXFIFO */
+#define  FPM_BB_DBG_CFG_TXFIFO_SW_ADDR_SHIFT	4
+#define  FPM_BB_DBG_CFG_TXFIFO_SW_ADDR_MASK	0xf0
+
+/* SW reset for FPM BB RXFIFO */
+#define  FPM_BB_DBG_CFG_RXFIFO_SW_RST_MASK	0x100
+
+/* SW reset for FPM BB TXFIFO */
+#define  FPM_BB_DBG_CFG_TXFIFO_SW_RST_MASK	0x200
+
+
+/*
+ * Register <FPM_BB_DBG_RXFIFO_STS> - read-only
+ *
+ * Status of FPM BB RXFIFO
+ */
+#define FPM_FPM_BB_DBG_RXFIFO_STS	0x30018
+
+/* FIFO is empty */
+#define  FPM_BB_DBG_RXFIFO_STS_FIFO_EMPTY_MASK	0x1
+
+/* FIFO is full */
+#define  FPM_BB_DBG_RXFIFO_STS_FIFO_FULL_MASK	0x2
+
+/* Used words */
+#define  FPM_BB_DBG_RXFIFO_STS_FIFO_USED_WORDS_SHIFT	8
+#define  FPM_BB_DBG_RXFIFO_STS_FIFO_USED_WORDS_MASK	0x1f00
+
+/* Read counter */
+#define  FPM_BB_DBG_RXFIFO_STS_FIFO_RD_CNTR_SHIFT	16
+#define  FPM_BB_DBG_RXFIFO_STS_FIFO_RD_CNTR_MASK	0x1f0000
+
+/* Write counter */
+#define  FPM_BB_DBG_RXFIFO_STS_FIFO_WR_CNTR_SHIFT	24
+#define  FPM_BB_DBG_RXFIFO_STS_FIFO_WR_CNTR_MASK	0x1f000000
+
+
+/*
+ * Register <FPM_BB_DBG_TXFIFO_STS> - read-only
+ *
+ * Status of FPM BB TXFIFO
+ */
+#define FPM_FPM_BB_DBG_TXFIFO_STS	0x3001c
+
+/* FIFO is empty */
+#define  FPM_BB_DBG_TXFIFO_STS_FIFO_EMPTY_MASK	0x1
+
+/* FIFO is full */
+#define  FPM_BB_DBG_TXFIFO_STS_FIFO_FULL_MASK	0x2
+
+/* Used words */
+#define  FPM_BB_DBG_TXFIFO_STS_FIFO_USED_WORDS_SHIFT	8
+#define  FPM_BB_DBG_TXFIFO_STS_FIFO_USED_WORDS_MASK	0x1f00
+
+/* Read counter */
+#define  FPM_BB_DBG_TXFIFO_STS_FIFO_RD_CNTR_SHIFT	16
+#define  FPM_BB_DBG_TXFIFO_STS_FIFO_RD_CNTR_MASK	0x1f0000
+
+/* Write counter */
+#define  FPM_BB_DBG_TXFIFO_STS_FIFO_WR_CNTR_SHIFT	24
+#define  FPM_BB_DBG_TXFIFO_STS_FIFO_WR_CNTR_MASK	0x1f000000
+
+
+/*
+ * Register <FPM_BB_DBG_RXFIFO_DATA1> - read-only
+ *
+ * Data from FPM BB RXFIFO bits [31:
+ * 0]
+ */
+#define FPM_FPM_BB_DBG_RXFIFO_DATA1	0x30020
+
+/* data */
+#define  FPM_BB_DBG_RXFIFO_DATA1_DATA_SHIFT	0
+#define  FPM_BB_DBG_RXFIFO_DATA1_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <FPM_BB_DBG_RXFIFO_DATA2> - read-only
+ *
+ * Data from FPM BB RXFIFO bits [39:
+ * 32]
+ */
+#define FPM_FPM_BB_DBG_RXFIFO_DATA2	0x30024
+
+/* data */
+#define  FPM_BB_DBG_RXFIFO_DATA2_DATA_SHIFT	0
+#define  FPM_BB_DBG_RXFIFO_DATA2_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <FPM_BB_DBG_TXFIFO_DATA1> - read-only
+ *
+ * Data from FPM BB TXFIFO bits [31:
+ * 0]
+ */
+#define FPM_FPM_BB_DBG_TXFIFO_DATA1	0x30028
+
+/* data */
+#define  FPM_BB_DBG_TXFIFO_DATA1_DATA_SHIFT	0
+#define  FPM_BB_DBG_TXFIFO_DATA1_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <FPM_BB_DBG_TXFIFO_DATA2> - read-only
+ *
+ * Data from FPM BB TXFIFO bits [63:
+ * 32]
+ */
+#define FPM_FPM_BB_DBG_TXFIFO_DATA2	0x3002c
+
+/* data */
+#define  FPM_BB_DBG_TXFIFO_DATA2_DATA_SHIFT	0
+#define  FPM_BB_DBG_TXFIFO_DATA2_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <FPM_BB_DBG_TXFIFO_DATA3> - read-only
+ *
+ * Data from FPM BB TXFIFO bits [79:
+ * 64]
+ */
+#define FPM_FPM_BB_DBG_TXFIFO_DATA3	0x30030
+
+/* data */
+#define  FPM_BB_DBG_TXFIFO_DATA3_DATA_SHIFT	0
+#define  FPM_BB_DBG_TXFIFO_DATA3_DATA_MASK	0xffffffff
+
+
+#endif /* ! XRDP_REGS_FPM_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_hash.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_hash.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_hash.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_hash.h	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,1039 @@
+#ifndef XRDP_REGS_HASH_H_
+#define XRDP_REGS_HASH_H_
+
+/* relative to core */
+#define HASH_OFFSET_0			0xe20000
+
+/*
+ * Register <PWR_SAV_EN>
+ *
+ * Power saving mode -detect that the accelerator has no activity and enter
+ * to power saving mode
+ */
+#define HASH_GENERAL_CFG_PWR_SAV_EN	0x0
+
+/* . */
+#define  HASH_GENERAL_CFG_PWR_SAV_EN_VALUE_MASK	0x1
+
+
+/*
+ * Register <PAD_VAL_HIGH>
+ *
+ * Determines the padding value added to keys according to the selected
+ * MASK
+ */
+#define HASH_GENERAL_CFG_PAD_HIGH	0x4
+
+/*
+ * Determines the padding value added to keys according to the selected
+ * MASK
+*/
+#define  HASH_GENERAL_CFG_PAD_HIGH_VAL_SHIFT	0
+#define  HASH_GENERAL_CFG_PAD_HIGH_VAL_MASK	0xfffffff
+
+
+/*
+ * Register <PAD_VAL_LOW>
+ *
+ * Determines the padding value added to keys according to the selected
+ * MASK
+ */
+#define HASH_GENERAL_CFG_PAD_LOW	0x8
+
+/*
+ * Determines the padding value added to keys according to the selected
+ * MASK
+*/
+#define  HASH_GENERAL_CFG_PAD_LOW_VAL_SHIFT	0
+#define  HASH_GENERAL_CFG_PAD_LOW_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <MULT_HIT_ERR> - read-only
+ *
+ * In case of multiple match this reg captures the hit indication per
+ * engine.
+ * This is a read clear reg.
+ */
+#define HASH_GENERAL_CFG_MULT_HIT_ERR	0xc
+
+/*
+ * In case of multiple match this reg captures the hit indication per
+ * engine.
+ * This is a read clear reg.
+*/
+#define  HASH_GENERAL_CFG_MULT_HIT_ERR_VAL_SHIFT	0
+#define  HASH_GENERAL_CFG_MULT_HIT_ERR_VAL_MASK	0xf
+
+
+/*
+ * Register <UNDO_FIX>
+ *
+ * Consist of chicken bit per specific fix
+ */
+#define HASH_GENERAL_CFG_UNDO_FIX	0x10
+
+/*
+ * The bug fixed lacking in identification and reporting when a multiple
+ * hit occurs in the first search.
+*/
+#define  HASH_GENERAL_CFG_UNDO_FIX_FRST_MUL_HIT_MASK	0x1
+
+
+/*
+ * Register <HITS> - read-only
+ *
+ * Number of key hits. This reg is frozen when the freeze bit is asserted.
+ */
+#define HASH_PM_COUNTERS_HITS		0x100
+
+/* . */
+#define  HASH_PM_COUNTERS_HITS_CNT_SHIFT	0
+#define  HASH_PM_COUNTERS_HITS_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <SEARCHES> - read-only
+ *
+ * Number of key searches. This register is updated only when the freeze register
+ * is not set
+ */
+#define HASH_PM_COUNTERS_SRCHS		0x104
+
+/* . */
+#define  HASH_PM_COUNTERS_SRCHS_CNT_SHIFT	0
+#define  HASH_PM_COUNTERS_SRCHS_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <MISSES> - read-only
+ *
+ * Total NUM of misses. Read-clear register; updated only when freeze reg is 0
+ */
+#define HASH_PM_COUNTERS_MISS		0x108
+
+/* . */
+#define  HASH_PM_COUNTERS_MISS_CNT_SHIFT	0
+#define  HASH_PM_COUNTERS_MISS_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <HIT_1ST_ACS> - read-only
+ *
+ * NUM of hits on 1st access. Read-clear register; updated only when freeze reg is 0
+ */
+#define HASH_PM_COUNTERS_HIT_1ST_ACS	0x10c
+
+/* . */
+#define  HASH_PM_COUNTERS_HIT_1ST_ACS_CNT_SHIFT	0
+#define  HASH_PM_COUNTERS_HIT_1ST_ACS_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <HIT_2ND_ACS> - read-only
+ *
+ * NUM of hits on 2nd access. Read-clear register; updated only when freeze reg is 0
+ */
+#define HASH_PM_COUNTERS_HIT_2ND_ACS	0x110
+
+/* . */
+#define  HASH_PM_COUNTERS_HIT_2ND_ACS_CNT_SHIFT	0
+#define  HASH_PM_COUNTERS_HIT_2ND_ACS_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <HIT_3RD_ACS> - read-only
+ *
+ * NUM of hits on 3rd access. Read-clear register; updated only when freeze reg is 0
+ */
+#define HASH_PM_COUNTERS_HIT_3RD_ACS	0x114
+
+/* . */
+#define  HASH_PM_COUNTERS_HIT_3RD_ACS_CNT_SHIFT	0
+#define  HASH_PM_COUNTERS_HIT_3RD_ACS_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <HIT_4TH_ACS> - read-only
+ *
+ * NUM of hits on 4th access. Read-clear register; updated only when freeze reg is 0
+ */
+#define HASH_PM_COUNTERS_HIT_4TH_ACS	0x118
+
+/* . */
+#define  HASH_PM_COUNTERS_HIT_4TH_ACS_CNT_SHIFT	0
+#define  HASH_PM_COUNTERS_HIT_4TH_ACS_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <FREEZE_CNT>
+ *
+ * Freezes counters update - used to read all relevant counters at the same
+ * point.
+ */
+#define HASH_PM_COUNTERS_FRZ_CNT	0x150
+
+/* Freezes counters update */
+#define  HASH_PM_COUNTERS_FRZ_CNT_VAL_MASK	0x1
+
+
+/*
+ * Registers <TBL_CFG> - <x> is [ 0 => 6 ]
+ *
+ * Look-up table :
+ * Configuration of LUT:
+ * table params + main flag
+ */
+#define HASH_LKUP_TBL_CFG_TBL_CFG(x)	(0x200 + (x) * 0x10)
+
+/*
+ * Base address of the hash ram per engine.
+ * Varies between 0 and 1535. Indicates from which address to start looking for the
+ * key.
+ * Note, base address must be aligned to table size - table size of 128
+ * can't get base 64
+*/
+#define  HASH_LKUP_TBL_CFG_TBL_CFG_HASH_BASE_ADDR_SHIFT	0
+#define  HASH_LKUP_TBL_CFG_TBL_CFG_HASH_BASE_ADDR_MASK	0x7ff
+
+/*
+ * Number of entries in the table per engine.
+ * Total entries should be multiplied with the number of engines - by 4.
+*/
+#define  HASH_LKUP_TBL_CFG_TBL_CFG_TBL_SIZE_SHIFT	16
+#define  HASH_LKUP_TBL_CFG_TBL_CFG_TBL_SIZE_MASK	0x70000
+
+/*
+ * Max Search depth per engine.
+ * Supports up to 16 and cannot exceed table size.
+ * For performance requirement it should be limited to 4
+*/
+#define  HASH_LKUP_TBL_CFG_TBL_CFG_MAX_HOP_SHIFT	20
+#define  HASH_LKUP_TBL_CFG_TBL_CFG_MAX_HOP_MASK	0xf00000
+
+/*
+ * CAM Search is enabled.
+ * If the key not found in the hash table and this flag enabled the key
+ * will be searched in the CAm.
+*/
+#define  HASH_LKUP_TBL_CFG_TBL_CFG_CAM_EN_MASK	0x1000000
+
+/*
+ * Direct lookup enable.
+ * Allows accessing the table without hash calculation- direct access.
+*/
+#define  HASH_LKUP_TBL_CFG_TBL_CFG_DIRECT_LKUP_EN_MASK	0x2000000
+
+/* Hash function type */
+#define  HASH_LKUP_TBL_CFG_TBL_CFG_HASH_TYPE_MASK	0x4000000
+
+/*
+ * If the key smaller than 60 bit, then it supported to store in the
+ * remaining bits an internal context data 3B or 6B.
+*/
+#define  HASH_LKUP_TBL_CFG_TBL_CFG_INT_CNTX_SIZE_SHIFT	28
+#define  HASH_LKUP_TBL_CFG_TBL_CFG_INT_CNTX_SIZE_MASK	0x30000000
+
+
+/*
+ * Registers <KEY_MASK_HIGH> - <x> is [ 0 => 6 ]
+ *
+ * Look-up table :
+ * key Mask on bits [59:
+ * 32]. Key consists of 60 bits.
+ * by configuring mask the user can use different key lengths.
+ * if the key is smaller than 60 bit it is padded with constant value
+ * according the the mask register.
+ */
+#define HASH_LKUP_TBL_CFG_KEY_MASK_HIGH(x)	(0x204 + (x) * 0x10)
+
+/*
+ * MASK HIGH applied on the 28 msb of the current part of key for the
+ * current search table.
+ * The value used for padding purpose and comparison to the hash content.
+*/
+#define  HASH_LKUP_TBL_CFG_KEY_MASK_HIGH_MASKH_SHIFT	0
+#define  HASH_LKUP_TBL_CFG_KEY_MASK_HIGH_MASKH_MASK	0xfffffff
+
+
+/*
+ * Registers <KEY_MASK_LOW> - <x> is [ 0 => 6 ]
+ *
+ * Look-up table key Mask on bits [31:
+ * 0]. Key consists of 60 bits.
+ * By configuring mask the user can use different key lengths.
+ * If the key is smaller than 60 bit it is padded with constant value
+ * according to the mask register.
+ */
+#define HASH_LKUP_TBL_CFG_KEY_MASK_LOW(x)	(0x208 + (x) * 0x10)
+
+/*
+ * MASK LOW applied on the 32 lsb of the current part of key for the
+ * current search table.
+ * The value used for padding purpose and comparison to the hash content.
+*/
+#define  HASH_LKUP_TBL_CFG_KEY_MASK_LOW_MASKL_SHIFT	0
+#define  HASH_LKUP_TBL_CFG_KEY_MASK_LOW_MASKL_MASK	0xffffffff
+
+
+/*
+ * Registers <CNTXT_CFG> - <x> is [ 0 => 6 ]
+ *
+ * Look-up table:
+ * LUT Context Table configurations (base addr + entry context size)
+ */
+#define HASH_LKUP_TBL_CFG_CNTXT_CFG(x)	(0x20c + (x) * 0x10)
+
+/*
+ * Context table base address in the RAM (6Bytes X 3264entries) .
+ * Indicates from which address start looking at the context.
+ * The address varies between 0 and 3264 (including 196 CAM entries). It
+ * should be calculated according below formula:
+ * Context_base_addr[12:
+ * 0] = sum of (table_size_per_engine*num_of_eng*context_size)/6 for all
+ * preceding tables
+*/
+#define  HASH_LKUP_TBL_CFG_CNTXT_CFG_BASE_ADDRESS_SHIFT	0
+#define  HASH_LKUP_TBL_CFG_CNTXT_CFG_BASE_ADDRESS_MASK	0xfff
+
+/*
+ * Indicates the first entry of the particular table in the context table.
+ * It should be calculated according to below formula:
+ * First_hash_index = sum of (table_size_per_engine*num_of_eng) for all
+ * preceding tables
+*/
+#define  HASH_LKUP_TBL_CFG_CNTXT_CFG_FIRST_HASH_IDX_SHIFT	12
+#define  HASH_LKUP_TBL_CFG_CNTXT_CFG_FIRST_HASH_IDX_MASK	0x1fff000
+
+/*
+ * Context entry size (in the context RAM).
+ * Varies between 0B to 12B in steps of 3BContext may also be extracted
+ * directly from Look-up Table (up to 6B).
+*/
+#define  HASH_LKUP_TBL_CFG_CNTXT_CFG_CNXT_SIZE_SHIFT	28
+#define  HASH_LKUP_TBL_CFG_CNTXT_CFG_CNXT_SIZE_MASK	0x70000000
+
+
+/*
+ * Register <CNTXT_CFG>
+ *
+ * Look-up table:
+ * LUT Context Table configurations (base addr + entry context size)
+ */
+#define HASH_CAM_CFG_CNTXT_CFG		0x400
+
+/*
+ * Context table base address in the RAM (6Bytes X 3264entries) .
+ * Indicates from which address start looking at the context.
+ * The address varies between 0 and 3264 (including 196 CAM entries). It
+ * should be calculated according below formula:
+ * Context_base_addr[12:
+ * 0] = sum of (table_size*context_size)/6 for all preceding tables
+*/
+#define  HASH_CAM_CFG_CNTXT_CFG_BASE_ADDRESS_SHIFT	0
+#define  HASH_CAM_CFG_CNTXT_CFG_BASE_ADDRESS_MASK	0xfff
+
+
+/*
+ * Register <OPERATION>
+ *
+ * TCAM Operation:
+ * 0 - CAM read, 1 - CAM write, 2 - CAM compare, 3 - CAM valid bit reset. Writing
+ * to this register triggers the operation.
+ * All other relevant register should be ready before SW writes to this
+ * register.
+ */
+#define HASH_CAM_INDIRECT_OP		0x800
+
+/* . */
+#define  HASH_CAM_INDIRECT_OP_CMD_SHIFT	0
+#define  HASH_CAM_INDIRECT_OP_CMD_MASK	0xf
+
+
+/*
+ * Register <OPERATION_DONE> - read-only
+ *
+ * Raised when the CAM operation is completed (cleared by HW on writing to
+ * the OPERATION register)
+ */
+#define HASH_CAM_INDIRECT_OP_DONE	0x804
+
+/* . */
+#define  HASH_CAM_INDIRECT_OP_DONE_VAL_MASK	0x1
+
+
+/*
+ * Register <ADDRESS>
+ *
+ * Key Address to be used in RD/WR operations.
+ */
+#define HASH_CAM_INDIRECT_ADDR		0x808
+
+/*
+ * This bit indicate if the operation (RD/WR) is performed on the key0 or
+ * key1 part of the entry
+*/
+#define  HASH_CAM_INDIRECT_ADDR_KEY1_IND_MASK	0x1
+
+/* Address of the entry */
+#define  HASH_CAM_INDIRECT_ADDR_ENTRY_ADDR_SHIFT	1
+#define  HASH_CAM_INDIRECT_ADDR_ENTRY_ADDR_MASK	0x7e
+
+
+/*
+ * Register <VALID_IN>
+ *
+ * Valid value to be written - this value is relevant during write
+ * operation on key0.
+ */
+#define HASH_CAM_INDIRECT_VLID_IN	0x80c
+
+/* . */
+#define  HASH_CAM_INDIRECT_VLID_IN_VALID_MASK	0x1
+
+
+/*
+ * Register <VALID_OUT>
+ *
+ * Valid value read from the CAM - this value is relevant during read
+ * operation on key0.
+ */
+#define HASH_CAM_INDIRECT_VLID_OUT	0x814
+
+/* . */
+#define  HASH_CAM_INDIRECT_VLID_OUT_VALID_MASK	0x1
+
+
+/*
+ * Register <SEARCH_RESULT> - read-only
+ *
+ * The result of a search operation
+ */
+#define HASH_CAM_INDIRECT_RSLT		0x818
+
+/* indicate if a match was found */
+#define  HASH_CAM_INDIRECT_RSLT_MATCH_MASK	0x1
+
+/* index related to a match result */
+#define  HASH_CAM_INDIRECT_RSLT_INDEX_SHIFT	4
+#define  HASH_CAM_INDIRECT_RSLT_INDEX_MASK	0x3f0
+
+
+/*
+ * Registers <KEY_IN> - <x> is [ 0 => 1 ]
+ *
+ * Key to be used in Write/Compare operations.
+ * The Key is 64bit long and is represented by 2 registers.
+ * The lower address register corresponds to the most significant bits of
+ * the key.
+ */
+#define HASH_CAM_INDIRECT_KEY_IN(x)	(0x820 + (x) * 0x4)
+
+/* . */
+#define  HASH_CAM_INDIRECT_KEY_IN_VALUE_SHIFT	0
+#define  HASH_CAM_INDIRECT_KEY_IN_VALUE_MASK	0xffffffff
+
+
+/*
+ * Registers <KEY_OUT> - <x> is [ 0 => 1 ] - read-only
+ *
+ * Key returned from the CAM in a read operation.
+ * The Key is 64bit long and is represented by 2 registers.
+ * The lower address register correspond to the most significant bits of
+ * the key.
+ */
+#define HASH_CAM_INDIRECT_KEY_OUT(x)	(0x840 + (x) * 0x4)
+
+/* . */
+#define  HASH_CAM_INDIRECT_KEY_OUT_VALUE_SHIFT	0
+#define  HASH_CAM_INDIRECT_KEY_OUT_VALUE_MASK	0xffffffff
+
+
+/*
+ * Register <BIST_STATUS> - read-only
+ *
+ * .
+ */
+#define HASH_CAM_BIST_BIST_STATUS	0x900
+
+/* . */
+#define  HASH_CAM_BIST_BIST_STATUS_VALUE_SHIFT	0
+#define  HASH_CAM_BIST_BIST_STATUS_VALUE_MASK	0xffffffff
+
+
+/*
+ * Register <BIST_DBG_COMPARE_EN>
+ *
+ * .
+ */
+#define HASH_CAM_BIST_BIST_DBG_COMPARE_EN	0x904
+
+/* . */
+#define  HASH_CAM_BIST_BIST_DBG_COMPARE_EN_VALUE_MASK	0x1
+
+
+/*
+ * Register <BIST_DBG_DATA>
+ *
+ * .
+ */
+#define HASH_CAM_BIST_BIST_DBG_DATA	0x908
+
+/* . */
+#define  HASH_CAM_BIST_BIST_DBG_DATA_VALUE_SHIFT	0
+#define  HASH_CAM_BIST_BIST_DBG_DATA_VALUE_MASK	0xffffffff
+
+
+/*
+ * Register <BIST_DBG_DATA_SLICE_OR_STATUS_SEL>
+ *
+ * .
+ */
+#define HASH_CAM_BIST_BIST_DBG_DATA_SLICE_OR_STATUS_SEL	0x90c
+
+/* . */
+#define  HASH_CAM_BIST_BIST_DBG_DATA_SLICE_OR_STATUS_SEL_VALUE_SHIFT	0
+#define  HASH_CAM_BIST_BIST_DBG_DATA_SLICE_OR_STATUS_SEL_VALUE_MASK	0xff
+
+
+/*
+ * Register <BIST_DBG_DATA_VALID>
+ *
+ * .
+ */
+#define HASH_CAM_BIST_BIST_DBG_DATA_VALID	0x910
+
+/* . */
+#define  HASH_CAM_BIST_BIST_DBG_DATA_VALID_VALUE_MASK	0x1
+
+
+/*
+ * Register <BIST_EN>
+ *
+ * .
+ */
+#define HASH_CAM_BIST_BIST_EN		0x914
+
+/* . */
+#define  HASH_CAM_BIST_BIST_EN_VALUE_SHIFT	0
+#define  HASH_CAM_BIST_BIST_EN_VALUE_MASK	0xff
+
+
+/*
+ * Register <BIST_MODE>
+ *
+ * .
+ */
+#define HASH_CAM_BIST_BIST_MODE		0x918
+
+/* . */
+#define  HASH_CAM_BIST_BIST_MODE_VALUE_SHIFT	0
+#define  HASH_CAM_BIST_BIST_MODE_VALUE_MASK	0x3
+
+
+/*
+ * Register <BIST_RST_L>
+ *
+ * .
+ */
+#define HASH_CAM_BIST_BIST_RST_L	0x91c
+
+/* . */
+#define  HASH_CAM_BIST_BIST_RST_L_VALUE_MASK	0x1
+
+
+/*
+ * Register <BIST_SKIP_ERROR_CNT>
+ *
+ * .
+ */
+#define HASH_CAM_BIST_BIST_SKIP_ERROR_CNT	0x920
+
+/* . */
+#define  HASH_CAM_BIST_BIST_SKIP_ERROR_CNT_VALUE_SHIFT	0
+#define  HASH_CAM_BIST_BIST_SKIP_ERROR_CNT_VALUE_MASK	0xff
+
+
+/*
+ * Register <DBG_EN>
+ *
+ * .
+ */
+#define HASH_CAM_BIST_DBG_EN		0x924
+
+/* . */
+#define  HASH_CAM_BIST_DBG_EN_VALUE_SHIFT	0
+#define  HASH_CAM_BIST_DBG_EN_VALUE_MASK	0xff
+
+
+/*
+ * Register <BIST_CASCADE_SELECT>
+ *
+ * .
+ */
+#define HASH_CAM_BIST_BIST_CASCADE_SELECT	0x928
+
+/* . */
+#define  HASH_CAM_BIST_BIST_CASCADE_SELECT_VALUE_SHIFT	0
+#define  HASH_CAM_BIST_BIST_CASCADE_SELECT_VALUE_MASK	0x7
+
+
+/*
+ * Register <BIST_BLOCK_SELECT>
+ *
+ * .
+ */
+#define HASH_CAM_BIST_BIST_BLOCK_SELECT	0x92c
+
+/* . */
+#define  HASH_CAM_BIST_BIST_BLOCK_SELECT_VALUE_SHIFT	0
+#define  HASH_CAM_BIST_BIST_BLOCK_SELECT_VALUE_MASK	0xf
+
+
+/*
+ * Register <BIST_REPAIR_ENABLE>
+ *
+ * .
+ */
+#define HASH_CAM_BIST_BIST_REPAIR_ENABLE	0x930
+
+/* . */
+#define  HASH_CAM_BIST_BIST_REPAIR_ENABLE_VALUE_MASK	0x1
+
+
+/*
+ * Register <INTERRUPT_STATUS_Register>
+ *
+ * This register contains the current active hash interrupts.
+ * Each asserted bit represents an active interrupt source.
+ * The interrupt remains active until the software clears it by writing 1
+ * to the corresponding bit.
+ */
+#define HASH_INTR_CTRL_ISR		0xa00
+
+/* Command cfg field is invalid (equals to 0) */
+#define  HASH_INTR_CTRL_ISR_INVLD_CMD_MASK	0x1
+
+/*
+ * During the search process same key was found a valid in multiple
+ * engines.
+*/
+#define  HASH_INTR_CTRL_ISR_MULT_MATCH_MASK	0x2
+
+/* hash table index over flow at hash engine */
+#define  HASH_INTR_CTRL_ISR_HASH_0_IDX_OVFLV_MASK	0x4
+
+/* hash table over flow at hash engine */
+#define  HASH_INTR_CTRL_ISR_HASH_1_IDX_OVFLV_MASK	0x8
+
+/* hash table index over flow at hash engine */
+#define  HASH_INTR_CTRL_ISR_HASH_2_IDX_OVFLV_MASK	0x10
+
+/* hash table index over flow at hash engine */
+#define  HASH_INTR_CTRL_ISR_HASH_3_IDX_OVFLV_MASK	0x20
+
+/* Context table index over flow */
+#define  HASH_INTR_CTRL_ISR_CNTXT_IDX_OVFLV_MASK	0x40
+
+
+/*
+ * Register <INTERRUPT_STATUS_MASKED_Register> - read-only
+ *
+ * This register provides only the enabled interrupts for each of the
+ * interrupt sources depicted in the ISR register.
+ */
+#define HASH_INTR_CTRL_ISM		0xa04
+
+/* Status Masked of corresponding interrupt source in the ISR */
+#define  HASH_INTR_CTRL_ISM_ISM_SHIFT	0
+#define  HASH_INTR_CTRL_ISM_ISM_MASK	0xffffffff
+
+
+/*
+ * Register <INTERRUPT_ENABLE_Register>
+ *
+ * This register provides an enable mask for each of the interrupt sources
+ * depicted in the ISR register.
+ */
+#define HASH_INTR_CTRL_IER		0xa08
+
+/*
+ * Each bit in the mask controls the corresponding interrupt source in the
+ * IER
+*/
+#define  HASH_INTR_CTRL_IER_IEM_SHIFT	0
+#define  HASH_INTR_CTRL_IER_IEM_MASK	0xffffffff
+
+
+/*
+ * Register <INTERRUPT_TEST_Register>
+ *
+ * This register enables testing by simulating interrupt sources.
+ * When the software sets a bit in the ITR, the corresponding bit in the
+ * ISR shows an active interrupt.
+ * The interrupt remains active until software clears the bit in the ITR
+ */
+#define HASH_INTR_CTRL_ITR		0xa0c
+
+/* Each bit in the mask tests the corresponding interrupt source in the ISR */
+#define  HASH_INTR_CTRL_ITR_IST_SHIFT	0
+#define  HASH_INTR_CTRL_ITR_IST_MASK	0xffffffff
+
+
+/*
+ * Register <DBG0> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG0			0xa30
+
+/* read debug register */
+#define  HASH_DEBUG_DBG0_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG0_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG1> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG1			0xa34
+
+/* read debug register */
+#define  HASH_DEBUG_DBG1_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG1_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG2> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG2			0xa38
+
+/* read debug register */
+#define  HASH_DEBUG_DBG2_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG2_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG3> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG3			0xa3c
+
+/* read debug register */
+#define  HASH_DEBUG_DBG3_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG3_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG4> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG4			0xa40
+
+/* read debug register */
+#define  HASH_DEBUG_DBG4_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG4_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG5> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG5			0xa44
+
+/* read debug register */
+#define  HASH_DEBUG_DBG5_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG5_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG6> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG6			0xa48
+
+/* read debug register */
+#define  HASH_DEBUG_DBG6_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG6_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG7> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG7			0xa4c
+
+/* read debug register */
+#define  HASH_DEBUG_DBG7_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG7_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG8> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG8			0xa50
+
+/* read debug register */
+#define  HASH_DEBUG_DBG8_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG8_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG9> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG9			0xa54
+
+/* read debug register */
+#define  HASH_DEBUG_DBG9_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG9_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG10> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG10		0xa58
+
+/* read debug register */
+#define  HASH_DEBUG_DBG10_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG10_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG11> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG11		0xa5c
+
+/* read debug register */
+#define  HASH_DEBUG_DBG11_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG11_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG12> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG12		0xa60
+
+/* read debug register */
+#define  HASH_DEBUG_DBG12_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG12_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG13> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG13		0xa64
+
+/* read debug register */
+#define  HASH_DEBUG_DBG13_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG13_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG14> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG14		0xa68
+
+/* read debug register */
+#define  HASH_DEBUG_DBG14_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG14_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG15> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG15		0xa6c
+
+/* read debug register */
+#define  HASH_DEBUG_DBG15_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG15_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG16> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG16		0xa70
+
+/* read debug register */
+#define  HASH_DEBUG_DBG16_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG16_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG17> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG17		0xa74
+
+/* read debug register */
+#define  HASH_DEBUG_DBG17_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG17_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG18> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG18		0xa78
+
+/* read debug register */
+#define  HASH_DEBUG_DBG18_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG18_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG19> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG19		0xa7c
+
+/* read debug register */
+#define  HASH_DEBUG_DBG19_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG19_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG20> - read-only
+ *
+ * debug reg
+ */
+#define HASH_DEBUG_DBG20		0xa80
+
+/* read debug register */
+#define  HASH_DEBUG_DBG20_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG20_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG_SELECT>
+ *
+ * debug select mux
+ */
+#define HASH_DEBUG_DBG_SEL		0xa84
+
+/* debug sel */
+#define  HASH_DEBUG_DBG_SEL_VAL_SHIFT	0
+#define  HASH_DEBUG_DBG_SEL_VAL_MASK	0x1f
+
+
+/*
+ * Registers <AGING> - <x> is [ 0 => 65 ]
+ *
+ * Each bit in the ram represents a hash/CAM entry.
+ * (6K hash entries + 64 CAM entries)/32 = 194 rows. Bit 0 in the ram
+ * corresponds to entry 0 (eng0), Bit 1 in the ram corresponds to entry 1
+ * (eng1), and so on.
+ * .
+ */
+#define HASH_AGING_RAM_AGING(x)		(0x7000 + (x) * 0x4)
+
+/* . */
+#define  HASH_AGING_RAM_AGING_DATA_SHIFT	0
+#define  HASH_AGING_RAM_AGING_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <CONTEXT_47_24> - <x> is [ 0 => 1215 ]
+ *
+ * 24 most significant bits of an entry (first 3B)
+ */
+#define HASH_CONTEXT_RAM_CONTEXT_47_24(x)	(0x8000 + (x) * 0x8)
+
+/* . */
+#define  HASH_CONTEXT_RAM_CONTEXT_47_24_DATA_SHIFT	0
+#define  HASH_CONTEXT_RAM_CONTEXT_47_24_DATA_MASK	0xffffff
+
+
+/*
+ * Registers <CONTEXT_23_0> - <x> is [ 0 => 1215 ]
+ *
+ * 24 least significant bits of an entry (second 3B)
+ */
+#define HASH_CONTEXT_RAM_CONTEXT_23_0(x)	(0x8004 + (x) * 0x8)
+
+/* . */
+#define  HASH_CONTEXT_RAM_CONTEXT_23_0_DATA_SHIFT	0
+#define  HASH_CONTEXT_RAM_CONTEXT_23_0_DATA_MASK	0xffffff
+
+
+/*
+ * Registers <ENG_BITS_63_32> - <x> is [ 0 => 2047 ]
+ *
+ * Includes the MSB field of the hash entry
+ */
+#define HASH_RAM_ENG_HIGH(x)		(0x10000 + (x) * 0x8)
+
+/*
+ * This field contains one of the two:
+ * key extension or internal context data.
+ * It defined by table configuration.
+*/
+#define  HASH_RAM_ENG_HIGH_KEY_59_28_OR_DAT_SHIFT	0
+#define  HASH_RAM_ENG_HIGH_KEY_59_28_OR_DAT_MASK	0xffffffff
+
+
+/*
+ * Registers <ENG_BITS_31_0> - <x> is [ 0 => 2047 ]
+ *
+ * Includes the MSB field of the hash entry
+ */
+#define HASH_RAM_ENG_LOW(x)		(0x10004 + (x) * 0x8)
+
+/*
+ * Indicates not to search at this entry due to the ongoing update of the
+ * entry.
+*/
+#define  HASH_RAM_ENG_LOW_SKP_MASK	0x1
+
+/*
+ * Determines the table config number, between 1-7.
+ * Config 0 is used to indicate invalid entry
+*/
+#define  HASH_RAM_ENG_LOW_CFG_SHIFT	1
+#define  HASH_RAM_ENG_LOW_CFG_MASK	0xe
+
+/*
+ * Includes the first part of the key.
+ * This field is preserved for key use only.
+*/
+#define  HASH_RAM_ENG_LOW_KEY_11_0_SHIFT	4
+#define  HASH_RAM_ENG_LOW_KEY_11_0_MASK	0xfff0
+
+/*
+ * This field contains one of the two:
+ * key extension or internal context data.
+ * It defined by table configuration.
+*/
+#define  HASH_RAM_ENG_LOW_KEY_27_12_OR_DAT_SHIFT	16
+#define  HASH_RAM_ENG_LOW_KEY_27_12_OR_DAT_MASK	0xffff0000
+
+
+#endif /* ! XRDP_REGS_HASH_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_natc.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_natc.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_natc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_natc.h	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,1511 @@
+#ifndef XRDP_REGS_NATC_H_
+#define XRDP_REGS_NATC_H_
+
+/* relative to core */
+#define NATC_OFFSET_0			0xe50000
+
+
+/*
+ *
+ * register set NATC
+ *
+ */
+
+/*
+ * Register <NAT Cache Control and Status>
+ *
+ * NAT Cache Control and Status Register.
+ */
+#define NATC_CONTROL_STATUS		0x0
+
+/*
+ * Enables NAT table offload to DDR functionality.
+ * NATC_CONTROL_STATUS2 register should be configured before enabling this
+ * feature.
+*/
+#define  CONTROL_STATUS_DDR_ENABLE_MASK	0x80000000
+
+/*
+ * Default behavior for an ADD command is to do a LOOKUP first to see if
+ * an entry with the same key already exists and replace it; this is to
+ * avoid having duplicated entries in the table for the ADD command.
+ * When this bit is set an ADD command will either replace the entry with
+ * the matched key or add an entry to an empty slot, depending on whichever
+ * one is encountered first during multi-hash.
+ * Enabling this bit speeds up the ADD command.
+*/
+#define  CONTROL_STATUS_NATC_ADD_COMMAND_SPEEDUP_MODE_MASK	0x40000000
+
+#define  CONTROL_STATUS_UNUSED4_SHIFT	28
+#define  CONTROL_STATUS_UNUSED4_MASK	0x30000000
+/*
+ * Swap 64-bit word within 128-bit word for DDR memory read/write
+ * accesses(i.
+ * e.
+ * , [127:
+ * 0] becomes {[63:
+ * 0], [127:
+ * 64]}).
+ * This bit should be set to 1 in Little Endian mode.
+*/
+#define  CONTROL_STATUS_DDR_64BIT_IN_128BIT_SWAP_CONTROL_MASK	0x8000000
+
+/*
+ * Swap 32-bit word within 64-bit word for statistics (counter) memory
+ * accesses(i.
+ * e.
+ * , [63:
+ * 0] becomes {[31:
+ * 0], [63:
+ * 32]})
+*/
+#define  CONTROL_STATUS_SMEM_32BIT_IN_64BIT_SWAP_CONTROL_MASK	0x4000000
+
+/*
+ * Reverse bytes within 32-bit word for statistics (counter) memory
+ * accesses(i.
+ * e.
+ * , [31:
+ * 0] becomes {[7:
+ * 0], [15,8], [23,16], [31,24]})
+*/
+#define  CONTROL_STATUS_SMEM_8BIT_IN_32BIT_SWAP_CONTROL_MASK	0x2000000
+
+/*
+ * Swap all bytes on DDR interface.
+ * This bit should be set to 1 in Little Endian mode.
+*/
+#define  CONTROL_STATUS_DDR_SWAP_ALL_CONTROL_MASK	0x1000000
+
+#define  CONTROL_STATUS_UNUSED3_MASK	0x800000
+/*
+ * Swap 32-bit word within 64-bit word for key_result register accesses(i.
+ * e.
+ * , [63:
+ * 0] becomes {[31:
+ * 0], [63:
+ * 32]})
+*/
+#define  CONTROL_STATUS_REG_32BIT_IN_64BIT_SWAP_CONTROL_MASK	0x400000
+
+/*
+ * Reverse bytes within 32-bit word for key_result register accesses(i.
+ * e.
+ * , [31:
+ * 0] becomes {[7:
+ * 0], [15,8], [23,16], [31,24]})
+*/
+#define  CONTROL_STATUS_REG_8BIT_IN_32BIT_SWAP_CONTROL_MASK	0x200000
+
+#define  CONTROL_STATUS_UNUSED2_SHIFT	18
+#define  CONTROL_STATUS_UNUSED2_MASK	0x1c0000
+/*
+ * This bit is only valid when CACHE_UPDATE_ON_DDR_MISS bit is set to 1.
+ * This bit determines whether pending FIFO entry will be checked
+ * todetermine whether cache update on DDR miss will happen or not.
+ * 1h:
+ * Enable; DDR miss entry fetched from DDR will be cached if pending
+ * FIFOcontains entries which have the same hash value as DDR miss entry.
+ * 0h:
+ * Disable; DDR miss entry fetched from DDR will always be cached.
+*/
+#define  CONTROL_STATUS_PENDING_FIFO_ENTRY_CHECK_ENABLE_MASK	0x20000
+
+/*
+ * This bit determines whether DDR lookup will cache the entry for DDR miss
+ * entry.
+ * 1h:
+ * Enable; DDR miss entry fetched from DDR will be cached.
+ * 0h:
+ * Disable; DDR miss entry fetched from DDR will not be cached.
+*/
+#define  CONTROL_STATUS_CACHE_UPDATE_ON_DDR_MISS_MASK	0x10000
+
+/*
+ * 0h:
+ * Enable DDR lookup when cache misses using register interface lookup1h:
+ * Disable DDR lookup when cache misses using register interface lookup
+*/
+#define  CONTROL_STATUS_DDR_DISABLE_ON_REG_LOOKUP_MASK	0x8000
+
+/* Reset regfile_FIFO. */
+#define  CONTROL_STATUS_REGFILE_FIFO_RESET_MASK	0x4000
+
+/*
+ * Hash algorithm used for internal caching0h:
+ * 32-bit rolling XOR hash is used as cache hash function.
+ * 1h:
+ * CRC32 hash is used as cache hash function.
+ * CRC32 is reduced to 10-bit usingthe same method as in 32-bit rolling XOR
+ * hash.
+ * 2h:
+ * CRC32 hash is used as cache hash function.
+ * CRC32[9:
+ * 0] is used as hash value3h:
+ * CRC32 hash is used as cache hash function.
+ * CRC32[25:
+ * 16] is used as hash value.
+*/
+#define  CONTROL_STATUS_NAT_HASH_MODE_SHIFT	12
+#define  CONTROL_STATUS_NAT_HASH_MODE_MASK	0x3000
+
+/*
+ * Maximum number of multi-hash iterations. Value of 0 is 1 iteration, 1 is 2
+ * iterations, 2 is 3 iterations, etc.
+*/
+#define  CONTROL_STATUS_MULTI_HASH_LIMIT_SHIFT	8
+#define  CONTROL_STATUS_MULTI_HASH_LIMIT_MASK	0xf00
+
+/*
+ * Decrement Count Wraparound Enable0h:
+ * Do not decrement counters for decrement command when counters reach 01h:
+ * Always decrement counters for decrement command; will wrap around from 0
+ * to all 1's
+*/
+#define  CONTROL_STATUS_DECR_COUNT_WRAPAROUND_ENABLE_MASK	0x80
+
+/*
+ * NAT Arbitration MechanismRound-robin arbitrationStrict priority
+ * arbitrationlisted from highest to lowest priority -- NAT0, NAT1, NAT2,
+ * NAT3, RunnerStrict priority arbitration (priority reversed from
+ * above)listed from highest to lowest priority -- Runner, NAT3, NAT2,
+ * NAT1, NAT0
+*/
+#define  CONTROL_STATUS_NAT_ARB_ST_SHIFT	5
+#define  CONTROL_STATUS_NAT_ARB_ST_MASK	0x60
+
+/*
+ * Enables incrementing or decrementing the hit counter by 1 and the byte
+ * counter by PKT_LEN on successful lookups using the register interface.
+ * By default, counters only increment on successful lookups on the Runner interface
+*/
+#define  CONTROL_STATUS_NATC_SMEM_INCREMENT_ON_REG_LOOKUP_MASK	0x10
+
+/*
+ * Disables clearing counters when an existing entry is replaced by ADD
+ * command
+*/
+#define  CONTROL_STATUS_NATC_SMEM_CLEAR_BY_UPDATE_DISABLE_MASK	0x8
+
+/* Disables counters from incrementing when hit */
+#define  CONTROL_STATUS_NATC_SMEM_DISABLE_MASK	0x4
+
+/*
+ * Enables all NATC state machines and the input FIFO; clearing this bit
+ * will halt all state machines gracefully to idle states, all outstanding
+ * transactions in the FIFO will remain in the FIFO and NATC will stop
+ * accepting new commands; all configuration registers should be configured
+ * before enabling this bit.
+*/
+#define  CONTROL_STATUS_NATC_ENABLE_MASK	0x2
+
+/*
+ * Self Clearing Block Reset (including resetting all registers to default
+ * values)
+*/
+#define  CONTROL_STATUS_NATC_RESET_MASK	0x1
+
+
+/*
+ * Register <NAT Cache Control and Status2>
+ *
+ * NAT Cache Control and Status Register
+ */
+#define NATC_CONTROL_STATUS2		0x4
+
+/* Reverse bytes within 18-bit DDR hash value */
+#define  CONTROL_STATUS2_DDR_HASH_SWAP_MASK	0x80000000
+
+/*
+ * Hash algorithm used for DDR lookupHash value is DDR table size
+ * dependent.
+ * 0h:
+ * 32-bit rolling XOR hash is used as DDR hash function.
+ * It is reduced to N-bitDDR table size is 8K, N = 13.
+ * DDR table size is 16K, N = 14.
+ * DDR table size is 32K, N = 15.
+ * DDR table size is 64K, N = 16.
+ * DDR table size is 128K, N = 17.
+ * DDR table size is 256K, N = 18.
+ * 1h:
+ * CRC32 hash is used as DDR hash function.
+ * CRC32 is reduced to N-bit usingthe same method as in 32-bit rolling XOR
+ * hash.
+ * DDR table size is 8K, N = 13.
+ * DDR table size is 16K, N = 14.
+ * DDR table size is 32K, N = 15.
+ * DDR table size is 64K, N = 16.
+ * DDR table size is 128K, N = 17.
+ * DDR table size is 256K, N = 18.
+ * 2h:
+ * CRC32 hash is used as DDR hash function.
+ * CRC32[N:
+ * 0] is used as hash valueDDR table size is 8K, N = 12.
+ * DDR table size is 16K, N = 13.
+ * DDR table size is 32K, N = 14.
+ * DDR table size is 64K, N = 15.
+ * DDR table size is 128K, N = 16.
+ * DDR table size is 256K, N = 17.
+ * 3h:
+ * CRC32 hash is used as DDR hash function.
+ * CRC32[31:
+ * N] is used as hash valueDDR table size is 8K, N = 19.
+ * DDR table size is 16K, N = 18.
+ * DDR table size is 32K, N = 17.
+ * DDR table size is 64K, N = 16.
+ * DDR table size is 128K, N = 15.
+ * DDR table size is 256K, N = 14.
+*/
+#define  CONTROL_STATUS2_DDR_HASH_MODE_SHIFT	29
+#define  CONTROL_STATUS2_DDR_HASH_MODE_MASK	0x60000000
+
+/*
+ * Swap 32-bit word within 64-bit word for DDR memory read/write
+ * accesses(i.
+ * e.
+ * , [63:
+ * 0] becomes {[31:
+ * 0], [63:
+ * 32]}).
+ * This bit should be set to 1 in Little Endian mode.
+*/
+#define  CONTROL_STATUS2_DDR_32BIT_IN_64BIT_SWAP_CONTROL_MASK	0x10000000
+
+/*
+ * Reverse bytes within 32-bit word for DDR memory read/write accesses(i.
+ * e.
+ * , [31:
+ * 0] becomes {[7:
+ * 0], [15,8], [23,16], [31,24]}).
+ * This bit should be set to 1 in Little Endian mode.
+*/
+#define  CONTROL_STATUS2_DDR_8BIT_IN_32BIT_SWAP_CONTROL_MASK	0x8000000
+
+/* (debug command) Do not set this bit to 1 */
+#define  CONTROL_STATUS2_CACHE_LOOKUP_BLOCKING_MODE_MASK	0x4000000
+
+/*
+ * Timer tick for pseudo-LRUTimer is incremented on every system clock
+ * cycleTimer is incremented on every packet arrival to NAT block
+*/
+#define  CONTROL_STATUS2_AGE_TIMER_TICK_MASK	0x2000000
+
+/*
+ * Timer value used for pseudo-LRU; when the timer fires the 8-bit age
+ * value of every entry in the cache is decremented (capped at 0).
+ * The entry with the lower value is the older entry.
+ * The default setting keeps track of 2s age at ~7ms resolution.
+ * 0:
+ * 1 tick1:
+ * 2 ticks2:
+ * 4 ticks3:
+ * 8 ticks4:
+ * 16 ticks.
+ * ..
+ * .31:
+ * 2^31 TICKS
+*/
+#define  CONTROL_STATUS2_AGE_TIMER_SHIFT	20
+#define  CONTROL_STATUS2_AGE_TIMER_MASK	0x1f00000
+
+/*
+ * Replacement algorithm for cachingLowest-multi-hash-iteration number is
+ * used to select the final replacemententry if multiple entries were
+ * chosen by the selected algorithm.
+ * Forinstance, if HIT_COUNT algorithm were selected, and 2nd, 3rd and
+ * 7thentry all have the same hit_count values, 2nd entry will be evicted.
+ * If CACHE_DISABLE or EVICTION_DISABLE is set, HIT_COUNT algorithmcan only
+ * keep track of the hit count while the entry is in the cache.
+ * When the entry is evicted hit count for that entry is lost.
+ * Replacement algorithm prioritizes pseudo-LRU over
+ * lowest-hit-countReplacement algorithm prioritizes lowest-hit-count over
+ * pseudo-LRUReplacement algorithm uses pseudo-LRUReplacement algorithm
+ * uses least-hit-countReplacement algorithm prioritizes pseudo-LRU over
+ * pseudo-randomReplacement algorithm prioritizes lowest-hit-count over
+ * pseudo-randomReplacement algorithm uses pseudo-random
+ * algorithmReplacement algorithm prioritizes highest-hit-count
+ * overmost-recently-useReplacement algorithm prioritizes pseudo-LRU over
+ * lowest-byte-countReplacement algorithm prioritizes lowest-byte-count
+ * over pseudo-LRUReplacement algorithm uses least-byte-countReplacement
+ * algorithm prioritizes lowest-byte-count over pseudo-randomReplacement
+ * algorithm prioritizes highest-byte-count overmost-recently-use
+*/
+#define  CONTROL_STATUS2_CACHE_ALGO_SHIFT	16
+#define  CONTROL_STATUS2_CACHE_ALGO_MASK	0xf0000
+
+#define  CONTROL_STATUS2_UNUSED1_SHIFT	8
+#define  CONTROL_STATUS2_UNUSED1_MASK	0xff00
+#define  CONTROL_STATUS2_UNUSED0_SHIFT	6
+#define  CONTROL_STATUS2_UNUSED0_MASK	0xc0
+/*
+ * This bit determines whether register interface lookup will cache the
+ * entry from DDR1h:
+ * Enable; entry fetched from DDR will be cached using register interface
+ * lookup command0h:
+ * Disable; entry fetched from DDR will not be cached using register
+ * interface lookup command
+*/
+#define  CONTROL_STATUS2_CACHE_UPDATE_ON_REG_DDR_LOOKUP_MASK	0x20
+
+/*
+ * Reverse bytes within 32-bit word for DDR counters on read/write
+ * accesses.
+ * (i.
+ * e.
+ * , [31:
+ * 0] becomes {[7:
+ * 0], [15,8], [23,16], [31,24]})
+*/
+#define  CONTROL_STATUS2_DDR_COUNTER_8BIT_IN_32BIT_SWAP_CONTROL_MASK	0x10
+
+#define  CONTROL_STATUS2_UNUSED5_MASK	0x8
+/*
+ * (debug command) Do not set this bit to 1Enable replacing existing cache
+ * counters with DDR fetched entry
+*/
+#define  CONTROL_STATUS2_DDR_REPLACE_DUPLICATED_CACHED_ENTRY_ENABLE_MASK	0x4
+
+/* (debug command) Do not set this bit to 1 */
+#define  CONTROL_STATUS2_DDR_LOOKUP_PENDING_FIFO_MODE_DISABLE_MASK	0x2
+
+/*
+ * Disable counter eviction to DDR; this bit is effective when
+ * CACHE_DISABLE is 0Set this bit when counters are not used; NATC
+ * performance will improve dueto reduced DDR accesses; CACHE_ALGO should
+ * not use HIT_COUNT and BYTE_COUNT
+*/
+#define  CONTROL_STATUS2_EVICTION_DISABLE_MASK	0x1
+
+
+/*
+ * Register <NAT Cache Table Control>
+ *
+ * NAT Cache Table Control Register
+ */
+#define NATC_TABLE_CONTROL		0x8
+
+/*
+ * Controls the amount of context to fetch from DDR in unit of 8 bytes for
+ * DDR table 7lowest 4 bits of key[3:
+ * 0] is used to indicate the context length0=8 byte, 1=16 bytes, 2=24
+ * bytes, .
+ * ..
+ * .
+ * 15=128 bytesNote that key length is reduced by 4 bit0h:
+ * Disable variable context length1h:
+ * Enable variable context length
+*/
+#define  TABLE_CONTROL_VAR_CONTEXT_LEN_EN_TBL7_MASK	0x800000
+
+/*
+ * Controls the amount of context to fetch from DDR in unit of 8 bytes for
+ * DDR table 6lowest 4 bits of key[3:
+ * 0] is used to indicate the context length0=8 byte, 1=16 bytes, 2=24
+ * bytes, .
+ * ..
+ * .
+ * 15=128 bytesNote that key length is reduced by 4 bit0h:
+ * Disable variable context length1h:
+ * Enable variable context length
+*/
+#define  TABLE_CONTROL_VAR_CONTEXT_LEN_EN_TBL6_MASK	0x400000
+
+/*
+ * Controls the amount of context to fetch from DDR in unit of 8 bytes for
+ * DDR table 5lowest 4 bits of key[3:
+ * 0] is used to indicate the context length0=8 byte, 1=16 bytes, 2=24
+ * bytes, .
+ * ..
+ * .
+ * 15=128 bytesNote that key length is reduced by 4 bit0h:
+ * Disable variable context length1h:
+ * Enable variable context length
+*/
+#define  TABLE_CONTROL_VAR_CONTEXT_LEN_EN_TBL5_MASK	0x200000
+
+/*
+ * Controls the amount of context to fetch from DDR in unit of 8 bytes for
+ * DDR table 4lowest 4 bits of key[3:
+ * 0] is used to indicate the context length0=8 byte, 1=16 bytes, 2=24
+ * bytes, .
+ * ..
+ * .
+ * 15=128 bytesNote that key length is reduced by 4 bit0h:
+ * Disable variable context length1h:
+ * Enable variable context length
+*/
+#define  TABLE_CONTROL_VAR_CONTEXT_LEN_EN_TBL4_MASK	0x100000
+
+/*
+ * Controls the amount of context to fetch from DDR in unit of 8 bytes for
+ * DDR table 3lowest 4 bits of key[3:
+ * 0] is used to indicate the context length0=8 byte, 1=16 bytes, 2=24
+ * bytes, .
+ * ..
+ * .
+ * 15=128 bytesNote that key length is reduced by 4 bit0h:
+ * Disable variable context length1h:
+ * Enable variable context length
+*/
+#define  TABLE_CONTROL_VAR_CONTEXT_LEN_EN_TBL3_MASK	0x80000
+
+/*
+ * Controls the amount of context to fetch from DDR in unit of 8 bytes for
+ * DDR table 2lowest 4 bits of key[3:
+ * 0] is used to indicate the context length0=8 byte, 1=16 bytes, 2=24
+ * bytes, .
+ * ..
+ * .
+ * 15=128 bytesNote that key length is reduced by 4 bit0h:
+ * Disable variable context length1h:
+ * Enable variable context length
+*/
+#define  TABLE_CONTROL_VAR_CONTEXT_LEN_EN_TBL2_MASK	0x40000
+
+/*
+ * Controls the amount of context to fetch from DDR in unit of 8 bytes for
+ * DDR table 1lowest 4 bits of key[3:
+ * 0] is used to indicate the context length0=8 byte, 1=16 bytes, 2=24
+ * bytes, .
+ * ..
+ * .
+ * 15=128 bytesNote that key length is reduced by 4 bit0h:
+ * Disable variable context length1h:
+ * Enable variable context length
+*/
+#define  TABLE_CONTROL_VAR_CONTEXT_LEN_EN_TBL1_MASK	0x20000
+
+/*
+ * Controls the amount of context to fetch from DDR in unit of 8 bytes for
+ * DDR table 0lowest 4 bits of key[3:
+ * 0] is used to indicate the context length0=8 byte, 1=16 bytes, 2=24
+ * bytes, .
+ * ..
+ * .
+ * 15=128 bytesNote that key length is reduced by 4 bit0h:
+ * Disable variable context length1h:
+ * Enable variable context length
+*/
+#define  TABLE_CONTROL_VAR_CONTEXT_LEN_EN_TBL0_MASK	0x10000
+
+/*
+ * Length of the key for DDR table 70h:
+ * 16-byte key1h:
+ * 32-byte key
+*/
+#define  TABLE_CONTROL_KEY_LEN_TBL7_MASK	0x8000
+
+/*
+ * DDR table 7 non-cacheable control0h:
+ * DDR table is cached1h:
+ * DDR table is not cached; counters are updated in DDR directly
+*/
+#define  TABLE_CONTROL_NON_CACHEABLE_TBL7_MASK	0x4000
+
+/*
+ * Length of the key for DDR table 60h:
+ * 16-byte key1h:
+ * 32-byte key
+*/
+#define  TABLE_CONTROL_KEY_LEN_TBL6_MASK	0x2000
+
+/*
+ * DDR table 6 non-cacheable control0h:
+ * DDR table is cached1h:
+ * DDR table is not cached; counters are updated in DDR directly
+*/
+#define  TABLE_CONTROL_NON_CACHEABLE_TBL6_MASK	0x1000
+
+/*
+ * Length of the key for DDR table 50h:
+ * 16-byte key1h:
+ * 32-byte key
+*/
+#define  TABLE_CONTROL_KEY_LEN_TBL5_MASK	0x800
+
+/*
+ * DDR table 5 non-cacheable control0h:
+ * DDR table is cached1h:
+ * DDR table is not cached; counters are updated in DDR directly
+*/
+#define  TABLE_CONTROL_NON_CACHEABLE_TBL5_MASK	0x400
+
+/*
+ * Length of the key for DDR table 40h:
+ * 16-byte key1h:
+ * 32-byte key
+*/
+#define  TABLE_CONTROL_KEY_LEN_TBL4_MASK	0x200
+
+/*
+ * DDR table 4 non-cacheable control0h:
+ * DDR table is cached1h:
+ * DDR table is not cached; counters are updated in DDR directly
+*/
+#define  TABLE_CONTROL_NON_CACHEABLE_TBL4_MASK	0x100
+
+/*
+ * Length of the key for DDR table 30h:
+ * 16-byte key1h:
+ * 32-byte key
+*/
+#define  TABLE_CONTROL_KEY_LEN_TBL3_MASK	0x80
+
+/*
+ * DDR table 3 non-cacheable control0h:
+ * DDR table is cached1h:
+ * DDR table is not cached; counters are updated in DDR directly
+*/
+#define  TABLE_CONTROL_NON_CACHEABLE_TBL3_MASK	0x40
+
+/*
+ * Length of the key for DDR table 20h:
+ * 16-byte key1h:
+ * 32-byte key
+*/
+#define  TABLE_CONTROL_KEY_LEN_TBL2_MASK	0x20
+
+/*
+ * DDR table 2 non-cacheable control0h:
+ * DDR table is cached1h:
+ * DDR table is not cached; counters are updated in DDR directly
+*/
+#define  TABLE_CONTROL_NON_CACHEABLE_TBL2_MASK	0x10
+
+/*
+ * Length of the key for DDR table 10h:
+ * 16-byte key1h:
+ * 32-byte key
+*/
+#define  TABLE_CONTROL_KEY_LEN_TBL1_MASK	0x8
+
+/*
+ * DDR table 1 non-cacheable control0h:
+ * DDR table is cached1h:
+ * DDR table is not cached; counters are updated in DDR directly
+*/
+#define  TABLE_CONTROL_NON_CACHEABLE_TBL1_MASK	0x4
+
+/*
+ * Length of the key for DDR table 00h:
+ * 16-byte key1h:
+ * 32-byte key
+*/
+#define  TABLE_CONTROL_KEY_LEN_TBL0_MASK	0x2
+
+/*
+ * DDR table 0 non-cacheable control0h:
+ * DDR table is cached1h:
+ * DDR table is not cached; counters are updated in DDR directly
+*/
+#define  TABLE_CONTROL_NON_CACHEABLE_TBL0_MASK	0x1
+
+
+
+/*
+ *
+ * register sets NATC_CFG, <r> is [ 0 => 8 ]
+ *
+ */
+
+/*
+ * Register <Lower 32-bit of NAT table 7 key base address NAT table 7 in DDR>
+ *
+ * Lower 32-bit of the base address of the DDR key table. Address must be
+ * 64-bit aligned (bits 2 through 0 are zeros). To allow the maximum
+ * number of key fetches: for a 16-byte key, bit 3 should be 0 to align at
+ * a 16-byte boundary; for a 32-byte key, bits 3 and 4 should be 0 to
+ * align at a 32-byte boundary
+ */
+#define NATC_CFG_DDR_KEY_BASE_ADDR_LO(r)	(0x2d0 + (r) * 0x10)
+
+#define  DDR_KEY_BASE_ADDR_LO_BAR_SHIFT	3
+#define  DDR_KEY_BASE_ADDR_LO_BAR_MASK	0xfffffff8
+#define  DDR_KEY_BASE_ADDR_LO_ZEROS_SHIFT	0
+#define  DDR_KEY_BASE_ADDR_LO_ZEROS_MASK	0x7
+
+/*
+ * Register <Upper 32-bit of NAT table 7 key base address NAT table 7 in DDR>
+ *
+ * Upper 8-bit of the base address of DDR key tableFor 32-bit system this
+ * field should be left as 0
+ */
+#define NATC_CFG_DDR_KEY_BASE_ADDR_HI(r)	(0x2d4 + (r) * 0x10)
+
+#define  DDR_KEY_BASE_ADDR_HI_ZEROS_SHIFT	8
+#define  DDR_KEY_BASE_ADDR_HI_ZEROS_MASK	0xffffff00
+#define  DDR_KEY_BASE_ADDR_HI_BAR_SHIFT	0
+#define  DDR_KEY_BASE_ADDR_HI_BAR_MASK	0xff
+
+/*
+ * Register <Lower 32-bit of NAT table 7 result base address NAT table 7 in DDR>
+ *
+ * Lower 32-bit of the base address of DDR context tableAddress must be
+ * 64-bit aligned (bit 2 through 0 are zero's)
+ */
+#define NATC_CFG_DDR_RESULT_BASE_ADDR_LO(r)	(0x2d8 + (r) * 0x10)
+
+#define  DDR_RESULT_BASE_ADDR_LO_BAR_SHIFT	3
+#define  DDR_RESULT_BASE_ADDR_LO_BAR_MASK	0xfffffff8
+#define  DDR_RESULT_BASE_ADDR_LO_ZEROS_SHIFT	0
+#define  DDR_RESULT_BASE_ADDR_LO_ZEROS_MASK	0x7
+
+/*
+ * Register <Upper 32-bit of NAT table 7 result base address NAT table 7 in DDR>
+ *
+ * Upper 8-bit of the base address of DDR context tableFor 32-bit system
+ * this field should be left as 0
+ */
+#define NATC_CFG_DDR_RESULT_BASE_ADDR_HI(r)	(0x2dc + (r) * 0x10)
+
+#define  DDR_RESULT_BASE_ADDR_HI_ZEROS_SHIFT	8
+#define  DDR_RESULT_BASE_ADDR_HI_ZEROS_MASK	0xffffff00
+#define  DDR_RESULT_BASE_ADDR_HI_BAR_SHIFT	0
+#define  DDR_RESULT_BASE_ADDR_HI_BAR_MASK	0xff
+
+
+/*
+ *
+ * register sets NATC_CTRS, <r> is [ 0 => 8 ]
+ *
+ */
+
+/*
+ * Register <NAT table 7 NAT Cache Hit Count>
+ *
+ * NATC CACHE HIT COUNT
+ */
+#define NATC_CTRS_CACHE_HIT_COUNT(r)	(0x350 + (r) * 0x14)
+
+/* 32-bit total cache hit count value for statistics collection */
+#define  CACHE_HIT_COUNT_CACHE_HIT_COUNT_SHIFT	0
+#define  CACHE_HIT_COUNT_CACHE_HIT_COUNT_MASK	0xffffffff
+
+
+/*
+ * Register <NAT table 7 NAT Cache Miss Count>
+ *
+ * NATC CACHE MISS COUNT
+ */
+#define NATC_CTRS_CACHE_MISS_COUNT(r)	(0x354 + (r) * 0x14)
+
+/* 32-bit total cache miss count value for statistics collection */
+#define  CACHE_MISS_COUNT_CACHE_MISS_COUNT_SHIFT	0
+#define  CACHE_MISS_COUNT_CACHE_MISS_COUNT_MASK	0xffffffff
+
+
+/*
+ * Register <NAT table 7 NAT DDR Request Count>
+ *
+ * NATC DDR REQUEST COUNT
+ */
+#define NATC_CTRS_DDR_REQUEST_COUNT(r)	(0x358 + (r) * 0x14)
+
+/* 32-bit total DDR request count value for statistics collection */
+#define  DDR_REQUEST_COUNT_DDR_REQUEST_COUNT_SHIFT	0
+#define  DDR_REQUEST_COUNT_DDR_REQUEST_COUNT_MASK	0xffffffff
+
+
+/*
+ * Register <NAT table 7 NAT DDR Evict Count>
+ *
+ * NATC DDR EVICT COUNT
+ */
+#define NATC_CTRS_DDR_EVICT_COUNT(r)	(0x35c + (r) * 0x14)
+
+/*
+ * 32-bit total DDR evict count value for statistics collection.
+ * It does not include the flush command evict count.
+*/
+#define  DDR_EVICT_COUNT_DDR_EVICT_COUNT_SHIFT	0
+#define  DDR_EVICT_COUNT_DDR_EVICT_COUNT_MASK	0xffffffff
+
+
+/*
+ * Register <NAT table 7 NAT DDR Block Count>
+ *
+ * DDR BLOCK COUNT
+ */
+#define NATC_CTRS_DDR_BLOCK_COUNT(r)	(0x360 + (r) * 0x14)
+
+/* 32-bit total DDR blocked access count value for statistics collection */
+#define  DDR_BLOCK_COUNT_DDR_BLOCK_COUNT_SHIFT	0
+#define  DDR_BLOCK_COUNT_DDR_BLOCK_COUNT_MASK	0xffffffff
+
+
+
+/*
+ *
+ * register set NATC_DDR_CFG
+ *
+ */
+
+/*
+ * Register <NAT Cache DDR Size>
+ *
+ * DDR Size Register
+ */
+#define NATC_DDR_CFG_SIZE		0x410
+
+/*
+ * Number of entries in DDR table 7See description of DDR_SIZE_TBL00=8k;
+ * 1=16k; 2=32k; 3=64k; 4=128k; 5=256k; 6=invalid; 7=invalid
+*/
+#define  SIZE_DDR_SIZE_TBL7_SHIFT	21
+#define  SIZE_DDR_SIZE_TBL7_MASK	0xe00000
+
+/*
+ * Number of entries in DDR table 6See description of DDR_SIZE_TBL00=8k;
+ * 1=16k; 2=32k; 3=64k; 4=128k; 5=256k; 6=invalid; 7=invalid
+*/
+#define  SIZE_DDR_SIZE_TBL6_SHIFT	18
+#define  SIZE_DDR_SIZE_TBL6_MASK	0x1c0000
+
+/*
+ * Number of entries in DDR table 5See description of DDR_SIZE_TBL00=8k;
+ * 1=16k; 2=32k; 3=64k; 4=128k; 5=256k; 6=invalid; 7=invalid
+*/
+#define  SIZE_DDR_SIZE_TBL5_SHIFT	15
+#define  SIZE_DDR_SIZE_TBL5_MASK	0x38000
+
+/*
+ * Number of entries in DDR table 4See description of DDR_SIZE_TBL00=8k;
+ * 1=16k; 2=32k; 3=64k; 4=128k; 5=256k; 6=invalid; 7=invalid
+*/
+#define  SIZE_DDR_SIZE_TBL4_SHIFT	12
+#define  SIZE_DDR_SIZE_TBL4_MASK	0x7000
+
+/*
+ * Number of entries in DDR table 3See description of DDR_SIZE_TBL00=8k;
+ * 1=16k; 2=32k; 3=64k; 4=128k; 5=256k; 6=invalid; 7=invalid
+*/
+#define  SIZE_DDR_SIZE_TBL3_SHIFT	9
+#define  SIZE_DDR_SIZE_TBL3_MASK	0xe00
+
+/*
+ * Number of entries in DDR table 2See description of DDR_SIZE_TBL00=8k;
+ * 1=16k; 2=32k; 3=64k; 4=128k; 5=256k; 6=invalid; 7=invalid
+*/
+#define  SIZE_DDR_SIZE_TBL2_SHIFT	6
+#define  SIZE_DDR_SIZE_TBL2_MASK	0x1c0
+
+/*
+ * Number of entries in DDR table 10=8k; 1=16k; 2=32k; 3=64k; 4=128k;
+ * 5=256k; 6=invalid; 7=invalidSee description of DDR_SIZE_TBL0
+*/
+#define  SIZE_DDR_SIZE_TBL1_SHIFT	3
+#define  SIZE_DDR_SIZE_TBL1_MASK	0x38
+
+/*
+ * Number of entries in DDR table 0Value of 6 or above is invalidTo compute
+ * the actual size of the table, add DDR_BINS_PER_BUCKET fieldto the table
+ * size selection;For instance, if DDR_BINS_PER_BUCKET is 3 (4 bins per
+ * bucket)and DDR_size is 3 (64k entries), the actual size of the table in
+ * DDR is(64*1024+3) multiply by total length (TOTAL_LEN) of key and
+ * context in bytesExtra 3 entries are used to store collided entries of
+ * the last entryvalue 256k 5256k entriesvalue 128k 4128k entriesvalue 64k
+ * 364k entriesvalue 32k 232k entriesvalue 16k 116k entriesvalue 8k 08k
+ * entriesdefault 0h
+*/
+#define  SIZE_DDR_SIZE_TBL0_SHIFT	0
+#define  SIZE_DDR_SIZE_TBL0_MASK	0x7
+
+
+/*
+ * Register <NAT Cache DDR Bins Per Bucket 0>
+ *
+ * DDR Bins Per Bucket Register 0
+ */
+#define NATC_DDR_CFG_BINS_PER_BUCKET_0	0x414
+
+/*
+ * Number of entries per bucket in DDR table 3See description of
+ * DDR_BINS_PER_BUCKET_TBL0
+*/
+#define  BINS_PER_BUCKET_0_DDR_BINS_PER_BUCKET_TBL3_SHIFT	24
+#define  BINS_PER_BUCKET_0_DDR_BINS_PER_BUCKET_TBL3_MASK	0xff000000
+
+/*
+ * Number of entries per bucket in DDR table 2See description of
+ * DDR_BINS_PER_BUCKET_TBL0
+*/
+#define  BINS_PER_BUCKET_0_DDR_BINS_PER_BUCKET_TBL2_SHIFT	16
+#define  BINS_PER_BUCKET_0_DDR_BINS_PER_BUCKET_TBL2_MASK	0xff0000
+
+/*
+ * Number of entries per bucket in DDR table 1See description of
+ * DDR_BINS_PER_BUCKET_TBL0
+*/
+#define  BINS_PER_BUCKET_0_DDR_BINS_PER_BUCKET_TBL1_SHIFT	8
+#define  BINS_PER_BUCKET_0_DDR_BINS_PER_BUCKET_TBL1_MASK	0xff00
+
+/*
+ * Number of entries per bucket in DDR table 0This is limited by bus max
+ * burst size.
+ * For instance, ifUBUS supports max burst size of 128 bytes, key length is
+ * 16bytes, maximum DDR_BINS_PER_BUCKET that can be programmedis 128 bytes
+ * / 16-bytes (bytes per bin) = 8 entries0h:
+ * 1 entry1h:
+ * 2 entries2h:
+ * 3 entries3h:
+ * 4 entries4h:
+ * 5 entries5h:
+ * 6 entries6h:
+ * 7 entries7h:
+ * 8 entries.
+ * ..
+ * ..
+ * ..
+ * ..
+ * ..
+ * ..
+*/
+#define  BINS_PER_BUCKET_0_DDR_BINS_PER_BUCKET_TBL0_SHIFT	0
+#define  BINS_PER_BUCKET_0_DDR_BINS_PER_BUCKET_TBL0_MASK	0xff
+
+
+/*
+ * Register <NAT Cache DDR Bins Per Bucket 1>
+ *
+ * DDR Bins Per Bucket Register 1
+ */
+#define NATC_DDR_CFG_BINS_PER_BUCKET_1	0x418
+
+/*
+ * Number of entries per bucket in DDR table 7. See description of
+ * DDR_BINS_PER_BUCKET_TBL0.
+*/
+#define  BINS_PER_BUCKET_1_DDR_BINS_PER_BUCKET_TBL7_SHIFT	24
+#define  BINS_PER_BUCKET_1_DDR_BINS_PER_BUCKET_TBL7_MASK	0xff000000
+
+/*
+ * Number of entries per bucket in DDR table 6. See description of
+ * DDR_BINS_PER_BUCKET_TBL0.
+*/
+#define  BINS_PER_BUCKET_1_DDR_BINS_PER_BUCKET_TBL6_SHIFT	16
+#define  BINS_PER_BUCKET_1_DDR_BINS_PER_BUCKET_TBL6_MASK	0xff0000
+
+/*
+ * Number of entries per bucket in DDR table 5. See description of
+ * DDR_BINS_PER_BUCKET_TBL0.
+*/
+#define  BINS_PER_BUCKET_1_DDR_BINS_PER_BUCKET_TBL5_SHIFT	8
+#define  BINS_PER_BUCKET_1_DDR_BINS_PER_BUCKET_TBL5_MASK	0xff00
+
+/*
+ * Number of entries per bucket in DDR table 4. See description of
+ * DDR_BINS_PER_BUCKET_TBL0.
+*/
+#define  BINS_PER_BUCKET_1_DDR_BINS_PER_BUCKET_TBL4_SHIFT	0
+#define  BINS_PER_BUCKET_1_DDR_BINS_PER_BUCKET_TBL4_MASK	0xff
+
+
+/*
+ * Register <NAT Cache Total Length>
+ *
+ * DDR TABLE Total Length Register
+ */
+#define NATC_DDR_CFG_TOTAL_LEN		0x41c
+
+/*
+ * Length of the lookup key plus context (including 8-byte counters) in
+ * DDR table 7. See description of TOTAL_LEN_TBL0.
+*/
+#define  TOTAL_LEN_TOTAL_LEN_TBL7_SHIFT	21
+#define  TOTAL_LEN_TOTAL_LEN_TBL7_MASK	0xe00000
+
+/*
+ * Length of the lookup key plus context (including 8-byte counters) in
+ * DDR table 6. See description of TOTAL_LEN_TBL0.
+*/
+#define  TOTAL_LEN_TOTAL_LEN_TBL6_SHIFT	18
+#define  TOTAL_LEN_TOTAL_LEN_TBL6_MASK	0x1c0000
+
+/*
+ * Length of the lookup key plus context (including 8-byte counters) in
+ * DDR table 5. See description of TOTAL_LEN_TBL0.
+*/
+#define  TOTAL_LEN_TOTAL_LEN_TBL5_SHIFT	15
+#define  TOTAL_LEN_TOTAL_LEN_TBL5_MASK	0x38000
+
+/*
+ * Length of the lookup key plus context (including 8-byte counters) in
+ * DDR table 4. See description of TOTAL_LEN_TBL0.
+*/
+#define  TOTAL_LEN_TOTAL_LEN_TBL4_SHIFT	12
+#define  TOTAL_LEN_TOTAL_LEN_TBL4_MASK	0x7000
+
+/*
+ * Length of the lookup key plus context (including 8-byte counters) in
+ * DDR table 3. See description of TOTAL_LEN_TBL0.
+*/
+#define  TOTAL_LEN_TOTAL_LEN_TBL3_SHIFT	9
+#define  TOTAL_LEN_TOTAL_LEN_TBL3_MASK	0xe00
+
+/*
+ * Length of the lookup key plus context (including 8-byte counters) in
+ * DDR table 2. See description of TOTAL_LEN_TBL0.
+*/
+#define  TOTAL_LEN_TOTAL_LEN_TBL2_SHIFT	6
+#define  TOTAL_LEN_TOTAL_LEN_TBL2_MASK	0x1c0
+
+/*
+ * Length of the lookup key plus context (including 8-byte counters) in
+ * DDR table 1. See description of TOTAL_LEN_TBL0.
+*/
+#define  TOTAL_LEN_TOTAL_LEN_TBL1_SHIFT	3
+#define  TOTAL_LEN_TOTAL_LEN_TBL1_MASK	0x38
+
+/*
+ * Length of the lookup key plus context (including 8-byte counters) in
+ * DDR table 0. The context length (including 8-byte counters) is
+ * calculated as TOTAL_LEN minus KEY_LEN. The maximum value should not
+ * exceed the hardware capability.
+ * For instance, most projects have a max of 80 bytes and BCM63158 has a
+ * max value of 144 bytes.
+ * 0h: 48-byte; 1h: 64-byte; 2h: 80-byte; 3h: 96-byte;
+ * 4h: 112-byte; 5h: 128-byte; 6h: 144-byte; 7h: 160-byte.
+*/
+#define  TOTAL_LEN_TOTAL_LEN_TBL0_SHIFT	0
+#define  TOTAL_LEN_TOTAL_LEN_TBL0_MASK	0x7
+
+
+/*
+ * Register <NAT State Machine Status>
+ *
+ * NAT State Machine Status Register
+ */
+#define NATC_DDR_CFG_SM_STATUS		0x420
+
+/*
+ * Debug bus select.
+ * 2'b00:
+ * prb_nat_control.
+ * 2'b01:
+ * prb_cmd_control.
+ * 2'b10:
+ * prb_wb_control.
+ * 2'b11:
+ * prb_ddr_control.
+*/
+#define  SM_STATUS_DEBUG_SEL_SHIFT	24
+#define  SM_STATUS_DEBUG_SEL_MASK	0x3000000
+
+/*
+ * APB to RBUS bridge state machine.
+ * 2'b00:
+ * APB_ST_IDLE.
+ * 2'b01:
+ * APB_ST_RW.
+ * 2'b10:
+ * AOB_ST_END.
+*/
+#define  SM_STATUS_APB_STATE_SHIFT	22
+#define  SM_STATUS_APB_STATE_MASK	0xc00000
+
+/*
+ * DDR request state machine.
+ * 2'b00:
+ * DDR_REQ_ST_IDLE.
+ * 2'b01:
+ * DDR_REQ_ST_WRITE_HEADER.
+ * 2'b10:
+ * DDR_REQ_ST_WRITE_HEADER_DELAY.
+*/
+#define  SM_STATUS_DDR_REQ_STATE_SHIFT	20
+#define  SM_STATUS_DDR_REQ_STATE_MASK	0x300000
+
+/*
+ * DDR reply state machine.
+ * 3'b000:
+ * DDR_REP_ST_IDLE.
+ * 3'b001:
+ * DDR_REP_ST_READ_DATA.
+ * 3'b010:
+ * DDR_REP_ST_READ_RESULT.
+ * 3'b011:
+ * DDR_REP_ST_READ_WAIT.
+ * 3'b100:
+ * DDR_REP_ST_EVICT_WR_NON_CACHEABLE.
+*/
+#define  SM_STATUS_DDR_REP_STATE_SHIFT	17
+#define  SM_STATUS_DDR_REP_STATE_MASK	0xe0000
+
+/*
+ * Runner command state machine.
+ * 1'b0:
+ * RUNNER_CMD_ST_IDLE.
+ * 1'b1:
+ * RUNNER_CMD_ST_WRITE_RUNNER_FIFO.
+*/
+#define  SM_STATUS_RUNNER_CMD_STATE_MASK	0x10000
+
+/*
+ * Write-back state machine.
+ * 1'b0:
+ * WB_ST_IDLE.
+ * 1'b1:
+ * WB_ST_WRITE_BACIF.
+*/
+#define  SM_STATUS_WB_STATE_MASK	0x8000
+
+/*
+ * Nat state machine.
+ * 15'b000000000000000:
+ * NAT_ST_IDLE.
+ * 15'b000000000000001:
+ * NAT_ST_IDLE_WRITE_SMEM.
+ * 15'b000000000000010:
+ * NAT_ST_IDLE_DDR_PENDING.
+ * 15'b000000000000100:
+ * NAT_ST_HASH.
+ * 15'b000000000001000:
+ * NAT_ST_NAT_MEM_READ_REQ.
+ * 15'b000000000010000:
+ * NAT_ST_NAT_MEM_WRITE_REQ.
+ * 15'b000000000100000:
+ * NAT_ST_READ_SMEM.
+ * 15'b000000001000000:
+ * NAT_ST_UPDATE_DDR.
+ * 15'b000000010000000:
+ * NAT_ST_IDLE_BLOCKING_PENDING.
+ * 15'b000000100000000:
+ * NAT_ST_EVICT_WAIT.
+ * 15'b000001000000000:
+ * NAT_ST_CHECK_NON_CACHEABLE.
+ * 15'b000010000000000:
+ * NAT_ST_WAIT.
+ * 15'b000100000000000:
+ * NAT_ST_WAIT_NATC_MEM_REQ_DONE.
+ * 15'b001000000000000:
+ * NAT_ST_CACHE_FLUSH.
+ * 15'b010000000000000:
+ * NAT_ST_DDR_MISS_0.
+ * 15'b100000000000000:
+ * NAT_ST_DDR_MISS_1.
+*/
+#define  SM_STATUS_NAT_STATE_SHIFT	0
+#define  SM_STATUS_NAT_STATE_MASK	0x7fff
+
+
+
+/*
+ *
+ * register sets NATC_ENG, <r> is [ 0 => 4 ]
+ *
+ */
+
+/*
+ * Register <NAT3 command & status>
+ *
+ * NAT Command and Status Register
+ */
+#define NATC_ENG_COMMAND_STATUS(r)	(0x10 + (r) * 0xb0)
+
+/*
+ * This field specifies the DDR BIN number to be compared for DEL command
+ * when DEL_CMD_MODE is set to 1.
+*/
+#define  COMMAND_STATUS_DEL_CMD_DDR_BIN_SHIFT	20
+#define  COMMAND_STATUS_DEL_CMD_DDR_BIN_MASK	0xff00000
+
+/*
+ * DEL Command DDR-bin matching mode enable.
+ * 0h: DEL command deletes the cache entry with matching key.
+ * 1h: DEL command deletes the cache entry with matching key and matching
+ * DDR bin number specified in the DEL_CMD_DDR_BIN field.
+*/
+#define  COMMAND_STATUS_DEL_CMD_MODE_MASK	0x20000
+
+/*
+ * Cache Flush enable. When set, the LOOKUP command is used to flush
+ * counters from cache into DDR.
+ * This command does not use the key to look up the cache entry.
+ * Instead it uses the cache index number located in the 10 MSB bits of
+ * the key specified in the NAT_KEY_RESULT register.
+ * For a 16-byte key, the cache index is located in {NAT_KEY_RESULT[15],
+ * NAT_KEY_RESULT[14][7:6]} (15th byte of the NAT_KEY_RESULT register and
+ * bits 7:6 of the 14th byte of the NAT_KEY_RESULT register).
+ * For a 32-byte key, the cache index is located in {NAT_KEY_RESULT[31],
+ * NAT_KEY_RESULT[30][7:6]} (31st byte of the NAT_KEY_RESULT register and
+ * bits 7:6 of the 30th byte of the NAT_KEY_RESULT register).
+ * 0h: LOOKUP command is used as a normal lookup command.
+ * 1h: LOOKUP command is used as a cache flush command.
+*/
+#define  COMMAND_STATUS_CACHE_FLUSH_MASK	0x10000
+
+/*
+ * Decrement-counter mode enable. When set, the LOOKUP command will
+ * decrement the hit counter by 1 and decrement the byte counter by the
+ * value specified in PKT_LEN, on a successful lookup.
+ * NATC_SMEM_INCREMENT_ON_REG_LOOKUP must be set to 1 for it to be
+ * effective.
+ * 0h: LOOKUP command will increment hit counter and byte counter.
+ * 1h: LOOKUP command will decrement hit counter and byte counter.
+*/
+#define  COMMAND_STATUS_DECR_COUNT_MASK	0x8000
+
+/*
+ * Select the DDR Table on which the command will operate.
+ * 0h: DDR table 0; 1h: DDR table 1; 2h: DDR table 2; 3h: DDR table 3;
+ * 4h: DDR table 4; 5h: DDR table 5; 6h: DDR table 6; 7h: DDR table 7.
+*/
+#define  COMMAND_STATUS_NAT_TBL_SHIFT	12
+#define  COMMAND_STATUS_NAT_TBL_MASK	0x7000
+
+/*
+ * Cache multi-hash iteration count status. A value of 0 is iteration 1,
+ * 1 is iteration 2, 2 is iteration 3, etc.
+ * A cache miss returns a count of 0.
+*/
+#define  COMMAND_STATUS_MULTIHASH_COUNT_SHIFT	8
+#define  COMMAND_STATUS_MULTIHASH_COUNT_MASK	0xf00
+
+/* This bit is set when a LOOKUP command has a cache hit */
+#define  COMMAND_STATUS_CACHE_HIT_MASK	0x80
+
+/* This bit is set when a LOOKUP command has a miss */
+#define  COMMAND_STATUS_MISS_MASK	0x40
+
+/*
+ * This bit is set for the following 2 cases:
+ * for ADD command, all multi-hash entries are occupied (i.e. no room to
+ * ADD); for DEL command, the entry is not found and cannot be deleted.
+*/
+#define  COMMAND_STATUS_ERROR_MASK	0x20
+
+/*
+ * Interface Busy. This bit is set when a command is issued but is still
+ * being processed.
+ * When the command completes this bit will be cleared.
+*/
+#define  COMMAND_STATUS_BUSY_MASK	0x10
+
+#define  COMMAND_STATUS_UNUSED0_MASK	0x8
+/*
+ * Command to be executed. This command only operates on the entries in
+ * the cache, except for the LOOKUP command where the entry can be
+ * fetched from DDR.
+ * Writing to this field causes the BUSY bit to be set.
+ * Note:
+ * for all commands, a key consisting of all 0's indicates an unused
+ * entry in h/w and therefore cannot be used.
+ * Commands: No-Operation; Lookup; Add (to cache only); Del (from cache
+ * only); Hash (debug command) -- hashes are stored in a different set of
+ * COMMAND_STATUS registers (i.e. hashes for a HASH command issued using
+ * the NAT0 register are returned at NAT1 KEY_RESULT registers; hashes
+ * for a NAT1 HASH command are returned at NAT0 KEY_RESULT; hashes for a
+ * NAT2 HASH command are returned at NAT3 KEY_RESULT; hashes for a NAT3
+ * HASH command are returned at NAT2 KEY_RESULT register); Internal Cache
+ * command (debug command) -- do not use this command.
+*/
+#define  COMMAND_STATUS_COMMAND_SHIFT	0
+#define  COMMAND_STATUS_COMMAND_MASK	0x7
+
+
+/*
+ * Register <NAT3 Hash Value> - read-only
+ *
+ * NAT Hash Value
+ */
+#define NATC_ENG_HASH(r)		(0x18 + (r) * 0xb0)
+
+/*
+ * Hash value; only valid on a successful lookup/add/del command. For a
+ * cache hit, a 10-bit hash value is returned.
+ * For a cache miss and DDR_ENABLE is 0, the first hash value (10-bit) is
+ * returned.
+ * For a cache miss, DDR_ENABLE is 1 and DDR is a hit, the 18-bit DDR
+ * hash value + DDR bin count is returned.
+ * For a cache miss, DDR_ENABLE is 1 and DDR is a miss, the 18-bit DDR
+ * hash value is returned.
+*/
+#define  HASH_HASH_SHIFT		0
+#define  HASH_HASH_MASK			0xffffffff
+
+
+/*
+ * Register <NAT3 Session Hit Count> - read-only
+ *
+ * Hit Count
+ */
+#define NATC_ENG_HIT_COUNT(r)		(0x1c + (r) * 0xb0)
+
+/*
+ * Bits 27:0 are the 28-bit hit count value.
+ * Bits 31:28 are the 4 lsb of the 36-bit byte count value.
+ * Only valid on a successful lookup or delete command.
+*/
+#define  HIT_COUNT_HIT_COUNT_SHIFT	0
+#define  HIT_COUNT_HIT_COUNT_MASK	0xffffffff
+
+
+/*
+ * Register <NAT3 Session Byte Count> - read-only
+ *
+ * Byte Count
+ */
+#define NATC_ENG_BYTE_COUNT(r)		(0x20 + (r) * 0xb0)
+
+/*
+ * 32-bit msb of the 36-bit byte count value.
+ * {BYTE_COUNT, HIT_COUNT[31:28]} is the 36-bit byte count value.
+ * Only valid on a successful lookup or delete command.
+*/
+#define  BYTE_COUNT_BYTE_COUNT_SHIFT	0
+#define  BYTE_COUNT_BYTE_COUNT_MASK	0xffffffff
+
+
+/*
+ * Register <NAT3 Packet Length>
+ *
+ * NAT PKT Length
+ */
+#define NATC_ENG_PKT_LEN(r)		(0x24 + (r) * 0xb0)
+
+#define  PKT_LEN_UNUSED_SHIFT		16
+#define  PKT_LEN_UNUSED_MASK		0xffff0000
+/* 16-bit packet length value used to increment or decrement byte counter */
+#define  PKT_LEN_PKT_LEN_SHIFT		0
+#define  PKT_LEN_PKT_LEN_MASK		0xffff
+
+
+/*
+ * Registers <NAT3 key & result> - <x> is [ 0 => 33 ]
+ *
+ * NAT key & context (excluding 8-byte counters) register; context is
+ * placed after the key. A key consisting of all 0's indicates an unused
+ * entry in hardware and therefore should not be used for
+ * lookup/hash/del/add commands. The key should contain an encoded type
+ * field (e.g. Unused=0, IPv4=1, IPv4m=2, IPv6=3, IPv6m=4, etc) to
+ * uniquely identify each key, so that keys with a shorter length of one
+ * type are not misidentified as the longer key of another type when the
+ * shorter key matches the beginning of the longer key.
+ * When multiple DDR table mode is used, the DDR table number should be
+ * built into the key so the same real key in 2 different DDR tables can
+ * be distinguished.
+ * For the HASH debug command, this register stores the nth multi-hash
+ * value; for 256_BIT_MODE a 10-bit hash value is stored at bits 14:5;
+ * for 512_BIT_MODE a 9-bit hash value is stored at bits 14:6. For the
+ * HASH debug command, hashes for a HASH command issued using the NAT0
+ * register are returned at NAT1 KEY_RESULT registers; hashes for a NAT1
+ * HASH command are returned at NAT0 KEY_RESULT; hashes for a NAT2 HASH
+ * command are returned at NAT3 KEY_RESULT; hashes for a NAT3 HASH
+ * command are returned at NAT2 KEY_RESULT register.
+ */
+#define NATC_ENG_KEY_RESULT(r, x)	(0x30 + (r) * 0xb0 + (x) * 0x4)
+
+#define  KEY_RESULT_NAT_KEY_RESULT_SHIFT	0
+#define  KEY_RESULT_NAT_KEY_RESULT_MASK	0xffffffff
+
+
+/*
+ *
+ * register set NATC_INDIR
+ *
+ */
+
+/*
+ * Register <NATC Indirect Address>
+ *
+ */
+#define NATC_INDIR_C_INDIR_ADDR_REG	0x700
+
+/*
+ * NAT Cache Memory and Statics Memory Transaction.
+ * 1: NAT Cache Memory and Statics Memory Write.
+ * 0: NAT Cache Memory and Statics Memory Read.
+*/
+#define  C_INDIR_ADDR_REG_W_R_MASK	0x400
+
+/* NAT Cache Entry number. */
+#define  C_INDIR_ADDR_REG_NATC_ENTRY_SHIFT	0
+#define  C_INDIR_ADDR_REG_NATC_ENTRY_MASK	0x3ff
+
+
+/*
+ * Registers <NATC Indirect Data> - <x> is [ 0 => 36 ]
+ *
+ */
+#define NATC_INDIR_C_INDIR_DATA_REG(x)	(0x710 + (x) * 0x4)
+
+/*
+ * Indirect register access data register, bits [31:0].
+ * -------------------------------------------------------
+ * For a NAT Cache and Statics Memory write operation, first write all
+ * the data to Indirect Data Registers [N-1:0], where N is the number of
+ * words including key, result, hit count and byte count.
+ * Indirect Data Registers [1:0] are for Statics Memory.
+ * Indirect Data Register [0] is for hit count and 4 lsb of byte count.
+ * Indirect Data Register [0] bits 27:0 are the 28-bit hit count.
+ * Indirect Data Register [0] bits 31:28 are the 4 lsb of the 36-bit byte
+ * count.
+ * Indirect Data Register [1] is for the 32 msb of the 36-bit byte count.
+ * {Indirect Data Register [1], Indirect Data Register [0][31:28]} is the
+ * 36-bit byte count.
+ * Indirect Data Registers [N-1:2] are for NAT Cache Memory (key and
+ * result); key is first, followed by result, followed by {ddr_miss,
+ * nat_ddr_bin, nat_tbl}; then followed by a write to the Indirect
+ * Address Register to set up the NAT Cache Entry Number and the W_R bit
+ * to 1, which will initiate the write operation.
+ * --------------------------------------------------------
+ * For a NAT Cache Memory and Statics Memory read operation, first write
+ * to the Indirect Address Register to set up the NAT Cache Entry Number
+ * and the W_R bit to 0, which will initiate the read operation.
+ * The read data from NAT Cache Memory and Statics Memory will be loaded
+ * into Indirect Data Registers [N-1:0], then followed by reads from
+ * Indirect Data Registers [N-1:0] for all data.
+*/
+#define  C_INDIR_DATA_REG_DATA_SHIFT	0
+#define  C_INDIR_DATA_REG_DATA_MASK	0xffffffff
+
+
+
+/*
+ *
+ * register sets NATC_KEY_MASK, <r> is [ 0 => 8 ]
+ *
+ */
+
+/*
+ * Register <NAT table 7 key mask>
+ *
+ * NAT Cache key Mask Register
+ */
+#define NATC_KEY_MASK(r)		(0x3f0 + (r) * 0x4)
+
+/*
+ * Specifies the key mask for each byte in the key.
+ * Each bit corresponds to one byte.
+ * 0 enables the compare and 1 disables the compare.
+ * Bit 0 corresponds to byte 0, bit 1 corresponds to byte 1, bit 2
+ * corresponds to byte 2, ..., bit 31 corresponds to byte 31.
+*/
+#define  NATC_KEY_MASK_KEY_MASK_SHIFT	0
+#define  NATC_KEY_MASK_KEY_MASK_MASK	0xffffffff
+
+
+
+/*
+ *
+ * register set NATC_REGFILE
+ *
+ */
+
+/*
+ * Register <REGFILE FIFO Start Address0>
+ *
+ * REGFILE FIFO Start Address register 0. The actual FIFO size is 2 more
+ * than the number programmed in this register, due to input and output
+ * holder registers which account for 2 additional depth.
+ * The actual FIFO 0 (DDR_KEY_REQ_FIFO) size is
+ * REGFILE_FIFO_START_ADDR_1 - REGFILE_FIFO_START_ADDR_0 + 2.
+ * The actual FIFO 1 (DDR_RESULT_REQ_FIFO) size is
+ * REGFILE_FIFO_START_ADDR_2 - REGFILE_FIFO_START_ADDR_1 + 2.
+ * The actual FIFO 2 (DDR_KEY_REQ_PIPE) size is
+ * REGFILE_FIFO_START_ADDR_3 - REGFILE_FIFO_START_ADDR_2 + 2.
+ * The actual FIFO 3 (BLOCKING_PENDING_FIFO) size is
+ * REGFILE_FIFO_START_ADDR_4 - REGFILE_FIFO_START_ADDR_3 + 2.
+ */
+#define NATC_REGFILE_FIFO_START_ADDR_0	0x424
+
+/* REGFILE FIFO 3 Start Address */
+#define  FIFO_START_ADDR_0_REGFILE_FIFO_START_ADDR_3_SHIFT	24
+#define  FIFO_START_ADDR_0_REGFILE_FIFO_START_ADDR_3_MASK	0xff000000
+
+/* REGFILE FIFO 2 Start Address */
+#define  FIFO_START_ADDR_0_REGFILE_FIFO_START_ADDR_2_SHIFT	16
+#define  FIFO_START_ADDR_0_REGFILE_FIFO_START_ADDR_2_MASK	0xff0000
+
+/* REGFILE FIFO 1 Start Address */
+#define  FIFO_START_ADDR_0_REGFILE_FIFO_START_ADDR_1_SHIFT	8
+#define  FIFO_START_ADDR_0_REGFILE_FIFO_START_ADDR_1_MASK	0xff00
+
+/* REGFILE FIFO 0 Start Address */
+#define  FIFO_START_ADDR_0_REGFILE_FIFO_START_ADDR_0_SHIFT	0
+#define  FIFO_START_ADDR_0_REGFILE_FIFO_START_ADDR_0_MASK	0xff
+
+
+/*
+ * Register <REGFILE FIFO Start Address1>
+ *
+ * REGFILE FIFO Start Address register 1. The actual FIFO size is 2 more
+ * than the number programmed in this register, due to input and output
+ * holder registers which account for 2 additional depth.
+ * The delta between REGFILE_FIFO_START_ADDR_4 and
+ * REGFILE_FIFO_START_ADDR_5, REGFILE_FIFO_START_ADDR_5 and
+ * REGFILE_FIFO_START_ADDR_6, REGFILE_FIFO_START_ADDR_6 and
+ * REGFILE_FIFO_START_ADDR_7 need to be identical since these are used
+ * for the same wide FIFO.
+ * The actual FIFO 4 (DDR_WRITE_RESULT_FIFO) size is
+ * REGFILE_FIFO_START_ADDR_5 - REGFILE_FIFO_START_ADDR_4 + 2.
+ * The actual FIFO 5 (DDR_WRITE_RESULT_FIFO) size is
+ * REGFILE_FIFO_START_ADDR_6 - REGFILE_FIFO_START_ADDR_5 + 2.
+ * The actual FIFO 6 (DDR_WRITE_RESULT_FIFO) size is
+ * REGFILE_FIFO_START_ADDR_7 - REGFILE_FIFO_START_ADDR_6 + 2.
+ * The actual FIFO 7 size is the same as FIFO 4, 5, 6.
+ */
+#define NATC_REGFILE_FIFO_START_ADDR_1	0x428
+
+/*
+ * REGFILE FIFO 7 Start Address -- Note that this entry is not used in
+ * 68460
+*/
+#define  FIFO_START_ADDR_1_REGFILE_FIFO_START_ADDR_7_SHIFT	24
+#define  FIFO_START_ADDR_1_REGFILE_FIFO_START_ADDR_7_MASK	0xff000000
+
+/* REGFILE FIFO 6 Start Address */
+#define  FIFO_START_ADDR_1_REGFILE_FIFO_START_ADDR_6_SHIFT	16
+#define  FIFO_START_ADDR_1_REGFILE_FIFO_START_ADDR_6_MASK	0xff0000
+
+/* REGFILE FIFO 5 Start Address */
+#define  FIFO_START_ADDR_1_REGFILE_FIFO_START_ADDR_5_SHIFT	8
+#define  FIFO_START_ADDR_1_REGFILE_FIFO_START_ADDR_5_MASK	0xff00
+
+/* REGFILE FIFO 4 Start Address */
+#define  FIFO_START_ADDR_1_REGFILE_FIFO_START_ADDR_4_SHIFT	0
+#define  FIFO_START_ADDR_1_REGFILE_FIFO_START_ADDR_4_MASK	0xff
+
+
+#endif /* ! XRDP_REGS_NATC_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_psram.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_psram.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_psram.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_psram.h	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,729 @@
+#ifndef XRDP_REGS_PSRAM_H_
+#define XRDP_REGS_PSRAM_H_
+
+/* relative to core */
+#define PSRAM_OFFSET_0			0x600000
+
+/*
+ * Registers <PSRAM_MEM_ENTRY> - <x> is [ 0 => 65535 ]
+ *
+ * psram_mem_entry
+ */
+#define PSRAM_MEMORY_DATA(x)		(0x0 + (x) * 0x4)
+
+/* data */
+#define  PSRAM_MEMORY_DATA_DATA_SHIFT	0
+#define  PSRAM_MEMORY_DATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <CONTROL>
+ *
+ * control reg
+ */
+#define PSRAM_CFGS_CTRL			0x799400
+
+/*
+ * 1: enable memory banks permutations.
+ * 0: disable.
+*/
+#define  PSRAM_CFGS_CTRL_PERM_EN_MASK	0x1
+
+/*
+ * 1: enable memory banks combinations.
+ * 0: disable.
+*/
+#define  PSRAM_CFGS_CTRL_COMB_EN_MASK	0x2
+
+/*
+ * 1: enable full combinations (also on same 4-banks).
+ * 0: disable full combinations (allow only on opposite 4-banks).
+*/
+#define  PSRAM_CFGS_CTRL_COMB_FULL_MASK	0x4
+
+/*
+ * 1: all 8 banks are active.
+ * 0: only 4 banks are active.
+*/
+#define  PSRAM_CFGS_CTRL_BANKS8_MASK	0x8
+
+/* ub_i_reqin_eswap for ubus slave port - Not connected */
+#define  PSRAM_CFGS_CTRL_UB0_REQIN_ESWAP_MASK	0x10
+
+/* ub_i_repout_eswap for ubus slave port - Not connected */
+#define  PSRAM_CFGS_CTRL_UB0_REPOUT_ESWAP_MASK	0x20
+
+/* ub_i_reqin_eswap for ubus slave port - Not connected */
+#define  PSRAM_CFGS_CTRL_UB1_REQIN_ESWAP_MASK	0x40
+
+/* ub_i_repout_eswap for ubus slave port - Not connected */
+#define  PSRAM_CFGS_CTRL_UB1_REPOUT_ESWAP_MASK	0x80
+
+/* ub_i_reqin_eswap for ubus slave port - Not connected */
+#define  PSRAM_CFGS_CTRL_UB2_REQIN_ESWAP_MASK	0x100
+
+/* ub_i_repout_eswap for ubus slave port - Not connected */
+#define  PSRAM_CFGS_CTRL_UB2_REPOUT_ESWAP_MASK	0x200
+
+/* ub_i_reqin_eswap for ubus slave port - Not connected */
+#define  PSRAM_CFGS_CTRL_UB3_REQIN_ESWAP_MASK	0x400
+
+/* ub_i_repout_eswap for ubus slave port - Not connected */
+#define  PSRAM_CFGS_CTRL_UB3_REPOUT_ESWAP_MASK	0x800
+
+/*
+ * 1: stall ec client if it wants to read the same page as one of the
+ * pages in the ubus write buffers.
+ * 0: don't stall.
+*/
+#define  PSRAM_CFGS_CTRL_COH_EN_EC0_MASK	0x1000
+
+/*
+ * 1: stall ec client if it wants to read the same page as one of the
+ * pages in the ubus write buffers.
+ * 0: don't stall.
+*/
+#define  PSRAM_CFGS_CTRL_COH_EN_EC1_MASK	0x2000
+
+/*
+ * 1: stall ec client if it wants to read the same page as one of the
+ * pages in the ubus write buffers.
+ * 0: don't stall.
+*/
+#define  PSRAM_CFGS_CTRL_COH_EN_EC2_MASK	0x4000
+
+/* arbitration weight for client 0 - currently not used. */
+#define  PSRAM_CFGS_CTRL_WT_0_SHIFT	15
+#define  PSRAM_CFGS_CTRL_WT_0_MASK	0x38000
+
+/* arbitration weight for client 1 - currently not used. */
+#define  PSRAM_CFGS_CTRL_WT_1_SHIFT	18
+#define  PSRAM_CFGS_CTRL_WT_1_MASK	0x1c0000
+
+/* arbitration weight for client 2 - currently not used. */
+#define  PSRAM_CFGS_CTRL_WT_2_SHIFT	21
+#define  PSRAM_CFGS_CTRL_WT_2_MASK	0xe00000
+
+/*
+ * 1: rr between all clients.
+ * 0: ubus is high priority (def).
+*/
+#define  PSRAM_CFGS_CTRL_ARB_RR_MASK	0x1000000
+
+
+/*
+ * Register <CLOCK_GATE_CONTROL>
+ *
+ * Clock Gate control register including timer config and bypass control
+ */
+#define PSRAM_CFGS_CLK_GATE_CNTRL	0x79940c
+
+/*
+ * If set to 1'b1, disables the clock gate logic so as to always enable
+ * the clock.
+*/
+#define  PSRAM_CFGS_CLK_GATE_CNTRL_BYPASS_CLK_GATE_MASK	0x1
+
+/*
+ * For how long should the clock stay active once all conditions for clock
+ * disable are met.
+*/
+#define  PSRAM_CFGS_CLK_GATE_CNTRL_TIMER_VAL_SHIFT	8
+#define  PSRAM_CFGS_CLK_GATE_CNTRL_TIMER_VAL_MASK	0xff00
+
+/*
+ * Enables the keep alive logic which will periodically enable the clock to
+ * assure that no deadlock of clock being removed completely will occur
+*/
+#define  PSRAM_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_EN_MASK	0x10000
+
+/*
+ * If the KEEP alive option is enabled the field will determine for how
+ * many cycles should the clock be active
+*/
+#define  PSRAM_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_SHIFT	20
+#define  PSRAM_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_MASK	0x700000
+
+/*
+ * If the KEEP alive option is enabled, this field will determine for how
+ * many cycles the clock should be disabled (minus the
+ * KEEP_ALIVE_INTERVAL), so KEEP_ALIVE_CYCLE must be larger than
+ * KEEP_ALIVE_INTERVAL.
+*/
+#define  PSRAM_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_SHIFT	24
+#define  PSRAM_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_MASK	0xff000000
+
+
+/*
+ * Register <MON_USER_EN>
+ *
+ * this register contains a bit for enable/disable of the counters.
+ * The counters will be reset to zero on the positive edge of the enable
+ * bit, and will count until the time window which is decrement counter,
+ * will reach zero, or until the enable bit will be de-asserted.
+ */
+#define PSRAM_PM_COUNTERS_MUEN		0x799500
+
+/* enable monitor for client 0 */
+#define  PSRAM_PM_COUNTERS_MUEN_CL0MEN_MASK	0x1
+
+/* enable monitor for client 1 */
+#define  PSRAM_PM_COUNTERS_MUEN_CL1MEN_MASK	0x2
+
+/* enable monitor for client 2 */
+#define  PSRAM_PM_COUNTERS_MUEN_CL2MEN_MASK	0x4
+
+/* enable monitor for client 3 */
+#define  PSRAM_PM_COUNTERS_MUEN_CL3MEN_MASK	0x8
+
+/* enable monitor for client 4 */
+#define  PSRAM_PM_COUNTERS_MUEN_CL4MEN_MASK	0x10
+
+/* enable monitor for client 5 */
+#define  PSRAM_PM_COUNTERS_MUEN_CL5MEN_MASK	0x20
+
+/* enable monitor for client 6 */
+#define  PSRAM_PM_COUNTERS_MUEN_CL6MEN_MASK	0x40
+
+
+/*
+ * Register <BW_COUNTS_CLOCKS>
+ *
+ * determines the time window in which we perform the bandwidth
+ * monitoring(on cyclic mode - when cyclic_bw_check_en=1)
+ */
+#define PSRAM_PM_COUNTERS_BWCL		0x799504
+
+/* measure time window in clock cycles */
+#define  PSRAM_PM_COUNTERS_BWCL_TW_SHIFT	0
+#define  PSRAM_PM_COUNTERS_BWCL_TW_MASK	0xffffffff
+
+
+/*
+ * Register <BW_ENABLE>
+ *
+ * pm_bw_check_en - start of new monitoring session.
+ * resets counters on rise.
+ * cyclic_bw_check_en - if this enabled - when the bw period reaches its
+ * limit - the counters are reset.
+ */
+#define PSRAM_PM_COUNTERS_BWEN		0x799508
+
+/*
+ * start of new monitoring session.
+ * zeroes counters on rise.
+*/
+#define  PSRAM_PM_COUNTERS_BWEN_BWCEN_MASK	0x1
+
+/*
+ * if this enabled - when the bw period reaches its limit - the counters
+ * are reset.
+*/
+#define  PSRAM_PM_COUNTERS_BWEN_CBWCEN_MASK	0x100
+
+
+/*
+ * Registers <MAX_TIME_SERVED> - <x> is [ 0 => 6 ] - read-only
+ *
+ * This array of counters hold the maximum time in clock cycles the client
+ * has waited from the moment it had a request pending to the time the
+ * request gained arbitration.
+ */
+#define PSRAM_PM_COUNTERS_MAX_TIME(x)	(0x799510 + (x) * 0x4)
+
+/* max wait time */
+#define  PSRAM_PM_COUNTERS_MAX_TIME_MAX_SHIFT	0
+#define  PSRAM_PM_COUNTERS_MAX_TIME_MAX_MASK	0xffffffff
+
+
+/*
+ * Registers <ACCUMULATE_TIME_SERVED> - <x> is [ 0 => 6 ] - read-only
+ *
+ * This array of counters hold the accumulated time in clock cycles the
+ * client has waited from the moment it had a request pending to the time
+ * the request gained arbitration.
+ * For each access to arbiter, it will be at least 1 cycle.
+ */
+#define PSRAM_PM_COUNTERS_ACC_TIME(x)	(0x799530 + (x) * 0x4)
+
+/* max wait time */
+#define  PSRAM_PM_COUNTERS_ACC_TIME_MAX_SHIFT	0
+#define  PSRAM_PM_COUNTERS_ACC_TIME_MAX_MASK	0xffffffff
+
+
+/*
+ * Registers <ACCUMULATE_REQ_SERVED> - <x> is [ 0 => 6 ] - read-only
+ *
+ * This array of counters hold the accumulated number of requests that was
+ * served per user.
+ */
+#define PSRAM_PM_COUNTERS_ACC_REQ(x)	(0x799550 + (x) * 0x4)
+
+/* accumulated number of served requests */
+#define  PSRAM_PM_COUNTERS_ACC_REQ_REQ_SHIFT	0
+#define  PSRAM_PM_COUNTERS_ACC_REQ_REQ_MASK	0xffffffff
+
+
+/*
+ * Registers <ACCUMULATE_TIME_LAST> - <x> is [ 0 => 6 ] - read-only
+ *
+ * This array of counters hold the result of the last measure of
+ * accumulated time in clock cycles the client has waited from the moment
+ * it had a request pending to the time the request gained arbitration.
+ */
+#define PSRAM_PM_COUNTERS_LAST_ACC_TIME(x)	(0x799570 + (x) * 0x4)
+
+/* accumulated wait time */
+#define  PSRAM_PM_COUNTERS_LAST_ACC_TIME_TIME_SHIFT	0
+#define  PSRAM_PM_COUNTERS_LAST_ACC_TIME_TIME_MASK	0xffffffff
+
+
+/*
+ * Registers <ACCUMULATE_REQ_LAST> - <x> is [ 0 => 6 ] - read-only
+ *
+ * This array of counters hold the last result of accumulated number of
+ * requests that was served per user on cyclic measure.
+ */
+#define PSRAM_PM_COUNTERS_LAST_ACC_REQ(x)	(0x799590 + (x) * 0x4)
+
+/* accumulated number of served requests */
+#define  PSRAM_PM_COUNTERS_LAST_ACC_REQ_REQ_SHIFT	0
+#define  PSRAM_PM_COUNTERS_LAST_ACC_REQ_REQ_MASK	0xffffffff
+
+
+/*
+ * Register <BW_COUNTS_DATA_WR_ACC> - read-only
+ *
+ * This counter holds the sum of the WR_CNT array.
+ * It holds the result of the current measure.
+ * If the measure is a single measure, the result will be kept until
+ * de-assertion and assertion of the SINGLE start bit.
+ */
+#define PSRAM_PM_COUNTERS_BW_WR_CNT_ACC	0x7995b0
+
+/* Number of double words that were written to the DDR per client */
+#define  PSRAM_PM_COUNTERS_BW_WR_CNT_ACC_CNT_SHIFT	0
+#define  PSRAM_PM_COUNTERS_BW_WR_CNT_ACC_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <BW_COUNTS_DATA_RD_ACC> - read-only
+ *
+ * This counter holds the sum of the RD_CNT array.
+ * It holds the result of the current measure.
+ * If the measure is a single measure, the result will be kept until
+ * de-assertion and assertion of the SINGLE start bit.
+ */
+#define PSRAM_PM_COUNTERS_BW_RD_CNT_ACC	0x7995b4
+
+/* Number of double words that were written to the DDR per client */
+#define  PSRAM_PM_COUNTERS_BW_RD_CNT_ACC_CNT_SHIFT	0
+#define  PSRAM_PM_COUNTERS_BW_RD_CNT_ACC_CNT_MASK	0xffffffff
+
+
+/*
+ * Registers <BW_COUNTS_DATA_WR> - <x> is [ 0 => 6 ] - read-only
+ *
+ * This array of counters holds the number of double words written to the
+ * psram per client.
+ * It holds the result of the current measure.
+ * If the measure is a single measure, the result will be kept until
+ * de-assertion and assertion of the SINGLE start bit.
+ */
+#define PSRAM_PM_COUNTERS_BW_WR_CNT(x)	(0x7995b8 + (x) * 0x4)
+
+/* Number of double words that were written to the DDR per client */
+#define  PSRAM_PM_COUNTERS_BW_WR_CNT_CNT_SHIFT	0
+#define  PSRAM_PM_COUNTERS_BW_WR_CNT_CNT_MASK	0xffffffff
+
+
+/*
+ * Registers <BW_COUNTS_DATA_RD> - <x> is [ 0 => 6 ] - read-only
+ *
+ * This array of counters holds the number of double words read from the
+ * psram per client.
+ * It holds the result of the current measure.
+ * If the measure is a single measure, the result will be kept until
+ * de-assertion and assertion of the SINGLE start bit.
+ */
+#define PSRAM_PM_COUNTERS_BW_RD_CNT(x)	(0x7995d8 + (x) * 0x4)
+
+/* Number of double words that were written to the DDR per client */
+#define  PSRAM_PM_COUNTERS_BW_RD_CNT_CNT_SHIFT	0
+#define  PSRAM_PM_COUNTERS_BW_RD_CNT_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <BW_COUNTS_DATA_WR_LAST_ACC> - read-only
+ *
+ * This counter is a sum of the WR_CNT_LAST counters, which holds the
+ * number of double words written to the psram per client.
+ * When the measure is cyclic, it holds the result of the last measure,
+ * sampled once every end of a time window.
+ */
+#define PSRAM_PM_COUNTERS_BW_WR_CNT_LAST_ACC	0x7995f8
+
+/* Number of double words that were written to the DDR per client */
+#define  PSRAM_PM_COUNTERS_BW_WR_CNT_LAST_ACC_CNT_SHIFT	0
+#define  PSRAM_PM_COUNTERS_BW_WR_CNT_LAST_ACC_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <BW_COUNTS_DATA_RD_LAST_ACC> - read-only
+ *
+ * This counter is a sum of the RD_CNT_LAST counters, which holds the
+ * number of double words written to the psram per client.
+ * When the measure is cyclic, it holds the result of the last measure,
+ * sampled once every end of a time window.
+ */
+#define PSRAM_PM_COUNTERS_BW_RD_CNT_LAST_ACC	0x7995fc
+
+/* Number of double words that were written to the DDR per client */
+#define  PSRAM_PM_COUNTERS_BW_RD_CNT_LAST_ACC_CNT_SHIFT	0
+#define  PSRAM_PM_COUNTERS_BW_RD_CNT_LAST_ACC_CNT_MASK	0xffffffff
+
+
+/*
+ * Registers <BW_COUNTS_DATA_WR_LAST> - <x> is [ 0 => 6 ] - read-only
+ *
+ * This array of counters holds the number of double words written to the
+ * psram per client.
+ * When the measure is cyclic, it holds the result of the last measure,
+ * sampled once every end of a time window.
+ */
+#define PSRAM_PM_COUNTERS_BW_WR_CNT_LAST(x)	(0x799600 + (x) * 0x4)
+
+/* Number of double words that were written to the DDR per client */
+#define  PSRAM_PM_COUNTERS_BW_WR_CNT_LAST_CNT_SHIFT	0
+#define  PSRAM_PM_COUNTERS_BW_WR_CNT_LAST_CNT_MASK	0xffffffff
+
+
+/*
+ * Registers <BW_COUNTS_DATA_RD_LAST> - <x> is [ 0 => 6 ] - read-only
+ *
+ * This array of counters holds the number of double words read from the
+ * psram per client.
+ * When the measure is cyclic, it holds the result of the last measure,
+ * sampled once every end of a time window.
+ */
+#define PSRAM_PM_COUNTERS_BW_RD_CNT_LAST(x)	(0x799620 + (x) * 0x4)
+
+/* Number of double words that were read from the DDR per client */
+#define  PSRAM_PM_COUNTERS_BW_RD_CNT_LAST_CNT_SHIFT	0
+#define  PSRAM_PM_COUNTERS_BW_RD_CNT_LAST_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <ARB_TOTAL_REQ_CYCLES> - read-only
+ *
+ * Number of cycles there were requests (even one)
+ */
+#define PSRAM_PM_COUNTERS_ARB_REQ	0x799640
+
+/* value */
+#define  PSRAM_PM_COUNTERS_ARB_REQ_VAL_SHIFT	0
+#define  PSRAM_PM_COUNTERS_ARB_REQ_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <ARB_TOTAL_ARB_CYCLES> - read-only
+ *
+ * Number of cycles there were more than 1 request for arbitration
+ */
+#define PSRAM_PM_COUNTERS_ARB_ARB	0x799644
+
+/* value */
+#define  PSRAM_PM_COUNTERS_ARB_ARB_VAL_SHIFT	0
+#define  PSRAM_PM_COUNTERS_ARB_ARB_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <ARB_TOTAL_COMB_CYCLES> - read-only
+ *
+ * Number of cycles there were commands combinations
+ */
+#define PSRAM_PM_COUNTERS_ARB_COMB	0x799648
+
+/* value */
+#define  PSRAM_PM_COUNTERS_ARB_COMB_VAL_SHIFT	0
+#define  PSRAM_PM_COUNTERS_ARB_COMB_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <ARB_TOTAL_COMB_SAME4_CYCLES> - read-only
+ *
+ * Number of cycles there were commands combinations in the same 4 banks
+ */
+#define PSRAM_PM_COUNTERS_ARB_COMB_4	0x79964c
+
+/* value */
+#define  PSRAM_PM_COUNTERS_ARB_COMB_4_VAL_SHIFT	0
+#define  PSRAM_PM_COUNTERS_ARB_COMB_4_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <ARB_TOTAL_COMB_BANKS> - read-only
+ *
+ * Number of total banks that were accessed during commands combinations
+ * cycles
+ */
+#define PSRAM_PM_COUNTERS_ARB_COMB_BANKS	0x799650
+
+/* value */
+#define  PSRAM_PM_COUNTERS_ARB_COMB_BANKS_VAL_SHIFT	0
+#define  PSRAM_PM_COUNTERS_ARB_COMB_BANKS_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DBG_MUX_SEL>
+ *
+ * selects the debug vector
+ */
+#define PSRAM_DEBUG_DBGSEL		0x799700
+
+/* selects the debug vector */
+#define  PSRAM_DEBUG_DBGSEL_VS_SHIFT	0
+#define  PSRAM_DEBUG_DBGSEL_VS_MASK	0xff
+
+
+/*
+ * Register <DBG_BUS> - read-only
+ *
+ * the debug bus
+ */
+#define PSRAM_DEBUG_DBGBUS		0x799704
+
+/* debug vector */
+#define  PSRAM_DEBUG_DBGBUS_VB_SHIFT	0
+#define  PSRAM_DEBUG_DBGBUS_VB_MASK	0xffffffff
+
+
+/*
+ * Register <REQUEST_VECTOR> - read-only
+ *
+ * vector of all the requests of the clients
+ */
+#define PSRAM_DEBUG_REQ_VEC		0x799708
+
+/* still more commands in the tx fifo */
+#define  PSRAM_DEBUG_REQ_VEC_MIPSC_REQ_MASK	0x1
+
+/* still more commands in the tx fifo */
+#define  PSRAM_DEBUG_REQ_VEC_RNRA_REQ_MASK	0x2
+
+/* still more commands in the tx fifo */
+#define  PSRAM_DEBUG_REQ_VEC_RNRB_REQ_MASK	0x4
+
+/* still more commands in the tx fifo */
+#define  PSRAM_DEBUG_REQ_VEC_SDMA_REQ_MASK	0x8
+
+/* still more commands in the tx fifo */
+#define  PSRAM_DEBUG_REQ_VEC_MIPSD_REQ_MASK	0x10
+
+/* still more commands in the tx fifo */
+#define  PSRAM_DEBUG_REQ_VEC_MIPSDMA_REQ_MASK	0x20
+
+
+/*
+ * Register <DBG_CAP_CFG1>
+ *
+ * debug capture config
+ */
+#define PSRAM_DEBUG_DBG_CAP_CFG1	0x799780
+
+/* selects bank to capture */
+#define  PSRAM_DEBUG_DBG_CAP_CFG1_BANK_SEL_SHIFT	0
+#define  PSRAM_DEBUG_DBG_CAP_CFG1_BANK_SEL_MASK	0x7
+
+/* selects bank address to capture */
+#define  PSRAM_DEBUG_DBG_CAP_CFG1_BANK_ADD_SEL_SHIFT	4
+#define  PSRAM_DEBUG_DBG_CAP_CFG1_BANK_ADD_SEL_MASK	0xfff0
+
+/* capture write enable */
+#define  PSRAM_DEBUG_DBG_CAP_CFG1_CAP_WR_EN_MASK	0x10000
+
+/* capture read enable */
+#define  PSRAM_DEBUG_DBG_CAP_CFG1_CAP_RD_EN_MASK	0x20000
+
+
+/*
+ * Register <DBG_CAP_CFG2>
+ *
+ * debug capture config
+ */
+#define PSRAM_DEBUG_DBG_CAP_CFG2	0x799784
+
+/*
+ * maximum of captures for write.
+ * 0 means infinite.
+*/
+#define  PSRAM_DEBUG_DBG_CAP_CFG2_MAX_WR_CAP_SHIFT	0
+#define  PSRAM_DEBUG_DBG_CAP_CFG2_MAX_WR_CAP_MASK	0xff
+
+/*
+ * maximum of captures for read.
+ * 0 means infinite.
+*/
+#define  PSRAM_DEBUG_DBG_CAP_CFG2_MAX_RD_CAP_SHIFT	8
+#define  PSRAM_DEBUG_DBG_CAP_CFG2_MAX_RD_CAP_MASK	0xff00
+
+/*
+ * reset the counting and start new one.
+ * should be asserted, then deasserted, then counting starts again.
+*/
+#define  PSRAM_DEBUG_DBG_CAP_CFG2_WR_CAP_CNT_RST_MASK	0x10000
+
+/*
+ * reset the counting and start new one.
+ * should be asserted, then deasserted, then counting starts again.
+*/
+#define  PSRAM_DEBUG_DBG_CAP_CFG2_RD_CAP_CNT_RST_MASK	0x20000
+
+
+/*
+ * Register <DBG_CAP_STAT> - read-only
+ *
+ * debug capture status
+ */
+#define PSRAM_DEBUG_DBG_CAP_ST		0x799788
+
+/*
+ * actual current capture num for write.
+ * max is FFFF (no wrap).
+*/
+#define  PSRAM_DEBUG_DBG_CAP_ST_WR_CAP_NUM_ST_SHIFT	0
+#define  PSRAM_DEBUG_DBG_CAP_ST_WR_CAP_NUM_ST_MASK	0xff
+
+/*
+ * actual current capture num for read.
+ * max is FFFF (no wrap).
+*/
+#define  PSRAM_DEBUG_DBG_CAP_ST_RD_CAP_NUM_ST_SHIFT	8
+#define  PSRAM_DEBUG_DBG_CAP_ST_RD_CAP_NUM_ST_MASK	0xff00
+
+
+/*
+ * Register <DBG_CAP_WDATA0> - read-only
+ *
+ * debug capture write data0 register [32*1-1:
+ * 32*0]
+ */
+#define PSRAM_DEBUG_DBG_CAP_W0		0x799790
+
+/* capture vector */
+#define  PSRAM_DEBUG_DBG_CAP_W0_CV_SHIFT	0
+#define  PSRAM_DEBUG_DBG_CAP_W0_CV_MASK	0xffffffff
+
+
+/*
+ * Register <DBG_CAP_WDATA1> - read-only
+ *
+ * debug capture write data1 register [32*2-1:
+ * 32*1]
+ */
+#define PSRAM_DEBUG_DBG_CAP_W1		0x799794
+
+/* capture vector */
+#define  PSRAM_DEBUG_DBG_CAP_W1_CV_SHIFT	0
+#define  PSRAM_DEBUG_DBG_CAP_W1_CV_MASK	0xffffffff
+
+
+/*
+ * Register <DBG_CAP_WDATA2> - read-only
+ *
+ * debug capture write data2 register [32*3-1:
+ * 32*2]
+ */
+#define PSRAM_DEBUG_DBG_CAP_W2		0x799798
+
+/* capture vector */
+#define  PSRAM_DEBUG_DBG_CAP_W2_CV_SHIFT	0
+#define  PSRAM_DEBUG_DBG_CAP_W2_CV_MASK	0xffffffff
+
+
+/*
+ * Register <DBG_CAP_WDATA3> - read-only
+ *
+ * debug capture write data3 register [32*4-1:
+ * 32*3]
+ */
+#define PSRAM_DEBUG_DBG_CAP_W3		0x79979c
+
+/* capture vector */
+#define  PSRAM_DEBUG_DBG_CAP_W3_CV_SHIFT	0
+#define  PSRAM_DEBUG_DBG_CAP_W3_CV_MASK	0xffffffff
+
+
+/*
+ * Register <DBG_CAP_WDATA_MASK> - read-only
+ *
+ * debug capture write mask register (16b for 16B=128b of data in bank row)
+ */
+#define PSRAM_DEBUG_DBG_CAP_WMSK	0x7997a0
+
+/* capture vector */
+#define  PSRAM_DEBUG_DBG_CAP_WMSK_CV_SHIFT	0
+#define  PSRAM_DEBUG_DBG_CAP_WMSK_CV_MASK	0xffffffff
+
+
+/*
+ * Register <DBG_CAP_RDATA0> - read-only
+ *
+ * debug capture read data0 register [32*1-1:
+ * 32*0]
+ */
+#define PSRAM_DEBUG_DBG_CAP_R0		0x7997b0
+
+/* capture vector */
+#define  PSRAM_DEBUG_DBG_CAP_R0_CV_SHIFT	0
+#define  PSRAM_DEBUG_DBG_CAP_R0_CV_MASK	0xffffffff
+
+
+/*
+ * Register <DBG_CAP_RDATA1> - read-only
+ *
+ * debug capture read data1 register [32*2-1:
+ * 32*1]
+ */
+#define PSRAM_DEBUG_DBG_CAP_R1		0x7997b4
+
+/* capture vector */
+#define  PSRAM_DEBUG_DBG_CAP_R1_CV_SHIFT	0
+#define  PSRAM_DEBUG_DBG_CAP_R1_CV_MASK	0xffffffff
+
+
+/*
+ * Register <DBG_CAP_RDATA2> - read-only
+ *
+ * debug capture read data2 register [32*3-1:
+ * 32*2]
+ */
+#define PSRAM_DEBUG_DBG_CAP_R2		0x7997b8
+
+/* capture vector */
+#define  PSRAM_DEBUG_DBG_CAP_R2_CV_SHIFT	0
+#define  PSRAM_DEBUG_DBG_CAP_R2_CV_MASK	0xffffffff
+
+
+/*
+ * Register <DBG_CAP_RDATA3> - read-only
+ *
+ * debug capture read data3 register [32*4-1:
+ * 32*3]
+ */
+#define PSRAM_DEBUG_DBG_CAP_R3		0x7997bc
+
+/* capture vector */
+#define  PSRAM_DEBUG_DBG_CAP_R3_CV_SHIFT	0
+#define  PSRAM_DEBUG_DBG_CAP_R3_CV_MASK	0xffffffff
+
+
+#endif /* ! XRDP_REGS_PSRAM_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_qm.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_qm.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_qm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_qm.h	2025-09-25 17:40:35.071364767 +0200
@@ -0,0 +1,4528 @@
+#ifndef XRDP_REGS_QM_H_
+#define XRDP_REGS_QM_H_
+
+/* relative to core */
+#define QM_OFFSET_0			0x100000
+
+/*
+ * Register <QM_ENABLE_CTRL>
+ *
+ * QM Enable register
+ */
+#define QM_GLOBAL_CFG_QM_ENABLE_CTRL	0x0
+
+/*
+ * FPM Prefetch Enable.
+ * Setting this bit to 1 will start filling up the FPM pool prefetch FIFOs.
+ * Setting this bit to 0 will stop FPM prefetches.
+*/
+#define  QM_GLOBAL_CFG_QM_ENABLE_CTRL_FPM_PREFETCH_ENABLE_MASK	0x1
+
+/*
+ * When this bit is set the QM will send credits to the REORDER block.
+ * Disabling this bit will stop sending credits to the reorder.
+*/
+#define  QM_GLOBAL_CFG_QM_ENABLE_CTRL_REORDER_CREDIT_ENABLE_MASK	0x2
+
+/*
+ * When this bit is set the QM will pop PDs from the DQM and place them in
+ * the runner SRAM
+*/
+#define  QM_GLOBAL_CFG_QM_ENABLE_CTRL_DQM_POP_ENABLE_MASK	0x4
+
+/*
+ * When this bit is set Fixed arbitration will be done in pops from the
+ * remote FIFOs (Non delayed highest priority).
+ * If this bit is cleared RR arbitration is done
+*/
+#define  QM_GLOBAL_CFG_QM_ENABLE_CTRL_RMT_FIXED_ARB_ENABLE_MASK	0x100
+
+/*
+ * When this bit is set Fixed arbitration will be done in DQM pushes (CPU
+ * highest priority, then non-delayed queues and then normal queues.
+ * If this bit is cleared RR arbitration is done.
+*/
+#define  QM_GLOBAL_CFG_QM_ENABLE_CTRL_DQM_PUSH_FIXED_ARB_ENABLE_MASK	0x200
+
+
+/*
+ * Register <QM_SW_RST_CTRL>
+ *
+ * QM soft reset register
+ */
+#define QM_GLOBAL_CFG_QM_SW_RST_CTRL	0x4
+
+/* FPM Prefetch FIFO0 SW reset. */
+#define  QM_GLOBAL_CFG_QM_SW_RST_CTRL_FPM_PREFETCH0_SW_RST_MASK	0x1
+
+/* FPM Prefetch FIFO1 SW reset. */
+#define  QM_GLOBAL_CFG_QM_SW_RST_CTRL_FPM_PREFETCH1_SW_RST_MASK	0x2
+
+/* FPM Prefetch FIFO2 SW reset. */
+#define  QM_GLOBAL_CFG_QM_SW_RST_CTRL_FPM_PREFETCH2_SW_RST_MASK	0x4
+
+/* FPM Prefetch FIFO3 SW reset. */
+#define  QM_GLOBAL_CFG_QM_SW_RST_CTRL_FPM_PREFETCH3_SW_RST_MASK	0x8
+
+/* Normal Remote FIFO SW reset. */
+#define  QM_GLOBAL_CFG_QM_SW_RST_CTRL_NORMAL_RMT_SW_RST_MASK	0x10
+
+/* Non-delayed Remote FIFO SW reset. */
+#define  QM_GLOBAL_CFG_QM_SW_RST_CTRL_NON_DELAYED_RMT_SW_RST_MASK	0x20
+
+/* Pre Copy Machine FIFO SW reset. */
+#define  QM_GLOBAL_CFG_QM_SW_RST_CTRL_PRE_CM_FIFO_SW_RST_MASK	0x40
+
+/* Copy Machine RD PD FIFO SW reset. */
+#define  QM_GLOBAL_CFG_QM_SW_RST_CTRL_CM_RD_PD_FIFO_SW_RST_MASK	0x80
+
+/* Pre Copy Machine FIFO SW reset. */
+#define  QM_GLOBAL_CFG_QM_SW_RST_CTRL_CM_WR_PD_FIFO_SW_RST_MASK	0x100
+
+/* BB0 OUTPUT FIFO SW reset. */
+#define  QM_GLOBAL_CFG_QM_SW_RST_CTRL_BB0_OUTPUT_FIFO_SW_RST_MASK	0x200
+
+/* BB1 Output FIFO SW reset. */
+#define  QM_GLOBAL_CFG_QM_SW_RST_CTRL_BB1_OUTPUT_FIFO_SW_RST_MASK	0x400
+
+/* BB1 Input FIFO SW reset. */
+#define  QM_GLOBAL_CFG_QM_SW_RST_CTRL_BB1_INPUT_FIFO_SW_RST_MASK	0x800
+
+/* TM FIFOs Pointers SW reset. */
+#define  QM_GLOBAL_CFG_QM_SW_RST_CTRL_TM_FIFO_PTR_SW_RST_MASK	0x1000
+
+/* Non delayed output FIFO Pointers SW reset. */
+#define  QM_GLOBAL_CFG_QM_SW_RST_CTRL_NON_DELAYED_OUT_FIFO_SW_RST_MASK	0x2000
+
+
+/*
+ * Register <QM_GENERAL_CTRL>
+ *
+ * QM Enable register
+ */
+#define QM_GLOBAL_CFG_QM_GENERAL_CTRL	0x8
+
+/* Indicates whether the Drop/max_occupancy packets counter is read clear. */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_DROP_CNT_PKTS_READ_CLEAR_ENABLE_MASK	0x1
+
+/* Indicates whether the Drop/max_occupancy bytes counter is read clear. */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_DROP_CNT_BYTES_READ_CLEAR_ENABLE_MASK	0x2
+
+/*
+ * This bit defines the functionality of the drop packets counter.
+ * 0 - Functions as the drop packets counter1 - Functions as the max
+ * packets occupancy holder
+*/
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_DROP_CNT_MAX_OCCUPANCY_PKTS_SELECT_MASK	0x4
+
+/*
+ * This bit defines the functionality of the drop bytes counter.
+ * 0 - Functions as the drop bytes counter1 - Functions as the max bytes
+ * occupancy holder
+*/
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_DROP_CNT_MAX_OCCUPANCY_BYTES_SELECT_MASK	0x8
+
+/*
+ * Indicates The value to put in the last_search field of the SBPM free
+ * with context message
+*/
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_FREE_WITH_CONTEXT_LAST_SEARCH_MASK	0x10
+
+/* Disables WRED influence on drop condition */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_WRED_DISABLE_MASK	0x20
+
+/* Disables DDR_PD_CONGESTION influence on drop/bpcondition */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_DDR_PD_CONGESTION_DISABLE_MASK	0x40
+
+/* Disables DDR_BYTE_CONGESTION influence on drop/bp condition */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_DDR_BYTE_CONGESTION_DISABLE_MASK	0x80
+
+/* Disables DDR_OCCUPANCY influence on drop/bp condition */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_DDR_OCCUPANCY_DISABLE_MASK	0x100
+
+/* Disables DDR_FPM_CONGESTION influence on drop/bp condition */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_DDR_FPM_CONGESTION_DISABLE_MASK	0x200
+
+/* Disables FPM_UG influence on drop condition */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_FPM_UG_DISABLE_MASK	0x400
+
+/* Disables QUEUE_OCCUPANCY_DDR_COPY_DECISION influence on copy condition */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_QUEUE_OCCUPANCY_DDR_COPY_DECISION_DISABLE_MASK	0x800
+
+/* Disables PSRAM_OCCUPANCY_DDR_COPY_DECISION influence on copy condition */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_PSRAM_OCCUPANCY_DDR_COPY_DECISION_DISABLE_MASK	0x1000
+
+/* When set, the multicast bit of the PD will not be sent to BBH TX */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_DONT_SEND_MC_BIT_TO_BBH_MASK	0x2000
+
+/*
+ * When set, aggregations are not closed automatically when queue open
+ * aggregation time expired.
+*/
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_CLOSE_AGGREGATION_ON_TIMEOUT_DISABLE_MASK	0x4000
+
+/*
+ * When cleared, given that there is an FPM congestion situation and all
+ * prefetch FPM buffers are full then a min pool size buffer will be freed
+ * each 1us.
+ * This is done due to the fact that exclusive indication is received only
+ * together with buffer allocation reply and if this will not be done then a
+ * deadlock could occur.
+ * Setting this bit will disable this mechanism.
+*/
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_FPM_CONGESTION_BUF_RELEASE_MECHANISM_DISABLE_MASK	0x8000
+
+/*
+ * FPM over subscription mechanism.
+ * Each queue will have one out of 8 reserved byte threshold profiles.
+ * Each profile defines 8 bit threshold with 512byte resolution.
+ * Once the global FPM counter pass configurable threshold the system goes
+ * to buffer reservation congestion state.
+ * In this state any PD entering a queue which passes the reserved byte
+ * threshold will be dropped.
+*/
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_FPM_BUFFER_GLOBAL_RES_ENABLE_MASK	0x10000
+
+/* Dont drop pd with fpm allocation. */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_QM_PRESERVE_PD_WITH_FPM_MASK	0x20000
+
+/* 0 for 64B/Queue; 1 for 128B/Queue */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_QM_RESIDUE_PER_QUEUE_MASK	0x40000
+
+/*
+ * Controls the timing of updating the overhead counters with packets which
+ * goes through aggregation.
+ * 0 - updates when the packets enters QM1 - updates when aggregation is
+ * done.
+*/
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_GHOST_RPT_UPDATE_AFTER_CLOSE_AGG_EN_MASK	0x80000
+
+/* Disables FPM_UG influence on flow control wake up messages to FW. */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_FPM_UG_FLOW_CTRL_DISABLE_MASK	0x100000
+
+/*
+ * Enables to write packet transaction to multiple slave (unlimited), if
+ * disable only one ubus slave allowed.
+*/
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_DDR_WRITE_MULTI_SLAVE_EN_MASK	0x200000
+
+/* global priority bit to aggregated PDs which go through reprocessing. */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_DDR_PD_CONGESTION_AGG_PRIORITY_MASK	0x400000
+
+/* Disables PSRAM_OCCUPANCY_DROP influence on drop condition */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_PSRAM_OCCUPANCY_DROP_DISABLE_MASK	0x800000
+
+/* 0 - according to length; 1 - 8-byte aligned */
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_QM_DDR_WRITE_ALIGNMENT_MASK	0x1000000
+
+/*
+ * Controls if the exclusive indication in PD marks the PD as dont drop or
+ * as dont drop if the fpm in exclusive state1 - global dont drop0 - FPM
+ * exclusive state dont drop
+*/
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_EXCLUSIVE_DONT_DROP_MASK	0x2000000
+
+/*
+ * when set 1 backpressure will be applied when the DONT_DROP pd should be
+ * dropped.
+ * for example, 0 fpm buffers available and the PD should be copied to DDR.
+*/
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_EXCLUSIVE_DONT_DROP_BP_EN_MASK	0x4000000
+
+/*
+ * If the bit enable, QM round up to 4 every packet length added ghost
+ * counters.
+*/
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_GPON_DBR_CEIL_MASK	0x8000000
+
+/*
+ * Drop counter counts WRED drops by color per queue.
+ * In order to enable this feature the drop counter should be configured to
+ * count drops.
+ * if the drop counter is configured count max occupancy per queue, it will
+ * override WRED drops count.
+ * color 0 - is written in dropped bytes field (word0)color 1 - is written
+ * in dropped pkts field (word1)
+*/
+#define  QM_GLOBAL_CFG_QM_GENERAL_CTRL_DROP_CNT_WRED_DROPS_MASK	0x10000000
+
+
+/*
+ * Register <FPM_CONTROL>
+ *
+ * FPM Control Register
+ */
+#define QM_GLOBAL_CFG_FPM_CONTROL	0xc
+
+/*
+ * This field indicates whether crossing the per pool FPM buffer prefetch
+ * FIFO occupancy thresholds will result in dropping packets or in applying
+ * back pressure to the re-order.
+ * 0 - drop packets1 - apply back pressure
+*/
+#define  QM_GLOBAL_CFG_FPM_CONTROL_FPM_POOL_BP_ENABLE_MASK	0x1
+
+/*
+ * This field indicates whether crossing the FPM congestion threshold will
+ * result in dropping packets or in applying back pressure to the re-order.
+ * 0 - drop packets1 - apply back pressure
+*/
+#define  QM_GLOBAL_CFG_FPM_CONTROL_FPM_CONGESTION_BP_ENABLE_MASK	0x2
+
+/*
+ * FPM prefetch minimum pool size.
+ * The supported FPM pool sizes are derived from this value:
+ * * FPM_PREFETCH_MIN_POOL_SIZEx1* FPM_PREFETCH_MIN_POOL_SIZEx2*
+ * FPM_PREFETCH_MIN_POOL_SIZEx4* FPM_PREFETCH_MIN_POOL_SIZEx8The optional
+ * values for this field:
+ * 0 - 256Byte1 - 512Byte2 - 1024Byte3 - 2048Byte
+*/
+#define  QM_GLOBAL_CFG_FPM_CONTROL_FPM_PREFETCH_MIN_POOL_SIZE_SHIFT	8
+#define  QM_GLOBAL_CFG_FPM_CONTROL_FPM_PREFETCH_MIN_POOL_SIZE_MASK	0x300
+
+/* The allowed on the fly FPM prefetch pending Alloc requests to the FPM. */
+#define  QM_GLOBAL_CFG_FPM_CONTROL_FPM_PREFETCH_PENDING_REQ_LIMIT_SHIFT	16
+#define  QM_GLOBAL_CFG_FPM_CONTROL_FPM_PREFETCH_PENDING_REQ_LIMIT_MASK	0x7f0000
+
+
+/*
+ * Register <DDR_BYTE_CONGESTION_CONTROL>
+ *
+ * DDR Byte Congestion Control Register
+ */
+#define QM_GLOBAL_CFG_DDR_BYTE_CONGESTION_CONTROL	0x10
+
+/*
+ * This field indicates whether crossing the DDR bytes thresholds (the
+ * number of bytes waiting to be copied to DDR) will result in dropping
+ * packets or in applying back pressure to the re-order.
+ * 0 - apply back pressure1 - drop packets
+*/
+#define  QM_GLOBAL_CFG_DDR_BYTE_CONGESTION_CONTROL_DDR_BYTE_CONGESTION_DROP_ENABLE_MASK	0x1
+
+
+/*
+ * Register <DDR_BYTE_CONGESTION_LOWER_THR>
+ *
+ * DDR Byte Congestion Lower Threshold
+ */
+#define QM_GLOBAL_CFG_DDR_BYTE_CONGESTION_LO_THR	0x14
+
+/*
+ * DDR copy bytes Lower Threshold.
+ * When working in packet drop mode (DDR_BYTES_CONGESTION_DROP_ENABLE=1),
+ * Then:
+ * * If (DDR copy bytes counter) > (DDR_BYTES_HIGHER_THR), then all packets
+ * are dropped.
+ * * If (DDR_BYTES_MID_THR) < (DDR copy bytes counter) <=
+ * (DDR_BYTES_HIGHER_THR), then packets in low/high priority are dropped
+ * (only exclusive packets are not dropped).
+ * * If (DDR_BYTES_LOWER_THR) < (DDR copy bytes counter) <=
+ * (DDR_BYTES_MID_THR), then packets in low priority are dropped.
+ * * If (DDR copy bytes counter) <= (DDR_BYTES_LOWER_THR), then no packets
+ * are dropped.
+ * When working in backpressure mode (DDR_BYTES_CONGESTION_DROP_ENABLE=0),
+ * Then if (DDR copy bytes counter) > (DDR_BYTES_HIGHER_THR), then
+ * backpressure is applied to re-order (in this case DDR_BYTES_LOWER_THR
+ * and DDR_BYTES_MID_THR are dont care).
+*/
+#define  QM_GLOBAL_CFG_DDR_BYTE_CONGESTION_LO_THR_DDR_BYTES_LOWER_THR_SHIFT	0
+#define  QM_GLOBAL_CFG_DDR_BYTE_CONGESTION_LO_THR_DDR_BYTES_LOWER_THR_MASK	0x3fffffff
+
+
+/*
+ * Register <DDR_BYTE_CONGESTION_MID_THR>
+ *
+ * DDR Byte Congestion Middle Threshold
+ */
+#define QM_GLOBAL_CFG_DDR_BYTE_CONGESTION_MID_THR	0x18
+
+/*
+ * DDR copy bytes Lower Threshold.
+ * When working in packet drop mode (DDR_BYTES_CONGESTION_DROP_ENABLE=1),
+ * Then:
+ * * If (DDR copy bytes counter) > (DDR_BYTES_HIGHER_THR), then all packets
+ * are dropped.
+ * * If (DDR_BYTES_MID_THR) < (DDR copy bytes counter) <=
+ * (DDR_BYTES_HIGHER_THR), then packets in low/high priority are dropped
+ * (only exclusive packets are not dropped).
+ * * If (DDR_BYTES_LOWER_THR) < (DDR copy bytes counter) <=
+ * (DDR_BYTES_MID_THR), then packets in low priority are dropped.
+ * * If (DDR copy bytes counter) <= (DDR_BYTES_LOWER_THR), then no packets
+ * are dropped.
+ * When working in backpressure mode (DDR_BYTES_CONGESTION_DROP_ENABLE=0),
+ * Then if (DDR copy bytes counter) > (DDR_BYTES_HIGHER_THR), then
+ * backpressure is applied to re-order (in this case DDR_BYTES_LOWER_THR
+ * and DDR_BYTES_MID_THR are dont care).
+*/
+#define  QM_GLOBAL_CFG_DDR_BYTE_CONGESTION_MID_THR_DDR_BYTES_MID_THR_SHIFT	0
+#define  QM_GLOBAL_CFG_DDR_BYTE_CONGESTION_MID_THR_DDR_BYTES_MID_THR_MASK	0x3fffffff
+
+
+/*
+ * Register <DDR_BYTE_CONGESTION_HIGHER_THR>
+ *
+ * DDR Byte Congestion Higher Threshold
+ */
+#define QM_GLOBAL_CFG_DDR_BYTE_CONGESTION_HIGHER_THR	0x1c
+
+/*
+ * DDR copy bytes Lower Threshold.
+ * When working in packet drop mode (DDR_BYTES_CONGESTION_DROP_ENABLE=1),
+ * Then:
+ * * If (DDR copy bytes counter) > (DDR_BYTES_HIGHER_THR), then all packets
+ * are dropped.
+ * * If (DDR_BYTES_MID_THR) < (DDR copy bytes counter) <=
+ * (DDR_BYTES_HIGHER_THR), then packets in low/high priority are dropped
+ * (only exclusive packets are not dropped).
+ * * If (DDR_BYTES_LOWER_THR) < (DDR copy bytes counter) <=
+ * (DDR_BYTES_MID_THR), then packets in low priority are dropped.
+ * * If (DDR copy bytes counter) <= (DDR_BYTES_LOWER_THR), then no packets
+ * are dropped.
+ * When working in backpressure mode (DDR_BYTES_CONGESTION_DROP_ENABLE=0),
+ * Then if (DDR copy bytes counter) > (DDR_BYTES_HIGHER_THR), then
+ * backpressure is applied to re-order (in this case DDR_BYTES_LOWER_THR
+ * and DDR_BYTES_MID_THR are dont care).
+*/
+#define  QM_GLOBAL_CFG_DDR_BYTE_CONGESTION_HIGHER_THR_DDR_BYTES_HIGHER_THR_SHIFT	0
+#define  QM_GLOBAL_CFG_DDR_BYTE_CONGESTION_HIGHER_THR_DDR_BYTES_HIGHER_THR_MASK	0x3fffffff
+
+
+/*
+ * Register <DDR_PD_CONGESTION_CONTROL>
+ *
+ * DDR PD Congestion Control Register
+ */
+#define QM_GLOBAL_CFG_DDR_PD_CONGESTION_CONTROL	0x20
+
+/*
+ * This field indicates whether crossing the DDR Pipe thresholds will
+ * result in dropping packets or in applying back pressure to the re-order.
+ * 0 - apply back pressure1 - drop packets
+*/
+#define  QM_GLOBAL_CFG_DDR_PD_CONGESTION_CONTROL_DDR_PD_CONGESTION_DROP_ENABLE_MASK	0x1
+
+/*
+ * DDR copy Pipe Lower Threshold.
+ * When working in packet drop mode (DDR_PD_CONGESTION_DROP_ENABLE=1),
+ * Then:
+ * * If (DDR copy pipe occupancy) > (DDR_PIPE_HIGHER_THR), then packets in
+ * low/high priority are dropped (only exclusive packets are not dropped).
+ * * If (DDR_PIPE_LOWER_THR) < (DDR copy pipe occupancy) <=
+ * (DDR_PIPE_HIGHER_THR), then packets in low priority are dropped.
+ * * If (DDR copy pipe occupancy) <= (DDR_PIPE_LOWER_THR), then no packets
+ * are dropped.
+ * When working in backpressure mode (DDR_PD_CONGESTION_DROP_ENABLE=0),
+ * Then if (DDR copy pipe occupancy) > (DDR_PIPE_HIGHER_THR), then
+ * backpressure is applied to re-order (in this case DDR_PIPE_LOWER_THR is
+ * dont care).
+*/
+#define  QM_GLOBAL_CFG_DDR_PD_CONGESTION_CONTROL_DDR_PIPE_LOWER_THR_SHIFT	8
+#define  QM_GLOBAL_CFG_DDR_PD_CONGESTION_CONTROL_DDR_PIPE_LOWER_THR_MASK	0xff00
+
+/*
+ * DDR copy Pipe Lower Threshold.
+ * When working in packet drop mode (DDR_PD_CONGESTION_DROP_ENABLE=1),
+ * Then:
+ * * If (DDR copy pipe occupancy) > (DDR_PIPE_HIGHER_THR), then packets in
+ * low/high priority are dropped (only exclusive packets are not dropped).
+ * * If (DDR_PIPE_LOWER_THR) < (DDR copy pipe occupancy) <=
+ * (DDR_PIPE_HIGHER_THR), then packets in low priority are dropped.
+ * * If (DDR copy pipe occupancy) <= (DDR_PIPE_LOWER_THR), then no packets
+ * are dropped.
+ * When working in backpressure mode (DDR_PD_CONGESTION_DROP_ENABLE=0),
+ * Then if (DDR copy pipe occupancy) > (DDR_PIPE_HIGHER_THR), then
+ * backpressure is applied to re-order (in this case DDR_PIPE_LOWER_THR is
+ * dont care).
+ * IMPORTANT:
+ * recommended maximum value is 0x7B in order to avoid performance
+ * degradation when working with aggregation timeout enable
+*/
+#define  QM_GLOBAL_CFG_DDR_PD_CONGESTION_CONTROL_DDR_PIPE_HIGHER_THR_SHIFT	16
+#define  QM_GLOBAL_CFG_DDR_PD_CONGESTION_CONTROL_DDR_PIPE_HIGHER_THR_MASK	0xff0000
+
+
+/*
+ * Register <QM_PD_CONGESTION_CONTROL>
+ *
+ * QM PD Congestion Control Register
+ */
+#define QM_GLOBAL_CFG_QM_PD_CONGESTION_CONTROL	0x24
+
+/*
+ * If the number of PDs for a certain queue exceeds this value, then PDs
+ * will be dropped.
+*/
+#define  QM_GLOBAL_CFG_QM_PD_CONGESTION_CONTROL_TOTAL_PD_THR_SHIFT	0
+#define  QM_GLOBAL_CFG_QM_PD_CONGESTION_CONTROL_TOTAL_PD_THR_MASK	0xfffffff
+
+
+/*
+ * Register <ABS_DROP_QUEUE>
+ *
+ * Absolute Address drop queue
+ */
+#define QM_GLOBAL_CFG_ABS_DROP_QUEUE	0x28
+
+/*
+ * Absolute address drop queue.
+ * Absolute address PDs which are dropped will be redirected into this
+ * configured queue.
+ * FW will be responsible for reclaiming their DDR space.
+*/
+#define  QM_GLOBAL_CFG_ABS_DROP_QUEUE_ABS_DROP_QUEUE_SHIFT	0
+#define  QM_GLOBAL_CFG_ABS_DROP_QUEUE_ABS_DROP_QUEUE_MASK	0x1ff
+
+/*
+ * Absolute address drop queue enable.
+ * Enables the mechanism in which absolute address PDs which are dropped
+ * are redirected into this configured queue.
+ * FW will be responsible for reclaiming their DDR space.
+*/
+#define  QM_GLOBAL_CFG_ABS_DROP_QUEUE_ABS_DROP_QUEUE_EN_MASK	0x10000
+
+
+/*
+ * Register <AGGREGATION_CTRL>
+ *
+ * Aggregation Control register
+ */
+#define QM_GLOBAL_CFG_AGGREGATION_CTRL	0x2c
+
+/* This field indicates the maximum number of bytes in an aggregated PD. */
+#define  QM_GLOBAL_CFG_AGGREGATION_CTRL_MAX_AGG_BYTES_SHIFT	0
+#define  QM_GLOBAL_CFG_AGGREGATION_CTRL_MAX_AGG_BYTES_MASK	0x3ff
+
+/* This field indicates the maximum number of packets in an aggregated PD */
+#define  QM_GLOBAL_CFG_AGGREGATION_CTRL_MAX_AGG_PKTS_SHIFT	16
+#define  QM_GLOBAL_CFG_AGGREGATION_CTRL_MAX_AGG_PKTS_MASK	0x30000
+
+
+/*
+ * Register <FPM_BASE_ADDR>
+ *
+ * FPM Base Address
+ */
+#define QM_GLOBAL_CFG_FPM_BASE_ADDR	0x30
+
+/*
+ * FPM Base Address.
+ * This is the 32-bit MSBs out of the 40-bit address.
+ * Multiply this field by 256 to get the 40-bit address.
+ * Example:
+ * If desired base address is 0x0080_0000The FPM_BASE_ADDR field should be
+ * configured to:
+ * 0x0000_8000.
+*/
+#define  QM_GLOBAL_CFG_FPM_BASE_ADDR_FPM_BASE_ADDR_SHIFT	0
+#define  QM_GLOBAL_CFG_FPM_BASE_ADDR_FPM_BASE_ADDR_MASK	0xffffffff
+
+
+/*
+ * Register <FPM_COHERENT_BASE_ADDR>
+ *
+ * FPM Base Address for PDs that have the coherent bit set
+ */
+#define QM_GLOBAL_CFG_FPM_COHERENT_BASE_ADDR	0x34
+
+/*
+ * FPM Base Address.
+ * This is the 32-bit MSBs out of the 40-bit address.
+ * Multiply this field by 256 to get the 40-bit address.
+ * Example:
+ * If desired base address is 0x0080_0000The FPM_BASE_ADDR field should be
+ * configured to:
+ * 0x0000_8000.
+*/
+#define  QM_GLOBAL_CFG_FPM_COHERENT_BASE_ADDR_FPM_BASE_ADDR_SHIFT	0
+#define  QM_GLOBAL_CFG_FPM_COHERENT_BASE_ADDR_FPM_BASE_ADDR_MASK	0xffffffff
+
+
+/*
+ * Register <DDR_SOP_OFFSET>
+ *
+ * DDR SOP Offset options
+ */
+#define QM_GLOBAL_CFG_DDR_SOP_OFFSET	0x38
+
+/* DDR SOP Offset option 0 */
+#define  QM_GLOBAL_CFG_DDR_SOP_OFFSET_DDR_SOP_OFFSET0_SHIFT	0
+#define  QM_GLOBAL_CFG_DDR_SOP_OFFSET_DDR_SOP_OFFSET0_MASK	0x7ff
+
+/* DDR SOP Offset option 1 */
+#define  QM_GLOBAL_CFG_DDR_SOP_OFFSET_DDR_SOP_OFFSET1_SHIFT	16
+#define  QM_GLOBAL_CFG_DDR_SOP_OFFSET_DDR_SOP_OFFSET1_MASK	0x7ff0000
+
+
+/*
+ * Register <EPON_OVERHEAD_CTRL>
+ *
+ * EPON Ghost reporting configuration
+ */
+#define QM_GLOBAL_CFG_EPON_OVERHEAD_CTRL	0x3c
+
+/* EPON Line Rate: 0 - 1G; 1 - 10G */
+#define  QM_GLOBAL_CFG_EPON_OVERHEAD_CTRL_EPON_LINE_RATE_MASK	0x1
+
+/*
+ * If this bit is not set then 4-bytes will be added to the ghost reporting
+ * accumulated bytes and to the byte overhead calculation input
+*/
+#define  QM_GLOBAL_CFG_EPON_OVERHEAD_CTRL_EPON_CRC_ADD_DISABLE_MASK	0x2
+
+/* Enables to overwrite CRC addition specified MAC FLOW in the field below. */
+#define  QM_GLOBAL_CFG_EPON_OVERHEAD_CTRL_MAC_FLOW_OVERWRITE_CRC_EN_MASK	0x4
+
+/* MAC flow ID to force disable CRC addition */
+#define  QM_GLOBAL_CFG_EPON_OVERHEAD_CTRL_MAC_FLOW_OVERWRITE_CRC_SHIFT	3
+#define  QM_GLOBAL_CFG_EPON_OVERHEAD_CTRL_MAC_FLOW_OVERWRITE_CRC_MASK	0x7f8
+
+/* FEC IPG Length */
+#define  QM_GLOBAL_CFG_EPON_OVERHEAD_CTRL_FEC_IPG_LENGTH_SHIFT	16
+#define  QM_GLOBAL_CFG_EPON_OVERHEAD_CTRL_FEC_IPG_LENGTH_MASK	0x7ff0000
+
+
+/*
+ * Registers <DQM_FULL> - <x> is [ 0 => 8 ] - read-only
+ *
+ * Queue Full indicationEach register includes a batch of 32 queues
+ * non-empty indication.
+ * 9 Batches are needed for 288 queues.
+ * First Batch is for queues 31-0 and so on until the last batch
+ * representing queues 287-256.
+ */
+#define QM_GLOBAL_CFG_DQM_FULL(x)	(0x40 + (x) * 0x4)
+
+/*
+ * Queue Full indication.
+ * This is a 1-bit indication per queue.
+ * This register consists of a batch of 32 queues.
+*/
+#define  QM_GLOBAL_CFG_DQM_FULL_Q_FULL_SHIFT	0
+#define  QM_GLOBAL_CFG_DQM_FULL_Q_FULL_MASK	0xffffffff
+
+
+/*
+ * Registers <DQM_NOT_EMPTY> - <x> is [ 0 => 8 ] - read-only
+ *
+ * Queue Not Empty indicationEach register includes a batch of 32 queues
+ * non-empty indication.
+ * 9 Batches are needed for 288 queues.
+ * First Batch is for queues 31-0 and so on until the last batch
+ * representing queues 287-256.
+ */
+#define QM_GLOBAL_CFG_DQM_NOT_EMPTY(x)	(0x70 + (x) * 0x4)
+
+/*
+ * Queue Not empty indication.
+ * This is a 1-bit indication per queue.
+ * This register consists of a batch of 32 queues.
+*/
+#define  QM_GLOBAL_CFG_DQM_NOT_EMPTY_Q_NOT_EMPTY_SHIFT	0
+#define  QM_GLOBAL_CFG_DQM_NOT_EMPTY_Q_NOT_EMPTY_MASK	0xffffffff
+
+
+/*
+ * Registers <DQM_POP_READY> - <x> is [ 0 => 8 ] - read-only
+ *
+ * Queue pop ready indication (Some queues may be non-empty, but due to PD
+ * offload they are not immediately ready to be popped.
+ * Pop can be issued, but in this case the result could be delayed).
+ * Each register includes a batch of 32 queues non-empty indication.
+ * 9 Batches are needed for 288 queues.
+ * First Batch is for queues 31-0 and so on until the last batch
+ * representing queues 287-256.
+ */
+#define QM_GLOBAL_CFG_DQM_POP_READY(x)	(0xa0 + (x) * 0x4)
+
+/*
+ * Queue pop ready indication.
+ * This is a 1-bit indication per queue.
+ * This register consists of a batch of 32 queues.
+*/
+#define  QM_GLOBAL_CFG_DQM_POP_READY_POP_READY_SHIFT	0
+#define  QM_GLOBAL_CFG_DQM_POP_READY_POP_READY_MASK	0xffffffff
+
+
+/*
+ * Registers <AGGREGATION_CONTEXT_VALID> - <x> is [ 0 => 8 ] - read-only
+ *
+ * Aggregation context valid.
+ * This indicates that the queue is in the process of packet aggregation.
+ * Each register includes a batch of 32 queues aggregation valid
+ * indication.
+ * 9 Batches are needed for 288 queues.
+ * First Batch is for queues 31-0 and so on until the last batch
+ * representing queues 287-256.
+ */
+#define QM_GLOBAL_CFG_AGGREGATION_CONTEXT_VALID(x)	(0xd0 + (x) * 0x4)
+
+/*
+ * QM ingress aggregation context valid indication.
+ * This is a 1-bit indication per queue.
+ * This register consists of a batch of 32 queues.
+*/
+#define  QM_GLOBAL_CFG_AGGREGATION_CONTEXT_VALID_CONTEXT_VALID_SHIFT	0
+#define  QM_GLOBAL_CFG_AGGREGATION_CONTEXT_VALID_CONTEXT_VALID_MASK	0xffffffff
+
+
+/*
+ * Register <QM_AGGREGATION_TIMER_CTRL>
+ *
+ * Open aggregation will be forced to close after internal timer
+ * expiration.
+ * The first byte (0-7bits) controls the granularity of the internal
+ * counter (valid value 0x0-0x3)The second byte (8-15bits) controls the
+ * timeout value (valid values 0x0-0x7), which is counted according to
+ * granularity cycles.
+ * the 16bit is enable for the mechanism
+ */
+#define QM_GLOBAL_CFG_QM_AGGREGATION_TIMER_CTRL	0x100
+
+/*
+ * defines the granularity of the prescaler counter:
+ * 0 = 10bits1 = 11bits2 = 12bits3 = 13bits4 = 14bits5 = 15bits6 =
+ * 16bitsdebug:
+ * 7 = 5bits
+*/
+#define  QM_GLOBAL_CFG_QM_AGGREGATION_TIMER_CTRL_PRESCALER_GRANULARITY_SHIFT	0
+#define  QM_GLOBAL_CFG_QM_AGGREGATION_TIMER_CTRL_PRESCALER_GRANULARITY_MASK	0x7
+
+/*
+ * Aggregation timeout value, counted in prescaler counters cycles.
+ * valid values = [1.
+ * .7]
+*/
+#define  QM_GLOBAL_CFG_QM_AGGREGATION_TIMER_CTRL_AGGREGATION_TIMEOUT_VALUE_SHIFT	8
+#define  QM_GLOBAL_CFG_QM_AGGREGATION_TIMER_CTRL_AGGREGATION_TIMEOUT_VALUE_MASK	0x700
+
+
+/*
+ * Register <QM_FPM_UG_GBL_CNT>
+ *
+ * FPM global user group counter:
+ * UG0-3 + UG7
+ */
+#define QM_GLOBAL_CFG_QM_FPM_UG_GBL_CNT	0x118
+
+/* FPM global counter */
+#define  QM_GLOBAL_CFG_QM_FPM_UG_GBL_CNT_FPM_GBL_CNT_SHIFT	0
+#define  QM_GLOBAL_CFG_QM_FPM_UG_GBL_CNT_FPM_GBL_CNT_MASK	0xffff
+
+
+/*
+ * Register <QM_EGRESS_FLUSH_QUEUE>
+ *
+ * 0-8b:
+ * queue to flush9b:
+ * enable flush
+ */
+#define QM_GLOBAL_CFG_QM_EGRESS_FLUSH_QUEUE	0x11c
+
+/* Queue num */
+#define  QM_GLOBAL_CFG_QM_EGRESS_FLUSH_QUEUE_QUEUE_NUM_SHIFT	0
+#define  QM_GLOBAL_CFG_QM_EGRESS_FLUSH_QUEUE_QUEUE_NUM_MASK	0x1ff
+
+/* flush queue enable */
+#define  QM_GLOBAL_CFG_QM_EGRESS_FLUSH_QUEUE_FLUSH_EN_MASK	0x200
+
+
+/*
+ * Registers <THR> - <x> is [ 0 => 3 ]
+ *
+ * Hold 2 thresholds per FPM pool for priority management
+ */
+#define QM_FPM_POOLS_THR(x)		(0x200 + (x) * 0x20)
+
+/*
+ * FPM Lower Threshold.
+ * When working in packet drop mode (FPM_BP_ENABLE=0), Then:
+ * * If (FPM pool occupancy) <= (FPM_LOWER_THR), then packets in low/high
+ * priority are dropped (only exclusive packets are not dropped).
+ * * If (FPM_LOWER_THR) < (FPM pool occupancy) <= (FPM_HIGHER_THR), then
+ * packets in low priority are dropped.
+ * * If (FPM pool occupancy) > (FPM_HIGHER_THR), then no packets are
+ * dropped.
+ * When working in backpressure mode (FPM_BP_ENABLE=1), Then if (FPM pool
+ * occupancy) < (FPM_LOWER_THR), then backpressure is applied to re-order
+ * (in this case FPM_HIGHER_THR is don't care).
+*/
+#define  QM_FPM_POOLS_THR_FPM_LOWER_THR_SHIFT	0
+#define  QM_FPM_POOLS_THR_FPM_LOWER_THR_MASK	0x7f
+
+/*
+ * FPM Higher Threshold.
+ * When working in packet drop mode (FPM_BP_ENABLE=0), Then:
+ * * If (FPM pool occupancy) <= (FPM_LOWER_THR), then packets in low/high
+ * priority are dropped (only exclusive packets are not dropped).
+ * * If (FPM_LOWER_THR) < (FPM pool occupancy) <= (FPM_HIGHER_THR), then
+ * packets in low priority are dropped.
+ * * If (FPM pool occupancy) > (FPM_HIGHER_THR), then no packets are
+ * dropped.
+ * When working in backpressure mode (FPM_BP_ENABLE=1), Then if (FPM pool
+ * occupancy) < (FPM_LOWER_THR), then backpressure is applied to re-order
+ * (in this case FPM_HIGHER_THR is don't care).
+*/
+#define  QM_FPM_POOLS_THR_FPM_HIGHER_THR_SHIFT	8
+#define  QM_FPM_POOLS_THR_FPM_HIGHER_THR_MASK	0x7f00
+
+
+/*
+ * Registers <LOWER_THR> - <x> is [ 0 => 3 ]
+ *
+ * Holds FPM user group lower threshold.
+ */
+#define QM_FPM_USR_GRP_LO_THR(x)	(0x280 + (x) * 0x20)
+
+/*
+ * FPM group Lower Threshold.
+ * * If (FPM User Group Counter) > (FPM_GRP_HIGHER_THR), all packets in
+ * this user group are dropped.
+ * * If (FPM_GRP_MID_THR) < (FPM User Group Counter) <=
+ * (FPM_GRP_HIGHER_THR), then packets in low/high priority are dropped
+ * (only exclusive packets are not dropped).
+ * * If (FPM_GRP_LOWER_THR) < (FPM User Group Counter) <=
+ * (FPM_GRP_MID_THR), then packets in low priority are dropped.
+ * * If (FPM User Group Counter) <= (FPM_GRP_LOWER_THR), then no packets
+ * are dropped.
+*/
+#define  QM_FPM_USR_GRP_LO_THR_FPM_GRP_LOWER_THR_SHIFT	0
+#define  QM_FPM_USR_GRP_LO_THR_FPM_GRP_LOWER_THR_MASK	0xffff
+
+
+/*
+ * Registers <MID_THR> - <x> is [ 0 => 3 ]
+ *
+ * Holds FPM user group middle threshold.
+ * *IMPORTANT* if buffer reservations is enabled, the following should be
+ * honored:
+ * HIGHER_THR-MID_THR > 16
+ */
+#define QM_FPM_USR_GRP_MID_THR(x)	(0x284 + (x) * 0x20)
+
+/*
+ * FPM group Middle Threshold.
+ * * If (FPM User Group Counter) > (FPM_GRP_HIGHER_THR), all packets in
+ * this user group are dropped.
+ * * If (FPM_GRP_MID_THR) < (FPM User Group Counter) <=
+ * (FPM_GRP_HIGHER_THR), then packets in low/high priority are dropped
+ * (only exclusive packets are not dropped).
+ * * If (FPM_GRP_LOWER_THR) < (FPM User Group Counter) <=
+ * (FPM_GRP_MID_THR), then packets in low priority are dropped.
+ * * If (FPM User Group Counter) <= (FPM_GRP_LOWER_THR), then no packets
+ * are dropped.
+*/
+#define  QM_FPM_USR_GRP_MID_THR_FPM_GRP_MID_THR_SHIFT	0
+#define  QM_FPM_USR_GRP_MID_THR_FPM_GRP_MID_THR_MASK	0xffff
+
+
+/*
+ * Registers <HIGHER_THR> - <x> is [ 0 => 3 ]
+ *
+ * Holds FPM user group higher threshold.
+ * *IMPORTANT* if buffer reservations is enabled, the following should be
+ * honored:
+ * HIGHER_THR-MID_THR > 16
+ */
+#define QM_FPM_USR_GRP_HIGHER_THR(x)	(0x288 + (x) * 0x20)
+
+/*
+ * FPM group Higher Threshold.
+ * * If (FPM User Group Counter) > (FPM_GRP_HIGHER_THR), all packets in
+ * this user group are dropped.
+ * * If (FPM_GRP_MID_THR) < (FPM User Group Counter) <=
+ * (FPM_GRP_HIGHER_THR), then packets in low/high priority are dropped
+ * (only exclusive packets are not dropped).
+ * * If (FPM_GRP_LOWER_THR) < (FPM User Group Counter) <=
+ * (FPM_GRP_MID_THR), then packets in low priority are dropped.
+ * * If (FPM User Group Counter) <= (FPM_GRP_LOWER_THR), then no packets
+ * are dropped.
+*/
+#define  QM_FPM_USR_GRP_HIGHER_THR_FPM_GRP_HIGHER_THR_SHIFT	0
+#define  QM_FPM_USR_GRP_HIGHER_THR_FPM_GRP_HIGHER_THR_MASK	0xffff
+
+
+/*
+ * Registers <CNT> - <x> is [ 0 => 3 ]
+ *
+ * FPM user group buffer counter
+ */
+#define QM_FPM_USR_GRP_CNT(x)		(0x28c + (x) * 0x20)
+
+/* FPM user group counter */
+#define  QM_FPM_USR_GRP_CNT_FPM_UG_CNT_SHIFT	0
+#define  QM_FPM_USR_GRP_CNT_FPM_UG_CNT_MASK	0xffff
+
+
+/*
+ * Registers <RNR_CONFIG> - <x> is [ 0 => 15 ]
+ *
+ * Runners Configurations
+ */
+#define QM_RUNNER_GRP_RNR_CONFIG(x)	(0x300 + (x) * 0x10)
+
+/* Runner BB ID associated with this configuration. */
+#define  QM_RUNNER_GRP_RNR_CONFIG_RNR_BB_ID_SHIFT	0
+#define  QM_RUNNER_GRP_RNR_CONFIG_RNR_BB_ID_MASK	0x3f
+
+/* Runner Task number to be woken up when the update FIFO is written to. */
+#define  QM_RUNNER_GRP_RNR_CONFIG_RNR_TASK_SHIFT	8
+#define  QM_RUNNER_GRP_RNR_CONFIG_RNR_TASK_MASK	0xf00
+
+/* Enable this runner interface */
+#define  QM_RUNNER_GRP_RNR_CONFIG_RNR_ENABLE_MASK	0x10000
+
+
+/*
+ * Registers <QUEUE_CONFIG> - <x> is [ 0 => 15 ]
+ *
+ * Consecutive queues which are associated with this runner
+ */
+#define QM_RUNNER_GRP_QUEUE_CONFIG(x)	(0x304 + (x) * 0x10)
+
+/*
+ * Indicates the Queue that starts this runner group.
+ * Queues belonging to the runner group are defined by the following
+ * equation:
+ * START_QUEUE <= runner_queues <= END_QUEUE
+*/
+#define  QM_RUNNER_GRP_QUEUE_CONFIG_START_QUEUE_SHIFT	0
+#define  QM_RUNNER_GRP_QUEUE_CONFIG_START_QUEUE_MASK	0x1ff
+
+/*
+ * Indicates the Queue that ends this runner group.
+ * Queues belonging to the runner group are defined by the following
+ * equation:
+ * START_QUEUE <= runner_queues <= END_QUEUE
+*/
+#define  QM_RUNNER_GRP_QUEUE_CONFIG_END_QUEUE_SHIFT	16
+#define  QM_RUNNER_GRP_QUEUE_CONFIG_END_QUEUE_MASK	0x1ff0000
+
+
+/*
+ * Registers <PDFIFO_CONFIG> - <x> is [ 0 => 15 ]
+ *
+ * head of the queue PD FIFO attributes
+ */
+#define QM_RUNNER_GRP_PDFIFO_CONFIG(x)	(0x308 + (x) * 0x10)
+
+/*
+ * PD FIFO Base Address.
+ * This is an 8-byte address (Byte_addr = BASE_ADDR*8).
+*/
+#define  QM_RUNNER_GRP_PDFIFO_CONFIG_BASE_ADDR_SHIFT	3
+#define  QM_RUNNER_GRP_PDFIFO_CONFIG_BASE_ADDR_MASK	0x3ff8
+
+/* PD FIFO Size0 - 2 entries1 - 4 entries2 - 8 entries */
+#define  QM_RUNNER_GRP_PDFIFO_CONFIG_SIZE_SHIFT	16
+#define  QM_RUNNER_GRP_PDFIFO_CONFIG_SIZE_MASK	0x30000
+
+
+/*
+ * Registers <UPDATE_FIFO_CONFIG> - <x> is [ 0 => 15 ]
+ *
+ * Update FIFO attributes
+ */
+#define QM_RUNNER_GRP_UPDATE_FIFO_CONFIG(x)	(0x30c + (x) * 0x10)
+
+/*
+ * PD FIFO Base Address.
+ * This is an 8-byte address (Byte_addr = BASE_ADDR*8).
+*/
+#define  QM_RUNNER_GRP_UPDATE_FIFO_CONFIG_BASE_ADDR_SHIFT	3
+#define  QM_RUNNER_GRP_UPDATE_FIFO_CONFIG_BASE_ADDR_MASK	0x3ff8
+
+/*
+ * PD FIFO Size0 - 8 entries1 - 16 entries2 - 32 entries3 - 64 entries4 -
+ * 128 entries5 - 256 entries
+*/
+#define  QM_RUNNER_GRP_UPDATE_FIFO_CONFIG_SIZE_SHIFT	16
+#define  QM_RUNNER_GRP_UPDATE_FIFO_CONFIG_SIZE_MASK	0x70000
+
+
+/*
+ * Register <INTERRUPT_STATUS_Register>
+ *
+ * This register contains the current active QM interrupts.
+ * Each asserted bit represents an active interrupt source.
+ * The interrupt remains active until the software clears it by writing 1
+ * to the corresponding bit.
+ */
+#define QM_INTR_CTRL_ISR		0x400
+
+/* HW tried to pop a PD from the DQM of an empty queue. */
+#define  QM_INTR_CTRL_ISR_QM_DQM_POP_ON_EMPTY_MASK	0x1
+
+/* HW tried to push a PD into the DQM of a full queue. */
+#define  QM_INTR_CTRL_ISR_QM_DQM_PUSH_ON_FULL_MASK	0x2
+
+/* CPU tried to pop a PD from the DQM of an empty queue. */
+#define  QM_INTR_CTRL_ISR_QM_CPU_POP_ON_EMPTY_MASK	0x4
+
+/* CPU tried to push a PD into the DQM of a full queue. */
+#define  QM_INTR_CTRL_ISR_QM_CPU_PUSH_ON_FULL_MASK	0x8
+
+/* A PD arrived to the Normal queue without having any credits */
+#define  QM_INTR_CTRL_ISR_QM_NORMAL_QUEUE_PD_NO_CREDIT_MASK	0x10
+
+/* A PD arrived to the NON-delayed queue without having any credits */
+#define  QM_INTR_CTRL_ISR_QM_NON_DELAYED_QUEUE_PD_NO_CREDIT_MASK	0x20
+
+/* A PD arrived with a non valid queue number (>287) */
+#define  QM_INTR_CTRL_ISR_QM_NON_VALID_QUEUE_MASK	0x40
+
+/*
+ * An aggregation of PDs was done in which the coherent bit of the PD
+ * differs between them (The coherent bit of the first aggregated PD was
+ * used)
+*/
+#define  QM_INTR_CTRL_ISR_QM_AGG_COHERENT_INCONSISTENCY_MASK	0x80
+
+/*
+ * A PD with force copy bit set was received on the non-delayed queue (in
+ * this queue the copy machine is bypassed)
+*/
+#define  QM_INTR_CTRL_ISR_QM_FORCE_COPY_ON_NON_DELAYED_MASK	0x100
+
+/*
+ * A PD was marked to be copied, but there does not exist an FPM pool
+ * buffer large enough to hold it.
+*/
+#define  QM_INTR_CTRL_ISR_QM_FPM_POOL_SIZE_NONEXISTENT_MASK	0x200
+
+/*
+ * A PD was marked with a target_mem=1 (located in PSRAM) and on the other
+ * hand, the absolute address indication was set.
+*/
+#define  QM_INTR_CTRL_ISR_QM_TARGET_MEM_ABS_CONTRADICTION_MASK	0x400
+
+/* 1588 Packet is dropped when the QM PD occupancy exceeds threshold (64K) */
+#define  QM_INTR_CTRL_ISR_QM_1588_DROP_MASK	0x800
+
+/* A PD was marked as a 1588 and multicast together. */
+#define  QM_INTR_CTRL_ISR_QM_1588_MULTICAST_CONTRADICTION_MASK	0x1000
+
+/*
+ * The byte drop counter of one of the queues reached its maximum value and
+ * a new value was pushed.
+*/
+#define  QM_INTR_CTRL_ISR_QM_BYTE_DROP_CNT_OVERRUN_MASK	0x2000
+
+/*
+ * The Packet drop counter of one of the queues reached its maximum value
+ * and a new value was pushed.
+*/
+#define  QM_INTR_CTRL_ISR_QM_PKT_DROP_CNT_OVERRUN_MASK	0x4000
+
+/* The Total byte counter was decremented to a negative value. */
+#define  QM_INTR_CTRL_ISR_QM_TOTAL_BYTE_CNT_UNDERRUN_MASK	0x8000
+
+/* The Total PD counter was decremented to a negative value. */
+#define  QM_INTR_CTRL_ISR_QM_TOTAL_PKT_CNT_UNDERRUN_MASK	0x10000
+
+/* The UG0 counter was decremented to a negative value. */
+#define  QM_INTR_CTRL_ISR_QM_FPM_UG0_UNDERRUN_MASK	0x20000
+
+/* The UG1 counter was decremented to a negative value. */
+#define  QM_INTR_CTRL_ISR_QM_FPM_UG1_UNDERRUN_MASK	0x40000
+
+/* The UG2 counter was decremented to a negative value. */
+#define  QM_INTR_CTRL_ISR_QM_FPM_UG2_UNDERRUN_MASK	0x80000
+
+/* The UG3 counter was decremented to a negative value. */
+#define  QM_INTR_CTRL_ISR_QM_FPM_UG3_UNDERRUN_MASK	0x100000
+
+/*
+ * QM aggregation timers wraps around.
+ * In this case it isn't guaranteed that the aggregation will be closed on
+ * pre-defined timeout expiration.
+ * However the aggregation should be closed eventually.
+*/
+#define  QM_INTR_CTRL_ISR_QM_TIMER_WRAPAROUND_MASK	0x200000
+
+
+/*
+ * Register <INTERRUPT_STATUS_MASKED_Register> - read-only
+ *
+ * This register provides only the enabled interrupts for each of the
+ * interrupt sources depicted in the ISR register.
+ */
+#define QM_INTR_CTRL_ISM		0x404
+
+/* Status Masked of corresponding interrupt source in the ISR */
+#define  QM_INTR_CTRL_ISM_ISM_SHIFT	0
+#define  QM_INTR_CTRL_ISM_ISM_MASK	0x3fffff
+
+
+/*
+ * Register <INTERRUPT_ENABLE_Register>
+ *
+ * This register provides an enable mask for each of the interrupt sources
+ * depicted in the ISR register.
+ */
+#define QM_INTR_CTRL_IER		0x408
+
+/*
+ * Each bit in the mask controls the corresponding interrupt source in the
+ * IER
+*/
+#define  QM_INTR_CTRL_IER_IEM_SHIFT	0
+#define  QM_INTR_CTRL_IER_IEM_MASK	0x3fffff
+
+
+/*
+ * Register <INTERRUPT_TEST_Register>
+ *
+ * This register enables testing by simulating interrupt sources.
+ * When the software sets a bit in the ITR, the corresponding bit in the
+ * ISR shows an active interrupt.
+ * The interrupt remains active until software clears the bit in the ITR
+ */
+#define QM_INTR_CTRL_ITR		0x40c
+
+/* Each bit in the mask tests the corresponding interrupt source in the ISR */
+#define  QM_INTR_CTRL_ITR_IST_SHIFT	0
+#define  QM_INTR_CTRL_ITR_IST_MASK	0x3fffff
+
+
+/*
+ * Register <CLOCK_GATE_CONTROL>
+ *
+ * Clock Gate control register including timer config and bypass control
+ */
+#define QM_CLK_GATE_CLK_GATE_CNTRL	0x500
+
+/*
+ * If set to 1b1 will disable the clock gate logic such to always enable
+ * the clock
+*/
+#define  QM_CLK_GATE_CLK_GATE_CNTRL_BYPASS_CLK_GATE_MASK	0x1
+
+/*
+ * For how long should the clock stay active once all conditions for clock
+ * disable are met.
+*/
+#define  QM_CLK_GATE_CLK_GATE_CNTRL_TIMER_VAL_SHIFT	8
+#define  QM_CLK_GATE_CLK_GATE_CNTRL_TIMER_VAL_MASK	0xff00
+
+/*
+ * Enables the keep alive logic which will periodically enable the clock to
+ * assure that no deadlock of clock being removed completely will occur
+*/
+#define  QM_CLK_GATE_CLK_GATE_CNTRL_KEEP_ALIVE_EN_MASK	0x10000
+
+/*
+ * If the KEEP alive option is enabled the field will determine for how
+ * many cycles should the clock be active
+*/
+#define  QM_CLK_GATE_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_SHIFT	20
+#define  QM_CLK_GATE_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_MASK	0x700000
+
+/*
+ * If the KEEP alive option is enabled this field will determine for how
+ * many cycles should the clock be disabled (minus the
+ * KEEP_ALIVE_INTERVAL). So KEEP_ALIVE_CYCLE must be larger than
+ * KEEP_ALIVE_INTERVAL.
+*/
+#define  QM_CLK_GATE_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_SHIFT	24
+#define  QM_CLK_GATE_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_MASK	0xff000000
+
+
+/*
+ * Registers <CPU_PD_INDIRECT_CTRL> - <x> is [ 0 => 3 ]
+ *
+ * CPU PD Indirect Access Control
+ */
+#define QM_CPU_INDR_PORT_CPU_PD_INDIRECT_CTRL(x)	(0x600 + (x) * 0x40)
+
+/* Queue Number */
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_CTRL_QUEUE_NUM_SHIFT	0
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_CTRL_QUEUE_NUM_MASK	0x1ff
+
+/*
+ * Command:
+ * 00 - Nothing, 01 - Write, 10 - Read, 11 - Read No commit (entry not
+ * popped). Will trigger a read/write from the selected RAM. IMPORTANT:
+ * Read is for debug purpose only.
+ * shouldn't be used during regular QM work on the requested queue (HW pop).
+ * Popping the same queue both from CPU and HW could cause to race
+ * condition which will cause to incorrect data output.
+ * It could occur when there is only one entry in the queue which is
+ * accessed both from the CPU and the HW.
+*/
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_CTRL_CMD_SHIFT	16
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_CTRL_CMD_MASK	0x30000
+
+/* Indicates that read/write to DQM is done */
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_CTRL_DONE_MASK	0x1000000
+
+/* Indicates that an error occurred (write on full or read on empty) */
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_CTRL_ERROR_MASK	0x2000000
+
+
+/*
+ * Registers <CPU_PD_INDIRECT_WR_DATA> - <x> is [ 0 => 3 ]
+ *
+ * CPU PD Indirect Write data to DQM.
+ * First entry represents PD[127:
+ * 96] and so on until the last entry representing PD[31:
+ * 0].
+ */
+#define QM_CPU_INDR_PORT_CPU_PD_INDIRECT_WR_DATA_0(x)	(0x610 + (x) * 0x40)
+
+/* Data */
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_WR_DATA_0_DATA_SHIFT	0
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_WR_DATA_0_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <CPU_PD_INDIRECT_WR_DATA> - <x> is [ 0 => 3 ]
+ *
+ * CPU PD Indirect Write data to DQM.
+ * First entry represents PD[127:
+ * 96] and so on until the last entry representing PD[31:
+ * 0].
+ */
+#define QM_CPU_INDR_PORT_CPU_PD_INDIRECT_WR_DATA_1(x)	(0x614 + (x) * 0x40)
+
+/* Data */
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_WR_DATA_1_DATA_SHIFT	0
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_WR_DATA_1_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <CPU_PD_INDIRECT_WR_DATA> - <x> is [ 0 => 3 ]
+ *
+ * CPU PD Indirect Write data to DQM.
+ * First entry represents PD[127:
+ * 96] and so on until the last entry representing PD[31:
+ * 0].
+ */
+#define QM_CPU_INDR_PORT_CPU_PD_INDIRECT_WR_DATA_2(x)	(0x618 + (x) * 0x40)
+
+/* Data */
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_WR_DATA_2_DATA_SHIFT	0
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_WR_DATA_2_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <CPU_PD_INDIRECT_WR_DATA> - <x> is [ 0 => 3 ]
+ *
+ * CPU PD Indirect Write data to DQM.
+ * First entry represents PD[127:
+ * 96] and so on until the last entry representing PD[31:
+ * 0].
+ */
+#define QM_CPU_INDR_PORT_CPU_PD_INDIRECT_WR_DATA_3(x)	(0x61c + (x) * 0x40)
+
+/* Data */
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_WR_DATA_3_DATA_SHIFT	0
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_WR_DATA_3_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <CPU_PD_INDIRECT_RD_DATA> - <x> is [ 0 => 3 ] - read-only
+ *
+ * CPU PD Indirect Read data from DQM.
+ * First entry represents PD[127:
+ * 96] and so on until the last entry representing PD[31:
+ * 0].
+ */
+#define QM_CPU_INDR_PORT_CPU_PD_INDIRECT_RD_DATA_0(x)	(0x620 + (x) * 0x40)
+
+/* Data */
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_RD_DATA_0_DATA_SHIFT	0
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_RD_DATA_0_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <CPU_PD_INDIRECT_RD_DATA> - <x> is [ 0 => 3 ] - read-only
+ *
+ * CPU PD Indirect Read data from DQM.
+ * First entry represents PD[127:
+ * 96] and so on until the last entry representing PD[31:
+ * 0].
+ */
+#define QM_CPU_INDR_PORT_CPU_PD_INDIRECT_RD_DATA_1(x)	(0x624 + (x) * 0x40)
+
+/* Data */
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_RD_DATA_1_DATA_SHIFT	0
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_RD_DATA_1_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <CPU_PD_INDIRECT_RD_DATA> - <x> is [ 0 => 3 ] - read-only
+ *
+ * CPU PD Indirect Read data from DQM.
+ * First entry represents PD[127:
+ * 96] and so on until the last entry representing PD[31:
+ * 0].
+ */
+#define QM_CPU_INDR_PORT_CPU_PD_INDIRECT_RD_DATA_2(x)	(0x628 + (x) * 0x40)
+
+/* Data */
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_RD_DATA_2_DATA_SHIFT	0
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_RD_DATA_2_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <CPU_PD_INDIRECT_RD_DATA> - <x> is [ 0 => 3 ] - read-only
+ *
+ * CPU PD Indirect Read data from DQM.
+ * First entry represents PD[127:
+ * 96] and so on until the last entry representing PD[31:
+ * 0].
+ */
+#define QM_CPU_INDR_PORT_CPU_PD_INDIRECT_RD_DATA_3(x)	(0x62c + (x) * 0x40)
+
+/* Data */
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_RD_DATA_3_DATA_SHIFT	0
+#define  QM_CPU_INDR_PORT_CPU_PD_INDIRECT_RD_DATA_3_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <QUEUE_CONTEXT> - <x> is [ 0 => 159 ]
+ *
+ * This RAM holds all queue attributes.
+ * Not all of the 32-bits in the address space are implemented.
+ * WRED Profile 3:
+ * 0Copy decision profile 6:
+ * 4Copy to DDR 7DDR copy disable 8Aggregation Disable 9FPM User Group 11:
+ * 10Exclusive Priority 12802.
+ * 1AE 13SCI 14FEC Enable 15
+ */
+#define QM_QUEUE_CONTEXT_CONTEXT(x)	(0x800 + (x) * 0x4)
+
+/* Defines to which WRED Profile this queue belongs to. */
+#define  QM_QUEUE_CONTEXT_CONTEXT_WRED_PROFILE_SHIFT	0
+#define  QM_QUEUE_CONTEXT_CONTEXT_WRED_PROFILE_MASK	0xf
+
+/* Defines to which Copy Decision Profile this queue belongs to. */
+#define  QM_QUEUE_CONTEXT_CONTEXT_COPY_DEC_PROFILE_SHIFT	4
+#define  QM_QUEUE_CONTEXT_CONTEXT_COPY_DEC_PROFILE_MASK	0x70
+
+/* Defines this queue to always copy to DDR. */
+#define  QM_QUEUE_CONTEXT_CONTEXT_COPY_TO_DDR_MASK	0x80
+
+/* Defines this queue never to copy to DDR. */
+#define  QM_QUEUE_CONTEXT_CONTEXT_DDR_COPY_DISABLE_MASK	0x100
+
+/* Defines this queue never to aggregate PDs. */
+#define  QM_QUEUE_CONTEXT_CONTEXT_AGGREGATION_DISABLE_MASK	0x200
+
+/* Defines to which FPM UG this queue belongs to. */
+#define  QM_QUEUE_CONTEXT_CONTEXT_FPM_UG_SHIFT	10
+#define  QM_QUEUE_CONTEXT_CONTEXT_FPM_UG_MASK	0x1c00
+
+/* Defines this queue with exclusive priority. */
+#define  QM_QUEUE_CONTEXT_CONTEXT_EXCLUSIVE_PRIORITY_MASK	0x2000
+
+/*
+ * Defines this queue as 802.
+ * 1AE for EPON packet overhead calculations.
+*/
+#define  QM_QUEUE_CONTEXT_CONTEXT_Q_802_1AE_MASK	0x4000
+
+/* Configures SCI for EPON packet overhead calculations. */
+#define  QM_QUEUE_CONTEXT_CONTEXT_SCI_MASK	0x8000
+
+/* FEC enable configuration for EPON packet overhead calculations. */
+#define  QM_QUEUE_CONTEXT_CONTEXT_FEC_ENABLE_MASK	0x10000
+
+/*
+ * FPM reservation profile.
+ * Once the QM goes over global FPM reservation threshold.
+ * Queues with more bytes than defined in the profile will be dropped.
+ * Profile 0 means no drop due to FPM reservation for the queues with this
+ * profile.
+*/
+#define  QM_QUEUE_CONTEXT_CONTEXT_RES_PROFILE_SHIFT	17
+#define  QM_QUEUE_CONTEXT_CONTEXT_RES_PROFILE_MASK	0xe0000
+
+
+/*
+ * Registers <COLOR_MIN_THR> - <x> is [ 0 => 15 ]
+ *
+ * WRED Color min thresholds
+ */
+#define QM_WRED_PROFILE_COLOR_MIN_THR_0(x)	(0x1000 + (x) * 0x30)
+
+/*
+ * WRED Color Min Threshold.
+ * This field represents the higher 24-bits of the queue occupancy byte
+ * threshold.
+ * byte_threshold = THR*64.
+*/
+#define  QM_WRED_PROFILE_COLOR_MIN_THR_0_MIN_THR_SHIFT	0
+#define  QM_WRED_PROFILE_COLOR_MIN_THR_0_MIN_THR_MASK	0xffffff
+
+/*
+ * 0 - flow control disable.
+ * regular WRED profile1 - flow control enable.
+ * no WRED drop, wake up appropriate runner task when crossed.
+*/
+#define  QM_WRED_PROFILE_COLOR_MIN_THR_0_FLW_CTRL_EN_MASK	0x1000000
+
+
+/*
+ * Registers <COLOR_MIN_THR> - <x> is [ 0 => 15 ]
+ *
+ * WRED Color min thresholds
+ */
+#define QM_WRED_PROFILE_COLOR_MIN_THR_1(x)	(0x1004 + (x) * 0x30)
+
+/*
+ * WRED Color Min Threshold.
+ * This field represents the higher 24-bits of the queue occupancy byte
+ * threshold.
+ * byte_threshold = THR*64.
+*/
+#define  QM_WRED_PROFILE_COLOR_MIN_THR_1_MIN_THR_SHIFT	0
+#define  QM_WRED_PROFILE_COLOR_MIN_THR_1_MIN_THR_MASK	0xffffff
+
+/*
+ * 0 - flow control disable.
+ * regular WRED profile1 - flow control enable.
+ * no WRED drop, wake up appropriate runner task when crossed.
+*/
+#define  QM_WRED_PROFILE_COLOR_MIN_THR_1_FLW_CTRL_EN_MASK	0x1000000
+
+
+/*
+ * Registers <COLOR_MAX_THR> - <x> is [ 0 => 15 ]
+ *
+ * WRED Color max thresholds
+ */
+#define QM_WRED_PROFILE_COLOR_MAX_THR_0(x)	(0x1010 + (x) * 0x30)
+
+/*
+ * WRED Color Max Threshold.
+ * This field represents the higher 24-bits of the queue occupancy byte
+ * threshold.
+ * byte_threshold = THR*64.
+*/
+#define  QM_WRED_PROFILE_COLOR_MAX_THR_0_MAX_THR_SHIFT	0
+#define  QM_WRED_PROFILE_COLOR_MAX_THR_0_MAX_THR_MASK	0xffffff
+
+
+/*
+ * Registers <COLOR_MAX_THR> - <x> is [ 0 => 15 ]
+ *
+ * WRED Color max thresholds
+ */
+#define QM_WRED_PROFILE_COLOR_MAX_THR_1(x)	(0x1014 + (x) * 0x30)
+
+/*
+ * WRED Color Max Threshold.
+ * This field represents the higher 24-bits of the queue occupancy byte
+ * threshold.
+ * byte_threshold = THR*64.
+*/
+#define  QM_WRED_PROFILE_COLOR_MAX_THR_1_MAX_THR_SHIFT	0
+#define  QM_WRED_PROFILE_COLOR_MAX_THR_1_MAX_THR_MASK	0xffffff
+
+
+/*
+ * Registers <COLOR_SLOPE> - <x> is [ 0 => 15 ]
+ *
+ * WRED Color slopes
+ */
+#define QM_WRED_PROFILE_COLOR_SLOPE_0(x)	(0x1020 + (x) * 0x30)
+
+/* WRED Color slope mantissa. */
+#define  QM_WRED_PROFILE_COLOR_SLOPE_0_SLOPE_MANTISSA_SHIFT	0
+#define  QM_WRED_PROFILE_COLOR_SLOPE_0_SLOPE_MANTISSA_MASK	0xff
+
+/* WRED Color slope exponent. */
+#define  QM_WRED_PROFILE_COLOR_SLOPE_0_SLOPE_EXP_SHIFT	8
+#define  QM_WRED_PROFILE_COLOR_SLOPE_0_SLOPE_EXP_MASK	0x1f00
+
+
+/*
+ * Registers <COLOR_SLOPE> - <x> is [ 0 => 15 ]
+ *
+ * WRED Color slopes
+ */
+#define QM_WRED_PROFILE_COLOR_SLOPE_1(x)	(0x1024 + (x) * 0x30)
+
+/* WRED Color slope mantissa. */
+#define  QM_WRED_PROFILE_COLOR_SLOPE_1_SLOPE_MANTISSA_SHIFT	0
+#define  QM_WRED_PROFILE_COLOR_SLOPE_1_SLOPE_MANTISSA_MASK	0xff
+
+/* WRED Color slope exponent. */
+#define  QM_WRED_PROFILE_COLOR_SLOPE_1_SLOPE_EXP_SHIFT	8
+#define  QM_WRED_PROFILE_COLOR_SLOPE_1_SLOPE_EXP_MASK	0x1f00
+
+
+/*
+ * Registers <THR> - <x> is [ 0 => 7 ]
+ *
+ * DDR Pipe and PSRAM threshold configurations for DDR copy decision logic
+ */
+#define QM_COPY_DECISION_PROFILE_THR(x)	(0x1800 + (x) * 0x20)
+
+/*
+ * Queue Occupancy Threshold.
+ * When passing this threshold, packets will be copied to the DDR
+*/
+#define  QM_COPY_DECISION_PROFILE_THR_QUEUE_OCCUPANCY_THR_SHIFT	0
+#define  QM_COPY_DECISION_PROFILE_THR_QUEUE_OCCUPANCY_THR_MASK	0x3fffffff
+
+/*
+ * Indicates which of the two PSRAM threshold crossing indications coming
+ * from the SBPM will be used for the copy decision.
+ * when going over the chosen threshold, packets will be copied to the DDR.
+ * 0 - Lower threshold1 - Higher threshold
+*/
+#define  QM_COPY_DECISION_PROFILE_THR_PSRAM_THR_MASK	0x80000000
+
+
+/*
+ * Registers <COUNTER> - <x> is [ 0 => 639 ]
+ *
+ * Counter.
+ * word0:
+ * {15`b0,pkt_cnt[16:
+ * 0]}word1:
+ * {2`b0,byte_cnt[29:
+ * 0]}word2:
+ * {15b0,res_cnt[16:
+ * 0]}word3:
+ * reservedThere are three words per queue starting at queue0 up to queue
+ * 159/287.
+ */
+#define QM_TOTAL_VALID_COUNTER_COUNTER(x)	(0x2000 + (x) * 0x4)
+
+/* DATA */
+#define  QM_TOTAL_VALID_COUNTER_COUNTER_DATA_SHIFT	0
+#define  QM_TOTAL_VALID_COUNTER_COUNTER_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <COUNTER> - <x> is [ 0 => 319 ]
+ *
+ * Counter.
+ * word0:
+ * {15`b0,pkt_cnt[16:
+ * 0]}word1:
+ * {2`b0,byte_cnt[29:
+ * 0]}There are two words per queue starting at queue0 up to queue 287.
+ */
+#define QM_DQM_VALID_COUNTER_COUNTER(x)	(0x3000 + (x) * 0x4)
+
+/* DATA */
+#define  QM_DQM_VALID_COUNTER_COUNTER_DATA_SHIFT	0
+#define  QM_DQM_VALID_COUNTER_COUNTER_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <COUNTER> - <x> is [ 0 => 319 ] - read-only
+ *
+ * Counter.
+ * word0:
+ * {6`b0,pkt_cnt[25:
+ * 0]}word1:
+ * {byte_cnt[31:
+ * 0]}in WRED drop mode:
+ * word0 - color1 dropped packetsword1 - color0 dropped packetsThere are
+ * two words per queue starting at queue0 up to queue 287.
+ */
+#define QM_DROP_COUNTER_COUNTER(x)	(0x4000 + (x) * 0x4)
+
+/* DATA */
+#define  QM_DROP_COUNTER_COUNTER_DATA_SHIFT	0
+#define  QM_DROP_COUNTER_COUNTER_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <COUNTER> - <x> is [ 0 => 319 ]
+ *
+ * Counter - For each of the 32-queues in a batch, this counter stores a
+ * 32-bit accumulated and overhead byte counter per queue.
+ * word0:
+ * {accumulated_bytes[31:
+ * 0]}word1:
+ * {accumulated_overhead[31:
+ * 0}There are two words per queue starting at queue0 up to queue 127.
+ */
+#define QM_EPON_RPT_CNT_COUNTER(x)	(0x5000 + (x) * 0x4)
+
+/* DATA */
+#define  QM_EPON_RPT_CNT_COUNTER_DATA_SHIFT	0
+#define  QM_EPON_RPT_CNT_COUNTER_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <QUEUE_STATUS> - <x> is [ 0 => 4 ] - read-only
+ *
+ * Status bit vector - For each of the 32-queues in a batch, this status
+ * indicates which queue counter has been updated.
+ */
+#define QM_EPON_RPT_CNT_QUEUE_STATUS(x)	(0x5500 + (x) * 0x4)
+
+/*
+ * Status bit vector - a bit per queue indicates if the queue has been
+ * updated.
+*/
+#define  QM_EPON_RPT_CNT_QUEUE_STATUS_STATUS_BIT_VECTOR_SHIFT	0
+#define  QM_EPON_RPT_CNT_QUEUE_STATUS_STATUS_BIT_VECTOR_MASK	0xffffffff
+
+
+/*
+ * Register <RD_DATA_POOL0> - read-only
+ *
+ * Read the head of the FIFO
+ */
+#define QM_RD_DATA_POOL0		0x5800
+
+/* DATA */
+#define  QM_RD_DATA_POOL0_DATA_SHIFT	0
+#define  QM_RD_DATA_POOL0_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <RD_DATA_POOL1> - read-only
+ *
+ * Read the head of the FIFO
+ */
+#define QM_RD_DATA_POOL1		0x5804
+
+/* DATA */
+#define  QM_RD_DATA_POOL1_DATA_SHIFT	0
+#define  QM_RD_DATA_POOL1_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <RD_DATA_POOL2> - read-only
+ *
+ * Read the head of the FIFO
+ */
+#define QM_RD_DATA_POOL2		0x5808
+
+/* DATA */
+#define  QM_RD_DATA_POOL2_DATA_SHIFT	0
+#define  QM_RD_DATA_POOL2_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <RD_DATA_POOL3> - read-only
+ *
+ * Read the head of the FIFO
+ */
+#define QM_RD_DATA_POOL3		0x580c
+
+/* DATA */
+#define  QM_RD_DATA_POOL3_DATA_SHIFT	0
+#define  QM_RD_DATA_POOL3_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <PDFIFO_PTR> - <x> is [ 0 => 159 ] - read-only
+ *
+ * PDFIFO per queue rd/wr pointers
+ */
+#define QM_PDFIFO_PTR(x)		(0x6000 + (x) * 0x4)
+
+/* PDFIFO WR pointers */
+#define  QM_PDFIFO_PTR_WR_PTR_SHIFT	0
+#define  QM_PDFIFO_PTR_WR_PTR_MASK	0xf
+
+/* PDFIFO RD pointers */
+#define  QM_PDFIFO_PTR_RD_PTR_SHIFT	8
+#define  QM_PDFIFO_PTR_RD_PTR_MASK	0xf00
+
+
+/*
+ * Registers <UPDATE_FIFO_PTR> - <x> is [ 0 => 15 ] - read-only
+ *
+ * Update FIFO rd/wr pointers
+ */
+#define QM_UPDATE_FIFO_PTR(x)		(0x6500 + (x) * 0x4)
+
+/* UF WR pointers */
+#define  QM_UPDATE_FIFO_PTR_WR_PTR_SHIFT	0
+#define  QM_UPDATE_FIFO_PTR_WR_PTR_MASK	0x1ff
+
+/* UF RD pointers */
+#define  QM_UPDATE_FIFO_PTR_RD_PTR_SHIFT	9
+#define  QM_UPDATE_FIFO_PTR_RD_PTR_MASK	0xfe00
+
+
+/*
+ * Registers <RD_DATA> - <x> is [ 0 => 4 ] - read-only
+ *
+ * Debug - Read the head of the FIFO
+ */
+#define QM_RD_DATA(x)			(0x8800 + (x) * 0x4)
+
+/* DATA */
+#define  QM_RD_DATA_DATA_SHIFT		0
+#define  QM_RD_DATA_DATA_MASK		0xffffffff
+
+
+/*
+ * Register <POP>
+ *
+ * Pop an entry in the FIFO
+ */
+#define QM_POP				0x8820
+
+/* Pop FIFO entry */
+#define  QM_POP_POP_MASK		0x1
+
+
+/*
+ * Registers <DATA> - <x> is [ 0 => 7 ] - read-only
+ *
+ * CM Common Input FIFO - debug access
+ */
+#define QM_CM_COMMON_INPUT_FIFO_DATA(x)	(0x9000 + (x) * 0x4)
+
+/* DATA */
+#define  QM_CM_COMMON_INPUT_FIFO_DATA_DATA_SHIFT	0
+#define  QM_CM_COMMON_INPUT_FIFO_DATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <DATA> - <x> is [ 0 => 31 ] - read-only
+ *
+ * Normal Remote FIFO - debug access
+ */
+#define QM_NORMAL_RMT_FIFO_DATA(x)	(0x9100 + (x) * 0x4)
+
+/* DATA */
+#define  QM_NORMAL_RMT_FIFO_DATA_DATA_SHIFT	0
+#define  QM_NORMAL_RMT_FIFO_DATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <DATA> - <x> is [ 0 => 31 ] - read-only
+ *
+ * Non-delayed Remote FIFO - debug access
+ */
+#define QM_NON_DELAYED_RMT_FIFO_DATA(x)	(0x9200 + (x) * 0x4)
+
+/* DATA */
+#define  QM_NON_DELAYED_RMT_FIFO_DATA_DATA_SHIFT	0
+#define  QM_NON_DELAYED_RMT_FIFO_DATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <DATA> - <x> is [ 0 => 7 ] - read-only
+ *
+ * Egress data FIFO - debug access
+ */
+#define QM_EGRESS_DATA_FIFO_DATA(x)	(0x9300 + (x) * 0x4)
+
+/* DATA */
+#define  QM_EGRESS_DATA_FIFO_DATA_DATA_SHIFT	0
+#define  QM_EGRESS_DATA_FIFO_DATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <DATA> - <x> is [ 0 => 1 ] - read-only
+ *
+ * Egress RR FIFO - debug access
+ */
+#define QM_EGRESS_RR_FIFO_DATA(x)	(0x9400 + (x) * 0x4)
+
+/* DATA */
+#define  QM_EGRESS_RR_FIFO_DATA_DATA_SHIFT	0
+#define  QM_EGRESS_RR_FIFO_DATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <DATA> - <x> is [ 0 => 7 ] - read-only
+ *
+ * Egress BB Input FIFO - debug access
+ */
+#define QM_EGRESS_BB_INPUT_FIFO_DATA(x)	(0x9500 + (x) * 0x4)
+
+/* DATA */
+#define  QM_EGRESS_BB_INPUT_FIFO_DATA_DATA_SHIFT	0
+#define  QM_EGRESS_BB_INPUT_FIFO_DATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <DATA> - <x> is [ 0 => 31 ] - read-only
+ *
+ * Egress BB Output FIFO - debug access
+ */
+#define QM_EGRESS_BB_OUTPUT_FIFO_DATA(x)	(0x9600 + (x) * 0x4)
+
+/* DATA */
+#define  QM_EGRESS_BB_OUTPUT_FIFO_DATA_DATA_SHIFT	0
+#define  QM_EGRESS_BB_OUTPUT_FIFO_DATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <DATA> - <x> is [ 0 => 31 ] - read-only
+ *
+ * QM BB Output FIFO - debug access
+ */
+#define QM_BB_OUTPUT_FIFO_DATA(x)	(0x9700 + (x) * 0x4)
+
+/* DATA */
+#define  QM_BB_OUTPUT_FIFO_DATA_DATA_SHIFT	0
+#define  QM_BB_OUTPUT_FIFO_DATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <DATA> - <x> is [ 0 => 31 ] - read-only
+ *
+ * Non delayed output FIFO - debug access
+ */
+#define QM_NON_DELAYED_OUT_FIFO_DATA(x)	(0x9800 + (x) * 0x4)
+
+/* DATA */
+#define  QM_NON_DELAYED_OUT_FIFO_DATA_DATA_SHIFT	0
+#define  QM_NON_DELAYED_OUT_FIFO_DATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <DATA> - <x> is [ 0 => 639 ] - read-only
+ *
+ * Aggregation context - debug access
+ */
+#define QM_CONTEXT_DATA(x)		(0xa000 + (x) * 0x4)
+
+/* DATA */
+#define  QM_CONTEXT_DATA_DATA_SHIFT	0
+#define  QM_CONTEXT_DATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <PROFILE> - <x> is [ 0 => 7 ]
+ *
+ * Reserved FPM buffers in units of min.
+ * FPM buffer.
+ * entry0 -> profile0.
+ * ..
+ * entry7 -> profile7
+ */
+#define QM_FPM_BUFFER_RESERVATION_DATA(x)	(0xc000 + (x) * 0x4)
+
+/* DATA */
+#define  QM_FPM_BUFFER_RESERVATION_DATA_DATA_SHIFT	0
+#define  QM_FPM_BUFFER_RESERVATION_DATA_DATA_MASK	0xffff
+
+
+/*
+ * Register <UG_CTRL>
+ *
+ * FPM user group ctrl
+ */
+#define QM_FLOW_CTRL_UG_CTRL		0xc100
+
+/* Flow control enable */
+#define  QM_FLOW_CTRL_UG_CTRL_FLOW_CTRL_UG0_EN_MASK	0x1
+
+/* Flow control enable */
+#define  QM_FLOW_CTRL_UG_CTRL_FLOW_CTRL_UG1_EN_MASK	0x2
+
+/* Flow control enable */
+#define  QM_FLOW_CTRL_UG_CTRL_FLOW_CTRL_UG2_EN_MASK	0x4
+
+/* Flow control enable */
+#define  QM_FLOW_CTRL_UG_CTRL_FLOW_CTRL_UG3_EN_MASK	0x8
+
+
+/*
+ * Register <STATUS>
+ *
+ * Keeps status vector of user group + wred are under flow control.
+ * 3:
+ * 0 - {ug3,ug2,ug1,ug0}, 4 - OR on wred_source for UG -queues which passed
+ * the mid.
+ * thr.
+ * is set to 1.
+ * the user groups indication is de-asserted when the occupancy reaches the
+ * low thr.
+ * 4bits - bit for each user group.
+ * for WRED/occupancy -If one of the queues which pass color1 min occupancy
+ * marked as 1.
+ * when the occupancy is reduced to below than color0 min occupancy in all
+ * queues the bit is set to 0.
+ * FW can set/reset the value, it will updated when flow control which is
+ * relevant to the corresponding bit takes place.
+ */
+#define QM_FLOW_CTRL_STATUS		0xc104
+
+/* User group 0 status */
+#define  QM_FLOW_CTRL_STATUS_UG0_MASK	0x1
+
+/* User group 1 status */
+#define  QM_FLOW_CTRL_STATUS_UG1_MASK	0x2
+
+/* User group 2 status */
+#define  QM_FLOW_CTRL_STATUS_UG2_MASK	0x4
+
+/* User group 3 status */
+#define  QM_FLOW_CTRL_STATUS_UG3_MASK	0x8
+
+/* OR on all wred flow control queues */
+#define  QM_FLOW_CTRL_STATUS_WRED_MASK	0x10
+
+/* reserved */
+#define  QM_FLOW_CTRL_STATUS_R0_SHIFT	5
+#define  QM_FLOW_CTRL_STATUS_R0_MASK	0xffffffe0
+
+
+/*
+ * Registers <WRED_SOURCE> - <x> is [ 0 => 4 ]
+ *
+ * Keeps status vector of queues which are under flow control:
+ * queues which passed the color1 low threshold is set to 1.
+ * the queue indication is de-asserted when the queue byte occupancy
+ * reaches the color0 low threshold.
+ * 320bits - bit for each queue number (up to 320 queues).
+ */
+#define QM_FLOW_CTRL_WRED_SOURCE(x)	(0xc108 + (x) * 0x4)
+
+/* each bit represents queue */
+#define  QM_FLOW_CTRL_WRED_SOURCE_SRC_SHIFT	0
+#define  QM_FLOW_CTRL_WRED_SOURCE_SRC_MASK	0xffffffff
+
+
+/*
+ * Register <QM_FLOW_CTRL_RNR_CFG>
+ *
+ * lossless flow control configuration
+ */
+#define QM_FLOW_CTRL_QM_FLOW_CTRL_RNR_CFG	0xc130
+
+/* Runner BB ID */
+#define  QM_FLOW_CTRL_QM_FLOW_CTRL_RNR_CFG_RNR_BB_ID_SHIFT	0
+#define  QM_FLOW_CTRL_QM_FLOW_CTRL_RNR_CFG_RNR_BB_ID_MASK	0x3f
+
+/* Runner task */
+#define  QM_FLOW_CTRL_QM_FLOW_CTRL_RNR_CFG_RNR_TASK_SHIFT	8
+#define  QM_FLOW_CTRL_QM_FLOW_CTRL_RNR_CFG_RNR_TASK_MASK	0xf00
+
+/*
+ * Runner enable.
+ * if disable, the lossless flow control is disabled.
+*/
+#define  QM_FLOW_CTRL_QM_FLOW_CTRL_RNR_CFG_RNR_ENABLE_MASK	0x10000
+
+
+/*
+ * Register <DEBUG_SEL>
+ *
+ * Controls Debug bus select:
+ * 5h1:
+ * qm_dbg_bus = qm_bb_input_dbg_bus;5h2:
+ * qm_dbg_bus = qm_bb_output_dbg_bus;5h3:
+ * qm_dbg_bus = qm_cm_dbg_bus;5h4:
+ * qm_dbg_bus = qm_ddr_write_dbg_bus;5h5:
+ * qm_dbg_bus = qm_counters_dbg_bus;5h6:
+ * qm_dbg_bus = qm_cpu_if_dbg_bus;5h7:
+ * qm_dbg_bus = qm_dqm_push_dbg_bus;5h8:
+ * qm_dbg_bus = qm_egress_dbg_bus;5h9:
+ * qm_dbg_bus = qm_fpm_prefetch_dbg_bus;5ha:
+ * qm_dbg_bus = qm_ingress_dbg_bus;5hb:
+ * qm_dbg_bus = qm_rmt_fifos_dbg_bus;5hc:
+ * qm_dbg_bus = {19b0,bbh_debug_0};5hd:
+ * qm_dbg_bus = {19b0,bbh_debug_1};5he:
+ * qm_dbg_bus = {19b0,bbh_debug_2};5hf:
+ * qm_dbg_bus = {19b0,bbh_debug_3};5h10:
+ * qm_dbg_bus = {19b0,bbh_debug_4};5h11:
+ * qm_dbg_bus = {19b0,bbh_debug_5};5h12:
+ * qm_dbg_bus = {19b0,bbh_debug_6};5h13:
+ * qm_dbg_bus = {19b0,bbh_debug_7};5h14:
+ * qm_dbg_bus = {19b0,bbh_debug_8};5h15:
+ * qm_dbg_bus = {19b0,bbh_debug_9};5h16:
+ * qm_dbg_bus = {19b0,dma_debug_vec};5h17:
+ * qm_dbg_bus = {8b0,dqm_diag_r};
+ */
+#define QM_DEBUG_SEL			0x16000
+
+/* Debug bus select */
+#define  QM_DEBUG_SEL_SELECT_SHIFT	0
+#define  QM_DEBUG_SEL_SELECT_MASK	0x1f
+
+/* Enable register controlled debug select */
+#define  QM_DEBUG_SEL_ENABLE_MASK	0x80000000
+
+
+/*
+ * Register <DEBUG_BUS_LSB> - read-only
+ *
+ * Debug Bus sampling
+ */
+#define QM_DEBUG_BUS_LSB		0x16004
+
+/* Data */
+#define  QM_DEBUG_BUS_LSB_DATA_SHIFT	0
+#define  QM_DEBUG_BUS_LSB_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <DEBUG_BUS_MSB> - read-only
+ *
+ * Debug Bus sampling
+ */
+#define QM_DEBUG_BUS_MSB		0x16008
+
+/* Data */
+#define  QM_DEBUG_BUS_MSB_DATA_SHIFT	0
+#define  QM_DEBUG_BUS_MSB_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <QM_SPARE_CONFIG> - read-only
+ *
+ * Spare configuration for ECO purposes
+ */
+#define QM_QM_SPARE_CONFIG		0x1600c
+
+/* Data */
+#define  QM_QM_SPARE_CONFIG_DATA_SHIFT	0
+#define  QM_QM_SPARE_CONFIG_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <GOOD_LVL1_PKTS_CNT> - read-only
+ *
+ * Counts the total number of non-dropped and non-reprocessing packets from
+ * all queues
+ */
+#define QM_GOOD_LVL1_PKTS_CNT		0x16010
+
+/* Counter */
+#define  QM_GOOD_LVL1_PKTS_CNT_COUNTER_SHIFT	0
+#define  QM_GOOD_LVL1_PKTS_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <GOOD_LVL1_BYTES_CNT> - read-only
+ *
+ * Counts the total number of non-dropped and non-reprocessing bytes from
+ * all queues
+ */
+#define QM_GOOD_LVL1_BYTES_CNT		0x16014
+
+/* Counter */
+#define  QM_GOOD_LVL1_BYTES_CNT_COUNTER_SHIFT	0
+#define  QM_GOOD_LVL1_BYTES_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <GOOD_LVL2_PKTS_CNT> - read-only
+ *
+ * Counts the total number of non-dropped and reprocessing packets from all
+ * queues
+ */
+#define QM_GOOD_LVL2_PKTS_CNT		0x16018
+
+/* Counter */
+#define  QM_GOOD_LVL2_PKTS_CNT_COUNTER_SHIFT	0
+#define  QM_GOOD_LVL2_PKTS_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <GOOD_LVL2_BYTES_CNT> - read-only
+ *
+ * Counts the total number of non-dropped and reprocessing bytes from all
+ * queues
+ */
+#define QM_GOOD_LVL2_BYTES_CNT		0x1601c
+
+/* Counter */
+#define  QM_GOOD_LVL2_BYTES_CNT_COUNTER_SHIFT	0
+#define  QM_GOOD_LVL2_BYTES_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <COPIED_PKTS_CNT> - read-only
+ *
+ * Counts the total number of copied packets to the DDR from all queues
+ */
+#define QM_COPIED_PKTS_CNT		0x16020
+
+/* Counter */
+#define  QM_COPIED_PKTS_CNT_COUNTER_SHIFT	0
+#define  QM_COPIED_PKTS_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <COPIED_BYTES_CNT> - read-only
+ *
+ * Counts the total number of copied bytes to the DDR from all queues
+ */
+#define QM_COPIED_BYTES_CNT		0x16024
+
+/* Counter */
+#define  QM_COPIED_BYTES_CNT_COUNTER_SHIFT	0
+#define  QM_COPIED_BYTES_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <AGG_PKTS_CNT> - read-only
+ *
+ * Counts the total number of aggregated packets from all queues
+ */
+#define QM_AGG_PKTS_CNT			0x16028
+
+/* Counter */
+#define  QM_AGG_PKTS_CNT_COUNTER_SHIFT	0
+#define  QM_AGG_PKTS_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <AGG_BYTES_CNT> - read-only
+ *
+ * Counts the total number of aggregated bytes from all queues
+ */
+#define QM_AGG_BYTES_CNT		0x1602c
+
+/* Counter */
+#define  QM_AGG_BYTES_CNT_COUNTER_SHIFT	0
+#define  QM_AGG_BYTES_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <AGG_1_PKTS_CNT> - read-only
+ *
+ * Counts the total number of packets aggregated in a 1-packet PD from all
+ * queues
+ */
+#define QM_AGG_1_PKTS_CNT		0x16030
+
+/* Counter */
+#define  QM_AGG_1_PKTS_CNT_COUNTER_SHIFT	0
+#define  QM_AGG_1_PKTS_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <AGG_2_PKTS_CNT> - read-only
+ *
+ * Counts the total number of packets aggregated in a 2-packet PD from all
+ * queues
+ */
+#define QM_AGG_2_PKTS_CNT		0x16034
+
+/* Counter */
+#define  QM_AGG_2_PKTS_CNT_COUNTER_SHIFT	0
+#define  QM_AGG_2_PKTS_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <AGG_3_PKTS_CNT> - read-only
+ *
+ * Counts the total number of packets aggregated in a 3-packet PD from all
+ * queues
+ */
+#define QM_AGG_3_PKTS_CNT		0x16038
+
+/* Counter */
+#define  QM_AGG_3_PKTS_CNT_COUNTER_SHIFT	0
+#define  QM_AGG_3_PKTS_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <AGG_4_PKTS_CNT> - read-only
+ *
+ * Counts the total number of packets aggregated in a 4-packet PD from all
+ * queues
+ */
+#define QM_AGG_4_PKTS_CNT		0x1603c
+
+/* Counter */
+#define  QM_AGG_4_PKTS_CNT_COUNTER_SHIFT	0
+#define  QM_AGG_4_PKTS_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <WRED_DROP_CNT> - read-only
+ *
+ * Counts the total number of packets dropped from all queues due to WRED
+ */
+#define QM_WRED_DROP_CNT		0x16040
+
+/* Counter */
+#define  QM_WRED_DROP_CNT_COUNTER_SHIFT	0
+#define  QM_WRED_DROP_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <FPM_CONGESTION_DROP_CNT> - read-only
+ *
+ * Counts the total number of packets dropped from all queues due to FPM
+ * congestion indication
+ */
+#define QM_FPM_CONGESTION_DROP_CNT	0x16048
+
+/* Counter */
+#define  QM_FPM_CONGESTION_DROP_CNT_COUNTER_SHIFT	0
+#define  QM_FPM_CONGESTION_DROP_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <DDR_PD_CONGESTION_DROP_CNT> - read-only
+ *
+ * Counts the total number of packets dropped from all queues due to DDR PD
+ * congestion
+ */
+#define QM_DDR_PD_CONGESTION_DROP_CNT	0x16050
+
+/* Counter */
+#define  QM_DDR_PD_CONGESTION_DROP_CNT_COUNTER_SHIFT	0
+#define  QM_DDR_PD_CONGESTION_DROP_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <DDR_BYTE_CONGESTION_DROP_CNT> - read-only
+ *
+ * Counts the total number of packets dropped from all queues due to DDR
+ * byte congestion (number of bytes waiting to be copied exceeded the
+ * thresholds)
+ */
+#define QM_DDR_BYTE_CONGESTION_DROP_CNT	0x16054
+
+/* Counter */
+#define  QM_DDR_BYTE_CONGESTION_DROP_CNT_COUNTER_SHIFT	0
+#define  QM_DDR_BYTE_CONGESTION_DROP_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <QM_PD_CONGESTION_DROP_CNT> - read-only
+ *
+ * Counts the total number of packets dropped from all queues due to QM PD
+ * congestion (this value is limited by the DQM)
+ */
+#define QM_QM_PD_CONGESTION_DROP_CNT	0x16058
+
+/* Counter */
+#define  QM_QM_PD_CONGESTION_DROP_CNT_COUNTER_SHIFT	0
+#define  QM_QM_PD_CONGESTION_DROP_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <QM_ABS_REQUEUE_CNT> - read-only
+ *
+ * Counts the total number of packets requeued due to absolute address
+ * drops from all queues
+ */
+#define QM_QM_ABS_REQUEUE_CNT		0x1605c
+
+/* Counter */
+#define  QM_QM_ABS_REQUEUE_CNT_COUNTER_SHIFT	0
+#define  QM_QM_ABS_REQUEUE_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <FPM_PREFETCH_FIFO0_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_FPM_PREFETCH_FIFO0_STATUS	0x16060
+
+/* Used words */
+#define  QM_FPM_PREFETCH_FIFO0_STATUS_USED_WORDS_SHIFT	0
+#define  QM_FPM_PREFETCH_FIFO0_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_FPM_PREFETCH_FIFO0_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_FPM_PREFETCH_FIFO0_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <FPM_PREFETCH_FIFO1_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_FPM_PREFETCH_FIFO1_STATUS	0x16064
+
+/* Used words */
+#define  QM_FPM_PREFETCH_FIFO1_STATUS_USED_WORDS_SHIFT	0
+#define  QM_FPM_PREFETCH_FIFO1_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_FPM_PREFETCH_FIFO1_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_FPM_PREFETCH_FIFO1_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <FPM_PREFETCH_FIFO2_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_FPM_PREFETCH_FIFO2_STATUS	0x16068
+
+/* Used words */
+#define  QM_FPM_PREFETCH_FIFO2_STATUS_USED_WORDS_SHIFT	0
+#define  QM_FPM_PREFETCH_FIFO2_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_FPM_PREFETCH_FIFO2_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_FPM_PREFETCH_FIFO2_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <FPM_PREFETCH_FIFO3_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_FPM_PREFETCH_FIFO3_STATUS	0x1606c
+
+/* Used words */
+#define  QM_FPM_PREFETCH_FIFO3_STATUS_USED_WORDS_SHIFT	0
+#define  QM_FPM_PREFETCH_FIFO3_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_FPM_PREFETCH_FIFO3_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_FPM_PREFETCH_FIFO3_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <NORMAL_RMT_FIFO_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_NORMAL_RMT_FIFO_STATUS	0x16070
+
+/* Used words */
+#define  QM_NORMAL_RMT_FIFO_STATUS_USED_WORDS_SHIFT	0
+#define  QM_NORMAL_RMT_FIFO_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_NORMAL_RMT_FIFO_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_NORMAL_RMT_FIFO_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <NON_DELAYED_RMT_FIFO_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_NON_DELAYED_RMT_FIFO_STATUS	0x16074
+
+/* Used words */
+#define  QM_NON_DELAYED_RMT_FIFO_STATUS_USED_WORDS_SHIFT	0
+#define  QM_NON_DELAYED_RMT_FIFO_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_NON_DELAYED_RMT_FIFO_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_NON_DELAYED_RMT_FIFO_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <NON_DELAYED_OUT_FIFO_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_NON_DELAYED_OUT_FIFO_STATUS	0x16078
+
+/* Used words */
+#define  QM_NON_DELAYED_OUT_FIFO_STATUS_USED_WORDS_SHIFT	0
+#define  QM_NON_DELAYED_OUT_FIFO_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_NON_DELAYED_OUT_FIFO_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_NON_DELAYED_OUT_FIFO_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <PRE_CM_FIFO_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_PRE_CM_FIFO_STATUS		0x1607c
+
+/* Used words */
+#define  QM_PRE_CM_FIFO_STATUS_USED_WORDS_SHIFT	0
+#define  QM_PRE_CM_FIFO_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_PRE_CM_FIFO_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_PRE_CM_FIFO_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <CM_RD_PD_FIFO_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_CM_RD_PD_FIFO_STATUS		0x16080
+
+/* Used words */
+#define  QM_CM_RD_PD_FIFO_STATUS_USED_WORDS_SHIFT	0
+#define  QM_CM_RD_PD_FIFO_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_CM_RD_PD_FIFO_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_CM_RD_PD_FIFO_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <CM_WR_PD_FIFO_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_CM_WR_PD_FIFO_STATUS		0x16084
+
+/* Used words */
+#define  QM_CM_WR_PD_FIFO_STATUS_USED_WORDS_SHIFT	0
+#define  QM_CM_WR_PD_FIFO_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_CM_WR_PD_FIFO_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_CM_WR_PD_FIFO_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <CM_COMMON_INPUT_FIFO_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_CM_COMMON_INPUT_FIFO_STATUS	0x16088
+
+/* Used words */
+#define  QM_CM_COMMON_INPUT_FIFO_STATUS_USED_WORDS_SHIFT	0
+#define  QM_CM_COMMON_INPUT_FIFO_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_CM_COMMON_INPUT_FIFO_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_CM_COMMON_INPUT_FIFO_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <BB0_OUTPUT_FIFO_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_BB0_OUTPUT_FIFO_STATUS	0x1608c
+
+/* Used words */
+#define  QM_BB0_OUTPUT_FIFO_STATUS_USED_WORDS_SHIFT	0
+#define  QM_BB0_OUTPUT_FIFO_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_BB0_OUTPUT_FIFO_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_BB0_OUTPUT_FIFO_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <BB1_OUTPUT_FIFO_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_BB1_OUTPUT_FIFO_STATUS	0x16090
+
+/* Used words */
+#define  QM_BB1_OUTPUT_FIFO_STATUS_USED_WORDS_SHIFT	0
+#define  QM_BB1_OUTPUT_FIFO_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_BB1_OUTPUT_FIFO_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_BB1_OUTPUT_FIFO_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <BB1_INPUT_FIFO_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_BB1_INPUT_FIFO_STATUS	0x16094
+
+/* Used words */
+#define  QM_BB1_INPUT_FIFO_STATUS_USED_WORDS_SHIFT	0
+#define  QM_BB1_INPUT_FIFO_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_BB1_INPUT_FIFO_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_BB1_INPUT_FIFO_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <EGRESS_DATA_FIFO_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_EGRESS_DATA_FIFO_STATUS	0x16098
+
+/* Used words */
+#define  QM_EGRESS_DATA_FIFO_STATUS_USED_WORDS_SHIFT	0
+#define  QM_EGRESS_DATA_FIFO_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_EGRESS_DATA_FIFO_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_EGRESS_DATA_FIFO_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Register <EGRESS_RR_FIFO_STATUS> - read-only
+ *
+ * Holds the FIFO Status
+ */
+#define QM_EGRESS_RR_FIFO_STATUS	0x1609c
+
+/* Used words */
+#define  QM_EGRESS_RR_FIFO_STATUS_USED_WORDS_SHIFT	0
+#define  QM_EGRESS_RR_FIFO_STATUS_USED_WORDS_MASK	0xffff
+
+/* Empty */
+#define  QM_EGRESS_RR_FIFO_STATUS_EMPTY_MASK	0x10000
+
+/* Full */
+#define  QM_EGRESS_RR_FIFO_STATUS_FULL_MASK	0x20000
+
+
+/*
+ * Registers <BB_ROUTE_OVR> - <x> is [ 0 => 1 ]
+ *
+ * BB ROUTE Override:
+ * 0 - for QM_TOP, 1 - for RNR_GRID
+ */
+#define QM_BB_ROUTE_OVR(x)		(0x160a0 + (x) * 0x4)
+
+/* BB route address decode Override enable */
+#define  QM_BB_ROUTE_OVR_OVR_EN_MASK	0x1
+
+/* Destination ID */
+#define  QM_BB_ROUTE_OVR_DEST_ID_SHIFT	8
+#define  QM_BB_ROUTE_OVR_DEST_ID_MASK	0x3f00
+
+/* Route Address */
+#define  QM_BB_ROUTE_OVR_ROUTE_ADDR_SHIFT	16
+#define  QM_BB_ROUTE_OVR_ROUTE_ADDR_MASK	0x3ff0000
+
+
+/*
+ * Register <QM_INGRESS_STAT> - read-only
+ *
+ * Holds the Ingress Status
+ */
+#define QM_QM_INGRESS_STAT		0x160b0
+
+/* Stat */
+#define  QM_QM_INGRESS_STAT_STAT_SHIFT	0
+#define  QM_QM_INGRESS_STAT_STAT_MASK	0xffffffff
+
+
+/*
+ * Register <QM_EGRESS_STAT> - read-only
+ *
+ * Holds the Egress Status
+ */
+#define QM_QM_EGRESS_STAT		0x160b4
+
+/* Stat */
+#define  QM_QM_EGRESS_STAT_STAT_SHIFT	0
+#define  QM_QM_EGRESS_STAT_STAT_MASK	0xffffffff
+
+
+/*
+ * Register <QM_CM_STAT> - read-only
+ *
+ * Holds the CM Status
+ */
+#define QM_QM_CM_STAT			0x160b8
+
+/* Stat */
+#define  QM_QM_CM_STAT_STAT_SHIFT	0
+#define  QM_QM_CM_STAT_STAT_MASK	0xffffffff
+
+
+/*
+ * Register <QM_FPM_PREFETCH_STAT> - read-only
+ *
+ * Holds the FPM Prefetch Status
+ */
+#define QM_QM_FPM_PREFETCH_STAT		0x160bc
+
+/* Stat */
+#define  QM_QM_FPM_PREFETCH_STAT_STAT_SHIFT	0
+#define  QM_QM_FPM_PREFETCH_STAT_STAT_MASK	0xffffffff
+
+
+/*
+ * Register <QM_CONNECT_ACK_COUNTER> - read-only
+ *
+ * QM connect ack counter
+ */
+#define QM_QM_CONNECT_ACK_COUNTER	0x160c0
+
+/* Pending SBPM Connect ACKs counter */
+#define  QM_QM_CONNECT_ACK_COUNTER_CONNECT_ACK_COUNTER_SHIFT	0
+#define  QM_QM_CONNECT_ACK_COUNTER_CONNECT_ACK_COUNTER_MASK	0xff
+
+
+/*
+ * Register <QM_DDR_WR_REPLY_COUNTER> - read-only
+ *
+ * QM DDR WR reply Counter
+ */
+#define QM_QM_DDR_WR_REPLY_COUNTER	0x160c4
+
+/* Pending DDR WR Replies counter */
+#define  QM_QM_DDR_WR_REPLY_COUNTER_DDR_WR_REPLY_COUNTER_SHIFT	0
+#define  QM_QM_DDR_WR_REPLY_COUNTER_DDR_WR_REPLY_COUNTER_MASK	0xff
+
+
+/*
+ * Register <QM_DDR_PIPE_BYTE_COUNTER> - read-only
+ *
+ * QM DDR pipe byte counter
+ */
+#define QM_QM_DDR_PIPE_BYTE_COUNTER	0x160c8
+
+/* Pending bytes to be copied to the DDR */
+#define  QM_QM_DDR_PIPE_BYTE_COUNTER_COUNTER_SHIFT	0
+#define  QM_QM_DDR_PIPE_BYTE_COUNTER_COUNTER_MASK	0xfffffff
+
+
+/*
+ * Register <QM_ABS_REQUEUE_VALID_COUNTER> - read-only
+ *
+ * Indicates the number of PDs currently in the Absolute address drop
+ * queue.
+ */
+#define QM_QM_ABS_REQUEUE_VALID_COUNTER	0x160cc
+
+/* Counter */
+#define  QM_QM_ABS_REQUEUE_VALID_COUNTER_COUNTER_SHIFT	0
+#define  QM_QM_ABS_REQUEUE_VALID_COUNTER_COUNTER_MASK	0x7fff
+
+
+/*
+ * Registers <QM_ILLEGAL_PD_CAPTURE> - <x> is [ 0 => 3 ] - read-only
+ *
+ * PD captured when an illegal PD was detected and the relevant interrupt
+ * was generated.
+ */
+#define QM_QM_ILLEGAL_PD_CAPTURE(x)	(0x160d0 + (x) * 0x4)
+
+/* PD */
+#define  QM_QM_ILLEGAL_PD_CAPTURE_PD_SHIFT	0
+#define  QM_QM_ILLEGAL_PD_CAPTURE_PD_MASK	0xffffffff
+
+
+/*
+ * Registers <QM_INGRESS_PROCESSED_PD_CAPTURE> - <x> is [ 0 => 3 ] - read-only
+ *
+ * Last ingress processed PD capture
+ */
+#define QM_QM_INGRESS_PROCESSED_PD_CAPTURE(x)	(0x160e0 + (x) * 0x4)
+
+/* PD */
+#define  QM_QM_INGRESS_PROCESSED_PD_CAPTURE_PD_SHIFT	0
+#define  QM_QM_INGRESS_PROCESSED_PD_CAPTURE_PD_MASK	0xffffffff
+
+
+/*
+ * Registers <QM_CM_PROCESSED_PD_CAPTURE> - <x> is [ 0 => 3 ] - read-only
+ *
+ * Last copy machine processed PD capture
+ */
+#define QM_QM_CM_PROCESSED_PD_CAPTURE(x)	(0x160f0 + (x) * 0x4)
+
+/* PD */
+#define  QM_QM_CM_PROCESSED_PD_CAPTURE_PD_SHIFT	0
+#define  QM_QM_CM_PROCESSED_PD_CAPTURE_PD_MASK	0xffffffff
+
+
+/*
+ * Registers <FPM_POOL_DROP_CNT> - <x> is [ 0 => 3 ] - read-only
+ *
+ * Counts the total number of packets dropped for all queues due to FPM
+ * pool priority thresholds.
+ * Counter per pool
+ */
+#define QM_FPM_POOL_DROP_CNT(x)		(0x16100 + (x) * 0x4)
+
+/* Counter */
+#define  QM_FPM_POOL_DROP_CNT_COUNTER_SHIFT	0
+#define  QM_FPM_POOL_DROP_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Registers <FPM_GRP_DROP_CNT> - <x> is [ 0 => 3 ] - read-only
+ *
+ * Counts the total number of packets dropped from all queues due to FPM
+ * user group priority thresholds.
+ * Counter per UG (0-3)
+ */
+#define QM_FPM_GRP_DROP_CNT(x)		(0x16110 + (x) * 0x4)
+
+/* Counter */
+#define  QM_FPM_GRP_DROP_CNT_COUNTER_SHIFT	0
+#define  QM_FPM_GRP_DROP_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <FPM_BUFFER_RES_DROP_CNT> - read-only
+ *
+ * Counts the total number of packets dropped from all queues due to buffer
+ * reservation mechanism.
+ */
+#define QM_FPM_BUFFER_RES_DROP_CNT	0x16120
+
+/* Counter */
+#define  QM_FPM_BUFFER_RES_DROP_CNT_COUNTER_SHIFT	0
+#define  QM_FPM_BUFFER_RES_DROP_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Register <PSRAM_EGRESS_CONG_DRP_CNT> - read-only
+ *
+ * Counts the total number of packets dropped from all queues due to psram
+ * egress congestion.
+ */
+#define QM_PSRAM_EGRESS_CONG_DRP_CNT	0x16124
+
+/* Counter */
+#define  QM_PSRAM_EGRESS_CONG_DRP_CNT_COUNTER_SHIFT	0
+#define  QM_PSRAM_EGRESS_CONG_DRP_CNT_COUNTER_MASK	0xffffffff
+
+
+/*
+ * Registers <DATA> - <x> is [ 0 => 2559 ] - read-only
+ *
+ * CM Residue - debug access
+ */
+#define QM_DATA(x)			(0x20000 + (x) * 0x4)
+
+/* DATA */
+#define  QM_DATA_DATA_SHIFT		0
+#define  QM_DATA_DATA_MASK		0xffffffff
+
+
+/*
+ * Register <VPB_BASE>
+ *
+ * VPB Base address
+ */
+#define QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_VPB_BASE	0x40004
+
+/* base */
+#define  QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_VPB_BASE_BASE_SHIFT	0
+#define  QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_VPB_BASE_BASE_MASK	0xffffffff
+
+
+/*
+ * Register <VPB_MASK>
+ *
+ * VPB mask address
+ */
+#define QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_VPB_MASK	0x40008
+
+/* mask */
+#define  QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_VPB_MASK_MASK_SHIFT	0
+#define  QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_VPB_MASK_MASK_MASK	0xffffffff
+
+
+/*
+ * Register <APB_BASE>
+ *
+ * APB Base address
+ */
+#define QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_APB_BASE	0x4000c
+
+/* base */
+#define  QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_APB_BASE_BASE_SHIFT	0
+#define  QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_APB_BASE_BASE_MASK	0xffffffff
+
+
+/*
+ * Register <APB_MASK>
+ *
+ * APB mask address
+ */
+#define QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_APB_MASK	0x40010
+
+/* mask */
+#define  QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_APB_MASK_MASK_SHIFT	0
+#define  QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_APB_MASK_MASK_MASK	0xffffffff
+
+
+/*
+ * Register <DQM_BASE>
+ *
+ * DQM Base address
+ */
+#define QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_DQM_BASE	0x40014
+
+/* base */
+#define  QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_DQM_BASE_BASE_SHIFT	0
+#define  QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_DQM_BASE_BASE_MASK	0xffffffff
+
+
+/*
+ * Register <DQM_MASK>
+ *
+ * DQM mask address
+ */
+#define QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_DQM_MASK	0x40018
+
+/* mask */
+#define  QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_DQM_MASK_MASK_SHIFT	0
+#define  QM_XRDP_UBUS_TOP_QM_XRDP_UBUS_SLV_DQM_MASK_MASK_MASK	0xffffffff
+
+
+/*
+ * Register <MAC_TYPE>
+ *
+ * The BBH supports working with different MAC types.
+ * Each MAC requires different interface and features.
+ * This register defines the type of MAC the BBH works with.
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_MACTYPE	0x50000
+
+/* MAC type */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_MACTYPE_TYPE_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_MACTYPE_TYPE_MASK	0x7
+
+
+/*
+ * Register <BB_CFG_1>
+ *
+ * Each BBH unit has its own position on the BB tree.
+ * This position defines the Route address when approaching the Runner,
+ * S/DMA or S/BPM.
+ * The route is determined by a dedicated generic logic which uses the
+ * source id of the destination.
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_1_TX	0x50004
+
+/*
+ * source id.
+ * This id is used to determine the route to the module.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_1_TX_DMASRC_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_1_TX_DMASRC_MASK	0x3f
+
+/*
+ * source id.
+ * This id is used to determine the route to the module.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_1_TX_SDMASRC_SHIFT	8
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_1_TX_SDMASRC_MASK	0x3f00
+
+/*
+ * source id.
+ * This id is used to determine the route to the module.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_1_TX_SBPMSRC_SHIFT	16
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_1_TX_SBPMSRC_MASK	0x3f0000
+
+/*
+ * source id.
+ * This id is used to determine the route to the module.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_1_TX_FPMSRC_SHIFT	24
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_1_TX_FPMSRC_MASK	0x3f000000
+
+
+/*
+ * Register <BB_CFG_2>
+ *
+ * Each BBH unit has its own position on the BB tree.
+ * This position defines the Route address when approaching the Runner,
+ * S/DMA or S/BPM.
+ * The route is determined by a dedicated generic logic which uses the
+ * source id of the destination.
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_2_TX	0x50008
+
+/*
+ * source id.
+ * This id is used to determine the route to the 1st (out of possible 2
+ * runners) which are responsible for sending PDs.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_2_TX_PDRNR0SRC_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_2_TX_PDRNR0SRC_MASK	0x3f
+
+/*
+ * source id.
+ * This id is used to determine the route to the 2nd (out of possible 2
+ * runners) which are responsible for sending PDs.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_2_TX_PDRNR1SRC_SHIFT	8
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_2_TX_PDRNR1SRC_MASK	0x3f00
+
+/*
+ * source id.
+ * This id is used to determine the route to the Runner that is responsible
+ * for sending status messages (WAN only).
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_2_TX_STSRNRSRC_SHIFT	16
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_2_TX_STSRNRSRC_MASK	0x3f0000
+
+/*
+ * source id.
+ * This id is used to determine the route to the Runner which is
+ * responsible for sending DBR/Ghost messages (WAN only).
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_2_TX_MSGRNRSRC_SHIFT	24
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBCFG_2_TX_MSGRNRSRC_MASK	0x3f000000
+
+
+/*
+ * Register <RD_ADDR_CFG>
+ *
+ * Configurations for determining the address to read from the DDR/PSRAM
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRCFG_TX	0x5000c
+
+/* The data is arranged in the DDR in a fixed size buffers. */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRCFG_TX_BUFSIZE_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRCFG_TX_BUFSIZE_MASK	0x7
+
+/* The packet offset byte resolution. */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRCFG_TX_BYTERESUL_MASK	0x8
+
+/* Static offset in 8-bytes resolution for non aggregated packets in DDR */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRCFG_TX_DDRTXOFFSET_SHIFT	4
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRCFG_TX_DDRTXOFFSET_MASK	0x1ff0
+
+/*
+ * The size of the HN (Header number) in bytes.
+ * The BBH decides between size 0 and size 1 according to a bit in the PD
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRCFG_TX_HNSIZE0_SHIFT	16
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRCFG_TX_HNSIZE0_MASK	0x7f0000
+
+/*
+ * The size of the HN (Header number) in bytes.
+ * The BBH decides between size 0 and size 1 according to a bit in the PD
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRCFG_TX_HNSIZE1_SHIFT	24
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRCFG_TX_HNSIZE1_MASK	0x7f000000
+
+
+/*
+ * Registers <PD_RNR_CFG_1> - <x> is [ 0 => 1 ]
+ *
+ * Queue index address:
+ * The BBH requests a Packet descriptor from the Runner.
+ * The BBH writes the queue number in a predefined address at the Runner
+ * SRAM.
+ * The message serves also as a wake-up request to the Runner.
+ * This register defines the queue index address within the Runner address
+ * space.
+ * SKB address:
+ * When the packet is transmitted from absolute address, then, instead of
+ * releasing the BN, the BBH writes a 6 bits read counter into the Runner
+ * SRAM.
+ * It writes it into a pre-defined address + TCONT_NUM (for Ethernet
+ * TCONT_NUM = 0).
+ * This register defines the SKB free base address within the Runner
+ * address.
+ * Note:
+ * all addresses are in 8 byte resolution.
+ * As the Runner memory is limited to 12 bits address, use the 12 lsb bits.
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_RNRCFG_1(x)	(0x50010 + (x) * 0x4)
+
+/*
+ * Defines the TCONT address within the Runner address space.
+ * The address is in 8 bytes resolution.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_RNRCFG_1_TCONTADDR_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_RNRCFG_1_TCONTADDR_MASK	0xffff
+
+/*
+ * Defines the SKB free address within the Runner address space.
+ * The address is in 8-bytes resolution.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_RNRCFG_1_SKBADDR_SHIFT	16
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_RNRCFG_1_SKBADDR_MASK	0xffff0000
+
+
+/*
+ * Registers <PD_RNR_CFG_2> - <x> is [ 0 => 1 ]
+ *
+ * PD transfer process:
+ * -The Runner won't ACK the BBH; therefore the BBH won't wake the TX task.
+ * -The Runner will push the PDs into the BBH (without any wakeup from the
+ * BBH).
+ * -Each time that the BBH reads a PD from the PD FIFO, it will write the
+ * read pointer into a pre-defined address in the Runner.
+ * The pointer is 6 bits wide (one bit larger than needed to distinguish
+ * between full and empty).
+ * -The Runner should manage the congestion over the PD FIFO (in the BBH)
+ * by reading the BBH read pointer prior to each PD write.
+ * -PD drop should be done by the Runner only.
+ * The BBH will drop PD when the FIFO is full and will count each drop.
+ * The BBH won't release the BN in this case.
+ * -There will be a full threshold, which can be smaller than the actual
+ * size of the FIFO.
+ * When the BBH will move from full to not full state, the BBH will wakeup
+ * the Runner.
+ * Note:
+ * all addresses are in 8 byte resolution.
+ * As the Runner memory is limited to 12 bits address, use the 12 lsb bits.
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_RNRCFG_2(x)	(0x50018 + (x) * 0x4)
+
+/*
+ * This field defines the address in the Runner memory space to which the
+ * read pointer is written.
+ * The address is in 8-bytes resolution.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_RNRCFG_2_PTRADDR_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_RNRCFG_2_PTRADDR_MASK	0xffff
+
+/* The number of the task that is responsible for sending PDs to the BBH */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_RNRCFG_2_TASK_SHIFT	16
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_RNRCFG_2_TASK_MASK	0xf0000
+
+
+/*
+ * Register <DMA_CFG>
+ *
+ * The BBH reads the packet data from the DDR in chunks (with a maximal
+ * size of 128 bytes).
+ * For each chunk the BBH writes a read request (descriptor) into the DMA
+ * memory space.
+ * The read descriptors are arranged in a predefined space in the DMA
+ * memory and managed in a cyclic FIFO style.
+ * A special configuration limits the maximum number of read requests.
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DMACFG_TX	0x50020
+
+/*
+ * Defines the base address of the read request FIFO within the DMA address
+ * space.
+ * The value should be identical to the relevant configuration in the DMA.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DMACFG_TX_DESCBASE_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DMACFG_TX_DESCBASE_MASK	0x3f
+
+/* The size of the BBH read requests FIFO inside the DMA */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DMACFG_TX_DESCSIZE_SHIFT	6
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DMACFG_TX_DESCSIZE_MASK	0xfc0
+
+/* Defines the maximum allowed number of on-the-fly read requests. */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DMACFG_TX_MAXREQ_SHIFT	16
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DMACFG_TX_MAXREQ_MASK	0x3f0000
+
+/*
+ * When asserted, this bit forces urgent priority on the EPON read requests
+ * towards the DMA (relevant only for EPON BBH)
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DMACFG_TX_EPNURGNT_MASK	0x1000000
+
+/*
+ * When asserted, this bit forces urgent priority on read requests of a
+ * jumbo packet (>2K)
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DMACFG_TX_JUMBOURGNT_MASK	0x2000000
+
+
+/*
+ * Register <SDMA_CFG>
+ *
+ * The BBH reads the packet data from the PSRAM in chunks (with a maximal
+ * size of 128 bytes).
+ * For each chunk the BBH writes a read request (descriptor) into the SDMA
+ * memory space.
+ * The read descriptors are arranged in a predefined space in the SDMA
+ * memory and managed in a cyclic FIFO style.
+ * A special configuration limits the maximum number of read requests.
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_SDMACFG_TX	0x50024
+
+/*
+ * Defines the base address of the read request FIFO within the DMA address
+ * space.
+ * The value should be identical to the relevant configuration in the DMA.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_SDMACFG_TX_DESCBASE_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_SDMACFG_TX_DESCBASE_MASK	0x3f
+
+/* The size of the BBH read requests FIFO inside the DMA */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_SDMACFG_TX_DESCSIZE_SHIFT	6
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_SDMACFG_TX_DESCSIZE_MASK	0xfc0
+
+/* Defines the maximum allowed number of on-the-fly read requests. */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_SDMACFG_TX_MAXREQ_SHIFT	16
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_SDMACFG_TX_MAXREQ_MASK	0x3f0000
+
+/*
+ * When asserted, this bit forces urgent priority on the EPON read requests
+ * towards the DMA (relevant only for EPON BBH)
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_SDMACFG_TX_EPNURGNT_MASK	0x1000000
+
+/*
+ * When asserted, this bit forces urgent priority on Jumbo packets (>2k)
+ * read requests
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_SDMACFG_TX_JUMBOURGNT_MASK	0x2000000
+
+
+/*
+ * Register <SBPM_CFG>
+ *
+ * When packet transmission is done, the BBH releases the SBPM buffers.
+ * This register defines which release command is used:
+ * 1. Normal free with context
+ * 2. Special free with context
+ * 3. Free without context
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_SBPMCFG	0x50028
+
+/* When this bit is enabled, the BBH will use free without context command. */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_SBPMCFG_FREENOCNTXT_MASK	0x1
+
+/*
+ * When this bit is enabled, the BBH will use special free with context
+ * command.
+ * This bit is relevant only if free without context_en is configured to 0.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_SBPMCFG_SPECIALFREE_MASK	0x2
+
+/* maximum number of pending on the fly get next commands */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_SBPMCFG_MAXGN_SHIFT	8
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_SBPMCFG_MAXGN_MASK	0x1f00
+
+
+/*
+ * Registers <DDR_TM_BASE_LOW> - <x> is [ 0 => 1 ]
+ *
+ * The BBH calculates the DDR physical address according to the Buffer
+ * number and buffer size and then adds the DDR TM base.
+ * The DDR TM address space is divided to two - coherent and non coherent.
+ * The first register in this array defines the base address of the non
+ * coherent space and the second is for the coherent.
+ * The value of this register should match the relevant registers value in
+ * the BBH RX, QM and the Runner.
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRTMBASEL(x)	(0x5002c + (x) * 0x4)
+
+/*
+ * DDR TM base.
+ * The address is in bytes resolution.
+ * The address should be aligned to 128 bytes.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRTMBASEL_DDRTMBASE_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRTMBASEL_DDRTMBASE_MASK	0xffffffff
+
+
+/*
+ * Registers <DDR_TM_BASE_HIGH> - <x> is [ 0 => 1 ]
+ *
+ * The BBH calculates the DDR physical address according to the Buffer
+ * number and buffer size and then adds the DDR TM base.
+ * The DDR TM address space is divided to two - coherent and non coherent.
+ * The first register in this array defines the base address of the non
+ * coherent space and the second is for the coherent.
+ * The value of this register should match the relevant registers value in
+ * the BBH RX, QM and the Runner.
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRTMBASEH(x)	(0x50034 + (x) * 0x4)
+
+/* MSB of DDR TM base. */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRTMBASEH_DDRTMBASE_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DDRTMBASEH_DDRTMBASE_MASK	0xff
+
+
+/*
+ * Register <DATA_FIFO_CTRL>
+ *
+ * The BBH orders data both from DDR and PSRAM.
+ * The returned data is stored in two FIFOs for reordering.
+ * The two FIFOs are implemented in a single RAM.
+ * This register defines the division of the RAM to two FIFOs.
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DFIFOCTRL	0x5003c
+
+/*
+ * The size of the PSRAM data FIFO in 8 bytes resolution.
+ * The BBH uses this information for determining the amount of data that
+ * can be ordered from the PSRAM.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DFIFOCTRL_PSRAMSIZE_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DFIFOCTRL_PSRAMSIZE_MASK	0x3ff
+
+/*
+ * The size of the DDR data FIFO in 8 bytes resolution.
+ * The BBH uses this information for determining the amount of data that
+ * can be ordered from the DDR.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DFIFOCTRL_DDRSIZE_SHIFT	10
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DFIFOCTRL_DDRSIZE_MASK	0xffc00
+
+/*
+ * the base address of the PSRAM data FIFO in 8 bytes resolution.
+ * The DDR data FIFO base address is always 0.
+ * In case the whole RAM is to be dedicated to PSRAM data, the base should
+ * be 0 as well, and the DDR FIFO size should be configured to 0.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DFIFOCTRL_PSRAMBASE_SHIFT	20
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DFIFOCTRL_PSRAMBASE_MASK	0x3ff00000
+
+
+/*
+ * Register <ARB_CFG>
+ *
+ * configurations related to different arbitration processes (ordering PDs,
+ * ordering data)
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_ARB_CFG	0x50040
+
+/*
+ * this configuration determines whether to give high priority to a current
+ * transmitting queue or not.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_ARB_CFG_HIGHTRXQ_MASK	0x1
+
+
+/*
+ * Register <BB_ROUTE_OVERRIDE>
+ *
+ * override configuration for the route of one of the peripherals
+ * (DMA/SDMA/FPM/SBPM/Runners)
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBROUTE	0x50044
+
+/* route address */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBROUTE_ROUTE_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBROUTE_ROUTE_MASK	0x3ff
+
+/* destination source id */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBROUTE_DEST_SHIFT	10
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBROUTE_DEST_MASK	0xfc00
+
+/* enable */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_BBROUTE_EN_MASK	0x10000
+
+
+/*
+ * Registers <Q_TO_RNR> - <x> is [ 0 => 19 ]
+ *
+ * configuration which queue is managed by each of the two runners.
+ * Each register in this array configures 2 queues.
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_Q2RNR(x)	(0x50048 + (x) * 0x4)
+
+/* Q0 configuration */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_Q2RNR_Q0_MASK	0x1
+
+/* Q1 configuration */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_Q2RNR_Q1_MASK	0x2
+
+
+/*
+ * Register <PER_Q_TASK>
+ *
+ * which task in the runner to wake-up when requesting a PD for a certain
+ * q.
+ * This register holds the task number of the first 8 queues.
+ * For queues 8-40 (if they exist) the task that will be waking is the one
+ * appearing in the PD_RNR_CFG regs, depending on which runner this queue
+ * is associated with.
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK	0x500a0
+
+/* task number for queue 0 */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK0_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK0_MASK	0xf
+
+/* task number for queue 1 */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK1_SHIFT	4
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK1_MASK	0xf0
+
+/* task number for queue 2 */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK2_SHIFT	8
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK2_MASK	0xf00
+
+/* task number for queue 3 */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK3_SHIFT	12
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK3_MASK	0xf000
+
+/* task number for queue 4 */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK4_SHIFT	16
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK4_MASK	0xf0000
+
+/* task number for queue 5 */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK5_SHIFT	20
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK5_MASK	0xf00000
+
+/* task number for queue 6 */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK6_SHIFT	24
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK6_MASK	0xf000000
+
+/* task number for queue 7 */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK7_SHIFT	28
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_PERQTASK_TASK7_MASK	0xf0000000
+
+
+/*
+ * Register <TX_RESET_COMMAND>
+ *
+ * This register enables reset of internal units (for possible WA
+ * purposes).
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD	0x500b0
+
+/*
+ * Writing 1 to this register will reset the segmentation context table.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_CNTXTRST_MASK	0x1
+
+/*
+ * Writing 1 to this register will reset the PDs FIFOs.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_PDFIFORST_MASK	0x2
+
+/*
+ * Writing 1 to this register will reset the DMA write pointer.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_DMAPTRRST_MASK	0x4
+
+/*
+ * Writing 1 to this register will reset the SDMA write pointer.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+ * This register is relevant only for Ethernet.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_SDMAPTRRST_MASK	0x8
+
+/*
+ * Writing 1 to this register will reset the BPM FIFO.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_BPMFIFORST_MASK	0x10
+
+/*
+ * Writing 1 to this register will reset the SBPM FIFO.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+ * This register is relevant only for Ethernet.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_SBPMFIFORST_MASK	0x20
+
+/*
+ * Writing 1 to this register will reset the order keeper FIFO.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+ * This register is relevant only for Ethernet.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_OKFIFORST_MASK	0x40
+
+/*
+ * Writing 1 to this register will reset the DDR data FIFO.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+ * This register is relevant only for Ethernet.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_DDRFIFORST_MASK	0x80
+
+/*
+ * Writing 1 to this register will reset the SRAM data FIFO.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+ * This register is relevant only for Ethernet.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_SRAMFIFORST_MASK	0x100
+
+/*
+ * Writing 1 to this register will reset the SKB pointers.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_SKBPTRRST_MASK	0x200
+
+/*
+ * Writing 1 to this register will reset the EPON status FIFOs (per queue
+ * 32 fifos).
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_STSFIFORST_MASK	0x400
+
+/*
+ * Writing 1 to this register will reset the EPON request FIFO (8 entries
+ * FIFO that holds the packet requests from the EPON MAC).
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_REQFIFORST_MASK	0x800
+
+/*
+ * Writing 1 to this register will reset the EPON/GPON MSG FIFO.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_MSGFIFORST_MASK	0x1000
+
+/*
+ * Writing 1 to this register will reset the GET NEXT FIFOs.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_GNXTFIFORST_MASK	0x2000
+
+/*
+ * Writing 1 to this register will reset the FIRST BN FIFOs.
+ * The reset is done immediately.
+ * Reading this register will always return 0.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_TXRSTCMD_FBNFIFORST_MASK	0x4000
+
+
+/*
+ * Register <DEBUG_SELECT>
+ *
+ * This register selects 1 of 8 debug vectors.
+ * The selected vector is reflected to DBGOUTREG.
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DBGSEL	0x500b4
+
+/*
+ * This register selects 1 of 8 debug vectors.
+ * The selected vector is reflected to DBGOUTREG.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DBGSEL_DBGSEL_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_DBGSEL_DBGSEL_MASK	0x1f
+
+
+/*
+ * Register <CLOCK_GATE_CONTROL>
+ *
+ * Clock Gate control register including timer config and bypass control
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_CLK_GATE_CNTRL	0x500b8
+
+/*
+ * If set to 1b1 will disable the clock gate logic such to always enable
+ * the clock
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_CLK_GATE_CNTRL_BYPASS_CLK_GATE_MASK	0x1
+
+/*
+ * For how long should the clock stay active once all conditions for clock
+ * disable are met.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_CLK_GATE_CNTRL_TIMER_VAL_SHIFT	8
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_CLK_GATE_CNTRL_TIMER_VAL_MASK	0xff00
+
+/*
+ * Enables the keep alive logic which will periodically enable the clock to
+ * assure that no deadlock of clock being removed completely will occur
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_EN_MASK	0x10000
+
+/*
+ * If the KEEP alive option is enabled the field will determine for how
+ * many cycles should the clock be active
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_SHIFT	20
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_MASK	0x700000
+
+/*
+ * If the KEEP alive option is enabled, this field will determine for how
+ * many cycles the clock should be disabled (minus the
+ * KEEP_ALIVE_INTERVAL), so KEEP_ALIVE_CYCLE must be larger than
+ * KEEP_ALIVE_INTERVAL.
+*/
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_SHIFT	24
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_MASK	0xff000000
+
+
+/*
+ * Register <GENERAL_PURPOSE_Register>
+ *
+ * general purpose register
+ */
+#define QM_BBH_TX_QM_BBHTX_COMMON_CFGS_GPR	0x500bc
+
+/* general purpose register */
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_GPR_GPR_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_COMMON_CFGS_GPR_GPR_MASK	0xffffffff
+
+
+/*
+ * Register <PD_FIFO_BASE>
+ *
+ * The BBH manages 40 queues for GPON or 32 queues for EPON (1 for each
+ * TCONT/LLID).
+ * For each queue it manages a PD FIFO.
+ * A total of 256 PDs are available for all queues.
+ * For each Queue the SW configures the base and the size within these 256
+ * PDs.
+ * The size of the 1st BN FIFO and get-next FIFO is the same as the size of
+ * the PD FIFO of each queue.
+ * each register in this array defines the PD FIFO base of 2 queues.
+ */
+#define QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDBASE	0x50400
+
+/* The base of PD FIFO for queue 0. */
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDBASE_FIFOBASE0_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDBASE_FIFOBASE0_MASK	0x1ff
+
+/* The base of PD FIFO for queue 1. */
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDBASE_FIFOBASE1_SHIFT	16
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDBASE_FIFOBASE1_MASK	0x1ff0000
+
+
+/*
+ * Register <PD_FIFO_SIZE>
+ *
+ * The BBH manages 40 queues for GPON and 32 queues for EPON (FIFO per
+ * TCONT/LLID).
+ * For each queue it manages a PD FIFO.
+ * A total of 256 PDs are available for all queues.
+ * For each Queue the SW configures the base and the size within these.
+ * each register in this array defines the PD FIFO size of 2 queues.
+ */
+#define QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDSIZE	0x50450
+
+/*
+ * The size of PD FIFO for queue 0.
+ * A value of n refers to n+1.
+ * For GPON, the max value is 0x7.
+ * For EPON, the max value is 0xf.
+*/
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDSIZE_FIFOSIZE0_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDSIZE_FIFOSIZE0_MASK	0x1ff
+
+/*
+ * The size of PD FIFO for queue 1.
+ * A value of n refers to n+1.
+*/
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDSIZE_FIFOSIZE1_SHIFT	16
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDSIZE_FIFOSIZE1_MASK	0x1ff0000
+
+
+/*
+ * Register <PD_WKUP_THRESH>
+ *
+ * When a FIFO occupancy is above this wakeup threshold, the BBH will not
+ * wake-up the Runner for sending a new PD.
+ * This threshold does not represent the actual size of the FIFO.
+ * If a PD will arrive from the Runner when the FIFO is above the
+ * threshold, it will not be dropped unless the FIFO is actually full.
+ * Each register defines the threshold of 2 queues.
+ */
+#define QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDWKUPH	0x50500
+
+/*
+ * The wakeup threshold of the PD FIFO for queue 0.
+ * A value of n refers to n+1.
+ * Relevant only for EPON BBH.
+*/
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDWKUPH_WKUPTHRESH0_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDWKUPH_WKUPTHRESH0_MASK	0xff
+
+/*
+ * The wakeup threshold of the PD FIFO for queue 1.
+ * A value of n refers to n+1.
+ * Relevant only for EPON BBH.
+*/
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDWKUPH_WKUPTHRESH1_SHIFT	8
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDWKUPH_WKUPTHRESH1_MASK	0xff00
+
+
+/*
+ * Register <PD_BYTES_THRESHOLD>
+ *
+ * The BBH requests PDs from the Runner and maintains a pre-fetch PDs FIFO.
+ * The PDs pre fetch is limited either by the PD FIFO configurable size or
+ * according to the total number of bytes (deducting bytes already
+ * requested/transmitted) for preventing HOL.
+ * Full configuration for the first 8 TCONT and one configuration for the
+ * rest (TCONTs 8-39).
+ * Each register in this array defines the threshold of 2 queues.
+ */
+#define QM_BBH_TX_QM_BBHTX_LAN_CFGS_PD_BYTE_TH	0x50550
+
+/*
+ * Defines the number of bytes for PDs pre fetch limited according to the
+ * total number of bytes.
+ * The value is in 8-bytes resolution.
+*/
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PD_BYTE_TH_PDLIMIT0_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PD_BYTE_TH_PDLIMIT0_MASK	0xffff
+
+/*
+ * Defines the number of bytes for PDs pre fetch limited according to the
+ * total number of bytes.
+ * The value is in 8-bytes resolution.
+*/
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PD_BYTE_TH_PDLIMIT1_SHIFT	16
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PD_BYTE_TH_PDLIMIT1_MASK	0xffff0000
+
+
+/*
+ * Register <PD_BYTES_THRESHOLD_EN>
+ *
+ * The BBH requests PDs from the Runner and maintains a pre-fetch PDs FIFO.
+ * The PDs pre fetch is limited either by the PD FIFO configurable size or
+ * according to the total number of bytes (deducting bytes already
+ * requested/transmitted) for preventing HOL.
+ * Full configuration for the first 8 TCONT and one configuration per group
+ * of 8 TCONTs for the rest.
+ */
+#define QM_BBH_TX_QM_BBHTX_LAN_CFGS_PD_BYTE_TH_EN	0x50600
+
+/*
+ * This bit enables the above feature (PDs pre fetch limited according to
+ * the total number of bytes).
+*/
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PD_BYTE_TH_EN_PDLIMITEN_MASK	0x1
+
+
+/*
+ * Register <PD_EMPTY_THRESHOLD>
+ *
+ * The BBH manages 32 queues for EPON (FIFO per LLID).
+ * For each queue it manages a PD FIFO.
+ * Usually, the BBH orders PDs from the Runner in RR between all queues.
+ * In EPON BBH, if a FIFO occupancy is below this threshold, the queue will
+ * have higher priority in PD ordering arbitration (with RR between all the
+ * empty queues).
+ * This configuration is global for all queues.
+ * Relevant only for EPON BBH.
+ */
+#define QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDEMPTY	0x50604
+
+/*
+ * EPON PD FIFO empty threshold.
+ * A queue which its PD FIFO occupancy is below this threshold will have
+ * high priority in PD ordering arbitration.
+*/
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDEMPTY_EMPTY_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_PDEMPTY_EMPTY_MASK	0xff
+
+
+/*
+ * Register <TX_THRESHOLD>
+ *
+ * Transmit threshold in 8 bytes resolution.
+ * The BBH TX will not start to transmit data towards the XLMAC until the
+ * amount of data in the TX FIFO is larger than the threshold or if there
+ * is a complete packet in the FIFO.
+ */
+#define QM_BBH_TX_QM_BBHTX_LAN_CFGS_TXTHRESH	0x50608
+
+/* DDR Transmit threshold in 8 bytes resolution */
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_TXTHRESH_DDRTHRESH_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_TXTHRESH_DDRTHRESH_MASK	0x1ff
+
+/* SRAM Transmit threshold in 8 bytes resolution */
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_TXTHRESH_SRAMTHRESH_SHIFT	16
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_TXTHRESH_SRAMTHRESH_MASK	0x1ff0000
+
+
+/*
+ * Register <EEE>
+ *
+ * The BBH is responsible for indicating to the XLMAC that no traffic is
+ * about to arrive, so the XLMAC may try to enter power saving mode.
+ * This register is used to enable this feature.
+ */
+#define QM_BBH_TX_QM_BBHTX_LAN_CFGS_EEE	0x5060c
+
+/* enable bit */
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_EEE_EN_MASK	0x1
+
+
+/*
+ * Register <TS>
+ *
+ * The BBH is responsible for indicating to the XLMAC that it should
+ * calculate a timestamp for the current packet that is being transmitted.
+ * The BBH gets the timestamping parameters in the PD and forward it to the
+ * XLMAC.
+ * This register is used to enable this feature.
+ */
+#define QM_BBH_TX_QM_BBHTX_LAN_CFGS_TS	0x50610
+
+/* enable bit */
+#define  QM_BBH_TX_QM_BBHTX_LAN_CFGS_TS_EN_MASK	0x1
+
+
+/*
+ * Register <SRAM_PD_COUNTER> - read-only
+ *
+ * This counter counts the number of received PD for packets to be
+ * transmitted from the SRAM.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SRAMPD	0x50a00
+
+/*
+ * This counter counts the number of packets which were transmitted from
+ * the SRAM.
+*/
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SRAMPD_SRAMPD_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SRAMPD_SRAMPD_MASK	0xffffffff
+
+
+/*
+ * Register <DDR_PD_COUNTER> - read-only
+ *
+ * This counter counts the number of received PDs for packets to be
+ * transmitted from the DDR.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_DDRPD	0x50a04
+
+/*
+ * This counter counts the number of packets which were transmitted from
+ * the DDR.
+*/
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_DDRPD_DDRPD_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_DDRPD_DDRPD_MASK	0xffffffff
+
+
+/*
+ * Register <PD_DROP_COUNTER> - read-only
+ *
+ * This counter counts the number of PDs which were dropped due to PD FIFO
+ * full.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_PDDROP	0x50a08
+
+/*
+ * This counter counts the number of PDs which were dropped due to PD FIFO
+ * full.
+*/
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_PDDROP_PDDROP_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_PDDROP_PDDROP_MASK	0xffff
+
+
+/*
+ * Register <STS_COUNTER> - read-only
+ *
+ * This counter counts the number of STS messages which were received from
+ * Runner.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_STSCNT	0x50a10
+
+/* This counter counts the number of received status messages. */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_STSCNT_STSCNT_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_STSCNT_STSCNT_MASK	0xffffffff
+
+
+/*
+ * Register <STS_DROP_COUNTER> - read-only
+ *
+ * This counter counts the number of STS which were dropped due to PD FIFO
+ * full.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_STSDROP	0x50a14
+
+/*
+ * This counter counts the number of STS which were dropped due to PD FIFO
+ * full.
+*/
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_STSDROP_STSDROP_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_STSDROP_STSDROP_MASK	0xffff
+
+
+/*
+ * Register <MSG_COUNTER> - read-only
+ *
+ * This counter counts the number of MSG (DBR/Ghost) messages which were
+ * received from Runner.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_MSGCNT	0x50a18
+
+/* This counter counts the number of received DBR/ghost messages. */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_MSGCNT_MSGCNT_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_MSGCNT_MSGCNT_MASK	0xffffffff
+
+
+/*
+ * Register <MSG_DROP_COUNTER> - read-only
+ *
+ * This counter counts the number of MSG which were dropped due to PD FIFO
+ * full.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_MSGDROP	0x50a1c
+
+/*
+ * This counter counts the number of MSG which were dropped due to PD FIFO
+ * full.
+*/
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_MSGDROP_MSGDROP_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_MSGDROP_MSGDROP_MASK	0xffff
+
+
+/*
+ * Register <GET_NEXT_IS_NULL_COUNTER> - read-only
+ *
+ * This counter counts the number of Get Next responses with a null BN.
+ * It counts the packets for all TCONTs together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ * This counter is relevant for Ethernet only.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_GETNEXTNULL	0x50a20
+
+/* This counter counts the number of Get Next responses with a null BN. */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_GETNEXTNULL_GETNEXTNULL_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_GETNEXTNULL_GETNEXTNULL_MASK	0xffff
+
+
+/*
+ * Register <FLUSHED_PACKETS_COUNTER> - read-only
+ *
+ * This counter counts the number of packets that were flushed (BN was
+ * released without sending the data to the EPON MAC) due to flush request.
+ * The counter is global for all queues.
+ * The counter is read clear.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_FLUSHPKTS	0x50a24
+
+/* This counter counts the number of flushed packets */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_FLUSHPKTS_FLSHPKTS_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_FLUSHPKTS_FLSHPKTS_MASK	0xffff
+
+
+/*
+ * Register <REQ_LENGTH_ERROR_COUNTER> - read-only
+ *
+ * This counter counts the number of times a length error (mismatch between
+ * a request from the MAC and a PD from the Runner) occurred.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_LENERR	0x50a28
+
+/* This counter counts the number of times a length error occurred */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_LENERR_LENERR_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_LENERR_LENERR_MASK	0xffff
+
+
+/*
+ * Register <AGGREGATION_LENGTH_ERROR_COUNTER> - read-only
+ *
+ * This counter Counts aggregation length error events.
+ * If one or more of the packets in an aggregated PD is shorter than 60
+ * bytes, this counter will be incremented by 1.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_AGGRLENERR	0x50a2c
+
+/*
+ * This counter counts the number of times an aggregation length error
+ * occurred
+*/
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_AGGRLENERR_AGGRLENERR_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_AGGRLENERR_AGGRLENERR_MASK	0xffff
+
+
+/*
+ * Register <SRAM_PKT_COUNTER> - read-only
+ *
+ * This counter counts the number of received packets to be transmitted
+ * from the SRAM.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SRAMPKT	0x50a30
+
+/*
+ * This counter counts the number of packets which were transmitted from
+ * the SRAM.
+*/
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SRAMPKT_SRAMPKT_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SRAMPKT_SRAMPKT_MASK	0xffffffff
+
+
+/*
+ * Register <DDR_PKT_COUNTER> - read-only
+ *
+ * This counter counts the number of received packets to be transmitted
+ * from the DDR.
+ * It counts the packets for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_DDRPKT	0x50a34
+
+/*
+ * This counter counts the number of packets which were transmitted from
+ * the DDR.
+*/
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_DDRPKT_DDRPKT_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_DDRPKT_DDRPKT_MASK	0xffffffff
+
+
+/*
+ * Register <SRAM_BYTE_COUNTER> - read-only
+ *
+ * This counter counts the number of transmitted bytes from the SRAM.
+ * It counts the bytes for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SRAMBYTE	0x50a38
+
+/* This counter counts the number of transmitted bytes from the SRAM. */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SRAMBYTE_SRAMBYTE_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SRAMBYTE_SRAMBYTE_MASK	0xffffffff
+
+
+/*
+ * Register <DDR_BYTE_COUNTER> - read-only
+ *
+ * This counter counts the number of transmitted bytes from the DDR.
+ * It counts the bytes for all queues together.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_DDRBYTE	0x50a3c
+
+/* This counter counts the number of transmitted bytes from the DDR. */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_DDRBYTE_DDRBYTE_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_DDRBYTE_DDRBYTE_MASK	0xffffffff
+
+
+/*
+ * Register <SW_RD_EN>
+ *
+ * writing to this register creates a rd_en pulse to the selected array the
+ * SW wants to access.
+ * Each bit in the register represents one of the arrays the SW can access.
+ * The address inside the array is determined in the previous register
+ * (sw_rd_address).
+ * When writing to this register the SW should assert only one bit.
+ * If more than one is asserted, The HW will return the value read from the
+ * lsb selected array.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN	0x50a40
+
+/* rd from the PD FIFO */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_PDSEL_MASK	0x1
+
+/* rd from the PD valid array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_PDVSEL_MASK	0x2
+
+/* rd from the PD empty array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_PDEMPTYSEL_MASK	0x4
+
+/* rd from the PD Full array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_PDFULLSEL_MASK	0x8
+
+/* rd from the PD below empty array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_PDBEMPTYSEL_MASK	0x10
+
+/* rd from the PD full for wakeup empty array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_PDFFWKPSEL_MASK	0x20
+
+/* rd from the first BN array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_FBNSEL_MASK	0x40
+
+/* rd from the first BN valid array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_FBNVSEL_MASK	0x80
+
+/* rd from the first BN empty array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_FBNEMPTYSEL_MASK	0x100
+
+/* rd from the first BN full array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_FBNFULLSEL_MASK	0x200
+
+/* rd from the first Get Next array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_GETNEXTSEL_MASK	0x400
+
+/* rd from the get_next valid array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_GETNEXTVSEL_MASK	0x800
+
+/* rd from the get next empty array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_GETNEXTEMPTYSEL_MASK	0x1000
+
+/* rd from the get next full array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_GETNEXTFULLSEL_MASK	0x2000
+
+/* rd from the gpon context array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_GPNCNTXTSEL_MASK	0x4000
+
+/* rd from the BPM FIFO */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_BPMSEL_MASK	0x8000
+
+/* rd from the BPM FLUSH FIFO */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_BPMFSEL_MASK	0x10000
+
+/* rd from the SBPM FIFO */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_SBPMSEL_MASK	0x20000
+
+/* rd from the SBPM FLUSH FIFO */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_SBPMFSEL_MASK	0x40000
+
+/* rd from the STS FIFO */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_STSSEL_MASK	0x80000
+
+/* rd from the STS valid array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_STSVSEL_MASK	0x100000
+
+/* rd from the STS empty array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_STSEMPTYSEL_MASK	0x200000
+
+/* rd from the STS Full array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_STSFULLSEL_MASK	0x400000
+
+/* rd from the STS below empty array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_STSBEMPTYSEL_MASK	0x800000
+
+/* rd from the STS full for wakeup empty array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_STSFFWKPSEL_MASK	0x1000000
+
+/* rd from the MSG FIFO */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_MSGSEL_MASK	0x2000000
+
+/* rd from the msg valid array */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_MSGVSEL_MASK	0x4000000
+
+/* rd from the epon request FIFO */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_EPNREQSEL_MASK	0x8000000
+
+/* rd from the DATA FIFO (SRAM and DDR) */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_DATASEL_MASK	0x10000000
+
+/* rd from the reorder FIFO */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_REORDERSEL_MASK	0x20000000
+
+/* rd from the Timestamp Info FIFO */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_TSINFOSEL_MASK	0x40000000
+
+/* rd from the MAC TX FIFO. */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDEN_MACTXSEL_MASK	0x80000000
+
+
+/*
+ * Register <SW_RD_ADDR>
+ *
+ * the address inside the array the SW wants to read
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDADDR	0x50a44
+
+/* The address inside the array the sw wants to read */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDADDR_RDADDR_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDADDR_RDADDR_MASK	0x7ff
+
+
+/*
+ * Register <SW_RD_DATA> - read-only
+ *
+ * indirect memories and arrays read data
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDDATA	0x50a48
+
+/* data */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDDATA_DATA_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_SWRDDATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <UNIFIED_PKT_COUNTER> - <x> is [ 0 => 7 ] - read-only
+ *
+ * This counter array counts the number of transmitted packets through each
+ * interface in the unified BBH.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_UNIFIEDPKT(x)	(0x50a50 + (x) * 0x4)
+
+/* This counter counts the number of transmitted bytes from the DDR. */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_UNIFIEDPKT_DDRBYTE_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_UNIFIEDPKT_DDRBYTE_MASK	0xffffffff
+
+
+/*
+ * Registers <UNIFIED_BYTE_COUNTER> - <x> is [ 0 => 7 ] - read-only
+ *
+ * This counter array counts the number of transmitted bytes through each
+ * interface in the unified BBH.
+ * This counter is cleared when read and freezes when maximum value is
+ * reached.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_UNIFIEDBYTE(x)	(0x50a70 + (x) * 0x4)
+
+/* This counter counts the number of transmitted bytes from the DDR. */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_UNIFIEDBYTE_DDRBYTE_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_UNIFIEDBYTE_DDRBYTE_MASK	0xffffffff
+
+
+/*
+ * Registers <DEBUG_OUT_REG> - <x> is [ 0 => 31 ] - read-only
+ *
+ * an array including all the debug vectors of the BBH TX.
+ * entries 30 and 31 are DSL debug.
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_DBGOUTREG(x)	(0x50a90 + (x) * 0x4)
+
+/* Selected debug vector. */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_DBGOUTREG_DBGVEC_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_DBGOUTREG_DBGVEC_MASK	0xffffffff
+
+
+/*
+ * Registers <IN_SEGMENTATION> - <x> is [ 0 => 1 ] - read-only
+ *
+ * 40 bit vector in which each bit represents if the segmentation SM is
+ * currently handling a PD of a certain TCONT.
+ * first address is for TCONTS [31:
+ * 0]second is for TCONTS [39:
+ * 32]
+ */
+#define QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_IN_SEGMENTATION(x)	(0x50b20 + (x) * 0x4)
+
+/* in_segmentation indication */
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_IN_SEGMENTATION_IN_SEGMENTATION_SHIFT	0
+#define  QM_BBH_TX_QM_BBHTX_DEBUG_COUNTERS_IN_SEGMENTATION_IN_SEGMENTATION_MASK	0xffffffff
+
+
+/*
+ * Register <BB_ROUTE_OVERRIDE>
+ *
+ * Broadbus route override
+ */
+#define QM_DMA_QM_DMA_CONFIG_BBROUTEOVRD	0x60000
+
+/* destination ID */
+#define  QM_DMA_QM_DMA_CONFIG_BBROUTEOVRD_DEST_SHIFT	0
+#define  QM_DMA_QM_DMA_CONFIG_BBROUTEOVRD_DEST_MASK	0x3f
+
+/* the route to be used (override the default route) */
+#define  QM_DMA_QM_DMA_CONFIG_BBROUTEOVRD_ROUTE_SHIFT	8
+#define  QM_DMA_QM_DMA_CONFIG_BBROUTEOVRD_ROUTE_MASK	0x3ff00
+
+/* override enable */
+#define  QM_DMA_QM_DMA_CONFIG_BBROUTEOVRD_OVRD_MASK	0x1000000
+
+
+/*
+ * Registers <NUM_OF_WRITE_REQ> - <x> is [ 0 => 7 ]
+ *
+ * This array of registers defines the memory allocation for the
+ * peripherals, for upstream.
+ * The allocation is of number of 128byte buffers out of the total 48
+ * buffers for both sdma and dma.
+ * The allocation is done by defining a only the number of allocated
+ * buffers.
+ * base address is calculated by HW, when base of peripheral 0 is 0.
+ * Note that the memory allocation should not contain wrap around.
+ * The number of allocated CDs is the same of data buffers.
+ */
+#define QM_DMA_QM_DMA_CONFIG_NUM_OF_WRITES(x)	(0x60004 + (x) * 0x4)
+
+/* the number of 128bytes buffers allocated to the peripheral. */
+#define  QM_DMA_QM_DMA_CONFIG_NUM_OF_WRITES_NUMOFBUFF_SHIFT	0
+#define  QM_DMA_QM_DMA_CONFIG_NUM_OF_WRITES_NUMOFBUFF_MASK	0x3f
+
+
+/*
+ * Registers <NUM_OF_READ_REQ> - <x> is [ 0 => 7 ]
+ *
+ * This array of registers controls the number of read requests of each
+ * peripheral within the read requests RAM.
+ * total of 64 requests are divided between peripherals.
+ * Base address of peripheral 0 is 0, base of peripheral 1 is 0 +
+ * periph0_num_of_read_requests and so on.
+ */
+#define QM_DMA_QM_DMA_CONFIG_NUM_OF_READS(x)	(0x60024 + (x) * 0x4)
+
+/* number of read requests */
+#define  QM_DMA_QM_DMA_CONFIG_NUM_OF_READS_RR_NUM_SHIFT	0
+#define  QM_DMA_QM_DMA_CONFIG_NUM_OF_READS_RR_NUM_MASK	0x3f
+
+
+/*
+ * Registers <URGENT_THRESHOLDS> - <x> is [ 0 => 7 ]
+ *
+ * the in/out of urgent thresholds mark the number of write requests in the
+ * queue in which the peripherals priority is changed.
+ * The two thresholds should create hysteresis.
+ * The moving into urgent threshold must always be greater than the moving
+ * out of urgent threshold.
+ */
+#define QM_DMA_QM_DMA_CONFIG_U_THRESH(x)	(0x60044 + (x) * 0x4)
+
+/* moving into urgent threshold */
+#define  QM_DMA_QM_DMA_CONFIG_U_THRESH_INTO_U_SHIFT	0
+#define  QM_DMA_QM_DMA_CONFIG_U_THRESH_INTO_U_MASK	0x3f
+
+/* moving out of urgent threshold */
+#define  QM_DMA_QM_DMA_CONFIG_U_THRESH_OUT_OF_U_SHIFT	8
+#define  QM_DMA_QM_DMA_CONFIG_U_THRESH_OUT_OF_U_MASK	0x3f00
+
+
+/*
+ * Registers <STRICT_PRIORITY> - <x> is [ 0 => 7 ]
+ *
+ * The arbitration between the requests of the different peripherals is
+ * done in two stages:
+ * 1.
+ * Strict priority - chooses the peripherals with the highest priority
+ * among all peripherals who have a request pending.
+ * 2.
+ * Weighted Round-Robin between all peripherals with the same priority.
+ * This array of registers allow configuration of the priority of each
+ * peripheral (both rx and tx) in the following manner:
+ * There are 4 levels of priorities, when each bit in the register
+ * represents a different level of priority.
+ * One should assert the relevant bit according to the desired priority
+ * -For the lowest - 0001For the highest - 1000
+ */
+#define QM_DMA_QM_DMA_CONFIG_PRI(x)	(0x60064 + (x) * 0x4)
+
+/* priority of rx side (upload) of the peripheral */
+#define  QM_DMA_QM_DMA_CONFIG_PRI_RXPRI_SHIFT	0
+#define  QM_DMA_QM_DMA_CONFIG_PRI_RXPRI_MASK	0xf
+
+/* priority of tx side (download) of the peripheral */
+#define  QM_DMA_QM_DMA_CONFIG_PRI_TXPRI_SHIFT	4
+#define  QM_DMA_QM_DMA_CONFIG_PRI_TXPRI_MASK	0xf0
+
+
+/*
+ * Registers <BB_SOURCE_DMA_PERIPH> - <x> is [ 0 => 7 ]
+ *
+ * Broadbus source address of the DMA peripherals.
+ * Register per peripheral (rx and tx).
+ * The source is used to determine the route address to the different
+ * peripherals.
+ */
+#define QM_DMA_QM_DMA_CONFIG_PERIPH_SOURCE(x)	(0x60084 + (x) * 0x4)
+
+/* bb source of rx side (upload) of the peripheral */
+#define  QM_DMA_QM_DMA_CONFIG_PERIPH_SOURCE_RXSOURCE_SHIFT	0
+#define  QM_DMA_QM_DMA_CONFIG_PERIPH_SOURCE_RXSOURCE_MASK	0x3f
+
+/* bb source of tx side (download) of the peripheral */
+#define  QM_DMA_QM_DMA_CONFIG_PERIPH_SOURCE_TXSOURCE_SHIFT	8
+#define  QM_DMA_QM_DMA_CONFIG_PERIPH_SOURCE_TXSOURCE_MASK	0x3f00
+
+
+/*
+ * Registers <WEIGHT_OF_ROUND_ROBIN> - <x> is [ 0 => 7 ]
+ *
+ * The second phase of the arbitration between requests is weighted round
+ * robin between requests of peripherals with the same priority.
+ * This array of registers allow configuration of the weight of each
+ * peripheral (rx and tx).
+ * The actual weight will be weight + 1, meaning configuration of 0 is
+ * actual weight of 1.
+ */
+#define QM_DMA_QM_DMA_CONFIG_WEIGHT(x)	(0x600a4 + (x) * 0x4)
+
+/* weight of rx side (upload) of the peripheral */
+#define  QM_DMA_QM_DMA_CONFIG_WEIGHT_RXWEIGHT_SHIFT	0
+#define  QM_DMA_QM_DMA_CONFIG_WEIGHT_RXWEIGHT_MASK	0x7
+
+/* weight of tx side (download) of the peripheral */
+#define  QM_DMA_QM_DMA_CONFIG_WEIGHT_TXWEIGHT_SHIFT	8
+#define  QM_DMA_QM_DMA_CONFIG_WEIGHT_TXWEIGHT_MASK	0x700
+
+
+/*
+ * Register <POINTERS_RESET>
+ *
+ * Resets the pointers of the peripherals FIFOs within the DMA.
+ * Bit per peripheral side (rx and tx).
+ * For rx side resets the data and CD FIFOs.
+ * For tx side resets the read requests FIFO.
+ */
+#define QM_DMA_QM_DMA_CONFIG_PTRRST	0x600d0
+
+/*
+ * vector in which each bit represents a peripheral.
+ * LSB represent RX peripherals and MSB represent TX peripherals.
+ * When asserted, the relevant FIFOS of the selected peripheral will be
+ * reset to zero
+*/
+#define  QM_DMA_QM_DMA_CONFIG_PTRRST_RSTVEC_SHIFT	0
+#define  QM_DMA_QM_DMA_CONFIG_PTRRST_RSTVEC_MASK	0xffff
+
+
+/*
+ * Register <MAX_ON_THE_FLY>
+ *
+ * max number of on the fly read commands the DMA may issue to DDR before
+ * receiving any data.
+ */
+#define QM_DMA_QM_DMA_CONFIG_MAX_OTF	0x600d4
+
+/* max on the fly */
+#define  QM_DMA_QM_DMA_CONFIG_MAX_OTF_MAX_SHIFT	0
+#define  QM_DMA_QM_DMA_CONFIG_MAX_OTF_MAX_MASK	0x3f
+
+
+/*
+ * Register <CLOCK_GATE_CONTROL>
+ *
+ * Clock Gate control register including timer config and bypass control
+ */
+#define QM_DMA_QM_DMA_CONFIG_CLK_GATE_CNTRL	0x600d8
+
+/*
+ * If set to 1b1 will disable the clock gate logic such to always enable
+ * the clock
+*/
+#define  QM_DMA_QM_DMA_CONFIG_CLK_GATE_CNTRL_BYPASS_CLK_GATE_MASK	0x1
+
+/*
+ * For how long should the clock stay active once all conditions for clock
+ * disable are met.
+*/
+#define  QM_DMA_QM_DMA_CONFIG_CLK_GATE_CNTRL_TIMER_VAL_SHIFT	8
+#define  QM_DMA_QM_DMA_CONFIG_CLK_GATE_CNTRL_TIMER_VAL_MASK	0xff00
+
+/*
+ * Enables the keep alive logic which will periodically enable the clock to
+ * assure that no deadlock of clock being removed completely will occur
+*/
+#define  QM_DMA_QM_DMA_CONFIG_CLK_GATE_CNTRL_KEEP_ALIVE_EN_MASK	0x10000
+
+/*
+ * If the KEEP alive option is enabled the field will determine for how
+ * many cycles should the clock be active
+*/
+#define  QM_DMA_QM_DMA_CONFIG_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_SHIFT	20
+#define  QM_DMA_QM_DMA_CONFIG_CLK_GATE_CNTRL_KEEP_ALIVE_INTRVL_MASK	0x700000
+
+/*
+ * If the KEEP alive option is enabled this field will determine for how
+ * many cycles should the clock be disabled (minus the
+ * KEEP_ALIVE_INTERVAL)So KEEP_ALIVE_CYCLE must be larger than
+ * KEEP_ALIVE_INTERVAL.
+*/
+#define  QM_DMA_QM_DMA_CONFIG_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_SHIFT	24
+#define  QM_DMA_QM_DMA_CONFIG_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_MASK	0xff000000
+
+
+/*
+ * Register <DBG_SEL>
+ *
+ * debug bus select
+ */
+#define QM_DMA_QM_DMA_CONFIG_DBG_SEL	0x600e0
+
+/* select */
+#define  QM_DMA_QM_DMA_CONFIG_DBG_SEL_DBGSEL_SHIFT	0
+#define  QM_DMA_QM_DMA_CONFIG_DBG_SEL_DBGSEL_MASK	0x3
+
+
+/*
+ * Register <NOT_EMPTY_VECTOR> - read-only
+ *
+ * Each peripheral is represented in a bit on the not empty vector.
+ * LSB is for rx peripherals, MSB for tx peripherals.
+ * If the bit is asserted, the requests queue of the relevant peripheral is
+ * not empty.
+ * The not empty vector is used by the DMA scheduler to determine which
+ * peripheral is the next to be served.
+ */
+#define QM_DMA_QM_DMA_DEBUG_NEMPTY	0x60100
+
+/* indication of the queue state */
+#define  QM_DMA_QM_DMA_DEBUG_NEMPTY_NEMPTY_SHIFT	0
+#define  QM_DMA_QM_DMA_DEBUG_NEMPTY_NEMPTY_MASK	0xffff
+
+
+/*
+ * Register <URGENT_VECTOR> - read-only
+ *
+ * Each peripheral is represented in a bit on the urgent vector.
+ * 8 LSB are rx peripherals, 8 MSB are tx peripherals.
+ * If the bit is asserted, the requests queue of the relevant peripheral is
+ * in urgent state.
+ * The urgent vector is used by the DMA scheduler to determine which
+ * peripheral is the next to be served.
+ */
+#define QM_DMA_QM_DMA_DEBUG_URGNT	0x60104
+
+/* indication whether the queue is in urgent state or not */
+#define  QM_DMA_QM_DMA_DEBUG_URGNT_URGNT_SHIFT	0
+#define  QM_DMA_QM_DMA_DEBUG_URGNT_URGNT_MASK	0xffff
+
+
+/*
+ * Register <SELECTED_SOURCE_NUM> - read-only
+ *
+ * The decision of the dma scheduler and the next peripheral to be served,
+ * represented by its source address
+ */
+#define QM_DMA_QM_DMA_DEBUG_SELSRC	0x60108
+
+/* the next peripheral to be served by the dma */
+#define  QM_DMA_QM_DMA_DEBUG_SELSRC_SEL_SRC_SHIFT	0
+#define  QM_DMA_QM_DMA_DEBUG_SELSRC_SEL_SRC_MASK	0x3f
+
+
+/*
+ * Registers <REQUEST_COUNTERS_RX> - <x> is [ 0 => 7 ] - read-only
+ *
+ * the number of write requests currently pending for each rx peripheral.
+ */
+#define QM_DMA_QM_DMA_DEBUG_REQ_CNT_RX(x)	(0x60110 + (x) * 0x4)
+
+/* the number of pending write requests */
+#define  QM_DMA_QM_DMA_DEBUG_REQ_CNT_RX_REQ_CNT_SHIFT	0
+#define  QM_DMA_QM_DMA_DEBUG_REQ_CNT_RX_REQ_CNT_MASK	0x3f
+
+
+/*
+ * Registers <REQUEST_COUNTERS_TX> - <x> is [ 0 => 7 ] - read-only
+ *
+ * the number of read requests currently pending for each TX peripheral.
+ */
+#define QM_DMA_QM_DMA_DEBUG_REQ_CNT_TX(x)	(0x60130 + (x) * 0x4)
+
+/* the number of pending read requests */
+#define  QM_DMA_QM_DMA_DEBUG_REQ_CNT_TX_REQ_CNT_SHIFT	0
+#define  QM_DMA_QM_DMA_DEBUG_REQ_CNT_TX_REQ_CNT_MASK	0x3f
+
+
+/*
+ * Registers <ACC_REQUEST_COUNTERS_RX> - <x> is [ 0 => 7 ] - read-only
+ *
+ * the accumulated number of write requests served so far for each
+ * peripheral.
+ * Wrap around on max value, not read clear.
+ */
+#define QM_DMA_QM_DMA_DEBUG_REQ_CNT_RX_ACC(x)	(0x60150 + (x) * 0x4)
+
+/* the accumulated number of write requests served */
+#define  QM_DMA_QM_DMA_DEBUG_REQ_CNT_RX_ACC_REQ_CNT_SHIFT	0
+#define  QM_DMA_QM_DMA_DEBUG_REQ_CNT_RX_ACC_REQ_CNT_MASK	0xffffffff
+
+
+/*
+ * Registers <ACC_REQUEST_COUNTERS_TX> - <x> is [ 0 => 7 ] - read-only
+ *
+ * the accumulated number of read requests served so far for each
+ * peripheral.
+ * Wrap around on max value, not read clear.
+ */
+#define QM_DMA_QM_DMA_DEBUG_REQ_CNT_TX_ACC(x)	(0x60170 + (x) * 0x4)
+
+/* the accumulated number of read requests served */
+#define  QM_DMA_QM_DMA_DEBUG_REQ_CNT_TX_ACC_REQ_CNT_SHIFT	0
+#define  QM_DMA_QM_DMA_DEBUG_REQ_CNT_TX_ACC_REQ_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <RAM_ADDRES>
+ *
+ * the address and cs of the ram the user wishes to read using the indirect
+ * access read mechanism.
+ */
+#define QM_DMA_QM_DMA_DEBUG_RDADD	0x60200
+
+/* address within the ram */
+#define  QM_DMA_QM_DMA_DEBUG_RDADD_ADDRESS_SHIFT	0
+#define  QM_DMA_QM_DMA_DEBUG_RDADD_ADDRESS_MASK	0x3ff
+
+/* chip select for write data ram */
+#define  QM_DMA_QM_DMA_DEBUG_RDADD_DATACS_MASK	0x10000
+
+/* chip select for chunk descriptors ram */
+#define  QM_DMA_QM_DMA_DEBUG_RDADD_CDCS_MASK	0x20000
+
+/* chip select for read requests ram */
+#define  QM_DMA_QM_DMA_DEBUG_RDADD_RRCS_MASK	0x40000
+
+
+/*
+ * Register <INDIRECT_READ_REQUEST_VALID>
+ *
+ * After determining the address and cs, the user should assert this bit
+ * for indicating that the address and cs are valid.
+ */
+#define QM_DMA_QM_DMA_DEBUG_RDVALID	0x60204
+
+/* indirect read request is valid */
+#define  QM_DMA_QM_DMA_DEBUG_RDVALID_VALID_MASK	0x1
+
+
+/*
+ * Registers <INDIRECT_READ_DATA> - <x> is [ 0 => 3 ] - read-only
+ *
+ * The returned read data from the selected RAM.
+ * Array of 4 registers (128 bits total).
+ * The width of the different memories is as follows:
+ * write data - 128 bitschunk descriptors - 36 bitsread requests - 42
+ * bitsread data - 64 bits. For the memories with width smaller than 128, the
+ * data will appear in the first registers of the array, for example:
+ * data from the cd RAM will appear in - {reg1[5:
+ * 0], reg0[31:
+ * 0]}.
+ */
+#define QM_DMA_QM_DMA_DEBUG_RDDATA(x)	(0x60208 + (x) * 0x4)
+
+/* read data from ram */
+#define  QM_DMA_QM_DMA_DEBUG_RDDATA_DATA_SHIFT	0
+#define  QM_DMA_QM_DMA_DEBUG_RDDATA_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <READ_DATA_READY> - read-only
+ *
+ * When asserted, indicates that the data in the previous array is valid.
+ * Will remain asserted until the user deasserts the valid bit in register
+ * RDVALID.
+ */
+#define QM_DMA_QM_DMA_DEBUG_RDDATARDY	0x60218
+
+/* read data ready */
+#define  QM_DMA_QM_DMA_DEBUG_RDDATARDY_READY_MASK	0x1
+
+
+#endif /* ! XRDP_REGS_QM_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_rnr_quad.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_rnr_quad.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_rnr_quad.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_rnr_quad.h	2025-09-25 17:40:35.075364786 +0200
@@ -0,0 +1,1707 @@
+#ifndef XRDP_REGS_RNR_QUAD_H_
+#define XRDP_REGS_RNR_QUAD_H_
+
+/* relative to core */
+#define RNR_QUAD_OFFSET_0		0xd08400
+
+/*
+ * Register <ENG>
+ *
+ * Engineering Configuration reserved for Broadlight use
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_ENG	0x0
+
+/*
+ * eng_cnfg[0] - IP filters on IPV6 on LSByte not MSByte
+ * eng_cnfg[1] - IP DA fields show LSByte not MSByte
+ * eng_cnfg[2] - enable error instead of brdcst at classifier summary word
+ * eng_cnfg[3] - Free
+ * eng_cnfg[8] - Free
+ * eng_cnfg[9] - Free
+ * eng_cnfg[10] - Free
+ * eng_cnfg[11] - Free
+ * eng_cnfg[12] - Free
+ * eng_cnfg[13] - Free
+ * eng_cnfg[14] - mask of ah ext header exception
+ * eng_cnfg[15] - enable old mode of AH at IPV6
+ * eng_cnfg[16] - enable old mode of AH at IPV4
+ * eng_cnfg[17] - Free
+ * eng_cnfg[18] - don't allow 0xFFFF as valid ipv4 header cksum results
+*/
+#define  PARSER_CORE_CFG_ENG_CFG_SHIFT	0
+#define  PARSER_CORE_CFG_ENG_CFG_MASK	0xffffffff
+
+
+/*
+ * Register <PARSER_MISC_CFG>
+ *
+ * Parser Miscellaneous Configuration
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_PARSER_MISC_CFG	0x4
+
+/*
+ * Define which status bit cause exception bit in summary word to be set,
+ * the cause vector is {4h0,ip fragment, ip version error, ip checksum
+ * error, ip_header length error}
+*/
+#define  PARSER_CORE_CFG_PARSER_MISC_CFG_EXCEPTION_EN_SHIFT	0
+#define  PARSER_CORE_CFG_PARSER_MISC_CFG_EXCEPTION_EN_MASK	0x3fff
+
+/*
+ * Defines which TCP flags set will cause TCP_FLAG bit in summary word to
+ * be set
+*/
+#define  PARSER_CORE_CFG_PARSER_MISC_CFG_TCP_FLAGS_FILT_SHIFT	16
+#define  PARSER_CORE_CFG_PARSER_MISC_CFG_TCP_FLAGS_FILT_MASK	0xff0000
+
+/* Profile US */
+#define  PARSER_CORE_CFG_PARSER_MISC_CFG_PROFILE_US_SHIFT	28
+#define  PARSER_CORE_CFG_PARSER_MISC_CFG_PROFILE_US_MASK	0x70000000
+
+
+/*
+ * Register <VID_CONFIGURATION_0_1>
+ *
+ * Config VID Filter 0 & 1
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_VID_0_1	0x8
+
+/* VLAN ID Filter for first VLAN of register */
+#define  PARSER_CORE_CFG_VID_0_1_VID_0_SHIFT	0
+#define  PARSER_CORE_CFG_VID_0_1_VID_0_MASK	0xfff
+
+/* VLAN ID Filter 0 Enable */
+#define  PARSER_CORE_CFG_VID_0_1_VID_0_EN_MASK	0x8000
+
+/* VLAN ID Filter 1 for second VLAN of register */
+#define  PARSER_CORE_CFG_VID_0_1_VID_1_SHIFT	16
+#define  PARSER_CORE_CFG_VID_0_1_VID_1_MASK	0xfff0000
+
+/* VLAN ID Filter 1 Enable */
+#define  PARSER_CORE_CFG_VID_0_1_VID_1_EN_MASK	0x80000000
+
+
+/*
+ * Register <VID_CONFIGURATION_2_3>
+ *
+ * Config VID Filter 2 & 3
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_VID_2_3	0xc
+
+/* VLAN ID Filter for first VLAN of register */
+#define  PARSER_CORE_CFG_VID_2_3_VID_2_SHIFT	0
+#define  PARSER_CORE_CFG_VID_2_3_VID_2_MASK	0xfff
+
+/* VLAN ID Filter 2 Enable */
+#define  PARSER_CORE_CFG_VID_2_3_VID_2_EN_MASK	0x8000
+
+/* VLAN ID Filter 3 for second VLAN of register */
+#define  PARSER_CORE_CFG_VID_2_3_VID_3_SHIFT	16
+#define  PARSER_CORE_CFG_VID_2_3_VID_3_MASK	0xfff0000
+
+/* VLAN ID Filter 3 Enable */
+#define  PARSER_CORE_CFG_VID_2_3_VID_3_EN_MASK	0x80000000
+
+
+/*
+ * Register <VID_CONFIGURATION_4_5>
+ *
+ * Config VID Filter 4 & 5
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_VID_4_5	0x10
+
+/* VLAN ID Filter for first VLAN of register */
+#define  PARSER_CORE_CFG_VID_4_5_VID_4_SHIFT	0
+#define  PARSER_CORE_CFG_VID_4_5_VID_4_MASK	0xfff
+
+/* VLAN ID Filter 4 Enable */
+#define  PARSER_CORE_CFG_VID_4_5_VID_4_EN_MASK	0x8000
+
+/* VLAN ID Filter 5 for second VLAN of register */
+#define  PARSER_CORE_CFG_VID_4_5_VID_5_SHIFT	16
+#define  PARSER_CORE_CFG_VID_4_5_VID_5_MASK	0xfff0000
+
+/* VLAN ID Filter 5 Enable */
+#define  PARSER_CORE_CFG_VID_4_5_VID_5_EN_MASK	0x80000000
+
+
+/*
+ * Register <VID_CONFIGURATION_6_7>
+ *
+ * Config VID Filter 6 & 7
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_VID_6_7	0x14
+
+/* VLAN ID Filter for first VLAN of register */
+#define  PARSER_CORE_CFG_VID_6_7_VID_6_SHIFT	0
+#define  PARSER_CORE_CFG_VID_6_7_VID_6_MASK	0xfff
+
+/* VLAN ID Filter 6 Enable */
+#define  PARSER_CORE_CFG_VID_6_7_VID_6_EN_MASK	0x8000
+
+/* VLAN ID Filter 7 for second VLAN of register */
+#define  PARSER_CORE_CFG_VID_6_7_VID_7_SHIFT	16
+#define  PARSER_CORE_CFG_VID_6_7_VID_7_MASK	0xfff0000
+
+/* VLAN ID Filter 7 Enable */
+#define  PARSER_CORE_CFG_VID_6_7_VID_7_EN_MASK	0x80000000
+
+
+/*
+ * Register <IP_FILTER0_CFG>
+ *
+ * Config the IP Address filtering.
+ * Notice that the enable bit is located in the IP_FILTERS_CFG[4]
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_IP_FILTER0_CFG	0x18
+
+/*
+ * 32-bit address to match SIP or DIP (according to predefined
+ * configuration in IP_FILTERS_CFG register)
+*/
+#define  PARSER_CORE_CFG_IP_FILTER0_CFG_IP_ADDRESS_SHIFT	0
+#define  PARSER_CORE_CFG_IP_FILTER0_CFG_IP_ADDRESS_MASK	0xffffffff
+
+
+/*
+ * Register <IP_FILTER1_CFG>
+ *
+ * Config the IP Address filtering.
+ * Notice that the enable bit is located in the IP_FILTERS_CFG[5]
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_IP_FILTER1_CFG	0x1c
+
+/*
+ * 32-bit address to match SIP or DIP (according to predefined
+ * configuration in IP_FILTERS_CFG register)
+*/
+#define  PARSER_CORE_CFG_IP_FILTER1_CFG_IP_ADDRESS_SHIFT	0
+#define  PARSER_CORE_CFG_IP_FILTER1_CFG_IP_ADDRESS_MASK	0xffffffff
+
+
+/*
+ * Register <IP_FILTER0_MASK_CFG>
+ *
+ * Config the IP Address masking.
+ * Notice that the enable bit is located in the IP_FILTERS_CFG[4]
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_IP_FILTER0_MASK_CFG	0x28
+
+/* 32-bit address mask */
+#define  PARSER_CORE_CFG_IP_FILTER0_MASK_CFG_IP_ADDRESS_MASK_SHIFT	0
+#define  PARSER_CORE_CFG_IP_FILTER0_MASK_CFG_IP_ADDRESS_MASK_MASK	0xffffffff
+
+
+/*
+ * Register <IP_FILTER1_MASK_CFG>
+ *
+ * Config the IP Address masking.
+ * Notice that the enable bit is located in the IP_FILTERS_CFG[5]
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_IP_FILTER1_MASK_CFG	0x2c
+
+/* 32-bit address mask */
+#define  PARSER_CORE_CFG_IP_FILTER1_MASK_CFG_IP_ADDRESS_MASK_SHIFT	0
+#define  PARSER_CORE_CFG_IP_FILTER1_MASK_CFG_IP_ADDRESS_MASK_MASK	0xffffffff
+
+
+/*
+ * Register <IP_FILTERS_CFG>
+ *
+ * IP Address Filters (0.
+ * .3) configurations:
+ * (1) SIP or DIP selection config per each filter(1) Valid bit per each
+ * filter
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_IP_FILTERS_CFG	0x38
+
+/*
+ * IP Filter0 DIP or SIP selection.
+ * The default is SIP, when the field is set -> DIP selection is enabled
+*/
+#define  PARSER_CORE_CFG_IP_FILTERS_CFG_IP_FILTER0_DIP_EN_MASK	0x1
+
+/*
+ * IP Filter1 DIP or SIP selection.
+ * The default is SIP, when the field is set -> DIP selection is enabled
+*/
+#define  PARSER_CORE_CFG_IP_FILTERS_CFG_IP_FILTER1_DIP_EN_MASK	0x2
+
+/*
+ * IP Filter2 DIP or SIP selection.
+ * The default is SIP, when the field is set -> DIP selection is enabled
+*/
+#define  PARSER_CORE_CFG_IP_FILTERS_CFG_IP_FILTER2_DIP_EN_MASK	0x4
+
+/*
+ * IP Filter3 DIP or SIP selection.
+ * The default is SIP, when the field is set -> DIP selection is enabled
+*/
+#define  PARSER_CORE_CFG_IP_FILTERS_CFG_IP_FILTER3_DIP_EN_MASK	0x8
+
+/*
+ * IP Filter0 valid bit.
+ * When the bit valid is set, the IP filter/mask can be applied by
+ * hardware.
+*/
+#define  PARSER_CORE_CFG_IP_FILTERS_CFG_IP_FILTER0_VALID_MASK	0x10
+
+/*
+ * IP Filter1 valid bit.
+ * When the bit valid is set, the IP filter/mask can be applied by
+ * hardware.
+*/
+#define  PARSER_CORE_CFG_IP_FILTERS_CFG_IP_FILTER1_VALID_MASK	0x20
+
+/*
+ * IP Filter2 valid bit.
+ * When the bit valid is set, the IP filter/mask can be applied by
+ * hardware.
+*/
+#define  PARSER_CORE_CFG_IP_FILTERS_CFG_IP_FILTER2_VALID_MASK	0x40
+
+/*
+ * IP Filter3 valid bit.
+ * When the bit valid is set, the IP filter/mask can be applied by
+ * hardware.
+*/
+#define  PARSER_CORE_CFG_IP_FILTERS_CFG_IP_FILTER3_VALID_MASK	0x80
+
+
+/*
+ * Register <SNAP_ORGANIZATION_CODE>
+ *
+ * Identifies SNAP tunneling organization code
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_SNAP_ORG_CODE	0x3c
+
+/* User defined SNAP organization code */
+#define  PARSER_CORE_CFG_SNAP_ORG_CODE_CODE_SHIFT	0
+#define  PARSER_CORE_CFG_SNAP_ORG_CODE_CODE_MASK	0xffffff
+
+/* enable RFC1042 0x00000 organization code */
+#define  PARSER_CORE_CFG_SNAP_ORG_CODE_EN_RFC1042_MASK	0x1000000
+
+/*
+ * enables 802.
+ * 1Q 0x0000f8 organization code
+*/
+#define  PARSER_CORE_CFG_SNAP_ORG_CODE_EN_8021Q_MASK	0x2000000
+
+
+/*
+ * Register <PPP_IP_PROTOCOL_CODE>
+ *
+ * PPP Protocol Code to indicate L3 is IP
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_PPP_IP_PROT_CODE	0x40
+
+/* PPP Protocol code to identify L3 is IP */
+#define  PARSER_CORE_CFG_PPP_IP_PROT_CODE_PPP_CODE_0_SHIFT	0
+#define  PARSER_CORE_CFG_PPP_IP_PROT_CODE_PPP_CODE_0_MASK	0xffff
+
+/* PPP Protocol code to identify L3 is IP */
+#define  PARSER_CORE_CFG_PPP_IP_PROT_CODE_PPP_CODE_1_SHIFT	16
+#define  PARSER_CORE_CFG_PPP_IP_PROT_CODE_PPP_CODE_1_MASK	0xffff0000
+
+
+/*
+ * Register <QTAG_ETHERTYPE>
+ *
+ * Ethertype values to identify the presence of VLAN QTAG
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_QTAG_ETHTYPE	0x44
+
+/* Ethertype to identify VLAN QTAG */
+#define  PARSER_CORE_CFG_QTAG_ETHTYPE_ETHTYPE_QTAG_0_SHIFT	0
+#define  PARSER_CORE_CFG_QTAG_ETHTYPE_ETHTYPE_QTAG_0_MASK	0xffff
+
+/* Ethertype to identify VLAN QTAG */
+#define  PARSER_CORE_CFG_QTAG_ETHTYPE_ETHTYPE_QTAG_1_SHIFT	16
+#define  PARSER_CORE_CFG_QTAG_ETHTYPE_ETHTYPE_QTAG_1_MASK	0xffff0000
+
+
+/*
+ * Register <USER_ETHERTYPE_CONFIGURTION_0_1>
+ *
+ * Configures user Ethertype values
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_USER_ETHTYPE_0_1	0x48
+
+/* User Ethertype 0 */
+#define  PARSER_CORE_CFG_USER_ETHTYPE_0_1_ETHYPE_0_SHIFT	0
+#define  PARSER_CORE_CFG_USER_ETHTYPE_0_1_ETHYPE_0_MASK	0xffff
+
+/* User Ethertype 1 */
+#define  PARSER_CORE_CFG_USER_ETHTYPE_0_1_ETHYPE_1_SHIFT	16
+#define  PARSER_CORE_CFG_USER_ETHTYPE_0_1_ETHYPE_1_MASK	0xffff0000
+
+
+/*
+ * Register <USER_ETHERTYPE_CONFIGURATION_2_3>
+ *
+ * Configures user Ethertype values
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_USER_ETHTYPE_2_3	0x4c
+
+/* User Ethertype 2 */
+#define  PARSER_CORE_CFG_USER_ETHTYPE_2_3_ETHYPE_2_SHIFT	0
+#define  PARSER_CORE_CFG_USER_ETHTYPE_2_3_ETHYPE_2_MASK	0xffff
+
+/* User Ethertype 3 */
+#define  PARSER_CORE_CFG_USER_ETHTYPE_2_3_ETHYPE_3_SHIFT	16
+#define  PARSER_CORE_CFG_USER_ETHTYPE_2_3_ETHYPE_3_MASK	0xffff0000
+
+
+/*
+ * Register <USER_ETHERTYPE_CONFIGURATION>
+ *
+ * Configure protocol and enables user Ethertype
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_USER_ETHTYPE_CONFIG	0x50
+
+/* Pointer to L3 protocol for User Ethertype 0 (0 - None, 1-IPv4, 2-IPv6) */
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_PROT_0_SHIFT	0
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_PROT_0_MASK	0x3
+
+/* Pointer to L3 protocol for User Ethertype 1 (0 - None, 1-IPv4, 2-IPv6) */
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_PROT_1_SHIFT	2
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_PROT_1_MASK	0xc
+
+/* Pointer to L3 protocol for User Ethertype 2 (0 - None, 1-IPv4, 2-IPv6) */
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_PROT_2_SHIFT	4
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_PROT_2_MASK	0x30
+
+/* Pointer to L3 protocol for User Ethertype 3 (0 - None, 1-IPv4, 2-IPv6) */
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_PROT_3_SHIFT	6
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_PROT_3_MASK	0xc0
+
+/* Enable user Ethertype 3-0 (LSB is for user ethertype 0) */
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_EN_SHIFT	8
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_EN_MASK	0xf00
+
+/* 4 byte offset for User Ethertype 0 L3 */
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_OFFSET_0_SHIFT	16
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_OFFSET_0_MASK	0xf0000
+
+/* 4 byte offset for User Ethertype 1 L3 */
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_OFFSET_1_SHIFT	20
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_OFFSET_1_MASK	0xf00000
+
+/* 4 byte offset for User Ethertype 2 L3 */
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_OFFSET_2_SHIFT	24
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_OFFSET_2_MASK	0xf000000
+
+/* 4 byte offset for User Ethertype 3 L3 */
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_OFFSET_3_SHIFT	28
+#define  PARSER_CORE_CFG_USER_ETHTYPE_CONFIG_ETHTYPE_USER_OFFSET_3_MASK	0xf0000000
+
+
+/*
+ * Register <IPV6_HDR_EXT_FLTR_MASK_CFG>
+ *
+ * IPV6 Header Extension Filter Mask register
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_IPV6_HDR_EXT_FLTR_MASK_CFG	0x54
+
+/* hop by hop match filter mask */
+#define  PARSER_CORE_CFG_IPV6_HDR_EXT_FLTR_MASK_CFG_HOP_BY_HOP_MATCH_MASK	0x1
+
+/* Routing extension header option match filter mask */
+#define  PARSER_CORE_CFG_IPV6_HDR_EXT_FLTR_MASK_CFG_ROUTING_EH_MASK	0x2
+
+/* Destination Options extension header option match filter mask */
+#define  PARSER_CORE_CFG_IPV6_HDR_EXT_FLTR_MASK_CFG_DEST_OPT_EH_MASK	0x4
+
+
+/*
+ * Register <QTAG_NESTING>
+ *
+ * Qtag Nesting config
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_QTAG_NEST	0x58
+
+/*
+ * Set to enable Ethertype_qTag 0 as outer (LSB) 2nd VLAN (2nd), 3rd VLAN
+ * (MSB)
+*/
+#define  PARSER_CORE_CFG_QTAG_NEST_QTAG_NEST_0_PROFILE_0_SHIFT	0
+#define  PARSER_CORE_CFG_QTAG_NEST_QTAG_NEST_0_PROFILE_0_MASK	0x7
+
+/*
+ * Set to enable Ethertype_qTag 0 as outer (LSB) 2nd VLAN (2nd), 3rd VLAN
+ * (MSB)
+*/
+#define  PARSER_CORE_CFG_QTAG_NEST_QTAG_NEST_0_PROFILE_1_SHIFT	3
+#define  PARSER_CORE_CFG_QTAG_NEST_QTAG_NEST_0_PROFILE_1_MASK	0x38
+
+/*
+ * Set to enable Ethertype_qTag 0 as outer (LSB) 2nd VLAN (2nd), 3rd VLAN
+ * (MSB)
+*/
+#define  PARSER_CORE_CFG_QTAG_NEST_QTAG_NEST_0_PROFILE_2_SHIFT	6
+#define  PARSER_CORE_CFG_QTAG_NEST_QTAG_NEST_0_PROFILE_2_MASK	0x1c0
+
+/*
+ * Set to enable Ethertype_qTag 1 as outer (LSB) 2nd VLAN (2nd), 3rd VLAN
+ * (MSB)
+*/
+#define  PARSER_CORE_CFG_QTAG_NEST_QTAG_NEST_1_PROFILE_0_SHIFT	9
+#define  PARSER_CORE_CFG_QTAG_NEST_QTAG_NEST_1_PROFILE_0_MASK	0xe00
+
+/*
+ * Set to enable Ethertype_qTag 1 as outer (LSB) 2nd VLAN (2nd), 3rd VLAN
+ * (MSB)
+*/
+#define  PARSER_CORE_CFG_QTAG_NEST_QTAG_NEST_1_PROFILE_1_SHIFT	12
+#define  PARSER_CORE_CFG_QTAG_NEST_QTAG_NEST_1_PROFILE_1_MASK	0x7000
+
+/*
+ * Set to enable Ethertype_qTag 1 as outer (LSB) 2nd VLAN (2nd), 3rd VLAN
+ * (MSB)
+*/
+#define  PARSER_CORE_CFG_QTAG_NEST_QTAG_NEST_1_PROFILE_2_SHIFT	15
+#define  PARSER_CORE_CFG_QTAG_NEST_QTAG_NEST_1_PROFILE_2_MASK	0x38000
+
+
+/*
+ * Register <QTAG_HARD_NEST_PROFILE_0>
+ *
+ * QTAG Hard Nest Profile 0
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_QTAG_HARD_NEST_0	0x5c
+
+/*
+ * bit 2-0:
+ * Enable 8100 as VLAN for outer, 2nd, and inner VLANs (inner is bit 2).
+ * bit 5-3:
+ * Enable 88a8 as VLAN for outer, 2nd, and inner VLANs.
+ * bit 8-6:
+ * Enable 9100 as VLAN for outer, 2nd, and inner VLANs.
+ * bit 11-9:
+ * Enable 9200 as VLAN for outer, 2nd, and inner VLANs.
+*/
+#define  PARSER_CORE_CFG_QTAG_HARD_NEST_0_HARD_NEST_PROFILE_SHIFT	0
+#define  PARSER_CORE_CFG_QTAG_HARD_NEST_0_HARD_NEST_PROFILE_MASK	0xfff
+
+
+/*
+ * Register <QTAG_HARD_NEST_PROFILE_1>
+ *
+ * QTAG Hard Nest Profile 1
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_QTAG_HARD_NEST_1	0x60
+
+/* Hard Nest Profile */
+#define  PARSER_CORE_CFG_QTAG_HARD_NEST_1_HARD_NEST_PROFILE_SHIFT	0
+#define  PARSER_CORE_CFG_QTAG_HARD_NEST_1_HARD_NEST_PROFILE_MASK	0xfff
+
+
+/*
+ * Register <QTAG_HARD_NEST_PROFILE_2>
+ *
+ * QTAG Hard Nest Profile 2
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_QTAG_HARD_NEST_2	0x64
+
+/* Hard Nest Profile */
+#define  PARSER_CORE_CFG_QTAG_HARD_NEST_2_HARD_NEST_PROFILE_SHIFT	0
+#define  PARSER_CORE_CFG_QTAG_HARD_NEST_2_HARD_NEST_PROFILE_MASK	0xfff
+
+
+/*
+ * Register <USER_DEFINED_IP_PROTOCOL>
+ *
+ * IP Protocols to be matched to IP Protocol field and to be indicated in
+ * the output summary word
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_USER_IP_PROT	0x68
+
+/* User defined IP protocol 0 (value to be matched to IP protocol field) */
+#define  PARSER_CORE_CFG_USER_IP_PROT_USER_IP_PROT_0_SHIFT	0
+#define  PARSER_CORE_CFG_USER_IP_PROT_USER_IP_PROT_0_MASK	0xff
+
+/* User defined IP protocol 1 (value to be matched to IP protocol field) */
+#define  PARSER_CORE_CFG_USER_IP_PROT_USER_IP_PROT_1_SHIFT	8
+#define  PARSER_CORE_CFG_USER_IP_PROT_USER_IP_PROT_1_MASK	0xff00
+
+/* User defined IP protocol 2 (value to be matched to IP protocol field) */
+#define  PARSER_CORE_CFG_USER_IP_PROT_USER_IP_PROT_2_SHIFT	16
+#define  PARSER_CORE_CFG_USER_IP_PROT_USER_IP_PROT_2_MASK	0xff0000
+
+/* User defined IP protocol 3 (value to be matched to IP protocol field) */
+#define  PARSER_CORE_CFG_USER_IP_PROT_USER_IP_PROT_3_SHIFT	24
+#define  PARSER_CORE_CFG_USER_IP_PROT_USER_IP_PROT_3_MASK	0xff000000
+
+
+/*
+ * Register <DA_FILT0_VAL_L>
+ *
+ * Config DA filter0 31:
+ * 0
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT0_VAL_L	0x70
+
+/*
+ * DA Filter bits 31:
+ * 0
+*/
+#define  PARSER_CORE_CFG_DA_FILT0_VAL_L_DA_FILT_LSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT0_VAL_L_DA_FILT_LSB_MASK	0xffffffff
+
+
+/*
+ * Register <DA_FILT0_VAL_H>
+ *
+ * Config DA filter0 47:
+ * 32
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT0_VAL_H	0x74
+
+/*
+ * Current DA Filter bits 47:
+ * 32
+*/
+#define  PARSER_CORE_CFG_DA_FILT0_VAL_H_DA_FILT_MSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT0_VAL_H_DA_FILT_MSB_MASK	0xffff
+
+
+/*
+ * Register <DA_FILT1_VAL_L>
+ *
+ * Config DA filter1 31:
+ * 0
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT1_VAL_L	0x78
+
+/*
+ * DA Filter bits 31:
+ * 0
+*/
+#define  PARSER_CORE_CFG_DA_FILT1_VAL_L_DA_FILT_LSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT1_VAL_L_DA_FILT_LSB_MASK	0xffffffff
+
+
+/*
+ * Register <DA_FILT1_VAL_H>
+ *
+ * Config DA filter1 47:
+ * 32
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT1_VAL_H	0x7c
+
+/*
+ * Current DA Filter bits 47:
+ * 32
+*/
+#define  PARSER_CORE_CFG_DA_FILT1_VAL_H_DA_FILT_MSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT1_VAL_H_DA_FILT_MSB_MASK	0xffff
+
+
+/*
+ * Register <DA_FILT2_VAL_L>
+ *
+ * Config DA filter2 31:
+ * 0
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT2_VAL_L	0x80
+
+/*
+ * DA Filter bits 31:
+ * 0
+*/
+#define  PARSER_CORE_CFG_DA_FILT2_VAL_L_DA_FILT_LSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT2_VAL_L_DA_FILT_LSB_MASK	0xffffffff
+
+
+/*
+ * Register <DA_FILT2_VAL_H>
+ *
+ * Config DA filter2 47:
+ * 32
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT2_VAL_H	0x84
+
+/*
+ * Current DA Filter bits 47:
+ * 32
+*/
+#define  PARSER_CORE_CFG_DA_FILT2_VAL_H_DA_FILT_MSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT2_VAL_H_DA_FILT_MSB_MASK	0xffff
+
+
+/*
+ * Register <DA_FILT3_VAL_L>
+ *
+ * Config DA filter3 31:
+ * 0
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT3_VAL_L	0x88
+
+/*
+ * DA Filter bits 31:
+ * 0
+*/
+#define  PARSER_CORE_CFG_DA_FILT3_VAL_L_DA_FILT_LSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT3_VAL_L_DA_FILT_LSB_MASK	0xffffffff
+
+
+/*
+ * Register <DA_FILT3_VAL_H>
+ *
+ * Config DA filter3 47:
+ * 32
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT3_VAL_H	0x8c
+
+/*
+ * Current DA Filter bits 47:
+ * 32
+*/
+#define  PARSER_CORE_CFG_DA_FILT3_VAL_H_DA_FILT_MSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT3_VAL_H_DA_FILT_MSB_MASK	0xffff
+
+
+/*
+ * Register <DA_FILT4_VAL_L>
+ *
+ * Config DA filter4 31:
+ * 0
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT4_VAL_L	0x90
+
+/*
+ * DA Filter bits 31:
+ * 0
+*/
+#define  PARSER_CORE_CFG_DA_FILT4_VAL_L_DA_FILT_LSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT4_VAL_L_DA_FILT_LSB_MASK	0xffffffff
+
+
+/*
+ * Register <DA_FILT4_VAL_H>
+ *
+ * Config DA Filter4 47:
+ * 32
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT4_VAL_H	0x94
+
+/*
+ * Current DA Filter bits 47:
+ * 32
+*/
+#define  PARSER_CORE_CFG_DA_FILT4_VAL_H_DA_FILT_MSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT4_VAL_H_DA_FILT_MSB_MASK	0xffff
+
+
+/*
+ * Register <DA_FILT5_VAL_L>
+ *
+ * Config DA filter5 31:
+ * 0
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT5_VAL_L	0x98
+
+/*
+ * DA Filter bits 31:
+ * 0
+*/
+#define  PARSER_CORE_CFG_DA_FILT5_VAL_L_DA_FILT_LSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT5_VAL_L_DA_FILT_LSB_MASK	0xffffffff
+
+
+/*
+ * Register <DA_FILT5_VAL_H>
+ *
+ * Config DA Filter5 47:
+ * 32
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT5_VAL_H	0x9c
+
+/*
+ * Current DA Filter bits 47:
+ * 32
+*/
+#define  PARSER_CORE_CFG_DA_FILT5_VAL_H_DA_FILT_MSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT5_VAL_H_DA_FILT_MSB_MASK	0xffff
+
+
+/*
+ * Register <DA_FILT6_VAL_L>
+ *
+ * Config DA filter6 31:
+ * 0
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT6_VAL_L	0xa0
+
+/*
+ * DA Filter bits 31:
+ * 0
+*/
+#define  PARSER_CORE_CFG_DA_FILT6_VAL_L_DA_FILT_LSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT6_VAL_L_DA_FILT_LSB_MASK	0xffffffff
+
+
+/*
+ * Register <DA_FILT6_VAL_H>
+ *
+ * Config DA Filter6 47:
+ * 32
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT6_VAL_H	0xa4
+
+/*
+ * Current DA Filter bits 47:
+ * 32
+*/
+#define  PARSER_CORE_CFG_DA_FILT6_VAL_H_DA_FILT_MSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT6_VAL_H_DA_FILT_MSB_MASK	0xffff
+
+
+/*
+ * Register <DA_FILT7_VAL_L>
+ *
+ * Config DA filter7 31:
+ * 0
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT7_VAL_L	0xa8
+
+/*
+ * DA Filter bits 31:
+ * 0
+*/
+#define  PARSER_CORE_CFG_DA_FILT7_VAL_L_DA_FILT_LSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT7_VAL_L_DA_FILT_LSB_MASK	0xffffffff
+
+
+/*
+ * Register <DA_FILT7_VAL_H>
+ *
+ * Config DA Filter7 47:
+ * 32
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT7_VAL_H	0xac
+
+/*
+ * Current DA Filter bits 47:
+ * 32
+*/
+#define  PARSER_CORE_CFG_DA_FILT7_VAL_H_DA_FILT_MSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT7_VAL_H_DA_FILT_MSB_MASK	0xffff
+
+
+/*
+ * Register <DA_FILT8_VAL_L>
+ *
+ * Config DA filter8 31:
+ * 0
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT8_VAL_L	0xb0
+
+/*
+ * DA Filter bits 31:
+ * 0
+*/
+#define  PARSER_CORE_CFG_DA_FILT8_VAL_L_DA_FILT_LSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT8_VAL_L_DA_FILT_LSB_MASK	0xffffffff
+
+
+/*
+ * Register <DA_FILT8_VAL_H>
+ *
+ * Config DA Filter8 47:
+ * 32
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT8_VAL_H	0xb4
+
+/*
+ * Current DA Filter bits 47:
+ * 32
+*/
+#define  PARSER_CORE_CFG_DA_FILT8_VAL_H_DA_FILT_MSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT8_VAL_H_DA_FILT_MSB_MASK	0xffff
+
+
+/*
+ * Register <DA_FILT0_MASK_L>
+ *
+ * Config DA Filter0 mask 31:
+ * 0
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT0_MASK_L	0xc8
+
+/*
+ * Current DA Filter mask bits 31:
+ * 0
+*/
+#define  PARSER_CORE_CFG_DA_FILT0_MASK_L_DA_FILT_MASK_L_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT0_MASK_L_DA_FILT_MASK_L_MASK	0xffffffff
+
+
+/*
+ * Register <DA_FILT0_MASK_H>
+ *
+ * Config DA Filter0 mask 47:
+ * 32
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT0_MASK_H	0xcc
+
+/*
+ * Current DA Filter mask bits 47:
+ * 32
+*/
+#define  PARSER_CORE_CFG_DA_FILT0_MASK_H_DA_FILT_MASK_MSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT0_MASK_H_DA_FILT_MASK_MSB_MASK	0xffff
+
+
+/*
+ * Register <DA_FILT1_MASK_L>
+ *
+ * Config DA Filter1 mask 31:
+ * 0
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT1_MASK_L	0xd0
+
+/*
+ * Current DA Filter mask bits 31:
+ * 0
+*/
+#define  PARSER_CORE_CFG_DA_FILT1_MASK_L_DA_FILT_MASK_L_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT1_MASK_L_DA_FILT_MASK_L_MASK	0xffffffff
+
+
+/*
+ * Register <DA_FILT1_MASK_H>
+ *
+ * Config DA Filter1 mask 47:
+ * 32
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT1_MASK_H	0xd4
+
+/*
+ * Current DA Filter mask bits 47:
+ * 32
+*/
+#define  PARSER_CORE_CFG_DA_FILT1_MASK_H_DA_FILT_MASK_MSB_SHIFT	0
+#define  PARSER_CORE_CFG_DA_FILT1_MASK_H_DA_FILT_MASK_MSB_MASK	0xffff
+
+
+/*
+ * Register <DA_FILT_VALID_CFG_PROFILE_0>
+ *
+ * Valid configuration of all DA filters:
+ * there is a dedicated bit per each DA filter that says if the current DA
+ * filter is valid or not.
+ * Used for on-the-fly DA filter value (mask) modifications, since the DA
+ * filter parameters are not assigned on single SW register.
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT_VALID_CFG_0	0xd8
+
+/* DA Filter0 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_0_DA_FILT0_VALID_MASK	0x1
+
+/* DA Filter1 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_0_DA_FILT1_VALID_MASK	0x2
+
+/* DA Filter2 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_0_DA_FILT2_VALID_MASK	0x4
+
+/* DA Filter3 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_0_DA_FILT3_VALID_MASK	0x8
+
+/* DA Filter4 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_0_DA_FILT4_VALID_MASK	0x10
+
+/* DA Filter5 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_0_DA_FILT5_VALID_MASK	0x20
+
+/* DA Filter6 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_0_DA_FILT6_VALID_MASK	0x40
+
+/* DA Filter7 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_0_DA_FILT7_VALID_MASK	0x80
+
+/* DA Filter8 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_0_DA_FILT8_VALID_MASK	0x100
+
+
+/*
+ * Register <DA_FILT_VALID_CFG_PROFILE_1>
+ *
+ * Valid configuration of all DA filters:
+ * there is a dedicated bit per each DA filter that says if the current DA
+ * filter is valid or not.
+ * Used for on-the-fly DA filter value (mask) modifications, since the DA
+ * filter parameters are not assigned on single SW register.
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT_VALID_CFG_1	0xdc
+
+/* DA Filter0 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_1_DA_FILT0_VALID_MASK	0x1
+
+/* DA Filter1 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_1_DA_FILT1_VALID_MASK	0x2
+
+/* DA Filter2 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_1_DA_FILT2_VALID_MASK	0x4
+
+/* DA Filter3 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_1_DA_FILT3_VALID_MASK	0x8
+
+/* DA Filter4 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_1_DA_FILT4_VALID_MASK	0x10
+
+/* DA Filter5 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_1_DA_FILT5_VALID_MASK	0x20
+
+/* DA Filter6 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_1_DA_FILT6_VALID_MASK	0x40
+
+/* DA Filter7 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_1_DA_FILT7_VALID_MASK	0x80
+
+/* DA Filter8 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_1_DA_FILT8_VALID_MASK	0x100
+
+
+/*
+ * Register <DA_FILT_VALID_CFG_PROFILE_2>
+ *
+ * Valid configuration of all DA filters:
+ * there is a dedicated bit per each DA filter that says if the current DA
+ * filter is valid or not.
+ * Used for on-the-fly DA filter value (mask) modifications, since the DA
+ * filter parameters are not assigned on single SW register.
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_DA_FILT_VALID_CFG_2	0xe0
+
+/* DA Filter0 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_2_DA_FILT0_VALID_MASK	0x1
+
+/* DA Filter1 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_2_DA_FILT1_VALID_MASK	0x2
+
+/* DA Filter2 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_2_DA_FILT2_VALID_MASK	0x4
+
+/* DA Filter3 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_2_DA_FILT3_VALID_MASK	0x8
+
+/* DA Filter4 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_2_DA_FILT4_VALID_MASK	0x10
+
+/* DA Filter5 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_2_DA_FILT5_VALID_MASK	0x20
+
+/* DA Filter6 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_2_DA_FILT6_VALID_MASK	0x40
+
+/* DA Filter7 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_2_DA_FILT7_VALID_MASK	0x80
+
+/* DA Filter8 valid bit */
+#define  PARSER_CORE_CFG_DA_FILT_VALID_CFG_2_DA_FILT8_VALID_MASK	0x100
+
+
+/*
+ * Register <GRE_PROTOCOL_CFG>
+ *
+ * GRE Protocol
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_GRE_PROTOCOL_CFG	0xe4
+
+/* GRE_PROTOCOL */
+#define  PARSER_CORE_CFG_GRE_PROTOCOL_CFG_GRE_PROTOCOL_SHIFT	0
+#define  PARSER_CORE_CFG_GRE_PROTOCOL_CFG_GRE_PROTOCOL_MASK	0xffff
+
+
+/*
+ * Register <PROP_TAG_CFG>
+ *
+ * Prop Tag Configuration
+ */
+#define RNR_QUAD_PARSER_CORE_CFG_PROP_TAG_CFG	0xe8
+
+/* profile 0 tag size, valid values are 0,2,4,6,8 */
+#define  PARSER_CORE_CFG_PROP_TAG_CFG_SIZE_PROFILE_0_SHIFT	0
+#define  PARSER_CORE_CFG_PROP_TAG_CFG_SIZE_PROFILE_0_MASK	0x1f
+
+/* profile 1 tag size, valid values are 0,2,4,6,8 */
+#define  PARSER_CORE_CFG_PROP_TAG_CFG_SIZE_PROFILE_1_SHIFT	5
+#define  PARSER_CORE_CFG_PROP_TAG_CFG_SIZE_PROFILE_1_MASK	0x3e0
+
+/* profile 2 tag size, valid values are 0,2,4,6,8 */
+#define  PARSER_CORE_CFG_PROP_TAG_CFG_SIZE_PROFILE_2_SHIFT	10
+#define  PARSER_CORE_CFG_PROP_TAG_CFG_SIZE_PROFILE_2_MASK	0x7c00
+
+/* Pre-DA Profile 0 */
+#define  PARSER_CORE_CFG_PROP_TAG_CFG_PRE_DA_DPROFILE_0_MASK	0x8000
+
+/* Pre-DA Profile 1 */
+#define  PARSER_CORE_CFG_PROP_TAG_CFG_PRE_DA_DPROFILE_1_MASK	0x10000
+
+/* Pre-DA Profile 2 */
+#define  PARSER_CORE_CFG_PROP_TAG_CFG_PRE_DA_DPROFILE_2_MASK	0x20000
+
+
+/*
+ * Register <DMA_ARB_CFG>
+ *
+ * DMA arbiter Configuration
+ */
+#define RNR_QUAD_GENERAL_CONFIG_DMA_ARB_CFG	0x100
+
+/* Select whether to use DDR FIFO only for DDR accesses */
+#define  GENERAL_CONFIG_DMA_ARB_CFG_USE_FIFO_FOR_DDR_ONLY_MASK	0x1
+
+/* Scheduling policy for token arbiter */
+#define  GENERAL_CONFIG_DMA_ARB_CFG_TOKEN_ARBITER_IS_RR_MASK	0x2
+
+/*
+ * chicken bit to disable external flow control.
+ * Packets will always be sent, no matter what the token count says
+#define  GENERAL_CONFIG_DMA_ARB_CFG_CHICKEN_NO_FLOWCTRL_MASK	0x4
+
+/* Set congestion threshold */
+#define  GENERAL_CONFIG_DMA_ARB_CFG_CONGEST_THRESHOLD_SHIFT	4
+#define  GENERAL_CONFIG_DMA_ARB_CFG_CONGEST_THRESHOLD_MASK	0x1f0
+
+
+/*
+ * Register <PSRAM0_BASE>
+ *
+ * Configure PSRAM0 base
+ */
+#define RNR_QUAD_GENERAL_CONFIG_PSRAM0_BASE	0x104
+
+/* Value for base/mask */
+#define  GENERAL_CONFIG_PSRAM0_BASE_VAL_SHIFT	0
+#define  GENERAL_CONFIG_PSRAM0_BASE_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <PSRAM1_BASE>
+ *
+ * Configure PSRAM1 base
+ */
+#define RNR_QUAD_GENERAL_CONFIG_PSRAM1_BASE	0x108
+
+/* Value for base/mask */
+#define  GENERAL_CONFIG_PSRAM1_BASE_VAL_SHIFT	0
+#define  GENERAL_CONFIG_PSRAM1_BASE_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <PSRAM2_BASE>
+ *
+ * Configure PSRAM2 base
+ */
+#define RNR_QUAD_GENERAL_CONFIG_PSRAM2_BASE	0x10c
+
+/* Value for base/mask */
+#define  GENERAL_CONFIG_PSRAM2_BASE_VAL_SHIFT	0
+#define  GENERAL_CONFIG_PSRAM2_BASE_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <PSRAM3_BASE>
+ *
+ * Configure PSRAM3 base
+ */
+#define RNR_QUAD_GENERAL_CONFIG_PSRAM3_BASE	0x110
+
+/* Value for base/mask */
+#define  GENERAL_CONFIG_PSRAM3_BASE_VAL_SHIFT	0
+#define  GENERAL_CONFIG_PSRAM3_BASE_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DDR0_BASE>
+ *
+ * Configure DDR0 base
+ */
+#define RNR_QUAD_GENERAL_CONFIG_DDR0_BASE	0x114
+
+/* Value for base/mask */
+#define  GENERAL_CONFIG_DDR0_BASE_VAL_SHIFT	0
+#define  GENERAL_CONFIG_DDR0_BASE_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DDR1_BASE>
+ *
+ * Configure DDR1 base
+ */
+#define RNR_QUAD_GENERAL_CONFIG_DDR1_BASE	0x118
+
+/* Value for base/mask */
+#define  GENERAL_CONFIG_DDR1_BASE_VAL_SHIFT	0
+#define  GENERAL_CONFIG_DDR1_BASE_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <PSRAM0_MASK>
+ *
+ * Configure PSRAM0 mask
+ */
+#define RNR_QUAD_GENERAL_CONFIG_PSRAM0_MASK	0x11c
+
+/* Value for base/mask */
+#define  GENERAL_CONFIG_PSRAM0_MASK_VAL_SHIFT	0
+#define  GENERAL_CONFIG_PSRAM0_MASK_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <PSRAM1_MASK>
+ *
+ * Configure PSRAM1 mask
+ */
+#define RNR_QUAD_GENERAL_CONFIG_PSRAM1_MASK	0x120
+
+/* Value for base/mask */
+#define  GENERAL_CONFIG_PSRAM1_MASK_VAL_SHIFT	0
+#define  GENERAL_CONFIG_PSRAM1_MASK_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <PSRAM2_MASK>
+ *
+ * Configure PSRAM2 mask
+ */
+#define RNR_QUAD_GENERAL_CONFIG_PSRAM2_MASK	0x124
+
+/* Value for base/mask */
+#define  GENERAL_CONFIG_PSRAM2_MASK_VAL_SHIFT	0
+#define  GENERAL_CONFIG_PSRAM2_MASK_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <PSRAM3_MASK>
+ *
+ * Configure PSRAM3 mask
+ */
+#define RNR_QUAD_GENERAL_CONFIG_PSRAM3_MASK	0x128
+
+/* Value for base/mask */
+#define  GENERAL_CONFIG_PSRAM3_MASK_VAL_SHIFT	0
+#define  GENERAL_CONFIG_PSRAM3_MASK_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DDR0_MASK>
+ *
+ * Configure DDR0 mask
+ */
+#define RNR_QUAD_GENERAL_CONFIG_DDR0_MASK	0x12c
+
+/* Value for base/mask */
+#define  GENERAL_CONFIG_DDR0_MASK_VAL_SHIFT	0
+#define  GENERAL_CONFIG_DDR0_MASK_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <DDR1_MASK>
+ *
+ * Configure DDR1 mask
+ */
+#define RNR_QUAD_GENERAL_CONFIG_DDR1_MASK	0x130
+
+/* Value for base/mask */
+#define  GENERAL_CONFIG_DDR1_MASK_VAL_SHIFT	0
+#define  GENERAL_CONFIG_DDR1_MASK_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <PROFILING_CONFIG>
+ *
+ * Profiling configuration
+ */
+#define RNR_QUAD_GENERAL_CONFIG_PROF_CONFIG	0x134
+
+/* Select which 12-bits from 32-bit counter value to be recorded by tracer */
+#define  GENERAL_CONFIG_PROF_CONFIG_COUNTER_LSB_SEL_SHIFT	0
+#define  GENERAL_CONFIG_PROF_CONFIG_COUNTER_LSB_SEL_MASK	0x1f
+
+/* Enable tracing for core 0 */
+#define  GENERAL_CONFIG_PROF_CONFIG_ENABLE_TRACE_CORE_0_MASK	0x100
+
+/* Enable tracing for core 1 */
+#define  GENERAL_CONFIG_PROF_CONFIG_ENABLE_TRACE_CORE_1_MASK	0x200
+
+/* Enable tracing for core 2 */
+#define  GENERAL_CONFIG_PROF_CONFIG_ENABLE_TRACE_CORE_2_MASK	0x400
+
+/* Enable tracing for core 3 */
+#define  GENERAL_CONFIG_PROF_CONFIG_ENABLE_TRACE_CORE_3_MASK	0x800
+
+/* Enable tracing for core 4 */
+#define  GENERAL_CONFIG_PROF_CONFIG_ENABLE_TRACE_CORE_4_MASK	0x1000
+
+/* Enable tracing for core 5 */
+#define  GENERAL_CONFIG_PROF_CONFIG_ENABLE_TRACE_CORE_5_MASK	0x2000
+
+
+/*
+ * Register <BKPT_CFG_0>
+ *
+ * Breakpoint 0 configuration.
+ */
+#define RNR_QUAD_GENERAL_CONFIG_BKPT_0_CFG	0x140
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_0_CFG_ADDR_SHIFT	0
+#define  GENERAL_CONFIG_BKPT_0_CFG_ADDR_MASK	0x1fff
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_0_CFG_THREAD_SHIFT	16
+#define  GENERAL_CONFIG_BKPT_0_CFG_THREAD_MASK	0xf0000
+
+
+/*
+ * Register <BKPT_CFG_1>
+ *
+ * Breakpoint 1 configuration.
+ */
+#define RNR_QUAD_GENERAL_CONFIG_BKPT_1_CFG	0x144
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_1_CFG_ADDR_SHIFT	0
+#define  GENERAL_CONFIG_BKPT_1_CFG_ADDR_MASK	0x1fff
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_1_CFG_THREAD_SHIFT	16
+#define  GENERAL_CONFIG_BKPT_1_CFG_THREAD_MASK	0xf0000
+
+
+/*
+ * Register <BKPT_CFG_2>
+ *
+ * Breakpoint 2 configuration.
+ */
+#define RNR_QUAD_GENERAL_CONFIG_BKPT_2_CFG	0x148
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_2_CFG_ADDR_SHIFT	0
+#define  GENERAL_CONFIG_BKPT_2_CFG_ADDR_MASK	0x1fff
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_2_CFG_THREAD_SHIFT	16
+#define  GENERAL_CONFIG_BKPT_2_CFG_THREAD_MASK	0xf0000
+
+
+/*
+ * Register <BKPT_CFG_3>
+ *
+ * Breakpoint 3 configuration.
+ */
+#define RNR_QUAD_GENERAL_CONFIG_BKPT_3_CFG	0x14c
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_3_CFG_ADDR_SHIFT	0
+#define  GENERAL_CONFIG_BKPT_3_CFG_ADDR_MASK	0x1fff
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_3_CFG_THREAD_SHIFT	16
+#define  GENERAL_CONFIG_BKPT_3_CFG_THREAD_MASK	0xf0000
+
+
+/*
+ * Register <BKPT_CFG_4>
+ *
+ * Breakpoint 4 configuration.
+ */
+#define RNR_QUAD_GENERAL_CONFIG_BKPT_4_CFG	0x150
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_4_CFG_ADDR_SHIFT	0
+#define  GENERAL_CONFIG_BKPT_4_CFG_ADDR_MASK	0x1fff
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_4_CFG_THREAD_SHIFT	16
+#define  GENERAL_CONFIG_BKPT_4_CFG_THREAD_MASK	0xf0000
+
+
+/*
+ * Register <BKPT_CFG_5>
+ *
+ * Breakpoint 5 configuration.
+ */
+#define RNR_QUAD_GENERAL_CONFIG_BKPT_5_CFG	0x154
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_5_CFG_ADDR_SHIFT	0
+#define  GENERAL_CONFIG_BKPT_5_CFG_ADDR_MASK	0x1fff
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_5_CFG_THREAD_SHIFT	16
+#define  GENERAL_CONFIG_BKPT_5_CFG_THREAD_MASK	0xf0000
+
+
+/*
+ * Register <BKPT_CFG_6>
+ *
+ * Breakpoint 6 configuration.
+ */
+#define RNR_QUAD_GENERAL_CONFIG_BKPT_6_CFG	0x158
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_6_CFG_ADDR_SHIFT	0
+#define  GENERAL_CONFIG_BKPT_6_CFG_ADDR_MASK	0x1fff
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_6_CFG_THREAD_SHIFT	16
+#define  GENERAL_CONFIG_BKPT_6_CFG_THREAD_MASK	0xf0000
+
+
+/*
+ * Register <BKPT_CFG_7>
+ *
+ * Breakpoint 7 configuration.
+ */
+#define RNR_QUAD_GENERAL_CONFIG_BKPT_7_CFG	0x15c
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_7_CFG_ADDR_SHIFT	0
+#define  GENERAL_CONFIG_BKPT_7_CFG_ADDR_MASK	0x1fff
+
+/* Breakpoint address */
+#define  GENERAL_CONFIG_BKPT_7_CFG_THREAD_SHIFT	16
+#define  GENERAL_CONFIG_BKPT_7_CFG_THREAD_MASK	0xf0000
+
+
+/*
+ * Register <BKPT_CFG_GEN>
+ *
+ * Breakpoint general configuration.
+ */
+#define RNR_QUAD_GENERAL_CONFIG_BKPT_GEN_CFG	0x160
+
+/* Breakpoint handler routine address */
+#define  GENERAL_CONFIG_BKPT_GEN_CFG_HANDLER_ADDR_SHIFT	0
+#define  GENERAL_CONFIG_BKPT_GEN_CFG_HANDLER_ADDR_MASK	0x1fff
+
+/* New PC to be updated by breakpoint handler routine */
+#define  GENERAL_CONFIG_BKPT_GEN_CFG_UPDATE_PC_VALUE_SHIFT	16
+#define  GENERAL_CONFIG_BKPT_GEN_CFG_UPDATE_PC_VALUE_MASK	0x1fff0000
+
+
+/*
+ * Register <POWERSAVE_CONFIG>
+ *
+ * Powersaving configuration
+ */
+#define RNR_QUAD_GENERAL_CONFIG_POWERSAVE_CONFIG	0x170
+
+/*
+ * Select how many clocks to wait in IDLE condition before entering
+ * powersave state
+*/
+#define  GENERAL_CONFIG_POWERSAVE_CONFIG_TIME_COUNTER_SHIFT	0
+#define  GENERAL_CONFIG_POWERSAVE_CONFIG_TIME_COUNTER_MASK	0xff
+
+/* Enable powersave for core 0 */
+#define  GENERAL_CONFIG_POWERSAVE_CONFIG_ENABLE_POWERSAVE_CORE_0_MASK	0x100
+
+/* Enable powersave for core 1 */
+#define  GENERAL_CONFIG_POWERSAVE_CONFIG_ENABLE_POWERSAVE_CORE_1_MASK	0x200
+
+/* Enable powersave for core 2 */
+#define  GENERAL_CONFIG_POWERSAVE_CONFIG_ENABLE_POWERSAVE_CORE_2_MASK	0x400
+
+/* Enable powersave for core 3 */
+#define  GENERAL_CONFIG_POWERSAVE_CONFIG_ENABLE_POWERSAVE_CORE_3_MASK	0x800
+
+/* Enable powersave for core 4 */
+#define  GENERAL_CONFIG_POWERSAVE_CONFIG_ENABLE_POWERSAVE_CORE_4_MASK	0x1000
+
+/* Enable powersave for core 5 */
+#define  GENERAL_CONFIG_POWERSAVE_CONFIG_ENABLE_POWERSAVE_CORE_5_MASK	0x2000
+
+
+/*
+ * Register <POWERSAVE_STATUS> - read-only
+ *
+ * Powersave status indications
+ */
+#define RNR_QUAD_GENERAL_CONFIG_POWERSAVE_STATUS	0x174
+
+/* Core 0 status */
+#define  GENERAL_CONFIG_POWERSAVE_STATUS_CORE_0_STATUS_MASK	0x1
+
+/* Core 1 status */
+#define  GENERAL_CONFIG_POWERSAVE_STATUS_CORE_1_STATUS_MASK	0x2
+
+/* Core 2 status */
+#define  GENERAL_CONFIG_POWERSAVE_STATUS_CORE_2_STATUS_MASK	0x4
+
+/* Core 3 status */
+#define  GENERAL_CONFIG_POWERSAVE_STATUS_CORE_3_STATUS_MASK	0x8
+
+/* Core 4 status */
+#define  GENERAL_CONFIG_POWERSAVE_STATUS_CORE_4_STATUS_MASK	0x10
+
+/* Core 5 status */
+#define  GENERAL_CONFIG_POWERSAVE_STATUS_CORE_5_STATUS_MASK	0x20
+
+
+/*
+ * Register <FIFO_CONFIG>
+ *
+ * FIFOs configuration
+ */
+#define RNR_QUAD_DEBUG_FIFO_CONFIG	0x200
+
+/* Apply software reset to PSRAM header FIFO in EC arbiter */
+#define  DEBUG_FIFO_CONFIG_PSRAM_HDR_SW_RST_MASK	0x1
+
+/* Apply software reset to PSRAM data FIFO in EC arbiter */
+#define  DEBUG_FIFO_CONFIG_PSRAM_DATA_SW_RST_MASK	0x2
+
+/* Apply software reset to DDR header FIFO in EC arbiter */
+#define  DEBUG_FIFO_CONFIG_DDR_HDR_SW_RST_MASK	0x4
+
+/* Software read address for PSRAM header FIFO */
+#define  DEBUG_FIFO_CONFIG_PSRAM_HDR_SW_RD_ADDR_SHIFT	8
+#define  DEBUG_FIFO_CONFIG_PSRAM_HDR_SW_RD_ADDR_MASK	0xf00
+
+/* Software read address for PSRAM data FIFO */
+#define  DEBUG_FIFO_CONFIG_PSRAM_DATA_SW_RD_ADDR_SHIFT	12
+#define  DEBUG_FIFO_CONFIG_PSRAM_DATA_SW_RD_ADDR_MASK	0xf000
+
+/* Software read address for DDR header FIFO */
+#define  DEBUG_FIFO_CONFIG_DDR_HDR_SW_RD_ADDR_SHIFT	16
+#define  DEBUG_FIFO_CONFIG_DDR_HDR_SW_RD_ADDR_MASK	0xf0000
+
+
+/*
+ * Register <PSRAM_HDR_FIFO_STATUS> - read-only
+ *
+ * PSRAM Header FIFO status
+ */
+#define RNR_QUAD_DEBUG_PSRAM_HDR_FIFO_STATUS	0x204
+
+/* FIFO full indication */
+#define  DEBUG_PSRAM_HDR_FIFO_STATUS_FULL_MASK	0x1
+
+/* FIFO empty indication */
+#define  DEBUG_PSRAM_HDR_FIFO_STATUS_EMPTY_MASK	0x2
+
+/* Push write counter value */
+#define  DEBUG_PSRAM_HDR_FIFO_STATUS_PUSH_WR_CNTR_SHIFT	4
+#define  DEBUG_PSRAM_HDR_FIFO_STATUS_PUSH_WR_CNTR_MASK	0x1f0
+
+/* Pop read counter value */
+#define  DEBUG_PSRAM_HDR_FIFO_STATUS_POP_RD_CNTR_SHIFT	12
+#define  DEBUG_PSRAM_HDR_FIFO_STATUS_POP_RD_CNTR_MASK	0x1f000
+
+/* Used words value */
+#define  DEBUG_PSRAM_HDR_FIFO_STATUS_USED_WORDS_SHIFT	20
+#define  DEBUG_PSRAM_HDR_FIFO_STATUS_USED_WORDS_MASK	0x1f00000
+
+
+/*
+ * Register <PSRAM_DATA_FIFO_STATUS> - read-only
+ *
+ * PSRAM Data FIFO status
+ */
+#define RNR_QUAD_DEBUG_PSRAM_DATA_FIFO_STATUS	0x208
+
+/* FIFO full indication */
+#define  DEBUG_PSRAM_DATA_FIFO_STATUS_FULL_MASK	0x1
+
+/* FIFO empty indication */
+#define  DEBUG_PSRAM_DATA_FIFO_STATUS_EMPTY_MASK	0x2
+
+/* Almost FIFO full indication */
+#define  DEBUG_PSRAM_DATA_FIFO_STATUS_ALMOST_FULL_MASK	0x4
+
+/* Push write counter value */
+#define  DEBUG_PSRAM_DATA_FIFO_STATUS_PUSH_WR_CNTR_SHIFT	4
+#define  DEBUG_PSRAM_DATA_FIFO_STATUS_PUSH_WR_CNTR_MASK	0x1f0
+
+/* Pop read counter value */
+#define  DEBUG_PSRAM_DATA_FIFO_STATUS_POP_RD_CNTR_SHIFT	12
+#define  DEBUG_PSRAM_DATA_FIFO_STATUS_POP_RD_CNTR_MASK	0x1f000
+
+/* Used words value */
+#define  DEBUG_PSRAM_DATA_FIFO_STATUS_USED_WORDS_SHIFT	20
+#define  DEBUG_PSRAM_DATA_FIFO_STATUS_USED_WORDS_MASK	0x1f00000
+
+
+/*
+ * Register <DDR_HDR_FIFO_STATUS> - read-only
+ *
+ * DDR Header FIFO status
+ */
+#define RNR_QUAD_DEBUG_DDR_HDR_FIFO_STATUS	0x20c
+
+/* FIFO full indication */
+#define  DEBUG_DDR_HDR_FIFO_STATUS_FULL_MASK	0x1
+
+/* FIFO empty indication */
+#define  DEBUG_DDR_HDR_FIFO_STATUS_EMPTY_MASK	0x2
+
+/* Push write counter value */
+#define  DEBUG_DDR_HDR_FIFO_STATUS_PUSH_WR_CNTR_SHIFT	4
+#define  DEBUG_DDR_HDR_FIFO_STATUS_PUSH_WR_CNTR_MASK	0x1f0
+
+/* Pop read counter value */
+#define  DEBUG_DDR_HDR_FIFO_STATUS_POP_RD_CNTR_SHIFT	12
+#define  DEBUG_DDR_HDR_FIFO_STATUS_POP_RD_CNTR_MASK	0x1f000
+
+/* Used words value */
+#define  DEBUG_DDR_HDR_FIFO_STATUS_USED_WORDS_SHIFT	20
+#define  DEBUG_DDR_HDR_FIFO_STATUS_USED_WORDS_MASK	0x1f00000
+
+
+/*
+ * Register <DDR_DATA_FIFO_STATUS> - read-only
+ *
+ * DDR Data FIFO status
+ */
+#define RNR_QUAD_DEBUG_DDR_DATA_FIFO_STATUS	0x210
+
+/* FIFO full indication */
+#define  DEBUG_DDR_DATA_FIFO_STATUS_FULL_MASK	0x1
+
+/* FIFO empty indication */
+#define  DEBUG_DDR_DATA_FIFO_STATUS_EMPTY_MASK	0x2
+
+/* Almost FIFO full indication */
+#define  DEBUG_DDR_DATA_FIFO_STATUS_ALMOST_FULL_MASK	0x4
+
+/* Write counter value */
+#define  DEBUG_DDR_DATA_FIFO_STATUS_WR_CNTR_SHIFT	4
+#define  DEBUG_DDR_DATA_FIFO_STATUS_WR_CNTR_MASK	0x1ff0
+
+/* Read counter value */
+#define  DEBUG_DDR_DATA_FIFO_STATUS_RD_CNTR_SHIFT	16
+#define  DEBUG_DDR_DATA_FIFO_STATUS_RD_CNTR_MASK	0x1ff0000
+
+
+/*
+ * Register <DDR_DATA_FIFO_STATUS2> - read-only
+ *
+ * DDR Data FIFO status 2
+ */
+#define RNR_QUAD_DEBUG_DDR_DATA_FIFO_STATUS2	0x214
+
+/* Current read address */
+#define  DEBUG_DDR_DATA_FIFO_STATUS2_READ_ADDR_SHIFT	0
+#define  DEBUG_DDR_DATA_FIFO_STATUS2_READ_ADDR_MASK	0xff
+
+/* Used words */
+#define  DEBUG_DDR_DATA_FIFO_STATUS2_USED_WORDS_SHIFT	8
+#define  DEBUG_DDR_DATA_FIFO_STATUS2_USED_WORDS_MASK	0x1ff00
+
+
+/*
+ * Register <PSRAM_HDR_FIFO_DATA1> - read-only
+ *
+ * Read contents of FIFO memory
+ */
+#define RNR_QUAD_DEBUG_PSRAM_HDR_FIFO_DATA1	0x220
+
+/* Data */
+#define  DEBUG_PSRAM_HDR_FIFO_DATA1_DATA_SHIFT	0
+#define  DEBUG_PSRAM_HDR_FIFO_DATA1_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <PSRAM_HDR_FIFO_DATA2> - read-only
+ *
+ * Read contents of FIFO memory
+ */
+#define RNR_QUAD_DEBUG_PSRAM_HDR_FIFO_DATA2	0x224
+
+/* Data */
+#define  DEBUG_PSRAM_HDR_FIFO_DATA2_DATA_SHIFT	0
+#define  DEBUG_PSRAM_HDR_FIFO_DATA2_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <PSRAM_DATA_FIFO_DATA1> - read-only
+ *
+ * Read contents of FIFO memory
+ */
+#define RNR_QUAD_DEBUG_PSRAM_DATA_FIFO_DATA1	0x228
+
+/* Data */
+#define  DEBUG_PSRAM_DATA_FIFO_DATA1_DATA_SHIFT	0
+#define  DEBUG_PSRAM_DATA_FIFO_DATA1_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <PSRAM_DATA_FIFO_DATA2> - read-only
+ *
+ * Read contents of FIFO memory
+ */
+#define RNR_QUAD_DEBUG_PSRAM_DATA_FIFO_DATA2	0x22c
+
+/* Data */
+#define  DEBUG_PSRAM_DATA_FIFO_DATA2_DATA_SHIFT	0
+#define  DEBUG_PSRAM_DATA_FIFO_DATA2_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <DDR_HDR_FIFO_DATA1> - read-only
+ *
+ * Read contents of FIFO memory
+ */
+#define RNR_QUAD_DEBUG_DDR_HDR_FIFO_DATA1	0x230
+
+/* Data */
+#define  DEBUG_DDR_HDR_FIFO_DATA1_DATA_SHIFT	0
+#define  DEBUG_DDR_HDR_FIFO_DATA1_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <DDR_HDR_FIFO_DATA2> - read-only
+ *
+ * Read contents of FIFO memory
+ */
+#define RNR_QUAD_DEBUG_DDR_HDR_FIFO_DATA2	0x234
+
+/* Data */
+#define  DEBUG_DDR_HDR_FIFO_DATA2_DATA_SHIFT	0
+#define  DEBUG_DDR_HDR_FIFO_DATA2_DATA_MASK	0xffffffff
+
+
+/*
+ * Registers <TOKEN> - <x> is [ 0 => 35 ]
+ *
+ * Token value for flow control
+ */
+#define RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(x)	(0x400 + (x) * 0x4)
+
+/* Value */
+#define  EXT_FLOWCTRL_CONFIG_TOKEN_VAL_VAL_SHIFT	0
+#define  EXT_FLOWCTRL_CONFIG_TOKEN_VAL_VAL_MASK	0xffffffff
+
+
+/*
+ * Registers <PSRAM_UBUS_DECODE> - <x> is [ 0 => 15 ]
+ *
+ * Decode for PSRAM Queue
+ */
+#define RNR_QUAD_UBUS_DECODE_CFG_PSRAM_UBUS_DECODE(x)	(0x600 + (x) * 0x4)
+
+/* Value */
+#define  UBUS_DECODE_CFG_PSRAM_UBUS_DECODE_VAL_SHIFT	0
+#define  UBUS_DECODE_CFG_PSRAM_UBUS_DECODE_VAL_MASK	0xffffffff
+
+
+/*
+ * Registers <DDR_UBUS_DECODE> - <x> is [ 0 => 15 ]
+ *
+ * Decode for DDR Queue
+ */
+#define RNR_QUAD_UBUS_DECODE_CFG_DDR_UBUS_DECODE(x)	(0x640 + (x) * 0x4)
+
+/* Value */
+#define  UBUS_DECODE_CFG_DDR_UBUS_DECODE_VAL_SHIFT	0
+#define  UBUS_DECODE_CFG_DDR_UBUS_DECODE_VAL_MASK	0xffffffff
+
+
+#endif /* ! XRDP_REGS_RNR_QUAD_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_rnr_regs.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_rnr_regs.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_rnr_regs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_rnr_regs.h	2025-09-25 17:40:35.075364786 +0200
@@ -0,0 +1,570 @@
+#ifndef XRDP_REGS_RNR_REGS_H_
+#define XRDP_REGS_RNR_REGS_H_
+
+/* relative to core */
+#define RNR_REGS_OFFSET_0		0xd00000
+
+/* relative to core */
+#define RNR_REGS_OFFSET_1		0xd01000
+
+/* relative to core */
+#define RNR_REGS_OFFSET_2		0xd02000
+
+/* relative to core */
+#define RNR_REGS_OFFSET_3		0xd03000
+
+/* relative to core */
+#define RNR_REGS_OFFSET_4		0xd04000
+
+/* relative to core */
+#define RNR_REGS_OFFSET_5		0xd05000
+
+/*
+ * Register <GLOBAL_CONTROL>
+ *
+ * Global control
+ */
+#define RNR_REGS_CFG_GLOBAL_CTRL	0x0
+
+/*
+ * Runner enable.
+ * When reset runner pipe is halted, instruction memory and context memory
+ * can be accessed by the CPU.
+ * The CPU can reset or set this bit. The firmware can reset this bit by
+ * writing to the disable bit at the runner I/O control register.
+*/
+#define  CFG_GLOBAL_CTRL_EN_MASK	0x1
+
+/*
+ * Notifies about DMA illegal access (>16 cycles on UBUS).
+ * Sticky bit.
+ * cleared by writing 1 to this bit.
+*/
+#define  CFG_GLOBAL_CTRL_DMA_ILLEGAL_STATUS_MASK	0x2
+
+#define  CFG_GLOBAL_CTRL_MICRO_SEC_VAL_SHIFT	8
+#define  CFG_GLOBAL_CTRL_MICRO_SEC_VAL_MASK	0x3ff00
+
+/*
+ * Register <CPU_WAKEUP>
+ *
+ * Writing to this register generates a request towards the runner
+ * scheduler.
+ */
+#define RNR_REGS_CFG_CPU_WAKEUP		0x4
+
+/* The thread number to be invoked by the CPU. */
+#define  CFG_CPU_WAKEUP_THREAD_NUM_SHIFT	0
+#define  CFG_CPU_WAKEUP_THREAD_NUM_MASK	0xf
+
+
+/*
+ * Register <INTERRUPT_CONTROL>
+ *
+ * Interrupt control - UNUSED in 6858
+ */
+#define RNR_REGS_CFG_INT_CTRL		0x8
+
+/*
+ * While any of this field bits is set interrupt line 0 is set.
+ * SW can write '1' to clear any bit.
+ * Write of '0' is ignored.
+*/
+#define  CFG_INT_CTRL_INT0_STS_SHIFT	0
+#define  CFG_INT_CTRL_INT0_STS_MASK	0xff
+
+/*
+ * While any of this field bits is set interrupt line 0 is set.
+ * SW can write '1' to clear any bit.
+ * Write of '0' is ignored.
+*/
+#define  CFG_INT_CTRL_INT1_STS_SHIFT	8
+#define  CFG_INT_CTRL_INT1_STS_MASK	0xff00
+
+/*
+ * While this bit is set interrupt line 2 is set.
+ * SW can write '1' to clear any bit.
+ * Write of '0' is ignored.
+*/
+#define  CFG_INT_CTRL_INT2_STS_MASK	0x10000
+
+/*
+ * While this bit is set interrupt line 3 is set.
+ * SW can write '1' to clear any bit.
+ * Write of '0' is ignored.
+*/
+#define  CFG_INT_CTRL_INT3_STS_MASK	0x20000
+
+/*
+ * While this bit is set interrupt line 4 is set.
+ * SW can write '1' to clear any bit.
+ * Write of '0' is ignored.
+*/
+#define  CFG_INT_CTRL_INT4_STS_MASK	0x40000
+
+/*
+ * While this bit is set interrupt line 5 is set.
+ * SW can write '1' to clear any bit.
+ * Write of '0' is ignored.
+*/
+#define  CFG_INT_CTRL_INT5_STS_MASK	0x80000
+
+/*
+ * While this bit is set interrupt line 6 is set.
+ * SW can write '1' to clear any bit.
+ * Write of '0' is ignored.
+*/
+#define  CFG_INT_CTRL_INT6_STS_MASK	0x100000
+
+/*
+ * While this bit is set interrupt line 7 is set.
+ * SW can write '1' to clear any bit.
+ * Write of '0' is ignored.
+*/
+#define  CFG_INT_CTRL_INT7_STS_MASK	0x200000
+
+/*
+ * While this bit is set interrupt line 8 is set.
+ * SW can write '1' to clear any bit.
+ * Write of '0' is ignored.
+*/
+#define  CFG_INT_CTRL_INT8_STS_MASK	0x400000
+
+/*
+ * While this bit is set interrupt line 9 is set.
+ * SW can write '1' to clear any bit.
+ * Write of '0' is ignored.
+*/
+#define  CFG_INT_CTRL_INT9_STS_MASK	0x800000
+
+#define  CFG_INT_CTRL_FIT_FAIL_STS_MASK	0x80000000
+
+/*
+ * Register <INTERRUPT_MASK>
+ *
+ * Interrupt mask - UNUSED in 6858
+ */
+#define RNR_REGS_CFG_INT_MASK		0xc
+
+/* Mask INT0 causes */
+#define  CFG_INT_MASK_INT0_MASK_SHIFT	0
+#define  CFG_INT_MASK_INT0_MASK_MASK	0xff
+
+/* INT1 mask cause */
+#define  CFG_INT_MASK_INT1_MASK_SHIFT	8
+#define  CFG_INT_MASK_INT1_MASK_MASK	0xff00
+
+/* INT2 mask cause */
+#define  CFG_INT_MASK_INT2_MASK_MASK	0x10000
+
+/* INT3 mask cause */
+#define  CFG_INT_MASK_INT3_MASK_MASK	0x20000
+
+/* INT4 mask cause */
+#define  CFG_INT_MASK_INT4_MASK_MASK	0x40000
+
+/* INT5 mask cause */
+#define  CFG_INT_MASK_INT5_MASK_MASK	0x80000
+
+/* INT6 mask cause */
+#define  CFG_INT_MASK_INT6_MASK_MASK	0x100000
+
+/* INT7 mask cause */
+#define  CFG_INT_MASK_INT7_MASK_MASK	0x200000
+
+/* INT8 mask cause */
+#define  CFG_INT_MASK_INT8_MASK_MASK	0x400000
+
+/* INT9 mask cause */
+#define  CFG_INT_MASK_INT9_MASK_MASK	0x800000
+
+
+/*
+ * Register <GENERAL_CONFIGURATION>
+ *
+ * General configuration
+ */
+#define RNR_REGS_CFG_GEN_CFG		0x30
+
+/*
+ * Disable DMA old flow control.
+ * When set to 1, DMA will not check read FIFO occupancy when issuing READ
+ * requests, relying instead on DMA backpressure mechanism vs read
+ * dispatcher block.
+*/
+#define  CFG_GEN_CFG_DISABLE_DMA_OLD_FLOW_CONTROL_MASK	0x1
+
+/* set to 1 to test fit fail interrupt. */
+#define  CFG_GEN_CFG_TEST_FIT_FAIL_MASK	0x2
+
+
+/*
+ * Register <CAM_CONFIGURATION>
+ *
+ * CAM configuration
+ */
+#define RNR_REGS_CFG_CAM_CFG		0x34
+
+/*
+ * CAM operation is stopped when reaching an entry with a value matching
+ * this field.
+ * For a 32-bit or 64-bit CAM entries, this value is concatenated.
+*/
+#define  CFG_CAM_CFG_STOP_VALUE_SHIFT	0
+#define  CFG_CAM_CFG_STOP_VALUE_MASK	0xffff
+
+
+/*
+ * Register <DMA_DDR_CONFIG>
+ *
+ * DMA DDR config Register.
+ * Contains configurations such as buffer size and ddr base address that
+ * are used for DDR address calculations (from buffer number) when DMA
+ * instruction addr_calc flag is set.
+ */
+#define RNR_REGS_CFG_DDR_CFG		0x40
+
+/* DMA base address for ADDR_CALC */
+#define  CFG_DDR_CFG_DMA_BASE_SHIFT	0
+#define  CFG_DDR_CFG_DMA_BASE_MASK	0xfffff
+
+/* 3 bits indicating buffer size */
+#define  CFG_DDR_CFG_DMA_BUF_SIZE_SHIFT	20
+#define  CFG_DDR_CFG_DMA_BUF_SIZE_MASK	0x700000
+
+/* DMA static offset */
+#define  CFG_DDR_CFG_DMA_STATIC_OFFSET_SHIFT	24
+#define  CFG_DDR_CFG_DMA_STATIC_OFFSET_MASK	0xff000000
+
+
+/*
+ * Register <DMA_PSRAM_CONFIG>
+ *
+ * DMA PSRAM config Register.
+ * Contains configurations such as buffer size and ddr base address that
+ * are used for DDR address calculations (from buffer number) when DMA
+ * instruction addr_calc flag is set.
+ */
+#define RNR_REGS_CFG_PSRAM_CFG		0x44
+
+/* DMA base address for ADDR_CALC */
+#define  CFG_PSRAM_CFG_DMA_BASE_SHIFT	0
+#define  CFG_PSRAM_CFG_DMA_BASE_MASK	0xfffff
+
+/* 3 bits indicating buffer size */
+#define  CFG_PSRAM_CFG_DMA_BUF_SIZE_SHIFT	20
+#define  CFG_PSRAM_CFG_DMA_BUF_SIZE_MASK	0x700000
+
+/* DMA static offset */
+#define  CFG_PSRAM_CFG_DMA_STATIC_OFFSET_SHIFT	24
+#define  CFG_PSRAM_CFG_DMA_STATIC_OFFSET_MASK	0xff000000
+
+
+/*
+ * Register <RAMRD_MASK_CONFIG>
+ *
+ * Ramrd mask for range search.
+ * The register holds 2 masks that can be chosen by runner core for range
+ * searches.
+ */
+#define RNR_REGS_CFG_RAMRD_RANGE_MASK_CFG	0x48
+
+/*
+ * Mask 0 for range search.
+ * according to the number of 1 in the mask the cam machine can differ
+ * between the Key and TAG
+*/
+#define  CFG_RAMRD_RANGE_MASK_CFG_MASK0_SHIFT	0
+#define  CFG_RAMRD_RANGE_MASK_CFG_MASK0_MASK	0xffff
+
+/*
+ * Mask 1 for range search.
+ * according to the number of 1 in the mask the cam machine can differ
+ * between the Key and TAG
+*/
+#define  CFG_RAMRD_RANGE_MASK_CFG_MASK1_SHIFT	16
+#define  CFG_RAMRD_RANGE_MASK_CFG_MASK1_MASK	0xffff0000
+
+
+/*
+ * Register <SCHEDULER_CONFIG>
+ *
+ * scheduler configuration
+ */
+#define RNR_REGS_CFG_SCH_CFG		0x4c
+
+/* Configure priority mode for scheduler operation */
+#define  CFG_SCH_CFG_SCHEDULER_MODE_SHIFT	0
+#define  CFG_SCH_CFG_SCHEDULER_MODE_MASK	0x7
+
+
+/*
+ * Register <BKPT_CFG>
+ *
+ * breakpoint configuration
+ */
+#define RNR_REGS_CFG_BKPT_CFG		0x50
+
+/* Enable breakpoint 0 */
+#define  CFG_BKPT_CFG_BKPT_0_EN_MASK	0x1
+
+/* Enable breakpoint for given thread only */
+#define  CFG_BKPT_CFG_BKPT_0_USE_THREAD_MASK	0x2
+
+/* Enable breakpoint 1 */
+#define  CFG_BKPT_CFG_BKPT_1_EN_MASK	0x4
+
+/* Enable breakpoint for given thread only */
+#define  CFG_BKPT_CFG_BKPT_1_USE_THREAD_MASK	0x8
+
+/* Enable breakpoint 2 */
+#define  CFG_BKPT_CFG_BKPT_2_EN_MASK	0x10
+
+/* Enable breakpoint for given thread only */
+#define  CFG_BKPT_CFG_BKPT_2_USE_THREAD_MASK	0x20
+
+/* Enable breakpoint 3 */
+#define  CFG_BKPT_CFG_BKPT_3_EN_MASK	0x40
+
+/* Enable breakpoint for given thread only */
+#define  CFG_BKPT_CFG_BKPT_3_USE_THREAD_MASK	0x80
+
+/* Enable breakpoint 4 */
+#define  CFG_BKPT_CFG_BKPT_4_EN_MASK	0x100
+
+/* Enable breakpoint for given thread only */
+#define  CFG_BKPT_CFG_BKPT_4_USE_THREAD_MASK	0x200
+
+/* Enable breakpoint 5 */
+#define  CFG_BKPT_CFG_BKPT_5_EN_MASK	0x400
+
+/* Enable breakpoint for given thread only */
+#define  CFG_BKPT_CFG_BKPT_5_USE_THREAD_MASK	0x800
+
+/* Enable breakpoint 6 */
+#define  CFG_BKPT_CFG_BKPT_6_EN_MASK	0x1000
+
+/* Enable breakpoint for given thread only */
+#define  CFG_BKPT_CFG_BKPT_6_USE_THREAD_MASK	0x2000
+
+/* Enable breakpoint 7 */
+#define  CFG_BKPT_CFG_BKPT_7_EN_MASK	0x4000
+
+/* Enable breakpoint for given thread only */
+#define  CFG_BKPT_CFG_BKPT_7_USE_THREAD_MASK	0x8000
+
+/* Configure step mode */
+#define  CFG_BKPT_CFG_STEP_MODE_MASK	0x10000
+
+/* Value for new flags */
+#define  CFG_BKPT_CFG_NEW_FLAGS_VAL_SHIFT	20
+#define  CFG_BKPT_CFG_NEW_FLAGS_VAL_MASK	0xf00000
+
+
+/*
+ * Register <BKPT_IMMEDIATE>
+ *
+ * break point immediate
+ */
+#define RNR_REGS_CFG_BKPT_IMM		0x54
+
+/* Enable immediate breakpoint */
+#define  CFG_BKPT_IMM_ENABLE_MASK	0x1
+
+
+/*
+ * Register <BKPT_STS> - read-only
+ *
+ * breakpoint status
+ */
+#define RNR_REGS_CFG_BKPT_STS		0x58
+
+/* Breakpoint address */
+#define  CFG_BKPT_STS_BKPT_ADDR_SHIFT	0
+#define  CFG_BKPT_STS_BKPT_ADDR_MASK	0x1fff
+
+/* Breakpoint active indication */
+#define  CFG_BKPT_STS_ACTIVE_MASK	0x10000
+
+
+/*
+ * Register <PC_STS> - read-only
+ *
+ * Program counter status
+ */
+#define RNR_REGS_CFG_PC_STS		0x5c
+
+/* Current program counter address */
+#define  CFG_PC_STS_CURRENT_PC_ADDR_SHIFT	0
+#define  CFG_PC_STS_CURRENT_PC_ADDR_MASK	0x1fff
+
+/* Call stack return address */
+#define  CFG_PC_STS_PC_RET_SHIFT	16
+#define  CFG_PC_STS_PC_RET_MASK		0x1fff0000
+
+
+/*
+ * Register <PROFILING_STS> - read-only
+ *
+ * profiling status
+ */
+#define RNR_REGS_CFG_PROF_STS		0xb0
+
+/* Trace write pointer */
+#define  CFG_PROF_STS_TRACE_WRITE_PNT_SHIFT	0
+#define  CFG_PROF_STS_TRACE_WRITE_PNT_MASK	0x1fff
+
+/* No active task */
+#define  CFG_PROF_STS_IDLE_NO_ACTIVE_TASK_MASK	0x2000
+
+/* Current thread num */
+#define  CFG_PROF_STS_CURR_THREAD_NUM_SHIFT	14
+#define  CFG_PROF_STS_CURR_THREAD_NUM_MASK	0x3c000
+
+/* Status of profiling ON/OFF */
+#define  CFG_PROF_STS_PROFILING_ACTIVE_MASK	0x40000
+
+/*
+ * Sticky bit, indicating trace event FIFO overrun.
+ * Cleared by writing bit [31] of PROFILING_CFG_1 register
+*/
+#define  CFG_PROF_STS_TRACE_FIFO_OVERRUN_MASK	0x80000
+
+
+/*
+ * Register <PROFILING_CFG_0>
+ *
+ * profiling configuration 0
+ */
+#define RNR_REGS_CFG_PROF_CFG_0		0xb4
+
+/* Base address for trace buffer */
+#define  CFG_PROF_CFG_0_TRACE_BASE_ADDR_SHIFT	0
+#define  CFG_PROF_CFG_0_TRACE_BASE_ADDR_MASK	0x1fff
+
+/* Trace buffer MAX address */
+#define  CFG_PROF_CFG_0_TRACE_MAX_ADDR_SHIFT	16
+#define  CFG_PROF_CFG_0_TRACE_MAX_ADDR_MASK	0x1fff0000
+
+
+/*
+ * Register <PROFILING_CFG_1>
+ *
+ * profiling configuration 1
+ */
+#define RNR_REGS_CFG_PROF_CFG_1		0xb8
+
+/* Wraparound when writing trace buffer */
+#define  CFG_PROF_CFG_1_TRACE_WRAPAROUND_MASK	0x1
+
+/* Select all tasks or single task mode */
+#define  CFG_PROF_CFG_1_TRACE_MODE_MASK	0x2
+
+/* Select whether to log IDLE in context swap events */
+#define  CFG_PROF_CFG_1_TRACE_DISABLE_IDLE_IN_MASK	0x4
+
+/*
+ * Enable/disable logging of scheduler events (wakeups).
+ * Relevant only for single task mode
+*/
+#define  CFG_PROF_CFG_1_TRACE_DISABLE_WAKEUP_LOG_MASK	0x8
+
+/* Select task for single task operation */
+#define  CFG_PROF_CFG_1_TRACE_TASK_SHIFT	4
+#define  CFG_PROF_CFG_1_TRACE_TASK_MASK	0xf0
+
+/* Select mode for IDLE counter */
+#define  CFG_PROF_CFG_1_IDLE_COUNTER_SOURCE_SEL_MASK	0x100
+
+/* Apply software reset to event FIFO */
+#define  CFG_PROF_CFG_1_TRACE_RESET_EVENT_FIFO_MASK	0x40000000
+
+/* Write 1 to clear event FIFO overrun sticky bit */
+#define  CFG_PROF_CFG_1_TRACE_CLEAR_FIFO_OVERRUN_MASK	0x80000000
+
+
+/*
+ * Register <PROFILING_COUNTER> - read-only
+ *
+ * Display profiling counter value
+ */
+#define RNR_REGS_CFG_PROF_COUNTER	0xbc
+
+/* Current 32-bit value of counter */
+#define  CFG_PROF_COUNTER_VAL_SHIFT	0
+#define  CFG_PROF_COUNTER_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <STALL_CNT1> - read-only
+ *
+ * stall count
+ */
+#define RNR_REGS_CFG_STALL_CNT1		0xc0
+
+/* Count load stalls */
+#define  CFG_STALL_CNT1_LD_STALL_CNT_SHIFT	0
+#define  CFG_STALL_CNT1_LD_STALL_CNT_MASK	0xffff
+
+/* Count accelerator stalls */
+#define  CFG_STALL_CNT1_ACC_STALL_CNT_SHIFT	16
+#define  CFG_STALL_CNT1_ACC_STALL_CNT_MASK	0xffff0000
+
+
+/*
+ * Register <STALL_CNT2> - read-only
+ *
+ * stall count
+ */
+#define RNR_REGS_CFG_STALL_CNT2		0xc4
+
+/* Count load io stalls */
+#define  CFG_STALL_CNT2_LDIO_STALL_CNT_SHIFT	0
+#define  CFG_STALL_CNT2_LDIO_STALL_CNT_MASK	0xffff
+
+/* Count store stalls */
+#define  CFG_STALL_CNT2_STORE_STALL_CNT_SHIFT	16
+#define  CFG_STALL_CNT2_STORE_STALL_CNT_MASK	0xffff0000
+
+
+/*
+ * Register <IDLE_CNT1> - read-only
+ *
+ * idle count
+ */
+#define RNR_REGS_CFG_IDLE_CNT1		0xc8
+
+/* IDLE count */
+#define  CFG_IDLE_CNT1_IDLE_CNT_SHIFT	0
+#define  CFG_IDLE_CNT1_IDLE_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <JUMP_CNT> - read-only
+ *
+ * Mispredicted jumps count
+ */
+#define RNR_REGS_CFG_JMP_CNT		0xcc
+
+/* Counts jumps with prediction miss, when prediction was dont jump */
+#define  CFG_JMP_CNT_UNTAKEN_JMP_CNT_SHIFT	0
+#define  CFG_JMP_CNT_UNTAKEN_JMP_CNT_MASK	0xffff
+
+/* Counts jumps with prediction miss, when prediction was jump */
+#define  CFG_JMP_CNT_TAKEN_JMP_CNT_SHIFT	16
+#define  CFG_JMP_CNT_TAKEN_JMP_CNT_MASK	0xffff0000
+
+
+/*
+ * Register <METAL_FIX>
+ *
+ * 32 bit register for metal fixes.
+ */
+#define RNR_REGS_CFG_METAL_FIX_REG	0xf0
+
+/* 32 bit register for metal fix */
+#define  CFG_METAL_FIX_REG_METAL_FIX_SHIFT	0
+#define  CFG_METAL_FIX_REG_METAL_FIX_MASK	0xffffffff
+
+
+#endif /* ! XRDP_REGS_RNR_REGS_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_sbpm.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_sbpm.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_sbpm.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_sbpm.h	2025-09-25 17:40:35.075364786 +0200
@@ -0,0 +1,1204 @@
+#ifndef XRDP_REGS_SBPM_H_
+#define XRDP_REGS_SBPM_H_
+
+/* relative to core */
+#define SBPM_OFFSET_0			0xd98000
+
+/*
+ * Register <INIT_FREE_LIST>
+ *
+ * request for building the free list using HW accelerator
+ */
+#define SBPM_REGS_INIT_FREE_LIST	0x0
+
+/* init_base_addr */
+#define  REGS_INIT_FREE_LIST_INIT_BASE_ADDR_SHIFT	0
+#define  REGS_INIT_FREE_LIST_INIT_BASE_ADDR_MASK	0x3fff
+
+/* init_offset */
+#define  REGS_INIT_FREE_LIST_INIT_OFFSET_SHIFT	14
+#define  REGS_INIT_FREE_LIST_INIT_OFFSET_MASK	0xfffc000
+
+/*
+ * The bit is used as busy indication of buffer allocation request status
+ * (busy status) by CPU.
+ * BPM asserts this bit on each valid request and de-asserts when request
+ * is treated.
+*/
+#define  REGS_INIT_FREE_LIST_BSY_MASK	0x40000000
+
+/*
+ * The bit is used as ready indication of buffer allocation request status
+ * (ready status) by CPU.
+ * BPM asserts this bit when request is treated and de-asserts when new
+ * valid request is accepted, thus this is READY indication
+*/
+#define  REGS_INIT_FREE_LIST_RDY_MASK	0x80000000
+
+
+/*
+ * Register <BN_ALLOC>
+ *
+ * request for a new buffer
+ */
+#define SBPM_REGS_BN_ALLOC		0x4
+
+/*
+ * Source address used by Alloc BN command (may be used for alloc on behalf
+ * another user)
+*/
+#define  REGS_BN_ALLOC_SA_SHIFT		14
+#define  REGS_BN_ALLOC_SA_MASK		0xfc000
+
+
+/*
+ * Register <BN_ALLOC_RPLY> - read-only
+ *
+ * reply for a new buffer alloc
+ */
+#define SBPM_REGS_BN_ALLOC_RPLY		0x8
+
+/* alloc_bn_valid */
+#define  REGS_BN_ALLOC_RPLY_ALLOC_BN_VALID_MASK	0x1
+
+/* alloc_bn */
+#define  REGS_BN_ALLOC_RPLY_ALLOC_BN_SHIFT	1
+#define  REGS_BN_ALLOC_RPLY_ALLOC_BN_MASK	0x7ffe
+
+/* ack */
+#define  REGS_BN_ALLOC_RPLY_ACK_MASK	0x8000
+
+/* nack */
+#define  REGS_BN_ALLOC_RPLY_NACK_MASK	0x10000
+
+/*
+ * Exclusive bit is indication of Exclusive_high status of client with
+ * related Alloc request
+*/
+#define  REGS_BN_ALLOC_RPLY_EXCL_HIGH_MASK	0x20000
+
+/*
+ * Exclusive bit is indication of Exclusive_low status of client with
+ * related Alloc request
+*/
+#define  REGS_BN_ALLOC_RPLY_EXCL_LOW_MASK	0x40000
+
+/* busy */
+#define  REGS_BN_ALLOC_RPLY_BUSY_MASK	0x40000000
+
+/* rdy */
+#define  REGS_BN_ALLOC_RPLY_RDY_MASK	0x80000000
+
+
+/*
+ * Register <BN_FREE_WITH_CONTXT_LOW>
+ *
+ * Request for freeing buffers of a packet offline with context (lower
+ * 32-bit)
+ */
+#define SBPM_REGS_BN_FREE_WITH_CONTXT_LOW	0xc
+
+/* head_bn */
+#define  REGS_BN_FREE_WITH_CONTXT_LOW_HEAD_BN_SHIFT	0
+#define  REGS_BN_FREE_WITH_CONTXT_LOW_HEAD_BN_MASK	0x3fff
+
+/*
+ * Source address used for free command (may be used for freeing BN on behalf
+ * another port)
+*/
+#define  REGS_BN_FREE_WITH_CONTXT_LOW_SA_SHIFT	14
+#define  REGS_BN_FREE_WITH_CONTXT_LOW_SA_MASK	0xfc000
+
+/* Offset (or length) = number of BNs in packet that is going to be freed */
+#define  REGS_BN_FREE_WITH_CONTXT_LOW_OFFSET_SHIFT	24
+#define  REGS_BN_FREE_WITH_CONTXT_LOW_OFFSET_MASK	0x7f000000
+
+/* Ack request */
+#define  REGS_BN_FREE_WITH_CONTXT_LOW_ACK_MASK	0x80000000
+
+
+/*
+ * Register <BN_FREE_WITH_CONTXT_HIGH>
+ *
+ * Request for freeing buffers of a packet offline with context (higher
+ * 32-bit)
+ */
+#define SBPM_REGS_BN_FREE_WITH_CONTXT_HIGH	0x10
+
+/* Last BN in packet that is going to be freed */
+#define  REGS_BN_FREE_WITH_CONTXT_HIGH_LAST_BN_SHIFT	0
+#define  REGS_BN_FREE_WITH_CONTXT_HIGH_LAST_BN_MASK	0x3fff
+
+
+/*
+ * Register <MCST_INC>
+ *
+ * Multicast counter increment.
+ * Contains the BN, which is head of the packet to be multicast and its
+ * counter value
+ */
+#define SBPM_REGS_MCST_INC		0x14
+
+/* buffer number */
+#define  REGS_MCST_INC_BN_SHIFT		0
+#define  REGS_MCST_INC_BN_MASK		0x3fff
+
+/* MCST value that should be added to current multicast counter */
+#define  REGS_MCST_INC_MCST_VAL_SHIFT	14
+#define  REGS_MCST_INC_MCST_VAL_MASK	0x3fc000
+
+/* Acknowledge request */
+#define  REGS_MCST_INC_ACK_REQ_MASK	0x400000
+
+
+/*
+ * Register <MCST_INC_RPLY> - read-only
+ *
+ * mcst_inc_rply
+ */
+#define SBPM_REGS_MCST_INC_RPLY		0x18
+
+/* Acknowledge reply of MCST command */
+#define  REGS_MCST_INC_RPLY_MCST_ACK_MASK	0x1
+
+/*
+ * The bit is used as busy indication of MCST request status (busy status)
+ * by CPU. SBPM asserts this bit on each valid request and de-asserts when
+ * request is treated:
+ * 1 - request is busy, 0 - request is not busy (ready)
+*/
+#define  REGS_MCST_INC_RPLY_BSY_MASK	0x40000000
+
+/*
+ * The bit is used as ready indication of MCST request status (ready
+ * status) by CPU.
+ * SBPM asserts this bit when request is treated and de-asserts when new
+ * valid request is accepted, thus this is READY indication:
+ * 1 - request is ready,0- request is not ready (busy)
+*/
+#define  REGS_MCST_INC_RPLY_RDY_MASK	0x80000000
+
+
+/*
+ * Register <BN_CONNECT>
+ *
+ * request for connection between two buffers in a linked list.
+ * The connection request may be replied with ACK message if the ACK
+ * request bit is asserted.
+ * This command is used as write command.
+ */
+#define SBPM_REGS_BN_CONNECT		0x1c
+
+/* bn */
+#define  REGS_BN_CONNECT_BN_SHIFT	0
+#define  REGS_BN_CONNECT_BN_MASK	0x3fff
+
+/* ack_req for Connect command (should be always set) */
+#define  REGS_BN_CONNECT_ACK_REQ_MASK	0x4000
+
+/* Used for Direct Write (for workaround) */
+#define  REGS_BN_CONNECT_WR_REQ_MASK	0x8000
+
+/* pointed_bn */
+#define  REGS_BN_CONNECT_POINTED_BN_SHIFT	16
+#define  REGS_BN_CONNECT_POINTED_BN_MASK	0x3fff0000
+
+
+/*
+ * Register <BN_CONNECT_RPLY> - read-only
+ *
+ * bn_connect_rply
+ */
+#define SBPM_REGS_BN_CONNECT_RPLY	0x20
+
+/* Acknowledge reply on Connect request */
+#define  REGS_BN_CONNECT_RPLY_CONNECT_ACK_MASK	0x1
+
+/* busy bit */
+#define  REGS_BN_CONNECT_RPLY_BUSY_MASK	0x40000000
+
+/* ready bit */
+#define  REGS_BN_CONNECT_RPLY_RDY_MASK	0x80000000
+
+
+/*
+ * Register <GET_NEXT>
+ *
+ * a pointer to a buffer in a packet linked list and request for the next
+ * buffer in the list. This command is used as a read command.
+ */
+#define SBPM_REGS_GET_NEXT		0x24
+
+/* Get Next Buffer of current BN (used in this field) */
+#define  REGS_GET_NEXT_BN_SHIFT		0
+#define  REGS_GET_NEXT_BN_MASK		0x3fff
+
+
+/*
+ * Register <GET_NEXT_RPLY> - read-only
+ *
+ * get_next_rply
+ */
+#define SBPM_REGS_GET_NEXT_RPLY		0x28
+
+/* Used for validation of Next BN reply */
+#define  REGS_GET_NEXT_RPLY_BN_VALID_MASK	0x1
+
+/* Next BN - reply of Get_next command */
+#define  REGS_GET_NEXT_RPLY_NEXT_BN_SHIFT	1
+#define  REGS_GET_NEXT_RPLY_NEXT_BN_MASK	0x7ffe
+
+/* Next BN is null indication */
+#define  REGS_GET_NEXT_RPLY_BN_NULL_MASK	0x8000
+
+/* mcst cnt val */
+#define  REGS_GET_NEXT_RPLY_MCNT_VAL_SHIFT	16
+#define  REGS_GET_NEXT_RPLY_MCNT_VAL_MASK	0xff0000
+
+/* Get Next command is busy */
+#define  REGS_GET_NEXT_RPLY_BUSY_MASK	0x40000000
+
+/* Get Next command is ready */
+#define  REGS_GET_NEXT_RPLY_RDY_MASK	0x80000000
+
+
+/*
+ * Register <SBPM_CLK_GATE_CNTRL>
+ *
+ * control for the bl_clk_control module
+ */
+#define SBPM_REGS_SBPM_CLK_GATE_CNTRL	0x2c
+
+/*
+ * If set to 1b1 will disable the clock gate logic such to always enable
+ * the clock
+*/
+#define  REGS_SBPM_CLK_GATE_CNTRL_BYPASS_CLK_GATE_MASK	0x1
+
+/*
+ * For how long should the clock stay active once all conditions for clock
+ * disable are met.
+*/
+#define  REGS_SBPM_CLK_GATE_CNTRL_TIMER_VAL_SHIFT	8
+#define  REGS_SBPM_CLK_GATE_CNTRL_TIMER_VAL_MASK	0xff00
+
+/*
+ * Enables the keep alive logic which will periodically enable the clock to
+ * assure that no deadlock of clock being removed completely will occur
+*/
+#define  REGS_SBPM_CLK_GATE_CNTRL_KEEP_ALIVE_EN_MASK	0x10000
+
+/*
+ * If the KEEP alive option is enabled the field will determine for how
+ * many cycles should the clock be active
+*/
+#define  REGS_SBPM_CLK_GATE_CNTRL_KEEP_ALIVE_INTERVL_SHIFT	20
+#define  REGS_SBPM_CLK_GATE_CNTRL_KEEP_ALIVE_INTERVL_MASK	0x700000
+
+/*
+ * If the KEEP alive option is enabled this field will determine for how
+ * many cycles should the clock be disabled (minus the
+ * KEEP_ALIVE_INTERVAL)So KEEP_ALIVE_CYCLE must be larger than
+ * KEEP_ALIVE_INTERVAL.
+*/
+#define  REGS_SBPM_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_SHIFT	24
+#define  REGS_SBPM_CLK_GATE_CNTRL_KEEP_ALIVE_CYC_MASK	0xff000000
+
+
+/*
+ * Register <BN_FREE_WITHOUT_CONTXT>
+ *
+ * bn_free_without_contxt
+ */
+#define SBPM_REGS_BN_FREE_WITHOUT_CONTXT	0x38
+
+/* Head BN = First BN in packet that is going to be freed */
+#define  REGS_BN_FREE_WITHOUT_CONTXT_HEAD_BN_SHIFT	0
+#define  REGS_BN_FREE_WITHOUT_CONTXT_HEAD_BN_MASK	0x3fff
+
+/*
+ * source address used for command (may be used for performing command on
+ * behalf another port)
+*/
+#define  REGS_BN_FREE_WITHOUT_CONTXT_SA_SHIFT	14
+#define  REGS_BN_FREE_WITHOUT_CONTXT_SA_MASK	0xfc000
+
+/* ACK request - should be always set */
+#define  REGS_BN_FREE_WITHOUT_CONTXT_ACK_REQ_MASK	0x80000000
+
+
+/*
+ * Register <BN_FREE_WITHOUT_CONTXT_RPLY> - read-only
+ *
+ * bn_free_without_contxt_rply
+ */
+#define SBPM_REGS_BN_FREE_WITHOUT_CONTXT_RPLY	0x3c
+
+/* Acknowledge on Free command */
+#define  REGS_BN_FREE_WITHOUT_CONTXT_RPLY_FREE_ACK_MASK	0x1
+
+/* ACK status of CPU */
+#define  REGS_BN_FREE_WITHOUT_CONTXT_RPLY_ACK_STAT_MASK	0x8000
+
+/* NACK status of CPU */
+#define  REGS_BN_FREE_WITHOUT_CONTXT_RPLY_NACK_STAT_MASK	0x10000
+
+/* Exclusive_high status of CPU */
+#define  REGS_BN_FREE_WITHOUT_CONTXT_RPLY_EXCL_HIGH_STAT_MASK	0x20000
+
+/* Exclusive_low status of CPU */
+#define  REGS_BN_FREE_WITHOUT_CONTXT_RPLY_EXCL_LOW_STAT_MASK	0x40000
+
+/* Busy bit of command (command is currently in execution) */
+#define  REGS_BN_FREE_WITHOUT_CONTXT_RPLY_BSY_MASK	0x40000000
+
+/* Ready bit of command (ready for new command execution) */
+#define  REGS_BN_FREE_WITHOUT_CONTXT_RPLY_RDY_MASK	0x80000000
+
+
+/*
+ * Register <BN_FREE_WITH_CONTXT_RPLY> - read-only
+ *
+ * bn_free_with_contxt_rply
+ */
+#define SBPM_REGS_BN_FREE_WITH_CONTXT_RPLY	0x40
+
+/* Free command acknowledge */
+#define  REGS_BN_FREE_WITH_CONTXT_RPLY_FREE_ACK_MASK	0x1
+
+/* ACK status of CPU */
+#define  REGS_BN_FREE_WITH_CONTXT_RPLY_ACK_STATE_MASK	0x8000
+
+/* NACK status of CPU */
+#define  REGS_BN_FREE_WITH_CONTXT_RPLY_NACK_STATE_MASK	0x10000
+
+/* Exclusive high status of CPU */
+#define  REGS_BN_FREE_WITH_CONTXT_RPLY_EXCL_HIGH_STATE_MASK	0x20000
+
+/* Exclusive low status of CPU */
+#define  REGS_BN_FREE_WITH_CONTXT_RPLY_EXCL_LOW_STATE_MASK	0x40000
+
+/* Busy bit of command */
+#define  REGS_BN_FREE_WITH_CONTXT_RPLY_BUSY_MASK	0x40000000
+
+/* Ready bit of command */
+#define  REGS_BN_FREE_WITH_CONTXT_RPLY_RDY_MASK	0x80000000
+
+
+/*
+ * Register <GLOBAL_THRESHOLD>
+ *
+ * Global Threshold for Allocated Buffers.
+ * SBPM will issue BN in the accepted range upon to Global threshold setup.
+ * This register also holds global hysteresis value for ACK/NACK transition
+ * setting.
+ * We cross to Nack state if BAC equals the threshold.
+ * We cross down to Ack if BAC equals the threshold minus the hysteresis
+ * value.
+ */
+#define SBPM_REGS_SBPM_GL_TRSH		0x4c
+
+/* Global Threshold for Allocated BN = maximal total number of BNs in SBPM */
+#define  REGS_SBPM_GL_TRSH_GL_BAT_SHIFT	0
+#define  REGS_SBPM_GL_TRSH_GL_BAT_MASK	0x3fff
+
+/*
+ * Global Hysteresis for Allocated BN = hysteresis value related to maximal
+ * total threshold of SRAM BNs
+*/
+#define  REGS_SBPM_GL_TRSH_GL_BAH_SHIFT	16
+#define  REGS_SBPM_GL_TRSH_GL_BAH_MASK	0x3fff0000
+
+
+/*
+ * Register <UG0_THRESHOLD>
+ *
+ * Threshold for Allocated Buffers of UG0. This register also holds UG0
+ * hysteresis value for ACK/NACK transition setting.
+ * We cross to Nack state if BAC equals the threshold.
+ * We cross down to Ack if BAC equals the threshold minus the hysteresis
+ * value.
+ */
+#define SBPM_REGS_SBPM_UG0_TRSH		0x50
+
+/* Current UG Threshold for Allocated BN */
+#define  REGS_SBPM_UG0_TRSH_UG_BAT_SHIFT	0
+#define  REGS_SBPM_UG0_TRSH_UG_BAT_MASK	0x3fff
+
+/* Current UG hysteresis Threshold for Allocated BN */
+#define  REGS_SBPM_UG0_TRSH_UG_BAH_SHIFT	16
+#define  REGS_SBPM_UG0_TRSH_UG_BAH_MASK	0x3fff0000
+
+
+/*
+ * Register <UG1_THRESHOLD>
+ *
+ * Threshold for Allocated Buffers of UG1. This register also holds UG1
+ * hysteresis value for ACK/NACK transition setting.
+ * We cross to Nack state if BAC equals the threshold.
+ * We cross down to Ack if BAC equals the threshold minus the hysteresis
+ * value.
+ */
+#define SBPM_REGS_SBPM_UG1_TRSH		0x54
+
+/* Current UG Threshold for Allocated BN */
+#define  REGS_SBPM_UG1_TRSH_UG_BAT_SHIFT	0
+#define  REGS_SBPM_UG1_TRSH_UG_BAT_MASK	0x3fff
+
+/* Current UG hysteresis delta Threshold for Allocated BN */
+#define  REGS_SBPM_UG1_TRSH_UG_BAH_SHIFT	16
+#define  REGS_SBPM_UG1_TRSH_UG_BAH_MASK	0x3fff0000
+
+
+/*
+ * Register <SBPM_DBG>
+ *
+ * SBPM select the debug bus
+ */
+#define SBPM_REGS_SBPM_DBG		0x74
+
+/*
+ * select bus.
+ * the bus index should be mentioned in one-hot writing:
+ * bus0 = 0001, bus1 = 0010, bus2 = 0100, bus3 = 1000
+*/
+#define  REGS_SBPM_DBG_SELECT_BUS_SHIFT	0
+#define  REGS_SBPM_DBG_SELECT_BUS_MASK	0xf
+
+
+/*
+ * Register <SBPM_UG0_BAC> - read-only
+ *
+ * SBPM UG0 allocated BN counter
+ */
+#define SBPM_REGS_SBPM_UG0_BAC		0x78
+
+/* UG0 counter for allocated BNs */
+#define  REGS_SBPM_UG0_BAC_UG0BAC_SHIFT	0
+#define  REGS_SBPM_UG0_BAC_UG0BAC_MASK	0x3fff
+
+
+/*
+ * Register <SBPM_UG1_BAC> - read-only
+ *
+ * SBPM UG1 allocated BN Counter
+ */
+#define SBPM_REGS_SBPM_UG1_BAC		0x7c
+
+/* Buffer Allocated Counter */
+#define  REGS_SBPM_UG1_BAC_UG1BAC_SHIFT	0
+#define  REGS_SBPM_UG1_BAC_UG1BAC_MASK	0x3fff
+
+
+/*
+ * Register <SBPM_GL_BAC> - read-only
+ *
+ * SBPM global BN Counter
+ */
+#define SBPM_REGS_SBPM_GL_BAC		0x98
+
+/* Global BN counter */
+#define  REGS_SBPM_GL_BAC_BAC_SHIFT	0
+#define  REGS_SBPM_GL_BAC_BAC_MASK	0x3fff
+
+
+/*
+ * Register <SBPM_UG0_EXCLUSIVE_HIGH_THRESHOLD>
+ *
+ * SBPM UG0 Exclusive high and hysteresis threshold.
+ * We cross to Excl state if BAC equals the threshold.
+ * We cross down to not Excl if BAC equals the threshold minus the
+ * hysteresis value.
+ */
+#define SBPM_REGS_SBPM_UG0_EXCL_HIGH_TRSH	0x9c
+
+/* exclusive high threshold */
+#define  REGS_SBPM_UG0_EXCL_HIGH_TRSH_EXCLT_SHIFT	0
+#define  REGS_SBPM_UG0_EXCL_HIGH_TRSH_EXCLT_MASK	0x3fff
+
+/* exclusive hysteresis threshold */
+#define  REGS_SBPM_UG0_EXCL_HIGH_TRSH_EXCLH_SHIFT	16
+#define  REGS_SBPM_UG0_EXCL_HIGH_TRSH_EXCLH_MASK	0x3fff0000
+
+
+/*
+ * Register <SBPM_UG1_EXCLUSIVE_HIGH_THRESHOLD>
+ *
+ * SBPM UG1 Exclusive high and hysteresis threshold.
+ * We cross to Excl state if BAC equals the threshold.
+ * We cross down to not Excl if BAC equals the threshold minus the
+ * hysteresis value.
+ */
+#define SBPM_REGS_SBPM_UG1_EXCL_HIGH_TRSH	0x100
+
+/* exclusive high threshold */
+#define  REGS_SBPM_UG1_EXCL_HIGH_TRSH_EXCLT_SHIFT	0
+#define  REGS_SBPM_UG1_EXCL_HIGH_TRSH_EXCLT_MASK	0x3fff
+
+/* exclusive hysteresis threshold */
+#define  REGS_SBPM_UG1_EXCL_HIGH_TRSH_EXCLH_SHIFT	16
+#define  REGS_SBPM_UG1_EXCL_HIGH_TRSH_EXCLH_MASK	0x3fff0000
+
+
+/*
+ * Register <SBPM_UG0_EXCLUSIVE_LOW_THRESHOLD>
+ *
+ * SBPM UG0 Exclusive low and hysteresis threshold.
+ * We cross to Excl state if BAC equals the threshold.
+ * We cross down to not Excl if BAC equals the threshold minus the
+ * hysteresis value.
+ */
+#define SBPM_REGS_SBPM_UG0_EXCL_LOW_TRSH	0x104
+
+/* exclusive low threshold */
+#define  REGS_SBPM_UG0_EXCL_LOW_TRSH_EXCLT_SHIFT	0
+#define  REGS_SBPM_UG0_EXCL_LOW_TRSH_EXCLT_MASK	0x3fff
+
+/* exclusive hysteresis threshold */
+#define  REGS_SBPM_UG0_EXCL_LOW_TRSH_EXCLH_SHIFT	16
+#define  REGS_SBPM_UG0_EXCL_LOW_TRSH_EXCLH_MASK	0x3fff0000
+
+
+/*
+ * Register <SBPM_UG1_EXCLUSIVE_LOW_THRESHOLD>
+ *
+ * SBPM UG1 Exclusive low and hysteresis threshold.
+ * We cross to Excl state if BAC equals the threshold.
+ * We cross down to not Excl if BAC equals the threshold minus the
+ * hysteresis value.
+ */
+#define SBPM_REGS_SBPM_UG1_EXCL_LOW_TRSH	0x108
+
+/* exclusive low threshold */
+#define  REGS_SBPM_UG1_EXCL_LOW_TRSH_EXCLT_SHIFT	0
+#define  REGS_SBPM_UG1_EXCL_LOW_TRSH_EXCLT_MASK	0x3fff
+
+/* exclusive hysteresis threshold */
+#define  REGS_SBPM_UG1_EXCL_LOW_TRSH_EXCLH_SHIFT	16
+#define  REGS_SBPM_UG1_EXCL_LOW_TRSH_EXCLH_MASK	0x3fff0000
+
+
+/*
+ * Register <USER_GROUP_STATUS_Register> - read-only
+ *
+ * This register is status set of all 8 Ugs:
+ * Ack/NACK state and in addition Exclusive state per each of the 8 UGs
+ */
+#define SBPM_REGS_SBPM_UG_STATUS	0x11c
+
+/*
+ * Ack/Nack status per UG.
+ * 0 - NACK1 - ACKbit [0] in field matches UG0 ACK status,bit [1] in field
+ * matches UG1 ACK status,bit [2] in field matches UG2 ACK status,bit [3]
+ * in field matches UG3 ACK status,bit [4] in field matches UG4 ACK
+ * status,bit [5] in field matches UG5 ACK status,bit [6] in field matches
+ * UG6 ACK status,bit [7] in field matches UG7 ACK status,
+*/
+#define  REGS_SBPM_UG_STATUS_UG_ACK_STTS_SHIFT	0
+#define  REGS_SBPM_UG_STATUS_UG_ACK_STTS_MASK	0x3
+
+/*
+ * High EXCL/Non-Excl status per UG.
+ * 0 - non_exclusive1 - exclusive
+*/
+#define  REGS_SBPM_UG_STATUS_UG_EXCL_HIGH_STTS_SHIFT	16
+#define  REGS_SBPM_UG_STATUS_UG_EXCL_HIGH_STTS_MASK	0x30000
+
+/*
+ * Low EXCL/Non-Excl status per UG.
+ * 0 - non_exclusive1 - exclusive
+*/
+#define  REGS_SBPM_UG_STATUS_UG_EXCL_LOW_STTS_SHIFT	18
+#define  REGS_SBPM_UG_STATUS_UG_EXCL_LOW_STTS_MASK	0xc0000
+
+
+/*
+ * Register <ERROR_HANDLING_PARAMS>
+ *
+ * Parameters and thresholds used for Error handling:
+ * error detection, max search enable and threshold, etc.
+ */
+#define SBPM_REGS_ERROR_HANDLING_PARAMS	0x138
+
+/* Depth (or maximal threshold) for search during Free without context */
+#define  REGS_ERROR_HANDLING_PARAMS_SEARCH_DEPTH_SHIFT	0
+#define  REGS_ERROR_HANDLING_PARAMS_SEARCH_DEPTH_MASK	0x7f
+
+/* Enable for max search during Free without context */
+#define  REGS_ERROR_HANDLING_PARAMS_MAX_SEARCH_EN_MASK	0x80
+
+/* Enable for Last BN checking during Free with context */
+#define  REGS_ERROR_HANDLING_PARAMS_CHCK_LAST_EN_MASK	0x100
+
+/* Freeze Ug/Global counters + mask access to SBPM RAM while in ERROR state */
+#define  REGS_ERROR_HANDLING_PARAMS_FREEZE_IN_ERROR_MASK	0x200
+
+
+/*
+ * Register <SBPM_IIR_LOW_Register> - read-only
+ *
+ * SBPM IIR low (Interrupt information register)
+ */
+#define SBPM_REGS_SBPM_IIR_LOW		0x148
+
+/* Interrupt command source address (latched from BB SA or CPU code) */
+#define  REGS_SBPM_IIR_LOW_CMD_SA_SHIFT	0
+#define  REGS_SBPM_IIR_LOW_CMD_SA_MASK	0x3f
+
+/* Interrupt command target address (latched from BB TA or CPU request) */
+#define  REGS_SBPM_IIR_LOW_CMD_TA_SHIFT	6
+#define  REGS_SBPM_IIR_LOW_CMD_TA_MASK	0x1c0
+
+/*
+ * Interrupt command data lowest 23-bit (latched from BB data[22:
+ * 0] or CPU request data)
+*/
+#define  REGS_SBPM_IIR_LOW_CMD_DATA_22TO0_SHIFT	9
+#define  REGS_SBPM_IIR_LOW_CMD_DATA_22TO0_MASK	0xfffffe00
+
+
+/*
+ * Register <SBPM_IIR_HIGH_Register> - read-only
+ *
+ * SBPM IIR high (Interrupt information register)
+ */
+#define SBPM_REGS_SBPM_IIR_HIGH		0x14c
+
+/*
+ * Data (bits [63:
+ * 23], without reserved bits) of the command that caused interrupt
+*/
+#define  REGS_SBPM_IIR_HIGH_CMD_DATA_23TO63_SHIFT	0
+#define  REGS_SBPM_IIR_HIGH_CMD_DATA_23TO63_MASK	0xffffffff
+
+
+/*
+ * Register <SBPM_DBG_VEC0> - read-only
+ *
+ * SBPM debug vector0 includes 21 bit of control/state machine of CMD pipe
+ */
+#define SBPM_REGS_SBPM_DBG_VEC0		0x150
+
+/* Alloc State Machine{update, rd_head_cnxt} */
+#define  REGS_SBPM_DBG_VEC0_ALLOC_SM_SHIFT	0
+#define  REGS_SBPM_DBG_VEC0_ALLOC_SM_MASK	0x3
+
+/* Connect State Machine{update} */
+#define  REGS_SBPM_DBG_VEC0_CNNCT_SM_MASK	0x4
+
+/* Multicast incr State Machine{read,check,error,update} */
+#define  REGS_SBPM_DBG_VEC0_MCINT_SM_SHIFT	3
+#define  REGS_SBPM_DBG_VEC0_MCINT_SM_MASK	0x78
+
+/* Free w cnxt State Machine{read,check,update,error} */
+#define  REGS_SBPM_DBG_VEC0_FREE_W_CNXT_SM_SHIFT	7
+#define  REGS_SBPM_DBG_VEC0_FREE_W_CNXT_SM_MASK	0x780
+
+/* Free w/o cnxt State Machine{read,check,update,error} */
+#define  REGS_SBPM_DBG_VEC0_FREE_WO_CNXT_SM_SHIFT	11
+#define  REGS_SBPM_DBG_VEC0_FREE_WO_CNXT_SM_MASK	0x7800
+
+/*
+ * Get next State Machine:
+ * {read,reply}
+*/
+#define  REGS_SBPM_DBG_VEC0_GN_SM_SHIFT	15
+#define  REGS_SBPM_DBG_VEC0_GN_SM_MASK	0x18000
+
+/*
+ * Those are the 4 Multi get next states:
+ * {rd_next,error,rd_last,wait}
+*/
+#define  REGS_SBPM_DBG_VEC0_MULTI_GN_SM_SHIFT	17
+#define  REGS_SBPM_DBG_VEC0_MULTI_GN_SM_MASK	0x1e0000
+
+/* the value of the head of FREE list */
+#define  REGS_SBPM_DBG_VEC0_FREE_LST_HD_SHIFT	21
+#define  REGS_SBPM_DBG_VEC0_FREE_LST_HD_MASK	0xffe00000
+
+
+/*
+ * Register <SBPM_DBG_VEC1> - read-only
+ *
+ * SBPM debug vector1 includes 21 bit of control/state machine of CMD pipe
+ */
+#define SBPM_REGS_SBPM_DBG_VEC1		0x154
+
+/* sbpm_ingress2egress_valid bit */
+#define  REGS_SBPM_DBG_VEC1_IN2E_VALID_MASK	0x1
+
+/* multi_get_next_valid bits */
+#define  REGS_SBPM_DBG_VEC1_MULTI_GN_VALID_SHIFT	1
+#define  REGS_SBPM_DBG_VEC1_MULTI_GN_VALID_MASK	0x1e
+
+/* sbpm_ug_active 2 bits */
+#define  REGS_SBPM_DBG_VEC1_UG_ACTIVE_SHIFT	5
+#define  REGS_SBPM_DBG_VEC1_UG_ACTIVE_MASK	0x60
+
+/* sbpm_tx_cmd_fifo_full bit */
+#define  REGS_SBPM_DBG_VEC1_TX_CMD_FULL_MASK	0x80
+
+/* sbpm_rx_fifo_pop bit */
+#define  REGS_SBPM_DBG_VEC1_RX_FIFO_POP_MASK	0x100
+
+/* sbpm_ram_init_start bit */
+#define  REGS_SBPM_DBG_VEC1_RAM_INIT_START_MASK	0x200
+
+/* sbpm_ram_init_done bit */
+#define  REGS_SBPM_DBG_VEC1_RAM_INIT_DONE_MASK	0x400
+
+/* RX FIFO Data in pipe */
+#define  REGS_SBPM_DBG_VEC1_RX_FIFO_DATA_SHIFT	11
+#define  REGS_SBPM_DBG_VEC1_RX_FIFO_DATA_MASK	0x1ff800
+
+/* sbpm_free_rqst_dec */
+#define  REGS_SBPM_DBG_VEC1_FREE_DECODE_MASK	0x200000
+
+/* sbpm_in2e_rqst_dec */
+#define  REGS_SBPM_DBG_VEC1_IN2E_DECODE_MASK	0x400000
+
+/* sbpm_free_wo_cnxt_rqst_dec */
+#define  REGS_SBPM_DBG_VEC1_FREE_WO_DECODE_MASK	0x800000
+
+/* sbpm_get_next_rqst_dec */
+#define  REGS_SBPM_DBG_VEC1_GET_NXT_DECODE_MASK	0x1000000
+
+/* sbpm_multi_get_next_rqst_dec */
+#define  REGS_SBPM_DBG_VEC1_MULTI_GET_NXT_DECODE_MASK	0x2000000
+
+/* sbpm_cnct_rqst_dec */
+#define  REGS_SBPM_DBG_VEC1_CNCT_DECODE_MASK	0x4000000
+
+/* sbpm_free_w_cnxt_rqst_dec */
+#define  REGS_SBPM_DBG_VEC1_FREE_W_DECODE_MASK	0x8000000
+
+/* sbpm_mcinc_rqst_dec */
+#define  REGS_SBPM_DBG_VEC1_MCIN_DECODE_MASK	0x10000000
+
+/* sbpm_alloc_rqst_dec */
+#define  REGS_SBPM_DBG_VEC1_ALLOC_DECODE_MASK	0x20000000
+
+
+/*
+ * Register <SBPM_DBG_VEC2> - read-only
+ *
+ * This is one of the TX_handler debug vectors
+ */
+#define SBPM_REGS_SBPM_DBG_VEC2		0x174
+
+/* sbpm_tx_data_fifo_full */
+#define  REGS_SBPM_DBG_VEC2_TX_DATA_FULL_MASK	0x1
+
+/* sbpm_tx_fifo_empty */
+#define  REGS_SBPM_DBG_VEC2_TX_FIFO_EMPTY_MASK	0x2
+
+/* sbpm_tx_cmd_local_stts_fifo_full */
+#define  REGS_SBPM_DBG_VEC2_LCL_STTS_FULL_MASK	0x4
+
+/* sbpm_tx_cmd_local_stts_fifo_empty */
+#define  REGS_SBPM_DBG_VEC2_LCL_STTS_EMPTY_MASK	0x8
+
+/* sbpm_tx_cmd_fifo_full */
+#define  REGS_SBPM_DBG_VEC2_TX_CMD_FULL_MASK	0x10
+
+/* sbpm_tx_cmd_fifo_empty */
+#define  REGS_SBPM_DBG_VEC2_TX_CMD_FIFO_EMPTY_MASK	0x20
+
+/*
+ * bb_decoder_dest_id - This is the ID of the user that will receive a
+ * message from SBPM
+*/
+#define  REGS_SBPM_DBG_VEC2_BB_DECODER_DEST_ID_SHIFT	6
+#define  REGS_SBPM_DBG_VEC2_BB_DECODER_DEST_ID_MASK	0xfc0
+
+/* sbpm_tx_bbh_send_in_progress bit */
+#define  REGS_SBPM_DBG_VEC2_TX_BBH_SEND_IN_PROGRESS_MASK	0x1000
+
+/* sbpm_sp_2send - this is the user ID that is about to get stts msg */
+#define  REGS_SBPM_DBG_VEC2_SP_2SEND_SHIFT	13
+#define  REGS_SBPM_DBG_VEC2_SP_2SEND_MASK	0x7e000
+
+/*
+ * sbpm_tx2data_fifo_taddr[2:
+ * 0] this is the opcode that describe the type of the reply
+*/
+#define  REGS_SBPM_DBG_VEC2_TX2DATA_FIFO_TADDR_SHIFT	19
+#define  REGS_SBPM_DBG_VEC2_TX2DATA_FIFO_TADDR_MASK	0x380000
+
+/* sbpm_cpu_access bit */
+#define  REGS_SBPM_DBG_VEC2_CPU_ACCESS_MASK	0x400000
+
+/* sbpm_bbh_access bit */
+#define  REGS_SBPM_DBG_VEC2_BBH_ACCESS_MASK	0x800000
+
+/* sbpm_rnr_access bit */
+#define  REGS_SBPM_DBG_VEC2_RNR_ACCESS_MASK	0x1000000
+
+
+/*
+ * Register <SBPM_DBG_VEC3> - read-only
+ *
+ * This is one of TX_handler debug vectors
+ */
+#define SBPM_REGS_SBPM_DBG_VEC3		0x178
+
+/* ALLOC_RPLY bit */
+#define  REGS_SBPM_DBG_VEC3_ALLOC_RPLY_MASK	0x1
+
+/* BN_RPLY value */
+#define  REGS_SBPM_DBG_VEC3_BN_RPLY_SHIFT	1
+#define  REGS_SBPM_DBG_VEC3_BN_RPLY_MASK	0xffe
+
+/* sbpm_txfifo_alloc_ack */
+#define  REGS_SBPM_DBG_VEC3_TXFIFO_ALLOC_ACK_MASK	0x1000
+
+/* sbpm_txfifo_mcinc_ack */
+#define  REGS_SBPM_DBG_VEC3_TX_FIFO_MCINC_ACK_MASK	0x2000
+
+/* sbpm_txfifo_cnct_ack */
+#define  REGS_SBPM_DBG_VEC3_TXFIFO_CNCT_ACK_MASK	0x4000
+
+/* sbpm_txfifo_get_next_reply */
+#define  REGS_SBPM_DBG_VEC3_TXFIFO_GT_NXT_RPLY_MASK	0x8000
+
+/* sbpm_txfifo_multi_get_next_reply */
+#define  REGS_SBPM_DBG_VEC3_TXFIFO_MLTI_GT_NXT_RPLY_MASK	0x10000
+
+/* sbpm_tx_msg_pipe_cur_sm */
+#define  REGS_SBPM_DBG_VEC3_TX_MSG_PIPE_SM_SHIFT	17
+#define  REGS_SBPM_DBG_VEC3_TX_MSG_PIPE_SM_MASK	0x60000
+
+/* sbpm_send_stat_sm_ps */
+#define  REGS_SBPM_DBG_VEC3_SEND_STT_SM_MASK	0x80000
+
+/* sbpm_txfifo_ingress2egress_stts_change */
+#define  REGS_SBPM_DBG_VEC3_TXFIFO_IN2ESTTS_CHNG_MASK	0x100000
+
+
+/*
+ * Register <SBPM_SP_BBH_LOW>
+ *
+ * This register mark all the SPs which are BBHs.
+ * Each bit in this register, refers to a SP with the same index
+ */
+#define SBPM_REGS_SBPM_SP_BBH_LOW	0x17c
+
+/* sbpm_sp_bbh_low bit i tells us if SP #i is a BBH (1) or not (0) */
+#define  REGS_SBPM_SP_BBH_LOW_SBPM_SP_BBH_LOW_SHIFT	0
+#define  REGS_SBPM_SP_BBH_LOW_SBPM_SP_BBH_LOW_MASK	0xffffffff
+
+
+/*
+ * Register <SBPM_SP_BBH_HIGH>
+ *
+ * This register mark all the SPs which are BBHs.
+ * Each bit in this register, refers to a SP with the same index
+ */
+#define SBPM_REGS_SBPM_SP_BBH_HIGH	0x180
+
+/*
+ * Not in use in 68360!sbpm_sp_bbh_high bit i tells us if SP #i is a BBH
+ * (1) or not (0)
+*/
+#define  REGS_SBPM_SP_BBH_HIGH_SBPM_SP_BBH_HIGH_SHIFT	0
+#define  REGS_SBPM_SP_BBH_HIGH_SBPM_SP_BBH_HIGH_MASK	0xffffffff
+
+
+/*
+ * Register <SBPM_SP_RNR_LOW>
+ *
+ * This register mark all the SPs which are runners.
+ * Each bit in this register, refers to a SP with the same index
+ */
+#define SBPM_REGS_SBPM_SP_RNR_LOW	0x184
+
+/* sbpm_sp_rnr_low bit i tells us if SP #i is a runner (1) or not (0) */
+#define  REGS_SBPM_SP_RNR_LOW_SBPM_SP_RNR_LOW_SHIFT	0
+#define  REGS_SBPM_SP_RNR_LOW_SBPM_SP_RNR_LOW_MASK	0xffffffff
+
+
+/*
+ * Register <SBPM_SP_RNR_HIGH>
+ *
+ * This register mark all the SPs which are runners.
+ * Each bit in this register, refers to a SP with the same index
+ */
+#define SBPM_REGS_SBPM_SP_RNR_HIGH	0x188
+
+/*
+ * Not in use in 68360!sbpm_sp_rnr_high bit i tells us if SP #i is a runner
+ * (1) or not (0)
+*/
+#define  REGS_SBPM_SP_RNR_HIGH_SBPM_SP_RNR_HIGH_SHIFT	0
+#define  REGS_SBPM_SP_RNR_HIGH_SBPM_SP_RNR_HIGH_MASK	0xffffffff
+
+
+/*
+ * Register <SBPM_UG_MAP_LOW>
+ *
+ * bit i value determine if SP number i belongs to UG0 (ingress) or UG1
+ * (egress)
+ */
+#define SBPM_REGS_SBPM_UG_MAP_LOW	0x18c
+
+/*
+ * bit i value determine if SP number i belongs to UG0 (ingress) or UG1
+ * (egress)
+*/
+#define  REGS_SBPM_UG_MAP_LOW_SBPM_UG_MAP_LOW_SHIFT	0
+#define  REGS_SBPM_UG_MAP_LOW_SBPM_UG_MAP_LOW_MASK	0xffffffff
+
+
+/*
+ * Register <SBPM_UG_MAP_HIGH>
+ *
+ * bit i value determine if SP number i belongs to UG0 (ingress) or UG1
+ * (egress)
+ */
+#define SBPM_REGS_SBPM_UG_MAP_HIGH	0x190
+
+/*
+ * Not in use in 68360!bit i value determine if SP number i belongs to UG0
+ * (ingress) or UG1 (egress)
+*/
+#define  REGS_SBPM_UG_MAP_HIGH_SBPM_UG_MAP_HIGH_SHIFT	0
+#define  REGS_SBPM_UG_MAP_HIGH_SBPM_UG_MAP_HIGH_MASK	0xffffffff
+
+
+/*
+ * Register <SBPM_NACK_MASK_LOW> - read-only
+ *
+ * bit i value determine if SP number i got nack or not
+ */
+#define SBPM_REGS_SBPM_NACK_MASK_LOW	0x194
+
+/* bit i value determine if SP number i got nack or not */
+#define  REGS_SBPM_NACK_MASK_LOW_SBPM_NACK_MASK_LOW_SHIFT	0
+#define  REGS_SBPM_NACK_MASK_LOW_SBPM_NACK_MASK_LOW_MASK	0xffffffff
+
+
+/*
+ * Register <SBPM_NACK_MASK_HIGH> - read-only
+ *
+ * bit i value determine if SP number i got nack or not
+ */
+#define SBPM_REGS_SBPM_NACK_MASK_HIGH	0x198
+
+/* bit i value determine if SP number i got nack or not */
+#define  REGS_SBPM_NACK_MASK_HIGH_SBPM_NACK_MASK_HIGH_SHIFT	0
+#define  REGS_SBPM_NACK_MASK_HIGH_SBPM_NACK_MASK_HIGH_MASK	0xffffffff
+
+
+/*
+ * Register <SBPM_EXCL_MASK_LOW>
+ *
+ * This register mark all the SPs that should get exclusive messages
+ */
+#define SBPM_REGS_SBPM_EXCL_MASK_LOW	0x19c
+
+/* This register mark all the SPs that should get exclusive messagesyes no */
+#define  REGS_SBPM_EXCL_MASK_LOW_SBPM_EXCL_MASK_LOW_SHIFT	0
+#define  REGS_SBPM_EXCL_MASK_LOW_SBPM_EXCL_MASK_LOW_MASK	0xffffffff
+
+
+/*
+ * Register <SBPM_EXCL_MASK_HIGH>
+ *
+ * This register mark all the SPs that should get exclusive messages
+ */
+#define SBPM_REGS_SBPM_EXCL_MASK_HIGH	0x1a0
+
+/*
+ * Not in use in 68360!This register mark all the SPs that should get
+ * exclusive messagesyes no
+*/
+#define  REGS_SBPM_EXCL_MASK_HIGH_SBPM_EXCL_MASK_HIGH_SHIFT	0
+#define  REGS_SBPM_EXCL_MASK_HIGH_SBPM_EXCL_MASK_HIGH_MASK	0xffffffff
+
+
+/*
+ * Register <SBPM_RADDR_DECODER>
+ *
+ * This register let you choose one user that you would like to change its
+ * default RA.
+ */
+#define SBPM_REGS_SBPM_RADDR_DECODER	0x1a4
+
+/*
+ * this field contains the users id that you want to override its default
+ * RA
+*/
+#define  REGS_SBPM_RADDR_DECODER_ID_2OVERWR_SHIFT	0
+#define  REGS_SBPM_RADDR_DECODER_ID_2OVERWR_MASK	0x3f
+
+/* The new RA */
+#define  REGS_SBPM_RADDR_DECODER_OVERWR_RA_SHIFT	6
+#define  REGS_SBPM_RADDR_DECODER_OVERWR_RA_MASK	0xffc0
+
+/* the overwr mechanism will be used only if this bit is active (1). */
+#define  REGS_SBPM_RADDR_DECODER_OVERWR_VALID_MASK	0x10000
+
+
+/*
+ * Register <SBPM_WR_DATA>
+ *
+ * If SW want to write a whole word into the SBPMs RAM, it needs first to
+ * write the data to this register and then, send connect request with the
+ * wr_req bit asserted, with the address (BN field).
+ */
+#define SBPM_REGS_SBPM_WR_DATA		0x1a8
+
+/*
+ * If SW want to write a whole word into the SBPMs RAM, it needs first to
+ * write the data to this register and then, send connect request with the
+ * wr_req bit asserted, with the address (BN field).
+ * In 68360 the only the 15 LSB are used
+*/
+#define  REGS_SBPM_WR_DATA_SBPM_WR_DATA_SHIFT	0
+#define  REGS_SBPM_WR_DATA_SBPM_WR_DATA_MASK	0x3fffff
+
+
+/*
+ * Register <SBPM_UG_BAC_MAX>
+ *
+ * This register tracks the max values of the UG counters.
+ * it can be reset/modified by SW.
+ */
+#define SBPM_REGS_SBPM_UG_BAC_MAX	0x1ac
+
+/*
+ * This is the maximum value that have been recorded on the UG0 counter.
+ * SW can write to this field in order to change the max record (for
+ * example write 0 to reset it)
+*/
+#define  REGS_SBPM_UG_BAC_MAX_UG0BACMAX_SHIFT	0
+#define  REGS_SBPM_UG_BAC_MAX_UG0BACMAX_MASK	0x3fff
+
+/*
+ * This is the maximum value that have been recorded on the UG1 counter.
+ * SW can write to this field in order to change the max record (for
+ * example write 0 to reset it)
+*/
+#define  REGS_SBPM_UG_BAC_MAX_UG1BACMAX_SHIFT	14
+#define  REGS_SBPM_UG_BAC_MAX_UG1BACMAX_MASK	0xfffc000
+
+
+/*
+ * Register <SBPM_SPARE>
+ *
+ * sbpm spare register
+ */
+#define SBPM_REGS_SBPM_SPARE		0x1b0
+
+/* sbpm_gl_bac_clear_en */
+#define  REGS_SBPM_SPARE_GL_BAC_CLEAR_EN_MASK	0x1
+
+
+/*
+ * Register <INTERRUPT_STATUS_Register>
+ *
+ * This register contains the current active TM interrupts.
+ * Each asserted bit represents an active interrupt source.
+ * The interrupt remains active until the software clears it by writing 1
+ * to the corresponding bit.
+ */
+#define SBPM_INTR_CTRL_ISR		0x200
+
+/*
+ * This error bit indicates underrun state of SBPM Buffer Allocated Counter
+ * (one of User Groups).
+ * SW can clear this bit by writing 1 to this field
+*/
+#define  INTR_CTRL_ISR_BAC_UNDERRUN_MASK	0x1
+
+/*
+ * This error bit indicates if the Multi Cast value of a buffer is in
+ * overflow as a result of erroneous MCINC command
+*/
+#define  INTR_CTRL_ISR_MCST_OVERFLOW_MASK	0x2
+
+/*
+ * This bit indicates error state on Last BN checking during Free with
+ * context request.
+ * SW can clear this bit by writing 1 to this field.
+*/
+#define  INTR_CTRL_ISR_CHECK_LAST_ERR_MASK	0x4
+
+/*
+ * This bit indicates error state on maximal search checking during Free
+ * without context request.
+ * SW can clear this bit by writing 1 to this field.
+*/
+#define  INTR_CTRL_ISR_MAX_SEARCH_ERR_MASK	0x8
+
+/*
+ * This bit indicates invalid ingress2egress command (caused BAC
+ * under/overrun).
+ * SW can clear this bit by writing 1 to this field.
+*/
+#define  INTR_CTRL_ISR_INVALID_IN2E_MASK	0x10
+
+/*
+ * This bit indicates Null encounter during one of the next BNs.
+ * SW can clear this bit by writing 0 to this field.
+*/
+#define  INTR_CTRL_ISR_MULTI_GET_NEXT_NULL_MASK	0x20
+
+/*
+ * This bit indicates connection of the NULL buffer to another buffer.
+ * SW can clear this bit by writing 0 to this field.
+*/
+#define  INTR_CTRL_ISR_CNCT_NULL_MASK	0x40
+
+/*
+ * This bit indicates allocation of the NULL buffer.
+ * SW can clear this bit by writing 0 to this field.
+*/
+#define  INTR_CTRL_ISR_ALLOC_NULL_MASK	0x80
+
+
+/*
+ * Register <INTERRUPT_STATUS_MASKED_Register> - read-only
+ *
+ * This register provides only the enabled interrupts for each of the
+ * interrupt sources depicted in the ISR register.
+ */
+#define SBPM_INTR_CTRL_ISM		0x204
+
+/* Status Masked of corresponding interrupt source in the ISR */
+#define  INTR_CTRL_ISM_ISM_SHIFT	0
+#define  INTR_CTRL_ISM_ISM_MASK		0xffffffff
+
+
+/*
+ * Register <INTERRUPT_ENABLE_Register>
+ *
+ * This register provides an enable mask for each of the interrupt sources
+ * depicted in the ISR register.
+ */
+#define SBPM_INTR_CTRL_IER		0x208
+
+/*
+ * Each bit in the mask controls the corresponding interrupt source in the
+ * IER
+*/
+#define  INTR_CTRL_IER_IEM_SHIFT	0
+#define  INTR_CTRL_IER_IEM_MASK		0xffffffff
+
+
+/*
+ * Register <INTERRUPT_TEST_Register>
+ *
+ * This register enables testing by simulating interrupt sources.
+ * When the software sets a bit in the ITR, the corresponding bit in the
+ * ISR shows an active interrupt.
+ * The interrupt remains active until software clears the bit in the ITR
+ */
+#define SBPM_INTR_CTRL_ITR		0x20c
+
+/* Each bit in the mask tests the corresponding interrupt source in the ISR */
+#define  INTR_CTRL_ITR_IST_SHIFT	0
+#define  INTR_CTRL_ITR_IST_MASK		0xffffffff
+
+
+#endif /* ! XRDP_REGS_SBPM_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_tcam.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_tcam.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_tcam.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_tcam.h	2025-09-25 17:40:35.075364786 +0200
@@ -0,0 +1,225 @@
+#ifndef XRDP_REGS_TCAM_H_
+#define XRDP_REGS_TCAM_H_
+
+/* relative to core */
+#define TCAM_OFFSET_0			0xe00000
+
+/*
+ * Registers <CONTEXT> - <x> is [ 0 => 2047 ]
+ *
+ * Each 64 bit entry in the context ram occupies two addresses:
+ * For 64bit entry number i:
+ * the 32 least significant bits of the context are in address 2*ithe 32
+ * most significant bits of the context are in address 2*i +1
+ */
+#define TCAM_CONTEXT_RAM_TCAM_TCAM_CONTEXT_RAM_CONTEXT(x)	(0x0 + (x) * 0x4)
+
+/* . */
+#define  CONTEXT_RAM_TCAM_TCAM_CONTEXT_RAM_CONTEXT_DATA_SHIFT	0
+#define  CONTEXT_RAM_TCAM_TCAM_CONTEXT_RAM_CONTEXT_DATA_MASK	0xffffffff
+
+
+/*
+ * Register <BANK_ENABLE>
+ *
+ * The TCAM is divided into 8 banks.
+ * banks can be disabled to save power.
+ * bit i correspond to addresses i*128:
+ * i*128+127
+ */
+#define TCAM_CFG_TCAM_TCAM_CFG_BANK_EN	0x2000
+
+/* . */
+#define  CFG_TCAM_TCAM_CFG_BANK_EN_VALUE_SHIFT	0
+#define  CFG_TCAM_TCAM_CFG_BANK_EN_VALUE_MASK	0xff
+
+
+/*
+ * Registers <GLOBAL_MASK> - <x> is [ 0 => 7 ]
+ *
+ * Global Mask - 256bit mask for all entries.
+ * Default value enable all bits.
+ */
+#define TCAM_CFG_TCAM_TCAM_CFG_GLOBAL_MASK(x)	(0x2010 + (x) * 0x4)
+
+/* . */
+#define  CFG_TCAM_TCAM_CFG_GLOBAL_MASK_VALUE_SHIFT	0
+#define  CFG_TCAM_TCAM_CFG_GLOBAL_MASK_VALUE_MASK	0xffffffff
+
+
+/*
+ * Register <SEARCHES_256BIT> - read-only
+ *
+ * Number of 256bit key searches
+ */
+#define TCAM_COUNTERS_TCAM_TCAM_COUNTERS_SRCH_256	0x2100
+
+/* . */
+#define  COUNTERS_TCAM_TCAM_COUNTERS_SRCH_256_CNT_SHIFT	0
+#define  COUNTERS_TCAM_TCAM_COUNTERS_SRCH_256_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <HITS_256BIT> - read-only
+ *
+ * Number of 256bit key hits
+ */
+#define TCAM_COUNTERS_TCAM_TCAM_COUNTERS_HIT_256	0x2104
+
+/* . */
+#define  COUNTERS_TCAM_TCAM_COUNTERS_HIT_256_CNT_SHIFT	0
+#define  COUNTERS_TCAM_TCAM_COUNTERS_HIT_256_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <SEARCHES_512BIT> - read-only
+ *
+ * Number of 512bit key searches
+ */
+#define TCAM_COUNTERS_TCAM_TCAM_COUNTERS_SRCH_512	0x2108
+
+/* . */
+#define  COUNTERS_TCAM_TCAM_COUNTERS_SRCH_512_CNT_SHIFT	0
+#define  COUNTERS_TCAM_TCAM_COUNTERS_SRCH_512_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <HITS_512BIT> - read-only
+ *
+ * Number of 512bit key hits
+ */
+#define TCAM_COUNTERS_TCAM_TCAM_COUNTERS_HIT_512	0x210c
+
+/* . */
+#define  COUNTERS_TCAM_TCAM_COUNTERS_HIT_512_CNT_SHIFT	0
+#define  COUNTERS_TCAM_TCAM_COUNTERS_HIT_512_CNT_MASK	0xffffffff
+
+
+/*
+ * Register <OPERATION>
+ *
+ * TCAM Operation:
+ * 0 - TCAM READ1 - TCAM Write2 - TCAM Compare3 - TCAM valid bit
+ * resetWriting to this register triggers the operation.
+ * All other relevant register should be ready before SW writes to this
+ * register.
+ */
+#define TCAM_INDIRECT_TCAM_TCAM_INDIRECT_OP	0x2200
+
+/* . */
+#define  INDIRECT_TCAM_TCAM_INDIRECT_OP_CMD_SHIFT	0
+#define  INDIRECT_TCAM_TCAM_INDIRECT_OP_CMD_MASK	0xf
+
+
+/*
+ * Register <OPERATION_DONE> - read-only
+ *
+ * Raised when the TCAM operation is completed (cleared by HW on write to
+ * the OPERATION register)
+ */
+#define TCAM_INDIRECT_TCAM_TCAM_INDIRECT_OP_DONE	0x2204
+
+/* . */
+#define  INDIRECT_TCAM_TCAM_INDIRECT_OP_DONE_DONE_MASK	0x1
+
+
+/*
+ * Register <ADDRESS>
+ *
+ * Key Address to be used in RD/WR operations.
+ */
+#define TCAM_INDIRECT_TCAM_TCAM_INDIRECT_ADDR	0x2208
+
+/*
+ * This bit indicate if the operation (RD/WR) is performed on the key0 or
+ * key1 part of the entry
+*/
+#define  INDIRECT_TCAM_TCAM_INDIRECT_ADDR_KEY1_IND_MASK	0x1
+
+/* Address of the entry */
+#define  INDIRECT_TCAM_TCAM_INDIRECT_ADDR_ENTRY_ADDR_SHIFT	1
+#define  INDIRECT_TCAM_TCAM_INDIRECT_ADDR_ENTRY_ADDR_MASK	0x7fe
+
+
+/*
+ * Register <VALID_IN>
+ *
+ * Valid value to be written - this value is relevant during write
+ * operation on key0.
+ */
+#define TCAM_INDIRECT_TCAM_TCAM_INDIRECT_VLID_IN	0x220c
+
+/* . */
+#define  INDIRECT_TCAM_TCAM_INDIRECT_VLID_IN_VALID_MASK	0x1
+
+
+/*
+ * Register <VALID_OUT>
+ *
+ * Valid value read from the TCAM - this value is relevant during read
+ * operation on key0.
+ */
+#define TCAM_INDIRECT_TCAM_TCAM_INDIRECT_VLID_OUT	0x2214
+
+/* . */
+#define  INDIRECT_TCAM_TCAM_INDIRECT_VLID_OUT_VALID_MASK	0x1
+
+
+/*
+ * Register <SEARCH_RESULT> - read-only
+ *
+ * The result of a search operation
+ */
+#define TCAM_INDIRECT_TCAM_TCAM_INDIRECT_RSLT	0x2218
+
+/* indicate if a match was found */
+#define  INDIRECT_TCAM_TCAM_INDIRECT_RSLT_MATCH_MASK	0x1
+
+/* index related to a match result */
+#define  INDIRECT_TCAM_TCAM_INDIRECT_RSLT_INDEX_SHIFT	4
+#define  INDIRECT_TCAM_TCAM_INDIRECT_RSLT_INDEX_MASK	0x3ff0
+
+
+/*
+ * Registers <KEY_IN> - <x> is [ 0 => 7 ]
+ *
+ * Key to be used in Write/Compare operations.
+ * The Key is 256bit long and is represented by 8 registers.
+ * The lower address register correspond to the least significant bits of
+ * the key.
+ */
+#define TCAM_INDIRECT_TCAM_TCAM_INDIRECT_KEY_IN(x)	(0x2220 + (x) * 0x4)
+
+/* . */
+#define  INDIRECT_TCAM_TCAM_INDIRECT_KEY_IN_VALUE_SHIFT	0
+#define  INDIRECT_TCAM_TCAM_INDIRECT_KEY_IN_VALUE_MASK	0xffffffff
+
+
+/*
+ * Registers <KEY_OUT> - <x> is [ 0 => 7 ] - read-only
+ *
+ * Key returned from the CAM in a read operation.
+ * The Key is 256bit long and is represented by 8 registers.
+ * The lower address register correspond to the least significant bits of
+ * the key.
+ */
+#define TCAM_INDIRECT_TCAM_TCAM_INDIRECT_KEY_OUT(x)	(0x2240 + (x) * 0x4)
+
+/* . */
+#define  INDIRECT_TCAM_TCAM_INDIRECT_KEY_OUT_VALUE_SHIFT	0
+#define  INDIRECT_TCAM_TCAM_INDIRECT_KEY_OUT_VALUE_MASK	0xffffffff
+
+
+/*
+ * Register <SELECT>
+ *
+ * Select
+ */
+#define TCAM_DEBUG_BUS_TCAM_DEBUG_BUS_SELECT	0x2500
+
+/* selection */
+#define  DEBUG_BUS_TCAM_DEBUG_BUS_SELECT_SELECT_MODULE_SHIFT	0
+#define  DEBUG_BUS_TCAM_DEBUG_BUS_SELECT_SELECT_MODULE_MASK	0x3
+
+
+#endif /* ! XRDP_REGS_TCAM_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_ubus_mstr.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_ubus_mstr.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_ubus_mstr.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_ubus_mstr.h	2025-09-25 17:40:35.075364786 +0200
@@ -0,0 +1,137 @@
+#ifndef XRDP_REGS_UBUS_MSTR_H_
+#define XRDP_REGS_UBUS_MSTR_H_
+
+/* relative to core */
+#define UBUS_MSTR_OFFSET_0		0xd96000
+
+/*
+ * Register <BRDG_EN>
+ *
+ * bridge enable
+ */
+#define UBUS_MSTR_EN			0x0
+
+/* bridge enable */
+#define  EN_EN_MASK			0x1
+
+
+/*
+ * Register <RQUSTOR_CTRL>
+ *
+ * Requestor side control.
+ * These registers are related to ubus requestor control
+ */
+#define UBUS_MSTR_REQ_CNTRL		0x4
+
+/* ID that can be added to a packet */
+#define  REQ_CNTRL_PKT_ID_SHIFT		0
+#define  REQ_CNTRL_PKT_ID_MASK		0xff
+
+/* enable packet tagging */
+#define  REQ_CNTRL_PKT_TAG_MASK		0x100
+
+/* endian mode of the requester */
+#define  REQ_CNTRL_ENDIAN_MODE_SHIFT	16
+#define  REQ_CNTRL_ENDIAN_MODE_MASK	0x30000
+
+/* repin endian swap */
+#define  REQ_CNTRL_REPIN_ESWAP_MASK	0x40000
+
+/* reqout endian swap */
+#define  REQ_CNTRL_REQOUT_ESWAP_MASK	0x80000
+
+/* indicate an error on Ubus */
+#define  REQ_CNTRL_DEV_ERR_MASK		0x100000
+
+/* Max packet len that the bridge can support */
+#define  REQ_CNTRL_MAX_PKT_LEN_SHIFT	24
+#define  REQ_CNTRL_MAX_PKT_LEN_MASK	0xff000000
+
+
+/*
+ * Register <HYST_CTRL>
+ *
+ * control the command / data queue full and empty indications.
+ */
+#define UBUS_MSTR_HYST_CTRL		0x8
+
+/*
+ * command space indication that controls the ARdy signal.
+ * Once the HSPACE indication is lower than CMD_SPACE the ARdy will be
+ * deasserted
+*/
+#define  HYST_CTRL_CMD_SPACE_SHIFT	0
+#define  HYST_CTRL_CMD_SPACE_MASK	0x3ff
+
+/*
+ * data space indication that controls the ARdy signal.
+ * Once the DSPACE indication is lower than DATA_SPACE the ARdy will be
+ * deasserted
+*/
+#define  HYST_CTRL_DATA_SPACE_SHIFT	16
+#define  HYST_CTRL_DATA_SPACE_MASK	0x3ff0000
+
+
+/*
+ * Register <HIGH_PRIORITY>
+ *
+ * controls the high priority mechanism
+ */
+#define UBUS_MSTR_HP			0xc
+
+/* enables the hp mechanism */
+#define  HP_HP_EN_MASK			0x1
+
+/* selects between external control and internal control of the HP bit */
+#define  HP_HP_SEL_MASK			0x2
+
+/* combines both internal and external HP control (OR between them) */
+#define  HP_HP_COMB_MASK		0x4
+
+/*
+ * counter will count according to this setting the amount of cycles the HP
+ * will be asserted in the internal mech
+*/
+#define  HP_HP_CNT_HIGH_SHIFT		8
+#define  HP_HP_CNT_HIGH_MASK		0xf00
+
+/*
+ * includes both asserted and deasserted cycles of the HP counter.
+ * can control with hp_cnt_high the frequency of the HP assertion
+*/
+#define  HP_HP_CNT_TOTAL_SHIFT		16
+#define  HP_HP_CNT_TOTAL_MASK		0xf0000
+
+
+/*
+ * Register <REPLY_ADDRESS>
+ *
+ * holds the termination address used for the read reply command
+ */
+#define UBUS_MSTR_REPLY_ADD		0x10
+
+/*
+ * address value used for the read reply.
+ * a read command with this address will be terminated in the bridge
+*/
+#define  REPLY_ADD_ADD_SHIFT		0
+#define  REPLY_ADD_ADD_MASK		0xffffffff
+
+
+/*
+ * Register <REPLY_DATA>
+ *
+ * holds the data value for the read reply command.
+ * the data held in this register will be returned to runner
+ */
+#define UBUS_MSTR_REPLY_DATA		0x14
+
+/*
+ * holds the data value for the read reply command.
+ * the data held in this register will be returned to runner
+*/
+#define  REPLY_DATA_DATA_SHIFT		0
+#define  REPLY_DATA_DATA_MASK		0xffffffff
+
+
+#endif /* ! XRDP_REGS_UBUS_MSTR_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_ubus_slv.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_ubus_slv.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/regs/xrdp_regs_ubus_slv.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/regs/xrdp_regs_ubus_slv.h	2025-09-25 17:40:35.075364786 +0200
@@ -0,0 +1,1004 @@
+#ifndef XRDP_REGS_UBUS_SLV_H_
+#define XRDP_REGS_UBUS_SLV_H_
+
+/* relative to core */
+#define UBUS_SLV_OFFSET_0		0xd97000
+
+/*
+ * Register <VPB_BASE>
+ *
+ * VPB Base address
+ */
+#define UBUS_SLV_VPB_BASE		0x4
+
+/* base */
+#define  VPB_BASE_BASE_SHIFT		0
+#define  VPB_BASE_BASE_MASK		0xffffffff
+
+
+/*
+ * Register <VPB_MASK>
+ *
+ * VPB mask address
+ */
+#define UBUS_SLV_VPB_MASK		0x8
+
+/* mask */
+#define  VPB_MASK_MASK_SHIFT		0
+#define  VPB_MASK_MASK_MASK		0xffffffff
+
+
+/*
+ * Register <APB_BASE>
+ *
+ * APB Base address
+ */
+#define UBUS_SLV_APB_BASE		0xc
+
+/* base */
+#define  APB_BASE_BASE_SHIFT		0
+#define  APB_BASE_BASE_MASK		0xffffffff
+
+
+/*
+ * Register <APB_MASK>
+ *
+ * APB mask address
+ */
+#define UBUS_SLV_APB_MASK		0x10
+
+/* mask */
+#define  APB_MASK_MASK_SHIFT		0
+#define  APB_MASK_MASK_MASK		0xffffffff
+
+
+/*
+ * Register <DQM_BASE>
+ *
+ * DQM Base address
+ */
+#define UBUS_SLV_DQM_BASE		0x14
+
+/* base */
+#define  DQM_BASE_BASE_SHIFT		0
+#define  DQM_BASE_BASE_MASK		0xffffffff
+
+
+/*
+ * Register <DQM_MASK>
+ *
+ * DQM mask address
+ */
+#define UBUS_SLV_DQM_MASK		0x18
+
+/* mask */
+#define  DQM_MASK_MASK_SHIFT		0
+#define  DQM_MASK_MASK_MASK		0xffffffff
+
+
+/*
+ * Register <INTERRUPT_STATUS_Register>
+ *
+ * This register contains the current active QM interrupts.
+ * Each asserted bit represents an active interrupt source.
+ * The interrupt remains active until the software clears it by writing 1
+ * to the corresponding bit.
+ */
+#define UBUS_SLV_RNR_INTR_CTRL_ISR	0x80
+
+/* ISR - 32bit RNR INT */
+#define  RNR_INTR_CTRL_ISR_IST_SHIFT	0
+#define  RNR_INTR_CTRL_ISR_IST_MASK	0xffffffff
+
+
+/*
+ * Register <INTERRUPT_STATUS_MASKED_Register> - read-only
+ *
+ * This register provides only the enabled interrupts for each of the
+ * interrupt sources depicted in the ISR register.
+ */
+#define UBUS_SLV_RNR_INTR_CTRL_ISM	0x84
+
+/* Status Masked of corresponding interrupt source in the ISR */
+#define  RNR_INTR_CTRL_ISM_ISM_SHIFT	0
+#define  RNR_INTR_CTRL_ISM_ISM_MASK	0xffffffff
+
+
+/*
+ * Register <INTERRUPT_ENABLE_Register>
+ *
+ * This register provides an enable mask for each of the interrupt sources
+ * depicted in the ISR register.
+ */
+#define UBUS_SLV_RNR_INTR_CTRL_IER	0x88
+
+/*
+ * Each bit in the mask controls the corresponding interrupt source in the
+ * IER
+*/
+#define  RNR_INTR_CTRL_IER_IEM_SHIFT	0
+#define  RNR_INTR_CTRL_IER_IEM_MASK	0xffffffff
+
+
+/*
+ * Register <INTERRUPT_TEST_Register>
+ *
+ * This register enables testing by simulating interrupt sources.
+ * When the software sets a bit in the ITR, the corresponding bit in the
+ * ISR shows an active interrupt.
+ * The interrupt remains active until software clears the bit in the ITR
+ */
+#define UBUS_SLV_RNR_INTR_CTRL_ITR	0x8c
+
+/* Each bit in the mask tests the corresponding interrupt source in the ISR */
+#define  RNR_INTR_CTRL_ITR_IST_SHIFT	0
+#define  RNR_INTR_CTRL_ITR_IST_MASK	0xffffffff
+
+
+/*
+ * Register <PROFILING_CFG>
+ *
+ * Profiling configuration settings
+ */
+#define UBUS_SLV_PROF_CFG		0x100
+
+/* Enable free-running counter */
+#define  PROF_CFG_COUNTER_ENABLE_MASK	0x1
+
+/* Start profiling window. */
+#define  PROF_CFG_PROFILING_START_MASK	0x2
+
+/* Enable manual stop mode */
+#define  PROF_CFG_MANUAL_STOP_MODE_MASK	0x4
+
+/* Stop window now */
+#define  PROF_CFG_DO_MANUAL_STOP_MASK	0x8
+
+
+/*
+ * Register <PROFILING_STATUS> - read-only
+ *
+ * Profiling status
+ */
+#define UBUS_SLV_PROF_STATUS		0x104
+
+/* Profiling is currently on */
+#define  PROF_STATUS_PROFILING_ON_MASK	0x1
+
+/*
+ * Current value of profiling window cycles counter (bits [30:
+ * 0])
+*/
+#define  PROF_STATUS_CYCLES_COUNTER_SHIFT	1
+#define  PROF_STATUS_CYCLES_COUNTER_MASK	0xfffffffe
+
+
+/*
+ * Register <PROFILING_COUNTER> - read-only
+ *
+ * Read PROFILING_COUNTER current value
+ */
+#define UBUS_SLV_PROF_COUNTER		0x108
+
+/* Value */
+#define  PROF_COUNTER_VAL_SHIFT		0
+#define  PROF_COUNTER_VAL_MASK		0xffffffff
+
+
+/*
+ * Register <PROFILING_START_VALUE> - read-only
+ *
+ * Read PROFILING_START_VALUE value
+ */
+#define UBUS_SLV_PROF_START_VALUE	0x10c
+
+/* Value */
+#define  PROF_START_VALUE_VAL_SHIFT	0
+#define  PROF_START_VALUE_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <PROFILING_STOP_VALUE> - read-only
+ *
+ * Read PROFILING_STOP_VALUE value
+ */
+#define UBUS_SLV_PROF_STOP_VALUE	0x110
+
+/* Value */
+#define  PROF_STOP_VALUE_VAL_SHIFT	0
+#define  PROF_STOP_VALUE_VAL_MASK	0xffffffff
+
+
+/*
+ * Register <PROFILING_CYCLE_NUM>
+ *
+ * Set length of profiling window
+ */
+#define UBUS_SLV_PROF_CYCLE_NUM		0x114
+
+/* Length of profiling window in 500MHz clock cycles */
+#define  PROF_CYCLE_NUM_PROFILING_CYCLES_NUM_SHIFT	0
+#define  PROF_CYCLE_NUM_PROFILING_CYCLES_NUM_MASK	0xffffffff
+
+
+/*
+ * Register <RGMII_CNTRL>
+ *
+ * RGMII Control Register
+ */
+#define UBUS_SLV__CNTRL			0x300
+
+/*
+ * When set this bit enables RGMII interface.
+ * This bit acts as a reset for RGMII block and therefore it can be used to
+ * reset RGMII block when needed.
+*/
+#define  _CNTRL_RGMII_MODE_EN_MASK	0x1
+
+/*
+ * RGMII Internal Delay (ID) mode disable.
+ * When set RGMII transmit clock edges are aligned with the data.
+ * When cleared RGMII transmit clock edges are centered in the middle of
+ * (transmit) data valid window.
+*/
+#define  _CNTRL_ID_MODE_DIS_MASK	0x2
+
+/*
+ * Port Mode encoded as:
+ * 000 :
+ * Internal EPHY (MII).
+ * 001 :
+ * Internal GPHY (GMII/MII).
+ * 010 :
+ * External EPHY (MII).
+ * 011 :
+ * External GPHY (RGMII).
+ * 100 :
+ * External RvMII.
+ * Not all combinations are applicable to all chips.
+*/
+#define  _CNTRL_PORT_MODE_SHIFT		2
+#define  _CNTRL_PORT_MODE_MASK		0x1c
+
+/*
+ * Selects clock in RvMII mode.
+ * 0 :
+ * RvMII reference clock is 50MHz.
+ * 1 :
+ * RvMII reference clock is 25MHz.
+*/
+#define  _CNTRL_RVMII_REF_SEL_MASK	0x20
+
+/*
+ * Rx Pause as negotiated by the attached PHY.
+ * Obtained by SW via MDIO.
+*/
+#define  _CNTRL_RX_PAUSE_EN_MASK	0x40
+
+/*
+ * Tx Pause as negotiated by the attached PHY.
+ * Obtained by SW via MDIO.
+*/
+#define  _CNTRL_TX_PAUSE_EN_MASK	0x80
+
+/*
+ * When set, enables stopping TX_CLK after LPI is asserted.
+ * This bit should be set only when the connected EEE PHY supports it.
+*/
+#define  _CNTRL_TX_CLK_STOP_EN_MASK	0x100
+
+/*
+ * Specifies number of cycles after which TX_CLK will be stopped (after LPI
+ * is asserted), if the clock stopping is enabled.
+*/
+#define  _CNTRL_LPI_COUNT_SHIFT		9
+#define  _CNTRL_LPI_COUNT_MASK		0x3e00
+
+/*
+ * When this bit is set to 1b1, RX_ERR signal toward the MAC is 1b0 (i.
+ * e.
+ * no error).
+ * Applicable to MII/rvMII interfaces and used in case where link partner
+ * does not support RX_ERR.
+*/
+#define  _CNTRL_RX_ERR_MASK_MASK	0x4000
+
+/*
+ * When this bit is set to 1b1, COL signal toward the MAC is 1b0 and CRS
+ * signal toward the MAC is 1b1.
+ * Applicable to MII/rvMII interfaces and used in case where link partner
+ * does not support COL/CRS or the link is full-duplex.
+ * Note that as per IEEE 802.
+ * 3 MACs ignore COL/CRS in full-duplex mode and therefore it is not
+ * necessarily required to set this bit.
+*/
+#define  _CNTRL_COL_CRS_MASK_MASK	0x8000
+
+
+/*
+ * Register <RGMII_IB_STATUS>
+ *
+ * RGMII IB Status Register
+ */
+#define UBUS_SLV__IB_STATUS		0x304
+
+/*
+ * RGMII operating speed as extracted from in-band signaling.
+ * 00 :
+ * 10Mbp/s.
+ * 01 :
+ * 100Mbp/s.
+ * 10 :
+ * 1000Mbp/s.
+ * 11 :
+ * reserved.
+*/
+#define  _IB_STATUS_SPEED_DECODE_SHIFT	0
+#define  _IB_STATUS_SPEED_DECODE_MASK	0x3
+
+/*
+ * RGMII duplex mode as extracted from in-band signaling.
+ * 1 :
+ * Full Duplex.
+ * 0 :
+ * Half Duplex.
+*/
+#define  _IB_STATUS_DUPLEX_DECODE_MASK	0x4
+
+/*
+ * RGMII link indication as extracted from in-band signaling.
+ * 0 :
+ * Link Down.
+ * 1 :
+ * Link Up.
+*/
+#define  _IB_STATUS_LINK_DECODE_MASK	0x8
+
+/*
+ * When this bit is set, RGMII in-band status can be overridden by bits [3:
+ * 0] of this register by SW.
+*/
+#define  _IB_STATUS_IB_STATUS_OVRD_MASK	0x10
+
+
+/*
+ * Register <RGMII_RX_CLOCK_DELAY_CNTRL>
+ *
+ * RGMII RX Clock Delay Control Register
+ */
+#define UBUS_SLV__RX_CLOCK_DELAY_CNTRL	0x308
+
+/*
+ * Charge pump current control.
+ * Contact BRCM for more information
+*/
+#define  _RX_CLOCK_DELAY_CNTRL_CTRI_SHIFT	0
+#define  _RX_CLOCK_DELAY_CNTRL_CTRI_MASK	0x3
+
+/*
+ * VCDL control.
+ * Contact BRCM for more information
+*/
+#define  _RX_CLOCK_DELAY_CNTRL_DRNG_SHIFT	2
+#define  _RX_CLOCK_DELAY_CNTRL_DRNG_MASK	0xc
+
+/*
+ * When set puts 2ns delay line in IDDQ mode.
+ * Requires HW reset (see bit 8 of this register) to bring 2ns delay line
+ * from power down.
+*/
+#define  _RX_CLOCK_DELAY_CNTRL_IDDQ_MASK	0x10
+
+/*
+ * When set it puts 2ns delay line in bypass mode (default).
+ * This bit should be cleared only in non-ID mode.
+*/
+#define  _RX_CLOCK_DELAY_CNTRL_BYPASS_MASK	0x20
+
+/*
+ * When set delay line delay is ~2ns and when cleared delay line is > 2.
+ * 2ns.
+ * Valid only when DLY_OVERRIDE bit is set.
+*/
+#define  _RX_CLOCK_DELAY_CNTRL_DLY_SEL_MASK	0x40
+
+/* Overrides HW selected delay. */
+#define  _RX_CLOCK_DELAY_CNTRL_DLY_OVERRIDE_MASK	0x80
+
+/* When set it resets 2ns delay line. */
+#define  _RX_CLOCK_DELAY_CNTRL_RESET_MASK	0x100
+
+
+/*
+ * Register <RGMII_ATE_RX_CNTRL_EXP_DATA>
+ *
+ * RGMII port ATE RX Control and Expected Data Register
+ */
+#define UBUS_SLV__ATE_RX_CNTRL_EXP_DATA	0x30c
+
+/*
+ * Data expected on the even rising edge of the RXC clock on the RGMII Rx
+ * interface.
+ * Bits[3:
+ * 0] of this register are used only in MII modes and they represent RXD[3:
+ * 0].
+ * Bit 8 corresponds RX_ER.
+ * Not used in Packet Generation mode.
+*/
+#define  _ATE_RX_CNTRL_EXP_DATA_EXPECTED_DATA_0_SHIFT	0
+#define  _ATE_RX_CNTRL_EXP_DATA_EXPECTED_DATA_0_MASK	0x1ff
+
+/*
+ * Data expected on the odd rising edge of the RXC clock on the RGMII Rx
+ * interface.
+ * Bits[12:
+ * 9] of this register are used only in MII modes and they represent RXD[3:
+ * 0].
+ * Bit 17 corresponds RX_ER.
+ * Not used in Packet Generation mode.
+*/
+#define  _ATE_RX_CNTRL_EXP_DATA_EXPECTED_DATA_1_SHIFT	9
+#define  _ATE_RX_CNTRL_EXP_DATA_EXPECTED_DATA_1_MASK	0x3fe00
+
+/*
+ * Count that specifies how many consecutive {EXPECTED_DATA_0,
+ * EXPECTED_DATA_1, EXPECTED_DATA_2, EXPECTED_DATA_3 } patterns should be
+ * received before RX_OK signal is asserted.
+ * In packet generation mode it specifies number of expected packets.
+*/
+#define  _ATE_RX_CNTRL_EXP_DATA_GOOD_COUNT_SHIFT	18
+#define  _ATE_RX_CNTRL_EXP_DATA_GOOD_COUNT_MASK	0x3fc0000
+
+/*
+ * When set resets received packets counter.
+ * Used only in packet generation mode (PKT_GEN_MODE bit is set).
+*/
+#define  _ATE_RX_CNTRL_EXP_DATA_PKT_COUNT_RST_MASK	0x4000000
+
+/* When set enables ATE testing */
+#define  _ATE_RX_CNTRL_EXP_DATA_ATE_EN_MASK	0x8000000
+
+
+/*
+ * Register <RGMII_ATE_RX_EXP_DATA_1>
+ *
+ * RGMII port ATE RX Expected Data 1 Register
+ */
+#define UBUS_SLV__ATE_RX_EXP_DATA_1	0x310
+
+/*
+ * Data expected on the even rising edge of the RXC clock on the RGMII Rx
+ * interface.
+ * Bits[3:
+ * 0] of this register are used only in MII modes and they represent RXD[3:
+ * 0].
+ * Bit 8 corresponds RX_ER.
+ * Not used in Packet Generation mode.
+*/
+#define  _ATE_RX_EXP_DATA_1_EXPECTED_DATA_2_SHIFT	0
+#define  _ATE_RX_EXP_DATA_1_EXPECTED_DATA_2_MASK	0x1ff
+
+/*
+ * Data expected on the odd rising edge of the RXC clock on the RGMII Rx
+ * interface.
+ * Bits[12:
+ * 9] of this register are used only in MII modes and they represent RXD[3:
+ * 0].
+ * Bit 17 corresponds RX_ER.
+ * Not used in Packet Generation mode.
+*/
+#define  _ATE_RX_EXP_DATA_1_EXPECTED_DATA_3_SHIFT	9
+#define  _ATE_RX_EXP_DATA_1_EXPECTED_DATA_3_MASK	0x3fe00
+
+
+/*
+ * Register <RGMII_ATE_RX_STATUS_0> - read-only
+ *
+ * RGMII port ATE RX Status 0 Register
+ */
+#define UBUS_SLV__ATE_RX_STATUS_0	0x314
+
+/*
+ * Data received on the even rising edge of the RXC clock on the RGMII Rx
+ * interface.
+ * In MII modes, only Bits[3:
+ * 0] of this register are used only for RXD[3:
+ * 0].
+ * Bit[8]:
+ * RX_ERIn Packet Generation mode bits [7:
+ * 0] are 1st received byte after SOF.
+*/
+#define  _ATE_RX_STATUS_0_RECEIVED_DATA_0_SHIFT	0
+#define  _ATE_RX_STATUS_0_RECEIVED_DATA_0_MASK	0x1ff
+
+/*
+ * Data received on the odd rising edge of the RXC clock on the RGMII Rx
+ * interface.
+ * In MII modes, only Bits[12:
+ * 9] of this register are used only for RXD[3:
+ * 0].
+ * Bit[17]:
+ * RX_ERIn Packet Generation mode bits [7:
+ * 0] are 2nd received byte after SOF.
+*/
+#define  _ATE_RX_STATUS_0_RECEIVED_DATA_1_SHIFT	9
+#define  _ATE_RX_STATUS_0_RECEIVED_DATA_1_MASK	0x3fe00
+
+/*
+ * Test Status.
+ * This bit is cleared by HW on the rising edge of RX_CTL and asserted if
+ * GOOD_COUNT consecutive expected patterns are detected.
+ * In packet generation mode this bit is cleared when PKT_COUNT_RST bit is
+ * set and set when received packet count = GOOD_COUNT.
+*/
+#define  _ATE_RX_STATUS_0_RX_OK_MASK	0x40000
+
+
+/*
+ * Register <RGMII_ATE_RX_STATUS_1> - read-only
+ *
+ * RGMII port ATE RX Status 1 Register
+ */
+#define UBUS_SLV__ATE_RX_STATUS_1	0x318
+
+/*
+ * Data received on the even rising edge of the RXC clock on the RGMII Rx
+ * interface.
+ * In MII modes, only Bits[3:
+ * 0] of this register are used only for RXD[3:
+ * 0].
+ * Bit[8]:
+ * RX_ERIn Packet Generation mode bits [7:
+ * 0] are 3rd received byte after SOF.
+*/
+#define  _ATE_RX_STATUS_1_RECEIVED_DATA_2_SHIFT	0
+#define  _ATE_RX_STATUS_1_RECEIVED_DATA_2_MASK	0x1ff
+
+/*
+ * Data received on the odd rising edge of the RXC clock on the RGMII Rx
+ * interface.
+ * In MII modes, only Bits[12:
+ * 9] of this register are used only for RXD[3:
+ * 0].
+ * Bit[17]:
+ * RX_ERIn Packet Generation mode bits [7:
+ * 0] are 4th received byte after SOF.
+*/
+#define  _ATE_RX_STATUS_1_RECEIVED_DATA_3_SHIFT	9
+#define  _ATE_RX_STATUS_1_RECEIVED_DATA_3_MASK	0x3fe00
+
+
+/*
+ * Register <RGMII_ATE_TX_CNTRL>
+ *
+ * RGMII port ATE TX Control Register
+ */
+#define UBUS_SLV__ATE_TX_CNTRL		0x31c
+
+/*
+ * START_STOP override.
+ * When this bit is set, transmit state machine will be controlled by
+ * START_STOP bit of this register instead of the chip pin.
+*/
+#define  _ATE_TX_CNTRL_START_STOP_OVRD_MASK	0x1
+
+/*
+ * start_stop.
+ * When set, transmit state machine starts outputting programmed pattern over
+ * RGMII TX interface.
+ * When cleared transmit state machine stops outputting data.
+*/
+#define  _ATE_TX_CNTRL_START_STOP_MASK	0x2
+
+/*
+ * When this bit is set ATE test logic operates in the packet generation
+ * mode.
+*/
+#define  _ATE_TX_CNTRL_PKT_GEN_EN_MASK	0x4
+
+/*
+ * Number of packets generated when START_STOP bit is set.
+ * When programmed to 0, an infinite number of packets will be transmitted
+ * (i.
+ * e.
+ * until START_STOP is cleared).
+*/
+#define  _ATE_TX_CNTRL_PKT_CNT_SHIFT	3
+#define  _ATE_TX_CNTRL_PKT_CNT_MASK	0x7f8
+
+/*
+ * Generated packet payload in bytes.
+ * Must be between 46B and 1500B.
+*/
+#define  _ATE_TX_CNTRL_PAYLOAD_LENGTH_SHIFT	11
+#define  _ATE_TX_CNTRL_PAYLOAD_LENGTH_MASK	0x3ff800
+
+/* Inter-packet gap in packet generation mode. */
+#define  _ATE_TX_CNTRL_PKT_IPG_SHIFT	22
+#define  _ATE_TX_CNTRL_PKT_IPG_MASK	0xfc00000
+
+
+/*
+ * Register <RGMII_ATE_TX_DATA_0>
+ *
+ * RGMII port ATE TX Data 0 Register
+ */
+#define UBUS_SLV__ATE_TX_DATA_0		0x320
+
+/*
+ * Data transmitted on the even rising edge of the TXC clock on the RGMII
+ * Tx interface.
+ * In case of MII, only bit[3:
+ * 0] are used to transmit TXD[3:
+ * 0].
+ * Bit 8:
+ * TX_ERIn Packet Generation mode bits [7:
+ * 0] are 1st byte of MAC DA.
+*/
+#define  _ATE_TX_DATA_0_TX_DATA_0_SHIFT	0
+#define  _ATE_TX_DATA_0_TX_DATA_0_MASK	0x1ff
+
+/*
+ * Data transmitted on the odd rising edge of the TXC clock on the RGMII Tx
+ * interface.
+ * In case of MII, only bit[12:
+ * 9] are used to transmit TXD[3:
+ * 0].
+ * Bit 17:
+ * TX_ERIn Packet Generation mode bits [7:
+ * 0] are 2nd byte of MAC DA.
+*/
+#define  _ATE_TX_DATA_0_TX_DATA_1_SHIFT	9
+#define  _ATE_TX_DATA_0_TX_DATA_1_MASK	0x3fe00
+
+
+/*
+ * Register <RGMII_ATE_TX_DATA_1>
+ *
+ * RGMII port ATE TX Data 1 Register
+ */
+#define UBUS_SLV__ATE_TX_DATA_1		0x324
+
+/*
+ * Data transmitted on the even rising edge of the TXC clock on the RGMII
+ * Tx interface.
+ * In case of MII, only bit[3:
+ * 0] are used to transmit TXD[3:
+ * 0].
+ * Bit 8:
+ * TX_ERIn Packet Generation mode bits [7:
+ * 0] are 3rd byte of MAC DA.
+*/
+#define  _ATE_TX_DATA_1_TX_DATA_2_SHIFT	0
+#define  _ATE_TX_DATA_1_TX_DATA_2_MASK	0x1ff
+
+/*
+ * Data transmitted on the odd rising edge of the TXC clock on the RGMII Tx
+ * interface.
+ * In case of MII, only bit[12:
+ * 9] are used to transmit TXD[3:
+ * 0].
+ * Bit 17:
+ * TX_ERIn Packet Generation mode bits [7:
+ * 0] are 4th byte of MAC DA.
+*/
+#define  _ATE_TX_DATA_1_TX_DATA_3_SHIFT	9
+#define  _ATE_TX_DATA_1_TX_DATA_3_MASK	0x3fe00
+
+
+/*
+ * Register <RGMII_ATE_TX_DATA_2>
+ *
+ * RGMII port ATE TX Data 2 Register
+ */
+#define UBUS_SLV__ATE_TX_DATA_2		0x328
+
+/*
+ * In Packet Generation mode bits [7:
+ * 0] are 5th byte of MAC DA
+*/
+#define  _ATE_TX_DATA_2_TX_DATA_4_SHIFT	0
+#define  _ATE_TX_DATA_2_TX_DATA_4_MASK	0xff
+
+/*
+ * In Packet Generation mode bits [7:
+ * 0] are 6th byte of MAC DA
+*/
+#define  _ATE_TX_DATA_2_TX_DATA_5_SHIFT	8
+#define  _ATE_TX_DATA_2_TX_DATA_5_MASK	0xff00
+
+/* Generated packet Ethertype */
+#define  _ATE_TX_DATA_2_ETHER_TYPE_SHIFT	16
+#define  _ATE_TX_DATA_2_ETHER_TYPE_MASK	0xffff0000
+
+
+/*
+ * Register <RGMII_TX_DELAY_CNTRL_0>
+ *
+ * RGMII TX Delay Control 0 Register
+ */
+#define UBUS_SLV__TX_DELAY_CNTRL_0	0x32c
+
+/*
+ * txd0 CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming
+*/
+#define  _TX_DELAY_CNTRL_0_TXD0_DEL_SEL_SHIFT	0
+#define  _TX_DELAY_CNTRL_0_TXD0_DEL_SEL_MASK	0x3f
+
+/*
+ * txd0 CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _TX_DELAY_CNTRL_0_TXD0_DEL_OVRD_EN_MASK	0x40
+
+/*
+ * txd1 CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming
+*/
+#define  _TX_DELAY_CNTRL_0_TXD1_DEL_SEL_SHIFT	7
+#define  _TX_DELAY_CNTRL_0_TXD1_DEL_SEL_MASK	0x1f80
+
+/*
+ * txd1 CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _TX_DELAY_CNTRL_0_TXD1_DEL_OVRD_EN_MASK	0x2000
+
+/*
+ * txd2 CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming
+*/
+#define  _TX_DELAY_CNTRL_0_TXD2_DEL_SEL_SHIFT	14
+#define  _TX_DELAY_CNTRL_0_TXD2_DEL_SEL_MASK	0xfc000
+
+/*
+ * txd2 CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _TX_DELAY_CNTRL_0_TXD2_DEL_OVRD_EN_MASK	0x100000
+
+/*
+ * txd3 CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming
+*/
+#define  _TX_DELAY_CNTRL_0_TXD3_DEL_SEL_SHIFT	21
+#define  _TX_DELAY_CNTRL_0_TXD3_DEL_SEL_MASK	0x7e00000
+
+/*
+ * txd3 CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _TX_DELAY_CNTRL_0_TXD3_DEL_OVRD_EN_MASK	0x8000000
+
+
+/*
+ * Register <RGMII_TX_DELAY_CNTRL_1>
+ *
+ * RGMII TX Delay Control 1 Register
+ */
+#define UBUS_SLV__TX_DELAY_CNTRL_1	0x330
+
+/*
+ * txctl CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming.
+*/
+#define  _TX_DELAY_CNTRL_1_TXCTL_DEL_SEL_SHIFT	0
+#define  _TX_DELAY_CNTRL_1_TXCTL_DEL_SEL_MASK	0x3f
+
+/*
+ * txctl CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _TX_DELAY_CNTRL_1_TXCTL_DEL_OVRD_EN_MASK	0x40
+
+/*
+ * txclk NON-ID mode CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming.
+*/
+#define  _TX_DELAY_CNTRL_1_TXCLK_DEL_SEL_SHIFT	7
+#define  _TX_DELAY_CNTRL_1_TXCLK_DEL_SEL_MASK	0x780
+
+/*
+ * txclk NON_ID mode CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _TX_DELAY_CNTRL_1_TXCLK_DEL_OVRD_EN_MASK	0x800
+
+/*
+ * txclk ID mode CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming.
+*/
+#define  _TX_DELAY_CNTRL_1_TXCLK_ID_DEL_SEL_SHIFT	12
+#define  _TX_DELAY_CNTRL_1_TXCLK_ID_DEL_SEL_MASK	0xf000
+
+/*
+ * txclk ID mode CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _TX_DELAY_CNTRL_1_TXCLK_ID_DEL_OVRD_EN_MASK	0x10000
+
+
+/*
+ * Register <RGMII_RX_DELAY_CNTRL_0>
+ *
+ * RGMII RX Delay Control 0 Register
+ */
+#define UBUS_SLV__RX_DELAY_CNTRL_0	0x334
+
+/*
+ * rxd0 CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming
+*/
+#define  _RX_DELAY_CNTRL_0_RXD0_DEL_SEL_SHIFT	0
+#define  _RX_DELAY_CNTRL_0_RXD0_DEL_SEL_MASK	0x3f
+
+/*
+ * rxd0 CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _RX_DELAY_CNTRL_0_RXD0_DEL_OVRD_EN_MASK	0x40
+
+/*
+ * rxd1 CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming
+*/
+#define  _RX_DELAY_CNTRL_0_RXD1_DEL_SEL_SHIFT	7
+#define  _RX_DELAY_CNTRL_0_RXD1_DEL_SEL_MASK	0x1f80
+
+/*
+ * rxd1 CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _RX_DELAY_CNTRL_0_RXD1_DEL_OVRD_EN_MASK	0x2000
+
+/*
+ * rxd2 CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming
+*/
+#define  _RX_DELAY_CNTRL_0_RXD2_DEL_SEL_SHIFT	14
+#define  _RX_DELAY_CNTRL_0_RXD2_DEL_SEL_MASK	0xfc000
+
+/*
+ * rxd2 CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _RX_DELAY_CNTRL_0_RXD2_DEL_OVRD_EN_MASK	0x100000
+
+/*
+ * rxd3 CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming
+*/
+#define  _RX_DELAY_CNTRL_0_RXD3_DEL_SEL_SHIFT	21
+#define  _RX_DELAY_CNTRL_0_RXD3_DEL_SEL_MASK	0x7e00000
+
+/*
+ * rxd3 CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _RX_DELAY_CNTRL_0_RXD3_DEL_OVRD_EN_MASK	0x8000000
+
+
+/*
+ * Register <RGMII_RX_DELAY_CNTRL_1>
+ *
+ * RGMII RX Delay Control 1 Register
+ */
+#define UBUS_SLV__RX_DELAY_CNTRL_1	0x338
+
+/*
+ * rxd4 CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming
+*/
+#define  _RX_DELAY_CNTRL_1_RXD4_DEL_SEL_SHIFT	0
+#define  _RX_DELAY_CNTRL_1_RXD4_DEL_SEL_MASK	0x3f
+
+/*
+ * rxd4 CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _RX_DELAY_CNTRL_1_RXD4_DEL_OVRD_EN_MASK	0x40
+
+/*
+ * rxd5 CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming
+*/
+#define  _RX_DELAY_CNTRL_1_RXD5_DEL_SEL_SHIFT	7
+#define  _RX_DELAY_CNTRL_1_RXD5_DEL_SEL_MASK	0x1f80
+
+/*
+ * rxd5 CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _RX_DELAY_CNTRL_1_RXD5_DEL_OVRD_EN_MASK	0x2000
+
+/*
+ * rxd6 CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming
+*/
+#define  _RX_DELAY_CNTRL_1_RXD6_DEL_SEL_SHIFT	14
+#define  _RX_DELAY_CNTRL_1_RXD6_DEL_SEL_MASK	0xfc000
+
+/*
+ * rxd6 CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _RX_DELAY_CNTRL_1_RXD6_DEL_OVRD_EN_MASK	0x100000
+
+/*
+ * rxd7 CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming
+*/
+#define  _RX_DELAY_CNTRL_1_RXD7_DEL_SEL_SHIFT	21
+#define  _RX_DELAY_CNTRL_1_RXD7_DEL_SEL_MASK	0x7e00000
+
+/*
+ * rxd7 CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _RX_DELAY_CNTRL_1_RXD7_DEL_OVRD_EN_MASK	0x8000000
+
+
+/*
+ * Register <RGMII_RX_DELAY_CNTRL_2>
+ *
+ * RGMII RX Delay Control 2 Register
+ */
+#define UBUS_SLV__RX_DELAY_CNTRL_2	0x33c
+
+/*
+ * rxctl_pos CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming
+*/
+#define  _RX_DELAY_CNTRL_2_RXCTL_POS_DEL_SEL_SHIFT	0
+#define  _RX_DELAY_CNTRL_2_RXCTL_POS_DEL_SEL_MASK	0x3f
+
+/*
+ * rxctl_pos CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _RX_DELAY_CNTRL_2_RXCTL_POS_DEL_OVRD_EN_MASK	0x40
+
+/*
+ * rxctl_neg CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming.
+*/
+#define  _RX_DELAY_CNTRL_2_RXCTL_NEG_DEL_SEL_SHIFT	7
+#define  _RX_DELAY_CNTRL_2_RXCTL_NEG_DEL_SEL_MASK	0x1f80
+
+/*
+ * rxctl_neg CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _RX_DELAY_CNTRL_2_RXCTL_NEG_DEL_OVRD_EN_MASK	0x2000
+
+/*
+ * rxclk CKTAP delay control.
+ * Refer to the CKTAP datasheet for programming.
+*/
+#define  _RX_DELAY_CNTRL_2_RXCLK_DEL_SEL_SHIFT	14
+#define  _RX_DELAY_CNTRL_2_RXCLK_DEL_SEL_MASK	0x3c000
+
+/*
+ * rxclk CKTAP delay override enable.
+ * When set enables CKTAP delay to be controlled from this register.
+*/
+#define  _RX_DELAY_CNTRL_2_RXCLK_DEL_OVRD_EN_MASK	0x40000
+
+
+/*
+ * Register <RGMII_CLK_RST_CTRL>
+ *
+ * Controls the following:
+ * i_sw_initi_clk_250_en
+ */
+#define UBUS_SLV__CLK_RST_CTRL		0x340
+
+/* SW init */
+#define  _CLK_RST_CTRL_SWINIT_MASK	0x1
+
+/* Enables clock 250 */
+#define  _CLK_RST_CTRL_CLK250EN_MASK	0x2
+
+
+#endif /* ! XRDP_REGS_UBUS_SLV_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/runner_program.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/runner_program.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/runner_program.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/runner_program.h	2025-09-25 17:40:35.075364786 +0200
@@ -0,0 +1,37 @@
+#ifndef RUNNER_PROGRAM_H_
+#define RUNNER_PROGRAM_H_
+
+/*
+ * all fields are big endian
+ */
+
+struct rpgm_load {
+	__be32		offset;
+	__be32		value;
+};
+
+enum rpgm_section_type {
+	RPGM_SECTION_CODE = 0,
+	RPGM_SECTION_DATA = 1,
+	RPGM_SECTION_CONTEXT = 2,
+	RPGM_SECTION_PREDICT = 3,
+};
+
+struct rpgm_section {
+	__be32		type;
+	__be32		do_memset; /* if non zero, will memset whole section area */
+	__be32		memset_value;
+	__be32		load_count;
+	struct rpgm_load	loads[0];
+};
+
+#define RPGM_MAGIC	0x39274e69
+
+struct rpgm_header {
+	__be32		magic;
+	__be32		version;
+	__be32		section_count;
+	struct rpgm_section sections[0];
+};
+
+#endif
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/xrdp.c linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/xrdp.c
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/xrdp.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/xrdp.c	2025-09-25 17:40:35.075364786 +0200
@@ -0,0 +1,2607 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/reset.h>
+#include <linux/firmware.h>
+#include <linux/bcm63xx_rdp_ioctl.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/ubus4.h>
+
+#include "xrdp_priv.h"
+#include "runner_program.h"
+
+/*
+ * don't load any runner firmware
+ */
+static int skip_load;
+module_param(skip_load, int, S_IRUGO);
+
+struct bbh_dma_config {
+	bool			configure;
+
+	/* which module to use for this BBH, each case there are
+	 * multiple instances of DMA or SDMA */
+	unsigned int		module_id;
+
+	/* number of chunks to reserve for rx & tx on dma module */
+	unsigned int		rx_chunk_count;
+	unsigned int		tx_chunk_count;
+
+	/* threshold for entering and leaving urgent mode */
+	unsigned int		thresh_into;
+	unsigned int		thresh_outof;
+};
+
+struct bbh_config {
+	bool			rx_configure;
+	bool			tx_configure;
+
+	/*
+	 * fields needed for both RX & TX
+	 */
+
+	/* BBH hardware IDs on the broadbus bus  */
+	unsigned int		rx_broadbus_id;
+	unsigned int		tx_broadbus_id;
+
+	/*
+	 * 0 for DMA, 1 for SDMA
+	 *
+	 * (note: DMA used only for TX, RX uses SDMA only)
+	 */
+	struct bbh_dma_config	dma_configs[2];
+
+
+
+	/*
+	 * fields needed for RX
+	 */
+
+	/* accepted packet size range */
+	unsigned int		min_pkt_size;
+	unsigned int		max_pkt_size;
+
+	/* VIQ to use */
+	unsigned int		rx_viq;
+
+
+
+	/*
+	 * fields needed for TX
+	 */
+
+	/* runners pushing PD in tx */
+	int			tx_runners[2];
+
+	/* runner pushing "status" (?) message  */
+	int			tx_status_runner;
+
+	/* runner pushing report (epon) message  */
+	int			tx_report_runner;
+
+	/*
+	 * sram address to use for "skb_addr" pointer, used by BBH
+	 * only when packet descriptor has "absolute address" flag
+	 * to be able to send from DDR
+	 */
+	unsigned int		skb_addr_sram;
+
+	/*
+	 * sram address to use for "tcont_addr" pointer, used by BBH
+	 * *not* in "MDU mode"
+	 */
+	unsigned int		tcont_addr_sram;
+
+	/*
+	 * sram address to use for "ptr_addr" pointer, used by BBH in
+	 * "MDU mode"
+	 */
+	unsigned int		ptr_addr_sram;
+
+	/*
+	 * task id to wakeup when TX is full/not empty, depending on
+	 * mode used
+	 *
+	 * ENET has only 8 queues
+	 * GPON has 40 queues, queue 8-39 shares the same task_id
+	 */
+	unsigned int		tx_queue_task_id[9];
+
+	/*
+	 * mapping or each tx queue to one of the two runner cores
+	 * assigned to that bbh
+	 *
+	 * expected value is 0 for first runner, and 1 for second
+	 * runner
+	 */
+	unsigned int		tx_queue_to_runner[40];
+
+	/*
+	 * sram address to use for "tcont_addr" pointer for statusing
+	 * (epon only)
+	 */
+	unsigned int		status_tcont_addr_sram;
+
+	/*
+	 * sram address to use for "ptr_addr" pointer for statusing
+	 * (epon only)
+	 */
+	unsigned int		status_ptr_addr_sram;
+
+	/*
+	 * status task id to wakeup when TX is full/not empty
+	 */
+	unsigned int		status_tx_task_id[2];
+
+	/*
+	 * sram address to use for "tcont_addr" pointer for reporting
+	 * (epon only)
+	 */
+	unsigned int		report_tcont_addr_sram;
+
+	/*
+	 * sram address to use for "ptr_addr" pointer for reporting
+	 * (epon only)
+	 */
+	unsigned int		report_ptr_addr_sram;
+
+	/*
+	 * report task id to wakeup when TX is full/not empty
+	 */
+	unsigned int		report_tx_task_id[2];
+};
+
+struct disp_grp_config {
+	bool			configure;
+
+	/*
+	 * 16 bits mask (one bit per task) for each runner.
+	 *
+	 * task will be assigned to this runner dispatch group if bit
+	 * it set
+	 */
+	unsigned int		task_mask[RDP_RUNNER_COUNT];
+};
+
+struct disp_rnr_config {
+	bool			configure;
+
+	/* where the dispatcher writes its PD in runner memory */
+	unsigned int		pd_base;
+	unsigned int		pd_per_tsk_off;
+};
+
+struct disp_viq_config {
+	bool			configure;
+	unsigned int		runner_group;
+
+	unsigned int		ing_cong_frst_lvl;
+	unsigned int		ing_cong_scnd_lvl;
+	unsigned int		ing_cong_hyst_thr;
+
+	unsigned int		guaranteed_pool_limit;
+	unsigned int		common_pool_limit;
+};
+
+struct disp_config {
+	struct disp_rnr_config	runners[RDP_RUNNER_COUNT];
+	struct disp_grp_config	groups[RDP_DISP_RNR_GRP_COUNT];
+	struct disp_viq_config	viqs[RDP_DISP_VIQ_COUNT];
+};
+
+/*
+ * static dispatcher configuration: one RX PD area per runner, one
+ * dispatch group and one VIQ per BBH (groups/viqs indexed by
+ * RDP_BBH_IDX_*)
+ */
+static const struct disp_config disp_config = {
+	.runners = {
+		/*
+		 * configuration for RX dispatcher, indicates where to
+		 * put the RX PDs in the runner memory
+		 *
+		 * there is a base address used for task 0, and a
+		 * configurable offset for each task id
+		 *
+		 * enabled for all runners now but should be only done
+		 * when needed
+		 */
+		[0] = {
+			.configure	= true,
+			.pd_base	= 0x1000 / 8,
+			.pd_per_tsk_off	= 16 / 8,
+		},
+
+		[1] = {
+			.configure	= true,
+			.pd_base	= 0x1000 / 8,
+			.pd_per_tsk_off	= 16 / 8,
+		},
+
+		[2] = {
+			.configure	= true,
+			.pd_base	= 0x1000 / 8,
+			.pd_per_tsk_off	= 16 / 8,
+		},
+
+		[3] = {
+			.configure	= true,
+			.pd_base	= 0x1000 / 8,
+			.pd_per_tsk_off	= 16 / 8,
+		},
+
+		[4] = {
+			.configure	= true,
+			.pd_base	= 0x1000 / 8,
+			.pd_per_tsk_off	= 16 / 8,
+		},
+
+		[5] = {
+			.configure	= true,
+			.pd_base	= 0x1000 / 8,
+			.pd_per_tsk_off	= 16 / 8,
+		},
+	},
+
+	.groups = {
+		/*
+		 * dispatch group 0 for BBH 0
+		 */
+		[RDP_BBH_IDX_UNIMAC0] = {
+			.configure	= true,
+			.task_mask	= {
+				[UNIMAC0_BBH_RX_CORE]	=
+				1 << ENET_FW_RX_XF_QUEUEx_TASK_ID(UNIMAC0_BBH_RX_QUEUE, 0),
+			},
+		},
+
+		/*
+		 * dispatch group 1 for BBH 1
+		 */
+		[RDP_BBH_IDX_UNIMAC1] = {
+			.configure	= true,
+			.task_mask	= {
+				[UNIMAC1_BBH_RX_CORE]	=
+				1 << ENET_FW_RX_XF_QUEUEx_TASK_ID(UNIMAC1_BBH_RX_QUEUE, 0),
+			},
+		},
+
+		/*
+		 * dispatch group 2 for BBH 2
+		 */
+		[RDP_BBH_IDX_UNIMAC2] = {
+			.configure	= true,
+			.task_mask	= {
+				[UNIMAC2_BBH_RX_CORE]	=
+				1 << ENET_FW_RX_XF_QUEUEx_TASK_ID(UNIMAC2_BBH_RX_QUEUE, 0),
+			},
+		},
+
+		/*
+		 * dispatch group 3 for BBH 3 (PON uses three RX tasks)
+		 */
+		[RDP_BBH_IDX_PON] = {
+			.configure	= true,
+			.task_mask	= {
+				[PON_BBH_RX_CORE]	=
+				(1 << ENET_FW_RX_XF_QUEUEx_TASK_ID(PON_BBH_RX_QUEUE, 0)) |
+				(1 << ENET_FW_RX_XF_QUEUEx_TASK_ID(PON_BBH_RX_QUEUE, 1)) |
+				(1 << ENET_FW_RX_XF_QUEUEx_TASK_ID(PON_BBH_RX_QUEUE, 2)),
+			},
+		},
+
+		/*
+		 * dispatch group 4 for BBH 4 (AE 10G uses three RX tasks)
+		 */
+		[RDP_BBH_IDX_AE10] = {
+			.configure	= true,
+			.task_mask	= {
+				[AE10_BBH_RX_CORE]	=
+				(1 << ENET_FW_RX_XF_QUEUEx_TASK_ID(AE10_BBH_RX_QUEUE, 0)) |
+				(1 << ENET_FW_RX_XF_QUEUEx_TASK_ID(AE10_BBH_RX_QUEUE, 1)) |
+				(1 << ENET_FW_RX_XF_QUEUEx_TASK_ID(AE10_BBH_RX_QUEUE, 2)),
+			},
+		},
+
+		/*
+		 * dispatch group 6 for BBH 6
+		 */
+		[RDP_BBH_IDX_DSL] = {
+			.configure	= true,
+			.task_mask	= {
+				[DSL_BBH_RX_CORE]	=
+				1 << DSL_FW_RX_QUEUEx_TASK_ID(DSL_BBH_RX_QUEUE),
+			},
+		},
+	},
+
+	.viqs = {
+		/* VIQ for BBH 0 */
+		[RDP_BBH_IDX_UNIMAC0] = {
+			.configure		= true,
+			.runner_group		= RDP_BBH_IDX_UNIMAC0,
+			.ing_cong_frst_lvl	= RDP_DIS_REOR_FLL_BUF_COUNT - 1,
+			.ing_cong_scnd_lvl	= RDP_DIS_REOR_FLL_BUF_COUNT - 1,
+			.ing_cong_hyst_thr	= 8,
+			.guaranteed_pool_limit	= 8,
+			.common_pool_limit	= 225,
+		},
+
+		/* VIQ for BBH 1 */
+		[RDP_BBH_IDX_UNIMAC1] = {
+			.configure		= true,
+			.runner_group		= RDP_BBH_IDX_UNIMAC1,
+			.ing_cong_frst_lvl	= RDP_DIS_REOR_FLL_BUF_COUNT - 1,
+			.ing_cong_scnd_lvl	= RDP_DIS_REOR_FLL_BUF_COUNT - 1,
+			.ing_cong_hyst_thr	= 8,
+			.guaranteed_pool_limit	= 8,
+			.common_pool_limit	= 225,
+		},
+
+		/* VIQ for BBH 2 */
+		[RDP_BBH_IDX_UNIMAC2] = {
+			.configure		= true,
+			.runner_group		= RDP_BBH_IDX_UNIMAC2,
+			.ing_cong_frst_lvl	= RDP_DIS_REOR_FLL_BUF_COUNT - 1,
+			.ing_cong_scnd_lvl	= RDP_DIS_REOR_FLL_BUF_COUNT - 1,
+			.ing_cong_hyst_thr	= 8,
+			.guaranteed_pool_limit	= 8,
+			.common_pool_limit	= 225,
+		},
+
+		/* VIQ for BBH 3 */
+		[RDP_BBH_IDX_PON] = {
+			.configure		= true,
+			.runner_group		= RDP_BBH_IDX_PON,
+			.ing_cong_frst_lvl	= RDP_DIS_REOR_FLL_BUF_COUNT - 1,
+			.ing_cong_scnd_lvl	= RDP_DIS_REOR_FLL_BUF_COUNT - 1,
+			.ing_cong_hyst_thr	= 8,
+			.guaranteed_pool_limit	= 8,
+			.common_pool_limit	= 225,
+		},
+
+		/* VIQ for BBH 4 */
+		[RDP_BBH_IDX_AE10] = {
+			.configure		= true,
+			.runner_group		= RDP_BBH_IDX_AE10,
+			.ing_cong_frst_lvl	= RDP_DIS_REOR_FLL_BUF_COUNT - 1,
+			.ing_cong_scnd_lvl	= RDP_DIS_REOR_FLL_BUF_COUNT - 1,
+			.ing_cong_hyst_thr	= 8,
+			.guaranteed_pool_limit	= 8,
+			.common_pool_limit	= 225,
+		},
+
+		/* VIQ for BBH 6 */
+		[RDP_BBH_IDX_DSL] = {
+			.configure		= true,
+			.runner_group		= RDP_BBH_IDX_DSL,
+			.ing_cong_frst_lvl	= RDP_DIS_REOR_FLL_BUF_COUNT - 1,
+			.ing_cong_scnd_lvl	= RDP_DIS_REOR_FLL_BUF_COUNT - 1,
+			.ing_cong_hyst_thr	= 8,
+			.guaranteed_pool_limit	= 8,
+			.common_pool_limit	= 225,
+		},
+	},
+};
+
+/*
+ * static per-BBH configuration, indexed by RDP_BBH_IDX_*: DMA/SDMA
+ * module assignment and chunk counts, packet size limits, and TX
+ * runner/task wiring
+ */
+static const struct bbh_config bbh_configs[RDP_BBH_COUNT] = {
+	/* bbh0 => ethernet unimac0 rx, unimac[012] tx */
+	[RDP_BBH_IDX_UNIMAC0] = {
+		.rx_configure		= true,
+		.tx_configure		= true,
+		.rx_broadbus_id		= BB_ID_RX_BBH_0,
+		.tx_broadbus_id		= BB_ID_TX_LAN,
+
+		.min_pkt_size		= 60,
+		.max_pkt_size		= 2048,
+		.rx_viq			= RDP_BBH_IDX_UNIMAC0,
+
+		.dma_configs		= {
+			[0] = {
+				.configure		= true,
+				.module_id		= 0,
+				.tx_chunk_count		= 12,
+			},
+
+			[1] = {
+				.configure		= true,
+				.module_id		= 1,
+				.rx_chunk_count		= 12,
+				.tx_chunk_count		= 32,
+			}
+		},
+
+		.tx_runners		= { UNIMACx_BBH_TX_CORE, -1 },
+		.skb_addr_sram		= 0x1800 / 8, /* unused (only BN) */
+		.tcont_addr_sram	= 0x1820 / 8, /* unused */
+		.ptr_addr_sram		= 0x1840 / 8,
+		.tx_queue_task_id	= {
+			ENET_FW_TX_QUEUEx_TASK_ID(UNIMAC0_BBH_TX_QUEUE),
+			ENET_FW_TX_QUEUEx_TASK_ID(UNIMAC1_BBH_TX_QUEUE),
+			ENET_FW_TX_QUEUEx_TASK_ID(UNIMAC2_BBH_TX_QUEUE),
+		},
+		.tx_queue_to_runner	= {
+			0, /* remaining are set to 0 */
+		},
+	},
+
+	/* bbh1 => ethernet unimac1 rx */
+	[RDP_BBH_IDX_UNIMAC1] = {
+		.rx_configure		= true,
+		.rx_broadbus_id		= BB_ID_RX_BBH_1,
+		.min_pkt_size		= 60,
+		.max_pkt_size		= 2048,
+		.rx_viq			= RDP_BBH_IDX_UNIMAC1,
+
+		/* tx handled by bbh0, so no DMA needed  */
+		.dma_configs		= {
+			[1] = {
+				.configure		= true,
+				.module_id		= 1,
+				.rx_chunk_count		= 12,
+			}
+		},
+	},
+
+	/* bbh2 => ethernet unimac2 rx */
+	[RDP_BBH_IDX_UNIMAC2] = {
+		.rx_configure		= true,
+		.rx_broadbus_id		= BB_ID_RX_BBH_2,
+		.min_pkt_size		= 60,
+		.max_pkt_size		= 2048,
+		.rx_viq			= RDP_BBH_IDX_UNIMAC2,
+
+		/* tx handled by bbh0, so no DMA needed  */
+		.dma_configs		= {
+			[1] = {
+				.configure		= true,
+				.module_id		= 1,
+				.rx_chunk_count		= 12,
+			}
+		},
+	},
+
+	/* bbh3 => PON */
+	[RDP_BBH_IDX_PON] = {
+		.rx_configure		= true,
+		.tx_configure		= true,
+		.rx_broadbus_id		= BB_ID_RX_PON,
+		.tx_broadbus_id		= BB_ID_TX_PON,
+		.min_pkt_size		= 60,
+		.max_pkt_size		= 2048,
+		.rx_viq			= RDP_BBH_IDX_PON,
+
+		.dma_configs		= {
+			[0] = {
+				.configure		= true,
+				.module_id		= 0,
+				.tx_chunk_count		= 20,
+			},
+
+			[1] = {
+				.configure		= true,
+				.module_id		= 2,
+				.rx_chunk_count		= 24,
+				.tx_chunk_count		= 32,
+			}
+		},
+
+		.tx_runners		= { PON_BBH_TX_CORE, -1 },
+		.skb_addr_sram		= 0x1800 / 8, /* unused (only BN) */
+		.tcont_addr_sram	= 0x1820 / 8, /* unused */
+		.ptr_addr_sram		= 0x1840 / 8,
+
+		.tx_queue_task_id	= {
+			ENET_FW_TX_QUEUEx_TASK_ID(PON_BBH_TX_QUEUE),
+		},
+
+		.tx_queue_to_runner	= {
+			0, /* remaining are set to 0 */
+		},
+
+		/* PON also uses the report/status runner channels */
+		.tx_report_runner	= PON_BBH_TX_CORE,
+		.report_tcont_addr_sram	= 0x1860 / 8, /* unused */
+		.report_ptr_addr_sram	= 0x1880 / 8,
+		.report_tx_task_id	= { 15 },
+
+		.tx_status_runner	= PON_BBH_TX_CORE,
+		.status_tcont_addr_sram	= 0x18a0 / 8, /* unused */
+		.status_ptr_addr_sram	= 0x18c0 / 8,
+		.status_tx_task_id	= { 14 },
+	},
+
+	/* bbh4 => active ethernet 10G rx/tx */
+	[RDP_BBH_IDX_AE10] = {
+		.rx_configure		= true,
+		.tx_configure		= true,
+		.rx_broadbus_id		= BB_ID_RX_10G,
+		.tx_broadbus_id		= BB_ID_TX_10G,
+		.min_pkt_size		= 60,
+		.max_pkt_size		= 2048,
+		.rx_viq			= RDP_BBH_IDX_AE10,
+
+		.dma_configs		= {
+			[0] = {
+				.configure		= true,
+				.module_id		= 0,
+				.tx_chunk_count		= 20,
+			},
+
+			[1] = {
+				.configure		= true,
+				.module_id		= 2,
+				.rx_chunk_count		= 24,
+				.tx_chunk_count		= 32,
+			}
+		},
+
+		.tx_runners		= { AE10_BBH_TX_CORE, -1 },
+		.skb_addr_sram		= 0x1900 / 8, /* unused */
+		.tcont_addr_sram	= 0x1920 / 8, /* unused */
+		.ptr_addr_sram		= 0x1940 / 8,
+		.tx_queue_task_id	= {
+			ENET_FW_TX_QUEUEx_TASK_ID(AE10_BBH_TX_QUEUE),
+		},
+		.tx_queue_to_runner	= {
+			0, /* remaining are set to 0 */
+		},
+	},
+
+	/* bbh5 => active ethernet 2.5G (sf2 xbar) */
+	[RDP_BBH_IDX_AE25] = {
+		.rx_configure		= false,
+	},
+
+	/* bbh6 => DSL */
+	[RDP_BBH_IDX_DSL] = {
+		.rx_configure		= true,
+		.tx_configure		= true,
+		.rx_broadbus_id		= BB_ID_RX_DSL,
+		.tx_broadbus_id		= BB_ID_TX_DSL,
+
+		/* min_pkt_size of 1: DSL can carry tiny frames */
+		.min_pkt_size		= 1,
+		.max_pkt_size		= 2048,
+		.rx_viq			= RDP_BBH_IDX_DSL,
+
+		.dma_configs		= {
+			[0] = {
+				.configure		= true,
+				.module_id		= 0,
+				.tx_chunk_count		= 12,
+			},
+
+			[1] = {
+				.configure		= true,
+				.module_id		= 1,
+				.rx_chunk_count		= 12,
+				.tx_chunk_count		= 32,
+			}
+		},
+
+		.tx_runners		= { DSL_BBH_TX_CORE, -1 },
+		.skb_addr_sram		= 0x1800 / 8, /* unused */
+		.tcont_addr_sram	= 0x1820 / 8, /* unused */
+		.ptr_addr_sram		= 0x1840 / 8,
+		.tx_queue_task_id	= {
+			DSL_FW_TX_QUEUEx_TASK_ID(0),
+			DSL_FW_TX_QUEUEx_TASK_ID(1),
+			DSL_FW_TX_QUEUEx_TASK_ID(2),
+			DSL_FW_TX_QUEUEx_TASK_ID(3),
+			DSL_FW_TX_QUEUEx_TASK_ID(4),
+			DSL_FW_TX_QUEUEx_TASK_ID(5),
+			DSL_FW_TX_QUEUEx_TASK_ID(6),
+		},
+		.tx_queue_to_runner	= {
+			0, /* remaining are set to 0 */
+		},
+	},
+};
+
+
+/*
+ * clear the per-runner memories (data SRAM, instruction, context and
+ * prediction memories) and the shared packet SRAM so the hardware
+ * starts from a known state before firmware is loaded
+ */
+static void xrdp_zero_memories(struct bcm_xrdp_priv *priv)
+{
+	size_t i;
+
+	for (i = 0; i < RDP_RUNNER_COUNT; i++) {
+		xrdp_memset32(priv, XRDP_AREA_CORE,
+			      RNR_SRAM_OFFSET(i), 0, RNR_SRAM_SIZE);
+		xrdp_memset32(priv, XRDP_AREA_CORE,
+			      RNR_INST_OFFSET(i), 0, RNR_INST_SIZE);
+		xrdp_memset32(priv, XRDP_AREA_CORE,
+			      RNR_CNXT_OFFSET(i), 0, RNR_CNXT_SIZE);
+		xrdp_memset32(priv, XRDP_AREA_CORE,
+			      RNR_PRED_OFFSET(i), 0, RNR_PRED_SIZE);
+	}
+
+	/*
+	 * PSRAM is not per-runner: clear it once instead of once per
+	 * loop iteration as before
+	 */
+	xrdp_memset32(priv, XRDP_AREA_CORE,
+		      RDP_PSRAM_OFFSET, 0, RDP_PSRAM_SIZE);
+}
+
+/*
+ * derive per-DMA-module chunk layout from the static BBH configs.
+ *
+ * every configured BBH is registered with the DMA module(s) it
+ * references; per module we accumulate the RX/TX chunk totals and
+ * record, per BBH, the offset where its chunks start.
+ *
+ * returns 0 on success, -EINVAL if a module is over-subscribed.
+ */
+static int xrdp_compute_params(struct bcm_xrdp_priv *priv)
+{
+	size_t i;
+
+	/*
+	 * compute total number of DMA chunks per module according to
+	 * BBH configs
+	 */
+	memset(priv->dma_params, 0, sizeof (priv->dma_params));
+
+	for (i = 0; i < ARRAY_SIZE(priv->bbh_params); i++) {
+		const struct bbh_config *bc = &bbh_configs[i];
+		struct bbh_params *bp = &priv->bbh_params[i];
+		size_t j;
+
+		if (!bc->rx_configure)
+			continue;
+
+		for (j = 0; j < ARRAY_SIZE(bc->dma_configs); j++) {
+			const struct bbh_dma_config *bdc = &bc->dma_configs[j];
+			struct bbh_dma_params *bdp = &bp->dma_params[j];
+			struct dma_params *dp;
+
+			if (!bdc->configure)
+				continue;
+
+			dp = &priv->dma_params[bdc->module_id];
+
+			if (dp->assigned_bbh_count ==
+			    ARRAY_SIZE(dp->assigned_bbh_cfg)) {
+				/* report the module id, not the bbh index */
+				dev_err(&priv->pdev->dev,
+					"DMA module %u assigned too many "
+					"peripherals (bbh %zu)\n",
+					bdc->module_id, i);
+				return -EINVAL;
+			}
+
+			dp->assigned_bbh_cfg[dp->assigned_bbh_count] = bc;
+			dp->assigned_bbh_dma_cfg[dp->assigned_bbh_count] = bdc;
+			dp->assigned_bbh_count++;
+
+			/* this BBH's chunks start where previous ones end */
+			bdp->rx_offset = dp->total_rx_chunks;
+			bdp->tx_offset = dp->total_tx_chunks;
+
+			dp->total_rx_chunks += bdc->rx_chunk_count;
+			dp->total_tx_chunks += bdc->tx_chunk_count;
+		}
+	}
+
+	/* sanity check */
+	for (i = 0; i < ARRAY_SIZE(priv->dma_params); i++) {
+		const struct dma_params *dp = &priv->dma_params[i];
+
+		if (dp->total_rx_chunks > RDP_DMA_CHUNK_RX_COUNT) {
+			dev_err(&priv->pdev->dev,
+				"DMA config for module %zu invalid, %u is "
+				"larger than %u max rx chunks\n",
+				i, dp->total_rx_chunks,
+				RDP_DMA_CHUNK_RX_COUNT);
+			return -EINVAL;
+		}
+
+		if (dp->total_tx_chunks > RDP_DMA_CHUNK_TX_COUNT) {
+			dev_err(&priv->pdev->dev,
+				"DMA config for module %zu invalid, %u is "
+				"larger than %u max tx chunks\n",
+				i, dp->total_tx_chunks,
+				RDP_DMA_CHUNK_TX_COUNT);
+			return -EINVAL;
+		}
+
+		if (dp->total_rx_chunks &&
+		    dp->total_rx_chunks < RDP_DMA_CHUNK_RX_COUNT) {
+			dev_warn(&priv->pdev->dev,
+				 "DMA rx config for module %zu not optimal, "
+				 "only %u chunks used, could raise to %u\n",
+				 i, dp->total_rx_chunks,
+				 RDP_DMA_CHUNK_RX_COUNT);
+		}
+
+		if (dp->total_tx_chunks &&
+		    dp->total_tx_chunks < RDP_DMA_CHUNK_TX_COUNT) {
+			dev_warn(&priv->pdev->dev,
+				 "DMA tx config for module %zu not optimal, "
+				 "only %u chunks used, could raise to %u\n",
+				 i, dp->total_tx_chunks,
+				 RDP_DMA_CHUNK_TX_COUNT);
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * configure the UBUS masters and slave windows, then enable the
+ * master port
+ */
+static void xrdp_setup_ubus(struct bcm_xrdp_priv *priv)
+{
+	u32 val;
+	size_t i;
+
+	/* apply credits / congestion threshold / port remap per master */
+	for (i = 0; i < ARRAY_SIZE(priv->ubus_masters); i++) {
+		ubus_master_apply_credits(priv->ubus_masters[i]);
+		ubus_master_set_congestion_threshold(priv->ubus_masters[i], 0);
+		ubus_master_remap_port(priv->ubus_masters[i]);
+	}
+
+	/* config ubus slave */
+	ubus_slave_writel(priv, UBUS_SLV_VPB_BASE, 0x82d00000);
+	ubus_slave_writel(priv, UBUS_SLV_VPB_MASK, 0xffe00000);
+	ubus_slave_writel(priv, UBUS_SLV_APB_BASE, 0x82e00000);
+	ubus_slave_writel(priv, UBUS_SLV_APB_MASK, 0xffe00000);
+
+	/* set command and data hysteresis space to 2 */
+	val = ubus_master_readl(priv, UBUS_MSTR_HYST_CTRL);
+	val &= ~HYST_CTRL_CMD_SPACE_MASK;
+	val &= ~HYST_CTRL_DATA_SPACE_MASK;
+	val |= 2 << HYST_CTRL_CMD_SPACE_SHIFT;
+	val |= 2 << HYST_CTRL_DATA_SPACE_SHIFT;
+	ubus_master_writel(priv, UBUS_MSTR_HYST_CTRL, val);
+
+	/* enable ubus master */
+	val = ubus_master_readl(priv, UBUS_MSTR_EN);
+	val |= EN_EN_MASK;
+	ubus_master_writel(priv, UBUS_MSTR_EN, val);
+}
+
+/*
+ * configure the SBPM (SRAM buffer pool manager): initialize the free
+ * list to cover the whole packet SRAM and set user-group thresholds
+ */
+static void xrdp_setup_sbpm(struct bcm_xrdp_priv *priv)
+{
+	size_t i;
+	u32 val, max_bn;
+
+	/*
+	 * setup SBPM to use whole psram
+	 */
+	val = sbpm_reg_read(priv, SBPM_REGS_INIT_FREE_LIST);
+	val &= ~REGS_INIT_FREE_LIST_INIT_BASE_ADDR_MASK;
+	val &= ~REGS_INIT_FREE_LIST_INIT_OFFSET_MASK;
+
+	/*
+	 * this allows changing the base BN that will be allocated, to
+	 * allow using PSRAM for something else
+	 */
+	val |= (0 << REGS_INIT_FREE_LIST_INIT_BASE_ADDR_SHIFT);
+
+	/*
+	 * this is the maximum number of allocated BN - 1, each buffer
+	 * is 128 bytes
+	 */
+	max_bn = RDP_PSRAM_SIZE / RDP_SBPM_BUF_SIZE;
+	val |= (max_bn - 1) << REGS_INIT_FREE_LIST_INIT_OFFSET_SHIFT;
+	sbpm_reg_write(priv, SBPM_REGS_INIT_FREE_LIST, val);
+
+	/* wait (up to ~1ms) for the free list init to complete */
+	for (i = 0; i < 1000; i++) {
+		val = sbpm_reg_read(priv, SBPM_REGS_INIT_FREE_LIST);
+		if (val & REGS_INIT_FREE_LIST_RDY_MASK)
+			break;
+		udelay(1);
+	}
+
+	if (i == 1000)
+		dev_err(&priv->pdev->dev, "SBPM init timeout\n");
+
+	/* move more buffers to SBPM0 since we don't use UG1 */
+	sbpm_reg_write(priv, SBPM_REGS_SBPM_UG0_TRSH, 0x000a07ff);
+	sbpm_reg_write(priv, SBPM_REGS_SBPM_UG1_TRSH, 0x000a0000);
+	sbpm_reg_write(priv, SBPM_REGS_SBPM_UG0_EXCL_HIGH_TRSH, 0x000a07e0);
+	sbpm_reg_write(priv, SBPM_REGS_SBPM_UG0_EXCL_LOW_TRSH, 0x000a0750);
+	sbpm_reg_write(priv, SBPM_REGS_SBPM_UG1_EXCL_HIGH_TRSH, 0x000a0000);
+	sbpm_reg_write(priv, SBPM_REGS_SBPM_UG1_EXCL_LOW_TRSH, 0x000a0000);
+}
+
+/*
+ * program one DMA module with the per-peripheral chunk counts and
+ * broadbus source ids computed by xrdp_compute_params()
+ */
+static void xrdp_setup_dma_module(struct bcm_xrdp_priv *priv,
+				  unsigned int id)
+{
+	const struct dma_params *dp = &priv->dma_params[id];
+	unsigned int i;
+
+	for (i = 0; i < dp->assigned_bbh_count; i++) {
+		const struct bbh_config *bc = dp->assigned_bbh_cfg[i];
+		const struct bbh_dma_config *bdc = dp->assigned_bbh_dma_cfg[i];
+		u32 val;
+
+		/* number of rx (write) chunks for this peripheral */
+		val = dma_reg_read(priv, id, DMA_CONFIG_NUM_OF_WRITES(i));
+		val &= ~CONFIG_NUM_OF_WRITES_NUMOFBUFF_MASK;
+		val |= bdc->rx_chunk_count <<
+			CONFIG_NUM_OF_WRITES_NUMOFBUFF_SHIFT;
+		dma_reg_write(priv, id, DMA_CONFIG_NUM_OF_WRITES(i), val);
+
+		/* number of tx (read) chunks for this peripheral */
+		val = dma_reg_read(priv, id, DMA_CONFIG_NUM_OF_READS(i));
+		val &= ~CONFIG_NUM_OF_READS_RR_NUM_MASK;
+		val |= bdc->tx_chunk_count <<
+			CONFIG_NUM_OF_READS_RR_NUM_SHIFT;
+		dma_reg_write(priv, id, DMA_CONFIG_NUM_OF_READS(i), val);
+
+		/* FIXME: set urgent thresh */
+		/* FIXME: set priority & RR weight */
+
+		/* broadbus source ids of the attached BBH rx/tx sides */
+		val = dma_reg_read(priv, id, DMA_CONFIG_PERIPH_SOURCE(i));
+		val &= ~CONFIG_PERIPH_SOURCE_RXSOURCE_MASK;
+		val &= ~CONFIG_PERIPH_SOURCE_TXSOURCE_MASK;
+		val |= bc->rx_broadbus_id <<
+			CONFIG_PERIPH_SOURCE_RXSOURCE_SHIFT;
+		val |= bc->tx_broadbus_id <<
+			CONFIG_PERIPH_SOURCE_TXSOURCE_SHIFT;
+		dma_reg_write(priv, id, DMA_CONFIG_PERIPH_SOURCE(i), val);
+
+		/* FIXME: set max on the fly */
+	}
+}
+
+/*
+ * configure the RX side of one BBH: peripheral broadbus ids, VIQ
+ * assignment, SDMA chunk layout, packet size limits and mac mode.
+ * no-op if the BBH's rx_configure flag is false.
+ */
+static void xrdp_setup_bbh_rx(struct bcm_xrdp_priv *priv,
+			      unsigned int id)
+{
+	const struct bbh_config *bc = &bbh_configs[id];
+	const struct bbh_params *bp = &priv->bbh_params[id];
+	u32 val;
+
+	if (!bc->rx_configure)
+		return;
+
+	/*
+	 * setup BB ID of needed peripherals
+	 */
+	val = bbh_rx_read(priv, id, BBH_RX_GENERAL_CFG_BBCFG);
+	val &= ~GENERAL_CFG_BBCFG_SDMABBID_MASK;
+	if (bc->dma_configs[1].configure) {
+		unsigned int sdma_bb_id;
+
+		/* only modules 1 and 2 are SDMA-capable here */
+		switch (bc->dma_configs[1].module_id) {
+		case 1:
+			sdma_bb_id = BB_ID_SDMA0;
+			break;
+		case 2:
+			sdma_bb_id = BB_ID_SDMA1;
+			break;
+		default:
+			BUG();
+			break;
+		}
+		val |= sdma_bb_id << GENERAL_CFG_BBCFG_SDMABBID_SHIFT;
+	}
+
+	val &= ~GENERAL_CFG_BBCFG_DISPBBID_MASK;
+	val |= BB_ID_DISPATCHER_REORDER << GENERAL_CFG_BBCFG_DISPBBID_SHIFT;
+	val &= ~GENERAL_CFG_BBCFG_SBPMBBID_MASK;
+	val |= BB_ID_SBPM << GENERAL_CFG_BBCFG_SBPMBBID_SHIFT;
+	bbh_rx_write(priv, id, BBH_RX_GENERAL_CFG_BBCFG, val);
+
+	/*
+	 * assign one VIQ per BBH for now, don't use exclusive VIQ
+	 * FIXME
+	 */
+	val = bbh_rx_read(priv, id, BBH_RX_GENERAL_CFG_DISPVIQ);
+	val &= ~GENERAL_CFG_DISPVIQ_NORMALVIQ_MASK;
+	val &= ~GENERAL_CFG_DISPVIQ_EXCLVIQ_MASK;
+	val |= (bc->rx_viq << GENERAL_CFG_DISPVIQ_NORMALVIQ_SHIFT);
+	bbh_rx_write(priv, id, BBH_RX_GENERAL_CFG_DISPVIQ, val);
+
+	/*
+	 * setup SDMA configuration (chunk base offsets computed in
+	 * xrdp_compute_params)
+	 */
+	val = bbh_rx_read(priv, id, BBH_RX_GENERAL_CFG_SDMAADDR);
+	val &= ~GENERAL_CFG_SDMAADDR_DATABASE_MASK;
+	val |= bp->dma_params[1].rx_offset <<
+		GENERAL_CFG_SDMAADDR_DATABASE_SHIFT;
+
+	val &= ~GENERAL_CFG_SDMAADDR_DESCBASE_MASK;
+	val |= bp->dma_params[1].rx_offset <<
+		GENERAL_CFG_SDMAADDR_DESCBASE_SHIFT;
+	bbh_rx_write(priv, id, BBH_RX_GENERAL_CFG_SDMAADDR, val);
+
+	val = bbh_rx_read(priv, id, BBH_RX_GENERAL_CFG_SDMACFG);
+	val &= ~GENERAL_CFG_SDMACFG_NUMOFCD_MASK;
+	val |= bc->dma_configs[1].rx_chunk_count <<
+		GENERAL_CFG_SDMACFG_NUMOFCD_SHIFT;
+
+	val &= ~GENERAL_CFG_SDMACFG_EXCLTH_MASK;
+	val |= bc->dma_configs[1].rx_chunk_count <<
+		GENERAL_CFG_SDMACFG_EXCLTH_SHIFT;
+	bbh_rx_write(priv, id, BBH_RX_GENERAL_CFG_SDMACFG, val);
+
+	/*
+	 * setup minimum packet size (same value in all four fields)
+	 */
+	val = (bc->min_pkt_size << GENERAL_CFG_MINPKT0_MINPKT0_SHIFT) |
+		(bc->min_pkt_size << GENERAL_CFG_MINPKT0_MINPKT1_SHIFT) |
+		(bc->min_pkt_size << GENERAL_CFG_MINPKT0_MINPKT2_SHIFT) |
+		(bc->min_pkt_size << GENERAL_CFG_MINPKT0_MINPKT3_SHIFT);
+	bbh_rx_write(priv, id, BBH_RX_GENERAL_CFG_MINPKT0, val);
+
+	/* maximum packet size (same value in all four fields) */
+	val = (bc->max_pkt_size << GENERAL_CFG_MAXPKT0_MAXPKT0_SHIFT) |
+		(bc->max_pkt_size << GENERAL_CFG_MAXPKT0_MAXPKT1_SHIFT);
+	bbh_rx_write(priv, id, BBH_RX_GENERAL_CFG_MAXPKT0, val);
+
+	val = (bc->max_pkt_size << GENERAL_CFG_MAXPKT1_MAXPKT2_SHIFT) |
+		(bc->max_pkt_size << GENERAL_CFG_MAXPKT1_MAXPKT3_SHIFT);
+	bbh_rx_write(priv, id, BBH_RX_GENERAL_CFG_MAXPKT1, val);
+
+	/*
+	 * set packet receive offset to 0
+	 */
+	val = bbh_rx_read(priv, id, BBH_RX_GENERAL_CFG_SOPOFFSET);
+	val &= ~GENERAL_CFG_SOPOFFSET_SOPOFFSET_MASK;
+	bbh_rx_write(priv, id, BBH_RX_GENERAL_CFG_SOPOFFSET, val);
+
+	/*
+	 * set correct mac mode for this BBH (default: all mode bits
+	 * cleared for ethernet BBHs)
+	 */
+	val = bbh_rx_read(priv, id, BBH_RX_GENERAL_CFG_MACMODE);
+	val &= ~GENERAL_CFG_MACMODE_MACMODE_MASK;
+	val &= ~GENERAL_CFG_MACMODE_GPONMODE_MASK;
+	val &= ~GENERAL_CFG_MACMODE_MACVDSL_MASK;
+	switch (id) {
+	case RDP_BBH_IDX_PON:
+		val |= GENERAL_CFG_MACMODE_MACMODE_MASK;
+		break;
+	case RDP_BBH_IDX_DSL:
+		val |= GENERAL_CFG_MACMODE_MACVDSL_MASK;
+		break;
+	}
+	bbh_rx_write(priv, id, BBH_RX_GENERAL_CFG_MACMODE, val);
+
+	/* don't remove CRC from frame, removed in ethernet
+	 * driver, important for DSL because it would corrupt
+	 * packets */
+	val = bbh_rx_read(priv, id, BBH_RX_GENERAL_CFG_CRCOMITDIS);
+	val |= GENERAL_CFG_CRCOMITDIS_CRCOMITDIS_MASK;
+	bbh_rx_write(priv, id, BBH_RX_GENERAL_CFG_CRCOMITDIS, val);
+}
+
+/*
+ * turn on packet reception and SBPM buffer requests for one BBH;
+ * no-op when the BBH's rx side is not configured
+ */
+static void xrdp_enable_bbh_rx(struct bcm_xrdp_priv *priv,
+			       unsigned int id)
+{
+	const struct bbh_config *cfg = &bbh_configs[id];
+	u32 reg;
+
+	if (!cfg->rx_configure)
+		return;
+
+	reg = bbh_rx_read(priv, id, BBH_RX_GENERAL_CFG_ENABLE);
+	reg |= GENERAL_CFG_ENABLE_PKTEN_MASK |
+		GENERAL_CFG_ENABLE_SBPMEN_MASK;
+	bbh_rx_write(priv, id, BBH_RX_GENERAL_CFG_ENABLE, reg);
+}
+
+/*
+ * map a runner core index to its broadbus id; BUGs on an index out
+ * of range
+ */
+static unsigned int get_runner_bb_id(unsigned int core_id)
+{
+	/* const: read-only lookup data, keep it out of writable memory */
+	static const unsigned int bb_ids[RDP_RUNNER_COUNT] = {
+		BB_ID_RNR0,
+		BB_ID_RNR1,
+		BB_ID_RNR2,
+		BB_ID_RNR3,
+		BB_ID_RNR4,
+		BB_ID_RNR5,
+	};
+
+	BUG_ON(core_id >= ARRAY_SIZE(bb_ids));
+	return bb_ids[core_id];
+}
+
+/*
+ * configure the TX side of a unified (ethernet) BBH: PD, FE and FE-PD
+ * fifo bases/sizes for the four queue pairs plus TX thresholds
+ */
+static void xrdp_setup_bbh_tx_unified(struct bcm_xrdp_priv *priv,
+				      unsigned int id)
+{
+	size_t i;
+	u32 val;
+
+	/*
+	 * setup PD FIFO sizes, constant size for now like BCM code
+	 */
+	for (i = 0; i < 4; i++) {
+		const u32 fifo_size = (UNIFIED_BBH_PD_FIFO_SIZE + 1);
+
+		/* base: fifos laid out back-to-back, two per register */
+		val = bbh_tx_read(priv, id, BBH_TX_UNIFIED_CFGS_PDBASE(i));
+		val &= ~UNIFIED_CFGS_PDBASE_FIFOBASE0_MASK;
+		val &= ~UNIFIED_CFGS_PDBASE_FIFOBASE1_MASK;
+
+		val |= (fifo_size * (i * 2)) <<
+			UNIFIED_CFGS_PDBASE_FIFOBASE0_SHIFT;
+		val |= (fifo_size * (i * 2 + 1)) <<
+			UNIFIED_CFGS_PDBASE_FIFOBASE1_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_UNIFIED_CFGS_PDBASE(i), val);
+
+		/* size */
+		val = bbh_tx_read(priv, id, BBH_TX_UNIFIED_CFGS_PDSIZE(i));
+		val &= ~UNIFIED_CFGS_PDSIZE_FIFOSIZE0_MASK;
+		val &= ~UNIFIED_CFGS_PDSIZE_FIFOSIZE1_MASK;
+
+		val |= (fifo_size - 1) <<
+			UNIFIED_CFGS_PDSIZE_FIFOSIZE0_SHIFT;
+		val |= (fifo_size - 1) <<
+			UNIFIED_CFGS_PDSIZE_FIFOSIZE1_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_UNIFIED_CFGS_PDSIZE(i), val);
+
+		/* wakeup threshold, should not be used in MDU mode */
+		bbh_tx_write(priv, id, BBH_TX_UNIFIED_CFGS_PDWKUPH(i), 0);
+		bbh_tx_write(priv, id, BBH_TX_UNIFIED_CFGS_PD_BYTE_TH(i), 0);
+	}
+
+	/* should not be needed in MDU mode */
+	bbh_tx_write(priv, id, BBH_TX_UNIFIED_CFGS_PD_BYTE_TH_EN, 0);
+
+	/* should not be needed by unified BBH */
+	bbh_tx_write(priv, id, BBH_TX_UNIFIED_CFGS_GTXTHRESH, 0);
+
+	/*
+	 * setup FE FIFO, constant size for now like BCM code
+	 * (fourth entry gets a 1-word placeholder size)
+	 */
+	for (i = 0; i < 4; i++) {
+		const u32 fifo_size = (i < 3) ? 2560 / 8 : 1;
+
+		/* base */
+		val = bbh_tx_read(priv, id, BBH_TX_UNIFIED_CFGS_FEBASE(i));
+		val &= ~UNIFIED_CFGS_FEBASE_FIFOBASE0_MASK;
+		val &= ~UNIFIED_CFGS_FEBASE_FIFOBASE1_MASK;
+		val |= (fifo_size * (i * 2)) <<
+			UNIFIED_CFGS_FEBASE_FIFOBASE0_SHIFT;
+		val |= (fifo_size * (i * 2 + 1)) <<
+			UNIFIED_CFGS_FEBASE_FIFOBASE1_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_UNIFIED_CFGS_FEBASE(i), val);
+
+		/* size */
+		val = bbh_tx_read(priv, id, BBH_TX_UNIFIED_CFGS_FESIZE(i));
+		val &= ~UNIFIED_CFGS_FESIZE_FIFOSIZE0_MASK;
+		val &= ~UNIFIED_CFGS_FESIZE_FIFOSIZE1_MASK;
+		val |= (fifo_size - 1) <<
+			UNIFIED_CFGS_FESIZE_FIFOSIZE0_SHIFT;
+		val |= (fifo_size - 1) <<
+			UNIFIED_CFGS_FESIZE_FIFOSIZE1_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_UNIFIED_CFGS_FESIZE(i), val);
+	}
+
+	/*
+	 * setup FE PD FIFO, constant size for now like BCM code
+	 */
+	for (i = 0; i < 4; i++) {
+		const u32 fifo_size = (i < 3) ? 40 : 1;
+
+		/* base */
+		val = bbh_tx_read(priv, id, BBH_TX_UNIFIED_CFGS_FEPDBASE(i));
+		val &= ~UNIFIED_CFGS_FEPDBASE_FIFOBASE0_MASK;
+		val &= ~UNIFIED_CFGS_FEPDBASE_FIFOBASE1_MASK;
+		val |= (fifo_size * (i * 2)) <<
+			UNIFIED_CFGS_FEPDBASE_FIFOBASE0_SHIFT;
+		val |= (fifo_size * (i * 2 + 1)) <<
+			UNIFIED_CFGS_FEPDBASE_FIFOBASE1_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_UNIFIED_CFGS_FEPDBASE(i), val);
+
+		/* size */
+		val = bbh_tx_read(priv, id, BBH_TX_UNIFIED_CFGS_FEPDSIZE(i));
+		val &= ~UNIFIED_CFGS_FEPDSIZE_FIFOSIZE0_MASK;
+		val &= ~UNIFIED_CFGS_FEPDSIZE_FIFOSIZE1_MASK;
+		val |= (fifo_size - 1) <<
+                        UNIFIED_CFGS_FEPDSIZE_FIFOSIZE0_SHIFT;
+		val |= (fifo_size - 1) <<
+                        UNIFIED_CFGS_FEPDSIZE_FIFOSIZE1_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_UNIFIED_CFGS_FEPDSIZE(i), val);
+	}
+
+	/*
+	 * setup FIFO TX threshold
+	 */
+	for (i = 0; i < 4; i++) {
+		const u32 thresh_size = 2048 / 8;
+
+		val = bbh_tx_read(priv, id, BBH_TX_UNIFIED_CFGS_TXTHRESH(i));
+		val &= ~UNIFIED_CFGS_TXTHRESH_THRESH0_MASK;
+		val &= ~UNIFIED_CFGS_TXTHRESH_THRESH1_MASK;
+		val |= (thresh_size << UNIFIED_CFGS_TXTHRESH_THRESH0_SHIFT);
+		val |= (thresh_size << UNIFIED_CFGS_TXTHRESH_THRESH1_SHIFT);
+		bbh_tx_write(priv, id, BBH_TX_UNIFIED_CFGS_TXTHRESH(i), val);
+	}
+}
+
+/*
+ * "lan" is a misnomer, this is for AE (P2P) bbh: program the two PD
+ * fifos and the TX thresholds
+ */
+static void xrdp_setup_bbh_tx_lan(struct bcm_xrdp_priv *priv,
+				  unsigned int id)
+{
+	const u32 fifo_size = (AE_BBH_PD_FIFO_SIZE + 1);
+	const u32 thresh_ddr_size = 2048 / 8;
+	const u32 thresh_sram_size = 512 / 8;
+	u32 val;
+
+	/* base */
+	val = bbh_tx_read(priv, id, BBH_TX_LAN_CFGS_PDBASE);
+	val &= ~LAN_CFGS_PDBASE_FIFOBASE0_MASK;
+	val &= ~LAN_CFGS_PDBASE_FIFOBASE1_MASK;
+
+	val |= fifo_size << LAN_CFGS_PDBASE_FIFOBASE0_SHIFT;
+	val |= (fifo_size * 2) << LAN_CFGS_PDBASE_FIFOBASE1_SHIFT;
+	bbh_tx_write(priv, id, BBH_TX_LAN_CFGS_PDBASE, val);
+
+	/*
+	 * size: clear the PDSIZE fields before setting them (the
+	 * previous code cleared the PDBASE masks here by copy-paste,
+	 * leaving stale size bits in place)
+	 */
+	val = bbh_tx_read(priv, id, BBH_TX_LAN_CFGS_PDSIZE);
+	val &= ~LAN_CFGS_PDSIZE_FIFOSIZE0_MASK;
+	val &= ~LAN_CFGS_PDSIZE_FIFOSIZE1_MASK;
+
+	val |= (fifo_size - 1) << LAN_CFGS_PDSIZE_FIFOSIZE0_SHIFT;
+	val |= (fifo_size - 1) << LAN_CFGS_PDSIZE_FIFOSIZE1_SHIFT;
+	bbh_tx_write(priv, id, BBH_TX_LAN_CFGS_PDSIZE, val);
+
+	/* wakeup threshold, should not be used in MDU mode */
+	bbh_tx_write(priv, id, BBH_TX_LAN_CFGS_PDWKUPH, 0);
+	bbh_tx_write(priv, id, BBH_TX_LAN_CFGS_PD_BYTE_TH, 0);
+	bbh_tx_write(priv, id, BBH_TX_LAN_CFGS_PD_BYTE_TH_EN, 0);
+
+	/* FIXME: this one is set to 1 in bcm code */
+	bbh_tx_write(priv, id, BBH_TX_LAN_CFGS_PDEMPTY, 0);
+
+	/* FIFO TX threshold */
+	val = bbh_tx_read(priv, id, BBH_TX_LAN_CFGS_TXTHRESH);
+	val &= ~LAN_CFGS_TXTHRESH_DDRTHRESH_MASK;
+	val &= ~LAN_CFGS_TXTHRESH_SRAMTHRESH_MASK;
+	val |= (thresh_ddr_size << LAN_CFGS_TXTHRESH_DDRTHRESH_SHIFT);
+	val |= (thresh_sram_size << LAN_CFGS_TXTHRESH_SRAMTHRESH_SHIFT);
+	bbh_tx_write(priv, id, BBH_TX_LAN_CFGS_TXTHRESH, val);
+}
+
+/*
+ * program the report (MSG) and status (STS) runner channels of a WAN
+ * BBH: sram addresses and wakeup task ids for both runner slots
+ */
+static void xrdp_setup_bbh_reporting(struct bcm_xrdp_priv *priv,
+				     unsigned int id)
+{
+	const struct bbh_config *bc = &bbh_configs[id];
+	size_t i;
+	u32 val;
+
+	/* setup reporting */
+	for (i = 0; i < 2; i++) {
+		val = bbh_tx_read(priv, id, BBH_TX_WAN_CFGS_MSGRNRCFG_1(i));
+		val &= ~WAN_CFGS_MSGRNRCFG_1_TCONTADDR_MASK;
+		val |= bc->report_tcont_addr_sram <<
+			WAN_CFGS_MSGRNRCFG_1_TCONTADDR_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_WAN_CFGS_MSGRNRCFG_1(i), val);
+
+		val = bbh_tx_read(priv, id, BBH_TX_WAN_CFGS_MSGRNRCFG_2(i));
+		val &= ~WAN_CFGS_MSGRNRCFG_2_PTRADDR_MASK;
+		val |= bc->report_ptr_addr_sram <<
+			WAN_CFGS_MSGRNRCFG_2_PTRADDR_SHIFT;
+
+		/* task to wake when a report message is consumed */
+		val &= ~WAN_CFGS_MSGRNRCFG_2_TASK_MASK;
+		val |= bc->report_tx_task_id[i] <<
+			WAN_CFGS_MSGRNRCFG_2_TASK_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_WAN_CFGS_MSGRNRCFG_2(i), val);
+	}
+
+	/* setup status */
+	for (i = 0; i < 2; i++) {
+		val = bbh_tx_read(priv, id, BBH_TX_WAN_CFGS_STSRNRCFG_1(i));
+		val &= ~WAN_CFGS_STSRNRCFG_1_TCONTADDR_MASK;
+		val |= bc->status_tcont_addr_sram <<
+			WAN_CFGS_STSRNRCFG_1_TCONTADDR_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_WAN_CFGS_STSRNRCFG_1(i), val);
+
+		val = bbh_tx_read(priv, id, BBH_TX_WAN_CFGS_STSRNRCFG_2(i));
+		val &= ~WAN_CFGS_STSRNRCFG_2_PTRADDR_MASK;
+		val |= bc->status_ptr_addr_sram <<
+			WAN_CFGS_STSRNRCFG_2_PTRADDR_SHIFT;
+
+		/* task to wake for status messages */
+		val &= ~WAN_CFGS_STSRNRCFG_2_TASK_MASK;
+		val |= bc->status_tx_task_id[i] <<
+			WAN_CFGS_STSRNRCFG_2_TASK_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_WAN_CFGS_STSRNRCFG_2(i), val);
+	}
+}
+
+/*
+ * configure the TX PD fifos of a WAN BBH (PON or DSL): fifo layout,
+ * wakeup thresholds and per-fifo byte limits
+ */
+static void xrdp_setup_bbh_tx_wan(struct bcm_xrdp_priv *priv,
+				  unsigned int id)
+{
+	size_t i;
+	u32 val;
+	unsigned int pd_limit;
+
+	if (id == RDP_BBH_IDX_DSL) {
+		/* small fifo for dsl */
+		pd_limit = 128;
+	} else {
+		/* maximum value for PON (from bcm code) */
+		pd_limit = 131040;
+	}
+
+	/*
+	 * setup PD FIFO sizes, constant size for now like BCM code
+	 */
+	for (i = 0; i < 20; i++) {
+		u32 fifo_size;
+
+		if (id == RDP_BBH_IDX_DSL) {
+			/* 8 fifo used in dsl */
+			fifo_size = (i >= 8) ? 1 : (DSL_BBH_PD_FIFO_SIZE + 1);
+		} else {
+			/* setup all fifo for PON (value from bcm code) */
+			fifo_size = (i >= 1) ? 1 : (PON_BBH_PD_FIFO_SIZE + 1);
+		}
+
+		/* base */
+		val = bbh_tx_read(priv, id, BBH_TX_WAN_CFGS_PDBASE(i));
+		val &= ~WAN_CFGS_PDBASE_FIFOBASE0_MASK;
+		val &= ~WAN_CFGS_PDBASE_FIFOBASE1_MASK;
+
+		val |= (fifo_size * (i * 2)) <<
+			WAN_CFGS_PDBASE_FIFOBASE0_SHIFT;
+		val |= (fifo_size * (i * 2 + 1)) <<
+			WAN_CFGS_PDBASE_FIFOBASE1_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_WAN_CFGS_PDBASE(i), val);
+
+		/* size */
+		val = bbh_tx_read(priv, id, BBH_TX_WAN_CFGS_PDSIZE(i));
+		val &= ~WAN_CFGS_PDSIZE_FIFOSIZE0_MASK;
+		val &= ~WAN_CFGS_PDSIZE_FIFOSIZE1_MASK;
+		val |= (fifo_size - 1) << WAN_CFGS_PDSIZE_FIFOSIZE0_SHIFT;
+		val |= (fifo_size - 1) << WAN_CFGS_PDSIZE_FIFOSIZE1_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_WAN_CFGS_PDSIZE(i), val);
+
+		/* wakeup threshold set to same value as size
+		 * NOTE(review): when fifo_size == 1 (unused fifos),
+		 * (fifo_size - 2) wraps to 0xffffffff before masking
+		 * -- confirm this is intended */
+		val = bbh_tx_read(priv, id, BBH_TX_WAN_CFGS_PDWKUPH(i));
+		val &= ~WAN_CFGS_PDWKUPH_WKUPTHRESH0_MASK;
+		val &= ~WAN_CFGS_PDWKUPH_WKUPTHRESH1_MASK;
+		val |= (fifo_size - 2) << WAN_CFGS_PDWKUPH_WKUPTHRESH0_SHIFT;
+		val |= (fifo_size - 2) << WAN_CFGS_PDWKUPH_WKUPTHRESH1_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_WAN_CFGS_PDWKUPH(i), val);
+
+		/* limit maximum number of bytes inside BBH fifo (32
+		 * bytes unit) */
+		val = bbh_tx_read(priv, id, BBH_TX_WAN_CFGS_PD_BYTE_TH(i));
+		val &= ~WAN_CFGS_PD_BYTE_TH_PDLIMIT0_MASK;
+		val &= ~WAN_CFGS_PD_BYTE_TH_PDLIMIT1_MASK;
+		val |= (pd_limit / 32) << WAN_CFGS_PD_BYTE_TH_PDLIMIT0_SHIFT;
+		val |= (pd_limit / 32) << WAN_CFGS_PD_BYTE_TH_PDLIMIT1_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_WAN_CFGS_PD_BYTE_TH(i), val);
+	}
+
+	if (id == RDP_BBH_IDX_DSL) {
+		/* enable total bytes limiting */
+		val = bbh_tx_read(priv, id, BBH_TX_WAN_CFGS_PD_BYTE_TH_EN);
+		val |= WAN_CFGS_PD_BYTE_TH_EN_PDLIMITEN_MASK;
+		bbh_tx_write(priv, id, BBH_TX_WAN_CFGS_PD_BYTE_TH_EN, val);
+
+		/* setup pd empty threshold */
+		val = bbh_tx_read(priv, id, BBH_TX_WAN_CFGS_PDEMPTY);
+		val &= ~WAN_CFGS_PDEMPTY_EMPTY_MASK;
+		val |= 1;
+		bbh_tx_write(priv, id, BBH_TX_WAN_CFGS_PDEMPTY, val);
+	}
+}
+
+/*
+ *
+ */
+static void xrdp_setup_bbh_tx(struct bcm_xrdp_priv *priv,
+			      unsigned int id)
+{
+	const struct bbh_config *bc = &bbh_configs[id];
+	const struct bbh_params *bp = &priv->bbh_params[id];
+	size_t i;
+	u32 val;
+
+	if (!bc->tx_configure)
+		return;
+
+	/* set MACTYPE */
+	val = bbh_tx_read(priv, id, BBH_TX_COMMON_CFGS_MACTYPE);
+	val &= ~COMMON_CFGS_MACTYPE_TYPE_MASK;
+	switch (id) {
+	case RDP_BBH_IDX_UNIMAC0:
+	case RDP_BBH_IDX_UNIMAC1:
+	case RDP_BBH_IDX_UNIMAC2:
+		val |= (RDP_MACTYPE_GPON << COMMON_CFGS_MACTYPE_TYPE_SHIFT);
+		break;
+	case RDP_BBH_IDX_PON:
+		val |= (RDP_MACTYPE_EPON << COMMON_CFGS_MACTYPE_TYPE_SHIFT);
+		break;
+	case RDP_BBH_IDX_AE10:
+	case RDP_BBH_IDX_AE25:
+		val |= (RDP_MACTYPE_EMAC << COMMON_CFGS_MACTYPE_TYPE_SHIFT);
+		break;
+	case RDP_BBH_IDX_DSL:
+		val |= (RDP_MACTYPE_GPON << COMMON_CFGS_MACTYPE_TYPE_SHIFT);
+		break;
+	}
+	bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_MACTYPE, val);
+
+	/*
+	 * setup BB ID of needed peripherals
+	 */
+	val = bbh_tx_read(priv, id, BBH_TX_COMMON_CFGS_BBCFG_1_TX);
+
+	val &= ~COMMON_CFGS_BBCFG_1_TX_SBPMSRC_MASK;
+	val |= BB_ID_SBPM << COMMON_CFGS_BBCFG_1_TX_SBPMSRC_SHIFT;
+
+	val &= ~COMMON_CFGS_BBCFG_1_TX_FPMSRC_MASK;
+	val |= BB_ID_FPM << COMMON_CFGS_BBCFG_1_TX_FPMSRC_SHIFT;
+
+	val &= ~COMMON_CFGS_BBCFG_1_TX_DMASRC_MASK;
+	if (bc->dma_configs[0].configure) {
+		unsigned int dma_bb_id;
+
+		switch (bc->dma_configs[0].module_id) {
+		case 0:
+			dma_bb_id = BB_ID_DMA0;
+			break;
+		default:
+			BUG();
+			break;
+		}
+		val |= dma_bb_id << COMMON_CFGS_BBCFG_1_TX_DMASRC_SHIFT;
+	}
+
+	val &= ~COMMON_CFGS_BBCFG_1_TX_SDMASRC_MASK;
+	if (bc->dma_configs[1].configure) {
+		unsigned int sdma_bb_id;
+
+		switch (bc->dma_configs[1].module_id) {
+		case 1:
+			sdma_bb_id = BB_ID_SDMA0;
+			break;
+		case 2:
+			sdma_bb_id = BB_ID_SDMA1;
+			break;
+		default:
+			BUG();
+			break;
+		}
+		val |= sdma_bb_id << COMMON_CFGS_BBCFG_1_TX_SDMASRC_SHIFT;
+	}
+	bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_BBCFG_1_TX, val);
+
+	/*
+	 * setup BB ID of needed peripherals
+	 */
+	val = bbh_tx_read(priv, id, BBH_TX_COMMON_CFGS_BBCFG_2_TX);
+	val &= ~COMMON_CFGS_BBCFG_2_TX_PDRNR0SRC_MASK;
+	if (bc->tx_runners[0] != -1)
+		val |= get_runner_bb_id(bc->tx_runners[0]) <<
+			COMMON_CFGS_BBCFG_2_TX_PDRNR0SRC_SHIFT;
+
+	val &= ~COMMON_CFGS_BBCFG_2_TX_PDRNR1SRC_MASK;
+	if (bc->tx_runners[1] != -1)
+		val |= get_runner_bb_id(bc->tx_runners[1]) <<
+			COMMON_CFGS_BBCFG_2_TX_PDRNR1SRC_SHIFT;
+
+	val &= ~COMMON_CFGS_BBCFG_2_TX_STSRNRSRC_MASK;
+	if (bc->tx_status_runner != -1)
+		val |= get_runner_bb_id(bc->tx_status_runner) <<
+			COMMON_CFGS_BBCFG_2_TX_STSRNRSRC_SHIFT;
+
+	val &= ~COMMON_CFGS_BBCFG_2_TX_MSGRNRSRC_MASK;
+	if (bc->tx_report_runner != -1)
+		val |= get_runner_bb_id(bc->tx_report_runner) <<
+			COMMON_CFGS_BBCFG_2_TX_MSGRNRSRC_SHIFT;
+
+	bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_BBCFG_2_TX, val);
+
+	/*
+	 * setup DDR DMA offsets
+	 */
+	val = bbh_tx_read(priv, id, BBH_TX_COMMON_CFGS_DDRCFG_TX);
+	val &= ~COMMON_CFGS_DDRCFG_TX_BUFSIZE_MASK;
+	val &= ~COMMON_CFGS_DDRCFG_TX_HNSIZE0_MASK;
+	val &= ~COMMON_CFGS_DDRCFG_TX_HNSIZE1_MASK;
+	/* FIXME: no need to setup thos for now, need to be set if we
+	 * want to use FPM */
+	bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_DDRCFG_TX, val);
+
+	/*
+	 * setup runner sram addresses
+	 */
+	for (i = 0; i < 2; i++) {
+		val = bbh_tx_read(priv, id, BBH_TX_COMMON_CFGS_RNRCFG_1(i));
+		val &= ~COMMON_CFGS_RNRCFG_1_TCONTADDR_MASK;
+		val |= bc->tcont_addr_sram <<
+			COMMON_CFGS_RNRCFG_1_TCONTADDR_SHIFT;
+
+		/* clear the field with its MASK: the previous code
+		 * cleared with the SHIFT constant, which left stale
+		 * SKBADDR bits in the register */
+		val &= ~COMMON_CFGS_RNRCFG_1_SKBADDR_MASK;
+		val |= bc->skb_addr_sram <<
+			COMMON_CFGS_RNRCFG_1_SKBADDR_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_RNRCFG_1(i), val);
+
+		val = bbh_tx_read(priv, id, BBH_TX_COMMON_CFGS_RNRCFG_2(i));
+		val &= ~COMMON_CFGS_RNRCFG_2_PTRADDR_MASK;
+		val |= bc->ptr_addr_sram <<
+			COMMON_CFGS_RNRCFG_2_PTRADDR_SHIFT;
+
+		val &= ~COMMON_CFGS_RNRCFG_2_TASK_MASK;
+		/* entry 8 is presumably the task shared by queues 8 to
+		 * 40 (see the PERQTASK setup below) -- TODO confirm */
+		val |= bc->tx_queue_task_id[8] <<
+			COMMON_CFGS_RNRCFG_2_TASK_SHIFT;
+		bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_RNRCFG_2(i), val);
+	}
+
+	/*
+	 * setup DMA configuration
+	 */
+	val = bbh_tx_read(priv, id, BBH_TX_COMMON_CFGS_DMACFG_TX);
+
+	val &= ~COMMON_CFGS_DMACFG_TX_DESCBASE_MASK;
+	val |= bp->dma_params[0].tx_offset <<
+		COMMON_CFGS_DMACFG_TX_DESCBASE_SHIFT;
+
+	val &= ~COMMON_CFGS_DMACFG_TX_DESCSIZE_MASK;
+	val |= bc->dma_configs[0].tx_chunk_count <<
+		COMMON_CFGS_DMACFG_TX_DESCSIZE_SHIFT;
+
+	val &= ~COMMON_CFGS_DMACFG_TX_MAXREQ_MASK;
+	/* FIXME: for now set max request to chunk count */
+	val |= bc->dma_configs[0].tx_chunk_count <<
+		COMMON_CFGS_DMACFG_TX_MAXREQ_SHIFT;
+
+	if (id == RDP_BBH_IDX_PON)
+		val |= COMMON_CFGS_DMACFG_TX_EPNURGNT_MASK;
+
+	bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_DMACFG_TX, val);
+
+	/*
+	 * setup SDMA configuration
+	 */
+	val = bbh_tx_read(priv, id, BBH_TX_COMMON_CFGS_SDMACFG_TX);
+
+	val &= ~COMMON_CFGS_SDMACFG_TX_DESCBASE_MASK;
+	val |= bp->dma_params[1].tx_offset <<
+		COMMON_CFGS_SDMACFG_TX_DESCBASE_SHIFT;
+
+	val &= ~COMMON_CFGS_SDMACFG_TX_DESCSIZE_MASK;
+	val |= bc->dma_configs[1].tx_chunk_count <<
+		COMMON_CFGS_SDMACFG_TX_DESCSIZE_SHIFT;
+
+	val &= ~COMMON_CFGS_SDMACFG_TX_MAXREQ_MASK;
+	/* FIXME: for now set max request to chunk count */
+	val |= bc->dma_configs[1].tx_chunk_count <<
+		COMMON_CFGS_SDMACFG_TX_MAXREQ_SHIFT;
+
+	if (id == RDP_BBH_IDX_PON)
+		val |= COMMON_CFGS_SDMACFG_TX_EPNURGNT_MASK;
+	bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_SDMACFG_TX, val);
+
+	/*
+	 * setup DDR global offsets (coherent and non coherent)
+	 */
+	/* FIXME: set to 0 since we don't use FPM */
+	bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_DDRTMBASEL(0), 0);
+	bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_DDRTMBASEL(1), 0);
+	bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_DDRTMBASEH(0), 0);
+	bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_DDRTMBASEH(1), 0);
+
+	/*
+	 * setup data fifo params (internal FIFO space allocation for
+	 * data coming from either DDR or PSRAM)
+	 */
+	val = bbh_tx_read(priv, id, BBH_TX_COMMON_CFGS_DFIFOCTRL);
+	val &= ~COMMON_CFGS_DFIFOCTRL_PSRAMSIZE_MASK;
+	val &= ~COMMON_CFGS_DFIFOCTRL_DDRSIZE_MASK;
+	val &= ~COMMON_CFGS_DFIFOCTRL_PSRAMBASE_MASK;
+
+	/* values from bcm code (it seems FIFO is about 384 bytes, 288
+	 * + 94 == 382) */
+	val |= 287 << COMMON_CFGS_DFIFOCTRL_DDRSIZE_SHIFT;
+	val |= 288 << COMMON_CFGS_DFIFOCTRL_PSRAMBASE_SHIFT;
+	val |= 94 << COMMON_CFGS_DFIFOCTRL_PSRAMSIZE_SHIFT;
+	bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_DFIFOCTRL, val);
+
+	/*
+	 * setup high priority to transmitting queue (in BCM code,
+	 * this is only for WAN)
+	 */
+	val = bbh_tx_read(priv, id, BBH_TX_COMMON_CFGS_ARB_CFG);
+	val |= COMMON_CFGS_ARB_CFG_HIGHTRXQ_MASK;
+	bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_ARB_CFG, val);
+
+	/*
+	 * setup queue to runner mapping
+	 */
+	for (i = 0; i < ARRAY_SIZE(bc->tx_queue_to_runner); i += 2) {
+		val = bbh_tx_read(priv, id, BBH_TX_COMMON_CFGS_Q2RNR(i / 2));
+		val &= ~COMMON_CFGS_Q2RNR_Q0_MASK;
+		val &= ~COMMON_CFGS_Q2RNR_Q1_MASK;
+
+		if (bc->tx_queue_to_runner[i]) {
+			/* make sure second runner id is setup  */
+			BUG_ON(bc->tx_runners[1] == -1);
+			val |= COMMON_CFGS_Q2RNR_Q0_MASK;
+		}
+
+		if (bc->tx_queue_to_runner[i + 1]) {
+			/* make sure second runner id is setup  */
+			BUG_ON(bc->tx_runners[1] == -1);
+			val |= COMMON_CFGS_Q2RNR_Q1_MASK;
+		}
+
+		bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_Q2RNR(i / 2), val);
+	}
+
+	/*
+	 * setup queue to task id mapping
+	 *
+	 * NOTE: task id for queues 8 to 40 is set earlier
+	 */
+	val = 0;
+	for (i = 0; i < ARRAY_SIZE(bc->tx_queue_task_id) - 1; i++)
+		val |= bc->tx_queue_task_id[i] <<
+			COMMON_CFGS_PERQTASK_TASKx_SHIFT(i);
+	bbh_tx_write(priv, id, BBH_TX_COMMON_CFGS_PERQTASK, val);
+
+	/*
+	 * configure remaining registers depending on BBH type
+	 */
+	switch (id) {
+	case RDP_BBH_IDX_UNIMAC0:
+	case RDP_BBH_IDX_UNIMAC1:
+	case RDP_BBH_IDX_UNIMAC2:
+		xrdp_setup_bbh_tx_unified(priv, id);
+		break;
+	case RDP_BBH_IDX_AE10:
+	case RDP_BBH_IDX_AE25:
+		xrdp_setup_bbh_tx_lan(priv, id);
+		break;
+	case RDP_BBH_IDX_DSL:
+		xrdp_setup_bbh_tx_wan(priv, id);
+		break;
+	case RDP_BBH_IDX_PON:
+		xrdp_setup_bbh_tx_wan(priv, id);
+		xrdp_setup_bbh_reporting(priv, id);
+		break;
+	}
+
+	if (id == RDP_BBH_IDX_DSL) {
+		/* firmware allocates sbpm buffers for TX (with dsl bbh id
+		 * as source), make sure it uses UG0 since UG1 is empty */
+		val = sbpm_reg_read(priv, SBPM_REGS_SBPM_UG_MAP_HIGH);
+		val &= ~(1 << (BB_ID_TX_DSL - 32));
+		sbpm_reg_write(priv, SBPM_REGS_SBPM_UG_MAP_HIGH, val);
+	}
+}
+
+/*
+ * configure one dispatcher load-balancing group: program the
+ * core/task membership masks, the task-to-group mapping, the
+ * available task count for the group, and reset the group's VIQ
+ * membership (filled in later by xrdp_setup_dispatcher()).
+ */
+static void xrdp_setup_dispatcher_grp(struct bcm_xrdp_priv *priv,
+				      size_t grp_id)
+{
+	const struct disp_grp_config *dgc;
+	u32 grp_off, grp_id_shift;
+	size_t core_id;
+	u32 task_count, off, val;
+
+	dgc = &disp_config.groups[grp_id];
+	if (!dgc->configure)
+		return;
+
+	/* set correct bits for each core/task member of this group */
+	/* each group owns 8 consecutive mask registers; two cores
+	 * share one register, 16 task bits per half-word */
+	grp_off = grp_id * 8;
+	task_count = 0;
+
+	for (core_id = 0; core_id < ARRAY_SIZE(dgc->task_mask); core_id++) {
+		u32 core_off;
+		u32 shift, k;
+
+		core_off = core_id / 2;
+		off = grp_off + core_off;
+		/* odd cores use the upper half-word */
+		shift = (core_id & 1) ? 16 : 0;
+
+		val = disp_read(priv, DSPTCHR_MASK_MSK_TSK_255_0(off));
+		val &= ~(0xffff << shift);
+		val |= dgc->task_mask[core_id] << shift;
+		disp_write(priv, DSPTCHR_MASK_MSK_TSK_255_0(off), val);
+
+		task_count += hweight_long(dgc->task_mask[core_id]);
+
+		/* each mapping register covers 8 tasks, so one 16 bit
+		 * core mask spans two registers (k = 0 and 1) */
+		for (k = 0; k < 2; k++) {
+			u32 task8_mask, bit;
+
+			task8_mask = k ?
+				(dgc->task_mask[core_id] >> 8) :
+				dgc->task_mask[core_id] & 0xff;
+
+			off = DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(core_id * 2 + k);
+			val = disp_read(priv, off);
+
+			for (bit = 0; bit < 8; bit++) {
+				if (!(task8_mask & (1 << bit)))
+					continue;
+
+				val |= grp_id << LOAD_BALANCING_TSK_TO_RG_MAPPING_TSKx_SHIFT(bit);
+			}
+			disp_write(priv, off, val);
+		}
+	}
+
+	/* setup task count for this RG */
+	/* groups 0-3 and 4-7 live in two separate registers */
+	if (grp_id < 4) {
+		off = DSPTCHR_LOAD_BALANCING_RG_AVLABL_TSK_0_3;
+		grp_id_shift = grp_id;
+	} else {
+		off = DSPTCHR_LOAD_BALANCING_RG_AVLABL_TSK_4_7;
+		grp_id_shift = grp_id - 4;
+	}
+
+	val = disp_read(priv, off);
+	val |= task_count <<
+		LOAD_BALANCING_RG_AVLABL_TSK_0_3_TSK_CNT_RG_x_SHIFT(grp_id_shift);
+	disp_write(priv, off, val);
+
+	/* reset VIQ to group membership, will be set later */
+	disp_write(priv, DSPTCHR_MASK_MSK_Q(grp_id), 0);
+}
+
+/*
+ * global dispatcher/reorder setup: initialize the free linked list
+ * (FLL), configure every VIQ referenced by an enabled BBH, the
+ * buffer pools, the congestion thresholds, the per-runner dispatch
+ * addresses and the runner group / VIQ memberships.
+ *
+ * returns 0 on success or a negative errno when the static
+ * configuration tables are inconsistent.
+ */
+static int xrdp_setup_dispatcher(struct bcm_xrdp_priv *priv)
+{
+	size_t i;
+	u32 viqs_enabled, viqs_for_dispatcher, viqs_delayed, val;
+	u32 viq_guaranteed_total;
+	u32 common_pool_size;
+
+	/*
+	 * memset PD area, useful for debug
+	 */
+	for (i = 0; i < RDP_DIS_REOR_FLL_BUF_COUNT * 4; i++) {
+		if ((i % 4) == 0)
+			disp_write(priv, DSPTCHR_PDRAM_DATA(i),
+				   (0x4242 << 16) + i / 4);
+		else
+			disp_write(priv, DSPTCHR_PDRAM_DATA(i), 0);
+	}
+
+	/*
+	 * reset qhead (same as BCM code)
+	 */
+	for (i = 0; i < ARRAY_SIZE(disp_config.viqs); i++) {
+		disp_write(priv, DSPTCHR_QDES_HEAD(i), 0);
+		disp_write(priv, DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(i), 0);
+	}
+
+	/*
+	 * setup Free Linked List
+	 *
+	 * each BD "next" fields is initialized to the next entry
+	 */
+	for (i = 0; i < RDP_DIS_REOR_FLL_BUF_COUNT - 1; i++)
+		disp_write(priv, DSPTCHR_BDRAM_DATA(i),
+			   (i + 1) << BDRAM_DATA_DATA_SHIFT);
+
+	/* reset FLL head & tail pointer, skip first entry on purpose,
+	 * because I get reorder to drop it so is stalls the egress
+	 * queue */
+	disp_write(priv, DSPTCHR_FLLDES_HEAD, 1);
+	disp_write(priv, DSPTCHR_FLLDES_TAIL, RDP_DIS_REOR_FLL_BUF_COUNT - 1);
+
+	/* set FLL low threshold interrupt to the minimum */
+	disp_write(priv, DSPTCHR_FLLDES_LTINT, RDP_DIS_REOR_FLL_BUF_COUNT);
+
+	/* reset internal counters value, minus 1 is for the PD 0 that
+	 * we skipped */
+	disp_write(priv, DSPTCHR_FLLDES_BFIN, RDP_DIS_REOR_FLL_BUF_COUNT - 1);
+	disp_write(priv, DSPTCHR_FLLDES_BFOUT, 0);
+
+	/*
+	 * setup all needed VIQs
+	 */
+	viqs_enabled = 0;
+	viqs_for_dispatcher = 0;
+	viqs_delayed = 0;
+	viq_guaranteed_total = 0;
+
+	for (i = 0; i < ARRAY_SIZE(bbh_configs); i++) {
+		const struct bbh_config *bc = &bbh_configs[i];
+		const struct disp_viq_config *dvc;
+		unsigned int viq;
+
+		if (!bc->rx_configure)
+			continue;
+
+		viq = bc->rx_viq;
+
+		if (viq >= ARRAY_SIZE(disp_config.viqs)) {
+			dev_err(&priv->pdev->dev,
+				"BBH %zu references invalid viq id %u\n",
+				i, viq);
+			return -EINVAL;
+		}
+
+		dvc = &disp_config.viqs[viq];
+		if (!dvc->configure) {
+			dev_err(&priv->pdev->dev,
+				"BBH %zu references viq %u which "
+				"has no config\n", i, viq);
+			return -EINVAL;
+		}
+
+		if (viqs_enabled & (1 << viq)) {
+			dev_err(&priv->pdev->dev,
+				"duplicate viq %u used by bbh %zu\n", viq, i);
+			return -EINVAL;
+		}
+
+		viqs_enabled |= (1 << viq);
+		viqs_for_dispatcher |= (1 << viq);
+		viqs_delayed |= (1 << viq);
+
+		/* setup ingress congestion threshold */
+		val = disp_read(priv, DSPTCHR_CONGESTION_INGRS_CONGSTN(viq));
+		val &= ~CONGESTION_INGRS_CONGSTN_FRST_LVL_MASK;
+		val &= ~CONGESTION_INGRS_CONGSTN_SCND_LVL_MASK;
+		val &= ~CONGESTION_INGRS_CONGSTN_HYST_THRS_MASK;
+		val |= dvc->ing_cong_frst_lvl <<
+			CONGESTION_INGRS_CONGSTN_FRST_LVL_SHIFT;
+		val |= dvc->ing_cong_scnd_lvl <<
+			CONGESTION_INGRS_CONGSTN_SCND_LVL_SHIFT;
+		val |= dvc->ing_cong_hyst_thr <<
+			CONGESTION_INGRS_CONGSTN_HYST_THRS_SHIFT;
+		disp_write(priv, DSPTCHR_CONGESTION_INGRS_CONGSTN(viq), val);
+
+		/* setup egress congestion threshold */
+		val = disp_read(priv, DSPTCHR_CONGESTION_EGRS_CONGSTN(viq));
+		val &= ~CONGESTION_EGRS_CONGSTN_FRST_LVL_MASK;
+		val &= ~CONGESTION_EGRS_CONGSTN_SCND_LVL_MASK;
+		val &= ~CONGESTION_EGRS_CONGSTN_HYST_THRS_MASK;
+		/* FIXME: use specific value instead of the same as
+		 * ingress */
+		val |= dvc->ing_cong_frst_lvl <<
+			CONGESTION_EGRS_CONGSTN_FRST_LVL_SHIFT;
+		val |= dvc->ing_cong_scnd_lvl <<
+			CONGESTION_EGRS_CONGSTN_SCND_LVL_SHIFT;
+		val |= dvc->ing_cong_hyst_thr <<
+			CONGESTION_EGRS_CONGSTN_HYST_THRS_SHIFT;
+		disp_write(priv, DSPTCHR_CONGESTION_EGRS_CONGSTN(viq), val);
+
+		/* reset queue ingress size */
+		disp_write(priv, DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(viq), 0);
+
+		/* setup queue ingress limit */
+		val = disp_read(priv,
+				DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(viq));
+		val &= ~INGRS_QUEUES_Q_INGRS_LIMITS_CMN_MAX_MASK;
+		val &= ~INGRS_QUEUES_Q_INGRS_LIMITS_GURNTD_MAX_MASK;
+		val &= ~INGRS_QUEUES_Q_INGRS_LIMITS_CREDIT_CNT_MASK;
+		val |= dvc->common_pool_limit <<
+			INGRS_QUEUES_Q_INGRS_LIMITS_CMN_MAX_SHIFT;
+		val |= dvc->guaranteed_pool_limit <<
+			INGRS_QUEUES_Q_INGRS_LIMITS_GURNTD_MAX_SHIFT;
+		disp_write(priv,
+			   DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(viq), val);
+
+		viq_guaranteed_total += dvc->guaranteed_pool_limit;
+
+		/* setup queue coherency */
+		val = disp_read(priv,
+				DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(viq));
+		val &= ~INGRS_QUEUES_Q_INGRS_COHRENCY_CHRNCY_CNT_MASK;
+		val |= INGRS_QUEUES_Q_INGRS_COHRENCY_CHRNCY_EN_MASK;
+		disp_write(priv,
+			   DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(viq), val);
+
+		/*
+		 * setup queue credit configuration, this will write
+		 * the credit message to the BBH, which is said to
+		 * ignore it according to the documentation
+		 */
+		val = disp_read(priv, DSPTCHR_QUEUE_MAPPING_CRDT_CFG(viq));
+		val &= ~QUEUE_MAPPING_CRDT_CFG_BB_ID_MASK;
+		val &= ~QUEUE_MAPPING_CRDT_CFG_TRGT_ADD_MASK;
+		val |= bc->rx_broadbus_id <<
+			QUEUE_MAPPING_CRDT_CFG_BB_ID_SHIFT;
+		val |= QUEUE_MAPPING_CRDT_CFG_TRGT_ADD_NORMAL <<
+			QUEUE_MAPPING_CRDT_CFG_TRGT_ADD_SHIFT;
+		disp_write(priv, DSPTCHR_QUEUE_MAPPING_CRDT_CFG(viq), val);
+	}
+
+	/*
+	 * setup global pool size & congestion threshold
+	 */
+
+	/* setup ingress guaranteed pool limit */
+	val = disp_read(priv, DSPTCHR_POOL_SIZES_GRNTED_POOL_LMT);
+	val &= ~POOL_SIZES_GRNTED_POOL_LMT_POOL_LMT_MASK;
+	val |= viq_guaranteed_total <<
+		POOL_SIZES_GRNTED_POOL_LMT_POOL_LMT_SHIFT;
+	disp_write(priv, DSPTCHR_POOL_SIZES_GRNTED_POOL_LMT, val);
+
+	/* setup ingress guaranteed pool size */
+	val = disp_read(priv, DSPTCHR_POOL_SIZES_GRNTED_POOL_SIZE);
+	val &= ~POOL_SIZES_GRNTED_POOL_SIZE_POOL_SIZE_MASK;
+	val |= viq_guaranteed_total <<
+		POOL_SIZES_GRNTED_POOL_SIZE_POOL_SIZE_SHIFT;
+	disp_write(priv, DSPTCHR_POOL_SIZES_GRNTED_POOL_SIZE, val);
+
+	/* setup common pool limit (same formula as refsw) */
+	common_pool_size = (RDP_DIS_REOR_FLL_BUF_COUNT -
+			    RDP_DISP_VIQ_COUNT - 2 -
+			    viq_guaranteed_total);
+
+	val = disp_read(priv, DSPTCHR_POOL_SIZES_CMN_POOL_LMT);
+	val &= ~POOL_SIZES_CMN_POOL_LMT_POOL_LMT_MASK;
+	val |= common_pool_size << POOL_SIZES_CMN_POOL_LMT_POOL_LMT_SHIFT;
+	disp_write(priv, DSPTCHR_POOL_SIZES_CMN_POOL_LMT, val);
+
+	/* setup common pool max size */
+	val = disp_read(priv, DSPTCHR_POOL_SIZES_CMN_POOL_SIZE);
+	val &= ~POOL_SIZES_CMN_POOL_SIZE_POOL_SIZE_MASK;
+	val |= common_pool_size << POOL_SIZES_CMN_POOL_SIZE_POOL_SIZE_SHIFT;
+	disp_write(priv, DSPTCHR_POOL_SIZES_CMN_POOL_SIZE, val);
+
+	/* setup global ingress congestion threshold */
+	val = disp_read(priv, DSPTCHR_CONGESTION_GLBL_CONGSTN);
+	val &= ~CONGESTION_GLBL_CONGSTN_FRST_LVL_MASK;
+	val &= ~CONGESTION_GLBL_CONGSTN_SCND_LVL_MASK;
+	val &= ~CONGESTION_GLBL_CONGSTN_HYST_THRS_MASK;
+	/* NOTE(review): this value looks much larger than a masked
+	 * field -- confirm against the register layout */
+	val |= (common_pool_size * 40 * 100) <<
+		CONGESTION_GLBL_CONGSTN_FRST_LVL_SHIFT;
+	/* FIXME: refsw uses DSPTCHR_RESERVED_PRIORITY_BUFF_NUM, what
+	 * is it ? */
+	val |= (common_pool_size - 10) <<
+		CONGESTION_GLBL_CONGSTN_SCND_LVL_SHIFT;
+	val |= 8 << CONGESTION_GLBL_CONGSTN_HYST_THRS_SHIFT;
+	disp_write(priv, DSPTCHR_CONGESTION_GLBL_CONGSTN, val);
+
+	/* setup global egress congestion threshold */
+	val = disp_read(priv, DSPTCHR_CONGESTION_TOTAL_EGRS_CONGSTN);
+	val &= ~CONGESTION_TOTAL_EGRS_CONGSTN_FRST_LVL_MASK;
+	val &= ~CONGESTION_TOTAL_EGRS_CONGSTN_SCND_LVL_MASK;
+	val &= ~CONGESTION_TOTAL_EGRS_CONGSTN_HYST_THRS_MASK;
+	/* FIXME: hardcoded value */
+	val |= (common_pool_size * 40 * 100) <<
+		CONGESTION_TOTAL_EGRS_CONGSTN_FRST_LVL_SHIFT;
+	val |= (common_pool_size - 10) <<
+		CONGESTION_TOTAL_EGRS_CONGSTN_SCND_LVL_SHIFT;
+	val |= 8 << CONGESTION_TOTAL_EGRS_CONGSTN_HYST_THRS_SHIFT;
+	disp_write(priv, DSPTCHR_CONGESTION_TOTAL_EGRS_CONGSTN, val);
+
+	/* setup multicast pool limit */
+	val = disp_read(priv, DSPTCHR_POOL_SIZES_MULTI_CST_POOL_LMT);
+	val &= ~POOL_SIZES_MULTI_CST_POOL_LMT_POOL_LMT_MASK;
+	disp_write(priv, DSPTCHR_POOL_SIZES_MULTI_CST_POOL_LMT, val);
+
+	/* setup multicast pool size */
+	val = disp_read(priv, DSPTCHR_POOL_SIZES_MULTI_CST_POOL_SIZE);
+	val &= ~POOL_SIZES_MULTI_CST_POOL_SIZE_POOL_SIZE_MASK;
+	disp_write(priv, DSPTCHR_POOL_SIZES_MULTI_CST_POOL_SIZE, val);
+
+	/* setup runner pool limit */
+	val = disp_read(priv, DSPTCHR_POOL_SIZES_RNR_POOL_LMT);
+	val &= ~POOL_SIZES_RNR_POOL_LMT_POOL_LMT_MASK;
+	disp_write(priv, DSPTCHR_POOL_SIZES_RNR_POOL_LMT, val);
+
+	/* setup runner pool size */
+	val = disp_read(priv, DSPTCHR_POOL_SIZES_RNR_POOL_SIZE);
+	val &= ~POOL_SIZES_RNR_POOL_SIZE_POOL_SIZE_MASK;
+	disp_write(priv, DSPTCHR_POOL_SIZES_RNR_POOL_SIZE, val);
+
+	/* setup processing pool size */
+	val = disp_read(priv, DSPTCHR_POOL_SIZES_PRCSSING_POOL_SIZE);
+	val &= ~POOL_SIZES_PRCSSING_POOL_SIZE_POOL_SIZE_MASK;
+	disp_write(priv, DSPTCHR_POOL_SIZES_PRCSSING_POOL_SIZE, val);
+
+	/*
+	 * setup dispatch address for each runner
+	 */
+	for (i = 0; i < ARRAY_SIZE(disp_config.runners); i++) {
+		const struct disp_rnr_config *drc = &disp_config.runners[i];
+
+		if (!drc->configure)
+			continue;
+
+		val = disp_read(priv, DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(i));
+		val &= ~QUEUE_MAPPING_PD_DSPTCH_ADD_BASE_ADD_MASK;
+		val &= ~QUEUE_MAPPING_PD_DSPTCH_ADD_OFFSET_ADD_MASK;
+		val |= drc->pd_base <<
+			QUEUE_MAPPING_PD_DSPTCH_ADD_BASE_ADD_SHIFT;
+		val |= drc->pd_per_tsk_off <<
+			QUEUE_MAPPING_PD_DSPTCH_ADD_OFFSET_ADD_SHIFT;
+		disp_write(priv, DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(i), val);
+	}
+
+	/*
+	 * setup runner dispatch groups
+	 */
+	for (i = 0; i < ARRAY_SIZE(disp_config.groups); i++)
+		xrdp_setup_dispatcher_grp(priv, i);
+
+	/*
+	 * setup VIQ to runner group membership
+	 */
+	for (i = 0; i < ARRAY_SIZE(disp_config.viqs); i++) {
+		const struct disp_viq_config *dvc = &disp_config.viqs[i];
+		const struct disp_grp_config *dgc;
+		u32 val;
+
+		if (!dvc->configure)
+			continue;
+
+		if (dvc->runner_group >= ARRAY_SIZE(disp_config.groups)) {
+			dev_err(&priv->pdev->dev,
+				"VIQ %zu references invalid runner group %u\n",
+				i, dvc->runner_group);
+			return -EINVAL;
+		}
+
+		dgc = &disp_config.groups[dvc->runner_group];
+		if (!dgc->configure) {
+			dev_err(&priv->pdev->dev,
+				"VIQ %zu references unconfigured runner "
+				"group %u\n", i, dvc->runner_group);
+			return -EINVAL;
+		}
+
+		val = disp_read(priv, DSPTCHR_MASK_MSK_Q(dvc->runner_group));
+		val |= (1 << i);
+		disp_write(priv, DSPTCHR_MASK_MSK_Q(dvc->runner_group), val);
+	}
+
+	/* setup VIQs destination */
+	disp_write(priv, DSPTCHR_QUEUE_MAPPING_Q_DEST, ~viqs_for_dispatcher);
+
+	/* setup VIQs delay */
+	disp_write(priv, DSPTCHR_MASK_DLY_Q, viqs_delayed);
+	disp_write(priv, DSPTCHR_MASK_NON_DLY_Q, ~viqs_delayed);
+
+	/*
+	 * setup each VIQ linked list
+	 */
+	for (i = 0; i < ARRAY_SIZE(disp_config.viqs); i++) {
+		const struct disp_viq_config *dvc = &disp_config.viqs[i];
+		u32 fll_head, val, fll_new_head;
+
+		if (!dvc->configure)
+			continue;
+
+		/* manually allocate from FLL */
+		fll_head = disp_read(priv, DSPTCHR_FLLDES_HEAD);
+
+		/* setup VIQ head/tail */
+		disp_write(priv, DSPTCHR_QDES_HEAD(i), fll_head);
+		disp_write(priv, DSPTCHR_QDES_TAIL(i), fll_head);
+		disp_write(priv, DSPTCHR_QDES_REG_Q_HEAD(i), fll_head);
+
+		/* move FLL head & credit one 'out' buffer from fll */
+		fll_new_head = disp_read(priv, DSPTCHR_BDRAM_DATA(fll_head)) >>
+			BDRAM_DATA_DATA_SHIFT;
+
+		disp_write(priv, DSPTCHR_FLLDES_HEAD, fll_new_head);
+
+		val = disp_read(priv, DSPTCHR_FLLDES_BFOUT);
+		val++;
+		disp_write(priv, DSPTCHR_FLLDES_BFOUT, val);
+	}
+
+	/* enable VIQs */
+	disp_write(priv, DSPTCHR_REORDER_CFG_VQ_EN, viqs_enabled);
+
+	return 0;
+}
+
+/*
+ * enable the dispatcher/reorder block and hand it initial egress QM
+ * credits.
+ */
+static void xrdp_enable_dispatcher(struct bcm_xrdp_priv *priv)
+{
+	u32 val;
+
+	/* enable reorder block */
+	val = disp_read(priv, DSPTCHR_REORDER_CFG_DSPTCHR_REORDR_CFG);
+	val |= REORDER_CFG_DSPTCHR_REORDR_CFG_EN_MASK |
+		REORDER_CFG_DSPTCHR_REORDR_CFG_REORDR_PAR_MOD_MASK |
+		REORDER_CFG_DSPTCHR_REORDR_CFG_DSPTCHR_PER_ENH_POD_MASK;
+	disp_write(priv, DSPTCHR_REORDER_CFG_DSPTCHR_REORDR_CFG, val);
+
+	/* since we don't initialize QM, it does not refill credits in
+	 * the reorder, which seems to be needed even if we ask the
+	 * reorder to drop */
+	disp_write(priv, DSPTCHR_EGRS_QUEUES_EGRS_DLY_QM_CRDT,
+		   0x42);
+	disp_write(priv, DSPTCHR_EGRS_QUEUES_EGRS_NON_DLY_QM_CRDT,
+		   0x42);
+}
+
+/*
+ * QM setup placeholder: the QM block is intentionally left
+ * unprogrammed for now (xrdp_enable_dispatcher() works around the
+ * missing reorder credit refill).  The commented-out code is kept as
+ * a reference for enabling reorder credits later.
+ */
+static int xrdp_setup_qm(struct bcm_xrdp_priv *priv)
+{
+	/* u32 val; */
+
+	/* val = qm_read(priv, QM_GLOBAL_CFG_QM_ENABLE_CTRL); */
+	/* val |= QM_GLOBAL_CFG_QM_ENABLE_CTRL_REORDER_CREDIT_ENABLE_MASK; */
+	/* qm_write(priv, QM_GLOBAL_CFG_QM_ENABLE_CTRL, val); */
+	return 0;
+}
+
+/*
+ * per-core runner setup: program the microsecond tick value and the
+ * PSRAM base used by address calculation.  always returns 0.
+ */
+static int xrdp_setup_runner(struct bcm_xrdp_priv *priv, unsigned int id)
+{
+	u32 val;
+
+	/* RDP_RUNNER_FREQ is presumably the number of runner clocks
+	 * per microsecond -- TODO confirm */
+	val = runner_read(priv, id, RNR_REGS_CFG_GLOBAL_CTRL);
+	val &= ~CFG_GLOBAL_CTRL_MICRO_SEC_VAL_MASK;
+	val |= RDP_RUNNER_FREQ << CFG_GLOBAL_CTRL_MICRO_SEC_VAL_SHIFT;
+	runner_write(priv, id, RNR_REGS_CFG_GLOBAL_CTRL, val);
+
+	/* setup psram base, used to compute address when addr_calc is used */
+	/* the register takes the physical address in 1 MB units */
+	val = priv->regs_phys[XRDP_AREA_CORE] + PSRAM_OFFSET_0;
+	val >>= 20;
+	runner_write(priv, id, RNR_REGS_CFG_PSRAM_CFG, val);
+
+	/* FIXME: setup DDR config too to be able to use FPM */
+	return 0;
+}
+
+/*
+ * per-quad setup: DMA arbiter congestion threshold and UBUS flow
+ * control tokens.  always returns 0.
+ */
+static int xrdp_setup_runner_quad(struct bcm_xrdp_priv *priv, unsigned int id)
+{
+	unsigned int slv;
+	u32 val;
+
+	/* the original code hardcoded quad 0 in every access even
+	 * though the caller iterates over all quads; use the quad id
+	 * we were given */
+	val = runner_quad_read(priv, id, RNR_QUAD_GENERAL_CONFIG_DMA_ARB_CFG);
+	val &= ~GENERAL_CONFIG_DMA_ARB_CFG_CONGEST_THRESHOLD_MASK;
+	val |= 2 << GENERAL_CONFIG_DMA_ARB_CFG_CONGEST_THRESHOLD_SHIFT;
+	runner_quad_write(priv, id, RNR_QUAD_GENERAL_CONFIG_DMA_ARB_CFG, val);
+
+	for (slv = 16; slv < 20; slv++)
+		runner_quad_write(priv, id,
+				  RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(slv),
+				  8);
+
+	/* for DDR performance */
+	runner_quad_write(priv, id, RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(2), 8);
+	return 0;
+}
+
+/*
+ * one-shot hardware bring-up: zero internal memories, compute the
+ * derived parameters, then program UBUS, SBPM, the dispatcher, every
+ * BBH RX/TX, the DMA modules, QM and all runner quads/cores.
+ *
+ * returns 0 on success or a negative errno from a sub-step.
+ */
+static int xrdp_setup(struct bcm_xrdp_priv *priv)
+{
+	unsigned int i;
+	int ret;
+
+	xrdp_zero_memories(priv);
+
+	ret = xrdp_compute_params(priv);
+	if (ret)
+		return ret;
+
+	xrdp_setup_ubus(priv);
+	xrdp_setup_sbpm(priv);
+
+	ret = xrdp_setup_dispatcher(priv);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(priv->bbh_params); i++) {
+		xrdp_setup_bbh_rx(priv, i);
+		xrdp_setup_bbh_tx(priv, i);
+	}
+
+	for (i = 0; i < ARRAY_SIZE(priv->dma_params); i++)
+		xrdp_setup_dma_module(priv, i);
+
+	ret = xrdp_setup_qm(priv);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < RDP_RUNNER_QUAD_COUNT; i++)
+		xrdp_setup_runner_quad(priv, i);
+
+	for (i = 0; i < RDP_RUNNER_COUNT; i++)
+		xrdp_setup_runner(priv, i);
+
+	return 0;
+}
+
+
+/*
+ * final enable step: turn on RX on every BBH, then let the
+ * dispatcher/reorder block run.
+ */
+static void xrdp_enable(struct bcm_xrdp_priv *priv)
+{
+	size_t bbh;
+
+	for (bbh = 0; bbh < ARRAY_SIZE(priv->bbh_params); bbh++)
+		xrdp_enable_bbh_rx(priv, bbh);
+
+	xrdp_enable_dispatcher(priv);
+}
+
+/*
+ * return the register window offset where a program section of the
+ * given type lives for the given runner core.
+ */
+static u32 runner_get_section_offset(enum rpgm_section_type type,
+				      unsigned int core_id)
+{
+	if (type == RPGM_SECTION_CODE)
+		return RNR_INST_OFFSET(core_id);
+	if (type == RPGM_SECTION_DATA)
+		return RNR_SRAM_OFFSET(core_id);
+	if (type == RPGM_SECTION_CONTEXT)
+		return RNR_CNXT_OFFSET(core_id);
+	if (type == RPGM_SECTION_PREDICT)
+		return RNR_PRED_OFFSET(core_id);
+
+	/* callers validate the section type before calling us */
+	BUG();
+	return 0;
+}
+
+/*
+ * return the size of the memory window backing a program section of
+ * the given type.
+ */
+static u32 runner_get_section_size(enum rpgm_section_type type)
+{
+	if (type == RPGM_SECTION_CODE)
+		return RNR_INST_SIZE;
+	if (type == RPGM_SECTION_DATA)
+		return RNR_SRAM_SIZE;
+	if (type == RPGM_SECTION_CONTEXT)
+		return RNR_CNXT_SIZE;
+	if (type == RPGM_SECTION_PREDICT)
+		return RNR_PRED_SIZE;
+
+	/* callers validate the section type before calling us */
+	BUG();
+	return 0;
+}
+
+/*
+ * load program into memory
+ *
+ * parses an rpgm image (big-endian header, followed by section
+ * descriptors and per-word load records) and writes each section
+ * into the matching memory window of every runner core selected by
+ * core_mask.  *version receives the version field of the header.
+ *
+ * returns 0 on success, nonzero on a malformed image (callers treat
+ * the result as a boolean).
+ */
+static int runner_load_program(struct bcm_xrdp_priv *priv,
+			       unsigned int core_mask,
+			       const struct firmware *fw,
+			       u32 *version)
+{
+	struct rpgm_header hdr;
+	const u8 *fw_in = fw->data;
+	const u8 *fw_end = fw->data + fw->size;
+	unsigned int i, load_mask;
+
+	/* load every section type; presumably kept as a mask for
+	 * future partial loads -- TODO confirm */
+	load_mask = ~0;
+
+	if (fw_end - fw_in < sizeof (hdr)) {
+		dev_err(&priv->pdev->dev, "file too short\n");
+		return 1;
+	}
+
+	/* copy out of the firmware buffer: it may not be aligned */
+	memcpy(&hdr, fw_in, sizeof (hdr));
+	fw_in += sizeof (hdr);
+
+	if (be32_to_cpu(hdr.magic) != RPGM_MAGIC) {
+		dev_err(&priv->pdev->dev, "bad magic\n");
+		return 1;
+	}
+
+	*version = be32_to_cpu(hdr.version);
+
+	for (i = 0; i < be32_to_cpu(hdr.section_count); i++) {
+		struct rpgm_section s;
+		unsigned int li, type, core_id, size;
+
+		if (fw_end - fw_in < sizeof (s)) {
+			dev_err(&priv->pdev->dev, "file too short\n");
+			return 1;
+		}
+
+		memcpy(&s, fw_in, sizeof (s));
+		fw_in += sizeof (s);
+
+		type = be32_to_cpu(s.type);
+		switch (type) {
+		case RPGM_SECTION_CODE:
+		case RPGM_SECTION_DATA:
+		case RPGM_SECTION_CONTEXT:
+		case RPGM_SECTION_PREDICT:
+			break;
+		default:
+			dev_err(&priv->pdev->dev,
+				"unknown section type %u\n", type);
+			return 1;
+		}
+
+		size = runner_get_section_size(type);
+
+		/* check if memset is requested */
+		if ((load_mask & (1 << type)) && s.do_memset) {
+			u32 value;
+
+			value = be32_to_cpu(s.memset_value);
+			/* fill the whole section window of every
+			 * selected core before applying the loads */
+			for (core_id = 0;
+			     core_id < RDP_RUNNER_COUNT; core_id++) {
+				u32 offset;
+
+				if (!(core_mask & (1 << core_id)))
+					continue;
+
+				offset = runner_get_section_offset(type,
+								   core_id);
+				xrdp_memset32be(priv, XRDP_AREA_CORE,
+						offset, value, size);
+			}
+		}
+
+		/* load section data */
+		for (li = 0; li < be32_to_cpu(s.load_count); li++) {
+			struct rpgm_load l;
+			u32 val, load_offset;
+
+			if (fw_end - fw_in < sizeof (l)) {
+				dev_err(&priv->pdev->dev,
+					"file too short\n");
+				return 1;
+			}
+
+			memcpy(&l, fw_in, sizeof (l));
+			fw_in += sizeof (l);
+
+			/* even when the type is masked out we must
+			 * keep consuming load records to stay in sync
+			 * with the stream */
+			if (!(load_mask & (1 << type)))
+				continue;
+
+			val = be32_to_cpu(l.value);
+			load_offset = be32_to_cpu(l.offset);
+
+			if (load_offset >= size) {
+				dev_err(&priv->pdev->dev, "load at "
+					"0x%08x out of range "
+					"(size=0x%08x)\n",
+					load_offset, size);
+				return 1;
+			}
+
+			for (core_id = 0;
+			     core_id < RDP_RUNNER_COUNT; core_id++) {
+				u32 offset;
+
+				if (!(core_mask & (1 << core_id)))
+					continue;
+				offset = runner_get_section_offset(type,
+								   core_id);
+				xrdp_write32be(priv, XRDP_AREA_CORE,
+					       offset + load_offset, val);
+			}
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * set the enable bit of every runner core selected by core_mask.
+ */
+static void runner_start(struct bcm_xrdp_priv *priv, unsigned int core_mask)
+{
+	size_t core;
+
+	for (core = 0; core < RDP_RUNNER_COUNT; core++) {
+		u32 ctrl;
+
+		if (!(core_mask & (1 << core)))
+			continue;
+
+		ctrl = runner_read(priv, core, RNR_REGS_CFG_GLOBAL_CTRL);
+		ctrl |= CFG_GLOBAL_CTRL_EN_MASK;
+		runner_write(priv, core, RNR_REGS_CFG_GLOBAL_CTRL, ctrl);
+	}
+}
+
+/*
+ * load and start the ethernet runner firmware on the cores selected
+ * by core_mask.  returns 0 on success or a negative errno.
+ */
+static int enet_fw_load(struct bcm_xrdp_priv *priv,
+			unsigned int core_mask)
+{
+	const struct firmware *firmware;
+	u32 version;
+	int ret;
+
+	/* load firmwares file from filesystem */
+	ret = request_firmware(&firmware,
+			       "xrdp/enet_firmware/bcm63xx_enet_runner.rpgm",
+			       &priv->pdev->dev);
+	if (ret) {
+		/* nothing to release here: the old code jumped to a
+		 * common label that called release_firmware() on a
+		 * pointer request_firmware() had failed to fill */
+		dev_err(&priv->pdev->dev, "failed to load "
+			"ethernet runner firmware\n");
+		return ret;
+	}
+
+	if (runner_load_program(priv, core_mask, firmware, &version)) {
+		dev_err(&priv->pdev->dev, "invalid firmware file\n");
+		release_firmware(firmware);
+		return -EINVAL;
+	}
+
+	runner_start(priv, core_mask);
+	dev_info(&priv->pdev->dev, "enet fw version:%u\n", version);
+
+	release_firmware(firmware);
+	return 0;
+}
+
+/*
+ * load and start the DSL runner firmware on the cores selected by
+ * core_mask.  returns 0 on success or a negative errno.
+ */
+static int dsl_fw_load(struct bcm_xrdp_priv *priv,
+		       unsigned int core_mask)
+{
+	const struct firmware *firmware;
+	u32 version;
+	int ret;
+
+	/* load firmwares file from filesystem */
+	ret = request_firmware(&firmware,
+			       "xrdp/dsl_firmware/bcm63xx_dsl_runner.rpgm",
+			       &priv->pdev->dev);
+	if (ret) {
+		/* nothing to release here: the old code jumped to a
+		 * common label that called release_firmware() on a
+		 * pointer request_firmware() had failed to fill */
+		dev_err(&priv->pdev->dev, "failed to load "
+			"dsl runner firmware\n");
+		return ret;
+	}
+
+	if (runner_load_program(priv, core_mask, firmware, &version)) {
+		dev_err(&priv->pdev->dev, "invalid firmware file\n");
+		release_firmware(firmware);
+		return -EINVAL;
+	}
+
+	runner_start(priv, core_mask);
+	dev_info(&priv->pdev->dev, "dsl fw version:%u\n", version);
+
+	release_firmware(firmware);
+	return 0;
+}
+
+/*
+ * bring the XRDP block out of reset, program every hardware block,
+ * optionally load the runner firmwares and finally enable the RX
+ * paths.  returns 0 on success or a negative errno.
+ */
+int bcm_xrdp_init(struct bcm_xrdp_priv *priv)
+{
+	int ret;
+
+	if (reset_control_deassert(priv->rdp_rst)) {
+		/* was a bare printk without log level or device
+		 * context */
+		dev_err(&priv->pdev->dev,
+			"failed to deassert XRDP reset\n");
+		return -EIO;
+	}
+
+	if (xrdp_setup(priv)) {
+		dev_err(&priv->pdev->dev, "failed to setup XRDP\n");
+		return -EIO;
+	}
+
+	/* skip_load is presumably a debug knob to boot without runner
+	 * firmware -- defined elsewhere in this file */
+	if (!skip_load) {
+		/* enet firmware on cores 0-4, dsl firmware on core 5 */
+		ret = enet_fw_load(priv, 0x1f);
+		if (ret)
+			return ret;
+
+		ret = dsl_fw_load(priv, (1 << 5));
+		if (ret)
+			return ret;
+	}
+
+	xrdp_enable(priv);
+	return 0;
+}
+
+/*
+ * attach the reserved-memory region at index @idx of @dev and
+ * allocate a coherent buffer from it.
+ *
+ * on success *ptr/*addr receive the CPU/DMA addresses and *size the
+ * size declared in the device tree.  returns 0 or a negative errno.
+ */
+static __maybe_unused int init_reserved_mem(struct device *dev, int idx,
+			     void **ptr,
+			     dma_addr_t *addr,
+			     size_t *size)
+{
+	int ret;
+	struct device_node *mem_node;
+	u64 size64;
+
+	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, idx);
+	if (ret) {
+		dev_err(dev,
+			"of_reserved_mem_device_init_by_idx(%d) failed 0x%x\n",
+			idx, ret);
+		return -ENODEV;
+	}
+
+	/* read the size of the same region we just attached to (the
+	 * original code always parsed phandle index 0) */
+	mem_node = of_parse_phandle(dev->of_node, "memory-region", idx);
+	if (!mem_node) {
+		dev_err(dev, "No memory-region found for index %d\n", idx);
+		return -ENODEV;
+	}
+
+	ret = of_property_read_u64(mem_node, "size", &size64);
+	/* drop the reference taken by of_parse_phandle() (the
+	 * original code leaked it) */
+	of_node_put(mem_node);
+	if (ret) {
+		dev_err(dev, "of_property_read_u64 fails 0x%x\n",
+			ret);
+		return -ENODEV;
+	}
+	*size = size64;
+
+	/* NOTE(review): only half the region is allocated while *size
+	 * reports the full region size -- confirm this is intended */
+	*ptr = dmam_alloc_coherent(dev, *size / 2, addr, 0);
+	if (!*ptr) {
+		dev_err(dev, "DMA alloc memory of size %zu failed\n", *size);
+		return -ENODEV;
+	}
+
+	return 0;
+}
+
+/*
+ * platform probe: grab the reset control, map the "core" and
+ * "wan_top" register resources, fetch the UBUS masters, register the
+ * ioctl/debug interfaces and initialize the XRDP hardware.
+ */
+static int bcm_xrdp_probe(struct platform_device *pdev)
+{
+	struct bcm_xrdp_priv *priv;
+	struct resource *res_core, *res_wan_top;
+	struct reset_control *rdp_rst;
+	size_t i;
+	int ret;
+
+	rdp_rst = devm_reset_control_get(&pdev->dev, "rdp");
+	if (IS_ERR(rdp_rst)) {
+		/* propagate the real error (was -ENODEV) so probe
+		 * deferral works and is not logged as an error */
+		return dev_err_probe(&pdev->dev, PTR_ERR(rdp_rst),
+				     "missing rdp reset control\n");
+	}
+
+	res_core = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+	if (!res_core) {
+		dev_err(&pdev->dev, "unable to get register core resource\n");
+		return -ENODEV;
+	}
+
+	res_wan_top = platform_get_resource_byname(pdev, IORESOURCE_MEM, "wan_top");
+	if (!res_wan_top) {
+		dev_err(&pdev->dev, "unable to get register wan_top resource\n");
+		return -ENODEV;
+	}
+
+	priv = devm_kzalloc(&pdev->dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	spin_lock_init(&priv->irq_lock);
+
+	/* devm_ioremap_resource() returns an ERR_PTR(), never NULL:
+	 * the previous NULL checks could never trigger */
+	priv->regs[0] = devm_ioremap_resource(&pdev->dev, res_core);
+	if (IS_ERR(priv->regs[0])) {
+		dev_err(&pdev->dev, "unable to ioremap regs\n");
+		return PTR_ERR(priv->regs[0]);
+	}
+	priv->regs_phys[0] = res_core->start;
+	priv->regs_size[0] = resource_size(res_core);
+
+	priv->regs[1] = devm_ioremap_resource(&pdev->dev, res_wan_top);
+	if (IS_ERR(priv->regs[1])) {
+		dev_err(&pdev->dev, "unable to ioremap wan_top regs\n");
+		return PTR_ERR(priv->regs[1]);
+	}
+	priv->regs_phys[1] = res_wan_top->start;
+	priv->regs_size[1] = resource_size(res_wan_top);
+
+#ifdef CONFIG_SOC_BCM63XX_XRDP_IOCTL
+	if (init_reserved_mem(&pdev->dev, 0,
+			      &priv->rmem_tm.ptr,
+			      &priv->rmem_tm.dma_addr,
+			      &priv->rmem_tm.size)) {
+		dev_err(&pdev->dev, "failed to get reserved TM memory\n");
+		return -ENOMEM;
+	}
+#endif
+
+	for (i = 0; i < 6; i++) {
+		priv->ubus_masters[i] =
+			ubus4_master_of_get_index(pdev->dev.of_node, i);
+		if (IS_ERR(priv->ubus_masters[i])) {
+			/* newline was missing from this message */
+			dev_err(&pdev->dev,
+				"unable to get UBUS master %zu\n", i);
+			return PTR_ERR(priv->ubus_masters[i]);
+		}
+	}
+
+	priv->rdp_rst = rdp_rst;
+	priv->pdev = pdev;
+	INIT_LIST_HEAD(&priv->user_dma_list);
+	platform_set_drvdata(pdev, priv);
+
+	ret = bcm_xrdp_ioctl_register(priv);
+	if (ret)
+		return ret;
+
+	bcm_xrdp_dbg_init(priv);
+
+	ret = bcm_xrdp_init(priv);
+	if (ret) {
+		bcm_xrdp_ioctl_unregister(priv);
+		bcm_xrdp_dbg_exit();
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * platform remove: tear down debug and ioctl interfaces, then put
+ * the XRDP block back into reset.
+ */
+static void bcm_xrdp_remove(struct platform_device *pdev)
+{
+	struct bcm_xrdp_priv *priv;
+
+	priv = platform_get_drvdata(pdev);
+	bcm_xrdp_dbg_exit();
+	bcm_xrdp_ioctl_unregister(priv);
+	reset_control_assert(priv->rdp_rst);
+}
+
+/* devicetree match table */
+static const struct of_device_id bcm63xx_xrdp_of_match[] = {
+	{ .compatible = "brcm,bcm63158-xrdp" },
+	{ /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, bcm63xx_xrdp_of_match);
+
+/*
+ * driver registration; .owner is filled in by the platform core
+ * through module_platform_driver(), so it is not set here.
+ */
+struct platform_driver bcm63xx_xrdp_driver = {
+	.probe	= bcm_xrdp_probe,
+	.remove	= bcm_xrdp_remove,
+	.driver	= {
+		.name		= "bcm63xx_xrdp",
+		.of_match_table = bcm63xx_xrdp_of_match,
+	},
+};
+
+module_platform_driver(bcm63xx_xrdp_driver);
+
+MODULE_DESCRIPTION("BCM63xx XRDP driver");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_LICENSE("GPL");
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/xrdp_api.c linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/xrdp_api.c
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/xrdp_api.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/xrdp_api.c	2025-09-25 17:40:35.075364786 +0200
@@ -0,0 +1,477 @@
+#include "xrdp_priv.h"
+#include <linux/soc/bcm63xx_xrdp_api.h>
+
+struct bbh_irq  {
+	bool		assigned;
+	u32		rx_mask;
+	u32		tx_mask;
+};
+
+static struct bbh_irq bbh_irq_cache[RDP_BBH_COUNT];
+static u32 irq_mask_free = ~0;
+
+/*
+ *
+ */
+static int irq_bit_alloc(unsigned int bbh,
+			 unsigned int rx_count,
+			 unsigned int tx_count,
+			 unsigned long *rx_mask,
+			 unsigned long *tx_mask)
+{
+	u32 free;
+	unsigned int i;
+
+	if (bbh >= ARRAY_SIZE(bbh_irq_cache))
+		return -EINVAL;
+
+	if (bbh_irq_cache[bbh].assigned) {
+		*rx_mask = bbh_irq_cache[bbh].rx_mask;
+		*tx_mask = bbh_irq_cache[bbh].tx_mask;
+		return 0;
+	}
+
+	free = irq_mask_free;
+	*rx_mask = 0;
+	for (i = 0; i < rx_count; i++) {
+		int bit = ffs(free);
+		if (!bit) {
+			printk(KERN_ERR "not enough irq bits available\n");
+			return -EBUSY;
+		}
+
+		*rx_mask |= (1U << (bit - 1));
+		free &= ~(1U << (bit - 1));
+	}
+
+	*tx_mask = 0;
+	for (i = 0; i < tx_count; i++) {
+		int bit = ffs(free);
+		if (!bit) {
+			printk(KERN_ERR "not enough irq bits available\n");
+			return -EBUSY;
+		}
+
+		*tx_mask |= (1U << (bit - 1));
+		free &= ~(1U << (bit - 1));
+	}
+
+	bbh_irq_cache[bbh].assigned = true;
+	bbh_irq_cache[bbh].rx_mask = *rx_mask;
+	bbh_irq_cache[bbh].tx_mask = *tx_mask;
+
+	irq_mask_free = free;
+	return 0;
+}
+
+/*
+ *
+ */
+static int xrdp_find_irq(struct bcm_xrdp_priv *priv,
+			 unsigned int irq_id)
+{
+	char name[32];
+	int irq;
+
+	scnprintf(name, sizeof (name), "queue%u", irq_id);
+	irq = platform_get_irq_byname(priv->pdev, name);
+	if (irq < 0) {
+		dev_err(&priv->pdev->dev, "cannot find irq %u\n", irq_id);
+		return irq;
+	}
+
+	return irq;
+}
+
+/*
+ *
+ */
+static int xrdp_gen_irq_mask(struct bcm_xrdp_priv *priv,
+			     unsigned int max_count,
+			     unsigned long idx_mask,
+			     unsigned int *irqs,
+			     u32 *irqs_mask)
+{
+	unsigned int irq_idx, count;
+
+	count = 0;
+	for_each_set_bit(irq_idx, &idx_mask, 32) {
+		int irq;
+
+		irq = xrdp_find_irq(priv, irq_idx);
+		if (irq < 0)
+			return irq;
+
+		if (count == max_count) {
+			dev_err(&priv->pdev->dev,
+				"irq array too small\n");
+			return -EINVAL;
+		}
+
+		irqs[count] = irq;
+		irqs_mask[count] = (1U << irq_idx);
+		count++;
+	}
+
+	return 0;
+}
+
+/*
+ *
+ */
+bool bcm_xrdp_api_bbh_txq_is_empty(struct bcm_xrdp_priv *priv,
+				   unsigned int bbh_id,
+				   unsigned int hw_queue_idx)
+{
+	u32 val;
+
+	/* not implemented but could be */
+	BUG_ON(hw_queue_idx >= 32);
+
+	bbh_tx_write(priv, bbh_id,
+		     BBH_TX_DEBUG_COUNTERS_SWRDEN,
+		     DEBUG_COUNTERS_SWRDEN_PDEMPTYSEL_MASK);
+	bbh_tx_write(priv, bbh_id,
+		     BBH_TX_DEBUG_COUNTERS_SWRDADDR,
+		     0);
+
+	val = bbh_tx_read(priv, bbh_id, BBH_TX_DEBUG_COUNTERS_SWRDDATA);
+	if (val & (1U << hw_queue_idx))
+		return true;
+	return false;
+}
+
+EXPORT_SYMBOL(bcm_xrdp_api_bbh_txq_is_empty);
+
+/*
+ * API
+ */
+int bcm_xrdp_api_get_enet_params(struct bcm_xrdp_priv *priv,
+				 unsigned int bbh_id,
+				 struct bcm_xrdp_enet_params *params)
+{
+	switch (bbh_id) {
+	case RDP_BBH_IDX_UNIMAC0:
+	case RDP_BBH_IDX_UNIMAC1:
+	case RDP_BBH_IDX_UNIMAC2:
+	case RDP_BBH_IDX_PON:
+	case RDP_BBH_IDX_AE10:
+	{
+		unsigned int i;
+		unsigned long rx_mask, tx_mask;
+		unsigned int rx_core, tx_core, tx_bbh_bbid;
+		unsigned int tx_fw_queue_id, rx_fw_queue_id;
+		unsigned int tx_bbh_queue_id;
+		unsigned int tx_bbh_pd_queue_size;
+		unsigned int tx_bbh_mdu_addr;
+		void *mac_regs;
+		bool tx_need_reporting;
+		bool tx_need_batch;
+
+		params->rx_queue_count = 1;
+		params->tx_queue_count = 2;
+		BUG_ON(params->rx_queue_count > XRDP_MAX_RX_QUEUE);
+		BUG_ON(params->tx_queue_count > XRDP_MAX_TX_QUEUE);
+
+		if (irq_bit_alloc(bbh_id,
+				  params->rx_queue_count,
+				  params->tx_queue_count,
+				  &rx_mask, &tx_mask)) {
+			dev_err(&priv->pdev->dev,
+				"failed to alloc enet %u irq\n", bbh_id);
+			return -EINVAL;
+		}
+
+		if (xrdp_gen_irq_mask(priv,
+				      XRDP_MAX_RX_QUEUE,
+				      rx_mask,
+				      params->rx_irq,
+				      params->rx_irq_mask))
+			return -EINVAL;
+
+		if (xrdp_gen_irq_mask(priv,
+				      XRDP_MAX_TX_QUEUE,
+				      tx_mask,
+				      params->tx_irq,
+				      params->tx_done_irq_mask))
+			return -EINVAL;
+
+		switch (bbh_id) {
+		case RDP_BBH_IDX_UNIMAC0:
+			rx_core = UNIMAC0_BBH_RX_CORE;
+			tx_core = UNIMACx_BBH_TX_CORE;
+			rx_fw_queue_id = UNIMAC0_BBH_RX_QUEUE;
+			tx_fw_queue_id = UNIMAC0_BBH_TX_QUEUE;
+			tx_bbh_bbid = BB_ID_TX_LAN;
+			tx_bbh_queue_id = 0;
+			mac_regs = priv->regs[0] + UNIMAC_OFFSET;
+			tx_need_reporting = false;
+			tx_need_batch = false;
+			tx_bbh_pd_queue_size = UNIFIED_BBH_PD_FIFO_SIZE;
+			tx_bbh_mdu_addr = 0x1800;
+			break;
+		case RDP_BBH_IDX_UNIMAC1:
+			rx_core = UNIMAC1_BBH_RX_CORE;
+			tx_core = UNIMACx_BBH_TX_CORE;
+			rx_fw_queue_id = UNIMAC1_BBH_RX_QUEUE;
+			tx_fw_queue_id = UNIMAC1_BBH_TX_QUEUE;
+			tx_bbh_bbid = BB_ID_TX_LAN;
+			tx_bbh_queue_id = 1;
+			mac_regs = priv->regs[0] + UNIMAC_OFFSET;
+			tx_need_reporting = false;
+			tx_need_batch = false;
+			tx_bbh_pd_queue_size = UNIFIED_BBH_PD_FIFO_SIZE;
+			tx_bbh_mdu_addr = 0x1800;
+			break;
+		case RDP_BBH_IDX_UNIMAC2:
+			rx_core = UNIMAC2_BBH_RX_CORE;
+			tx_core = UNIMACx_BBH_TX_CORE;
+			rx_fw_queue_id = UNIMAC2_BBH_RX_QUEUE;
+			tx_fw_queue_id = UNIMAC2_BBH_TX_QUEUE;
+			tx_bbh_bbid = BB_ID_TX_LAN;
+			tx_bbh_queue_id = 2;
+			mac_regs = priv->regs[0] + UNIMAC_OFFSET;
+			tx_need_reporting = false;
+			tx_need_batch = false;
+			tx_bbh_pd_queue_size = UNIFIED_BBH_PD_FIFO_SIZE;
+			tx_bbh_mdu_addr = 0x1800;
+			break;
+		case RDP_BBH_IDX_PON:
+			rx_core = PON_BBH_RX_CORE;
+			tx_core = PON_BBH_TX_CORE;
+			rx_fw_queue_id = PON_BBH_RX_QUEUE;
+			tx_fw_queue_id = PON_BBH_TX_QUEUE;
+			tx_bbh_bbid = BB_ID_TX_PON;
+			tx_bbh_queue_id = 0;
+			mac_regs = NULL;
+			tx_need_reporting = true;
+			tx_need_batch = true;
+			tx_bbh_pd_queue_size = PON_BBH_PD_FIFO_SIZE;
+			tx_bbh_mdu_addr = 0x1800;
+			break;
+		case RDP_BBH_IDX_AE10:
+			rx_core = AE10_BBH_RX_CORE;
+			tx_core = AE10_BBH_TX_CORE;
+			rx_fw_queue_id = AE10_BBH_RX_QUEUE;
+			tx_fw_queue_id = AE10_BBH_TX_QUEUE;
+			tx_bbh_bbid = BB_ID_TX_10G;
+			tx_bbh_queue_id = 0;
+			mac_regs = NULL;
+			tx_need_reporting = false;
+			tx_need_batch = false;
+			tx_bbh_pd_queue_size = AE_BBH_PD_FIFO_SIZE;
+			tx_bbh_mdu_addr = 0x1900;
+			break;
+		default:
+			BUG();
+			break;
+		}
+
+		params->mac_regs = mac_regs;
+		params->tx_bbh_bbid = tx_bbh_bbid |
+			(tx_bbh_queue_id << BB_MSG_RNR_TO_BBH_TX_QUEUE_SHIFT);
+		params->tx_bbh_queue_id = tx_bbh_queue_id;
+		params->rx_regs = priv->regs[0] +
+			RNR_SRAM_OFFSET(rx_core) +
+			ENET_FW_RX_REGS_SRAM_OFF(rx_fw_queue_id);
+		params->tx_regs = priv->regs[0] +
+			RNR_SRAM_OFFSET(tx_core) +
+			ENET_FW_TX_REGS_SRAM_OFF(tx_fw_queue_id);
+		params->rx_core_id = rx_core;
+		params->tx_core_id = tx_core;
+		params->tx_need_reporting = tx_need_reporting;
+		params->tx_need_batch = tx_need_batch;
+		params->tx_bbh_pd_queue_size = tx_bbh_pd_queue_size;
+		params->tx_bbh_mdu_addr = tx_bbh_mdu_addr + tx_bbh_queue_id;
+
+		params->rxq_fqm_wakeup_thread =
+			ENET_FW_RX_FQM_TASK_ID(rx_fw_queue_id);
+
+		for (i = 0; i < ARRAY_SIZE(params->rxq_xf_wakeup_thread); i++)
+			params->rxq_xf_wakeup_thread[i] =
+				ENET_FW_RX_XF_QUEUEx_TASK_ID(rx_fw_queue_id, i);
+
+		for (i = 0; i < params->tx_queue_count; i++)
+			params->txq_wakeup_thread[i] =
+				ENET_FW_TX_QUEUEx_TASK_ID(tx_fw_queue_id);
+		break;
+	}
+
+	default:
+		/* FIXME */
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL(bcm_xrdp_api_get_enet_params);
+
+/*
+ *
+ */
+int bcm_xrdp_api_get_dsl_params(struct bcm_xrdp_priv *priv,
+				struct bcm_xrdp_dsl_params *params)
+{
+	unsigned int i;
+	unsigned long rx_mask, tx_mask;
+
+	params->rx_queue_count = 1;
+	params->tx_queue_count = 7;
+	BUG_ON(params->rx_queue_count > XRDP_MAX_RX_QUEUE);
+	BUG_ON(params->tx_queue_count > XRDP_MAX_TX_QUEUE);
+
+	if (irq_bit_alloc(RDP_BBH_IDX_DSL,
+			  params->rx_queue_count,
+			  params->tx_queue_count, &rx_mask, &tx_mask)) {
+		dev_err(&priv->pdev->dev,
+			"failed to alloc dsl\n");
+		return -EINVAL;
+	}
+
+	if (xrdp_gen_irq_mask(priv,
+			      XRDP_MAX_RX_QUEUE,
+			      rx_mask,
+			      params->rx_irq,
+			      params->rxq_irq_mask))
+		return -EINVAL;
+
+	if (xrdp_gen_irq_mask(priv,
+			      XRDP_MAX_TX_QUEUE,
+			      tx_mask,
+			      params->tx_irq,
+			      params->txq_done_irq_mask))
+		return -EINVAL;
+
+	params->rx_regs = priv->regs[0] +
+		RNR_SRAM_OFFSET(DSL_BBH_RX_CORE) +
+		DSL_FW_RX_REGS_SRAM_OFF;
+	params->tx_regs = priv->regs[0] +
+		RNR_SRAM_OFFSET(DSL_BBH_TX_CORE) +
+		DSL_FW_TX_REGS_SRAM_OFF;
+
+	params->rx_core_id = DSL_BBH_RX_CORE;
+	params->tx_core_id = DSL_BBH_TX_CORE;
+
+	for (i = 0; i < params->rx_queue_count; i++)
+		params->rxq_wakeup_thread[i] = 1;
+
+	for (i = 0; i < params->tx_queue_count; i++)
+		params->txq_wakeup_thread[i] = 2 + i * 2 + 1;
+
+	return 0;
+}
+
+EXPORT_SYMBOL(bcm_xrdp_api_get_dsl_params);
+
+/*
+ *
+ */
+void bcm_xrdp_api_dsl_flow_id_set(struct bcm_xrdp_priv *priv,
+				  unsigned int flow_id,
+				  u32 hwval)
+{
+	u32 val;
+
+	BUG_ON(flow_id >= 256);
+
+	val = (flow_id << WAN_CFGS_FLOW2PORT_A_SHIFT) |
+		(hwval << WAN_CFGS_FLOW2PORT_WDATA_SHIFT) |
+		WAN_CFGS_FLOW2PORT_CMD_MASK;
+
+	bbh_tx_write(priv, RDP_BBH_IDX_DSL,
+		     BBH_TX_WAN_CFGS_FLOW2PORT, val);
+}
+
+EXPORT_SYMBOL(bcm_xrdp_api_dsl_flow_id_set);
+
+/*
+ *
+ */
+void bcm_xrdp_api_pon_flow_id_set(struct bcm_xrdp_priv *priv,
+				  unsigned int flow_id,
+				  u32 hwval)
+{
+	u32 val;
+
+	BUG_ON(flow_id >= 256);
+
+	val = (flow_id << WAN_CFGS_FLOW2PORT_A_SHIFT) |
+		(hwval << WAN_CFGS_FLOW2PORT_WDATA_SHIFT) |
+		WAN_CFGS_FLOW2PORT_CMD_MASK;
+
+	bbh_tx_write(priv, RDP_BBH_IDX_PON,
+		     BBH_TX_WAN_CFGS_FLOW2PORT, val);
+}
+
+EXPORT_SYMBOL(bcm_xrdp_api_pon_flow_id_set);
+
+/*
+ *
+ */
+void bcm_xrdp_api_wakeup(struct bcm_xrdp_priv *priv,
+			 unsigned int core_id,
+			 unsigned int thread)
+{
+	WARN_ON(core_id >= RDP_RUNNER_COUNT);
+	WARN_ON(thread >= RDP_RUNNER_THREAD_COUNT);
+
+	runner_wakeup(priv, core_id, thread);
+}
+
+EXPORT_SYMBOL(bcm_xrdp_api_wakeup);
+
+/*
+ *
+ */
+u32 bcm_xrdp_api_irq_read_status(struct bcm_xrdp_priv *priv,
+				 unsigned int core_id)
+{
+	return ubus_slave_readl(priv, UBUS_SLV_RNR_INTR_CTRL_ISR);
+}
+
+EXPORT_SYMBOL(bcm_xrdp_api_irq_read_status);
+
+void bcm_xrdp_api_irq_write_status(struct bcm_xrdp_priv *priv,
+				  unsigned int core_id,
+				  u32 val)
+{
+	ubus_slave_writel(priv, UBUS_SLV_RNR_INTR_CTRL_ISR, val);
+}
+
+EXPORT_SYMBOL(bcm_xrdp_api_irq_write_status);
+
+/*
+ *
+ */
+void bcm_xrdp_api_irq_mask_clear(struct bcm_xrdp_priv *priv,
+				unsigned int core_id,
+				u32 bits)
+{
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&priv->irq_lock, flags);
+	val = ubus_slave_readl(priv, UBUS_SLV_RNR_INTR_CTRL_IER);
+	val &= ~bits;
+	ubus_slave_writel(priv, UBUS_SLV_RNR_INTR_CTRL_IER, val);
+	spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
+EXPORT_SYMBOL(bcm_xrdp_api_irq_mask_clear);
+
+void bcm_xrdp_api_irq_mask_set(struct bcm_xrdp_priv *priv,
+			      unsigned int core_id,
+			      u32 bits)
+{
+	unsigned long flags;
+	u32 val;
+
+	spin_lock_irqsave(&priv->irq_lock, flags);
+	val = ubus_slave_readl(priv, UBUS_SLV_RNR_INTR_CTRL_IER);
+	val |= bits;
+	ubus_slave_writel(priv, UBUS_SLV_RNR_INTR_CTRL_IER, val);
+	spin_unlock_irqrestore(&priv->irq_lock, flags);
+}
+
+EXPORT_SYMBOL(bcm_xrdp_api_irq_mask_set);
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/xrdp_debug.c linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/xrdp_debug.c
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/xrdp_debug.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/xrdp_debug.c	2025-09-25 17:40:35.075364786 +0200
@@ -0,0 +1,1832 @@
+#include <linux/slab.h>
+#include "xrdp_priv.h"
+
+static struct dentry *dbg_root;
+static struct dentry *dbg_regs;
+
+struct reg_desc {
+	const char	*name;
+	u32		offset;
+};
+
+struct reg_dump_priv {
+	const struct reg_desc	*regs;
+	size_t			regs_count;
+	enum xrdp_regs_area	area;
+	unsigned int		base_offset;
+	struct bcm_xrdp_priv	*priv;
+};
+
+static const struct reg_desc runner_regs[] = {
+	{ "GLOBAL_CTRL",	RNR_REGS_CFG_GLOBAL_CTRL },
+	{ "CPU_WAKEUP",	RNR_REGS_CFG_CPU_WAKEUP },
+	{ "INT_CTRL",	RNR_REGS_CFG_INT_CTRL },
+	{ "INT_MASK",	RNR_REGS_CFG_INT_MASK },
+	{ "GEN_CFG",	RNR_REGS_CFG_GEN_CFG },
+	{ "CAM_CFG",	RNR_REGS_CFG_CAM_CFG },
+	{ "DDR_CFG",	RNR_REGS_CFG_DDR_CFG },
+	{ "PSRAM_CFG",	RNR_REGS_CFG_PSRAM_CFG },
+	{ "RAMRD_RANGE_MASK_CFG",	RNR_REGS_CFG_RAMRD_RANGE_MASK_CFG },
+	{ "SCH_CFG",	RNR_REGS_CFG_SCH_CFG },
+	{ "BKPT_CFG",	RNR_REGS_CFG_BKPT_CFG },
+	{ "BKPT_IMM",	RNR_REGS_CFG_BKPT_IMM },
+	{ "BKPT_STS",	RNR_REGS_CFG_BKPT_STS },
+	{ "PC_STS",	RNR_REGS_CFG_PC_STS },
+	{ "PROF_STS",	RNR_REGS_CFG_PROF_STS },
+	{ "PROF_CFG_0",	RNR_REGS_CFG_PROF_CFG_0 },
+	{ "PROF_CFG_1",	RNR_REGS_CFG_PROF_CFG_1 },
+	{ "PROF_COUNTER",	RNR_REGS_CFG_PROF_COUNTER },
+	{ "STALL_CNT1",	RNR_REGS_CFG_STALL_CNT1 },
+	{ "STALL_CNT2",	RNR_REGS_CFG_STALL_CNT2 },
+	{ "IDLE_CNT1",	RNR_REGS_CFG_IDLE_CNT1 },
+	{ "JMP_CNT",	RNR_REGS_CFG_JMP_CNT },
+	{ "METAL_FIX_REG",	RNR_REGS_CFG_METAL_FIX_REG },
+};
+
+static const struct reg_desc runner_quad_regs[] = {
+	{ "PARSER_CORE_CFG_ENG", RNR_QUAD_PARSER_CORE_CFG_ENG },
+	{ "PARSER_CORE_CFG_PARSER_MISC_CFG", RNR_QUAD_PARSER_CORE_CFG_PARSER_MISC_CFG },
+	{ "PARSER_CORE_CFG_VID_0_1", RNR_QUAD_PARSER_CORE_CFG_VID_0_1 },
+	{ "PARSER_CORE_CFG_VID_2_3", RNR_QUAD_PARSER_CORE_CFG_VID_2_3 },
+	{ "PARSER_CORE_CFG_VID_4_5", RNR_QUAD_PARSER_CORE_CFG_VID_4_5 },
+	{ "PARSER_CORE_CFG_VID_6_7", RNR_QUAD_PARSER_CORE_CFG_VID_6_7 },
+	{ "PARSER_CORE_CFG_IP_FILTER0_CFG", RNR_QUAD_PARSER_CORE_CFG_IP_FILTER0_CFG },
+	{ "PARSER_CORE_CFG_IP_FILTER1_CFG", RNR_QUAD_PARSER_CORE_CFG_IP_FILTER1_CFG },
+	{ "PARSER_CORE_CFG_IP_FILTER0_MASK_CFG", RNR_QUAD_PARSER_CORE_CFG_IP_FILTER0_MASK_CFG },
+	{ "PARSER_CORE_CFG_IP_FILTER1_MASK_CFG", RNR_QUAD_PARSER_CORE_CFG_IP_FILTER1_MASK_CFG },
+	{ "PARSER_CORE_CFG_IP_FILTERS_CFG", RNR_QUAD_PARSER_CORE_CFG_IP_FILTERS_CFG },
+	{ "PARSER_CORE_CFG_SNAP_ORG_CODE", RNR_QUAD_PARSER_CORE_CFG_SNAP_ORG_CODE },
+	{ "PARSER_CORE_CFG_PPP_IP_PROT_CODE", RNR_QUAD_PARSER_CORE_CFG_PPP_IP_PROT_CODE },
+	{ "PARSER_CORE_CFG_QTAG_ETHTYPE", RNR_QUAD_PARSER_CORE_CFG_QTAG_ETHTYPE },
+	{ "PARSER_CORE_CFG_USER_ETHTYPE_0_1", RNR_QUAD_PARSER_CORE_CFG_USER_ETHTYPE_0_1 },
+	{ "PARSER_CORE_CFG_USER_ETHTYPE_2_3", RNR_QUAD_PARSER_CORE_CFG_USER_ETHTYPE_2_3 },
+	{ "PARSER_CORE_CFG_USER_ETHTYPE_CONFIG", RNR_QUAD_PARSER_CORE_CFG_USER_ETHTYPE_CONFIG },
+	{ "PARSER_CORE_CFG_IPV6_HDR_EXT_FLTR_MASK_CFG", RNR_QUAD_PARSER_CORE_CFG_IPV6_HDR_EXT_FLTR_MASK_CFG },
+	{ "PARSER_CORE_CFG_QTAG_NEST", RNR_QUAD_PARSER_CORE_CFG_QTAG_NEST },
+	{ "PARSER_CORE_CFG_QTAG_HARD_NEST_0", RNR_QUAD_PARSER_CORE_CFG_QTAG_HARD_NEST_0 },
+	{ "PARSER_CORE_CFG_QTAG_HARD_NEST_1", RNR_QUAD_PARSER_CORE_CFG_QTAG_HARD_NEST_1 },
+	{ "PARSER_CORE_CFG_QTAG_HARD_NEST_2", RNR_QUAD_PARSER_CORE_CFG_QTAG_HARD_NEST_2 },
+	{ "PARSER_CORE_CFG_USER_IP_PROT", RNR_QUAD_PARSER_CORE_CFG_USER_IP_PROT },
+	{ "PARSER_CORE_CFG_DA_FILT0_VAL_L", RNR_QUAD_PARSER_CORE_CFG_DA_FILT0_VAL_L },
+	{ "PARSER_CORE_CFG_DA_FILT0_VAL_H", RNR_QUAD_PARSER_CORE_CFG_DA_FILT0_VAL_H },
+	{ "PARSER_CORE_CFG_DA_FILT1_VAL_L", RNR_QUAD_PARSER_CORE_CFG_DA_FILT1_VAL_L },
+	{ "PARSER_CORE_CFG_DA_FILT1_VAL_H", RNR_QUAD_PARSER_CORE_CFG_DA_FILT1_VAL_H },
+	{ "PARSER_CORE_CFG_DA_FILT2_VAL_L", RNR_QUAD_PARSER_CORE_CFG_DA_FILT2_VAL_L },
+	{ "PARSER_CORE_CFG_DA_FILT2_VAL_H", RNR_QUAD_PARSER_CORE_CFG_DA_FILT2_VAL_H },
+	{ "PARSER_CORE_CFG_DA_FILT3_VAL_L", RNR_QUAD_PARSER_CORE_CFG_DA_FILT3_VAL_L },
+	{ "PARSER_CORE_CFG_DA_FILT3_VAL_H", RNR_QUAD_PARSER_CORE_CFG_DA_FILT3_VAL_H },
+	{ "PARSER_CORE_CFG_DA_FILT4_VAL_L", RNR_QUAD_PARSER_CORE_CFG_DA_FILT4_VAL_L },
+	{ "PARSER_CORE_CFG_DA_FILT4_VAL_H", RNR_QUAD_PARSER_CORE_CFG_DA_FILT4_VAL_H },
+	{ "PARSER_CORE_CFG_DA_FILT5_VAL_L", RNR_QUAD_PARSER_CORE_CFG_DA_FILT5_VAL_L },
+	{ "PARSER_CORE_CFG_DA_FILT5_VAL_H", RNR_QUAD_PARSER_CORE_CFG_DA_FILT5_VAL_H },
+	{ "PARSER_CORE_CFG_DA_FILT6_VAL_L", RNR_QUAD_PARSER_CORE_CFG_DA_FILT6_VAL_L },
+	{ "PARSER_CORE_CFG_DA_FILT6_VAL_H", RNR_QUAD_PARSER_CORE_CFG_DA_FILT6_VAL_H },
+	{ "PARSER_CORE_CFG_DA_FILT7_VAL_L", RNR_QUAD_PARSER_CORE_CFG_DA_FILT7_VAL_L },
+	{ "PARSER_CORE_CFG_DA_FILT7_VAL_H", RNR_QUAD_PARSER_CORE_CFG_DA_FILT7_VAL_H },
+	{ "PARSER_CORE_CFG_DA_FILT8_VAL_L", RNR_QUAD_PARSER_CORE_CFG_DA_FILT8_VAL_L },
+	{ "PARSER_CORE_CFG_DA_FILT8_VAL_H", RNR_QUAD_PARSER_CORE_CFG_DA_FILT8_VAL_H },
+	{ "PARSER_CORE_CFG_DA_FILT0_MASK_L", RNR_QUAD_PARSER_CORE_CFG_DA_FILT0_MASK_L },
+	{ "PARSER_CORE_CFG_DA_FILT0_MASK_H", RNR_QUAD_PARSER_CORE_CFG_DA_FILT0_MASK_H },
+	{ "PARSER_CORE_CFG_DA_FILT1_MASK_L", RNR_QUAD_PARSER_CORE_CFG_DA_FILT1_MASK_L },
+	{ "PARSER_CORE_CFG_DA_FILT1_MASK_H", RNR_QUAD_PARSER_CORE_CFG_DA_FILT1_MASK_H },
+	{ "PARSER_CORE_CFG_DA_FILT_VALID_CFG_0", RNR_QUAD_PARSER_CORE_CFG_DA_FILT_VALID_CFG_0 },
+	{ "PARSER_CORE_CFG_DA_FILT_VALID_CFG_1", RNR_QUAD_PARSER_CORE_CFG_DA_FILT_VALID_CFG_1 },
+	{ "PARSER_CORE_CFG_DA_FILT_VALID_CFG_2", RNR_QUAD_PARSER_CORE_CFG_DA_FILT_VALID_CFG_2 },
+	{ "PARSER_CORE_CFG_GRE_PROTOCOL_CFG", RNR_QUAD_PARSER_CORE_CFG_GRE_PROTOCOL_CFG },
+	{ "PARSER_CORE_CFG_PROP_TAG_CFG", RNR_QUAD_PARSER_CORE_CFG_PROP_TAG_CFG },
+	{ "GENERAL_CONFIG_DMA_ARB_CFG", RNR_QUAD_GENERAL_CONFIG_DMA_ARB_CFG },
+	{ "GENERAL_CONFIG_PSRAM0_BASE", RNR_QUAD_GENERAL_CONFIG_PSRAM0_BASE },
+	{ "GENERAL_CONFIG_PSRAM1_BASE", RNR_QUAD_GENERAL_CONFIG_PSRAM1_BASE },
+	{ "GENERAL_CONFIG_PSRAM2_BASE", RNR_QUAD_GENERAL_CONFIG_PSRAM2_BASE },
+	{ "GENERAL_CONFIG_PSRAM3_BASE", RNR_QUAD_GENERAL_CONFIG_PSRAM3_BASE },
+	{ "GENERAL_CONFIG_DDR0_BASE", RNR_QUAD_GENERAL_CONFIG_DDR0_BASE },
+	{ "GENERAL_CONFIG_DDR1_BASE", RNR_QUAD_GENERAL_CONFIG_DDR1_BASE },
+	{ "GENERAL_CONFIG_PSRAM0_MASK", RNR_QUAD_GENERAL_CONFIG_PSRAM0_MASK },
+	{ "GENERAL_CONFIG_PSRAM1_MASK", RNR_QUAD_GENERAL_CONFIG_PSRAM1_MASK },
+	{ "GENERAL_CONFIG_PSRAM2_MASK", RNR_QUAD_GENERAL_CONFIG_PSRAM2_MASK },
+	{ "GENERAL_CONFIG_PSRAM3_MASK", RNR_QUAD_GENERAL_CONFIG_PSRAM3_MASK },
+	{ "GENERAL_CONFIG_DDR0_MASK", RNR_QUAD_GENERAL_CONFIG_DDR0_MASK },
+	{ "GENERAL_CONFIG_DDR1_MASK", RNR_QUAD_GENERAL_CONFIG_DDR1_MASK },
+	{ "GENERAL_CONFIG_PROF_CONFIG", RNR_QUAD_GENERAL_CONFIG_PROF_CONFIG },
+	{ "GENERAL_CONFIG_BKPT_0_CFG", RNR_QUAD_GENERAL_CONFIG_BKPT_0_CFG },
+	{ "GENERAL_CONFIG_BKPT_1_CFG", RNR_QUAD_GENERAL_CONFIG_BKPT_1_CFG },
+	{ "GENERAL_CONFIG_BKPT_2_CFG", RNR_QUAD_GENERAL_CONFIG_BKPT_2_CFG },
+	{ "GENERAL_CONFIG_BKPT_3_CFG", RNR_QUAD_GENERAL_CONFIG_BKPT_3_CFG },
+	{ "GENERAL_CONFIG_BKPT_4_CFG", RNR_QUAD_GENERAL_CONFIG_BKPT_4_CFG },
+	{ "GENERAL_CONFIG_BKPT_5_CFG", RNR_QUAD_GENERAL_CONFIG_BKPT_5_CFG },
+	{ "GENERAL_CONFIG_BKPT_6_CFG", RNR_QUAD_GENERAL_CONFIG_BKPT_6_CFG },
+	{ "GENERAL_CONFIG_BKPT_7_CFG", RNR_QUAD_GENERAL_CONFIG_BKPT_7_CFG },
+	{ "GENERAL_CONFIG_BKPT_GEN_CFG", RNR_QUAD_GENERAL_CONFIG_BKPT_GEN_CFG },
+	{ "GENERAL_CONFIG_POWERSAVE_CONFIG", RNR_QUAD_GENERAL_CONFIG_POWERSAVE_CONFIG },
+	{ "GENERAL_CONFIG_POWERSAVE_STATUS", RNR_QUAD_GENERAL_CONFIG_POWERSAVE_STATUS },
+	{ "DEBUG_FIFO_CONFIG", RNR_QUAD_DEBUG_FIFO_CONFIG },
+	{ "DEBUG_PSRAM_HDR_FIFO_STATUS", RNR_QUAD_DEBUG_PSRAM_HDR_FIFO_STATUS },
+	{ "DEBUG_PSRAM_DATA_FIFO_STATUS", RNR_QUAD_DEBUG_PSRAM_DATA_FIFO_STATUS },
+	{ "DEBUG_DDR_HDR_FIFO_STATUS", RNR_QUAD_DEBUG_DDR_HDR_FIFO_STATUS },
+	{ "DEBUG_DDR_DATA_FIFO_STATUS", RNR_QUAD_DEBUG_DDR_DATA_FIFO_STATUS },
+	{ "DEBUG_DDR_DATA_FIFO_STATUS2", RNR_QUAD_DEBUG_DDR_DATA_FIFO_STATUS2 },
+	{ "DEBUG_PSRAM_HDR_FIFO_DATA1", RNR_QUAD_DEBUG_PSRAM_HDR_FIFO_DATA1 },
+	{ "DEBUG_PSRAM_HDR_FIFO_DATA2", RNR_QUAD_DEBUG_PSRAM_HDR_FIFO_DATA2 },
+	{ "DEBUG_PSRAM_DATA_FIFO_DATA1", RNR_QUAD_DEBUG_PSRAM_DATA_FIFO_DATA1 },
+	{ "DEBUG_PSRAM_DATA_FIFO_DATA2", RNR_QUAD_DEBUG_PSRAM_DATA_FIFO_DATA2 },
+	{ "DEBUG_DDR_HDR_FIFO_DATA1", RNR_QUAD_DEBUG_DDR_HDR_FIFO_DATA1 },
+	{ "DEBUG_DDR_HDR_FIFO_DATA2", RNR_QUAD_DEBUG_DDR_HDR_FIFO_DATA2 },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[0]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(0) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[1]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(1) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[2]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(2) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[3]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(3) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[4]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(4) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[5]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(5) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[6]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(6) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[7]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(7) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[8]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(8) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[9]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(9) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[10]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(10) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[11]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(11) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[12]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(12) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[13]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(13) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[14]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(14) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[15]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(15) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[16]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(16) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[17]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(17) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[18]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(18) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[19]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(19) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[20]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(20) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[21]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(21) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[22]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(22) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[23]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(23) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[24]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(24) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[25]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(25) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[26]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(26) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[27]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(27) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[28]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(28) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[29]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(29) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[30]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(30) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[31]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(31) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[32]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(32) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[33]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(33) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[34]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(34) },
+	{ "EXT_FLOWCTRL_CONFIG_TOKEN_VAL[35]", RNR_QUAD_EXT_FLOWCTRL_CONFIG_TOKEN_VAL(35) },
+};
+
+static const struct reg_desc ubus_master_regs[] = {
+	{ "BRDG_EN",		UBUS_MSTR_EN },
+	{ "RQUSTOR_CTRL",	UBUS_MSTR_REQ_CNTRL },
+	{ "HYST_CTRL",		UBUS_MSTR_HYST_CTRL },
+	{ "HIGH_PRIORITY",	UBUS_MSTR_HP },
+	{ "REPLY_ADDRESS",	UBUS_MSTR_REPLY_ADD },
+	{ "REPLY_DATA",		UBUS_MSTR_REPLY_DATA },
+};
+
+static const struct reg_desc ubus_slave_regs[] = {
+	{ "VPB_BASE",		UBUS_SLV_VPB_BASE },
+	{ "VPB_MASK",		UBUS_SLV_VPB_MASK },
+	{ "APB_BASE",		UBUS_SLV_APB_BASE },
+	{ "APB_MASK",		UBUS_SLV_APB_MASK },
+	{ "DQM_BASE",		UBUS_SLV_DQM_BASE },
+	{ "DQM_MASK",		UBUS_SLV_DQM_MASK },
+	{ "INTERRUPT_STATUS",		UBUS_SLV_RNR_INTR_CTRL_ISR },
+	{ "INTERRUPT_STATUS_MASKED",		UBUS_SLV_RNR_INTR_CTRL_ISM },
+	{ "INTERRUPT_ENABLE",		UBUS_SLV_RNR_INTR_CTRL_IER },
+	{ "INTERRUPT_TEST",		UBUS_SLV_RNR_INTR_CTRL_ITR },
+	{ "PROFILING_CFG",		UBUS_SLV_PROF_CFG },
+	{ "PROFILING_STATUS",		UBUS_SLV_PROF_STATUS },
+	{ "PROFILING_COUNTER",		UBUS_SLV_PROF_COUNTER },
+	{ "PROFILING_START_VALUE",		UBUS_SLV_PROF_START_VALUE },
+	{ "PROFILING_STOP_VALUE",		UBUS_SLV_PROF_STOP_VALUE },
+	{ "PROFILING_CYCLE_NUM",		UBUS_SLV_PROF_CYCLE_NUM },
+	{ "RGMII_CNTRL",		UBUS_SLV__CNTRL },
+	{ "RGMII_IB_STATUS",		UBUS_SLV__IB_STATUS },
+	{ "RGMII_RX_CLOCK_DELAY_CNTRL",		UBUS_SLV__RX_CLOCK_DELAY_CNTRL },
+	{ "RGMII_ATE_RX_CNTRL_EXP_DATA",		UBUS_SLV__ATE_RX_CNTRL_EXP_DATA },
+	{ "RGMII_ATE_RX_EXP_DATA_1",		UBUS_SLV__ATE_RX_EXP_DATA_1 },
+	{ "RGMII_ATE_RX_STATUS_0",		UBUS_SLV__ATE_RX_STATUS_0 },
+	{ "RGMII_ATE_RX_STATUS_1",		UBUS_SLV__ATE_RX_STATUS_1 },
+	{ "RGMII_ATE_TX_CNTRL",		UBUS_SLV__ATE_TX_CNTRL },
+	{ "RGMII_ATE_TX_DATA_0",		UBUS_SLV__ATE_TX_DATA_0 },
+	{ "RGMII_ATE_TX_DATA_1",		UBUS_SLV__ATE_TX_DATA_1 },
+	{ "RGMII_ATE_TX_DATA_2",		UBUS_SLV__ATE_TX_DATA_2 },
+	{ "RGMII_TX_DELAY_CNTRL_0",		UBUS_SLV__TX_DELAY_CNTRL_0 },
+	{ "RGMII_TX_DELAY_CNTRL_1",		UBUS_SLV__TX_DELAY_CNTRL_1 },
+	{ "RGMII_RX_DELAY_CNTRL_0",		UBUS_SLV__RX_DELAY_CNTRL_0 },
+	{ "RGMII_RX_DELAY_CNTRL_1",		UBUS_SLV__RX_DELAY_CNTRL_1 },
+	{ "RGMII_RX_DELAY_CNTRL_2",		UBUS_SLV__RX_DELAY_CNTRL_2 },
+	{ "RGMII_CLK_RST_CTRL",		UBUS_SLV__CLK_RST_CTRL },
+};
+
+static const struct reg_desc bbh_rx_regs[] = {
+	{ "GENERAL_CFG_BBCFG",	BBH_RX_GENERAL_CFG_BBCFG },
+	{ "GENERAL_CFG_DISPVIQ",	BBH_RX_GENERAL_CFG_DISPVIQ },
+	{ "GENERAL_CFG_PATTERNDATALSB",	BBH_RX_GENERAL_CFG_PATTERNDATALSB },
+	{ "GENERAL_CFG_PATTERNDATAMSB",	BBH_RX_GENERAL_CFG_PATTERNDATAMSB },
+	{ "GENERAL_CFG_PATTERNMASKLSB",	BBH_RX_GENERAL_CFG_PATTERNMASKLSB },
+	{ "GENERAL_CFG_PATTERNMASKMSB",	BBH_RX_GENERAL_CFG_PATTERNMASKMSB },
+	{ "GENERAL_CFG_EXCLQCFG",	BBH_RX_GENERAL_CFG_EXCLQCFG },
+	{ "GENERAL_CFG_SDMAADDR",	BBH_RX_GENERAL_CFG_SDMAADDR },
+	{ "GENERAL_CFG_SDMACFG",	BBH_RX_GENERAL_CFG_SDMACFG },
+	{ "GENERAL_CFG_MINPKT0",	BBH_RX_GENERAL_CFG_MINPKT0 },
+	{ "GENERAL_CFG_MAXPKT0",	BBH_RX_GENERAL_CFG_MAXPKT0 },
+	{ "GENERAL_CFG_MAXPKT1",	BBH_RX_GENERAL_CFG_MAXPKT1 },
+	{ "GENERAL_CFG_SOPOFFSET",	BBH_RX_GENERAL_CFG_SOPOFFSET },
+	{ "GENERAL_CFG_FLOWCTRL",	BBH_RX_GENERAL_CFG_FLOWCTRL },
+	{ "GENERAL_CFG_CRCOMITDIS",	BBH_RX_GENERAL_CFG_CRCOMITDIS },
+	{ "GENERAL_CFG_ENABLE",	BBH_RX_GENERAL_CFG_ENABLE },
+	{ "GENERAL_CFG_G9991EN",	BBH_RX_GENERAL_CFG_G9991EN },
+	{ "GENERAL_CFG_PERFLOWTH",	BBH_RX_GENERAL_CFG_PERFLOWTH },
+	{ "GENERAL_CFG_PERFLOWSETS",	BBH_RX_GENERAL_CFG_PERFLOWSETS },
+	{ "GENERAL_CFG_MINPKTSEL0",	BBH_RX_GENERAL_CFG_MINPKTSEL0 },
+	{ "GENERAL_CFG_MINPKTSEL1",	BBH_RX_GENERAL_CFG_MINPKTSEL1 },
+	{ "GENERAL_CFG_MAXPKTSEL0",	BBH_RX_GENERAL_CFG_MAXPKTSEL0 },
+	{ "GENERAL_CFG_MAXPKTSEL1",	BBH_RX_GENERAL_CFG_MAXPKTSEL1 },
+	{ "GENERAL_CFG_MACMODE",	BBH_RX_GENERAL_CFG_MACMODE },
+	{ "GENERAL_CFG_SBPMCFG",	BBH_RX_GENERAL_CFG_SBPMCFG },
+	{ "GENERAL_CFG_RXRSTRST",	BBH_RX_GENERAL_CFG_RXRSTRST },
+	{ "GENERAL_CFG_RXDBGSEL",	BBH_RX_GENERAL_CFG_RXDBGSEL },
+	{ "GENERAL_CFG_BBHRX_RADDR_DECODER",	BBH_RX_GENERAL_CFG_BBHRX_RADDR_DECODER },
+	{ "GENERAL_CFG_NONETH",	BBH_RX_GENERAL_CFG_NONETH },
+	{ "GENERAL_CFG_CLK_GATE_CNTRL",	BBH_RX_GENERAL_CFG_CLK_GATE_CNTRL },
+	{ "PM_COUNTERS_INPKT",	BBH_RX_PM_COUNTERS_INPKT },
+	{ "PM_COUNTERS_THIRDFLOW",	BBH_RX_PM_COUNTERS_THIRDFLOW },
+	{ "PM_COUNTERS_SOPASOP",	BBH_RX_PM_COUNTERS_SOPASOP },
+	{ "PM_COUNTERS_TOOSHORT",	BBH_RX_PM_COUNTERS_TOOSHORT },
+	{ "PM_COUNTERS_TOOLONG",	BBH_RX_PM_COUNTERS_TOOLONG },
+	{ "PM_COUNTERS_CRCERROR",	BBH_RX_PM_COUNTERS_CRCERROR },
+	{ "PM_COUNTERS_ENCRYPTERROR",	BBH_RX_PM_COUNTERS_ENCRYPTERROR },
+	{ "PM_COUNTERS_DISPCONG",	BBH_RX_PM_COUNTERS_DISPCONG },
+	{ "PM_COUNTERS_NOSBPMSBN",	BBH_RX_PM_COUNTERS_NOSBPMSBN },
+	{ "PM_COUNTERS_NOSDMACD",	BBH_RX_PM_COUNTERS_NOSDMACD },
+	{ "PM_COUNTERS_INPLOAM",	BBH_RX_PM_COUNTERS_INPLOAM },
+	{ "PM_COUNTERS_CRCERRORPLOAM",	BBH_RX_PM_COUNTERS_CRCERRORPLOAM },
+	{ "PM_COUNTERS_DISPCONGPLOAM",	BBH_RX_PM_COUNTERS_DISPCONGPLOAM },
+	{ "PM_COUNTERS_NOSBPMSBNPLOAM",	BBH_RX_PM_COUNTERS_NOSBPMSBNPLOAM },
+	{ "PM_COUNTERS_NOSDMACDPLOAM",	BBH_RX_PM_COUNTERS_NOSDMACDPLOAM },
+	{ "PM_COUNTERS_EPONTYPERROR",	BBH_RX_PM_COUNTERS_EPONTYPERROR },
+	{ "PM_COUNTERS_RUNTERROR",	BBH_RX_PM_COUNTERS_RUNTERROR },
+	{ "DEBUG_CNTXTX0LSB",	BBH_RX_DEBUG_CNTXTX0LSB },
+	{ "DEBUG_CNTXTX0MSB",	BBH_RX_DEBUG_CNTXTX0MSB },
+	{ "DEBUG_CNTXTX1LSB",	BBH_RX_DEBUG_CNTXTX1LSB },
+	{ "DEBUG_CNTXTX1MSB",	BBH_RX_DEBUG_CNTXTX1MSB },
+	{ "DEBUG_CNTXTX0INGRESS",	BBH_RX_DEBUG_CNTXTX0INGRESS },
+	{ "DEBUG_CNTXTX1INGRESS",	BBH_RX_DEBUG_CNTXTX1INGRESS },
+	{ "DEBUG_IBUW",	BBH_RX_DEBUG_IBUW },
+	{ "DEBUG_BBUW",	BBH_RX_DEBUG_BBUW },
+	{ "DEBUG_CFUW",	BBH_RX_DEBUG_CFUW },
+	{ "DEBUG_ACKCNT",	BBH_RX_DEBUG_ACKCNT },
+	{ "DEBUG_COHERENCYCNT",	BBH_RX_DEBUG_COHERENCYCNT },
+	{ "DEBUG_DBGVEC",	BBH_RX_DEBUG_DBGVEC },
+	{ "DEBUG_UFUW",	BBH_RX_DEBUG_UFUW },
+	{ "DEBUG_CREDITCNT",	BBH_RX_DEBUG_CREDITCNT },
+	{ "DEBUG_SDMACNT",	BBH_RX_DEBUG_SDMACNT },
+	{ "DEBUG_CMFUW",	BBH_RX_DEBUG_CMFUW },
+	{ "DEBUG_SBNFIFO[0]",	BBH_RX_DEBUG_SBNFIFO(0) },
+	{ "DEBUG_SBNFIFO[1]",	BBH_RX_DEBUG_SBNFIFO(1) },
+	{ "DEBUG_SBNFIFO[2]",	BBH_RX_DEBUG_SBNFIFO(2) },
+	{ "DEBUG_SBNFIFO[3]",	BBH_RX_DEBUG_SBNFIFO(3) },
+	{ "DEBUG_SBNFIFO[4]",	BBH_RX_DEBUG_SBNFIFO(4) },
+	{ "DEBUG_SBNFIFO[5]",	BBH_RX_DEBUG_SBNFIFO(5) },
+	{ "DEBUG_SBNFIFO[6]",	BBH_RX_DEBUG_SBNFIFO(6) },
+	{ "DEBUG_SBNFIFO[7]",	BBH_RX_DEBUG_SBNFIFO(7) },
+	{ "DEBUG_SBNFIFO[8]",	BBH_RX_DEBUG_SBNFIFO(8) },
+	{ "DEBUG_SBNFIFO[9]",	BBH_RX_DEBUG_SBNFIFO(9) },
+	{ "DEBUG_SBNFIFO[10]",	BBH_RX_DEBUG_SBNFIFO(10) },
+	{ "DEBUG_SBNFIFO[11]",	BBH_RX_DEBUG_SBNFIFO(11) },
+	{ "DEBUG_SBNFIFO[12]",	BBH_RX_DEBUG_SBNFIFO(12) },
+	{ "DEBUG_SBNFIFO[13]",	BBH_RX_DEBUG_SBNFIFO(13) },
+	{ "DEBUG_SBNFIFO[14]",	BBH_RX_DEBUG_SBNFIFO(14) },
+	{ "DEBUG_SBNFIFO[15]",	BBH_RX_DEBUG_SBNFIFO(15) },
+	{ "DEBUG_CMDFIFO[0]",	BBH_RX_DEBUG_CMDFIFO(0) },
+	{ "DEBUG_CMDFIFO[1]",	BBH_RX_DEBUG_CMDFIFO(1) },
+	{ "DEBUG_CMDFIFO[2]",	BBH_RX_DEBUG_CMDFIFO(2) },
+	{ "DEBUG_CMDFIFO[3]",	BBH_RX_DEBUG_CMDFIFO(3) },
+	{ "DEBUG_CMDFIFO[4]",	BBH_RX_DEBUG_CMDFIFO(4) },
+	{ "DEBUG_CMDFIFO[5]",	BBH_RX_DEBUG_CMDFIFO(5) },
+	{ "DEBUG_CMDFIFO[6]",	BBH_RX_DEBUG_CMDFIFO(6) },
+	{ "DEBUG_CMDFIFO[7]",	BBH_RX_DEBUG_CMDFIFO(7) },
+	{ "DEBUG_SBNRECYCLEFIFO[0]",	BBH_RX_DEBUG_SBNRECYCLEFIFO(0) },
+	{ "DEBUG_SBNRECYCLEFIFO[1]",	BBH_RX_DEBUG_SBNRECYCLEFIFO(1) },
+	{ "DEBUG_COHERENCYCNT2",	BBH_RX_DEBUG_COHERENCYCNT2 },
+	{ "DEBUG_DROPSTATUS",	BBH_RX_DEBUG_DROPSTATUS },
+};
+
+static const struct reg_desc bbh_tx_regs[] = { /* BBH TX register name -> offset table; presumably consumed by a register-dump helper — confirm with caller */
+	{ "COMMON_CFGS_MACTYPE", BBH_TX_COMMON_CFGS_MACTYPE }, /* COMMON_CFGS group */
+	{ "COMMON_CFGS_BBCFG_1_TX", BBH_TX_COMMON_CFGS_BBCFG_1_TX },
+	{ "COMMON_CFGS_BBCFG_2_TX", BBH_TX_COMMON_CFGS_BBCFG_2_TX },
+	{ "COMMON_CFGS_DDRCFG_TX", BBH_TX_COMMON_CFGS_DDRCFG_TX },
+	{ "COMMON_CFGS_RNRCFG_1[0]", BBH_TX_COMMON_CFGS_RNRCFG_1(0) },
+	{ "COMMON_CFGS_RNRCFG_1[1]", BBH_TX_COMMON_CFGS_RNRCFG_1(1) },
+	{ "COMMON_CFGS_RNRCFG_2[0]", BBH_TX_COMMON_CFGS_RNRCFG_2(0) },
+	{ "COMMON_CFGS_RNRCFG_2[1]", BBH_TX_COMMON_CFGS_RNRCFG_2(1) },
+	{ "COMMON_CFGS_DMACFG_TX", BBH_TX_COMMON_CFGS_DMACFG_TX },
+	{ "COMMON_CFGS_SDMACFG_TX", BBH_TX_COMMON_CFGS_SDMACFG_TX },
+	{ "COMMON_CFGS_SBPMCFG", BBH_TX_COMMON_CFGS_SBPMCFG },
+	{ "COMMON_CFGS_DDRTMBASEL[0]", BBH_TX_COMMON_CFGS_DDRTMBASEL(0) },
+	{ "COMMON_CFGS_DDRTMBASEL[1]", BBH_TX_COMMON_CFGS_DDRTMBASEL(1) },
+	{ "COMMON_CFGS_DDRTMBASEH[0]", BBH_TX_COMMON_CFGS_DDRTMBASEH(0) },
+	{ "COMMON_CFGS_DDRTMBASEH[1]", BBH_TX_COMMON_CFGS_DDRTMBASEH(1) },
+	{ "COMMON_CFGS_DFIFOCTRL", BBH_TX_COMMON_CFGS_DFIFOCTRL },
+	{ "COMMON_CFGS_ARB_CFG", BBH_TX_COMMON_CFGS_ARB_CFG },
+	{ "COMMON_CFGS_BBROUTE", BBH_TX_COMMON_CFGS_BBROUTE },
+	{ "COMMON_CFGS_Q2RNR[0]", BBH_TX_COMMON_CFGS_Q2RNR(0) }, /* Q2RNR: indexed 0-19 */
+	{ "COMMON_CFGS_Q2RNR[1]", BBH_TX_COMMON_CFGS_Q2RNR(1) },
+	{ "COMMON_CFGS_Q2RNR[2]", BBH_TX_COMMON_CFGS_Q2RNR(2) },
+	{ "COMMON_CFGS_Q2RNR[3]", BBH_TX_COMMON_CFGS_Q2RNR(3) },
+	{ "COMMON_CFGS_Q2RNR[4]", BBH_TX_COMMON_CFGS_Q2RNR(4) },
+	{ "COMMON_CFGS_Q2RNR[5]", BBH_TX_COMMON_CFGS_Q2RNR(5) },
+	{ "COMMON_CFGS_Q2RNR[6]", BBH_TX_COMMON_CFGS_Q2RNR(6) },
+	{ "COMMON_CFGS_Q2RNR[7]", BBH_TX_COMMON_CFGS_Q2RNR(7) },
+	{ "COMMON_CFGS_Q2RNR[8]", BBH_TX_COMMON_CFGS_Q2RNR(8) },
+	{ "COMMON_CFGS_Q2RNR[9]", BBH_TX_COMMON_CFGS_Q2RNR(9) },
+	{ "COMMON_CFGS_Q2RNR[10]", BBH_TX_COMMON_CFGS_Q2RNR(10) },
+	{ "COMMON_CFGS_Q2RNR[11]", BBH_TX_COMMON_CFGS_Q2RNR(11) },
+	{ "COMMON_CFGS_Q2RNR[12]", BBH_TX_COMMON_CFGS_Q2RNR(12) },
+	{ "COMMON_CFGS_Q2RNR[13]", BBH_TX_COMMON_CFGS_Q2RNR(13) },
+	{ "COMMON_CFGS_Q2RNR[14]", BBH_TX_COMMON_CFGS_Q2RNR(14) },
+	{ "COMMON_CFGS_Q2RNR[15]", BBH_TX_COMMON_CFGS_Q2RNR(15) },
+	{ "COMMON_CFGS_Q2RNR[16]", BBH_TX_COMMON_CFGS_Q2RNR(16) },
+	{ "COMMON_CFGS_Q2RNR[17]", BBH_TX_COMMON_CFGS_Q2RNR(17) },
+	{ "COMMON_CFGS_Q2RNR[18]", BBH_TX_COMMON_CFGS_Q2RNR(18) },
+	{ "COMMON_CFGS_Q2RNR[19]", BBH_TX_COMMON_CFGS_Q2RNR(19) },
+	{ "COMMON_CFGS_PERQTASK", BBH_TX_COMMON_CFGS_PERQTASK },
+	{ "COMMON_CFGS_TXRSTCMD", BBH_TX_COMMON_CFGS_TXRSTCMD },
+	{ "COMMON_CFGS_DBGSEL", BBH_TX_COMMON_CFGS_DBGSEL },
+	{ "COMMON_CFGS_CLK_GATE_CNTRL", BBH_TX_COMMON_CFGS_CLK_GATE_CNTRL },
+	{ "COMMON_CFGS_GPR", BBH_TX_COMMON_CFGS_GPR },
+	{ "WAN_CFGS_PDBASE[0]", BBH_TX_WAN_CFGS_PDBASE(0) }, /* WAN_CFGS group: per-queue entries indexed 0-19 */
+	{ "WAN_CFGS_PDBASE[1]", BBH_TX_WAN_CFGS_PDBASE(1) },
+	{ "WAN_CFGS_PDBASE[2]", BBH_TX_WAN_CFGS_PDBASE(2) },
+	{ "WAN_CFGS_PDBASE[3]", BBH_TX_WAN_CFGS_PDBASE(3) },
+	{ "WAN_CFGS_PDBASE[4]", BBH_TX_WAN_CFGS_PDBASE(4) },
+	{ "WAN_CFGS_PDBASE[5]", BBH_TX_WAN_CFGS_PDBASE(5) },
+	{ "WAN_CFGS_PDBASE[6]", BBH_TX_WAN_CFGS_PDBASE(6) },
+	{ "WAN_CFGS_PDBASE[7]", BBH_TX_WAN_CFGS_PDBASE(7) },
+	{ "WAN_CFGS_PDBASE[8]", BBH_TX_WAN_CFGS_PDBASE(8) },
+	{ "WAN_CFGS_PDBASE[9]", BBH_TX_WAN_CFGS_PDBASE(9) },
+	{ "WAN_CFGS_PDBASE[10]", BBH_TX_WAN_CFGS_PDBASE(10) },
+	{ "WAN_CFGS_PDBASE[11]", BBH_TX_WAN_CFGS_PDBASE(11) },
+	{ "WAN_CFGS_PDBASE[12]", BBH_TX_WAN_CFGS_PDBASE(12) },
+	{ "WAN_CFGS_PDBASE[13]", BBH_TX_WAN_CFGS_PDBASE(13) },
+	{ "WAN_CFGS_PDBASE[14]", BBH_TX_WAN_CFGS_PDBASE(14) },
+	{ "WAN_CFGS_PDBASE[15]", BBH_TX_WAN_CFGS_PDBASE(15) },
+	{ "WAN_CFGS_PDBASE[16]", BBH_TX_WAN_CFGS_PDBASE(16) },
+	{ "WAN_CFGS_PDBASE[17]", BBH_TX_WAN_CFGS_PDBASE(17) },
+	{ "WAN_CFGS_PDBASE[18]", BBH_TX_WAN_CFGS_PDBASE(18) },
+	{ "WAN_CFGS_PDBASE[19]", BBH_TX_WAN_CFGS_PDBASE(19) },
+	{ "WAN_CFGS_PDSIZE[0]", BBH_TX_WAN_CFGS_PDSIZE(0) },
+	{ "WAN_CFGS_PDSIZE[1]", BBH_TX_WAN_CFGS_PDSIZE(1) },
+	{ "WAN_CFGS_PDSIZE[2]", BBH_TX_WAN_CFGS_PDSIZE(2) },
+	{ "WAN_CFGS_PDSIZE[3]", BBH_TX_WAN_CFGS_PDSIZE(3) },
+	{ "WAN_CFGS_PDSIZE[4]", BBH_TX_WAN_CFGS_PDSIZE(4) },
+	{ "WAN_CFGS_PDSIZE[5]", BBH_TX_WAN_CFGS_PDSIZE(5) },
+	{ "WAN_CFGS_PDSIZE[6]", BBH_TX_WAN_CFGS_PDSIZE(6) },
+	{ "WAN_CFGS_PDSIZE[7]", BBH_TX_WAN_CFGS_PDSIZE(7) },
+	{ "WAN_CFGS_PDSIZE[8]", BBH_TX_WAN_CFGS_PDSIZE(8) },
+	{ "WAN_CFGS_PDSIZE[9]", BBH_TX_WAN_CFGS_PDSIZE(9) },
+	{ "WAN_CFGS_PDSIZE[10]", BBH_TX_WAN_CFGS_PDSIZE(10) },
+	{ "WAN_CFGS_PDSIZE[11]", BBH_TX_WAN_CFGS_PDSIZE(11) },
+	{ "WAN_CFGS_PDSIZE[12]", BBH_TX_WAN_CFGS_PDSIZE(12) },
+	{ "WAN_CFGS_PDSIZE[13]", BBH_TX_WAN_CFGS_PDSIZE(13) },
+	{ "WAN_CFGS_PDSIZE[14]", BBH_TX_WAN_CFGS_PDSIZE(14) },
+	{ "WAN_CFGS_PDSIZE[15]", BBH_TX_WAN_CFGS_PDSIZE(15) },
+	{ "WAN_CFGS_PDSIZE[16]", BBH_TX_WAN_CFGS_PDSIZE(16) },
+	{ "WAN_CFGS_PDSIZE[17]", BBH_TX_WAN_CFGS_PDSIZE(17) },
+	{ "WAN_CFGS_PDSIZE[18]", BBH_TX_WAN_CFGS_PDSIZE(18) },
+	{ "WAN_CFGS_PDSIZE[19]", BBH_TX_WAN_CFGS_PDSIZE(19) },
+	{ "WAN_CFGS_PDWKUPH[0]", BBH_TX_WAN_CFGS_PDWKUPH(0) },
+	{ "WAN_CFGS_PDWKUPH[1]", BBH_TX_WAN_CFGS_PDWKUPH(1) },
+	{ "WAN_CFGS_PDWKUPH[2]", BBH_TX_WAN_CFGS_PDWKUPH(2) },
+	{ "WAN_CFGS_PDWKUPH[3]", BBH_TX_WAN_CFGS_PDWKUPH(3) },
+	{ "WAN_CFGS_PDWKUPH[4]", BBH_TX_WAN_CFGS_PDWKUPH(4) },
+	{ "WAN_CFGS_PDWKUPH[5]", BBH_TX_WAN_CFGS_PDWKUPH(5) },
+	{ "WAN_CFGS_PDWKUPH[6]", BBH_TX_WAN_CFGS_PDWKUPH(6) },
+	{ "WAN_CFGS_PDWKUPH[7]", BBH_TX_WAN_CFGS_PDWKUPH(7) },
+	{ "WAN_CFGS_PDWKUPH[8]", BBH_TX_WAN_CFGS_PDWKUPH(8) },
+	{ "WAN_CFGS_PDWKUPH[9]", BBH_TX_WAN_CFGS_PDWKUPH(9) },
+	{ "WAN_CFGS_PDWKUPH[10]", BBH_TX_WAN_CFGS_PDWKUPH(10) },
+	{ "WAN_CFGS_PDWKUPH[11]", BBH_TX_WAN_CFGS_PDWKUPH(11) },
+	{ "WAN_CFGS_PDWKUPH[12]", BBH_TX_WAN_CFGS_PDWKUPH(12) },
+	{ "WAN_CFGS_PDWKUPH[13]", BBH_TX_WAN_CFGS_PDWKUPH(13) },
+	{ "WAN_CFGS_PDWKUPH[14]", BBH_TX_WAN_CFGS_PDWKUPH(14) },
+	{ "WAN_CFGS_PDWKUPH[15]", BBH_TX_WAN_CFGS_PDWKUPH(15) },
+	{ "WAN_CFGS_PDWKUPH[16]", BBH_TX_WAN_CFGS_PDWKUPH(16) },
+	{ "WAN_CFGS_PDWKUPH[17]", BBH_TX_WAN_CFGS_PDWKUPH(17) },
+	{ "WAN_CFGS_PDWKUPH[18]", BBH_TX_WAN_CFGS_PDWKUPH(18) },
+	{ "WAN_CFGS_PDWKUPH[19]", BBH_TX_WAN_CFGS_PDWKUPH(19) },
+	{ "WAN_CFGS_PD_BYTE_TH[0]", BBH_TX_WAN_CFGS_PD_BYTE_TH(0) },
+	{ "WAN_CFGS_PD_BYTE_TH[1]", BBH_TX_WAN_CFGS_PD_BYTE_TH(1) },
+	{ "WAN_CFGS_PD_BYTE_TH[2]", BBH_TX_WAN_CFGS_PD_BYTE_TH(2) },
+	{ "WAN_CFGS_PD_BYTE_TH[3]", BBH_TX_WAN_CFGS_PD_BYTE_TH(3) },
+	{ "WAN_CFGS_PD_BYTE_TH[4]", BBH_TX_WAN_CFGS_PD_BYTE_TH(4) },
+	{ "WAN_CFGS_PD_BYTE_TH[5]", BBH_TX_WAN_CFGS_PD_BYTE_TH(5) },
+	{ "WAN_CFGS_PD_BYTE_TH[6]", BBH_TX_WAN_CFGS_PD_BYTE_TH(6) },
+	{ "WAN_CFGS_PD_BYTE_TH[7]", BBH_TX_WAN_CFGS_PD_BYTE_TH(7) },
+	{ "WAN_CFGS_PD_BYTE_TH[8]", BBH_TX_WAN_CFGS_PD_BYTE_TH(8) },
+	{ "WAN_CFGS_PD_BYTE_TH[9]", BBH_TX_WAN_CFGS_PD_BYTE_TH(9) },
+	{ "WAN_CFGS_PD_BYTE_TH[10]", BBH_TX_WAN_CFGS_PD_BYTE_TH(10) },
+	{ "WAN_CFGS_PD_BYTE_TH[11]", BBH_TX_WAN_CFGS_PD_BYTE_TH(11) },
+	{ "WAN_CFGS_PD_BYTE_TH[12]", BBH_TX_WAN_CFGS_PD_BYTE_TH(12) },
+	{ "WAN_CFGS_PD_BYTE_TH[13]", BBH_TX_WAN_CFGS_PD_BYTE_TH(13) },
+	{ "WAN_CFGS_PD_BYTE_TH[14]", BBH_TX_WAN_CFGS_PD_BYTE_TH(14) },
+	{ "WAN_CFGS_PD_BYTE_TH[15]", BBH_TX_WAN_CFGS_PD_BYTE_TH(15) },
+	{ "WAN_CFGS_PD_BYTE_TH[16]", BBH_TX_WAN_CFGS_PD_BYTE_TH(16) },
+	{ "WAN_CFGS_PD_BYTE_TH[17]", BBH_TX_WAN_CFGS_PD_BYTE_TH(17) },
+	{ "WAN_CFGS_PD_BYTE_TH[18]", BBH_TX_WAN_CFGS_PD_BYTE_TH(18) },
+	{ "WAN_CFGS_PD_BYTE_TH[19]", BBH_TX_WAN_CFGS_PD_BYTE_TH(19) },
+	{ "WAN_CFGS_PD_BYTE_TH_EN", BBH_TX_WAN_CFGS_PD_BYTE_TH_EN },
+	{ "WAN_CFGS_PDEMPTY", BBH_TX_WAN_CFGS_PDEMPTY },
+	{ "WAN_CFGS_STSRNRCFG_1[0]", BBH_TX_WAN_CFGS_STSRNRCFG_1(0) },
+	{ "WAN_CFGS_STSRNRCFG_1[1]", BBH_TX_WAN_CFGS_STSRNRCFG_1(1) },
+	{ "WAN_CFGS_STSRNRCFG_2[0]", BBH_TX_WAN_CFGS_STSRNRCFG_2(0) },
+	{ "WAN_CFGS_STSRNRCFG_2[1]", BBH_TX_WAN_CFGS_STSRNRCFG_2(1) },
+	{ "WAN_CFGS_MSGRNRCFG_1[0]", BBH_TX_WAN_CFGS_MSGRNRCFG_1(0) },
+	{ "WAN_CFGS_MSGRNRCFG_1[1]", BBH_TX_WAN_CFGS_MSGRNRCFG_1(1) },
+	{ "WAN_CFGS_MSGRNRCFG_2[0]", BBH_TX_WAN_CFGS_MSGRNRCFG_2(0) },
+	{ "WAN_CFGS_MSGRNRCFG_2[1]", BBH_TX_WAN_CFGS_MSGRNRCFG_2(1) },
+	{ "WAN_CFGS_EPNCFG", BBH_TX_WAN_CFGS_EPNCFG },
+	{ "WAN_CFGS_FLOW2PORT", BBH_TX_WAN_CFGS_FLOW2PORT },
+	{ "WAN_CFGS_TS", BBH_TX_WAN_CFGS_TS },
+	{ "WAN_CFGS_MAXWLEN", BBH_TX_WAN_CFGS_MAXWLEN },
+	{ "WAN_CFGS_FLUSH", BBH_TX_WAN_CFGS_FLUSH },
+	{ "LAN_CFGS_PDBASE", BBH_TX_LAN_CFGS_PDBASE }, /* LAN_CFGS group (scalar, not indexed) */
+	{ "LAN_CFGS_PDSIZE", BBH_TX_LAN_CFGS_PDSIZE },
+	{ "LAN_CFGS_PDWKUPH", BBH_TX_LAN_CFGS_PDWKUPH },
+	{ "LAN_CFGS_PD_BYTE_TH", BBH_TX_LAN_CFGS_PD_BYTE_TH },
+	{ "LAN_CFGS_PD_BYTE_TH_EN", BBH_TX_LAN_CFGS_PD_BYTE_TH_EN },
+	{ "LAN_CFGS_PDEMPTY", BBH_TX_LAN_CFGS_PDEMPTY },
+	{ "LAN_CFGS_TXTHRESH", BBH_TX_LAN_CFGS_TXTHRESH },
+	{ "LAN_CFGS_EEE", BBH_TX_LAN_CFGS_EEE },
+	{ "LAN_CFGS_TS", BBH_TX_LAN_CFGS_TS },
+	{ "UNIFIED_CFGS_PDBASE[0]", BBH_TX_UNIFIED_CFGS_PDBASE(0) }, /* UNIFIED_CFGS group: indexed 0-3 */
+	{ "UNIFIED_CFGS_PDBASE[1]", BBH_TX_UNIFIED_CFGS_PDBASE(1) },
+	{ "UNIFIED_CFGS_PDBASE[2]", BBH_TX_UNIFIED_CFGS_PDBASE(2) },
+	{ "UNIFIED_CFGS_PDBASE[3]", BBH_TX_UNIFIED_CFGS_PDBASE(3) },
+	{ "UNIFIED_CFGS_PDSIZE[0]", BBH_TX_UNIFIED_CFGS_PDSIZE(0) },
+	{ "UNIFIED_CFGS_PDSIZE[1]", BBH_TX_UNIFIED_CFGS_PDSIZE(1) },
+	{ "UNIFIED_CFGS_PDSIZE[2]", BBH_TX_UNIFIED_CFGS_PDSIZE(2) },
+	{ "UNIFIED_CFGS_PDSIZE[3]", BBH_TX_UNIFIED_CFGS_PDSIZE(3) },
+	{ "UNIFIED_CFGS_PDWKUPH[0]", BBH_TX_UNIFIED_CFGS_PDWKUPH(0) },
+	{ "UNIFIED_CFGS_PDWKUPH[1]", BBH_TX_UNIFIED_CFGS_PDWKUPH(1) },
+	{ "UNIFIED_CFGS_PDWKUPH[2]", BBH_TX_UNIFIED_CFGS_PDWKUPH(2) },
+	{ "UNIFIED_CFGS_PDWKUPH[3]", BBH_TX_UNIFIED_CFGS_PDWKUPH(3) },
+	{ "UNIFIED_CFGS_PD_BYTE_TH[0]", BBH_TX_UNIFIED_CFGS_PD_BYTE_TH(0) },
+	{ "UNIFIED_CFGS_PD_BYTE_TH[1]", BBH_TX_UNIFIED_CFGS_PD_BYTE_TH(1) },
+	{ "UNIFIED_CFGS_PD_BYTE_TH[2]", BBH_TX_UNIFIED_CFGS_PD_BYTE_TH(2) },
+	{ "UNIFIED_CFGS_PD_BYTE_TH[3]", BBH_TX_UNIFIED_CFGS_PD_BYTE_TH(3) },
+	{ "UNIFIED_CFGS_PD_BYTE_TH_EN", BBH_TX_UNIFIED_CFGS_PD_BYTE_TH_EN },
+	{ "UNIFIED_CFGS_PDEMPTY", BBH_TX_UNIFIED_CFGS_PDEMPTY },
+	{ "UNIFIED_CFGS_GTXTHRESH", BBH_TX_UNIFIED_CFGS_GTXTHRESH },
+	{ "UNIFIED_CFGS_EEE", BBH_TX_UNIFIED_CFGS_EEE },
+	{ "UNIFIED_CFGS_TS", BBH_TX_UNIFIED_CFGS_TS },
+	{ "UNIFIED_CFGS_FEBASE[0]", BBH_TX_UNIFIED_CFGS_FEBASE(0) },
+	{ "UNIFIED_CFGS_FEBASE[1]", BBH_TX_UNIFIED_CFGS_FEBASE(1) },
+	{ "UNIFIED_CFGS_FEBASE[2]", BBH_TX_UNIFIED_CFGS_FEBASE(2) },
+	{ "UNIFIED_CFGS_FEBASE[3]", BBH_TX_UNIFIED_CFGS_FEBASE(3) },
+	{ "UNIFIED_CFGS_FESIZE[0]", BBH_TX_UNIFIED_CFGS_FESIZE(0) },
+	{ "UNIFIED_CFGS_FESIZE[1]", BBH_TX_UNIFIED_CFGS_FESIZE(1) },
+	{ "UNIFIED_CFGS_FESIZE[2]", BBH_TX_UNIFIED_CFGS_FESIZE(2) },
+	{ "UNIFIED_CFGS_FESIZE[3]", BBH_TX_UNIFIED_CFGS_FESIZE(3) },
+	{ "UNIFIED_CFGS_FEPDBASE[0]", BBH_TX_UNIFIED_CFGS_FEPDBASE(0) },
+	{ "UNIFIED_CFGS_FEPDBASE[1]", BBH_TX_UNIFIED_CFGS_FEPDBASE(1) },
+	{ "UNIFIED_CFGS_FEPDBASE[2]", BBH_TX_UNIFIED_CFGS_FEPDBASE(2) },
+	{ "UNIFIED_CFGS_FEPDBASE[3]", BBH_TX_UNIFIED_CFGS_FEPDBASE(3) },
+	{ "UNIFIED_CFGS_FEPDSIZE[0]", BBH_TX_UNIFIED_CFGS_FEPDSIZE(0) },
+	{ "UNIFIED_CFGS_FEPDSIZE[1]", BBH_TX_UNIFIED_CFGS_FEPDSIZE(1) },
+	{ "UNIFIED_CFGS_FEPDSIZE[2]", BBH_TX_UNIFIED_CFGS_FEPDSIZE(2) },
+	{ "UNIFIED_CFGS_FEPDSIZE[3]", BBH_TX_UNIFIED_CFGS_FEPDSIZE(3) },
+	{ "UNIFIED_CFGS_TXWRR[0]", BBH_TX_UNIFIED_CFGS_TXWRR(0) },
+	{ "UNIFIED_CFGS_TXWRR[1]", BBH_TX_UNIFIED_CFGS_TXWRR(1) },
+	{ "UNIFIED_CFGS_TXWRR[2]", BBH_TX_UNIFIED_CFGS_TXWRR(2) },
+	{ "UNIFIED_CFGS_TXWRR[3]", BBH_TX_UNIFIED_CFGS_TXWRR(3) },
+	{ "UNIFIED_CFGS_TXTHRESH[0]", BBH_TX_UNIFIED_CFGS_TXTHRESH(0) },
+	{ "UNIFIED_CFGS_TXTHRESH[1]", BBH_TX_UNIFIED_CFGS_TXTHRESH(1) },
+	{ "UNIFIED_CFGS_TXTHRESH[2]", BBH_TX_UNIFIED_CFGS_TXTHRESH(2) },
+	{ "UNIFIED_CFGS_TXTHRESH[3]", BBH_TX_UNIFIED_CFGS_TXTHRESH(3) },
+	{ "DEBUG_COUNTERS_SRAMPD", BBH_TX_DEBUG_COUNTERS_SRAMPD }, /* DEBUG_COUNTERS group */
+	{ "DEBUG_COUNTERS_DDRPD", BBH_TX_DEBUG_COUNTERS_DDRPD },
+	{ "DEBUG_COUNTERS_PDDROP", BBH_TX_DEBUG_COUNTERS_PDDROP },
+	{ "DEBUG_COUNTERS_STSCNT", BBH_TX_DEBUG_COUNTERS_STSCNT },
+	{ "DEBUG_COUNTERS_STSDROP", BBH_TX_DEBUG_COUNTERS_STSDROP },
+	{ "DEBUG_COUNTERS_MSGCNT", BBH_TX_DEBUG_COUNTERS_MSGCNT },
+	{ "DEBUG_COUNTERS_MSGDROP", BBH_TX_DEBUG_COUNTERS_MSGDROP },
+	{ "DEBUG_COUNTERS_GETNEXTNULL", BBH_TX_DEBUG_COUNTERS_GETNEXTNULL },
+	{ "DEBUG_COUNTERS_FLUSHPKTS", BBH_TX_DEBUG_COUNTERS_FLUSHPKTS },
+	{ "DEBUG_COUNTERS_LENERR", BBH_TX_DEBUG_COUNTERS_LENERR },
+	{ "DEBUG_COUNTERS_AGGRLENERR", BBH_TX_DEBUG_COUNTERS_AGGRLENERR },
+	{ "DEBUG_COUNTERS_SRAMPKT", BBH_TX_DEBUG_COUNTERS_SRAMPKT },
+	{ "DEBUG_COUNTERS_DDRPKT", BBH_TX_DEBUG_COUNTERS_DDRPKT },
+	{ "DEBUG_COUNTERS_SRAMBYTE", BBH_TX_DEBUG_COUNTERS_SRAMBYTE },
+	{ "DEBUG_COUNTERS_DDRBYTE", BBH_TX_DEBUG_COUNTERS_DDRBYTE },
+	{ "DEBUG_COUNTERS_SWRDEN", BBH_TX_DEBUG_COUNTERS_SWRDEN },
+	{ "DEBUG_COUNTERS_UNIFIEDPKT[0]", BBH_TX_DEBUG_COUNTERS_UNIFIEDPKT(0) },
+	{ "DEBUG_COUNTERS_UNIFIEDPKT[1]", BBH_TX_DEBUG_COUNTERS_UNIFIEDPKT(1) },
+	{ "DEBUG_COUNTERS_UNIFIEDPKT[2]", BBH_TX_DEBUG_COUNTERS_UNIFIEDPKT(2) },
+	{ "DEBUG_COUNTERS_UNIFIEDPKT[3]", BBH_TX_DEBUG_COUNTERS_UNIFIEDPKT(3) },
+	{ "DEBUG_COUNTERS_UNIFIEDPKT[4]", BBH_TX_DEBUG_COUNTERS_UNIFIEDPKT(4) },
+	{ "DEBUG_COUNTERS_UNIFIEDPKT[5]", BBH_TX_DEBUG_COUNTERS_UNIFIEDPKT(5) },
+	{ "DEBUG_COUNTERS_UNIFIEDPKT[6]", BBH_TX_DEBUG_COUNTERS_UNIFIEDPKT(6) },
+	{ "DEBUG_COUNTERS_UNIFIEDPKT[7]", BBH_TX_DEBUG_COUNTERS_UNIFIEDPKT(7) },
+	{ "DEBUG_COUNTERS_UNIFIEDBYTE[0]", BBH_TX_DEBUG_COUNTERS_UNIFIEDBYTE(0) },
+	{ "DEBUG_COUNTERS_UNIFIEDBYTE[1]", BBH_TX_DEBUG_COUNTERS_UNIFIEDBYTE(1) },
+	{ "DEBUG_COUNTERS_UNIFIEDBYTE[2]", BBH_TX_DEBUG_COUNTERS_UNIFIEDBYTE(2) },
+	{ "DEBUG_COUNTERS_UNIFIEDBYTE[3]", BBH_TX_DEBUG_COUNTERS_UNIFIEDBYTE(3) },
+	{ "DEBUG_COUNTERS_UNIFIEDBYTE[4]", BBH_TX_DEBUG_COUNTERS_UNIFIEDBYTE(4) },
+	{ "DEBUG_COUNTERS_UNIFIEDBYTE[5]", BBH_TX_DEBUG_COUNTERS_UNIFIEDBYTE(5) },
+	{ "DEBUG_COUNTERS_UNIFIEDBYTE[6]", BBH_TX_DEBUG_COUNTERS_UNIFIEDBYTE(6) },
+	{ "DEBUG_COUNTERS_UNIFIEDBYTE[7]", BBH_TX_DEBUG_COUNTERS_UNIFIEDBYTE(7) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[0]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(0) }, /* DBGOUTREG: indexed 0-31 */
+	{ "DEBUG_COUNTERS_DBGOUTREG[1]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(1) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[2]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(2) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[3]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(3) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[4]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(4) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[5]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(5) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[6]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(6) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[7]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(7) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[8]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(8) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[9]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(9) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[10]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(10) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[11]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(11) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[12]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(12) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[13]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(13) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[14]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(14) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[15]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(15) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[16]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(16) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[17]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(17) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[18]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(18) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[19]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(19) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[20]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(20) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[21]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(21) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[22]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(22) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[23]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(23) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[24]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(24) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[25]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(25) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[26]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(26) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[27]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(27) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[28]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(28) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[29]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(29) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[30]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(30) },
+	{ "DEBUG_COUNTERS_DBGOUTREG[31]", BBH_TX_DEBUG_COUNTERS_DBGOUTREG(31) },
+	{ "DEBUG_COUNTERS_IN_SEGMENTATION[0]", BBH_TX_DEBUG_COUNTERS_IN_SEGMENTATION(0) },
+	{ "DEBUG_COUNTERS_IN_SEGMENTATION[1]", BBH_TX_DEBUG_COUNTERS_IN_SEGMENTATION(1) },
+};
+
+static const struct reg_desc dma_regs[] = { /* DMA block register name -> offset table; presumably for a register-dump helper — confirm with caller */
+	{ "CONFIG_BBROUTEOVRD", DMA_CONFIG_BBROUTEOVRD }, /* CONFIG group: per-channel entries indexed 0-7 */
+	{ "CONFIG_NUM_OF_WRITES[0]", DMA_CONFIG_NUM_OF_WRITES(0) },
+	{ "CONFIG_NUM_OF_WRITES[1]", DMA_CONFIG_NUM_OF_WRITES(1) },
+	{ "CONFIG_NUM_OF_WRITES[2]", DMA_CONFIG_NUM_OF_WRITES(2) },
+	{ "CONFIG_NUM_OF_WRITES[3]", DMA_CONFIG_NUM_OF_WRITES(3) },
+	{ "CONFIG_NUM_OF_WRITES[4]", DMA_CONFIG_NUM_OF_WRITES(4) },
+	{ "CONFIG_NUM_OF_WRITES[5]", DMA_CONFIG_NUM_OF_WRITES(5) },
+	{ "CONFIG_NUM_OF_WRITES[6]", DMA_CONFIG_NUM_OF_WRITES(6) },
+	{ "CONFIG_NUM_OF_WRITES[7]", DMA_CONFIG_NUM_OF_WRITES(7) },
+	{ "CONFIG_NUM_OF_READS[0]", DMA_CONFIG_NUM_OF_READS(0) },
+	{ "CONFIG_NUM_OF_READS[1]", DMA_CONFIG_NUM_OF_READS(1) },
+	{ "CONFIG_NUM_OF_READS[2]", DMA_CONFIG_NUM_OF_READS(2) },
+	{ "CONFIG_NUM_OF_READS[3]", DMA_CONFIG_NUM_OF_READS(3) },
+	{ "CONFIG_NUM_OF_READS[4]", DMA_CONFIG_NUM_OF_READS(4) },
+	{ "CONFIG_NUM_OF_READS[5]", DMA_CONFIG_NUM_OF_READS(5) },
+	{ "CONFIG_NUM_OF_READS[6]", DMA_CONFIG_NUM_OF_READS(6) },
+	{ "CONFIG_NUM_OF_READS[7]", DMA_CONFIG_NUM_OF_READS(7) },
+	{ "CONFIG_U_THRESH[0]", DMA_CONFIG_U_THRESH(0) },
+	{ "CONFIG_U_THRESH[1]", DMA_CONFIG_U_THRESH(1) },
+	{ "CONFIG_U_THRESH[2]", DMA_CONFIG_U_THRESH(2) },
+	{ "CONFIG_U_THRESH[3]", DMA_CONFIG_U_THRESH(3) },
+	{ "CONFIG_U_THRESH[4]", DMA_CONFIG_U_THRESH(4) },
+	{ "CONFIG_U_THRESH[5]", DMA_CONFIG_U_THRESH(5) },
+	{ "CONFIG_U_THRESH[6]", DMA_CONFIG_U_THRESH(6) },
+	{ "CONFIG_U_THRESH[7]", DMA_CONFIG_U_THRESH(7) },
+	{ "CONFIG_PRI[0]", DMA_CONFIG_PRI(0) },
+	{ "CONFIG_PRI[1]", DMA_CONFIG_PRI(1) },
+	{ "CONFIG_PRI[2]", DMA_CONFIG_PRI(2) },
+	{ "CONFIG_PRI[3]", DMA_CONFIG_PRI(3) },
+	{ "CONFIG_PRI[4]", DMA_CONFIG_PRI(4) },
+	{ "CONFIG_PRI[5]", DMA_CONFIG_PRI(5) },
+	{ "CONFIG_PRI[6]", DMA_CONFIG_PRI(6) },
+	{ "CONFIG_PRI[7]", DMA_CONFIG_PRI(7) },
+	{ "CONFIG_PERIPH_SOURCE[0]", DMA_CONFIG_PERIPH_SOURCE(0) },
+	{ "CONFIG_PERIPH_SOURCE[1]", DMA_CONFIG_PERIPH_SOURCE(1) },
+	{ "CONFIG_PERIPH_SOURCE[2]", DMA_CONFIG_PERIPH_SOURCE(2) },
+	{ "CONFIG_PERIPH_SOURCE[3]", DMA_CONFIG_PERIPH_SOURCE(3) },
+	{ "CONFIG_PERIPH_SOURCE[4]", DMA_CONFIG_PERIPH_SOURCE(4) },
+	{ "CONFIG_PERIPH_SOURCE[5]", DMA_CONFIG_PERIPH_SOURCE(5) },
+	{ "CONFIG_PERIPH_SOURCE[6]", DMA_CONFIG_PERIPH_SOURCE(6) },
+	{ "CONFIG_PERIPH_SOURCE[7]", DMA_CONFIG_PERIPH_SOURCE(7) },
+	{ "CONFIG_WEIGHT[0]", DMA_CONFIG_WEIGHT(0) },
+	{ "CONFIG_WEIGHT[1]", DMA_CONFIG_WEIGHT(1) },
+	{ "CONFIG_WEIGHT[2]", DMA_CONFIG_WEIGHT(2) },
+	{ "CONFIG_WEIGHT[3]", DMA_CONFIG_WEIGHT(3) },
+	{ "CONFIG_WEIGHT[4]", DMA_CONFIG_WEIGHT(4) },
+	{ "CONFIG_WEIGHT[5]", DMA_CONFIG_WEIGHT(5) },
+	{ "CONFIG_WEIGHT[6]", DMA_CONFIG_WEIGHT(6) },
+	{ "CONFIG_WEIGHT[7]", DMA_CONFIG_WEIGHT(7) },
+	{ "CONFIG_PTRRST", DMA_CONFIG_PTRRST },
+	{ "CONFIG_MAX_OTF", DMA_CONFIG_MAX_OTF },
+	{ "CONFIG_CLK_GATE_CNTRL", DMA_CONFIG_CLK_GATE_CNTRL },
+	{ "CONFIG_DBG_SEL", DMA_CONFIG_DBG_SEL },
+	{ "DEBUG_NEMPTY", DMA_DEBUG_NEMPTY }, /* DEBUG group */
+	{ "DEBUG_URGNT", DMA_DEBUG_URGNT },
+	{ "DEBUG_SELSRC", DMA_DEBUG_SELSRC },
+	{ "DEBUG_REQ_CNT_RX[0]", DMA_DEBUG_REQ_CNT_RX(0) },
+	{ "DEBUG_REQ_CNT_RX[1]", DMA_DEBUG_REQ_CNT_RX(1) },
+	{ "DEBUG_REQ_CNT_RX[2]", DMA_DEBUG_REQ_CNT_RX(2) },
+	{ "DEBUG_REQ_CNT_RX[3]", DMA_DEBUG_REQ_CNT_RX(3) },
+	{ "DEBUG_REQ_CNT_RX[4]", DMA_DEBUG_REQ_CNT_RX(4) },
+	{ "DEBUG_REQ_CNT_RX[5]", DMA_DEBUG_REQ_CNT_RX(5) },
+	{ "DEBUG_REQ_CNT_RX[6]", DMA_DEBUG_REQ_CNT_RX(6) },
+	{ "DEBUG_REQ_CNT_RX[7]", DMA_DEBUG_REQ_CNT_RX(7) },
+	{ "DEBUG_REQ_CNT_TX[0]", DMA_DEBUG_REQ_CNT_TX(0) },
+	{ "DEBUG_REQ_CNT_TX[1]", DMA_DEBUG_REQ_CNT_TX(1) },
+	{ "DEBUG_REQ_CNT_TX[2]", DMA_DEBUG_REQ_CNT_TX(2) },
+	{ "DEBUG_REQ_CNT_TX[3]", DMA_DEBUG_REQ_CNT_TX(3) },
+	{ "DEBUG_REQ_CNT_TX[4]", DMA_DEBUG_REQ_CNT_TX(4) },
+	{ "DEBUG_REQ_CNT_TX[5]", DMA_DEBUG_REQ_CNT_TX(5) },
+	{ "DEBUG_REQ_CNT_TX[6]", DMA_DEBUG_REQ_CNT_TX(6) },
+	{ "DEBUG_REQ_CNT_TX[7]", DMA_DEBUG_REQ_CNT_TX(7) },
+	{ "DEBUG_REQ_CNT_RX_ACC[0]", DMA_DEBUG_REQ_CNT_RX_ACC(0) },
+	{ "DEBUG_REQ_CNT_RX_ACC[1]", DMA_DEBUG_REQ_CNT_RX_ACC(1) },
+	{ "DEBUG_REQ_CNT_RX_ACC[2]", DMA_DEBUG_REQ_CNT_RX_ACC(2) },
+	{ "DEBUG_REQ_CNT_RX_ACC[3]", DMA_DEBUG_REQ_CNT_RX_ACC(3) },
+	{ "DEBUG_REQ_CNT_RX_ACC[4]", DMA_DEBUG_REQ_CNT_RX_ACC(4) },
+	{ "DEBUG_REQ_CNT_RX_ACC[5]", DMA_DEBUG_REQ_CNT_RX_ACC(5) },
+	{ "DEBUG_REQ_CNT_RX_ACC[6]", DMA_DEBUG_REQ_CNT_RX_ACC(6) },
+	{ "DEBUG_REQ_CNT_RX_ACC[7]", DMA_DEBUG_REQ_CNT_RX_ACC(7) },
+	{ "DEBUG_REQ_CNT_TX_ACC[0]", DMA_DEBUG_REQ_CNT_TX_ACC(0) },
+	{ "DEBUG_REQ_CNT_TX_ACC[1]", DMA_DEBUG_REQ_CNT_TX_ACC(1) },
+	{ "DEBUG_REQ_CNT_TX_ACC[2]", DMA_DEBUG_REQ_CNT_TX_ACC(2) },
+	{ "DEBUG_REQ_CNT_TX_ACC[3]", DMA_DEBUG_REQ_CNT_TX_ACC(3) },
+	{ "DEBUG_REQ_CNT_TX_ACC[4]", DMA_DEBUG_REQ_CNT_TX_ACC(4) },
+	{ "DEBUG_REQ_CNT_TX_ACC[5]", DMA_DEBUG_REQ_CNT_TX_ACC(5) },
+	{ "DEBUG_REQ_CNT_TX_ACC[6]", DMA_DEBUG_REQ_CNT_TX_ACC(6) },
+	{ "DEBUG_REQ_CNT_TX_ACC[7]", DMA_DEBUG_REQ_CNT_TX_ACC(7) },
+};
+
<doc_update>
+static const struct reg_desc sbpm_regs[] = { /* SBPM (buffer pool manager) register name -> offset table; presumably for a register-dump helper — confirm with caller */
+	{ "INIT_FREE_LIST", SBPM_REGS_INIT_FREE_LIST },
+	{ "BN_ALLOC", SBPM_REGS_BN_ALLOC },
+	{ "BN_ALLOC_RPLY", SBPM_REGS_BN_ALLOC_RPLY },
+	{ "BN_FREE_WITH_CONTXT_LOW", SBPM_REGS_BN_FREE_WITH_CONTXT_LOW },
+	{ "BN_FREE_WITH_CONTXT_HIGH", SBPM_REGS_BN_FREE_WITH_CONTXT_HIGH },
+	{ "MCST_INC", SBPM_REGS_MCST_INC },
+	{ "MCST_INC_RPLY", SBPM_REGS_MCST_INC_RPLY },
+	{ "BN_CONNECT", SBPM_REGS_BN_CONNECT },
+	{ "BN_CONNECT_RPLY", SBPM_REGS_BN_CONNECT_RPLY },
+	{ "GET_NEXT", SBPM_REGS_GET_NEXT },
+	{ "GET_NEXT_RPLY", SBPM_REGS_GET_NEXT_RPLY },
+	{ "SBPM_CLK_GATE_CNTRL", SBPM_REGS_SBPM_CLK_GATE_CNTRL },
+	{ "BN_FREE_WITHOUT_CONTXT", SBPM_REGS_BN_FREE_WITHOUT_CONTXT },
+	{ "BN_FREE_WITHOUT_CONTXT_RPLY", SBPM_REGS_BN_FREE_WITHOUT_CONTXT_RPLY },
+	{ "BN_FREE_WITH_CONTXT_RPLY", SBPM_REGS_BN_FREE_WITH_CONTXT_RPLY },
+	{ "SBPM_GL_TRSH", SBPM_REGS_SBPM_GL_TRSH },
+	{ "SBPM_UG0_TRSH", SBPM_REGS_SBPM_UG0_TRSH },
+	{ "SBPM_UG1_TRSH", SBPM_REGS_SBPM_UG1_TRSH },
+	{ "SBPM_DBG", SBPM_REGS_SBPM_DBG },
+	{ "SBPM_UG0_BAC", SBPM_REGS_SBPM_UG0_BAC },
+	{ "SBPM_UG1_BAC", SBPM_REGS_SBPM_UG1_BAC },
+	{ "SBPM_GL_BAC", SBPM_REGS_SBPM_GL_BAC },
+	{ "SBPM_UG0_EXCL_HIGH_TRSH", SBPM_REGS_SBPM_UG0_EXCL_HIGH_TRSH },
+	{ "SBPM_UG1_EXCL_HIGH_TRSH", SBPM_REGS_SBPM_UG1_EXCL_HIGH_TRSH },
+	{ "SBPM_UG0_EXCL_LOW_TRSH", SBPM_REGS_SBPM_UG0_EXCL_LOW_TRSH },
+	{ "SBPM_UG1_EXCL_LOW_TRSH", SBPM_REGS_SBPM_UG1_EXCL_LOW_TRSH },
+	{ "SBPM_UG_STATUS", SBPM_REGS_SBPM_UG_STATUS },
+	{ "ERROR_HANDLING_PARAMS", SBPM_REGS_ERROR_HANDLING_PARAMS },
+	{ "SBPM_IIR_LOW", SBPM_REGS_SBPM_IIR_LOW },
+	{ "SBPM_IIR_HIGH", SBPM_REGS_SBPM_IIR_HIGH },
+	{ "SBPM_DBG_VEC0", SBPM_REGS_SBPM_DBG_VEC0 },
+	{ "SBPM_DBG_VEC1", SBPM_REGS_SBPM_DBG_VEC1 },
+	{ "SBPM_DBG_VEC2", SBPM_REGS_SBPM_DBG_VEC2 },
+	{ "SBPM_DBG_VEC3", SBPM_REGS_SBPM_DBG_VEC3 },
+	{ "SBPM_SP_BBH_LOW", SBPM_REGS_SBPM_SP_BBH_LOW },
+	{ "SBPM_SP_BBH_HIGH", SBPM_REGS_SBPM_SP_BBH_HIGH },
+	{ "SBPM_SP_RNR_LOW", SBPM_REGS_SBPM_SP_RNR_LOW },
+	{ "SBPM_SP_RNR_HIGH", SBPM_REGS_SBPM_SP_RNR_HIGH },
+	{ "SBPM_UG_MAP_LOW", SBPM_REGS_SBPM_UG_MAP_LOW },
+	{ "SBPM_UG_MAP_HIGH", SBPM_REGS_SBPM_UG_MAP_HIGH },
+	{ "SBPM_NACK_MASK_LOW", SBPM_REGS_SBPM_NACK_MASK_LOW },
+	{ "SBPM_NACK_MASK_HIGH", SBPM_REGS_SBPM_NACK_MASK_HIGH },
+	{ "SBPM_EXCL_MASK_LOW", SBPM_REGS_SBPM_EXCL_MASK_LOW },
+	{ "SBPM_EXCL_MASK_HIGH", SBPM_REGS_SBPM_EXCL_MASK_HIGH },
+	{ "SBPM_RADDR_DECODER", SBPM_REGS_SBPM_RADDR_DECODER },
+	{ "SBPM_WR_DATA", SBPM_REGS_SBPM_WR_DATA },
+	{ "SBPM_UG_BAC_MAX", SBPM_REGS_SBPM_UG_BAC_MAX },
+	{ "SBPM_SPARE", SBPM_REGS_SBPM_SPARE },
+	{ "SBPM_INTR_CTRL_ISR", SBPM_INTR_CTRL_ISR }, /* interrupt-controller registers (SBPM_INTR_CTRL_* macros, distinct from SBPM_REGS_*) */
+	{ "SBPM_INTR_CTRL_ISM", SBPM_INTR_CTRL_ISM },
+	{ "SBPM_INTR_CTRL_IER", SBPM_INTR_CTRL_IER },
+	{ "SBPM_INTR_CTRL_ITR", SBPM_INTR_CTRL_ITR },
+};
+
+static const struct reg_desc disp_regs[] = {
+	{ "REORDER_CFG_DSPTCHR_REORDR_CFG", DSPTCHR_REORDER_CFG_DSPTCHR_REORDR_CFG },
+	{ "REORDER_CFG_VQ_EN", DSPTCHR_REORDER_CFG_VQ_EN },
+	{ "REORDER_CFG_BB_CFG", DSPTCHR_REORDER_CFG_BB_CFG },
+	{ "REORDER_CFG_CLK_GATE_CNTRL", DSPTCHR_REORDER_CFG_CLK_GATE_CNTRL },
+	{ "CONGESTION_INGRS_CONGSTN[0]", DSPTCHR_CONGESTION_INGRS_CONGSTN(0) },
+	{ "CONGESTION_INGRS_CONGSTN[1]", DSPTCHR_CONGESTION_INGRS_CONGSTN(1) },
+	{ "CONGESTION_INGRS_CONGSTN[2]", DSPTCHR_CONGESTION_INGRS_CONGSTN(2) },
+	{ "CONGESTION_INGRS_CONGSTN[3]", DSPTCHR_CONGESTION_INGRS_CONGSTN(3) },
+	{ "CONGESTION_INGRS_CONGSTN[4]", DSPTCHR_CONGESTION_INGRS_CONGSTN(4) },
+	{ "CONGESTION_INGRS_CONGSTN[5]", DSPTCHR_CONGESTION_INGRS_CONGSTN(5) },
+	{ "CONGESTION_INGRS_CONGSTN[6]", DSPTCHR_CONGESTION_INGRS_CONGSTN(6) },
+	{ "CONGESTION_INGRS_CONGSTN[7]", DSPTCHR_CONGESTION_INGRS_CONGSTN(7) },
+	{ "CONGESTION_INGRS_CONGSTN[8]", DSPTCHR_CONGESTION_INGRS_CONGSTN(8) },
+	{ "CONGESTION_INGRS_CONGSTN[9]", DSPTCHR_CONGESTION_INGRS_CONGSTN(9) },
+	{ "CONGESTION_INGRS_CONGSTN[10]", DSPTCHR_CONGESTION_INGRS_CONGSTN(10) },
+	{ "CONGESTION_INGRS_CONGSTN[11]", DSPTCHR_CONGESTION_INGRS_CONGSTN(11) },
+	{ "CONGESTION_INGRS_CONGSTN[12]", DSPTCHR_CONGESTION_INGRS_CONGSTN(12) },
+	{ "CONGESTION_INGRS_CONGSTN[13]", DSPTCHR_CONGESTION_INGRS_CONGSTN(13) },
+	{ "CONGESTION_INGRS_CONGSTN[14]", DSPTCHR_CONGESTION_INGRS_CONGSTN(14) },
+	{ "CONGESTION_INGRS_CONGSTN[15]", DSPTCHR_CONGESTION_INGRS_CONGSTN(15) },
+	{ "CONGESTION_INGRS_CONGSTN[16]", DSPTCHR_CONGESTION_INGRS_CONGSTN(16) },
+	{ "CONGESTION_INGRS_CONGSTN[17]", DSPTCHR_CONGESTION_INGRS_CONGSTN(17) },
+	{ "CONGESTION_INGRS_CONGSTN[18]", DSPTCHR_CONGESTION_INGRS_CONGSTN(18) },
+	{ "CONGESTION_INGRS_CONGSTN[19]", DSPTCHR_CONGESTION_INGRS_CONGSTN(19) },
+	{ "CONGESTION_INGRS_CONGSTN[20]", DSPTCHR_CONGESTION_INGRS_CONGSTN(20) },
+	{ "CONGESTION_INGRS_CONGSTN[21]", DSPTCHR_CONGESTION_INGRS_CONGSTN(21) },
+	{ "CONGESTION_INGRS_CONGSTN[22]", DSPTCHR_CONGESTION_INGRS_CONGSTN(22) },
+	{ "CONGESTION_INGRS_CONGSTN[23]", DSPTCHR_CONGESTION_INGRS_CONGSTN(23) },
+	{ "CONGESTION_INGRS_CONGSTN[24]", DSPTCHR_CONGESTION_INGRS_CONGSTN(24) },
+	{ "CONGESTION_INGRS_CONGSTN[25]", DSPTCHR_CONGESTION_INGRS_CONGSTN(25) },
+	{ "CONGESTION_INGRS_CONGSTN[26]", DSPTCHR_CONGESTION_INGRS_CONGSTN(26) },
+	{ "CONGESTION_INGRS_CONGSTN[27]", DSPTCHR_CONGESTION_INGRS_CONGSTN(27) },
+	{ "CONGESTION_INGRS_CONGSTN[28]", DSPTCHR_CONGESTION_INGRS_CONGSTN(28) },
+	{ "CONGESTION_INGRS_CONGSTN[29]", DSPTCHR_CONGESTION_INGRS_CONGSTN(29) },
+	{ "CONGESTION_INGRS_CONGSTN[30]", DSPTCHR_CONGESTION_INGRS_CONGSTN(30) },
+	{ "CONGESTION_INGRS_CONGSTN[31]", DSPTCHR_CONGESTION_INGRS_CONGSTN(31) },
+	{ "CONGESTION_EGRS_CONGSTN[0]", DSPTCHR_CONGESTION_EGRS_CONGSTN(0) },
+	{ "CONGESTION_EGRS_CONGSTN[1]", DSPTCHR_CONGESTION_EGRS_CONGSTN(1) },
+	{ "CONGESTION_EGRS_CONGSTN[2]", DSPTCHR_CONGESTION_EGRS_CONGSTN(2) },
+	{ "CONGESTION_EGRS_CONGSTN[3]", DSPTCHR_CONGESTION_EGRS_CONGSTN(3) },
+	{ "CONGESTION_EGRS_CONGSTN[4]", DSPTCHR_CONGESTION_EGRS_CONGSTN(4) },
+	{ "CONGESTION_EGRS_CONGSTN[5]", DSPTCHR_CONGESTION_EGRS_CONGSTN(5) },
+	{ "CONGESTION_EGRS_CONGSTN[6]", DSPTCHR_CONGESTION_EGRS_CONGSTN(6) },
+	{ "CONGESTION_EGRS_CONGSTN[7]", DSPTCHR_CONGESTION_EGRS_CONGSTN(7) },
+	{ "CONGESTION_EGRS_CONGSTN[8]", DSPTCHR_CONGESTION_EGRS_CONGSTN(8) },
+	{ "CONGESTION_EGRS_CONGSTN[9]", DSPTCHR_CONGESTION_EGRS_CONGSTN(9) },
+	{ "CONGESTION_EGRS_CONGSTN[10]", DSPTCHR_CONGESTION_EGRS_CONGSTN(10) },
+	{ "CONGESTION_EGRS_CONGSTN[11]", DSPTCHR_CONGESTION_EGRS_CONGSTN(11) },
+	{ "CONGESTION_EGRS_CONGSTN[12]", DSPTCHR_CONGESTION_EGRS_CONGSTN(12) },
+	{ "CONGESTION_EGRS_CONGSTN[13]", DSPTCHR_CONGESTION_EGRS_CONGSTN(13) },
+	{ "CONGESTION_EGRS_CONGSTN[14]", DSPTCHR_CONGESTION_EGRS_CONGSTN(14) },
+	{ "CONGESTION_EGRS_CONGSTN[15]", DSPTCHR_CONGESTION_EGRS_CONGSTN(15) },
+	{ "CONGESTION_EGRS_CONGSTN[16]", DSPTCHR_CONGESTION_EGRS_CONGSTN(16) },
+	{ "CONGESTION_EGRS_CONGSTN[17]", DSPTCHR_CONGESTION_EGRS_CONGSTN(17) },
+	{ "CONGESTION_EGRS_CONGSTN[18]", DSPTCHR_CONGESTION_EGRS_CONGSTN(18) },
+	{ "CONGESTION_EGRS_CONGSTN[19]", DSPTCHR_CONGESTION_EGRS_CONGSTN(19) },
+	{ "CONGESTION_EGRS_CONGSTN[20]", DSPTCHR_CONGESTION_EGRS_CONGSTN(20) },
+	{ "CONGESTION_EGRS_CONGSTN[21]", DSPTCHR_CONGESTION_EGRS_CONGSTN(21) },
+	{ "CONGESTION_EGRS_CONGSTN[22]", DSPTCHR_CONGESTION_EGRS_CONGSTN(22) },
+	{ "CONGESTION_EGRS_CONGSTN[23]", DSPTCHR_CONGESTION_EGRS_CONGSTN(23) },
+	{ "CONGESTION_EGRS_CONGSTN[24]", DSPTCHR_CONGESTION_EGRS_CONGSTN(24) },
+	{ "CONGESTION_EGRS_CONGSTN[25]", DSPTCHR_CONGESTION_EGRS_CONGSTN(25) },
+	{ "CONGESTION_EGRS_CONGSTN[26]", DSPTCHR_CONGESTION_EGRS_CONGSTN(26) },
+	{ "CONGESTION_EGRS_CONGSTN[27]", DSPTCHR_CONGESTION_EGRS_CONGSTN(27) },
+	{ "CONGESTION_EGRS_CONGSTN[28]", DSPTCHR_CONGESTION_EGRS_CONGSTN(28) },
+	{ "CONGESTION_EGRS_CONGSTN[29]", DSPTCHR_CONGESTION_EGRS_CONGSTN(29) },
+	{ "CONGESTION_EGRS_CONGSTN[30]", DSPTCHR_CONGESTION_EGRS_CONGSTN(30) },
+	{ "CONGESTION_EGRS_CONGSTN[31]", DSPTCHR_CONGESTION_EGRS_CONGSTN(31) },
+	{ "CONGESTION_TOTAL_EGRS_CONGSTN", DSPTCHR_CONGESTION_TOTAL_EGRS_CONGSTN },
+	{ "CONGESTION_GLBL_CONGSTN", DSPTCHR_CONGESTION_GLBL_CONGSTN },
+	{ "CONGESTION_CONGSTN_STATUS", DSPTCHR_CONGESTION_CONGSTN_STATUS },
+	{ "CONGESTION_PER_Q_INGRS_CONGSTN_LOW", DSPTCHR_CONGESTION_PER_Q_INGRS_CONGSTN_LOW },
+	{ "CONGESTION_PER_Q_INGRS_CONGSTN_HIGH", DSPTCHR_CONGESTION_PER_Q_INGRS_CONGSTN_HIGH },
+	{ "CONGESTION_PER_Q_EGRS_CONGSTN_LOW", DSPTCHR_CONGESTION_PER_Q_EGRS_CONGSTN_LOW },
+	{ "CONGESTION_PER_Q_EGRS_CONGSTN_HIGH", DSPTCHR_CONGESTION_PER_Q_EGRS_CONGSTN_HIGH },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[0]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(0) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[1]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(1) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[2]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(2) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[3]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(3) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[4]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(4) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[5]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(5) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[6]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(6) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[7]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(7) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[8]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(8) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[9]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(9) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[10]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(10) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[11]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(11) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[12]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(12) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[13]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(13) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[14]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(14) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[15]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(15) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[16]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(16) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[17]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(17) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[18]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(18) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[19]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(19) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[20]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(20) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[21]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(21) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[22]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(22) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[23]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(23) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[24]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(24) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[25]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(25) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[26]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(26) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[27]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(27) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[28]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(28) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[29]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(29) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[30]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(30) },
+	{ "INGRS_QUEUES_Q_INGRS_SIZE[31]", DSPTCHR_INGRS_QUEUES_Q_INGRS_SIZE(31) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[0]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(0) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[1]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(1) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[2]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(2) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[3]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(3) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[4]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(4) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[5]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(5) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[6]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(6) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[7]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(7) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[8]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(8) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[9]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(9) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[10]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(10) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[11]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(11) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[12]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(12) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[13]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(13) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[14]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(14) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[15]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(15) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[16]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(16) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[17]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(17) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[18]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(18) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[19]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(19) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[20]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(20) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[21]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(21) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[22]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(22) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[23]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(23) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[24]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(24) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[25]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(25) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[26]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(26) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[27]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(27) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[28]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(28) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[29]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(29) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[30]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(30) },
+	{ "INGRS_QUEUES_Q_INGRS_LIMITS[31]", DSPTCHR_INGRS_QUEUES_Q_INGRS_LIMITS(31) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[0]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(0) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[1]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(1) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[2]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(2) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[3]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(3) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[4]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(4) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[5]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(5) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[6]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(6) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[7]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(7) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[8]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(8) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[9]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(9) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[10]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(10) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[11]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(11) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[12]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(12) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[13]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(13) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[14]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(14) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[15]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(15) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[16]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(16) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[17]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(17) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[18]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(18) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[19]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(19) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[20]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(20) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[21]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(21) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[22]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(22) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[23]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(23) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[24]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(24) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[25]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(25) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[26]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(26) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[27]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(27) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[28]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(28) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[29]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(29) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[30]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(30) },
+	{ "INGRS_QUEUES_Q_INGRS_COHRENCY[31]", DSPTCHR_INGRS_QUEUES_Q_INGRS_COHRENCY(31) },
+	{ "QUEUE_MAPPING_CRDT_CFG[0]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(0) },
+	{ "QUEUE_MAPPING_CRDT_CFG[1]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(1) },
+	{ "QUEUE_MAPPING_CRDT_CFG[2]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(2) },
+	{ "QUEUE_MAPPING_CRDT_CFG[3]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(3) },
+	{ "QUEUE_MAPPING_CRDT_CFG[4]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(4) },
+	{ "QUEUE_MAPPING_CRDT_CFG[5]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(5) },
+	{ "QUEUE_MAPPING_CRDT_CFG[6]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(6) },
+	{ "QUEUE_MAPPING_CRDT_CFG[7]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(7) },
+	{ "QUEUE_MAPPING_CRDT_CFG[8]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(8) },
+	{ "QUEUE_MAPPING_CRDT_CFG[9]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(9) },
+	{ "QUEUE_MAPPING_CRDT_CFG[10]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(10) },
+	{ "QUEUE_MAPPING_CRDT_CFG[11]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(11) },
+	{ "QUEUE_MAPPING_CRDT_CFG[12]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(12) },
+	{ "QUEUE_MAPPING_CRDT_CFG[13]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(13) },
+	{ "QUEUE_MAPPING_CRDT_CFG[14]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(14) },
+	{ "QUEUE_MAPPING_CRDT_CFG[15]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(15) },
+	{ "QUEUE_MAPPING_CRDT_CFG[16]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(16) },
+	{ "QUEUE_MAPPING_CRDT_CFG[17]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(17) },
+	{ "QUEUE_MAPPING_CRDT_CFG[18]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(18) },
+	{ "QUEUE_MAPPING_CRDT_CFG[19]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(19) },
+	{ "QUEUE_MAPPING_CRDT_CFG[20]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(20) },
+	{ "QUEUE_MAPPING_CRDT_CFG[21]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(21) },
+	{ "QUEUE_MAPPING_CRDT_CFG[22]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(22) },
+	{ "QUEUE_MAPPING_CRDT_CFG[23]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(23) },
+	{ "QUEUE_MAPPING_CRDT_CFG[24]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(24) },
+	{ "QUEUE_MAPPING_CRDT_CFG[25]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(25) },
+	{ "QUEUE_MAPPING_CRDT_CFG[26]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(26) },
+	{ "QUEUE_MAPPING_CRDT_CFG[27]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(27) },
+	{ "QUEUE_MAPPING_CRDT_CFG[28]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(28) },
+	{ "QUEUE_MAPPING_CRDT_CFG[29]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(29) },
+	{ "QUEUE_MAPPING_CRDT_CFG[30]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(30) },
+	{ "QUEUE_MAPPING_CRDT_CFG[31]", DSPTCHR_QUEUE_MAPPING_CRDT_CFG(31) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[0]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(0) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[1]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(1) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[2]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(2) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[3]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(3) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[4]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(4) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[5]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(5) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[6]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(6) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[7]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(7) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[8]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(8) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[9]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(9) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[10]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(10) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[11]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(11) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[12]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(12) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[13]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(13) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[14]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(14) },
+	{ "QUEUE_MAPPING_PD_DSPTCH_ADD[15]", DSPTCHR_QUEUE_MAPPING_PD_DSPTCH_ADD(15) },
+	{ "QUEUE_MAPPING_Q_DEST", DSPTCHR_QUEUE_MAPPING_Q_DEST },
+	{ "POOL_SIZES_CMN_POOL_LMT", DSPTCHR_POOL_SIZES_CMN_POOL_LMT },
+	{ "POOL_SIZES_CMN_POOL_SIZE", DSPTCHR_POOL_SIZES_CMN_POOL_SIZE },
+	{ "POOL_SIZES_GRNTED_POOL_LMT", DSPTCHR_POOL_SIZES_GRNTED_POOL_LMT },
+	{ "POOL_SIZES_GRNTED_POOL_SIZE", DSPTCHR_POOL_SIZES_GRNTED_POOL_SIZE },
+	{ "POOL_SIZES_MULTI_CST_POOL_LMT", DSPTCHR_POOL_SIZES_MULTI_CST_POOL_LMT },
+	{ "POOL_SIZES_MULTI_CST_POOL_SIZE", DSPTCHR_POOL_SIZES_MULTI_CST_POOL_SIZE },
+	{ "POOL_SIZES_RNR_POOL_LMT", DSPTCHR_POOL_SIZES_RNR_POOL_LMT },
+	{ "POOL_SIZES_RNR_POOL_SIZE", DSPTCHR_POOL_SIZES_RNR_POOL_SIZE },
+	{ "POOL_SIZES_PRCSSING_POOL_SIZE", DSPTCHR_POOL_SIZES_PRCSSING_POOL_SIZE },
+	{ "MASK_MSK_TSK_255_0[0]", DSPTCHR_MASK_MSK_TSK_255_0(0) },
+	{ "MASK_MSK_TSK_255_0[1]", DSPTCHR_MASK_MSK_TSK_255_0(1) },
+	{ "MASK_MSK_TSK_255_0[2]", DSPTCHR_MASK_MSK_TSK_255_0(2) },
+	{ "MASK_MSK_TSK_255_0[3]", DSPTCHR_MASK_MSK_TSK_255_0(3) },
+	{ "MASK_MSK_TSK_255_0[4]", DSPTCHR_MASK_MSK_TSK_255_0(4) },
+	{ "MASK_MSK_TSK_255_0[5]", DSPTCHR_MASK_MSK_TSK_255_0(5) },
+	{ "MASK_MSK_TSK_255_0[6]", DSPTCHR_MASK_MSK_TSK_255_0(6) },
+	{ "MASK_MSK_TSK_255_0[7]", DSPTCHR_MASK_MSK_TSK_255_0(7) },
+	{ "MASK_MSK_TSK_255_0[8]", DSPTCHR_MASK_MSK_TSK_255_0(8) },
+	{ "MASK_MSK_TSK_255_0[9]", DSPTCHR_MASK_MSK_TSK_255_0(9) },
+	{ "MASK_MSK_TSK_255_0[10]", DSPTCHR_MASK_MSK_TSK_255_0(10) },
+	{ "MASK_MSK_TSK_255_0[11]", DSPTCHR_MASK_MSK_TSK_255_0(11) },
+	{ "MASK_MSK_TSK_255_0[12]", DSPTCHR_MASK_MSK_TSK_255_0(12) },
+	{ "MASK_MSK_TSK_255_0[13]", DSPTCHR_MASK_MSK_TSK_255_0(13) },
+	{ "MASK_MSK_TSK_255_0[14]", DSPTCHR_MASK_MSK_TSK_255_0(14) },
+	{ "MASK_MSK_TSK_255_0[15]", DSPTCHR_MASK_MSK_TSK_255_0(15) },
+	{ "MASK_MSK_TSK_255_0[16]", DSPTCHR_MASK_MSK_TSK_255_0(16) },
+	{ "MASK_MSK_TSK_255_0[17]", DSPTCHR_MASK_MSK_TSK_255_0(17) },
+	{ "MASK_MSK_TSK_255_0[18]", DSPTCHR_MASK_MSK_TSK_255_0(18) },
+	{ "MASK_MSK_TSK_255_0[19]", DSPTCHR_MASK_MSK_TSK_255_0(19) },
+	{ "MASK_MSK_TSK_255_0[20]", DSPTCHR_MASK_MSK_TSK_255_0(20) },
+	{ "MASK_MSK_TSK_255_0[21]", DSPTCHR_MASK_MSK_TSK_255_0(21) },
+	{ "MASK_MSK_TSK_255_0[22]", DSPTCHR_MASK_MSK_TSK_255_0(22) },
+	{ "MASK_MSK_TSK_255_0[23]", DSPTCHR_MASK_MSK_TSK_255_0(23) },
+	{ "MASK_MSK_TSK_255_0[24]", DSPTCHR_MASK_MSK_TSK_255_0(24) },
+	{ "MASK_MSK_TSK_255_0[25]", DSPTCHR_MASK_MSK_TSK_255_0(25) },
+	{ "MASK_MSK_TSK_255_0[26]", DSPTCHR_MASK_MSK_TSK_255_0(26) },
+	{ "MASK_MSK_TSK_255_0[27]", DSPTCHR_MASK_MSK_TSK_255_0(27) },
+	{ "MASK_MSK_TSK_255_0[28]", DSPTCHR_MASK_MSK_TSK_255_0(28) },
+	{ "MASK_MSK_TSK_255_0[29]", DSPTCHR_MASK_MSK_TSK_255_0(29) },
+	{ "MASK_MSK_TSK_255_0[30]", DSPTCHR_MASK_MSK_TSK_255_0(30) },
+	{ "MASK_MSK_TSK_255_0[31]", DSPTCHR_MASK_MSK_TSK_255_0(31) },
+	{ "MASK_MSK_TSK_255_0[32]", DSPTCHR_MASK_MSK_TSK_255_0(32) },
+	{ "MASK_MSK_TSK_255_0[33]", DSPTCHR_MASK_MSK_TSK_255_0(33) },
+	{ "MASK_MSK_TSK_255_0[34]", DSPTCHR_MASK_MSK_TSK_255_0(34) },
+	{ "MASK_MSK_TSK_255_0[35]", DSPTCHR_MASK_MSK_TSK_255_0(35) },
+	{ "MASK_MSK_TSK_255_0[36]", DSPTCHR_MASK_MSK_TSK_255_0(36) },
+	{ "MASK_MSK_TSK_255_0[37]", DSPTCHR_MASK_MSK_TSK_255_0(37) },
+	{ "MASK_MSK_TSK_255_0[38]", DSPTCHR_MASK_MSK_TSK_255_0(38) },
+	{ "MASK_MSK_TSK_255_0[39]", DSPTCHR_MASK_MSK_TSK_255_0(39) },
+	{ "MASK_MSK_TSK_255_0[40]", DSPTCHR_MASK_MSK_TSK_255_0(40) },
+	{ "MASK_MSK_TSK_255_0[41]", DSPTCHR_MASK_MSK_TSK_255_0(41) },
+	{ "MASK_MSK_TSK_255_0[42]", DSPTCHR_MASK_MSK_TSK_255_0(42) },
+	{ "MASK_MSK_TSK_255_0[43]", DSPTCHR_MASK_MSK_TSK_255_0(43) },
+	{ "MASK_MSK_TSK_255_0[44]", DSPTCHR_MASK_MSK_TSK_255_0(44) },
+	{ "MASK_MSK_TSK_255_0[45]", DSPTCHR_MASK_MSK_TSK_255_0(45) },
+	{ "MASK_MSK_TSK_255_0[46]", DSPTCHR_MASK_MSK_TSK_255_0(46) },
+	{ "MASK_MSK_TSK_255_0[47]", DSPTCHR_MASK_MSK_TSK_255_0(47) },
+	{ "MASK_MSK_TSK_255_0[48]", DSPTCHR_MASK_MSK_TSK_255_0(48) },
+	{ "MASK_MSK_TSK_255_0[49]", DSPTCHR_MASK_MSK_TSK_255_0(49) },
+	{ "MASK_MSK_TSK_255_0[50]", DSPTCHR_MASK_MSK_TSK_255_0(50) },
+	{ "MASK_MSK_TSK_255_0[51]", DSPTCHR_MASK_MSK_TSK_255_0(51) },
+	{ "MASK_MSK_TSK_255_0[52]", DSPTCHR_MASK_MSK_TSK_255_0(52) },
+	{ "MASK_MSK_TSK_255_0[53]", DSPTCHR_MASK_MSK_TSK_255_0(53) },
+	{ "MASK_MSK_TSK_255_0[54]", DSPTCHR_MASK_MSK_TSK_255_0(54) },
+	{ "MASK_MSK_TSK_255_0[55]", DSPTCHR_MASK_MSK_TSK_255_0(55) },
+	{ "MASK_MSK_TSK_255_0[56]", DSPTCHR_MASK_MSK_TSK_255_0(56) },
+	{ "MASK_MSK_TSK_255_0[57]", DSPTCHR_MASK_MSK_TSK_255_0(57) },
+	{ "MASK_MSK_TSK_255_0[58]", DSPTCHR_MASK_MSK_TSK_255_0(58) },
+	{ "MASK_MSK_TSK_255_0[59]", DSPTCHR_MASK_MSK_TSK_255_0(59) },
+	{ "MASK_MSK_TSK_255_0[60]", DSPTCHR_MASK_MSK_TSK_255_0(60) },
+	{ "MASK_MSK_TSK_255_0[61]", DSPTCHR_MASK_MSK_TSK_255_0(61) },
+	{ "MASK_MSK_TSK_255_0[62]", DSPTCHR_MASK_MSK_TSK_255_0(62) },
+	{ "MASK_MSK_TSK_255_0[63]", DSPTCHR_MASK_MSK_TSK_255_0(63) },
+
+	{ "MASK_MSK_Q[0]", DSPTCHR_MASK_MSK_Q(0) },
+	{ "MASK_MSK_Q[1]", DSPTCHR_MASK_MSK_Q(1) },
+	{ "MASK_MSK_Q[2]", DSPTCHR_MASK_MSK_Q(2) },
+	{ "MASK_MSK_Q[3]", DSPTCHR_MASK_MSK_Q(3) },
+	{ "MASK_MSK_Q[4]", DSPTCHR_MASK_MSK_Q(4) },
+	{ "MASK_MSK_Q[5]", DSPTCHR_MASK_MSK_Q(5) },
+	{ "MASK_MSK_Q[6]", DSPTCHR_MASK_MSK_Q(6) },
+	{ "MASK_MSK_Q[7]", DSPTCHR_MASK_MSK_Q(7) },
+
+	{ "MASK_DLY_Q", DSPTCHR_MASK_DLY_Q },
+	{ "MASK_NON_DLY_Q", DSPTCHR_MASK_NON_DLY_Q },
+	{ "EGRS_QUEUES_EGRS_DLY_QM_CRDT", DSPTCHR_EGRS_QUEUES_EGRS_DLY_QM_CRDT },
+	{ "EGRS_QUEUES_EGRS_NON_DLY_QM_CRDT", DSPTCHR_EGRS_QUEUES_EGRS_NON_DLY_QM_CRDT },
+	{ "EGRS_QUEUES_TOTAL_Q_EGRS_SIZE", DSPTCHR_EGRS_QUEUES_TOTAL_Q_EGRS_SIZE },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[0]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(0) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[1]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(1) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[2]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(2) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[3]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(3) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[4]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(4) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[5]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(5) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[6]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(6) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[7]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(7) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[8]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(8) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[9]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(9) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[10]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(10) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[11]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(11) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[12]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(12) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[13]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(13) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[14]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(14) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[15]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(15) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[16]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(16) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[17]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(17) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[18]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(18) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[19]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(19) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[20]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(20) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[21]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(21) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[22]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(22) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[23]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(23) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[24]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(24) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[25]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(25) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[26]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(26) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[27]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(27) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[28]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(28) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[29]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(29) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[30]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(30) },
+	{ "EGRS_QUEUES_PER_Q_EGRS_SIZE[31]", DSPTCHR_EGRS_QUEUES_PER_Q_EGRS_SIZE(31) },
+
+	{ "WAKEUP_CONTROL_WKUP_REQ", DSPTCHR_WAKEUP_CONTROL_WKUP_REQ },
+	{ "WAKEUP_CONTROL_WKUP_THRSHLD", DSPTCHR_WAKEUP_CONTROL_WKUP_THRSHLD },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[0]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(0) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[1]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(1) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[2]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(2) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[3]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(3) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[4]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(4) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[5]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(5) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[6]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(6) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[7]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(7) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[8]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(8) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[9]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(9) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[10]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(10) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[11]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(11) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[12]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(12) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[13]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(13) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[14]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(14) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[15]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(15) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[16]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(16) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[17]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(17) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[18]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(18) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[19]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(19) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[20]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(20) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[21]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(21) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[22]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(22) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[23]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(23) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[24]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(24) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[25]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(25) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[26]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(26) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[27]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(27) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[28]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(28) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[29]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(29) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[30]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(30) },
+	{ "DISPTCH_SCHEDULING_DWRR_INFO[31]", DSPTCHR_DISPTCH_SCHEDULING_DWRR_INFO(31) },
+
+	{ "DISPTCH_SCHEDULING_VLD_CRDT", DSPTCHR_DISPTCH_SCHEDULING_VLD_CRDT },
+	{ "LOAD_BALANCING_LB_CFG", DSPTCHR_LOAD_BALANCING_LB_CFG },
+	{ "LOAD_BALANCING_FREE_TASK_0_1", DSPTCHR_LOAD_BALANCING_FREE_TASK_0_1 },
+	{ "LOAD_BALANCING_FREE_TASK_2_3", DSPTCHR_LOAD_BALANCING_FREE_TASK_2_3 },
+	{ "LOAD_BALANCING_FREE_TASK_4_5", DSPTCHR_LOAD_BALANCING_FREE_TASK_4_5 },
+	{ "LOAD_BALANCING_FREE_TASK_6_7", DSPTCHR_LOAD_BALANCING_FREE_TASK_6_7 },
+	{ "LOAD_BALANCING_FREE_TASK_8_9", DSPTCHR_LOAD_BALANCING_FREE_TASK_8_9 },
+	{ "LOAD_BALANCING_FREE_TASK_10_11", DSPTCHR_LOAD_BALANCING_FREE_TASK_10_11 },
+	{ "LOAD_BALANCING_FREE_TASK_12_13", DSPTCHR_LOAD_BALANCING_FREE_TASK_12_13 },
+	{ "LOAD_BALANCING_FREE_TASK_14_15", DSPTCHR_LOAD_BALANCING_FREE_TASK_14_15 },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[0]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(0) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[1]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(1) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[2]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(2) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[3]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(3) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[4]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(4) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[5]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(5) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[6]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(6) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[7]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(7) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[8]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(8) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[9]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(9) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[10]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(10) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[11]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(11) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[12]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(12) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[13]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(13) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[14]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(14) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[15]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(15) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[16]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(16) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[17]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(17) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[18]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(18) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[19]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(19) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[20]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(20) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[21]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(21) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[22]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(22) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[23]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(23) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[24]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(24) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[25]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(25) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[26]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(26) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[27]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(27) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[28]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(28) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[29]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(29) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[30]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(30) },
+	{ "LOAD_BALANCING_TSK_TO_RG_MAPPING[31]", DSPTCHR_LOAD_BALANCING_TSK_TO_RG_MAPPING(31) },
+
+	{ "LOAD_BALANCING_RG_AVLABL_TSK_0_3", DSPTCHR_LOAD_BALANCING_RG_AVLABL_TSK_0_3 },
+	{ "LOAD_BALANCING_RG_AVLABL_TSK_4_7", DSPTCHR_LOAD_BALANCING_RG_AVLABL_TSK_4_7 },
+	{ "DSPTCHER_REORDR_TOP_INTR_CTRL_0_ISR", DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_0_ISR },
+	{ "DSPTCHER_REORDR_TOP_INTR_CTRL_0_ISM", DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_0_ISM },
+	{ "DSPTCHER_REORDR_TOP_INTR_CTRL_0_IER", DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_0_IER },
+	{ "DSPTCHER_REORDR_TOP_INTR_CTRL_0_ITR", DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_0_ITR },
+	{ "DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR", DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISR },
+	{ "DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISM", DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_1_ISM },
+	{ "DSPTCHER_REORDR_TOP_INTR_CTRL_1_IER", DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_1_IER },
+	{ "DSPTCHER_REORDR_TOP_INTR_CTRL_1_ITR", DSPTCHR_DSPTCHER_REORDR_TOP_INTR_CTRL_1_ITR },
+	{ "DEBUG_DBG_BYPSS_CNTRL", DSPTCHR_DEBUG_DBG_BYPSS_CNTRL },
+	{ "DEBUG_GLBL_TSK_CNT_0_7", DSPTCHR_DEBUG_GLBL_TSK_CNT_0_7 },
+	{ "DEBUG_GLBL_TSK_CNT_8_15", DSPTCHR_DEBUG_GLBL_TSK_CNT_8_15 },
+	{ "DEBUG_DBG_BUS_CNTRL", DSPTCHR_DEBUG_DBG_BUS_CNTRL },
+	{ "DEBUG_DBG_VEC_0", DSPTCHR_DEBUG_DBG_VEC_0 },
+	{ "DEBUG_DBG_VEC_1", DSPTCHR_DEBUG_DBG_VEC_1 },
+	{ "DEBUG_DBG_VEC_2", DSPTCHR_DEBUG_DBG_VEC_2 },
+	{ "DEBUG_DBG_VEC_3", DSPTCHR_DEBUG_DBG_VEC_3 },
+	{ "DEBUG_DBG_VEC_4", DSPTCHR_DEBUG_DBG_VEC_4 },
+	{ "DEBUG_DBG_VEC_5", DSPTCHR_DEBUG_DBG_VEC_5 },
+	{ "DEBUG_DBG_VEC_6", DSPTCHR_DEBUG_DBG_VEC_6 },
+	{ "DEBUG_DBG_VEC_7", DSPTCHR_DEBUG_DBG_VEC_7 },
+	{ "DEBUG_DBG_VEC_8", DSPTCHR_DEBUG_DBG_VEC_8 },
+	{ "DEBUG_DBG_VEC_9", DSPTCHR_DEBUG_DBG_VEC_9 },
+	{ "DEBUG_DBG_VEC_10", DSPTCHR_DEBUG_DBG_VEC_10 },
+	{ "DEBUG_DBG_VEC_11", DSPTCHR_DEBUG_DBG_VEC_11 },
+	{ "DEBUG_DBG_VEC_12", DSPTCHR_DEBUG_DBG_VEC_12 },
+	{ "DEBUG_DBG_VEC_13", DSPTCHR_DEBUG_DBG_VEC_13 },
+	{ "DEBUG_DBG_VEC_14", DSPTCHR_DEBUG_DBG_VEC_14 },
+	{ "DEBUG_DBG_VEC_15", DSPTCHR_DEBUG_DBG_VEC_15 },
+	{ "DEBUG_DBG_VEC_16", DSPTCHR_DEBUG_DBG_VEC_16 },
+	{ "DEBUG_DBG_VEC_17", DSPTCHR_DEBUG_DBG_VEC_17 },
+	{ "DEBUG_DBG_VEC_18", DSPTCHR_DEBUG_DBG_VEC_18 },
+	{ "DEBUG_DBG_VEC_19", DSPTCHR_DEBUG_DBG_VEC_19 },
+	{ "DEBUG_DBG_VEC_20", DSPTCHR_DEBUG_DBG_VEC_20 },
+	{ "DEBUG_DBG_VEC_21", DSPTCHR_DEBUG_DBG_VEC_21 },
+	{ "DEBUG_DBG_VEC_22", DSPTCHR_DEBUG_DBG_VEC_22 },
+	{ "DEBUG_DBG_VEC_23", DSPTCHR_DEBUG_DBG_VEC_23 },
+	{ "DEBUG_STATISTICS_DBG_STTSTCS_CTRL", DSPTCHR_DEBUG_STATISTICS_DBG_STTSTCS_CTRL },
+	{ "DEBUG_STATISTICS_DBG_CNT[0]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(0) },
+	{ "DEBUG_STATISTICS_DBG_CNT[1]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(1) },
+	{ "DEBUG_STATISTICS_DBG_CNT[2]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(2) },
+	{ "DEBUG_STATISTICS_DBG_CNT[3]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(3) },
+	{ "DEBUG_STATISTICS_DBG_CNT[4]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(4) },
+	{ "DEBUG_STATISTICS_DBG_CNT[5]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(5) },
+	{ "DEBUG_STATISTICS_DBG_CNT[6]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(6) },
+	{ "DEBUG_STATISTICS_DBG_CNT[7]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(7) },
+	{ "DEBUG_STATISTICS_DBG_CNT[8]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(8) },
+	{ "DEBUG_STATISTICS_DBG_CNT[9]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(9) },
+	{ "DEBUG_STATISTICS_DBG_CNT[10]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(10) },
+	{ "DEBUG_STATISTICS_DBG_CNT[11]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(11) },
+	{ "DEBUG_STATISTICS_DBG_CNT[12]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(12) },
+	{ "DEBUG_STATISTICS_DBG_CNT[13]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(13) },
+	{ "DEBUG_STATISTICS_DBG_CNT[14]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(14) },
+	{ "DEBUG_STATISTICS_DBG_CNT[15]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(15) },
+	{ "DEBUG_STATISTICS_DBG_CNT[16]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(16) },
+	{ "DEBUG_STATISTICS_DBG_CNT[17]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(17) },
+	{ "DEBUG_STATISTICS_DBG_CNT[18]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(18) },
+	{ "DEBUG_STATISTICS_DBG_CNT[19]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(19) },
+	{ "DEBUG_STATISTICS_DBG_CNT[20]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(20) },
+	{ "DEBUG_STATISTICS_DBG_CNT[21]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(21) },
+	{ "DEBUG_STATISTICS_DBG_CNT[22]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(22) },
+	{ "DEBUG_STATISTICS_DBG_CNT[23]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(23) },
+	{ "DEBUG_STATISTICS_DBG_CNT[24]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(24) },
+	{ "DEBUG_STATISTICS_DBG_CNT[25]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(25) },
+	{ "DEBUG_STATISTICS_DBG_CNT[26]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(26) },
+	{ "DEBUG_STATISTICS_DBG_CNT[27]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(27) },
+	{ "DEBUG_STATISTICS_DBG_CNT[28]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(28) },
+	{ "DEBUG_STATISTICS_DBG_CNT[29]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(29) },
+	{ "DEBUG_STATISTICS_DBG_CNT[30]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(30) },
+	{ "DEBUG_STATISTICS_DBG_CNT[31]", DSPTCHR_DEBUG_STATISTICS_DBG_CNT(31) },
+
+	{ "QDES_HEAD[0]", DSPTCHR_QDES_HEAD(0) },
+	{ "QDES_HEAD[1]", DSPTCHR_QDES_HEAD(1) },
+	{ "QDES_HEAD[2]", DSPTCHR_QDES_HEAD(2) },
+	{ "QDES_HEAD[3]", DSPTCHR_QDES_HEAD(3) },
+	{ "QDES_HEAD[4]", DSPTCHR_QDES_HEAD(4) },
+	{ "QDES_HEAD[5]", DSPTCHR_QDES_HEAD(5) },
+	{ "QDES_HEAD[6]", DSPTCHR_QDES_HEAD(6) },
+	{ "QDES_HEAD[7]", DSPTCHR_QDES_HEAD(7) },
+	{ "QDES_HEAD[8]", DSPTCHR_QDES_HEAD(8) },
+	{ "QDES_HEAD[9]", DSPTCHR_QDES_HEAD(9) },
+	{ "QDES_HEAD[10]", DSPTCHR_QDES_HEAD(10) },
+	{ "QDES_HEAD[11]", DSPTCHR_QDES_HEAD(11) },
+	{ "QDES_HEAD[12]", DSPTCHR_QDES_HEAD(12) },
+	{ "QDES_HEAD[13]", DSPTCHR_QDES_HEAD(13) },
+	{ "QDES_HEAD[14]", DSPTCHR_QDES_HEAD(14) },
+	{ "QDES_HEAD[15]", DSPTCHR_QDES_HEAD(15) },
+	{ "QDES_HEAD[16]", DSPTCHR_QDES_HEAD(16) },
+	{ "QDES_HEAD[17]", DSPTCHR_QDES_HEAD(17) },
+	{ "QDES_HEAD[18]", DSPTCHR_QDES_HEAD(18) },
+	{ "QDES_HEAD[19]", DSPTCHR_QDES_HEAD(19) },
+	{ "QDES_HEAD[20]", DSPTCHR_QDES_HEAD(20) },
+	{ "QDES_HEAD[21]", DSPTCHR_QDES_HEAD(21) },
+	{ "QDES_HEAD[22]", DSPTCHR_QDES_HEAD(22) },
+	{ "QDES_HEAD[23]", DSPTCHR_QDES_HEAD(23) },
+	{ "QDES_HEAD[24]", DSPTCHR_QDES_HEAD(24) },
+	{ "QDES_HEAD[25]", DSPTCHR_QDES_HEAD(25) },
+	{ "QDES_HEAD[26]", DSPTCHR_QDES_HEAD(26) },
+	{ "QDES_HEAD[27]", DSPTCHR_QDES_HEAD(27) },
+	{ "QDES_HEAD[28]", DSPTCHR_QDES_HEAD(28) },
+	{ "QDES_HEAD[29]", DSPTCHR_QDES_HEAD(29) },
+	{ "QDES_HEAD[30]", DSPTCHR_QDES_HEAD(30) },
+	{ "QDES_HEAD[31]", DSPTCHR_QDES_HEAD(31) },
+
+	{ "QDES_BFOUT[0]", DSPTCHR_QDES_BFOUT(0) },
+	{ "QDES_BFOUT[1]", DSPTCHR_QDES_BFOUT(1) },
+	{ "QDES_BFOUT[2]", DSPTCHR_QDES_BFOUT(2) },
+	{ "QDES_BFOUT[3]", DSPTCHR_QDES_BFOUT(3) },
+	{ "QDES_BFOUT[4]", DSPTCHR_QDES_BFOUT(4) },
+	{ "QDES_BFOUT[5]", DSPTCHR_QDES_BFOUT(5) },
+	{ "QDES_BFOUT[6]", DSPTCHR_QDES_BFOUT(6) },
+	{ "QDES_BFOUT[7]", DSPTCHR_QDES_BFOUT(7) },
+	{ "QDES_BFOUT[8]", DSPTCHR_QDES_BFOUT(8) },
+	{ "QDES_BFOUT[9]", DSPTCHR_QDES_BFOUT(9) },
+	{ "QDES_BFOUT[10]", DSPTCHR_QDES_BFOUT(10) },
+	{ "QDES_BFOUT[11]", DSPTCHR_QDES_BFOUT(11) },
+	{ "QDES_BFOUT[12]", DSPTCHR_QDES_BFOUT(12) },
+	{ "QDES_BFOUT[13]", DSPTCHR_QDES_BFOUT(13) },
+	{ "QDES_BFOUT[14]", DSPTCHR_QDES_BFOUT(14) },
+	{ "QDES_BFOUT[15]", DSPTCHR_QDES_BFOUT(15) },
+	{ "QDES_BFOUT[16]", DSPTCHR_QDES_BFOUT(16) },
+	{ "QDES_BFOUT[17]", DSPTCHR_QDES_BFOUT(17) },
+	{ "QDES_BFOUT[18]", DSPTCHR_QDES_BFOUT(18) },
+	{ "QDES_BFOUT[19]", DSPTCHR_QDES_BFOUT(19) },
+	{ "QDES_BFOUT[20]", DSPTCHR_QDES_BFOUT(20) },
+	{ "QDES_BFOUT[21]", DSPTCHR_QDES_BFOUT(21) },
+	{ "QDES_BFOUT[22]", DSPTCHR_QDES_BFOUT(22) },
+	{ "QDES_BFOUT[23]", DSPTCHR_QDES_BFOUT(23) },
+	{ "QDES_BFOUT[24]", DSPTCHR_QDES_BFOUT(24) },
+	{ "QDES_BFOUT[25]", DSPTCHR_QDES_BFOUT(25) },
+	{ "QDES_BFOUT[26]", DSPTCHR_QDES_BFOUT(26) },
+	{ "QDES_BFOUT[27]", DSPTCHR_QDES_BFOUT(27) },
+	{ "QDES_BFOUT[28]", DSPTCHR_QDES_BFOUT(28) },
+	{ "QDES_BFOUT[29]", DSPTCHR_QDES_BFOUT(29) },
+	{ "QDES_BFOUT[30]", DSPTCHR_QDES_BFOUT(30) },
+	{ "QDES_BFOUT[31]", DSPTCHR_QDES_BFOUT(31) },
+
+	{ "QDES_BUFIN[0]", DSPTCHR_QDES_BUFIN(0) },
+	{ "QDES_BUFIN[1]", DSPTCHR_QDES_BUFIN(1) },
+	{ "QDES_BUFIN[2]", DSPTCHR_QDES_BUFIN(2) },
+	{ "QDES_BUFIN[3]", DSPTCHR_QDES_BUFIN(3) },
+	{ "QDES_BUFIN[4]", DSPTCHR_QDES_BUFIN(4) },
+	{ "QDES_BUFIN[5]", DSPTCHR_QDES_BUFIN(5) },
+	{ "QDES_BUFIN[6]", DSPTCHR_QDES_BUFIN(6) },
+	{ "QDES_BUFIN[7]", DSPTCHR_QDES_BUFIN(7) },
+	{ "QDES_BUFIN[8]", DSPTCHR_QDES_BUFIN(8) },
+	{ "QDES_BUFIN[9]", DSPTCHR_QDES_BUFIN(9) },
+	{ "QDES_BUFIN[10]", DSPTCHR_QDES_BUFIN(10) },
+	{ "QDES_BUFIN[11]", DSPTCHR_QDES_BUFIN(11) },
+	{ "QDES_BUFIN[12]", DSPTCHR_QDES_BUFIN(12) },
+	{ "QDES_BUFIN[13]", DSPTCHR_QDES_BUFIN(13) },
+	{ "QDES_BUFIN[14]", DSPTCHR_QDES_BUFIN(14) },
+	{ "QDES_BUFIN[15]", DSPTCHR_QDES_BUFIN(15) },
+	{ "QDES_BUFIN[16]", DSPTCHR_QDES_BUFIN(16) },
+	{ "QDES_BUFIN[17]", DSPTCHR_QDES_BUFIN(17) },
+	{ "QDES_BUFIN[18]", DSPTCHR_QDES_BUFIN(18) },
+	{ "QDES_BUFIN[19]", DSPTCHR_QDES_BUFIN(19) },
+	{ "QDES_BUFIN[20]", DSPTCHR_QDES_BUFIN(20) },
+	{ "QDES_BUFIN[21]", DSPTCHR_QDES_BUFIN(21) },
+	{ "QDES_BUFIN[22]", DSPTCHR_QDES_BUFIN(22) },
+	{ "QDES_BUFIN[23]", DSPTCHR_QDES_BUFIN(23) },
+	{ "QDES_BUFIN[24]", DSPTCHR_QDES_BUFIN(24) },
+	{ "QDES_BUFIN[25]", DSPTCHR_QDES_BUFIN(25) },
+	{ "QDES_BUFIN[26]", DSPTCHR_QDES_BUFIN(26) },
+	{ "QDES_BUFIN[27]", DSPTCHR_QDES_BUFIN(27) },
+	{ "QDES_BUFIN[28]", DSPTCHR_QDES_BUFIN(28) },
+	{ "QDES_BUFIN[29]", DSPTCHR_QDES_BUFIN(29) },
+	{ "QDES_BUFIN[30]", DSPTCHR_QDES_BUFIN(30) },
+	{ "QDES_BUFIN[31]", DSPTCHR_QDES_BUFIN(31) },
+
+	{ "QDES_TAIL[0]", DSPTCHR_QDES_TAIL(0) },
+	{ "QDES_TAIL[1]", DSPTCHR_QDES_TAIL(1) },
+	{ "QDES_TAIL[2]", DSPTCHR_QDES_TAIL(2) },
+	{ "QDES_TAIL[3]", DSPTCHR_QDES_TAIL(3) },
+	{ "QDES_TAIL[4]", DSPTCHR_QDES_TAIL(4) },
+	{ "QDES_TAIL[5]", DSPTCHR_QDES_TAIL(5) },
+	{ "QDES_TAIL[6]", DSPTCHR_QDES_TAIL(6) },
+	{ "QDES_TAIL[7]", DSPTCHR_QDES_TAIL(7) },
+	{ "QDES_TAIL[8]", DSPTCHR_QDES_TAIL(8) },
+	{ "QDES_TAIL[9]", DSPTCHR_QDES_TAIL(9) },
+	{ "QDES_TAIL[10]", DSPTCHR_QDES_TAIL(10) },
+	{ "QDES_TAIL[11]", DSPTCHR_QDES_TAIL(11) },
+	{ "QDES_TAIL[12]", DSPTCHR_QDES_TAIL(12) },
+	{ "QDES_TAIL[13]", DSPTCHR_QDES_TAIL(13) },
+	{ "QDES_TAIL[14]", DSPTCHR_QDES_TAIL(14) },
+	{ "QDES_TAIL[15]", DSPTCHR_QDES_TAIL(15) },
+	{ "QDES_TAIL[16]", DSPTCHR_QDES_TAIL(16) },
+	{ "QDES_TAIL[17]", DSPTCHR_QDES_TAIL(17) },
+	{ "QDES_TAIL[18]", DSPTCHR_QDES_TAIL(18) },
+	{ "QDES_TAIL[19]", DSPTCHR_QDES_TAIL(19) },
+	{ "QDES_TAIL[20]", DSPTCHR_QDES_TAIL(20) },
+	{ "QDES_TAIL[21]", DSPTCHR_QDES_TAIL(21) },
+	{ "QDES_TAIL[22]", DSPTCHR_QDES_TAIL(22) },
+	{ "QDES_TAIL[23]", DSPTCHR_QDES_TAIL(23) },
+	{ "QDES_TAIL[24]", DSPTCHR_QDES_TAIL(24) },
+	{ "QDES_TAIL[25]", DSPTCHR_QDES_TAIL(25) },
+	{ "QDES_TAIL[26]", DSPTCHR_QDES_TAIL(26) },
+	{ "QDES_TAIL[27]", DSPTCHR_QDES_TAIL(27) },
+	{ "QDES_TAIL[28]", DSPTCHR_QDES_TAIL(28) },
+	{ "QDES_TAIL[29]", DSPTCHR_QDES_TAIL(29) },
+	{ "QDES_TAIL[30]", DSPTCHR_QDES_TAIL(30) },
+	{ "QDES_TAIL[31]", DSPTCHR_QDES_TAIL(31) },
+
+	{ "QDES_FBDNULL[0]", DSPTCHR_QDES_FBDNULL(0) },
+	{ "QDES_FBDNULL[1]", DSPTCHR_QDES_FBDNULL(1) },
+	{ "QDES_FBDNULL[2]", DSPTCHR_QDES_FBDNULL(2) },
+	{ "QDES_FBDNULL[3]", DSPTCHR_QDES_FBDNULL(3) },
+	{ "QDES_FBDNULL[4]", DSPTCHR_QDES_FBDNULL(4) },
+	{ "QDES_FBDNULL[5]", DSPTCHR_QDES_FBDNULL(5) },
+	{ "QDES_FBDNULL[6]", DSPTCHR_QDES_FBDNULL(6) },
+	{ "QDES_FBDNULL[7]", DSPTCHR_QDES_FBDNULL(7) },
+	{ "QDES_FBDNULL[8]", DSPTCHR_QDES_FBDNULL(8) },
+	{ "QDES_FBDNULL[9]", DSPTCHR_QDES_FBDNULL(9) },
+	{ "QDES_FBDNULL[10]", DSPTCHR_QDES_FBDNULL(10) },
+	{ "QDES_FBDNULL[11]", DSPTCHR_QDES_FBDNULL(11) },
+	{ "QDES_FBDNULL[12]", DSPTCHR_QDES_FBDNULL(12) },
+	{ "QDES_FBDNULL[13]", DSPTCHR_QDES_FBDNULL(13) },
+	{ "QDES_FBDNULL[14]", DSPTCHR_QDES_FBDNULL(14) },
+	{ "QDES_FBDNULL[15]", DSPTCHR_QDES_FBDNULL(15) },
+	{ "QDES_FBDNULL[16]", DSPTCHR_QDES_FBDNULL(16) },
+	{ "QDES_FBDNULL[17]", DSPTCHR_QDES_FBDNULL(17) },
+	{ "QDES_FBDNULL[18]", DSPTCHR_QDES_FBDNULL(18) },
+	{ "QDES_FBDNULL[19]", DSPTCHR_QDES_FBDNULL(19) },
+	{ "QDES_FBDNULL[20]", DSPTCHR_QDES_FBDNULL(20) },
+	{ "QDES_FBDNULL[21]", DSPTCHR_QDES_FBDNULL(21) },
+	{ "QDES_FBDNULL[22]", DSPTCHR_QDES_FBDNULL(22) },
+	{ "QDES_FBDNULL[23]", DSPTCHR_QDES_FBDNULL(23) },
+	{ "QDES_FBDNULL[24]", DSPTCHR_QDES_FBDNULL(24) },
+	{ "QDES_FBDNULL[25]", DSPTCHR_QDES_FBDNULL(25) },
+	{ "QDES_FBDNULL[26]", DSPTCHR_QDES_FBDNULL(26) },
+	{ "QDES_FBDNULL[27]", DSPTCHR_QDES_FBDNULL(27) },
+	{ "QDES_FBDNULL[28]", DSPTCHR_QDES_FBDNULL(28) },
+	{ "QDES_FBDNULL[29]", DSPTCHR_QDES_FBDNULL(29) },
+	{ "QDES_FBDNULL[30]", DSPTCHR_QDES_FBDNULL(30) },
+	{ "QDES_FBDNULL[31]", DSPTCHR_QDES_FBDNULL(31) },
+
+	{ "QDES_NULLBD[0]", DSPTCHR_QDES_NULLBD(0) },
+	{ "QDES_NULLBD[1]", DSPTCHR_QDES_NULLBD(1) },
+	{ "QDES_NULLBD[2]", DSPTCHR_QDES_NULLBD(2) },
+	{ "QDES_NULLBD[3]", DSPTCHR_QDES_NULLBD(3) },
+	{ "QDES_NULLBD[4]", DSPTCHR_QDES_NULLBD(4) },
+	{ "QDES_NULLBD[5]", DSPTCHR_QDES_NULLBD(5) },
+	{ "QDES_NULLBD[6]", DSPTCHR_QDES_NULLBD(6) },
+	{ "QDES_NULLBD[7]", DSPTCHR_QDES_NULLBD(7) },
+	{ "QDES_NULLBD[8]", DSPTCHR_QDES_NULLBD(8) },
+	{ "QDES_NULLBD[9]", DSPTCHR_QDES_NULLBD(9) },
+	{ "QDES_NULLBD[10]", DSPTCHR_QDES_NULLBD(10) },
+	{ "QDES_NULLBD[11]", DSPTCHR_QDES_NULLBD(11) },
+	{ "QDES_NULLBD[12]", DSPTCHR_QDES_NULLBD(12) },
+	{ "QDES_NULLBD[13]", DSPTCHR_QDES_NULLBD(13) },
+	{ "QDES_NULLBD[14]", DSPTCHR_QDES_NULLBD(14) },
+	{ "QDES_NULLBD[15]", DSPTCHR_QDES_NULLBD(15) },
+	{ "QDES_NULLBD[16]", DSPTCHR_QDES_NULLBD(16) },
+	{ "QDES_NULLBD[17]", DSPTCHR_QDES_NULLBD(17) },
+	{ "QDES_NULLBD[18]", DSPTCHR_QDES_NULLBD(18) },
+	{ "QDES_NULLBD[19]", DSPTCHR_QDES_NULLBD(19) },
+	{ "QDES_NULLBD[20]", DSPTCHR_QDES_NULLBD(20) },
+	{ "QDES_NULLBD[21]", DSPTCHR_QDES_NULLBD(21) },
+	{ "QDES_NULLBD[22]", DSPTCHR_QDES_NULLBD(22) },
+	{ "QDES_NULLBD[23]", DSPTCHR_QDES_NULLBD(23) },
+	{ "QDES_NULLBD[24]", DSPTCHR_QDES_NULLBD(24) },
+	{ "QDES_NULLBD[25]", DSPTCHR_QDES_NULLBD(25) },
+	{ "QDES_NULLBD[26]", DSPTCHR_QDES_NULLBD(26) },
+	{ "QDES_NULLBD[27]", DSPTCHR_QDES_NULLBD(27) },
+	{ "QDES_NULLBD[28]", DSPTCHR_QDES_NULLBD(28) },
+	{ "QDES_NULLBD[29]", DSPTCHR_QDES_NULLBD(29) },
+	{ "QDES_NULLBD[30]", DSPTCHR_QDES_NULLBD(30) },
+	{ "QDES_NULLBD[31]", DSPTCHR_QDES_NULLBD(31) },
+
+	{ "QDES_BUFAVAIL[0]", DSPTCHR_QDES_BUFAVAIL(0) },
+	{ "QDES_BUFAVAIL[1]", DSPTCHR_QDES_BUFAVAIL(1) },
+	{ "QDES_BUFAVAIL[2]", DSPTCHR_QDES_BUFAVAIL(2) },
+	{ "QDES_BUFAVAIL[3]", DSPTCHR_QDES_BUFAVAIL(3) },
+	{ "QDES_BUFAVAIL[4]", DSPTCHR_QDES_BUFAVAIL(4) },
+	{ "QDES_BUFAVAIL[5]", DSPTCHR_QDES_BUFAVAIL(5) },
+	{ "QDES_BUFAVAIL[6]", DSPTCHR_QDES_BUFAVAIL(6) },
+	{ "QDES_BUFAVAIL[7]", DSPTCHR_QDES_BUFAVAIL(7) },
+	{ "QDES_BUFAVAIL[8]", DSPTCHR_QDES_BUFAVAIL(8) },
+	{ "QDES_BUFAVAIL[9]", DSPTCHR_QDES_BUFAVAIL(9) },
+	{ "QDES_BUFAVAIL[10]", DSPTCHR_QDES_BUFAVAIL(10) },
+	{ "QDES_BUFAVAIL[11]", DSPTCHR_QDES_BUFAVAIL(11) },
+	{ "QDES_BUFAVAIL[12]", DSPTCHR_QDES_BUFAVAIL(12) },
+	{ "QDES_BUFAVAIL[13]", DSPTCHR_QDES_BUFAVAIL(13) },
+	{ "QDES_BUFAVAIL[14]", DSPTCHR_QDES_BUFAVAIL(14) },
+	{ "QDES_BUFAVAIL[15]", DSPTCHR_QDES_BUFAVAIL(15) },
+	{ "QDES_BUFAVAIL[16]", DSPTCHR_QDES_BUFAVAIL(16) },
+	{ "QDES_BUFAVAIL[17]", DSPTCHR_QDES_BUFAVAIL(17) },
+	{ "QDES_BUFAVAIL[18]", DSPTCHR_QDES_BUFAVAIL(18) },
+	{ "QDES_BUFAVAIL[19]", DSPTCHR_QDES_BUFAVAIL(19) },
+	{ "QDES_BUFAVAIL[20]", DSPTCHR_QDES_BUFAVAIL(20) },
+	{ "QDES_BUFAVAIL[21]", DSPTCHR_QDES_BUFAVAIL(21) },
+	{ "QDES_BUFAVAIL[22]", DSPTCHR_QDES_BUFAVAIL(22) },
+	{ "QDES_BUFAVAIL[23]", DSPTCHR_QDES_BUFAVAIL(23) },
+	{ "QDES_BUFAVAIL[24]", DSPTCHR_QDES_BUFAVAIL(24) },
+	{ "QDES_BUFAVAIL[25]", DSPTCHR_QDES_BUFAVAIL(25) },
+	{ "QDES_BUFAVAIL[26]", DSPTCHR_QDES_BUFAVAIL(26) },
+	{ "QDES_BUFAVAIL[27]", DSPTCHR_QDES_BUFAVAIL(27) },
+	{ "QDES_BUFAVAIL[28]", DSPTCHR_QDES_BUFAVAIL(28) },
+	{ "QDES_BUFAVAIL[29]", DSPTCHR_QDES_BUFAVAIL(29) },
+	{ "QDES_BUFAVAIL[30]", DSPTCHR_QDES_BUFAVAIL(30) },
+	{ "QDES_BUFAVAIL[31]", DSPTCHR_QDES_BUFAVAIL(31) },
+
+	{ "QDES_REG_Q_HEAD[0]", DSPTCHR_QDES_REG_Q_HEAD(0) },
+	{ "QDES_REG_Q_HEAD[1]", DSPTCHR_QDES_REG_Q_HEAD(1) },
+	{ "QDES_REG_Q_HEAD[2]", DSPTCHR_QDES_REG_Q_HEAD(2) },
+	{ "QDES_REG_Q_HEAD[3]", DSPTCHR_QDES_REG_Q_HEAD(3) },
+	{ "QDES_REG_Q_HEAD[4]", DSPTCHR_QDES_REG_Q_HEAD(4) },
+	{ "QDES_REG_Q_HEAD[5]", DSPTCHR_QDES_REG_Q_HEAD(5) },
+	{ "QDES_REG_Q_HEAD[6]", DSPTCHR_QDES_REG_Q_HEAD(6) },
+	{ "QDES_REG_Q_HEAD[7]", DSPTCHR_QDES_REG_Q_HEAD(7) },
+	{ "QDES_REG_Q_HEAD[8]", DSPTCHR_QDES_REG_Q_HEAD(8) },
+	{ "QDES_REG_Q_HEAD[9]", DSPTCHR_QDES_REG_Q_HEAD(9) },
+	{ "QDES_REG_Q_HEAD[10]", DSPTCHR_QDES_REG_Q_HEAD(10) },
+	{ "QDES_REG_Q_HEAD[11]", DSPTCHR_QDES_REG_Q_HEAD(11) },
+	{ "QDES_REG_Q_HEAD[12]", DSPTCHR_QDES_REG_Q_HEAD(12) },
+	{ "QDES_REG_Q_HEAD[13]", DSPTCHR_QDES_REG_Q_HEAD(13) },
+	{ "QDES_REG_Q_HEAD[14]", DSPTCHR_QDES_REG_Q_HEAD(14) },
+	{ "QDES_REG_Q_HEAD[15]", DSPTCHR_QDES_REG_Q_HEAD(15) },
+	{ "QDES_REG_Q_HEAD[16]", DSPTCHR_QDES_REG_Q_HEAD(16) },
+	{ "QDES_REG_Q_HEAD[17]", DSPTCHR_QDES_REG_Q_HEAD(17) },
+	{ "QDES_REG_Q_HEAD[18]", DSPTCHR_QDES_REG_Q_HEAD(18) },
+	{ "QDES_REG_Q_HEAD[19]", DSPTCHR_QDES_REG_Q_HEAD(19) },
+	{ "QDES_REG_Q_HEAD[20]", DSPTCHR_QDES_REG_Q_HEAD(20) },
+	{ "QDES_REG_Q_HEAD[21]", DSPTCHR_QDES_REG_Q_HEAD(21) },
+	{ "QDES_REG_Q_HEAD[22]", DSPTCHR_QDES_REG_Q_HEAD(22) },
+	{ "QDES_REG_Q_HEAD[23]", DSPTCHR_QDES_REG_Q_HEAD(23) },
+	{ "QDES_REG_Q_HEAD[24]", DSPTCHR_QDES_REG_Q_HEAD(24) },
+	{ "QDES_REG_Q_HEAD[25]", DSPTCHR_QDES_REG_Q_HEAD(25) },
+	{ "QDES_REG_Q_HEAD[26]", DSPTCHR_QDES_REG_Q_HEAD(26) },
+	{ "QDES_REG_Q_HEAD[27]", DSPTCHR_QDES_REG_Q_HEAD(27) },
+	{ "QDES_REG_Q_HEAD[28]", DSPTCHR_QDES_REG_Q_HEAD(28) },
+	{ "QDES_REG_Q_HEAD[29]", DSPTCHR_QDES_REG_Q_HEAD(29) },
+	{ "QDES_REG_Q_HEAD[30]", DSPTCHR_QDES_REG_Q_HEAD(30) },
+	{ "QDES_REG_Q_HEAD[31]", DSPTCHR_QDES_REG_Q_HEAD(31) },
+
+	{ "QDES_REG_VIQ_HEAD_VLD", DSPTCHR_QDES_REG_VIQ_HEAD_VLD },
+	{ "QDES_REG_VIQ_CHRNCY_VLD", DSPTCHR_QDES_REG_VIQ_CHRNCY_VLD },
+	{ "QDES_REG_VEQ_HEAD_VLD", DSPTCHR_QDES_REG_VEQ_HEAD_VLD },
+	{ "QDES_REG_QDES_BUF_AVL_CNTRL", DSPTCHR_QDES_REG_QDES_BUF_AVL_CNTRL },
+	{ "FLLDES_HEAD", DSPTCHR_FLLDES_HEAD },
+	{ "FLLDES_BFOUT", DSPTCHR_FLLDES_BFOUT },
+	{ "FLLDES_BFIN", DSPTCHR_FLLDES_BFIN },
+	{ "FLLDES_TAIL", DSPTCHR_FLLDES_TAIL },
+	{ "FLLDES_FLLDROP", DSPTCHR_FLLDES_FLLDROP },
+	{ "FLLDES_LTINT", DSPTCHR_FLLDES_LTINT },
+	{ "FLLDES_BUFAVAIL", DSPTCHR_FLLDES_BUFAVAIL },
+	{ "FLLDES_FREEMIN", DSPTCHR_FLLDES_FREEMIN },
+};
+
+static const struct reg_desc acb_regs[] = {	/* ACB interface registers exposed through the debugfs "acb" file */
+	{ "CONFIG_CONF0", ACB_IF_ACBIF_BLOCK_ACBIF_CONFIG_CONF0 },
+	{ "PM_COUNTERS_CMD_TYPE[0]", ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_CMD_TYPE(0) },
+	{ "PM_COUNTERS_CMD_TYPE[1]", ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_CMD_TYPE(1) },
+	{ "PM_COUNTERS_CMD_TYPE[2]", ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_CMD_TYPE(2) },
+	{ "PM_COUNTERS_CMD_IMP[0]", ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_CMD_IMP(0) },
+	{ "PM_COUNTERS_CMD_IMP[1]", ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_CMD_IMP(1) },
+	{ "PM_COUNTERS_CMD_IMP[2]", ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_CMD_IMP(2) },
+	{ "PM_COUNTERS_AGG[0]", ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_AGG(0) },
+	{ "PM_COUNTERS_AGG[1]", ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_AGG(1) },
+	{ "PM_COUNTERS_BUFFS[0]", ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_BUFFS(0) },
+	{ "PM_COUNTERS_BUFFS[1]", ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_BUFFS(1) },
+	{ "PM_COUNTERS_GEN_CFG", ACB_IF_ACBIF_BLOCK_ACBIF_PM_COUNTERS_GEN_CFG },
+};
+
+
+/*
+ * regs dump functions
+ */
+static void *regs_dump_seq_start(struct seq_file *s, loff_t *pos)
+{
+	struct reg_dump_priv *rp = s->private;
+	return *pos >= rp->regs_count ? NULL : pos;
+}
+
+static void *regs_dump_seq_next(struct seq_file *s,
+				void __always_unused *v,
+				loff_t *pos)
+{
+	struct reg_dump_priv *rp = s->private;
+	return ++(*pos) >= rp->regs_count ? NULL : pos;
+}
+
+static void regs_dump_seq_stop(struct seq_file __always_unused *s,
+			       void __always_unused *v)
+{	/* nothing to release: iteration state is just the loff_t index */
+}
+
+static int regs_dump_seq_show(struct seq_file *s, void *v)
+{
+	struct reg_dump_priv *rp = s->private;
+	int idx = *(loff_t *)v;
+	const struct reg_desc *desc = &rp->regs[idx];
+	u32 value;
+
+	/* read the register from hardware at dump time */
+	value = xrdp_read32(rp->priv, rp->area,
+			    rp->base_offset + desc->offset);
+
+	/* one output line per register: padded name, then hex value */
+	seq_printf(s, "%-40s\t0x%08x\n",
+		   desc->name, value);
+
+	return 0;
+}
+
+static const struct seq_operations regs_dump_seq_ops = {	/* iterate over one reg_desc table entry per step */
+	.start = regs_dump_seq_start,
+	.next  = regs_dump_seq_next,
+	.stop  = regs_dump_seq_stop,
+	.show  = regs_dump_seq_show,
+};
+
+static int regs_dump_open(struct inode *inode, struct file *filep)
+{
+	struct seq_file *seq;
+	int err;
+
+	err = seq_open(filep, &regs_dump_seq_ops);
+	if (err)
+		return err;
+	seq = filep->private_data;
+	seq->private = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations regs_dump_fops = {	/* read-only seq_file backend for the register dump files */
+	.owner   = THIS_MODULE,
+	.open    = regs_dump_open,
+	.read    = seq_read,
+	.llseek  = seq_lseek,
+	.release = seq_release,
+};
+
+/*
+ * allocate and fill a register-dump context for one debugfs file
+ */
+static struct reg_dump_priv *
+dump_ctx_create(struct bcm_xrdp_priv *priv,
+		const struct reg_desc *regs,
+		size_t regs_count,
+		enum xrdp_regs_area area,
+		unsigned int base_offset)
+{
+	struct reg_dump_priv *rpriv;
+
+	rpriv = kmalloc(sizeof(*rpriv), GFP_KERNEL);	/* was sizeof(*priv): allocated the wrong struct's size */
+	if (!rpriv)
+		return NULL;
+	rpriv->priv = priv;
+	rpriv->regs = regs;
+	rpriv->regs_count = regs_count;
+	rpriv->area = area;
+	rpriv->base_offset = base_offset;
+	return rpriv;
+}
+
+/*
+ * create the debugfs register dump file for BBH RX block <id>
+ */
+static void dbg_create_bbh_rx_reg(struct bcm_xrdp_priv *priv, unsigned int id)
+{
+	struct reg_dump_priv *rpriv;
+	char name[32];
+	enum xrdp_regs_area area;
+	uint32_t offset;
+
+	get_bbh_rx_offset(id, &offset, &area);
+	rpriv = dump_ctx_create(priv, bbh_rx_regs, ARRAY_SIZE(bbh_rx_regs),
+				area, offset);
+	if (!rpriv)
+		return;	/* best effort: a missing debugfs file is not fatal */
+	snprintf(name, sizeof(name), "bbh_rx_%u", id);
+	debugfs_create_file(name, 0400, dbg_regs, rpriv, &regs_dump_fops);	/* NOTE(review): rpriv is never kfree()d on exit — confirm intended */
+}
+
+/*
+ * create the debugfs register dump file for BBH TX block <id>
+ */
+static void dbg_create_bbh_tx_reg(struct bcm_xrdp_priv *priv, unsigned int id)
+{
+	struct reg_dump_priv *rpriv;
+	char name[32];
+	enum xrdp_regs_area area;
+	uint32_t offset;
+
+	get_bbh_tx_offset(id, &offset, &area);
+	rpriv = dump_ctx_create(priv, bbh_tx_regs, ARRAY_SIZE(bbh_tx_regs),
+				area, offset);
+	if (!rpriv)
+		return;
+	snprintf(name, sizeof(name), "bbh_tx_%u", id);
+	debugfs_create_file(name, 0400, dbg_regs, rpriv, &regs_dump_fops);
+}
+
+/*
+ * create the debugfs register dump file for runner core <id>
+ */
+static void dbg_create_runner_reg(struct bcm_xrdp_priv *priv, unsigned int id)
+{
+	struct reg_dump_priv *rpriv;
+	char name[32];
+	uint32_t offset;
+
+	get_runner_offset(id, &offset);
+	rpriv = dump_ctx_create(priv, runner_regs, ARRAY_SIZE(runner_regs),
+				XRDP_AREA_CORE, offset);
+	if (!rpriv)
+		return;
+	snprintf(name, sizeof(name), "runner_%u", id);
+	debugfs_create_file(name, 0400, dbg_regs, rpriv, &regs_dump_fops);
+}
+
+/*
+ * create the debugfs register dump file for runner quad <id>
+ */
+static void dbg_create_quad_reg(struct bcm_xrdp_priv *priv, unsigned int id)
+{
+	struct reg_dump_priv *rpriv;
+	char name[32];
+
+	rpriv = dump_ctx_create(priv, runner_quad_regs,
+				ARRAY_SIZE(runner_quad_regs),
+				XRDP_AREA_CORE, RNR_QUAD_OFFSET_0);
+	if (!rpriv)
+		return;
+	snprintf(name, sizeof(name), "runner_quad_%u", id);
+	debugfs_create_file(name, 0400, dbg_regs, rpriv, &regs_dump_fops);
+}
+
+/*
+ * create the debugfs register dump file for SDMA module <id>
+ */
+static void dbg_create_sdma_reg(struct bcm_xrdp_priv *priv, unsigned int id)
+{
+	struct reg_dump_priv *rpriv;
+	char name[32];
+
+	rpriv = dump_ctx_create(priv, dma_regs,
+				ARRAY_SIZE(dma_regs),
+				XRDP_AREA_CORE, DMA_OFFSET(1 + id));
+	if (!rpriv)
+		return;
+	snprintf(name, sizeof(name), "sdma_%u", id);
+	debugfs_create_file(name, 0400, dbg_regs, rpriv, &regs_dump_fops);
+}
+
+/*
+ * create the debugfs register dump file for the DMA module
+ */
+static void dbg_create_dma_reg(struct bcm_xrdp_priv *priv)
+{
+	struct reg_dump_priv *rpriv;
+	char name[32];
+
+	rpriv = dump_ctx_create(priv, dma_regs,
+				ARRAY_SIZE(dma_regs),
+				XRDP_AREA_CORE, DMA_OFFSET(0));
+	if (!rpriv)
+		return;
+	snprintf(name, sizeof(name), "dma");
+	debugfs_create_file(name, 0400, dbg_regs, rpriv, &regs_dump_fops);
+}
+
+/*
+ * create the debugfs register dump file for the dispatcher
+ */
+static void dbg_create_disp_reg(struct bcm_xrdp_priv *priv)
+{
+	struct reg_dump_priv *rpriv;
+	char name[32];
+
+	rpriv = dump_ctx_create(priv, disp_regs,
+				ARRAY_SIZE(disp_regs),
+				XRDP_AREA_CORE, DSPTCHR_OFFSET_0);
+	if (!rpriv)
+		return;
+	snprintf(name, sizeof(name), "disp");
+	debugfs_create_file(name, 0400, dbg_regs, rpriv, &regs_dump_fops);
+}
+
+/*
+ * create the debugfs register dump file for the SBPM block
+ */
+static void dbg_create_sbpm_reg(struct bcm_xrdp_priv *priv)
+{
+	struct reg_dump_priv *rpriv;
+	char name[32];
+
+	rpriv = dump_ctx_create(priv, sbpm_regs,
+				ARRAY_SIZE(sbpm_regs),
+				XRDP_AREA_CORE, SBPM_OFFSET_0);
+	if (!rpriv)
+		return;
+	snprintf(name, sizeof(name), "sbpm");
+	debugfs_create_file(name, 0400, dbg_regs, rpriv, &regs_dump_fops);
+}
+
+/*
+ * create the debugfs register dump file for the UBUS master
+ */
+static void dbg_create_ubus_master_reg(struct bcm_xrdp_priv *priv)
+{
+	struct reg_dump_priv *rpriv;
+	char name[32];
+
+	rpriv = dump_ctx_create(priv, ubus_master_regs,
+				ARRAY_SIZE(ubus_master_regs),
+				XRDP_AREA_CORE, UBUS_MSTR_OFFSET_0);
+	if (!rpriv)
+		return;
+	snprintf(name, sizeof(name), "ubus_master");
+	debugfs_create_file(name, 0400, dbg_regs, rpriv, &regs_dump_fops);
+}
+
+/*
+ * create the debugfs register dump file for the UBUS slave
+ */
+static void dbg_create_ubus_slave_reg(struct bcm_xrdp_priv *priv)
+{
+	struct reg_dump_priv *rpriv;
+	char name[32];
+
+	rpriv = dump_ctx_create(priv, ubus_slave_regs,
+				ARRAY_SIZE(ubus_slave_regs),
+				XRDP_AREA_CORE, UBUS_SLV_OFFSET_0);
+	if (!rpriv)
+		return;
+	snprintf(name, sizeof(name), "ubus_slave");
+	debugfs_create_file(name, 0400, dbg_regs, rpriv, &regs_dump_fops);
+}
+
+/*
+ * create the debugfs register dump file for the ACB interface
+ */
+static void dbg_create_acb_reg(struct bcm_xrdp_priv *priv)
+{
+	struct reg_dump_priv *rpriv;
+	char name[32];
+
+	rpriv = dump_ctx_create(priv, acb_regs,
+				ARRAY_SIZE(acb_regs),
+				XRDP_AREA_CORE, ACB_IF_OFFSET_0);
+	if (!rpriv)
+		return;
+	snprintf(name, sizeof(name), "acb");
+	debugfs_create_file(name, 0400, dbg_regs, rpriv, &regs_dump_fops);
+}
+
+/*
+ * create the debugfs hierarchy and one dump file per hardware block
+ */
+void bcm_xrdp_dbg_init(struct bcm_xrdp_priv *priv)
+{
+	size_t i;
+
+	dbg_root = debugfs_create_dir("bcm63xx_xrdp", NULL);
+	if (IS_ERR(dbg_root))	/* debugfs_create_dir() returns ERR_PTR, never NULL */
+		return;
+
+	dbg_regs = debugfs_create_dir("regs", dbg_root);
+	if (IS_ERR(dbg_regs))
+		return;
+
+	for (i = 0; i < RDP_BBH_COUNT; i++)
+		dbg_create_bbh_rx_reg(priv, i);
+	for (i = 0; i < RDP_BBH_COUNT; i++) {
+		if (i == 1 || i == 2)	/* BBHs 1/2 have no TX side (see get_bbh_tx_offset) */
+			continue;
+		dbg_create_bbh_tx_reg(priv, i);
+	}
+	for (i = 0; i < RDP_RUNNER_COUNT; i++)
+		dbg_create_runner_reg(priv, i);
+	for (i = 0; i < RDP_RUNNER_QUAD_COUNT; i++)
+		dbg_create_quad_reg(priv, i);
+	for (i = 0; i < RDP_DMA_MODULE_COUNT - 1; i++)	/* module 0 is the DMA, the rest are SDMAs */
+		dbg_create_sdma_reg(priv, i);
+	dbg_create_dma_reg(priv);
+	dbg_create_disp_reg(priv);
+	dbg_create_sbpm_reg(priv);
+	dbg_create_ubus_master_reg(priv);
+	dbg_create_ubus_slave_reg(priv);
+	dbg_create_acb_reg(priv);
+}
+
+/*
+ * remove the whole bcm63xx_xrdp debugfs hierarchy
+ */
+void bcm_xrdp_dbg_exit(void)
+{
+	/* NULL-safe: debugfs_remove_recursive() ignores a NULL dentry */
+	debugfs_remove_recursive(dbg_root);
+	dbg_regs = NULL;
+	dbg_root = NULL;
+}
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/xrdp_defs.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/xrdp_defs.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/xrdp_defs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/xrdp_defs.h	2025-09-25 17:40:35.075364786 +0200
@@ -0,0 +1,44 @@
+#ifndef XRDP_DEFS_H_
+#define XRDP_DEFS_H_
+
+/*
+ * generic XRDP definition
+ */
+#define RDP_SBPM_BUF_SIZE		128
+#define RDP_DISP_RNR_GRP_COUNT		8
+#define RDP_DISP_VIQ_COUNT		32
+#define RDP_RUNNER_THREAD_COUNT		16
+
+/*
+ * chip specific definition (63158)
+ */
+#define RDP_RUNNER_QUAD_COUNT		1
+#define RDP_RUNNER_COUNT		6
+#define RDP_ALL_RUNNER_CORE_MASK	((1 << RDP_RUNNER_COUNT) - 1)
+#define RDP_BBH_COUNT			7
+#define RDP_DMA_MODULE_COUNT		3
+#define RDP_DMA_CHUNK_RX_COUNT		48
+#define RDP_DMA_CHUNK_TX_COUNT		64
+#define RDP_PERIPHS_PER_DMA		8
+#define RDP_DIS_REOR_FLL_BUF_COUNT	512
+#define RDP_RUNNER_FREQ			0x2bb
+
+#define RDP_MACTYPE_EMAC		0
+#define RDP_MACTYPE_GPON		1
+#define RDP_MACTYPE_EPON		3
+#define RDP_MACTYPE_XEPON		4
+#define RDP_MACTYPE_DSL			5
+#define RDP_MACTYPE_AE10G		6
+#define RDP_MACTYPE_AE25P		7
+
+enum {	/* BBH indexes: 0-2 are core-area blocks, 3-6 live in WAN top (see get_bbh_rx_offset) */
+	RDP_BBH_IDX_UNIMAC0		= 0,
+	RDP_BBH_IDX_UNIMAC1		= 1,
+	RDP_BBH_IDX_UNIMAC2		= 2,
+	RDP_BBH_IDX_PON			= 3,
+	RDP_BBH_IDX_AE10		= 4,
+	RDP_BBH_IDX_AE25		= 5,
+	RDP_BBH_IDX_DSL			= 6,
+};
+
+#endif /* XRDP_DEFS_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/xrdp_priv.h linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/xrdp_priv.h
--- linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx./xrdp/xrdp_priv.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/soc/bcm/bcm63xx/xrdp/xrdp_priv.h	2025-09-25 17:40:35.075364786 +0200
@@ -0,0 +1,452 @@
+#ifndef BCM63XX_RDP_PRIV_H_
+#define BCM63XX_RDP_PRIV_H_
+
+#include <linux/kernel.h>
+#include <linux/reset.h>
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+
+#include "regs/xrdp_regs.h"
+#include "xrdp_defs.h"
+
+/*
+ * tunables
+ */
+#define UNIFIED_BBH_PD_FIFO_SIZE	7
+#define AE_BBH_PD_FIFO_SIZE		31
+#define PON_BBH_PD_FIFO_SIZE		127
+#define DSL_BBH_PD_FIFO_SIZE		15
+
+/*
+ * firmware queue to task mapping for enet
+ *  2 hardware rx queues per core
+ *  3 hardware tx queues per core
+ */
+#define ENET_FW_RX_FQM_TASK_ID(x)		(1 + 4 * (x))
+#define ENET_FW_RX_XF_QUEUEx_TASK_ID(x,xf_id)	(ENET_FW_RX_FQM_TASK_ID(x) + 1 + xf_id)
+#define ENET_FW_TX_QUEUEx_TASK_ID(x)		(13 + (x))
+
+#define ENET_FW_RX_REGS_SRAM_OFF(q)		(0x100 + (q) * 0x200)
+#define ENET_FW_TX_REGS_SRAM_OFF(q)		(0x500 + (q) * 0x200)
+
+/*
+ * firmware queue to task mapping for dsl
+ *  1 hardware rx queues per core
+ *  8 hardware tx queues per core
+ */
+#define DSL_FW_RX_QUEUEx_TASK_ID(x)		(1 + (x))
+#define DSL_FW_TX_QUEUEx_TASK_ID(x)		(2 + (x) * 2)
+
+#define DSL_FW_RX_REGS_SRAM_OFF			(0x100)
+#define DSL_FW_TX_REGS_SRAM_OFF			(0x200)
+
+/*
+ * bbh-to-runner core assignment (for ethernet firmware)
+ *
+ * NOTE: you may need to adapt the bbh_configs[] array if you change this
+ */
+#define UNIMAC0_BBH_RX_CORE		1
+#define UNIMAC0_BBH_RX_QUEUE		0
+
+#define UNIMAC1_BBH_RX_CORE		2
+#define UNIMAC1_BBH_RX_QUEUE		0
+
+#define UNIMAC2_BBH_RX_CORE		3
+#define UNIMAC2_BBH_RX_QUEUE		0
+
+#define PON_BBH_RX_CORE			4
+#define PON_BBH_RX_QUEUE		0
+
+#define AE10_BBH_RX_CORE		4
+#define AE10_BBH_RX_QUEUE		1
+
+
+#define UNIMACx_BBH_TX_CORE		0
+#define UNIMAC0_BBH_TX_QUEUE		0
+#define UNIMAC1_BBH_TX_QUEUE		1
+#define UNIMAC2_BBH_TX_QUEUE		2
+
+#define PON_BBH_TX_CORE			1
+#define PON_BBH_TX_QUEUE		0
+
+#define AE10_BBH_TX_CORE		1
+#define AE10_BBH_TX_QUEUE		1
+
+/*
+ * dsl core left alone, different firmware
+ *
+ * NOTE: you may need to adapt the bbh_configs[] array if you change this
+ */
+#define DSL_BBH_RX_CORE			5
+#define DSL_BBH_RX_QUEUE		0
+
+#define DSL_BBH_TX_CORE			5
+
+
+struct user_dma {	/* one user-requested DMA buffer — presumably for debug ioctls; confirm against ioctl code */
+	u32			id;
+	u8			*buf;
+	dma_addr_t		dma_addr;
+	u32			size;
+	struct list_head	next;
+};
+
+struct rmem_priv {	/* a mapped reserved-memory region: CPU pointer, bus address, byte size */
+	void			*ptr;
+	dma_addr_t		dma_addr;
+	size_t			size;
+};
+
+struct bbh_dma_params {	/* RX/TX chunk offsets of one BBH inside a DMA or SDMA */
+	u32			rx_offset;
+	u32			tx_offset;
+};
+
+struct bbh_params {
+	/* 0: DMA, 1: SDMA (0 or 1, depending on which one is assigned)  */
+	struct bbh_dma_params	dma_params[2];
+};
+
+struct dma_params {	/* BBHs assigned to one DMA/SDMA module, plus its RX/TX chunk totals */
+	const struct bbh_config	*assigned_bbh_cfg[RDP_PERIPHS_PER_DMA];
+	const struct bbh_dma_config	*assigned_bbh_dma_cfg[RDP_PERIPHS_PER_DMA];
+	u32			assigned_bbh_count;
+	u32			total_rx_chunks;
+	u32			total_tx_chunks;
+};
+
+struct bcm_xrdp_priv {
+	/* platform device reference */
+	struct platform_device	*pdev;
+
+	void __iomem		*regs[2];	/* indexed by enum xrdp_regs_area (core / WAN top) */
+	resource_size_t		regs_phys[2];
+	u32			regs_size[2];
+
+	int			irq_fpm;
+	int			irq_hash;
+	int			irq_qm;
+	int			irq_dsptchr;
+	int			irq_sbpm;
+	int			irq_runner[RDP_RUNNER_COUNT];
+	int			irq_queue[32];
+	spinlock_t		irq_lock;	/* NOTE(review): exact locking scope not visible here — confirm */
+
+	struct bbh_params	bbh_params[RDP_BBH_COUNT];
+	struct dma_params	dma_params[RDP_DMA_MODULE_COUNT];
+
+	struct rmem_priv	rmem_tm;
+	struct reset_control	*rdp_rst;
+	struct ubus4_master	*ubus_masters[6];
+
+	/* for debug */
+	struct list_head		user_dma_list;
+	u32				user_dma_last_id;
+};
+
+int bcm_xrdp_init(struct bcm_xrdp_priv *priv);
+
+#ifdef CONFIG_SOC_BCM63XX_XRDP_IOCTL
+int bcm_xrdp_ioctl_register(struct bcm_xrdp_priv *priv);
+void bcm_xrdp_ioctl_unregister(struct bcm_xrdp_priv *priv);
+#else
+static inline int bcm_xrdp_ioctl_register(struct bcm_xrdp_priv *priv) { return 0; }
+static inline void bcm_xrdp_ioctl_unregister(struct bcm_xrdp_priv *priv) {}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void bcm_xrdp_dbg_init(struct bcm_xrdp_priv *priv);
+void bcm_xrdp_dbg_exit(void);
+#else
+static inline void bcm_xrdp_dbg_init(struct bcm_xrdp_priv *priv) {}
+static inline void bcm_xrdp_dbg_exit(void) {}
+#endif
+
+/*
+ * io helpers
+ */
+static inline u32 xrdp_read32(struct bcm_xrdp_priv *priv,
+			      enum xrdp_regs_area area,
+			      u32 offset)
+{
+	return ioread32(priv->regs[area] + offset);	/* area selects one of the two mapped register ranges */
+}
+
+static inline void xrdp_write32(struct bcm_xrdp_priv *priv,
+				enum xrdp_regs_area area,
+				u32 offset, u32 val)
+{
+	iowrite32(val, priv->regs[area] + offset);	/* dropped 'return' of a void expression (invalid ISO C) */
+}
+
+static inline void xrdp_write32be(struct bcm_xrdp_priv *priv,
+				  enum xrdp_regs_area area,
+				  u32 offset, u32 val)
+{
+	iowrite32be(val, priv->regs[area] + offset);	/* dropped 'return' of a void expression (invalid ISO C) */
+}
+
+static inline void xrdp_memset32(struct bcm_xrdp_priv *priv,
+				 enum xrdp_regs_area area,
+				 u32 offset, u32 val32,
+				 unsigned int size)
+{
+	unsigned int i;
+
+	for (i = 0; i < size; i += 4)	/* size is in bytes; one 32-bit write per word */
+		xrdp_write32(priv, area, offset + i, val32);
+}
+
+static inline void xrdp_memset32be(struct bcm_xrdp_priv *priv,
+				   enum xrdp_regs_area area,
+				   u32 offset, u32 val32,
+				   unsigned int size)
+{
+	unsigned int i;
+
+	for (i = 0; i < size; i += 4)	/* size is in bytes; one big-endian 32-bit write per word */
+		xrdp_write32be(priv, area, offset + i, val32);
+}
+
+static inline void get_runner_offset(unsigned int runner_id,
+				     u32 *offset)
+{
+	switch (runner_id) {	/* one register window per runner core; BUG on out-of-range id */
+	case 0:
+		*offset = RNR_REGS_OFFSET_0;
+		break;
+	case 1:
+		*offset = RNR_REGS_OFFSET_1;
+		break;
+	case 2:
+		*offset = RNR_REGS_OFFSET_2;
+		break;
+	case 3:
+		*offset = RNR_REGS_OFFSET_3;
+		break;
+	case 4:
+		*offset = RNR_REGS_OFFSET_4;
+		break;
+	case 5:
+		*offset = RNR_REGS_OFFSET_5;
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+static inline u32 runner_read(struct bcm_xrdp_priv *priv,
+			      unsigned int id, u32 reg)
+{	/* read a per-core runner register (core area) */
+	u32 offset;
+	get_runner_offset(id, &offset);
+	return xrdp_read32(priv, XRDP_AREA_CORE, offset + reg);
+}
+
+static inline void runner_write(struct bcm_xrdp_priv *priv,
+				unsigned int id, uint32_t reg, u32 val)
+{	/* write a per-core runner register (core area) */
+	u32 offset;
+	get_runner_offset(id, &offset);
+	xrdp_write32(priv, XRDP_AREA_CORE, offset + reg, val);
+}
+
+static inline u32 runner_quad_read(struct bcm_xrdp_priv *priv,
+				   unsigned int id, u32 reg)
+{
+	BUG_ON(id >= RDP_RUNNER_QUAD_COUNT);	/* only quad 0 exists; id does not affect the offset */
+	return xrdp_read32(priv, XRDP_AREA_CORE, RNR_QUAD_OFFSET_0 + reg);
+}
+
+static inline void runner_quad_write(struct bcm_xrdp_priv *priv,
+				     unsigned int id, uint32_t reg, u32 val)
+{
+	BUG_ON(id >= RDP_RUNNER_QUAD_COUNT);	/* only quad 0 exists; id does not affect the offset */
+	xrdp_write32(priv, XRDP_AREA_CORE, RNR_QUAD_OFFSET_0 + reg, val);
+}
+
+static inline void runner_wakeup(struct bcm_xrdp_priv *priv,
+				 unsigned int core_id,
+				 unsigned int thread)
+
+{
+	runner_write(priv, core_id, RNR_REGS_CFG_CPU_WAKEUP,	/* kick one firmware thread on the given core */
+		     thread << CFG_CPU_WAKEUP_THREAD_NUM_SHIFT);
+}
+
+static inline u32 ubus_master_readl(struct bcm_xrdp_priv *priv,
+				    u32 offset)
+{	/* UBUS master register window accessors */
+	return xrdp_read32(priv, XRDP_AREA_CORE, UBUS_MSTR_OFFSET_0 + offset);
+}
+
+static inline void ubus_master_writel(struct bcm_xrdp_priv *priv,
+				     u32 offset, u32 val)
+{
+	xrdp_write32(priv, XRDP_AREA_CORE,
+		     UBUS_MSTR_OFFSET_0 + offset, val);
+}
+
+static inline u32 ubus_slave_readl(struct bcm_xrdp_priv *priv,
+				   u32 offset)
+{	/* UBUS slave register window accessors */
+	return xrdp_read32(priv, XRDP_AREA_CORE, UBUS_SLV_OFFSET_0 + offset);
+}
+
+static inline void ubus_slave_writel(struct bcm_xrdp_priv *priv,
+				     u32 offset, u32 val)
+{
+	xrdp_write32(priv, XRDP_AREA_CORE,
+		     UBUS_SLV_OFFSET_0 + offset, val);
+}
+
+static inline u32 sbpm_reg_read(struct bcm_xrdp_priv *priv, u32 reg)
+{	/* SBPM register window accessors */
+	return xrdp_read32(priv, XRDP_AREA_CORE, SBPM_OFFSET_0 + reg);
+}
+
+static inline void sbpm_reg_write(struct bcm_xrdp_priv *priv, u32 reg,
+				  u32 val)
+{
+	xrdp_write32(priv, XRDP_AREA_CORE, SBPM_OFFSET_0 + reg, val);
+}
+
+static inline u32 dma_reg_read(struct bcm_xrdp_priv *priv,
+			       u32 id, u32 reg)
+{	/* DMA/SDMA register window accessors; id selects the module */
+	return xrdp_read32(priv, XRDP_AREA_CORE, DMA_OFFSET(id) + reg);
+}
+
+static inline void dma_reg_write(struct bcm_xrdp_priv *priv, u32 id,
+				 u32 reg, u32 val)
+{
+	xrdp_write32(priv, XRDP_AREA_CORE, DMA_OFFSET(id) + reg, val);
+}
+
+static inline void get_bbh_rx_offset(unsigned int bbh_rx_id,
+				     uint32_t *offset,
+				     enum xrdp_regs_area *area)
+{
+	switch (bbh_rx_id) {	/* BBH RX 0-2: core area; 3-6: WAN top area */
+	case 0:
+		*offset = BBH_RX_OFFSET_0;
+		*area = XRDP_AREA_CORE;
+		break;
+	case 1:
+		*offset = BBH_RX_OFFSET_1;
+		*area = XRDP_AREA_CORE;
+		break;
+	case 2:
+		*offset = BBH_RX_OFFSET_2;
+		*area = XRDP_AREA_CORE;
+		break;
+	case 3:
+		*offset = BBH_RX_OFFSET_3;
+		*area = XRDP_AREA_WAN_TOP;
+		break;
+	case 4:
+		*offset = BBH_RX_OFFSET_4;
+		*area = XRDP_AREA_WAN_TOP;
+		break;
+	case 5:
+		*offset = BBH_RX_OFFSET_5;
+		*area = XRDP_AREA_WAN_TOP;
+		break;
+	case 6:
+		*offset = BBH_RX_OFFSET_6;
+		*area = XRDP_AREA_WAN_TOP;
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+static inline u32 bbh_rx_read(struct bcm_xrdp_priv *priv, u32 id, u32 reg)
+{
+	u32 offset; enum xrdp_regs_area area;	/* was "u32 area": incompatible pointer type below */
+	get_bbh_rx_offset(id, &offset, &area);
+	return xrdp_read32(priv, area, offset + reg);
+}
+
+static inline void bbh_rx_write(struct bcm_xrdp_priv *priv, u32 id,
+				u32 reg, u32 val)
+{
+	u32 offset; enum xrdp_regs_area area;	/* was "u32 area": incompatible pointer type below */
+	get_bbh_rx_offset(id, &offset, &area);
+	xrdp_write32(priv, area, offset + reg, val);
+}
+
+static inline void get_bbh_tx_offset(unsigned int bbh_tx_id,
+				     uint32_t *offset,
+				     enum xrdp_regs_area *area)
+{
+	switch (bbh_tx_id) {	/* BBHs 1/2 have no TX block: ids 3-6 map to TX offsets 1-4 */
+	case 0:
+		*offset = BBH_TX_OFFSET_0;
+		*area = XRDP_AREA_CORE;
+		break;
+	case 3:
+		*offset = BBH_TX_OFFSET_1;
+		*area = XRDP_AREA_WAN_TOP;
+		break;
+	case 4:
+		*offset = BBH_TX_OFFSET_2;
+		*area = XRDP_AREA_WAN_TOP;
+		break;
+	case 5:
+		*offset = BBH_TX_OFFSET_3;
+		*area = XRDP_AREA_WAN_TOP;
+		break;
+	case 6:
+		*offset = BBH_TX_OFFSET_4;
+		*area = XRDP_AREA_WAN_TOP;
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+static inline u32 bbh_tx_read(struct bcm_xrdp_priv *priv, u32 id, u32 reg)
+{
+	u32 offset; enum xrdp_regs_area area;	/* was "u32 area": incompatible pointer type below */
+	get_bbh_tx_offset(id, &offset, &area);
+	return xrdp_read32(priv, area, offset + reg);
+}
+
+static inline void bbh_tx_write(struct bcm_xrdp_priv *priv, u32 id,
+				u32 reg, u32 val)
+{
+	u32 offset; enum xrdp_regs_area area;	/* was "u32 area": incompatible pointer type below */
+	get_bbh_tx_offset(id, &offset, &area);
+	xrdp_write32(priv, area, offset + reg, val);
+}
+
+static inline u32 disp_read(struct bcm_xrdp_priv *priv, u32 reg)
+{	/* dispatcher register window accessors */
+	return xrdp_read32(priv, XRDP_AREA_CORE, DSPTCHR_OFFSET_0 + reg);
+}
+
+static inline void disp_write(struct bcm_xrdp_priv *priv,  u32 reg, u32 val)
+{
+	xrdp_write32(priv, XRDP_AREA_CORE, DSPTCHR_OFFSET_0 + reg, val);
+}
+
+static inline u32 qm_read(struct bcm_xrdp_priv *priv, u32 reg)
+{	/* queue manager register window accessors */
+	return xrdp_read32(priv, XRDP_AREA_CORE, QM_OFFSET_0 + reg);
+}
+
+static inline void qm_write(struct bcm_xrdp_priv *priv,  u32 reg, u32 val)
+{
+	xrdp_write32(priv, XRDP_AREA_CORE, QM_OFFSET_0 + reg, val);
+}
+
+#endif /* BCM63XX_XRDP_PRIV_H_ */
diff -Nruw linux-6.13.12-fbx/drivers/thermal/cortina./Kconfig linux-6.13.12-fbx/drivers/thermal/cortina/Kconfig
--- linux-6.13.12-fbx/drivers/thermal/cortina./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/thermal/cortina/Kconfig	2025-09-25 17:40:35.223365520 +0200
@@ -0,0 +1,3 @@
+config CA82XX_THERMAL
+	tristate "cortina 82xx thermal driver"
+	depends on ARCH_CORTINA || MACH_CORTINA_SATURN || COMPILE_TEST
diff -Nruw linux-6.13.12-fbx/drivers/thermal/cortina./Makefile linux-6.13.12-fbx/drivers/thermal/cortina/Makefile
--- linux-6.13.12-fbx/drivers/thermal/cortina./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/drivers/thermal/cortina/Makefile	2025-09-25 17:40:35.223365520 +0200
@@ -0,0 +1 @@
+obj-$(CONFIG_CA82XX_THERMAL)	+= ca82xx_thermal.o
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/usb/host/usb-bcm63158.c	2025-09-25 17:40:35.355366175 +0200
@@ -0,0 +1,698 @@
+/*
+ * usb-bcm63158.c for usb-bcm63158
+ * Created by <nschichan@freebox.fr> on Tue Jul  9 14:44:06 2019
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/ubus4.h>
+#include <linux/io.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+
+/*
+ * USB control register space defines
+ */
+#define USB_CTRL_SETUP				0x0
+#define  USB_CTRL_SETUP_IOC			(1 << 4)
+#define  USB_CTRL_SETUP_IPP			(1 << 5)
+#define  USB_CTRL_SETUP_STRAP_IPP_SEL		(1 << 25)
+
+#define USB_CTRL_BRIDGE_CTRL			0x0c
+#define  USB_CTRL_BRIDGE_CTRL_SWAPMASK		0xf
+#define  USB_CTRL_BRIDGE_CTRL_NOSWAP		0x0
+#define  USB_CTRL_BRIDGE_CTRL_SWAP_D_AND_C	0x5
+#define  USB_CTRL_BRIDGE_CTRL_SWAP_D		0xa
+#define  USB_CTRL_BRIDGE_CTRL_SWAP_C		0xf
+
+#define USB_CTRL_PHY_IO_CMD			0x14
+#define  USB_CTRL_PHY_IO_CMD_READ		(1 << 24)
+#define  USB_CTRL_PHY_IO_CMD_WRITE		(1 << 25)
+
+#define USB_CTRL_PHY_IO_DATA			0x18
+
+
+#define USB_CTRL_PM				0x34
+#define  USB_CTRL_PM_PHY_PDWN			(1 << 31)
+#define  USB_CTRL_PM_XHCI_RSTB			(1 << 22)
+
+#define USB_CTRL_USB30_CTL1			0x60
+#define  USB_CTRL_USB30_CTL1_PLL_SEQ_START	(1 << 4)
+
+/* driver state, allocated per probed "brcm,bcm63158-usb" node */
+struct bcm63158_usb_priv {
+	void __iomem *usb_control_regs;	/* "usb-control" register window */
+	struct resource *usb_control_res;
+	void __iomem *xhci_regs;	/* "xhci" window; released again before
+					 * handing the range to xhci-hcd */
+	struct resource *xhci_res;
+	struct reset_control *reset;	/* "xhci-pmc-reset" line */
+	struct ubus4_master *ubus;
+	struct device *dev;
+	struct platform_device *pdev;
+
+	/* child HCD platform devices (xhci/ehci0/ehci1/ohci0/ohci1),
+	 * recorded so remove()/the probe error path can unregister them */
+	struct platform_device *hcd_devices[5];
+	size_t nr_hcd_devices;
+};
+
+/*
+ * USB control register space access helpers.
+ *
+ * Note the writel()-like argument order of the write helpers:
+ * value first, then device/offset.
+ */
+static inline u32 usb_control_read(struct bcm63158_usb_priv *priv, u32 off)
+{
+	u32 val = readl(priv->usb_control_regs + off);
+
+	dev_dbg(priv->dev, "usb_control_read: %08x at offset %02x\n",
+		 val, off);
+	return val;
+}
+
+static inline void usb_control_write(u32 val, struct bcm63158_usb_priv *priv,
+				     u32 off)
+{
+	dev_dbg(priv->dev, "usb_control_write: %08x at offset %02x\n",
+		 val, off);
+	writel(val, priv->usb_control_regs + off);
+}
+
+/*
+ * USB XHCI register space access helpers.
+ */
+static inline u32 xhci_read(struct bcm63158_usb_priv *priv, u32 off)
+{
+	u32 val = readl(priv->xhci_regs + off);
+
+	dev_dbg(priv->dev, "xhci_read: %08x at offset %02x\n",
+		 val, off);
+	return val;
+}
+
+static inline void xhci_write(u32 val, struct bcm63158_usb_priv *priv,
+				     u32 off)
+{
+	dev_dbg(priv->dev, "xhci_write: %08x at offset %02x\n",
+		 val, off);
+	writel(val, priv->xhci_regs + off);
+}
+
+/*
+ * Look up the named MEM resource on @pdev, request it and map it.
+ * On success *regs holds the mapping and *res the resource; both are
+ * devm-managed. Returns 0 or a negative errno.
+ */
+static int bcm63158_usb_iomap_resource(struct platform_device *pdev,
+				       const char *resname, void **regs,
+				       struct resource **res)
+{
+	struct resource *resource;
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM, resname);
+	if (!resource) {
+		dev_err(&pdev->dev, "unable to get %s registers.\n", resname);
+		return -ENXIO;
+	}
+	*res = resource;
+
+	if (!devm_request_mem_region(&pdev->dev, resource->start,
+				     resource_size(resource),
+				     dev_name(&pdev->dev)))
+		return -EBUSY;
+
+	/*
+	 * devm_ioremap() returns NULL on failure, not an ERR_PTR:
+	 * the previous IS_ERR() test could never trigger and a failed
+	 * mapping was returned to the caller as success.
+	 */
+	*regs = devm_ioremap(&pdev->dev, resource->start,
+			     resource_size(resource));
+	if (!*regs) {
+		dev_err(&pdev->dev, "unable to ioremap %s registers.\n",
+			resname);
+		return -ENOMEM;
+	}
+	dev_dbg(&pdev->dev, "%s: %pR\n", resname, resource);
+	return 0;
+}
+
+/*
+ * UBUS configuration
+ *
+ * Program the UBUS master attached to the USB block: apply the
+ * credit table, zero the congestion threshold and set up the port
+ * remap. The helpers return nothing, so this always reports success.
+ */
+static int bcm63158_usb_ubus_config(struct bcm63158_usb_priv *priv)
+{
+	ubus_master_apply_credits(priv->ubus);
+	ubus_master_set_congestion_threshold(priv->ubus, 0);
+	ubus_master_remap_port(priv->ubus);
+	return 0;
+}
+
+/*
+ * USB2/3 phy access helpers
+ *
+ * The PHY_IO_CMD register carries the register address in bits 23:16,
+ * the data in bits 15:0 and selects the USB3 PHY with bit 31.
+ */
+#define MDIO_USB3	(1 << 31)
+#define MDIO_USB2	0x0
+
+static void __usb_phy_write(struct bcm63158_usb_priv *priv, int address, u16 value,
+			    u32 mode)
+{
+	u32 reg;
+
+	address &= 0xff;
+	value &= 0xffff;
+
+	/* latch address/data first, then pulse the WRITE strobe */
+	reg = (address << 16) | value | mode;
+	usb_control_write(reg, priv, USB_CTRL_PHY_IO_CMD);
+
+	reg |= USB_CTRL_PHY_IO_CMD_WRITE;
+	usb_control_write(reg, priv, USB_CTRL_PHY_IO_CMD);
+	mdelay(1);
+
+	reg &= ~USB_CTRL_PHY_IO_CMD_WRITE;
+	usb_control_write(reg, priv, USB_CTRL_PHY_IO_CMD);
+	mdelay(1);
+}
+
+static u16 __usb_phy_read(struct bcm63158_usb_priv *priv, int address, u32 mode)
+{
+	u32 reg;
+
+	address &= 0xff;
+
+	reg = (address << 16) | mode;
+	usb_control_write(reg, priv, USB_CTRL_PHY_IO_CMD);
+
+	reg |= USB_CTRL_PHY_IO_CMD_READ;
+	usb_control_write(reg, priv, USB_CTRL_PHY_IO_CMD);
+	mdelay(1);
+
+	/*
+	 * NOTE(review): the READ strobe is left asserted here; the
+	 * deassert below was disabled on purpose or by oversight —
+	 * confirm against the PHY MDIO bridge documentation.
+	 */
+#if 0
+	reg &= ~USB_CTRL_PHY_IO_CMD_READ;
+	usb_control_write(reg, priv, USB_CTRL_PHY_IO_CMD);
+#endif
+	mdelay(1);
+
+	return usb_control_read(priv, USB_CTRL_PHY_IO_DATA);
+}
+
+/*
+ * usb3 phy helpers
+ */
+static void usb3_phy_write(struct bcm63158_usb_priv *priv, int address, u16 value)
+{
+	dev_dbg(priv->dev, "usb3_phy_write: %04x at %02x\n", value, address);
+	__usb_phy_write(priv, address, value, MDIO_USB3);
+}
+
+static u16 usb3_phy_read(struct bcm63158_usb_priv *priv, int address)
+{
+	u32 ret = __usb_phy_read(priv, address, MDIO_USB3);
+
+	dev_dbg(priv->dev, "usb3_phy_read: %04x at %02x\n", ret, address);
+	return ret;
+}
+
+/* paged access: register 0x1f selects the page before the access proper */
+static void usb3_phy_write_page(struct bcm63158_usb_priv *priv, int page, int address,
+				u16 value)
+{
+	dev_dbg(priv->dev, "usb3_phy_write_page: %04x at %04x.%02x\n",
+		 value, page, address);
+	usb3_phy_write(priv, 0x1f, page);
+	usb3_phy_write(priv, address, value);
+}
+
+static u16 usb3_phy_read_page(struct bcm63158_usb_priv *priv, int page, int address)
+{
+	u32 ret;
+
+	usb3_phy_write(priv, 0x1f, page);
+	ret = usb3_phy_read(priv, address);
+	dev_dbg(priv->dev, "usb3_phy_read_page: %04x at %04x.%02x\n",
+		 ret, page, address);
+	return ret;
+}
+
+/*
+ * usb2 phy helpers
+ */
+static void usb2_phy_write(struct bcm63158_usb_priv *priv, int address, u16 value)
+{
+	dev_dbg(priv->dev, "usb2_phy_write: %04x at %02x\n", value, address);
+	__usb_phy_write(priv, address, value, MDIO_USB2);
+}
+
+static u16 usb2_phy_read(struct bcm63158_usb_priv *priv, int address)
+{
+	u32 ret = __usb_phy_read(priv, address, MDIO_USB2);
+
+	dev_dbg(priv->dev, "usb2_phy_read: %04x at %02x\n", ret, address);
+	return ret;
+}
+
+/* paged access: register 0x1f selects the page before the access proper */
+static void usb2_phy_write_page(struct bcm63158_usb_priv *priv, int page, int address,
+				u16 value)
+{
+	dev_dbg(priv->dev, "usb2_phy_write_page: %04x at %04x.%02x\n",
+		 value, page, address);
+	usb2_phy_write(priv, 0x1f, page);
+	usb2_phy_write(priv, address, value);
+}
+
+static __maybe_unused u16 usb2_phy_read_page(struct bcm63158_usb_priv *priv, int page, int address)
+{
+	u32 ret;
+
+	usb2_phy_write(priv, 0x1f, page);
+	ret = usb2_phy_read(priv, address);
+	dev_dbg(priv->dev, "usb2_phy_read_page: %04x at %04x.%02x\n",
+		 ret, page, address);
+	return ret;
+}
+
+/*
+ * power enable and power fault polarity management.
+ */
+static void bcm63158_usb_set_pwren_polarity(struct bcm63158_usb_priv *priv)
+{
+	bool pwren_low = of_property_read_bool(priv->dev->of_node,
+					       "brcm,pwren-low");
+	u32 reg;
+
+	dev_info(priv->dev, "USB PWR enable pins are active-%s\n",
+		 pwren_low ? "low" : "high");
+
+	/* IPP set = active-low power enable pins */
+	reg = usb_control_read(priv, USB_CTRL_SETUP);
+	if (pwren_low)
+		reg |= USB_CTRL_SETUP_IPP;
+	else
+		reg &= ~USB_CTRL_SETUP_IPP;
+	/* presumably this makes the register value win over the
+	 * bootstrap pin — confirm against the SoC documentation */
+	reg &= ~USB_CTRL_SETUP_STRAP_IPP_SEL;
+	usb_control_write(reg, priv, USB_CTRL_SETUP);
+}
+
+static void bcm63158_usb_set_pwrflt_polarity(struct bcm63158_usb_priv *priv)
+{
+	bool pwrflt_low = of_property_read_bool(priv->dev->of_node,
+						"brcm,pwrflt-low");
+	u32 reg;
+
+	dev_info(priv->dev, "USB PWR fault pins are active-%s\n",
+		 pwrflt_low ? "low" : "high");
+
+	/* IOC set = active-low overcurrent/power-fault pins */
+	reg = usb_control_read(priv, USB_CTRL_SETUP);
+	if (pwrflt_low)
+		reg |= USB_CTRL_SETUP_IOC;
+	else
+		reg &= ~USB_CTRL_SETUP_IOC;
+	usb_control_write(reg, priv, USB_CTRL_SETUP);
+}
+
+/*
+ * USB3 phy configuration
+ *
+ * The page numbers and bit masks below are vendor magic; port 1 uses
+ * the same registers at a +0x1000 page offset.
+ */
+static void __bcm63158_usb3_ssc_enable(struct bcm63158_usb_priv *priv, int port)
+{
+	int page_addr = 0x8040;
+	u16 val;
+
+	if (port > 0)
+		page_addr += 0x1000;
+
+	val = usb3_phy_read_page(priv, page_addr, 0x1);
+	val |= 0x0f;
+	usb3_phy_write_page(priv, page_addr, 0x1, val);
+}
+
+/* enable spread spectrum clocking on both SuperSpeed ports */
+static void bcm63158_usb3_ssc_enable(struct bcm63158_usb_priv *priv)
+{
+	__bcm63158_usb3_ssc_enable(priv, 0);
+	__bcm63158_usb3_ssc_enable(priv, 1);
+}
+
+static void bcm63158_usb3_enable_pipe_reset(struct bcm63158_usb_priv *priv)
+{
+	u16 val;
+
+	val = usb3_phy_read_page(priv, 0x8000, 0x0f);
+	val |= 0x200;
+	usb3_phy_write_page(priv, 0x8000, 0x0f, val);
+}
+
+static void __bcm63158_usb3_enable_sigdet(struct bcm63158_usb_priv *priv,
+					  int port)
+{
+	int page_addr = 0x8080;
+	u16 val;
+
+	if (port > 0)
+		page_addr += 0x1000;
+
+	val = usb3_phy_read_page(priv, page_addr, 0x5);
+	val = (val & ~0x800f) | 0x800d;
+	usb3_phy_write_page(priv, page_addr, 0x5, val);
+}
+
+static void __bcm63158_usb3_enable_skip_align(struct bcm63158_usb_priv *priv,
+					      int port)
+{
+	int page_addr = 0x8060;
+	u16 val;
+
+	if (port > 0)
+		page_addr += 0x1000;
+
+	val = usb3_phy_read_page(priv, page_addr, 0x1);
+	val |= 0x200;
+	usb3_phy_write_page(priv, page_addr, 0x1, val);
+}
+
+static void bcm63158_usb3_enable_sigdet(struct bcm63158_usb_priv *priv)
+{
+	__bcm63158_usb3_enable_sigdet(priv, 0);
+	__bcm63158_usb3_enable_sigdet(priv, 1);
+}
+
+static void bcm63158_usb3_enable_skip_align(struct bcm63158_usb_priv *priv)
+{
+	__bcm63158_usb3_enable_skip_align(priv, 0);
+	__bcm63158_usb3_enable_skip_align(priv, 1);
+}
+
+/* run all USB3 phy tweaks, then let the phy settle */
+static void bcm63158_usb3_phy_init(struct bcm63158_usb_priv *priv)
+{
+	bcm63158_usb3_ssc_enable(priv);
+	bcm63158_usb3_enable_pipe_reset(priv);
+	bcm63158_usb3_enable_sigdet(priv);
+	bcm63158_usb3_enable_skip_align(priv);
+
+	mdelay(300);
+}
+
+/*
+ * USB2 phy configuration
+ */
+static void bcm63158_usb2_eye_fix(struct bcm63158_usb_priv *priv)
+{
+	/* vendor magic value tuning the USB2 eye diagram */
+	usb2_phy_write_page(priv, 0x80a0, 0xa, 0xc6a0);
+}
+
+/*
+ * XHCI EC IRA indirect access.
+ *
+ * Indirect window: write the target address to AR, then access DR.
+ */
+#define XHCI_ECIRA_AR	0xf98
+#define XHCI_ECIRA_DR	0xf9c
+
+static u32 xhci_ecira_read(struct bcm63158_usb_priv *priv, u32 reg)
+{
+	xhci_write(reg, priv, XHCI_ECIRA_AR);
+	return xhci_read(priv, XHCI_ECIRA_DR);
+}
+
+static void xhci_ecira_write(struct bcm63158_usb_priv *priv, u32 reg, u32 value)
+{
+	xhci_write(reg, priv, XHCI_ECIRA_AR);
+	xhci_write(value, priv, XHCI_ECIRA_DR);
+}
+
+/* set bit 16 of indirect register 0xa20c (vendor ERDY NumP workaround) */
+static void bcm63158_usb3_erdy_nump_bypass(struct bcm63158_usb_priv *priv)
+{
+	u32 v;
+
+	v = xhci_ecira_read(priv, 0xa20c);
+	v |= 0x10000;
+	xhci_ecira_write(priv, 0xa20c, v);
+}
+
+
+#define XHCI_EC_ECHHST	0xfa0
+
+/* clear the top bit of ECHHST, but only when UAS support is built in */
+static void  bcm63158_usb3_uas_wa(struct bcm63158_usb_priv *priv)
+{
+	if (IS_ENABLED(CONFIG_USB_UAS)) {
+		u32 reg;
+
+		reg = xhci_read(priv, XHCI_EC_ECHHST);
+		reg &= 0x7fffffff;
+		xhci_write(reg, priv, XHCI_EC_ECHHST);
+	}
+}
+
+/*
+ * Instantiate one child HCD platform device (@devname.@id) reusing
+ * the MEM and IRQ resources declared under @resource_name on our own
+ * node, and record it in priv->hcd_devices for later removal.
+ * Returns 0 or a negative errno.
+ */
+static int bcm63158_usb_add_usb_pdev(struct bcm63158_usb_priv *priv,
+				     const char *devname,
+				     const char *resource_name,
+				     u32 id)
+{
+	struct resource res[2], *pres;
+	struct platform_device *pdev;
+	int error, irq;
+
+	/*
+	 * check for room up front: the old check ran after
+	 * platform_device_add() and left a registered but untracked
+	 * device behind on overflow.
+	 */
+	if (WARN_ON(priv->nr_hcd_devices == ARRAY_SIZE(priv->hcd_devices)))
+		return -ENOSPC;
+
+	pres = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM,
+					    resource_name);
+	if (!pres) {
+		dev_err(priv->dev, "unable to get MEM resource for %s\n",
+			resource_name);
+		return -ENXIO;
+	}
+	res[0] = *pres;
+
+	irq = platform_get_irq_byname(priv->pdev, resource_name);
+	if (irq < 0) {
+		dev_err(priv->dev, "unable to get IRQ resource for %s\n",
+			resource_name);
+		return irq;
+	}
+	memset(&res[1], 0, sizeof (res[1]));
+	res[1].start = res[1].end = irq;
+	res[1].flags = IORESOURCE_IRQ;
+	res[1].name = resource_name;
+
+	dev_dbg(priv->dev, "%s.%d %pR %pR\n", devname, id,
+		 &res[0], &res[1]);
+
+	pdev = platform_device_alloc(devname, id);
+	if (!pdev) {
+		dev_err(priv->dev, "unable to allocate %s.%d\n",
+			devname, id);
+		return -ENOMEM;
+	}
+
+	/* this return value used to be ignored */
+	error = platform_device_add_resources(pdev, res, 2);
+	if (error) {
+		dev_err(priv->dev, "unable to add resources to "
+			"%s.%d: %d\n", devname, id, error);
+		platform_device_put(pdev);
+		return error;
+	}
+
+	/*
+	 * internal platform device parameters setup. This is quite
+	 * arch specific and known to work on arm64.
+	 *
+	 * copying of dev.archdata is required to preserve the
+	 * dma-coherent attribute coming from the device-tree.
+	 */
+	pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+	pdev->dev.archdata = priv->dev->archdata;
+	pdev->dev.dma_coherent = priv->dev->dma_coherent;
+
+	error = platform_device_add(pdev);
+	if (error) {
+		dev_err(priv->dev, "unable to add platform device "
+			"%s.%d: %d\n", devname, id, error);
+		platform_device_put(pdev);
+		return error;
+	}
+
+	priv->hcd_devices[priv->nr_hcd_devices] = pdev;
+	priv->nr_hcd_devices++;
+
+	return 0;
+}
+
+/*
+ * Bring the USB block out of reset, run the UBUS/PHY/controller init
+ * sequence (the ordering below follows the vendor bring-up and should
+ * not be reshuffled), then spawn the xhci/ehci/ohci child devices.
+ */
+static int bcm63158_usb_probe(struct platform_device *pdev)
+{
+	struct bcm63158_usb_priv *priv;
+	int error;
+	u32 reg;
+	size_t i;
+
+	dev_dbg(&pdev->dev, "probe.\n");
+
+	priv = devm_kzalloc(&pdev->dev, sizeof (*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	priv->dev = &pdev->dev;
+	priv->pdev = pdev;
+	dev_set_drvdata(&pdev->dev, priv);
+
+	/*
+	 * get some resources.
+	 */
+	priv->reset = devm_reset_control_get(&pdev->dev, "xhci-pmc-reset");
+	if (IS_ERR(priv->reset)) {
+		dev_err(&pdev->dev, "unable to get reset control.\n");
+		return PTR_ERR(priv->reset);
+	}
+
+	priv->ubus = ubus4_master_of_get(pdev->dev.of_node);
+	if (IS_ERR(priv->ubus)) {
+		dev_err(&pdev->dev, "unable to get UBUS master.\n");
+		return PTR_ERR(priv->ubus);
+	}
+
+	error = bcm63158_usb_iomap_resource(pdev, "xhci",
+					    &priv->xhci_regs,
+					    &priv->xhci_res);
+	if (error)
+		return error;
+
+	error = bcm63158_usb_iomap_resource(pdev, "usb-control",
+					    &priv->usb_control_regs,
+					    &priv->usb_control_res);
+	if (error)
+		return error;
+
+	/*
+	 * deassert reset and configure UBUS.
+	 */
+	error = reset_control_deassert(priv->reset);
+	if (error) {
+		dev_err(&pdev->dev, "unable to deassert reset.\n");
+		return error;
+	}
+
+	error = bcm63158_usb_ubus_config(priv);
+	if (error)
+		goto err;
+
+	/*
+	 * power pins polarity config
+	 */
+	bcm63158_usb_set_pwren_polarity(priv);
+	bcm63158_usb_set_pwrflt_polarity(priv);
+
+	/*
+	 * power up USB PHYs
+	 */
+	reg = usb_control_read(priv, USB_CTRL_PM);
+	reg &= ~USB_CTRL_PM_PHY_PDWN;
+	usb_control_write(reg, priv, USB_CTRL_PM);
+
+	bcm63158_usb3_phy_init(priv);
+
+	/* start the USB3 PLL sequencer, then release the XHCI reset */
+	reg = usb_control_read(priv, USB_CTRL_USB30_CTL1);
+	reg |= USB_CTRL_USB30_CTL1_PLL_SEQ_START;
+	usb_control_write(reg, priv, USB_CTRL_USB30_CTL1);
+
+	reg = usb_control_read(priv, USB_CTRL_PM);
+	reg |= USB_CTRL_PM_XHCI_RSTB;
+	usb_control_write(reg, priv, USB_CTRL_PM);
+
+	/*
+	 * USB2 phy configuration
+	 */
+	bcm63158_usb2_eye_fix(priv);
+
+	/*
+	 * ensure no byte swap occurs on EHCI & OHCI for both data &
+	 * control. Bridge Control register reset value should already
+	 * be configured for no byte swapping though.
+	 */
+	reg = usb_control_read(priv, USB_CTRL_BRIDGE_CTRL);
+	reg &= ~USB_CTRL_BRIDGE_CTRL_SWAPMASK;
+	reg |= USB_CTRL_BRIDGE_CTRL_NOSWAP;
+	usb_control_write(reg, priv, USB_CTRL_BRIDGE_CTRL);
+
+	/*
+	 * work around possible overcurrent indications during init.
+	 */
+	reg = usb_control_read(priv, USB_CTRL_PM);
+	usb_control_write(0, priv, USB_CTRL_PM);
+	usb_control_write(reg, priv, USB_CTRL_PM);
+	mdelay(1);
+
+	bcm63158_usb3_erdy_nump_bypass(priv);
+	bcm63158_usb3_uas_wa(priv);
+
+	/*
+	 * XHCI registers are going to be claimed by the xhci-hcd
+	 * driver.
+	 */
+	devm_iounmap(&pdev->dev, priv->xhci_regs);
+	priv->xhci_regs = NULL;
+	devm_release_mem_region(&pdev->dev, priv->xhci_res->start,
+				resource_size(priv->xhci_res));
+	priv->xhci_res = NULL;
+
+	/*
+	 * add XHCI HCD
+	 */
+	error = bcm63158_usb_add_usb_pdev(priv, "xhci-hcd", "xhci", 0);
+	if (error)
+		goto err;
+
+	/*
+	 * the XHCI HCD on the 63158 cannot handle the USB1/USB2
+	 * device, but there are OHCI and EHCI registers and resources
+	 * separately for both ports. we need to instantiate them
+	 * here.
+	 */
+	error = bcm63158_usb_add_usb_pdev(priv, "ehci-platform", "ehci0", 0);
+	if (error)
+		goto err;
+
+	error = bcm63158_usb_add_usb_pdev(priv, "ehci-platform", "ehci1", 1);
+	if (error)
+		goto err;
+
+	error = bcm63158_usb_add_usb_pdev(priv, "ohci-platform", "ohci0", 0);
+	if (error)
+		goto err;
+
+	error = bcm63158_usb_add_usb_pdev(priv, "ohci-platform", "ohci1", 1);
+	if (error)
+		goto err;
+
+	return 0;
+err:
+	/*
+	 * cleanup previously registered HCD platform devices
+	 */
+	for (i = 0; i < priv->nr_hcd_devices; ++i) {
+		dev_info(&pdev->dev, "removing hcd device %zd: %s", i,
+			 dev_name(&priv->hcd_devices[i]->dev));
+		platform_device_del(priv->hcd_devices[i]);
+		platform_device_put(priv->hcd_devices[i]);
+	}
+
+	/*
+	 * restore reset on error.
+	 */
+	reset_control_assert(priv->reset);
+	return error;
+}
+
+/* tear down every child HCD platform device created during probe */
+static void bcm63158_usb_remove(struct platform_device *pdev)
+{
+	struct bcm63158_usb_priv *priv = dev_get_drvdata(&pdev->dev);
+	size_t n;
+
+	for (n = 0; n < priv->nr_hcd_devices; n++) {
+		struct platform_device *hcd = priv->hcd_devices[n];
+
+		dev_info(&pdev->dev, "removing hcd device %zd: %s", n,
+			 dev_name(&hcd->dev));
+		platform_device_del(hcd);
+		platform_device_put(hcd);
+	}
+}
+
+static const struct of_device_id bcm63158_usb_of_match[] = {
+	{ .compatible = "brcm,bcm63158-usb" },
+	{},	/* sentinel */
+};
+MODULE_DEVICE_TABLE(of, bcm63158_usb_of_match);
+
+/*
+ * NOTE(review): platform_driver_register() fills in .owner itself,
+ * so the explicit assignment below is redundant (though harmless).
+ */
+struct platform_driver bcm63158_usb_driver = {
+	.probe		= bcm63158_usb_probe,
+	.remove		= bcm63158_usb_remove,
+	.driver		= {
+		.name		= "bcm63158-usb",
+		.owner		= THIS_MODULE,
+		.of_match_table	= bcm63158_usb_of_match,
+	},
+};
+
+MODULE_DESCRIPTION("Broadcom BCM63158 SoCs USB 1/2/3 Host Driver.");
+MODULE_LICENSE("GPL v2");
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/drivers/video/fbdev/ssd1320.c	2025-09-25 17:40:35.447366631 +0200
@@ -0,0 +1,1000 @@
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/sched.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/gpio.h>
+#include <linux/fb.h>
+#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/backlight.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/reset.h>
+
+#define SSD1320_MAX_BRIGHTNESS		0xff
+#define SSD1320_NOMINAL_BRIGHTNESS	0x9f
+
+/*
+ * common commands
+ */
+#define OPCODE_ADDRESSING_MODE		0x20
+#define OPCODE_SET_COLUMN		0x21
+#define OPCODE_SET_ROW			0x22
+#define OPCODE_CONTRAST			0x81
+#define OPCODE_SET_SEG_NORM_SCAN_DIR	0xa0
+#define OPCODE_SET_SEG_REV_SCAN_DIR	0xa1
+#define OPCODE_DISPLAY_START_LINE	0xa2
+#define OPCODE_DISPLAY_NO_FORCEON	0xa4
+#define OPCODE_DISPLAY_FORCEON		0xa5
+#define OPCODE_DISPLAY_NO_INVERSE	0xa6
+#define OPCODE_DISPLAY_INVERSE		0xa7
+#define OPCODE_MULTIPLEX_RATIO		0xa8
+#define OPCODE_IREF_SELECTION		0xad
+#define OPCODE_DISPLAY_OFF		0xae
+#define OPCODE_DISPLAY_ON		0xaf
+#define OPCODE_SET_PRECHARGE_VOLTAGE	0xbc
+#define OPCODE_SET_GRAYSCALE_TBL	0xbe
+#define OPCODE_DEF_GRAY			0xbf
+#define OPCODE_SET_COM_NORMAL_SCAN_DIR	0xc0
+#define OPCODE_SET_COM_REV_SCAN_DIR	0xc8
+#define OPCODE_SET_SEG_PINS_HWCONFIG	0xda
+#define OPCODE_DISPLAY_OFFSET		0xd3
+#define OPCODE_CLK_DIVIDE_RATIO		0xd5
+#define OPCODE_SET_DISPLAY_ENH_A	0xd8
+#define OPCODE_SET_PRECHARGE_PERIOD	0xd9
+#define OPCODE_SET_VCOM_DESELECT_LVL	0xdb
+#define OPCODE_SET_DISPLAY_ENH_B	0xf0
+
+/*
+ * ch1120 specific commands
+ */
+#define OPCODE_CH1120_SET_DISCHAGE_PERIOD	0x93
+#define OPCODE_CH1120_GRAYSCALE_MONO_MODE	0xac
+
+/*
+ * fbinfo
+ */
+static struct fb_fix_screeninfo ssd1320_fb_fix = {
+	.id		= "ssd1320",
+	.type		= FB_TYPE_PACKED_PIXELS,
+	.visual		= FB_VISUAL_STATIC_PSEUDOCOLOR,
+	.xpanstep	= 0,
+	.ypanstep	= 1,	/* vertical panning only */
+	.ywrapstep	= 0,
+	.accel		= FB_ACCEL_NONE,
+};
+
+/* 8bpp grayscale; the panel only uses the upper 4 bits of each pixel */
+static struct fb_var_screeninfo ssd1320_fb_var = {
+	.bits_per_pixel	= 8,
+	.grayscale	= 1,
+	.nonstd		= 1,
+	.red.length	= 8,
+	.green.length	= 8,
+	.blue.length	= 8,
+};
+
+/*
+ * private data
+ */
+#define SSD1320_SEGS		160
+#define SSD1320_COMS		160
+
+/* the two supported controller variants (selected via OF match data) */
+enum oled_type {
+	TYPE_SSD1320,
+	TYPE_CH1120,
+};
+
+struct ssd1320 {
+	/* serializes the D/C# gpio state and the SPI transfer */
+	struct mutex			mutex;
+
+	/* configuration from device tree */
+	enum oled_type			type;
+	u32				watchdog;	/* seconds before blanking */
+	u32				max_brightness;
+	u32				default_brightness;
+	u32				com_range[2];
+	bool				com_reverse;
+	u32				seg_range[2];
+	bool				seg_reverse;
+	bool				seg_sequential;
+	bool				seg_first_odd;
+
+	/* optional controller tuning values; each has_* flag tells
+	 * whether the corresponding DT property was present */
+	bool				has_clk_divide_ratio;
+	u32				clk_divide_ratio;
+	bool				has_precharge_period;
+	u32				precharge_period;
+	bool				has_vcom_deselect_level;
+	u32				vcom_deselect_level;
+	bool				has_precharge_voltage;
+	u32				precharge_voltage;
+	bool				has_iref;
+	u32				iref;
+	bool				has_grayscale_table;
+	u32				grayscale_table[15];
+	bool				has_display_enh_a;
+	u32				display_enh_a;
+	bool				has_display_enh_b;
+	u32				display_enh_b;
+	bool				has_discharge_period;
+	u32				discharge_period;
+
+	/* image of display ram */
+	u32				width;
+	u32				height;
+	u8				*gddram;
+	unsigned int			gddram_size;
+
+	/* data ram, 8 bits per pixel */
+	u8				*vmem;
+	unsigned int			vmem_size;
+
+	struct fb_info			*fb;
+	struct gpio_desc		*vcc_gpio;
+	struct reset_control		*reset;
+	struct gpio_desc		*data_gpio;	/* D/C# line */
+	struct gpio_desc		*reset_gpio;
+	struct spi_device		*spi;
+
+	struct backlight_device		*backlight;
+	unsigned int			brightness;
+
+	/* watchdog timer */
+	struct delayed_work		wtd_work;
+	atomic_t			wtd_count;
+};
+
+/*
+ * send command to device
+ *
+ * D/C# low selects command mode; the mutex keeps the gpio state and
+ * the SPI transfer atomic with respect to data writes.
+ */
+static int send_cmd(struct ssd1320 *priv, u8 cmd)
+{
+	int ret;
+
+	mutex_lock(&priv->mutex);
+	gpiod_set_value(priv->data_gpio, 0);
+	ret = spi_write_then_read(priv->spi, &cmd, 1, NULL, 0);
+	mutex_unlock(&priv->mutex);
+	return ret;
+}
+
+/*
+ * send a command with one parameter byte. Bail out after the first
+ * failed transfer: the old `ret |= ...` pattern OR-ed two errnos into
+ * a meaningless value and still pushed the parameter byte even when
+ * its opcode had not been accepted.
+ */
+static int send_cmd2(struct ssd1320 *priv, u8 cmd, u8 arg)
+{
+	int ret;
+
+	ret = send_cmd(priv, cmd);
+	if (ret)
+		return ret;
+	return send_cmd(priv, arg);
+}
+
+/* send a command with two parameter bytes; stops on the first error */
+static int send_cmd3(struct ssd1320 *priv, u8 cmd, u8 arg1, u8 arg2)
+{
+	int ret;
+
+	ret = send_cmd2(priv, cmd, arg1);
+	if (ret)
+		return ret;
+	return send_cmd(priv, arg2);
+}
+
+/*
+ * send a list of command bytes, stopping at the first transfer error
+ */
+static int send_cmds(struct ssd1320 *priv, const u8 *cmd, unsigned int len)
+{
+	unsigned int idx;
+
+	for (idx = 0; idx < len; idx++) {
+		int err = send_cmd(priv, cmd[idx]);
+
+		if (err < 0)
+			return err;
+	}
+	return 0;
+}
+
+/*
+ * write given data into device gddram
+ *
+ * D/C# high selects data mode; the mutex pairs the gpio state with
+ * the SPI transfer, mirroring send_cmd().
+ */
+static int write_data(struct ssd1320 *priv, u8 *tx, unsigned int size)
+{
+	int ret;
+
+	mutex_lock(&priv->mutex);
+	gpiod_set_value(priv->data_gpio, 1);
+	ret = spi_write(priv->spi, tx, size);
+	mutex_unlock(&priv->mutex);
+	return ret;
+}
+
+/*
+ * soft reset & initialize ssd1320
+ *
+ * Pulse the hardware reset (with VCC held off), push the optional
+ * tuning parameters read from the device tree, program the panel
+ * geometry, clear GDDRAM and finally switch the display on.
+ */
+static int ssd1320_init(struct ssd1320 *priv)
+{
+	int ret;
+
+	if (priv->reset_gpio) {
+		if (priv->vcc_gpio)
+			gpiod_direction_output(priv->vcc_gpio, 0);
+		gpiod_set_value_cansleep(priv->reset_gpio, 1);
+		udelay(10);
+		gpiod_set_value_cansleep(priv->reset_gpio, 0);
+		udelay(10);
+	}
+
+	ret = send_cmd(priv, OPCODE_DISPLAY_OFF);
+	if (ret)
+		return ret;
+
+	/* each tuning parameter below is only sent when the matching
+	 * DT property was present (has_* flag set) */
+	if (priv->has_clk_divide_ratio) {
+		ret = send_cmd2(priv, OPCODE_CLK_DIVIDE_RATIO,
+				priv->clk_divide_ratio);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_precharge_period) {
+		ret = send_cmd2(priv, OPCODE_SET_PRECHARGE_PERIOD,
+				priv->precharge_period);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_precharge_voltage) {
+		ret = send_cmd2(priv, OPCODE_SET_PRECHARGE_VOLTAGE,
+				priv->precharge_voltage);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_vcom_deselect_level) {
+		ret = send_cmd2(priv, OPCODE_SET_VCOM_DESELECT_LVL,
+				priv->vcom_deselect_level);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_iref) {
+		ret = send_cmd2(priv, OPCODE_IREF_SELECTION, priv->iref);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_display_enh_a) {
+		ret = send_cmd2(priv, OPCODE_SET_DISPLAY_ENH_A,
+				priv->display_enh_a);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_display_enh_b) {
+		ret = send_cmd2(priv, OPCODE_SET_DISPLAY_ENH_B,
+				priv->display_enh_b);
+		if (ret)
+			return ret;
+	}
+
+	/* discharge period is a CH1120-only command */
+	if (priv->has_discharge_period) {
+		if (priv->type != TYPE_CH1120)
+			return -EINVAL;
+		ret = send_cmd2(priv, OPCODE_CH1120_SET_DISCHAGE_PERIOD,
+				priv->discharge_period);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->has_grayscale_table) {
+		u8 cmds[16];
+		int i;
+
+		/* opcode followed by the 15 grayscale levels */
+		cmds[0] = OPCODE_SET_GRAYSCALE_TBL;
+		for (i = 0; i < 15; i++)
+			cmds[i + 1] = priv->grayscale_table[i];
+
+		ret = send_cmds(priv, cmds, sizeof (cmds));
+		if (ret)
+			return ret;
+	}
+
+	ret = send_cmd2(priv, OPCODE_CONTRAST,
+			priv->default_brightness);
+	if (ret)
+		return ret;
+
+	/* SEG pin wiring: the bit layout differs between variants */
+	if (priv->seg_sequential || priv->seg_first_odd) {
+		u8 hw_config;
+
+		hw_config = 0;
+		switch (priv->type) {
+		case TYPE_SSD1320:
+			hw_config = 2;
+			if (!priv->seg_sequential)
+				hw_config |= (1 << 4);
+			if (priv->seg_first_odd)
+				hw_config |= (1 << 5);
+			break;
+		case TYPE_CH1120:
+			if (priv->seg_first_odd)
+				hw_config |= (1 << 0);
+			if (priv->seg_sequential)
+				hw_config |= (1 << 1);
+			break;
+		}
+
+		ret = send_cmd2(priv, OPCODE_SET_SEG_PINS_HWCONFIG,
+				hw_config);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->com_reverse) {
+		ret = send_cmd(priv, OPCODE_SET_COM_REV_SCAN_DIR);
+		if (ret)
+			return ret;
+	}
+
+	if (priv->seg_reverse) {
+		ret = send_cmd(priv, OPCODE_SET_SEG_REV_SCAN_DIR);
+		if (ret)
+			return ret;
+	}
+
+	/* panel shorter than the controller's 160 COM lines */
+	if (priv->height != SSD1320_COMS) {
+		u8 off;
+
+		if (priv->com_reverse)
+			off = priv->com_range[0];
+		else
+			off = SSD1320_COMS -  priv->com_range[0];
+
+		/* hardware will skip this number of COM */
+		ret = send_cmd2(priv, OPCODE_DISPLAY_OFFSET, off);
+                if (ret)
+                        return ret;
+
+		/* hardware will only use this number of COM + 1 */
+		ret = send_cmd2(priv, OPCODE_MULTIPLEX_RATIO,
+				priv->height - 1);
+                if (ret)
+                        return ret;
+
+		/* set row boundaries so that hardware switches to
+		   back to first row when row pointer reaches the
+		   upper bound. This way after writing the whole
+		   visible pixels data the hardware goes back (0,0) */
+		ret = send_cmd3(priv, OPCODE_SET_ROW, 0, priv->height - 1);
+		if (ret)
+			return ret;
+	}
+
+	/* zero ram */
+	memset(priv->gddram, 0x00, priv->gddram_size);
+	ret = write_data(priv, priv->gddram, priv->gddram_size);
+	if (ret)
+		return ret;
+
+	/* raise VCC only once RAM is cleared to avoid flashing garbage */
+	if (priv->vcc_gpio) {
+		gpiod_direction_output(priv->vcc_gpio, 1);
+		msleep(10);
+	}
+
+	return send_cmd(priv, OPCODE_DISPLAY_ON);
+}
+
+/*
+ * update area
+ *
+ * Repack the 8bpp shadow buffer into the panel's 4bpp GDDRAM layout
+ * (two pixels per byte: each source pixel's upper nibble, low nibble
+ * of the GDDRAM byte first) and push the whole frame over SPI,
+ * applying software rotation on the way.
+ */
+static int ssd1320_fb_update(struct ssd1320 *priv)
+{
+	const unsigned char *vmem;
+	unsigned int row, w, h;
+	unsigned int rotate;
+
+	w = priv->width;
+	h = priv->height;
+
+	rotate = priv->fb->var.rotate;
+	/* honour vertical panning: start at the visible line */
+	vmem = priv->vmem + w * priv->fb->var.yoffset;
+	memset(priv->gddram, 0, priv->gddram_size);
+
+	for (row = 0; row < h; row++) {
+		unsigned int hw_col;
+
+		/* walk hardware segments two at a time (one GDDRAM byte) */
+		for (hw_col = 0; hw_col < SSD1320_SEGS; hw_col += 2) {
+			unsigned int col, nibble;
+			u8 val;
+
+			if (hw_col < priv->seg_range[0])
+				continue;
+
+			col = hw_col - priv->seg_range[0];
+			val = 0;
+			for (nibble = 0; nibble < 2; nibble++) {
+				unsigned int off, x;
+				u8 vval;
+
+				x = col + nibble;
+				if (x >= w)
+					break;
+
+				/*
+				 * NOTE(review): the 90/270 formulas mix
+				 * w and h as strides; this looks correct
+				 * only for square panels (w == h) —
+				 * confirm for non-square geometries.
+				 */
+				switch (rotate) {
+				case 0:
+				default:
+					off = row * w + x;
+					break;
+
+				case 180:
+					off = w * h - (row * w + x) - 1;
+					break;
+
+				case 90:
+					off = (w - x - 1) * w + row;
+					break;
+
+				case 270:
+					off = x * w + (h - row - 1);
+					break;
+				}
+
+				vval = vmem[off] >> 4;
+				val |= vval << (nibble * 4);
+			}
+
+			priv->gddram[row * (SSD1320_SEGS / 2) +
+				     hw_col / 2] = val;
+		}
+	}
+
+	return write_data(priv, priv->gddram, priv->gddram_size);
+}
+
+/*
+ * frame buffer pan callback
+ */
+static int ssd1320_fb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+	struct ssd1320 *priv = info->par;
+	priv->fb->var.xoffset = var->xoffset;
+	priv->fb->var.yoffset = var->yoffset;
+	/* user activity: re-arm the blanking watchdog before repainting */
+	atomic_set(&priv->wtd_count, priv->watchdog);
+	ssd1320_fb_update(priv);
+	return 0;
+}
+
+/*
+ * frame buffer set_par callback, set videomode
+ */
+static int ssd1320_fb_set_par(struct fb_info *info)
+{
+	struct ssd1320 *priv = info->par;
+	/* called after rotate update */
+	/* re-arm the blanking watchdog, then repaint with the new rotation */
+	atomic_set(&priv->wtd_count, priv->watchdog);
+	ssd1320_fb_update(priv);
+	return 0;
+}
+
+/*
+ * validate a proposed var: only the rotate field is negotiable (and
+ * only the four right angles); everything else snaps back to the
+ * current mode.
+ */
+static int ssd1320_fb_check_var(struct fb_var_screeninfo *var,
+				struct fb_info *info)
+{
+	unsigned int want = var->rotate;
+
+	switch (want) {
+	case 0:
+	case 90:
+	case 180:
+	case 270:
+		break;
+	default:
+		want = 0;
+		break;
+	}
+
+	*var = info->var;
+	var->rotate = want;
+	return 0;
+}
+
+/*
+ * frame buffer blank callback (no-op: blanking is not implemented)
+ */
+static int ssd1320_fb_blank(int blank, struct fb_info *info)
+{
+	return 0;
+}
+
+/*
+ * deferred io callback after write(2)
+ *
+ * the damage coordinates are ignored: the whole frame is repainted.
+ */
+static void
+ssd1320_fb_defio_damage_range(struct fb_info *info, off_t off, size_t len)
+{
+	struct ssd1320 *priv = info->par;
+	atomic_set(&priv->wtd_count, priv->watchdog);
+	ssd1320_fb_update(priv);
+}
+
+/*
+ * deferred io callback after fillrect/copyarea/imageblit
+ *
+ * the damage rectangle is ignored: the whole frame is repainted.
+ */
+static void
+ssd1320_fb_defio_damage_area(struct fb_info *info, u32 x, u32 y,
+			     u32 width, u32 height)
+{
+	struct ssd1320 *priv = info->par;
+	atomic_set(&priv->wtd_count, priv->watchdog);
+	ssd1320_fb_update(priv);
+}
+
+/* generate the sys_read/sys_write/fillrect/... deferred-io helpers */
+FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(ssd1320_fb,
+                                   ssd1320_fb_defio_damage_range,
+                                   ssd1320_fb_defio_damage_area)
+
+static const struct fb_ops ssd1320_fb_ops = {
+	.owner		= THIS_MODULE,
+	FB_DEFAULT_DEFERRED_OPS(ssd1320_fb),
+	.fb_pan_display	= ssd1320_fb_pan,
+	.fb_blank	= ssd1320_fb_blank,
+	.fb_check_var	= ssd1320_fb_check_var,
+	.fb_set_par	= ssd1320_fb_set_par,
+};
+
+/*
+ * callback after mapped dirty pages writeback timeout
+ */
+static void
+ssd1320_fb_deferred_io(struct fb_info *info, struct list_head *pagereflist)
+{
+	struct ssd1320 *priv = info->par;
+	atomic_set(&priv->wtd_count, priv->watchdog);
+        ssd1320_fb_update(priv);
+}
+
+/*
+ * watchdog timer: runs every second; when no framebuffer activity has
+ * re-armed wtd_count before it reaches zero, blank the panel.
+ */
+static void wtd_work_cb(struct work_struct *t)
+{
+	struct delayed_work *dwork = to_delayed_work(t);
+	struct ssd1320 *priv = container_of(dwork, struct ssd1320, wtd_work);
+
+	if (atomic_dec_and_test(&priv->wtd_count)) {
+		dev_err(&priv->spi->dev, "watchdog triggered\n");
+		memset(priv->vmem, 0, priv->vmem_size);
+		ssd1320_fb_update(priv);
+	}
+
+	schedule_delayed_work(&priv->wtd_work, HZ);
+}
+
+/*
+ * backlight control
+ *
+ * Map backlight brightness onto the OLED contrast register.
+ */
+static int ssd1320_bl_update_status(struct backlight_device *bl)
+{
+	struct ssd1320 *priv = bl_get_data(bl);
+	unsigned int brightness = bl->props.brightness;
+	u8 bl_cmds[2];
+	int ret;
+
+	/* clamp to the panel limit from the device tree */
+	if (brightness > priv->max_brightness)
+		brightness = priv->max_brightness;
+
+	bl_cmds[0] = OPCODE_CONTRAST;
+	bl_cmds[1] = brightness;
+
+	ret = send_cmds(priv, bl_cmds, sizeof (bl_cmds));
+	if (ret < 0)
+		return ret;
+
+	/*
+	 * record the clamped value actually programmed (the old code
+	 * stored the raw request, so get_brightness() could report a
+	 * level the panel was never set to).
+	 */
+	priv->brightness = brightness;
+	return 0;
+}
+
+/* report the last brightness value pushed to the panel */
+static int ssd1320_bl_get_brightness(struct backlight_device *bl)
+{
+	struct ssd1320 *priv = bl_get_data(bl);
+
+	return priv->brightness;
+}
+
+/* backlight_device_register() takes a const ops pointer: constify */
+static const struct backlight_ops ssd1320_bl_ops = {
+	.update_status		= ssd1320_bl_update_status,
+	.get_brightness		= ssd1320_bl_get_brightness,
+};
+
+/* initial properties passed to backlight_device_register() */
+static const struct backlight_properties ssd1320_bl_props = {
+	.power		= FB_BLANK_UNBLANK,
+	.max_brightness	= SSD1320_MAX_BRIGHTNESS,
+	.type		= BACKLIGHT_RAW,
+};
+
+/* expose the OLED contrast register as a backlight device */
+static int init_backlight(struct ssd1320 *priv)
+{
+	struct backlight_device *bl;
+
+	bl = backlight_device_register("ssd1320", &priv->spi->dev,
+				       priv, &ssd1320_bl_ops,
+				       &ssd1320_bl_props);
+	if (IS_ERR(bl)) {
+		dev_err(&priv->spi->dev, "error %ld on backlight register\n",
+			PTR_ERR(bl));
+		return PTR_ERR(bl);
+	}
+
+	bl->props.brightness = priv->brightness;
+	priv->backlight = bl;
+	return 0;
+}
+
+/*
+ * platform device probe callback
+ */
+static int ssd1320_probe(struct spi_device *spi)
+{
+	struct device_node *node = spi->dev.of_node;
+	struct fb_deferred_io *ssd_defio;
+	struct ssd1320 *priv;
+	struct fb_info *fb;
+	int ret;
+
+	if (!node) {
+		dev_err(&spi->dev, "No device tree data found!\n");
+		return -EINVAL;
+	}
+
+	fb = framebuffer_alloc(sizeof (*priv), &spi->dev);
+	if (!fb)
+		return -ENOMEM;
+
+	priv = fb->par;
+	mutex_init(&priv->mutex);
+	priv->spi = spi;
+	priv->fb = fb;
+	priv->type = (enum oled_type)of_device_get_match_data(&spi->dev);
+
+	priv->vcc_gpio = devm_gpiod_get_optional(&spi->dev, "ssd1320,vcc",
+					GPIOD_ASIS);
+	if (IS_ERR(priv->vcc_gpio)) {
+		ret = PTR_ERR(priv->vcc_gpio);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&spi->dev,
+				"failed to get vcc gpio: %d\n", ret);
+		goto fail;
+	}
+
+	priv->data_gpio = devm_gpiod_get(&spi->dev,
+					 "ssd1320,data-select",
+					 GPIOD_OUT_LOW);
+	if (IS_ERR(priv->data_gpio)) {
+		ret = PTR_ERR(priv->data_gpio);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&spi->dev, "failed to get data gpio: %d\n",
+				ret);
+		goto fail;
+	}
+
+	priv->reset_gpio = devm_gpiod_get(&spi->dev,
+					  "ssd1320,reset",
+					 GPIOD_OUT_LOW);
+	if (IS_ERR(priv->reset_gpio)) {
+		ret = PTR_ERR(priv->reset_gpio);
+		if (ret != -EPROBE_DEFER)
+			dev_err(&spi->dev, "failed to get reset gpio: %d\n",
+				ret);
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,watchdog", &priv->watchdog);
+	if (ret) {
+		dev_err(&spi->dev, "failed to get watchdog\n");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,max-brightness",
+				   &priv->max_brightness);
+	if (ret) {
+		dev_err(&spi->dev, "failed to get max-brightness\n");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,default-brightness",
+				   &priv->default_brightness);
+	if (ret) {
+		dev_err(&spi->dev, "failed to get default-brightness\n");
+		goto fail;
+	}
+
+	ret = of_property_read_u32_array(node, "ssd1320,com-range",
+					 priv->com_range,
+					 ARRAY_SIZE(priv->com_range));
+	if (ret) {
+		dev_err(&spi->dev, "failed to get com-range\n");
+		goto fail;
+	}
+
+
+	ret = of_property_read_u32_array(node, "ssd1320,seg-range",
+					 priv->seg_range,
+					 ARRAY_SIZE(priv->seg_range));
+	if (ret) {
+		dev_err(&spi->dev, "failed to get seg-range\n");
+		goto fail;
+	}
+
+	/* sanity check on screen size */
+	if (priv->com_range[0] >= SSD1320_COMS ||
+	    priv->com_range[1] >= SSD1320_COMS ||
+	    priv->com_range[0] >= priv->com_range[1])  {
+		dev_err(&spi->dev, "unsupported com-range\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (priv->seg_range[0] >= SSD1320_SEGS ||
+	    priv->seg_range[1] >= SSD1320_SEGS ||
+	    priv->seg_range[0] >= priv->seg_range[1])  {
+		dev_err(&spi->dev, "unsupported seg-range\n");
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	priv->height = priv->com_range[1] - priv->com_range[0] + 1;
+	priv->width = priv->seg_range[1] - priv->seg_range[0] + 1;
+
+	priv->com_reverse = of_property_read_bool(node,
+						  "ssd1320,com-reverse-dir");
+	priv->seg_reverse = of_property_read_bool(node,
+						  "ssd1320,seg-reverse-dir");
+	priv->seg_sequential = of_property_read_bool(node,
+						     "ssd1320,seg-sequential");
+	priv->seg_first_odd = of_property_read_bool(node,
+						    "ssd1320,seg-first-odd");
+
+	ret = of_property_read_u32(node, "ssd1320,clk-divide-ratio",
+				   &priv->clk_divide_ratio);
+	priv->has_clk_divide_ratio = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get clk-divide-ratio\n");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,precharge-period",
+				   &priv->precharge_period);
+	priv->has_precharge_period = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get precharge-period\n");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,vcom-deselect-level",
+				   &priv->vcom_deselect_level);
+	priv->has_vcom_deselect_level = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get vcom-deselect-level");
+		goto fail;
+	}
+
+	/*
+	 * NOTE(review): a second, byte-identical read of
+	 * "ssd1320,vcom-deselect-level" stood here; the redundant
+	 * copy/paste block has been dropped.  If a different property
+	 * was intended (the neighbouring reads suggest a pattern), it
+	 * must be added back explicitly.
+	 */
+
+	ret = of_property_read_u32(node, "ssd1320,precharge-voltage",
+				   &priv->precharge_voltage);
+	priv->has_precharge_voltage = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get precharge-voltage");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,iref", &priv->iref);
+	priv->has_iref = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get iref");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,display-enh-a",
+				   &priv->display_enh_a);
+	priv->has_display_enh_a = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get ssd1320,display-enh-a");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,display-enh-b",
+				   &priv->display_enh_b);
+	priv->has_display_enh_b = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get ssd1320,display-enh-b");
+		goto fail;
+	}
+
+	ret = of_property_read_u32(node, "ssd1320,discharge-period",
+				   &priv->discharge_period);
+	priv->has_discharge_period = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get ssd1320,discharge-period");
+		goto fail;
+	}
+
+	ret = of_property_read_u32_array(node, "ssd1320,grayscale-table",
+					 priv->grayscale_table,
+					 ARRAY_SIZE(priv->grayscale_table));
+	priv->has_grayscale_table = (ret != -EINVAL);
+	if (ret && ret != -EINVAL) {
+		dev_err(&spi->dev, "failed to get grayscale-table\n");
+		goto fail;
+	}
+
+	priv->brightness = priv->default_brightness;
+
+	/* setup framebuffer */
+	fb->fbops = &ssd1320_fb_ops;
+	fb->flags = FBINFO_VIRTFB | FBINFO_HWACCEL_YPAN;
+	fb->var = ssd1320_fb_var;
+	fb->fix = ssd1320_fb_fix;
+
+	fb->var.xres = priv->width;
+	fb->var.yres = priv->height;
+	fb->var.xres_virtual = priv->width;
+	fb->var.yres_virtual = priv->height * 2;
+
+	/* twice lcd size so we can pan in one direction */
+	fb->fix.smem_len = (priv->width * priv->height) * 2;
+	fb->fix.line_length = priv->width;
+	fb->var.rotate = 0;
+
+	/* allocate hardware video memory, no way to make the hardware
+	 * skip some segments so the full hw width is always used */
+	priv->gddram_size = SSD1320_SEGS * priv->height / 2;
+	priv->gddram = vmalloc(priv->gddram_size);
+	if (!priv->gddram) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	memset(priv->gddram, 0, priv->gddram_size);
+
+	/* allocate video memory */
+	priv->vmem_size = PAGE_ALIGN(fb->fix.smem_len);
+	priv->vmem = vmalloc(priv->vmem_size);
+	if (!priv->vmem) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+	memset(priv->vmem, 0, priv->vmem_size);
+	fb->screen_buffer = priv->vmem;
+
+	ssd_defio = devm_kzalloc(&spi->dev, sizeof (*ssd_defio), GFP_KERNEL);
+	if (!ssd_defio) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ssd_defio->deferred_io = ssd1320_fb_deferred_io;
+	fb->fbdefio = ssd_defio;
+
+	ret = ssd1320_init(priv);
+	if (ret)
+		goto fail;
+
+	/* if (1){ */
+	/* 	unsigned int col = 0; */
+	/* 	unsigned int row = 0; */
+	/* 	for (row = 0; row < priv->height; row++) { */
+	/* 		if (signal_pending(current)) */
+	/* 			break; */
+
+	/* 		for (col = 0; col < priv->width / 2; col++) { */
+	/* 			u8 c; */
+
+	/* 			if (signal_pending(current)) */
+	/* 				break; */
+
+	/* 			/\* if (row <= 118) *\/ */
+	/* 			/\* 	c = 0x0; *\/ */
+	/* 			/\* else *\/ */
+	/* 			/\* 	c = 0xff; *\/ */
+	/* 			c = 0xff; */
+
+	/* 			printk("row:%u col:%u\n", row, col); */
+	/* 			write_data(priv, &c, 1); */
+	/* 			mdelay(1); */
+	/* 		} */
+	/* 	} */
+	/* } */
+
+	/* return -EINVAL; */
+
+	if ((ret = init_backlight(priv)))
+		goto fail;
+
+	fb_deferred_io_init(fb);
+
+	/* register frame buffer */
+	ret = register_framebuffer(fb);
+	if (ret < 0)
+		goto fail;
+
+	INIT_DELAYED_WORK(&priv->wtd_work, wtd_work_cb);
+
+	if (priv->watchdog) {
+		atomic_set(&priv->wtd_count, priv->watchdog);
+		schedule_delayed_work(&priv->wtd_work, HZ);
+	}
+
+	dev_info(&spi->dev,
+		 "fb%d: SSD1320 frame buffer device (%ux%u screen)\n",
+		 fb->node, priv->width, priv->height);
+
+	dev_set_drvdata(&spi->dev, priv);
+	return 0;
+
+fail:
+	vfree(priv->vmem);	/* vfree(NULL) is a no-op */
+	vfree(priv->gddram);	/* gddram was previously leaked on error */
+	if (priv->backlight)
+		backlight_device_unregister(priv->backlight);
+	framebuffer_release(fb);
+	return ret;
+}
+
+/*
+ * platform device remove callback
+ */
+static void ssd1320_remove(struct spi_device *spi)
+{
+	struct ssd1320 *priv;
+
+	priv = dev_get_drvdata(&spi->dev);
+	cancel_delayed_work_sync(&priv->wtd_work);
+	backlight_device_unregister(priv->backlight);
+	unregister_framebuffer(priv->fb);
+	fb_deferred_io_cleanup(priv->fb);
+	vfree(priv->vmem);
+	vfree(priv->gddram);
+	framebuffer_release(priv->fb);
+}
+
+static const struct spi_device_id ssd1320_id_table[] = {
+	{ "ssd1320", 0 },
+	{ "ch1120", 0 },
+	{}
+};
+MODULE_DEVICE_TABLE(spi, ssd1320_id_table);
+
+static const struct of_device_id ssd1320_of_match[] = {
+	{
+		.compatible = "solomon,ssd1320",
+		.data = (void *)TYPE_SSD1320,
+	},
+	{
+		.compatible = "chipwealth,ch1120",
+		.data = (void *)TYPE_CH1120,
+	},
+	{},
+};
+MODULE_DEVICE_TABLE(of, ssd1320_of_match);
+
+static struct spi_driver ssd1320_driver = {
+	.driver = {
+		.name		= "ssd1320",
+		.of_match_table	= ssd1320_of_match,
+	},
+	.probe		= ssd1320_probe,
+	.remove		= ssd1320_remove,
+	.id_table	= ssd1320_id_table,
+};
+
+module_spi_driver(ssd1320_driver);
+
+MODULE_DESCRIPTION("SSD1320 driver");
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_LICENSE("GPL");
diff -Nruw linux-6.13.12-fbx/fs/exfat-fbx./Kconfig linux-6.13.12-fbx/fs/exfat-fbx/Kconfig
--- linux-6.13.12-fbx/fs/exfat-fbx./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/fs/exfat-fbx/Kconfig	2025-09-25 17:40:36.559372145 +0200
@@ -0,0 +1,3 @@
+
+config EXFAT_FS_FBX
+	tristate "exFAT fs support (fbx)"
diff -Nruw linux-6.13.12-fbx/fs/exfat-fbx./Makefile linux-6.13.12-fbx/fs/exfat-fbx/Makefile
--- linux-6.13.12-fbx/fs/exfat-fbx./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/fs/exfat-fbx/Makefile	2025-09-25 17:40:36.559372145 +0200
@@ -0,0 +1,13 @@
+
+obj-$(CONFIG_EXFAT_FS_FBX)	+= exfat.o
+
+exfat-y	= super.o				\
+	inode.o					\
+	fat.o					\
+	read-write.o				\
+	upcase.o				\
+	bitmap.o				\
+	time.o					\
+	dir.o					\
+	namei.o					\
+	file.o
diff -Nruw linux-6.13.12-fbx/fs/exfat-fbx./bitmap.c linux-6.13.12-fbx/fs/exfat-fbx/bitmap.c
--- linux-6.13.12-fbx/fs/exfat-fbx./bitmap.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/fs/exfat-fbx/bitmap.c	2025-09-25 17:40:36.559372145 +0200
@@ -0,0 +1,606 @@
+/*
+ * bitmap.c for exfat
+ * Created by <nschichan@freebox.fr> on Thu Aug  8 19:21:05 2013
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+
+static inline sector_t exfat_bitmap_sector(struct exfat_sb_info *sbi,
+					   u32 cluster)
+{
+	return sbi->first_bitmap_sector + ((cluster / 8) >> sbi->sectorbits);
+}
+
+static inline u32 exfat_bitmap_off(struct exfat_sb_info *sbi,
+				   u32 cluster)
+{
+	return (cluster / 8) & sbi->sectormask;
+}
+
+static inline u32 exfat_bitmap_shift(u32 cluster)
+{
+	return cluster & 7;
+}
+
+static int __find_get_free_cluster(struct inode *inode, u32 *out_cluster)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+	while (1) {
+		sector_t sect = exfat_bitmap_sector(sbi,
+						    sbi->cur_bitmap_cluster);
+		u32 off = exfat_bitmap_off(sbi, sbi->cur_bitmap_cluster);
+		u32 shift = exfat_bitmap_shift(sbi->cur_bitmap_cluster);
+
+		/* disk is full */
+		if (!sbi->free_clusters)
+			break;
+
+		if (!sbi->cur_bitmap_bh ||
+		    sect != sbi->cur_bitmap_sector) {
+			if (sbi->cur_bitmap_bh)
+				brelse(sbi->cur_bitmap_bh);
+			sbi->cur_bitmap_bh = sb_bread(inode->i_sb, sect);
+			sbi->cur_bitmap_sector = sect;
+			if (!sbi->cur_bitmap_bh) {
+				exfat_msg(inode->i_sb, KERN_ERR,
+					  "unable to read bitmap sector "
+					  "at %llu", (unsigned long long)sect);
+				return -EIO;
+			}
+		}
+
+		if (!(sbi->cur_bitmap_bh->b_data[off] & (1 << shift))) {
+			sbi->cur_bitmap_bh->b_data[off] |= (1 << shift);
+			*out_cluster = sbi->cur_bitmap_cluster;
+			goto found;
+		}
+
+		++sbi->cur_bitmap_cluster;
+		if (sbi->cur_bitmap_cluster == sbi->cluster_count)
+			sbi->cur_bitmap_cluster = 0;
+	}
+	return -ENOSPC;
+
+found:
+	sbi->prev_free_cluster = *out_cluster;
+	--sbi->free_clusters;
+	mark_buffer_dirty(sbi->cur_bitmap_bh);
+	return 0;
+}
+
+static int __put_cluster(struct inode *inode, u32 cluster)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	sector_t sect = exfat_bitmap_sector(sbi, cluster);
+	u32 off = exfat_bitmap_off(sbi, cluster);
+	u32 shift = exfat_bitmap_shift(cluster);
+
+
+	if (!sbi->cur_bitmap_bh || sect != sbi->cur_bitmap_sector) {
+		if (sbi->cur_bitmap_bh)
+			brelse(sbi->cur_bitmap_bh);
+		sbi->cur_bitmap_bh = sb_bread(inode->i_sb, sect);
+		if (!sbi->cur_bitmap_bh) {
+			exfat_msg(inode->i_sb, KERN_ERR,
+				  "unable to read bitmap sector at %llu",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+		sbi->cur_bitmap_sector = sect;
+		sbi->cur_bitmap_cluster = cluster;
+	}
+	if ((sbi->cur_bitmap_bh->b_data[off] & (1 << shift)) == 0) {
+		exfat_fs_error(inode->i_sb, "put_cluster: cluster %u "
+			  "already free.", cluster);
+		return -EIO;
+	}
+
+	++sbi->free_clusters;
+	sbi->cur_bitmap_bh->b_data[off] &= ~(1 << shift);
+	sbi->prev_free_cluster = cluster;
+	mark_buffer_dirty(sbi->cur_bitmap_bh);
+	/* sync_dirty_buffer(sbi->cur_bitmap_bh); */
+	return 0;
+}
+
+/*
+ * setup search to start at given cluster.
+ */
+static void __exfat_reset_bitmap(struct exfat_sb_info *sbi, u32 cluster)
+{
+	sector_t sect;
+
+	if (cluster >= sbi->cluster_count)
+		cluster = 0;
+
+	sect = exfat_bitmap_sector(sbi, cluster);
+	if (sbi->cur_bitmap_sector != sect) {
+		sbi->cur_bitmap_sector = sect;
+		if (sbi->cur_bitmap_bh) {
+			brelse(sbi->cur_bitmap_bh);
+			sbi->cur_bitmap_bh = NULL;
+		}
+	}
+	sbi->cur_bitmap_cluster = cluster;
+}
+
+static bool all_contiguous(u32 *clusters, u32 nr)
+{
+	u32 i;
+
+	for (i = 0; i < nr - 1; ++i) {
+		if (clusters[i] != clusters[i + 1] - 1)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * hint must be the immediately after the last allocated cluster of
+ * the inode.
+ */
+int exfat_alloc_clusters(struct inode *inode, u32 hint, u32 *clusters, u32 nr)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	u32 i;
+
+	mutex_lock(&sbi->bitmap_mutex);
+	__exfat_reset_bitmap(sbi, hint - 2);
+	for (i = 0; i < nr; ++i) {
+		u32 new;
+		int error;
+
+		error = __find_get_free_cluster(inode, &new);
+		if (error) {
+			mutex_unlock(&sbi->bitmap_mutex);
+			return error;
+		}
+
+		clusters[i] = new + 2;
+	}
+	mutex_unlock(&sbi->bitmap_mutex);
+
+	/*
+	 * all clusters found: now see if we need to update/create a
+	 * fat chain.
+	 */
+	if (info->first_cluster == 0) {
+		info->first_cluster = clusters[0];
+		if (all_contiguous(clusters, nr)) {
+			/*
+			 * first cluster alloc on inode and all
+			 * clusters are contiguous.
+			 */
+			info->flags |= EXFAT_I_FAT_INVALID;
+		} else {
+			/*
+			 * first alloc and already fragmented.
+			 */
+			return exfat_write_fat(inode, 0, clusters, nr);
+		}
+	} else {
+		int error;
+		if ((info->flags & EXFAT_I_FAT_INVALID) &&
+		    (clusters[0] != hint || !all_contiguous(clusters, nr))) {
+			/*
+			 * must now use fat chain instead of bitmap.
+			 */
+			info->flags &= ~(EXFAT_I_FAT_INVALID);
+
+			/*
+			 * write the contiguous chain that would
+			 * previously be accessed without the FAT
+			 * chain.
+			 */
+			error = exfat_write_fat_contiguous(inode,
+						  info->first_cluster,
+						  hint - info->first_cluster);
+			if (error)
+				return error;
+		}
+
+		if ((info->flags & EXFAT_I_FAT_INVALID) == 0) {
+			/*
+			 * link the allocated clusters after hint.
+			 */
+			error = exfat_write_fat(inode, hint - 1, clusters, nr);
+			if (error)
+				return  error;
+		}
+
+	}
+
+	/*
+	 * update i_blocks.
+	 */
+	inode->i_blocks += nr << (sbi->clusterbits - 9);
+	info->allocated_clusters += nr;
+
+	/*
+	 * caller must call mark_inode_dirty so that inode
+	 * first_cluster and inode flags get written to the disk.
+	 * caller must update inode size (directory and regular file
+	 * have different rules).
+	 */
+	return 0;
+}
+
+
+static int exfat_free_clusters_contiguous(struct inode *inode,
+					  u32 start, u32 nr)
+{
+	u32 cluster;
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	int error = 0;
+
+	mutex_lock(&sbi->bitmap_mutex);
+	for (cluster = start; cluster < start + nr; ++cluster) {
+		error = __put_cluster(inode, cluster - 2);
+		if (error)
+			break;
+	}
+	mutex_unlock(&sbi->bitmap_mutex);
+	return error;
+}
+
+static int exfat_free_clusters_fat(struct inode *inode,
+				   u32 fcluster_start, u32 nr)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	u32 fcluster;
+	int error = 0;
+
+	mutex_lock(&sbi->bitmap_mutex);
+	for (fcluster = fcluster_start; fcluster < fcluster_start + nr;
+	     ++fcluster) {
+		u32 dcluster;
+		/* (removed a shadowing 'int error' that swallowed failures) */
+
+		error = exfat_get_fat_cluster(inode, fcluster, &dcluster);
+		if (error)
+			break;
+
+		error = __put_cluster(inode, dcluster - 2);
+		if (error)
+			break;
+	}
+	mutex_unlock(&sbi->bitmap_mutex);
+
+	/*
+	 * per-inode file cluster to disk cluster translation cache
+	 * mostly now holds entries to the zone we just truncated, so
+	 * they must not be kept (this could lead to FS corruption).
+	 */
+	exfat_inode_cache_drop(inode);
+
+	return error;
+}
+
+int exfat_free_clusters_inode(struct inode *inode, u32 fcluster_start)
+{
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	int error;
+	u32 nr_to_free = info->allocated_clusters - fcluster_start;
+
+	if (info->first_cluster == 0 || nr_to_free == 0)
+		/*
+		 * no clusters allocated, or nothing to do
+		 */
+		return 0;
+
+	if (info->flags & EXFAT_I_FAT_INVALID)
+		error = exfat_free_clusters_contiguous(inode,
+				       info->first_cluster + fcluster_start,
+				       nr_to_free);
+	else
+		error = exfat_free_clusters_fat(inode, fcluster_start,
+					nr_to_free);
+	if (error)
+		return error;
+
+	info->allocated_clusters -= nr_to_free;
+	inode->i_blocks = EXFAT_I(inode)->allocated_clusters <<
+		(EXFAT_SB(inode->i_sb)->clusterbits - 9);
+
+	/*
+	 * update inode info, caller must call mark_inode_dirty and
+	 * update inode->i_size.
+	 */
+	if (fcluster_start == 0) {
+		info->first_cluster = 0;
+		info->flags &= ~(EXFAT_I_FAT_INVALID);
+	}
+	return 0;
+}
+
+static u32 count_clusters_bh(struct buffer_head *bh, u32 count)
+{
+	u8 *ptr = bh->b_data;
+	u32 ret = 0;
+	u8 val;
+
+	while (count >= sizeof (u64) * 8) {
+		u64 val = *(u64*)ptr;
+
+		ret += hweight64(~val);
+		count -= sizeof (u64) * 8;
+		ptr += sizeof (u64);
+	}
+	if (count >= sizeof (u32) * 8) {
+		u32 val = *(u32*)ptr;
+
+		ret += hweight32(~val);
+		count -= sizeof (u32) * 8;
+		ptr += sizeof (u32);
+	}
+	if (count >= sizeof (u16) * 8) {
+		u16 val = *(u16*)ptr;
+
+		ret += hweight16(~val);
+		count -= sizeof (u16) * 8;
+		ptr += sizeof (u16);
+	}
+	if (count >= sizeof (u8) * 8) {
+		u8 val = *ptr;
+
+		ret += hweight8(~val);
+		count -= sizeof (u8) * 8;
+		ptr += sizeof (u8);
+	}
+
+	if (count) {
+		val = *ptr;
+		while (count) {
+			ret += (~val & 1);
+			val >>= 1;
+			--count;
+		}
+	}
+	return ret;
+}
+
+/*
+ * only called during mount, so taking sbi->bitmap_mutex should not be
+ * needed.
+ */
+static int exfat_get_free_cluster_count(struct super_block *sb, u32 *out_count)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u32 clusters_per_sector = 8 * sbi->sectorsize;
+	u32 cluster;
+
+	*out_count = 0;
+	for (cluster = 0; cluster < sbi->cluster_count;
+	     cluster += clusters_per_sector) {
+		sector_t sect = exfat_bitmap_sector(sbi, cluster);
+		struct buffer_head *bh;
+		u32 count = clusters_per_sector;
+
+		if (cluster + clusters_per_sector > sbi->cluster_count)
+			count = sbi->cluster_count - cluster;
+
+		bh = sb_bread(sb, sect);
+		if (!bh) {
+			exfat_msg(sb, KERN_ERR,
+				  "unable to read bitmap sector at %llu",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+		*out_count += count_clusters_bh(bh, count);
+		brelse(bh);
+	}
+	return 0;
+}
+
+/*
+ * setup a bitmap context, preload a bh from the requested starting
+ * cluster.
+ */
+int exfat_init_bitmap_context(struct super_block *sb,
+			      struct exfat_bitmap_ctx *ctx,
+			      u32 cluster)
+{
+	memset(ctx, 0, sizeof (*ctx));
+	ctx->sb = sb;
+
+	cluster -= 2;
+	if (cluster >= EXFAT_SB(sb)->cluster_count)
+		return -ENOSPC;
+
+	ctx->cur_sector = exfat_bitmap_sector(EXFAT_SB(sb), cluster);
+	ctx->bh = sb_bread(ctx->sb, ctx->cur_sector);
+
+	if (!ctx->bh) {
+		exfat_msg(sb, KERN_ERR, "unable to read bitmap sector at %llu",
+			  (unsigned long long)ctx->cur_sector);
+		return -EIO;
+	}
+	return 0;
+}
+
+/*
+ * release bh in an already setup bitmap context.
+ */
+void exfat_exit_bitmap_context(struct exfat_bitmap_ctx *ctx)
+{
+	if (ctx->bh)
+		brelse(ctx->bh);
+}
+
+/*
+ * test a specific cluster usage in the bitmap. reuse the bh in the
+ * exfat_bitmap_ctx or read a new one if starting cluster is outside
+ * the current one.
+ */
+static int exfat_test_bitmap_cluster(struct exfat_bitmap_ctx *ctx,
+				     uint32_t cluster, bool *cluster_in_use)
+{
+	uint32_t off = exfat_bitmap_off(EXFAT_SB(ctx->sb), cluster);
+	int shift = exfat_bitmap_shift(cluster);
+	sector_t sect = exfat_bitmap_sector(EXFAT_SB(ctx->sb), cluster);
+
+	if (sect != ctx->cur_sector) {
+		brelse(ctx->bh);	/* drop cached bh (NULL is a no-op) */
+		ctx->cur_sector = sect;
+		ctx->bh = sb_bread(ctx->sb, ctx->cur_sector);
+		if (!ctx->bh) {
+			exfat_msg(ctx->sb, KERN_ERR,
+				  "unable to read bitmap sector at %llu",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+	}
+
+	*cluster_in_use = !!(ctx->bh->b_data[off] & (1 << shift));
+	return 0;
+}
+
+/*
+ * update first_in_use and nr_in_use with the first zone of used
+ * clusters starting from start_cluster.
+ */
+int exfat_test_bitmap(struct exfat_bitmap_ctx *ctx, uint32_t start_cluster,
+		      uint32_t *first_in_use, uint32_t *nr_in_use)
+{
+	bool in_use = false;
+	int error = 0;
+	struct exfat_sb_info *sbi = EXFAT_SB(ctx->sb);
+
+	start_cluster -= 2;
+
+	/*
+	 * scan bitmap until we find a cluster that is in use.
+	 */
+	while (1) {
+		if (start_cluster == sbi->cluster_count) {
+			/*
+			 * readched end of disk: no more in use
+			 * cluster found.
+			 */
+			*first_in_use = sbi->cluster_count;
+			*nr_in_use = 0;
+			return 0;
+		}
+		error = exfat_test_bitmap_cluster(ctx, start_cluster, &in_use);
+		if (error)
+			return error;
+		if (in_use)
+			break;
+		++start_cluster;
+	}
+
+
+	/*
+	 * update first_in_use, and scan until a free cluster is
+	 * found.
+	 */
+	*first_in_use = start_cluster + 2;
+	*nr_in_use = 0;
+	while (1) {
+		error = exfat_test_bitmap_cluster(ctx, start_cluster, &in_use);
+		if (error)
+			return error;
+		if (!in_use)
+			break;
+		++(*nr_in_use);
+		++start_cluster;
+	}
+	return 0;
+}
+
+int exfat_init_bitmap(struct inode *root)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(root->i_sb);
+	struct exfat_bitmap_entry *be;
+	struct exfat_dir_ctx dctx;
+	u32 first_bitmap_cluster;
+	u32 last_bitmap_cluster;
+
+	int error;
+
+	mutex_init(&sbi->bitmap_mutex);
+
+	error = exfat_init_dir_ctx(root, &dctx, 0);
+	if (error)
+		return error;
+
+try_bitmap:
+	error = -ENOENT;
+	be = __exfat_dentry_next(&dctx, E_EXFAT_BITMAP, 0xff, true, NULL);
+	if (!be) {
+		exfat_msg(root->i_sb, KERN_ERR, "root directory does not "
+			  "have a bitmap entry.");
+		goto fail;
+	}
+
+	if (exfat_bitmap_nr(be->flags) != 0)
+		/*
+		 * not expected to find a second bitmap entry here
+		 * since we checked during superblock fill that we
+		 * were not on a texFAT volume ...
+		 */
+		goto try_bitmap;
+
+
+	error = -EINVAL;
+	if (__le64_to_cpu(be->length) * 8 < sbi->cluster_count) {
+		exfat_msg(root->i_sb, KERN_INFO, "bitmap does not cover "
+			  "the whole cluster heap.");
+		goto fail;
+	}
+
+	first_bitmap_cluster = __le32_to_cpu(be->cluster_addr);
+	last_bitmap_cluster = first_bitmap_cluster +
+		(__le64_to_cpu(be->length) >> sbi->clusterbits);
+
+	/*
+	 * check that bitmap start and end clusters are inside the
+	 * disk.
+	 */
+	error = -ERANGE;
+	if (first_bitmap_cluster < 2 ||
+	    first_bitmap_cluster >= sbi->cluster_count) {
+		exfat_msg(root->i_sb, KERN_ERR, "bitmap start cluster is "
+			  "outside disk limits.");
+		goto fail;
+	}
+	if (last_bitmap_cluster < 2 ||
+	    last_bitmap_cluster >= sbi->cluster_count) {
+		exfat_msg(root->i_sb, KERN_ERR, "bitmap last cluster is "
+			  "outside disk limits.");
+		goto fail;
+	}
+
+	sbi->bitmap_length = __le64_to_cpu(be->length);
+	sbi->first_bitmap_sector = exfat_cluster_sector(sbi,
+					__le32_to_cpu(be->cluster_addr));
+	sbi->last_bitmap_sector = sbi->first_bitmap_sector +
+		DIV_ROUND_UP(sbi->bitmap_length, sbi->sectorsize);
+
+	error = exfat_get_free_cluster_count(root->i_sb, &sbi->free_clusters);
+	if (error)
+		goto fail;
+
+	sbi->prev_free_cluster = 0;
+
+	exfat_cleanup_dir_ctx(&dctx);
+	return 0;
+fail:
+	exfat_cleanup_dir_ctx(&dctx);
+	return error;
+}
+
+void exfat_exit_bitmap(struct super_block *sb)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+
+	if (sbi->cur_bitmap_bh)
+		brelse(sbi->cur_bitmap_bh);
+}
diff -Nruw linux-6.13.12-fbx/fs/exfat-fbx./dir.c linux-6.13.12-fbx/fs/exfat-fbx/dir.c
--- linux-6.13.12-fbx/fs/exfat-fbx./dir.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/fs/exfat-fbx/dir.c	2025-09-25 17:40:36.559372145 +0200
@@ -0,0 +1,402 @@
+/*
+ * dir.c for exfat
+ * Created by <nschichan@freebox.fr> on Tue Aug 20 11:42:46 2013
+ */
+
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+#include <linux/nls.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+/*
+ * setup an exfat_dir_ctx structure so that __exfat_dentry_next can
+ * work with it.
+ */
+int exfat_init_dir_ctx(struct inode *inode, struct exfat_dir_ctx *ctx,
+		       off_t start)
+{
+	u32 cluster = EXFAT_I(inode)->first_cluster;
+
+	memset(ctx, 0, sizeof (*ctx));
+
+	if (cluster == 0) {
+		ctx->empty = true;
+		ctx->sb = inode->i_sb;
+		return 0;
+	}
+
+	if (cluster < EXFAT_CLUSTER_FIRSTVALID ||
+	    cluster > EXFAT_CLUSTER_LASTVALID) {
+		exfat_msg(inode->i_sb, KERN_ERR, "exfat_init_dir_ctx: invalid "
+			  "cluster %u", cluster);
+		return -EINVAL;
+	}
+
+	start &= ~(0x20 - 1);
+	if (start == 0)
+		ctx->off = -1;
+	else
+		ctx->off = start - 0x20;
+
+	ctx->sb = inode->i_sb;
+	ctx->inode = inode;
+
+	return 0;
+}
+
+void exfat_cleanup_dir_ctx(struct exfat_dir_ctx *dctx)
+{
+	if (dctx->bh)
+		brelse(dctx->bh);
+}
+
+/*
+ * calculate the checksum for the current direntry. fields containing
+ * the checksum for the first entry is not part of the checksum
+ * calculation.
+ */
+u16 exfat_direntry_checksum(void *data, u16 checksum, bool first)
+{
+	u8 *ptr = data;
+	int i;
+
+	for (i = 0; i < 0x20; ++i) {
+		if (first && (i == 2 || i == 3))
+			continue ;
+		checksum = ((checksum << 15) | (checksum >> 1)) + (u16)ptr[i];
+	}
+	return checksum;
+}
+
+u32 exfat_dctx_fpos(struct exfat_dir_ctx *dctx)
+{
+	return dctx->off;
+}
+
+u64 exfat_dctx_dpos(struct exfat_dir_ctx *dctx)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(dctx->sb);
+
+	return (dctx->sector << sbi->sectorbits) +
+		(dctx->off & sbi->sectormask);
+}
+
+static int exfat_get_dctx_disk_cluster(struct exfat_dir_ctx *dctx,
+				       u32 file_cluster, u32 *disk_cluster)
+{
+	struct exfat_inode_info *info = EXFAT_I(dctx->inode);
+
+	if (info->flags & EXFAT_I_FAT_INVALID) {
+		*disk_cluster = info->first_cluster + file_cluster;
+		return 0;
+	} else {
+		return exfat_get_fat_cluster(dctx->inode, file_cluster,
+					     disk_cluster);
+	}
+}
+
+/*
+ * get the next typed dentry in the exfat_dir_ctx structure. can_skip
+ * indicates whether the entry must be immediately there in the entry
+ * stream. *end indicates whether end of directory entry stream is
+ * reached or not.
+ *
+ * only one buffer_head is kept at a time. subsequent calls to
+ * __exfat_dentry_next can invalidate pointers from previous calls due
+ * to that.
+ */
+void *__exfat_dentry_next(struct exfat_dir_ctx *dctx, int type, int mask,
+			  bool can_skip, bool *end)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(dctx->sb);
+
+	if (dctx->empty) {
+		if (end)
+			*end = true;
+		return NULL;
+	}
+
+	if (end)
+		*end = false;
+
+	if (dctx->off == -1)
+		dctx->off = 0;
+	else
+		dctx->off += 0x20;
+
+	for (;;) {
+		sector_t wanted_sector;
+		u32 file_cluster = dctx->off >> sbi->clusterbits;
+		u32 disk_cluster;
+		int error;
+		int sector_offset;
+		sector_t sector_in_cluster;
+
+		if (dctx->off >= dctx->inode->i_size) {
+			if (end) *end = true;
+			return NULL;
+		}
+
+
+		error = exfat_get_dctx_disk_cluster(dctx, file_cluster,
+						    &disk_cluster);
+		if (error)
+			return NULL;
+
+		sector_in_cluster = (dctx->off >> sbi->sectorbits) %
+			sbi->sectors_per_cluster;
+
+		wanted_sector = exfat_cluster_sector(sbi, disk_cluster) +
+			sector_in_cluster;
+		if (wanted_sector != dctx->sector || !dctx->bh) {
+			/*
+			 * need to fetch a new sector from the current
+			 * cluster.
+			 */
+			dctx->sector = wanted_sector;
+			if (dctx->bh)
+				brelse(dctx->bh);
+			dctx->bh = sb_bread(dctx->sb, dctx->sector);
+			if (!dctx->bh)
+				return NULL;
+		}
+
+		sector_offset = dctx->off & sbi->sectormask;
+		if ((dctx->bh->b_data[sector_offset] & mask) == (type & mask))
+			/*
+			 * return pointer to entry if type matches the
+			 * one given.
+			 */
+			return dctx->bh->b_data + sector_offset;
+
+		if (dctx->bh->b_data[sector_offset] == 0 && end)
+			/*
+			 * set end if no more entries in this directory.
+			 */
+			*end = true;
+
+		if (dctx->bh->b_data[sector_offset] == 0 || !can_skip)
+			/*
+			 * handle can_skip / end of directory.
+			 */
+			return NULL;
+
+		/*
+		 * move to next entry.
+		 */
+		dctx->off += 0x20;
+	}
+	return NULL;
+}
+
+/*
+ * helper around __exfat_dentry_next that copies the content of the
+ * found entry in a user supplied buffer.
+ */
+int exfat_dentry_next(void *out, struct exfat_dir_ctx *dctx,
+			     int type, bool can_skip)
+{
+	bool end;
+
+	void *ptr = __exfat_dentry_next(dctx, type, 0xff, can_skip, &end);
+
+	if (!ptr) {
+		if (end)
+			return -ENOENT;
+		else {
+			exfat_msg(dctx->sb, KERN_INFO, "no ptr and "
+				  "end not reached: "
+				  "type %02x, can_skip %s\n", type,
+				  can_skip ? "true" : "false");
+			return -EIO;
+		}
+	}
+	memcpy(out, ptr, 0x20);
+	return 0;
+}
+
+/*
+ * extract name by parsing consecutive E_EXFAT_FILENAME entries in a
+ * caller provided buffer. also update the checksum on the fly.
+ *
+ * no utf16 to utf8 conversion is performed.
+ */
+int __exfat_get_name(struct exfat_dir_ctx *dctx, u32 name_length,
+			    __le16 *name, u16 *calc_checksum,
+			    struct exfat_iloc *iloc)
+{
+	__le16 *ptr;
+	int error;
+	int nr;
+
+	ptr = name;
+
+	error = -EIO;
+	nr = 0;
+	while (name_length) {
+		struct exfat_filename_entry *e;
+		u32 len = 15;
+
+		e = __exfat_dentry_next(dctx, E_EXFAT_FILENAME, 0xff,
+					false, NULL);
+		if (!e)
+			goto fail;
+		*calc_checksum = exfat_direntry_checksum(e, *calc_checksum,
+							 false);
+
+		if (iloc)
+			iloc->disk_offs[nr + 2] = exfat_dctx_dpos(dctx);
+		if (name_length < 15)
+			len = name_length;
+
+		memcpy(ptr, e->name_frag, len * sizeof (__le16));
+		name_length -= len;
+		ptr += len;
+		nr++;
+	}
+	return 0;
+
+fail:
+	return error;
+}
+
+/*
+ * walk the directory and invoke filldir on all found entries.
+ */
+static int __exfat_iterate(struct exfat_dir_ctx *dctx, struct file *file,
+			   struct dir_context *ctx)
+{
+	int error;
+	char *name = __getname();
+	__le16 *utf16name = __getname();
+
+	if (!name)
+		return -ENOMEM;
+	if (!utf16name) {
+		__putname(name);
+		return -ENOMEM;
+	}
+
+	for (;;) {
+		struct exfat_filedir_entry *efd;
+		struct exfat_stream_extension_entry *esx;
+		int dtype = DT_REG;
+		int name_length;
+		bool end;
+		u16 calc_checksum;
+		u16 expect_checksum;
+
+		/*
+		 * get the next filedir entry, we are allowed to skip
+		 * entries for that.
+		 */
+		error = -EIO;
+		efd = __exfat_dentry_next(dctx, E_EXFAT_FILEDIR, 0xff,
+					  true, &end);
+		if (!efd) {
+			if (end)
+				break;
+			else
+				goto fail;
+		}
+		expect_checksum = __le16_to_cpu(efd->set_checksum);
+		calc_checksum = exfat_direntry_checksum(efd, 0, true);
+
+		if (__le16_to_cpu(efd->attributes) & E_EXFAT_ATTR_DIRECTORY)
+			dtype = DT_DIR;
+
+		/*
+		 * get immediate stream extension entry.
+		 */
+		esx = __exfat_dentry_next(dctx, E_EXFAT_STREAM_EXT, 0xff, false,
+					  NULL);
+		if (!esx)
+			goto fail;
+		calc_checksum = exfat_direntry_checksum(esx, calc_checksum,
+							false);
+
+		/*
+		 * get immediate name.
+		 */
+		error = __exfat_get_name(dctx, esx->name_length, utf16name,
+					 &calc_checksum, NULL);
+		if (error) {
+			exfat_msg(dctx->sb, KERN_INFO, "__exfat_get_name "
+				  "has failed with %i", error);
+			goto fail;
+		}
+
+		if (calc_checksum != expect_checksum) {
+			exfat_msg(dctx->sb, KERN_INFO, "checksum: "
+				  "calculated %04x, expect %04x",
+				  calc_checksum, expect_checksum);
+			error = -EIO;
+			goto fail;
+		}
+
+		/*
+		 * convert utf16 to utf8 for kernel filldir callback.
+		 */
+		name_length = utf16s_to_utf8s(utf16name, esx->name_length,
+						   UTF16_LITTLE_ENDIAN,
+						   name, NAME_MAX + 2);
+		if (name_length < 0) {
+			error = name_length;
+			goto fail;
+		}
+		if (name_length > 255) {
+			error = -ENAMETOOLONG;
+			goto fail;
+		}
+
+		/*
+		 * tell the kernel we have an entry by calling
+		 * dir_emit
+		 */
+		if (dir_emit(ctx, name, name_length, 1, dtype))
+			ctx->pos = 2 + exfat_dctx_fpos(dctx);
+		else
+			goto fail;
+	}
+	__putname(name);
+	__putname(utf16name);
+	ctx->pos = file_inode(file)->i_size + 2;
+	return 0;
+fail:
+	__putname(name);
+	__putname(utf16name);
+	return error;
+}
+
+/*
+ * readdir callback for VFS. fill "." and "..", then invoke
+ * __exfat_iterate.
+ */
+int exfat_iterate(struct file *file, struct dir_context *ctx)
+{
+	struct exfat_dir_ctx dctx;
+	int error;
+	struct inode *inode = file_inode(file);
+
+	switch (ctx->pos) {
+	case 0:
+		if (!dir_emit_dots(file, ctx))
+			return 0;
+		fallthrough;
+	default:
+		if (ctx->pos >= inode->i_size + 2)
+			return 0;
+		error = exfat_init_dir_ctx(inode, &dctx, ctx->pos - 2);
+		if (error)
+			return error;
+		exfat_lock_super(inode->i_sb);
+		error = __exfat_iterate(&dctx, file, ctx);
+		exfat_unlock_super(inode->i_sb);
+		exfat_cleanup_dir_ctx(&dctx);
+		return error;
+	}
+}
diff -Nruw linux-6.13.12-fbx/fs/exfat-fbx./exfat.h linux-6.13.12-fbx/fs/exfat-fbx/exfat.h
--- linux-6.13.12-fbx/fs/exfat-fbx./exfat.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/fs/exfat-fbx/exfat.h	2025-09-25 17:40:36.559372145 +0200
@@ -0,0 +1,325 @@
+/*
+ * exfat.h for exfat
+ * Created by <nschichan@freebox.fr> on Tue Jul 23 12:37:12 2013
+ */
+
+#ifndef __EXFAT_H
+# define __EXFAT_H
+
+#define EXFAT_HASH_BITS	(8)
+#define EXFAT_HASH_SIZE	(1 << EXFAT_HASH_BITS)
+
+/*
+ * special inode number for root directory.
+ */
+#define EXFAT_ROOT_INO	1
+
+enum {
+	EXFAT_ERROR_ACTION_CONTINUE,
+	EXFAT_ERROR_ACTION_REMOUNT_RO,
+	EXFAT_ERROR_ACTION_PANIC,
+};
+
+struct exfat_sb_options {
+	kuid_t	uid;
+	kgid_t	gid;
+	mode_t	dmask;
+	mode_t	fmask;
+	int	time_offset;
+	int	time_offset_set;
+	int	error_action;
+};
+
+struct exfat_sb_info {
+	struct exfat_sb_options options;
+
+	struct buffer_head *sb_bh;
+	struct exfat_vbr *vbr;
+	bool dirty;
+
+	u32 sectorsize; /* in bytes*/
+	u32 clustersize; /* in bytes */
+	u32 sectors_per_cluster;
+	int sectorbits;
+	int clusterbits;
+	u32 sectormask;
+	u32 clustermask;
+
+	u32 fat_offset;
+	u32 fat_length;
+
+	u32 root_dir_cluster;
+	u32 cluster_heap_offset;
+	u32 cluster_count;
+
+	__le16	*upcase_table;
+	u32	upcase_len;
+
+	/*
+	 * bitmap fields
+	 */
+	struct mutex		bitmap_mutex;
+	u32			bitmap_length;
+	sector_t		first_bitmap_sector;
+	sector_t		last_bitmap_sector;
+	sector_t		cur_bitmap_sector;
+	u32			cur_bitmap_cluster;
+	struct buffer_head	*cur_bitmap_bh;
+	u32			free_clusters;
+	u32			prev_free_cluster;
+
+	/*
+	 * inode hash fields
+	 */
+	spinlock_t		inode_hash_lock;
+	struct hlist_head	inode_hash[EXFAT_HASH_SIZE];
+
+	struct mutex		sb_mutex;
+};
+
+struct exfat_cache_entry {
+	struct list_head list;
+	u32 file_cluster;
+	u32 disk_cluster;
+	u32 nr_contig;
+};
+
+struct exfat_cache {
+	struct mutex		mutex;
+	struct list_head	entries;
+	u32			nr_entries;
+};
+
+struct exfat_iloc {
+	u8 nr_secondary;
+	u32 file_off;
+	u64 disk_offs[19];
+};
+
+struct exfat_inode_info {
+	u8			flags;
+	u16			attributes;
+	u32			first_cluster;
+	u32			allocated_clusters;
+	loff_t			mmu_private;
+	struct exfat_iloc	iloc;
+	struct hlist_node	hash_list;
+
+	struct exfat_cache	exfat_cache;
+	struct inode		vfs_inode;
+};
+
+/* fetch the exfat private data attached to a VFS super_block. */
+static inline struct exfat_sb_info *EXFAT_SB(struct super_block *sb)
+{
+	return sb->s_fs_info;
+}
+
+/* map a VFS inode back to the enclosing exfat_inode_info. */
+static inline struct exfat_inode_info *EXFAT_I(struct inode *inode)
+{
+	return container_of(inode, struct exfat_inode_info, vfs_inode);
+}
+
+loff_t exfat_dir_links(struct inode *inode);
+
+int exfat_write_fat_contiguous(struct inode *inode, u32 first_cluster,
+			       u32 nr_clusters);
+int exfat_write_fat(struct inode *inode, u32 prev_cluster, u32 *clusters,
+		    u32 nr_clusters);
+
+__printf(3, 4) void exfat_msg(struct super_block *sb, const char *level,
+			      const char *fmt, ...);
+__printf(2, 3) void exfat_fs_error(struct super_block *sb,
+				   const char *fmt, ...);
+int exfat_get_fat_cluster(struct inode *inode, u32 fcluster, u32 *dcluster);
+int __exfat_get_fat_cluster(struct inode *inode, u32 fcluster, u32 *dcluster,
+			    bool eof_is_fatal);
+
+void exfat_inode_cache_init(struct inode *inode);
+void exfat_inode_cache_drop(struct inode *inode);
+
+int exfat_init_fat(struct super_block *sb);
+
+int exfat_init_bitmap(struct inode *root);
+void exfat_exit_bitmap(struct super_block *sb);
+int exfat_alloc_clusters(struct inode *inode, u32 hint_cluster,
+			 u32 *cluster, u32 nr);
+int exfat_free_clusters_inode(struct inode *inode, u32 start);
+
+
+/*
+ * read only bitmap accessors: used by EXFAT_IOCGETBITMAP ioctl.
+ */
+struct exfat_bitmap_ctx {
+	struct super_block *sb;
+	struct buffer_head *bh;
+	sector_t cur_sector;
+};
+
+int exfat_init_bitmap_context(struct super_block *sb,
+			      struct exfat_bitmap_ctx *ctx, u32 cluster);
+void exfat_exit_bitmap_context(struct exfat_bitmap_ctx *ctx);
+int exfat_test_bitmap(struct exfat_bitmap_ctx *ctx, uint32_t start_cluster,
+		      uint32_t *first_in_use, uint32_t *nr_in_use);
+
+
+/*
+ * return the physical sector address for a given cluster.
+ *
+ * cluster numbering starts at 2 (EXFAT_CLUSTER_FIRSTVALID), hence the
+ * "- 2" below; the u64/sector_t casts avoid 32-bit overflow on large
+ * volumes.
+ */
+static inline sector_t exfat_cluster_sector(struct exfat_sb_info *sbi,
+					    u32 cluster)
+{
+	return (sector_t)sbi->cluster_heap_offset + (cluster - 2) *
+		(sector_t)sbi->sectors_per_cluster;
+}
+
+/*
+ * in dir.c
+ */
+struct exfat_dir_ctx {
+	struct super_block	*sb;
+	struct inode		*inode;
+	struct buffer_head	*bh;
+
+	off_t			off; /* from beginning of directory */
+	sector_t		sector;
+	bool empty;
+};
+
+int exfat_init_dir_ctx(struct inode *inode, struct exfat_dir_ctx *ctx,
+		       off_t off);
+void exfat_cleanup_dir_ctx(struct exfat_dir_ctx *dctx);
+int exfat_get_cluster_hint(struct inode *inode, u32 *out_hint);
+int exfat_dentry_next(void *, struct exfat_dir_ctx *, int, bool);
+void *__exfat_dentry_next(struct exfat_dir_ctx *dctx, int type, int mask,
+			  bool can_skip, bool *end);
+u16 exfat_direntry_checksum(void *data, u16 checksum, bool first);
+u32 exfat_dctx_fpos(struct exfat_dir_ctx *dctx);
+u64 exfat_dctx_dpos(struct exfat_dir_ctx *dctx);
+int __exfat_get_name(struct exfat_dir_ctx *dctx, u32 name_length, __le16 *name,
+		     u16 *calc_checksum, struct exfat_iloc *iloc);
+
+/*
+ * in namei.c
+ */
+
+/*
+ * hold a pointer to an exfat dir entry, with the corresponding bh.
+ */
+struct dir_entry_buffer {
+	struct buffer_head *bh;
+	u32 off; /* in bytes, inside the buffer_head b_data array */
+	void *start;
+};
+
+int exfat_get_dir_entry_buffers(struct inode *dir, struct exfat_iloc *iloc,
+				struct dir_entry_buffer *entries,
+				size_t nr_entries);
+u16 exfat_dir_entries_checksum(struct dir_entry_buffer *entries, u32 nr);
+void exfat_dirty_dir_entries(struct dir_entry_buffer *entries,
+			     size_t nr_entries, bool sync);
+void exfat_write_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
+		      __le32 *datetime, u8 *time_cs, u8 *tz_offset);
+
+/*
+ * in inode.c
+ */
+
+int exfat_init_inodes(void);
+void exfat_exit_inodes(void);
+
+struct inode *exfat_iget(struct super_block *sb, loff_t disk_pos);
+void exfat_insert_inode_hash(struct inode *inode);
+void exfat_remove_inode_hash(struct inode *inode);
+int __exfat_write_inode(struct inode *inode, bool sync);
+
+/*
+ * in upcase.c
+ */
+int exfat_upcase_init(struct inode *root);
+/*
+ * upper-case a single UTF-16 code unit through the volume's upcase
+ * table; code points beyond the table are returned unchanged.
+ */
+static inline __le16 exfat_upcase_convert(struct super_block *sb, __le16 _c)
+{
+	u16 c = __le16_to_cpu(_c);
+
+	if (c >= EXFAT_SB(sb)->upcase_len)
+		return _c;
+	return EXFAT_SB(sb)->upcase_table[c];
+}
+
+/*
+ * superblock operations
+ */
+struct inode *exfat_alloc_inode(struct super_block *sb);
+void exfat_destroy_inode(struct inode *_inode);
+int exfat_drop_inode(struct inode *inode);
+void exfat_evict_inode(struct inode *inode);
+
+/*
+ * file operations
+ */
+int exfat_iterate(struct file *f, struct dir_context *ctx);
+long exfat_ioctl(struct file *, unsigned int, unsigned long);
+int exfat_truncate_blocks(struct inode *inode, loff_t newsize);
+
+/*
+ * inode operations
+ */
+struct dentry *exfat_inode_lookup(struct inode *, struct dentry *,
+				  unsigned int);
+int exfat_inode_create(struct mnt_idmap *, struct inode *dir,
+		       struct dentry *dentry, umode_t mode, bool excl);
+int exfat_inode_mkdir(struct mnt_idmap *, struct inode *dir,
+		      struct dentry *dentry, umode_t mode);
+
+mode_t exfat_make_mode(struct exfat_sb_info *sbi, mode_t mode, u16 attrs);
+
+int exfat_write_inode(struct inode *inode, struct writeback_control *wbc);
+
+int exfat_inode_unlink(struct inode *inode, struct dentry *dentry);
+
+int exfat_inode_rmdir(struct inode *inode, struct dentry *dentry);
+
+int exfat_getattr(struct mnt_idmap *, const struct path *, struct kstat *,
+		  u32, unsigned int);
+int exfat_setattr(struct mnt_idmap *, struct dentry *, struct iattr *);
+int exfat_rename(struct mnt_idmap *, struct inode *, struct dentry *,
+		 struct inode *, struct dentry *, unsigned int);
+
+/*
+ * address space operations
+ */
+int exfat_read_folio(struct file *file, struct folio *folio);
+void exfat_readahead(struct readahead_control *rac);
+int exfat_write_begin(struct file *file, struct address_space *mapping,
+		      loff_t pos, unsigned len,
+		      struct folio **foliop, void **fsdata);
+int exfat_write_end(struct file *file, struct address_space *mapping,
+		    loff_t pos, unsigned len, unsigned copied,
+		    struct folio *folio, void *fsdata);
+int exfat_writepages(struct address_space *, struct writeback_control *);
+
+
+extern const struct inode_operations exfat_dir_inode_operations;
+extern const struct inode_operations exfat_file_inode_operations;
+extern const struct file_operations exfat_dir_operations;
+extern const struct file_operations exfat_file_operations;
+extern const struct address_space_operations exfat_address_space_operations;
+
+/*
+ * time functions
+ */
+void exfat_time_2unix(struct timespec64 *ts, u32 datetime, u8 time_cs,
+		      s8 tz_offset);
+void exfat_time_2exfat(struct exfat_sb_info *sbi, struct timespec64 *ts,
+		       u32 *datetime, u8 *time_cs, s8 *tz_offset);
+
+/* take the per-superblock mutex serializing metadata updates. */
+static inline void exfat_lock_super(struct super_block *sb)
+{
+	mutex_lock(&EXFAT_SB(sb)->sb_mutex);
+}
+
+/* release the per-superblock mutex. */
+static inline void exfat_unlock_super(struct super_block *sb)
+{
+	mutex_unlock(&EXFAT_SB(sb)->sb_mutex);
+}
+
+#endif /*! __EXFAT_H */
diff -Nruw linux-6.13.12-fbx/fs/exfat-fbx./exfat_fs.h linux-6.13.12-fbx/fs/exfat-fbx/exfat_fs.h
--- linux-6.13.12-fbx/fs/exfat-fbx./exfat_fs.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/fs/exfat-fbx/exfat_fs.h	2025-09-25 17:40:36.559372145 +0200
@@ -0,0 +1,200 @@
+/*
+ * exfat_fs.h for exfat
+ * Created by <nschichan@freebox.fr> on Mon Jul 29 15:06:38 2013
+ */
+
+#ifndef __EXFAT_FS_H
+# define __EXFAT_FS_H
+
+/*
+ * exfat on disk structures and constants
+ */
+
+#include <linux/types.h>
+
+struct exfat_vbr {
+	u8	jump[3];
+	u8	fsname[8];
+	u8	reserved1[53];
+
+	__le64	partition_offset;
+	__le64	volume_length;
+
+	__le32	fat_offset;
+	__le32	fat_length;
+
+	__le32	cluster_heap_offset;
+	__le32	cluster_count;
+	__le32	cluster_root_dir;
+
+	__le32	serial_number;
+
+	__le16	fs_rev;
+	__le16	volume_flags;
+
+	u8	bytes_per_sector;
+	u8	sectors_per_cluster;
+
+	u8	fat_num;
+	u8	drive_select;
+	u8	heap_use_percent;
+
+	u8	reserved2[7];
+	u8	boot_code[390];
+
+	u8	boot_sig[2];
+};
+
+enum {
+	EXFAT_CLUSTER_FIRSTVALID	= 0x00000002,
+	EXFAT_CLUSTER_LASTVALID		= 0xfffffff6,
+	EXFAT_CLUSTER_BADBLK		= 0xfffffff7,
+	EXFAT_CLUSTER_MEDIATYPE		= 0xfffffff8,
+	EXFAT_CLUSTER_EOF		= 0xffffffff,
+};
+
+enum {
+	EXFAT_ACTIVEFAT_MASK = (1 << 0),
+	EXFAT_FLAG_DIRTY = (1 << 1),
+	EXFAT_FLAG_MEDIA_FAILURE = (1 << 2),
+};
+
+static inline int exfat_active_fat(u16 flags)
+{
+	return flags & EXFAT_ACTIVEFAT_MASK;
+}
+
+#define EXFAT_CHECKSUM_SECTORS	11
+
+enum {
+	EXFAT_I_ALLOC_POSSIBLE = (1 << 0),
+	EXFAT_I_FAT_INVALID = (1 << 1),
+};
+
+/*
+ * directory cluster content
+ */
+
+/*
+ * entry types
+ */
+enum {
+	E_EXFAT_EOD		= 0x00,
+	E_EXFAT_VOLUME_LABEL	= 0x83,
+	E_EXFAT_BITMAP		= 0x81,
+	E_EXFAT_UPCASE_TABLE	= 0x82,
+	E_EXFAT_GUID		= 0xa0,
+	E_EXFAT_PADDING		= 0xa1,
+	E_EXFAT_ACL		= 0xe2,
+	E_EXFAT_FILEDIR		= 0x85,
+	E_EXFAT_STREAM_EXT	= 0xc0,
+	E_EXFAT_FILENAME	= 0xc1,
+};
+
+/*
+ * file attributes in exfat_filedir_entry
+ */
+enum {
+	E_EXFAT_ATTR_RO		= (1 << 0),
+	E_EXFAT_ATTR_HIDDEN	= (1 << 1),
+	E_EXFAT_ATTR_SYSTEM	= (1 << 2),
+	/* bit 3 reserved */
+	E_EXFAT_ATTR_DIRECTORY	= (1 << 4),
+	E_EXFAT_ATTR_ARCHIVE	= (1 << 5),
+	/* bits 6-15 reserved */
+};
+
+/* type 0x83 */
+struct exfat_volume_label_entry {
+	u8 type;
+	u8 charcount;
+	__u16 label[11];
+	u8 reserved1[8];
+};
+
+static inline int exfat_bitmap_nr(u8 flags)
+{
+	return flags & 1;
+}
+
+/* type 0x81 */
+struct exfat_bitmap_entry {
+	u8 type;
+	u8 flags;
+	u8 reserved1[18];
+	__le32 cluster_addr;
+	__le64 length;
+};
+
+/* type 0x82 */
+struct exfat_upcase_entry {
+	u8 type;
+	u8 reserved1[3];
+	__le32 checksum;
+	u8 reserved2[12];
+	__le32 cluster_addr;
+	__le64 length;
+};
+
+/* type 0xa0 */
+struct exfat_guid_entry {
+	u8 type;
+	u8 secondary_count;
+	__le16 set_checksum;
+	__le16 flags;
+	u8 guid[16];
+	u8 reserved1[10];
+};
+
+/* type 0xa1 */
+struct exfat_padding_entry {
+	u8 type;
+	u8 reserved1[31];
+};
+
+/* type 0xe2 */
+struct exfat_acl_entry {
+	u8 type;
+	u8 reserved1[31];
+};
+
+/* type 0x85 */
+struct exfat_filedir_entry {
+	u8 type;
+	u8 secondary_count;
+	__le16 set_checksum;
+	__le16 attributes;
+	u8 reserved1[2];
+	__le32 create;
+	__le32 modified;
+	__le32 accessed;
+	u8 create_10ms;
+	u8 modified_10ms;
+	s8 create_tz_offset;
+	s8 modified_tz_offset;
+	s8 accessed_tz_offset;
+	u8 reserved2[7];
+};
+
+/* 0xc0 */
+struct exfat_stream_extension_entry {
+	u8 type;
+	u8 flags;
+	u8 reserved1;
+	u8 name_length;
+	__le16 name_hash;
+	u8 reserved2[2];
+	__le64 valid_data_length;
+	u8 reserved3[4];
+	__le32 first_cluster;
+	__le64 data_length;
+};
+
+/* 0xc1 */
+struct exfat_filename_entry {
+	u8 type;
+	u8 flags;
+	__le16 name_frag[15];
+};
+
+#endif /*! __EXFAT_FS_H */
diff -Nruw linux-6.13.12-fbx/fs/exfat-fbx./fat.c linux-6.13.12-fbx/fs/exfat-fbx/fat.c
--- linux-6.13.12-fbx/fs/exfat-fbx./fat.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/fs/exfat-fbx/fat.c	2025-09-25 17:40:36.559372145 +0200
@@ -0,0 +1,424 @@
+/*
+ * fat.c for exfat
+ * Created by <nschichan@freebox.fr> on Mon Jul 29 19:43:38 2013
+ */
+
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+#define MAX_CACHED_FAT	16
+
+/*
+ * helpers for exfat_next_fat_cluster.
+ */
+
+/*
+ * get the sector number in the fat where the next requested cluster
+ * number is to be found.
+ *
+ * the u64 cast keeps the byte offset from overflowing 32 bits for
+ * large cluster numbers.
+ */
+static inline sector_t cluster_sector(struct exfat_sb_info *sbi, u32 cluster)
+{
+	return sbi->fat_offset + (((u64)cluster * sizeof (u32)) >> sbi->sectorbits);
+}
+
+/*
+ * get the offset in the fat sector where the next requested cluster
+ * number is to be found (byte offset within the sector returned by
+ * cluster_sector()).
+ */
+static inline off_t cluster_offset(struct exfat_sb_info *sbi, u32 cluster)
+{
+	return (cluster * sizeof (u32)) & sbi->sectormask;
+}
+
+/*
+ * walk one step in the fat chain: replace *cluster with the content
+ * of its FAT entry.
+ *
+ * returns 0 on success, -EIO if the FAT sector cannot be read.
+ */
+static int exfat_next_fat_cluster(struct super_block *sb, u32 *cluster)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	sector_t sect = cluster_sector(sbi, *cluster);
+	off_t off = cluster_offset(sbi, *cluster);
+	struct buffer_head *bh;
+
+	bh = sb_bread(sb, sect);
+	if (!bh) {
+		exfat_msg(sb, KERN_ERR, "unable to read FAT sector at %llu",
+			  (unsigned long long)sect);
+		return -EIO;
+	}
+
+	/*
+	 * FAT entries are little-endian on disk: access them through
+	 * an __le32 pointer like exfat_init_fat() and
+	 * __fat_write_entry() do. The previous plain u32 cast yielded
+	 * the same bytes but defeated sparse endianness checking.
+	 */
+	*cluster = __le32_to_cpu(*(__le32 *)&bh->b_data[off]);
+	brelse(bh);
+	return 0;
+}
+
+/*
+ * initialize the per-inode FAT translation cache: empty entry list,
+ * zero entry count, fresh mutex.
+ */
+void exfat_inode_cache_init(struct inode *inode)
+{
+	struct exfat_cache *cache = &EXFAT_I(inode)->exfat_cache;
+
+	mutex_init(&cache->mutex);
+	cache->nr_entries = 0;
+	INIT_LIST_HEAD(&cache->entries);
+}
+
+/*
+ * drop inode cache content: free every cached FAT extent and reset
+ * the list to empty. called when the cached translations may no
+ * longer be valid.
+ */
+void exfat_inode_cache_drop(struct inode *inode)
+{
+	struct exfat_cache *cache = &EXFAT_I(inode)->exfat_cache;
+	struct exfat_cache_entry *e, *tmp;
+
+	mutex_lock(&cache->mutex);
+	/* _safe variant: entries are freed while walking */
+	list_for_each_entry_safe (e, tmp, &cache->entries, list) {
+		kfree(e);
+	}
+	INIT_LIST_HEAD(&cache->entries);
+	cache->nr_entries = 0;
+	mutex_unlock(&cache->mutex);
+}
+
+/*
+ * move the entry to the head of the list, this will make it less
+ * likely to be the victim in when caching new entries.
+ *
+ * caller must hold cache->mutex.
+ */
+static void __exfat_fat_lru(struct exfat_cache *cache,
+			  struct exfat_cache_entry *e)
+{
+	/* already at the head? nothing to do. */
+	if (cache->entries.next != &e->list)
+		list_move(&e->list, &cache->entries);
+}
+
+/*
+ * find a cache entry that is close to the wanted fcluster (ideally
+ * spanning over the requested file cluster).
+ *
+ * returns an exact covering entry when one exists, otherwise the
+ * entry with the highest file_cluster still below fcluster, or NULL
+ * when no entry precedes fcluster.
+ *
+ * caller must hold cache->mutex.
+ */
+static struct exfat_cache_entry *__exfat_cache_lookup(struct exfat_cache *cache,
+						      u32 fcluster)
+{
+	struct exfat_cache_entry *e;
+	struct exfat_cache_entry *best = NULL;
+
+	list_for_each_entry (e, &cache->entries, list) {
+		/* exact hit: entry range covers fcluster */
+		if (e->file_cluster <= fcluster &&
+		    e->file_cluster + e->nr_contig >= fcluster)
+			return e;
+
+		/* otherwise remember the closest entry below fcluster */
+		if (!best && e->file_cluster < fcluster)
+			best = e;
+		if (best && best->file_cluster < e->file_cluster &&
+		    e->file_cluster < fcluster)
+			best = e;
+	}
+	return best;
+}
+
+/*
+ * record the fcluster -> dcluster translation in the cache, either by
+ * extending 'nearest' (the entry found by __exfat_cache_lookup(), may
+ * be NULL) or by inserting a new entry at the LRU head.
+ *
+ * caller must hold cache->mutex.
+ */
+static int __exfat_cache_cluster(struct exfat_cache *cache,
+			       struct exfat_cache_entry *nearest,
+			       u32 fcluster, u32 dcluster)
+{
+	struct exfat_cache_entry *e;
+
+	/*
+	 * see if we can merge with the nearest entry. in the ideal
+	 * case, all cluster in the chain are contiguous, and only
+	 * one entry is needed for a single file.
+	 */
+	if (nearest &&
+	    nearest->file_cluster + nearest->nr_contig + 1 == fcluster &&
+	    nearest->disk_cluster + nearest->nr_contig + 1 == dcluster) {
+		list_move(&nearest->list, &cache->entries);
+		nearest->nr_contig++;
+		return 0;
+	}
+
+	/*
+	 * allocate a new entry or reuse an existing one if the number
+	 * of cached entries is too high.
+	 */
+	if (cache->nr_entries < MAX_CACHED_FAT) {
+		e = kmalloc(sizeof (*e), GFP_NOFS);
+		/*
+		 * check the allocation before touching the list: the
+		 * previous code did list_add(&e->list, ...) first and
+		 * only tested e for NULL afterwards, dereferencing a
+		 * NULL pointer on allocation failure.
+		 */
+		if (!e)
+			return -ENOMEM;
+		list_add(&e->list, &cache->entries);
+		++cache->nr_entries;
+	} else {
+		/* cache full: recycle the LRU victim at the tail */
+		e = list_entry(cache->entries.prev, struct exfat_cache_entry,
+			       list);
+		list_move(&e->list, &cache->entries);
+	}
+
+	e->file_cluster = fcluster;
+	e->disk_cluster = dcluster;
+	e->nr_contig = 0;
+
+	return 0;
+}
+
+/*
+ * translate a file-relative cluster number (fcluster) to the on-disk
+ * cluster number (*dcluster), using the per-inode cache when possible
+ * and walking the FAT chain otherwise.
+ *
+ * eof_is_fatal: when true, a premature EOF in the chain is reported
+ * through exfat_fs_error(); either way -EIO is returned.
+ */
+int __exfat_get_fat_cluster(struct inode *inode, u32 fcluster, u32 *dcluster,
+			    bool eof_is_fatal)
+{
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct exfat_cache *cache = &info->exfat_cache;
+	int error;
+	struct exfat_cache_entry *e;
+	u32 fcluster_start;
+
+	/*
+	 * initial translation: first file cluster is found in the
+	 * inode info.
+	 */
+	if (fcluster == 0) {
+		*dcluster = info->first_cluster;
+		return 0;
+	}
+
+	mutex_lock(&cache->mutex);
+	/*
+	 * try to find a cached entry either covering the file cluster
+	 * we want or at least close to the file cluster.
+	 */
+	e = __exfat_cache_lookup(cache, fcluster);
+	if (e && e->file_cluster <= fcluster &&
+	    e->file_cluster + e->nr_contig >= fcluster) {
+		/*
+		 * perfect match, entry zone covers the requested file
+		 * cluster.
+		 */
+		__exfat_fat_lru(cache, e);
+		*dcluster = e->disk_cluster + (fcluster - e->file_cluster);
+		mutex_unlock(&cache->mutex);
+		return 0;
+	}
+
+	if (e) {
+		/*
+		 * we have an entry, hopefully close enough, setup
+		 * cluster walk from there.
+		 */
+		*dcluster = e->disk_cluster + e->nr_contig;
+		fcluster_start = e->file_cluster + e->nr_contig;
+	} else {
+		/*
+		 * no entry, walk the FAT chain from the start of the
+		 * file.
+		 */
+		fcluster_start = 0;
+		*dcluster = info->first_cluster;
+	}
+
+	/*
+	 * walk the FAT chain the number of times required to get the
+	 * disk cluster corresponding to the file cluster.
+	 */
+	while (fcluster_start != fcluster) {
+		error = exfat_next_fat_cluster(inode->i_sb, dcluster);
+		if (error) {
+			mutex_unlock(&cache->mutex);
+			return error;
+		}
+		if (*dcluster == EXFAT_CLUSTER_EOF) {
+			if (eof_is_fatal)
+				/*
+				 * exfat_fill_root uses
+				 * __exfat_get_fat_cluster with
+				 * eof_is_fatal set to false, as the
+				 * root inode does not have a size
+				 * field and thus requires a complete
+				 * FAT walk to compute the size.
+				 */
+				exfat_fs_error(inode->i_sb, "premature EOF in FAT "
+					       "chain. file cluster %u out "
+					       "of %u\n", fcluster_start,
+					       fcluster);
+			mutex_unlock(&cache->mutex);
+			return -EIO;
+		}
+		if (*dcluster < EXFAT_CLUSTER_FIRSTVALID) {
+			exfat_fs_error(inode->i_sb, "invalid cluster %u found "
+				       "in fat chain.", *dcluster);
+			mutex_unlock(&cache->mutex);
+			return -EIO;
+		}
+		++fcluster_start;
+	}
+
+	/*
+	 * cache the result. failure to cache is not fatal: the
+	 * translation in *dcluster is still valid.
+	 */
+	__exfat_cache_cluster(cache, e, fcluster, *dcluster);
+	mutex_unlock(&cache->mutex);
+	return 0;
+}
+
+/* common-case wrapper: premature EOF in the chain is a fs error. */
+int exfat_get_fat_cluster(struct inode *inode, u32 fcluster, u32 *dcluster)
+{
+	return __exfat_get_fat_cluster(inode, fcluster, dcluster, true);
+}
+
+/*
+ * sanity-check the start of the first FAT: entry 0 must hold the
+ * media-type marker and entry 1 the EOF constant.
+ *
+ * returns 0 when the FAT looks valid, -EIO on read failure, -ENXIO
+ * on bad FAT content.
+ */
+int exfat_init_fat(struct super_block *sb)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct buffer_head *bh;
+	int error = 0;
+	u32 first, second;
+
+	bh = sb_bread(sb, sbi->fat_offset);
+	if (!bh) {
+		exfat_msg(sb, KERN_ERR, "unable to read FAT sector at %u",
+			  sbi->fat_offset);
+		return -EIO;
+	}
+
+	first = __le32_to_cpu(*(__le32*)(bh->b_data + 0));
+	second = __le32_to_cpu(*(__le32*)(bh->b_data + sizeof (__le32)));
+
+	/*
+	 * FAT[0] is the media-type entry (0xfffffff8) and FAT[1] the
+	 * EOF constant (0xffffffff). Reject the volume if either is
+	 * wrong. The previous check compared FAT[0] against the
+	 * byte-swapped constant 0xf8ffffff and combined both tests
+	 * with &&, so it could never trigger on a volume whose FAT[1]
+	 * was valid.
+	 */
+	if (first != EXFAT_CLUSTER_MEDIATYPE || second != EXFAT_CLUSTER_EOF) {
+		exfat_msg(sb, KERN_INFO, "invalid FAT start: %08x, %08x",
+			  first, second);
+		error = -ENXIO;
+	}
+
+	brelse(bh);
+	return error;
+}
+
+/*
+ * fat write context, store the current buffer_head and current
+ * cluster to avoid having sb_bread all the time when the clusters are
+ * contiguous or at least not too far apart.
+ */
+struct fat_write_ctx {
+	struct super_block *sb;
+	struct buffer_head *bh;
+	u32 cur_cluster;
+};
+
+/* start a FAT write sequence: no buffer_head held yet. */
+static void fat_init_write_ctx(struct fat_write_ctx *fwctx,
+				struct super_block *sb)
+{
+	memset(fwctx, 0, sizeof (*fwctx));
+	fwctx->sb = sb;
+}
+
+/* end a FAT write sequence: release the cached buffer_head, if any. */
+static void fat_exit_write_ctx(struct fat_write_ctx *fwctx)
+{
+	if (fwctx->bh)
+		brelse(fwctx->bh);
+}
+
+/*
+ * set the FAT entry of 'cluster' to 'next', reusing the context's
+ * cached buffer_head when both entries live in the same FAT sector.
+ *
+ * returns 0 on success, -EIO if the FAT sector cannot be read.
+ */
+static int __fat_write_entry(struct fat_write_ctx *fwctx,
+			       u32 cluster, u32 next)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(fwctx->sb);
+	sector_t current_sector = cluster_sector(sbi, fwctx->cur_cluster);
+	sector_t wanted_sector = cluster_sector(sbi, cluster);
+	off_t off = cluster_offset(sbi, cluster);
+
+	/*
+	 * first see if we need a different buffer head from the
+	 * current one in the fat_write_ctx.
+	 */
+	if (current_sector != wanted_sector || !fwctx->bh) {
+		if (fwctx->bh)
+			brelse(fwctx->bh);
+		fwctx->bh = sb_bread(fwctx->sb, wanted_sector);
+		if (!fwctx->bh) {
+			exfat_msg(fwctx->sb, KERN_ERR,
+				  "unable to read FAT sector at %llu",
+				  (unsigned long long)wanted_sector);
+			return -EIO;
+		}
+	}
+
+	/*
+	 * set fat cluster to point to the next cluster, and mark bh
+	 * dirty so that the change hits the storage device.
+	 */
+	fwctx->cur_cluster = cluster;
+	*(__le32*)(fwctx->bh->b_data + off) = __cpu_to_le32(next);
+	mark_buffer_dirty(fwctx->bh);
+	return 0;
+}
+
+/*
+ * write nr_clusters contiguous clusters starting at first_cluster:
+ * each FAT entry points to the next cluster, the last one gets EOF.
+ *
+ * returns 0 on success or a negative error from __fat_write_entry().
+ */
+int exfat_write_fat_contiguous(struct inode *inode, u32 first_cluster,
+			       u32 nr_clusters)
+{
+	u32 cluster;
+	struct fat_write_ctx fwctx;
+	int error = 0;
+
+	/*
+	 * nothing to chain: bail out early. without this guard the
+	 * EOF write below would clobber the FAT entry of
+	 * first_cluster even though no cluster was requested (same
+	 * guard as in exfat_write_fat).
+	 */
+	if (!nr_clusters)
+		return 0;
+
+	fat_init_write_ctx(&fwctx, inode->i_sb);
+	for (cluster = first_cluster;
+	     cluster < first_cluster + nr_clusters - 1;
+	     ++cluster) {
+		error = __fat_write_entry(&fwctx, cluster, cluster + 1);
+		if (error)
+			goto end;
+	}
+
+	/*
+	 * set EOF
+	 */
+	error = __fat_write_entry(&fwctx, cluster, EXFAT_CLUSTER_EOF);
+end:
+	fat_exit_write_ctx(&fwctx);
+	return error;
+}
+
+/*
+ * write cluster nr_clusters stored in clusters array, link with prev_cluster.
+ *
+ * the last cluster of the array gets an EOF entry; when prev_cluster
+ * is non-zero its FAT entry is pointed at clusters[0] first.
+ */
+int exfat_write_fat(struct inode *inode, u32 prev_cluster, u32 *clusters,
+		    u32 nr_clusters)
+{
+	u32 i;
+	struct fat_write_ctx fwctx;
+	int error;
+
+	if (!nr_clusters)
+		/* ??! */
+		return 0;
+
+	fat_init_write_ctx(&fwctx, inode->i_sb);
+
+	if (prev_cluster) {
+		/*
+		 * link with previous cluster if applicable.
+		 */
+		error = __fat_write_entry(&fwctx, prev_cluster, clusters[0]);
+		if (error)
+			goto end;
+	}
+	/* chain each array element to the following one */
+	for (i = 0; i < nr_clusters - 1; ++i) {
+		error = __fat_write_entry(&fwctx, clusters[i], clusters[i + 1]);
+		if (error)
+			goto end;
+	}
+
+	/*
+	 * set EOF.
+	 */
+	error = __fat_write_entry(&fwctx, clusters[i], EXFAT_CLUSTER_EOF);
+
+ end:
+	fat_exit_write_ctx(&fwctx);
+	return error;
+}
diff -Nruw linux-6.13.12-fbx/fs/exfat-fbx./file.c linux-6.13.12-fbx/fs/exfat-fbx/file.c
--- linux-6.13.12-fbx/fs/exfat-fbx./file.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/fs/exfat-fbx/file.c	2025-09-25 17:40:36.559372145 +0200
@@ -0,0 +1,428 @@
+/*
+ * file.c for exfat
+ * Created by <nschichan@freebox.fr> on Tue Aug 20 14:39:41 2013
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/fs.h>
+#include <linux/exfat_user.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+/* copy one fragment descriptor out to the user-supplied array. */
+static int append_fragment(struct exfat_fragment __user *ufrag,
+			   struct exfat_fragment *kfrag)
+{
+	if (copy_to_user(ufrag, kfrag, sizeof (*kfrag)))
+		return -EFAULT;
+	return 0;
+}
+
+/* start a new one-cluster fragment at (fcluster, dcluster). */
+static void setup_fragment(struct exfat_sb_info *sbi,
+			  struct exfat_fragment *fragment, uint32_t fcluster,
+			  uint32_t dcluster)
+{
+	fragment->fcluster_start = fcluster;
+	fragment->dcluster_start = dcluster;
+	fragment->sector_start = exfat_cluster_sector(sbi, dcluster);
+	fragment->nr_clusters = 1;
+}
+
+/*
+ * EXFAT_IOCGETFRAGMENTS: report the file's extent list, starting at
+ * head.fcluster_start, filling at most head.nr_fragments descriptors
+ * after the user-supplied head. nr_fragments and fcluster_start are
+ * updated in the user head so the caller can iterate.
+ */
+static int exfat_ioctl_get_fragments(struct inode *inode,
+				     struct exfat_fragment_head __user *uhead)
+{
+	struct exfat_fragment_head head;
+	struct exfat_fragment fragment;
+	u32 fcluster;
+	u32 prev_dcluster;
+	u32 cur_fragment;
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	int error;
+
+	memset(&fragment, 0, sizeof (fragment));
+
+	if (copy_from_user(&head, uhead, sizeof (head)))
+		return -EFAULT;
+
+
+	/* always report the volume geometry back to the caller */
+	if (put_user(sbi->sectorsize, &uhead->sector_size) ||
+	    put_user(sbi->clustersize, &uhead->cluster_size))
+		return -EFAULT;
+
+	if (!head.nr_fragments) {
+		/*
+		 * user did not provide space for fragments after
+		 * header.
+		 */
+		return 0;
+	}
+
+	if (head.fcluster_start >= info->allocated_clusters) {
+		/*
+		 * requested start cluster is after file EOF
+		 */
+		if (put_user(0, &uhead->nr_fragments))
+			return -EFAULT;
+		return 0;
+	}
+
+	if (info->flags & EXFAT_I_FAT_INVALID) {
+		/*
+		 * not FAT chain, this file has only one fragment.
+		 */
+		fragment.fcluster_start = head.fcluster_start;
+		fragment.dcluster_start =
+			info->first_cluster + head.fcluster_start;
+		fragment.nr_clusters = info->allocated_clusters -
+			head.fcluster_start;
+		fragment.sector_start =
+			exfat_cluster_sector(sbi, fragment.dcluster_start);
+
+		if (copy_to_user(&uhead->fragments[0], &fragment,
+				 sizeof (fragment)))
+			return -EFAULT;
+		if (put_user(1, &uhead->nr_fragments))
+			return -EFAULT;
+		if (put_user(info->first_cluster + info->allocated_clusters,
+			     &uhead->fcluster_start))
+			return -EFAULT;
+		return 0;
+	}
+
+	fcluster = head.fcluster_start;
+	cur_fragment = 0;
+
+	/*
+	 * initial fragment setup
+	 */
+	error = exfat_get_fat_cluster(inode, fcluster,
+				      &prev_dcluster);
+	if (error)
+		return error;
+	setup_fragment(sbi, &fragment, fcluster, prev_dcluster);
+	++fcluster;
+	while (fcluster < info->allocated_clusters) {
+		/* NOTE(review): this 'error' shadows the outer one —
+		 * harmless (the outer value is never read afterwards)
+		 * but confusing. */
+		int error;
+		u32 dcluster;
+
+		/*
+		 * walk one step in the FAT.
+		 */
+		error = exfat_get_fat_cluster(inode, fcluster, &dcluster);
+		if (error)
+			return error;
+
+		if (prev_dcluster == dcluster - 1) {
+			/*
+			 * dcluster and prev_dcluster are contiguous.
+			 */
+			++fragment.nr_clusters;
+		} else {
+			/*
+			 * put this cluster in the user array
+			 */
+			error = append_fragment(&uhead->fragments[cur_fragment],
+						&fragment);
+			if (error)
+				return error;
+
+			++cur_fragment;
+			if (cur_fragment == head.nr_fragments)
+				break;
+
+			/*
+			 * setup a new fragment.
+			 */
+			setup_fragment(sbi, &fragment, fcluster, dcluster);
+		}
+		++fcluster;
+		prev_dcluster = dcluster;
+	}
+
+	/* flush the fragment being built if space remains */
+	if (cur_fragment < head.nr_fragments) {
+		append_fragment(&uhead->fragments[cur_fragment], &fragment);
+		++cur_fragment;
+	}
+
+	/*
+	 * update nr_fragments in user supplied head.
+	 */
+	if (cur_fragment != head.nr_fragments &&
+	    put_user(cur_fragment, &uhead->nr_fragments))
+		return -EFAULT;
+
+	/*
+	 * update fcluster_start in user supplied head.
+	 */
+	if (put_user(fcluster, &uhead->fcluster_start))
+		return -EFAULT;
+
+
+	return 0;
+}
+
+/*
+ * EXFAT_IOCGETBITMAP: report runs of in-use clusters starting at
+ * head.start_cluster, filling at most head.nr_entries descriptors.
+ * nr_entries and start_cluster are updated in the user head so the
+ * caller can iterate.
+ */
+static int exfat_ioctl_get_bitmap(struct super_block *sb,
+				  struct exfat_bitmap_head __user *uhead)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct exfat_bitmap_head head;
+	uint32_t i;
+	int error;
+	struct exfat_bitmap_ctx ctx;
+	uint32_t start_cluster;
+
+	if (copy_from_user(&head, uhead, sizeof (head)))
+		return -EFAULT;
+
+	/* clusters 0 and 1 do not exist on disk */
+	start_cluster = head.start_cluster;
+	if (start_cluster < 2)
+		return -EINVAL;
+
+
+	error = exfat_init_bitmap_context(sb, &ctx, head.start_cluster);
+	if (error)
+		return error;
+	for (i = 0; i < head.nr_entries; ++i) {
+		uint32_t first_in_use;
+		uint32_t nr_in_use;
+		int error;
+
+		error = exfat_test_bitmap(&ctx, start_cluster, &first_in_use,
+					  &nr_in_use);
+		if (error)
+			goto out_error;
+
+		/* cluster_count as first_in_use means no more in-use runs */
+		if (first_in_use == sbi->cluster_count)
+			break;
+		if (put_user(first_in_use, &uhead->entries[i].start_cluster))
+			goto out_efault;
+		if (put_user(nr_in_use, &uhead->entries[i].nr_clusters))
+			goto out_efault;
+		if (put_user(exfat_cluster_sector(sbi, first_in_use),
+			     &uhead->entries[i].sector_start))
+			goto out_efault;
+		if (put_user((u64)nr_in_use * sbi->sectors_per_cluster,
+			     &uhead->entries[i].nr_sectors))
+			goto out_efault;
+		/*
+		 * NOTE(review): this advances one cluster past the end
+		 * of the run (first_in_use + nr_in_use is already the
+		 * first cluster not in the run); verify against
+		 * exfat_test_bitmap() semantics that no cluster is
+		 * skipped here.
+		 */
+		start_cluster = first_in_use + nr_in_use + 1;
+	}
+
+	exfat_exit_bitmap_context(&ctx);
+	if (put_user(i, &uhead->nr_entries))
+		return -EFAULT;
+	if (put_user(start_cluster, &uhead->start_cluster))
+		return -EFAULT;
+
+	return 0;
+
+out_efault:
+	error = -EFAULT;
+out_error:
+	exfat_exit_bitmap_context(&ctx);
+	return error;
+}
+
+/*
+ * EXFAT_IOCGETDIRENTS: report the raw type byte of up to
+ * head.nr_entries directory entries starting at head.offset (rounded
+ * down to a 32-byte entry boundary). nr_entries and offset are
+ * updated in the user head so the caller can iterate.
+ */
+static int exfat_ioctl_get_dirents(struct inode *inode,
+				   struct exfat_dirent_head __user *uhead)
+{
+	struct exfat_dir_ctx dctx;
+	struct exfat_dirent_head head;
+	int error;
+	uint32_t i;
+
+	if (!S_ISDIR(inode->i_mode))
+		return -ENOTDIR;
+
+	if (copy_from_user(&head, uhead, sizeof (head)))
+		return -EFAULT;
+
+	/* make sure we're aligned on an entry boundary */
+	head.offset &= ~0x1f;
+
+	error = exfat_init_dir_ctx(inode, &dctx, head.offset);
+	if (error < 0)
+		return error;
+
+	error = 0;
+	for (i = 0; i < head.nr_entries; ++i) {
+		bool end;
+		u8 *entry = __exfat_dentry_next(&dctx, 0, 0, false, &end);
+		u8 type;
+
+		if (!entry && end)
+			/* genuine end of file */
+			break;
+		if (!entry) {
+			/* something went wrong */
+			error = -EIO;
+			goto out;
+		}
+		/* first byte of each 32-byte entry is its type */
+		type = *entry;
+
+		if (put_user(type, &uhead->entries[i])) {
+			error = -EFAULT;
+			goto out;
+		}
+	}
+
+	/*
+	 * update head nr_entries and offset.
+	 */
+	if (put_user(i, &uhead->nr_entries))  {
+		error = -EFAULT;
+		goto out;
+	}
+	if (put_user(head.offset + 0x20 * i, &uhead->offset)) {
+		error = -EFAULT;
+		goto out;
+	}
+
+ out:
+	exfat_cleanup_dir_ctx(&dctx);
+	return error;
+}
+
+/* ioctl dispatcher for the exfat-specific EXFAT_IOC* commands. */
+long exfat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	switch (cmd) {
+	case EXFAT_IOCGETFRAGMENTS:
+		return exfat_ioctl_get_fragments(file_inode(file),
+						 (void __user*)arg);
+	case EXFAT_IOCGETBITMAP:
+		return exfat_ioctl_get_bitmap(file_inode(file)->i_sb,
+					      (void __user*)arg);
+	case EXFAT_IOCGETDIRENTS:
+		return exfat_ioctl_get_dirents(file_inode(file),
+					       (void __user*)arg);
+	default:
+		return -ENOTTY;
+	}
+}
+
+/*
+ * grow a file to newsize by zero-filling, then update mtime and mark
+ * the inode dirty. synchronous-mount writeback is not implemented
+ * (only logged).
+ */
+static int exfat_cont_expand(struct inode *inode, loff_t newsize)
+{
+	int error;
+
+	error = generic_cont_expand_simple(inode, newsize);
+	if (error)
+		return error;
+
+	inode_set_mtime_to_ts(inode, current_time(inode));
+	mark_inode_dirty(inode);
+
+	if (IS_SYNC(inode))
+		exfat_msg(inode->i_sb, KERN_ERR, "TODO: cont_expand with "
+			  "sync mode.");
+	return 0;
+}
+
+/*
+ * release the clusters beyond newsize: compute the first cluster to
+ * free (rounding newsize up to a cluster boundary) and trim the
+ * chain there.
+ */
+int exfat_truncate_blocks(struct inode *inode, loff_t newsize)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	u32 fcluster = (newsize + sbi->clustersize - 1) >> sbi->clusterbits;
+	int error;
+
+	/* keep mmu_private consistent with the new, smaller size */
+	if (EXFAT_I(inode)->mmu_private > newsize)
+		EXFAT_I(inode)->mmu_private = newsize;
+
+	error = exfat_free_clusters_inode(inode, fcluster);
+	if (error) {
+		exfat_msg(inode->i_sb, KERN_INFO, "exfat_free_clusters_inode: "
+			  "%i", error);
+		return error;
+	}
+
+	return 0;
+}
+
+/*
+ * ->getattr: generic attributes, with the preferred I/O block size
+ * reported as the cluster size.
+ */
+int exfat_getattr(struct mnt_idmap *ns, const struct path *path,
+		  struct kstat *stat, u32 request_mask, unsigned int flags)
+{
+	struct inode *inode = d_inode(path->dentry);
+	generic_fillattr(&nop_mnt_idmap, request_mask, inode, stat);
+	stat->blksize = EXFAT_SB(inode->i_sb)->clustersize;
+	return 0;
+}
+
+#define EXFAT_VALID_MODE       (S_IFREG | S_IFDIR | S_IRWXUGO)
+
+/*
+ * validate a requested chmod against the mount's umask options.
+ * exfat has no per-file permissions, so only modes representable
+ * through dmask/fmask are accepted; *mode is normalized in place.
+ * returns 0 if acceptable, -EPERM otherwise.
+ */
+static int exfat_mode_fixup(struct inode *inode, umode_t *mode)
+{
+	mode_t mask, perm;
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+	if (S_ISDIR(*mode))
+		mask = sbi->options.dmask;
+	else
+		mask = sbi->options.fmask;
+
+	perm = *mode & ~(S_IFMT | mask);
+
+	/*
+	 * we want 'r' and 'x' bits when mask allows for it.
+	 */
+	if ((perm & (S_IRUGO | S_IXUGO)) !=
+	    (inode->i_mode & ~mask & (S_IRUGO | S_IXUGO))) {
+		return -EPERM;
+	}
+
+	/*
+	 * we want all 'w' bits or none, depending on mask.
+	 */
+	if ((perm & S_IWUGO) && (perm & S_IWUGO) != (~mask & S_IWUGO))
+		return -EPERM;
+	*mode &= ~mask;
+	return 0;
+}
+
+/*
+ * ->setattr: uid/gid changes are only allowed when they match the
+ * current values (exfat stores no ownership), mode changes outside
+ * the mount masks are silently dropped (vfat behavior), and size
+ * changes expand or truncate the cluster chain.
+ */
+int exfat_setattr(struct mnt_idmap *ns, struct dentry *dentry,
+		  struct iattr *attrs)
+{
+	struct inode *inode = dentry->d_inode;
+	int error;
+
+	/*
+	 * can set uid/gid, only if it the same as the current one in
+	 * the inode.
+	 */
+	if (attrs->ia_valid & ATTR_UID &&
+	    !uid_eq(inode->i_uid, attrs->ia_uid))
+		return -EPERM;
+
+	if (attrs->ia_valid & ATTR_GID &&
+	    !gid_eq(inode->i_gid, attrs->ia_gid))
+		return -EPERM;
+
+	if (attrs->ia_valid & ATTR_MODE &&
+	    (attrs->ia_mode & ~EXFAT_VALID_MODE ||
+	     exfat_mode_fixup(inode, &attrs->ia_mode) < 0)) {
+		/*
+		 * silently ignore mode change if we're not OK with
+		 * it (same behavior as vfat).
+		 */
+		attrs->ia_valid &= ~ATTR_MODE;
+	}
+
+	if (attrs->ia_valid & ATTR_SIZE) {
+		/* let in-flight direct I/O settle before resizing */
+		inode_dio_wait(inode);
+		if (attrs->ia_size > inode->i_size) {
+			/*
+			 * expand file
+			 */
+			error = exfat_cont_expand(inode, attrs->ia_size);
+			if (error)
+				return error;
+		} else {
+			/*
+			 * shrink file
+			 */
+			truncate_setsize(inode, attrs->ia_size);
+			exfat_truncate_blocks(inode, attrs->ia_size);
+		}
+	}
+
+	setattr_copy(&nop_mnt_idmap, inode, attrs);
+	mark_inode_dirty(inode);
+	return 0;
+}
diff -Nruw linux-6.13.12-fbx/fs/exfat-fbx./inode.c linux-6.13.12-fbx/fs/exfat-fbx/inode.c
--- linux-6.13.12-fbx/fs/exfat-fbx./inode.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/fs/exfat-fbx/inode.c	2025-09-25 17:40:36.559372145 +0200
@@ -0,0 +1,281 @@
+/*
+ * inode.c for exfat
+ * Created by <nschichan@freebox.fr> on Wed Jul 24 16:15:52 2013
+ */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/writeback.h>
+#include <linux/hash.h>
+
+#include "exfat_fs.h"
+#include "exfat.h"
+
+static struct kmem_cache *exfat_inodes_cachep;
+
+/*
+ * inode callbacks.
+ */
+struct inode *exfat_alloc_inode(struct super_block *sb)
+{
+	/* GFP_NOFS: this can run under filesystem memory reclaim */
+	struct exfat_inode_info *ei = kmem_cache_alloc(exfat_inodes_cachep,
+						       GFP_NOFS);
+
+	if (!ei)
+		return NULL;
+
+	/* return the embedded VFS inode; callers only see that part */
+	return &ei->vfs_inode;
+}
+
+/* RCU callback: free the inode once a grace period has elapsed */
+static void exfat_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+
+	kmem_cache_free(exfat_inodes_cachep, EXFAT_I(inode));
+}
+
+/* defer the actual free to RCU so lockless lookups remain safe */
+void exfat_destroy_inode(struct inode *_inode)
+{
+	struct exfat_inode_info *inode = EXFAT_I(_inode);
+
+	call_rcu(&inode->vfs_inode.i_rcu, exfat_i_callback);
+}
+
+/*
+ * slab constructor: runs once when a cache object is first created,
+ * not on every allocation, so only one-time-safe init belongs here.
+ */
+static void exfat_inode_init_once(void *ptr)
+{
+	struct exfat_inode_info *info = ptr;
+
+	INIT_HLIST_NODE(&info->hash_list);
+	exfat_inode_cache_init(&info->vfs_inode);
+	inode_init_once(&info->vfs_inode);
+}
+
+/*
+ * inode cache create/destroy.
+ */
+/* create the inode slab cache; called once at module init */
+int exfat_init_inodes(void)
+{
+	exfat_inodes_cachep = kmem_cache_create("exfat-inodes",
+				       sizeof (struct exfat_inode_info), 0,
+				       SLAB_RECLAIM_ACCOUNT,
+				       exfat_inode_init_once);
+	if (!exfat_inodes_cachep)
+		return -ENOMEM;
+	return 0;
+}
+
+/* destroy the inode slab cache; called once at module exit */
+void exfat_exit_inodes(void)
+{
+	kmem_cache_destroy(exfat_inodes_cachep);
+}
+
+/* no exfat-specific drop logic: defer to the generic VFS policy */
+int exfat_drop_inode(struct inode *inode)
+{
+	return generic_drop_inode(inode);
+}
+
+/*
+ * final teardown of an in-core inode: drop the page cache, release
+ * the cluster chain if the file was unlinked, then unhash it.
+ */
+void exfat_evict_inode(struct inode *inode)
+{
+	truncate_inode_pages_final(&inode->i_data);
+	if (!inode->i_nlink) {
+		/* unlinked file: return every cluster to the allocator */
+		inode->i_size = 0;
+		exfat_free_clusters_inode(inode, 0);
+	}
+	invalidate_inode_buffers(inode);
+	clear_inode(inode);
+	exfat_remove_inode_hash(inode);
+	exfat_inode_cache_drop(inode);
+}
+
+/* hash the on-disk position of a dir entry into the sb inode table */
+static u32 exfat_hash(loff_t disk_pos)
+{
+	return hash_32(disk_pos, EXFAT_HASH_BITS);
+}
+
+/*
+ * look up a cached in-core inode by the disk offset of its directory
+ * entry. Returns a referenced inode, or NULL when none is cached.
+ */
+struct inode *exfat_iget(struct super_block *sb, loff_t disk_pos)
+{
+	struct exfat_inode_info *info;
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct hlist_head *head = sbi->inode_hash + exfat_hash(disk_pos);
+	struct inode *ret = NULL;
+
+
+	spin_lock(&sbi->inode_hash_lock);
+	hlist_for_each_entry (info, head, hash_list) {
+		if (info->iloc.disk_offs[0] != disk_pos)
+			continue ;
+		/* igrab() fails if the inode is being torn down */
+		ret = igrab(&info->vfs_inode);
+		if (ret)
+			break;
+	}
+	spin_unlock(&sbi->inode_hash_lock);
+	return ret;
+}
+
+/* add an inode to the per-sb hash, keyed by its dir entry position */
+void exfat_insert_inode_hash(struct inode *inode)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct hlist_head *head = sbi->inode_hash +
+		exfat_hash(info->iloc.disk_offs[0]);
+
+	spin_lock(&sbi->inode_hash_lock);
+	hlist_add_head(&info->hash_list, head);
+	spin_unlock(&sbi->inode_hash_lock);
+}
+
+/*
+ * remove an inode from the per-sb hash. disk_offs[0] is also zeroed
+ * so a later write_inode() can detect the entry is gone and bail out.
+ */
+void exfat_remove_inode_hash(struct inode *inode)
+{
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+
+	spin_lock(&sbi->inode_hash_lock);
+	info->iloc.disk_offs[0] = 0;
+	hlist_del_init(&info->hash_list);
+	spin_unlock(&sbi->inode_hash_lock);
+}
+
+/*
+ * calculate the number of links in a directory. this is the number of
+ * EXFAT_FILEDIR_ENTRY typed elements in the directory stream. This
+ * does not include the '.' and '..' entries.
+ */
+loff_t exfat_dir_links(struct inode *inode)
+{
+	size_t ret = 0;
+	struct exfat_dir_ctx dctx;
+	int error;
+	bool end;
+
+	error = exfat_init_dir_ctx(inode, &dctx, 0);
+	if (error)
+		return error;
+
+	/* default to -EIO: a NULL entry without 'end' is a read error */
+	error = -EIO;
+	for (;;) {
+		struct exfat_filedir_entry *e =
+			__exfat_dentry_next(&dctx, E_EXFAT_FILEDIR, 0xff,
+					    true, &end);
+		if (!e) {
+			/* 'end' set means clean end-of-directory */
+			if (end)
+				error = 0;
+			goto out;
+		}
+		++ret;
+	}
+out:
+	exfat_cleanup_dir_ctx(&dctx);
+	if (error)
+		return error;
+	/* count of FILEDIR entries; negative value means error */
+	return ret;
+}
+
+/*
+ * compute a starting cluster hint for the allocator, trying to keep
+ * a file's clusters contiguous. Returns 0 and fills *out_hint, or a
+ * negative error from the FAT walk.
+ */
+int exfat_get_cluster_hint(struct inode *inode, u32 *out_hint)
+{
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	int error;
+	u32 first_cluster = info->first_cluster;
+
+
+	if (!first_cluster) {
+		/*
+		 * empty file, return a cluster likely to be free.
+		 */
+		*out_hint = EXFAT_SB(inode->i_sb)->prev_free_cluster + 2;
+		return 0;
+	}
+
+	if (info->flags & EXFAT_I_FAT_INVALID) {
+		/*
+		 * not fat run, all clusters are contiguous, set hint
+		 * to next last file cluster.
+		 */
+		*out_hint = first_cluster + info->allocated_clusters;
+		return 0;
+	}
+
+	/*
+	 * fat run available, walk it to get the last physical cluster
+	 * address and set hint to the immediate next physical
+	 * cluster.
+	 */
+	error = exfat_get_fat_cluster(inode, info->allocated_clusters - 1,
+				      out_hint);
+	if (error)
+		return error;
+	(*out_hint)++;
+	return 0;
+}
+
+/*
+ * write the inode's metadata back to its on-disk directory entry set
+ * (FILEDIR + STREAM_EXT entries). Caller must hold the superblock
+ * lock; 'sync' forces the buffers to disk before returning.
+ */
+int __exfat_write_inode(struct inode *inode, bool sync)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	/* VLA sized by the entry set; nr_secondary is bounded at lookup */
+	struct dir_entry_buffer entries[info->iloc.nr_secondary];
+	int error;
+	struct exfat_filedir_entry *efd;
+	struct exfat_stream_extension_entry *esx;
+	struct timespec64 ts;
+	u16 checksum;
+
+	/* the root directory has no directory entry to update */
+	if (inode->i_ino == EXFAT_ROOT_INO)
+		return 0;
+
+	if (info->iloc.disk_offs[0] == 0) {
+		/*
+		 * write_inode() to unlinked inode: don't corrupt
+		 * superblock.
+		 */
+		return 0;
+	}
+
+	error = exfat_get_dir_entry_buffers(inode, &info->iloc,
+					    entries, info->iloc.nr_secondary);
+	if (error)
+		return error;
+
+	/* mirror the write bits into the on-disk read-only attribute */
+	if (inode->i_mode & S_IWUGO)
+		info->attributes &= ~E_EXFAT_ATTR_RO;
+	else
+		info->attributes |= E_EXFAT_ATTR_RO;
+
+	efd = entries[0].start;
+	esx = entries[1].start;
+
+	efd->attributes = __cpu_to_le16(info->attributes);
+	/* NOTE(review): data_length is stored twice; the first store
+	 * below is redundant with the combined assignment after it. */
+	esx->data_length = __cpu_to_le64(inode->i_size);
+	esx->valid_data_length = esx->data_length =
+		__cpu_to_le64(inode->i_size);
+	esx->flags = info->flags;
+	esx->first_cluster = __cpu_to_le32(info->first_cluster);
+
+	ts = inode_get_ctime(inode);
+	exfat_write_time(sbi, &ts, &efd->create, &efd->create_10ms,
+			 &efd->create_tz_offset);
+	ts = inode_get_mtime(inode);
+	exfat_write_time(sbi, &ts, &efd->modified,
+			 &efd->modified_10ms, &efd->modified_tz_offset);
+	ts = inode_get_atime(inode);
+	exfat_write_time(sbi, &ts, &efd->accessed, NULL,
+			 &efd->accessed_tz_offset);
+
+	/* recompute the entry-set checksum after all fields are set */
+	checksum = exfat_dir_entries_checksum(entries, info->iloc.nr_secondary);
+	efd->set_checksum = __cpu_to_le16(checksum);
+
+	exfat_dirty_dir_entries(entries, info->iloc.nr_secondary, sync);
+
+
+	return 0;
+}
+
+/* VFS ->write_inode: take the superblock lock and flush metadata */
+int exfat_write_inode(struct inode *inode, struct writeback_control *wbc)
+{
+	int ret;
+
+	exfat_lock_super(inode->i_sb);
+	/* WB_SYNC_ALL means the caller wants the write on stable storage */
+	ret = __exfat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
+	exfat_unlock_super(inode->i_sb);
+	return ret;
+}
diff -Nruw linux-6.13.12-fbx/fs/exfat-fbx./namei.c linux-6.13.12-fbx/fs/exfat-fbx/namei.c
--- linux-6.13.12-fbx/fs/exfat-fbx./namei.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/fs/exfat-fbx/namei.c	2025-09-25 17:40:36.559372145 +0200
@@ -0,0 +1,945 @@
+/*
+ * namei.c for exfat
+ * Created by <nschichan@freebox.fr> on Tue Aug 20 12:00:27 2013
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/buffer_head.h>
+#include <linux/nls.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+static u16 exfat_filename_hash_cont(struct super_block *sb,
+				    const __le16 *name, u16 hash, size_t len);
+
+
+/*
+ * convert a unix timespec to the on-disk exfat format: packed
+ * little-endian date/time plus optional 10ms component and timezone
+ * offset byte. time_cs may be NULL when the field has no 10ms part.
+ */
+void exfat_write_time(struct exfat_sb_info *sbi, struct timespec64 *ts,
+		      __le32 *datetime, u8 *time_cs, u8 *tz_offset)
+{
+	u32 cpu_datetime;
+
+	exfat_time_2exfat(sbi, ts, &cpu_datetime, time_cs, tz_offset);
+	*datetime = __cpu_to_le32(cpu_datetime);
+}
+
+/* convert an on-disk exfat timestamp back into a unix timespec */
+static void exfat_read_time(struct timespec64 *ts, __le32 datetime, u8 time_cs,
+			    u8 tz_offset)
+{
+	u32 cpu_datetime = __le32_to_cpu(datetime);
+	exfat_time_2unix(ts, cpu_datetime, time_cs, tz_offset);
+}
+
+/*
+ * zero-fill every sector of a cluster through the buffer cache,
+ * optionally syncing each buffer. Used on fresh directory clusters.
+ */
+static int exfat_zero_cluster(struct super_block *sb, u32 cluster, bool sync)
+{
+	sector_t start = exfat_cluster_sector(EXFAT_SB(sb), cluster);
+	sector_t end = start + EXFAT_SB(sb)->sectors_per_cluster;
+	sector_t sect;
+
+	for (sect = start; sect < end; ++sect) {
+		struct buffer_head *bh = sb_bread(sb, sect);
+		if (!bh) {
+			exfat_msg(sb, KERN_WARNING,
+				  "unable to read sector %llu for zeroing.",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+		memset(bh->b_data, 0, bh->b_size);
+		mark_buffer_dirty(bh);
+		if (sync)
+			sync_dirty_buffer(bh);
+		brelse(bh);
+	}
+	return 0;
+}
+
+/*
+ * use per superblock fmask or dmask, depending on provided entry
+ * attribute, to restrict the provided mode even more. The on-disk
+ * read-only attribute clears all write bits.
+ */
+mode_t exfat_make_mode(struct exfat_sb_info *sbi, mode_t mode, u16 attrs)
+{
+	if (attrs & E_EXFAT_ATTR_DIRECTORY)
+		mode = (mode & ~sbi->options.dmask) | S_IFDIR;
+	else
+		mode = (mode & ~sbi->options.fmask) | S_IFREG;
+	if (attrs & E_EXFAT_ATTR_RO)
+		mode &= ~S_IWUGO;
+	return mode;
+}
+
+/*
+ * populate inode fields.
+ */
+static struct inode *exfat_populate_inode(struct super_block *sb,
+			  const struct exfat_filedir_entry *efd,
+			  const struct exfat_stream_extension_entry *esx,
+			  const struct exfat_iloc *iloc)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct inode *inode;
+	struct timespec64 ts;
+
+	/* reuse an already-cached inode for this dir entry if any */
+	inode = exfat_iget(sb, iloc->disk_offs[0]);
+	if (inode)
+		return inode;
+
+	inode = new_inode(sb);
+	if (!inode)
+		return NULL;
+
+	/* inode numbers are synthetic: exfat has none on disk */
+	inode->i_ino = iunique(sb, EXFAT_ROOT_INO);
+	EXFAT_I(inode)->first_cluster = __le32_to_cpu(esx->first_cluster);
+	EXFAT_I(inode)->flags = esx->flags;
+	EXFAT_I(inode)->iloc = *iloc;
+	EXFAT_I(inode)->attributes = __le16_to_cpu(efd->attributes);
+
+	/* derive cluster count from size, rounding up partial clusters */
+	inode->i_size = __le64_to_cpu(esx->data_length);
+	EXFAT_I(inode)->allocated_clusters = inode->i_size >> sbi->clusterbits;
+	if (inode->i_size & sbi->clustermask)
+		EXFAT_I(inode)->allocated_clusters++;
+	inode->i_blocks = EXFAT_I(inode)->allocated_clusters <<
+		(sbi->clusterbits - 9);
+	EXFAT_I(inode)->mmu_private = inode->i_size;
+
+	/* ownership and mode come from mount options, not from disk */
+	inode->i_uid = sbi->options.uid;
+	inode->i_gid = sbi->options.gid;
+	inode->i_mode = exfat_make_mode(sbi, S_IRWXUGO,
+					EXFAT_I(inode)->attributes);
+
+	if (EXFAT_I(inode)->attributes & E_EXFAT_ATTR_DIRECTORY) {
+		/* +2 accounts for the implicit '.' and '..' links */
+		loff_t nlinks = exfat_dir_links(inode);
+		if (nlinks < 0)
+			goto iput;
+		set_nlink(inode, nlinks + 2);
+	} else
+		set_nlink(inode, 1);
+
+	if (esx->data_length != esx->valid_data_length)
+		exfat_msg(sb, KERN_WARNING, "data length (%llu) != valid data "
+			  "length (%llu)", __le64_to_cpu(esx->data_length),
+			  __le64_to_cpu(esx->valid_data_length));
+
+	if (S_ISDIR(inode->i_mode)) {
+		inode->i_fop = &exfat_dir_operations;
+		inode->i_op = &exfat_dir_inode_operations;
+	} else {
+		/* until we support write */
+		inode->i_fop = &exfat_file_operations;
+		inode->i_op = &exfat_file_inode_operations;
+		inode->i_data.a_ops = &exfat_address_space_operations;
+	}
+
+
+	exfat_read_time(&ts, efd->create, efd->create_10ms,
+			efd->create_tz_offset);
+	inode_set_ctime_to_ts(inode, ts);
+	exfat_read_time(&ts, efd->modified, efd->modified_10ms,
+			efd->modified_tz_offset);
+	inode_set_mtime_to_ts(inode, ts);
+	exfat_read_time(&ts, efd->accessed, 0,
+			efd->accessed_tz_offset);
+	inode_set_atime_to_ts(inode, ts);
+
+	/* hash both in the exfat table and the VFS inode hash */
+	exfat_insert_inode_hash(inode);
+	insert_inode_hash(inode);
+	return inode;
+iput:
+	iput(inode);
+	return NULL;
+}
+
+/*
+ * lookup an inode.
+ */
+/*
+ * VFS ->lookup: scan the parent directory for an entry whose UTF-16
+ * name matches dentry's name. On a match the inode is instantiated
+ * and d_add()ed; a miss returns NULL (negative dentry), other errors
+ * are returned as ERR_PTR.
+ */
+struct dentry *exfat_inode_lookup(struct inode *parent, struct dentry *dentry,
+				  unsigned int flags)
+{
+	struct super_block *sb = dentry->d_sb;
+	struct exfat_dir_ctx dctx;
+	int error;
+	struct exfat_filedir_entry efd;
+	struct exfat_stream_extension_entry esx;
+	__le16 *name = __getname();
+	__le16 *utf16_name = __getname();
+	unsigned int utf16_name_length;
+	__le16 name_hash;
+
+	exfat_lock_super(parent->i_sb);
+
+	if (!name || !utf16_name) {
+		error = -ENOMEM;
+		goto putnames;
+	}
+
+	utf16_name_length = utf8s_to_utf16s(dentry->d_name.name,
+					    dentry->d_name.len,
+					    UTF16_LITTLE_ENDIAN,
+					    utf16_name, 255 + 2);
+	/* exfat names are limited to 255 UTF-16 units */
+	if (utf16_name_length > 255) {
+		error = -ENAMETOOLONG;
+		goto putnames;
+	}
+
+	/*
+	 * get the name hash of the wanted inode early so that we can
+	 * skip entries with only an efd and an esx entry.
+	 */
+	name_hash = __cpu_to_le16(exfat_filename_hash_cont(sb, utf16_name, 0,
+							   utf16_name_length));
+
+	/*
+	 * create a dir ctx from the parent so that we can iterate on
+	 * it.
+	 */
+	error = exfat_init_dir_ctx(parent, &dctx, 0);
+	if (error)
+		goto putnames;
+
+	for (;;) {
+		u32 name_length;
+		struct inode *inode;
+		u16 calc_checksum;
+		u16 expect_checksum;
+		struct exfat_iloc iloc;
+
+		memset(&iloc, 0, sizeof (iloc));
+		/*
+		 * get filedir and stream extension entries.
+		 */
+		error = exfat_dentry_next(&efd, &dctx, E_EXFAT_FILEDIR, true);
+		if (error < 0)
+			/* end of directory reached, or other error */
+			goto cleanup;
+
+		/* 18 secondary entries max: 1 esx + 17 name entries */
+		error = -EINVAL;
+		if (efd.secondary_count > 18)
+			goto cleanup;
+
+		iloc.file_off = exfat_dctx_fpos(&dctx);
+		iloc.disk_offs[0] = exfat_dctx_dpos(&dctx);
+		iloc.nr_secondary = efd.secondary_count + 1;
+
+		error = exfat_dentry_next(&esx, &dctx, E_EXFAT_STREAM_EXT,
+					  false);
+		if (error)
+			goto cleanup;
+
+		/* both sides are little-endian: compare without swabbing */
+		if (esx.name_hash != name_hash)
+			/*
+			 * stored name hash is not the same as the
+			 * wanted hash: no point in processing the
+			 * remaining entries for the current efd/esx
+			 * any further.
+			 */
+			continue ;
+
+		/*
+		 * now that the hash matches it is ok to update the
+		 * checksum for the efd and esx entries.
+		 */
+		expect_checksum = __le16_to_cpu(efd.set_checksum);
+		calc_checksum = exfat_direntry_checksum(&efd, 0, true);
+
+		calc_checksum = exfat_direntry_checksum(&esx,
+							calc_checksum, false);
+		iloc.disk_offs[1] = exfat_dctx_dpos(&dctx);
+
+		/*
+		 * fetch name.
+		 */
+		name_length = esx.name_length;
+		error = __exfat_get_name(&dctx, name_length, name,
+					 &calc_checksum, &iloc);
+		if (error)
+			goto cleanup;
+
+		if (calc_checksum != expect_checksum) {
+			exfat_msg(dctx.sb, KERN_INFO, "checksum: "
+				  "calculated %04x, expect %04x",
+				  calc_checksum, expect_checksum);
+			error = -EIO;
+			goto cleanup;
+		}
+
+
+		/* hash can collide: confirm with a full name compare */
+		if (utf16_name_length != name_length)
+			continue ;
+
+		if (memcmp(utf16_name, name, name_length * sizeof (__le16)))
+			continue ;
+
+		inode = exfat_populate_inode(sb, &efd, &esx, &iloc);
+		if (inode) {
+			d_add(dentry, inode);
+			error = 0;
+		} else
+			error = -EIO;
+		goto cleanup;
+	}
+
+cleanup:
+	exfat_cleanup_dir_ctx(&dctx);
+putnames:
+	if (name)
+		__putname(name);
+	if (utf16_name)
+		__putname(utf16_name);
+	exfat_unlock_super(parent->i_sb);
+	/* -ENOENT maps to a negative dentry, not a lookup failure */
+	if (error && error != -ENOENT)
+		return ERR_PTR(error);
+	return NULL;
+}
+
+/*
+ * find nr unused directory entries (type & 0x80 == 0).
+ */
+/*
+ * locate nr consecutive unused entries in directory 'inode' and fill
+ * iloc with their on-disk positions, growing the directory by one or
+ * two clusters and retrying when no run is found.
+ */
+static int exfat_find_dir_iloc(struct inode *inode, int nr,
+			       struct exfat_iloc *iloc)
+{
+	struct exfat_dir_ctx dctx;
+	bool end = false;
+	int error;
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	u32 nr_new_clusters, i;
+	u32 new_clusters[2];
+	u32 hint_cluster;
+
+retry:
+	memset(iloc, 0, sizeof (*iloc));
+	iloc->nr_secondary = nr;
+
+	error = exfat_init_dir_ctx(inode, &dctx, 0);
+	if (error)
+		return error;
+
+	while (1) {
+		int nr_free;
+		void *ent;
+
+		/* type mask 0x80 clear means the entry slot is unused */
+		ent = __exfat_dentry_next(&dctx, 0x00, 0x80, true, &end);
+		if (end)
+			break;
+		if (!ent) {
+			exfat_cleanup_dir_ctx(&dctx);
+			return -EIO;
+		}
+
+		/* record the run of free slots starting here */
+		nr_free = 1;
+		iloc->file_off = exfat_dctx_fpos(&dctx);
+		iloc->disk_offs[0] = exfat_dctx_dpos(&dctx);
+		while (__exfat_dentry_next(&dctx, 0x00, 0x80, false, &end)
+		       != NULL && nr_free < nr) {
+			iloc->disk_offs[nr_free] = exfat_dctx_dpos(&dctx);
+			++nr_free;
+		}
+		if (nr_free == nr) {
+			/*
+			 * we found enough consecutive free entries.
+			 */
+			exfat_cleanup_dir_ctx(&dctx);
+			return 0;
+		}
+
+	}
+
+	/*
+	 * not enough consecutive free entries found, kick the cluster
+	 * allocator and retry.
+	 */
+	exfat_cleanup_dir_ctx(&dctx);
+
+	/*
+	 * with the smallest cluster size, a file can take more than
+	 * two clusters. allocate two in that case regardless of what
+	 * is needed to make code simpler.
+	 */
+	switch (sbi->clustersize) {
+	case 512:
+		nr_new_clusters = 2;
+		break;
+	default:
+		nr_new_clusters = 1;
+		break;
+	}
+
+	/*
+	 * get a hint cluster for the cluster allocator.
+	 */
+	error = exfat_get_cluster_hint(inode, &hint_cluster);
+	if (error)
+		return error;
+
+	/*
+	 * perform the allocation.
+	 */
+	error = exfat_alloc_clusters(inode, hint_cluster, new_clusters,
+				     nr_new_clusters);
+	if (error)
+		return error;
+
+	/*
+	 * fill new cluster(s) with zero.
+	 */
+	for (i = 0; i < nr_new_clusters; ++i)
+		exfat_zero_cluster(inode->i_sb, new_clusters[i], false);
+
+	/*
+	 * update size and mark inode as dirty so that write_inode()
+	 * can update it's size, and the other fields updated by
+	 * exfat_alloc_clusters.
+	 */
+	inode->i_size += nr_new_clusters << sbi->clusterbits;
+	mark_inode_dirty(inode);
+
+	/*
+	 * kick the whole place search again, this time with the newly
+	 * allocated clusters.
+	 */
+	goto retry;
+}
+
+/*
+ * set up one dir_entry_buffer per directory entry described by iloc:
+ * read each entry's sector and record the in-buffer offset. On
+ * failure every buffer head grabbed so far is released.
+ */
+int exfat_get_dir_entry_buffers(struct inode *dir, struct exfat_iloc *iloc,
+				struct dir_entry_buffer *entries,
+				size_t nr_entries)
+{
+	size_t i;
+	int error;
+	struct exfat_sb_info *sbi = EXFAT_SB(dir->i_sb);
+
+	BUG_ON(iloc->nr_secondary != nr_entries);
+
+	memset(entries, 0, sizeof (*entries) * nr_entries);
+	for (i = 0; i < nr_entries; ++i) {
+		sector_t sector = iloc->disk_offs[i] >> sbi->sectorbits;
+
+		entries[i].off = iloc->disk_offs[i] & sbi->sectormask;
+		entries[i].bh = sb_bread(dir->i_sb, sector);
+		if (!entries[i].bh) {
+			error = -EIO;
+			goto fail;
+		}
+		/* start points at the 32-byte entry inside the buffer */
+		entries[i].start = entries[i].bh->b_data + entries[i].off;
+	}
+	return 0;
+
+fail:
+	/* entries[] was zeroed, so unset bh pointers are NULL here */
+	for (i = 0; i < nr_entries; ++i)
+		if (entries[i].bh)
+			brelse(entries[i].bh);
+	return error;
+}
+
+/*
+ * continue an exfat name hash over 'len' UTF-16 units, upcasing each
+ * unit first (the on-disk hash is case-insensitive). Pass hash=0 to
+ * start a new hash.
+ */
+static u16 exfat_filename_hash_cont(struct super_block *sb,
+				    const __le16 *name, u16 hash, size_t len)
+{
+	while (len) {
+		u16 c = __le16_to_cpu(exfat_upcase_convert(sb, *name));
+
+		/* rotate-right by one, then add each byte in turn */
+		hash = ((hash << 15) | (hash >> 1)) + (c & 0xff);
+		hash = ((hash << 15) | (hash >> 1)) + (c >> 8);
+		--len;
+		++name;
+	}
+	return hash;
+}
+
+/*
+ * checksum a full directory entry set. The first (primary) entry is
+ * handled specially by exfat_direntry_checksum, which skips the
+ * checksum field itself.
+ */
+u16 exfat_dir_entries_checksum(struct dir_entry_buffer *entries, u32 nr)
+{
+	u32 checksum = 0;
+
+	if (nr) {
+		checksum = exfat_direntry_checksum(entries->start,
+						   checksum, true);
+		--nr;
+		++entries;
+	}
+	while (nr) {
+		checksum = exfat_direntry_checksum(entries->start,
+						   checksum, false);
+		--nr;
+		++entries;
+	}
+	return checksum;
+}
+
+/*
+ * setup exfat_filedir_entry and exfat_stream_extension_entry for a
+ * new entry, with attribute attrs, and named name.
+ */
+static void exfat_fill_dir_entries(struct super_block *sb,
+				  struct dir_entry_buffer *entries,
+				  size_t nr_entries, u8 attrs,
+				  __le16 *name, int name_length)
+{
+	struct exfat_filedir_entry *efd;
+	struct exfat_stream_extension_entry *esx;
+	int i;
+	u16 name_hash;
+	u16 checksum;
+	struct timespec64 ts;
+
+        ktime_get_coarse_real_ts64(&ts);
+
+	efd = entries[0].start;
+	esx = entries[1].start;
+
+	/*
+	 * fill exfat filedir entry
+	 */
+	memset(efd, 0, sizeof (*efd));
+	efd->type = E_EXFAT_FILEDIR;
+	efd->secondary_count = nr_entries - 1;
+	efd->set_checksum = 0;
+	efd->attributes = __cpu_to_le16(attrs);
+
+	/*
+	 * update file directory entry times: a new entry starts with
+	 * create == modified == accessed.
+	 */
+	efd = entries[0].start;
+	exfat_write_time(EXFAT_SB(sb), &ts, &efd->create, &efd->create_10ms,
+			 &efd->create_tz_offset);
+	efd->modified = efd->accessed = efd->create;
+	efd->modified_10ms = efd->create_10ms;
+	efd->accessed_tz_offset = efd->modified_tz_offset =
+		efd->create_tz_offset;
+
+	/*
+	 * fill exfat stream extension entry; new files start with no
+	 * clusters allocated.
+	 */
+	memset(esx, 0, sizeof (*esx));
+	esx->type = E_EXFAT_STREAM_EXT;
+	esx->flags = EXFAT_I_ALLOC_POSSIBLE;
+	esx->first_cluster = __cpu_to_le32(0);
+	esx->data_length = __cpu_to_le64(0);
+	esx->valid_data_length = __cpu_to_le64(0);
+	esx->name_length = name_length;
+
+	/*
+	 * fill name fragments: 15 UTF-16 units per filename entry,
+	 * hashing as we go.
+	 */
+	name_hash = 0;
+	for (i = 0; i < nr_entries - 2; ++i, name_length -= 15) {
+		struct exfat_filename_entry *efn = entries[i + 2].start;
+		int len = 15;
+
+		if (name_length < 15)
+			len = name_length;
+
+		memset(efn, 0, sizeof (*efn));
+		efn->type = E_EXFAT_FILENAME;
+		memcpy(efn->name_frag, name + i * 15, len * sizeof (__le16));
+		name_hash = exfat_filename_hash_cont(sb, efn->name_frag,
+						     name_hash, len);
+	}
+	esx->name_hash = __cpu_to_le16(name_hash);
+
+	/* checksum last, once every field of the set is final */
+	checksum = exfat_dir_entries_checksum(entries, nr_entries);
+	efd->set_checksum = __cpu_to_le16(checksum);
+}
+
+/*
+ * mark all buffer heads in the entries array as dirty. optionally
+ * sync them if required.
+ */
+void exfat_dirty_dir_entries(struct dir_entry_buffer *entries,
+			     size_t nr_entries, bool sync)
+{
+	size_t i;
+
+	for (i = 0; i < nr_entries; ++i) {
+		mark_buffer_dirty(entries[i].bh);
+		if (sync)
+			sync_dirty_buffer(entries[i].bh);
+		/* releases the reference taken by get_dir_entry_buffers */
+		brelse(entries[i].bh);
+	}
+}
+
+/*
+ * cleanup all buffer heads in entries.
+ */
+/* error path: drop buffer references without dirtying anything */
+static void exfat_cleanup_dir_entries(struct dir_entry_buffer *entries,
+				     size_t nr_entries)
+{
+	size_t i;
+
+	for (i = 0; i < nr_entries; ++i)
+		brelse(entries[i].bh);
+}
+
+/*
+ * create an inode
+ */
+/*
+ * common creation path for files and directories: convert the name
+ * to UTF-16, reserve directory entry slots, write the entry set and
+ * instantiate the new in-core inode. The 'mode' parameter is
+ * currently unused — permissions derive from mount options.
+ */
+static int __exfat_inode_create(struct inode *dir, struct dentry *dentry,
+				umode_t mode, bool is_dir)
+{
+	int nr_entries;
+	/* 19 = 1 filedir + 1 stream ext + 17 name entries (255/15) */
+	struct dir_entry_buffer entries[19];
+	struct inode *new;
+	struct exfat_iloc iloc;
+	int error;
+	u8 attr = 0;
+	__le16 *utf16_name;
+	int utf16_name_length;
+
+	if (is_dir)
+		attr |= E_EXFAT_ATTR_DIRECTORY;
+
+	exfat_lock_super(dir->i_sb);
+
+	utf16_name = __getname();
+	if (!utf16_name) {
+		error = -ENOMEM;
+		goto unlock_super;
+	}
+
+	utf16_name_length = utf8s_to_utf16s(dentry->d_name.name,
+					    dentry->d_name.len,
+					    UTF16_LITTLE_ENDIAN, utf16_name,
+					    255 + 2);
+	if (utf16_name_length < 0) {
+		error = utf16_name_length;
+		goto putname;
+	}
+	if (utf16_name_length > 255) {
+		error = -ENAMETOOLONG;
+		goto putname;
+	}
+
+
+	nr_entries = 2 + DIV_ROUND_UP(utf16_name_length, 15);
+	if (nr_entries > 19) {
+		error = -ENAMETOOLONG;
+		goto putname;
+	}
+
+	/* reserve consecutive free slots in the parent directory */
+	error = exfat_find_dir_iloc(dir, nr_entries, &iloc);
+	if (error < 0)
+		goto putname;
+
+	error = exfat_get_dir_entry_buffers(dir, &iloc, entries, nr_entries);
+	if (error)
+		goto putname;
+	exfat_fill_dir_entries(dir->i_sb, entries, nr_entries, attr,
+				       utf16_name, utf16_name_length);
+
+	/*
+	 * create an inode with it.
+	 */
+	error = -ENOMEM;
+	new = exfat_populate_inode(dir->i_sb, entries[0].start,
+				   entries[1].start, &iloc);
+	if (!new)
+		goto cleanup;
+	inc_nlink(dir);
+	d_instantiate(dentry, new);
+
+	/*
+	 * update directory atime / ctime.
+	 */
+	inode_set_mtime_to_ts(dir, inode_set_ctime_current(dir));
+	inode_set_atime_to_ts(dir, inode_get_mtime(dir));
+	if (IS_DIRSYNC(dir))
+		__exfat_write_inode(dir, true);
+	else
+		mark_inode_dirty(dir);
+
+	/*
+	 * write to disk
+	 */
+	exfat_dirty_dir_entries(entries, nr_entries, false);
+	__putname(utf16_name);
+	exfat_unlock_super(dir->i_sb);
+	return 0;
+
+cleanup:
+	exfat_cleanup_dir_entries(entries, nr_entries);
+putname:
+	__putname(utf16_name);
+unlock_super:
+	exfat_unlock_super(dir->i_sb);
+	return error;
+}
+
+/* VFS ->create: regular file creation */
+int exfat_inode_create(struct mnt_idmap *ns, struct inode *dir,
+		       struct dentry *dentry, umode_t mode, bool excl)
+{
+	return __exfat_inode_create(dir, dentry, mode, false);
+}
+
+/* VFS ->mkdir: directory creation */
+int exfat_inode_mkdir(struct mnt_idmap *ns, struct inode *dir,
+		      struct dentry *dentry, umode_t mode)
+{
+	return __exfat_inode_create(dir, dentry, mode, true);
+}
+
+/*
+ * inode unlink: find all direntry buffers and clear seventh bit of
+ * the entry type to mark them as unused.
+ */
+static int __exfat_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	struct dir_entry_buffer entries[info->iloc.nr_secondary];
+	int error;
+	u32 i;
+
+	error = exfat_get_dir_entry_buffers(inode, &info->iloc,
+					    entries, info->iloc.nr_secondary);
+	if (error)
+		return error;
+
+	/* clearing bit 7 of the type byte marks an entry as unused */
+	for (i = 0; i < info->iloc.nr_secondary; ++i) {
+		u8 *type = entries[i].start;
+
+		*type &= 0x7f;
+	}
+
+	drop_nlink(dir);
+	/* cluster chain is freed later, in evict_inode */
+	clear_nlink(inode);
+	inode_set_mtime_to_ts(inode, current_time(inode));
+	inode_set_atime_to_ts(inode, inode_get_mtime(inode));
+
+	/*
+	 * update atime & mtime for parent directory.
+	 */
+	inode_set_mtime_to_ts(dir, current_time(dir));
+	inode_set_atime_to_ts(dir, inode_get_mtime(dir));
+	if (IS_DIRSYNC(dir))
+		__exfat_write_inode(dir, true);
+	else
+		mark_inode_dirty(dir);
+
+	exfat_dirty_dir_entries(entries, info->iloc.nr_secondary, false);
+	exfat_remove_inode_hash(inode);
+	return 0;
+}
+
+/* VFS ->unlink: locked wrapper around __exfat_inode_unlink */
+int exfat_inode_unlink(struct inode *dir, struct dentry *dentry)
+{
+	int ret;
+
+	exfat_lock_super(dir->i_sb);
+	ret = __exfat_inode_unlink(dir, dentry);
+	exfat_unlock_super(dir->i_sb);
+	return ret;
+}
+
+/*
+ * inode rmdir: check that links is not greater than 2 (meaning that
+ * the directory is empty) and invoke unlink.
+ */
+static int __exfat_inode_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	struct inode *inode = dentry->d_inode;
+
+	/* nlink == 2 is '.' and '..' only, i.e. an empty directory */
+	if (inode->i_nlink > 2)
+		return -ENOTEMPTY;
+
+	return __exfat_inode_unlink(dir, dentry);
+}
+
+/* VFS ->rmdir: locked wrapper around __exfat_inode_rmdir */
+int exfat_inode_rmdir(struct inode *dir, struct dentry *dentry)
+{
+	int ret;
+
+	exfat_lock_super(dir->i_sb);
+	ret = __exfat_inode_rmdir(dir, dentry);
+	exfat_unlock_super(dir->i_sb);
+	return ret;
+}
+
+/*
+ * VFS ->rename: allocate a new directory entry set under new_dir,
+ * copy the old filedir/stream-ext entries into it, rewrite the name
+ * fragments, then mark the old set unused. The in-core inode keeps
+ * its identity and is re-hashed at the new on-disk location.
+ */
+int exfat_rename(struct mnt_idmap *ns,
+		 struct inode *old_dir, struct dentry *old_dentry,
+		 struct inode *new_dir, struct dentry *new_dentry,
+		 unsigned int flags)
+{
+	struct inode *old_inode = old_dentry->d_inode;
+	struct inode *new_inode = new_dentry->d_inode;
+	int new_nr_entries;
+	int error = 0;
+	struct exfat_iloc new_iloc;
+	struct exfat_inode_info *old_info = EXFAT_I(old_inode);
+	struct dir_entry_buffer old_buffers[old_info->iloc.nr_secondary];
+	/* 19: maximum entry set size (see __exfat_inode_create) */
+	struct dir_entry_buffer new_buffers[19];
+	struct exfat_filedir_entry *efd;
+	struct exfat_stream_extension_entry *esx;
+	int name_length;
+	__le16 *name;
+	u16 name_hash;
+	int i;
+
+	/* only RENAME_NOREPLACE semantics are supported */
+	if (flags & ~RENAME_NOREPLACE)
+		return -EINVAL;
+
+	exfat_lock_super(new_dir->i_sb);
+
+	/*
+	 * convert new name to utf16
+	 */
+	name = __getname();
+	if (!name) {
+		error = -ENOMEM;
+		goto unlock_super;
+	}
+	name_length = utf8s_to_utf16s(new_dentry->d_name.name,
+				      new_dentry->d_name.len,
+				      UTF16_LITTLE_ENDIAN, name, 255 + 2);
+
+	if (name_length > 255) {
+		error = -ENAMETOOLONG;
+		goto err_putname;
+	}
+	if (name_length < 0) {
+		error = name_length;
+		goto err_putname;
+	}
+
+	new_nr_entries = 2 + DIV_ROUND_UP(name_length, 15);
+
+	/*
+	 * find space for new entry
+	 */
+	error = exfat_find_dir_iloc(new_dir, new_nr_entries, &new_iloc);
+	if (error < 0)
+		goto err_putname;
+
+	/*
+	 * get buffers for old and new entries.
+	 */
+	error = exfat_get_dir_entry_buffers(old_dir, &old_info->iloc,
+				    old_buffers, old_info->iloc.nr_secondary);
+	if (error < 0)
+		goto err_putname;
+
+	error = exfat_get_dir_entry_buffers(new_dir, &new_iloc, new_buffers,
+					    new_nr_entries);
+	if (error < 0)
+		goto err_cleanup_old_buffers;
+
+
+	/*
+	 * remove new inode, if it exists.
+	 */
+	if (new_inode) {
+		if (S_ISDIR(new_inode->i_mode))
+			error = __exfat_inode_rmdir(new_dir, new_dentry);
+		else
+			error = __exfat_inode_unlink(new_dir, new_dentry);
+		if (error < 0)
+			goto err_cleanup_new_buffers;
+	}
+
+	/*
+	 * move old esd to new esd (and ditto for esx).
+	 */
+	efd = new_buffers[0].start;
+	esx = new_buffers[1].start;
+	memcpy(efd, old_buffers[0].start, sizeof (*efd));
+	memcpy(esx, old_buffers[1].start, sizeof (*esx));
+
+	efd->secondary_count = new_nr_entries - 1;
+
+	/*
+	 * patch new name after that.
+	 * NOTE(review): exfat_fill_dir_entries stores name_length
+	 * without byte-swapping — confirm which of the two is right.
+	 */
+	esx->name_length = __cpu_to_le16(name_length);
+
+	/*
+	 * fill name fragments, 15 UTF-16 units per entry, hashing as
+	 * we go.
+	 */
+	name_hash = 0;
+	for (i = 0; i < new_nr_entries - 2; ++i, name_length -= 15) {
+		struct exfat_filename_entry *efn = new_buffers[i + 2].start;
+		int len = 15;
+
+		if (name_length < 15)
+			len = name_length;
+
+		memset(efn, 0, sizeof (*efn));
+		efn->type = E_EXFAT_FILENAME;
+		memcpy(efn->name_frag, name + i * 15, len * sizeof (__le16));
+		name_hash = exfat_filename_hash_cont(new_dir->i_sb,
+						     efn->name_frag,
+						     name_hash, len);
+	}
+	__putname(name);
+	esx->name_hash = __cpu_to_le16(name_hash);
+	efd->set_checksum = exfat_dir_entries_checksum(new_buffers,
+						       new_nr_entries);
+	efd->set_checksum = __cpu_to_le16(efd->set_checksum);
+
+	/*
+	 * mark old buffer entries as unused.
+	 */
+	for (i = 0; i < old_info->iloc.nr_secondary; ++i)
+		*((u8*)old_buffers[i].start) &= 0x7f;
+
+	/*
+	 * dirty old & new entries buffers.
+	 */
+	exfat_dirty_dir_entries(new_buffers, new_nr_entries, false);
+	exfat_dirty_dir_entries(old_buffers, old_info->iloc.nr_secondary,
+				false);
+
+	/*
+	 * update links if new_dir and old_dir are different.
+	 */
+	if (new_dir != old_dir) {
+		drop_nlink(old_dir);
+		inc_nlink(new_dir);
+	}
+
+	/*
+	 * make old inode use the new iloc, and update sb inode hash.
+	 */
+	exfat_remove_inode_hash(old_inode);
+	old_info->iloc = new_iloc;
+	exfat_insert_inode_hash(old_inode);
+
+	/*
+	 * update new dir & old dir mtime/atime
+	 */
+	if (new_dir == old_dir) {
+		inode_set_mtime_to_ts(new_dir, current_time(new_dir));
+		inode_set_atime_to_ts(new_dir, inode_get_mtime(new_dir));
+
+		if (IS_DIRSYNC(new_dir))
+			__exfat_write_inode(new_dir, true);
+		else
+			mark_inode_dirty(new_dir);
+	} else {
+		inode_set_mtime_to_ts(old_dir, current_time(old_dir));
+
+		/* keep both directories on the same timestamp */
+		inode_set_mtime_to_ts(new_dir, inode_get_mtime(old_dir));
+		inode_set_atime_to_ts(old_dir, inode_get_mtime(old_dir));
+		inode_set_atime_to_ts(new_dir, inode_get_mtime(old_dir));
+
+		if (IS_DIRSYNC(new_dir)) {
+			__exfat_write_inode(new_dir, true);
+			__exfat_write_inode(old_dir, true);
+		} else {
+			mark_inode_dirty(new_dir);
+			mark_inode_dirty(old_dir);
+		}
+	}
+
+	exfat_unlock_super(new_dir->i_sb);
+	return 0;
+
+err_cleanup_new_buffers:
+	exfat_cleanup_dir_entries(new_buffers, new_nr_entries);
+err_cleanup_old_buffers:
+	exfat_cleanup_dir_entries(old_buffers, old_info->iloc.nr_secondary);
+err_putname:
+	__putname(name);
+unlock_super:
+	exfat_unlock_super(new_dir->i_sb);
+	return error;
+}
diff -Nruw linux-6.13.12-fbx/fs/exfat-fbx./read-write.c linux-6.13.12-fbx/fs/exfat-fbx/read-write.c
--- linux-6.13.12-fbx/fs/exfat-fbx./read-write.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/fs/exfat-fbx/read-write.c	2025-09-25 17:40:36.559372145 +0200
@@ -0,0 +1,144 @@
+/*
+ * read-write.c for exfat
+ * Created by <nschichan@freebox.fr> on Wed Jul 31 16:37:51 2013
+ */
+
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/mpage.h>
+#include <linux/buffer_head.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+/*
+ * map file sector to disk sector.
+ *
+ * fsect is the sector index inside the file; on success *dsect holds
+ * the absolute device sector.  Returns 0 or a negative errno from the
+ * FAT chain walk.
+ */
+static int exfat_bmap(struct inode *inode, sector_t fsect, sector_t *dsect)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	/* cluster index within the file, and sector offset inside it */
+	u32 cluster_nr = fsect >> (sbi->clusterbits - sbi->sectorbits);
+	u32 cluster;
+	unsigned int offset = fsect & (sbi->sectors_per_cluster - 1);
+
+	if (info->flags & EXFAT_I_FAT_INVALID)
+		/*
+		 * contiguous file, no FAT chain: clusters are sequential
+		 * from first_cluster.  NOTE(review): no bound check
+		 * against the allocated cluster count here — callers
+		 * are assumed to stay below the file size.
+		 */
+		cluster = info->first_cluster + cluster_nr;
+	else {
+		int error;
+
+		error = exfat_get_fat_cluster(inode, cluster_nr, &cluster);
+		if (error)
+			return error;
+	}
+
+	*dsect = exfat_cluster_sector(sbi, cluster) + offset;
+	return 0;
+}
+
+/*
+ * get_block callback used by the mpage/cont_write helpers below.
+ *
+ * Maps logical file sector @block to a device sector in @bh.  In
+ * create mode a new cluster is allocated when the request is the
+ * first sector past the currently allocated area, and mmu_private is
+ * advanced one sector at a time as blocks are handed out.
+ *
+ * Returns 0 (with @bh unmapped for a read past EOF) or a negative
+ * errno.
+ */
+static int exfat_get_block(struct inode *inode, sector_t block,
+			   struct buffer_head *bh, int create)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(inode->i_sb);
+	struct exfat_inode_info *info = EXFAT_I(inode);
+	sector_t last_block;
+	unsigned int offset;
+	sector_t dblock;
+	int error;
+
+	/* number of sectors covered by i_size, rounded up */
+	last_block = (i_size_read(inode) + sbi->sectorsize - 1) >>
+		sbi->sectorbits;
+	offset = block & (sbi->sectors_per_cluster - 1);
+
+	if (!create && block >= last_block)
+		return 0;
+
+	if (create && block >= last_block && offset == 0) {
+		u32 hint, cluster;
+
+		/*
+		 * request for first sector in a cluster immediate to
+		 * the last allocated cluster of the file: must
+		 * allocate a new cluster.
+		 */
+		error = exfat_get_cluster_hint(inode, &hint);
+		if (error)
+			return error;
+
+		error = exfat_alloc_clusters(inode, hint, &cluster, 1);
+		if (error)
+			return error;
+	}
+
+	error = exfat_bmap(inode, block, &dblock);
+	if (error)
+		return error;
+
+	if (create && block >= last_block) {
+		/*
+		 * currently in create mode: we need to update
+		 * mmu_private.
+		 */
+		info->mmu_private += sbi->sectorsize;
+		set_buffer_new(bh);
+	}
+	map_bh(bh, inode->i_sb, dblock);
+	return 0;
+}
+
+/* read_folio address-space op: thin wrapper over the mpage helper. */
+int exfat_read_folio(struct file *file, struct folio *folio)
+{
+	return mpage_read_folio(folio, exfat_get_block);
+}
+
+/* readahead address-space op: thin wrapper over the mpage helper. */
+void exfat_readahead(struct readahead_control *rac)
+{
+	mpage_readahead(rac, exfat_get_block);
+}
+
+/*
+ * undo a failed or short write that extended the file: drop the
+ * pagecache beyond i_size and free the blocks allocated past it.
+ * Always returns 0.
+ */
+static int exfat_write_error(struct inode *inode, loff_t to)
+{
+	if (to > inode->i_size) {
+		truncate_pagecache(inode, to);
+		exfat_truncate_blocks(inode, inode->i_size);
+	}
+	return 0;
+}
+
+/*
+ * write_begin address-space op: delegate to cont_write_begin(), which
+ * zero-fills any hole between mmu_private and @pos using
+ * exfat_get_block.  On failure, roll back blocks allocated past
+ * i_size.
+ */
+int exfat_write_begin(struct file *file, struct address_space *mapping,
+		      loff_t pos, unsigned len,
+		      struct folio **foliop, void **fsdata)
+{
+	struct inode *inode = mapping->host;
+	int error;
+
+	*foliop = NULL;
+	error = cont_write_begin(file, mapping, pos, len, foliop, fsdata,
+				 exfat_get_block, &EXFAT_I(inode)->mmu_private);
+
+	if (error)
+		exfat_write_error(inode, pos + len);
+	return error;
+}
+
+/*
+ * write_end address-space op.  generic_write_end() returns the number
+ * of bytes copied (or a negative errno); anything short of @len means
+ * the extension did not fully happen and must be rolled back.
+ */
+int exfat_write_end(struct file *file, struct address_space *mapping,
+		    loff_t pos, unsigned len, unsigned copied,
+		    struct folio *folio, void *fsdata)
+{
+	struct inode *inode = mapping->host;
+	int error;
+
+	error = generic_write_end(file, mapping, pos, len, copied, folio,
+				  fsdata);
+
+	if (error < len)
+		exfat_write_error(inode, pos + len);
+	return error;
+}
+
+/* writepages address-space op: thin wrapper over the mpage helper. */
+int exfat_writepages(struct address_space *mapping,
+		     struct writeback_control *wbc)
+{
+	return mpage_writepages(mapping, wbc, exfat_get_block);
+}
diff -Nruw linux-6.13.12-fbx/fs/exfat-fbx./super.c linux-6.13.12-fbx/fs/exfat-fbx/super.c
--- linux-6.13.12-fbx/fs/exfat-fbx./super.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/fs/exfat-fbx/super.c	2025-09-25 17:40:36.559372145 +0200
@@ -0,0 +1,749 @@
+/*
+ * super.c for exfat
+ * Created by <nschichan@freebox.fr> on Tue Jul 23 12:33:53 2013
+ */
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/statfs.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/cred.h>
+#include <linux/iversion.h>
+#include <linux/blk_types.h>
+
+#include "exfat_fs.h"
+#include "exfat.h"
+
+
+#define PFX	"exFAT: "
+
+static void exfat_put_super(struct super_block *sb);
+static int exfat_statfs(struct dentry *dentry, struct kstatfs *kstat);
+static int exfat_show_options(struct seq_file *m, struct dentry *root);
+static int exfat_remount(struct super_block *sb, int *flags, char *opts);
+
+/* super_block operations: inode lifecycle, statfs, umount, remount. */
+static const struct super_operations exfat_super_ops = {
+	.alloc_inode	= exfat_alloc_inode,
+	.destroy_inode	= exfat_destroy_inode,
+	.drop_inode	= exfat_drop_inode,
+	.evict_inode	= exfat_evict_inode,
+	.write_inode	= exfat_write_inode,
+	.statfs         = exfat_statfs,
+	.put_super      = exfat_put_super,
+	.show_options	= exfat_show_options,
+	.remount_fs	= exfat_remount,
+};
+
+/*
+ * generate shared_exfat_iterate(), a wrapper that adapts the old
+ * iterate() directory walker to the iterate_shared() locking rules.
+ * NOTE(review): the FIXME suggests a native iterate_shared
+ * implementation is still pending — confirm before removing.
+ */
+WRAP_DIR_ITER(exfat_iterate) // FIXME!
+
+/* directory file operations */
+const struct file_operations exfat_dir_operations = {
+	.llseek = generic_file_llseek,
+	.read = generic_read_dir,
+	.iterate_shared = shared_exfat_iterate,
+	.unlocked_ioctl	= exfat_ioctl,
+};
+
+/* regular file operations: all generic, backed by the aops below */
+const struct file_operations exfat_file_operations = {
+	.llseek		= generic_file_llseek,
+	.read_iter	= generic_file_read_iter,
+	.write_iter	= generic_file_write_iter,
+	.mmap		= generic_file_mmap,
+	.splice_read	= filemap_splice_read,
+	.splice_write	= iter_file_splice_write,
+	.unlocked_ioctl	= exfat_ioctl,
+	.fsync		= generic_file_fsync,
+};
+
+/* directory inode operations */
+const struct inode_operations exfat_dir_inode_operations =
+{
+	.create = exfat_inode_create,
+	.mkdir	= exfat_inode_mkdir,
+	.lookup = exfat_inode_lookup,
+	.rmdir	= exfat_inode_rmdir,
+	.unlink	= exfat_inode_unlink,
+	.rename	= exfat_rename,
+	.setattr = exfat_setattr,
+	.getattr = exfat_getattr,
+};
+
+/* regular file inode operations */
+const struct inode_operations exfat_file_inode_operations = {
+	.setattr = exfat_setattr,
+	.getattr = exfat_getattr,
+};
+
+/* address-space operations shared by files (see read-write.c) */
+const struct address_space_operations exfat_address_space_operations = {
+	.dirty_folio	= block_dirty_folio,
+	.invalidate_folio = block_invalidate_folio,
+	.read_folio	= exfat_read_folio,
+	.readahead	= exfat_readahead,
+	.write_begin	= exfat_write_begin,
+	.write_end	= exfat_write_end,
+	.writepages	= exfat_writepages,
+	.migrate_folio  = buffer_migrate_folio,
+};
+
+/*
+ * printk-style logger: @prefix is a KERN_* level string, the message
+ * is tagged with the filesystem name and the block device id.
+ */
+void exfat_msg(struct super_block *sb, const char *prefix,
+		const char *fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	printk("%sexFAT-fs (%s): %pV\n", prefix, sb->s_id, &vaf);
+	va_end(args);
+}
+
+/*
+ * report a filesystem-level inconsistency, then apply the configured
+ * errors= mount-option action: continue (default), remount the volume
+ * read-only, or panic.
+ */
+void exfat_fs_error(struct super_block *sb, const char *fmt, ...)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct va_format vaf;
+	va_list args;
+
+	va_start(args, fmt);
+	vaf.fmt = fmt;
+	vaf.va = &args;
+	exfat_msg(sb, KERN_ERR, "error: %pV", &vaf);
+	va_end(args);
+
+	if (sbi->options.error_action == EXFAT_ERROR_ACTION_REMOUNT_RO &&
+	    !(sb->s_flags & SB_RDONLY)) {
+		sb->s_flags |= SB_RDONLY;
+		exfat_msg(sb, KERN_ERR, "remounted read-only due to fs error.");
+	} else if (sbi->options.error_action == EXFAT_ERROR_ACTION_PANIC)
+		panic("exFAT-fs (%s): panic due fs error.\n", sb->s_id);
+}
+
+/*
+ * process checksum on buffer head. first indicates if the special
+ * treatment of the first sector needs to be done or not.
+ *
+ * first sector can be changed (volume flags, and heap use percent),
+ * those fields are excluded from the checksum to allow updating
+ * without recalculating the checksum.
+ *
+ * The checksum itself is the standard exFAT rotate-right-and-add over
+ * each byte; offsets 106/107 and 112 are the mutable VolumeFlags and
+ * PercentInUse fields of the boot sector.
+ */
+static u32 exfat_sb_checksum_process(struct buffer_head *bh, u32 checksum,
+				     unsigned int size,
+				     bool first)
+{
+	unsigned int i;
+
+	for (i = 0; i < size; ++i) {
+		if (first && (i == 106 || i == 107 || i == 112))
+			continue ;
+		/* rotate right one bit, then add the next byte */
+		checksum = ((checksum << 31) | (checksum >> 1)) +
+			(unsigned char)bh->b_data[i];
+	}
+	return checksum;
+}
+
+/*
+ * verify the boot-region checksum: every u32 of the checksum sector
+ * (index EXFAT_CHECKSUM_SECTORS) must equal the rot-add checksum of
+ * the preceding EXFAT_CHECKSUM_SECTORS sectors.
+ *
+ * Returns 0 on match, -EINVAL on mismatch, -ENOMEM on read failure.
+ */
+static int exfat_check_sb_checksum(struct super_block *sb)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u32 checksum;
+	int i;
+	int err;
+	struct buffer_head *bh[EXFAT_CHECKSUM_SECTORS + 1];
+
+	/*
+	 * fetch needed sectors, reuse first sector from sbi.
+	 */
+	err = -ENOMEM;
+	memset(bh, 0, sizeof (struct buffer_head*) *
+	       (EXFAT_CHECKSUM_SECTORS + 1));
+	bh[0] = sbi->sb_bh;
+	for (i = 1; i < EXFAT_CHECKSUM_SECTORS + 1; ++i) {
+		bh[i] = sb_bread(sb, i);
+		if (!bh[i])
+			goto out;
+	}
+
+	/*
+	 * calculate checksum over sectors 0..EXFAT_CHECKSUM_SECTORS-1,
+	 * excluding the mutable fields of sector 0.
+	 */
+	checksum = exfat_sb_checksum_process(bh[0], 0, sbi->sectorsize, true);
+	for (i = 1; i < EXFAT_CHECKSUM_SECTORS; ++i) {
+		checksum = exfat_sb_checksum_process(bh[i], checksum,
+						     sbi->sectorsize, false);
+	}
+
+	/*
+	 * compare with the checksum sector.
+	 */
+	err = -EINVAL;
+	for (i = 0; i < sbi->sectorsize; i += sizeof (u32)) {
+		__le32 val = *(__le32 *)(bh[EXFAT_CHECKSUM_SECTORS]->b_data + i);
+
+		if (__le32_to_cpu(val) != checksum) {
+			exfat_msg(sb, KERN_INFO, "at offset %i, checksum "
+				  "%08x != %08x", i, __le32_to_cpu(val), checksum);
+			goto out;
+		}
+	}
+	err = 0;
+
+out:
+	/*
+	 * release every buffer read above, including the checksum
+	 * sector at index EXFAT_CHECKSUM_SECTORS (the previous loop
+	 * bound stopped one short and leaked it).  bh[0] belongs to
+	 * sbi and must not be released here.
+	 */
+	for (i = 1; i < EXFAT_CHECKSUM_SECTORS + 1; ++i)
+		if (bh[i])
+			brelse(bh[i]);
+	return err;
+}
+
+/*
+ * sanity-check the volume boot record (already loaded in sbi->vbr):
+ * signatures, filesystem revision, active FAT, sector/cluster size
+ * bounds and FAT count.  Returns 0 if the VBR looks valid, -EINVAL
+ * otherwise.
+ */
+static int exfat_check_sb(struct super_block *sb)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct exfat_vbr *vbr = sbi->vbr;
+	u16 fs_rev;
+	u16 flags;
+	int active_fat;
+	u16 num_fats;
+
+	if (memcmp(vbr->jump, "\xeb\x76\x90", sizeof (vbr->jump))) {
+		exfat_msg(sb, KERN_INFO, "invalid jump field in vbr.");
+		return -EINVAL;
+	}
+
+	if (memcmp(vbr->fsname, "EXFAT   ", 8)) {
+		exfat_msg(sb, KERN_INFO, "invalid fsname field in vbr: %s.",
+			  vbr->fsname);
+		return -EINVAL;
+	}
+
+	/* only revision 1.00 is supported */
+	fs_rev = __le16_to_cpu(vbr->fs_rev);
+	if (fs_rev != 0x0100) {
+		exfat_msg(sb, KERN_INFO, "filesystem version invalid: "
+			  "have 0x%04x, need 0x0100", fs_rev);
+		return -EINVAL;
+	}
+
+	flags = __le16_to_cpu(vbr->volume_flags);
+	active_fat = exfat_active_fat(flags);
+	if (active_fat != 0) {
+		exfat_msg(sb, KERN_INFO, "filesystems with active fat > 0 are "
+			  "not supported.");
+		return -EINVAL;
+	}
+
+	if (flags & EXFAT_FLAG_MEDIA_FAILURE)
+		exfat_msg(sb, KERN_WARNING, "filesystem had media failure(s)");
+
+	/*
+	 * bytes per sectors are on the range 2^9 - 2^12 (512 - 4096)
+	 */
+	if (vbr->bytes_per_sector < 9 || vbr->bytes_per_sector > 12) {
+		exfat_msg(sb, KERN_ERR, "invalid byte per sectors: %u",
+			  (1 << vbr->bytes_per_sector));
+		return -EINVAL;
+	}
+
+	/*
+	 * sectors per cluster can be as low as 0, and must not result
+	 * in a cluster size higher than 32MB (byte_per_sector +
+	 * sectors_per_cluster must not be greater than 25)
+	 */
+	if (vbr->bytes_per_sector + vbr->sectors_per_cluster > 25) {
+		exfat_msg(sb, KERN_ERR, "invalid cluster size: %u",
+		  1 << (vbr->bytes_per_sector + vbr->sectors_per_cluster));
+		return -EINVAL;
+	}
+
+	/* exactly one FAT: TexFAT (two FATs) is not supported */
+	num_fats = __le16_to_cpu(vbr->fat_num);
+	if (num_fats == 0) {
+		exfat_msg(sb, KERN_ERR, "superblock reports no FAT.");
+		return -EINVAL;
+	}
+	if (num_fats > 1) {
+		exfat_msg(sb, KERN_ERR, "TexFAT is not supported.");
+		return -EINVAL;
+	}
+
+	if (memcmp(vbr->boot_sig, "\x55\xaa", 2)) {
+		exfat_msg(sb, KERN_ERR, "invalid end boot signature: %02x%02x.",
+			  vbr->boot_sig[0], vbr->boot_sig[1]);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * initialize the root directory inode: fixed inode number, ownership
+ * and mode from mount options, size derived by walking the root
+ * directory's FAT chain, and link count from its subdirectories.
+ *
+ * Returns 0 on success or a negative errno from exfat_dir_links().
+ */
+static int exfat_fill_root(struct super_block *sb, struct inode *root)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u32 nclust;
+	u32 dummy;
+	loff_t links;
+
+	root->i_ino = EXFAT_ROOT_INO;
+	inode_set_iversion(root, 1);
+	EXFAT_I(root)->first_cluster =
+		__le32_to_cpu(sbi->root_dir_cluster);
+	EXFAT_I(root)->attributes = E_EXFAT_ATTR_DIRECTORY;
+
+	root->i_uid = sbi->options.uid;
+	root->i_gid = sbi->options.gid;
+
+	root->i_mode = exfat_make_mode(sbi, S_IRWXUGO, E_EXFAT_ATTR_DIRECTORY);
+	inode_inc_iversion(root);
+	root->i_generation = 0;
+
+	root->i_op = &exfat_dir_inode_operations;
+	root->i_fop = &exfat_dir_operations;
+
+	/*
+	 * root inode cannot use bitmap.
+	 */
+	EXFAT_I(root)->flags = EXFAT_I_ALLOC_POSSIBLE;
+
+	/*
+	 * set i_size: the root directory has no size field on disk, so
+	 * count clusters by walking its FAT chain until it ends.
+	 */
+	nclust = 0;
+	while (__exfat_get_fat_cluster(root, nclust, &dummy, false) == 0)
+		++nclust;
+	root->i_size = nclust << sbi->clusterbits;
+	root->i_blocks = nclust << (sbi->clusterbits - 9);
+	EXFAT_I(root)->allocated_clusters = nclust;
+
+	/*
+	 * +2 to account for '.' and '..'
+	 */
+	links = exfat_dir_links(root);
+	if (links < 0)
+		return links;
+	set_nlink(root, links + 2);
+
+	inode_set_mtime_to_ts(root, inode_set_ctime_current(root));
+	inode_set_atime_to_ts(root, inode_get_mtime(root));
+
+	return 0;
+}
+
+/*
+ * largest representable file size: the full valid cluster range times
+ * the cluster size.
+ */
+static loff_t exfat_file_max_byte(struct exfat_sb_info *sbi)
+{
+	u32 max_clusters = EXFAT_CLUSTER_LASTVALID -
+		EXFAT_CLUSTER_FIRSTVALID + 1;
+
+	return (loff_t)max_clusters << sbi->clusterbits;
+}
+
+/*
+ * show_options super op: emit the non-default mount options for
+ * /proc/mounts.  uid/gid are only shown when different from root,
+ * masks and error action are always shown.
+ */
+static int exfat_show_options(struct seq_file *m, struct dentry *root)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(root->d_inode->i_sb);
+
+	if (!uid_eq(sbi->options.uid, GLOBAL_ROOT_UID))
+		seq_printf(m, ",uid=%u",
+			   from_kuid_munged(&init_user_ns, sbi->options.uid));
+	if (!gid_eq(sbi->options.gid, GLOBAL_ROOT_GID))
+		seq_printf(m, ",gid=%u",
+			   from_kgid_munged(&init_user_ns, sbi->options.gid));
+
+	seq_printf(m, ",fmask=%04o", sbi->options.fmask);
+	seq_printf(m, ",dmask=%04o", sbi->options.dmask);
+
+	if (sbi->options.time_offset_set)
+		seq_printf(m, ",time_offset=%d", sbi->options.time_offset);
+
+	switch (sbi->options.error_action) {
+	case EXFAT_ERROR_ACTION_PANIC:
+		seq_printf(m, ",errors=panic");
+		break;
+	case EXFAT_ERROR_ACTION_REMOUNT_RO:
+		seq_printf(m, ",errors=remount-ro");
+		break;
+	default:
+		seq_printf(m, ",errors=continue");
+		break;
+	}
+
+	return 0;
+}
+
+/* token ids for the mount-option parser below */
+enum {
+	Opt_exfat_uid,
+	Opt_exfat_gid,
+	Opt_exfat_dmask,
+	Opt_exfat_fmask,
+	Opt_exfat_time_offset,
+	Opt_exfat_error_continue,
+	Opt_exfat_error_remount_ro,
+	Opt_exfat_error_panic,
+	Opt_exfat_err,		/* catch-all: unknown option */
+};
+
+/* match_token() patterns; Opt_exfat_err must stay last */
+static const match_table_t exfat_tokens = {
+	{ Opt_exfat_uid, "uid=%u", },
+	{ Opt_exfat_gid, "gid=%u", },
+	{ Opt_exfat_dmask, "dmask=%04o", },
+	{ Opt_exfat_fmask, "fmask=%04o", },
+	{ Opt_exfat_time_offset, "time_offset=%d", },
+	{ Opt_exfat_error_continue, "errors=continue", },
+	{ Opt_exfat_error_remount_ro, "errors=remount-ro", },
+	{ Opt_exfat_error_panic, "errors=panic", },
+	{ Opt_exfat_err, NULL },
+};
+
+/*
+ * parse mount options into sbi->options.  Defaults: current uid/gid
+ * and umask, no fixed time offset, errors=continue.
+ *
+ * Returns 0 on success, -EINVAL on an unknown option or a bad value.
+ */
+static int exfat_parse_options(struct super_block *sb, char *opts, int silent)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	char *p;
+
+	sbi->options.uid = current_uid();
+	sbi->options.gid = current_gid();
+
+	sbi->options.dmask = current_umask();
+	sbi->options.fmask = current_umask();
+	sbi->options.time_offset_set = 0;
+	sbi->options.error_action = EXFAT_ERROR_ACTION_CONTINUE;
+
+	while (1) {
+		int token;
+		substring_t args[MAX_OPT_ARGS];
+		/*
+		 * must be signed: match_int()/match_octal() take an
+		 * int *, and time_offset may legitimately be negative
+		 * (the old unsigned declaration broke its range check).
+		 */
+		int optval;
+
+		p = strsep(&opts, ",");
+		if (!p)
+			break;
+		token = match_token(p, exfat_tokens, args);
+
+		switch (token) {
+		case Opt_exfat_uid:
+			if (match_int(&args[0], &optval))
+				return -EINVAL;
+			sbi->options.uid = make_kuid(current_user_ns(), optval);
+			break;
+
+		case Opt_exfat_gid:
+			if (match_int(&args[0], &optval))
+				return -EINVAL;
+			sbi->options.gid = make_kgid(current_user_ns(), optval);
+			break;
+
+		case Opt_exfat_dmask:
+			if (match_octal(&args[0], &optval))
+				return -EINVAL;
+			sbi->options.dmask = optval;
+			break;
+
+		case Opt_exfat_fmask:
+			if (match_octal(&args[0], &optval))
+				return -EINVAL;
+			sbi->options.fmask = optval;
+			break;
+
+		case Opt_exfat_time_offset:
+			if (match_int(&args[0], &optval))
+				return -EINVAL;
+			/*
+			 * valid offsets lie within +/- 12 hours (in
+			 * minutes).  The old test combined the two
+			 * bounds with && and so could never reject
+			 * anything.
+			 */
+			if (optval < -12 * 60 || optval > 12 * 60) {
+				if (!silent)
+					exfat_msg(sb, KERN_INFO, "invalid "
+						  "time_offset value %d: "
+						  "should be between %d and %d",
+						  optval, -12 * 60, 12 * 60);
+				return -EINVAL;
+			}
+			sbi->options.time_offset = optval;
+			sbi->options.time_offset_set = 1;
+			break;
+
+		case Opt_exfat_error_continue:
+			sbi->options.error_action = EXFAT_ERROR_ACTION_CONTINUE;
+			break;
+
+		case Opt_exfat_error_remount_ro:
+			sbi->options.error_action =
+				EXFAT_ERROR_ACTION_REMOUNT_RO;
+			break;
+
+		case Opt_exfat_error_panic:
+			sbi->options.error_action = EXFAT_ERROR_ACTION_PANIC;
+			break;
+
+		default:
+			if (!silent)
+				exfat_msg(sb, KERN_INFO, "Unrecognized mount "
+					  "option %s or missing parameter.\n",
+					  p);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+/*
+ * set or clear the VolumeDirty flag in the on-disk boot sector and
+ * flush it synchronously.  @force overrides the read-only check (used
+ * by remount).  If the volume was already dirty on mount, the flag is
+ * left untouched and a warning is printed when trying to set it.
+ */
+static void exfat_set_sb_dirty(struct super_block *sb, bool set, bool force)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u16 flags;
+
+	/*
+	 * do not change anything if mounted read only and not
+	 * forced. the force case would happen during remount.
+	 */
+	if ((sb->s_flags & SB_RDONLY) && !force)
+		return ;
+
+	if (sbi->dirty) {
+		if (set)
+			exfat_msg(sb, KERN_WARNING, "Volume was not cleanly "
+				  "umounted. fsck should probably be needed.");
+		return ;
+	}
+
+	flags = __le16_to_cpu(sbi->vbr->volume_flags);
+	if (set)
+		flags |= EXFAT_FLAG_DIRTY;
+	else
+		flags &= ~EXFAT_FLAG_DIRTY;
+	sbi->vbr->volume_flags = __cpu_to_le16(flags);
+
+	/* write the boot sector out before returning */
+	mark_buffer_dirty(sbi->sb_bh);
+	sync_dirty_buffer(sbi->sb_bh);
+}
+
+/*
+ * remount_fs super op: only the ro/rw transition matters here — clear
+ * the dirty flag when going read-only, set it when going read-write.
+ * Option reparsing is not supported on remount.
+ */
+static int exfat_remount(struct super_block *sb, int *flags, char *opts)
+{
+	int new_rdonly = *flags & SB_RDONLY;
+
+	if (new_rdonly != (sb->s_flags & SB_RDONLY)) {
+		if (new_rdonly)
+			exfat_set_sb_dirty(sb, false, false);
+		else
+			/*
+			 * sb->s_flag still has SB_RDONLY, so we need
+			 * to force the dirty state
+			 */
+			exfat_set_sb_dirty(sb, true, true);
+	}
+	return 0;
+}
+
+/*
+ * fill_super callback: read and validate the boot region, load the
+ * FAT, up-case table and allocation bitmap, then instantiate the root
+ * inode.
+ *
+ * put_super() only runs once sb->s_root has been set, so every error
+ * path in here must release what was allocated so far.
+ */
+static int exfat_fill_super(struct super_block *sb, void *data, int silent)
+{
+	struct exfat_sb_info *sbi = NULL;
+	int ret;
+	struct inode *root = NULL;
+	int i;
+
+	sbi = kzalloc(sizeof (*sbi), GFP_KERNEL);
+	if (!sbi)
+		return -ENOMEM;
+
+	sb->s_fs_info = sbi;
+	if (exfat_parse_options(sb, data, silent) < 0) {
+		/* the old code returned here directly and leaked sbi */
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	mutex_init(&sbi->sb_mutex);
+	spin_lock_init(&sbi->inode_hash_lock);
+
+	/*
+	 * first block, before we know sector size.
+	 */
+	ret = -EIO;
+	sbi->sb_bh = sb_bread(sb, 0);
+	if (!sbi->sb_bh)
+		goto fail;
+
+	sbi->vbr = (struct exfat_vbr*)sbi->sb_bh->b_data;
+	sb->s_op = &exfat_super_ops;
+
+	ret = exfat_check_sb(sb);
+	if (ret)
+		goto fail;
+
+	/*
+	 * time granularity of FS for use by current_time(inode): in
+	 * nsec so 1000000000 for 1 sec granularity.
+	 */
+	sb->s_time_gran = 1000 * 1000 * 1000;
+
+	/*
+	 * vbr seems sane, fill sbi.
+	 */
+	sbi->sectorsize = (1 << sbi->vbr->bytes_per_sector);
+	sbi->clustersize = sbi->sectorsize *
+		(1 << sbi->vbr->sectors_per_cluster);
+
+	sbi->sectors_per_cluster = sbi->clustersize / sbi->sectorsize;
+
+	sbi->sectorbits = sbi->vbr->bytes_per_sector;
+	sbi->clusterbits = sbi->vbr->sectors_per_cluster + sbi->sectorbits;
+	sbi->sectormask = sbi->sectorsize - 1;
+	sbi->clustermask = sbi->clustersize - 1;
+
+	sbi->fat_offset = __le32_to_cpu(sbi->vbr->fat_offset);
+	sbi->fat_length = __le32_to_cpu(sbi->vbr->fat_length);
+
+	sbi->root_dir_cluster = __le32_to_cpu(sbi->vbr->cluster_root_dir);
+
+	sbi->cluster_heap_offset = __le32_to_cpu(sbi->vbr->cluster_heap_offset);
+	sbi->cluster_count = __le32_to_cpu(sbi->vbr->cluster_count);
+
+	sbi->dirty = !!(__le16_to_cpu(sbi->vbr->volume_flags) &
+			EXFAT_FLAG_DIRTY);
+
+	/*
+	 * now that we know sector size, reread superblock with
+	 * correct sector size.
+	 */
+	ret = -EIO;
+	if (sb->s_blocksize != sbi->sectorsize) {
+		if (!sb_set_blocksize(sb, sbi->sectorsize)) {
+			exfat_msg(sb, KERN_INFO, "bad block size %d.",
+				  sbi->sectorsize);
+			goto fail;
+		}
+
+		brelse(sbi->sb_bh);
+		sbi->vbr = NULL;
+
+		sbi->sb_bh = sb_bread(sb, 0);
+		if (!sbi->sb_bh)
+			goto fail;
+		sbi->vbr = (struct exfat_vbr*)sbi->sb_bh->b_data;
+	}
+
+	ret = exfat_check_sb_checksum(sb);
+	if (ret)
+		goto fail;
+
+	sb->s_maxbytes = exfat_file_max_byte(sbi);
+
+	ret = exfat_init_fat(sb);
+	if (ret)
+		goto fail;
+
+	for (i = 0 ; i < EXFAT_HASH_SIZE; ++i) {
+		INIT_HLIST_HEAD(&sbi->inode_hash[i]);
+	}
+
+	/*
+	 * create root inode.  ret must be set explicitly here: the old
+	 * code fell through with ret == 0 and reported success on a
+	 * failed new_inode()/d_make_root().
+	 */
+	ret = -ENOMEM;
+	root = new_inode(sb);
+	if (!root)
+		goto fail;
+
+	/* the old code ignored exfat_fill_root() errors */
+	ret = exfat_fill_root(sb, root);
+	if (ret)
+		goto fail_iput;
+
+	ret = exfat_upcase_init(root);
+	if (ret)
+		goto fail_iput;
+
+	ret = exfat_init_bitmap(root);
+	if (ret)
+		goto fail_iput;
+
+	sb->s_root = d_make_root(root);
+	if (!sb->s_root) {
+		/*
+		 * d_make_root() already drops the inode on failure:
+		 * do not iput() it a second time.
+		 */
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	exfat_set_sb_dirty(sb, true, false);
+	return 0;
+
+fail_iput:
+	iput(root);
+
+fail:
+	if (sbi->sb_bh)
+		brelse(sbi->sb_bh);
+	kfree(sbi);
+	sb->s_fs_info = NULL;
+	return ret;
+}
+
+/* mount callback: plain block-device mount via exfat_fill_super. */
+static struct dentry *exfat_mount(struct file_system_type *fstype,
+				  int flags, const char *dev_name, void *data)
+{
+	return mount_bdev(fstype, flags, dev_name, data, exfat_fill_super);
+}
+
+/*
+ * put_super super op: mark the volume clean, release the bitmap, the
+ * boot-sector buffer, the up-case table and the sb info itself.
+ */
+static void exfat_put_super(struct super_block *sb)
+{
+	struct exfat_sb_info *sbi;
+
+	sbi = EXFAT_SB(sb);
+	if (sbi) {
+		exfat_set_sb_dirty(sb, false, false);
+		exfat_exit_bitmap(sb);
+		brelse(sbi->sb_bh);
+		kfree(sbi->upcase_table);
+		kfree(sbi);
+	}
+}
+
+/*
+ * statfs super op.  Block counts are reported in cluster units
+ * (f_bsize is the cluster size); f_fsid derives from the device id.
+ */
+static int exfat_statfs(struct dentry *dentry, struct kstatfs *kstat)
+{
+	struct super_block *sb = dentry->d_inode->i_sb;
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+
+	memset(kstat, 0, sizeof (*kstat));
+
+
+	kstat->f_bsize = sbi->clustersize;
+	kstat->f_blocks = sbi->cluster_count;
+	kstat->f_bfree = sbi->free_clusters;
+	kstat->f_bavail = sbi->free_clusters;
+	kstat->f_namelen = 255;
+	kstat->f_fsid.val[0] = (u32)id;
+	kstat->f_fsid.val[1] = (u32)(id >> 32);
+
+	return 0;
+}
+
+/* filesystem registration record */
+static struct file_system_type exfat_fs_type = {
+	.owner		= THIS_MODULE,
+	.name		= "exfat",
+	.mount		= exfat_mount,
+	.kill_sb	= kill_block_super,
+	.fs_flags	= FS_REQUIRES_DEV,
+};
+
+/*
+ * module init: verify on-disk structure sizes at compile time, set up
+ * the inode cache and register the filesystem.
+ */
+static int __init exfat_init(void)
+{
+	int error;
+
+	/* some sanity check on internal structure sizes */
+	BUILD_BUG_ON(sizeof (struct exfat_vbr) != 512);
+
+	/* every directory entry type is exactly 32 bytes on disk */
+	BUILD_BUG_ON(sizeof (struct exfat_volume_label_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_bitmap_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_upcase_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_guid_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_padding_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_acl_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_filedir_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_stream_extension_entry) != 0x20);
+	BUILD_BUG_ON(sizeof (struct exfat_filename_entry) != 0x20);
+
+	error = exfat_init_inodes();
+	if (error)
+		return error;
+
+
+	error = register_filesystem(&exfat_fs_type);
+	if (error)
+		exfat_exit_inodes();
+	return error;
+}
+
+/* module exit: unregister and tear down the inode cache */
+static void __exit exfat_exit(void)
+{
+	unregister_filesystem(&exfat_fs_type);
+	exfat_exit_inodes();
+}
+
+module_init(exfat_init);
+module_exit(exfat_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Schichan <nschichan@freebox.fr>");
diff -Nruw linux-6.13.12-fbx/fs/exfat-fbx./time.c linux-6.13.12-fbx/fs/exfat-fbx/time.c
--- linux-6.13.12-fbx/fs/exfat-fbx./time.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/fs/exfat-fbx/time.c	2025-09-25 17:40:36.563372165 +0200
@@ -0,0 +1,126 @@
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+
+
+extern struct timezone sys_tz;
+
+/*
+ * The epoch of FAT timestamp is 1980.
+ *     :  bits :     value
+ * date:  0 -  4: day	(1 -  31)
+ * date:  5 -  8: month	(1 -  12)
+ * date:  9 - 15: year	(0 - 127) from 1980
+ * time:  0 -  4: sec	(0 -  29) 2sec counts
+ * time:  5 - 10: min	(0 -  59)
+ * time: 11 - 15: hour	(0 -  23)
+ */
+#define SECS_PER_MIN	60
+#define SECS_PER_HOUR	(60 * 60)
+#define SECS_PER_DAY	(SECS_PER_HOUR * 24)
+/* days between 1.1.70 and 1.1.80 (2 leap days) */
+#define DAYS_DELTA	(365 * 10 + 2)
+/* 120 (2100 - 1980) isn't leap year */
+#define YEAR_2100	120
+#define IS_LEAP_YEAR(y)	(!((y) & 3) && (y) != YEAR_2100)
+
+/*
+ * Linear day numbers of the respective 1sts in non-leap years.
+ * Padded to 16 entries because the month index comes from a raw
+ * 4-bit field (0-15); the padding keeps corrupt values in bounds.
+ */
+static u32 days_in_year[] = {
+	/* Jan  Feb  Mar  Apr  May  Jun  Jul  Aug  Sep  Oct  Nov  Dec */
+	0,   0,  31,  59,  90, 120, 151, 181, 212, 243, 273, 304, 334, 0, 0, 0,
+};
+
+/* Convert a FAT time/date pair to a UNIX date (seconds since 1 1 70). */
+void exfat_time_2unix(struct timespec64 *ts, u32 datetime, u8 time_cs,
+		      s8 tz_offset)
+{
+	u16 date = (datetime >> 16);
+	u16 time = (datetime & 0xffff);
+	time64_t second, day, leap_day, month, year;
+
+	year  = date >> 9;
+	/* clamp zero month/day fields to 1 before use */
+	month = max(1, (date >> 5) & 0xf);
+	day   = max(1, date & 0x1f) - 1;
+
+	/*
+	 * tz_offset is a 7-bit signed offset in 15-minute units with
+	 * bit 7 used as a validity flag: clearing bit 7 when bit 6 is
+	 * zero sign-extends the 7-bit value into the s8.
+	 * NOTE(review): the flag itself is not checked here — confirm
+	 * callers only pass valid offsets.
+	 */
+	if (((tz_offset & (1 << 6)) == 0))
+		tz_offset &= ~(1 << 7);
+
+	leap_day = (year + 3) / 4;
+	if (year > YEAR_2100)		/* 2100 isn't leap year */
+		leap_day--;
+	if (IS_LEAP_YEAR(year) && month > 2)
+		leap_day++;
+
+	second =  (time & 0x1f) << 1;
+	second += ((time >> 5) & 0x3f) * SECS_PER_MIN;
+	second += (time >> 11) * SECS_PER_HOUR;
+	second += (year * 365 + leap_day
+		   + days_in_year[month] + day
+		   + DAYS_DELTA) * SECS_PER_DAY;
+
+	/* convert from local time at tz_offset to UTC */
+	second -= tz_offset * 15 * SECS_PER_MIN;
+
+	/* 10ms units: 0-199 covers the 2-second date resolution */
+	if (time_cs) {
+		ts->tv_sec = second + (time_cs / 100);
+		ts->tv_nsec = (time_cs % 100) * 10000000;
+	} else {
+		ts->tv_sec = second;
+		ts->tv_nsec = 0;
+	}
+}
+
+/*
+ * Convert a linear UNIX date to the exFAT date/time triplet.
+ *
+ * *datetime receives the date in the high 16 bits and the time in the
+ * low 16 bits (host byte order; callers convert when writing to
+ * disk).  *time_cs (optional, may be NULL) receives 0-199
+ * centiseconds; *tz_offset receives the UTC offset in 15-minute units
+ * with bit 7 set as the "offset valid" flag.
+ *
+ * Timestamps outside the representable 1980-2107 range are clamped to
+ * the nearest bound.
+ */
+void exfat_time_2exfat(struct exfat_sb_info *sbi, struct timespec64 *ts,
+		       u32 *datetime, u8 *time_cs, s8 *tz_offset)
+{
+	struct tm tm;
+	u16 time;
+	u16 date;
+	int offset;
+
+	if (sbi->options.time_offset_set)
+		offset = -sbi->options.time_offset;
+	else
+		offset = sys_tz.tz_minuteswest;
+
+	time64_to_tm(ts->tv_sec, -offset * SECS_PER_MIN, &tm);
+
+	/*
+	 * exFAT can only represent years 1980 to 2107: clamp.  The
+	 * old code computed date/time here but returned without ever
+	 * storing *datetime, leaving the caller's value
+	 * uninitialized; it also ran the host-order fields through
+	 * cpu_to_le16 although *datetime is composed in host order
+	 * everywhere else.
+	 */
+	if (tm.tm_year < 1980 - 1900) {
+		time = 0;
+		date = (0 << 9) | (1 << 5) | 1;
+		*datetime = ((u32)date << 16) | time;
+		if (time_cs)
+			*time_cs = 0;
+		*tz_offset = 0;
+		return;
+	}
+	if (tm.tm_year > 2107 - 1900) {
+		time = (23 << 11) | (59 << 5) | 29;
+		date = (127 << 9) | (12 << 5) | 31;
+		*datetime = ((u32)date << 16) | time;
+		if (time_cs)
+			*time_cs = 199;
+		*tz_offset = 0;
+		return;
+	}
+
+	/* from 1900 -> from 1980 */
+	tm.tm_year -= 80;
+	/* 0~11 -> 1~12 */
+	tm.tm_mon++;
+	/* 0~59 -> 0~29(2sec counts) */
+	tm.tm_sec >>= 1;
+
+	time = tm.tm_hour << 11 | tm.tm_min << 5 | tm.tm_sec;
+	date = tm.tm_year << 9 | tm.tm_mon << 5 | tm.tm_mday;
+
+	*datetime = ((u32)date << 16) | time;
+
+	/* odd second + nanoseconds folded into 10ms units */
+	if (time_cs)
+		*time_cs = (ts->tv_sec & 1) * 100 + ts->tv_nsec / 10000000;
+	*tz_offset = -offset / 15;
+	*tz_offset |= (1 << 7);
+}
diff -Nruw linux-6.13.12-fbx/fs/exfat-fbx./upcase.c linux-6.13.12-fbx/fs/exfat-fbx/upcase.c
--- linux-6.13.12-fbx/fs/exfat-fbx./upcase.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/fs/exfat-fbx/upcase.c	2025-09-25 17:40:36.563372165 +0200
@@ -0,0 +1,137 @@
+/*
+ * upcase.c for exfat
+ * Created by <nschichan@freebox.fr> on Wed Aug  7 11:51:37 2013
+ */
+
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+
+#include "exfat.h"
+#include "exfat_fs.h"
+
+/*
+ * fold @count bytes of @data into the running up-case table checksum
+ * (the same rotate-right-and-add scheme as the boot checksum, without
+ * any excluded offsets).
+ */
+static u32 exfat_calc_upcase_checksum(const u8 *data, u32 checksum,
+				      size_t count)
+{
+	while (count) {
+		checksum = ((checksum << 31) | (checksum >> 1)) + *data;
+		--count;
+		++data;
+	}
+	return checksum;
+}
+
+/*
+ * load the up-case table from disk into sbi->upcase_table and return
+ * its checksum in *out_checksum.  sbi->upcase_len must be set by the
+ * caller beforehand.
+ *
+ * up-case tables are not fragmented, so a sequential read starting at
+ * disk_cluster is sufficient.
+ */
+static int exfat_load_upcase_table(struct super_block *sb, u32 disk_cluster,
+				   u32 *out_checksum)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(sb);
+	struct buffer_head *bh;
+	sector_t start, sect, end;
+	u32 off = 0;
+	u32 byte_len = sbi->upcase_len * sizeof (__le16);
+	u32 checksum = 0;
+
+	start = exfat_cluster_sector(sbi, disk_cluster);
+	end = start + DIV_ROUND_UP(byte_len,
+			   sbi->sectorsize);
+	for (sect = start; sect < end; ++sect) {
+		u32 len = sbi->sectorsize;
+		u32 tail = byte_len & sbi->sectormask;
+
+		/*
+		 * only the final sector may be partial, and only when
+		 * byte_len is not an exact sector multiple: the old
+		 * code used the masked value unconditionally, which
+		 * is 0 for an exact multiple and silently skipped the
+		 * whole last sector (BUG_ON below would then fire).
+		 */
+		if (sect == end - 1 && tail)
+			len = tail;
+
+		bh = sb_bread(sb, sect);
+		if (!bh) {
+			exfat_msg(sb, KERN_ERR,
+				  "unable to read upcase sector %llu",
+				  (unsigned long long)sect);
+			return -EIO;
+		}
+		memcpy((u8*)sbi->upcase_table + off, bh->b_data,
+		       len);
+
+		checksum = exfat_calc_upcase_checksum(bh->b_data, checksum,
+						      len);
+
+		off += len;
+		brelse(bh);
+	}
+
+	/* every byte of the table must have been copied */
+	BUG_ON(off != byte_len);
+	*out_checksum = checksum;
+	return 0;
+}
+
+/*
+ * locate the up-case table entry in the root directory, load the
+ * table into sbi->upcase_table and verify its checksum.
+ *
+ * Returns 0 on success or a negative errno; on failure
+ * sbi->upcase_table is left NULL (the old code kfree()d it but kept
+ * the dangling pointer, which later cleanup paths could free again).
+ */
+int exfat_upcase_init(struct inode *root)
+{
+	struct exfat_sb_info *sbi = EXFAT_SB(root->i_sb);
+	struct exfat_upcase_entry *upcase;
+	struct exfat_dir_ctx dctx;
+	int error;
+	u64 upcase_length;
+	u32 checksum;
+
+	/*
+	 * configure directory context and look for an upcase table
+	 * entry.
+	 */
+	if (exfat_init_dir_ctx(root, &dctx, 0) < 0)
+		return -EIO;
+
+	error = -EIO;
+	upcase = __exfat_dentry_next(&dctx, E_EXFAT_UPCASE_TABLE, 0xff,
+				     true, NULL);
+	if (!upcase)
+		goto fail;
+
+	/*
+	 * check upcase table length. we need it to be non-zero,
+	 * ending on a __le16 boundary and provide at most a
+	 * conversion for the whole __le16 space.
+	 */
+	upcase_length = __le64_to_cpu(upcase->length);
+	if (upcase_length == 0 ||
+	    upcase_length & (sizeof (__le16) - 1) ||
+	    upcase_length > 0xffff * sizeof (__le16)) {
+		exfat_msg(root->i_sb, KERN_ERR, "invalid upcase length %llu",
+			  (unsigned long long)upcase_length);
+		goto fail;
+	}
+
+	/*
+	 * load complete upcase table in memory.
+	 */
+	error = -ENOMEM;
+	sbi->upcase_len = upcase_length / sizeof (__le16);
+	sbi->upcase_table = kmalloc(upcase_length, GFP_NOFS);
+	if (!sbi->upcase_table)
+		goto fail;
+
+	error = exfat_load_upcase_table(root->i_sb,
+					__le32_to_cpu(upcase->cluster_addr),
+					&checksum);
+	if (error)
+		goto fail;
+
+	if (checksum != __le32_to_cpu(upcase->checksum)) {
+		exfat_msg(root->i_sb, KERN_INFO,
+			  "upcase table checksum mismatch: have %08x, "
+			  "expect %08x", checksum,
+			  __le32_to_cpu(upcase->checksum));
+		error = -EINVAL;
+		goto fail;
+	}
+
+	exfat_cleanup_dir_ctx(&dctx);
+	return 0;
+
+fail:
+	/*
+	 * reset the sbi fields so no cleanup path can observe a freed
+	 * pointer or a stale length.
+	 */
+	kfree(sbi->upcase_table);
+	sbi->upcase_table = NULL;
+	sbi->upcase_len = 0;
+	exfat_cleanup_dir_ctx(&dctx);
+	return error;
+}
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/fs/smb/server/smb1misc.c	2025-09-25 17:40:36.779373236 +0200
@@ -0,0 +1,298 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include "glob.h"
+#include "asn1.h"
+#include "nterr.h"
+#include "ksmbd_work.h"
+#include "smb_common.h"
+#include "smb1pdu.h"
+#include "mgmt/user_session.h"
+
+/**
+ * check_smb1_hdr() - check for valid smb request header
+ * @smb:        smb header to be checked
+ *
+ * check for valid smb signature and packet direction (request/response)
+ * TODO: properly check client authentication and tree authentication
+ *
+ * Return:      0 on success, otherwise 1
+ */
+static int check_smb1_hdr(struct smb_hdr *smb)
+{
+	/* does it have the right SMB "signature" ? */
+	if (*(__le32 *) smb->Protocol != SMB1_PROTO_NUMBER) {
+		ksmbd_debug(SMB, "Bad protocol string signature header 0x%x\n",
+				*(unsigned int *)smb->Protocol);
+		return 1;
+	}
+	ksmbd_debug(SMB, "got SMB\n");
+
+	/* if it's not a response then accept */
+	/* TODO : check for oplock break */
+	if (!(smb->Flags & SMBFLG_RESPONSE))
+		return 0;
+
+	/* a server should never receive a response-direction PDU */
+	ksmbd_debug(SMB, "Server sent request, not response\n");
+	return 1;
+}
+
+
+/**
+ * smb1_req_struct_size() - validate the WordCount of an SMB1 request
+ * @hdr:	request header; WordCount is the number of 16-bit parameter
+ *		words that follow the fixed header
+ *
+ * Return: the WordCount on success, -EINVAL on a per-command mismatch,
+ *	-EOPNOTSUPP for commands this server does not handle.
+ */
+static int smb1_req_struct_size(struct smb_hdr *hdr)
+{
+	int wc = hdr->WordCount;
+
+	switch (hdr->Command) {
+	case SMB_COM_CREATE_DIRECTORY:
+	case SMB_COM_DELETE_DIRECTORY:
+	case SMB_COM_QUERY_INFORMATION:
+	case SMB_COM_TREE_DISCONNECT:
+	case SMB_COM_NEGOTIATE:
+	case SMB_COM_NT_CANCEL:
+	case SMB_COM_CHECK_DIRECTORY:
+	case SMB_COM_PROCESS_EXIT:
+	case SMB_COM_QUERY_INFORMATION_DISK:
+		if (wc != 0x0)
+			return -EINVAL;
+		break;
+	case SMB_COM_FLUSH:
+	case SMB_COM_DELETE:
+	case SMB_COM_RENAME:
+	case SMB_COM_ECHO:
+	case SMB_COM_FIND_CLOSE2:
+		if (wc != 0x1)
+			return -EINVAL;
+		break;
+	case SMB_COM_LOGOFF_ANDX:
+		if (wc != 0x2)
+			return -EINVAL;
+		break;
+	case SMB_COM_CLOSE:
+		if (wc != 0x3)
+			return -EINVAL;
+		break;
+	case SMB_COM_TREE_CONNECT_ANDX:
+	case SMB_COM_NT_RENAME:
+		if (wc != 0x4)
+			return -EINVAL;
+		break;
+	case SMB_COM_WRITE:
+		if (wc != 0x5)
+			return -EINVAL;
+		break;
+	case SMB_COM_SETATTR:
+	case SMB_COM_LOCKING_ANDX:
+		if (wc != 0x8)
+			return -EINVAL;
+		break;
+	case SMB_COM_TRANSACTION:
+		if (wc < 0xe)
+			return -EINVAL;
+		break;
+	case SMB_COM_SESSION_SETUP_ANDX:
+		if (wc != 0xc && wc != 0xd)
+			return -EINVAL;
+		break;
+	case SMB_COM_OPEN_ANDX:
+	case SMB_COM_TRANSACTION2:
+		if (wc != 0xf)
+			return -EINVAL;
+		break;
+	case SMB_COM_NT_CREATE_ANDX:
+		if (wc != 0x18)
+			return -EINVAL;
+		break;
+	case SMB_COM_READ_ANDX:
+		if (wc != 0xa && wc != 0xc)
+			return -EINVAL;
+		break;
+	case SMB_COM_WRITE_ANDX:
+		if (wc != 0xc && wc != 0xe)
+			return -EINVAL;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	return wc;
+}
+
+/**
+ * smb1_get_byte_count() - read and validate the ByteCount of an SMB1 request
+ * @hdr:	request header; the __le16 ByteCount field immediately
+ *		follows the WordCount parameter words
+ *
+ * Return: the ByteCount on success, -EINVAL if it is too small for the
+ * command.
+ */
+static int smb1_get_byte_count(struct smb_hdr *hdr)
+{
+	int bc;
+
+	bc = le16_to_cpu(*(__le16 *)((char *)hdr +
+		sizeof(struct smb_hdr) + hdr->WordCount * 2));
+
+	switch (hdr->Command) {
+	case SMB_COM_CLOSE:
+	case SMB_COM_FLUSH:
+	case SMB_COM_READ_ANDX:
+	case SMB_COM_TREE_DISCONNECT:
+	case SMB_COM_LOGOFF_ANDX:
+	case SMB_COM_NT_CANCEL:
+	case SMB_COM_PROCESS_EXIT:
+	case SMB_COM_FIND_CLOSE2:
+		if (bc != 0x0)
+			return -EINVAL;
+		break;
+	case SMB_COM_LOCKING_ANDX:
+	case SMB_COM_TRANSACTION:
+	case SMB_COM_TRANSACTION2:
+	case SMB_COM_ECHO:
+	case SMB_COM_SESSION_SETUP_ANDX:
+		if (bc < 0x0) /* bc comes from a __le16, never negative; check is dead */
+			return -EINVAL;
+		break;
+	case SMB_COM_WRITE_ANDX:
+		if (bc < 0x1)
+			return -EINVAL;
+		break;
+	case SMB_COM_CREATE_DIRECTORY:
+	case SMB_COM_DELETE_DIRECTORY:
+	case SMB_COM_DELETE:
+	case SMB_COM_RENAME:
+	case SMB_COM_QUERY_INFORMATION:
+	case SMB_COM_SETATTR:
+	case SMB_COM_OPEN_ANDX:
+	case SMB_COM_NEGOTIATE:
+	case SMB_COM_CHECK_DIRECTORY:
+		if (bc < 0x2)
+			return -EINVAL;
+		break;
+	case SMB_COM_TREE_CONNECT_ANDX:
+	case SMB_COM_WRITE:
+		if (bc < 0x3)
+			return -EINVAL;
+		break;
+	case SMB_COM_NT_RENAME:
+		if (bc < 0x4)
+			return -EINVAL;
+		break;
+	case SMB_COM_NT_CREATE_ANDX:
+		/* unicode paths need a pad byte plus a 2-byte terminator */
+		if (hdr->Flags2 & SMBFLG2_UNICODE) {
+			if (bc < 3)
+				return -EINVAL;
+		} else if (bc < 2)
+			return -EINVAL;
+		break;
+	}
+
+	return bc;
+}
+
+/**
+ * smb1_calc_size() - compute expected PDU length from WordCount/ByteCount
+ * @hdr:	request header
+ *
+ * Return: expected PDU length (without the 4-byte RFC1002 header).  A
+ * negative errno from smb1_get_byte_count() is propagated through the
+ * unsigned return type; the caller's length comparison rejects it.
+ */
+static unsigned int smb1_calc_size(struct smb_hdr *hdr)
+{
+	int len = sizeof(struct smb_hdr) - 4 + 2;
+	int bc, struct_size = hdr->WordCount * 2;
+
+	len += struct_size;
+	bc = smb1_get_byte_count(hdr);
+	if (bc < 0)
+		return bc;
+	ksmbd_debug(SMB, "SMB1 byte count %d, struct size : %d\n", bc,
+		struct_size);
+	len += bc;
+
+	ksmbd_debug(SMB, "SMB1 len %d\n", len);
+	return len;
+}
+
+/**
+ * smb1_get_data_len() - end offset of the data area for data-carrying requests
+ * @hdr:	request header
+ *
+ * Return: DataOffset + DataLength for commands that carry a data area
+ * (WRITE_ANDX, TRANSACTION, TRANSACTION2), 0 for all other commands.
+ */
+static int smb1_get_data_len(struct smb_hdr *hdr)
+{
+	int data_len = 0;
+
+	/* data offset check */
+	switch (hdr->Command) {
+	case SMB_COM_WRITE_ANDX:
+	{
+		struct smb_com_write_req *req = (struct smb_com_write_req *)hdr;
+
+		/* 32-bit length split over two 16-bit on-wire fields */
+		data_len = le16_to_cpu(req->DataLengthLow);
+		data_len |= (le16_to_cpu(req->DataLengthHigh) << 16);
+		data_len += le16_to_cpu(req->DataOffset);
+		break;
+	}
+	case SMB_COM_TRANSACTION:
+	{
+		struct smb_com_trans_req *req = (struct smb_com_trans_req *)hdr;
+
+		data_len = le16_to_cpu(req->DataOffset) +
+			le16_to_cpu(req->DataCount);
+		break;
+	}
+	case SMB_COM_TRANSACTION2:
+	{
+		struct smb_com_trans2_req *req =
+				(struct smb_com_trans2_req *)hdr;
+
+		data_len = le16_to_cpu(req->DataOffset) +
+			le16_to_cpu(req->DataCount);
+		break;
+	}
+	}
+
+	return data_len;
+}
+
+/**
+ * ksmbd_smb1_check_message() - validate an incoming SMB1 PDU
+ * @work:	work containing the request buffer
+ *
+ * Checks signature/direction, per-command WordCount, that the declared
+ * data area fits in the received PDU, and that the computed length
+ * matches the RFC1002 length.
+ *
+ * Return: 0 if the message may be processed, 1 otherwise.
+ */
+int ksmbd_smb1_check_message(struct ksmbd_work *work)
+{
+	struct smb_hdr *hdr = (struct smb_hdr *)work->request_buf;
+	char *buf = work->request_buf;
+	int command = hdr->Command;
+	__u32 clc_len;  /* calculated length */
+	__u32 len = get_rfc1002_len(buf);
+	int wc, data_len;
+
+	if (check_smb1_hdr(hdr))
+		return 1;
+
+	wc = smb1_req_struct_size(hdr);
+	if (wc == -EOPNOTSUPP) {
+		ksmbd_debug(SMB, "Not support cmd %x\n", command);
+		return 1;
+	} else if (hdr->WordCount != wc) {
+		/* also reached when wc is -EINVAL: u8 WordCount never matches */
+		pr_err("Invalid word count, %d not %d. cmd %x\n",
+		       hdr->WordCount, wc, command);
+		return 1;
+	}
+
+	/* make sure the declared data area lies inside the received PDU */
+	data_len = smb1_get_data_len(hdr);
+	if (len < data_len) {
+		pr_err("Invalid data area length %u not %u. cmd : %x\n",
+		       len, data_len, command);
+		return 1;
+	}
+
+	clc_len = smb1_calc_size(hdr);
+	if (len != clc_len) {
+		/*
+		 * smbclient may return wrong byte count in smb header.
+		 * But allow it to avoid write failure with smbclient.
+		 */
+		if (command == SMB_COM_WRITE_ANDX)
+			return 0;
+
+		/* extra trailing bytes are tolerated, truncation is not */
+		if (len > clc_len) {
+			ksmbd_debug(SMB,
+				"cli req too long, len %d not %d. cmd:%x\n",
+				len, clc_len, command);
+			return 0;
+		}
+
+		pr_err("cli req too short, len %d not %d. cmd:%x\n",
+		       len, clc_len, command);
+
+		return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * smb_negotiate_request() - NEGOTIATE handler; defers to the common
+ *			dialect-negotiation helper
+ * @work:	smb work containing the negotiate request
+ */
+int smb_negotiate_request(struct ksmbd_work *work)
+{
+	return ksmbd_smb_negotiate_common(work, SMB_COM_NEGOTIATE);
+}
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/fs/smb/server/smb1ops.c	2025-09-25 17:40:36.783373256 +0200
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#include <linux/slab.h>
+
+#include "glob.h"
+#include "connection.h"
+#include "smb_common.h"
+#include "smb1pdu.h"
+
+/* protocol limits and capability bits advertised for the SMB1 dialect */
+static struct smb_version_values smb1_server_values = {
+	.version_string = SMB1_VERSION_STRING,
+	.protocol_id = SMB10_PROT_ID,
+	.capabilities = SMB1_SERVER_CAPS,
+	.max_read_size = CIFS_DEFAULT_IOSIZE,
+	.max_write_size = MAX_STREAM_PROT_LEN,
+	.max_trans_size = CIFS_DEFAULT_IOSIZE,
+	.large_lock_type = LOCKING_ANDX_LARGE_FILES,
+	.exclusive_lock_type = 0,
+	.shared_lock_type = LOCKING_ANDX_SHARED_LOCK,
+	.unlock_lock_type = 0,
+	.header_size = sizeof(struct smb_hdr),
+	.max_header_size = MAX_CIFS_HDR_SIZE,
+	.read_rsp_size = sizeof(struct smb_com_read_rsp),
+	.lock_cmd = cpu_to_le16(SMB_COM_LOCKING_ANDX),
+	.cap_unix = CAP_UNIX,
+	.cap_nt_find = CAP_NT_SMBS | CAP_NT_FIND,
+	.cap_large_files = CAP_LARGE_FILES,
+	.signing_enabled = SECMODE_SIGN_ENABLED,
+	.signing_required = SECMODE_SIGN_REQUIRED,
+	.max_credits = SMB2_MAX_CREDITS,
+};
+
+/* connection-level callbacks used by the generic ksmbd server loop */
+static struct smb_version_ops smb1_server_ops = {
+	.get_cmd_val = get_smb_cmd_val,
+	.init_rsp_hdr = init_smb_rsp_hdr,
+	.set_rsp_status = set_smb_rsp_status,
+	.allocate_rsp_buf = smb_allocate_rsp_buf,
+	.check_user_session = smb_check_user_session,
+	.is_sign_req = smb1_is_sign_req,
+	.check_sign_req = smb1_check_sign_req,
+	.set_sign_rsp = smb1_set_sign_rsp,
+	.get_ksmbd_tcon = smb_get_ksmbd_tcon,
+};
+
+/* per-command dispatch table, indexed by SMB_COM_* opcode; unlisted
+ * opcodes have a NULL .proc
+ */
+static struct smb_version_cmds smb1_server_cmds[256] = {
+	[SMB_COM_CREATE_DIRECTORY]	= { .proc = smb_mkdir, },
+	[SMB_COM_DELETE_DIRECTORY]	= { .proc = smb_rmdir, },
+	[SMB_COM_CLOSE]			= { .proc = smb_close, },
+	[SMB_COM_FLUSH]			= { .proc = smb_flush, },
+	[SMB_COM_DELETE]		= { .proc = smb_unlink, },
+	[SMB_COM_RENAME]		= { .proc = smb_rename, },
+	[SMB_COM_QUERY_INFORMATION]	= { .proc = smb_query_info, },
+	[SMB_COM_SETATTR]		= { .proc = smb_setattr, },
+	[SMB_COM_LOCKING_ANDX]		= { .proc = smb_locking_andx, },
+	[SMB_COM_TRANSACTION]		= { .proc = smb_trans, },
+	[SMB_COM_ECHO]			= { .proc = smb_echo, },
+	[SMB_COM_OPEN_ANDX]		= { .proc = smb_open_andx, },
+	[SMB_COM_READ_ANDX]		= { .proc = smb_read_andx, },
+	[SMB_COM_WRITE_ANDX]		= { .proc = smb_write_andx, },
+	[SMB_COM_TRANSACTION2]		= { .proc = smb_trans2, },
+	[SMB_COM_FIND_CLOSE2]		= { .proc = smb_closedir, },
+	[SMB_COM_TREE_DISCONNECT]	= { .proc = smb_tree_disconnect, },
+	[SMB_COM_NEGOTIATE]		= { .proc = smb_negotiate_request, },
+	[SMB_COM_SESSION_SETUP_ANDX]	= { .proc = smb_session_setup_andx, },
+	[SMB_COM_LOGOFF_ANDX]           = { .proc = smb_session_disconnect, },
+	[SMB_COM_TREE_CONNECT_ANDX]	= { .proc = smb_tree_connect_andx, },
+	[SMB_COM_QUERY_INFORMATION_DISK] = { .proc = smb_query_information_disk, },
+	[SMB_COM_NT_CREATE_ANDX]	= { .proc = smb_nt_create_andx, },
+	[SMB_COM_NT_CANCEL]		= { .proc = smb_nt_cancel, },
+	[SMB_COM_NT_RENAME]		= { .proc = smb_nt_rename, },
+	[SMB_COM_WRITE]			= { .proc = smb_write, },
+	[SMB_COM_CHECK_DIRECTORY]	= { .proc = smb_checkdir, },
+	[SMB_COM_PROCESS_EXIT]		= { .proc = smb_process_exit, },
+};
+
+/**
+ * init_smb1_server() - initialize an SMB server connection with the SMB1
+ *			command dispatcher
+ * @conn:	connection instance
+ *
+ * Return: always 0.
+ */
+int init_smb1_server(struct ksmbd_conn *conn)
+{
+	conn->vals = &smb1_server_values;
+	conn->ops = &smb1_server_ops;
+	conn->cmds = smb1_server_cmds;
+	conn->max_cmds = ARRAY_SIZE(smb1_server_cmds);
+	return 0;
+}
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/fs/smb/server/smb1pdu.c	2025-09-25 17:40:36.783373256 +0200
@@ -0,0 +1,9252 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+#include <linux/math64.h>
+#include <linux/fs.h>
+#include <linux/posix_acl_xattr.h>
+#include <linux/namei.h>
+#include <linux/statfs.h>
+#include <linux/vmalloc.h>
+#include <linux/filelock.h>
+#include <linux/version.h>
+
+#include "glob.h"
+#include "oplock.h"
+#include "connection.h"
+#include "transport_ipc.h"
+#include "vfs.h"
+#include "misc.h"
+
+#include "auth.h"
+#include "asn1.h"
+#include "server.h"
+#include "smb_common.h"
+#include "smb1pdu.h"
+#include "../common/smb2status.h"
+#include "mgmt/user_config.h"
+#include "mgmt/share_config.h"
+#include "mgmt/tree_connect.h"
+#include "mgmt/user_session.h"
+#include "ndr.h"
+#include "smberr.h"
+
+#define MAX_HEADER_SIZE(conn)		((conn)->vals->max_header_size)
+
+/* opcode -> name for debug logging; designated initializers leave
+ * unhandled opcodes NULL
+ */
+static const char *smb_cmd_str[] = {
+	[SMB_COM_CREATE_DIRECTORY] = "SMB_COM_CREATE_DIRECTORY",
+	[SMB_COM_DELETE_DIRECTORY] = "SMB_COM_DELETE_DIRECTORY",
+	[SMB_COM_CLOSE] = "SMB_COM_CLOSE",
+	[SMB_COM_FLUSH] = "SMB_COM_FLUSH",
+	[SMB_COM_DELETE] = "SMB_COM_DELETE",
+	[SMB_COM_RENAME] = "SMB_COM_RENAME",
+	[SMB_COM_QUERY_INFORMATION] = "SMB_COM_QUERY_INFORMATION",
+	[SMB_COM_SETATTR] = "SMB_COM_SETATTR",
+	[SMB_COM_WRITE] = "SMB_COM_WRITE",
+	[SMB_COM_CHECK_DIRECTORY] = "SMB_COM_CHECK_DIRECTORY",
+	[SMB_COM_PROCESS_EXIT] = "SMB_COM_PROCESS_EXIT",
+	[SMB_COM_LOCKING_ANDX] = "SMB_COM_LOCKING_ANDX",
+	[SMB_COM_TRANSACTION] = "SMB_COM_TRANSACTION",
+	[SMB_COM_COPY] = "SMB_COM_COPY",
+	[SMB_COM_ECHO] = "SMB_COM_ECHO",
+	[SMB_COM_OPEN_ANDX] = "SMB_COM_OPEN_ANDX",
+	[SMB_COM_READ_ANDX] = "SMB_COM_READ_ANDX",
+	[SMB_COM_WRITE_ANDX] = "SMB_COM_WRITE_ANDX",
+	[SMB_COM_TRANSACTION2] = "SMB_COM_TRANSACTION2",
+	[SMB_COM_TRANSACTION2_SECONDARY] = "SMB_COM_TRANSACTION2_SECONDARY",
+	[SMB_COM_FIND_CLOSE2] = "SMB_COM_FIND_CLOSE2",
+	[SMB_COM_TREE_DISCONNECT] = "SMB_COM_TREE_DISCONNECT",
+	[SMB_COM_NEGOTIATE] = "SMB_COM_NEGOTIATE",
+	[SMB_COM_SESSION_SETUP_ANDX] = "SMB_COM_SESSION_SETUP_ANDX",
+	[SMB_COM_LOGOFF_ANDX] = "SMB_COM_LOGOFF_ANDX",
+	[SMB_COM_TREE_CONNECT_ANDX] = "SMB_COM_TREE_CONNECT_ANDX",
+	[SMB_COM_QUERY_INFORMATION_DISK] = "SMB_COM_QUERY_INFORMATION_DISK",
+	[SMB_COM_NT_TRANSACT] = "SMB_COM_NT_TRANSACT",
+	[SMB_COM_NT_TRANSACT_SECONDARY] = "SMB_COM_NT_TRANSACT_SECONDARY",
+	[SMB_COM_NT_CREATE_ANDX] = "SMB_COM_NT_CREATE_ANDX",
+	[SMB_COM_NT_CANCEL] = "SMB_COM_NT_CANCEL",
+	[SMB_COM_NT_RENAME] = "SMB_COM_NT_RENAME",
+};
+
+/* map an opcode to its printable name; never returns NULL */
+static const char *smb_cmd_to_str(u16 cmd)
+{
+	/* the table is sparse: guard against NULL gaps, not just range */
+	if (cmd < ARRAY_SIZE(smb_cmd_str) && smb_cmd_str[cmd])
+		return smb_cmd_str[cmd];
+
+	return "unknown_cmd";
+}
+
+/* TRANS2 subcommand -> name; sparse, unhandled subcommands stay NULL.
+ * NOTE(review): "INCOSISTENCY" follows the identifier spelled elsewhere.
+ */
+static const char *smb_trans2_cmd_str[] = {
+	[TRANS2_OPEN] = "TRANS2_OPEN",
+	[TRANS2_FIND_FIRST] = "TRANS2_FIND_FIRST",
+	[TRANS2_FIND_NEXT] = "TRANS2_FIND_NEXT",
+	[TRANS2_QUERY_FS_INFORMATION] = "TRANS2_QUERY_FS_INFORMATION",
+	[TRANS2_SET_FS_INFORMATION] = "TRANS2_SET_FS_INFORMATION",
+	[TRANS2_QUERY_PATH_INFORMATION] = "TRANS2_QUERY_PATH_INFORMATION",
+	[TRANS2_SET_PATH_INFORMATION] = "TRANS2_SET_PATH_INFORMATION",
+	[TRANS2_QUERY_FILE_INFORMATION] = "TRANS2_QUERY_FILE_INFORMATION",
+	[TRANS2_SET_FILE_INFORMATION] = "TRANS2_SET_FILE_INFORMATION",
+	[TRANS2_CREATE_DIRECTORY] = "TRANS2_CREATE_DIRECTORY",
+	[TRANS2_GET_DFS_REFERRAL] = "TRANS2_GET_DFS_REFERRAL",
+	[TRANS2_REPORT_DFS_INCOSISTENCY] = "TRANS2_REPORT_DFS_INCOSISTENCY",
+};
+
+/* map a TRANS2 subcommand to its printable name; never returns NULL */
+static const char *smb_trans2_cmd_to_str(u16 cmd)
+{
+	/* the table is sparse: guard against NULL gaps, not just range */
+	if (cmd < ARRAY_SIZE(smb_trans2_cmd_str) && smb_trans2_cmd_str[cmd])
+		return smb_trans2_cmd_str[cmd];
+
+	return "unknown_trans2_cmd";
+}
+
+static int smb1_oplock_enable = false;	/* boolean flag: SMB1 oplocks off by default */
+
+/* Default: allocation roundup size = 1048576 */
+static unsigned int alloc_roundup_size = 1048576;
+
+/* directory entry as buffered between VFS readdir and SMB1 find replies */
+struct ksmbd_dirent {
+	unsigned long long	ino;
+	unsigned long long	offset;
+	unsigned int		namelen;
+	unsigned int		d_type;
+	char			name[];		/* flexible array, namelen bytes */
+};
+
+/* advance the running response length by @len bytes */
+static inline void inc_resp_size(struct ksmbd_work *work, size_t len)
+{
+	work->response_offset += len;
+}
+
+/* total request length including the 4-byte RFC1002 header */
+static inline unsigned int get_req_len(void *buf)
+{
+	return 4 + get_rfc1002_len(buf);
+}
+
+/**
+ * smb_NTtimeToUnix() - convert NTFS time to unix style time format
+ * @ntutc:	NTFS style time
+ *
+ * Convert the NT UTC (based 1601-01-01, in hundred nanosecond units)
+ * into Unix UTC (based 1970-01-01, in seconds).
+ *
+ * Return:      timespec containing unix style time
+ */
+static struct timespec64 smb_NTtimeToUnix(__le64 ntutc)
+{
+	struct timespec64 ts;
+
+	/* BB what about the timezone? BB */
+
+	/* Subtract the NTFS time offset, then convert to 1s intervals. */
+	/* this has been taken from cifs, ntfs code */
+	u64 t;
+
+	t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
+	/* do_div leaves the quotient in t and returns the remainder */
+	ts.tv_nsec = do_div(t, 10000000) * 100;
+	ts.tv_sec = t;
+	return ts;
+}
+
+/**
+ * get_smb_cmd_val() - get smb command value from smb header
+ * @work:	smb work containing smb header
+ *
+ * Return:      smb command value
+ */
+u16 get_smb_cmd_val(struct ksmbd_work *work)
+{
+	struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
+
+	return (u16)rcv_hdr->Command;	/* single byte on the wire, no endian swap */
+}
+
+/**
+ * is_smbreq_unicode() - check whether an smb request uses unicode strings
+ * @hdr:	pointer to smb_hdr in the request part
+ *
+ * Return: 1 if the request flags unicode, else 0
+ */
+static inline int is_smbreq_unicode(struct smb_hdr *hdr)
+{
+	return hdr->Flags2 & SMBFLG2_UNICODE ? 1 : 0;
+}
+
+/**
+ * set_smb_rsp_status() - set error type in smb response header
+ * @work:	smb work containing smb response header
+ * @err:	error code to set in response (already a __le32 NT status)
+ */
+void set_smb_rsp_status(struct ksmbd_work *work, __le32 err)
+{
+	struct smb_hdr *rsp_hdr = (struct smb_hdr *) work->response_buf;
+
+	rsp_hdr->Status.CifsError = err;
+}
+
+/**
+ * init_smb_rsp_hdr() - initialize smb response header
+ * @work:	smb work containing smb request
+ *
+ * Copies protocol signature, command and identifiers from the request
+ * header and marks the PDU as a response.
+ *
+ * Return:      always 0
+ */
+int init_smb_rsp_hdr(struct ksmbd_work *work)
+{
+	struct smb_hdr *rsp_hdr;
+	struct smb_hdr *rcv_hdr = (struct smb_hdr *)work->request_buf;
+
+	rsp_hdr = (struct smb_hdr *) work->response_buf;
+	/* clear header plus the WordCount/ByteCount area that follows it */
+	memset(rsp_hdr, 0, sizeof(struct smb_hdr) + 2);
+
+	/* smallest response is: smb_hdr, 1 byte wc and 2 bytes bcc */
+	work->response_offset = SMB_HEADER_SIZE + 2 + 1;
+	memcpy(rsp_hdr->Protocol, rcv_hdr->Protocol, 4);
+	rsp_hdr->Command = rcv_hdr->Command;
+
+	/*
+	 * Message is response. Other bits are obsolete.
+	 */
+	rsp_hdr->Flags = (SMBFLG_RESPONSE);
+
+	/*
+	 * Lets assume error code are NTLM. True for CIFS and windows 7
+	 */
+	rsp_hdr->Flags2 = rcv_hdr->Flags2;
+	rsp_hdr->PidHigh = rcv_hdr->PidHigh;
+	rsp_hdr->Pid = rcv_hdr->Pid;
+	rsp_hdr->Mid = rcv_hdr->Mid;
+	rsp_hdr->WordCount = 0;
+
+	/* single VC assumed (maxVCN == 1), so Uid/Tid can be echoed verbatim */
+	rsp_hdr->Uid = rcv_hdr->Uid;
+	rsp_hdr->Tid = rcv_hdr->Tid;
+	return 0;
+}
+
+/**
+ * smb_allocate_rsp_buf() - allocate response buffer for a command
+ * @work:	smb work containing smb request
+ *
+ * Commands whose replies can exceed the small-buffer size (directory
+ * searches, large query infolevels, transactions, oversized echos) get
+ * a large buffer sized from the negotiated max read size.
+ *
+ * Return:      0 on success, otherwise -ENOMEM
+ */
+int smb_allocate_rsp_buf(struct ksmbd_work *work)
+{
+	struct smb_hdr *hdr = (struct smb_hdr *)work->request_buf;
+	unsigned char cmd = hdr->Command;
+	size_t large_sz = work->conn->vals->max_read_size + MAX_CIFS_HDR_SIZE;
+	size_t sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+
+	if (cmd == SMB_COM_TRANSACTION2) {
+		struct smb_com_trans2_qpi_req *req = work->request_buf;
+		u16 sub_cmd = le16_to_cpu(req->SubCommand);
+		u16 infolevel = le16_to_cpu(req->InformationLevel);
+
+		if ((sub_cmd == TRANS2_FIND_FIRST) ||
+		    (sub_cmd == TRANS2_FIND_NEXT) ||
+		    (sub_cmd == TRANS2_QUERY_PATH_INFORMATION &&
+		     (infolevel == SMB_QUERY_FILE_UNIX_LINK ||
+		      infolevel == SMB_QUERY_POSIX_ACL ||
+		      infolevel == SMB_INFO_QUERY_ALL_EAS)))
+			sz = large_sz;
+	}
+
+	if (cmd == SMB_COM_TRANSACTION)
+		sz = large_sz;
+
+	if (cmd == SMB_COM_ECHO) {
+		int resp_size;
+		struct smb_com_echo_req *req = work->request_buf;
+
+		/*
+		 * size of struct smb_com_echo_rsp + Bytecount - Size of Data
+		 * in struct smb_com_echo_rsp
+		 */
+		resp_size = sizeof(struct smb_com_echo_rsp) +
+			    le16_to_cpu(req->ByteCount) - 1;
+		if (resp_size > MAX_CIFS_SMALL_BUFFER_SIZE)
+			sz = large_sz;
+	}
+
+	work->response_buf = kvmalloc(sz, KSMBD_DEFAULT_GFP | __GFP_ZERO);
+	work->response_sz = sz;
+
+	if (!work->response_buf) {
+		pr_err("Failed to allocate %zu bytes buffer\n", sz);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * andx_request_buffer() - return pointer to matching andx command
+ * @buf:	buffer containing smb request
+ * @len:	buffer length
+ * @command:	match next command with this command
+ *
+ * Walks the AndX chain starting right after the fixed header until the
+ * requested command (or the chain terminator) is found.
+ *
+ * Return:      pointer to matching command buffer on success, otherwise NULL
+ */
+static char *andx_request_buffer(char *buf, unsigned int len, int command)
+{
+	struct andx_block *andx_ptr =
+		(struct andx_block *)(buf + sizeof(struct smb_hdr) - 1);
+	struct andx_block *next;
+
+	/* AndXOffset does not include 4 byte RFC1002 header */
+	len -= 4;
+
+	/* NOTE(review): assumes offsets make progress; a crafted cyclic
+	 * AndX chain would loop here — confirm callers bound this.
+	 */
+	while (andx_ptr->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		unsigned int offset = le16_to_cpu(andx_ptr->AndXOffset);
+
+		if (offset > len)
+			return NULL;
+
+		next = (struct andx_block *)(buf + 4 + offset);
+		if (andx_ptr->AndXCommand == command)
+			return (char *)next;
+		andx_ptr = next;
+	}
+	return NULL;
+}
+
+/**
+ * andx_response_buffer() - return pointer to andx response buffer
+ * @buf:	buffer containing smb request
+ *
+ * Return:      pointer to andx command response on success, otherwise NULL
+ */
+static char *andx_response_buffer(char *buf)
+{
+	int pdu_length = get_rfc1002_len(buf);
+
+	/* append position: just past what has been built so far */
+	return buf + 4 + pdu_length;
+}
+
+/**
+ * smb_check_user_session() - check for valid session for a user
+ * @work:	smb work containing smb request buffer
+ *
+ * Return: 1 when a session was looked up and attached (with a reference
+ * taken), 0 when the command needs no session, otherwise error
+ */
+int smb_check_user_session(struct ksmbd_work *work)
+{
+	struct smb_hdr *req_hdr = (struct smb_hdr *)work->request_buf;
+	struct ksmbd_conn *conn = work->conn;
+	unsigned int cmd = conn->ops->get_cmd_val(work);
+
+	ksmbd_debug(SMB, "received SMB command: %s\n",
+		    smb_cmd_to_str(req_hdr->Command));
+
+	work->sess = NULL;
+	/* these commands are legal before authentication completes */
+	if (cmd == SMB_COM_NEGOTIATE || cmd == SMB_COM_SESSION_SETUP_ANDX ||
+	    cmd == SMB_COM_ECHO)
+		return 0;
+
+	if (!ksmbd_conn_good(conn))
+		return -EINVAL;
+
+	if (xa_empty(&conn->sessions)) {
+		ksmbd_debug(SMB, "NO sessions registered\n");
+		return -EINVAL;
+	}
+
+	work->sess = ksmbd_session_lookup(conn, le16_to_cpu(req_hdr->Uid));
+	if (work->sess) {
+		/* hold the session for the lifetime of this work item */
+		ksmbd_user_session_get(work->sess);
+		return 1;
+	}
+	ksmbd_debug(SMB, "Invalid user session, Uid %u\n",
+		    le16_to_cpu(req_hdr->Uid));
+	return -EINVAL;
+}
+
+/**
+ * smb_get_ksmbd_tcon() - attach the tree connection matching the request Tid
+ * @work:	smb work containing the request header and session
+ *
+ * Return: 1 when a tree connection was looked up and attached, 0 when
+ * the command needs none, otherwise error
+ */
+int smb_get_ksmbd_tcon(struct ksmbd_work *work)
+{
+	struct smb_hdr *req_hdr = (struct smb_hdr *)work->request_buf;
+	u8 cmd = req_hdr->Command;
+	int tree_id;
+
+	work->tcon = NULL;
+	/* these commands operate outside any tree connection */
+	if (cmd == SMB_COM_TREE_CONNECT_ANDX ||
+	    cmd == SMB_COM_NT_CANCEL ||
+	    cmd == SMB_COM_PROCESS_EXIT ||
+	    cmd == SMB_COM_LOGOFF_ANDX) {
+		ksmbd_debug(SMB, "skip to check tree connect request\n");
+		return 0;
+	}
+
+	if (xa_empty(&work->sess->tree_conns)) {
+		ksmbd_debug(SMB, "NO tree connected\n");
+		return -ENOENT;
+	}
+
+	tree_id = le16_to_cpu(req_hdr->Tid);
+	work->tcon = ksmbd_tree_conn_lookup(work->sess, tree_id);
+	if (!work->tcon) {
+		pr_err("Invalid tid %d\n", tree_id);
+		return -EINVAL;
+	}
+
+	return 1;
+}
+
+/**
+ * smb_session_disconnect() - LOGOFF request handler
+ * @work:	smb work containing log off request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_session_disconnect(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct ksmbd_session *sess = work->sess;
+
+	/* setting CifsExiting here may race with start_tcp_sess */
+	ksmbd_conn_set_need_reconnect(conn);
+
+	/* wait for in-flight requests before tearing down state */
+	ksmbd_conn_wait_idle(conn);
+
+	ksmbd_tree_conn_session_logoff(sess);
+
+	/* let start_tcp_sess free conn info now */
+	ksmbd_conn_set_exiting(conn);
+	return 0;
+}
+
+/**
+ * smb_tree_disconnect() - tree disconnect request handler
+ * @work:	smb work containing tree disconnect request buffer
+ *
+ * Drops the reference taken when the tree connection was attached,
+ * marks it disconnected and releases it.
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_tree_disconnect(struct ksmbd_work *work)
+{
+	struct smb_hdr *req_hdr = (struct smb_hdr *)work->request_buf;
+	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
+	struct ksmbd_tree_connect *tcon = work->tcon;
+	struct ksmbd_session *sess = work->sess;
+	int err;
+
+	if (!tcon) {
+		/* Tid is an on-wire __le16: convert before printing */
+		pr_err("Invalid tid %d\n", le16_to_cpu(req_hdr->Tid));
+		rsp_hdr->Status.CifsError = STATUS_NO_SUCH_USER;
+		return -EINVAL;
+	}
+
+	ksmbd_close_tree_conn_fds(work);
+
+	write_lock(&sess->tree_conns_lock);
+	if (tcon->t_state == TREE_DISCONNECTED) {
+		write_unlock(&sess->tree_conns_lock);
+		rsp_hdr->Status.CifsError = STATUS_NETWORK_NAME_DELETED;
+		return -ENOENT;
+	}
+
+	/* drop our reference; it must not be the last one here */
+	WARN_ON_ONCE(atomic_dec_and_test(&tcon->refcount));
+	tcon->t_state = TREE_DISCONNECTED;
+	write_unlock(&sess->tree_conns_lock);
+
+	err = ksmbd_tree_conn_disconnect(sess, tcon);
+	if (err) {
+		rsp_hdr->Status.CifsError = STATUS_NETWORK_NAME_DELETED;
+		return -ENOENT;
+	}
+
+	work->tcon = NULL;
+
+	return 0;
+}
+
+/**
+ * set_service_type() - fill the Service (and filesystem name) strings in
+ *			a TREE_CONNECT_ANDX response
+ * @conn:	connection, supplies the local charset for the UTF-16 name
+ * @share:	share being connected; decides IPC vs disk service
+ * @rsp:	response to fill; ByteCount is updated to match
+ */
+static void set_service_type(struct ksmbd_conn *conn,
+			     struct ksmbd_share_config *share,
+			     struct smb_com_tconx_rsp_ext *rsp)
+{
+	int length;
+	char *buf = rsp->Service;
+
+	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
+		length = strlen(SERVICE_IPC_SHARE);
+		memcpy(buf, SERVICE_IPC_SHARE, length);
+		rsp->ByteCount = cpu_to_le16(length + 1);
+		buf += length;
+		*buf = '\0';
+	} else {
+		int uni_len = 0;
+
+		/* ASCII service name, then UTF-16 native filesystem name */
+		length = strlen(SERVICE_DISK_SHARE);
+		memcpy(buf, SERVICE_DISK_SHARE, length);
+		buf[length] = '\0';
+		length += 1;
+		uni_len = smbConvertToUTF16((__le16 *)(buf + length),
+					    NATIVE_FILE_SYSTEM,
+					    strlen(NATIVE_FILE_SYSTEM),
+					    conn->local_nls, 0);
+		uni_len++;		/* include the UTF-16 terminator */
+		uni_len *= 2;		/* convert to bytes */
+		length += uni_len;
+		rsp->ByteCount = cpu_to_le16(length);
+	}
+}
+
+/**
+ * smb_tree_connect_andx() - tree connect request handler
+ * @work:	smb work containing tree connect request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_tree_connect_andx(struct ksmbd_work *work)
+{
+	struct smb_hdr *req_hdr = (struct smb_hdr *)work->request_buf;
+	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_com_tconx_req *req;
+	struct smb_com_tconx_rsp_ext *rsp;
+	int extra_byte = 0;
+	char *treename = NULL, *name = NULL, *dev_type = NULL;
+	struct ksmbd_share_config *share;
+	struct ksmbd_session *sess = work->sess;
+	int dev_flags = 0;
+	struct ksmbd_tree_conn_status status;
+	unsigned int maxlen, offset;
+
+	maxlen = get_req_len(req_hdr);
+
+	/* Is this an ANDX command ? */
+	if (req_hdr->Command != SMB_COM_TREE_CONNECT_ANDX) {
+		ksmbd_debug(SMB, "SMB_COM_TREE_CONNECT_ANDX is part of ANDX");
+		req = (struct smb_com_tconx_req *)
+			andx_request_buffer(work->request_buf, maxlen,
+					    SMB_COM_TREE_CONNECT_ANDX);
+		rsp = (struct smb_com_tconx_rsp_ext *)
+			andx_response_buffer(work->response_buf);
+		extra_byte = 3;
+		if (!req) {
+			status.ret = -EINVAL;
+			goto out_err;
+		}
+	} else {
+		req = (struct smb_com_tconx_req *)(&req_hdr->WordCount);
+		rsp = (struct smb_com_tconx_rsp_ext *)(&rsp_hdr->WordCount);
+	}
+
+	/* tree name follows the (variable length) password field */
+	offset = (char *)req - (char *)work->request_buf;
+	offset += offsetof(struct smb_com_tconx_req, Password)
+		  + le16_to_cpu(req->PasswordLength);
+
+	if (offset >= maxlen) {
+		status.ret = -EINVAL;
+		goto out_err;
+	}
+
+	treename = smb_strndup_from_utf16((char *)work->request_buf + offset,
+					  maxlen - offset,
+					  true,
+					  conn->local_nls);
+	if (IS_ERR(treename)) {
+		pr_err("Unable to strdup() treename for uid %d\n",
+		       rsp_hdr->Uid);
+		status.ret = KSMBD_TREE_CONN_STATUS_ERROR;
+		goto out_err;
+	}
+
+	/* adjust tree name length */
+	offset += (strlen(treename) + 1) * 2;
+	if (offset > maxlen) {
+		status.ret = -EINVAL;
+		goto out_err;
+	}
+
+	dev_type = kstrndup((char *)work->request_buf + offset,
+			    maxlen - offset, KSMBD_DEFAULT_GFP);
+	if (!dev_type) {	/* kstrndup() returns NULL on failure, not ERR_PTR */
+		pr_err("Unable to strdup() devtype for uid %d\n",
+		       rsp_hdr->Uid);
+		status.ret = KSMBD_TREE_CONN_STATUS_ERROR;
+		goto out_err;
+	}
+
+	name = ksmbd_extract_sharename(conn->um, treename);
+	if (IS_ERR(name)) {
+		status.ret = KSMBD_TREE_CONN_STATUS_ERROR;
+		goto out_err;
+	}
+
+	ksmbd_debug(SMB, "tree connect request for tree %s, dev_type : %s\n",
+		    name, dev_type);
+
+	if (!strcmp(dev_type, "A:"))
+		dev_flags = 1;
+	else if (!strncmp(dev_type, "LPT", 3))
+		dev_flags = 2;
+	else if (!strcmp(dev_type, "IPC"))
+		dev_flags = 3;
+	else if (!strcmp(dev_type, "COMM"))
+		dev_flags = 4;
+	else if (!strcmp(dev_type, "?????"))
+		dev_flags = 5;
+
+	/* device type must be compatible with the requested share */
+	if (!strcmp(name, "IPC$")) {
+		if (dev_flags < 3) {
+			status.ret = -ENODEV;
+			goto out_err;
+		}
+	} else if (!dev_flags || (dev_flags > 1 && dev_flags < 5)) {
+		status.ret = -ENODEV;
+		goto out_err;
+	}
+
+	status = ksmbd_tree_conn_connect(work, name);
+	if (status.ret == KSMBD_TREE_CONN_STATUS_OK)
+		rsp_hdr->Tid = cpu_to_le16(status.tree_conn->id);
+	else
+		goto out_err;
+
+	status.ret = 0;
+	share = status.tree_conn->share_conf;
+	rsp->WordCount = 7;
+	rsp->OptionalSupport = cpu_to_le16(SMB_SUPPORT_SEARCH_BITS |
+					   SMB_CSC_NO_CACHING |
+					   SMB_UNIQUE_FILE_NAME);
+
+	rsp->MaximalShareAccessRights = cpu_to_le32(FILE_READ_RIGHTS |
+						    FILE_EXEC_RIGHTS);
+	if (test_tree_conn_flag(status.tree_conn,
+				KSMBD_TREE_CONN_FLAG_WRITABLE))
+		rsp->MaximalShareAccessRights |= cpu_to_le32(FILE_WRITE_RIGHTS);
+	rsp->GuestMaximalShareAccessRights = 0;
+
+	set_service_type(conn, share, rsp);
+
+	/* For each extra andx response, we have to add 1 byte,
+	 * for wc and 2 bytes for byte count
+	 */
+	inc_resp_size(work, 7 * 2 + le16_to_cpu(rsp->ByteCount) + extra_byte);
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(rsp_hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		/* More processing required */
+		status.ret = rsp->AndXCommand;
+	} else {
+		rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+	}
+
+	kfree(treename);
+	kfree(dev_type);
+	kfree(name);
+
+	write_lock(&sess->tree_conns_lock);
+	status.tree_conn->t_state = TREE_CONNECTED;
+	write_unlock(&sess->tree_conns_lock);
+
+	return status.ret;
+
+out_err:
+	/* pointers are NULL, valid, or ERR_PTR; kfree(NULL) is a no-op */
+	if (!IS_ERR(treename))
+		kfree(treename);
+	if (!IS_ERR(dev_type))
+		kfree(dev_type);
+	if (!IS_ERR(name))
+		kfree(name);
+
+	rsp->WordCount = 7;
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(rsp_hdr));
+	rsp->OptionalSupport = 0;
+	rsp->MaximalShareAccessRights = 0;
+	rsp->GuestMaximalShareAccessRights = 0;
+	rsp->ByteCount = 0;
+	ksmbd_debug(SMB, "error while tree connect\n");
+	switch (status.ret) {
+	case KSMBD_TREE_CONN_STATUS_NO_SHARE:
+		rsp_hdr->Status.CifsError = STATUS_BAD_NETWORK_PATH;
+		break;
+	case -ENOMEM:
+	case KSMBD_TREE_CONN_STATUS_NOMEM:
+		rsp_hdr->Status.CifsError = STATUS_NO_MEMORY;
+		break;
+	case KSMBD_TREE_CONN_STATUS_TOO_MANY_CONNS:
+	case KSMBD_TREE_CONN_STATUS_TOO_MANY_SESSIONS:
+		rsp_hdr->Status.CifsError = STATUS_ACCESS_DENIED;
+		break;
+	case -ENODEV:
+		rsp_hdr->Status.CifsError = STATUS_BAD_DEVICE_TYPE;
+		break;
+	case KSMBD_TREE_CONN_STATUS_ERROR:
+		rsp_hdr->Status.CifsError = STATUS_BAD_NETWORK_NAME;
+		break;
+	case -EINVAL:
+		rsp_hdr->Status.CifsError = STATUS_INVALID_PARAMETER;
+		break;
+	default:
+		rsp_hdr->Status.CifsError = STATUS_ACCESS_DENIED;
+	}
+
+	inc_resp_size(work, 7 * 2 + le16_to_cpu(rsp->ByteCount) + extra_byte);
+	return -EINVAL;
+}
+
+/**
+ * smb_get_name() - convert filename on smb packet to char string
+ * @share:	share config, used for veto-file checks
+ * @src:	source filename, mostly in unicode format
+ * @maxlen:	maxlen of src string to be used for parsing
+ * @work:	smb work containing smb header flag
+ * @converted:	src string already converted to local characterset
+ *
+ * Return:	pointer to filename string on success, otherwise error ptr
+ */
+static char *smb_get_name(struct ksmbd_share_config *share, const char *src,
+			  const int maxlen, struct ksmbd_work *work,
+			  bool converted)
+{
+	struct smb_hdr *req_hdr = (struct smb_hdr *)work->request_buf;
+	bool is_unicode = is_smbreq_unicode(req_hdr);
+	char *name, *wild_card_pos;
+
+	if (converted)
+		name = (char *)src;	/* caller keeps ownership in this case */
+	else {
+		name = smb_strndup_from_utf16(src, maxlen, is_unicode,
+					      work->conn->local_nls);
+		if (IS_ERR(name)) {
+			ksmbd_debug(SMB, "failed to get name %ld\n",
+				    PTR_ERR(name));
+			return name;
+		}
+	}
+
+	ksmbd_conv_path_to_unix(name);
+	ksmbd_strip_last_slash(name);
+
+	/*Handling of dir path in FIND_FIRST2 having '*' at end of path*/
+	wild_card_pos = strrchr(name, '*');
+
+	if (wild_card_pos != NULL)
+		*wild_card_pos = '\0';
+
+
+	if (ksmbd_validate_filename(name) < 0) {
+		if (!converted)
+			kfree(name);
+		return ERR_PTR(-ENOENT);
+	}
+
+	if (ksmbd_share_veto_filename(share, name)) {
+		ksmbd_debug(SMB,
+			"file(%s) open is not allowed by setting as veto file\n",
+			name);
+		if (!converted)
+			kfree(name);
+		return ERR_PTR(-ENOENT);
+	}
+
+	ksmbd_debug(SMB, "file name = %s\n", name);
+
+	return name;
+}
+
+/**
+ * smb_get_dir_name() - convert directory name on smb packet to char string
+ * @share:	share config, used to check veto-file restrictions
+ * @src:	source dir name, mostly in unicode format
+ * @maxlen:	maxlen of src string to be used for parsing
+ * @work:	smb work containing smb header flag
+ * @srch_ptr:	update search pointer in dir for searching dir entries
+ *
+ * Splits the final path component of @src off into a freshly allocated
+ * search pattern returned through @srch_ptr.  On success the caller must
+ * free both the returned name and *@srch_ptr.  On failure the response
+ * header status is already set and *@srch_ptr is NULL.
+ *
+ * Return:	pointer to dir name string on success, otherwise error ptr
+ */
+static char *smb_get_dir_name(struct ksmbd_share_config *share,
+			      const char *src, const int maxlen,
+			      struct ksmbd_work *work, char **srch_ptr)
+{
+	struct smb_hdr *req_hdr = (struct smb_hdr *)work->request_buf;
+	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
+	bool is_unicode = is_smbreq_unicode(req_hdr);
+	char *name, *pattern_pos, *pattern = NULL;
+	int pattern_len, rc;
+
+	name = smb_strndup_from_utf16(src, maxlen, is_unicode,
+				      work->conn->local_nls);
+	if (IS_ERR(name)) {
+		pr_err("failed to allocate memory\n");
+		rsp_hdr->Status.CifsError = STATUS_NO_MEMORY;
+		return name;
+	}
+
+	/* normalize: backslashes to slashes, drop any trailing slash */
+	ksmbd_conv_path_to_unix(name);
+	ksmbd_strip_last_slash(name);
+
+	/* the search pattern is everything after the last '/' */
+	pattern_pos = strrchr(name, '/');
+
+	if (!pattern_pos)
+		pattern_pos = name;
+	else
+		pattern_pos += 1;
+
+	pattern_len = strlen(pattern_pos);
+	if (pattern_len == 0) {
+		rc = -EINVAL;
+		goto err_name;
+	}
+	ksmbd_debug(SMB, "pattern searched = %s pattern_len = %d\n",
+		    pattern_pos, pattern_len);
+	pattern = kmalloc(pattern_len + 1, KSMBD_DEFAULT_GFP);
+	if (!pattern) {
+		rc = -ENOMEM;
+		goto err_name;
+	}
+	memcpy(pattern, pattern_pos, pattern_len);
+	*(pattern + pattern_len) = '\0';
+	/* truncate @name to its directory part (may become "") */
+	*pattern_pos = '\0';
+	*srch_ptr = pattern;
+
+	if (ksmbd_validate_filename(name) < 0) {
+		rc = -ENOENT;
+		goto err_pattern;
+	}
+
+	if (ksmbd_share_veto_filename(share, name)) {
+		ksmbd_debug(SMB,
+			"file(%s) open is not allowed by setting as veto file\n",
+			name);
+		rc = -ENOENT;
+		goto err_pattern;
+	}
+
+	ksmbd_debug(SMB, "dir name = %s\n", name);
+	return name;
+
+err_pattern:
+	kfree(pattern);
+	*srch_ptr = NULL;
+err_name:
+	kfree(name);
+
+	/* map errno to the NT status reported to the client */
+	if (rc == -EINVAL)
+		rsp_hdr->Status.CifsError = STATUS_INVALID_PARAMETER;
+	else if (rc == -ENOMEM)
+		rsp_hdr->Status.CifsError = STATUS_NO_MEMORY;
+	else if (rc == -ENOENT)
+		rsp_hdr->Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+
+	return ERR_PTR(rc);
+}
+
+/*
+ * Perform the actual rename for an SMB1 rename-style request.
+ * Fails with -EACCES when the tree connection is not writable,
+ * otherwise delegates to ksmbd_vfs_rename().
+ */
+static int smb_common_rename(struct ksmbd_work *work, struct ksmbd_file *fp,
+			     char *newname, int flags)
+{
+	bool writable;
+
+	writable = test_tree_conn_flag(work->tcon,
+				       KSMBD_TREE_CONN_FLAG_WRITABLE);
+	if (!writable) {
+		ksmbd_debug(SMB, "user does not have permission to write\n");
+		return -EACCES;
+	}
+
+	return ksmbd_vfs_rename(work, &fp->filp->f_path, newname, flags);
+}
+
+/**
+ * smb_rename() - rename request handler
+ * @work:	smb work containing rename request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_rename(struct ksmbd_work *work)
+{
+	struct smb_com_rename_req *req = work->request_buf;
+	struct smb_com_rename_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	bool is_unicode = is_smbreq_unicode(&req->hdr);
+	char *oldname, *newname = NULL;
+	struct ksmbd_file *fp = NULL;
+	int oldname_len;
+	struct path path, parent_path;
+	int rc = 0;
+	unsigned int maxlen, offset;
+
+	/* bounds-check the variable data area before parsing names */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_rename_req, OldFileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	oldname = smb_get_name(share, (char *)req + offset,
+			       maxlen - offset, work, false);
+	if (IS_ERR(oldname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(oldname);
+	}
+
+	/* compute the on-wire length of the old name to find the new one */
+	if (is_unicode)
+		oldname_len = smb1_utf16_name_length((__le16 *)req->OldFileName,
+						     maxlen - offset);
+	else {
+		oldname_len = strlen(oldname);
+		oldname_len++;	/* include the NUL terminator */
+	}
+
+	/* 2 bytes for BufferFormat field and padding byte */
+	offset += oldname_len + 2;
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		goto out_free_name;
+	}
+
+	newname = smb_get_name(share, (char *)req + offset,
+			       maxlen - offset, work, false);
+	if (IS_ERR(newname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		rc = PTR_ERR(newname);
+		newname = NULL;	/* so kfree(newname) below is a no-op */
+		goto out_free_name;
+	}
+
+	ksmbd_debug(SMB, "rename %s -> %s\n", oldname, newname);
+	rc = ksmbd_vfs_kern_path_locked(work, oldname, LOOKUP_NO_SYMLINKS,
+					&parent_path, &path, 1);
+	if (rc)
+		goto out_free_name;
+
+	fp = ksmbd_vfs_dentry_open(work, &path, O_RDONLY, 0, false);
+
+	/* release parent lock to avoid deadlock in smb_common_rename */
+	inode_unlock(d_inode(parent_path.dentry));
+
+	if (IS_ERR(fp)) {
+		rc = PTR_ERR(fp);
+		fp = NULL;	/* skip ksmbd_close_fd() in cleanup */
+		goto out_kern_path;
+	}
+	ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED);
+
+	/* RENAME_NOREPLACE: an existing target must not be clobbered */
+	rc = smb_common_rename(work, fp, newname, RENAME_NOREPLACE);
+	if (rc)
+		goto out_kern_path;
+
+	/* success: response carries empty parameter and data sections */
+	rsp->hdr.WordCount = 0;
+	rsp->ByteCount = 0;
+
+out_kern_path:
+	if (fp)
+		ksmbd_close_fd(work, fp->volatile_id);
+	path_put(&path);
+	path_put(&parent_path);
+
+out_free_name:
+	kfree(oldname);
+	kfree(newname);
+
+	/* map errno to the NT status reported to the client */
+	if (rc) {
+		switch (rc) {
+		case -EEXIST:
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_COLLISION;
+			break;
+		case -ENOENT:
+			rsp->hdr.Status.CifsError =
+				NT_STATUS_OBJECT_NAME_NOT_FOUND;
+			break;
+		case -ENOMEM:
+			rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+			break;
+		case -EACCES:
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * smb_handle_negotiate() - negotiate request handler
+ * @work:	smb work containing negotiate request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_handle_negotiate(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_negotiate_rsp *neg_rsp = work->response_buf;
+	__u64 time;
+	int rc = 0;
+
+	/* negotiate must happen before the connection is fully set up */
+	WARN_ON(ksmbd_conn_good(conn));
+
+	if (conn->dialect == BAD_PROT_ID) {
+		neg_rsp->hdr.Status.CifsError = STATUS_INVALID_LOGON_TYPE;
+		rc = -EINVAL;
+		goto err_out;
+	}
+
+	conn->connection_type = 0;
+
+	/* wct 17 for NTLM */
+	neg_rsp->hdr.WordCount = 17;
+	neg_rsp->DialectIndex = cpu_to_le16(conn->dialect);
+
+	/* advertise signing support per server configuration */
+	neg_rsp->SecurityMode = SMB1_SERVER_SECU;
+	if (server_conf.signing == KSMBD_CONFIG_OPT_AUTO ||
+	    server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY) {
+		conn->sign = true;
+		neg_rsp->SecurityMode |= SECMODE_SIGN_ENABLED;
+		if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY)
+			neg_rsp->SecurityMode |= SECMODE_SIGN_REQUIRED;
+	}
+	neg_rsp->MaxMpxCount = cpu_to_le16(SMB1_MAX_MPX_COUNT);
+	neg_rsp->MaxNumberVcs = cpu_to_le16(SMB1_MAX_VCS);
+	neg_rsp->MaxBufferSize = cpu_to_le32(conn->vals->max_read_size);
+	neg_rsp->MaxRawSize = cpu_to_le32(SMB1_MAX_RAW_SIZE);
+	neg_rsp->SessionKey = 0;
+	neg_rsp->Capabilities = cpu_to_le32(SMB1_SERVER_CAPS);
+
+	/* split 64-bit server time into the two 32-bit response fields */
+	time = ksmbd_systime();
+	neg_rsp->SystemTimeLow = cpu_to_le32(time & 0x00000000FFFFFFFF);
+	neg_rsp->SystemTimeHigh =
+		cpu_to_le32((time & 0xFFFFFFFF00000000) >> 32);
+	neg_rsp->ServerTimeZone = 0;
+
+	/* client requesting extended security enables SPNEGO */
+	if (((struct smb_hdr *)work->request_buf)->Flags2 & SMBFLG2_EXT_SEC)
+		conn->use_spnego = true;
+
+	ksmbd_debug(SMB, "spnego is %s\n", conn->use_spnego ? "on" : "off");
+
+	if (conn->use_spnego == false) {
+		neg_rsp->EncryptionKeyLength = CIFS_CRYPTO_KEY_SIZE;
+		neg_rsp->Capabilities &= ~cpu_to_le32(CAP_EXTENDED_SECURITY);
+		neg_rsp->ByteCount = cpu_to_le16(CIFS_CRYPTO_KEY_SIZE);
+		/* initialize random server challenge */
+		get_random_bytes(conn->ntlmssp.cryptkey, sizeof(__u64));
+		memcpy((neg_rsp->u.EncryptionKey), conn->ntlmssp.cryptkey,
+		       CIFS_CRYPTO_KEY_SIZE);
+		/* Adjust pdu length, 17 words and 8 bytes added */
+		inc_resp_size(work, 17 * 2 + 8);
+	} else {
+		/* extended security: send a GUID plus the GSS/SPNEGO token */
+		neg_rsp->EncryptionKeyLength = 0;
+		neg_rsp->ByteCount = cpu_to_le16(SMB1_CLIENT_GUID_SIZE +
+			AUTH_GSS_LENGTH);
+		get_random_bytes(neg_rsp->u.extended_response.GUID,
+				 SMB1_CLIENT_GUID_SIZE);
+		ksmbd_copy_gss_neg_header(
+				neg_rsp->u.extended_response.SecurityBlob);
+		inc_resp_size(work, 17 * 2 + 16 + AUTH_GSS_LENGTH);
+	}
+
+	/* Null terminated domain name in unicode */
+
+	ksmbd_conn_set_need_negotiate(conn);
+	/* Domain name and PC name are ignored by clients, so no need to send.
+	 * We can try sending them later
+	 */
+err_out:
+	return rc;
+}
+
+/*
+ * Build a SESSION_SETUP_ANDX response for the non-extended-security
+ * (plain NTLM/NTLMv2) path and authenticate the user named in @req.
+ * Returns 0 on success, a positive AndX command byte when chained
+ * processing is required, or a negative errno on failure.
+ */
+static int build_sess_rsp_noextsec(struct ksmbd_work *work,
+				   struct ksmbd_session *sess,
+				   struct smb_com_session_setup_req_no_secext *req,
+				   struct smb_com_session_setup_old_resp *rsp)
+{
+	struct ksmbd_conn *conn = work->conn;
+	u16 oem_passwd_len, unicode_passwd_len;
+	u8 *oem_passwd, *unicode_passwd;
+	int offset, err = 0, len;
+	unsigned int maxlen;
+	char *name;
+	__le16 str[32];	/* scratch for the short UTF-16 strings below */
+
+	/* Build response. We don't use extended security (yet), so wct is 3 */
+	rsp->hdr.WordCount = 3;
+	rsp->Action = 0;
+	/* The names should be unicode */
+	rsp->ByteCount = 0;
+	/* adjust pdu length */
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+	maxlen = get_req_len(req);
+
+	/* start of variable data */
+	offset = offsetof(struct smb_com_session_setup_req_no_secext,
+			  CaseInsensitivePassword);
+
+	/* verify password length fields and save pointers to data */
+	oem_passwd_len = le16_to_cpu(req->CaseInsensitivePasswordLength);
+	if (offset + oem_passwd_len > maxlen) {
+		err = -EINVAL;
+		goto out_err;
+	}
+	oem_passwd = (char *)req + offset;
+	offset += oem_passwd_len;
+
+	unicode_passwd_len = le16_to_cpu(req->CaseSensitivePasswordLength);
+	if (offset + unicode_passwd_len > maxlen) {
+		err = -EINVAL;
+		goto out_err;
+	}
+	unicode_passwd = (char *)req + offset;
+	offset += unicode_passwd_len;
+
+	/* 1 byte for padding */
+	offset++;
+	if (offset > maxlen) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	/* check if valid user name is present in request or not */
+	name = smb_strndup_from_utf16((char *)req + offset, maxlen - offset,
+				      true,  conn->local_nls);
+	if (IS_ERR(name)) {
+		pr_err("cannot allocate memory\n");
+		err = PTR_ERR(name);
+		goto out_err;
+	}
+	offset += (strlen(name) + 1) * 2;	/* skip name incl. UTF-16 NUL */
+
+	WARN_ON(sess->user);
+
+	ksmbd_debug(SMB, "session setup request for user %s\n", name);
+	sess->user = ksmbd_login_user(name);
+	kfree(name);
+	if (!sess->user) {
+		pr_err("user not present in database\n");
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	/* guest accounts are accepted without any password check */
+	if (user_guest(sess->user)) {
+		rsp->Action = cpu_to_le16(GUEST_LOGIN);
+		goto no_password_check;
+	}
+
+	/*
+	 * NOTE(review): the *case-sensitive* (unicode) length is tested
+	 * here, but the case-insensitive OEM buffer is what is handed to
+	 * ksmbd_auth_ntlm() — confirm this matches the intended NTLM
+	 * response layout.
+	 */
+	if (unicode_passwd_len == CIFS_AUTH_RESP_SIZE) {
+		err = ksmbd_auth_ntlm(sess, oem_passwd,
+				      conn->ntlmssp.cryptkey);
+		if (err) {
+			pr_err("ntlm authentication failed for user %s\n",
+			       user_name(sess->user));
+			goto out_err;
+		}
+	} else {
+		/* NTLMv2: the domain name follows the account name */
+		char *ntdomain;
+
+		ntdomain = smb_strndup_from_utf16((char *)req + offset,
+						  maxlen - offset, true,
+						  conn->local_nls);
+		if (IS_ERR(ntdomain)) {
+			pr_err("cannot allocate memory\n");
+			err = PTR_ERR(ntdomain);
+			goto out_err;
+		}
+
+		err = ksmbd_auth_ntlmv2(conn, sess,
+					(struct ntlmv2_resp *)unicode_passwd,
+					unicode_passwd_len - CIFS_ENCPWD_SIZE,
+					ntdomain, conn->ntlmssp.cryptkey);
+		kfree(ntdomain);
+		if (err) {
+			pr_err("authentication failed for user %s\n",
+			       user_name(sess->user));
+			goto out_err;
+		}
+	}
+
+no_password_check:
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+
+	/* 1 byte padding for word alignment */
+	offset = 1;
+
+	memset(str, 0, sizeof(str));
+
+	/*
+	 * Append NativeOS, NativeLanMan and PrimaryDomain strings; the
+	 * variable data is contiguous, so all three are written relative
+	 * to rsp->NativeOS using the running offset.
+	 */
+	len = smb_strtoUTF16(str, "Unix", 4, conn->local_nls);
+	len = UNICODE_LEN(len + 1);
+	memcpy(rsp->NativeOS + offset, str, len);
+	offset += len;
+
+	len = smb_strtoUTF16(str, "ksmbd", 5, conn->local_nls);
+	len = UNICODE_LEN(len + 1);
+	memcpy(rsp->NativeOS + offset, str, len);
+	offset += len;
+
+	len = smb_strtoUTF16(str, "WORKGROUP", 9, conn->local_nls);
+	len = UNICODE_LEN(len + 1);
+	memcpy(rsp->NativeOS + offset, str, len);
+	offset += len;
+
+	rsp->ByteCount = cpu_to_le16(offset);
+	inc_resp_size(work, offset);
+
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+
+out_err:
+	return err;
+}
+
+/*
+ * Build a SESSION_SETUP_ANDX response for the extended-security
+ * (SPNEGO/NTLMSSP) path.  Handles both the NEGOTIATE phase (returns a
+ * challenge blob with STATUS_MORE_PROCESSING_REQUIRED) and the
+ * AUTHENTICATE phase (validates the client's NTLMSSP response).
+ * Returns 0 on success, a positive AndX command byte when chained
+ * processing is required, or a negative errno on failure.
+ */
+static int build_sess_rsp_extsec(struct ksmbd_work *work,
+				 struct ksmbd_session *sess,
+				 struct smb_com_session_setup_req *req,
+				 struct smb_com_session_setup_resp *rsp)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct negotiate_message *negblob;
+	char *neg_blob;
+	int err = 0, neg_blob_len;
+	unsigned char *spnego_blob;
+	u16 spnego_blob_len;
+	int sz;
+
+	rsp->hdr.WordCount = 4;
+	rsp->Action = 0;
+
+	/* The names should be unicode */
+	rsp->ByteCount = 0;
+	/* adjust pdu length */
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+	negblob = (struct negotiate_message *)req->SecurityBlob;
+	sz = le16_to_cpu(req->SecurityBlobLength);
+
+	/* reject blobs that would read past the end of the request */
+	if (offsetof(struct smb_com_session_setup_req, SecurityBlob) + sz >
+		get_req_len(req)) {
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	/* try SPNEGO wrapping first; fall back to raw NTLMSSP */
+	if (ksmbd_decode_negTokenInit((char *)negblob, sz, conn)) {
+		if (ksmbd_decode_negTokenTarg((char *)negblob, sz, conn)) {
+			conn->use_spnego = false;
+		}
+	}
+
+	/* mechToken holds the inner NTLMSSP token extracted from SPNEGO */
+	if (conn->mechToken)
+		negblob = (struct negotiate_message *)conn->mechToken;
+
+	if (negblob->MessageType == NtLmNegotiate) {
+		struct challenge_message *chgblob;
+
+		ksmbd_debug(SMB, "negotiate phase\n");
+		err = ksmbd_decode_ntlmssp_neg_blob(negblob, sz, conn);
+		if (err)
+			goto out_err;
+
+		chgblob = (struct challenge_message *)rsp->SecurityBlob;
+		memset(chgblob, 0, sizeof(struct challenge_message));
+
+		if (conn->use_spnego) {
+			/* NOTE(review): this inner sz shadows the outer sz */
+			int sz;
+
+			sz = sizeof(struct negotiate_message) +
+				(strlen(ksmbd_netbios_name()) * 2 + 1 + 4) * 6;
+			neg_blob = kmalloc(sz, KSMBD_DEFAULT_GFP);
+			if (!neg_blob) {
+				err = -ENOMEM;
+				goto out_err;
+			}
+			/* build the challenge in a temp buffer, then wrap it */
+			chgblob = (struct challenge_message *)neg_blob;
+			neg_blob_len = ksmbd_build_ntlmssp_challenge_blob(
+					chgblob,
+					conn);
+			if (neg_blob_len < 0) {
+				kfree(neg_blob);
+				err = -ENOMEM;
+				goto out_err;
+			}
+
+			if (build_spnego_ntlmssp_neg_blob(&spnego_blob,
+							  &spnego_blob_len,
+							  neg_blob,
+							  neg_blob_len)) {
+				kfree(neg_blob);
+				err = -ENOMEM;
+				goto out_err;
+			}
+
+			/*
+			 * NOTE(review): no explicit bound check on
+			 * spnego_blob_len against the response buffer —
+			 * confirm the blob cannot exceed it.
+			 */
+			memcpy((char *)rsp->SecurityBlob, spnego_blob,
+			       spnego_blob_len);
+			rsp->SecurityBlobLength = cpu_to_le16(spnego_blob_len);
+			kfree(spnego_blob);
+			kfree(neg_blob);
+		} else {
+			/* raw NTLMSSP: challenge built directly in place */
+			neg_blob_len = ksmbd_build_ntlmssp_challenge_blob(
+					chgblob,
+					conn);
+			if (neg_blob_len < 0) {
+				err = -ENOMEM;
+				goto out_err;
+			}
+
+			rsp->SecurityBlobLength = cpu_to_le16(neg_blob_len);
+		}
+
+		rsp->hdr.Status.CifsError = STATUS_MORE_PROCESSING_REQUIRED;
+		/*
+		 * Note: here total size -1 is done as an adjustment
+		 * for 0 size blob.
+		 */
+		inc_resp_size(work, le16_to_cpu(rsp->SecurityBlobLength));
+		rsp->ByteCount = rsp->SecurityBlobLength;
+	} else if (negblob->MessageType == NtLmAuthenticate) {
+		struct authenticate_message *authblob;
+		unsigned int blob_len, un_off, un_len;
+		char *username;
+
+		ksmbd_debug(SMB, "authenticate phase\n");
+		if (conn->use_spnego && conn->mechToken)
+			authblob =
+				(struct authenticate_message *)conn->mechToken;
+		else
+			authblob = (struct authenticate_message *)
+						req->SecurityBlob;
+
+		/* validate the username region inside the auth blob */
+		un_off = le32_to_cpu(authblob->UserName.BufferOffset);
+		un_len = le16_to_cpu(authblob->UserName.Length);
+		blob_len = le16_to_cpu(req->SecurityBlobLength);
+		if (blob_len < (u64)un_off + un_len) {
+			err = -EINVAL;
+			goto out_err;
+		}
+
+		username =
+			smb_strndup_from_utf16((const char *)authblob + un_off,
+					       un_len, true, conn->local_nls);
+
+		if (IS_ERR(username)) {
+			pr_err("cannot allocate memory\n");
+			err = PTR_ERR(username);
+			goto out_err;
+		}
+
+		ksmbd_debug(SMB, "session setup request for user %s\n",
+			    username);
+		sess->user = ksmbd_login_user(username);
+		kfree(username);
+
+		if (!sess->user) {
+			ksmbd_debug(SMB, "Unknown user name or an error\n");
+			err = -EINVAL;
+			goto out_err;
+		}
+
+		/* guest accounts are accepted without any password check */
+		if (user_guest(sess->user)) {
+			rsp->Action = cpu_to_le16(GUEST_LOGIN);
+			goto no_password_check;
+		}
+
+		err = ksmbd_decode_ntlmssp_auth_blob(authblob, blob_len,
+						     conn, sess);
+		if (err) {
+			ksmbd_debug(SMB, "authentication failed\n");
+			err = -EINVAL;
+			goto out_err;
+		}
+
+no_password_check:
+		if (conn->use_spnego) {
+			/* wrap the final accept token in SPNEGO */
+			if (build_spnego_ntlmssp_auth_blob(&spnego_blob,
+						&spnego_blob_len, 0)) {
+				err = -ENOMEM;
+				goto out_err;
+			}
+
+			memcpy((char *)rsp->SecurityBlob, spnego_blob,
+			       spnego_blob_len);
+			rsp->SecurityBlobLength = cpu_to_le16(spnego_blob_len);
+			kfree(spnego_blob);
+			inc_resp_size(work, spnego_blob_len);
+			rsp->ByteCount = rsp->SecurityBlobLength;
+		}
+	} else {
+		pr_err("Invalid phase %d\n", negblob->MessageType);
+		err = -EINVAL;
+		goto out_err;
+	}
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+
+out_err:
+	/* mechToken is single-use; drop it whatever the outcome */
+	if (conn->use_spnego && conn->mechToken) {
+		kfree(conn->mechToken);
+		conn->mechToken = NULL;
+	}
+
+	return err;
+}
+
+/**
+ * smb_session_setup_andx() - session setup request handler
+ * @work:   smb work containing session setup request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_session_setup_andx(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct ksmbd_session *sess = NULL;
+	int rc = 0, cap;
+	unsigned short uid;
+
+	union smb_com_session_setup_andx *pSMB = work->request_buf;
+	union smb_com_session_setup_andx *rsp = work->response_buf;
+
+	/* WordCount selects the request layout: 12 = extended security,
+	 * 13 = old-style (no extended security); anything else is bogus.
+	 */
+	if (pSMB->req.hdr.WordCount == 12)
+		cap = le32_to_cpu(pSMB->req.Capabilities);
+	else if (pSMB->req.hdr.WordCount == 13)
+		cap = le32_to_cpu(pSMB->req_no_secext.Capabilities);
+	else {
+		pr_err("malformed packet\n");
+		work->send_no_response = 1;
+		return 0;
+	}
+
+	ksmbd_conn_lock(conn);
+	uid = le16_to_cpu(pSMB->req.hdr.Uid);
+	if (uid != 0) {
+		/* non-zero Uid: continue an in-progress session setup */
+		sess = ksmbd_session_lookup(conn, uid);
+		if (!sess) {
+			rc = -ENOENT;
+			goto out_err;
+		}
+		ksmbd_user_session_get(sess);
+		ksmbd_debug(SMB, "Reuse session ID: %llu, Uid: %u\n",
+			    sess->id, uid);
+	} else {
+		sess = ksmbd_smb1_session_create();
+		if (!sess) {
+			rc = -ENOMEM;
+			goto out_err;
+		}
+
+		rc = ksmbd_session_register(conn, sess);
+		if (rc)
+			goto out_err;
+		rsp->resp.hdr.Uid = cpu_to_le16(sess->id);
+		ksmbd_debug(SMB, "New session ID: %llu, Uid: %u\n", sess->id,
+			    uid);
+	}
+
+	if (cap & CAP_EXTENDED_SECURITY) {
+		ksmbd_debug(SMB, "build response with extend_security\n");
+		rc = build_sess_rsp_extsec(work, sess, &pSMB->req, &rsp->resp);
+
+	} else {
+		ksmbd_debug(SMB, "build response without extend_security\n");
+		rc = build_sess_rsp_noextsec(work, sess, &pSMB->req_no_secext,
+					     &rsp->old_resp);
+	}
+	if (rc < 0)
+		goto out_err;
+
+	work->sess = sess;
+	ksmbd_conn_set_good(conn);
+	ksmbd_conn_unlock(conn);
+	return 0;
+
+out_err:
+	rsp->resp.hdr.Status.CifsError = STATUS_LOGON_FAILURE;
+	rsp->resp.hdr.WordCount = 0;
+	rsp->resp.ByteCount = 0;
+	/*
+	 * NOTE(review): a session found via ksmbd_session_lookup() (and
+	 * pinned by ksmbd_user_session_get()) is destroyed here the same
+	 * way as a freshly created one — confirm the reference counting
+	 * and teardown are correct for the reuse path.
+	 */
+	if (rc < 0 && sess) {
+		xa_erase(&conn->sessions, sess->id);
+		hash_del(&sess->hlist);
+		ksmbd_session_destroy(sess);
+		work->sess = NULL;
+	}
+	ksmbd_conn_unlock(conn);
+	return rc;
+}
+
+/**
+ * file_create_dispostion_flags() - convert disposition flags to
+ *				file open flags
+ * @dispostion:		file disposition contained in open request
+ * @file_present:	file already present or not
+ *
+ * Return:      file open flags after conversion from disposition
+ */
+static int file_create_dispostion_flags(int dispostion, bool file_present)
+{
+	int disp_flags = 0;
+
+	switch (dispostion) {
+	case FILE_SUPERSEDE:
+		/* Overwrite when present, create otherwise. */
+		disp_flags |= file_present ? O_TRUNC : O_CREAT;
+		break;
+	case FILE_OPEN:
+		/* Open only: the file must already exist. */
+		if (!file_present)
+			return -ENOENT;
+		break;
+	case FILE_CREATE:
+		/* Create only: the file must not exist yet. */
+		if (file_present)
+			return -EEXIST;
+		disp_flags |= O_CREAT;
+		break;
+	case FILE_OPEN_IF:
+		/* Open when present, create otherwise. */
+		if (!file_present)
+			disp_flags |= O_CREAT;
+		break;
+	case FILE_OVERWRITE:
+		/* Truncate an existing file; fail when absent. */
+		if (!file_present)
+			return -ENOENT;
+		disp_flags |= O_TRUNC;
+		break;
+	case FILE_OVERWRITE_IF:
+		/* Truncate when present, create otherwise. */
+		disp_flags |= file_present ? O_TRUNC : O_CREAT;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return disp_flags;
+}
+
+/* Map an open(2)-style access mode to the corresponding MAY_* mask. */
+static inline int ksmbd_openflags_to_mayflags(int open_flags)
+{
+	switch (open_flags & O_ACCMODE) {
+	case O_WRONLY:
+		return MAY_OPEN | MAY_WRITE;
+	case O_RDWR:
+		return MAY_OPEN | MAY_READ | MAY_WRITE;
+	default:
+		return MAY_OPEN | MAY_READ;
+	}
+}
+
+/**
+ * convert_generic_access_flags() - convert access flags to
+ *				file open flags
+ * @access_flag:	file access flags contained in open request
+ * @open_flags:		file open flags are updated as per access flags
+ * @may_flags:		file may flags are updated with @open_flags
+ * @attrib:		attribute flag indicating posix symantics or not
+ *
+ * Return:		access flags
+ */
+static int convert_generic_access_flags(int access_flag, int *open_flags,
+					int *may_flags, int attrib)
+{
+	int aflags = access_flag;
+	int oflags = *open_flags;
+
+	/* expand each GENERIC_* bit into its specific rights mask */
+	if (aflags & GENERIC_READ) {
+		aflags &= ~GENERIC_READ;
+		aflags |= GENERIC_READ_FLAGS;
+	}
+
+	if (aflags & GENERIC_WRITE) {
+		aflags &= ~GENERIC_WRITE;
+		aflags |= GENERIC_WRITE_FLAGS;
+	}
+
+	if (aflags & GENERIC_EXECUTE) {
+		aflags &= ~GENERIC_EXECUTE;
+		aflags |= GENERIC_EXECUTE_FLAGS;
+	}
+
+	if (aflags & GENERIC_ALL) {
+		aflags &= ~GENERIC_ALL;
+		aflags |= GENERIC_ALL_FLAGS;
+	}
+
+	/* truncating implies a write, regardless of requested rights */
+	if (oflags & O_TRUNC)
+		aflags |= FILE_WRITE_DATA;
+
+	/* choose the unix access mode from the requested rights */
+	if (aflags & (FILE_WRITE_DATA | FILE_APPEND_DATA)) {
+		if (aflags & (FILE_READ_ATTRIBUTES | FILE_READ_DATA |
+			      FILE_READ_EA | FILE_EXECUTE)) {
+			*open_flags |= O_RDWR;
+
+		} else {
+			*open_flags |= O_WRONLY;
+		}
+	} else {
+		*open_flags |= O_RDONLY;
+	}
+
+	/* append mode is honoured only under posix semantics */
+	if ((attrib & ATTR_POSIX_SEMANTICS) && (aflags & FILE_APPEND_DATA))
+		*open_flags |= O_APPEND;
+
+	*may_flags = ksmbd_openflags_to_mayflags(*open_flags);
+
+	return aflags;
+}
+
+/**
+ * smb_get_dos_attr() - convert unix style stat info to dos attr
+ * @stat:	stat to be converted to dos attr
+ *
+ * Return:	dos style attribute
+ */
+static __u32 smb_get_dos_attr(struct kstat *stat)
+{
+	__u32 attr = 0;
+
+	/*
+	 * Derive the DOS attribute bits (ATTR_READONLY, ATTR_HIDDEN,
+	 * ATTR_SYSTEM, ATTR_DIRECTORY, ATTR_SPARSE, ...) from the unix
+	 * mode and size information.
+	 */
+	if (!(stat->mode & 0222))	/* no write permission bits */
+		attr |= ATTR_READONLY;
+
+	if (stat->mode & S_ISVTX)	/* sticky bit marks hidden/system */
+		attr |= (ATTR_HIDDEN | ATTR_SYSTEM);
+
+	if (S_ISDIR(stat->mode))
+		attr |= ATTR_DIRECTORY;
+
+	/* apparent size exceeding allocated blocks looks sparse */
+	if (stat->size > (stat->blksize * stat->blocks))
+		attr |= ATTR_SPARSE;
+
+	return attr ? attr : ATTR_NORMAL;
+}
+
+/*
+ * Handle an oplock break acknowledgement carried by LOCKING_ANDX:
+ * downgrade the file's oplock to the level the client acknowledged
+ * and wake any waiter blocked on the break.
+ * NOTE(review): the @type parameter is not used in this body.
+ */
+static int lock_oplock_release(struct ksmbd_file *fp, int type,
+			       int oplock_level)
+{
+	struct oplock_info *opinfo;
+	int ret;
+
+	ksmbd_debug(SMB, "got oplock brk for level OplockLevel = %d\n",
+		    oplock_level);
+
+	opinfo = fp->f_opinfo;
+	/* an ack is only valid while a break is in progress */
+	if (opinfo->op_state == OPLOCK_STATE_NONE) {
+		pr_err("unexpected oplock state 0x%x\n", opinfo->op_state);
+		return -EINVAL;
+	}
+
+	if (oplock_level == OPLOCK_EXCLUSIVE || oplock_level == OPLOCK_BATCH) {
+		/* client kept exclusive/batch: force it down to none */
+		if (opinfo_write_to_none(opinfo) < 0) {
+			opinfo->op_state = OPLOCK_STATE_NONE;
+			return -EINVAL;
+		}
+	} else if (((opinfo->level == OPLOCK_EXCLUSIVE) ||
+		    (opinfo->level == OPLOCK_BATCH)) &&
+		   (oplock_level == OPLOCK_READ)) {
+		/* exclusive/batch acknowledged down to read (level II) */
+		ret = opinfo_write_to_read(opinfo);
+		if (ret) {
+			opinfo->op_state = OPLOCK_STATE_NONE;
+			return -EINVAL;
+		}
+	} else if ((opinfo->level == OPLOCK_READ) &&
+		   (oplock_level == OPLOCK_NONE)) {
+		/* read (level II) acknowledged down to none */
+		ret = opinfo_read_to_none(opinfo);
+		if (ret) {
+			opinfo->op_state = OPLOCK_STATE_NONE;
+			return -EINVAL;
+		}
+	}
+
+	/* break complete: release anyone waiting on the oplock queue */
+	opinfo->op_state = OPLOCK_STATE_NONE;
+	wake_up_interruptible(&opinfo->oplock_q);
+
+	return 0;
+}
+
+/*
+ * Allocate and initialize one byte-range lock descriptor covering
+ * [offset, offset + length) and queue it on @lock_list.
+ * Returns NULL on allocation failure.
+ */
+static struct ksmbd_lock *smb_lock_init(struct file_lock *flock,
+					unsigned int cmd, int mode,
+					unsigned long long offset,
+					unsigned long long length,
+					struct list_head *lock_list)
+{
+	struct ksmbd_lock *lock;
+
+	lock = kzalloc(sizeof(struct ksmbd_lock), KSMBD_DEFAULT_GFP);
+	if (!lock)
+		return NULL;
+
+	lock->fl = flock;
+	lock->cmd = cmd;
+	lock->flags = mode;
+	lock->start = offset;
+	lock->end = offset + length;
+	/* remember empty ranges; they get special conflict handling */
+	lock->zero_len = (lock->start == lock->end) ? 1 : 0;
+
+	INIT_LIST_HEAD(&lock->llist);
+	INIT_LIST_HEAD(&lock->clist);
+	INIT_LIST_HEAD(&lock->flist);
+	list_add_tail(&lock->llist, lock_list);
+
+	return lock;
+}
+
+/**
+ * smb_locking_andx() - received oplock break response from client
+ * @work:	smb work containing oplock break command
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_locking_andx(struct ksmbd_work *work)
+{
+	struct smb_com_lock_req *req = work->request_buf;
+	struct smb_com_lock_rsp *rsp = work->response_buf;
+	struct ksmbd_file *fp;
+	int err = 0;
+	struct locking_andx_range32 *lock_ele32 = NULL, *unlock_ele32 = NULL;
+	struct locking_andx_range64 *lock_ele64 = NULL, *unlock_ele64 = NULL;
+	struct file *filp = NULL;
+	struct ksmbd_lock *smb_lock = NULL, *cmp_lock, *tmp, *tmp2;
+	int i, lock_count, unlock_count;
+	unsigned long long offset, length;
+	struct file_lock *flock = NULL;
+	unsigned int cmd = 0, leftlen;
+	LIST_HEAD(lock_list);
+	LIST_HEAD(rollback_list);
+	int locked, timeout;
+	const unsigned long long loff_max = ~0;
+	struct ksmbd_conn *conn;
+
+	timeout = le32_to_cpu(req->Timeout);
+	ksmbd_debug(SMB, "got oplock brk for fid %d lock type = 0x%x, timeout: %d\n",
+		    req->Fid, req->LockType, timeout);
+
+	/* find fid */
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		pr_err("cannot obtain fid for %d\n", req->Fid);
+		return -EINVAL;
+	}
+
+	if (req->LockType & LOCKING_ANDX_OPLOCK_RELEASE) {
+		pr_err("lock type is oplock release\n");
+		err = lock_oplock_release(fp, req->LockType, req->OplockLevel);
+	}
+
+	filp = fp->filp;
+	lock_count = le16_to_cpu(req->NumberOfLocks);
+	unlock_count = le16_to_cpu(req->NumberOfUnlocks);
+
+	ksmbd_debug(SMB, "lock count is %d, unlock_count : %d\n", lock_count,
+		    unlock_count);
+
+	leftlen = get_req_len(req) - offsetof(struct smb_com_lock_req, Locks);
+
+	if (req->LockType & LOCKING_ANDX_LARGE_FILES) {
+		if ((unlock_count + lock_count) * sizeof(*lock_ele64) > leftlen) {
+			err = -EINVAL;
+			goto out;
+		}
+		lock_ele64 = (struct locking_andx_range64 *)req->Locks;
+	} else {
+		if ((unlock_count + lock_count) * sizeof(*lock_ele32) > leftlen) {
+			err = -EINVAL;
+			goto out;
+		}
+		lock_ele32 = (struct locking_andx_range32 *)req->Locks;
+	}
+
+	if (req->LockType & LOCKING_ANDX_CHANGE_LOCKTYPE) {
+		pr_err("lock type: LOCKING_ANDX_CHANGE_LOCKTYPE\n");
+		rsp->hdr.Status.DosError.ErrorClass = ERRDOS;
+		rsp->hdr.Status.DosError.Error = cpu_to_le16(ERRnoatomiclocks);
+		rsp->hdr.Flags2 &= ~SMBFLG2_ERR_STATUS;
+		goto out;
+	}
+
+	if (req->LockType & LOCKING_ANDX_CANCEL_LOCK)
+		pr_err("lock type: LOCKING_ANDX_CANCEL_LOCK\n");
+
+	for (i = 0; i < lock_count; i++) {
+		flock = smb_flock_init(filp);
+		if (!flock)
+			goto out;
+
+		if (req->LockType & LOCKING_ANDX_SHARED_LOCK) {
+			pr_err("received shared request\n");
+			if (!(filp->f_mode & FMODE_READ)) {
+				rsp->hdr.Status.CifsError =
+					STATUS_ACCESS_DENIED;
+				locks_free_lock(flock);
+				goto out;
+			}
+			cmd = F_SETLKW;
+			flock->c.flc_type = F_RDLCK;
+		} else {
+			pr_err("received exclusive request\n");
+			if (!(filp->f_mode & FMODE_WRITE)) {
+				rsp->hdr.Status.CifsError =
+					STATUS_ACCESS_DENIED;
+				locks_free_lock(flock);
+				goto out;
+			}
+			cmd = F_SETLKW;
+			flock->c.flc_type = F_WRLCK;
+			flock->c.flc_flags |= FL_SLEEP;
+		}
+
+		if (req->LockType & LOCKING_ANDX_LARGE_FILES) {
+			offset = (unsigned long long)le32_to_cpu(
+					lock_ele64[i].OffsetLow);
+			length = (unsigned long long)le32_to_cpu(
+					lock_ele64[i].LengthLow);
+			offset |= (unsigned long long)le32_to_cpu(
+					lock_ele64[i].OffsetHigh) << 32;
+			length |= (unsigned long long)le32_to_cpu(
+					lock_ele64[i].LengthHigh) << 32;
+		} else {
+			offset = (unsigned long long)le32_to_cpu(
+				lock_ele32[i].Offset);
+			length = (unsigned long long)le32_to_cpu(
+				lock_ele32[i].Length);
+		}
+
+		if (offset > loff_max) {
+			pr_err("Invalid lock range requested\n");
+			rsp->hdr.Status.CifsError = STATUS_INVALID_LOCK_RANGE;
+			locks_free_lock(flock);
+			goto out;
+		}
+
+		if (offset > 0 && length > (loff_max - offset) + 1) {
+			pr_err("Invalid lock range requested\n");
+			rsp->hdr.Status.CifsError = STATUS_INVALID_LOCK_RANGE;
+			locks_free_lock(flock);
+			goto out;
+		}
+
+		ksmbd_debug(SMB, "locking offset : %llx, length : %llu\n",
+			    offset, length);
+
+		if (offset > OFFSET_MAX)
+			flock->fl_start = OFFSET_MAX;
+		else
+			flock->fl_start = offset;
+		if (offset + length > OFFSET_MAX)
+			flock->fl_end = OFFSET_MAX;
+		else
+			flock->fl_end = offset + length;
+
+		smb_lock = smb_lock_init(flock, cmd, req->LockType, offset,
+					 length, &lock_list);
+		if (!smb_lock) {
+			locks_free_lock(flock);
+			goto out;
+		}
+	}
+
+	list_for_each_entry_safe(smb_lock, tmp, &lock_list, llist) {
+		int same_zero_lock = 0;
+
+		list_del(&smb_lock->llist);
+		/* check locks in connections */
+		down_read(&conn_list_lock);
+		list_for_each_entry(conn, &conn_list, conns_list) {
+			spin_lock(&conn->llist_lock);
+			list_for_each_entry_safe(cmp_lock, tmp2, &conn->lock_list, clist) {
+				if (file_inode(cmp_lock->fl->c.flc_file) !=
+					file_inode(smb_lock->fl->c.flc_file))
+					continue;
+
+				if (smb_lock->zero_len &&
+					cmp_lock->start == smb_lock->start &&
+					cmp_lock->end == smb_lock->end) {
+					same_zero_lock = 1;
+					spin_unlock(&conn->llist_lock);
+					up_read(&conn_list_lock);
+					goto out_check_cl;
+				}
+
+				/* check zero byte lock range */
+				if (cmp_lock->zero_len && !smb_lock->zero_len &&
+						cmp_lock->start > smb_lock->start &&
+						cmp_lock->start < smb_lock->end) {
+					pr_err("previous lock conflict with zero byte lock range\n");
+					err = -EPERM;
+				} else if (smb_lock->zero_len && !cmp_lock->zero_len &&
+					smb_lock->start > cmp_lock->start &&
+					smb_lock->start < cmp_lock->end) {
+					pr_err("current lock conflict with zero byte lock range\n");
+					err = -EPERM;
+				} else if (((cmp_lock->start <= smb_lock->start &&
+					cmp_lock->end > smb_lock->start) ||
+					(cmp_lock->start < smb_lock->end &&
+					 cmp_lock->end >= smb_lock->end)) &&
+					!cmp_lock->zero_len && !smb_lock->zero_len) {
+					pr_err("Not allow lock operation on exclusive lock range\n");
+					err = -EPERM;
+				}
+
+				if (err) {
+					/* Clean error cache */
+					if ((smb_lock->zero_len &&
+							fp->cflock_cnt > 1) ||
+						(timeout && (fp->llock_fstart ==
+								smb_lock->start))) {
+						ksmbd_debug(SMB, "clean error cache\n");
+						fp->cflock_cnt = 0;
+					}
+
+					if (timeout > 0 ||
+						(fp->cflock_cnt > 0 &&
+						fp->llock_fstart == smb_lock->start) ||
+						((smb_lock->start >> 63) == 0 &&
+						smb_lock->start >= 0xEF000000)) {
+						if (timeout) {
+							spin_unlock(&conn->llist_lock);
+							up_read(&conn_list_lock);
+							ksmbd_debug(SMB, "waiting error response for timeout : %d\n",
+								timeout);
+							msleep(timeout);
+						}
+						rsp->hdr.Status.CifsError =
+							STATUS_FILE_LOCK_CONFLICT;
+					} else
+						rsp->hdr.Status.CifsError =
+							STATUS_LOCK_NOT_GRANTED;
+					fp->cflock_cnt++;
+					fp->llock_fstart = smb_lock->start;
+
+					if (timeout <= 0) {
+						spin_unlock(&conn->llist_lock);
+						up_read(&conn_list_lock);
+					}
+					goto out;
+				}
+			}
+			spin_unlock(&conn->llist_lock);
+		}
+		up_read(&conn_list_lock);
+
+out_check_cl:
+		if (same_zero_lock)
+			continue;
+		if (smb_lock->zero_len) {
+			err = 0;
+			goto skip;
+		}
+
+		flock = smb_lock->fl;
+retry:
+		err = vfs_lock_file(filp, smb_lock->cmd, flock, NULL);
+		if (err == FILE_LOCK_DEFERRED) {
+			pr_err("would have to wait for getting lock\n");
+			spin_lock(&work->conn->llist_lock);
+			list_add_tail(&smb_lock->clist, &work->conn->lock_list);
+			spin_unlock(&work->conn->llist_lock);
+			list_add(&smb_lock->llist, &rollback_list);
+wait:
+			err = ksmbd_vfs_posix_lock_wait_timeout(flock,
+							msecs_to_jiffies(10));
+			if (err) {
+				list_del(&smb_lock->llist);
+				spin_lock(&work->conn->llist_lock);
+				list_del(&smb_lock->clist);
+				spin_unlock(&work->conn->llist_lock);
+				goto retry;
+			} else
+				goto wait;
+		} else if (!err) {
+skip:
+			spin_lock(&work->conn->llist_lock);
+			list_add_tail(&smb_lock->clist, &work->conn->lock_list);
+			list_add_tail(&smb_lock->flist, &fp->lock_list);
+			spin_unlock(&work->conn->llist_lock);
+			list_add(&smb_lock->llist, &rollback_list);
+			pr_err("successful in taking lock\n");
+		} else if (err < 0) {
+			rsp->hdr.Status.CifsError = STATUS_LOCK_NOT_GRANTED;
+			goto out;
+		}
+	}
+
+	if (req->LockType & LOCKING_ANDX_LARGE_FILES)
+		unlock_ele64 = (struct locking_andx_range64 *)(req->Locks +
+				(sizeof(struct locking_andx_range64) *
+				 lock_count));
+	else
+		unlock_ele32 = (struct locking_andx_range32 *)(req->Locks +
+				(sizeof(struct locking_andx_range32) *
+				 lock_count));
+
+	for (i = 0; i < unlock_count; i++) {
+		flock = smb_flock_init(filp);
+		if (!flock)
+			goto out;
+
+		flock->c.flc_type = F_UNLCK;
+		cmd = 0;
+
+		if (req->LockType & LOCKING_ANDX_LARGE_FILES) {
+			offset = (unsigned long long)le32_to_cpu(
+					unlock_ele64[i].OffsetLow);
+			length = (unsigned long long)le32_to_cpu(
+					unlock_ele64[i].LengthLow);
+			offset |= (unsigned long long)le32_to_cpu(
+					unlock_ele64[i].OffsetHigh) << 32;
+			length |= (unsigned long long)le32_to_cpu(
+					unlock_ele64[i].LengthHigh) << 32;
+		} else {
+			offset = (unsigned long long)le32_to_cpu(
+				unlock_ele32[i].Offset);
+			length = (unsigned long long)le32_to_cpu(
+				unlock_ele32[i].Length);
+		}
+
+		ksmbd_debug(SMB, "unlock offset : %llx, length : %llu\n",
+			    offset, length);
+
+		if (offset > OFFSET_MAX)
+			flock->fl_start = OFFSET_MAX;
+		else
+			flock->fl_start = offset;
+		if (offset + length > OFFSET_MAX)
+			flock->fl_end = OFFSET_MAX;
+		else
+			flock->fl_end = offset + length;
+
+		locked = 0;
+		up_read(&conn_list_lock);
+		list_for_each_entry(conn, &conn_list, conns_list) {
+			spin_lock(&conn->llist_lock);
+			list_for_each_entry(cmp_lock, &conn->lock_list, clist) {
+				if (file_inode(cmp_lock->fl->c.flc_file) !=
+					file_inode(flock->c.flc_file))
+					continue;
+
+				if ((cmp_lock->start == offset &&
+					 cmp_lock->end == offset + length)) {
+					locked = 1;
+					spin_unlock(&conn->llist_lock);
+					up_read(&conn_list_lock);
+					goto out_check_cl_unlck;
+				}
+			}
+			spin_unlock(&conn->llist_lock);
+		}
+		up_read(&conn_list_lock);
+
+out_check_cl_unlck:
+		if (!locked) {
+			locks_free_lock(flock);
+			rsp->hdr.Status.CifsError = STATUS_RANGE_NOT_LOCKED;
+			goto out;
+		}
+
+		err = vfs_lock_file(filp, cmd, flock, NULL);
+		if (!err) {
+			ksmbd_debug(SMB, "File unlocked\n");
+			spin_lock(&conn->llist_lock);
+			if (!list_empty(&cmp_lock->flist))
+				list_del(&cmp_lock->flist);
+			list_del(&cmp_lock->clist);
+			spin_unlock(&conn->llist_lock);
+
+			locks_free_lock(cmp_lock->fl);
+			kfree(cmp_lock);
+			fp->cflock_cnt = 0;
+		} else if (err == -ENOENT) {
+			rsp->hdr.Status.CifsError = STATUS_RANGE_NOT_LOCKED;
+			locks_free_lock(flock);
+			goto out;
+		}
+		locks_free_lock(flock);
+	}
+
+	rsp->hdr.WordCount = 2;
+	rsp->ByteCount = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+	ksmbd_fd_put(work, fp);
+	return err;
+
+out:
+	list_for_each_entry_safe(smb_lock, tmp, &lock_list, llist) {
+		locks_free_lock(smb_lock->fl);
+		list_del(&smb_lock->llist);
+		kfree(smb_lock);
+	}
+
+	list_for_each_entry_safe(smb_lock, tmp, &rollback_list, llist) {
+		struct file_lock *rlock = NULL;
+
+		rlock = smb_flock_init(filp);
+		rlock->c.flc_type = F_UNLCK;
+		rlock->fl_start = smb_lock->start;
+		rlock->fl_end = smb_lock->end;
+
+		err = vfs_lock_file(filp, 0, rlock, NULL);
+		if (err)
+			pr_err("rollback unlock fail : %d\n", err);
+
+		list_del(&smb_lock->llist);
+		spin_lock(&work->conn->llist_lock);
+		if (!list_empty(&smb_lock->flist))
+			list_del(&smb_lock->flist);
+		list_del(&smb_lock->clist);
+		spin_unlock(&work->conn->llist_lock);
+
+		locks_free_lock(smb_lock->fl);
+		locks_free_lock(rlock);
+		kfree(smb_lock);
+	}
+
+	ksmbd_fd_put(work, fp);
+	pr_err("failed in taking lock\n");
+	return err;
+}
+
+/**
+ * smb_trans() - SMB_COM_TRANSACTION (named pipe) command dispatcher
+ * @work:	smb work containing the transaction command
+ *
+ * Only "\PIPE" requests are served: RAP calls on the LANMAN pipe and
+ * TRANSACT_DCERPCCMD on an already-opened RPC pipe fid.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_trans(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_com_trans_req *req = work->request_buf;
+	struct smb_com_trans_rsp *rsp = work->response_buf;
+	struct smb_com_trans_pipe_req *pipe_req = work->request_buf;
+	struct ksmbd_rpc_command *rpc_resp;
+	unsigned int maxlen, offset;
+	__u16 subcommand;
+	char *name, *pipe;
+	char *pipedata;
+	int setup_bytes_count = 0;
+	int pipe_name_offset = 0;
+	int str_len_uni;
+	int ret = 0, nbytes = 0;
+	int param_len = 0;
+	int id;
+	int padding;
+
+	/* each setup word in the request is 2 bytes */
+	if (req->SetupCount)
+		setup_bytes_count = 2 * req->SetupCount;
+
+	/* bounds-check the transaction name against the received PDU length */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans_req, Data) + setup_bytes_count;
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return 0;
+	}
+
+	subcommand = le16_to_cpu(req->SubCommand);
+	name = smb_strndup_from_utf16((char *)req + offset,
+				      maxlen - offset, 1,
+				      conn->local_nls);
+
+	if (IS_ERR(name)) {
+		pr_err("failed to allocate memory\n");
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		return PTR_ERR(name);
+	}
+
+	ksmbd_debug(SMB, "Obtained string name = %s setupcount = %d\n",
+			name, setup_bytes_count);
+
+	pipe_name_offset = strlen("\\PIPE");
+	if (strncmp("\\PIPE", name, pipe_name_offset) != 0) {
+		ksmbd_debug(SMB, "Not Pipe request\n");
+		rsp->hdr.Status.CifsError = STATUS_NOT_SUPPORTED;
+		kfree(name);
+		return 0;
+	}
+
+	/* skip the separator in "\PIPE\<name>" */
+	if (name[pipe_name_offset] == '\\')
+		pipe_name_offset++;
+
+	pipe = name + pipe_name_offset;
+
+	/* sizeof("LANMAN") includes the NUL, so this matches the full string */
+	if (*pipe != '\0' && strncmp(pipe, "LANMAN", sizeof("LANMAN")) != 0) {
+		ksmbd_debug(SMB, "Pipe %s not supported request\n", pipe);
+		rsp->hdr.Status.CifsError = STATUS_NOT_SUPPORTED;
+		kfree(name);
+		return 0;
+	}
+
+	/* Incoming pipe name unicode len (assumes a unicode request — TODO confirm) */
+	str_len_uni = 2 * (strlen(name) + 1);
+
+	ksmbd_debug(SMB, "Pipe name unicode len = %d\n", str_len_uni);
+
+	/*
+	 * Some clients like Windows may have additional padding.
+	 * NOTE(review): ParameterOffset is client-controlled, so padding can
+	 * come out negative here, moving pipedata/offset backwards — confirm
+	 * that an earlier request-validation layer rules this out.
+	 */
+	padding = le16_to_cpu(req->ParameterOffset) -
+		offsetof(struct smb_com_trans_req, Data)
+		- str_len_uni;
+	pipedata = req->Data + str_len_uni + setup_bytes_count + padding;
+	offset += str_len_uni + padding;
+
+	if (!strncmp(pipe, "LANMAN", sizeof("LANMAN"))) {
+		u16 param_count = le16_to_cpu(req->ParameterCount);
+
+		/* RAP parameters must lie entirely within the received PDU */
+		if (offset + param_count > maxlen) {
+			rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+			goto out;
+		}
+
+		rpc_resp = ksmbd_rpc_rap(work->sess, pipedata, param_count);
+
+		if (rpc_resp) {
+			if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
+				rsp->hdr.Status.CifsError =
+					STATUS_NOT_SUPPORTED;
+				kvfree(rpc_resp);
+				goto out;
+			} else if (rpc_resp->flags != KSMBD_RPC_OK) {
+				rsp->hdr.Status.CifsError =
+					STATUS_INVALID_PARAMETER;
+				kvfree(rpc_resp);
+				goto out;
+			}
+
+			/* copy the RAP payload directly after the fixed response */
+			nbytes = rpc_resp->payload_sz;
+			memcpy((char *)rsp + sizeof(struct smb_com_trans_rsp),
+			       rpc_resp->payload, nbytes);
+
+			kvfree(rpc_resp);
+			ret = 0;
+			goto resp_out;
+		} else {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	id = pipe_req->fid;
+	switch (subcommand) {
+	case TRANSACT_DCERPCCMD:
+	{
+		u16 data_count = le16_to_cpu(req->DataCount);
+
+		if (offset + data_count > maxlen) {
+			rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+			goto out;
+		}
+
+		ksmbd_debug(SMB, "GOT TRANSACT_DCERPCCMD\n");
+		ret = -EINVAL;
+		rpc_resp = ksmbd_rpc_ioctl(work->sess, id, pipedata,
+					   data_count);
+		if (rpc_resp) {
+			if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
+				rsp->hdr.Status.CifsError =
+					STATUS_NOT_SUPPORTED;
+				kvfree(rpc_resp);
+				goto out;
+			} else if (rpc_resp->flags != KSMBD_RPC_OK) {
+				rsp->hdr.Status.CifsError =
+					STATUS_INVALID_PARAMETER;
+				kvfree(rpc_resp);
+				goto out;
+			}
+
+			nbytes = rpc_resp->payload_sz;
+			memcpy((char *)rsp + sizeof(struct smb_com_trans_rsp),
+			       rpc_resp->payload, nbytes);
+			kvfree(rpc_resp);
+			ret = 0;
+		}
+		break;
+	}
+	default:
+		ksmbd_debug(SMB, "SMB TRANS subcommand not supported %u\n",
+			    subcommand);
+		ret = -EOPNOTSUPP;
+		rsp->hdr.Status.CifsError = STATUS_NOT_SUPPORTED;
+		goto out;
+	}
+
+resp_out:
+
+	/* param_len stays 0 today; kept for symmetry with the wire format */
+	rsp->hdr.WordCount = 10;
+	rsp->TotalParameterCount = cpu_to_le16(param_len);
+	rsp->TotalDataCount = cpu_to_le16(nbytes);
+	rsp->Reserved = 0;
+	rsp->ParameterCount = cpu_to_le16(param_len);
+	/* 56: offset of the data area in the fixed response — TODO confirm vs layout */
+	rsp->ParameterOffset = cpu_to_le16(56);
+	rsp->ParameterDisplacement = 0;
+	rsp->DataCount = cpu_to_le16(nbytes);
+	rsp->DataOffset = cpu_to_le16(56 + param_len);
+	rsp->DataDisplacement = 0;
+	rsp->SetupCount = 0;
+	rsp->Reserved1 = 0;
+	/* Adding 1 for Pad */
+	rsp->ByteCount = cpu_to_le16(nbytes + 1 + param_len);
+	rsp->Pad = 0;
+	inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+
+out:
+	kfree(name);
+	return ret;
+}
+
+/**
+ * create_andx_pipe() - create ipc pipe request handler
+ * @work:	smb work containing create command
+ *
+ * Opens an RPC pipe for the session and builds the extended
+ * NT_CREATE_ANDX response describing the pipe "file".
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int create_andx_pipe(struct ksmbd_work *work)
+{
+	struct smb_com_open_req *req = work->request_buf;
+	struct smb_com_open_ext_rsp *rsp = work->response_buf;
+	unsigned int maxlen, offset;
+	char *name = NULL;	/* NULL so the error path may kfree() safely */
+	int rc = 0;
+	__u16 fid;
+
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_open_req, fileName);
+
+	/* unicode strings are preceded by one byte of padding */
+	if (is_smbreq_unicode(&req->hdr))
+		offset += 1;
+
+	/* reject names that would start past the end of the received PDU */
+	if (offset >= maxlen) {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	name = smb_strndup_from_utf16((char *)req + offset,
+				      maxlen - offset, 1,
+				      work->conn->local_nls);
+	if (IS_ERR(name)) {
+		/* never pass the ERR_PTR to kfree() below */
+		name = NULL;
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* a non-negative return is the new pipe fid */
+	rc = ksmbd_session_rpc_open(work->sess, name);
+	if (rc < 0)
+		goto out;
+	fid = rc;
+
+	rsp->hdr.WordCount = 42;
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+	rsp->AndXReserved = 0;
+	rsp->OplockLevel = 0;
+	rsp->Fid = fid;
+	rsp->CreateAction = cpu_to_le32(1);
+	rsp->CreationTime = 0;
+	rsp->LastAccessTime = 0;
+	rsp->LastWriteTime = 0;
+	rsp->ChangeTime = 0;
+	rsp->FileAttributes = cpu_to_le32(ATTR_NORMAL);
+	rsp->AllocationSize = cpu_to_le64(0);
+	rsp->EndOfFile = 0;
+	/* FileType 2: message-mode named pipe */
+	rsp->FileType = cpu_to_le16(2);
+	rsp->DeviceState = cpu_to_le16(0x05ff);
+	rsp->DirectoryFlag = 0;
+	rsp->fid = 0;
+	rsp->MaxAccess = cpu_to_le32(FILE_GENERIC_ALL);
+	rsp->GuestAccess = cpu_to_le32(FILE_GENERIC_READ);
+	rsp->ByteCount = 0;
+	inc_resp_size(work, 100);
+
+out:
+	switch (rc) {
+	case 0:
+		rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+		break;
+	case -EINVAL:
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		break;
+	case -ENOSPC:
+	case -ENOMEM:
+	default:
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		break;
+	}
+
+	kfree(name);
+	return rc;
+}
+
+/*
+ * smb_common_create() - create a regular file or a directory with the
+ * share's configured mode, then resolve its path (parent left locked
+ * by ksmbd_vfs_kern_path_locked() for the caller to unlock).
+ */
+static int smb_common_create(struct ksmbd_work *work, struct path *parent_path,
+			     struct path *path, char *name, int open_flags,
+			     umode_t posix_mode, bool is_dir)
+{
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	umode_t mode;
+	int err;
+
+	/* nothing to do unless the caller actually asked for creation */
+	if (!(open_flags & O_CREAT))
+		return -EBADF;
+
+	if (is_dir) {
+		mode = share_config_directory_mode(share, posix_mode);
+		ksmbd_debug(SMB, "creating directory (mode=%04o)\n", mode);
+		err = ksmbd_vfs_mkdir(work, name, mode);
+	} else {
+		mode = share_config_create_mode(share, posix_mode);
+		ksmbd_debug(SMB, "creating regular file (mode=%04o)\n", mode);
+		err = ksmbd_vfs_create(work, name, mode);
+	}
+	if (err)
+		return err;
+
+	/* look the fresh object up so the caller gets its (locked) path */
+	err = ksmbd_vfs_kern_path_locked(work, name, 0, parent_path, path, 0);
+	if (err)
+		pr_err("cannot get linux path (%s), err = %d\n", name, err);
+	return err;
+}
+
+/**
+ * smb_nt_create_andx() - file open request handler
+ * @work:	smb work containing nt open command
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_nt_create_andx(struct ksmbd_work *work)
+{
+	struct smb_com_open_req *req = work->request_buf;
+	struct smb_com_open_rsp *rsp = work->response_buf;
+	struct smb_com_open_ext_rsp *ext_rsp = work->response_buf;
+	struct ksmbd_conn *conn = work->conn;
+	struct ksmbd_tree_connect *tcon = work->tcon;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path, parent_path;
+	struct kstat stat;
+	int oplock_flags, file_info, open_flags, may_flags, access_flags;
+	char *name;
+	char *conv_name = NULL;
+	bool file_present = true, extended_reply;
+	__u64 alloc_size = 0, time;
+	umode_t mode = 0;
+	int err = 0;
+	int create_directory = 0;
+	char *root = NULL;
+	bool is_unicode;
+	bool is_relative_root = false;
+	struct ksmbd_file *fp = NULL;
+	int oplock_rsp = OPLOCK_NONE;
+	int share_ret;
+	unsigned int maxlen, offset;
+
+	rsp->hdr.Status.CifsError = STATUS_UNSUCCESSFUL;
+	/* on IPC shares a create really opens an RPC pipe */
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_PIPE)) {
+		ksmbd_debug(SMB, "create pipe on IPC\n");
+		return create_andx_pipe(work);
+	}
+
+	if (req->CreateOptions & FILE_OPEN_BY_FILE_ID_LE) {
+		ksmbd_debug(SMB, "file open with FID is not supported\n");
+		rsp->hdr.Status.CifsError = STATUS_NOT_SUPPORTED;
+		return -EINVAL;
+	}
+
+	/* delete-on-close requires DELETE access and a writable attribute */
+	if (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE) {
+		if (req->DesiredAccess &&
+		    !(le32_to_cpu(req->DesiredAccess) & DELETE)) {
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+			return -EPERM;
+		}
+
+		if (le32_to_cpu(req->FileAttributes) & ATTR_READONLY) {
+			rsp->hdr.Status.CifsError = STATUS_CANNOT_DELETE;
+			return -EPERM;
+		}
+	}
+
+	if (req->CreateOptions & FILE_DIRECTORY_FILE_LE) {
+		ksmbd_debug(SMB, "GOT Create Directory via CREATE ANDX\n");
+		create_directory = 1;
+	}
+
+	/*
+	 * Filename is relative to this root directory FID, instead of
+	 * tree connect point. Find root dir name from this FID and
+	 * prepend root dir name in filename.
+	 */
+	if (req->RootDirectoryFid) {
+		ksmbd_debug(SMB, "path lookup relative to RootDirectoryFid\n");
+
+		is_relative_root = true;
+		fp = ksmbd_lookup_fd_fast(work, req->RootDirectoryFid);
+		if (fp)
+			root = (char *)fp->filp->f_path.dentry->d_name.name;
+		else {
+			rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+			memset(&rsp->hdr.WordCount, 0, 3);
+			return -EINVAL;
+		}
+		/*
+		 * NOTE(review): root still points at the dentry name after
+		 * the fd reference is dropped here — confirm the dentry
+		 * cannot go away before the kasprintf() below.
+		 */
+		ksmbd_fd_put(work, fp);
+	}
+
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_open_req, fileName);
+	is_unicode = is_smbreq_unicode(&req->hdr);
+
+	/* unicode strings are preceded by one byte of padding */
+	if (is_unicode)
+		offset++;
+
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_strndup_from_utf16((char *)req + offset, maxlen - offset,
+				      is_unicode, conn->local_nls);
+	if (IS_ERR(name)) {
+		if (PTR_ERR(name) == -ENOMEM) {
+			pr_err("failed to allocate memory\n");
+			rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		} else
+			rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+
+		return PTR_ERR(name);
+	}
+
+	if (is_relative_root) {
+		char *full_name;
+
+		full_name = kasprintf(KSMBD_DEFAULT_GFP, "\\%s\\%s", root, name);
+		if (!full_name) {
+			kfree(name);
+			rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+			return -ENOMEM;
+		}
+
+		kfree(name);
+		name = full_name;
+	}
+
+	/* reject trailing "\*" or "\/" components */
+	root = strrchr(name, '\\');
+	if (root) {
+		root++;
+		if ((root[0] == '*' || root[0] == '/') && (root[1] == '\0')) {
+			rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+			kfree(name);
+			return -EINVAL;
+		}
+	}
+
+	conv_name = smb_get_name(share, name, strlen(name), work, true);
+	if (IS_ERR(conv_name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		kfree(name);
+		return PTR_ERR(conv_name);
+	}
+
+	if (ksmbd_override_fsids(work)) {
+		err = -ENOMEM;
+		goto out1;
+	}
+
+	/* caseless lookup only for plain file opens, never for mkdir */
+	err = ksmbd_vfs_kern_path_locked(work, conv_name, LOOKUP_NO_SYMLINKS,
+					 &parent_path, &path,
+					 (req->hdr.Flags & SMBFLG_CASELESS) &&
+						!create_directory);
+	if (err) {
+		if (err == -EACCES || err == -EXDEV)
+			goto out;
+		file_present = false;
+		ksmbd_debug(SMB, "can not get linux path for %s, err = %d\n",
+			    conv_name, err);
+	} else {
+		if (d_is_symlink(path.dentry)) {
+			err = -EACCES;
+			goto free_path;
+		}
+
+		err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+				  AT_STATX_SYNC_AS_STAT);
+		if (err) {
+			pr_err("can not stat %s, err = %d\n", conv_name, err);
+			goto free_path;
+		}
+	}
+
+	if (file_present &&
+	    (req->CreateOptions & FILE_NON_DIRECTORY_FILE_LE) &&
+	    S_ISDIR(stat.mode)) {
+		ksmbd_debug(SMB, "Can't open dir %s, request is to open file\n",
+			    conv_name);
+		if (!(((struct smb_hdr *)work->request_buf)->Flags2 &
+					SMBFLG2_ERR_STATUS)) {
+			rsp->hdr.Status.DosError.ErrorClass = ERRDOS;
+			rsp->hdr.Status.DosError.Error =
+				cpu_to_le16(ERRfilexists);
+		} else
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_COLLISION;
+
+		/* zero WordCount plus the 16-bit ByteCount that follows it */
+		memset(&rsp->hdr.WordCount, 0, 3);
+
+		goto free_path;
+	}
+
+	if (file_present && create_directory && !S_ISDIR(stat.mode)) {
+		ksmbd_debug(SMB, "Can't open file %s, request is to open dir\n",
+			    conv_name);
+		if (!(((struct smb_hdr *)work->request_buf)->Flags2 &
+		      SMBFLG2_ERR_STATUS)) {
+			/* 267 == ERRbaddirectory; keep on-wire LE like the other DosError paths */
+			rsp->hdr.Status.DosError.Error = cpu_to_le16(267);
+			rsp->hdr.Status.DosError.ErrorClass = ERRDOS;
+		} else
+			rsp->hdr.Status.CifsError = STATUS_NOT_A_DIRECTORY;
+
+		memset(&rsp->hdr.WordCount, 0, 3);
+
+		goto free_path;
+	}
+
+	oplock_flags = le32_to_cpu(req->OpenFlags) &
+		(REQ_OPLOCK | REQ_BATCHOPLOCK);
+	extended_reply = le32_to_cpu(req->OpenFlags) & REQ_EXTENDED_INFO;
+	open_flags = file_create_dispostion_flags(
+			le32_to_cpu(req->CreateDisposition), file_present);
+
+	if (open_flags < 0) {
+		ksmbd_debug(SMB, "create_dispostion returned %d\n", open_flags);
+		if (file_present) {
+			if (!(((struct smb_hdr *)work->request_buf)->Flags2 &
+						SMBFLG2_ERR_STATUS)) {
+				rsp->hdr.Status.DosError.ErrorClass = ERRDOS;
+				rsp->hdr.Status.DosError.Error =
+					cpu_to_le16(ERRfilexists);
+			} else if (open_flags == -EINVAL)
+				rsp->hdr.Status.CifsError =
+					STATUS_INVALID_PARAMETER;
+			else
+				rsp->hdr.Status.CifsError =
+					STATUS_OBJECT_NAME_COLLISION;
+			memset(&rsp->hdr.WordCount, 0, 3);
+			goto free_path;
+		} else {
+			err = -ENOENT;
+			goto out;
+		}
+	} else {
+		if (file_present) {
+			/* never block opening a FIFO */
+			if (S_ISFIFO(stat.mode))
+				open_flags |= O_NONBLOCK;
+		}
+
+		if (req->CreateOptions & FILE_WRITE_THROUGH_LE)
+			open_flags |= O_SYNC;
+	}
+
+	/* also adjusts open_flags/may_flags as side effects */
+	access_flags =
+		convert_generic_access_flags(le32_to_cpu(req->DesiredAccess),
+					     &open_flags, &may_flags,
+					     le32_to_cpu(req->FileAttributes));
+
+	/* default creation mode; share config narrows it in smb_common_create() */
+	mode |= 0777;
+	if (le32_to_cpu(req->FileAttributes) & ATTR_READONLY)
+		mode &= ~0222;
+
+	/* TODO:
+	 * - check req->ShareAccess for sharing file among different process
+	 * - check req->FileAttributes for special/readonly file attrib
+	 * - check req->SecurityFlags for client security context tracking
+	 * - check req->ImpersonationLevel
+	 */
+
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		if (open_flags & O_CREAT) {
+			ksmbd_debug(SMB,
+				"returning as user does not have permission to write\n");
+			err = -EACCES;
+			if (file_present)
+				goto free_path;
+			else
+				goto out;
+		}
+	}
+
+	ksmbd_debug(SMB, "filename : %s, open_flags = 0x%x\n", conv_name,
+		    open_flags);
+	if (!file_present && (open_flags & O_CREAT)) {
+		if (!create_directory)
+			mode |= S_IFREG;
+
+		err = smb_common_create(work, &parent_path, &path, conv_name,
+					open_flags, mode, create_directory);
+		if (err) {
+			ksmbd_debug(SMB, "smb_common_create err: %d\n", err);
+			goto out;
+		}
+	} else {
+		err = inode_permission(mnt_idmap(path.mnt),
+				       d_inode(path.dentry),
+				       may_flags);
+		if (err)
+			goto free_path;
+	}
+
+	/* refuse to open below a directory that is pending delete */
+	err = ksmbd_query_inode_status(path.dentry->d_parent);
+	if (err == KSMBD_INODE_STATUS_PENDING_DELETE) {
+		err = -EBUSY;
+		goto free_path;
+	}
+
+	err = 0;
+	/* open file and get FID */
+	fp = ksmbd_vfs_dentry_open(work,
+				   &path,
+				   open_flags,
+				   req->CreateOptions,
+				   file_present);
+	if (IS_ERR(fp)) {
+		err = PTR_ERR(fp);
+		fp = NULL;
+		goto free_path;
+	}
+	fp->daccess = req->DesiredAccess;
+	fp->saccess = req->ShareAccess;
+	fp->pid = le16_to_cpu(req->hdr.Pid);
+
+	down_write(&fp->f_ci->m_lock);
+	list_add(&fp->node, &fp->f_ci->m_fp_list);
+	up_write(&fp->f_ci->m_lock);
+
+	share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp);
+	if (smb1_oplock_enable &&
+	    test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_OPLOCKS) &&
+	    !S_ISDIR(file_inode(fp->filp)->i_mode) && oplock_flags) {
+		/* Client cannot request levelII oplock directly */
+		err = smb_grant_oplock(work, oplock_flags, fp->volatile_id, fp,
+				       le16_to_cpu(req->hdr.Tid), NULL,
+				       share_ret);
+		if (err)
+			goto free_path;
+	} else {
+		if (ksmbd_inode_pending_delete(fp)) {
+			err = -EBUSY;
+			goto free_path;
+		}
+
+		if (share_ret < 0) {
+			err = -EPERM;
+			goto free_path;
+		}
+	}
+
+	oplock_rsp = fp->f_opinfo != NULL ? fp->f_opinfo->level : 0;
+
+	if (file_present) {
+		if (!(open_flags & O_TRUNC))
+			file_info = F_OPENED;
+		else
+			file_info = F_OVERWRITTEN;
+	} else
+		file_info = F_CREATED;
+
+	if (le32_to_cpu(req->DesiredAccess) & (DELETE | GENERIC_ALL))
+		fp->is_nt_open = 1;
+	if ((le32_to_cpu(req->DesiredAccess) & DELETE) &&
+	    (req->CreateOptions & FILE_DELETE_ON_CLOSE_LE))
+		ksmbd_fd_set_delete_on_close(fp, file_info);
+
+	/* open success, send back response */
+	err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	if (err) {
+		pr_err("cannot get stat information\n");
+		goto free_path;
+	}
+
+	/* honor the client's requested allocation size on new/overwritten files */
+	alloc_size = le64_to_cpu(req->AllocationSize);
+	if (alloc_size &&
+	    (file_info == F_CREATED || file_info == F_OVERWRITTEN)) {
+		if (alloc_size > stat.size) {
+			err = ksmbd_vfs_truncate(work, fp, alloc_size);
+			if (err) {
+				pr_err("failed to expand file, err = %d\n",
+				       err);
+				goto free_path;
+			}
+		}
+	}
+
+	/* prepare response buffer */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+
+	rsp->OplockLevel = oplock_rsp;
+	rsp->Fid = fp->volatile_id;
+
+	if ((le32_to_cpu(req->CreateDisposition) == FILE_SUPERSEDE) &&
+	    (file_info == F_OVERWRITTEN))
+		rsp->CreateAction = cpu_to_le32(F_SUPERSEDED);
+	else
+		rsp->CreateAction = cpu_to_le32(file_info);
+
+	/* fall back to ctime when the fs does not report a birth time */
+	if (stat.result_mask & STATX_BTIME)
+		fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
+	else
+		fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
+	if (file_present) {
+		if (test_share_config_flag(tcon->share_conf,
+					   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+			struct xattr_dos_attrib da;
+
+			err = ksmbd_vfs_get_dos_attrib_xattr(mnt_idmap(path.mnt),
+							     path.dentry, &da);
+			if (err > 0)
+				fp->create_time = da.create_time;
+			err = 0;
+		}
+	} else {
+		if (test_share_config_flag(tcon->share_conf,
+					   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+			struct xattr_dos_attrib da = {0};
+
+			da.version = 4;
+			da.attr = smb_get_dos_attr(&stat);
+			da.create_time = fp->create_time;
+
+			err = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path.mnt),
+							     &path, &da, false);
+			if (err)
+				ksmbd_debug(SMB, "failed to store creation time in xattr\n");
+			err = 0;
+		}
+	}
+
+	rsp->CreationTime = cpu_to_le64(fp->create_time);
+	time = ksmbd_UnixTimeToNT(stat.atime);
+	rsp->LastAccessTime = cpu_to_le64(time);
+	time = ksmbd_UnixTimeToNT(stat.mtime);
+	rsp->LastWriteTime = cpu_to_le64(time);
+	time = ksmbd_UnixTimeToNT(stat.ctime);
+	rsp->ChangeTime = cpu_to_le64(time);
+
+	rsp->FileAttributes = cpu_to_le32(smb_get_dos_attr(&stat));
+	rsp->AllocationSize = cpu_to_le64(stat.blocks << 9);
+	rsp->EndOfFile = cpu_to_le64(stat.size);
+	/* TODO: is it normal file, named pipe, printer, modem etc*/
+	rsp->FileType = 0;
+	/* status of named pipe*/
+	rsp->DeviceState = 0;
+	rsp->DirectoryFlag = S_ISDIR(stat.mode) ? 1 : 0;
+	if (extended_reply) {
+		struct inode *inode;
+
+		rsp->hdr.WordCount = 50;
+		memset(&ext_rsp->VolId, 0, 16);
+		if (fp) {
+			inode = file_inode(fp->filp);
+			ext_rsp->fid = inode->i_ino;
+			if (S_ISDIR(inode->i_mode) ||
+			    (fp->filp->f_mode & FMODE_WRITE))
+				ext_rsp->MaxAccess = FILE_GENERIC_ALL_LE;
+			else
+				ext_rsp->MaxAccess = FILE_GENERIC_READ_LE |
+						     FILE_EXECUTE_LE;
+		} else {
+			ext_rsp->MaxAccess = FILE_GENERIC_ALL_LE;
+			ext_rsp->fid = 0;
+		}
+
+		ext_rsp->ByteCount = 0;
+
+	} else {
+		rsp->hdr.WordCount = 34;
+		rsp->ByteCount = 0;
+	}
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 0);
+
+free_path:
+	/* drop the parent lock taken by ksmbd_vfs_kern_path_locked() */
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+out:
+	ksmbd_revert_fsids(work);
+out1:
+	switch (err) {
+	case 0:
+		ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED);
+		break;
+	case -ENOSPC:
+		rsp->hdr.Status.CifsError = STATUS_DISK_FULL;
+		break;
+	case -EMFILE:
+		rsp->hdr.Status.CifsError = STATUS_TOO_MANY_OPENED_FILES;
+		break;
+	case -EINVAL:
+		rsp->hdr.Status.CifsError = STATUS_NO_SUCH_USER;
+		break;
+	case -EACCES:
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		break;
+	case -EPERM:
+		rsp->hdr.Status.CifsError = STATUS_SHARING_VIOLATION;
+		break;
+	case -ENOENT:
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_NOT_FOUND;
+		break;
+	case -EBUSY:
+		rsp->hdr.Status.CifsError = STATUS_DELETE_PENDING;
+		break;
+	case -EOPNOTSUPP:
+		rsp->hdr.Status.CifsError = STATUS_NOT_SUPPORTED;
+		break;
+	case -ENOMEM:
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		break;
+	default:
+		rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+	}
+
+	if (err && fp)
+		ksmbd_close_fd(work, fp->volatile_id);
+
+	kfree(conv_name);
+
+	if (!rsp->hdr.WordCount)
+		return err;
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+
+	return err;
+}
+
+/**
+ * smb_close_pipe() - ipc pipe close request handler
+ * @work:	smb work containing close command
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_close_pipe(struct ksmbd_work *work)
+{
+	struct smb_com_close_req *close_req = work->request_buf;
+
+	/* tear down the session RPC pipe bound to this fid */
+	ksmbd_session_rpc_close(work->sess, close_req->FileID);
+	return 0;
+}
+
+/**
+ * smb_close() - file close request handler (also covers ipc pipe close)
+ * @work:	smb work containing close command
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_close(struct ksmbd_work *work)
+{
+	struct smb_com_close_req *req = work->request_buf;
+	struct smb_com_close_rsp *rsp = work->response_buf;
+	int err = 0;
+
+	ksmbd_debug(SMB, "SMB_COM_CLOSE called for fid %u\n", req->FileID);
+
+	/* on IPC shares the fid names an RPC pipe, not a real file */
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_PIPE)) {
+		err = smb_close_pipe(work);
+		if (err < 0)
+			goto out;
+		goto IPC_out;
+	}
+
+	/*
+	 * TODO: linux cifs client does not send LastWriteTime,
+	 * need to check if windows client use this field
+	 */
+	if (le32_to_cpu(req->LastWriteTime) > 0 &&
+	    le32_to_cpu(req->LastWriteTime) < 0xFFFFFFFF)
+		pr_info("need to set last modified time before close\n");
+
+	err = ksmbd_close_fd(work, req->FileID);
+
+IPC_out:
+	/* file close success, return response to server */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 0;
+	rsp->ByteCount = 0;
+
+out:
+	/* any close failure maps to an invalid-handle reply */
+	if (err)
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+	return err;
+}
+
+/**
+ * smb_read_andx_pipe() - read from ipc pipe request handler
+ * @work:	smb work containing read command
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_read_andx_pipe(struct ksmbd_work *work)
+{
+	struct smb_com_read_req *req = work->request_buf;
+	struct smb_com_read_rsp *rsp = work->response_buf;
+	struct ksmbd_rpc_command *rpc_resp;
+	int ret = 0, nbytes = 0;
+
+	rpc_resp = ksmbd_rpc_read(work->sess, req->Fid);
+	if (rpc_resp) {
+		void *aux_buf;
+
+		if (rpc_resp->flags != KSMBD_RPC_OK || !rpc_resp->payload_sz) {
+			rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+			kvfree(rpc_resp);
+			return -EINVAL;
+		}
+
+		/* copy the payload out so rpc_resp can be freed before pinning */
+		nbytes = rpc_resp->payload_sz;
+		aux_buf = kvmalloc(nbytes, KSMBD_DEFAULT_GFP);
+		if (!aux_buf) {
+			kvfree(rpc_resp);
+			return -ENOMEM;
+		}
+		memcpy(aux_buf, rpc_resp->payload, nbytes);
+
+		kvfree(rpc_resp);
+		/* skip the 4-byte RFC1002 length header when pinning the rsp iov */
+		ret = ksmbd_iov_pin_rsp_read(work, (char *)rsp + 4,
+					     sizeof (struct smb_com_read_rsp) - 4,
+					     aux_buf, nbytes);
+		if (ret) {
+			kvfree(aux_buf);
+			rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+			return -EINVAL;
+		}
+	} else {
+		rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+		return -EINVAL;
+	}
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 12;
+	rsp->Remaining = 0;
+	rsp->DataCompactionMode = 0;	/* was assigned twice; duplicate removed */
+	rsp->Reserved = 0;
+	/* DataLength/DataLengthHigh carry the low/high 16 bits of nbytes */
+	rsp->DataLength = cpu_to_le16(nbytes & 0xFFFF);
+	rsp->DataOffset = cpu_to_le16(sizeof(struct smb_com_read_rsp) -
+			sizeof(rsp->hdr.smb_buf_length));
+	rsp->DataLengthHigh = cpu_to_le16(nbytes >> 16);
+	rsp->Reserved2 = 0;
+
+	rsp->ByteCount = cpu_to_le16(nbytes);
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + nbytes);
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+
+	return ret;
+}
+
+/**
+ * smb_read_andx() - SMB_COM_READ_ANDX request handler
+ * @work:	smb work containing read command
+ *
+ * Reads up to MaxCount(High) bytes from the file identified by Fid into
+ * an aux buffer that is pinned to the response.  The aux buffer is either
+ * handed over to the response iov or freed here; it must never leak.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_read_andx(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_com_read_req *req = work->request_buf;
+	struct smb_com_read_rsp *rsp = work->response_buf;
+	struct ksmbd_file *fp;
+	void *aux_payload_buf = NULL;
+	loff_t pos;
+	size_t count;
+	ssize_t nbytes;
+	int err = 0;
+
+	/* reads on IPC$ are serviced by the RPC pipe handler */
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_PIPE))
+		return smb_read_andx_pipe(work);
+
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		pr_err("failed to get filp for fid %d\n", req->Fid);
+		rsp->hdr.Status.CifsError = STATUS_FILE_CLOSED;
+		return -ENOENT;
+	}
+
+	/* a 12-word request carries the upper 32 bits of the offset */
+	pos = le32_to_cpu(req->OffsetLow);
+	if (req->hdr.WordCount == 12)
+		pos |= ((loff_t)le32_to_cpu(req->OffsetHigh) << 32);
+
+	count = le16_to_cpu(req->MaxCount);
+	/*
+	 * It probably seems to be set to 0 or 0xFFFF if MaxCountHigh is
+	 * not supported. If it is 0xFFFF, it is set to a too large value
+	 * and a read fail occurs. If it is 0xFFFF, limit it to not set
+	 * the value.
+	 *
+	 * [MS-SMB] 3.2.4.4.1:
+	 * If the CAP_LARGE_READX bit is set in
+	 * Client.Connection.ServerCapabilities, then the client is allowed to
+	 * issue a read of a size larger than Client.Connection.MaxBufferSize
+	 * using an SMB_COM_READ_ANDX request.
+	 */
+	if (conn->vals->capabilities & CAP_LARGE_READ_X &&
+	    le32_to_cpu(req->MaxCountHigh) < 0xFFFF)
+		count |= le32_to_cpu(req->MaxCountHigh) << 16;
+	else if (count > CIFS_DEFAULT_IOSIZE) {
+		ksmbd_debug(SMB, "read size(%zu) exceeds max size(%u)\n", count,
+			    CIFS_DEFAULT_IOSIZE);
+		ksmbd_debug(SMB, "limiting read size to max size(%u)\n",
+			    CIFS_DEFAULT_IOSIZE);
+		count = CIFS_DEFAULT_IOSIZE;
+	}
+
+	ksmbd_debug(SMB, "filename %pd, offset %lld, count %zu\n",
+		    fp->filp->f_path.dentry, pos, count);
+
+	aux_payload_buf = kvmalloc(count, KSMBD_DEFAULT_GFP | __GFP_ZERO);
+	if (!aux_payload_buf) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	nbytes = ksmbd_vfs_read(work, fp, count, &pos, aux_payload_buf);
+	if (nbytes < 0) {
+		err = nbytes;
+		goto out;
+	}
+
+	/* read success, prepare response */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 12;
+	rsp->Remaining = 0;
+	rsp->DataCompactionMode = 0;
+	rsp->Reserved = 0;
+	rsp->DataLength = cpu_to_le16(nbytes & 0xFFFF);
+	rsp->DataOffset = cpu_to_le16(sizeof(struct smb_com_read_rsp) -
+			sizeof(rsp->hdr.smb_buf_length));
+	rsp->DataLengthHigh = cpu_to_le16(nbytes >> 16);
+	rsp->Reserved2 = 0;
+
+	rsp->ByteCount = cpu_to_le16(nbytes);
+
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+	/* pin the data in the ANDX-chained case too, so the payload is
+	 * always attached to the response and never leaked
+	 */
+	err = ksmbd_iov_pin_rsp_read(work, (char *)rsp + 4,
+				     sizeof(struct smb_com_read_rsp) - 4,
+				     aux_payload_buf, nbytes);
+	if (err)
+		goto out;
+	/* ownership transferred to the response iov */
+	aux_payload_buf = NULL;
+
+	ksmbd_fd_put(work, fp);
+	/* this is an ANDx command ? */
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+	return 0;
+
+out:
+	kvfree(aux_payload_buf);
+	ksmbd_fd_put(work, fp);
+	if (err)
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+	return err;
+}
+
+/**
+ * smb_write() - SMB_COM_WRITE request handler
+ * @work:	smb work containing write command
+ *
+ * Writes Length bytes at Offset through the VFS.  A zero-length write is
+ * handled as a truncate at the given offset, which matches the historic
+ * SMB_COM_WRITE semantics.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_write(struct ksmbd_work *work)
+{
+	struct smb_com_write_req_32bit *req = work->request_buf;
+	struct smb_com_write_rsp_32bit *rsp = work->response_buf;
+	struct ksmbd_file *fp = NULL;
+	loff_t pos;
+	size_t count;
+	char *data_buf;
+	ssize_t nbytes = 0;
+	int err = 0;
+	unsigned int maxlen, offset;
+
+	/* the tree connection must allow writes at all */
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB,
+			"returning as user does not have permission to write\n");
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		return -EACCES;
+	}
+
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		pr_err("failed to get filp for fid %u\n", req->Fid);
+		rsp->hdr.Status.CifsError = STATUS_FILE_CLOSED;
+		return -ENOENT;
+	}
+
+	pos = le32_to_cpu(req->Offset);
+	count = le16_to_cpu(req->Length);
+	data_buf = req->Data;
+	/* reject requests whose claimed data length runs past the PDU */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_write_req_32bit, Data);
+	if (offset + count > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		ksmbd_fd_put(work, fp);
+		return -EINVAL;
+	}
+
+	ksmbd_debug(SMB, "filename %pd, offset %lld, count %zu\n",
+		    fp->filp->f_path.dentry, pos, count);
+	if (!count) {
+		/* zero-length write == truncate at offset */
+		err = ksmbd_vfs_truncate(work, fp, pos);
+		nbytes = 0;
+	} else
+		err = ksmbd_vfs_write(work, fp, data_buf,
+				      count, &pos, 0, &nbytes);
+
+	/* the response body is built even on failure; only the NT status
+	 * selected below differs
+	 */
+	rsp->hdr.WordCount = 1;
+	rsp->Written = cpu_to_le16(nbytes & 0xFFFF);
+	rsp->ByteCount = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+	ksmbd_fd_put(work, fp);
+	if (!err) {
+		rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+		return 0;
+	}
+
+	if (err == -ENOSPC || err == -EFBIG)
+		rsp->hdr.Status.CifsError = STATUS_DISK_FULL;
+	else
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+	return err;
+}
+
+/**
+ * smb_write_andx_pipe() - SMB_COM_WRITE_ANDX handler for the IPC$ pipe
+ * @work:	smb work containing write command
+ *
+ * Forwards the payload to the RPC layer and reports how many bytes the
+ * RPC layer accepted.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_write_andx_pipe(struct ksmbd_work *work)
+{
+	struct smb_com_write_req *req = work->request_buf;
+	struct smb_com_write_rsp *rsp = work->response_buf;
+	struct ksmbd_rpc_command *rpc_resp;
+	size_t count = 0;
+	unsigned int maxlen;
+
+	count = le16_to_cpu(req->DataLengthLow);
+	if (work->conn->vals->capabilities & CAP_LARGE_WRITE_X)
+		count |= (le16_to_cpu(req->DataLengthHigh) << 16);
+
+	/* NOTE(review): assumes the data immediately follows the fixed
+	 * request header; DataOffset is not consulted here -- confirm
+	 * against the callers of this path.
+	 */
+	maxlen = get_req_len(req) - sizeof(struct smb_com_write_req);
+	if (count > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	rpc_resp = ksmbd_rpc_write(work->sess, req->Fid, req->Data, count);
+	if (!rpc_resp) {
+		/* fix: the old code set ret = -EINVAL here but still fell
+		 * through and built a STATUS_SUCCESS response; fail the
+		 * request outright instead
+		 */
+		rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+		return -EINVAL;
+	}
+
+	if (rpc_resp->flags == KSMBD_RPC_ENOTIMPLEMENTED) {
+		rsp->hdr.Status.CifsError = STATUS_NOT_SUPPORTED;
+		kvfree(rpc_resp);
+		return -EOPNOTSUPP;
+	}
+	if (rpc_resp->flags != KSMBD_RPC_OK) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+		kvfree(rpc_resp);
+		return -EINVAL;
+	}
+	/* report the byte count the RPC layer actually consumed */
+	count = rpc_resp->payload_sz;
+	kvfree(rpc_resp);
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 6;
+	rsp->Count = cpu_to_le16(count & 0xFFFF);
+	rsp->Remaining = 0;
+	rsp->CountHigh = cpu_to_le16(count >> 16);
+	rsp->Reserved = 0;
+	rsp->ByteCount = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+
+	return 0;
+}
+
+/**
+ * smb_write_andx() - SMB_COM_WRITE_ANDX request handler
+ * @work:	smb work containing write command
+ *
+ * Validates that the wire-supplied data offset and length stay inside
+ * the received PDU, then writes through the VFS.  IPC$ writes are routed
+ * to the RPC pipe handler.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_write_andx(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_com_write_req *req = work->request_buf;
+	struct smb_com_write_rsp *rsp = work->response_buf;
+	struct ksmbd_file *fp;
+	bool writethrough = false;
+	loff_t pos;
+	size_t count;
+	ssize_t nbytes = 0;
+	char *data_buf;
+	int err = 0;
+	unsigned int maxlen, offset;
+
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB,
+			"returning as user does not have permission to write\n");
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		return -EACCES;
+	}
+
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_PIPE)) {
+		ksmbd_debug(SMB, "Write ANDX called for IPC$");
+		return smb_write_andx_pipe(work);
+	}
+
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		pr_err("failed to get filp for fid %u\n", req->Fid);
+		rsp->hdr.Status.CifsError = STATUS_FILE_CLOSED;
+		return -ENOENT;
+	}
+
+	/* a 14-word request carries the upper 32 bits of the offset */
+	pos = le32_to_cpu(req->OffsetLow);
+	if (req->hdr.WordCount == 14)
+		pos |= ((loff_t)le32_to_cpu(req->OffsetHigh) << 32);
+
+	/* WriteMode 1 requests write-through to stable storage */
+	writethrough = (le16_to_cpu(req->WriteMode) == 1);
+
+	/*
+	 * [MS-SMB] 3.3.5.8:
+	 * If CAP_LARGE_WRITEX is set in Server.Connection.ClientCapabilities,
+	 * then it is possible that the count of bytes to be written is larger
+	 * than the server's MaxBufferSize
+	 */
+	count = le16_to_cpu(req->DataLengthLow);
+	if (conn->vals->capabilities & CAP_LARGE_WRITE_X)
+		count |= (le16_to_cpu(req->DataLengthHigh) << 16);
+	else if (count > CIFS_DEFAULT_IOSIZE) {
+		ksmbd_debug(SMB, "write size(%zu) exceeds max size(%u)\n",
+				count, CIFS_DEFAULT_IOSIZE);
+		ksmbd_debug(SMB, "limiting write size to max size(%u)\n",
+				CIFS_DEFAULT_IOSIZE);
+		count = CIFS_DEFAULT_IOSIZE;
+	}
+
+	/* DataOffset is relative to the SMB header, so add the 4-byte
+	 * RFC1002 length prefix; the sum with the size_t count cannot
+	 * wrap on 64-bit (NOTE(review): confirm no 32-bit builds rely
+	 * on this path)
+	 */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if ((offset > maxlen) ||
+	    (offset + count > maxlen)) {
+		pr_err("invalid write data offset %u, smb_len %u\n",
+		       le16_to_cpu(req->DataOffset),
+		       get_rfc1002_len(req));
+		err = -EINVAL;
+		goto out;
+	}
+
+	data_buf = (char *)req + offset;
+
+	ksmbd_debug(SMB, "filename %pd, offset %lld, count %zu\n",
+		    fp->filp->f_path.dentry, pos, count);
+	err = ksmbd_vfs_write(work, fp, data_buf, count, &pos,
+			      writethrough, &nbytes);
+	if (err < 0)
+		goto out;
+
+	/* write success, prepare response */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 6;
+	rsp->Count = cpu_to_le16(nbytes & 0xFFFF);
+	rsp->Remaining = 0;
+	rsp->CountHigh = cpu_to_le16(nbytes >> 16);
+	rsp->Reserved = 0;
+	rsp->ByteCount = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+	ksmbd_fd_put(work, fp);
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+
+	return 0;
+
+out:
+	ksmbd_fd_put(work, fp);
+	if (err == -ENOSPC || err == -EFBIG)
+		rsp->hdr.Status.CifsError = STATUS_DISK_FULL;
+	else
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+	return err;
+}
+
+/**
+ * smb_echo() - echo(ping) request handler
+ * @work:	smb work containing echo command
+ *
+ * Echoes the request payload back EchoCount times (capped at 10).  An
+ * EchoCount of zero means no response at all.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_echo(struct ksmbd_work *work)
+{
+	struct smb_com_echo_req *req = work->request_buf;
+	struct smb_com_echo_rsp *rsp = work->response_buf;
+	__u16 data_count, echo_count;
+	unsigned int maxlen;
+	int i;
+
+	echo_count = le16_to_cpu(req->EchoCount);
+
+	ksmbd_debug(SMB, "SMB_COM_ECHO called with echo count %u\n",
+		    echo_count);
+
+	if (!echo_count) {
+		work->send_no_response = true;
+		return 0;
+	}
+
+	/* don't let a client make us work too much */
+	if (echo_count > 10)
+		echo_count = 10;
+
+	data_count = le16_to_cpu(req->ByteCount);
+	/* make sure the payload claimed by ByteCount is really inside the
+	 * received PDU, so a forged count cannot make us read (and echo
+	 * back) memory past the end of the request buffer
+	 */
+	maxlen = get_req_len(req);
+	if (offsetof(struct smb_com_echo_req, Data) + data_count > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	/* send echo response to server */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 1;
+	rsp->ByteCount = cpu_to_le16(data_count);
+
+	memcpy(rsp->Data, req->Data, data_count);
+	inc_resp_size(work, (rsp->hdr.WordCount * 2) + data_count);
+
+	/* Send req->EchoCount - 1 number of ECHO response now &
+	 * if SMB CANCEL for Echo comes don't send response
+	 */
+	for (i = 1; i < echo_count && !work->send_no_response; i++) {
+		rsp->SequenceNumber = cpu_to_le16(i);
+		ksmbd_conn_write(work);
+	}
+
+	/* Last echo response */
+	rsp->SequenceNumber = cpu_to_le16(i);
+
+	return 0;
+}
+
+/**
+ * smb_flush() - file sync - flush request handler
+ * @work:	smb work containing flush command
+ *
+ * Fid 0xFFFF is the protocol's "flush everything" wildcard: it syncs
+ * every file open on this session instead of a single handle.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_flush(struct ksmbd_work *work)
+{
+	struct smb_com_flush_req *req = work->request_buf;
+	struct smb_com_flush_rsp *rsp = work->response_buf;
+	int err;
+
+	ksmbd_debug(SMB, "SMB_COM_FLUSH called for fid %u\n", req->FileID);
+
+	err = (req->FileID == 0xFFFF) ?
+		ksmbd_file_table_flush(work) :
+		ksmbd_vfs_fsync(work, req->FileID, KSMBD_NO_FID);
+	if (err) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+		return err;
+	}
+
+	/* fsync succeeded; emit an empty success response */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 0;
+	rsp->ByteCount = 0;
+	return 0;
+}
+
+/*****************************************************************************
+ * TRANS2 command implementation functions
+ *****************************************************************************/
+
+/**
+ * get_filetype() - convert file mode to smb file type
+ * @mode:	file mode to be converted
+ *
+ * Return:	converted file type
+ */
+static __u32 get_filetype(mode_t mode)
+{
+	/* dispatch on the S_IFMT bits; equivalent to the S_ISxxx macros */
+	switch (mode & S_IFMT) {
+	case S_IFREG:
+		return UNIX_FILE;
+	case S_IFDIR:
+		return UNIX_DIR;
+	case S_IFLNK:
+		return UNIX_SYMLINK;
+	case S_IFCHR:
+		return UNIX_CHARDEV;
+	case S_IFBLK:
+		return UNIX_BLOCKDEV;
+	case S_IFIFO:
+		return UNIX_FIFO;
+	case S_IFSOCK:
+		return UNIX_SOCKET;
+	default:
+		return UNIX_UNKNOWN;
+	}
+}
+
+/**
+ * init_unix_info() - convert file stat information to smb file info format
+ * @unix_info:	smb file information format
+ * @user_ns:	user namespace used to map kuid/kgid to wire ids
+ * @stat:	unix file/dir stat information
+ */
+static void init_unix_info(struct file_unix_basic_info *unix_info,
+			   struct user_namespace *user_ns, struct kstat *stat)
+{
+	/* sizes */
+	unix_info->EndOfFile = cpu_to_le64(stat->size);
+	unix_info->NumOfBytes = cpu_to_le64(512 * stat->blocks);
+	/* timestamps, converted from unix epoch to NT time */
+	unix_info->LastStatusChange =
+		cpu_to_le64(ksmbd_UnixTimeToNT(stat->ctime));
+	unix_info->LastAccessTime =
+		cpu_to_le64(ksmbd_UnixTimeToNT(stat->atime));
+	unix_info->LastModificationTime =
+		cpu_to_le64(ksmbd_UnixTimeToNT(stat->mtime));
+	/* ownership, mapped through the caller's user namespace */
+	unix_info->Uid = cpu_to_le64(from_kuid(user_ns, stat->uid));
+	unix_info->Gid = cpu_to_le64(from_kgid(user_ns, stat->gid));
+	/* type, device numbers and remaining identity fields */
+	unix_info->Type = cpu_to_le32(get_filetype(stat->mode));
+	unix_info->DevMajor = cpu_to_le64(MAJOR(stat->rdev));
+	unix_info->DevMinor = cpu_to_le64(MINOR(stat->rdev));
+	unix_info->UniqueId = cpu_to_le64(stat->ino);
+	unix_info->Permissions = cpu_to_le64(stat->mode);
+	unix_info->Nlinks = cpu_to_le64(stat->nlink);
+}
+
+/**
+ * unix_info_to_attr() - convert smb file info format to unix attr format
+ * @unix_info:	smb file information format
+ * @user_ns:	user namespace used to map wire ids to kuid/kgid
+ * @attrs:	unix file/dir stat information
+ *
+ * Each wire field equal to NO_CHANGE_64 means "leave this attribute
+ * alone"; only the remaining fields set their ATTR_* bit in ia_valid.
+ *
+ * Return:	0
+ */
+static int unix_info_to_attr(struct file_unix_basic_info *unix_info,
+			     struct user_namespace *user_ns,
+			     struct iattr *attrs)
+{
+	struct timespec64 ts;
+
+	if (le64_to_cpu(unix_info->EndOfFile) != NO_CHANGE_64) {
+		attrs->ia_size = le64_to_cpu(unix_info->EndOfFile);
+		attrs->ia_valid |= ATTR_SIZE;
+	}
+
+	if (le64_to_cpu(unix_info->LastStatusChange) != NO_CHANGE_64) {
+		ts = smb_NTtimeToUnix(unix_info->LastStatusChange);
+		attrs->ia_ctime = ts;
+		attrs->ia_valid |= ATTR_CTIME;
+	}
+
+	if (le64_to_cpu(unix_info->LastAccessTime) != NO_CHANGE_64) {
+		ts = smb_NTtimeToUnix(unix_info->LastAccessTime);
+		attrs->ia_atime = ts;
+		attrs->ia_valid |= ATTR_ATIME;
+	}
+
+	if (le64_to_cpu(unix_info->LastModificationTime) != NO_CHANGE_64) {
+		ts = smb_NTtimeToUnix(unix_info->LastModificationTime);
+		attrs->ia_mtime = ts;
+		attrs->ia_valid |= ATTR_MTIME;
+	}
+
+	if (le64_to_cpu(unix_info->Uid) != NO_CHANGE_64) {
+		attrs->ia_uid = make_kuid(user_ns, le64_to_cpu(unix_info->Uid));
+		attrs->ia_valid |= ATTR_UID;
+	}
+
+	if (le64_to_cpu(unix_info->Gid) != NO_CHANGE_64) {
+		attrs->ia_gid = make_kgid(user_ns, le64_to_cpu(unix_info->Gid));
+		attrs->ia_valid |= ATTR_GID;
+	}
+
+	if (le64_to_cpu(unix_info->Permissions) != NO_CHANGE_64) {
+		attrs->ia_mode = le64_to_cpu(unix_info->Permissions);
+		attrs->ia_valid |= ATTR_MODE;
+	}
+
+	/* NOTE(review): the type bits below are OR'd into ia_mode even
+	 * when Permissions was NO_CHANGE_64 (ia_mode then keeps whatever
+	 * the caller pre-set) -- confirm callers initialize attrs
+	 */
+	switch (le32_to_cpu(unix_info->Type)) {
+	case UNIX_FILE:
+		attrs->ia_mode |= S_IFREG;
+		break;
+	case UNIX_DIR:
+		attrs->ia_mode |= S_IFDIR;
+		break;
+	case UNIX_SYMLINK:
+		attrs->ia_mode |= S_IFLNK;
+		break;
+	case UNIX_CHARDEV:
+		attrs->ia_mode |= S_IFCHR;
+		break;
+	case UNIX_BLOCKDEV:
+		attrs->ia_mode |= S_IFBLK;
+		break;
+	case UNIX_FIFO:
+		attrs->ia_mode |= S_IFIFO;
+		break;
+	case UNIX_SOCKET:
+		attrs->ia_mode |= S_IFSOCK;
+		break;
+	default:
+		/* unknown type is logged but not treated as an error */
+		pr_err("unknown file type 0x%x\n",
+		       le32_to_cpu(unix_info->Type));
+	}
+
+	return 0;
+}
+
+/**
+ * unix_to_dos_time() - convert unix time to dos format
+ * @ts:		unix style time
+ * @time:	store dos style time
+ * @date:	store dos style date
+ *
+ * DOS dates pack year-since-1980/month/day and hour/minute/2-second
+ * fields into 16 bits each.  The local timezone offset is applied via
+ * sys_tz before packing.
+ */
+static void unix_to_dos_time(struct timespec64 ts, __le16 *time, __le16 *date)
+{
+	struct tm t;
+	__u16 val;
+
+	/* break the epoch seconds down in local time (sys_tz offset) */
+	time64_to_tm(ts.tv_sec, (-sys_tz.tz_minuteswest) * 60, &t);
+	/* NOTE(review): the two halves are assembled with the bytes
+	 * swapped relative to the plain DOS layout -- presumably matched
+	 * by how the caller stores the value; confirm before touching
+	 */
+	val = (((unsigned int)(t.tm_mon + 1)) >> 3) | ((t.tm_year - 80) << 1);
+	val = ((val & 0xFF) << 8) | (t.tm_mday | (((t.tm_mon + 1) & 0x7) << 5));
+	*date = cpu_to_le16(val);
+
+	val = ((((unsigned int)t.tm_min >> 3) & 0x7) |
+	       (((unsigned int)t.tm_hour) << 3));
+	val = ((val & 0xFF) << 8) | ((t.tm_sec / 2) | ((t.tm_min & 0x7) << 5));
+	*time = cpu_to_le16(val);
+}
+
+/**
+ * cifs_convert_ace() - convert one Access Control Entry from cifs wire
+ *		format to local POSIX xattr format
+ * @ace:	local - unix style Access Control Entry format
+ * @cifs_ace:	cifs wire Access Control Entry format
+ */
+static void cifs_convert_ace(struct posix_acl_xattr_entry *ace,
+			     struct cifs_posix_ace *cifs_ace)
+{
+	/* the wire perm/tag fields are u8, so they need no le conversion
+	 * before being widened into the xattr's 16-bit fields
+	 */
+	ace->e_tag = cpu_to_le16(cifs_ace->cifs_e_tag);
+	ace->e_perm = cpu_to_le16(cifs_ace->cifs_e_perm);
+	ace->e_id = cpu_to_le32(le64_to_cpu(cifs_ace->cifs_uid));
+}
+
+/**
+ * cifs_copy_posix_acl() - Convert ACL from CIFS POSIX wire format to local
+ *		Linux POSIX ACL xattr
+ * @trgt:	target buffer for storing in local ace format
+ * @src:	source buffer in cifs ace format
+ * @buflen:	target buffer length
+ * @acl_type:	ace type (ACL_TYPE_ACCESS or ACL_TYPE_DEFAULT)
+ * @size_of_data_area:	max buffer size to store ace xattr
+ *
+ * The wire entry count is validated against @size_of_data_area before
+ * any entry is read, so a forged count cannot walk past the PDU.
+ *
+ * Return:	size of converted ace xattr on success, otherwise error
+ */
+static int cifs_copy_posix_acl(char *trgt, char *src, const int buflen,
+			       const int acl_type, const int size_of_data_area)
+{
+	int size = 0;
+	int i;
+	__u16 count;
+	struct cifs_posix_ace *pACE;
+	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)src;
+	struct posix_acl_xattr_entry *ace;
+	struct posix_acl_xattr_header *local_acl = (void *)trgt;
+
+	/* only one wire version is understood */
+	if (le16_to_cpu(cifs_acl->version) != CIFS_ACL_VERSION)
+		return -EOPNOTSUPP;
+
+	if (acl_type & ACL_TYPE_ACCESS) {
+		count = le16_to_cpu(cifs_acl->access_entry_count);
+		pACE = &cifs_acl->ace_array[0];
+		size = sizeof(struct cifs_posix_acl);
+		size += sizeof(struct cifs_posix_ace) * count;
+		/* check if we would go beyond end of SMB */
+		if (size_of_data_area < size) {
+			ksmbd_debug(SMB, "bad CIFS POSIX ACL size %d vs. %d\n",
+				    size_of_data_area, size);
+			return -EINVAL;
+		}
+	} else if (acl_type & ACL_TYPE_DEFAULT) {
+		count = le16_to_cpu(cifs_acl->default_entry_count);
+		pACE = &cifs_acl->ace_array[0];
+		size = sizeof(struct cifs_posix_acl);
+		size += sizeof(struct cifs_posix_ace) * count;
+		/* check if we would go beyond end of SMB */
+		if (size_of_data_area < size)
+			return -EINVAL;
+	} else {
+		/* illegal type */
+		return -EINVAL;
+	}
+
+	/* from here on, size is the size of the *local* xattr blob */
+	size = posix_acl_xattr_size(count);
+	if ((buflen != 0) && local_acl && size > buflen)
+		return -ERANGE;
+
+	/* buffer big enough */
+	ace = (void *)(local_acl + 1);
+	local_acl->a_version = cpu_to_le32(POSIX_ACL_XATTR_VERSION);
+	for (i = 0; i < count; i++) {
+		cifs_convert_ace(&ace[i], pACE);
+		pACE++;
+	}
+
+	return size;
+}
+
+/**
+ * convert_ace_to_cifs_ace() - convert one ACE from local Linux POSIX ACL
+ *		xattr format to CIFS POSIX wire format
+ * @cifs_ace:	target buffer for storing in cifs ace format
+ * @local_ace:	source buffer in Linux POSIX ACL xattr format
+ *
+ * Return:	0 (ACL converted ok)
+ */
+static __u16 convert_ace_to_cifs_ace(struct cifs_posix_ace *cifs_ace,
+		const struct posix_acl_xattr_entry *local_ace)
+{
+	cifs_ace->cifs_e_perm = le16_to_cpu(local_ace->e_perm);
+	cifs_ace->cifs_e_tag = le16_to_cpu(local_ace->e_tag);
+
+	/* e_id == -1 marks "no id"; pass it through as a 64-bit -1
+	 * (le conversion of -1 is a no-op on any arch, done for form)
+	 */
+	if (local_ace->e_id == cpu_to_le32(-1))
+		cifs_ace->cifs_uid = cpu_to_le64(-1);
+	else
+		cifs_ace->cifs_uid =
+			cpu_to_le64(le32_to_cpu(local_ace->e_id));
+
+	return 0;
+}
+
+/**
+ * ACL_to_cifs_posix() - ACL from local Linux POSIX xattr to CIFS POSIX ACL
+ *		wire format
+ * @parm_data:	target buffer for storing in cifs ace format
+ * @pACL:	source buffer in local POSIX xattr format
+ * @buflen:	source buffer length
+ * @acl_type:	ace type
+ *
+ * Both ACL_TYPE_ACCESS and ACL_TYPE_DEFAULT entries may be written into
+ * the same wire buffer by successive calls; default entries are appended
+ * after any access entries already present.
+ *
+ * Return:	size in bytes of the converted entries, 0 on bad input
+ */
+static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL,
+			       const int buflen, const int acl_type)
+{
+	__u16 rc = 0;
+	struct cifs_posix_acl *cifs_acl = (struct cifs_posix_acl *)parm_data;
+	struct posix_acl_xattr_header *local_acl = (void *)pACL;
+	struct posix_acl_xattr_entry *ace = (void *)(local_acl + 1);
+	int count;
+	int i, j = 0;
+
+	if ((buflen == 0) || !pACL || !cifs_acl)
+		return 0;
+
+	count = posix_acl_xattr_count((size_t)buflen);
+	ksmbd_debug(SMB, "setting acl with %d entries from buf of length %d and version of %d\n",
+		 count, buflen, le32_to_cpu(local_acl->a_version));
+	if (le32_to_cpu(local_acl->a_version) != 2) {
+		ksmbd_debug(SMB, "unknown POSIX ACL version %d\n",
+			    le32_to_cpu(local_acl->a_version));
+		return 0;
+	}
+	if (acl_type == ACL_TYPE_ACCESS) {
+		cifs_acl->access_entry_count = cpu_to_le16(count);
+		j = 0;
+	} else if (acl_type == ACL_TYPE_DEFAULT) {
+		cifs_acl->default_entry_count = cpu_to_le16(count);
+		/* start writing after the access entries already stored */
+		if (cifs_acl->access_entry_count)
+			j = le16_to_cpu(cifs_acl->access_entry_count);
+	} else {
+		ksmbd_debug(SMB, "unknown ACL type %d\n", acl_type);
+		return 0;
+	}
+	for (i = 0; i < count; i++, j++) {
+		/* fix: index with j so default entries append instead of
+		 * overwriting the access entries written by a prior call
+		 */
+		rc = convert_ace_to_cifs_ace(&cifs_acl->ace_array[j], &ace[i]);
+		if (rc != 0) {
+			/* ACE not converted */
+			break;
+		}
+	}
+	if (rc == 0) {
+		rc = (__u16)(count * sizeof(struct cifs_posix_ace));
+		/* BB add check to make sure ACL does not overflow SMB */
+	}
+	return rc;
+}
+
+/**
+ * smb_get_acl() - handler for query posix acl information
+ * @work:	smb work containing posix acl query command
+ * @path:	path of file/dir to query acl
+ *
+ * Reads the access and default POSIX ACL xattrs (if present) and
+ * serializes both into one cifs_posix_acl wire blob in the response.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_get_acl(struct ksmbd_work *work, struct path *path)
+{
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	char *buf = NULL;
+	int rc = 0, value_len;
+	struct cifs_posix_acl *aclbuf;
+	__u16 rsp_data_cnt = 0;
+
+	/* the ACL payload starts right after the trans2 response header */
+	aclbuf = (struct cifs_posix_acl *)(work->response_buf +
+			sizeof(struct smb_com_trans2_rsp) + 4);
+
+	aclbuf->version = cpu_to_le16(CIFS_ACL_VERSION);
+	aclbuf->default_entry_count = 0;
+	aclbuf->access_entry_count = 0;
+
+	/* check if POSIX_ACL_XATTR_ACCESS exists */
+	value_len = ksmbd_vfs_getxattr(mnt_idmap(path->mnt), path->dentry,
+				       XATTR_NAME_POSIX_ACL_ACCESS,
+				       &buf);
+	if (value_len > 0) {
+		rsp_data_cnt += ACL_to_cifs_posix((char *)aclbuf, buf,
+				value_len, ACL_TYPE_ACCESS);
+		kfree(buf);
+		buf = NULL;
+	}
+
+	/* check if POSIX_ACL_XATTR_DEFAULT exists */
+	value_len = ksmbd_vfs_getxattr(mnt_idmap(path->mnt), path->dentry,
+				       XATTR_NAME_POSIX_ACL_DEFAULT,
+				       &buf);
+	if (value_len > 0) {
+		rsp_data_cnt += ACL_to_cifs_posix((char *)aclbuf, buf,
+						  value_len, ACL_TYPE_DEFAULT);
+		kfree(buf);
+		buf = NULL;
+	}
+
+	/* account for the fixed wire header only if any ACE was emitted */
+	if (rsp_data_cnt)
+		rsp_data_cnt += sizeof(struct cifs_posix_acl);
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = cpu_to_le16(rsp_data_cnt);
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = cpu_to_le16(2);
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = cpu_to_le16(60);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	rsp->ByteCount = cpu_to_le16(rsp_data_cnt + 5);
+	inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+
+	/* buf is always NULL here; guard kept for safety */
+	if (buf)
+		kfree(buf);
+	return rc;
+}
+
+/**
+ * smb_set_acl() - handler for setting posix acl information
+ * @work:	smb work containing posix acl set command
+ *
+ * Converts the client's cifs_posix_acl wire blob into a local POSIX ACL
+ * xattr and stores it on the file named in the request.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_acl(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct cifs_posix_acl *wire_acl_data;
+	char *fname, *buf = NULL;
+	int rc = 0, acl_type = 0, value_len;
+	unsigned int maxlen, offset;
+
+
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	fname = smb_get_name(share, req->FileName, maxlen - offset,
+			     work, false);
+	if (IS_ERR(fname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(fname);
+	}
+
+	/* fix: require room for the whole cifs_posix_acl header, not just
+	 * the offset itself, before dereferencing the entry counts below
+	 */
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset > maxlen ||
+	    offset + sizeof(struct cifs_posix_acl) > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(fname);
+		return -EINVAL;
+	}
+
+	buf = vmalloc(XATTR_SIZE_MAX);
+	if (!buf) {
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	/* a non-zero, non-0xFFFF entry count selects which ACL type the
+	 * client is setting
+	 */
+	wire_acl_data = (struct cifs_posix_acl *)((char *)req + offset);
+	if (le16_to_cpu(wire_acl_data->access_entry_count) > 0 &&
+	    le16_to_cpu(wire_acl_data->access_entry_count) < 0xFFFF) {
+		acl_type = ACL_TYPE_ACCESS;
+	} else if (le16_to_cpu(wire_acl_data->default_entry_count) > 0 &&
+		   le16_to_cpu(wire_acl_data->default_entry_count) < 0xFFFF) {
+		acl_type = ACL_TYPE_DEFAULT;
+	} else {
+		rc = -EINVAL;
+		goto out;
+	}
+
+	rc = cifs_copy_posix_acl(buf, (char *)wire_acl_data, XATTR_SIZE_MAX,
+				 acl_type, maxlen - offset);
+	if (rc < 0) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		goto out;
+	}
+
+	value_len = rc;
+	if (acl_type == ACL_TYPE_ACCESS) {
+		rc = ksmbd_vfs_fsetxattr(work,
+					 fname,
+					 XATTR_NAME_POSIX_ACL_ACCESS,
+					 buf, value_len, 0);
+	} else if (acl_type == ACL_TYPE_DEFAULT) {
+		rc = ksmbd_vfs_fsetxattr(work,
+					 fname,
+					 XATTR_NAME_POSIX_ACL_DEFAULT,
+					 buf, value_len, 0);
+	}
+
+	if (rc < 0) {
+		rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+		goto out;
+	}
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = cpu_to_le16(0);
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = cpu_to_le16(2);
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = cpu_to_le16(0);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 2 for parameter count + 1 pad1*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Pad = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+out:
+	vfree(buf);
+	kfree(fname);
+	return rc;
+}
+
+/* Grow (or shrink) a response buffer, copying the smaller of the two
+ * sizes.  On allocation failure the ORIGINAL pointer is returned, which
+ * is how callers detect the error (nptr == old ptr).
+ */
+static void *ksmbd_realloc_response(void *ptr, size_t old_sz, size_t new_sz)
+{
+	void *nptr;
+
+	nptr = kvmalloc(new_sz, KSMBD_DEFAULT_GFP | __GFP_ZERO);
+	if (!nptr)
+		return ptr;
+
+	memcpy(nptr, ptr, min(old_sz, new_sz));
+	kvfree(ptr);
+	return nptr;
+}
+
+/**
+ * smb_readlink() - handler for reading symlink source path
+ * @work:	smb work containing query link information
+ * @path:	path of the symlink to read
+ *
+ * Reads the link target, growing the response buffer if the (possibly
+ * UTF-16-encoded) target will not fit, then emits a trans2 response
+ * carrying the target string.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_readlink(struct ksmbd_work *work, struct path *path)
+{
+	struct smb_com_trans2_qpi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	int err, name_len, link_len;
+	char *buf, *ptr;
+
+	buf = kzalloc((CIFS_MF_SYMLINK_LINK_MAXLEN), KSMBD_DEFAULT_GFP);
+	if (!buf) {
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		return -ENOMEM;
+	}
+
+	/* err holds the target length in bytes on success */
+	err = ksmbd_vfs_readlink(path, buf, CIFS_MF_SYMLINK_LINK_MAXLEN);
+	if (err < 0) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+		goto out;
+	}
+
+	/*
+	 * check if this namelen(unicode) and smb header can fit in small rsp
+	 * buf. If not, switch to large rsp buffer.
+	 */
+	err++;
+	err *= 2;
+	if (err + MAX_HEADER_SIZE(work->conn) > work->response_sz) {
+		void *nptr;
+		size_t nsz = err + MAX_HEADER_SIZE(work->conn);
+
+		nptr = ksmbd_realloc_response(work->response_buf,
+					      work->response_sz,
+					      nsz);
+		/* unchanged pointer means the realloc failed */
+		if (nptr == work->response_buf) {
+			rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+			err = -ENOMEM;
+			goto out;
+		}
+
+		/* rsp must be re-derived from the new buffer */
+		work->response_buf = nptr;
+		rsp = (struct smb_com_trans2_rsp *)work->response_buf;
+	}
+	link_len = err;
+	err = 0;
+
+	/* 4-byte pad before the data area */
+	ptr = (char *)&rsp->Buffer[0];
+	memset(ptr, 0, 4);
+	ptr += 4;
+
+	if (is_smbreq_unicode(&req->hdr)) {
+		name_len = smb_strtoUTF16((__le16 *)ptr,
+					  buf,
+					  link_len,
+					  work->conn->local_nls);
+		name_len++;     /* trailing null */
+		name_len *= 2;
+	} else { /* BB add path length overrun check */
+		name_len = strscpy(ptr, buf, link_len);
+		if (name_len == -E2BIG) {
+			err = -ENOMEM;
+			rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+			goto out;
+		}
+		name_len++;     /* trailing null */
+	}
+
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = cpu_to_le16(name_len);
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = cpu_to_le16(2);
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = cpu_to_le16(60);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	rsp->ByteCount = cpu_to_le16(name_len + 5);
+	inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+
+out:
+	kfree(buf);
+	return err;
+}
+
+/**
+ * smb_get_ea() - handler for extended attribute query
+ * @work:	smb work containing query xattr command
+ * @path:	path of file/dir to query xattr command
+ *
+ * Lists the file's user.* xattrs and serializes them into a fealist in
+ * the response buffer, stopping before the response buffer would
+ * overflow.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_get_ea(struct ksmbd_work *work, struct path *path)
+{
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	char *name, *ptr, *xattr_list = NULL, *buf;
+	int rc, name_len, value_len, xattr_list_len;
+	struct fealist *eabuf = (struct fealist *)(work->response_buf +
+			sizeof(struct smb_com_trans2_rsp) + 4);
+	struct fea *temp_fea;
+	ssize_t buf_free_len;
+	__u16 rsp_data_cnt = 4;
+
+	/* NOTE(review): list_len is accumulated with += cpu_to_le32(...)
+	 * below, which is only correct on little-endian -- confirm
+	 */
+	eabuf->list_len = cpu_to_le32(rsp_data_cnt);
+	buf_free_len = work->response_sz - (get_rfc1002_len(rsp) + 4) -
+		sizeof(struct smb_com_trans2_rsp);
+	rc = ksmbd_vfs_listxattr(path->dentry, &xattr_list);
+	if (rc < 0) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+		goto out;
+	} else if (!rc) { /* there is no EA in the file */
+		goto done;
+	}
+
+	xattr_list_len = rc;
+	rc = 0;
+
+	ptr = (char *)eabuf->list;
+	temp_fea = (struct fea *)ptr;
+	for (name = xattr_list; name - xattr_list < xattr_list_len;
+	     name += strlen(name) + 1) {
+		ksmbd_debug(SMB, "%s, len %zd\n", name, strlen(name));
+		/*
+		 * CIFS does not support EA other name user.* namespace,
+		 * still keep the framework generic, to list other attrs
+		 * in future.
+		 */
+		if (strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN))
+			continue;
+
+		/* the wire name drops the "user." prefix */
+		name_len = strlen(name) - XATTR_USER_PREFIX_LEN;
+
+		value_len = ksmbd_vfs_getxattr(mnt_idmap(path->mnt),
+					       path->dentry, name, &buf);
+		if (value_len <= 0) {
+			rc = -ENOENT;
+			rsp->hdr.Status.CifsError = STATUS_INVALID_HANDLE;
+			goto out;
+		}
+
+		/* fix: stop before this entry would overflow the response
+		 * buffer; previously buf_free_len was never checked
+		 */
+		if (buf_free_len < (ssize_t)(offsetof(struct fea, name) +
+					     name_len + 1 + value_len)) {
+			kfree(buf);
+			break;
+		}
+
+		ptr = (char *)(&temp_fea->name + name_len + 1);
+		buf_free_len -= (offsetof(struct fea, name) + name_len + 1);
+		memcpy(ptr, buf, value_len);
+		kfree(buf);
+
+		temp_fea->EA_flags = 0;
+		temp_fea->name_len = name_len;
+		memcpy(temp_fea->name, &name[XATTR_USER_PREFIX_LEN],
+		       name_len);
+
+		temp_fea->value_len = cpu_to_le16(value_len);
+		buf_free_len -= value_len;
+		rsp_data_cnt += offsetof(struct fea, name) + name_len + 1 +
+			value_len;
+		eabuf->list_len += cpu_to_le32(offsetof(struct fea, name) +
+				name_len + 1 + value_len);
+		ptr += value_len;
+		temp_fea = (struct fea *)ptr;
+	}
+
+done:
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = cpu_to_le16(rsp_data_cnt);
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = cpu_to_le16(2);
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = cpu_to_le16(60);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	rsp->ByteCount = cpu_to_le16(rsp_data_cnt + 5);
+	inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+out:
+	kvfree(xattr_list);
+	return rc;
+}
+
+/**
+ * query_path_info() - handler for query path info
+ * @work:	smb work containing query path info command
+ *
+ * Parses the TRANS2 QUERY_PATH_INFORMATION parameter block, resolves
+ * the requested path (symlinks are refused) and builds the response
+ * for the requested InformationLevel.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int query_path_info(struct ksmbd_work *work)
+{
+	struct smb_hdr *rsp_hdr = work->response_buf;
+	struct smb_com_trans2_req *req = work->request_buf;
+	struct ksmbd_conn *conn = work->conn;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct trans2_qpi_req_params *req_params;
+	char *name = NULL;
+	struct path path, parent_path;
+	struct kstat st;
+	int rc;
+	char *ptr;
+	__u64 create_time = 0, time;
+	unsigned int maxlen, offset;
+
+	/* path queries make no sense on an IPC (pipe) share */
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_PIPE)) {
+		rsp_hdr->Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+		return 0;
+	}
+
+	/*
+	 * Validate the client-controlled ParameterOffset against the
+	 * actual request length before dereferencing it.
+	 */
+	maxlen = get_req_len(work->request_buf);
+	offset = le16_to_cpu(req->ParameterOffset) + 4;
+	if (offset > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+	req_params = (struct trans2_qpi_req_params *)(work->request_buf + offset);
+
+	/* FileName must also start inside the request buffer */
+	offset += offsetof(struct trans2_qpi_req_params, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req_params->FileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	if (ksmbd_override_fsids(work)) {
+		kfree(name);
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		return -ENOMEM;
+	}
+
+	/* on success this leaves the parent directory inode locked */
+	rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					&parent_path, &path, 0);
+	if (rc) {
+		if (rc == -EACCES || rc == -EXDEV)
+			rsp_hdr->Status.CifsError = STATUS_ACCESS_DENIED;
+		else
+			rsp_hdr->Status.CifsError =
+					STATUS_OBJECT_NAME_NOT_FOUND;
+		ksmbd_debug(SMB, "cannot get linux path for %s, err %d\n",
+				name, rc);
+		goto out;
+	}
+
+	if (d_is_symlink(path.dentry)) {
+		rsp_hdr->Status.CifsError = STATUS_ACCESS_DENIED;
+		goto err_out;
+	}
+
+	rc = vfs_getattr(&path, &st, STATX_BASIC_STATS, AT_STATX_SYNC_AS_STAT);
+	if (rc) {
+		pr_err("cannot get stat information\n");
+		goto err_out;
+	}
+
+	/* creation time is not in kstat; recover it from the DOS-attr xattr */
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+		struct xattr_dos_attrib da;
+
+		rc = ksmbd_vfs_get_dos_attrib_xattr(mnt_idmap(path.mnt),
+						    path.dentry, &da);
+		if (rc > 0)
+			create_time = da.create_time;
+		/* absence of the xattr is not an error for this request */
+		rc = 0;
+	}
+
+	/*
+	 * All cases below build a TRANS2 response with a 2-byte parameter
+	 * block at offset 56 and the data block at offset 60; the 4 bytes
+	 * cleared after Pad cover the parameter word plus 2 pad bytes.
+	 * inc_resp_size() adds the 10 response words plus ByteCount bytes.
+	 */
+	switch (le16_to_cpu(req_params->InformationLevel)) {
+	case SMB_INFO_STANDARD:
+	{
+		struct file_info_standard *infos;
+
+		ksmbd_debug(SMB, "SMB_INFO_STANDARD\n");
+		rc = ksmbd_query_inode_status(path.dentry);
+		if (rc == KSMBD_INODE_STATUS_PENDING_DELETE) {
+			rc = -EBUSY;
+			goto err_out;
+		}
+
+		rc = 0;
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		infos = (struct file_info_standard *)(ptr + 4);
+		unix_to_dos_time(ksmbd_NTtimeToUnix(cpu_to_le64(create_time)),
+				 &infos->CreationDate, &infos->CreationTime);
+		unix_to_dos_time(st.atime, &infos->LastAccessDate,
+				 &infos->LastAccessTime);
+		unix_to_dos_time(st.mtime, &infos->LastWriteDate,
+				 &infos->LastWriteTime);
+		infos->DataSize = cpu_to_le32(st.size);
+		/* st.blocks is in 512-byte units, hence << 9 */
+		infos->AllocationSize = cpu_to_le32(st.blocks << 9);
+		infos->Attributes = cpu_to_le16(S_ISDIR(st.mode) ?
+					ATTR_DIRECTORY : ATTR_ARCHIVE);
+		infos->EASize = 0;
+
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount = cpu_to_le16(22);
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(22);
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		rsp->ByteCount = cpu_to_le16(27);
+		rsp->Pad = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_STANDARD_INFO:
+	{
+		struct file_standard_info *standard_info;
+		unsigned int del_pending;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_STANDARD_INFO\n");
+		/* normalise the status code to a 0/1 delete-pending flag */
+		del_pending = ksmbd_query_inode_status(path.dentry);
+		if (del_pending == KSMBD_INODE_STATUS_PENDING_DELETE)
+			del_pending = 1;
+		else
+			del_pending = 0;
+
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_standard_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount =
+			cpu_to_le16(sizeof(struct file_standard_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_standard_info) + 3);
+		rsp->Pad = 0;
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		standard_info = (struct file_standard_info *)(ptr + 4);
+		standard_info->AllocationSize = cpu_to_le64(st.blocks << 9);
+		standard_info->EndOfFile = cpu_to_le64(st.size);
+		standard_info->NumberOfLinks = cpu_to_le32(get_nlink(&st) -
+			del_pending);
+		standard_info->DeletePending = del_pending;
+		standard_info->Directory = S_ISDIR(st.mode) ? 1 : 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_BASIC_INFO:
+	{
+		struct file_basic_info *basic_info;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_BASIC_INFO\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_basic_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(sizeof(struct file_basic_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_basic_info) + 3);
+		rsp->Pad = 0;
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		basic_info = (struct file_basic_info *)(ptr + 4);
+		basic_info->CreationTime = cpu_to_le64(create_time);
+		time = ksmbd_UnixTimeToNT(st.atime);
+		basic_info->LastAccessTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.mtime);
+		basic_info->LastWriteTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.ctime);
+		basic_info->ChangeTime = cpu_to_le64(time);
+		basic_info->Attributes = S_ISDIR(st.mode) ?
+					 ATTR_DIRECTORY_LE : ATTR_ARCHIVE_LE;
+		basic_info->Pad = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_EA_INFO:
+	{
+		struct file_ea_info *ea_info;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_EA_INFO\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_ea_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(sizeof(struct file_ea_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_ea_info) + 3);
+		rsp->Pad = 0;
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		ea_info = (struct file_ea_info *)(ptr + 4);
+		/* EAs are not counted here; always reported as 0 */
+		ea_info->EaSize = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_NAME_INFO:
+	{
+		struct file_name_info *name_info;
+		size_t len, rsp_offset;
+		int uni_filename_len;
+		char *filename;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_NAME_INFO\n");
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		name_info = (struct file_name_info *)(ptr + 4);
+
+		filename = convert_to_nt_pathname(work->tcon->share_conf, &path);
+		if (!filename) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		len = strlen(filename);
+		/* make sure the converted name cannot overrun the response */
+		rsp_offset = offsetof(struct smb_com_trans2_rsp, Buffer) +
+			     offsetof(struct file_name_info, FileName) + len;
+		if (rsp_offset > work->response_sz) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		uni_filename_len = smbConvertToUTF16(
+				(__le16 *)name_info->FileName,
+				filename, len,
+				conn->local_nls, 0);
+		kfree(filename);
+		/* convert UTF-16 character count to bytes */
+		uni_filename_len *= 2;
+		name_info->FileNameLength = cpu_to_le32(uni_filename_len);
+
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount = cpu_to_le16(uni_filename_len + 4);
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(uni_filename_len + 4);
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount = cpu_to_le16(2 + uni_filename_len + 4 + 3);
+		rsp->Pad = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_ALL_INFO:
+	{
+		struct file_all_info *ainfo;
+		size_t len, rsp_offset;
+		unsigned int del_pending;
+		char *filename;
+		/* 72 = fixed portion of file_all_info before the name */
+		int uni_filename_len, total_count = 72;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_ALL_INFO\n");
+
+		del_pending = ksmbd_query_inode_status(path.dentry);
+		if (del_pending == KSMBD_INODE_STATUS_PENDING_DELETE)
+			del_pending = 1;
+		else
+			del_pending = 0;
+
+		filename = convert_to_nt_pathname(work->tcon->share_conf, &path);
+		if (!filename) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+
+		/*
+		 * Observation: sizeof smb_hdr is 33 bytes(including word count)
+		 * After that: trans2 response 22 bytes when stepcount 0 and
+		 * including ByteCount storage.
+		 */
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		ainfo = (struct file_all_info *) (ptr + 4);
+
+		ainfo->CreationTime = cpu_to_le64(create_time);
+		time = ksmbd_UnixTimeToNT(st.atime);
+		ainfo->LastAccessTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.mtime);
+		ainfo->LastWriteTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.ctime);
+		ainfo->ChangeTime = cpu_to_le64(time);
+		ainfo->Attributes = S_ISDIR(st.mode) ?
+					ATTR_DIRECTORY_LE : ATTR_ARCHIVE_LE;
+		ainfo->Pad1 = 0;
+		ainfo->AllocationSize = cpu_to_le64(st.blocks << 9);
+		ainfo->EndOfFile = cpu_to_le64(st.size);
+		ainfo->NumberOfLinks = cpu_to_le32(get_nlink(&st) -
+			del_pending);
+		ainfo->DeletePending = del_pending;
+		ainfo->Directory = S_ISDIR(st.mode) ? 1 : 0;
+		ainfo->Pad2 = 0;
+		ainfo->EASize = 0;
+		len = strlen(filename);
+		/* bound the converted name against the response buffer */
+		rsp_offset = offsetof(struct smb_com_trans2_rsp, Buffer) +
+			     offsetof(struct file_all_info, FileName) + len;
+		if (rsp_offset > work->response_sz) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		uni_filename_len = smbConvertToUTF16((__le16 *)ainfo->FileName,
+						     filename, len,
+						     conn->local_nls, 0);
+		kfree(filename);
+		uni_filename_len *= 2;
+		ainfo->FileNameLength = cpu_to_le32(uni_filename_len);
+		total_count += uni_filename_len;
+
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		/* add unicode name length of name */
+		rsp->t2.TotalDataCount = cpu_to_le16(total_count);
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(total_count);
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/* 2 for parameter count + 72 data count +
+		 * filename length + 3 pad (1pad1 + 2 pad2)
+		 */
+		rsp->ByteCount = cpu_to_le16(5 + total_count);
+		rsp->Pad = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_ALT_NAME_INFO:
+	{
+		struct alt_name_info *alt_name_info;
+		char *base;
+		int filename_len;
+
+		ksmbd_debug(SMB, "SMB_QUERY_ALT_NAME_INFO\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		/*
+		 * NOTE(review): ByteCount is fixed at 25 here while DataCount
+		 * is 4 + filename_len, and inc_resp_size() below uses a
+		 * different formula than the other cases (no 10 * 2 words);
+		 * verify against the 8.3 short-name maximum length.
+		 */
+		rsp->ByteCount = cpu_to_le16(25);
+		rsp->Pad = 0;
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		alt_name_info = (struct alt_name_info *)(ptr + 4);
+
+		/* short name is derived from the last path component */
+		base = strrchr(name, '/');
+		if (!base)
+			base = name;
+		else
+			base += 1;
+
+		filename_len = ksmbd_extract_shortname(conn, base,
+						       alt_name_info->FileName);
+		alt_name_info->FileNameLength = cpu_to_le32(filename_len);
+		rsp->t2.TotalDataCount = cpu_to_le16(4 + filename_len);
+		rsp->t2.DataCount = cpu_to_le16(4 + filename_len);
+
+		inc_resp_size(work, 4 + filename_len + 25);
+		break;
+	}
+	case SMB_QUERY_FILE_UNIX_BASIC:
+	{
+		struct file_unix_basic_info *unix_info;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_UNIX_BASIC\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = 0;
+		rsp->t2.TotalDataCount = cpu_to_le16(100);
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = 0;
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(100);
+		rsp->t2.DataOffset = cpu_to_le16(56);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		rsp->ByteCount = cpu_to_le16(101); /* 100 data count + 1pad */
+		rsp->Pad = 0;
+		unix_info = (struct file_unix_basic_info *)(&rsp->Pad + 1);
+		/*
+		 * NOTE(review): passes &init_user_ns rather than the mount's
+		 * idmap; on idmapped mounts the reported uid/gid may be
+		 * wrong — confirm against the other handlers.
+		 */
+		init_unix_info(unix_info, &init_user_ns, &st);
+		inc_resp_size(work, 10 * 2 + 101);
+		break;
+	}
+	case SMB_QUERY_FILE_INTERNAL_INFO:
+	{
+		struct file_internal_info *iinfo;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_INTERNAL_INFO\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount = cpu_to_le16(8);
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(8);
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		rsp->ByteCount = cpu_to_le16(13);
+		rsp->Pad = 0;
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		iinfo = (struct file_internal_info *) (ptr + 4);
+		/* inode number doubles as the unique file id */
+		iinfo->UniqueId = cpu_to_le64(st.ino);
+		inc_resp_size(work, 10 * 2 + 13);
+		break;
+	}
+	case SMB_QUERY_FILE_UNIX_LINK:
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_UNIX_LINK\n");
+		rc = smb_readlink(work, &path);
+		if (rc < 0)
+			goto err_out;
+		break;
+	case SMB_INFO_QUERY_ALL_EAS:
+		ksmbd_debug(SMB, "SMB_INFO_QUERY_ALL_EAS\n");
+		rc = smb_get_ea(work, &path);
+		if (rc < 0)
+			goto err_out;
+		break;
+	case SMB_QUERY_POSIX_ACL:
+		ksmbd_debug(SMB, "SMB_QUERY_POSIX_ACL\n");
+		rc = smb_get_acl(work, &path);
+		if (rc < 0)
+			goto err_out;
+		break;
+	default:
+		pr_err("query path info not implemnted for %x\n",
+		       le16_to_cpu(req_params->InformationLevel));
+		rc = -EINVAL;
+		goto err_out;
+	}
+
+err_out:
+	/* release the parent lock and references taken by the path lookup */
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+out:
+	ksmbd_revert_fsids(work);
+	kfree(name);
+	return rc;
+}
+
+/**
+ * create_trans2_reply() - fill in a parameter-less trans2 response
+ * @work:	smb work containing smb response buffer
+ * @count:	trans2 response buffer size
+ *
+ * Writes the fixed TRANS2 response header: no parameters, @count data
+ * bytes at offset 56, one pad byte, then grows the response length.
+ */
+static void create_trans2_reply(struct ksmbd_work *work, __u16 count)
+{
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct smb_hdr *hdr = work->response_buf;
+	__le16 data_cnt = cpu_to_le16(count);
+
+	hdr->WordCount = 0x0A;
+
+	/* no parameter block in this reply */
+	rsp->t2.TotalParameterCount = 0;
+	rsp->t2.ParameterCount = 0;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+
+	/* the whole payload fits in one response */
+	rsp->t2.TotalDataCount = data_cnt;
+	rsp->t2.DataCount = data_cnt;
+	rsp->t2.DataOffset = cpu_to_le16(56);
+	rsp->t2.DataDisplacement = 0;
+
+	rsp->t2.Reserved = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	rsp->Pad = 0;
+	/* +1 accounts for the pad byte */
+	rsp->ByteCount = cpu_to_le16(count + 1);
+	inc_resp_size(work, 10 * 2 + (count + 1));
+}
+
+/**
+ * set_fs_info() - handler for set fs info commands
+ * @work:	smb work containing set fs info command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int set_fs_info(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_setfsi_req *req = work->request_buf;
+	struct smb_com_trans2_setfsi_rsp *rsp = work->response_buf;
+	struct smb_com_trans2_setfsi_req_params *params;
+	int info_level;
+	unsigned int maxlen, offset;
+
+	/*
+	 * Validate the client-supplied ParameterOffset before using it,
+	 * as query_path_info()/query_fs_info() already do; otherwise a
+	 * crafted request could make us read past the request buffer.
+	 * The full params struct must fit since InformationLevel and the
+	 * unix-info fields are read from it below.
+	 */
+	maxlen = get_req_len(work->request_buf);
+	offset = le16_to_cpu(req->ParameterOffset) + 4;
+	if (offset > maxlen || maxlen - offset < sizeof(*params)) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	params = (struct smb_com_trans2_setfsi_req_params *)
+		(work->request_buf + offset);
+
+	info_level = le16_to_cpu(params->InformationLevel);
+
+	switch (info_level) {
+	case SMB_SET_CIFS_UNIX_INFO:
+	{
+		u64 client_cap;
+
+		ksmbd_debug(SMB, "SMB_SET_CIFS_UNIX_INFO\n");
+		/* only the exact protocol version we speak is accepted */
+		if (le16_to_cpu(params->ClientUnixMajor) !=
+			CIFS_UNIX_MAJOR_VERSION) {
+			pr_err("Non compatible unix major info\n");
+			return -EINVAL;
+		}
+
+		if (le16_to_cpu(params->ClientUnixMinor) !=
+			CIFS_UNIX_MINOR_VERSION) {
+			pr_err("Non compatible unix minor info\n");
+			return -EINVAL;
+		}
+
+		client_cap = le64_to_cpu(params->ClientUnixCap);
+		ksmbd_debug(SMB, "clients unix cap = %llx\n", client_cap);
+		/* TODO: process caps */
+		rsp->hdr.WordCount = 0x0A;
+		rsp->t2.TotalDataCount = 0;
+		break;
+	}
+	default:
+		ksmbd_debug(SMB, "info level %x  not supported\n", info_level);
+		return -EINVAL;
+	}
+
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+	return 0;
+}
+
+/**
+ * query_fs_info() - handler for query fs info commands
+ * @work:	smb work containing query fs info command buffer
+ *
+ * Stats the share root and fills the response according to the
+ * requested InformationLevel.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int query_fs_info(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct smb_com_trans2_qfsi_req_params *req_params;
+	struct ksmbd_conn *conn = work->conn;
+	struct kstatfs stfs;
+	struct ksmbd_share_config *share;
+	int rc;
+	struct path path;
+	bool incomplete = false;
+	int info_level, len = 0;
+	struct ksmbd_tree_connect *tree_conn;
+	unsigned int maxlen, offset;
+
+	/* reject a ParameterOffset pointing past the end of the request */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->ParameterOffset) + 4;
+	if (offset > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	req_params =
+		(struct smb_com_trans2_qfsi_req_params *)(work->request_buf +
+							  offset);
+
+	/* check if more data is coming */
+	if (le16_to_cpu(req->TotalParameterCount) !=
+	    le16_to_cpu(req->ParameterCount)) {
+		ksmbd_debug(SMB, "total param = %d, received = %d\n",
+			    le16_to_cpu(req->TotalParameterCount),
+			    le16_to_cpu(req->ParameterCount));
+		incomplete = true;
+	}
+
+	if (le16_to_cpu(req->TotalDataCount) != le16_to_cpu(req->DataCount)) {
+		ksmbd_debug(SMB, "total data = %d, received = %d\n",
+			    le16_to_cpu(req->TotalDataCount),
+			    le16_to_cpu(req->DataCount));
+		incomplete = true;
+	}
+
+	if (incomplete) {
+		/* create 1 trans_state structure
+		 * and add to connection list
+		 */
+	}
+
+	info_level = le16_to_cpu(req_params->InformationLevel);
+
+	tree_conn = work->tcon;
+	if (!tree_conn)
+		return -ENOENT;
+	share = tree_conn->share_conf;
+
+	/* fs queries make no sense on an IPC (pipe) share */
+	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE))
+		return -ENOENT;
+
+	if (ksmbd_override_fsids(work))
+		return -ENOMEM;
+
+	rc = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
+	if (rc) {
+		ksmbd_revert_fsids(work);
+		pr_err("cannot create vfs path\n");
+		return rc;
+	}
+
+	rc = vfs_statfs(&path, &stfs);
+	if (rc) {
+		pr_err("cannot do stat of path %s\n", share->path);
+		goto err_out;
+	}
+
+	switch (info_level) {
+	case SMB_INFO_ALLOCATION:
+	{
+		struct filesystem_alloc_info *ainfo;
+
+		ksmbd_debug(SMB, "GOT SMB_INFO_ALLOCATION\n");
+		rsp->t2.TotalDataCount = cpu_to_le16(18);
+		ainfo = (struct filesystem_alloc_info *)(&rsp->Pad + 1);
+		ainfo->fsid = 0;
+		ainfo->BytesPerSector = cpu_to_le16(512);
+		ainfo->SectorsPerAllocationUnit =
+		cpu_to_le32(stfs.f_bsize/le16_to_cpu(ainfo->BytesPerSector));
+		ainfo->TotalAllocationUnits = cpu_to_le32(stfs.f_blocks);
+		ainfo->FreeAllocationUnits = cpu_to_le32(stfs.f_bfree);
+		break;
+	}
+	case SMB_QUERY_FS_VOLUME_INFO:
+	{
+		struct filesystem_vol_info *vinfo;
+		size_t share_len, rsp_offset;
+
+		ksmbd_debug(SMB, "GOT SMB_QUERY_FS_VOLUME_INFO\n");
+		vinfo = (struct filesystem_vol_info *)(&rsp->Pad + 1);
+		vinfo->VolumeCreationTime = 0;
+		/* Taking dummy value of serial number*/
+		vinfo->SerialNumber = cpu_to_le32(0xbc3ac512);
+		share_len = strlen(share->name);
+		/* bound the converted label against the response buffer */
+		rsp_offset = offsetof(struct smb_com_trans2_rsp, Buffer) +
+			     offsetof(struct filesystem_vol_info, VolumeLabel) +
+			     share_len;
+		if (rsp_offset > work->response_sz) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		len = smbConvertToUTF16((__le16 *)vinfo->VolumeLabel,
+					share->name, share_len,
+					conn->local_nls, 0);
+		/*
+		 * NOTE(review): smbConvertToUTF16() returns a character
+		 * count; other cases in this file multiply by 2 before
+		 * reporting byte lengths — confirm whether VolumeLabelSize
+		 * and the data count below should use len * 2.
+		 */
+		vinfo->VolumeLabelSize = cpu_to_le32(len);
+		vinfo->Reserved = 0;
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct filesystem_vol_info) +
+				    len - 2);
+		break;
+	}
+	case SMB_QUERY_FS_SIZE_INFO:
+	{
+		struct filesystem_info *sinfo;
+
+		ksmbd_debug(SMB, "GOT SMB_QUERY_FS_SIZE_INFO\n");
+		rsp->t2.TotalDataCount = cpu_to_le16(24);
+		sinfo = (struct filesystem_info *)(&rsp->Pad + 1);
+		sinfo->BytesPerSector = cpu_to_le32(512);
+		/* divide by the CPU-endian value, not the on-wire __le32 */
+		sinfo->SectorsPerAllocationUnit =
+			cpu_to_le32(stfs.f_bsize /
+				    le32_to_cpu(sinfo->BytesPerSector));
+		sinfo->TotalAllocationUnits = cpu_to_le64(stfs.f_blocks);
+		sinfo->FreeAllocationUnits = cpu_to_le64(stfs.f_bfree);
+		break;
+	}
+	case SMB_QUERY_FS_FULL_SIZE_INFO:
+	{
+		struct filesystem_full_info *sinfo;
+
+		ksmbd_debug(SMB, "GOT SMB_QUERY_FS_FULL_SIZE_INFO\n");
+		rsp->t2.TotalDataCount = cpu_to_le16(32);
+		sinfo = (struct filesystem_full_info *)(&rsp->Pad + 1);
+		sinfo->BytesPerSector = cpu_to_le32(stfs.f_bsize);
+		/* divide by the CPU-endian value, not the on-wire __le32 */
+		sinfo->SectorsPerAllocationUnit =
+			cpu_to_le32(stfs.f_bsize /
+				    le32_to_cpu(sinfo->BytesPerSector));
+		sinfo->TotalAllocationUnits = cpu_to_le64(stfs.f_blocks);
+		sinfo->FreeAllocationUnits = cpu_to_le64(stfs.f_bfree);
+		sinfo->ActualAvailableUnits = cpu_to_le64(stfs.f_bavail);
+		break;
+	}
+	case SMB_QUERY_FS_DEVICE_INFO:
+	{
+		struct filesystem_device_info *fdi;
+
+		/* query fs info device info response is 0 word and 8 bytes */
+		ksmbd_debug(SMB, "GOT SMB_QUERY_FS_DEVICE_INFO\n");
+		if (le16_to_cpu(req->MaxDataCount) < 8) {
+			pr_err("Insufficient bytes, cannot response()\n");
+			rc = -EINVAL;
+			goto err_out;
+		}
+
+		/*
+		 * NOTE(review): the device-info payload is 8 bytes (see
+		 * the MaxDataCount check above); 18 looks copied from
+		 * SMB_INFO_ALLOCATION — confirm before changing the wire
+		 * behaviour.
+		 */
+		rsp->t2.TotalDataCount = cpu_to_le16(18);
+		fdi = (struct filesystem_device_info *)(&rsp->Pad + 1);
+		fdi->DeviceType = cpu_to_le32(FILE_DEVICE_DISK);
+		fdi->DeviceCharacteristics = cpu_to_le32(0x20);
+		break;
+	}
+	case SMB_QUERY_FS_ATTRIBUTE_INFO:
+	{
+		struct filesystem_attribute_info *info;
+
+		ksmbd_debug(SMB, "GOT SMB_QUERY_FS_ATTRIBUTE_INFO\n");
+		/* constant 12 bytes + variable filesystem name */
+		info = (struct filesystem_attribute_info *)(&rsp->Pad + 1);
+
+		if (le16_to_cpu(req->MaxDataCount) < 12) {
+			pr_err("Insufficient bytes, cannot response()\n");
+			rc = -EINVAL;
+			goto err_out;
+		}
+
+		info->Attributes = cpu_to_le32(FILE_CASE_PRESERVED_NAMES |
+					       FILE_CASE_SENSITIVE_SEARCH |
+					       FILE_VOLUME_QUOTAS);
+		info->MaxPathNameComponentLength = cpu_to_le32(stfs.f_namelen);
+		info->FileSystemNameLen = 0;
+		rsp->t2.TotalDataCount = cpu_to_le16(12);
+		break;
+	}
+	case SMB_QUERY_CIFS_UNIX_INFO:
+	{
+		struct filesystem_unix_info *uinfo;
+
+		ksmbd_debug(SMB, "GOT SMB_QUERY_CIFS_UNIX_INFO\n");
+		/* constant 12 bytes + variable filesystem name */
+		uinfo = (struct filesystem_unix_info *)(&rsp->Pad + 1);
+
+		if (le16_to_cpu(req->MaxDataCount) < 12) {
+			pr_err("Insufficient bytes, cannot response()\n");
+			rc = -EINVAL;
+			goto err_out;
+		}
+		uinfo->MajorVersionNumber =
+			cpu_to_le16(CIFS_UNIX_MAJOR_VERSION);
+		uinfo->MinorVersionNumber =
+			cpu_to_le16(CIFS_UNIX_MINOR_VERSION);
+		uinfo->Capability = cpu_to_le64(SMB_UNIX_CAPS);
+		rsp->t2.TotalDataCount = cpu_to_le16(12);
+		break;
+	}
+	case SMB_QUERY_POSIX_FS_INFO:
+	{
+		struct filesystem_posix_info *pinfo;
+
+		ksmbd_debug(SMB, "GOT SMB_QUERY_POSIX_FS_INFO\n");
+		rsp->t2.TotalDataCount = cpu_to_le16(56);
+		pinfo = (struct filesystem_posix_info *)(&rsp->Pad + 1);
+		pinfo->BlockSize = cpu_to_le32(stfs.f_bsize);
+		pinfo->OptimalTransferSize = cpu_to_le32(stfs.f_blocks);
+		pinfo->TotalBlocks = cpu_to_le64(stfs.f_blocks);
+		pinfo->BlocksAvail = cpu_to_le64(stfs.f_bfree);
+		pinfo->UserBlocksAvail = cpu_to_le64(stfs.f_bavail);
+		pinfo->TotalFileNodes = cpu_to_le64(stfs.f_files);
+		pinfo->FreeFileNodes = cpu_to_le64(stfs.f_ffree);
+		pinfo->FileSysIdentifier = 0;
+		break;
+	}
+	default:
+		ksmbd_debug(SMB, "info level %x not implemented\n", info_level);
+		rc = -EINVAL;
+		goto err_out;
+	}
+
+	create_trans2_reply(work, le16_to_cpu(rsp->t2.TotalDataCount));
+
+err_out:
+	path_put(&path);
+	ksmbd_revert_fsids(work);
+	return rc;
+}
+
+/**
+ * smb_posix_convert_flags() - convert smb posix access flags to open flags
+ * @flags:	smb posix access flags
+ * @may_flags:	output; MAY_* mask derived from the resulting open flags
+ *
+ * Return:	file open flags
+ */
+static __u32 smb_posix_convert_flags(__u32 flags, int *may_flags)
+{
+	/* one-to-one SMB_O_* -> O_* flag translations */
+	static const struct {
+		__u32 smb_bit;
+		__u32 open_bit;
+	} bit_map[] = {
+		{ SMB_O_CREAT,     O_CREAT },
+		{ SMB_O_SYNC,      O_DSYNC },
+		{ SMB_O_DIRECTORY, O_DIRECTORY },
+		{ SMB_O_NOFOLLOW,  O_NOFOLLOW },
+		{ SMB_O_APPEND,    O_APPEND },
+	};
+	__u32 oflags = 0;
+	int i;
+
+	switch (flags & SMB_ACCMODE) {
+	case SMB_O_RDONLY:
+		oflags = O_RDONLY;
+		break;
+	case SMB_O_WRONLY:
+		oflags = O_WRONLY;
+		break;
+	case SMB_O_RDWR:
+		oflags = O_RDWR;
+		break;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(bit_map); i++)
+		if (flags & bit_map[i].smb_bit)
+			oflags |= bit_map[i].open_bit;
+
+	*may_flags = ksmbd_openflags_to_mayflags(oflags);
+
+	return oflags;
+}
+
+/**
+ * smb_get_disposition() - convert smb disposition flags to open flags
+ * @flags:		smb file disposition flags
+ * @file_present:	file already present or not
+ * @stat:		file stat information (currently unused here)
+ * @open_flags:		open flags should be stored here
+ *
+ * Return:		file disposition flags
+ */
+static int smb_get_disposition(unsigned int flags, bool file_present,
+			       struct kstat *stat, unsigned int *open_flags)
+{
+	int disposition, disp_flags;
+
+	/* map the CREAT/EXCL/TRUNC combination onto an NT disposition */
+	if ((flags & SMB_O_CREAT) && (flags & SMB_O_EXCL))
+		disposition = FILE_CREATE;
+	else if ((flags & SMB_O_CREAT) && (flags & SMB_O_TRUNC))
+		disposition = FILE_OVERWRITE_IF;
+	else if (flags & SMB_O_CREAT)
+		disposition = FILE_OPEN_IF;
+	else if (flags & SMB_O_TRUNC)
+		disposition = FILE_OVERWRITE;
+	else if (!(flags & (SMB_O_CREAT | SMB_O_EXCL | SMB_O_TRUNC)))
+		disposition = FILE_OPEN;
+	else
+		disposition = FILE_SUPERSEDE;
+
+	disp_flags = file_create_dispostion_flags(disposition, file_present);
+	if (disp_flags < 0)
+		return disp_flags;
+
+	*open_flags |= disp_flags;
+	return disp_flags;
+}
+
+/**
+ * smb_posix_open() - handler for smb posix open
+ * @work:	smb work containing posix open command
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_posix_open(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *pSMB_req = work->request_buf;
+	struct smb_com_trans2_spi_rsp *pSMB_rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct open_psx_req *psx_req;
+	struct open_psx_rsp *psx_rsp;
+	struct path path, parent_path;
+	struct kstat stat;
+	__u16 data_offset, rsp_info_level, file_info = 0;
+	__u32 oplock_flags, posix_open_flags, may_flags;
+	umode_t mode;
+	char *name;
+	unsigned int maxlen, offset;
+	bool file_present = true, create_directory;
+	int err;
+	struct ksmbd_file *fp = NULL;
+	int oplock_rsp = OPLOCK_NONE;
+
+	maxlen = get_req_len(pSMB_req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		pSMB_rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, pSMB_req->FileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		pSMB_rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	if (ksmbd_override_fsids(work)) {
+		pSMB_rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		kfree(name);
+		return -ENOMEM;
+	}
+
+	err = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					 &parent_path, &path, 0);
+	if (err) {
+		file_present = false;
+		ksmbd_debug(SMB, "cannot get linux path for %s, err = %d\n",
+			    name, err);
+		if (err == -EACCES || err == -EXDEV)
+			goto out;
+	} else {
+		if (d_is_symlink(path.dentry)) {
+			err = -EACCES;
+			goto free_path;
+		}
+		err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+				  AT_STATX_SYNC_AS_STAT);
+		if (err) {
+			pr_err("can not stat %s, err = %d\n", name, err);
+			goto free_path;
+		}
+	}
+
+	data_offset = le16_to_cpu(pSMB_req->DataOffset) + 4;
+	if (data_offset > maxlen) {
+		err = -EINVAL;
+		goto free_path;
+	}
+	psx_req = (struct open_psx_req *)((char *)pSMB_req + data_offset);
+	oplock_flags = le32_to_cpu(psx_req->OpenFlags);
+
+	posix_open_flags = smb_posix_convert_flags(
+			le32_to_cpu(psx_req->PosixOpenFlags),
+			&may_flags);
+	create_directory = !!(posix_open_flags == (O_DIRECTORY | O_CREAT));
+
+	err = smb_get_disposition(le32_to_cpu(psx_req->PosixOpenFlags),
+				  file_present, &stat, &posix_open_flags);
+	if (err < 0) {
+		ksmbd_debug(SMB, "create_dispostion returned %d\n", err);
+		if (file_present)
+			goto free_path;
+		else
+			goto out;
+	}
+
+	ksmbd_debug(SMB, "filename : %s, posix_open_flags : %x\n", name,
+		    posix_open_flags);
+	mode = (umode_t)le64_to_cpu(psx_req->Permissions);
+	rsp_info_level = le16_to_cpu(psx_req->Level);
+
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		if (posix_open_flags & O_CREAT) {
+			err = -EACCES;
+			ksmbd_debug(SMB,
+				"returning as user does not have permission to write\n");
+			if (file_present)
+				goto free_path;
+			else
+				goto out;
+		}
+	}
+
+	if (file_present && create_directory) {
+		err = -EEXIST;
+		goto free_path;
+	}
+
+	if (!file_present && (posix_open_flags & O_CREAT)) {
+		err = smb_common_create(work, &parent_path, &path, name,
+					posix_open_flags, mode,
+					create_directory);
+		if (err) {
+			ksmbd_debug(SMB, "smb_common_create err: %d\n", err);
+			goto out;
+		}
+
+		if (create_directory)
+			goto prepare_rsp;
+	} else {
+		err = inode_permission(mnt_idmap(path.mnt),
+				       d_inode(path.dentry),
+				       may_flags);
+		if (err)
+			goto free_path;
+	}
+
+	fp = ksmbd_vfs_dentry_open(work, &path, posix_open_flags, 0,
+				   file_present);
+	if (IS_ERR(fp)) {
+		err = PTR_ERR(fp);
+		fp = NULL;
+		goto free_path;
+	}
+	fp->pid = le16_to_cpu(pSMB_req->hdr.Pid);
+
+	down_write(&fp->f_ci->m_lock);
+	list_add(&fp->node, &fp->f_ci->m_fp_list);
+	up_write(&fp->f_ci->m_lock);
+
+	if (smb1_oplock_enable &&
+	    test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_OPLOCKS) &&
+	    !S_ISDIR(file_inode(fp->filp)->i_mode)) {
+		/* Client cannot request levelII oplock directly */
+		err = smb_grant_oplock(work, oplock_flags &
+			(REQ_OPLOCK | REQ_BATCHOPLOCK), fp->volatile_id, fp,
+			le16_to_cpu(pSMB_req->hdr.Tid), NULL, 0);
+		if (err)
+			goto free_path;
+	}
+
+	oplock_rsp = fp->f_opinfo != NULL ? fp->f_opinfo->level : 0;
+
+prepare_rsp:
+	/* open/mkdir success, send back response */
+	data_offset = sizeof(struct smb_com_trans2_spi_rsp) -
+		      sizeof(pSMB_rsp->hdr.smb_buf_length) + 3 /*alignment*/;
+	psx_rsp = (struct open_psx_rsp *)(((char *)&pSMB_rsp->hdr.Protocol) +
+			data_offset);
+	if (data_offset + sizeof(struct open_psx_rsp) > work->response_sz) {
+		err = -EIO;
+		goto free_path;
+	}
+
+	psx_rsp->OplockFlags = cpu_to_le16(oplock_rsp);
+	psx_rsp->Fid = fp != NULL ? fp->volatile_id : 0;
+
+	if (file_present) {
+		if (!(posix_open_flags & O_TRUNC))
+			file_info = F_OPENED;
+		else
+			file_info = F_OVERWRITTEN;
+	} else
+		file_info = F_CREATED;
+	psx_rsp->CreateAction = cpu_to_le32(file_info);
+
+	if (rsp_info_level != SMB_QUERY_FILE_UNIX_BASIC) {
+		ksmbd_debug(SMB, "returning null information level response");
+		rsp_info_level = SMB_NO_INFO_LEVEL_RESPONSE;
+	}
+	psx_rsp->ReturnedLevel = cpu_to_le16(rsp_info_level);
+
+	err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	if (err) {
+		pr_err("cannot get stat information\n");
+		goto free_path;
+	}
+
+	pSMB_rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	pSMB_rsp->hdr.WordCount = 10;
+	pSMB_rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	pSMB_rsp->t2.TotalDataCount = cpu_to_le16(sizeof(struct open_psx_rsp));
+	pSMB_rsp->t2.ParameterCount = pSMB_rsp->t2.TotalParameterCount;
+	pSMB_rsp->t2.Reserved = 0;
+	pSMB_rsp->t2.ParameterCount = cpu_to_le16(2);
+	pSMB_rsp->t2.ParameterOffset = cpu_to_le16(56);
+	pSMB_rsp->t2.ParameterDisplacement = 0;
+	pSMB_rsp->t2.DataCount = pSMB_rsp->t2.TotalDataCount;
+	pSMB_rsp->t2.DataOffset = cpu_to_le16(data_offset);
+	pSMB_rsp->t2.DataDisplacement = 0;
+	pSMB_rsp->t2.SetupCount = 0;
+	pSMB_rsp->t2.Reserved1 = 0;
+
+	/* 2 for parameter count + 12 data count + 3 pad (1 pad1 + 2 pad2)*/
+	pSMB_rsp->ByteCount = cpu_to_le16(sizeof(struct open_psx_rsp) + 2 + 3);
+	pSMB_rsp->Reserved2 = 0;
+	inc_resp_size(work, pSMB_rsp->hdr.WordCount * 2 + 117);
+
+free_path:
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+out:
+	switch (err) {
+	case 0:
+		ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED);
+		break;
+	case -ENOSPC:
+		pSMB_rsp->hdr.Status.CifsError = STATUS_DISK_FULL;
+		break;
+	case -EINVAL:
+		pSMB_rsp->hdr.Status.CifsError = STATUS_NO_SUCH_USER;
+		break;
+	case -EACCES:
+		pSMB_rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		break;
+	case -ENOENT:
+		pSMB_rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_NOT_FOUND;
+		break;
+	case -EBUSY:
+		pSMB_rsp->hdr.Status.CifsError = STATUS_DELETE_PENDING;
+		break;
+	case -EEXIST:
+		pSMB_rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_COLLISION;
+		break;
+	default:
+		pSMB_rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+	}
+
+	if (err && fp)
+		ksmbd_close_fd(work, fp->volatile_id);
+	kfree(name);
+	ksmbd_revert_fsids(work);
+	return err;
+}
+
+/**
+ * smb_posix_unlink() - handler for posix file delete
+ * @work:	smb work containing trans2 posix delete command
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_posix_unlink(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct unlink_psx_rsp *psx_rsp = NULL;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path, parent_path;
+	unsigned int maxlen, offset;
+	char *name;
+	int rc = 0;
+
+	/* deleting requires a writable tree connection */
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB,
+			"returning as user does not have permission to write\n");
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		return -EACCES;
+	}
+
+	/* the request must be large enough to contain the FileName field */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->FileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	/* resolve with the parent directory locked; symlinks are refused */
+	rc = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					&parent_path, &path, 0);
+	if (rc < 0)
+		goto out;
+
+	rc = ksmbd_vfs_remove_file(work, &path);
+
+	/* drop the parent lock and both references before building the rsp */
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+
+	if (rc < 0)
+		goto out;
+
+	/* single 2-byte EAErrorOffset parameter follows the trans2 header */
+	psx_rsp = (struct unlink_psx_rsp *)((char *)rsp +
+			sizeof(struct smb_com_trans2_rsp));
+	psx_rsp->EAErrorOffset = cpu_to_le16(0);
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = cpu_to_le16(0);
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.Reserved = 0;
+	/* NOTE(review): ParameterCount is stored twice with the same value;
+	 * the second assignment below is redundant.
+	 */
+	rsp->t2.ParameterCount = cpu_to_le16(2);
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = cpu_to_le16(0);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 2 for parameter count + 1 pad1*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Pad = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+out:
+	/* any VFS failure is collapsed into a generic I/O error status */
+	if (rc)
+		rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+
+	kfree(name);
+	return rc;
+}
+
+/**
+ * smb_set_time_pathinfo() - handler for setting time using set path info
+ * @work:	smb work containing set path info command
+ *
+ * Decodes a FILE_BASIC_INFO payload from the request and applies any
+ * non-zero access/change/write timestamps to the named file.  A zero
+ * timestamp field means "leave unchanged".
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_time_pathinfo(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct file_basic_info *info;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct iattr attrs;
+	unsigned int maxlen, offset;
+	char *name;
+	int err = 0;
+
+	/* the request must be large enough to contain the FileName field */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->FileName, maxlen - offset, work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	/* DataOffset is relative to the SMB packet start; +4 apparently
+	 * skips the 4-byte length prefix, as in the sibling handlers.
+	 * Require the whole file_basic_info payload to fit in the request,
+	 * not just its first byte.
+	 */
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset + sizeof(struct file_basic_info) > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(name);
+		return -EINVAL;
+	}
+
+	info = (struct file_basic_info *)((char *)req + offset);
+
+	attrs.ia_valid = 0;
+	if (le64_to_cpu(info->LastAccessTime)) {
+		attrs.ia_atime = smb_NTtimeToUnix(info->LastAccessTime);
+		attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
+	}
+
+	if (le64_to_cpu(info->ChangeTime)) {
+		attrs.ia_ctime = smb_NTtimeToUnix(info->ChangeTime);
+		attrs.ia_valid |= ATTR_CTIME;
+	}
+
+	if (le64_to_cpu(info->LastWriteTime)) {
+		attrs.ia_mtime = smb_NTtimeToUnix(info->LastWriteTime);
+		attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
+	}
+	/* TODO: check dos mode and acl bits if req->Attributes nonzero */
+
+	/* nothing to change: still answer success */
+	if (!attrs.ia_valid)
+		goto done;
+
+	err = ksmbd_vfs_setattr(work, name, 0, &attrs);
+	if (err) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(name);	/* fix: name was leaked on this error path */
+		return err;
+	}
+
+done:
+	ksmbd_debug(SMB, "%s setattr done\n", name);
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 3 pad (1 pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(3);
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+	kfree(name);
+	return 0;
+}
+
+/**
+ * smb_set_unix_pathinfo() - handler for setting unix path info(setattr)
+ * @work:	smb work containing set path info command
+ *
+ * Converts a FILE_UNIX_BASIC_INFO payload into an iattr and applies it
+ * to the named file via ksmbd_vfs_setattr().
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_unix_pathinfo(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct file_unix_basic_info *unix_info;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path;
+	struct iattr attrs;
+	char *name;
+	int err = 0;
+	unsigned int maxlen, offset;
+
+	/* the request must be large enough to contain the FileName field */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->FileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	if (ksmbd_override_fsids(work)) {
+		/* fix: name was leaked and no status was set on this path */
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		kfree(name);
+		return -ENOMEM;
+	}
+	err = kern_path(name, 0, &path);
+	if (err) {
+		/* fix: previously returned without setting an error status */
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_NOT_FOUND;
+		ksmbd_revert_fsids(work);
+		kfree(name);
+		return -ENOENT;
+	}
+
+	/* DataOffset is relative to the SMB packet start (+4 skips the
+	 * length prefix); the whole unix_basic_info payload must fit.
+	 */
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset + sizeof(struct file_unix_basic_info) > maxlen) {
+		path_put(&path);
+		ksmbd_revert_fsids(work);
+		kfree(name);
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	unix_info = (struct file_unix_basic_info *)((char *)req + offset);
+	attrs.ia_valid = 0;
+	attrs.ia_mode = 0;
+	err = unix_info_to_attr(unix_info, &init_user_ns, &attrs);
+	path_put(&path);
+	ksmbd_revert_fsids(work);
+	if (err)
+		goto out;
+
+	err = ksmbd_vfs_setattr(work, name, 0, &attrs);
+	if (err)
+		goto out;
+	/* setattr success, prepare response */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 3 pad (1 pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(3);
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+out:
+	kfree(name);
+	if (err) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return err;
+	}
+	return 0;
+}
+
+/**
+ * smb_set_ea() - handler for setting extended attributes using set path
+ *		info command
+ * @work:	smb work containing set path info command
+ *
+ * Walks the FEA list in the request data area and stores each entry as a
+ * "user." xattr on the named file.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_ea(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct fealist *eabuf;
+	struct fea *ea;
+	char *fname, *attr_name = NULL, *value;
+	int rc = 0, list_len, i, next = 0;
+	unsigned int maxlen, offset;
+
+	/* the request must be large enough to contain the FileName field */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	fname = smb_get_name(share, req->FileName, maxlen - offset,
+			     work, false);
+	if (IS_ERR(fname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(fname);
+	}
+
+	/* DataOffset is relative to the SMB packet start (+4 skips the
+	 * length prefix); the 4-byte list_len header must also fit before
+	 * it is dereferenced below.
+	 */
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset + sizeof(eabuf->list_len) > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(fname);
+		return -EINVAL;
+	}
+
+	eabuf = (struct fealist *)((char *)req + offset);
+
+	/* list_len counts its own 4-byte header; keep only the entry bytes */
+	list_len = le32_to_cpu(eabuf->list_len) - 4;
+	if (offset + list_len > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(fname);
+		return -EINVAL;
+	}
+
+	ea = (struct fea *)eabuf->list;
+
+	for (i = 0; list_len >= 0 && ea->name_len != 0; i++, list_len -= next) {
+		/* leave room for the "user." prefix in the xattr name */
+		if (ea->name_len > (XATTR_NAME_MAX - XATTR_USER_PREFIX_LEN)) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+		/* byte size of this entry: 4-byte header + name + value */
+		next = ea->name_len + le16_to_cpu(ea->value_len) + 4;
+		offset += next;
+		if (offset > maxlen) {
+			rc = -EINVAL;
+			goto out;
+		}
+
+		attr_name = kmalloc(XATTR_NAME_MAX + 1, KSMBD_DEFAULT_GFP);
+		if (!attr_name) {
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		memcpy(attr_name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN);
+		memcpy(&attr_name[XATTR_USER_PREFIX_LEN], ea->name,
+		       ea->name_len);
+		attr_name[XATTR_USER_PREFIX_LEN + ea->name_len] = '\0';
+		/* value follows the name and its NUL terminator */
+		value = (char *)&ea->name + ea->name_len + 1;
+		ksmbd_debug(SMB, "name: <%s>, name_len %u, value_len %u\n",
+			    ea->name, ea->name_len, le16_to_cpu(ea->value_len));
+
+		rc = ksmbd_vfs_fsetxattr(work, fname, attr_name, value,
+					 le16_to_cpu(ea->value_len), 0);
+		if (rc < 0) {
+			kfree(attr_name);
+			rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+			goto out;
+		}
+		kfree(attr_name);
+		/* fix: "ea += next" scaled the byte count by sizeof(struct
+		 * fea), jumping far past the next entry; advance by bytes.
+		 */
+		ea = (struct fea *)((char *)ea + next);
+	}
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = cpu_to_le16(0);
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = cpu_to_le16(2);
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = cpu_to_le16(0);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 2 for parameter count + 1 pad1*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Pad = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+out:
+	kfree(fname);
+	return rc;
+}
+
+/**
+ * smb_set_file_size_pinfo() - handler for setting eof or truncate using
+ *		trans2 set path info command
+ * @work:	smb work containing set path info command
+ *
+ * Reads a FILE_END_OF_FILE_INFO payload and truncates/extends the named
+ * file to the requested size.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_file_size_pinfo(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct file_end_of_file_info *eofinfo;
+	struct iattr attr;
+	char *name = NULL;
+	loff_t newsize;
+	int rc = 0;
+	unsigned int maxlen, offset;
+
+	/* the request must be large enough to contain the FileName field */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->FileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	/* fix: was "offset +=", which wrongly added the FileName offsetof
+	 * left over in offset; DataOffset is relative to the SMB packet
+	 * start (+4 for the length prefix), as in the sibling handlers.
+	 * The whole end_of_file_info payload must fit in the request.
+	 */
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset + sizeof(struct file_end_of_file_info) > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(name);
+		return -EINVAL;
+	}
+
+	eofinfo = (struct file_end_of_file_info *)((char *)req + offset);
+	newsize = le64_to_cpu(eofinfo->FileSize);
+	attr.ia_valid = ATTR_SIZE;
+	attr.ia_size = newsize;
+	rc = ksmbd_vfs_setattr(work, name, 0, &attr);
+	if (rc) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		goto out;
+	}
+	ksmbd_debug(SMB, "%s truncated to newsize %lld\n", name, newsize);
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 2 for parameter count + 1 pad1*/
+	rsp->ByteCount = cpu_to_le16(3);
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+out:
+	kfree(name);
+	return rc;
+}
+
+/**
+ * smb_creat_hardlink() - handler for creating hardlink
+ * @work:	smb work containing set path info command buffer
+ *
+ * The new link name comes from the FileName parameter; the existing
+ * file's name is read from the request data area.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_creat_hardlink(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	unsigned int maxlen, offset;
+	char *oldname, *newname;
+	int err;
+
+	/* the request must be large enough to contain the FileName field */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	newname = smb_get_name(share, req->FileName, maxlen - offset,
+			       work, false);
+	if (IS_ERR(newname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(newname);
+	}
+
+	/* DataOffset is relative to the SMB packet start; +4 apparently
+	 * skips the 4-byte length prefix (same convention as the other
+	 * trans2 handlers here); the link source name lives there
+	 */
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(newname);
+		return -EINVAL;
+	}
+
+	oldname = smb_get_name(share, (char *)req + offset, maxlen - offset,
+			       work, false);
+	if (IS_ERR(oldname)) {
+		err = PTR_ERR(oldname);
+		/* clear so the common kfree at out: is a no-op */
+		oldname = NULL;
+		goto out;
+	}
+	ksmbd_debug(SMB, "oldname %s, newname %s\n", oldname, newname);
+
+	err = ksmbd_vfs_link(work, oldname, newname);
+	if (err < 0) {
+		if (err == -EACCES)
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		else
+			rsp->hdr.Status.CifsError = STATUS_NOT_SAME_DEVICE;
+		goto out;
+	}
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = 0;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	rsp->ByteCount = cpu_to_le16(3);
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+out:
+	kfree(newname);
+	kfree(oldname);
+	return err;
+}
+
+/**
+ * smb_creat_symlink() - handler for creating symlink
+ * @work:	smb work containing set path info command buffer
+ *
+ * The symlink to create is named by the FileName parameter; the link
+ * target string is read from the request data area.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_creat_symlink(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *req = work->request_buf;
+	struct smb_com_trans2_spi_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	unsigned int maxlen, offset;
+	char *name, *symname;
+	bool is_unicode = is_smbreq_unicode(&req->hdr);
+	int err;
+
+	/* the request must be large enough to contain the FileName field */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_trans2_spi_req, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	/* symname = path of the symlink itself */
+	symname = smb_get_name(share, req->FileName, maxlen - offset,
+			       work, false);
+	if (IS_ERR(symname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(symname);
+	}
+
+	/* DataOffset is relative to the SMB packet start; +4 apparently
+	 * skips the 4-byte length prefix, as in the sibling handlers
+	 */
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset >= maxlen) {
+		kfree(symname);
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	/* name = link target string, decoded per the request's charset */
+	name = smb_strndup_from_utf16((char *)req + offset, maxlen - offset,
+				      is_unicode, work->conn->local_nls);
+	if (IS_ERR(name)) {
+		kfree(symname);
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		return PTR_ERR(name);
+	}
+	ksmbd_debug(SMB, "name %s, symname %s\n", name, symname);
+
+	err = ksmbd_vfs_symlink(work, name, symname);
+	if (err < 0) {
+		if (err == -ENOSPC)
+			rsp->hdr.Status.CifsError = STATUS_DISK_FULL;
+		else if (err == -EEXIST)
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_COLLISION;
+		else
+			rsp->hdr.Status.CifsError = STATUS_NOT_SAME_DEVICE;
+	} else
+		rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+
+	/* the trans2 response shell is filled in even on failure; only the
+	 * status word above differs
+	 */
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = 0;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	rsp->ByteCount = cpu_to_le16(3);
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+	kfree(name);
+	kfree(symname);
+	return err;
+}
+
+/**
+ * set_path_info() - handler for trans2 set path info sub commands
+ * @work:	smb work containing set path info command
+ *
+ * Dispatches on the request's InformationLevel to the matching
+ * sub-handler; each sub-handler fills in the response (including the
+ * error status) itself.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int set_path_info(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_spi_req *pSMB_req = work->request_buf;
+	struct smb_com_trans2_spi_rsp *pSMB_rsp = work->response_buf;
+	__u16 info_level, total_param;
+	int err = 0;
+
+	info_level = le16_to_cpu(pSMB_req->InformationLevel);
+	total_param = le16_to_cpu(pSMB_req->TotalParameterCount);
+	/* minimum SPI parameter block: 2-byte info level + 4 reserved
+	 * bytes + at least one FileName byte (presumably; see MS-CIFS)
+	 */
+	if (total_param < 7) {
+		pSMB_rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		pr_err("invalid total parameter for info_level 0x%x\n",
+		       total_param);
+		return -EINVAL;
+	}
+
+	switch (info_level) {
+	case SMB_POSIX_OPEN:
+		err = smb_posix_open(work);
+		break;
+	case SMB_POSIX_UNLINK:
+		err = smb_posix_unlink(work);
+		break;
+	case SMB_SET_FILE_UNIX_HLINK:
+		err = smb_creat_hardlink(work);
+		break;
+	case SMB_SET_FILE_UNIX_LINK:
+		err = smb_creat_symlink(work);
+		break;
+	case SMB_SET_FILE_BASIC_INFO:
+		/* fall through */
+	case SMB_SET_FILE_BASIC_INFO2:
+		err = smb_set_time_pathinfo(work);
+		break;
+	case SMB_SET_FILE_UNIX_BASIC:
+		err = smb_set_unix_pathinfo(work);
+		break;
+	case SMB_SET_FILE_EA:
+		err = smb_set_ea(work);
+		break;
+	case SMB_SET_POSIX_ACL:
+		err = smb_set_acl(work);
+		break;
+	case SMB_SET_FILE_END_OF_FILE_INFO2:
+		/* fall through */
+	case SMB_SET_FILE_END_OF_FILE_INFO:
+		err = smb_set_file_size_pinfo(work);
+		break;
+	default:
+		ksmbd_debug(SMB, "info level = %x not implemented yet\n",
+			    info_level);
+		pSMB_rsp->hdr.Status.CifsError = STATUS_NOT_IMPLEMENTED;
+		return -EOPNOTSUPP;
+	}
+
+	if (err < 0)
+		ksmbd_debug(SMB, "info_level 0x%x failed, err %d\n", info_level,
+			    err);
+	return err;
+}
+/*
+ * readdir_info_level_struct_sz() - size in bytes of the fixed part of the
+ * on-wire structure that encodes one directory entry for @info_level.
+ *
+ * Returns -EOPNOTSUPP for info levels this server does not implement.
+ */
+static int readdir_info_level_struct_sz(int info_level)
+{
+	if (info_level == SMB_FIND_FILE_INFO_STANDARD)
+		return sizeof(struct find_info_standard);
+	if (info_level == SMB_FIND_FILE_QUERY_EA_SIZE)
+		return sizeof(struct find_info_query_ea_size);
+	if (info_level == SMB_FIND_FILE_DIRECTORY_INFO)
+		return sizeof(struct file_directory_info);
+	if (info_level == SMB_FIND_FILE_FULL_DIRECTORY_INFO)
+		return sizeof(struct file_full_directory_info);
+	if (info_level == SMB_FIND_FILE_NAMES_INFO)
+		return sizeof(struct file_names_info);
+	if (info_level == SMB_FIND_FILE_BOTH_DIRECTORY_INFO)
+		return sizeof(struct file_both_directory_info);
+	if (info_level == SMB_FIND_FILE_ID_FULL_DIR_INFO)
+		return sizeof(struct file_id_full_dir_info);
+	if (info_level == SMB_FIND_FILE_ID_BOTH_DIR_INFO)
+		return sizeof(struct file_id_both_directory_info);
+	if (info_level == SMB_FIND_FILE_UNIX)
+		return sizeof(struct file_unix_info);
+	return -EOPNOTSUPP;
+}
+
+/**
+ * smb_populate_readdir_entry() - encode directory entry in smb response buffer
+ * @conn:	connection instance
+ * @info_level:	smb information level
+ * @d_info: structure included variables for query dir
+ * @ksmbd_kstat: ksmbd wrapper of dirent stat information
+ *
+ * if directory has many entries, find first can't read it fully.
+ * find next might be called multiple times to read remaining dir entries
+ *
+ * Each branch writes the fixed part of the chosen info-level structure at
+ * d_info->wptr, copies the converted name after it, and zero-fills only
+ * the alignment padding up to the next entry.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
+				      struct ksmbd_dir_info *d_info,
+				      struct ksmbd_kstat *ksmbd_kstat)
+{
+	int next_entry_offset;
+	char *conv_name;
+	int conv_len;
+	int struct_sz;
+
+	struct_sz = readdir_info_level_struct_sz(info_level);
+	if (struct_sz == -EOPNOTSUPP)
+		return -EOPNOTSUPP;
+
+	conv_name = ksmbd_convert_dir_info_name(d_info,
+						conn->local_nls,
+						&conv_len);
+	if (!conv_name)
+		return -ENOMEM;
+
+	/* struct_sz - 1: the structs end with a 1-byte FileName[] stub that
+	 * the converted name overlays
+	 */
+	next_entry_offset = ALIGN(struct_sz - 1 + conv_len,
+				  KSMBD_DIR_INFO_ALIGNMENT);
+
+	if (next_entry_offset > d_info->out_buf_len) {
+		kfree(conv_name);
+		d_info->out_buf_len = -1;
+		return -ENOSPC;
+	}
+
+	switch (info_level) {
+	case SMB_FIND_FILE_INFO_STANDARD:
+	{
+		struct find_info_standard *fsinfo;
+
+		fsinfo = (struct find_info_standard *)(d_info->wptr);
+		unix_to_dos_time(
+			ksmbd_NTtimeToUnix(
+				cpu_to_le64(ksmbd_kstat->create_time)),
+			&fsinfo->CreationTime,
+			&fsinfo->CreationDate);
+		unix_to_dos_time(ksmbd_kstat->kstat->atime,
+				 &fsinfo->LastAccessTime,
+				 &fsinfo->LastAccessDate);
+		unix_to_dos_time(ksmbd_kstat->kstat->mtime,
+				 &fsinfo->LastWriteTime,
+				 &fsinfo->LastWriteDate);
+		fsinfo->DataSize = cpu_to_le32(ksmbd_kstat->kstat->size);
+		fsinfo->AllocationSize =
+			cpu_to_le32(ksmbd_kstat->kstat->blocks << 9);
+		fsinfo->Attributes =
+			cpu_to_le16(S_ISDIR(ksmbd_kstat->kstat->mode) ?
+				ATTR_DIRECTORY : ATTR_ARCHIVE);
+		fsinfo->FileNameLength = cpu_to_le16(conv_len);
+		memcpy(fsinfo->FileName, conv_name, conv_len);
+
+		break;
+	}
+	case SMB_FIND_FILE_QUERY_EA_SIZE:
+	{
+		struct find_info_query_ea_size *fesize;
+
+		fesize = (struct find_info_query_ea_size *)(d_info->wptr);
+		unix_to_dos_time(
+			ksmbd_NTtimeToUnix(
+				cpu_to_le64(ksmbd_kstat->create_time)),
+			&fesize->CreationTime,
+			&fesize->CreationDate);
+		unix_to_dos_time(ksmbd_kstat->kstat->atime,
+				 &fesize->LastAccessTime,
+				 &fesize->LastAccessDate);
+		unix_to_dos_time(ksmbd_kstat->kstat->mtime,
+				 &fesize->LastWriteTime,
+				 &fesize->LastWriteDate);
+
+		fesize->DataSize = cpu_to_le32(ksmbd_kstat->kstat->size);
+		fesize->AllocationSize =
+			cpu_to_le32(ksmbd_kstat->kstat->blocks << 9);
+		fesize->Attributes =
+			cpu_to_le16(S_ISDIR(ksmbd_kstat->kstat->mode) ?
+				ATTR_DIRECTORY : ATTR_ARCHIVE);
+		fesize->EASize = 0;
+		fesize->FileNameLength = (__u8)(conv_len);
+		memcpy(fesize->FileName, conv_name, conv_len);
+
+		break;
+	}
+	case SMB_FIND_FILE_DIRECTORY_INFO:
+	{
+		struct file_directory_info *fdinfo = NULL;
+
+		fdinfo = (struct file_directory_info *)
+			ksmbd_vfs_init_kstat(&d_info->wptr, ksmbd_kstat);
+		fdinfo->FileNameLength = cpu_to_le32(conv_len);
+		memcpy(fdinfo->FileName, conv_name, conv_len);
+		fdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+		/* fix: zero only the alignment padding; the unparenthesized
+		 * form zeroed 2*conv_len-2 extra bytes past the entry
+		 */
+		memset((char *)fdinfo + struct_sz - 1 + conv_len, '\0',
+		       next_entry_offset - (struct_sz - 1 + conv_len));
+		break;
+	}
+	case SMB_FIND_FILE_FULL_DIRECTORY_INFO:
+	{
+		struct file_full_directory_info *ffdinfo = NULL;
+
+		ffdinfo = (struct file_full_directory_info *)
+			ksmbd_vfs_init_kstat(&d_info->wptr, ksmbd_kstat);
+		ffdinfo->FileNameLength = cpu_to_le32(conv_len);
+		ffdinfo->EaSize = 0;
+		memcpy(ffdinfo->FileName, conv_name, conv_len);
+		ffdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+		/* fix: zero only the alignment padding (see above) */
+		memset((char *)ffdinfo + struct_sz - 1 + conv_len, '\0',
+		       next_entry_offset - (struct_sz - 1 + conv_len));
+		break;
+	}
+	case SMB_FIND_FILE_NAMES_INFO:
+	{
+		struct file_names_info *fninfo = NULL;
+
+		fninfo = (struct file_names_info *)(d_info->wptr);
+		fninfo->FileNameLength = cpu_to_le32(conv_len);
+		memcpy(fninfo->FileName, conv_name, conv_len);
+		fninfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+		/* fix: zero only the alignment padding (see above) */
+		memset((char *)fninfo + struct_sz - 1 + conv_len, '\0',
+		       next_entry_offset - (struct_sz - 1 + conv_len));
+
+		break;
+	}
+	case SMB_FIND_FILE_BOTH_DIRECTORY_INFO:
+	{
+		struct file_both_directory_info *fbdinfo = NULL;
+
+		fbdinfo = (struct file_both_directory_info *)
+			ksmbd_vfs_init_kstat(&d_info->wptr, ksmbd_kstat);
+		fbdinfo->FileNameLength = cpu_to_le32(conv_len);
+		fbdinfo->EaSize = 0;
+		fbdinfo->ShortNameLength = 0;
+		fbdinfo->Reserved = 0;
+		memcpy(fbdinfo->FileName, conv_name, conv_len);
+		fbdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+		/* fix: zero only the alignment padding (see above) */
+		memset((char *)fbdinfo + struct_sz - 1 + conv_len, '\0',
+		       next_entry_offset - (struct_sz - 1 + conv_len));
+		break;
+	}
+	case SMB_FIND_FILE_ID_FULL_DIR_INFO:
+	{
+		struct file_id_full_dir_info *dinfo = NULL;
+
+		dinfo = (struct file_id_full_dir_info *)
+			ksmbd_vfs_init_kstat(&d_info->wptr, ksmbd_kstat);
+		dinfo->FileNameLength = cpu_to_le32(conv_len);
+		dinfo->EaSize = 0;
+		dinfo->Reserved = 0;
+		dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
+		memcpy(dinfo->FileName, conv_name, conv_len);
+		dinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+		/* fix: zero only the alignment padding (see above) */
+		memset((char *)dinfo + struct_sz - 1 + conv_len, '\0',
+		       next_entry_offset - (struct_sz - 1 + conv_len));
+		break;
+	}
+	case SMB_FIND_FILE_ID_BOTH_DIR_INFO:
+	{
+		struct file_id_both_directory_info *fibdinfo = NULL;
+
+		fibdinfo = (struct file_id_both_directory_info *)
+			ksmbd_vfs_init_kstat(&d_info->wptr, ksmbd_kstat);
+		fibdinfo->FileNameLength = cpu_to_le32(conv_len);
+		fibdinfo->EaSize = 0;
+		fibdinfo->ShortNameLength = 0;
+		fibdinfo->Reserved = 0;
+		fibdinfo->Reserved2 = 0;
+		fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
+		memcpy(fibdinfo->FileName, conv_name, conv_len);
+		fibdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+		/* fix: zero only the alignment padding (see above) */
+		memset((char *)fibdinfo + struct_sz - 1 + conv_len, '\0',
+		       next_entry_offset - (struct_sz - 1 + conv_len));
+
+		break;
+	}
+	case SMB_FIND_FILE_UNIX:
+	{
+		struct file_unix_info *finfo = NULL;
+		struct file_unix_basic_info *unix_info;
+
+		finfo = (struct file_unix_info *)(d_info->wptr);
+		finfo->ResumeKey = 0;
+		unix_info = (struct file_unix_basic_info *)((char *)finfo + 8);
+		init_unix_info(unix_info, &init_user_ns, ksmbd_kstat->kstat);
+		/* include null terminator */
+		memcpy(finfo->FileName, conv_name, conv_len + 2);
+		/* NOTE(review): the 2 terminator bytes added here were not
+		 * covered by the out_buf_len check above -- confirm callers
+		 * leave slack for them.
+		 */
+		next_entry_offset += 2;
+		finfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
+		/* fix: zero only the padding/terminator tail (see above) */
+		memset((char *)finfo + struct_sz - 1 + conv_len, '\0',
+		       next_entry_offset - (struct_sz - 1 + conv_len));
+		break;
+	}
+	}
+
+	d_info->num_entry++;
+	d_info->last_entry_offset = d_info->data_count;
+	d_info->data_count += next_entry_offset;
+	d_info->out_buf_len -= next_entry_offset;
+	d_info->wptr = (char *)(d_info->wptr) + next_entry_offset;
+	kfree(conv_name);
+
+	ksmbd_debug(SMB, "info_level : %d, buf_len :%d, next_offset : %d, data_count : %d\n",
+		    info_level, d_info->out_buf_len, next_entry_offset,
+		    d_info->data_count);
+	return 0;
+}
+
+/**
+ * ksmbd_fill_dirent() - copy one directory entry into the readdir page
+ * @ctx:	dir_context embedded in struct ksmbd_readdir_data
+ * @name:	dirent name
+ * @namlen:	dirent name length
+ * @offset:	dirent offset in directory
+ * @ino:	dirent inode number
+ * @d_type:	dirent type
+ *
+ * Actor callback for iterate_dir(): appends a struct ksmbd_dirent
+ * record (padded to u64 alignment) at buf->dirent + buf->used.
+ *
+ * Return:	true when the entry was stored, false when the page is
+ *		full and iteration must stop
+ */
+static bool ksmbd_fill_dirent(struct dir_context *ctx, const char *name, int namlen,
+			      loff_t offset, u64 ino, unsigned int d_type)
+{
+	struct ksmbd_readdir_data *buf =
+		container_of(ctx, struct ksmbd_readdir_data, ctx);
+	struct ksmbd_dirent *de = (void *)(buf->dirent + buf->used);
+	unsigned int reclen;
+
+	/* pad each record to u64 alignment so the next 'de' stays aligned */
+	reclen = ALIGN(sizeof(struct ksmbd_dirent) + namlen, sizeof(u64));
+	if (buf->used + reclen > PAGE_SIZE)
+		return false;	/* page full; caller will re-iterate later */
+
+	de->namelen = namlen;
+	de->offset = offset;
+	de->ino = ino;
+	de->d_type = d_type;
+	memcpy(de->name, name, namlen);	/* name is not NUL-terminated here */
+	buf->used += reclen;
+
+	return true;
+}
+
+/**
+ * find_first() - smb readdir command (TRANS2_FIND_FIRST2)
+ * @work:	smb work containing find first request params
+ *
+ * Opens the requested directory, emits as many entries matching the
+ * search pattern as fit into the response buffer, and returns the
+ * search handle so find_next() can continue the enumeration.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int find_first(struct ksmbd_work *work)
+{
+	struct smb_hdr *rsp_hdr = work->response_buf;
+	struct ksmbd_conn *conn = work->conn;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct smb_com_trans2_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct smb_com_trans2_ffirst_req_params *req_params;
+	struct smb_com_trans2_ffirst_rsp_parms *params = NULL;
+	struct path path, parent_path;
+	struct ksmbd_dirent *de;
+	struct ksmbd_file *dir_fp = NULL;
+	struct kstat kstat;
+	struct ksmbd_kstat ksmbd_kstat;
+	struct ksmbd_dir_info d_info;
+	int params_count = sizeof(struct smb_com_trans2_ffirst_rsp_parms);
+	int data_alignment_offset = 0;
+	int rc = 0, reclen = 0;
+	int srch_cnt = 0;
+	char *dirpath = NULL;
+	char *srch_ptr = NULL;
+	int header_size;
+	int struct_sz;
+	unsigned int maxlen, offset;
+
+	memset(&d_info, 0, sizeof(struct ksmbd_dir_info));
+
+	if (ksmbd_override_fsids(work)) {
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		return -ENOMEM;
+	}
+
+	/* bounds-check the request parameter block against the PDU length */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->ParameterOffset) + 4;
+	if (offset > maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		ksmbd_revert_fsids(work);
+		return -EINVAL;
+	}
+
+	req_params = (struct smb_com_trans2_ffirst_req_params *)
+		     (work->request_buf + offset);
+
+	offset += offsetof(struct smb_com_trans2_ffirst_req_params, FileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		ksmbd_revert_fsids(work);
+		return -EINVAL;
+	}
+
+	/* split the request name into directory path and search pattern */
+	dirpath = smb_get_dir_name(share, req_params->FileName,
+				   maxlen - offset, work, &srch_ptr);
+	if (IS_ERR(dirpath)) {
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		rc = PTR_ERR(dirpath);
+		goto err_out;
+	}
+
+	if (strlen(dirpath) == 1 && dirpath[0] == '/')
+		dirpath[0] = '\0';
+
+	ksmbd_debug(SMB, "complete dir path = %s\n", dirpath);
+	rc = ksmbd_vfs_kern_path_locked(work, dirpath,
+					LOOKUP_NO_SYMLINKS | LOOKUP_DIRECTORY,
+					&parent_path, &path, 0);
+	if (rc < 0) {
+		ksmbd_debug(SMB, "cannot create vfs root path <%s> %d\n",
+			    dirpath, rc);
+		goto err_free_dirpath;
+	} else {
+		if (inode_permission(mnt_idmap(path.mnt),
+				     d_inode(path.dentry),
+				     MAY_READ | MAY_EXEC)) {
+			rc = -EACCES;
+			goto err_free_kernpath;
+		}
+	}
+
+	if (d_is_symlink(path.dentry)) {
+		rc = -EACCES;
+		goto err_free_kernpath;
+	}
+
+	dir_fp = ksmbd_vfs_dentry_open(work, &path, O_RDONLY, 0, 1);
+	if (IS_ERR(dir_fp)) {
+		/* log the actual open error; rc is still 0 at this point */
+		ksmbd_debug(SMB, "dir dentry open failed with rc=%ld\n",
+			    PTR_ERR(dir_fp));
+		rc = -EINVAL;
+		dir_fp = NULL;
+		goto err_free_kernpath;
+	}
+
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&parent_path);
+
+	down_write(&dir_fp->f_ci->m_lock);
+	list_add(&dir_fp->node, &dir_fp->f_ci->m_fp_list);
+	up_write(&dir_fp->f_ci->m_lock);
+
+	/* one page of struct ksmbd_dirent records, filled by iterate_dir() */
+	set_ctx_actor(&dir_fp->readdir_data.ctx, ksmbd_fill_dirent);
+	dir_fp->readdir_data.dirent = (void *)__get_free_page(KSMBD_DEFAULT_GFP);
+	if (!dir_fp->readdir_data.dirent) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+
+	/* dir_fp takes ownership of dirpath from here on */
+	dir_fp->filename = dirpath;
+	dir_fp->readdir_data.used = 0;
+	dir_fp->dirent_offset = 0;
+	dir_fp->readdir_data.file_attr =
+		le16_to_cpu(req_params->SearchAttributes);
+	ksmbd_update_fstate(&work->sess->file_table, dir_fp, FP_INITED);
+
+	/* response data block is 4-byte aligned after the parameter block */
+	if (params_count % 4)
+		data_alignment_offset = 4 - params_count % 4;
+
+	d_info.smb1_name = kmalloc(NAME_MAX + 1, KSMBD_DEFAULT_GFP);
+	if (!d_info.smb1_name) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+	d_info.wptr = (char *)((char *)rsp + sizeof(struct smb_com_trans2_rsp) +
+			params_count + data_alignment_offset);
+
+	header_size = sizeof(struct smb_com_trans2_rsp) + params_count +
+		data_alignment_offset;
+
+
+	struct_sz = readdir_info_level_struct_sz(le16_to_cpu(req_params->InformationLevel));
+
+	if (struct_sz < 0) {
+		rc = -EFAULT;
+		goto err_out;
+	}
+
+	/* When search count is zero, respond only 1 entry. */
+	srch_cnt = le16_to_cpu(req_params->SearchCount);
+	if (!srch_cnt)
+		d_info.out_buf_len = struct_sz + header_size;
+	else
+		d_info.out_buf_len = min_t(int, srch_cnt * struct_sz + header_size,
+				MAX_CIFS_LOOKUP_BUFFER_SIZE - header_size);
+
+
+	/* reserve dot and dotdot entries in head of buffer in first response */
+	if (!*srch_ptr || is_asterisk(srch_ptr)) {
+		rc = ksmbd_populate_dot_dotdot_entries(work,
+				le16_to_cpu(req_params->InformationLevel),
+				dir_fp,
+				&d_info,
+				srch_ptr,
+				smb_populate_readdir_entry);
+		if (rc)
+			goto err_out;
+	}
+
+	do {
+		/* refill the dirent page whenever it has been consumed */
+		if (dir_fp->dirent_offset >= dir_fp->readdir_data.used) {
+			dir_fp->dirent_offset = 0;
+			dir_fp->readdir_data.used = 0;
+			rc = iterate_dir(dir_fp->filp,
+					 &dir_fp->readdir_data.ctx);
+			if (rc < 0) {
+				ksmbd_debug(SMB, "err : %d\n", rc);
+				goto err_out;
+			}
+
+			if (!dir_fp->readdir_data.used) {
+				/* directory exhausted */
+				free_page((unsigned long)
+						(dir_fp->readdir_data.dirent));
+				dir_fp->readdir_data.dirent = NULL;
+				break;
+			}
+
+			de = (struct ksmbd_dirent *)
+				((char *)dir_fp->readdir_data.dirent);
+		} else {
+			de = (struct ksmbd_dirent *)
+				((char *)dir_fp->readdir_data.dirent +
+				 dir_fp->dirent_offset);
+		}
+
+		reclen = ALIGN(sizeof(struct ksmbd_dirent) + de->namelen,
+			       sizeof(__le64));
+		dir_fp->dirent_offset += reclen;
+
+		if (dir_fp->readdir_data.file_attr &
+			SMB_SEARCH_ATTRIBUTE_DIRECTORY && de->d_type != DT_DIR)
+			continue;
+
+		ksmbd_kstat.kstat = &kstat;
+
+		if (de->namelen > NAME_MAX) {
+			pr_err("filename length exceeds 255 bytes.\n");
+			continue;
+		}
+
+		/* dot entries were already emitted above */
+		if (!strncmp(de->name, ".", de->namelen) ||
+		    !strncmp(de->name, "..", de->namelen))
+			continue;
+
+		memcpy(d_info.smb1_name, de->name, de->namelen);
+		d_info.smb1_name[de->namelen] = '\0';
+		d_info.name = (const char *)d_info.smb1_name;
+		d_info.name_len = de->namelen;
+		rc = ksmbd_vfs_readdir_name(work,
+					    file_mnt_idmap(dir_fp->filp),
+					    &ksmbd_kstat,
+					    de->name,
+					    de->namelen,
+					    dirpath);
+		if (rc) {
+			ksmbd_debug(SMB, "Cannot read dirent: %d\n", rc);
+			continue;
+		}
+
+		if (ksmbd_share_veto_filename(share, d_info.name)) {
+			ksmbd_debug(SMB, "Veto filename %s\n", d_info.name);
+			continue;
+		}
+
+		if (match_pattern(d_info.name, d_info.name_len, srch_ptr)) {
+			rc = smb_populate_readdir_entry(conn,
+				le16_to_cpu(req_params->InformationLevel),
+				&d_info,
+				&ksmbd_kstat);
+			if (rc == -ENOSPC)
+				break;
+			else if (rc)
+				goto err_out;
+		}
+	} while (d_info.out_buf_len >= 0);
+
+	if (!d_info.data_count && *srch_ptr) {
+		ksmbd_debug(SMB, "There is no entry matched with the search pattern\n");
+		rc = -ENOENT;
+		goto err_out;
+	}
+
+	/* out_buf_len < 0: last entry did not fit; rewind so find_next resends it */
+	if (d_info.out_buf_len < 0)
+		dir_fp->dirent_offset -= reclen;
+
+	params = (struct smb_com_trans2_ffirst_rsp_parms *)((char *)rsp +
+			sizeof(struct smb_com_trans2_rsp));
+	params->SearchHandle = dir_fp->volatile_id;
+	params->SearchCount = cpu_to_le16(d_info.num_entry);
+	params->LastNameOffset = cpu_to_le16(d_info.last_entry_offset);
+
+	if (d_info.out_buf_len < 0) {
+		ksmbd_debug(SMB, "continue search\n");
+		params->EndofSearch = cpu_to_le16(0);
+	} else {
+		ksmbd_debug(SMB, "end of search\n");
+		params->EndofSearch = cpu_to_le16(1);
+		path_put(&(dir_fp->filp->f_path));
+		if (le16_to_cpu(req_params->SearchFlags) &
+				CIFS_SEARCH_CLOSE_AT_END)
+			ksmbd_close_fd(work, dir_fp->volatile_id);
+	}
+	params->EAErrorOffset = cpu_to_le16(0);
+
+	rsp_hdr->WordCount = 0x0A;
+	rsp->t2.TotalParameterCount = cpu_to_le16(params_count);
+	rsp->t2.TotalDataCount = cpu_to_le16(d_info.data_count);
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = cpu_to_le16(params_count);
+	rsp->t2.ParameterOffset =
+		cpu_to_le16(sizeof(struct smb_com_trans2_rsp) - 4);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = cpu_to_le16(d_info.data_count);
+	rsp->t2.DataOffset = cpu_to_le16(sizeof(struct smb_com_trans2_rsp) +
+		params_count + data_alignment_offset - 4);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	rsp->Pad = 0;
+	rsp->ByteCount = cpu_to_le16(d_info.data_count +
+		params_count + 1 /*pad*/ + data_alignment_offset);
+	memset((char *)rsp + sizeof(struct smb_com_trans2_rsp) + params_count,
+			'\0', 2);
+	inc_resp_size(work, (10 * 2 + d_info.data_count +
+				params_count + 1 + data_alignment_offset));
+	kfree(srch_ptr);
+	kfree(d_info.smb1_name);
+	ksmbd_revert_fsids(work);
+	return 0;
+
+err_free_kernpath:
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+err_free_dirpath:
+	kfree(dirpath);
+err_out:
+	/* map errno to NT status; the error reply is still sent (return 0) */
+	if (rc == -EINVAL)
+		rsp_hdr->Status.CifsError = STATUS_INVALID_PARAMETER;
+	else if (rc == -EACCES || rc == -EXDEV)
+		rsp_hdr->Status.CifsError = STATUS_ACCESS_DENIED;
+	else if (rc == -ENOENT)
+		rsp_hdr->Status.CifsError = STATUS_NO_SUCH_FILE;
+	else if (rc == -EBADF)
+		rsp_hdr->Status.CifsError = STATUS_FILE_CLOSED;
+	else if (rc == -ENOMEM)
+		rsp_hdr->Status.CifsError = STATUS_NO_MEMORY;
+	else if (rc == -EFAULT)
+		rsp_hdr->Status.CifsError = STATUS_INVALID_LEVEL;
+	if (!rsp->hdr.Status.CifsError)
+		rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+
+	if (dir_fp) {
+		if (dir_fp->readdir_data.dirent) {
+			free_page((unsigned long)(dir_fp->readdir_data.dirent));
+			dir_fp->readdir_data.dirent = NULL;
+		}
+		path_put(&(dir_fp->filp->f_path));
+		ksmbd_close_fd(work, dir_fp->volatile_id);
+	}
+
+	kfree(srch_ptr);
+	kfree(d_info.smb1_name);
+	ksmbd_revert_fsids(work);
+	return 0;
+}
+
+/**
+ * find_next() - smb next readdir command (TRANS2_FIND_NEXT2)
+ * @work:	smb work containing find next request params
+ *
+ * if directory has many entries, find first can't read it fully.
+ * find next might be called multiple times to read remaining dir entries
+ * using the search handle returned by find_first().
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int find_next(struct ksmbd_work *work)
+{
+	struct smb_hdr *rsp_hdr = work->response_buf;
+	struct ksmbd_conn *conn = work->conn;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct smb_com_trans2_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct smb_com_trans2_fnext_req_params *req_params;
+	struct smb_com_trans2_fnext_rsp_params *params = NULL;
+	struct ksmbd_dirent *de;
+	struct ksmbd_file *dir_fp;
+	struct kstat kstat;
+	struct ksmbd_kstat ksmbd_kstat;
+	struct ksmbd_dir_info d_info;
+	int params_count = sizeof(struct smb_com_trans2_fnext_rsp_params);
+	int data_alignment_offset = 0;
+	int rc = 0, reclen = 0;
+	__u16 sid;
+	int header_size, srch_cnt, struct_sz;
+	unsigned int maxlen, offset;
+
+	memset(&d_info, 0, sizeof(struct ksmbd_dir_info));
+
+	/* bounds-check the request parameter block against the PDU length */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->ParameterOffset) + 4;
+
+	if (offset > maxlen) {
+		rsp_hdr->Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	req_params = (struct smb_com_trans2_fnext_req_params *)
+		     (work->request_buf + offset);
+	sid = req_params->SearchHandle;
+
+	/* resume the enumeration state created by find_first() */
+	dir_fp = ksmbd_lookup_fd_fast(work, sid);
+	if (!dir_fp) {
+		ksmbd_debug(SMB, "error invalid sid\n");
+		rc = -EINVAL;
+		goto err_out;
+	}
+
+	set_ctx_actor(&dir_fp->readdir_data.ctx, ksmbd_fill_dirent);
+
+	/* response data block is 4-byte aligned after the parameter block */
+	if (params_count % 4)
+		data_alignment_offset = 4 - params_count % 4;
+
+	d_info.smb1_name = kmalloc(NAME_MAX + 1, KSMBD_DEFAULT_GFP);
+	if (!d_info.smb1_name) {
+		rc = -ENOMEM;
+		goto err_out;
+	}
+	d_info.wptr = (char *)((char *)rsp + sizeof(struct smb_com_trans2_rsp) +
+			params_count + data_alignment_offset);
+
+	header_size = sizeof(struct smb_com_trans2_rsp) + params_count +
+		data_alignment_offset;
+
+	srch_cnt = le16_to_cpu(req_params->SearchCount);
+	struct_sz = readdir_info_level_struct_sz(le16_to_cpu(req_params->InformationLevel));
+
+	if (struct_sz < 0) {
+		rc = -EFAULT;
+		goto err_out;
+	}
+
+	d_info.out_buf_len = min_t(int, srch_cnt * struct_sz + header_size,
+				   MAX_CIFS_LOOKUP_BUFFER_SIZE - header_size);
+	do {
+		/* refill the dirent page whenever it has been consumed */
+		if (dir_fp->dirent_offset >= dir_fp->readdir_data.used) {
+			dir_fp->dirent_offset = 0;
+			dir_fp->readdir_data.used = 0;
+			rc = iterate_dir(dir_fp->filp,
+					 &dir_fp->readdir_data.ctx);
+			if (rc < 0) {
+				ksmbd_debug(SMB, "err : %d\n", rc);
+				goto err_out;
+			}
+
+			if (!dir_fp->readdir_data.used) {
+				/* directory exhausted */
+				free_page((unsigned long)
+						(dir_fp->readdir_data.dirent));
+				dir_fp->readdir_data.dirent = NULL;
+				break;
+			}
+
+			de = (struct ksmbd_dirent *)
+				((char *)dir_fp->readdir_data.dirent);
+		} else {
+			de = (struct ksmbd_dirent *)
+				((char *)dir_fp->readdir_data.dirent +
+				 dir_fp->dirent_offset);
+		}
+
+		reclen = ALIGN(sizeof(struct ksmbd_dirent) + de->namelen,
+			       sizeof(__le64));
+		dir_fp->dirent_offset += reclen;
+
+		if (dir_fp->readdir_data.file_attr &
+			SMB_SEARCH_ATTRIBUTE_DIRECTORY && de->d_type != DT_DIR)
+			continue;
+
+		if (dir_fp->readdir_data.file_attr &
+			SMB_SEARCH_ATTRIBUTE_ARCHIVE && (de->d_type == DT_DIR ||
+			(!strcmp(de->name, ".") || !strcmp(de->name, ".."))))
+			continue;
+
+		ksmbd_kstat.kstat = &kstat;
+
+		if (de->namelen > NAME_MAX) {
+			pr_err("filename length exceeds 255 bytes.\n");
+			continue;
+		}
+		memcpy(d_info.smb1_name, de->name, de->namelen);
+		d_info.smb1_name[de->namelen] = '\0';
+		d_info.name = (const char *)d_info.smb1_name;
+		d_info.name_len = de->namelen;
+		rc = ksmbd_vfs_readdir_name(work,
+					    file_mnt_idmap(dir_fp->filp),
+					    &ksmbd_kstat,
+					    de->name,
+					    de->namelen,
+					    dir_fp->filename);
+		if (rc) {
+			ksmbd_debug(SMB, "Err while dirent read rc = %d\n", rc);
+			rc = 0;
+			continue;
+		}
+
+		if (ksmbd_share_veto_filename(share, d_info.name)) {
+			ksmbd_debug(SMB, "file(%s) is invisible by setting as veto file\n",
+				    d_info.name);
+			continue;
+		}
+
+		ksmbd_debug(SMB, "filename string = %.*s\n", d_info.name_len,
+			    d_info.name);
+		rc = smb_populate_readdir_entry(conn,
+						le16_to_cpu(req_params->InformationLevel),
+						&d_info, &ksmbd_kstat);
+		if (rc == -ENOSPC)
+			break;
+		else if (rc)
+			goto err_out;
+
+	} while (d_info.out_buf_len >= 0);
+
+	/* out_buf_len < 0: last entry did not fit; rewind so it is resent */
+	if (d_info.out_buf_len < 0)
+		dir_fp->dirent_offset -= reclen;
+
+	/*
+	 * NOTE(review): offsets below use sizeof(struct smb_com_trans_rsp)
+	 * while find_first() uses smb_com_trans2_rsp — confirm the two
+	 * structs are layout-compatible for these fields.
+	 */
+	params = (struct smb_com_trans2_fnext_rsp_params *)
+		((char *)rsp + sizeof(struct smb_com_trans_rsp));
+	params->SearchCount = cpu_to_le16(d_info.num_entry);
+
+	if (d_info.out_buf_len < 0) {
+		ksmbd_debug(SMB, "continue search\n");
+		params->EndofSearch = cpu_to_le16(0);
+		params->LastNameOffset = cpu_to_le16(d_info.last_entry_offset);
+	} else {
+		ksmbd_debug(SMB, "end of search\n");
+		params->EndofSearch = cpu_to_le16(1);
+		params->LastNameOffset = cpu_to_le16(0);
+		path_put(&(dir_fp->filp->f_path));
+		if (le16_to_cpu(req_params->SearchFlags) &
+				CIFS_SEARCH_CLOSE_AT_END)
+			ksmbd_close_fd(work, sid);
+	}
+	params->EAErrorOffset = cpu_to_le16(0);
+
+	rsp_hdr->WordCount = 0x0A;
+	rsp->t2.TotalParameterCount = cpu_to_le16(params_count);
+	rsp->t2.TotalDataCount = cpu_to_le16(d_info.data_count);
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = cpu_to_le16(params_count);
+	rsp->t2.ParameterOffset =
+		cpu_to_le16(sizeof(struct smb_com_trans_rsp) - 4);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = cpu_to_le16(d_info.data_count);
+	rsp->t2.DataOffset = cpu_to_le16(sizeof(struct smb_com_trans_rsp) +
+		params_count + data_alignment_offset - 4);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	rsp->Pad = 0;
+	rsp->ByteCount = cpu_to_le16(d_info.data_count + params_count + 1 +
+		data_alignment_offset);
+	memset((char *)rsp + sizeof(struct smb_com_trans_rsp) +
+		params_count, '\0', data_alignment_offset);
+	inc_resp_size(work, (10 * 2 + d_info.data_count +
+		params_count + 1 + data_alignment_offset));
+	kfree(d_info.smb1_name);
+	ksmbd_fd_put(work, dir_fp);
+	return 0;
+
+err_out:
+	/* map errno to NT status; the error reply is still sent (return 0) */
+	if (rc == -EINVAL)
+		rsp_hdr->Status.CifsError = STATUS_INVALID_PARAMETER;
+	else if (rc == -EACCES || rc == -EXDEV)
+		rsp_hdr->Status.CifsError = STATUS_ACCESS_DENIED;
+	else if (rc == -ENOENT)
+		rsp_hdr->Status.CifsError = STATUS_NO_SUCH_FILE;
+	else if (rc == -EBADF)
+		rsp_hdr->Status.CifsError = STATUS_FILE_CLOSED;
+	else if (rc == -ENOMEM)
+		rsp_hdr->Status.CifsError = STATUS_NO_MEMORY;
+	else if (rc == -EFAULT)
+		rsp_hdr->Status.CifsError = STATUS_INVALID_LEVEL;
+	if (!rsp->hdr.Status.CifsError)
+		rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+
+	if (dir_fp) {
+		if (dir_fp->readdir_data.dirent) {
+			free_page((unsigned long)(dir_fp->readdir_data.dirent));
+			dir_fp->readdir_data.dirent = NULL;
+		}
+		path_put(&(dir_fp->filp->f_path));
+		ksmbd_close_fd(work, sid);
+	}
+
+	kfree(d_info.smb1_name);
+	return 0;
+}
+
+/**
+ * smb_set_alloc_size() - set file truncate method using trans2
+ *		set file info command - file allocation info level
+ * @work:	smb work containing set file info command buffer
+ *
+ * Truncates (or extends) the file identified by the request Fid to the
+ * requested allocation size, rounded up to alloc_roundup_size when that
+ * module parameter is non-zero, then fills the trans2 response.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_alloc_size(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_sfi_req *req;
+	struct smb_com_trans2_sfi_rsp *rsp;
+	struct file_allocation_info *allocinfo;
+	struct kstat stat;
+	struct ksmbd_file *fp = NULL;
+	loff_t newsize;
+	int err = 0;
+
+	req = (struct smb_com_trans2_sfi_req *)work->request_buf;
+	rsp = (struct smb_com_trans2_sfi_rsp *)work->response_buf;
+
+	/* data block sits DataOffset bytes past the SMB protocol marker */
+	allocinfo =  (struct file_allocation_info *)
+		(((char *) &req->hdr.Protocol) + le16_to_cpu(req->DataOffset));
+	newsize = le64_to_cpu(allocinfo->AllocationSize);
+
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		pr_err("failed to get filp for fid %u\n", req->Fid);
+		rsp->hdr.Status.CifsError = STATUS_FILE_CLOSED;
+		return -ENOENT;
+	}
+
+	err = ksmbd_vfs_getattr(&fp->filp->f_path, &stat);
+	if (err) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		ksmbd_fd_put(work, fp);
+		return err;
+	}
+
+	if (newsize == stat.size) /* nothing to do */
+		goto out;
+
+	/* Round up size */
+	if (alloc_roundup_size) {
+		newsize = div64_u64(newsize + alloc_roundup_size - 1,
+				    alloc_roundup_size);
+		newsize *= alloc_roundup_size;
+	}
+
+	err = ksmbd_vfs_truncate(work, fp, newsize);
+	if (err) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		ksmbd_fd_put(work, fp);
+		return err;
+	}
+
+out:
+	ksmbd_debug(SMB, "fid %u, truncated to newsize %llu\n", req->Fid,
+		    newsize);
+
+	/* build the trans2 set-file-info response (no data block) */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 3 pad (1 pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Reserved2 = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+	ksmbd_fd_put(work, fp);
+
+	return 0;
+}
+
+/**
+ * smb_set_file_size_finfo() - set file truncate method using trans2
+ *		set file info command
+ * @work:	smb work containing set file info command buffer
+ *
+ * Truncates the file identified by the request Fid to the end-of-file
+ * value carried in the request data block, then fills the trans2
+ * response (parameter block only, no data block).
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_file_size_finfo(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_sfi_req *req = work->request_buf;
+	struct smb_com_trans2_sfi_rsp *rsp = work->response_buf;
+	struct file_end_of_file_info *eof_info;
+	struct ksmbd_file *fp;
+	loff_t eof;
+	int rc;
+
+	/* data block sits DataOffset bytes past the SMB protocol marker */
+	eof_info =  (struct file_end_of_file_info *)
+		(((char *) &req->hdr.Protocol) + le16_to_cpu(req->DataOffset));
+
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		pr_err("failed to get filp for fid %u\n", req->Fid);
+		rsp->hdr.Status.CifsError = STATUS_FILE_CLOSED;
+		return -ENOENT;
+	}
+
+	eof = le64_to_cpu(eof_info->FileSize);
+	rc = ksmbd_vfs_truncate(work, fp, eof);
+	if (rc) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		ksmbd_fd_put(work, fp);
+		return rc;
+	}
+
+	ksmbd_debug(SMB, "fid %u, truncated to newsize %lld\n", req->Fid,
+		    eof);
+
+	/* build the trans2 set-file-info response */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 3 pad (1 pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Reserved2 = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+	ksmbd_fd_put(work, fp);
+
+	return 0;
+}
+
+/**
+ * query_file_info_pipe() - query file info of IPC pipe
+ *		using query file info command
+ * @work:	smb work containing query file info command buffer
+ * @req_params:	buffer containing Trans2 Query File Information parameters
+ *
+ * Only SMB_QUERY_FILE_STANDARD_INFO is supported for pipes; a fixed,
+ * synthetic file_standard_info block is returned.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int query_file_info_pipe(struct ksmbd_work *work,
+				struct smb_trans2_qfi_req_params *req_params)
+{
+	struct smb_hdr *rsp_hdr = work->response_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct file_standard_info *standard_info;
+	char *ptr;
+
+	if (le16_to_cpu(req_params->InformationLevel) !=
+	    SMB_QUERY_FILE_STANDARD_INFO) {
+		ksmbd_debug(SMB, "query file info for info %u not supported\n",
+				le16_to_cpu(req_params->InformationLevel));
+		rsp_hdr->Status.CifsError = STATUS_NOT_SUPPORTED;
+		return -EOPNOTSUPP;
+	}
+
+	ksmbd_debug(SMB, "SMB_QUERY_FILE_STANDARD_INFO\n");
+	rsp_hdr->WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = cpu_to_le16(sizeof(struct file_standard_info));
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = cpu_to_le16(2);
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = cpu_to_le16(sizeof(struct file_standard_info));
+	rsp->t2.DataOffset = cpu_to_le16(60);
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+	/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(2 + sizeof(struct file_standard_info) + 3);
+	rsp->Pad = 0;
+	/* parameter block: 2 bytes + 2 pad, zeroed; data block follows */
+	ptr = (char *)&rsp->Pad + 1;
+	memset(ptr, 0, 4);
+	standard_info = (struct file_standard_info *)(ptr + 4);
+	standard_info->AllocationSize = cpu_to_le64(4096);
+	standard_info->EndOfFile = 0;
+	standard_info->NumberOfLinks = cpu_to_le32(1);
+	standard_info->Directory = 0;
+	/*
+	 * was assigned 0 then immediately overwritten with 1; keep only
+	 * the surviving value
+	 */
+	standard_info->DeletePending = 1;
+	inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+
+	return 0;
+}
+
+/**
+ * query_file_info() - query file info of file/dir
+ *		using query file info command
+ * @work:	smb work containing query file info command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int query_file_info(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_hdr *rsp_hdr = work->response_buf;
+	struct smb_com_trans2_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct smb_trans2_qfi_req_params *req_params;
+	unsigned int maxlen, offset;
+	struct ksmbd_file *fp;
+	struct kstat st;
+	char *ptr;
+	int rc = 0;
+	u64 time;
+
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->ParameterOffset) + 4;
+	if (offset > maxlen) {
+		rsp_hdr->Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+	req_params = (struct smb_trans2_qfi_req_params *)
+		     (work->request_buf + offset);
+
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_PIPE)) {
+		ksmbd_debug(SMB, "query file info for IPC srvsvc\n");
+		return query_file_info_pipe(work, req_params);
+	}
+
+	fp = ksmbd_lookup_fd_fast(work, req_params->Fid);
+	if (!fp) {
+		pr_err("failed to get filp for fid %u\n", req_params->Fid);
+		rsp_hdr->Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+		rc = -EIO;
+		goto err_out;
+	}
+
+	rc = vfs_getattr(&fp->filp->f_path, &st, STATX_BASIC_STATS,
+			 AT_STATX_SYNC_AS_STAT);
+	if (rc)
+		goto err_out;
+
+	switch (le16_to_cpu(req_params->InformationLevel)) {
+
+	case SMB_QUERY_FILE_STANDARD_INFO:
+	{
+		struct file_standard_info *standard_info;
+		unsigned int delete_pending;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_STANDARD_INFO\n");
+		delete_pending = ksmbd_inode_pending_delete(fp);
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_standard_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount =
+			cpu_to_le16(sizeof(struct file_standard_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_standard_info) + 3);
+		rsp->Pad = 0;
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		standard_info = (struct file_standard_info *)(ptr + 4);
+		standard_info->AllocationSize = cpu_to_le64(st.blocks << 9);
+		standard_info->EndOfFile = cpu_to_le64(st.size);
+		standard_info->NumberOfLinks = cpu_to_le32(get_nlink(&st) -
+			delete_pending);
+		standard_info->DeletePending = delete_pending;
+		standard_info->Directory = S_ISDIR(st.mode) ? 1 : 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_BASIC_INFO:
+	{
+		struct file_basic_info *basic_info;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_BASIC_INFO\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_basic_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(sizeof(struct file_basic_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_basic_info) + 3);
+		rsp->Pad = 0;
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		basic_info = (struct file_basic_info *)(ptr + 4);
+		basic_info->CreationTime = cpu_to_le64(fp->create_time);
+		time = ksmbd_UnixTimeToNT(st.atime);
+		basic_info->LastAccessTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.mtime);
+		basic_info->LastWriteTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.ctime);
+		basic_info->ChangeTime = cpu_to_le64(time);
+		basic_info->Attributes = S_ISDIR(st.mode) ?
+			ATTR_DIRECTORY_LE : ATTR_ARCHIVE_LE;
+		basic_info->Pad = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_EA_INFO:
+	{
+		struct file_ea_info *ea_info;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_EA_INFO\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_ea_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(sizeof(struct file_ea_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_ea_info) + 3);
+		rsp->Pad = 0;
+		/* lets set EA info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		ea_info = (struct file_ea_info *)(ptr + 4);
+		ea_info->EaSize = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_UNIX_BASIC:
+	{
+		struct file_unix_basic_info *uinfo;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_UNIX_BASIC\n");
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_unix_basic_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount =
+			cpu_to_le16(sizeof(struct file_unix_basic_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_unix_basic_info)
+				+ 3);
+		rsp->Pad = 0;
+		/* lets set unix info info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		uinfo = (struct file_unix_basic_info *)(ptr + 4);
+		init_unix_info(uinfo, &init_user_ns, &st);
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_NAME_INFO:
+	{
+		struct file_name_info *name_info;
+		size_t len, rsp_offset;
+		int uni_filename_len;
+		char *filename;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_NAME_INFO\n");
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		name_info = (struct file_name_info *)(ptr + 4);
+
+		filename = convert_to_nt_pathname(work->tcon->share_conf,
+						  &fp->filp->f_path);
+		if (!filename) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		len = strlen(filename);
+		rsp_offset = offsetof(struct smb_com_trans2_rsp, Buffer) +
+			     offsetof(struct file_name_info, FileName) + len;
+		if (rsp_offset > work->response_sz) {
+			rc = -ENOMEM;
+			goto err_out;
+		}
+		uni_filename_len = smbConvertToUTF16(
+				(__le16 *)name_info->FileName,
+				filename, len, conn->local_nls, 0);
+		kfree(filename);
+		uni_filename_len *= 2;
+		name_info->FileNameLength = cpu_to_le32(uni_filename_len);
+
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount = cpu_to_le16(uni_filename_len + 4);
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(uni_filename_len + 4);
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount = cpu_to_le16(2 + uni_filename_len + 4 + 3);
+		rsp->Pad = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	case SMB_QUERY_FILE_ALL_INFO:
+	{
+		struct file_all_info *ainfo;
+		unsigned int delete_pending;
+
+		ksmbd_debug(SMB, "SMB_QUERY_FILE_ALL_INFO\n");
+		delete_pending = ksmbd_inode_pending_delete(fp);
+		rsp_hdr->WordCount = 10;
+		rsp->t2.TotalParameterCount = cpu_to_le16(2);
+		rsp->t2.TotalDataCount =
+			cpu_to_le16(sizeof(struct file_all_info));
+		rsp->t2.Reserved = 0;
+		rsp->t2.ParameterCount = cpu_to_le16(2);
+		rsp->t2.ParameterOffset = cpu_to_le16(56);
+		rsp->t2.ParameterDisplacement = 0;
+		rsp->t2.DataCount = cpu_to_le16(sizeof(struct file_all_info));
+		rsp->t2.DataOffset = cpu_to_le16(60);
+		rsp->t2.DataDisplacement = 0;
+		rsp->t2.SetupCount = 0;
+		rsp->t2.Reserved1 = 0;
+		/*2 for parameter count & 3 pad (1pad1 + 2 pad2)*/
+		rsp->ByteCount =
+			cpu_to_le16(2 + sizeof(struct file_all_info) + 3);
+		rsp->Pad = 0;
+		/* lets set all info info */
+		ptr = (char *)&rsp->Pad + 1;
+		memset(ptr, 0, 4);
+		ainfo = (struct file_all_info *)(ptr + 4);
+		ainfo->CreationTime = cpu_to_le64(fp->create_time);
+		time = ksmbd_UnixTimeToNT(st.atime);
+		ainfo->LastAccessTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.mtime);
+		ainfo->LastWriteTime = cpu_to_le64(time);
+		time = ksmbd_UnixTimeToNT(st.ctime);
+		ainfo->ChangeTime = cpu_to_le64(time);
+		ainfo->Attributes = cpu_to_le32(S_ISDIR(st.mode) ?
+				ATTR_DIRECTORY : ATTR_ARCHIVE);
+		ainfo->Pad1 = 0;
+		ainfo->AllocationSize = cpu_to_le64(st.blocks << 9);
+		ainfo->EndOfFile = cpu_to_le64(st.size);
+		ainfo->NumberOfLinks = cpu_to_le32(get_nlink(&st) -
+			delete_pending);
+		ainfo->DeletePending = delete_pending;
+		ainfo->Directory = S_ISDIR(st.mode) ? 1 : 0;
+		ainfo->Pad2 = 0;
+		ainfo->EASize = 0;
+		ainfo->FileNameLength = 0;
+		inc_resp_size(work, 10 * 2 + le16_to_cpu(rsp->ByteCount));
+		break;
+	}
+	default:
+		pr_err("query path info not implemnted for %x\n",
+		       le16_to_cpu(req_params->InformationLevel));
+		rsp_hdr->Status.CifsError = STATUS_NOT_SUPPORTED;
+		rc = -EINVAL;
+		goto err_out;
+
+	}
+
+err_out:
+	ksmbd_fd_put(work, fp);
+	return rc;
+}
+
+/**
+ * smb_set_unix_fileinfo() - set smb unix file info(setattr)
+ * @work:	smb work containing unix basic info buffer
+ *
+ * Parses a client-supplied file_unix_basic_info payload and applies the
+ * resulting attributes via ksmbd_vfs_setattr().
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_unix_fileinfo(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_sfi_req *req = work->request_buf;
+	struct smb_com_trans2_sfi_rsp *rsp = work->response_buf;
+	struct file_unix_basic_info *unix_info;
+	struct ksmbd_file *fp;
+	struct iattr attrs;
+	int err = 0;
+	unsigned int maxlen, offset;
+
+	if (ksmbd_override_fsids(work))
+		return -ENOMEM;
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		ksmbd_revert_fsids(work);
+		return -ENOENT;
+	}
+
+	/*
+	 * The whole file_unix_basic_info payload must lie inside the
+	 * request buffer, not just its first byte; also drop the fd
+	 * reference on this error path (it was previously leaked).
+	 */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset > maxlen ||
+	    sizeof(struct file_unix_basic_info) > maxlen - offset) {
+		ksmbd_fd_put(work, fp);
+		ksmbd_revert_fsids(work);
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	unix_info = (struct file_unix_basic_info *)((char *)req + offset);
+
+	attrs.ia_valid = 0;
+	attrs.ia_mode = 0;
+	err = unix_info_to_attr(unix_info, &init_user_ns, &attrs);
+	ksmbd_fd_put(work, fp);
+	ksmbd_revert_fsids(work);
+	if (err)
+		goto out;
+
+	err = ksmbd_vfs_setattr(work, NULL, (u64)req->Fid, &attrs);
+	if (err)
+		goto out;
+
+	/* setattr success, prepare response */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 3 pad (1 pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Reserved2 = 0;
+	inc_resp_size(work,
+		      rsp->hdr.WordCount * 2 + le16_to_cpu(rsp->ByteCount));
+
+out:
+	if (err) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return err;
+	}
+	return 0;
+}
+
+/**
+ * smb_set_disposition() - set file disposition method using trans2
+ *		using set file info command
+ * @work:	smb work containing set file info command buffer
+ *
+ * A non-zero disposition byte requests delete-on-close for the fid;
+ * zero clears a pending delete.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_disposition(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_sfi_req *req = work->request_buf;
+	struct smb_com_trans2_sfi_rsp *rsp = work->response_buf;
+	char *disp_info;
+	struct ksmbd_file *fp;
+	unsigned int maxlen, offset;
+	int ret = 0;
+
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		ksmbd_debug(SMB, "Invalid id for close: %d\n", req->Fid);
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	/*
+	 * Bounds-check the client-controlled DataOffset before reading
+	 * the one-byte disposition flag (was dereferenced unchecked).
+	 * (char *)req + offset equals &req->hdr.Protocol + DataOffset.
+	 */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		ret = -EINVAL;
+		goto err_out;
+	}
+	disp_info = (char *)req + offset;
+
+	if (*disp_info) {
+		/* delete-on-close needs an NT open on a writable target */
+		if (!fp->is_nt_open) {
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+			ret = -EPERM;
+			goto err_out;
+		}
+
+		if (!(file_inode(fp->filp)->i_mode & 0222)) {
+			rsp->hdr.Status.CifsError = STATUS_CANNOT_DELETE;
+			ret = -EPERM;
+			goto err_out;
+		}
+
+		/* a directory may only be marked when it is empty */
+		if (S_ISDIR(file_inode(fp->filp)->i_mode) &&
+		    ksmbd_vfs_empty_dir(fp) == -ENOTEMPTY) {
+			rsp->hdr.Status.CifsError = STATUS_DIRECTORY_NOT_EMPTY;
+			ret = -ENOTEMPTY;
+			goto err_out;
+		}
+
+		ksmbd_set_inode_pending_delete(fp);
+	} else {
+		ksmbd_clear_inode_pending_delete(fp);
+	}
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 3 pad (1 pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Reserved2 = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+err_out:
+	ksmbd_fd_put(work, fp);
+	return ret;
+}
+
+/**
+ * smb_set_time_fileinfo() - set file time method using trans2
+ *		using set file info command
+ * @work:	smb work containing set file info command buffer
+ *
+ * Applies the non-zero atime/ctime/mtime fields of a file_basic_info
+ * payload; zero fields are left untouched.
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_set_time_fileinfo(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_sfi_req *req = work->request_buf;
+	struct smb_com_trans2_sfi_rsp *rsp = work->response_buf;
+	struct file_basic_info *info;
+	struct iattr attrs;
+	unsigned int maxlen, offset;
+	int err = 0;
+
+	/*
+	 * Bounds-check the client-controlled DataOffset: the whole
+	 * file_basic_info block must fit in the request (it was read
+	 * unchecked before).  (char *)req + offset equals
+	 * &req->hdr.Protocol + DataOffset.
+	 */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset > maxlen ||
+	    sizeof(struct file_basic_info) > maxlen - offset) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+	info = (struct file_basic_info *)((char *)req + offset);
+
+	attrs.ia_valid = 0;
+	if (le64_to_cpu(info->LastAccessTime)) {
+		attrs.ia_atime = smb_NTtimeToUnix(info->LastAccessTime);
+		attrs.ia_valid |= (ATTR_ATIME | ATTR_ATIME_SET);
+	}
+
+	if (le64_to_cpu(info->ChangeTime)) {
+		attrs.ia_ctime = smb_NTtimeToUnix(info->ChangeTime);
+		attrs.ia_valid |= ATTR_CTIME;
+	}
+
+	if (le64_to_cpu(info->LastWriteTime)) {
+		attrs.ia_mtime = smb_NTtimeToUnix(info->LastWriteTime);
+		attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
+	}
+	/* TODO: check dos mode and acl bits if req->Attributes nonzero */
+
+	/* nothing requested: still answer success */
+	if (!attrs.ia_valid)
+		goto done;
+
+	err = ksmbd_vfs_setattr(work, NULL, (u64)req->Fid, &attrs);
+	if (err) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return err;
+	}
+
+done:
+	ksmbd_debug(SMB, "fid %u, setattr done\n", req->Fid);
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 3 pad (1 pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Reserved2 = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+	return 0;
+}
+
+/**
+ * smb_fileinfo_rename() - rename method using trans2 set file info command
+ * @work:	smb work containing set file info command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int smb_fileinfo_rename(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_sfi_req *req = work->request_buf;
+	struct smb_com_trans2_sfi_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct set_file_rename *info;
+	struct ksmbd_file *fp;
+	unsigned int maxlen, offset;
+	char *newname;
+	int rc = 0, flags;
+
+	/*
+	 * Validate up front that the fixed-size set_file_rename header
+	 * plus at least one byte of target name fit in the request, so
+	 * neither info->overwrite nor target_name is read out of bounds.
+	 * Doing this before the fd lookup also removes the fd leak the
+	 * late length check used to have (it returned without fd_put).
+	 */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->DataOffset) + 4;
+	if (offset > maxlen ||
+	    sizeof(struct set_file_rename) >= maxlen - offset) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+	info = (struct set_file_rename *)(work->request_buf + offset);
+
+	fp = ksmbd_lookup_fd_fast(work, req->Fid);
+	if (!fp) {
+		pr_err("failed to get filp for fid %u\n", req->Fid);
+		rsp->hdr.Status.CifsError = STATUS_FILE_CLOSED;
+		return -ENOENT;
+	}
+
+	/* non-zero overwrite allows replacing an existing target */
+	flags = info->overwrite ? 0 : RENAME_NOREPLACE;
+	offset += sizeof(struct set_file_rename);
+
+	newname = smb_get_name(share, info->target_name, maxlen - offset,
+			       work, 0);
+	if (IS_ERR(newname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		rc = PTR_ERR(newname);
+		newname = NULL;
+		goto out;
+	}
+
+	ksmbd_debug(SMB, "new name(%s)\n", newname);
+	rc = smb_common_rename(work, fp, newname, flags);
+	if (rc) {
+		/* map the rename errno to the NTSTATUS sent on the wire */
+		switch (rc) {
+		case -EEXIST:
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_COLLISION;
+			break;
+		case -ENOENT:
+			rsp->hdr.Status.CifsError =
+				NT_STATUS_OBJECT_NAME_NOT_FOUND;
+			break;
+		case -ENOMEM:
+			rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+			break;
+		case -EACCES:
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+			break;
+		default:
+			rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+			break;
+		}
+		goto out;
+	}
+
+	rsp->hdr.WordCount = 10;
+	rsp->t2.TotalParameterCount = cpu_to_le16(2);
+	rsp->t2.TotalDataCount = 0;
+	rsp->t2.Reserved = 0;
+	rsp->t2.ParameterCount = rsp->t2.TotalParameterCount;
+	rsp->t2.ParameterOffset = cpu_to_le16(56);
+	rsp->t2.ParameterDisplacement = 0;
+	rsp->t2.DataCount = rsp->t2.TotalDataCount;
+	rsp->t2.DataOffset = 0;
+	rsp->t2.DataDisplacement = 0;
+	rsp->t2.SetupCount = 0;
+	rsp->t2.Reserved1 = 0;
+
+	/* 3 pad (1 pad1 + 2 pad2)*/
+	rsp->ByteCount = cpu_to_le16(3);
+	rsp->Reserved2 = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2 + 3);
+
+out:
+	ksmbd_fd_put(work, fp);
+	kfree(newname);
+	return rc;
+}
+
+/**
+ * set_file_info() - trans2 set file info command dispatcher
+ * @work:	smb work containing set file info command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+static int set_file_info(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_sfi_req *req = work->request_buf;
+	struct smb_com_trans2_sfi_rsp *rsp = work->response_buf;
+	__u16 info_level = le16_to_cpu(req->InformationLevel);
+	__u16 total_param = le16_to_cpu(req->TotalParameterCount);
+	int err = 0;
+
+	/* the parameter block must at least hold Fid + InformationLevel */
+	if (total_param < 4) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		pr_err("invalid total parameter for info_level 0x%x\n",
+		       total_param);
+		return -EINVAL;
+	}
+
+	/* dispatch on the requested information level */
+	switch (info_level) {
+	case SMB_SET_FILE_EA:
+		err = smb_set_ea(work);
+		break;
+	case SMB_SET_FILE_ALLOCATION_INFO2:
+	case SMB_SET_FILE_ALLOCATION_INFO:
+		err = smb_set_alloc_size(work);
+		break;
+	case SMB_SET_FILE_END_OF_FILE_INFO2:
+	case SMB_SET_FILE_END_OF_FILE_INFO:
+		err = smb_set_file_size_finfo(work);
+		break;
+	case SMB_SET_FILE_UNIX_BASIC:
+		err = smb_set_unix_fileinfo(work);
+		break;
+	case SMB_SET_FILE_DISPOSITION_INFO:
+	case SMB_SET_FILE_DISPOSITION_INFORMATION:
+		err = smb_set_disposition(work);
+		break;
+	case SMB_SET_FILE_BASIC_INFO2:
+	case SMB_SET_FILE_BASIC_INFO:
+		err = smb_set_time_fileinfo(work);
+		break;
+	case SMB_SET_FILE_RENAME_INFORMATION:
+		err = smb_fileinfo_rename(work);
+		break;
+	default:
+		ksmbd_debug(SMB, "info level = %x not implemented yet\n",
+			    info_level);
+		rsp->hdr.Status.CifsError = STATUS_NOT_IMPLEMENTED;
+		return -EOPNOTSUPP;
+	}
+
+	if (err < 0)
+		ksmbd_debug(SMB, "info_level 0x%x failed, err %d\n", info_level,
+			    err);
+	return err;
+}
+
+/*
+ * helper to create a directory and set DOS attrs
+ *
+ * Creates @name with @mode via the VFS under the connecting user's
+ * fsids (always reverted on exit).  For shares that persist DOS
+ * attributes, the creation time and ATTR_DIRECTORY are then stored in
+ * an xattr on a best-effort basis.
+ */
+static int smb_common_mkdir(struct ksmbd_work *work, char *name, mode_t mode)
+{
+	struct smb_hdr *rsp = work->response_buf;
+	int err;
+
+	/* read-only tree connections may not create directories */
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB,
+			"returning as user does not have permission to write\n");
+		rsp->Status.CifsError = STATUS_ACCESS_DENIED;
+		return -EACCES;
+	}
+
+	if (ksmbd_override_fsids(work)) {
+		rsp->Status.CifsError = STATUS_NO_MEMORY;
+		return -ENOMEM;
+	}
+
+	err = ksmbd_vfs_mkdir(work, name, mode);
+	if (err) {
+		if (err == -EEXIST) {
+			/* clients that did not negotiate NT status codes
+			 * get an old-style DOS error class/code instead
+			 */
+			if (!(rsp->Flags2 & SMBFLG2_ERR_STATUS)) {
+				rsp->Status.DosError.ErrorClass = ERRDOS;
+				rsp->Status.DosError.Error =
+					cpu_to_le16(ERRnoaccess);
+			} else
+				rsp->Status.CifsError =
+					STATUS_OBJECT_NAME_COLLISION;
+		} else
+			rsp->Status.CifsError = STATUS_DATA_ERROR;
+		goto out;
+	}
+
+	if (test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+		__u64 ctime;
+		struct path path, parent_path;
+		struct xattr_dos_attrib da = {0};
+
+		/* caseless lookup (last arg 1) of the just-created dir */
+		err = ksmbd_vfs_kern_path_locked(work, name, 0,
+						 &parent_path, &path, 1);
+		if (!err) {
+			ctime = ksmbd_UnixTimeToNT(current_time(d_inode(path.dentry)));
+
+			da.version = 4;
+			da.attr = ATTR_DIRECTORY;
+			da.itime = da.create_time = ctime;
+			da.flags = XATTR_DOSINFO_ATTRIB |
+				   XATTR_DOSINFO_CREATE_TIME |
+				   XATTR_DOSINFO_ITIME;
+
+			err = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path.mnt),
+							     &path, &da, false);
+			if (err)
+				ksmbd_debug(SMB, "failed to store creation time in xattr\n");
+			inode_unlock(d_inode(parent_path.dentry));
+			path_put(&path);
+			path_put(&parent_path);
+		}
+		/* xattr storage is best effort; the mkdir itself succeeded */
+		err = 0;
+	}
+
+	rsp->Status.CifsError = STATUS_SUCCESS;
+	rsp->WordCount = 0;
+
+out:
+	ksmbd_revert_fsids(work);
+
+	return err;
+}
+
+/**
+ * create_dir() - trans2 create directory dispatcher
+ * @work:   smb work containing the trans2 request buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+static int create_dir(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_req *req = work->request_buf;
+	struct smb_com_trans2_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	unsigned int maxlen, offset;
+	mode_t mode = S_IALLUGO;
+	char *dirname;
+	int rc;
+
+	/* the directory name lives in the trans2 parameter area */
+	maxlen = get_req_len(req);
+	offset = le16_to_cpu(req->ParameterOffset) + 4;
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	dirname = smb_get_name(share, (char *)req + offset, maxlen - offset,
+			       work, false);
+	if (IS_ERR(dirname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(dirname);
+	}
+
+	rc = smb_common_mkdir(work, dirname, mode);
+
+	rsp->ByteCount = 0;
+	kfree(dirname);
+
+	return rc;
+}
+
+/**
+ * smb_trans2() - handler for trans2 commands
+ * @work:	smb work containing trans2 command buffer
+ *
+ * Validates the setup count, dispatches to the subcommand handler,
+ * then translates any errno returned into an NTSTATUS for the client.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_trans2(struct ksmbd_work *work)
+{
+	struct smb_com_trans2_req *req = work->request_buf;
+	struct smb_hdr *rsp_hdr = work->response_buf;
+	int err = 0;
+	u16 sub_command = le16_to_cpu(req->SubCommand);
+
+	/* at least one setup word for TRANS2 command
+	 *		MS-CIFS, SMB COM TRANSACTION
+	 */
+	if (req->SetupCount < 1) {
+		pr_err("Wrong setup count in SMB_TRANS2 - indicates wrong request\n");
+		rsp_hdr->Status.CifsError = STATUS_UNSUCCESSFUL;
+		return -EINVAL;
+	}
+
+	ksmbd_debug(SMB, "processing trans2 subcommand: %s\n",
+		    smb_trans2_cmd_to_str(sub_command));
+	switch (sub_command) {
+	case TRANS2_FIND_FIRST:
+		err = find_first(work);
+		break;
+	case TRANS2_FIND_NEXT:
+		err = find_next(work);
+		break;
+	case TRANS2_QUERY_FS_INFORMATION:
+		err = query_fs_info(work);
+		break;
+	case TRANS2_QUERY_PATH_INFORMATION:
+		err = query_path_info(work);
+		break;
+	case TRANS2_SET_PATH_INFORMATION:
+		err = set_path_info(work);
+		break;
+	case TRANS2_SET_FS_INFORMATION:
+		err = set_fs_info(work);
+		break;
+	case TRANS2_QUERY_FILE_INFORMATION:
+		err = query_file_info(work);
+		break;
+	case TRANS2_SET_FILE_INFORMATION:
+		err = set_file_info(work);
+		break;
+	case TRANS2_CREATE_DIRECTORY:
+		err = create_dir(work);
+		break;
+	/* DFS referrals and unknown subcommands are unsupported */
+	case TRANS2_GET_DFS_REFERRAL:
+	default:
+		ksmbd_debug(SMB, "sub command 0x%x not implemented yet\n",
+			    sub_command);
+		err = -EINVAL;
+	}
+
+	if (err) {
+		/* map the handler's errno onto the NTSTATUS wire code;
+		 * unimplemented subcommands (-EINVAL) become NOT_SUPPORTED
+		 */
+		switch (err) {
+		case -EINVAL:
+			rsp_hdr->Status.CifsError = STATUS_NOT_SUPPORTED;
+			break;
+		case -ENOMEM:
+			rsp_hdr->Status.CifsError = STATUS_NO_MEMORY;
+			break;
+		case -ENOENT:
+			rsp_hdr->Status.CifsError = STATUS_NO_SUCH_FILE;
+			break;
+		case -EBUSY:
+			rsp_hdr->Status.CifsError = STATUS_DELETE_PENDING;
+			break;
+		case -EACCES:
+		case -EXDEV:
+			rsp_hdr->Status.CifsError = STATUS_ACCESS_DENIED;
+			break;
+		case -EBADF:
+			rsp_hdr->Status.CifsError = STATUS_FILE_CLOSED;
+			break;
+		case -EFAULT:
+			rsp_hdr->Status.CifsError = STATUS_INVALID_LEVEL;
+			break;
+		case -EOPNOTSUPP:
+			rsp_hdr->Status.CifsError = STATUS_NOT_IMPLEMENTED;
+			break;
+		case -EIO:
+			rsp_hdr->Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+			break;
+		}
+
+		ksmbd_debug(SMB, "%s failed with error %d\n", __func__, err);
+	}
+
+	return err;
+}
+
+/**
+ * smb_mkdir() - handler for smb mkdir
+ * @work:	smb work containing create directory command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_mkdir(struct ksmbd_work *work)
+{
+	struct smb_com_create_directory_req *req = work->request_buf;
+	struct smb_com_create_directory_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	unsigned int maxlen, offset;
+	mode_t mode = S_IALLUGO;
+	char *dirname;
+	int rc;
+
+	/* the directory name is an inline string after the fixed header */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_create_directory_req, DirName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	dirname = smb_get_name(share, req->DirName, maxlen - offset, work,
+			       false);
+	if (IS_ERR(dirname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(dirname);
+	}
+
+	rc = smb_common_mkdir(work, dirname, mode);
+
+	rsp->ByteCount = 0;
+	kfree(dirname);
+
+	return rc;
+}
+
+/**
+ * smb_checkdir() - handler to verify whether a specified
+ * path resolves to a valid directory or not
+ *
+ * @work:   smb work containing the check directory command buffer
+ *
+ * Return:      0 on success, otherwise error
+ */
+int smb_checkdir(struct ksmbd_work *work)
+{
+	struct smb_com_check_directory_req *req = work->request_buf;
+	struct smb_com_check_directory_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path, parent_path;
+	struct kstat stat;
+	char *name, *last;
+	int err;
+	/* honour the client's case-insensitive flag during lookup */
+	bool caseless_lookup = req->hdr.Flags & SMBFLG_CASELESS;
+	unsigned int maxlen, offset;
+
+	/* DirName must start inside the request buffer */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_check_directory_req, DirName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->DirName, maxlen - offset, work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	err = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					 &parent_path, &path, caseless_lookup);
+	if (err) {
+		if (err == -ENOENT) {
+			/*
+			 * If the parent directory is valid but not the
+			 * last component - then returns
+			 * STATUS_OBJECT_NAME_NOT_FOUND
+			 * for that case and STATUS_OBJECT_PATH_NOT_FOUND
+			 * if the path is invalid.
+			 */
+			last = strrchr(name, '/');
+			if (last && last[1] != '\0') {
+				/* retry with the parent directory only */
+				*last = '\0';
+				last++;
+
+				err = ksmbd_vfs_kern_path_locked(work, name,
+						LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
+						&parent_path, &path,
+						caseless_lookup);
+			} else {
+				ksmbd_debug(SMB, "can't lookup parent %s\n",
+					name);
+				err = -ENOENT;
+			}
+		}
+		if (err) {
+			/* map lookup errno to the NTSTATUS on the wire */
+			ksmbd_debug(SMB, "look up failed err %d\n", err);
+			switch (err) {
+			case -ENOENT:
+				rsp->hdr.Status.CifsError =
+					STATUS_OBJECT_NAME_NOT_FOUND;
+				break;
+			case -ENOMEM:
+				rsp->hdr.Status.CifsError =
+					STATUS_INSUFFICIENT_RESOURCES;
+				break;
+			case -EACCES:
+				rsp->hdr.Status.CifsError =
+					STATUS_ACCESS_DENIED;
+				break;
+			case -EIO:
+				rsp->hdr.Status.CifsError = STATUS_DATA_ERROR;
+				break;
+			default:
+				rsp->hdr.Status.CifsError =
+					STATUS_OBJECT_PATH_SYNTAX_BAD;
+				break;
+			}
+			kfree(name);
+			return err;
+		}
+	}
+
+	err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	if (err)
+		/* NOTE(review): err is returned here with the response
+		 * status left unset -- confirm a caller fills it in
+		 */
+		goto out;
+
+	if (!S_ISDIR(stat.mode)) {
+		rsp->hdr.Status.CifsError = STATUS_NOT_A_DIRECTORY;
+	} else {
+		/* checkdir success, return response to server */
+		rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+		rsp->hdr.WordCount = 0;
+		rsp->ByteCount = 0;
+	}
+
+out:
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+	kfree(name);
+	return err;
+}
+
+/**
+ * smb_process_exit() - handler for smb process exit
+ * @work:	smb work containing process exit command buffer
+ *
+ * Return:	0 on success always
+ * Obsolete since the LAN Manager 1.0 dialect: FIDs are no longer tied
+ * to PIDs, so clients should close their files individually instead of
+ * sending SMB_COM_PROCESS_EXIT.  Implemented minimally here only so
+ * smbtorture testcases pass.
+ */
+int smb_process_exit(struct ksmbd_work *work)
+{
+	struct smb_com_process_exit_rsp *rsp = work->response_buf;
+
+	/* empty success response: no parameter words, no data bytes */
+	rsp->hdr.WordCount = 0;
+	rsp->ByteCount = 0;
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	return 0;
+}
+
+/**
+ * smb_rmdir() - handler for smb rmdir
+ * @work:	smb work containing delete directory command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_rmdir(struct ksmbd_work *work)
+{
+	struct smb_com_delete_directory_req *req = work->request_buf;
+	struct smb_com_delete_directory_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path, parent_path;
+	unsigned int maxlen, offset;
+	char *name;
+	int err;
+
+	/* deletion needs a writable tree connection */
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB,
+			"returning as user does not have permission to write\n");
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		return -EACCES;
+	}
+
+	/* DirName must start inside the request buffer */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_delete_directory_req, DirName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->DirName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	err = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					 &parent_path, &path, 0);
+	if (err < 0) {
+		/*
+		 * Lookup failure previously left the response status
+		 * unset; report it explicitly so the client sees a
+		 * real error instead of a stale/zero status.
+		 */
+		if (err == -ENOENT)
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_NOT_FOUND;
+		else
+			rsp->hdr.Status.CifsError = STATUS_DATA_ERROR;
+		goto out;
+	}
+
+	err = ksmbd_vfs_remove_file(work, &path);
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+	if (err) {
+		/* map the removal errno to an NTSTATUS */
+		if (err == -ENOTEMPTY)
+			rsp->hdr.Status.CifsError = STATUS_DIRECTORY_NOT_EMPTY;
+		else if (err == -ENOENT)
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_NOT_FOUND;
+		else
+			rsp->hdr.Status.CifsError = STATUS_DATA_ERROR;
+	} else {
+		/* rmdir success, return response to server */
+		rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+		rsp->hdr.WordCount = 0;
+		rsp->ByteCount = 0;
+	}
+
+out:
+	kfree(name);
+	return err;
+}
+
+/**
+ * smb_unlink() - handler for smb delete file
+ * @work:	smb work containing delete file command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_unlink(struct ksmbd_work *work)
+{
+	struct smb_com_delete_file_req *req = work->request_buf;
+	struct smb_com_delete_file_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	unsigned int maxlen, offset;
+	char *name;
+	int err;
+	struct ksmbd_file *fp;
+
+
+	/* deletion needs a writable tree connection */
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB,
+			"returning as user does not have permission to write\n");
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		return -EACCES;
+	}
+
+	/* fileName must start inside the request buffer */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_delete_file_req, fileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->fileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	/* refuse to unlink a file another handle still has open */
+	fp = ksmbd_lookup_fd_filename(work, name);
+	if (fp)
+		err = -ESHARE;
+	else {
+		struct path path, parent_path;
+
+		err = ksmbd_vfs_kern_path_locked(work, name,
+						 LOOKUP_NO_SYMLINKS,
+						 &parent_path, &path, 0);
+		if (!err) {
+			err = ksmbd_vfs_remove_file(work, &path);
+			inode_unlock(d_inode(parent_path.dentry));
+			path_put(&path);
+			path_put(&parent_path);
+		}
+	}
+
+	if (err) {
+		/* map errno to the NTSTATUS reported to the client */
+		if (err == -EISDIR)
+			rsp->hdr.Status.CifsError = STATUS_FILE_IS_A_DIRECTORY;
+		else if (err == -ESHARE)
+			rsp->hdr.Status.CifsError = STATUS_SHARING_VIOLATION;
+		else if (err == -EACCES || err == -EXDEV)
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		else
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_NOT_FOUND;
+	} else {
+		rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+		rsp->hdr.WordCount = 0;
+		rsp->ByteCount = 0;
+	}
+
+	/* fp may be NULL here -- presumably ksmbd_fd_put tolerates
+	 * a NULL fp; verify
+	 */
+	ksmbd_fd_put(work, fp);
+	kfree(name);
+	return err;
+}
+
+/**
+ * smb_nt_cancel() - handler for smb cancel command
+ * @work:	smb work containing cancel command buffer
+ *
+ * Marks the in-flight request carrying the same Mid as no-response and
+ * unlinks it from the connection's request list.
+ *
+ * Return:	0
+ */
+int smb_nt_cancel(struct ksmbd_work *work)
+{
+	struct ksmbd_conn *conn = work->conn;
+	struct smb_hdr *hdr = (struct smb_hdr *)work->request_buf;
+	struct smb_hdr *work_hdr;
+	struct ksmbd_work *new_work;
+
+	ksmbd_debug(SMB, "smb cancel called on mid %u\n", hdr->Mid);
+
+	/* the request list is shared with the receive path */
+	spin_lock(&conn->request_lock);
+	list_for_each_entry(new_work, &conn->requests, request_entry) {
+		work_hdr = (struct smb_hdr *)new_work->request_buf;
+		if (work_hdr->Mid == hdr->Mid) {
+			ksmbd_debug(SMB, "smb with mid %u cancelled command = 0x%x\n",
+			       hdr->Mid, work_hdr->Command);
+			new_work->send_no_response = 1;
+			list_del_init(&new_work->request_entry);
+			/* NOTE(review): presumably keeps the session's
+			 * sequence counter in step once the cancelled
+			 * request sends no reply -- confirm
+			 */
+			new_work->sess->sequence_number--;
+			break;
+		}
+	}
+	spin_unlock(&conn->request_lock);
+
+	/* For SMB_COM_NT_CANCEL command itself send no response */
+	work->send_no_response = 1;
+	return 0;
+}
+
+/**
+ * smb_nt_rename() - handler for smb nt rename command
+ * @work:	smb work containing nt rename command buffer
+ *
+ * Only the CREATE_HARD_LINK flavour of NT_RENAME is supported: it
+ * creates the new name as a hard link to the old one.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_nt_rename(struct ksmbd_work *work)
+{
+	struct smb_com_nt_rename_req *req = work->request_buf;
+	struct smb_com_rename_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	char *oldname, *newname;
+	int oldname_len, err;
+	unsigned int maxlen, offset;
+
+	if (!test_tree_conn_flag(work->tcon, KSMBD_TREE_CONN_FLAG_WRITABLE)) {
+		ksmbd_debug(SMB,
+			"returning as user does not have permission to write\n");
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		return -EACCES;
+	}
+
+	if (le16_to_cpu(req->Flags) != CREATE_HARD_LINK) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	/*
+	 * Use the NT_RENAME request layout for the offset: the plain
+	 * RENAME request lacks the Flags/ClusterCount words, so its
+	 * OldFileName offset is short and both the remaining-length
+	 * bound and the new-name pointer arithmetic would be wrong.
+	 */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_nt_rename_req, OldFileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	oldname = smb_get_name(share, req->OldFileName, maxlen - offset,
+			       work, false);
+	if (IS_ERR(oldname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(oldname);
+	}
+
+	/* determine the on-wire length of the old name */
+	if (is_smbreq_unicode(&req->hdr))
+		oldname_len = smb1_utf16_name_length((__le16 *)req->OldFileName,
+						     maxlen - offset);
+	else {
+		oldname_len = strlen(oldname);
+		oldname_len++;
+	}
+
+	/* 2 bytes for BufferFormat field and padding byte */
+	offset += oldname_len + 2;
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		kfree(oldname);
+		return -EINVAL;
+	}
+
+	newname = smb_get_name(share, (char *)req + offset, maxlen - offset,
+			       work, false);
+	if (IS_ERR(newname)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		kfree(oldname);
+		return PTR_ERR(newname);
+	}
+	ksmbd_debug(SMB, "oldname %s, newname %s, oldname_len %d, unicode %d\n",
+		    oldname, newname, oldname_len,
+		    is_smbreq_unicode(&req->hdr));
+
+	err = ksmbd_vfs_link(work, oldname, newname);
+	if (err == -EACCES)
+		rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+	else if (err < 0)
+		rsp->hdr.Status.CifsError = STATUS_NOT_SAME_DEVICE;
+
+	kfree(newname);
+	kfree(oldname);
+	return err;
+}
+
+/* PIPE shares have no backing inode: report a bare directory stat */
+static __le32 smb_query_info_pipe(struct ksmbd_share_config *share,
+				  struct kstat *st)
+{
+	st->mode = S_IFDIR;
+	return 0;
+}
+
+/*
+ * smb_query_info_path() - stat the path named in a QUERY_INFORMATION
+ * request under the connecting user's fsids.
+ *
+ * Return: 0 (STATUS_SUCCESS) with *st filled in, or an NTSTATUS error.
+ */
+static __le32 smb_query_info_path(struct ksmbd_work *work, struct kstat *st)
+{
+	struct smb_com_query_information_req *req = work->request_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path, parent_path;
+	unsigned int maxlen, offset;
+	char *name;
+	__le32 err = 0;
+	int ret;
+
+	/* FileName must start inside the request buffer */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_query_information_req, FileName);
+	if (offset >= maxlen)
+		return STATUS_INVALID_PARAMETER;
+
+	name = smb_get_name(share, req->FileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name))
+		return STATUS_OBJECT_NAME_INVALID;
+
+	if (ksmbd_override_fsids(work)) {
+		kfree(name);
+		return STATUS_NO_MEMORY;
+	}
+
+	ret = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					 &parent_path, &path, 0);
+	if (ret) {
+		pr_err("look up failed err %d\n", ret);
+		/*
+		 * path is not valid after a failed lookup and must not
+		 * be dereferenced (the old d_is_symlink(path.dentry)
+		 * check read an uninitialized path).  With
+		 * LOOKUP_NO_SYMLINKS a symlink on the walk fails with
+		 * -ELOOP; report that as access denied.
+		 */
+		if (ret == -ELOOP || ret == -EACCES)
+			err = STATUS_ACCESS_DENIED;
+		else
+			err = STATUS_OBJECT_NAME_NOT_FOUND;
+		goto out;
+	}
+
+	err = vfs_getattr(&path, st, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+out:
+	ksmbd_revert_fsids(work);
+	kfree(name);
+	return err;
+}
+
+/**
+ * smb_query_info() - handler for query information command
+ * @work:	smb work containing query info command buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_query_info(struct ksmbd_work *work)
+{
+	struct smb_com_query_information_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct kstat st = {0,};
+	__u16 attr = 0;
+	int i;
+	__le32 err;
+
+	/* pipe shares get a synthetic stat; everything else hits the VFS */
+	if (test_share_config_flag(work->tcon->share_conf,
+				    KSMBD_SHARE_FLAG_PIPE))
+		err = smb_query_info_pipe(share, &st);
+	else
+		err = smb_query_info_path(work, &st);
+
+	if (le32_to_cpu(err) != 0) {
+		rsp->hdr.Status.CifsError = err;
+		return -EINVAL;
+	}
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 10;
+
+	/* translate POSIX mode bits into DOS attribute flags */
+	if (st.mode & S_ISVTX)
+		attr |= (ATTR_HIDDEN | ATTR_SYSTEM);
+	if (!(st.mode & 0222))
+		attr |= ATTR_READONLY;
+	if (S_ISDIR(st.mode))
+		attr |= ATTR_DIRECTORY;
+
+	rsp->attr = cpu_to_le16(attr);
+	rsp->last_write_time = cpu_to_le32(st.mtime.tv_sec);
+	rsp->size = cpu_to_le32((u32)st.size);
+	for (i = 4; i >= 0; i--)
+		rsp->reserved[i] = 0;
+
+	rsp->ByteCount = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+	return 0;
+}
+
+/**
+ * smb_closedir() - handler closing a directory handle opened for readdir
+ * @work:	smb work containing the find close request buffer
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_closedir(struct ksmbd_work *work)
+{
+	struct smb_com_findclose_req *req = work->request_buf;
+	struct smb_com_close_rsp *rsp = work->response_buf;
+	int ret;
+
+	ksmbd_debug(SMB, "SMB_COM_FIND_CLOSE2 called for fid %u\n",
+		    req->FileID);
+
+	/* Trivial response: no parameter words, no data bytes. */
+	rsp->hdr.WordCount = 0;
+	rsp->ByteCount = 0;
+
+	ret = ksmbd_close_fd(work, req->FileID);
+	rsp->hdr.Status.CifsError =
+		ret ? STATUS_INVALID_HANDLE : STATUS_SUCCESS;
+	return ret;
+}
+
+/**
+ * convert_open_flags() - convert SMB open mode/disposition to VFS open flags
+ * @file_present:	true if the target file already exists
+ * @mode:		SMB open mode (access + sharing + write-through bits)
+ * @disposition:	SMB open function (action if exists / if missing)
+ * @may_flags:		out: MAY_* permission mask matching the access mode
+ *
+ * Return:	O_* flags on success, negative errno when the disposition
+ *		forbids the open (-EEXIST / -EINVAL)
+ */
+static int convert_open_flags(bool file_present,
+			      __u16 mode, __u16 disposition,
+			      int *may_flags)
+{
+	int oflags;
+
+	/* Low three bits select the access mode; unknown values read-only. */
+	switch (mode & 0x0007) {
+	case SMBOPEN_WRITE:
+		oflags = O_WRONLY;
+		break;
+	case SMBOPEN_READWRITE:
+		oflags = O_RDWR;
+		break;
+	case SMBOPEN_READ:
+	default:
+		oflags = O_RDONLY;
+		break;
+	}
+
+	if (mode & SMBOPEN_WRITE_THROUGH)
+		oflags |= O_SYNC;
+
+	if (file_present) {
+		/* File exists: honour the "open action" bits. */
+		switch (disposition & 0x0003) {
+		case SMBOPEN_DISPOSITION_NONE:
+			return -EEXIST;
+		case SMBOPEN_OAPPEND:
+			oflags |= O_APPEND;
+			break;
+		case SMBOPEN_OTRUNC:
+			oflags |= O_TRUNC;
+			break;
+		}
+	} else {
+		/* File absent: only the "create" bit is meaningful. */
+		switch (disposition & 0x0010) {
+		case SMBOPEN_DISPOSITION_NONE:
+			return -EINVAL;
+		case SMBOPEN_OCREATE:
+			oflags |= O_CREAT;
+			break;
+		}
+	}
+
+	*may_flags = ksmbd_openflags_to_mayflags(oflags);
+
+	return oflags;
+}
+
+/**
+ * smb_open_andx() - smb andx open method handler
+ * @work:	smb work containing buffer for andx open command buffer
+ *
+ * Looks up (or creates) the requested file under the share, opens it with
+ * the converted flags, optionally grants an oplock, and fills the OPENX
+ * response.  Lock/ref discipline: a successful ksmbd_vfs_kern_path_locked()
+ * leaves the parent inode locked and references held on both paths; the
+ * free_path label is the single place they are released.
+ *
+ * Return:	error if there is error while processing current command,
+ *		otherwise pointer to next andx command in the chain
+ */
+int smb_open_andx(struct ksmbd_work *work)
+{
+	struct smb_com_openx_req *req = work->request_buf;
+	struct smb_com_openx_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path, parent_path;
+	struct kstat stat;
+	int oplock_flags, file_info, open_flags, may_flags;
+	char *name;
+	bool file_present = true;
+	umode_t mode = 0;
+	int err;
+	struct ksmbd_file *fp = NULL;
+	int oplock_rsp = OPLOCK_NONE, share_ret;
+	unsigned int maxlen, offset;
+
+	rsp->hdr.Status.CifsError = STATUS_UNSUCCESSFUL;
+
+	/* check for sharing mode flag */
+	if ((le16_to_cpu(req->Mode) & SMBOPEN_SHARING_MODE) >
+			SMBOPEN_DENY_NONE) {
+		/* Unsupported deny mode: report a DOS-class error. */
+		rsp->hdr.Status.DosError.ErrorClass = ERRDOS;
+		rsp->hdr.Status.DosError.Error = cpu_to_le16(ERRbadaccess);
+		rsp->hdr.Flags2 &= ~SMBFLG2_ERR_STATUS;
+
+		/* Zero WordCount and the two bytes following it. */
+		memset(&rsp->hdr.WordCount, 0, 3);
+		return -EINVAL;
+	}
+
+	/* The file name must lie entirely inside the received PDU. */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_openx_req, fileName);
+
+	/* Unicode names start one byte later — presumably a pad byte. */
+	if (is_smbreq_unicode(&req->hdr))
+		offset++;
+
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, (char *)req + offset, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	if (ksmbd_override_fsids(work)) {
+		kfree(name);
+		rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+		return -ENOMEM;
+	}
+
+	err = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					 &parent_path, &path,
+					 req->hdr.Flags & SMBFLG_CASELESS);
+	if (err) {
+		/* Hard lookup failures abort; "not found" may still create. */
+		if (err == -EACCES || err == -EXDEV)
+			goto out;
+		file_present = false;
+	} else {
+		err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+				  AT_STATX_SYNC_AS_STAT);
+		if (err)
+			goto free_path;
+	}
+
+	oplock_flags =
+		le16_to_cpu(req->OpenFlags) & (REQ_OPLOCK | REQ_BATCHOPLOCK);
+
+	open_flags = convert_open_flags(file_present,
+					le16_to_cpu(req->Mode),
+					le16_to_cpu(req->OpenFunction),
+					&may_flags);
+	/*
+	 * NOTE(review): when the disposition is rejected for an existing
+	 * file we reach free_path with err still 0, so the function
+	 * returns success while the rsp carries no error status — confirm
+	 * this is intended.
+	 */
+	if (open_flags < 0) {
+		ksmbd_debug(SMB, "create_dispostion returned %d\n", open_flags);
+		if (file_present)
+			goto free_path;
+		else {
+			err = -ENOENT;
+			goto out;
+		}
+	}
+
+	/* Deny write-access opens of files with no write permission bits. */
+	if (file_present && !(stat.mode & 0222)) {
+		if ((open_flags & O_ACCMODE) == O_WRONLY ||
+		    (open_flags & O_ACCMODE) == O_RDWR) {
+			ksmbd_debug(SMB, "readonly file(%s)\n", name);
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+			memset(&rsp->hdr.WordCount, 0, 3);
+			goto free_path;
+		}
+	}
+
+	/* Create a regular file; ATTR_READONLY strips the write bits. */
+	if (!file_present && (open_flags & O_CREAT)) {
+		mode |= 0777;
+		if (le16_to_cpu(req->FileAttributes) & ATTR_READONLY)
+			mode &= ~0222;
+
+		mode |= S_IFREG;
+		err = smb_common_create(work, &parent_path, &path, name,
+					open_flags, mode, false);
+		if (err) {
+			ksmbd_debug(SMB, "smb_common_create err: %d\n", err);
+			goto out;
+		}
+
+		err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+				  AT_STATX_SYNC_AS_STAT);
+		if (err)
+			goto free_path;
+	} else if (file_present) {
+		err = inode_permission(mnt_idmap(path.mnt),
+				       d_inode(path.dentry),
+				       may_flags);
+		if (err)
+			goto free_path;
+	}
+
+	/* Refuse to open under a parent that is being deleted. */
+	err = ksmbd_query_inode_status(path.dentry->d_parent);
+	if (err == KSMBD_INODE_STATUS_PENDING_DELETE) {
+		err = -EBUSY;
+		goto free_path;
+	}
+
+	err = 0;
+	ksmbd_debug(SMB, "(%s) open_flags = 0x%x, oplock_flags 0x%x\n", name,
+		    open_flags, oplock_flags);
+	/* open  file and get FID */
+	fp = ksmbd_vfs_dentry_open(work, &path, open_flags, 0, file_present);
+	if (IS_ERR(fp)) {
+		err = PTR_ERR(fp);
+		fp = NULL;
+		goto free_path;
+	}
+	fp->pid = le16_to_cpu(req->hdr.Pid);
+
+	/* Publish the open on the inode's open-file list. */
+	down_write(&fp->f_ci->m_lock);
+	list_add(&fp->node, &fp->f_ci->m_fp_list);
+	up_write(&fp->f_ci->m_lock);
+
+	share_ret = ksmbd_smb_check_shared_mode(fp->filp, fp);
+	if (smb1_oplock_enable &&
+	    test_share_config_flag(work->tcon->share_conf,
+				   KSMBD_SHARE_FLAG_OPLOCKS) &&
+	    !S_ISDIR(file_inode(fp->filp)->i_mode) && oplock_flags) {
+		/* Client cannot request levelII oplock directly */
+		err = smb_grant_oplock(work, oplock_flags, fp->volatile_id, fp,
+				       le16_to_cpu(req->hdr.Tid), NULL, 0);
+		if (err)
+			goto free_path;
+	} else {
+		if (ksmbd_inode_pending_delete(fp)) {
+			err = -EBUSY;
+			goto free_path;
+		}
+
+		if (share_ret < 0) {
+			err = -EPERM;
+			goto free_path;
+		}
+	}
+
+	oplock_rsp = fp->f_opinfo != NULL ? fp->f_opinfo->level : 0;
+
+	/* open success, send back response */
+	if (file_present) {
+		if (!(open_flags & O_TRUNC))
+			file_info = F_OPENED;
+		else
+			file_info = F_OVERWRITTEN;
+	} else
+		file_info = F_CREATED;
+
+	if (oplock_rsp)
+		file_info |= SMBOPEN_LOCK_GRANTED;
+
+	/* Prefer the real birth time; fall back to ctime when unavailable. */
+	if (stat.result_mask & STATX_BTIME)
+		fp->create_time = ksmbd_UnixTimeToNT(stat.btime);
+	else
+		fp->create_time = ksmbd_UnixTimeToNT(stat.ctime);
+	if (file_present) {
+		/* Existing file: read stored DOS attributes if configured. */
+		if (test_share_config_flag(work->tcon->share_conf,
+					   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+			struct xattr_dos_attrib da;
+
+			err = ksmbd_vfs_get_dos_attrib_xattr(mnt_idmap(path.mnt),
+							     path.dentry, &da);
+			if (err > 0) {
+				fp->create_time = da.create_time;
+				fp->itime = da.itime;
+			}
+			err = 0;
+		}
+	} else {
+		/* Freshly created file: persist DOS attributes best-effort. */
+		if (test_share_config_flag(work->tcon->share_conf,
+					   KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
+			struct xattr_dos_attrib da = {0};
+
+			da.version = 4;
+			da.attr = ATTR_NORMAL;
+			da.itime = da.create_time = fp->create_time;
+			da.flags = XATTR_DOSINFO_ATTRIB |
+				   XATTR_DOSINFO_CREATE_TIME |
+				   XATTR_DOSINFO_ITIME;
+
+			err = ksmbd_vfs_set_dos_attrib_xattr(mnt_idmap(path.mnt),
+							     &path, &da, false);
+			if (err)
+				ksmbd_debug(SMB, "failed to store creation time in xattr\n");
+			err = 0;
+		}
+	}
+
+	/* prepare response buffer */
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 0x0F;
+	rsp->Fid = fp->volatile_id;
+	rsp->FileAttributes = cpu_to_le16(ATTR_NORMAL);
+	rsp->LastWriteTime = cpu_to_le32(stat.mtime.tv_sec);
+	rsp->EndOfFile = cpu_to_le32(stat.size);
+	switch (open_flags & O_ACCMODE) {
+	case O_RDONLY:
+		rsp->Access = cpu_to_le16(SMB_DA_ACCESS_READ);
+		break;
+	case O_WRONLY:
+		rsp->Access = cpu_to_le16(SMB_DA_ACCESS_WRITE);
+		break;
+	case O_RDWR:
+		rsp->Access = cpu_to_le16(SMB_DA_ACCESS_READ_WRITE);
+		break;
+	default:
+		rsp->Access = cpu_to_le16(SMB_DA_ACCESS_READ);
+		break;
+	}
+
+	rsp->FileType = 0;
+	rsp->IPCState = 0;
+	rsp->Action = cpu_to_le16(file_info);
+	rsp->Reserved = 0;
+	rsp->ByteCount = 0;
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+free_path:
+	/* Parent is locked on every path reaching here (lookup or create). */
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+out:
+	ksmbd_revert_fsids(work);
+	if (err) {
+		/* Map errno to the NT status the client expects. */
+		if (err == -ENOSPC)
+			rsp->hdr.Status.CifsError = STATUS_DISK_FULL;
+		else if (err == -EMFILE)
+			rsp->hdr.Status.CifsError =
+				STATUS_TOO_MANY_OPENED_FILES;
+		else if (err == -EBUSY)
+			rsp->hdr.Status.CifsError = STATUS_DELETE_PENDING;
+		else if (err == -ENOENT)
+			rsp->hdr.Status.CifsError =
+				STATUS_OBJECT_NAME_NOT_FOUND;
+		else if (err == -EACCES || err == -EXDEV)
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+		else
+			rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+		if (fp)
+			ksmbd_close_fd(work, fp->volatile_id);
+	} else
+		ksmbd_update_fstate(&work->sess->file_table, fp, FP_INITED);
+
+	kfree(name);
+	if (!rsp->hdr.WordCount)
+		return err;
+
+	/* this is an ANDx command ? */
+	rsp->AndXReserved = 0;
+	rsp->AndXOffset = cpu_to_le16(get_rfc1002_len(&rsp->hdr));
+	if (req->AndXCommand != SMB_NO_MORE_ANDX_COMMAND) {
+		/* adjust response */
+		rsp->AndXCommand = req->AndXCommand;
+		return rsp->AndXCommand; /* More processing required */
+	}
+	rsp->AndXCommand = SMB_NO_MORE_ANDX_COMMAND;
+
+	return err;
+}
+
+/**
+ * smb_setattr() - set file attributes
+ * @work:	smb work containing setattr command
+ *
+ * Maps the DOS read-only attribute onto the 0222 mode bits and applies
+ * the requested last-write time to the file named in the request.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_setattr(struct ksmbd_work *work)
+{
+	struct smb_com_setattr_req *req = work->request_buf;
+	struct smb_com_setattr_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct path path, parent_path;
+	struct kstat stat;
+	struct iattr attrs;
+	int err = 0;
+	char *name;
+	unsigned int maxlen, offset;
+	__u16 dos_attr;
+
+	/* The file name must lie entirely inside the received PDU. */
+	maxlen = get_req_len(req);
+	offset = offsetof(struct smb_com_setattr_req, fileName);
+	if (offset >= maxlen) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return -EINVAL;
+	}
+
+	name = smb_get_name(share, req->fileName, maxlen - offset,
+			    work, false);
+	if (IS_ERR(name)) {
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_INVALID;
+		return PTR_ERR(name);
+	}
+
+	err = ksmbd_vfs_kern_path_locked(work, name, LOOKUP_NO_SYMLINKS,
+					 &parent_path, &path,
+					 req->hdr.Flags & SMBFLG_CASELESS);
+	if (err) {
+		ksmbd_debug(SMB, "look up failed err %d\n", err);
+		rsp->hdr.Status.CifsError = STATUS_OBJECT_NAME_NOT_FOUND;
+		err = 0;
+		goto out;
+	}
+
+	err = vfs_getattr(&path, &stat, STATX_BASIC_STATS,
+			  AT_STATX_SYNC_AS_STAT);
+	/*
+	 * Release the parent inode lock and both path references
+	 * unconditionally: the previous code jumped to "out" on a
+	 * vfs_getattr() failure and leaked the lock and the references.
+	 */
+	inode_unlock(d_inode(parent_path.dentry));
+	path_put(&path);
+	path_put(&parent_path);
+	if (err)
+		goto out;
+
+	attrs.ia_valid = 0;
+	attrs.ia_mode = 0;
+
+	dos_attr = le16_to_cpu(req->attr);
+	/* No DOS attributes at all means writable: set the owner-write bit. */
+	if (!dos_attr)
+		attrs.ia_mode = stat.mode | 0200;
+
+	if (dos_attr & ATTR_READONLY)
+		attrs.ia_mode = stat.mode & ~0222;
+
+	if (attrs.ia_mode)
+		attrs.ia_valid |= ATTR_MODE;
+
+	attrs.ia_mtime.tv_sec = le32_to_cpu(req->LastWriteTime);
+	attrs.ia_valid |= (ATTR_MTIME | ATTR_MTIME_SET);
+
+	err = ksmbd_vfs_setattr(work, name, 0, &attrs);
+	if (err)
+		goto out;
+
+	rsp->hdr.Status.CifsError = STATUS_SUCCESS;
+	rsp->hdr.WordCount = 0;
+	rsp->ByteCount = 0;
+
+out:
+	kfree(name);
+	if (err) {
+		rsp->hdr.Status.CifsError = STATUS_INVALID_PARAMETER;
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * smb_query_information_disk() - determine capacity and remaining free space
+ * @work:	smb work containing command
+ *
+ * Builds the legacy SMB_COM_QUERY_INFORMATION_DISK response by scaling the
+ * filesystem geometry down until every reported quantity fits in 16 bits.
+ *
+ * Return:	0 on success, otherwise error
+ */
+int smb_query_information_disk(struct ksmbd_work *work)
+{
+	struct smb_hdr *req = work->request_buf;
+	struct smb_com_query_information_disk_rsp *rsp = work->response_buf;
+	struct ksmbd_share_config *share = work->tcon->share_conf;
+	struct ksmbd_tree_connect *tree_conn;
+	struct kstatfs stfs;
+	struct path path;
+	int err = 0;
+
+	u16 blocks_per_unit, bytes_per_block, total_units, free_units;
+	u64 total_blocks, free_blocks;
+	u32 block_size, unit_size;
+
+	/* This command carries no parameter words. */
+	if (req->WordCount) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	tree_conn = work->tcon;
+	if (!tree_conn) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	share = tree_conn->share_conf;
+
+	/* Disk geometry makes no sense for pipe (IPC) shares. */
+	if (test_share_config_flag(share, KSMBD_SHARE_FLAG_PIPE)) {
+		err = -ENOENT;
+		goto out;
+	}
+
+	if (ksmbd_override_fsids(work)) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = kern_path(share->path, LOOKUP_NO_SYMLINKS, &path);
+	if (err)
+		goto out_fsids;
+
+	err = vfs_statfs(&path, &stfs);
+	if (err) {
+		pr_err("cannot do stat of path %s\n", share->path);
+		goto out_path;
+	}
+
+	/* Guard against filesystems that report a zero fragment size. */
+	unit_size = stfs.f_frsize ? (stfs.f_bsize / stfs.f_frsize) : 1;
+	block_size = stfs.f_bsize;
+	total_blocks = stfs.f_blocks;
+	free_blocks = stfs.f_bavail;
+
+	/*
+	 * clamp block size to at most 512 bytes for compatibility with
+	 * older clients
+	 */
+	while (block_size > 512) {
+		block_size >>= 1;
+		unit_size <<= 1;
+	}
+
+	/* adjust blocks and sizes until they fit into a u16 */
+	while (total_blocks >= 0xFFFF) {
+		total_blocks >>= 1;
+		free_blocks >>= 1;
+		if ((unit_size <<= 1) > 0xFFFF) {
+			unit_size >>= 1;
+			total_blocks = 0xFFFF;
+			free_blocks <<= 1;
+			break;
+		}
+	}
+
+	total_units = (total_blocks >= 0xFFFF) ? 0xFFFF : (u16)total_blocks;
+	free_units = (free_blocks >= 0xFFFF) ? 0xFFFF : (u16)free_blocks;
+	bytes_per_block = (u16)block_size;
+	blocks_per_unit = (u16)unit_size;
+
+	rsp->hdr.WordCount = 5;
+
+	/*
+	 * The response fields are wire little-endian (__le16); convert
+	 * explicitly — the previous code stored raw cpu-order values,
+	 * which is wrong on big-endian hosts.
+	 */
+	rsp->TotalUnits = cpu_to_le16(total_units);
+	rsp->BlocksPerUnit = cpu_to_le16(blocks_per_unit);
+	rsp->BlockSize = cpu_to_le16(bytes_per_block);
+	rsp->FreeUnits = cpu_to_le16(free_units);
+	rsp->Pad = 0;
+	rsp->ByteCount = 0;
+
+	inc_resp_size(work, rsp->hdr.WordCount * 2);
+
+out_path:
+	path_put(&path);
+out_fsids:
+	ksmbd_revert_fsids(work);
+out:
+	if (err) {
+		/* Map errno to the NT status the client expects. */
+		switch (err) {
+		case -EINVAL:
+			rsp->hdr.Status.CifsError = STATUS_NOT_SUPPORTED;
+			break;
+		case -ENOMEM:
+			rsp->hdr.Status.CifsError = STATUS_NO_MEMORY;
+			break;
+		case -ENOENT:
+			rsp->hdr.Status.CifsError = STATUS_NO_SUCH_FILE;
+			break;
+		case -EBUSY:
+			rsp->hdr.Status.CifsError = STATUS_DELETE_PENDING;
+			break;
+		case -EACCES:
+		case -EXDEV:
+			rsp->hdr.Status.CifsError = STATUS_ACCESS_DENIED;
+			break;
+		case -EBADF:
+			rsp->hdr.Status.CifsError = STATUS_FILE_CLOSED;
+			break;
+		case -EFAULT:
+			rsp->hdr.Status.CifsError = STATUS_INVALID_LEVEL;
+			break;
+		case -EOPNOTSUPP:
+			rsp->hdr.Status.CifsError = STATUS_NOT_IMPLEMENTED;
+			break;
+		case -EIO:
+			rsp->hdr.Status.CifsError = STATUS_UNEXPECTED_IO_ERROR;
+			break;
+		}
+		ksmbd_debug(SMB, "%s failed with error %d\n", __func__, err);
+	}
+
+	return err;
+}
+
+/**
+ * smb1_is_sign_req() - handler for checking packet signing status
+ * @work:	smb work containing notify command buffer
+ * @command:	SMB1 command code of the request being checked
+ *
+ * Return:	true if packet is signed, false otherwise
+ */
+bool smb1_is_sign_req(struct ksmbd_work *work, unsigned int command)
+{
+#if 0
+	struct smb_hdr *rcv_hdr1 = (struct smb_hdr *)work->request_buf;
+
+	/*
+	 * FIXME: signed tree connect failed by signing error
+	 * with windows XP client. For now, Force to turn off
+	 * signing feature in SMB1.
+	 */
+	if ((rcv_hdr1->Flags2 & SMBFLG2_SECURITY_SIGNATURE) &&
+			command != SMB_COM_SESSION_SETUP_ANDX)
+		return true;
+	return false;
+#else
+	/* SMB1 signing is force-disabled — see the FIXME above. */
+	return false;
+#endif
+}
+
+/**
+ * smb1_check_sign_req() - verify the signature on a received SMB1 PDU
+ * @work:	smb work containing the request to verify
+ *
+ * Saves the client-supplied signature, substitutes the expected sequence
+ * number into the signature field, recomputes the signature over the PDU
+ * and compares the two.
+ *
+ * Return:	1 on success, 0 otherwise
+ */
+int smb1_check_sign_req(struct ksmbd_work *work)
+{
+	struct smb_hdr *hdr = (struct smb_hdr *)work->request_buf;
+	char expected[CIFS_SMB1_SIGNATURE_SIZE];
+	char computed[20];
+	struct kvec iov[1];
+
+	memcpy(expected, hdr->Signature.SecuritySignature,
+	       CIFS_SMB1_SIGNATURE_SIZE);
+	/* Signing covers the header with the sequence number in place. */
+	hdr->Signature.Sequence.SequenceNumber =
+		cpu_to_le32(++work->sess->sequence_number);
+	hdr->Signature.Sequence.Reserved = 0;
+
+	iov[0].iov_base = hdr->Protocol;
+	iov[0].iov_len = be32_to_cpu(hdr->smb_buf_length);
+
+	if (ksmbd_sign_smb1_pdu(work->sess, iov, 1, computed))
+		return 0;
+
+	if (memcmp(computed, expected, CIFS_SMB1_SIGNATURE_SIZE)) {
+		ksmbd_debug(SMB, "bad smb1 sign\n");
+		return 0;
+	}
+
+	return 1;
+}
+
+/**
+ * smb1_set_sign_rsp() - handler for rsp packet sign processing
+ * @work:	smb work containing notify command buffer
+ *
+ * Computes the signature over the response PDU and stores it in the
+ * header; if signing fails the signature field is zeroed instead.
+ */
+void smb1_set_sign_rsp(struct ksmbd_work *work)
+{
+	struct smb_hdr *rsp_hdr = (struct smb_hdr *)work->response_buf;
+	char signature[20];
+	struct kvec iov[2];
+	int n_vec = 1;
+
+	rsp_hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
+	/* The signature field carries the sequence number while signing. */
+	rsp_hdr->Signature.Sequence.SequenceNumber =
+		cpu_to_le32(++work->sess->sequence_number);
+	rsp_hdr->Signature.Sequence.Reserved = 0;
+
+	iov[0].iov_base = rsp_hdr->Protocol;
+	iov[0].iov_len = be32_to_cpu(rsp_hdr->smb_buf_length);
+
+#if 0
+	/* XXX smb1 signing is broken iirc */
+	if (work->aux_payload_sz) {
+		iov[0].iov_len -= work->aux_payload_sz;
+
+		iov[1].iov_base = work->aux_payload_buf;
+		iov[1].iov_len = work->aux_payload_sz;
+		n_vec++;
+	}
+#endif
+
+	if (ksmbd_sign_smb1_pdu(work->sess, iov, n_vec, signature))
+		memset(rsp_hdr->Signature.SecuritySignature, 0,
+		       CIFS_SMB1_SIGNATURE_SIZE);
+	else
+		memcpy(rsp_hdr->Signature.SecuritySignature, signature,
+		       CIFS_SMB1_SIGNATURE_SIZE);
+}
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/fs/smb/server/smb1pdu.h	2025-09-25 17:40:36.783373256 +0200
@@ -0,0 +1,1653 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ *   Copyright (C) 2016 Namjae Jeon <linkinjeon@kernel.org>
+ *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
+ */
+
+#ifndef __SMB1PDU_H
+#define __SMB1PDU_H
+
+#define MAX_CIFS_HDR_SIZE 0x58
+
+#define SMB_HEADER_SIZE			32
+
+#define SMB1_CLIENT_GUID_SIZE		(16)
+#define SMB1_MAX_MPX_COUNT		10
+#define SMB1_MAX_VCS			1
+#define SMB1_MAX_RAW_SIZE		65536
+#define MAX_CIFS_LOOKUP_BUFFER_SIZE	(16*1024)
+
+/*
+ * Size of the ntlm client response
+ */
+#define CIFS_AUTH_RESP_SIZE		24
+#define CIFS_SMB1_SIGNATURE_SIZE	8
+#define CIFS_SMB1_SESSKEY_SIZE		16
+
+#define SMB1_SERVER_CAPS					\
+	(CAP_UNICODE | CAP_LARGE_FILES | CAP_EXTENDED_SECURITY |\
+	 CAP_NT_SMBS | CAP_STATUS32 | CAP_LOCK_AND_READ |	\
+	 CAP_NT_FIND | CAP_UNIX | CAP_LARGE_READ_X |		\
+	 CAP_LARGE_WRITE_X | CAP_LEVEL_II_OPLOCKS)
+
+#define SMB1_SERVER_SECU  (SECMODE_USER | SECMODE_PW_ENCRYPT)
+
+/* Service Type of TreeConnect*/
+#define SERVICE_DISK_SHARE	"A:"
+#define SERVICE_IPC_SHARE	"IPC"
+#define SERVICE_PRINTER_SHARE	"LPT1:"
+#define SERVICE_COMM		"COMM"
+
+#define NATIVE_FILE_SYSTEM	"NTFS"
+
+#define SMB_NO_MORE_ANDX_COMMAND 0xFF
+#define SMB1_PROTO_NUMBER cpu_to_le32(0x424d53ff)
+
+/* Transact2 subcommand codes */
+#define TRANS2_OPEN                   0x00
+#define TRANS2_FIND_FIRST             0x01
+#define TRANS2_FIND_NEXT              0x02
+#define TRANS2_QUERY_FS_INFORMATION   0x03
+#define TRANS2_SET_FS_INFORMATION     0x04
+#define TRANS2_QUERY_PATH_INFORMATION 0x05
+#define TRANS2_SET_PATH_INFORMATION   0x06
+#define TRANS2_QUERY_FILE_INFORMATION 0x07
+#define TRANS2_SET_FILE_INFORMATION   0x08
+#define TRANS2_CREATE_DIRECTORY       0x0d
+#define TRANS2_GET_DFS_REFERRAL       0x10
+/* sic: "INCOSISTENCY" is misspelled; kept as-is since code may reference it */
+#define TRANS2_REPORT_DFS_INCOSISTENCY 0x11
+
+/* SMB Transact (Named Pipe) subcommand codes */
+#define TRANS_SET_NMPIPE_STATE      0x0001
+#define TRANS_RAW_READ_NMPIPE       0x0011
+#define TRANS_QUERY_NMPIPE_STATE    0x0021
+#define TRANS_QUERY_NMPIPE_INFO     0x0022
+#define TRANS_PEEK_NMPIPE           0x0023
+#define TRANS_TRANSACT_NMPIPE       0x0026
+#define TRANS_RAW_WRITE_NMPIPE      0x0031
+#define TRANS_READ_NMPIPE           0x0036
+#define TRANS_WRITE_NMPIPE          0x0037
+#define TRANS_WAIT_NMPIPE           0x0053
+#define TRANS_CALL_NMPIPE           0x0054
+
+/* NT Transact subcommand codes */
+#define NT_TRANSACT_CREATE            0x01
+#define NT_TRANSACT_IOCTL             0x02
+#define NT_TRANSACT_SET_SECURITY_DESC 0x03
+#define NT_TRANSACT_NOTIFY_CHANGE     0x04
+#define NT_TRANSACT_RENAME            0x05
+#define NT_TRANSACT_QUERY_SECURITY_DESC 0x06
+#define NT_TRANSACT_GET_USER_QUOTA    0x07
+#define NT_TRANSACT_SET_USER_QUOTA    0x08
+
+/*
+ * SMB flag definitions
+ */
+#define SMBFLG_EXTD_LOCK 0x01   /* server supports lock-read write-unlock smb */
+#define SMBFLG_RCV_POSTED 0x02  /* obsolete */
+#define SMBFLG_RSVD 0x04
+#define SMBFLG_CASELESS 0x08    /*
+				 * all pathnames treated as caseless (off
+				 * implies case sensitive file handling
+				 * request)
+				 */
+#define SMBFLG_CANONICAL_PATH_FORMAT 0x10       /* obsolete */
+#define SMBFLG_OLD_OPLOCK 0x20  /* obsolete */
+#define SMBFLG_OLD_OPLOCK_NOTIFY 0x40   /* obsolete */
+#define SMBFLG_RESPONSE 0x80    /* this PDU is a response from server */
+
+/*
+ * SMB flag2 definitions
+ */
+#define SMBFLG2_KNOWS_LONG_NAMES cpu_to_le16(1) /*
+						 * can send long (non-8.3)
+						 * path names in response
+						 */
+#define SMBFLG2_KNOWS_EAS cpu_to_le16(2)
+#define SMBFLG2_SECURITY_SIGNATURE cpu_to_le16(4)
+#define SMBFLG2_COMPRESSED (8)
+#define SMBFLG2_SECURITY_SIGNATURE_REQUIRED (0x10)
+#define SMBFLG2_IS_LONG_NAME cpu_to_le16(0x40)
+#define SMBFLG2_REPARSE_PATH (0x400)
+#define SMBFLG2_EXT_SEC cpu_to_le16(0x800)
+#define SMBFLG2_DFS cpu_to_le16(0x1000)
+#define SMBFLG2_PAGING_IO cpu_to_le16(0x2000)
+#define SMBFLG2_ERR_STATUS cpu_to_le16(0x4000)
+#define SMBFLG2_UNICODE cpu_to_le16(0x8000)
+
+#define SMB_COM_CREATE_DIRECTORY      0x00 /* trivial response */
+#define SMB_COM_DELETE_DIRECTORY      0x01 /* trivial response */
+#define SMB_COM_CLOSE                 0x04 /* triv req/rsp, timestamp ignored */
+#define SMB_COM_FLUSH                 0x05 /* triv req/rsp */
+#define SMB_COM_DELETE                0x06 /* trivial response */
+#define SMB_COM_RENAME                0x07 /* trivial response */
+#define SMB_COM_QUERY_INFORMATION     0x08 /* aka getattr */
+#define SMB_COM_SETATTR               0x09 /* trivial response */
+#define SMB_COM_WRITE                 0x0b
+#define SMB_COM_CHECK_DIRECTORY       0x10 /* trivial response */
+#define SMB_COM_PROCESS_EXIT          0x11 /* trivial response */
+#define SMB_COM_LOCKING_ANDX          0x24 /* trivial response */
+#define SMB_COM_TRANSACTION	      0x25
+#define SMB_COM_COPY                  0x29 /* trivial rsp, fail filename ignrd*/
+#define SMB_COM_ECHO                  0x2B /* echo request */
+#define SMB_COM_OPEN_ANDX             0x2D /* Legacy open for old servers */
+#define SMB_COM_READ_ANDX             0x2E
+#define SMB_COM_WRITE_ANDX            0x2F
+#define SMB_COM_TRANSACTION2          0x32
+#define SMB_COM_TRANSACTION2_SECONDARY 0x33
+#define SMB_COM_FIND_CLOSE2           0x34 /* trivial response */
+#define SMB_COM_TREE_DISCONNECT       0x71 /* trivial response */
+#define SMB_COM_NEGOTIATE             0x72
+#define SMB_COM_SESSION_SETUP_ANDX    0x73
+#define SMB_COM_LOGOFF_ANDX           0x74 /* trivial response */
+#define SMB_COM_TREE_CONNECT_ANDX     0x75
+#define SMB_COM_QUERY_INFORMATION_DISK 0x80
+#define SMB_COM_NT_TRANSACT           0xA0
+#define SMB_COM_NT_TRANSACT_SECONDARY 0xA1
+#define SMB_COM_NT_CREATE_ANDX        0xA2
+#define SMB_COM_NT_CANCEL             0xA4 /* no response */
+#define SMB_COM_NT_RENAME             0xA5 /* trivial response */
+
+/* Negotiate response Capabilities */
+#define CAP_RAW_MODE           0x00000001
+#define CAP_MPX_MODE           0x00000002
+#define CAP_UNICODE            0x00000004
+#define CAP_LARGE_FILES        0x00000008
+#define CAP_NT_SMBS            0x00000010       /* implies CAP_NT_FIND */
+#define CAP_RPC_REMOTE_APIS    0x00000020
+#define CAP_STATUS32           0x00000040
+#define CAP_LEVEL_II_OPLOCKS   0x00000080
+#define CAP_LOCK_AND_READ      0x00000100
+#define CAP_NT_FIND            0x00000200
+#define CAP_DFS                0x00001000
+#define CAP_INFOLEVEL_PASSTHRU 0x00002000
+#define CAP_LARGE_READ_X       0x00004000
+#define CAP_LARGE_WRITE_X      0x00008000
+#define CAP_LWIO               0x00010000 /* support fctl_srv_req_resume_key */
+#define CAP_UNIX               0x00800000
+#define CAP_COMPRESSED_DATA    0x02000000
+#define CAP_DYNAMIC_REAUTH     0x20000000
+#define CAP_PERSISTENT_HANDLES 0x40000000
+#define CAP_EXTENDED_SECURITY  0x80000000
+
+/* RFC 1002 session packet types */
+#define RFC1002_SESSION_MESSAGE 0x00
+#define RFC1002_SESSION_REQUEST  0x81
+#define RFC1002_POSITIVE_SESSION_RESPONSE 0x82
+#define RFC1002_NEGATIVE_SESSION_RESPONSE 0x83
+#define RFC1002_RETARGET_SESSION_RESPONSE 0x84
+#define RFC1002_SESSION_KEEP_ALIVE 0x85
+
+/* Action bits */
+#define GUEST_LOGIN 1
+
+/*
+ * SMB_COM_NEGOTIATE response (wct = 17).  The trailing union depends on
+ * whether extended security was negotiated.
+ */
+struct smb_negotiate_rsp {
+	struct smb_hdr hdr;     /* wct = 17 */
+	__le16 DialectIndex; /* 0xFFFF = no dialect acceptable */
+	__u8 SecurityMode;
+	__le16 MaxMpxCount;
+	__le16 MaxNumberVcs;
+	__le32 MaxBufferSize;
+	__le32 MaxRawSize;
+	__le32 SessionKey;
+	__le32 Capabilities;    /* see below */
+	__le32 SystemTimeLow;
+	__le32 SystemTimeHigh;
+	__le16 ServerTimeZone;
+	__u8 EncryptionKeyLength;
+	__le16 ByteCount;
+	union {
+		unsigned char EncryptionKey[8]; /* cap extended security off */
+		/* followed by Domain name - if extended security is off */
+		/* followed by 16 bytes of server GUID */
+		/* then security blob if cap_extended_security negotiated */
+		struct {
+			unsigned char GUID[SMB1_CLIENT_GUID_SIZE];
+			unsigned char SecurityBlob[1];
+		} __packed extended_response;
+	} __packed u;
+} __packed;
+
+/* SMB_COM_READ_ANDX request (wct = 12). */
+struct smb_com_read_req {
+	struct smb_hdr hdr;     /* wct = 12 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u16 Fid;
+	__le32 OffsetLow;
+	__le16 MaxCount;
+	__le16 MinCount;                /* obsolete */
+	__le32 MaxCountHigh;
+	__le16 Remaining;
+	__le32 OffsetHigh;
+	__le16 ByteCount;
+} __packed;
+
+/* SMB_COM_READ_ANDX response (wct = 12); read data follows the header. */
+struct smb_com_read_rsp {
+	struct smb_hdr hdr;     /* wct = 12 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 Remaining;
+	__le16 DataCompactionMode;
+	__le16 Reserved;
+	__le16 DataLength;
+	__le16 DataOffset;
+	__le16 DataLengthHigh;
+	__u64 Reserved2;
+	__le16 ByteCount;
+	/* read response data immediately follows */
+} __packed;
+
+/* SMB_COM_WRITE_ANDX request (wct = 14). */
+struct smb_com_write_req {
+	struct smb_hdr hdr;	/* wct = 14 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u16 Fid;
+	__le32 OffsetLow;
+	__u32 Reserved;
+	__le16 WriteMode;
+	__le16 Remaining;
+	__le16 DataLengthHigh;
+	__le16 DataLengthLow;
+	__le16 DataOffset;
+	__le32 OffsetHigh;
+	__le16 ByteCount;
+	__u8 Pad;		/*
+				 * BB check for whether padded to DWORD
+				 * boundary and optimum performance here
+				 */
+	/* legacy zero-length array: the write payload follows in-line */
+	char Data[0];
+} __packed;
+
+/* Legacy SMB_COM_WRITE request with 32-bit offsets (wct = 5). */
+struct smb_com_write_req_32bit {
+	struct smb_hdr hdr;	/* wct = 5 */
+	__u16 Fid;
+	__le16 Length;
+	__le32 Offset;
+	__u16 Estimate;
+	__le16 ByteCount;	/* must be greater than 2 */
+	__u8 BufferFormat;
+	__u16 DataLength;
+	/* legacy zero-length array: the write payload follows in-line */
+	char Data[0];
+} __packed;
+
+/* Legacy SMB_COM_WRITE response (wct = 1). */
+struct smb_com_write_rsp_32bit {
+	struct smb_hdr hdr;	/* wct = 1 */
+	__le16 Written;
+	__le16 ByteCount;	/* must be 0 */
+} __packed;
+
+/* SMB_COM_WRITE_ANDX response (wct = 6). */
+struct smb_com_write_rsp {
+	struct smb_hdr hdr;	/* wct = 6 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 Count;
+	__le16 Remaining;
+	__le16 CountHigh;
+	__u16  Reserved;
+	__le16 ByteCount;
+} __packed;
+
+/* SMB_COM_RENAME request (wct = 1); old and new names follow in-line. */
+struct smb_com_rename_req {
+	struct smb_hdr hdr;     /* wct = 1 */
+	__le16 SearchAttributes;        /* target file attributes */
+	__le16 ByteCount;
+	__u8 BufferFormat;      /* 4 = ASCII or Unicode */
+	unsigned char OldFileName[1];
+	/* followed by __u8 BufferFormat2 */
+	/* followed by NewFileName */
+} __packed;
+
+/* SMB_COM_RENAME response: header only. */
+struct smb_com_rename_rsp {
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+/* SecurityMode bits */
+#define SECMODE_USER          0x01      /* off indicates share level security */
+#define SECMODE_PW_ENCRYPT    0x02
+#define SECMODE_SIGN_ENABLED  0x04      /* SMB security signatures enabled */
+#define SECMODE_SIGN_REQUIRED 0x08      /* SMB security signatures required */
+
+struct smb_com_session_setup_req {	/* request format */
+	struct smb_hdr hdr;	/* wct = 12 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 MaxBufferSize;
+	__le16 MaxMpxCount;
+	__le16 VcNumber;
+	__u32 SessionKey;
+	__le16 SecurityBlobLength;
+	__u32 Reserved;
+	__le32 Capabilities;	/* see below */
+	__le16 ByteCount;
+	unsigned char SecurityBlob[1];	/* followed by */
+	/* STRING NativeOS */
+	/* STRING NativeLanMan */
+} __packed;	/* NTLM request format (with extended security) */
+
+struct smb_com_session_setup_req_no_secext {	/* request format */
+	struct smb_hdr hdr;	/* we will handle this :: wct = 13 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 MaxBufferSize;
+	__le16 MaxMpxCount;
+	__le16 VcNumber;
+	__u32 SessionKey;
+	__le16 CaseInsensitivePasswordLength;	/* ASCII password len */
+	__le16 CaseSensitivePasswordLength;	/* Unicode password length*/
+	__u32 Reserved;	/* see below */
+	__le32 Capabilities;
+	__le16 ByteCount;
+	unsigned char CaseInsensitivePassword[0];	/* followed by: */
+	/* unsigned char * CaseSensitivePassword; */
+	/* STRING AccountName */
+	/* STRING PrimaryDomain */
+	/* STRING NativeOS */
+	/* STRING NativeLanMan */
+} __packed;	/* NTLM request format (without extended security */
+
+struct smb_com_session_setup_resp {	/* default (NTLM) response format */
+	struct smb_hdr hdr;	/* wct = 4 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 Action;	/* see below */
+	__le16 SecurityBlobLength;
+	__le16 ByteCount;
+	unsigned char SecurityBlob[1];	/* followed by */
+	/*      unsigned char  * NativeOS;      */
+	/*      unsigned char  * NativeLanMan;  */
+	/*      unsigned char  * PrimaryDomain; */
+} __packed;	/* NTLM response (with or without extended sec) */
+
+struct smb_com_session_setup_old_resp { /* default (NTLM) response format */
+	struct smb_hdr hdr;	/* wct = 3 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 Action;	/* see below */
+	__le16 ByteCount;
+	unsigned char NativeOS[1];	/* followed by */
+	/*      unsigned char * NativeLanMan; */
+	/*      unsigned char * PrimaryDomain; */
+} __packed;	/* pre-NTLM (LANMAN2.1) response */
+
+/* Overlay of all SESSION_SETUP_ANDX wire layouts on one buffer. */
+union smb_com_session_setup_andx {
+	struct smb_com_session_setup_req req;
+	struct smb_com_session_setup_req_no_secext req_no_secext;
+	struct smb_com_session_setup_resp resp;
+	struct smb_com_session_setup_old_resp old_resp;
+} __packed;
+
+/* TREE_CONNECT_ANDX request (wct = 4); note: starts at WordCount, no hdr. */
+struct smb_com_tconx_req {
+	__u8 WordCount;  /* wct = 4, it could be ANDX */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 Flags;           /* see below */
+	__le16 PasswordLength;
+	__le16 ByteCount;
+	unsigned char Password[1];      /* followed by */
+	/* STRING Path    *//* \\server\share name */
+	/* STRING Service */
+} __packed;
+
+/* TREE_CONNECT_ANDX response, basic form (wct = 3). */
+struct smb_com_tconx_rsp {
+	__u8 WordCount;     /* wct = 3 , not extended response */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 OptionalSupport; /* see below */
+	__le16 ByteCount;
+	unsigned char Service[1];       /* always ASCII, not Unicode */
+	/* STRING NativeFileSystem */
+} __packed;
+
+/* TREE_CONNECT_ANDX response, extended form (wct = 7). */
+struct smb_com_tconx_rsp_ext {
+	__u8 WordCount;	/* wct = 7, extended response */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 OptionalSupport; /* see below */
+	__le32 MaximalShareAccessRights;
+	__le32 GuestMaximalShareAccessRights;
+	__le16 ByteCount;
+	unsigned char Service[1];       /* always ASCII, not Unicode */
+	/* STRING NativeFileSystem */
+} __packed;
+
+struct andx_block {	/* generic header shared by chained (AndX) commands */
+	__u8 WordCount;
+	__u8 AndXCommand;	/* next command in the AndX chain */
+	__u8 AndXReserved;
+	__le16 AndXOffset;	/* presumably offset of the next chained block -- confirm against dispatcher */
+} __packed;
+
<test></test>
+struct locking_andx_range64 {	/* 64-bit lock range; presumably used with LOCKING_ANDX_LARGE_FILES */
+	__le16 Pid;	/* process id owning the lock */
+	__le16 Pad;
+	__le32 OffsetHigh;	/* high 32 bits of the byte offset */
+	__le32 OffsetLow;	/* low 32 bits of the byte offset */
+	__le32 LengthHigh;	/* high 32 bits of the range length */
+	__le32 LengthLow;	/* low 32 bits of the range length */
+} __packed;
+
+struct locking_andx_range32 {	/* 32-bit lock range (non-large-files form) */
+	__le16 Pid;	/* process id owning the lock */
+	__le32 Offset;	/* byte offset of locked range */
+	__le32 Length;	/* length of locked range */
+} __packed;
+
+#define LOCKING_ANDX_SHARED_LOCK     0x01
+#define LOCKING_ANDX_OPLOCK_RELEASE  0x02
+#define LOCKING_ANDX_CHANGE_LOCKTYPE 0x04
+#define LOCKING_ANDX_CANCEL_LOCK     0x08
+#define LOCKING_ANDX_LARGE_FILES     0x10       /* always on for us */
+
+struct smb_com_lock_req {
+	struct smb_hdr hdr;	/* wct = 8 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u16 Fid;
+	__u8 LockType;
+	__u8 OplockLevel;
+	__le32 Timeout;
+	__le16 NumberOfUnlocks;
+	__le16 NumberOfLocks;
+	__le16 ByteCount;
+	char *Locks[1];
+} __packed;
+
+struct smb_com_lock_rsp {
+	struct smb_hdr hdr;     /* wct = 2 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 ByteCount;
+} __packed;
+
+struct smb_com_query_information_disk_rsp {
+	struct smb_hdr hdr;     /* wct = 5 */
+	__le16 TotalUnits;
+	__le16 BlocksPerUnit;
+	__le16 BlockSize;
+	__le16 FreeUnits;
+	__le16 Pad;
+	__le16 ByteCount;
+} __packed;
+
+/* tree connect Flags */
+#define DISCONNECT_TID          0x0001
+#define TCON_EXTENDED_SIGNATURES 0x0004
+#define TCON_EXTENDED_SECINFO   0x0008
+
+/* OptionalSupport bits */
+#define SMB_SUPPORT_SEARCH_BITS 0x0001  /*
+					 * "must have" directory search bits
+					 * (exclusive searches supported)
+					 */
+#define SMB_SHARE_IS_IN_DFS     0x0002
+#define SMB_CSC_MASK               0x000C
+/* CSC flags defined as follows */
+#define SMB_CSC_CACHE_MANUAL_REINT 0x0000
+#define SMB_CSC_CACHE_AUTO_REINT   0x0004
+#define SMB_CSC_CACHE_VDO          0x0008
+#define SMB_CSC_NO_CACHING         0x000C
+#define SMB_UNIQUE_FILE_NAME    0x0010
+#define SMB_EXTENDED_SIGNATURES 0x0020
+
+/* OpenFlags */
+#define REQ_MORE_INFO      0x00000001  /* legacy (OPEN_AND_X) only */
+#define REQ_OPLOCK         0x00000002
+#define REQ_BATCHOPLOCK    0x00000004
+#define REQ_OPENDIRONLY    0x00000008
+#define REQ_EXTENDED_INFO  0x00000010
+
+/* File type */
+#define DISK_TYPE               0x0000
+#define BYTE_PIPE_TYPE          0x0001
+#define MESSAGE_PIPE_TYPE       0x0002
+#define PRINTER_TYPE            0x0003
+#define COMM_DEV_TYPE           0x0004
+#define UNKNOWN_TYPE            0xFFFF
+
+/* Device Type or File Status Flags */
+#define NO_EAS                  0x0001
+#define NO_SUBSTREAMS           0x0002
+#define NO_REPARSETAG           0x0004
+/* following flags can apply if pipe */
+#define ICOUNT_MASK             0x00FF
+#define PIPE_READ_MODE          0x0100
+#define NAMED_PIPE_TYPE         0x0400
+#define PIPE_END_POINT          0x4000
+#define BLOCKING_NAMED_PIPE     0x8000
+
+/* ShareAccess flags */
+#define FILE_NO_SHARE     0x00000000
+#define FILE_SHARE_READ   0x00000001
+#define FILE_SHARE_WRITE  0x00000002
+#define FILE_SHARE_DELETE 0x00000004
+#define FILE_SHARE_ALL    0x00000007
+
+/* CreateDisposition flags, similar to CreateAction as well */
+#define FILE_SUPERSEDE    0x00000000
+#define FILE_OPEN         0x00000001
+#define FILE_CREATE       0x00000002
+#define FILE_OPEN_IF      0x00000003
+#define FILE_OVERWRITE    0x00000004
+#define FILE_OVERWRITE_IF 0x00000005
+
+/* ImpersonationLevel flags */
+#define SECURITY_ANONYMOUS      0
+#define SECURITY_IDENTIFICATION 1
+#define SECURITY_IMPERSONATION  2
+#define SECURITY_DELEGATION     3
+
+/* SecurityFlags */
+#define SECURITY_CONTEXT_TRACKING 0x01
+#define SECURITY_EFFECTIVE_ONLY   0x02
+
+struct smb_com_open_req {       /* also handles create */
+	struct smb_hdr hdr;     /* wct = 24 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u8 Reserved;          /* Must Be Zero */
+	__le16 NameLength;
+	__le32 OpenFlags;
+	__u32  RootDirectoryFid;
+	__le32 DesiredAccess;
+	__le64 AllocationSize;
+	__le32 FileAttributes;
+	__le32 ShareAccess;
+	__le32 CreateDisposition;
+	__le32 CreateOptions;
+	__le32 ImpersonationLevel;
+	__u8 SecurityFlags;
+	__le16 ByteCount;
+	char fileName[1];
+} __packed;
+
+/* open response for CreateAction shifted left */
+#define CIFS_CREATE_ACTION 0x20000 /* file created */
+
+/* Basic file attributes */
+#define SMB_FILE_ATTRIBUTE_NORMAL	0x0000
+#define SMB_FILE_ATTRIBUTE_READONLY	0x0001
+#define SMB_FILE_ATTRIBUTE_HIDDEN	0x0002
+#define SMB_FILE_ATTRIBUTE_SYSTEM	0x0004
+#define SMB_FILE_ATTRIBUTE_VOLUME	0x0008
+#define SMB_FILE_ATTRIBUTE_DIRECTORY	0x0010
+#define SMB_FILE_ATTRIBUTE_ARCHIVE	0x0020
+#define SMB_SEARCH_ATTRIBUTE_READONLY	0x0100
+#define SMB_SEARCH_ATTRIBUTE_HIDDEN	0x0200
+#define SMB_SEARCH_ATTRIBUTE_SYSTEM	0x0400
+#define SMB_SEARCH_ATTRIBUTE_DIRECTORY	0x1000
+#define SMB_SEARCH_ATTRIBUTE_ARCHIVE	0x2000
+
+struct smb_com_open_rsp {
+	struct smb_hdr hdr;     /* wct = 34 BB */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u8 OplockLevel;
+	__u16 Fid;
+	__le32 CreateAction;
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le32 FileAttributes;
+	__le64 AllocationSize;
+	__le64 EndOfFile;
+	__le16 FileType;
+	__le16 DeviceState;
+	__u8 DirectoryFlag;
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+struct smb_com_open_ext_rsp {
+	struct smb_hdr hdr;     /* wct = 42 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u8 OplockLevel;
+	__u16 Fid;
+	__le32 CreateAction;
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le32 FileAttributes;
+	__le64 AllocationSize;
+	__le64 EndOfFile;
+	__le16 FileType;
+	__le16 DeviceState;
+	__u8 DirectoryFlag;
+	__u8 VolId[16];
+	__u64 fid;
+	__le32 MaxAccess;
+	__le32 GuestAccess;
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+struct smb_com_close_req {
+	struct smb_hdr hdr;     /* wct = 3 */
+	__u16 FileID;
+	__le32 LastWriteTime;    /* should be zero or -1 */
+	__le16  ByteCount;        /* 0 */
+} __packed;
+
+struct smb_com_close_rsp {
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+struct smb_com_echo_req {
+	struct  smb_hdr hdr;
+	__le16  EchoCount;
+	__le16  ByteCount;
+	char    Data[1];
+} __packed;
+
+struct smb_com_echo_rsp {
+	struct  smb_hdr hdr;
+	__le16  SequenceNumber;
+	__le16  ByteCount;
+	char    Data[1];
+} __packed;
+
+struct smb_com_flush_req {
+	struct smb_hdr hdr;     /* wct = 1 */
+	__u16 FileID;
+	__le16 ByteCount;        /* 0 */
+} __packed;
+
+struct smb_com_flush_rsp {
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+/* SMB_COM_TRANSACTION */
+struct smb_com_trans_req {
+	struct smb_hdr hdr;
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;
+	__u8  Pad;
+	__u8 Data[1];
+} __packed;
+
+struct smb_com_trans_pipe_req {
+	struct smb_hdr hdr;
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__u16 SubCommand;
+	__u16 fid;
+	__le16 ByteCount;
+	__u8  Pad;
+	__u8 Data[1];
+} __packed;
+
+struct smb_com_trans_rsp {
+	struct smb_hdr hdr;     /* wct = 10+ */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__u16 Reserved;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 ParameterDisplacement;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__le16 DataDisplacement;
+	__u8 SetupCount;
+	__u8 Reserved1;
+	__le16 ByteCount;
+	__u8 Pad;
+} __packed;
+
+/* SMB_COM_TRANSACTION subcommands */
+
+#define TRANSACT_DCERPCCMD	0x26
+
+/*****************************************************************************
+ * TRANS2 command implementation functions
+ *****************************************************************************/
+#define NO_CHANGE_64          0xFFFFFFFFFFFFFFFFULL
+
+/* QFSInfo Levels */
+#define SMB_INFO_ALLOCATION         1
+#define SMB_INFO_VOLUME             2
+#define SMB_QUERY_FS_VOLUME_INFO    0x102
+#define SMB_QUERY_FS_SIZE_INFO      0x103
+#define SMB_QUERY_FS_DEVICE_INFO    0x104
+#define SMB_QUERY_FS_ATTRIBUTE_INFO 0x105
+#define SMB_QUERY_CIFS_UNIX_INFO    0x200
+#define SMB_QUERY_POSIX_FS_INFO     0x201
+#define SMB_QUERY_POSIX_WHO_AM_I    0x202
+#define SMB_REQUEST_TRANSPORT_ENCRYPTION 0x203
+#define SMB_QUERY_FS_PROXY          0x204 /*
+					   * WAFS enabled. Returns structure
+					   * FILE_SYSTEM__UNIX_INFO to tell
+					   * whether new NTIOCTL available
+					   * (0xACE) for WAN friendly SMB
+					   * operations to be carried
+					   */
+#define SMB_QUERY_LABEL_INFO        0x3ea
+#define SMB_QUERY_FS_QUOTA_INFO     0x3ee
+#define SMB_QUERY_FS_FULL_SIZE_INFO 0x3ef
+#define SMB_QUERY_OBJECTID_INFO     0x3f0
+
+struct trans2_resp {
+	/* struct smb_hdr hdr precedes. Note wct = 10 + setup count */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__u16 Reserved;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 ParameterDisplacement;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__le16 DataDisplacement;
+	__u8 SetupCount;
+	__u8 Reserved1;
+	/*
+	 * SetupWords[SetupCount];
+	 * __u16 ByteCount;
+	 * __u16 Reserved2;
+	 */
+	/* data area follows */
+} __packed;
+
+struct smb_com_trans2_req {
+	struct smb_hdr hdr;
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;      /* one setup word */
+} __packed;
+
+struct smb_com_trans2_qfsi_req {
+	struct smb_hdr hdr;     /* wct = 14+ */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;      /* one setup word */
+	__le16 ByteCount;
+	__u8 Pad;
+	__le16 InformationLevel;
+} __packed;
+
+struct smb_com_trans2_qfsi_req_params {
+	__le16 InformationLevel;
+} __packed;
+
+#define CIFS_SEARCH_CLOSE_ALWAYS	0x0001
+#define CIFS_SEARCH_CLOSE_AT_END	0x0002
+#define CIFS_SEARCH_RETURN_RESUME	0x0004
+#define CIFS_SEARCH_CONTINUE_FROM_LAST	0x0008
+#define CIFS_SEARCH_BACKUP_SEARCH	0x0010
+
+struct smb_com_trans2_ffirst_req_params {
+	__le16 SearchAttributes;
+	__le16 SearchCount;
+	__le16 SearchFlags;
+	__le16 InformationLevel;
+	__le32 SearchStorageType;
+	char FileName[1];
+} __packed;
+
+struct smb_com_trans2_ffirst_rsp_parms {
+	__u16 SearchHandle;
+	__le16 SearchCount;
+	__le16 EndofSearch;
+	__le16 EAErrorOffset;
+	__le16 LastNameOffset;
+} __packed;
+
+struct smb_com_trans2_fnext_req_params {
+	__u16 SearchHandle;
+	__le16 SearchCount;
+	__le16 InformationLevel;
+	__u32 ResumeKey;
+	__le16 SearchFlags;
+	char ResumeFileName[1];
+} __packed;
+
+struct smb_com_trans2_fnext_rsp_params {
+	__le16 SearchCount;
+	__le16 EndofSearch;
+	__le16 EAErrorOffset;
+	__le16 LastNameOffset;
+} __packed;
+
+struct smb_com_trans2_rsp {
+	struct smb_hdr hdr;     /* wct = 10 + SetupCount */
+	struct trans2_resp t2;
+	__le16 ByteCount;
+	__u8 Pad;       /* may be three bytes? *//* followed by data area */
+	__u8 Buffer[0];
+} __packed;
+
+struct file_internal_info {
+	__le64  UniqueId; /* inode number */
+} __packed;      /* level 0x3ee */
+
+/* DeviceType Flags */
+#define FILE_DEVICE_CD_ROM              0x00000002
+#define FILE_DEVICE_CD_ROM_FILE_SYSTEM  0x00000003
+#define FILE_DEVICE_DFS                 0x00000006
+#define FILE_DEVICE_DISK                0x00000007
+#define FILE_DEVICE_DISK_FILE_SYSTEM    0x00000008
+#define FILE_DEVICE_FILE_SYSTEM         0x00000009
+#define FILE_DEVICE_NAMED_PIPE          0x00000011
+#define FILE_DEVICE_NETWORK             0x00000012
+#define FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014
+#define FILE_DEVICE_NULL                0x00000015
+#define FILE_DEVICE_PARALLEL_PORT       0x00000016
+#define FILE_DEVICE_PRINTER             0x00000018
+#define FILE_DEVICE_SERIAL_PORT         0x0000001b
+#define FILE_DEVICE_STREAMS             0x0000001e
+#define FILE_DEVICE_TAPE                0x0000001f
+#define FILE_DEVICE_TAPE_FILE_SYSTEM    0x00000020
+#define FILE_DEVICE_VIRTUAL_DISK        0x00000024
+#define FILE_DEVICE_NETWORK_REDIRECTOR  0x00000028
+
+/* Filesystem Attributes. */
+#define FILE_CASE_SENSITIVE_SEARCH      0x00000001
+#define FILE_CASE_PRESERVED_NAMES       0x00000002
+#define FILE_UNICODE_ON_DISK            0x00000004
+/* According to cifs9f, this is 4, not 8 */
+/* According to testing, this actually sets the security attribute! */
+#define FILE_PERSISTENT_ACLS            0x00000008
+#define FILE_FILE_COMPRESSION           0x00000010
+#define FILE_VOLUME_QUOTAS              0x00000020
+#define FILE_SUPPORTS_SPARSE_FILES      0x00000040
+#define FILE_SUPPORTS_REPARSE_POINTS    0x00000080
+#define FILE_SUPPORTS_REMOTE_STORAGE    0x00000100
+#define FS_LFN_APIS                     0x00004000
+#define FILE_VOLUME_IS_COMPRESSED       0x00008000
+#define FILE_SUPPORTS_OBJECT_IDS        0x00010000
+#define FILE_SUPPORTS_ENCRYPTION        0x00020000
+#define FILE_NAMED_STREAMS              0x00040000
+#define FILE_READ_ONLY_VOLUME           0x00080000
+
+/* PathInfo/FileInfo infolevels */
+#define SMB_INFO_STANDARD                   1
+#define SMB_SET_FILE_EA                     2
+#define SMB_QUERY_FILE_EA_SIZE              2
+#define SMB_INFO_QUERY_EAS_FROM_LIST        3
+#define SMB_INFO_QUERY_ALL_EAS              4
+#define SMB_INFO_IS_NAME_VALID              6
+#define SMB_QUERY_FILE_BASIC_INFO       0x101
+#define SMB_QUERY_FILE_STANDARD_INFO    0x102
+#define SMB_QUERY_FILE_EA_INFO          0x103
+#define SMB_QUERY_FILE_NAME_INFO        0x104
+#define SMB_QUERY_FILE_ALLOCATION_INFO  0x105
+#define SMB_QUERY_FILE_END_OF_FILEINFO  0x106
+#define SMB_QUERY_FILE_ALL_INFO         0x107
+#define SMB_QUERY_ALT_NAME_INFO         0x108
+#define SMB_QUERY_FILE_STREAM_INFO      0x109
+#define SMB_QUERY_FILE_COMPRESSION_INFO 0x10B
+#define SMB_QUERY_FILE_UNIX_BASIC       0x200
+#define SMB_QUERY_FILE_UNIX_LINK        0x201
+#define SMB_QUERY_POSIX_ACL             0x204
+#define SMB_QUERY_XATTR                 0x205  /* e.g. system EA name space */
+#define SMB_QUERY_ATTR_FLAGS            0x206  /* append,immutable etc. */
+#define SMB_QUERY_POSIX_PERMISSION      0x207
+#define SMB_QUERY_POSIX_LOCK            0x208
+/* #define SMB_POSIX_OPEN               0x209 */
+/* #define SMB_POSIX_UNLINK             0x20a */
+#define SMB_QUERY_FILE__UNIX_INFO2      0x20b
+#define SMB_QUERY_FILE_INTERNAL_INFO    0x3ee
+#define SMB_QUERY_FILE_ACCESS_INFO      0x3f0
+#define SMB_QUERY_FILE_NAME_INFO2       0x3f1 /* 0x30 bytes */
+#define SMB_QUERY_FILE_POSITION_INFO    0x3f6
+#define SMB_QUERY_FILE_MODE_INFO        0x3f8
+#define SMB_QUERY_FILE_ALGN_INFO        0x3f9
+
+
+#define SMB_SET_FILE_BASIC_INFO         0x101
+#define SMB_SET_FILE_DISPOSITION_INFO   0x102
+#define SMB_SET_FILE_ALLOCATION_INFO    0x103
+#define SMB_SET_FILE_END_OF_FILE_INFO   0x104
+#define SMB_SET_FILE_UNIX_BASIC         0x200
+#define SMB_SET_FILE_UNIX_LINK          0x201
+#define SMB_SET_FILE_UNIX_HLINK         0x203
+#define SMB_SET_POSIX_ACL               0x204
+#define SMB_SET_XATTR                   0x205
+#define SMB_SET_ATTR_FLAGS              0x206  /* append, immutable etc. */
+#define SMB_SET_POSIX_LOCK              0x208
+#define SMB_POSIX_OPEN                  0x209
+#define SMB_POSIX_UNLINK                0x20a
+#define SMB_SET_FILE_UNIX_INFO2         0x20b
+#define SMB_SET_FILE_BASIC_INFO2        0x3ec
+#define SMB_SET_FILE_RENAME_INFORMATION 0x3f2 /* BB check if qpathinfo too */
+#define SMB_SET_FILE_DISPOSITION_INFORMATION   0x3f5   /* alias for 0x102 */
+#define SMB_FILE_ALL_INFO2              0x3fa
+#define SMB_SET_FILE_ALLOCATION_INFO2   0x3fb
+#define SMB_SET_FILE_END_OF_FILE_INFO2  0x3fc
+#define SMB_FILE_MOVE_CLUSTER_INFO      0x407
+#define SMB_FILE_QUOTA_INFO             0x408
+#define SMB_FILE_REPARSEPOINT_INFO      0x409
+#define SMB_FILE_MAXIMUM_INFO           0x40d
+
+/* Find File infolevels */
+#define SMB_FIND_FILE_INFO_STANDARD       0x001
+#define SMB_FIND_FILE_QUERY_EA_SIZE       0x002
+#define SMB_FIND_FILE_QUERY_EAS_FROM_LIST 0x003
+#define SMB_FIND_FILE_DIRECTORY_INFO      0x101
+#define SMB_FIND_FILE_FULL_DIRECTORY_INFO 0x102
+#define SMB_FIND_FILE_NAMES_INFO          0x103
+#define SMB_FIND_FILE_BOTH_DIRECTORY_INFO 0x104
+#define SMB_FIND_FILE_ID_FULL_DIR_INFO    0x105
+#define SMB_FIND_FILE_ID_BOTH_DIR_INFO    0x106
+#define SMB_FIND_FILE_UNIX                0x202
+
+struct smb_com_trans2_qpi_req {
+	struct smb_hdr hdr;     /* wct = 14+ */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;      /* one setup word */
+	__le16 ByteCount;
+	__u8 Pad;
+	__le16 InformationLevel;
+	__u32 Reserved4;
+	char FileName[1];
+} __packed;
+
+struct trans2_qpi_req_params {
+	__le16 InformationLevel;
+	__u32 Reserved4;
+	char FileName[1];
+} __packed;
+
+/******************************************************************************/
+/* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */
+/******************************************************************************/
+struct file_basic_info {
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le32 Attributes;
+	__u32 Pad;
+} __packed;      /* size info, level 0x101 */
+
+struct file_standard_info {
+	__le64 AllocationSize;
+	__le64 EndOfFile;
+	__le32 NumberOfLinks;
+	__u8 DeletePending;
+	__u8 Directory;
+	__le16 Reserved;
+} __packed;
+
+struct file_ea_info {	/* data for level 0x103 (SMB_QUERY_FILE_EA_INFO) */
+	__le32 EaSize;	/* size of the file's extended attributes, in bytes */
+} __packed;
+
+struct alt_name_info {	/* data for level 0x108 (SMB_QUERY_ALT_NAME_INFO) */
+	__le32 FileNameLength;	/* length of FileName, in bytes */
+	char FileName[1];	/* variable-length alternate (8.3) name follows */
+} __packed;
+
+struct file_name_info {	/* data for level 0x104 (SMB_QUERY_FILE_NAME_INFO) */
+	__le32 FileNameLength;	/* length of FileName, in bytes */
+	char FileName[1];	/* variable-length name follows */
+} __packed;
+
+/* data block encoding of response to level 263 (0x107) QPathInfo */
+struct file_all_info {
+	__le64 CreationTime;
+	__le64 LastAccessTime;
+	__le64 LastWriteTime;
+	__le64 ChangeTime;
+	__le32 Attributes;
+	__u32 Pad1;
+	__le64 AllocationSize;
+	__le64 EndOfFile;       /* size ie offset to first free byte in file */
+	__le32 NumberOfLinks;   /* hard links */
+	__u8 DeletePending;
+	__u8 Directory;
+	__u16 Pad2;
+	__le32 EASize;
+	__le32 FileNameLength;
+	char FileName[1];
+} __packed; /* level 0x107 QPathInfo */
+
+/* set path info/open file */
+/* defines for enumerating possible values of the Unix type field below */
+#define UNIX_FILE      0
+#define UNIX_DIR       1
+#define UNIX_SYMLINK   2
+#define UNIX_CHARDEV   3
+#define UNIX_BLOCKDEV  4
+#define UNIX_FIFO      5
+#define UNIX_SOCKET    6
+#define UNIX_UNKNOWN   0xFFFFFFFF
+
+struct file_unix_basic_info {
+	__le64 EndOfFile;
+	__le64 NumOfBytes;
+	__le64 LastStatusChange; /*SNIA specs DCE time for the 3 time fields */
+	__le64 LastAccessTime;
+	__le64 LastModificationTime;
+	__le64 Uid;
+	__le64 Gid;
+	__le32 Type;
+	__le64 DevMajor;
+	__le64 DevMinor;
+	__le64 UniqueId;
+	__le64 Permissions;
+	__le64 Nlinks;
+} __packed; /* level 0x200 QPathInfo */
+
+struct smb_com_trans2_spi_req {
+	struct smb_hdr hdr;     /* wct = 15 */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;      /* one setup word */
+	__le16 ByteCount;
+	__u8 Pad;
+	__u16 Pad1;
+	__le16 InformationLevel;
+	__u32 Reserved4;
+	char FileName[1];
+} __packed;
+
+struct smb_com_trans2_spi_rsp {
+	struct smb_hdr hdr;     /* wct = 10 + SetupCount */
+	struct trans2_resp t2;
+	__le16 ByteCount;
+	__u16 Reserved2; /* parameter word is present for infolevels > 100 */
+} __packed;
+
+/* POSIX Open Flags */
+#define SMB_O_RDONLY     0x1
+#define SMB_O_WRONLY    0x2
+#define SMB_O_RDWR      0x4
+#define SMB_O_CREAT     0x10
+#define SMB_O_EXCL      0x20
+#define SMB_O_TRUNC     0x40
+#define SMB_O_APPEND    0x80
+#define SMB_O_SYNC      0x100
+#define SMB_O_DIRECTORY 0x200
+#define SMB_O_NOFOLLOW  0x400
+#define SMB_O_DIRECT    0x800
+#define SMB_ACCMODE	0x7
+
+/* info level response for SMB_POSIX_PATH_OPEN */
+#define SMB_NO_INFO_LEVEL_RESPONSE 0xFFFF
+
+struct open_psx_req {
+	__le32 OpenFlags; /* same as NT CreateX */
+	__le32 PosixOpenFlags;
+	__le64 Permissions;
+	__le16 Level; /* reply level requested (see QPathInfo levels) */
+} __packed; /* level 0x209 SetPathInfo data */
+
+struct open_psx_rsp {
+	__le16 OplockFlags;
+	__u16 Fid;
+	__le32 CreateAction;
+	__le16 ReturnedLevel;
+	__le16 Pad;
+	/* struct following varies based on requested level */
+} __packed; /* level 0x209 SetPathInfo data */
+
+struct unlink_psx_rsp {
+	__le16 EAErrorOffset;
+} __packed; /* level 0x209 SetPathInfo data*/
+
+/* Version numbers for CIFS UNIX major and minor. */
+#define CIFS_UNIX_MAJOR_VERSION 1
+#define CIFS_UNIX_MINOR_VERSION 0
+
+struct filesystem_unix_info {
+	__le16 MajorVersionNumber;
+	__le16 MinorVersionNumber;
+	__le64 Capability;
+} __packed; /* Unix extension level 0x200*/
+
+/* Linux/Unix extensions capability flags */
+#define CIFS_UNIX_FCNTL_CAP             0x00000001 /* support for fcntl locks */
+#define CIFS_UNIX_POSIX_ACL_CAP         0x00000002 /* support getfacl/setfacl */
+#define CIFS_UNIX_XATTR_CAP             0x00000004 /* support new namespace   */
+#define CIFS_UNIX_EXTATTR_CAP           0x00000008 /* support chattr/chflag   */
+#define CIFS_UNIX_POSIX_PATHNAMES_CAP   0x00000010 /* Allow POSIX path chars  */
+#define CIFS_UNIX_POSIX_PATH_OPS_CAP    0x00000020 /*
+						    * Allow new POSIX path based
+						    * calls including posix open
+						    * and posix unlink
+						    */
+#define CIFS_UNIX_LARGE_READ_CAP        0x00000040 /*
+						    * support reads >128K (up
+						    * to 0xFFFF00)
+						    */
+#define CIFS_UNIX_LARGE_WRITE_CAP       0x00000080
+#define CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP 0x00000100 /* can do SPNEGO crypt */
+#define CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP  0x00000200 /* must do  */
+#define CIFS_UNIX_PROXY_CAP             0x00000400 /*
+						    * Proxy cap: 0xACE ioctl and
+						    * QFS PROXY call
+						    */
+#ifdef CONFIG_CIFS_POSIX
+/* We presumably don't need the 0x20 POSIX_PATH_OPS_CAP, since we never send
+ * LockingX instead of a posix locking call on a unix session (and we do not
+ * expect LockingX to use different (ie Windows) semantics than posix locking
+ * on the same session). If WINE needs to do this later, we can add this cap
+ * back in then.
+ */
+
+/* #define CIFS_UNIX_CAP_MASK              0x000000fb */
+#define CIFS_UNIX_CAP_MASK              0x000003db
+#else
+#define CIFS_UNIX_CAP_MASK              0x00000013
+#endif /* CONFIG_CIFS_POSIX */
+
+
+#define CIFS_POSIX_EXTENSIONS           0x00000010 /* support for new QFSInfo */
+
+/* Our server caps */
+
+#define SMB_UNIX_CAPS	(CIFS_UNIX_FCNTL_CAP | CIFS_UNIX_POSIX_ACL_CAP | \
+		CIFS_UNIX_XATTR_CAP | CIFS_UNIX_POSIX_PATHNAMES_CAP| \
+		CIFS_UNIX_POSIX_PATH_OPS_CAP | CIFS_UNIX_LARGE_READ_CAP | \
+		CIFS_UNIX_LARGE_WRITE_CAP)
+
+#define SMB_SET_CIFS_UNIX_INFO    0x200
+/* Level 0x200 request structure follows */
+struct smb_com_trans2_setfsi_req {
+	struct smb_hdr hdr;     /* wct = 15 */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;  /* 4 */
+	__le16 ParameterOffset;
+	__le16 DataCount;       /* 12 */
+	__le16 DataOffset;
+	__u8 SetupCount;        /* one */
+	__u8 Reserved3;
+	__le16 SubCommand;      /* TRANS2_SET_FS_INFORMATION */
+	__le16 ByteCount;
+	__u8 Pad;
+	__u16 FileNum;          /* Parameters start. */
+	__le16 InformationLevel;/* Parameters end. */
+	__le16 ClientUnixMajor; /* Data start. */
+	__le16 ClientUnixMinor;
+	__le64 ClientUnixCap;   /* Data end */
+} __packed;
+
+/* response for setfsinfo levels 0x200 and 0x203 */
+struct smb_com_trans2_setfsi_rsp {
+	struct smb_hdr hdr;     /* wct = 10 */
+	struct trans2_resp t2;
+	__le16 ByteCount;
+} __packed;
+
+struct smb_com_trans2_setfsi_req_params {
+	__u16 FileNum;
+	__le16 InformationLevel;
+	__le16 ClientUnixMajor; /* Data start. */
+	__le16 ClientUnixMinor;
+	__le64 ClientUnixCap;   /* Data end */
+} __packed;
+
+struct smb_trans2_qfi_req_params {
+	__u16   Fid;
+	__le16  InformationLevel;
+} __packed;
+
+/* FIND FIRST2 and FIND NEXT2 INFORMATION Level Codes*/
+
+struct find_info_standard {
+	__le16 CreationDate; /* SMB Date see above */
+	__le16 CreationTime; /* SMB Time */
+	__le16 LastAccessDate;
+	__le16 LastAccessTime;
+	__le16 LastWriteDate;
+	__le16 LastWriteTime;
+	__le32 DataSize; /* File Size (EOF) */
+	__le32 AllocationSize;
+	__le16 Attributes; /* verify not u32 */
+	__le16 FileNameLength;
+	char FileName[1];
+} __packed;
+
+struct find_info_query_ea_size {
+	__le16 CreationDate; /* SMB Date see above */
+	__le16 CreationTime; /* SMB Time */
+	__le16 LastAccessDate;
+	__le16 LastAccessTime;
+	__le16 LastWriteDate;
+	__le16 LastWriteTime;
+	__le32 DataSize; /* File Size (EOF) */
+	__le32 AllocationSize;
+	__le16 Attributes; /* verify not u32 */
+	__le32 EASize;
+	__u8 FileNameLength;
+	char FileName[1];
+} __packed;
+
+struct file_unix_info {
+	__le32 NextEntryOffset;
+	__u32 ResumeKey; /* as with FileIndex - no need to convert */
+	struct file_unix_basic_info basic;
+	char FileName[1];
+} __packed; /* level 0x202 */
+
+struct smb_com_trans2_sfi_req {
+	struct smb_hdr hdr;     /* wct = 15 */
+	__le16 TotalParameterCount;
+	__le16 TotalDataCount;
+	__le16 MaxParameterCount;
+	__le16 MaxDataCount;
+	__u8 MaxSetupCount;
+	__u8 Reserved;
+	__le16 Flags;
+	__le32 Timeout;
+	__u16 Reserved2;
+	__le16 ParameterCount;
+	__le16 ParameterOffset;
+	__le16 DataCount;
+	__le16 DataOffset;
+	__u8 SetupCount;
+	__u8 Reserved3;
+	__le16 SubCommand;      /* one setup word */
+	__le16 ByteCount;
+	__u8 Pad;
+	__u16 Pad1;
+	__u16 Fid;
+	__le16 InformationLevel;
+	__u16 Reserved4;
+} __packed;
+
+struct smb_com_trans2_sfi_rsp {
+	struct smb_hdr hdr;     /* wct = 10 + SetupCount */
+	struct trans2_resp t2;
+	__le16 ByteCount;
+	__u16 Reserved2;        /*
+				 * parameter word reserved -
+				 * present for infolevels > 100
+				 */
+} __packed;
+
+struct file_end_of_file_info {
+	__le64 FileSize;                /* offset to end of file */
+} __packed; /* size info, level 0x104 for set, 0x106 for query */
+
+struct smb_com_create_directory_req {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__le16 ByteCount;
+	__u8 BufferFormat;	/* 4 = ASCII */
+	unsigned char DirName[1];
+} __packed;
+
+struct smb_com_create_directory_rsp {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__le16 ByteCount;	/* bct = 0 */
+} __packed;
+
+struct smb_com_check_directory_req {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__le16 ByteCount;
+	__u8 BufferFormat;	/* 4 = ASCII */
+	unsigned char DirName[1];
+} __packed;
+
+struct smb_com_check_directory_rsp {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__le16 ByteCount;	/* bct = 0 */
+} __packed;
+
+struct smb_com_process_exit_rsp {
+	struct smb_hdr hdr;	/* wct = 0 */
+	__le16 ByteCount;	/* bct = 0 */
+} __packed;
+
+struct smb_com_delete_directory_req {
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;
+	__u8 BufferFormat;      /* 4 = ASCII */
+	unsigned char DirName[1];
+} __packed;
+
+struct smb_com_delete_directory_rsp {
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+struct smb_com_delete_file_req {
+	struct smb_hdr hdr;     /* wct = 1 */
+	__le16 SearchAttributes;
+	__le16 ByteCount;
+	__u8 BufferFormat;      /* 4 = ASCII */
+	unsigned char fileName[1];
+} __packed;
+
+struct smb_com_delete_file_rsp {
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+#define CREATE_HARD_LINK         0x103
+
+struct smb_com_nt_rename_req {  /* A5 - also used for create hardlink */
+	struct smb_hdr hdr;     /* wct = 4 */
+	__le16 SearchAttributes;        /* target file attributes */
+	__le16 Flags;           /* spec says Information Level */
+	__le32 ClusterCount;
+	__le16 ByteCount;
+	__u8 BufferFormat;      /* 4 = ASCII or Unicode */
+	unsigned char OldFileName[1];
+	/* followed by __u8 BufferFormat2 */
+	/* followed by NewFileName */
+} __packed;
+
+struct smb_com_query_information_req {
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;       /* 1 + namelen + 1 */
+	__u8 BufferFormat;      /* 4 = ASCII */
+	unsigned char FileName[1];
+} __packed;
+
+struct smb_com_query_information_rsp {
+	struct smb_hdr hdr;     /* wct = 10 */
+	__le16 attr;
+	__le32  last_write_time;
+	__le32 size;
+	__u16  reserved[5];
+	__le16 ByteCount;       /* bcc = 0 */
+} __packed;
+
+struct smb_com_findclose_req {
+	struct smb_hdr hdr; /* wct = 1 */
+	__u16 FileID;
+	__le16 ByteCount;    /* 0 */
+} __packed;
+
+#define SMBOPEN_DISPOSITION_NONE        0
+#define SMBOPEN_LOCK_GRANTED            0x8000
+
+#define SMB_DA_ACCESS_READ              0
+#define SMB_DA_ACCESS_WRITE             0x0001
+#define SMB_DA_ACCESS_READ_WRITE        0x0002
+
+/*
+ * Flags on SMB open
+ */
+#define SMBOPEN_WRITE_THROUGH 0x4000
+#define SMBOPEN_DENY_ALL      0x0010
+#define SMBOPEN_DENY_WRITE    0x0020
+#define SMBOPEN_DENY_READ     0x0030
+#define SMBOPEN_DENY_NONE     0x0040
+#define SMBOPEN_SHARING_MODE  (SMBOPEN_DENY_ALL |	\
+				SMBOPEN_DENY_WRITE |	\
+				SMBOPEN_DENY_READ |	\
+				SMBOPEN_DENY_NONE)
+#define SMBOPEN_READ          0x0000
+#define SMBOPEN_WRITE         0x0001
+#define SMBOPEN_READWRITE     0x0002
+#define SMBOPEN_EXECUTE       0x0003
+
+#define SMBOPEN_OCREATE       0x0010
+#define SMBOPEN_OTRUNC        0x0002
+#define SMBOPEN_OAPPEND       0x0001
+
+/* format of legacy open request */
+struct smb_com_openx_req {
+	struct smb_hdr  hdr;    /* wct = 15 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__le16 OpenFlags;
+	__le16 Mode;
+	__le16 Sattr; /* search attributes */
+	__le16 FileAttributes;  /* dos attrs */
+	__le32 CreateTime; /* os2 format */
+	__le16 OpenFunction;
+	__le32 EndOfFile;
+	__le32 Timeout;
+	__le32 Reserved;
+	__le16  ByteCount;  /* file name follows */
+	char   fileName[1];
+} __packed;
+
+/* legacy open (SMB_COM_OPEN_ANDX) server response (wct = 15) */
+struct smb_com_openx_rsp {
+	struct smb_hdr  hdr;    /* wct = 15 */
+	__u8 AndXCommand;
+	__u8 AndXReserved;
+	__le16 AndXOffset;
+	__u16  Fid;
+	__le16 FileAttributes;
+	__le32 LastWriteTime; /* os2 format */
+	__le32 EndOfFile;
+	__le16 Access;
+	__le16 FileType;
+	__le16 IPCState;
+	__le16 Action;
+	__u32  FileId;
+	__u16  Reserved;
+	__le16 ByteCount;
+} __packed;
+
+/* filesystem allocation/free-space counters (sector-based units) */
+struct filesystem_alloc_info {
+	__le32 fsid;
+	__le32 SectorsPerAllocationUnit;
+	__le32 TotalAllocationUnits;
+	__le32 FreeAllocationUnits;
+	__le16  BytesPerSector;
+} __packed;
+
+struct file_allocation_info {
+	__le64 AllocationSize; /* Note old Samba srvr rounds this up too much */
+} __packed;      /* size used on disk: 0x103 for set, 0x105 for query */
+
+/* file metadata in SMB date/time format (info level 1) */
+struct file_info_standard {
+	__le16 CreationDate; /* SMB Date see above */
+	__le16 CreationTime; /* SMB Time */
+	__le16 LastAccessDate;
+	__le16 LastAccessTime;
+	__le16 LastWriteDate;
+	__le16 LastWriteTime;
+	__le32 DataSize; /* File Size (EOF) */
+	__le32 AllocationSize;
+	__le16 Attributes; /* verify not u32 */
+	__le32 EASize;
+} __packed;  /* level 1 SetPath/FileInfo */
+
+#define CIFS_MF_SYMLINK_LINK_MAXLEN (1024)
+
+/* target description for a server-side rename (target name is unicode) */
+struct set_file_rename {
+	__le32 overwrite;   /* 1 = overwrite dest */
+	__u32 root_fid;   /* zero */
+	__le32 target_name_len;
+	char  target_name[0];  /* Must be unicode */
+} __packed;
+
+/* extended attribute (EA) name/value entry */
+struct fea {
+	unsigned char EA_flags;
+	__u8 name_len;
+	__le16 value_len;
+	char name[1];
+	/* optionally followed by value */
+} __packed;
+
+/* variable-length list of EA entries */
+struct fealist {
+	__le32 list_len;
+	__u8 list[1];
+} __packed;
+
+/* POSIX ACL set/query path info structures */
+#define CIFS_ACL_VERSION 1
+struct cifs_posix_ace { /* access control entry (ACE) */
+	__u8  cifs_e_tag;
+	__u8  cifs_e_perm;
+	__le64 cifs_uid; /* or gid */
+} __packed;
+
+struct cifs_posix_acl { /* access control list (ACL) */
+	__le16  version;
+	__le16  access_entry_count;  /* access ACL - count of entries */
+	__le16  default_entry_count; /* default ACL - count of entries */
+	struct cifs_posix_ace ace_array[0];
+	/*
+	 * followed by
+	 * struct cifs_posix_ace default_ace_array[]
+	 */
+} __packed;  /* level 0x204 */
+
+/* SMB_COM_SETATTR request: set DOS attrs and last-write time by path. */
+struct smb_com_setattr_req {
+	struct smb_hdr hdr; /* wct = 8 */
+	__le16 attr;
+	__le32 LastWriteTime;
+	__le16 reserved[5]; /* must be zero */
+	__le16 ByteCount;
+	__u8   BufferFormat; /* 4 = ASCII */
+	unsigned char fileName[1];
+} __packed;
+
+/* SMB_COM_SETATTR response carries no parameters or data. */
+struct smb_com_setattr_rsp {
+	struct smb_hdr hdr;     /* wct = 0 */
+	__le16 ByteCount;        /* bct = 0 */
+} __packed;
+
+#ifdef CONFIG_SMB_INSECURE_SERVER
+extern int init_smb1_server(struct ksmbd_conn *conn);
+#endif
+
+/* function prototypes */
+extern int init_smb_rsp_hdr(struct ksmbd_work *work);
+extern u16 get_smb_cmd_val(struct ksmbd_work *work);
+extern void set_smb_rsp_status(struct ksmbd_work *work, __le32 err);
+extern int smb_allocate_rsp_buf(struct ksmbd_work *work);
+/* SMB1 request/response signing helpers */
+extern bool smb1_is_sign_req(struct ksmbd_work *work, unsigned int command);
+extern int smb1_check_sign_req(struct ksmbd_work *work);
+extern void smb1_set_sign_rsp(struct ksmbd_work *work);
+extern int smb_check_user_session(struct ksmbd_work *work);
+extern int smb_get_ksmbd_tcon(struct ksmbd_work *work);
+extern int ksmbd_smb1_check_message(struct ksmbd_work *work);
+
+/* smb1 command handlers */
+extern int smb_rename(struct ksmbd_work *work);
+extern int smb_negotiate_request(struct ksmbd_work *work);
+#ifdef CONFIG_SMB_INSECURE_SERVER
+extern int smb_handle_negotiate(struct ksmbd_work *work);
+#endif
+extern int smb_session_setup_andx(struct ksmbd_work *work);
+extern int smb_tree_connect_andx(struct ksmbd_work *work);
+extern int smb_trans2(struct ksmbd_work *work);
+extern int smb_nt_create_andx(struct ksmbd_work *work);
+extern int smb_trans(struct ksmbd_work *work);
+extern int smb_locking_andx(struct ksmbd_work *work);
+extern int smb_close(struct ksmbd_work *work);
+extern int smb_read_andx(struct ksmbd_work *work);
+extern int smb_tree_disconnect(struct ksmbd_work *work);
+extern int smb_session_disconnect(struct ksmbd_work *work);
+extern int smb_write_andx(struct ksmbd_work *work);
+extern int smb_echo(struct ksmbd_work *work);
+extern int smb_flush(struct ksmbd_work *work);
+extern int smb_mkdir(struct ksmbd_work *work);
+extern int smb_rmdir(struct ksmbd_work *work);
+extern int smb_unlink(struct ksmbd_work *work);
+extern int smb_nt_cancel(struct ksmbd_work *work);
+extern int smb_nt_rename(struct ksmbd_work *work);
+extern int smb_query_info(struct ksmbd_work *work);
+extern int smb_closedir(struct ksmbd_work *work);
+extern int smb_open_andx(struct ksmbd_work *work);
+extern int smb_write(struct ksmbd_work *work);
+extern int smb_setattr(struct ksmbd_work *work);
+extern int smb_query_information_disk(struct ksmbd_work *work);
+extern int smb_checkdir(struct ksmbd_work *work);
+extern int smb_process_exit(struct ksmbd_work *work);
+#endif /* __SMB1PDU_H */
+#endif /* __SMB1PDU_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/fs/smb/server/smberr.h	2025-09-25 17:40:36.787373276 +0200
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+/*
+ *   Copyright (c) International Business Machines  Corp., 2002,2004
+ *   Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ *   See Error Codes section of the SNIA CIFS Specification
+ *   for more information
+ */
+#ifndef __KSMBD_SMBERR_H
+#define __KSMBD_SMBERR_H
+
+/* SMB1 DOS error classes */
+#define SUCCESS	0x00	/* The request was successful. */
+#define ERRDOS	0x01	/* Error is from the core DOS operating system set */
+#define ERRSRV	0x02	/* Error is generated by the file server daemon */
+#define ERRHRD	0x03	/* Error is a hardware error. */
+#define ERRCMD	0xFF	/* Command was not in the "SMB" format. */
+
+/* The following error codes may be generated with the SUCCESS error class.*/
+
+/*#define SUCCESS	0	The request was successful. */
+
+/* The following error codes may be generated with the ERRDOS error class.*/
+
+#define ERRbadfunc		1	/*
+					 * Invalid function. The server did not
+					 * recognize or could not perform a
+					 * system call generated by the server,
+					 * e.g., set the DIRECTORY attribute on
+					 * a data file, invalid seek mode.
+					 */
+#define ERRbadfile		2	/*
+					 * File not found. The last component
+					 * of a file's pathname could not be
+					 * found.
+					 */
+#define ERRbadpath		3	/*
+					 * Directory invalid. A directory
+					 * component in a pathname could not be
+					 * found.
+					 */
+#define ERRnofids		4	/*
+					 * Too many open files. The server has
+					 * no file handles available.
+					 */
+#define ERRnoaccess		5	/*
+					 * Access denied, the client's context
+					 * does not permit the requested
+					 * function. This includes the
+					 * following conditions: invalid rename
+					 * command, write to Fid open for read
+					 * only, read on Fid open for write
+					 * only, attempt to delete a non-empty
+					 * directory
+					 */
+#define ERRbadfid		6	/*
+					 * Invalid file handle. The file handle
+					 * specified was not recognized by the
+					 * server.
+					 */
+#define ERRbadmcb		7	/* Memory control blocks destroyed. */
+#define ERRnomem		8	/*
+					 * Insufficient server memory to
+					 * perform the requested function.
+					 */
+#define ERRbadmem		9	/* Invalid memory block address. */
+#define ERRbadenv		10	/* Invalid environment. */
+#define ERRbadformat		11	/* Invalid format. */
+#define ERRbadaccess		12	/* Invalid open mode. */
+#define ERRbaddata		13	/*
+					 * Invalid data (generated only by
+					 * IOCTL calls within the server).
+					 */
+#define ERRbaddrive		15	/* Invalid drive specified. */
+#define ERRremcd		16	/*
+					 * A Delete Directory request attempted
+					 * to remove the server's current
+					 * directory.
+					 */
+#define ERRdiffdevice		17	/*
+					 * Not same device (e.g., a cross
+					 * volume rename was attempted
+					 */
+#define ERRnofiles		18	/*
+					 * A File Search command can find no
+					 * more files matching the specified
+					 * criteria.
+					 */
+#define ERRwriteprot		19	/* media is write protected */
+#define ERRgeneral		31
+#define ERRbadshare		32	/*
+					 * The sharing mode specified for an
+					 * Open conflicts with existing FIDs on
+					 * the file.
+					 */
+#define ERRlock			33	/*
+					 * A Lock request conflicted with an
+					 * existing lock or specified an
+					 * invalid mode, or an Unlock request
+					 * attempted to remove a lock held by
+					 * another process.
+					 */
+#define ERRunsup		50
+#define ERRnosuchshare		67
+#define ERRfilexists		80	/*
+					 * The file named in the request
+					 * already exists.
+					 */
+#define ERRinvparm		87
+#define ERRdiskfull		112
+#define ERRinvname		123
+#define ERRinvlevel		124
+#define ERRdirnotempty		145
+#define ERRnotlocked		158
+#define ERRcancelviolation	173
+#define ERRnoatomiclocks	174
+#define ERRalreadyexists	183
+#define ERRbadpipe		230
+#define ERRpipebusy		231
+#define ERRpipeclosing		232
+#define ERRnotconnected		233
+#define ERRmoredata		234
+#define ERReasnotsupported	282
+#define ErrQuota		0x200	/*
+					 * The operation would cause a quota
+					 * limit to be exceeded.
+					 */
+#define ErrNotALink		0x201	/*
+					 * A link operation was performed on a
+					 * pathname that was not a link.
+					 */
+
+/*
+ * Below errors are used internally (do not come over the wire) for passthrough
+ * from STATUS codes to POSIX only
+ */
+#define ERRsymlink              0xFFFD
+#define ErrTooManyLinks         0xFFFE
+
+/* Following error codes may be generated with the ERRSRV error class.*/
+
+#define ERRerror		1	/*
+					 * Non-specific error code. It is
+					 * returned under the following
+					 * conditions: resource other than disk
+					 * space exhausted (e.g. TIDs), first
+					 * SMB command was not negotiate,
+					 * multiple negotiates attempted, and
+					 * internal server error.
+					 */
+#define ERRbadpw		2	/*
+					 * Bad password - name/password pair in
+					 * a TreeConnect or Session Setup are
+					 * invalid.
+					 */
+#define ERRbadtype		3	/*
+					 * used for indicating DFS referral
+					 * needed
+					 */
+#define ERRaccess		4	/*
+					 * The client does not have the
+					 * necessary access rights within the
+					 * specified context for requested
+					 * function.
+					 */
+#define ERRinvtid		5	/*
+					 * The Tid specified in a command was
+					 * invalid.
+					 */
+#define ERRinvnetname		6	/*
+					 * Invalid network name in tree
+					 * connect.
+					 */
+#define ERRinvdevice		7	/*
+					 * Invalid device - printer request
+					 * made to non-printer connection or
+					 * non-printer request made to printer
+					 * connection.
+					 */
+#define ERRqfull		49	/*
+					 * Print queue full (files) -- returned
+					 * by open print file.
+					 */
+#define ERRqtoobig		50	/* Print queue full -- no space. */
+#define ERRqeof			51	/* EOF on print queue dump */
+#define ERRinvpfid		52	/* Invalid print file FID. */
+#define ERRsmbcmd		64	/*
+					 * The server did not recognize the
+					 * command received.
+					 */
+#define ERRsrverror		65	/*
+					 * The server encountered an internal
+					 * error, e.g., system file
+					 * unavailable.
+					 */
+#define ERRbadBID		66	/* (obsolete) */
+#define ERRfilespecs		67	/*
+					 * The Fid and pathname parameters
+					 * contained an invalid combination of
+					 * values.
+					 */
+#define ERRbadLink		68	/* (obsolete) */
+#define ERRbadpermits		69	/*
+					 * The access permissions specified for
+					 * a file or directory are not a valid
+					 * combination.
+					 */
+#define ERRbadPID		70
+#define ERRsetattrmode		71	/* attribute (mode) is invalid */
+#define ERRpaused		81	/* Server is paused */
+#define ERRmsgoff		82	/* reserved - messaging off */
+#define ERRnoroom		83	/* reserved - no room for message */
+#define ERRrmuns		87	/* reserved - too many remote names */
+#define ERRtimeout		88	/* operation timed out */
+#define ERRnoresource		89	/* No resources available for request */
+#define ERRtoomanyuids		90	/*
+					 * Too many UIDs active on this session
+					 */
+#define ERRbaduid		91	/*
+					 * The UID is not known as a valid user
+					 */
+#define ERRusempx		250	/* temporarily unable to use raw */
+#define ERRusestd		251	/*
+					 * temporarily unable to use either raw
+					 * or mpx
+					 */
+#define ERR_NOTIFY_ENUM_DIR	1024	/* NOTE(review): change-notify overflow, per SNIA CIFS -- confirm */
+#define ERRnoSuchUser		2238	/* user account does not exist */
+#define ERRaccountexpired	2239
+#define ERRbadclient		2240	/* can not logon from this client */
+#define ERRbadLogonTime		2241	/* logon hours do not allow this */
+#define ERRpasswordExpired	2242
+#define ERRnetlogonNotStarted	2455
+#define ERRnosupport		0xFFFF
+
+#endif /* __KSMBD_SMBERR_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/dt-bindings/brcm,bcm63158-ubus.h	2025-09-25 17:40:36.915373911 +0200
@@ -0,0 +1,31 @@
+
+#pragma once
+
+/*
+ * this is SoC specific, maybe abstract this in some kind of virtual
+ * ID just like the PMC code does.
+ */
+/* UBUS interconnect port identifiers (BCM63158 numbering). */
+#define UBUS_PORT_ID_MEMC        1
+#define UBUS_PORT_ID_BIU         2
+#define UBUS_PORT_ID_PER         3
+#define UBUS_PORT_ID_USB         4
+#define UBUS_PORT_ID_SPU         5
+#define UBUS_PORT_ID_DSL         6
+#define UBUS_PORT_ID_PERDMA      7
+#define UBUS_PORT_ID_PCIE0       8
+#define UBUS_PORT_ID_PCIE2       9
+#define UBUS_PORT_ID_PCIE3       10
+#define UBUS_PORT_ID_DSLCPU      11
+#define UBUS_PORT_ID_WAN         12
+#define UBUS_PORT_ID_PMC         13
+#define UBUS_PORT_ID_SWH         14
+#define UBUS_PORT_ID_PSRAM       16
+#define UBUS_PORT_ID_VPB         20
+#define UBUS_PORT_ID_FPM         21
+#define UBUS_PORT_ID_QM          22
+#define UBUS_PORT_ID_DQM         23
+#define UBUS_PORT_ID_DMA0        24
+#define UBUS_PORT_ID_NATC        26
+#define UBUS_PORT_ID_SYSXRDP     27
+#define UBUS_PORT_ID_SYS         31
+#define UBUS_PORT_ID_RQ0         32
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/dt-bindings/brcm,bcm63xx-pcie.h	2025-09-25 17:40:36.915373911 +0200
@@ -0,0 +1,7 @@
+
+#pragma once
+
+/* PCIe link speed (generation) selectors for bcm63xx PCIe DT nodes. */
+#define PCIE_SPEED_DEFAULT	0
+#define PCIE_SPEED_GEN1		1
+#define PCIE_SPEED_GEN2		2
+#define PCIE_SPEED_GEN3		3
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/dt-bindings/net/realtek-phy-rtl8211f.h	2025-09-25 17:40:36.923373950 +0200
@@ -0,0 +1,19 @@
+/*
+ * Device Tree constants for Realtek rtl8211f PHY
+ *
+ * Author: Remi Pommarel
+ *
+ * License: GPL
+ * Copyright (c) 2017 Remi Pommarel
+ */
+
+#ifndef _DT_BINDINGS_RTL_8211F_H
+#define _DT_BINDINGS_RTL_8211F_H
+
+/* LED indication mode bits (link speed / activity) */
+#define RTL8211F_LED_MODE_10M			0x1
+#define RTL8211F_LED_MODE_100M			0x2
+#define RTL8211F_LED_MODE_1000M			0x8
+#define RTL8211F_LED_MODE_ACT			0x10
+
+#endif
+
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/dt-bindings/pinctrl/bcm63138-pinfunc.h	2025-09-25 17:40:36.923373950 +0200
@@ -0,0 +1,512 @@
+#ifndef _DT_BINDINGS_BCM63138_PINFUNC_H
+#define _DT_BINDINGS_BCM63138_PINFUNC_H
+
+/* encoding: pin number in bits 15:8, mux function number in bits 7:0 */
+#define BCM63138_PIN_NO(x)		((x) << 8)
+#define BCM63138_GET_PIN_NO(x)		((x) >> 8)
+#define BCM63138_GET_PIN_FUNC(x)	((x) & 0xff)
+
+#define BCM63138_GPIO_00__FUNC_SER_LED_DATA	(BCM63138_PIN_NO(0) | 1)
+#define BCM63138_GPIO_00__FUNC_LED_00		(BCM63138_PIN_NO(0) | 4)
+#define BCM63138_GPIO_00__FUNC_GPIO_00		(BCM63138_PIN_NO(0) | 5)
+
+#define BCM63138_GPIO_01__FUNC_SER_LED_CLK	(BCM63138_PIN_NO(1) | 1)
+#define BCM63138_GPIO_01__FUNC_LED_01		(BCM63138_PIN_NO(1) | 4)
+#define BCM63138_GPIO_01__FUNC_GPIO_01		(BCM63138_PIN_NO(1) | 5)
+
+#define BCM63138_GPIO_02__FUNC_SER_LED_MASK	(BCM63138_PIN_NO(2) | 1)
+#define BCM63138_GPIO_02__FUNC_LED_02		(BCM63138_PIN_NO(2) | 4)
+#define BCM63138_GPIO_02__FUNC_GPIO_02		(BCM63138_PIN_NO(2) | 5)
+
+#define BCM63138_GPIO_03__FUNC_UART2_CTS	(BCM63138_PIN_NO(3) | 1)
+#define BCM63138_GPIO_03__FUNC_NTR_PULSE_IN_0	(BCM63138_PIN_NO(3) | 2)
+#define BCM63138_GPIO_03__FUNC_MOCA_GPIO_0	(BCM63138_PIN_NO(3) | 3)
+#define BCM63138_GPIO_03__FUNC_LED_03		(BCM63138_PIN_NO(3) | 4)
+#define BCM63138_GPIO_03__FUNC_GPIO_03		(BCM63138_PIN_NO(3) | 5)
+
+#define BCM63138_GPIO_04__FUNC_UART2_RTS	(BCM63138_PIN_NO(4) | 1)
+#define BCM63138_GPIO_04__FUNC_NTR_PULSE_OUT_0	(BCM63138_PIN_NO(4) | 2)
+#define BCM63138_GPIO_04__FUNC_MOCA_GPIO_1	(BCM63138_PIN_NO(4) | 3)
+#define BCM63138_GPIO_04__FUNC_LED_04		(BCM63138_PIN_NO(4) | 4)
+#define BCM63138_GPIO_04__FUNC_GPIO_04		(BCM63138_PIN_NO(4) | 5)
+
+#define BCM63138_GPIO_05__FUNC_UART2_SIN	(BCM63138_PIN_NO(5) | 1)
+#define BCM63138_GPIO_05__FUNC_MOCA_GPIO_2	(BCM63138_PIN_NO(5) | 3)
+#define BCM63138_GPIO_05__FUNC_LED_05		(BCM63138_PIN_NO(5) | 4)
+#define BCM63138_GPIO_05__FUNC_GPIO_05		(BCM63138_PIN_NO(5) | 5)
+
+#define BCM63138_GPIO_06__FUNC_UART2_SOUT	(BCM63138_PIN_NO(6) | 1)
+#define BCM63138_GPIO_06__FUNC_MOCA_GPIO_3	(BCM63138_PIN_NO(6) | 3)
+#define BCM63138_GPIO_06__FUNC_LED_06		(BCM63138_PIN_NO(6) | 4)
+#define BCM63138_GPIO_06__FUNC_GPIO_06		(BCM63138_PIN_NO(6) | 5)
+
+#define BCM63138_GPIO_07__FUNC_SPIM_SS5_B	(BCM63138_PIN_NO(7) | 1)
+#define BCM63138_GPIO_07__FUNC_NTR_PULSE_OUT_1	(BCM63138_PIN_NO(7) | 2)
+#define BCM63138_GPIO_07__FUNC_MOCA_GPIO_4	(BCM63138_PIN_NO(7) | 3)
+#define BCM63138_GPIO_07__FUNC_LED_07		(BCM63138_PIN_NO(7) | 4)
+#define BCM63138_GPIO_07__FUNC_GPIO_07		(BCM63138_PIN_NO(7) | 5)
+
+#define BCM63138_GPIO_08__FUNC_SPIM_SS4_B	(BCM63138_PIN_NO(8) | 1)
+#define BCM63138_GPIO_08__FUNC_MOCA_GPIO_5	(BCM63138_PIN_NO(8) | 3)
+#define BCM63138_GPIO_08__FUNC_LED_08		(BCM63138_PIN_NO(8) | 4)
+#define BCM63138_GPIO_08__FUNC_GPIO_08		(BCM63138_PIN_NO(8) | 5)
+
+#define BCM63138_GPIO_09__FUNC_SPIM_SS3_B	(BCM63138_PIN_NO(9) | 1)
+#define BCM63138_GPIO_09__FUNC_LD1_DIN		(BCM63138_PIN_NO(9) | 2)
+#define BCM63138_GPIO_09__FUNC_LED_09		(BCM63138_PIN_NO(9) | 4)
+#define BCM63138_GPIO_09__FUNC_GPIO_09		(BCM63138_PIN_NO(9) | 5)
+
+#define BCM63138_GPIO_10__FUNC_SPIM_SS2_B	(BCM63138_PIN_NO(10) | 1)
+#define BCM63138_GPIO_10__FUNC_LD1_DCLK		(BCM63138_PIN_NO(10) | 2)
+#define BCM63138_GPIO_10__FUNC_LED_10		(BCM63138_PIN_NO(10) | 4)
+#define BCM63138_GPIO_10__FUNC_GPIO_10		(BCM63138_PIN_NO(10) | 5)
+
+#define BCM63138_GPIO_11__FUNC_MOCA_GPIO_6	(BCM63138_PIN_NO(11) | 3)
+#define BCM63138_GPIO_11__FUNC_LED_11		(BCM63138_PIN_NO(11) | 4)
+#define BCM63138_GPIO_11__FUNC_GPIO_11		(BCM63138_PIN_NO(11) | 5)
+
+#define BCM63138_GPIO_12__FUNC_NTR_PULSE_IN	(BCM63138_PIN_NO(12) | 1)
+#define BCM63138_GPIO_12__FUNC_MOCA_GPIO_7	(BCM63138_PIN_NO(12) | 3)
+#define BCM63138_GPIO_12__FUNC_LED_12		(BCM63138_PIN_NO(12) | 4)
+#define BCM63138_GPIO_12__FUNC_GPIO_12		(BCM63138_PIN_NO(12) | 5)
+
+#define BCM63138_GPIO_13__FUNC_NTR_PULSE_OUT_0	(BCM63138_PIN_NO(13) | 1)
+#define BCM63138_GPIO_13__FUNC_MOCA_GPIO_8	(BCM63138_PIN_NO(13) | 3)
+#define BCM63138_GPIO_13__FUNC_LED_13		(BCM63138_PIN_NO(13) | 4)
+#define BCM63138_GPIO_13__FUNC_GPIO_13		(BCM63138_PIN_NO(13) | 5)
+
+#define BCM63138_GPIO_14__FUNC_MOCA_GPIO_9	(BCM63138_PIN_NO(14) | 3)
+#define BCM63138_GPIO_14__FUNC_LED_14		(BCM63138_PIN_NO(14) | 4)
+#define BCM63138_GPIO_14__FUNC_GPIO_14		(BCM63138_PIN_NO(14) | 5)
+
+#define BCM63138_GPIO_15__FUNC_LED_15		(BCM63138_PIN_NO(15) | 4)
+#define BCM63138_GPIO_15__FUNC_GPIO_15		(BCM63138_PIN_NO(15) | 5)
+
+#define BCM63138_GPIO_16__FUNC_DECT_PD_0	(BCM63138_PIN_NO(16) | 3)
+#define BCM63138_GPIO_16__FUNC_LED_16		(BCM63138_PIN_NO(16) | 4)
+#define BCM63138_GPIO_16__FUNC_GPIO_16		(BCM63138_PIN_NO(16) | 5)
+
+#define BCM63138_GPIO_17__FUNC_DECT_PD_1	(BCM63138_PIN_NO(17) | 3)
+#define BCM63138_GPIO_17__FUNC_LED_17		(BCM63138_PIN_NO(17) | 4)
+#define BCM63138_GPIO_17__FUNC_GPIO_17		(BCM63138_PIN_NO(17) | 5)
+
+#define BCM63138_GPIO_18__FUNC_VREG_CLK		(BCM63138_PIN_NO(18) | 1)
+#define BCM63138_GPIO_18__FUNC_LED_18		(BCM63138_PIN_NO(18) | 4)
+#define BCM63138_GPIO_18__FUNC_GPIO_18		(BCM63138_PIN_NO(18) | 5)
+
+#define BCM63138_GPIO_19__FUNC_LED_19		(BCM63138_PIN_NO(19) | 4)
+#define BCM63138_GPIO_19__FUNC_GPIO_19		(BCM63138_PIN_NO(19) | 5)
+
+#define BCM63138_GPIO_20__FUNC_UART2_CTS	(BCM63138_PIN_NO(20) | 2)
+#define BCM63138_GPIO_20__FUNC_LED_20		(BCM63138_PIN_NO(20) | 4)
+#define BCM63138_GPIO_20__FUNC_GPIO_20		(BCM63138_PIN_NO(20) | 5)
+
+#define BCM63138_GPIO_21__FUNC_UART2_RTS	(BCM63138_PIN_NO(21) | 2)
+#define BCM63138_GPIO_21__FUNC_LED_21		(BCM63138_PIN_NO(21) | 4)
+#define BCM63138_GPIO_21__FUNC_GPIO_21		(BCM63138_PIN_NO(21) | 5)
+
+#define BCM63138_GPIO_22__FUNC_UART2_SIN	(BCM63138_PIN_NO(22) | 2)
+#define BCM63138_GPIO_22__FUNC_LED_22		(BCM63138_PIN_NO(22) | 4)
+#define BCM63138_GPIO_22__FUNC_GPIO_22		(BCM63138_PIN_NO(22) | 5)
+
+#define BCM63138_GPIO_23__FUNC_UART2_SOUT	(BCM63138_PIN_NO(23) | 2)
+#define BCM63138_GPIO_23__FUNC_LED_23		(BCM63138_PIN_NO(23) | 4)
+#define BCM63138_GPIO_23__FUNC_GPIO_23		(BCM63138_PIN_NO(23) | 5)
+
+#define BCM63138_GPIO_24__FUNC_NTR_PULSE_OUT_1	(BCM63138_PIN_NO(24) | 1)
+#define BCM63138_GPIO_24__FUNC_I2C_SDA		(BCM63138_PIN_NO(24) | 3)
+#define BCM63138_GPIO_24__FUNC_LED_24		(BCM63138_PIN_NO(24) | 4)
+#define BCM63138_GPIO_24__FUNC_GPIO_24		(BCM63138_PIN_NO(24) | 5)
+
+#define BCM63138_GPIO_25__FUNC_SPIM_SS2_B	(BCM63138_PIN_NO(25) | 1)
+#define BCM63138_GPIO_25__FUNC_NTR_PULSE_IN	(BCM63138_PIN_NO(25) | 2)
+#define BCM63138_GPIO_25__FUNC_I2C_SCL		(BCM63138_PIN_NO(25) | 3)
+#define BCM63138_GPIO_25__FUNC_LED_25		(BCM63138_PIN_NO(25) | 4)
+#define BCM63138_GPIO_25__FUNC_GPIO_25		(BCM63138_PIN_NO(25) | 5)
+
+#define BCM63138_GPIO_26__FUNC_SPIM_SS3_B	(BCM63138_PIN_NO(26) | 1)
+#define BCM63138_GPIO_26__FUNC_NTR_PULSE_OUT_0	(BCM63138_PIN_NO(26) | 2)
+#define BCM63138_GPIO_26__FUNC_NTR_PULSE_IN	(BCM63138_PIN_NO(26) | 3)
+#define BCM63138_GPIO_26__FUNC_LED_26		(BCM63138_PIN_NO(26) | 4)
+#define BCM63138_GPIO_26__FUNC_GPIO_26		(BCM63138_PIN_NO(26) | 5)
+
+#define BCM63138_GPIO_27__FUNC_SPIM_SS4_B	(BCM63138_PIN_NO(27) | 1)
+#define BCM63138_GPIO_27__FUNC_NTR_PULSE_OUT_1	(BCM63138_PIN_NO(27) | 2)
+#define BCM63138_GPIO_27__FUNC_UART2_SIN	(BCM63138_PIN_NO(27) | 3)
+#define BCM63138_GPIO_27__FUNC_LED_27		(BCM63138_PIN_NO(27) | 4)
+#define BCM63138_GPIO_27__FUNC_GPIO_27		(BCM63138_PIN_NO(27) | 5)
+
+#define BCM63138_GPIO_28__FUNC_SPIM_SS5_B	(BCM63138_PIN_NO(28) | 1)
+#define BCM63138_GPIO_28__FUNC_AE_LOS		(BCM63138_PIN_NO(28) | 2)
+#define BCM63138_GPIO_28__FUNC_UART2_SOUT	(BCM63138_PIN_NO(28) | 3)
+#define BCM63138_GPIO_28__FUNC_LED_28		(BCM63138_PIN_NO(28) | 4)
+#define BCM63138_GPIO_28__FUNC_GPIO_28		(BCM63138_PIN_NO(28) | 5)
+
+#define BCM63138_GPIO_29__FUNC_SER_LED_DATA	(BCM63138_PIN_NO(29) | 1)
+#define BCM63138_GPIO_29__FUNC_LED_29		(BCM63138_PIN_NO(29) | 4)
+#define BCM63138_GPIO_29__FUNC_GPIO_29		(BCM63138_PIN_NO(29) | 5)
+
+#define BCM63138_GPIO_30__FUNC_SER_LED_CLK	(BCM63138_PIN_NO(30) | 1)
+#define BCM63138_GPIO_30__FUNC_LED_30		(BCM63138_PIN_NO(30) | 4)
+#define BCM63138_GPIO_30__FUNC_GPIO_30		(BCM63138_PIN_NO(30) | 5)
+
+#define BCM63138_GPIO_31__FUNC_SER_LED_MASK	(BCM63138_PIN_NO(31) | 1)
+#define BCM63138_GPIO_31__FUNC_LED_31		(BCM63138_PIN_NO(31) | 4)
+#define BCM63138_GPIO_31__FUNC_GPIO_31		(BCM63138_PIN_NO(31) | 5)
+
+#define BCM63138_GPIO_32__FUNC_EXT_IRQ_0	(BCM63138_PIN_NO(32) | 1)
+#define BCM63138_GPIO_32__FUNC_GPIO_32		(BCM63138_PIN_NO(32) | 5)
+
+#define BCM63138_GPIO_33__FUNC_EXT_IRQ_1	(BCM63138_PIN_NO(33) | 1)
+#define BCM63138_GPIO_33__FUNC_GPIO_33		(BCM63138_PIN_NO(33) | 5)
+
+#define BCM63138_GPIO_34__FUNC_EXT_IRQ_2	(BCM63138_PIN_NO(34) | 1)
+#define BCM63138_GPIO_34__FUNC_GPIO_34		(BCM63138_PIN_NO(34) | 5)
+
+#define BCM63138_GPIO_35__FUNC_EXT_IRQ_3	(BCM63138_PIN_NO(35) | 1)
+#define BCM63138_GPIO_35__FUNC_SYS_IRQ_OUT	(BCM63138_PIN_NO(35) | 2)
+#define BCM63138_GPIO_35__FUNC_GPIO_35		(BCM63138_PIN_NO(35) | 5)
+
+#define BCM63138_GPIO_36__FUNC_EXT_IRQ_4	(BCM63138_PIN_NO(36) | 1)
+#define BCM63138_GPIO_36__FUNC_AE_LOS		(BCM63138_PIN_NO(36) | 2)
+#define BCM63138_GPIO_36__FUNC_GPIO_36		(BCM63138_PIN_NO(36) | 5)
+
+#define BCM63138_GPIO_37__FUNC_EXT_IRQ_5	(BCM63138_PIN_NO(37) | 1)
+#define BCM63138_GPIO_37__FUNC_VREG_CLK		(BCM63138_PIN_NO(37) | 2)
+#define BCM63138_GPIO_37__FUNC_GPIO_37		(BCM63138_PIN_NO(37) | 5)
+
+#define BCM63138_GPIO_38__FUNC_NAND_CE_B	(BCM63138_PIN_NO(38) | 3)
+#define BCM63138_GPIO_38__FUNC_GPIO_38		(BCM63138_PIN_NO(38) | 5)
+
+#define BCM63138_GPIO_39__FUNC_NAND_RE_B	(BCM63138_PIN_NO(39) | 3)
+#define BCM63138_GPIO_39__FUNC_GPIO_39		(BCM63138_PIN_NO(39) | 5)
+
+#define BCM63138_GPIO_40__FUNC_NAND_RB_B	(BCM63138_PIN_NO(40) | 3)
+#define BCM63138_GPIO_40__FUNC_GPIO_40		(BCM63138_PIN_NO(40) | 5)
+
+#define BCM63138_GPIO_41__FUNC_NAND_DATA_00	(BCM63138_PIN_NO(41) | 3)
+#define BCM63138_GPIO_41__FUNC_GPIO_41		(BCM63138_PIN_NO(41) | 5)
+
+#define BCM63138_GPIO_42__FUNC_DECT_PD_0	(BCM63138_PIN_NO(42) | 1)
+#define BCM63138_GPIO_42__FUNC_NAND_DATA_01	(BCM63138_PIN_NO(42) | 3)
+#define BCM63138_GPIO_42__FUNC_GPIO_42		(BCM63138_PIN_NO(42) | 5)
+
+#define BCM63138_GPIO_43__FUNC_DECT_PD_1	(BCM63138_PIN_NO(43) | 1)
+#define BCM63138_GPIO_43__FUNC_NAND_DATA_02	(BCM63138_PIN_NO(43) | 3)
+#define BCM63138_GPIO_43__FUNC_GPIO_43		(BCM63138_PIN_NO(43) | 5)
+
+#define BCM63138_GPIO_44__FUNC_NAND_DATA_03	(BCM63138_PIN_NO(44) | 3)
+#define BCM63138_GPIO_44__FUNC_GPIO_44		(BCM63138_PIN_NO(44) | 5)
+
+#define BCM63138_GPIO_45__FUNC_NAND_DATA_04	(BCM63138_PIN_NO(45) | 3)
+#define BCM63138_GPIO_45__FUNC_GPIO_45		(BCM63138_PIN_NO(45) | 5)
+
+#define BCM63138_GPIO_46__FUNC_NAND_DATA_05	(BCM63138_PIN_NO(46) | 3)
+#define BCM63138_GPIO_46__FUNC_GPIO_46		(BCM63138_PIN_NO(46) | 5)
+
+#define BCM63138_GPIO_47__FUNC_NAND_DATA_06	(BCM63138_PIN_NO(47) | 3)
+#define BCM63138_GPIO_47__FUNC_GPIO_47		(BCM63138_PIN_NO(47) | 5)
+
+#define BCM63138_GPIO_48__FUNC_NAND_DATA_07	(BCM63138_PIN_NO(48) | 3)
+#define BCM63138_GPIO_48__FUNC_GPIO_48		(BCM63138_PIN_NO(48) | 5)
+
+#define BCM63138_GPIO_49__FUNC_NAND_ALE		(BCM63138_PIN_NO(49) | 3)
+#define BCM63138_GPIO_49__FUNC_GPIO_49		(BCM63138_PIN_NO(49) | 5)
+
+#define BCM63138_GPIO_50__FUNC_NAND_WE_B	(BCM63138_PIN_NO(50) | 3)
+#define BCM63138_GPIO_50__FUNC_GPIO_50		(BCM63138_PIN_NO(50) | 5)
+
+#define BCM63138_GPIO_51__FUNC_NAND_CLE		(BCM63138_PIN_NO(51) | 3)
+#define BCM63138_GPIO_51__FUNC_GPIO_51		(BCM63138_PIN_NO(51) | 5)
+
+#define BCM63138_GPIO_52__FUNC_LD0_PWRUP	(BCM63138_PIN_NO(52) | 1)
+#define BCM63138_GPIO_52__FUNC_I2C_SDA		(BCM63138_PIN_NO(52) | 2)
+#define BCM63138_GPIO_52__FUNC_GPIO_52		(BCM63138_PIN_NO(52) | 5)
+
+#define BCM63138_GPIO_53__FUNC_LD0_DIN		(BCM63138_PIN_NO(53) | 1)
+#define BCM63138_GPIO_53__FUNC_I2C_SCL		(BCM63138_PIN_NO(53) | 2)
+#define BCM63138_GPIO_53__FUNC_GPIO_53		(BCM63138_PIN_NO(53) | 5)
+
+#define BCM63138_GPIO_54__FUNC_LD1_PWRUP	(BCM63138_PIN_NO(54) | 1)
+#define BCM63138_GPIO_54__FUNC_GPIO_54		(BCM63138_PIN_NO(54) | 5)
+
+#define BCM63138_GPIO_55__FUNC_LD0_DCLK		(BCM63138_PIN_NO(55) | 1)
+#define BCM63138_GPIO_55__FUNC_GPIO_55		(BCM63138_PIN_NO(55) | 5)
+
+#define BCM63138_GPIO_56__FUNC_PCM_SDIN		(BCM63138_PIN_NO(56) | 1)
+#define BCM63138_GPIO_56__FUNC_GPIO_56		(BCM63138_PIN_NO(56) | 5)
+
+#define BCM63138_GPIO_57__FUNC_PCM_SDOUT	(BCM63138_PIN_NO(57) | 1)
+#define BCM63138_GPIO_57__FUNC_GPIO_57		(BCM63138_PIN_NO(57) | 5)
+
+#define BCM63138_GPIO_58__FUNC_PCM_CLK		(BCM63138_PIN_NO(58) | 1)
+#define BCM63138_GPIO_58__FUNC_GPIO_58		(BCM63138_PIN_NO(58) | 5)
+
+#define BCM63138_GPIO_59__FUNC_PCM_FS		(BCM63138_PIN_NO(59) | 1)
+#define BCM63138_GPIO_59__FUNC_GPIO_59		(BCM63138_PIN_NO(59) | 5)
+
+#define BCM63138_MII1_COL__FUNC_MII1_COL	(BCM63138_PIN_NO(60) | 1)
+#define BCM63138_MII1_COL__FUNC_GPIO_60		(BCM63138_PIN_NO(60) | 5)
+
+#define BCM63138_MII1_CRS__FUNC_MII1_CRS	(BCM63138_PIN_NO(61) | 1)
+#define BCM63138_MII1_CRS__FUNC_GPIO_61		(BCM63138_PIN_NO(61) | 5)
+
+#define BCM63138_MII1_RXCLK__FUNC_MII1_RXCLK	(BCM63138_PIN_NO(62) | 1)
+#define BCM63138_MII1_RXCLK__FUNC_GPIO_62	(BCM63138_PIN_NO(62) | 5)
+
+#define BCM63138_MII1_RXER__FUNC_MII1_RXER	(BCM63138_PIN_NO(63) | 1)
+#define BCM63138_MII1_RXER__FUNC_GPIO_63	(BCM63138_PIN_NO(63) | 5)
+
+#define BCM63138_MII1_RXDV__FUNC_MII1_RXDV	(BCM63138_PIN_NO(64) | 1)
+#define BCM63138_MII1_RXDV__FUNC_GPIO_64	(BCM63138_PIN_NO(64) | 5)
+
+#define BCM63138_MII_RXD_00__FUNC_MII_RXD_00	(BCM63138_PIN_NO(65) | 1)
+#define BCM63138_MII_RXD_00__FUNC_GPIO_65	(BCM63138_PIN_NO(65) | 5)
+
+#define BCM63138_MII_RXD_01__FUNC_MII_RXD_01	(BCM63138_PIN_NO(66) | 1)
+#define BCM63138_MII_RXD_01__FUNC_GPIO_66	(BCM63138_PIN_NO(66) | 5)
+
+#define BCM63138_MII_RXD_02__FUNC_MII_RXD_02	(BCM63138_PIN_NO(67) | 1)
+#define BCM63138_MII_RXD_02__FUNC_GPIO_67	(BCM63138_PIN_NO(67) | 5)
+
+#define BCM63138_MII_RXD_03__FUNC_MII_RXD_03	(BCM63138_PIN_NO(68) | 1)
+#define BCM63138_MII_RXD_03__FUNC_GPIO_68	(BCM63138_PIN_NO(68) | 5)
+
+#define BCM63138_MII_TXCLK__FUNC_MII_TXCLK	(BCM63138_PIN_NO(69) | 1)
+#define BCM63138_MII_TXCLK__FUNC_GPIO_69	(BCM63138_PIN_NO(69) | 5)
+
+#define BCM63138_MII_TXEN__FUNC_MII_TXEN	(BCM63138_PIN_NO(70) | 1)
+#define BCM63138_MII_TXEN__FUNC_GPIO_70		(BCM63138_PIN_NO(70) | 5)
+
+#define BCM63138_MII_TXER__FUNC_MII_TXER	(BCM63138_PIN_NO(71) | 1)
+#define BCM63138_MII_TXER__FUNC_GPIO_71		(BCM63138_PIN_NO(71) | 5)
+
+#define BCM63138_MII_TXD_00__FUNC_MII_TXD_00	(BCM63138_PIN_NO(72) | 1)
+#define BCM63138_MII_TXD_00__FUNC_GPIO_72	(BCM63138_PIN_NO(72) | 5)
+
+#define BCM63138_MII_TXD_01__FUNC_MII_TXD_01	(BCM63138_PIN_NO(73) | 1)
+#define BCM63138_MII_TXD_01__FUNC_GPIO_73	(BCM63138_PIN_NO(73) | 5)
+
+#define BCM63138_MII_TXD_02__FUNC_MII_TXD_02	(BCM63138_PIN_NO(74) | 1)
+#define BCM63138_MII_TXD_02__FUNC_GPIO_74	(BCM63138_PIN_NO(74) | 5)
+
+#define BCM63138_MII_TXD_03__FUNC_MII_TXD_03	(BCM63138_PIN_NO(75) | 1)
+#define BCM63138_MII_TXD_03__FUNC_GPIO_75	(BCM63138_PIN_NO(75) | 5)
+
+#define BCM63138_RGMII1_RXCLK__FUNC_RGMII1_RXCLK	(BCM63138_PIN_NO(76) | 1)
+#define BCM63138_RGMII1_RXCLK__FUNC_GPIO_76		(BCM63138_PIN_NO(76) | 5)
+
+#define BCM63138_RGMII1_RXCTL__FUNC_RGMII1_RXCTL	(BCM63138_PIN_NO(77) | 1)
+#define BCM63138_RGMII1_RXCTL__FUNC_GPIO_77		(BCM63138_PIN_NO(77) | 5)
+
+#define BCM63138_RGMII1_RXD_00__FUNC_RGMII1_RXD_00	(BCM63138_PIN_NO(78) | 1)
+#define BCM63138_RGMII1_RXD_00__FUNC_GPIO_78		(BCM63138_PIN_NO(78) | 5)
+
+#define BCM63138_RGMII1_RXD_01__FUNC_RGMII1_RXD_01	(BCM63138_PIN_NO(79) | 1)
+#define BCM63138_RGMII1_RXD_01__FUNC_GPIO_79		(BCM63138_PIN_NO(79) | 5)
+
+#define BCM63138_RGMII1_RXD_02__FUNC_RGMII1_RXD_02	(BCM63138_PIN_NO(80) | 1)
+#define BCM63138_RGMII1_RXD_02__FUNC_GPIO_80		(BCM63138_PIN_NO(80) | 5)
+
+#define BCM63138_RGMII1_RXD_03__FUNC_RGMII1_RXD_03	(BCM63138_PIN_NO(81) | 1)
+#define BCM63138_RGMII1_RXD_03__FUNC_GPIO_81		(BCM63138_PIN_NO(81) | 5)
+
+#define BCM63138_RGMII1_TXCLK__FUNC_RGMII1_TXCLK	(BCM63138_PIN_NO(82) | 1)
+#define BCM63138_RGMII1_TXCLK__FUNC_GPIO_82		(BCM63138_PIN_NO(82) | 5)
+
+#define BCM63138_RGMII1_TXCTL__FUNC_RGMII1_TXCTL	(BCM63138_PIN_NO(83) | 1)
+#define BCM63138_RGMII1_TXCTL__FUNC_GPIO_83		(BCM63138_PIN_NO(83) | 5)
+
+#define BCM63138_RGMII1_TXD_00__FUNC_RGMII1_TXD_00	(BCM63138_PIN_NO(84) | 1)
+#define BCM63138_RGMII1_TXD_00__FUNC_GPIO_84		(BCM63138_PIN_NO(84) | 5)
+
+#define BCM63138_RGMII1_TXD_01__FUNC_RGMII1_TXD_01	(BCM63138_PIN_NO(85) | 1)
+#define BCM63138_RGMII1_TXD_01__FUNC_GPIO_85		(BCM63138_PIN_NO(85) | 5)
+
+#define BCM63138_RGMII1_TXD_02__FUNC_RGMII1_TXD_02	(BCM63138_PIN_NO(86) | 1)
+#define BCM63138_RGMII1_TXD_02__FUNC_GPIO_86		(BCM63138_PIN_NO(86) | 5)
+
+#define BCM63138_RGMII1_TXD_03__FUNC_RGMII1_TXD_03	(BCM63138_PIN_NO(87) | 1)
+#define BCM63138_RGMII1_TXD_03__FUNC_GPIO_87		(BCM63138_PIN_NO(87) | 5)
+
+#define BCM63138_RGMII2_RXCLK__FUNC_RGMII2_RXCLK	(BCM63138_PIN_NO(88) | 1)
+#define BCM63138_RGMII2_RXCLK__FUNC_GPIO_88		(BCM63138_PIN_NO(88) | 5)
+
+#define BCM63138_RGMII2_RXCTL__FUNC_RGMII2_RXCTL	(BCM63138_PIN_NO(89) | 1)
+#define BCM63138_RGMII2_RXCTL__FUNC_GPIO_89		(BCM63138_PIN_NO(89) | 5)
+
+#define BCM63138_RGMII2_RXD_00__FUNC_RGMII2_RXD_00	(BCM63138_PIN_NO(90) | 1)
+#define BCM63138_RGMII2_RXD_00__FUNC_GPIO_90		(BCM63138_PIN_NO(90) | 5)
+
+#define BCM63138_RGMII2_RXD_01__FUNC_RGMII2_RXD_01	(BCM63138_PIN_NO(91) | 1)
+#define BCM63138_RGMII2_RXD_01__FUNC_GPIO_91		(BCM63138_PIN_NO(91) | 5)
+
+#define BCM63138_RGMII2_RXD_02__FUNC_RGMII2_RXD_02	(BCM63138_PIN_NO(92) | 1)
+#define BCM63138_RGMII2_RXD_02__FUNC_GPIO_92		(BCM63138_PIN_NO(92) | 5)
+
+#define BCM63138_RGMII2_RXD_03__FUNC_RGMII2_RXD_03	(BCM63138_PIN_NO(93) | 1)
+#define BCM63138_RGMII2_RXD_03__FUNC_GPIO_93		(BCM63138_PIN_NO(93) | 5)
+
+#define BCM63138_RGMII2_TXCLK__FUNC_RGMII2_TXCLK	(BCM63138_PIN_NO(94) | 1)
+#define BCM63138_RGMII2_TXCLK__FUNC_GPIO_94		(BCM63138_PIN_NO(94) | 5)
+
+#define BCM63138_RGMII2_TXCTL__FUNC_RGMII2_TXCTL	(BCM63138_PIN_NO(95) | 1)
+#define BCM63138_RGMII2_TXCTL__FUNC_GPIO_95		(BCM63138_PIN_NO(95) | 5)
+
+#define BCM63138_RGMII2_TXD_00__FUNC_RGMII2_TXD_00	(BCM63138_PIN_NO(96) | 1)
+#define BCM63138_RGMII2_TXD_00__FUNC_GPIO_96		(BCM63138_PIN_NO(96) | 5)
+
+#define BCM63138_RGMII2_TXD_01__FUNC_RGMII2_TXD_01	(BCM63138_PIN_NO(97) | 1)
+#define BCM63138_RGMII2_TXD_01__FUNC_GPIO_97		(BCM63138_PIN_NO(97) | 5)
+
+#define BCM63138_RGMII2_TXD_02__FUNC_RGMII2_TXD_02	(BCM63138_PIN_NO(98) | 1)
+#define BCM63138_RGMII2_TXD_02__FUNC_GPIO_98		(BCM63138_PIN_NO(98) | 5)
+
+#define BCM63138_RGMII2_TXD_03__FUNC_RGMII2_TXD_03	(BCM63138_PIN_NO(99) | 1)
+#define BCM63138_RGMII2_TXD_03__FUNC_GPIO_99		(BCM63138_PIN_NO(99) | 5)
+
+#define BCM63138_RGMII3_RXCLK__FUNC_RGMII3_RXCLK	(BCM63138_PIN_NO(100) | 1)
+#define BCM63138_RGMII3_RXCLK__FUNC_LED_00		(BCM63138_PIN_NO(100) | 4)
+#define BCM63138_RGMII3_RXCLK__FUNC_GPIO_100		(BCM63138_PIN_NO(100) | 5)
+
+#define BCM63138_RGMII3_RXCTL__FUNC_RGMII3_RXCTL	(BCM63138_PIN_NO(101) | 1)
+#define BCM63138_RGMII3_RXCTL__FUNC_LED_01		(BCM63138_PIN_NO(101) | 4)
+#define BCM63138_RGMII3_RXCTL__FUNC_GPIO_101		(BCM63138_PIN_NO(101) | 5)
+
+#define BCM63138_RGMII3_RXD_00__FUNC_RGMII3_RXD_00	(BCM63138_PIN_NO(102) | 1)
+#define BCM63138_RGMII3_RXD_00__FUNC_LED_02		(BCM63138_PIN_NO(102) | 4)
+#define BCM63138_RGMII3_RXD_00__FUNC_GPIO_102		(BCM63138_PIN_NO(102) | 5)
+
+#define BCM63138_RGMII3_RXD_01__FUNC_RGMII3_RXD_01	(BCM63138_PIN_NO(103) | 1)
+#define BCM63138_RGMII3_RXD_01__FUNC_LED_03		(BCM63138_PIN_NO(103) | 4)
+#define BCM63138_RGMII3_RXD_01__FUNC_GPIO_103		(BCM63138_PIN_NO(103) | 5)
+
+#define BCM63138_RGMII3_RXD_02__FUNC_RGMII3_RXD_02	(BCM63138_PIN_NO(104) | 1)
+#define BCM63138_RGMII3_RXD_02__FUNC_LED_04		(BCM63138_PIN_NO(104) | 4)
+#define BCM63138_RGMII3_RXD_02__FUNC_GPIO_104		(BCM63138_PIN_NO(104) | 5)
+
+#define BCM63138_RGMII3_RXD_03__FUNC_RGMII3_RXD_03	(BCM63138_PIN_NO(105) | 1)
+#define BCM63138_RGMII3_RXD_03__FUNC_LED_05		(BCM63138_PIN_NO(105) | 4)
+#define BCM63138_RGMII3_RXD_03__FUNC_GPIO_105		(BCM63138_PIN_NO(105) | 5)
+
+#define BCM63138_RGMII3_TXCLK__FUNC_RGMII3_TXCLK	(BCM63138_PIN_NO(106) | 1)
+#define BCM63138_RGMII3_TXCLK__FUNC_LED_06		(BCM63138_PIN_NO(106) | 4)
+#define BCM63138_RGMII3_TXCLK__FUNC_GPIO_106		(BCM63138_PIN_NO(106) | 5)
+
+#define BCM63138_RGMII3_TXCTL__FUNC_RGMII3_TXCTL	(BCM63138_PIN_NO(107) | 1)
+#define BCM63138_RGMII3_TXCTL__FUNC_LED_07		(BCM63138_PIN_NO(107) | 4)
+#define BCM63138_RGMII3_TXCTL__FUNC_GPIO_107		(BCM63138_PIN_NO(107) | 5)
+
+#define BCM63138_RGMII3_TXD_00__FUNC_RGMII3_TXD_00	(BCM63138_PIN_NO(108) | 1)
+#define BCM63138_RGMII3_TXD_00__FUNC_LED_08		(BCM63138_PIN_NO(108) | 4)
+#define BCM63138_RGMII3_TXD_00__FUNC_GPIO_108		(BCM63138_PIN_NO(108) | 5)
+#define BCM63138_RGMII3_TXD_00__FUNC_LED_20		(BCM63138_PIN_NO(108) | 6)
+
+#define BCM63138_RGMII3_TXD_01__FUNC_RGMII3_TXD_01	(BCM63138_PIN_NO(109) | 1)
+#define BCM63138_RGMII3_TXD_01__FUNC_LED_09		(BCM63138_PIN_NO(109) | 4)
+#define BCM63138_RGMII3_TXD_01__FUNC_GPIO_109		(BCM63138_PIN_NO(109) | 5)
+#define BCM63138_RGMII3_TXD_01__FUNC_LED_21		(BCM63138_PIN_NO(109) | 6)
+
+#define BCM63138_RGMII3_TXD_02__FUNC_RGMII3_TXD_02	(BCM63138_PIN_NO(110) | 1)
+#define BCM63138_RGMII3_TXD_02__FUNC_LED_10		(BCM63138_PIN_NO(110) | 4)
+#define BCM63138_RGMII3_TXD_02__FUNC_GPIO_110		(BCM63138_PIN_NO(110) | 5)
+
+#define BCM63138_RGMII3_TXD_03__FUNC_RGMII3_TXD_03	(BCM63138_PIN_NO(111) | 1)
+#define BCM63138_RGMII3_TXD_03__FUNC_LED_11		(BCM63138_PIN_NO(111) | 4)
+#define BCM63138_RGMII3_TXD_03__FUNC_GPIO_111		(BCM63138_PIN_NO(111) | 5)
+
+#define BCM63138_RGMII_MDC__FUNC_RGMII_MDC		(BCM63138_PIN_NO(112) | 1)
+#define BCM63138_RGMII_MDC__FUNC_GPIO_112		(BCM63138_PIN_NO(112) | 5)
+
+#define BCM63138_RGMII_MDIO__FUNC_RGMII_MDIO		(BCM63138_PIN_NO(113) | 1)
+#define BCM63138_RGMII_MDIO__FUNC_GPIO_113		(BCM63138_PIN_NO(113) | 5)
+
+#define BCM63138_BMU_AC_EN__FUNC_BMU_AC_EN		(BCM63138_PIN_NO(114) | 1)
+#define BCM63138_BMU_AC_EN__FUNC_GPIO_114		(BCM63138_PIN_NO(114) | 5)
+
+#define BCM63138_BMU_DIS_CTRL__FUNC_BMU_DIS_CTRL	(BCM63138_PIN_NO(115) | 1)
+#define BCM63138_BMU_DIS_CTRL__FUNC_GPIO_115		(BCM63138_PIN_NO(115) | 5)
+
+#define BCM63138_BMU_ENA__FUNC_BMU_ENA		(BCM63138_PIN_NO(116) | 1)
+#define BCM63138_BMU_ENA__FUNC_GPIO_116		(BCM63138_PIN_NO(116) | 5)
+
+#define BCM63138_BMU_ENB__FUNC_BMU_ENB		(BCM63138_PIN_NO(117) | 1)
+#define BCM63138_BMU_ENB__FUNC_I2C_SDA		(BCM63138_PIN_NO(117) | 2)
+#define BCM63138_BMU_ENB__FUNC_GPIO_117		(BCM63138_PIN_NO(117) | 5)
+
+#define BCM63138_BMU_OWA__FUNC_BMU_OWA		(BCM63138_PIN_NO(118) | 1)
+#define BCM63138_BMU_OWA__FUNC_GPIO_118		(BCM63138_PIN_NO(118) | 5)
+
+#define BCM63138_BMU_OWB__FUNC_BMU_OWB		(BCM63138_PIN_NO(119) | 1)
+#define BCM63138_BMU_OWB__FUNC_I2C_SCL		(BCM63138_PIN_NO(119) | 2)
+#define BCM63138_BMU_OWB__FUNC_GPIO_119		(BCM63138_PIN_NO(119) | 5)
+
+#define BCM63138_BMU_PWM_OUT__FUNC_BMU_PWM_OUT		(BCM63138_PIN_NO(120) | 1)
+#define BCM63138_BMU_PWM_OUT__FUNC_GPIO_120		(BCM63138_PIN_NO(120) | 5)
+
+#define BCM63138_UART0_SIN__FUNC_UART0_SIN		(BCM63138_PIN_NO(121) | 1)
+#define BCM63138_UART0_SIN__FUNC_GPIO_121		(BCM63138_PIN_NO(121) | 5)
+
+#define BCM63138_UART0_SOUT__FUNC_UART0_SOUT		(BCM63138_PIN_NO(122) | 1)
+#define BCM63138_UART0_SOUT__FUNC_GPIO_122		(BCM63138_PIN_NO(122) | 5)
+
+#define BCM63138_SPI_CLK__FUNC_SPI_CLK		(BCM63138_PIN_NO(123) | 0)
+#define BCM63138_SPI_CLK__FUNC_GPIO_123		(BCM63138_PIN_NO(123) | 5)
+
+#define BCM63138_SPI_MOSI__FUNC_SPI_MOSI		(BCM63138_PIN_NO(124) | 0)
+#define BCM63138_SPI_MOSI__FUNC_GPIO_124		(BCM63138_PIN_NO(124) | 5)
+
+#define BCM63138_SPI_MISO__FUNC_SPI_MISO		(BCM63138_PIN_NO(125) | 0)
+#define BCM63138_SPI_MISO__FUNC_SPI_MISO_1		(BCM63138_PIN_NO(125) | 1)
+#define BCM63138_SPI_MISO__FUNC_GPIO_125		(BCM63138_PIN_NO(125) | 5)
+
+#define BCM63138_SPI_SSB0__FUNC_SPI_SSB0		(BCM63138_PIN_NO(126) | 0)
+#define BCM63138_SPI_SSB0__FUNC_SPI_SSB0_1		(BCM63138_PIN_NO(126) | 1)
+#define BCM63138_SPI_SSB0__FUNC_GPIO_126		(BCM63138_PIN_NO(126) | 5)
+
+#define BCM63138_SPI_SSB1__FUNC_SPI_SSB1		(BCM63138_PIN_NO(127) | 0)
+#define BCM63138_SPI_SSB1__FUNC_SPI_SSB1_1		(BCM63138_PIN_NO(127) | 1)
+#define BCM63138_SPI_SSB1__FUNC_GPIO_127		(BCM63138_PIN_NO(127) | 5)
+
+#define BCM63138_PCIE0_CLKREQ_B__FUNC_PCIE0_CLKREQ_B	(BCM63138_PIN_NO(128) | 0)
+#define BCM63138_PCIE0_CLKREQ_B__FUNC_GPIO_128		(BCM63138_PIN_NO(128) | 5)
+
+#define BCM63138_PCIE0_RST_B__FUNC_PCIE0_RST_B		(BCM63138_PIN_NO(129) | 0)
+#define BCM63138_PCIE0_RST_B__FUNC_GPIO_129		(BCM63138_PIN_NO(129) | 5)
+
+#define BCM63138_PCIE1_CLKREQ_B__FUNC_PCIE1_CLKREQ_B	(BCM63138_PIN_NO(130) | 0)
+#define BCM63138_PCIE1_CLKREQ_B__FUNC_GPIO_130		(BCM63138_PIN_NO(130) | 5)
+
+#define BCM63138_PCIE1_RST_B__FUNC_PCIE1_RST_B		(BCM63138_PIN_NO(131) | 0)
+#define BCM63138_PCIE1_RST_B__FUNC_GPIO_131		(BCM63138_PIN_NO(131) | 5)
+
+#define BCM63138_USB0_PWRFLT__FUNC_USB0_PWRFLT		(BCM63138_PIN_NO(132) | 1)
+#define BCM63138_USB0_PWRFLT__FUNC_GPIO_132		(BCM63138_PIN_NO(132) | 5)
+
+#define BCM63138_USB0_PWRON__FUNC_USB0_PWRON		(BCM63138_PIN_NO(133) | 1)
+#define BCM63138_USB0_PWRON__FUNC_GPIO_133		(BCM63138_PIN_NO(133) | 5)
+
+#define BCM63138_USB1_PWRFLT__FUNC_USB1_PWRFLT		(BCM63138_PIN_NO(134) | 1)
+#define BCM63138_USB1_PWRFLT__FUNC_GPIO_134		(BCM63138_PIN_NO(134) | 5)
+
+#define BCM63138_USB1_PWRON__FUNC_USB1_PWRON		(BCM63138_PIN_NO(135) | 1)
+#define BCM63138_USB1_PWRON__FUNC_GPIO_135		(BCM63138_PIN_NO(135) | 5)
+
+#define BCM63138_RESET_OUT_B__FUNC_RESET_OUT_B		(BCM63138_PIN_NO(136) | 0)
+#define BCM63138_RESET_OUT_B__FUNC_GPIO_136		(BCM63138_PIN_NO(136) | 5)
+
+#define BCM63138_DECT_RDI__FUNC_DECT_RDI		(BCM63138_PIN_NO(137) | 1)
+#define BCM63138_DECT_RDI__FUNC_GPIO_137		(BCM63138_PIN_NO(137) | 5)
+
+#define BCM63138_DECT_BTDO__FUNC_DECT_BTDO		(BCM63138_PIN_NO(138) | 1)
+#define BCM63138_DECT_BTDO__FUNC_GPIO_138		(BCM63138_PIN_NO(138) | 5)
+
+#define BCM63138_DECT_MWR_LE__FUNC_DECT_MWR_LE		(BCM63138_PIN_NO(139) | 1)
+#define BCM63138_DECT_MWR_LE__FUNC_GPIO_139		(BCM63138_PIN_NO(139) | 5)
+
+#define BCM63138_DECT_MWR_SK__FUNC_DECT_MWR_SK		(BCM63138_PIN_NO(140) | 1)
+#define BCM63138_DECT_MWR_SK__FUNC_GPIO_140		(BCM63138_PIN_NO(140) | 5)
+
+#define BCM63138_DECT_MWR_SIO__FUNC_DECT_MWR_SIO	(BCM63138_PIN_NO(141) | 1)
+#define BCM63138_DECT_MWR_SIO__FUNC_GPIO_141		(BCM63138_PIN_NO(141) | 5)
+
+#endif /* _DT_BINDINGS_BCM63138_PINFUNC_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/dt-bindings/pinctrl/bcm63158-pinfunc.h	2025-09-25 17:40:36.923373950 +0200
@@ -0,0 +1,519 @@
+/*
+ * bcm63158-pinfunc.h for pinctrl-bcm63158
+ * Created by <nschichan@freebox.fr> on Wed May 22 18:17:39 2019
+ */
+
+#ifndef _DT_BINDINGS_BCM63158_PINFUNC_H
+#define _DT_BINDINGS_BCM63158_PINFUNC_H
+
+#define BCM63158_PIN_NO(x, y)		(((x) << 8) | (y))
+
+/*
+ * generated from gen-pinmux.pl < bcm63158-pinctrl-table
+ */
+#define BCM63158_GPIO_00__FUNC_A_SER_LED_DATA          (BCM63158_PIN_NO(0, 1))
+#define BCM63158_GPIO_00__FUNC_A_LED_00                (BCM63158_PIN_NO(0, 4))
+#define BCM63158_GPIO_00__FUNC_GPIO_00                 (BCM63158_PIN_NO(0, 5))
+
+#define BCM63158_GPIO_01__FUNC_A_SER_LED_CLK           (BCM63158_PIN_NO(1, 1))
+#define BCM63158_GPIO_01__FUNC_A_LED_01                (BCM63158_PIN_NO(1, 4))
+#define BCM63158_GPIO_01__FUNC_GPIO_01                 (BCM63158_PIN_NO(1, 5))
+
+#define BCM63158_GPIO_02__FUNC_A_SER_LED_MASK          (BCM63158_PIN_NO(2, 1))
+#define BCM63158_GPIO_02__FUNC_A_LED_02                (BCM63158_PIN_NO(2, 4))
+#define BCM63158_GPIO_02__FUNC_GPIO_02                 (BCM63158_PIN_NO(2, 5))
+
+#define BCM63158_GPIO_03__FUNC_A_UART2_CTS             (BCM63158_PIN_NO(3, 1))
+#define BCM63158_GPIO_03__FUNC_B_PPS_IN                (BCM63158_PIN_NO(3, 2))
+#define BCM63158_GPIO_03__FUNC_A_LED_03                (BCM63158_PIN_NO(3, 4))
+#define BCM63158_GPIO_03__FUNC_GPIO_03                 (BCM63158_PIN_NO(3, 5))
+
+#define BCM63158_GPIO_04__FUNC_A_UART2_RTS             (BCM63158_PIN_NO(4, 1))
+#define BCM63158_GPIO_04__FUNC_B_PPS_OUT               (BCM63158_PIN_NO(4, 2))
+#define BCM63158_GPIO_04__FUNC_A_LED_04                (BCM63158_PIN_NO(4, 4))
+#define BCM63158_GPIO_04__FUNC_GPIO_04                 (BCM63158_PIN_NO(4, 5))
+
+#define BCM63158_GPIO_05__FUNC_A_UART2_SIN             (BCM63158_PIN_NO(5, 1))
+#define BCM63158_GPIO_05__FUNC_A_LED_05                (BCM63158_PIN_NO(5, 4))
+#define BCM63158_GPIO_05__FUNC_GPIO_05                 (BCM63158_PIN_NO(5, 5))
+
+#define BCM63158_GPIO_06__FUNC_A_UART2_SOUT            (BCM63158_PIN_NO(6, 1))
+#define BCM63158_GPIO_06__FUNC_A_LED_06                (BCM63158_PIN_NO(6, 4))
+#define BCM63158_GPIO_06__FUNC_GPIO_06                 (BCM63158_PIN_NO(6, 5))
+
+#define BCM63158_GPIO_07__FUNC_A_SPIM_SS5_B            (BCM63158_PIN_NO(7, 1))
+#define BCM63158_GPIO_07__FUNC_B_NTR_OUT               (BCM63158_PIN_NO(7, 2))
+#define BCM63158_GPIO_07__FUNC_A_LED_07                (BCM63158_PIN_NO(7, 4))
+#define BCM63158_GPIO_07__FUNC_GPIO_07                 (BCM63158_PIN_NO(7, 5))
+#define BCM63158_GPIO_07__FUNC_B_NTR_IN                (BCM63158_PIN_NO(7, 6))
+
+#define BCM63158_GPIO_08__FUNC_A_SPIM_SS4_B            (BCM63158_PIN_NO(8, 1))
+#define BCM63158_GPIO_08__FUNC_A_LED_08                (BCM63158_PIN_NO(8, 4))
+#define BCM63158_GPIO_08__FUNC_GPIO_08                 (BCM63158_PIN_NO(8, 5))
+
+#define BCM63158_GPIO_09__FUNC_A_SPIM_SS3_B            (BCM63158_PIN_NO(9, 1))
+#define BCM63158_GPIO_09__FUNC_B_USBD_ID               (BCM63158_PIN_NO(9, 3))
+#define BCM63158_GPIO_09__FUNC_A_LED_09                (BCM63158_PIN_NO(9, 4))
+#define BCM63158_GPIO_09__FUNC_GPIO_09                 (BCM63158_PIN_NO(9, 5))
+#define BCM63158_GPIO_09__FUNC_A_AE_SERDES_MOD_DEF0    (BCM63158_PIN_NO(9, 6))
+
+#define BCM63158_GPIO_10__FUNC_A_SPIM_SS2_B            (BCM63158_PIN_NO(10, 1))
+#define BCM63158_GPIO_10__FUNC_A_PMD_EXT_LOS           (BCM63158_PIN_NO(10, 2))
+#define BCM63158_GPIO_10__FUNC_B_USBD_VBUS_PRESENT     (BCM63158_PIN_NO(10, 3))
+#define BCM63158_GPIO_10__FUNC_A_LED_10                (BCM63158_PIN_NO(10, 4))
+#define BCM63158_GPIO_10__FUNC_GPIO_10                 (BCM63158_PIN_NO(10, 5))
+#define BCM63158_GPIO_10__FUNC_A_AE_FIBER_DETECT       (BCM63158_PIN_NO(10, 6))
+
+#define BCM63158_GPIO_11__FUNC_A_I2C_SDA_0             (BCM63158_PIN_NO(11, 2))
+#define BCM63158_GPIO_11__FUNC_A_LED_11                (BCM63158_PIN_NO(11, 4))
+#define BCM63158_GPIO_11__FUNC_GPIO_11                 (BCM63158_PIN_NO(11, 5))
+
+#define BCM63158_GPIO_12__FUNC_A_PPS_IN                (BCM63158_PIN_NO(12, 1))
+#define BCM63158_GPIO_12__FUNC_A_I2C_SCL_0             (BCM63158_PIN_NO(12, 2))
+#define BCM63158_GPIO_12__FUNC_A_LED_12                (BCM63158_PIN_NO(12, 4))
+#define BCM63158_GPIO_12__FUNC_GPIO_12                 (BCM63158_PIN_NO(12, 5))
+#define BCM63158_GPIO_12__FUNC_C_SGMII_SERDES_MOD_DEF0 (BCM63158_PIN_NO(12, 6))
+
+#define BCM63158_GPIO_13__FUNC_A_PPS_OUT               (BCM63158_PIN_NO(13, 1))
+#define BCM63158_GPIO_13__FUNC_A_LED_13                (BCM63158_PIN_NO(13, 4))
+#define BCM63158_GPIO_13__FUNC_GPIO_13                 (BCM63158_PIN_NO(13, 5))
+
+#define BCM63158_GPIO_14__FUNC_A_NTR_OUT               (BCM63158_PIN_NO(14, 1))
+#define BCM63158_GPIO_14__FUNC_I2S_RX_SDATA            (BCM63158_PIN_NO(14, 2))
+#define BCM63158_GPIO_14__FUNC_A_LED_14                (BCM63158_PIN_NO(14, 4))
+#define BCM63158_GPIO_14__FUNC_GPIO_14                 (BCM63158_PIN_NO(14, 5))
+#define BCM63158_GPIO_14__FUNC_A_NTR_IN                (BCM63158_PIN_NO(14, 6))
+
+#define BCM63158_GPIO_15__FUNC_SW_SPIS_CLK             (BCM63158_PIN_NO(15, 2))
+#define BCM63158_GPIO_15__FUNC_A_LED_15                (BCM63158_PIN_NO(15, 4))
+#define BCM63158_GPIO_15__FUNC_GPIO_15                 (BCM63158_PIN_NO(15, 5))
+#define BCM63158_GPIO_15__FUNC_B_I2C_SDA_1             (BCM63158_PIN_NO(15, 6))
+
+#define BCM63158_GPIO_16__FUNC_SW_SPIS_SS_B            (BCM63158_PIN_NO(16, 2))
+#define BCM63158_GPIO_16__FUNC_A_LED_16                (BCM63158_PIN_NO(16, 4))
+#define BCM63158_GPIO_16__FUNC_GPIO_16                 (BCM63158_PIN_NO(16, 5))
+#define BCM63158_GPIO_16__FUNC_B_I2C_SCL_1             (BCM63158_PIN_NO(16, 6))
+
+#define BCM63158_GPIO_17__FUNC_SW_SPIS_MISO            (BCM63158_PIN_NO(17, 2))
+#define BCM63158_GPIO_17__FUNC_A_LED_17                (BCM63158_PIN_NO(17, 4))
+#define BCM63158_GPIO_17__FUNC_GPIO_17                 (BCM63158_PIN_NO(17, 5))
+#define BCM63158_GPIO_17__FUNC_C_UART3_SIN             (BCM63158_PIN_NO(17, 6))
+
+#define BCM63158_GPIO_18__FUNC_SW_SPIS_MOSI            (BCM63158_PIN_NO(18, 2))
+#define BCM63158_GPIO_18__FUNC_A_LED_18                (BCM63158_PIN_NO(18, 4))
+#define BCM63158_GPIO_18__FUNC_GPIO_18                 (BCM63158_PIN_NO(18, 5))
+#define BCM63158_GPIO_18__FUNC_C_UART3_SOUT            (BCM63158_PIN_NO(18, 6))
+
+#define BCM63158_GPIO_19__FUNC_VREG_SYNC               (BCM63158_PIN_NO(19, 2))
+#define BCM63158_GPIO_19__FUNC_A_LED_19                (BCM63158_PIN_NO(19, 4))
+#define BCM63158_GPIO_19__FUNC_GPIO_19                 (BCM63158_PIN_NO(19, 5))
+#define BCM63158_GPIO_19__FUNC_A_SGMII_FIBER_DETECT    (BCM63158_PIN_NO(19, 6))
+
+#define BCM63158_GPIO_20__FUNC_SPIS_CLK                (BCM63158_PIN_NO(20, 1))
+#define BCM63158_GPIO_20__FUNC_B_UART2_CTS             (BCM63158_PIN_NO(20, 2))
+#define BCM63158_GPIO_20__FUNC_B_UART3_SIN             (BCM63158_PIN_NO(20, 3))
+#define BCM63158_GPIO_20__FUNC_A_LED_20                (BCM63158_PIN_NO(20, 4))
+#define BCM63158_GPIO_20__FUNC_GPIO_20                 (BCM63158_PIN_NO(20, 5))
+#define BCM63158_GPIO_20__FUNC_A_SGMII_SERDES_MOD_DEF0 (BCM63158_PIN_NO(20, 6))
+
+#define BCM63158_GPIO_21__FUNC_SPIS_SS_B               (BCM63158_PIN_NO(21, 1))
+#define BCM63158_GPIO_21__FUNC_B_UART2_RTS             (BCM63158_PIN_NO(21, 2))
+#define BCM63158_GPIO_21__FUNC_B_UART3_SOUT            (BCM63158_PIN_NO(21, 3))
+#define BCM63158_GPIO_21__FUNC_A_LED_21                (BCM63158_PIN_NO(21, 4))
+#define BCM63158_GPIO_21__FUNC_GPIO_21                 (BCM63158_PIN_NO(21, 5))
+#define BCM63158_GPIO_21__FUNC_C_SGMII_FIBER_DETECT    (BCM63158_PIN_NO(21, 6))
+
+#define BCM63158_GPIO_22__FUNC_SPIS_MISO               (BCM63158_PIN_NO(22, 1))
+#define BCM63158_GPIO_22__FUNC_B_UART2_SOUT            (BCM63158_PIN_NO(22, 2))
+#define BCM63158_GPIO_22__FUNC_A_LED_22                (BCM63158_PIN_NO(22, 4))
+#define BCM63158_GPIO_22__FUNC_GPIO_22                 (BCM63158_PIN_NO(22, 5))
+
+#define BCM63158_GPIO_23__FUNC_SPIS_MOSI               (BCM63158_PIN_NO(23, 1))
+#define BCM63158_GPIO_23__FUNC_B_UART2_SIN             (BCM63158_PIN_NO(23, 2))
+#define BCM63158_GPIO_23__FUNC_A_LED_23                (BCM63158_PIN_NO(23, 4))
+#define BCM63158_GPIO_23__FUNC_GPIO_23                 (BCM63158_PIN_NO(23, 5))
+
+#define BCM63158_GPIO_24__FUNC_B_UART1_SOUT            (BCM63158_PIN_NO(24, 2))
+#define BCM63158_GPIO_24__FUNC_B_I2C_SDA_0             (BCM63158_PIN_NO(24, 3))
+#define BCM63158_GPIO_24__FUNC_A_LED_24                (BCM63158_PIN_NO(24, 4))
+#define BCM63158_GPIO_24__FUNC_GPIO_24                 (BCM63158_PIN_NO(24, 5))
+
+#define BCM63158_GPIO_25__FUNC_B_SPIM_SS2_B            (BCM63158_PIN_NO(25, 1))
+#define BCM63158_GPIO_25__FUNC_B_UART1_SIN             (BCM63158_PIN_NO(25, 2))
+#define BCM63158_GPIO_25__FUNC_B_I2C_SCL_0             (BCM63158_PIN_NO(25, 3))
+#define BCM63158_GPIO_25__FUNC_A_LED_25                (BCM63158_PIN_NO(25, 4))
+#define BCM63158_GPIO_25__FUNC_GPIO_25                 (BCM63158_PIN_NO(25, 5))
+
+#define BCM63158_GPIO_26__FUNC_B_SPIM_SS3_B            (BCM63158_PIN_NO(26, 1))
+#define BCM63158_GPIO_26__FUNC_A_I2C_SDA_1             (BCM63158_PIN_NO(26, 2))
+#define BCM63158_GPIO_26__FUNC_A_UART3_SIN             (BCM63158_PIN_NO(26, 3))
+#define BCM63158_GPIO_26__FUNC_A_LED_26                (BCM63158_PIN_NO(26, 4))
+#define BCM63158_GPIO_26__FUNC_GPIO_26                 (BCM63158_PIN_NO(26, 5))
+
+#define BCM63158_GPIO_27__FUNC_B_SPIM_SS4_B            (BCM63158_PIN_NO(27, 1))
+#define BCM63158_GPIO_27__FUNC_A_I2C_SCL_1             (BCM63158_PIN_NO(27, 2))
+#define BCM63158_GPIO_27__FUNC_A_UART3_SOUT            (BCM63158_PIN_NO(27, 3))
+#define BCM63158_GPIO_27__FUNC_A_LED_27                (BCM63158_PIN_NO(27, 4))
+#define BCM63158_GPIO_27__FUNC_GPIO_27                 (BCM63158_PIN_NO(27, 5))
+
+#define BCM63158_GPIO_28__FUNC_B_SPIM_SS5_B            (BCM63158_PIN_NO(28, 1))
+#define BCM63158_GPIO_28__FUNC_I2S_MCLK                (BCM63158_PIN_NO(28, 2))
+#define BCM63158_GPIO_28__FUNC_A_LED_28                (BCM63158_PIN_NO(28, 4))
+#define BCM63158_GPIO_28__FUNC_GPIO_28                 (BCM63158_PIN_NO(28, 5))
+
+#define BCM63158_GPIO_29__FUNC_B_SER_LED_DATA          (BCM63158_PIN_NO(29, 1))
+#define BCM63158_GPIO_29__FUNC_I2S_LRCK                (BCM63158_PIN_NO(29, 2))
+#define BCM63158_GPIO_29__FUNC_A_LED_29                (BCM63158_PIN_NO(29, 4))
+#define BCM63158_GPIO_29__FUNC_GPIO_29                 (BCM63158_PIN_NO(29, 5))
+
+#define BCM63158_GPIO_30__FUNC_B_SER_LED_CLK           (BCM63158_PIN_NO(30, 1))
+#define BCM63158_GPIO_30__FUNC_I2S_SCLK                (BCM63158_PIN_NO(30, 2))
+#define BCM63158_GPIO_30__FUNC_A_LED_30                (BCM63158_PIN_NO(30, 4))
+#define BCM63158_GPIO_30__FUNC_GPIO_30                 (BCM63158_PIN_NO(30, 5))
+
+#define BCM63158_GPIO_31__FUNC_B_SER_LED_MASK          (BCM63158_PIN_NO(31, 1))
+#define BCM63158_GPIO_31__FUNC_I2S_TX_SDATA            (BCM63158_PIN_NO(31, 2))
+#define BCM63158_GPIO_31__FUNC_A_LED_31                (BCM63158_PIN_NO(31, 4))
+#define BCM63158_GPIO_31__FUNC_GPIO_31                 (BCM63158_PIN_NO(31, 5))
+
+#define BCM63158_GPIO_32__FUNC_VDSL_CTRL0              (BCM63158_PIN_NO(32, 2))
+#define BCM63158_GPIO_32__FUNC_GPIO_32                 (BCM63158_PIN_NO(32, 5))
+
+#define BCM63158_GPIO_33__FUNC_VDSL_CTRL_1             (BCM63158_PIN_NO(33, 2))
+#define BCM63158_GPIO_33__FUNC_B_WAN_EARLY_TXEN        (BCM63158_PIN_NO(33, 3))
+#define BCM63158_GPIO_33__FUNC_GPIO_33                 (BCM63158_PIN_NO(33, 5))
+
+#define BCM63158_GPIO_34__FUNC_VDSL_CTRL_2             (BCM63158_PIN_NO(34, 2))
+#define BCM63158_GPIO_34__FUNC_B_ROGUE_IN              (BCM63158_PIN_NO(34, 3))
+#define BCM63158_GPIO_34__FUNC_GPIO_34                 (BCM63158_PIN_NO(34, 5))
+
+#define BCM63158_GPIO_35__FUNC_VDSL_CTRL_3             (BCM63158_PIN_NO(35, 2))
+#define BCM63158_GPIO_35__FUNC_B_SGMII_FIBER_DETECT    (BCM63158_PIN_NO(35, 3))
+#define BCM63158_GPIO_35__FUNC_GPIO_35                 (BCM63158_PIN_NO(35, 5))
+
+#define BCM63158_GPIO_36__FUNC_VDSL_CTRL_4             (BCM63158_PIN_NO(36, 2))
+#define BCM63158_GPIO_36__FUNC_B_SGMII_SERDES_MOD_DEF0 (BCM63158_PIN_NO(36, 3))
+#define BCM63158_GPIO_36__FUNC_GPIO_36                 (BCM63158_PIN_NO(36, 5))
+
+#define BCM63158_GPIO_37__FUNC_B_PMD_EXT_LOS           (BCM63158_PIN_NO(37, 1))
+#define BCM63158_GPIO_37__FUNC_VDSL_CTRL_5             (BCM63158_PIN_NO(37, 2))
+#define BCM63158_GPIO_37__FUNC_B_AE_FIBER_DETECT       (BCM63158_PIN_NO(37, 3))
+#define BCM63158_GPIO_37__FUNC_GPIO_37                 (BCM63158_PIN_NO(37, 5))
+
+#define BCM63158_GPIO_38__FUNC_B_VREG_SYNC             (BCM63158_PIN_NO(38, 2))
+#define BCM63158_GPIO_38__FUNC_B_AE_SERDES_MOD_DEF0    (BCM63158_PIN_NO(38, 3))
+#define BCM63158_GPIO_38__FUNC_GPIO_38                 (BCM63158_PIN_NO(38, 5))
+
+#define BCM63158_GPIO_39__FUNC_A_WAN_EARLY_TXEN        (BCM63158_PIN_NO(39, 2))
+#define BCM63158_GPIO_39__FUNC_GPIO_39                 (BCM63158_PIN_NO(39, 5))
+
+#define BCM63158_GPIO_40__FUNC_A_ROGUE_IN              (BCM63158_PIN_NO(40, 2))
+#define BCM63158_GPIO_40__FUNC_GPIO_40                 (BCM63158_PIN_NO(40, 5))
+
+#define BCM63158_GPIO_41__FUNC_SYS_IRQ_OUT             (BCM63158_PIN_NO(41, 2))
+#define BCM63158_GPIO_41__FUNC_C_WAN_EARLY_TXEN        (BCM63158_PIN_NO(41, 3))
+#define BCM63158_GPIO_41__FUNC_GPIO_41                 (BCM63158_PIN_NO(41, 5))
+
+#define BCM63158_GPIO_42__FUNC_PCM_SDIN                (BCM63158_PIN_NO(42, 1))
+#define BCM63158_GPIO_42__FUNC_A_UART1_SIN             (BCM63158_PIN_NO(42, 4))
+#define BCM63158_GPIO_42__FUNC_GPIO_42                 (BCM63158_PIN_NO(42, 5))
+
+#define BCM63158_GPIO_43__FUNC_PCM_SDOUT               (BCM63158_PIN_NO(43, 1))
+#define BCM63158_GPIO_43__FUNC_A_UART1_SOUT            (BCM63158_PIN_NO(43, 4))
+#define BCM63158_GPIO_43__FUNC_GPIO_43                 (BCM63158_PIN_NO(43, 5))
+
+#define BCM63158_GPIO_44__FUNC_PCM_CLK                 (BCM63158_PIN_NO(44, 1))
+#define BCM63158_GPIO_44__FUNC_A_USBD_VBUS_PRESENT     (BCM63158_PIN_NO(44, 4))
+#define BCM63158_GPIO_44__FUNC_GPIO_44                 (BCM63158_PIN_NO(44, 5))
+
+#define BCM63158_GPIO_45__FUNC_PCM_FS                  (BCM63158_PIN_NO(45, 1))
+#define BCM63158_GPIO_45__FUNC_A_USBD_ID               (BCM63158_PIN_NO(45, 4))
+#define BCM63158_GPIO_45__FUNC_GPIO_45                 (BCM63158_PIN_NO(45, 5))
+
+#define BCM63158_GPIO_46__FUNC_C_VREG_SYNC             (BCM63158_PIN_NO(46, 2))
+#define BCM63158_GPIO_46__FUNC_GPIO_46                 (BCM63158_PIN_NO(46, 5))
+
+#define BCM63158_GPIO_47__FUNC_NAND_WP                 (BCM63158_PIN_NO(47, 3))
+#define BCM63158_GPIO_47__FUNC_GPIO_47                 (BCM63158_PIN_NO(47, 5))
+
+#define BCM63158_GPIO_48__FUNC_NAND_CE_B               (BCM63158_PIN_NO(48, 3))
+#define BCM63158_GPIO_48__FUNC_GPIO_48                 (BCM63158_PIN_NO(48, 5))
+
+#define BCM63158_GPIO_49__FUNC_NAND_RE_B               (BCM63158_PIN_NO(49, 3))
+#define BCM63158_GPIO_49__FUNC_GPIO_49                 (BCM63158_PIN_NO(49, 5))
+
+#define BCM63158_GPIO_50__FUNC_NAND_RB_B               (BCM63158_PIN_NO(50, 3))
+#define BCM63158_GPIO_50__FUNC_GPIO_50                 (BCM63158_PIN_NO(50, 5))
+
+#define BCM63158_GPIO_51__FUNC_NAND_DATA_0             (BCM63158_PIN_NO(51, 3))
+#define BCM63158_GPIO_51__FUNC_GPIO_51                 (BCM63158_PIN_NO(51, 5))
+
+#define BCM63158_GPIO_52__FUNC_NAND_DATA_1             (BCM63158_PIN_NO(52, 3))
+#define BCM63158_GPIO_52__FUNC_GPIO_52                 (BCM63158_PIN_NO(52, 5))
+
+#define BCM63158_GPIO_53__FUNC_NAND_DATA_2             (BCM63158_PIN_NO(53, 3))
+#define BCM63158_GPIO_53__FUNC_GPIO_53                 (BCM63158_PIN_NO(53, 5))
+
+#define BCM63158_GPIO_54__FUNC_NAND_DATA_3             (BCM63158_PIN_NO(54, 3))
+#define BCM63158_GPIO_54__FUNC_GPIO_54                 (BCM63158_PIN_NO(54, 5))
+
+#define BCM63158_GPIO_55__FUNC_NAND_DATA_4             (BCM63158_PIN_NO(55, 3))
+#define BCM63158_GPIO_55__FUNC_GPIO_55                 (BCM63158_PIN_NO(55, 5))
+
+#define BCM63158_GPIO_56__FUNC_NAND_DATA_5             (BCM63158_PIN_NO(56, 3))
+#define BCM63158_GPIO_56__FUNC_GPIO_56                 (BCM63158_PIN_NO(56, 5))
+
+#define BCM63158_GPIO_57__FUNC_NAND_DATA_6             (BCM63158_PIN_NO(57, 3))
+#define BCM63158_GPIO_57__FUNC_GPIO_57                 (BCM63158_PIN_NO(57, 5))
+
+#define BCM63158_GPIO_58__FUNC_NAND_DATA_7             (BCM63158_PIN_NO(58, 3))
+#define BCM63158_GPIO_58__FUNC_GPIO_58                 (BCM63158_PIN_NO(58, 5))
+
+#define BCM63158_GPIO_59__FUNC_NAND_ALE                (BCM63158_PIN_NO(59, 3))
+#define BCM63158_GPIO_59__FUNC_GPIO_59                 (BCM63158_PIN_NO(59, 5))
+
+#define BCM63158_GPIO_60__FUNC_NAND_WE_B               (BCM63158_PIN_NO(60, 3))
+#define BCM63158_GPIO_60__FUNC_GPIO_60                 (BCM63158_PIN_NO(60, 5))
+
+#define BCM63158_GPIO_61__FUNC_NAND_CLE                (BCM63158_PIN_NO(61, 3))
+#define BCM63158_GPIO_61__FUNC_GPIO_61                 (BCM63158_PIN_NO(61, 5))
+
+#define BCM63158_GPIO_62__FUNC_NAND_CE2_B              (BCM63158_PIN_NO(62, 2))
+#define BCM63158_GPIO_62__FUNC_EMMC_CLK                (BCM63158_PIN_NO(62, 3))
+#define BCM63158_GPIO_62__FUNC_GPIO_62                 (BCM63158_PIN_NO(62, 5))
+
+#define BCM63158_GPIO_63__FUNC_NAND_CE1_B              (BCM63158_PIN_NO(63, 2))
+#define BCM63158_GPIO_63__FUNC_EMMC_CMD                (BCM63158_PIN_NO(63, 3))
+#define BCM63158_GPIO_63__FUNC_GPIO_63                 (BCM63158_PIN_NO(63, 5))
+
+#define BCM63158_GPIO_64__FUNC_RGMII0_RXCLK            (BCM63158_PIN_NO(64, 1))
+#define BCM63158_GPIO_64__FUNC_GPIO_64                 (BCM63158_PIN_NO(64, 5))
+#define BCM63158_GPIO_64__FUNC_B_LED_00                (BCM63158_PIN_NO(64, 6))
+
+#define BCM63158_GPIO_65__FUNC_GPIO_65                 (BCM63158_PIN_NO(65, 5))
+#define BCM63158_GPIO_65__FUNC_B_LED_01                (BCM63158_PIN_NO(65, 6))
+
+#define BCM63158_GPIO_66__FUNC_RGMII0_RXCTL            (BCM63158_PIN_NO(66, 1))
+#define BCM63158_GPIO_66__FUNC_GPIO_66                 (BCM63158_PIN_NO(66, 5))
+#define BCM63158_GPIO_66__FUNC_B_LED_02                (BCM63158_PIN_NO(66, 6))
+
+#define BCM63158_GPIO_67__FUNC_RGMII0_RXD_0            (BCM63158_PIN_NO(67, 1))
+#define BCM63158_GPIO_67__FUNC_GPIO_67                 (BCM63158_PIN_NO(67, 5))
+#define BCM63158_GPIO_67__FUNC_B_LED_03                (BCM63158_PIN_NO(67, 6))
+
+#define BCM63158_GPIO_68__FUNC_RGMII0_RXD_1            (BCM63158_PIN_NO(68, 1))
+#define BCM63158_GPIO_68__FUNC_GPIO_68                 (BCM63158_PIN_NO(68, 5))
+#define BCM63158_GPIO_68__FUNC_B_LED_04                (BCM63158_PIN_NO(68, 6))
+
+#define BCM63158_GPIO_69__FUNC_RGMII0_RXD_2            (BCM63158_PIN_NO(69, 1))
+#define BCM63158_GPIO_69__FUNC_GPIO_69                 (BCM63158_PIN_NO(69, 5))
+#define BCM63158_GPIO_69__FUNC_B_LED_05                (BCM63158_PIN_NO(69, 6))
+
+#define BCM63158_GPIO_70__FUNC_RGMII0_RXD_3            (BCM63158_PIN_NO(70, 1))
+#define BCM63158_GPIO_70__FUNC_GPIO_70                 (BCM63158_PIN_NO(70, 5))
+#define BCM63158_GPIO_70__FUNC_B_LED_06                (BCM63158_PIN_NO(70, 6))
+
+#define BCM63158_GPIO_71__FUNC_RGMII0_TXCLK            (BCM63158_PIN_NO(71, 1))
+#define BCM63158_GPIO_71__FUNC_GPIO_71                 (BCM63158_PIN_NO(71, 5))
+#define BCM63158_GPIO_71__FUNC_B_LED_07                (BCM63158_PIN_NO(71, 6))
+
+#define BCM63158_GPIO_72__FUNC_RGMII0_TXCTL            (BCM63158_PIN_NO(72, 1))
+#define BCM63158_GPIO_72__FUNC_GPIO_72                 (BCM63158_PIN_NO(72, 5))
+#define BCM63158_GPIO_72__FUNC_B_LED_08                (BCM63158_PIN_NO(72, 6))
+
+#define BCM63158_GPIO_73__FUNC_GPIO_73                 (BCM63158_PIN_NO(73, 5))
+#define BCM63158_GPIO_73__FUNC_B_LED_09                (BCM63158_PIN_NO(73, 6))
+
+#define BCM63158_GPIO_74__FUNC_RGMII0_TXD_0            (BCM63158_PIN_NO(74, 1))
+#define BCM63158_GPIO_74__FUNC_GPIO_74                 (BCM63158_PIN_NO(74, 5))
+#define BCM63158_GPIO_74__FUNC_B_LED_10                (BCM63158_PIN_NO(74, 6))
+
+#define BCM63158_GPIO_75__FUNC_RGMII0_TXD_1            (BCM63158_PIN_NO(75, 1))
+#define BCM63158_GPIO_75__FUNC_GPIO_75                 (BCM63158_PIN_NO(75, 5))
+#define BCM63158_GPIO_75__FUNC_B_LED_11                (BCM63158_PIN_NO(75, 6))
+
+#define BCM63158_GPIO_76__FUNC_RGMII0_TXD_2            (BCM63158_PIN_NO(76, 1))
+#define BCM63158_GPIO_76__FUNC_GPIO_76                 (BCM63158_PIN_NO(76, 5))
+#define BCM63158_GPIO_76__FUNC_B_LED_12                (BCM63158_PIN_NO(76, 6))
+
+#define BCM63158_GPIO_77__FUNC_RGMII0_TXD_3            (BCM63158_PIN_NO(77, 1))
+#define BCM63158_GPIO_77__FUNC_GPIO_77                 (BCM63158_PIN_NO(77, 5))
+#define BCM63158_GPIO_77__FUNC_B_LED_13                (BCM63158_PIN_NO(77, 6))
+
+#define BCM63158_GPIO_78__FUNC_GPIO_78                 (BCM63158_PIN_NO(78, 5))
+#define BCM63158_GPIO_78__FUNC_B_LED_14                (BCM63158_PIN_NO(78, 6))
+
+#define BCM63158_GPIO_79__FUNC_GPIO_79                 (BCM63158_PIN_NO(79, 5))
+#define BCM63158_GPIO_79__FUNC_B_LED_15                (BCM63158_PIN_NO(79, 6))
+
+#define BCM63158_GPIO_80__FUNC_RGMII1_RXCLK            (BCM63158_PIN_NO(80, 1))
+#define BCM63158_GPIO_80__FUNC_GPIO_80                 (BCM63158_PIN_NO(80, 5))
+#define BCM63158_GPIO_80__FUNC_B_LED_16                (BCM63158_PIN_NO(80, 6))
+
+#define BCM63158_GPIO_81__FUNC_RGMII1_RXCTL            (BCM63158_PIN_NO(81, 1))
+#define BCM63158_GPIO_81__FUNC_GPIO_81                 (BCM63158_PIN_NO(81, 5))
+#define BCM63158_GPIO_81__FUNC_B_LED_17                (BCM63158_PIN_NO(81, 6))
+
+#define BCM63158_GPIO_82__FUNC_RGMII1_RXD_0            (BCM63158_PIN_NO(82, 1))
+#define BCM63158_GPIO_82__FUNC_GPIO_82                 (BCM63158_PIN_NO(82, 5))
+#define BCM63158_GPIO_82__FUNC_B_LED_18                (BCM63158_PIN_NO(82, 6))
+
+#define BCM63158_GPIO_83__FUNC_RGMII1_RXD_1            (BCM63158_PIN_NO(83, 1))
+#define BCM63158_GPIO_83__FUNC_GPIO_83                 (BCM63158_PIN_NO(83, 5))
+#define BCM63158_GPIO_83__FUNC_B_LED_19                (BCM63158_PIN_NO(83, 6))
+
+#define BCM63158_GPIO_84__FUNC_RGMII1_RXD_2            (BCM63158_PIN_NO(84, 1))
+#define BCM63158_GPIO_84__FUNC_GPIO_84                 (BCM63158_PIN_NO(84, 5))
+#define BCM63158_GPIO_84__FUNC_B_LED_20                (BCM63158_PIN_NO(84, 6))
+
+#define BCM63158_GPIO_85__FUNC_RGMII1_RXD_3            (BCM63158_PIN_NO(85, 1))
+#define BCM63158_GPIO_85__FUNC_GPIO_85                 (BCM63158_PIN_NO(85, 5))
+#define BCM63158_GPIO_85__FUNC_B_LED_21                (BCM63158_PIN_NO(85, 6))
+
+#define BCM63158_GPIO_86__FUNC_RGMII1_TXCLK            (BCM63158_PIN_NO(86, 1))
+#define BCM63158_GPIO_86__FUNC_GPIO_86                 (BCM63158_PIN_NO(86, 5))
+#define BCM63158_GPIO_86__FUNC_B_LED_22                (BCM63158_PIN_NO(86, 6))
+
+#define BCM63158_GPIO_87__FUNC_RGMII1_TXCTL            (BCM63158_PIN_NO(87, 1))
+#define BCM63158_GPIO_87__FUNC_GPIO_87                 (BCM63158_PIN_NO(87, 5))
+#define BCM63158_GPIO_87__FUNC_B_LED_23                (BCM63158_PIN_NO(87, 6))
+
+#define BCM63158_GPIO_88__FUNC_RGMII1_TXD_0            (BCM63158_PIN_NO(88, 1))
+#define BCM63158_GPIO_88__FUNC_GPIO_88                 (BCM63158_PIN_NO(88, 5))
+#define BCM63158_GPIO_88__FUNC_B_LED_24                (BCM63158_PIN_NO(88, 6))
+
+#define BCM63158_GPIO_89__FUNC_RGMII1_TXD_1            (BCM63158_PIN_NO(89, 1))
+#define BCM63158_GPIO_89__FUNC_GPIO_89                 (BCM63158_PIN_NO(89, 5))
+#define BCM63158_GPIO_89__FUNC_B_LED_25                (BCM63158_PIN_NO(89, 6))
+
+#define BCM63158_GPIO_90__FUNC_RGMII1_TXD_2            (BCM63158_PIN_NO(90, 1))
+#define BCM63158_GPIO_90__FUNC_GPIO_90                 (BCM63158_PIN_NO(90, 5))
+#define BCM63158_GPIO_90__FUNC_B_LED_26                (BCM63158_PIN_NO(90, 6))
+
+#define BCM63158_GPIO_91__FUNC_RGMII1_TXD_3            (BCM63158_PIN_NO(91, 1))
+#define BCM63158_GPIO_91__FUNC_GPIO_91                 (BCM63158_PIN_NO(91, 5))
+#define BCM63158_GPIO_91__FUNC_B_LED_27                (BCM63158_PIN_NO(91, 6))
+
+#define BCM63158_GPIO_92__FUNC_RGMII2_RXCLK            (BCM63158_PIN_NO(92, 1))
+#define BCM63158_GPIO_92__FUNC_GPIO_92                 (BCM63158_PIN_NO(92, 5))
+#define BCM63158_GPIO_92__FUNC_B_LED_28                (BCM63158_PIN_NO(92, 6))
+
+#define BCM63158_GPIO_93__FUNC_RGMII2_RXCTL            (BCM63158_PIN_NO(93, 1))
+#define BCM63158_GPIO_93__FUNC_GPIO_93                 (BCM63158_PIN_NO(93, 5))
+#define BCM63158_GPIO_93__FUNC_B_LED_29                (BCM63158_PIN_NO(93, 6))
+
+#define BCM63158_GPIO_94__FUNC_RGMII2_RXD_0            (BCM63158_PIN_NO(94, 1))
+#define BCM63158_GPIO_94__FUNC_GPIO_94                 (BCM63158_PIN_NO(94, 5))
+#define BCM63158_GPIO_94__FUNC_B_LED_30                (BCM63158_PIN_NO(94, 6))
+
+#define BCM63158_GPIO_95__FUNC_RGMII2_RXD_1            (BCM63158_PIN_NO(95, 1))
+#define BCM63158_GPIO_95__FUNC_GPIO_95                 (BCM63158_PIN_NO(95, 5))
+#define BCM63158_GPIO_95__FUNC_B_LED_31                (BCM63158_PIN_NO(95, 6))
+
+#define BCM63158_GPIO_96__FUNC_RGMII2_RXD_2            (BCM63158_PIN_NO(96, 1))
+#define BCM63158_GPIO_96__FUNC_GPIO_96                 (BCM63158_PIN_NO(96, 5))
+
+#define BCM63158_GPIO_97__FUNC_RGMII2_RXD_3            (BCM63158_PIN_NO(97, 1))
+#define BCM63158_GPIO_97__FUNC_GPIO_97                 (BCM63158_PIN_NO(97, 5))
+
+#define BCM63158_GPIO_98__FUNC_RGMII2_TXCLK            (BCM63158_PIN_NO(98, 1))
+#define BCM63158_GPIO_98__FUNC_GPIO_98                 (BCM63158_PIN_NO(98, 5))
+
+#define BCM63158_GPIO_99__FUNC_RGMII2_TXCTL            (BCM63158_PIN_NO(99, 1))
+#define BCM63158_GPIO_99__FUNC_GPIO_99                 (BCM63158_PIN_NO(99, 5))
+
+#define BCM63158_GPIO_100__FUNC_RGMII2_TXD_0           (BCM63158_PIN_NO(100, 1))
+#define BCM63158_GPIO_100__FUNC_GPIO_100               (BCM63158_PIN_NO(100, 5))
+
+#define BCM63158_GPIO_101__FUNC_RGMII2_TXD_1           (BCM63158_PIN_NO(101, 1))
+#define BCM63158_GPIO_101__FUNC_GPIO_101               (BCM63158_PIN_NO(101, 5))
+
+#define BCM63158_GPIO_102__FUNC_RGMII2_TXD_2           (BCM63158_PIN_NO(102, 1))
+#define BCM63158_GPIO_102__FUNC_GPIO_102               (BCM63158_PIN_NO(102, 5))
+
+#define BCM63158_GPIO_103__FUNC_RGMII2_TXD_3           (BCM63158_PIN_NO(103, 1))
+#define BCM63158_GPIO_103__FUNC_GPIO_103               (BCM63158_PIN_NO(103, 5))
+
+#define BCM63158_GPIO_104__FUNC_RGMII_MDC              (BCM63158_PIN_NO(104, 1))
+#define BCM63158_GPIO_104__FUNC_GPIO_104               (BCM63158_PIN_NO(104, 5))
+
+#define BCM63158_GPIO_105__FUNC_RGMII_MDIO             (BCM63158_PIN_NO(105, 1))
+#define BCM63158_GPIO_105__FUNC_GPIO_105               (BCM63158_PIN_NO(105, 5))
+
+#define BCM63158_GPIO_106__FUNC_UART0_SDIN             (BCM63158_PIN_NO(106, 1))
+#define BCM63158_GPIO_106__FUNC_GPIO_106               (BCM63158_PIN_NO(106, 5))
+
+#define BCM63158_GPIO_107__FUNC_UART0_SDOUT            (BCM63158_PIN_NO(107, 1))
+#define BCM63158_GPIO_107__FUNC_GPIO_107               (BCM63158_PIN_NO(107, 5))
+
+#define BCM63158_GPIO_108__FUNC_SPIM_CLK               (BCM63158_PIN_NO(108, 0))
+#define BCM63158_GPIO_108__FUNC_GPIO_108               (BCM63158_PIN_NO(108, 5))
+
+#define BCM63158_GPIO_109__FUNC_SPIM_MOSI              (BCM63158_PIN_NO(109, 0))
+#define BCM63158_GPIO_109__FUNC_GPIO_109               (BCM63158_PIN_NO(109, 5))
+
+#define BCM63158_GPIO_110__FUNC_SPIM_MISO              (BCM63158_PIN_NO(110, 0))
+#define BCM63158_GPIO_110__FUNC_GPIO_110               (BCM63158_PIN_NO(110, 5))
+
+#define BCM63158_GPIO_111__FUNC_SPIM_SS0_B             (BCM63158_PIN_NO(111, 0))
+#define BCM63158_GPIO_111__FUNC_GPIO_111               (BCM63158_PIN_NO(111, 5))
+
+#define BCM63158_GPIO_112__FUNC_SPIM_SS1_B             (BCM63158_PIN_NO(112, 0))
+#define BCM63158_GPIO_112__FUNC_GPIO_112               (BCM63158_PIN_NO(112, 5))
+
+#define BCM63158_GPIO_113__FUNC_PCIE0a_CLKREQ_B        (BCM63158_PIN_NO(113, 1))
+#define BCM63158_GPIO_113__FUNC_PCIE2b_CLKREQ_B        (BCM63158_PIN_NO(113, 2))
+#define BCM63158_GPIO_113__FUNC_PCIE1c_CLKREQ_B        (BCM63158_PIN_NO(113, 3))
+#define BCM63158_GPIO_113__FUNC_GPIO_113               (BCM63158_PIN_NO(113, 5))
+
+#define BCM63158_GPIO_114__FUNC_PCIE0a_RST_B           (BCM63158_PIN_NO(114, 1))
+#define BCM63158_GPIO_114__FUNC_PCIE2b_RST_B           (BCM63158_PIN_NO(114, 2))
+#define BCM63158_GPIO_114__FUNC_PCIE1c_RST_B           (BCM63158_PIN_NO(114, 3))
+#define BCM63158_GPIO_114__FUNC_GPIO_114               (BCM63158_PIN_NO(114, 5))
+
+#define BCM63158_GPIO_115__FUNC_PCIE1a_CLKREQ_B        (BCM63158_PIN_NO(115, 1))
+#define BCM63158_GPIO_115__FUNC_PCIE0b_CLKREQ_B        (BCM63158_PIN_NO(115, 2))
+#define BCM63158_GPIO_115__FUNC_PCIE2c_CLKREQ_B        (BCM63158_PIN_NO(115, 3))
+#define BCM63158_GPIO_115__FUNC_GPIO_115               (BCM63158_PIN_NO(115, 5))
+
+#define BCM63158_GPIO_116__FUNC_PCIE1a_RST_B           (BCM63158_PIN_NO(116, 1))
+#define BCM63158_GPIO_116__FUNC_PCIE0b_RST_B           (BCM63158_PIN_NO(116, 2))
+#define BCM63158_GPIO_116__FUNC_PCIE2c_RST_B           (BCM63158_PIN_NO(116, 3))
+#define BCM63158_GPIO_116__FUNC_GPIO_116               (BCM63158_PIN_NO(116, 5))
+
+#define BCM63158_GPIO_117__FUNC_PCIE2a_CLKREQ_B        (BCM63158_PIN_NO(117, 1))
+#define BCM63158_GPIO_117__FUNC_PCIE1b_CLKREQ_B        (BCM63158_PIN_NO(117, 2))
+#define BCM63158_GPIO_117__FUNC_PCIE0c_CLKREQ_B        (BCM63158_PIN_NO(117, 3))
+#define BCM63158_GPIO_117__FUNC_GPIO_117               (BCM63158_PIN_NO(117, 5))
+
+#define BCM63158_GPIO_118__FUNC_PCIE2a_RST_B           (BCM63158_PIN_NO(118, 1))
+#define BCM63158_GPIO_118__FUNC_PCIE1b_RST_B           (BCM63158_PIN_NO(118, 2))
+#define BCM63158_GPIO_118__FUNC_PCIE0c_RST_B           (BCM63158_PIN_NO(118, 3))
+#define BCM63158_GPIO_118__FUNC_GPIO_118               (BCM63158_PIN_NO(118, 5))
+
+#define BCM63158_GPIO_119__FUNC_PCIE3_CLKREQ_B         (BCM63158_PIN_NO(119, 1))
+#define BCM63158_GPIO_119__FUNC_GPIO_119               (BCM63158_PIN_NO(119, 5))
+
+#define BCM63158_GPIO_120__FUNC_PCIE3_RST_B            (BCM63158_PIN_NO(120, 0))
+#define BCM63158_GPIO_120__FUNC_GPIO_120               (BCM63158_PIN_NO(120, 5))
+
+#define BCM63158_GPIO_121__FUNC_USB0a_PWRFLT           (BCM63158_PIN_NO(121, 1))
+#define BCM63158_GPIO_121__FUNC_USB1b_PWRFLT           (BCM63158_PIN_NO(121, 2))
+#define BCM63158_GPIO_121__FUNC_GPIO_121               (BCM63158_PIN_NO(121, 5))
+
+#define BCM63158_GPIO_122__FUNC_USB0a_PWRON            (BCM63158_PIN_NO(122, 1))
+#define BCM63158_GPIO_122__FUNC_USB1b_PWRON            (BCM63158_PIN_NO(122, 2))
+#define BCM63158_GPIO_122__FUNC_GPIO_122               (BCM63158_PIN_NO(122, 5))
+
+#define BCM63158_GPIO_123__FUNC_USB1a_PWRFLT           (BCM63158_PIN_NO(123, 1))
+#define BCM63158_GPIO_123__FUNC_USB0b_PWRFLT           (BCM63158_PIN_NO(123, 2))
+#define BCM63158_GPIO_123__FUNC_GPIO_123               (BCM63158_PIN_NO(123, 5))
+
+#define BCM63158_GPIO_124__FUNC_USB1a_PWRON            (BCM63158_PIN_NO(124, 1))
+#define BCM63158_GPIO_124__FUNC_USB0b_PWRON            (BCM63158_PIN_NO(124, 2))
+#define BCM63158_GPIO_124__FUNC_GPIO_124               (BCM63158_PIN_NO(124, 5))
+
+#define BCM63158_GPIO_125__FUNC_RESET_OUT_B            (BCM63158_PIN_NO(125, 0))
+#define BCM63158_GPIO_125__FUNC_GPIO_125               (BCM63158_PIN_NO(125, 5))
+
+#endif /* _DT_BINDINGS_BCM63138_PINFUNC_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/dt-bindings/reset/brcm,bcm63xx-pmc.h	2025-09-25 17:40:36.923373950 +0200
@@ -0,0 +1,39 @@
+/*
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_BRCM_BCM63XX_PMC_H
+#define _DT_BINDINGS_RESET_BRCM_BCM63XX_PMC_H
+
+#define PMC_R_CPU0		0
+#define PMC_R_CPU1		1
+#define PMC_R_CPU2		2
+#define PMC_R_CPU3		3
+
+#define PMC_R_RDP		10
+#define PMC_R_SF2		11
+#define PMC_R_USBH		12
+#define PMC_R_SAR		13
+#define PMC_R_SATA		14
+
+#define PMC_R_PCIE0		15
+#define PMC_R_PCIE01		16
+#define PMC_R_PCIE1		17
+#define PMC_R_PCIE2		18
+#define PMC_R_PCIE3		19
+
+#define PMC_R_XRDP		20
+
+#define PMC_R_WAN_AE		21
+
+#define PMC_R_LAST		22
+
+#endif /* !_DT_BINDINGS_RESET_BRCM_BCM63XX_PMC_H */
+
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/dt-bindings/soc/broadcom,bcm63158-procmon.h	2025-09-25 17:40:36.923373950 +0200
@@ -0,0 +1,13 @@
+/*
+ * brcm,bcm63158-procmon.h: DT binding constants for the BCM63158 PROCMON block
+ * Created by <nschichan@freebox.fr> on Thu Oct  3 19:11:25 2019
+ */
+
+#pragma once
+
+#define RCAL_0P25UM_HORZ	0
+#define RCAL_0P25UM_VERT	1
+#define RCAL_0P5UM_HORZ		2
+#define RCAL_0P5UM_VERT		3
+#define RCAL_1UM_HORZ		4
+#define RCAL_1UM_VERT		5
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/dt-bindings/soc/broadcom,bcm63xx-xdslphy.h	2025-09-25 17:40:36.923373950 +0200
@@ -0,0 +1,83 @@
+#ifndef _DT_BINDINGS_SOC_BCM63XX_XDSLPHY_H
+#define _DT_BINDINGS_SOC_BCM63XX_XDSLPHY_H
+
+/*
+ * imported from broadcom boardparams.h
+ */
+
+/* AFE IDs */
+#define BCM63XX_XDSLPHY_AFE_DEFAULT			0
+
+#define BCM63XX_XDSLPHY_AFE_CHIP_INT			(1 << 28)
+#define BCM63XX_XDSLPHY_AFE_CHIP_6505			(2 << 28)
+#define BCM63XX_XDSLPHY_AFE_CHIP_6306			(3 << 28)
+#define BCM63XX_XDSLPHY_AFE_CHIP_CH0			(4 << 28)
+#define BCM63XX_XDSLPHY_AFE_CHIP_CH1			(5 << 28)
+#define BCM63XX_XDSLPHY_AFE_CHIP_GFAST			(6 << 28)
+#define BCM63XX_XDSLPHY_AFE_CHIP_GFAST_CH0		(7 << 28)
+
+#define BCM63XX_XDSLPHY_AFE_LD_ISIL1556			(1 << 21)
+#define BCM63XX_XDSLPHY_AFE_LD_6301			(2 << 21)
+#define BCM63XX_XDSLPHY_AFE_LD_6302			(3 << 21)
+#define BCM63XX_XDSLPHY_AFE_LD_6303			(4 << 21)
+#define BCM63XX_XDSLPHY_AFE_LD_6304			(5 << 21)
+#define BCM63XX_XDSLPHY_AFE_LD_6305			(6 << 21)
+
+#define BCM63XX_XDSLPHY_AFE_LD_REV_6303_VR5P3		(1 << 18)
+
+#define BCM63XX_XDSLPHY_AFE_FE_ANNEXA			(1 << 15)
+#define BCM63XX_XDSLPHY_AFE_FE_ANNEXB			(2 << 15)
+#define BCM63XX_XDSLPHY_AFE_FE_ANNEXJ			(3 << 15)
+#define BCM63XX_XDSLPHY_AFE_FE_ANNEXBJ			(4 << 15)
+#define BCM63XX_XDSLPHY_AFE_FE_ANNEXM			(5 << 15)
+
+#define BCM63XX_XDSLPHY_AFE_FE_AVMODE_COMBO		(0 << 13)
+#define BCM63XX_XDSLPHY_AFE_FE_AVMODE_ADSL		(1 << 13)
+#define BCM63XX_XDSLPHY_AFE_FE_AVMODE_VDSL		(2 << 13)
+
+/* VDSL only */
+#define BCM63XX_XDSLPHY_AFE_FE_REV_ISIL_REV1		(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_12_20             \
+	BCM63XX_XDSLPHY_AFE_FE_REV_ISIL_REV1
+#define BCM63XX_XDSLPHY_AFE_FE_REV_12_21		(2 << 8)
+
+/* Combo */
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV1		(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_7_12	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_7_2_21	(2 << 8)
+
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_7_2_1	(3 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_7_2		(4 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_7_2_UR2	(5 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_7_2_2	(6 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_7_2_30	(7 << 8)
+#define BCM63XX_XDSLPHY_AFE_6302_6306_REV_A_12_40	(8 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_30	(9 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_20	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_40	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_60	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_50	(2 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_35	(3 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_50	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_51	(2 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6304_REV_12_4_40	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6304_REV_12_4_45	(2 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6304_REV_12_4_60	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6305_REV_12_5_60_1	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6305_REV_12_5_60_2	(2 << 8)
+
+
+/* ADSL only */
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_5_2_1	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_5_2_2	(2 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_5_2_3	(3 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6301_REV_5_1_1	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6301_REV_5_1_2	(2 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6301_REV_5_1_3	(3 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6301_REV_5_1_4	(4 << 8)
+
+#define BCM63XX_XDSLPHY_AFE_FE_COAX			(1 << 7)
+
+#define BCM63XX_XDSLPHY_AFE_FE_RNC			(1 << 6)
+
+#endif /* !_DT_BINDINGS_SOC_BCM63XX_XDSLPHY_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/linux/bcm63xx-procmon.h	2025-09-25 17:40:36.935374010 +0200
@@ -0,0 +1,18 @@
+/*
+ * bcm63xx-procmon.h: kernel-side API for the BCM63158 PROCMON driver
+ * Created by <nschichan@freebox.fr> on Thu Oct  3 19:29:58 2019
+ */
+
+#pragma once
+
+#include <linux/of.h>
+
+#ifdef CONFIG_PROCMON_BCM63158
+int procmon_get_rcal(struct device_node *np);
+#else
+static inline int procmon_get_rcal(struct device_node *np)
+{
+	return -ENOSYS;
+}
+#endif
+
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/linux/dgasp.h	2025-09-25 17:40:36.967374168 +0200
@@ -0,0 +1,23 @@
+#ifndef LINUX_DGASP_H_
+#define LINUX_DGASP_H_
+
+#include <linux/notifier.h>
+
+extern struct atomic_notifier_head dgasp_notifier_list;
+
+#ifdef CONFIG_DGASP
+static inline void dgasp_notifier_chain_register(struct notifier_block *nb)
+{
+	atomic_notifier_chain_register(&dgasp_notifier_list, nb);
+}
+
+static inline void dgasp_notifier_chain_unregister(struct notifier_block *nb)
+{
+	atomic_notifier_chain_unregister(&dgasp_notifier_list, nb);
+}
+#else
+static inline void dgasp_notifier_chain_register(struct notifier_block *nb) {}
+static inline void dgasp_notifier_chain_unregister(struct notifier_block *nb) {}
+#endif
+
+#endif /* LINUX_DGASP_H_ */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/linux/fbxatm_dev.h	2025-09-25 17:40:36.975374208 +0200
@@ -0,0 +1,436 @@
+#ifndef LINUX_FBXATM_DEV_H_
+#define LINUX_FBXATM_DEV_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/skbuff.h>
+#include <linux/mutex.h>
+#include <linux/fbxatm.h>
+#include <linux/device.h>
+#include <asm/atomic.h>
+#include <linux/if_vlan.h>
+#include <linux/fbxatm_remote.h>
+
+/*
+ * atm cell helper
+ */
+#define ATM_CELL_HDR_SIZE	5
+
+#define ATM_GET_GFC(h)		(((h)[0] & 0xf0) >> 4)
+#define ATM_SET_GFC(h,v)	do {					\
+					(h)[0] &= ~0xf0;		\
+					(h)[0] |= (v) << 4;		\
+				} while (0)
+
+#define ATM_GET_VPI(h)		((((h)[0] & 0x0f) << 4) |		\
+				 (((h)[1] & 0xf0) >> 4))
+#define ATM_SET_VPI(h,v)	do {					\
+					(h)[0] &= ~0xf;			\
+					(h)[1] &= ~0xf0;		\
+					(h)[0] |= (v) >> 4;		\
+					(h)[1] |= ((v) & 0xf) << 4;	\
+				} while (0)
+
+#define ATM_GET_VCI(h)		((((h)[1] & 0x0f) << 12) |		\
+				 ((h)[2] << 4) |			\
+				 ((((h)[3] & 0xf0) >> 4)))
+#define ATM_SET_VCI(h,v)	do {					\
+					(h)[1] &= ~0xf;			\
+					(h)[3] &= ~0xf0;		\
+					(h)[1] |= (v) >> 12;		\
+					(h)[2] = ((v) & 0xff0) >> 4;	\
+					(h)[3] |= ((v) & 0xf) << 4;	\
+				} while (0)
+
+
+#define ATM_GET_PT(h)		(((h)[3] & 0x0e) >> 1)
+#define ATM_SET_PT(h,v)		do {					\
+					(h)[3] &= ~0xe;			\
+					(h)[3] |= (v) << 1;		\
+				} while (0)
+
+#define ATM_GET_CLP(h)		(((h)[3] & 0x01))
+#define ATM_SET_CLP(h,v)	do {					\
+					(h)[3] &= ~1;			\
+					(h)[3] |= (v);			\
+				} while (0)
+
+#define ATM_GET_HEC(h)		((h)[4])
+#define ATM_SET_HEC(h,v)	do {					\
+					(h)[4] = (v);			\
+				} while (0)
+
+
+/*
+ * OAM definition
+ */
+#define OAM_VCI_SEG_F4			3
+#define OAM_VCI_END2END_F4		4
+
+#define OAM_PTI_SEG_F5			0x4
+#define OAM_PTI_END2END_F5		0x5
+
+#define OAM_TYPE_SHIFT			4
+#define OAM_TYPE_MASK			(0xf << OAM_TYPE_SHIFT)
+#define OAM_TYPE_FAULT_MANAGEMENT	0x1
+#define OAM_TYPE_PERF_MANAGEMENT	0x2
+#define OAM_TYPE_ACTIVATION		0x8
+
+#define FUNC_TYPE_SHIFT			0
+#define FUNC_TYPE_MASK			(0xf << FUNC_TYPE_SHIFT)
+#define FUNC_TYPE_AIS			0x0
+#define FUNC_TYPE_FERF			0x1
+#define FUNC_TYPE_CONT_CHECK		0x4
+#define FUNC_TYPE_OAM_LOOPBACK		0x8
+
+struct fbxatm_oam_cell_payload {
+	u8			cell_hdr[5];
+	u8			cell_type;
+	u8			loopback_indication;
+	u8			correlation_tag[4];
+	u8			loopback_id[16];
+	u8			source_id[16];
+	u8			reserved[8];
+	u8			crc10[2];
+};
+
+struct fbxatm_oam_cell {
+	struct fbxatm_oam_cell_payload	payload;
+	struct list_head		next;
+};
+
+struct fbxatm_oam_ping {
+	struct fbxatm_oam_ping_req	req;
+	u32				correlation_id;
+	int				replied;
+	wait_queue_head_t		wq;
+	struct list_head		next;
+};
+
+/*
+ * vcc/device stats
+ */
+struct fbxatm_vcc_stats {
+	u64			rx_bytes;
+	u64			tx_bytes;
+	u32			rx_aal5;
+	u32			tx_aal5;
+};
+
+struct fbxatm_dev_stats {
+	u64			rx_bytes;
+	u64			tx_bytes;
+	u32			rx_aal5;
+	u32			tx_aal5;
+	u32			rx_f4_oam;
+	u32			tx_f4_oam;
+	u32			rx_f5_oam;
+	u32			tx_f5_oam;
+	u32			rx_bad_oam;
+	u32			rx_bad_llid_oam;
+	u32			rx_other_oam;
+	u32			rx_dropped;
+	u32			tx_drop_nolink;
+};
+
+/*
+ * vcc user ops
+ */
+struct fbxatm_vcc_uops {
+	void	(*link_change)(void *cb_data, int link,
+			       unsigned int rx_cell_rate,
+			       unsigned int tx_cell_rate);
+	void	(*rx_pkt)(struct sk_buff *skb, void *cb_data);
+	void	(*tx_done)(void *cb_data);
+};
+
+/*
+ * vcc status flags
+ */
+enum {
+	FBXATM_VCC_F_FULL		= (1 << 0),
+
+	FBXATM_VCC_F_LINK_UP		= (1 << 1),
+};
+
+
+/*
+ * vcc definition
+ */
+struct fbxatm_dev;
+
+struct fbxatm_vcc {
+	unsigned int			vpi;
+	unsigned int			vci;
+
+	struct fbxatm_vcc_qos		qos;
+
+	struct fbxatm_vcc_stats		stats;
+
+	enum fbxatm_vcc_user		user;
+	void				*user_priv;
+
+	struct fbxatm_dev		*adev;
+	void				*dev_priv;
+
+	spinlock_t			user_ops_lock;
+	const struct fbxatm_vcc_uops	*user_ops;
+	void				*user_cb_data;
+
+	unsigned int			to_drop_pkt;
+
+	spinlock_t			tx_lock;
+	unsigned long			vcc_flags;
+
+	struct list_head		next;
+};
+
+/*
+ * fbxatm device operation
+ */
+struct fbxatm_dev_ops {
+	int (*open)(struct fbxatm_vcc *vcc);
+
+	void (*close)(struct fbxatm_vcc *vcc);
+
+	int (*ioctl)(struct fbxatm_dev *adev,
+		     unsigned int cmd, void __user *arg);
+
+	int (*send)(struct fbxatm_vcc *vcc, struct sk_buff *skb);
+
+	int (*send_oam)(struct fbxatm_dev *adev,
+			struct fbxatm_oam_cell *cell);
+
+	int (*init_procfs)(struct fbxatm_dev *adev);
+	void (*release_procfs)(struct fbxatm_dev *adev);
+
+	struct module			*owner;
+};
+
+/*
+ * device flags
+ */
+enum {
+	FBXATM_DEV_F_LINK_UP		= (1 << 0),
+};
+
+/*
+ * fbxatm device definition
+ */
+struct fbxatm_dev {
+	int				ifindex;
+	unsigned long			dev_flags;
+	spinlock_t			dev_link_lock;
+
+	unsigned int			max_vcc;
+	unsigned int			vci_mask;
+	unsigned int			vpi_mask;
+	unsigned int			max_priority;
+	unsigned int			max_rx_priority;
+	unsigned int			tx_headroom;
+
+	char				*name;
+
+	/* unit: b/s */
+	unsigned int			link_rate_ds;
+	unsigned int			link_rate_us;
+
+	unsigned int			link_cell_rate_ds;
+	unsigned int			link_cell_rate_us;
+
+	const struct fbxatm_dev_ops	*ops;
+
+	spinlock_t			stats_lock;
+	struct fbxatm_dev_stats		stats;
+
+	spinlock_t			vcc_list_lock;
+	struct list_head		vcc_list;
+
+	struct device			dev;
+
+	spinlock_t			oam_list_lock;
+	struct list_head		rx_oam_cells;
+	unsigned int			rx_oam_cells_count;
+	struct work_struct		oam_work;
+
+	struct list_head		oam_pending_ping;
+	u32				oam_correlation_id;
+
+	struct proc_dir_entry		*dev_proc_entry;
+	void				*priv;
+	struct list_head		next;
+};
+
+/*
+ * API for device drivers
+ */
+struct fbxatm_dev *fbxatm_alloc_device(int sizeof_priv);
+
+int fbxatm_register_device(struct fbxatm_dev *adev,
+			   const char *base_name,
+			   const struct fbxatm_dev_ops *ops);
+
+void fbxatm_free_device(struct fbxatm_dev *adev);
+
+void fbxatm_dev_set_link_up(struct fbxatm_dev *adev);
+
+void fbxatm_dev_set_link_down(struct fbxatm_dev *adev);
+
+int fbxatm_unregister_device(struct fbxatm_dev *adev);
+
+void fbxatm_netifrx_oam(struct fbxatm_dev *adev,
+			struct fbxatm_oam_cell *cell);
+
+
+static inline int fbxatm_vcc_link_is_up(struct fbxatm_vcc *vcc)
+{
+	return test_bit(FBXATM_VCC_F_LINK_UP, &vcc->vcc_flags);
+}
+
+#define	FBXATMDEV_ALIGN		4
+
+static inline void *fbxatm_dev_priv(struct fbxatm_dev *adev)
+{
+	return (u8 *)adev + ((sizeof(struct fbxatm_dev)
+			      + (FBXATMDEV_ALIGN - 1))
+			     & ~(FBXATMDEV_ALIGN - 1));
+}
+
+/*
+ * API for FBXATM stack user
+ */
+struct fbxatm_ioctl {
+	int (*handler)(struct socket *sock,
+		       unsigned int cmd, void __user *useraddr);
+
+	void (*release)(struct socket *sock);
+
+	struct module		*owner;
+	struct list_head	next;
+};
+
+void fbxatm_set_uops(struct fbxatm_vcc *vcc,
+		     const struct fbxatm_vcc_uops *user_ops,
+		     void *user_cb_data);
+
+struct fbxatm_vcc *
+fbxatm_bind_to_vcc(const struct fbxatm_vcc_id *id,
+		   enum fbxatm_vcc_user user);
+
+void fbxatm_unbind_vcc(struct fbxatm_vcc *vcc);
+
+
+static inline int fbxatm_vcc_queue_full(struct fbxatm_vcc *vcc)
+{
+	return test_bit(FBXATM_VCC_F_FULL, &vcc->vcc_flags);
+}
+
+#ifdef CONFIG_FBXATM_STACK
+/*
+ * stack user callback to send data on given vcc
+ */
+static inline int fbxatm_send(struct fbxatm_vcc *vcc, struct sk_buff *skb)
+{
+	int ret;
+	unsigned int len;
+
+	len = skb->len;
+
+	spin_lock_bh(&vcc->tx_lock);
+	if (!test_bit(FBXATM_VCC_F_LINK_UP, &vcc->vcc_flags)) {
+		spin_unlock_bh(&vcc->tx_lock);
+		dev_kfree_skb(skb);
+		spin_lock(&vcc->adev->stats_lock);
+		vcc->adev->stats.tx_drop_nolink++;
+		spin_unlock(&vcc->adev->stats_lock);
+		return 0;
+	}
+
+	ret = vcc->adev->ops->send(vcc, skb);
+	if (!ret) {
+		vcc->stats.tx_bytes += len;
+		vcc->stats.tx_aal5++;
+	}
+	spin_unlock_bh(&vcc->tx_lock);
+
+	if (!ret) {
+		spin_lock_bh(&vcc->adev->stats_lock);
+		vcc->adev->stats.tx_bytes += len;
+		vcc->adev->stats.tx_aal5++;
+		spin_unlock_bh(&vcc->adev->stats_lock);
+	}
+	return ret;
+}
+
+/*
+ * device callback when packet comes in
+ */
+static inline void fbxatm_netifrx(struct fbxatm_vcc *vcc, struct sk_buff *skb)
+{
+	unsigned int len;
+
+	len = skb->len;
+
+	spin_lock_bh(&vcc->user_ops_lock);
+	if (!vcc->user_ops) {
+		spin_unlock_bh(&vcc->user_ops_lock);
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	if (vcc->to_drop_pkt) {
+		vcc->to_drop_pkt--;
+		spin_unlock_bh(&vcc->user_ops_lock);
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	vcc->stats.rx_bytes += len;
+	vcc->stats.rx_aal5++;
+
+	vcc->user_ops->rx_pkt(skb, vcc->user_cb_data);
+	spin_unlock_bh(&vcc->user_ops_lock);
+
+	spin_lock_bh(&vcc->adev->stats_lock);
+	vcc->adev->stats.rx_bytes += len;
+	vcc->adev->stats.rx_aal5++;
+	spin_unlock_bh(&vcc->adev->stats_lock);
+}
+
+/*
+ * device callback when tx is done on vcc
+ */
+static inline void fbxatm_tx_done(struct fbxatm_vcc *vcc)
+{
+	spin_lock_bh(&vcc->user_ops_lock);
+	if (vcc->user_ops)
+		vcc->user_ops->tx_done(vcc->user_cb_data);
+	spin_unlock_bh(&vcc->user_ops_lock);
+}
+#else
+int fbxatm_send(struct fbxatm_vcc *vcc, struct sk_buff *skb);
+void fbxatm_netifrx(struct fbxatm_vcc *vcc, struct sk_buff *skb);
+void fbxatm_tx_done(struct fbxatm_vcc *vcc);
+#endif
+
+static inline unsigned int fbxatm_rx_reserve(void)
+{
+#ifdef CONFIG_FBXATM_STACK
+	/* normal stack, no headroom needed */
+	return 0;
+#else
+	/* remote stub, we need to send rx skb to another location,
+	 * adding the fbxatm_remote header, an ethernet header (with
+	 * possible vlan) */
+	return ALIGN(sizeof (struct fbxatm_remote_hdr) + VLAN_ETH_HLEN, 4);
+#endif
+}
+
+void fbxatm_register_ioctl(struct fbxatm_ioctl *ioctl);
+
+void fbxatm_unregister_ioctl(struct fbxatm_ioctl *ioctl);
+
+#endif /* !LINUX_FBXATM_DEV_H_ */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/linux/fbxatm_remote.h	2025-09-25 17:40:36.975374208 +0200
@@ -0,0 +1,216 @@
+#ifndef FBXATM_REMOTE_H_
+#define FBXATM_REMOTE_H_
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+/*
+ * fbxatm remote protocol messages
+ */
+#define ETH_P_FBXATM_REMOTE	0x8844
+#define FBXATM_REMOTE_MAGIC	0xd76f8d2f
+
+enum fbxatm_remote_flags {
+	FBXATM_RFLAGS_ACK = (1 << 0),
+};
+
+enum fbxatm_remote_mtype {
+	/* driver => stub */
+	FBXATM_RMT_CONNECT = 0,
+
+	/* stub => driver */
+	FBXATM_RMT_DEV_LINK,
+	FBXATM_RMT_DEV_RX_OAM,
+
+	/* driver => stub */
+	FBXATM_RMT_KEEPALIVE,
+	FBXATM_RMT_DEV_SEND_OAM,
+	FBXATM_RMT_VCC_ACTION,
+
+	/* driver => stub */
+	FBXATM_RMT_VCC_SEND,
+
+	/* stub => driver */
+	FBXATM_RMT_VCC_QEMPTY,
+	FBXATM_RMT_VCC_RX,
+};
+
+struct fbxatm_remote_hdr {
+	u32	magic;
+	u8	flags;
+	u8	seq;
+	u16	len;
+	u16	sport;
+	u16	dport;
+
+	u32	session_id;
+	u32	mtype;
+};
+
+/*
+ * sent to destination port 0
+ */
+struct fbxatm_remote_connect {
+	u8	name[32];
+
+	u16	dev_link_port;
+	u16	dev_rx_oam_port;
+};
+
+struct fbxatm_remote_connect_ack {
+	u16	vcc_action_port;
+	u16	dev_send_oam_port;
+	u16	keepalive_port;
+	u16	pad;
+
+	u32	max_vcc;
+	u32	vci_mask;
+	u32	vpi_mask;
+	u32	max_priority;
+	u32	max_rx_priority;
+
+	u32	link;
+	u32	link_rate_ds;
+	u32	link_rate_us;
+	u32	link_cell_rate_ds;
+	u32	link_cell_rate_us;
+};
+
+/*
+ * sent on dev_link port
+ */
+struct fbxatm_remote_dev_link {
+	u32	link;
+	u32	link_rate_ds;
+	u32	link_rate_us;
+	u32	link_cell_rate_ds;
+	u32	link_cell_rate_us;
+};
+
+/*
+ * sent on vcc_action port
+ */
+struct fbxatm_remote_vcc_action {
+	/* 1: open - 0: close */
+	u32	action;
+
+	/*
+	 * open args
+	 */
+	u16	vcc_rx_port;
+	u16	vcc_qempty_port;
+
+	/* from vcc id struct */
+	u32	vpi;
+	u32	vci;
+
+	/* from qos struct */
+	u32	traffic_class;
+	u32	max_sdu;
+	u32	max_buffered_pkt;
+	u32	priority;
+	u32	rx_priority;
+
+	/*
+	 * close args
+	 */
+	u32	vcc_remote_id;
+};
+
+struct fbxatm_remote_vcc_action_ack {
+	u32	ret;
+
+	/* open args ack */
+	u32	vcc_remote_id;
+	u16	vcc_send_port;
+	u16	pad;
+};
+
+/*
+ * sent on vcc_send port
+ */
+struct fbxatm_remote_vcc_send_ack {
+	u32	full;
+};
+
+/*
+ * pseudo socket layer
+ */
+struct fbxatm_remote_sock;
+struct fbxatm_remote_ctx;
+
+struct fbxatm_remote_sockaddr {
+	u16		lport;
+	u16		dport;
+	u32		mtype;
+	int		infinite_retry;
+	int		(*deliver)(void *priv, struct sk_buff *skb,
+				   struct sk_buff **ack);
+	void		(*response)(void *priv, struct sk_buff *skb);
+	void		*priv;
+};
+
+struct sk_buff *fbxatm_remote_alloc_skb(struct fbxatm_remote_ctx *ctx,
+					unsigned int size);
+
+unsigned int fbxatm_remote_headroom(struct fbxatm_remote_ctx *ctx);
+
+void fbxatm_remote_sock_getaddr(struct fbxatm_remote_sock *sock,
+				struct fbxatm_remote_sockaddr *addr);
+
+void fbxatm_remote_sock_purge(struct fbxatm_remote_sock *sock);
+
+int fbxatm_remote_sock_pending(struct fbxatm_remote_sock *sock);
+
+struct fbxatm_remote_ctx *fbxatm_remote_alloc_ctx(struct net_device *netdev,
+						  u8 *remote_mac,
+						  u32 session_id,
+						  void (*timeout)(void *priv),
+						  void *priv);
+
+struct fbxatm_remote_sock *
+fbxatm_remote_sock_bind(struct fbxatm_remote_ctx *ctx,
+			struct fbxatm_remote_sockaddr *addr,
+			int send_ack);
+
+struct fbxatm_remote_sock *
+fbxatm_remote_sock_connect(struct fbxatm_remote_ctx *ctx,
+			   struct fbxatm_remote_sockaddr *addr,
+			   int need_ack);
+
+int fbxatm_remote_sock_send(struct fbxatm_remote_sock *sock,
+			    struct sk_buff *skb);
+
+int fbxatm_remote_sock_send_ack(struct fbxatm_remote_sock *sock,
+				struct sk_buff *skb);
+
+int fbxatm_remote_sock_send_raw_ack(struct fbxatm_remote_ctx *ctx,
+				    struct net_device *dev,
+				    u8 *remote_mac,
+				    struct fbxatm_remote_hdr *hdr,
+				    struct sk_buff *ack);
+
+void fbxatm_remote_sock_close(struct fbxatm_remote_sock *sock);
+
+void fbxatm_remote_set_unknown_cb(void (*cb)(struct net_device *,
+					     struct sk_buff *));
+
+void fbxatm_remote_free_ctx(struct fbxatm_remote_ctx *ctx);
+
+void fbxatm_remote_ctx_set_dead(struct fbxatm_remote_ctx *ctx);
+
+int fbxatm_remote_init(void);
+
+void fbxatm_remote_exit(void);
+
+/*
+ * platform data for fbxatm_remote driver
+ */
+struct fbxatm_remote_pdata {
+	u8	remote_mac[ETH_ALEN];
+	char	netdev_name[IFNAMSIZ];
+	char	remote_name[32];
+};
+
+#endif /* !FBXATM_REMOTE_H_ */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/linux/fbxgpio_core.h	2025-09-25 17:40:36.975374208 +0200
@@ -0,0 +1,55 @@
+/*
+ * fbxgpio.h for linux-freebox
+ * Created by <nschichan@freebox.fr> on Wed Feb 21 22:09:46 2007
+ * Freebox SA
+ */
+
+#ifndef FBXGPIO_H
+# define FBXGPIO_H
+
+# include <linux/types.h>
+# include <linux/gpio/consumer.h>
+
+struct fbxgpio_pin;
+
+#define FBXGPIO_PIN_REVERSE_POL		(1 << 0)
+
+struct fbxgpio_pin {
+	const char			*pin_name;
+	bool				use_desc;
+
+	/* when use_desc is true */
+	struct gpio_desc		*(*request_desc)(struct fbxgpio_pin *);
+	void				(*release_desc)(struct fbxgpio_pin *);
+
+	/* when use_desc is false */
+	int				pin_num;
+	unsigned int			flags;
+
+	int				direction;
+	unsigned int			cur_dataout;
+	struct device			*dev;
+	struct device_node		*of_node;
+
+	/* private flags used by fbxgpio-dt */
+	struct {
+		enum gpiod_flags		flags;
+		bool				no_claim;
+		struct gpio_desc		*desc;
+		char				pin_name[32];
+	} dt;
+};
+
+
+#define GPIO_DIR_OUT_UNINITIALIZED	0x2
+#define GPIO_DIR_IN			0x1
+#define GPIO_DIR_OUT			0x0
+
+struct fbxgpio_pin *fbxgpio_of_get(struct device_node *np,
+				   const char *propname,
+				   int index);
+
+int fbxgpio_set_data_out(struct fbxgpio_pin *pin, int val);
+int fbxgpio_get_data_in(struct fbxgpio_pin *pin);
+
+#endif /* !FBXGPIO_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/linux/fbxprocfs.h	2025-09-25 17:40:36.975374208 +0200
@@ -0,0 +1,40 @@
+#ifndef FBXPROCFS_H_
+#define FBXPROCFS_H_
+
+#include <linux/proc_fs.h>
+#include <asm/atomic.h>
+#include <linux/seq_file.h>
+
+struct fbxprocfs_client
+{
+	const char *dirname;
+	struct module *owner;
+	struct proc_dir_entry *dir;
+	atomic_t refcount;
+	struct list_head list;
+};
+
+struct fbxprocfs_desc {
+	char		*name;
+	unsigned long	id;
+	int	(*rfunc)(struct seq_file *, void *);
+	int	(*wfunc)(struct file *, const char *, unsigned long, void *);
+};
+
+struct fbxprocfs_client *fbxprocfs_add_client(const char *dirname,
+					      struct module *owner);
+
+int fbxprocfs_remove_client(struct fbxprocfs_client *client);
+
+
+int
+fbxprocfs_create_entries(struct fbxprocfs_client *client,
+			 const struct fbxprocfs_desc *ro_desc,
+			 const struct fbxprocfs_desc *rw_desc);
+
+int
+fbxprocfs_remove_entries(struct fbxprocfs_client *client,
+			 const struct fbxprocfs_desc *ro_desc,
+			 const struct fbxprocfs_desc *rw_desc);
+
+#endif /* FBXPROCFS_H_ */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/linux/fbxserial.h	2025-09-25 17:40:36.975374208 +0200
@@ -0,0 +1,131 @@
+#ifndef FBXSERIAL_H_
+#define FBXSERIAL_H_
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+/*
+ * some part of serial may vary, we use abstract struct to store this,
+ * data content depends on type.
+ */
+#define EXTINFO_SIZE		128
+#define EXTINFO_MAX_COUNT	16
+
+/*
+ * extdev desc
+ */
+#define EXTINFO_TYPE_EXTDEV	1
+
+#define EXTDEV_TYPE_BUNDLE	1
+#define EXTDEV_TYPE_MAX		2
+
+struct fbx_serial_extinfo {
+	u32			type;
+
+	union {
+		/* extdev */
+		struct {
+			u32	type;
+			u32	model;
+			char	serial[64];
+		} extdev;
+
+		/* raw access */
+		unsigned char	data[EXTINFO_SIZE];
+	} u;
+}  __attribute__ ((packed));
+
+
+/*
+ * master serial structure
+ */
+
+#define FBXSERIAL_VERSION	1
+
+#define FBXSERIAL_MAGIC		0x2d9521ab
+
+#define MAC_ADDR_SIZE		6
+#define RANDOM_DATA_SIZE	32
+
+/*
+ * this  is the  maximum size  we accept  to check  crc32  against, so
+ * structure may not grow larger than this
+ */
+#define FBXSERIAL_MAX_SIZE	8192
+
+struct fbx_serial {
+	u32	crc32;
+	u32	magic;
+	u32	struct_version;
+	u32	len;
+
+	/* board serial */
+	u16	type;
+	u8	version;
+	u8	manufacturer;
+	u16	year;
+	u8	week;
+	u32	number;
+	u32	flags;
+
+	/* mac address base */
+	u8	mac_addr_base[MAC_ADDR_SIZE];
+
+	/* mac address count */
+	u8	mac_count;
+
+	/* random data */
+	u8	random_data[RANDOM_DATA_SIZE];
+
+	/* last update of data (seconds since epoch) */
+	u32	last_modified;
+
+	/* count of following extinfo tag */
+	u32	extinfo_count;
+
+	/* beginning of extended info */
+	struct fbx_serial_extinfo	extinfos[EXTINFO_MAX_COUNT];
+
+} __attribute__ ((packed));
+
+#define FBXSERIAL_MIN_SIZE	offsetof(struct fbx_serial, extinfos)
+
+
+/*
+ * default value to use in case magic is wrong (no cksum in that case)
+ */
+static inline void fbxserial_set_default(struct fbx_serial *s)
+{
+	memset(s, 0, sizeof (*s));
+	s->magic = FBXSERIAL_MAGIC;
+	s->struct_version = FBXSERIAL_VERSION;
+	s->len = sizeof (*s);
+	s->manufacturer = '_';
+	memcpy(s->mac_addr_base, "\x00\x07\xCB\x00\x00\xFD", 6);
+	s->mac_count = 1;
+}
+
+void
+fbxserialinfo_get_random(unsigned char *data, unsigned int len);
+
+const void *
+fbxserialinfo_get_mac_addr(unsigned int index);
+
+int
+fbxserialinfo_read(const void *data, struct fbx_serial *out);
+
+struct fbx_serial *fbxserialinfo_get(void);
+
+/*
+ * implemented in board specific code
+ */
+#ifdef CONFIG_ARCH_HAS_FBXSERIAL
+extern const struct fbx_serial *arch_get_fbxserial(void);
+#else
+static inline const struct fbx_serial *arch_get_fbxserial(void)
+{
+	return NULL;
+}
+#endif
+
+#endif /* FBXSERIAL_H_ */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/linux/hwmon-pericom-pcie.h	2025-09-25 17:40:36.991374287 +0200
@@ -0,0 +1,22 @@
+#ifndef __KERNEL_HWMON_PERICOM_PCIE_H
+#define __KERNEL_HWMON_PERICOM_PCIE_H
+
+struct pci_dev;
+
+#ifdef CONFIG_SENSORS_PERICOM_PCIE
+int hwmon_pericom_pcie_probe(struct pci_dev *pdev);
+void hwmon_pericom_pcie_remove(struct pci_dev *pdev);
+#else
+
+static inline int hwmon_pericom_pcie_probe(struct pci_dev *pdev)
+{
+	return 0;
+}
+
+static inline void hwmon_pericom_pcie_remove(struct pci_dev *pdev)
+{
+}
+
+#endif /* CONFIG_SENSORS_PERICOM_PCIE */
+
+#endif /* __KERNEL_HWMON_PERICOM_PCIE_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/linux/mfd/fbxgwr_pmu.h	2025-09-25 17:40:37.035374506 +0200
@@ -0,0 +1,465 @@
+#pragma once
+
+/*
+ * note: unknown registers will return 0 on read
+ */
+
+enum pmu_reg {
+
+	/*
+	 * NOTE NOTE NOTE NOTE
+	 *
+	 * this first set of registers is not subject to API
+	 * versioning, so the semantic cannot ever change
+	 */
+
+	/* used to detect presence of PMU */
+	PMU_REG_MAGIC0 = 0x00,
+	PMU_REG_MAGIC1 = 0x01,
+
+	/*
+	 * bit 5 for test mode through usb-pd
+	 * bit 6 for test mode through gpio
+	 * bit 7 for any test mode
+	 */
+	PMU_REG_TEST_MODE = 0x02,
+
+	/* return API major/minor version of active app */
+	PMU_REG_API_MAJOR = 0x03,
+	PMU_REG_API_MINOR = 0x04,
+
+	/* return board id */
+	PMU_REG_BOARD_ID = 0x05,
+
+	/* return version of active app (16 bits) */
+	PMU_REG_APP_VERSION_LO = 0x06,
+	PMU_REG_APP_VERSION_HI = 0x07,
+
+	/* return revision of active app (16 bits) */
+	PMU_REG_APP_REVISION_LO = 0x08,
+	PMU_REG_APP_REVISION_HI = 0x09,
+
+	/* return currently active app bank (0 or 1) */
+	PMU_REG_CUR_APP_BANK = 0x0a,
+
+
+
+	/*
+	 * the remaining set of registers is defined for API major
+	 * version 1
+	 */
+
+	/* return firmware capabilities */
+	PMU_REG_FW_CAPABILITIES = 0xb,
+
+	/* return number of gpio bank available, max is 4, so 32
+	 * gpios */
+	PMU_REG_GPIO_BANK_COUNT = 0xc,
+
+	/* return number of leds available, max is 8 */
+	PMU_REG_LED_COUNT = 0xd,
+
+	/* return number of fans pwm available, max is 4 */
+	PMU_REG_FAN_PWM_COUNT = 0xe,
+
+	/* return number of input sensors registers (including
+	 * inactive ones) */
+	PMU_REG_IN_COUNT = 0xf,
+
+	/* trigger false dying gasp interrupt */
+	PMU_REG_FAKE_DGASP = 0x10,
+
+	/*
+	 * Watchdog registers
+	 */
+	PMU_REG_WDT_CTL = 0x11,
+	PMU_REG_WDT_STS = 0x12,
+	PMU_REG_WDT_TIMEOUT = 0x13,
+	PMU_REG_WDT_REFRESH = 0x14,
+
+	/*
+	 * Type-C polarity
+	 */
+	PMU_REG_CC_POLARITY = 0x15,
+
+	PMU_REG_HELLO = 0x16,
+
+	/* wake-on-pon interval (seconds unit) */
+	PMU_REG_WAKE_PON_INTERVAL = 0x17,
+
+	/* soft-reset the PMU, which will cause a full reset */
+	PMU_REG_BOARD_RESET = 0x18,
+
+	/* app bank switch (light soft reset), write bank number to
+	 * switch to */
+	PMU_REG_BANK_SWITCH = 0x19,
+
+	/* suspend command, set wakeup mask and write magic value to
+	 * enter standby */
+	PMU_REG_ENTER_STANDBY = 0x1a,
+
+	/* mask of allowed wakeup source to leave standby */
+	PMU_REG_WAKE_SRC_MASK = 0x1b,
+
+	/* reason we left standby state (wake-up source mask), 0 means
+	 * POR, write 1 to clear */
+	PMU_REG_WAKE_REASON_MASK = 0x1c,
+
+	/* reset reason for the PMU itself */
+	PMU_REG_PMU_RESET_REASON = 0x1d,
+
+	/* wake-on-pon over i2c */
+	PMU_REG_PON_BOOT_MODE = 0x1e,
+	PMU_REG_PON_WOP_RESULT = 0x1f,
+
+	/*
+	 * gpio input value
+	 */
+	PMU_REG_GPIO_IN_BASE = 0x20,
+
+	PMU_REG_GPIO_IN_0 = PMU_REG_GPIO_IN_BASE + 0,
+	PMU_REG_GPIO_IN_1 = PMU_REG_GPIO_IN_BASE + 1,
+	PMU_REG_GPIO_IN_2 = PMU_REG_GPIO_IN_BASE + 2,
+	PMU_REG_GPIO_IN_3 = PMU_REG_GPIO_IN_BASE + 3,
+
+	/* gpio output:
+	 *  setting bit n in PMU_REG_GPIO_OUT_SET_x will set the n-th
+	 *  GPIO in bank x to logical level 1.
+	 *
+	 *  setting bit n in PMU_REG_GPIO_OUT_CLR_x will set the n-th
+	 *  GPIO in bank x to logical level 0.
+	 */
+	PMU_REG_GPIO_OUT_SET_BASE = 0x24,
+
+	PMU_REG_GPIO_OUT_SET_0 = PMU_REG_GPIO_OUT_SET_BASE + 0,
+	PMU_REG_GPIO_OUT_SET_1 = PMU_REG_GPIO_OUT_SET_BASE + 1,
+	PMU_REG_GPIO_OUT_SET_2 = PMU_REG_GPIO_OUT_SET_BASE + 2,
+	PMU_REG_GPIO_OUT_SET_3 = PMU_REG_GPIO_OUT_SET_BASE + 3,
+
+	PMU_REG_GPIO_OUT_CLR_BASE = 0x28,
+
+	PMU_REG_GPIO_OUT_CLR_0 = PMU_REG_GPIO_OUT_CLR_BASE + 0,
+	PMU_REG_GPIO_OUT_CLR_1 = PMU_REG_GPIO_OUT_CLR_BASE + 1,
+	PMU_REG_GPIO_OUT_CLR_2 = PMU_REG_GPIO_OUT_CLR_BASE + 2,
+	PMU_REG_GPIO_OUT_CLR_3 = PMU_REG_GPIO_OUT_CLR_BASE + 3,
+
+	/*
+	 * gpio direction:
+	 *  when reading PMU_REG_GPIO_DIR_GET_x, a bit clear indicates
+	 *  the pin is configured as input.
+	 *
+	 *  setting bit n in PMU_REG_GPIO_DIR_SET_x will configure the
+	 *  pin as output, output value is set to previous written
+	 *  value inside the corresponding OUT register
+	 *
+	 *  setting bit n in PMU_REG_GPIO_DIR_CLR_x will configure
+	 *  the pin as input
+	 */
+	PMU_REG_GPIO_DIR_GET_BASE = 0x2c,
+
+	PMU_REG_GPIO_DIR_GET_0 = PMU_REG_GPIO_DIR_GET_BASE + 0,
+	PMU_REG_GPIO_DIR_GET_1 = PMU_REG_GPIO_DIR_GET_BASE + 1,
+	PMU_REG_GPIO_DIR_GET_2 = PMU_REG_GPIO_DIR_GET_BASE + 2,
+	PMU_REG_GPIO_DIR_GET_3 = PMU_REG_GPIO_DIR_GET_BASE + 3,
+
+	PMU_REG_GPIO_DIR_SET_BASE = 0x30,
+
+	PMU_REG_GPIO_DIR_SET_0 = PMU_REG_GPIO_DIR_SET_BASE + 0,
+	PMU_REG_GPIO_DIR_SET_1 = PMU_REG_GPIO_DIR_SET_BASE + 1,
+	PMU_REG_GPIO_DIR_SET_2 = PMU_REG_GPIO_DIR_SET_BASE + 2,
+	PMU_REG_GPIO_DIR_SET_3 = PMU_REG_GPIO_DIR_SET_BASE + 3,
+
+	PMU_REG_GPIO_DIR_CLR_BASE = 0x34,
+
+	PMU_REG_GPIO_DIR_CLR_0 = PMU_REG_GPIO_DIR_CLR_BASE + 0,
+	PMU_REG_GPIO_DIR_CLR_1 = PMU_REG_GPIO_DIR_CLR_BASE + 1,
+	PMU_REG_GPIO_DIR_CLR_2 = PMU_REG_GPIO_DIR_CLR_BASE + 2,
+	PMU_REG_GPIO_DIR_CLR_3 = PMU_REG_GPIO_DIR_CLR_BASE + 3,
+
+	/*
+	 * gpio IRQ capable/status/mask, will detect any edge, write 1
+	 * to status bit to clear
+	 */
+	PMU_REG_GPIO_IRQ_CAP_BASE = 0x38,
+
+	PMU_REG_GPIO_IRQ_CAP_0 = PMU_REG_GPIO_IRQ_CAP_BASE + 0,
+	PMU_REG_GPIO_IRQ_CAP_1 = PMU_REG_GPIO_IRQ_CAP_BASE + 1,
+	PMU_REG_GPIO_IRQ_CAP_2 = PMU_REG_GPIO_IRQ_CAP_BASE + 2,
+	PMU_REG_GPIO_IRQ_CAP_3 = PMU_REG_GPIO_IRQ_CAP_BASE + 3,
+
+	PMU_REG_GPIO_IRQ_MASK_BASE = 0x3c,
+
+	PMU_REG_GPIO_IRQ_MASK_0 = PMU_REG_GPIO_IRQ_MASK_BASE + 0,
+	PMU_REG_GPIO_IRQ_MASK_1 = PMU_REG_GPIO_IRQ_MASK_BASE + 1,
+	PMU_REG_GPIO_IRQ_MASK_2 = PMU_REG_GPIO_IRQ_MASK_BASE + 2,
+	PMU_REG_GPIO_IRQ_MASK_3 = PMU_REG_GPIO_IRQ_MASK_BASE + 3,
+
+	PMU_REG_GPIO_IRQ_STAT_BASE = 0x40,
+
+	PMU_REG_GPIO_IRQ_STAT_0 = PMU_REG_GPIO_IRQ_STAT_BASE + 0,
+	PMU_REG_GPIO_IRQ_STAT_1 = PMU_REG_GPIO_IRQ_STAT_BASE + 1,
+	PMU_REG_GPIO_IRQ_STAT_2 = PMU_REG_GPIO_IRQ_STAT_BASE + 2,
+	PMU_REG_GPIO_IRQ_STAT_3 = PMU_REG_GPIO_IRQ_STAT_BASE + 3,
+
+	/* command registers to enable/disable IRQ */
+	PMU_REG_GPIO_IRQ_CMD = 0x44,
+	PMU_REG_GPIO_IRQ_CMD_NR = 0x45,
+	PMU_REG_GPIO_IRQ_CMD_STAT = 0x46,
+
+	/* write 1 to reset internal state (unrequest all irq gpios),
+	 * self clear */
+	PMU_REG_GPIO_REINIT = 0x47,
+
+	/*
+	 * led pwm control
+	 */
+	PMU_REG_LED_PWM_BASE = 0x50,
+
+	PMU_REG_LED0_PWM = PMU_REG_LED_PWM_BASE + 0,
+	PMU_REG_LED1_PWM = PMU_REG_LED_PWM_BASE + 1,
+	PMU_REG_LED2_PWM = PMU_REG_LED_PWM_BASE + 2,
+	PMU_REG_LED3_PWM = PMU_REG_LED_PWM_BASE + 3,
+	PMU_REG_LED4_PWM = PMU_REG_LED_PWM_BASE + 4,
+	PMU_REG_LED5_PWM = PMU_REG_LED_PWM_BASE + 5,
+	PMU_REG_LED6_PWM = PMU_REG_LED_PWM_BASE + 6,
+	PMU_REG_LED7_PWM = PMU_REG_LED_PWM_BASE + 7,
+
+	/*
+	 * input value (current/voltage/temperature/power/fan_input)
+	 *
+	 * 16 bits
+	 *
+	 * current: mA unit
+	 * voltage: mV unit
+	 * temperature: milli °C unit
+	 * power: mW unit
+	 * fan_input: RPM unit
+	 *
+	 * value reported is optionally scaled by *10^n, (1 = value
+	 * reported is divided by 10, ...)
+	 */
+	PMU_REG_IN_BASE = 0x60,
+
+	PMU_REG_IN0_TYPE = PMU_REG_IN_BASE + 0,
+	PMU_REG_IN0_LO = PMU_REG_IN_BASE + 1,
+	PMU_REG_IN0_HI = PMU_REG_IN_BASE + 2,
+	PMU_REG_IN1_TYPE = PMU_REG_IN_BASE + 3,
+	PMU_REG_IN1_LO = PMU_REG_IN_BASE + 4,
+	PMU_REG_IN1_HI = PMU_REG_IN_BASE + 5,
+	PMU_REG_IN2_TYPE = PMU_REG_IN_BASE + 6,
+	PMU_REG_IN2_LO = PMU_REG_IN_BASE + 7,
+	PMU_REG_IN2_HI = PMU_REG_IN_BASE + 8,
+	PMU_REG_IN3_TYPE = PMU_REG_IN_BASE + 9,
+	PMU_REG_IN3_LO = PMU_REG_IN_BASE + 10,
+	PMU_REG_IN3_HI = PMU_REG_IN_BASE + 11,
+	PMU_REG_IN4_TYPE = PMU_REG_IN_BASE + 12,
+	PMU_REG_IN4_LO = PMU_REG_IN_BASE + 13,
+	PMU_REG_IN4_HI = PMU_REG_IN_BASE + 14,
+	PMU_REG_IN5_TYPE = PMU_REG_IN_BASE + 15,
+	PMU_REG_IN5_LO = PMU_REG_IN_BASE + 16,
+	PMU_REG_IN5_HI = PMU_REG_IN_BASE + 17,
+	PMU_REG_IN6_TYPE = PMU_REG_IN_BASE + 18,
+	PMU_REG_IN6_LO = PMU_REG_IN_BASE + 19,
+	PMU_REG_IN6_HI = PMU_REG_IN_BASE + 20,
+	PMU_REG_IN7_TYPE = PMU_REG_IN_BASE + 21,
+	PMU_REG_IN7_LO = PMU_REG_IN_BASE + 22,
+	PMU_REG_IN7_HI = PMU_REG_IN_BASE + 23,
+	PMU_REG_IN8_TYPE = PMU_REG_IN_BASE + 24,
+	PMU_REG_IN8_LO = PMU_REG_IN_BASE + 25,
+	PMU_REG_IN8_HI = PMU_REG_IN_BASE + 26,
+	PMU_REG_IN9_TYPE = PMU_REG_IN_BASE + 27,
+	PMU_REG_IN9_LO = PMU_REG_IN_BASE + 28,
+	PMU_REG_IN9_HI = PMU_REG_IN_BASE + 29,
+	PMU_REG_IN10_TYPE = PMU_REG_IN_BASE + 30,
+	PMU_REG_IN10_LO = PMU_REG_IN_BASE + 31,
+	PMU_REG_IN10_HI = PMU_REG_IN_BASE + 32,
+	PMU_REG_IN11_TYPE = PMU_REG_IN_BASE + 33,
+	PMU_REG_IN11_LO = PMU_REG_IN_BASE + 34,
+	PMU_REG_IN11_HI = PMU_REG_IN_BASE + 35,
+	PMU_REG_IN12_TYPE = PMU_REG_IN_BASE + 36,
+	PMU_REG_IN12_LO = PMU_REG_IN_BASE + 37,
+	PMU_REG_IN12_HI = PMU_REG_IN_BASE + 38,
+	PMU_REG_IN13_TYPE = PMU_REG_IN_BASE + 39,
+	PMU_REG_IN13_LO = PMU_REG_IN_BASE + 40,
+	PMU_REG_IN13_HI = PMU_REG_IN_BASE + 41,
+	PMU_REG_IN14_TYPE = PMU_REG_IN_BASE + 42,
+	PMU_REG_IN14_LO = PMU_REG_IN_BASE + 43,
+	PMU_REG_IN14_HI = PMU_REG_IN_BASE + 44,
+	PMU_REG_IN15_TYPE = PMU_REG_IN_BASE + 45,
+	PMU_REG_IN15_LO = PMU_REG_IN_BASE + 46,
+	PMU_REG_IN15_HI = PMU_REG_IN_BASE + 47,
+
+	/*
+	 * fan control
+	 * PWM value between 0 -> 255,
+	 * fan speed in RPM, 16 bits unsigned
+	 */
+	PMU_REG_FAN_PWM_BASE = 0x90,
+
+	PMU_REG_FAN0_PWM = PMU_REG_FAN_PWM_BASE + 0,
+	PMU_REG_FAN1_PWM = PMU_REG_FAN_PWM_BASE + 1,
+	PMU_REG_FAN2_PWM = PMU_REG_FAN_PWM_BASE + 2,
+	PMU_REG_FAN3_PWM = PMU_REG_FAN_PWM_BASE + 3,
+
+	/*
+	 * firmware update registers
+	 */
+	PMU_REG_FWUP_BASE = 0xa0,
+
+	/* issue command by writing to this register */
+	PMU_REG_FWUP_CMD = PMU_REG_FWUP_BASE + 0,
+
+	/* read-only: poll for command completion via busy bit;
+	 * writing to command will clear other bits */
+	PMU_REG_FWUP_STATUS = PMU_REG_FWUP_BASE + 1,
+
+	/* all data blocks must be transferred with the following size,
+	 * only last block can be smaller */
+	PMU_REG_FWUP_BLOCK_SIZE = PMU_REG_FWUP_BASE + 2,
+
+	/* set data len before START_DATA/NEXT_DATA command with
+	 * actual data len in buffer (must be multiple of BLOCK_SIZE
+	 * except last block) */
+	PMU_REG_FWUP_DATA_LEN = PMU_REG_FWUP_BASE + 3,
+
+	/* area used to write sig / firmware data */
+	PMU_REG_FWUP_DATA_BASE = PMU_REG_FWUP_BASE + 4,
+	PMU_REG_FWUP_DATA_LAST = PMU_REG_FWUP_DATA_BASE + 31,
+
+	/*
+	 * I2C proxy registers
+	 */
+	PMU_REG_I2C_PROXY_BASE = 0xc4,
+
+	/* issue command by writing to this register */
+	PMU_REG_I2C_PROXY_CMD = PMU_REG_I2C_PROXY_BASE + 0,
+
+	/* addr of the slave device to initiate transfer to */
+	PMU_REG_I2C_PROXY_ADDR = PMU_REG_I2C_PROXY_BASE + 1,
+
+	/* register to read/write (auto-incremented on successful operation) */
+	PMU_REG_I2C_PROXY_REG = PMU_REG_I2C_PROXY_BASE + 2,
+
+	/* read-only: poll for command completion via busy bit;
+	 * writing to command will clear other bits */
+	PMU_REG_I2C_PROXY_STATUS = PMU_REG_I2C_PROXY_BASE + 3,
+
+	/* data to transfer / transferred */
+	PMU_REG_I2C_PROXY_DATA = PMU_REG_I2C_PROXY_BASE + 4,
+
+	/*
+	 * RTC registers
+	 *
+	 * current value, read value 0 first (LSB) to snapshot
+	 */
+	PMU_REG_RTC_VALUE_0 = 0xd0,
+	PMU_REG_RTC_VALUE_1 = 0xd1,
+	PMU_REG_RTC_VALUE_2 = 0xd2,
+	PMU_REG_RTC_VALUE_3 = 0xd3,
+
+	/* RTC compare value for wakeup source */
+	PMU_REG_RTC_CMP_VALUE_0 = 0xd4,
+	PMU_REG_RTC_CMP_VALUE_1 = 0xd5,
+	PMU_REG_RTC_CMP_VALUE_2 = 0xd6,
+	PMU_REG_RTC_CMP_VALUE_3 = 0xd7,
+
+	PMU_REG_PRODUCT_VARIANT = 0xe0,
+};
+
+
+#define PMU_API_VERSION_MAJOR	1
+#define PMU_API_VERSION_MINOR	0
+
+#define PMU_MAGIC0_VAL		0xfb
+#define PMU_MAGIC1_VAL		0xec
+
+#define PMU_TEST_MODE_NONE	0x00
+#define PMU_TEST_MODE_USBPD_MASK	(1 << 5)
+#define PMU_TEST_MODE_GPIO_MASK		(1 << 6)
+#define PMU_TEST_MODE_ANY_MASK	(1 << 7)
+
+#define PMU_RESET_MAGIC		0x3d
+
+#define PMU_FAKE_DGASP_MAGIC	0x3e
+
+#define PMU_HELLO_MAGIC		0x3f
+
+#define PMU_IN_TYPE_SHIFT	(0)
+#define PMU_IN_TYPE_MASK	(0xf << PMU_IN_TYPE_SHIFT)
+#define PMU_IN_TYPE_UNUSED	0x00
+#define PMU_IN_TYPE_CURRENT	0x01
+#define PMU_IN_TYPE_VOLTAGE	0x02
+#define PMU_IN_TYPE_POWER	0x03
+#define PMU_IN_TYPE_TEMPERATURE	0x04
+#define PMU_IN_TYPE_FAN_INPUT	0x05
+#define PMU_IN_DIVIDER_SHIFT	4
+#define PMU_IN_DIVIDER_MASK	(0x7 << PMU_IN_DIVIDER_SHIFT)
+#define PMU_IN_SIGNED_MASK	(1 << 7)
+
+#define PMU_GPIOIRQCMD_ENABLE	0
+#define PMU_GPIOIRQCMD_DISABLE	1
+#define PMU_GPIOIRQCMD_RES_BUSY		(1 << 0)
+#define PMU_GPIOIRQCMD_RES_SUCCESS	(1 << 1)
+
+#define PMU_STANDBY_MAGIC	0x94
+#define PMU_WAKE_R_RTC_MASK	(1 << 0)
+#define PMU_WAKE_R_PWRBTN_MASK	(1 << 1)
+#define PMU_WAKE_R_WAKEPON_MASK	(1 << 2)
+#define PMU_WAKE_R_SOC_RST_MASK	(1 << 3)
+#define PMU_WAKE_R_WDT_RST_MASK	(1 << 4)
+
+#define PMU_FW_CAP_FWUPGRADE	(1 << 0)
+#define PMU_FW_CAP_BANK_SWITCH	(1 << 1)
+#define PMU_FW_CAP_RTC		(1 << 2)
+#define PMU_FW_CAP_STANDBY	(1 << 3)
+#define PMU_FW_CAP_GPIO_IRQ	(1 << 4)
+#define PMU_FW_CAP_WDT		(1 << 5)
+#define PMU_FW_CAP_I2C_PROXY	(1 << 6)
+
+#define PMU_FWUPCMD_NOOP	0
+#define PMU_FWUPCMD_START_SIG	1
+#define PMU_FWUPCMD_NEXT_SIG	2
+#define PMU_FWUPCMD_START_DATA	3
+#define PMU_FWUPCMD_NEXT_DATA	4
+#define PMU_FWUPCMD_VALIDATE	5
+
+#define PMU_FWUPSTATUS_BUSY	(1 << 0)
+#define PMU_FWUPSTATUS_SUCCESS	(1 << 1)
+
+#define PMU_RESET_R_UNKNOWN	0
+#define PMU_RESET_R_POR		1
+#define PMU_RESET_R_SWRESET	2
+#define PMU_RESET_R_VDROP	3
+#define PMU_RESET_R_HWRESET	4
+#define PMU_RESET_R_WATCHDOG	5
+#define PMU_RESET_R_BUS_ERROR	6
+#define PMU_RESET_R_SRAM_PARITY	7
+#define PMU_RESET_R_BOOTSTRAP	8
+
+#define PMU_PON_BOOT_MODE_NORMAL	0
+#define PMU_PON_BOOT_MODE_WOP		1
+
+#define PMU_PON_WOP_RES_WAKE		1
+#define PMU_PON_WOP_RES_SLEEP		2
+
+#define PMU_WDT_CTL_EN			(1 << 0)
+#define PMU_WDT_CTL_INT_EN		(1 << 1)
+
+#define PMU_WDT_STS_INT_STS		(1 << 0)
+#define PMU_WDT_REFRESH_VAL		0xA5
+
+#define PMU_CC_POLARITY_UNKNOWN		0
+#define PMU_CC_POLARITY_CC1		1
+#define PMU_CC_POLARITY_CC2		2
+
+#define PMU_PRODUCT_VARIANT_FBX		0
+#define PMU_PRODUCT_VARIANT_JBX		1
+#define PMU_PRODUCT_VARIANT_FBX_LTD	2
+
+#define PMU_I2C_PROXY_CMD_NOOP		0
+#define PMU_I2C_PROXY_CMD_READ		1
+#define PMU_I2C_PROXY_CMD_WRITE		2
+
+#define PMU_I2C_PROXY_STATUS_BUSY	(1 << 0)
+#define PMU_I2C_PROXY_STATUS_SUCCESS	(1 << 1)
+#define PMU_I2C_PROXY_STATUS_ERROR	(1 << 2)
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/linux/pmc-bcm63xx.h	2025-09-25 17:40:37.083374744 +0200
@@ -0,0 +1,47 @@
+/*
+ * pmc.h for pmc
+ * Created by <nschichan@freebox.fr> on Fri Jun 26 15:29:58 2020
+ */
+
+#pragma once
+
+struct bcm63xx_pmc;
+struct device_node;
+
+enum pmc_addr_id {
+	PMB_ADDR_SF2,
+	PMB_ADDR_AIP,
+	PMB_ADDR_SAR,
+	PMB_ADDR_RDP,
+	PMB_ADDR_RDPPLL,
+	PMB_ADDR_USB30_2X,
+	PMB_ADDR_AFEPLL,
+	PMB_ADDR_PCIE0,
+	PMB_ADDR_PCIE1,
+	PMB_ADDR_PCIE2,
+	PMB_ADDR_PCIE3,
+	PMB_ADDR_SYSTEMPORT,
+	PMB_ADDR_VDSL3_CORE,
+	PMB_ADDR_VDSL3_PMB,
+	PMB_ADDR_VDSL3_MIPS,
+	PMB_ADDR_WAN,
+	PMB_ADDR_XRDP,
+	PMB_ADDR_XRDP_RC0,
+	PMB_ADDR_XRDP_RC1,
+	PMB_ADDR_XRDP_RC2,
+	PMB_ADDR_XRDP_RC3,
+	PMB_ADDR_XRDP_RC4,
+	PMB_ADDR_XRDP_RC5,
+	PMB_ADDR_BIU_PLL,
+	PMB_ADDR_LAST,
+};
+
+
+int pmc_read_bpcm_register(struct bcm63xx_pmc *priv,
+			   enum pmc_addr_id addr_id,
+			   u32 word_offset, u32 *value);
+int pmc_write_bpcm_register(struct bcm63xx_pmc *priv,
+			    enum pmc_addr_id addr_id,
+			    u32 word_offset, u32 value);
+
+struct bcm63xx_pmc *pmc_of_get(struct device_node *np);
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/linux/soc/bcm63xx_xrdp_api.h	2025-09-25 17:40:37.111374883 +0200
@@ -0,0 +1,95 @@
+#ifndef SOC_BCM63XX_XRDP_API_H_
+#define SOC_BCM63XX_XRDP_API_H_
+
+/*
+ * xrdp api
+ */
+struct bcm_xrdp_priv;
+
+#define XRDP_MAX_RX_QUEUE	1
+#define XRDP_MAX_RX_XF		3
+#define XRDP_MAX_TX_QUEUE	8
+
+struct bcm_xrdp_enet_params {
+	size_t		rx_queue_count;
+	size_t		tx_queue_count;
+
+	int		rx_irq[XRDP_MAX_RX_QUEUE];
+	int		tx_irq[XRDP_MAX_TX_QUEUE];
+	unsigned int	rx_irq_mask[XRDP_MAX_RX_QUEUE];
+	unsigned int	tx_done_irq_mask[XRDP_MAX_TX_QUEUE];
+	u8		tx_bbh_bbid;
+	u8		tx_bbh_queue_id;
+	u16		tx_bbh_mdu_addr;
+	u16		tx_bbh_pd_queue_size;
+	bool		tx_need_reporting;
+	bool		tx_need_batch;
+
+	void __iomem	*rx_regs;
+	void __iomem	*tx_regs;
+	void __iomem	*mac_regs;
+
+	unsigned int	rx_core_id;
+	unsigned int	tx_core_id;
+	unsigned int	rxq_fqm_wakeup_thread;
+	unsigned int	rxq_xf_wakeup_thread[XRDP_MAX_RX_XF];
+	unsigned int	txq_wakeup_thread[XRDP_MAX_TX_QUEUE];
+};
+
+int bcm_xrdp_api_get_enet_params(struct bcm_xrdp_priv *priv,
+				 unsigned int bbh_id,
+				 struct bcm_xrdp_enet_params *params);
+
+struct bcm_xrdp_dsl_params {
+	size_t		rx_queue_count;
+	size_t		tx_queue_count;
+
+	int		rx_irq[XRDP_MAX_RX_QUEUE];
+	int		tx_irq[XRDP_MAX_TX_QUEUE];
+	unsigned int	rxq_irq_mask[XRDP_MAX_RX_QUEUE];
+	unsigned int	txq_done_irq_mask[XRDP_MAX_TX_QUEUE];
+
+	void __iomem	*rx_regs;
+	void __iomem	*tx_regs;
+
+	unsigned int	rx_core_id;
+	unsigned int	tx_core_id;
+	unsigned int	rxq_wakeup_thread[XRDP_MAX_RX_QUEUE];
+	unsigned int	txq_wakeup_thread[XRDP_MAX_TX_QUEUE];
+};
+
+int bcm_xrdp_api_get_dsl_params(struct bcm_xrdp_priv *priv,
+				struct bcm_xrdp_dsl_params *params);
+
+bool bcm_xrdp_api_bbh_txq_is_empty(struct bcm_xrdp_priv *priv,
+				   unsigned int bbh_id,
+				   unsigned int hw_queue_idx);
+
+void bcm_xrdp_api_wakeup(struct bcm_xrdp_priv *priv,
+			 unsigned int core_id,
+			 unsigned int thread);
+
+u32 bcm_xrdp_api_irq_read_status(struct bcm_xrdp_priv *priv,
+				 unsigned int core_id);
+
+void bcm_xrdp_api_irq_write_status(struct bcm_xrdp_priv *priv,
+				   unsigned int core_id,
+				   u32 val);
+
+void bcm_xrdp_api_irq_mask_clear(struct bcm_xrdp_priv *priv,
+				 unsigned int core_id,
+				 u32 bits);
+
+void bcm_xrdp_api_irq_mask_set(struct bcm_xrdp_priv *priv,
+			       unsigned int core_id,
+			       u32 bits);
+
+void bcm_xrdp_api_dsl_flow_id_set(struct bcm_xrdp_priv *priv,
+				  unsigned int flow_id,
+				  u32 hwval);
+
+void bcm_xrdp_api_pon_flow_id_set(struct bcm_xrdp_priv *priv,
+				  unsigned int flow_id,
+				  u32 hwval);
+
+#endif /* ! SOC_BCM63XX_XRDP_API_H_ */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/linux/ubus4.h	2025-09-25 17:40:37.131374982 +0200
@@ -0,0 +1,18 @@
+/*
+ * ubus4.h for ubus4
+ * Created by <nschichan@freebox.fr> on Fri Jun  7 15:43:06 2019
+ */
+
+#pragma once
+
+#include <linux/of.h>
+
+struct ubus4_master;
+
+struct ubus4_master *ubus4_master_of_get(struct device_node *np);
+struct ubus4_master *ubus4_master_of_get_index(struct device_node *np, int);
+
+void ubus_master_apply_credits(struct ubus4_master *master);
+void ubus_master_set_congestion_threshold(struct ubus4_master *master, u32 v);
+void ubus_master_remap_port(struct ubus4_master *master);
+
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/linux/xdsl_phy_api.h	2025-09-25 17:40:37.143375041 +0200
@@ -0,0 +1,125 @@
+#ifndef XDSL_PHY_API_H_
+#define XDSL_PHY_API_H_
+
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/of.h>
+#include <linux/notifier.h>
+
+struct xdsl_phy;
+
+/*
+ * PHY device API
+ */
+struct xdsl_phy_status {
+	bool		powered_up;
+	bool		link_up;
+	bool		ptm_mode;
+	unsigned int	ds_rate;
+	unsigned int	us_rate;
+	unsigned int	ds_cell_rate;
+	unsigned int	us_cell_rate;
+};
+
+struct xdsl_phy_ops {
+	void	(*get_status)(struct xdsl_phy *, struct xdsl_phy_status *);
+	int	(*open_vcc)(struct xdsl_phy *, int vpi, int vci);
+	int	(*close_vcc)(struct xdsl_phy *, int vpi, int vci);
+	int	(*set_max_sdu)(struct xdsl_phy *, unsigned int max_sdu);
+};
+
+struct xdsl_phy {
+	/*
+	 * to fill before registering
+	 */
+	const struct xdsl_phy_ops	*ops;
+	struct device_node		*of_node;
+	unsigned int			id;
+	struct module			*owner;
+	void				*priv;
+
+	struct mutex			lock;
+	struct mutex			ops_lock;
+	bool				in_use;
+	bool				started;
+	bool				initial_change_pending;
+	struct work_struct		initial_change_work;
+	void				(*change_cb)(struct xdsl_phy *,
+						     void *);
+	void				*change_priv;
+
+	struct list_head		next;
+};
+
+int xdsl_phy_device_register(struct xdsl_phy *);
+
+void xdsl_phy_device_notify_change(struct xdsl_phy *);
+
+void xdsl_phy_device_unregister(struct xdsl_phy *);
+
+
+/*
+ * PHY users API
+ */
+struct xdsl_phy *xdsl_phy_attach(struct device_node *node,
+				 unsigned int id,
+				 void (*change_cb)(struct xdsl_phy *,
+						   void *),
+				 void *change_priv);
+
+void xdsl_phy_start(struct xdsl_phy *);
+void xdsl_phy_stop(struct xdsl_phy *);
+
+void xdsl_phy_detach(struct xdsl_phy *);
+
+static inline void xdsl_phy_op_get_status(struct xdsl_phy *phy_dev,
+					  struct xdsl_phy_status *s)
+{
+	mutex_lock(&phy_dev->ops_lock);
+	phy_dev->ops->get_status(phy_dev, s);
+	mutex_unlock(&phy_dev->ops_lock);
+}
+
+static inline int xdsl_phy_op_open_vcc(struct xdsl_phy *phy_dev,
+				       int vpi, int vci)
+{
+	int ret;
+
+	if (!phy_dev->ops->open_vcc)
+		return -ENOTSUPP;
+
+	mutex_lock(&phy_dev->ops_lock);
+	ret = phy_dev->ops->open_vcc(phy_dev, vpi, vci);
+	mutex_unlock(&phy_dev->ops_lock);
+	return ret;
+}
+
+static inline int xdsl_phy_op_close_vcc(struct xdsl_phy *phy_dev,
+					int vpi, int vci)
+{
+	int ret;
+
+	if (!phy_dev->ops->close_vcc)
+		return -ENOTSUPP;
+
+	mutex_lock(&phy_dev->ops_lock);
+	ret = phy_dev->ops->close_vcc(phy_dev, vpi, vci);
+	mutex_unlock(&phy_dev->ops_lock);
+	return ret;
+}
+
+static inline int xdsl_phy_op_set_max_sdu(struct xdsl_phy *phy_dev,
+					  unsigned int max_sdu)
+{
+	int ret;
+
+	if (!phy_dev->ops->set_max_sdu)
+		return -ENOTSUPP;
+
+	mutex_lock(&phy_dev->ops_lock);
+	ret = phy_dev->ops->set_max_sdu(phy_dev, max_sdu);
+	mutex_unlock(&phy_dev->ops_lock);
+	return ret;
+}
+
+#endif /* ! XDSL_PHY_API_H_ */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/net/fbx80211.h	2025-09-29 14:23:07.617732469 +0200
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2023 Freebox
+ */
+#ifndef FBX80211_H
+#define FBX80211_H
+
+#include <linux/nlfbx.h>
+#include <linux/average.h>
+
+#ifdef CONFIG_FBX80211_SCUM
+
+DECLARE_EWMA(scum_signal, 10, 8)
+
+struct fbx80211_scum_stats {
+	struct ewma_scum_signal signal;
+	unsigned long last_seen;
+	u32 bytes;
+	u32 packets;
+	u32 cumulative;
+};
+
+struct fbx80211_scum {
+	struct fbx80211_scum_stats stats[NUM_NLFBX_ATTR_SCUM_INFO - 1];
+	struct list_head next;
+	struct rcu_head rcu;
+	spinlock_t lock;
+	u8 bssid[ETH_ALEN];
+	u8 addr[ETH_ALEN];
+};
+
+struct fbx80211_scum_ops {
+	int (*client_add)(u8 const *bssid, u8 const *addr,
+			  struct fbx80211_scum_ops *ops);
+	void (*client_del)(u8 const *bssid, u8 const *addr,
+			   struct fbx80211_scum_ops *ops);
+	void (*remove)(struct fbx80211_scum_ops *ops);
+};
+
+void fbx80211_scum_client_for_each(struct wireless_dev *wdev,
+				   void (*cb)(struct fbx80211_scum *, void *),
+				   void *args);
+
+struct fbx80211_scum *fbx80211_scum_client_get(struct wireless_dev *wdev,
+					       u8 const *addr);
+int fbx80211_scum_client_add(struct wireless_dev *wdev, u8 const *bssid,
+			     u8 const *addr);
+int fbx80211_scum_client_del(struct wireless_dev *wdev, u8 const *bssid,
+			     u8 const *addr);
+int fbx80211_scum_create(struct wireless_dev *wdev);
+void fbx80211_scum_remove(struct wireless_dev *wdev);
+void fbx80211_scum_rx_pkt(struct wireless_dev *wdev,
+			  struct sk_buff *skb, struct ieee80211_rate *rate,
+			  unsigned int rtap_space);
+
+static inline enum nlfbx_scum_info_attrs
+fbx80211_scum_stat_idx_to_info_type(unsigned int idx)
+{
+	static enum nlfbx_scum_info_attrs const __idx_to_scum_info_type[] = {
+		NLFBX_ATTR_SCUM_INFO_ALL,
+		NLFBX_ATTR_SCUM_INFO_DATA,
+		NLFBX_ATTR_SCUM_INFO_NONDATA,
+		NLFBX_ATTR_SCUM_INFO_ACK,
+	};
+
+	BUILD_BUG_ON(ARRAY_SIZE(__idx_to_scum_info_type) !=
+		     (NUM_NLFBX_ATTR_SCUM_INFO - 1));
+
+	if (idx >= ARRAY_SIZE(__idx_to_scum_info_type))
+		return NLFBX_ATTR_SCUM_INFO_UNSPEC;
+
+	return __idx_to_scum_info_type[idx];
+}
+
+static inline int
+fbx80211_scum_stat_info_type_to_idx(enum nlfbx_scum_info_attrs type)
+{
+	BUILD_BUG_ON(ARRAY_SIZE(((struct fbx80211_scum *)NULL)->stats) !=
+		     (NUM_NLFBX_ATTR_SCUM_INFO - 1));
+
+	return type - 1;
+}
+
+#endif
+
+#endif
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/net/ip6_ffn.h	2025-09-25 17:40:37.163375140 +0200
@@ -0,0 +1,59 @@
+#ifndef IP6_FFN_H_
+#define IP6_FFN_H_
+
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/rwlock.h>
+#include <net/route.h>
+#include <net/netfilter/nf_conntrack.h>
+
+struct ffn6_data {
+	u32 new_sip[4];
+	u32 new_dip[4];
+
+	u16 new_sport;
+	u16 new_dport;
+	__sum16 adjustment;
+	u8 new_tos;
+	u32 new_skb_prio;
+	u32 new_mark;
+
+	u32 force_skb_prio : 1;
+	u32 alter : 1;
+	u32 tos_change : 1;
+	struct dst_entry *dst;
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+
+	void (*priv_destructor)(void *);
+	u32 ffn_priv_area[8];
+};
+
+struct ffn6_lookup_entry {
+	u32 sip[4];
+	u32 dip[4];
+	u16 sport;
+	u16 dport;
+	u8 protocol;
+	u8 added_when;
+#ifdef CONFIG_IPV6_FFN_PROCFS
+	uint64_t forwarded_bytes;
+	uint32_t forwarded_packets;
+#endif
+	struct list_head next;
+	struct ffn6_data manip;
+	struct list_head all_next;
+	struct rcu_head rcu;
+};
+
+struct ffn6_lookup_key {
+	const u32 *sip;
+	const u32 *dip;
+	u16 sport;
+	u16 dport;
+	bool is_tcp;
+};
+
+struct ffn6_lookup_entry *__ffn6_get_rcu(const struct ffn6_lookup_key *key);
+
+#endif /* ! IP6_FFN_H_*/
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/net/ip_ffn.h	2025-09-25 17:40:37.163375140 +0200
@@ -0,0 +1,58 @@
+#ifndef IP_FFN_H_
+#define IP_FFN_H_
+
+#include <linux/types.h>
+#include <linux/net.h>
+#include <linux/rwlock.h>
+#include <net/route.h>
+#include <net/netfilter/nf_conntrack.h>
+
+struct ffn_data {
+	u32 new_sip;
+	u32 new_dip;
+	u16 new_sport;
+	u16 new_dport;
+	u8 new_tos;
+	u8 force_skb_prio : 1;
+	u8 alter : 1;
+	u8 tos_change : 1;
+	__sum16 ip_adjustment;
+	__sum16 l4_adjustment;
+	unsigned int new_skb_prio;
+	u32 new_mark;
+	struct dst_entry *dst;
+	struct nf_conn *ct;
+	enum ip_conntrack_info ctinfo;
+
+	void (*priv_destructor)(void *);
+	u32 ffn_priv_area[8];
+};
+
+struct ffn_lookup_entry {
+	int added_when;
+	u32 sip;
+	u32 dip;
+	u16 sport;
+	u16 dport;
+	u8 protocol;
+#ifdef CONFIG_IP_FFN_PROCFS
+	uint64_t forwarded_bytes;
+	uint32_t forwarded_packets;
+#endif
+	struct list_head next;
+	struct ffn_data manip;
+	struct list_head all_next;
+	struct rcu_head rcu;
+};
+
+struct ffn_lookup_key {
+	u32 sip;
+	u32 dip;
+	u16 sport;
+	u16 dport;
+	bool is_tcp;
+};
+
+struct ffn_lookup_entry *__ffn_get_rcu(const struct ffn_lookup_key *key);
+
+#endif /* ! IP_FFN_H_*/
diff -Nruw linux-6.13.12-fbx/include/soc/bcm63xx./pmc.h linux-6.13.12-fbx/include/soc/bcm63xx/pmc.h
--- linux-6.13.12-fbx/include/soc/bcm63xx./pmc.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/include/soc/bcm63xx/pmc.h	2025-09-25 17:40:37.199375319 +0200
@@ -0,0 +1,25 @@
+/*
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SOC_BCM63XX_PMC_H__
+#define __SOC_BCM63XX_PMC_H__
+
+#ifdef CONFIG_SOC_BCM63XX
+int bcm63xx_pmc_cpu_power_on(unsigned int cpuid);
+#else
+static inline int bcm63xx_pmc_cpu_power_on(unsigned int cpuid)
+{
+	return -ENODEV;
+}
+#endif /* CONFIG_SOC_BCM63XX */
+
+#endif /* __SOC_BCM63XX_PMC_H__ */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/soc/qcom/license-manager-simple.h	2025-09-25 17:40:37.247375557 +0200
@@ -0,0 +1,28 @@
+/*
+ * license-manager-simple.h for license-manager
+ * Created by <nschichan@freebox.fr> on Fri Oct  4 14:31:08 2024
+ */
+
+#pragma once
+
+struct lm_license_buf {
+	void *kernel_addr;
+	dma_addr_t dma_addr;
+	size_t size;
+};
+
+
+#if IS_ENABLED(CONFIG_QCOM_LICENSE_MANAGER_SIMPLE)
+int lm_get_license_pcidev(struct pci_dev *dev, struct lm_license_buf *lmbuf);
+
+void lm_free_license(struct lm_license_buf *lmbuf);
+#else
+static inline int lm_get_license_pcidev(struct pci_dev *dev,
+					struct lm_license_buf *lmbuf)
+{
+	return -ENOSYS;
+}
+static inline void lm_free_license(struct lm_license_buf *lmbuf)
+{
+}
+#endif
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/bcm63xx_rdp_ioctl.h	2025-09-25 17:40:37.279375716 +0200
@@ -0,0 +1,71 @@
+#ifndef LINUX_BCM63XX_RDP_IOCTL_H_
+#define LINUX_BCM63XX_RDP_IOCTL_H_
+
+#include <linux/types.h>
+
+enum {
+	RDP_IOC_OP_GET_INFO,
+
+	RDP_IOC_OP_READ8,
+	RDP_IOC_OP_READ16,
+	RDP_IOC_OP_READ32,
+	RDP_IOC_OP_WRITE8,
+	RDP_IOC_OP_WRITE16,
+	RDP_IOC_OP_WRITE32,
+
+	RDP_IOC_OP_READ_TM_32,
+	RDP_IOC_OP_WRITE_TM_32,
+	RDP_IOC_OP_READ_MC_32,
+	RDP_IOC_OP_WRITE_MC_32,
+
+	RDP_IOC_OP_RESET,
+
+	RDP_IOC_DMA_MAP,
+	RDP_IOC_DMA_GET_INFO,
+	RDP_IOC_DMA_FLUSH_ALL,
+	RDP_IOC_DMA_READ_BUFFER,
+	RDP_IOC_DMA_WRITE_BUFFER,
+
+	RDP_IOC_OP_MAP_INTERRUPTS,
+};
+
+struct bcm_rdp_pioctl_dma_result {
+	__u32		id;
+	__u32		size;
+	__u64		virt_addr;
+	__u64		dma_addr;
+};
+
+struct bcm_rdp_pioctl_get_info_result {
+	__u64		tm_dma_addr;
+	__u64		mc_dma_addr;
+	__u32		tm_size;
+	__u32		mc_size;
+};
+
+struct bcm_rdp_pioctl {
+	union {
+		/* for get_info op */
+		struct {
+			void __user	*buf_addr;
+		} get_info;
+
+		/* for read/write op */
+		struct {
+			__u32		reg_area;
+			__u32		offset;
+			__u32		size;
+			void __user	*buf_addr;
+		} io;
+
+		/* for dma op */
+		struct {
+			__u32		id;
+			__u32		size;
+			void __user	*buf_addr;
+		} dma;
+	} u;
+};
+
+#endif /* LINUX_BCM63XX_RDP_IOCTL_H_ */
+
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/exfat_user.h	2025-09-25 17:40:37.287375755 +0200
@@ -0,0 +1,47 @@
+/*
+ * exfat_user.h for exfat
+ * Created by <nschichan@freebox.fr> on Fri Aug 23 15:31:08 2013
+ */
+
+#ifndef __EXFAT_USER_H
+# define __EXFAT_USER_H
+
+struct exfat_fragment {
+	uint32_t	fcluster_start;
+	uint32_t	dcluster_start;
+	uint32_t	nr_clusters;
+	uint64_t	sector_start;
+};
+
+struct exfat_fragment_head {
+	uint32_t		fcluster_start;
+	uint32_t		nr_fragments;
+	uint32_t		sector_size;
+	uint32_t		cluster_size;
+	struct exfat_fragment	fragments[0];
+};
+
+struct exfat_bitmap_data {
+	uint32_t		start_cluster;
+	uint32_t		nr_clusters;
+	uint64_t		sector_start;
+	uint64_t		nr_sectors;
+};
+
+struct exfat_bitmap_head {
+	uint32_t			start_cluster;
+	uint32_t			nr_entries;
+	struct exfat_bitmap_data	entries[0];
+};
+
+struct exfat_dirent_head {
+	uint32_t offset;
+	uint32_t nr_entries;
+	uint8_t entries[0];
+};
+
+#define EXFAT_IOCGETFRAGMENTS	_IOR('X', 0x01, struct exfat_fragment_head)
+#define EXFAT_IOCGETBITMAP	_IOR('X', 0x02, struct exfat_bitmap_head)
+#define EXFAT_IOCGETDIRENTS	_IOR('X', 0x03, struct exfat_dirent_head)
+
+#endif /* !__EXFAT_USER_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/fbxatm.h	2025-09-25 17:40:37.287375755 +0200
@@ -0,0 +1,159 @@
+/*
+ * Generic fbxatm definition, exported to userspace
+ */
+#ifndef LINUX_FBXATM_H_
+#define LINUX_FBXATM_H_
+
+#include <linux/types.h>
+#include <linux/if.h>
+
+#define FBXATM_IOCTL_MAGIC		0xd3
+
+/* allow userspace usage without up to date kernel headers */
+#ifndef PF_FBXATM
+#define PF_FBXATM			32
+#define AF_FBXATM			PF_FBXATM
+#endif
+
+struct fbxatm_vcc_id {
+	int				dev_idx;
+	__u32				vpi;
+	__u32				vci;
+};
+
+enum fbxatm_vcc_user {
+	FBXATM_VCC_USER_NONE = 0,
+	FBXATM_VCC_USER_2684,
+	FBXATM_VCC_USER_PPPOA,
+};
+
+enum fbxatm_vcc_traffic_class {
+	FBXATM_VCC_TC_UBR_NO_PCR = 0,
+	FBXATM_VCC_TC_UBR,
+};
+
+struct fbxatm_vcc_qos {
+	__u32				traffic_class;
+	__u32				max_sdu;
+	__u32				max_buffered_pkt;
+	__u32				priority;
+	__u32				rx_priority;
+};
+
+
+/*
+ * VCC related
+ */
+struct fbxatm_vcc_params {
+	/* ADD/DEL/GET */
+	struct fbxatm_vcc_id		id;
+
+	/* ADD/GET */
+	struct fbxatm_vcc_qos		qos;
+
+	/* GET */
+	enum fbxatm_vcc_user		user;
+};
+
+#define FBXATM_IOCADD		_IOW(FBXATM_IOCTL_MAGIC,	1,	\
+					struct fbxatm_vcc_params)
+
+#define FBXATM_IOCDEL		_IOR(FBXATM_IOCTL_MAGIC,	2,	\
+					struct fbxatm_vcc_params)
+
+#define FBXATM_IOCGET		_IOWR(FBXATM_IOCTL_MAGIC,	3,	\
+					struct fbxatm_vcc_params)
+
+
+struct fbxatm_vcc_drop_params {
+	struct fbxatm_vcc_id		id;
+	unsigned int			drop_count;
+};
+
+#define FBXATM_IOCDROP		_IOWR(FBXATM_IOCTL_MAGIC,	5,	\
+					struct fbxatm_vcc_drop_params)
+
+/*
+ * OAM related
+ */
+enum fbxatm_oam_ping_type {
+	FBXATM_OAM_PING_SEG_F4	= 0,
+	FBXATM_OAM_PING_SEG_F5,
+	FBXATM_OAM_PING_E2E_F4,
+	FBXATM_OAM_PING_E2E_F5,
+};
+
+struct fbxatm_oam_ping_req {
+	/* only dev_idx for F4 */
+	struct fbxatm_vcc_id		id;
+
+	__u8				llid[16];
+	enum fbxatm_oam_ping_type	type;
+};
+
+#define FBXATM_IOCOAMPING	_IOWR(FBXATM_IOCTL_MAGIC,	10,	\
+				      struct fbxatm_oam_ping_req)
+
+
+/*
+ * PPPOA related
+ */
+enum fbxatm_pppoa_encap {
+	FBXATM_EPPPOA_AUTODETECT = 0,
+	FBXATM_EPPPOA_VCMUX,
+	FBXATM_EPPPOA_LLC,
+};
+
+struct fbxatm_pppoa_vcc_params {
+	struct fbxatm_vcc_id		id;
+	__u32				encap;
+	__u32				cur_encap;
+};
+
+#define FBXATM_PPPOA_IOCADD	_IOW(FBXATM_IOCTL_MAGIC,	20,	\
+					struct fbxatm_pppoa_vcc_params)
+
+#define FBXATM_PPPOA_IOCDEL	_IOW(FBXATM_IOCTL_MAGIC,	21,	\
+					struct fbxatm_pppoa_vcc_params)
+
+#define FBXATM_PPPOA_IOCGET	_IOWR(FBXATM_IOCTL_MAGIC,	22,	\
+					struct fbxatm_pppoa_vcc_params)
+
+
+
+/*
+ * 2684 related
+ */
+enum fbxatm_2684_encap {
+	FBXATM_E2684_VCMUX = 0,
+	FBXATM_E2684_LLC,
+};
+
+enum fbxatm_2684_payload {
+	FBXATM_P2684_BRIDGE = 0,
+	FBXATM_P2684_ROUTED,
+};
+
+#define FBXATM_2684_MAX_VCC		8
+
+struct fbxatm_2684_vcc_params {
+	struct fbxatm_vcc_id		id_list[FBXATM_2684_MAX_VCC];
+	size_t				id_count;
+
+	__u32				encap;
+	__u32				payload;
+	char				dev_name[IFNAMSIZ];
+	__u8				perm_addr[6];
+};
+
+
+#define FBXATM_2684_IOCADD	_IOW(FBXATM_IOCTL_MAGIC,	30,	\
+					struct fbxatm_2684_vcc_params)
+
+#define FBXATM_2684_IOCDEL	_IOW(FBXATM_IOCTL_MAGIC,	31,	\
+					struct fbxatm_2684_vcc_params)
+
+#define FBXATM_2684_IOCGET	_IOWR(FBXATM_IOCTL_MAGIC,	32,	\
+					struct fbxatm_2684_vcc_params)
+
+#endif /* LINUX_FBXATM_H_ */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/fbxbridge.h	2025-09-25 17:40:37.291375775 +0200
@@ -0,0 +1,72 @@
+#ifndef _UAPI_FBXBRIDGE_H
+# define _UAPI_FBXBRIDGE_H
+
+#include <linux/if.h>
+#include <linux/if_ether.h>
+
+#define MAX_ALIASES				3
+
+#define FBXBRIDGE_FLAGS_FILTER			(1 << 0)
+#define FBXBRIDGE_FLAGS_DHCPD			(1 << 1)
+#define FBXBRIDGE_FLAGS_NETFILTER		(1 << 2)
+
+/*
+ * ioctl command
+ */
+
+enum fbxbridge_ioctl_cmd
+{
+	E_CMD_BR_CHG = 0,
+	E_CMD_BR_DEV_CHG,
+	E_CMD_BR_PARAMS,
+};
+
+struct fbxbridge_ioctl_chg
+{
+	char	brname[IFNAMSIZ];
+	__u32	action;
+};
+
+struct fbxbridge_ioctl_dev_chg
+{
+	char	brname[IFNAMSIZ];
+	char	devname[IFNAMSIZ];
+	__u32	wan;
+	__u32	action;
+};
+
+struct fbxbridge_port_info
+{
+	char	name[IFNAMSIZ];
+	__u32	present;
+};
+
+struct fbxbridge_ioctl_params
+{
+	int				action;
+	char				brname[IFNAMSIZ];
+
+	/* config */
+	__u32				flags;
+	__be32				dns1_addr;
+	__be32				dns2_addr;
+	__be32				ip_aliases[MAX_ALIASES];
+	__u32				dhcpd_renew_time;
+	__u32				dhcpd_rebind_time;
+	__u32				dhcpd_lease_time;
+	__u32				inputmark;
+
+	/* status */
+	struct fbxbridge_port_info	wan_dev;
+	struct fbxbridge_port_info	lan_dev;
+	__u8				lan_hwaddr[ETH_ALEN];
+	__u32				have_hw_addr;
+};
+
+struct fbxbridge_ioctl_req
+{
+	enum fbxbridge_ioctl_cmd	cmd;
+	unsigned long			arg;
+};
+
+#endif /* _UAPI_FBXBRIDGE_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/fbxjtag.h	2025-09-25 17:40:37.291375775 +0200
@@ -0,0 +1,89 @@
+#ifndef FBXJTAG_H_
+# define FBXJTAG_H_
+
+#ifdef __KERNEL__
+# include <linux/types.h>
+#endif
+
+# define JTAG_RESET_STEPS	16
+# define JTAG_DATA_READ_SIZE	128
+# define JTAG_INST_READ_SIZE	128
+# define JTAG_DEF_CLOCK_DELAY	500
+# define JTAG_DEF_WAIT_TMS	0
+
+enum jtag_main_state {
+	JTAG_STATE_TEST_MASK	=	0x10,
+	JTAG_STATE_RUN_MASK	=	0x20,
+	JTAG_STATE_DR_MASK	=	0x40,
+	JTAG_STATE_IR_MASK	=	0x80,
+};
+#define JTAG_STATE_MASK			0xF0
+
+enum jtag_sub_state {
+	JTAG_SUB_STATE_SELECT	=	0x0,
+	JTAG_SUB_STATE_CAPTURE	=	0x1,
+	JTAG_SUB_STATE_SHIFT	=	0x2,
+	JTAG_SUB_STATE_EXIT1	=	0x3,
+	JTAG_SUB_STATE_PAUSE	=	0x4,
+	JTAG_SUB_STATE_EXIT2	=	0x5,
+	JTAG_SUB_STATE_UPDATE	=	0x6,
+};
+#define JTAG_SUB_STATE_MASK		0xF
+
+enum jtag_state {
+	JTAG_STATE_UNDEF	= 0,
+	JTAG_STATE_TEST_LOGIC_RESET	= JTAG_STATE_TEST_MASK,
+	JTAG_STATE_RUN_TEST_IDLE	= JTAG_STATE_RUN_MASK,
+
+	JTAG_STATE_SELECT_DR	= JTAG_STATE_DR_MASK | JTAG_SUB_STATE_SELECT,
+	JTAG_STATE_CAPTURE_DR	= JTAG_STATE_DR_MASK | JTAG_SUB_STATE_CAPTURE,
+	JTAG_STATE_SHIFT_DR	= JTAG_STATE_DR_MASK | JTAG_SUB_STATE_SHIFT,
+	JTAG_STATE_EXIT1_DR	= JTAG_STATE_DR_MASK | JTAG_SUB_STATE_EXIT1,
+	JTAG_STATE_PAUSE_DR	= JTAG_STATE_DR_MASK | JTAG_SUB_STATE_PAUSE,
+	JTAG_STATE_EXIT2_DR	= JTAG_STATE_DR_MASK | JTAG_SUB_STATE_EXIT2,
+	JTAG_STATE_UPDATE_DR	= JTAG_STATE_DR_MASK | JTAG_SUB_STATE_UPDATE,
+
+	JTAG_STATE_SELECT_IR	= JTAG_STATE_IR_MASK | JTAG_SUB_STATE_SELECT,
+	JTAG_STATE_CAPTURE_IR	= JTAG_STATE_IR_MASK | JTAG_SUB_STATE_CAPTURE,
+	JTAG_STATE_SHIFT_IR	= JTAG_STATE_IR_MASK | JTAG_SUB_STATE_SHIFT,
+	JTAG_STATE_EXIT1_IR	= JTAG_STATE_IR_MASK | JTAG_SUB_STATE_EXIT1,
+	JTAG_STATE_PAUSE_IR	= JTAG_STATE_IR_MASK | JTAG_SUB_STATE_PAUSE,
+	JTAG_STATE_EXIT2_IR	= JTAG_STATE_IR_MASK | JTAG_SUB_STATE_EXIT2,
+	JTAG_STATE_UPDATE_IR	= JTAG_STATE_IR_MASK | JTAG_SUB_STATE_UPDATE,
+
+	JTAG_STATE_MAX
+};
+
+#define JTAG_STATE_IN_DR(state)	((state) & JTAG_STATE_DR_MASK)
+#define JTAG_STATE_IN_IR(state)	((state) & JTAG_STATE_IR_MASK)
+
+#ifdef __KERNEL__
+
+#define JTAG_BUF_SIZE	2048
+
+struct fbxjtag_data {
+	const char	*name;
+	struct {
+		struct fbxgpio_pin	*tck;
+		struct fbxgpio_pin	*tms;
+		struct fbxgpio_pin	*tdi;
+		struct fbxgpio_pin	*tdo;
+	}		gpios;
+	u32		clock_delay;
+	u32		wait_tms;
+	u32		data_read_size;
+	u32		instruction_read_size;
+	bool		last_tms_dataout;
+	struct device	*dev;
+	enum jtag_state state;
+	char		nb_reset;
+	char		dr_buf[JTAG_BUF_SIZE];
+	unsigned 	dr_w;
+	unsigned 	dr_r;
+	char		ir_buf[JTAG_BUF_SIZE];
+	unsigned 	ir_r;
+	unsigned 	ir_w;
+};
+#endif
+
+#endif /* !FBXJTAG_H_ */
diff -Nruw linux-6.13.12-fbx/include/uapi/linux/hdmi-cec./dev.h linux-6.13.12-fbx/include/uapi/linux/hdmi-cec/dev.h
--- linux-6.13.12-fbx/include/uapi/linux/hdmi-cec./dev.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/include/uapi/linux/hdmi-cec/dev.h	2025-09-25 17:40:37.291375775 +0200
@@ -0,0 +1,30 @@
+#ifndef __HDMI_CEC_DEV_H
+#define __HDMI_CEC_DEV_H
+
+#include <linux/ioctl.h>
+#include <linux/hdmi-cec/hdmi-cec.h>
+
+#define CEC_IOCTL_BASE	'C'
+
+#define CEC_SET_LOGICAL_ADDRESS	_IOW(CEC_IOCTL_BASE, 0, int)
+#define CEC_RESET_DEVICE	_IOW(CEC_IOCTL_BASE, 3, int)
+#define CEC_GET_COUNTERS	_IOR(CEC_IOCTL_BASE, 4, struct cec_counters)
+#define CEC_SET_RX_MODE		_IOW(CEC_IOCTL_BASE, 5, enum cec_rx_mode)
+#define CEC_GET_TX_STATUS	_IOW(CEC_IOCTL_BASE, 6, struct cec_tx_status)
+#define CEC_SET_DETACHED_CONFIG	_IOW(CEC_IOCTL_BASE, 7, struct cec_detached_config)
+
+#define CEC_MAX_DEVS	(10)
+
+#ifdef __KERNEL__
+
+struct cec_adapter;
+
+int __init cec_cdev_init(void);
+void __exit cec_cdev_exit(void);
+
+int cec_create_adapter_node(struct cec_adapter *);
+void cec_remove_adapter_node(struct cec_adapter *);
+
+#endif /* __KERNEL__ */
+
+#endif /* __HDMI_CEC_DEV_H */
diff -Nruw linux-6.13.12-fbx/include/uapi/linux/hdmi-cec./hdmi-cec.h linux-6.13.12-fbx/include/uapi/linux/hdmi-cec/hdmi-cec.h
--- linux-6.13.12-fbx/include/uapi/linux/hdmi-cec./hdmi-cec.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/include/uapi/linux/hdmi-cec/hdmi-cec.h	2025-09-25 17:40:37.295375795 +0200
@@ -0,0 +1,153 @@
+#ifndef __UAPI_HDMI_CEC_H
+#define __UAPI_HDMI_CEC_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/* Common defines for HDMI CEC */
+#define CEC_BCAST_ADDR		(0x0f)
+#define CEC_ADDR_MAX		CEC_BCAST_ADDR
+
+#define CEC_MAX_MSG_LEN		(16)	/* 16 blocks */
+
+enum cec_rx_msg_flags {
+	/*
+	 * an ACK was received for this message
+	 */
+	CEC_RX_F_ACKED			= (1 << 0),
+
+	/*
+	 * message was fully received
+	 */
+	CEC_RX_F_COMPLETE		= (1 << 1),
+};
+
+/**
+ * struct cec_rx_msg - user-space exposed cec message cookie
+ * @data:	cec message payload
+ * @len:	cec message length
+ * @valid:	0 for invalid message
+ * @flags:	flag field (cec_rx_msg_flags)
+ */
+struct cec_rx_msg {
+	__u8	data[CEC_MAX_MSG_LEN];
+	__u8	len;
+	__u8	valid;
+	__u8	flags;
+
+} __attribute__((packed));
+
+enum cec_tx_status_flags {
+	/*
+	 * message was nacked at some point
+	 */
+	CEC_TX_F_NACK			= (1 << 0),
+
+	/*
+	 * abort sending because total time to send was elapsed
+	 */
+	CEC_TX_F_TIMEOUT		= (1 << 1),
+
+	/*
+	 * abort sending because maximum number of retry has passed
+	 */
+	CEC_TX_F_MAX_RETRIES		= (1 << 2),
+
+	/*
+	 * abort sending because of arbitration loss
+	 */
+	CEC_TX_F_ARBITRATION_LOST	= (1 << 3),
+
+	/*
+	 * message failed for other reason
+	 */
+	CEC_TX_F_UNKNOWN_ERROR		= (1 << 7),
+};
+
+/**
+ * struct cec_tx_msg - user-space exposed cec message cookie
+ * @expire_ms:	how long we try to send message (milliseconds)
+ * @data:	cec message payload
+ * @len:	cec message length
+ * @success:	0 => message was sent, else => failed to send message
+ * @flags:	flag field (cec_tx_msg_flags)
+ * @tries:	number of try done to send message
+ */
+struct cec_tx_msg {
+	__u16	expire_ms;
+	__u8	data[CEC_MAX_MSG_LEN];
+	__u8	len;
+	__u8	success;
+	__u8	flags;
+	__u8	tries;
+} __attribute__((packed));
+
+struct cec_tx_status {
+	__u8	sent;
+	__u8	success;
+	__u8	flags;
+	__u8	tries;
+} __attribute__((packed));
+
+#define DETACH_CFG_F_WAKEUP		(1 << 0)
+
+struct cec_detached_config {
+	__u8	phys_addr_valid;
+	__u8	phys_addr[2];
+	__u8	flags;
+} __attribute__((packed));
+
+/* Counters */
+
+/**
+ * struct cec_rx_counters - cec adapter RX counters
+ */
+struct cec_rx_counters {
+	__u8	pkts;
+	__u8	filtered_pkts;
+	__u8	valid_pkts;
+	__u8	rx_queue_full;
+	__u8	late_ack;
+	__u8	error;
+	__u8	rx_timeout_abort;
+	__u8	rx_throttled;
+};
+
+/**
+ * struct cec_tx_counters - cec adapter TX counters
+ */
+struct cec_tx_counters {
+	__u8	done;
+	__u8	fail;
+	__u8	timeout;
+	__u8	arb_loss;
+	__u8	bad_ack_timings;
+	__u8	tx_miss_early;
+	__u8	tx_miss_late;
+};
+
+/**
+ * struct cec_counters - tx and rx cec counters
+ * @rx:	struct cec_rx_counters
+ * @tx: struct cec_tx_counters
+ */
+struct cec_counters {
+	struct cec_rx_counters	rx;
+	struct cec_tx_counters	tx;
+};
+
+/**
+ * enum cec_rx_mode - cec adapter rx mode
+ * @CEC_RX_MODE_DISABLED:	RX path is disabled (default)
+ * @CEC_RX_MODE_DEFAULT:	accept only unicast traffic
+ * @CEC_RX_MODE_ACCEPT_ALL:	accept all incoming RX traffic (sniffing mode)
+ * @CEC_RX_MODE_MAX:		sentinel
+ */
+enum cec_rx_mode {
+	CEC_RX_MODE_DISABLED = 0,
+	CEC_RX_MODE_DEFAULT,
+	CEC_RX_MODE_ACCEPT_ALL,
+	CEC_RX_MODE_MAX
+};
+
+#endif /* __UAPI_HDMI_CEC_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/ipx.h	2025-09-25 17:40:37.303375835 +0200
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _IPX_H_
+#define _IPX_H_
+#include <linux/libc-compat.h>	/* for compatibility with glibc netipx/ipx.h */
+#include <linux/types.h>
+#include <linux/sockios.h>
+#include <linux/socket.h>
+#define IPX_NODE_LEN	6
+#define IPX_MTU		576
+
+#if __UAPI_DEF_SOCKADDR_IPX
+struct sockaddr_ipx {
+	__kernel_sa_family_t sipx_family;
+	__be16		sipx_port;
+	__be32		sipx_network;
+	unsigned char 	sipx_node[IPX_NODE_LEN];
+	__u8		sipx_type;
+	unsigned char	sipx_zero;	/* 16 byte fill */
+};
+#endif /* __UAPI_DEF_SOCKADDR_IPX */
+
+/*
+ * So we can fit the extra info for SIOCSIFADDR into the address nicely
+ */
+#define sipx_special	sipx_port
+#define sipx_action	sipx_zero
+#define IPX_DLTITF	0
+#define IPX_CRTITF	1
+
+#if __UAPI_DEF_IPX_ROUTE_DEFINITION
+struct ipx_route_definition {
+	__be32        ipx_network;
+	__be32        ipx_router_network;
+	unsigned char ipx_router_node[IPX_NODE_LEN];
+};
+#endif /* __UAPI_DEF_IPX_ROUTE_DEFINITION */
+
+#if __UAPI_DEF_IPX_INTERFACE_DEFINITION
+struct ipx_interface_definition {
+	__be32        ipx_network;
+	unsigned char ipx_device[16];
+	unsigned char ipx_dlink_type;
+#define IPX_FRAME_NONE		0
+#define IPX_FRAME_SNAP		1
+#define IPX_FRAME_8022		2
+#define IPX_FRAME_ETHERII	3
+#define IPX_FRAME_8023		4
+#define IPX_FRAME_TR_8022       5 /* obsolete */
+	unsigned char ipx_special;
+#define IPX_SPECIAL_NONE	0
+#define IPX_PRIMARY		1
+#define IPX_INTERNAL		2
+	unsigned char ipx_node[IPX_NODE_LEN];
+};
+#endif /* __UAPI_DEF_IPX_INTERFACE_DEFINITION */
+
+#if __UAPI_DEF_IPX_CONFIG_DATA
+struct ipx_config_data {
+	unsigned char	ipxcfg_auto_select_primary;
+	unsigned char	ipxcfg_auto_create_interfaces;
+};
+#endif /* __UAPI_DEF_IPX_CONFIG_DATA */
+
+/*
+ * OLD Route Definition for backward compatibility.
+ */
+
+#if __UAPI_DEF_IPX_ROUTE_DEF
+struct ipx_route_def {
+	__be32		ipx_network;
+	__be32		ipx_router_network;
+#define IPX_ROUTE_NO_ROUTER	0
+	unsigned char	ipx_router_node[IPX_NODE_LEN];
+	unsigned char	ipx_device[16];
+	unsigned short	ipx_flags;
+#define IPX_RT_SNAP		8
+#define IPX_RT_8022		4
+#define IPX_RT_BLUEBOOK		2
+#define IPX_RT_ROUTED		1
+};
+#endif /* __UAPI_DEF_IPX_ROUTE_DEF */
+
+#define SIOCAIPXITFCRT		(SIOCPROTOPRIVATE)
+#define SIOCAIPXPRISLT		(SIOCPROTOPRIVATE + 1)
+#define SIOCIPXCFGDATA		(SIOCPROTOPRIVATE + 2)
+#define SIOCIPXNCPCONN		(SIOCPROTOPRIVATE + 3)
+#endif /* _IPX_H_ */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/nlfbx.h	2025-09-29 14:23:07.617732469 +0200
@@ -0,0 +1,132 @@
+#ifndef __LINUX_NLFBX_H
+#define __LINUX_NLFBX_H
+
+/*
+ * Freebox netlink interface public header
+ *
+ * Copyright 2023 Freebox
+ */
+
+#include <linux/types.h>
+
+#define NLFBX_GENL_NAME "nlfbx"
+
+/**
+ * enum nlfbx_commands - supported nlfbx commands
+ *
+ * @NLFBX_CMD_UNSPEC: unspecified command to catch errors
+ *
+ * @NLFBX_CMD_CREATE_STA_MONITOR: Create a STA monitor interface
+ * @NLFBX_CMD_REMOVE_STA_MONITOR: Remove a STA monitor interface
+ * @NLFBX_CMD_ADD_STA_MONITOR: Add a new unassociated STA to monitor, needs
+ *	%NLFBX_ATTR_IFINDEX and %NLFBX_ATTR_MAC.
+ * @NLFBX_CMD_DEL_STA_MONITOR: Remove a monitored unassociated STA, needs
+ *	%NLFBX_ATTR_IFINDEX and %NLFBX_ATTR_MAC.
+ * @NLFBX_CMD_GET_STA_MONITOR: Dump info for all monitored unassociated STA,
+ *	a single monitor dev could be filtered with %NLFBX_ATTR_IFINDEX.
+ *
+ * @NLFBX_CMD_MAX: highest used command number
+ * @__NLFBX_CMD_AFTER_LAST: internal use
+ */
+enum nlfbx_commands {
+	NLFBX_CMD_UNSPEC,
+
+	NLFBX_CMD_CREATE_STA_MONITOR,
+	NLFBX_CMD_REMOVE_STA_MONITOR,
+	NLFBX_CMD_ADD_STA_MONITOR,
+	NLFBX_CMD_DEL_STA_MONITOR,
+	NLFBX_CMD_GET_STA_MONITOR,
+
+	/* add new commands above here */
+
+	/* used to define NLFBX_CMD_MAX below */
+	__NLFBX_CMD_AFTER_LAST,
+	NLFBX_CMD_MAX = __NLFBX_CMD_AFTER_LAST - 1
+};
+
+/**
+ * enum nlfbx_attrs - nlfbx netlink attributes
+ *
+ * @NLFBX_ATTR_UNSPEC: unspecified attribute to catch errors
+ *
+ * @NLFBX_ATTR_IFINDEX: network interface index of the device to operate on
+ * @NLFBX_ATTR_BSSID: BSSID MAC address
+ * @NLFBX_ATTR_MAC: MAC address (various uses)
+ * @NLFBX_ATTR_SCUM_INFO: Same Channel Unassociated Metrics information
+ */
+enum nlfbx_attrs {
+	NLFBX_ATTR_UNSPEC,
+	NLFBX_ATTR_IFINDEX,
+	NLFBX_ATTR_BSSID,
+	NLFBX_ATTR_MAC,
+	NLFBX_ATTR_SCUM_INFO,
+
+	/* add attributes here, update the policy in nlfbx.c */
+
+	__NLFBX_ATTR_AFTER_LAST,
+	NUM_NLFBX_ATTR = __NLFBX_ATTR_AFTER_LAST,
+	NLFBX_ATTR_MAX = __NLFBX_ATTR_AFTER_LAST - 1
+};
+
+/**
+ * enum nlfbx_scum_info_attrs - type of unassociated station information
+ *
+ * These describe the type of packets received to compute related unassociated
+ * station metrics.
+ *
+ * @NLFBX_ATTR_SCUM_INFO_UNSPEC: unspecified attribute to catch errors
+ *
+ * @NLFBX_ATTR_SCUM_INFO_DATA: Unassociated station metrics for data packets
+ * @NLFBX_ATTR_SCUM_INFO_NONDATA: Unassociated station metrics for non data
+ *	packets
+ *
+ * @NLFBX_ATTR_SCUM_INFO_MAX: highest type of unassociated station information
+ */
+enum nlfbx_scum_info_attrs {
+	NLFBX_ATTR_SCUM_INFO_UNSPEC,
+
+	NLFBX_ATTR_SCUM_INFO_ALL,
+	NLFBX_ATTR_SCUM_INFO_DATA,
+	NLFBX_ATTR_SCUM_INFO_NONDATA,
+	NLFBX_ATTR_SCUM_INFO_ACK,
+
+	__NLFBX_ATTR_SCUM_INFO_AFTER_LAST,
+	NUM_NLFBX_ATTR_SCUM_INFO = __NLFBX_ATTR_SCUM_INFO_AFTER_LAST,
+	NLFBX_ATTR_SCUM_INFO_MAX = __NLFBX_ATTR_SCUM_INFO_AFTER_LAST - 1,
+};
+
+/**
+ * enum nlfbx_scum_info_metrics - Type of unassociated station information
+ * metrics
+ *
+ * These attribute types are used within a %NLFBX_ATTR_SCUM_INFO_*
+ * when getting information about a station.
+ *
+ * @NLFBX_SCUM_INFO_METRICS_UNSPEC: attribute number 0 is reserved
+ *
+ * @NLFBX_SCUM_INFO_METRICS_SIGNAL: Average signal strength of PPDU monitored
+ * @NLFBX_SCUM_INFO_METRICS_CUMULATIVE: cumulative signal of PPDU monitored
+ * @NLFBX_SCUM_INFO_METRICS_BYTES: Total bytes monitored
+ * @NLFBX_SCUM_INFO_METRICS_PACKETS: Total number of packets monitored
+ * @NLFBX_SCUM_INFO_METRICS_INACTIVE_TIME: Time since last activity
+ *					      (u32, msecs)
+ *
+ * @__NLFBX_SCUM_INFO_METRICS_AFTER_LAST: internal
+ * @NLFBX_SCUM_INFO_METRICS_MAX: highest possible scum info metrics attribute
+ */
+enum nlfbx_scum_info_metrics {
+	NLFBX_SCUM_INFO_METRICS_UNSPEC,
+
+	NLFBX_SCUM_INFO_METRICS_SIGNAL,
+	NLFBX_SCUM_INFO_METRICS_CUMULATIVE,
+	NLFBX_SCUM_INFO_METRICS_BYTES,
+	NLFBX_SCUM_INFO_METRICS_PACKETS,
+	NLFBX_SCUM_INFO_METRICS_INACTIVE_TIME,
+
+	/* keep last */
+	__NLFBX_SCUM_INFO_METRICS_AFTER_LAST,
+	NUM_NLFBX_SCUM_INFO_METRICS = __NLFBX_SCUM_INFO_METRICS_AFTER_LAST,
+	NLFBX_SCUM_INFO_METRICS_MAX = __NLFBX_SCUM_INFO_METRICS_AFTER_LAST - 1
+};
+
+#endif
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/include/uapi/linux/prctl-private.h	2025-09-25 17:40:37.315375894 +0200
@@ -0,0 +1,19 @@
+#ifndef _LINUX_PRCTL_PRIVATE_H
+#define _LINUX_PRCTL_PRIVATE_H
+
+/*
+ * Freebox addition: set/get exec mode.
+ */
+#define PR_SET_EXEC_MODE	77
+#define PR_GET_EXEC_MODE	78
+
+/*
+ * Freebox addition: set pseudo aslr mode for process
+ */
+#define PR_SET_PASLR_POLICY	79
+
+#define PR_PASLR_POLICY_UID		0
+#define PR_PASLR_POLICY_DISABLE		1
+#define PR_PASLR_POLICY_PRESEED		2
+
+#endif /* ! _LINUX_PRCTL_PRIVATE_H */
diff -Nruw linux-6.13.12-fbx/include/uapi/linux/remoti./remoti.h linux-6.13.12-fbx/include/uapi/linux/remoti/remoti.h
--- linux-6.13.12-fbx/include/uapi/linux/remoti./remoti.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/include/uapi/linux/remoti/remoti.h	2025-09-25 17:40:37.315375894 +0200
@@ -0,0 +1,137 @@
+#ifndef _UAPI_REMOTI_H
+#define _UAPI_REMOTI_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * subsystem definitions
+ */
+#define NPI_SYS_RES0		0
+#define NPI_SYS_SYS		1
+#define NPI_SYS_MAC		2
+#define NPI_SYS_NWK		3
+#define NPI_SYS_AF		4
+#define NPI_SYS_ZDO		5
+#define NPI_SYS_SAPI		6
+#define NPI_SYS_UTIL		7
+#define NPI_SYS_DBG		8
+#define NPI_SYS_APP		9
+#define NPI_SYS_RCAF		10
+#define NPI_SYS_RCN		11
+#define NPI_SYS_RCN_CLI		12
+#define NPI_SYS_BOOT		13
+#define NPI_SYS_MAX		14
+#define NPI_SYS_MASK		0x1F
+
+/*
+ * type definitions
+ */
+#define NPI_POLL		0
+#define NPI_SREQ		1
+#define NPI_AREQ		2
+#define NPI_SRSP		3
+#define NPI_TYPE_MAX		4
+#define NPI_TYPE_MASK		3
+#define NPI_TYPE_SHIFT		5
+
+
+/* common error codes (see RemoTI API) */
+#define RTI_SUCCESS		0x00
+
+/*
+ * rti user message
+ */
+#define NPI_MAX_DATA_LEN	123
+
+struct rti_msg {
+	__u8	type;
+	__u8	subsys;
+	__u8	cmd;
+
+	__u8	data_len;
+	__u8	data[NPI_MAX_DATA_LEN];
+
+	__u8	custom_reply_cmd;
+	__u8	reply_cmd;
+	__u8	reply_len;
+	__u8	reply[NPI_MAX_DATA_LEN];
+};
+
+/*
+ * socket addr family on "user" device
+ */
+#ifndef PF_REMOTI
+#define PF_REMOTI			37
+#define AF_REMOTI			PF_REMOTI
+#endif
+
+struct sockaddr_rti {
+	__u32	device_id;
+};
+
+#define SOL_REMOTI			280
+#define REMOTI_REGISTER_CB		0
+
+struct rti_callback {
+	__u8	subsys;
+	__u8	cmd;
+};
+
+/*
+ * ioctl on uart device
+ */
+enum rti_dev_state {
+	RTI_DEV_S_STOPPED = 0,
+	RTI_DEV_S_BOOTING,
+	RTI_DEV_S_BOOT_FAILED,
+	RTI_DEV_S_OPERATIONAL,
+	RTI_DEV_S_STOPPING,
+	RTI_DEV_S_DEAD,
+};
+
+struct rti_dev_status {
+	__u32	dev_state;
+	__u32	fw_version;
+};
+
+struct rti_dev_stats {
+	__u64	tx_bytes;
+	__u64	tx_packets;
+
+	__u64	tx_boot_packets;
+	__u64	tx_rcaf_packets;
+	__u64	tx_util_packets;
+	__u64	tx_other_packets;
+
+
+	__u64	rx_bytes;
+	__u64	rx_packets;
+	__u64	rx_bad_sof;
+	__u64	rx_len_errors;
+	__u64	rx_fcs_errors;
+	__u64	rx_tty_errors;
+	__u64	rx_full_errors;
+	__u64	rx_subsys_errors;
+	__u64	rx_type_errors;
+	__u64	rx_no_callback;
+
+	__u64	rx_boot_packets;
+	__u64	rx_rcaf_packets;
+	__u64	rx_util_packets;
+	__u64	rx_other_packets;
+};
+
+enum {
+	RTI_BOOT_FLAGS_FORCE_UPDATE	= (1 << 0),
+};
+
+#define RTI_IOCTL_MAGIC		0xd4
+#define RTI_ATTACH_DEVICE	_IOR(RTI_IOCTL_MAGIC, 1, __u32)
+#define RTI_GET_STATUS		_IOW(RTI_IOCTL_MAGIC, 2, struct rti_dev_status)
+#define RTI_GET_STATS		_IOW(RTI_IOCTL_MAGIC, 3, struct rti_dev_stats)
+
+#define RTI_START_DEVICE	_IOR(RTI_IOCTL_MAGIC, 8, __u32)
+#define RTI_STOP_DEVICE		_IO(RTI_IOCTL_MAGIC, 9)
+
+#endif /* _UAPI_REMOTI_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/lib/fbxserial.c	2025-09-25 17:40:37.527376945 +0200
@@ -0,0 +1,175 @@
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/crc32.h>
+#include <linux/slab.h>
+
+#include <asm/io.h>
+
+#include <linux/fbxserial.h>
+
+#define PFX "builtin-fbxserial: "
+
+static void
+fbxserialinfo_use_default(struct fbx_serial *serial)
+{
+	printk(KERN_WARNING PFX "warning: using default serial infos\n");
+	fbxserial_set_default(serial);
+}
+
+/*
+ * add trailing 0 for bundle string here.
+ */
+static void
+bundle_fixup(struct fbx_serial *serial)
+{
+	struct fbx_serial_extinfo *p;
+	int i;
+
+	for (i = 0; i < be32_to_cpu(serial->extinfo_count); i++) {
+
+		if (i >= EXTINFO_MAX_COUNT)
+			break;
+
+		p = &serial->extinfos[i];
+		if (be32_to_cpu(p->type) == EXTINFO_TYPE_EXTDEV &&
+		    be32_to_cpu(p->u.extdev.type) == EXTDEV_TYPE_BUNDLE) {
+			int size;
+
+			size = sizeof (p->u.extdev.serial);
+			p->u.extdev.serial[size - 1] = 0;
+		}
+	}
+}
+
+/*
+ * called from arch code early in the boot sequence. This function
+ * returns 1 in case serial infos are invalid/unreadable and default
+ * values have been used.
+ */
+int
+fbxserialinfo_read(const void *data, struct fbx_serial *out)
+{
+	uint32_t sum;
+
+	/*
+	 * get partial serial data from flash/whatever.
+	 */
+	memcpy(out, data, sizeof (*out));
+
+	/* check magic first */
+	if (be32_to_cpu(out->magic) != FBXSERIAL_MAGIC) {
+		printk(KERN_NOTICE PFX "invalid magic (%08x, expected %08x), "
+			"using defaults !\n", be32_to_cpu(out->magic),
+		       FBXSERIAL_MAGIC);
+		goto out_default;
+	}
+
+	/* fetch size for which we have to check CRC */
+	if (be32_to_cpu(out->len) > FBXSERIAL_MAX_SIZE) {
+		printk(KERN_NOTICE PFX "structure size too big (%d), "
+		       "using defaults !\n", be32_to_cpu(out->len));
+		goto out_default;
+	}
+
+	/* compute and check checksum */
+	sum = crc32(0, data + 4, be32_to_cpu(out->len) - 4);
+
+	if (be32_to_cpu(out->crc32) != sum) {
+		printk(KERN_NOTICE PFX "invalid checksum (%08x, "
+		       "expected %08x), using defaults !\n", sum,
+		       be32_to_cpu(out->crc32));
+		goto out_default;
+	}
+
+	printk(KERN_INFO PFX "Found valid serial infos !\n");
+	bundle_fixup(out);
+	return 0;
+
+ out_default:
+	fbxserialinfo_use_default(out);
+	bundle_fixup(out);
+	return 1;
+}
+
+void
+fbxserialinfo_get_random(unsigned char *data, unsigned int len)
+{
+	const struct fbx_serial *s;
+
+	memset(data, 0, 6);
+	s = arch_get_fbxserial();
+	if (WARN(!s, "arch_get_fbxserial returned NULL"))
+		return;
+
+	if (len > sizeof (s->random_data))
+		len = sizeof (s->random_data);
+
+	memcpy(data, s->random_data, len);
+}
+EXPORT_SYMBOL(fbxserialinfo_get_random);
+
+static u8 *mac_table;
+
+static void inc_mac(u8 *mac, int count)
+{
+	int index = 5;
+	int overflow;
+
+	do {
+		unsigned int val = mac[index] + count;
+
+		overflow = val >> 8;
+		mac[index] = val;
+		count = (count + 255) >> 8;
+		--index;
+	} while (index >= 0 && overflow);
+}
+
+static int gen_mac_table(const struct fbx_serial *s)
+{
+	int i;
+
+	mac_table = kmalloc(6 * s->mac_count, GFP_KERNEL);
+	if (!mac_table)
+		return -ENOMEM;
+
+	for (i = 0; i < s->mac_count; ++i) {
+		u8 *mac = &mac_table[6 * i];
+
+		memcpy(mac, s->mac_addr_base, 6);
+		inc_mac(mac, i);
+	}
+	return 0;
+}
+
+const void *
+fbxserialinfo_get_mac_addr(unsigned int index)
+{
+	const struct fbx_serial *s;
+
+	s = arch_get_fbxserial();
+	if (!s)
+		return ERR_PTR(-EPROBE_DEFER);
+
+	if (index >= s->mac_count) {
+		pr_warn(PFX "mac index %d too high: using default.\n",
+			index);
+		goto default_mac;
+	}
+
+	if (!mac_table) {
+		int error = gen_mac_table(s);
+		if (error) {
+			pr_err(PFX "gen_mac_table() failed: using default.\n");
+			goto default_mac;
+		}
+	}
+
+	return &mac_table[6 * index];
+
+default_mac:
+	 return "\x00\x07\xcb\x00\x00\xfd";
+}
+EXPORT_SYMBOL(fbxserialinfo_get_mac_addr);
diff -Nruw linux-6.13.12-fbx/net/batman-adv/fbx./fbx.c linux-6.13.12-fbx/net/batman-adv/fbx/fbx.c
--- linux-6.13.12-fbx/net/batman-adv/fbx./fbx.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/batman-adv/fbx/fbx.c	2025-09-29 14:23:07.621732489 +0200
@@ -0,0 +1,1019 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Nicolas Escande <nico.escande@gmail.com>
+ */
+
+#include <linux/skbuff.h>
+#include <net/genetlink.h>
+
+#include "../main.h"
+#include "../tvlv.h"
+#include "fbx.h"
+
+/* Wire format: keep the header packed so it is exactly 4 bytes on every
+ * arch.  NOTE(review): kernel code usually marks such structs __packed
+ * instead of using #pragma pack - confirm the pragma is intentional.
+ */
+#pragma pack(2)
+/**
+ * batadv_fbx_tvlv_hdr() - FBX TVLV header
+ * @type: FBX tvlv type
+ * @ver: FBX tvlv version
+ * @len: FBX tvlv data length (big endian, excludes this header)
+ */
+struct batadv_fbx_tvlv_hdr {
+	__u8 type;
+	__u8 ver;
+	__be16 len;
+};
+#pragma pack()
+
+static int (*__fbx_handler[BATADV_FBX_SUB_LAST])(struct batadv_hard_iface *,
+						 struct sk_buff *);
+
+/* Compiled-in FBX sub-modules, visited in array order by the dispatch
+ * helpers below
+ */
+static struct batadv_fbx_module const *__fbx_modules[] = {
+#ifdef CONFIG_BATMAN_ADV_FBX_MTU
+	&batadv_mtu_module,
+#endif
+#ifdef CONFIG_BATMAN_ADV_FBX_SLAP
+	&batadv_slap_module,
+#endif
+#ifdef CONFIG_BATMAN_ADV_FBX_PERIF_ROUTER
+	&batadv_router_module,
+#endif
+};
+
+static int batadv_fbx_recv_unhandled_packet(struct batadv_hard_iface *recv_if,
+					    struct sk_buff *skb)
+{
+	/* default table slot: no module registered for this subtype */
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+/**
+ * batadv_fbx_recv_packet() - dispatch a received BATADV_FBX packet to the
+ * handler registered for its subtype
+ * @skb: received packet, consumed in all cases
+ * @hard_iface: hard interface the packet was received on
+ *
+ * Return: verdict from the subtype handler, or NET_RX_DROP
+ */
+static int batadv_fbx_recv_packet(struct sk_buff *skb,
+				  struct batadv_hard_iface *hard_iface)
+{
+	struct batadv_fbx_packet *batadv_fbx;
+
+	/* make sure the whole FBX header sits in the linear area */
+	if (unlikely(!pskb_may_pull(skb, BATADV_FBX_HLEN)))
+		goto drop;
+
+	batadv_fbx = (struct batadv_fbx_packet *)skb->data;
+
+	/* subtype beyond the dispatch table: drop */
+	if (batadv_fbx->subtype >= ARRAY_SIZE(__fbx_handler))
+		goto drop;
+
+	return __fbx_handler[batadv_fbx->subtype](hard_iface, skb);
+
+drop:
+	kfree_skb(skb);
+	return NET_RX_DROP;
+}
+
+/**
+ * batadv_fbx_recv_handler_register() - Register handler for batman-adv FBX sub
+ * packet type
+ * @packet_type: subtype which should be handled
+ * @hdl: receive handler for the packet type
+ *
+ * Return: 0 on success or negative error number in case of failure
+ */
+int batadv_fbx_recv_handler_register(u8 packet_type,
+				     int (*hdl)(struct batadv_hard_iface *,
+						struct sk_buff *))
+{
+	int (*curr)(struct batadv_hard_iface *,
+		    struct sk_buff *);
+
+	/* reject subtypes outside the dispatch table */
+	if (packet_type >= ARRAY_SIZE(__fbx_handler))
+		return -EINVAL;
+
+	curr = __fbx_handler[packet_type];
+
+	if (curr != batadv_fbx_recv_unhandled_packet)
+		return -EBUSY;
+
+	__fbx_handler[packet_type] = hdl;
+	return 0;
+}
+
+/**
+ * batadv_fbx_recv_handler_unregister() - Unregister FBX handler for packet
+ * subtype.
+ * @packet_type: subtype which should no longer be handled
+ */
+void batadv_fbx_recv_handler_unregister(u8 packet_type)
+{
+	/* ignore subtypes outside the dispatch table */
+	if (packet_type >= ARRAY_SIZE(__fbx_handler))
+		return;
+
+	__fbx_handler[packet_type] = batadv_fbx_recv_unhandled_packet;
+}
+
+/**
+ * batadv_fbx_tvlv_container - Container for a FBX TVLV to send in each OGM
+ *
+ * The containers list and its entries are protected by
+ * bat_priv->fbx_tvlv_lock.
+ */
+struct batadv_fbx_tvlv_container {
+	/** @list: hlist node for bat_priv->fbx_tvlv_containers */
+	struct hlist_node list;
+	/** @hdr: FBX tvlv header information */
+	struct batadv_fbx_tvlv_hdr hdr;
+	/** @data: FBX tvlv actual data */
+	u8 data[];
+};
+
+/**
+ * batadv_fbx_tvlv_update() - Rebuild the aggregate FBX TVLV advertised to
+ * the core from the per-type containers currently registered
+ * fbx_tvlv_lock should be held
+ * @bat_priv: the bat priv with all the soft interface information
+ */
+static void batadv_fbx_tvlv_update(struct batadv_priv *bat_priv)
+{
+	struct batadv_fbx_tvlv_container *tvlv;
+	struct batadv_fbx_tvlv_hdr *hdr;
+	u8 *tvlv_value;
+	void *ptr;
+	size_t len = 0;
+
+	lockdep_assert_held(&bat_priv->fbx_tvlv_lock);
+
+	/* total size: one header plus payload per registered container */
+	hlist_for_each_entry(tvlv, &bat_priv->fbx_tvlv_containers, list) {
+		len += sizeof(struct batadv_fbx_tvlv_hdr);
+		len += ntohs(tvlv->hdr.len);
+	}
+
+	/* nothing left to advertise: drop the aggregate container */
+	if (!len) {
+		batadv_tvlv_container_unregister(bat_priv, BATADV_TVLV_FBX, 1);
+		return;
+	}
+
+	/* GFP_ATOMIC: we run under the fbx_tvlv_lock spinlock */
+	tvlv_value = kmalloc(len, GFP_ATOMIC);
+	if (!tvlv_value)
+		/* NOTE(review): on failure the previously registered
+		 * aggregate stays in place, possibly stale - confirm this
+		 * is acceptable
+		 */
+		return;
+
+	/* serialize: headers stay in network byte order, payload verbatim */
+	ptr = tvlv_value;
+	hlist_for_each_entry(tvlv, &bat_priv->fbx_tvlv_containers, list) {
+		hdr = ptr;
+		hdr->type = tvlv->hdr.type;
+		hdr->ver = tvlv->hdr.ver;
+		hdr->len = tvlv->hdr.len;
+		ptr = hdr + 1;
+		memcpy(ptr, tvlv->data, ntohs(tvlv->hdr.len));
+		ptr = (u8 *)ptr + ntohs(tvlv->hdr.len);
+	}
+
+	/* the buffer is freed right away, so the core tvlv code is
+	 * presumed to keep its own copy - see batadv_tvlv_container_register
+	 */
+	batadv_tvlv_container_register(bat_priv, BATADV_TVLV_FBX, 1,
+				       tvlv_value, len);
+	kfree(tvlv_value);
+}
+
+/**
+ * batadv_fbx_tvlv_container_unregister() - Unregister FBX TVLV container of a
+ * specific type and version
+ * Takes bat_priv->fbx_tvlv_lock
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: FBX tvlv container type to unregister
+ * @ver: FBX tvlv container type version to unregister
+ */
+void batadv_fbx_tvlv_container_unregister(struct batadv_priv *bat_priv,
+					  u8 type, u8 ver)
+{
+	struct batadv_fbx_tvlv_container *tvlv;
+
+	spin_lock_bh(&bat_priv->fbx_tvlv_lock);
+	hlist_for_each_entry(tvlv, &bat_priv->fbx_tvlv_containers, list) {
+		if (tvlv->hdr.type == type && tvlv->hdr.ver == ver) {
+			hlist_del(&tvlv->list);
+			kfree(tvlv);
+			/* the iteration cursor was just freed: rebuild the
+			 * aggregate and stop walking the list
+			 */
+			batadv_fbx_tvlv_update(bat_priv);
+			break;
+		}
+	}
+	spin_unlock_bh(&bat_priv->fbx_tvlv_lock);
+}
+
+/**
+ * batadv_fbx_tvlv_container_register() - Register FBX TVLV container of a
+ * specific type and version, replacing any existing one
+ * Takes bat_priv->fbx_tvlv_lock
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: FBX tvlv container type to register
+ * @ver: FBX tvlv container type version to register
+ * @tvlv: FBX tvlv payload to advertise
+ * @len: payload length in bytes
+ */
+void batadv_fbx_tvlv_container_register(struct batadv_priv *bat_priv,
+					u8 type, u8 ver,
+					void *tvlv, u16 len)
+{
+	struct batadv_fbx_tvlv_container *tvlv_old, *tvlv_new;
+
+	if (!tvlv)
+		return;
+
+	/* GFP_ATOMIC can fail: bail out instead of dereferencing NULL */
+	tvlv_new = kzalloc(sizeof(*tvlv_new) + len, GFP_ATOMIC);
+	if (!tvlv_new)
+		return;
+
+	tvlv_new->hdr.type = type;
+	tvlv_new->hdr.ver = ver;
+	tvlv_new->hdr.len = htons(len);
+
+	memcpy(tvlv_new->data, tvlv, len);
+	INIT_HLIST_NODE(&tvlv_new->list);
+
+	spin_lock_bh(&bat_priv->fbx_tvlv_lock);
+	/* drop any previous container for the same (type, ver) */
+	hlist_for_each_entry(tvlv_old, &bat_priv->fbx_tvlv_containers, list) {
+		if (tvlv_old->hdr.type == type && tvlv_old->hdr.ver == ver) {
+			hlist_del(&tvlv_old->list);
+			kfree(tvlv_old);
+			break;
+		}
+	}
+	hlist_add_head(&tvlv_new->list, &bat_priv->fbx_tvlv_containers);
+	batadv_fbx_tvlv_update(bat_priv);
+	spin_unlock_bh(&bat_priv->fbx_tvlv_lock);
+}
+/**
+ * struct batadv_fbx_tvlv_handler - handler for FBX specific tvlv type and
+ * version
+ */
+struct batadv_fbx_tvlv_handler {
+	/** @list: hlist node to keep list of register handler in bat_priv */
+	struct hlist_node list;
+	/** @ref: reference counter for this handler */
+	struct kref ref;
+	/** @rcu: struct used to free handler in RCU-safe manner */
+	struct rcu_head rcu;
+	/**
+	 * @ogm: Callback called when matching FBX tvlv is received in OGM
+	 * packet
+	 */
+	void (*ogm)(struct batadv_priv *bat_priv,
+		    struct batadv_orig_node *orig,
+		    void *tvlv, u16 len);
+	/**
+	 * @uni: Callback called when matching FBX tvlv is received in direct
+	 * unicast packet
+	 */
+	int (*uni)(struct batadv_priv *bat_priv,
+		   u8 *src, u8 *dst,
+		   void *tvlv, u16 len);
+	/** @type: FBX tvlv type this handler is responsible for */
+	u8 type;
+	/** @ver: FBX tvlv version this handler is responsible for */
+	u8 ver;
+};
+
+/**
+ * batadv_fbx_tvlv_handler_release() - free a FBX tvlv handler once its
+ * refcount reaches zero
+ * @ref: FBX tvlv handler's ref pointer
+ */
+static void batadv_fbx_tvlv_handler_release(struct kref *ref)
+{
+	struct batadv_fbx_tvlv_handler *handler =
+		container_of(ref, struct batadv_fbx_tvlv_handler, ref);
+
+	/* RCU readers may still walk the list: defer the actual free */
+	kfree_rcu(handler, rcu);
+}
+
+/**
+ * batadv_fbx_tvlv_handler_put() - decrement FBX tvlv handler ref, releasing it
+ * if needed
+ * @hdl: the FBX tvlv handler to put, may be NULL
+ */
+static void batadv_fbx_tvlv_handler_put(struct batadv_fbx_tvlv_handler *hdl)
+{
+	if (hdl)
+		kref_put(&hdl->ref, batadv_fbx_tvlv_handler_release);
+}
+
+/**
+ * batadv_fbx_tvlv_handler_get() - Get a FBX tvlv handler from the register
+ * handler list.
+ * Takes rcu_read_lock()
+ * @bat_priv: the bat priv with all the soft interface information
+ * @type: tvlv handler type to look for
+ * @ver: tvlv handler version to look for
+ * @return: tvlv handler if found (with ref incremented), NULL otherwise
+ */
+static struct batadv_fbx_tvlv_handler *
+batadv_fbx_tvlv_handler_get(struct batadv_priv *bat_priv, u8 type, u8 ver)
+{
+	struct batadv_fbx_tvlv_handler *hdl, *tvlv_hdl = NULL;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(hdl, &bat_priv->fbx_tvlv_handlers, list) {
+		if (hdl->type != type)
+			continue;
+		if (hdl->ver != ver)
+			continue;
+		/* skip a handler whose refcount already hit zero: it is
+		 * concurrently being released
+		 */
+		if (!kref_get_unless_zero(&hdl->ref))
+			continue;
+		tvlv_hdl = hdl;
+		break;
+	}
+	rcu_read_unlock();
+	return tvlv_hdl;
+}
+
+/**
+ * batadv_fbx_tvlv_ogm_handler() - parse a OGM FBX TVLV buffer to call
+ * appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: originator node emitting the OGM packet
+ * @flags: OGM handler flags
+ * @tvlv: tvlv content
+ * @len: tvlv content length
+ */
+static void batadv_fbx_tvlv_ogm_handler(struct batadv_priv *bat_priv,
+					struct batadv_orig_node *orig,
+					u8 flags, void *tvlv, u16 len)
+{
+	struct batadv_fbx_tvlv_handler *hdl;
+	struct batadv_fbx_tvlv_hdr *hdr;
+	void *fbx_tvlv;
+	u16 fbx_len;
+
+	while (len >= sizeof(*hdr)) {
+		hdr = tvlv;
+		fbx_len = ntohs(hdr->len);
+		fbx_tvlv = hdr + 1;
+		len -= sizeof(*hdr);
+
+		/* truncated TVLV: stop parsing */
+		if (fbx_len > len)
+			break;
+
+		/* the next record starts after this one's payload, i.e. at
+		 * fbx_tvlv + fbx_len; advancing from hdr would re-read part
+		 * of the current payload as a header
+		 */
+		tvlv = (u8 *)fbx_tvlv + fbx_len;
+		len -= fbx_len;
+
+		hdl = batadv_fbx_tvlv_handler_get(bat_priv, hdr->type,
+						  hdr->ver);
+		if (!hdl)
+			continue;
+		if (hdl->ogm)
+			hdl->ogm(bat_priv, orig, fbx_tvlv, fbx_len);
+		batadv_fbx_tvlv_handler_put(hdl);
+	}
+}
+
+/**
+ * batadv_fbx_tvlv_uni_handler() - parse a direct unicast FBX TVLV buffer to
+ * call appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @src: src MAC address of the unicast TVLV packet
+ * @dst: dst MAC address of the unicast TVLV packet
+ * @tvlv: tvlv content
+ * @len: tvlv content length
+ * @return: NET_RX_SUCCESS
+ */
+static int batadv_fbx_tvlv_uni_handler(struct batadv_priv *bat_priv,
+				       u8 *src, u8 *dst,
+				       void *tvlv, u16 len)
+{
+	struct batadv_fbx_tvlv_handler *hdl;
+	struct batadv_fbx_tvlv_hdr *hdr;
+	void *fbx_tvlv;
+	u16 fbx_len;
+
+	while (len >= sizeof(*hdr)) {
+		hdr = tvlv;
+		fbx_len = ntohs(hdr->len);
+		fbx_tvlv = hdr + 1;
+		len -= sizeof(*hdr);
+
+		/* truncated TVLV: stop parsing */
+		if (fbx_len > len)
+			break;
+
+		/* the next record starts after this one's payload, i.e. at
+		 * fbx_tvlv + fbx_len; advancing from hdr would re-read part
+		 * of the current payload as a header
+		 */
+		tvlv = (u8 *)fbx_tvlv + fbx_len;
+		len -= fbx_len;
+
+		hdl = batadv_fbx_tvlv_handler_get(bat_priv, hdr->type,
+						  hdr->ver);
+		if (!hdl)
+			continue;
+		if (hdl->uni)
+			hdl->uni(bat_priv, src, dst, fbx_tvlv, fbx_len);
+		batadv_fbx_tvlv_handler_put(hdl);
+	}
+
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_fbx_tvlv_mcast_handler() - parse a multicast FBX TVLV buffer to
+ * call appropriate handlers
+ * @bat_priv: the bat priv with all the soft interface information
+ * @skb: received TVLV skb data
+ * @return: NET_RX_SUCCESS
+ */
+static int batadv_fbx_tvlv_mcast_handler(struct batadv_priv *bat_priv,
+					 struct sk_buff *skb)
+{
+	/* not implemented: complain loudly but accept the packet */
+	WARN(1, "FBX multicast TVLV handler not supported\n");
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * batadv_fbx_tvlv_handler_register() - Register a FBX tvlv handler
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @type: FBX tvlv subtype
+ * @ver: tvlv handler version
+ * @ogm: OGM FBX tvlv handler callback function
+ * @uni: Direct unicast tvlv handler callback function
+ *
+ * If a handler for (@type, @ver) already exists this is a no-op.
+ */
+void batadv_fbx_tvlv_handler_register(struct batadv_priv *bat_priv,
+				      u8 type, u8 ver,
+				      void (*ogm)(struct batadv_priv *bat_priv,
+						  struct batadv_orig_node *orig,
+						  void *tvlv, u16 len),
+				      int (*uni)(struct batadv_priv *bat_priv,
+						 u8 *src, u8 *dst,
+						 void *tvlv, u16 len))
+{
+	struct batadv_fbx_tvlv_handler *tvlv_hdl;
+
+	spin_lock_bh(&bat_priv->fbx_tvlv_lock);
+	/* existing handler: the lookup reference is dropped at "out" */
+	tvlv_hdl = batadv_fbx_tvlv_handler_get(bat_priv, type, ver);
+	if (tvlv_hdl)
+		goto out;
+
+	/* NOTE(review): allocation failure is silently ignored - confirm
+	 * callers can cope with a missing handler
+	 */
+	tvlv_hdl = kzalloc(sizeof(*tvlv_hdl), GFP_ATOMIC);
+	if (!tvlv_hdl)
+		goto out;
+
+	tvlv_hdl->ogm = ogm;
+	tvlv_hdl->uni = uni;
+	tvlv_hdl->type = type;
+	tvlv_hdl->ver = ver;
+	/* kref_init(): one reference owned by this function ... */
+	kref_init(&tvlv_hdl->ref);
+	INIT_HLIST_NODE(&tvlv_hdl->list);
+
+	/* ... and a second one owned by the handlers list */
+	kref_get(&tvlv_hdl->ref);
+	hlist_add_head_rcu(&tvlv_hdl->list, &bat_priv->fbx_tvlv_handlers);
+out:
+	spin_unlock_bh(&bat_priv->fbx_tvlv_lock);
+	/* drops the local (or lookup) reference; NULL-safe */
+	batadv_fbx_tvlv_handler_put(tvlv_hdl);
+}
+
+/**
+ * batadv_fbx_tvlv_handler_unregister() - Unregister a FBX tvlv handler
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @type: FBX tvlv subtype
+ * @ver: tvlv handler version
+ */
+void batadv_fbx_tvlv_handler_unregister(struct batadv_priv *bat_priv,
+					u8 type, u8 ver)
+{
+	struct batadv_fbx_tvlv_handler *tvlv_hdl;
+
+	tvlv_hdl = batadv_fbx_tvlv_handler_get(bat_priv, type, ver);
+	if (!tvlv_hdl)
+		return;
+	/* drop the lookup reference; the list still holds one, so the
+	 * handler stays alive until the final put below.
+	 * NOTE(review): two concurrent unregisters of the same handler
+	 * could both reach hlist_del_rcu() - confirm callers serialize
+	 */
+	batadv_fbx_tvlv_handler_put(tvlv_hdl);
+	spin_lock_bh(&bat_priv->fbx_tvlv_lock);
+	hlist_del_rcu(&tvlv_hdl->list);
+	spin_unlock_bh(&bat_priv->fbx_tvlv_lock);
+	/* drop the list's reference; RCU readers may still hold their own */
+	batadv_fbx_tvlv_handler_put(tvlv_hdl);
+}
+
+/**
+ * batadv_fbx_shortcut: Check if we are a shortcut for dest orig
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @dest: destination address
+ * @return: true if this node is a valid shortcut, false otherwise
+ */
+bool batadv_fbx_shortcut(struct batadv_priv *bat_priv, u8 const *dest)
+{
+	size_t idx;
+
+	/* first module claiming the shortcut wins */
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->shortcut && mod->ops->shortcut(bat_priv, dest))
+			return true;
+	}
+	return false;
+}
+
+/**
+ * batadv_fbx_check_skb_rx: Check ingress skb packet
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @type: B.A.T.M.A.N-Adv packet type
+ * @skb: ingress skb
+ * @return: true if packet shall pass, false otherwise
+ */
+bool batadv_fbx_check_skb_rx(struct batadv_priv *bat_priv,
+			    enum batadv_packettype type,
+			    struct sk_buff *skb)
+{
+	size_t idx;
+
+	/* any module may veto the packet; first veto wins */
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->check_skb_rx &&
+		    !mod->ops->check_skb_rx(bat_priv, type, skb))
+			return false;
+	}
+	return true;
+}
+
+/**
+ * batadv_fbx_check_skb_tx: Check egress skb packet
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @skb: egress skb
+ * @vid: skb's vlan ID
+ * @return: true if packet shall pass, false otherwise
+ */
+bool batadv_fbx_check_skb_tx(struct batadv_priv *bat_priv,
+			     struct sk_buff *skb,
+			     unsigned short vid)
+{
+	size_t idx;
+
+	/* any module may veto the packet; first veto wins */
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->check_skb_tx &&
+		    !mod->ops->check_skb_tx(bat_priv, skb, vid))
+			return false;
+	}
+	return true;
+}
+
+/**
+ * batadv_fbx_ogm_process: FBX specific OGM2 process
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig_node: The orig node that generates this OGM
+ * @neigh: Neighbour that sends this OGM on behalf of orig_node
+ * @ogm: The OGM2 packet
+ */
+void batadv_fbx_ogm_process(struct batadv_priv *bat_priv,
+			    struct batadv_orig_node *orig_node,
+			    struct batadv_neigh_node *neigh,
+			    struct batadv_ogm2_packet *ogm)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->ogm_process)
+			mod->ops->ogm_process(bat_priv, orig_node, neigh, ogm);
+	}
+}
+
+/**
+ * batadv_fbx_neigh_release: Call FBX specific work on neighbour release event
+ * @neigh: the neighbor being freed
+ */
+void batadv_fbx_neigh_release(struct batadv_hardif_neigh_node *neigh)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->neigh_release)
+			mod->ops->neigh_release(neigh);
+	}
+}
+
+/**
+ * batadv_fbx_neigh_init: Call FBX specific work on neighbour creation event
+ *
+ * @neigh: Neighbor to initialize
+ * @return: 0 on success, negative number otherwise
+ */
+int batadv_fbx_neigh_init(struct batadv_hardif_neigh_node *neigh)
+{
+	struct batadv_fbx_module const *m;
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		/* only test ret when an init hook actually ran: it was
+		 * previously read uninitialized when a module had no
+		 * neigh_init op
+		 */
+		if (m->ops->neigh_init) {
+			ret = m->ops->neigh_init(neigh);
+			if (ret)
+				goto clean;
+		}
+	}
+
+	return 0;
+
+clean:
+	/* unwind the modules already initialized, most recent first; the
+	 * failing module (index i) is not released
+	 */
+	for (; i > 0; i--) {
+		m = __fbx_modules[i - 1];
+		if (m->ops->neigh_release)
+			m->ops->neigh_release(neigh);
+	}
+
+	/* propagate the module's error code instead of a bare -1 */
+	return ret;
+}
+
+/**
+ * batadv_fbx_orig_release: Call FBX specific work on originator release event
+ * @orig: the originator being freed
+ */
+void batadv_fbx_orig_release(struct batadv_orig_node *orig)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->orig_release)
+			mod->ops->orig_release(orig);
+	}
+}
+
+/**
+ * batadv_fbx_orig_init: Call FBX specific work on originator creation event
+ *
+ * @orig: Originator to initialize
+ * @return: 0 on success, negative number otherwise
+ */
+int batadv_fbx_orig_init(struct batadv_orig_node *orig)
+{
+	struct batadv_fbx_module const *m;
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		/* only test ret when an init hook actually ran: it was
+		 * previously read uninitialized when a module had no
+		 * orig_init op
+		 */
+		if (m->ops->orig_init) {
+			ret = m->ops->orig_init(orig);
+			if (ret)
+				goto clean;
+		}
+	}
+
+	return 0;
+
+clean:
+	/* unwind the modules already initialized, most recent first */
+	for (; i > 0; i--) {
+		m = __fbx_modules[i - 1];
+		if (m->ops->orig_release)
+			m->ops->orig_release(orig);
+	}
+
+	/* propagate the module's error code instead of a bare -1 */
+	return ret;
+}
+
+/**
+ * batadv_fbx_orig_ifinfo_release: Call FBX specific work on originator ifinfo
+ * release event
+ *
+ * @orig_ifinfo: The originator ifinfo being freed
+ */
+void batadv_fbx_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->orig_ifinfo_release)
+			mod->ops->orig_ifinfo_release(orig_ifinfo);
+	}
+}
+
+/**
+ * batadv_fbx_orig_ifinfo_init: Call FBX specific work on originator ifinfo
+ * creation event
+ *
+ * @orig_ifinfo: Originator ifinfo to initialize
+ * @return: 0 on success, negative number otherwise
+ */
+int batadv_fbx_orig_ifinfo_init(struct batadv_orig_ifinfo *orig_ifinfo)
+{
+	struct batadv_fbx_module const *m;
+	int ret, i;
+
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		/* only test ret when an init hook actually ran: it was
+		 * previously read uninitialized when a module had no
+		 * orig_ifinfo_init op
+		 */
+		if (m->ops->orig_ifinfo_init) {
+			ret = m->ops->orig_ifinfo_init(orig_ifinfo);
+			if (ret)
+				goto clean;
+		}
+	}
+
+	return 0;
+
+clean:
+	/* unwind the modules already initialized, most recent first */
+	for (; i > 0; i--) {
+		m = __fbx_modules[i - 1];
+		if (m->ops->orig_ifinfo_release)
+			m->ops->orig_ifinfo_release(orig_ifinfo);
+	}
+
+	/* propagate the module's error code instead of a bare -1 */
+	return ret;
+}
+
+/**
+ * batadv_fbx_hardif_update() - Update hardif event
+ * @hard_iface: the hard interface being updated
+ */
+void batadv_fbx_hardif_update(struct batadv_hard_iface *hard_iface)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->hardif_update)
+			mod->ops->hardif_update(hard_iface);
+	}
+}
+
+/**
+ * batadv_fbx_primary_update() - Update primary iface event
+ * @bat_priv: the bat priv with all the soft interface information
+ * @primary: the new primary hard interface
+ */
+void batadv_fbx_primary_update(struct batadv_priv *bat_priv,
+			       struct batadv_hard_iface *primary)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->primary_update)
+			mod->ops->primary_update(bat_priv, primary);
+	}
+}
+
+/**
+ * batadv_fbx_tt_local_add() - Notify FBX modules a local TT is added
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tt: Local TT that is added
+ * @tg: Matching global TT entry, if any
+ * @ifindex: interface index the entry was learned on
+ * @return: false if we want to prevent roaming notification, true otherwise
+ */
+bool batadv_fbx_tt_local_add(struct batadv_priv *bat_priv,
+			     struct batadv_tt_local_entry *tt,
+			     struct batadv_tt_global_entry *tg,
+			     int ifindex)
+{
+	bool allow = true;
+	size_t idx;
+
+	/* every module is consulted; any false result vetoes */
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->tt_local_add &&
+		    !mod->ops->tt_local_add(bat_priv, tt, tg, ifindex))
+			allow = false;
+	}
+
+	return allow;
+}
+
+/**
+ * batadv_fbx_tt_local_del() - Notify FBX modules a local TT is deleted
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tt: Local TT that is removed
+ * @return: True if local TT entry should be removed, false otherwise
+ */
+bool batadv_fbx_tt_local_del(struct batadv_priv *bat_priv,
+			     struct batadv_tt_local_entry *tt)
+{
+	bool allow = true;
+	size_t idx;
+
+	/* every module is consulted; any false result vetoes */
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->tt_local_del &&
+		    !mod->ops->tt_local_del(bat_priv, tt))
+			allow = false;
+	}
+	return allow;
+}
+
+/**
+ * batadv_fbx_tt_global_add() - Notify FBX modules a global TT is added
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tt: the global TT that is added
+ * @orig: Originator that can reach this global TT
+ * @return: False if we want to prevent matching local TT removal, true
+ * otherwise
+ */
+bool batadv_fbx_tt_global_add(struct batadv_priv *bat_priv,
+			      struct batadv_tt_global_entry *tt,
+			      struct batadv_orig_node *orig)
+{
+	bool allow = true;
+	size_t idx;
+
+	/* every module is consulted; any false result vetoes */
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->tt_global_add &&
+		    !mod->ops->tt_global_add(bat_priv, tt, orig))
+			allow = false;
+	}
+
+	return allow;
+}
+
+/**
+ * batadv_fbx_tt_global_del() - Notify FBX modules a global TT is deleted
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tt: the global TT that is removed
+ * @orig: Originator that was able to reach this global TT
+ */
+void batadv_fbx_tt_global_del(struct batadv_priv *bat_priv,
+			      struct batadv_tt_global_entry *tt,
+			      struct batadv_orig_node *orig)
+{
+	size_t idx;
+
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->tt_global_del)
+			mod->ops->tt_global_del(bat_priv, tt, orig);
+	}
+}
+
+/* Policy for attributes nested under BATADV_ATTR_FBX */
+static const struct nla_policy batadv_fbx_policy[NUM_BATADV_ATTR_FBX] = {
+	[BATADV_ATTR_FBX_SLAP_MASTER_ADDRESS] = { .len = ETH_ALEN },
+	[BATADV_ATTR_FBX_SLAP_MASTER_PRIO] = { .type = NLA_U32 },
+	[BATADV_ATTR_FBX_SLAP_IFINDEX] = { .type = NLA_U32 },
+	[BATADV_ATTR_FBX_SLAP_PRIO] = { .type = NLA_U32 },
+};
+
+/**
+ * batadv_fbx_nl_parse_fbx() - Parse FBX specific attributes of NL message
+ * @info: NL message info, may be NULL
+ * @fbx: FBX attr array
+ * @max: Max FBX NL id
+ * @return: return @fbx if attributes were found and parsed correctly, NULL
+ * otherwise
+ */
+static struct nlattr **batadv_fbx_nl_parse_fbx(struct genl_info *info,
+					       struct nlattr *fbx[],
+					       size_t max)
+{
+	struct nlattr *attr;
+	int err;
+
+	/* callers may legitimately have no request context */
+	if (!info)
+		return NULL;
+
+	/* no nested FBX attributes in this request */
+	attr = info->attrs[BATADV_ATTR_FBX];
+	if (!attr)
+		return NULL;
+
+	err = nla_parse_nested_deprecated(fbx, max, attr, batadv_fbx_policy,
+					  NULL);
+	if (err)
+		return NULL;
+
+	return fbx;
+}
+
+/**
+ * batadv_fbx_nl_start_fbx() - Start nested FBX attributes of NL response
+ * @skb: NL response, may be NULL
+ * @return: the opened nest attribute, or NULL
+ */
+static struct nlattr *batadv_fbx_nl_start_fbx(struct sk_buff *skb)
+{
+	return skb ? nla_nest_start(skb, BATADV_ATTR_FBX) : NULL;
+}
+
+/**
+ * batadv_fbx_nl_stop_fbx() - Stop nested FBX attributes of NL response
+ * @skb: NL response, may be NULL
+ * @attr: FBX nest to close, may be NULL
+ */
+static void batadv_fbx_nl_stop_fbx(struct sk_buff *skb, struct nlattr *attr)
+{
+	if (skb && attr)
+		nla_nest_end(skb, attr);
+}
+
+/**
+ * batadv_fbx_nl() - Handle FBX specific part of a B.A.T.M.A.N-Adv NL command
+ * @bat_priv: The bat priv with all the soft interface information
+ * @cmd: B.A.T.M.A.N-Adv NL command
+ * @info: NL message info
+ * @skb: NL message to fill
+ * @data: Handler specific data
+ */
+void batadv_fbx_nl(struct batadv_priv *bat_priv,
+		   enum batadv_nl_commands cmd,
+		   struct genl_info *info,
+		   struct sk_buff *skb,
+		   void *data)
+{
+	struct nlattr *nest, **attr, *fbxattr[NUM_BATADV_ATTR_FBX];
+	struct batadv_fbx_module const *m;
+	int i, j;
+
+	/* attr is NULL when the request carries no (valid) FBX attributes,
+	 * nest is NULL when there is no response skb to fill; module
+	 * handlers receive those values as-is
+	 */
+	attr = batadv_fbx_nl_parse_fbx(info, fbxattr, BATADV_ATTR_FBX_MAX);
+	nest = batadv_fbx_nl_start_fbx(skb);
+
+	/* fan the command out to every module op registered for it */
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		for (j = 0; j < m->nl_ops_sz; ++j) {
+			if (m->nl_ops[j].cmd == cmd) {
+				m->nl_ops[j].hdl(bat_priv, info, attr,
+						 skb, data);
+			}
+		}
+	}
+
+	batadv_fbx_nl_stop_fbx(skb, nest);
+}
+
+/**
+ * batadv_fbx_new_priv: init FBX specific bits in bat_priv
+ * @bat_priv: the bat_priv to init
+ *
+ * @return: 0 if every module initialized successfully, nonzero otherwise
+ */
+int batadv_fbx_new_priv(struct batadv_priv *bat_priv)
+{
+	struct batadv_fbx_module const *m;
+	int ret = 0, i;
+
+	INIT_HLIST_HEAD(&bat_priv->fbx_tvlv_handlers);
+	INIT_HLIST_HEAD(&bat_priv->fbx_tvlv_containers);
+	spin_lock_init(&bat_priv->fbx_tvlv_lock);
+
+	/* NOTE(review): module results are OR-ed together, so a mix of
+	 * error codes yields a mangled value; only zero/nonzero looks
+	 * meaningful to the caller - confirm
+	 */
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		m = __fbx_modules[i];
+		if (m->ops->new_priv)
+			ret |= m->ops->new_priv(bat_priv);
+	}
+
+	/* hook the FBX TVLV dispatchers into the core tvlv machinery */
+	batadv_tvlv_handler_register(bat_priv, batadv_fbx_tvlv_ogm_handler,
+				     batadv_fbx_tvlv_uni_handler,
+				     batadv_fbx_tvlv_mcast_handler,
+				     BATADV_TVLV_FBX, 1, BATADV_NO_FLAGS);
+
+	return ret;
+}
+
+/**
+ * batadv_fbx_free_priv: release FBX specific bits in bat_priv
+ * @bat_priv: the bat_priv to release
+ */
+void batadv_fbx_free_priv(struct batadv_priv *bat_priv)
+{
+	size_t idx;
+
+	/* stop dispatching FBX TVLVs before tearing the modules down */
+	batadv_tvlv_handler_unregister(bat_priv, BATADV_TVLV_FBX, 1);
+
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->free_priv)
+			mod->ops->free_priv(bat_priv);
+	}
+}
+
+/**
+ * batadv_fbx_init: Init B.A.T.M.A.N-Adv fbx submodule
+ *
+ * @return: result of registering the BATADV_FBX receive handler
+ */
+int __init batadv_fbx_init(void)
+{
+	struct batadv_fbx_module const *m;
+	int ret, i;
+
+	/* default every subtype slot to the dropping handler */
+	for (i = 0; i < ARRAY_SIZE(__fbx_handler); i++)
+		__fbx_handler[i] = batadv_fbx_recv_unhandled_packet;
+
+
+	/* module init failures are logged but deliberately not fatal */
+	for (i = 0; i < ARRAY_SIZE(__fbx_modules); i++) {
+		ret = 0;
+		m = __fbx_modules[i];
+		if (m->ops->init)
+			ret = m->ops->init();
+		if (ret)
+			pr_err("batadv: Cannot init fbx module %s\n", m->name);
+	}
+
+	return batadv_recv_handler_register(BATADV_FBX,
+					    batadv_fbx_recv_packet);
+}
+
+/**
+ * batadv_fbx_exit: Exit B.A.T.M.A.N-Adv fbx submodule
+ */
+void __exit batadv_fbx_exit(void)
+{
+	size_t idx;
+
+	/* stop receiving BATADV_FBX packets before the modules go away */
+	batadv_recv_handler_unregister(BATADV_FBX);
+
+	for (idx = 0; idx < ARRAY_SIZE(__fbx_modules); idx++) {
+		struct batadv_fbx_module const *mod = __fbx_modules[idx];
+
+		if (mod->ops->exit)
+			mod->ops->exit();
+	}
+}
diff -Nruw linux-6.13.12-fbx/net/batman-adv/fbx/fbx.h linux-6.13.12-fbx/net/batman-adv/fbx/fbx.h
--- linux-6.13.12-fbx/net/batman-adv/fbx/fbx.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/batman-adv/fbx/fbx.h	2025-09-29 14:23:07.621732489 +0200
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) B.A.T.M.A.N. contributors:
+ *
+ * Nicolas Escande <nico.escande@gmail.com>
+ */
+
+#ifndef _NET_BATMAN_ADV_FBX_H_
+#define _NET_BATMAN_ADV_FBX_H_
+
+#ifdef CONFIG_BATMAN_ADV_FBX
+
+struct genl_info;
+
+enum batadv_fbx_tvlv_type {
+	BATADV_FBX_TVLV_SLAP_MASTER,
+};
+
+#define BATADV_FBX_TVLV_SLAP_VERSION 1
+
+/**
+ * struct batadv_fbx_module_ops - hooks an FBX module can implement
+ *
+ * Callbacks are NULL-checked by the dispatch helpers in fbx.c before being
+ * invoked, so a module only fills in the events it cares about: module
+ * lifetime (init/exit), per-bat_priv setup/teardown, neighbor/originator/
+ * ifinfo lifetime, hard-interface changes, translation-table updates and
+ * skb/OGM processing.
+ */
+struct batadv_fbx_module_ops {
+	int (*init)(void);
+	void (*exit)(void);
+	int (*new_priv)(struct batadv_priv *bat_priv);
+	void (*free_priv)(struct batadv_priv *bat_priv);
+	void (*neigh_release)(struct batadv_hardif_neigh_node *neigh);
+	int (*neigh_init)(struct batadv_hardif_neigh_node *neigh);
+	void (*orig_release)(struct batadv_orig_node *orig);
+	int (*orig_init)(struct batadv_orig_node *orig);
+	void (*orig_ifinfo_release)(struct batadv_orig_ifinfo *orig_ifinfo);
+	int (*orig_ifinfo_init)(struct batadv_orig_ifinfo *orig_ifinfo);
+	void (*hardif_update)(struct batadv_hard_iface *hard_iface);
+	void (*primary_update)(struct batadv_priv *bat_priv,
+			       struct batadv_hard_iface *primary);
+	bool (*tt_local_add)(struct batadv_priv *bat_priv,
+			     struct batadv_tt_local_entry *tl,
+			     struct batadv_tt_global_entry *tg,
+			     int ifindex);
+	bool (*tt_local_del)(struct batadv_priv *bat_priv,
+			     struct batadv_tt_local_entry *tt);
+	bool (*tt_global_add)(struct batadv_priv *bat_priv,
+			      struct batadv_tt_global_entry *tt,
+			      struct batadv_orig_node *orig);
+	void (*tt_global_del)(struct batadv_priv *bat_priv,
+			      struct batadv_tt_global_entry *tt,
+			      struct batadv_orig_node *orig);
+	bool (*shortcut)(struct batadv_priv *bat_priv, u8 const *dest);
+	bool (*check_skb_rx)(struct batadv_priv *bat_priv,
+			     enum batadv_packettype type,
+			     struct sk_buff *skb);
+	bool (*check_skb_tx)(struct batadv_priv *bat_priv,
+			     struct sk_buff *skb,
+			     unsigned short vid);
+	void (*ogm_process)(struct batadv_priv *bat_priv,
+			    struct batadv_orig_node *orig_node,
+			    struct batadv_neigh_node *neigh_node,
+			    struct batadv_ogm2_packet *ogm);
+};
+
+/**
+ * struct batadv_fbx_nl_ops - netlink hook of an FBX module
+ * @cmd: the batman-adv netlink command this handler extends
+ * @hdl: callback invoked for @cmd (bat_priv, genl info, parsed attributes,
+ *	 message under construction, command specific data)
+ */
+struct batadv_fbx_nl_ops {
+	enum batadv_nl_commands cmd;
+	void (*hdl)(struct batadv_priv *, struct genl_info *,
+		    struct nlattr **, struct sk_buff *, void *data);
+};
+
+/**
+ * struct batadv_fbx_module - an FBX extension module descriptor
+ * @name: human readable module name (used in log messages)
+ * @ops: lifetime/event hooks, see struct batadv_fbx_module_ops
+ * @nl_ops: optional array of netlink hooks
+ * @nl_ops_sz: number of entries in @nl_ops
+ */
+struct batadv_fbx_module {
+	char const *name;
+	struct batadv_fbx_module_ops const *ops;
+	struct batadv_fbx_nl_ops const *nl_ops;
+	size_t nl_ops_sz;
+};
+
+int __init batadv_fbx_init(void);
+void __exit batadv_fbx_exit(void);
+int batadv_fbx_new_priv(struct batadv_priv *bat_priv);
+void batadv_fbx_free_priv(struct batadv_priv *bat_priv);
+void batadv_fbx_neigh_release(struct batadv_hardif_neigh_node *neigh);
+int batadv_fbx_neigh_init(struct batadv_hardif_neigh_node *neigh);
+void batadv_fbx_orig_release(struct batadv_orig_node *orig);
+int batadv_fbx_orig_init(struct batadv_orig_node *orig);
+void batadv_fbx_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_info);
+int batadv_fbx_orig_ifinfo_init(struct batadv_orig_ifinfo *orig_info);
+void batadv_fbx_hardif_update(struct batadv_hard_iface *hard_iface);
+void batadv_fbx_primary_update(struct batadv_priv *bat_priv,
+			       struct batadv_hard_iface *primary);
+void batadv_fbx_nl(struct batadv_priv *bat_priv, enum batadv_nl_commands cmd,
+		   struct genl_info *info, struct sk_buff *skb, void *data);
+
+void batadv_fbx_tvlv_container_unregister(struct batadv_priv *bat_priv,
+					  u8 type, u8 ver);
+void batadv_fbx_tvlv_container_register(struct batadv_priv *bat_priv,
+					u8 type, u8 ver,
+					void *tvlv, u16 len);
+void batadv_fbx_tvlv_handler_register(struct batadv_priv *bat_priv,
+				      u8 type, u8 ver,
+				      void (*ogm)(struct batadv_priv *bat_priv,
+						  struct batadv_orig_node *orig,
+						  void *tvlv, u16 len),
+				      int (*uni)(struct batadv_priv *bat_priv,
+						 u8 *src, u8 *dst,
+						 void *tvlv, u16 len));
+void batadv_fbx_tvlv_handler_unregister(struct batadv_priv *bat_priv,
+					u8 type, u8 ver);
+bool batadv_fbx_tt_local_add(struct batadv_priv *bat_priv,
+			     struct batadv_tt_local_entry *tl,
+			     struct batadv_tt_global_entry *tg,
+			     int ifindex);
+bool batadv_fbx_tt_local_del(struct batadv_priv *bat_priv,
+			     struct batadv_tt_local_entry *tt_local);
+bool batadv_fbx_tt_global_add(struct batadv_priv *bat_priv,
+			      struct batadv_tt_global_entry *tt_global,
+			      struct batadv_orig_node *orig_node);
+void batadv_fbx_tt_global_del(struct batadv_priv *bat_priv,
+			      struct batadv_tt_global_entry *tt_global,
+			      struct batadv_orig_node *orig_node);
+bool batadv_fbx_shortcut(struct batadv_priv *bat_priv, u8 const *dest);
+bool batadv_fbx_check_skb_rx(struct batadv_priv *bat_priv,
+			     enum batadv_packettype type,
+			     struct sk_buff *skb);
+bool batadv_fbx_check_skb_tx(struct batadv_priv *bat_priv,
+			     struct sk_buff *skb,
+			     unsigned short vid);
+void batadv_fbx_ogm_process(struct batadv_priv *bat_priv,
+			    struct batadv_orig_node *orig_node,
+			    struct batadv_neigh_node *neigh_node,
+			    struct batadv_ogm2_packet *ogm);
+int batadv_fbx_recv_handler_register(u8 packet_type,
+				     int (*hdl)(struct batadv_hard_iface *,
+						struct sk_buff *));
+void batadv_fbx_recv_handler_unregister(u8 packet_type);
+
+extern struct batadv_fbx_module const batadv_mtu_module;
+extern struct batadv_fbx_module const batadv_slap_module;
+extern struct batadv_fbx_module const batadv_router_module;
+
+#else
+
+/* CONFIG_BATMAN_ADV_FBX=n: no-op inline stubs so callers need no ifdefs.
+ * The bool hooks return true ("keep processing"), the int hooks return 0
+ * ("success").
+ * NOTE(review): there are no stubs for batadv_fbx_shortcut() or the tvlv/
+ * recv-handler helpers declared above — presumably those are only called
+ * from FBX-only code; verify when adding new callers.
+ */
+
+struct genl_info;
+
+static inline int __init batadv_fbx_init(void)
+{
+	return 0;
+}
+
+static inline void __exit batadv_fbx_exit(void)
+{
+}
+
+static inline int batadv_fbx_new_priv(struct batadv_priv *bat_priv)
+{
+	return 0;
+}
+
+static inline void batadv_fbx_free_priv(struct batadv_priv *bat_priv)
+{
+}
+
+static inline void
+batadv_fbx_neigh_release(struct batadv_hardif_neigh_node *neigh)
+{
+}
+
+static inline int
+batadv_fbx_neigh_init(struct batadv_hardif_neigh_node *neigh)
+{
+	return 0;
+}
+
+static inline void
+batadv_fbx_orig_release(struct batadv_orig_node *orig)
+{
+}
+
+static inline int
+batadv_fbx_orig_init(struct batadv_orig_node *orig)
+{
+	return 0;
+}
+
+static inline void
+batadv_fbx_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_info)
+{
+}
+
+static inline int
+batadv_fbx_orig_ifinfo_init(struct batadv_orig_ifinfo *orig_info)
+{
+	return 0;
+}
+
+static inline void batadv_fbx_hardif_update(struct batadv_hard_iface *hif)
+{
+}
+
+static inline void batadv_fbx_primary_update(struct batadv_priv *bat_priv,
+					     struct batadv_hard_iface *primary)
+{
+}
+
+static inline void batadv_fbx_nl(struct batadv_priv *bat_priv,
+				 enum batadv_nl_commands cmd,
+				 struct genl_info *info,
+				 struct sk_buff *skb,
+				 void *data)
+{
+}
+
+static inline bool batadv_fbx_tt_local_add(struct batadv_priv *bat_priv,
+					   struct batadv_tt_local_entry *tl,
+					   struct batadv_tt_global_entry *tg,
+					   int ifindex)
+{
+	return true;
+}
+
+static inline bool batadv_fbx_tt_local_del(struct batadv_priv *bat_priv,
+					   struct batadv_tt_local_entry *tt)
+{
+	return true;
+}
+
+static inline bool batadv_fbx_tt_global_add(struct batadv_priv *bat_priv,
+					    struct batadv_tt_global_entry *tt,
+					    struct batadv_orig_node *orig_node)
+{
+	return true;
+}
+
+static inline void batadv_fbx_tt_global_del(struct batadv_priv *bat_priv,
+					    struct batadv_tt_global_entry *tt,
+					    struct batadv_orig_node *orig_node)
+{
+}
+
+static inline bool batadv_fbx_check_skb_rx(struct batadv_priv *bat_priv,
+					   enum batadv_packettype type,
+					   struct sk_buff *skb)
+{
+	return true;
+}
+
+static inline bool batadv_fbx_check_skb_tx(struct batadv_priv *bat_priv,
+					   struct sk_buff *skb,
+					   unsigned short vid)
+{
+	return true;
+}
+
+static inline void batadv_fbx_ogm_process(struct batadv_priv *bat_priv,
+					  struct batadv_orig_node *orig_node,
+					  struct batadv_neigh_node *neigh_node,
+					  struct batadv_ogm2_packet *ogm)
+{
+}
+
+#endif /* CONFIG_BATMAN_ADV_FBX */
+
+#endif /* _NET_BATMAN_ADV_FBX_H_ */
diff -Nruw linux-6.13.12-fbx/net/batman-adv/fbx./mtu.c linux-6.13.12-fbx/net/batman-adv/fbx/mtu.c
--- linux-6.13.12-fbx/net/batman-adv/fbx./mtu.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/batman-adv/fbx/mtu.c	2025-09-25 17:40:37.655377580 +0200
@@ -0,0 +1,501 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) Nicolas Escande <nico.escande@gmail.com>
+ */
+
+#include "../main.h"
+
+#include <linux/atomic.h>
+#include <linux/types.h>
+#include <linux/byteorder/generic.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/gfp.h>
+#include <linux/if_ether.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/lockdep.h>
+#include <linux/minmax.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/workqueue.h>
+#include <net/genetlink.h>
+#include <uapi/linux/batadv_packet.h>
+
+#include "../hard-interface.h"
+#include "../originator.h"
+#include "../send.h"
+#include "../log.h"
+#include "fbx.h"
+#include "mtu.h"
+
+/* number of probe frames sent per burst */
+#define BATADV_MTU_NB_FRAMES 3
+/* time (ms) to wait for a probe response before falling back */
+#define BATADV_MTU_RECV_DELAY 1000
+/* bounds (ms) for the exponentially backed-off reprobe interval */
+#define BATADV_MTU_INTERVAL_MIN 3000
+#define BATADV_MTU_INTERVAL_MAX 30000
+/* conservative MTU used when the big MTU is not (yet) acknowledged */
+#define BATADV_MTU_DEF 1500
+
+/* the response timeout must always fire before the next probe burst */
+static_assert(BATADV_MTU_RECV_DELAY < BATADV_MTU_INTERVAL_MIN);
+
+#define DELAY_MIN msecs_to_jiffies(BATADV_MTU_INTERVAL_MIN)
+#define DELAY_MAX msecs_to_jiffies(BATADV_MTU_INTERVAL_MAX)
+#define DELAY_RECV msecs_to_jiffies(BATADV_MTU_RECV_DELAY)
+
+/**
+ * batadv_mtu_send_probes - send a burst of MTU probe packets
+ * @neigh: the neighbor we should send probes to
+ *
+ * This will only send a few probe packets for the maximum MTU the hard if
+ * supports (increasing the seqno in the process). Each probe is padded up
+ * to the full interface MTU so that it only gets through if the link
+ * really carries frames of that size.
+ *
+ * Return: 0 on success (or when the hard interface has no soft interface
+ * yet), -ENOMEM when an skb allocation fails mid-burst.
+ */
+static int batadv_mtu_send_probes(struct batadv_hardif_neigh_node *neigh)
+{
+	struct sk_buff *skb;
+	struct batadv_hard_iface *hard_if = neigh->if_incoming;
+	int mtu = hard_if->net_dev->mtu;
+	struct batadv_priv *bat_priv;
+	struct batadv_fbx_mtu_packet pkt = {
+		.hdr = {
+			.packet_type = BATADV_FBX,
+			.version = BATADV_COMPAT_VERSION,
+			.subtype = BATADV_FBX_SUB_MTU_PROBE,
+		},
+		/* NOTE(review): seqno below is converted with cpu_to_be16()
+		 * but .mtu is stored in host order; the receive side compares
+		 * it in host order too, so both ends agree — confirm this is
+		 * intentional for mixed-endian meshes.
+		 */
+		.mtu = mtu,
+	};
+	size_t i;
+
+	if (!hard_if->soft_iface)
+		return 0;
+
+	bat_priv = netdev_priv(hard_if->soft_iface);
+
+	for (i = 0; i < BATADV_MTU_NB_FRAMES; i++) {
+		pkt.hdr.seqno =
+			cpu_to_be16(atomic_inc_return(&bat_priv->mtu_seqno));
+		skb = alloc_skb(ETH_HLEN + NET_IP_ALIGN + mtu, GFP_KERNEL);
+		if (!skb)
+			return -ENOMEM;
+
+		skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
+		skb_put_data(skb, &pkt, sizeof(pkt));
+		/* pad the frame up to the full interface MTU */
+		skb_put(skb, mtu - sizeof(pkt));
+		batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);
+	}
+
+	return 0;
+}
+
+/**
+ * batadv_mtu_work_to_neigh() - Get neighbor reference from MTU neighbor
+ * work.
+ *
+ * @mtud: MTU state the work item belongs to
+ * @return: NULL when the neighbor is being deleted, otherwise the neighbor
+ * with its refcount incremented (caller must release it)
+ */
+static struct batadv_hardif_neigh_node *
+batadv_mtu_work_to_neigh(struct batadv_mtu *mtud)
+{
+	struct batadv_hardif_neigh_node *found;
+
+	rcu_read_lock();
+	found = rcu_dereference(mtud->neigh);
+	if (found && !kref_get_unless_zero(&found->refcount))
+		found = NULL;
+	rcu_read_unlock();
+
+	return found;
+}
+
+/**
+ * batadv_mtu_process_periodic() - periodic resend of the probing frames
+ * @work: the delayed work struct
+ *
+ * This will :
+ *   - send a burst of probing frames
+ *   - schedule the next periodic run (exponential backoff, clamped to
+ *     [DELAY_MIN, DELAY_MAX])
+ *   - schedule the no response wq
+ *
+ */
+static void batadv_mtu_process_periodic(struct work_struct *work)
+{
+	struct delayed_work *delayed_work;
+	struct batadv_mtu *mtud;
+	struct batadv_hardif_neigh_node *neigh;
+	struct batadv_priv *bat_priv;
+	struct batadv_hard_iface *hard_if;
+	unsigned long delay;
+
+	delayed_work = to_delayed_work(work);
+	mtud = container_of(delayed_work, struct batadv_mtu, periodic_work);
+	neigh = batadv_mtu_work_to_neigh(mtud);
+	if (!neigh)
+		goto out;
+	hard_if = neigh->if_incoming;
+	if (!hard_if->soft_iface)
+		goto out;
+	bat_priv = netdev_priv(hard_if->soft_iface);
+
+	batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+		"MTU: %pM/%s - reprobing link MTU %d\n",
+		neigh->addr, netdev_name(hard_if->net_dev),
+		hard_if->net_dev->mtu);
+
+	/* arm the no-response timeout before sending the probe burst */
+	mod_delayed_work(batadv_event_workqueue, &mtud->recv_work, DELAY_RECV);
+	batadv_mtu_send_probes(neigh);
+
+	/* reschedule periodic, doubling the interval up to DELAY_MAX */
+	delay = READ_ONCE(mtud->delay);
+	delay = clamp(delay * 2, DELAY_MIN, DELAY_MAX);
+	WRITE_ONCE(mtud->delay, delay);
+	mod_delayed_work(batadv_event_workqueue, &mtud->periodic_work, delay);
+out:
+	/* NOTE(review): neigh may be NULL here — relies on
+	 * batadv_hardif_neigh_put() accepting NULL; confirm.
+	 */
+	batadv_hardif_neigh_put(neigh);
+}
+
+/**
+ * batadv_mtu_process_timeout() - ack delay of the probing frame
+ * @work: the delayed work struct
+ *
+ * If triggered this means we should:
+ *   - fall back to conservative mtu
+ *   - reschedule the periodic wq soonish
+ *
+ */
+static void batadv_mtu_process_timeout(struct work_struct *work)
+{
+	struct delayed_work *delayed_work;
+	struct batadv_mtu *mtud;
+	struct batadv_hardif_neigh_node *neigh;
+	struct batadv_priv *bat_priv;
+	struct batadv_hard_iface *hard_if;
+	int prev_mtu;
+
+	delayed_work = to_delayed_work(work);
+	mtud = container_of(delayed_work, struct batadv_mtu, recv_work);
+	neigh = batadv_mtu_work_to_neigh(mtud);
+	if (!neigh)
+		goto out;
+	hard_if = neigh->if_incoming;
+	if (!hard_if->soft_iface)
+		goto out;
+	bat_priv = netdev_priv(hard_if->soft_iface);
+
+	batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+		"MTU: %pM/%s - probing timeout for MTU %d\n",
+		neigh->addr, netdev_name(hard_if->net_dev),
+		hard_if->net_dev->mtu);
+
+	/* no response in time: fall back to the conservative default MTU */
+	prev_mtu = atomic_xchg(&mtud->mtu, BATADV_MTU_DEF);
+	if (prev_mtu != BATADV_MTU_DEF) {
+		batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+			"MTU: %pM/%s - downgrading MTU to %d\n",
+			neigh->addr, netdev_name(hard_if->net_dev),
+			BATADV_MTU_DEF);
+		/* reschedule periodic at the shortest interval so the big
+		 * MTU is retried quickly once the link recovers
+		 */
+		WRITE_ONCE(mtud->delay, DELAY_MIN);
+		mod_delayed_work(batadv_event_workqueue,
+				 &mtud->periodic_work,
+				 DELAY_MIN);
+	}
+out:
+	batadv_hardif_neigh_put(neigh);
+}
+
+/**
+ * batadv_mtu_do_neigh_release() - Release neighbor related data
+ * @work: the release_work embedded in the struct batadv_mtu to free
+ *
+ * Runs from the workqueue after batadv_mtu_neigh_release() cleared the
+ * RCU back-pointer: cancels both pending works synchronously, then frees
+ * the MTU state.
+ */
+static void batadv_mtu_do_neigh_release(struct work_struct *work)
+{
+	struct delayed_work *delayed_work;
+	struct batadv_mtu *mtud;
+
+	delayed_work = to_delayed_work(work);
+	mtud = container_of(delayed_work, struct batadv_mtu, release_work);
+
+	cancel_delayed_work_sync(&mtud->periodic_work);
+	cancel_delayed_work_sync(&mtud->recv_work);
+	kfree(mtud);
+}
+
+/**
+ * batadv_mtu_skb_check() - check an skb is a valid MTU probe/resp packet
+ * @skb: the ethernet pkt
+ *
+ * Makes sure the MTU header is available in the linear part of the skb
+ * and that the frame is plain unicast.
+ *
+ * Return: 0 when the frame is acceptable, -EINVAL otherwise
+ */
+static int batadv_mtu_skb_check(struct sk_buff *skb)
+{
+	struct ethhdr *ethhdr;
+
+	/* pull first: pskb_may_pull() may reallocate the skb head, so any
+	 * header pointer must only be taken once it succeeded
+	 */
+	if (unlikely(!pskb_may_pull(skb, BATADV_FBX_MTU_HLEN)))
+		return -EINVAL;
+
+	ethhdr = eth_hdr(skb);
+	if (is_broadcast_ether_addr(ethhdr->h_dest))
+		return -EINVAL;
+	if (!is_valid_ether_addr(ethhdr->h_source))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* human readable names of the MTU packet subtypes, indexed by subtype */
+static const char * const subtype_to_str[] = {
+	[BATADV_FBX_SUB_MTU_PROBE] = "probe",
+	[BATADV_FBX_SUB_MTU_RESP] = "resp",
+};
+
+/**
+ * batadv_recv_mtu_packet() - receive a MTU probe packet
+ * @iface: the hard interface we received the skb on
+ * @skb: probe packet received
+ *
+ * This will process a probe request or a probe response.
+ * Either sending a response or adjusting the mtu if needed.
+ * If the MTU gets upgraded, we reschedule the periodic work
+ *
+ * Return: NET_RX_SUCCESS when the frame was handled, NET_RX_DROP otherwise
+ */
+static int batadv_recv_mtu_packet(struct batadv_hard_iface *iface,
+				  struct sk_buff *skb)
+{
+	struct batadv_priv *bat_priv;
+	struct ethhdr *ethhdr;
+	struct batadv_fbx_mtu_packet *pkt;
+	struct batadv_hardif_neigh_node *neigh = NULL;
+	char const *sname = "unknown";
+	u8 dst[ETH_ALEN];
+	int ret = NET_RX_DROP, rc;
+
+	if (!iface->soft_iface)
+		goto free_skb;
+	bat_priv = netdev_priv(iface->soft_iface);
+
+	skb = skb_unshare(skb, GFP_ATOMIC);
+	if (!skb)
+		goto free_skb;
+
+	/* validate first: batadv_mtu_skb_check() uses pskb_may_pull() which
+	 * may reallocate the skb head, so header pointers must only be taken
+	 * once it succeeded
+	 */
+	if (batadv_mtu_skb_check(skb) < 0)
+		goto free_skb;
+
+	ethhdr = eth_hdr(skb);
+	pkt = (struct batadv_fbx_mtu_packet *)(ethhdr + 1);
+
+	/* bound the table lookup: an unknown subtype (handled below) must
+	 * not index past the end of subtype_to_str[]
+	 */
+	if (pkt->hdr.subtype < ARRAY_SIZE(subtype_to_str) &&
+	    subtype_to_str[pkt->hdr.subtype])
+		sname = subtype_to_str[pkt->hdr.subtype];
+
+	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+		"MTU: %pM/%s: received MTU %s packet\n",
+		ethhdr->h_source, netdev_name(iface->net_dev), sname);
+
+	switch(pkt->hdr.subtype) {
+	case BATADV_FBX_SUB_MTU_PROBE:
+		ether_addr_copy(dst, ethhdr->h_source);
+
+		/* convert probe packet in response packet */
+		pkt->hdr.subtype = BATADV_FBX_SUB_MTU_RESP;
+
+		/* trim to new size */
+		if (skb_linearize(skb) < 0)
+			goto free_skb;
+		skb_trim(skb, sizeof(*pkt));
+
+		/* send it back to owner */
+		batadv_send_skb_packet(skb, iface, dst);
+		break;
+	case BATADV_FBX_SUB_MTU_RESP:
+		/* get neigh */
+		neigh = batadv_hardif_neigh_get(iface, ethhdr->h_source);
+		if (!neigh) {
+			pr_warn("batadv: MTU: %pM - unknown neigh",
+				ethhdr->h_source);
+			goto free_skb;
+		}
+
+		/* the echoed MTU must match what we currently probe for */
+		if (pkt->mtu != iface->net_dev->mtu){
+			pr_warn("batadv: %pM - bad mtu %d",
+				ethhdr->h_source, pkt->mtu);
+			goto free_skb;
+		}
+
+		/* use this mtu and disarm the no-response timeout */
+		cancel_delayed_work(&neigh->mtud->recv_work);
+		rc = atomic_xchg(&neigh->mtud->mtu, iface->net_dev->mtu);
+		if (rc != iface->net_dev->mtu) {
+			batadv_dbg(BATADV_DBG_ROUTES, bat_priv,
+				"MTU: %pM/%s: upgrading MTU to %d\n",
+				ethhdr->h_source, netdev_name(iface->net_dev),
+				iface->net_dev->mtu);
+			WRITE_ONCE(neigh->mtud->delay, DELAY_MIN);
+			mod_delayed_work(batadv_event_workqueue,
+					 &neigh->mtud->periodic_work,
+					 DELAY_MIN);
+		}
+
+		consume_skb(skb);
+		break;
+	default:
+		pr_warn_ratelimited("batadv: MTU: %pM - unknown subtype: %d\n",
+				ethhdr->h_source, pkt->hdr.subtype);
+		goto free_skb;
+	}
+
+	ret = NET_RX_SUCCESS;
+
+free_skb:
+	if (ret == NET_RX_DROP)
+		kfree_skb(skb);
+	if (neigh)
+		batadv_hardif_neigh_put(neigh);
+	return ret;
+}
+
+/**
+ * batadv_mtu_neigh_release: unschedules the periodic & recv wq
+ * @neigh: the neighbor being freed
+ *
+ * Clears the RCU back-pointer so in-flight works bail out, then defers the
+ * actual teardown/free to batadv_mtu_do_neigh_release() on the workqueue
+ * (cancel_delayed_work_sync() must not run from this context).
+ */
+static void batadv_mtu_neigh_release(struct batadv_hardif_neigh_node *neigh)
+{
+	rcu_assign_pointer(neigh->mtud->neigh, NULL);
+	mod_delayed_work(batadv_event_workqueue, &neigh->mtud->release_work, 0);
+}
+
+/**
+ * batadv_mtu_neigh_init: init a neighbor for mtu check
+ *
+ * @neigh: the neighbor being initialized
+ *
+ * - init the periodic & recv wq
+ * - sets the default MTU
+ * - schedules the periodic MTU check if needed
+ *
+ * Return: 0 on success, -ENOMEM when the MTU state cannot be allocated
+ */
+static int batadv_mtu_neigh_init(struct batadv_hardif_neigh_node *neigh)
+{
+	struct batadv_hard_iface *hard_if = neigh->if_incoming;
+	struct batadv_mtu *mtud;
+	int hard_mtu;
+
+	mtud = kmalloc(sizeof(*mtud), GFP_ATOMIC);
+	if (!mtud)
+		return -ENOMEM;
+
+	rcu_assign_pointer(mtud->neigh, neigh);
+	hard_mtu = hard_if->net_dev->mtu;
+	INIT_DELAYED_WORK(&mtud->periodic_work, batadv_mtu_process_periodic);
+	INIT_DELAYED_WORK(&mtud->recv_work, batadv_mtu_process_timeout);
+	INIT_DELAYED_WORK(&mtud->release_work, batadv_mtu_do_neigh_release);
+	atomic_set(&mtud->mtu, hard_mtu);
+	mtud->delay = 0;
+
+	/* publish mtud before any work can run: the periodic work sends
+	 * probes whose response handler dereferences neigh->mtud, so it must
+	 * be visible before the work is scheduled
+	 */
+	neigh->mtud = mtud;
+
+	if (!batadv_is_wifi_hardif(hard_if) && hard_mtu > BATADV_MTU_DEF) {
+		/* stay conservative until the big MTU is acknowledged */
+		atomic_set(&mtud->mtu, BATADV_MTU_DEF);
+		mod_delayed_work(batadv_event_workqueue,
+				 &mtud->periodic_work, 0);
+	}
+
+	return 0;
+}
+
+/**
+ * batadv_mtu_hardif_update() - update mtu of hardif
+ * @iface: the hard interface whose MTU changed
+ *
+ * This will cause all neighs to renegotiate their MTU if they are on an
+ * ethernet link with a big MTU: each neighbor's MTU state is released and
+ * re-created from scratch.
+ *
+ * NOTE(review): release + re-init runs under rcu_read_lock(); the old
+ * mtud is freed asynchronously while neigh->mtud is immediately
+ * overwritten — verify no concurrent reader can still use the old
+ * pointer, and that neigh_init's -ENOMEM (ignored here) leaves the
+ * neighbor in a usable state.
+ */
+static void batadv_mtu_hardif_update(struct batadv_hard_iface *iface)
+{
+	struct batadv_hardif_neigh_node *hardif_neigh;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(hardif_neigh, &iface->neigh_list, list) {
+		batadv_mtu_neigh_release(hardif_neigh);
+		batadv_mtu_neigh_init(hardif_neigh);
+	}
+	rcu_read_unlock();
+}
+
+/**
+ * batadv_mtu_neigh_dump() - Dump MTU specific information for a specific
+ * neighbour
+ * @bat_priv: The bat priv with all the soft interface information
+ * @info: NL message info (not used here)
+ * @attr: NL message attributes (not used here)
+ * @skb: Current originator NL message
+ * @data: Here this is the neighbour being dumped
+ */
+static void batadv_mtu_neigh_dump(struct batadv_priv *bat_priv,
+				  struct genl_info *info,
+				  struct nlattr **attr,
+				  struct sk_buff *skb,
+				  void *data)
+{
+	struct batadv_hardif_neigh_node *n = data;
+
+	if (!skb)
+		return;
+
+	/* NOTE(review): nla_put_u32() failure is ignored — on a full
+	 * message the attribute is silently missing; confirm acceptable.
+	 */
+	nla_put_u32(skb, BATADV_ATTR_FBX_MTU, atomic_read(&n->mtud->mtu));
+}
+
+/**
+ * batadv_mtu_new_priv: init MTU data for a bat_priv
+ * @bat_priv: the bat_priv to init
+ *
+ * Resets the probe sequence number space for this soft interface.
+ *
+ * Return: always 0
+ */
+static int batadv_mtu_new_priv(struct batadv_priv *bat_priv)
+{
+	/* first probe will carry seqno 1 (atomic_inc_return) */
+	atomic_set(&bat_priv->mtu_seqno, 0);
+
+	return 0;
+}
+
+/**
+ * batadv_mtu_init: init FBX MTU module
+ *
+ * Registers the receive handlers for both MTU subtypes. Unlike before,
+ * registration failures are now propagated (with rollback of the first
+ * handler) instead of being silently ignored.
+ *
+ * Return: 0 on success, error from batadv_fbx_recv_handler_register()
+ * otherwise
+ */
+static int __init batadv_mtu_init(void)
+{
+	int ret;
+
+	/* on-wire format must stay stable */
+	BUILD_BUG_ON(sizeof(struct batadv_fbx_mtu_packet) != 10);
+
+	ret = batadv_fbx_recv_handler_register(BATADV_FBX_SUB_MTU_PROBE,
+					       batadv_recv_mtu_packet);
+	if (ret)
+		return ret;
+
+	ret = batadv_fbx_recv_handler_register(BATADV_FBX_SUB_MTU_RESP,
+					       batadv_recv_mtu_packet);
+	if (ret)
+		batadv_fbx_recv_handler_unregister(BATADV_FBX_SUB_MTU_PROBE);
+
+	return ret;
+}
+
+/**
+ * batadv_mtu_exit: Exit FBX MTU module
+ *
+ * Unregisters both MTU subtype receive handlers.
+ */
+static void __exit batadv_mtu_exit(void)
+{
+	batadv_fbx_recv_handler_unregister(BATADV_FBX_SUB_MTU_PROBE);
+	batadv_fbx_recv_handler_unregister(BATADV_FBX_SUB_MTU_RESP);
+}
+
+/* hooks implemented by the MTU module; unset callbacks are skipped by the
+ * fbx dispatchers
+ */
+struct batadv_fbx_module_ops const batadv_mtu_module_ops = {
+	.init = batadv_mtu_init,
+	.exit = batadv_mtu_exit,
+	.new_priv = batadv_mtu_new_priv,
+	.hardif_update = batadv_mtu_hardif_update,
+	.neigh_init = batadv_mtu_neigh_init,
+	.neigh_release = batadv_mtu_neigh_release,
+};
+
+/* netlink extension: add the negotiated MTU to each neighbor dump entry */
+struct batadv_fbx_nl_ops const batadv_mtu_nl_ops[] = {
+	{
+		.cmd = BATADV_CMD_GET_NEIGHBORS,
+		.hdl = batadv_mtu_neigh_dump,
+	},
+};
+
+/* module descriptor referenced by the fbx core module table */
+struct batadv_fbx_module const batadv_mtu_module = {
+	.name = "mtu",
+	.ops = &batadv_mtu_module_ops,
+	.nl_ops = batadv_mtu_nl_ops,
+	.nl_ops_sz = ARRAY_SIZE(batadv_mtu_nl_ops),
+};
diff -Nruw linux-6.13.12-fbx/net/batman-adv/fbx./mtu.h linux-6.13.12-fbx/net/batman-adv/fbx/mtu.h
--- linux-6.13.12-fbx/net/batman-adv/fbx./mtu.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/batman-adv/fbx/mtu.h	2025-09-25 17:40:37.655377580 +0200
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Nicolas Escande <nico.escande@gmail.com>
+ */
+
+#ifndef _NET_BATMAN_ADV_FBX_MTU_H_
+#define _NET_BATMAN_ADV_FBX_MTU_H_
+
+#ifdef CONFIG_BATMAN_ADV_FBX_MTU
+
+#include <linux/skbuff.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+
+#include "../main.h"
+
+/**
+ * batadv_mtu_get_for_neigh() - get the MTU to use for this neigh
+ * @n: the neighbor to query
+ *
+ * This function returns the MTU to use when talking to a given neighbor,
+ * as last established by the MTU probing state machine.
+ *
+ * returns: the mtu
+ */
+static inline int batadv_mtu_get_for_neigh(struct batadv_hardif_neigh_node *n)
+{
+	return atomic_read(&n->mtud->mtu);
+}
+#else
+/* probing disabled: fall back to the MTU of the underlying interface */
+static inline int batadv_mtu_get_for_neigh(struct batadv_hardif_neigh_node *n)
+{
+	return n->if_incoming->net_dev->mtu;
+}
+#endif
+
+#endif /* _NET_BATMAN_ADV_FBX_MTU_H_ */
diff -Nruw linux-6.13.12-fbx/net/batman-adv/fbx./slap.c linux-6.13.12-fbx/net/batman-adv/fbx/slap.c
--- linux-6.13.12-fbx/net/batman-adv/fbx./slap.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/batman-adv/fbx/slap.c	2025-09-29 14:23:07.621732489 +0200
@@ -0,0 +1,1463 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) Remi Pommarel <rpommarel@freebox.fr>
+ */
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <net/llc_pdu.h>
+#include <linux/llc.h>
+#include <uapi/linux/batadv_packet.h>
+
+#include "../main.h"
+#include "../hard-interface.h"
+#include "../send.h"
+#include "../originator.h"
+#include "../netlink.h"
+#include "../translation-table.h"
+#include "fbx.h"
+
+#define SLAP_MASTER_ANNOUNCE_RATE 500 /* 500 ms */
+#define SLAP_MASTER_EXPIRE (2 * SLAP_MASTER_ANNOUNCE_RATE + 100)
+
+#define SLAP_PRIO_DEFAULT (U32_MAX >> 1)
+
+#define slap_dereference_check(b, p)					\
+	rcu_dereference_check(p, lockdep_is_held(&(b)->slap_lock))
+
+#define slap_id_get_rcu(b) slap_dereference_check(b, (b)->slap_id)
+#define slap_master_get_rcu(b) slap_dereference_check(b, (b)->slap_master)
+
+#define slap_printk(lvl, p, fmt, args...)				\
+	pr_ ## lvl("%s: " fmt, dev_name(&(p)->soft_iface->dev), ##args)
+#define slap_debug_ratelimited(p, fmt, args...)				\
+	slap_printk(debug_ratelimited, p , fmt, ##args)
+#define slap_debug(p, fmt, args...) slap_printk(debug, p , fmt, ##args)
+#define slap_info(p, fmt, args...) slap_printk(info, p , fmt, ##args)
+#define slap_err(p, fmt, args...) slap_printk(err, p , fmt, ##args)
+
+#define to_ns(w, f)							\
+	container_of(w, struct batadv_hardif_neigh_slap, f.work)
+
+#define to_slap_id(w)							\
+	container_of(w, struct batadv_slap_id, expire.work)
+
+/* NOTE(review): kernel style usually prefers the __packed attribute over
+ * #pragma pack — confirm this matches the surrounding packet definitions.
+ */
+#pragma pack(2)
+
+/**
+ * batadv_slap_tvlv_master - FBX TVLV packet used to propagate our current
+ * master to all nodes through OGM
+ * @addr: ethernet address of the currently elected SLAP master
+ */
+struct batadv_slap_tvlv_master {
+	__u8 addr[ETH_ALEN];
+};
+
+#pragma pack()
+
+/**
+ * batadv_slap_id_cmp() - compare a SLAP ID against a (prio, addr) pair
+ * @id1: SLAP ID to compare
+ * @prio: SLAP ID priority to compare @id1 with
+ * @addr: SLAP ID address to compare @id1 with
+ *
+ * The priority is the primary sort key; the ethernet address breaks ties.
+ *
+ * @return: negative number, zero or positive number when @id1 is lower
+ *          than, equal to or higher than (@prio, @addr) respectively
+ */
+static int batadv_slap_id_cmp(struct batadv_slap_id const *id1,
+			      u32 prio, u8 const *addr)
+{
+	if (id1->prio != prio)
+		return id1->prio < prio ? -1 : 1;
+
+	return memcmp(id1->addr, addr, ETH_ALEN);
+}
+
+/**
+ * Is the bat soft interface currently master
+ * Should be called either with rcu_read_lock or bat_priv->slap_lock held
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @return: true when our own SLAP ID is the current segment master
+ */
+static bool batadv_slap_is_master(struct batadv_priv const *bat_priv)
+{
+	/* master == own id <=> we elected ourselves */
+	return slap_id_get_rcu(bat_priv) == slap_master_get_rcu(bat_priv);
+}
+
+/**
+ * Check if SLAP ID could be new SLAP master
+ * Should be called with rcu_read_lock or bat_priv->slap_lock held
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @id: SLAP ID of the potential master
+ * @return: True if id could be the segment master, false otherwise
+ */
+static bool batadv_slap_id_is_new_master(struct batadv_priv *bat_priv,
+					 struct batadv_slap_id const *id)
+{
+	struct batadv_slap_id *master;
+
+	/* lower (prio, addr) wins the election */
+	master = slap_master_get_rcu(bat_priv);
+	return (batadv_slap_id_cmp(id, master->prio, master->addr) < 0);
+}
+
+/**
+ * Queue SLAP ID to be freed after RCU grace period
+ *
+ * @ref: kref pointer of the SLAP ID to free
+ */
+static void batadv_slap_id_free_rcu(struct kref *ref)
+{
+	struct batadv_slap_id *id;
+
+	id = container_of(ref, struct batadv_slap_id, refcount);
+	kfree_rcu(id, rcu);
+}
+
+
+/**
+ * Increase SLAP ID refcount
+ *
+ * @id: SLAP ID to take a reference on
+ * @return: true if the SLAP ID refcount was successfully incremented,
+ *          false when it already dropped to zero (ID being freed)
+ */
+static bool batadv_slap_id_get(struct batadv_slap_id *id)
+{
+	return kref_get_unless_zero(&id->refcount) != 0;
+}
+
+/**
+ * Release reference on a SLAP ID, potentially freeing it
+ *
+ * @id: ID to release reference on
+ */
+static void batadv_slap_id_put(struct batadv_slap_id *id)
+{
+	kref_put(&id->refcount, batadv_slap_id_free_rcu);
+}
+
+/* forward declaration: needed by the expiration path below */
+static void _batadv_slap_set_master(struct batadv_priv *bat_priv,
+				    struct batadv_slap_id *id);
+
+/**
+ * Hit a current SLAP ID (prevent its expiration)
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @id: SLAP ID to keep alive
+ */
+static void batadv_slap_id_hit(struct batadv_priv *bat_priv,
+			       struct batadv_slap_id *id)
+{
+	/*
+	 * WRITE_ONCE/READ_ONCE used to avoid load/store tearing, see
+	 * https://lwn.net/Articles/793253/
+	 */
+	WRITE_ONCE(id->exp_time,
+		   jiffies + msecs_to_jiffies(SLAP_MASTER_EXPIRE));
+}
+
+/**
+ * Schedule SLAP ID expiration delayed work
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @id: SLAP ID to schedule expiration delayed work for
+ * @return: False if expiration work has been (newly) queued, True if it
+ * was pending and has only been rescheduled (mod_delayed_work semantics)
+ */
+static bool batadv_slap_id_schedule_expire(struct batadv_priv *bat_priv,
+					   struct batadv_slap_id *id)
+{
+	return mod_delayed_work(bat_priv->slap_wq, &id->expire,
+				READ_ONCE(id->exp_time) - jiffies);
+}
+
+/**
+ * Start SLAP ID expiration routine
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @id: SLAP ID to set expiration deadline for
+ *
+ * When the expire work was newly queued (not merely rescheduled) an extra
+ * reference is taken; it is dropped by the expire handler.
+ */
+static void batadv_slap_id_start_expire(struct batadv_priv *bat_priv,
+					struct batadv_slap_id *id)
+{
+	bool ret;
+
+	batadv_slap_id_hit(bat_priv, id);
+	ret = batadv_slap_id_schedule_expire(bat_priv, id);
+	if (!ret)
+		batadv_slap_id_get(id);
+}
+
+/**
+ * Force SLAP ID expiration routine
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @id: SLAP ID to set expiration deadline for
+ */
+static void batadv_slap_id_force_expire(struct batadv_priv *bat_priv,
+					struct batadv_slap_id *id)
+{
+	bool ret;
+
+	/* If already expired do nothing */
+	if (!batadv_slap_id_get(id))
+		return;
+
+	/*
+	 * At this point we know we have a reference on ID, so it is safe to
+	 * re-schedule the expiration delayed work here. We will drop the ref if
+	 * the expiration delayed work was only re-scheduled (not queued).
+	 * This is a bit convoluted, but this way the SLAP ID will very
+	 * likely be deleted by the expiration timer outside of
+	 * bat_priv->slap_lock critical section and only very rarely by this
+	 * function which will likely be called while holding
+	 * bat_priv->slap_lock.
+	 */
+
+	/* Force expiration now */
+	WRITE_ONCE(id->exp_time, jiffies);
+	ret = batadv_slap_id_schedule_expire(bat_priv, id);
+	if (ret)
+		batadv_slap_id_put(id);
+}
+
+/**
+ * Expire a neighbor SLAP ID, if it was master demote it first
+ * Takes bat->slap_lock
+ *
+ * @work: expire work embedded in the SLAP ID being expired
+ */
+static void batadv_slap_id_expire(struct work_struct *work)
+{
+	struct batadv_slap_id *id = to_slap_id(work);
+	struct batadv_priv *bat_priv = id->bat_priv;
+
+	/* If expire fire too soon let's rearm it */
+	if (time_before(jiffies, READ_ONCE(id->exp_time))) {
+		batadv_slap_id_schedule_expire(bat_priv, id);
+		return;
+	}
+
+	slap_debug(bat_priv, "Expiring SLAP ID %u/%pM\n", id->prio, id->addr);
+
+	/* if the expiring ID was master, fall back to our own ID */
+	spin_lock_bh(&bat_priv->slap_lock);
+	if (id == slap_master_get_rcu(bat_priv))
+		_batadv_slap_set_master(bat_priv, slap_id_get_rcu(bat_priv));
+	spin_unlock_bh(&bat_priv->slap_lock);
+
+	/* drop the reference taken when the expire work was queued */
+	batadv_slap_id_put(id);
+}
+
+/**
+ * Init a new SLAP ID
+ * @id: SLAP ID to initialize
+ * @bat_priv: bat_priv with all soft interface information
+ * @prio: Prio of the new SLAP ID
+ * @addr: Address of the new SLAP ID
+ *
+ * Sets the initial refcount to one and prepares the expire delayed work
+ * (which is not scheduled here).
+ */
+static void batadv_slap_id_init(struct batadv_slap_id *id,
+				struct batadv_priv *bat_priv,
+				u32 prio,
+				u8 const *addr)
+{
+	kref_init(&id->refcount);
+	id->bat_priv = bat_priv;
+	id->prio = prio;
+	memcpy(id->addr, addr, sizeof(id->addr));
+	INIT_DELAYED_WORK(&id->expire, batadv_slap_id_expire);
+}
+
+/**
+ * Send or resend our SLAP ID to a specific neighbor
+ * Takes RCU read lock
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @neigh: Neighbor to send our SLAP ID to
+ * @return: 0 on success, negative number otherwise
+ */
+static int batadv_slap_send_id(struct batadv_priv *bat_priv,
+			       struct batadv_hardif_neigh_node *neigh)
+{
+	struct batadv_fbx_slap_packet *pkt;
+	struct batadv_slap_id *id;
+	struct sk_buff *skb;
+	int ret = -1;
+	u32 prio;
+
+	/* snapshot prio and clone the prebuilt SLAP template under RCU */
+	rcu_read_lock();
+	id = slap_id_get_rcu(bat_priv);
+	prio = id->prio;
+	skb = skb_copy(rcu_dereference(bat_priv->slap_skb), GFP_ATOMIC);
+	rcu_read_unlock();
+
+	if (!skb)
+		goto out;
+
+	/* patch the current priority into the copied template */
+	pkt = (struct batadv_fbx_slap_packet *)skb->data;
+	pkt->prio = htonl(prio);
+	batadv_send_skb_packet(skb, neigh->if_incoming, neigh->addr);
+
+	slap_debug_ratelimited(bat_priv, "Sending SLAP Prio %u to %pM\n",
+			       prio, neigh->orig);
+	ret = 0;
+out:
+	return ret;
+}
+
+/**
+ * batadv_slap_get_priv() - Get bat_priv from soft interface
+ *
+ * @soft_iface: batman interface to get bat_priv from
+ * @return: priv on success, NULL otherwise
+ */
+static struct batadv_priv *batadv_slap_get_priv(struct net_device *soft_iface)
+{
+	if (!soft_iface)
+		return NULL;
+	return netdev_priv(soft_iface);
+}
+
+/**
+ * batadv_slap_work_to_neigh() - Get neighbor reference from SLAP neighbor
+ * work.
+ *
+ * @work: work_struct associated with neighbor
+ * @return: NULL if Neighbor is currently being deleted, neighbor hardif
+ * pointer with incremented ref count otherwise
+ */
+static struct batadv_hardif_neigh_node *
+batadv_slap_work_to_neigh(struct work_struct *work)
+{
+	struct batadv_hardif_neigh_slap *ns = to_ns(work, announce);
+	struct batadv_hardif_neigh_node *neigh;
+
+	rcu_read_lock();
+	neigh = rcu_dereference(ns->neigh);
+	if (!neigh)
+		goto out;
+	if (!kref_get_unless_zero(&neigh->refcount))
+		neigh = NULL;
+out:
+	rcu_read_unlock();
+	return neigh;
+}
+
+/**
+ * Announce work that recurrently sends SLAP ID to a specific neighbor while
+ * this originator is SLAP master
+ * Takes RCU read lock
+ *
+ * @work: work_struct associated with neighbor
+ */
+static void batadv_slap_do_announce(struct work_struct *work)
+{
+	struct batadv_hardif_neigh_node *neigh;
+	struct batadv_priv *bat_priv;
+	bool slap, master;
+
+	neigh = batadv_slap_work_to_neigh(work);
+	/* Neighbor is being deleted */
+	if (!neigh)
+		goto out;
+
+	bat_priv = batadv_slap_get_priv(neigh->if_incoming->soft_iface);
+	if (!bat_priv)
+		goto out;
+
+	rcu_read_lock();
+	slap = rcu_dereference(bat_priv->slap_iface) == neigh->if_incoming;
+	master = batadv_slap_is_master(bat_priv);
+	rcu_read_unlock();
+
+	/* Only current SLAP master should announce itself */
+	if (!slap || !master)
+		goto out;
+
+	batadv_slap_send_id(bat_priv, neigh);
+
+	mod_delayed_work(bat_priv->slap_wq, &neigh->slap->announce,
+			 msecs_to_jiffies(SLAP_MASTER_ANNOUNCE_RATE));
+out:
+	batadv_hardif_neigh_put(neigh);
+}
+
+/**
+ * batadv_slap_do_neigh_release() - Work that effectively clean SLAP neighbor
+ * data.
+ *
+ * @work: work_struct associated with neighbor
+ */
+static void batadv_slap_do_neigh_release(struct work_struct *work)
+{
+	struct batadv_hardif_neigh_slap *ns = to_ns(work, release);
+
+	cancel_delayed_work_sync(&ns->announce);
+	kfree(ns);
+}
+
+/**
+ * Start announcing to all neighbors, used when this originator has just been
+ * elected SLAP master
+ * Takes RCU read lock
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ */
+static void batadv_slap_start_announce(struct batadv_priv *bat_priv)
+{
+	struct batadv_hardif_neigh_node *neigh;
+	struct batadv_hard_iface *slap_iface;
+
+	rcu_read_lock();
+	slap_iface = rcu_dereference(bat_priv->slap_iface);
+	if (slap_iface == NULL)
+		goto out;
+	hlist_for_each_entry_rcu(neigh, &slap_iface->neigh_list, list)
+		mod_delayed_work(bat_priv->slap_wq, &neigh->slap->announce, 0);
+out:
+	rcu_read_unlock();
+}
+
+/**
+ * Stop announcing to all neighbors, used when this originator has just been
+ * demoted as SLAP master
+ * Takes RCU read lock
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ */
+static void batadv_slap_stop_announce(struct batadv_priv *bat_priv)
+{
+	struct batadv_hardif_neigh_node *neigh;
+	struct batadv_hard_iface *slap_iface;
+
+	rcu_read_lock();
+	slap_iface = rcu_dereference(bat_priv->slap_iface);
+	if (slap_iface == NULL)
+		goto out;
+	hlist_for_each_entry_rcu(neigh, &slap_iface->neigh_list, list)
+		cancel_delayed_work(&neigh->slap->announce);
+out:
+	rcu_read_unlock();
+}
+
+/**
+ * Update current SLAP master
+ * Needs bat_priv->slap_lock to be held
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @id: New master ID
+ */
+static void _batadv_slap_set_master(struct batadv_priv *bat_priv,
+				    struct batadv_slap_id *id)
+{
+	struct batadv_slap_tvlv_master tvlv;
+	struct batadv_slap_id *old_master;
+
+	lockdep_assert_held(&bat_priv->slap_lock);
+
+	old_master = slap_master_get_rcu(bat_priv);
+	rcu_assign_pointer(bat_priv->slap_master, id);
+	slap_info(bat_priv, "New SLAP master %u/%pM\n",
+		  id->prio, id->addr);
+
+	if (old_master == slap_id_get_rcu(bat_priv))
+		batadv_slap_stop_announce(bat_priv);
+	if (batadv_slap_is_master(bat_priv))
+		batadv_slap_start_announce(bat_priv);
+
+	ether_addr_copy(tvlv.addr, id->addr);
+	batadv_fbx_tvlv_container_register(bat_priv,
+					   BATADV_FBX_TVLV_SLAP_MASTER,
+					   BATADV_FBX_TVLV_SLAP_VERSION,
+					   &tvlv, sizeof(tvlv));
+
+	batadv_slap_id_force_expire(bat_priv, old_master);
+}
+
+/**
+ * Set new SLAP segment master
+ * Because master candidate has been tested only under rcu protection, it needs
+ * to be rechecked under lock, if it is still a good candidate then it is
+ * elected the segment master
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @id: new master candidate SLAP ID
+ * @return: 0 if candidate is now the SLAP master, negative number otherwise
+ */
+static int batadv_slap_set_master(struct batadv_priv *bat_priv,
+				  struct batadv_slap_id *id)
+{
+	int ret = -1;
+
+	lockdep_assert_in_softirq();
+
+	spin_lock(&bat_priv->slap_lock);
+
+	/* TODO check for interface to be SLAP interface */
+	if (!batadv_slap_id_is_new_master(bat_priv, id))
+		goto unlock;
+
+	_batadv_slap_set_master(bat_priv, id);
+
+	ret = 0;
+unlock:
+	spin_unlock(&bat_priv->slap_lock);
+	return ret;
+}
+
+/**
+ * Alloc and try to set new neighbor master, if current master is already this
+ * very neighbor just hit it
+ *
+ * @bat_priv: bat_priv with all soft interface information
+ * @prio: Neighbor SLAP ID priority
+ * @addr: Neigh SLAP ID address
+ */
+static void batadv_slap_recv_neigh_id(struct batadv_priv *bat_priv,
+				      u32 prio, u8 const *addr)
+{
+	struct batadv_slap_id *id;
+	int ret;
+
+	/*
+	 * First try to fastpath test if neighbor is new master, only false
+	 * positive can happen here
+	 */
+	rcu_read_lock();
+	id = slap_master_get_rcu(bat_priv);
+	ret = batadv_slap_id_cmp(id, prio, addr);
+	/* Neighbor is already master, just hit it */
+	if (ret == 0)
+		batadv_slap_id_hit(bat_priv, id);
+	rcu_read_unlock();
+
+	if (ret <= 0)
+		return;
+
+	id = kmalloc(sizeof(*id), GFP_ATOMIC);
+	if (!id)
+		return;
+
+	batadv_slap_id_init(id, bat_priv, prio, addr);
+	ret = batadv_slap_set_master(bat_priv, id);
+	if (ret < 0) {
+		kfree(id);
+		return;
+	}
+
+	batadv_slap_id_start_expire(bat_priv, id);
+	batadv_slap_id_put(id);
+}
+
+/**
+ * Process a SLAP ID packet
+ *
+ * @iface: interface the SLAP ID packet was received on
+ * @skb: SLAP ID packet
+ *
+ * @return: NET_RX_SUCCESS on success, NET_RX_DROP otherwise.
+ */
+static int batadv_slap_recv_packet(struct batadv_hard_iface *iface,
+				   struct sk_buff *skb)
+{
+	struct batadv_priv *bat_priv = batadv_slap_get_priv(iface->soft_iface);
+	struct ethhdr *ethhdr = (struct ethhdr *)skb_mac_header(skb);
+	struct batadv_hardif_neigh_node *neigh = NULL;
+	struct batadv_fbx_slap_packet *pkt;
+	bool reply = false;
+	u32 prio;
+
+	if (!bat_priv) {
+		kfree_skb(skb);
+		return NET_RX_DROP;
+	}
+
+	if (unlikely(!pskb_may_pull(skb, BATADV_FBX_SLAP_HLEN))) {
+		kfree_skb(skb);
+		return NET_RX_DROP;
+	}
+
+	pkt = (struct batadv_fbx_slap_packet *)skb->data;
+	prio = ntohl(pkt->prio);
+
+	neigh = batadv_hardif_neigh_get(iface, ethhdr->h_source);
+	if (!neigh)
+		goto exit;
+
+	slap_debug_ratelimited(bat_priv,
+			       "Receive SLAP pkt from neighbor %pM\n",
+			       neigh->orig);
+
+	batadv_slap_recv_neigh_id(bat_priv, prio, neigh->orig);
+
+	rcu_read_lock();
+	reply = batadv_slap_is_master(bat_priv);
+	rcu_read_unlock();
+	if (!reply)
+		goto exit;
+
+	batadv_slap_send_id(bat_priv, neigh);
+
+exit:
+	batadv_hardif_neigh_put(neigh);
+	consume_skb(skb);
+	return NET_RX_SUCCESS;
+}
+
+/**
+ * Neighbor disappeared stop announcing we are master to it
+ */
+static void batadv_slap_neigh_release(struct batadv_hardif_neigh_node *neigh)
+{
+	rcu_assign_pointer(neigh->slap->neigh, NULL);
+	mod_delayed_work(batadv_event_workqueue, &neigh->slap->release, 0);
+}
+
+/**
+ * New neighbor discovered, start announcing we are master to it if it is the
+ * case
+ */
+static int batadv_slap_neigh_init(struct batadv_hardif_neigh_node *neigh)
+{
+	struct batadv_hardif_neigh_slap *ns;
+	struct batadv_priv *bat_priv;
+
+	bat_priv = batadv_slap_get_priv(neigh->if_incoming->soft_iface);
+	if (!bat_priv)
+		return -EINVAL;
+
+	ns = kmalloc(sizeof(*ns), GFP_ATOMIC);
+	if (!ns)
+		return -ENOMEM;
+
+	rcu_assign_pointer(ns->neigh, neigh);
+	INIT_DELAYED_WORK(&ns->announce, batadv_slap_do_announce);
+	INIT_DELAYED_WORK(&ns->release, batadv_slap_do_neigh_release);
+	mod_delayed_work(bat_priv->slap_wq, &ns->announce, 0);
+	neigh->slap = ns;
+
+	return 0;
+}
+
+static void batadv_slap_orig_release(struct batadv_orig_node *node)
+{
+	kfree_rcu(node->slap_segid, rcu);
+}
+
+/**
+ * batadv_slap_orig_init() - Init SLAP specific bit in new originator node
+ *
+ * @node: Originator node to init
+ * @return: 0 on success, negative number otherwise
+ */
+static int batadv_slap_orig_init(struct batadv_orig_node *node)
+{
+	struct batadv_slap_segid *id;
+
+	spin_lock_init(&node->slap_lock);
+	id = kmalloc(sizeof(*id), GFP_ATOMIC);
+	if (!id)
+		return -ENOMEM;
+	ether_addr_copy(id->addr, node->orig);
+	rcu_assign_pointer(node->slap_segid, id);
+	return 0;
+}
+
+/**
+ * Update primary interface callback
+ */
+static void batadv_slap_primary_update(struct batadv_priv *bat_priv,
+				       struct batadv_hard_iface *primary)
+{
+	const u8 *addr = primary->net_dev->dev_addr;
+	struct batadv_slap_id *oid, *id;
+	bool cur_master;
+
+	id = kmalloc(sizeof(*id), GFP_KERNEL);
+	if (!id)
+		return;
+
+	spin_lock_bh(&bat_priv->slap_lock);
+	cur_master = batadv_slap_is_master(bat_priv);
+	oid = slap_id_get_rcu(bat_priv);
+	batadv_slap_id_init(id, bat_priv, oid->prio, addr);
+	slap_debug(bat_priv, "New SLAP ID %u/%pM\n", id->prio, id->addr);
+	rcu_assign_pointer(bat_priv->slap_id, id);
+	if (cur_master || batadv_slap_id_is_new_master(bat_priv, id))
+		_batadv_slap_set_master(bat_priv, id);
+	spin_unlock_bh(&bat_priv->slap_lock);
+
+	batadv_slap_id_put(oid);
+}
+
+/**
+ * batadv_slap_ogm_master_recv() - Receive SLAP master OGM TVLV
+ * @bat_priv: the bat priv with all the soft interface information
+ * @orig: Originator sending the TVLV
+ * @tvlv: TVLV data
+ * @len: TVLV data length
+ */
+static void batadv_slap_ogm_master_recv(struct batadv_priv *bat_priv,
+					struct batadv_orig_node *orig,
+					void *tvlv, u16 len)
+{
+	struct batadv_slap_tvlv_master *tvlv_master;
+	struct batadv_slap_segid *new, *old;
+	bool update;
+
+	lockdep_assert_in_softirq();
+
+	if (len < sizeof(*tvlv_master))
+		return;
+
+	tvlv_master = tvlv;
+
+	/* Quick test if master changed */
+	rcu_read_lock();
+	old = rcu_dereference(orig->slap_segid);
+	update = !ether_addr_equal(old->addr, tvlv_master->addr);
+	rcu_read_unlock();
+
+	if (!update)
+		return;
+
+	new = kmalloc(sizeof(*new), GFP_ATOMIC);
+	if (!new)
+		return;
+
+	ether_addr_copy(new->addr, tvlv_master->addr);
+
+	spin_lock(&orig->slap_lock);
+	old = rcu_replace_pointer(orig->slap_segid, new,
+				  lockdep_is_held(&orig->slap_lock));
+	spin_unlock(&orig->slap_lock);
+	kfree_rcu(old, rcu);
+}
+
+#define BATADV_SLAP_STP_GUARD_JIFFIES msecs_to_jiffies(40000) /* 40 secs */
+
+/**
+ * batadv_slap_stp_guard - Should STP packet be dropped because STP guard
+ * interval not finished yet
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @skb: outgoing skb packet
+ * @return: If packet is STP and we still are in STP guard interval return true,
+ * false otherwise
+ */
+static bool batadv_slap_stp_guard(struct batadv_priv *bat_priv,
+				  struct sk_buff *skb)
+{
+	static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
+					      0x00, 0x00};
+	struct llc_pdu_un *pdu;
+	struct ethhdr *ethhdr;
+	unsigned long guard_stop;
+
+	if (!pskb_may_pull(skb, sizeof(*ethhdr) + sizeof(*pdu)))
+		return false;
+
+	ethhdr = eth_hdr(skb);
+	pdu = (struct llc_pdu_un *)(ethhdr + 1);
+
+	if (eth_proto_is_802_3(ethhdr->h_proto))
+		return false;
+
+	/* don't accept stp packets directly. STP does not help in meshes.
+	 * better use the bridge loop avoidance .... But as some devices use
+	 * it, such as SONOS ones, if BLA is not enabled let those packets
+	 * go through and print debug once.
+	 */
+	if (pdu->ssap != LLC_SAP_BSPAN ||
+	    pdu->dsap != LLC_SAP_BSPAN ||
+	    pdu->ctrl_1 != LLC_PDU_TYPE_U)
+		return false;
+
+	if (!batadv_compare_eth(ethhdr->h_dest, stp_addr))
+		return false;
+
+	WARN_ONCE(1, "Some STP packets from %pM stroll around this network",
+		  ethhdr->h_source);
+
+	/* TODO check config, TCN and forwarding time to be more reliable */
+
+	if (bat_priv->stp_guard_stop == 0) {
+		guard_stop = jiffies + BATADV_SLAP_STP_GUARD_JIFFIES;
+		bat_priv->stp_guard_stop = (guard_stop) ? : 1;
+	}
+
+	return time_before(jiffies, bat_priv->stp_guard_stop);
+}
+
+/**
+ * Check ingress skb packet
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @type: Packet type (UNICAST, ICMP, TVLV, etc)
+ * @skb: incoming skb packet
+ * @return: true if packet shall pass, false otherwise
+ */
+static bool batadv_slap_check_skb_rx(struct batadv_priv *bat_priv,
+				     enum batadv_packettype type,
+				     struct sk_buff *skb)
+{
+	bool master;
+
+	/* don't accept stp packets directly to let SLAP converge first */
+	if (batadv_slap_stp_guard(bat_priv, skb))
+		return false;
+
+	rcu_read_lock();
+	master = batadv_slap_is_master(bat_priv);
+	rcu_read_unlock();
+
+	if (master)
+		return true;
+
+	if (type != BATADV_BCAST)
+		return true;
+
+	return false;
+}
+
+/**
+ * _batadv_slap_orig_same_master() - Check if originator is on same SLAP
+ * segment
+ * rcu_read_lock() should be held
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @orig: Originator to check
+ * @return: True if originator is on same SLAP segment, false otherwise
+ */
+static bool _batadv_slap_orig_same_master(struct batadv_priv *bat_priv,
+					  struct batadv_orig_node *orig)
+{
+	struct batadv_slap_id *master;
+	struct batadv_slap_segid *id;
+
+	RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+			 "batadv_slap_orig_same_master() "
+			 "called but no rcu_read_lock held");
+
+	master = slap_master_get_rcu(bat_priv);
+	id = rcu_dereference(orig->slap_segid);
+
+	return ether_addr_equal(master->addr, id->addr);
+}
+
+/**
+ * batadv_slap_orig_same_master() - Check if originator is on same SLAP
+ * segment
+ * This one actually takes rcu_read_lock()
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @orig: Originator to check
+ * @return: True if originator is on same SLAP segment, false otherwise
+ */
+static bool batadv_slap_orig_same_master(struct batadv_priv *bat_priv,
+					 struct batadv_orig_node *orig)
+{
+	bool ret;
+
+	rcu_read_lock();
+	ret = _batadv_slap_orig_same_master(bat_priv, orig);
+	rcu_read_unlock();
+	return ret;
+}
+
+/**
+ * Check egress skb packet
+ * @bat_priv: the bat_priv with all the soft interface information
+ * @skb: outgoing skb packet
+ * @vid: skb's vlan ID
+ * @return: true if packet shall pass, false otherwise
+ */
+static bool batadv_slap_check_skb_tx(struct batadv_priv *bat_priv,
+				     struct sk_buff *skb,
+				     unsigned short vid)
+{
+	struct batadv_orig_node *orig = NULL;
+	struct ethhdr *ethhdr;
+	bool ret;
+
+	ethhdr = eth_hdr(skb);
+
+	/* don't accept stp packets directly to let SLAP converge first */
+	if (batadv_slap_stp_guard(bat_priv, skb))
+		return false;
+
+	rcu_read_lock();
+	ret = batadv_slap_is_master(bat_priv);
+	rcu_read_unlock();
+
+	if (is_multicast_ether_addr(ethhdr->h_dest))
+		goto out;
+
+	orig = batadv_transtable_search(bat_priv, ethhdr->h_source,
+					ethhdr->h_dest, vid);
+	if (!orig)
+		goto out;
+
+	if (batadv_slap_orig_same_master(bat_priv, orig)) {
+		ret = false;
+		goto out;
+	}
+
+	ret = true;
+out:
+	batadv_orig_node_put(orig);
+	return ret;
+}
+
+static bool batadv_slap_shortcut(struct batadv_priv *bat_priv, u8 const *dest)
+{
+	struct batadv_orig_node *orig_node = NULL;
+	bool ret = false;
+
+	orig_node = batadv_orig_hash_find(bat_priv, dest);
+	if (!orig_node)
+		goto out;
+
+	ret = batadv_slap_orig_same_master(bat_priv, orig_node);
+out:
+	batadv_orig_node_put(orig_node);
+	return ret;
+}
+
+/**
+ * batadv_slap_tt_global_seen() - Check if global TT entry is actually seen by
+ * any node in same SLAP segment
+ * Takes rcu_read_lock()
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tt: Global TT entry to check
+ * @return: True if a originator on same SLAP segment has seen this entry,
+ * false otherwise
+ */
+static bool batadv_slap_tt_global_seen(struct batadv_priv *bat_priv,
+				       struct batadv_tt_global_entry *tt)
+{
+	struct batadv_tt_orig_list_entry *orig_entry;
+	struct batadv_orig_node *orig;
+	bool ret = false;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(orig_entry, &tt->orig_list, list) {
+		orig = orig_entry->orig_node;
+		if (!batadv_slap_orig_same_master(bat_priv, orig))
+			continue;
+		if (!(orig_entry->flags & BATADV_TT_CLIENT_SEEN))
+			continue;
+		ret = true;
+		break;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/**
+ * batadv_slap_tt_roam() - Check if TT roam from another SLAP segment
+ * Takes rcu_read_lock()
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tt: Global TT entry to check
+ * @return: True if entry was seen on another SLAP segment, false otherwise
+ */
+static bool batadv_slap_tt_roam(struct batadv_priv *bat_priv,
+				struct batadv_tt_global_entry *tt)
+{
+	struct batadv_tt_orig_list_entry *orig_entry;
+	struct batadv_orig_node *orig;
+	bool ret = false;
+
+	if (!tt)
+		return false;
+
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(orig_entry, &tt->orig_list, list) {
+		orig = orig_entry->orig_node;
+		if (batadv_slap_orig_same_master(bat_priv, orig))
+			continue;
+		ret = true;
+		break;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+/**
+ * batadv_slap_tt_global_add() - A new global TT has been added, check if it
+ * comes from same segment, if so create new shallow local TT if needed
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tt: The global TT that is being added
+ * @orig: The Originator seeing this client locally
+ * @return: False if matching local TT removal should not happen, true otherwise
+ */
+static bool batadv_slap_tt_global_add(struct batadv_priv *bat_priv,
+				      struct batadv_tt_global_entry *tt,
+				      struct batadv_orig_node *orig)
+{
+	struct batadv_tt_local_entry *local;
+	u16 local_flags = 0;
+
+	if (is_multicast_ether_addr(tt->common.addr)) {
+		WARN_ONCE(1, "Adding global entry with src multicast addr "
+			     "(orig %pM addr %pM vid %d)\n",
+			     orig->orig, tt->common.addr, tt->common.vid);
+		return true;
+	}
+
+	if (!batadv_slap_orig_same_master(bat_priv, orig))
+		return true;
+
+	local = batadv_tt_local_hash_find(bat_priv, tt->common.addr,
+					  tt->common.vid);
+	if (local)
+		local_flags = local->common.flags;
+	batadv_tt_local_entry_put(local);
+
+	/* The client is already seen locally, keep our TL */
+	if (local_flags & BATADV_TT_CLIENT_SEEN)
+		return false;
+
+	/* All SLAP segment ref expired, remove our TL */
+	if (!batadv_slap_tt_global_seen(bat_priv, tt))
+		return true;
+
+	/* Another SLAP node detect a client, add a shallow reference to it
+	 * locally, so that shortcut through this node could happen to reach it
+	 */
+	slap_debug(bat_priv, "New SLAP shortcut for %pM\n", tt->common.addr);
+
+	batadv_tt_local_add(bat_priv->soft_iface, tt->common.addr,
+			    tt->common.vid, 0, 0);
+
+	return false;
+}
+
+/**
+ * batadv_slap_tt_global_del() - Deleting an existing global TT, if there is no
+ * more same SLAP segment node actually seeing this client remove our TL entry
+ *
+ * @bat_priv: The bat_priv with all the soft interface information
+ * @tt: Global TT entry being removed
+ * @orig: Originator node removing this TT entry
+ */
+static void batadv_slap_tt_global_del(struct batadv_priv *bat_priv,
+				      struct batadv_tt_global_entry *tt,
+				      struct batadv_orig_node *orig)
+{
+	struct batadv_tt_local_entry *local;
+	u16 local_flags = 0;
+
+	if (!tt)
+		return;
+
+	if (!batadv_slap_orig_same_master(bat_priv, orig))
+		return;
+
+	local = batadv_tt_local_hash_find(bat_priv, tt->common.addr,
+					  tt->common.vid);
+	if (local)
+		local_flags = local->common.flags;
+	batadv_tt_local_entry_put(local);
+
+	/* The client is still seen locally, keep our TL */
+	if (local_flags & BATADV_TT_CLIENT_SEEN)
+		return;
+
+	/* Entry is still seen by a SLAP node, keep our TL */
+	if (batadv_slap_tt_global_seen(bat_priv, tt))
+		return;
+
+	/* No more hard reference for this client in our SLAP segment, let's
+	 * remove our shallow ref.
+	 * TODO do we need roaming info here ?
+	 */
+	slap_debug(bat_priv, "Del SLAP shortcut for %pM\n", tt->common.addr);
+	batadv_tt_local_remove(bat_priv, tt->common.addr,
+			       tt->common.vid, "No more SLAP ref",
+			       false);
+}
+
+/**
+ * batadv_slap_tt_local_add() - Add a new local TT entry
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tl: New local tt entry
+ * @ifindex: Index receiving packet
+ * @return: false if roaming notification should be prevented, true otherwise
+ */
+static bool batadv_slap_tt_local_add(struct batadv_priv *bat_priv,
+				     struct batadv_tt_local_entry *tl,
+				     struct batadv_tt_global_entry *tg,
+				     int ifindex)
+{
+	if (!ifindex)
+		return true;
+
+	tl->common.flags |= BATADV_TT_CLIENT_SEEN;
+	return batadv_slap_tt_roam(bat_priv, tg);
+}
+
+/**
+ * batadv_slap_tt_local_del() - Remove a local TT entry
+ *
+ * @bat_priv: The bat priv with all the soft interface information
+ * @tl: The local tt entry to delete
+ * @return: True if local TT entry should be removed, false otherwise (still
+ * seen in SLAP segment)
+ */
+static bool batadv_slap_tt_local_del(struct batadv_priv *bat_priv,
+				     struct batadv_tt_local_entry *tl)
+{
+	struct batadv_tt_global_entry *tg;
+	bool shared;
+
+	tl->common.flags &= ~BATADV_TT_CLIENT_SEEN;
+
+	tg = batadv_tt_global_hash_find(bat_priv, tl->common.addr,
+					tl->common.vid);
+	if (!tg)
+		return true;
+
+	shared = batadv_slap_tt_global_seen(bat_priv, tg);
+	batadv_tt_global_entry_put(tg);
+
+	if (!shared)
+		return true;
+
+	return false;
+}
+
+/**
+ * batadv_slap_orig_dump() - Dump SLAP specific information for a specific
+ * originator.
+ * @bat_priv: The bat priv with all the soft interface information
+ * @info: NL message info (not used here)
+ * @attr: NL message attributes (not used here)
+ * @skb: Current originator NL message
+ * @data: Here this is the originator being dumped
+ */
+static void batadv_slap_orig_dump(struct batadv_priv *bat_priv,
+				  struct genl_info *info,
+				  struct nlattr **attr,
+				  struct sk_buff *skb,
+				  void *data)
+{
+	struct batadv_slap_segid *segid;
+	struct batadv_orig_node *orig;
+
+	orig = data;
+
+	rcu_read_lock();
+	segid = rcu_dereference(orig->slap_segid);
+	nla_put(skb,
+		BATADV_ATTR_FBX_SLAP_MASTER_ADDRESS,
+		ETH_ALEN,
+		segid->addr);
+	rcu_read_unlock();
+}
+
+/**
+ * Enable SLAP on interface
+ *
+ * @bat_priv: batadv instance
+ * @ifindex: Interface index to activate SLAP on, if 0 disable SLAP
+ */
+static int batadv_slap_set_iface(struct batadv_priv *bat_priv, struct net *net,
+				 int ifindex)
+{
+	struct batadv_hard_iface *hard_iface = NULL;
+	struct net_device *hard_dev = NULL;
+	int ret = -EINVAL;
+
+	if (ifindex) {
+		hard_dev = dev_get_by_index(net, ifindex);
+		if (!hard_dev)
+			goto out;
+		hard_iface = batadv_hardif_get_by_netdev(hard_dev);
+		if (!hard_iface)
+			goto out;
+	}
+
+	/*
+	 * locking bh is not strictly needed here, but slap_lock is also used to
+	 * protect master that needs it
+	 */
+	spin_lock_bh(&bat_priv->slap_lock);
+	if (bat_priv->slap_iface && hard_iface) {
+		spin_unlock_bh(&bat_priv->slap_lock);
+		ret = -EBUSY;
+		goto out;
+	}
+	rcu_assign_pointer(bat_priv->slap_iface, hard_iface);
+	spin_unlock_bh(&bat_priv->slap_lock);
+
+	ret = 0;
+	if (!hard_iface)
+		goto out;
+
+	slap_debug(bat_priv, "Enable SLAP on %s\n",
+		   dev_name(&hard_iface->net_dev->dev));
+
+	batadv_slap_start_announce(bat_priv);
+
+out:
+	batadv_hardif_put(hard_iface);
+	dev_put(hard_dev);
+	return ret;
+}
+
+/**
+ * Set SLAP prio
+ *
+ * @bat_priv: batadv instance
+ * @prio: New SLAP prio
+ */
+static void batadv_slap_set_prio(struct batadv_priv *bat_priv, u32 prio)
+{
+	struct batadv_slap_id *oid, *id;
+	bool cur_master;
+
+	id = kmalloc(sizeof(*id), GFP_KERNEL);
+	if (!id)
+		return;
+
+	spin_lock_bh(&bat_priv->slap_lock);
+	cur_master = batadv_slap_is_master(bat_priv);
+	oid = slap_id_get_rcu(bat_priv);
+	batadv_slap_id_init(id, bat_priv, prio, oid->addr);
+	slap_debug(bat_priv, "New SLAP ID %u/%pM\n", id->prio, id->addr);
+	rcu_assign_pointer(bat_priv->slap_id, id);
+	if (cur_master || batadv_slap_id_is_new_master(bat_priv, id))
+		_batadv_slap_set_master(bat_priv, id);
+	spin_unlock_bh(&bat_priv->slap_lock);
+
+	batadv_slap_id_put(oid);
+}
+
+/**
+ * batadv_slap_mesh_parse() - Set SLAP specific mesh information
+ * @bat_priv: bat priv with all the soft interface information
+ * @info: NL message info
+ * @attrs: FBX specific NL attributes to set
+ */
+static void batadv_slap_mesh_parse(struct batadv_priv *bat_priv,
+				   struct genl_info *info,
+				   struct nlattr **attrs)
+{
+	int ifindex;
+	u32 prio;
+
+	if (!info || !attrs)
+		return;
+
+	if (attrs[BATADV_ATTR_FBX_SLAP_IFINDEX]) {
+		ifindex = nla_get_u32(attrs[BATADV_ATTR_FBX_SLAP_IFINDEX]);
+		batadv_slap_set_iface(bat_priv, genl_info_net(info), ifindex);
+	}
+
+	if (attrs[BATADV_ATTR_FBX_SLAP_PRIO]) {
+		prio = nla_get_u32(attrs[BATADV_ATTR_FBX_SLAP_PRIO]);
+		batadv_slap_set_prio(bat_priv, prio);
+	}
+}
+
+/**
+ * batadv_slap_mesh_fill() - Get SLAP specific mesh information
+ * @bat_priv: bat priv with all the soft interface information
+ * @skb: NL response
+ */
+static void batadv_slap_mesh_fill(struct batadv_priv *bat_priv,
+				  struct sk_buff *skb)
+{
+	struct batadv_hard_iface *slap;
+	struct batadv_slap_id *master;
+	struct batadv_slap_id *local;
+
+	if (!skb)
+		return;
+
+	rcu_read_lock();
+	master = slap_master_get_rcu(bat_priv);
+	local = slap_id_get_rcu(bat_priv);
+	nla_put(skb,
+		BATADV_ATTR_FBX_SLAP_MASTER_ADDRESS,
+		ETH_ALEN,
+		master->addr);
+	nla_put_u32(skb,
+		    BATADV_ATTR_FBX_SLAP_MASTER_PRIO,
+		    master->prio);
+	nla_put_u32(skb,
+		    BATADV_ATTR_FBX_SLAP_PRIO,
+		    local->prio);
+
+	slap = rcu_dereference(bat_priv->slap_iface);
+	if (slap)
+		nla_put_u32(skb,
+			    BATADV_ATTR_FBX_SLAP_IFINDEX,
+			    slap->net_dev->ifindex);
+	rcu_read_unlock();
+}
+
+/**
+ * batadv_slap_mesh_nl() - Do SLAP softif related NL work
+ * @bat_priv: bat priv with all the soft interface information
+ * @info: NL message info
+ * @attrs: FBX specific NL attributes to set
+ * @skb: NL response
+ * @data: Callback specific data, not used here
+ */
+static void batadv_slap_mesh_nl(struct batadv_priv *bat_priv,
+				struct genl_info *info,
+				struct nlattr **attrs,
+				struct sk_buff *skb,
+				void *data)
+{
+	batadv_slap_mesh_parse(bat_priv, info, attrs);
+	batadv_slap_mesh_fill(bat_priv, skb);
+}
+
+/**
+ * batadv_slap_new_priv: init SLAP specific data for a bat_priv
+ * @bat_priv: the bat_priv instance to init SLAP for
+ */
+static int batadv_slap_new_priv(struct batadv_priv *bat_priv)
+{
+	char const *batdev = dev_name(&bat_priv->soft_iface->dev);
+	struct batadv_fbx_slap_packet *slap_pkt;
+	struct batadv_slap_id *id;
+	struct sk_buff *skb;
+	u8 addr[ETH_ALEN];
+	size_t size;
+
+	id = kmalloc(sizeof(*bat_priv->slap_id), GFP_KERNEL);
+	if (!id)
+		goto err;
+
+	/* TODO batdev NULL here */
+	bat_priv->slap_wq = alloc_workqueue("%s-slap-wq", 0, 0, batdev);
+	if (!bat_priv->slap_wq)
+		goto slap_id_free;
+
+	size = ETH_HLEN + NET_IP_ALIGN + BATADV_FBX_SLAP_HLEN;
+	skb = dev_alloc_skb(size);
+	if (!skb)
+		goto workqueue_free;
+
+	skb_reserve(skb, ETH_HLEN + NET_IP_ALIGN);
+	slap_pkt = skb_put_zero(skb, BATADV_FBX_SLAP_HLEN);
+	slap_pkt->hdr.packet_type = BATADV_FBX;
+	slap_pkt->hdr.version = BATADV_COMPAT_VERSION;
+	slap_pkt->hdr.subtype = BATADV_FBX_SUB_SLAP;
+	rcu_assign_pointer(bat_priv->slap_skb, skb);
+
+	ether_addr_copy(addr, bat_priv->soft_iface->dev_addr);
+	batadv_slap_id_init(id, bat_priv, SLAP_PRIO_DEFAULT, addr);
+
+	rcu_assign_pointer(bat_priv->slap_id, id);
+	rcu_assign_pointer(bat_priv->slap_master, id);
+	rcu_assign_pointer(bat_priv->slap_iface, NULL);
+	spin_lock_init(&bat_priv->slap_lock);
+	bat_priv->stp_guard_stop = 0;
+	batadv_fbx_tvlv_handler_register(bat_priv,
+					 BATADV_FBX_TVLV_SLAP_MASTER,
+					 BATADV_FBX_TVLV_SLAP_VERSION,
+					 batadv_slap_ogm_master_recv, NULL);
+	return 0;
+
+workqueue_free:
+	destroy_workqueue(bat_priv->slap_wq);
+slap_id_free:
+	kfree(id);
+err:
+	return -1;
+}
+
+/**
+ * batadv_slap_free_priv: free SLAP specific data of a bat_priv
+ * @bat_priv: the bat_priv instance to clean SLAP for
+ */
+static void batadv_slap_free_priv(struct batadv_priv *bat_priv)
+{
+	struct batadv_slap_id *id, *master;
+	batadv_fbx_tvlv_handler_unregister(bat_priv,
+					   BATADV_FBX_TVLV_SLAP_MASTER,
+					   BATADV_FBX_TVLV_SLAP_VERSION);
+	rcu_read_lock();
+	master = slap_master_get_rcu(bat_priv);
+	id = slap_id_get_rcu(bat_priv);
+	if (id != master)
+		batadv_slap_id_force_expire(bat_priv, master);
+	rcu_read_unlock();
+	flush_workqueue(bat_priv->slap_wq);
+	destroy_workqueue(bat_priv->slap_wq);
+	batadv_slap_id_put(bat_priv->slap_id);
+	batadv_fbx_tvlv_container_unregister(bat_priv,
+					    BATADV_FBX_TVLV_SLAP_MASTER,
+					    BATADV_FBX_TVLV_SLAP_VERSION);
+	kfree_skb(bat_priv->slap_skb);
+}
+
+/**
+ * batadv_slap_init: module init, register the SLAP packet receive
+ * handler (takes no argument; per-bat_priv setup is in new_priv)
+ */
+static int __init batadv_slap_init(void)
+{
+	BUILD_BUG_ON(sizeof(struct batadv_fbx_slap_packet) != 12);
+	batadv_fbx_recv_handler_register(BATADV_FBX_SUB_SLAP,
+					 batadv_slap_recv_packet);
+	return 0;
+}
+
+/**
+ * batadv_slap_exit: module exit, unregister the SLAP packet receive
+ * handler (takes no argument; per-bat_priv teardown is in free_priv)
+ */
+static void __exit batadv_slap_exit(void)
+{
+	batadv_fbx_recv_handler_unregister(BATADV_FBX_SUB_SLAP);
+}
+
+struct batadv_fbx_module_ops const batadv_slap_ops = {
+	.init = batadv_slap_init,
+	.exit = batadv_slap_exit,
+	.new_priv = batadv_slap_new_priv,
+	.free_priv = batadv_slap_free_priv,
+	.neigh_init = batadv_slap_neigh_init,
+	.neigh_release = batadv_slap_neigh_release,
+	.orig_init = batadv_slap_orig_init,
+	.orig_release = batadv_slap_orig_release,
+	.primary_update = batadv_slap_primary_update,
+	.tt_local_add = batadv_slap_tt_local_add,
+	.tt_local_del = batadv_slap_tt_local_del,
+	.tt_global_add = batadv_slap_tt_global_add,
+	.tt_global_del = batadv_slap_tt_global_del,
+	.shortcut = batadv_slap_shortcut,
+	.check_skb_rx = batadv_slap_check_skb_rx,
+	.check_skb_tx = batadv_slap_check_skb_tx,
+};
+
+struct batadv_fbx_nl_ops const batadv_slap_nl_ops[] = {
+	{
+		.cmd = BATADV_CMD_SET_MESH,
+		.hdl = batadv_slap_mesh_nl,
+	},
+	{
+		.cmd = BATADV_CMD_GET_MESH,
+		.hdl = batadv_slap_mesh_nl,
+	},
+	{
+		.cmd = BATADV_CMD_GET_ORIGINATORS,
+		.hdl = batadv_slap_orig_dump,
+	},
+};
+
+struct batadv_fbx_module const batadv_slap_module = {
+	.name = "slap",
+	.ops = &batadv_slap_ops,
+	.nl_ops = batadv_slap_nl_ops,
+	.nl_ops_sz = ARRAY_SIZE(batadv_slap_nl_ops),
+};
diff -Nruw linux-6.13.12-fbx/net/fbxatm./Kconfig linux-6.13.12-fbx/net/fbxatm/Kconfig
--- linux-6.13.12-fbx/net/fbxatm./Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxatm/Kconfig	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,28 @@
+menuconfig FBXATM
+	tristate "Freebox Asynchronous Transfer Mode (ATM)"
+
+if FBXATM
+
+config FBXATM_REMOTE
+	bool
+
+choice
+	prompt "mode"
+	default FBXATM_STACK
+
+config FBXATM_STACK
+	bool "standard"
+
+config FBXATM_REMOTE_STUB
+	bool "remote stub"
+	select FBXATM_REMOTE
+
+endchoice
+
+config FBXATM_REMOTE_DRIVER
+	tristate "remote fbxatm driver"
+	depends on FBXATM_STACK
+	select FBXATM_REMOTE
+	select OF
+
+endif
diff -Nruw linux-6.13.12-fbx/net/fbxatm./Makefile linux-6.13.12-fbx/net/fbxatm/Makefile
--- linux-6.13.12-fbx/net/fbxatm./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxatm/Makefile	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,18 @@
+obj-$(CONFIG_FBXATM) += fbxatm.o
+obj-$(CONFIG_FBXATM_REMOTE) += fbxatm_remote.o
+
+fbxatm-y := fbxatm_procfs.o fbxatm_sysfs.o
+
+ifeq ($(CONFIG_FBXATM_STACK),y)
+fbxatm-y += 	fbxatm_core.o	\
+		fbxatm_2684.o	\
+		fbxatm_dev.o	\
+		crc10.o
+fbxatm-$(CONFIG_PPP) += fbxatm_pppoa.o
+endif
+
+ifeq ($(CONFIG_FBXATM_REMOTE_STUB),y)
+fbxatm-y += fbxatm_remote_stub.o
+endif
+
+obj-$(CONFIG_FBXATM_REMOTE_DRIVER) += fbxatm_remote_driver.o
diff -Nruw linux-6.13.12-fbx/net/fbxatm./crc10.c linux-6.13.12-fbx/net/fbxatm/crc10.c
--- linux-6.13.12-fbx/net/fbxatm./crc10.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxatm/crc10.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,49 @@
+#include <linux/types.h>
+#include "fbxatm_priv.h"
+
+static const u16 crc10_table[256] = {
+	0x0000, 0x0233, 0x0255, 0x0066, 0x0299, 0x00aa, 0x00cc, 0x02ff,
+	0x0301, 0x0132, 0x0154, 0x0367, 0x0198, 0x03ab, 0x03cd, 0x01fe,
+	0x0031, 0x0202, 0x0264, 0x0057, 0x02a8, 0x009b, 0x00fd, 0x02ce,
+	0x0330, 0x0103, 0x0165, 0x0356, 0x01a9, 0x039a, 0x03fc, 0x01cf,
+	0x0062, 0x0251, 0x0237, 0x0004, 0x02fb, 0x00c8, 0x00ae, 0x029d,
+	0x0363, 0x0150, 0x0136, 0x0305, 0x01fa, 0x03c9, 0x03af, 0x019c,
+	0x0053, 0x0260, 0x0206, 0x0035, 0x02ca, 0x00f9, 0x009f, 0x02ac,
+	0x0352, 0x0161, 0x0107, 0x0334, 0x01cb, 0x03f8, 0x039e, 0x01ad,
+	0x00c4, 0x02f7, 0x0291, 0x00a2, 0x025d, 0x006e, 0x0008, 0x023b,
+	0x03c5, 0x01f6, 0x0190, 0x03a3, 0x015c, 0x036f, 0x0309, 0x013a,
+	0x00f5, 0x02c6, 0x02a0, 0x0093, 0x026c, 0x005f, 0x0039, 0x020a,
+	0x03f4, 0x01c7, 0x01a1, 0x0392, 0x016d, 0x035e, 0x0338, 0x010b,
+	0x00a6, 0x0295, 0x02f3, 0x00c0, 0x023f, 0x000c, 0x006a, 0x0259,
+	0x03a7, 0x0194, 0x01f2, 0x03c1, 0x013e, 0x030d, 0x036b, 0x0158,
+	0x0097, 0x02a4, 0x02c2, 0x00f1, 0x020e, 0x003d, 0x005b, 0x0268,
+	0x0396, 0x01a5, 0x01c3, 0x03f0, 0x010f, 0x033c, 0x035a, 0x0169,
+	0x0188, 0x03bb, 0x03dd, 0x01ee, 0x0311, 0x0122, 0x0144, 0x0377,
+	0x0289, 0x00ba, 0x00dc, 0x02ef, 0x0010, 0x0223, 0x0245, 0x0076,
+	0x01b9, 0x038a, 0x03ec, 0x01df, 0x0320, 0x0113, 0x0175, 0x0346,
+	0x02b8, 0x008b, 0x00ed, 0x02de, 0x0021, 0x0212, 0x0274, 0x0047,
+	0x01ea, 0x03d9, 0x03bf, 0x018c, 0x0373, 0x0140, 0x0126, 0x0315,
+	0x02eb, 0x00d8, 0x00be, 0x028d, 0x0072, 0x0241, 0x0227, 0x0014,
+	0x01db, 0x03e8, 0x038e, 0x01bd, 0x0342, 0x0171, 0x0117, 0x0324,
+	0x02da, 0x00e9, 0x008f, 0x02bc, 0x0043, 0x0270, 0x0216, 0x0025,
+	0x014c, 0x037f, 0x0319, 0x012a, 0x03d5, 0x01e6, 0x0180, 0x03b3,
+	0x024d, 0x007e, 0x0018, 0x022b, 0x00d4, 0x02e7, 0x0281, 0x00b2,
+	0x017d, 0x034e, 0x0328, 0x011b, 0x03e4, 0x01d7, 0x01b1, 0x0382,
+	0x027c, 0x004f, 0x0029, 0x021a, 0x00e5, 0x02d6, 0x02b0, 0x0083,
+	0x012e, 0x031d, 0x037b, 0x0148, 0x03b7, 0x0184, 0x01e2, 0x03d1,
+	0x022f, 0x001c, 0x007a, 0x0249, 0x00b6, 0x0285, 0x02e3, 0x00d0,
+	0x011f, 0x032c, 0x034a, 0x0179, 0x0386, 0x01b5, 0x01d3, 0x03e0,
+	0x021e, 0x002d, 0x004b, 0x0278, 0x0087, 0x02b4, 0x02d2, 0x00e1,
+};
+
+static u16 crc10_byte(u16 crc, const u8 c)
+{
+	return ((crc << 8) & 0x3ff) ^ crc10_table[(crc >> 2) & 0xff] ^ c;
+}
+
+u16 crc10(u16 crc, const u8 *buffer, size_t len)
+{
+	while (len--)
+		crc = crc10_byte(crc, *buffer++);
+	return crc;
+}
diff -Nruw linux-6.13.12-fbx/net/fbxatm./fbxatm_2684.c linux-6.13.12-fbx/net/fbxatm/fbxatm_2684.c
--- linux-6.13.12-fbx/net/fbxatm./fbxatm_2684.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxatm/fbxatm_2684.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,851 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include <linux/rtnetlink.h>
+#include <linux/pkt_sched.h>
+#include <linux/net.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <linux/proc_fs.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <linux/fbxatm_dev.h>
+
+#include "fbxatm_priv.h"
+
+#define PFX	"fbxatm_2684: "
+
+static LIST_HEAD(fbxatm_2684_dev_list);
+static DEFINE_MUTEX(fbxatm_2684_mutex);
+
+#define LLC_NEEDED_HEADROOM		10
+#define VCMUX_BRIDGED_NEEDED_HEADROOM	2
+
+#define LLC			0xaa, 0xaa, 0x03
+#define SNAP_BRIDGED		0x00, 0x80, 0xc2
+#define SNAP_ROUTED		0x00, 0x00, 0x00
+#define PID_ETHERNET_NOFCS	0x00, 0x07
+
+static u8 llc_bridged_802d3_pad[] = { LLC, SNAP_BRIDGED, PID_ETHERNET_NOFCS,
+				      0, 0 };
+static u8 llc_snap_routed[] = { LLC, SNAP_ROUTED };
+
+/*
+ * private data for 2684 vcc
+ */
+struct fbxatm_2684_vcc;
+
+struct fbxatm_2684_queue {
+	struct fbxatm_vcc		*vcc;
+	unsigned int			queue_idx;
+	struct fbxatm_2684_vcc		*priv;
+};
+
+struct fbxatm_2684_vcc {
+	struct fbxatm_2684_queue	queues[FBXATM_2684_MAX_VCC];
+	size_t				queue_count;
+
+	struct net_device		*dev;
+	struct fbxatm_2684_vcc_params	params;
+
+	spinlock_t			tx_lock;
+
+	struct rtnl_link_stats64	stats;
+
+	struct list_head		next;
+};
+
+static uint32_t tel_last_ip;
+
+static void warn_if_tel(struct fbxatm_2684_vcc *vcc, struct sk_buff *skb)
+{
+	struct iphdr *iph;
+	struct udphdr *udph = NULL;
+
+	iph = (struct iphdr *)skb->data;
+
+	if (iph->protocol != IPPROTO_UDP)
+		return;
+
+	if (skb_headlen(skb) < (iph->ihl * 4) + sizeof (struct udphdr))
+		return;
+
+	udph = (struct udphdr *)((unsigned char *)iph + (iph->ihl * 4));
+	if (ntohs(udph->dest) >= 5004 && ntohs(udph->dest) <= 5020) {
+		static u32 last_ip;
+		static unsigned long last_time;
+		unsigned long now;
+
+		now = jiffies;
+		if ((last_ip == iph->saddr &&
+		     (!last_time || time_before(now, last_time + 2 * HZ)))) {
+			static unsigned int consecutive;
+			consecutive++;
+			if (consecutive > 5) {
+				tel_last_ip = iph->saddr;
+				consecutive = 0;
+			}
+		}
+
+		last_time = now;
+		last_ip = iph->saddr;
+	}
+}
+
+/*
+ * procfs read callback
+ */
+static int tel_proc_show(struct seq_file *m, void *v)
+{
+	seq_printf(m, "%pI4\n", &tel_last_ip);
+	return 0;
+}
+
+static ssize_t tel_proc_write(struct file *file, const char __user *ubuf,
+			      size_t len, loff_t *off)
+{
+	tel_last_ip = 0;
+	return len;
+}
+
+static int tel_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, tel_proc_show, pde_data(inode));
+}
+
+static const struct proc_ops tel_proc_fops = {
+	.proc_open	= tel_proc_open,
+	.proc_read	= seq_read,
+	.proc_write	= tel_proc_write,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= single_release,
+};
+
+/*
+ * fbxatm stack receive callback, called from softirq
+ */
+static void vcc_rx_callback(struct sk_buff *skb, void *data)
+{
+	struct fbxatm_2684_queue *queue;
+	struct fbxatm_2684_vcc *priv;
+
+	queue = (struct fbxatm_2684_queue *)data;
+	priv = queue->priv;
+
+	switch (priv->params.encap) {
+	case FBXATM_E2684_VCMUX:
+		switch (priv->params.payload) {
+		case FBXATM_P2684_BRIDGE:
+			/* assume 802.3, need to remove 2 bytes zero
+			 * padding */
+			if (skb->len < 2 || memcmp(skb->data, "\0\0", 2))
+				goto drop;
+			skb_pull(skb, 2);
+			skb->protocol = eth_type_trans(skb, priv->dev);
+			memset(skb->data, 0, 2);
+			break;
+
+		case FBXATM_P2684_ROUTED:
+			/* kludge to detect ipv6 or ipv4 */
+			if (skb->len && (skb->data[0] & 0xf0) == 0x60)
+				skb->protocol = htons(ETH_P_IPV6);
+			else
+				skb->protocol = htons(ETH_P_IP);
+			skb_reset_mac_header(skb);
+			break;
+		}
+		break;
+
+	case FBXATM_E2684_LLC:
+		switch (priv->params.payload) {
+		case FBXATM_P2684_BRIDGE:
+		{
+			/* recognize only 802.3 */
+			if (skb->len < sizeof(llc_bridged_802d3_pad))
+				goto drop;
+
+			if (memcmp(skb->data, llc_bridged_802d3_pad, 7))
+				goto drop;
+
+			/* don't check the last bytes of pid, it can
+			 * be 1 or 7 depending on the presence of
+			 * FCS */
+			skb_pull(skb, sizeof(llc_bridged_802d3_pad));
+			skb->protocol = eth_type_trans(skb, priv->dev);
+			break;
+		}
+
+		case FBXATM_P2684_ROUTED:
+		{
+			u16 proto;
+			unsigned int offset;
+
+			if (skb->len < sizeof(llc_snap_routed) + 2)
+				goto drop;
+
+			offset = sizeof (llc_snap_routed);
+			proto = skb->data[offset] << 8;
+			proto |= skb->data[offset + 1];
+
+			skb->protocol = proto;
+			skb_pull(skb, sizeof(llc_snap_routed) + 2);
+			skb_reset_mac_header(skb);
+			break;
+		}
+		}
+		break;
+	}
+
+	skb->dev = priv->dev;
+	skb->pkt_type = PACKET_HOST;
+	priv->stats.rx_bytes += skb->len;
+	priv->stats.rx_packets++;
+
+	if (priv->params.encap == FBXATM_E2684_VCMUX &&
+	    priv->params.payload == FBXATM_P2684_ROUTED &&
+	    queue->vcc->vpi == 8 && queue->vcc->vci == 35)
+		warn_if_tel(priv, skb);
+
+	netif_rx(skb);
+	return;
+
+drop:
+	priv->stats.rx_errors++;
+	dev_kfree_skb(skb);
+}
+
+/*
+ * fbxatm stack tx done callback, called from softirq
+ */
+static void vcc_tx_done_callback(void *data)
+{
+	struct fbxatm_2684_queue *queue;
+	struct fbxatm_2684_vcc *priv;
+
+	queue = (struct fbxatm_2684_queue *)data;
+	priv = queue->priv;
+
+	spin_lock(&priv->tx_lock);
+	if (__netif_subqueue_stopped(priv->dev, queue->queue_idx))
+		netif_wake_subqueue(priv->dev, queue->queue_idx);
+	spin_unlock(&priv->tx_lock);
+}
+
+/*
+ * fbxatm stack callback when vcc link changes
+ */
+static void vcc_link_change(void *data, int link,
+			    unsigned int rx_cell_rate,
+			    unsigned int tx_cell_rate)
+{
+	struct fbxatm_2684_queue *queue;
+	struct fbxatm_2684_vcc *priv;
+
+	queue = (struct fbxatm_2684_queue *)data;
+	priv = queue->priv;
+
+	if (link)
+		netif_carrier_on(priv->dev);
+	else
+		netif_carrier_off(priv->dev);
+}
+
+/*
+ * vcc user ops, callback from fbxatm stack
+ */
+static const struct fbxatm_vcc_uops fbxatm_2684_uops = {
+	.link_change	= vcc_link_change,
+	.rx_pkt		= vcc_rx_callback,
+	.tx_done	= vcc_tx_done_callback,
+};
+
+/*
+ * netdevice ->ndo_select_queue() callback
+ */
+static u16 fbxatm_2684_netdev_select_queue(struct net_device *dev,
+					   struct sk_buff *skb,
+					   struct net_device *sb_dev)
+{
+	/* force lower band to avoid kernel doing round robin */
+	return 0;
+}
+
+/*
+ * netdevice xmit callback
+ */
+static int fbxatm_2684_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct fbxatm_2684_vcc *priv;
+	int ret, queue_idx;
+	unsigned int needed_headroom;
+	struct fbxatm_2684_queue *queue;
+	unsigned int len;
+
+	priv = netdev_priv(dev);
+	queue_idx = skb_get_queue_mapping(skb);
+	queue = &priv->queues[queue_idx];
+
+	/*
+	 * check if we have to expand skb head
+	 */
+	needed_headroom = 0;
+	if (priv->params.encap == FBXATM_E2684_VCMUX) {
+		if (priv->params.payload == FBXATM_P2684_BRIDGE)
+			needed_headroom = VCMUX_BRIDGED_NEEDED_HEADROOM;
+	} else
+		needed_headroom = LLC_NEEDED_HEADROOM;
+
+	if (skb_headroom(skb) < needed_headroom) {
+		struct sk_buff *nskb;
+		unsigned int new_head;
+
+		new_head = skb_headroom(skb) + needed_headroom;
+		nskb = skb_realloc_headroom(skb, new_head);
+		dev_kfree_skb(skb);
+		if (!nskb)
+			goto dropped;
+		skb = nskb;
+	}
+
+	switch (priv->params.encap) {
+	case FBXATM_E2684_VCMUX:
+		switch (priv->params.payload) {
+		case FBXATM_P2684_BRIDGE:
+			skb_push(skb, 2);
+			memset(skb->data, 0, 2);
+			break;
+		case FBXATM_P2684_ROUTED:
+			/* nothing to do */
+			break;
+		}
+		break;
+
+	case FBXATM_E2684_LLC:
+		switch (priv->params.payload) {
+		case FBXATM_P2684_BRIDGE:
+			skb_push(skb, sizeof(llc_bridged_802d3_pad));
+			memcpy(skb->data, llc_bridged_802d3_pad,
+			       sizeof(llc_bridged_802d3_pad));
+			break;
+
+		case FBXATM_P2684_ROUTED:
+		{
+			unsigned int offset;
+
+			skb_push(skb, sizeof(llc_snap_routed));
+			memcpy(skb->data, llc_snap_routed,
+			       sizeof(llc_snap_routed));
+
+			offset = sizeof (llc_snap_routed);
+			skb->data[offset] = (skb->protocol >> 8) & 0xff;
+			skb->data[offset + 1] = skb->protocol & 0xff;
+			break;
+		}
+		}
+		break;
+	}
+
+	spin_lock(&priv->tx_lock);
+
+	len = skb->len;
+	ret = fbxatm_send(queue->vcc, skb);
+	if (ret) {
+		/* packet was not sent, queue is full */
+		netif_stop_subqueue(dev, queue_idx);
+		spin_unlock(&priv->tx_lock);
+		WARN_ONCE(1, "fbxatm2684_xmit called with full queue");
+		priv->stats.tx_errors++;
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	priv->stats.tx_bytes += len;
+	priv->stats.tx_packets++;
+
+	/* check if queue is full */
+	if (fbxatm_vcc_queue_full(queue->vcc))
+		netif_stop_subqueue(dev, queue_idx);
+	spin_unlock(&priv->tx_lock);
+
+	return NETDEV_TX_OK;
+
+dropped:
+	priv->stats.tx_errors++;
+	return NETDEV_TX_OK;
+}
+
+/*
+ * netdevice get_stats callback
+ */
+static void
+fbxatm_2684_netdev_get_stats64(struct net_device *dev,
+			       struct rtnl_link_stats64 *stats)
+{
+	struct fbxatm_2684_vcc *priv;
+	priv = netdev_priv(dev);
+	memcpy(stats, &priv->stats, sizeof (*stats));
+}
+
+/*
+ * netdevice setup callback for bridge encap
+ */
+static void setup_bridged(struct net_device *dev)
+{
+	ether_setup(dev);
+}
+
+/*
+ * netdevice setup callback for routed encap
+ */
+static void setup_routed(struct net_device *dev)
+{
+	dev->type		= ARPHRD_PPP;
+	dev->hard_header_len	= 0;
+	dev->mtu		= 1500;
+	dev->addr_len		= 0;
+	dev->tx_queue_len	= 128;
+	dev->flags		= IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
+}
+
+static const struct net_device_ops fbxatm_2684_ops = {
+	.ndo_start_xmit		= fbxatm_2684_netdev_xmit,
+	.ndo_get_stats64	= fbxatm_2684_netdev_get_stats64,
+	.ndo_select_queue	= fbxatm_2684_netdev_select_queue,
+};
+
+/*
+ * sysfs callback, show encapsulation
+ */
+static ssize_t show_encap(struct device *d,
+			  struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_2684_vcc *priv = netdev_priv(to_net_dev(d));
+
+	switch (priv->params.encap) {
+	case FBXATM_E2684_LLC:
+		return sprintf(buf, "llc\n");
+	case FBXATM_E2684_VCMUX:
+	default:
+		return sprintf(buf, "vcmux\n");
+	}
+}
+
+static DEVICE_ATTR(encap, S_IRUGO, show_encap, NULL);
+
+/*
+ * sysfs callback, show payload
+ */
+static ssize_t show_payload(struct device *d,
+			    struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_2684_vcc *priv = netdev_priv(to_net_dev(d));
+
+	switch (priv->params.payload) {
+	case FBXATM_P2684_BRIDGE:
+		return sprintf(buf, "bridge\n");
+	case FBXATM_P2684_ROUTED:
+	default:
+		return sprintf(buf, "routed\n");
+	}
+}
+
+static DEVICE_ATTR(payload, S_IRUGO, show_payload, NULL);
+
+/*
+ * sysfs callback, show vcc id
+ */
+static ssize_t show_vcc(struct device *d,
+			struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_2684_vcc *priv = netdev_priv(to_net_dev(d));
+
+	return sprintf(buf, "%u.%u.%u\n",
+		       priv->queues[0].vcc->adev->ifindex,
+		       priv->queues[0].vcc->vpi, priv->queues[0].vcc->vci);
+}
+
+static DEVICE_ATTR(vcc, S_IRUGO, show_vcc, NULL);
+
+static struct attribute *fbxatm2684_attrs[] = {
+	&dev_attr_encap.attr,
+	&dev_attr_payload.attr,
+	&dev_attr_vcc.attr,
+	NULL
+};
+
+static struct attribute_group fbxatm2684_group = {
+	.name = "fbxatm2684",
+	.attrs = fbxatm2684_attrs,
+};
+
+/*
+ * create sysfs files for 2684 device
+ */
+static int vcc2684_sysfs_register(struct fbxatm_2684_vcc *priv,
+				  struct net_device *dev)
+{
+	int ret;
+
+	ret = sysfs_create_group(&dev->dev.kobj, &fbxatm2684_group);
+	if (ret)
+		goto out1;
+
+	ret = sysfs_create_link(&dev->dev.kobj,
+				&priv->queues[0].vcc->adev->dev.kobj,
+				"fbxatm_dev");
+	if (ret)
+		goto out2;
+
+	return 0;
+
+out2:
+	sysfs_remove_group(&dev->dev.kobj, &fbxatm2684_group);
+out1:
+	return ret;
+}
+
+/*
+ * remove sysfs files for 2684 device
+ */
+static void vcc2684_sysfs_unregister(struct fbxatm_2684_vcc *priv,
+				     struct net_device *dev)
+{
+	sysfs_remove_group(&dev->dev.kobj, &fbxatm2684_group);
+	sysfs_remove_link(&dev->dev.kobj, "fbxatm_dev");
+}
+
+/*
+ * register netdevice & sysfs attribute
+ */
+static int register_2684_netdev(struct fbxatm_2684_vcc *priv,
+				struct net_device *dev)
+{
+	int ret;
+
+	/* hold rtnl while registering netdevice and creating sysfs
+	 * files to avoid race */
+	rtnl_lock();
+
+	if (strchr(dev->name, '%')) {
+		ret = dev_alloc_name(dev, dev->name);
+		if (ret < 0)
+			goto out;
+	}
+
+	ret = register_netdevice(dev);
+	if (ret)
+		goto out;
+
+	ret = vcc2684_sysfs_register(priv, dev);
+	if (ret)
+		goto out_unregister;
+
+	rtnl_unlock();
+	return 0;
+
+out_unregister:
+	unregister_netdevice(dev);
+
+out:
+	rtnl_unlock();
+	return ret;
+}
+
+/*
+ * create a RFC2684 encapsulation on given vcc
+ */
+static int __create_2684_vcc(const struct fbxatm_2684_vcc_params *params)
+{
+	struct fbxatm_2684_vcc *priv;
+	struct fbxatm_vcc *vccs[FBXATM_2684_MAX_VCC];
+	struct net_device *dev = NULL;
+	void (*netdev_setup_cb)(struct net_device *dev);
+	unsigned int headroom;
+	size_t i;
+	int ret;
+
+	/* sanity check */
+	switch (params->encap) {
+	case FBXATM_E2684_VCMUX:
+	case FBXATM_E2684_LLC:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (params->payload) {
+	case FBXATM_P2684_BRIDGE:
+		netdev_setup_cb = setup_bridged;
+		break;
+	case FBXATM_P2684_ROUTED:
+		netdev_setup_cb = setup_routed;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (!params->dev_name[0])
+		return -EINVAL;
+
+	/* bind to vcc */
+	memset(vccs, 0, sizeof (vccs));
+	for (i = 0; i < params->id_count; i++) {
+		struct fbxatm_vcc *vcc;
+
+		vcc = fbxatm_bind_to_vcc(&params->id_list[i],
+					 FBXATM_VCC_USER_2684);
+		if (IS_ERR(vcc)) {
+			ret = PTR_ERR(vcc);
+			goto fail;
+		}
+		vccs[i] = vcc;
+	}
+
+	/* create netdevice */
+	dev = alloc_netdev_mqs(sizeof(*priv), params->dev_name,
+			       NET_NAME_UNKNOWN, netdev_setup_cb,
+			       params->id_count, 1);
+	if (!dev) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	netif_set_real_num_tx_queues(dev, params->id_count);
+	netif_set_real_num_rx_queues(dev, 1);
+
+	priv = netdev_priv(dev);
+	memset(priv, 0, sizeof (*priv));
+	memcpy(&priv->params, params, sizeof (*params));
+	memcpy(dev->name, priv->params.dev_name, IFNAMSIZ);
+
+	spin_lock_init(&priv->tx_lock);
+	priv->dev = dev;
+	for (i = 0; i < params->id_count; i++) {
+		priv->queues[i].vcc = vccs[i];
+		priv->queues[i].queue_idx = i;
+		priv->queues[i].priv = priv;
+	}
+	priv->queue_count = params->id_count;
+
+	if (!is_zero_ether_addr(params->perm_addr))
+		memcpy(dev->perm_addr, params->perm_addr, 6);
+
+	dev->netdev_ops = &fbxatm_2684_ops;
+
+	/* make sure kernel generated packet have correct headroom for
+	 * encapsulation/payload */
+	headroom = 0;
+	for (i = 0; i < params->id_count; i++)
+		headroom = max_t(int, headroom, vccs[i]->adev->tx_headroom);
+	dev->hard_header_len += headroom;
+
+
+	switch (params->encap) {
+	case FBXATM_E2684_VCMUX:
+	default:
+		if (params->payload == FBXATM_P2684_BRIDGE)
+			dev->hard_header_len += VCMUX_BRIDGED_NEEDED_HEADROOM;
+		break;
+	case FBXATM_E2684_LLC:
+		dev->hard_header_len += LLC_NEEDED_HEADROOM;
+		break;
+	}
+
+	ret = register_2684_netdev(priv, dev);
+	if (ret)
+		goto fail;
+
+	if (fbxatm_vcc_link_is_up(vccs[0])) {
+		netif_carrier_on(dev);
+		netif_tx_start_all_queues(dev);
+	} else
+		netif_carrier_off(dev);
+	list_add_tail(&priv->next, &fbxatm_2684_dev_list);
+
+	for (i = 0; i < params->id_count; i++)
+		fbxatm_set_uops(vccs[i], &fbxatm_2684_uops, &priv->queues[i]);
+
+	return 0;
+
+fail:
+	for (i = 0; i < ARRAY_SIZE(vccs); i++) {
+		if (vccs[i])
+			fbxatm_unbind_vcc(vccs[i]);
+	}
+	if (dev)
+		free_netdev(dev);
+	return ret;
+}
+
+/*
+ * find 2684 vcc from id list
+ */
+static struct fbxatm_2684_vcc *__find_2684_vcc(const struct fbxatm_vcc_id *id,
+					       size_t count)
+{
+	struct fbxatm_2684_vcc *priv;
+	size_t i;
+
+	/* find it */
+	list_for_each_entry(priv, &fbxatm_2684_dev_list, next) {
+		for (i = 0; i < priv->queue_count; i++) {
+			struct fbxatm_2684_queue *q;
+			size_t j;
+
+			q = &priv->queues[i];
+
+			for (j = 0; j < count; j++) {
+				if (q->vcc->adev->ifindex == id[j].dev_idx &&
+				    q->vcc->vpi == id[0].vpi &&
+				    q->vcc->vci == id[0].vci)
+					return priv;
+			}
+		}
+	}
+	return NULL;
+}
+
+/*
+ * create a RFC2684 encapsulation on given vcc
+ */
+static int create_2684_vcc(const struct fbxatm_2684_vcc_params *params)
+{
+	int ret;
+
+	mutex_lock(&fbxatm_2684_mutex);
+	ret = __create_2684_vcc(params);
+	mutex_unlock(&fbxatm_2684_mutex);
+	return ret;
+}
+
+/*
+ * remove RFC2684 encapsulation from given vcc
+ */
+static int __remove_2684_vcc(const struct fbxatm_2684_vcc_params *params)
+{
+	struct fbxatm_2684_vcc *priv;
+	size_t i;
+
+	priv = __find_2684_vcc(params->id_list, params->id_count);
+	if (!priv)
+		return -ENOENT;
+
+	/* close netdevice, fbxatm_2684_netdev_xmit cannot be called
+	 * again */
+	rtnl_lock();
+	dev_close(priv->dev);
+	rtnl_unlock();
+
+	for (i = 0; i < priv->queue_count; i++)
+		fbxatm_unbind_vcc(priv->queues[i].vcc);
+	vcc2684_sysfs_unregister(priv, priv->dev);
+	unregister_netdev(priv->dev);
+	list_del(&priv->next);
+	free_netdev(priv->dev);
+	return 0;
+}
+
+/*
+ * remove RFC2684 encapsulation from given vcc
+ */
+static int remove_2684_vcc(const struct fbxatm_2684_vcc_params *params)
+{
+	int ret;
+
+	mutex_lock(&fbxatm_2684_mutex);
+	ret = __remove_2684_vcc(params);
+	mutex_unlock(&fbxatm_2684_mutex);
+	return ret;
+}
+
+/*
+ * 2684 related ioctl handler
+ */
+static int fbxatm_2684_ioctl(struct socket *sock,
+			     unsigned int cmd, void __user *useraddr)
+{
+	int ret;
+
+	ret = 0;
+
+	switch (cmd) {
+	case FBXATM_2684_IOCADD:
+	case FBXATM_2684_IOCDEL:
+	{
+		struct fbxatm_2684_vcc_params params;
+
+		if (copy_from_user(&params, useraddr, sizeof(params)))
+			return -EFAULT;
+
+		if (cmd == FBXATM_2684_IOCADD)
+			ret = create_2684_vcc(&params);
+		else
+			ret = remove_2684_vcc(&params);
+		break;
+	}
+
+	case FBXATM_2684_IOCGET:
+	{
+		struct fbxatm_2684_vcc_params params;
+		struct fbxatm_2684_vcc *priv;
+
+		if (copy_from_user(&params, useraddr, sizeof(params)))
+			return -EFAULT;
+
+		mutex_lock(&fbxatm_2684_mutex);
+		priv = __find_2684_vcc(params.id_list, params.id_count);
+		if (!priv)
+			ret = -ENOENT;
+		else {
+			memcpy(&params, &priv->params, sizeof (params));
+			memcpy(params.dev_name, priv->dev->name, IFNAMSIZ);
+		}
+		mutex_unlock(&fbxatm_2684_mutex);
+
+		if (ret)
+			return ret;
+
+		if (copy_to_user(useraddr, &params, sizeof(params)))
+			return -EFAULT;
+		break;
+	}
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	return ret;
+}
+
+static struct fbxatm_ioctl fbxatm_2684_ioctl_ops = {
+	.handler	= fbxatm_2684_ioctl,
+	.owner		= THIS_MODULE,
+};
+
+int __init fbxatm_2684_init(void)
+{
+	struct proc_dir_entry *root, *proc;
+
+	root = fbxatm_proc_misc_register("tel");
+	if (!root)
+		return -ENOMEM;
+
+	/* tel debug crap */
+	proc = proc_create_data("bad_ip", 0666, root, &tel_proc_fops, NULL);
+	if (!proc)
+		return -ENOMEM;
+
+	fbxatm_register_ioctl(&fbxatm_2684_ioctl_ops);
+	return 0;
+}
+
+void fbxatm_2684_exit(void)
+{
+	fbxatm_unregister_ioctl(&fbxatm_2684_ioctl_ops);
+}
diff -Nruw linux-6.13.12-fbx/net/fbxatm./fbxatm_core.c linux-6.13.12-fbx/net/fbxatm/fbxatm_core.c
--- linux-6.13.12-fbx/net/fbxatm./fbxatm_core.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxatm/fbxatm_core.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,203 @@
+#include <linux/init.h>
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/fbxatm.h>
+#include <linux/fbxatm_dev.h>
+#include <linux/module.h>
+#include <net/sock.h>
+#include "fbxatm_priv.h"
+
+static DEFINE_MUTEX(ioctl_mutex);
+static LIST_HEAD(ioctl_list);
+
+void fbxatm_register_ioctl(struct fbxatm_ioctl *ioctl)
+{
+	mutex_lock(&ioctl_mutex);
+	list_add_tail(&ioctl->next, &ioctl_list);
+	mutex_unlock(&ioctl_mutex);
+}
+
+void fbxatm_unregister_ioctl(struct fbxatm_ioctl *ioctl)
+{
+	mutex_lock(&ioctl_mutex);
+	list_del(&ioctl->next);
+	mutex_unlock(&ioctl_mutex);
+}
+
+static int fbxatm_sock_ioctl(struct socket *sock, unsigned int cmd,
+			     unsigned long arg)
+{
+	struct fbxatm_ioctl *ioctl;
+	void __user *useraddr;
+	int ret;
+
+	/* sanity check */
+	useraddr = (void __user *)arg;
+
+	if (!capable(CAP_NET_ADMIN))
+		return -EPERM;
+
+	ret = -ENOIOCTLCMD;
+	mutex_lock(&ioctl_mutex);
+
+	list_for_each_entry(ioctl, &ioctl_list, next) {
+		if (!ioctl->handler)
+			continue;
+
+		if (!try_module_get(ioctl->owner))
+			continue;
+
+		ret = ioctl->handler(sock, cmd, useraddr);
+		module_put(ioctl->owner);
+		if (ret != -ENOIOCTLCMD)
+			break;
+	}
+	mutex_unlock(&ioctl_mutex);
+
+	return ret;
+}
+
+static int fbxatm_sock_release(struct socket *sock)
+{
+	struct fbxatm_ioctl *ioctl;
+	struct sock *sk = sock->sk;
+
+	mutex_lock(&ioctl_mutex);
+
+	list_for_each_entry(ioctl, &ioctl_list, next) {
+		if (!ioctl->release)
+			continue;
+
+		if (!try_module_get(ioctl->owner))
+			continue;
+
+		ioctl->release(sock);
+		module_put(ioctl->owner);
+	}
+	mutex_unlock(&ioctl_mutex);
+
+	if (sk)
+		sock_put(sk);
+
+	return 0;
+}
+
+static const struct proto_ops fbxatm_proto_ops = {
+	.family		= PF_FBXATM,
+
+	.release =	fbxatm_sock_release,
+	.ioctl =	fbxatm_sock_ioctl,
+
+	.bind =		sock_no_bind,
+	.connect =	sock_no_connect,
+	.socketpair =	sock_no_socketpair,
+	.accept =	sock_no_accept,
+	.getname =	sock_no_getname,
+	.listen =	sock_no_listen,
+	.shutdown =	sock_no_shutdown,
+	.sendmsg =	sock_no_sendmsg,
+	.recvmsg =	sock_no_recvmsg,
+	.mmap =		sock_no_mmap,
+	.owner		= THIS_MODULE,
+};
+
+static struct proto fbxatm_proto = {
+        .name           = "fbxatm",
+        .owner          =  THIS_MODULE,
+        .obj_size       = sizeof (struct sock),
+};
+
+static int fbxatm_sock_create(struct net *net, struct socket *sock,
+			      int protocol, int kern)
+{
+	struct sock *sk;
+
+        sk = sk_alloc(net, PF_FBXATM, GFP_KERNEL, &fbxatm_proto, kern);
+	if (!sk)
+		return -ENOMEM;
+
+        sock_init_data(sock, sk);
+        sock->state = SS_UNCONNECTED;
+        sock->ops = &fbxatm_proto_ops;
+	return 0;
+}
+
+static struct net_proto_family fbxatm_family_ops = {
+	.family = PF_FBXATM,
+	.create = fbxatm_sock_create,
+	.owner = THIS_MODULE,
+};
+
+
+static int __init fbxatm_init(void)
+{
+	int ret;
+
+	printk(KERN_INFO "Freebox ATM stack\n");
+	ret = fbxatm_sysfs_init();
+	if (ret)
+		return ret;
+
+	ret = fbxatm_procfs_init();
+	if (ret)
+		goto fail_sysfs;
+
+	ret = fbxatm_vcc_init();
+	if (ret)
+		goto fail_procfs;
+
+	ret = fbxatm_2684_init();
+	if (ret)
+		goto fail_vcc;
+
+	ret = fbxatm_pppoa_init();
+	if (ret)
+		goto fail_2684;
+
+	ret = proto_register(&fbxatm_proto, 0);
+	if (ret)
+		goto fail_pppoa;
+
+	ret = sock_register(&fbxatm_family_ops);
+	if (ret)
+		goto fail_proto;
+
+	return 0;
+
+fail_proto:
+	proto_unregister(&fbxatm_proto);
+
+fail_pppoa:
+	fbxatm_pppoa_exit();
+
+fail_2684:
+	fbxatm_2684_exit();
+
+fail_vcc:
+	fbxatm_vcc_exit();
+
+fail_procfs:
+	fbxatm_procfs_exit();
+
+fail_sysfs:
+	fbxatm_sysfs_exit();
+	printk(KERN_ERR "failed to initialize Freebox ATM stack\n");
+	return ret;
+}
+
+static void __exit fbxatm_exit(void)
+{
+	sock_unregister(PF_FBXATM);
+	proto_unregister(&fbxatm_proto);
+	fbxatm_pppoa_exit();
+	fbxatm_2684_exit();
+	fbxatm_vcc_exit();
+	fbxatm_procfs_exit();
+	fbxatm_sysfs_exit();
+}
+
+subsys_initcall(fbxatm_init);
+module_exit(fbxatm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_FBXATM);
diff -Nruw linux-6.13.12-fbx/net/fbxatm./fbxatm_dev.c linux-6.13.12-fbx/net/fbxatm/fbxatm_dev.c
--- linux-6.13.12-fbx/net/fbxatm./fbxatm_dev.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxatm/fbxatm_dev.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,983 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/fbxatm_dev.h>
+#include "fbxatm_priv.h"
+
+/*
+ * list of registered device & lock
+ */
+LIST_HEAD(fbxatm_dev_list);
+
+/*
+ * big "rtnl" lock
+ */
+DEFINE_MUTEX(fbxatm_mutex);
+static int fbxatm_ifindex = -1;
+
+/*
+ * find device by index; caller must hold fbxatm_mutex
+ */
+static struct fbxatm_dev *__fbxatm_dev_get_by_index(int ifindex)
+{
+	struct fbxatm_dev *pdev;
+
+	list_for_each_entry(pdev, &fbxatm_dev_list, next) {
+		if (pdev->ifindex == ifindex)
+			return pdev;
+	}
+	return NULL;
+}
+
+/*
+ * find vcc by id; caller must hold fbxatm_mutex so the result stays valid
+ */
+static struct fbxatm_vcc *
+__fbxatm_vcc_get_by_id(const struct fbxatm_vcc_id *id)
+{
+	struct fbxatm_dev *adev;
+	struct fbxatm_vcc *vcc;
+	int found;
+
+	adev = __fbxatm_dev_get_by_index(id->dev_idx);
+	if (!adev)
+		return ERR_PTR(-ENODEV);
+
+	found = 0;
+	spin_lock_bh(&adev->vcc_list_lock);
+	list_for_each_entry(vcc, &adev->vcc_list, next) {
+		if (vcc->vpi != id->vpi || vcc->vci != id->vci)
+			continue;
+		found = 1;
+		break;
+	}
+	spin_unlock_bh(&adev->vcc_list_lock);
+
+	if (found)
+		return vcc;	/* returned after dropping the spinlock: mutex keeps it alive */
+	return ERR_PTR(-ENOENT);
+}
+
+/*
+ * allocate device: zeroed fbxatm_dev plus sizeof_priv bytes of driver data
+ */
+struct fbxatm_dev *fbxatm_alloc_device(int sizeof_priv)
+{
+	unsigned int size;
+
+	size = sizeof(struct fbxatm_dev) + sizeof_priv + FBXATMDEV_ALIGN;	/* extra slack — presumably to align the priv area; confirm */
+	return kzalloc(size, GFP_KERNEL);
+}
+
+EXPORT_SYMBOL(fbxatm_alloc_device);
+
+/*
+ * calculate crc10 of oam cell and store it big-endian in the cell
+ */
+static void compute_oam_crc10(struct fbxatm_oam_cell_payload *cell)
+{
+	u8 *pdu;
+	u16 crc;
+
+	/* crc10 does not cover header */
+	pdu = (u8 *)&cell->cell_type;
+	memset(cell->crc10, 0, 2);	/* crc field must be zero while computing */
+
+	crc = crc10(0, pdu, sizeof (*cell) - sizeof (cell->cell_hdr));
+	cell->crc10[0] = crc >> 8;	/* stored big-endian */
+	cell->crc10[1] = crc & 0xff;
+}
+
+/*
+ * check crc10 of oam cell
+ */
+static int check_oam_crc10(struct fbxatm_oam_cell_payload *cell)	/* returns 0 if crc matches, 1 otherwise */
+{
+	u8 *pdu;
+	u16 crc;
+
+	pdu = (u8 *)&cell->cell_type;
+
+	crc = (cell->crc10[0] << 8) | cell->crc10[1];
+	memset(cell->crc10, 0, 2);	/* crc was computed over a zeroed field */
+
+	if (crc != crc10(0, pdu, sizeof (*cell) - sizeof (cell->cell_hdr)))
+		return 1;
+
+	return 0;
+}
+
+/*
+ * send an oam loopback cell and wait up to 5 seconds for the answer
+ */
+static int do_oam_ping(struct fbxatm_oam_ping *ping)
+{
+	struct fbxatm_dev *adev;
+	struct fbxatm_oam_cell *oam_cell;
+	struct fbxatm_oam_cell_payload *cell;
+	u8 *hdr;
+	int ret;
+
+	switch (ping->req.type) {
+	case FBXATM_OAM_PING_SEG_F4:
+	case FBXATM_OAM_PING_E2E_F4:
+		return -ENOTSUPP;	/* NOTE(review): kernel-internal errno, userspace sees 524 — consider EOPNOTSUPP */
+	case FBXATM_OAM_PING_SEG_F5:
+	case FBXATM_OAM_PING_E2E_F5:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* find device */
+	mutex_lock(&fbxatm_mutex);
+	adev = __fbxatm_dev_get_by_index(ping->req.id.dev_idx);
+	if (!adev) {
+		ret = -ENODEV;
+		goto out_unlock;
+	}
+
+	/* if f5, vcc need to be opened */
+	switch (ping->req.type) {
+	case FBXATM_OAM_PING_SEG_F5:
+	case FBXATM_OAM_PING_E2E_F5:
+	{
+		struct fbxatm_vcc *vcc;
+
+		vcc = __fbxatm_vcc_get_by_id(&ping->req.id);
+		if (IS_ERR(vcc)) {
+			ret = -ENETDOWN;
+			goto out_unlock;
+		}
+		break;
+	}
+
+	default:
+		break;
+	}
+
+	ping->correlation_id = ++adev->oam_correlation_id;	/* matches replies to this request */
+
+	/* prepare atm oam cell and send it */
+	oam_cell = kmalloc(sizeof (*oam_cell), GFP_KERNEL);
+	if (!oam_cell) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
+	cell = &oam_cell->payload;
+
+	hdr = cell->cell_hdr;
+	ATM_SET_GFC(hdr, 0);
+
+	ATM_SET_VPI(hdr, ping->req.id.vpi);
+	ATM_SET_VCI(hdr, ping->req.id.vci);
+	if (ping->req.type == FBXATM_OAM_PING_E2E_F5)
+		ATM_SET_PT(hdr, OAM_PTI_END2END_F5);
+	else
+		ATM_SET_PT(hdr, OAM_PTI_SEG_F5);
+	ATM_SET_CLP(hdr, 0);
+	ATM_SET_HEC(hdr, 0);
+
+	cell->cell_type = (OAM_TYPE_FAULT_MANAGEMENT << OAM_TYPE_SHIFT) |
+		(FUNC_TYPE_OAM_LOOPBACK << FUNC_TYPE_SHIFT);
+	cell->loopback_indication = 1;	/* 1 = request (cleared by the responder) */
+
+	memcpy(cell->correlation_tag, &ping->correlation_id,
+	       sizeof (cell->correlation_tag));
+	memcpy(cell->loopback_id, ping->req.llid, sizeof (ping->req.llid));
+	memset(cell->source_id, 0x6a, sizeof (cell->source_id));	/* 0x6a: standard fill pattern */
+	memset(cell->reserved, 0x6a, sizeof (cell->reserved));
+
+	compute_oam_crc10(cell);
+
+	spin_lock_bh(&adev->dev_link_lock);
+	if (!test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags))
+		ret = -ENETDOWN;
+	else
+		ret = adev->ops->send_oam(adev, oam_cell);	/* on success the driver owns oam_cell */
+	spin_unlock_bh(&adev->dev_link_lock);
+	if (ret)
+		goto out_unlock;
+
+	/* wait for an answer */
+	adev->stats.tx_f5_oam++;
+	list_add(&ping->next, &adev->oam_pending_ping);	/* NOTE(review): added under fbxatm_mutex but scanned under oam_list_lock in handle_oam_cell — locking looks inconsistent */
+	ping->replied = 0;
+	init_waitqueue_head(&ping->wq);	/* NOTE(review): initialised after list_add — a very early reply could touch an uninitialised wq */
+	mutex_unlock(&fbxatm_mutex);
+
+	ret = wait_event_interruptible_timeout(ping->wq, ping->replied,
+					       HZ * 5);
+	list_del(&ping->next);	/* NOTE(review): no lock held — can race with the scan in handle_oam_cell */
+
+	if (ret == -ERESTARTSYS)
+		return ret;
+
+	if (ping->replied < 0) {
+		/* ping failed (negative replied not set in this file — presumably set elsewhere; confirm) */
+		return ping->replied;
+	}
+
+	if (!ping->replied) {
+		/* timeout */
+		return -ETIME;
+	}
+
+	return 0;
+
+
+out_unlock:
+	mutex_unlock(&fbxatm_mutex);
+	return ret;
+}
+
+/*
+ * special llid values (loopback location IDs)
+ */
+static const u8 llid_all1[16] = { 0xff, 0xff, 0xff, 0xff,
+				  0xff, 0xff, 0xff, 0xff,
+				  0xff, 0xff, 0xff, 0xff,
+				  0xff, 0xff, 0xff, 0xff };
+
+static const u8 llid_all0[16] = { 0 };
+
+/*
+ * handle incoming oam cell: answer loopback requests, match replies
+ */
+static void handle_oam_cell(struct fbxatm_dev *adev,
+			    struct fbxatm_oam_cell *oam_cell)
+{
+	struct fbxatm_oam_cell_payload *cell;
+	u16 vci;
+	u8 *hdr, pt, oam, func;
+
+	/* check CRC10 */
+	cell = &oam_cell->payload;
+	if (check_oam_crc10(cell)) {
+		adev->stats.rx_bad_oam++;
+		goto out;
+	}
+
+	/* drop f4 cells */
+	hdr = cell->cell_hdr;
+	vci = ATM_GET_VCI(hdr);
+
+	if (vci == OAM_VCI_SEG_F4 || vci == OAM_VCI_END2END_F4) {
+		adev->stats.rx_f4_oam++;
+		goto out;
+	}
+
+	/* keep f5 cells only */
+	pt = ATM_GET_PT(hdr);
+	if (pt != OAM_PTI_SEG_F5 && pt != OAM_PTI_END2END_F5) {
+		adev->stats.rx_other_oam++;
+		goto out;
+	}
+
+	adev->stats.rx_f5_oam++;
+
+	/* keep oam loopback type only */
+	oam = (cell->cell_type & OAM_TYPE_MASK) >> OAM_TYPE_SHIFT;
+	func = (cell->cell_type & FUNC_TYPE_MASK) >> FUNC_TYPE_SHIFT;
+
+	if (oam != OAM_TYPE_FAULT_MANAGEMENT ||
+	    func != FUNC_TYPE_OAM_LOOPBACK) {
+		adev->stats.rx_other_oam++;
+		goto out;
+	}
+
+	if (cell->loopback_indication & 1) {
+		int match, ret;
+
+		/* request, check for llid match */
+		match = 0;
+		switch (pt) {
+		case OAM_PTI_SEG_F5:
+			/* 0x0 or 0xffffffff */
+			if (!memcmp(cell->loopback_id, llid_all0,
+				    sizeof (llid_all0)))
+				match = 1;
+			fallthrough;
+
+		case OAM_PTI_END2END_F5:
+			/* 0xffffffff only */
+			if (!memcmp(cell->loopback_id, llid_all1,
+				    sizeof (llid_all1)))
+				match = 1;
+			break;
+		}
+
+		if (!match) {
+			adev->stats.rx_bad_llid_oam++;
+			goto out;
+		}
+
+		/* ok, update llid and answer */
+		cell->loopback_indication = 0;
+		memcpy(cell->loopback_id, llid_all1, sizeof (llid_all1));
+		compute_oam_crc10(cell);
+
+		spin_lock_bh(&adev->dev_link_lock);
+		if (test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags))	/* fix: test was inverted (replied only when link down); cf. do_oam_ping */
+			ret = adev->ops->send_oam(adev, oam_cell);
+		else
+			ret = -ENETDOWN;
+		spin_unlock_bh(&adev->dev_link_lock);
+
+		if (!ret) {
+			/* send successful, don't free cell */
+			adev->stats.tx_f5_oam++;
+			return;
+		}
+
+	} else {
+		struct fbxatm_oam_ping *ping;
+
+		/* reply, find a matching sender */
+		spin_lock_bh(&adev->oam_list_lock);
+		list_for_each_entry(ping, &adev->oam_pending_ping, next) {
+
+			/* compare correlation id */
+			if (memcmp(&ping->correlation_id,
+				   cell->correlation_tag,
+				   sizeof (cell->correlation_tag)))
+				continue;
+
+			/* compare ping type */
+			switch (ping->req.type) {
+			case FBXATM_OAM_PING_SEG_F5:
+				if (pt != OAM_PTI_SEG_F5)
+					continue;
+				break;
+			case FBXATM_OAM_PING_E2E_F5:
+				if (pt != OAM_PTI_END2END_F5)
+					continue;
+				break;
+			default:
+				break;
+			}
+
+			/* seems we have a match */
+			ping->replied = 1;
+			wake_up(&ping->wq);
+		}
+		spin_unlock_bh(&adev->oam_list_lock);
+	}
+
+out:
+	kfree(oam_cell);
+}
+
+/*
+ * oam rx work: drain rx_oam_cells and process each cell
+ */
+static void fbxatm_oam_work(struct work_struct *work)
+{
+	struct fbxatm_dev *adev;
+	struct fbxatm_oam_cell *cell;
+
+	adev = container_of(work, struct fbxatm_dev, oam_work);
+
+	do {
+		cell = NULL;
+		spin_lock_bh(&adev->oam_list_lock);
+		if (!list_empty(&adev->rx_oam_cells)) {
+			cell = list_first_entry(&adev->rx_oam_cells,
+						struct fbxatm_oam_cell, next);
+			list_del(&cell->next);
+			adev->rx_oam_cells_count--;	/* balances the increment in fbxatm_netifrx_oam */
+		}
+		spin_unlock_bh(&adev->oam_list_lock);
+
+		if (cell)
+			handle_oam_cell(adev, cell);	/* frees or forwards the cell */
+
+	} while (cell);
+}
+
+/*
+ * register given device; caller must hold fbxatm_mutex
+ */
+static int __fbxatm_register_device(struct fbxatm_dev *adev,
+				    const char *base_name,
+				    const struct fbxatm_dev_ops *ops)
+{
+	struct fbxatm_dev *pdev;
+	int name_len, count, ret;
+	long *inuse;
+
+	adev->ops = ops;
+	INIT_LIST_HEAD(&adev->vcc_list);
+	spin_lock_init(&adev->vcc_list_lock);
+	INIT_LIST_HEAD(&adev->next);
+	spin_lock_init(&adev->stats_lock);
+	spin_lock_init(&adev->oam_list_lock);
+	spin_lock_init(&adev->dev_link_lock);
+	INIT_LIST_HEAD(&adev->rx_oam_cells);
+	INIT_WORK(&adev->oam_work, fbxatm_oam_work);
+	INIT_LIST_HEAD(&adev->oam_pending_ping);
+	get_random_bytes(&adev->oam_correlation_id, 4);
+
+	name_len = strlen(base_name);
+	adev->name = kmalloc(name_len + 10, GFP_KERNEL);	/* room for base + decimal index */
+	if (!adev->name) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/* allocate ifindex */
+	while (1) {
+		if (++fbxatm_ifindex < 0)
+			fbxatm_ifindex = 0;
+		if (__fbxatm_dev_get_by_index(fbxatm_ifindex))
+			continue;
+		adev->ifindex = fbxatm_ifindex;
+		break;
+	}
+
+	/* allocate device name */
+	inuse = (long *)get_zeroed_page(GFP_ATOMIC);	/* bitmap of numeric suffixes already taken */
+	if (!inuse) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	list_for_each_entry(pdev, &fbxatm_dev_list, next) {
+		unsigned long val;
+		char *end;
+
+		/* look for common prefix */
+		if (strncmp(base_name, pdev->name, name_len))
+			continue;
+
+		/* make sure name is the same, not just a prefix */
+		val = simple_strtoul(pdev->name + name_len, &end, 10);
+		if (*end)	/* fix: test was inverted — numeric suffixes were skipped, allowing duplicate names */
+			continue;
+
+		set_bit(val, inuse);	/* NOTE(review): val unbounded — suffix >= PAGE_SIZE*8 would write out of bounds */
+	}
+
+	count = find_first_zero_bit(inuse, PAGE_SIZE * 8);
+	free_page((unsigned long)inuse);
+
+	snprintf(adev->name, name_len + 10, "%s%d", base_name, count);
+	list_add_tail(&adev->next, &fbxatm_dev_list);
+
+	/* create procfs entries */
+	ret = fbxatm_proc_dev_register(adev);
+	if (ret)
+		goto fail;
+
+	/* call device procfs init if any */
+	if (adev->ops->init_procfs) {
+		ret = adev->ops->init_procfs(adev);
+		if (ret)
+			goto fail_procfs;
+	}
+
+	/* create sysfs entries */
+	ret = fbxatm_register_dev_sysfs(adev);
+	if (ret)
+		goto fail_procfs;
+
+	return 0;
+
+fail_procfs:
+	fbxatm_proc_dev_deregister(adev);
+
+fail:
+	list_del(&adev->next);	/* ok even if never inserted: head was INIT_LIST_HEAD'd to self */
+	kfree(adev->name);
+	return ret;
+}
+
+/*
+ * take lock and register device
+ */
+int fbxatm_register_device(struct fbxatm_dev *adev,
+			   const char *base_name,
+			   const struct fbxatm_dev_ops *ops)
+{
+	int ret;
+
+	mutex_lock(&fbxatm_mutex);	/* serializes against lookup/open/unregister */
+	ret = __fbxatm_register_device(adev, base_name, ops);
+	mutex_unlock(&fbxatm_mutex);
+	return ret;
+}
+
+EXPORT_SYMBOL(fbxatm_register_device);
+
+/*
+ * change device "link" state and propagate it to every vcc
+ */
+static void fbxatm_dev_set_link(struct fbxatm_dev *adev, int link)
+{
+	struct fbxatm_vcc *vcc;
+
+	if (link) {
+		memset(&adev->stats, 0, sizeof (adev->stats));	/* stats restart on every link up */
+		set_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags);
+
+		spin_lock_bh(&adev->vcc_list_lock);
+		list_for_each_entry(vcc, &adev->vcc_list, next) {
+			memset(&vcc->stats, 0, sizeof (vcc->stats));
+			set_bit(FBXATM_VCC_F_LINK_UP, &vcc->vcc_flags);
+			if (!vcc->user_ops || !vcc->user_ops->link_change)
+				continue;
+			vcc->user_ops->link_change(vcc->user_cb_data, 1,
+						   adev->link_cell_rate_ds,
+						   adev->link_cell_rate_us);	/* called under spinlock: must not sleep */
+		}
+		spin_unlock_bh(&adev->vcc_list_lock);
+	} else {
+		/* prevent further oam cells input */
+		spin_lock_bh(&adev->dev_link_lock);
+		clear_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags);
+		spin_unlock_bh(&adev->dev_link_lock);
+
+		/* flush rx oam work */
+		cancel_work_sync(&adev->oam_work);
+
+		/* now disable tx on all vcc */
+		spin_lock_bh(&adev->vcc_list_lock);
+		list_for_each_entry(vcc, &adev->vcc_list, next) {
+			spin_lock_bh(&vcc->tx_lock);	/* tx_lock: a sender sees the bit change atomically */
+			clear_bit(FBXATM_VCC_F_LINK_UP, &vcc->vcc_flags);
+			spin_unlock_bh(&vcc->tx_lock);
+			if (!vcc->user_ops || !vcc->user_ops->link_change)
+				continue;
+			vcc->user_ops->link_change(vcc->user_cb_data, 0, 0, 0);
+		}
+		spin_unlock_bh(&adev->vcc_list_lock);
+	}
+
+	fbxatm_dev_change_sysfs(adev);	/* notify userspace of the state change */
+}
+
+/*
+ * set device "link" to up, allowing vcc/device send ops to be called,
+ * this function sleeps
+ */
+void fbxatm_dev_set_link_up(struct fbxatm_dev *adev)
+{
+	if (!test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags))	/* log only on actual transition */
+		printk(KERN_INFO "%s: link UP - "
+		       "down: %u kbit/s - up: %u kbit/s\n", adev->name,
+		       adev->link_rate_ds / 1000, adev->link_rate_us / 1000);
+	return fbxatm_dev_set_link(adev, 1);	/* void forward */
+}
+
+EXPORT_SYMBOL(fbxatm_dev_set_link_up);
+
+/*
+ * set device link to down, disallowing any vcc/device send ops to be
+ * called, this function sleeps
+ */
+void fbxatm_dev_set_link_down(struct fbxatm_dev *adev)
+{
+	if (test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags))	/* log only on actual transition */
+		printk(KERN_INFO "%s: link DOWN\n", adev->name);
+	return fbxatm_dev_set_link(adev, 0);	/* void forward */
+}
+
+EXPORT_SYMBOL(fbxatm_dev_set_link_down);
+
+/*
+ * take lock and unregister device; fails with -EBUSY while vccs or
+ * oam pings still reference it
+ */
+int fbxatm_unregister_device(struct fbxatm_dev *adev)
+{
+	int ret;
+	bool empty;
+
+	ret = 0;
+	mutex_lock(&fbxatm_mutex);
+
+	spin_lock_bh(&adev->vcc_list_lock);
+	empty = list_empty(&adev->vcc_list);
+	spin_unlock_bh(&adev->vcc_list_lock);
+	if (!empty) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (!list_empty(&adev->oam_pending_ping)) {	/* in-flight pings hold pointers into adev */
+		ret = -EBUSY;
+		goto out;
+	}
+
+	list_del(&adev->next);
+
+	if (adev->ops->release_procfs)
+		adev->ops->release_procfs(adev);
+	fbxatm_proc_dev_deregister(adev);
+
+	fbxatm_unregister_dev_sysfs(adev);
+out:
+	mutex_unlock(&fbxatm_mutex);
+	return ret;
+}
+
+EXPORT_SYMBOL(fbxatm_unregister_device);
+
+/*
+ * actually free device memory
+ */
+void __fbxatm_free_device(struct fbxatm_dev *adev)
+{
+	kfree(adev->name);
+	kfree(adev);
+}
+
+/*
+ * free device memory
+ */
+void fbxatm_free_device(struct fbxatm_dev *adev)
+{
+	/* actual free is done in sysfs release */
+//	class_device_put(&adev->class_dev);	/* NOTE(review): put is commented out — nothing visible here ever drops the ref, device memory appears never freed; confirm sysfs release path */
+}
+
+EXPORT_SYMBOL(fbxatm_free_device);
+
+/*
+ * device callback when oam cell comes in; queues the cell for the work func
+ */
+void fbxatm_netifrx_oam(struct fbxatm_dev *adev, struct fbxatm_oam_cell *cell)
+{
+	bool link_up;
+
+	spin_lock_bh(&adev->dev_link_lock);
+	link_up = test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags);
+	spin_unlock_bh(&adev->dev_link_lock);
+
+	if (!link_up || adev->rx_oam_cells_count > 8) {	/* bound the backlog; unlocked read is a best-effort heuristic */
+		kfree(cell);
+		return;
+	}
+
+	spin_lock_bh(&adev->oam_list_lock);
+	adev->rx_oam_cells_count++;	/* fix: counter now updated under oam_list_lock, matching the decrement in fbxatm_oam_work */
+	list_add_tail(&cell->next, &adev->rx_oam_cells);
+	spin_unlock_bh(&adev->oam_list_lock);
+	schedule_work(&adev->oam_work);
+}
+
+EXPORT_SYMBOL(fbxatm_netifrx_oam);
+
+/*
+ * set user ops on vcc
+ */
+void fbxatm_set_uops(struct fbxatm_vcc *vcc,
+		     const struct fbxatm_vcc_uops *user_ops,
+		     void *user_cb_data)
+{
+	spin_lock_bh(&vcc->user_ops_lock);
+	vcc->user_ops = user_ops;
+	vcc->user_cb_data = user_cb_data;
+	spin_unlock_bh(&vcc->user_ops_lock);
+}
+
+/*
+ * bind to given vcc; caller must hold fbxatm_mutex
+ */
+static struct fbxatm_vcc *
+__fbxatm_bind_to_vcc(const struct fbxatm_vcc_id *id,
+		     enum fbxatm_vcc_user user)
+{
+	struct fbxatm_vcc *vcc;
+
+	vcc = __fbxatm_vcc_get_by_id(id);
+	if (IS_ERR(vcc))
+		return vcc;
+
+	if (vcc->user != FBXATM_VCC_USER_NONE)	/* NOTE(review): read without user_ops_lock, while unbind clears under that lock — confirm */
+		return ERR_PTR(-EBUSY);
+
+	vcc->user = user;
+	return vcc;
+}
+
+/*
+ * bind to given vcc (takes fbxatm_mutex)
+ */
+struct fbxatm_vcc *
+fbxatm_bind_to_vcc(const struct fbxatm_vcc_id *id,
+		   enum fbxatm_vcc_user user)
+{
+	struct fbxatm_vcc *vcc;
+
+	mutex_lock(&fbxatm_mutex);
+	vcc = __fbxatm_bind_to_vcc(id, user);
+	mutex_unlock(&fbxatm_mutex);
+	return vcc;
+}
+
+/*
+ * unbind from given vcc: clear user ops and mark it free again
+ */
+void fbxatm_unbind_vcc(struct fbxatm_vcc *vcc)
+{
+	spin_lock_bh(&vcc->user_ops_lock);
+	vcc->user_ops = NULL;
+	vcc->user_cb_data = NULL;
+	vcc->user = FBXATM_VCC_USER_NONE;
+	spin_unlock_bh(&vcc->user_ops_lock);
+}
+
+/*
+ * open vcc on given device; caller must hold fbxatm_mutex
+ */
+static int __fbxatm_dev_open_vcc(const struct fbxatm_vcc_id *id,
+				 const struct fbxatm_vcc_qos *qos)
+{
+	struct fbxatm_vcc *vcc;
+	struct fbxatm_dev *adev;
+	int ret, count;
+
+	/* check vpi/vci unicity  */
+	vcc = __fbxatm_vcc_get_by_id(id);
+	if (!IS_ERR(vcc))
+		return -EBUSY;
+	/* sanity check */
+	switch (qos->traffic_class) {
+	case FBXATM_VCC_TC_UBR_NO_PCR:
+	case FBXATM_VCC_TC_UBR:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (qos->max_sdu > 4096)
+		return -EINVAL;
+
+	if (!qos->max_buffered_pkt || qos->max_buffered_pkt > 128)
+		return -EINVAL;
+
+	adev = __fbxatm_dev_get_by_index(id->dev_idx);
+	if (!adev)
+		return -ENODEV;
+
+	/* make sure device accept requested priorities */
+	if (qos->priority > adev->max_priority)
+		return -EINVAL;
+
+	if (qos->rx_priority > adev->max_rx_priority)
+		return -EINVAL;
+
+	/* don't open more vcc than device can handle */
+	count = 0;
+	list_for_each_entry(vcc, &adev->vcc_list, next)
+		count++;
+	if (count + 1 > adev->max_vcc)
+		return -ENOSPC;
+
+	/* make sure vpi/vci is valid for this device */
+	if ((~adev->vpi_mask & id->vpi) || (~adev->vci_mask & id->vci))
+		return -EINVAL;
+
+	if (!try_module_get(adev->ops->owner))
+		return -ENODEV;
+	/* ok, create vcc */
+	vcc = kzalloc(sizeof (*vcc), GFP_KERNEL);
+	if (!vcc) {
+		module_put(adev->ops->owner);	/* fix: don't leak the module reference */
+		return -ENOMEM;
+	}
+	spin_lock_init(&vcc->user_ops_lock);
+	spin_lock_init(&vcc->tx_lock);
+	vcc->vpi = id->vpi;
+	vcc->vci = id->vci;
+	vcc->adev = adev;
+	vcc->to_drop_pkt = 0;
+	memcpy(&vcc->qos, qos, sizeof (*qos));
+
+	ret = adev->ops->open(vcc);
+	if (ret) {
+		module_put(adev->ops->owner);	/* fix: don't leak the module reference */
+		kfree(vcc);
+		return ret;
+	}
+
+	/* inherit vcc link state from device */
+	spin_lock_bh(&adev->vcc_list_lock);
+	if (test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags))
+		set_bit(FBXATM_VCC_F_LINK_UP, &vcc->vcc_flags);
+	list_add_tail(&vcc->next, &adev->vcc_list);
+	spin_unlock_bh(&adev->vcc_list_lock);
+
+	return ret;
+}
+
+/*
+ * find device & open vcc on it
+ */
+static int fbxatm_dev_open_vcc(const struct fbxatm_vcc_id *id,
+			       const struct fbxatm_vcc_qos *qos)
+{
+	int ret;
+
+	mutex_lock(&fbxatm_mutex);
+	ret = __fbxatm_dev_open_vcc(id, qos);
+	mutex_unlock(&fbxatm_mutex);
+	return ret;
+}
+
+/*
+ * close vcc on device; caller must hold fbxatm_mutex
+ */
+static int __fbxatm_dev_close_vcc(struct fbxatm_vcc *vcc)
+{
+	struct fbxatm_dev *adev;
+
+	if (vcc->user != FBXATM_VCC_USER_NONE)	/* still bound (2684/pppoa): refuse */
+		return -EBUSY;
+	adev = vcc->adev;
+	module_put(adev->ops->owner);	/* NOTE(review): ref dropped before ->close runs — confirm ordering is safe */
+	adev->ops->close(vcc);
+	spin_lock_bh(&adev->vcc_list_lock);
+	list_del(&vcc->next);
+	spin_unlock_bh(&adev->vcc_list_lock);
+	kfree(vcc);
+	return 0;
+}
+
+/*
+ * find device & vcc and close it
+ */
+static int fbxatm_dev_close_vcc(const struct fbxatm_vcc_id *id)
+{
+	struct fbxatm_vcc *vcc;
+	int ret;
+
+	mutex_lock(&fbxatm_mutex);
+	vcc = __fbxatm_vcc_get_by_id(id);
+	if (IS_ERR(vcc))
+		ret = PTR_ERR(vcc);
+	else
+		ret = __fbxatm_dev_close_vcc(vcc);
+	mutex_unlock(&fbxatm_mutex);
+	return ret;
+}
+
+/*
+ * ioctl handler: vcc add/del/get, oam ping and debug packet drop
+ */
+static int fbxatm_vcc_ioctl(struct socket *sock,
+			    unsigned int cmd, void __user *useraddr)
+{
+	int ret;
+
+	ret = 0;
+
+	switch (cmd) {
+	case FBXATM_IOCADD:
+	case FBXATM_IOCDEL:
+	{
+		struct fbxatm_vcc_params params;
+
+		if (copy_from_user(&params, useraddr, sizeof(params)))
+			return -EFAULT;
+
+		if (cmd == FBXATM_IOCADD)
+			ret = fbxatm_dev_open_vcc(&params.id, &params.qos);
+		else
+			ret = fbxatm_dev_close_vcc(&params.id);
+		break;
+	}
+
+	case FBXATM_IOCGET:	/* return qos + current user of a vcc */
+	{
+		struct fbxatm_vcc_params params;
+		struct fbxatm_vcc *vcc;
+
+		if (copy_from_user(&params, useraddr, sizeof(params)))
+			return -EFAULT;
+
+		mutex_lock(&fbxatm_mutex);
+		vcc = __fbxatm_vcc_get_by_id(&params.id);
+		if (IS_ERR(vcc))
+			ret = PTR_ERR(vcc);
+		else {
+			memcpy(&params.qos, &vcc->qos, sizeof (vcc->qos));
+			params.user = vcc->user;
+		}
+		mutex_unlock(&fbxatm_mutex);
+
+		if (ret)
+			return ret;
+
+		if (copy_to_user(useraddr, &params, sizeof(params)))
+			return -EFAULT;
+		break;
+	}
+
+	case FBXATM_IOCOAMPING:	/* synchronous: sleeps up to 5s in do_oam_ping */
+	{
+		struct fbxatm_oam_ping ping;
+
+		if (copy_from_user(&ping.req, useraddr, sizeof(ping.req)))
+			return -EFAULT;
+
+		ret = do_oam_ping(&ping);
+		if (ret)
+			return ret;
+
+		if (copy_to_user(useraddr, &ping.req, sizeof(ping.req)))
+			return -EFAULT;
+		break;
+	}
+
+	case FBXATM_IOCDROP:	/* debug: ask the vcc to drop the next N packets */
+	{
+		struct fbxatm_vcc_drop_params params;
+		struct fbxatm_vcc *vcc;
+
+		if (copy_from_user(&params, useraddr, sizeof(params)))
+			return -EFAULT;
+
+		mutex_lock(&fbxatm_mutex);
+		vcc = __fbxatm_vcc_get_by_id(&params.id);
+		if (IS_ERR(vcc))
+			ret = PTR_ERR(vcc);
+		else {
+			spin_lock_bh(&vcc->user_ops_lock);
+			vcc->to_drop_pkt += params.drop_count;
+			spin_unlock_bh(&vcc->user_ops_lock);
+			ret = 0;
+		}
+		mutex_unlock(&fbxatm_mutex);
+
+		if (ret)
+			return ret;
+		break;
+	}
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	return ret;
+}
+
+static struct fbxatm_ioctl fbxatm_vcc_ioctl_ops = {
+	.handler	= fbxatm_vcc_ioctl,
+	.owner		= THIS_MODULE,
+};
+
+int __init fbxatm_vcc_init(void)
+{
+	fbxatm_register_ioctl(&fbxatm_vcc_ioctl_ops);
+	return 0;
+}
+
+void fbxatm_vcc_exit(void)
+{
+	fbxatm_unregister_ioctl(&fbxatm_vcc_ioctl_ops);
+}
diff -Nruw linux-6.13.12-fbx/net/fbxatm./fbxatm_pppoa.c linux-6.13.12-fbx/net/fbxatm/fbxatm_pppoa.c
--- linux-6.13.12-fbx/net/fbxatm./fbxatm_pppoa.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxatm/fbxatm_pppoa.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,500 @@
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/if_pppox.h>
+#include <linux/ppp_channel.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#include <linux/fbxatm.h>
+#include <linux/fbxatm_dev.h>
+#include "fbxatm_priv.h"
+
+#define PFX	"fbxatm_pppoa: "
+
+static LIST_HEAD(fbxatm_pppoa_vcc_list);
+static DEFINE_MUTEX(fbxatm_pppoa_mutex);
+
+/*
+ * private data for pppoa vcc
+ */
+struct fbxatm_pppoa_vcc {
+	struct fbxatm_vcc		*vcc;		/* underlying fbxatm vcc */
+	struct fbxatm_pppoa_vcc_params	params;		/* params given at bind time */
+	enum fbxatm_pppoa_encap		cur_encap;	/* resolved encap (autodetect settles here) */
+
+	/* used by ppp */
+	int				flags;
+	struct ppp_channel		chan;
+	struct tasklet_struct		tx_done_tasklet;
+
+	struct socket			*sock;		/* owning socket, keys release() */
+	struct list_head		next;
+};
+
+
+#define __LLC_HDR		0xfe, 0xfe, 0x03	/* LLC UI header used for PPP over AAL5 (RFC 2364) */
+#define __NLPID_PPP		0xcf			/* NLPID for PPP */
+#define __PPP_LCP		0xc0, 0x21		/* PPP protocol id for LCP */
+
+static const u8 llc_ppp[]	= { __LLC_HDR, __NLPID_PPP };
+static const u8 llc_ppp_lcp[]	= { __LLC_HDR, __NLPID_PPP, __PPP_LCP };
+static const u8 lcp[]		= { __PPP_LCP };
+
+
+/*
+ * fbxatm stack receive callback, called from softirq
+ */
+static void vcc_rx_callback(struct sk_buff *skb, void *data)
+{
+	struct fbxatm_pppoa_vcc *priv;
+
+	priv = (struct fbxatm_pppoa_vcc *)data;
+
+	if (priv->chan.ppp == NULL) {	/* channel not (or no longer) attached to a ppp unit */
+		dev_kfree_skb(skb);
+		return;
+	}
+
+	switch (priv->cur_encap) {
+	case FBXATM_EPPPOA_VCMUX:
+		/* nothing to do */
+		break;
+
+	case FBXATM_EPPPOA_LLC:
+		/* make sure llc header is present and remove */
+		if (skb->len < sizeof(llc_ppp) ||
+		    memcmp(skb->data, llc_ppp, sizeof(llc_ppp)))
+			goto error;
+		skb_pull(skb, sizeof(llc_ppp));
+		break;
+
+	case FBXATM_EPPPOA_AUTODETECT:
+		/* look for lcp, with an llc header or not: the first
+		 * recognized frame decides the encapsulation for good */
+		if (skb->len >= sizeof(llc_ppp_lcp) &&
+		    !memcmp(skb->data, llc_ppp_lcp, sizeof(llc_ppp_lcp))) {
+			priv->cur_encap = FBXATM_EPPPOA_LLC;
+			skb_pull(skb, sizeof(llc_ppp));
+			break;
+		}
+
+		if (skb->len >= sizeof(lcp) &&
+		    !memcmp(skb->data, lcp, sizeof (lcp))) {
+			priv->cur_encap = FBXATM_EPPPOA_VCMUX;
+			break;
+		}
+
+		/* no match */
+		goto error;
+	}
+
+	ppp_input(&priv->chan, skb);
+	return;
+
+error:
+	dev_kfree_skb(skb);
+	ppp_input_error(&priv->chan, 0);
+}
+
+/*
+ * tx done tasklet callback: restart ppp output outside the tx-done path
+ */
+static void tx_done_tasklet_func(unsigned long data)
+{
+	struct fbxatm_pppoa_vcc *priv = (struct fbxatm_pppoa_vcc *)data;
+	ppp_output_wakeup(&priv->chan);
+}
+
+/*
+ * fbxatm stack tx done callback, called from softirq
+ */
+static void vcc_tx_done_callback(void *data)
+{
+	struct fbxatm_pppoa_vcc *priv = data;
+
+	/* schedule tasklet to avoid re-entering in ppp_xmit */
+	tasklet_schedule(&priv->tx_done_tasklet);
+}
+
+/*
+ * vcc user ops, callback from fbxatm stack
+ */
+static const struct fbxatm_vcc_uops fbxatm_pppoa_vcc_uops = {
+	.rx_pkt		= vcc_rx_callback,
+	.tx_done	= vcc_tx_done_callback,
+};
+
+/*
+ * ppp xmit callback
+ */
+static int ppp_xmit(struct ppp_channel *chan, struct sk_buff *skb)	/* 1 = consumed, 0 = busy, retry after wakeup */
+{
+	struct fbxatm_pppoa_vcc *priv;
+	struct sk_buff *to_send_skb, *nskb;
+	int ret;
+
+	priv = (struct fbxatm_pppoa_vcc *)chan->private;
+
+	/* MAYBE FIXME: handle protocol compression ? */
+
+	to_send_skb = skb;
+	nskb = NULL;
+
+	/* send using vcmux encap if not yet known */
+	switch (priv->cur_encap) {
+	case FBXATM_EPPPOA_AUTODETECT:
+	case FBXATM_EPPPOA_VCMUX:
+		break;
+
+	case FBXATM_EPPPOA_LLC:
+	{
+		unsigned int headroom;
+
+		headroom = skb_headroom(skb);
+
+		if (headroom < sizeof(llc_ppp)) {	/* not enough room: copy to a bigger skb */
+			headroom += sizeof(llc_ppp);
+			nskb = skb_realloc_headroom(skb, headroom);
+			if (!nskb) {
+				dev_kfree_skb(skb);
+				return 1;	/* drop, but report consumed so ppp does not requeue */
+			}
+			to_send_skb = nskb;
+		}
+
+		skb_push(to_send_skb, sizeof(llc_ppp));
+		memcpy(to_send_skb->data, llc_ppp, sizeof(llc_ppp));
+		break;
+	}
+	}
+
+	ret = fbxatm_send(priv->vcc, to_send_skb);
+	if (ret) {
+		/* packet was not sent, queue is full, free any newly
+		 * created skb */
+		if (nskb)
+			dev_kfree_skb(nskb);
+		else {
+			/* restore original skb if we altered it */
+			if (priv->cur_encap == FBXATM_EPPPOA_LLC)
+				skb_pull(skb, sizeof(llc_ppp));
+		}
+
+		/* suspend ppp output, will be woken up by
+		 * ppp_output_wakeup, we're called under ppp lock so
+		 * we can't race with tx done */
+		return 0;
+	}
+
+	/* packet was sent, if we sent a copy free the original */
+	if (nskb)
+		dev_kfree_skb(skb);
+
+	if (fbxatm_vcc_queue_full(priv->vcc))
+		ppp_output_stop(chan);	/* throttle until tx_done wakes us */
+
+	return 1;
+}
+
+static int ppp_ioctl(struct ppp_channel *chan, unsigned int cmd,
+		     unsigned long arg)	/* only the ppp flags word is supported */
+{
+	struct fbxatm_pppoa_vcc *priv;
+	int ret;
+
+	priv = (struct fbxatm_pppoa_vcc *)chan->private;
+
+	switch (cmd) {
+	case PPPIOCGFLAGS:
+		ret = put_user(priv->flags, (int __user *)arg) ? -EFAULT : 0;
+		break;
+	case PPPIOCSFLAGS:
+		ret = get_user(priv->flags, (int __user *) arg) ? -EFAULT : 0;
+		break;
+	default:
+		ret = -ENOTTY;
+		break;
+	}
+	return ret;
+}
+
+static struct ppp_channel_ops fbxatm_pppoa_ppp_ops = {
+	.start_xmit = ppp_xmit,
+	.ioctl = ppp_ioctl,
+};
+
+/*
+ * find pppoa vcc from id; caller must hold fbxatm_pppoa_mutex
+ */
+static struct fbxatm_pppoa_vcc *
+__find_pppoa_vcc(const struct fbxatm_vcc_id *id)
+{
+	struct fbxatm_pppoa_vcc *priv;
+	int found;
+
+	/* find it */
+	found = 0;
+	list_for_each_entry(priv, &fbxatm_pppoa_vcc_list, next) {
+		if (priv->vcc->adev->ifindex != id->dev_idx ||
+		    priv->vcc->vpi != id->vpi ||
+		    priv->vcc->vci != id->vci)
+			continue;
+
+		found = 1;
+		break;
+	}
+
+	if (found)
+		return priv;
+	return NULL;
+}
+
+/*
+ * find pppoa vcc from socket; caller must hold fbxatm_pppoa_mutex
+ */
+static struct fbxatm_pppoa_vcc *
+__find_pppoa_vcc_from_socket(const struct socket *sock)
+{
+	struct fbxatm_pppoa_vcc *priv;
+	int found;
+
+	/* find it */
+	found = 0;
+	list_for_each_entry(priv, &fbxatm_pppoa_vcc_list, next) {
+		if (priv->sock != sock)
+			continue;
+
+		found = 1;
+		break;
+	}
+
+	if (found)
+		return priv;
+	return NULL;
+}
+
+/*
+ * bind to given vcc; caller must hold fbxatm_pppoa_mutex
+ */
+static int __bind_pppoa_vcc(const struct fbxatm_pppoa_vcc_params *params,
+			    struct socket *sock)
+{
+	struct fbxatm_pppoa_vcc *priv;
+	int ret;
+
+	/* sanity check */
+	switch (params->encap) {
+	case FBXATM_EPPPOA_AUTODETECT:
+	case FBXATM_EPPPOA_VCMUX:
+	case FBXATM_EPPPOA_LLC:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+	memcpy(&priv->params, params, sizeof (*params));
+	priv->cur_encap = params->encap;
+
+	/* bind to vcc */
+	priv->vcc = fbxatm_bind_to_vcc(&params->id, FBXATM_VCC_USER_PPPOA);
+	if (IS_ERR(priv->vcc)) {
+		ret = PTR_ERR(priv->vcc);
+		goto fail;
+	}
+
+	fbxatm_set_uops(priv->vcc, &fbxatm_pppoa_vcc_uops, priv);
+	priv->chan.private = priv;
+	priv->chan.ops = &fbxatm_pppoa_ppp_ops;
+	priv->chan.mtu = priv->vcc->qos.max_sdu - PPP_HDRLEN;
+	priv->chan.hdrlen = 0;
+	priv->sock = sock;
+	tasklet_init(&priv->tx_done_tasklet, tx_done_tasklet_func,
+		     (unsigned long)priv);
+
+	if (priv->cur_encap != FBXATM_EPPPOA_VCMUX) {
+		/* assume worst case if vcmux is not forced */
+		priv->chan.mtu -= sizeof(llc_ppp);
+		priv->chan.hdrlen += sizeof(llc_ppp);
+	}
+
+	priv->chan.mtu -= priv->vcc->adev->tx_headroom;	/* NOTE(review): headroom subtracted from mtu AND added to hdrlen — double-counted? confirm */
+	priv->chan.hdrlen += priv->vcc->adev->tx_headroom;
+
+	ret = ppp_register_channel(&priv->chan);
+	if (ret)
+		goto fail_unbind;
+	list_add_tail(&priv->next, &fbxatm_pppoa_vcc_list);
+	return 0;
+
+fail_unbind:
+	fbxatm_unbind_vcc(priv->vcc);
+
+fail:
+	kfree(priv);
+	return ret;
+}
+
+/*
+ * bind to given vcc (takes fbxatm_pppoa_mutex)
+ */
+static int bind_pppoa_vcc(const struct fbxatm_pppoa_vcc_params *params,
+			  struct socket *sock)
+{
+	int ret;
+
+	mutex_lock(&fbxatm_pppoa_mutex);
+	ret = __bind_pppoa_vcc(params, sock);
+	mutex_unlock(&fbxatm_pppoa_mutex);
+	return ret;
+}
+
+/*
+ * unbind from given vcc; caller must hold fbxatm_pppoa_mutex
+ */
+static void __unbind_pppoa_vcc(struct fbxatm_pppoa_vcc *priv)
+{
+	ppp_unregister_channel(&priv->chan);	/* stops ppp callbacks before tearing down */
+	fbxatm_unbind_vcc(priv->vcc);
+	tasklet_kill(&priv->tx_done_tasklet);
+	list_del(&priv->next);
+	kfree(priv);
+}
+
+/*
+ * unbind from given vcc (takes fbxatm_pppoa_mutex)
+ */
+static int unbind_pppoa_vcc(const struct fbxatm_pppoa_vcc_params *params)
+{
+	struct fbxatm_pppoa_vcc *priv;
+	int ret;
+
+	ret = 0;
+	mutex_lock(&fbxatm_pppoa_mutex);
+	priv = __find_pppoa_vcc(&params->id);
+	if (!priv)
+		ret = -ENOENT;
+	else
+		__unbind_pppoa_vcc(priv);
+	mutex_unlock(&fbxatm_pppoa_mutex);
+	return ret;
+}
+
+/*
+ * pppoa related ioctl handler: bind/unbind/query plus ppp channel ids
+ */
+static int fbxatm_pppoa_ioctl(struct socket *sock,
+			      unsigned int cmd, void __user *useraddr)
+{
+	int ret;
+
+	ret = 0;
+
+	switch (cmd) {
+	case FBXATM_PPPOA_IOCADD:
+	case FBXATM_PPPOA_IOCDEL:
+	{
+		struct fbxatm_pppoa_vcc_params params;
+
+		if (copy_from_user(&params, useraddr, sizeof(params)))
+			return -EFAULT;
+
+		if (cmd == FBXATM_PPPOA_IOCADD)
+			ret = bind_pppoa_vcc(&params, sock);
+		else
+			ret = unbind_pppoa_vcc(&params);
+		break;
+	}
+
+	case FBXATM_PPPOA_IOCGET:	/* return the params recorded at bind time */
+	{
+		struct fbxatm_pppoa_vcc_params params;
+		struct fbxatm_pppoa_vcc *priv;
+
+		if (copy_from_user(&params, useraddr, sizeof(params)))
+			return -EFAULT;
+
+		mutex_lock(&fbxatm_pppoa_mutex);
+		priv = __find_pppoa_vcc(&params.id);
+		if (!priv)
+			ret = -ENOENT;
+		else
+			memcpy(&params, &priv->params, sizeof (params));
+		mutex_unlock(&fbxatm_pppoa_mutex);
+
+		if (ret)
+			return ret;
+
+		if (copy_to_user(useraddr, &params, sizeof(params)))
+			return -EFAULT;
+		break;
+	}
+
+	case PPPIOCGCHAN:
+	case PPPIOCGUNIT:	/* pppd uses these to attach the channel to a unit */
+	{
+		struct fbxatm_pppoa_vcc *priv;
+		int value;
+
+		value = 0;
+
+		mutex_lock(&fbxatm_pppoa_mutex);
+		priv = __find_pppoa_vcc_from_socket(sock);
+		if (!priv)
+			ret = -ENOENT;
+		else {
+			if (cmd == PPPIOCGCHAN)
+				value = ppp_channel_index(&priv->chan);
+			else
+				value = ppp_unit_number(&priv->chan);
+		}
+		mutex_unlock(&fbxatm_pppoa_mutex);
+
+		if (ret)
+			return ret;
+
+		if (copy_to_user(useraddr, &value, sizeof(value)))
+			ret = -EFAULT;
+		break;
+	}
+
+	default:
+		return -ENOIOCTLCMD;
+	}
+
+	return ret;
+}
+
+/*
+ * pppoa related release handler: auto-unbind when the owning socket closes
+ */
+static void fbxatm_pppoa_release(struct socket *sock)
+{
+	struct fbxatm_pppoa_vcc *priv;
+
+	mutex_lock(&fbxatm_pppoa_mutex);
+	priv = __find_pppoa_vcc_from_socket(sock);
+	if (priv)
+		__unbind_pppoa_vcc(priv);
+	mutex_unlock(&fbxatm_pppoa_mutex);
+}
+
+static struct fbxatm_ioctl fbxatm_pppoa_ioctl_ops = {
+	.handler	= fbxatm_pppoa_ioctl,
+	.release	= fbxatm_pppoa_release,
+	.owner		= THIS_MODULE,
+};
+
+int __init fbxatm_pppoa_init(void)
+{
+	fbxatm_register_ioctl(&fbxatm_pppoa_ioctl_ops);
+	return 0;
+}
+
+void fbxatm_pppoa_exit(void)
+{
+	fbxatm_unregister_ioctl(&fbxatm_pppoa_ioctl_ops);
+}
diff -Nruw linux-6.13.12-fbx/net/fbxatm./fbxatm_priv.h linux-6.13.12-fbx/net/fbxatm/fbxatm_priv.h
--- linux-6.13.12-fbx/net/fbxatm./fbxatm_priv.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxatm/fbxatm_priv.h	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,68 @@
+#ifndef FBXATM_PRIV_H_
+#define FBXATM_PRIV_H_
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+struct fbxatm_dev;
+extern struct list_head fbxatm_dev_list;
+extern struct mutex fbxatm_mutex;
+
+int __init fbxatm_vcc_init(void);
+
+void fbxatm_vcc_exit(void);
+
+void __fbxatm_free_device(struct fbxatm_dev *adev);
+
+int __init fbxatm_2684_init(void);
+
+void fbxatm_2684_exit(void);
+
+/*
+ * pppoa
+ */
+#ifdef CONFIG_PPP
+int __init fbxatm_pppoa_init(void);
+
+void fbxatm_pppoa_exit(void);
+#else
+static inline int fbxatm_pppoa_init(void) { return 0; };
+static inline void fbxatm_pppoa_exit(void) { };
+#endif
+
+/*
+ * procfs stuff
+ */
+int fbxatm_proc_dev_register(struct fbxatm_dev *dev);
+
+void fbxatm_proc_dev_deregister(struct fbxatm_dev *dev);
+
+struct proc_dir_entry *fbxatm_proc_misc_register(const char *path);
+
+void fbxatm_proc_misc_deregister(const char *path);
+
+int __init fbxatm_procfs_init(void);
+
+void fbxatm_procfs_exit(void);
+
+
+/*
+ * sysfs stuff
+ */
+int __init fbxatm_sysfs_init(void);
+
+void fbxatm_sysfs_exit(void);
+
+void fbxatm_dev_change_sysfs(struct fbxatm_dev *adev);
+
+int fbxatm_register_dev_sysfs(struct fbxatm_dev *adev);
+
+void fbxatm_unregister_dev_sysfs(struct fbxatm_dev *adev);
+
+
+/*
+ * crc10
+ */
+u16 crc10(u16 crc, const u8 *buffer, size_t len);
+
+#endif /* !FBXATM_PRIV_H_ */
diff -Nruw linux-6.13.12/net/fbxatm/fbxatm_procfs.c linux-6.13.12-fbx/net/fbxatm/fbxatm_procfs.c
--- linux-6.13.12/net/fbxatm/fbxatm_procfs.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxatm/fbxatm_procfs.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,340 @@
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/fbxatm_dev.h>
+#include <net/net_namespace.h>
+#include "fbxatm_priv.h"
+
+static struct proc_dir_entry *fbxatm_proc_root;
+
+#define FMT_U64		"%llu"
+
+/*
+ * /proc/net/atm/vcc
+ */
+static int vcc_seq_show(struct seq_file *seq, void *v)
+{
+	struct fbxatm_vcc *vcc;
+
+	if (v == (void *)SEQ_START_TOKEN) {
+		seq_printf(seq, "%s",
+			   "Itf.VPI.VCI USER TC MaxSDU  RX TX  RXAAL5 "
+			   "TXAAL5\n");
+		return 0;
+	}
+
+	vcc = (struct fbxatm_vcc *)v;
+	seq_printf(seq, "%d.%u.%u %d ", vcc->adev->ifindex,
+		   vcc->vpi, vcc->vci, vcc->user);
+	seq_printf(seq, "%u %u ", vcc->qos.traffic_class, vcc->qos.max_sdu);
+	seq_printf(seq, FMT_U64 " " FMT_U64 " %u %u\n",
+		   vcc->stats.rx_bytes,
+		   vcc->stats.tx_bytes,
+		   vcc->stats.rx_aal5,
+		   vcc->stats.tx_aal5);
+	return 0;
+}
+
+static void *vcc_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct fbxatm_dev *adev;
+	struct fbxatm_vcc *tvcc, *vcc;
+	int count;
+
+	mutex_lock(&fbxatm_mutex);
+
+	if (!*pos)
+		return SEQ_START_TOKEN;
+
+	count = 1;
+	tvcc = NULL;
+	list_for_each_entry(adev, &fbxatm_dev_list, next) {
+		list_for_each_entry(vcc, &adev->vcc_list, next) {
+			if (count == *pos) {
+				tvcc = vcc;
+				break;
+			}
+			count++;
+		}
+	}
+
+	return tvcc;
+}
+
+static void *vcc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct fbxatm_dev *adev;
+	struct fbxatm_vcc *last_vcc, *vcc, *tvcc;
+
+	if (v == (void *)SEQ_START_TOKEN) {
+		if (list_empty(&fbxatm_dev_list))
+			return NULL;
+		adev = list_entry(fbxatm_dev_list.next, struct fbxatm_dev,
+				  next);
+		last_vcc = NULL;
+	} else {
+		last_vcc = (struct fbxatm_vcc *)v;
+		adev = last_vcc->adev;
+	}
+
+	tvcc = NULL;
+	list_for_each_entry_continue(adev, &fbxatm_dev_list, next) {
+
+		if (last_vcc && last_vcc->adev == adev) {
+			vcc = last_vcc;
+			list_for_each_entry_continue(vcc, &adev->vcc_list,
+						     next) {
+				tvcc = vcc;
+				break;
+			}
+		} else {
+			list_for_each_entry(vcc, &adev->vcc_list, next) {
+				tvcc = vcc;
+				break;
+			}
+		}
+	}
+
+	(*pos)++;
+	return tvcc;
+}
+
+static void vcc_seq_stop(struct seq_file *seq, void *v)
+{
+	mutex_unlock(&fbxatm_mutex);
+}
+
+static const struct seq_operations vcc_seq_ops = {
+	.start		= vcc_seq_start,
+	.next		= vcc_seq_next,
+	.stop		= vcc_seq_stop,
+	.show		= vcc_seq_show,
+};
+
+static int vcc_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &vcc_seq_ops);
+}
+
+static const struct proc_ops vcc_seq_fops = {
+	.proc_open	= vcc_seq_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
+};
+
+/*
+ * /proc/net/atm/dev
+ */
+static int adev_seq_show(struct seq_file *seq, void *v)
+{
+	struct fbxatm_dev *adev;
+
+	if (v == (void *)SEQ_START_TOKEN) {
+		seq_printf(seq, "%s",
+			   "Itf  RX TX  RXAAL5 TXAAL5  RXF4OAM TXF4OAM  "
+			   "RXF5OAM TXF5OAM  RXBADOAM RXBADLLIDOAM "
+			   "RXOTHEROAM RXDROPPED TXDROPNOLINK\n");
+		return 0;
+	}
+
+	adev = (struct fbxatm_dev *)v;
+	seq_printf(seq, "%d  " FMT_U64 " " FMT_U64 "  %u %u  ",
+		   adev->ifindex,
+		   adev->stats.rx_bytes,
+		   adev->stats.tx_bytes,
+		   adev->stats.rx_aal5,
+		   adev->stats.tx_aal5);
+
+	seq_printf(seq, "%u %u  %u %u  %u %u %u %u %u\n",
+		   adev->stats.rx_f4_oam,
+		   adev->stats.tx_f4_oam,
+
+		   adev->stats.rx_f5_oam,
+		   adev->stats.tx_f5_oam,
+
+		   adev->stats.rx_bad_oam,
+		   adev->stats.rx_bad_llid_oam,
+		   adev->stats.rx_other_oam,
+		   adev->stats.rx_dropped,
+		   adev->stats.tx_drop_nolink);
+	return 0;
+}
+
+static void *adev_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	struct fbxatm_dev *adev, *tadev;
+	int count;
+
+	mutex_lock(&fbxatm_mutex);
+
+	if (!*pos)
+		return SEQ_START_TOKEN;
+
+	count = 1;
+	tadev = NULL;
+	list_for_each_entry(adev, &fbxatm_dev_list, next) {
+		if (count == *pos) {
+			tadev = adev;
+			break;
+		}
+		count++;
+	}
+
+	return tadev;
+}
+
+static void *adev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	struct fbxatm_dev *adev, *tadev;
+
+	if (v == (void *)SEQ_START_TOKEN) {
+		if (list_empty(&fbxatm_dev_list))
+			return NULL;
+		adev = list_entry(fbxatm_dev_list.next, struct fbxatm_dev,
+				  next);
+	} else
+		adev = (struct fbxatm_dev *)v;
+
+	tadev = NULL;
+	list_for_each_entry_continue(adev, &fbxatm_dev_list, next) {
+		tadev = adev;
+		break;
+	}
+
+	(*pos)++;
+	return tadev;
+}
+
+static void adev_seq_stop(struct seq_file *seq, void *v)
+{
+	mutex_unlock(&fbxatm_mutex);
+}
+
+static const struct seq_operations adev_seq_ops = {
+	.start		= adev_seq_start,
+	.next		= adev_seq_next,
+	.stop		= adev_seq_stop,
+	.show		= adev_seq_show,
+};
+
+static int adev_seq_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &adev_seq_ops);
+}
+
+static const struct proc_ops adev_seq_fops = {
+	.proc_open	= adev_seq_open,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+	.proc_release	= seq_release,
+};
+
+
+/*
+ * create device private entry in proc
+ */
+int fbxatm_proc_dev_register(struct fbxatm_dev *adev)
+{
+	adev->dev_proc_entry = proc_mkdir(adev->name, fbxatm_proc_root);
+	if (!adev->dev_proc_entry)
+		return 1;
+	return 0;
+}
+
+
+void fbxatm_proc_dev_deregister(struct fbxatm_dev *adev)
+{
+	remove_proc_entry(adev->name, fbxatm_proc_root);
+}
+
+/*
+ * create misc private entry in proc
+ */
+struct proc_dir_entry *fbxatm_proc_misc_register(const char *path)
+{
+	return proc_mkdir(path, fbxatm_proc_root);
+}
+
+void fbxatm_proc_misc_deregister(const char *path)
+{
+	remove_proc_entry(path, fbxatm_proc_root);
+}
+
+/*
+ * list of proc entries for fbxatm
+ */
+static struct fbxatm_proc_entry {
+	char *name;
+	const struct proc_ops *proc_fops;
+	struct proc_dir_entry *dirent;
+
+} fbxatm_proc_entries[] = {
+	{
+		.name = "dev",
+		.proc_fops = &adev_seq_fops,
+	},
+	{
+		.name = "vcc",
+		.proc_fops = &vcc_seq_fops,
+	},
+};
+
+static void fbxatm_remove_proc(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(fbxatm_proc_entries); i++) {
+		struct fbxatm_proc_entry *e;
+
+		e = &fbxatm_proc_entries[i];
+
+		if (!e->dirent)
+			continue;
+		remove_proc_entry(e->name, fbxatm_proc_root);
+		e->dirent = NULL;
+	}
+
+	remove_proc_entry("fbxatm", init_net.proc_net);
+}
+
+int __init fbxatm_procfs_init(void)
+{
+	unsigned int i;
+	int ret;
+
+	fbxatm_proc_root = proc_net_mkdir(&init_net, "fbxatm",
+					  init_net.proc_net);
+	if (!fbxatm_proc_root) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(fbxatm_proc_entries); i++) {
+		struct proc_dir_entry *dirent;
+		struct fbxatm_proc_entry *e;
+
+		e = &fbxatm_proc_entries[i];
+
+		dirent = proc_create_data(e->name, S_IRUGO, fbxatm_proc_root,
+					  e->proc_fops, NULL);
+		if (!dirent) {
+			ret = -ENOMEM;
+			goto err;
+		}
+		e->dirent = dirent;
+	}
+
+	return 0;
+
+err:
+	if (fbxatm_proc_root)
+		fbxatm_remove_proc();
+	return ret;
+}
+
+void fbxatm_procfs_exit(void)
+{
+	fbxatm_remove_proc();
+}
diff -Nruw linux-6.13.12/net/fbxatm/fbxatm_sysfs.c linux-6.13.12-fbx/net/fbxatm/fbxatm_sysfs.c
--- linux-6.13.12/net/fbxatm/fbxatm_sysfs.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxatm/fbxatm_sysfs.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,184 @@
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
+#include <linux/stat.h>
+#include <linux/fbxatm_dev.h>
+#include "fbxatm_priv.h"
+
+#define to_fbxatm_dev(cldev) container_of(cldev, struct fbxatm_dev, dev)
+
+static const char fmt_u64[] = "%llu\n";
+
+static ssize_t show_ifindex(struct device *dev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	return sprintf(buf, "%d\n", adev->ifindex);
+}
+
+static ssize_t show_link_state(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	return sprintf(buf, "%d\n",
+		       test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags) ?
+		       1 : 0);
+}
+
+static ssize_t show_link_rate_us(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	return sprintf(buf, "%d\n", adev->link_rate_us);
+}
+
+static ssize_t show_link_rate_ds(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	return sprintf(buf, "%d\n", adev->link_rate_ds);
+}
+
+static ssize_t show_max_priority(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	return sprintf(buf, "%d\n", adev->max_priority);
+}
+
+static ssize_t show_max_rx_priority(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	return sprintf(buf, "%d\n", adev->max_rx_priority);
+}
+
+static ssize_t show_rx_bytes(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	u64 val;
+
+	spin_lock_bh(&adev->stats_lock);
+	val = adev->stats.rx_bytes;
+	spin_unlock_bh(&adev->stats_lock);
+	return sprintf(buf, fmt_u64, val);
+}
+
+static ssize_t show_tx_bytes(struct device *dev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	u64 val;
+
+	spin_lock_bh(&adev->stats_lock);
+	val = adev->stats.tx_bytes;
+	spin_unlock_bh(&adev->stats_lock);
+	return sprintf(buf, fmt_u64, val);
+}
+
+static DEVICE_ATTR(ifindex, S_IRUGO, show_ifindex, NULL);
+static DEVICE_ATTR(link_state, S_IRUGO, show_link_state, NULL);
+static DEVICE_ATTR(link_rate_us, S_IRUGO, show_link_rate_us, NULL);
+static DEVICE_ATTR(link_rate_ds, S_IRUGO, show_link_rate_ds, NULL);
+static DEVICE_ATTR(max_priority, S_IRUGO, show_max_priority, NULL);
+static DEVICE_ATTR(max_rx_priority, S_IRUGO, show_max_rx_priority, NULL);
+static DEVICE_ATTR(rx_bytes, S_IRUGO, show_rx_bytes, NULL);
+static DEVICE_ATTR(tx_bytes, S_IRUGO, show_tx_bytes, NULL);
+
+static struct device_attribute *fbxatm_attrs[] = {
+	&dev_attr_ifindex,
+	&dev_attr_link_state,
+	&dev_attr_link_rate_us,
+	&dev_attr_link_rate_ds,
+	&dev_attr_max_priority,
+	&dev_attr_max_rx_priority,
+	&dev_attr_rx_bytes,
+	&dev_attr_tx_bytes,
+};
+
+static int fbxatm_uevent(const struct device *dev, struct kobj_uevent_env *env)
+{
+	struct fbxatm_dev *adev;
+
+	if (!dev)
+		return -ENODEV;
+
+	adev = to_fbxatm_dev(dev);
+	if (!adev)
+		return -ENODEV;
+
+	if (add_uevent_var(env, "NAME=%s", adev->name))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "IFINDEX=%u", adev->ifindex))
+		return -ENOMEM;
+
+	if (add_uevent_var(env, "LINK=%u",
+			   test_bit(FBXATM_DEV_F_LINK_UP, &adev->dev_flags) ?
+			   1 : 0))
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void fbxatm_release(struct device *dev)
+{
+	struct fbxatm_dev *adev = to_fbxatm_dev(dev);
+	__fbxatm_free_device(adev);
+}
+
+static struct class fbxatm_class = {
+	.name		= "fbxatm",
+	.dev_release	= fbxatm_release,
+	.dev_uevent	= fbxatm_uevent,
+};
+
+void fbxatm_dev_change_sysfs(struct fbxatm_dev *adev)
+{
+	struct device *dev = &adev->dev;
+
+	kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, NULL);
+}
+
+int fbxatm_register_dev_sysfs(struct fbxatm_dev *adev)
+{
+	struct device *dev = &adev->dev;
+	int i, j, ret;
+
+	dev->class = &fbxatm_class;
+	dev_set_name(dev, "%s", adev->name);
+	ret = device_register(dev);
+	if (ret < 0)
+		return ret;
+
+	for (i = 0; i < ARRAY_SIZE(fbxatm_attrs); i++) {
+		ret = device_create_file(dev, fbxatm_attrs[i]);
+		if (ret)
+			goto err;
+	}
+	return 0;
+
+err:
+	for (j = 0; j < i; j++)
+		device_remove_file(dev, fbxatm_attrs[j]);
+	device_del(dev);
+	return ret;
+}
+
+void fbxatm_unregister_dev_sysfs(struct fbxatm_dev *adev)
+{
+	struct device *dev = &adev->dev;
+	device_del(dev);
+}
+
+int __init fbxatm_sysfs_init(void)
+{
+	return class_register(&fbxatm_class);
+}
+
+void fbxatm_sysfs_exit(void)
+{
+	class_unregister(&fbxatm_class);
+}
diff -Nruw linux-6.13.12/net/fbxbridge/Kconfig linux-6.13.12-fbx/net/fbxbridge/Kconfig
--- linux-6.13.12/net/fbxbridge/Kconfig	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxbridge/Kconfig	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,8 @@
+
+#
+# Freebox bridge
+#
+config FBXBRIDGE
+	bool "Freebox Bridge"
+	select NETFILTER
+	select NF_CONNTRACK
diff -Nruw linux-6.13.12/net/fbxbridge/Makefile linux-6.13.12-fbx/net/fbxbridge/Makefile
--- linux-6.13.12/net/fbxbridge/Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxbridge/Makefile	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,12 @@
+
+obj-$(CONFIG_FBXBRIDGE)	+= fbxbridge.o
+
+fbxbridge-objs := 		\
+	fbxbr_dev.o		\
+	fbxbr_dhcp.o		\
+	fbxbr_filter.o		\
+	fbxbr_fwcache.o		\
+	fbxbr_input.o		\
+	fbxbr_ioctl.o		\
+	fbxbr_output.o		\
+	fbxbr_utils.o
diff -Nruw linux-6.13.12/net/fbxbridge/fbxbr_dev.c linux-6.13.12-fbx/net/fbxbridge/fbxbr_dev.c
--- linux-6.13.12/net/fbxbridge/fbxbr_dev.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxbridge/fbxbr_dev.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,736 @@
+#define pr_fmt(fmt)	"fbxbridge: " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/sockios.h>
+#include <linux/inetdevice.h>
+#include <linux/notifier.h>
+#include <linux/if_arp.h>
+#include <linux/mutex.h>
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <net/ip.h>
+#include "fbxbr_private.h"
+
+static LIST_HEAD(fbxbr_list);
+static DEFINE_MUTEX(fbxbr_list_mutex);
+
+/*
+ * ioctl "install" func
+ */
+extern void fbxbridge_set(int (*hook)(struct net *net,
+				      unsigned int, void __user *));
+
+
+/*
+ * caller must hold rtnl lock
+ */
+static struct fbxbr *__fbxbr_get_by_name(struct net *net, const char *name)
+{
+	struct net_device *dev;
+
+	dev = __dev_get_by_name(net, name);
+	if (dev == NULL)
+		return ERR_PTR(-ENODEV);
+
+	if (!(dev->priv_flags & IFF_FBXBRIDGE))
+		return ERR_PTR(-ENODEV);
+
+	return netdev_priv(dev);
+}
+
+/*
+ * compute ip address that we will pretend to be on the lan side
+ */
+static inline __be32 gen_lan_gw(__be32 be_ipaddr, __be32 be_netmask)
+{
+	u32 ipaddr, netmask;
+	u32 gw, mask;
+
+	ipaddr = __be32_to_cpu(be_ipaddr);
+	netmask = __be32_to_cpu(be_netmask);
+
+	/* default to last address of subnet */
+	gw = ipaddr & netmask;
+	mask = ~netmask;
+	gw |= (mask - 1);
+
+	/* if it happens to be the ip address, then take another one */
+	if (gw == ipaddr) {
+		gw &= netmask;
+		gw |= mask - 2;
+	}
+	return __cpu_to_be32(gw);
+}
+
+/*
+ * must be called with bridge write lock held
+ */
+static void __fetch_wan_parameters(struct fbxbr *br, struct in_ifaddr *ifa)
+{
+	struct net_device *wan_dev;
+
+	if (!ifa)
+		return;
+
+	if (WARN_ON(!br->wan_port))
+		return;
+
+	if (br->wan_ipaddr == ifa->ifa_local &&
+	    br->wan_netmask == ifa->ifa_mask)
+		return;
+
+	br->wan_ipaddr = ifa->ifa_local;
+	br->wan_netmask = ifa->ifa_mask;
+
+	if (br->wan_netmask != 0xffffffff) {
+		/* standard netmask */
+		br->lan_gw = gen_lan_gw(br->wan_ipaddr,	br->wan_netmask);
+		br->lan_netmask = br->wan_netmask;
+	} else {
+		u32 gw;
+
+		/* switch to /24 if wan it pointtopoint */
+		gw = ntohl(br->wan_ipaddr) & 0xffffff00;
+		if ((gw | 0xfe) == ntohl(br->wan_ipaddr))
+			gw |= 0xfd;
+		else
+			gw |= 0xfe;
+
+		br->lan_gw = htonl(gw);
+		br->lan_netmask = htonl(0xffffff00);
+	}
+
+	wan_dev = br->wan_port->dev;
+	pr_notice("%s: wan inet device %s address changed to [%pI4]\n",
+		  br->dev->name, wan_dev->name, &br->wan_ipaddr);
+
+	pr_info("%s: %s: wan netmask: %pI4\n",
+		br->dev->name, wan_dev->name, &br->wan_netmask);
+
+	pr_info("%s: %s: lan gw: %pI4\n",
+		br->dev->name, wan_dev->name, &br->lan_gw);
+}
+
+/*
+ * caller must hold rtnl lock
+ */
+int __fbxbr_add_br_port(struct net *net, const char *name,
+			const char *port_name, bool is_wan)
+{
+	struct net_device *dev;
+	struct fbxbr *br;
+	struct fbxbr_port *p;
+	int ret;
+
+	/* locate bridge */
+	br = __fbxbr_get_by_name(net, name);
+	if (IS_ERR(br))
+		return PTR_ERR(br);
+
+	/* check that we don't have a device already */
+	if ((is_wan && br->wan_port) || (!is_wan && br->lan_port))
+		return -EBUSY;
+
+	/* locate port */
+	dev = __dev_get_by_name(net, port_name);
+	if (!dev)
+		return -ENODEV;
+
+	/* make sure it's not used by us */
+	if (dev->priv_flags & (IFF_FBXBRIDGE | IFF_FBXBRIDGE_PORT))
+		return -EBUSY;
+
+	/* allocate new port */
+	p = kzalloc(sizeof (*p), GFP_KERNEL);
+	if (p == NULL)
+		return -ENOMEM;
+
+	p->br = br;
+	p->dev = dev;
+	p->is_wan = is_wan;
+
+	write_lock_bh(&br->lock);
+	if (is_wan)
+		br->wan_port = p;
+	else
+		br->lan_port = p;
+
+	if (is_wan) {
+		struct in_device *in_dev;
+
+		rcu_read_lock();
+
+		in_dev = __in_dev_get_rcu(dev);
+		if (in_dev)
+			__fetch_wan_parameters(br, in_dev->ifa_list);
+
+		rcu_read_unlock();
+	}
+
+	write_unlock_bh(&br->lock);
+
+	ret = netdev_rx_handler_register(dev, fbxbr_handle_frame, p);
+	if (ret)
+		goto err;
+
+	dev->priv_flags |= IFF_FBXBRIDGE_PORT;
+
+	ret = netdev_master_upper_dev_link(dev, br->dev, NULL, NULL, NULL);
+	if (ret)
+		goto err;
+
+	pr_info("%s: %s device %s grabbed\n",
+		br->dev->name, is_wan ? "wan" : "lan", dev->name);
+
+	return 0;
+
+err:
+	write_lock_bh(&br->lock);
+	netdev_rx_handler_unregister(dev);
+	if (is_wan)
+		br->wan_port = NULL;
+	else
+		br->lan_port = NULL;
+	dev->priv_flags &= ~IFF_FBXBRIDGE_PORT;
+	write_unlock_bh(&br->lock);
+	kfree(p);
+	return ret;
+}
+
+/*
+ * caller must hold rtnl lock
+ */
+void __fbxbr_del_br_port(struct fbxbr_port *p)
+{
+	struct fbxbr *br = p->br;
+	struct net_device *dev = p->dev;
+	bool is_wan;
+
+	netdev_upper_dev_unlink(dev, br->dev);
+	netdev_rx_handler_unregister(dev);
+	dev->priv_flags &= ~IFF_FBXBRIDGE_PORT;
+	is_wan = p->is_wan;
+
+	write_lock_bh(&br->lock);
+	if (p->is_wan)
+		br->wan_port = NULL;
+	else
+		br->lan_port = NULL;
+
+	if (p->rt)
+		ip_rt_put(p->rt);
+	write_unlock_bh(&br->lock);
+	kfree(p);
+
+	pr_info("%s: %s device %s released\n",
+		br->dev->name, is_wan ? "wan" : "lan", dev->name);
+}
+
+/*
+ * caller must hold rtnl lock
+ */
+int __fbxbr_del_br_port_by_name(struct net *net, const char *name,
+				const char *port_name)
+{
+	struct net_device *dev;
+	struct fbxbr *br;
+	struct fbxbr_port *p;
+
+	/* locate bridge */
+	br = __fbxbr_get_by_name(net, name);
+	if (IS_ERR(br))
+		return PTR_ERR(br);
+
+	/* locate port */
+	dev = __dev_get_by_name(net, port_name);
+	if (!dev)
+		return -ENODEV;
+
+	p = fbxbr_port_get_rtnl(dev);
+	if (!p || p->br != br)
+		return -EINVAL;
+
+	__fbxbr_del_br_port(p);
+	return 0;
+}
+
+/*
+ * bridge device netdevice ops
+ */
+static int fbxbr_net_open(struct net_device *dev)
+{
+	return 0;
+}
+
+static int fbxbr_net_stop(struct net_device *dev)
+{
+	return 0;
+}
+
+static int fbxbr_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct fbxbr *br = netdev_priv(dev);
+	const struct iphdr *iph;
+
+	read_lock(&br->lock);
+
+	if (skb->protocol != htons(ETH_P_IP))
+		goto drop;
+
+	if (!br->wan_ipaddr)
+		goto drop;
+
+	if (!br->lan_port)
+		goto drop;
+
+	if (!pskb_may_pull(skb, sizeof (*iph)))
+		goto drop;
+
+	iph = ip_hdr(skb);
+
+	if (ipv4_is_multicast(iph->daddr)) {
+		dev->stats.tx_packets++;
+		dev->stats.tx_bytes += skb->len;
+		fbxbr_output_lan_mcast_frame(br, skb);
+		goto done;
+	}
+
+	if (iph->daddr != br->br_remote_ipaddr)
+		goto drop;
+
+	fbxbr_dnat_packet(skb, br->wan_ipaddr);
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += skb->len;
+	fbxbr_output_lan_frame(br, skb);
+
+done:
+	read_unlock(&br->lock);
+	return 0;
+
+drop:
+	dev->stats.tx_dropped++;
+	read_unlock(&br->lock);
+	kfree_skb(skb);
+	return 0;
+}
+
+static const struct net_device_ops fbxbr_net_ops = {
+	.ndo_open		= fbxbr_net_open,
+	.ndo_stop		= fbxbr_net_stop,
+	.ndo_start_xmit		= fbxbr_net_start_xmit,
+};
+
+static struct device_type fbxbr_type = {
+	.name	= "fbxbridge",
+};
+
+/*
+ * fbxbridge alloc_netdev setup func
+ */
+#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
+			 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
+
+static void fbxbr_netdev_setup(struct net_device *dev)
+{
+	struct fbxbr *br = netdev_priv(dev);
+	size_t i;
+
+	dev->flags = IFF_NOARP;
+	dev->type = ARPHRD_PPP;
+	dev->mtu = 1500;
+	dev->hard_header_len = 16;
+
+	dev->netdev_ops = &fbxbr_net_ops;
+	dev->needs_free_netdev = true;
+	SET_NETDEV_DEVTYPE(dev, &fbxbr_type);
+	dev->priv_flags = IFF_FBXBRIDGE | IFF_NO_QUEUE;
+
+	dev->features = 0;
+	dev->hw_features = 0;
+	dev->vlan_features = 0;
+
+	br->dev = dev;
+	rwlock_init(&br->lock);
+	rwlock_init(&br->lan_hwaddr_lock);
+	br->dhcpd_renew_time = DEFAULT_RENEWAL_TIME;
+	br->dhcpd_rebind_time = DEFAULT_REBIND_TIME;
+	br->dhcpd_lease_time = DEFAULT_LEASE_TIME;
+	spin_lock_init(&br->last_arp_lock);
+	br->last_arp_send = jiffies;
+
+	rwlock_init(&br->fwcache_lock);
+	INIT_LIST_HEAD(&br->fwcache_rules);
+	for (i = 0; i < ARRAY_SIZE(br->fwcache_hrules); i++)
+		INIT_HLIST_HEAD(&br->fwcache_hrules[i]);
+}
+
+/*
+ *
+ */
+int fbxbr_add_br(struct net *net, const char *name)
+{
+	struct net_device *dev;
+	struct fbxbr *br;
+	int ret;
+
+	dev = alloc_netdev(sizeof (struct fbxbr), name, NET_NAME_UNKNOWN,
+			   fbxbr_netdev_setup);
+	if (!dev)
+		return -ENOMEM;
+
+	dev_net_set(dev, net);
+
+	ret = register_netdev(dev);
+	if (ret) {
+		free_netdev(dev);
+		return ret;
+	}
+
+	br = netdev_priv(dev);
+	mutex_lock(&fbxbr_list_mutex);
+	list_add(&br->next, &fbxbr_list);
+	mutex_unlock(&fbxbr_list_mutex);
+
+	pr_notice("%s: new fbxbridge\n", dev->name);
+	return 0;
+}
+
+
+/*
+ * caller must hold rtnl lock
+ */
+int __fbxbr_del_br(struct net *net, const char *name)
+{
+	struct fbxbr *br;
+
+	br = __fbxbr_get_by_name(net, name);
+	if (IS_ERR(br))
+		return PTR_ERR(br);
+
+	mutex_lock(&fbxbr_list_mutex);
+	list_del(&br->next);
+	mutex_unlock(&fbxbr_list_mutex);
+
+	if (br->wan_port)
+		__fbxbr_del_br_port(br->wan_port);
+	if (br->lan_port)
+		__fbxbr_del_br_port(br->lan_port);
+
+	unregister_netdevice(br->dev);
+	return 0;
+}
+
+/*
+ *
+ */
+int fbxbr_get_params(struct net *net, const char *name,
+		     struct fbxbridge_ioctl_params *params)
+{
+	struct fbxbr *br;
+
+	rtnl_lock();
+
+	/* locate bridge */
+	br = __fbxbr_get_by_name(net, name);
+	if (IS_ERR(br)) {
+		rtnl_unlock();
+		return PTR_ERR(br);
+	}
+
+	/* copy current config */
+	params->flags = br->flags;
+	params->dns1_addr = br->dns1_ipaddr;
+	params->dns2_addr = br->dns2_ipaddr;
+	memcpy(params->ip_aliases, br->ip_aliases, sizeof (br->ip_aliases));
+	params->dhcpd_renew_time = br->dhcpd_renew_time;
+	params->dhcpd_rebind_time = br->dhcpd_rebind_time;
+	params->dhcpd_lease_time = br->dhcpd_lease_time;
+	params->inputmark = br->inputmark;
+
+	/* current ports */
+	if (br->wan_port) {
+		memcpy(params->wan_dev.name,
+		       br->wan_port->dev->name,
+		       IFNAMSIZ);
+		params->wan_dev.present = 1;
+	} else {
+		params->wan_dev.name[0] = 0;
+		params->wan_dev.present = 0;
+	}
+
+	if (br->lan_port) {
+		memcpy(params->lan_dev.name,
+		       br->lan_port->dev->name,
+		       IFNAMSIZ);
+		params->lan_dev.present = 1;
+	} else {
+		params->lan_dev.name[0] = 0;
+		params->lan_dev.present = 0;
+	}
+
+	/* copy state */
+	read_lock_bh(&br->lan_hwaddr_lock);
+	params->have_hw_addr = br->have_hw_addr;
+	memcpy(params->lan_hwaddr, br->lan_hwaddr, ETH_ALEN);
+	read_unlock_bh(&br->lan_hwaddr_lock);
+
+	rtnl_unlock();
+
+	return 0;
+}
+
+/*
+ *
+ */
+int fbxbr_set_params(struct net *net, const char *name,
+		     const struct fbxbridge_ioctl_params *params)
+{
+	struct fbxbr *br;
+
+	rtnl_lock();
+
+	/* locate bridge */
+	br = __fbxbr_get_by_name(net, name);
+	if (IS_ERR(br)) {
+		rtnl_unlock();
+		return PTR_ERR(br);
+	}
+
+	write_lock_bh(&br->lock);
+
+	br->flags = params->flags;
+	br->dns1_ipaddr = params->dns1_addr;
+	br->dns2_ipaddr = params->dns2_addr;
+	memcpy(br->ip_aliases, params->ip_aliases, sizeof (br->ip_aliases));
+	br->dhcpd_renew_time = params->dhcpd_renew_time;
+	br->dhcpd_rebind_time = params->dhcpd_rebind_time;
+	br->dhcpd_lease_time = params->dhcpd_lease_time;
+	br->inputmark = params->inputmark;
+
+	write_unlock_bh(&br->lock);
+
+	fbxbr_fwcache_flush(br);
+
+	rtnl_unlock();
+
+	return 0;
+}
+
+/*
+ *
+ */
+void fbxbr_flush_cache(void);
+
+void fbxbr_flush_cache(void)
+{
+	struct fbxbr *br;
+
+	mutex_lock(&fbxbr_list_mutex);
+	list_for_each_entry(br, &fbxbr_list, next)
+		fbxbr_fwcache_flush(br);
+	mutex_unlock(&fbxbr_list_mutex);
+}
+
+/*
+ * must be called with BH disabled
+ */
+void fbxbr_capture_hw_addr(struct fbxbr *br, const u8 *hwaddr)
+{
+	bool same;
+
+	read_lock(&br->lan_hwaddr_lock);
+	same = (br->have_hw_addr && !memcmp(br->lan_hwaddr, hwaddr, ETH_ALEN));
+	read_unlock(&br->lan_hwaddr_lock);
+
+	if (same)
+		return;
+
+	write_lock(&br->lan_hwaddr_lock);
+	memcpy(br->lan_hwaddr, hwaddr, ETH_ALEN);
+	br->have_hw_addr = 1;
+	write_unlock(&br->lan_hwaddr_lock);
+
+	pr_notice("%s: new lan hw address is now %pM\n",
+		  br->dev->name, hwaddr);
+}
+
+/*
+ * netdevice notifier callback, called with rtnl lock
+ */
+static int fbxbr_netdev_event_callback(struct notifier_block *this,
+				       unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	ASSERT_RTNL();
+
+	if (!(dev->priv_flags & IFF_FBXBRIDGE_PORT))
+		return NOTIFY_DONE;
+
+	/* catch port that goes away */
+	switch (event) {
+	case NETDEV_UNREGISTER:
+		__fbxbr_del_br_port(fbxbr_port_get_rtnl(dev));
+		break;
+
+	default:
+		break;
+	};
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * handle inet configuration event on port
+ */
+static void __handle_inet_port_event(struct fbxbr_port *p,
+				     unsigned long event,
+				     struct in_ifaddr *ifa)
+{
+	struct fbxbr *br;
+
+	if (!p->is_wan)
+		return;
+
+	br = p->br;
+
+	switch (event) {
+	case NETDEV_UP:
+		write_lock_bh(&br->lan_hwaddr_lock);
+		__fetch_wan_parameters(br, ifa);
+		write_unlock_bh(&br->lan_hwaddr_lock);
+		break;
+
+	case NETDEV_DOWN:
+		/* we never  clear wan address, so we  can continue to
+		 * use the bridge on lan side even if wan is down */
+		break;
+
+	default:
+		break;
+	}
+}
+
+/*
+ * handle inet configuration event on bridge interface (fbxbr%d)
+ */
+static void __handle_inet_bridge_event(struct fbxbr *br,
+				       unsigned long event,
+				       struct in_ifaddr *ifa)
+{
+	switch (event) {
+	case NETDEV_UP:
+		if (!ifa->ifa_address || ifa->ifa_local == ifa->ifa_address)
+			return;
+
+		write_lock_bh(&br->lan_hwaddr_lock);
+		br->br_ipaddr = ifa->ifa_local;
+		br->br_remote_ipaddr = ifa->ifa_address;
+		write_unlock_bh(&br->lan_hwaddr_lock);
+
+		if (br->br_ipaddr)
+			pr_info("%s: bridge local interface configured: "
+				"[%pI4 -> %pI4]\n",
+				br->dev->name,
+				&br->br_ipaddr,
+				&br->br_remote_ipaddr);
+		break;
+
+	case NETDEV_DOWN:
+		write_lock_bh(&br->lan_hwaddr_lock);
+		if (br->br_ipaddr) {
+			br->br_ipaddr = br->br_remote_ipaddr = 0;
+			pr_info("%s: bridge interface unconfigured\n",
+				br->dev->name);
+		}
+		write_unlock_bh(&br->lan_hwaddr_lock);
+		break;
+
+	default:
+		return;
+	}
+}
+
+/*
+ * kernel inet event notifier callback
+ */
+static int fbxbr_inet_event_callback(struct notifier_block *this,
+				     unsigned long event, void *ptr)
+{
+	struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+	struct net_device *dev = ifa->ifa_dev->dev;
+
+	ASSERT_RTNL();
+
+	/* is it a bridge ? */
+	if (dev->priv_flags & IFF_FBXBRIDGE) {
+		struct fbxbr *br = netdev_priv(dev);
+		__handle_inet_bridge_event(br, event, ifa);
+		return NOTIFY_DONE;
+	}
+
+	/* is it a bridge port */
+	if (dev->priv_flags & IFF_FBXBRIDGE_PORT) {
+		struct fbxbr_port *p = fbxbr_port_get_rtnl(dev);
+		__handle_inet_port_event(p, event, ifa);
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_DONE;
+}
+
+
+static struct notifier_block fbxbr_netdev_notifier = {
+	notifier_call: fbxbr_netdev_event_callback,
+};
+
+static struct notifier_block fbxbr_inet_notifier = {
+	notifier_call: fbxbr_inet_event_callback,
+};
+
+/*
+ *
+ */
+static int __init fbxbr_init_module(void)
+{
+	int err;
+
+	err = register_netdevice_notifier(&fbxbr_netdev_notifier);
+	if (err) {
+		pr_err("can't register netdevice notifier\n");
+		return err;
+	}
+
+	err = register_inetaddr_notifier(&fbxbr_inet_notifier);
+	if (err) {
+		pr_err("can't register inet notifier\n");
+		goto err_netdev;
+	}
+
+	fbxbridge_set(fbxbr_ioctl);
+	return 0;
+
+err_netdev:
+	unregister_netdevice_notifier(&fbxbr_netdev_notifier);
+	return err;
+}
+
+/*
+ *
+ */
+static void __exit fbxbr_exit_module(void)
+{
+	unregister_netdevice_notifier(&fbxbr_netdev_notifier);
+	unregister_inetaddr_notifier(&fbxbr_inet_notifier);
+	fbxbridge_set(NULL);
+}
+
+module_init(fbxbr_init_module);
+module_exit(fbxbr_exit_module);
+
+MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
+MODULE_DESCRIPTION("Freebox Network Bridge - www.freebox.fr");
+MODULE_LICENSE("GPL");
diff -Nruw linux-6.13.12/net/fbxbridge/fbxbr_dhcp.c linux-6.13.12-fbx/net/fbxbridge/fbxbr_dhcp.c
--- linux-6.13.12/net/fbxbridge/fbxbr_dhcp.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxbridge/fbxbr_dhcp.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,502 @@
+#include "fbxbr_private.h"
+#include <linux/udp.h>
+#include <net/ip.h>
+#include <asm/checksum.h>
+
+#define BOOTP_REQUEST   1
+#define BOOTP_REPLY     2
+
+/* on-wire layout of a BOOTP/DHCP packet, including the IP and UDP
+ * headers.  The option area is fixed at 312 bytes here; callers use
+ * (sizeof(struct bootp_pkt) - 312) as the minimum acceptable frame. */
+struct bootp_pkt {              /* BOOTP packet format */
+	struct iphdr iph;       /* IP header */
+	struct udphdr udph;     /* UDP header */
+	u8 op;                  /* 1=request, 2=reply */
+	u8 htype;               /* HW address type */
+	u8 hlen;                /* HW address length */
+	u8 hops;                /* Used only by gateways */
+	u32 xid;                /* Transaction ID */
+	u16 secs;               /* Seconds since we started */
+	u16 flags;              /* Just what it says */
+	u32 client_ip;          /* Client's IP address if known */
+	u32 your_ip;            /* Assigned IP address */
+	u32 server_ip;          /* (Next, e.g. NFS) Server's IP address */
+	u32 relay_ip;           /* IP address of BOOTP relay */
+	u8 hw_addr[16];         /* Client's HW address */
+	u8 serv_name[64];       /* Server host name */
+	u8 boot_file[128];      /* Name of boot file */
+	u8 exten[312];          /* DHCP options / BOOTP vendor extensions */
+};
+
+#define FBX_OPT_VENDOR_F_IGNORE_BRIDGE	(1 << 0)
+
+/* payload of the freebox vendor DHCP option (224): an OUI followed by
+ * version and flags, packed, wire order (flags read with be32_to_cpu) */
+struct fbx_opt_vendor {
+	u8	oui[3];
+	u32	version;
+	u32	flags;
+} __attribute__((packed));
+
+#define DHCPDISCOVER	1
+#define DHCPOFFER	2
+#define DHCPREQUEST	3
+#define DHCPDECLINE	4
+#define DHCPACK		5
+#define DHCPNACK	6
+#define DHCPRELEASE	7
+#define DHCPINFORM	8
+
+#define BROADCAST_FLAG	0x8000 /* "I need broadcast replies" */
+
+/* printable names indexed by DHCP message type 0..8; keep in sync with
+ * the DHCP* defines above (index 0 = no/unknown type) */
+static const char *dhcp_to_name[] = {
+	"NONE",
+	"DHCPDISCOVER",
+	"DHCPOFFER",
+	"DHCPREQUEST",
+	"DHCPDECLINE",
+	"DHCPACK",
+	"DHCPNACK",
+	"DHCPRELEASE",
+	"DHCPINFORM",
+};
+
+
+#define PARAM_SUBMASK	(1 << 0)
+#define PARAM_ROUTER	(1 << 1)
+#define PARAM_DNS	(1 << 2)
+#define PARAM_BROADCAST	(1 << 3)
+
+/* decoded view of the DHCP options this bridge cares about; filled in
+ * by parse_dhcp_opts() and serialized back by make_dhcp_opts() */
+struct dhcp_options
+{
+	u8	msg_type;
+	u32	t1;		/* renewal timeout */
+	u32	t2;		/* rebinding timemout */
+	u32	lease_time;	/* lease time */
+	u32	server_id;	/* server identifier */
+	u32	request_param;	/* requested config params (bitfield) */
+
+	u32	netmask;	/* netmask assigned to client */
+	u32	router;
+	u32	bcast;
+	u32	dns1;
+	u32	dns2;
+	u32	requested_ip;
+
+	struct fbx_opt_vendor	fbx;	/* option 224 payload */
+	bool			fbx_valid; /* option 224 seen with fbx OUI */
+
+	bool	need_bcast;	/* client set the BOOTP broadcast flag */
+};
+
+static const unsigned char dhcp_magic_cookie[] = { 0x63, 0x82, 0x53, 0x63 };
+
+/*
+ * parse the dhcp options string into a struct dhcp_options.
+ *
+ * opts_str/maxlen delimit the raw option area; opts is zeroed first so
+ * absent options read as 0.  Bounds are checked so a truncated or
+ * malformed option list can never be read past opts_str + maxlen.
+ */
+static void parse_dhcp_opts(const u8 *opts_str, int maxlen,
+			    struct dhcp_options *opts)
+{
+	const u8 *p, *end;
+
+	memset(opts, 0, sizeof(*opts));
+
+	/* need at least the 4 byte magic cookie before reading it */
+	if (maxlen < (int)sizeof(dhcp_magic_cookie))
+		return;
+
+	/* check magic cookie */
+	if (memcmp(opts_str, dhcp_magic_cookie, sizeof(dhcp_magic_cookie)))
+		return;
+
+	/* now go for options */
+	p = opts_str + 4;
+	end = opts_str + maxlen;
+
+	while (p < end && *p != 0xff) {
+		const u8 *option;
+		size_t len, i;
+
+		option = p++;
+
+		/* option 0 is a single pad byte */
+		if (*option == 0)
+			continue;
+
+		/* don't read the length byte past the end */
+		if (p >= end)
+			break;
+
+		/* jump of 'len' + 1 bytes */
+		len = *p;
+		p += len + 1;
+		/* option data is option[2] .. option[1 + len]; valid
+		 * iff it ends at or before 'end' */
+		if (p > end)
+			break;
+
+		/* search for known parameter */
+		switch (*option) {
+		case 53: /* msg_type */
+			if (len)
+				opts->msg_type = option[2];
+			break;
+
+		case 55: /* param request */
+			for (i = 0; i < len; i++) {
+				switch (option[2 + i]) {
+				case 1: /* subnet */
+					opts->request_param |= PARAM_SUBMASK;
+					break;
+
+				case 3: /* router */
+					opts->request_param |= PARAM_ROUTER;
+					break;
+
+				case 6: /* dns */
+					opts->request_param |= PARAM_DNS;
+					break;
+
+				case 28: /* broadcast */
+					opts->request_param |= PARAM_BROADCAST;
+					break;
+				}
+			}
+			break;
+
+		case 50: /* requested_ip */
+			if (len >= 4)
+				memcpy(&opts->requested_ip, option + 2, 4);
+			break;
+
+		case 54: /* server_id */
+			if (len >= 4)
+				memcpy(&opts->server_id, option + 2, 4);
+			break;
+
+		case 224: /* IANA reserved for freebox use */
+		{
+			if (len >= sizeof (opts->fbx)) {
+				memcpy(&opts->fbx, option + 2,
+				       sizeof (opts->fbx));
+				/* only trust the option if it carries
+				 * the freebox OUI 00:07:CB */
+				if (opts->fbx.oui[0] == 0x00 &&
+				    opts->fbx.oui[1] == 0x07 &&
+				    opts->fbx.oui[2] == 0xCB)
+					opts->fbx_valid = true;
+			}
+			break;
+		}
+		}
+	}
+}
+
+/*
+ * debug helper: log a one-line summary of a DHCP message seen or sent
+ * by the bridge.  'action' and 'dest' are free-form strings such as
+ * "received" / "from lan".
+ */
+static void dump_dhcp_message(struct fbxbr *br, struct sk_buff *skb,
+			      struct bootp_pkt *bpkt, const char *action,
+			      const char *dest)
+{
+	struct dhcp_options opts;
+
+	/* option area length = frame length minus the fixed bootp
+	 * header (struct size minus the 312 byte option area) */
+	parse_dhcp_opts(bpkt->exten, skb->len - (sizeof(*bpkt) - 312),
+			&opts);
+
+	/* bound the name lookup by the table size instead of a magic 9 */
+	if (opts.msg_type < ARRAY_SIZE(dhcp_to_name)) {
+		struct iphdr *iph;
+
+		iph = ip_hdr(skb);
+		printk(KERN_DEBUG "%s: %s dhcp %s %s "
+		       "(%pI4 -> %pI4) "
+		       "(caddr: %pI4 - yaddr: %pI4 - "
+		       "saddr: %pI4 - req_addr: %pI4)\n",
+		       br->dev->name,
+		       action,
+		       dhcp_to_name[opts.msg_type],
+		       dest,
+		       &iph->saddr,
+		       &iph->daddr,
+		       &bpkt->client_ip,
+		       &bpkt->your_ip,
+		       &bpkt->server_ip,
+		       &opts.requested_ip);
+	} else {
+		printk(KERN_DEBUG "%s: %s unknown dhcp message %s\n",
+		       br->dev->name, action, dest);
+	}
+}
+
+/* serialize a struct dhcp_options into a dhcp options string.
+ *
+ * Writes at most a few tens of bytes into opts_str; the caller passes
+ * the 312-byte exten[] area of a zeroed bootp_pkt, so space is ample.
+ * NOTE(review): the 'type' parameter is unused — the message type is
+ * taken from opts->msg_type; confirm before removing. */
+static void make_dhcp_opts(u8 *opts_str, const struct dhcp_options *opts,
+			   int type)
+{
+	int len = 0;
+
+	memcpy(opts_str, dhcp_magic_cookie, sizeof(dhcp_magic_cookie));
+	len += sizeof(dhcp_magic_cookie);
+
+	/* msg type (REPLY or OFFER) */
+	opts_str[len++] = 53;
+	opts_str[len++] = 1;
+	opts_str[len++] = opts->msg_type;
+
+	/* server id */
+	opts_str[len++] = 54;
+	opts_str[len++] = 4;
+	memcpy(opts_str + len, &opts->server_id, 4);
+	len += 4;
+
+	/* t1 */
+	if (opts->t1) {
+		opts_str[len++] = 58;
+		opts_str[len++] = 4;
+		memcpy(opts_str + len, &opts->t1, 4);
+		len += 4;
+	}
+
+	/* t2 */
+	if (opts->t2) {
+		opts_str[len++] = 59;
+		opts_str[len++] = 4;
+		memcpy(opts_str + len, &opts->t2, 4);
+		len += 4;
+	}
+
+	/* lease time */
+	if (opts->lease_time) {
+		opts_str[len++] = 51;
+		opts_str[len++] = 4;
+		memcpy(opts_str + len, &opts->lease_time, 4);
+		len += 4;
+	}
+
+	/* add requested_param (only what the client asked for) */
+	if (opts->request_param & PARAM_SUBMASK) {
+		opts_str[len++] = 1;
+		opts_str[len++] = 4;
+		memcpy(opts_str + len, &opts->netmask, 4);
+		len += 4;
+	}
+
+	if (opts->request_param & PARAM_ROUTER) {
+		opts_str[len++] = 3;
+		opts_str[len++] = 4;
+		memcpy(opts_str + len, &opts->router, 4);
+		len += 4;
+	}
+
+	if (opts->request_param & PARAM_BROADCAST) {
+		opts_str[len++] = 28;
+		opts_str[len++] = 4;
+		memcpy(opts_str + len, &opts->bcast, 4);
+		len += 4;
+	}
+
+	/* dns: one or two addresses depending on dns2 */
+	if (opts->request_param & PARAM_DNS) {
+		opts_str[len++] = 6;
+		opts_str[len++] = (opts->dns2 ? 8 : 4);
+		memcpy(opts_str + len, &opts->dns1, 4);
+		if (opts->dns2)
+			memcpy(opts_str + len + 4, &opts->dns2, 4);
+		len += (opts->dns2 ? 8 : 4);
+	}
+
+	/* end-of-options marker */
+	opts_str[len++] = 255;
+}
+
+/* dhcp server: build and transmit a DHCPOFFER/DHCPACK/DHCPNACK on
+ * 'dev' to 'dest_hw', mirroring xid/relay/hw_addr from the client's
+ * request (src_packet) and honouring its parameter list (src_opts).
+ * Silently does nothing on allocation or header failure. */
+static void send_dhcp_reply(struct fbxbr *br,
+			    struct net_device *dev,
+			    const u8 *dest_hw,
+			    int type,
+			    const struct bootp_pkt *src_packet,
+			    const struct dhcp_options *src_opts)
+{
+	struct sk_buff *skb;
+	struct iphdr *h;
+	struct bootp_pkt *b;
+	struct dhcp_options dhcp_opts;
+	int hlen = LL_RESERVED_SPACE(dev);
+	int tlen = dev->needed_tailroom;
+
+	/* Allocate packet */
+	skb = alloc_skb(sizeof (struct bootp_pkt) + hlen + tlen, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	skb->dev = dev;
+	skb_reserve(skb, hlen);
+	skb_reset_network_header(skb);
+
+	b = (struct bootp_pkt *)skb_put(skb, sizeof(struct bootp_pkt));
+	memset(b, 0, sizeof(struct bootp_pkt));
+
+	/* Construct IP header */
+	h = &b->iph;
+	h->version = 4;
+	h->ihl = 5;
+	h->tot_len = htons(sizeof(struct bootp_pkt));
+	h->frag_off = htons(IP_DF);
+	h->ttl = 64;
+	h->protocol = IPPROTO_UDP;
+	h->saddr = br->lan_gw;
+
+	/* pick the IP destination: unicast to the client when it has a
+	 * routable address, otherwise broadcast */
+	switch (type) {
+	case DHCPOFFER:
+	case DHCPACK:
+		if (src_packet->client_ip)
+			h->daddr = src_packet->client_ip;
+                else if (src_opts->need_bcast)
+                        h->daddr = INADDR_BROADCAST;
+		else
+			h->daddr = br->wan_ipaddr;
+		break;
+
+	case DHCPNACK:
+		/* always broadcast NAK */
+		h->daddr = INADDR_BROADCAST;
+		break;
+	}
+
+	/* header checksum, computed over the zero-checksum header */
+	h->check = ip_fast_csum((unsigned char *) h, h->ihl);
+
+	/* Construct UDP header (checksum left 0 = none, valid for v4) */
+	b->udph.source = __constant_htons(67);
+	b->udph.dest = __constant_htons(68);
+	b->udph.len = htons(sizeof(struct bootp_pkt) - sizeof(struct iphdr));
+
+	/* Construct DHCP header; echo the client's transaction id */
+	b->op = BOOTP_REPLY;
+	b->htype = ARPHRD_ETHER;
+	b->hlen = ETH_ALEN;
+	b->secs = 0;
+	b->xid = src_packet->xid;
+
+	switch (type) {
+	case DHCPOFFER:
+		b->server_ip = br->lan_gw;
+		b->your_ip = br->wan_ipaddr;
+		break;
+
+	case DHCPACK:
+		b->client_ip = src_packet->client_ip;
+		b->server_ip = br->lan_gw;
+		b->your_ip = br->wan_ipaddr;
+		break;
+
+	case DHCPNACK:
+		break;
+	}
+
+	b->relay_ip = src_packet->relay_ip;
+	memcpy(b->hw_addr, src_packet->hw_addr, sizeof(src_packet->hw_addr));
+
+	/* Construct DHCP options */
+	memset(&dhcp_opts, 0, sizeof (dhcp_opts));
+	dhcp_opts.msg_type = type;
+	dhcp_opts.server_id = br->lan_gw;
+
+	switch (type) {
+	case DHCPOFFER:
+	case DHCPACK:
+		dhcp_opts.t1 = htonl(br->dhcpd_renew_time);
+		dhcp_opts.t2 = htonl(br->dhcpd_rebind_time);
+		dhcp_opts.lease_time = htonl(br->dhcpd_lease_time);
+		dhcp_opts.netmask = br->lan_netmask;
+		dhcp_opts.bcast = (br->lan_netmask & br->lan_gw) |
+			~br->lan_netmask;
+		dhcp_opts.dns1 = br->dns1_ipaddr;
+		dhcp_opts.dns2 = br->dns2_ipaddr ? br->dns2_ipaddr : 0;
+		dhcp_opts.router = br->lan_gw;
+		dhcp_opts.request_param = src_opts->request_param;
+		break;
+	}
+
+	make_dhcp_opts(b->exten, &dhcp_opts, type);
+	dump_dhcp_message(br, skb, b, "sending", "to lan");
+
+	if (dev_hard_header(skb, dev, ETH_P_IP,
+			    dest_hw, dev->dev_addr, skb->len) < 0) {
+		kfree_skb(skb);
+		return;
+	}
+
+	dev_queue_xmit(skb);
+}
+
+/*
+ * minimal single-lease DHCP server for the bridged client.
+ *
+ * called under bridge lock
+ *
+ * packet must be a valid IP & UDP packet with dport 67
+ *
+ * answer will be sent to skb->dev
+ *
+ * NOTE(review): this function never frees skb; the caller appears to
+ * own and free it on return (see the drop path in the lan input
+ * handler) — confirm when changing call sites.
+ */
+void fbxbr_dhcpd(struct fbxbr *br, struct sk_buff *skb)
+{
+	struct bootp_pkt *bpkt;
+	struct dhcp_options opts;
+
+	/* code assumes linear skb */
+	if (skb_linearize(skb) < 0)
+		return;
+
+	/* reject short packet (fixed bootp header at minimum) */
+	if (skb->len < (sizeof(*bpkt) - 312))
+		return;
+
+	bpkt = (struct bootp_pkt *)skb->data;
+
+	/* select only valid BOOTP Request/Discover */
+	if (bpkt->op != BOOTP_REQUEST || bpkt->hlen != ETH_ALEN)
+		return;
+
+	parse_dhcp_opts(bpkt->exten, skb->len - (sizeof(*bpkt) - 312), &opts);
+
+	/* clients carrying the freebox vendor option can opt out of
+	 * being served by the bridge */
+	if (opts.fbx_valid &&
+	    (be32_to_cpu(opts.fbx.flags) & FBX_OPT_VENDOR_F_IGNORE_BRIDGE)) {
+		printk(KERN_DEBUG "%s: ignore DHCP message with "
+		       "freebox ignore-bridge flags set\n", br->dev->name);
+		return;
+	}
+
+        if (ntohs(bpkt->flags) & BROADCAST_FLAG)
+		opts.need_bcast = true;
+
+	dump_dhcp_message(br, skb, bpkt, "received", "from lan");
+
+	/* select DHCPDISCOVER to send a DHCPOFFER */
+	if (opts.msg_type == DHCPDISCOVER) {
+		send_dhcp_reply(br, skb->dev, bpkt->hw_addr,
+				DHCPOFFER, bpkt, &opts);
+
+	} else if (opts.msg_type == DHCPREQUEST) {
+		/* send ACK or NACK, distinguishing the client states
+		 * of RFC 2131 by requested_ip/client_ip/server_id */
+		if (!opts.requested_ip) {
+			/* RENEWING/REBINDING */
+			if (!bpkt->client_ip) {
+				/* invalid packet; ignore */
+				return;
+			}
+
+			if (bpkt->client_ip != br->wan_ipaddr)
+				send_dhcp_reply(br, skb->dev, bpkt->hw_addr,
+						DHCPNACK, bpkt, &opts);
+			else {
+				send_dhcp_reply(br, skb->dev, bpkt->hw_addr,
+						DHCPACK, bpkt, &opts);
+				fbxbr_capture_hw_addr(br, bpkt->hw_addr);
+			}
+			return;
+
+		}
+
+		/* INIT-REBOOT or SELECTING */
+		if (bpkt->client_ip) {
+			/* invalid packet; ignore */
+			return;
+		}
+
+		if (!opts.server_id) {
+			/* INIT-REBOOT */
+			if (opts.requested_ip != br->wan_ipaddr)
+				send_dhcp_reply(br, skb->dev, bpkt->hw_addr,
+						DHCPNACK, bpkt, &opts);
+			else {
+				send_dhcp_reply(br, skb->dev, bpkt->hw_addr,
+						DHCPACK, bpkt, &opts);
+				fbxbr_capture_hw_addr(br, bpkt->hw_addr);
+			}
+			return;
+		}
+
+		/* SELECTING */
+		if (opts.server_id == br->lan_gw) {
+			/* client selected us */
+			send_dhcp_reply(br, skb->dev, bpkt->hw_addr,
+					DHCPACK, bpkt, &opts);
+			fbxbr_capture_hw_addr(br, bpkt->hw_addr);
+		} else {
+			/* another server was selected; ignore */
+		}
+	}
+}
diff -Nruw linux-6.13.12-fbx/net/fbxbridge./fbxbr_filter.c linux-6.13.12-fbx/net/fbxbridge/fbxbr_filter.c
--- linux-6.13.12-fbx/net/fbxbridge./fbxbr_filter.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxbridge/fbxbr_filter.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,258 @@
+#include <net/ip.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/netfilter.h>
+#include "fbxbr_private.h"
+
+/* okfn handed to NF_HOOK: do nothing — we only want the hook verdict,
+ * never to actually deliver the packet from here */
+static int lolfn(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+	return 0;
+}
+
+/*
+ * invoke netfilter table for finer grained control
+ *
+ * Returns NF_ACCEPT or NF_DROP.  The caller keeps ownership of skb in
+ * both cases.
+ */
+static int
+netfilter_call_hook(struct sk_buff *skb,
+		    unsigned int hook,
+		    struct net_device *in_dev,
+		    struct net_device *out_dev)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	int ret;
+
+	/* don't run frags into netfilter */
+	if ((iph->frag_off & htons(IP_OFFSET)))
+		return NF_ACCEPT;
+
+	/* mark as untracked so conntrack leaves it alone */
+	nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
+
+	/* NF_HOOK will kfree_skb(), guard against this */
+	skb_get(skb);
+
+	ret = NF_HOOK(NFPROTO_IPV4,
+		      hook,
+		      &init_net,
+		      NULL,
+		      skb,
+		      in_dev,
+		      out_dev,
+		      lolfn);
+
+	/* a negative return means the hook dropped (and freed) the
+	 * skb; our extra reference keeps it alive for the caller */
+	if (ret < 0)
+		return NF_DROP;
+
+	/* accepted: release the extra reference taken above */
+	skb_unref(skb);
+	return NF_ACCEPT;
+}
+
+/* run the packet through the FORWARD chain (in_dev -> out_dev) */
+static int
+netfilter_forward_hook(struct sk_buff *skb,
+		       struct net_device *in_dev,
+		       struct net_device *out_dev)
+{
+	return netfilter_call_hook(skb, NF_INET_FORWARD, in_dev, out_dev);
+}
+
+/* run the packet through the LOCAL_IN chain (no output device) */
+static int
+netfilter_input_hook(struct sk_buff *skb, struct net_device *in_dev)
+{
+	return netfilter_call_hook(skb, NF_INET_LOCAL_IN, in_dev, NULL);
+}
+
+/*
+ * set input mark bits, return true if changed
+ *
+ * Returns false (and warns, rate limited) if the bridge's mark bits
+ * are already present, so the caller knows not to clear them later.
+ */
+static bool skb_set_br_inputmark(struct fbxbr *br, struct sk_buff *skb)
+{
+	if (unlikely(skb->mark & br->inputmark)) {
+		if (net_ratelimit())
+			pr_err("%s: input mark already set on skb\n",
+			       br->dev->name);
+		return false;
+	}
+
+	skb->mark |= br->inputmark;
+	return true;
+}
+
+/* remove the bridge's input mark bits from the skb */
+static inline void skb_clear_br_inputmark(struct fbxbr *br,
+					  struct sk_buff *skb)
+{
+	skb->mark &= ~br->inputmark;
+}
+
+/*
+ * decide whether a wan->lan packet should be kept by the local stack
+ * instead of being bridged to the lan port.
+ *
+ * assume linear ip header
+ */
+static bool wan_to_lan_want_keep(struct fbxbr *br,
+				 struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	bool changed;
+	int ret;
+
+	/* keep ETHER_IP packets (IP protocol 97, RFC 3378) */
+	if (iph->protocol == 97)
+		return true;
+
+	/* give ipv6 in ip private to freebox back to the
+	 * kernel */
+	if (iph->protocol == IPPROTO_IPV6) {
+		struct ipv6hdr *iph6;
+		unsigned int hlen;
+
+		/* capture at least all traffic from our GW
+		 * (192.88.99.101) */
+		if (iph->saddr == htonl(0xc0586365))
+			return true;
+
+		/* rest if peer-to-peer shortcut traffic, check if
+		 * this is for our IPv6 subnet, we cannot do it on
+		 * fragmented traffic thought */
+		if (iph->frag_off & htons(IP_OFFSET))
+			return false;
+
+		/* sanity check on header value */
+		hlen = iph->ihl * 4;
+		if (skb->len < hlen + sizeof(struct ipv6hdr))
+			return false;
+
+		/* keep if inner v6 destination is in 2a01:e30::/28 */
+		iph6 = (struct ipv6hdr *)((unsigned char *)iph + hlen);
+		if ((iph6->daddr.s6_addr32[0] & htonl(0xfffffff0)) ==
+		    htonl(0x2a010e30))
+			return true;
+	}
+
+	if (!(br->flags & FBXBRIDGE_FLAGS_NETFILTER))
+		return false;
+
+	/* we cant filter frags with netfilter */
+	if (iph->frag_off & htons(IP_OFFSET))
+		return false;
+
+	/* check netfilter input hook, temporarily tagging the skb with
+	 * the bridge's input mark so rules can match it */
+	changed = skb_set_br_inputmark(br, skb);
+	ret = netfilter_input_hook(skb, skb->dev);
+	if (changed)
+		skb_clear_br_inputmark(br, skb);
+
+	if (ret == NF_ACCEPT)
+		return true;
+
+	return false;
+}
+
+/*
+ * check the netfilter FORWARD chain (wan port -> bridge device) when
+ * enabled; forwarding is allowed by default.
+ *
+ * assume linear ip header
+ */
+static bool wan_to_lan_can_forward(struct fbxbr *br, struct sk_buff *skb)
+{
+	if ((br->flags & FBXBRIDGE_FLAGS_NETFILTER)) {
+		int ret;
+
+		ret = netfilter_forward_hook(skb, br->wan_port->dev, br->dev);
+		if (ret == NF_DROP)
+			return false;
+	}
+	return true;
+}
+
+/*
+ * filter decision for a wan->lan packet: NF_STOP to hand the packet
+ * back to the local stack, NF_DROP to discard, NF_ACCEPT to bridge.
+ *
+ * note: caller assured that ip header is valid and holds bridge read
+ * lock
+ *
+ * use netfilter hook return type
+ */
+int
+fbxbr_filter_wan_to_lan_packet(struct fbxbr *br, struct sk_buff *skb)
+{
+	if (wan_to_lan_want_keep(br, skb))
+		return NF_STOP;
+
+	if (!br->lan_port)
+		return NF_DROP;
+
+	/* wan_to_lan_can_forward() returns a bool: don't compare it
+	 * against netfilter verdict values */
+	if (!wan_to_lan_can_forward(br, skb))
+		return NF_DROP;
+
+	return NF_ACCEPT;
+}
+
+/*
+ * lan->wan counterpart of wan_to_lan_want_keep(): nothing from the
+ * lan side is ever kept by the local stack.
+ *
+ * assume linear ip header
+ */
+static bool lan_to_wan_want_keep(struct fbxbr *br, struct sk_buff *skb)
+{
+	return false;
+}
+
+/*
+ * anti-spoofing and destination sanity checks for lan->wan traffic,
+ * plus the optional netfilter FORWARD chain.
+ *
+ * assume linear ip header
+ */
+static bool lan_to_wan_can_forward(struct fbxbr *br, struct sk_buff *skb)
+{
+	struct iphdr *iph = ip_hdr(skb);
+
+	/* disallow source spoofing: the client may only use the
+	 * bridged wan address */
+	if (iph->saddr != br->wan_ipaddr)
+		return false;
+
+	/* disallow all private net destination */
+	if (ipv4_is_loopback(iph->daddr) ||
+	    ipv4_is_private_10(iph->daddr) ||
+	    ipv4_is_private_172(iph->daddr) ||
+	    ipv4_is_private_192(iph->daddr) ||
+	    ipv4_is_linklocal_169(iph->daddr) ||
+	    ipv4_is_anycast_6to4(iph->daddr) ||
+	    ipv4_is_test_192(iph->daddr) ||
+	    ipv4_is_test_198(iph->daddr))
+		return false;
+
+	/* no multicast please */
+	if (ipv4_is_multicast(iph->daddr))
+		return false;
+
+	/* Don't let IP broadcast go through us */
+	if (ipv4_is_zeronet(iph->daddr))
+		return false;
+
+	if (ipv4_is_lbcast(iph->daddr))
+		return false;
+
+	if ((br->flags & FBXBRIDGE_FLAGS_NETFILTER)) {
+		int ret;
+
+		ret = netfilter_forward_hook(skb, br->dev, br->wan_port->dev);
+		if (ret == NF_DROP)
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * filter decision for a lan->wan packet: NF_STOP to hand the packet
+ * back to the local stack, NF_DROP to discard, NF_ACCEPT to bridge.
+ *
+ * note: caller assured that ip header is valid and holds bridge read
+ * lock
+ *
+ * use netfilter hook return type
+ */
+int
+fbxbr_filter_lan_to_wan_packet(struct fbxbr *br, struct sk_buff *skb)
+{
+	if (lan_to_wan_want_keep(br, skb))
+		return NF_STOP;
+
+	if (!br->wan_port)
+		return NF_DROP;
+
+	/* lan_to_wan_can_forward() returns a bool: don't compare it
+	 * against netfilter verdict values */
+	if (!lan_to_wan_can_forward(br, skb))
+		return NF_DROP;
+
+	return NF_ACCEPT;
+}
diff -Nruw linux-6.13.12-fbx/net/fbxbridge./fbxbr_fwcache.c linux-6.13.12-fbx/net/fbxbridge/fbxbr_fwcache.c
--- linux-6.13.12-fbx/net/fbxbridge./fbxbr_fwcache.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxbridge/fbxbr_fwcache.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,215 @@
+#include <linux/jhash.h>
+#include <net/ip.h>
+#include "fbxbr_private.h"
+
+/*
+ * hash a forward cache key; is_tcp is folded into the second word so
+ * tcp and udp flows with identical addresses hash differently.
+ */
+u32 fbxbr_fwcache_hash(const struct fbxbr_fwcache_key *k)
+{
+	return jhash_3words(k->lan_ip,
+			    k->is_tcp ? k->wan_ip : ~k->wan_ip,
+			    k->lan_port | k->wan_port << 16, 0);
+}
+EXPORT_SYMBOL(fbxbr_fwcache_hash);
+
+/*
+ * full comparison of a cache entry against a lookup key
+ */
+static bool entry_match(const struct fbxbr_fwcache *fwc,
+			const struct fbxbr_fwcache_key *k)
+{
+	if (fwc->lan_ip != k->lan_ip || fwc->wan_ip != k->wan_ip)
+		return false;
+	if (fwc->is_tcp != k->is_tcp)
+		return false;
+	return fwc->lan_port == k->lan_port && fwc->wan_port == k->wan_port;
+}
+
+/*
+ * hash-bucket lookup, lockless variant.
+ *
+ * must be called with bh disabled and rcu read lock held
+ */
+struct fbxbr_fwcache *
+__fbxbr_fwcache_lookup_rcu(struct fbxbr *br, u32 hash,
+			   const struct fbxbr_fwcache_key *k)
+{
+	struct fbxbr_fwcache *fwc;
+
+	hlist_for_each_entry_rcu(fwc,
+				 &br->fwcache_hrules[hash % FBXBR_FWCACHE_SIZE],
+				 hnext) {
+		if (entry_match(fwc, k))
+			return fwc;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL(__fbxbr_fwcache_lookup_rcu);
+
+/*
+ * hash-bucket lookup, writer-side variant.
+ *
+ * must be called with bh disabled and cache lock held
+ */
+static struct fbxbr_fwcache *
+__fbxbr_fwcache_lookup(struct fbxbr *br, u32 hash,
+		       const struct fbxbr_fwcache_key *k)
+{
+	struct fbxbr_fwcache *fwc;
+
+	hlist_for_each_entry(fwc,
+			     &br->fwcache_hrules[hash % FBXBR_FWCACHE_SIZE],
+			     hnext) {
+		if (entry_match(fwc, k))
+			return fwc;
+	}
+
+	return NULL;
+}
+
+/*
+ * return true if the flow has a chance to be in the fwcache, filling
+ * in the lookup key 'k'.  *can_create tells the caller whether a new
+ * entry may be installed for this packet (any UDP packet; only TCP
+ * SYNs, so a cache entry implies a connection attempt was seen).
+ *
+ * skb must be a valid ipv4 packet
+ */
+bool fbxbr_fwcache_skb_allowable(struct sk_buff *skb,
+				 bool from_wan,
+				 struct fbxbr_fwcache_key *k,
+				 bool *can_create)
+{
+	const struct iphdr *iph;
+	__be16 psrc, pdst;
+
+	iph = ip_hdr(skb);
+
+	/* non-first fragments have no l4 header to key on */
+	if (iph->frag_off & htons(IP_OFFSET))
+		return false;
+
+	if (iph->protocol != IPPROTO_UDP && iph->protocol != IPPROTO_TCP)
+		return false;
+
+	/* orient addresses by the packet's direction */
+	if (from_wan) {
+		k->wan_ip = iph->saddr;
+		k->lan_ip = iph->daddr;
+	} else {
+		k->lan_ip = iph->saddr;
+		k->wan_ip = iph->daddr;
+	}
+
+	if (iph->protocol == IPPROTO_UDP) {
+		struct udphdr *udph;
+
+		if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+				   sizeof (struct udphdr)))
+			return false;
+
+		udph = (struct udphdr *)skb_transport_header(skb);
+		*can_create = true;
+
+		psrc = udph->source;
+		pdst = udph->dest;
+		k->is_tcp = false;
+	} else {
+		struct tcphdr *tcph;
+
+		if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+				   sizeof (struct tcphdr)))
+			return false;
+
+		tcph = (struct tcphdr *)skb_transport_header(skb);
+		/* only a SYN may create a new cache entry */
+		if (tcph->syn)
+			*can_create = true;
+		else
+			*can_create = false;
+
+		psrc = tcph->source;
+		pdst = tcph->dest;
+		k->is_tcp = true;
+	}
+
+	if (from_wan) {
+		k->wan_port = psrc;
+		k->lan_port = pdst;
+	} else {
+		k->lan_port = psrc;
+		k->wan_port = pdst;
+	}
+	return true;
+}
+
+/*
+ * rcu release defered callback: run the entry's private destructor,
+ * if any, then free the entry itself.
+ */
+static void delayed_fwc_free_entry(struct rcu_head *rhp)
+{
+	struct fbxbr_fwcache *fwc;
+
+	fwc = container_of(rhp, struct fbxbr_fwcache, rcu);
+	if (fwc->priv_destructor)
+		fwc->priv_destructor((void *)fwc->priv_area);
+        kfree(fwc);
+}
+
+/*
+ * insert a new forward cache entry for key 'k' (no-op if one already
+ * exists).  When the cache is full, the oldest entry is evicted first
+ * (fwcache_rules is kept in insertion order).  Always returns 0.
+ *
+ * must be called with bh disabled
+ */
+int fbxbr_fwcache_add(struct fbxbr *br,
+		      u32 hash, const struct fbxbr_fwcache_key *k)
+{
+	struct fbxbr_fwcache *fwc;
+
+	write_lock(&br->fwcache_lock);
+
+	/* lost a race: someone else already inserted this flow */
+	if (unlikely(__fbxbr_fwcache_lookup(br, hash, k)))
+		goto done;
+
+	/* add new entry */
+	if (br->fwcache_count >= FBXBR_FWCACHE_MAX_ENTRY) {
+		/* make some room: evict the oldest entry */
+		fwc = list_first_entry(&br->fwcache_rules,
+				       struct fbxbr_fwcache,
+				       next);
+		hlist_del_rcu(&fwc->hnext);
+		list_del_rcu(&fwc->next);
+		call_rcu(&fwc->rcu, delayed_fwc_free_entry);
+		br->fwcache_count--;
+	}
+
+	fwc = kmalloc(sizeof (*fwc), GFP_ATOMIC);
+	if (!fwc)
+		goto done;
+
+	br->fwcache_count++;
+	fwc->lan_ip = k->lan_ip;
+	fwc->wan_ip = k->wan_ip;
+	fwc->lan_port = k->lan_port;
+	fwc->wan_port = k->wan_port;
+	fwc->is_tcp = k->is_tcp;
+	fwc->priv_destructor = NULL;
+	memset(fwc->priv_area, 0, sizeof (fwc->priv_area));
+
+	hlist_add_head_rcu(&fwc->hnext,
+			   &br->fwcache_hrules[hash % FBXBR_FWCACHE_SIZE]);
+	list_add_tail_rcu(&fwc->next, &br->fwcache_rules);
+
+done:
+	write_unlock(&br->fwcache_lock);
+	return 0;
+}
+
+/*
+ * drop every forward cache entry; actual frees are deferred to RCU
+ * grace periods via delayed_fwc_free_entry().
+ */
+void fbxbr_fwcache_flush(struct fbxbr *br)
+{
+	struct fbxbr_fwcache *fwc, *tmp;
+
+	write_lock_bh(&br->fwcache_lock);
+
+	list_for_each_entry_safe(fwc, tmp, &br->fwcache_rules, next) {
+		hlist_del_rcu(&fwc->hnext);
+		list_del_rcu(&fwc->next);
+		call_rcu(&fwc->rcu, delayed_fwc_free_entry);
+		br->fwcache_count--;
+	}
+
+	write_unlock_bh(&br->fwcache_lock);
+}
diff -Nruw linux-6.13.12-fbx/net/fbxbridge./fbxbr_input.c linux-6.13.12-fbx/net/fbxbridge/fbxbr_input.c
--- linux-6.13.12-fbx/net/fbxbridge./fbxbr_input.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxbridge/fbxbr_input.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,347 @@
+#include <linux/if_arp.h>
+#include <linux/in.h>
+#include <linux/udp.h>
+#include <net/ip.h>
+#include "fbxbr_private.h"
+
+/*
+ * process a frame received on the wan port: pass non-IPv4 frames back
+ * to the stack, fast-path flows found in the forward cache, otherwise
+ * run the wan->lan filter and bridge to the lan port.
+ */
+static rx_handler_result_t __handle_wan_frame(struct fbxbr_port *p,
+					      struct sk_buff *skb)
+{
+	struct fbxbr *br = p->br;
+	struct fbxbr_fwcache_key fwk;
+	bool fwc_present, fwc_can_create;
+	u32 hash = 0;
+	int ret;
+
+	/* give back non IPv4 packets */
+	if (skb->protocol != htons(ETH_P_IP))
+		return RX_HANDLER_PASS;
+
+	/* stop here if we have no idea what the wan ip address is or
+	 * was */
+	if (!br->wan_ipaddr)
+		goto drop;
+
+	if (!fbxbr_is_valid_ip_packet(skb))
+		goto drop;
+
+	/* lookup into forward cache */
+	fwc_present = false;
+	fwc_can_create = false;
+
+	if (fbxbr_fwcache_skb_allowable(skb, true, &fwk, &fwc_can_create)) {
+		hash = fbxbr_fwcache_hash(&fwk);
+		rcu_read_lock();
+		fwc_present = (__fbxbr_fwcache_lookup_rcu(br, hash, &fwk) != NULL);
+		rcu_read_unlock();
+	}
+
+	/* cached flow: skip the filter entirely */
+	if (fwc_present)
+		goto output_lan;
+
+	ret = fbxbr_filter_wan_to_lan_packet(br, skb);
+	switch (ret) {
+	default:
+		WARN(1, "unsupported filter action");
+		fallthrough;
+
+	case NF_DROP:
+		kfree_skb(skb);
+		return RX_HANDLER_CONSUMED;
+
+	case NF_STOP:
+		/* hand back to the local stack, conntrack state reset */
+		nf_reset_ct(skb);
+		return RX_HANDLER_PASS;
+
+	case NF_ACCEPT:
+		break;
+	}
+
+output_lan:
+	if (!br->lan_port)
+		goto drop;
+
+	fbxbr_output_lan_frame(br, skb);
+
+	/* remember the flow only after it was actually forwarded */
+	if (!fwc_present && fwc_can_create)
+		fbxbr_fwcache_add(br, hash, &fwk);
+
+	return RX_HANDLER_CONSUMED;
+
+drop:
+	kfree_skb(skb);
+	return RX_HANDLER_CONSUMED;
+}
+
+/*
+ * proxy-ARP on the lan port: answer the client's ARP requests on
+ * behalf of the wan side and learn the client's hardware address from
+ * its requests/replies.  Always consumes the skb.
+ */
+static void
+__handle_lan_arp_frame(struct fbxbr_port *p, struct sk_buff *skb)
+{
+	struct net_device *dev = p->dev;
+	struct fbxbr *br = p->br;
+	__be32 sender_ipaddr, target_ipaddr;
+	u8 *sender_hwaddr, *req;
+	struct arphdr *arp;
+
+	if (!pskb_may_pull(skb, arp_hdr_len(p->dev)))
+		goto done;
+
+	/* only plain ethernet/802 IPv4 ARP request/reply */
+	arp = arp_hdr(skb);
+	if (arp->ar_hln != dev->addr_len || arp->ar_pln != 4)
+		goto done;
+
+	if ((arp->ar_hrd != htons(ARPHRD_ETHER) &&
+	     arp->ar_hrd != htons(ARPHRD_IEEE802)) ||
+	    arp->ar_pro != htons(ETH_P_IP))
+		goto done;
+
+	if (arp->ar_op != htons(ARPOP_REQUEST) &&
+	    arp->ar_op != htons(ARPOP_REPLY))
+		goto done;
+
+	/* fetch subfields (packed after the arp header: sender hw,
+	 * sender ip, target hw, target ip) */
+	req = (unsigned char *)(arp + 1);
+
+	sender_hwaddr = req;
+	req += ETH_ALEN;
+
+	memcpy(&sender_ipaddr, req, 4);
+	req += 4;
+
+	/* skip target_hwaddr */
+	req += dev->addr_len;
+
+	memcpy(&target_ipaddr, req, 4);
+
+	/* ignore gratuitous ARP */
+	if (!sender_ipaddr)
+		goto done;
+
+	if (arp->ar_op == htons(ARPOP_REQUEST)) {
+
+		/* client is sending an arp request */
+		if (!br->wan_ipaddr) {
+			/* wan has never been up, our wan address is
+			 * not known, answer to every arp requests */
+
+			/* ignore what looks like gratuitous ARP */
+			if (sender_ipaddr == target_ipaddr)
+				goto done;
+
+			/* don't answer for special ip address */
+			if (ipv4_is_private_10(target_ipaddr) ||
+			    ipv4_is_private_172(target_ipaddr) ||
+			    ipv4_is_private_192(target_ipaddr) ||
+			    ipv4_is_linklocal_169(target_ipaddr) ||
+			    ipv4_is_anycast_6to4(target_ipaddr) ||
+			    ipv4_is_test_192(target_ipaddr) ||
+			    ipv4_is_test_198(target_ipaddr))
+				goto done;
+
+			/* ok, will reply with a zero source
+			 * address */
+		} else {
+			/* wan is up, filter our arp reply to match
+			 * WAN */
+
+			/* accept only arp from remote client */
+			if (sender_ipaddr != br->wan_ipaddr)
+				goto done;
+
+			/* accept only arp request for wan network */
+			if ((target_ipaddr & br->lan_netmask) !=
+			    (br->wan_ipaddr & br->lan_netmask))
+				goto done;
+
+			/* request is for the client's address, keep quiet */
+			if (target_ipaddr == br->wan_ipaddr)
+				goto done;
+		}
+
+		/* ok I can answer */
+		fbxbr_send_arp_frame(dev, ARPOP_REPLY, sender_hwaddr,
+				     target_ipaddr, NULL,
+				     br->wan_ipaddr, sender_hwaddr);
+
+		/* keep the client address */
+		fbxbr_capture_hw_addr(br, sender_hwaddr);
+
+	} else {
+
+		/* accept only arp from remote client */
+		if (sender_ipaddr != br->wan_ipaddr)
+			goto done;
+
+		/* we received  an arp reply,  iff it was  addressed to
+		 * us, then keep the client mac address  */
+		if (target_ipaddr != br->lan_gw)
+			goto done;
+
+		fbxbr_capture_hw_addr(br, sender_hwaddr);
+	}
+
+done:
+	kfree_skb(skb);
+}
+
+/*
+ * is this destination local to the bridge: its own address, any
+ * configured alias, or multicast?
+ */
+static inline bool __is_local_ip(struct fbxbr *br, __be32 ipaddr)
+{
+	int idx;
+
+	if (ipv4_is_multicast(ipaddr) || ipaddr == br->br_ipaddr)
+		return true;
+
+	for (idx = 0; idx < MAX_ALIASES; idx++) {
+		__be32 alias = br->ip_aliases[idx];
+
+		if (alias && alias == ipaddr)
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * process a frame received on the lan port: handle ARP and the local
+ * DHCP server, redirect packets for the bridge's own address to the
+ * stack (after SNAT), fast-path cached flows, otherwise run the
+ * lan->wan filter and bridge to the wan port.
+ */
+static rx_handler_result_t __handle_lan_frame(struct fbxbr_port *p,
+					      struct sk_buff *skb)
+{
+	struct fbxbr *br = p->br;
+	struct iphdr *iph;
+	struct fbxbr_fwcache_key fwk;
+	bool fwc_present, fwc_can_create, is_fragment;
+	u32 hash = 0;
+	int ret;
+
+	if (skb->protocol == htons(ETH_P_ARP)) {
+		__handle_lan_arp_frame(p, skb);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	/* give back non IPv4 packets */
+	if (skb->protocol != htons(ETH_P_IP))
+		return RX_HANDLER_PASS;
+
+	if (!fbxbr_is_valid_ip_packet(skb))
+		goto drop;
+
+	iph = ip_hdr(skb);
+
+	/* look  the destination  address, if  talking to  our private
+	 * address or alias, then frame is local */
+	if (__is_local_ip(br, iph->daddr)) {
+
+		if (!br->br_remote_ipaddr)
+			goto drop;
+
+		/* packet comes from lan, snat it and make it local */
+		fbxbr_snat_packet(skb, br->br_remote_ipaddr);
+		skb->dev = br->dev;
+		skb->pkt_type = PACKET_HOST;
+		br->dev->stats.rx_packets++;
+		br->dev->stats.rx_bytes += skb->len;
+		netif_rx(skb);
+		return RX_HANDLER_CONSUMED;
+	}
+
+	/* stop here if we have no idea what the wan ip address is or
+	 * was */
+	if (!br->wan_ipaddr)
+		goto drop;
+
+	/* lookup into forward cache */
+	fwc_present = false;
+	fwc_can_create = false;
+
+	if (fbxbr_fwcache_skb_allowable(skb, false, &fwk, &fwc_can_create)) {
+		hash = fbxbr_fwcache_hash(&fwk);
+		rcu_read_lock();
+		fwc_present = (__fbxbr_fwcache_lookup_rcu(br, hash, &fwk) != NULL);
+		rcu_read_unlock();
+	}
+
+	/* cached flow: skip DHCP and the filter entirely */
+	if (fwc_present)
+		goto output_wan;
+
+	/* process DHCP if enabled: udp dport 67 goes to the built-in
+	 * server and is never forwarded */
+	is_fragment = iph->frag_off & htons(IP_OFFSET);
+	if (iph->protocol == IPPROTO_UDP &&
+	    !is_fragment &&
+	    (br->flags & FBXBRIDGE_FLAGS_DHCPD)) {
+		struct udphdr *udp;
+
+		if (!fbxbr_is_valid_udp_tcp_packet(skb))
+			goto drop;
+
+		udp = udp_hdr(skb);
+		if (udp->dest == htons(67)) {
+			fbxbr_dhcpd(br, skb);
+			goto drop;
+		}
+	}
+
+	ret = fbxbr_filter_lan_to_wan_packet(br, skb);
+	switch (ret) {
+	default:
+		WARN(1, "unsupported filter action");
+		fallthrough;
+
+	case NF_DROP:
+		kfree_skb(skb);
+		return RX_HANDLER_CONSUMED;
+
+	case NF_STOP:
+		/* hand back to the local stack, conntrack state reset */
+		nf_reset_ct(skb);
+		return RX_HANDLER_PASS;
+
+	case NF_ACCEPT:
+		break;
+	}
+
+output_wan:
+	if (!br->wan_port)
+		goto drop;
+
+	fbxbr_output_wan_frame(br, skb);
+
+	/* remember the flow only after it was actually forwarded */
+	if (!fwc_present && fwc_can_create)
+		fbxbr_fwcache_add(br, hash, &fwk);
+
+	return RX_HANDLER_CONSUMED;
+
+drop:
+	kfree_skb(skb);
+	return RX_HANDLER_CONSUMED;
+}
+
+/*
+ * rx_handler entry point for bridge ports: dispatch to the wan or lan
+ * path under the bridge read lock.
+ *
+ * NOTE(review): fbxbr_port_get_rcu() is not checked for NULL; this
+ * presumably relies on the handler only ever being attached to
+ * enslaved devices — confirm against the registration code.
+ */
+rx_handler_result_t fbxbr_handle_frame(struct sk_buff **pskb)
+{
+	struct sk_buff *skb = *pskb;
+	struct fbxbr_port *p;
+	rx_handler_result_t ret;
+
+	/* get a private copy if the skb is shared */
+	skb = skb_share_check(skb, GFP_ATOMIC);
+	if (!skb)
+		return RX_HANDLER_CONSUMED;
+
+	p = fbxbr_port_get_rcu(skb->dev);
+
+	read_lock(&p->br->lock);
+	if (p->is_wan)
+		ret = __handle_wan_frame(p, skb);
+	else
+		ret = __handle_lan_frame(p, skb);
+	read_unlock(&p->br->lock);
+
+	return ret;
+}
diff -Nruw linux-6.13.12-fbx/net/fbxbridge./fbxbr_ioctl.c linux-6.13.12-fbx/net/fbxbridge/fbxbr_ioctl.c
--- linux-6.13.12-fbx/net/fbxbridge./fbxbr_ioctl.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxbridge/fbxbr_ioctl.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,85 @@
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <net/net_namespace.h>
+#include <linux/uaccess.h>
+#include "fbxbr_private.h"
+
+/*
+ * ioctl handling entry point for fbxbridge control requests.
+ *
+ * @net: network namespace the request was issued in
+ * @ign: unused (command number, already dispatched by the caller)
+ * @arg: userland pointer to a struct fbxbridge_ioctl_req
+ *
+ * Returns 0 on success or a negative errno.
+ */
+int fbxbr_ioctl(struct net *net, unsigned int ign, void __user *arg)
+{
+	struct fbxbridge_ioctl_req req;
+	struct fbxbridge_ioctl_chg chg;
+	struct fbxbridge_ioctl_dev_chg dev_chg;
+	struct fbxbridge_ioctl_params params;
+	int ret;
+
+	/* fetch ioctl request */
+	if (copy_from_user(&req, arg, sizeof (req)))
+		return -EFAULT;
+
+	switch (req.cmd) {
+	case E_CMD_BR_CHG:
+		/* create (action == 0) or delete a bridge */
+		if (copy_from_user(&chg, (void __user *)req.arg,
+				   sizeof (chg)))
+			return -EFAULT;
+
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+			return -EPERM;
+
+		if (!chg.action)
+			return fbxbr_add_br(net, chg.brname);
+
+		rtnl_lock();
+		ret = __fbxbr_del_br(net, chg.brname);
+		rtnl_unlock();
+		return ret;
+
+	case E_CMD_BR_DEV_CHG:
+		/* enslave (action == 0) or release a port device */
+		if (copy_from_user(&dev_chg, (void __user *)req.arg,
+				   sizeof (dev_chg)))
+			return -EFAULT;
+
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+			return -EPERM;
+
+		rtnl_lock();
+		if (!dev_chg.action)
+			ret = __fbxbr_add_br_port(net,
+						  dev_chg.brname,
+						  dev_chg.devname,
+						  dev_chg.wan);
+		else
+			ret = __fbxbr_del_br_port_by_name(net,
+							  dev_chg.brname,
+							  dev_chg.devname);
+		rtnl_unlock();
+		return ret;
+
+	case E_CMD_BR_PARAMS:
+		if (copy_from_user(&params, (void __user *)req.arg,
+				   sizeof (params)))
+			return -EFAULT;
+
+		if (!params.action) {
+			/* this is a get, no privilege needed */
+			ret = fbxbr_get_params(net, params.brname, &params);
+			if (ret)
+				return ret;
+
+			/* copy_to_user() returns the number of bytes
+			 * left to copy, not an errno: convert it
+			 * instead of leaking a positive count */
+			if (copy_to_user((void __user *)req.arg, &params,
+					 sizeof (params)))
+				return -EFAULT;
+			return 0;
+		}
+
+		/* this is a set */
+		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+			return -EPERM;
+
+		return fbxbr_set_params(net, params.brname, &params);
+
+	default:
+		return -EINVAL;
+	}
+}
diff -Nruw linux-6.13.12/net/fbxbridge/fbxbr_output.c linux-6.13.12-fbx/net/fbxbridge/fbxbr_output.c
--- linux-6.13.12/net/fbxbridge/fbxbr_output.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxbridge/fbxbr_output.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,164 @@
+#include <net/ip.h>
+#include <net/arp.h>
+#include "fbxbr_private.h"
+
+/*
+ * Send an IPv4 multicast packet out of the LAN port.
+ *
+ * caller must hold bridge lock
+ *
+ * lan port must be valid
+ */
+void fbxbr_output_lan_mcast_frame(struct fbxbr *br, struct sk_buff *skb)
+{
+	struct fbxbr_port *p = br->lan_port;
+	struct net_device *dev = p->dev;
+	struct iphdr *ip;
+	u8 mcast_hwaddr[6];
+	u32 daddr;
+
+	ip = ip_hdr(skb);
+
+	/* compute mcast hwaddr: 01:00:5e prefix followed by the low
+	 * 23 bits of the destination group address */
+	mcast_hwaddr[0] = 0x1;
+	mcast_hwaddr[1] = 0x0;
+	mcast_hwaddr[2] = 0x5e;
+	daddr = ntohl(ip->daddr);
+	mcast_hwaddr[3] = (daddr & 0x7f0000) >> 16;
+	mcast_hwaddr[4] = (daddr & 0xff00) >> 8;
+	mcast_hwaddr[5] = (daddr & 0xff);
+
+	skb->dev = dev;
+	dev_hard_header(skb, dev, ETH_P_802_3, mcast_hwaddr, dev->dev_addr,
+			ETH_P_IP);
+	dev_queue_xmit(skb);
+}
+
+/*
+ * Send a unicast/broadcast IPv4 packet out of the LAN port,
+ * resolving the LAN host hardware address first.
+ *
+ * caller must hold bridge lock and have BH disabled
+ *
+ * lan port must be valid
+ *
+ * must be a valid ip packet
+ */
+void fbxbr_output_lan_frame(struct fbxbr *br, struct sk_buff *skb)
+{
+	struct fbxbr_port *p = br->lan_port;
+	struct net_device *dev = p->dev;
+	struct iphdr *iph;
+	const char *dest_hw;
+
+	iph = ip_hdr(skb);
+
+	if (!br->have_hw_addr && iph->daddr != INADDR_BROADCAST) {
+
+		/* no known LAN hw address yet: drop the packet and
+		 * solicit the host with an ARP request instead
+		 * (fixme: try to queue instead of dropping ?) */
+		kfree_skb(skb);
+
+		/* rate limit arp sending to ARP_RATE_LIMIT  */
+		spin_lock(&br->last_arp_lock);
+		if (time_before(jiffies, br->last_arp_send + ARP_RATE_LIMIT)) {
+			spin_unlock(&br->last_arp_lock);
+			return;
+		}
+
+		br->last_arp_send = jiffies;
+		spin_unlock(&br->last_arp_lock);
+
+		fbxbr_send_arp_frame(dev,
+				     ARPOP_REQUEST,
+				     NULL,
+				     br->lan_gw,
+				     NULL,
+				     br->wan_ipaddr,
+				     NULL);
+		return;
+	}
+
+	/* we have  an active device, send  to the hw addr  if we have
+	 * it, or to  the bcast hw addr if we don't  or the packet is
+	 * an ip broadcast */
+	skb->dev = dev;
+
+	if (br->have_hw_addr && iph->daddr != INADDR_BROADCAST)
+		dest_hw = br->lan_hwaddr;
+	else
+		dest_hw = dev->broadcast;
+
+	dev_hard_header(skb, dev, ETH_P_802_3, dest_hw, dev->dev_addr,
+			ETH_P_IP);
+	dev_queue_xmit(skb);
+}
+
+/*
+ * Send an IPv4 packet out of the WAN port, resolving the next hop
+ * (directly connected or via the cached default route) and its
+ * neighbour entry.  The packet is dropped if no valid neighbour is
+ * available yet.
+ *
+ * caller must hold bridge lock and have BH disabled
+ *
+ * wan port must be valid
+ *
+ * must be a valid ip packet
+ */
+void fbxbr_output_wan_frame(struct fbxbr *br, struct sk_buff *skb)
+{
+	struct fbxbr_port *p = br->wan_port;
+	struct net_device *dev = p->dev;
+	struct iphdr *iph;
+	struct neighbour *neigh;
+	__be32 nh;
+
+	skb->dev = dev;
+
+	/* point-to-point style device: no link layer header, no
+	 * neighbour resolution needed */
+	if (!dev->hard_header_len) {
+		dev_queue_xmit(skb);
+		return;
+	}
+
+	iph = ip_hdr(skb);
+
+	/* resolve next hop */
+	nh = iph->daddr;
+	if ((nh & br->wan_netmask) != (br->wan_ipaddr & br->wan_netmask)) {
+		struct rtable *rt;
+
+		/* destination is off-link: reuse the cached route if
+		 * still valid */
+		rt = p->rt;
+		if (rt && rt->dst.obsolete > 0) {
+			ip_rt_put(rt);
+			p->rt = NULL;
+			rt = NULL;
+		}
+
+		/* need to find default gateway */
+		if (!rt) {
+			rt = ip_route_output(&init_net, nh, 0, 0,
+					     dev->ifindex, RT_SCOPE_UNIVERSE);
+			if (IS_ERR(rt)) {
+				kfree_skb(skb);
+				return;
+			}
+
+			if (rt->rt_type != RTN_UNICAST) {
+				/* don't leak the route reference */
+				ip_rt_put(rt);
+				kfree_skb(skb);
+				return;
+			}
+
+			p->rt = rt;
+		}
+
+		nh = rt_nexthop(rt, nh);
+	}
+
+	/* resolve neighbour (RCU-protected lookup, create on miss) */
+	neigh = __ipv4_neigh_lookup_noref(dev, nh);
+	if (unlikely(!neigh))
+		neigh = __neigh_create(&arp_tbl, &nh, dev, false);
+
+	if (IS_ERR(neigh)) {
+		kfree_skb(skb);
+		return;
+	}
+
+	if (!(neigh->nud_state & NUD_VALID)) {
+		/* kick resolution and drop, packet is not queued */
+		neigh_event_send(neigh, NULL);
+		kfree_skb(skb);
+		return;
+	}
+
+	neigh_event_send(neigh, NULL);
+
+	/* send */
+	dev_hard_header(skb, dev, ETH_P_802_3, neigh->ha, dev->dev_addr,
+			ETH_P_IP);
+	dev_queue_xmit(skb);
+}
diff -Nruw linux-6.13.12/net/fbxbridge/fbxbr_private.h linux-6.13.12-fbx/net/fbxbridge/fbxbr_private.h
--- linux-6.13.12/net/fbxbridge/fbxbr_private.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxbridge/fbxbr_private.h	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,198 @@
+#ifndef FBXBRIDGE_PRIVATE_H_
+#define FBXBRIDGE_PRIVATE_H_
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/fbxbridge.h>
+#include <linux/rtnetlink.h>
+#include <linux/spinlock.h>
+
+/* minimum delay between two gateway ARP solicitations */
+#define ARP_RATE_LIMIT			(HZ)
+#define ARP_ETHER_SIZE			(8 + ETH_ALEN * 2 + 4 * 2)
+/* DHCP server default timers, in seconds */
+#define	DEFAULT_RENEWAL_TIME		60
+#define	DEFAULT_REBIND_TIME		300
+#define	DEFAULT_LEASE_TIME		600
+
+/* forward cache: hash bucket count and global entry limit */
+#define FBXBR_FWCACHE_SIZE		256
+#define FBXBR_FWCACHE_MAX_ENTRY		2048
+
+struct fbxbr;
+
+/* lookup key identifying a tcp/udp flow crossing the bridge */
+struct fbxbr_fwcache_key {
+	__be32			lan_ip;
+	__be32			wan_ip;
+	__be16			lan_port;
+	__be16			wan_port;
+	bool			is_tcp;
+};
+
+/* cached forwarding decision for a single tcp/udp flow */
+struct fbxbr_fwcache {
+	__be32			lan_ip;
+	__be32			wan_ip;
+	__be16			lan_port;
+	__be16			wan_port;
+	u8			is_tcp;
+	struct hlist_node       hnext;
+	struct list_head        next;
+
+	void			(*priv_destructor)(void *);
+	u32			priv_area[8];
+	struct rcu_head		rcu;
+};
+
+/* per enslaved net_device state */
+struct fbxbr_port {
+	struct fbxbr		*br;
+	struct net_device	*dev;
+	struct rtable		*rt;
+	bool			is_wan;
+};
+
+struct fbxbr {
+	struct net_device	*dev;
+
+	/* protect all fields but lan_hwaddr */
+	rwlock_t		lock;
+
+	/*
+	 * currently assigned lan & wan port, updated by userspace
+	 * under rtnl
+	 */
+	struct fbxbr_port	*wan_port;
+	struct fbxbr_port	*lan_port;
+
+	/*
+	 * config, updated by userspace
+	 */
+	unsigned int		flags;
+	unsigned int		inputmark;
+
+	unsigned int		dns1_ipaddr;
+	unsigned int		dns2_ipaddr;
+
+	unsigned long		dhcpd_renew_time;
+	unsigned long		dhcpd_rebind_time;
+	unsigned long		dhcpd_lease_time;
+
+	/* list of ip we consider to be local */
+	unsigned long		ip_aliases[MAX_ALIASES];
+
+	/*
+	 * runtime state
+	 */
+
+	/* local and remote (fbx) ip address, maintained using inet
+	 * notifier */
+	__be32			br_ipaddr;
+	__be32			br_remote_ipaddr;
+
+	/* wan side inet info */
+	__be32			wan_ipaddr;
+	__be32			wan_netmask;
+	__be32			lan_gw;
+	__be32			lan_netmask;
+
+	/* currently detected lan device hardware address */
+	rwlock_t		lan_hwaddr_lock;
+	bool			have_hw_addr;
+	unsigned char		lan_hwaddr[ETH_ALEN];
+
+	/* rate limiting state for gateway ARP solicitation */
+	spinlock_t		last_arp_lock;
+	unsigned long		last_arp_send;
+
+	/* forward cache: hash table, LRU-ish list, entry count */
+	rwlock_t		fwcache_lock;
+        struct hlist_head       fwcache_hrules[FBXBR_FWCACHE_SIZE];
+        struct list_head        fwcache_rules;
+        unsigned int            fwcache_count;
+
+	struct list_head	next;
+};
+
+/*
+ * helpers to get bridge port from netdevice
+ */
+#define fbxbr_port_exists(dev) (dev->priv_flags & IFF_FBXBRIDGE_PORT)
+
+/* fetch port, caller runs inside the rx_handler RCU read side */
+static inline struct fbxbr_port *
+fbxbr_port_get_rcu(const struct net_device *dev)
+{
+	return rcu_dereference(dev->rx_handler_data);
+}
+
+/* fetch port under RTNL, NULL if dev is not a fbxbridge port */
+static inline struct fbxbr_port *
+fbxbr_port_get_rtnl(const struct net_device *dev)
+{
+	return fbxbr_port_exists(dev) ?
+		rtnl_dereference(dev->rx_handler_data) : NULL;
+}
+
+/* fbxbr_dev.c */
+int fbxbr_add_br(struct net *net, const char *name);
+
+int __fbxbr_del_br(struct net *net, const char *name);
+
+int __fbxbr_add_br_port(struct net *net, const char *name,
+			const char *port_name, bool is_wan);
+
+int __fbxbr_del_br_port_by_name(struct net *net, const char *name,
+				const char *port_name);
+
+void __fbxbr_del_br_port(struct fbxbr_port *p);
+
+int fbxbr_get_params(struct net *net, const char *name,
+		     struct fbxbridge_ioctl_params *params);
+
+int fbxbr_set_params(struct net *net, const char *name,
+		     const struct fbxbridge_ioctl_params *params);
+
+void fbxbr_capture_hw_addr(struct fbxbr *br, const u8 *hwaddr);
+
+
+/* fbxbr_dhcp.c */
+void fbxbr_dhcpd(struct fbxbr *br, struct sk_buff *skb);
+
+/* fbxbr_filter.c */
+int
+fbxbr_filter_wan_to_lan_packet(struct fbxbr *br, struct sk_buff *skb);
+int
+fbxbr_filter_lan_to_wan_packet(struct fbxbr *br, struct sk_buff *skb);
+
+/* fbxbr_fwcache.c */
+u32 fbxbr_fwcache_hash(const struct fbxbr_fwcache_key *k);
+
+struct fbxbr_fwcache *
+__fbxbr_fwcache_lookup_rcu(struct fbxbr *br, u32 hash,
+			   const struct fbxbr_fwcache_key *k);
+
+bool fbxbr_fwcache_skb_allowable(struct sk_buff *skb,
+				 bool from_wan,
+				 struct fbxbr_fwcache_key *k,
+				 bool *can_create);
+int fbxbr_fwcache_add(struct fbxbr *br,
+		      u32 hash, const struct fbxbr_fwcache_key *k);
+
+void fbxbr_fwcache_flush(struct fbxbr *br);
+
+/* fbxbr_ioctl.c */
+int fbxbr_ioctl(struct net *net, unsigned int ign, void __user *arg);
+
+/* fbxbr_input.c */
+rx_handler_result_t fbxbr_handle_frame(struct sk_buff **pskb);
+
+/* fbxbr_output.c */
+void fbxbr_output_lan_mcast_frame(struct fbxbr *br, struct sk_buff *skb);
+void fbxbr_output_lan_frame(struct fbxbr *br, struct sk_buff *skb);
+void fbxbr_output_wan_frame(struct fbxbr *br, struct sk_buff *skb);
+
+/* fbxbr_utils.c */
+int fbxbr_send_arp_frame(struct net_device *dev, u16 op,
+			 const u8 *dest_hw,
+			 __be32 src_ip, const u8 *src_hw,
+			 __be32 target_ip, const u8 *target_hw);
+
+bool fbxbr_is_valid_ip_packet(struct sk_buff *skb);
+bool fbxbr_is_valid_udp_tcp_packet(struct sk_buff *skb);
+void fbxbr_snat_packet(struct sk_buff *skb, __be32 new_addr);
+void fbxbr_dnat_packet(struct sk_buff *skb, __be32 new_addr);
+
+
+#endif /* !FBXBRIDGE_PRIVATE_H_ */
diff -Nruw linux-6.13.12/net/fbxbridge/fbxbr_utils.c linux-6.13.12-fbx/net/fbxbridge/fbxbr_utils.c
--- linux-6.13.12/net/fbxbridge/fbxbr_utils.c	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/net/fbxbridge/fbxbr_utils.c	2025-09-25 17:40:37.747378036 +0200
@@ -0,0 +1,204 @@
+#include <linux/if_arp.h>
+#include <net/ip.h>
+#include "fbxbr_private.h"
+
+/*
+ * allocate & send ARP frame to given device
+ *
+ * src_hw can be NULL, device address is used instead
+ * dest_hw can be NULL, device broadcast address is used instead
+ * target_hw can be NULL, empty address is used instead
+ *
+ * Returns 0/positive on success (dev_queue_xmit return value) or a
+ * negative errno on allocation/header failure.
+ */
+int fbxbr_send_arp_frame(struct net_device *dev, u16 op,
+			 const u8 *dest_hw,
+			 __be32 src_ip, const u8 *src_hw,
+			 __be32 target_ip, const u8 *target_hw)
+{
+	struct arphdr *arp;
+	struct sk_buff *skb;
+	unsigned char *arp_ptr;
+	int hlen = LL_RESERVED_SPACE(dev);
+	int tlen = dev->needed_tailroom;
+	int ret;
+
+	/* prepare arp packet */
+	skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	skb_reserve(skb, hlen);
+	skb_reset_network_header(skb);
+	arp = skb_put(skb, arp_hdr_len(dev));
+	skb->dev = dev;
+	skb->protocol = htons(ETH_P_ARP);
+
+	if (!src_hw)
+		src_hw = dev->dev_addr;
+	if (!dest_hw)
+		dest_hw = dev->broadcast;
+
+	/* fill fixed ARP header */
+	arp->ar_hrd = htons(dev->type);
+	arp->ar_pro = htons(ETH_P_IP);
+	arp->ar_hln = dev->addr_len;
+	arp->ar_pln = 4;
+	arp->ar_op = htons(op);
+
+	/* variable part: sender hw/ip, then target hw/ip */
+	arp_ptr = (unsigned char *)(arp + 1);
+
+	memcpy(arp_ptr, src_hw, dev->addr_len);
+	arp_ptr += dev->addr_len;
+	memcpy(arp_ptr, &src_ip, 4);
+	arp_ptr += 4;
+
+	if (target_hw)
+		memcpy(arp_ptr, target_hw, dev->addr_len);
+	else
+		memset(arp_ptr, 0, dev->addr_len);
+
+	arp_ptr += dev->addr_len;
+	memcpy(arp_ptr, &target_ip, 4);
+
+	ret = dev_hard_header(skb, dev, ETH_P_ARP, dest_hw, src_hw, skb->len);
+	if (ret < 0) {
+		kfree_skb(skb);
+		return ret;
+	}
+
+	return dev_queue_xmit(skb);
+}
+
+/*
+ * validate IP header fields (version, ihl, total length), linearize
+ * the IP header and set up the transport header offset
+ *
+ * NOTE: the IP header checksum is NOT verified here
+ */
+bool fbxbr_is_valid_ip_packet(struct sk_buff *skb)
+{
+	const struct iphdr *iph;
+
+	if (!pskb_may_pull(skb, sizeof (*iph)))
+		return false;
+
+	iph = ip_hdr(skb);
+
+	if (iph->ihl < 5 || iph->version != 4)
+		return false;
+
+	/* pull full header including options */
+	if (!pskb_may_pull(skb, iph->ihl * 4))
+		return false;
+
+	/* reload, pskb_may_pull may have reallocated the head */
+	iph = ip_hdr(skb);
+
+	if (ntohs(iph->tot_len) > skb->len)
+		return false;
+
+	skb->transport_header = skb->network_header + iph->ihl * 4;
+
+	return true;
+}
+
+/*
+ * make sure the udp/tcp header is present in the linear section
+ *
+ * other protocols are accepted as-is (nothing to pull)
+ */
+bool fbxbr_is_valid_udp_tcp_packet(struct sk_buff *skb)
+{
+	const struct iphdr *iph;
+
+	iph = ip_hdr(skb);
+
+	switch (iph->protocol) {
+	case IPPROTO_UDP:
+		if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+				   sizeof (struct udphdr)))
+			return false;
+		break;
+	case IPPROTO_TCP:
+		if (!pskb_may_pull(skb, skb_transport_offset(skb) +
+				   sizeof (struct tcphdr)))
+			return false;
+		break;
+	}
+	return true;
+}
+
+
+/*
+ * fix up the TCP/UDP checksum after an address rewrite: osaddr/odaddr
+ * are the source/destination addresses the checksum was computed
+ * with, the new addresses are read from the current IP header
+ */
+static void recalculate_l4_checksum(struct sk_buff *skb,
+				    __be32 osaddr, __be32 odaddr)
+{
+	struct iphdr *iph;
+	u16 check;
+
+	iph = ip_hdr(skb);
+	/* non-first fragments carry no transport header */
+	if (iph->frag_off & htons(IP_OFFSET))
+		return;
+
+	if (!fbxbr_is_valid_udp_tcp_packet(skb))
+		return;
+
+	iph = ip_hdr(skb);
+
+	switch (iph->protocol) {
+	case IPPROTO_TCP:
+	{
+		struct tcphdr *tcph;
+
+		/* remove old pseudo-header contribution, add new one */
+		tcph = (struct tcphdr *)skb_transport_header(skb);
+		check = tcph->check;
+		if (skb->ip_summed != CHECKSUM_COMPLETE)
+			check = ~check;
+		check = csum_tcpudp_magic(iph->saddr, iph->daddr, 0, 0, check);
+		check = csum_tcpudp_magic(~osaddr, ~odaddr, 0, 0, ~check);
+		if (skb->ip_summed == CHECKSUM_COMPLETE)
+			check = ~check;
+		tcph->check = check;
+		break;
+	}
+
+	case IPPROTO_UDP:
+	{
+		struct udphdr *udph;
+
+		udph = (struct udphdr *)skb_transport_header(skb);
+		check = udph->check;
+		/* udp checksum 0 means "not computed", leave it */
+		if (check != 0) {
+			check = csum_tcpudp_magic(iph->saddr, iph->daddr,
+						  0, 0, ~check);
+			check = csum_tcpudp_magic(~osaddr, ~odaddr, 0, 0,
+						  ~check);
+			/* 0 is reserved for "no checksum" */
+			udph->check = check ? : 0xFFFF;
+		}
+		break;
+	}
+	}
+}
+
+/*
+ * rewrite the source address and fix IP + transport checksums
+ *
+ * packet must be valid IPv4 with header in linear section
+ */
+void fbxbr_snat_packet(struct sk_buff *skb, __be32 new_addr)
+{
+	struct iphdr *ip;
+	__be32 oaddr;
+
+	ip = ip_hdr(skb);
+	oaddr = ip->saddr;
+	ip->saddr = new_addr;
+	ip->check = 0;
+	ip->check = ip_fast_csum((unsigned char *) ip, ip->ihl);
+	/* transport checksum still reflects the old source address */
+	recalculate_l4_checksum(skb, oaddr, ip->daddr);
+}
+
+/*
+ * rewrite the destination address and fix IP + transport checksums
+ *
+ * packet must be valid IPv4 with header in linear section
+ */
+void fbxbr_dnat_packet(struct sk_buff *skb, __be32 new_addr)
+{
+	struct iphdr *iph = ip_hdr(skb);
+	__be32 old_daddr = iph->daddr;
+
+	/* rewrite destination, then refresh the IP header checksum */
+	iph->daddr = new_addr;
+	iph->check = 0;
+	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+	/* transport checksum still reflects the old destination */
+	recalculate_l4_checksum(skb, iph->saddr, old_daddr);
+}
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/net/ipv4/ip_ffn.c	2025-09-25 17:40:37.763378116 +0200
@@ -0,0 +1,767 @@
+/*
+ * IP fast forwarding and NAT
+ *
+ * Very restrictive code that only copes with non-fragmented UDP and
+ * TCP packets that are routed and NATed with no other modification.
+ *
+ * Provides a fast path for established conntrack entries so that
+ * packets go out ASAP.
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/net.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/jhash.h>
+#include <linux/proc_fs.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+
+#include <net/ip_ffn.h>
+
+#define FFN_CACHE_SIZE		256
+#define MAX_FFN_ENTRY		2048
+
+static DEFINE_SPINLOCK(ffn_lock);
+static struct list_head ffn_cache[FFN_CACHE_SIZE];
+static struct list_head ffn_all;
+static unsigned int ffn_entry_count;
+
+/*
+ * hash the flow 5-tuple; the protocol (tcp vs udp) is folded in by
+ * complementing the destination address for udp flows
+ */
+static inline unsigned int ffn_hash(const struct ffn_lookup_key *k)
+{
+	return jhash_3words(k->sip, k->is_tcp ? k->dip : ~k->dip,
+			    k->sport | k->dport << 16, 0);
+}
+
+/*
+ * attempt to find entry with given value in cache, under RCU lock
+ *
+ * returns NULL when no matching entry exists
+ */
+struct ffn_lookup_entry *__ffn_get_rcu(const struct ffn_lookup_key *k)
+{
+	struct ffn_lookup_entry *tmp;
+	unsigned int hash;
+	u8 protocol;
+
+	protocol = (k->is_tcp) ? IPPROTO_TCP : IPPROTO_UDP;
+	hash = ffn_hash(k);
+
+	list_for_each_entry_rcu(tmp, &ffn_cache[hash % FFN_CACHE_SIZE], next) {
+		if (tmp->sip == k->sip && tmp->dip == k->dip &&
+		    tmp->sport == k->sport && tmp->dport == k->dport &&
+		    tmp->protocol == protocol)
+			return tmp;
+	}
+	return NULL;
+}
+
+EXPORT_SYMBOL(__ffn_get_rcu);
+
+
+/*
+ * attempt to find entry with given value in cache, under ffn lock
+ * (writer-side counterpart of __ffn_get_rcu)
+ */
+static struct ffn_lookup_entry *__ffn_get(const struct ffn_lookup_key *k)
+{
+	struct ffn_lookup_entry *tmp;
+	unsigned int hash;
+	u8 protocol;
+
+	protocol = (k->is_tcp) ? IPPROTO_TCP : IPPROTO_UDP;
+	hash = ffn_hash(k);
+
+	list_for_each_entry(tmp, &ffn_cache[hash % FFN_CACHE_SIZE], next) {
+		if (tmp->sip == k->sip && tmp->dip == k->dip &&
+		    tmp->sport == k->sport && tmp->dport == k->dport &&
+		    tmp->protocol == protocol)
+			return tmp;
+	}
+	return NULL;
+}
+
+/*
+ * rcu release deferred callback: run the private destructor if any,
+ * drop the route reference and free the entry
+ */
+static void delayed_ffn_free_entry(struct rcu_head *rhp)
+{
+	struct ffn_lookup_entry *e;
+
+	e = container_of(rhp, struct ffn_lookup_entry, rcu);
+	if (e->manip.priv_destructor)
+		e->manip.priv_destructor((void *)e->manip.ffn_priv_area);
+
+	dst_release(e->manip.dst);
+	kfree(e);
+}
+
+/*
+ * unlink entry from both lists and schedule its release after a
+ * grace period
+ *
+ * must be called with ffn lock held
+ */
+static void __ffn_remove_entry(struct ffn_lookup_entry *e)
+{
+	list_del_rcu(&e->next);
+	list_del_rcu(&e->all_next);
+	call_rcu(&e->rcu, delayed_ffn_free_entry);
+	ffn_entry_count--;
+}
+
+/*
+ * remove the entry matching the given key, if present
+ */
+static void ffn_find_and_remove(const struct ffn_lookup_key *k)
+{
+	struct ffn_lookup_entry *e;
+
+	spin_lock_bh(&ffn_lock);
+	e = __ffn_get(k);
+	if (e)
+		__ffn_remove_entry(e);
+	spin_unlock_bh(&ffn_lock);
+}
+
+/*
+ * insert a fully initialized entry in the cache
+ *
+ * must be called with ffn lock held
+ *
+ * returns 0 on success, 1 if a duplicate exists or the cache is full
+ * (caller keeps ownership of e in that case)
+ */
+static int __ffn_add_entry(struct ffn_lookup_entry *e)
+{
+	struct ffn_lookup_key k = {
+		.sip = e->sip,
+		.dip = e->dip,
+		.sport = e->sport,
+		.dport = e->dport,
+		.is_tcp = (e->protocol == IPPROTO_TCP)
+	};
+
+	/* make sure it's not present */
+	if (__ffn_get(&k))
+		return 1;
+
+	if (ffn_entry_count >= MAX_FFN_ENTRY)
+		return 1;
+
+	/* add new entry */
+	list_add_tail_rcu(&e->next, &ffn_cache[ffn_hash(&k) % FFN_CACHE_SIZE]);
+	list_add_tail_rcu(&e->all_next, &ffn_all);
+	ffn_entry_count++;
+	return 0;
+}
+
+/*
+ * compute the checksum delta to apply to a transport checksum when
+ * the addresses/ports change from the "o" values to the "n" values
+ */
+static inline __sum16 checksum_adjust(u32 osip,
+				      u32 nsip,
+				      u32 odip,
+				      u32 ndip,
+				      u16 osport,
+				      u16 nsport,
+				      u16 odport,
+				      u16 ndport)
+{
+	const u32 old[] = { osip, odip, osport, odport };
+	const u32 new[] = { nsip, ndip, nsport, ndport };
+	__wsum osum, nsum;
+
+	osum = csum_partial(old, sizeof (old), 0);
+	nsum = csum_partial(new, sizeof (new), 0);
+
+	return ~csum_fold(csum_sub(nsum, osum));
+}
+
+/*
+ * compute the checksum delta to apply to the IP header checksum when
+ * the addresses change, folding in the TTL decrement done by the
+ * fast path
+ */
+static inline __sum16 checksum_adjust_ip(u32 osip,
+					 u32 nsip,
+					 u32 odip,
+					 u32 ndip)
+{
+	const u32 old[] = { osip, odip };
+	const u32 new[] = { nsip, ndip };
+	__wsum osum, nsum;
+
+	osum = csum_partial(old, sizeof (old), 0);
+	nsum = csum_partial(new, sizeof (new), 0);
+
+	/* -1 for TTL decrease */
+	return ~csum_fold(csum_sub(csum_sub(nsum, osum), 1));
+}
+
+/*
+ * two hooks into netfilter code
+ */
+extern int external_tcpv4_packet(struct nf_conn *ct,
+				 struct sk_buff *skb,
+				 unsigned int dataoff,
+				 enum ip_conntrack_info ctinfo);
+
+extern int external_udpv4_packet(struct nf_conn *ct,
+				 struct sk_buff *skb,
+				 unsigned int dataoff,
+				 enum ip_conntrack_info ctinfo);
+
+extern int ip_local_deliver_finish(struct net *net,
+				   struct sock *sk, struct sk_buff *skb);
+
+/*
+ * check if packet is in ffn cache, or mark it if it can be added
+ * later
+ *
+ * returns 0 when the packet has been consumed (fast-forwarded or
+ * dropped), 1 when the caller must continue on the normal path
+ * (skb->ffn_state then tells whether it may be added to the cache)
+ */
+int ip_ffn_process(struct sk_buff *skb)
+{
+	struct ffn_lookup_entry *e;
+	struct nf_conntrack *nfct;
+	struct iphdr *iph;
+	struct tcphdr *tcph = NULL;
+	struct udphdr *udph = NULL;
+	struct ffn_lookup_key k;
+	bool remove_me, drop_ct;
+	u16 tcheck;
+	u8 proto;
+	int res, added_when;
+
+	/* fast path only handles the initial namespace */
+	if (!net_eq(dev_net(skb->dev), &init_net))
+		goto not_ffnable;
+
+	iph = ip_hdr(skb);
+
+	/* refuse fragmented IP packet, or packets with IP options */
+	if (iph->ihl > 5 || (iph->frag_off & htons(IP_MF | IP_OFFSET)))
+		goto not_ffnable;
+
+	/* check encapsulated protocol is udp or tcp */
+	if (iph->protocol != IPPROTO_TCP && iph->protocol != IPPROTO_UDP)
+		goto not_ffnable;
+
+	/* TTL would expire, let the slow path emit the ICMP error */
+	if (iph->ttl <= 1)
+		goto not_ffnable;
+
+	/* build the lookup key, requiring the transport header to be
+	 * fully in the linear area */
+	proto = iph->protocol;
+	if (proto == IPPROTO_TCP) {
+		if (skb_headlen(skb) < sizeof (*iph) + sizeof (struct tcphdr))
+			goto not_ffnable;
+
+		tcph = (struct tcphdr *)((unsigned char *)iph + sizeof (*iph));
+
+		if (tcph->doff * 4 < sizeof (struct tcphdr))
+			goto not_ffnable;
+
+		if (skb_headlen(skb) < sizeof (*iph) + tcph->doff * 4)
+			goto not_ffnable;
+
+		k.sport = tcph->source;
+		k.dport = tcph->dest;
+		k.is_tcp = true;
+	} else {
+		if (skb_headlen(skb) < sizeof (*iph) + sizeof (struct udphdr))
+			goto not_ffnable;
+
+		udph = (struct udphdr *)((unsigned char *)iph + sizeof (*iph));
+		k.sport = udph->source;
+		k.dport = udph->dest;
+		k.is_tcp = false;
+	}
+
+	rcu_read_lock();
+
+	k.sip = iph->saddr;
+	k.dip = iph->daddr;
+	e = __ffn_get_rcu(&k);
+	if (!e) {
+		rcu_read_unlock();
+		goto ffnable;
+	}
+
+	/* cached route went stale: purge the entry and fall back */
+	if (e->manip.dst->obsolete > 0) {
+		rcu_read_unlock();
+		ffn_find_and_remove(&k);
+		goto ffnable;
+	}
+
+	/* take a conntrack reference for the duration of the fast
+	 * forward */
+	remove_me = false;
+	nfct = &e->manip.ct->ct_general;
+	nf_conntrack_get(nfct);
+
+	if (proto == IPPROTO_TCP) {
+		/* do sequence number checking and update
+		 * conntrack info */
+		res = external_tcpv4_packet(e->manip.ct, skb, sizeof (*iph),
+					    e->manip.ctinfo);
+		if (e->manip.ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+			remove_me = true;
+		tcheck = tcph->check;
+
+	} else {
+		res = external_udpv4_packet(e->manip.ct, skb, sizeof (*iph),
+					    e->manip.ctinfo);
+		tcheck = udph->check;
+	}
+
+	if (unlikely(res != NF_ACCEPT)) {
+		/* packet rejected by conntrack, unless asked to drop,
+		 * send it back into kernel */
+		rcu_read_unlock();
+		nf_conntrack_put(nfct);
+
+		if (remove_me)
+			ffn_find_and_remove(&k);
+
+		if (res == NF_DROP) {
+			dev_kfree_skb(skb);
+			return 0;
+		}
+
+		goto ffnable;
+	}
+
+	if (!e->manip.alter)
+		goto fix_ip_hdr;
+
+	if (skb->ip_summed != CHECKSUM_PARTIAL) {
+		/* fix ports & transport protocol checksum */
+		if (proto == IPPROTO_TCP) {
+			tcph->source = e->manip.new_sport;
+			tcph->dest = e->manip.new_dport;
+			tcph->check = csum16_sub(tcph->check,
+						 e->manip.l4_adjustment);
+		} else {
+			udph->source = e->manip.new_sport;
+			udph->dest = e->manip.new_dport;
+			if (udph->check) {
+				/* NOTE(review): this tcheck shadows
+				 * the function-scope tcheck above */
+				u16 tcheck;
+
+				tcheck = csum16_sub(udph->check,
+						    e->manip.l4_adjustment);
+				udph->check = tcheck ? tcheck : 0xffff;
+			}
+		}
+	} else {
+		unsigned int len;
+
+		/*
+		 * assume tcph->check only covers ip pseudo header, so
+		 * don't update checksum wrt port change
+		 *
+		 * we might check skb->csum_offset to confirm that
+		 * this is a valid assertion
+		 */
+		if (proto == IPPROTO_TCP) {
+			len = skb->len - ((void *)tcph - (void *)iph);
+			tcheck = ~csum_tcpudp_magic(e->manip.new_sip,
+						    e->manip.new_dip,
+						    len, IPPROTO_TCP, 0);
+			tcph->check = tcheck;
+			tcph->source = e->manip.new_sport;
+			tcph->dest = e->manip.new_dport;
+		} else {
+			len = skb->len - ((void *)udph - (void *)iph);
+			if (udph->check) {
+				tcheck = ~csum_tcpudp_magic(e->manip.new_sip,
+							    e->manip.new_dip,
+							    len,
+							    IPPROTO_UDP, 0);
+				udph->check = tcheck ? tcheck : 0xffff;
+			}
+			udph->source = e->manip.new_sport;
+			udph->dest = e->manip.new_dport;
+		}
+	}
+
+	/* update IP header field */
+	iph->saddr = e->manip.new_sip;
+	iph->daddr = e->manip.new_dip;
+
+fix_ip_hdr:
+	iph->ttl--;
+
+	/* TTL (and possibly addresses/tos) changed: fix IP checksum
+	 * either by full recompute or by applying the cached delta */
+	if (e->manip.tos_change) {
+		iph->tos = e->manip.new_tos;
+		iph->check = 0;
+		iph->check = ip_fast_csum((u8 *)iph, 5);
+	} else {
+		iph->check = csum16_sub(iph->check,
+					e->manip.ip_adjustment);
+	}
+
+	/* forward skb */
+	if (e->manip.force_skb_prio)
+		skb->priority = e->manip.new_skb_prio;
+	else
+		skb->priority = rt_tos2priority(iph->tos);
+
+	skb->mark = e->manip.new_mark;
+
+#ifdef CONFIG_IP_FFN_PROCFS
+	e->forwarded_packets++;
+	e->forwarded_bytes += skb->len;
+#endif
+
+	/* attach the cached route */
+	skb_dst_drop(skb);
+	skb_dst_set(skb, dst_clone(e->manip.dst));
+
+	added_when = e->added_when;
+
+	drop_ct = true;
+	if (nfct != skb_nfct(skb)) {
+		if (unlikely(skb_nfct(skb) != NULL)) {
+			/*
+			 * conntrack is not NULL here and it is not
+			 * the same as the one we have in the
+			 * ffn_entry, this shoud not happen, warn once
+			 * and switch to slow path.
+			 */
+			WARN_ONCE(1,
+				  "weird skb->nfct %p, NULL was expected\n",
+				  skb_nfct(skb));
+			printk_once(KERN_WARNING "ffn entry:\n"
+				    " added_when: %i\n"
+				    " sip: %pI4 -> %pI4\n"
+				    " dip: %pI4 -> %pI4\n"
+				    " sport: %u -> %u\n"
+				    " dport: %u -> %u\n",
+				    e->added_when,
+				    &e->sip, &e->manip.new_sip,
+				    &e->dip, &e->manip.new_dip,
+				    htons(e->sport), htons(e->manip.new_sport),
+				    htons(e->dport), htons(e->manip.new_dport));
+			rcu_read_unlock();
+
+			if (remove_me)
+				ffn_find_and_remove(&k);
+			goto not_ffnable;
+		}
+
+		/* hand our reference over to the skb */
+		nf_ct_set(skb, (struct nf_conn *)nfct, e->manip.ctinfo);
+		drop_ct = false;
+	}
+
+	rcu_read_unlock();
+	if (unlikely(remove_me))
+		ffn_find_and_remove(&k);
+
+	if (drop_ct) {
+		/*
+		 * skbs to/from localhost will have the conntrack
+		 * already set, don't leak references here.
+		 */
+		nf_conntrack_put(nfct);
+	}
+
+	skb->ffn_state = FFN_STATE_FAST_FORWARDED;
+	IPCB(skb)->flags |= IPSKB_FORWARDED;
+
+	/* replay the packet at the point it was originally cached */
+	if (added_when == IP_FFN_FINISH_OUT)
+		dst_output(&init_net, skb->sk, skb);
+	else
+		ip_local_deliver_finish(&init_net, skb->sk, skb);
+
+	return 0;
+
+ffnable:
+	skb->ffn_state = FFN_STATE_FORWARDABLE;
+	skb->ffn_orig_tos = iph->tos;
+	return 1;
+
+not_ffnable:
+	skb->ffn_state = FFN_STATE_INCOMPATIBLE;
+	return 1;
+}
+
+/*
+ * check if skb is candidate for ffn, and if so add it to ffn cache
+ *
+ * called after post routing
+ *
+ * @skb: packet that just traversed the slow path
+ * @when: IP_FFN_FINISH_OUT or local-deliver, recorded so the fast
+ *        path can replay the packet at the same point
+ */
+void ip_ffn_add(struct sk_buff *skb, int when)
+{
+	struct nf_conn *ct;
+	struct nf_conntrack_tuple *tuple, *rtuple;
+	enum ip_conntrack_info ctinfo;
+	struct ffn_lookup_entry *e;
+	struct iphdr *iph;
+	struct net *skb_net;
+	int dir;
+
+	/* fast path only handles the initial namespace; note the
+	 * unusual case is the non-init namespace, hence unlikely()
+	 * on the negated test */
+	skb_net = dev_net(skb->dev);
+	if (unlikely(!net_eq(skb_net, &init_net)))
+		return;
+
+	/* make sure external_tcp_packet/external_udp_packet won't
+	 * attempt to checksum packet, the ffn code does not update
+	 * skb->csum, which must stay valid if skb_checksum_complete
+	 * has been called */
+	if (unlikely(skb_net->ct.sysctl_checksum))
+		skb_net->ct.sysctl_checksum = 0;
+
+	if (ffn_entry_count >= MAX_FFN_ENTRY)
+		return;
+
+	iph = ip_hdr(skb);
+
+	/* only plain ip_output routes can be replayed at finish-out */
+	if (skb_dst(skb)->output != ip_output && when == IP_FFN_FINISH_OUT)
+		return;
+
+	/* only cache fully established, helper-less conntracks */
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct || ctinfo == IP_CT_UNTRACKED)
+		return;
+
+	if ((ctinfo != IP_CT_ESTABLISHED) &&
+	    (ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)) {
+		return;
+	}
+
+	if (nfct_help(ct))
+		return;
+
+	dir = (ctinfo == IP_CT_ESTABLISHED) ?
+		IP_CT_DIR_ORIGINAL : IP_CT_DIR_REPLY;
+	tuple = &ct->tuplehash[dir].tuple;
+
+	if (tuple->dst.protonum != IPPROTO_TCP &&
+	    tuple->dst.protonum != IPPROTO_UDP)
+		return;
+
+	if (tuple->dst.protonum == IPPROTO_TCP &&
+	    ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+		return;
+
+	rtuple = &ct->tuplehash[1 - dir].tuple;
+
+	e = kmalloc(sizeof (*e), GFP_ATOMIC);
+	if (!e)
+		return;
+
+	/* pre-NAT flow identity, used as the lookup key */
+	e->added_when = when;
+	e->sip = tuple->src.u3.ip;
+	e->dip = tuple->dst.u3.ip;
+	e->sport = tuple->src.u.all;
+	e->dport = tuple->dst.u.all;
+	e->protocol = tuple->dst.protonum;
+
+#ifdef CONFIG_IP_FFN_PROCFS
+	e->forwarded_packets = 0;
+	e->forwarded_bytes = 0;
+#endif
+
+	/* post-NAT identity, derived from the reply tuple */
+	e->manip.new_sip = rtuple->dst.u3.ip;
+	e->manip.new_dip = rtuple->src.u3.ip;
+	e->manip.new_sport = rtuple->dst.u.all;
+	e->manip.new_dport = rtuple->src.u.all;
+
+	if (e->manip.new_sip == e->sip &&
+	    e->manip.new_dip == e->dip &&
+	    e->manip.new_sport == e->sport &&
+	    e->manip.new_dport == e->dport)
+		e->manip.alter = 0;
+	else
+		e->manip.alter = 1;
+
+	if (e->manip.alter) {
+		/* compute checksum adjustement */
+		e->manip.l4_adjustment = checksum_adjust(e->sip,
+							 e->manip.new_sip,
+							 e->dip,
+							 e->manip.new_dip,
+							 e->sport,
+							 e->manip.new_sport,
+							 e->dport,
+							 e->manip.new_dport);
+	} else {
+		/* unused when no manip is done, keep it initialized */
+		e->manip.l4_adjustment = 0;
+	}
+
+	e->manip.ip_adjustment = checksum_adjust_ip(e->sip,
+						    e->manip.new_sip,
+						    e->dip,
+						    e->manip.new_dip);
+
+	/* remember tos/priority/mark rewrites applied by the slow
+	 * path so the fast path can reproduce them */
+	if (skb->ffn_orig_tos != iph->tos) {
+		e->manip.tos_change = 1;
+		e->manip.new_tos = iph->tos;
+	} else
+		e->manip.tos_change = 0;
+
+	if (skb->priority != rt_tos2priority(iph->tos)) {
+		e->manip.force_skb_prio = 1;
+		e->manip.new_skb_prio = skb->priority;
+	} else
+		e->manip.force_skb_prio = 0;
+
+	e->manip.new_mark = skb->mark;
+	e->manip.priv_destructor = NULL;
+	e->manip.dst = skb_dst(skb);
+	dst_hold(e->manip.dst);
+	e->manip.ct = ct;
+	e->manip.ctinfo = ctinfo;
+
+	spin_lock_bh(&ffn_lock);
+	if (__ffn_add_entry(e)) {
+		/* duplicate or cache full: release what we took */
+		spin_unlock_bh(&ffn_lock);
+		dst_release(e->manip.dst);
+		kfree(e);
+		return;
+	}
+	spin_unlock_bh(&ffn_lock);
+}
+
+/*
+ * netfilter callback when conntrack is about to be destroyed
+ */
+void ip_ffn_ct_destroy(struct nf_conn *ct);
+void ip_ffn_ct_destroy(struct nf_conn *ct)
+{
+	struct nf_conntrack_tuple *tuple;
+	int dir;
+
+	/* locate all entry that use this conntrack */
+	for (dir = 0; dir < 2; dir++) {
+		struct ffn_lookup_key k;
+
+		tuple = &ct->tuplehash[dir].tuple;
+
+		/* both directions share the protocol number, so
+		 * returning here skips the second tuple too */
+		if (tuple->dst.protonum != IPPROTO_TCP &&
+		    tuple->dst.protonum != IPPROTO_UDP)
+			return;
+
+		k.sip = tuple->src.u3.ip;
+		k.dip = tuple->dst.u3.ip;
+		k.sport = tuple->src.u.all;
+		k.dport = tuple->dst.u.all;
+		k.is_tcp = (tuple->dst.protonum == IPPROTO_TCP);
+		ffn_find_and_remove(&k);
+	}
+}
+
+/*
+ * reset the ffn cache to its empty state: every hash bucket, the
+ * global list and the entry counter
+ */
+static void __ip_ffn_init_cache(void)
+{
+	unsigned int idx;
+
+	for (idx = 0; idx < FFN_CACHE_SIZE; idx++)
+		INIT_LIST_HEAD(&ffn_cache[idx]);
+
+	INIT_LIST_HEAD(&ffn_all);
+	ffn_entry_count = 0;
+}
+
+/*
+ * flush all ffn cache entries (entries are freed after a grace
+ * period by the rcu callback)
+ */
+void ip_ffn_flush_all(void)
+{
+	struct ffn_lookup_entry *e, *tmp;
+
+	spin_lock_bh(&ffn_lock);
+	list_for_each_entry_safe(e, tmp, &ffn_all, all_next)
+		__ffn_remove_entry(e);
+	spin_unlock_bh(&ffn_lock);
+}
+
+#ifdef CONFIG_IP_FFN_PROCFS
+struct proc_dir_entry *proc_net_ip_ffn;
+
+/* seq_file show callback dumping every cached flow, per bucket */
+static int ip_ffn_entries_show(struct seq_file *m, void *v)
+{
+	int i;
+
+	rcu_read_lock();
+
+	for (i = 0; i < FFN_CACHE_SIZE; ++i) {
+		struct ffn_lookup_entry *e;
+
+		if (list_empty(&ffn_cache[i]))
+			continue;
+
+		seq_printf(m, "Bucket %i:\n", i);
+		list_for_each_entry_rcu(e, &ffn_cache[i], next) {
+			seq_printf(m, " Protocol: ");
+			switch (e->protocol) {
+			case IPPROTO_TCP:
+				seq_printf(m, "TCPv4\n");
+				break;
+			case IPPROTO_UDP:
+				seq_printf(m, "UDPv4\n");
+				break;
+			default:
+				seq_printf(m, "ipproto_%i\n", e->protocol);
+				break;
+			}
+			seq_printf(m, " Original flow: %pI4:%u -> %pI4:%u\n",
+				   &e->sip,
+				   ntohs(e->sport),
+				   &e->dip,
+				   ntohs(e->dport));
+
+			/* only print the NATed flow when it differs */
+			if (e->sip != e->manip.new_sip ||
+			    e->dip != e->manip.new_dip ||
+			    e->sport != e->manip.new_sport ||
+			    e->dport != e->manip.new_dport) {
+				seq_printf(m,
+					   " Modified flow: %pI4:%u -> "
+					   "%pI4:%u\n",
+					   &e->manip.new_sip,
+					   ntohs(e->manip.new_sport),
+					   &e->manip.new_dip,
+					   ntohs(e->manip.new_dport));
+			}
+
+			seq_printf(m, "  Forwarded packets: %u\n",
+				   e->forwarded_packets);
+			seq_printf(m, "  Forwarded bytes: %llu\n",
+				   e->forwarded_bytes);
+			seq_printf(m, "\n");
+		}
+	}
+
+	rcu_read_unlock();
+	return 0;
+}
+
+/* open callback for /proc/net/ip_ffn/entries */
+static int ip_ffn_entries_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ip_ffn_entries_show, NULL);
+}
+
+/* proc_ops for the read-only entries file */
+static const struct proc_ops ip_ffn_entries_fops = {
+	.proc_open	= ip_ffn_entries_open,
+	.proc_release	= single_release,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+};
+
+/* create /proc/net/ip_ffn/entries; returns 0 or -1 on failure */
+static int __init __ip_ffn_init_procfs(void)
+{
+	proc_net_ip_ffn = proc_net_mkdir(&init_net, "ip_ffn",
+					 init_net.proc_net);
+	if (!proc_net_ip_ffn) {
+		printk(KERN_ERR "proc_mkdir() has failed for 'net/ip_ffn'.\n");
+		return -1;
+	}
+
+	if (proc_create("entries", 0400, proc_net_ip_ffn,
+			&ip_ffn_entries_fops) == NULL) {
+		printk(KERN_ERR "proc_create() has failed for "
+		       "'net/ip_ffn/entries'.\n");
+		return -1;
+	}
+	return 0;
+}
+#endif
+
+/*
+ * initialize ffn: empty the cache and, when configured, create the
+ * procfs entries (a procfs failure is only a warning, the fast path
+ * works without it)
+ */
+void __init ip_ffn_init(void)
+{
+	/* explicit log level, bare printk() defaults are deprecated */
+	printk(KERN_INFO "IP Fast Forward and NAT enabled\n");
+	__ip_ffn_init_cache();
+
+#ifdef CONFIG_IP_FFN_PROCFS
+	if (__ip_ffn_init_procfs() < 0)
+		printk(KERN_WARNING "IP FFN: unable to create proc entries.\n");
+#endif
+}
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/net/ipv6/ip6_ffn.c	2025-09-25 17:40:37.791378254 +0200
@@ -0,0 +1,706 @@
+/*
+ * IPv6 fast forwarding and NAT
+ *
+ * Very restrictive code that only copes with non-fragmented UDP and TCP
+ * packets, that are routed and NATed with no other modification.
+ *
+ * Provide a fast path for established conntrack entries so that
+ * packets go out ASAP.
+ */
+
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/jhash.h>
+#include <linux/proc_fs.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+
+#include <net/ip6_ffn.h>
+#include <net/dsfield.h>
+
+#define FFN6_CACHE_SIZE		256
+#define MAX_FFN6_ENTRY		2048
+
+static DEFINE_SPINLOCK(ffn_lock);
+static struct list_head ffn6_cache[FFN6_CACHE_SIZE];
+static struct list_head ffn6_all;
+static unsigned int ffn6_entry_count;
+
+/*
+ * rcu release deferred callback
+ */
+static void delayed_ffn_free_entry(struct rcu_head *rhp)
+{
+	struct ffn6_lookup_entry *e;
+
+	e = container_of(rhp, struct ffn6_lookup_entry, rcu);
+	if (e->manip.priv_destructor)
+		e->manip.priv_destructor((void *)e->manip.ffn_priv_area);
+
+	dst_release(e->manip.dst);
+	kfree(e);
+}
+
+/*
+ * must be called with ffn lock held
+ */
+static void __ffn6_remove_entry(struct ffn6_lookup_entry *e)
+{
+	list_del_rcu(&e->next);
+	list_del_rcu(&e->all_next);
+	call_rcu(&e->rcu, delayed_ffn_free_entry);
+	ffn6_entry_count--;
+}
+
+/*
+ * hash on the flow 5-tuple (addresses, ports, protocol)
+ */
+static inline unsigned int ffn6_hash(const struct ffn6_lookup_key *k)
+{
+	return jhash_3words(k->sip[3], k->is_tcp ? k->dip[3] : ~k->dip[3],
+			    k->sport | k->dport << 16, 0);
+}
+
+/*
+ * attempt to find entry with given value in cache, under RCU lock
+ */
+struct ffn6_lookup_entry *
+__ffn6_get_rcu(const struct ffn6_lookup_key *k)
+{
+	struct ffn6_lookup_entry *tmp;
+	unsigned int hash;
+	u8 protocol;
+
+	protocol = (k->is_tcp) ? IPPROTO_TCP : IPPROTO_UDP;
+	hash = ffn6_hash(k);
+
+	list_for_each_entry_rcu(tmp,
+				&ffn6_cache[hash % FFN6_CACHE_SIZE], next) {
+		if (!memcmp(tmp->sip, k->sip, 16) &&
+		    !memcmp(tmp->dip, k->dip, 16) &&
+		    tmp->sport == k->sport && tmp->dport == k->dport &&
+		    tmp->protocol == protocol)
+			return tmp;
+	}
+	return NULL;
+}
+
+EXPORT_SYMBOL(__ffn6_get_rcu);
+
+/*
+ * attempt to find entry with given value in cache, under ffn lock
+ */
+static struct ffn6_lookup_entry *__ffn6_get(const struct ffn6_lookup_key *k)
+{
+	struct ffn6_lookup_entry *tmp;
+	unsigned int hash;
+	u8 protocol;
+
+	protocol = (k->is_tcp) ? IPPROTO_TCP : IPPROTO_UDP;
+	hash = ffn6_hash(k);
+
+	list_for_each_entry(tmp, &ffn6_cache[hash % FFN6_CACHE_SIZE], next) {
+		if (!memcmp(tmp->sip, k->sip, 16) &&
+		    !memcmp(tmp->dip, k->dip, 16) &&
+		    tmp->sport == k->sport && tmp->dport == k->dport &&
+		    tmp->protocol == protocol)
+			return tmp;
+	}
+	return NULL;
+}
+
+/*
+ * find the entry matching the given key and remove it from the cache
+ */
+static void ffn6_find_and_remove(const struct ffn6_lookup_key *k)
+{
+	struct ffn6_lookup_entry *e;
+
+	spin_lock_bh(&ffn_lock);
+	e = __ffn6_get(k);
+	if (e)
+		__ffn6_remove_entry(e);
+	spin_unlock_bh(&ffn_lock);
+}
+
+/*
+ * must be called with ffn lock held
+ */
+static int __ffn6_add_entry(struct ffn6_lookup_entry *e)
+{
+	struct ffn6_lookup_key k = {
+		.sip = e->sip,
+		.dip = e->dip,
+		.sport = e->sport,
+		.dport = e->dport,
+		.is_tcp = (e->protocol == IPPROTO_TCP)
+	};
+
+	/* make sure it's not present */
+	if (__ffn6_get(&k))
+		return 1;
+
+	if (ffn6_entry_count >= MAX_FFN6_ENTRY)
+		return 1;
+
+	/* add new entry */
+	list_add_tail_rcu(&e->next, &ffn6_cache[ffn6_hash(&k) % FFN6_CACHE_SIZE]);
+	list_add_tail_rcu(&e->all_next, &ffn6_all);
+	ffn6_entry_count++;
+	return 0;
+}
+
+/*
+ * two hooks into netfilter code
+ */
+extern int external_tcpv6_packet(struct nf_conn *ct,
+				 struct sk_buff *skb,
+				 unsigned int dataoff,
+				 enum ip_conntrack_info ctinfo);
+
+extern int external_udpv6_packet(struct nf_conn *ct,
+				 struct sk_buff *skb,
+				 unsigned int dataoff,
+				 enum ip_conntrack_info ctinfo);
+
+/*
+ * check if packet is in ffn cache, or mark it if it can be added
+ * later
+ */
+int ipv6_ffn_process(struct sk_buff *skb)
+{
+	struct ffn6_lookup_entry *e;
+	struct nf_conntrack *nfct;
+	struct ipv6hdr *iph;
+	struct tcphdr *tcph = NULL;
+	struct udphdr *udph = NULL;
+	struct ffn6_lookup_key k;
+	bool remove_me, drop_ct;
+	int added_when;
+	u8 proto;
+	int res;
+
+	if (!net_eq(dev_net(skb->dev), &init_net))
+		goto not_ffnable;
+
+	iph = ipv6_hdr(skb);
+
+	/* check encapsulated protocol is udp or tcp */
+	proto = iph->nexthdr;
+	if (proto != IPPROTO_TCP && proto != IPPROTO_UDP)
+		goto not_ffnable;
+
+	if (iph->hop_limit <= 1 || !iph->payload_len)
+		goto not_ffnable;
+
+	/* TODO: implement this later, no hardware to test for now */
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		goto not_ffnable;
+
+	proto = iph->nexthdr;
+	if (proto == IPPROTO_TCP) {
+		if (skb_headlen(skb) < sizeof (*iph) + sizeof (struct tcphdr))
+			goto not_ffnable;
+
+		tcph = (struct tcphdr *)((unsigned char *)iph + sizeof (*iph));
+
+		if (tcph->doff * 4 < sizeof (struct tcphdr))
+			goto not_ffnable;
+
+		if (skb_headlen(skb) < sizeof (*iph) + tcph->doff * 4)
+			goto not_ffnable;
+
+		k.sport = tcph->source;
+		k.dport = tcph->dest;
+		k.is_tcp = true;
+	} else {
+
+		if (skb_headlen(skb) < sizeof (*iph) + sizeof (struct udphdr))
+			goto not_ffnable;
+
+		udph = (struct udphdr *)((unsigned char *)iph + sizeof (*iph));
+		k.sport = udph->source;
+		k.dport = udph->dest;
+		k.is_tcp = false;
+	}
+
+	rcu_read_lock();
+
+	k.sip = iph->saddr.s6_addr32;
+	k.dip = iph->daddr.s6_addr32;
+
+	e = __ffn6_get_rcu(&k);
+	if (!e) {
+		rcu_read_unlock();
+		goto ffnable;
+	}
+
+	if (e->manip.dst->obsolete > 0) {
+		rcu_read_unlock();
+		ffn6_find_and_remove(&k);
+		goto ffnable;
+	}
+
+	nfct = &e->manip.ct->ct_general;
+	nf_conntrack_get(nfct);
+
+	remove_me = false;
+	if (proto == IPPROTO_TCP) {
+		/* do sequence number checking and update
+		 * conntrack info */
+		res = external_tcpv6_packet(e->manip.ct, skb, sizeof (*iph),
+					    e->manip.ctinfo);
+		if (e->manip.ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+			remove_me = true;
+	} else {
+		res = external_udpv6_packet(e->manip.ct, skb, sizeof (*iph),
+					    e->manip.ctinfo);
+	}
+
+	if (unlikely(res != NF_ACCEPT)) {
+		/* packet rejected by conntrack, unless asked to drop,
+		 * send it back into kernel */
+		rcu_read_unlock();
+		nf_conntrack_put(nfct);
+
+		if (remove_me)
+			ffn6_find_and_remove(&k);
+
+		if (res == NF_DROP) {
+			dev_kfree_skb(skb);
+			return 0;
+		}
+
+		goto ffnable;
+	}
+
+	if (!e->manip.alter)
+		goto fix_ip_hdr;
+
+	/* fix ports & transport protocol checksum */
+	if (proto == IPPROTO_TCP) {
+		tcph->source = e->manip.new_sport;
+		tcph->dest = e->manip.new_dport;
+		tcph->check = csum16_sub(tcph->check, e->manip.adjustment);
+	} else {
+		udph->source = e->manip.new_sport;
+		udph->dest = e->manip.new_dport;
+		if (udph->check) {
+			u16 tcheck;
+
+			tcheck = csum16_sub(udph->check, e->manip.adjustment);
+			udph->check = tcheck ? tcheck : 0xffff;
+		}
+	}
+
+	memcpy(iph->saddr.s6_addr32, e->manip.new_sip, 16);
+	memcpy(iph->daddr.s6_addr32, e->manip.new_dip, 16);
+
+fix_ip_hdr:
+	/* update IP header field */
+	iph->hop_limit--;
+	if (e->manip.tos_change)
+		ipv6_change_dsfield(iph, 0, e->manip.new_tos);
+
+	if (e->manip.force_skb_prio)
+		skb->priority = e->manip.new_skb_prio;
+	else
+		skb->priority = rt_tos2priority(ipv6_get_dsfield(iph));
+
+	skb->mark = e->manip.new_mark;
+
+#ifdef CONFIG_IPV6_FFN_PROCFS
+	e->forwarded_packets++;
+	e->forwarded_bytes += skb->len;
+#endif
+
+	skb_dst_drop(skb);
+	skb_dst_set(skb, dst_clone(e->manip.dst));
+
+	added_when = e->added_when;
+
+	drop_ct = true;
+	if (nfct != skb_nfct(skb)) {
+		if (unlikely(skb_nfct(skb) != NULL)) {
+			/*
+			 * conntrack is not NULL here and it is not
+			 * the same as the one we have in the
+			 * ffn_entry, this should not happen, warn once
+			 * and switch to slow path.
+			 */
+			WARN_ONCE(1,
+				  "weird skb->nfct %p, NULL was expected\n",
+				  skb_nfct(skb));
+			printk_once(KERN_WARNING "ffn entry:\n"
+				    " added_when: %i\n"
+				    " sip: %pI6 -> %pI6\n"
+				    " dip: %pI6 -> %pI6\n"
+				    " sport: %u -> %u\n"
+				    " dport: %u -> %u\n",
+				    e->added_when,
+				    e->sip, e->manip.new_sip,
+				    e->dip, e->manip.new_dip,
+				    htons(e->sport), htons(e->manip.new_sport),
+				    htons(e->dport), htons(e->manip.new_dport));
+			rcu_read_unlock();
+
+			if (remove_me)
+				ffn6_find_and_remove(&k);
+
+			goto not_ffnable;
+		}
+		nf_ct_set(skb, (struct nf_conn *)nfct, e->manip.ctinfo);
+		drop_ct = false;
+	}
+
+	rcu_read_unlock();
+	if (unlikely(remove_me))
+		ffn6_find_and_remove(&k);
+
+	if (drop_ct) {
+		/*
+		 * skbs to/from localhost will have the conntrack
+		 * already set, don't leak references here.
+		 */
+		nf_conntrack_put(nfct);
+	}
+
+	skb->ffn_state = FFN_STATE_FAST_FORWARDED;
+
+	if (added_when == IPV6_FFN_FINISH_OUT)
+		dst_output(&init_net, skb->sk, skb);
+	else
+		ip6_input_finish(&init_net, skb->sk, skb);
+
+	return 0;
+
+ffnable:
+	skb->ffn_state = FFN_STATE_FORWARDABLE;
+	skb->ffn_orig_tos = ipv6_get_dsfield(iph);
+	return 1;
+
+not_ffnable:
+	skb->ffn_state = FFN_STATE_INCOMPATIBLE;
+	return 1;
+}
+
+/*
+ * compute the 16-bit checksum delta between the old and new address/port tuples
+ */
+static inline __sum16 checksum_adjust(const u32 *osip,
+				      const u32 *nsip,
+				      const u32 *odip,
+				      const u32 *ndip,
+				      u16 osport,
+				      u16 nsport,
+				      u16 odport,
+				      u16 ndport)
+{
+	const u32 oports[] = { osport, odport };
+	const u32 nports[] = { nsport, ndport };
+	__wsum osum, nsum;
+
+	osum = csum_partial(osip, 16, 0);
+	osum = csum_partial(odip, 16, osum);
+	osum = csum_partial(oports, 8, osum);
+
+	nsum = csum_partial(nsip, 16, 0);
+	nsum = csum_partial(ndip, 16, nsum);
+	nsum = csum_partial(nports, 8, nsum);
+
+	return ~csum_fold(csum_sub(nsum, osum));
+}
+
+/*
+ * check if skb is candidate for ffn, and if so add it to ffn cache
+ *
+ * called after post routing
+ */
+void ipv6_ffn_add(struct sk_buff *skb, int when)
+{
+	struct nf_conn *ct;
+	struct nf_conntrack_tuple *tuple, *rtuple;
+	enum ip_conntrack_info ctinfo;
+	struct ffn6_lookup_entry *e;
+	struct ipv6hdr *iph;
+	int dir;
+	struct net *skb_net;
+	u8 tos;
+
+	skb_net = dev_net(skb->dev);
+	if (unlikely(!net_eq(skb_net, &init_net)))
+		return;
+
+	/* make sure external_tcp_packet/external_udp_packet won't
+	 * attempt to checksum packet, the ffn code does not update
+	 * skb->csum, which must stay valid if skb_checksum_complete
+	 * has been called */
+	if (unlikely(skb_net->ct.sysctl_checksum))
+		skb_net->ct.sysctl_checksum = 0;
+
+	if (ffn6_entry_count >= MAX_FFN6_ENTRY)
+		return;
+
+	iph = ipv6_hdr(skb);
+
+	if ((when == IPV6_FFN_FINISH_OUT &&
+	     skb_dst(skb)->output != ip6_output))
+		return;
+
+	ct = nf_ct_get(skb, &ctinfo);
+	if (!ct || ctinfo == IP_CT_UNTRACKED)
+		return;
+
+	if ((ctinfo != IP_CT_ESTABLISHED) &&
+	    (ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY)) {
+		return;
+	}
+
+	if (nfct_help(ct))
+		return;
+
+	dir = (ctinfo == IP_CT_ESTABLISHED) ?
+		IP_CT_DIR_ORIGINAL : IP_CT_DIR_REPLY;
+	tuple = &ct->tuplehash[dir].tuple;
+
+	if (tuple->dst.protonum != IPPROTO_TCP &&
+	    tuple->dst.protonum != IPPROTO_UDP)
+		return;
+
+	if (tuple->dst.protonum == IPPROTO_TCP &&
+	    ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
+		return;
+
+	rtuple = &ct->tuplehash[1 - dir].tuple;
+
+	e = kmalloc(sizeof (*e), GFP_ATOMIC);
+	if (!e)
+		return;
+
+	e->added_when = when;
+	memcpy(e->sip, tuple->src.u3.ip6, 16);
+	memcpy(e->dip, tuple->dst.u3.ip6, 16);
+	e->sport = tuple->src.u.all;
+	e->dport = tuple->dst.u.all;
+	e->protocol = tuple->dst.protonum;
+
+#ifdef CONFIG_IPV6_FFN_PROCFS
+	e->forwarded_packets = 0;
+	e->forwarded_bytes = 0;
+#endif
+
+	memcpy(e->manip.new_sip, rtuple->dst.u3.ip6, 16);
+	memcpy(e->manip.new_dip, rtuple->src.u3.ip6, 16);
+	e->manip.new_sport = rtuple->dst.u.all;
+	e->manip.new_dport = rtuple->src.u.all;
+
+	if (!memcmp(e->manip.new_sip, e->sip, 16) &&
+	    !memcmp(e->manip.new_dip, e->dip, 16) &&
+	    e->manip.new_sport == e->sport &&
+	    e->manip.new_dport == e->dport)
+		e->manip.alter = 0;
+	else
+		e->manip.alter = 1;
+
+	if (e->manip.alter) {
+		/* compute checksum adjustment */
+		e->manip.adjustment = checksum_adjust(e->sip,
+						      e->manip.new_sip,
+						      e->dip,
+						      e->manip.new_dip,
+						      e->sport,
+						      e->manip.new_sport,
+						      e->dport,
+						      e->manip.new_dport);
+	}
+
+	tos = ipv6_get_dsfield(iph);
+	if (skb->ffn_orig_tos != tos) {
+		e->manip.tos_change = 1;
+		e->manip.new_tos = tos;
+	} else
+		e->manip.tos_change = 0;
+
+	if (skb->priority != rt_tos2priority(tos)) {
+		e->manip.force_skb_prio = 1;
+		e->manip.new_skb_prio = skb->priority;
+	} else
+		e->manip.force_skb_prio = 0;
+
+	e->manip.new_mark = skb->mark;
+	e->manip.dst = skb_dst(skb);
+	e->manip.priv_destructor = NULL;
+	dst_hold(e->manip.dst);
+	e->manip.ct = ct;
+	e->manip.ctinfo = ctinfo;
+
+	spin_lock_bh(&ffn_lock);
+	if (__ffn6_add_entry(e)) {
+		spin_unlock_bh(&ffn_lock);
+		dst_release(e->manip.dst);
+		kfree(e);
+		return;
+	}
+	spin_unlock_bh(&ffn_lock);
+}
+
+/*
+ * netfilter callback when conntrack is about to be destroyed
+ */
+void ipv6_ffn_ct_destroy(struct nf_conn *ct);
+void ipv6_ffn_ct_destroy(struct nf_conn *ct)
+{
+	struct nf_conntrack_tuple *tuple;
+	int dir;
+
+	/* locate all entries that use this conntrack */
+	for (dir = 0; dir < 2; dir++) {
+		struct ffn6_lookup_key k;
+
+		tuple = &ct->tuplehash[dir].tuple;
+
+		if (tuple->dst.protonum != IPPROTO_TCP &&
+		    tuple->dst.protonum != IPPROTO_UDP)
+			return;
+
+		k.sip = tuple->src.u3.ip6;
+		k.dip = tuple->dst.u3.ip6;
+		k.sport = tuple->src.u.all;
+		k.dport = tuple->dst.u.all;
+		k.is_tcp = (tuple->dst.protonum == IPPROTO_TCP);
+		ffn6_find_and_remove(&k);
+	}
+}
+
+/*
+ * initialize ffn cache data
+ */
+static void __ipv6_ffn_init_cache(void)
+{
+	int i;
+
+	for (i = 0; i < FFN6_CACHE_SIZE; i++)
+		INIT_LIST_HEAD(&ffn6_cache[i]);
+	INIT_LIST_HEAD(&ffn6_all);
+	ffn6_entry_count = 0;
+}
+
+/*
+ * flush all ffn cache
+ */
+void ipv6_ffn_flush_all(void)
+{
+	struct ffn6_lookup_entry *e, *tmp;
+
+	spin_lock_bh(&ffn_lock);
+	list_for_each_entry_safe(e, tmp, &ffn6_all, all_next)
+		__ffn6_remove_entry(e);
+	spin_unlock_bh(&ffn_lock);
+}
+
+#ifdef CONFIG_IPV6_FFN_PROCFS
+struct proc_dir_entry *proc_net_ipv6_ffn;
+
+static int ipv6_ffn_entries_show(struct seq_file *m, void *v)
+{
+	int i;
+
+	rcu_read_lock();
+
+	for (i = 0; i < FFN6_CACHE_SIZE; ++i) {
+		struct ffn6_lookup_entry *e;
+
+		if (list_empty(&ffn6_cache[i]))
+			continue;
+
+		seq_printf(m, "Bucket %i:\n", i);
+		list_for_each_entry_rcu(e, &ffn6_cache[i], next) {
+			seq_printf(m, " Protocol: ");
+			switch (e->protocol) {
+			case IPPROTO_TCP:
+				seq_printf(m, "TCPv6\n");
+				break;
+			case IPPROTO_UDP:
+				seq_printf(m, "UDPv6\n");
+				break;
+			default:
+				seq_printf(m, "ipproto_%i\n", e->protocol);
+				break;
+			}
+
+			seq_printf(m, " Original flow: %pI6:%u -> %pI6:%u\n",
+				   e->sip,
+				   ntohs(e->sport),
+				   e->dip,
+				   ntohs(e->dport));
+
+			if (memcmp(e->sip, e->manip.new_sip, 16) ||
+			    memcmp(e->dip, e->manip.new_dip, 16) ||
+			    e->sport != e->manip.new_sport ||
+			    e->dport != e->manip.new_dport) {
+				seq_printf(m,
+					   " Modified flow: %pI6:%u -> "
+					   "%pI6:%u\n",
+					   e->manip.new_sip,
+					   ntohs(e->manip.new_sport),
+					   e->manip.new_dip,
+					   ntohs(e->manip.new_dport));
+			}
+
+			seq_printf(m, "  Forwarded packets: %u\n",
+				   e->forwarded_packets);
+			seq_printf(m, "  Forwarded bytes: %llu\n",
+				   e->forwarded_bytes);
+			seq_printf(m, "\n");
+		}
+	}
+
+	rcu_read_unlock();
+	return 0;
+}
+
+static int ipv6_ffn_entries_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ipv6_ffn_entries_show, NULL);
+}
+
+static const struct proc_ops ipv6_ffn_entries_fops = {
+	.proc_open	= ipv6_ffn_entries_open,
+	.proc_release	= single_release,
+	.proc_read	= seq_read,
+	.proc_lseek	= seq_lseek,
+};
+
+static int __init __ipv6_ffn_init_procfs(void)
+{
+	proc_net_ipv6_ffn = proc_net_mkdir(&init_net, "ipv6_ffn",
+					 init_net.proc_net);
+	if (!proc_net_ipv6_ffn) {
+		printk(KERN_ERR "proc_mkdir() has failed "
+		       "for 'net/ipv6_ffn'.\n");
+		return -1;
+	}
+
+	if (proc_create("entries", 0400, proc_net_ipv6_ffn,
+			&ipv6_ffn_entries_fops) == NULL) {
+		printk(KERN_ERR "proc_create() has failed for "
+		       "'net/ipv6_ffn/entries'.\n");
+		return -1;
+	}
+	return 0;
+}
+#endif
+
+/*
+ * initialize ffn
+ */
+void __init ipv6_ffn_init(void)
+{
+	printk("IPv6 Fast Forward and NAT enabled\n");
+	__ipv6_ffn_init_cache();
+
+#ifdef CONFIG_IPV6_FFN_PROCFS
+	if (__ipv6_ffn_init_procfs() < 0)
+		printk(KERN_WARNING "IPv6 FFN: unable to create proc entries.\n");
+#endif
+}
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/net/mac80211/fbx_scum.h	2025-09-25 17:40:37.815378373 +0200
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2023 Freebox
+ */
+#ifndef FBX_SCUM_H
+#define FBX_SCUM_H
+
+#ifdef CONFIG_FBX80211_SCUM
+static inline bool fbx80211_skip_mon(struct ieee80211_sub_if_data *sdata)
+{
+	return sdata->u.mntr.scum.skip_mon;
+}
+#else
+static inline bool fbx80211_skip_mon(struct ieee80211_sub_if_data *sdata)
+{
+	return false;
+}
+#endif
+#endif
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/net/mac80211/fbx_scum_monif.h	2025-09-29 14:23:07.621732489 +0200
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright(c) 2024 Freebox
+ */
+
+#ifndef FBX_SCUM_MONIF_H
+#define FBX_SCUM_MONIF_H
+
+#ifdef CONFIG_FBX80211_SCUM
+void fbx80211_scum_mon_rx(struct ieee80211_local *local, struct sk_buff *skb,
+			  struct ieee80211_rate *rate, unsigned int rtap_space);
+struct fbx80211_scum_ops *fbx80211_scum_monif_create(struct ieee80211_hw *hw,
+						     struct ieee80211_vif *vif);
+#else
+static inline
+void fbx80211_scum_mon_rx(struct ieee80211_local *local, struct sk_buff *skb,
+			  struct ieee80211_rate *rate, unsigned int rtap_space)
+{
+}
+
+static inline
+struct fbx80211_scum_ops *fbx80211_scum_monif_create(struct ieee80211_hw *hw,
+						     struct ieee80211_vif *vif)
+{
+	return NULL;
+}
+#endif
+
+#endif
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/net/wireless/nlfbx.h	2025-09-25 17:40:37.959379088 +0200
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Freebox
+ */
+
+#ifndef NLFBX_H
+#define NLFBX_H
+
+#ifdef CONFIG_FBX80211
+int nlfbx_init(void);
+void nlfbx_exit(void);
+#else
+static inline int nlfbx_init(void)
+{
+	return 0;
+}
+
+static inline void nlfbx_exit(void)
+{
+}
+#endif
+
+#endif /* NLFBX_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/net/wireless/sysfs-radio.c	2025-09-25 17:40:37.963379107 +0200
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This file provides /sys/class/ieee80211/<wiphy name>/
+ * and some default attributes.
+ *
+ * Copyright 2005-2006	Jiri Benc <jbenc@suse.cz>
+ * Copyright 2006	Johannes Berg <johannes@sipsolutions.net>
+ * Copyright (C) 2020-2021, 2023-2024 Intel Corporation
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/nl80211.h>
+#include <linux/rtnetlink.h>
+#include <net/cfg80211.h>
+#include "sysfs.h"
+#include "core.h"
+#include "rdev-ops.h"
+
+static inline struct wiphy_radio_dev *dev_to_radio(struct device *dev)
+{
+	return (struct wiphy_radio_dev *)dev->platform_data;
+}
+
+static void wiphy_radio_dev_release(struct device *dev)
+{
+	struct wiphy_radio_dev *radio = dev_to_radio(dev);
+	put_device(&radio->wiphy->dev);
+	kfree(radio);
+}
+
+#define SHOW_FMT(name, fmt, member)					\
+static ssize_t name ## _show(struct device *dev,			\
+			      struct device_attribute *attr,		\
+			      char *buf)				\
+{									\
+	return sprintf(buf, fmt "\n", dev_to_radio(dev)->member);	\
+}									\
+static DEVICE_ATTR_RO(name)
+
+SHOW_FMT(index, "%d", idx);
+
+static ssize_t phy_index_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct wiphy_radio_dev *radio = dev_to_radio(dev);
+	return sprintf(buf, "%d\n", wiphy_to_rdev(radio->wiphy)->wiphy_idx);
+}
+static DEVICE_ATTR_RO(phy_index);
+
+static ssize_t radio_count_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	struct wiphy_radio_dev *radio = dev_to_radio(dev);
+	return sprintf(buf, "%d\n", radio->wiphy->n_radio_dev);
+}
+static DEVICE_ATTR_RO(radio_count);
+
+static ssize_t name_show(struct device *dev,
+			 struct device_attribute *attr,
+			 char *buf)
+{
+	struct wiphy_radio_dev *radio = dev_to_radio(dev);
+	return sprintf(buf, "%s_radio%d\n",
+		       wiphy_name(radio->wiphy), radio->idx);
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t addresses_show(struct device *dev,
+			      struct device_attribute *attr,
+			      char *buf)
+{
+	struct wiphy_radio_dev *radio = dev_to_radio(dev);
+	struct wiphy *wiphy = radio->wiphy;
+	const u8 *mac;
+
+	if (!wiphy->n_radio) {
+		char *start = buf;
+		int i;
+
+		if (!wiphy->addresses)
+			return sprintf(buf, "%pM\n", wiphy->perm_addr);
+
+		for (i = 0; i < wiphy->n_addresses; i++)
+			buf += sprintf(buf, "%pM\n", wiphy->addresses[i].addr);
+
+		return buf - start;
+	}
+
+	if (wiphy->n_addresses > radio->idx)
+		mac = wiphy->addresses[radio->idx].addr;
+	else {
+		if (radio->idx > 0)
+			return 0;
+		mac = wiphy->perm_addr;
+	}
+
+	return sprintf(buf, "%pM\n", mac);
+}
+static DEVICE_ATTR_RO(addresses);
+
+static struct attribute *ieee80211_radio_attrs[] = {
+	&dev_attr_index.attr,
+	&dev_attr_addresses.attr,
+	&dev_attr_phy_index.attr,
+	&dev_attr_radio_count.attr,
+	&dev_attr_name.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(ieee80211_radio);
+
+struct class ieee80211_radio_class = {
+	.name = "ieee80211_radio",
+	.dev_release = wiphy_radio_dev_release,
+	.dev_groups = ieee80211_radio_groups,
+};
+
+int wiphy_sysfs_radio_init(void)
+{
+	return class_register(&ieee80211_radio_class);
+}
+
+void wiphy_sysfs_radio_exit(void)
+{
+	class_unregister(&ieee80211_radio_class);
+}
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/scripts/dtbs.sh	2025-09-25 17:40:37.999379286 +0200
@@ -0,0 +1,23 @@
+#!/bin/sh
+##  dtbs.sh for kernel
+##  Created by <nschichan@freebox.fr> on Thu Jul 26 13:28:58 2018
+##
+
+dtb_align=32
+
+out=$1
+shift
+
+echo "$@"
+for i in "$@"; do
+    sz=$(stat -c %s "$i")
+    mod=$((sz % $dtb_align))
+    padd=$((mod > 0 ? $dtb_align - $mod : 0))
+
+    # echo $(basename $i): mod $mod padd $padd
+    cat "$i"
+
+    if [ "$padd" != 0 ]; then
+	dd if=/dev/zero bs="$padd" count=1 2>/dev/null
+    fi
+done > "$out"
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12/scripts/dtc/include-prefixes/arm64/amlogic/meson-g12a-fbx8am-brcm.dtso	2025-04-20 10:18:30.000000000 +0200
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2024 Freebox SAS
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+
+&uart_A {
+	bluetooth {
+		compatible = "brcm,bcm43438-bt";
+		shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+		max-speed = <2000000>;
+		clocks = <&wifi32k>;
+		clock-names = "lpo";
+		vbat-supply = <&vddao_3v3>;
+		vddio-supply = <&vddio_ao1v8>;
+	};
+};
+
+&sd_emmc_a {
+	/* Per mmc-controller.yaml */
+	#address-cells = <1>;
+	#size-cells = <0>;
+	/* NB: may be either AP6398S or AP6398SR3 wifi module */
+	brcmf: wifi@1 {
+		reg = <1>;
+		compatible = "brcm,bcm4329-fmac";
+	};
+};
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12/scripts/dtc/include-prefixes/arm64/amlogic/meson-g12a-fbx8am-realtek.dtso	2025-04-20 10:18:30.000000000 +0200
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2024 Freebox SAS
+
+/dts-v1/;
+/plugin/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+
+&uart_A {
+	bluetooth {
+		compatible = "realtek,rtl8822cs-bt";
+		enable-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
+		host-wake-gpios = <&gpio GPIOX_19 GPIO_ACTIVE_HIGH>;
+		device-wake-gpios = <&gpio GPIOX_18 GPIO_ACTIVE_HIGH>;
+	};
+};
+
+&sd_emmc_a {
+	/* No explicit compatible for rtl8822cs sdio */
+};
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12/scripts/dtc/include-prefixes/arm64/amlogic/meson-g12a-fbx8am.dts	2025-04-20 10:18:30.000000000 +0200
@@ -0,0 +1,466 @@
+// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+// Copyright (c) 2024 Freebox SAS
+
+/*
+ * SEI codename: SEI530FB (based on SEI510)
+ * Freebox codename: fbx8am
+ * Commercial names: Freebox Pop, Player TV Free 4K
+ */
+
+/dts-v1/;
+
+#include "meson-g12a.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+#include <dt-bindings/gpio/meson-g12a-gpio.h>
+#include <dt-bindings/sound/meson-g12a-tohdmitx.h>
+
+/ {
+	compatible = "freebox,fbx8am", "amlogic,g12a";
+	model = "Freebox Player Pop";
+	chassis-type = "embedded";
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+		};
+	};
+
+	gpio-keys-polled {
+		compatible = "gpio-keys-polled";
+		poll-interval = <100>;
+
+		/* Physical user-accessible reset button near USB port */
+		power-button {
+			label = "Reset";
+			linux,code = <BTN_MISC>;
+			gpios = <&gpio_ao GPIOAO_3 GPIO_ACTIVE_HIGH>;
+		};
+	};
+
+	spdif_dit: audio-codec-2 {
+		#sound-dai-cells = <0>;
+		compatible = "linux,spdif-dit";
+		status = "okay";
+		sound-name-prefix = "DIT";
+	};
+
+	aliases {
+		serial0 = &uart_AO;
+		ethernet0 = &ethmac;
+	};
+
+	chosen {
+		stdout-path = "serial0:115200n8";
+	};
+
+	emmc_pwrseq: emmc-pwrseq {
+		compatible = "mmc-pwrseq-emmc";
+		reset-gpios = <&gpio BOOT_12 GPIO_ACTIVE_LOW>;
+	};
+
+	hdmi-connector {
+		compatible = "hdmi-connector";
+		type = "a";
+
+		port {
+			hdmi_connector_in: endpoint {
+				remote-endpoint = <&hdmi_tx_tmds_out>;
+			};
+		};
+	};
+
+	memory@0 {
+		device_type = "memory";
+		reg = <0x0 0x0 0x0 0x80000000>;
+	};
+
+	ao_5v: regulator-ao-5v {
+		compatible = "regulator-fixed";
+		regulator-name = "AO_5V";
+		regulator-min-microvolt = <5000000>;
+		regulator-max-microvolt = <5000000>;
+		vin-supply = <&dc_in>;
+		regulator-always-on;
+	};
+
+	dc_in: regulator-dc-in {
+		compatible = "regulator-fixed";
+		regulator-name = "DC_IN";
+		regulator-min-microvolt = <12000000>;
+		regulator-max-microvolt = <12000000>;
+		regulator-always-on;
+	};
+
+	emmc_1v8: regulator-emmc-1v8 {
+		compatible = "regulator-fixed";
+		regulator-name = "EMMC_1V8";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		vin-supply = <&vddao_3v3>;
+		regulator-always-on;
+	};
+
+	vddao_3v3: regulator-vddao-3v3 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDAO_3V3";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		vin-supply = <&ao_5v>;
+		regulator-always-on;
+	};
+
+	vddao_3v3_t: regulator-vddao-3v3-t {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDAO_3V3_T";
+		regulator-min-microvolt = <3300000>;
+		regulator-max-microvolt = <3300000>;
+		vin-supply = <&vddao_3v3>;
+		gpio = <&gpio GPIOH_8 GPIO_OPEN_DRAIN>;
+		enable-active-high;
+	};
+
+	vddcpu: regulator-vddcpu {
+		/*
+		 * SY8120B1ABC DC/DC Regulator.
+		 */
+		compatible = "pwm-regulator";
+
+		regulator-name = "VDDCPU";
+		regulator-min-microvolt = <721000>;
+		regulator-max-microvolt = <1022000>;
+
+		pwm-supply = <&ao_5v>;
+
+		pwms = <&pwm_AO_cd 1 1250 0>;
+		pwm-dutycycle-range = <100 0>;
+
+		regulator-boot-on;
+		regulator-always-on;
+	};
+
+	vddio_ao1v8: regulator-vddio-ao1v8 {
+		compatible = "regulator-fixed";
+		regulator-name = "VDDIO_AO1V8";
+		regulator-min-microvolt = <1800000>;
+		regulator-max-microvolt = <1800000>;
+		vin-supply = <&vddao_3v3>;
+		regulator-always-on;
+	};
+
+	sdio_pwrseq: sdio-pwrseq {
+		compatible = "mmc-pwrseq-simple";
+		reset-gpios = <&gpio GPIOX_6 GPIO_ACTIVE_LOW>;
+		post-power-on-delay-ms = <10>; /* required for 43752 */
+		clocks = <&wifi32k>;
+		clock-names = "ext_clock";
+	};
+
+	wifi32k: wifi32k {
+		compatible = "pwm-clock";
+		#clock-cells = <0>;
+		clock-frequency = <32768>;
+		pwms = <&pwm_ef 0 30518 0>; /* PWM_E at 32.768KHz */
+	};
+
+	sound {
+		compatible = "amlogic,axg-sound-card";
+		model = "fbx8am";
+		audio-aux-devs = <&tdmout_b>;
+		audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1",
+				"TDMOUT_B IN 1", "FRDDR_B OUT 1",
+				"TDMOUT_B IN 2", "FRDDR_C OUT 1",
+				"TDM_B Playback", "TDMOUT_B OUT",
+				"SPDIFOUT_A IN 0", "FRDDR_A OUT 3",
+				"SPDIFOUT_A IN 1", "FRDDR_B OUT 3",
+				"SPDIFOUT_A IN 2", "FRDDR_C OUT 3";
+
+		clocks = <&clkc CLKID_MPLL2>,
+			 <&clkc CLKID_MPLL0>,
+			 <&clkc CLKID_MPLL1>;
+
+		assigned-clocks = <&clkc CLKID_MPLL2>,
+				  <&clkc CLKID_MPLL0>,
+				  <&clkc CLKID_MPLL1>;
+		assigned-clock-parents = <0>, <0>, <0>;
+		assigned-clock-rates = <294912000>,
+				       <270950400>,
+				       <393216000>;
+
+		dai-link-0 {
+			sound-dai = <&frddr_a>;
+		};
+
+		dai-link-1 {
+			sound-dai = <&frddr_b>;
+		};
+
+		dai-link-2 {
+			sound-dai = <&frddr_c>;
+		};
+
+		/* 8ch hdmi interface */
+		dai-link-3 {
+			sound-dai = <&tdmif_b>;
+			dai-format = "i2s";
+			dai-tdm-slot-tx-mask-0 = <1 1>;
+			dai-tdm-slot-tx-mask-1 = <1 1>;
+			dai-tdm-slot-tx-mask-2 = <1 1>;
+			dai-tdm-slot-tx-mask-3 = <1 1>;
+			mclk-fs = <256>;
+
+			codec {
+				sound-dai = <&tohdmitx TOHDMITX_I2S_IN_B>;
+			};
+		};
+
+		/* spdif hdmi or toslink interface */
+		dai-link-4 {
+			sound-dai = <&spdifout_a>;
+
+			codec-0 {
+			sound-dai = <&spdif_dit>;
+			};
+
+			codec-1 {
+				sound-dai = <&tohdmitx TOHDMITX_SPDIF_IN_A>;
+			};
+		};
+
+		/* spdif hdmi interface */
+		dai-link-5 {
+			sound-dai = <&spdifout_b>;
+
+			codec {
+				sound-dai = <&tohdmitx TOHDMITX_SPDIF_IN_B>;
+			};
+		};
+
+		/* hdmi glue */
+		dai-link-6 {
+			sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>;
+
+			codec {
+				sound-dai = <&hdmi_tx>;
+			};
+		};
+	};
+};
+
+&arb {
+	status = "okay";
+};
+
+&cecb_AO {
+	pinctrl-0 = <&cec_ao_b_h_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+	hdmi-phandle = <&hdmi_tx>;
+};
+
+&clkc_audio {
+	status = "okay";
+};
+
+&cpu0 {
+	cpu-supply = <&vddcpu>;
+	operating-points-v2 = <&cpu_opp_table>;
+	clocks = <&clkc CLKID_CPU_CLK>;
+	clock-latency = <50000>;
+};
+
+&cpu1 {
+	cpu-supply = <&vddcpu>;
+	operating-points-v2 = <&cpu_opp_table>;
+	clocks = <&clkc CLKID_CPU_CLK>;
+	clock-latency = <50000>;
+};
+
+&cpu2 {
+	cpu-supply = <&vddcpu>;
+	operating-points-v2 = <&cpu_opp_table>;
+	clocks = <&clkc CLKID_CPU_CLK>;
+	clock-latency = <50000>;
+};
+
+&cpu3 {
+	cpu-supply = <&vddcpu>;
+	operating-points-v2 = <&cpu_opp_table>;
+	clocks = <&clkc CLKID_CPU_CLK>;
+	clock-latency = <50000>;
+};
+
+&ethmac {
+	status = "okay";
+	phy-handle = <&internal_ephy>;
+	phy-mode = "rmii";
+};
+
+&frddr_a {
+	status = "okay";
+};
+
+&frddr_b {
+	status = "okay";
+};
+
+&frddr_c {
+	status = "okay";
+};
+
+&spdifout_a {
+	pinctrl-0 = <&spdif_out_h_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&spdifout_b {
+	status = "okay";
+};
+
+&hdmi_tx {
+	status = "okay";
+	pinctrl-0 = <&hdmitx_hpd_pins>, <&hdmitx_ddc_pins>;
+	pinctrl-names = "default";
+};
+
+&hdmi_tx_tmds_port {
+	hdmi_tx_tmds_out: endpoint {
+		remote-endpoint = <&hdmi_connector_in>;
+	};
+};
+
+&i2c3 {
+	status = "okay";
+	pinctrl-0 = <&i2c3_sda_a_pins>, <&i2c3_sck_a_pins>;
+	pinctrl-names = "default";
+};
+
+&ir {
+	status = "okay";
+	pinctrl-0 = <&remote_input_ao_pins>;
+	pinctrl-names = "default";
+};
+
+&pwm_AO_cd {
+	pinctrl-0 = <&pwm_ao_d_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&xtal>;
+	clock-names = "clkin1";
+	status = "okay";
+};
+
+&pwm_ef {
+	status = "okay";
+	pinctrl-0 = <&pwm_e_pins>;
+	pinctrl-names = "default";
+	clocks = <&xtal>;
+	clock-names = "clkin0";
+};
+
+&pdm {
+	pinctrl-0 = <&pdm_din0_z_pins>, <&pdm_din1_z_pins>,
+		    <&pdm_din2_z_pins>, <&pdm_din3_z_pins>,
+		    <&pdm_dclk_z_pins>;
+	pinctrl-names = "default";
+	status = "okay";
+};
+
+&saradc {
+	status = "okay";
+	vref-supply = <&vddio_ao1v8>;
+};
+
+/* SDIO */
+&sd_emmc_a {
+	status = "okay";
+	pinctrl-0 = <&sdio_pins>;
+	pinctrl-1 = <&sdio_clk_gate_pins>;
+	pinctrl-names = "default", "clk-gate";
+	#address-cells = <1>;
+	#size-cells = <0>;
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	sd-uhs-sdr50;
+	max-frequency = <100000000>;
+
+	non-removable;
+	disable-wp;
+
+	/* WiFi firmware requires power to be kept while in suspend */
+	keep-power-in-suspend;
+
+	mmc-pwrseq = <&sdio_pwrseq>;
+
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddio_ao1v8>;
+};
+
+/* SD card */
+&sd_emmc_b {
+	status = "okay";
+	pinctrl-0 = <&sdcard_c_pins>;
+	pinctrl-1 = <&sdcard_clk_gate_c_pins>;
+	pinctrl-names = "default", "clk-gate";
+
+	bus-width = <4>;
+	cap-sd-highspeed;
+	max-frequency = <50000000>;
+	disable-wp;
+
+	cd-gpios = <&gpio GPIOC_6 GPIO_ACTIVE_LOW>;
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&vddao_3v3>;
+};
+
+/* eMMC */
+&sd_emmc_c {
+	status = "okay";
+	pinctrl-0 = <&emmc_ctrl_pins>, <&emmc_data_8b_pins>, <&emmc_ds_pins>;
+	pinctrl-1 = <&emmc_clk_gate_pins>;
+	pinctrl-names = "default", "clk-gate";
+
+	bus-width = <8>;
+	cap-mmc-highspeed;
+	mmc-ddr-1_8v;
+	mmc-hs200-1_8v;
+	max-frequency = <200000000>;
+	non-removable;
+	disable-wp;
+
+	mmc-pwrseq = <&emmc_pwrseq>;
+	vmmc-supply = <&vddao_3v3>;
+	vqmmc-supply = <&emmc_1v8>;
+};
+
+&tdmif_b {
+	status = "okay";
+};
+
+&tdmout_b {
+	status = "okay";
+};
+
+&tohdmitx {
+	status = "okay";
+};
+
+&uart_A {
+	status = "okay";
+	pinctrl-0 = <&uart_a_pins>, <&uart_a_cts_rts_pins>;
+	pinctrl-names = "default";
+	uart-has-rtscts;
+};
+
+&uart_AO {
+	status = "okay";
+	pinctrl-0 = <&uart_ao_a_pins>;
+	pinctrl-names = "default";
+};
+
+&usb {
+	status = "okay";
+	dr_mode = "host";
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./Makefile linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/Makefile
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/Makefile	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,37 @@
+board-dtbs = \
+	fbxgw8r-board-00.dtb \
+	fbxgw8r-board-01.dtb \
+	fbxgw8r-board-02.dtb \
+	fbxgw8r-board-03.dtb \
+	fbxgw8r-board-04.dtb
+
+dtb-$(CONFIG_ARCH_BCMBCA) += bcm963158ref1d.dtb fbxgw8r.dtb $(board-dtbs)
+
+always-y	:= $(dtb-y)
+always-$(CONFIG_ARCH_BCMBCA) += fbxgw8r_dtbs
+
+subdir-y	:= $(dts-dirs)
+clean-files	:= *.dtb fbxgw8r_dtbs
+
+cmd_dtbs               = ./scripts/dtbs.sh $@ $^
+quiet_cmd_dtbs         = DTBS    $@
+
+# calculate size of file $1 aligned to a $2 boundary
+file_size = $(shell echo "sz=$$(stat -c %s $1); a=$2; (sz + a - 1) / a * a" | bc)
+
+# due to a bug in CFE v2.4, check that $1 and $2's sizes are okay and
+# will be accepted. in flash mode we need to account for the AES
+# padding (15 bytes at most)
+check_dtb_size	= if [ $(call file_size,$1,16) -gt $(call file_size,$2,1) ]; \
+	then echo "bad DTB size for CFE v2.4: $2's size must be greater than $1's size"; exit 1; fi
+
+$(obj)/fbxgw8r_dtbs: $(addprefix $(obj)/,$(board-dtbs))
+	$(Q)$(call check_dtb_size,$(obj)/fbxgw8r.dtb,$(obj)/fbxgw8r-board-00.dtb)
+	$(Q)$(call check_dtb_size,$(obj)/fbxgw8r.dtb,$(obj)/fbxgw8r-board-01.dtb)
+	$(call cmd,dtbs)
+
+# export symbols in DTBs file to allow overlay usage
+DTC_FLAGS	+= -@
+
+dtb-$(CONFIG_ARCH_BCMBCA) += fbxgw8r_pcie_pine_asmedia.dtb
+dtb-$(CONFIG_ARCH_BCMBCA) += fbxgw8r_pcie_pine_dualband_noswitch.dtb
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./bcm63158.dtsi linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/bcm63158.dtsi
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./bcm63158.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/bcm63158.dtsi	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,1126 @@
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/reset/brcm,bcm63xx-pmc.h>
+#include <dt-bindings/brcm,bcm63158-ubus.h>
+#include <dt-bindings/pinctrl/bcm63158-pinfunc.h>
+#include <dt-bindings/brcm,bcm63xx-pcie.h>
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/soc/broadcom,bcm63xx-xdslphy.h>
+#include <dt-bindings/soc/broadcom,bcm63158-procmon.h>
+
+
+#define USE_PSCI	// comment when booting on broadcom CFE.
+
+/*
+ * uncomment when xrdp reserved memory is needed for debugging.
+ */
+// #define USE_RDP_RESERVED_TM
+
+#define SDIO_EMMC_SPI                   95
+#define SPU_GMAC_SPI                    75
+#define HS_SPI_SPI			37
+#define BSC_I2C0_SPI			82
+#define BSC_I2C1_SPI			83
+#define PCIE0_SPI			60
+#define PCIE1_SPI			61
+#define PCIE2_SPI			62
+#define PCIE3_SPI			63
+#define HS_UART_SPI			34
+#define XHCI_SPI			123
+#define OHCI0_SPI			124
+#define EHCI0_SPI			125
+#define OHCI1_SPI			121
+#define EHCI1_SPI			122
+
+#define DRAM_BASE			0x0
+#define DRAM_DEF_SIZE			0x08000000
+
+/memreserve/ 0x00000000 0x00020000;
+
+/ {
+   	model = "Broadcom-v8A";
+	compatible = "brcm,brcm-v8A";
+	interrupt-parent = <&gic>;
+	#address-cells = <2>;
+	#size-cells = <2>;
+
+#ifdef USE_PSCI
+	psci {
+		compatible = "arm,psci-1.0";
+		method = "smc";
+	};
+#define CPU_ENABLE_METHOD "psci"
+#else
+#define CPU_ENABLE_METHOD "spin-table"
+#endif
+
+	firmware {
+		optee {
+			compatible = "linaro,optee-tz";
+			method = "smc";
+			skip-enumeration;
+		};
+	};
+
+        cpus {
+                #address-cells = <2>;
+		#size-cells = <0>;
+
+                B53_0: cpu@0 {
+                        device_type = "cpu";
+                        compatible = "arm,cortex-a53";
+			reg = <0x0 0x0>;
+                        next-level-cache = <&L2_0>;
+                };
+                B53_1: cpu@1 {
+                        device_type = "cpu";
+                        compatible = "arm,cortex-a53";
+		        reg = <0x0 0x1>;
+			enable-method = CPU_ENABLE_METHOD;
+                        cpu-release-addr = <0x0 0xfff8>;
+                        next-level-cache = <&L2_0>;
+                };
+                B53_2: cpu@2 {
+                        device_type = "cpu";
+			compatible = "arm,cortex-a53";
+                        reg = <0x0 0x2>;
+			enable-method = CPU_ENABLE_METHOD;
+                        cpu-release-addr = <0x0 0xfff8>;
+			next-level-cache = <&L2_0>;
+                };
+                B53_3: cpu@3 {
+                        device_type = "cpu";
+			compatible = "arm,cortex-a53";
+	                reg = <0x0 0x3>;
+			enable-method = CPU_ENABLE_METHOD;
+      			cpu-release-addr = <0x0 0xfff8>;
+                        next-level-cache = <&L2_0>;
+                };
+
+                L2_0: l2-cache0 {
+                        compatible = "cache";
+                };
+        };
+
+	timer {
+                compatible = "arm,armv8-timer";
+                interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                             <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                             <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
+                             <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
+	};
+
+        pmu {
+                compatible = "arm,armv8-pmuv3";
+                interrupts = <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
+                             <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>;
+                interrupt-affinity = <&B53_0>,
+                                     <&B53_1>,
+                                     <&B53_2>,
+                                     <&B53_3>;
+	};
+
+	soc_dram: memory@00000000 {
+		device_type = "memory";
+		reg = <0x00000000 DRAM_BASE 0x0 DRAM_DEF_SIZE>;
+
+		// this is overwritten by bootloader with correct value
+		brcm,ddr-mcb = <0x4142b>;
+	};
+
+        reserved-memory {
+		#address-cells = <2>;
+		#size-cells = <2>;
+		ranges;
+
+		atf@0x10000000 {
+			no-map;
+			reg = <0x0 0x10000000 0x0 0x00100000>;
+		};
+		optee@0x10800000 {
+			no-map;
+			reg = <0x0 0x10800000 0x0 0x00400000>;
+		};
+
+		optee-shared-area@0x11000000 {
+			no-map;
+			reg = <0x0 0x11000000 0x0 0x00100000>;
+		};
+		dsl_reserved: dsl_reserved {
+			compatible = "shared-dma-pool";
+			/*
+			 * Only 3MB are actually used, but because of pointer alignment
+			 * arithmetic done by the driver, they need to be at the end of an
+			 * 8MB aligned region, and must be at an address lower than 256M too.
+			 */
+			size = <0x0 0x00800000>;
+			alignment = <0x0 0x00800000>;
+			alloc-ranges = <0x0 0x0 0x0 0x10000000>;
+			no-map;
+			no-cache;
+                };
+#ifdef USE_RDP_RESERVED_TM
+		rdp_reserved_tm: rdp_reserved_tm {
+			compatible = "shared-dma-pool";
+			size = <0x0 0x00800000>;
+			alloc-ranges = <0x0 0x0 0x0 0x10000000>;
+			no-map;
+			no-cache;
+                };
+#endif
+	};
+
+        uartclk: uartclk {
+                compatible = "fixed-clock";
+                #clock-cells = <0>;
+                clock-frequency = <50000000>;
+	};
+
+	spiclk: spiclk {
+                compatible = "fixed-clock";
+                #clock-cells = <0>;
+                clock-frequency = <(200 * 1000 * 1000)>;
+	};
+
+	pcie01: pcidual@80040000 {
+		status = "disabled";
+		device_type = "pci";
+		compatible = "brcm,bcm63xx-pcie";
+		reg = <0x0 0x80040000 0x0 0xa000>;
+		dma-coherent;
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x02000000 0 0xC0000000 0 0xC0000000 0 0x10000000>;
+		bus-range = <0x0 0xff>;
+
+		resets = <&pmc PMC_R_PCIE01>;
+		reset-names = "pcie0";
+
+		ubus = <&ubus4 UBUS_PORT_ID_PCIE0>;
+		procmon = <&procmon RCAL_1UM_VERT>;
+
+		interrupt-names = "intr";
+		interrupts = <GIC_SPI PCIE0_SPI IRQ_TYPE_LEVEL_HIGH>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI PCIE0_SPI IRQ_TYPE_LEVEL_HIGH>;
+
+		brcm,num-lanes = <2>;
+		brcm,dram = <&soc_dram>;
+	};
+
+	pcie0: pci@80040000 {
+		status = "disabled";
+		device_type = "pci";
+		compatible = "brcm,bcm63xx-pcie";
+		reg = <0x0 0x80040000 0x0 0xa000>;
+		dma-coherent;
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x02000000 0 0xC0000000 0 0xC0000000 0 0x10000000>;
+		bus-range = <0x0 0xff>;
+
+		resets = <&pmc PMC_R_PCIE0>;
+		reset-names = "pcie0";
+
+		ubus = <&ubus4 UBUS_PORT_ID_PCIE0>;
+		procmon = <&procmon RCAL_1UM_VERT>;
+
+		interrupt-names = "intr";
+		interrupts = <GIC_SPI PCIE0_SPI IRQ_TYPE_LEVEL_HIGH>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI PCIE0_SPI IRQ_TYPE_LEVEL_HIGH>;
+
+		brcm,num-lanes = <1>;
+		brcm,dram = <&soc_dram>;
+	};
+
+	pcie1: pci@80050000 { /* single-lane PCIe core 1 (brcm,num-lanes = 1) */
+		status = "disabled";
+		device_type = "pci";
+		compatible = "brcm,bcm63xx-pcie";
+		reg = <0x0 0x80050000 0x0 0xa000>;
+		dma-coherent;
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x02000000 0 0xD0000000 0 0xD0000000 0 0x10000000>;
+		bus-range = <0x0 0xff>;
+
+		resets = <&pmc PMC_R_PCIE1>;
+		reset-names = "pcie0"; /* same literal on all pcie nodes; presumably the name the driver looks up */
+
+		ubus = <&ubus4 UBUS_PORT_ID_PCIE0>; /* NOTE(review): pcie2/pcie3 use their own port IDs — confirm this is not a copy-paste of PCIE0 for a missing UBUS_PORT_ID_PCIE1 */
+		procmon = <&procmon RCAL_1UM_VERT>;
+
+		interrupt-names = "intr";
+		interrupts = <GIC_SPI PCIE1_SPI IRQ_TYPE_LEVEL_HIGH>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI PCIE1_SPI IRQ_TYPE_LEVEL_HIGH>;
+
+		brcm,num-lanes = <1>;
+		brcm,dram = <&soc_dram>;
+	};
+
+	pcie2: pci@80060000 {
+		status = "disabled";
+		device_type = "pci";
+		compatible = "brcm,bcm63xx-pcie";
+		reg = <0x0 0x80060000 0x0 0xa000>;
+		dma-coherent;
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x02000000 0 0xE0000000 0 0xE0000000 0 0x10000000>;
+		bus-range = <0x0 0xff>;
+
+		resets = <&pmc PMC_R_PCIE2>;
+		reset-names = "pcie0";
+
+		ubus = <&ubus4 UBUS_PORT_ID_PCIE2>;
+		procmon = <&procmon RCAL_1UM_VERT>;
+
+		interrupt-names = "intr";
+		interrupts = <GIC_SPI PCIE2_SPI IRQ_TYPE_LEVEL_HIGH>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI PCIE2_SPI IRQ_TYPE_LEVEL_HIGH>;
+
+		brcm,num-lanes = <1>;
+		brcm,dram = <&soc_dram>;
+	};
+
+	pcie3: pci@80070000 {
+		status = "disabled";
+		device_type = "pci";
+		compatible = "brcm,bcm63xx-pcie";
+		reg = <0x0 0x80070000 0x0 0xa000>;
+		dma-coherent;
+
+		#address-cells = <3>;
+		#size-cells = <2>;
+		ranges = <0x02000000 0 0xB0000000 0 0xB0000000 0 0x10000000>;
+		bus-range = <0x0 0xff>;
+
+		resets = <&pmc PMC_R_PCIE3>;
+		reset-names = "pcie0";
+
+		ubus = <&ubus4 UBUS_PORT_ID_PCIE3>;
+		procmon = <&procmon RCAL_1UM_VERT>;
+
+		interrupt-names = "intr";
+		interrupts = <GIC_SPI PCIE3_SPI IRQ_TYPE_LEVEL_HIGH>;
+		#interrupt-cells = <1>;
+		interrupt-map-mask = <0 0 0 0>;
+		interrupt-map = <0 0 0 0 &gic GIC_SPI PCIE3_SPI IRQ_TYPE_LEVEL_HIGH>;
+
+		brcm,num-lanes = <1>;
+	};
+
+	/* ARM bus */
+	axi@80000000 {
+                compatible = "simple-bus";
+                #address-cells = <2>;
+                #size-cells = <2>;
+                ranges = <0x0 0x0 0x0 0x80000000 0x0 0x04000000>;
+
+		xtm: xtm@80130000 {
+			compatible = "brcm,bcm63158-xtm";
+			status = "disabled";
+
+			interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+			reg = <0x0 0x130000 0x0 0x4000>;
+
+			xdsl-phy = <&xdsl_phy>;
+			xtm-runner,xrdp = <&xrdp>;
+		};
+
+		memc: memc@0x80180000 {
+			compatible = "brcm,bcm63158-memc";
+			reg = <0x0 0x180000 0x0 0x40000>;
+		};
+
+		pmc: pmc@80200000 {
+			compatible = "brcm,bcm63158-pmc";
+			reg = <0x0 0x200000 0x0 0x10000>;
+			#reset-cells = <1>;
+		};
+
+		procmon: procmon@80280000 {
+			compatible = "brcm,bcm63158-procmon";
+			reg = <0x0 0x280000 0x0 0x100>;
+			#procmon-cells = <1>;
+		};
+
+		ubus4: ubus4@80300000 {
+			compatible = "brcm,bcm63158-ubus4";
+			reg = <0x0 0x03000000 0x0 0x00500000>,
+				<0x0 0x10a0400 0x0 0x400>;
+			reg-names = "master-config", "coherency-config";
+			#ubus-cells = <1>;
+			brcm,dram = <&soc_dram>;
+		};
+
+		sf2: sf2@80400000 {
+			compatible = "brcm,bcm63158-sf2";
+			reg = <0x0 0x400000 0x0 0x80000>,
+			    <0x0 0x480000 0x0 0x500>,
+			    <0x0 0x4805c0 0x0 0x10>,
+			    <0x0 0x480600 0x0 0x200>,
+			    <0x0 0x480800 0x0 0x500>;
+			reg-names = "core", "reg", "mdio", "fcb", "acb";
+			resets = <&pmc PMC_R_SF2>;
+			reset-names = "sf2";
+			status = "disabled";
+
+			sf2,qphy-base-id = <1>;
+			sf2,sphy-phy-id = <5>;
+			sf2,serdes-phy-id = <6>;
+
+			leds-top = <&leds_top_syscon>;
+
+			ports {
+				#address-cells = <1>;
+				#size-cells = <0>;
+
+				sf2_port0: port@0 {
+					// this is a normal port
+					reg = <0>;
+					status = "disabled";
+					phy-handle = <&sf2_qphy0>;
+					phy-connection-type = "gmii";
+				};
+
+				sf2_port1: port@1 {
+					// this is a normal port
+					reg = <1>;
+					status = "disabled";
+					phy-handle = <&sf2_qphy1>;
+					phy-connection-type = "gmii";
+				};
+
+				sf2_port2: port@2 {
+					// this is a normal port
+					reg = <2>;
+					status = "disabled";
+					phy-handle = <&sf2_qphy2>;
+					phy-connection-type = "gmii";
+				};
+
+				sf2_port3: port@3 {
+					// this is a normal port
+					reg = <3>;
+					status = "disabled";
+					/* 0: quad phy3, 1: rgmii2 */
+					mux1-in-port = <0>;
+					phy-handle = <&sf2_qphy3>;
+					phy-connection-type = "gmii";
+				};
+
+				sf2_port4: port@4 {
+					// this is a normal port
+					reg = <4>;
+					status = "disabled";
+					/* default config is xbar to sphy */
+					xbar-in-port = <2>;
+					phy-handle = <&sf2_sphy>;
+					phy-connection-type = "gmii";
+				};
+
+				sf2_port5: port@5 {
+					// this is a CPU port
+					reg = <5>;
+					status = "disabled";
+					phy-connection-type = "internal";
+					ethernet = <&runner_unimac1>;
+					fixed-link {
+						speed = <2500>;
+						full-duplex;
+					};
+				};
+
+				sf2_port6: port@6 {
+					// this is a normal port
+					reg = <6>;
+					status = "disabled";
+					xbar-in-port = <1>;
+					/* default config is xbar to serdes */
+					phy-connection-type = "sgmii";
+				};
+
+				sf2_port7: port@7 {
+					// this is a CPU port
+					reg = <7>;
+					status = "disabled";
+					ethernet = <&runner_unimac2>;
+					phy-connection-type = "internal";
+					fixed-link {
+						speed = <2500>;
+						full-duplex;
+					};
+				};
+
+				sf2_port8: port@8 {
+					// this is a CPU port
+					reg = <8>;
+					status = "disabled";
+					/* 0: system port, 1: unimac bbh */
+					mux2-in-port = <1>;
+
+					dsa,def-cpu-port;
+					//ethernet = <&systemport>;
+					ethernet = <&runner_unimac0>;
+
+					phy-connection-type = "internal";
+					fixed-link {
+						speed = <2500>;
+						full-duplex;
+					};
+				};
+			};
+
+			sf2,wan-port-config {
+				status = "disabled";
+				xbar-in-port = <0>;
+			};
+
+			sf2,mdio {
+		                #address-cells = <1>;
+		                #size-cells = <0>;
+
+				/* XXX: depends on sf2,qphy-base-id */
+				sf2_qphy0: ethernet-phy@1 {
+					compatible = "ethernet-phy-idae02.51c1", "ethernet-phy-ieee802.3-c22";
+					status = "disabled";
+					reg = <1>;
+				};
+				sf2_qphy1: ethernet-phy@2 {
+					compatible = "ethernet-phy-idae02.51c1", "ethernet-phy-ieee802.3-c22";
+					status = "disabled";
+					reg = <2>;
+				};
+				sf2_qphy2: ethernet-phy@3 {
+					compatible = "ethernet-phy-idae02.51c1", "ethernet-phy-ieee802.3-c22";
+					status = "disabled";
+					reg = <3>;
+				};
+				sf2_qphy3: ethernet-phy@4 {
+					compatible = "ethernet-phy-idae02.51c1", "ethernet-phy-ieee802.3-c22";
+					status = "disabled";
+					reg = <4>;
+				};
+				/* XXX: depends on sf2,sphy-base-id */
+				sf2_sphy: ethernet-phy@5 {
+					compatible = "ethernet-phy-idae02.51c1", "ethernet-phy-ieee802.3-c22";
+					status = "disabled";
+					reg = <5>;
+				};
+			};
+		};
+
+		systemport: systemport@80490000 {
+			compatible = "brcm,systemport-63158";
+			reg = <0x0 0x490000 0x0 0x4650>;
+			local-mac-address = [ 00 07 CB 00 00 FE ];
+			interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>,
+				   <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
+			status = "disabled";
+			dma-coherent;
+
+			fixed-link {
+				speed = <2500>;
+				full-duplex;
+			};
+		};
+
+		xdsl_phy: xdsl-phy@80650000 {
+			compatible = "brcm,bcm63158-xdsl-phy";
+			status = "disabled";
+
+			memory-region = <&dsl_reserved>;
+			interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+			reg = <0x0 0x650000 0x0 0x20000>,
+				<0x0 0x800000 0x0 0xe0000>,
+				<0x0 0x9A0000 0x0 0x660000>;
+			reg-names = "phy", "lmem", "xmem";
+
+			pinctrl-0 = <&ld0_pins>;
+			pinctrl-names = "default";
+
+			ubus = <&ubus4 UBUS_PORT_ID_DSLCPU>,
+				<&ubus4 UBUS_PORT_ID_DSL>;
+
+			/*
+			 * this is used by dsldiags, but unfortunately
+			 * lying outside the axi space, in the ubus
+			 * space.
+			 */
+			perf-base = <0xff800000>;
+			perf-size = <0x10>;
+		};
+
+		gic: interrupt-controller@81000000 {
+	                compatible = "arm,cortex-a15-gic", "arm,cortex-a9-gic";
+	                #interrupt-cells = <3>;
+	                #address-cells = <0>;
+	                interrupt-controller;
+	                reg = <0x0 0x1001000 0 0x1000>,
+	                      <0x0 0x1002000 0 0x2000>;
+	        };
+
+		usb: usb@8000d000 {
+			status = "disabled";
+			compatible = "brcm,bcm63158-usb";
+
+			reg = <0x0 0xd000 0x0 0x1000>,
+				<0x0 0xc200 0x0 0x100>,
+				<0x0 0xc300 0x0 0x100>,
+				<0x0 0xc400 0x0 0x100>,
+				<0x0 0xc500 0x0 0x100>,
+				<0x0 0xc600 0x0 0x100>;
+			reg-names = "xhci", "usb-control", "ehci0",
+				"ohci0", "ehci1", "ohci1";
+			dma-coherent;
+
+			interrupts = <GIC_SPI XHCI_SPI IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI EHCI0_SPI IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI OHCI0_SPI IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI EHCI1_SPI IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI OHCI1_SPI IRQ_TYPE_LEVEL_HIGH>;
+			interrupt-names = "xhci", "ehci0", "ohci0",
+				"ehci1", "ohci1";
+
+			resets = <&pmc PMC_R_USBH>;
+			reset-names = "xhci-pmc-reset";
+
+			ubus = <&ubus4 UBUS_PORT_ID_USB>;
+		};
+
+		xrdp: xrdp@82000000 {
+			compatible = "brcm,bcm63158-xrdp";
+			reg = <0x0 0x2000000 0x0 0x1000000>,
+				<0x0 0x0170000 0x0 0x10000>;
+			reg-names = "core", "wan_top";
+
+			interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 132 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 138 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 149 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 150 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 151 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 152 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 156 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>,
+				    <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>;
+
+			interrupt-names = "fpm",
+				"hash",
+				"qm",
+				"dsptchr",
+				"sbpm",
+				"runner0",
+				"runner1",
+				"runner2",
+				"runner3",
+				"runner4",
+				"runner5",
+				"queue0",
+				"queue1",
+				"queue2",
+				"queue3",
+				"queue4",
+				"queue5",
+				"queue6",
+				"queue7",
+				"queue8",
+				"queue9",
+				"queue10",
+				"queue11",
+				"queue12",
+				"queue13",
+				"queue14",
+				"queue15",
+				"queue16",
+				"queue17",
+				"queue18",
+				"queue19",
+				"queue20",
+				"queue21",
+				"queue22",
+				"queue23",
+				"queue24",
+				"queue25",
+				"queue26",
+				"queue27",
+				"queue28",
+				"queue29",
+				"queue30",
+				"queue31";
+
+#ifdef USE_RDP_RESERVED_TM
+			memory-region = <&rdp_reserved_tm>;
+#endif
+			resets = <&pmc PMC_R_XRDP>;
+			reset-names = "rdp";
+			ubus = <&ubus4 UBUS_PORT_ID_QM>,
+				<&ubus4 UBUS_PORT_ID_DQM>,
+				<&ubus4 UBUS_PORT_ID_NATC>,
+				<&ubus4 UBUS_PORT_ID_DMA0>,
+				<&ubus4 UBUS_PORT_ID_RQ0>,
+				<&ubus4 UBUS_PORT_ID_SWH>;
+		};
+
+		runner_unimac0: runner-unimac0 {
+			status = "disabled";
+			compatible = "brcm,bcm63158-enet-runner-unimac";
+			local-mac-address = [ 00 07 CB 00 00 FE ];
+			enet-runner,xrdp = <&xrdp>;
+			enet-runner,bbh = <0>;
+			dma-coherent;
+
+			phy-mode = "internal";
+			fixed-link {
+				speed = <2500>;
+				full-duplex;
+			};
+		};
+
+		runner_unimac1: runner-unimac1 {
+			status = "disabled";
+			compatible = "brcm,bcm63158-enet-runner-unimac";
+			local-mac-address = [ 00 07 CB 00 00 FE ];
+			enet-runner,xrdp = <&xrdp>;
+			enet-runner,bbh = <1>;
+			dma-coherent;
+
+			phy-mode = "internal";
+			fixed-link {
+				speed = <2500>;
+				full-duplex;
+			};
+		};
+
+		runner_unimac2: runner-unimac2 {
+			status = "disabled";
+			compatible = "brcm,bcm63158-enet-runner-unimac";
+			local-mac-address = [ 00 07 CB 00 00 FE ];
+			enet-runner,xrdp = <&xrdp>;
+			enet-runner,bbh = <2>;
+			dma-coherent;
+
+			phy-mode = "internal";
+			fixed-link {
+				speed = <2500>;
+				full-duplex;
+			};
+		};
+
+		runner_xport0: runner-xport0 {
+			status = "disabled";
+			compatible = "brcm,bcm63158-enet-runner-xport";
+			reg = <0x0 0x00144000 0x0 0x100>,
+				<0x0 0x00138000 0x0 0x6fff>,
+				<0x0 0x00147800 0x0 0xe80>,
+				<0x0 0x00140000 0x0 0x3fff>;
+			reg-names = "wan_top", "xport", "xlif", "epon";
+			dma-coherent;
+
+			resets = <&pmc PMC_R_WAN_AE>;
+			reset-names = "wan_ae";
+
+			local-mac-address = [ 00 07 CB 00 00 FE ];
+			enet-runner,xrdp = <&xrdp>;
+			enet-runner,xport-pon-bbh = <3>;
+			enet-runner,xport-ae-bbh = <4>;
+
+			//phy-mode = "1000base-x";
+			phy-mode = "10gbase-r";
+			managed = "in-band-status";
+		};
+	};
+
+	ubus@ff800000 {
+                compatible = "simple-bus";
+                #address-cells = <2>;
+                #size-cells = <2>;
+                ranges = <0x0 0x0 0x0 0xff800000 0x0 0x62000>;
+
+		leds_top_syscon: system-controller@ff800800 {
+			compatible = "syscon", "simple-mfd";
+			reg = <0x0 0x800 0x0 0x100>;
+		};
+
+		sdhci: sdhci@ff810000 {
+			status = "disabled";
+			compatible = "brcm,bcm63xx-sdhci";
+			reg = <0x0 0x00010000 0x0 0x100>;
+			interrupts = <GIC_SPI SDIO_EMMC_SPI IRQ_TYPE_LEVEL_HIGH>;
+			no-1-8v;
+			bus-width = <8>;
+		};
+
+                arm_serial0: serial@ff812000 {
+                        compatible = "arm,pl011", "arm,primecell";
+                        reg = <0x0 0x12000 0x0 0x1000>;
+                        interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
+                        clocks = <&uartclk>, <&uartclk>;
+                        clock-names = "uartclk", "apb_pclk";
+			status = "disabled";
+                };
+
+                arm_serial2: serial@ff814000 {
+                        compatible = "arm,pl011", "arm,primecell";
+                        reg = <0x0 0x14000 0x0 0x1000>;
+                        interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
+                        clocks = <&uartclk>, <&uartclk>;
+                        clock-names = "uartclk", "apb_pclk";
+			status = "disabled";
+                };
+
+		timer: timer@400 {
+			compatible = "syscon", "brcm,bcm63158-timer";
+			reg = <0x0 0x400 0x0 0x94>,
+				<0x0 0x5a03c 0x0 0x4>;
+			reg-names = "timer", "top-reset-status";
+			interrupts = <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		reboot {
+			compatible = "syscon-reboot";
+			regmap = <&timer>;
+			offset = <0x8c>;
+			mask = <1>;
+		};
+
+		pinctrl: pinctrl@500 {
+			compatible = "brcm,bcm63158-pinctrl";
+			reg = <0x0 0x500 0x0 0x60>,
+				<0x0 0x20 0x0 0x2c>;
+			reg-names = "gpio", "irq";
+
+			gpio-controller;
+			#gpio-cells = <2>;
+
+			interrupt-controller;
+			#interrupt-cells = <2>;
+
+			/* just for ref, they are hardcoded in driver too */
+			interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 179 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
+
+			emmc_pins: emmc-pins-0 {
+				emmc-d0 {
+					pinmux = <BCM63158_GPIO_51__FUNC_NAND_DATA_0>;
+				};
+				emmc-d1 {
+					pinmux = <BCM63158_GPIO_52__FUNC_NAND_DATA_1>;
+				};
+				emmc-d2 {
+					pinmux = <BCM63158_GPIO_53__FUNC_NAND_DATA_2>;
+				};
+				emmc-d3 {
+					pinmux = <BCM63158_GPIO_54__FUNC_NAND_DATA_3>;
+				};
+				emmc-d4 {
+					pinmux = <BCM63158_GPIO_55__FUNC_NAND_DATA_4>;
+				};
+				emmc-d5 {
+					pinmux = <BCM63158_GPIO_56__FUNC_NAND_DATA_5>;
+				};
+				emmc-d6 {
+					pinmux = <BCM63158_GPIO_57__FUNC_NAND_DATA_6>;
+				};
+				emmc-d7 {
+					pinmux = <BCM63158_GPIO_58__FUNC_NAND_DATA_7>;
+				};
+				emmc-clk {
+					pinmux = <BCM63158_GPIO_62__FUNC_EMMC_CLK>;
+				};
+				emmc-cmd {
+					pinmux = <BCM63158_GPIO_63__FUNC_EMMC_CMD>;
+				};
+			};
+
+			spi_pins: spi-pins {
+				spi-clk {
+					pinmux = <BCM63158_GPIO_108__FUNC_SPIM_CLK>;
+				};
+				spi-mosi {
+					pinmux = <BCM63158_GPIO_109__FUNC_SPIM_MOSI>;
+				};
+				spi-miso {
+					pinmux = <BCM63158_GPIO_110__FUNC_SPIM_MISO>;
+				};
+
+				/*
+				 * board DTS will have to specify SPI
+				 * SS pins as required.
+				 */
+			};
+
+			i2c0_pins: i2c0-pins {
+				i2c-sda {
+					pinmux = <BCM63158_GPIO_24__FUNC_B_I2C_SDA_0>;
+				};
+				i2c-scl {
+					pinmux = <BCM63158_GPIO_25__FUNC_B_I2C_SCL_0>;
+				};
+			};
+
+			i2c1_pins: i2c1-pins {
+				i2c-sda {
+					pinmux = <BCM63158_GPIO_15__FUNC_B_I2C_SDA_1>;
+				};
+				i2c-scl {
+					pinmux = <BCM63158_GPIO_16__FUNC_B_I2C_SCL_1>;
+				};
+			};
+
+			pcie0_pins: pcie0-pins {
+				pcie-clk {
+					pinmux = <BCM63158_GPIO_113__FUNC_PCIE0a_CLKREQ_B>;
+				};
+				pcie-rst {
+					pinmux = <BCM63158_GPIO_114__FUNC_PCIE0a_RST_B>;
+				};
+			};
+
+			pcie1_pins: pcie1-pins {
+				pcie-clk {
+					pinmux = <BCM63158_GPIO_115__FUNC_PCIE1a_CLKREQ_B>;
+				};
+				pcie-rst {
+					pinmux = <BCM63158_GPIO_116__FUNC_PCIE1a_RST_B>;
+				};
+			};
+
+			pcie2_pins: pcie2-pins {
+				pcie-clk {
+					pinmux = <BCM63158_GPIO_117__FUNC_PCIE2a_CLKREQ_B>;
+				};
+				pcie-rst {
+					pinmux = <BCM63158_GPIO_118__FUNC_PCIE2a_RST_B>;
+				};
+			};
+
+			pcie3_pins: pcie3-pins {
+				pcie-clk {
+					pinmux = <BCM63158_GPIO_119__FUNC_PCIE3_CLKREQ_B>;
+				};
+				pcie-rst {
+					pinmux = <BCM63158_GPIO_120__FUNC_PCIE3_RST_B>;
+				};
+			};
+
+			pcm_pins: pcm-pins {
+				pcm-clk {
+					pinmux = <BCM63158_GPIO_44__FUNC_PCM_CLK>;
+				};
+				pcm-fsync {
+					pinmux = <BCM63158_GPIO_45__FUNC_PCM_FS>;
+				};
+				pcm-sdin {
+					pinmux = <BCM63158_GPIO_42__FUNC_PCM_SDIN>;
+				};
+				pcm-sdout {
+					pinmux = <BCM63158_GPIO_43__FUNC_PCM_SDOUT>;
+				};
+			};
+
+			hs_uart_pins: hs-uart-pins {
+				hs-uart-sout {
+					pinmux = <BCM63158_GPIO_06__FUNC_A_UART2_SOUT>;
+				};
+				hs-uart-sin {
+					pinmux = <BCM63158_GPIO_05__FUNC_A_UART2_SIN>;
+				};
+				hs-uart-cts {
+					pinmux = <BCM63158_GPIO_03__FUNC_A_UART2_CTS>;
+				};
+				hs-uart-rts {
+					pinmux = <BCM63158_GPIO_04__FUNC_A_UART2_RTS>;
+				};
+			};
+
+			usb01_pins: usb01-pins {
+				pwr0-en {
+					pinmux = <BCM63158_GPIO_122__FUNC_USB0a_PWRON>;
+				};
+				pwr0-fault {
+					pinmux = <BCM63158_GPIO_121__FUNC_USB0a_PWRFLT>;
+				};
+				pwr1-en {
+					pinmux = <BCM63158_GPIO_124__FUNC_USB1a_PWRON>;
+				};
+				pwr1-fault {
+					pinmux = <BCM63158_GPIO_123__FUNC_USB1a_PWRFLT>;
+				};
+			};
+
+			usb0_pins: usb0-pins {
+				pwr0-en {
+					pinmux = <BCM63158_GPIO_122__FUNC_USB0a_PWRON>;
+				};
+				pwr0-fault {
+					pinmux = <BCM63158_GPIO_121__FUNC_USB0a_PWRFLT>;
+				};
+			};
+
+			usb1_pins: usb1-pins {
+				pwr0-en {
+					pinmux = <BCM63158_GPIO_124__FUNC_USB1a_PWRON>;
+				};
+				pwr0-fault {
+					pinmux = <BCM63158_GPIO_123__FUNC_USB1a_PWRFLT>;
+				};
+			};
+
+			ld0_pins: ld0-pins {
+				ld0_pwr_up {
+					pinmux = <BCM63158_GPIO_32__FUNC_VDSL_CTRL0>;
+				};
+
+				ld0_din {
+					pinmux = <BCM63158_GPIO_33__FUNC_VDSL_CTRL_1>;
+				};
+
+				ld0_dclk {
+					pinmux = <BCM63158_GPIO_34__FUNC_VDSL_CTRL_2>;
+				};
+			};
+
+			ld1_pins: ld1-pins-0 {
+				ld1_pwr_up {
+					pinmux = <BCM63158_GPIO_35__FUNC_VDSL_CTRL_3>;
+				};
+
+				ld1_din {
+					pinmux = <BCM63158_GPIO_36__FUNC_VDSL_CTRL_4>;
+				};
+
+				ld1_dclk {
+					pinmux = <BCM63158_GPIO_37__FUNC_VDSL_CTRL_5>;
+				};
+			};
+
+			gphy01_link_act_leds: gphy01-link-act-leds {
+				gphy0_link_act_led {
+					pinmux = <BCM63158_GPIO_84__FUNC_B_LED_20>;
+				};
+				gphy1_link_act_led {
+					pinmux = <BCM63158_GPIO_85__FUNC_B_LED_21>;
+				};
+			};
+
+			sfp_rogue0_pins: sfp-rogue0-pins {
+				sfp_rogue0_in {
+					pinmux = <BCM63158_GPIO_34__FUNC_B_ROGUE_IN>;
+				};
+			};
+
+			sfp_rogue1_pins: sfp-rogue1-pins {
+				sfp_rogue1_in {
+					pinmux = <BCM63158_GPIO_40__FUNC_A_ROGUE_IN>;
+				};
+			};
+			sfp_rs0_gpio_pins: sfp-rs0-gpio {
+				sfp_rogue1_rs0 {
+					pinmux = <BCM63158_GPIO_40__FUNC_GPIO_40>;
+				};
+			};
+		};
+
+		hs_spim: spi@1000 {
+			status = "disabled";
+			compatible = "brcm,bcm6328-hsspi";
+			reg = <0x0 0x1000 0x0 0x600>;
+			interrupts = <GIC_SPI HS_SPI_SPI IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&spiclk>;
+			clock-names = "hsspi";
+			#address-cells = <1>;
+			#size-cells = <0>;
+		};
+
+		hs_uart: hs-uart@10400 {
+			status = "disabled";
+			compatible = "brcm,bcm63xx-hs-uart";
+			reg = <0x0 0x00010400 0x0 0x1e0>;
+			interrupts = <GIC_SPI HS_UART_SPI IRQ_TYPE_LEVEL_HIGH>;
+			clocks = <&uartclk>;
+		};
+
+		i2c_bsc0: i2c@2100 {
+			status = "disabled";
+			compatible = "brcm,brcmper-i2c";
+			reg = <0x0 0x2100 0x0 0x60>;
+			interrupts = <GIC_SPI BSC_I2C0_SPI IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		i2c_bsc1: i2c@5a800 {
+			status = "disabled";
+			compatible = "brcm,brcmper-i2c";
+			reg = <0x0 0x5a800 0x0 0x60>;
+			interrupts = <GIC_SPI BSC_I2C1_SPI IRQ_TYPE_LEVEL_HIGH>;
+		};
+
+		bcm_pcm: bcm_pcm@60000 {
+			status = "disabled";
+			compatible = "brcm,bcm63158-pcm";
+			reg = <0x0 0x60000 0x0 0x2000>;
+			interrupts = <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>,
+				<GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>;
+                        interrupt-names = "pcm", "dma0", "dma1";
+		};
+
+		bcm63158_cpufreq {
+			compatible = "brcm,bcm63158-cpufreq";
+			pmc = <&pmc>;
+		};
+	};
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./bcm963158ref1d.dts linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/bcm963158ref1d.dts
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./bcm963158ref1d.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/bcm963158ref1d.dts	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,331 @@
+/*
+ * Broadcom BCM63158 Reference Board REF1 DTS
+ */
+
+/dts-v1/;
+
+#include "bcm63158.dtsi"
+
+/ {
+	compatible = "brcm,BCM963158REF1", "brcm,bcm63158";
+	model = "Broadcom BCM963158REF1";
+
+	chosen {
+		bootargs = "console=ttyAMA0,115200";
+		stdout-path = &arm_serial0;
+	};
+
+	reserved-memory {
+		ramoops@3fff0000 {
+			compatible = "ramoops";
+			/* RAM top - 64k */
+			reg = <0x0 0x3fff0000 0x0 (64 * 1024)>;
+			record-size = <(64 * 1024)>;
+			ecc-size = <16>;
+			no-dump-oops;
+		};
+	};
+
+	bcm963158ref1d-fbxgpio {
+		compatible = "fbx,fbxgpio";
+
+		wps-button {
+			gpio = <&pinctrl 41 0>;
+			input;
+		};
+		dsl0-link-led {
+			gpio = <&pinctrl 18 0>;
+			output-low;
+		};
+
+		sfp-ae-pwren {
+			gpio = <&pinctrl 3 0>;
+			output-low;
+		};
+		sfp-ae-rs0 {
+			gpio = <&pinctrl 40 0>;
+			input;
+		};
+		sfp-ae-rs1 {
+			gpio = <&pinctrl 12 0>;
+			output-low;
+		};
+
+		sfp-ae-presence {
+			gpio = <&pinctrl 9 0>;
+			input;
+		};
+		sfp-ae-rxlos {
+			gpio = <&pinctrl 8 0>;
+			input;
+		};
+
+		sfp-sgmii-presence {
+			gpio = <&pinctrl 20 0>;
+			input;
+		};
+		sfp-sgmii-rxlos {
+			gpio = <&pinctrl 21 0>;
+			input;
+		};
+	};
+
+	i2c0_gpio: i2c0-gpio {
+		compatible = "i2c-gpio";
+		gpios = <&pinctrl 24 0 /* sda */
+			 &pinctrl 25 0 /* scl */
+			>;
+		i2c-gpio,delay-us = <10>;	/* ~100 kHz */
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+
+	i2c1_gpio: i2c1-gpio {
+		compatible = "i2c-gpio";
+		gpios = <&pinctrl 15 0 /* sda */
+			 &pinctrl 16 0 /* scl */
+			>;
+		i2c-gpio,delay-us = <10>;	/* ~100 kHz */
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+};
+
+&arm_serial0 {
+	status = "okay";
+};
+
+&sf2 {
+	status = "okay";
+};
+
+&sf2_port0 {
+	status = "okay";
+	label = "swp1";
+};
+
+&sf2_port1 {
+	status = "okay";
+	label = "swp2";
+};
+
+&sf2_port2 {
+	status = "okay";
+	label = "swp3";
+};
+
+&sf2_port3 {
+	status = "okay";
+	label = "swp4";
+};
+
+&sf2_port4 {
+	status = "okay";
+	label = "swp5";
+};
+
+&sf2_port8 {
+	status = "okay";
+};
+
+&sf2_qphy0 {
+	status = "okay";
+};
+
+&sf2_qphy1 {
+	status = "okay";
+};
+
+&sf2_qphy2 {
+	status = "okay";
+};
+
+&sf2_qphy3 {
+	status = "okay";
+};
+
+&sf2_sphy {
+	status = "okay";
+};
+
+&systemport {
+	status = "okay";
+	fbxserial-mac-address = <0>;
+};
+
+&sdhci {
+	status = "okay";
+
+	pinctrl-0 = <&emmc_pins>;
+	pinctrl-names = "default";
+
+	partitions-main {
+		compatible = "fixed-partitions";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		disk-name = "mmcblk%d";
+
+		bank0@0 {
+			label = "bank0";
+			reg = /bits/64 <0 (32 * 1024 * 1024)>;
+			read-only;
+		};
+
+		bank1@0 {
+			label = "bank1";
+			reg = /bits/64 <(-1) (256 * 1024 * 1024)>;
+		};
+
+		nvram@0 {
+			label = "nvram";
+			reg = /bits/64 <(-1) (4 * 1024 * 1024)>;
+		};
+
+		config@0 {
+			label = "config";
+			reg = /bits/64 <(-1) (32 * 1024 * 1024)>;
+		};
+
+		new-bank0@0 {
+			label = "new_bank0";
+			reg = /bits/64 <(-1) (32 * 1024 * 1024)>;
+		};
+
+		userdata@0 {
+			label = "userdata";
+			reg = /bits/64 <(-1) (-1)>;
+		};
+	};
+
+
+	partitions-boot {
+		compatible = "fixed-partitions";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		disk-name = "mmcblk%dboot0";
+
+		cfe@0 {
+			label = "cfe";
+			reg = /bits/64 <0 (1 * 1024 * 1024)>;
+			read-only;
+		};
+
+		serial@0 {
+			label = "fbxserial";
+			reg = /bits/64 <(-1) (8 * 1024)>;
+			read-only;
+		};
+
+		fbxboot@0 {
+			label = "fbxboot";
+			reg = /bits/64 <(-1) (8 * 1024)>;
+			read-only;
+		};
+	};
+};
+
+&spi_pins {
+	spi-ss0 {
+		pinmux = <BCM63158_GPIO_111__FUNC_SPIM_SS0_B>;
+	};
+	spi-ss1 {
+		pinmux = <BCM63158_GPIO_112__FUNC_SPIM_SS1_B>;
+	};
+};
+
+&hs_spim {
+	status = "okay";
+	num-cs = <2>;
+	broadcom,dummy-cs = <2>;
+	pinctrl-0 = <&spi_pins>;
+	pinctrl-names = "default";
+	serial-flash@0 {
+		compatible = "m25p80";
+		reg = <0>;
+		spi-max-frequency = <(50 * 1000 * 1000)>;
+		label = "serial-flash";
+	};
+
+	/* TO TEST SLAC */
+	/*
+	spi-slac@1 {
+		compatible = "microsemi,le9641";
+		reg = <1>;
+		spi-max-frequency = <(1 * 1000 * 1000)>;
+	};
+	*/
+
+	/* TO TEST LCD  */
+	/*
+	ssd1320@1 {
+		compatible = "solomon,ssd1320";
+		reg = <1>;
+		spi-max-frequency = <(9 * 1000 * 1000)>;
+		ssd1320,width = <160>;
+		ssd1320,height = <100>;
+		ssd1320,segs-hw-skip = <0>;
+		ssd1320,coms-hw-skip = <30>;
+		ssd1320,rotate = <180>;
+		ssd1320,watchdog = <300>;
+		ssd1320,data-select-gpio = <&pinctrl 14 GPIO_ACTIVE_HIGH>;
+		ssd1320,reset-gpio = <&pinctrl 4 GPIO_ACTIVE_HIGH>;
+	};
+	*/
+};
+
+&pcie01 {
+	status = "okay";
+	pinctrl-0 = <&pcie0_pins>;
+	pinctrl-names = "default";
+};
+
+&pcie2 {
+	status = "okay";
+	pinctrl-0 = <&pcie2_pins>;
+	pinctrl-names = "default";
+};
+
+
+&pcie3 {
+	status = "okay";
+	pinctrl-0 = <&pcie3_pins>;
+	pinctrl-names = "default";
+};
+
+&xdsl_phy {
+	status = "okay";
+
+	pinctrl-0 = <&ld0_pins>, <&ld1_pins>;
+	pinctrl-names = "default";
+
+	afe-id-0 = <(BCM63XX_XDSLPHY_AFE_CHIP_CH0 |
+		   BCM63XX_XDSLPHY_AFE_LD_6304 |
+		   BCM63XX_XDSLPHY_AFE_FE_ANNEXA |
+		   BCM63XX_XDSLPHY_AFE_FE_REV_6304_REV_12_4_60 |
+		   BCM63XX_XDSLPHY_AFE_FE_RNC)>;
+
+	afe-id-1 = <(BCM63XX_XDSLPHY_AFE_CHIP_CH1 |
+		   BCM63XX_XDSLPHY_AFE_LD_6304 |
+		   BCM63XX_XDSLPHY_AFE_FE_ANNEXA |
+		   BCM63XX_XDSLPHY_AFE_FE_REV_6304_REV_12_4_60 |
+		   BCM63XX_XDSLPHY_AFE_FE_RNC)>;
+};
+
+/* TO TEST TELEPHONY */
+/*
+&bcm_pcm {
+	status = "okay";
+	pinctrl-0 = <&pcm_pins>;
+	pinctrl-names = "default";
+};
+*/
+
+&usb {
+	status = "okay";
+
+	pinctrl-0 = <&usb01_pins>;
+	pinctrl-names = "default";
+
+	brcm,pwren-low;
+	brcm,pwrflt-low;
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-board-00.dts linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-board-00.dts
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-board-00.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-board-00.dts	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,14 @@
+/*
+ * Freebox FBXGW8R Board DTS, board ID 0x00
+ */
+/dts-v1/;
+
+#include "fbxgw8r-common.dtsi"
+#include "fbxgw8r-common-nopmu.dtsi"
+#include "fbxgw8r-phy-aquantia.dtsi"
+
+/ {
+	compatible = "freebox,fbxgw8r-board-00", "freebox,fbxgw8r",
+		"brcm,bcm63158";
+	cfe-v2.4-work-around-padding;
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-board-01.dts linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-board-01.dts
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-board-01.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-board-01.dts	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,18 @@
+/*
+ * Freebox FBXGW8R Board DTS, board ID 0x01
+ *
+ * With Realtek PHY 2.5G phy replacing the Aquantia 2.5G PHY, at
+ * address 0x6 (protos) or 0x7 (final version).
+ */
+/dts-v1/;
+
+#include "fbxgw8r-common.dtsi"
+#include "fbxgw8r-common-nopmu.dtsi"
+#include "fbxgw8r-phy-realtek.dtsi"
+
+/ {
+	compatible = "freebox,fbxgw8r-board-01", "freebox,fbxgw8r",
+		"brcm,bcm63158";
+	cfe-v2.4-work-around-padding;
+	cfe-v2.4-work-around-padding-01;
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-board-02.dts linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-board-02.dts
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-board-02.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-board-02.dts	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,14 @@
+/*
+ * Freebox FBXGW8R Board DTS, board ID 0x02, same as 0x00 but with external
+ * PMU (only for power-btn/rtc & leds) and wifi 7
+ */
+/dts-v1/;
+
+#include "fbxgw8r-common.dtsi"
+#include "fbxgw8r-common-external-pmu.dtsi"
+#include "fbxgw8r-phy-aquantia.dtsi"
+
+/ {
+	compatible = "freebox,fbxgw8r-board-02", "freebox,fbxgw8r",
+		"brcm,bcm63158";
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-board-03.dts linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-board-03.dts
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-board-03.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-board-03.dts	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,14 @@
+/*
+ * Freebox FBXGW8R Board DTS, board ID 0x03, same as 0x01 but with external
+ * PMU (only for power-btn/rtc & leds) and wifi 7
+ */
+/dts-v1/;
+
+#include "fbxgw8r-common.dtsi"
+#include "fbxgw8r-common-external-pmu.dtsi"
+#include "fbxgw8r-phy-realtek.dtsi"
+
+/ {
+	compatible = "freebox,fbxgw8r-board-03", "freebox,fbxgw8r",
+		"brcm,bcm63158";
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-board-04.dts linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-board-04.dts
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-board-04.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-board-04.dts	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,16 @@
+/*
+ * Freebox FBXGW8R Board DTS, board ID 0x04
+ *
+ * With Realtek PHY 2.5G phy, DDR4 & fbxpmu, dual pci express, fbxpmu
+ * based keypad.
+ */
+/dts-v1/;
+
+#include "fbxgw8r-common.dtsi"
+#include "fbxgw8r-common-onboard-pmu.dtsi"
+#include "fbxgw8r-phy-realtek.dtsi"
+
+/ {
+	compatible = "freebox,fbxgw8r-board-04", "freebox,fbxgw8r",
+		"brcm,bcm63158";
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-common-external-pmu.dtsi linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-common-external-pmu.dtsi
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-common-external-pmu.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-common-external-pmu.dtsi	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,272 @@
+/*
+ * common dtsi file for fbxgw8r for boards with external PMU, wifi 7
+ */
+/ {
+	powerbtn {
+		compatible = "gpio-keys";
+		autorepeat = <0>;
+
+		powerbtn {
+			label = "power";
+			linux,code = <KEY_POWER>;
+			gpios = <&fbxpmu_gpio_expander 4 GPIO_ACTIVE_HIGH>;
+			debounce-interval = <50>;
+			linux,can-disable;
+		};
+	};
+
+	dgasp {
+		compatible = "misc,dgasp";
+		interrupt-parent = <&pinctrl>;
+		interrupts = <20 IRQ_TYPE_LEVEL_LOW>;
+
+		toset-gpios = <&pinctrl 99 GPIO_ACTIVE_LOW>; /* w-enable-2 */
+	};
+};
+
+&i2c1_gpio {
+	adt7475@2e {
+		compatible = "adi,adt7475";
+		reg = <0x2e>;
+	};
+};
+
+&pinctrl {
+	gpio-line-names = "", /* 0 */
+			  "", /* 1 */
+			  "", /* 2 */
+			  "bt-rst", /* 3 */
+			  "", /* 4 */
+			  "", /* 5 */
+			  "", /* 6 */
+			  "", /* 7 */
+			  "", /* 8 */
+			  "wan-sfp-presence", /* 9 */
+			  "wan-sfp-rxlos", /* 10 */
+			  "wan-sfp-txfault", /* 11 */
+			  "wan-sfp-rs1", /* 12 */
+			  "", /* 13 */
+			  "wan-sfp-pwren", /* 14 */
+			  "", /* 15 */
+			  "", /* 16 */
+			  "", /* 17 */
+			  "", /* 18 */
+			  "i2c-int", /* 19 */
+			  "pmu-dyinggasp-int", /* 20 */
+			  "fan-int", /* 21 */
+			  "", /* 22 */
+			  "fxs-int", /* 23 */
+			  "", /* 24 */
+			  "", /* 25 */
+			  "phy25-int", /* 26 */
+			  "phy25-reset", /* 27 */
+			  "", /* 28 */
+			  "", /* 29 */
+			  "", /* 30 */
+			  "", /* 31 */
+			  "", /* 32 */
+			  "", /* 33 */
+			  "", /* 34 */
+			  "", /* 35 */
+			  "oled-rst", /* 36 */
+			  "pmu-int", /* 37 */
+			  "", /* 38 */
+			  "", /* 39 */
+			  "wan-sfp-rs0", /* 40 */
+			  "wan-sfp-pwrgood", /* 41 */
+			  "", /* 42 */
+			  "", /* 43 */
+			  "", /* 44 */
+			  "", /* 45 */
+			  "", /* 46 */
+			  "", /* 47 */
+			  "", /* 48 */
+			  "", /* 49 */
+			  "", /* 50 */
+			  "", /* 51 */
+			  "", /* 52 */
+			  "", /* 53 */
+			  "", /* 54 */
+			  "", /* 55 */
+			  "", /* 56 */
+			  "", /* 57 */
+			  "", /* 58 */
+			  "", /* 59 */
+			  "", /* 60 */
+			  "", /* 61 */
+			  "", /* 62 */
+			  "", /* 63 */
+			  "", /* 64 */
+			  "", /* 65 */
+			  "", /* 66 */
+			  "", /* 67 */
+			  "", /* 68 */
+			  "", /* 69 */
+			  "", /* 70 */
+			  "", /* 71 */
+			  "", /* 72 */
+			  "", /* 73 */
+			  "", /* 74 */
+			  "", /* 75 */
+			  "", /* 76 */
+			  "", /* 77 */
+			  "", /* 78 */
+			  "", /* 79 */
+			  "oled-data-select", /* 80 */
+			  "backlight-en", /* 81 */
+			  "led-white", /* 82 */
+			  "led-red", /* 83 */
+			  "", /* 84 */
+			  "", /* 85 */
+			  "keypad-left", /* 86 */
+			  "oled-vcc", /* 87 */
+			  "bt-rst", /* 88 */
+			  "ha-swd-clk", /* 89 */
+			  "ha-swd-io", /* 90 */
+			  "ha-rst", /* 91 */
+			  "", /* 92 */
+			  "", /* 93 */
+			  "", /* 94 */
+			  "", /* 95 */
+			  "keypad-up", /* 96 */
+			  "keypad-down", /* 97 */
+			  "w-enable-1", /* 98 */
+			  "w-enable-2", /* 99 */
+			  "", /* 100 */
+			  "", /* 101 */
+			  "keypad-right", /* 102 */
+			  "", /* 103 */
+			  "", /* 104 */
+			  "", /* 105 */
+			  "", /* 106 */
+			  "", /* 107 */
+			  "", /* 108 */
+			  "", /* 109 */
+			  "", /* 110 */
+			  "", /* 111 */
+			  "", /* 112 */
+			  "", /* 113 */
+			  "", /* 114 */
+			  "", /* 115 */
+			  "", /* 116 */
+			  "", /* 117 */
+			  "", /* 118 */
+			  "", /* 119 */
+			  "", /* 120 */
+			  "", /* 121 */
+			  "", /* 122 */
+			  "", /* 123 */
+			  "", /* 124 */
+			  "", /* 125 */
+			  "", /* 126 */
+			  "", /* 127 */
+			  "", /* 128 */
+			  "", /* 129 */
+			  ""; /* 130 */
+};
+
+&keypad {
+	keyup {
+		label = "key up";
+		linux,code = <KEY_UP>;
+		gpios = <&pinctrl 97 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keydown {
+		label = "key down";
+		linux,code = <KEY_DOWN>;
+		gpios = <&pinctrl 96 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keyright {
+		label = "key right";
+		linux,code = <KEY_RIGHT>;
+		gpios = <&pinctrl 102 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keyleft {
+		label = "key left";
+		linux,code = <KEY_LEFT>;
+		gpios = <&pinctrl 86 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+};
+
+&i2c1_gpio {
+	fbxpmu@3c {
+		compatible = "freebox,fbxgwr-pmu";
+		reg = <0x3c>;
+
+		fbxpmu_gpio_expander: fbxpmu@3c {
+			compatible = "freebox,fbxgwr-pmu-gpio";
+			interrupt-parent = <&pinctrl>;
+			interrupts = <37 IRQ_TYPE_LEVEL_LOW>;
+			gpio-controller;
+			ngpios = <24>;
+			#gpio-cells = <2>;
+			gpio-line-names = "", /* 0 */
+					  "", /* 1 */
+					  "", /* 2 */
+					  "", /* 3 */
+					  "power-button", /* 4 */
+					  "test-mode"; /* 5 */
+		};
+
+		led-controller {
+			compatible = "freebox,fbxgwr-pmu-led";
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			nleds = <3>;
+
+			led0@0 {
+				label = "green";
+				reg = <0x00>;
+			};
+
+			led1@1 {
+				label = "red";
+				reg = <0x01>;
+			};
+
+			led2@2 {
+				label = "blue";
+				reg = <0x02>;
+			};
+		};
+	};
+};
+
+&pcie01 {
+	status = "okay";
+	pinctrl-0 = <&pcie0_pins>;
+	pinctrl-names = "default";
+};
+
+&xdsl_phy {
+	status = "okay";
+
+	pinctrl-0 = <&ld0_pins>;
+	pinctrl-names = "default";
+
+	afe-id-0 = <(BCM63XX_XDSLPHY_AFE_CHIP_GFAST_CH0 |
+		   BCM63XX_XDSLPHY_AFE_LD_6303 |
+		   BCM63XX_XDSLPHY_AFE_FE_ANNEXA |
+		   BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_60 |
+		   BCM63XX_XDSLPHY_AFE_FE_RNC)>;
+};
+
+&xtm {
+	status = "okay";
+};
+
+&fbxgw8r_gpio {
+	w-disable-2 {
+		gpio = <&pinctrl 99 GPIO_ACTIVE_LOW>;
+		output-low;
+	};
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-common-nopmu.dtsi linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-common-nopmu.dtsi
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-common-nopmu.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-common-nopmu.dtsi	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,231 @@
+/*
+ * common dtsi file for fbxgw8r for boards without PMU
+ */
+/ {
+	leds {
+		compatible = "gpio-leds";
+		led0 {
+			gpios = <&pinctrl 82 GPIO_ACTIVE_HIGH>;
+			default-state = "on";
+			label = "white";
+		};
+		led1 {
+			gpios = <&pinctrl 83 GPIO_ACTIVE_HIGH>;
+			default-state = "off";
+			label = "red";
+		};
+	};
+};
+
+&i2c1_gpio {
+	adt7475@2e {
+		compatible = "adi,adt7475";
+		reg = <0x2e>;
+	};
+
+	ld6710-fbx@68 {
+		compatible = "leadtrend,ld6710-fbx";
+		reg = <0x68>;
+	};
+};
+
+&pinctrl {
+	gpio-line-names = "", /* 0 */
+			  "", /* 1 */
+			  "", /* 2 */
+			  "bt-rst", /* 3 */
+			  "", /* 4 */
+			  "", /* 5 */
+			  "", /* 6 */
+			  "", /* 7 */
+			  "", /* 8 */
+			  "wan-sfp-presence", /* 9 */
+			  "wan-sfp-rxlos", /* 10 */
+			  "wan-sfp-txfault", /* 11 */
+			  "wan-sfp-rs1", /* 12 */
+			  "", /* 13 */
+			  "wan-sfp-pwren", /* 14 */
+			  "", /* 15 */
+			  "", /* 16 */
+			  "", /* 17 */
+			  "", /* 18 */
+			  "i2c-int", /* 19 */
+			  "poe-on", /* 20 */
+			  "fan-int", /* 21 */
+			  "", /* 22 */
+			  "fxs-int", /* 23 */
+			  "", /* 24 */
+			  "", /* 25 */
+			  "phy25-int", /* 26 */
+			  "phy25-reset", /* 27 */
+			  "", /* 28 */
+			  "", /* 29 */
+			  "", /* 30 */
+			  "", /* 31 */
+			  "", /* 32 */
+			  "", /* 33 */
+			  "", /* 34 */
+			  "", /* 35 */
+			  "oled-rst", /* 36 */
+			  "boot-eth", /* 37 */
+			  "", /* 38 */
+			  "", /* 39 */
+			  "wan-sfp-rs0", /* 40 */
+			  "wan-sfp-pwrgood", /* 41 */
+			  "", /* 42 */
+			  "", /* 43 */
+			  "", /* 44 */
+			  "", /* 45 */
+			  "", /* 46 */
+			  "", /* 47 */
+			  "", /* 48 */
+			  "", /* 49 */
+			  "", /* 50 */
+			  "", /* 51 */
+			  "", /* 52 */
+			  "", /* 53 */
+			  "", /* 54 */
+			  "", /* 55 */
+			  "", /* 56 */
+			  "", /* 57 */
+			  "", /* 58 */
+			  "", /* 59 */
+			  "", /* 60 */
+			  "", /* 61 */
+			  "", /* 62 */
+			  "", /* 63 */
+			  "", /* 64 */
+			  "", /* 65 */
+			  "", /* 66 */
+			  "", /* 67 */
+			  "", /* 68 */
+			  "", /* 69 */
+			  "", /* 70 */
+			  "", /* 71 */
+			  "", /* 72 */
+			  "", /* 73 */
+			  "", /* 74 */
+			  "", /* 75 */
+			  "", /* 76 */
+			  "", /* 77 */
+			  "", /* 78 */
+			  "", /* 79 */
+			  "oled-data-select", /* 80 */
+			  "backlight-en", /* 81 */
+			  "led-white", /* 82 */
+			  "led-red", /* 83 */
+			  "", /* 84 */
+			  "", /* 85 */
+			  "keypad-left", /* 86 */
+			  "oled-vcc", /* 87 */
+			  "bt-rst", /* 88 */
+			  "ha-swd-clk", /* 89 */
+			  "ha-swd-io", /* 90 */
+			  "ha-rst", /* 91 */
+			  "", /* 92 */
+			  "", /* 93 */
+			  "", /* 94 */
+			  "", /* 95 */
+			  "keypad-up", /* 96 */
+			  "keypad-down", /* 97 */
+			  "w-enable-1", /* 98 */
+			  "w-enable-2", /* 99 */
+			  "", /* 100 */
+			  "", /* 101 */
+			  "keypad-right", /* 102 */
+			  "", /* 103 */
+			  "", /* 104 */
+			  "", /* 105 */
+			  "", /* 106 */
+			  "", /* 107 */
+			  "", /* 108 */
+			  "", /* 109 */
+			  "", /* 110 */
+			  "", /* 111 */
+			  "", /* 112 */
+			  "", /* 113 */
+			  "", /* 114 */
+			  "", /* 115 */
+			  "", /* 116 */
+			  "", /* 117 */
+			  "", /* 118 */
+			  "", /* 119 */
+			  "", /* 120 */
+			  "", /* 121 */
+			  "", /* 122 */
+			  "", /* 123 */
+			  "", /* 124 */
+			  "", /* 125 */
+			  "", /* 126 */
+			  "", /* 127 */
+			  "", /* 128 */
+			  "", /* 129 */
+			  ""; /* 130 */
+};
+
+&fbxgw8r_gpio {
+	poe-on {
+		gpio = <&pinctrl 20 0>;
+		output-low;
+	};
+};
+
+&keypad {
+	keyup {
+		label = "key up";
+		linux,code = <KEY_UP>;
+		gpios = <&pinctrl 97 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keydown {
+		label = "key down";
+		linux,code = <KEY_DOWN>;
+		gpios = <&pinctrl 96 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keyright {
+		label = "key right";
+		linux,code = <KEY_RIGHT>;
+		gpios = <&pinctrl 102 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keyleft {
+		label = "key left";
+		linux,code = <KEY_LEFT>;
+		gpios = <&pinctrl 86 GPIO_ACTIVE_HIGH>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+};
+
+&pcie0 {
+	status = "okay";
+	pinctrl-0 = <&pcie0_pins>;
+	pinctrl-names = "default";
+};
+
+&pcie1 {
+	status = "okay";
+	pinctrl-0 = <&pcie1_pins>;
+	pinctrl-names = "default";
+};
+
+&xdsl_phy {
+	status = "okay";
+
+	pinctrl-0 = <&ld0_pins>;
+	pinctrl-names = "default";
+
+	afe-id-0 = <(BCM63XX_XDSLPHY_AFE_CHIP_GFAST_CH0 |
+		   BCM63XX_XDSLPHY_AFE_LD_6303 |
+		   BCM63XX_XDSLPHY_AFE_FE_ANNEXA |
+		   BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_60 |
+		   BCM63XX_XDSLPHY_AFE_FE_RNC)>;
+};
+
+&xtm {
+	status = "okay";
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-common-onboard-pmu.dtsi linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-common-onboard-pmu.dtsi
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-common-onboard-pmu.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-common-onboard-pmu.dtsi	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,256 @@
+/*
+ * common dtsi file for fbxgw8r for boards with on-board PMU
+ */
+/ {
+	powerbtn {
+		compatible = "gpio-keys";
+		autorepeat = <0>;
+
+		powerbtn {
+			label = "power";
+			linux,code = <KEY_POWER>;
+			gpios = <&fbxpmu_gpio_expander 4 GPIO_ACTIVE_HIGH>;
+			debounce-interval = <50>;
+			linux,can-disable;
+		};
+	};
+
+	dgasp {
+		compatible = "misc,dgasp";
+		interrupt-parent = <&pinctrl>;
+		interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+
+		toset-gpios = <&pinctrl 99 GPIO_ACTIVE_LOW>; /* w-enable-2 */
+	};
+};
+
+&ramoops {
+	reg = <0x0 0x3fff0000 0x0 (64 * 1024)>;
+};
+
+&pinctrl {
+	gpio-line-names = "", /* 0 */
+			  "", /* 1 */
+			  "", /* 2 */
+			  "pmu-dyinggasp-int", /* 3 */
+			  "", /* 4 */
+			  "", /* 5 */
+			  "", /* 6 */
+			  "", /* 7 */
+			  "", /* 8 */
+			  "wan-sfp-presence", /* 9 */
+			  "wan-sfp-rxlos", /* 10 */
+			  "wan-sfp-txfault", /* 11 */
+			  "wan-sfp-rs1", /* 12 */
+			  "", /* 13 */
+			  "wan-sfp-pwren", /* 14 */
+			  "", /* 15 */
+			  "", /* 16 */
+			  "", /* 17 */
+			  "", /* 18 */
+			  "", /* 19 */
+			  "", /* 20 */
+			  "pmu-int", /* 21 */
+			  "", /* 22 */
+			  "fxs-int", /* 23 */
+			  "", /* 24 */
+			  "", /* 25 */
+			  "phy25-int", /* 26 */
+			  "phy25-reset", /* 27 */
+			  "", /* 28 */
+			  "", /* 29 */
+			  "", /* 30 */
+			  "", /* 31 */
+			  "", /* 32 */
+			  "", /* 33 */
+			  "", /* 34 */
+			  "", /* 35 */
+			  "oled-rst", /* 36 */
+			  "boot-eth", /* 37 */
+			  "", /* 38 */
+			  "", /* 39 */
+			  "wan-sfp-rs0", /* 40 */
+			  "wan-sfp-pwrgood", /* 41 */
+			  "", /* 42 */
+			  "", /* 43 */
+			  "", /* 44 */
+			  "", /* 45 */
+			  "", /* 46 */
+			  "", /* 47 */
+			  "", /* 48 */
+			  "", /* 49 */
+			  "", /* 50 */
+			  "", /* 51 */
+			  "", /* 52 */
+			  "", /* 53 */
+			  "", /* 54 */
+			  "", /* 55 */
+			  "", /* 56 */
+			  "", /* 57 */
+			  "", /* 58 */
+			  "", /* 59 */
+			  "", /* 60 */
+			  "", /* 61 */
+			  "", /* 62 */
+			  "", /* 63 */
+			  "", /* 64 */
+			  "", /* 65 */
+			  "", /* 66 */
+			  "", /* 67 */
+			  "", /* 68 */
+			  "", /* 69 */
+			  "", /* 70 */
+			  "", /* 71 */
+			  "", /* 72 */
+			  "", /* 73 */
+			  "", /* 74 */
+			  "", /* 75 */
+			  "", /* 76 */
+			  "", /* 77 */
+			  "", /* 78 */
+			  "", /* 79 */
+			  "oled-data-select", /* 80 */
+			  "", /* 81 */
+			  "", /* 82 */
+			  "", /* 83 */
+			  "", /* 84 */
+			  "", /* 85 */
+			  "", /* 86 */
+			  "oled-vcc", /* 87 */
+			  "", /* 88 */
+			  "", /* 89 */
+			  "", /* 90 */
+			  "", /* 91 */
+			  "", /* 92 */
+			  "", /* 93 */
+			  "", /* 94 */
+			  "", /* 95 */
+			  "", /* 96 */
+			  "", /* 97 */
+			  "", /* 98 */
+			  "w-enable-2", /* 99 */
+			  "", /* 100 */
+			  "", /* 101 */
+			  "", /* 102 */
+			  "", /* 103 */
+			  "", /* 104 */
+			  "", /* 105 */
+			  "", /* 106 */
+			  "", /* 107 */
+			  "", /* 108 */
+			  "", /* 109 */
+			  "", /* 110 */
+			  "", /* 111 */
+			  "", /* 112 */
+			  "", /* 113 */
+			  "", /* 114 */
+			  "", /* 115 */
+			  "", /* 116 */
+			  "", /* 117 */
+			  "", /* 118 */
+			  "", /* 119 */
+			  "", /* 120 */
+			  "", /* 121 */
+			  "", /* 122 */
+			  "", /* 123 */
+			  "", /* 124 */
+			  "", /* 125 */
+			  "", /* 126 */
+			  "", /* 127 */
+			  "", /* 128 */
+			  "", /* 129 */
+			  ""; /* 130 */
+};
+
+&dsl_reserved {
+	status = "disabled";
+};
+
+&i2c1_gpio {
+	fbxpmu@3c {
+		compatible = "freebox,fbxgwr-pmu";
+		reg = <0x3c>;
+
+		fbxpmu_gpio_expander: fbxpmu@3c {
+			compatible = "freebox,fbxgwr-pmu-gpio";
+			interrupt-parent = <&pinctrl>;
+			interrupts = <21 IRQ_TYPE_LEVEL_LOW>;
+			gpio-controller;
+			ngpios = <24>;
+			#gpio-cells = <2>;
+			gpio-line-names = "keypad-down", /* 0 */
+					  "keypad-up", /* 1 */
+					  "keypad-cancel", /* 2 */
+					  "keypad-ok", /* 3 */
+					  "power-button", /* 4 */
+					  "test-mode"; /* 5 */
+		};
+
+		led-controller {
+			compatible = "freebox,fbxgwr-pmu-led";
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			nleds = <3>;
+
+			led0@0 {
+				label = "green";
+				reg = <0x00>;
+			};
+
+			led1@1 {
+				label = "red";
+				reg = <0x01>;
+			};
+
+			led2@2 {
+				label = "blue";
+				reg = <0x02>;
+			};
+		};
+	};
+};
+
+&keypad {
+	keyup {
+		label = "key up";
+		linux,code = <KEY_UP>;
+		gpios = <&fbxpmu_gpio_expander 1 GPIO_ACTIVE_LOW>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keydown {
+		label = "key down";
+		linux,code = <KEY_DOWN>;
+		gpios = <&fbxpmu_gpio_expander 0 GPIO_ACTIVE_LOW>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keyright {
+		label = "key right";
+		linux,code = <KEY_RIGHT>;
+		gpios = <&fbxpmu_gpio_expander 3 GPIO_ACTIVE_LOW>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+	keyleft {
+		label = "key left";
+		linux,code = <KEY_LEFT>;
+		gpios = <&fbxpmu_gpio_expander 2 GPIO_ACTIVE_LOW>;
+		debounce-interval = <50>;
+		linux,can-disable;
+	};
+};
+
+&pcie01 {
+	status = "okay";
+	pinctrl-0 = <&pcie0_pins>;
+	pinctrl-names = "default";
+};
+
+&fbxgw8r_gpio {
+	w-disable-2 {
+		gpio = <&pinctrl 99 GPIO_ACTIVE_LOW>;
+		output-low;
+	};
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-common.dtsi linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-common.dtsi
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-common.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-common.dtsi	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,441 @@
+/*
+ * common dtsi file for fbxgw8r.
+ */
+#include <dt-bindings/input/linux-event-codes.h>
+#include "bcm63158.dtsi"
+
+#include <../../../include/generated/autoconf.h>
+
+/ {
+	model = "Freebox FBXGW8R";
+
+	chosen {
+		bootargs = "console=ttyAMA0,115200";
+		stdout-path = &arm_serial0;
+	};
+
+	reserved-memory {
+		ramoops: ramoops@1fff0000 {
+			compatible = "ramoops";
+			/* RAM top - 64k */
+			reg = <0x0 0x1fff0000 0x0 (64 * 1024)>;
+			record-size = <(64 * 1024)>;
+			ecc-size = <16>;
+			no-dump-oops;
+		};
+
+#ifdef CONFIG_ATH11K_QCN9074_FIXED_MEM_REGION
+		qca_pine_2G4: qca-pine-2G4 {
+			reg = <0x0 0x14000000 0x0 0x01A00000>;
+			no-map;
+		};
+		qca_pine_5G: qca-pine-5G {
+			reg = <0x0 0x15a00000 0x0 0x01A00000>;
+			no-map;
+		};
+#endif
+	};
+	fbxgw8r_gpio: fbxgw8r-gpio {
+		compatible = "fbx,fbxgpio";
+
+		wan-sfp-txfault {
+			gpio = <&pinctrl 11 0>;
+			input;
+		};
+		wan-sfp-pwren {
+			gpio = <&pinctrl 14 0>;
+			output-low;
+		};
+		wan-sfp-presence {
+			gpio = <&pinctrl 9 0>;
+			input;
+		};
+		wan-sfp-pwrgood {
+			gpio = <&pinctrl 41 0>;
+			input;
+		};
+		wan-sfp-rxlos {
+			gpio = <&pinctrl 10 0>;
+			input;
+		};
+		wan-sfp-rs1 {
+			gpio = <&pinctrl 12 0>;
+			output-high;
+		};
+		wan-sfp-rogue-in {
+			gpio = <&pinctrl 40 0>;
+			input;
+			no-claim;
+		};
+
+		boot-eth {
+			gpio = <&pinctrl 37 0>;
+			input;
+		};
+
+		ha-rst {
+			gpio = <&pinctrl 91 0>;
+			output-low;
+		};
+		backlight-en {
+			gpio = <&pinctrl 81 0>;
+			output-low;
+		};
+
+		board-id-0 {
+			gpio = <&pinctrl 35 0>;
+			input;
+		};
+		board-id-1 {
+			gpio = <&pinctrl 28 0>;
+			input;
+		};
+		board-id-2 {
+			gpio = <&pinctrl 29 0>;
+			input;
+		};
+		board-id-3 {
+			gpio = <&pinctrl 30 0>;
+			input;
+		};
+		board-id-4 {
+			gpio = <&pinctrl 31 0>;
+			input;
+		};
+		board-id-5 {
+			gpio = <&pinctrl 13 0>;
+			input;
+		};
+	};
+
+	keypad: keypad {
+		compatible = "gpio-keys";
+		autorepeat = <1>;
+	};
+
+	i2c0_gpio: i2c0-gpio {
+		compatible = "i2c-gpio";
+		gpios = <&pinctrl 24 0 /* sda */
+			 &pinctrl 25 0 /* scl */
+			>;
+		i2c-gpio,delay-us = <10>;	/* ~100 kHz */
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+
+	i2c1_gpio: i2c1-gpio {
+		compatible = "i2c-gpio";
+		gpios = <&pinctrl 15 0 /* sda */
+			 &pinctrl 16 0 /* scl */
+			>;
+		i2c-gpio,delay-us = <10>;	/* ~100 kHz */
+		#address-cells = <1>;
+		#size-cells = <0>;
+	};
+
+	aliases {
+		i2c0 = &i2c0_gpio;
+		i2c1 = &i2c1_gpio;
+	};
+
+	qcom,diag@0 {
+		compatible = "qcom,diag";
+		status = "ok";
+	};
+};
+
+&pinctrl {
+	arm_serial2_pins: arm-serial2-pins-0 {
+		arm_serial2_sout {
+			pinmux = <BCM63158_GPIO_18__FUNC_C_UART3_SOUT>;
+		};
+		arm_serial2_sin {
+			pinmux = <BCM63158_GPIO_17__FUNC_C_UART3_SIN>;
+		};
+	};
+};
+
+&arm_serial0 {
+	status = "okay";
+};
+
+
+&arm_serial2 {
+	/* home automation */
+	status = "okay";
+	pinctrl-0 = <&arm_serial2_pins>;
+	pinctrl-names = "default";
+};
+
+&sf2 {
+	status = "okay";
+
+	pinctrl-0 = <&gphy01_link_act_leds>;
+	pinctrl-names = "default";
+
+	sf2,mdio {
+		reset-gpio = <&pinctrl 27 GPIO_ACTIVE_LOW>;
+		reset-delay-us = <100000>;
+		reset-post-delay-us = <100000>;
+		keep-broken-phy;
+	};
+};
+
+&sf2_port0 {
+	status = "okay";
+	label = "swp1";
+	sf2,led-link-act = <20>;
+};
+
+&sf2_port1 {
+	status = "okay";
+	label = "swp2";
+	sf2,led-link-act = <21>;
+	dsa,cpu-port = <&sf2_port7>;
+};
+
+&sf2_port5 {
+	status = "okay";
+};
+
+&sf2_port6 {
+	status = "okay";
+	xbar-in-port = <0>;
+	phy-handle = <&port6_phy>;
+	label = "swp3";
+	dsa,cpu-port = <&sf2_port5>;
+};
+
+&sf2_port7 {
+	status = "okay";
+};
+
+&sf2_port8 {
+	status = "okay";
+};
+
+&sf2_qphy0 {
+	status = "okay";
+};
+
+&sf2_qphy1 {
+	status = "okay";
+};
+
+&runner_unimac0 {
+	status = "okay";
+	fbxserial-mac-address = <0>;
+};
+
+&runner_unimac1 {
+	status = "okay";
+	fbxserial-mac-address = <0>;
+};
+
+&runner_unimac2 {
+	status = "okay";
+	fbxserial-mac-address = <0>;
+};
+
+&runner_xport0 {
+	status = "okay";
+	fbxserial-mac-address = <0>;
+	pinctrl-0 = <&sfp_rogue1_pins>;
+	pinctrl-1 = <&sfp_rs0_gpio_pins>;
+	pinctrl-names = "rogue1", "rs0";
+
+};
+
+&sdhci {
+	status = "okay";
+
+	pinctrl-0 = <&emmc_pins>;
+	pinctrl-names = "default";
+
+	user-ro-area = /bits/64 <0 (32 << 20)>;
+	boot-ro-area = /bits/64 <0 (4 << 20)>;
+
+	partitions-main {
+		compatible = "fixed-partitions";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		disk-name = "mmcblk%d";
+
+		bank0@0 {
+			label = "bank0";
+			reg = /bits/64 <0 (32 * 1024 * 1024)>;
+			read-only;
+		};
+
+		bank1@0 {
+			label = "bank1";
+			reg = /bits/64 <(-1) (256 * 1024 * 1024)>;
+		};
+
+		nvram@0 {
+			label = "nvram";
+			reg = /bits/64 <(-1) (4 * 1024 * 1024)>;
+		};
+
+		config@0 {
+			label = "config";
+			reg = /bits/64 <(-1) (32 * 1024 * 1024)>;
+		};
+
+		new-bank0@0 {
+			label = "new_bank0";
+			reg = /bits/64 <(-1) (32 * 1024 * 1024)>;
+		};
+
+                fbxmbr@0 {
+			label = "fbxmbr";
+			reg = /bits/64 <(-1) (4096)>;
+                };
+
+		fortknox@0 {
+			label = "fortknox";
+			reg = /bits/64 <(-1) (128 * 1024 * 1024)>;
+                };
+
+		userdata@0 {
+			label = "userdata";
+			reg = /bits/64 <(-1) (-1)>;
+                };
+
+	};
+
+
+	partitions-boot {
+		compatible = "fixed-partitions";
+		#address-cells = <2>;
+		#size-cells = <2>;
+		disk-name = "mmcblk%dboot0";
+
+		cfe@0 {
+			label = "cfe0";
+			reg = /bits/64 <0 (256 * 1024)>;
+			read-only;
+		};
+		cfe@1 {
+			label = "cfe1";
+			reg = /bits/64 <(-1) (256 * 1024)>;
+			read-only;
+		};
+		cfe@2 {
+			label = "cfe2";
+			reg = /bits/64 <(-1) (256 * 1024)>;
+			read-only;
+		};
+
+		serial@0 {
+			label = "fbxserial";
+			reg = /bits/64 <(1024 * 1024) (8 * 1024)>;
+			read-only;
+		};
+
+		fbxboot@0 {
+			label = "fbxboot";
+			reg = /bits/64 <(-1) (8 * 1024)>;
+			read-only;
+		};
+
+		calibration@0 {
+			label = "calibration";
+			reg = /bits/64 <(-1) (64 * 1024)>;
+			read-only;
+		};
+	};
+};
+
+&spi_pins {
+	spi-ss0 {
+		pinmux = <BCM63158_GPIO_111__FUNC_SPIM_SS0_B>;
+	};
+	spi-ss1 {
+		pinmux = <BCM63158_GPIO_112__FUNC_SPIM_SS1_B>;
+	};
+};
+
+&hs_spim {
+	status = "okay";
+	num-cs = <2>;
+	broadcom,dummy-cs = <2>;
+	pinctrl-0 = <&spi_pins>;
+	pinctrl-names = "default";
+
+	ssd1320@0 {
+		compatible = "solomon,ssd1320";
+		reg = <0>;
+
+		spi-max-frequency = <(14 * 1000 * 1000)>;
+
+		/*
+		* display mapping info (when looking at it such that the
+		* keypad is on the right):
+		*
+		* SEG used on x-axis
+		* COM used on y-axis
+		*
+		* top-left: COM159/SEG0
+		* bottom-right: COM0/SEG159
+		*
+		* visible area (160x80)
+		*  top-left: COM119/SEG0
+		*  bottom-right: COM40/SEG159
+		*
+		* SEG are mapped in alternate: SEG0, SEG80, SEG1, ...
+		*/
+		ssd1320,com-range = <40 119>;
+		ssd1320,seg-range = <0 159>;
+		ssd1320,com-reverse-dir;
+		ssd1320,seg-first-odd;
+
+		ssd1320,clk-divide-ratio = <0xb1>;
+		ssd1320,precharge-period = <0x42>;
+		ssd1320,vcom-deselect-level = <0x30>;
+		ssd1320,precharge-voltage = <0x10>;
+		ssd1320,iref = <0x10>;
+
+		ssd1320,display-enh-a = <0xd5>;
+		ssd1320,display-enh-b = <0x21>;
+
+		ssd1320,grayscale-table = <0x01 0x02 0x03 0x05 0x08 0x0b
+			0xe 0x12 0x17 0x1c 0x22 0x29 0x2f 0x36 0x3f>;
+
+		ssd1320,default-brightness = <0x9f>;
+		ssd1320,max-brightness = <0xff>;
+
+		ssd1320,watchdog = <300>;
+		ssd1320,vcc-gpio = <&pinctrl 87 GPIO_ACTIVE_HIGH>;
+		ssd1320,data-select-gpio = <&pinctrl 80 GPIO_ACTIVE_HIGH>;
+		ssd1320,reset-gpio = <&pinctrl 36 GPIO_ACTIVE_LOW>;
+	};
+
+	spi-slac@1 {
+		compatible = "microsemi,le9641";
+		reg = <1>;
+		spi-max-frequency = <(1 * 1000 * 1000)>;
+	};
+};
+
+&bcm_pcm {
+	status = "okay";
+	pinctrl-0 = <&pcm_pins>;
+	pinctrl-names = "default";
+};
+
+&usb {
+	status = "okay";
+
+	pinctrl-0 = <&usb1_pins>;
+	pinctrl-names = "default";
+
+	brcm,pwren-high;
+	brcm,pwrflt-low;
+};
+
+&memc {
+	// status = "disabled";
+	brcm,auto-sr-en;
+	brcm,auto-sr-thresh = <20>;
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-phy-aquantia.dtsi linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-phy-aquantia.dtsi
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-phy-aquantia.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-phy-aquantia.dtsi	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,12 @@
+&sf2 {
+	sf2,mdio {
+		/* aquantia PHY */
+		port6_phy: ethernet-phy@8 {
+			compatible = "ethernet-phy-ieee802.3-c45";
+			status = "okay";
+			reg = <8>;
+			eee-broken-2500t;
+			eee-broken-5000t;
+		};
+	};
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-phy-realtek.dtsi linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-phy-realtek.dtsi
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r-phy-realtek.dtsi	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r-phy-realtek.dtsi	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,10 @@
+&sf2 {
+	sf2,mdio {
+		/* realtek PHY */
+		port6_phy: ethernet-phy@8 {
+			compatible = "ethernet-phy-ieee802.3-c45";
+			status = "okay";
+			reg = <7>;
+		};
+	};
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r.dts linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r.dts
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r.dts	2025-09-25 17:40:30.219340707 +0200
@@ -0,0 +1,13 @@
+/*
+ * Freebox FBXGW8R Board DTS
+ */
+/dts-v1/;
+
+#include "fbxgw8r-common.dtsi"
+#include "fbxgw8r-common-nopmu.dtsi"
+#include "fbxgw8r-phy-aquantia.dtsi"
+
+/ {
+	compatible = "freebox,fbxgw8r-board-00", "freebox,fbxgw8r",
+		"brcm,bcm63158";
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r_pcie_pine_asmedia.dts linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r_pcie_pine_asmedia.dts
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r_pcie_pine_asmedia.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r_pcie_pine_asmedia.dts	2025-09-25 17:40:30.223340727 +0200
@@ -0,0 +1,60 @@
+/dts-v1/;
+/plugin/;
+
+/ {
+	compatible = "freebox,fbxgw8r";
+
+	fragment@1 {
+		target = <&pcie0>;
+		__overlay__ {
+			#address-cells = <3>;
+			#size-cells = <2>;
+			rc@0,0 {
+				#address-cells = <3>;
+				#size-cells = <2>;
+				reg = <0x0000 0x0 0x0 0x0 0x0>;
+				ep_pine6G {
+					reg = <0x0000 0x0 0x0 0x0 0x0>;
+					qcom,board_id = <0xa9>;
+				};
+			};
+		};
+	};
+
+
+	fragment@2 {
+		target = <&pcie1>;
+		__overlay__ {
+			#address-cells = <3>;
+			#size-cells = <2>;
+			rc@0,0 {
+				#address-cells = <3>;
+				#size-cells = <2>;
+				reg = <0x0000 0x0 0x0 0x0 0x0>;
+				us {
+					#address-cells = <3>;
+					#size-cells = <2>;
+					reg = <0x0000 0x0 0x0 0x0 0x0>;
+					ds1 {
+						#address-cells = <3>;
+						#size-cells = <2>;
+						reg = <0x1800 0x0 0x0 0x0 0x0>;
+						ep_pine2G4 {
+							reg = <0x0000 0x0 0x0 0x0 0x0>;
+							qcom,board_id = <0xa6>;
+						};
+					};
+					ds2 {
+						#address-cells = <3>;
+						#size-cells = <2>;
+						reg = <0x3800 0x0 0x0 0x0 0x0>;
+						ep_pine5G {
+							reg = <0x0000 0x0 0x0 0x0 0x0>;
+							qcom,board_id = <0xa3>;
+						};
+					};
+				};
+			};
+		};
+	};
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r_pcie_pine_dualband_noswitch.dts linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r_pcie_pine_dualband_noswitch.dts
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx./fbxgw8r_pcie_pine_dualband_noswitch.dts	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/broadcom/bcm63xx/fbxgw8r_pcie_pine_dualband_noswitch.dts	2025-09-25 17:40:30.223340727 +0200
@@ -0,0 +1,49 @@
+/dts-v1/;
+/plugin/;
+
+#include <../../../include/generated/autoconf.h>
+
+/ {
+	compatible = "freebox,fbxgw8r";
+
+	fragment@1 {
+		target = <&pcie0>;
+		__overlay__ {
+			#address-cells = <3>;
+			#size-cells = <2>;
+			rc@0,0 {
+				#address-cells = <3>;
+				#size-cells = <2>;
+				reg = <0x0000 0x0 0x0 0x0 0x0>;
+				ep_pine5G {
+					reg = <0x0000 0x0 0x0 0x0 0x0>;
+					qcom,board_id = <0xa3>;
+#ifdef CONFIG_ATH11K_QCN9074_FIXED_MEM_REGION
+					memory-region = <&qca_pine_5G>;
+#endif
+				};
+			};
+		};
+	};
+
+
+	fragment@2 {
+		target = <&pcie1>;
+		__overlay__ {
+			#address-cells = <3>;
+			#size-cells = <2>;
+			rc@0,0 {
+				#address-cells = <3>;
+				#size-cells = <2>;
+				reg = <0x0000 0x0 0x0 0x0 0x0>;
+				ep_pine2G4 {
+					reg = <0x0000 0x0 0x0 0x0 0x0>;
+					qcom,board_id = <0xa6>;
+#ifdef CONFIG_ATH11K_QCN9074_FIXED_MEM_REGION
+					memory-region = <&qca_pine_2G4>;
+#endif
+				};
+			};
+		};
+	};
+};
diff -Nruw linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/cortina-access./Makefile linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/cortina-access/Makefile
--- linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/cortina-access./Makefile	1970-01-01 01:00:00.000000000 +0100
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/arm64/cortina-access/Makefile	2025-09-25 17:40:30.227340746 +0200
@@ -0,0 +1,16 @@
+dtb-$(CONFIG_ARCH_CORTINA_ACCESS) += fbxgw3r-evb.dtb
+dtb-$(CONFIG_ARCH_CORTINA_ACCESS) += fbxgw3r-board-00.dtb
+dtb-$(CONFIG_ARCH_CORTINA_ACCESS) += ca8289-engboard.dtb
+dtb-$(CONFIG_ARCH_CORTINA_ACCESS) += ca8289-refboard.dtb
+
+always-$(CONFIG_ARCH_CORTINA_ACCESS)	+= fbxgw3r_dtbs
+clean-files				+= fbxgw3r_dtbs
+board-dtbs				=  \
+					fbxgw3r-evb.dtb \
+					fbxgw3r-board-00.dtb
+
+cmd_dtbs               = ./scripts/dtbs.sh $@ $^
+quiet_cmd_dtbs         = DTBS    $@
+
+$(obj)/fbxgw3r_dtbs: $(addprefix $(obj)/,$(board-dtbs))
+	$(call cmd,dtbs)
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/dt-bindings/brcm,bcm63158-ubus.h	2025-09-25 17:40:36.915373911 +0200
@@ -0,0 +1,31 @@
+
+#pragma once
+
+/*
+ * this is SoC specific, maybe abstract this in some kind of virtual
+ * ID just like the PMC code does.
+ */
+#define UBUS_PORT_ID_MEMC        1
+#define UBUS_PORT_ID_BIU         2
+#define UBUS_PORT_ID_PER         3
+#define UBUS_PORT_ID_USB         4
+#define UBUS_PORT_ID_SPU         5
+#define UBUS_PORT_ID_DSL         6
+#define UBUS_PORT_ID_PERDMA      7
+#define UBUS_PORT_ID_PCIE0       8
+#define UBUS_PORT_ID_PCIE2       9
+#define UBUS_PORT_ID_PCIE3       10
+#define UBUS_PORT_ID_DSLCPU      11
+#define UBUS_PORT_ID_WAN         12
+#define UBUS_PORT_ID_PMC         13
+#define UBUS_PORT_ID_SWH         14
+#define UBUS_PORT_ID_PSRAM       16
+#define UBUS_PORT_ID_VPB         20
+#define UBUS_PORT_ID_FPM         21
+#define UBUS_PORT_ID_QM          22
+#define UBUS_PORT_ID_DQM         23
+#define UBUS_PORT_ID_DMA0        24
+#define UBUS_PORT_ID_NATC        26
+#define UBUS_PORT_ID_SYSXRDP     27
+#define UBUS_PORT_ID_SYS         31
+#define UBUS_PORT_ID_RQ0         32
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/dt-bindings/brcm,bcm63xx-pcie.h	2025-09-25 17:40:36.915373911 +0200
@@ -0,0 +1,7 @@
+
+#pragma once
+
+#define PCIE_SPEED_DEFAULT	0
+#define PCIE_SPEED_GEN1		1
+#define PCIE_SPEED_GEN2		2
+#define PCIE_SPEED_GEN3		3
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/dt-bindings/net/realtek-phy-rtl8211f.h	2025-09-25 17:40:36.923373950 +0200
@@ -0,0 +1,19 @@
+/*
+ * Device Tree constants for Realtek rtl8211f PHY
+ *
+ * Author: Remi Pommarel
+ *
+ * License: GPL
+ * Copyright (c) 2017 Remi Pommarel
+ */
+
+#ifndef _DT_BINDINGS_RTL_8211F_H
+#define _DT_BINDINGS_RTL_8211F_H
+
+#define RTL8211F_LED_MODE_10M			0x1
+#define RTL8211F_LED_MODE_100M			0x2
+#define RTL8211F_LED_MODE_1000M			0x8
+#define RTL8211F_LED_MODE_ACT			0x10
+
+#endif
+
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/dt-bindings/pinctrl/bcm63138-pinfunc.h	2025-09-25 17:40:36.923373950 +0200
@@ -0,0 +1,512 @@
+#ifndef _DT_BINDINGS_BCM63138_PINFUNC_H
+#define _DT_BINDINGS_BCM63138_PINFUNC_H
+
+#define BCM63138_PIN_NO(x)		((x) << 8)
+#define BCM63138_GET_PIN_NO(x)		((x) >> 8)
+#define BCM63138_GET_PIN_FUNC(x)	((x) & 0xff)
+
+#define BCM63138_GPIO_00__FUNC_SER_LED_DATA	(BCM63138_PIN_NO(0) | 1)
+#define BCM63138_GPIO_00__FUNC_LED_00		(BCM63138_PIN_NO(0) | 4)
+#define BCM63138_GPIO_00__FUNC_GPIO_00		(BCM63138_PIN_NO(0) | 5)
+
+#define BCM63138_GPIO_01__FUNC_SER_LED_CLK	(BCM63138_PIN_NO(1) | 1)
+#define BCM63138_GPIO_01__FUNC_LED_01		(BCM63138_PIN_NO(1) | 4)
+#define BCM63138_GPIO_01__FUNC_GPIO_01		(BCM63138_PIN_NO(1) | 5)
+
+#define BCM63138_GPIO_02__FUNC_SER_LED_MASK	(BCM63138_PIN_NO(2) | 1)
+#define BCM63138_GPIO_02__FUNC_LED_02		(BCM63138_PIN_NO(2) | 4)
+#define BCM63138_GPIO_02__FUNC_GPIO_02		(BCM63138_PIN_NO(2) | 5)
+
+#define BCM63138_GPIO_03__FUNC_UART2_CTS	(BCM63138_PIN_NO(3) | 1)
+#define BCM63138_GPIO_03__FUNC_NTR_PULSE_IN_0	(BCM63138_PIN_NO(3) | 2)
+#define BCM63138_GPIO_03__FUNC_MOCA_GPIO_0	(BCM63138_PIN_NO(3) | 3)
+#define BCM63138_GPIO_03__FUNC_LED_03		(BCM63138_PIN_NO(3) | 4)
+#define BCM63138_GPIO_03__FUNC_GPIO_03		(BCM63138_PIN_NO(3) | 5)
+
+#define BCM63138_GPIO_04__FUNC_UART2_RTS	(BCM63138_PIN_NO(4) | 1)
+#define BCM63138_GPIO_04__FUNC_NTR_PULSE_OUT_0	(BCM63138_PIN_NO(4) | 2)
+#define BCM63138_GPIO_04__FUNC_MOCA_GPIO_1	(BCM63138_PIN_NO(4) | 3)
+#define BCM63138_GPIO_04__FUNC_LED_04		(BCM63138_PIN_NO(4) | 4)
+#define BCM63138_GPIO_04__FUNC_GPIO_04		(BCM63138_PIN_NO(4) | 5)
+
+#define BCM63138_GPIO_05__FUNC_UART2_SIN	(BCM63138_PIN_NO(5) | 1)
+#define BCM63138_GPIO_05__FUNC_MOCA_GPIO_2	(BCM63138_PIN_NO(5) | 3)
+#define BCM63138_GPIO_05__FUNC_LED_05		(BCM63138_PIN_NO(5) | 4)
+#define BCM63138_GPIO_05__FUNC_GPIO_05		(BCM63138_PIN_NO(5) | 5)
+
+#define BCM63138_GPIO_06__FUNC_UART2_SOUT	(BCM63138_PIN_NO(6) | 1)
+#define BCM63138_GPIO_06__FUNC_MOCA_GPIO_3	(BCM63138_PIN_NO(6) | 3)
+#define BCM63138_GPIO_06__FUNC_LED_06		(BCM63138_PIN_NO(6) | 4)
+#define BCM63138_GPIO_06__FUNC_GPIO_06		(BCM63138_PIN_NO(6) | 5)
+
+#define BCM63138_GPIO_07__FUNC_SPIM_SS5_B	(BCM63138_PIN_NO(7) | 1)
+#define BCM63138_GPIO_07__FUNC_NTR_PULSE_OUT_1	(BCM63138_PIN_NO(7) | 2)
+#define BCM63138_GPIO_07__FUNC_MOCA_GPIO_4	(BCM63138_PIN_NO(7) | 3)
+#define BCM63138_GPIO_07__FUNC_LED_07		(BCM63138_PIN_NO(7) | 4)
+#define BCM63138_GPIO_07__FUNC_GPIO_07		(BCM63138_PIN_NO(7) | 5)
+
+#define BCM63138_GPIO_08__FUNC_SPIM_SS4_B	(BCM63138_PIN_NO(8) | 1)
+#define BCM63138_GPIO_08__FUNC_MOCA_GPIO_5	(BCM63138_PIN_NO(8) | 3)
+#define BCM63138_GPIO_08__FUNC_LED_08		(BCM63138_PIN_NO(8) | 4)
+#define BCM63138_GPIO_08__FUNC_GPIO_08		(BCM63138_PIN_NO(8) | 5)
+
+#define BCM63138_GPIO_09__FUNC_SPIM_SS3_B	(BCM63138_PIN_NO(9) | 1)
+#define BCM63138_GPIO_09__FUNC_LD1_DIN		(BCM63138_PIN_NO(9) | 2)
+#define BCM63138_GPIO_09__FUNC_LED_09		(BCM63138_PIN_NO(9) | 4)
+#define BCM63138_GPIO_09__FUNC_GPIO_09		(BCM63138_PIN_NO(9) | 5)
+
+#define BCM63138_GPIO_10__FUNC_SPIM_SS2_B	(BCM63138_PIN_NO(10) | 1)
+#define BCM63138_GPIO_10__FUNC_LD1_DCLK		(BCM63138_PIN_NO(10) | 2)
+#define BCM63138_GPIO_10__FUNC_LED_10		(BCM63138_PIN_NO(10) | 4)
+#define BCM63138_GPIO_10__FUNC_GPIO_10		(BCM63138_PIN_NO(10) | 5)
+
+#define BCM63138_GPIO_11__FUNC_MOCA_GPIO_6	(BCM63138_PIN_NO(11) | 3)
+#define BCM63138_GPIO_11__FUNC_LED_11		(BCM63138_PIN_NO(11) | 4)
+#define BCM63138_GPIO_11__FUNC_GPIO_11		(BCM63138_PIN_NO(11) | 5)
+
+#define BCM63138_GPIO_12__FUNC_NTR_PULSE_IN	(BCM63138_PIN_NO(12) | 1)
+#define BCM63138_GPIO_12__FUNC_MOCA_GPIO_7	(BCM63138_PIN_NO(12) | 3)
+#define BCM63138_GPIO_12__FUNC_LED_12		(BCM63138_PIN_NO(12) | 4)
+#define BCM63138_GPIO_12__FUNC_GPIO_12		(BCM63138_PIN_NO(12) | 5)
+
+#define BCM63138_GPIO_13__FUNC_NTR_PULSE_OUT_0	(BCM63138_PIN_NO(13) | 1)
+#define BCM63138_GPIO_13__FUNC_MOCA_GPIO_8	(BCM63138_PIN_NO(13) | 3)
+#define BCM63138_GPIO_13__FUNC_LED_13		(BCM63138_PIN_NO(13) | 4)
+#define BCM63138_GPIO_13__FUNC_GPIO_13		(BCM63138_PIN_NO(13) | 5)
+
+#define BCM63138_GPIO_14__FUNC_MOCA_GPIO_9	(BCM63138_PIN_NO(14) | 3)
+#define BCM63138_GPIO_14__FUNC_LED_14		(BCM63138_PIN_NO(14) | 4)
+#define BCM63138_GPIO_14__FUNC_GPIO_14		(BCM63138_PIN_NO(14) | 5)
+
+#define BCM63138_GPIO_15__FUNC_LED_15		(BCM63138_PIN_NO(15) | 4)
+#define BCM63138_GPIO_15__FUNC_GPIO_15		(BCM63138_PIN_NO(15) | 5)
+
+#define BCM63138_GPIO_16__FUNC_DECT_PD_0	(BCM63138_PIN_NO(16) | 3)
+#define BCM63138_GPIO_16__FUNC_LED_16		(BCM63138_PIN_NO(16) | 4)
+#define BCM63138_GPIO_16__FUNC_GPIO_16		(BCM63138_PIN_NO(16) | 5)
+
+#define BCM63138_GPIO_17__FUNC_DECT_PD_1	(BCM63138_PIN_NO(17) | 3)
+#define BCM63138_GPIO_17__FUNC_LED_17		(BCM63138_PIN_NO(17) | 4)
+#define BCM63138_GPIO_17__FUNC_GPIO_17		(BCM63138_PIN_NO(17) | 5)
+
+#define BCM63138_GPIO_18__FUNC_VREG_CLK		(BCM63138_PIN_NO(18) | 1)
+#define BCM63138_GPIO_18__FUNC_LED_18		(BCM63138_PIN_NO(18) | 4)
+#define BCM63138_GPIO_18__FUNC_GPIO_18		(BCM63138_PIN_NO(18) | 5)
+
+#define BCM63138_GPIO_19__FUNC_LED_19		(BCM63138_PIN_NO(19) | 4)
+#define BCM63138_GPIO_19__FUNC_GPIO_19		(BCM63138_PIN_NO(19) | 5)
+
+#define BCM63138_GPIO_20__FUNC_UART2_CTS	(BCM63138_PIN_NO(20) | 2)
+#define BCM63138_GPIO_20__FUNC_LED_20		(BCM63138_PIN_NO(20) | 4)
+#define BCM63138_GPIO_20__FUNC_GPIO_20		(BCM63138_PIN_NO(20) | 5)
+
+#define BCM63138_GPIO_21__FUNC_UART2_RTS	(BCM63138_PIN_NO(21) | 2)
+#define BCM63138_GPIO_21__FUNC_LED_21		(BCM63138_PIN_NO(21) | 4)
+#define BCM63138_GPIO_21__FUNC_GPIO_21		(BCM63138_PIN_NO(21) | 5)
+
+#define BCM63138_GPIO_22__FUNC_UART2_SIN	(BCM63138_PIN_NO(22) | 2)
+#define BCM63138_GPIO_22__FUNC_LED_22		(BCM63138_PIN_NO(22) | 4)
+#define BCM63138_GPIO_22__FUNC_GPIO_22		(BCM63138_PIN_NO(22) | 5)
+
+#define BCM63138_GPIO_23__FUNC_UART2_SOUT	(BCM63138_PIN_NO(23) | 2)
+#define BCM63138_GPIO_23__FUNC_LED_23		(BCM63138_PIN_NO(23) | 4)
+#define BCM63138_GPIO_23__FUNC_GPIO_23		(BCM63138_PIN_NO(23) | 5)
+
+#define BCM63138_GPIO_24__FUNC_NTR_PULSE_OUT_1	(BCM63138_PIN_NO(24) | 1)
+#define BCM63138_GPIO_24__FUNC_I2C_SDA		(BCM63138_PIN_NO(24) | 3)
+#define BCM63138_GPIO_24__FUNC_LED_24		(BCM63138_PIN_NO(24) | 4)
+#define BCM63138_GPIO_24__FUNC_GPIO_24		(BCM63138_PIN_NO(24) | 5)
+
+#define BCM63138_GPIO_25__FUNC_SPIM_SS2_B	(BCM63138_PIN_NO(25) | 1)
+#define BCM63138_GPIO_25__FUNC_NTR_PULSE_IN	(BCM63138_PIN_NO(25) | 2)
+#define BCM63138_GPIO_25__FUNC_I2C_SCL		(BCM63138_PIN_NO(25) | 3)
+#define BCM63138_GPIO_25__FUNC_LED_25		(BCM63138_PIN_NO(25) | 4)
+#define BCM63138_GPIO_25__FUNC_GPIO_25		(BCM63138_PIN_NO(25) | 5)
+
+#define BCM63138_GPIO_26__FUNC_SPIM_SS3_B	(BCM63138_PIN_NO(26) | 1)
+#define BCM63138_GPIO_26__FUNC_NTR_PULSE_OUT_0	(BCM63138_PIN_NO(26) | 2)
+#define BCM63138_GPIO_26__FUNC_NTR_PULSE_IN	(BCM63138_PIN_NO(26) | 3)
+#define BCM63138_GPIO_26__FUNC_LED_26		(BCM63138_PIN_NO(26) | 4)
+#define BCM63138_GPIO_26__FUNC_GPIO_26		(BCM63138_PIN_NO(26) | 5)
+
+#define BCM63138_GPIO_27__FUNC_SPIM_SS4_B	(BCM63138_PIN_NO(27) | 1)
+#define BCM63138_GPIO_27__FUNC_NTR_PULSE_OUT_1	(BCM63138_PIN_NO(27) | 2)
+#define BCM63138_GPIO_27__FUNC_UART2_SIN	(BCM63138_PIN_NO(27) | 3)
+#define BCM63138_GPIO_27__FUNC_LED_27		(BCM63138_PIN_NO(27) | 4)
+#define BCM63138_GPIO_27__FUNC_GPIO_27		(BCM63138_PIN_NO(27) | 5)
+
+#define BCM63138_GPIO_28__FUNC_SPIM_SS5_B	(BCM63138_PIN_NO(28) | 1)
+#define BCM63138_GPIO_28__FUNC_AE_LOS		(BCM63138_PIN_NO(28) | 2)
+#define BCM63138_GPIO_28__FUNC_UART2_SOUT	(BCM63138_PIN_NO(28) | 3)
+#define BCM63138_GPIO_28__FUNC_LED_28		(BCM63138_PIN_NO(28) | 4)
+#define BCM63138_GPIO_28__FUNC_GPIO_28		(BCM63138_PIN_NO(28) | 5)
+
+#define BCM63138_GPIO_29__FUNC_SER_LED_DATA	(BCM63138_PIN_NO(29) | 1)
+#define BCM63138_GPIO_29__FUNC_LED_29		(BCM63138_PIN_NO(29) | 4)
+#define BCM63138_GPIO_29__FUNC_GPIO_29		(BCM63138_PIN_NO(29) | 5)
+
+#define BCM63138_GPIO_30__FUNC_SER_LED_CLK	(BCM63138_PIN_NO(30) | 1)
+#define BCM63138_GPIO_30__FUNC_LED_30		(BCM63138_PIN_NO(30) | 4)
+#define BCM63138_GPIO_30__FUNC_GPIO_30		(BCM63138_PIN_NO(30) | 5)
+
+#define BCM63138_GPIO_31__FUNC_SER_LED_MASK	(BCM63138_PIN_NO(31) | 1)
+#define BCM63138_GPIO_31__FUNC_LED_31		(BCM63138_PIN_NO(31) | 4)
+#define BCM63138_GPIO_31__FUNC_GPIO_31		(BCM63138_PIN_NO(31) | 5)
+
+#define BCM63138_GPIO_32__FUNC_EXT_IRQ_0	(BCM63138_PIN_NO(32) | 1)
+#define BCM63138_GPIO_32__FUNC_GPIO_32		(BCM63138_PIN_NO(32) | 5)
+
+#define BCM63138_GPIO_33__FUNC_EXT_IRQ_1	(BCM63138_PIN_NO(33) | 1)
+#define BCM63138_GPIO_33__FUNC_GPIO_33		(BCM63138_PIN_NO(33) | 5)
+
+#define BCM63138_GPIO_34__FUNC_EXT_IRQ_2	(BCM63138_PIN_NO(34) | 1)
+#define BCM63138_GPIO_34__FUNC_GPIO_34		(BCM63138_PIN_NO(34) | 5)
+
+#define BCM63138_GPIO_35__FUNC_EXT_IRQ_3	(BCM63138_PIN_NO(35) | 1)
+#define BCM63138_GPIO_35__FUNC_SYS_IRQ_OUT	(BCM63138_PIN_NO(35) | 2)
+#define BCM63138_GPIO_35__FUNC_GPIO_35		(BCM63138_PIN_NO(35) | 5)
+
+#define BCM63138_GPIO_36__FUNC_EXT_IRQ_4	(BCM63138_PIN_NO(36) | 1)
+#define BCM63138_GPIO_36__FUNC_AE_LOS		(BCM63138_PIN_NO(36) | 2)
+#define BCM63138_GPIO_36__FUNC_GPIO_36		(BCM63138_PIN_NO(36) | 5)
+
+#define BCM63138_GPIO_37__FUNC_EXT_IRQ_5	(BCM63138_PIN_NO(37) | 1)
+#define BCM63138_GPIO_37__FUNC_VREG_CLK		(BCM63138_PIN_NO(37) | 2)
+#define BCM63138_GPIO_37__FUNC_GPIO_37		(BCM63138_PIN_NO(37) | 5)
+
+#define BCM63138_GPIO_38__FUNC_NAND_CE_B	(BCM63138_PIN_NO(38) | 3)
+#define BCM63138_GPIO_38__FUNC_GPIO_38		(BCM63138_PIN_NO(38) | 5)
+
+#define BCM63138_GPIO_39__FUNC_NAND_RE_B	(BCM63138_PIN_NO(39) | 3)
+#define BCM63138_GPIO_39__FUNC_GPIO_39		(BCM63138_PIN_NO(39) | 5)
+
+#define BCM63138_GPIO_40__FUNC_NAND_RB_B	(BCM63138_PIN_NO(40) | 3)
+#define BCM63138_GPIO_40__FUNC_GPIO_40		(BCM63138_PIN_NO(40) | 5)
+
+#define BCM63138_GPIO_41__FUNC_NAND_DATA_00	(BCM63138_PIN_NO(41) | 3)
+#define BCM63138_GPIO_41__FUNC_GPIO_41		(BCM63138_PIN_NO(41) | 5)
+
+#define BCM63138_GPIO_42__FUNC_DECT_PD_0	(BCM63138_PIN_NO(42) | 1)
+#define BCM63138_GPIO_42__FUNC_NAND_DATA_01	(BCM63138_PIN_NO(42) | 3)
+#define BCM63138_GPIO_42__FUNC_GPIO_42		(BCM63138_PIN_NO(42) | 5)
+
+#define BCM63138_GPIO_43__FUNC_DECT_PD_1	(BCM63138_PIN_NO(43) | 1)
+#define BCM63138_GPIO_43__FUNC_NAND_DATA_02	(BCM63138_PIN_NO(43) | 3)
+#define BCM63138_GPIO_43__FUNC_GPIO_43		(BCM63138_PIN_NO(43) | 5)
+
+#define BCM63138_GPIO_44__FUNC_NAND_DATA_03	(BCM63138_PIN_NO(44) | 3)
+#define BCM63138_GPIO_44__FUNC_GPIO_44		(BCM63138_PIN_NO(44) | 5)
+
+#define BCM63138_GPIO_45__FUNC_NAND_DATA_04	(BCM63138_PIN_NO(45) | 3)
+#define BCM63138_GPIO_45__FUNC_GPIO_45		(BCM63138_PIN_NO(45) | 5)
+
+#define BCM63138_GPIO_46__FUNC_NAND_DATA_05	(BCM63138_PIN_NO(46) | 3)
+#define BCM63138_GPIO_46__FUNC_GPIO_46		(BCM63138_PIN_NO(46) | 5)
+
+#define BCM63138_GPIO_47__FUNC_NAND_DATA_06	(BCM63138_PIN_NO(47) | 3)
+#define BCM63138_GPIO_47__FUNC_GPIO_47		(BCM63138_PIN_NO(47) | 5)
+
+#define BCM63138_GPIO_48__FUNC_NAND_DATA_07	(BCM63138_PIN_NO(48) | 3)
+#define BCM63138_GPIO_48__FUNC_GPIO_48		(BCM63138_PIN_NO(48) | 5)
+
+#define BCM63138_GPIO_49__FUNC_NAND_ALE		(BCM63138_PIN_NO(49) | 3)
+#define BCM63138_GPIO_49__FUNC_GPIO_49		(BCM63138_PIN_NO(49) | 5)
+
+#define BCM63138_GPIO_50__FUNC_NAND_WE_B	(BCM63138_PIN_NO(50) | 3)
+#define BCM63138_GPIO_50__FUNC_GPIO_50		(BCM63138_PIN_NO(50) | 5)
+
+#define BCM63138_GPIO_51__FUNC_NAND_CLE		(BCM63138_PIN_NO(51) | 3)
+#define BCM63138_GPIO_51__FUNC_GPIO_51		(BCM63138_PIN_NO(51) | 5)
+
+#define BCM63138_GPIO_52__FUNC_LD0_PWRUP	(BCM63138_PIN_NO(52) | 1)
+#define BCM63138_GPIO_52__FUNC_I2C_SDA		(BCM63138_PIN_NO(52) | 2)
+#define BCM63138_GPIO_52__FUNC_GPIO_52		(BCM63138_PIN_NO(52) | 5)
+
+#define BCM63138_GPIO_53__FUNC_LD0_DIN		(BCM63138_PIN_NO(53) | 1)
+#define BCM63138_GPIO_53__FUNC_I2C_SCL		(BCM63138_PIN_NO(53) | 2)
+#define BCM63138_GPIO_53__FUNC_GPIO_53		(BCM63138_PIN_NO(53) | 5)
+
+#define BCM63138_GPIO_54__FUNC_LD1_PWRUP	(BCM63138_PIN_NO(54) | 1)
+#define BCM63138_GPIO_54__FUNC_GPIO_54		(BCM63138_PIN_NO(54) | 5)
+
+#define BCM63138_GPIO_55__FUNC_LD0_DCLK		(BCM63138_PIN_NO(55) | 1)
+#define BCM63138_GPIO_55__FUNC_GPIO_55		(BCM63138_PIN_NO(55) | 5)
+
+#define BCM63138_GPIO_56__FUNC_PCM_SDIN		(BCM63138_PIN_NO(56) | 1)
+#define BCM63138_GPIO_56__FUNC_GPIO_56		(BCM63138_PIN_NO(56) | 5)
+
+#define BCM63138_GPIO_57__FUNC_PCM_SDOUT	(BCM63138_PIN_NO(57) | 1)
+#define BCM63138_GPIO_57__FUNC_GPIO_57		(BCM63138_PIN_NO(57) | 5)
+
+#define BCM63138_GPIO_58__FUNC_PCM_CLK		(BCM63138_PIN_NO(58) | 1)
+#define BCM63138_GPIO_58__FUNC_GPIO_58		(BCM63138_PIN_NO(58) | 5)
+
+#define BCM63138_GPIO_59__FUNC_PCM_FS		(BCM63138_PIN_NO(59) | 1)
+#define BCM63138_GPIO_59__FUNC_GPIO_59		(BCM63138_PIN_NO(59) | 5)
+
+#define BCM63138_MII1_COL__FUNC_MII1_COL	(BCM63138_PIN_NO(60) | 1)
+#define BCM63138_MII1_COL__FUNC_GPIO_60		(BCM63138_PIN_NO(60) | 5)
+
+#define BCM63138_MII1_CRS__FUNC_MII1_CRS	(BCM63138_PIN_NO(61) | 1)
+#define BCM63138_MII1_CRS__FUNC_GPIO_61		(BCM63138_PIN_NO(61) | 5)
+
+#define BCM63138_MII1_RXCLK__FUNC_MII1_RXCLK	(BCM63138_PIN_NO(62) | 1)
+#define BCM63138_MII1_RXCLK__FUNC_GPIO_62	(BCM63138_PIN_NO(62) | 5)
+
+#define BCM63138_MII1_RXER__FUNC_MII1_RXER	(BCM63138_PIN_NO(63) | 1)
+#define BCM63138_MII1_RXER__FUNC_GPIO_63	(BCM63138_PIN_NO(63) | 5)
+
+#define BCM63138_MII1_RXDV__FUNC_MII1_RXDV	(BCM63138_PIN_NO(64) | 1)
+#define BCM63138_MII1_RXDV__FUNC_GPIO_64	(BCM63138_PIN_NO(64) | 5)
+
+#define BCM63138_MII_RXD_00__FUNC_MII_RXD_00	(BCM63138_PIN_NO(65) | 1)
+#define BCM63138_MII_RXD_00__FUNC_GPIO_65	(BCM63138_PIN_NO(65) | 5)
+
+#define BCM63138_MII_RXD_01__FUNC_MII_RXD_01	(BCM63138_PIN_NO(66) | 1)
+#define BCM63138_MII_RXD_01__FUNC_GPIO_66	(BCM63138_PIN_NO(66) | 5)
+
+#define BCM63138_MII_RXD_02__FUNC_MII_RXD_02	(BCM63138_PIN_NO(67) | 1)
+#define BCM63138_MII_RXD_02__FUNC_GPIO_67	(BCM63138_PIN_NO(67) | 5)
+
+#define BCM63138_MII_RXD_03__FUNC_MII_RXD_03	(BCM63138_PIN_NO(68) | 1)
+#define BCM63138_MII_RXD_03__FUNC_GPIO_68	(BCM63138_PIN_NO(68) | 5)
+
+#define BCM63138_MII_TXCLK__FUNC_MII_TXCLK	(BCM63138_PIN_NO(69) | 1)
+#define BCM63138_MII_TXCLK__FUNC_GPIO_69	(BCM63138_PIN_NO(69) | 5)
+
+#define BCM63138_MII_TXEN__FUNC_MII_TXEN	(BCM63138_PIN_NO(70) | 1)
+#define BCM63138_MII_TXEN__FUNC_GPIO_70		(BCM63138_PIN_NO(70) | 5)
+
+#define BCM63138_MII_TXER__FUNC_MII_TXER	(BCM63138_PIN_NO(71) | 1)
+#define BCM63138_MII_TXER__FUNC_GPIO_71		(BCM63138_PIN_NO(71) | 5)
+
+#define BCM63138_MII_TXD_00__FUNC_MII_TXD_00	(BCM63138_PIN_NO(72) | 1)
+#define BCM63138_MII_TXD_00__FUNC_GPIO_72	(BCM63138_PIN_NO(72) | 5)
+
+#define BCM63138_MII_TXD_01__FUNC_MII_TXD_01	(BCM63138_PIN_NO(73) | 1)
+#define BCM63138_MII_TXD_01__FUNC_GPIO_73	(BCM63138_PIN_NO(73) | 5)
+
+#define BCM63138_MII_TXD_02__FUNC_MII_TXD_02	(BCM63138_PIN_NO(74) | 1)
+#define BCM63138_MII_TXD_02__FUNC_GPIO_74	(BCM63138_PIN_NO(74) | 5)
+
+#define BCM63138_MII_TXD_03__FUNC_MII_TXD_03	(BCM63138_PIN_NO(75) | 1)
+#define BCM63138_MII_TXD_03__FUNC_GPIO_75	(BCM63138_PIN_NO(75) | 5)
+
+#define BCM63138_RGMII1_RXCLK__FUNC_RGMII1_RXCLK	(BCM63138_PIN_NO(76) | 1)
+#define BCM63138_RGMII1_RXCLK__FUNC_GPIO_76		(BCM63138_PIN_NO(76) | 5)
+
+#define BCM63138_RGMII1_RXCTL__FUNC_RGMII1_RXCTL	(BCM63138_PIN_NO(77) | 1)
+#define BCM63138_RGMII1_RXCTL__FUNC_GPIO_77		(BCM63138_PIN_NO(77) | 5)
+
+#define BCM63138_RGMII1_RXD_00__FUNC_RGMII1_RXD_00	(BCM63138_PIN_NO(78) | 1)
+#define BCM63138_RGMII1_RXD_00__FUNC_GPIO_78		(BCM63138_PIN_NO(78) | 5)
+
+#define BCM63138_RGMII1_RXD_01__FUNC_RGMII1_RXD_01	(BCM63138_PIN_NO(79) | 1)
+#define BCM63138_RGMII1_RXD_01__FUNC_GPIO_79		(BCM63138_PIN_NO(79) | 5)
+
+#define BCM63138_RGMII1_RXD_02__FUNC_RGMII1_RXD_02	(BCM63138_PIN_NO(80) | 1)
+#define BCM63138_RGMII1_RXD_02__FUNC_GPIO_80		(BCM63138_PIN_NO(80) | 5)
+
+#define BCM63138_RGMII1_RXD_03__FUNC_RGMII1_RXD_03	(BCM63138_PIN_NO(81) | 1)
+#define BCM63138_RGMII1_RXD_03__FUNC_GPIO_81		(BCM63138_PIN_NO(81) | 5)
+
+#define BCM63138_RGMII1_TXCLK__FUNC_RGMII1_TXCLK	(BCM63138_PIN_NO(82) | 1)
+#define BCM63138_RGMII1_TXCLK__FUNC_GPIO_82		(BCM63138_PIN_NO(82) | 5)
+
+#define BCM63138_RGMII1_TXCTL__FUNC_RGMII1_TXCTL	(BCM63138_PIN_NO(83) | 1)
+#define BCM63138_RGMII1_TXCTL__FUNC_GPIO_83		(BCM63138_PIN_NO(83) | 5)
+
+#define BCM63138_RGMII1_TXD_00__FUNC_RGMII1_TXD_00	(BCM63138_PIN_NO(84) | 1)
+#define BCM63138_RGMII1_TXD_00__FUNC_GPIO_84		(BCM63138_PIN_NO(84) | 5)
+
+#define BCM63138_RGMII1_TXD_01__FUNC_RGMII1_TXD_01	(BCM63138_PIN_NO(85) | 1)
+#define BCM63138_RGMII1_TXD_01__FUNC_GPIO_85		(BCM63138_PIN_NO(85) | 5)
+
+#define BCM63138_RGMII1_TXD_02__FUNC_RGMII1_TXD_02	(BCM63138_PIN_NO(86) | 1)
+#define BCM63138_RGMII1_TXD_02__FUNC_GPIO_86		(BCM63138_PIN_NO(86) | 5)
+
+#define BCM63138_RGMII1_TXD_03__FUNC_RGMII1_TXD_03	(BCM63138_PIN_NO(87) | 1)
+#define BCM63138_RGMII1_TXD_03__FUNC_GPIO_87		(BCM63138_PIN_NO(87) | 5)
+
+#define BCM63138_RGMII2_RXCLK__FUNC_RGMII2_RXCLK	(BCM63138_PIN_NO(88) | 1)
+#define BCM63138_RGMII2_RXCLK__FUNC_GPIO_88		(BCM63138_PIN_NO(88) | 5)
+
+#define BCM63138_RGMII2_RXCTL__FUNC_RGMII2_RXCTL	(BCM63138_PIN_NO(89) | 1)
+#define BCM63138_RGMII2_RXCTL__FUNC_GPIO_89		(BCM63138_PIN_NO(89) | 5)
+
+#define BCM63138_RGMII2_RXD_00__FUNC_RGMII2_RXD_00	(BCM63138_PIN_NO(90) | 1)
+#define BCM63138_RGMII2_RXD_00__FUNC_GPIO_90		(BCM63138_PIN_NO(90) | 5)
+
+#define BCM63138_RGMII2_RXD_01__FUNC_RGMII2_RXD_01	(BCM63138_PIN_NO(91) | 1)
+#define BCM63138_RGMII2_RXD_01__FUNC_GPIO_91		(BCM63138_PIN_NO(91) | 5)
+
+#define BCM63138_RGMII2_RXD_02__FUNC_RGMII2_RXD_02	(BCM63138_PIN_NO(92) | 1)
+#define BCM63138_RGMII2_RXD_02__FUNC_GPIO_92		(BCM63138_PIN_NO(92) | 5)
+
+#define BCM63138_RGMII2_RXD_03__FUNC_RGMII2_RXD_03	(BCM63138_PIN_NO(93) | 1)
+#define BCM63138_RGMII2_RXD_03__FUNC_GPIO_93		(BCM63138_PIN_NO(93) | 5)
+
+#define BCM63138_RGMII2_TXCLK__FUNC_RGMII2_TXCLK	(BCM63138_PIN_NO(94) | 1)
+#define BCM63138_RGMII2_TXCLK__FUNC_GPIO_94		(BCM63138_PIN_NO(94) | 5)
+
+#define BCM63138_RGMII2_TXCTL__FUNC_RGMII2_TXCTL	(BCM63138_PIN_NO(95) | 1)
+#define BCM63138_RGMII2_TXCTL__FUNC_GPIO_95		(BCM63138_PIN_NO(95) | 5)
+
+#define BCM63138_RGMII2_TXD_00__FUNC_RGMII2_TXD_00	(BCM63138_PIN_NO(96) | 1)
+#define BCM63138_RGMII2_TXD_00__FUNC_GPIO_96		(BCM63138_PIN_NO(96) | 5)
+
+#define BCM63138_RGMII2_TXD_01__FUNC_RGMII2_TXD_01	(BCM63138_PIN_NO(97) | 1)
+#define BCM63138_RGMII2_TXD_01__FUNC_GPIO_97		(BCM63138_PIN_NO(97) | 5)
+
+#define BCM63138_RGMII2_TXD_02__FUNC_RGMII2_TXD_02	(BCM63138_PIN_NO(98) | 1)
+#define BCM63138_RGMII2_TXD_02__FUNC_GPIO_98		(BCM63138_PIN_NO(98) | 5)
+
+#define BCM63138_RGMII2_TXD_03__FUNC_RGMII2_TXD_03	(BCM63138_PIN_NO(99) | 1)
+#define BCM63138_RGMII2_TXD_03__FUNC_GPIO_99		(BCM63138_PIN_NO(99) | 5)
+
+#define BCM63138_RGMII3_RXCLK__FUNC_RGMII3_RXCLK	(BCM63138_PIN_NO(100) | 1)
+#define BCM63138_RGMII3_RXCLK__FUNC_LED_00		(BCM63138_PIN_NO(100) | 4)
+#define BCM63138_RGMII3_RXCLK__FUNC_GPIO_100		(BCM63138_PIN_NO(100) | 5)
+
+#define BCM63138_RGMII3_RXCTL__FUNC_RGMII3_RXCTL	(BCM63138_PIN_NO(101) | 1)
+#define BCM63138_RGMII3_RXCTL__FUNC_LED_01		(BCM63138_PIN_NO(101) | 4)
+#define BCM63138_RGMII3_RXCTL__FUNC_GPIO_101		(BCM63138_PIN_NO(101) | 5)
+
+#define BCM63138_RGMII3_RXD_00__FUNC_RGMII3_RXD_00	(BCM63138_PIN_NO(102) | 1)
+#define BCM63138_RGMII3_RXD_00__FUNC_LED_02		(BCM63138_PIN_NO(102) | 4)
+#define BCM63138_RGMII3_RXD_00__FUNC_GPIO_102		(BCM63138_PIN_NO(102) | 5)
+
+#define BCM63138_RGMII3_RXD_01__FUNC_RGMII3_RXD_01	(BCM63138_PIN_NO(103) | 1)
+#define BCM63138_RGMII3_RXD_01__FUNC_LED_03		(BCM63138_PIN_NO(103) | 4)
+#define BCM63138_RGMII3_RXD_01__FUNC_GPIO_103		(BCM63138_PIN_NO(103) | 5)
+
+#define BCM63138_RGMII3_RXD_02__FUNC_RGMII3_RXD_02	(BCM63138_PIN_NO(104) | 1)
+#define BCM63138_RGMII3_RXD_02__FUNC_LED_04		(BCM63138_PIN_NO(104) | 4)
+#define BCM63138_RGMII3_RXD_02__FUNC_GPIO_104		(BCM63138_PIN_NO(104) | 5)
+
+#define BCM63138_RGMII3_RXD_03__FUNC_RGMII3_RXD_03	(BCM63138_PIN_NO(105) | 1)
+#define BCM63138_RGMII3_RXD_03__FUNC_LED_05		(BCM63138_PIN_NO(105) | 4)
+#define BCM63138_RGMII3_RXD_03__FUNC_GPIO_105		(BCM63138_PIN_NO(105) | 5)
+
+#define BCM63138_RGMII3_TXCLK__FUNC_RGMII3_TXCLK	(BCM63138_PIN_NO(106) | 1)
+#define BCM63138_RGMII3_TXCLK__FUNC_LED_06		(BCM63138_PIN_NO(106) | 4)
+#define BCM63138_RGMII3_TXCLK__FUNC_GPIO_106		(BCM63138_PIN_NO(106) | 5)
+
+#define BCM63138_RGMII3_TXCTL__FUNC_RGMII3_TXCTL	(BCM63138_PIN_NO(107) | 1)
+#define BCM63138_RGMII3_TXCTL__FUNC_LED_07		(BCM63138_PIN_NO(107) | 4)
+#define BCM63138_RGMII3_TXCTL__FUNC_GPIO_107		(BCM63138_PIN_NO(107) | 5)
+
+#define BCM63138_RGMII3_TXD_00__FUNC_RGMII3_TXD_00	(BCM63138_PIN_NO(108) | 1)
+#define BCM63138_RGMII3_TXD_00__FUNC_LED_08		(BCM63138_PIN_NO(108) | 4)
+#define BCM63138_RGMII3_TXD_00__FUNC_GPIO_108		(BCM63138_PIN_NO(108) | 5)
+#define BCM63138_RGMII3_TXD_00__FUNC_LED_20		(BCM63138_PIN_NO(108) | 6)
+
+#define BCM63138_RGMII3_TXD_01__FUNC_RGMII3_TXD_01	(BCM63138_PIN_NO(109) | 1)
+#define BCM63138_RGMII3_TXD_01__FUNC_LED_09		(BCM63138_PIN_NO(109) | 4)
+#define BCM63138_RGMII3_TXD_01__FUNC_GPIO_109		(BCM63138_PIN_NO(109) | 5)
+#define BCM63138_RGMII3_TXD_01__FUNC_LED_21		(BCM63138_PIN_NO(109) | 6)
+
+#define BCM63138_RGMII3_TXD_02__FUNC_RGMII3_TXD_02	(BCM63138_PIN_NO(110) | 1)
+#define BCM63138_RGMII3_TXD_02__FUNC_LED_10		(BCM63138_PIN_NO(110) | 4)
+#define BCM63138_RGMII3_TXD_02__FUNC_GPIO_110		(BCM63138_PIN_NO(110) | 5)
+
+#define BCM63138_RGMII3_TXD_03__FUNC_RGMII3_TXD_03	(BCM63138_PIN_NO(111) | 1)
+#define BCM63138_RGMII3_TXD_03__FUNC_LED_11		(BCM63138_PIN_NO(111) | 4)
+#define BCM63138_RGMII3_TXD_03__FUNC_GPIO_111		(BCM63138_PIN_NO(111) | 5)
+
+#define BCM63138_RGMII_MDC__FUNC_RGMII_MDC		(BCM63138_PIN_NO(112) | 1)
+#define BCM63138_RGMII_MDC__FUNC_GPIO_112		(BCM63138_PIN_NO(112) | 5)
+
+#define BCM63138_RGMII_MDIO__FUNC_RGMII_MDIO		(BCM63138_PIN_NO(113) | 1)
+#define BCM63138_RGMII_MDIO__FUNC_GPIO_113		(BCM63138_PIN_NO(113) | 5)
+
+#define BCM63138_BMU_AC_EN__FUNC_BMU_AC_EN		(BCM63138_PIN_NO(114) | 1)
+#define BCM63138_BMU_AC_EN__FUNC_GPIO_114		(BCM63138_PIN_NO(114) | 5)
+
+#define BCM63138_BMU_DIS_CTRL__FUNC_BMU_DIS_CTRL	(BCM63138_PIN_NO(115) | 1)
+#define BCM63138_BMU_DIS_CTRL__FUNC_GPIO_115		(BCM63138_PIN_NO(115) | 5)
+
+#define BCM63138_BMU_ENA__FUNC_BMU_ENA		(BCM63138_PIN_NO(116) | 1)
+#define BCM63138_BMU_ENA__FUNC_GPIO_116		(BCM63138_PIN_NO(116) | 5)
+
+#define BCM63138_BMU_ENB__FUNC_BMU_ENB		(BCM63138_PIN_NO(117) | 1)
+#define BCM63138_BMU_ENB__FUNC_I2C_SDA		(BCM63138_PIN_NO(117) | 2)
+#define BCM63138_BMU_ENB__FUNC_GPIO_117		(BCM63138_PIN_NO(117) | 5)
+
+#define BCM63138_BMU_OWA__FUNC_BMU_OWA		(BCM63138_PIN_NO(118) | 1)
+#define BCM63138_BMU_OWA__FUNC_GPIO_118		(BCM63138_PIN_NO(118) | 5)
+
+#define BCM63138_BMU_OWB__FUNC_BMU_OWB		(BCM63138_PIN_NO(119) | 1)
+#define BCM63138_BMU_OWB__FUNC_I2C_SCL		(BCM63138_PIN_NO(119) | 2)
+#define BCM63138_BMU_OWB__FUNC_GPIO_119		(BCM63138_PIN_NO(119) | 5)
+
+#define BCM63138_BMU_PWM_OUT__FUNC_BMU_PWM_OUT		(BCM63138_PIN_NO(120) | 1)
+#define BCM63138_BMU_PWM_OUT__FUNC_GPIO_120		(BCM63138_PIN_NO(120) | 5)
+
+#define BCM63138_UART0_SIN__FUNC_UART0_SIN		(BCM63138_PIN_NO(121) | 1)
+#define BCM63138_UART0_SIN__FUNC_GPIO_121		(BCM63138_PIN_NO(121) | 5)
+
+#define BCM63138_UART0_SOUT__FUNC_UART0_SOUT		(BCM63138_PIN_NO(122) | 1)
+#define BCM63138_UART0_SOUT__FUNC_GPIO_122		(BCM63138_PIN_NO(122) | 5)
+
+#define BCM63138_SPI_CLK__FUNC_SPI_CLK		(BCM63138_PIN_NO(123) | 0)
+#define BCM63138_SPI_CLK__FUNC_GPIO_123		(BCM63138_PIN_NO(123) | 5)
+
+#define BCM63138_SPI_MOSI__FUNC_SPI_MOSI		(BCM63138_PIN_NO(124) | 0)
+#define BCM63138_SPI_MOSI__FUNC_GPIO_124		(BCM63138_PIN_NO(124) | 5)
+
+#define BCM63138_SPI_MISO__FUNC_SPI_MISO		(BCM63138_PIN_NO(125) | 0)
+#define BCM63138_SPI_MISO__FUNC_SPI_MISO_1		(BCM63138_PIN_NO(125) | 1)
+#define BCM63138_SPI_MISO__FUNC_GPIO_125		(BCM63138_PIN_NO(125) | 5)
+
+#define BCM63138_SPI_SSB0__FUNC_SPI_SSB0		(BCM63138_PIN_NO(126) | 0)
+#define BCM63138_SPI_SSB0__FUNC_SPI_SSB0_1		(BCM63138_PIN_NO(126) | 1)
+#define BCM63138_SPI_SSB0__FUNC_GPIO_126		(BCM63138_PIN_NO(126) | 5)
+
+#define BCM63138_SPI_SSB1__FUNC_SPI_SSB1		(BCM63138_PIN_NO(127) | 0)
+#define BCM63138_SPI_SSB1__FUNC_SPI_SSB1_1		(BCM63138_PIN_NO(127) | 1)
+#define BCM63138_SPI_SSB1__FUNC_GPIO_127		(BCM63138_PIN_NO(127) | 5)
+
+#define BCM63138_PCIE0_CLKREQ_B__FUNC_PCIE0_CLKREQ_B	(BCM63138_PIN_NO(128) | 0)
+#define BCM63138_PCIE0_CLKREQ_B__FUNC_GPIO_128		(BCM63138_PIN_NO(128) | 5)
+
+#define BCM63138_PCIE0_RST_B__FUNC_PCIE0_RST_B		(BCM63138_PIN_NO(129) | 0)
+#define BCM63138_PCIE0_RST_B__FUNC_GPIO_129		(BCM63138_PIN_NO(129) | 5)
+
+#define BCM63138_PCIE1_CLKREQ_B__FUNC_PCIE1_CLKREQ_B	(BCM63138_PIN_NO(130) | 0)
+#define BCM63138_PCIE1_CLKREQ_B__FUNC_GPIO_130		(BCM63138_PIN_NO(130) | 5)
+
+#define BCM63138_PCIE1_RST_B__FUNC_PCIE1_RST_B		(BCM63138_PIN_NO(131) | 0)
+#define BCM63138_PCIE1_RST_B__FUNC_GPIO_131		(BCM63138_PIN_NO(131) | 5)
+
+#define BCM63138_USB0_PWRFLT__FUNC_USB0_PWRFLT		(BCM63138_PIN_NO(132) | 1)
+#define BCM63138_USB0_PWRFLT__FUNC_GPIO_132		(BCM63138_PIN_NO(132) | 5)
+
+#define BCM63138_USB0_PWRON__FUNC_USB0_PWRON		(BCM63138_PIN_NO(133) | 1)
+#define BCM63138_USB0_PWRON__FUNC_GPIO_133		(BCM63138_PIN_NO(133) | 5)
+
+#define BCM63138_USB1_PWRFLT__FUNC_USB1_PWRFLT		(BCM63138_PIN_NO(134) | 1)
+#define BCM63138_USB1_PWRFLT__FUNC_GPIO_134		(BCM63138_PIN_NO(134) | 5)
+
+#define BCM63138_USB1_PWRON__FUNC_USB1_PWRON		(BCM63138_PIN_NO(135) | 1)
+#define BCM63138_USB1_PWRON__FUNC_GPIO_135		(BCM63138_PIN_NO(135) | 5)
+
+#define BCM63138_RESET_OUT_B__FUNC_RESET_OUT_B		(BCM63138_PIN_NO(136) | 0)
+#define BCM63138_RESET_OUT_B__FUNC_GPIO_136		(BCM63138_PIN_NO(136) | 5)
+
+#define BCM63138_DECT_RDI__FUNC_DECT_RDI		(BCM63138_PIN_NO(137) | 1)
+#define BCM63138_DECT_RDI__FUNC_GPIO_137		(BCM63138_PIN_NO(137) | 5)
+
+#define BCM63138_DECT_BTDO__FUNC_DECT_BTDO		(BCM63138_PIN_NO(138) | 1)
+#define BCM63138_DECT_BTDO__FUNC_GPIO_138		(BCM63138_PIN_NO(138) | 5)
+
+#define BCM63138_DECT_MWR_LE__FUNC_DECT_MWR_LE		(BCM63138_PIN_NO(139) | 1)
+#define BCM63138_DECT_MWR_LE__FUNC_GPIO_139		(BCM63138_PIN_NO(139) | 5)
+
+#define BCM63138_DECT_MWR_SK__FUNC_DECT_MWR_SK		(BCM63138_PIN_NO(140) | 1)
+#define BCM63138_DECT_MWR_SK__FUNC_GPIO_140		(BCM63138_PIN_NO(140) | 5)
+
+#define BCM63138_DECT_MWR_SIO__FUNC_DECT_MWR_SIO	(BCM63138_PIN_NO(141) | 1)
+#define BCM63138_DECT_MWR_SIO__FUNC_GPIO_141		(BCM63138_PIN_NO(141) | 5)
+
+#endif /* _DT_BINDINGS_BCM63138_PINFUNC_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/dt-bindings/pinctrl/bcm63158-pinfunc.h	2025-09-25 17:40:36.923373950 +0200
@@ -0,0 +1,519 @@
+/*
+ * bcm63158-pinfunc.h for pinctrl-bcm63158
+ * Created by <nschichan@freebox.fr> on Wed May 22 18:17:39 2019
+ */
+
+#ifndef _DT_BINDINGS_BCM63158_PINFUNC_H
+#define _DT_BINDINGS_BCM63158_PINFUNC_H
+
+#define BCM63158_PIN_NO(x, y)		(((x) << 8) | (y))
+
+/*
+ * generated from gen-pinmux.pl < bcm63158-pinctrl-table
+ */
+#define BCM63158_GPIO_00__FUNC_A_SER_LED_DATA          (BCM63158_PIN_NO(0, 1))
+#define BCM63158_GPIO_00__FUNC_A_LED_00                (BCM63158_PIN_NO(0, 4))
+#define BCM63158_GPIO_00__FUNC_GPIO_00                 (BCM63158_PIN_NO(0, 5))
+
+#define BCM63158_GPIO_01__FUNC_A_SER_LED_CLK           (BCM63158_PIN_NO(1, 1))
+#define BCM63158_GPIO_01__FUNC_A_LED_01                (BCM63158_PIN_NO(1, 4))
+#define BCM63158_GPIO_01__FUNC_GPIO_01                 (BCM63158_PIN_NO(1, 5))
+
+#define BCM63158_GPIO_02__FUNC_A_SER_LED_MASK          (BCM63158_PIN_NO(2, 1))
+#define BCM63158_GPIO_02__FUNC_A_LED_02                (BCM63158_PIN_NO(2, 4))
+#define BCM63158_GPIO_02__FUNC_GPIO_02                 (BCM63158_PIN_NO(2, 5))
+
+#define BCM63158_GPIO_03__FUNC_A_UART2_CTS             (BCM63158_PIN_NO(3, 1))
+#define BCM63158_GPIO_03__FUNC_B_PPS_IN                (BCM63158_PIN_NO(3, 2))
+#define BCM63158_GPIO_03__FUNC_A_LED_03                (BCM63158_PIN_NO(3, 4))
+#define BCM63158_GPIO_03__FUNC_GPIO_03                 (BCM63158_PIN_NO(3, 5))
+
+#define BCM63158_GPIO_04__FUNC_A_UART2_RTS             (BCM63158_PIN_NO(4, 1))
+#define BCM63158_GPIO_04__FUNC_B_PPS_OUT               (BCM63158_PIN_NO(4, 2))
+#define BCM63158_GPIO_04__FUNC_A_LED_04                (BCM63158_PIN_NO(4, 4))
+#define BCM63158_GPIO_04__FUNC_GPIO_04                 (BCM63158_PIN_NO(4, 5))
+
+#define BCM63158_GPIO_05__FUNC_A_UART2_SIN             (BCM63158_PIN_NO(5, 1))
+#define BCM63158_GPIO_05__FUNC_A_LED_05                (BCM63158_PIN_NO(5, 4))
+#define BCM63158_GPIO_05__FUNC_GPIO_05                 (BCM63158_PIN_NO(5, 5))
+
+#define BCM63158_GPIO_06__FUNC_A_UART2_SOUT            (BCM63158_PIN_NO(6, 1))
+#define BCM63158_GPIO_06__FUNC_A_LED_06                (BCM63158_PIN_NO(6, 4))
+#define BCM63158_GPIO_06__FUNC_GPIO_06                 (BCM63158_PIN_NO(6, 5))
+
+#define BCM63158_GPIO_07__FUNC_A_SPIM_SS5_B            (BCM63158_PIN_NO(7, 1))
+#define BCM63158_GPIO_07__FUNC_B_NTR_OUT               (BCM63158_PIN_NO(7, 2))
+#define BCM63158_GPIO_07__FUNC_A_LED_07                (BCM63158_PIN_NO(7, 4))
+#define BCM63158_GPIO_07__FUNC_GPIO_07                 (BCM63158_PIN_NO(7, 5))
+#define BCM63158_GPIO_07__FUNC_B_NTR_IN                (BCM63158_PIN_NO(7, 6))
+
+#define BCM63158_GPIO_08__FUNC_A_SPIM_SS4_B            (BCM63158_PIN_NO(8, 1))
+#define BCM63158_GPIO_08__FUNC_A_LED_08                (BCM63158_PIN_NO(8, 4))
+#define BCM63158_GPIO_08__FUNC_GPIO_08                 (BCM63158_PIN_NO(8, 5))
+
+#define BCM63158_GPIO_09__FUNC_A_SPIM_SS3_B            (BCM63158_PIN_NO(9, 1))
+#define BCM63158_GPIO_09__FUNC_B_USBD_ID               (BCM63158_PIN_NO(9, 3))
+#define BCM63158_GPIO_09__FUNC_A_LED_09                (BCM63158_PIN_NO(9, 4))
+#define BCM63158_GPIO_09__FUNC_GPIO_09                 (BCM63158_PIN_NO(9, 5))
+#define BCM63158_GPIO_09__FUNC_A_AE_SERDES_MOD_DEF0    (BCM63158_PIN_NO(9, 6))
+
+#define BCM63158_GPIO_10__FUNC_A_SPIM_SS2_B            (BCM63158_PIN_NO(10, 1))
+#define BCM63158_GPIO_10__FUNC_A_PMD_EXT_LOS           (BCM63158_PIN_NO(10, 2))
+#define BCM63158_GPIO_10__FUNC_B_USBD_VBUS_PRESENT     (BCM63158_PIN_NO(10, 3))
+#define BCM63158_GPIO_10__FUNC_A_LED_10                (BCM63158_PIN_NO(10, 4))
+#define BCM63158_GPIO_10__FUNC_GPIO_10                 (BCM63158_PIN_NO(10, 5))
+#define BCM63158_GPIO_10__FUNC_A_AE_FIBER_DETECT       (BCM63158_PIN_NO(10, 6))
+
+#define BCM63158_GPIO_11__FUNC_A_I2C_SDA_0             (BCM63158_PIN_NO(11, 2))
+#define BCM63158_GPIO_11__FUNC_A_LED_11                (BCM63158_PIN_NO(11, 4))
+#define BCM63158_GPIO_11__FUNC_GPIO_11                 (BCM63158_PIN_NO(11, 5))
+
+#define BCM63158_GPIO_12__FUNC_A_PPS_IN                (BCM63158_PIN_NO(12, 1))
+#define BCM63158_GPIO_12__FUNC_A_I2C_SCL_0             (BCM63158_PIN_NO(12, 2))
+#define BCM63158_GPIO_12__FUNC_A_LED_12                (BCM63158_PIN_NO(12, 4))
+#define BCM63158_GPIO_12__FUNC_GPIO_12                 (BCM63158_PIN_NO(12, 5))
+#define BCM63158_GPIO_12__FUNC_C_SGMII_SERDES_MOD_DEF0 (BCM63158_PIN_NO(12, 6))
+
+#define BCM63158_GPIO_13__FUNC_A_PPS_OUT               (BCM63158_PIN_NO(13, 1))
+#define BCM63158_GPIO_13__FUNC_A_LED_13                (BCM63158_PIN_NO(13, 4))
+#define BCM63158_GPIO_13__FUNC_GPIO_13                 (BCM63158_PIN_NO(13, 5))
+
+#define BCM63158_GPIO_14__FUNC_A_NTR_OUT               (BCM63158_PIN_NO(14, 1))
+#define BCM63158_GPIO_14__FUNC_I2S_RX_SDATA            (BCM63158_PIN_NO(14, 2))
+#define BCM63158_GPIO_14__FUNC_A_LED_14                (BCM63158_PIN_NO(14, 4))
+#define BCM63158_GPIO_14__FUNC_GPIO_14                 (BCM63158_PIN_NO(14, 5))
+#define BCM63158_GPIO_14__FUNC_A_NTR_IN                (BCM63158_PIN_NO(14, 6))
+
+#define BCM63158_GPIO_15__FUNC_SW_SPIS_CLK             (BCM63158_PIN_NO(15, 2))
+#define BCM63158_GPIO_15__FUNC_A_LED_15                (BCM63158_PIN_NO(15, 4))
+#define BCM63158_GPIO_15__FUNC_GPIO_15                 (BCM63158_PIN_NO(15, 5))
+#define BCM63158_GPIO_15__FUNC_B_I2C_SDA_1             (BCM63158_PIN_NO(15, 6))
+
+#define BCM63158_GPIO_16__FUNC_SW_SPIS_SS_B            (BCM63158_PIN_NO(16, 2))
+#define BCM63158_GPIO_16__FUNC_A_LED_16                (BCM63158_PIN_NO(16, 4))
+#define BCM63158_GPIO_16__FUNC_GPIO_16                 (BCM63158_PIN_NO(16, 5))
+#define BCM63158_GPIO_16__FUNC_B_I2C_SCL_1             (BCM63158_PIN_NO(16, 6))
+
+#define BCM63158_GPIO_17__FUNC_SW_SPIS_MISO            (BCM63158_PIN_NO(17, 2))
+#define BCM63158_GPIO_17__FUNC_A_LED_17                (BCM63158_PIN_NO(17, 4))
+#define BCM63158_GPIO_17__FUNC_GPIO_17                 (BCM63158_PIN_NO(17, 5))
+#define BCM63158_GPIO_17__FUNC_C_UART3_SIN             (BCM63158_PIN_NO(17, 6))
+
+#define BCM63158_GPIO_18__FUNC_SW_SPIS_MOSI            (BCM63158_PIN_NO(18, 2))
+#define BCM63158_GPIO_18__FUNC_A_LED_18                (BCM63158_PIN_NO(18, 4))
+#define BCM63158_GPIO_18__FUNC_GPIO_18                 (BCM63158_PIN_NO(18, 5))
+#define BCM63158_GPIO_18__FUNC_C_UART3_SOUT            (BCM63158_PIN_NO(18, 6))
+
+#define BCM63158_GPIO_19__FUNC_VREG_SYNC               (BCM63158_PIN_NO(19, 2))
+#define BCM63158_GPIO_19__FUNC_A_LED_19                (BCM63158_PIN_NO(19, 4))
+#define BCM63158_GPIO_19__FUNC_GPIO_19                 (BCM63158_PIN_NO(19, 5))
+#define BCM63158_GPIO_19__FUNC_A_SGMII_FIBER_DETECT    (BCM63158_PIN_NO(19, 6))
+
+#define BCM63158_GPIO_20__FUNC_SPIS_CLK                (BCM63158_PIN_NO(20, 1))
+#define BCM63158_GPIO_20__FUNC_B_UART2_CTS             (BCM63158_PIN_NO(20, 2))
+#define BCM63158_GPIO_20__FUNC_B_UART3_SIN             (BCM63158_PIN_NO(20, 3))
+#define BCM63158_GPIO_20__FUNC_A_LED_20                (BCM63158_PIN_NO(20, 4))
+#define BCM63158_GPIO_20__FUNC_GPIO_20                 (BCM63158_PIN_NO(20, 5))
+#define BCM63158_GPIO_20__FUNC_A_SGMII_SERDES_MOD_DEF0 (BCM63158_PIN_NO(20, 6))
+
+#define BCM63158_GPIO_21__FUNC_SPIS_SS_B               (BCM63158_PIN_NO(21, 1))
+#define BCM63158_GPIO_21__FUNC_B_UART2_RTS             (BCM63158_PIN_NO(21, 2))
+#define BCM63158_GPIO_21__FUNC_B_UART3_SOUT            (BCM63158_PIN_NO(21, 3))
+#define BCM63158_GPIO_21__FUNC_A_LED_21                (BCM63158_PIN_NO(21, 4))
+#define BCM63158_GPIO_21__FUNC_GPIO_21                 (BCM63158_PIN_NO(21, 5))
+#define BCM63158_GPIO_21__FUNC_C_SGMII_FIBER_DETECT    (BCM63158_PIN_NO(21, 6))
+
+#define BCM63158_GPIO_22__FUNC_SPIS_MISO               (BCM63158_PIN_NO(22, 1))
+#define BCM63158_GPIO_22__FUNC_B_UART2_SOUT            (BCM63158_PIN_NO(22, 2))
+#define BCM63158_GPIO_22__FUNC_A_LED_22                (BCM63158_PIN_NO(22, 4))
+#define BCM63158_GPIO_22__FUNC_GPIO_22                 (BCM63158_PIN_NO(22, 5))
+
+#define BCM63158_GPIO_23__FUNC_SPIS_MOSI               (BCM63158_PIN_NO(23, 1))
+#define BCM63158_GPIO_23__FUNC_B_UART2_SIN             (BCM63158_PIN_NO(23, 2))
+#define BCM63158_GPIO_23__FUNC_A_LED_23                (BCM63158_PIN_NO(23, 4))
+#define BCM63158_GPIO_23__FUNC_GPIO_23                 (BCM63158_PIN_NO(23, 5))
+
+#define BCM63158_GPIO_24__FUNC_B_UART1_SOUT            (BCM63158_PIN_NO(24, 2))
+#define BCM63158_GPIO_24__FUNC_B_I2C_SDA_0             (BCM63158_PIN_NO(24, 3))
+#define BCM63158_GPIO_24__FUNC_A_LED_24                (BCM63158_PIN_NO(24, 4))
+#define BCM63158_GPIO_24__FUNC_GPIO_24                 (BCM63158_PIN_NO(24, 5))
+
+#define BCM63158_GPIO_25__FUNC_B_SPIM_SS2_B            (BCM63158_PIN_NO(25, 1))
+#define BCM63158_GPIO_25__FUNC_B_UART1_SIN             (BCM63158_PIN_NO(25, 2))
+#define BCM63158_GPIO_25__FUNC_B_I2C_SCL_0             (BCM63158_PIN_NO(25, 3))
+#define BCM63158_GPIO_25__FUNC_A_LED_25                (BCM63158_PIN_NO(25, 4))
+#define BCM63158_GPIO_25__FUNC_GPIO_25                 (BCM63158_PIN_NO(25, 5))
+
+#define BCM63158_GPIO_26__FUNC_B_SPIM_SS3_B            (BCM63158_PIN_NO(26, 1))
+#define BCM63158_GPIO_26__FUNC_A_I2C_SDA_1             (BCM63158_PIN_NO(26, 2))
+#define BCM63158_GPIO_26__FUNC_A_UART3_SIN             (BCM63158_PIN_NO(26, 3))
+#define BCM63158_GPIO_26__FUNC_A_LED_26                (BCM63158_PIN_NO(26, 4))
+#define BCM63158_GPIO_26__FUNC_GPIO_26                 (BCM63158_PIN_NO(26, 5))
+
+#define BCM63158_GPIO_27__FUNC_B_SPIM_SS4_B            (BCM63158_PIN_NO(27, 1))
+#define BCM63158_GPIO_27__FUNC_A_I2C_SCL_1             (BCM63158_PIN_NO(27, 2))
+#define BCM63158_GPIO_27__FUNC_A_UART3_SOUT            (BCM63158_PIN_NO(27, 3))
+#define BCM63158_GPIO_27__FUNC_A_LED_27                (BCM63158_PIN_NO(27, 4))
+#define BCM63158_GPIO_27__FUNC_GPIO_27                 (BCM63158_PIN_NO(27, 5))
+
+#define BCM63158_GPIO_28__FUNC_B_SPIM_SS5_B            (BCM63158_PIN_NO(28, 1))
+#define BCM63158_GPIO_28__FUNC_I2S_MCLK                (BCM63158_PIN_NO(28, 2))
+#define BCM63158_GPIO_28__FUNC_A_LED_28                (BCM63158_PIN_NO(28, 4))
+#define BCM63158_GPIO_28__FUNC_GPIO_28                 (BCM63158_PIN_NO(28, 5))
+
+#define BCM63158_GPIO_29__FUNC_B_SER_LED_DATA          (BCM63158_PIN_NO(29, 1))
+#define BCM63158_GPIO_29__FUNC_I2S_LRCK                (BCM63158_PIN_NO(29, 2))
+#define BCM63158_GPIO_29__FUNC_A_LED_29                (BCM63158_PIN_NO(29, 4))
+#define BCM63158_GPIO_29__FUNC_GPIO_29                 (BCM63158_PIN_NO(29, 5))
+
+#define BCM63158_GPIO_30__FUNC_B_SER_LED_CLK           (BCM63158_PIN_NO(30, 1))
+#define BCM63158_GPIO_30__FUNC_I2S_SCLK                (BCM63158_PIN_NO(30, 2))
+#define BCM63158_GPIO_30__FUNC_A_LED_30                (BCM63158_PIN_NO(30, 4))
+#define BCM63158_GPIO_30__FUNC_GPIO_30                 (BCM63158_PIN_NO(30, 5))
+
+#define BCM63158_GPIO_31__FUNC_B_SER_LED_MASK          (BCM63158_PIN_NO(31, 1))
+#define BCM63158_GPIO_31__FUNC_I2S_TX_SDATA            (BCM63158_PIN_NO(31, 2))
+#define BCM63158_GPIO_31__FUNC_A_LED_31                (BCM63158_PIN_NO(31, 4))
+#define BCM63158_GPIO_31__FUNC_GPIO_31                 (BCM63158_PIN_NO(31, 5))
+
+#define BCM63158_GPIO_32__FUNC_VDSL_CTRL0              (BCM63158_PIN_NO(32, 2))
+#define BCM63158_GPIO_32__FUNC_GPIO_32                 (BCM63158_PIN_NO(32, 5))
+
+#define BCM63158_GPIO_33__FUNC_VDSL_CTRL_1             (BCM63158_PIN_NO(33, 2))
+#define BCM63158_GPIO_33__FUNC_B_WAN_EARLY_TXEN        (BCM63158_PIN_NO(33, 3))
+#define BCM63158_GPIO_33__FUNC_GPIO_33                 (BCM63158_PIN_NO(33, 5))
+
+#define BCM63158_GPIO_34__FUNC_VDSL_CTRL_2             (BCM63158_PIN_NO(34, 2))
+#define BCM63158_GPIO_34__FUNC_B_ROGUE_IN              (BCM63158_PIN_NO(34, 3))
+#define BCM63158_GPIO_34__FUNC_GPIO_34                 (BCM63158_PIN_NO(34, 5))
+
+#define BCM63158_GPIO_35__FUNC_VDSL_CTRL_3             (BCM63158_PIN_NO(35, 2))
+#define BCM63158_GPIO_35__FUNC_B_SGMII_FIBER_DETECT    (BCM63158_PIN_NO(35, 3))
+#define BCM63158_GPIO_35__FUNC_GPIO_35                 (BCM63158_PIN_NO(35, 5))
+
+#define BCM63158_GPIO_36__FUNC_VDSL_CTRL_4             (BCM63158_PIN_NO(36, 2))
+#define BCM63158_GPIO_36__FUNC_B_SGMII_SERDES_MOD_DEF0 (BCM63158_PIN_NO(36, 3))
+#define BCM63158_GPIO_36__FUNC_GPIO_36                 (BCM63158_PIN_NO(36, 5))
+
+#define BCM63158_GPIO_37__FUNC_B_PMD_EXT_LOS           (BCM63158_PIN_NO(37, 1))
+#define BCM63158_GPIO_37__FUNC_VDSL_CTRL_5             (BCM63158_PIN_NO(37, 2))
+#define BCM63158_GPIO_37__FUNC_B_AE_FIBER_DETECT       (BCM63158_PIN_NO(37, 3))
+#define BCM63158_GPIO_37__FUNC_GPIO_37                 (BCM63158_PIN_NO(37, 5))
+
+#define BCM63158_GPIO_38__FUNC_B_VREG_SYNC             (BCM63158_PIN_NO(38, 2))
+#define BCM63158_GPIO_38__FUNC_B_AE_SERDES_MOD_DEF0    (BCM63158_PIN_NO(38, 3))
+#define BCM63158_GPIO_38__FUNC_GPIO_38                 (BCM63158_PIN_NO(38, 5))
+
+#define BCM63158_GPIO_39__FUNC_A_WAN_EARLY_TXEN        (BCM63158_PIN_NO(39, 2))
+#define BCM63158_GPIO_39__FUNC_GPIO_39                 (BCM63158_PIN_NO(39, 5))
+
+#define BCM63158_GPIO_40__FUNC_A_ROGUE_IN              (BCM63158_PIN_NO(40, 2))
+#define BCM63158_GPIO_40__FUNC_GPIO_40                 (BCM63158_PIN_NO(40, 5))
+
+#define BCM63158_GPIO_41__FUNC_SYS_IRQ_OUT             (BCM63158_PIN_NO(41, 2))
+#define BCM63158_GPIO_41__FUNC_C_WAN_EARLY_TXEN        (BCM63158_PIN_NO(41, 3))
+#define BCM63158_GPIO_41__FUNC_GPIO_41                 (BCM63158_PIN_NO(41, 5))
+
+#define BCM63158_GPIO_42__FUNC_PCM_SDIN                (BCM63158_PIN_NO(42, 1))
+#define BCM63158_GPIO_42__FUNC_A_UART1_SIN             (BCM63158_PIN_NO(42, 4))
+#define BCM63158_GPIO_42__FUNC_GPIO_42                 (BCM63158_PIN_NO(42, 5))
+
+#define BCM63158_GPIO_43__FUNC_PCM_SDOUT               (BCM63158_PIN_NO(43, 1))
+#define BCM63158_GPIO_43__FUNC_A_UART1_SOUT            (BCM63158_PIN_NO(43, 4))
+#define BCM63158_GPIO_43__FUNC_GPIO_43                 (BCM63158_PIN_NO(43, 5))
+
+#define BCM63158_GPIO_44__FUNC_PCM_CLK                 (BCM63158_PIN_NO(44, 1))
+#define BCM63158_GPIO_44__FUNC_A_USBD_VBUS_PRESENT     (BCM63158_PIN_NO(44, 4))
+#define BCM63158_GPIO_44__FUNC_GPIO_44                 (BCM63158_PIN_NO(44, 5))
+
+#define BCM63158_GPIO_45__FUNC_PCM_FS                  (BCM63158_PIN_NO(45, 1))
+#define BCM63158_GPIO_45__FUNC_A_USBD_ID               (BCM63158_PIN_NO(45, 4))
+#define BCM63158_GPIO_45__FUNC_GPIO_45                 (BCM63158_PIN_NO(45, 5))
+
+#define BCM63158_GPIO_46__FUNC_C_VREG_SYNC             (BCM63158_PIN_NO(46, 2))
+#define BCM63158_GPIO_46__FUNC_GPIO_46                 (BCM63158_PIN_NO(46, 5))
+
+#define BCM63158_GPIO_47__FUNC_NAND_WP                 (BCM63158_PIN_NO(47, 3))
+#define BCM63158_GPIO_47__FUNC_GPIO_47                 (BCM63158_PIN_NO(47, 5))
+
+#define BCM63158_GPIO_48__FUNC_NAND_CE_B               (BCM63158_PIN_NO(48, 3))
+#define BCM63158_GPIO_48__FUNC_GPIO_48                 (BCM63158_PIN_NO(48, 5))
+
+#define BCM63158_GPIO_49__FUNC_NAND_RE_B               (BCM63158_PIN_NO(49, 3))
+#define BCM63158_GPIO_49__FUNC_GPIO_49                 (BCM63158_PIN_NO(49, 5))
+
+#define BCM63158_GPIO_50__FUNC_NAND_RB_B               (BCM63158_PIN_NO(50, 3))
+#define BCM63158_GPIO_50__FUNC_GPIO_50                 (BCM63158_PIN_NO(50, 5))
+
+#define BCM63158_GPIO_51__FUNC_NAND_DATA_0             (BCM63158_PIN_NO(51, 3))
+#define BCM63158_GPIO_51__FUNC_GPIO_51                 (BCM63158_PIN_NO(51, 5))
+
+#define BCM63158_GPIO_52__FUNC_NAND_DATA_1             (BCM63158_PIN_NO(52, 3))
+#define BCM63158_GPIO_52__FUNC_GPIO_52                 (BCM63158_PIN_NO(52, 5))
+
+#define BCM63158_GPIO_53__FUNC_NAND_DATA_2             (BCM63158_PIN_NO(53, 3))
+#define BCM63158_GPIO_53__FUNC_GPIO_53                 (BCM63158_PIN_NO(53, 5))
+
+#define BCM63158_GPIO_54__FUNC_NAND_DATA_3             (BCM63158_PIN_NO(54, 3))
+#define BCM63158_GPIO_54__FUNC_GPIO_54                 (BCM63158_PIN_NO(54, 5))
+
+#define BCM63158_GPIO_55__FUNC_NAND_DATA_4             (BCM63158_PIN_NO(55, 3))
+#define BCM63158_GPIO_55__FUNC_GPIO_55                 (BCM63158_PIN_NO(55, 5))
+
+#define BCM63158_GPIO_56__FUNC_NAND_DATA_5             (BCM63158_PIN_NO(56, 3))
+#define BCM63158_GPIO_56__FUNC_GPIO_56                 (BCM63158_PIN_NO(56, 5))
+
+#define BCM63158_GPIO_57__FUNC_NAND_DATA_6             (BCM63158_PIN_NO(57, 3))
+#define BCM63158_GPIO_57__FUNC_GPIO_57                 (BCM63158_PIN_NO(57, 5))
+
+#define BCM63158_GPIO_58__FUNC_NAND_DATA_7             (BCM63158_PIN_NO(58, 3))
+#define BCM63158_GPIO_58__FUNC_GPIO_58                 (BCM63158_PIN_NO(58, 5))
+
+#define BCM63158_GPIO_59__FUNC_NAND_ALE                (BCM63158_PIN_NO(59, 3))
+#define BCM63158_GPIO_59__FUNC_GPIO_59                 (BCM63158_PIN_NO(59, 5))
+
+#define BCM63158_GPIO_60__FUNC_NAND_WE_B               (BCM63158_PIN_NO(60, 3))
+#define BCM63158_GPIO_60__FUNC_GPIO_60                 (BCM63158_PIN_NO(60, 5))
+
+#define BCM63158_GPIO_61__FUNC_NAND_CLE                (BCM63158_PIN_NO(61, 3))
+#define BCM63158_GPIO_61__FUNC_GPIO_61                 (BCM63158_PIN_NO(61, 5))
+
+#define BCM63158_GPIO_62__FUNC_NAND_CE2_B              (BCM63158_PIN_NO(62, 2))
+#define BCM63158_GPIO_62__FUNC_EMMC_CLK                (BCM63158_PIN_NO(62, 3))
+#define BCM63158_GPIO_62__FUNC_GPIO_62                 (BCM63158_PIN_NO(62, 5))
+
+#define BCM63158_GPIO_63__FUNC_NAND_CE1_B              (BCM63158_PIN_NO(63, 2))
+#define BCM63158_GPIO_63__FUNC_EMMC_CMD                (BCM63158_PIN_NO(63, 3))
+#define BCM63158_GPIO_63__FUNC_GPIO_63                 (BCM63158_PIN_NO(63, 5))
+
+#define BCM63158_GPIO_64__FUNC_RGMII0_RXCLK            (BCM63158_PIN_NO(64, 1))
+#define BCM63158_GPIO_64__FUNC_GPIO_64                 (BCM63158_PIN_NO(64, 5))
+#define BCM63158_GPIO_64__FUNC_B_LED_00                (BCM63158_PIN_NO(64, 6))
+
+#define BCM63158_GPIO_65__FUNC_GPIO_65                 (BCM63158_PIN_NO(65, 5))
+#define BCM63158_GPIO_65__FUNC_B_LED_01                (BCM63158_PIN_NO(65, 6))
+
+#define BCM63158_GPIO_66__FUNC_RGMII0_RXCTL            (BCM63158_PIN_NO(66, 1))
+#define BCM63158_GPIO_66__FUNC_GPIO_66                 (BCM63158_PIN_NO(66, 5))
+#define BCM63158_GPIO_66__FUNC_B_LED_02                (BCM63158_PIN_NO(66, 6))
+
+#define BCM63158_GPIO_67__FUNC_RGMII0_RXD_0            (BCM63158_PIN_NO(67, 1))
+#define BCM63158_GPIO_67__FUNC_GPIO_67                 (BCM63158_PIN_NO(67, 5))
+#define BCM63158_GPIO_67__FUNC_B_LED_03                (BCM63158_PIN_NO(67, 6))
+
+#define BCM63158_GPIO_68__FUNC_RGMII0_RXD_1            (BCM63158_PIN_NO(68, 1))
+#define BCM63158_GPIO_68__FUNC_GPIO_68                 (BCM63158_PIN_NO(68, 5))
+#define BCM63158_GPIO_68__FUNC_B_LED_04                (BCM63158_PIN_NO(68, 6))
+
+#define BCM63158_GPIO_69__FUNC_RGMII0_RXD_2            (BCM63158_PIN_NO(69, 1))
+#define BCM63158_GPIO_69__FUNC_GPIO_69                 (BCM63158_PIN_NO(69, 5))
+#define BCM63158_GPIO_69__FUNC_B_LED_05                (BCM63158_PIN_NO(69, 6))
+
+#define BCM63158_GPIO_70__FUNC_RGMII0_RXD_3            (BCM63158_PIN_NO(70, 1))
+#define BCM63158_GPIO_70__FUNC_GPIO_70                 (BCM63158_PIN_NO(70, 5))
+#define BCM63158_GPIO_70__FUNC_B_LED_06                (BCM63158_PIN_NO(70, 6))
+
+#define BCM63158_GPIO_71__FUNC_RGMII0_TXCLK            (BCM63158_PIN_NO(71, 1))
+#define BCM63158_GPIO_71__FUNC_GPIO_71                 (BCM63158_PIN_NO(71, 5))
+#define BCM63158_GPIO_71__FUNC_B_LED_07                (BCM63158_PIN_NO(71, 6))
+
+#define BCM63158_GPIO_72__FUNC_RGMII0_TXCTL            (BCM63158_PIN_NO(72, 1))
+#define BCM63158_GPIO_72__FUNC_GPIO_72                 (BCM63158_PIN_NO(72, 5))
+#define BCM63158_GPIO_72__FUNC_B_LED_08                (BCM63158_PIN_NO(72, 6))
+
+#define BCM63158_GPIO_73__FUNC_GPIO_73                 (BCM63158_PIN_NO(73, 5))
+#define BCM63158_GPIO_73__FUNC_B_LED_09                (BCM63158_PIN_NO(73, 6))
+
+#define BCM63158_GPIO_74__FUNC_RGMII0_TXD_0            (BCM63158_PIN_NO(74, 1))
+#define BCM63158_GPIO_74__FUNC_GPIO_74                 (BCM63158_PIN_NO(74, 5))
+#define BCM63158_GPIO_74__FUNC_B_LED_10                (BCM63158_PIN_NO(74, 6))
+
+#define BCM63158_GPIO_75__FUNC_RGMII0_TXD_1            (BCM63158_PIN_NO(75, 1))
+#define BCM63158_GPIO_75__FUNC_GPIO_75                 (BCM63158_PIN_NO(75, 5))
+#define BCM63158_GPIO_75__FUNC_B_LED_11                (BCM63158_PIN_NO(75, 6))
+
+#define BCM63158_GPIO_76__FUNC_RGMII0_TXD_2            (BCM63158_PIN_NO(76, 1))
+#define BCM63158_GPIO_76__FUNC_GPIO_76                 (BCM63158_PIN_NO(76, 5))
+#define BCM63158_GPIO_76__FUNC_B_LED_12                (BCM63158_PIN_NO(76, 6))
+
+#define BCM63158_GPIO_77__FUNC_RGMII0_TXD_3            (BCM63158_PIN_NO(77, 1))
+#define BCM63158_GPIO_77__FUNC_GPIO_77                 (BCM63158_PIN_NO(77, 5))
+#define BCM63158_GPIO_77__FUNC_B_LED_13                (BCM63158_PIN_NO(77, 6))
+
+#define BCM63158_GPIO_78__FUNC_GPIO_78                 (BCM63158_PIN_NO(78, 5))
+#define BCM63158_GPIO_78__FUNC_B_LED_14                (BCM63158_PIN_NO(78, 6))
+
+#define BCM63158_GPIO_79__FUNC_GPIO_79                 (BCM63158_PIN_NO(79, 5))
+#define BCM63158_GPIO_79__FUNC_B_LED_15                (BCM63158_PIN_NO(79, 6))
+
+#define BCM63158_GPIO_80__FUNC_RGMII1_RXCLK            (BCM63158_PIN_NO(80, 1))
+#define BCM63158_GPIO_80__FUNC_GPIO_80                 (BCM63158_PIN_NO(80, 5))
+#define BCM63158_GPIO_80__FUNC_B_LED_16                (BCM63158_PIN_NO(80, 6))
+
+#define BCM63158_GPIO_81__FUNC_RGMII1_RXCTL            (BCM63158_PIN_NO(81, 1))
+#define BCM63158_GPIO_81__FUNC_GPIO_81                 (BCM63158_PIN_NO(81, 5))
+#define BCM63158_GPIO_81__FUNC_B_LED_17                (BCM63158_PIN_NO(81, 6))
+
+#define BCM63158_GPIO_82__FUNC_RGMII1_RXD_0            (BCM63158_PIN_NO(82, 1))
+#define BCM63158_GPIO_82__FUNC_GPIO_82                 (BCM63158_PIN_NO(82, 5))
+#define BCM63158_GPIO_82__FUNC_B_LED_18                (BCM63158_PIN_NO(82, 6))
+
+#define BCM63158_GPIO_83__FUNC_RGMII1_RXD_1            (BCM63158_PIN_NO(83, 1))
+#define BCM63158_GPIO_83__FUNC_GPIO_83                 (BCM63158_PIN_NO(83, 5))
+#define BCM63158_GPIO_83__FUNC_B_LED_19                (BCM63158_PIN_NO(83, 6))
+
+#define BCM63158_GPIO_84__FUNC_RGMII1_RXD_2            (BCM63158_PIN_NO(84, 1))
+#define BCM63158_GPIO_84__FUNC_GPIO_84                 (BCM63158_PIN_NO(84, 5))
+#define BCM63158_GPIO_84__FUNC_B_LED_20                (BCM63158_PIN_NO(84, 6))
+
+#define BCM63158_GPIO_85__FUNC_RGMII1_RXD_3            (BCM63158_PIN_NO(85, 1))
+#define BCM63158_GPIO_85__FUNC_GPIO_85                 (BCM63158_PIN_NO(85, 5))
+#define BCM63158_GPIO_85__FUNC_B_LED_21                (BCM63158_PIN_NO(85, 6))
+
+#define BCM63158_GPIO_86__FUNC_RGMII1_TXCLK            (BCM63158_PIN_NO(86, 1))
+#define BCM63158_GPIO_86__FUNC_GPIO_86                 (BCM63158_PIN_NO(86, 5))
+#define BCM63158_GPIO_86__FUNC_B_LED_22                (BCM63158_PIN_NO(86, 6))
+
+#define BCM63158_GPIO_87__FUNC_RGMII1_TXCTL            (BCM63158_PIN_NO(87, 1))
+#define BCM63158_GPIO_87__FUNC_GPIO_87                 (BCM63158_PIN_NO(87, 5))
+#define BCM63158_GPIO_87__FUNC_B_LED_23                (BCM63158_PIN_NO(87, 6))
+
+#define BCM63158_GPIO_88__FUNC_RGMII1_TXD_0            (BCM63158_PIN_NO(88, 1))
+#define BCM63158_GPIO_88__FUNC_GPIO_88                 (BCM63158_PIN_NO(88, 5))
+#define BCM63158_GPIO_88__FUNC_B_LED_24                (BCM63158_PIN_NO(88, 6))
+
+#define BCM63158_GPIO_89__FUNC_RGMII1_TXD_1            (BCM63158_PIN_NO(89, 1))
+#define BCM63158_GPIO_89__FUNC_GPIO_89                 (BCM63158_PIN_NO(89, 5))
+#define BCM63158_GPIO_89__FUNC_B_LED_25                (BCM63158_PIN_NO(89, 6))
+
+#define BCM63158_GPIO_90__FUNC_RGMII1_TXD_2            (BCM63158_PIN_NO(90, 1))
+#define BCM63158_GPIO_90__FUNC_GPIO_90                 (BCM63158_PIN_NO(90, 5))
+#define BCM63158_GPIO_90__FUNC_B_LED_26                (BCM63158_PIN_NO(90, 6))
+
+#define BCM63158_GPIO_91__FUNC_RGMII1_TXD_3            (BCM63158_PIN_NO(91, 1))
+#define BCM63158_GPIO_91__FUNC_GPIO_91                 (BCM63158_PIN_NO(91, 5))
+#define BCM63158_GPIO_91__FUNC_B_LED_27                (BCM63158_PIN_NO(91, 6))
+
+#define BCM63158_GPIO_92__FUNC_RGMII2_RXCLK            (BCM63158_PIN_NO(92, 1))
+#define BCM63158_GPIO_92__FUNC_GPIO_92                 (BCM63158_PIN_NO(92, 5))
+#define BCM63158_GPIO_92__FUNC_B_LED_28                (BCM63158_PIN_NO(92, 6))
+
+#define BCM63158_GPIO_93__FUNC_RGMII2_RXCTL            (BCM63158_PIN_NO(93, 1))
+#define BCM63158_GPIO_93__FUNC_GPIO_93                 (BCM63158_PIN_NO(93, 5))
+#define BCM63158_GPIO_93__FUNC_B_LED_29                (BCM63158_PIN_NO(93, 6))
+
+#define BCM63158_GPIO_94__FUNC_RGMII2_RXD_0            (BCM63158_PIN_NO(94, 1))
+#define BCM63158_GPIO_94__FUNC_GPIO_94                 (BCM63158_PIN_NO(94, 5))
+#define BCM63158_GPIO_94__FUNC_B_LED_30                (BCM63158_PIN_NO(94, 6))
+
+#define BCM63158_GPIO_95__FUNC_RGMII2_RXD_1            (BCM63158_PIN_NO(95, 1))
+#define BCM63158_GPIO_95__FUNC_GPIO_95                 (BCM63158_PIN_NO(95, 5))
+#define BCM63158_GPIO_95__FUNC_B_LED_31                (BCM63158_PIN_NO(95, 6))
+
+#define BCM63158_GPIO_96__FUNC_RGMII2_RXD_2            (BCM63158_PIN_NO(96, 1))
+#define BCM63158_GPIO_96__FUNC_GPIO_96                 (BCM63158_PIN_NO(96, 5))
+
+#define BCM63158_GPIO_97__FUNC_RGMII2_RXD_3            (BCM63158_PIN_NO(97, 1))
+#define BCM63158_GPIO_97__FUNC_GPIO_97                 (BCM63158_PIN_NO(97, 5))
+
+#define BCM63158_GPIO_98__FUNC_RGMII2_TXCLK            (BCM63158_PIN_NO(98, 1))
+#define BCM63158_GPIO_98__FUNC_GPIO_98                 (BCM63158_PIN_NO(98, 5))
+
+#define BCM63158_GPIO_99__FUNC_RGMII2_TXCTL            (BCM63158_PIN_NO(99, 1))
+#define BCM63158_GPIO_99__FUNC_GPIO_99                 (BCM63158_PIN_NO(99, 5))
+
+#define BCM63158_GPIO_100__FUNC_RGMII2_TXD_0           (BCM63158_PIN_NO(100, 1))
+#define BCM63158_GPIO_100__FUNC_GPIO_100               (BCM63158_PIN_NO(100, 5))
+
+#define BCM63158_GPIO_101__FUNC_RGMII2_TXD_1           (BCM63158_PIN_NO(101, 1))
+#define BCM63158_GPIO_101__FUNC_GPIO_101               (BCM63158_PIN_NO(101, 5))
+
+#define BCM63158_GPIO_102__FUNC_RGMII2_TXD_2           (BCM63158_PIN_NO(102, 1))
+#define BCM63158_GPIO_102__FUNC_GPIO_102               (BCM63158_PIN_NO(102, 5))
+
+#define BCM63158_GPIO_103__FUNC_RGMII2_TXD_3           (BCM63158_PIN_NO(103, 1))
+#define BCM63158_GPIO_103__FUNC_GPIO_103               (BCM63158_PIN_NO(103, 5))
+
+#define BCM63158_GPIO_104__FUNC_RGMII_MDC              (BCM63158_PIN_NO(104, 1))
+#define BCM63158_GPIO_104__FUNC_GPIO_104               (BCM63158_PIN_NO(104, 5))
+
+#define BCM63158_GPIO_105__FUNC_RGMII_MDIO             (BCM63158_PIN_NO(105, 1))
+#define BCM63158_GPIO_105__FUNC_GPIO_105               (BCM63158_PIN_NO(105, 5))
+
+#define BCM63158_GPIO_106__FUNC_UART0_SDIN             (BCM63158_PIN_NO(106, 1))
+#define BCM63158_GPIO_106__FUNC_GPIO_106               (BCM63158_PIN_NO(106, 5))
+
+#define BCM63158_GPIO_107__FUNC_UART0_SDOUT            (BCM63158_PIN_NO(107, 1))
+#define BCM63158_GPIO_107__FUNC_GPIO_107               (BCM63158_PIN_NO(107, 5))
+
+#define BCM63158_GPIO_108__FUNC_SPIM_CLK               (BCM63158_PIN_NO(108, 0))
+#define BCM63158_GPIO_108__FUNC_GPIO_108               (BCM63158_PIN_NO(108, 5))
+
+#define BCM63158_GPIO_109__FUNC_SPIM_MOSI              (BCM63158_PIN_NO(109, 0))
+#define BCM63158_GPIO_109__FUNC_GPIO_109               (BCM63158_PIN_NO(109, 5))
+
+#define BCM63158_GPIO_110__FUNC_SPIM_MISO              (BCM63158_PIN_NO(110, 0))
+#define BCM63158_GPIO_110__FUNC_GPIO_110               (BCM63158_PIN_NO(110, 5))
+
+#define BCM63158_GPIO_111__FUNC_SPIM_SS0_B             (BCM63158_PIN_NO(111, 0))
+#define BCM63158_GPIO_111__FUNC_GPIO_111               (BCM63158_PIN_NO(111, 5))
+
+#define BCM63158_GPIO_112__FUNC_SPIM_SS1_B             (BCM63158_PIN_NO(112, 0))
+#define BCM63158_GPIO_112__FUNC_GPIO_112               (BCM63158_PIN_NO(112, 5))
+
+#define BCM63158_GPIO_113__FUNC_PCIE0a_CLKREQ_B        (BCM63158_PIN_NO(113, 1))
+#define BCM63158_GPIO_113__FUNC_PCIE2b_CLKREQ_B        (BCM63158_PIN_NO(113, 2))
+#define BCM63158_GPIO_113__FUNC_PCIE1c_CLKREQ_B        (BCM63158_PIN_NO(113, 3))
+#define BCM63158_GPIO_113__FUNC_GPIO_113               (BCM63158_PIN_NO(113, 5))
+
+#define BCM63158_GPIO_114__FUNC_PCIE0a_RST_B           (BCM63158_PIN_NO(114, 1))
+#define BCM63158_GPIO_114__FUNC_PCIE2b_RST_B           (BCM63158_PIN_NO(114, 2))
+#define BCM63158_GPIO_114__FUNC_PCIE1c_RST_B           (BCM63158_PIN_NO(114, 3))
+#define BCM63158_GPIO_114__FUNC_GPIO_114               (BCM63158_PIN_NO(114, 5))
+
+#define BCM63158_GPIO_115__FUNC_PCIE1a_CLKREQ_B        (BCM63158_PIN_NO(115, 1))
+#define BCM63158_GPIO_115__FUNC_PCIE0b_CLKREQ_B        (BCM63158_PIN_NO(115, 2))
+#define BCM63158_GPIO_115__FUNC_PCIE2c_CLKREQ_B        (BCM63158_PIN_NO(115, 3))
+#define BCM63158_GPIO_115__FUNC_GPIO_115               (BCM63158_PIN_NO(115, 5))
+
+#define BCM63158_GPIO_116__FUNC_PCIE1a_RST_B           (BCM63158_PIN_NO(116, 1))
+#define BCM63158_GPIO_116__FUNC_PCIE0b_RST_B           (BCM63158_PIN_NO(116, 2))
+#define BCM63158_GPIO_116__FUNC_PCIE2c_RST_B           (BCM63158_PIN_NO(116, 3))
+#define BCM63158_GPIO_116__FUNC_GPIO_116               (BCM63158_PIN_NO(116, 5))
+
+#define BCM63158_GPIO_117__FUNC_PCIE2a_CLKREQ_B        (BCM63158_PIN_NO(117, 1))
+#define BCM63158_GPIO_117__FUNC_PCIE1b_CLKREQ_B        (BCM63158_PIN_NO(117, 2))
+#define BCM63158_GPIO_117__FUNC_PCIE0c_CLKREQ_B        (BCM63158_PIN_NO(117, 3))
+#define BCM63158_GPIO_117__FUNC_GPIO_117               (BCM63158_PIN_NO(117, 5))
+
+#define BCM63158_GPIO_118__FUNC_PCIE2a_RST_B           (BCM63158_PIN_NO(118, 1))
+#define BCM63158_GPIO_118__FUNC_PCIE1b_RST_B           (BCM63158_PIN_NO(118, 2))
+#define BCM63158_GPIO_118__FUNC_PCIE0c_RST_B           (BCM63158_PIN_NO(118, 3))
+#define BCM63158_GPIO_118__FUNC_GPIO_118               (BCM63158_PIN_NO(118, 5))
+
+#define BCM63158_GPIO_119__FUNC_PCIE3_CLKREQ_B         (BCM63158_PIN_NO(119, 1))
+#define BCM63158_GPIO_119__FUNC_GPIO_119               (BCM63158_PIN_NO(119, 5))
+
+#define BCM63158_GPIO_120__FUNC_PCIE3_RST_B            (BCM63158_PIN_NO(120, 0))
+#define BCM63158_GPIO_120__FUNC_GPIO_120               (BCM63158_PIN_NO(120, 5))
+
+#define BCM63158_GPIO_121__FUNC_USB0a_PWRFLT           (BCM63158_PIN_NO(121, 1))
+#define BCM63158_GPIO_121__FUNC_USB1b_PWRFLT           (BCM63158_PIN_NO(121, 2))
+#define BCM63158_GPIO_121__FUNC_GPIO_121               (BCM63158_PIN_NO(121, 5))
+
+#define BCM63158_GPIO_122__FUNC_USB0a_PWRON            (BCM63158_PIN_NO(122, 1))
+#define BCM63158_GPIO_122__FUNC_USB1b_PWRON            (BCM63158_PIN_NO(122, 2))
+#define BCM63158_GPIO_122__FUNC_GPIO_122               (BCM63158_PIN_NO(122, 5))
+
+#define BCM63158_GPIO_123__FUNC_USB1a_PWRFLT           (BCM63158_PIN_NO(123, 1))
+#define BCM63158_GPIO_123__FUNC_USB0b_PWRFLT           (BCM63158_PIN_NO(123, 2))
+#define BCM63158_GPIO_123__FUNC_GPIO_123               (BCM63158_PIN_NO(123, 5))
+
+#define BCM63158_GPIO_124__FUNC_USB1a_PWRON            (BCM63158_PIN_NO(124, 1))
+#define BCM63158_GPIO_124__FUNC_USB0b_PWRON            (BCM63158_PIN_NO(124, 2))
+#define BCM63158_GPIO_124__FUNC_GPIO_124               (BCM63158_PIN_NO(124, 5))
+
+#define BCM63158_GPIO_125__FUNC_RESET_OUT_B            (BCM63158_PIN_NO(125, 0))
+#define BCM63158_GPIO_125__FUNC_GPIO_125               (BCM63158_PIN_NO(125, 5))
+
+#endif /* _DT_BINDINGS_BCM63158_PINFUNC_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/dt-bindings/reset/brcm,bcm63xx-pmc.h	2025-09-25 17:40:36.923373950 +0200
@@ -0,0 +1,39 @@
+/*
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_RESET_BRCM_BCM63XX_PMC_H
+#define _DT_BINDINGS_RESET_BRCM_BCM63XX_PMC_H
+
+/* Reset line indices for the BCM63xx PMC reset controller (DT reset cells). */
+#define PMC_R_CPU0		0
+#define PMC_R_CPU1		1
+#define PMC_R_CPU2		2
+#define PMC_R_CPU3		3
+
+#define PMC_R_RDP		10
+#define PMC_R_SF2		11
+#define PMC_R_USBH		12
+#define PMC_R_SAR		13
+#define PMC_R_SATA		14
+
+#define PMC_R_PCIE0		15
+#define PMC_R_PCIE01		16
+#define PMC_R_PCIE1		17
+#define PMC_R_PCIE2		18
+#define PMC_R_PCIE3		19
+
+#define PMC_R_XRDP		20
+
+#define PMC_R_WAN_AE		21
+
+#define PMC_R_LAST		22
+
+#endif /* !_DT_BINDINGS_RESET_BRCM_BCM63XX_PMC_H */
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/dt-bindings/soc/broadcom,bcm63158-procmon.h	2025-09-25 17:40:36.923373950 +0200
@@ -0,0 +1,13 @@
+/*
+ * RCAL selector indices for the BCM63158 PROCMON block (DT cells).
+ * Created by <nschichan@freebox.fr> on Thu Oct  3 19:11:25 2019
+ */
+
+#pragma once
+
+#define RCAL_0P25UM_HORZ	0
+#define RCAL_0P25UM_VERT	1
+#define RCAL_0P5UM_HORZ		2
+#define RCAL_0P5UM_VERT		3
+#define RCAL_1UM_HORZ		4
+#define RCAL_1UM_VERT		5
--- /dev/null	2025-08-21 03:50:04.461059392 +0200
+++ linux-6.13.12-fbx/scripts/dtc/include-prefixes/dt-bindings/soc/broadcom,bcm63xx-xdslphy.h	2025-09-25 17:40:36.923373950 +0200
@@ -0,0 +1,83 @@
+#ifndef _DT_BINDINGS_SOC_BCM63XX_XDSLPHY_H
+#define _DT_BINDINGS_SOC_BCM63XX_XDSLPHY_H
+
+/*
+ * AFE ID bitfield values, imported verbatim from Broadcom boardparams.h
+ */
+
+/* AFE IDs */
+#define BCM63XX_XDSLPHY_AFE_DEFAULT			0
+
+#define BCM63XX_XDSLPHY_AFE_CHIP_INT			(1 << 28)
+#define BCM63XX_XDSLPHY_AFE_CHIP_6505			(2 << 28)
+#define BCM63XX_XDSLPHY_AFE_CHIP_6306			(3 << 28)
+#define BCM63XX_XDSLPHY_AFE_CHIP_CH0			(4 << 28)
+#define BCM63XX_XDSLPHY_AFE_CHIP_CH1			(5 << 28)
+#define BCM63XX_XDSLPHY_AFE_CHIP_GFAST			(6 << 28)
+#define BCM63XX_XDSLPHY_AFE_CHIP_GFAST_CH0		(7 << 28)
+
+#define BCM63XX_XDSLPHY_AFE_LD_ISIL1556			(1 << 21)
+#define BCM63XX_XDSLPHY_AFE_LD_6301			(2 << 21)
+#define BCM63XX_XDSLPHY_AFE_LD_6302			(3 << 21)
+#define BCM63XX_XDSLPHY_AFE_LD_6303			(4 << 21)
+#define BCM63XX_XDSLPHY_AFE_LD_6304			(5 << 21)
+#define BCM63XX_XDSLPHY_AFE_LD_6305			(6 << 21)
+
+#define BCM63XX_XDSLPHY_AFE_LD_REV_6303_VR5P3		(1 << 18)
+
+#define BCM63XX_XDSLPHY_AFE_FE_ANNEXA			(1 << 15)
+#define BCM63XX_XDSLPHY_AFE_FE_ANNEXB			(2 << 15)
+#define BCM63XX_XDSLPHY_AFE_FE_ANNEXJ			(3 << 15)
+#define BCM63XX_XDSLPHY_AFE_FE_ANNEXBJ			(4 << 15)
+#define BCM63XX_XDSLPHY_AFE_FE_ANNEXM			(5 << 15)
+
+#define BCM63XX_XDSLPHY_AFE_FE_AVMODE_COMBO		(0 << 13)
+#define BCM63XX_XDSLPHY_AFE_FE_AVMODE_ADSL		(1 << 13)
+#define BCM63XX_XDSLPHY_AFE_FE_AVMODE_VDSL		(2 << 13)
+
+/* VDSL only */
+#define BCM63XX_XDSLPHY_AFE_FE_REV_ISIL_REV1		(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_12_20             \
+	BCM63XX_XDSLPHY_AFE_FE_REV_ISIL_REV1
+#define BCM63XX_XDSLPHY_AFE_FE_REV_12_21		(2 << 8)
+
+/* Combo. NOTE(review): several *_REV_* macros below share the same (n << 8)
+   encoding; values mirror vendor boardparams.h (presumably disambiguated by
+   the LD/CHIP fields) — verify against vendor source before deduplicating. */
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV1		(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_7_12	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_7_2_21	(2 << 8)
+
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_7_2_1	(3 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_7_2		(4 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_7_2_UR2	(5 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_7_2_2	(6 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_7_2_30	(7 << 8)
+#define BCM63XX_XDSLPHY_AFE_6302_6306_REV_A_12_40	(8 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_30	(9 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_20	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_40	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_60	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_50	(2 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_3_35	(3 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_50	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6303_REV_12_51	(2 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6304_REV_12_4_40	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6304_REV_12_4_45	(2 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6304_REV_12_4_60	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6305_REV_12_5_60_1	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6305_REV_12_5_60_2	(2 << 8)
+
+
+/* ADSL only */
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_5_2_1	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_5_2_2	(2 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6302_REV_5_2_3	(3 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6301_REV_5_1_1	(1 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6301_REV_5_1_2	(2 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6301_REV_5_1_3	(3 << 8)
+#define BCM63XX_XDSLPHY_AFE_FE_REV_6301_REV_5_1_4	(4 << 8)
+
+#define BCM63XX_XDSLPHY_AFE_FE_COAX			(1 << 7)
+
+#define BCM63XX_XDSLPHY_AFE_FE_RNC			(1 << 6)
+
+#endif /* !_DT_BINDINGS_SOC_BCM63XX_XDSLPHY_H */
