author     Zipsnet <hcolmenares@gmail.com>  2018-05-29 13:47:01 -0400
committer  Zipsnet <hcolmenares@gmail.com>  2018-05-29 13:47:01 -0400
commit     78cd7c614eabe4b7b1aa6c85553ec3dc276d32fd (patch)
tree       f309a2b0c94e6a7d9e403f79563b2936c336061a
parent     582634331303ffb0f7146344b265469eedb73118 (diff)
parent     eddcf87321a599f28fdccadc7fbbce85d5990b33 (diff)
Merge branch 'oreo-m2' of https://github.com/nathanchance/wahoo into HEAD
Change-Id: Idda3031b160e6b6088064c567634acc8e70bff16
-rw-r--r--Documentation/ABI/testing/sysfs-devices-ufs15
-rw-r--r--Documentation/device-mapper/verity.txt11
-rw-r--r--Documentation/filesystems/proc.txt13
-rw-r--r--Documentation/kernel-parameters.txt3
-rw-r--r--Makefile5
-rw-r--r--arch/alpha/include/asm/futex.h26
-rw-r--r--arch/arc/include/asm/futex.h40
-rw-r--r--arch/arm/boot/dts/imx6qdl-wandboard.dtsi1
-rw-r--r--arch/arm/include/asm/assembler.h10
-rw-r--r--arch/arm/include/asm/futex.h26
-rw-r--r--arch/arm/kernel/traps.c5
-rw-r--r--arch/arm/lib/getuser.S10
-rw-r--r--arch/arm/probes/kprobes/opt-arm.c4
-rw-r--r--arch/arm64/Kconfig14
-rw-r--r--arch/arm64/boot/dts/htc/batterydata-walleye.dtsi1
-rwxr-xr-xarch/arm64/boot/dts/htc/msm8998-htc-common.dtsi8
-rw-r--r--arch/arm64/boot/dts/htc/msm8998-htc-usb-xa.dtsi15
-rw-r--r--arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd.dtsi22
-rw-r--r--arch/arm64/boot/dts/lge/msm8998-taimen-pm.dtsi7
-rw-r--r--arch/arm64/boot/dts/lge/msm8998-taimen-touch-stm-ftm4.dtsi2
-rw-r--r--arch/arm64/boot/dts/lge/msm8998-taimen-usb.dtsi14
-rw-r--r--arch/arm64/configs/flash-taimen_defconfig795
-rw-r--r--arch/arm64/configs/flash_defconfig (renamed from arch/arm64/configs/flash-walleye_defconfig)10
-rw-r--r--arch/arm64/configs/wahoo_defconfig5
-rw-r--r--arch/arm64/include/asm/assembler.h40
-rw-r--r--arch/arm64/include/asm/cputype.h10
-rw-r--r--arch/arm64/include/asm/futex.h26
-rw-r--r--arch/arm64/kernel/arm64ksyms.c8
-rw-r--r--arch/arm64/kernel/cpuinfo.c19
-rw-r--r--arch/arm64/lib/strcmp.S2
-rw-r--r--arch/arm64/lib/tishift.S15
-rw-r--r--arch/arm64/mm/kasan_init.c4
-rw-r--r--arch/arm64/mm/proc.S5
-rw-r--r--arch/frv/include/asm/futex.h3
-rw-r--r--arch/frv/kernel/futex.c27
-rw-r--r--arch/hexagon/include/asm/futex.h38
-rw-r--r--arch/ia64/include/asm/futex.h25
-rw-r--r--arch/microblaze/include/asm/futex.h38
-rw-r--r--arch/mips/include/asm/futex.h25
-rw-r--r--arch/parisc/include/asm/futex.h25
-rw-r--r--arch/powerpc/include/asm/firmware.h5
-rw-r--r--arch/powerpc/include/asm/futex.h26
-rw-r--r--arch/powerpc/kernel/setup-common.c11
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c4
-rw-r--r--arch/powerpc/platforms/powernv/idle.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-nvram.c14
-rw-r--r--arch/powerpc/platforms/powernv/opal-xscom.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal.c36
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c2
-rw-r--r--arch/powerpc/platforms/powernv/setup.c12
-rw-r--r--arch/powerpc/platforms/powernv/smp.c74
-rw-r--r--arch/s390/Kconfig47
-rw-r--r--arch/s390/Makefile10
-rw-r--r--arch/s390/include/asm/alternative-asm.h108
-rw-r--r--arch/s390/include/asm/alternative.h149
-rw-r--r--arch/s390/include/asm/barrier.h24
-rw-r--r--arch/s390/include/asm/facility.h18
-rw-r--r--arch/s390/include/asm/futex.h23
-rw-r--r--arch/s390/include/asm/kvm_host.h3
-rw-r--r--arch/s390/include/asm/lowcore.h7
-rw-r--r--arch/s390/include/asm/nospec-branch.h17
-rw-r--r--arch/s390/include/asm/nospec-insn.h182
-rw-r--r--arch/s390/include/asm/processor.h4
-rw-r--r--arch/s390/include/asm/thread_info.h4
-rw-r--r--arch/s390/include/uapi/asm/kvm.h3
-rw-r--r--arch/s390/kernel/Makefile6
-rw-r--r--arch/s390/kernel/alternative.c112
-rw-r--r--arch/s390/kernel/base.S24
-rw-r--r--arch/s390/kernel/early.c5
-rw-r--r--arch/s390/kernel/entry.S193
-rw-r--r--arch/s390/kernel/ipl.c1
-rw-r--r--arch/s390/kernel/irq.c5
-rw-r--r--arch/s390/kernel/module.c65
-rw-r--r--arch/s390/kernel/nospec-branch.c166
-rw-r--r--arch/s390/kernel/nospec-sysfs.c21
-rw-r--r--arch/s390/kernel/perf_cpum_sf.c4
-rw-r--r--arch/s390/kernel/processor.c18
-rw-r--r--arch/s390/kernel/reipl.S5
-rw-r--r--arch/s390/kernel/setup.c14
-rw-r--r--arch/s390/kernel/smp.c7
-rw-r--r--arch/s390/kernel/swsusp.S10
-rw-r--r--arch/s390/kernel/uprobes.c9
-rw-r--r--arch/s390/kernel/vmlinux.lds.S37
-rw-r--r--arch/s390/kvm/kvm-s390.c16
-rw-r--r--arch/s390/lib/mem.S9
-rw-r--r--arch/s390/mm/pgtable.c5
-rw-r--r--arch/s390/net/bpf_jit.S16
-rw-r--r--arch/s390/net/bpf_jit_comp.c63
-rw-r--r--arch/sh/include/asm/futex.h26
-rw-r--r--arch/sparc/include/asm/futex_64.h26
-rw-r--r--arch/tile/include/asm/futex.h40
-rw-r--r--arch/x86/boot/compressed/eboot.c6
-rw-r--r--arch/x86/include/asm/futex.h40
-rw-r--r--arch/x86/include/asm/thread_info.h44
-rw-r--r--arch/x86/include/uapi/asm/msgbuf.h31
-rw-r--r--arch/x86/include/uapi/asm/shmbuf.h42
-rw-r--r--arch/x86/kernel/cpu/perf_event.c8
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_cstate.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event_msr.c9
-rw-r--r--arch/x86/kernel/machine_kexec_32.c6
-rw-r--r--arch/x86/kernel/machine_kexec_64.c4
-rw-r--r--arch/x86/kernel/smpboot.c2
-rw-r--r--arch/x86/kernel/tsc.c2
-rw-r--r--arch/x86/xen/mmu.c4
-rw-r--r--arch/xtensa/include/asm/futex.h27
-rw-r--r--block/Kconfig.iosched2
-rw-r--r--block/blk-core.c93
-rw-r--r--block/maple-iosched.c46
-rw-r--r--block/scsi_ioctl.c29
-rw-r--r--build.config.kasan3
-rw-r--r--crypto/af_alg.c8
-rw-r--r--drivers/amba/bus.c17
-rw-r--r--drivers/android/binder.c8
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/atm/zatm.c3
-rw-r--r--drivers/base/power/main.c3
-rw-r--r--drivers/bluetooth/btusb.c2
-rw-r--r--drivers/cdrom/cdrom.c2
-rw-r--r--drivers/char/Makefile4
-rw-r--r--drivers/char/adsprpc.c4
-rw-r--r--drivers/char/diag/diag_dci.c100
-rw-r--r--drivers/char/diag/diag_masks.c14
-rw-r--r--drivers/char/diag/diag_memorydevice.c1
-rw-r--r--drivers/char/diag/diagchar.h3
-rw-r--r--drivers/char/diag/diagchar_core.c66
-rw-r--r--drivers/char/diag/diagfwd.c10
-rw-r--r--drivers/char/diag/diagfwd_mhi.c2
-rw-r--r--drivers/char/diag/diagfwd_peripheral.c23
-rw-r--r--drivers/char/virtio_console.c49
-rw-r--r--drivers/clk/msm/clock-osm.c4
-rw-r--r--drivers/clk/qcom/clk-cpu-osm.c4
-rw-r--r--drivers/cpufreq/Kconfig9
-rw-r--r--drivers/cpufreq/Makefile3
-rw-r--r--drivers/cpufreq/cpu_wake_boost.c139
-rw-r--r--drivers/cpufreq/cpufreq_stats.c2
-rw-r--r--drivers/cpufreq/intel_pstate.c34
-rw-r--r--drivers/cpufreq/powernv-cpufreq.c2
-rw-r--r--drivers/cpuidle/coupled.c1
-rw-r--r--drivers/cpuidle/cpuidle-powernv.c2
-rw-r--r--drivers/devfreq/devfreq.c99
-rw-r--r--drivers/gpio/gpio-rcar.c46
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c4
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c1
-rw-r--r--drivers/gpu/msm/adreno.c6
-rw-r--r--drivers/gpu/msm/adreno_a5xx.c26
-rw-r--r--drivers/gpu/msm/kgsl_iommu.c42
-rw-r--r--drivers/gpu/msm/kgsl_iommu.h15
-rw-r--r--drivers/infiniband/core/ucma.c2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c22
-rw-r--r--drivers/input/input-leds.c8
-rw-r--r--drivers/input/misc/drv260x.c2
-rw-r--r--drivers/input/misc/vl53L0/stmvl53l0_module-cci.c1
-rw-r--r--drivers/input/misc/vl53L0/stmvl53l0_module-i2c.c1
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c9
-rw-r--r--drivers/input/touchscreen/stm/ftm4_ts.c2
-rw-r--r--drivers/input/touchscreen/wake_gestures.c61
-rw-r--r--drivers/md/dm-bufio.c10
-rw-r--r--drivers/md/dm-verity-target.c65
-rw-r--r--drivers/md/dm-verity.h1
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp.h5
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp40.c6
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp44.c3
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp46.c3
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp47.c15
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c24
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h4
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c17
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h4
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c25
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c7
-rw-r--r--drivers/media/platform/msm/vidc/msm_vidc_common.c6
-rw-r--r--drivers/message/fusion/mptsas.c1
-rw-r--r--drivers/misc/mnh/mnh-ddr.c131
-rw-r--r--drivers/misc/mnh/mnh-ddr.h16
-rw-r--r--drivers/misc/mnh/mnh-sm.c31
-rw-r--r--drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c5
-rw-r--r--drivers/mmc/core/bus.c12
-rw-r--r--drivers/mmc/core/core.c22
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c33
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c9
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c78
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_main.c3
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h7
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c3
-rw-r--r--drivers/net/ethernet/sun/niu.c5
-rw-r--r--drivers/net/ppp/pppoe.c4
-rw-r--r--drivers/net/team/team.c38
-rw-r--r--drivers/net/usb/cdc_ether.c10
-rw-r--r--drivers/net/usb/qmi_wwan.c13
-rw-r--r--drivers/net/usb/r8152.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c105
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c5
-rw-r--r--drivers/net/wireless/ath/ath10k/rx_desc.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c2
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c8
-rw-r--r--drivers/power/htc_battery.c110
-rw-r--r--drivers/power/lge_battery.c124
-rw-r--r--drivers/power/power_supply_sysfs.c1
-rw-r--r--drivers/power/reset/msm-poweroff.c130
-rw-r--r--drivers/power/supply/qcom/Kconfig11
-rw-r--r--drivers/power/supply/qcom/bcl_peripheral.c7
-rw-r--r--drivers/power/supply/qcom/qpnp-fg-gen3.c109
-rw-r--r--drivers/power/supply/qcom/qpnp-smb2.c16
-rw-r--r--drivers/power/supply/qcom/smb-lib.c41
-rw-r--r--drivers/power/supply/qcom/smb-lib.h4
-rw-r--r--drivers/power/supply/qcom/smb138x-charger.c6
-rw-r--r--drivers/s390/char/Makefile2
-rw-r--r--drivers/s390/cio/chsc.c14
-rw-r--r--drivers/s390/cio/qdio_setup.c12
-rw-r--r--drivers/s390/scsi/zfcp_dbf.c23
-rw-r--r--drivers/s390/scsi/zfcp_ext.h5
-rw-r--r--drivers/s390/scsi/zfcp_scsi.c14
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c33
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/ufs/ufs-debugfs.c93
-rw-r--r--drivers/scsi/ufs/ufs-qcom-ice.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c210
-rw-r--r--drivers/scsi/ufs/ufshcd.h39
-rw-r--r--drivers/soc/qcom/Kconfig3
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/icnss.c30
-rw-r--r--drivers/soc/qcom/state_notifier.c130
-rw-r--r--drivers/spi/spi-pxa2xx.h2
-rw-r--r--drivers/staging/android/lowmemorykiller.c5
-rw-r--r--drivers/staging/easel/regulator/bcm15602-regulator.c1
-rw-r--r--drivers/staging/fw-api/fw/htc.h32
-rw-r--r--drivers/staging/fw-api/fw/htt.h12
-rw-r--r--drivers/staging/fw-api/fw/htt_ppdu_stats.h1595
-rw-r--r--drivers/staging/fw-api/fw/htt_stats.h670
-rw-r--r--drivers/staging/fw-api/fw/wmi_services.h13
-rw-r--r--drivers/staging/fw-api/fw/wmi_tlv_defs.h160
-rw-r--r--drivers/staging/fw-api/fw/wmi_unified.h775
-rw-r--r--drivers/staging/fw-api/fw/wmi_version.h2
-rw-r--r--drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h3
-rw-r--r--drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c22
-rw-r--r--drivers/staging/qca-wifi-host-cmn/hif/src/hif_debug.h6
-rw-r--r--drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.c16
-rw-r--r--drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h11
-rw-r--r--drivers/staging/qca-wifi-host-cmn/htc/htc.c10
-rw-r--r--drivers/staging/qca-wifi-host-cmn/htc/htc_recv.c4
-rw-r--r--drivers/staging/qca-wifi-host-cmn/htc/htc_send.c19
-rw-r--r--drivers/staging/qca-wifi-host-cmn/htc/htc_services.c6
-rw-r--r--drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debugfs.h271
-rw-r--r--drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mc_timer.h3
-rw-r--r--drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_trace.h163
-rw-r--r--drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_types.h34
-rw-r--r--drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_util.h14
-rw-r--r--drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_debugfs.h24
-rw-r--r--drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_types.h5
-rw-r--r--drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_util.h24
-rw-r--r--drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_debugfs.c416
-rw-r--r--drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mc_timer.c25
-rw-r--r--drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c24
-rw-r--r--drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_trace.c794
-rw-r--r--drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_apf_tlv.h100
-rw-r--r--drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h111
-rw-r--r--drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h192
-rw-r--r--drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h29
-rw-r--r--drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_tlv.h25
-rw-r--r--drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_helper.c37
-rw-r--r--drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c33
-rw-r--r--drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_apf_tlv.c229
-rw-r--r--drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c107
-rw-r--r--drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c260
-rw-r--r--drivers/staging/qcacld-3.0/Kbuild23
-rw-r--r--drivers/staging/qcacld-3.0/Makefile20
-rw-r--r--drivers/staging/qcacld-3.0/core/cds/inc/cds_api.h11
-rw-r--r--drivers/staging/qcacld-3.0/core/cds/inc/cds_concurrency.h23
-rw-r--r--drivers/staging/qcacld-3.0/core/cds/inc/cds_config.h34
-rw-r--r--drivers/staging/qcacld-3.0/core/cds/inc/cds_sched.h12
-rw-r--r--drivers/staging/qcacld-3.0/core/cds/inc/cds_utils.h4
-rw-r--r--drivers/staging/qcacld-3.0/core/cds/src/cds_api.c76
-rw-r--r--drivers/staging/qcacld-3.0/core/cds/src/cds_concurrency.c224
-rw-r--r--drivers/staging/qcacld-3.0/core/cds/src/cds_mc_timer.c4
-rw-r--r--drivers/staging/qcacld-3.0/core/cds/src/cds_sched.c46
-rw-r--r--drivers/staging/qcacld-3.0/core/dp/htt/htt.c8
-rw-r--r--drivers/staging/qcacld-3.0/core/dp/htt/htt_t2h.c59
-rw-r--r--drivers/staging/qcacld-3.0/core/dp/htt/htt_tx.c3
-rw-r--r--drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx.c7
-rw-r--r--drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_defrag.c7
-rw-r--r--drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_reorder.c19
-rw-r--r--drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx.c71
-rw-r--r--drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_desc.c27
-rw-r--r--drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_queue.c10
-rw-r--r--drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_send.c14
-rw-r--r--drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c251
-rw-r--r--drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx_types.h12
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_apf.h129
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_assoc.h11
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h440
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_ipa.h26
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h56
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_power.h6
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_softap_tx_rx.h4
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_tx_rx.h2
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_wext.h4
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_apf.c729
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c328
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg.c249
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c945
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.h33
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c45
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ext_scan.c51
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.c191
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ipa.c171
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_lro.c11
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c603
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_nan_datapath.c105
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_napi.c2
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_p2p.c141
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_power.c47
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_regulatory.c2
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_scan.c18
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c63
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_stats.c11
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tdls.c20
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tx_rx.c124
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wext.c168
-rw-r--r--drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wmm.c78
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h5
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h8
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h93
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/cfg/cfgUtil/dot11f.frms10
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/include/dot11f.h299
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/include/sir_params.h9
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_api.h6
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_session.h5
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_trace.h4
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_api.c88
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_ft_preauth.c9
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_action_frame.c1
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_assoc_req_frame.c24
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_deauth_frame.c250
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_disassoc_frame.c109
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_fils.c15
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_message_queue.c11
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_mlm_req_messages.c16
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_probe_rsp_frame.c12
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_sme_req_messages.c14
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_tdls.c15
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_scan_result_utils.c13
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c17
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_sme_rsp_messages.c12
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_session.c4
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_session_utils.c4
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_sme_req_utils.c55
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_trace.c7
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_types.h43
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.c23
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c4
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/system/src/sys_entry_func.c17
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/dot11f.c233
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/mac_trace.c4
-rw-r--r--drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c32
-rw-r--r--drivers/staging/qcacld-3.0/core/sap/inc/sap_api.h23
-rw-r--r--drivers/staging/qcacld-3.0/core/sap/src/sap_api_link_cntl.c25
-rw-r--r--drivers/staging/qcacld-3.0/core/sap/src/sap_ch_select.c135
-rw-r--r--drivers/staging/qcacld-3.0/core/sap/src/sap_fsm.c58
-rw-r--r--drivers/staging/qcacld-3.0/core/sap/src/sap_internal.h24
-rw-r--r--drivers/staging/qcacld-3.0/core/sap/src/sap_module.c56
-rw-r--r--drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h30
-rw-r--r--drivers/staging/qcacld-3.0/core/sme/inc/csr_internal.h14
-rw-r--r--drivers/staging/qcacld-3.0/core/sme/inc/csr_neighbor_roam.h15
-rw-r--r--drivers/staging/qcacld-3.0/core/sme/inc/sme_api.h186
-rw-r--r--drivers/staging/qcacld-3.0/core/sme/inc/sme_internal.h8
-rw-r--r--drivers/staging/qcacld-3.0/core/sme/inc/sme_rrm_internal.h3
-rw-r--r--drivers/staging/qcacld-3.0/core/sme/src/common/sme_api.c277
-rw-r--r--drivers/staging/qcacld-3.0/core/sme/src/common/sme_trace.c7
-rw-r--r--drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c337
-rw-r--r--drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_scan.c12
-rw-r--r--drivers/staging/qcacld-3.0/core/sme/src/csr/csr_neighbor_roam.c9
-rw-r--r--drivers/staging/qcacld-3.0/core/sme/src/csr/csr_util.c54
-rw-r--r--drivers/staging/qcacld-3.0/core/sme/src/qos/sme_qos.c10
-rw-r--r--drivers/staging/qcacld-3.0/core/sme/src/rrm/sme_rrm.c11
-rw-r--r--drivers/staging/qcacld-3.0/core/utils/epping/src/epping_main.c5
-rw-r--r--drivers/staging/qcacld-3.0/core/utils/epping/src/epping_txrx.c8
-rw-r--r--drivers/staging/qcacld-3.0/core/utils/fwlog/dbglog_host.c14
-rw-r--r--drivers/staging/qcacld-3.0/core/utils/pktlog/include/pktlog_ac.h6
-rw-r--r--drivers/staging/qcacld-3.0/core/utils/pktlog/linux_ac.c12
-rw-r--r--drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_ac.c25
-rw-r--r--drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_internal.c157
-rw-r--r--drivers/staging/qcacld-3.0/core/wma/inc/wma.h113
-rw-r--r--drivers/staging/qcacld-3.0/core/wma/inc/wma_api.h18
-rw-r--r--drivers/staging/qcacld-3.0/core/wma/inc/wma_internal.h4
-rw-r--r--drivers/staging/qcacld-3.0/core/wma/inc/wma_tgt_cfg.h5
-rw-r--r--drivers/staging/qcacld-3.0/core/wma/inc/wma_types.h11
-rw-r--r--drivers/staging/qcacld-3.0/core/wma/src/wma_data.c6
-rw-r--r--drivers/staging/qcacld-3.0/core/wma/src/wma_dev_if.c377
-rw-r--r--drivers/staging/qcacld-3.0/core/wma/src/wma_features.c276
-rw-r--r--drivers/staging/qcacld-3.0/core/wma/src/wma_main.c111
-rw-r--r--drivers/staging/qcacld-3.0/core/wma/src/wma_mgmt.c50
-rw-r--r--drivers/staging/qcacld-3.0/core/wma/src/wma_nan_datapath.c15
-rw-r--r--drivers/staging/qcacld-3.0/core/wma/src/wma_power.c22
-rw-r--r--drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c286
-rw-r--r--drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c80
-rw-r--r--drivers/staging/qcacld-3.0/uapi/linux/qca_vendor.h35
-rw-r--r--drivers/thermal/msm_lmh_dcvs.c5
-rw-r--r--drivers/tty/n_gsm.c23
-rw-r--r--drivers/tty/serial/serial_mctrl_gpio.c5
-rw-r--r--drivers/tty/tty_io.c5
-rw-r--r--drivers/tty/tty_ldisc.c16
-rw-r--r--drivers/usb/core/config.c4
-rw-r--r--drivers/usb/core/hcd.c1
-rw-r--r--drivers/usb/core/hub.c10
-rw-r--r--drivers/usb/core/quirks.c3
-rw-r--r--drivers/usb/dwc3/core.h8
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c7
-rw-r--r--drivers/usb/dwc3/ep0.c28
-rw-r--r--drivers/usb/dwc3/gadget.c141
-rw-r--r--drivers/usb/gadget/function/f_accessory.c10
-rw-r--r--drivers/usb/gadget/function/f_fs.c6
-rw-r--r--drivers/usb/musb/musb_host.c4
-rw-r--r--drivers/usb/pd/pd_engine.c189
-rw-r--r--drivers/usb/serial/Kconfig1
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c3
-rw-r--r--drivers/usb/serial/option.c448
-rw-r--r--drivers/usb/serial/usb-serial-simple.c7
-rw-r--r--drivers/usb/serial/visor.c69
-rw-r--r--drivers/usb/typec/tcpm.c8
-rw-r--r--drivers/usb/typec/tcpm.h10
-rw-r--r--drivers/usb/usbip/stub.h2
-rw-r--r--drivers/usb/usbip/stub_dev.c43
-rw-r--r--drivers/usb/usbip/stub_main.c100
-rw-r--r--drivers/usb/usbip/usbip_common.h2
-rw-r--r--drivers/video/fbdev/msm/mdss.h9
-rw-r--r--drivers/video/fbdev/msm/mdss_dsi.c5
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.c4
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_overlay.c40
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp.c9
-rw-r--r--fs/block_dev.c15
-rw-r--r--fs/btrfs/ctree.c6
-rw-r--r--fs/btrfs/tree-log.c7
-rw-r--r--fs/btrfs/volumes.c9
-rw-r--r--fs/cifs/dir.c9
-rw-r--r--fs/ext2/inode.c10
-rw-r--r--fs/ext4/balloc.c17
-rw-r--r--fs/ext4/extents.c16
-rw-r--r--fs/ext4/ialloc.c7
-rw-r--r--fs/ext4/inline.c66
-rw-r--r--fs/ext4/inode.c76
-rw-r--r--fs/ext4/super.c35
-rw-r--r--fs/ext4/xattr.c30
-rw-r--r--fs/ext4/xattr.h32
-rw-r--r--fs/f2fs/data.c2
-rw-r--r--fs/fs-writeback.c2
-rw-r--r--fs/hfsplus/super.c1
-rw-r--r--fs/jbd2/transaction.c1
-rw-r--r--fs/lockd/svc.c2
-rw-r--r--fs/pipe.c3
-rw-r--r--fs/proc/base.c15
-rw-r--r--fs/proc/meminfo.c5
-rw-r--r--fs/proc/task_mmu.c17
-rw-r--r--fs/sdcardfs/dentry.c8
-rw-r--r--fs/sdcardfs/lookup.c2
-rw-r--r--fs/sdcardfs/main.c7
-rw-r--r--fs/select.c3
-rw-r--r--fs/xfs/xfs_file.c14
-rw-r--r--include/asm-generic/futex.h50
-rw-r--r--include/dt-bindings/usb/typec.h23
-rw-r--r--include/linux/blkdev.h39
-rw-r--r--include/linux/clocksource.h2
-rw-r--r--include/linux/compiler-clang.h16
-rw-r--r--include/linux/devfreq.h3
-rw-r--r--include/linux/dmaengine.h20
-rw-r--r--include/linux/efi.h8
-rw-r--r--include/linux/if_vlan.h7
-rw-r--r--include/linux/mm.h20
-rw-r--r--include/linux/mm_types.h7
-rw-r--r--include/linux/mmc/host.h3
-rw-r--r--include/linux/mtd/flashchip.h1
-rw-r--r--include/linux/power_supply.h1
-rw-r--r--include/linux/signal.h17
-rw-r--r--include/linux/state_notifier.h20
-rw-r--r--include/linux/sysfs.h2
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/usb/audio-v2.h4
-rw-r--r--include/linux/virtio.h3
-rw-r--r--include/linux/wahoo_info.h4
-rw-r--r--include/linux/wake_gestures.h3
-rw-r--r--include/linux/writeback.h1
-rw-r--r--include/net/inet_timewait_sock.h1
-rw-r--r--include/net/llc_conn.h1
-rw-r--r--include/net/mac80211.h14
-rw-r--r--include/net/nexthop.h2
-rw-r--r--include/net/sock.h35
-rw-r--r--include/net/tcp.h4
-rw-r--r--include/sound/control.h7
-rw-r--r--include/trace/events/sock.h2
-rw-r--r--include/trace/events/xen.h16
-rw-r--r--include/uapi/linux/kvm.h1
-rw-r--r--include/uapi/linux/nl80211.h2
-rw-r--r--init/main.c5
-rw-r--r--kernel/auditsc.c7
-rw-r--r--kernel/bpf/arraymap.c2
-rw-r--r--kernel/bpf/hashtab.c9
-rw-r--r--kernel/bpf/syscall.c20
-rw-r--r--kernel/events/callchain.c10
-rw-r--r--kernel/events/core.c6
-rw-r--r--kernel/events/ring_buffer.c7
-rw-r--r--kernel/events/uprobes.c2
-rw-r--r--kernel/exit.c4
-rw-r--r--kernel/futex.c44
-rw-r--r--kernel/sched/cpufreq_schedutil.c3
-rw-r--r--kernel/signal.c11
-rw-r--r--kernel/time/tick-broadcast.c8
-rw-r--r--kernel/trace/trace.c5
-rw-r--r--kernel/trace/trace_events_filter.c3
-rw-r--r--kernel/trace/trace_uprobe.c2
-rw-r--r--kernel/tracepoint.c4
-rw-r--r--lib/kobject.c12
-rw-r--r--lib/test_kasan.c120
-rw-r--r--mm/Kconfig9
-rw-r--r--mm/filemap.c90
-rw-r--r--mm/kasan/kasan.c68
-rw-r--r--mm/kasan/kasan.h53
-rw-r--r--mm/kasan/report.c7
-rw-r--r--mm/memory.c30
-rw-r--r--mm/oom_kill.c5
-rw-r--r--mm/percpu.c1
-rw-r--r--mm/rmap.c12
-rw-r--r--mm/util.c16
-rw-r--r--mm/vmscan.c12
-rw-r--r--net/Kconfig4
-rw-r--r--net/atm/lec.c9
-rw-r--r--net/bridge/br_if.c4
-rw-r--r--net/ceph/messenger.c7
-rw-r--r--net/compat.c6
-rw-r--r--net/core/Makefile2
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/dev_addr_lists.c4
-rw-r--r--net/core/neighbour.c40
-rw-r--r--net/core/netpoll.c2
-rw-r--r--net/core/skbuff.c1
-rw-r--r--net/core/sock.c13
-rw-r--r--net/dccp/ccids/ccid2.c14
-rw-r--r--net/dccp/ipv4.c1
-rw-r--r--net/dccp/ipv6.c1
-rw-r--r--net/dccp/timer.c2
-rw-r--r--net/dns_resolver/dns_key.c13
-rw-r--r--net/ipv4/inet_timewait_sock.c1
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ping.c7
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/tcp.c10
-rw-r--r--net/ipv4/tcp_input.c9
-rw-r--r--net/ipv4/tcp_output.c145
-rw-r--r--net/ipv4/tcp_timer.c4
-rw-r--r--net/ipv4/udp.c7
-rw-r--r--net/ipv6/ip6_output.c3
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/l2tp/l2tp_netlink.c2
-rw-r--r--net/l2tp/l2tp_ppp.c7
-rw-r--r--net/llc/af_llc.c17
-rw-r--r--net/llc/llc_c_ac.c9
-rw-r--r--net/llc/llc_conn.c22
-rw-r--r--net/mac80211/util.c5
-rw-r--r--net/mac80211/wep.c3
-rw-r--r--net/mac80211/wpa.c45
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c155
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/nfc/hci/core.c10
-rw-r--r--net/openvswitch/flow_netlink.c9
-rw-r--r--net/packet/af_packet.c92
-rw-r--r--net/packet/internal.h10
-rw-r--r--net/rfkill/rfkill-gpio.c7
-rw-r--r--net/sched/sch_fq.c37
-rw-r--r--net/sctp/associola.c30
-rw-r--r--net/sctp/inqueue.c2
-rw-r--r--net/sctp/ipv6.c61
-rw-r--r--net/sctp/sm_statefuns.c89
-rw-r--r--net/tipc/net.c3
-rw-r--r--net/wireless/core.c3
-rw-r--r--net/wireless/db.txt12
-rw-r--r--net/xfrm/xfrm_state.c1
-rw-r--r--net/xfrm/xfrm_user.c2
-rw-r--r--scripts/Makefile.kasan41
-rw-r--r--scripts/Makefile.lib2
-rwxr-xr-xscripts/fetch-latest-wireguard.sh2
-rwxr-xr-xscripts/mkcompile_h2
-rw-r--r--sound/core/control_compat.c3
-rw-r--r--sound/core/pcm_compat.c2
-rw-r--r--sound/core/pcm_native.c1
-rw-r--r--sound/core/seq/oss/seq_oss_event.c15
-rw-r--r--sound/core/seq/oss/seq_oss_midi.c2
-rw-r--r--sound/core/seq/oss/seq_oss_synth.c85
-rw-r--r--sound/core/seq/oss/seq_oss_synth.h3
-rw-r--r--sound/core/seq/seq_virmidi.c4
-rw-r--r--sound/drivers/aloop.c29
-rw-r--r--sound/drivers/opl3/opl3_synth.c7
-rw-r--r--sound/pci/asihpi/hpimsginit.c13
-rw-r--r--sound/pci/asihpi/hpioctl.c4
-rw-r--r--sound/pci/hda/hda_hwdep.c12
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_realtek.c2
-rw-r--r--sound/pci/rme9652/hdspm.c24
-rw-r--r--sound/pci/rme9652/rme9652.c6
-rw-r--r--sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c7
-rw-r--r--sound/soc/fsl/fsl_esai.c7
-rw-r--r--sound/soc/msm/qdsp6v2/q6asm.c10
-rw-r--r--sound/usb/card.c2
-rw-r--r--sound/usb/clock.c5
-rw-r--r--sound/usb/mixer.c183
-rw-r--r--sound/usb/mixer_maps.c3
-rwxr-xr-xtools/testing/selftests/firmware/fw_filesystem.sh6
613 files changed, 19860 insertions, 6900 deletions
diff --git a/Documentation/ABI/testing/sysfs-devices-ufs b/Documentation/ABI/testing/sysfs-devices-ufs
new file mode 100644
index 000000000000..63328ed347f3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-devices-ufs
@@ -0,0 +1,15 @@
+What: /sys/devices/soc/<....>ufshc/slowio_us
+Date: February, 2018
+Contact: "Hyojun Kim" <hyojun@google.com>
+Description:
+ Contains the watermark value, in microseconds, for slow
+ UFS I/O logging and counting. Can be updated by writing
+ a value to it.
+
+What: /sys/devices/soc/<....>ufshc/slowio_cnt
+Date: February, 2018
+Contact: "Hyojun Kim" <hyojun@google.com>
+Description:
+ Contains the number of UFS I/O requests that took longer
+ than or equal to the defined slow I/O watermark time.
+ Can be reset by writing any value.
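Taken together, the two nodes form a small latency-logging interface: slowio_us
holds the threshold and slowio_cnt counts the requests that exceeded it. A
minimal userspace sketch (illustrative only, not part of this patch; the sysfs
directory is passed as an argument because the "<....>" portion of the path is
device specific):

/* Read the UFS slow-I/O counter described above, then reset it. */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[512];
	unsigned long cnt = 0;
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <ufshc sysfs dir>\n", argv[0]);
		return 1;
	}

	/* slowio_cnt: number of requests slower than the slowio_us watermark. */
	snprintf(path, sizeof(path), "%s/slowio_cnt", argv[1]);
	f = fopen(path, "r");
	if (!f || fscanf(f, "%lu", &cnt) != 1) {
		perror("slowio_cnt");
		return 1;
	}
	fclose(f);
	printf("slow UFS I/O requests: %lu\n", cnt);

	/* Writing any value resets the counter, per the description above. */
	f = fopen(path, "w");
	if (f) {
		fputs("0\n", f);
		fclose(f);
	}
	return 0;
}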
diff --git a/Documentation/device-mapper/verity.txt b/Documentation/device-mapper/verity.txt
index 89fd8f9a259f..b3d2e4a42255 100644
--- a/Documentation/device-mapper/verity.txt
+++ b/Documentation/device-mapper/verity.txt
@@ -109,6 +109,17 @@ fec_start <offset>
This is the offset, in <data_block_size> blocks, from the start of the
FEC device to the beginning of the encoding data.
+check_at_most_once
+ Verify data blocks only the first time they are read from the data device,
+ rather than every time. This reduces the overhead of dm-verity so that it
+ can be used on systems that are memory and/or CPU constrained. However, it
+ provides a reduced level of security because only offline tampering of the
+ data device's content will be detected, not online tampering.
+
+ Hash blocks are still verified each time they are read from the hash device,
+ since verification of hash blocks is less performance critical than data
+ blocks, and a hash block will not be verified any more after all the data
+ blocks it covers have been verified anyway.
Theory of operation
===================
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index e7aa730b927b..c0cabfe8d7d4 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -171,6 +171,9 @@ read the file /proc/PID/status:
VmLck: 0 kB
VmHWM: 476 kB
VmRSS: 476 kB
+ RssAnon: 352 kB
+ RssFile: 120 kB
+ RssShmem: 4 kB
VmData: 156 kB
VmStk: 88 kB
VmExe: 68 kB
@@ -233,7 +236,12 @@ Table 1-2: Contents of the status files (as of 4.1)
VmSize total program size
VmLck locked memory size
VmHWM peak resident set size ("high water mark")
- VmRSS size of memory portions
+ VmRSS size of memory portions. It contains the three
+ following parts (VmRSS = RssAnon + RssFile + RssShmem)
+ RssAnon size of resident anonymous memory
+ RssFile size of resident file mappings
+ RssShmem size of resident shmem memory (includes SysV shm,
+ mapping of tmpfs and shared anonymous mappings)
VmData size of data, stack, and text segments
VmStk size of data, stack, and text segments
VmExe size of text segment
@@ -267,7 +275,8 @@ Table 1-3: Contents of the statm files (as of 2.6.8-rc3)
Field Content
size total program size (pages) (same as VmSize in status)
resident size of memory portions (pages) (same as VmRSS in status)
- shared number of pages that are shared (i.e. backed by a file)
+ shared number of pages that are shared (i.e. backed by a file, same
+ as RssFile+RssShmem in status)
trs number of pages that are 'code' (not including libs; broken,
includes data segment)
lrs number of pages of library (always 0 on 2.6)
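The new RssAnon, RssFile and RssShmem fields can be read back directly from
/proc/<pid>/status. A minimal userspace sketch (illustrative only, assuming the
field names documented above) that checks VmRSS = RssAnon + RssFile + RssShmem
for the current process:

/* Parse the RSS breakdown from /proc/self/status. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/self/status", "r");
	char line[256];
	long vmrss = 0, anon = 0, file = 0, shmem = 0;

	if (!f) {
		perror("/proc/self/status");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Each sscanf() only stores a value when its prefix matches. */
		sscanf(line, "VmRSS: %ld kB", &vmrss);
		sscanf(line, "RssAnon: %ld kB", &anon);
		sscanf(line, "RssFile: %ld kB", &file);
		sscanf(line, "RssShmem: %ld kB", &shmem);
	}
	fclose(f);

	printf("VmRSS=%ld kB  RssAnon+RssFile+RssShmem=%ld kB\n",
	       vmrss, anon + file + shmem);
	return 0;
}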
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index a1734cf72335..a552a6e98822 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2418,6 +2418,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
noalign [KNL,ARM]
+ noaltinstr [S390] Disables alternative instructions patching
+ (CPU alternatives feature).
+
noapic [SMP,APIC] Tells the kernel to not make use of any
IOAPICs that may be present in the system.
diff --git a/Makefile b/Makefile
index d80918f2847b..ea7d8b45a535 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 4
-SUBLEVEL = 129
+SUBLEVEL = 133
EXTRAVERSION =
NAME = Blurry Fish Butt
@@ -436,7 +436,8 @@ export MAKE AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS DTC_FLAGS
-export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV CFLAGS_KASAN CFLAGS_UBSAN
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_GCOV CFLAGS_KCOV
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h
index f939794363ac..56474690e685 100644
--- a/arch/alpha/include/asm/futex.h
+++ b/arch/alpha/include/asm/futex.h
@@ -29,18 +29,10 @@
: "r" (uaddr), "r"(oparg) \
: "memory")
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -66,17 +58,9 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
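The same conversion is repeated for every architecture below: each
arch_futex_atomic_op_inuser() now only performs the atomic operation and hands
the old value back through *oval, while decoding encoded_op and comparing the
old value against cmparg move into one generic wrapper in kernel/futex.c. A
sketch of that shared wrapper, reconstructed from the per-arch code deleted
here (exact upstream details, e.g. sign extension of the operands, may differ):

#include <linux/futex.h>
#include <linux/uaccess.h>

static int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
{
	int op = (encoded_op >> 28) & 7;
	int cmp = (encoded_op >> 24) & 15;
	int oparg = (encoded_op << 8) >> 20;
	int cmparg = (encoded_op << 20) >> 20;
	int oldval, ret;

	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
		oparg = 1 << oparg;

	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
		return -EFAULT;

	/* The arch helper only does the atomic op and reports the old value. */
	ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
	if (ret)
		return ret;

	/* The comparison step, formerly duplicated in every arch header. */
	switch (cmp) {
	case FUTEX_OP_CMP_EQ: return oldval == cmparg;
	case FUTEX_OP_CMP_NE: return oldval != cmparg;
	case FUTEX_OP_CMP_LT: return oldval < cmparg;
	case FUTEX_OP_CMP_GE: return oldval >= cmparg;
	case FUTEX_OP_CMP_LE: return oldval <= cmparg;
	case FUTEX_OP_CMP_GT: return oldval > cmparg;
	default: return -ENOSYS;
	}
}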
diff --git a/arch/arc/include/asm/futex.h b/arch/arc/include/asm/futex.h
index 11e1b1f3acda..eb887dd13e74 100644
--- a/arch/arc/include/asm/futex.h
+++ b/arch/arc/include/asm/futex.h
@@ -73,20 +73,11 @@
#endif
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
-
#ifndef CONFIG_ARC_HAS_LLSC
preempt_disable(); /* to guarantee atomic r-m-w of futex op */
#endif
@@ -118,30 +109,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
preempt_enable();
#endif
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 7a032dd84bb2..9e096d811bed 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -88,7 +88,6 @@
clocks = <&clks 201>;
VDDA-supply = <&reg_2p5v>;
VDDIO-supply = <&reg_3p3v>;
- lrclk-strength = <3>;
};
};
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 2c16d9e7c03c..4a275fba6059 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -530,4 +530,14 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
#endif
.endm
+#ifdef CONFIG_KPROBES
+#define _ASM_NOKPROBE(entry) \
+ .pushsection "_kprobe_blacklist", "aw" ; \
+ .balign 4 ; \
+ .long entry; \
+ .popsection
+#else
+#define _ASM_NOKPROBE(entry)
+#endif
+
#endif /* __ASM_ASSEMBLER_H__ */
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 6795368ad023..cc414382dab4 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -128,20 +128,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#endif /* !SMP */
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
#ifndef CONFIG_SMP
preempt_disable();
#endif
@@ -172,17 +162,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
preempt_enable();
#endif
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 75a371951f1a..191f5fd87da3 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -19,6 +19,7 @@
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
+#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/bug.h>
@@ -396,7 +397,8 @@ void unregister_undef_hook(struct undef_hook *hook)
raw_spin_unlock_irqrestore(&undef_lock, flags);
}
-static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
+static nokprobe_inline
+int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
struct undef_hook *hook;
unsigned long flags;
@@ -469,6 +471,7 @@ die_sig:
arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}
+NOKPROBE_SYMBOL(do_undefinstr)
/*
* Handle FIQ similarly to NMI on x86 systems.
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index df73914e81c8..746e7801dcdf 100644
--- a/arch/arm/lib/getuser.S
+++ b/arch/arm/lib/getuser.S
@@ -38,6 +38,7 @@ ENTRY(__get_user_1)
mov r0, #0
ret lr
ENDPROC(__get_user_1)
+_ASM_NOKPROBE(__get_user_1)
ENTRY(__get_user_2)
check_uaccess r0, 2, r1, r2, __get_user_bad
@@ -58,6 +59,7 @@ rb .req r0
mov r0, #0
ret lr
ENDPROC(__get_user_2)
+_ASM_NOKPROBE(__get_user_2)
ENTRY(__get_user_4)
check_uaccess r0, 4, r1, r2, __get_user_bad
@@ -65,6 +67,7 @@ ENTRY(__get_user_4)
mov r0, #0
ret lr
ENDPROC(__get_user_4)
+_ASM_NOKPROBE(__get_user_4)
ENTRY(__get_user_8)
check_uaccess r0, 8, r1, r2, __get_user_bad8
@@ -78,6 +81,7 @@ ENTRY(__get_user_8)
mov r0, #0
ret lr
ENDPROC(__get_user_8)
+_ASM_NOKPROBE(__get_user_8)
#ifdef __ARMEB__
ENTRY(__get_user_32t_8)
@@ -91,6 +95,7 @@ ENTRY(__get_user_32t_8)
mov r0, #0
ret lr
ENDPROC(__get_user_32t_8)
+_ASM_NOKPROBE(__get_user_32t_8)
ENTRY(__get_user_64t_1)
check_uaccess r0, 1, r1, r2, __get_user_bad8
@@ -98,6 +103,7 @@ ENTRY(__get_user_64t_1)
mov r0, #0
ret lr
ENDPROC(__get_user_64t_1)
+_ASM_NOKPROBE(__get_user_64t_1)
ENTRY(__get_user_64t_2)
check_uaccess r0, 2, r1, r2, __get_user_bad8
@@ -114,6 +120,7 @@ rb .req r0
mov r0, #0
ret lr
ENDPROC(__get_user_64t_2)
+_ASM_NOKPROBE(__get_user_64t_2)
ENTRY(__get_user_64t_4)
check_uaccess r0, 4, r1, r2, __get_user_bad8
@@ -121,6 +128,7 @@ ENTRY(__get_user_64t_4)
mov r0, #0
ret lr
ENDPROC(__get_user_64t_4)
+_ASM_NOKPROBE(__get_user_64t_4)
#endif
__get_user_bad8:
@@ -131,6 +139,8 @@ __get_user_bad:
ret lr
ENDPROC(__get_user_bad)
ENDPROC(__get_user_bad8)
+_ASM_NOKPROBE(__get_user_bad)
+_ASM_NOKPROBE(__get_user_bad8)
.pushsection __ex_table, "a"
.long 1b, __get_user_bad
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
index bcdecc25461b..b2aa9b32bff2 100644
--- a/arch/arm/probes/kprobes/opt-arm.c
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -165,13 +165,14 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
{
unsigned long flags;
struct kprobe *p = &op->kp;
- struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ struct kprobe_ctlblk *kcb;
/* Save skipped registers */
regs->ARM_pc = (unsigned long)op->kp.addr;
regs->ARM_ORIG_r0 = ~0UL;
local_irq_save(flags);
+ kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
kprobes_inc_nmissed_count(&op->kp);
@@ -191,6 +192,7 @@ optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
local_irq_restore(flags);
}
+NOKPROBE_SYMBOL(optimized_callback)
int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
{
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6feff273b8a9..634c6c946a03 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -451,6 +451,20 @@ config ARM64_ERRATUM_843419
If unsure, say Y.
+config ARM64_ERRATUM_1024718
+ bool "Cortex-A55: 1024718: Update of DBM/AP bits without break before make might result in incorrect update"
+ default y
+ help
+ This option adds a workaround for Arm Cortex-A55 Erratum 1024718.
+
+ Affected Cortex-A55 cores (r0p0, r0p1, r1p0) could cause an incorrect
+ update of the hardware dirty bit when the DBM/AP bits are updated
+ without a break-before-make. The workaround is to disable the use
+ of hardware DBM locally on the affected cores. CPUs not affected by
+ the erratum will continue to use the feature.
+
+ If unsure, say Y.
+
config CAVIUM_ERRATUM_22375
bool "Cavium erratum 22375, 24313"
default y
diff --git a/arch/arm64/boot/dts/htc/batterydata-walleye.dtsi b/arch/arm64/boot/dts/htc/batterydata-walleye.dtsi
index 66451798f637..617b4b9f12cb 100644
--- a/arch/arm64/boot/dts/htc/batterydata-walleye.dtsi
+++ b/arch/arm64/boot/dts/htc/batterydata-walleye.dtsi
@@ -18,7 +18,6 @@
qcom,fg-chg-term-current = <50>;
qcom,fg-sys-term-current = <(-405)>;
qcom,fg-cutoff-voltage = <3400>;
- qcom,fg-empty-voltage = <3200>;
qcom,hold-soc-while-full;
qcom,linearize-soc;
qcom,cl-max-decrement = <5>;
diff --git a/arch/arm64/boot/dts/htc/msm8998-htc-common.dtsi b/arch/arm64/boot/dts/htc/msm8998-htc-common.dtsi
index d88e9784de31..6b9c250ba5c4 100755
--- a/arch/arm64/boot/dts/htc/msm8998-htc-common.dtsi
+++ b/arch/arm64/boot/dts/htc/msm8998-htc-common.dtsi
@@ -176,4 +176,12 @@
qcom,bcl-hotplug-list = <>;
qcom,bcl-soc-hotplug-list = <>;
};
+
+ qcom,msm-imem@146bf000 {
+ restart_info@a94 {
+ compatible = "msm-imem-restart_info";
+ reg = <0xa94 100>;
+ info_size = <100>;
+ };
+ };
};
diff --git a/arch/arm64/boot/dts/htc/msm8998-htc-usb-xa.dtsi b/arch/arm64/boot/dts/htc/msm8998-htc-usb-xa.dtsi
index 07880f504447..fe088adff02f 100644
--- a/arch/arm64/boot/dts/htc/msm8998-htc-usb-xa.dtsi
+++ b/arch/arm64/boot/dts/htc/msm8998-htc-usb-xa.dtsi
@@ -11,6 +11,8 @@
* GNU General Public License for more details.
*/
+#include <dt-bindings/usb/typec.h>
+
&pm8998_gpios {
gpio@c400 { /* GPIO 5 - V_OTG_5V_EN_AP */
qcom,mode = <1>;
@@ -73,6 +75,19 @@
qcom,fcc-max-ua = <500000>;
};
+&pmi8998_pdphy {
+ src-pdo = <PDO_TYPE_FIXED 5000 900 0>; /* 5V-0.9A */
+ snk-pdo = <PDO_TYPE_FIXED 5000 3000 0>, /* 5V-3A */
+ <PDO_TYPE_FIXED 9000 3000 0>; /* 9V-3A */
+ max-snk-mv = <9000>;
+ max-snk-ma = <3000>;
+ max-snk-mw = <27000>;
+ op-snk-mw = <7600>;
+ port-type = <TYPEC_PORT_DRP>;
+ default-role = <TYPEC_SINK>;
+ try-role-hw;
+};
+
&qusb_phy0 {
qcom,qusb-phy-init-seq =
/* <value reg_offset> */
diff --git a/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd.dtsi b/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd.dtsi
index 077cbe1ef642..29251319478a 100644
--- a/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd.dtsi
+++ b/arch/arm64/boot/dts/lge/dsi-panel-sw43402-dsc-qhd-cmd.dtsi
@@ -116,20 +116,22 @@
qcom,alpm-off-command = [
15 01 00 00 00 00 02 5E 00
15 01 00 00 00 00 02 51 00
- 05 01 00 00 12 00 01 22
+ 05 01 00 00 00 00 01 22
+ 39 01 00 00 12 00 02 E4 33
05 01 00 00 00 00 01 38
39 01 00 00 00 00 03 E2 20 0D
05 01 00 00 40 00 01 28
+ 39 01 00 00 00 00 02 E4 30
05 01 00 00 00 00 01 13
05 01 00 00 00 00 01 29
];
qcom,alpm-dim-command = [
05 01 00 00 00 00 01 22
15 01 00 00 00 00 02 E0 1A
- 39 01 00 00 34 00 03 E2 20 03
- 39 01 00 00 00 00 03 E4 30 44
39 01 00 00 00 00 07 E5 04 06 03 03 56 61
39 01 00 00 00 00 09 E7 00 0D 76 23 00 00 0D 44
+ 39 01 00 00 00 00 03 E4 33 44
+ 39 01 00 00 66 00 03 E2 20 03
05 01 00 00 00 00 01 39
15 01 00 00 00 00 02 51 35
05 01 00 00 54 00 01 13
@@ -138,10 +140,10 @@
qcom,alpm-low-command = [
05 01 00 00 00 00 01 22
15 01 00 00 00 00 02 E0 1A
- 39 01 00 00 34 00 03 E2 20 03
- 39 01 00 00 00 00 03 E4 30 44
39 01 00 00 00 00 07 E5 04 06 03 03 56 61
39 01 00 00 00 00 09 E7 00 0D 76 23 00 00 0D 44
+ 39 01 00 00 00 00 03 E4 33 44
+ 39 01 00 00 66 00 03 E2 20 03
05 01 00 00 00 00 01 39
15 01 00 00 00 00 02 51 90
05 01 00 00 54 00 01 13
@@ -150,10 +152,10 @@
qcom,alpm-high-command = [
05 01 00 00 00 00 01 22
15 01 00 00 00 00 02 E0 1A
- 39 01 00 00 34 00 03 E2 20 03
- 39 01 00 00 00 00 03 E4 30 04
39 01 00 00 00 00 07 E5 04 06 03 03 56 61
39 01 00 00 00 00 09 E7 00 0D 76 23 00 00 0D 44
+ 39 01 00 00 00 00 03 E4 33 04
+ 39 01 00 00 66 00 03 E2 20 03
05 01 00 00 00 00 01 39
15 01 00 00 00 00 02 51 F6
05 01 00 00 54 00 01 13
@@ -161,15 +163,15 @@
];
qcom,alpm-dim-transition-command = [
15 01 00 00 00 00 02 51 35
- 39 01 00 00 20 00 03 E4 30 44
+ 39 01 00 00 20 00 03 E4 33 44
];
qcom,alpm-low-transition-command = [
15 01 00 00 00 00 02 51 90
- 39 01 00 00 20 00 03 E4 30 44
+ 39 01 00 00 20 00 03 E4 33 44
];
qcom,alpm-high-transition-command = [
15 01 00 00 00 00 02 51 F6
- 39 01 00 00 20 00 03 E4 30 04
+ 39 01 00 00 20 00 03 E4 33 04
];
qcom,mdss-pan-physical-width-dimension = <68>;
qcom,mdss-pan-physical-height-dimension = <136>;
diff --git a/arch/arm64/boot/dts/lge/msm8998-taimen-pm.dtsi b/arch/arm64/boot/dts/lge/msm8998-taimen-pm.dtsi
index 27c33b31de87..20ded59a15da 100644
--- a/arch/arm64/boot/dts/lge/msm8998-taimen-pm.dtsi
+++ b/arch/arm64/boot/dts/lge/msm8998-taimen-pm.dtsi
@@ -238,12 +238,7 @@
(-40) (-40) (-65) (-60)
(-60) (-40) (-40) (-30)
(-30)>;
- qcom,fg-cutoff-voltage = <3300>;
- qcom,fg-empty-voltage = <3100>;
-};
-
-&pmi8998_bcl {
- qcom,vbat-too-low-threshold = <4>; /* 2.8V */
+ qcom,fg-cutoff-voltage = <3400>;
};
&pmi8998_pdphy {
diff --git a/arch/arm64/boot/dts/lge/msm8998-taimen-touch-stm-ftm4.dtsi b/arch/arm64/boot/dts/lge/msm8998-taimen-touch-stm-ftm4.dtsi
index db72bc968a94..daacab1eb799 100644
--- a/arch/arm64/boot/dts/lge/msm8998-taimen-touch-stm-ftm4.dtsi
+++ b/arch/arm64/boot/dts/lge/msm8998-taimen-touch-stm-ftm4.dtsi
@@ -51,7 +51,7 @@
stm,irq_gpio = <&tlmm 125 0>;
stm,irq_type = <0x2004>;
stm,num_lines = <32 16>;
- stm,max_coords = <1440 2880>;
+ stm,max_coords = <1439 2879>;
stm,regulator_dvdd = "pm8998_l6";
stm,regulator_avdd = "pm8998_l28";
//stm,tspid_gpio = <&gpg1 4 0>;
diff --git a/arch/arm64/boot/dts/lge/msm8998-taimen-usb.dtsi b/arch/arm64/boot/dts/lge/msm8998-taimen-usb.dtsi
index f97c4786355b..d8a25d2f35c4 100644
--- a/arch/arm64/boot/dts/lge/msm8998-taimen-usb.dtsi
+++ b/arch/arm64/boot/dts/lge/msm8998-taimen-usb.dtsi
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <dt-bindings/usb/typec.h>
&qusb_phy0 {
qcom,qusb-phy-init-seq =
@@ -167,3 +168,16 @@
0x8dc 0x13 0x00 /* rx_a equalization 0x03 | 0x10 */
0xffffffff 0xffffffff 0x00>;
};
+
+&pmi8998_pdphy {
+ src-pdo = <PDO_TYPE_FIXED 5000 900 0>; /* 5V-0.9A */
+ snk-pdo = <PDO_TYPE_FIXED 5000 3000 0>, /* 5V-3A */
+ <PDO_TYPE_FIXED 9000 3000 0>; /* 9V-3A */
+ max-snk-mv = <9000>;
+ max-snk-ma = <3000>;
+ max-snk-mw = <27000>;
+ op-snk-mw = <7600>;
+ port-type = <TYPEC_PORT_DRP>;
+ default-role = <TYPEC_SINK>;
+ try-role-hw;
+};
diff --git a/arch/arm64/configs/flash-taimen_defconfig b/arch/arm64/configs/flash-taimen_defconfig
deleted file mode 100644
index a9d45e640534..000000000000
--- a/arch/arm64/configs/flash-taimen_defconfig
+++ /dev/null
@@ -1,795 +0,0 @@
-CONFIG_LOCALVERSION="-FlashKernel"
-# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_KERNEL_LZ4=y
-# CONFIG_USELIB is not set
-CONFIG_AUDIT=y
-# CONFIG_AUDITSYSCALL is not set
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IRQ_TIME_ACCOUNTING=y
-CONFIG_SCHED_WALT=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
-CONFIG_RCU_EXPERT=y
-CONFIG_RCU_FAST_NO_HZ=y
-CONFIG_RCU_NOCB_CPU=y
-CONFIG_RCU_NOCB_CPU_ALL=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_DEBUG=y
-CONFIG_CGROUP_FREEZER=y
-CONFIG_CPUSETS=y
-CONFIG_CGROUP_CPUACCT=y
-CONFIG_CGROUP_SCHEDTUNE=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_RT_GROUP_SCHED=y
-CONFIG_NAMESPACES=y
-# CONFIG_UTS_NS is not set
-# CONFIG_PID_NS is not set
-CONFIG_SCHED_TUNE=y
-CONFIG_DEFAULT_USE_ENERGY_AWARE=y
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_RD_BZIP2 is not set
-# CONFIG_RD_LZMA is not set
-# CONFIG_RD_XZ is not set
-# CONFIG_RD_LZO is not set
-# CONFIG_RD_LZ4 is not set
-CONFIG_KALLSYMS_ALL=y
-# CONFIG_MEMBARRIER is not set
-CONFIG_EMBEDDED=y
-# CONFIG_SLUB_DEBUG is not set
-# CONFIG_COMPAT_BRK is not set
-# CONFIG_SLAB_MERGE_DEFAULT is not set
-CONFIG_SLAB_FREELIST_RANDOM=y
-CONFIG_SLAB_FREELIST_HARDENED=y
-CONFIG_PROFILING=y
-CONFIG_JUMP_LABEL=y
-CONFIG_CC_STACKPROTECTOR_STRONG=y
-CONFIG_ARCH_MMAP_RND_BITS=24
-CONFIG_ARCH_MMAP_RND_COMPAT_BITS=16
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_TEST is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MSM8998=y
-CONFIG_BOARD_MSM8998_SOC=y
-CONFIG_BOARD_MUSKIE=y
-CONFIG_BOARD_TAIMEN=y
-CONFIG_PCI=y
-CONFIG_PCI_MSM=y
-CONFIG_SCHED_MC=y
-CONFIG_NR_CPUS=8
-CONFIG_PREEMPT=y
-CONFIG_HZ_1000=y
-CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
-CONFIG_CLEANCACHE=y
-CONFIG_CMA=y
-CONFIG_CMA_DEBUGFS=y
-CONFIG_ZSMALLOC=y
-CONFIG_BALANCE_ANON_FILE_RECLAIM=y
-CONFIG_SECCOMP=y
-CONFIG_ARMV8_DEPRECATED=y
-CONFIG_SWP_EMULATION=y
-CONFIG_CP15_BARRIER_EMULATION=y
-CONFIG_SETEND_EMULATION=y
-CONFIG_ARM64_SW_TTBR0_PAN=y
-CONFIG_ARM64_PAN=y
-CONFIG_RANDOMIZE_BASE=y
-CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
-CONFIG_BUILD_ARM64_DTC_FLAGS="-@ -q"
-# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_COMPAT=y
-CONFIG_COMPAT_VDSO=n
-CONFIG_SUSPEND_SKIP_SYNC=y
-CONFIG_PM_AUTOSLEEP=y
-CONFIG_PM_WAKELOCKS=y
-CONFIG_PM_WAKELOCKS_LIMIT=0
-# CONFIG_PM_WAKELOCKS_GC is not set
-CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
-CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
-CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_XFRM_USER=y
-CONFIG_XFRM_STATISTICS=y
-CONFIG_NET_KEY=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_MULTIPLE_TABLES=y
-CONFIG_IP_ROUTE_VERBOSE=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_NET_IPVTI=y
-CONFIG_INET_AH=y
-CONFIG_INET_ESP=y
-CONFIG_INET_IPCOMP=y
-# CONFIG_INET_LRO is not set
-CONFIG_INET_DIAG_DESTROY=y
-CONFIG_TCP_CONG_ADVANCED=y
-# CONFIG_TCP_CONG_BIC is not set
-CONFIG_TCP_CONG_WESTWOOD=y
-# CONFIG_TCP_CONG_HTCP is not set
-CONFIG_TCP_CONG_CDG=y
-CONFIG_DEFAULT_WESTWOOD=y
-CONFIG_IPV6_ROUTER_PREF=y
-CONFIG_IPV6_ROUTE_INFO=y
-CONFIG_IPV6_OPTIMISTIC_DAD=y
-CONFIG_INET6_AH=y
-CONFIG_INET6_ESP=y
-CONFIG_INET6_IPCOMP=y
-CONFIG_IPV6_MIP6=y
-CONFIG_IPV6_VTI=y
-CONFIG_IPV6_MULTIPLE_TABLES=y
-CONFIG_IPV6_SUBTREES=y
-CONFIG_NETFILTER=y
-# CONFIG_BRIDGE_NETFILTER is not set
-CONFIG_NF_CONNTRACK=y
-CONFIG_NF_CONNTRACK_SECMARK=y
-CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=y
-CONFIG_NF_CT_PROTO_SCTP=y
-CONFIG_NF_CT_PROTO_UDPLITE=y
-CONFIG_NF_CONNTRACK_AMANDA=y
-CONFIG_NF_CONNTRACK_FTP=y
-CONFIG_NF_CONNTRACK_H323=y
-CONFIG_NF_CONNTRACK_IRC=y
-CONFIG_NF_CONNTRACK_NETBIOS_NS=y
-CONFIG_NF_CONNTRACK_PPTP=y
-CONFIG_NF_CONNTRACK_SANE=y
-CONFIG_NF_CONNTRACK_TFTP=y
-CONFIG_NF_CT_NETLINK=y
-CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
-CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
-CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
-CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_HARDIDLETIMER=y
-CONFIG_NETFILTER_XT_TARGET_LOG=y
-CONFIG_NETFILTER_XT_TARGET_MARK=y
-CONFIG_NETFILTER_XT_TARGET_NFLOG=y
-CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
-CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
-CONFIG_NETFILTER_XT_TARGET_TEE=y
-CONFIG_NETFILTER_XT_TARGET_TPROXY=y
-CONFIG_NETFILTER_XT_TARGET_TRACE=y
-CONFIG_NETFILTER_XT_TARGET_SECMARK=y
-CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
-CONFIG_NETFILTER_XT_MATCH_COMMENT=y
-CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
-CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
-CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
-CONFIG_NETFILTER_XT_MATCH_DSCP=y
-CONFIG_NETFILTER_XT_MATCH_ESP=y
-CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
-CONFIG_NETFILTER_XT_MATCH_HELPER=y
-CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
-# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
-CONFIG_NETFILTER_XT_MATCH_LENGTH=y
-CONFIG_NETFILTER_XT_MATCH_LIMIT=y
-CONFIG_NETFILTER_XT_MATCH_MAC=y
-CONFIG_NETFILTER_XT_MATCH_MARK=y
-CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
-CONFIG_NETFILTER_XT_MATCH_POLICY=y
-CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
-CONFIG_NETFILTER_XT_MATCH_QTAGUID=y
-CONFIG_NETFILTER_XT_MATCH_QUOTA=y
-CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
-CONFIG_NETFILTER_XT_MATCH_SOCKET=y
-CONFIG_NETFILTER_XT_MATCH_STATE=y
-CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
-CONFIG_NETFILTER_XT_MATCH_STRING=y
-CONFIG_NETFILTER_XT_MATCH_TIME=y
-CONFIG_NETFILTER_XT_MATCH_U32=y
-CONFIG_NF_CONNTRACK_IPV4=y
-CONFIG_IP_NF_IPTABLES=y
-CONFIG_IP_NF_MATCH_AH=y
-CONFIG_IP_NF_MATCH_ECN=y
-CONFIG_IP_NF_MATCH_RPFILTER=y
-CONFIG_IP_NF_MATCH_TTL=y
-CONFIG_IP_NF_FILTER=y
-CONFIG_IP_NF_TARGET_REJECT=y
-CONFIG_IP_NF_NAT=y
-CONFIG_IP_NF_TARGET_MASQUERADE=y
-CONFIG_IP_NF_TARGET_NETMAP=y
-CONFIG_IP_NF_TARGET_REDIRECT=y
-CONFIG_IP_NF_MANGLE=y
-CONFIG_IP_NF_RAW=y
-CONFIG_IP_NF_SECURITY=y
-CONFIG_IP_NF_ARPTABLES=y
-CONFIG_IP_NF_ARPFILTER=y
-CONFIG_IP_NF_ARP_MANGLE=y
-CONFIG_NF_CONNTRACK_IPV6=y
-CONFIG_IP6_NF_IPTABLES=y
-CONFIG_IP6_NF_IPTABLES_128=y
-CONFIG_IP6_NF_MATCH_RPFILTER=y
-CONFIG_IP6_NF_FILTER=y
-CONFIG_IP6_NF_TARGET_REJECT=y
-CONFIG_IP6_NF_MANGLE=y
-CONFIG_IP6_NF_RAW=y
-CONFIG_BRIDGE_NF_EBTABLES=y
-CONFIG_BRIDGE_EBT_BROUTE=y
-CONFIG_L2TP=y
-CONFIG_L2TP_DEBUGFS=y
-CONFIG_L2TP_V3=y
-CONFIG_L2TP_IP=y
-CONFIG_L2TP_ETH=y
-CONFIG_BRIDGE=y
-CONFIG_NET_SCHED=y
-CONFIG_NET_SCH_HTB=y
-CONFIG_NET_SCH_PRIO=y
-CONFIG_NET_SCH_MULTIQ=y
-CONFIG_NET_SCH_INGRESS=y
-CONFIG_NET_CLS_FW=y
-CONFIG_NET_CLS_U32=y
-CONFIG_CLS_U32_MARK=y
-CONFIG_NET_CLS_FLOW=y
-CONFIG_NET_EMATCH=y
-CONFIG_NET_EMATCH_CMP=y
-CONFIG_NET_EMATCH_NBYTE=y
-CONFIG_NET_EMATCH_U32=y
-CONFIG_NET_EMATCH_META=y
-CONFIG_NET_EMATCH_TEXT=y
-CONFIG_NET_CLS_ACT=y
-CONFIG_NET_ACT_GACT=y
-CONFIG_NET_ACT_MIRRED=y
-CONFIG_NET_ACT_SKBEDIT=y
-CONFIG_RMNET_DATA=y
-CONFIG_RMNET_DATA_FC=y
-CONFIG_SOCKEV_NLMCAST=y
-CONFIG_BT=y
-CONFIG_BT_BDA=y
-CONFIG_MSM_BT_POWER=y
-CONFIG_BTFM_SLIM=y
-CONFIG_BTFM_SLIM_WCN3990=y
-CONFIG_CFG80211=y
-CONFIG_CFG80211_INTERNAL_REGDB=y
-# CONFIG_CFG80211_CRDA_SUPPORT is not set
-CONFIG_RFKILL=y
-CONFIG_NFC_NQ=y
-CONFIG_NFC_NQ_PN81A=y
-CONFIG_IPC_ROUTER=y
-CONFIG_IPC_ROUTER_SECURITY=y
-CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
-CONFIG_DMA_CMA=y
-CONFIG_ZRAM=y
-CONFIG_ZRAM_LZ4_COMPRESS=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_QSEECOM=y
-CONFIG_HDCP_QSEECOM=y
-CONFIG_UID_SYS_STATS=y
-CONFIG_MEMORY_STATE_TIME=y
-CONFIG_GOOGLE_EASEL=y
-CONFIG_GOOGLE_EASEL_AP=y
-CONFIG_ACCESS_RAMOOPS=y
-CONFIG_MNH_THERMAL_HOST=y
-CONFIG_MNH_PCIE_MULTIPLE_MSI=y
-CONFIG_MNH_SIG=y
-CONFIG_FPR_FPC=y
-CONFIG_SCSI=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_CHR_DEV_SG=y
-CONFIG_SCSI_CONSTANTS=y
-CONFIG_SCSI_LOGGING=y
-CONFIG_SCSI_SCAN_ASYNC=y
-CONFIG_SCSI_UFSHCD=y
-CONFIG_SCSI_UFSHCD_PLATFORM=y
-CONFIG_SCSI_UFS_QCOM=y
-CONFIG_SCSI_UFS_QCOM_ICE=y
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=y
-CONFIG_MD_LINEAR=y
-CONFIG_BLK_DEV_DM=y
-CONFIG_DM_CRYPT=y
-CONFIG_DM_DEFAULT_KEY=y
-CONFIG_DM_REQ_CRYPT=y
-CONFIG_DM_UEVENT=y
-CONFIG_DM_VERITY=y
-CONFIG_DM_VERITY_FEC=y
-CONFIG_DM_VERITY_AVB=y
-CONFIG_NETDEVICES=y
-CONFIG_BONDING=y
-CONFIG_DUMMY=y
-CONFIG_TUN=y
-# CONFIG_NET_VENDOR_3COM is not set
-# CONFIG_NET_VENDOR_ADAPTEC is not set
-# CONFIG_NET_VENDOR_AGERE is not set
-# CONFIG_NET_VENDOR_ALTEON is not set
-# CONFIG_NET_VENDOR_AMD is not set
-# CONFIG_NET_VENDOR_ARC is not set
-# CONFIG_NET_VENDOR_ATHEROS is not set
-# CONFIG_NET_CADENCE is not set
-# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_BROCADE is not set
-# CONFIG_NET_VENDOR_CAVIUM is not set
-# CONFIG_NET_VENDOR_CHELSIO is not set
-# CONFIG_NET_VENDOR_CISCO is not set
-# CONFIG_NET_VENDOR_DEC is not set
-# CONFIG_NET_VENDOR_DLINK is not set
-# CONFIG_NET_VENDOR_EMULEX is not set
-# CONFIG_NET_VENDOR_EZCHIP is not set
-# CONFIG_NET_VENDOR_EXAR is not set
-# CONFIG_NET_VENDOR_HISILICON is not set
-# CONFIG_NET_VENDOR_HP is not set
-# CONFIG_NET_VENDOR_INTEL is not set
-# CONFIG_NET_VENDOR_MARVELL is not set
-# CONFIG_NET_VENDOR_MELLANOX is not set
-# CONFIG_NET_VENDOR_MICREL is not set
-# CONFIG_NET_VENDOR_MICROCHIP is not set
-CONFIG_RNDIS_IPA=y
-# CONFIG_NET_VENDOR_MYRI is not set
-# CONFIG_NET_VENDOR_NATSEMI is not set
-# CONFIG_NET_VENDOR_NVIDIA is not set
-# CONFIG_NET_VENDOR_OKI is not set
-# CONFIG_NET_PACKET_ENGINE is not set
-# CONFIG_NET_VENDOR_QLOGIC is not set
-# CONFIG_NET_VENDOR_QUALCOMM is not set
-# CONFIG_NET_VENDOR_REALTEK is not set
-# CONFIG_NET_VENDOR_RENESAS is not set
-# CONFIG_NET_VENDOR_RDC is not set
-# CONFIG_NET_VENDOR_ROCKER is not set
-# CONFIG_NET_VENDOR_SAMSUNG is not set
-# CONFIG_NET_VENDOR_SEEQ is not set
-# CONFIG_NET_VENDOR_SILAN is not set
-# CONFIG_NET_VENDOR_SIS is not set
-# CONFIG_NET_VENDOR_SMSC is not set
-# CONFIG_NET_VENDOR_STMICRO is not set
-# CONFIG_NET_VENDOR_SUN is not set
-# CONFIG_NET_VENDOR_SYNOPSYS is not set
-# CONFIG_NET_VENDOR_TEHUTI is not set
-# CONFIG_NET_VENDOR_TI is not set
-# CONFIG_NET_VENDOR_VIA is not set
-# CONFIG_NET_VENDOR_WIZNET is not set
-CONFIG_PPP=y
-CONFIG_PPP_BSDCOMP=y
-CONFIG_PPP_DEFLATE=y
-CONFIG_PPP_FILTER=y
-CONFIG_PPP_MPPE=y
-CONFIG_PPP_MULTILINK=y
-CONFIG_PPPOE=y
-CONFIG_PPPOL2TP=y
-CONFIG_PPPOLAC=y
-CONFIG_PPPOPNS=y
-CONFIG_PPP_ASYNC=y
-CONFIG_USB_RTL8150=y
-CONFIG_USB_RTL8152=y
-CONFIG_USB_USBNET=y
-# CONFIG_USB_NET_NET1080 is not set
-# CONFIG_USB_NET_CDC_SUBSET is not set
-# CONFIG_USB_NET_ZAURUS is not set
-CONFIG_WCNSS_MEM_PRE_ALLOC=y
-CONFIG_ATH_CARDS=y
-CONFIG_CLD_LL_CORE=y
-CONFIG_CNSS_GENL=y
-# CONFIG_INPUT_MOUSEDEV is not set
-CONFIG_INPUT_EVDEV=y
-CONFIG_INPUT_KEYRESET=y
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_GPIO=y
-# CONFIG_INPUT_MOUSE is not set
-CONFIG_INPUT_JOYSTICK=y
-CONFIG_JOYSTICK_XPAD=y
-CONFIG_JOYSTICK_XPAD_FF=y
-CONFIG_JOYSTICK_XPAD_LEDS=y
-CONFIG_INPUT_TOUCHSCREEN=y
-CONFIG_LGE_TOUCH_CORE=y
-CONFIG_LGE_TOUCH_LGSIC_SW49408=y
-CONFIG_TOUCHSCREEN_FTM4=y
-CONFIG_TOUCHSCREEN_GEN_VKEYS=y
-CONFIG_WAKE_GESTURES=y
-CONFIG_INPUT_MISC=y
-CONFIG_STMVL53L0=y
-CONFIG_INPUT_QPNP_POWER_ON=y
-CONFIG_INPUT_KEYCHORD=y
-CONFIG_INPUT_UINPUT=y
-CONFIG_INPUT_GPIO=y
-CONFIG_INPUT_DRV2624_HAPTICS=y
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVMEM is not set
-# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_MSM=y
-CONFIG_SERIAL_MSM_CONSOLE=y
-CONFIG_SERIAL_MSM_HS=y
-CONFIG_SERIAL_MSM_SMD=y
-CONFIG_DIAG_CHAR=y
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_MSM_LEGACY=y
-# CONFIG_DEVPORT is not set
-CONFIG_MSM_ADSPRPC=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_MSM_V2=y
-CONFIG_SLIMBUS_MSM_NGD=y
-CONFIG_SOUNDWIRE=y
-CONFIG_SPI=y
-CONFIG_SPI_QUP=y
-CONFIG_SPI_SPIDEV=y
-CONFIG_SPMI=y
-CONFIG_PINCTRL_MSM8998=y
-CONFIG_GPIOLIB=y
-CONFIG_GPIO_SYSFS=y
-CONFIG_GPIO_QPNP_PIN=y
-CONFIG_LGE_BATTERY=y
-CONFIG_POWER_RESET_QCOM=y
-CONFIG_QCOM_DLOAD_MODE=y
-CONFIG_POWER_RESET_XGENE=y
-CONFIG_POWER_RESET_SYSCON=y
-CONFIG_QPNP_FG_GEN3=y
-CONFIG_SMB135X_CHARGER=y
-CONFIG_SMB1351_USB_CHARGER=y
-CONFIG_MSM_BCL_CTL=y
-CONFIG_MSM_BCL_PERIPHERAL_CTL=y
-CONFIG_BATTERY_BCL=y
-CONFIG_QPNP_SMB2=y
-CONFIG_SMB138X_CHARGER=y
-CONFIG_MSM_PM=y
-CONFIG_MSM_APM=y
-CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
-CONFIG_CPU_THERMAL=y
-CONFIG_LIMITS_MONITOR=y
-CONFIG_LIMITS_LITE_HW=y
-CONFIG_THERMAL_MONITOR=y
-CONFIG_THERMAL_TSENS8974=y
-CONFIG_THERMAL_QPNP=y
-CONFIG_THERMAL_QPNP_ADC_TM=y
-CONFIG_QCOM_THERMAL_LIMITS_DCVS=y
-CONFIG_MFD_SPMI_PMIC=y
-CONFIG_MFD_I2C_PMIC=y
-CONFIG_WCD9335_CODEC=y
-CONFIG_WCD934X_CODEC=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
-CONFIG_REGULATOR_TPS61099=y
-CONFIG_REGULATOR_RPM_SMD=y
-CONFIG_REGULATOR_QPNP=y
-CONFIG_REGULATOR_QPNP_LABIBB=y
-CONFIG_REGULATOR_QPNP_LCDB=y
-CONFIG_REGULATOR_SPM=y
-CONFIG_REGULATOR_CPR3_HMSS=y
-CONFIG_REGULATOR_CPR3_MMSS=y
-CONFIG_REGULATOR_CPRH_KBSS=y
-CONFIG_REGULATOR_MEM_ACC=y
-CONFIG_REGULATOR_PROXY_CONSUMER=y
-CONFIG_REGULATOR_STUB=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_MEDIA_CAMERA_SUPPORT=y
-CONFIG_MEDIA_CONTROLLER=y
-CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_VIDEO_FIXED_MINOR_RANGES=y
-CONFIG_V4L_PLATFORM_DRIVERS=y
-CONFIG_MSM_CAMERA=y
-CONFIG_MSMB_CAMERA=y
-CONFIG_MSM_CAMERA_SENSOR=y
-CONFIG_MSM_CPP=y
-CONFIG_MSM_CCI=y
-CONFIG_MSM_CSI20_HEADER=y
-CONFIG_MSM_CSI22_HEADER=y
-CONFIG_MSM_CSI30_HEADER=y
-CONFIG_MSM_CSI31_HEADER=y
-CONFIG_MSM_CSIPHY=y
-CONFIG_MSM_CSID=y
-CONFIG_MSM_EEPROM=y
-CONFIG_MSM_ISPIF=y
-CONFIG_FW_UPDATE=y
-CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
-CONFIG_MSMB_JPEG=y
-CONFIG_MSM_FD=y
-CONFIG_MSM_JPEGDMA=y
-CONFIG_LC898123F40=y
-CONFIG_MSM_VIDC_V4L2=y
-CONFIG_MSM_VIDC_VMEM=y
-CONFIG_MSM_VIDC_GOVERNORS=y
-CONFIG_MSM_SDE_ROTATOR=y
-CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
-CONFIG_QCOM_KGSL=y
-CONFIG_FB=y
-CONFIG_FB_VIRTUAL=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
-CONFIG_FB_MSM_MDSS_DP_PANEL=y
-CONFIG_FB_MSM_MDSS_KCAL_CTRL=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-# CONFIG_SND_PCI is not set
-# CONFIG_SND_SPI is not set
-CONFIG_SND_USB_AUDIO=y
-CONFIG_SND_USB_AUDIO_QMI=y
-CONFIG_SND_SOC=y
-CONFIG_SND_SOC_MACHINE_MSM8998_WAHOO=y
-CONFIG_SND_SOC_MSM8998=y
-CONFIG_SND_SOC_TFA98XX=y
-CONFIG_SND_SOC_TAS2557_STEREO=y
-CONFIG_HIDRAW=y
-CONFIG_UHID=y
-CONFIG_HID_APPLE=y
-CONFIG_HID_DRAGONRISE=y
-CONFIG_DRAGONRISE_FF=y
-CONFIG_HID_ELECOM=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_KEYTOUCH=y
-CONFIG_HID_KYE=y
-CONFIG_HID_GYRATION=y
-CONFIG_HID_KENSINGTON=y
-CONFIG_HID_LCPOWER=y
-CONFIG_HID_LOGITECH=y
-CONFIG_HID_LOGITECH_DJ=y
-CONFIG_LOGITECH_FF=y
-CONFIG_LOGIRUMBLEPAD2_FF=y
-CONFIG_LOGIG940_FF=y
-CONFIG_HID_MAGICMOUSE=y
-CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MULTITOUCH=y
-CONFIG_HID_NTRIG=y
-CONFIG_HID_PLANTRONICS=y
-CONFIG_HID_ROCCAT=y
-CONFIG_HID_SAITEK=y
-CONFIG_HID_SAMSUNG=y
-CONFIG_HID_SONY=y
-CONFIG_SONY_FF=y
-CONFIG_HID_SPEEDLINK=y
-CONFIG_HID_THRUSTMASTER=y
-CONFIG_THRUSTMASTER_FF=y
-CONFIG_HID_WACOM=y
-CONFIG_HID_WIIMOTE=y
-CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
-CONFIG_USB_XHCI_HCD=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_DWC3=y
-CONFIG_USB_PD_ENGINE=y
-CONFIG_QPNP_USB_PDPHY=y
-CONFIG_USB_OTG_WAKELOCK=y
-CONFIG_NOP_USB_XCEIV=y
-CONFIG_USB_MSM_SSPHY_QMP=y
-CONFIG_MSM_QUSB_PHY=y
-CONFIG_DUAL_ROLE_USB_INTF=y
-CONFIG_USB_GADGET=y
-CONFIG_USB_GADGET_VBUS_DRAW=500
-CONFIG_USB_CONFIGFS=y
-CONFIG_USB_CONFIGFS_NCM=y
-CONFIG_USB_CONFIGFS_MASS_STORAGE=y
-CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_MTP=y
-CONFIG_USB_CONFIGFS_F_PTP=y
-CONFIG_USB_CONFIGFS_F_ACC=y
-CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
-CONFIG_USB_CONFIGFS_UEVENT=y
-CONFIG_USB_CONFIGFS_F_MIDI=y
-CONFIG_USB_CONFIGFS_F_HID=y
-CONFIG_USB_CONFIGFS_F_DIAG=y
-CONFIG_USB_CONFIGFS_F_GSI=y
-CONFIG_USB_CONFIGFS_F_CDEV=y
-CONFIG_USB_CONFIGFS_F_QDSS=y
-CONFIG_USB_TUSB1044=y
-CONFIG_USB_PTN36241G=y
-CONFIG_LEDS_QPNP=y
-CONFIG_LEDS_QPNP_FLASH_V2=y
-CONFIG_LEDS_QPNP_WLED=y
-CONFIG_LEDS_SYSCON=y
-CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_CPU=y
-CONFIG_LEDS_TRIGGER_TRANSIENT=y
-CONFIG_SWITCH=y
-CONFIG_EDAC=y
-CONFIG_EDAC_MM_EDAC=y
-CONFIG_EDAC_CORTEX_ARM64=y
-CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_UE=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_DRV_QPNP=y
-CONFIG_DMADEVICES=y
-CONFIG_QCOM_SPS_DMA=y
-CONFIG_UIO=y
-CONFIG_UIO_MSM_SHAREDMEM=y
-CONFIG_STAGING=y
-CONFIG_ASHMEM=y
-# CONFIG_ANDROID_TIMED_OUTPUT is not set
-CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_ION=y
-CONFIG_ION_MSM=y
-CONFIG_QCA_CLD_WLAN=y
-CONFIG_QCACLD_WLAN_LFR3=y
-CONFIG_PRIMA_WLAN_OKC=y
-CONFIG_PRIMA_WLAN_11AC_HIGH_TP=y
-CONFIG_WLAN_FEATURE_11W=y
-CONFIG_WLAN_FEATURE_LPSS=y
-CONFIG_QCOM_VOWIFI_11R=y
-CONFIG_QCACLD_FEATURE_NAN=y
-CONFIG_WLAN_FEATURE_NAN_DATAPATH=y
-CONFIG_HELIUMPLUS=y
-CONFIG_64BIT_PADDR=y
-CONFIG_QCOM_TDLS=y
-CONFIG_QCOM_LTE_COEX=y
-CONFIG_MPC_UT_FRAMEWORK=y
-CONFIG_WLAN_OFFLOAD_PACKETS=y
-CONFIG_FEATURE_TSO=y
-CONFIG_FEATURE_TSO_DEBUG=y
-CONFIG_WLAN_FASTPATH=y
-CONFIG_WLAN_NAPI=y
-CONFIG_WLAN_TX_FLOW_CONTROL_V2=y
-CONFIG_WLAN_SYNC_TSF=y
-CONFIG_LFR_SUBNET_DETECTION=y
-CONFIG_MCC_TO_SCC_SWITCH=y
-CONFIG_QCACLD_WLAN_LFR2=y
-CONFIG_REGULATOR_BCM15602=y
-CONFIG_HTC_WLAN_NV=y
-CONFIG_QPNP_REVID=y
-CONFIG_QPNP_COINCELL=y
-CONFIG_SPS=y
-CONFIG_SPS_SUPPORT_NDP_BAM=y
-CONFIG_IPA=y
-CONFIG_RMNET_IPA=y
-CONFIG_GSI=y
-CONFIG_IPA3=y
-CONFIG_RMNET_IPA3=y
-CONFIG_GPIO_USB_DETECT=y
-# CONFIG_MSM_11AD is not set
-CONFIG_SEEMP_CORE=y
-CONFIG_USB_BAM=y
-CONFIG_MSM_MDSS_PLL=y
-CONFIG_REMOTE_SPINLOCK_MSM=y
-CONFIG_MSM_TIMER_LEAP=y
-CONFIG_IOMMU_IO_PGTABLE_FAST=y
-CONFIG_ARM_SMMU=y
-CONFIG_QCOM_COMMON_LOG=y
-CONFIG_MSM_SMEM=y
-CONFIG_MSM_SMD=y
-CONFIG_MSM_GLINK=y
-CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
-CONFIG_MSM_GLINK_SMD_XPRT=y
-CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
-CONFIG_MSM_GLINK_SPI_XPRT=y
-CONFIG_MSM_SPCOM=y
-CONFIG_MSM_SPSS_UTILS=y
-CONFIG_MSM_SMEM_LOGGING=y
-CONFIG_MSM_SMP2P=y
-CONFIG_MSM_SMP2P_TEST=y
-CONFIG_MSM_QMI_INTERFACE=y
-CONFIG_MSM_RPM_SMD=y
-CONFIG_QCOM_BUS_SCALING=y
-CONFIG_MSM_SERVICE_LOCATOR=y
-CONFIG_QCOM_DCC=y
-CONFIG_MSM_IPC_ROUTER_SMD_XPRT=y
-CONFIG_MSM_SYSMON_GLINK_COMM=y
-CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
-CONFIG_MSM_GLINK_PKT=y
-CONFIG_MSM_SPM=y
-CONFIG_QCOM_SCM=y
-CONFIG_QCOM_WATCHDOG_V2=y
-CONFIG_QCOM_IRQ_HELPER=y
-CONFIG_QCOM_MEMORY_DUMP_V2=y
-CONFIG_ICNSS=y
-CONFIG_MSM_GLADIATOR_ERP_V2=y
-CONFIG_MSM_CORE_HANG_DETECT=y
-CONFIG_MSM_RUN_QUEUE_STATS=y
-CONFIG_MSM_BOOT_STATS=y
-CONFIG_QCOM_CPUSS_DUMP=y
-CONFIG_MSM_ADSP_LOADER=y
-CONFIG_MSM_SUBSYSTEM_RESTART=y
-CONFIG_MSM_PIL=y
-CONFIG_MSM_PIL_SSR_GENERIC=y
-CONFIG_MSM_PIL_MSS_QDSP6V5=y
-CONFIG_TRACER_PKT=y
-CONFIG_QCOM_FORCE_WDOG_BITE_ON_PANIC=y
-CONFIG_MSM_MPM_OF=y
-CONFIG_MSM_EVENT_TIMER=y
-CONFIG_MSM_AVTIMER=y
-CONFIG_MSM_KERNEL_PROTECT=y
-CONFIG_QCOM_REMOTEQDSS=y
-CONFIG_MSM_SERVICE_NOTIFIER=y
-CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
-CONFIG_MSM_RPM_STATS_LOG=y
-CONFIG_QSEE_IPC_IRQ_BRIDGE=y
-CONFIG_QCOM_SMCINVOKE=y
-CONFIG_QCOM_EARLY_RANDOM=y
-CONFIG_STATE_NOTIFIER=y
-CONFIG_MEM_SHARE_QMI_SERVICE=y
-CONFIG_QCOM_BIMC_BWMON=y
-CONFIG_ARM_MEMLAT_MON=y
-CONFIG_DEVFREQ_GOV_QCOM_BW_HWMON=y
-CONFIG_DEVFREQ_GOV_MEMLAT=y
-CONFIG_QCOM_DEVFREQ_DEVBW=y
-CONFIG_SPDM_SCM=y
-CONFIG_DEVFREQ_SPDM=y
-CONFIG_EXTCON=y
-CONFIG_IIO=y
-CONFIG_QCOM_RRADC=y
-CONFIG_QCOM_TADC=y
-CONFIG_PWM=y
-CONFIG_PWM_QPNP=y
-CONFIG_ARM_GIC_V3_ACL=y
-CONFIG_ANDROID=y
-CONFIG_ANDROID_BINDER_IPC=y
-CONFIG_MSM_TZ_LOG=y
-CONFIG_SENSORS_SSC=y
-CONFIG_HTC_RADIO_SMEM=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_SECURITY=y
-CONFIG_EXT4_ENCRYPTION=y
-CONFIG_EXT4_FS_ENCRYPTION=y
-CONFIG_EXT4_FS_ICE_ENCRYPTION=y
-CONFIG_QUOTA=y
-CONFIG_QUOTA_NETLINK_INTERFACE=y
-# CONFIG_PRINT_QUOTA_WARNING is not set
-CONFIG_QFMT_V2=y
-CONFIG_FUSE_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-# CONFIG_EFIVAR_FS is not set
-CONFIG_SDCARD_FS=y
-CONFIG_PSTORE=y
-CONFIG_PSTORE_CONSOLE=y
-CONFIG_PSTORE_PMSG=y
-CONFIG_PSTORE_RAM=y
-CONFIG_HTC_DEBUG_BOOTLOADER_LOG=y
-# CONFIG_NETWORK_FILESYSTEMS is not set
-CONFIG_NLS_CODEPAGE_437=y
-CONFIG_NLS_ISO8859_1=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DYNAMIC_DEBUG=y
-CONFIG_DEBUG_INFO=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_PANIC_ON_OOPS=y
-CONFIG_PANIC_TIMEOUT=1
-CONFIG_SCHEDSTATS=y
-CONFIG_SCHED_STACK_END_CHECK=y
-# CONFIG_DEBUG_PREEMPT is not set
-CONFIG_IPC_LOGGING=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_PANIC_ON_DATA_CORRUPTION=y
-CONFIG_CC_WERROR=y
-CONFIG_PID_IN_CONTEXTIDR=y
-CONFIG_DEBUG_SET_MODULE_RONX=y
-CONFIG_PFK=y
-CONFIG_SECURITY_PERF_EVENTS_RESTRICT=y
-CONFIG_SECURITY=y
-CONFIG_SECURITY_NETWORK=y
-CONFIG_HARDENED_USERCOPY=y
-CONFIG_FORTIFY_SOURCE=y
-CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_YAMA=y
-CONFIG_CRYPTO_GCM=y
-CONFIG_CRYPTO_ECHAINIV=y
-CONFIG_CRYPTO_XCBC=y
-CONFIG_CRYPTO_MD4=y
-CONFIG_CRYPTO_SHA512=y
-CONFIG_CRYPTO_TWOFISH=y
-CONFIG_CRYPTO_ANSI_CPRNG=y
-CONFIG_CRYPTO_DEV_QCRYPTO=y
-CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
-CONFIG_CRYPTO_DEV_QCEDEV=y
-CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
-CONFIG_CRYPTO_DEV_QCE=y
-CONFIG_CRYPTO_DEV_QCOM_ICE=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y
-CONFIG_X509_CERTIFICATE_PARSER=y
-CONFIG_PKCS7_MESSAGE_PARSER=y
-CONFIG_SYSTEM_TRUSTED_KEYRING=y
-CONFIG_SYSTEM_TRUSTED_KEYS="certs/esl_key.pem"
-CONFIG_ARM64_CRYPTO=y
-CONFIG_CRYPTO_SHA1_ARM64_CE=y
-CONFIG_CRYPTO_SHA2_ARM64_CE=y
-CONFIG_CRYPTO_GHASH_ARM64_CE=y
-CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
-CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
-CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
-CONFIG_CRYPTO_CRC32_ARM64=y
-CONFIG_XZ_DEC=y
-CONFIG_QMI_ENCDEC=y
diff --git a/arch/arm64/configs/flash-walleye_defconfig b/arch/arm64/configs/flash_defconfig
index e41b565010f2..337a67b2f182 100644
--- a/arch/arm64/configs/flash-walleye_defconfig
+++ b/arch/arm64/configs/flash_defconfig
@@ -58,7 +58,7 @@ CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_TEST is not set
-# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_IOSCHED_MAPLE=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSM8998=y
CONFIG_BOARD_MSM8998_SOC=y
@@ -76,6 +76,7 @@ CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_VM_MAX_READAHEAD=2048
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
@@ -98,6 +99,7 @@ CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPU_WAKE_BOOST=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -384,6 +386,9 @@ CONFIG_JOYSTICK_XPAD=y
CONFIG_JOYSTICK_XPAD_FF=y
CONFIG_JOYSTICK_XPAD_LEDS=y
CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_LGE_TOUCH_CORE=y
+CONFIG_LGE_TOUCH_LGSIC_SW49408=y
+CONFIG_TOUCHSCREEN_FTM4=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_HTC=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_HTC=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_HTC=y
@@ -424,11 +429,13 @@ CONFIG_GPIOLIB=y
CONFIG_GPIO_SYSFS=y
CONFIG_GPIO_QPNP_PIN=y
CONFIG_HTC_BATTERY=y
+CONFIG_LGE_BATTERY=y
CONFIG_POWER_RESET_QCOM=y
CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_QPNP_FG_GEN3=y
+# CONFIG_QPNP_FG_GEN3_LEGACY_CYCLE_COUNT is not set
CONFIG_SMB135X_CHARGER=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_MSM_BCL_CTL=y
@@ -697,7 +704,6 @@ CONFIG_MSM_RPM_STATS_LOG=y
CONFIG_QSEE_IPC_IRQ_BRIDGE=y
CONFIG_QCOM_SMCINVOKE=y
CONFIG_QCOM_EARLY_RANDOM=y
-CONFIG_STATE_NOTIFIER=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
diff --git a/arch/arm64/configs/wahoo_defconfig b/arch/arm64/configs/wahoo_defconfig
index ced7ff23e40f..affa072b4440 100644
--- a/arch/arm64/configs/wahoo_defconfig
+++ b/arch/arm64/configs/wahoo_defconfig
@@ -51,7 +51,6 @@ CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_TEST is not set
-# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSM8998=y
CONFIG_BOARD_MSM8998_SOC=y
@@ -69,6 +68,7 @@ CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_VM_MAX_READAHEAD=2048
CONFIG_SECCOMP=y
# CONFIG_UNMAP_KERNEL_AT_EL0 is not set
CONFIG_ARMV8_DEPRECATED=y
@@ -425,6 +425,7 @@ CONFIG_QCOM_DLOAD_MODE=y
CONFIG_POWER_RESET_XGENE=y
CONFIG_POWER_RESET_SYSCON=y
CONFIG_QPNP_FG_GEN3=y
+# CONFIG_QPNP_FG_GEN3_LEGACY_CYCLE_COUNT is not set
CONFIG_SMB135X_CHARGER=y
CONFIG_SMB1351_USB_CHARGER=y
CONFIG_MSM_BCL_CTL=y
@@ -597,7 +598,7 @@ CONFIG_ASHMEM=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
CONFIG_ION=y
CONFIG_ION_MSM=y
-CONFIG_QCA_CLD_WLAN=y
+CONFIG_QCA_CLD_WLAN=m
CONFIG_QCACLD_WLAN_LFR3=y
CONFIG_PRIMA_WLAN_OKC=y
CONFIG_PRIMA_WLAN_11AC_HIGH_TP=y
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 1d791a0129fd..a689e6ed80f4 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -25,6 +25,7 @@
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
+#include <asm/cputype.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
@@ -428,4 +429,43 @@ alternative_endif
mrs \rd, sp_el0
.endm
+/*
+ * Check the MIDR_EL1 of the current CPU for a given model and a range of
+ * variant/revision. See asm/cputype.h for the macros used below.
+ *
+ * model: MIDR_CPU_PART of CPU
+ * rv_min: Minimum of MIDR_CPU_VAR_REV()
+ * rv_max: Maximum of MIDR_CPU_VAR_REV()
+ * res: Result register.
+ * tmp1, tmp2, tmp3: Temporary registers
+ *
+ * Corrupts: res, tmp1, tmp2, tmp3
+ * Returns: 0, if the CPU id doesn't match. Non-zero otherwise
+ */
+ .macro cpu_midr_match model, rv_min, rv_max, res, tmp1, tmp2, tmp3
+ mrs \res, midr_el1
+ mov_q \tmp1, (MIDR_REVISION_MASK | MIDR_VARIANT_MASK)
+ mov_q \tmp2, MIDR_CPU_PART_MASK
+ and \tmp3, \res, \tmp2 // Extract model
+ and \tmp1, \res, \tmp1 // rev & variant
+ mov_q \tmp2, \model
+ cmp \tmp3, \tmp2
+ cset \res, eq
+ cbz \res, .Ldone\@ // Model matches ?
+
+ .if (\rv_min != 0) // Skip min check if rv_min == 0
+ mov_q \tmp3, \rv_min
+ cmp \tmp1, \tmp3
+ cset \res, ge
+ .endif // \rv_min != 0
+ /* Skip rv_max check if rv_min == rv_max && rv_min != 0 */
+ .if ((\rv_min != \rv_max) || \rv_min == 0)
+ mov_q \tmp2, \rv_max
+ cmp \tmp1, \tmp2
+ cset \tmp2, le
+ and \res, \res, \tmp2
+ .endif
+.Ldone\@:
+ .endm
+
#endif /* __ASM_ASSEMBLER_H */
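
In rough C terms, the cpu_midr_match macro added above computes the equivalent of the following (illustrative helper only, the name is made up and not part of the patch):

	static inline int midr_matches(u32 midr, u32 model, u32 rv_min, u32 rv_max)
	{
		u32 part   = midr & MIDR_CPU_PART_MASK;
		u32 varrev = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);

		return part == model && varrev >= rv_min && varrev <= rv_max;
	}

The .if guards in the assembly only skip comparisons that are redundant for the arguments actually passed (for example rv_min == 0); the proc.S hunk further down uses the macro to detect Cortex-A55 r0p0 through r1p0 for the erratum 1024718 workaround.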
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 717ef4519521..4d0cc8e33534 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -55,6 +55,14 @@
#define MIDR_IMPLEMENTOR(midr) \
(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
+#define MIDR_CPU_VAR_REV(var, rev) \
+ (((var) << MIDR_VARIANT_SHIFT) | (rev))
+
+#define MIDR_CPU_PART_MASK \
+ (MIDR_IMPLEMENTOR_MASK | \
+ MIDR_ARCHITECTURE_MASK | \
+ MIDR_PARTNUM_MASK)
+
#define MIDR_CPU_MODEL(imp, partnum) \
(((imp) << MIDR_IMPLEMENTOR_SHIFT) | \
(0xf << MIDR_ARCHITECTURE_SHIFT) | \
@@ -84,6 +92,7 @@
#define ARM_CPU_PART_CORTEX_A72 0xD08
#define ARM_CPU_PART_CORTEX_A73 0xD09
#define ARM_CPU_PART_CORTEX_A75 0xD0A
+#define ARM_CPU_PART_CORTEX_A55 0xD05
#define ARM_CPU_PART_KRYO2XX_GOLD 0x800
#define ARM_CPU_PART_KRYO2XX_SILVER 0x801
#define QCOM_CPU_PART_KRYO 0x200
@@ -97,6 +106,7 @@
#define MIDR_CORTEX_A72 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A72)
#define MIDR_CORTEX_A73 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A73)
#define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
+#define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
#define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
#define MIDR_KRYO2XX_SILVER \
MIDR_CPU_MODEL(ARM_CPU_IMP_QCOM, ARM_CPU_PART_KRYO2XX_SILVER)
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index c5bc52e47f6a..07fe2479d310 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -48,21 +48,11 @@ do { \
} while (0)
static inline int
-futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *_uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (int)(encoded_op << 8) >> 20;
- int cmparg = (int)(encoded_op << 20) >> 20;
int oldval = 0, ret, tmp;
u32 __user *uaddr = __uaccess_mask_ptr(_uaddr);
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1U << (oparg & 0x1f);
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
@@ -92,17 +82,9 @@ futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *_uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
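
This follows the upstream futex rework: decoding of encoded_op and the cmparg comparison move into common code, and each architecture only performs the atomic operation and reports the old value through *oval. Roughly, the common caller in kernel/futex.c then looks like this (a simplified sketch, not the exact upstream text):

	static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
	{
		unsigned int op  = (encoded_op >> 28) & 7;
		unsigned int cmp = (encoded_op >> 24) & 15;
		int oparg  = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
		int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
		int oldval, ret;

		if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
			oparg = 1 << oparg;

		if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
			return -EFAULT;

		ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
		if (ret)
			return ret;

		switch (cmp) {
		case FUTEX_OP_CMP_EQ: return oldval == cmparg;
		case FUTEX_OP_CMP_NE: return oldval != cmparg;
		case FUTEX_OP_CMP_LT: return oldval <  cmparg;
		case FUTEX_OP_CMP_GE: return oldval >= cmparg;
		case FUTEX_OP_CMP_LE: return oldval <= cmparg;
		case FUTEX_OP_CMP_GT: return oldval >  cmparg;
		default:              return -ENOSYS;
		}
	}

The same conversion is applied to the frv, hexagon, ia64, microblaze, mips, parisc, powerpc and s390 futex headers further down in this merge.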
diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c
index 546ce8979b3a..6e7bd3a354f2 100644
--- a/arch/arm64/kernel/arm64ksyms.c
+++ b/arch/arm64/kernel/arm64ksyms.c
@@ -80,3 +80,11 @@ EXPORT_SYMBOL(__dma_flush_range);
/* arm-smccc */
EXPORT_SYMBOL(arm_smccc_smc);
EXPORT_SYMBOL(arm_smccc_hvc);
+
+ /* tishift.S */
+extern long long __ashlti3(long long a, int b);
+EXPORT_SYMBOL(__ashlti3);
+extern long long __ashrti3(long long a, int b);
+EXPORT_SYMBOL(__ashrti3);
+extern long long __lshrti3(long long a, int b);
+EXPORT_SYMBOL(__lshrti3);
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 4b2caefd3a8f..eaf5d415971e 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -276,3 +276,22 @@ void __init cpuinfo_store_boot_cpu(void)
boot_cpu_data = *info;
init_cpu_features(&boot_cpu_data);
}
+
+static bool is_taimen;
+
+static int __init get_hardware(char *cmdline)
+{
+ is_taimen = !strcmp(cmdline, "taimen");
+ return 0;
+}
+__setup("androidboot.hardware=", get_hardware);
+
+bool is_google_taimen(void)
+{
+ return is_taimen;
+}
+
+bool is_google_walleye(void)
+{
+ return !is_taimen;
+}
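
The two helpers above are keyed off the bootloader-provided kernel command line: __setup() registers get_hardware() for the "androidboot.hardware=" option, so the value passed by the bootloader decides which board the kernel believes it is running on. Illustrative use only (the caller below is hypothetical, not part of this patch):

	/* cmdline: ... androidboot.hardware=taimen ...  ->  is_taimen == true */
	if (is_google_taimen())
		pr_info("applying taimen-specific setup\n");
	else
		pr_info("assuming walleye\n");

Note that any value other than "taimen" (including a missing option) makes is_google_walleye() return true.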
diff --git a/arch/arm64/lib/strcmp.S b/arch/arm64/lib/strcmp.S
index f1adbb4952fb..6d3963b17d41 100644
--- a/arch/arm64/lib/strcmp.S
+++ b/arch/arm64/lib/strcmp.S
@@ -60,8 +60,8 @@ tmp3 .req x9
zeroones .req x10
pos .req x11
+ .p2align 6
ENTRY(strcmp)
-.p2align 6
eor tmp1, src1, src2
mov zeroones, #REP8_01
tst tmp1, #7
diff --git a/arch/arm64/lib/tishift.S b/arch/arm64/lib/tishift.S
index d3db9b2cd479..0fdff97794de 100644
--- a/arch/arm64/lib/tishift.S
+++ b/arch/arm64/lib/tishift.S
@@ -1,17 +1,6 @@
-/*
- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ * Copyright (C) 2017-2018 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*/
#include <linux/linkage.h>
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 03588d136f93..d35d95511932 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -164,7 +164,7 @@ void __init kasan_init(void)
clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
- pfn_to_nid(virt_to_pfn(_text)));
+ early_pfn_to_nid(virt_to_pfn(_text)));
/*
* vmemmap_populate() has populated the shadow region that covers the
@@ -201,7 +201,7 @@ void __init kasan_init(void)
*/
vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
(unsigned long)kasan_mem_to_shadow(end) + 1,
- pfn_to_nid(virt_to_pfn(start)));
+ early_pfn_to_nid(virt_to_pfn(start)));
}
/*
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index f99afb7218a2..6e7c26380e47 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -279,6 +279,11 @@ ENTRY(__cpu_setup)
cbz x9, 2f
cmp x9, #2
b.lt 1f
+#ifdef CONFIG_ARM64_ERRATUM_1024718
+ /* Disable hardware DBM on Cortex-A55 r0p0, r0p1 & r1p0 */
+ cpu_midr_match MIDR_CORTEX_A55, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, 0), x1, x2, x3, x4
+ cbnz x1, 1f
+#endif
orr x10, x10, #TCR_HD // hardware Dirty flag update
1: orr x10, x10, #TCR_HA // hardware Access flag update
2:
diff --git a/arch/frv/include/asm/futex.h b/arch/frv/include/asm/futex.h
index 4bea27f50a7a..2702bd802d44 100644
--- a/arch/frv/include/asm/futex.h
+++ b/arch/frv/include/asm/futex.h
@@ -7,7 +7,8 @@
#include <asm/errno.h>
#include <asm/uaccess.h>
-extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr);
+extern int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr);
static inline int
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
diff --git a/arch/frv/kernel/futex.c b/arch/frv/kernel/futex.c
index d155ca9e5098..37f7b2bf7f73 100644
--- a/arch/frv/kernel/futex.c
+++ b/arch/frv/kernel/futex.c
@@ -186,20 +186,10 @@ static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_o
/*
* do the futex operations
*/
-int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+int arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
@@ -225,18 +215,9 @@ int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS; break;
- }
- }
+ if (!ret)
+ *oval = oldval;
return ret;
-} /* end futex_atomic_op_inuser() */
+} /* end arch_futex_atomic_op_inuser() */
diff --git a/arch/hexagon/include/asm/futex.h b/arch/hexagon/include/asm/futex.h
index 7e597f8434da..c607b77c8215 100644
--- a/arch/hexagon/include/asm/futex.h
+++ b/arch/hexagon/include/asm/futex.h
@@ -31,18 +31,9 @@
static inline int
-futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
pagefault_disable();
@@ -72,30 +63,9 @@ futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h
index 76acbcd5c060..6d67dc1eaf2b 100644
--- a/arch/ia64/include/asm/futex.h
+++ b/arch/ia64/include/asm/futex.h
@@ -45,18 +45,9 @@ do { \
} while (0)
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -84,17 +75,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/microblaze/include/asm/futex.h b/arch/microblaze/include/asm/futex.h
index 01848f056f43..a9dad9e5e132 100644
--- a/arch/microblaze/include/asm/futex.h
+++ b/arch/microblaze/include/asm/futex.h
@@ -29,18 +29,9 @@
})
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -66,30 +57,9 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 1de190bdfb9c..a9e61ea54ca9 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -83,18 +83,9 @@
}
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -125,17 +116,9 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h
index 49df14805a9b..ae5b64981d72 100644
--- a/arch/parisc/include/asm/futex.h
+++ b/arch/parisc/include/asm/futex.h
@@ -32,20 +32,11 @@ _futex_spin_unlock_irqrestore(u32 __user *uaddr, unsigned long int *flags)
}
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
{
unsigned long int flags;
u32 val;
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr)))
- return -EFAULT;
pagefault_disable();
@@ -98,17 +89,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index e05808a328db..b0629249778b 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -47,12 +47,10 @@
#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000)
#define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000)
#define FW_FEATURE_OPAL ASM_CONST(0x0000000010000000)
-#define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000)
#define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000)
#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
-#define FW_FEATURE_OPALv3 ASM_CONST(0x0000000400000000)
#ifndef __ASSEMBLY__
@@ -70,8 +68,7 @@ enum {
FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
FW_FEATURE_PSERIES_ALWAYS = 0,
- FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2 |
- FW_FEATURE_OPALv3,
+ FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL,
FW_FEATURE_POWERNV_ALWAYS = 0,
FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 2a9cf845473b..f4c7467f7465 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -31,18 +31,10 @@
: "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
: "cr0", "memory")
-static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -68,17 +60,9 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 44c8d03558ac..318224784114 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -217,14 +217,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
unsigned short maj;
unsigned short min;
- /* We only show online cpus: disable preempt (overzealous, I
- * knew) to prevent cpu going down. */
- preempt_disable();
- if (!cpu_online(cpu_id)) {
- preempt_enable();
- return 0;
- }
-
#ifdef CONFIG_SMP
pvr = per_cpu(cpu_pvr, cpu_id);
#else
@@ -329,9 +321,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
#ifdef CONFIG_SMP
seq_printf(m, "\n");
#endif
-
- preempt_enable();
-
/* If this is the last cpu, print the summary */
if (cpumask_next(cpu_id, cpu_online_mask) >= nr_cpu_ids)
show_cpuinfo_summary(m);
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 92736851c795..3f653f5201e7 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -48,8 +48,8 @@ static int pnv_eeh_init(void)
struct pci_controller *hose;
struct pnv_phb *phb;
- if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
- pr_warn("%s: OPALv3 is required !\n",
+ if (!firmware_has_feature(FW_FEATURE_OPAL)) {
+ pr_warn("%s: OPAL is required !\n",
__func__);
return -EINVAL;
}
diff --git a/arch/powerpc/platforms/powernv/idle.c b/arch/powerpc/platforms/powernv/idle.c
index 59d735d2e5c0..15bfbcd5debc 100644
--- a/arch/powerpc/platforms/powernv/idle.c
+++ b/arch/powerpc/platforms/powernv/idle.c
@@ -242,7 +242,7 @@ static int __init pnv_init_idle_states(void)
if (cpuidle_disable != IDLE_NO_OVERRIDE)
goto out;
- if (!firmware_has_feature(FW_FEATURE_OPALv3))
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
goto out;
power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index 1bceb95f422d..5584247f5029 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -44,6 +44,10 @@ static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index)
return count;
}
+/*
+ * This can be called in the panic path with interrupts off, so use
+ * mdelay in that case.
+ */
static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
{
s64 rc = OPAL_BUSY;
@@ -58,10 +62,16 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_write_nvram(__pa(buf), count, off);
if (rc == OPAL_BUSY_EVENT) {
- msleep(OPAL_BUSY_DELAY_MS);
+ if (in_interrupt() || irqs_disabled())
+ mdelay(OPAL_BUSY_DELAY_MS);
+ else
+ msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
} else if (rc == OPAL_BUSY) {
- msleep(OPAL_BUSY_DELAY_MS);
+ if (in_interrupt() || irqs_disabled())
+ mdelay(OPAL_BUSY_DELAY_MS);
+ else
+ msleep(OPAL_BUSY_DELAY_MS);
}
}
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index 7634d1c62299..d0ac535cf5d7 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -126,7 +126,7 @@ static const struct scom_controller opal_scom_controller = {
static int opal_xscom_init(void)
{
- if (firmware_has_feature(FW_FEATURE_OPALv3))
+ if (firmware_has_feature(FW_FEATURE_OPAL))
scom_init(&opal_scom_controller);
return 0;
}
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index ae29eaf85e9e..e48826aa314c 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -98,16 +98,11 @@ int __init early_init_dt_scan_opal(unsigned long node,
pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n",
opal.size, sizep, runtimesz);
- powerpc_firmware_features |= FW_FEATURE_OPAL;
if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
- powerpc_firmware_features |= FW_FEATURE_OPALv2;
- powerpc_firmware_features |= FW_FEATURE_OPALv3;
- pr_info("OPAL V3 detected !\n");
- } else if (of_flat_dt_is_compatible(node, "ibm,opal-v2")) {
- powerpc_firmware_features |= FW_FEATURE_OPALv2;
- pr_info("OPAL V2 detected !\n");
+ powerpc_firmware_features |= FW_FEATURE_OPAL;
+ pr_info("OPAL detected !\n");
} else {
- pr_info("OPAL V1 detected !\n");
+ panic("OPAL != V3 detected, no longer supported.\n");
}
/* Reinit all cores with the right endian */
@@ -352,17 +347,15 @@ int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
* enough room and be done with it
*/
spin_lock_irqsave(&opal_write_lock, flags);
- if (firmware_has_feature(FW_FEATURE_OPALv2)) {
- rc = opal_console_write_buffer_space(vtermno, &olen);
- len = be64_to_cpu(olen);
- if (rc || len < total_len) {
- spin_unlock_irqrestore(&opal_write_lock, flags);
- /* Closed -> drop characters */
- if (rc)
- return total_len;
- opal_poll_events(NULL);
- return -EAGAIN;
- }
+ rc = opal_console_write_buffer_space(vtermno, &olen);
+ len = be64_to_cpu(olen);
+ if (rc || len < total_len) {
+ spin_unlock_irqrestore(&opal_write_lock, flags);
+ /* Closed -> drop characters */
+ if (rc)
+ return total_len;
+ opal_poll_events(NULL);
+ return -EAGAIN;
}
/* We still try to handle partial completions, though they
@@ -696,10 +689,7 @@ static int __init opal_init(void)
}
/* Register OPAL consoles if any ports */
- if (firmware_has_feature(FW_FEATURE_OPALv2))
- consoles = of_find_node_by_path("/ibm,opal/consoles");
- else
- consoles = of_node_get(opal_node);
+ consoles = of_find_node_by_path("/ibm,opal/consoles");
if (consoles) {
for_each_child_of_node(consoles, np) {
if (strcmp(np->name, "serial"))
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index ecb7f3220355..eac3b7cc78c6 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -344,7 +344,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
return;
}
- if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
+ if (!firmware_has_feature(FW_FEATURE_OPAL)) {
pr_info(" Firmware too old to support M64 window\n");
return;
}
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 30c6b3b7be90..c57afc619b20 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -140,12 +140,8 @@ static void pnv_show_cpuinfo(struct seq_file *m)
if (root)
model = of_get_property(root, "model", NULL);
seq_printf(m, "machine\t\t: PowerNV %s\n", model);
- if (firmware_has_feature(FW_FEATURE_OPALv3))
- seq_printf(m, "firmware\t: OPAL v3\n");
- else if (firmware_has_feature(FW_FEATURE_OPALv2))
- seq_printf(m, "firmware\t: OPAL v2\n");
- else if (firmware_has_feature(FW_FEATURE_OPAL))
- seq_printf(m, "firmware\t: OPAL v1\n");
+ if (firmware_has_feature(FW_FEATURE_OPAL))
+ seq_printf(m, "firmware\t: OPAL\n");
else
seq_printf(m, "firmware\t: BML\n");
of_node_put(root);
@@ -274,9 +270,9 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
{
xics_kexec_teardown_cpu(secondary);
- /* On OPAL v3, we return all CPUs to firmware */
+ /* On OPAL, we return all CPUs to firmware */
- if (!firmware_has_feature(FW_FEATURE_OPALv3))
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
return;
if (secondary) {
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index ca264833ee64..ad7b1a3dbed0 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -61,14 +61,15 @@ static int pnv_smp_kick_cpu(int nr)
unsigned long start_here =
__pa(ppc_function_entry(generic_secondary_smp_init));
long rc;
+ uint8_t status;
BUG_ON(nr < 0 || nr >= NR_CPUS);
/*
- * If we already started or OPALv2 is not supported, we just
+ * If we already started or OPAL is not supported, we just
* kick the CPU via the PACA
*/
- if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPALv2))
+ if (paca[nr].cpu_start || !firmware_has_feature(FW_FEATURE_OPAL))
goto kick;
/*
@@ -77,55 +78,42 @@ static int pnv_smp_kick_cpu(int nr)
* first time. OPAL v3 allows us to query OPAL to know if it
* has the CPUs, so we do that
*/
- if (firmware_has_feature(FW_FEATURE_OPALv3)) {
- uint8_t status;
-
- rc = opal_query_cpu_status(pcpu, &status);
- if (rc != OPAL_SUCCESS) {
- pr_warn("OPAL Error %ld querying CPU %d state\n",
- rc, nr);
- return -ENODEV;
- }
+ rc = opal_query_cpu_status(pcpu, &status);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("OPAL Error %ld querying CPU %d state\n", rc, nr);
+ return -ENODEV;
+ }
- /*
- * Already started, just kick it, probably coming from
- * kexec and spinning
- */
- if (status == OPAL_THREAD_STARTED)
- goto kick;
+ /*
+ * Already started, just kick it, probably coming from
+ * kexec and spinning
+ */
+ if (status == OPAL_THREAD_STARTED)
+ goto kick;
- /*
- * Available/inactive, let's kick it
- */
- if (status == OPAL_THREAD_INACTIVE) {
- pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n",
- nr, pcpu);
- rc = opal_start_cpu(pcpu, start_here);
- if (rc != OPAL_SUCCESS) {
- pr_warn("OPAL Error %ld starting CPU %d\n",
- rc, nr);
- return -ENODEV;
- }
- } else {
- /*
- * An unavailable CPU (or any other unknown status)
- * shouldn't be started. It should also
- * not be in the possible map but currently it can
- * happen
- */
- pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
- " (status %d)...\n", nr, pcpu, status);
+ /*
+ * Available/inactive, let's kick it
+ */
+ if (status == OPAL_THREAD_INACTIVE) {
+ pr_devel("OPAL: Starting CPU %d (HW 0x%x)...\n", nr, pcpu);
+ rc = opal_start_cpu(pcpu, start_here);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("OPAL Error %ld starting CPU %d\n", rc, nr);
return -ENODEV;
}
} else {
/*
- * On OPAL v2, we just kick it and hope for the best,
- * we must not test the error from opal_start_cpu() or
- * we would fail to get CPUs from kexec.
+ * An unavailable CPU (or any other unknown status)
+ * shouldn't be started. It should also
+ * not be in the possible map but currently it can
+ * happen
*/
- opal_start_cpu(pcpu, start_here);
+ pr_devel("OPAL: CPU %d (HW 0x%x) is unavailable"
+ " (status %d)...\n", nr, pcpu, status);
+ return -ENODEV;
}
- kick:
+
+kick:
return smp_generic_kick_cpu(nr);
}
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 0ba746d8912c..e92a684e855d 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -111,6 +111,7 @@ config S390
select GENERIC_CLOCKEVENTS
select GENERIC_CPU_AUTOPROBE
select GENERIC_CPU_DEVICES if !SMP
+ select GENERIC_CPU_VULNERABILITIES
select GENERIC_FIND_FIRST_BIT
select GENERIC_SMP_IDLE_THREAD
select GENERIC_TIME_VSYSCALL
@@ -706,6 +707,51 @@ config SECCOMP
If unsure, say Y.
+config KERNEL_NOBP
+ def_bool n
+ prompt "Enable modified branch prediction for the kernel by default"
+ help
+ If this option is selected the kernel will switch to a modified
+ branch prediction mode if the firmware interface is available.
+ The modified branch prediction mode improves the behaviour in
+ regard to speculative execution.
+
+ With the option enabled the kernel parameter "nobp=0" or "nospec"
+ can be used to run the kernel in the normal branch prediction mode.
+
+ With the option disabled the modified branch prediction mode is
+ enabled with the "nobp=1" kernel parameter.
+
+ If unsure, say N.
+
+config EXPOLINE
+ def_bool n
+ prompt "Avoid speculative indirect branches in the kernel"
+ help
+ Compile the kernel with the expoline compiler options to guard
+ against kernel-to-user data leaks by avoiding speculative indirect
+ branches.
+ Requires a compiler with -mindirect-branch=thunk support for full
+ protection. The kernel may run slower.
+
+ If unsure, say N.
+
+choice
+ prompt "Expoline default"
+ depends on EXPOLINE
+ default EXPOLINE_FULL
+
+config EXPOLINE_OFF
+ bool "spectre_v2=off"
+
+config EXPOLINE_AUTO
+ bool "spectre_v2=auto"
+
+config EXPOLINE_FULL
+ bool "spectre_v2=on"
+
+endchoice
+
endmenu
menu "Power Management"
@@ -755,6 +801,7 @@ config PFAULT
config SHARED_KERNEL
bool "VM shared kernel support"
depends on !JUMP_LABEL
+ depends on !ALTERNATIVES
help
Select this option, if you want to share the text segment of the
Linux kernel between different VM guests. This reduces memory
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index e8d4423e4f85..d924f9b6dc73 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -77,6 +77,16 @@ ifeq ($(call cc-option-yn,-mwarn-dynamicstack),y)
cflags-$(CONFIG_WARN_DYNAMIC_STACK) += -mwarn-dynamicstack
endif
+ifdef CONFIG_EXPOLINE
+ ifeq ($(call cc-option-yn,$(CC_FLAGS_MARCH) -mindirect-branch=thunk),y)
+ CC_FLAGS_EXPOLINE := -mindirect-branch=thunk
+ CC_FLAGS_EXPOLINE += -mfunction-return=thunk
+ CC_FLAGS_EXPOLINE += -mindirect-branch-table
+ export CC_FLAGS_EXPOLINE
+ cflags-y += $(CC_FLAGS_EXPOLINE) -DCC_USING_EXPOLINE
+ endif
+endif
+
ifdef CONFIG_FUNCTION_TRACER
# make use of hotpatch feature if the compiler supports it
cc_hotpatch := -mhotpatch=0,3
diff --git a/arch/s390/include/asm/alternative-asm.h b/arch/s390/include/asm/alternative-asm.h
new file mode 100644
index 000000000000..955d620db23e
--- /dev/null
+++ b/arch/s390/include/asm/alternative-asm.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_ALTERNATIVE_ASM_H
+#define _ASM_S390_ALTERNATIVE_ASM_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * Check the length of an instruction sequence. The length may not be larger
+ * than 254 bytes and it has to be divisible by 2.
+ */
+.macro alt_len_check start,end
+ .if ( \end - \start ) > 254
+ .error "cpu alternatives does not support instructions blocks > 254 bytes\n"
+ .endif
+ .if ( \end - \start ) % 2
+ .error "cpu alternatives instructions length is odd\n"
+ .endif
+.endm
+
+/*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+ * enough information for the alternatives patching code to patch an
+ * instruction. See apply_alternatives().
+ */
+.macro alt_entry orig_start, orig_end, alt_start, alt_end, feature
+ .long \orig_start - .
+ .long \alt_start - .
+ .word \feature
+ .byte \orig_end - \orig_start
+ .byte \alt_end - \alt_start
+.endm
+
+/*
+ * Fill up @bytes with nops. The macro emits 6-byte nop instructions
+ * for the bulk of the area, possibly followed by a 4-byte and/or
+ * a 2-byte nop if the size of the area is not divisible by 6.
+ */
+.macro alt_pad_fill bytes
+ .fill ( \bytes ) / 6, 6, 0xc0040000
+ .fill ( \bytes ) % 6 / 4, 4, 0x47000000
+ .fill ( \bytes ) % 6 % 4 / 2, 2, 0x0700
+.endm
+
+/*
+ * Fill up @bytes with nops. If the number of bytes is larger
+ * than 6, emit a jg instruction to branch over all nops, then
+ * fill an area of size (@bytes - 6) with nop instructions.
+ */
+.macro alt_pad bytes
+ .if ( \bytes > 0 )
+ .if ( \bytes > 6 )
+ jg . + \bytes
+ alt_pad_fill \bytes - 6
+ .else
+ alt_pad_fill \bytes
+ .endif
+ .endif
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr. ".skip" directive takes care of proper instruction padding
+ * in case @newinstr is longer than @oldinstr.
+ */
+.macro ALTERNATIVE oldinstr, newinstr, feature
+ .pushsection .altinstr_replacement,"ax"
+770: \newinstr
+771: .popsection
+772: \oldinstr
+773: alt_len_check 770b, 771b
+ alt_len_check 772b, 773b
+ alt_pad ( ( 771b - 770b ) - ( 773b - 772b ) )
+774: .pushsection .altinstructions,"a"
+ alt_entry 772b, 774b, 770b, 771b, \feature
+ .popsection
+.endm
+
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr. ".skip" directive takes care of proper instruction padding
+ * in case @newinstr is longer than @oldinstr.
+ */
+.macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
+ .pushsection .altinstr_replacement,"ax"
+770: \newinstr1
+771: \newinstr2
+772: .popsection
+773: \oldinstr
+774: alt_len_check 770b, 771b
+ alt_len_check 771b, 772b
+ alt_len_check 773b, 774b
+ .if ( 771b - 770b > 772b - 771b )
+ alt_pad ( ( 771b - 770b ) - ( 774b - 773b ) )
+ .else
+ alt_pad ( ( 772b - 771b ) - ( 774b - 773b ) )
+ .endif
+775: .pushsection .altinstructions,"a"
+ alt_entry 773b, 775b, 770b, 771b,\feature1
+ alt_entry 773b, 775b, 771b, 772b,\feature2
+ .popsection
+.endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_ALTERNATIVE_ASM_H */
diff --git a/arch/s390/include/asm/alternative.h b/arch/s390/include/asm/alternative.h
new file mode 100644
index 000000000000..a72002056b54
--- /dev/null
+++ b/arch/s390/include/asm/alternative.h
@@ -0,0 +1,149 @@
+#ifndef _ASM_S390_ALTERNATIVE_H
+#define _ASM_S390_ALTERNATIVE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/stringify.h>
+
+struct alt_instr {
+ s32 instr_offset; /* original instruction */
+ s32 repl_offset; /* offset to replacement instruction */
+ u16 facility; /* facility bit set for replacement */
+ u8 instrlen; /* length of original instruction */
+ u8 replacementlen; /* length of new instruction */
+} __packed;
+
+void apply_alternative_instructions(void);
+void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
+
+/*
+ * |661: |662: |6620 |663:
+ * +-----------+---------------------+
+ * | oldinstr | oldinstr_padding |
+ * | +----------+----------+
+ * | | | |
+ * | | >6 bytes |6/4/2 nops|
+ * | |6 bytes jg----------->
+ * +-----------+---------------------+
+ * ^^ static padding ^^
+ *
+ * .altinstr_replacement section
+ * +---------------------+-----------+
+ * |6641: |6651:
+ * | alternative instr 1 |
+ * +-----------+---------+- - - - - -+
+ * |6642: |6652: |
+ * | alternative instr 2 | padding
+ * +---------------------+- - - - - -+
+ * ^ runtime ^
+ *
+ * .altinstructions section
+ * +---------------------------------+
+ * | alt_instr entries for each |
+ * | alternative instr |
+ * +---------------------------------+
+ */
+
+#define b_altinstr(num) "664"#num
+#define e_altinstr(num) "665"#num
+
+#define e_oldinstr_pad_end "663"
+#define oldinstr_len "662b-661b"
+#define oldinstr_total_len e_oldinstr_pad_end"b-661b"
+#define altinstr_len(num) e_altinstr(num)"b-"b_altinstr(num)"b"
+#define oldinstr_pad_len(num) \
+ "-(((" altinstr_len(num) ")-(" oldinstr_len ")) > 0) * " \
+ "((" altinstr_len(num) ")-(" oldinstr_len "))"
+
+#define INSTR_LEN_SANITY_CHECK(len) \
+ ".if " len " > 254\n" \
+ "\t.error \"cpu alternatives does not support instructions " \
+ "blocks > 254 bytes\"\n" \
+ ".endif\n" \
+ ".if (" len ") %% 2\n" \
+ "\t.error \"cpu alternatives instructions length is odd\"\n" \
+ ".endif\n"
+
+#define OLDINSTR_PADDING(oldinstr, num) \
+ ".if " oldinstr_pad_len(num) " > 6\n" \
+ "\tjg " e_oldinstr_pad_end "f\n" \
+ "6620:\n" \
+ "\t.fill (" oldinstr_pad_len(num) " - (6620b-662b)) / 2, 2, 0x0700\n" \
+ ".else\n" \
+ "\t.fill " oldinstr_pad_len(num) " / 6, 6, 0xc0040000\n" \
+ "\t.fill " oldinstr_pad_len(num) " %% 6 / 4, 4, 0x47000000\n" \
+ "\t.fill " oldinstr_pad_len(num) " %% 6 %% 4 / 2, 2, 0x0700\n" \
+ ".endif\n"
+
+#define OLDINSTR(oldinstr, num) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ OLDINSTR_PADDING(oldinstr, num) \
+ e_oldinstr_pad_end ":\n" \
+ INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define OLDINSTR_2(oldinstr, num1, num2) \
+ "661:\n\t" oldinstr "\n662:\n" \
+ ".if " altinstr_len(num1) " < " altinstr_len(num2) "\n" \
+ OLDINSTR_PADDING(oldinstr, num2) \
+ ".else\n" \
+ OLDINSTR_PADDING(oldinstr, num1) \
+ ".endif\n" \
+ e_oldinstr_pad_end ":\n" \
+ INSTR_LEN_SANITY_CHECK(oldinstr_len)
+
+#define ALTINSTR_ENTRY(facility, num) \
+ "\t.long 661b - .\n" /* old instruction */ \
+ "\t.long " b_altinstr(num)"b - .\n" /* alt instruction */ \
+ "\t.word " __stringify(facility) "\n" /* facility bit */ \
+ "\t.byte " oldinstr_total_len "\n" /* source len */ \
+ "\t.byte " altinstr_len(num) "\n" /* alt instruction len */
+
+#define ALTINSTR_REPLACEMENT(altinstr, num) /* replacement */ \
+ b_altinstr(num)":\n\t" altinstr "\n" e_altinstr(num) ":\n" \
+ INSTR_LEN_SANITY_CHECK(altinstr_len(num))
+
+/* alternative assembly primitive: */
+#define ALTERNATIVE(oldinstr, altinstr, facility) \
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(altinstr, 1) \
+ ".popsection\n" \
+ OLDINSTR(oldinstr, 1) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(facility, 1) \
+ ".popsection\n"
+
+#define ALTERNATIVE_2(oldinstr, altinstr1, facility1, altinstr2, facility2)\
+ ".pushsection .altinstr_replacement, \"ax\"\n" \
+ ALTINSTR_REPLACEMENT(altinstr1, 1) \
+ ALTINSTR_REPLACEMENT(altinstr2, 2) \
+ ".popsection\n" \
+ OLDINSTR_2(oldinstr, 1, 2) \
+ ".pushsection .altinstructions,\"a\"\n" \
+ ALTINSTR_ENTRY(facility1, 1) \
+ ALTINSTR_ENTRY(facility2, 2) \
+ ".popsection\n"
+
+/*
+ * Alternative instructions for different CPU types or capabilities.
+ *
+ * This allows optimized instructions to be used even on generic
+ * kernel binaries.
+ *
+ * oldinstr is padded with jump and nops at compile time if altinstr is
+ * longer. altinstr is padded with jump and nops at run-time during patching.
+ *
+ * For non-barrier-like inlines, please define new variants
+ * without volatile and the memory clobber.
+ */
+#define alternative(oldinstr, altinstr, facility) \
+ asm volatile(ALTERNATIVE(oldinstr, altinstr, facility) : : : "memory")
+
+#define alternative_2(oldinstr, altinstr1, facility1, altinstr2, facility2) \
+ asm volatile(ALTERNATIVE_2(oldinstr, altinstr1, facility1, \
+ altinstr2, facility2) ::: "memory")
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_ALTERNATIVE_H */
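As a quick orientation for the macros defined above: the C-level alternative() helper in the sketch below mirrors what the assembly BPOFF macro later in this patch (entry.S) achieves, expressed through this header. It is an illustrative sketch only, not code added by the patch; the function name is hypothetical.

	#include <asm/alternative.h>

	/*
	 * Illustrative sketch only. On machines whose alternate facility
	 * list has bit 82 set, the boot-time patcher replaces the empty
	 * old instruction (statically padded with a 4-byte nop, 0x47000000)
	 * with the 4-byte branch-prediction-blocking instruction 0xb2e8c000;
	 * on all other machines the nop stays in place.
	 */
	static inline void example_bp_off(void)
	{
		alternative("", ".long 0xb2e8c000", 82);
	}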
diff --git a/arch/s390/include/asm/barrier.h b/arch/s390/include/asm/barrier.h
index d68e11e0df5e..e903b28e7358 100644
--- a/arch/s390/include/asm/barrier.h
+++ b/arch/s390/include/asm/barrier.h
@@ -53,4 +53,28 @@ do { \
___p1; \
})
+/**
+ * array_index_mask_nospec - generate a mask for array_index_nospec() that
+ * is ~0UL when the bounds check succeeds and 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ */
+#define array_index_mask_nospec array_index_mask_nospec
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+ unsigned long size)
+{
+ unsigned long mask;
+
+ if (__builtin_constant_p(size) && size > 0) {
+ asm(" clgr %2,%1\n"
+ " slbgr %0,%0\n"
+ :"=d" (mask) : "d" (size-1), "d" (index) :"cc");
+ return mask;
+ }
+ asm(" clgr %1,%2\n"
+ " slbgr %0,%0\n"
+ :"=d" (mask) : "d" (size), "d" (index) :"cc");
+ return ~mask;
+}
+
#endif /* __ASM_BARRIER_H */
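The mask above is consumed by the generic array_index_nospec() helper from <linux/nospec.h> (common code, not part of this hunk). Assuming that helper is present in this tree as it is upstream, the intended usage pattern looks roughly like the sketch below; the table and bound names are placeholders.

	#include <linux/errno.h>
	#include <linux/nospec.h>

	static int table[16];
	#define NR_ENTRIES 16

	/* Illustrative sketch: clamp a user-supplied index under speculation. */
	static int example_lookup(unsigned long idx)
	{
		if (idx >= NR_ENTRIES)
			return -EINVAL;
		/*
		 * In bounds: the mask is ~0UL and idx passes through unchanged.
		 * If the CPU misspeculates past the bounds check, the mask is 0
		 * and the speculated load is forced to table[0].
		 */
		idx = array_index_nospec(idx, NR_ENTRIES);
		return table[idx];
	}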
diff --git a/arch/s390/include/asm/facility.h b/arch/s390/include/asm/facility.h
index 0aa6a7ed95a3..155fcc7bcba6 100644
--- a/arch/s390/include/asm/facility.h
+++ b/arch/s390/include/asm/facility.h
@@ -13,6 +13,24 @@
#define MAX_FACILITY_BIT (256*8) /* stfle_fac_list has 256 bytes */
+static inline void __set_facility(unsigned long nr, void *facilities)
+{
+ unsigned char *ptr = (unsigned char *) facilities;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return;
+ ptr[nr >> 3] |= 0x80 >> (nr & 7);
+}
+
+static inline void __clear_facility(unsigned long nr, void *facilities)
+{
+ unsigned char *ptr = (unsigned char *) facilities;
+
+ if (nr >= MAX_FACILITY_BIT)
+ return;
+ ptr[nr >> 3] &= ~(0x80 >> (nr & 7));
+}
+
static inline int __test_facility(unsigned long nr, void *facilities)
{
unsigned char *ptr;
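Facility bits are numbered most-significant-bit first within each byte, which is why the helpers use 0x80 >> (nr & 7). A standalone worked example for facility bit 82, the bit toggled throughout this patch (illustration only, not part of the patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long nr = 82;                 /* facility bit used by this patch */
		unsigned int byte = nr >> 3;           /* 82 / 8  = 10     */
		unsigned int mask = 0x80 >> (nr & 7);  /* 0x80 >> 2 = 0x20 */

		/* __set_facility(82, list)   does list[10] |= 0x20;
		 * __clear_facility(82, list) does list[10] &= ~0x20. */
		printf("facility %lu -> byte %u, mask 0x%02x\n", nr, byte, mask);
		return 0;
	}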
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index a4811aa0304d..8f8eec9e1198 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -21,17 +21,12 @@
: "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
"m" (*uaddr) : "cc");
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, newval, ret;
load_kernel_asce();
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
pagefault_disable();
switch (op) {
@@ -60,17 +55,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
}
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
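The removed encoded_op decode and cmparg comparison do not disappear: with this interface change they move into the common futex code, which calls the new arch hook and applies the comparison itself. A rough sketch of that generic wrapper is shown below (common-code side, not part of this hunk; details may differ slightly in this tree).

	#include <linux/futex.h>
	#include <linux/bitops.h>
	#include <linux/uaccess.h>

	static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
	{
		unsigned int op  = (encoded_op & 0x70000000) >> 28;
		unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
		int oparg  = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
		int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
		int oldval, ret;

		if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
			oparg = 1 << oparg;

		ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
		if (ret)
			return ret;

		switch (cmp) {
		case FUTEX_OP_CMP_EQ: return oldval == cmparg;
		case FUTEX_OP_CMP_NE: return oldval != cmparg;
		case FUTEX_OP_CMP_LT: return oldval <  cmparg;
		case FUTEX_OP_CMP_GE: return oldval >= cmparg;
		case FUTEX_OP_CMP_LE: return oldval <= cmparg;
		case FUTEX_OP_CMP_GT: return oldval >  cmparg;
		default:              return -ENOSYS;
		}
	}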
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index e9a983f40a24..7d9c5917da2b 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -136,7 +136,8 @@ struct kvm_s390_sie_block {
__u16 ipa; /* 0x0056 */
__u32 ipb; /* 0x0058 */
__u32 scaoh; /* 0x005c */
- __u8 reserved60; /* 0x0060 */
+#define FPF_BPBC 0x20
+ __u8 fpf; /* 0x0060 */
__u8 ecb; /* 0x0061 */
__u8 ecb2; /* 0x0062 */
#define ECB3_AES 0x04
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index afe1cfebf1a4..8520c23e419b 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -155,7 +155,9 @@ struct _lowcore {
/* Per cpu primary space access list */
__u32 paste[16]; /* 0x0400 */
- __u8 pad_0x04c0[0x0e00-0x0440]; /* 0x0440 */
+ /* br %r1 trampoline */
+ __u16 br_r1_trampoline; /* 0x0440 */
+ __u8 pad_0x0442[0x0e00-0x0442]; /* 0x0442 */
/*
* 0xe00 contains the address of the IPL Parameter Information
@@ -170,7 +172,8 @@ struct _lowcore {
__u8 pad_0x0e20[0x0f00-0x0e20]; /* 0x0e20 */
/* Extended facility list */
- __u64 stfle_fac_list[32]; /* 0x0f00 */
+ __u64 stfle_fac_list[16]; /* 0x0f00 */
+ __u64 alt_stfle_fac_list[16]; /* 0x0f80 */
__u8 pad_0x1000[0x11b0-0x1000]; /* 0x1000 */
/* Pointer to vector register save area */
diff --git a/arch/s390/include/asm/nospec-branch.h b/arch/s390/include/asm/nospec-branch.h
new file mode 100644
index 000000000000..b4bd8c41e9d3
--- /dev/null
+++ b/arch/s390/include/asm/nospec-branch.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_EXPOLINE_H
+#define _ASM_S390_EXPOLINE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+extern int nospec_disable;
+
+void nospec_init_branches(void);
+void nospec_auto_detect(void);
+void nospec_revert(s32 *start, s32 *end);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_EXPOLINE_H */
diff --git a/arch/s390/include/asm/nospec-insn.h b/arch/s390/include/asm/nospec-insn.h
new file mode 100644
index 000000000000..087fc9b972c5
--- /dev/null
+++ b/arch/s390/include/asm/nospec-insn.h
@@ -0,0 +1,182 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_S390_NOSPEC_ASM_H
+#define _ASM_S390_NOSPEC_ASM_H
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_EXPOLINE
+
+/*
+ * The expoline macros are used to create thunks in the same format
+ * as gcc generates them. The 'comdat' section flag makes sure that
+ * the various thunks are merged into a single copy.
+ */
+ .macro __THUNK_PROLOG_NAME name
+ .pushsection .text.\name,"axG",@progbits,\name,comdat
+ .globl \name
+ .hidden \name
+ .type \name,@function
+\name:
+ .cfi_startproc
+ .endm
+
+ .macro __THUNK_EPILOG
+ .cfi_endproc
+ .popsection
+ .endm
+
+ .macro __THUNK_PROLOG_BR r1,r2
+ __THUNK_PROLOG_NAME __s390x_indirect_jump_r\r2\()use_r\r1
+ .endm
+
+ .macro __THUNK_PROLOG_BC d0,r1,r2
+ __THUNK_PROLOG_NAME __s390x_indirect_branch_\d0\()_\r2\()use_\r1
+ .endm
+
+ .macro __THUNK_BR r1,r2
+ jg __s390x_indirect_jump_r\r2\()use_r\r1
+ .endm
+
+ .macro __THUNK_BC d0,r1,r2
+ jg __s390x_indirect_branch_\d0\()_\r2\()use_\r1
+ .endm
+
+ .macro __THUNK_BRASL r1,r2,r3
+ brasl \r1,__s390x_indirect_jump_r\r3\()use_r\r2
+ .endm
+
+ .macro __DECODE_RR expand,reg,ruse
+ .set __decode_fail,1
+ .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+ .ifc \reg,%r\r1
+ .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+ .ifc \ruse,%r\r2
+ \expand \r1,\r2
+ .set __decode_fail,0
+ .endif
+ .endr
+ .endif
+ .endr
+ .if __decode_fail == 1
+ .error "__DECODE_RR failed"
+ .endif
+ .endm
+
+ .macro __DECODE_RRR expand,rsave,rtarget,ruse
+ .set __decode_fail,1
+ .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+ .ifc \rsave,%r\r1
+ .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+ .ifc \rtarget,%r\r2
+ .irp r3,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+ .ifc \ruse,%r\r3
+ \expand \r1,\r2,\r3
+ .set __decode_fail,0
+ .endif
+ .endr
+ .endif
+ .endr
+ .endif
+ .endr
+ .if __decode_fail == 1
+ .error "__DECODE_RRR failed"
+ .endif
+ .endm
+
+ .macro __DECODE_DRR expand,disp,reg,ruse
+ .set __decode_fail,1
+ .irp r1,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+ .ifc \reg,%r\r1
+ .irp r2,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15
+ .ifc \ruse,%r\r2
+ \expand \disp,\r1,\r2
+ .set __decode_fail,0
+ .endif
+ .endr
+ .endif
+ .endr
+ .if __decode_fail == 1
+ .error "__DECODE_DRR failed"
+ .endif
+ .endm
+
+ .macro __THUNK_EX_BR reg,ruse
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+ exrl 0,555f
+ j .
+#else
+ larl \ruse,555f
+ ex 0,0(\ruse)
+ j .
+#endif
+555: br \reg
+ .endm
+
+ .macro __THUNK_EX_BC disp,reg,ruse
+#ifdef CONFIG_HAVE_MARCH_Z10_FEATURES
+ exrl 0,556f
+ j .
+#else
+ larl \ruse,556f
+ ex 0,0(\ruse)
+ j .
+#endif
+556: b \disp(\reg)
+ .endm
+
+ .macro GEN_BR_THUNK reg,ruse=%r1
+ __DECODE_RR __THUNK_PROLOG_BR,\reg,\ruse
+ __THUNK_EX_BR \reg,\ruse
+ __THUNK_EPILOG
+ .endm
+
+ .macro GEN_B_THUNK disp,reg,ruse=%r1
+ __DECODE_DRR __THUNK_PROLOG_BC,\disp,\reg,\ruse
+ __THUNK_EX_BC \disp,\reg,\ruse
+ __THUNK_EPILOG
+ .endm
+
+ .macro BR_EX reg,ruse=%r1
+557: __DECODE_RR __THUNK_BR,\reg,\ruse
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 557b-.
+ .popsection
+ .endm
+
+ .macro B_EX disp,reg,ruse=%r1
+558: __DECODE_DRR __THUNK_BC,\disp,\reg,\ruse
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 558b-.
+ .popsection
+ .endm
+
+ .macro BASR_EX rsave,rtarget,ruse=%r1
+559: __DECODE_RRR __THUNK_BRASL,\rsave,\rtarget,\ruse
+ .pushsection .s390_indirect_branches,"a",@progbits
+ .long 559b-.
+ .popsection
+ .endm
+
+#else
+ .macro GEN_BR_THUNK reg,ruse=%r1
+ .endm
+
+ .macro GEN_B_THUNK disp,reg,ruse=%r1
+ .endm
+
+ .macro BR_EX reg,ruse=%r1
+ br \reg
+ .endm
+
+ .macro B_EX disp,reg,ruse=%r1
+ b \disp(\reg)
+ .endm
+
+ .macro BASR_EX rsave,rtarget,ruse=%r1
+ basr \rsave,\rtarget
+ .endm
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_S390_NOSPEC_ASM_H */
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index c61ed7890cef..f915a0f1b0fc 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -69,6 +69,7 @@ extern void s390_adjust_jiffies(void);
extern const struct seq_operations cpuinfo_op;
extern int sysctl_ieee_emulation_warnings;
extern void execve_tail(void);
+extern void __bpon(void);
/*
* User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
@@ -315,6 +316,9 @@ extern void memcpy_absolute(void *, void *, size_t);
memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
}
+extern int s390_isolate_bp(void);
+extern int s390_isolate_bp_guest(void);
+
#endif /* __ASSEMBLY__ */
#endif /* __ASM_S390_PROCESSOR_H */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 692b9247c019..b2504163c8fa 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -78,6 +78,8 @@ void arch_release_task_struct(struct task_struct *tsk);
#define TIF_SECCOMP 5 /* secure computing */
#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
#define TIF_UPROBE 7 /* breakpointed or single-stepping */
+#define TIF_ISOLATE_BP 8 /* Run process with isolated BP */
+#define TIF_ISOLATE_BP_GUEST 9 /* Run KVM guests with isolated BP */
#define TIF_31BIT 16 /* 32bit process */
#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
#define TIF_RESTORE_SIGMASK 18 /* restore signal mask in do_signal() */
@@ -93,6 +95,8 @@ void arch_release_task_struct(struct task_struct *tsk);
#define _TIF_SECCOMP _BITUL(TIF_SECCOMP)
#define _TIF_SYSCALL_TRACEPOINT _BITUL(TIF_SYSCALL_TRACEPOINT)
#define _TIF_UPROBE _BITUL(TIF_UPROBE)
+#define _TIF_ISOLATE_BP _BITUL(TIF_ISOLATE_BP)
+#define _TIF_ISOLATE_BP_GUEST _BITUL(TIF_ISOLATE_BP_GUEST)
#define _TIF_31BIT _BITUL(TIF_31BIT)
#define _TIF_SINGLE_STEP _BITUL(TIF_SINGLE_STEP)
diff --git a/arch/s390/include/uapi/asm/kvm.h b/arch/s390/include/uapi/asm/kvm.h
index ef1a5fcc6c66..beb508a9e72c 100644
--- a/arch/s390/include/uapi/asm/kvm.h
+++ b/arch/s390/include/uapi/asm/kvm.h
@@ -151,6 +151,7 @@ struct kvm_guest_debug_arch {
#define KVM_SYNC_ARCH0 (1UL << 4)
#define KVM_SYNC_PFAULT (1UL << 5)
#define KVM_SYNC_VRS (1UL << 6)
+#define KVM_SYNC_BPBC (1UL << 10)
/* definition of registers in kvm_run */
struct kvm_sync_regs {
__u64 prefix; /* prefix register */
@@ -168,6 +169,8 @@ struct kvm_sync_regs {
__u64 vrs[32][2]; /* vector registers */
__u8 reserved[512]; /* for future vector expansion */
__u32 fpc; /* only valid with vector registers */
+ __u8 bpbc : 1; /* bp mode */
+ __u8 reserved2 : 7;
};
#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
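For context, the new bpbc bit is driven from userspace through the usual kvm_run sync-regs protocol and is picked up by sync_regs() in the kvm-s390.c hunk later in this patch. A hypothetical VMM-side fragment (illustration only; 'run' is the vcpu's mmap'ed struct kvm_run):

	#include <linux/kvm.h>

	/* Mirror the guest's BP-behaviour-change setting into the SIE block. */
	static void example_set_bpbc(struct kvm_run *run, int enable)
	{
		run->s.regs.bpbc = enable ? 1 : 0;
		run->kvm_dirty_regs |= KVM_SYNC_BPBC;	/* consumed by sync_regs() */
	}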
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index dc167a23b920..c4d4d4ef5e58 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -44,10 +44,14 @@ obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o dumpstack.o
-obj-y += entry.o reipl.o relocate_kernel.o
+obj-y += entry.o reipl.o relocate_kernel.o alternative.o
+obj-y += nospec-branch.o
extra-y += head.o head64.o vmlinux.lds
+obj-$(CONFIG_SYSFS) += nospec-sysfs.o
+CFLAGS_REMOVE_nospec-branch.o += $(CC_FLAGS_EXPOLINE)
+
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SCHED_BOOK) += topology.o
diff --git a/arch/s390/kernel/alternative.c b/arch/s390/kernel/alternative.c
new file mode 100644
index 000000000000..b57b293998dc
--- /dev/null
+++ b/arch/s390/kernel/alternative.c
@@ -0,0 +1,112 @@
+#include <linux/module.h>
+#include <asm/alternative.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+#define MAX_PATCH_LEN (255 - 1)
+
+static int __initdata_or_module alt_instr_disabled;
+
+static int __init disable_alternative_instructions(char *str)
+{
+ alt_instr_disabled = 1;
+ return 0;
+}
+
+early_param("noaltinstr", disable_alternative_instructions);
+
+struct brcl_insn {
+ u16 opc;
+ s32 disp;
+} __packed;
+
+static u16 __initdata_or_module nop16 = 0x0700;
+static u32 __initdata_or_module nop32 = 0x47000000;
+static struct brcl_insn __initdata_or_module nop48 = {
+ 0xc004, 0
+};
+
+static const void *nops[] __initdata_or_module = {
+ &nop16,
+ &nop32,
+ &nop48
+};
+
+static void __init_or_module add_jump_padding(void *insns, unsigned int len)
+{
+ struct brcl_insn brcl = {
+ 0xc0f4,
+ len / 2
+ };
+
+ memcpy(insns, &brcl, sizeof(brcl));
+ insns += sizeof(brcl);
+ len -= sizeof(brcl);
+
+ while (len > 0) {
+ memcpy(insns, &nop16, 2);
+ insns += 2;
+ len -= 2;
+ }
+}
+
+static void __init_or_module add_padding(void *insns, unsigned int len)
+{
+ if (len > 6)
+ add_jump_padding(insns, len);
+ else if (len >= 2)
+ memcpy(insns, nops[len / 2 - 1], len);
+}
+
+static void __init_or_module __apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end)
+{
+ struct alt_instr *a;
+ u8 *instr, *replacement;
+ u8 insnbuf[MAX_PATCH_LEN];
+
+ /*
+ * Alternatives are scanned from start to end. Code patched by a
+ * later entry may overwrite code patched by an earlier one.
+ */
+ for (a = start; a < end; a++) {
+ int insnbuf_sz = 0;
+
+ instr = (u8 *)&a->instr_offset + a->instr_offset;
+ replacement = (u8 *)&a->repl_offset + a->repl_offset;
+
+ if (!__test_facility(a->facility,
+ S390_lowcore.alt_stfle_fac_list))
+ continue;
+
+ if (unlikely(a->instrlen % 2 || a->replacementlen % 2)) {
+ WARN_ONCE(1, "cpu alternatives instructions length is "
+ "odd, skipping patching\n");
+ continue;
+ }
+
+ memcpy(insnbuf, replacement, a->replacementlen);
+ insnbuf_sz = a->replacementlen;
+
+ if (a->instrlen > a->replacementlen) {
+ add_padding(insnbuf + a->replacementlen,
+ a->instrlen - a->replacementlen);
+ insnbuf_sz += a->instrlen - a->replacementlen;
+ }
+
+ s390_kernel_write(instr, insnbuf, insnbuf_sz);
+ }
+}
+
+void __init_or_module apply_alternatives(struct alt_instr *start,
+ struct alt_instr *end)
+{
+ if (!alt_instr_disabled)
+ __apply_alternatives(start, end);
+}
+
+extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
+void __init apply_alternative_instructions(void)
+{
+ apply_alternatives(__alt_instructions, __alt_instructions_end);
+}
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 326f717df587..61fca549a93b 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -8,18 +8,22 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
#include <asm/ptrace.h>
#include <asm/sigp.h>
+ GEN_BR_THUNK %r9
+ GEN_BR_THUNK %r14
+
ENTRY(s390_base_mcck_handler)
basr %r13,0
0: lg %r15,__LC_PANIC_STACK # load panic stack
aghi %r15,-STACK_FRAME_OVERHEAD
larl %r1,s390_base_mcck_handler_fn
- lg %r1,0(%r1)
- ltgr %r1,%r1
+ lg %r9,0(%r1)
+ ltgr %r9,%r9
jz 1f
- basr %r14,%r1
+ BASR_EX %r14,%r9
1: la %r1,4095
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
lpswe __LC_MCK_OLD_PSW
@@ -36,10 +40,10 @@ ENTRY(s390_base_ext_handler)
basr %r13,0
0: aghi %r15,-STACK_FRAME_OVERHEAD
larl %r1,s390_base_ext_handler_fn
- lg %r1,0(%r1)
- ltgr %r1,%r1
+ lg %r9,0(%r1)
+ ltgr %r9,%r9
jz 1f
- basr %r14,%r1
+ BASR_EX %r14,%r9
1: lmg %r0,%r15,__LC_SAVE_AREA_ASYNC
ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
lpswe __LC_EXT_OLD_PSW
@@ -56,10 +60,10 @@ ENTRY(s390_base_pgm_handler)
basr %r13,0
0: aghi %r15,-STACK_FRAME_OVERHEAD
larl %r1,s390_base_pgm_handler_fn
- lg %r1,0(%r1)
- ltgr %r1,%r1
+ lg %r9,0(%r1)
+ ltgr %r9,%r9
jz 1f
- basr %r14,%r1
+ BASR_EX %r14,%r9
lmg %r0,%r15,__LC_SAVE_AREA_SYNC
lpswe __LC_PGM_OLD_PSW
1: lpswe disabled_wait_psw-0b(%r13)
@@ -116,7 +120,7 @@ ENTRY(diag308_reset)
larl %r4,.Lcontinue_psw # Restore PSW flags
lpswe 0(%r4)
.Lcontinue:
- br %r14
+ BR_EX %r14
.align 16
.Lrestart_psw:
.long 0x00080000,0x80000000 + .Lrestart_part2
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index ee7b8e7ca4f8..8eccead675d4 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -279,6 +279,11 @@ static noinline __init void setup_facility_list(void)
{
stfle(S390_lowcore.stfle_fac_list,
ARRAY_SIZE(S390_lowcore.stfle_fac_list));
+ memcpy(S390_lowcore.alt_stfle_fac_list,
+ S390_lowcore.stfle_fac_list,
+ sizeof(S390_lowcore.alt_stfle_fac_list));
+ if (!IS_ENABLED(CONFIG_KERNEL_NOBP))
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
}
static __init void detect_diag9c(void)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 4612ed7ec2e5..5416d5d68308 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -23,6 +23,7 @@
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
+#include <asm/nospec-insn.h>
__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 8
@@ -104,6 +105,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
j 3f
1: LAST_BREAK %r14
UPDATE_VTIME %r14,%r15,\timer
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
2: lg %r15,__LC_ASYNC_STACK # load async stack
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
@@ -162,8 +164,79 @@ _PIF_WORK = (_PIF_PER_TRAP)
tm off+\addr, \mask
.endm
+ .macro BPOFF
+ .pushsection .altinstr_replacement, "ax"
+660: .long 0xb2e8c000
+ .popsection
+661: .long 0x47000000
+ .pushsection .altinstructions, "a"
+ .long 661b - .
+ .long 660b - .
+ .word 82
+ .byte 4
+ .byte 4
+ .popsection
+ .endm
+
+ .macro BPON
+ .pushsection .altinstr_replacement, "ax"
+662: .long 0xb2e8d000
+ .popsection
+663: .long 0x47000000
+ .pushsection .altinstructions, "a"
+ .long 663b - .
+ .long 662b - .
+ .word 82
+ .byte 4
+ .byte 4
+ .popsection
+ .endm
+
+ .macro BPENTER tif_ptr,tif_mask
+ .pushsection .altinstr_replacement, "ax"
+662: .word 0xc004, 0x0000, 0x0000 # 6 byte nop
+ .word 0xc004, 0x0000, 0x0000 # 6 byte nop
+ .popsection
+664: TSTMSK \tif_ptr,\tif_mask
+ jz . + 8
+ .long 0xb2e8d000
+ .pushsection .altinstructions, "a"
+ .long 664b - .
+ .long 662b - .
+ .word 82
+ .byte 12
+ .byte 12
+ .popsection
+ .endm
+
+ .macro BPEXIT tif_ptr,tif_mask
+ TSTMSK \tif_ptr,\tif_mask
+ .pushsection .altinstr_replacement, "ax"
+662: jnz . + 8
+ .long 0xb2e8d000
+ .popsection
+664: jz . + 8
+ .long 0xb2e8c000
+ .pushsection .altinstructions, "a"
+ .long 664b - .
+ .long 662b - .
+ .word 82
+ .byte 8
+ .byte 8
+ .popsection
+ .endm
+
+ GEN_BR_THUNK %r9
+ GEN_BR_THUNK %r14
+ GEN_BR_THUNK %r14,%r11
+
.section .kprobes.text, "ax"
+ENTRY(__bpon)
+ .globl __bpon
+ BPON
+ BR_EX %r14
+
/*
* Scheduler resume function, called by switch_to
* gpr2 = (task_struct *) prev
@@ -190,9 +263,9 @@ ENTRY(__switch_to)
mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
- bzr %r14
+ jz 0f
.insn s,0xb2800000,__LC_LPP # set program parameter
- br %r14
+0: BR_EX %r14
.L__critical_start:
@@ -204,9 +277,11 @@ ENTRY(__switch_to)
*/
ENTRY(sie64a)
stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers
+ lg %r12,__LC_CURRENT
stg %r2,__SF_EMPTY(%r15) # save control block pointer
stg %r3,__SF_EMPTY+8(%r15) # save guest register save area
xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # reason code = 0
+ mvc __SF_EMPTY+24(8,%r15),__TI_flags(%r12) # copy thread flags
TSTMSK __LC_CPU_FLAGS,_CIF_FPU # load guest fp/vx registers ?
jno .Lsie_load_guest_gprs
brasl %r14,load_fpu_regs # load guest fp/vx regs
@@ -223,7 +298,11 @@ ENTRY(sie64a)
jnz .Lsie_skip
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
jo .Lsie_skip # exit if fp/vx regs changed
+ BPEXIT __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
sie 0(%r14)
+.Lsie_exit:
+ BPOFF
+ BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
@@ -244,9 +323,15 @@ ENTRY(sie64a)
sie_exit:
lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
stmg %r0,%r13,0(%r14) # save guest gprs 0-13
+ xgr %r0,%r0 # clear guest registers to
+ xgr %r1,%r1 # prevent speculative use
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
lg %r2,__SF_EMPTY+16(%r15) # return exit reason code
- br %r14
+ BR_EX %r14
.Lsie_fault:
lghi %r14,-EFAULT
stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
@@ -267,6 +352,7 @@ ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
+ BPOFF
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
lghi %r14,_PIF_SYSCALL
@@ -276,12 +362,15 @@ ENTRY(system_call)
LAST_BREAK %r13
.Lsysc_vtime:
UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
+ # clear user controlled register to prevent speculative use
+ xgr %r0,%r0
lg %r10,__TI_sysc_table(%r12) # address of system call table
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
@@ -299,7 +388,7 @@ ENTRY(system_call)
lgf %r9,0(%r8,%r10) # get system call add.
TSTMSK __TI_flags(%r12),_TIF_TRACE
jnz .Lsysc_tracesys
- basr %r14,%r9 # call sys_xxxx
+ BASR_EX %r14,%r9 # call sys_xxxx
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_return:
@@ -311,6 +400,7 @@ ENTRY(system_call)
jnz .Lsysc_work # check for work
TSTMSK __LC_CPU_FLAGS,_CIF_WORK
jnz .Lsysc_work
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
.Lsysc_restore:
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
@@ -438,7 +528,7 @@ ENTRY(system_call)
lmg %r3,%r7,__PT_R3(%r11)
stg %r7,STACK_FRAME_OVERHEAD(%r15)
lg %r2,__PT_ORIG_GPR2(%r11)
- basr %r14,%r9 # call sys_xxx
+ BASR_EX %r14,%r9 # call sys_xxx
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_tracenogo:
TSTMSK __TI_flags(%r12),_TIF_TRACE
@@ -462,7 +552,7 @@ ENTRY(ret_from_fork)
lmg %r9,%r10,__PT_R9(%r11) # load gprs
ENTRY(kernel_thread_starter)
la %r2,0(%r10)
- basr %r14,%r9
+ BASR_EX %r14,%r9
j .Lsysc_tracenogo
/*
@@ -471,6 +561,7 @@ ENTRY(kernel_thread_starter)
ENTRY(pgm_check_handler)
stpt __LC_SYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
@@ -495,6 +586,7 @@ ENTRY(pgm_check_handler)
j 3f
2: LAST_BREAK %r14
UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+ BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
lg %r15,__LC_KERNEL_STACK
lg %r14,__TI_task(%r12)
aghi %r14,__TASK_thread # pointer to thread_struct
@@ -504,6 +596,15 @@ ENTRY(pgm_check_handler)
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC
@@ -525,9 +626,9 @@ ENTRY(pgm_check_handler)
nill %r10,0x007f
sll %r10,2
je .Lpgm_return
- lgf %r1,0(%r10,%r1) # load address of handler routine
+ lgf %r9,0(%r10,%r1) # load address of handler routine
lgr %r2,%r11 # pass pointer to pt_regs
- basr %r14,%r1 # branch to interrupt-handler
+ BASR_EX %r14,%r9 # branch to interrupt-handler
.Lpgm_return:
LOCKDEP_SYS_EXIT
tm __PT_PSW+1(%r11),0x01 # returning to user ?
@@ -560,6 +661,7 @@ ENTRY(pgm_check_handler)
ENTRY(io_int_handler)
STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
@@ -567,6 +669,16 @@ ENTRY(io_int_handler)
lmg %r8,%r9,__LC_IO_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
@@ -601,9 +713,13 @@ ENTRY(io_int_handler)
lg %r14,__LC_VDSO_PER_CPU
lmg %r0,%r10,__PT_R0(%r11)
mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
+ tm __PT_PSW+1(%r11),0x01 # returning to user ?
+ jno .Lio_exit_kernel
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
.Lio_exit_timer:
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
+.Lio_exit_kernel:
lmg %r11,%r15,__PT_R11(%r11)
lpswe __LC_RETURN_PSW
.Lio_done:
@@ -735,6 +851,7 @@ ENTRY(io_int_handler)
ENTRY(ext_int_handler)
STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
+ BPOFF
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_THREAD_INFO
@@ -742,6 +859,16 @@ ENTRY(ext_int_handler)
lmg %r8,%r9,__LC_EXT_OLD_PSW
SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
stmg %r8,%r9,__PT_PSW(%r11)
lghi %r1,__LC_EXT_PARAMS2
@@ -773,11 +900,12 @@ ENTRY(psw_idle)
.insn rsy,0xeb0000000017,%r1,5,__SF_EMPTY+16(%r15)
.Lpsw_idle_stcctm:
#endif
+ BPON
STCK __CLOCK_IDLE_ENTER(%r2)
stpt __TIMER_IDLE_ENTER(%r2)
.Lpsw_idle_lpsw:
lpswe __SF_EMPTY(%r15)
- br %r14
+ BR_EX %r14
.Lpsw_idle_end:
/*
@@ -791,7 +919,7 @@ ENTRY(save_fpu_regs)
lg %r2,__LC_CURRENT
aghi %r2,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- bor %r14
+ jo .Lsave_fpu_regs_exit
stfpc __THREAD_FPU_fpc(%r2)
.Lsave_fpu_regs_fpc_end:
lg %r3,__THREAD_FPU_regs(%r2)
@@ -821,7 +949,8 @@ ENTRY(save_fpu_regs)
std 15,120(%r3)
.Lsave_fpu_regs_done:
oi __LC_CPU_FLAGS+7,_CIF_FPU
- br %r14
+.Lsave_fpu_regs_exit:
+ BR_EX %r14
.Lsave_fpu_regs_end:
/*
@@ -838,7 +967,7 @@ load_fpu_regs:
lg %r4,__LC_CURRENT
aghi %r4,__TASK_thread
TSTMSK __LC_CPU_FLAGS,_CIF_FPU
- bnor %r14
+ jno .Lload_fpu_regs_exit
lfpc __THREAD_FPU_fpc(%r4)
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_VX
lg %r4,__THREAD_FPU_regs(%r4) # %r4 <- reg save area
@@ -867,7 +996,8 @@ load_fpu_regs:
ld 15,120(%r4)
.Lload_fpu_regs_done:
ni __LC_CPU_FLAGS+7,255-_CIF_FPU
- br %r14
+.Lload_fpu_regs_exit:
+ BR_EX %r14
.Lload_fpu_regs_end:
.L__critical_end:
@@ -877,6 +1007,7 @@ load_fpu_regs:
*/
ENTRY(mcck_int_handler)
STCK __LC_MCCK_CLOCK
+ BPOFF
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
@@ -908,6 +1039,16 @@ ENTRY(mcck_int_handler)
.Lmcck_skip:
lghi %r14,__LC_GPREGS_SAVE_AREA+64
stmg %r0,%r7,__PT_R0(%r11)
+ # clear user controlled registers to prevent speculative use
+ xgr %r0,%r0
+ xgr %r1,%r1
+ xgr %r2,%r2
+ xgr %r3,%r3
+ xgr %r4,%r4
+ xgr %r5,%r5
+ xgr %r6,%r6
+ xgr %r7,%r7
+ xgr %r10,%r10
mvc __PT_R8(64,%r11),0(%r14)
stmg %r8,%r9,__PT_PSW(%r11)
xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
@@ -933,6 +1074,7 @@ ENTRY(mcck_int_handler)
mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
jno 0f
+ BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
stpt __LC_EXIT_TIMER
mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
0: lmg %r11,%r15,__PT_R11(%r11)
@@ -1028,7 +1170,7 @@ cleanup_critical:
jl 0f
clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
jl .Lcleanup_load_fpu_regs
-0: br %r14
+0: BR_EX %r14
.align 8
.Lcleanup_table:
@@ -1053,11 +1195,12 @@ cleanup_critical:
.quad .Lsie_done
.Lcleanup_sie:
+ BPENTER __SF_EMPTY+24(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
lg %r9,__SF_EMPTY(%r15) # get control block pointer
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
- br %r14
+ BR_EX %r14
#endif
.Lcleanup_system_call:
@@ -1099,7 +1242,8 @@ cleanup_critical:
srag %r9,%r9,23
jz 0f
mvc __TI_last_break(8,%r12),16(%r11)
-0: # set up saved register r11
+0: BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
+ # set up saved register r11
lg %r15,__LC_KERNEL_STACK
la %r9,STACK_FRAME_OVERHEAD(%r15)
stg %r9,24(%r11) # r11 pt_regs pointer
@@ -1114,7 +1258,7 @@ cleanup_critical:
stg %r15,56(%r11) # r15 stack pointer
# set new psw address and exit
larl %r9,.Lsysc_do_svc
- br %r14
+ BR_EX %r14,%r11
.Lcleanup_system_call_insn:
.quad system_call
.quad .Lsysc_stmg
@@ -1124,7 +1268,7 @@ cleanup_critical:
.Lcleanup_sysc_tif:
larl %r9,.Lsysc_tif
- br %r14
+ BR_EX %r14,%r11
.Lcleanup_sysc_restore:
# check if stpt has been executed
@@ -1141,14 +1285,14 @@ cleanup_critical:
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
1: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
+ BR_EX %r14,%r11
.Lcleanup_sysc_restore_insn:
.quad .Lsysc_exit_timer
.quad .Lsysc_done - 4
.Lcleanup_io_tif:
larl %r9,.Lio_tif
- br %r14
+ BR_EX %r14,%r11
.Lcleanup_io_restore:
# check if stpt has been executed
@@ -1162,7 +1306,7 @@ cleanup_critical:
mvc 0(64,%r11),__PT_R8(%r9)
lmg %r0,%r7,__PT_R0(%r9)
1: lmg %r8,%r9,__LC_RETURN_PSW
- br %r14
+ BR_EX %r14,%r11
.Lcleanup_io_restore_insn:
.quad .Lio_exit_timer
.quad .Lio_done - 4
@@ -1214,17 +1358,17 @@ cleanup_critical:
# prepare return psw
nihh %r8,0xfcfd # clear irq & wait state bits
lg %r9,48(%r11) # return from psw_idle
- br %r14
+ BR_EX %r14,%r11
.Lcleanup_idle_insn:
.quad .Lpsw_idle_lpsw
.Lcleanup_save_fpu_regs:
larl %r9,save_fpu_regs
- br %r14
+ BR_EX %r14,%r11
.Lcleanup_load_fpu_regs:
larl %r9,load_fpu_regs
- br %r14
+ BR_EX %r14,%r11
/*
* Integer constants
@@ -1240,7 +1384,6 @@ cleanup_critical:
.Lsie_critical_length:
.quad .Lsie_done - .Lsie_gmap
#endif
-
.section .rodata, "a"
#define SYSCALL(esame,emu) .long esame
.globl sys_call_table
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index e73979236659..837bb301023f 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -563,6 +563,7 @@ static struct kset *ipl_kset;
static void __ipl_run(void *unused)
{
+ __bpon();
diag308(DIAG308_IPL, NULL);
if (MACHINE_IS_VM)
__cpcmd("IPL", NULL, 0, NULL);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index f41d5208aaf7..590e9394b4dd 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -173,10 +173,9 @@ void do_softirq_own_stack(void)
new -= STACK_FRAME_OVERHEAD;
((struct stack_frame *) new)->back_chain = old;
asm volatile(" la 15,0(%0)\n"
- " basr 14,%2\n"
+ " brasl 14,__do_softirq\n"
" la 15,0(%1)\n"
- : : "a" (new), "a" (old),
- "a" (__do_softirq)
+ : : "a" (new), "a" (old)
: "0", "1", "2", "3", "4", "5", "14",
"cc", "memory" );
} else {
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index 0c1a679314dd..9bd1933848b8 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -31,6 +31,9 @@
#include <linux/kernel.h>
#include <linux/moduleloader.h>
#include <linux/bug.h>
+#include <asm/alternative.h>
+#include <asm/nospec-branch.h>
+#include <asm/facility.h>
#if 0
#define DEBUGP printk
@@ -163,7 +166,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
me->arch.got_offset = me->core_size;
me->core_size += me->arch.got_size;
me->arch.plt_offset = me->core_size;
- me->core_size += me->arch.plt_size;
+ if (me->arch.plt_size) {
+ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable)
+ me->arch.plt_size += PLT_ENTRY_SIZE;
+ me->core_size += me->arch.plt_size;
+ }
return 0;
}
@@ -317,9 +324,20 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
unsigned int *ip;
ip = me->module_core + me->arch.plt_offset +
info->plt_offset;
- ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
- ip[1] = 0x100a0004;
- ip[2] = 0x07f10000;
+ ip[0] = 0x0d10e310; /* basr 1,0 */
+ ip[1] = 0x100a0004; /* lg 1,10(1) */
+ if (IS_ENABLED(CONFIG_EXPOLINE) && !nospec_disable) {
+ unsigned int *ij;
+ ij = me->module_core +
+ me->arch.plt_offset +
+ me->arch.plt_size - PLT_ENTRY_SIZE;
+ ip[2] = 0xa7f40000 + /* j __jump_r1 */
+ (unsigned int)(u16)
+ (((unsigned long) ij - 8 -
+ (unsigned long) ip) / 2);
+ } else {
+ ip[2] = 0x07f10000; /* br %r1 */
+ }
ip[3] = (unsigned int) (val >> 32);
ip[4] = (unsigned int) val;
info->plt_initialized = 1;
@@ -424,6 +442,45 @@ int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *me)
{
+ const Elf_Shdr *s;
+ char *secstrings, *secname;
+ void *aseg;
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ !nospec_disable && me->arch.plt_size) {
+ unsigned int *ij;
+
+ ij = me->module_core + me->arch.plt_offset +
+ me->arch.plt_size - PLT_ENTRY_SIZE;
+ if (test_facility(35)) {
+ ij[0] = 0xc6000000; /* exrl %r0,.+10 */
+ ij[1] = 0x0005a7f4; /* j . */
+ ij[2] = 0x000007f1; /* br %r1 */
+ } else {
+ ij[0] = 0x44000000 | (unsigned int)
+ offsetof(struct _lowcore, br_r1_trampoline);
+ ij[1] = 0xa7f40000; /* j . */
+ }
+ }
+
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ aseg = (void *) s->sh_addr;
+ secname = secstrings + s->sh_name;
+
+ if (!strcmp(".altinstructions", secname))
+ /* patch .altinstructions */
+ apply_alternatives(aseg, aseg + s->sh_size);
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ (!strncmp(".s390_indirect", secname, 14)))
+ nospec_revert(aseg, aseg + s->sh_size);
+
+ if (IS_ENABLED(CONFIG_EXPOLINE) &&
+ (!strncmp(".s390_return", secname, 12)))
+ nospec_revert(aseg, aseg + s->sh_size);
+ }
+
jump_label_apply_nops(me);
vfree(me->arch.syminfo);
me->arch.syminfo = NULL;
diff --git a/arch/s390/kernel/nospec-branch.c b/arch/s390/kernel/nospec-branch.c
new file mode 100644
index 000000000000..d5eed651b5ab
--- /dev/null
+++ b/arch/s390/kernel/nospec-branch.c
@@ -0,0 +1,166 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/module.h>
+#include <linux/device.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+static int __init nobp_setup_early(char *str)
+{
+ bool enabled;
+ int rc;
+
+ rc = kstrtobool(str, &enabled);
+ if (rc)
+ return rc;
+ if (enabled && test_facility(82)) {
+ /*
+ * The user explicitly requested nobp=1, enable it and
+ * disable the expoline support.
+ */
+ __set_facility(82, S390_lowcore.alt_stfle_fac_list);
+ if (IS_ENABLED(CONFIG_EXPOLINE))
+ nospec_disable = 1;
+ } else {
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+ return 0;
+}
+early_param("nobp", nobp_setup_early);
+
+static int __init nospec_setup_early(char *str)
+{
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ return 0;
+}
+early_param("nospec", nospec_setup_early);
+
+static int __init nospec_report(void)
+{
+ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+ pr_info("Spectre V2 mitigation: execute trampolines.\n");
+ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+ pr_info("Spectre V2 mitigation: limited branch prediction.\n");
+ return 0;
+}
+arch_initcall(nospec_report);
+
+#ifdef CONFIG_EXPOLINE
+
+int nospec_disable = IS_ENABLED(CONFIG_EXPOLINE_OFF);
+
+static int __init nospectre_v2_setup_early(char *str)
+{
+ nospec_disable = 1;
+ return 0;
+}
+early_param("nospectre_v2", nospectre_v2_setup_early);
+
+void __init nospec_auto_detect(void)
+{
+ if (IS_ENABLED(CC_USING_EXPOLINE)) {
+ /*
+ * The kernel has been compiled with expolines.
+ * Keep expolines enabled and disable nobp.
+ */
+ nospec_disable = 0;
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+ /*
+ * If the kernel has not been compiled with expolines the
+ * nobp setting decides what is done; this depends on the
+ * CONFIG_KERNEL_NOBP option and the nobp/nospec parameters.
+ */
+}
+
+static int __init spectre_v2_setup_early(char *str)
+{
+ if (str && !strncmp(str, "on", 2)) {
+ nospec_disable = 0;
+ __clear_facility(82, S390_lowcore.alt_stfle_fac_list);
+ }
+ if (str && !strncmp(str, "off", 3))
+ nospec_disable = 1;
+ if (str && !strncmp(str, "auto", 4))
+ nospec_auto_detect();
+ return 0;
+}
+early_param("spectre_v2", spectre_v2_setup_early);
+
+static void __init_or_module __nospec_revert(s32 *start, s32 *end)
+{
+ enum { BRCL_EXPOLINE, BRASL_EXPOLINE } type;
+ u8 *instr, *thunk, *br;
+ u8 insnbuf[6];
+ s32 *epo;
+
+ /* The second part of the instruction replacement is always a nop */
+ for (epo = start; epo < end; epo++) {
+ instr = (u8 *) epo + *epo;
+ if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x04)
+ type = BRCL_EXPOLINE; /* brcl instruction */
+ else if (instr[0] == 0xc0 && (instr[1] & 0x0f) == 0x05)
+ type = BRASL_EXPOLINE; /* brasl instruction */
+ else
+ continue;
+ thunk = instr + (*(int *)(instr + 2)) * 2;
+ if (thunk[0] == 0xc6 && thunk[1] == 0x00)
+ /* exrl %r0,<target-br> */
+ br = thunk + (*(int *)(thunk + 2)) * 2;
+ else if (thunk[0] == 0xc0 && (thunk[1] & 0x0f) == 0x00 &&
+ thunk[6] == 0x44 && thunk[7] == 0x00 &&
+ (thunk[8] & 0x0f) == 0x00 && thunk[9] == 0x00 &&
+ (thunk[1] & 0xf0) == (thunk[8] & 0xf0))
+ /* larl %rx,<target br> + ex %r0,0(%rx) */
+ br = thunk + (*(int *)(thunk + 2)) * 2;
+ else
+ continue;
+ /* Check for unconditional branch 0x07f? or 0x47f???? */
+ if ((br[0] & 0xbf) != 0x07 || (br[1] & 0xf0) != 0xf0)
+ continue;
+
+ memcpy(insnbuf + 2, (char[]) { 0x47, 0x00, 0x07, 0x00 }, 4);
+ switch (type) {
+ case BRCL_EXPOLINE:
+ insnbuf[0] = br[0];
+ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+ if (br[0] == 0x47) {
+ /* brcl to b, replace with bc + nopr */
+ insnbuf[2] = br[2];
+ insnbuf[3] = br[3];
+ } else {
+ /* brcl to br, replace with bcr + nop */
+ }
+ break;
+ case BRASL_EXPOLINE:
+ insnbuf[1] = (instr[1] & 0xf0) | (br[1] & 0x0f);
+ if (br[0] == 0x47) {
+ /* brasl to b, replace with bas + nopr */
+ insnbuf[0] = 0x4d;
+ insnbuf[2] = br[2];
+ insnbuf[3] = br[3];
+ } else {
+ /* brasl to br, replace with basr + nop */
+ insnbuf[0] = 0x0d;
+ }
+ break;
+ }
+
+ s390_kernel_write(instr, insnbuf, 6);
+ }
+}
+
+void __init_or_module nospec_revert(s32 *start, s32 *end)
+{
+ if (nospec_disable)
+ __nospec_revert(start, end);
+}
+
+extern s32 __nospec_call_start[], __nospec_call_end[];
+extern s32 __nospec_return_start[], __nospec_return_end[];
+void __init nospec_init_branches(void)
+{
+ nospec_revert(__nospec_call_start, __nospec_call_end);
+ nospec_revert(__nospec_return_start, __nospec_return_end);
+}
+
+#endif /* CONFIG_EXPOLINE */
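Taken together, the handlers above give the following boot-time knobs (summarised here for orientation; semantics exactly as implemented above): nobp=1 sets facility bit 82 in the alternate facility list and, on expoline-built kernels, disables the expoline mitigation; nobp=0 and nospec both clear bit 82; nospectre_v2 and spectre_v2=off set nospec_disable; spectre_v2=on clears both nospec_disable and bit 82; spectre_v2=auto defers to nospec_auto_detect().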
diff --git a/arch/s390/kernel/nospec-sysfs.c b/arch/s390/kernel/nospec-sysfs.c
new file mode 100644
index 000000000000..8affad5f18cb
--- /dev/null
+++ b/arch/s390/kernel/nospec-sysfs.c
@@ -0,0 +1,21 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/device.h>
+#include <linux/cpu.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
+
+ssize_t cpu_show_spectre_v1(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "Mitigation: __user pointer sanitization\n");
+}
+
+ssize_t cpu_show_spectre_v2(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable)
+ return sprintf(buf, "Mitigation: execute trampolines\n");
+ if (__test_facility(82, S390_lowcore.alt_stfle_fac_list))
+ return sprintf(buf, "Mitigation: limited branch prediction\n");
+ return sprintf(buf, "Vulnerable\n");
+}
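Provided the generic CPU vulnerabilities framework from drivers/base/cpu.c is present in this tree, these attributes appear under /sys/devices/system/cpu/vulnerabilities/. Based on the strings above, reading spectre_v2 from that directory on an expoline-built kernel would return "Mitigation: execute trampolines", spectre_v1 would return "Mitigation: __user pointer sanitization", and a kernel with neither mitigation active would report "Vulnerable".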
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 3d8da1e742c2..b79d51459cf2 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -744,6 +744,10 @@ static int __hw_perf_event_init(struct perf_event *event)
*/
rate = 0;
if (attr->freq) {
+ if (!attr->sample_freq) {
+ err = -EINVAL;
+ goto out;
+ }
rate = freq_to_sample_rate(&si, attr->sample_freq);
rate = hw_limit_rate(&si, rate);
attr->freq = 0;
diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
index 7ce00e7a709a..ab236bd970bb 100644
--- a/arch/s390/kernel/processor.c
+++ b/arch/s390/kernel/processor.c
@@ -13,6 +13,7 @@
#include <linux/cpu.h>
#include <asm/diag.h>
#include <asm/elf.h>
+#include <asm/facility.h>
#include <asm/lowcore.h>
#include <asm/param.h>
#include <asm/smp.h>
@@ -113,3 +114,20 @@ const struct seq_operations cpuinfo_op = {
.show = show_cpuinfo,
};
+int s390_isolate_bp(void)
+{
+ if (!test_facility(82))
+ return -EOPNOTSUPP;
+ set_thread_flag(TIF_ISOLATE_BP);
+ return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp);
+
+int s390_isolate_bp_guest(void)
+{
+ if (!test_facility(82))
+ return -EOPNOTSUPP;
+ set_thread_flag(TIF_ISOLATE_BP_GUEST);
+ return 0;
+}
+EXPORT_SYMBOL(s390_isolate_bp_guest);
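s390_isolate_bp() and s390_isolate_bp_guest() only set a TIF flag on the calling task; the behavioural change comes from the BPENTER/BPEXIT sequences in entry.S that test these flags. A hypothetical caller sketch (not part of this patch):

	#include <asm/processor.h>

	/* Request isolated branch prediction for the current task before it
	 * enters guest code; fails with -EOPNOTSUPP when facility 82 is absent.
	 */
	static int example_enable_bp_isolation(void)
	{
		int rc;

		rc = s390_isolate_bp_guest();	/* sets TIF_ISOLATE_BP_GUEST */
		if (rc)
			return rc;
		/* sie64a's BPEXIT/BPENTER now handle the guest transitions */
		return 0;
	}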
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 52aab0bd84f8..6b1b91c17b40 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -6,8 +6,11 @@
#include <linux/linkage.h>
#include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
#include <asm/sigp.h>
+ GEN_BR_THUNK %r14
+
#
# store_status
#
@@ -62,7 +65,7 @@ ENTRY(store_status)
st %r3,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 4(%r1)
larl %r2,store_status
stg %r2,__LC_PSW_SAVE_AREA-SAVE_AREA_BASE + 8(%r1)
- br %r14
+ BR_EX %r14
.section .bss
.align 8
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index d097d71685df..e7a43a30e3ff 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -63,6 +63,8 @@
#include <asm/sclp.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>
+#include <asm/alternative.h>
+#include <asm/nospec-branch.h>
#include "entry.h"
/*
@@ -333,7 +335,9 @@ static void __init setup_lowcore(void)
lc->machine_flags = S390_lowcore.machine_flags;
lc->stfl_fac_list = S390_lowcore.stfl_fac_list;
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
- MAX_FACILITY_BIT/8);
+ sizeof(lc->stfle_fac_list));
+ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+ sizeof(lc->alt_stfle_fac_list));
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
@@ -370,6 +374,7 @@ static void __init setup_lowcore(void)
#ifdef CONFIG_SMP
lc->spinlock_lockval = arch_spin_lockval(0);
#endif
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
set_prefix((u32)(unsigned long) lc);
lowcore_ptr[0] = lc;
@@ -841,6 +846,9 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end;
+ if (IS_ENABLED(CONFIG_EXPOLINE_AUTO))
+ nospec_auto_detect();
+
parse_early_param();
os_info_init();
setup_ipl();
@@ -893,6 +901,10 @@ void __init setup_arch(char **cmdline_p)
conmode_default();
set_preferred_console();
+ apply_alternative_instructions();
+ if (IS_ENABLED(CONFIG_EXPOLINE))
+ nospec_init_branches();
+
/* Setup zfcpdump support */
setup_zfcpdump();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 9062df575afe..77f4f334a465 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -200,6 +200,7 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
lc->cpu_nr = cpu;
lc->spinlock_lockval = arch_spin_lockval(cpu);
+ lc->br_r1_trampoline = 0x07f1; /* br %r1 */
if (MACHINE_HAS_VX)
lc->vector_save_area_addr =
(unsigned long) &lc->vector_save_area;
@@ -250,7 +251,9 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
__ctl_store(lc->cregs_save_area, 0, 15);
save_access_regs((unsigned int *) lc->access_regs_save_area);
memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
- MAX_FACILITY_BIT/8);
+ sizeof(lc->stfle_fac_list));
+ memcpy(lc->alt_stfle_fac_list, S390_lowcore.alt_stfle_fac_list,
+ sizeof(lc->alt_stfle_fac_list));
}
static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
@@ -299,6 +302,7 @@ static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
mem_assign_absolute(lc->restart_fn, (unsigned long) func);
mem_assign_absolute(lc->restart_data, (unsigned long) data);
mem_assign_absolute(lc->restart_source, source_cpu);
+ __bpon();
asm volatile(
"0: sigp 0,%0,%2 # sigp restart to target cpu\n"
" brc 2,0b # busy, try again\n"
@@ -888,6 +892,7 @@ void __cpu_die(unsigned int cpu)
void __noreturn cpu_die(void)
{
idle_task_exit();
+ __bpon();
pcpu_sigp_retry(pcpu_devices + smp_processor_id(), SIGP_STOP, 0);
for (;;) ;
}
diff --git a/arch/s390/kernel/swsusp.S b/arch/s390/kernel/swsusp.S
index 2d6b6e81f812..60a829c77378 100644
--- a/arch/s390/kernel/swsusp.S
+++ b/arch/s390/kernel/swsusp.S
@@ -12,6 +12,7 @@
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
+#include <asm/nospec-insn.h>
#include <asm/sigp.h>
/*
@@ -23,6 +24,8 @@
* (see below) in the resume process.
* This function runs with disabled interrupts.
*/
+ GEN_BR_THUNK %r14
+
.section .text
ENTRY(swsusp_arch_suspend)
stmg %r6,%r15,__SF_GPRS(%r15)
@@ -102,7 +105,7 @@ ENTRY(swsusp_arch_suspend)
spx 0x318(%r1)
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
lghi %r2,0
- br %r14
+ BR_EX %r14
/*
* Restore saved memory image to correct place and restore register context.
@@ -196,11 +199,10 @@ pgm_check_entry:
larl %r15,init_thread_union
ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER)
larl %r2,.Lpanic_string
- larl %r3,_sclp_print_early
lghi %r1,0
sam31
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
- basr %r14,%r3
+ brasl %r14,_sclp_print_early
larl %r3,.Ldisabled_wait_31
lpsw 0(%r3)
4:
@@ -266,7 +268,7 @@ restore_registers:
/* Return 0 */
lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15)
lghi %r2,0
- br %r14
+ BR_EX %r14
.section .data..nosave,"aw",@progbits
.align 8
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index 66956c09d5bf..3d04dfdabc9f 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -147,6 +147,15 @@ unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
return orig;
}
+bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
+ struct pt_regs *regs)
+{
+ if (ctx == RP_CHECK_CHAIN_CALL)
+ return user_stack_pointer(regs) <= ret->stack;
+ else
+ return user_stack_pointer(regs) < ret->stack;
+}
+
/* Instruction Emulation */
static void adjust_psw_addr(psw_t *psw, unsigned long len)
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index da4f3f2a8186..fb98894a1361 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -79,6 +79,43 @@ SECTIONS
EXIT_DATA
}
+ /*
+ * struct alt_inst entries. From the header (alternative.h):
+ * "Alternative instructions for different CPU types or capabilities"
+ * Think locking instructions on spinlocks.
+ * Note that this is part of the __init region.
+ */
+ . = ALIGN(8);
+ .altinstructions : {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+
+ /*
+ * And here are the replacement instructions. The linker sticks them
+ * in as binary blobs. The .altinstructions section carries enough data
+ * to find their address and length so the kernel can be patched safely.
+ * Note that this is also part of the __init region.
+ */
+ .altinstr_replacement : {
+ *(.altinstr_replacement)
+ }
+
+ /*
+ * Table with the patch locations to undo expolines
+ */
+ .nospec_call_table : {
+ __nospec_call_start = . ;
+ *(.s390_indirect*)
+ __nospec_call_end = . ;
+ }
+ .nospec_return_table : {
+ __nospec_return_start = . ;
+ *(.s390_return*)
+ __nospec_return_end = . ;
+ }
+
/* early.c uses stsi, which requires page aligned data. */
. = ALIGN(PAGE_SIZE);
INIT_DATA_SECTION(0x100)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 23e3f5d77a24..5ddb1debba95 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -118,8 +118,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
- 0xffe6fffbfcfdfc40UL,
- 0x005e800000000000UL,
+ 0xffe6ffffffffffffUL,
+ 0x005effffffffffffUL,
};
unsigned long kvm_s390_fac_list_mask_size(void)
@@ -257,6 +257,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_S390_VECTOR_REGISTERS:
r = MACHINE_HAS_VX;
break;
+ case KVM_CAP_S390_BPB:
+ r = test_facility(82);
+ break;
default:
r = 0;
}
@@ -1264,6 +1267,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
KVM_SYNC_PFAULT;
if (test_kvm_facility(vcpu->kvm, 129))
vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
+ if (test_kvm_facility(vcpu->kvm, 82))
+ vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
if (kvm_is_ucontrol(vcpu->kvm))
return __kvm_ucontrol_vcpu_init(vcpu);
@@ -1327,6 +1332,7 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
current->thread.fpu.fpc = 0;
vcpu->arch.sie_block->gbea = 1;
vcpu->arch.sie_block->pp = 0;
+ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
kvm_clear_async_pf_completion_queue(vcpu);
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
@@ -2145,6 +2151,11 @@ static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
kvm_clear_async_pf_completion_queue(vcpu);
}
+ if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
+ test_kvm_facility(vcpu->kvm, 82)) {
+ vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
+ vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
+ }
kvm_run->kvm_dirty_regs = 0;
}
@@ -2162,6 +2173,7 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
kvm_run->s.regs.pft = vcpu->arch.pfault_token;
kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
+ kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
diff --git a/arch/s390/lib/mem.S b/arch/s390/lib/mem.S
index c6d553e85ab1..16c5998b9792 100644
--- a/arch/s390/lib/mem.S
+++ b/arch/s390/lib/mem.S
@@ -5,6 +5,9 @@
*/
#include <linux/linkage.h>
+#include <asm/nospec-insn.h>
+
+ GEN_BR_THUNK %r14
/*
* memset implementation
@@ -38,7 +41,7 @@ ENTRY(memset)
.Lmemset_clear_rest:
larl %r3,.Lmemset_xc
ex %r4,0(%r3)
- br %r14
+ BR_EX %r14
.Lmemset_fill:
stc %r3,0(%r2)
cghi %r4,1
@@ -55,7 +58,7 @@ ENTRY(memset)
.Lmemset_fill_rest:
larl %r3,.Lmemset_mvc
ex %r4,0(%r3)
- br %r14
+ BR_EX %r14
.Lmemset_xc:
xc 0(1,%r1),0(%r1)
.Lmemset_mvc:
@@ -77,7 +80,7 @@ ENTRY(memcpy)
.Lmemcpy_rest:
larl %r5,.Lmemcpy_mvc
ex %r4,0(%r5)
- br %r14
+ BR_EX %r14
.Lmemcpy_loop:
mvc 0(256,%r1),0(%r3)
la %r1,256(%r1)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 05ae254f84cf..e4a9b585f704 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -574,10 +574,7 @@ static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
else if (is_migration_entry(entry)) {
struct page *page = migration_entry_to_page(entry);
- if (PageAnon(page))
- dec_mm_counter(mm, MM_ANONPAGES);
- else
- dec_mm_counter(mm, MM_FILEPAGES);
+ dec_mm_counter(mm, mm_counter(page));
}
free_swap_and_cache(entry);
}
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
index a1c917d881ec..fa716f2a95a7 100644
--- a/arch/s390/net/bpf_jit.S
+++ b/arch/s390/net/bpf_jit.S
@@ -8,6 +8,7 @@
*/
#include <linux/linkage.h>
+#include <asm/nospec-insn.h>
#include "bpf_jit.h"
/*
@@ -53,7 +54,7 @@ ENTRY(sk_load_##NAME##_pos); \
clg %r3,STK_OFF_HLEN(%r15); /* Offset + SIZE > hlen? */ \
jh sk_load_##NAME##_slow; \
LOAD %r14,-SIZE(%r3,%r12); /* Get data from skb */ \
- b OFF_OK(%r6); /* Return */ \
+ B_EX OFF_OK,%r6; /* Return */ \
\
sk_load_##NAME##_slow:; \
lgr %r2,%r7; /* Arg1 = skb pointer */ \
@@ -63,11 +64,14 @@ sk_load_##NAME##_slow:; \
brasl %r14,skb_copy_bits; /* Get data from skb */ \
LOAD %r14,STK_OFF_TMP(%r15); /* Load from temp buffer */ \
ltgr %r2,%r2; /* Set cc to (%r2 != 0) */ \
- br %r6; /* Return */
+ BR_EX %r6; /* Return */
sk_load_common(word, 4, llgf) /* r14 = *(u32 *) (skb->data+offset) */
sk_load_common(half, 2, llgh) /* r14 = *(u16 *) (skb->data+offset) */
+ GEN_BR_THUNK %r6
+ GEN_B_THUNK OFF_OK,%r6
+
/*
* Load 1 byte from SKB (optimized version)
*/
@@ -79,7 +83,7 @@ ENTRY(sk_load_byte_pos)
clg %r3,STK_OFF_HLEN(%r15) # Offset >= hlen?
jnl sk_load_byte_slow
llgc %r14,0(%r3,%r12) # Get byte from skb
- b OFF_OK(%r6) # Return OK
+ B_EX OFF_OK,%r6 # Return OK
sk_load_byte_slow:
lgr %r2,%r7 # Arg1 = skb pointer
@@ -89,7 +93,7 @@ sk_load_byte_slow:
brasl %r14,skb_copy_bits # Get data from skb
llgc %r14,STK_OFF_TMP(%r15) # Load result from temp buffer
ltgr %r2,%r2 # Set cc to (%r2 != 0)
- br %r6 # Return cc
+ BR_EX %r6 # Return cc
#define sk_negative_common(NAME, SIZE, LOAD) \
sk_load_##NAME##_slow_neg:; \
@@ -103,7 +107,7 @@ sk_load_##NAME##_slow_neg:; \
jz bpf_error; \
LOAD %r14,0(%r2); /* Get data from pointer */ \
xr %r3,%r3; /* Set cc to zero */ \
- br %r6; /* Return cc */
+ BR_EX %r6; /* Return cc */
sk_negative_common(word, 4, llgf)
sk_negative_common(half, 2, llgh)
@@ -112,4 +116,4 @@ sk_negative_common(byte, 1, llgc)
bpf_error:
# force a return 0 from jit handler
ltgr %r15,%r15 # Set condition code
- br %r6
+ BR_EX %r6
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index 1395eeb6005f..a26528afceb2 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -24,6 +24,8 @@
#include <linux/bpf.h>
#include <asm/cacheflush.h>
#include <asm/dis.h>
+#include <asm/facility.h>
+#include <asm/nospec-branch.h>
#include "bpf_jit.h"
int bpf_jit_enable __read_mostly;
@@ -41,6 +43,8 @@ struct bpf_jit {
int base_ip; /* Base address for literal pool */
int ret0_ip; /* Address of return 0 */
int exit_ip; /* Address of exit */
+ int r1_thunk_ip; /* Address of expoline thunk for 'br %r1' */
+ int r14_thunk_ip; /* Address of expoline thunk for 'br %r14' */
int tail_call_start; /* Tail call start offset */
int labels[1]; /* Labels for local jumps */
};
@@ -248,6 +252,19 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1)
REG_SET_SEEN(b2); \
})
+#define EMIT6_PCREL_RILB(op, b, target) \
+({ \
+ int rel = (target - jit->prg) / 2; \
+ _EMIT6(op | reg_high(b) << 16 | rel >> 16, rel & 0xffff); \
+ REG_SET_SEEN(b); \
+})
+
+#define EMIT6_PCREL_RIL(op, target) \
+({ \
+ int rel = (target - jit->prg) / 2; \
+ _EMIT6(op | rel >> 16, rel & 0xffff); \
+})
+
#define _EMIT6_IMM(op, imm) \
({ \
unsigned int __imm = (imm); \
@@ -475,8 +492,45 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
EMIT4(0xb9040000, REG_2, BPF_REG_0);
/* Restore registers */
save_restore_regs(jit, REGS_RESTORE);
+ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+ jit->r14_thunk_ip = jit->prg;
+ /* Generate __s390_indirect_jump_r14 thunk */
+ if (test_facility(35)) {
+ /* exrl %r0,.+10 */
+ EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
+ } else {
+ /* larl %r1,.+14 */
+ EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
+ /* ex 0,0(%r1) */
+ EMIT4_DISP(0x44000000, REG_0, REG_1, 0);
+ }
+ /* j . */
+ EMIT4_PCREL(0xa7f40000, 0);
+ }
/* br %r14 */
_EMIT2(0x07fe);
+
+ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable &&
+ (jit->seen & SEEN_FUNC)) {
+ jit->r1_thunk_ip = jit->prg;
+ /* Generate __s390_indirect_jump_r1 thunk */
+ if (test_facility(35)) {
+ /* exrl %r0,.+10 */
+ EMIT6_PCREL_RIL(0xc6000000, jit->prg + 10);
+ /* j . */
+ EMIT4_PCREL(0xa7f40000, 0);
+ /* br %r1 */
+ _EMIT2(0x07f1);
+ } else {
+ /* larl %r1,.+14 */
+ EMIT6_PCREL_RILB(0xc0000000, REG_1, jit->prg + 14);
+ /* ex 0,S390_lowcore.br_r1_trampoline */
+ EMIT4_DISP(0x44000000, REG_0, REG_0,
+ offsetof(struct _lowcore, br_r1_trampoline));
+ /* j . */
+ EMIT4_PCREL(0xa7f40000, 0);
+ }
+ }
}
/*
@@ -980,8 +1034,13 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i
/* lg %w1,<d(imm)>(%l) */
EMIT6_DISP_LH(0xe3000000, 0x0004, REG_W1, REG_0, REG_L,
EMIT_CONST_U64(func));
- /* basr %r14,%w1 */
- EMIT2(0x0d00, REG_14, REG_W1);
+ if (IS_ENABLED(CC_USING_EXPOLINE) && !nospec_disable) {
+ /* brasl %r14,__s390_indirect_jump_r1 */
+ EMIT6_PCREL_RILB(0xc0050000, REG_14, jit->r1_thunk_ip);
+ } else {
+ /* basr %r14,%w1 */
+ EMIT2(0x0d00, REG_14, REG_W1);
+ }
/* lgr %b0,%r2: load return value into %b0 */
EMIT4(0xb9040000, BPF_REG_0, REG_2);
if (bpf_helper_changes_skb_data((void *)func)) {
diff --git a/arch/sh/include/asm/futex.h b/arch/sh/include/asm/futex.h
index 7be39a646fbd..e05187d26d76 100644
--- a/arch/sh/include/asm/futex.h
+++ b/arch/sh/include/asm/futex.h
@@ -10,20 +10,11 @@
/* XXX: UP variants, fix for SH-4A and SMP.. */
#include <asm/futex-irq.h>
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
@@ -49,17 +40,8 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
return ret;
}
diff --git a/arch/sparc/include/asm/futex_64.h b/arch/sparc/include/asm/futex_64.h
index 4e899b0dabf7..1cfd89d92208 100644
--- a/arch/sparc/include/asm/futex_64.h
+++ b/arch/sparc/include/asm/futex_64.h
@@ -29,22 +29,14 @@
: "r" (uaddr), "r" (oparg), "i" (-EFAULT) \
: "memory")
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tem;
- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
- return -EFAULT;
if (unlikely((((unsigned long) uaddr) & 0x3UL)))
return -EINVAL;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
pagefault_disable();
switch (op) {
@@ -69,17 +61,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index 1a6ef1b69cb1..d96d9dab5c0b 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -106,12 +106,9 @@
lock = __atomic_hashed_lock((int __force *)uaddr)
#endif
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int uninitialized_var(val), ret;
__futex_prolog();
@@ -119,12 +116,6 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
/* The 32-bit futex code makes this assumption, so validate it here. */
BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
case FUTEX_OP_SET:
@@ -148,30 +139,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
}
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (val == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (val != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (val < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (val >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (val <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (val > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = val;
+
return ret;
}
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 583d539a4197..2bc6651791cc 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -364,7 +364,8 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom)
if (status != EFI_SUCCESS)
goto free_struct;
- memcpy(rom->romdata, pci->romimage, pci->romsize);
+ memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
+ pci->romsize);
return status;
free_struct:
@@ -470,7 +471,8 @@ __setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom)
if (status != EFI_SUCCESS)
goto free_struct;
- memcpy(rom->romdata, pci->romimage, pci->romsize);
+ memcpy(rom->romdata, (void *)(unsigned long)pci->romimage,
+ pci->romsize);
return status;
free_struct:
diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
index b4c1f5453436..f4dc9b63bdda 100644
--- a/arch/x86/include/asm/futex.h
+++ b/arch/x86/include/asm/futex.h
@@ -41,20 +41,11 @@
"+m" (*uaddr), "=&r" (tem) \
: "r" (oparg), "i" (-EFAULT), "1" (0))
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret, tem;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
-
pagefault_disable();
switch (op) {
@@ -80,30 +71,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ:
- ret = (oldval == cmparg);
- break;
- case FUTEX_OP_CMP_NE:
- ret = (oldval != cmparg);
- break;
- case FUTEX_OP_CMP_LT:
- ret = (oldval < cmparg);
- break;
- case FUTEX_OP_CMP_GE:
- ret = (oldval >= cmparg);
- break;
- case FUTEX_OP_CMP_LE:
- ret = (oldval <= cmparg);
- break;
- case FUTEX_OP_CMP_GT:
- ret = (oldval > cmparg);
- break;
- default:
- ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index c706b7796870..9b028204685d 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -166,50 +166,6 @@ static inline struct thread_info *current_thread_info(void)
return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
}
-/*
- * Walks up the stack frames to make sure that the specified object is
- * entirely contained by a single stack frame.
- *
- * Returns:
- * 1 if within a frame
- * -1 if placed across a frame boundary (or outside stack)
- * 0 unable to determine (no frame pointers, etc)
- */
-static inline int arch_within_stack_frames(const void * const stack,
- const void * const stackend,
- const void *obj, unsigned long len)
-{
-#if defined(CONFIG_FRAME_POINTER)
- const void *frame = NULL;
- const void *oldframe;
-
- oldframe = __builtin_frame_address(1);
- if (oldframe)
- frame = __builtin_frame_address(2);
- /*
- * low ----------------------------------------------> high
- * [saved bp][saved ip][args][local vars][saved bp][saved ip]
- * ^----------------^
- * allow copies only within here
- */
- while (stack <= frame && frame < stackend) {
- /*
- * If obj + len extends past the last frame, this
- * check won't pass and the next frame will be 0,
- * causing us to bail out and correctly report
- * the copy as invalid.
- */
- if (obj + len <= frame)
- return obj >= oldframe + 2 * sizeof(void *) ? 1 : -1;
- oldframe = frame;
- frame = *(const void * const *)frame;
- }
- return -1;
-#else
- return 0;
-#endif
-}
-
#else /* !__ASSEMBLY__ */
#ifdef CONFIG_X86_64
diff --git a/arch/x86/include/uapi/asm/msgbuf.h b/arch/x86/include/uapi/asm/msgbuf.h
index 809134c644a6..90ab9a795b49 100644
--- a/arch/x86/include/uapi/asm/msgbuf.h
+++ b/arch/x86/include/uapi/asm/msgbuf.h
@@ -1 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X64_MSGBUF_H
+#define __ASM_X64_MSGBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
#include <asm-generic/msgbuf.h>
+#else
+/*
+ * The msqid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+ __kernel_time_t msg_ctime; /* last change time */
+ __kernel_ulong_t msg_cbytes; /* current number of bytes on queue */
+ __kernel_ulong_t msg_qnum; /* number of messages in queue */
+ __kernel_ulong_t msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ __kernel_ulong_t __unused4;
+ __kernel_ulong_t __unused5;
+};
+
+#endif
+
+#endif /* __ASM_X64_MSGBUF_H */
diff --git a/arch/x86/include/uapi/asm/shmbuf.h b/arch/x86/include/uapi/asm/shmbuf.h
index 83c05fc2de38..644421f3823b 100644
--- a/arch/x86/include/uapi/asm/shmbuf.h
+++ b/arch/x86/include/uapi/asm/shmbuf.h
@@ -1 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_X86_SHMBUF_H
+#define __ASM_X86_SHMBUF_H
+
+#if !defined(__x86_64__) || !defined(__ILP32__)
#include <asm-generic/shmbuf.h>
+#else
+/*
+ * The shmid64_ds structure for x86 architecture with x32 ABI.
+ *
+ * On x86-32 and x86-64 we can just use the generic definition, but
+ * x32 uses the same binary layout as x86_64, which is different
+ * from other 32-bit architectures.
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_time_t shm_atime; /* last attach time */
+ __kernel_time_t shm_dtime; /* last detach time */
+ __kernel_time_t shm_ctime; /* last change time */
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ __kernel_ulong_t shm_nattch; /* no. of current attaches */
+ __kernel_ulong_t __unused4;
+ __kernel_ulong_t __unused5;
+};
+
+struct shminfo64 {
+ __kernel_ulong_t shmmax;
+ __kernel_ulong_t shmmin;
+ __kernel_ulong_t shmmni;
+ __kernel_ulong_t shmseg;
+ __kernel_ulong_t shmall;
+ __kernel_ulong_t __unused1;
+ __kernel_ulong_t __unused2;
+ __kernel_ulong_t __unused3;
+ __kernel_ulong_t __unused4;
+};
+
+#endif
+
+#endif /* __ASM_X86_SHMBUF_H */
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b52a8d08ab36..fbf2edc3eb35 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -25,6 +25,7 @@
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>
+#include <linux/nospec.h>
#include <asm/apic.h>
#include <asm/stacktrace.h>
@@ -297,17 +298,20 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
config = attr->config;
- cache_type = (config >> 0) & 0xff;
+ cache_type = (config >> 0) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
+ cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);
cache_op = (config >> 8) & 0xff;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
+ cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);
cache_result = (config >> 16) & 0xff;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
+ cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);
val = hw_cache_event_ids[cache_type][cache_op][cache_result];
@@ -404,6 +408,8 @@ int x86_setup_perfctr(struct perf_event *event)
if (attr->config >= x86_pmu.max_events)
return -EINVAL;
+ attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);
+
/*
* The generic map:
*/
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cstate.c b/arch/x86/kernel/cpu/perf_event_intel_cstate.c
index 75a38b5a2e26..5b8c90935270 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cstate.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cstate.c
@@ -88,6 +88,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/perf_event.h>
+#include <linux/nospec.h>
#include <asm/cpu_device_id.h>
#include "perf_event.h"
@@ -409,6 +410,7 @@ static int cstate_pmu_event_init(struct perf_event *event)
} else if (event->pmu == &cstate_pkg_pmu) {
if (cfg >= PERF_CSTATE_PKG_EVENT_MAX)
return -EINVAL;
+ cfg = array_index_nospec((unsigned long)cfg, PERF_CSTATE_PKG_EVENT_MAX);
if (!pkg_msr[cfg].attr)
return -EINVAL;
event->hw.event_base = pkg_msr[cfg].msr;
diff --git a/arch/x86/kernel/cpu/perf_event_msr.c b/arch/x86/kernel/cpu/perf_event_msr.c
index ec863b9a9f78..067427384a63 100644
--- a/arch/x86/kernel/cpu/perf_event_msr.c
+++ b/arch/x86/kernel/cpu/perf_event_msr.c
@@ -1,4 +1,5 @@
#include <linux/perf_event.h>
+#include <linux/nospec.h>
enum perf_msr_id {
PERF_MSR_TSC = 0,
@@ -115,9 +116,6 @@ static int msr_event_init(struct perf_event *event)
if (event->attr.type != event->pmu->type)
return -ENOENT;
- if (cfg >= PERF_MSR_EVENT_MAX)
- return -EINVAL;
-
/* unsupported modes and filters */
if (event->attr.exclude_user ||
event->attr.exclude_kernel ||
@@ -128,6 +126,11 @@ static int msr_event_init(struct perf_event *event)
event->attr.sample_period) /* no sampling */
return -EINVAL;
+ if (cfg >= PERF_MSR_EVENT_MAX)
+ return -EINVAL;
+
+ cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);
+
if (!msr[cfg].attr)
return -EINVAL;
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 469b23d6acc2..fd7e9937ddd6 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -71,12 +71,17 @@ static void load_segments(void)
static void machine_kexec_free_page_tables(struct kimage *image)
{
free_page((unsigned long)image->arch.pgd);
+ image->arch.pgd = NULL;
#ifdef CONFIG_X86_PAE
free_page((unsigned long)image->arch.pmd0);
+ image->arch.pmd0 = NULL;
free_page((unsigned long)image->arch.pmd1);
+ image->arch.pmd1 = NULL;
#endif
free_page((unsigned long)image->arch.pte0);
+ image->arch.pte0 = NULL;
free_page((unsigned long)image->arch.pte1);
+ image->arch.pte1 = NULL;
}
static int machine_kexec_alloc_page_tables(struct kimage *image)
@@ -93,7 +98,6 @@ static int machine_kexec_alloc_page_tables(struct kimage *image)
!image->arch.pmd0 || !image->arch.pmd1 ||
#endif
!image->arch.pte0 || !image->arch.pte1) {
- machine_kexec_free_page_tables(image);
return -ENOMEM;
}
return 0;
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index ca6e65250b1a..13d6b8ac0b0b 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -37,8 +37,11 @@ static struct kexec_file_ops *kexec_file_loaders[] = {
static void free_transition_pgtable(struct kimage *image)
{
free_page((unsigned long)image->arch.pud);
+ image->arch.pud = NULL;
free_page((unsigned long)image->arch.pmd);
+ image->arch.pmd = NULL;
free_page((unsigned long)image->arch.pte);
+ image->arch.pte = NULL;
}
static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
@@ -79,7 +82,6 @@ static int init_transition_pgtable(struct kimage *image, pgd_t *pgd)
set_pte(pte, pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
return 0;
err:
- free_transition_pgtable(image);
return result;
}
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index fe89f938e0f0..00c7878043ef 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1442,6 +1442,8 @@ static inline void mwait_play_dead(void)
void *mwait_ptr;
int i;
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ return;
if (!this_cpu_has(X86_FEATURE_MWAIT))
return;
if (!this_cpu_has(X86_FEATURE_CLFLUSH))
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index eb02087650d2..c42d4a3d9494 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -408,7 +408,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2)
hpet2 -= hpet1;
tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD));
do_div(tmp, 1000000);
- do_div(deltatsc, tmp);
+ deltatsc = div64_u64(deltatsc, tmp);
return (unsigned long) deltatsc;
}
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 63146c378f1e..2b05f681a1fd 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1316,8 +1316,6 @@ void xen_flush_tlb_all(void)
struct mmuext_op *op;
struct multicall_space mcs;
- trace_xen_mmu_flush_tlb_all(0);
-
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));
@@ -1335,8 +1333,6 @@ static void xen_flush_tlb(void)
struct mmuext_op *op;
struct multicall_space mcs;
- trace_xen_mmu_flush_tlb(0);
-
preempt_disable();
mcs = xen_mc_entry(sizeof(*op));
diff --git a/arch/xtensa/include/asm/futex.h b/arch/xtensa/include/asm/futex.h
index 72bfc1cbc2b5..5bfbc1c401d4 100644
--- a/arch/xtensa/include/asm/futex.h
+++ b/arch/xtensa/include/asm/futex.h
@@ -44,18 +44,10 @@
: "r" (uaddr), "I" (-EFAULT), "r" (oparg) \
: "memory")
-static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
+ u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
#if !XCHAL_HAVE_S32C1I
return -ENOSYS;
@@ -89,19 +81,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (ret)
- return ret;
+ if (!ret)
+ *oval = oldval;
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: return (oldval == cmparg);
- case FUTEX_OP_CMP_NE: return (oldval != cmparg);
- case FUTEX_OP_CMP_LT: return (oldval < cmparg);
- case FUTEX_OP_CMP_GE: return (oldval >= cmparg);
- case FUTEX_OP_CMP_LE: return (oldval <= cmparg);
- case FUTEX_OP_CMP_GT: return (oldval > cmparg);
- }
-
- return -ENOSYS;
+ return ret;
}
static inline int
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
index 04d93d762ded..b999d352f69c 100644
--- a/block/Kconfig.iosched
+++ b/block/Kconfig.iosched
@@ -34,7 +34,7 @@ config IOSCHED_DEADLINE
config IOSCHED_MAPLE
tristate "Maple I/O scheduler"
- default y
+ depends on FB
config IOSCHED_CFQ
tristate "CFQ I/O scheduler"
diff --git a/block/blk-core.c b/block/blk-core.c
index 43f070865640..aa5095eb2e82 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -4078,76 +4078,43 @@ int __init blk_dev_init(void)
* TODO : If necessary, we can make the histograms per-cpu and aggregate
* them when printing them out.
*/
-void
-blk_zero_latency_hist(struct io_latency_state *s)
-{
- memset(s->latency_y_axis_read, 0,
- sizeof(s->latency_y_axis_read));
- memset(s->latency_y_axis_write, 0,
- sizeof(s->latency_y_axis_write));
- s->latency_reads_elems = 0;
- s->latency_writes_elems = 0;
-}
-EXPORT_SYMBOL(blk_zero_latency_hist);
-
ssize_t
-blk_latency_hist_show(struct io_latency_state *s, char *buf)
+blk_latency_hist_show(char *name, struct io_latency_state *s, char *buf,
+ int buf_size)
{
int i;
int bytes_written = 0;
u_int64_t num_elem, elem;
int pct;
-
- num_elem = s->latency_reads_elems;
- if (num_elem > 0) {
- bytes_written += scnprintf(buf + bytes_written,
- PAGE_SIZE - bytes_written,
- "IO svc_time Read Latency Histogram (n = %llu):\n",
- num_elem);
- for (i = 0;
- i < ARRAY_SIZE(latency_x_axis_us);
- i++) {
- elem = s->latency_y_axis_read[i];
- pct = div64_u64(elem * 100, num_elem);
- bytes_written += scnprintf(buf + bytes_written,
- PAGE_SIZE - bytes_written,
- "\t< %5lluus%15llu%15d%%\n",
- latency_x_axis_us[i],
- elem, pct);
- }
- /* Last element in y-axis table is overflow */
- elem = s->latency_y_axis_read[i];
- pct = div64_u64(elem * 100, num_elem);
- bytes_written += scnprintf(buf + bytes_written,
- PAGE_SIZE - bytes_written,
- "\t> %5dms%15llu%15d%%\n", 10,
- elem, pct);
- }
- num_elem = s->latency_writes_elems;
- if (num_elem > 0) {
- bytes_written += scnprintf(buf + bytes_written,
- PAGE_SIZE - bytes_written,
- "IO svc_time Write Latency Histogram (n = %llu):\n",
- num_elem);
- for (i = 0;
- i < ARRAY_SIZE(latency_x_axis_us);
- i++) {
- elem = s->latency_y_axis_write[i];
- pct = div64_u64(elem * 100, num_elem);
- bytes_written += scnprintf(buf + bytes_written,
- PAGE_SIZE - bytes_written,
- "\t< %5lluus%15llu%15d%%\n",
- latency_x_axis_us[i],
- elem, pct);
- }
- /* Last element in y-axis table is overflow */
- elem = s->latency_y_axis_write[i];
- pct = div64_u64(elem * 100, num_elem);
- bytes_written += scnprintf(buf + bytes_written,
- PAGE_SIZE - bytes_written,
- "\t> %5dms%15llu%15d%%\n", 10,
- elem, pct);
+ u_int64_t average;
+
+ num_elem = s->latency_elems;
+ if (num_elem > 0) {
+ average = div64_u64(s->latency_sum, s->latency_elems);
+ bytes_written += scnprintf(buf + bytes_written,
+ buf_size - bytes_written,
+ "IO svc_time %s Latency Histogram (n = %llu,"
+ " average = %llu):\n", name, num_elem, average);
+ for (i = 0;
+ i < ARRAY_SIZE(latency_x_axis_us);
+ i++) {
+ elem = s->latency_y_axis[i];
+ pct = div64_u64(elem * 100, num_elem);
+ bytes_written += scnprintf(buf + bytes_written,
+ PAGE_SIZE - bytes_written,
+ "\t< %6lluus%15llu%15d%%\n",
+ latency_x_axis_us[i],
+ elem, pct);
+ }
+ /* Last element in y-axis table is overflow */
+ elem = s->latency_y_axis[i];
+ pct = div64_u64(elem * 100, num_elem);
+ bytes_written += scnprintf(buf + bytes_written,
+ PAGE_SIZE - bytes_written,
+ "\t>=%6lluus%15llu%15d%%\n",
+ latency_x_axis_us[i - 1], elem, pct);
}
+
return bytes_written;
}
EXPORT_SYMBOL(blk_latency_hist_show);
diff --git a/block/maple-iosched.c b/block/maple-iosched.c
index 795ac4a68a58..c9d1019b3128 100644
--- a/block/maple-iosched.c
+++ b/block/maple-iosched.c
@@ -17,7 +17,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/state_notifier.h>
+#include <linux/fb.h>
#define MAPLE_IOSCHED_PATCHLEVEL (8)
@@ -46,6 +46,10 @@ struct maple_data {
int fifo_batch;
int writes_starved;
int sleep_latency_multiple;
+
+ /* Display state */
+ struct notifier_block fb_notifier;
+ int display_on;
};
static inline struct maple_data *
@@ -86,10 +90,10 @@ maple_add_request(struct request_queue *q, struct request *rq)
* Add request to the proper fifo list and set its
* expire time.
*/
- if (!state_suspended && mdata->fifo_expire[sync][dir]) {
+ if (mdata->display_on && mdata->fifo_expire[sync][dir]) {
rq->fifo_time = jiffies + mdata->fifo_expire[sync][dir];
list_add_tail(&rq->queuelist, &mdata->fifo_list[sync][dir]);
- } else if (state_suspended && fifo_expire_suspended) {
+ } else if (!mdata->display_on && fifo_expire_suspended) {
rq->fifo_time = jiffies + fifo_expire_suspended;
list_add_tail(&rq->queuelist, &mdata->fifo_list[sync][dir]);
}
@@ -214,9 +218,10 @@ maple_dispatch_requests(struct request_queue *q, int force)
/* Retrieve request */
if (!rq) {
/* Treat writes fairly while suspended, otherwise allow them to be starved */
- if (!state_suspended && mdata->starved >= mdata->writes_starved)
+ if (mdata->display_on &&
+ mdata->starved >= mdata->writes_starved)
data_dir = WRITE;
- else if (state_suspended && mdata->starved >= 1)
+ else if (!mdata->display_on && mdata->starved >= 1)
data_dir = WRITE;
rq = maple_choose_request(mdata, data_dir);
@@ -258,6 +263,32 @@ maple_latter_request(struct request_queue *q, struct request *rq)
return list_entry(rq->queuelist.next, struct request, queuelist);
}
+static int fb_notifier_callback(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ struct maple_data *mdata = container_of(self,
+ struct maple_data, fb_notifier);
+ struct fb_event *evdata = data;
+ int *blank;
+
+ if (evdata && evdata->data && event == FB_EVENT_BLANK) {
+ blank = evdata->data;
+ switch (*blank) {
+ case FB_BLANK_UNBLANK:
+ mdata->display_on = 1;
+ break;
+ case FB_BLANK_POWERDOWN:
+ case FB_BLANK_HSYNC_SUSPEND:
+ case FB_BLANK_VSYNC_SUSPEND:
+ case FB_BLANK_NORMAL:
+ mdata->display_on = 0;
+ break;
+ }
+ }
+
+ return 0;
+}
+
static int maple_init_queue(struct request_queue *q, struct elevator_type *e)
{
struct maple_data *mdata;
@@ -275,6 +306,9 @@ static int maple_init_queue(struct request_queue *q, struct elevator_type *e)
}
eq->elevator_data = mdata;
+ mdata->fb_notifier.notifier_call = fb_notifier_callback;
+ fb_register_client(&mdata->fb_notifier);
+
/* Initialize fifo lists */
INIT_LIST_HEAD(&mdata->fifo_list[SYNC][READ]);
INIT_LIST_HEAD(&mdata->fifo_list[SYNC][WRITE]);
@@ -302,6 +336,8 @@ maple_exit_queue(struct elevator_queue *e)
{
struct maple_data *mdata = e->elevator_data;
+ fb_unregister_client(&mdata->fb_notifier);
+
/* Free structure */
kfree(mdata);
}
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index c6fee7437be4..9094c4e3c847 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -694,38 +694,9 @@ int scsi_verify_blk_ioctl(struct block_device *bd, unsigned int cmd)
if (bd && bd == bd->bd_contains)
return 0;
- /* Actually none of these is particularly useful on a partition,
- * but they are safe.
- */
- switch (cmd) {
- case SCSI_IOCTL_GET_IDLUN:
- case SCSI_IOCTL_GET_BUS_NUMBER:
- case SCSI_IOCTL_GET_PCI:
- case SCSI_IOCTL_PROBE_HOST:
- case SG_GET_VERSION_NUM:
- case SG_SET_TIMEOUT:
- case SG_GET_TIMEOUT:
- case SG_GET_RESERVED_SIZE:
- case SG_SET_RESERVED_SIZE:
- case SG_EMULATED_HOST:
- return 0;
- case CDROM_GET_CAPABILITY:
- /* Keep this until we remove the printk below. udev sends it
- * and we do not want to spam dmesg about it. CD-ROMs do
- * not have partitions, so we get here only for disks.
- */
- return -ENOIOCTLCMD;
- default:
- break;
- }
-
if (capable(CAP_SYS_RAWIO))
return 0;
- /* In particular, rule out all resets and host-specific ioctls. */
- printk_ratelimited(KERN_WARNING
- "%s: sending ioctl %x to a partition!\n", current->comm, cmd);
-
return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(scsi_verify_blk_ioctl);
diff --git a/build.config.kasan b/build.config.kasan
index 1d578aa31125..21cf9905edb0 100644
--- a/build.config.kasan
+++ b/build.config.kasan
@@ -9,7 +9,8 @@ function update_kasan_config() {
-e CONFIG_SLUB_DEBUG_ON \
-d CONFIG_SLUB_DEBUG_PANIC_ON \
-d CONFIG_KASAN_OUTLINE \
- -d CONFIG_KERNEL_LZ4
+ -d CONFIG_KERNEL_LZ4 \
+ -d CONFIG_RANDOMIZE_BASE
(cd ${OUT_DIR} && \
make O=${OUT_DIR} $archsubarch CROSS_COMPILE=${CROSS_COMPILE} olddefconfig)
}
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index ca50eeb13097..b5953f1d1a18 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -157,16 +157,16 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
void *private;
int err;
- /* If caller uses non-allowed flag, return error. */
- if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
- return -EINVAL;
-
if (sock->state == SS_CONNECTED)
return -EINVAL;
if (addr_len != sizeof(*sa))
return -EINVAL;
+ /* If caller uses non-allowed flag, return error. */
+ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+ return -EINVAL;
+
sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
sa->salg_name[sizeof(sa->salg_name) - 1] = 0;
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index f0099360039e..1accc01fb0ca 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -68,11 +68,12 @@ static ssize_t driver_override_show(struct device *_dev,
struct device_attribute *attr, char *buf)
{
struct amba_device *dev = to_amba_device(_dev);
+ ssize_t len;
- if (!dev->driver_override)
- return 0;
-
- return sprintf(buf, "%s\n", dev->driver_override);
+ device_lock(_dev);
+ len = sprintf(buf, "%s\n", dev->driver_override);
+ device_unlock(_dev);
+ return len;
}
static ssize_t driver_override_store(struct device *_dev,
@@ -80,9 +81,10 @@ static ssize_t driver_override_store(struct device *_dev,
const char *buf, size_t count)
{
struct amba_device *dev = to_amba_device(_dev);
- char *driver_override, *old = dev->driver_override, *cp;
+ char *driver_override, *old, *cp;
- if (count > PATH_MAX)
+ /* We need to keep extra room for a newline */
+ if (count >= (PAGE_SIZE - 1))
return -EINVAL;
driver_override = kstrndup(buf, count, GFP_KERNEL);
@@ -93,12 +95,15 @@ static ssize_t driver_override_store(struct device *_dev,
if (cp)
*cp = '\0';
+ device_lock(_dev);
+ old = dev->driver_override;
if (strlen(driver_override)) {
dev->driver_override = driver_override;
} else {
kfree(driver_override);
dev->driver_override = NULL;
}
+ device_unlock(_dev);
kfree(old);
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 6a650eb833f5..4e1d55403879 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3040,6 +3040,14 @@ static void binder_transaction(struct binder_proc *proc,
else
return_error = BR_DEAD_REPLY;
mutex_unlock(&context->context_mgr_node_lock);
+ if (target_node && target_proc == proc) {
+ binder_user_error("%d:%d got transaction to context manager from process owning it\n",
+ proc->pid, thread->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -EINVAL;
+ return_error_line = __LINE__;
+ goto err_invalid_target_handle;
+ }
}
if (!target_node) {
/*
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 2d677ba46d77..60d6db82ce5a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4243,6 +4243,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
ATA_HORKAGE_ZERO_AFTER_TRIM |
ATA_HORKAGE_NOLPM, },
+ /* Sandisk devices which are known to not handle LPM well */
+ { "SanDisk SD7UB3Q*G1001", NULL, ATA_HORKAGE_NOLPM, },
+
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
ATA_HORKAGE_ZERO_AFTER_TRIM, },
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index cecfb943762f..6eab52b92e01 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -23,6 +23,7 @@
#include <linux/bitops.h>
#include <linux/wait.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
#include <asm/byteorder.h>
#include <asm/string.h>
#include <asm/io.h>
@@ -1456,6 +1457,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
return -EFAULT;
if (pool < 0 || pool > ZATM_LAST_POOL)
return -EINVAL;
+ pool = array_index_nospec(pool,
+ ZATM_LAST_POOL + 1);
spin_lock_irqsave(&zatm_dev->lock, flags);
info = zatm_dev->pool_info[pool];
if (cmd == ZATM_GETPOOLZ) {
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index a117a16f0f94..578de603bcfd 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -31,7 +31,6 @@
#include <linux/suspend.h>
#include <trace/events/power.h>
#include <linux/cpufreq.h>
-#include <linux/cpuidle.h>
#include <linux/timer.h>
#include <linux/wakeup_reason.h>
@@ -592,7 +591,6 @@ void dpm_resume_noirq(pm_message_t state)
dpm_show_time(starttime, state, "noirq");
resume_device_irqs();
device_wakeup_disarm_wake_irqs();
- cpuidle_resume();
trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
}
@@ -1114,7 +1112,6 @@ int dpm_suspend_noirq(pm_message_t state)
int error = 0;
trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
- cpuidle_pause();
device_wakeup_arm_wake_irqs();
suspend_device_irqs();
mutex_lock(&dpm_list_mtx);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 7fca7cfd5b09..54cef3dc0beb 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -216,6 +216,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
@@ -246,7 +247,6 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
/* QCA ROME chipset */
- { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
{ USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index c206ccda899b..b5f245d2875c 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -2358,7 +2358,7 @@ static int cdrom_ioctl_media_changed(struct cdrom_device_info *cdi,
if (!CDROM_CAN(CDC_SELECT_DISC) || arg == CDSL_CURRENT)
return media_changed(cdi, 1);
- if ((unsigned int)arg >= cdi->capacity)
+ if (arg >= cdi->capacity)
return -EINVAL;
info = kmalloc(sizeof(*info), GFP_KERNEL);
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 7b0bd5408324..cb95c5d994ae 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -62,6 +62,10 @@ obj-$(CONFIG_TILE_SROM) += tile-srom.o
obj-$(CONFIG_DIAG_CHAR) += diag/
obj-$(CONFIG_XILLYBUS) += xillybus/
obj-$(CONFIG_MSM_ADSPRPC) += adsprpc.o
+ifeq ($(SDK_VERSION),)
+SDK_VERSION := 27
+endif
+CFLAGS_adsprpc.o := -DCONFIG_SDK_VERSION="$(SDK_VERSION)"
ifdef CONFIG_COMPAT
obj-$(CONFIG_MSM_ADSPRPC) += adsprpc_compat.o
endif
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index ae7753a69341..4d7a39a5e30d 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -56,7 +56,11 @@
#define ADSP_MMAP_HEAP_ADDR 4
#define ADSP_MMAP_REMOTE_HEAP_ADDR 8
#define FASTRPC_ENOSUCH 39
+#if CONFIG_SDK_VERSION >= 28
+#define VMID_SSC_Q6 38
+#else
#define VMID_SSC_Q6 5
+#endif
#define VMID_ADSP_Q6 6
#define DEBUGFS_SIZE 1024
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index debf7285342a..5341b4d487f4 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -1162,18 +1162,31 @@ void extract_dci_events(unsigned char *buf, int len, int data_source,
struct list_head *start, *temp;
struct diag_dci_client_tbl *entry = NULL;
- length = *(uint16_t *)(buf + 1); /* total length of event series */
- if (length == 0) {
- pr_err("diag: Incoming dci event length is invalid\n");
+ if (!buf) {
+ pr_err("diag: In %s buffer is NULL\n", __func__);
return;
}
/*
- * Move directly to the start of the event series. 1 byte for
- * event code and 2 bytes for the length field.
+ * 1 byte for event code and 2 bytes for the length field.
* The length field indicates the total length removing the cmd_code
 * and the length field. The event parsing in that case should happen
* till the end.
*/
+ if (len < 3) {
+ pr_err("diag: In %s invalid len: %d\n", __func__, len);
+ return;
+ }
+ length = *(uint16_t *)(buf + 1); /* total length of event series */
+ if ((length == 0) || (len != (length + 3))) {
+ pr_err("diag: Incoming dci event length: %d is invalid\n",
+ length);
+ return;
+ }
+ /*
+ * Move directly to the start of the event series.
+ * The event parsing should happen from start of event
+ * series till the end.
+ */
temp_len = 3;
while (temp_len < length) {
event_id_packet = *(uint16_t *)(buf + temp_len);
@@ -1190,30 +1203,60 @@ void extract_dci_events(unsigned char *buf, int len, int data_source,
* necessary.
*/
timestamp_len = 8;
- memcpy(timestamp, buf + temp_len + 2, timestamp_len);
+ if ((temp_len + timestamp_len + 2) <= len)
+ memcpy(timestamp, buf + temp_len + 2,
+ timestamp_len);
+ else {
+ pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
+ __func__, len, temp_len);
+ return;
+ }
}
/* 13th and 14th bit represent the payload length */
if (((event_id_packet & 0x6000) >> 13) == 3) {
payload_len_field = 1;
- payload_len = *(uint8_t *)
+ if ((temp_len + timestamp_len + 3) <= len) {
+ payload_len = *(uint8_t *)
(buf + temp_len + 2 + timestamp_len);
- if (payload_len < (MAX_EVENT_SIZE - 13)) {
- /* copy the payload length and the payload */
+ } else {
+ pr_err("diag: Invalid length in %s, len: %d, temp_len: %d",
+ __func__, len, temp_len);
+ return;
+ }
+ if ((payload_len < (MAX_EVENT_SIZE - 13)) &&
+ ((temp_len + timestamp_len + payload_len + 3) <= len)) {
+ /*
+ * Copy the payload length and the payload
+ * after skipping temp_len bytes for already
+ * parsed packet, timestamp_len for timestamp
+ * buffer, 2 bytes for event_id_packet.
+ */
memcpy(event_data + 12, buf + temp_len + 2 +
timestamp_len, 1);
memcpy(event_data + 13, buf + temp_len + 2 +
timestamp_len + 1, payload_len);
} else {
- pr_err("diag: event > %d, payload_len = %d\n",
- (MAX_EVENT_SIZE - 13), payload_len);
+ pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
+ (MAX_EVENT_SIZE - 13), payload_len, temp_len);
return;
}
} else {
payload_len_field = 0;
payload_len = (event_id_packet & 0x6000) >> 13;
- /* copy the payload */
- memcpy(event_data + 12, buf + temp_len + 2 +
+ /*
+ * Copy the payload after skipping temp_len bytes
+ * for already parsed packet, timestamp_len for
+ * timestamp buffer, 2 bytes for event_id_packet.
+ */
+ if ((payload_len < (MAX_EVENT_SIZE - 12)) &&
+ ((temp_len + timestamp_len + payload_len + 2) <= len))
+ memcpy(event_data + 12, buf + temp_len + 2 +
timestamp_len, payload_len);
+ else {
+ pr_err("diag: event > %d, payload_len = %d, temp_len = %d\n",
+ (MAX_EVENT_SIZE - 12), payload_len, temp_len);
+ return;
+ }
}
/* Before copying the data to userspace, check if we are still
@@ -1337,19 +1380,19 @@ void extract_dci_log(unsigned char *buf, int len, int data_source, int token,
pr_err("diag: In %s buffer is NULL\n", __func__);
return;
}
-
- /* The first six bytes for the incoming log packet contains
- * Command code (2), the length of the packet (2) and the length
- * of the log (2)
+ /*
+ * The first eight bytes for the incoming log packet contains
+ * Command code (2), the length of the packet (2), the length
+ * of the log (2) and log code (2)
*/
- log_code = *(uint16_t *)(buf + 6);
- read_bytes += sizeof(uint16_t) + 6;
- if (read_bytes > len) {
- pr_err("diag: Invalid length in %s, len: %d, read: %d",
- __func__, len, read_bytes);
+ if (len < 8) {
+ pr_err("diag: In %s invalid len: %d\n", __func__, len);
return;
}
+ log_code = *(uint16_t *)(buf + 6);
+ read_bytes += sizeof(uint16_t) + 6;
+
/* parse through log mask table of each client and check mask */
mutex_lock(&driver->dci_mutex);
list_for_each_safe(start, temp, &driver->dci_client_list) {
@@ -1376,6 +1419,10 @@ void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source,
pr_err("diag: In %s buffer is NULL\n", __func__);
return;
}
+ if (len < (EXT_HDR_LEN + sizeof(uint8_t))) {
+ pr_err("diag: In %s invalid len: %d\n", __func__, len);
+ return;
+ }
version = *(uint8_t *)buf + 1;
if (version < EXT_HDR_VERSION) {
@@ -1387,10 +1434,6 @@ void extract_dci_ext_pkt(unsigned char *buf, int len, int data_source,
pkt = buf + EXT_HDR_LEN;
pkt_cmd_code = *(uint8_t *)pkt;
len -= EXT_HDR_LEN;
- if (len < 0) {
- pr_err("diag: %s, Invalid length len: %d\n", __func__, len);
- return;
- }
switch (pkt_cmd_code) {
case LOG_CMD_CODE:
@@ -2893,6 +2936,8 @@ int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
new_entry->num_buffers = 1;
break;
}
+
+ new_entry->buffers = NULL;
new_entry->real_time = MODE_REALTIME;
new_entry->in_service = 0;
INIT_LIST_HEAD(&new_entry->list_write_buf);
@@ -2966,7 +3011,8 @@ int diag_dci_register_client(struct diag_dci_reg_tbl_t *reg_entry)
fail_alloc:
if (new_entry) {
- for (i = 0; i < new_entry->num_buffers; i++) {
+ for (i = 0; ((i < new_entry->num_buffers) &&
+ new_entry->buffers); i++) {
proc_buf = &new_entry->buffers[i];
if (proc_buf) {
mutex_destroy(&proc_buf->health_mutex);
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index ba6ff16030f0..01ec07b466ca 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -784,7 +784,9 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_msg_mask_update(i, req->ssid_first, req->ssid_last);
+ mutex_unlock(&driver->md_session_lock);
}
end:
return write_len;
@@ -858,7 +860,9 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_msg_mask_update(i, ALL_SSID, ALL_SSID);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -956,7 +960,9 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_event_mask_update(i);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -1008,7 +1014,9 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_event_mask_update(i);
+ mutex_unlock(&driver->md_session_lock);
}
memcpy(dest_buf, &header, sizeof(header));
write_len += sizeof(header);
@@ -1281,7 +1289,9 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_log_mask_update(i, req->equip_id);
+ mutex_unlock(&driver->md_session_lock);
}
end:
return write_len;
@@ -1340,7 +1350,9 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
for (i = 0; i < NUM_PERIPHERALS; i++) {
if (!diag_check_update(i))
continue;
+ mutex_lock(&driver->md_session_lock);
diag_send_log_mask_update(i, ALL_EQUIP_ID);
+ mutex_unlock(&driver->md_session_lock);
}
return write_len;
@@ -2040,9 +2052,11 @@ void diag_send_updates_peripheral(uint8_t peripheral)
diag_send_feature_mask_update(peripheral);
if (driver->time_sync_enabled)
diag_send_time_sync_update(peripheral);
+ mutex_lock(&driver->md_session_lock);
diag_send_msg_mask_update(peripheral, ALL_SSID, ALL_SSID);
diag_send_log_mask_update(peripheral, ALL_EQUIP_ID);
diag_send_event_mask_update(peripheral);
+ mutex_unlock(&driver->md_session_lock);
diag_send_real_time_update(peripheral,
driver->real_time_mode[DIAG_LOCAL_PROC]);
diag_send_peripheral_buffering_mode(
diff --git a/drivers/char/diag/diag_memorydevice.c b/drivers/char/diag/diag_memorydevice.c
index 1253921e7c02..c6e29ef0bb71 100644
--- a/drivers/char/diag/diag_memorydevice.c
+++ b/drivers/char/diag/diag_memorydevice.c
@@ -210,6 +210,7 @@ int diag_md_write(int id, unsigned char *buf, int len, int ctx)
found = 1;
driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
pr_debug("diag: wake up logging process\n");
wake_up_interruptible(&driver->wait_q);
}
diff --git a/drivers/char/diag/diagchar.h b/drivers/char/diag/diagchar.h
index a652256e0c29..d9714e323976 100644
--- a/drivers/char/diag/diagchar.h
+++ b/drivers/char/diag/diagchar.h
@@ -26,6 +26,8 @@
#include <asm/atomic.h>
#include "diagfwd_bridge.h"
+#define THRESHOLD_CLIENT_LIMIT 50
+
/* Size of the USB buffers used for read and write*/
#define USB_MAX_OUT_BUF 4096
#define APPS_BUF_SIZE 4096
@@ -497,6 +499,7 @@ struct diagchar_dev {
wait_queue_head_t wait_q;
struct diag_client_map *client_map;
int *data_ready;
+ atomic_t data_ready_notif[THRESHOLD_CLIENT_LIMIT];
int num_clients;
int polling_reg_flag;
int use_device_tree;
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 93bbe3d28344..a45b7a87c641 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -137,7 +137,6 @@ module_param(poolsize_qsc_usb, uint, 0);
 /* This is the max number of user-space clients supported at initialization */
static unsigned int max_clients = 15;
-static unsigned int threshold_client_limit = 50;
module_param(max_clients, uint, 0);
/* Timer variables */
@@ -333,7 +332,7 @@ static int diagchar_open(struct inode *inode, struct file *file)
if (i < driver->num_clients) {
diag_add_client(i, file);
} else {
- if (i < threshold_client_limit) {
+ if (i < THRESHOLD_CLIENT_LIMIT) {
driver->num_clients++;
temp = krealloc(driver->client_map
, (driver->num_clients) * sizeof(struct
@@ -363,11 +362,17 @@ static int diagchar_open(struct inode *inode, struct file *file)
}
}
driver->data_ready[i] = 0x0;
+ atomic_set(&driver->data_ready_notif[i], 0);
driver->data_ready[i] |= MSG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= EVENT_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= LOG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= DCI_LOG_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
driver->data_ready[i] |= DCI_EVENT_MASKS_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
if (driver->ref_count == 0)
diag_mempool_init();
@@ -378,8 +383,8 @@ static int diagchar_open(struct inode *inode, struct file *file)
return -ENOMEM;
fail:
- mutex_unlock(&driver->diagchar_mutex);
driver->num_clients--;
+ mutex_unlock(&driver->diagchar_mutex);
pr_err_ratelimited("diag: Insufficient memory for new client");
return -ENOMEM;
}
@@ -991,14 +996,34 @@ static int diag_send_raw_data_remote(int proc, void *buf, int len,
else
hdlc_disabled = driver->hdlc_disabled;
if (hdlc_disabled) {
+ if (len < 4) {
+ pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
+ __func__, len);
+ return -EBADMSG;
+ }
payload = *(uint16_t *)(buf + 2);
+ if (payload > DIAG_MAX_HDLC_BUF_SIZE) {
+ pr_err("diag: Dropping packet, payload size is %d\n",
+ payload);
+ return -EBADMSG;
+ }
driver->hdlc_encode_buf_len = payload;
/*
- * Adding 4 bytes for start (1 byte), version (1 byte) and
- * payload (2 bytes)
+ * Adding 5 bytes for start (1 byte), version (1 byte),
+ * payload (2 bytes) and end (1 byte)
*/
- memcpy(driver->hdlc_encode_buf, buf + 4, payload);
- goto send_data;
+ if (len == (payload + 5)) {
+ /*
+ * Adding 4 bytes for start (1 byte), version (1 byte)
+ * and payload (2 bytes)
+ */
+ memcpy(driver->hdlc_encode_buf, buf + 4, payload);
+ goto send_data;
+ } else {
+ pr_err("diag: In %s, invalid len: %d of non_hdlc pkt",
+ __func__, len);
+ return -EBADMSG;
+ }
}
if (hdlc_flag) {
@@ -1770,6 +1795,7 @@ static int diag_ioctl_lsm_deinit(void)
}
driver->data_ready[i] |= DEINIT_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
mutex_unlock(&driver->diagchar_mutex);
wake_up_interruptible(&driver->wait_q);
@@ -2885,9 +2911,11 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
int write_len = 0;
struct diag_md_session_t *session_info = NULL;
+ mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < driver->num_clients; i++)
if (driver->client_map[i].pid == current->tgid)
index = i;
+ mutex_unlock(&driver->diagchar_mutex);
if (index == -1) {
pr_err("diag: Client PID not found in table");
@@ -2897,7 +2925,8 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
pr_err("diag: bad address from user side\n");
return -EFAULT;
}
- wait_event_interruptible(driver->wait_q, driver->data_ready[index]);
+ wait_event_interruptible(driver->wait_q,
+ atomic_read(&driver->data_ready_notif[index]) > 0);
mutex_lock(&driver->diagchar_mutex);
@@ -2908,6 +2937,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
/*Copy the type of data being passed*/
data_type = driver->data_ready[index] & USER_SPACE_DATA_TYPE;
driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
/* place holder for number of data field */
ret += sizeof(int);
@@ -2921,11 +2951,13 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
/* In case, the thread wakes up and the logging mode is
not memory device any more, the condition needs to be cleared */
driver->data_ready[index] ^= USER_SPACE_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
}
if (driver->data_ready[index] & HDLC_SUPPORT_TYPE) {
data_type = driver->data_ready[index] & HDLC_SUPPORT_TYPE;
driver->data_ready[index] ^= HDLC_SUPPORT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
COPY_USER_SPACE_OR_EXIT(buf, data_type, sizeof(int));
mutex_lock(&driver->md_session_lock);
session_info = diag_md_session_get_pid(current->tgid);
@@ -2942,6 +2974,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
data_type = driver->data_ready[index] & DEINIT_TYPE;
COPY_USER_SPACE_OR_EXIT(buf, data_type, 4);
driver->data_ready[index] ^= DEINIT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
mutex_unlock(&driver->diagchar_mutex);
diag_remove_client_entry(file);
return ret;
@@ -2957,6 +2990,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
if (write_len > 0)
ret += write_len;
driver->data_ready[index] ^= MSG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -2976,6 +3010,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
event_mask.mask_len);
}
driver->data_ready[index] ^= EVENT_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -2989,6 +3024,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
if (write_len > 0)
ret += write_len;
driver->data_ready[index] ^= LOG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3000,6 +3036,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
*(driver->apps_req_buf),
driver->apps_req_buf_len);
driver->data_ready[index] ^= PKT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
driver->in_busy_pktdata = 0;
goto exit;
}
@@ -3011,6 +3048,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
COPY_USER_SPACE_OR_EXIT(buf+4, *(driver->dci_pkt_buf),
driver->dci_pkt_length);
driver->data_ready[index] ^= DCI_PKT_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
driver->in_busy_dcipktdata = 0;
goto exit;
}
@@ -3023,6 +3061,7 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
COPY_USER_SPACE_OR_EXIT(buf + 8, (dci_ops_tbl[DCI_LOCAL_PROC].
event_mask_composite), DCI_EVENT_MASK_SIZE);
driver->data_ready[index] ^= DCI_EVENT_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
@@ -3034,15 +3073,16 @@ static ssize_t diagchar_read(struct file *file, char __user *buf, size_t count,
COPY_USER_SPACE_OR_EXIT(buf+8, (dci_ops_tbl[DCI_LOCAL_PROC].
log_mask_composite), DCI_LOG_MASK_SIZE);
driver->data_ready[index] ^= DCI_LOG_MASKS_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
goto exit;
}
exit:
- mutex_unlock(&driver->diagchar_mutex);
if (driver->data_ready[index] & DCI_DATA_TYPE) {
- mutex_lock(&driver->dci_mutex);
- /* Copy the type of data being passed */
data_type = driver->data_ready[index] & DCI_DATA_TYPE;
+ mutex_unlock(&driver->diagchar_mutex);
+ /* Copy the type of data being passed */
+ mutex_lock(&driver->dci_mutex);
list_for_each_safe(start, temp, &driver->dci_client_list) {
entry = list_entry(start, struct diag_dci_client_tbl,
track);
@@ -3065,6 +3105,7 @@ exit:
exit_stat = diag_copy_dci(buf, count, entry, &ret);
mutex_lock(&driver->diagchar_mutex);
driver->data_ready[index] ^= DCI_DATA_TYPE;
+ atomic_dec(&driver->data_ready_notif[index]);
mutex_unlock(&driver->diagchar_mutex);
if (exit_stat == 1) {
mutex_unlock(&driver->dci_mutex);
@@ -3074,6 +3115,7 @@ exit:
mutex_unlock(&driver->dci_mutex);
goto end;
}
+ mutex_unlock(&driver->diagchar_mutex);
end:
/*
* Flush any read that is currently pending on DCI data and
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index 30d02cea4776..3f8349978456 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -225,6 +225,7 @@ void chk_logging_wakeup(void)
* situation.
*/
driver->data_ready[i] |= USER_SPACE_DATA_TYPE;
+ atomic_inc(&driver->data_ready_notif[i]);
pr_debug("diag: Force wakeup of logging process\n");
wake_up_interruptible(&driver->wait_q);
break;
@@ -479,8 +480,10 @@ void diag_update_userspace_clients(unsigned int type)
mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < driver->num_clients; i++)
- if (driver->client_map[i].pid != 0)
+ if (driver->client_map[i].pid != 0) {
driver->data_ready[i] |= type;
+ atomic_inc(&driver->data_ready_notif[i]);
+ }
wake_up_interruptible(&driver->wait_q);
mutex_unlock(&driver->diagchar_mutex);
}
@@ -497,6 +500,8 @@ void diag_update_md_clients(unsigned int type)
driver->client_map[j].pid ==
driver->md_session_map[i]->pid) {
driver->data_ready[j] |= type;
+ atomic_inc(
+ &driver->data_ready_notif[j]);
break;
}
}
@@ -512,6 +517,7 @@ void diag_update_sleeping_process(int process_id, int data_type)
for (i = 0; i < driver->num_clients; i++)
if (driver->client_map[i].pid == process_id) {
driver->data_ready[i] |= data_type;
+ atomic_inc(&driver->data_ready_notif[i]);
break;
}
wake_up_interruptible(&driver->wait_q);
@@ -1694,6 +1700,8 @@ int diagfwd_init(void)
, GFP_KERNEL)) == NULL)
goto err;
kmemleak_not_leak(driver->data_ready);
+ for (i = 0; i < THRESHOLD_CLIENT_LIMIT; i++)
+ atomic_set(&driver->data_ready_notif[i], 0);
if (driver->apps_req_buf == NULL) {
driver->apps_req_buf = kzalloc(DIAG_MAX_REQ_SIZE, GFP_KERNEL);
if (!driver->apps_req_buf)
diff --git a/drivers/char/diag/diagfwd_mhi.c b/drivers/char/diag/diagfwd_mhi.c
index 8b0e1f32bdc5..58623f23eec2 100644
--- a/drivers/char/diag/diagfwd_mhi.c
+++ b/drivers/char/diag/diagfwd_mhi.c
@@ -197,7 +197,7 @@ static void mhi_buf_tbl_clear(struct diag_mhi_info *mhi_info)
struct diag_mhi_buf_tbl_t *item = NULL;
struct diag_mhi_ch_t *ch = NULL;
- if (!mhi_info || !mhi_info->enabled)
+ if (!mhi_info)
return;
/* Clear all the pending reads */
diff --git a/drivers/char/diag/diagfwd_peripheral.c b/drivers/char/diag/diagfwd_peripheral.c
index f4ad8b1549ed..8d7d877af3a9 100644
--- a/drivers/char/diag/diagfwd_peripheral.c
+++ b/drivers/char/diag/diagfwd_peripheral.c
@@ -995,6 +995,21 @@ static void __diag_fwd_open(struct diagfwd_info *fwd_info)
if (!fwd_info->inited)
return;
+ /*
+ * Logging mode here is reflecting previous mode
+ * status and will be updated to new mode later.
+ *
+ * Keeping the buffers busy for Memory Device Mode.
+ */
+
+ if ((driver->logging_mode != DIAG_USB_MODE) ||
+ driver->usb_connected) {
+ if (fwd_info->buf_1)
+ atomic_set(&fwd_info->buf_1->in_busy, 0);
+ if (fwd_info->buf_2)
+ atomic_set(&fwd_info->buf_2->in_busy, 0);
+ }
+
if (fwd_info->p_ops && fwd_info->p_ops->open)
fwd_info->p_ops->open(fwd_info->ctxt);
@@ -1165,11 +1180,13 @@ void diagfwd_write_done(uint8_t peripheral, uint8_t type, int ctxt)
return;
fwd_info = &peripheral_info[type][peripheral];
- if (ctxt == 1 && fwd_info->buf_1)
+ if (ctxt == 1 && fwd_info->buf_1) {
atomic_set(&fwd_info->buf_1->in_busy, 0);
- else if (ctxt == 2 && fwd_info->buf_2)
+ driver->cpd_len_1 = 0;
+ } else if (ctxt == 2 && fwd_info->buf_2) {
atomic_set(&fwd_info->buf_2->in_busy, 0);
- else if (ctxt == 3 && fwd_info->buf_upd_1_a) {
+ driver->cpd_len_2 = 0;
+ } else if (ctxt == 3 && fwd_info->buf_upd_1_a) {
atomic_set(&fwd_info->buf_upd_1_a->in_busy, 0);
if (driver->cpd_len_1 == 0)
atomic_set(&fwd_info->buf_1->in_busy, 0);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index be0b09a0fb44..2aca689061e1 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1399,7 +1399,6 @@ static int add_port(struct ports_device *portdev, u32 id)
{
char debugfs_name[16];
struct port *port;
- struct port_buffer *buf;
dev_t devt;
unsigned int nr_added_bufs;
int err;
@@ -1510,8 +1509,6 @@ static int add_port(struct ports_device *portdev, u32 id)
return 0;
free_inbufs:
- while ((buf = virtqueue_detach_unused_buf(port->in_vq)))
- free_buf(buf, true);
free_device:
device_destroy(pdrvdata.class, port->dev->devt);
free_cdev:
@@ -1536,34 +1533,14 @@ static void remove_port(struct kref *kref)
static void remove_port_data(struct port *port)
{
- struct port_buffer *buf;
-
spin_lock_irq(&port->inbuf_lock);
/* Remove unused data this port might have received. */
discard_port_data(port);
spin_unlock_irq(&port->inbuf_lock);
- /* Remove buffers we queued up for the Host to send us data in. */
- do {
- spin_lock_irq(&port->inbuf_lock);
- buf = virtqueue_detach_unused_buf(port->in_vq);
- spin_unlock_irq(&port->inbuf_lock);
- if (buf)
- free_buf(buf, true);
- } while (buf);
-
spin_lock_irq(&port->outvq_lock);
reclaim_consumed_buffers(port);
spin_unlock_irq(&port->outvq_lock);
-
- /* Free pending buffers from the out-queue. */
- do {
- spin_lock_irq(&port->outvq_lock);
- buf = virtqueue_detach_unused_buf(port->out_vq);
- spin_unlock_irq(&port->outvq_lock);
- if (buf)
- free_buf(buf, true);
- } while (buf);
}
/*
@@ -1788,13 +1765,24 @@ static void control_work_handler(struct work_struct *work)
spin_unlock(&portdev->c_ivq_lock);
}
+static void flush_bufs(struct virtqueue *vq, bool can_sleep)
+{
+ struct port_buffer *buf;
+ unsigned int len;
+
+ while ((buf = virtqueue_get_buf(vq, &len)))
+ free_buf(buf, can_sleep);
+}
+
static void out_intr(struct virtqueue *vq)
{
struct port *port;
port = find_port_by_vq(vq->vdev->priv, vq);
- if (!port)
+ if (!port) {
+ flush_bufs(vq, false);
return;
+ }
wake_up_interruptible(&port->waitqueue);
}
@@ -1805,8 +1793,10 @@ static void in_intr(struct virtqueue *vq)
unsigned long flags;
port = find_port_by_vq(vq->vdev->priv, vq);
- if (!port)
+ if (!port) {
+ flush_bufs(vq, false);
return;
+ }
spin_lock_irqsave(&port->inbuf_lock, flags);
port->inbuf = get_inbuf(port);
@@ -1981,6 +1971,15 @@ static const struct file_operations portdev_fops = {
static void remove_vqs(struct ports_device *portdev)
{
+ struct virtqueue *vq;
+
+ virtio_device_for_each_vq(portdev->vdev, vq) {
+ struct port_buffer *buf;
+
+ flush_bufs(vq, true);
+ while ((buf = virtqueue_detach_unused_buf(vq)))
+ free_buf(buf, true);
+ }
portdev->vdev->config->del_vqs(portdev->vdev);
kfree(portdev->in_vqs);
kfree(portdev->out_vqs);
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index 167648744d13..2a7dc48d4ee6 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2743,7 +2743,7 @@ static ssize_t debugfs_trace_method_get(struct file *file, char __user *buf,
len = snprintf(debug_buf, sizeof(debug_buf), "xor\n");
else
return -EINVAL;
- rc = simple_read_from_buffer((void __user *) buf, len, ppos,
+ rc = simple_read_from_buffer((void __user *) buf, count, ppos,
(void *) debug_buf, len);
mutex_unlock(&debug_buf_mutex);
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index f82ddc3b008b..36c62ea1d539 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2623,7 +2623,7 @@ static ssize_t debugfs_trace_method_get(struct file *file, char __user *buf,
else if (c->trace_method == XOR_PACKET)
len = snprintf(debug_buf, sizeof(debug_buf), "xor\n");
- rc = simple_read_from_buffer((void __user *) buf, len, ppos,
+ rc = simple_read_from_buffer((void __user *) buf, count, ppos,
(void *) debug_buf, len);
mutex_unlock(&debug_buf_mutex);
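
Both OSM clock drivers carried the same bug: simple_read_from_buffer() takes the reader's buffer size (count) as its second argument and the amount of source data as its last, but the old code passed len (the bytes just formatted) in both roles, ignoring the caller's count and allowing a short read buffer to be overrun. The fix passes count. A small standalone model of the helper's semantics; simple_read_model() is an invented userspace stand-in, not the kernel API:

	#include <stdio.h>
	#include <string.h>
	#include <sys/types.h>

	/*
	 * Userspace model of simple_read_from_buffer(): copy at most @count bytes
	 * (the reader's buffer size) of the @available-byte source into @to,
	 * starting at *@ppos, and advance *@ppos.
	 */
	static ssize_t simple_read_model(char *to, size_t count, long *ppos,
					 const char *from, size_t available)
	{
		long pos = *ppos;
		size_t n;

		if (pos < 0 || (size_t)pos >= available)
			return 0;
		n = available - (size_t)pos;
		if (n > count)
			n = count;	/* never copy more than the destination holds */
		memcpy(to, from + pos, n);
		*ppos = pos + (long)n;
		return (ssize_t)n;
	}

	int main(void)
	{
		char src[32], dst[4];
		long pos = 0;
		int len = snprintf(src, sizeof(src), "xor\n");

		/* Correct call shape: second argument is the reader's count,
		 * last argument is how much source data is available. */
		ssize_t rc = simple_read_model(dst, sizeof(dst), &pos, src, (size_t)len);
		printf("copied %zd of %d bytes\n", rc, len);
		return 0;
	}
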
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 68bb849b1e13..d86ee9723a14 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -232,6 +232,15 @@ config CPU_FREQ_GOV_SCHEDUTIL
If in doubt, say N.
+config CPU_WAKE_BOOST
+ bool "CPU wake boost"
+ depends on FB
+ help
+ Boosts all online CPUs to policy->max (scaling_max_freq) when the display
+ is powered on, as reported by the framebuffer notifier chain. This helps
+ the display power on faster and ensures good performance immediately
+ after the display is powered on.
+
comment "CPU frequency scaling drivers"
config CPUFREQ_DT
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 5eb75463c53b..96e1252a199e 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -13,6 +13,9 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
obj-$(CONFIG_CPU_FREQ_GOV_COMMON) += cpufreq_governor.o
+# CPU wake boost
+obj-$(CONFIG_CPU_WAKE_BOOST) += cpu_wake_boost.o
+
obj-$(CONFIG_CPUFREQ_DT) += cpufreq-dt.o
##################################################################################
diff --git a/drivers/cpufreq/cpu_wake_boost.c b/drivers/cpufreq/cpu_wake_boost.c
new file mode 100644
index 000000000000..e72322cc4091
--- /dev/null
+++ b/drivers/cpufreq/cpu_wake_boost.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2018, Sultan Alsawaf <sultanxda@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/fb.h>
+#include <linux/slab.h>
+
+enum boost_state {
+ NO_BOOST,
+ UNBOOST,
+ BOOST
+};
+
+/* The duration in milliseconds for the wake boost */
+#define FB_BOOST_MS (3000)
+
+struct wake_boost_info {
+ struct workqueue_struct *wq;
+ struct work_struct boost_work;
+ struct delayed_work unboost_work;
+ struct notifier_block cpu_notif;
+ struct notifier_block fb_notif;
+ enum boost_state state;
+};
+
+static void update_online_cpu_policy(void)
+{
+ int cpu;
+
+ /* Trigger cpufreq notifier for online CPUs */
+ get_online_cpus();
+ for_each_online_cpu(cpu)
+ cpufreq_update_policy(cpu);
+ put_online_cpus();
+}
+
+static void wake_boost(struct work_struct *work)
+{
+ struct wake_boost_info *w = container_of(work, typeof(*w), boost_work);
+
+ w->state = BOOST;
+ update_online_cpu_policy();
+
+ queue_delayed_work(w->wq, &w->unboost_work,
+ msecs_to_jiffies(FB_BOOST_MS));
+}
+
+static void wake_unboost(struct work_struct *work)
+{
+ struct wake_boost_info *w =
+ container_of(work, typeof(*w), unboost_work.work);
+
+ w->state = UNBOOST;
+ update_online_cpu_policy();
+}
+
+static int do_cpu_boost(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct wake_boost_info *w = container_of(nb, typeof(*w), cpu_notif);
+ struct cpufreq_policy *policy = data;
+
+ if (action != CPUFREQ_ADJUST)
+ return NOTIFY_OK;
+
+ switch (w->state) {
+ case UNBOOST:
+ policy->min = policy->cpuinfo.min_freq;
+ w->state = NO_BOOST;
+ break;
+ case BOOST:
+ policy->min = policy->max;
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static int fb_notifier_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct wake_boost_info *w = container_of(nb, typeof(*w), fb_notif);
+ struct fb_event *evdata = data;
+ int *blank = evdata->data;
+
+ /* Parse framebuffer events as soon as they occur */
+ if (action != FB_EARLY_EVENT_BLANK)
+ return NOTIFY_OK;
+
+ if (*blank == FB_BLANK_UNBLANK) {
+ queue_work(w->wq, &w->boost_work);
+ } else {
+ if (cancel_delayed_work_sync(&w->unboost_work))
+ queue_delayed_work(w->wq, &w->unboost_work, 0);
+ }
+
+ return NOTIFY_OK;
+}
+
+static int __init cpu_wake_boost_init(void)
+{
+ struct wake_boost_info *w;
+
+ w = kzalloc(sizeof(*w), GFP_KERNEL);
+ if (!w)
+ return -ENOMEM;
+
+ w->wq = alloc_workqueue("wake_boost_wq", WQ_HIGHPRI, 0);
+ if (!w->wq) {
+ kfree(w);
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&w->boost_work, wake_boost);
+ INIT_DELAYED_WORK(&w->unboost_work, wake_unboost);
+
+ w->cpu_notif.notifier_call = do_cpu_boost;
+ cpufreq_register_notifier(&w->cpu_notif, CPUFREQ_POLICY_NOTIFIER);
+
+ w->fb_notif.notifier_call = fb_notifier_callback;
+ w->fb_notif.priority = INT_MAX;
+ fb_register_client(&w->fb_notif);
+
+ return 0;
+}
+late_initcall(cpu_wake_boost_init);
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 49e5ce8d0445..1823f62799df 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -1349,7 +1349,7 @@ static int __init cpufreq_stats_init(void)
proc_create_data("concurrent_policy_time", 0444, uid_cpupower,
&concurrent_policy_time_fops, NULL);
- uid_cpupower_enable = 0;
+ uid_cpupower_enable = 1;
}
cpufreq_stats_initialized = true;
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 49310727fe18..d4a8e7ee9914 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -1362,6 +1362,11 @@ static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */
+static const struct x86_cpu_id hwp_support_ids[] __initconst = {
+ { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
+ {}
+};
+
static int __init intel_pstate_init(void)
{
int cpu, rc = 0;
@@ -1371,17 +1376,16 @@ static int __init intel_pstate_init(void)
if (no_load)
return -ENODEV;
+ if (x86_match_cpu(hwp_support_ids) && !no_hwp) {
+ copy_cpu_funcs(&core_params.funcs);
+ hwp_active++;
+ goto hwp_cpu_matched;
+ }
+
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id)
return -ENODEV;
- /*
- * The Intel pstate driver will be ignored if the platform
- * firmware has its own power management modes.
- */
- if (intel_pstate_platform_pwr_mgmt_exists())
- return -ENODEV;
-
cpu_def = (struct cpu_defaults *)id->driver_data;
copy_pid_params(&cpu_def->pid_policy);
@@ -1390,17 +1394,20 @@ static int __init intel_pstate_init(void)
if (intel_pstate_msrs_not_valid())
return -ENODEV;
+hwp_cpu_matched:
+ /*
+ * The Intel pstate driver will be ignored if the platform
+ * firmware has its own power management modes.
+ */
+ if (intel_pstate_platform_pwr_mgmt_exists())
+ return -ENODEV;
+
pr_info("Intel P-state driver initializing.\n");
all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
if (!all_cpu_data)
return -ENOMEM;
- if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp) {
- pr_info("intel_pstate: HWP enabled\n");
- hwp_active++;
- }
-
if (!hwp_active && hwp_only)
goto out;
@@ -1411,6 +1418,9 @@ static int __init intel_pstate_init(void)
intel_pstate_debug_expose_params();
intel_pstate_sysfs_expose_params();
+ if (hwp_active)
+ pr_info("intel_pstate: HWP enabled\n");
+
return rc;
out:
get_online_cpus();
diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
index c4b0ef65988c..57e6c45724e7 100644
--- a/drivers/cpufreq/powernv-cpufreq.c
+++ b/drivers/cpufreq/powernv-cpufreq.c
@@ -592,7 +592,7 @@ static int __init powernv_cpufreq_init(void)
int rc = 0;
/* Don't probe on pseries (guest) platforms */
- if (!firmware_has_feature(FW_FEATURE_OPALv3))
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
return -ENODEV;
/* Discover pstates from device tree and init */
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 344058f8501a..d5657d50ac40 100644
--- a/drivers/cpuidle/coupled.c
+++ b/drivers/cpuidle/coupled.c
@@ -119,7 +119,6 @@ struct cpuidle_coupled {
#define CPUIDLE_COUPLED_NOT_IDLE (-1)
-static DEFINE_MUTEX(cpuidle_coupled_lock);
static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb);
/*
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index d5c5a476360f..c44a843cb405 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -282,7 +282,7 @@ static int powernv_idle_probe(void)
if (cpuidle_disable != IDLE_NO_OVERRIDE)
return -ENODEV;
- if (firmware_has_feature(FW_FEATURE_OPALv3)) {
+ if (firmware_has_feature(FW_FEATURE_OPAL)) {
cpuidle_state_table = powernv_states;
/* Device tree can indicate more idle states */
max_idle_state = powernv_add_idle_states();
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index 64e86ad63d0a..3ade800d4091 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hrtimer.h>
+#include <linux/fb.h>
#include "governor.h"
static struct class *devfreq_class;
@@ -42,6 +43,15 @@ static LIST_HEAD(devfreq_governor_list);
static LIST_HEAD(devfreq_list);
static DEFINE_MUTEX(devfreq_list_lock);
+/* List of devices to boost when the screen is woken */
+static const char *boost_devices[] = {
+ "soc:qcom,cpubw",
+};
+
+#define WAKE_BOOST_DURATION_MS (3000)
+static struct delayed_work wake_unboost_work;
+static struct work_struct wake_boost_work;
+
/**
* find_device_devfreq() - find devfreq struct using device pointer
* @dev: device pointer used to lookup device devfreq.
@@ -120,6 +130,10 @@ static int devfreq_update_status(struct devfreq *devfreq, unsigned long freq)
cur_time = jiffies;
+ /* Immediately exit if previous_freq is not initialized yet. */
+ if (!devfreq->previous_freq)
+ goto out;
+
prev_lev = devfreq_get_freq_level(devfreq, devfreq->previous_freq);
if (prev_lev < 0) {
ret = prev_lev;
@@ -196,9 +210,14 @@ int update_devfreq(struct devfreq *devfreq)
return -EINVAL;
/* Reevaluate the proper frequency */
- err = devfreq->governor->get_target_freq(devfreq, &freq, &flags);
- if (err)
- return err;
+ if (devfreq->do_wake_boost) {
+ /* Use the max freq when the screen is turned on */
+ freq = UINT_MAX;
+ } else {
+ err = devfreq->governor->get_target_freq(devfreq, &freq, &flags);
+ if (err)
+ return err;
+ }
/*
* Adjust the frequency with user freq and QoS.
@@ -467,7 +486,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
{
struct devfreq *devfreq;
struct devfreq_governor *governor;
- int err = 0;
+ int i, err = 0;
if (!dev || !profile || !governor_name) {
dev_err(dev, "%s: Invalid parameters.\n", __func__);
@@ -531,17 +550,26 @@ struct devfreq *devfreq_add_device(struct device *dev,
if (devfreq->governor)
err = devfreq->governor->event_handler(devfreq,
DEVFREQ_GOV_START, NULL);
- mutex_unlock(&devfreq_list_lock);
if (err) {
dev_err(dev, "%s: Unable to start governor for the device\n",
__func__);
goto err_init;
}
+ mutex_unlock(&devfreq_list_lock);
+
+ for (i = 0; i < ARRAY_SIZE(boost_devices); i++) {
+ if (!strcmp(dev_name(dev), boost_devices[i])) {
+ devfreq->needs_wake_boost = true;
+ break;
+ }
+ }
return devfreq;
err_init:
list_del(&devfreq->node);
+ mutex_unlock(&devfreq_list_lock);
+
device_unregister(&devfreq->dev);
kfree(devfreq);
err_out:
@@ -1090,6 +1118,63 @@ static struct attribute *devfreq_attrs[] = {
};
ATTRIBUTE_GROUPS(devfreq);
+static void set_wake_boost(bool enable)
+{
+ struct devfreq *df;
+
+ mutex_lock(&devfreq_list_lock);
+ list_for_each_entry(df, &devfreq_list, node) {
+ if (!df->needs_wake_boost)
+ continue;
+
+ mutex_lock(&df->lock);
+ df->do_wake_boost = enable;
+ update_devfreq(df);
+ mutex_unlock(&df->lock);
+ }
+ mutex_unlock(&devfreq_list_lock);
+}
+
+static void wake_boost_fn(struct work_struct *work)
+{
+ set_wake_boost(true);
+ schedule_delayed_work(&wake_unboost_work,
+ msecs_to_jiffies(WAKE_BOOST_DURATION_MS));
+}
+
+static void wake_unboost_fn(struct work_struct *work)
+{
+ set_wake_boost(false);
+}
+
+static int fb_notifier_callback(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct fb_event *evdata = data;
+ int *blank = evdata->data;
+
+ /* Parse framebuffer events as soon as they occur */
+ if (action != FB_EARLY_EVENT_BLANK)
+ return NOTIFY_OK;
+
+ switch (*blank) {
+ case FB_BLANK_UNBLANK:
+ schedule_work(&wake_boost_work);
+ break;
+ default:
+ cancel_work_sync(&wake_boost_work);
+ if (cancel_delayed_work_sync(&wake_unboost_work))
+ set_wake_boost(false);
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block fb_notifier_callback_nb = {
+ .notifier_call = fb_notifier_callback,
+ .priority = INT_MAX,
+};
+
static int __init devfreq_init(void)
{
devfreq_class = class_create(THIS_MODULE, "devfreq");
@@ -1106,6 +1191,10 @@ static int __init devfreq_init(void)
}
devfreq_class->dev_groups = devfreq_groups;
+ INIT_WORK(&wake_boost_work, wake_boost_fn);
+ INIT_DELAYED_WORK(&wake_unboost_work, wake_unboost_fn);
+ fb_register_client(&fb_notifier_callback_nb);
+
return 0;
}
subsys_initcall(devfreq_init);
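
The boost path in update_devfreq() can simply request UINT_MAX because the existing step right after it ("Adjust the frequency with user freq and QoS" above) clamps the target into the user min/max range, so the request resolves to the device's maximum. A tiny standalone model of that clamp; pick_target() and the limit values are illustrative, not devfreq API:

	#include <limits.h>
	#include <stdio.h>

	/* Clamp a requested frequency into [min, max], mirroring update_devfreq(). */
	static unsigned long pick_target(unsigned long requested,
					 unsigned long min, unsigned long max)
	{
		if (requested < min)
			requested = min;
		if (requested > max)
			requested = max;
		return requested;
	}

	int main(void)
	{
		unsigned long min = 100000, max = 1525000; /* kHz, illustrative */

		/* Boost path: ask for "infinity" and rely on the clamp. */
		printf("boost  -> %lu kHz\n", pick_target(UINT_MAX, min, max));
		/* Normal path: the governor-provided target passes through. */
		printf("normal -> %lu kHz\n", pick_target(768000, min, max));
		return 0;
	}
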
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index 2a8122444614..9ba4aaa9f755 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -200,6 +200,48 @@ static int gpio_rcar_irq_set_wake(struct irq_data *d, unsigned int on)
return 0;
}
+static void gpio_rcar_irq_bus_lock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
+ gpio_chip);
+
+ pm_runtime_get_sync(&p->pdev->dev);
+}
+
+static void gpio_rcar_irq_bus_sync_unlock(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
+ gpio_chip);
+
+ pm_runtime_put(&p->pdev->dev);
+}
+
+static int gpio_rcar_irq_request_resources(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
+ gpio_chip);
+ int error;
+
+ error = pm_runtime_get_sync(&p->pdev->dev);
+ if (error < 0)
+ return error;
+
+ return 0;
+}
+
+static void gpio_rcar_irq_release_resources(struct irq_data *d)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct gpio_rcar_priv *p = container_of(gc, struct gpio_rcar_priv,
+ gpio_chip);
+
+ pm_runtime_put(&p->pdev->dev);
+}
+
static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
{
struct gpio_rcar_priv *p = dev_id;
@@ -460,6 +502,10 @@ static int gpio_rcar_probe(struct platform_device *pdev)
irq_chip->irq_unmask = gpio_rcar_irq_enable;
irq_chip->irq_set_type = gpio_rcar_irq_set_type;
irq_chip->irq_set_wake = gpio_rcar_irq_set_wake;
+ irq_chip->irq_bus_lock = gpio_rcar_irq_bus_lock;
+ irq_chip->irq_bus_sync_unlock = gpio_rcar_irq_bus_sync_unlock;
+ irq_chip->irq_request_resources = gpio_rcar_irq_request_resources;
+ irq_chip->irq_release_resources = gpio_rcar_irq_release_resources;
irq_chip->flags = IRQCHIP_SET_TYPE_MASKED | IRQCHIP_MASK_ON_SUSPEND;
ret = gpiochip_add(gpio_chip);
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5a0f8a745b9d..52436b3c01bb 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -324,7 +324,7 @@ retry:
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
if (ret == -ENOSPC) {
spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
spin_lock(&vgdev->ctrlq.qlock);
goto retry;
} else {
@@ -399,7 +399,7 @@ retry:
ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
if (ret == -ENOSPC) {
spin_unlock(&vgdev->cursorq.qlock);
- wait_event(vgdev->cursorq.ack_queue, vq->num_free);
+ wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
spin_lock(&vgdev->cursorq.qlock);
goto retry;
} else {
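
Both virtgpu hunks tighten the retry condition: after -ENOSPC the caller now sleeps until the virtqueue has at least as many free descriptors as the whole request needs (outcnt + incnt, or outcnt for the cursor queue) instead of waking as soon as any single slot frees and failing again. A self-contained pthread sketch of the same wait-for-enough-capacity pattern; queue_reserve() and queue_release() are invented names:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t space = PTHREAD_COND_INITIALIZER;
	static int num_free = 2;

	/* Sleep until the queue can hold the whole request, then take the slots. */
	static void queue_reserve(int needed)
	{
		pthread_mutex_lock(&lock);
		while (num_free < needed)	/* not just num_free > 0 */
			pthread_cond_wait(&space, &lock);
		num_free -= needed;
		pthread_mutex_unlock(&lock);
	}

	static void queue_release(int freed)
	{
		pthread_mutex_lock(&lock);
		num_free += freed;
		pthread_cond_broadcast(&space);
		pthread_mutex_unlock(&lock);
	}

	static void *consumer(void *arg)
	{
		(void)arg;
		queue_release(3);	/* a completion frees descriptors */
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, consumer, NULL);
		queue_reserve(4);	/* waits until all 4 slots are available at once */
		pthread_join(t, NULL);
		printf("reserved 4 slots, %d left\n", num_free);
		return 0;
	}
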
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 098e562bd579..9b97f70fbb3d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1991,6 +1991,7 @@ void vmw_kms_helper_resource_finish(struct vmw_validation_ctx *ctx,
vmw_kms_helper_buffer_finish(res->dev_priv, NULL, ctx->buf,
out_fence, NULL);
+ vmw_dmabuf_unreference(&ctx->buf);
vmw_resource_unreserve(res, false, NULL, 0);
mutex_unlock(&res->dev_priv->cmdbuf_mutex);
}
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 14159ce876dc..9e48b4fa7327 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1290,7 +1290,7 @@ static void _set_secvid(struct kgsl_device *device)
adreno_writereg64(adreno_dev,
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
- KGSL_IOMMU_SECURE_BASE);
+ KGSL_IOMMU_SECURE_BASE(&device->mmu));
adreno_writereg(adreno_dev,
ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
KGSL_IOMMU_SECURE_SIZE);
@@ -1693,7 +1693,7 @@ static int adreno_getproperty(struct kgsl_device *device,
* anything to mmap().
*/
shadowprop.gpuaddr =
- (unsigned int) device->memstore.gpuaddr;
+ (unsigned long)device->memstore.gpuaddr;
shadowprop.size = device->memstore.size;
/* GSL needs this to be set, even if it
appears to be meaningless */
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 78f74b883877..84487e85162e 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1376,31 +1376,27 @@ static int _execute_reg_sequence(struct adreno_device *adreno_dev,
/* todo double check the reg writes */
while ((cur - opcode) < length) {
- switch (cur[0]) {
- /* Write a 32 bit value to a 64 bit reg */
- case 1:
+ if (cur[0] == 1 && (length - (cur - opcode) >= 4)) {
+ /* Write a 32 bit value to a 64 bit reg */
reg = cur[2];
reg = (reg << 32) | cur[1];
kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg, cur[3]);
cur += 4;
- break;
- /* Write a 64 bit value to a 64 bit reg */
- case 2:
+ } else if (cur[0] == 2 && (length - (cur - opcode) >= 5)) {
+ /* Write a 64 bit value to a 64 bit reg */
reg = cur[2];
reg = (reg << 32) | cur[1];
val = cur[4];
val = (val << 32) | cur[3];
kgsl_regwrite(KGSL_DEVICE(adreno_dev), reg, val);
cur += 5;
- break;
- /* Delay for X usec */
- case 3:
+ } else if (cur[0] == 3 && (length - (cur - opcode) >= 2)) {
+ /* Delay for X usec */
udelay(cur[1]);
cur += 2;
- break;
- default:
+ } else
return -EINVAL;
- } }
+ }
return 0;
}
@@ -2489,8 +2485,8 @@ static int a5xx_rb_start(struct adreno_device *adreno_dev,
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
A5XX_CP_RB_CNTL_DEFAULT);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
- rb->buffer_desc.gpuaddr);
+ adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
+ ADRENO_REG_CP_RB_BASE_HI, rb->buffer_desc.gpuaddr);
ret = a5xx_microcode_load(adreno_dev);
if (ret)
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index af9fc1c15236..b49d20207096 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,9 +38,10 @@
#define _IOMMU_PRIV(_mmu) (&((_mmu)->priv.iommu))
-#define ADDR_IN_GLOBAL(_a) \
- (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE) && \
- ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE + KGSL_IOMMU_GLOBAL_MEM_SIZE)))
+#define ADDR_IN_GLOBAL(_mmu, _a) \
+ (((_a) >= KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)) && \
+ ((_a) < (KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) + \
+ KGSL_IOMMU_GLOBAL_MEM_SIZE)))
static struct kgsl_mmu_pt_ops iommu_pt_ops;
static bool need_iommu_sync;
@@ -200,7 +201,7 @@ static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
BUG_ON(global_pt_count >= GLOBAL_PT_ENTRIES);
BUG_ON((global_pt_alloc + memdesc->size) >= KGSL_IOMMU_GLOBAL_MEM_SIZE);
- memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE + global_pt_alloc;
+ memdesc->gpuaddr = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu) + global_pt_alloc;
memdesc->priv |= KGSL_MEMDESC_GLOBAL;
global_pt_alloc += memdesc->size;
@@ -213,7 +214,7 @@ static void kgsl_iommu_add_global(struct kgsl_mmu *mmu,
void kgsl_add_global_secure_entry(struct kgsl_device *device,
struct kgsl_memdesc *memdesc)
{
- memdesc->gpuaddr = KGSL_IOMMU_SECURE_BASE;
+ memdesc->gpuaddr = KGSL_IOMMU_SECURE_BASE(&device->mmu);
kgsl_global_secure_pt_entry = memdesc;
}
@@ -686,7 +687,7 @@ static void _find_mem_entries(struct kgsl_mmu *mmu, uint64_t faultaddr,
/* Set the maximum possible size as an initial value */
nextentry->gpuaddr = (uint64_t) -1;
- if (ADDR_IN_GLOBAL(faultaddr)) {
+ if (ADDR_IN_GLOBAL(mmu, faultaddr)) {
_get_global_entries(faultaddr, preventry, nextentry);
} else if (context) {
private = context->proc_priv;
@@ -1056,14 +1057,14 @@ static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
unsigned int secure_global_size = kgsl_global_secure_pt_entry != NULL ?
kgsl_global_secure_pt_entry->size : 0;
if (mmu->secured && pagetable->name == KGSL_MMU_SECURE_PT) {
- pt->compat_va_start = KGSL_IOMMU_SECURE_BASE +
+ pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu) +
secure_global_size;
- pt->compat_va_end = KGSL_IOMMU_SECURE_END;
- pt->va_start = KGSL_IOMMU_SECURE_BASE + secure_global_size;
- pt->va_end = KGSL_IOMMU_SECURE_END;
+ pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
+ pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu) + secure_global_size;
+ pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
} else {
pt->compat_va_start = KGSL_IOMMU_SVM_BASE32;
- pt->compat_va_end = KGSL_IOMMU_SVM_END32;
+ pt->compat_va_end = KGSL_IOMMU_SECURE_BASE(mmu);
pt->va_start = KGSL_IOMMU_VA_BASE64;
pt->va_end = KGSL_IOMMU_VA_END64;
}
@@ -1072,7 +1073,7 @@ static void setup_64bit_pagetable(struct kgsl_mmu *mmu,
pagetable->name != KGSL_MMU_SECURE_PT) {
if ((BITS_PER_LONG == 32) || is_compat_task()) {
pt->svm_start = KGSL_IOMMU_SVM_BASE32;
- pt->svm_end = KGSL_IOMMU_SVM_END32;
+ pt->svm_end = KGSL_IOMMU_SECURE_BASE(mmu);
} else {
pt->svm_start = KGSL_IOMMU_SVM_BASE64;
pt->svm_end = KGSL_IOMMU_SVM_END64;
@@ -1088,22 +1089,22 @@ static void setup_32bit_pagetable(struct kgsl_mmu *mmu,
kgsl_global_secure_pt_entry->size : 0;
if (mmu->secured) {
if (pagetable->name == KGSL_MMU_SECURE_PT) {
- pt->compat_va_start = KGSL_IOMMU_SECURE_BASE +
+ pt->compat_va_start = KGSL_IOMMU_SECURE_BASE(mmu) +
secure_global_size;
- pt->compat_va_end = KGSL_IOMMU_SECURE_END;
- pt->va_start = KGSL_IOMMU_SECURE_BASE +
+ pt->compat_va_end = KGSL_IOMMU_SECURE_END(mmu);
+ pt->va_start = KGSL_IOMMU_SECURE_BASE(mmu) +
secure_global_size;
- pt->va_end = KGSL_IOMMU_SECURE_END;
+ pt->va_end = KGSL_IOMMU_SECURE_END(mmu);
} else {
pt->va_start = KGSL_IOMMU_SVM_BASE32;
- pt->va_end = KGSL_IOMMU_SECURE_BASE +
+ pt->va_end = KGSL_IOMMU_SECURE_BASE(mmu) +
secure_global_size;
pt->compat_va_start = pt->va_start;
pt->compat_va_end = pt->va_end;
}
} else {
pt->va_start = KGSL_IOMMU_SVM_BASE32;
- pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE;
+ pt->va_end = KGSL_IOMMU_GLOBAL_MEM_BASE(mmu);
pt->compat_va_start = pt->va_start;
pt->compat_va_end = pt->va_end;
}
@@ -2352,7 +2353,8 @@ static int kgsl_iommu_set_svm_region(struct kgsl_pagetable *pagetable,
struct rb_node *node;
/* Make sure the requested address doesn't fall in the global range */
- if (ADDR_IN_GLOBAL(gpuaddr) || ADDR_IN_GLOBAL(gpuaddr + size))
+ if (ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr) ||
+ ADDR_IN_GLOBAL(pagetable->mmu, gpuaddr + size))
return -ENOMEM;
spin_lock(&pagetable->lock);
diff --git a/drivers/gpu/msm/kgsl_iommu.h b/drivers/gpu/msm/kgsl_iommu.h
index 06f6d65effad..a21e74f92d7c 100644
--- a/drivers/gpu/msm/kgsl_iommu.h
+++ b/drivers/gpu/msm/kgsl_iommu.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016,2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -24,12 +24,17 @@
* are mapped into all pagetables.
*/
#define KGSL_IOMMU_GLOBAL_MEM_SIZE SZ_8M
-#define KGSL_IOMMU_GLOBAL_MEM_BASE 0xf8000000
+#define KGSL_IOMMU_GLOBAL_MEM_BASE32 0xf8000000
+#define KGSL_IOMMU_GLOBAL_MEM_BASE64 0xfc000000
+
+#define KGSL_IOMMU_GLOBAL_MEM_BASE(__mmu) \
+ (MMU_FEATURE(__mmu, KGSL_MMU_64BIT) ? \
+ KGSL_IOMMU_GLOBAL_MEM_BASE64 : KGSL_IOMMU_GLOBAL_MEM_BASE32)
#define KGSL_IOMMU_SECURE_SIZE SZ_256M
-#define KGSL_IOMMU_SECURE_END KGSL_IOMMU_GLOBAL_MEM_BASE
-#define KGSL_IOMMU_SECURE_BASE \
- (KGSL_IOMMU_GLOBAL_MEM_BASE - KGSL_IOMMU_SECURE_SIZE)
+#define KGSL_IOMMU_SECURE_END(_mmu) KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu)
+#define KGSL_IOMMU_SECURE_BASE(_mmu) \
+ (KGSL_IOMMU_GLOBAL_MEM_BASE(_mmu) - KGSL_IOMMU_SECURE_SIZE)
#define KGSL_IOMMU_SVM_BASE32 0x300000
#define KGSL_IOMMU_SVM_END32 (0xC0000000 - SZ_16M)
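
With the new macros the GPU virtual layout is derived from one per-MMU base: the global region starts at KGSL_IOMMU_GLOBAL_MEM_BASE (0xf8000000 for 32-bit MMUs, 0xfc000000 for 64-bit), the 256 MB secure region ends where the global region begins, and, per the kgsl_iommu.c hunks above, the 32-bit SVM range now ends at the secure base rather than at the old fixed SVM_END32. A small standalone program that just reproduces that arithmetic for both cases, using the values copied from this header:

	#include <stdio.h>

	#define SZ_256M 0x10000000UL

	/* Values copied from kgsl_iommu.h above. */
	#define GLOBAL_MEM_BASE32 0xf8000000UL
	#define GLOBAL_MEM_BASE64 0xfc000000UL

	static void print_map(const char *tag, unsigned long global_base)
	{
		unsigned long secure_end  = global_base;            /* KGSL_IOMMU_SECURE_END  */
		unsigned long secure_base = global_base - SZ_256M;  /* KGSL_IOMMU_SECURE_BASE */

		printf("%s: global 0x%08lx, secure [0x%08lx..0x%08lx), svm32 end 0x%08lx\n",
		       tag, global_base, secure_base, secure_end, secure_base);
	}

	int main(void)
	{
		print_map("32-bit MMU", GLOBAL_MEM_BASE32);
		print_map("64-bit MMU", GLOBAL_MEM_BASE64);
		return 0;
	}
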
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index ea3bc9bb1b7a..2b9c00faca7d 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -675,7 +675,7 @@ static ssize_t ucma_resolve_ip(struct ucma_file *file,
if (copy_from_user(&cmd, inbuf, sizeof(cmd)))
return -EFAULT;
- if (!rdma_addr_size_in6(&cmd.src_addr) ||
+ if ((cmd.src_addr.sin6_family && !rdma_addr_size_in6(&cmd.src_addr)) ||
!rdma_addr_size_in6(&cmd.dst_addr))
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index cfcfbb6b84d7..c5390f6f94c5 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -231,7 +231,11 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
} else {
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
+ if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
+ return -EINVAL;
qp->rq.wqe_shift = ucmd->rq_wqe_shift;
+ if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
+ return -EINVAL;
qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
} else {
@@ -1348,18 +1352,18 @@ enum {
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
- if (rate == IB_RATE_PORT_CURRENT) {
+ if (rate == IB_RATE_PORT_CURRENT)
return 0;
- } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
+
+ if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS)
return -EINVAL;
- } else {
- while (rate != IB_RATE_2_5_GBPS &&
- !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
- MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
- --rate;
- }
- return rate + MLX5_STAT_RATE_OFFSET;
+ while (rate != IB_RATE_PORT_CURRENT &&
+ !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
+ MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
+ --rate;
+
+ return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
}
static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 766bf2660116..5f04b2d94635 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -88,6 +88,7 @@ static int input_leds_connect(struct input_handler *handler,
const struct input_device_id *id)
{
struct input_leds *leds;
+ struct input_led *led;
unsigned int num_leds;
unsigned int led_code;
int led_no;
@@ -119,14 +120,13 @@ static int input_leds_connect(struct input_handler *handler,
led_no = 0;
for_each_set_bit(led_code, dev->ledbit, LED_CNT) {
- struct input_led *led = &leds->leds[led_no];
+ if (!input_led_info[led_code].name)
+ continue;
+ led = &leds->leds[led_no];
led->handle = &leds->handle;
led->code = led_code;
- if (!input_led_info[led_code].name)
- continue;
-
led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
dev_name(&dev->dev),
input_led_info[led_code].name);
diff --git a/drivers/input/misc/drv260x.c b/drivers/input/misc/drv260x.c
index 930424e55439..251d64ca41ce 100644
--- a/drivers/input/misc/drv260x.c
+++ b/drivers/input/misc/drv260x.c
@@ -521,7 +521,7 @@ static int drv260x_probe(struct i2c_client *client,
if (!haptics)
return -ENOMEM;
- haptics->rated_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
+ haptics->overdrive_voltage = DRV260X_DEF_OD_CLAMP_VOLT;
haptics->rated_voltage = DRV260X_DEF_RATED_VOLT;
if (pdata) {
diff --git a/drivers/input/misc/vl53L0/stmvl53l0_module-cci.c b/drivers/input/misc/vl53L0/stmvl53l0_module-cci.c
index b0ba67be4cfd..5fb90ff5515c 100644
--- a/drivers/input/misc/vl53L0/stmvl53l0_module-cci.c
+++ b/drivers/input/misc/vl53L0/stmvl53l0_module-cci.c
@@ -346,6 +346,7 @@ static struct platform_driver stmvl53l0_platform_driver = {
.name = STMVL53L0_DRV_NAME,
.owner = THIS_MODULE,
.of_match_table = st_stmvl53l0_dt_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/input/misc/vl53L0/stmvl53l0_module-i2c.c b/drivers/input/misc/vl53L0/stmvl53l0_module-i2c.c
index 3d7ae12127fe..007f6ba60ec9 100644
--- a/drivers/input/misc/vl53L0/stmvl53l0_module-i2c.c
+++ b/drivers/input/misc/vl53L0/stmvl53l0_module-i2c.c
@@ -325,6 +325,7 @@ static struct i2c_driver stmvl53l0_driver = {
.name = STMVL53L0_DRV_NAME,
.owner = THIS_MODULE,
.of_match_table = st_stmvl53l0_dt_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = stmvl53l0_probe,
.remove = stmvl53l0_remove,
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index 2d5794ec338b..88dfe3008cf4 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -2523,6 +2523,15 @@ static const struct dmi_system_id mxt_dmi_table[] = {
.driver_data = samus_platform_data,
},
{
+ /* Samsung Chromebook Pro */
+ .ident = "Samsung Chromebook Pro",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Google"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Caroline"),
+ },
+ .driver_data = samus_platform_data,
+ },
+ {
/* Other Google Chromebooks */
.ident = "Chromebook",
.matches = {
diff --git a/drivers/input/touchscreen/stm/ftm4_ts.c b/drivers/input/touchscreen/stm/ftm4_ts.c
index 0a03c193c862..733098b7de21 100644
--- a/drivers/input/touchscreen/stm/ftm4_ts.c
+++ b/drivers/input/touchscreen/stm/ftm4_ts.c
@@ -2528,8 +2528,8 @@ static int fts_resume(struct i2c_client *client)
fts_start_device(info);
-exit:
#ifdef CONFIG_WAKE_GESTURES
+exit:
if (wg_changed) {
wg_switch = wg_switch_temp;
wg_changed = false;
diff --git a/drivers/input/touchscreen/wake_gestures.c b/drivers/input/touchscreen/wake_gestures.c
index b990f5fcf6a5..fcf1d15c7add 100644
--- a/drivers/input/touchscreen/wake_gestures.c
+++ b/drivers/input/touchscreen/wake_gestures.c
@@ -33,6 +33,7 @@
#include <linux/hrtimer.h>
#include <asm-generic/cputime.h>
#include <linux/wakelock.h>
+#include <linux/wahoo_info.h>
/* Tunables */
#define WG_DEBUG 0
@@ -127,35 +128,14 @@ static struct work_struct s2w_input_work;
static struct work_struct dt2w_input_work;
static struct wake_lock dt2w_wakelock;
-//get hardware type
static int hw_version = TAIMEN;
-static int __init get_model(char *cmdline_model)
-{
- if (strstr(cmdline_model, "walleye")) {
- sweep_y_limit = SWEEP_Y_LIMIT_WALLEYE;
- sweep_x_limit = SWEEP_X_LIMIT_WALLEYE;
- sweep_x_b1 = SWEEP_X_B1_WALLEYE;
- sweep_x_b2 = SWEEP_X_B2_WALLEYE;
- sweep_y_start = SWEEP_Y_START_WALLEYE;
- sweep_x_start = SWEEP_X_START_WALLEYE;
- sweep_x_final = SWEEP_X_FINAL_WALLEYE;
- sweep_y_next = SWEEP_Y_NEXT_WALLEYE;
- sweep_x_max = SWEEP_X_MAX_WALLEYE;
- sweep_edge = SWEEP_EDGE_WALLEYE;
- hw_version = WALLEYE;
- }
-
- return 0;
-}
-__setup("androidboot.hardware=", get_model);
static bool is_suspended(void)
{
-#if IS_ENABLED(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_HTC)
- return scr_suspended();
-#else
- return scr_suspended_taimen();
-#endif
+ if (hw_version == WALLEYE)
+ return scr_suspended();
+ else
+ return scr_suspended_taimen();
}
/* Wake Gestures */
@@ -728,16 +708,26 @@ static DEVICE_ATTR(vib_strength, (S_IWUSR|S_IRUGO),
* INIT / EXIT stuff below here
*/
-#if IS_ENABLED(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_HTC)
extern struct kobject *android_touch_kobj;
-#else
-struct kobject *android_touch_kobj;
-#endif
static int __init wake_gestures_init(void)
{
int rc = 0;
+ if (is_google_walleye()) {
+ sweep_y_limit = SWEEP_Y_LIMIT_WALLEYE;
+ sweep_x_limit = SWEEP_X_LIMIT_WALLEYE;
+ sweep_x_b1 = SWEEP_X_B1_WALLEYE;
+ sweep_x_b2 = SWEEP_X_B2_WALLEYE;
+ sweep_y_start = SWEEP_Y_START_WALLEYE;
+ sweep_x_start = SWEEP_X_START_WALLEYE;
+ sweep_x_final = SWEEP_X_FINAL_WALLEYE;
+ sweep_y_next = SWEEP_Y_NEXT_WALLEYE;
+ sweep_x_max = SWEEP_X_MAX_WALLEYE;
+ sweep_edge = SWEEP_EDGE_WALLEYE;
+ hw_version = WALLEYE;
+ }
+
wake_dev = input_allocate_device();
if (!wake_dev) {
pr_err("Failed to allocate wake_dev\n");
@@ -793,14 +783,13 @@ static int __init wake_gestures_init(void)
}
#endif
-
-#if !IS_ENABLED(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_HTC)
- android_touch_kobj = kobject_create_and_add("android_touch", NULL);
- if (android_touch_kobj == NULL) {
- pr_err("%s: subsystem_register failed\n", __func__);
- goto err_input_dev;
+ if (hw_version == TAIMEN) {
+ android_touch_kobj = kobject_create_and_add("android_touch", NULL);
+ if (android_touch_kobj == NULL) {
+ pr_err("%s: subsystem_register failed\n", __func__);
+ goto err_input_dev;
+ }
}
-#endif
rc = sysfs_create_file(android_touch_kobj, &dev_attr_sweep2wake.attr);
if (rc) {
pr_warn("%s: sysfs_create_file failed for sweep2wake\n", __func__);
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index cc140eb397a7..7271899f39bb 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -813,6 +813,7 @@ enum new_flag {
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
struct dm_buffer *b;
+ bool tried_noio_alloc = false;
/*
* dm-bufio is resistant to allocation failures (it just keeps
@@ -837,6 +838,15 @@ static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client
if (nf == NF_PREFETCH)
return NULL;
+ if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
+ dm_bufio_unlock(c);
+ b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ dm_bufio_lock(c);
+ if (b)
+ return b;
+ tried_noio_alloc = true;
+ }
+
if (!list_empty(&c->reserved_buffers)) {
b = list_entry(c->reserved_buffers.next,
struct dm_buffer, lru_list);
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index e34cf53bd068..ceff074b3b74 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -19,6 +19,7 @@
#include <linux/module.h>
#include <linux/reboot.h>
+#include <linux/vmalloc.h>
#define DM_MSG_PREFIX "verity"
@@ -32,6 +33,7 @@
#define DM_VERITY_OPT_LOGGING "ignore_corruption"
#define DM_VERITY_OPT_RESTART "restart_on_corruption"
#define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
+#define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
#define DM_VERITY_OPTS_MAX (2 + DM_VERITY_OPTS_FEC)
@@ -399,6 +401,18 @@ static int verity_bv_zero(struct dm_verity *v, struct dm_verity_io *io,
}
/*
+ * Moves the bio iter one data block forward.
+ */
+static inline void verity_bv_skip_block(struct dm_verity *v,
+ struct dm_verity_io *io,
+ struct bvec_iter *iter)
+{
+ struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_bio_data_size);
+
+ bio_advance_iter(bio, iter, 1 << v->data_dev_block_bits);
+}
+
+/*
* Verify one "dm_verity_io" structure.
*/
static int verity_verify_io(struct dm_verity_io *io)
@@ -410,9 +424,16 @@ static int verity_verify_io(struct dm_verity_io *io)
for (b = 0; b < io->n_blocks; b++) {
int r;
+ sector_t cur_block = io->block + b;
struct shash_desc *desc = verity_io_hash_desc(v, io);
- r = verity_hash_for_block(v, io, io->block + b,
+ if (v->validated_blocks &&
+ likely(test_bit(cur_block, v->validated_blocks))) {
+ verity_bv_skip_block(v, io, &io->iter);
+ continue;
+ }
+
+ r = verity_hash_for_block(v, io, cur_block,
verity_io_want_digest(v, io),
&is_zero);
if (unlikely(r < 0))
@@ -445,13 +466,16 @@ static int verity_verify_io(struct dm_verity_io *io)
return r;
if (likely(memcmp(verity_io_real_digest(v, io),
- verity_io_want_digest(v, io), v->digest_size) == 0))
+ verity_io_want_digest(v, io), v->digest_size) == 0)) {
+ if (v->validated_blocks)
+ set_bit(cur_block, v->validated_blocks);
continue;
+ }
else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA,
- io->block + b, NULL, &start) == 0)
+ cur_block, NULL, &start) == 0)
continue;
else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA,
- io->block + b))
+ cur_block))
return -EIO;
}
@@ -645,6 +669,8 @@ void verity_status(struct dm_target *ti, status_type_t type,
args += DM_VERITY_OPTS_FEC;
if (v->zero_digest)
args++;
+ if (v->validated_blocks)
+ args++;
if (!args)
return;
DMEMIT(" %u", args);
@@ -663,6 +689,8 @@ void verity_status(struct dm_target *ti, status_type_t type,
}
if (v->zero_digest)
DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
+ if (v->validated_blocks)
+ DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
sz = verity_fec_status_table(v, sz, result, maxlen);
break;
}
@@ -716,6 +744,7 @@ void verity_dtr(struct dm_target *ti)
if (v->bufio)
dm_bufio_client_destroy(v->bufio);
+ vfree(v->validated_blocks);
kfree(v->salt);
kfree(v->root_digest);
kfree(v->zero_digest);
@@ -737,6 +766,26 @@ void verity_dtr(struct dm_target *ti)
}
EXPORT_SYMBOL_GPL(verity_dtr);
+static int verity_alloc_most_once(struct dm_verity *v)
+{
+ struct dm_target *ti = v->ti;
+
+ /* the bitset can only handle INT_MAX blocks */
+ if (v->data_blocks > INT_MAX) {
+ ti->error = "device too large to use check_at_most_once";
+ return -E2BIG;
+ }
+
+ v->validated_blocks = vzalloc(BITS_TO_LONGS(v->data_blocks) *
+ sizeof(unsigned long));
+ if (!v->validated_blocks) {
+ ti->error = "failed to allocate bitset for check_at_most_once";
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static int verity_alloc_zero_digest(struct dm_verity *v)
{
int r = -ENOMEM;
@@ -806,6 +855,12 @@ static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v)
}
continue;
+ } else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) {
+ r = verity_alloc_most_once(v);
+ if (r)
+ return r;
+ continue;
+
} else if (verity_is_fec_opt_arg(arg_name)) {
r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
if (r)
@@ -1074,7 +1129,7 @@ EXPORT_SYMBOL_GPL(verity_ctr);
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 3, 0},
+ .version = {1, 4, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index a90d1d416107..d216fc76d350 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -63,6 +63,7 @@ struct dm_verity {
sector_t hash_level_block[DM_VERITY_MAX_LEVELS];
struct dm_verity_fec *fec; /* forward error correction */
+ unsigned long *validated_blocks; /* bitset blocks validated */
};
struct dm_verity_io {
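
The new check_at_most_once option keeps one bit per data block in validated_blocks: verity_alloc_most_once() sizes the bitset to BITS_TO_LONGS(data_blocks) longs, a block that already hashed clean is skipped via test_bit(), and set_bit() records a successful verification. A self-contained userspace sketch of that verify-at-most-once cache; verify_block() here is a placeholder that always succeeds, not the dm-verity hash path:

	#include <limits.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <stdlib.h>

	#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
	#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

	static unsigned long *validated_blocks;

	static bool already_validated(unsigned long block)
	{
		return validated_blocks[block / BITS_PER_LONG] &
		       (1UL << (block % BITS_PER_LONG));
	}

	static void mark_validated(unsigned long block)
	{
		validated_blocks[block / BITS_PER_LONG] |= 1UL << (block % BITS_PER_LONG);
	}

	/* Placeholder for the real hash-and-compare step. */
	static bool verify_block(unsigned long block)
	{
		(void)block;
		return true;
	}

	int main(void)
	{
		unsigned long data_blocks = 1024, b = 42;

		validated_blocks = calloc(BITS_TO_LONGS(data_blocks), sizeof(unsigned long));
		if (!validated_blocks)
			return 1;

		/* First read: verify and remember; later reads: skipped. */
		if (!already_validated(b) && verify_block(b))
			mark_validated(b);
		printf("block %lu cached: %s\n", b, already_validated(b) ? "yes" : "no");

		free(validated_blocks);
		return 0;
	}
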
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index 721bb62d5c2a..2645d03d4c95 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -155,9 +155,11 @@ struct msm_vfe_irq_ops {
struct msm_isp_timestamp *ts);
void (*process_axi_irq)(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
+ uint32_t pingpong_status,
struct msm_isp_timestamp *ts);
void (*process_stats_irq)(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
+ uint32_t pingpong_status,
struct msm_isp_timestamp *ts);
void (*config_irq)(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
@@ -594,6 +596,7 @@ struct msm_vfe_tasklet_queue_cmd {
struct list_head list;
uint32_t vfeInterruptStatus0;
uint32_t vfeInterruptStatus1;
+ uint32_t vfe_pingpong_status;
struct msm_isp_timestamp ts;
uint8_t cmd_used;
struct vfe_device *vfe_dev;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index db16db995d6e..e802f8f2ecdd 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -1045,15 +1045,18 @@ static int msm_vfe40_start_fetch_engine(struct vfe_device *vfe_dev,
fe_cfg->stream_id);
vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
+ mutex_lock(&vfe_dev->buf_mgr->lock);
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
if (rc < 0 || !buf) {
pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
__func__, rc, buf);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
mapped_info = buf->mapped_info[0];
buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
} else {
rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
&mapped_info, fe_cfg->fd);
@@ -1106,14 +1109,15 @@ static int msm_vfe40_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
mutex_lock(&vfe_dev->buf_mgr->lock);
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
- mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc < 0 || !buf) {
pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
__func__, rc, buf);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
mapped_info = buf->mapped_info[0];
buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
} else {
rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
&mapped_info, fe_cfg->fd);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
index d40fe5e73984..50ab541f3bdb 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
@@ -889,13 +889,14 @@ static int msm_vfe44_fetch_engine_start(struct vfe_device *vfe_dev,
mutex_lock(&vfe_dev->buf_mgr->lock);
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
- mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc < 0) {
pr_err("%s: No fetch buffer\n", __func__);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
mapped_info = buf->mapped_info[0];
buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
} else {
rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
&mapped_info, fe_cfg->fd);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
index ebb6930e1d8d..0be0378076cf 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
@@ -830,14 +830,15 @@ static int msm_vfe46_start_fetch_engine(struct vfe_device *vfe_dev,
mutex_lock(&vfe_dev->buf_mgr->lock);
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
- mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc < 0 || !buf) {
pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
__func__, rc, buf);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
mapped_info = buf->mapped_info[0];
buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
} else {
rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
&mapped_info, fe_cfg->fd);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index eb58fb18e2d6..fad25e201038 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -563,6 +563,7 @@ void msm_vfe47_process_error_status(struct vfe_device *vfe_dev)
void msm_vfe47_read_and_clear_irq_status(struct vfe_device *vfe_dev,
uint32_t *irq_status0, uint32_t *irq_status1)
{
+ uint32_t count = 0;
*irq_status0 = msm_camera_io_r(vfe_dev->vfe_base + 0x6C);
*irq_status1 = msm_camera_io_r(vfe_dev->vfe_base + 0x70);
/* Mask off bits that are not enabled */
@@ -571,6 +572,14 @@ void msm_vfe47_read_and_clear_irq_status(struct vfe_device *vfe_dev,
msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x58);
*irq_status0 &= vfe_dev->irq0_mask;
*irq_status1 &= vfe_dev->irq1_mask;
+ /* Check whether the status register really cleared; if not, clear it again */
+ while (*irq_status0 &&
+ (*irq_status0 & msm_camera_io_r(vfe_dev->vfe_base + 0x6C)) &&
+ (count < MAX_RECOVERY_THRESHOLD)) {
+ msm_camera_io_w(*irq_status0, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w_mb(1, vfe_dev->vfe_base + 0x58);
+ count++;
+ }
if (*irq_status1 & (1 << 0)) {
vfe_dev->error_info.camif_status =
@@ -1077,15 +1086,18 @@ int msm_vfe47_start_fetch_engine(struct vfe_device *vfe_dev,
fe_cfg->stream_id);
vfe_dev->fetch_engine_info.bufq_handle = bufq_handle;
+ mutex_lock(&vfe_dev->buf_mgr->lock);
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
if (rc < 0 || !buf) {
pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
__func__, rc, buf);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
mapped_info = buf->mapped_info[0];
buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
} else {
rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
&mapped_info, fe_cfg->fd);
@@ -1138,14 +1150,15 @@ int msm_vfe47_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
mutex_lock(&vfe_dev->buf_mgr->lock);
rc = vfe_dev->buf_mgr->ops->get_buf_by_index(
vfe_dev->buf_mgr, bufq_handle, fe_cfg->buf_idx, &buf);
- mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc < 0 || !buf) {
pr_err("%s: No fetch buffer rc= %d buf= %pK\n",
__func__, rc, buf);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
return -EINVAL;
}
mapped_info = buf->mapped_info[0];
buf->state = MSM_ISP_BUFFER_STATE_DISPATCHED;
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
} else {
rc = vfe_dev->buf_mgr->ops->map_buf(vfe_dev->buf_mgr,
&mapped_info, fe_cfg->fd);
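
All four VFE fetch-engine variants above (isp40/44/46/47) get the same fix: buf_mgr->lock is now held not just across get_buf_by_index() but until mapped_info has been copied and buf->state set to DISPATCHED, so no other path can observe or recycle the buffer between the lookup and the state change. A small pthread illustration of why the unlock has to move; lookup() and dispatch() are invented names, not driver functions:

	#include <pthread.h>
	#include <stdio.h>

	enum buf_state { BUF_QUEUED, BUF_DISPATCHED };

	struct buffer {
		enum buf_state state;
		int mapped_addr;
	};

	static pthread_mutex_t buf_mgr_lock = PTHREAD_MUTEX_INITIALIZER;
	static struct buffer pool[1] = { { BUF_QUEUED, 0x1000 } };

	static struct buffer *lookup(int idx)
	{
		return &pool[idx];	/* stand-in for get_buf_by_index() */
	}

	/*
	 * Correct shape: look the buffer up, copy what we need, and transition
	 * its state all inside one critical section, as in the fixed paths.
	 */
	static int dispatch(int idx, int *mapped_out)
	{
		struct buffer *buf;

		pthread_mutex_lock(&buf_mgr_lock);
		buf = lookup(idx);
		if (!buf || buf->state != BUF_QUEUED) {
			pthread_mutex_unlock(&buf_mgr_lock);
			return -1;
		}
		*mapped_out = buf->mapped_addr;	/* read while still protected */
		buf->state = BUF_DISPATCHED;	/* and transition atomically with it */
		pthread_mutex_unlock(&buf_mgr_lock);
		return 0;
	}

	int main(void)
	{
		int addr;

		if (!dispatch(0, &addr))
			printf("dispatched buffer mapped at 0x%x\n", addr);
		if (dispatch(0, &addr))
			printf("second dispatch rejected: already in flight\n");
		return 0;
	}
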
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 46cd7496d6d6..ea768648b597 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3058,12 +3058,18 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
return -EINVAL;
msm_isp_get_timestamp(&timestamp, vfe_dev_ioctl);
-
+ mutex_lock(&vfe_dev_ioctl->buf_mgr->lock);
for (i = 0; i < stream_cfg_cmd->num_streams; i++) {
if (stream_cfg_cmd->stream_handle[i] == 0)
continue;
stream_info = msm_isp_get_stream_common_data(vfe_dev_ioctl,
HANDLE_TO_IDX(stream_cfg_cmd->stream_handle[i]));
+
+ if (!stream_info) {
+ pr_err("%s: stream_info is NULL\n", __func__);
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
+ return -EINVAL;
+ }
if (SRC_TO_INTF(stream_info->stream_src) < VFE_SRC_MAX)
src_state = axi_data->src_info[
SRC_TO_INTF(stream_info->stream_src)].active;
@@ -3071,6 +3077,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
else {
ISP_DBG("%s: invalid src info index\n", __func__);
rc = -EINVAL;
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
goto error;
}
spin_lock_irqsave(&stream_info->lock, flags);
@@ -3082,6 +3089,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
}
if (rc) {
spin_unlock_irqrestore(&stream_info->lock, flags);
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
goto error;
}
@@ -3104,6 +3112,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
HANDLE_TO_IDX(
stream_cfg_cmd->stream_handle[i]));
spin_unlock_irqrestore(&stream_info->lock, flags);
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
goto error;
}
for (k = 0; k < stream_info->num_isp; k++) {
@@ -3150,6 +3159,7 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev_ioctl,
spin_unlock_irqrestore(&stream_info->lock, flags);
streams[num_streams++] = stream_info;
}
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
for (i = 0; i < MAX_VFE; i++) {
vfe_dev = update_vfes[i];
@@ -3886,10 +3896,12 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
&update_cmd->update_info[i];
stream_info = msm_isp_get_stream_common_data(vfe_dev,
HANDLE_TO_IDX(update_info->stream_handle));
+ mutex_lock(&vfe_dev->buf_mgr->lock);
rc = msm_isp_request_frame(vfe_dev, stream_info,
update_info->user_stream_id,
update_info->frame_id,
MSM_ISP_INVALID_BUF_INDEX);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc)
pr_err("%s failed to request frame!\n",
__func__);
@@ -3940,10 +3952,12 @@ int msm_isp_update_axi_stream(struct vfe_device *vfe_dev, void *arg)
rc = -EINVAL;
break;
}
+ mutex_lock(&vfe_dev->buf_mgr->lock);
rc = msm_isp_request_frame(vfe_dev, stream_info,
req_frm->user_stream_id,
req_frm->frame_id,
req_frm->buf_index);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc)
pr_err("%s failed to request frame!\n",
__func__);
@@ -4113,11 +4127,11 @@ void msm_isp_process_axi_irq_stream(struct vfe_device *vfe_dev,
void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
- struct msm_isp_timestamp *ts)
+ uint32_t pingpong_status, struct msm_isp_timestamp *ts)
{
int i, rc = 0;
uint32_t comp_mask = 0, wm_mask = 0;
- uint32_t pingpong_status, stream_idx;
+ uint32_t stream_idx;
struct msm_vfe_axi_stream *stream_info;
struct msm_vfe_axi_composite_info *comp_info;
struct msm_vfe_axi_shared_data *axi_data = &vfe_dev->axi_data;
@@ -4131,8 +4145,6 @@ void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
return;
ISP_DBG("%s: status: 0x%x\n", __func__, irq_status0);
- pingpong_status =
- vfe_dev->hw_info->vfe_ops.axi_ops.get_pingpong_status(vfe_dev);
for (i = 0; i < axi_data->hw_info->num_comp_mask; i++) {
rc = 0;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
index a8d4cfb43927..e6713cbffbe9 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -54,7 +54,7 @@ void msm_isp_notify(struct vfe_device *vfe_dev, uint32_t event_type,
void msm_isp_process_axi_irq(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
- struct msm_isp_timestamp *ts);
+ uint32_t pingpong_status, struct msm_isp_timestamp *ts);
void msm_isp_axi_disable_all_wm(struct vfe_device *vfe_dev);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index 3119a54115be..5984301a2723 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -253,21 +253,18 @@ static int32_t msm_isp_stats_buf_divert(struct vfe_device *vfe_dev,
static int32_t msm_isp_stats_configure(struct vfe_device *vfe_dev,
uint32_t stats_irq_mask, struct msm_isp_timestamp *ts,
- bool is_composite)
+ uint32_t pingpong_status, bool is_composite)
{
int i, rc = 0;
struct msm_isp_event_data buf_event;
struct msm_isp_stats_event *stats_event = &buf_event.u.stats;
struct msm_vfe_stats_stream *stream_info = NULL;
- uint32_t pingpong_status;
uint32_t comp_stats_type_mask = 0;
int result = 0;
memset(&buf_event, 0, sizeof(struct msm_isp_event_data));
buf_event.timestamp = ts->buf_time;
buf_event.frame_id = vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
- pingpong_status = vfe_dev->hw_info->
- vfe_ops.stats_ops.get_pingpong_status(vfe_dev);
for (i = 0; i < vfe_dev->hw_info->stats_hw_info->num_stats_type; i++) {
if (!(stats_irq_mask & (1 << i)))
@@ -304,7 +301,7 @@ static int32_t msm_isp_stats_configure(struct vfe_device *vfe_dev,
void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
- struct msm_isp_timestamp *ts)
+ uint32_t pingpong_status, struct msm_isp_timestamp *ts)
{
int j, rc;
uint32_t atomic_stats_mask = 0;
@@ -332,7 +329,7 @@ void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
/* Process non-composite irq */
if (stats_irq_mask) {
rc = msm_isp_stats_configure(vfe_dev, stats_irq_mask, ts,
- comp_flag);
+ pingpong_status, comp_flag);
}
/* Process composite irq */
@@ -345,7 +342,7 @@ void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
&vfe_dev->stats_data.stats_comp_mask[j]);
rc = msm_isp_stats_configure(vfe_dev, atomic_stats_mask,
- ts, !comp_flag);
+ ts, pingpong_status, !comp_flag);
}
}
}
@@ -1100,6 +1097,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl,
struct vfe_device *vfe_dev;
msm_isp_get_timestamp(&timestamp, vfe_dev_ioctl);
+ mutex_lock(&vfe_dev_ioctl->buf_mgr->lock);
num_stats_comp_mask =
vfe_dev_ioctl->hw_info->stats_hw_info->num_stats_comp_mask;
@@ -1125,6 +1123,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl,
}
if (rc) {
spin_unlock_irqrestore(&stream_info->lock, flags);
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
goto error;
}
rc = msm_isp_init_stats_ping_pong_reg(
@@ -1132,6 +1131,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl,
if (rc < 0) {
spin_unlock_irqrestore(&stream_info->lock, flags);
pr_err("%s: No buffer for stream%d\n", __func__, idx);
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
return rc;
}
init_completion(&stream_info->active_comp);
@@ -1166,6 +1166,7 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev_ioctl,
stats_data->num_active_stream);
streams[num_stream++] = stream_info;
}
+ mutex_unlock(&vfe_dev_ioctl->buf_mgr->lock);
for (k = 0; k < MAX_VFE; k++) {
if (!update_vfes[k] || num_active_streams[k])
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
index 2e3a24dd1f0d..3efd5b57a029 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,7 +17,7 @@
void msm_isp_process_stats_irq(struct vfe_device *vfe_dev,
uint32_t irq_status0, uint32_t irq_status1,
- struct msm_isp_timestamp *ts);
+ uint32_t pingpong_status, struct msm_isp_timestamp *ts);
void msm_isp_stats_stream_update(struct vfe_device *vfe_dev);
int msm_isp_cfg_stats_stream(struct vfe_device *vfe_dev, void *arg);
int msm_isp_update_stats_stream(struct vfe_device *vfe_dev, void *arg);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index 027477a62122..ef41575339e4 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -403,8 +403,10 @@ static int msm_isp_start_fetch_engine_multi_pass(struct vfe_device *vfe_dev,
vfe_idx = msm_isp_get_vfe_idx_for_stream(vfe_dev, stream_info);
msm_isp_reset_framedrop(vfe_dev, stream_info);
+ mutex_lock(&vfe_dev->buf_mgr->lock);
rc = msm_isp_cfg_offline_ping_pong_address(vfe_dev, stream_info,
VFE_PING_FLAG, fe_cfg->output_buf_idx);
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
if (rc < 0) {
pr_err("%s: Fetch engine config failed\n", __func__);
return -EINVAL;
@@ -915,6 +917,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
case VIDIOC_MSM_ISP_AXI_RESTART:
mutex_lock(&vfe_dev->core_mutex);
MSM_ISP_DUAL_VFE_MUTEX_LOCK(vfe_dev);
+ mutex_lock(&vfe_dev->buf_mgr->lock);
if (atomic_read(&vfe_dev->error_info.overflow_state)
!= HALT_ENFORCED) {
rc = msm_isp_stats_restart(vfe_dev);
@@ -925,6 +928,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
pr_err_ratelimited("%s: no AXI restart, halt enforced.\n",
__func__);
}
+ mutex_unlock(&vfe_dev->buf_mgr->lock);
MSM_ISP_DUAL_VFE_MUTEX_UNLOCK(vfe_dev);
mutex_unlock(&vfe_dev->core_mutex);
break;
@@ -2054,7 +2058,8 @@ void msm_isp_prepare_tasklet_debug_info(struct vfe_device *vfe_dev,
}
static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
- uint32_t irq_status0, uint32_t irq_status1)
+ uint32_t irq_status0, uint32_t irq_status1,
+ uint32_t ping_pong_status)
{
unsigned long flags;
struct msm_vfe_tasklet_queue_cmd *queue_cmd = NULL;
@@ -2077,8 +2082,8 @@ static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
}
queue_cmd->vfeInterruptStatus0 = irq_status0;
queue_cmd->vfeInterruptStatus1 = irq_status1;
+ queue_cmd->vfe_pingpong_status = ping_pong_status;
msm_isp_get_timestamp(&queue_cmd->ts, vfe_dev);
-
queue_cmd->cmd_used = 1;
queue_cmd->vfe_dev = vfe_dev;
@@ -2092,7 +2097,7 @@ static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
irqreturn_t msm_isp_process_irq(int irq_num, void *data)
{
struct vfe_device *vfe_dev = (struct vfe_device *) data;
- uint32_t irq_status0, irq_status1;
+ uint32_t irq_status0, irq_status1, ping_pong_status;
uint32_t error_mask0, error_mask1;
vfe_dev->hw_info->vfe_ops.irq_ops.
@@ -2103,6 +2108,8 @@ irqreturn_t msm_isp_process_irq(int irq_num, void *data)
__func__, vfe_dev->pdev->id);
return IRQ_HANDLED;
}
+ ping_pong_status = vfe_dev->hw_info->vfe_ops.axi_ops.
+ get_pingpong_status(vfe_dev);
if (vfe_dev->hw_info->vfe_ops.irq_ops.preprocess_camif_irq) {
vfe_dev->hw_info->vfe_ops.irq_ops.preprocess_camif_irq(
vfe_dev, irq_status0);
@@ -2130,7 +2137,8 @@ irqreturn_t msm_isp_process_irq(int irq_num, void *data)
return IRQ_HANDLED;
}
msm_isp_prepare_irq_debug_info(vfe_dev, irq_status0, irq_status1);
- msm_isp_enqueue_tasklet_cmd(vfe_dev, irq_status0, irq_status1);
+ msm_isp_enqueue_tasklet_cmd(vfe_dev, irq_status0, irq_status1,
+ ping_pong_status);
return IRQ_HANDLED;
}
@@ -2143,7 +2151,7 @@ void msm_isp_do_tasklet(unsigned long data)
struct msm_vfe_irq_ops *irq_ops;
struct msm_vfe_tasklet_queue_cmd *queue_cmd;
struct msm_isp_timestamp ts;
- uint32_t irq_status0, irq_status1;
+ uint32_t irq_status0, irq_status1, pingpong_status;
while (1) {
spin_lock_irqsave(&tasklet->tasklet_lock, flags);
@@ -2159,6 +2167,7 @@ void msm_isp_do_tasklet(unsigned long data)
queue_cmd->vfe_dev = NULL;
irq_status0 = queue_cmd->vfeInterruptStatus0;
irq_status1 = queue_cmd->vfeInterruptStatus1;
+ pingpong_status = queue_cmd->vfe_pingpong_status;
ts = queue_cmd->ts;
spin_unlock_irqrestore(&tasklet->tasklet_lock, flags);
if (vfe_dev->vfe_open_cnt == 0) {
@@ -2183,9 +2192,11 @@ void msm_isp_do_tasklet(unsigned long data)
}
msm_isp_process_error_info(vfe_dev);
irq_ops->process_stats_irq(vfe_dev,
- irq_status0, irq_status1, &ts);
+ irq_status0, irq_status1,
+ pingpong_status, &ts);
irq_ops->process_axi_irq(vfe_dev,
- irq_status0, irq_status1, &ts);
+ irq_status0, irq_status1,
+ pingpong_status, &ts);
irq_ops->process_camif_irq(vfe_dev,
irq_status0, irq_status1, &ts);
irq_ops->process_reg_update(vfe_dev,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index f41382b5b20c..9f4544571189 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -482,6 +482,11 @@ static ssize_t sde_rot_evtlog_dump_read(struct file *file, char __user *buff,
if (__sde_rot_evtlog_dump_calc_range()) {
len = sde_rot_evtlog_dump_entry(evtlog_buf,
SDE_ROT_EVTLOG_BUF_MAX);
+ if (len < 0 || len > count) {
+ pr_err("len is more than the user buffer size\n");
+ return 0;
+ }
+
if (copy_to_user(buff, evtlog_buf, len))
return -EFAULT;
*ppos += len;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 11de57c53df9..ad806f0ce595 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -3115,7 +3115,7 @@ static int set_output_buffers(struct msm_vidc_inst *inst,
{
int rc = 0;
struct msm_smem *handle;
- struct internal_buf *binfo;
+ struct internal_buf *binfo = NULL;
u32 smem_flags = 0, buffer_size;
struct hal_buffer_requirements *output_buf, *extradata_buf;
int i;
@@ -3221,10 +3221,10 @@ static int set_output_buffers(struct msm_vidc_inst *inst,
}
return rc;
fail_set_buffers:
- kfree(binfo);
-fail_kzalloc:
msm_comm_smem_free(inst, handle);
err_no_mem:
+ kfree(binfo);
+fail_kzalloc:
return rc;
}
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 7ebccfa8072a..cb790b68920f 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -1994,6 +1994,7 @@ static struct scsi_host_template mptsas_driver_template = {
.cmd_per_lun = 7,
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = mptscsih_host_attrs,
+ .no_write_same = 1,
};
static int mptsas_get_linkerrors(struct sas_phy *phy)
diff --git a/drivers/misc/mnh/mnh-ddr.c b/drivers/misc/mnh/mnh-ddr.c
index 6698fd10dc36..47f04c0b04da 100644
--- a/drivers/misc/mnh/mnh-ddr.c
+++ b/drivers/misc/mnh/mnh-ddr.c
@@ -69,7 +69,7 @@
#define MNH_RSTC_OUTf(...) \
HW_OUTf(HWIO_SCU_BASE_ADDR, SCU, RSTC, __VA_ARGS__)
-#define WRITE_DDR_REG_CONFIG(ddrblock, regindex) \
+#define WRITE_DDR_REG_CONFIG(_state, ddrblock, regindex) \
do { \
if (_state->ddrblock[regindex]) { \
mnh_reg_write(_state->ddrblock##_base + \
@@ -78,7 +78,7 @@ do { \
} \
} while (0)
-#define WRITE_DDR_PHY_CONFIG(fsp, regindex) \
+#define WRITE_DDR_PHY_CONFIG(_state, fsp, regindex) \
do { \
if (_state->phy[fsp][regindex]) { \
mnh_reg_write(_state->phy_base + (regindex * sizeof(u32)), \
@@ -86,27 +86,27 @@ do { \
} \
} while (0)
-#define WRITE_SET_ELEMENT(regindex, regvalue) \
+#define WRITE_SET_ELEMENT(_state, regindex, regvalue) \
mnh_reg_write(_state->phy_base + (regindex * sizeof(u32)),\
regvalue)
-#define WRITE_SCU_FSP(fsp) \
+#define WRITE_SCU_FSP(_state, fsp) \
do { \
_state->fsps[fsp] &= 0xFFFFFF00;\
_state->fsps[fsp] |= 0x7d;\
MNH_SCU_OUTx(LPDDR4_FSP_SETTING, fsp, _state->fsps[fsp]); \
} while (0)
-#define SAVE_CURRENT_FSP() \
+#define SAVE_CURRENT_FSP(dev, _state) \
do { \
_state->suspend_fsp = \
MNH_SCU_INf(LPDDR4_LOW_POWER_STS, LPDDR4_CUR_FSP); \
dev_dbg(dev, "%s: saved fsp: %d\n", __func__, _state->suspend_fsp); \
} while (0)
-#define SAVED_FSP() _state->suspend_fsp
+#define SAVED_FSP(_state) _state->suspend_fsp
-#define WRITE_CLK_FROM_FSP(fsp) \
+#define WRITE_CLK_FROM_FSP(dev, _state, fsp) \
do { \
if (fsp < (MNH_DDR_NUM_FSPS)) { \
MNH_SCU_OUTf(CCU_CLK_DIV, LPDDR4_REFCLK_DIV, \
@@ -125,15 +125,20 @@ do { \
dev_err(dev, "%s: invalid fsp 0x%x", __func__, fsp); \
} while (0)
-#define SAVE_DDR_REG_CONFIG(ddrblock, regindex) \
+#define SAVE_DDR_REG_CONFIG(_state, ddrblock, regindex) \
+do { \
_state->ddrblock[regindex] = \
- mnh_reg_read(_state->ddrblock##_base + (regindex * sizeof(u32)))
+ mnh_reg_read(_state->ddrblock##_base + \
+ (regindex * sizeof(u32))); \
+} while (0)
-#define SAVE_DDR_PHY_REG_CONFIG(fsp, regindex) \
+#define SAVE_DDR_PHY_REG_CONFIG(_state, fsp, regindex) \
+do { \
_state->phy[fsp][regindex] = \
- mnh_reg_read(_state->phy_base + (regindex * sizeof(u32)))
+ mnh_reg_read(_state->phy_base + (regindex * sizeof(u32))); \
+} while (0)
-#define CLR_START(ddrblock) (_state->ddrblock[0] &= (0xFFFFFFFE))
+#define CLR_START(_state, ddrblock) (_state->ddrblock[0] &= (0xFFFFFFFE))
/* timeout for training all FSPs */
#define TRAINING_TIMEOUT msecs_to_jiffies(45)
@@ -148,8 +153,6 @@ do { \
#define LP_CMD_SBIT 5
#define INIT_DONE_SBIT 4
-static struct mnh_ddr_internal_state *_state;
-
/* read entire int_status */
u64 mnh_ddr_int_status(struct device *dev)
{
@@ -252,7 +255,8 @@ static void mnh_ddr_disable_lp(struct device *dev)
mnh_ddr_send_lp_cmd(dev, LP_CMD_EXIT_LP);
}
-static void mnh_ddr_init_internal_state(const struct mnh_ddr_reg_config *cfg)
+static void mnh_ddr_init_internal_state(struct mnh_ddr_internal_state *_state,
+ const struct mnh_ddr_reg_config *cfg)
{
_state->ctl_base = HWIO_DDR_CTL_BASE_ADDR;
_state->pi_base = HWIO_DDR_PI_BASE_ADDR;
@@ -281,8 +285,11 @@ static void mnh_ddr_init_internal_state(const struct mnh_ddr_reg_config *cfg)
_state->tref[3] = cfg->ctl[59] & 0xFFFF;
}
-void mnh_ddr_init_clocks(struct device *dev)
+static void mnh_ddr_init_clocks(struct mnh_ddr_data *data)
{
+ struct device *dev = &data->pdev->dev;
+ struct mnh_ddr_internal_state *_state = &data->_state;
+
int timeout = 0;
/* MNH_PLL_PASSCODE_SET */
@@ -305,38 +312,43 @@ void mnh_ddr_init_clocks(struct device *dev)
dev_dbg(dev, "%s lpddr4 pll locked after %d iterations",
__func__, timeout);
- WRITE_SCU_FSP(0);
- WRITE_SCU_FSP(1);
- WRITE_SCU_FSP(2);
- WRITE_SCU_FSP(3);
+ WRITE_SCU_FSP(_state, 0);
+ WRITE_SCU_FSP(_state, 1);
+ WRITE_SCU_FSP(_state, 2);
+ WRITE_SCU_FSP(_state, 3);
- WRITE_CLK_FROM_FSP(SAVED_FSP());
+ WRITE_CLK_FROM_FSP(dev, _state, SAVED_FSP(_state));
dev_dbg(dev, "%s lpddr4 pll locked", __func__);
MNH_SCU_OUTf(LPDDR4_LOW_POWER_CFG, LP4_FSP_SW_OVERRIDE, 0);
/* MNH_PLL_PASSCODE_CLR */
MNH_SCU_OUTf(PLL_PASSCODE, PASSCODE, 0x0);
}
-static void mnh_ddr_pull_config(void)
+static void mnh_ddr_pull_config(struct mnh_ddr_data *data)
{
+ struct mnh_ddr_internal_state *_state = &data->_state;
+
int index, fsp;
for (index = 0; index < MNH_DDR_NUM_CTL_REG; index++)
- SAVE_DDR_REG_CONFIG(ctl, index);
- CLR_START(ctl);
+ SAVE_DDR_REG_CONFIG(_state, ctl, index);
+ CLR_START(_state, ctl);
for (index = 0; index < MNH_DDR_NUM_PI_REG; index++)
- SAVE_DDR_REG_CONFIG(pi, index);
- CLR_START(pi);
+ SAVE_DDR_REG_CONFIG(_state, pi, index);
+ CLR_START(_state, pi);
for (fsp = 0; fsp < MNH_DDR_NUM_FSPS; fsp++) {
MNH_DDR_PHY_OUTf(1025, PHY_FREQ_SEL_INDEX, fsp);
for (index = 0; index < MNH_DDR_NUM_PHY_REG; index++)
- SAVE_DDR_PHY_REG_CONFIG(fsp, index);
+ SAVE_DDR_PHY_REG_CONFIG(_state, fsp, index);
}
}
-int mnh_ddr_suspend(struct device *dev, struct gpio_desc *iso_n)
+int mnh_ddr_suspend(struct mnh_ddr_data *data, struct gpio_desc *iso_n)
{
+ struct device *dev = &data->pdev->dev;
+ struct mnh_ddr_internal_state *_state = &data->_state;
+
mnh_ddr_disable_lp(dev);
dev_dbg(dev, "%s: tref 0x%04x 0x%04x 0x%04x 0x%04x\n",
@@ -358,8 +370,8 @@ int mnh_ddr_suspend(struct device *dev, struct gpio_desc *iso_n)
/* resume to fsp3 */
mnh_lpddr_freq_change(LPDDR_FREQ_FSP3);
- SAVE_CURRENT_FSP();
- mnh_ddr_pull_config();
+ SAVE_CURRENT_FSP(dev, _state);
+ mnh_ddr_pull_config(data);
mnh_ddr_send_lp_cmd(dev, LP_CMD_DSRPD);
dev_dbg(dev, "%s LP_STATE is 0x%x", __func__,
@@ -381,24 +393,27 @@ int mnh_ddr_suspend(struct device *dev, struct gpio_desc *iso_n)
}
EXPORT_SYMBOL(mnh_ddr_suspend);
-int mnh_ddr_resume(struct device *dev, struct gpio_desc *iso_n)
+int mnh_ddr_resume(struct mnh_ddr_data *data, struct gpio_desc *iso_n)
{
+ struct device *dev = &data->pdev->dev;
+ struct mnh_ddr_internal_state *_state = &data->_state;
+
int index, fsp;
int timeout = 0;
- mnh_ddr_init_clocks(dev);
+ mnh_ddr_init_clocks(data);
for (index = 0; index < MNH_DDR_NUM_CTL_REG; index++)
- WRITE_DDR_REG_CONFIG(ctl, index);
+ WRITE_DDR_REG_CONFIG(_state, ctl, index);
- MNH_DDR_CTL_OUTf(23, DFIBUS_FREQ_INIT, SAVED_FSP());
+ MNH_DDR_CTL_OUTf(23, DFIBUS_FREQ_INIT, SAVED_FSP(_state));
MNH_DDR_CTL_OUTf(23, DFIBUS_BOOT_FREQ, 0);
MNH_DDR_CTL_OUTf(23, PHY_INDEP_TRAIN_MODE, 0);
MNH_DDR_CTL_OUTf(23, CDNS_INTRL0, 1);
for (index = 0; index < MNH_DDR_NUM_PI_REG; index++)
- WRITE_DDR_REG_CONFIG(pi, index);
+ WRITE_DDR_REG_CONFIG(_state, pi, index);
for (fsp = 0; fsp < MNH_DDR_NUM_FSPS; fsp++) {
MNH_DDR_PHY_OUTf(1025, PHY_FREQ_SEL_MULTICAST_EN, 0);
@@ -406,7 +421,7 @@ int mnh_ddr_resume(struct device *dev, struct gpio_desc *iso_n)
for (index = 0; index < MNH_DDR_NUM_PHY_REG; index++) {
if (index != 1025)
- WRITE_DDR_PHY_CONFIG(fsp, index);
+ WRITE_DDR_PHY_CONFIG(_state, fsp, index);
}
MNH_DDR_PHY_OUTf(1084, PHY_CAL_CLK_SELECT_0, 0x4);
}
@@ -450,7 +465,7 @@ int mnh_ddr_resume(struct device *dev, struct gpio_desc *iso_n)
dev_dbg(dev, "%s got init done %llx.\n", __func__,
mnh_ddr_int_status(dev));
mnh_ddr_clr_int_status(dev);
- mnh_lpddr_freq_change(SAVED_FSP());
+ mnh_lpddr_freq_change(SAVED_FSP(_state));
dev_dbg(dev, "%s: tref 0x%04x 0x%04x 0x%04x 0x%04x\n",
__func__, MNH_DDR_CTL_INf(56, TREF_F0),
@@ -463,40 +478,38 @@ int mnh_ddr_resume(struct device *dev, struct gpio_desc *iso_n)
}
EXPORT_SYMBOL(mnh_ddr_resume);
-int mnh_ddr_po_init(struct device *dev, struct gpio_desc *iso_n)
+int mnh_ddr_po_init(struct mnh_ddr_data *data, struct gpio_desc *iso_n)
{
+ struct device *dev = &data->pdev->dev;
+ struct mnh_ddr_internal_state *_state = &data->_state;
+
int index, setindex;
unsigned long timeout;
const struct mnh_ddr_reg_config *cfg = &mnh_ddr_33_100_400_600;
- _state = devm_kzalloc(dev, sizeof(struct mnh_ddr_internal_state),
- GFP_KERNEL);
- if (!_state)
- return -ENOMEM;
-
- mnh_ddr_init_internal_state(cfg);
+ mnh_ddr_init_internal_state(_state, cfg);
dev_dbg(dev, "%s start.", __func__);
/* deassert iso_n */
gpiod_set_value_cansleep(iso_n, 1);
- mnh_ddr_init_clocks(dev);
+ mnh_ddr_init_clocks(data);
for (index = 0; index < MNH_DDR_NUM_CTL_REG; index++)
- WRITE_DDR_REG_CONFIG(ctl, index);
+ WRITE_DDR_REG_CONFIG(_state, ctl, index);
/* Make sure DRAM will request refresh rate adjustments */
MNH_DDR_CTL_OUTf(164, MR13_DATA_0, 0xD0);
for (index = 0; index < MNH_DDR_NUM_PI_REG; index++)
- WRITE_DDR_REG_CONFIG(pi, index);
+ WRITE_DDR_REG_CONFIG(_state, pi, index);
MNH_DDR_PHY_OUTf(1025, PHY_FREQ_SEL_MULTICAST_EN, 1);
MNH_DDR_PHY_OUTf(1025, PHY_FREQ_SEL_INDEX, 0);
for (index = 0; index < MNH_DDR_NUM_PHY_REG; index++)
- WRITE_DDR_PHY_CONFIG(0, index);
+ WRITE_DDR_PHY_CONFIG(_state, 0, index);
MNH_DDR_PHY_OUTf(1025, PHY_FREQ_SEL_MULTICAST_EN, 0);
MNH_DDR_PHY_OUTf(1025, PHY_FREQ_SEL_INDEX, 1);
@@ -505,8 +518,9 @@ int mnh_ddr_po_init(struct device *dev, struct gpio_desc *iso_n)
setindex = 0;
while ((setindex < MNH_DDR_PHY_SET_SIZE) &&
(cfg->phy_setA[setindex][0] != 0xFFFFFFFF)) {
- WRITE_SET_ELEMENT(cfg->phy_setA[setindex][0],
- cfg->phy_setA[setindex][1]);
+ WRITE_SET_ELEMENT(_state,
+ cfg->phy_setA[setindex][0],
+ cfg->phy_setA[setindex][1]);
setindex++;
}
@@ -516,8 +530,9 @@ int mnh_ddr_po_init(struct device *dev, struct gpio_desc *iso_n)
setindex = 0;
while ((setindex < MNH_DDR_PHY_SET_SIZE) &&
(cfg->phy_setB[setindex][0] != 0xFFFFFFFF)) {
- WRITE_SET_ELEMENT(cfg->phy_setB[setindex][0],
- cfg->phy_setB[setindex][1]);
+ WRITE_SET_ELEMENT(_state,
+ cfg->phy_setB[setindex][0],
+ cfg->phy_setB[setindex][1]);
setindex++;
}
@@ -558,8 +573,10 @@ int mnh_ddr_po_init(struct device *dev, struct gpio_desc *iso_n)
}
EXPORT_SYMBOL(mnh_ddr_po_init);
-u32 mnh_ddr_mbist(struct device *dev, enum mnh_ddr_bist_type bist_type)
+u32 mnh_ddr_mbist(struct mnh_ddr_data *data, enum mnh_ddr_bist_type bist_type)
{
+ struct device *dev = &data->pdev->dev;
+
u32 result = 0;
u32 timeout = 1000000;
const u32 pattern[] = {
@@ -624,3 +641,11 @@ u32 mnh_ddr_mbist(struct device *dev, enum mnh_ddr_bist_type bist_type)
return result;
}
EXPORT_SYMBOL(mnh_ddr_mbist);
+
+int mnh_ddr_platform_init(struct platform_device *pdev,
+ struct mnh_ddr_data *data)
+{
+ data->pdev = pdev;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mnh_ddr_platform_init);
diff --git a/drivers/misc/mnh/mnh-ddr.h b/drivers/misc/mnh/mnh-ddr.h
index 86a7d5e62e07..372ca55d0505 100644
--- a/drivers/misc/mnh/mnh-ddr.h
+++ b/drivers/misc/mnh/mnh-ddr.h
@@ -20,6 +20,7 @@
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/gpio/consumer.h>
+#include <linux/platform_device.h>
#define MNH_DDR_NUM_CTL_REG (558 + 1)
#define MNH_DDR_NUM_PHY_REG (1100 + 1)
@@ -58,11 +59,18 @@ enum mnh_ddr_bist_type {
LIMITED_MOVI1_3N,
};
-int mnh_ddr_po_init(struct device *dev, struct gpio_desc *iso_n);
-int mnh_ddr_resume(struct device *dev, struct gpio_desc *iso_n);
-int mnh_ddr_suspend(struct device *dev, struct gpio_desc *iso_n);
+struct mnh_ddr_data {
+ struct platform_device *pdev;
+ struct mnh_ddr_internal_state _state;
+};
+
+int mnh_ddr_platform_init(struct platform_device *pdev,
+ struct mnh_ddr_data *data);
+int mnh_ddr_po_init(struct mnh_ddr_data *data, struct gpio_desc *iso_n);
+int mnh_ddr_resume(struct mnh_ddr_data *data, struct gpio_desc *iso_n);
+int mnh_ddr_suspend(struct mnh_ddr_data *data, struct gpio_desc *iso_n);
int mnh_ddr_clr_int_status(struct device *dev);
u64 mnh_ddr_int_status(struct device *dev);
-u32 mnh_ddr_mbist(struct device *dev, enum mnh_ddr_bist_type bist_type);
+u32 mnh_ddr_mbist(struct mnh_ddr_data *data, enum mnh_ddr_bist_type bist_type);
#endif /* __MNH_DDR_H__ */
diff --git a/drivers/misc/mnh/mnh-sm.c b/drivers/misc/mnh/mnh-sm.c
index 9b0e3279ea72..3652e5b3c779 100644
--- a/drivers/misc/mnh/mnh-sm.c
+++ b/drivers/misc/mnh/mnh-sm.c
@@ -191,6 +191,9 @@ struct mnh_sm_device {
/* state of the ddr channel */
enum mnh_ddr_status ddr_status;
+ /* mnh-ddr data */
+ struct mnh_ddr_data mnh_ddr_data;
+
/* pin used for ddr pad isolation */
struct gpio_desc *ddr_pad_iso_n_pin;
@@ -1373,8 +1376,9 @@ static ssize_t ddr_mbist_store(struct device *dev,
return -EINVAL;
mnh_pwr_set_state(MNH_PWR_S0);
- mnh_ddr_po_init(mnh_sm_dev->dev, mnh_sm_dev->ddr_pad_iso_n_pin);
- mnh_ddr_mbist(dev, val);
+ mnh_ddr_po_init(&mnh_sm_dev->mnh_ddr_data,
+ mnh_sm_dev->ddr_pad_iso_n_pin);
+ mnh_ddr_mbist(&mnh_sm_dev->mnh_ddr_data, val);
mnh_pwr_set_state(MNH_PWR_S4);
return count;
@@ -1484,7 +1488,8 @@ static int mnh_sm_config_ddr(void)
int ret;
/* Initialize DDR */
- ret = mnh_ddr_po_init(mnh_sm_dev->dev, mnh_sm_dev->ddr_pad_iso_n_pin);
+ ret = mnh_ddr_po_init(&mnh_sm_dev->mnh_ddr_data,
+ mnh_sm_dev->ddr_pad_iso_n_pin);
if (ret) {
dev_err(mnh_sm_dev->dev, "%s: ddr training failed (%d)\n",
__func__, ret);
@@ -1498,7 +1503,8 @@ static int mnh_sm_config_ddr(void)
static int mnh_sm_resume_ddr(void)
{
/* deassert pad isolation, take ddr out of self-refresh mode */
- mnh_ddr_resume(mnh_sm_dev->dev, mnh_sm_dev->ddr_pad_iso_n_pin);
+ mnh_ddr_resume(&mnh_sm_dev->mnh_ddr_data,
+ mnh_sm_dev->ddr_pad_iso_n_pin);
mnh_sm_dev->ddr_status = MNH_DDR_ACTIVE;
return 0;
}
@@ -1506,7 +1512,8 @@ static int mnh_sm_resume_ddr(void)
static int mnh_sm_suspend_ddr(void)
{
/* put ddr into self-refresh mode, assert pad isolation */
- mnh_ddr_suspend(mnh_sm_dev->dev, mnh_sm_dev->ddr_pad_iso_n_pin);
+ mnh_ddr_suspend(&mnh_sm_dev->mnh_ddr_data,
+ mnh_sm_dev->ddr_pad_iso_n_pin);
mnh_sm_dev->ddr_status = MNH_DDR_SELF_REFRESH;
return 0;
}
@@ -2405,7 +2412,18 @@ static int mnh_sm_probe(struct platform_device *pdev)
mnh_crypto_config_sysfs();
/* initialize mnh-clk driver */
- mnh_clk_init(dev, HWIO_SCU_BASE_ADDR);
+ error = mnh_clk_init(dev, HWIO_SCU_BASE_ADDR);
+ if (error) {
+ dev_err(dev, "failed to initialize mnh-clk (%d)\n", error);
+ goto fail_probe_2;
+ }
+
+ /* initialize mnh-ddr driver */
+ error = mnh_ddr_platform_init(pdev, &mnh_sm_dev->mnh_ddr_data);
+ if (error) {
+ dev_err(dev, "failed to initialize mnh-ddr (%d)\n", error);
+ goto fail_probe_2;
+ }
mnh_sm_dev->initialized = true;
dev_info(dev, "MNH SM initialized successfully\n");
@@ -2438,6 +2456,7 @@ static struct platform_driver mnh_sm = {
.driver = {
.name = DEVICE_NAME,
.of_match_table = mnh_sm_ids,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
};
diff --git a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
index e602650c4cb5..ebe9ab763a68 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_hwacc_effects.c
@@ -161,7 +161,6 @@ static int audio_effects_shared_ioctl(struct file *file, unsigned cmd,
pr_err("%s: Read buffer Allocation failed rc = %d\n",
__func__, rc);
rc = -ENOMEM;
- mutex_unlock(&effects->lock);
goto readbuf_fail;
}
atomic_set(&effects->out_count, effects->config.output.num_buf);
@@ -176,7 +175,6 @@ static int audio_effects_shared_ioctl(struct file *file, unsigned cmd,
if (rc < 0) {
pr_err("%s: pcm read block config failed\n", __func__);
rc = -EINVAL;
- mutex_unlock(&effects->lock);
goto cfg_fail;
}
pr_debug("%s: dec: sample_rate: %d, num_channels: %d, bit_width: %d\n",
@@ -191,7 +189,6 @@ static int audio_effects_shared_ioctl(struct file *file, unsigned cmd,
pr_err("%s: pcm write format block config failed\n",
__func__);
rc = -EINVAL;
- mutex_unlock(&effects->lock);
goto cfg_fail;
}
@@ -325,6 +322,7 @@ ioctl_fail:
readbuf_fail:
q6asm_audio_client_buf_free_contiguous(IN,
effects->ac);
+ mutex_unlock(&effects->lock);
return rc;
cfg_fail:
q6asm_audio_client_buf_free_contiguous(IN,
@@ -332,6 +330,7 @@ cfg_fail:
q6asm_audio_client_buf_free_contiguous(OUT,
effects->ac);
effects->buf_alloc = 0;
+ mutex_unlock(&effects->lock);
return rc;
}
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index 311f6d639d06..24e04ff595fe 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -167,19 +167,9 @@ static int mmc_bus_suspend(struct device *dev)
if (mmc_bus_needs_resume(host))
return 0;
ret = host->bus_ops->suspend(host);
-
- /*
- * bus_ops->suspend may fail due to some reason
- * In such cases if we return error to PM framework
- * from here without calling pm_generic_resume then mmc
- * request may get stuck since PM framework will assume
- * that mmc bus is not suspended (because of error) and
- * it won't call resume again.
- *
- * So in case of error call pm_generic_resume().
- */
if (ret)
pm_generic_resume(dev);
+
return ret;
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 28bc2346ba0a..7e991696971e 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1009,9 +1009,10 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
completion = ktime_get();
delta_us = ktime_us_delta(completion,
mrq->io_start);
- blk_update_latency_hist(&host->io_lat_s,
- (mrq->data->flags & MMC_DATA_READ),
- delta_us);
+ blk_update_latency_hist(
+ (mrq->data->flags & MMC_DATA_READ) ?
+ &host->io_lat_read :
+ &host->io_lat_write, delta_us);
}
#endif
trace_mmc_blk_rw_end(cmd->opcode, cmd->arg, mrq->data);
@@ -4545,8 +4546,14 @@ static ssize_t
latency_hist_show(struct device *dev, struct device_attribute *attr, char *buf)
{
struct mmc_host *host = cls_dev_to_mmc_host(dev);
+ size_t written_bytes;
- return blk_latency_hist_show(&host->io_lat_s, buf);
+ written_bytes = blk_latency_hist_show("Read", &host->io_lat_read,
+ buf, PAGE_SIZE);
+ written_bytes += blk_latency_hist_show("Write", &host->io_lat_write,
+ buf + written_bytes, PAGE_SIZE - written_bytes);
+
+ return written_bytes;
}
/*
@@ -4564,9 +4571,10 @@ latency_hist_store(struct device *dev, struct device_attribute *attr,
if (kstrtol(buf, 0, &value))
return -EINVAL;
- if (value == BLK_IO_LAT_HIST_ZERO)
- blk_zero_latency_hist(&host->io_lat_s);
- else if (value == BLK_IO_LAT_HIST_ENABLE ||
+ if (value == BLK_IO_LAT_HIST_ZERO) {
+ memset(&host->io_lat_read, 0, sizeof(host->io_lat_read));
+ memset(&host->io_lat_write, 0, sizeof(host->io_lat_write));
+ } else if (value == BLK_IO_LAT_HIST_ENABLE ||
value == BLK_IO_LAT_HIST_DISABLE)
host->latency_hist_enabled = value;
return count;
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 286b97a304cf..4509ee0b294a 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -45,6 +45,7 @@
#define I82802AB 0x00ad
#define I82802AC 0x00ac
#define PF38F4476 0x881c
+#define M28F00AP30 0x8963
/* STMicroelectronics chips */
#define M50LPW080 0x002F
#define M50FLW080A 0x0080
@@ -375,6 +376,17 @@ static void cfi_fixup_major_minor(struct cfi_private *cfi,
extp->MinorVersion = '1';
}
+static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
+{
+ /*
+ * Micron (was Numonyx) 1Gbit bottom-boot chips are buggy w.r.t.
+ * Erase Suspend for their small erase blocks (0x8000)
+ */
+ if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
+ return 1;
+ return 0;
+}
+
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
@@ -825,21 +837,30 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
(mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
goto sleep;
+ /* Do not allow suspend if a read/write targets the erase-block address */
+ if ((adr & chip->in_progress_block_mask) ==
+ chip->in_progress_block_addr)
+ goto sleep;
+
+ /* do not suspend small EBs on buggy Micron chips */
+ if (cfi_is_micron_28F00AP30(cfi, chip) &&
+ (chip->in_progress_block_mask == ~(0x8000-1)))
+ goto sleep;
/* Erase suspend */
- map_write(map, CMD(0xB0), adr);
+ map_write(map, CMD(0xB0), chip->in_progress_block_addr);
/* If the flash has finished erasing, then 'erase suspend'
* appears to make some (28F320) flash devices switch to
* 'read' mode. Make sure that we switch to 'read status'
* mode so we get the right data. --rmk
*/
- map_write(map, CMD(0x70), adr);
+ map_write(map, CMD(0x70), chip->in_progress_block_addr);
chip->oldstate = FL_ERASING;
chip->state = FL_ERASE_SUSPENDING;
chip->erase_suspended = 1;
for (;;) {
- status = map_read(map, adr);
+ status = map_read(map, chip->in_progress_block_addr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
@@ -1035,8 +1056,8 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
sending the 0x70 (Read Status) command to an erasing
chip and expecting it to be ignored, that's what we
do. */
- map_write(map, CMD(0xd0), adr);
- map_write(map, CMD(0x70), adr);
+ map_write(map, CMD(0xd0), chip->in_progress_block_addr);
+ map_write(map, CMD(0x70), chip->in_progress_block_addr);
chip->oldstate = FL_READY;
chip->state = FL_ERASING;
break;
@@ -1927,6 +1948,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
map_write(map, CMD(0xD0), adr);
chip->state = FL_ERASING;
chip->erase_suspended = 0;
+ chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(len - 1);
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, len,
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index c3624eb571d1..31448a2b39ae 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -814,9 +814,10 @@ static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr
(mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
goto sleep;
- /* We could check to see if we're trying to access the sector
- * that is currently being erased. However, no user will try
- * anything like that so we just wait for the timeout. */
+ /* Do not allow suspend if a read/write targets the erase-block address */
+ if ((adr & chip->in_progress_block_mask) ==
+ chip->in_progress_block_addr)
+ goto sleep;
/* Erase suspend */
/* It's harmless to issue the Erase-Suspend and Erase-Resume
@@ -2265,6 +2266,7 @@ static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(map->size - 1);
INVALIDATE_CACHE_UDELAY(map, chip,
adr, map->size,
@@ -2354,6 +2356,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
chip->state = FL_ERASING;
chip->erase_suspended = 0;
chip->in_progress_block_addr = adr;
+ chip->in_progress_block_mask = ~(len - 1);
INVALIDATE_CACHE_UDELAY(map, chip,
adr, len,
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index e2a239c1f40b..40a335c6b792 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -1032,14 +1032,87 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
/* Loop over status bytes, accumulating ECC status. */
status = auxiliary_virt + nfc_geo->auxiliary_status_offset;
+ read_page_swap_end(this, buf, nfc_geo->payload_size,
+ this->payload_virt, this->payload_phys,
+ nfc_geo->payload_size,
+ payload_virt, payload_phys);
+
for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) {
if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
continue;
if (*status == STATUS_UNCORRECTABLE) {
+ int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
+ u8 *eccbuf = this->raw_buffer;
+ int offset, bitoffset;
+ int eccbytes;
+ int flips;
+
+ /* Read ECC bytes into our internal raw_buffer */
+ offset = nfc_geo->metadata_size * 8;
+ offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
+ offset -= eccbits;
+ bitoffset = offset % 8;
+ eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
+ offset /= 8;
+ eccbytes -= offset;
+ chip->cmdfunc(mtd, NAND_CMD_RNDOUT, offset, -1);
+ chip->read_buf(mtd, eccbuf, eccbytes);
+
+ /*
+ * ECC data are not byte aligned and we may have
+ * in-band data in the first and last byte of
+ * eccbuf. Set non-eccbits to one so that
+ * nand_check_erased_ecc_chunk() does not count them
+ * as bitflips.
+ */
+ if (bitoffset)
+ eccbuf[0] |= GENMASK(bitoffset - 1, 0);
+
+ bitoffset = (bitoffset + eccbits) % 8;
+ if (bitoffset)
+ eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
+
+ /*
+ * The ECC hardware has an uncorrectable ECC status
+ * code in case we have bitflips in an erased page. As
+ * nothing was written into this subpage, the ECC is
+ * obviously wrong and we cannot trust it. We assume
+ * at this point that we are reading an erased page and
+ * try to correct the bitflips in the buffer, up to
+ * ecc_strength bitflips. If this is a page with random
+ * data, we exceed this number of bitflips and have an
+ * ECC failure. Otherwise we use the corrected buffer.
+ */
+ if (i == 0) {
+ /* The first block includes metadata */
+ flips = nand_check_erased_ecc_chunk(
+ buf + i * nfc_geo->ecc_chunk_size,
+ nfc_geo->ecc_chunk_size,
+ eccbuf, eccbytes,
+ auxiliary_virt,
+ nfc_geo->metadata_size,
+ nfc_geo->ecc_strength);
+ } else {
+ flips = nand_check_erased_ecc_chunk(
+ buf + i * nfc_geo->ecc_chunk_size,
+ nfc_geo->ecc_chunk_size,
+ eccbuf, eccbytes,
+ NULL, 0,
+ nfc_geo->ecc_strength);
+ }
+
+ if (flips > 0) {
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ flips);
+ mtd->ecc_stats.corrected += flips;
+ continue;
+ }
+
mtd->ecc_stats.failed++;
continue;
}
+
mtd->ecc_stats.corrected += *status;
max_bitflips = max_t(unsigned int, max_bitflips, *status);
}
@@ -1062,11 +1135,6 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
}
- read_page_swap_end(this, buf, nfc_geo->payload_size,
- this->payload_virt, this->payload_phys,
- nfc_geo->payload_size,
- payload_virt, payload_phys);
-
return max_bitflips;
}
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index bb9e9fc45e1b..82d23bd3a742 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -453,7 +453,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
{
int i;
- if (!client_info->slave)
+ if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
return;
for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 278d12888cab..339118f3c718 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1614,8 +1614,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
} /* switch(bond_mode) */
#ifdef CONFIG_NET_POLL_CONTROLLER
- slave_dev->npinfo = bond->dev->npinfo;
- if (slave_dev->npinfo) {
+ if (bond->dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
res = -EBUSY;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index db1855b0e08f..59f891bebcc6 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1175,7 +1175,7 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
skb = alloc_can_skb(priv->netdev, &cf);
if (!skb) {
- stats->tx_dropped++;
+ stats->rx_dropped++;
return;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3bba92fc9c1a..1325825d5225 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8722,14 +8722,15 @@ static void tg3_free_consistent(struct tg3 *tp)
tg3_mem_rx_release(tp);
tg3_mem_tx_release(tp);
- /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
- tg3_full_lock(tp, 0);
+ /* tp->hw_stats can be referenced safely:
+ * 1. under rtnl_lock
+ * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
+ */
if (tp->hw_stats) {
dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
- tg3_full_unlock(tp);
}
/*
@@ -14163,7 +14164,7 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
struct tg3 *tp = netdev_priv(dev);
spin_lock_bh(&tp->lock);
- if (!tp->hw_stats) {
+ if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
*stats = tp->net_stats_prev;
spin_unlock_bh(&tp->lock);
return stats;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ddb5541882f5..bcfac000199e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -967,6 +967,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
if (!coal->tx_max_coalesced_frames_irq)
return -EINVAL;
+ if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
+ netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
+ __func__, MLX4_EN_MAX_COAL_TIME);
+ return -ERANGE;
+ }
+
+ if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
+ coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
+ netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
+ __func__, MLX4_EN_MAX_COAL_PKTS);
+ return -ERANGE;
+ }
+
priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TARGET :
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 10aa6544cf4d..607daaffae98 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -140,6 +140,9 @@ enum {
#define MLX4_EN_TX_COAL_PKTS 16
#define MLX4_EN_TX_COAL_TIME 0x10
+#define MLX4_EN_MAX_COAL_PKTS U16_MAX
+#define MLX4_EN_MAX_COAL_TIME U16_MAX
+
#define MLX4_EN_RX_RATE_LOW 400000
#define MLX4_EN_RX_COAL_TIME_LOW 0
#define MLX4_EN_RX_RATE_HIGH 450000
@@ -518,8 +521,8 @@ struct mlx4_en_priv {
u16 rx_usecs_low;
u32 pkt_rate_high;
u16 rx_usecs_high;
- u16 sample_interval;
- u16 adaptive_rx_coal;
+ u32 sample_interval;
+ u32 adaptive_rx_coal;
u32 msg_enable;
u32 loopback_ok;
u32 validate_loopback;
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index ef668d300800..d987d571fdd6 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2229,7 +2229,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;
- disable_irq(irq);
+ disable_irq_nosync(irq);
rtl8139_interrupt(irq, dev);
enable_irq(irq);
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index a82c89af7124..8b4069ea52ce 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4832,6 +4832,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
rtl_generic_op(tp, tp->pll_power_ops.up);
+
+ /* give MAC/PHY some time to resume */
+ msleep(20);
}
static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index ab6051a43134..ccebf89aa1e4 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3442,7 +3442,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
len = (val & RCR_ENTRY_L2_LEN) >>
RCR_ENTRY_L2_LEN_SHIFT;
- len -= ETH_FCS_LEN;
+ append_size = len + ETH_HLEN + ETH_FCS_LEN;
addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
@@ -3452,7 +3452,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
RCR_ENTRY_PKTBUFSZ_SHIFT];
off = addr & ~PAGE_MASK;
- append_size = rcr_size;
if (num_rcr == 1) {
int ptype;
@@ -3465,7 +3464,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
else
skb_checksum_none_assert(skb);
} else if (!(val & RCR_ENTRY_MULTI))
- append_size = len - skb->len;
+ append_size = append_size - skb->len;
niu_rx_skb_append(skb, page, off, append_size, rcr_size);
if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index b7b859c3a0c7..583d50f80b24 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -638,6 +638,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
error = -EINVAL;
+
+ if (sockaddr_len != sizeof(struct sockaddr_pppox))
+ goto end;
+
if (sp->sa_protocol != PX_PROTO_OE)
goto end;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 9bca36e1fefd..e74709e4b5dd 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -247,6 +247,17 @@ static void __team_option_inst_mark_removed_port(struct team *team,
}
}
+static bool __team_option_inst_tmp_find(const struct list_head *opts,
+ const struct team_option_inst *needle)
+{
+ struct team_option_inst *opt_inst;
+
+ list_for_each_entry(opt_inst, opts, tmp_list)
+ if (opt_inst == needle)
+ return true;
+ return false;
+}
+
static int __team_options_register(struct team *team,
const struct team_option *option,
size_t option_count)
@@ -1039,14 +1050,11 @@ static void team_port_leave(struct team *team, struct team_port *port)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int __team_port_enable_netpoll(struct team_port *port)
{
struct netpoll *np;
int err;
- if (!team->dev->npinfo)
- return 0;
-
np = kzalloc(sizeof(*np), GFP_KERNEL);
if (!np)
return -ENOMEM;
@@ -1060,6 +1068,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port)
return err;
}
+static int team_port_enable_netpoll(struct team_port *port)
+{
+ if (!port->team->dev->npinfo)
+ return 0;
+
+ return __team_port_enable_netpoll(port);
+}
+
static void team_port_disable_netpoll(struct team_port *port)
{
struct netpoll *np = port->np;
@@ -1074,7 +1090,7 @@ static void team_port_disable_netpoll(struct team_port *port)
kfree(np);
}
#else
-static int team_port_enable_netpoll(struct team *team, struct team_port *port)
+static int team_port_enable_netpoll(struct team_port *port)
{
return 0;
}
@@ -1181,7 +1197,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_vids_add;
}
- err = team_port_enable_netpoll(team, port);
+ err = team_port_enable_netpoll(port);
if (err) {
netdev_err(dev, "Failed to enable netpoll on device %s\n",
portname);
@@ -1889,7 +1905,7 @@ static int team_netpoll_setup(struct net_device *dev,
mutex_lock(&team->lock);
list_for_each_entry(port, &team->port_list, list) {
- err = team_port_enable_netpoll(team, port);
+ err = __team_port_enable_netpoll(port);
if (err) {
__team_netpoll_cleanup(team);
break;
@@ -2544,6 +2560,14 @@ static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
if (err)
goto team_put;
opt_inst->changed = true;
+
+ /* dumb/evil user-space can send us duplicate opt,
+ * keep only the last one
+ */
+ if (__team_option_inst_tmp_find(&opt_inst_list,
+ opt_inst))
+ continue;
+
list_add(&opt_inst->tmp_list, &opt_inst_list);
}
if (!opt_found) {
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 6578127db847..f71abe50ea6f 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -461,6 +461,7 @@ static const struct driver_info wwan_info = {
#define REALTEK_VENDOR_ID 0x0bda
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
+#define LINKSYS_VENDOR_ID 0x13b1
#define NVIDIA_VENDOR_ID 0x0955
#define HP_VENDOR_ID 0x03f0
@@ -650,6 +651,15 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+#if IS_ENABLED(CONFIG_USB_RTL8152)
+/* Linksys USB3GIGV1 Ethernet Adapter */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(LINKSYS_VENDOR_ID, 0x0041, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+#endif
+
/* Lenovo Thinkpad USB 3.0 Ethernet Adapters (based on Realtek RTL8153) */
{
USB_DEVICE_AND_INTERFACE_INFO(LENOVO_VENDOR_ID, 0x7205, USB_CLASS_COMM,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index b0ea8dee5f06..8aaa09b3c753 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -631,6 +631,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x05c6, 0x9080, 8)},
{QMI_FIXED_INTF(0x05c6, 0x9083, 3)},
{QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
+ {QMI_FIXED_INTF(0x05c6, 0x90b2, 3)}, /* ublox R410M */
{QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
{QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
{QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
@@ -854,6 +855,18 @@ static int qmi_wwan_probe(struct usb_interface *intf,
id->driver_info = (unsigned long)&qmi_wwan_info;
}
+ /* There are devices where the same interface number can be
+ * configured as different functions. We should only bind to
+ * vendor-specific functions when matching on interface number.
+ */
+ if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
+ desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
+ dev_dbg(&intf->dev,
+ "Rejecting interface number match for class %02x\n",
+ desc->bInterfaceClass);
+ return -ENODEV;
+ }
+
/* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 89950f5cea71..b2c1a435357f 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -506,6 +506,7 @@ enum rtl8152_flags {
#define VENDOR_ID_REALTEK 0x0bda
#define VENDOR_ID_SAMSUNG 0x04e8
#define VENDOR_ID_LENOVO 0x17ef
+#define VENDOR_ID_LINKSYS 0x13b1
#define VENDOR_ID_NVIDIA 0x0955
#define MCU_TYPE_PLA 0x0100
@@ -4376,6 +4377,7 @@ static struct usb_device_id rtl8152_table[] = {
{REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x7205)},
{REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f)},
+ {REALTEK_USB_DEVICE(VENDOR_ID_LINKSYS, 0x0041)},
{REALTEK_USB_DEVICE(VENDOR_ID_NVIDIA, 0x09ff)},
{}
};
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 437ea2c192b3..16d9bd4385de 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -564,6 +564,11 @@ static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
return IEEE80211_TKIP_IV_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
return IEEE80211_CCMP_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ return IEEE80211_CCMP_256_HDR_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return IEEE80211_GCMP_HDR_LEN;
case HTT_RX_MPDU_ENCRYPT_WEP128:
case HTT_RX_MPDU_ENCRYPT_WAPI:
break;
@@ -589,6 +594,11 @@ static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
return IEEE80211_TKIP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
return IEEE80211_CCMP_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ return IEEE80211_CCMP_256_MIC_LEN;
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return IEEE80211_GCMP_MIC_LEN;
case HTT_RX_MPDU_ENCRYPT_WEP128:
case HTT_RX_MPDU_ENCRYPT_WAPI:
break;
@@ -1040,9 +1050,21 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
hdr = (void *)msdu->data;
/* Tail */
- if (status->flag & RX_FLAG_IV_STRIPPED)
+ if (status->flag & RX_FLAG_IV_STRIPPED) {
skb_trim(msdu, msdu->len -
ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ } else {
+ /* MIC */
+ if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
+ enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+ skb_trim(msdu, msdu->len - 8);
+
+ /* ICV */
+ if (status->flag & RX_FLAG_ICV_STRIPPED &&
+ enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ }
/* MMIC */
if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
@@ -1064,7 +1086,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
struct sk_buff *msdu,
struct ieee80211_rx_status *status,
- const u8 first_hdr[64])
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
struct htt_rx_desc *rxd;
@@ -1072,6 +1095,7 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
int l3_pad_bytes;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
/* Delivered decapped frame:
* [nwifi 802.11 header] <-- replaced with 802.11 hdr
@@ -1100,6 +1124,14 @@ static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
/* push original 802.11 header */
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
/* original 802.11 header has a different DA and in
@@ -1160,6 +1192,7 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
u8 sa[ETH_ALEN];
int l3_pad_bytes;
struct htt_rx_desc *rxd;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
/* Delivered decapped frame:
* [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
@@ -1188,6 +1221,14 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
/* push original 802.11 header */
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
/* original 802.11 header has a different DA and in
@@ -1201,12 +1242,14 @@ static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
struct sk_buff *msdu,
struct ieee80211_rx_status *status,
- const u8 first_hdr[64])
+ const u8 first_hdr[64],
+ enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
size_t hdr_len;
int l3_pad_bytes;
struct htt_rx_desc *rxd;
+ int bytes_aligned = ar->hw_params.decap_align_bytes;
/* Delivered decapped frame:
* [amsdu header] <-- replaced with 802.11 hdr
@@ -1222,6 +1265,14 @@ static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+ if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
+ memcpy(skb_push(msdu,
+ ath10k_htt_rx_crypto_param_len(ar, enctype)),
+ (void *)hdr + round_up(hdr_len, bytes_aligned),
+ ath10k_htt_rx_crypto_param_len(ar, enctype));
+ }
+
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}
@@ -1256,13 +1307,15 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
is_decrypted);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
- ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr);
+ ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
+ enctype);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
break;
case RX_MSDU_DECAP_8023_SNAP_LLC:
- ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr);
+ ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
+ enctype);
break;
}
}
@@ -1305,7 +1358,8 @@ static void ath10k_htt_rx_h_csum_offload(struct sk_buff *msdu)
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
struct sk_buff_head *amsdu,
- struct ieee80211_rx_status *status)
+ struct ieee80211_rx_status *status,
+ bool fill_crypt_header)
{
struct sk_buff *first;
struct sk_buff *last;
@@ -1315,7 +1369,6 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type enctype;
u8 first_hdr[64];
u8 *qos;
- size_t hdr_len;
bool has_fcs_err;
bool has_crypto_err;
bool has_tkip_err;
@@ -1340,15 +1393,17 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
* decapped header. It'll be used for undecapping of each MSDU.
*/
hdr = (void *)rxd->rx_hdr_status;
- hdr_len = ieee80211_hdrlen(hdr->frame_control);
- memcpy(first_hdr, hdr, hdr_len);
+ memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
/* Each A-MSDU subframe will use the original header as the base and be
* reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
*/
hdr = (void *)first_hdr;
- qos = ieee80211_get_qos_ctl(hdr);
- qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+
+ if (ieee80211_is_data_qos(hdr->frame_control)) {
+ qos = ieee80211_get_qos_ctl(hdr);
+ qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
+ }
/* Some attention flags are valid only in the last MSDU. */
last = skb_peek_tail(amsdu);
@@ -1395,9 +1450,14 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
status->flag |= RX_FLAG_DECRYPTED;
if (likely(!is_mgmt))
- status->flag |= RX_FLAG_IV_STRIPPED |
- RX_FLAG_MMIC_STRIPPED;
-}
+ status->flag |= RX_FLAG_MMIC_STRIPPED;
+
+ if (fill_crypt_header)
+ status->flag |= RX_FLAG_MIC_STRIPPED |
+ RX_FLAG_ICV_STRIPPED;
+ else
+ status->flag |= RX_FLAG_IV_STRIPPED;
+ }
skb_queue_walk(amsdu, msdu) {
ath10k_htt_rx_h_csum_offload(msdu);
@@ -1413,6 +1473,9 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
if (is_mgmt)
continue;
+ if (fill_crypt_header)
+ continue;
+
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
}
@@ -1423,6 +1486,9 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
struct ieee80211_rx_status *status)
{
struct sk_buff *msdu;
+ struct sk_buff *first_subframe;
+
+ first_subframe = skb_peek(amsdu);
while ((msdu = __skb_dequeue(amsdu))) {
/* Setup per-MSDU flags */
@@ -1431,6 +1497,13 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
else
status->flag |= RX_FLAG_AMSDU_MORE;
+ if (msdu == first_subframe) {
+ first_subframe = NULL;
+ status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
+ } else {
+ status->flag |= RX_FLAG_ALLOW_SAME_PN;
+ }
+
ath10k_process_rx(ar, status, msdu);
}
}
@@ -1573,7 +1646,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
ath10k_htt_rx_h_unchain(ar, &amsdu, ret > 0);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
return num_msdus;
@@ -1912,7 +1985,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
num_msdus += skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status);
- ath10k_htt_rx_h_mpdu(ar, &amsdu, status);
+ ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
ath10k_htt_rx_h_deliver(ar, &amsdu, status);
break;
case -EAGAIN:
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index 1c59b832a27e..04e2f17ebdd0 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -5822,9 +5822,8 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr, smps, err);
}
- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
- changed & IEEE80211_RC_NSS_CHANGED) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
+ if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
sta->addr);
err = ath10k_station_assoc(ar, arvif->vif, sta, true);
diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h
index bb711b525af8..5499bd2712e4 100644
--- a/drivers/net/wireless/ath/ath10k/rx_desc.h
+++ b/drivers/net/wireless/ath/ath10k/rx_desc.h
@@ -252,6 +252,9 @@ enum htt_rx_mpdu_encrypt_type {
HTT_RX_MPDU_ENCRYPT_WAPI = 5,
HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2 = 6,
HTT_RX_MPDU_ENCRYPT_NONE = 7,
+ HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2 = 8,
+ HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2 = 9,
+ HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2 = 10,
};
#define RX_MPDU_START_INFO0_PEER_IDX_MASK 0x000007ff
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 41382f89abe1..4435c7bbb625 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1595,6 +1595,10 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
int count = 50;
u32 reg, last_val;
+ /* Check if chip failed to wake up */
+ if (REG_READ(ah, AR_CFG) == 0xdeadbeef)
+ return false;
+
if (AR_SREV_9300(ah))
return !ath9k_hw_detect_mac_hang(ah);
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index 598c9cd06e81..6c47a7336c38 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -57,7 +57,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
RX_FLAG_MMIC_STRIPPED |
RX_FLAG_DECRYPTED;
- wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
+ wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%llx\n", status.flag);
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index 1d2d9b2e9aca..56f81311482c 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -1365,8 +1365,14 @@ int wmi_set_ie(struct wil6210_priv *wil, u8 type, u16 ie_len, const void *ie)
};
int rc;
u16 len = sizeof(struct wmi_set_appie_cmd) + ie_len;
- struct wmi_set_appie_cmd *cmd = kzalloc(len, GFP_KERNEL);
+ struct wmi_set_appie_cmd *cmd;
+ if (len < ie_len) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+ cmd = kzalloc(len, GFP_KERNEL);
if (!cmd) {
rc = -ENOMEM;
goto out;
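
The new length check in wmi_set_ie() guards against u16 wrap-around: len is a u16, so sizeof(struct wmi_set_appie_cmd) + ie_len can wrap past 65535 and end up smaller than ie_len, which the comparison catches before kzalloc() is called with a too-short buffer. A minimal userspace model of the same guard (sizes are illustrative):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* hdr_len stands in for sizeof(struct wmi_set_appie_cmd). */
static int total_len(uint16_t hdr_len, uint16_t ie_len)
{
	uint16_t len = hdr_len + ie_len;	/* may wrap modulo 65536 */

	if (len < ie_len)			/* wrapped: reject */
		return -EINVAL;
	return len;
}

int main(void)
{
	printf("%d\n", total_len(16, 100));	/* 116: fine */
	printf("%d\n", total_len(16, 65530));	/* -22: would have wrapped */
	return 0;
}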
diff --git a/drivers/power/htc_battery.c b/drivers/power/htc_battery.c
index cf24526d9717..d4fc6a225e1e 100644
--- a/drivers/power/htc_battery.c
+++ b/drivers/power/htc_battery.c
@@ -24,6 +24,7 @@
#include <linux/rtc.h>
#include <linux/wakelock.h>
#include <linux/workqueue.h>
+#include <linux/wahoo_info.h>
#define HTC_BATT_NAME "htc_battery"
@@ -36,10 +37,11 @@
*/
DEFINE_MUTEX(htc_battery_lock);
-static int full_level_dis_chg = 100;
-module_param_named(
- full_level_dis_chg, full_level_dis_chg, int, S_IRUSR | S_IWUSR
-);
+#define DEFAULT_CHARGE_STOP_LEVEL 100
+#define DEFAULT_CHARGE_START_LEVEL 0
+
+static int charge_stop_level = DEFAULT_CHARGE_STOP_LEVEL;
+static int charge_start_level = DEFAULT_CHARGE_START_LEVEL;
#define BATT_LOG(x...) pr_info("[BATT] " x)
@@ -281,13 +283,17 @@ static int is_bounding_fully_charged_level(void)
{
static int s_pingpong = 1;
int is_batt_chg_off_by_bounding = 0;
- int upperbd = htc_batt_info.rep.full_level;
+ int upperbd = charge_stop_level;
+ int lowerbd = charge_start_level;
int current_level = htc_batt_info.rep.level;
- /* Default 5% range */
- int lowerbd = upperbd - 5;
- if ((htc_batt_info.rep.full_level > 0) &&
- (htc_batt_info.rep.full_level < 100)) {
+ if ((upperbd == DEFAULT_CHARGE_STOP_LEVEL) &&
+ (lowerbd == DEFAULT_CHARGE_START_LEVEL))
+ return 0;
+
+ if ((upperbd > lowerbd) &&
+ (upperbd <= DEFAULT_CHARGE_STOP_LEVEL) &&
+ (lowerbd >= DEFAULT_CHARGE_START_LEVEL)) {
if (lowerbd < 0)
lowerbd = 0;
@@ -579,7 +585,6 @@ static void batt_worker(struct work_struct *work)
if (((int)htc_batt_info.rep.charging_source >
POWER_SUPPLY_TYPE_BATTERY) ||
(chg_present && !ex_otg)) {
- htc_batt_info.rep.full_level = full_level_dis_chg;
if (is_bounding_fully_charged_level())
g_pwrsrc_dis_reason |= HTC_BATT_PWRSRC_DIS_BIT_MFG;
else
@@ -772,6 +777,83 @@ static int htc_notifier_batt_callback(struct notifier_block *nb,
return NOTIFY_OK;
}
+static int set_full_level_dis_chg(const char *val,
+ const struct kernel_param *kp)
+{
+ int rc;
+ int old_val = charge_stop_level;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ BATT_ERR("Unable to set charge_stop_level: %d\n", rc);
+ return rc;
+ }
+
+ if (charge_stop_level != old_val) {
+ charge_start_level = charge_stop_level - 5;
+ htc_batt_schedule_batt_info_update();
+ }
+
+ return 0;
+}
+
+static struct kernel_param_ops disable_charge_ops = {
+ .set = set_full_level_dis_chg,
+ .get = param_get_int,
+};
+module_param_cb(full_level_dis_chg, &disable_charge_ops,
+ &charge_stop_level, 0644);
+
+static int set_charge_stop_level(const char *val,
+ const struct kernel_param *kp)
+{
+ int rc;
+ int old_val = charge_stop_level;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ BATT_ERR("Unable to set charge_stop_level: %d\n", rc);
+ return rc;
+ }
+
+ if (charge_stop_level != old_val)
+ htc_batt_schedule_batt_info_update();
+
+ return 0;
+}
+
+static struct kernel_param_ops charge_stop_ops = {
+ .set = set_charge_stop_level,
+ .get = param_get_int,
+};
+module_param_cb(charge_stop_level, &charge_stop_ops,
+ &charge_stop_level, 0644);
+
+static int set_charge_start_level(const char *val,
+ const struct kernel_param *kp)
+{
+ int rc;
+ int old_val = charge_start_level;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ BATT_ERR("Unable to set charge_start_level: %d\n", rc);
+ return rc;
+ }
+
+ if (charge_start_level != old_val)
+ htc_batt_schedule_batt_info_update();
+
+ return 0;
+}
+
+static struct kernel_param_ops charge_start_ops = {
+ .set = set_charge_start_level,
+ .get = param_get_int,
+};
+module_param_cb(charge_start_level, &charge_start_ops,
+ &charge_start_level, 0644);
+
#define WALLEYE_BATT_ID_1 "walleye 1"
#define WALLEYE_BATT_ID_2 "walleye 2"
#define MUSKIE_BATT_ID_1 "muskie 1"
@@ -940,6 +1022,14 @@ static int htc_battery_probe(struct platform_device *pdev)
{
int rc = 0;
+#ifndef MODULE
+ if (is_google_taimen()) {
+ BATT_ERR("%s: This is not the Pixel 2, bailing out...\n",
+ __func__);
+ return -ENODEV;
+ }
+#endif
+
mutex_lock(&htc_battery_lock);
rc = htc_battery_probe_process();
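
The three module parameters above implement a user-settable charge window: charge_stop_level and charge_start_level set the bounds directly, while the legacy full_level_dis_chg name is kept as an alias that derives the start level 5% below the stop level. A rough userspace model of the window test added to is_bounding_fully_charged_level() (values are illustrative, not the driver code):

#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_STOP	100
#define DEFAULT_START	0

/* Default pair (100/0) means "no bounding"; anything else must form a
 * sane window before the charge-stop logic is applied at all. */
static bool bounding_active(int stop, int start)
{
	if (stop == DEFAULT_STOP && start == DEFAULT_START)
		return false;			/* feature disabled */
	return stop > start &&
	       stop <= DEFAULT_STOP && start >= DEFAULT_START;
}

int main(void)
{
	printf("%d\n", bounding_active(100, 0));	/* 0: defaults */
	printf("%d\n", bounding_active(80, 75));	/* 1: hold charge between 75 and 80 */
	printf("%d\n", bounding_active(60, 70));	/* 0: inverted window rejected */
	return 0;
}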
diff --git a/drivers/power/lge_battery.c b/drivers/power/lge_battery.c
index b72ac969e589..917ee084ffed 100644
--- a/drivers/power/lge_battery.c
+++ b/drivers/power/lge_battery.c
@@ -17,6 +17,7 @@
#include <linux/power_supply.h>
#include <linux/slab.h>
#include <linux/wakelock.h>
+#include <linux/wahoo_info.h>
#define BATT_DRV_NAME "lge_battery"
@@ -38,6 +39,8 @@
#define WATCH_DELAY 30000
#define DEMO_MODE_MAX 35
#define DEMO_MODE_MIN 30
+#define DEFAULT_CHARGE_STOP_LEVEL 100
+#define DEFAULT_CHARGE_START_LEVEL 0
enum debug_mask_print {
ASSERT = BIT(0),
@@ -156,6 +159,8 @@ static struct bm_batt_id_table valid_batt_id[BM_BATT_MAX] = {
static int debug_mask = ERROR | INTERRUPT | MISC | VERBOSE;
static int demo_mode;
+static int charge_stop_level = DEFAULT_CHARGE_STOP_LEVEL;
+static int charge_start_level = DEFAULT_CHARGE_START_LEVEL;
static int bm_get_property(struct power_supply *psy,
enum power_supply_property prop, int *value)
@@ -197,6 +202,20 @@ static int bm_set_property(struct power_supply *psy,
return rc;
}
+static int battery_power_supply_changed(void)
+{
+ struct power_supply *batt_psy;
+
+ batt_psy = power_supply_get_by_name("battery");
+ if (!batt_psy) {
+ pr_bm(ERROR, "Couldn't get batt_psy\n");
+ return -ENODEV;
+ }
+
+ power_supply_changed(batt_psy);
+ return 0;
+}
+
static int bm_vote_fcc_update(struct battery_manager *bm)
{
int fcc = INT_MAX;
@@ -274,13 +293,13 @@ void bm_check_demo_mode(struct battery_manager *bm)
bm->demo_iusb = 1;
bm->demo_ibat = 1;
} else {
- if (bm->batt_soc > DEMO_MODE_MAX) {
+ if (bm->batt_soc > charge_stop_level) {
bm->demo_iusb = 0;
bm->demo_ibat = 0;
- } else if (bm->batt_soc >= DEMO_MODE_MAX) {
+ } else if (bm->batt_soc >= charge_stop_level) {
bm->demo_iusb = 1;
bm->demo_ibat = 0;
- } else if (bm->batt_soc < DEMO_MODE_MIN) {
+ } else if (bm->batt_soc <= charge_start_level) {
bm->demo_iusb = 1;
bm->demo_ibat = 1;
}
@@ -448,8 +467,7 @@ static void bm_watch_work(struct work_struct *work)
goto error;
}
- if (bm->bm_active)
- bm_check_therm_charging(bm, batt_temp, batt_volt);
+ bm_check_therm_charging(bm, batt_temp, batt_volt);
rc = bm_get_property(bm->batt_psy,
POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
@@ -749,6 +767,13 @@ static int lge_battery_probe(struct platform_device *pdev)
struct battery_manager *bm;
int rc = 0;
+#ifndef MODULE
+ if (is_google_walleye()) {
+ pr_bm(ERROR, "This is not the Pixel 2 XL, bailing out...\n");
+ return -ENODEV;
+ }
+#endif
+
bm = devm_kzalloc(&pdev->dev, sizeof(struct battery_manager),
GFP_KERNEL);
if (!bm) {
@@ -868,7 +893,6 @@ static void __exit lge_battery_exit(void)
static int set_demo_mode(const char *val, const struct kernel_param *kp)
{
- struct power_supply *batt_psy;
int rc = 0;
int old_val = demo_mode;
@@ -881,13 +905,15 @@ static int set_demo_mode(const char *val, const struct kernel_param *kp)
if (demo_mode == old_val)
return 0;
- batt_psy = power_supply_get_by_name("battery");
- if (!batt_psy) {
- pr_bm(ERROR, "Couldn't get batt_psy\n");
- return -ENODEV;
+ if (demo_mode) {
+ charge_stop_level = DEMO_MODE_MAX;
+ charge_start_level = DEMO_MODE_MIN;
+ } else {
+ charge_stop_level = DEFAULT_CHARGE_STOP_LEVEL;
+ charge_start_level = DEFAULT_CHARGE_START_LEVEL;
}
+ battery_power_supply_changed();
- power_supply_changed(batt_psy);
return 0;
}
@@ -899,6 +925,82 @@ static struct kernel_param_ops demo_mode_ops = {
module_param_cb(demo_mode, &demo_mode_ops, &demo_mode, 0644);
MODULE_PARM_DESC(demo_mode, "VZW Demo mode <on|off>");
+static int set_charge_stop_level(const char *val,
+ const struct kernel_param *kp)
+{
+ int rc;
+ int old_val = charge_stop_level;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_bm(ERROR, "Unable to set charge_stop_level: %d\n", rc);
+ return rc;
+ }
+
+ if (charge_stop_level == old_val)
+ return 0;
+
+ if (charge_stop_level <= charge_start_level) {
+ charge_stop_level = old_val;
+ return 0;
+ }
+
+ if ((charge_stop_level == DEFAULT_CHARGE_STOP_LEVEL) &&
+ (charge_start_level == DEFAULT_CHARGE_START_LEVEL))
+ demo_mode = 0;
+ else
+ demo_mode = 1;
+
+ battery_power_supply_changed();
+
+ return 0;
+}
+
+static struct kernel_param_ops charge_stop_ops = {
+ .set = set_charge_stop_level,
+ .get = param_get_int,
+};
+module_param_cb(charge_stop_level, &charge_stop_ops,
+ &charge_stop_level, 0644);
+
+static int set_charge_start_level(const char *val,
+ const struct kernel_param *kp)
+{
+ int rc;
+ int old_val = charge_start_level;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_bm(ERROR, "Unable to set charge_start_level: %d\n", rc);
+ return rc;
+ }
+
+ if (charge_start_level == old_val)
+ return 0;
+
+ if (charge_stop_level <= charge_start_level) {
+ charge_start_level = old_val;
+ return 0;
+ }
+
+ if ((charge_stop_level == DEFAULT_CHARGE_STOP_LEVEL) &&
+ (charge_start_level == DEFAULT_CHARGE_START_LEVEL))
+ demo_mode = 0;
+ else
+ demo_mode = 1;
+
+ battery_power_supply_changed();
+
+ return 0;
+}
+
+static struct kernel_param_ops charge_start_ops = {
+ .set = set_charge_start_level,
+ .get = param_get_int,
+};
+module_param_cb(charge_start_level, &charge_start_ops,
+ &charge_start_level, 0644);
+
module_init(lge_battery_init);
module_exit(lge_battery_exit);
MODULE_LICENSE("GPL");
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 11e259da4972..fa7d5e16c9fb 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -294,7 +294,6 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(fcc_delta),
POWER_SUPPLY_ATTR(icl_reduction),
POWER_SUPPLY_ATTR(parallel_mode),
- POWER_SUPPLY_ATTR(port_temp),
POWER_SUPPLY_ATTR(die_health),
POWER_SUPPLY_ATTR(connector_health),
POWER_SUPPLY_ATTR(ctm_current_max),
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 42d102f43280..8570fa82ed9e 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -27,6 +27,10 @@
#include <linux/of_address.h>
#include <linux/console.h>
#include <linux/rtc.h>
+#include <linux/kdebug.h>
+#include <linux/notifier.h>
+#include <linux/kallsyms.h>
+#include <linux/io.h>
#include <asm/cacheflush.h>
#include <asm/system_misc.h>
@@ -48,9 +52,19 @@
#define SCM_EDLOAD_MODE 0X01
#define SCM_DLOAD_CMD 0x10
+#define MAX_SZ_DIAG_ERR_MSG 200
+
+struct reboot_params {
+ u32 abnrst;
+ u32 xbl_log_addr;
+ u32 ddr_vendor;
+ u8 msg[0];
+};
static int restart_mode;
static void *restart_reason;
+static struct reboot_params *reboot_params;
+static size_t rst_msg_size;
static bool scm_pmic_arbiter_disable_supported;
static bool scm_deassert_ps_hold_supported;
/* Download mode master kill-switch */
@@ -99,9 +113,59 @@ struct reset_attribute {
module_param_call(download_mode, dload_set, param_get_int,
&download_mode, 0644);
+static struct die_args *tombstone;
+
+static inline void set_restart_msg(const char *msg)
+{
+ if (!reboot_params || rst_msg_size == 0)
+ return;
+
+ pr_info("%s: set restart msg = `%s'\r\n", __func__, msg?:"<null>");
+ memset_io(reboot_params->msg, 0, rst_msg_size);
+ memcpy_toio(reboot_params->msg, msg,
+ min(strlen(msg), rst_msg_size - 1));
+}
+
+int die_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ static struct die_args args;
+
+ memcpy(&args, data, sizeof(args));
+ tombstone = &args;
+ pr_debug("saving oops: %pK\n", (void *) tombstone);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block die_nb = {
+ .notifier_call = die_notify,
+};
+
static int panic_prep_restart(struct notifier_block *this,
unsigned long event, void *ptr)
{
+ char kernel_panic_msg[MAX_SZ_DIAG_ERR_MSG] = "Kernel Panic";
+
+ if (tombstone) { /* amend the panic message for an Oops */
+ char pc_symn[KSYM_NAME_LEN] = "<unknown>";
+ char lr_symn[KSYM_NAME_LEN] = "<unknown>";
+
+#if defined(CONFIG_ARM)
+ sprint_symbol(pc_symn, tombstone->regs->ARM_pc);
+ sprint_symbol(lr_symn, tombstone->regs->ARM_lr);
+#elif defined(CONFIG_ARM64)
+ sprint_symbol(pc_symn, tombstone->regs->pc);
+ sprint_symbol(lr_symn, tombstone->regs->regs[30]);
+#endif
+
+ if (rst_msg_size)
+ snprintf(kernel_panic_msg, rst_msg_size - 1,
+ "KP: %s PC:%s LR:%s",
+ current->comm, pc_symn, lr_symn);
+
+ set_restart_msg(kernel_panic_msg);
+ }
+
in_panic = 1;
return NOTIFY_DONE;
}
@@ -548,6 +612,68 @@ static struct attribute_group reset_attr_group = {
};
#endif
+int restart_handler_init(void)
+{
+ struct device_node *np;
+ u32 rst_info_size;
+ int ret = 0;
+
+ np = of_find_compatible_node(NULL, NULL,
+ "qcom,msm-imem-restart_reason");
+ if (!np) {
+ pr_err("unable to find DT imem restart reason node\n");
+ ret = -ENOENT;
+ } else {
+ restart_reason = of_iomap(np, 0);
+ if (!restart_reason) {
+ pr_err("unable to map imem restart reason offset\n");
+ ret = -ENOMEM;
+ }
+ }
+ if (ret)
+ goto err_restart_reason;
+
+ np = of_find_compatible_node(NULL, NULL,
+ "msm-imem-restart_info");
+ if (!np) {
+ pr_err("unable to find DT imem restart info node\n");
+ ret = -ENOENT;
+ } else {
+ reboot_params = of_iomap(np, 0);
+ if (!reboot_params) {
+ pr_err("unable to map imem restart info offset\n");
+ ret = -ENOMEM;
+ } else {
+ ret = of_property_read_u32(np, "info_size",
+ &rst_info_size);
+ if (ret) {
+ pr_err("%s: Failed to find info_size property in restart info device node %d\n"
+ , __func__, ret);
+ goto err_info_size;
+ }
+ }
+ }
+ if (ret)
+ goto err_restart_msg;
+
+ rst_msg_size = (size_t) rst_info_size -
+ offsetof(struct reboot_params, msg);
+ if (rst_msg_size > MAX_SZ_DIAG_ERR_MSG)
+ rst_msg_size = MAX_SZ_DIAG_ERR_MSG;
+
+
+ set_restart_msg("Unknown");
+ pr_debug("%s: default message is set\n", __func__);
+ return ret;
+
+err_info_size:
+ iounmap(reboot_params);
+err_restart_msg:
+ iounmap(restart_reason);
+err_restart_reason:
+ return ret;
+}
+
static int msm_restart_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -555,10 +681,14 @@ static int msm_restart_probe(struct platform_device *pdev)
struct device_node *np;
int ret = 0;
+ if (restart_handler_init() < 0)
+ pr_err("restart_handler_init failure\n");
+
#ifdef CONFIG_QCOM_DLOAD_MODE
if (scm_is_call_available(SCM_SVC_BOOT, SCM_DLOAD_CMD) > 0)
scm_dload_supported = true;
+ register_die_notifier(&die_nb);
atomic_notifier_chain_register(&panic_notifier_list, &panic_blk);
np = of_find_compatible_node(NULL, NULL, DL_MODE_PROP);
if (!np) {
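
restart_handler_init() sizes the message area from the device tree: the IMEM region's info_size minus the fixed reboot_params header, clamped to MAX_SZ_DIAG_ERR_MSG, and set_restart_msg() never writes the final byte so the stored string stays NUL-terminated. A standalone model of that arithmetic (the 256-byte info_size is only an example):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define MAX_SZ_DIAG_ERR_MSG 200

struct reboot_params {
	unsigned int abnrst;
	unsigned int xbl_log_addr;
	unsigned int ddr_vendor;
	char msg[];
};

int main(void)
{
	size_t info_size = 256;	/* stand-in for the "info_size" DT property */
	size_t msg_size = info_size - offsetof(struct reboot_params, msg);
	char msg_area[MAX_SZ_DIAG_ERR_MSG] = { 0 };
	const char *reason = "KP: swapper PC:<unknown> LR:<unknown>";
	size_t n;

	if (msg_size > MAX_SZ_DIAG_ERR_MSG)
		msg_size = MAX_SZ_DIAG_ERR_MSG;

	/* Same copy rule as set_restart_msg(): leave the last byte zero. */
	n = strlen(reason);
	if (n > msg_size - 1)
		n = msg_size - 1;
	memcpy(msg_area, reason, n);

	printf("msg_size=%zu msg=\"%s\"\n", msg_size, msg_area);
	return 0;
}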
diff --git a/drivers/power/supply/qcom/Kconfig b/drivers/power/supply/qcom/Kconfig
index 0e091b8fc881..a7e472624fd8 100644
--- a/drivers/power/supply/qcom/Kconfig
+++ b/drivers/power/supply/qcom/Kconfig
@@ -9,6 +9,17 @@ config QPNP_FG_GEN3
the fuel gauge. The state of charge is reported through a BMS power
supply property and also sends uevents when the capacity is updated.
+config QPNP_FG_GEN3_LEGACY_CYCLE_COUNT
+ tristate "QPNP GEN3 fuel gauge cycle count bins"
+ depends on QPNP_FG_GEN3
+ default y
+ help
+ Say Y here to enable the original QCOM GEN3 FG cycle count
+ implementation. When not selected, POWER_SUPPLY_PROP_CYCLE_COUNT
+ exposes the Linux power-supply battery cycle count, which is not
+ based on cycle bins. When selected, only 'bms' exposes
+ POWER_SUPPLY_PROP_CYCLE_COUNT, reporting the bucket-based value.
+
config SMB135X_CHARGER
tristate "SMB135X Battery Charger"
depends on I2C
diff --git a/drivers/power/supply/qcom/bcl_peripheral.c b/drivers/power/supply/qcom/bcl_peripheral.c
index d0f26c69ac3c..54e110773505 100644
--- a/drivers/power/supply/qcom/bcl_peripheral.c
+++ b/drivers/power/supply/qcom/bcl_peripheral.c
@@ -107,11 +107,12 @@
} while (0)
#define READ_OPTIONAL_PROP(_node, _key, _val, _ret, _dest) do { \
- _ret = of_property_read_u32(_node, _key, &_val); \
- if (_ret && _ret != -EINVAL) { \
+ int __ret = of_property_read_u32(_node, _key, &_val); \
+ if (__ret && __ret != -EINVAL) { \
+ _ret = __ret; \
pr_err("Error reading key:%s. err:%d\n", _key, _ret); \
goto bcl_dev_exit; \
- } else if (!_ret) { \
+ } else if (!__ret) { \
_dest = _val; \
} \
} while (0)
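
The macro fix above is about scoping: reading an optional property straight into _ret meant a missing key (-EINVAL) clobbered the caller's running return code, so the leftover -EINVAL could be returned even though nothing had actually failed. With a local __ret, only real errors propagate. A compilable model of the same pattern:

#include <errno.h>
#include <stdio.h>

/* Simplified model of the fixed READ_OPTIONAL_PROP(): a missing optional
 * key (-EINVAL from the lookup) does not touch the caller's return code;
 * only hard errors are copied out and abort the probe. */
#define READ_OPTIONAL(_lookup, _ret, _dest) do {		\
	int __ret = (_lookup);					\
	if (__ret && __ret != -EINVAL) {			\
		(_ret) = __ret;		/* hard error */	\
		goto fail;					\
	} else if (!__ret) {					\
		(_dest) = 1;		/* key present */	\
	}							\
} while (0)

int main(void)
{
	int ret = 0, have_key = 0;

	READ_OPTIONAL(-EINVAL, ret, have_key);	/* optional key missing */
	printf("ret=%d have_key=%d\n", ret, have_key);	/* ret stays 0 */

	READ_OPTIONAL(-ENOMEM, ret, have_key);	/* real failure */
	printf("unreached\n");
	return 0;
fail:
	printf("hard failure, ret=%d\n", ret);
	return 1;
}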
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index b6501fd49883..341448259d08 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -2437,6 +2437,86 @@ out:
pm_relax(chip->dev);
}
+static ssize_t fg_get_cycle_counts_bins(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct fg_chip *chip = dev_get_drvdata(dev);
+ int rc = 0, i;
+ u8 data[2];
+ int length = 0;
+
+ mutex_lock(&chip->cyc_ctr.lock);
+ for (i = 0; i < BUCKET_COUNT; i++) {
+ rc = fg_sram_read(chip, CYCLE_COUNT_WORD + (i / 2),
+ CYCLE_COUNT_OFFSET + (i % 2) * 2, data, 2,
+ FG_IMA_DEFAULT);
+
+ if (rc < 0) {
+ pr_err("failed to read bucket %d rc=%d\n", i, rc);
+ chip->cyc_ctr.count[i] = 0;
+ } else
+ chip->cyc_ctr.count[i] = data[0] | data[1] << 8;
+
+ length += scnprintf(buf + length,
+ PAGE_SIZE - length, "%d",
+ chip->cyc_ctr.count[i]);
+
+ if (i == BUCKET_COUNT-1)
+ length += scnprintf(buf + length,
+ PAGE_SIZE - length, "\n");
+ else
+ length += scnprintf(buf + length,
+ PAGE_SIZE - length, " ");
+ }
+ mutex_unlock(&chip->cyc_ctr.lock);
+
+ return length;
+}
+
+static ssize_t fg_set_cycle_counts_bins(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fg_chip *chip = dev_get_drvdata(dev);
+ int rc = 0, strval[BUCKET_COUNT], bucket;
+ u16 cyc_count;
+ u8 data[2];
+
+ if (sscanf(buf, "%d %d %d %d %d %d %d %d",
+ &strval[0], &strval[1], &strval[2], &strval[3],
+ &strval[4], &strval[5], &strval[6], &strval[7])
+ != BUCKET_COUNT)
+ return -EINVAL;
+
+ mutex_lock(&chip->cyc_ctr.lock);
+ for (bucket = 0; bucket < BUCKET_COUNT; bucket++) {
+ if (strval[bucket] > chip->cyc_ctr.count[bucket]) {
+ cyc_count = strval[bucket];
+ data[0] = cyc_count & 0xFF;
+ data[1] = cyc_count >> 8;
+
+ rc = fg_sram_write(chip,
+ CYCLE_COUNT_WORD + (bucket / 2),
+ CYCLE_COUNT_OFFSET + (bucket % 2) * 2,
+ data, 2, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("failed to write BATT_CYCLE[%d] rc=%d\n",
+ bucket, rc);
+ mutex_unlock(&chip->cyc_ctr.lock);
+ return rc;
+ }
+ chip->cyc_ctr.count[bucket] = cyc_count;
+ }
+ }
+ mutex_unlock(&chip->cyc_ctr.lock);
+
+ return count;
+}
+
+static DEVICE_ATTR(cycle_counts_bins, 0660,
+ fg_get_cycle_counts_bins, fg_set_cycle_counts_bins);
+
static void restore_cycle_counter(struct fg_chip *chip)
{
int rc = 0, i;
@@ -2570,12 +2650,25 @@ static int fg_get_cycle_count(struct fg_chip *chip)
if (!chip->cyc_ctr.en)
return 0;
+#ifdef CONFIG_QPNP_FG_GEN3_LEGACY_CYCLE_COUNT
if ((chip->cyc_ctr.id <= 0) || (chip->cyc_ctr.id > BUCKET_COUNT))
return -EINVAL;
mutex_lock(&chip->cyc_ctr.lock);
count = chip->cyc_ctr.count[chip->cyc_ctr.id - 1];
mutex_unlock(&chip->cyc_ctr.lock);
+#else
+ mutex_lock(&chip->cyc_ctr.lock);
+ {
+ int i;
+
+ count = 0;
+ for (i = 0 ; i < BUCKET_COUNT; i++)
+ count += chip->cyc_ctr.count[i];
+ count = DIV_ROUND_CLOSEST(count, 8);
+ }
+ mutex_unlock(&chip->cyc_ctr.lock);
+#endif
return count;
}
@@ -3296,9 +3389,11 @@ static int fg_psy_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CYCLE_COUNT:
pval->intval = fg_get_cycle_count(chip);
break;
+#ifdef CONFIG_QPNP_FG_GEN3_LEGACY_CYCLE_COUNT
case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
pval->intval = chip->cyc_ctr.id;
break;
+#endif
case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
rc = fg_get_charge_raw(chip, &pval->intval);
break;
@@ -3346,9 +3441,10 @@ static int fg_psy_set_property(struct power_supply *psy,
const union power_supply_propval *pval)
{
struct fg_chip *chip = power_supply_get_drvdata(psy);
- int rc = 0;
+ int rc = -EINVAL;
switch (psp) {
+#ifdef CONFIG_QPNP_FG_GEN3_LEGACY_CYCLE_COUNT
case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
if ((pval->intval > 0) && (pval->intval <= BUCKET_COUNT)) {
chip->cyc_ctr.id = pval->intval;
@@ -3358,6 +3454,7 @@ static int fg_psy_set_property(struct power_supply *psy,
return -EINVAL;
}
break;
+#endif
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
rc = fg_set_constant_chg_voltage(chip, pval->intval);
break;
@@ -3372,7 +3469,9 @@ static int fg_property_is_writeable(struct power_supply *psy,
enum power_supply_property psp)
{
switch (psp) {
+#ifdef CONFIG_QPNP_FG_GEN3_LEGACY_CYCLE_COUNT
case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
+#endif
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE:
return 1;
default:
@@ -3425,7 +3524,9 @@ static enum power_supply_property fg_psy_props[] = {
POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_CYCLE_COUNT,
+#ifdef CONFIG_QPNP_FG_GEN3_LEGACY_CYCLE_COUNT
POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+#endif
POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
POWER_SUPPLY_PROP_CHARGE_NOW,
POWER_SUPPLY_PROP_CHARGE_FULL,
@@ -4784,6 +4885,12 @@ static int fg_gen3_probe(struct platform_device *pdev)
goto exit;
}
+ rc = device_create_file(chip->dev, &dev_attr_cycle_counts_bins);
+ if (rc != 0) {
+ dev_err(chip->dev,
+ "Failed to create cycle_counts_bins files: %d\n", rc);
+ }
+
mutex_init(&chip->bus_lock);
mutex_init(&chip->sram_rw_lock);
mutex_init(&chip->cyc_ctr.lock);
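
With the legacy option disabled, fg_get_cycle_count() no longer reports a single selected bucket; it averages all eight bucket counters, which approximates whole equivalent cycles assuming each bucket covers an equal slice of the state-of-charge range. A worked example of that arithmetic:

#include <stdio.h>

#define BUCKET_COUNT 8

static int cycle_count(const int bins[BUCKET_COUNT])
{
	int i, sum = 0;

	for (i = 0; i < BUCKET_COUNT; i++)
		sum += bins[i];
	/* same rounding as DIV_ROUND_CLOSEST(count, 8) */
	return (sum + BUCKET_COUNT / 2) / BUCKET_COUNT;
}

int main(void)
{
	int bins[BUCKET_COUNT] = { 13, 12, 12, 12, 11, 11, 10, 10 };

	printf("cycle count = %d\n", cycle_count(bins));	/* prints 11 */
	return 0;
}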
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 736728f46568..8995a6dc070d 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -449,7 +449,7 @@ static enum power_supply_property smb2_usb_props[] = {
POWER_SUPPLY_PROP_BOOST_CURRENT,
POWER_SUPPLY_PROP_PE_START,
POWER_SUPPLY_PROP_USE_EXTERNAL_VBUS_OUTPUT,
- POWER_SUPPLY_PROP_PORT_TEMP,
+ POWER_SUPPLY_PROP_TEMP,
POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
POWER_SUPPLY_PROP_HW_CURRENT_MAX,
POWER_SUPPLY_PROP_PR_SWAP,
@@ -544,7 +544,7 @@ static int smb2_usb_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_USE_EXTERNAL_VBUS_OUTPUT:
rc = smblib_get_prop_use_external_vbus_output(chg, val);
break;
- case POWER_SUPPLY_PROP_PORT_TEMP:
+ case POWER_SUPPLY_PROP_TEMP:
rc = smblib_get_prop_usb_port_temp(chg, val);
break;
case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
@@ -612,7 +612,7 @@ static int smb2_usb_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_USE_EXTERNAL_VBUS_OUTPUT:
rc = smblib_set_prop_use_external_vbus_output(chg, val);
break;
- case POWER_SUPPLY_PROP_PORT_TEMP:
+ case POWER_SUPPLY_PROP_TEMP:
rc = smblib_set_prop_usb_port_temp(chg, val);
break;
case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
@@ -642,7 +642,7 @@ static int smb2_usb_prop_is_writeable(struct power_supply *psy,
return 1;
case POWER_SUPPLY_PROP_USE_EXTERNAL_VBUS_OUTPUT:
return 1;
- case POWER_SUPPLY_PROP_PORT_TEMP:
+ case POWER_SUPPLY_PROP_TEMP:
return 1;
default:
break;
@@ -1021,6 +1021,9 @@ static enum power_supply_property smb2_batt_props[] = {
POWER_SUPPLY_PROP_RERUN_AICL,
POWER_SUPPLY_PROP_DP_DM,
POWER_SUPPLY_PROP_CHARGE_COUNTER,
+#ifndef CONFIG_QPNP_FG_GEN3_LEGACY_CYCLE_COUNT
+ POWER_SUPPLY_PROP_CYCLE_COUNT,
+#endif
};
static int smb2_batt_get_prop(struct power_supply *psy,
@@ -1135,6 +1138,11 @@ static int smb2_batt_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
rc = smblib_get_prop_batt_charge_counter(chg, val);
break;
+#ifndef CONFIG_QPNP_FG_GEN3_LEGACY_CYCLE_COUNT
+ case POWER_SUPPLY_PROP_CYCLE_COUNT:
+ rc = smblib_get_cycle_count(chg, val);
+ break;
+#endif
default:
pr_err("batt power supply prop %d not supported\n", psp);
return -EINVAL;
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index de5146a7a112..b8c0d6b3ce10 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -1957,6 +1957,21 @@ int smblib_get_prop_batt_charge_counter(struct smb_charger *chg,
return rc;
}
+#ifndef CONFIG_QPNP_FG_GEN3_LEGACY_CYCLE_COUNT
+int smblib_get_cycle_count(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+
+ if (!chg->bms_psy)
+ return -EINVAL;
+
+ rc = power_supply_get_property(chg->bms_psy,
+ POWER_SUPPLY_PROP_CYCLE_COUNT, val);
+ return rc;
+}
+#endif
+
/***********************
* BATTERY PSY SETTERS *
***********************/
@@ -2673,14 +2688,23 @@ int smblib_set_prop_pd_current_max(struct smb_charger *chg,
int smblib_set_prop_usb_current_max(struct smb_charger *chg,
const union power_supply_propval *val)
{
- int rc;
+ int rc = 0;
int icl_ua = val->intval;
if (icl_ua < 0)
return -EINVAL;
/* cancel vote when icl_ua is voted 0 */
- rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, icl_ua != 0, icl_ua);
+ if (val->intval > USBIN_25MA) {
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+ true, val->intval);
+ } else if (chg->system_suspend_supported) {
+ rc = vote(chg->usb_icl_votable, USB_PSY_VOTER,
+ false, 0);
+ } else {
+ smblib_dbg(chg, PR_MISC, "suspend not supported\n");
+ return rc;
+ }
if (rc < 0) {
smblib_err(chg, "Couldn't vote USB ICL %d, rc=%d\n",
@@ -4243,10 +4267,15 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
rc);
/* enable DRP */
- rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
- TYPEC_POWER_ROLE_CMD_MASK, 0);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable DRP rc=%d\n", rc);
+ if (chg->typec_pr_disabled)
+ smblib_err(chg, "Skip enable DRP due to typec_pr_disabled=true\n");
+ else {
+ rc = smblib_masked_write(chg,
+ TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+ TYPEC_POWER_ROLE_CMD_MASK, 0);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't enable DRP rc=%d\n", rc);
+ }
/* HW controlled CC_OUT */
rc = smblib_masked_write(chg, TAPER_TIMER_SEL_CFG_REG,
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 6dd7d3e84dfa..cc3937276783 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -585,6 +585,10 @@ int smblib_get_prop_pr_swap_in_progress(struct smb_charger *chg,
int smblib_set_prop_pr_swap_in_progress(struct smb_charger *chg,
const union power_supply_propval *val);
+#ifndef CONFIG_QPNP_FG_GEN3_LEGACY_CYCLE_COUNT
+int smblib_get_cycle_count(struct smb_charger *chg,
+ union power_supply_propval *val);
+#endif
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
#endif /* __SMB2_CHARGER_H */
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index cd81f9b0a469..c4ace3f8ecdb 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -857,10 +857,10 @@ static int smb138x_init_slave_hw(struct smb138x *chip)
}
}
- /* configure to a fixed 700khz freq to avoid tdie errors */
- rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 700);
+ /* configure to a fixed 900khz freq to avoid tdie errors */
+ rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 900);
if (rc < 0) {
- pr_err("Couldn't configure 700Khz switch freq rc=%d\n", rc);
+ pr_err("Couldn't configure 900Khz switch freq rc=%d\n", rc);
return rc;
}
diff --git a/drivers/s390/char/Makefile b/drivers/s390/char/Makefile
index 6fa9364d1c07..835f1054976b 100644
--- a/drivers/s390/char/Makefile
+++ b/drivers/s390/char/Makefile
@@ -2,6 +2,8 @@
# S/390 character devices
#
+CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_EXPOLINE)
+
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1e16331891a9..f9d6a9f00640 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -451,6 +451,7 @@ static void chsc_process_sei_link_incident(struct chsc_sei_nt0_area *sei_area)
static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
{
+ struct channel_path *chp;
struct chp_link link;
struct chp_id chpid;
int status;
@@ -463,10 +464,17 @@ static void chsc_process_sei_res_acc(struct chsc_sei_nt0_area *sei_area)
chpid.id = sei_area->rsid;
/* allocate a new channel path structure, if needed */
status = chp_get_status(chpid);
- if (status < 0)
- chp_new(chpid);
- else if (!status)
+ if (!status)
return;
+
+ if (status < 0) {
+ chp_new(chpid);
+ } else {
+ chp = chpid_to_chp(chpid);
+ mutex_lock(&chp->lock);
+ chp_update_desc(chp);
+ mutex_unlock(&chp->lock);
+ }
memset(&link, 0, sizeof(struct chp_link));
link.chpid = chpid;
if ((sei_area->vf & 0xc0) != 0) {
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 48b3866a9ded..35286907c636 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -140,7 +140,7 @@ static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
int i;
for (i = 0; i < nr_queues; i++) {
- q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
+ q = kmem_cache_zalloc(qdio_q_cache, GFP_KERNEL);
if (!q)
return -ENOMEM;
@@ -456,7 +456,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
{
struct ciw *ciw;
struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
- int rc;
memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
@@ -493,16 +492,14 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
- rc = -EINVAL;
- goto out_err;
+ return -EINVAL;
}
irq_ptr->equeue = *ciw;
ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
if (!ciw) {
DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
- rc = -EINVAL;
- goto out_err;
+ return -EINVAL;
}
irq_ptr->aqueue = *ciw;
@@ -510,9 +507,6 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
irq_ptr->orig_handler = init_data->cdev->handler;
init_data->cdev->handler = qdio_int_handler;
return 0;
-out_err:
- qdio_release_memory(irq_ptr);
- return rc;
}
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 34367d172961..4534a7ce77b8 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
*
* Debug traces for zfcp.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
#define KMSG_COMPONENT "zfcp"
@@ -287,6 +287,27 @@ void zfcp_dbf_rec_trig(char *tag, struct zfcp_adapter *adapter,
spin_unlock_irqrestore(&dbf->rec_lock, flags);
}
+/**
+ * zfcp_dbf_rec_trig_lock - trace event related to triggered recovery with lock
+ * @tag: identifier for event
+ * @adapter: adapter on which the erp_action should run
+ * @port: remote port involved in the erp_action
+ * @sdev: scsi device involved in the erp_action
+ * @want: wanted erp_action
+ * @need: required erp_action
+ *
+ * The adapter->erp_lock must not be held.
+ */
+void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+ struct zfcp_port *port, struct scsi_device *sdev,
+ u8 want, u8 need)
+{
+ unsigned long flags;
+
+ read_lock_irqsave(&adapter->erp_lock, flags);
+ zfcp_dbf_rec_trig(tag, adapter, port, sdev, want, need);
+ read_unlock_irqrestore(&adapter->erp_lock, flags);
+}
/**
* zfcp_dbf_rec_run_lvl - trace event related to running recovery
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 21c8c689b02b..7a7984a50683 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -3,7 +3,7 @@
*
* External function declarations.
*
- * Copyright IBM Corp. 2002, 2016
+ * Copyright IBM Corp. 2002, 2018
*/
#ifndef ZFCP_EXT_H
@@ -34,6 +34,9 @@ extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
extern void zfcp_dbf_adapter_unregister(struct zfcp_adapter *);
extern void zfcp_dbf_rec_trig(char *, struct zfcp_adapter *,
struct zfcp_port *, struct scsi_device *, u8, u8);
+extern void zfcp_dbf_rec_trig_lock(char *tag, struct zfcp_adapter *adapter,
+ struct zfcp_port *port,
+ struct scsi_device *sdev, u8 want, u8 need);
extern void zfcp_dbf_rec_run(char *, struct zfcp_erp_action *);
extern void zfcp_dbf_rec_run_lvl(int level, char *tag,
struct zfcp_erp_action *erp);
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index a9b8104b982e..bb99db2948ab 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -3,7 +3,7 @@
*
* Interface to Linux SCSI midlayer.
*
- * Copyright IBM Corp. 2002, 2017
+ * Copyright IBM Corp. 2002, 2018
*/
#define KMSG_COMPONENT "zfcp"
@@ -616,9 +616,9 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
ids.port_id = port->d_id;
ids.roles = FC_RPORT_ROLE_FCP_TARGET;
- zfcp_dbf_rec_trig("scpaddy", port->adapter, port, NULL,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
+ zfcp_dbf_rec_trig_lock("scpaddy", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_ADD);
rport = fc_remote_port_add(port->adapter->scsi_host, 0, &ids);
if (!rport) {
dev_err(&port->adapter->ccw_device->dev,
@@ -640,9 +640,9 @@ static void zfcp_scsi_rport_block(struct zfcp_port *port)
struct fc_rport *rport = port->rport;
if (rport) {
- zfcp_dbf_rec_trig("scpdely", port->adapter, port, NULL,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
- ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
+ zfcp_dbf_rec_trig_lock("scpdely", port->adapter, port, NULL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL,
+ ZFCP_PSEUDO_ERP_ACTION_RPORT_DEL);
fc_remote_port_delete(rport);
port->rport = NULL;
}
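
zfcp_dbf_rec_trig_lock() is a thin wrapper that takes the adapter's erp_lock as a reader around the existing trace call, for call sites such as rport add/block that run outside error recovery and therefore do not already hold the lock. The pattern, sketched with a pthread rwlock (not zfcp code):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t erp_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Assumes the caller already holds erp_lock, like zfcp_dbf_rec_trig(). */
static void trace_trig(const char *tag)
{
	printf("trace: %s\n", tag);
}

/* _lock variant for callers outside error recovery: take the reader
 * lock only for the duration of the trace. */
static void trace_trig_lock(const char *tag)
{
	pthread_rwlock_rdlock(&erp_lock);
	trace_trig(tag);
	pthread_rwlock_unlock(&erp_lock);
}

int main(void)
{
	trace_trig_lock("scpaddy");
	return 0;
}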
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index 519dac4e341e..9a8c2f97ed70 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -222,6 +222,7 @@ out_done:
static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
{
struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(cmd->device->host);
+ struct domain_device *dev = cmd_to_domain_dev(cmd);
struct sas_task *task = TO_SAS_TASK(cmd);
/* At this point, we only get called following an actual abort
@@ -230,6 +231,14 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
*/
sas_end_task(cmd, task);
+ if (dev_is_sata(dev)) {
+ /* defer commands to libata so that libata EH can
+ * handle ata qcs correctly
+ */
+ list_move_tail(&cmd->eh_entry, &sas_ha->eh_ata_q);
+ return;
+ }
+
/* now finish the command and move it on to the error
* handler done list, this also takes it off the
* error handler pending list.
@@ -237,22 +246,6 @@ static void sas_eh_finish_cmd(struct scsi_cmnd *cmd)
scsi_eh_finish_cmd(cmd, &sas_ha->eh_done_q);
}
-static void sas_eh_defer_cmd(struct scsi_cmnd *cmd)
-{
- struct domain_device *dev = cmd_to_domain_dev(cmd);
- struct sas_ha_struct *ha = dev->port->ha;
- struct sas_task *task = TO_SAS_TASK(cmd);
-
- if (!dev_is_sata(dev)) {
- sas_eh_finish_cmd(cmd);
- return;
- }
-
- /* report the timeout to libata */
- sas_end_task(cmd, task);
- list_move_tail(&cmd->eh_entry, &ha->eh_ata_q);
-}
-
static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd *my_cmd)
{
struct scsi_cmnd *cmd, *n;
@@ -260,7 +253,7 @@ static void sas_scsi_clear_queue_lu(struct list_head *error_q, struct scsi_cmnd
list_for_each_entry_safe(cmd, n, error_q, eh_entry) {
if (cmd->device->sdev_target == my_cmd->device->sdev_target &&
cmd->device->lun == my_cmd->device->lun)
- sas_eh_defer_cmd(cmd);
+ sas_eh_finish_cmd(cmd);
}
}
@@ -622,12 +615,12 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
case TASK_IS_DONE:
SAS_DPRINTK("%s: task 0x%p is done\n", __func__,
task);
- sas_eh_defer_cmd(cmd);
+ sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_ABORTED:
SAS_DPRINTK("%s: task 0x%p is aborted\n",
__func__, task);
- sas_eh_defer_cmd(cmd);
+ sas_eh_finish_cmd(cmd);
continue;
case TASK_IS_AT_LU:
SAS_DPRINTK("task 0x%p is at LU: lu recover\n", task);
@@ -638,7 +631,7 @@ static void sas_eh_handle_sas_errors(struct Scsi_Host *shost, struct list_head *
"recovered\n",
SAS_ADDR(task->dev),
cmd->device->lun);
- sas_eh_defer_cmd(cmd);
+ sas_eh_finish_cmd(cmd);
sas_scsi_clear_queue_lu(work_q, cmd);
goto Again;
}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2bd0fa4b9dda..306e5a493b1b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1873,6 +1873,8 @@ sd_spinup_disk(struct scsi_disk *sdkp)
break; /* standby */
if (sshdr.asc == 4 && sshdr.ascq == 0xc)
break; /* unavailable */
+ if (sshdr.asc == 4 && sshdr.ascq == 0x1b)
+ break; /* sanitize in progress */
/*
* Issue command to spin up drive when not ready
*/
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 2a12a4999f89..dfade13c30a3 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1906,7 +1906,7 @@ retry:
num = (rem_sz > scatter_elem_sz_prev) ?
scatter_elem_sz_prev : rem_sz;
- schp->pages[k] = alloc_pages(gfp_mask, order);
+ schp->pages[k] = alloc_pages(gfp_mask | __GFP_ZERO, order);
if (!schp->pages[k])
goto out;
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
index fc990a5bfbaf..aa9872227786 100644
--- a/drivers/scsi/ufs/ufs-debugfs.c
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -504,8 +504,8 @@ static int ufsdbg_tag_stats_show(struct seq_file *file, void *data)
seq_puts(file, sep);
}
seq_printf(file,
- "\n #\tnum uses\t%s\t #\tAll\tRead\tWrite\tUrg.R\tUrg.W\tFlush\n",
- sep);
+ "\n #\tnum uses\t%s\t #\tAll\tRead\tWrite\tUrg.R\tUrg.W\tFlush"
+ "\tDiscard\n", sep);
/* values */
for (i = 0; i < max_depth; i++) {
@@ -514,7 +514,8 @@ static int ufsdbg_tag_stats_show(struct seq_file *file, void *data)
ufs_stats->tag_stats[i][TS_WRITE] <= 0 &&
ufs_stats->tag_stats[i][TS_URGENT_READ] <= 0 &&
ufs_stats->tag_stats[i][TS_URGENT_WRITE] <= 0 &&
- ufs_stats->tag_stats[i][TS_FLUSH] <= 0)
+ ufs_stats->tag_stats[i][TS_FLUSH] <= 0 &&
+ ufs_stats->tag_stats[i][TS_DISCARD] <= 0)
continue;
is_tag_empty = false;
@@ -528,7 +529,8 @@ static int ufsdbg_tag_stats_show(struct seq_file *file, void *data)
ufs_stats->tag_stats[i][TS_WRITE] +
ufs_stats->tag_stats[i][TS_URGENT_READ] +
ufs_stats->tag_stats[i][TS_URGENT_WRITE] +
- ufs_stats->tag_stats[i][TS_FLUSH]);
+ ufs_stats->tag_stats[i][TS_FLUSH] +
+ ufs_stats->tag_stats[i][TS_DISCARD]);
}
seq_puts(file, "\n");
}
@@ -1411,8 +1413,9 @@ static int ufsdbg_req_stats_show(struct seq_file *file, void *data)
unsigned long flags;
/* Header */
- seq_printf(file, "\t%-10s %-10s %-10s %-10s %-10s %-10s",
- "All", "Write", "Read", "Read(urg)", "Write(urg)", "Flush");
+ seq_printf(file, "\t%-10s %-10s %-10s %-10s %-10s %-10s %-10s",
+ "All", "Write", "Read", "Read(urg)", "Write(urg)", "Flush",
+ "Discard");
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -1447,6 +1450,73 @@ static const struct file_operations ufsdbg_req_stats_desc = {
.write = ufsdbg_req_stats_write,
};
+static ssize_t ufsdbg_io_stats_write(struct file *filp,
+ const char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ struct ufs_hba *hba = filp->f_mapping->host->i_private;
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->ufs_stats.io_read.max_diff_req_count = 0;
+ hba->ufs_stats.io_read.max_diff_total_bytes = 0;
+ hba->ufs_stats.io_readwrite.max_diff_req_count = 0;
+ hba->ufs_stats.io_readwrite.max_diff_total_bytes = 0;
+ hba->ufs_stats.io_write.max_diff_req_count = 0;
+ hba->ufs_stats.io_write.max_diff_total_bytes = 0;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return cnt;
+}
+
+static int ufsdbg_io_stats_show(struct seq_file *file, void *data)
+{
+ struct ufs_hba *hba = (struct ufs_hba *)file->private;
+ unsigned long flags;
+
+ seq_printf(file, "\t\t%-10s %-10s %-10s %-10s %-10s %-10s\n",
+ "ReadCnt", "ReadBytes", "WriteCnt", "WriteBytes", "RWCnt",
+ "RWBytes");
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ seq_printf(file,
+ "Started: \t%-10llu %-10llu %-10llu %-10llu %-10llu %-10llu\n",
+ hba->ufs_stats.io_read.req_count_started,
+ hba->ufs_stats.io_read.total_bytes_started,
+ hba->ufs_stats.io_write.req_count_started,
+ hba->ufs_stats.io_write.total_bytes_started,
+ hba->ufs_stats.io_readwrite.req_count_started,
+ hba->ufs_stats.io_readwrite.total_bytes_started);
+ seq_printf(file,
+ "Completed: \t%-10llu %-10llu %-10llu %-10llu %-10llu %-10llu\n",
+ hba->ufs_stats.io_read.req_count_completed,
+ hba->ufs_stats.io_read.total_bytes_completed,
+ hba->ufs_stats.io_write.req_count_completed,
+ hba->ufs_stats.io_write.total_bytes_completed,
+ hba->ufs_stats.io_readwrite.req_count_completed,
+ hba->ufs_stats.io_readwrite.total_bytes_completed);
+ seq_printf(file,
+ "MaxDiff: \t%-10llu %-10llu %-10llu %-10llu %-10llu %-10llu\n",
+ hba->ufs_stats.io_read.max_diff_req_count,
+ hba->ufs_stats.io_read.max_diff_total_bytes,
+ hba->ufs_stats.io_write.max_diff_req_count,
+ hba->ufs_stats.io_write.max_diff_total_bytes,
+ hba->ufs_stats.io_readwrite.max_diff_req_count,
+ hba->ufs_stats.io_readwrite.max_diff_total_bytes);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+ return 0;
+}
+
+static int ufsdbg_io_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufsdbg_io_stats_show, inode->i_private);
+}
+
+static const struct file_operations ufsdbg_io_stats_desc = {
+ .open = ufsdbg_io_stats_open,
+ .read = seq_read,
+ .write = ufsdbg_io_stats_write,
+};
static int ufsdbg_reset_controller_show(struct seq_file *file, void *data)
{
@@ -1685,6 +1755,17 @@ void ufsdbg_add_debugfs(struct ufs_hba *hba)
goto err;
}
+ hba->debugfs_files.io_stats =
+ debugfs_create_file("io_stats", 0600,
+ hba->debugfs_files.stats_folder, hba,
+ &ufsdbg_io_stats_desc);
+ if (!hba->debugfs_files.io_stats) {
+ dev_err(hba->dev,
+ "%s: failed create io_stats debugfs entry\n",
+ __func__);
+ goto err;
+ }
+
hba->debugfs_files.reset_controller =
debugfs_create_file("reset_controller", S_IRUSR | S_IWUSR,
hba->debugfs_files.debugfs_root, hba,
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index b1c86d42148a..1c1d16d9e865 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -402,8 +402,8 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
return -EINVAL;
}
-
memset(&ice_set, 0, sizeof(ice_set));
+
if (qcom_host->ice.vops->config_start) {
spin_lock_irqsave(
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 1c7b041c895d..3b8715c8c0db 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -53,6 +53,34 @@
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
+static void ufshcd_log_slowio(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp, s64 iotime_us)
+{
+ sector_t lba = -1;
+ int transfer_len = -1;
+ u8 opcode = 0xff;
+
+ if (likely(iotime_us < hba->slowio_us))
+ return;
+
+ hba->slowio_cnt++;
+
+ if (lrbp->cmd) {
+ opcode = (u8)(*lrbp->cmd->cmnd);
+ if (opcode == READ_10 || opcode == WRITE_10) {
+ if (lrbp->cmd->request && lrbp->cmd->request->bio)
+ lba = lrbp->cmd->request->bio->
+ bi_iter.bi_sector;
+ transfer_len = be32_to_cpu(
+ lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+ }
+ }
+ dev_err_ratelimited(hba->dev,
+ "Slow UFS (%lld): time = %lld us, opcode = %02x, lba = %ld, "
+ "len = %d\n", hba->slowio_cnt, iotime_us, opcode, lba,
+ transfer_len);
+}
+
#ifdef CONFIG_DEBUG_FS
static int ufshcd_tag_req_type(struct request *rq)
@@ -63,6 +91,8 @@ static int ufshcd_tag_req_type(struct request *rq)
rq_type = TS_NOT_SUPPORTED;
else if (rq->cmd_flags & REQ_FLUSH)
rq_type = TS_FLUSH;
+ else if (rq->cmd_flags & REQ_DISCARD)
+ rq_type = TS_DISCARD;
else if (rq_data_dir(rq) == READ)
rq_type = (rq->cmd_flags & REQ_URGENT) ?
TS_URGENT_READ : TS_READ;
@@ -115,6 +145,9 @@ static void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
s64 delta = ktime_us_delta(lrbp->complete_time_stamp,
lrbp->issue_time_stamp);
+ /* Log for slow I/O */
+ ufshcd_log_slowio(hba, lrbp, delta);
+
/* update general request statistics */
if (hba->ufs_stats.req_stats[TS_TAG].count == 0)
hba->ufs_stats.req_stats[TS_TAG].min = delta;
@@ -147,6 +180,56 @@ ufshcd_update_query_stats(struct ufs_hba *hba, enum query_opcode opcode, u8 idn)
hba->ufs_stats.query_stats_arr[opcode][idn]++;
}
+static void
+__update_io_stat(struct ufs_hba *hba, struct ufshcd_io_stat *io_stat,
+ int transfer_len, int is_start)
+{
+ if (is_start) {
+ u64 diff;
+ io_stat->req_count_started++;
+ io_stat->total_bytes_started += transfer_len;
+ diff = io_stat->req_count_started -
+ io_stat->req_count_completed;
+ if (diff > io_stat->max_diff_req_count) {
+ io_stat->max_diff_req_count = diff;
+ }
+ diff = io_stat->total_bytes_started -
+ io_stat->total_bytes_completed;
+ if (diff > io_stat->max_diff_total_bytes) {
+ io_stat->max_diff_total_bytes = diff;
+ }
+ } else {
+ io_stat->req_count_completed++;
+ io_stat->total_bytes_completed += transfer_len;
+ }
+}
+
+static void
+update_io_stat(struct ufs_hba *hba, int tag, int is_start)
+{
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ u8 opcode;
+ int transfer_len;
+
+ if (!lrbp->cmd)
+ return;
+ opcode = (u8)(*lrbp->cmd->cmnd);
+ if (opcode != READ_10 && opcode != WRITE_10)
+ return;
+
+ transfer_len = be32_to_cpu(lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+
+ __update_io_stat(hba, &hba->ufs_stats.io_readwrite, transfer_len,
+ is_start);
+ if (opcode == READ_10) {
+ __update_io_stat(hba, &hba->ufs_stats.io_read, transfer_len,
+ is_start);
+ } else {
+ __update_io_stat(hba, &hba->ufs_stats.io_write, transfer_len,
+ is_start);
+ }
+}
+
#else
static inline void ufshcd_update_tag_stats(struct ufs_hba *hba, int tag)
{
@@ -164,6 +247,11 @@ static inline void ufshcd_update_error_stats(struct ufs_hba *hba, int type)
static inline
void update_req_stats(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
{
+ s64 delta = ktime_us_delta(lrbp->complete_time_stamp,
+ lrbp->issue_time_stamp);
+
+ /* Log for slow I/O */
+ ufshcd_log_slowio(hba, lrbp, delta);
}
static inline
@@ -171,6 +259,12 @@ void ufshcd_update_query_stats(struct ufs_hba *hba,
enum query_opcode opcode, u8 idn)
{
}
+
+static void
+update_io_stat(struct ufs_hba *hba, int tag, int is_start)
+{
+}
+
#endif
#define UFSHCD_REQ_SENSE_SIZE 18
@@ -2334,6 +2428,7 @@ int ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
wmb();
ufshcd_cond_add_cmd_trace(hba, task_tag, "send");
ufshcd_update_tag_stats(hba, task_tag);
+ update_io_stat(hba, task_tag, 1);
return ret;
}
@@ -5537,11 +5632,11 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
/* Clear pending transfer requests */
ufshcd_clear_cmd(hba, index);
ufshcd_outstanding_req_clear(hba, index);
- clear_bit_unlock(index, &hba->lrb_in_use);
lrbp->complete_time_stamp = ktime_get();
update_req_stats(hba, lrbp);
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
+ clear_bit_unlock(index, &hba->lrb_in_use);
ufshcd_release_all(hba);
if (cmd->request) {
/*
@@ -5581,7 +5676,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
struct scsi_cmnd *cmd;
int result;
int index;
- struct request *req;
for_each_set_bit(index, &completed_reqs, hba->nutrs) {
lrbp = &hba->lrb[index];
@@ -5589,15 +5683,16 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
if (cmd) {
ufshcd_cond_add_cmd_trace(hba, index, "complete");
ufshcd_update_tag_stats_completion(hba, cmd);
+ update_io_stat(hba, index, 0);
result = ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
cmd->result = result;
- clear_bit_unlock(index, &hba->lrb_in_use);
lrbp->complete_time_stamp = ktime_get();
update_req_stats(hba, lrbp);
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
+ clear_bit_unlock(index, &hba->lrb_in_use);
__ufshcd_release(hba, false);
__ufshcd_hibern8_release(hba, false);
if (cmd->request) {
@@ -5610,23 +5705,22 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
false);
ufshcd_vops_crypto_engine_cfg_end(hba,
lrbp, cmd->request);
- }
- clear_bit_unlock(index, &hba->lrb_in_use);
- req = cmd->request;
- if (req) {
/* Update IO svc time latency histogram */
- if (req->lat_hist_enabled) {
+ if (cmd->request->lat_hist_enabled) {
ktime_t completion;
u_int64_t delta_us;
completion = ktime_get();
delta_us = ktime_us_delta(completion,
- req->lat_hist_io_start);
+ cmd->request->
+ lat_hist_io_start);
/* rq_data_dir() => true if WRITE */
- blk_update_latency_hist(&hba->io_lat_s,
- (rq_data_dir(req) == READ),
- delta_us);
+ blk_update_latency_hist(
+ (rq_data_dir(
+ cmd->request) == READ) ?
+ &hba->io_lat_read :
+ &hba->io_lat_write, delta_us);
}
}
/* Do not touch lrbp after scsi done */
@@ -9274,9 +9368,10 @@ latency_hist_store(struct device *dev, struct device_attribute *attr,
if (kstrtol(buf, 0, &value))
return -EINVAL;
- if (value == BLK_IO_LAT_HIST_ZERO)
- blk_zero_latency_hist(&hba->io_lat_s);
- else if (value == BLK_IO_LAT_HIST_ENABLE ||
+ if (value == BLK_IO_LAT_HIST_ZERO) {
+ memset(&hba->io_lat_read, 0, sizeof(hba->io_lat_read));
+ memset(&hba->io_lat_write, 0, sizeof(hba->io_lat_write));
+ } else if (value == BLK_IO_LAT_HIST_ENABLE ||
value == BLK_IO_LAT_HIST_DISABLE)
hba->latency_hist_enabled = value;
return count;
@@ -9287,8 +9382,14 @@ latency_hist_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev);
+ size_t written_bytes;
+
+ written_bytes = blk_latency_hist_show("Read", &hba->io_lat_read,
+ buf, PAGE_SIZE);
+ written_bytes += blk_latency_hist_show("Write", &hba->io_lat_write,
+ buf + written_bytes, PAGE_SIZE - written_bytes);
- return blk_latency_hist_show(&hba->io_lat_s, buf);
+ return written_bytes;
}
static DEVICE_ATTR(latency_hist, S_IRUGO | S_IWUSR,
@@ -9304,7 +9405,78 @@ ufshcd_init_latency_hist(struct ufs_hba *hba)
static void
ufshcd_exit_latency_hist(struct ufs_hba *hba)
{
- device_create_file(hba->dev, &dev_attr_latency_hist);
+ device_remove_file(hba->dev, &dev_attr_latency_hist);
+}
+
+/**
+ * Two sysfs entries for slow I/O monitoring:
+ * - slowio_us: watermark time in us. Can be updated by writing.
+ * - slowio_cnt: number of slow I/Os counted. Reset by writing any value.
+ */
+static ssize_t
+slowio_us_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags, value;
+
+ if (kstrtol(buf, 0, &value))
+ return -EINVAL;
+
+ if (value < UFSHCD_MIN_SLOWIO_US)
+ return -EINVAL;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->slowio_us = value;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t
+slowio_us_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%lld\n", hba->slowio_us);
+}
+
+static ssize_t
+slowio_cnt_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->slowio_cnt = 0;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return count;
+}
+
+static ssize_t
+slowio_cnt_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ return snprintf(buf, PAGE_SIZE, "%lld\n", hba->slowio_cnt);
+}
+
+static DEVICE_ATTR_RW(slowio_us);
+static DEVICE_ATTR_RW(slowio_cnt);
+
+static void
+ufshcd_init_slowio(struct ufs_hba *hba)
+{
+ if (device_create_file(hba->dev, &dev_attr_slowio_us))
+ dev_err(hba->dev, "Failed to create slowio_us sysfs entry\n");
+ if (device_create_file(hba->dev, &dev_attr_slowio_cnt))
+ dev_err(hba->dev, "Failed to create slowio_cnt sysfs entry\n");
+
+ hba->slowio_us = UFSHCD_DEFAULT_SLOWIO_US;
+}
+
+static void
+ufshcd_exit_slowio(struct ufs_hba *hba)
+{
+ device_remove_file(hba->dev, &dev_attr_slowio_us);
+ device_remove_file(hba->dev, &dev_attr_slowio_cnt);
}
/**
@@ -9321,6 +9493,7 @@ void ufshcd_remove(struct ufs_hba *hba)
ufshcd_exit_clk_gating(hba);
ufshcd_exit_hibern8_on_idle(hba);
+ ufshcd_exit_slowio(hba);
if (ufshcd_is_clkscaling_supported(hba)) {
device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
ufshcd_exit_latency_hist(hba);
@@ -10032,6 +10205,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufshcd_init_latency_hist(hba);
+ ufshcd_init_slowio(hba);
+
/*
* We are assuming that device wasn't put in sleep/power-down
* state exclusively during the boot stage before kernel.
@@ -10055,6 +10230,7 @@ out_remove_scsi_host:
exit_gating:
ufshcd_exit_clk_gating(hba);
ufshcd_exit_latency_hist(hba);
+ ufshcd_exit_slowio(hba);
out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);
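
update_io_stat() above keeps running totals at submission and completion for READ_10/WRITE_10 commands and records the largest gap between them, i.e. a high-water mark of in-flight requests and bytes, which the io_stats debugfs file prints as the MaxDiff row. A userspace model of that bookkeeping:

#include <stdint.h>
#include <stdio.h>

struct io_stat {
	uint64_t started, completed;
	uint64_t bytes_started, bytes_completed;
	uint64_t max_diff_reqs, max_diff_bytes;
};

static void io_start(struct io_stat *s, uint64_t bytes)
{
	uint64_t diff;

	s->started++;
	s->bytes_started += bytes;
	diff = s->started - s->completed;
	if (diff > s->max_diff_reqs)
		s->max_diff_reqs = diff;
	diff = s->bytes_started - s->bytes_completed;
	if (diff > s->max_diff_bytes)
		s->max_diff_bytes = diff;
}

static void io_complete(struct io_stat *s, uint64_t bytes)
{
	s->completed++;
	s->bytes_completed += bytes;
}

int main(void)
{
	struct io_stat s = { 0 };

	io_start(&s, 4096);
	io_start(&s, 8192);	/* two requests in flight */
	io_complete(&s, 4096);
	io_start(&s, 4096);
	printf("max outstanding: %llu reqs, %llu bytes\n",
	       (unsigned long long)s.max_diff_reqs,
	       (unsigned long long)s.max_diff_bytes);	/* 2 reqs, 12288 bytes */
	return 0;
}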
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index e6703603d54a..f5a380226576 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -547,6 +547,7 @@ struct debugfs_files {
struct dentry *dbg_print_en;
struct dentry *req_stats;
struct dentry *query_stats;
+ struct dentry *io_stats;
u32 dme_local_attr_id;
u32 dme_peer_attr_id;
struct dentry *reset_controller;
@@ -570,7 +571,8 @@ enum ts_types {
TS_URGENT_READ = 3,
TS_URGENT_WRITE = 4,
TS_FLUSH = 5,
- TS_NUM_STATS = 6,
+ TS_DISCARD = 6,
+ TS_NUM_STATS = 7,
};
/**
@@ -586,6 +588,24 @@ struct ufshcd_req_stat {
u64 sum;
u64 count;
};
+
+/**
+ * struct ufshcd_io_stat - statistics for I/O amount.
+ * @req_count_started: total number of I/O requests started.
+ * @total_bytes_started: total I/O amount in bytes started.
+ * @req_count_completed: total number of I/O requests completed.
+ * @total_bytes_completed: total I/O amount in bytes completed.
+ * @max_diff_req_count: MAX of 'req_count_started - req_count_completed'.
+ * @max_diff_total_bytes: MAX of 'total_bytes_started - total_bytes_completed'.
+ */
+struct ufshcd_io_stat {
+ u64 req_count_started;
+ u64 total_bytes_started;
+ u64 req_count_completed;
+ u64 total_bytes_completed;
+ u64 max_diff_req_count;
+ u64 max_diff_total_bytes;
+};
#endif
enum ufshcd_ctx {
@@ -630,6 +650,9 @@ struct ufs_stats {
int err_stats[UFS_ERR_MAX];
struct ufshcd_req_stat req_stats[TS_NUM_STATS];
int query_stats_arr[UPIU_QUERY_OPCODE_MAX][MAX_QUERY_IDN];
+ struct ufshcd_io_stat io_read;
+ struct ufshcd_io_stat io_write;
+ struct ufshcd_io_stat io_readwrite;
#endif
u32 last_intr_status;
@@ -951,9 +974,14 @@ struct ufs_hba {
bool full_init_linereset;
struct pinctrl *pctrl;
-
- int latency_hist_enabled;
- struct io_latency_state io_lat_s;
+
+ int latency_hist_enabled;
+ struct io_latency_state io_lat_read;
+ struct io_latency_state io_lat_write;
+
+ /* To monitor slow UFS I/O requests. */
+ u64 slowio_us;
+ u64 slowio_cnt;
};
static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
@@ -1424,4 +1452,7 @@ static inline void ufshcd_vops_pm_qos_req_end(struct ufs_hba *hba,
hba->var->pm_qos_vops->req_end(hba, req, lock);
}
+#define UFSHCD_MIN_SLOWIO_US (1000) /* 1 ms */
+#define UFSHCD_DEFAULT_SLOWIO_US (10000000) /* 10 seconds */
+
#endif /* End of Header */
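The DEVICE_ATTR_RW(slowio_us) line earlier in this patch implies slowio_us_show()/slowio_us_store() callbacks elsewhere in ufshcd.c that are not part of these hunks. A minimal sketch of what the store side could look like, assuming it clamps against UFSHCD_MIN_SLOWIO_US and that the device's drvdata is the ufs_hba (both assumptions, not taken from the patch):

static ssize_t slowio_us_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	/* Sketch only: assumes the attribute's drvdata is the ufs_hba. */
	struct ufs_hba *hba = dev_get_drvdata(dev);
	unsigned long long value;

	if (kstrtoull(buf, 0, &value))
		return -EINVAL;
	if (value < UFSHCD_MIN_SLOWIO_US)	/* reject anything under 1 ms */
		return -EINVAL;

	hba->slowio_us = value;
	return count;
}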
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index b1def24175d7..4ce9640810ea 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -884,7 +884,4 @@ config QCOM_CX_IPEAK
clients are going to cross their thresholds then Cx ipeak hw module will raise
an interrupt to cDSP block to throttle cDSP fmax.
-config STATE_NOTIFIER
- bool "State Notifier"
-
source "drivers/soc/qcom/memshare/Kconfig"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 80a91ff5d3f3..caad280c1c45 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -105,7 +105,6 @@ obj-$(CONFIG_WCD_DSP_GLINK) += wcd-dsp-glink.o
obj-$(CONFIG_QCOM_SMCINVOKE) += smcinvoke.o
obj-$(CONFIG_QCOM_EARLY_RANDOM) += early_random.o
obj-$(CONFIG_QCOM_CX_IPEAK) += cx_ipeak.o
-obj-$(CONFIG_STATE_NOTIFIER) += state_notifier.o
# TODO: remove me b/62058353
subdir-ccflags-y += \
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index a7f109533c92..9c9e4f63a08d 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2265,29 +2265,6 @@ out:
return 0;
}
-static int icnss_call_driver_remove(struct icnss_priv *priv)
-{
- icnss_pr_dbg("Calling driver remove state: 0x%lx\n", priv->state);
-
- clear_bit(ICNSS_FW_READY, &priv->state);
-
- if (!test_bit(ICNSS_DRIVER_PROBED, &penv->state))
- return 0;
-
- if (!priv->ops || !priv->ops->remove)
- return 0;
-
- set_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
- penv->ops->remove(&priv->pdev->dev);
-
- clear_bit(ICNSS_DRIVER_UNLOADING, &penv->state);
- clear_bit(ICNSS_DRIVER_PROBED, &priv->state);
-
- icnss_hw_power_off(penv);
-
- return 0;
-}
-
static int icnss_fw_crashed(struct icnss_priv *priv,
struct icnss_event_pd_service_down_data *event_data)
{
@@ -2323,10 +2300,7 @@ static int icnss_driver_event_pd_service_down(struct icnss_priv *priv,
goto out;
}
- if (event_data->crashed)
- icnss_fw_crashed(priv, event_data);
- else
- icnss_call_driver_remove(priv);
+ icnss_fw_crashed(priv, event_data);
out:
kfree(data);
diff --git a/drivers/soc/qcom/state_notifier.c b/drivers/soc/qcom/state_notifier.c
deleted file mode 100644
index a3cfb16d13cf..000000000000
--- a/drivers/soc/qcom/state_notifier.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * State Notifier Driver
- *
- * Copyright (c) 2013-2017, Pranav Vashi <neobuddy89@gmail.com>
- * (c) 2017, Joe Maples <joe@frap129.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-#include <linux/export.h>
-#include <linux/module.h>
-#include <linux/state_notifier.h>
-
-#define DEFAULT_SUSPEND_DEFER_TIME 1
-#define STATE_NOTIFIER "state_notifier"
-
-/*
- * debug = 1 will print all
- */
-static unsigned int debug;
-
-#define dprintk(msg...) \
-do { \
- if (debug) \
- pr_info(msg); \
-} while (0)
-
-static unsigned int suspend_defer_time = DEFAULT_SUSPEND_DEFER_TIME;
-module_param_named(suspend_defer_time, suspend_defer_time, uint, 0664);
-static struct delayed_work suspend_work;
-static struct workqueue_struct *susp_wq;
-struct work_struct resume_work;
-bool state_suspended;
-module_param_named(state_suspended, state_suspended, bool, 0444);
-static bool suspend_in_progress;
-
-static BLOCKING_NOTIFIER_HEAD(state_notifier_list);
-
-/**
- * state_register_client - register a client notifier
- * @nb: notifier block to callback on events
- */
-int state_register_client(struct notifier_block *nb)
-{
- return blocking_notifier_chain_register(&state_notifier_list, nb);
-}
-EXPORT_SYMBOL(state_register_client);
-
-/**
- * state_unregister_client - unregister a client notifier
- * @nb: notifier block to callback on events
- */
-int state_unregister_client(struct notifier_block *nb)
-{
- return blocking_notifier_chain_unregister(&state_notifier_list, nb);
-}
-EXPORT_SYMBOL(state_unregister_client);
-
-/**
- * state_notifier_call_chain - notify clients on state_events
- * @val: Value passed unmodified to notifier function
- * @v: pointer passed unmodified to notifier function
- *
- */
-int state_notifier_call_chain(unsigned long val, void *v)
-{
- return blocking_notifier_call_chain(&state_notifier_list, val, v);
-}
-EXPORT_SYMBOL_GPL(state_notifier_call_chain);
-
-static void _suspend_work(struct work_struct *work)
-{
- state_suspended = true;
- state_notifier_call_chain(STATE_NOTIFIER_SUSPEND, NULL);
- suspend_in_progress = false;
- dprintk("%s: suspend completed.\n", STATE_NOTIFIER);
-}
-
-static void _resume_work(struct work_struct *work)
-{
- state_suspended = false;
- state_notifier_call_chain(STATE_NOTIFIER_ACTIVE, NULL);
- dprintk("%s: resume completed.\n", STATE_NOTIFIER);
-}
-
-void state_suspend(void)
-{
- dprintk("%s: suspend called.\n", STATE_NOTIFIER);
- if (state_suspended || suspend_in_progress)
- return;
-
- suspend_in_progress = true;
-
- queue_delayed_work(susp_wq, &suspend_work,
- msecs_to_jiffies(suspend_defer_time * 1000));
-}
-
-void state_resume(void)
-{
- dprintk("%s: resume called.\n", STATE_NOTIFIER);
- cancel_delayed_work_sync(&suspend_work);
- suspend_in_progress = false;
-
- if (state_suspended)
- queue_work(susp_wq, &resume_work);
-}
-
-static int __init state_notifier_init(void)
-{
- susp_wq =
- alloc_workqueue("state_susp_wq",
- WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
-
- if (!susp_wq)
- pr_err("State Notifier failed to allocate suspend workqueue\n");
-
- INIT_DELAYED_WORK(&suspend_work, _suspend_work);
- INIT_WORK(&resume_work, _resume_work);
-
- return 0;
-}
-
-subsys_initcall(state_notifier_init);
-
-MODULE_AUTHOR("Pranav Vashi <neobuddy89@gmail.com>");
-MODULE_DESCRIPTION("State Notifier Driver");
-MODULE_LICENSE("GPLv2");
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
index 58efa98313aa..24c07fea9de2 100644
--- a/drivers/spi/spi-pxa2xx.h
+++ b/drivers/spi/spi-pxa2xx.h
@@ -38,7 +38,7 @@ struct driver_data {
/* SSP register addresses */
void __iomem *ioaddr;
- u32 ssdr_physical;
+ phys_addr_t ssdr_physical;
/* SSP masks*/
u32 dma_cr1;
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 606d13a1ea74..480aa8d3dd11 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -46,6 +46,8 @@
#define CREATE_TRACE_POINTS
#include "trace/lowmemorykiller.h"
+extern int extra_free_kbytes;
+
static uint32_t lowmem_debug_level = 1;
static short lowmem_adj[6] = {
0,
@@ -102,7 +104,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
if (lowmem_minfree_size < array_size)
array_size = lowmem_minfree_size;
for (i = 0; i < array_size; i++) {
- minfree = lowmem_minfree[i];
+ minfree = lowmem_minfree[i] +
+ ((extra_free_kbytes * 1024) / PAGE_SIZE);
if (other_free < minfree && other_file < minfree) {
min_score_adj = lowmem_adj[i];
break;
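As a rough worked example (extra_free_kbytes is set elsewhere, so the value here is hypothetical): with extra_free_kbytes = 16384 and a 4 KiB PAGE_SIZE, each minfree threshold is raised by (16384 * 1024) / 4096 = 4096 pages, i.e. 16 MiB.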
diff --git a/drivers/staging/easel/regulator/bcm15602-regulator.c b/drivers/staging/easel/regulator/bcm15602-regulator.c
index a9a663193a07..d0a3e589aa50 100644
--- a/drivers/staging/easel/regulator/bcm15602-regulator.c
+++ b/drivers/staging/easel/regulator/bcm15602-regulator.c
@@ -1439,6 +1439,7 @@ static struct i2c_driver bcm15602_driver = {
#ifdef CONFIG_PM
.pm = &bcm15602_dev_pm_ops,
#endif
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.probe = bcm15602_probe,
.id_table = bcm15602_id_table,
diff --git a/drivers/staging/fw-api/fw/htc.h b/drivers/staging/fw-api/fw/htc.h
index 2b141684cd82..92b3b680837a 100644
--- a/drivers/staging/fw-api/fw/htc.h
+++ b/drivers/staging/fw-api/fw/htc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2014-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, 2014-2016, 2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -233,7 +233,8 @@ typedef PREPACK struct {
#define HTC_CONNECT_FLAGS_ENABLE_HTC_SCHEDULE (1 << 4)
ServiceMetaLength : 8, /* length of meta data that follows */
- _Pad1 : 8;
+ LookAheadV2 : 1, /* 1 if host supports HTC_LOOKAHEAD_REPORT_V2 */
+ _Pad1 : 7;
/* service-specific meta data starts after the header */
@@ -266,7 +267,8 @@ typedef PREPACK struct {
EndpointID : 8, /* assigned endpoint ID */
MaxMsgSize : 16; /* maximum expected message size on this endpoint */
A_UINT32 ServiceMetaLength : 8, /* length of meta data that follows */
- _Pad1 : 8,
+ LookAheadV2 : 1,/* 1 if target supports HTC_LOOKAHEAD_REPORT_V2 */
+ _Pad1 : 7,
reserved : 16;
/* service-specific meta data starts after the header */
@@ -392,6 +394,30 @@ typedef PREPACK struct {
} POSTPACK HTC_LOOKAHEAD_REPORT;
+/*
+ * If the host sets the HTC_CONNECT_SERVICE_MSG.LookAheadV2 flag and the
+ * target sets the HTC_CONNECT_SERVICE_RESPONSE_MSG.LookAheadV2 flag,
+ * HTC_LOOKAHEAD_REPORT_V2 is used; otherwise HTC_LOOKAHEAD_REPORT is used.
+ */
+typedef PREPACK struct {
+ A_UINT32 PreValid : 8, /* pre valid guard */
+ reserved0 : 24;
+ A_UINT32 LookAhead0 : 8, /* 8 byte lookahead */
+ LookAhead1 : 8,
+ LookAhead2 : 8,
+ LookAhead3 : 8;
+ A_UINT32 LookAhead4 : 8, /* 8 byte lookahead */
+ LookAhead5 : 8,
+ LookAhead6 : 8,
+ LookAhead7 : 8;
+ A_UINT32 PostValid : 8, /* post valid guard */
+ reserved1 : 24;
+ /* NOTE: the LookAhead array is guarded by PreValid and PostValid
+ * guard bytes.
+ * The PreValid byte must equal the inverse of the PostValid byte.
+ */
+} POSTPACK HTC_LOOKAHEAD_REPORT_V2;
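Given the guard-byte rule noted inside the struct, a receiver could sanity-check a V2 report with something like the sketch below; the helper name is made up, and only the PreValid/PostValid inverse relationship is taken from the header:

static inline int htc_lookahead_v2_valid(HTC_LOOKAHEAD_REPORT_V2 *report)
{
	/* PreValid must be the bitwise inverse of PostValid (8-bit fields). */
	return report->PreValid == ((~report->PostValid) & 0xff);
}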
+
#define HTC_LOOKAHEAD_REPORT_PREVALID_LSB 0
#define HTC_LOOKAHEAD_REPORT_PREVALID_MASK 0x000000ff
#define HTC_LOOKAHEAD_REPORT_PREVALID_OFFSET 0x00000000
diff --git a/drivers/staging/fw-api/fw/htt.h b/drivers/staging/fw-api/fw/htt.h
index 23f026624d2d..e56ca9d38ff4 100644
--- a/drivers/staging/fw-api/fw/htt.h
+++ b/drivers/staging/fw-api/fw/htt.h
@@ -5079,23 +5079,23 @@ PREPACK struct htt_rx_ring_selection_cfg_t {
#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_0111_S 23
/* Block Ack Request */
-#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1000_M 0x01000001
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1000_M 0x01000000
#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1000_S 24
-#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1000_M 0x02000001
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1000_M 0x02000000
#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1000_S 25
-#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1000_M 0x00000001
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1000_M 0x04000000
#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1000_S 26
/* Block Ack*/
-#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1001_M 0x00000001
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1001_M 0x08000000
#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_FP_CTRL_1001_S 27
-#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1001_M 0x00000001
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1001_M 0x10000000
#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MD_CTRL_1001_S 28
-#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1001_M 0x00000001
+#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1001_M 0x20000000
#define HTT_RX_RING_SELECTION_CFG_PKT_TYPE_ENABLE_FLAG2_MO_CTRL_1001_S 29
/* PS-POLL */
diff --git a/drivers/staging/fw-api/fw/htt_ppdu_stats.h b/drivers/staging/fw-api/fw/htt_ppdu_stats.h
new file mode 100644
index 000000000000..7499827dd978
--- /dev/null
+++ b/drivers/staging/fw-api/fw/htt_ppdu_stats.h
@@ -0,0 +1,1595 @@
+/*
+ * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+/**
+ * @file htt_ppdu_stats.h
+ *
+ * @details the public header file of HTT PPDU STATS
+ */
+#ifndef __HTT_PPDU_STATS_H__
+#define __HTT_PPDU_STATS_H__
+
+#include <htt.h>
+#include <htt_stats.h>
+
+#define HTT_BA_64_BIT_MAP_SIZE_DWORDS 2
+#define HTT_BA_256_BIT_MAP_SIZE_DWORDS 8
+enum htt_ppdu_stats_tlv_tag {
+ HTT_PPDU_STATS_COMMON_TLV,
+ HTT_PPDU_STATS_USR_COMMON_TLV,
+ HTT_PPDU_STATS_USR_RATE_TLV,
+ HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_64_TLV,
+ HTT_PPDU_STATS_USR_MPDU_ENQ_BITMAP_256_TLV,
+ HTT_PPDU_STATS_SCH_CMD_STATUS_TLV,
+ HTT_PPDU_STATS_USR_COMPLTN_COMMON_TLV,
+ HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_64_TLV,
+ HTT_PPDU_STATS_USR_COMPLTN_BA_BITMAP_256_TLV,
+ HTT_PPDU_STATS_USR_COMPLTN_ACK_BA_STATUS_TLV,
+ HTT_PPDU_STATS_USR_COMPLTN_FLUSH_TLV,
+ HTT_PPDU_STATS_USR_COMMON_ARRAY_TLV,
+ HTT_PPDU_STATS_INFO_TLV,
+ HTT_PPDU_STATS_TX_MGMTCTRL_PAYLOAD_TLV,
+
+ /* New TLV's are added above to this line */
+ HTT_PPDU_STATS_MAX_TAG,
+};
+typedef enum htt_ppdu_stats_tlv_tag htt_ppdu_stats_tlv_tag_t;
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_M 0x000000ff
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_S 0
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_M) >> \
+ HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_S)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU_M 0x00000100
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU_S 8
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU_M) >> \
+ HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU_S)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_IS_AMPDU_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED_M 0x00000600
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED_S 9
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED_M) >> \
+ HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED_S)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_BA_ACK_FAILED_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW_M 0x00003800
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW_S 11
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW_M) >> \
+ HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW_S)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_BW_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI_M 0x00004000
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI_S 14
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI_M) >> \
+ HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI_S)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_SGI_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_M 0xffff0000
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_S 16
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_M) >> \
+ HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_S)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_M 0x0000ffff
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_S 0
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_M) >> \
+ HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_S)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_SUCC_MSDUS_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_M 0xffff0000
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_S 16
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_M) >> \
+ HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_S)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RETRY_MSDUS_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_M 0x0000ffff
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_S 0
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_M) >> \
+ HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_S)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_FAILED_MSDUS_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR_M 0xffff0000
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR_S 16
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR_M) >> \
+ HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR_S)
+
+#define HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_DUR_S)); \
+ } while (0)
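All of the *_GET/*_SET pairs above follow the same mask-and-shift convention (SET only ORs bits in, so the target word is expected to start with that field cleared). A small usage sketch with arbitrary example values:

static inline A_UINT32 example_pack_rate_and_peer(void)
{
	A_UINT32 word = 0;

	HTT_PPDU_STATS_ARRAY_ITEM_TLV_TX_RATE_SET(word, 0x2a); /* bits 7:0 */
	HTT_PPDU_STATS_ARRAY_ITEM_TLV_PEERID_SET(word, 17);    /* bits 31:16 */
	/* ..._TX_RATE_GET(word) now returns 0x2a,
	 * ..._PEERID_GET(word) now returns 17 */
	return word;
}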
+
+PREPACK struct htt_tx_ppdu_stats_info {
+ htt_tlv_hdr_t tlv_hdr;
+ A_UINT32 tx_success_bytes;
+ A_UINT32 tx_retry_bytes;
+ A_UINT32 tx_failed_bytes;
+ A_UINT32 tx_ratecode: 8,
+ is_ampdu: 1,
+ ba_ack_failed: 2,
+ /* 0: 20 MHz
+ 1: 40 MHz
+ 2: 80 MHz
+ 3: 160 MHz or 80+80 MHz */
+ bw: 3,
+ sgi: 1,
+ reserved0: 1,
+ peer_id: 16;
+ A_UINT32 tx_success_msdus: 16,
+ tx_retry_msdus: 16;
+ A_UINT32 tx_failed_msdus: 16,
+ /* units: microseconds (us) */
+ tx_duration: 16;
+} POSTPACK;
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+ A_UINT32 number_of_ppdu_stats;
+ /*
+ * tx_ppdu_stats_info is filled by multiple struct htt_tx_ppdu_stats_info
+ * elements.
+ * tx_ppdu_stats_info is variable length, with length =
+ * number_of_ppdu_stats * sizeof (struct htt_tx_ppdu_stats_info)
+ */
+ A_UINT32 tx_ppdu_stats_info[1/*number_of_ppdu_stats*/];
+} htt_ppdu_stats_usr_common_array_tlv_v;
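Because tx_ppdu_stats_info is declared with a single element but actually carries number_of_ppdu_stats records, a consumer has to step through it record by record; a minimal sketch, assuming the records are packed back to back as the comment states:

static inline void example_walk_ppdu_stats(
		htt_ppdu_stats_usr_common_array_tlv_v *tlv)
{
	struct htt_tx_ppdu_stats_info *info =
		(struct htt_tx_ppdu_stats_info *)tlv->tx_ppdu_stats_info;
	A_UINT32 i;

	for (i = 0; i < tlv->number_of_ppdu_stats; i++, info++) {
		/* consume info->peer_id, info->tx_success_bytes, etc. */
	}
}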
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+
+ /* Refer bmi_msg.h */
+ A_UINT32 target_type;
+ A_UINT32 hw[1]; /* Variable length, refer to struct scheduler_cmd_status */
+} htt_ppdu_stats_sch_cmd_tlv_v;
+
+#define HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_M 0x0000ffff
+#define HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_S 0
+
+#define HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_M) >> \
+ HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_S)
+
+#define HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_SCH_CMDID_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_COMMON_TLV_RING_ID_M 0x00ff0000
+#define HTT_PPDU_STATS_COMMON_TLV_RING_ID_S 16
+
+#define HTT_PPDU_STATS_COMMON_TLV_RING_ID_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_COMMON_TLV_RING_ID_M) >> \
+ HTT_PPDU_STATS_COMMON_TLV_RING_ID_S)
+
+#define HTT_PPDU_STATS_COMMON_TLV_RING_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_RING_ID, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_RING_ID_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_M 0xff000000
+#define HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_S 24
+
+#define HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_M) >> \
+ HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_S)
+
+#define HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_NUM_USERS, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_S)); \
+ } while (0)
+
+/* HW queue type */
+enum HTT_TX_QUEUE_TYPE {
+ HTT_TX_QUEUE_INACTIVE,
+ HTT_TX_QUEUE_DATA,
+ HTT_TX_QUEUE_BEACON,
+ HTT_TX_QUEUE_PSPOLL,
+ HTT_TX_QUEUE_CAB,
+ HTT_TX_QUEUE_HALPHY,
+ HTT_TX_QUEUE_QBOOST_RESP,
+ HTT_TX_QUEUE_NAN_BEACON,
+ HTT_TX_QUEUE_NAN_MGMT,
+ HTT_TX_QUEUE_UL_DATA,
+ HTT_TX_QUEUE_UL_BSR_RESP,
+ HTT_TX_QUEUE_MGMT,
+ HTT_TX_QUEUE_MAX,
+};
+typedef enum HTT_TX_QUEUE_TYPE HTT_TX_QUEUE_TYPE;
+
+/* frame_type */
+enum HTT_STATS_FTYPE {
+ HTT_STATS_FTYPE_SGEN_NDPA = 0,
+ HTT_STATS_FTYPE_SGEN_NDP,
+ HTT_STATS_FTYPE_SGEN_BRP,
+ HTT_STATS_FTYPE_SGEN_BAR,
+ HTT_STATS_FTYPE_SGEN_RTS,
+ HTT_STATS_FTYPE_SGEN_CTS,
+ HTT_STATS_FTYPE_SGEN_CFEND,
+ HTT_STATS_FTYPE_SGEN_AX_NDPA,
+ HTT_STATS_FTYPE_SGEN_AX_NDP,
+ HTT_STATS_FTYPE_SGEN_MU_TRIG,
+ HTT_STATS_FTYPE_SGEN_MU_BAR,
+ HTT_STATS_FTYPE_SGEN_MU_BRP,
+ HTT_STATS_FTYPE_SGEN_MU_RTS,
+ HTT_STATS_FTYPE_SGEN_MU_BSR,
+ HTT_STATS_FTYPE_SGEN_UL_BSR,
+ HTT_STATS_FTYPE_TIDQ_DATA_SU,
+ HTT_STATS_FTYPE_TIDQ_DATA_MU,
+ HTT_STATS_FTYPE_MAX,
+};
+typedef enum HTT_STATS_FTYPE HTT_STATS_FTYPE;
+
+/* FRM_TYPE defined in HTT_STATS_FTYPE */
+#define HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_M 0x000000ff
+#define HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_S 0
+
+#define HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_M) >> \
+ HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_S)
+
+#define HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_FRM_TYPE_S)); \
+ } while (0)
+
+/* QTYPE defined in HTT_TX_QUEUE_TYPE */
+#define HTT_PPDU_STATS_COMMON_TLV_QTYPE_M 0x0000ff00
+#define HTT_PPDU_STATS_COMMON_TLV_QTYPE_S 8
+
+#define HTT_PPDU_STATS_COMMON_TLV_QTYPE_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_COMMON_TLV_QTYPE_M) >> \
+ HTT_PPDU_STATS_COMMON_TLV_QTYPE_S)
+
+#define HTT_PPDU_STATS_COMMON_TLV_QTYPE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_QTYPE, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_QTYPE_S)); \
+ } while (0)
+
+
+enum HTT_PPDU_STATS_BW {
+ HTT_PPDU_STATS_BANDWIDTH_5MHZ = 0,
+ HTT_PPDU_STATS_BANDWIDTH_10MHZ = 1,
+ HTT_PPDU_STATS_BANDWIDTH_20MHZ = 2,
+ HTT_PPDU_STATS_BANDWIDTH_40MHZ = 3,
+ HTT_PPDU_STATS_BANDWIDTH_80MHZ = 4,
+ HTT_PPDU_STATS_BANDWIDTH_160MHZ = 5, /* includes 80+80 */
+ HTT_PPDU_STATS_BANDWIDTH_DYN = 6,
+};
+typedef enum HTT_PPDU_STATS_BW HTT_PPDU_STATS_BW;
+
+#define HTT_PPDU_STATS_COMMON_TLV_BW_M 0x000f0000
+#define HTT_PPDU_STATS_COMMON_TLV_BW_S 16
+
+#define HTT_PPDU_STATS_COMMON_TLV_BW_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_COMMON_TLV_BW_M) >> \
+ HTT_PPDU_STATS_COMMON_TLV_BW_S)
+
+#define HTT_PPDU_STATS_COMMON_TLV_BW_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_BW, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_BW_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_M 0x0000ffff
+#define HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_S 0
+
+#define HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_M) >> \
+ HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_S)
+
+#define HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_PHY_MODE, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_PHY_MODE_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_M 0xffff0000
+#define HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_S 16
+
+#define HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_M) >> \
+ HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_S)
+
+#define HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_COMMON_TLV_CHAN_MHZ_S)); \
+ } while (0)
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+
+ A_UINT32 ppdu_id;
+ /* BIT [ 15 : 0] :- sched_cmdid
+ * BIT [ 23 : 16] :- ring_id
+ * BIT [ 31 : 24] :- num_users
+ */
+ union {
+ A_UINT32 ring_id__sched_cmdid;
+ struct {
+ A_UINT32 sched_cmdid: 16,
+ ring_id: 8,
+ num_users: 8;
+ };
+ };
+ /* BIT [ 7 : 0] :- frame_type - HTT_STATS_FTYPE
+ * BIT [ 15: 8] :- queue_type - HTT_TX_QUEUE_TYPE
+ * BIT [ 19: 16] :- bw - HTT_PPDU_STATS_BW
+ * BIT [ 31: 20] :- reserved
+ */
+ union {
+ A_UINT32 bw__queue_type__frame_type;
+ struct {
+ A_UINT32 frame_type: 8,
+ queue_type: 8,
+ bw: 4,
+ reserved0: 12;
+ };
+ };
+ A_UINT32 chain_mask;
+ A_UINT32 fes_duration_us; /* frame exchange sequence */
+ A_UINT32 ppdu_sch_eval_start_tstmp_us;
+ A_UINT32 ppdu_sch_end_tstmp_us;
+ A_UINT32 ppdu_start_tstmp_us;
+ /* BIT [15 : 0] - phy mode (WLAN_PHY_MODE) with which ppdu was transmitted
+ * BIT [31 : 16] - bandwidth (in MHz) with which ppdu was transmitted
+ */
+ union {
+ A_UINT32 chan_mhz__phy_mode;
+ struct {
+ A_UINT32 phy_mode: 16,
+ chan_mhz: 16;
+ };
+ };
+} htt_ppdu_stats_common_tlv;
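Each packed word in the common TLV is exposed both as a raw A_UINT32 (for the *_GET/*_SET macros) and as anonymous-struct bitfields, so the two views are interchangeable on the little-endian bitfield layout used above; a brief sketch:

static inline A_UINT32 example_common_num_users(htt_ppdu_stats_common_tlv *tlv)
{
	/* Equivalent to reading tlv->num_users directly via the bitfield. */
	return HTT_PPDU_STATS_COMMON_TLV_NUM_USERS_GET(tlv->ring_id__sched_cmdid);
}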
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM_M 0x000000ff
+#define HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM_S 0
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM_M) >> \
+ HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM_S)
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_TID_NUM_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_M 0x0000ff00
+#define HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_S 8
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_M) >> \
+ HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_S)
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_VAP_ID_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_M 0xffff0000
+#define HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_S 16
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_M) >> \
+ HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_S)
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_SW_PEER_ID_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_M 0x00000001
+#define HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_S 0
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_M) >> \
+ HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_S)
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_MCAST, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_MCAST_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_M 0x000003fe
+#define HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_S 1
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_M) >> \
+ HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_S)
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_MPDUS_TRIED_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_BW_M 0x00003c00
+#define HTT_PPDU_STATS_USER_COMMON_TLV_BW_S 10
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_BW_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_BW_M) >> \
+ HTT_PPDU_STATS_USER_COMMON_TLV_BW_S)
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_BW_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_BW, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_BW_S)); \
+ } while (0)
+
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_M 0x0000ffff
+#define HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_S 0
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_M) >> \
+ HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_S)
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_FRAME_CTRL_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_M 0xffff0000
+#define HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_S 16
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_M) >> \
+ HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_S)
+
+#define HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_COMMON_TLV_QOS_CTRL_S)); \
+ } while (0)
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+
+ /* BIT [ 7 : 0] :- tid_num
+ * BIT [ 15: 8] :- vap_id
+ * BIT [ 31: 16] :- sw_peer_id
+ */
+ union {
+ A_UINT32 sw_peer_id__vapid__tid_num;
+ struct {
+ A_UINT32 tid_num: 8,
+ vap_id: 8,
+ sw_peer_id: 16;
+ };
+ };
+
+ /* BIT [ 0 : 0] :- mcast
+ * BIT [ 9 : 1] :- mpdus_tried
+ * BIT [ 13: 10] :- bw - HTT_PPDU_STATS_BW
+ * BIT [ 31: 14] :- rsvd
+ */
+ union {
+ A_UINT32 bw__mpdus_tried__mcast;
+ struct {
+ A_UINT32 mcast: 1,
+ mpdus_tried: 9,
+ bw: 4,
+ reserved0: 18;
+ };
+ };
+
+ /* BIT [ 15: 0] :- frame_ctrl
+ * BIT [ 31: 16] :- qos_ctrl
+ */
+ union {
+ A_UINT32 qos_ctrl_frame_ctrl;
+ struct {
+ A_UINT32 frame_ctrl: 16,
+ qos_ctrl: 16;
+ };
+ };
+
+} htt_ppdu_stats_user_common_tlv;
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_M 0x000000ff
+#define HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_S 0
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_TID_NUM_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_RESERVED_M 0x0000ff00
+#define HTT_PPDU_STATS_USER_RATE_TLV_RESERVED_S 8
+
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_M 0xffff0000
+#define HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_S 16
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_SW_PEER_ID_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_M 0x0000000f
+#define HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_S 0
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_USER_POS, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_USER_POS_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_M 0x00000ff0
+#define HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_S 4
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_MU_GROUPID_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_RU_END_M 0x0000ffff
+#define HTT_PPDU_STATS_USER_RATE_TLV_RU_END_S 0
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_RU_END_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_RU_END_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_RU_END_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_RU_END_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_RU_END, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_RU_END_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_RU_START_M 0xffff0000
+#define HTT_PPDU_STATS_USER_RATE_TLV_RU_START_S 16
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_RU_START_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_RU_START_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_RU_START_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_RU_START_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_RU_START, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_RU_START_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID_M 0x00000001
+#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID_S 0
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_RESP_TYPE_VALID_S)); \
+ } while (0)
+
+enum HTT_PPDU_STATS_PPDU_TYPE {
+ HTT_PPDU_STATS_PPDU_TYPE_SU,
+ HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO,
+ HTT_PPDU_STATS_PPDU_TYPE_MU_OFDMA,
+ HTT_PPDU_STATS_PPDU_TYPE_MU_MIMO_OFDMA,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_TRIG,
+ HTT_PPDU_STATS_PPDU_TYPE_BURST_BCN,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_RESP,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_BSR_TRIG,
+ HTT_PPDU_STATS_PPDU_TYPE_UL_RESP,
+
+ HTT_PPDU_STATS_PPDU_TYPE_UNKNOWN = 0x1F,
+};
+typedef enum HTT_PPDU_STATS_PPDU_TYPE HTT_PPDU_STATS_PPDU_TYPE;
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_M 0x0000003E
+#define HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_S 1
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_PPDU_TYPE_S)); \
+ } while (0)
+
+enum HTT_PPDU_STATS_TXBF_TYPE {
+ HTT_PPDU_STATS_TXBF_OPEN_LOOP,
+ HTT_PPDU_STATS_TXBF_IMPLICIT,
+ HTT_PPDU_STATS_TXBF_EXPLICIT,
+ HTT_PPDU_STATS_TXBF_MAX,
+};
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_M 0x00000003
+#define HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_S 0
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_LTF_SIZE_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_STBC_M 0x00000004
+#define HTT_PPDU_STATS_USER_RATE_TLV_STBC_S 2
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_STBC_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_STBC_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_STBC_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_STBC_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_STBC, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_STBC_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_M 0x00000008
+#define HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_S 3
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_HE_RE, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_HE_RE_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_TXBF_M 0x000000f0
+#define HTT_PPDU_STATS_USER_RATE_TLV_TXBF_S 4
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_TXBF_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_TXBF_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_TXBF_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_TXBF_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_TXBF, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_TXBF_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_BW_M 0x00000f00
+#define HTT_PPDU_STATS_USER_RATE_TLV_BW_S 8
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_BW_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_BW_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_BW_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_BW_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_BW, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_BW_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_NSS_M 0x0000f000
+#define HTT_PPDU_STATS_USER_RATE_TLV_NSS_S 12
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_NSS_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_NSS_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_NSS_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_NSS_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_NSS, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_NSS_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_MCS_M 0x000f0000
+#define HTT_PPDU_STATS_USER_RATE_TLV_MCS_S 16
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_MCS_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_MCS_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_MCS_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_MCS_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_MCS, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_MCS_S)); \
+ } while (0)
+
+/* Refer HTT_STATS_PREAM_TYPE */
+#define HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_M 0x00f00000
+#define HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_S 20
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_PREAMBLE_S)); \
+ } while (0)
+
+/* Guard Intervals */
+enum HTT_PPDU_STATS_GI {
+ HTT_PPDU_STATS_GI_800,
+ HTT_PPDU_STATS_GI_400,
+ HTT_PPDU_STATS_GI_1600,
+ HTT_PPDU_STATS_GI_3200,
+ HTT_PPDU_STATS_GI_CNT,
+};
+typedef enum HTT_PPDU_STATS_GI HTT_PPDU_STATS_GI;
+
+/* Refer HTT_PPDU_STATS_GI */
+#define HTT_PPDU_STATS_USER_RATE_TLV_GI_M 0x0f000000
+#define HTT_PPDU_STATS_USER_RATE_TLV_GI_S 24
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_GI_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_GI_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_GI_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_GI_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_GI, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_GI_S)); \
+ } while (0)
+
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_DCM_M 0x10000000
+#define HTT_PPDU_STATS_USER_RATE_TLV_DCM_S 28
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_DCM_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_DCM_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_DCM_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_DCM_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_DCM, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_DCM_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_LDPC_M 0x20000000
+#define HTT_PPDU_STATS_USER_RATE_TLV_LDPC_S 29
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_LDPC_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_LDPC_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_LDPC_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_LDPC_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_LDPC, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_LDPC_S)); \
+ } while (0)
+
+enum HTT_PPDU_STATS_RESP_PPDU_TYPE {
+ HTT_PPDU_STATS_RESP_PPDU_TYPE_MU_MIMO_UL,
+ HTT_PPDU_STATS_RESP_PPDU_TYPE_MU_OFDMA_UL,
+};
+typedef enum HTT_PPDU_STATS_RESP_PPDU_TYPE HTT_PPDU_STATS_RESP_PPDU_TYPE;
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE_M 0xC0000000
+#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE_S 30
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE_M) >> \
+ HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE_S)
+
+#define HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_RATE_TLV_RESP_PPDU_TYPE_S)); \
+ } while (0)
+
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+
+ /* BIT [ 7 : 0] :- tid_num
+ * BIT [ 15: 8] :- reserved0
+ * BIT [ 31: 16] :- sw_peer_id
+ */
+ union {
+ A_UINT32 sw_peer_id__tid_num;
+ struct {
+ A_UINT32 tid_num: 8,
+ reserved0: 8,
+ sw_peer_id: 16;
+ };
+ };
+
+ /* BIT [ 3 : 0] :- user_pos
+ * BIT [ 11: 4] :- mu_group_id
+ * BIT [ 31: 12] :- reserved1
+ */
+ union {
+ A_UINT32 mu_group_id__user_pos;
+ struct {
+ A_UINT32 user_pos: 4,
+ mu_group_id: 8,
+ reserved1: 20;
+ };
+ };
+
+ /* BIT [ 15 : 0] :- ru_end
+ * BIT [ 31 : 16] :- ru_start
+ */
+ union {
+ A_UINT32 ru_start__ru_end;
+ struct {
+ A_UINT32 ru_end: 16,
+ ru_start: 16;
+ };
+ };
+
+ /* BIT [ 15 : 0] :- resp_ru_end
+ * BIT [ 31 : 16] :- resp_ru_start
+ */
+ union {
+ A_UINT32 resp_ru_start__ru_end;
+ struct {
+ A_UINT32 resp_ru_end: 16,
+ resp_ru_start: 16;
+ };
+ };
+
+ /* BIT [ 0 : 0 ] :- resp_type_valid
+ * BIT [ 5 : 1 ] :- ppdu_type - HTT_PPDU_STATS_PPDU_TYPE
+ * BIT [ 31: 6 ] :- reserved2
+ */
+ union {
+ A_UINT32 resp_type_vld_ppdu_type;
+ struct {
+ A_UINT32 resp_type_vld: 1,
+ ppdu_type: 5,
+ reserved2: 26;
+ };
+ };
+
+ /* BIT [ 1 : 0 ] :- ltf_size
+ * BIT [ 2 : 2 ] :- stbc
+ * BIT [ 3 : 3 ] :- he_re (range extension)
+ * BIT [ 7 : 4 ] :- txbf
+ * BIT [ 11: 8 ] :- bw
+ * BIT [ 15: 12] :- nss NSS 1,2, ...8
+ * BIT [ 19: 16] :- mcs
+ * BIT [ 23: 20] :- preamble
+ * BIT [ 27: 24] :- gi - HTT_PPDU_STATS_GI
+ * BIT [ 28: 28] :- dcm
+ * BIT [ 29: 29] :- ldpc
+ * BIT [ 31: 30] :- reserved4
+ */
+ union {
+ A_UINT32 rate_info;
+ struct {
+ A_UINT32 ltf_size: 2,
+ stbc: 1,
+ he_re: 1,
+ txbf: 4,
+ bw: 4,
+ nss: 4,
+ mcs: 4,
+ preamble: 4,
+ gi: 4,
+ dcm: 1,
+ ldpc: 1,
+ reserved4: 2;
+ };
+ };
+
+ /* Note: resp_rate_info is only valid if resp_type is UL
+ * BIT [ 1 : 0 ] :- ltf_size
+ * BIT [ 2 : 2 ] :- stbc
+ * BIT [ 3 : 3 ] :- he_re (range extension)
+ * BIT [ 7 : 4 ] :- reserved3
+ * BIT [ 11: 8 ] :- bw
+ * BIT [ 15: 12] :- nss NSS 1,2, ...8
+ * BIT [ 19: 16] :- mcs
+ * BIT [ 23: 20] :- preamble
+ * BIT [ 27: 24] :- gi
+ * BIT [ 28: 28] :- dcm
+ * BIT [ 29: 29] :- ldpc
+ * BIT [ 31: 30] :- resp_ppdu_type - HTT_PPDU_STATS_RESP_PPDU_TYPE
+ */
+ union {
+ A_UINT32 resp_rate_info;
+ struct {
+ A_UINT32 resp_ltf_size: 2,
+ resp_stbc: 1,
+ resp_he_re: 1,
+ reserved3: 4,
+ resp_bw: 4,
+ resp_nss: 4,
+ resp_mcs: 4,
+ resp_preamble: 4,
+ resp_gi: 4,
+ resp_dcm: 1,
+ resp_ldpc: 1,
+ resp_ppdu_type: 2;
+ };
+ };
+} htt_ppdu_stats_user_rate_tlv;
+
+#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM_M 0x000000ff
+#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM_S 0
+
+#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM_M) >> \
+ HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM_S)
+
+#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_TID_NUM_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_RESERVED_M 0x0000ff00
+#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_RESERVED_S 8
+
+
+#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_M 0xffff0000
+#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_S 16
+
+#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_M) >> \
+ HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_S)
+
+#define HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_ENQ_MPDU_BITMAP_TLV_SW_PEER_ID_S)); \
+ } while (0)
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+ /* BIT [ 7 : 0] :- tid_num
+ * BIT [ 15: 8] :- reserved0
+ * BIT [ 31: 16] :- sw_peer_id
+ */
+ union {
+ A_UINT32 sw_peer_id__tid_num;
+ struct {
+ A_UINT32 tid_num: 8,
+ reserved0: 8,
+ sw_peer_id: 16;
+ };
+ };
+ A_UINT32 start_seq;
+ A_UINT32 enq_bitmap[HTT_BA_64_BIT_MAP_SIZE_DWORDS];
+} htt_ppdu_stats_enq_mpdu_bitmap_64_tlv;
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+ /* BIT [ 7 : 0] :- tid_num
+ * BIT [ 15: 8] :- reserved0
+ * BIT [ 31: 16] :- sw_peer_id
+ */
+ union {
+ A_UINT32 sw_peer_id__tid_num;
+ struct {
+ A_UINT32 tid_num: 8,
+ reserved0: 8,
+ sw_peer_id: 16;
+ };
+ };
+ A_UINT32 start_seq;
+ A_UINT32 enq_bitmap[HTT_BA_256_BIT_MAP_SIZE_DWORDS];
+} htt_ppdu_stats_enq_mpdu_bitmap_256_tlv;
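Assuming enq_bitmap is indexed by an MPDU's offset from start_seq (the header does not spell this out, so treat the indexing as an assumption), testing a single bit in the 64-entry variant could look like:

static inline int example_enq_bit_is_set(
		htt_ppdu_stats_enq_mpdu_bitmap_64_tlv *tlv, A_UINT32 seq)
{
	A_UINT32 offset = (seq - tlv->start_seq) & 0x3f; /* 64-entry window */

	return (tlv->enq_bitmap[offset >> 5] >> (offset & 0x1f)) & 1;
}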
+
+/* COMPLETION_STATUS defined in HTT_PPDU_STATS_USER_COMPLETION_STATUS */
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_M 0x000000ff
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_S 0
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_COMPLETION_STATUS_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_M 0x0000ff00
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_S 8
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_TID_NUM_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_M 0xffff0000
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_S 16
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SW_PEER_ID_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_M 0x0000ffff
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_S 0
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_TRIED_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_M 0xffff0000
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_S 16
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_MPDU_SUCCESS_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_M 0x0000000f
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_S 0
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_LONG_RETRY_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_M 0x000000f0
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_S 4
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_SHORT_RETRY_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_M 0x00000100
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_S 8
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_IS_AMPDU_S)); \
+ } while (0)
+
+enum HTT_PPDU_STATS_RESP_TYPE {
+ HTT_PPDU_STATS_NO_RESPONSE_EXPECTED_E = 0,
+ HTT_PPDU_STATS_ACK_EXPECTED_E = 1,
+ HTT_PPDU_STATS_BA_BITMAP_EXPECTED_E = 2,
+ HTT_PPDU_STATS_UL_MU_BA_EXPECTED_E = 3,
+ HTT_PPDU_STATS_UL_MU_BA_AND_DATA_EXPECTED_E = 4,
+ HTT_PPDU_STATS_CTS_EXPECTED_E = 5,
+ HTT_PPDU_STATS_MU_CBF_EXPECTED_E = 6,
+};
+typedef enum HTT_PPDU_STATS_RESP_TYPE HTT_PPDU_STATS_RESP_TYPE;
+
+/* Refer HTT_PPDU_STATS_RESP_TYPE */
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_M 0x00001e00
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_S 9
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_COMMON_TLV_RESP_TYPE_S)); \
+ } while (0)
+
+enum HTT_PPDU_STATS_USER_COMPLETION_STATUS {
+ HTT_PPDU_STATS_USER_STATUS_OK,
+ HTT_PPDU_STATS_USER_STATUS_FILTERED,
+ HTT_PPDU_STATS_USER_STATUS_RESP_TIMEOUT,
+ HTT_PPDU_STATS_USER_STATUS_RESP_MISMATCH,
+ HTT_PPDU_STATS_USER_STATUS_ABORT,
+};
+typedef enum HTT_PPDU_STATS_USER_COMPLETION_STATUS HTT_PPDU_STATS_USER_COMPLETION_STATUS;
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+ /* BIT [ 7 : 0] :- completion_status
+ * BIT [ 15: 8] :- tid_num
+ * BIT [ 31: 16] :- sw_peer_id
+ */
+ union {
+ A_UINT32 sw_peer_id__tid_num__completion_status;
+ struct {
+ A_UINT32 completion_status: 8,
+ tid_num: 8,
+ sw_peer_id: 16;
+ };
+ };
+
+ /* RSSI value of last ack packet (units = dB above noise floor) */
+ A_UINT32 ack_rssi;
+
+ /* BIT [ 15 : 0] :- mpdu_tried
+ * BIT [ 31 : 16] :- mpdu_success
+ */
+ union {
+ A_UINT32 mpdu_tried__mpdu_success;
+ struct {
+ A_UINT32 mpdu_tried: 16,
+ mpdu_success: 16;
+ };
+ };
+
+ /* BIT [ 3 : 0] :- long_retries
+ * BIT [ 7 : 4] :- short_retries
+ * BIT [ 8 : 8] :- is_ampdu
+ * BIT [ 12: 9] :- resp_type
+ * BIT [ 31: 13] :- reserved0
+ */
+ union {
+ A_UINT32 resp_type_is_ampdu__short_retry__long_retry;
+ struct {
+ A_UINT32 long_retries: 4,
+ short_retries: 4,
+ is_ampdu: 1,
+ resp_type: 4,
+ reserved0: 19;
+ };
+ };
+} htt_ppdu_stats_user_cmpltn_common_tlv;
+
+#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM_M 0x000000ff
+#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM_S 0
+
+#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_TID_NUM_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_RESERVED_M 0x0000ff00
+#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_RESERVED_S 8
+
+
+#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_M 0xffff0000
+#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_S 16
+
+#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_BA_BITMAP_TLV_SW_PEER_ID_S)); \
+ } while (0)
+
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+ /* BIT [ 7 : 0] :- tid_num
+ * BIT [ 15: 8] :- reserved0
+ * BIT [ 31: 16] :- sw_peer_id
+ */
+ union {
+ A_UINT32 sw_peer_id__tid_num;
+ struct {
+ A_UINT32 tid_num: 8,
+ reserved0: 8,
+ sw_peer_id: 16;
+ };
+ };
+ A_UINT32 ba_seq_no;
+ A_UINT32 ba_bitmap[HTT_BA_64_BIT_MAP_SIZE_DWORDS];
+} htt_ppdu_stats_user_compltn_ba_bitmap_64_tlv;
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+ /* BIT [ 7 : 0] :- tid_num
+ * BIT [ 15: 8] :- reserved0
+ * BIT [ 31: 16] :- sw_peer_id
+ */
+ union {
+ A_UINT32 sw_peer_id__tid_num;
+ struct {
+ A_UINT32 tid_num: 8,
+ reserved0: 8,
+ sw_peer_id: 16;
+ };
+ };
+ A_UINT32 ba_seq_no;
+ A_UINT32 ba_bitmap[HTT_BA_256_BIT_MAP_SIZE_DWORDS];
+} htt_ppdu_stats_user_compltn_ba_bitmap_256_tlv;
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_M 0x0000ffff
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_S 0
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_M 0x000001ff
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_S 0
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_M 0x01fffe00
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_S 9
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_M 0xfe000000
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_S 25
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_M 0x0000ffff
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_S 0
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_M 0xffff0000
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_S 16
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_M) >> \
+ HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_S)
+
+#define HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_S)); \
+ } while (0)
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+ A_UINT32 ppdu_id;
+
+ /* BIT [ 15 : 0] :- sw_peer_id
+ * BIT [ 31 : 16] :- reserved0
+ */
+ union {
+ A_UINT32 rsvd_sw_peer_id;
+ struct {
+ A_UINT32 sw_peer_id: 16,
+ reserved0: 16;
+ };
+ };
+
+ /* BIT [ 8 : 0] :- num_mpdu
+ * BIT [ 24 : 9] :- num_msdu
+ * BIT [ 31 : 25] :- tid_num
+ */
+ union {
+ A_UINT32 tid_num__num_msdu__num_mpdu;
+ struct {
+ A_UINT32 num_mpdu: 9,
+ num_msdu: 16,
+ tid_num: 7;
+ };
+ };
+
+ /* BIT [ 15 : 0] :- current_seq
+ * BIT [ 31 : 16] :- start_seq
+ */
+ union {
+ A_UINT32 start_seq__current_seq;
+ struct {
+ A_UINT32 current_seq: 16,
+ start_seq: 16;
+ };
+ };
+
+ A_UINT32 success_bytes;
+} htt_ppdu_stats_user_compltn_ack_ba_status_tlv;
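A minimal sketch of reading the three packed ack/BA status words with the GET macros defined just above, assuming this header is included and the words have been copied out of the TLV in host byte order; the function and print format are illustrative only.

    #include <stdio.h>
    #include <stdint.h>

    static void dump_ack_ba_status(uint32_t peer_word, uint32_t cnt_word,
                                   uint32_t seq_word)
    {
        printf("peer %u tid %u: %u mpdu / %u msdu, seq %u..%u\n",
               (unsigned)HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_SW_PEER_ID_GET(peer_word),
               (unsigned)HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_TID_NUM_GET(cnt_word),
               (unsigned)HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MPDU_GET(cnt_word),
               (unsigned)HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_NUM_MSDU_GET(cnt_word),
               (unsigned)HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_START_SEQ_GET(seq_word),
               (unsigned)HTT_PPDU_STATS_USER_CMPLTN_ACK_BA_STATUS_TLV_CUR_SEQ_GET(seq_word));
    }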
+
+/* FLOW_TYPE defined in HTT_TX_FLOW_TYPE */
+#define HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_M 0x000000ff
+#define HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_S 0
+
+#define HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_M) >> \
+ HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_S)
+
+#define HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_FLUSH_TLV_FLOW_TYPE_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_M 0x0001ff00
+#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_S 8
+
+#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_M) >> \
+ HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_S)
+
+#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_FLUSH_TLV_NUM_MPDU_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_M 0x01fe0000
+#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_S 17
+
+#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_M) >> \
+ HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_S)
+
+#define HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_FLUSH_TLV_NUM_MSDU_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_M 0x000000ff
+#define HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_S 0
+
+#define HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_M) >> \
+ HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_S)
+
+#define HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_FLUSH_TLV_TID_NUM, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_FLUSH_TLV_TID_NUM_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_M 0x0000ff00
+#define HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_S 8
+
+#define HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_M) >> \
+ HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_S)
+
+#define HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_FLUSH_TLV_QUEUE_TYPE_S)); \
+ } while (0)
+
+#define HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_M 0xffff0000
+#define HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_S 16
+
+#define HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_GET(_var) \
+ (((_var) & HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_M) >> \
+ HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_S)
+
+#define HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID, _val); \
+ ((_var) |= ((_val) << HTT_PPDU_STATS_FLUSH_TLV_SW_PEER_ID_S)); \
+ } while (0)
+
+enum HTT_TX_FLOW_TYPE {
+ HTT_TX_TID_FRAMEQ,
+ HTT_TX_TQM_MSDUQ,
+ HTT_TQM_MPDUQ,
+};
+
+enum HTT_FLUSH_STATUS_DROP_REASON {
+ HTT_FLUSH_PEER_DELETE,
+ HTT_FLUSH_TID_DELETE,
+ HTT_FLUSH_TTL_EXCEEDED,
+ HTT_FLUSH_EXCESS_RETRIES,
+ HTT_FLUSH_REINJECT,
+};
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+
+ A_UINT32 drop_reason;
+ /* BIT [ 7 : 0] :- flow_type
+ * BIT [ 16: 8] :- num_mpdu
+ * BIT [ 30: 17] :- num_msdu
+ * BIT [ 31: 31] :- reserved0
+ */
+ union {
+ A_UINT32 num_msdu__num_mpdu__flow_type;
+ struct {
+ A_UINT32 flow_type: 8,
+ num_mpdu: 9,
+ num_msdu: 14,
+ reserved0: 1;
+ };
+ };
+
+ /* BIT [ 7 : 0] :- tid_num
+ * BIT [ 15 : 8] :- queue_type
+ * BIT [ 31 : 16] :- sw_peer_id
+ */
+ union {
+ A_UINT32 sw_peer_id__queue_type__tid_num;
+ struct {
+ A_UINT32 tid_num: 8,
+ queue_type: 8,
+ sw_peer_id: 16;
+ };
+ };
+} htt_ppdu_stats_flush_tlv;
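A small illustrative helper (assuming this header is included) that maps the drop_reason field of the flush TLV to a human-readable string, using the HTT_FLUSH_STATUS_DROP_REASON values defined above:

    #include <stdint.h>

    static const char *htt_flush_drop_reason_str(uint32_t reason)
    {
        switch (reason) {
        case HTT_FLUSH_PEER_DELETE:    return "peer delete";
        case HTT_FLUSH_TID_DELETE:     return "tid delete";
        case HTT_FLUSH_TTL_EXCEEDED:   return "ttl exceeded";
        case HTT_FLUSH_EXCESS_RETRIES: return "excess retries";
        case HTT_FLUSH_REINJECT:       return "reinject";
        default:                       return "unknown";
        }
    }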
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+
+ /* Future purpose */
+ A_UINT32 reserved1; /* set to 0x0 */
+ A_UINT32 reserved2; /* set to 0x0 */
+ A_UINT32 reserved3; /* set to 0x0 */
+
+ /* mgmt/ctrl frame payload
+ * The size of payload (in bytes) can be derived from the length in
+ * the TLV parameters, minus the 12 bytes of the above fields.
+ */
+ A_UINT32 payload[1];
+} htt_ppdu_stats_tx_mgmtctrl_payload_tlv;
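Following the comment above, the payload length is the TLV length minus the 12 bytes of reserved words; a one-line illustrative helper, assuming tlv_len has already been extracted from tlv_hdr:

    #include <stdint.h>

    static uint32_t mgmtctrl_payload_bytes(uint32_t tlv_len)
    {
        /* Three reserved A_UINT32 words (12 bytes) precede the payload. */
        return (tlv_len > 12) ? (tlv_len - 12) : 0;
    }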
+
+
+#endif //__HTT_PPDU_STATS_H__
diff --git a/drivers/staging/fw-api/fw/htt_stats.h b/drivers/staging/fw-api/fw/htt_stats.h
index a68df97ad134..0bbd13c27419 100644
--- a/drivers/staging/fw-api/fw/htt_stats.h
+++ b/drivers/staging/fw-api/fw/htt_stats.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -97,7 +97,9 @@ enum htt_dbg_ext_stats_type {
/* HTT_DBG_EXT_STATS_TQM_CMDQ
* PARAMS:
- * - No Params
+ * - config_param0:
+ * [Bit15: Bit0 ] cmdq id: if 0xFFFF, print all cmdqs
+ * [Bit31: Bit16] reserved
* RESP MSG:
* - htt_tx_tqm_cmdq_stats_t
*/
@@ -139,7 +141,7 @@ enum htt_dbg_ext_stats_type {
* 1 bit htt_peer_details_tlv
* 2 bit htt_tx_peer_rate_stats_tlv
* 3 bit htt_rx_peer_rate_stats_tlv
- * 4 bit htt_tx_tid_stats_tlv
+ * 4 bit htt_tx_tid_stats_tlv/htt_tx_tid_stats_v1_tlv
* 5 bit htt_rx_tid_stats_tlv
* 6 bit htt_msdu_flow_stats_tlv
* - config_param2: [Bit31 : Bit0] mac_addr31to0
@@ -168,7 +170,9 @@ enum htt_dbg_ext_stats_type {
/* HTT_DBG_EXT_STATS_RING_IF_INFO
* PARAMS:
- * - No Params
+ * - config_param0:
+ * [Bit15: Bit0 ] ring id: if 0xFFFF, print all rings
+ * [Bit31: Bit16] reserved
* RESP MSG:
* - htt_ring_if_stats_t
*/
@@ -176,6 +180,9 @@ enum htt_dbg_ext_stats_type {
/* HTT_DBG_EXT_STATS_SRNG_INFO
* PARAMS:
+ * - config_param0:
+ * [Bit15: Bit0 ] ring id: if 0xFFFF, print all rings
+ * [Bit31: Bit16] reserved
* - No Params
* RESP MSG:
* - htt_sring_stats_t
@@ -209,6 +216,35 @@ enum htt_dbg_ext_stats_type {
*/
HTT_DBG_EXT_STATS_ACTIVE_PEERS_LIST = 18,
+ /* HTT_DBG_EXT_STATS_PDEV_CCA_STATS
+ * PARAMS:
+ * - config_param0:
+ * [Bit0] - 1 sec interval histogram
+ * [Bit1] - 100ms interval histogram
+ * [Bit3] - Cumulative CCA stats
+ * RESP MSG:
+ * - htt_pdev_cca_stats_t
+ */
+ HTT_DBG_EXT_STATS_PDEV_CCA_STATS = 19,
+
+ /* HTT_DBG_EXT_STATS_TWT_SESSIONS
+ * PARAMS:
+ * - config_param0:
+ * No params
+ * RESP MSG:
+ * - htt_pdev_twt_sessions_stats_t
+ */
+ HTT_DBG_EXT_STATS_TWT_SESSIONS = 20,
+
+ /* HTT_DBG_EXT_STATS_REO_CNTS
+ * PARAMS:
+ * - config_param0:
+ * No params
+ * RESP MSG:
+ * - htt_soc_reo_resource_stats_t
+ */
+ HTT_DBG_EXT_STATS_REO_RESOURCE_STATS = 21,
+
/* keep this last */
HTT_DBG_NUM_EXT_STATS = 256,
};
@@ -282,6 +318,18 @@ typedef enum {
HTT_STATS_TX_DE_COMPL_STATS_TAG = 65, /* htt_tx_de_compl_stats_tlv */
HTT_STATS_WHAL_TX_TAG = 66, /* htt_hw_stats_whal_tx_tlv */
HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67, /* htt_tx_pdev_stats_sifs_hist_tlv_v */
+ HTT_STATS_RX_PDEV_FW_STATS_PHY_ERR_TAG = 68, /* htt_rx_pdev_fw_stats_phy_err_tlv */
+ HTT_STATS_TX_TID_DETAILS_V1_TAG = 69, /* htt_tx_tid_stats_v1_tlv */
+ HTT_STATS_PDEV_CCA_1SEC_HIST_TAG = 70, /* htt_pdev_cca_stats_hist_tlv (for 1 sec interval stats) */
+ HTT_STATS_PDEV_CCA_100MSEC_HIST_TAG = 71, /* htt_pdev_cca_stats_hist_tlv (for 100 msec interval stats) */
+ HTT_STATS_PDEV_CCA_STAT_CUMULATIVE_TAG = 72, /* htt_pdev_stats_cca_stats_tlv */
+ HTT_STATS_PDEV_CCA_COUNTERS_TAG = 73, /* htt_pdev_stats_cca_counters_tlv */
+ HTT_STATS_TX_PDEV_MPDU_STATS_TAG = 74, /* htt_tx_pdev_mpdu_stats_tlv */
+ HTT_STATS_PDEV_TWT_SESSIONS_TAG = 75, /* htt_pdev_stats_twt_sessions_tlv */
+ HTT_STATS_PDEV_TWT_SESSION_TAG = 76, /* htt_pdev_stats_twt_session_tlv */
+ HTT_STATS_RX_REFILL_RXDMA_ERR_TAG = 77, /* htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v */
+ HTT_STATS_RX_REFILL_REO_ERR_TAG = 78, /* htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v */
+ HTT_STATS_RX_REO_RESOURCE_STATS_TAG = 79, /* htt_rx_reo_debug_stats_tlv_v */
HTT_STATS_MAX_TAG,
} htt_tlv_tag_t;
@@ -333,10 +381,11 @@ typedef enum {
HTT_TX_PDEV_MAX_URRN_STATS = 3,
} htt_tx_pdev_underrun_enum;
-#define HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 71
-#define HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9
-#define HTT_TX_PDEV_MAX_PHY_ERR_STATS 18
-#define HTT_TX_PDEV_SCHED_TX_MODE_MAX 4
+#define HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 71
+#define HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9
+#define HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS 10
+#define HTT_TX_PDEV_MAX_PHY_ERR_STATS 18
+#define HTT_TX_PDEV_SCHED_TX_MODE_MAX 4
#define HTT_RX_STATS_REFILL_MAX_RING 4
#define HTT_RX_STATS_RXDMA_MAX_ERR 16
@@ -454,6 +503,22 @@ typedef struct {
A_UINT32 num_total_ppdus_tried_ota;
/* Number of data PPDUs tried over the air (OTA) */
A_UINT32 num_data_ppdus_tried_ota;
+ /* Num Local control/mgmt frames (MSDUs) queued */
+ A_UINT32 local_ctrl_mgmt_enqued;
+ /* local_ctrl_mgmt_freed:
+ * Num Local control/mgmt frames (MSDUs) done
+ * It includes all local ctrl/mgmt completions
+ * (acked, no ack, flush, TTL, etc)
+ */
+ A_UINT32 local_ctrl_mgmt_freed;
+ /* Num Local data frames (MSDUs) queued */
+ A_UINT32 local_data_enqued;
+ /* local_data_freed:
+ * Num Local data frames (MSDUs) done
+ * It includes all local data completions
+ * (acked, no ack, flush, TTL, etc)
+ */
+ A_UINT32 local_data_freed;
} htt_tx_pdev_stats_cmn_tlv;
#define HTT_TX_PDEV_STATS_URRN_TLV_SZ(_num_elems) (sizeof(A_UINT32) * (_num_elems))
@@ -511,6 +576,7 @@ typedef struct _htt_tx_pdev_stats {
htt_tx_pdev_stats_sifs_tlv_v sifs_tlv;
htt_tx_pdev_stats_flush_tlv_v flush_tlv;
htt_tx_pdev_stats_phy_err_tlv_v phy_err_tlv;
+ htt_tx_pdev_stats_sifs_hist_tlv_v sifs_hist_tlv;
} htt_tx_pdev_stats_t;
/* == SOC ERROR STATS == */
@@ -658,6 +724,13 @@ typedef struct _htt_msdu_flow_stats_tlv {
* BIT [31 : 21] :- reserved
*/
A_UINT32 tx_flow_no__tid_num__drop_rule;
+ A_UINT32 last_cycle_enqueue_count;
+ A_UINT32 last_cycle_dequeue_count;
+ A_UINT32 last_cycle_drop_count;
+ /* BIT [15 : 0] :- current_drop_th
+ * BIT [31 : 16] :- reserved
+ */
+ A_UINT32 current_drop_th;
} htt_msdu_flow_stats_tlv;
#define MAX_HTT_TID_NAME 8
@@ -749,6 +822,43 @@ typedef struct _htt_tx_tid_stats_tlv {
A_UINT32 tid_tx_airtime;
} htt_tx_tid_stats_tlv;
+/* Tidq stats */
+typedef struct _htt_tx_tid_stats_v1_tlv {
+ htt_tlv_hdr_t tlv_hdr;
+ /* Stored as little endian */
+ A_UINT8 tid_name[MAX_HTT_TID_NAME];
+ /* BIT [15 : 0] :- sw_peer_id
+ * BIT [31 : 16] :- tid_num
+ */
+ A_UINT32 sw_peer_id__tid_num;
+ /* BIT [ 7 : 0] :- num_sched_pending
+ * BIT [15 : 8] :- num_ppdu_in_hwq
+ * BIT [31 : 16] :- reserved
+ */
+ A_UINT32 num_sched_pending__num_ppdu_in_hwq;
+ A_UINT32 tid_flags;
+ /* Max qdepth in bytes reached by this tid */
+ A_UINT32 max_qdepth_bytes;
+ /* Number of MSDUs when the qdepth reached its max */
+ A_UINT32 max_qdepth_n_msdus;
+ /* This field has been made reserved */
+ A_UINT32 rsvd;
+
+ A_UINT32 qdepth_bytes;
+ A_UINT32 qdepth_num_msdu;
+ A_UINT32 qdepth_num_mpdu;
+ A_UINT32 last_scheduled_tsmp;
+ A_UINT32 pause_module_id;
+ A_UINT32 block_module_id;
+ /* tid tx airtime in sec */
+ A_UINT32 tid_tx_airtime;
+ A_UINT32 allow_n_flags;
+ /* BIT [15 : 0] :- sendn_frms_allowed
+ * BIT [31 : 16] :- reserved
+ */
+ A_UINT32 sendn_frms_allowed;
+} htt_tx_tid_stats_v1_tlv;
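A minimal sketch of unpacking the two packed words of htt_tx_tid_stats_v1_tlv according to the bit layouts documented above; the open-coded shifts and the struct/function names here are illustrative assumptions rather than driver code.

    #include <stdint.h>

    struct tid_stats_v1_ids {
        uint16_t sw_peer_id;        /* BIT [15:0]  of sw_peer_id__tid_num */
        uint16_t tid_num;           /* BIT [31:16] of sw_peer_id__tid_num */
        uint8_t  num_sched_pending; /* BIT [7:0]   of the second word     */
        uint8_t  num_ppdu_in_hwq;   /* BIT [15:8]  of the second word     */
    };

    static struct tid_stats_v1_ids
    unpack_tid_stats_v1(uint32_t peer_tid_word, uint32_t sched_word)
    {
        struct tid_stats_v1_ids out;

        out.sw_peer_id        = peer_tid_word & 0xffff;
        out.tid_num           = (peer_tid_word >> 16) & 0xffff;
        out.num_sched_pending = sched_word & 0xff;
        out.num_ppdu_in_hwq   = (sched_word >> 8) & 0xff;

        return out;
    }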
+
#define HTT_RX_TID_STATS_SW_PEER_ID_M 0x0000ffff
#define HTT_RX_TID_STATS_SW_PEER_ID_S 0
#define HTT_RX_TID_STATS_TID_NUM_M 0xffff0000
@@ -823,6 +933,16 @@ typedef struct {
A_UINT32 peer_rx_airtime;
/* Peer current rssi in dBm */
A_INT32 rssi;
+ /* Total enqueued, dequeued and dropped msdu's for peer */
+ A_UINT32 peer_enqueued_count_low;
+ A_UINT32 peer_enqueued_count_high;
+ A_UINT32 peer_dequeued_count_low;
+ A_UINT32 peer_dequeued_count_high;
+ A_UINT32 peer_dropped_count_low;
+ A_UINT32 peer_dropped_count_high;
+ /* Total ppdu transmitted bytes for peer: includes MAC header overhead */
+ A_UINT32 ppdu_transmitted_bytes_low;
+ A_UINT32 ppdu_transmitted_bytes_high;
} htt_peer_stats_cmn_tlv;
typedef struct {
@@ -962,7 +1082,7 @@ typedef enum {
#define HTT_DBG_EXT_STATS_PEER_REQ_MODE_SET(_var, _val) \
do { \
- ((_var) |= ((_val) << HTT_DBG_EXT_STATS_PEER_REQ_MODE_M)); \
+ ((_var) |= ((_val) << HTT_DBG_EXT_STATS_PEER_REQ_MODE_S)); \
} while (0)
#define HTT_DBG_EXT_STATS_PEER_INFO_SW_PEER_ID_GET(_var) \
@@ -980,9 +1100,10 @@ typedef enum {
* - HTT_STATS_PEER_DETAILS_TAG
* - HTT_STATS_PEER_TX_RATE_STATS_TAG
* - HTT_STATS_PEER_RX_RATE_STATS_TAG
- * - HTT_STATS_TX_TID_DETAILS_TAG (multiple)
+ * - HTT_STATS_TX_TID_DETAILS_TAG (multiple) (deprecated, so 0 elements in updated systems)
* - HTT_STATS_RX_TID_DETAILS_TAG (multiple)
* - HTT_STATS_PEER_MSDU_FLOWQ_TAG (multiple)
+ * - HTT_STATS_TX_TID_DETAILS_V1_TAG (multiple)
*/
/* NOTE:
* This structure is for documentation, and cannot be safely used directly.
@@ -997,6 +1118,7 @@ typedef struct _htt_peer_stats {
htt_tx_tid_stats_tlv tx_tid_stats[1];
htt_rx_tid_stats_tlv rx_tid_stats[1];
htt_msdu_flow_stats_tlv msdu_flowq[1];
+ htt_tx_tid_stats_v1_tlv tx_tid_stats_v1[1];
} htt_peer_stats_t;
/* =========== ACTIVE PEER LIST ========== */
@@ -1327,6 +1449,10 @@ typedef struct {
/* == TX MU STATS == */
+#define HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS 4
+#define HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS 8
+#define HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS 74
+
typedef struct {
htt_tlv_hdr_t tlv_hdr;
/* mu-mimo sw sched cmd stats */
@@ -1334,11 +1460,24 @@ typedef struct {
A_UINT32 mu_mimo_sch_failed;
/* MU PPDU stats per hwQ */
A_UINT32 mu_mimo_ppdu_posted;
+ /*
+ * Counts the number of users in each transmission of
+ * the given TX mode.
+ *
+ * Index is the number of users - 1.
+ */
+ A_UINT32 ac_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AC_MUMIMO_USER_STATS];
+ A_UINT32 ax_mu_mimo_sch_nusers[HTT_TX_PDEV_STATS_NUM_AX_MUMIMO_USER_STATS];
+ A_UINT32 ax_ofdma_sch_nusers[HTT_TX_PDEV_STATS_NUM_OFDMA_USER_STATS];
} htt_tx_pdev_mu_mimo_sch_stats_tlv;
typedef struct {
htt_tlv_hdr_t tlv_hdr;
/* mu-mimo mpdu level stats */
+ /*
+ * This first block of stats is limited to 11ac
+ * MU-MIMO transmission.
+ */
A_UINT32 mu_mimo_mpdus_queued_usr;
A_UINT32 mu_mimo_mpdus_tried_usr;
A_UINT32 mu_mimo_mpdus_failed_usr;
@@ -1346,12 +1485,46 @@ typedef struct {
A_UINT32 mu_mimo_err_no_ba_usr;
A_UINT32 mu_mimo_mpdu_underrun_usr;
A_UINT32 mu_mimo_ampdu_underrun_usr;
+
+ A_UINT32 ax_mu_mimo_mpdus_queued_usr;
+ A_UINT32 ax_mu_mimo_mpdus_tried_usr;
+ A_UINT32 ax_mu_mimo_mpdus_failed_usr;
+ A_UINT32 ax_mu_mimo_mpdus_requeued_usr;
+ A_UINT32 ax_mu_mimo_err_no_ba_usr;
+ A_UINT32 ax_mu_mimo_mpdu_underrun_usr;
+ A_UINT32 ax_mu_mimo_ampdu_underrun_usr;
+
+ A_UINT32 ax_ofdma_mpdus_queued_usr;
+ A_UINT32 ax_ofdma_mpdus_tried_usr;
+ A_UINT32 ax_ofdma_mpdus_failed_usr;
+ A_UINT32 ax_ofdma_mpdus_requeued_usr;
+ A_UINT32 ax_ofdma_err_no_ba_usr;
+ A_UINT32 ax_ofdma_mpdu_underrun_usr;
+ A_UINT32 ax_ofdma_ampdu_underrun_usr;
} htt_tx_pdev_mu_mimo_mpdu_stats_tlv;
+#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC 1 /* SCHED_TX_MODE_MU_MIMO_AC */
+#define HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX 2 /* SCHED_TX_MODE_MU_MIMO_AX */
+#define HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX 3 /* SCHED_TX_MODE_MU_OFDMA_AX */
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+ /* mpdu level stats */
+ A_UINT32 mpdus_queued_usr;
+ A_UINT32 mpdus_tried_usr;
+ A_UINT32 mpdus_failed_usr;
+ A_UINT32 mpdus_requeued_usr;
+ A_UINT32 err_no_ba_usr;
+ A_UINT32 mpdu_underrun_usr;
+ A_UINT32 ampdu_underrun_usr;
+ A_UINT32 user_index;
+ A_UINT32 tx_sched_mode; /* HTT_STATS_TX_SCHED_MODE_xxx */
+} htt_tx_pdev_mpdu_stats_tlv;
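A small illustrative helper (assuming this header is included) for labelling a per-user MPDU stats TLV by its tx_sched_mode field, using the HTT_STATS_TX_SCHED_MODE_* values above; modes outside those three are simply reported as "other".

    #include <stdint.h>

    static const char *tx_sched_mode_str(uint32_t mode)
    {
        switch (mode) {
        case HTT_STATS_TX_SCHED_MODE_MU_MIMO_AC:  return "11ac MU-MIMO";
        case HTT_STATS_TX_SCHED_MODE_MU_MIMO_AX:  return "11ax MU-MIMO";
        case HTT_STATS_TX_SCHED_MODE_MU_OFDMA_AX: return "11ax MU-OFDMA";
        default:                                  return "other";
        }
    }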
+
/* STATS_TYPE : HTT_DBG_EXT_STATS_PDEV_TX_MU
* TLV_TAGS:
* - HTT_STATS_TX_PDEV_MU_MIMO_STATS_TAG (multiple)
- * - HTT_STATS_TX_PDEV_MUMIMO_MPDU_STATS_TAG (multiple)
+ * - HTT_STATS_TX_PDEV_MPDU_STATS_TAG (multiple)
*/
/* NOTE:
* This structure is for documentation, and cannot be safely used directly.
@@ -1359,7 +1532,11 @@ typedef struct {
*/
typedef struct {
htt_tx_pdev_mu_mimo_sch_stats_tlv mu_mimo_sch_stats_tlv[1]; /* WAL_TX_STATS_MAX_GROUP_SIZE */
- htt_tx_pdev_mu_mimo_mpdu_stats_tlv mu_mimo_mpdu_stats_tlv[1]; /* WAL_TX_STATS_MAX_NUM_USERS */
+ /*
+ * Note that though mu_mimo_mpdu_stats_tlv is named MU-MIMO,
+ * it can also hold MU-OFDMA stats.
+ */
+ htt_tx_pdev_mpdu_stats_tlv mu_mimo_mpdu_stats_tlv[1]; /* WAL_TX_STATS_MAX_NUM_USERS */
} htt_tx_pdev_mu_mimo_stats_t;
/* == TX SCHED STATS == */
@@ -2337,6 +2514,9 @@ typedef struct {
#define HTT_TX_PDEV_STATS_NUM_BW_COUNTERS 4
#define HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS 8
#define HTT_TX_PDEV_STATS_NUM_PREAMBLE_TYPES HTT_STATS_PREAM_COUNT
+#define HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS 4
+#define HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS 8
+#define HTT_TX_PDEV_STATS_NUM_LTF 4
#define HTT_TX_PDEV_RATE_STATS_MAC_ID_M 0x000000ff
#define HTT_TX_PDEV_RATE_STATS_MAC_ID_S 0
@@ -2366,8 +2546,11 @@ typedef struct {
A_UINT32 ack_rssi;
A_UINT32 tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ /* tx_xx_mcs: currently unused */
A_UINT32 tx_su_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
A_UINT32 tx_mu_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
A_UINT32 tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS]; /* element 0,1, ...7 -> NSS 1,2, ...8 */
A_UINT32 tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS]; /* element 0: 20 MHz, 1: 40 MHz, 2: 80 MHz, 3: 160 and 80+80 MHz */
A_UINT32 tx_stbc[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
@@ -2378,9 +2561,50 @@ typedef struct {
/* Counters to track packets in dcm mcs (MCS 0, 1, 3, 4) */
A_UINT32 tx_dcm[HTT_TX_PDEV_STATS_NUM_DCM_COUNTERS];
-
/* Number of CTS-acknowledged RTS packets */
A_UINT32 rts_success;
+
+ /*
+ * Counters for legacy 11a and 11b transmissions.
+ *
+ * The index corresponds to:
+ *
+ * CCK: 0: 1 Mbps, 1: 2 Mbps, 2: 5.5 Mbps, 3: 11 Mbps
+ *
+ * OFDM: 0: 6 Mbps, 1: 9 Mbps, 2: 12 Mbps, 3: 18 Mbps,
+ * 4: 24 Mbps, 5: 36 Mbps, 6: 48 Mbps, 7: 54 Mbps
+ */
+ A_UINT32 tx_legacy_cck_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_CCK_STATS];
+ A_UINT32 tx_legacy_ofdm_rate[HTT_TX_PDEV_STATS_NUM_LEGACY_OFDM_STATS];
+
+ A_UINT32 ac_mu_mimo_tx_ldpc;
+ A_UINT32 ax_mu_mimo_tx_ldpc;
+ A_UINT32 ofdma_tx_ldpc;
+
+ /*
+ * Counters for 11ax HE LTF selection during TX.
+ *
+ * The index corresponds to:
+ *
+ * 0: unused, 1: 1x LTF, 2: 2x LTF, 3: 4x LTF
+ */
+ A_UINT32 tx_he_ltf[HTT_TX_PDEV_STATS_NUM_LTF];
+
+ A_UINT32 ac_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ A_UINT32 ax_mu_mimo_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ A_UINT32 ofdma_tx_mcs[HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+
+ A_UINT32 ac_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ A_UINT32 ax_mu_mimo_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+ A_UINT32 ofdma_tx_nss[HTT_TX_PDEV_STATS_NUM_SPATIAL_STREAMS];
+
+ A_UINT32 ac_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ A_UINT32 ax_mu_mimo_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+ A_UINT32 ofdma_tx_bw[HTT_TX_PDEV_STATS_NUM_BW_COUNTERS];
+
+ A_UINT32 ac_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS][HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ A_UINT32 ax_mu_mimo_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS][HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
+ A_UINT32 ofdma_tx_gi[HTT_TX_PDEV_STATS_NUM_GI_COUNTERS][HTT_TX_PDEV_STATS_NUM_MCS_COUNTERS];
} htt_tx_pdev_rate_stats_tlv;
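The legacy-rate counters above are indexed by rate; the mapping documented in the comment can be expressed as small lookup tables for pretty-printing, with CCK kept in units of 100 kbps so the 5.5 Mbps entry stays integral (illustrative only):

    /* Index-to-rate tables matching the comment above. */
    static const unsigned int cck_rate_100kbps[4] = { 10, 20, 55, 110 };
    static const unsigned int ofdm_rate_mbps[8]   = { 6, 9, 12, 18, 24, 36, 48, 54 };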
/* STATS_TYPE : HTT_DBG_EXT_STATS_PDEV_TX_RATE
@@ -2475,6 +2699,21 @@ typedef struct {
A_UINT32 ofld_remote_data_buf_recycle_cnt;
/* Num remote free buf given to offload */
A_UINT32 ofld_remote_free_buf_indication_cnt;
+
+ /* Num unicast packets from local path indicated to host */
+ A_UINT32 ofld_buf_to_host_data_msdu_uc;
+ /* Num unicast packets from REO indicated to host */
+ A_UINT32 reo_fw_ring_to_host_data_msdu_uc;
+
+ /* Num Packets received from WBM SW1 ring */
+ A_UINT32 wbm_sw_ring_reap;
+ /* Num packets from WBM forwarded from fw to host via WBM */
+ A_UINT32 wbm_forward_to_host_cnt;
+ /* Num packets from WBM recycled to target refill ring */
+ A_UINT32 wbm_target_recycle_cnt;
+
+ /* Total Num of recycled to refill ring, including packets from WBM and REO */
+ A_UINT32 target_refill_ring_recycle_cnt;
} htt_rx_soc_fw_stats_tlv;
#define HTT_RX_SOC_FW_REFILL_RING_EMPTY_TLV_SZ(_num_elems) (sizeof(A_UINT32) * (_num_elems))
@@ -2496,6 +2735,89 @@ typedef struct {
A_UINT32 refill_ring_num_refill[1]; /* HTT_RX_STATS_REFILL_MAX_RING */
} htt_rx_soc_fw_refill_ring_num_refill_tlv_v;
+/* RXDMA error code from WBM released packets */
+typedef enum {
+ HTT_RX_RXDMA_OVERFLOW_ERR = 0,
+ HTT_RX_RXDMA_MPDU_LENGTH_ERR = 1,
+ HTT_RX_RXDMA_FCS_ERR = 2,
+ HTT_RX_RXDMA_DECRYPT_ERR = 3,
+ HTT_RX_RXDMA_TKIP_MIC_ERR = 4,
+ HTT_RX_RXDMA_UNECRYPTED_ERR = 5,
+ HTT_RX_RXDMA_MSDU_LEN_ERR = 6,
+ HTT_RX_RXDMA_MSDU_LIMIT_ERR = 7,
+ HTT_RX_RXDMA_WIFI_PARSE_ERR = 8,
+ HTT_RX_RXDMA_AMSDU_PARSE_ERR = 9,
+ HTT_RX_RXDMA_SA_TIMEOUT_ERR = 10,
+ HTT_RX_RXDMA_DA_TIMEOUT_ERR = 11,
+ HTT_RX_RXDMA_FLOW_TIMEOUT_ERR = 12,
+ HTT_RX_RXDMA_FLUSH_REQUEST = 13,
+ HTT_RX_RXDMA_ERR_CODE_RVSD0 = 14,
+ HTT_RX_RXDMA_ERR_CODE_RVSD1 = 15,
+
+ /*
+ * This MAX_ERR_CODE should not be used in any host/target messages,
+ * so that even though it is defined within a host/target interface
+ * definition header file, it isn't actually part of the host/target
+ * interface, and thus can be modified.
+ */
+ HTT_RX_RXDMA_MAX_ERR_CODE
+} htt_rx_rxdma_error_code_enum;
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+
+ /* NOTE:
+ * The mapping of RXDMA error types to rxdma_err array elements is HW dependent.
+ * It is expected but not required that the target will provide a rxdma_err element
+ * for each of the htt_rx_rxdma_error_code_enum values, up to but not including
+ * MAX_ERR_CODE. The host should ignore any array elements whose
+ * indices are >= the MAX_ERR_CODE value the host was compiled with.
+ */
+ A_UINT32 rxdma_err[1]; /* HTT_RX_RXDMA_MAX_ERR_CODE */
+} htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v;
+
+/* REO error code from WBM released packets */
+typedef enum {
+ HTT_RX_REO_QUEUE_DESC_ADDR_ZERO = 0,
+ HTT_RX_REO_QUEUE_DESC_NOT_VALID = 1,
+ HTT_RX_AMPDU_IN_NON_BA = 2,
+ HTT_RX_NON_BA_DUPLICATE = 3,
+ HTT_RX_BA_DUPLICATE = 4,
+ HTT_RX_REGULAR_FRAME_2K_JUMP = 5,
+ HTT_RX_BAR_FRAME_2K_JUMP = 6,
+ HTT_RX_REGULAR_FRAME_OOR = 7,
+ HTT_RX_BAR_FRAME_OOR = 8,
+ HTT_RX_BAR_FRAME_NO_BA_SESSION = 9,
+ HTT_RX_BAR_FRAME_SN_EQUALS_SSN = 10,
+ HTT_RX_PN_CHECK_FAILED = 11,
+ HTT_RX_2K_ERROR_HANDLING_FLAG_SET = 12,
+ HTT_RX_PN_ERROR_HANDLING_FLAG_SET = 13,
+ HTT_RX_QUEUE_DESCRIPTOR_BLOCKED_SET = 14,
+ HTT_RX_REO_ERR_CODE_RVSD = 15,
+
+ /*
+ * This MAX_ERR_CODE should not be used in any host/target messages,
+ * so that even though it is defined within a host/target interface
+ * definition header file, it isn't actually part of the host/target
+ * interface, and thus can be modified.
+ */
+ HTT_RX_REO_MAX_ERR_CODE
+} htt_rx_reo_error_code_enum;
+
+/* NOTE: Variable length TLV, use length spec to infer array size */
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+
+ /* NOTE:
+ * The mapping of REO error types to reo_err array elements is HW dependent.
+ * It is expected but not required that the target will provide a reo_err element
+ * for each of the htt_rx_reo_error_code_enum values, up to but not including
+ * MAX_ERR_CODE. The host should ignore any array elements whose
+ * indices are >= the MAX_ERR_CODE value the host was compiled with.
+ */
+ A_UINT32 reo_err[1]; /* HTT_RX_REO_MAX_ERR_CODE */
+} htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v;
/* NOTE:
* This structure is for documentation, and cannot be safely used directly.
@@ -2505,6 +2827,8 @@ typedef struct {
htt_rx_soc_fw_stats_tlv fw_tlv;
htt_rx_soc_fw_refill_ring_empty_tlv_v fw_refill_ring_empty_tlv;
htt_rx_soc_fw_refill_ring_num_refill_tlv_v fw_refill_ring_num_refill_tlv;
+ htt_rx_soc_fw_refill_ring_num_rxdma_err_tlv_v fw_refill_ring_num_rxdma_err_tlv;
+ htt_rx_soc_fw_refill_ring_num_reo_err_tlv_v fw_refill_ring_num_reo_err_tlv;
} htt_rx_soc_stats_t;
/* == RX PDEV STATS == */
@@ -2622,8 +2946,72 @@ typedef struct {
A_UINT32 rx_ring_restore_cnt;
/* Num rx flush issued */
A_UINT32 rx_flush_cnt;
+ /* Num rx recovery */
+ A_UINT32 rx_recovery_reset_cnt;
} htt_rx_pdev_fw_stats_tlv;
+#define HTT_STATS_PHY_ERR_MAX 43
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+
+ /* BIT [ 7 : 0] :- mac_id
+ * BIT [31 : 8] :- reserved
+ */
+ A_UINT32 mac_id__word;
+ /* Num of phy err */
+ A_UINT32 total_phy_err_cnt;
+ /* Counts of different types of phy errs
+ * The mapping of PHY error types to phy_err array elements is HW dependent.
+ * The only currently-supported mapping is shown below:
+ *
+ * 0 phyrx_err_phy_off Reception aborted due to receiving a PHY_OFF TLV
+ * 1 phyrx_err_synth_off
+ * 2 phyrx_err_ofdma_timing
+ * 3 phyrx_err_ofdma_signal_parity
+ * 4 phyrx_err_ofdma_rate_illegal
+ * 5 phyrx_err_ofdma_length_illegal
+ * 6 phyrx_err_ofdma_restart
+ * 7 phyrx_err_ofdma_service
+ * 8 phyrx_err_ppdu_ofdma_power_drop
+ * 9 phyrx_err_cck_blokker
+ * 10 phyrx_err_cck_timing
+ * 11 phyrx_err_cck_header_crc
+ * 12 phyrx_err_cck_rate_illegal
+ * 13 phyrx_err_cck_length_illegal
+ * 14 phyrx_err_cck_restart
+ * 15 phyrx_err_cck_service
+ * 16 phyrx_err_cck_power_drop
+ * 17 phyrx_err_ht_crc_err
+ * 18 phyrx_err_ht_length_illegal
+ * 19 phyrx_err_ht_rate_illegal
+ * 20 phyrx_err_ht_zlf
+ * 21 phyrx_err_false_radar_ext
+ * 22 phyrx_err_green_field
+ * 23 phyrx_err_bw_gt_dyn_bw
+ * 24 phyrx_err_leg_ht_mismatch
+ * 25 phyrx_err_vht_crc_error
+ * 26 phyrx_err_vht_siga_unsupported
+ * 27 phyrx_err_vht_lsig_len_invalid
+ * 28 phyrx_err_vht_ndp_or_zlf
+ * 29 phyrx_err_vht_nsym_lt_zero
+ * 30 phyrx_err_vht_rx_extra_symbol_mismatch
+ * 31 phyrx_err_vht_rx_skip_group_id0
+ * 32 phyrx_err_vht_rx_skip_group_id1to62
+ * 33 phyrx_err_vht_rx_skip_group_id63
+ * 34 phyrx_err_ofdm_ldpc_decoder_disabled
+ * 35 phyrx_err_defer_nap
+ * 36 phyrx_err_fdomain_timeout
+ * 37 phyrx_err_lsig_rel_check
+ * 38 phyrx_err_bt_collision
+ * 39 phyrx_err_unsupported_mu_feedback
+ * 40 phyrx_err_ppdu_tx_interrupt_rx
+ * 41 phyrx_err_unsupported_cbf
+ * 42 phyrx_err_other
+ */
+ A_UINT32 phy_err[HTT_STATS_PHY_ERR_MAX];
+} htt_rx_pdev_fw_stats_phy_err_tlv;
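A minimal sketch of dumping the per-type PHY error counters, assuming this header is included; only non-zero entries are printed, and the index-to-name mapping is left to the table above since it is HW dependent.

    #include <stdio.h>
    #include <stdint.h>

    static void dump_phy_errs(const uint32_t *phy_err, uint32_t total)
    {
        int i;

        printf("total_phy_err_cnt: %u\n", (unsigned)total);
        for (i = 0; i < HTT_STATS_PHY_ERR_MAX; i++)
            if (phy_err[i])
                printf("  phy_err[%d] = %u\n", i, (unsigned)phy_err[i]);
    }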
+
#define HTT_RX_PDEV_FW_RING_MPDU_ERR_TLV_SZ(_num_elems) (sizeof(A_UINT32) * (_num_elems))
/* NOTE: Variable length TLV, use length spec to infer array size */
@@ -2660,6 +3048,262 @@ typedef struct {
htt_rx_pdev_fw_stats_tlv fw_stats_tlv;
htt_rx_pdev_fw_ring_mpdu_err_tlv_v fw_ring_mpdu_err_tlv;
htt_rx_pdev_fw_mpdu_drop_tlv_v fw_ring_mpdu_drop;
+ htt_rx_pdev_fw_stats_phy_err_tlv fw_stats_phy_err_tlv;
} htt_rx_pdev_stats_t;
+#define HTT_PDEV_CCA_STATS_TX_FRAME_INFO_PRESENT (0x1)
+#define HTT_PDEV_CCA_STATS_RX_FRAME_INFO_PRESENT (0x2)
+#define HTT_PDEV_CCA_STATS_RX_CLEAR_INFO_PRESENT (0x4)
+#define HTT_PDEV_CCA_STATS_MY_RX_FRAME_INFO_PRESENT (0x8)
+#define HTT_PDEV_CCA_STATS_USEC_CNT_INFO_PRESENT (0x10)
+#define HTT_PDEV_CCA_STATS_MED_RX_IDLE_INFO_PRESENT (0x20)
+#define HTT_PDEV_CCA_STATS_MED_TX_IDLE_GLOBAL_INFO_PRESENT (0x40)
+#define HTT_PDEV_CCA_STATS_CCA_OBBS_USEC_INFO_PRESENT (0x80)
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+
+ /* Below values are obtained from the HW Cycles counter registers */
+ A_UINT32 tx_frame_usec;
+ A_UINT32 rx_frame_usec;
+ A_UINT32 rx_clear_usec;
+ A_UINT32 my_rx_frame_usec;
+ A_UINT32 usec_cnt;
+ A_UINT32 med_rx_idle_usec;
+ A_UINT32 med_tx_idle_global_usec;
+ A_UINT32 cca_obss_usec;
+} htt_pdev_stats_cca_counters_tlv;
+
+/* NOTE: THIS htt_pdev_cca_stats_hist_tlv STRUCTURE IS DEPRECATED,
+ * due to lack of support in some host stats infrastructures for
+ * TLVs nested within TLVs.
+ */
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+
+ /* The channel number on which these stats were collected */
+ A_UINT32 chan_num;
+
+ /* num of CCA records (Num of htt_pdev_stats_cca_counters_tlv)*/
+ A_UINT32 num_records;
+
+ /*
+ * Bit map of valid CCA counters
+ * Bit0 - tx_frame_usec
+ * Bit1 - rx_frame_usec
+ * Bit2 - rx_clear_usec
+ * Bit3 - my_rx_frame_usec
+ * bit4 - usec_cnt
+ * Bit5 - med_rx_idle_usec
+ * Bit6 - med_tx_idle_global_usec
+ * Bit7 - cca_obss_usec
+ *
+ * See HTT_PDEV_CCA_STATS_xxx_INFO_PRESENT defs
+ */
+ A_UINT32 valid_cca_counters_bitmap;
+
+ /* Indicates the stats collection interval
+ * Valid Values:
+ * 100 - For the 100ms interval CCA stats histogram
+ * 1000 - For 1sec interval CCA histogram
+ * 0xFFFFFFFF - For Cumulative CCA Stats
+ */
+ A_UINT32 collection_interval;
+
+ /**
+ * This is followed by an array containing the CCA stats collected in
+ * the last N intervals, when the indication is for the last N intervals
+ * of CCA stats. In that case cca_hist_tlv[0] holds the oldest CCA stats
+ * and cca_hist_tlv[N-1] holds the most recent CCA stats.
+ */
+ htt_pdev_stats_cca_counters_tlv cca_hist_tlv[1];
+} htt_pdev_cca_stats_hist_tlv;
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+
+ /* The channel number on which these stats were collected */
+ A_UINT32 chan_num;
+
+ /* num of CCA records (Num of htt_pdev_stats_cca_counters_tlv)*/
+ A_UINT32 num_records;
+
+ /*
+ * Bit map of valid CCA counters
+ * Bit0 - tx_frame_usec
+ * Bit1 - rx_frame_usec
+ * Bit2 - rx_clear_usec
+ * Bit3 - my_rx_frame_usec
+ * bit4 - usec_cnt
+ * Bit5 - med_rx_idle_usec
+ * Bit6 - med_tx_idle_global_usec
+ * Bit7 - cca_obss_usec
+ *
+ * See HTT_PDEV_CCA_STATS_xxx_INFO_PRESENT defs
+ */
+ A_UINT32 valid_cca_counters_bitmap;
+
+ /* Indicates the stats collection interval
+ * Valid Values:
+ * 100 - For the 100ms interval CCA stats histogram
+ * 1000 - For 1sec interval CCA histogram
+ * 0xFFFFFFFF - For Cumulative CCA Stats
+ */
+ A_UINT32 collection_interval;
+
+ /**
+ * This is followed by an array containing the CCA stats collected in
+ * the last N intervals, when the indication is for the last N intervals
+ * of CCA stats. In that case element [0] holds the oldest CCA stats
+ * and element [N-1] holds the most recent CCA stats.
+ * htt_pdev_stats_cca_counters_tlv cca_hist_tlv[1];
+ */
+} htt_pdev_cca_stats_hist_v1_tlv;
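A minimal sketch of using the HTT_PDEV_CCA_STATS_*_INFO_PRESENT flags above to guard a derived channel-busy percentage; the formula (rx_clear time over the measurement window) is an illustrative interpretation, not a definition taken from this header.

    #include <stdint.h>

    static int cca_busy_percent(uint32_t valid_bitmap,
                                uint32_t rx_clear_usec, uint32_t usec_cnt)
    {
        if (!(valid_bitmap & HTT_PDEV_CCA_STATS_RX_CLEAR_INFO_PRESENT) ||
            !(valid_bitmap & HTT_PDEV_CCA_STATS_USEC_CNT_INFO_PRESENT) ||
            usec_cnt == 0)
            return -1;  /* counters not reported for this record */

        return (int)((uint64_t)rx_clear_usec * 100 / usec_cnt);
    }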
+
+#define HTT_TWT_SESSION_FLAG_FLOW_ID_M 0x0000ffff
+#define HTT_TWT_SESSION_FLAG_FLOW_ID_S 0
+
+#define HTT_TWT_SESSION_FLAG_BCAST_TWT_M 0x00010000
+#define HTT_TWT_SESSION_FLAG_BCAST_TWT_S 16
+
+#define HTT_TWT_SESSION_FLAG_TRIGGER_TWT_M 0x00020000
+#define HTT_TWT_SESSION_FLAG_TRIGGER_TWT_S 17
+
+#define HTT_TWT_SESSION_FLAG_ANNOUN_TWT_M 0x00040000
+#define HTT_TWT_SESSION_FLAG_ANNOUN_TWT_S 18
+
+#define HTT_TWT_SESSION_FLAG_FLOW_ID_GET(_var) \
+ (((_var) & HTT_TWT_SESSION_FLAG_FLOW_ID_M) >> \
+ HTT_TWT_SESSION_FLAG_FLOW_ID_S)
+
+#define HTT_TWT_SESSION_FLAG_FLOW_ID_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_TWT_SESSION_FLAG_FLOW_ID, _val); \
+ ((_var) |= ((_val) << HTT_TWT_SESSION_FLAG_FLOW_ID_S)); \
+ } while (0)
+
+#define HTT_TWT_SESSION_FLAG_BCAST_TWT_GET(_var) \
+ (((_var) & HTT_TWT_SESSION_FLAG_BCAST_TWT_M) >> \
+ HTT_TWT_SESSION_FLAG_BCAST_TWT_S)
+
+#define HTT_TWT_SESSION_FLAG_BCAST_TWT_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_TWT_SESSION_FLAG_BCAST_TWT, _val); \
+ ((_var) |= ((_val) << HTT_TWT_SESSION_FLAG_BCAST_TWT_S)); \
+ } while (0)
+
+#define HTT_TWT_SESSION_FLAG_TRIGGER_TWT_GET(_var) \
+ (((_var) & HTT_TWT_SESSION_FLAG_TRIGGER_TWT_M) >> \
+ HTT_TWT_SESSION_FLAG_TRIGGER_TWT_S)
+
+#define HTT_TWT_SESSION_FLAG_TRIGGER_TWT_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_TWT_SESSION_FLAG_TRIGGER_TWT, _val); \
+ ((_var) |= ((_val) << HTT_TWT_SESSION_FLAG_TRIGGER_TWT_S)); \
+ } while (0)
+
+#define HTT_TWT_SESSION_FLAG_ANNOUN_TWT_GET(_var) \
+ (((_var) & HTT_TWT_SESSION_FLAG_ANNOUN_TWT_M) >> \
+ HTT_TWT_SESSION_FLAG_ANNOUN_TWT_S)
+
+#define HTT_TWT_SESSION_FLAG_ANNOUN_TWT_SET(_var, _val) \
+ do { \
+ HTT_CHECK_SET_VAL(HTT_TWT_SESSION_FLAG_ANNOUN_TWT, _val); \
+ ((_var) |= ((_val) << HTT_TWT_SESSION_FLAG_ANNOUN_TWT_S)); \
+ } while (0)
+
+#define TWT_DIALOG_ID_UNAVAILABLE 0xFFFFFFFF
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+
+ A_UINT32 vdev_id;
+ htt_mac_addr peer_mac;
+ A_UINT32 flow_id_flags;
+ A_UINT32 dialog_id; /* TWT_DIALOG_ID_UNAVAILABLE is used when TWT session is not initiated by host */
+ A_UINT32 wake_dura_us;
+ A_UINT32 wake_intvl_us;
+ A_UINT32 sp_offset_us;
+} htt_pdev_stats_twt_session_tlv;
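A minimal sketch of decoding a TWT session's flow_id_flags word with the flag macros defined above, assuming this header is included; the print format is illustrative.

    #include <stdio.h>
    #include <stdint.h>

    static void dump_twt_session_flags(uint32_t flow_id_flags)
    {
        printf("flow %u bcast=%u trigger=%u announced=%u\n",
               (unsigned)HTT_TWT_SESSION_FLAG_FLOW_ID_GET(flow_id_flags),
               (unsigned)HTT_TWT_SESSION_FLAG_BCAST_TWT_GET(flow_id_flags),
               (unsigned)HTT_TWT_SESSION_FLAG_TRIGGER_TWT_GET(flow_id_flags),
               (unsigned)HTT_TWT_SESSION_FLAG_ANNOUN_TWT_GET(flow_id_flags));
    }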
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+
+ A_UINT32 pdev_id;
+ A_UINT32 num_sessions;
+
+ htt_pdev_stats_twt_session_tlv twt_session[1];
+} htt_pdev_stats_twt_sessions_tlv;
+
+/* STATS_TYPE: HTT_DBG_EXT_STATS_TWT_SESSIONS
+ * TLV_TAGS:
+ * - HTT_STATS_PDEV_TWT_SESSIONS_TAG
+ * - HTT_STATS_PDEV_TWT_SESSION_TAG
+ */
+/* NOTE:
+ * This structure is for documentation, and cannot be safely used directly.
+ * Instead, use the constituent TLV structures to fill/parse.
+ */
+typedef struct {
+ htt_pdev_stats_twt_sessions_tlv twt_sessions[1];
+} htt_pdev_twt_sessions_stats_t;
+
+typedef enum {
+ /* Global link descriptor queued in REO */
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_0 = 0,
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_1 = 1,
+ HTT_RX_REO_RESOURCE_GLOBAL_LINK_DESC_COUNT_2 = 2,
+ /* Number of queue descriptors of this aging group */
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC0 = 3,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC1 = 4,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC2 = 5,
+ HTT_RX_REO_RESOURCE_BUFFERS_USED_AC3 = 6,
+ /* Total number of MSDUs buffered in AC */
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC0 = 7,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC1 = 8,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC2 = 9,
+ HTT_RX_REO_RESOURCE_AGING_NUM_QUEUES_AC3 = 10,
+
+ HTT_RX_REO_RESOURCE_STATS_MAX = 16
+} htt_rx_reo_resource_sample_id_enum;
+
+typedef struct {
+ htt_tlv_hdr_t tlv_hdr;
+ /* Variable length, based on the number of records (up to HTT_RX_REO_RESOURCE_STATS_MAX) */
+ /* htt_rx_reo_resource_sample_id_enum */
+ A_UINT32 sample_id;
+ /* Max value of all samples */
+ A_UINT32 total_max;
+ /* Average value of total samples */
+ A_UINT32 total_avg;
+ /* Num of samples, including both zero and non-zero ones */
+ A_UINT32 total_sample;
+ /* Average value of all non-zero samples */
+ A_UINT32 non_zeros_avg;
+ /* Num of non-zero samples */
+ A_UINT32 non_zeros_sample;
+ /* Max value of last N non-zero samples (N = last_non_zeros_sample) */
+ A_UINT32 last_non_zeros_max;
+ /* Min value of last N non-zero samples (N = last_non_zeros_sample) */
+ A_UINT32 last_non_zeros_min;
+ /* Average value of last N non-zero samples (N = last_non_zeros_sample) */
+ A_UINT32 last_non_zeros_avg;
+ /* Num of last non-zero samples */
+ A_UINT32 last_non_zeros_sample;
+} htt_rx_reo_resource_stats_tlv_v;
+
+/* STATS_TYPE: HTT_DBG_EXT_STATS_REO_RESOURCE_STATS
+ * TLV_TAGS:
+ * - HTT_STATS_RX_REO_RESOURCE_STATS_TAG
+ */
+/* NOTE:
+ * This structure is for documentation, and cannot be safely used directly.
+ * Instead, use the constituent TLV structures to fill/parse.
+ */
+typedef struct {
+ htt_rx_reo_resource_stats_tlv_v reo_resource_stats;
+} htt_soc_reo_resource_stats_t;
+
#endif /* __HTT_STATS_H__ */
diff --git a/drivers/staging/fw-api/fw/wmi_services.h b/drivers/staging/fw-api/fw/wmi_services.h
index 1740bd30f01d..4cff188b5c68 100644
--- a/drivers/staging/fw-api/fw/wmi_services.h
+++ b/drivers/staging/fw-api/fw/wmi_services.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -243,6 +243,13 @@ typedef enum {
WMI_SERVICE_AP_OBSS_DETECTION_OFFLOAD=147, /* Support SAP mode OBSS detection offload */
WMI_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT=148, /* Support for 11k neighbor report */
WMI_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT=149, /* Support listen interval offload */
+ WMI_SERVICE_BSS_COLOR_OFFLOAD=150, /* Support BSS color change for STA, OBSS color collision detection in FW for AP and STA */
+ WMI_SERVICE_RUNTIME_DPD_RECAL=151, /* Runtime DPD recalibration support */
+ WMI_SERVICE_STA_TWT=152, /* support for TWT (Target Wake Time) of STA */
+ WMI_SERVICE_AP_TWT=153, /* support for TWT (Target Wake Time) on AP */
+ WMI_SERVICE_GMAC_OFFLOAD_SUPPORT=154, /* Support for GMAC */
+ WMI_SERVICE_SPOOF_MAC_SUPPORT=155, /* support for SERVICE_SPOOF_MAC */
+ WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT=156, /* Support TID specific configurations per peer (ack,aggr,retry,rate) */
/******* ADD NEW SERVICES HERE *******/
@@ -303,8 +310,8 @@ typedef enum {
(svc_id) < WMI_MAX_SERVICE ? \
WMI_SERVICE_IS_ENABLED(pwmi_svc_bmap, svc_id) : \
/* If service ID is in the extended range, check ext_bmap */ \
- (pwmi_svc_ext_bmap)[((svc_id) - WMI_MAX_SERVICE) / 32] >> \
- ((svc_id) & 0x1f))
+ (((pwmi_svc_ext_bmap)[((svc_id) - WMI_MAX_SERVICE) / 32] >> \
+ ((svc_id) & 0x1f)) & 0x1))
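The hunk above adds a trailing "& 0x1" to the extended-service check: without it, the expression evaluates to the whole shifted bitmap word, so any enabled higher-numbered service would make the test appear true. A small standalone sketch of the corrected logic, using an assumed WMI_MAX_SERVICE of 256 purely for illustration:

    #include <stdint.h>

    #define EX_WMI_MAX_SERVICE 256   /* assumed value, for illustration only */

    static int ext_service_enabled(const uint32_t *ext_bmap, unsigned int svc_id)
    {
        unsigned int word = (svc_id - EX_WMI_MAX_SERVICE) / 32;
        unsigned int bit  = svc_id & 0x1f;   /* same bit selection as the macro */

        return (ext_bmap[word] >> bit) & 0x1;
    }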
#ifdef __cplusplus
diff --git a/drivers/staging/fw-api/fw/wmi_tlv_defs.h b/drivers/staging/fw-api/fw/wmi_tlv_defs.h
index b9b56a171076..1baafd7d0417 100644
--- a/drivers/staging/fw-api/fw/wmi_tlv_defs.h
+++ b/drivers/staging/fw-api/fw/wmi_tlv_defs.h
@@ -892,6 +892,26 @@ typedef enum {
WMITLV_TAG_STRUC_wmi_bpf_set_vdev_work_memory_cmd_fixed_param,
WMITLV_TAG_STRUC_wmi_bpf_get_vdev_work_memory_cmd_fixed_param,
WMITLV_TAG_STRUC_wmi_bpf_get_vdev_work_memory_resp_evt_fixed_param,
+ WMITLV_TAG_STRUC_wmi_pdev_get_nfcal_power_fixed_param,
+ WMITLV_TAG_STRUC_wmi_bss_color_change_enable_fixed_param,
+ WMITLV_TAG_STRUC_wmi_obss_color_collision_det_config_fixed_param,
+ WMITLV_TAG_STRUC_wmi_obss_color_collision_evt_fixed_param,
+ WMITLV_TAG_STRUC_wmi_runtime_dpd_recal_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_twt_enable_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_twt_disable_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_twt_add_dialog_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_twt_del_dialog_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_twt_pause_dialog_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_twt_resume_dialog_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_twt_enable_complete_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_twt_disable_complete_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_twt_add_dialog_complete_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_twt_del_dialog_complete_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_twt_pause_dialog_complete_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_twt_resume_dialog_complete_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_request_roam_scan_stats_cmd_fixed_param,
+ WMITLV_TAG_STRUC_wmi_roam_scan_stats_event_fixed_param,
+ WMITLV_TAG_STRUC_wmi_peer_tid_configurations_cmd_fixed_param,
} WMITLV_TAG_ID;
/*
@@ -1251,6 +1271,18 @@ typedef enum {
OP(WMI_BPF_SET_VDEV_ENABLE_CMDID) \
OP(WMI_BPF_SET_VDEV_WORK_MEMORY_CMDID) \
OP(WMI_BPF_GET_VDEV_WORK_MEMORY_CMDID) \
+ OP(WMI_PDEV_GET_NFCAL_POWER_CMDID) \
+ OP(WMI_BSS_COLOR_CHANGE_ENABLE_CMDID) \
+ OP(WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID) \
+ OP(WMI_RUNTIME_DPD_RECAL_CMDID) \
+ OP(WMI_TWT_ENABLE_CMDID) \
+ OP(WMI_TWT_DISABLE_CMDID) \
+ OP(WMI_TWT_ADD_DIALOG_CMDID) \
+ OP(WMI_TWT_DEL_DIALOG_CMDID) \
+ OP(WMI_TWT_PAUSE_DIALOG_CMDID) \
+ OP(WMI_TWT_RESUME_DIALOG_CMDID) \
+ OP(WMI_REQUEST_ROAM_SCAN_STATS_CMDID) \
+ OP(WMI_PEER_TID_CONFIGURATIONS_CMDID) \
/* add new CMD_LIST elements above this line */
@@ -1449,6 +1481,14 @@ typedef enum {
OP(WMI_SAR_GET_LIMITS_EVENTID) \
OP(WMI_SAR2_RESULT_EVENTID) \
OP(WMI_BPF_GET_VDEV_WORK_MEMORY_RESP_EVENTID) \
+ OP(WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID) \
+ OP(WMI_TWT_ENABLE_COMPLETE_EVENTID) \
+ OP(WMI_TWT_DISABLE_COMPLETE_EVENTID) \
+ OP(WMI_TWT_ADD_DIALOG_COMPLETE_EVENTID) \
+ OP(WMI_TWT_DEL_DIALOG_COMPLETE_EVENTID) \
+ OP(WMI_TWT_PAUSE_DIALOG_COMPLETE_EVENTID) \
+ OP(WMI_TWT_RESUME_DIALOG_COMPLETE_EVENTID) \
+ OP(WMI_ROAM_SCAN_STATS_EVENTID) \
/* add new EVT_LIST elements above this line */
@@ -2824,6 +2864,12 @@ WMITLV_CREATE_PARAM_STRUC(WMI_LPI_START_SCAN_CMDID);
WMITLV_CREATE_PARAM_STRUC(WMI_LPI_STOP_SCAN_CMDID);
+/* Request for roam stats Cmd */
+#define WMITLV_TABLE_WMI_REQUEST_ROAM_SCAN_STATS_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_request_roam_scan_stats_cmd_fixed_param, wmi_request_roam_scan_stats_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+
+WMITLV_CREATE_PARAM_STRUC(WMI_REQUEST_ROAM_SCAN_STATS_CMDID);
+
#define WMITLV_TABLE_WMI_LPI_RESULT_EVENTID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_lpi_result_event_fixed_param, wmi_lpi_result_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, data, WMITLV_SIZE_VAR)
@@ -3082,6 +3128,16 @@ WMITLV_CREATE_PARAM_STRUC(WMI_SAP_SET_BLACKLIST_PARAM_CMDID);
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_sap_obss_detection_cfg_cmd_fixed_param, wmi_sap_obss_detection_cfg_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_SAP_OBSS_DETECTION_CFG_CMDID);
+/* STA BSS Color change offload param Cmd */
+#define WMITLV_TABLE_WMI_BSS_COLOR_CHANGE_ENABLE_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_bss_color_change_enable_fixed_param, wmi_bss_color_change_enable_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
+
+/* OBSS Color collision detection config cmd */
+#define WMITLV_TABLE_WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_obss_color_collision_det_config_fixed_param, wmi_obss_color_collision_det_config_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
+
/* APFIND Request */
#define WMITLV_TABLE_WMI_APFIND_CMDID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_apfind_cmd_param, wmi_apfind_cmd_param, fixed_param, WMITLV_SIZE_FIX) \
@@ -3258,6 +3314,11 @@ WMITLV_CREATE_PARAM_STRUC(WMI_COEX_CONFIG_CMDID);
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_coex_get_antenna_isolation_cmd_fixed_param, wmi_coex_get_antenna_isolation_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_COEX_GET_ANTENNA_ISOLATION_CMDID);
+/* Cal Get power fixed param */
+#define WMITLV_TABLE_WMI_PDEV_GET_NFCAL_POWER_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_get_nfcal_power_fixed_param, wmi_pdev_get_nfcal_power_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_GET_NFCAL_POWER_CMDID);
+
/* bpf offload capability get cmd */
#define WMITLV_TABLE_WMI_BPF_GET_CAPABILITY_CMDID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_bpf_get_capability_cmd_fixed_param, wmi_bpf_get_capability_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
@@ -3591,6 +3652,47 @@ WMITLV_CREATE_PARAM_STRUC(WMI_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMDID);
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_pdev_set_rx_filter_promiscuous_cmd_fixed_param, wmi_pdev_set_rx_filter_promiscuous_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_SET_RX_FILTER_PROMISCUOUS_CMDID);
+/* Runtime DPD Recalibration Params */
+#define WMITLV_TABLE_WMI_RUNTIME_DPD_RECAL_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_runtime_dpd_recal_cmd_fixed_param, wmi_runtime_dpd_recal_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+
+WMITLV_CREATE_PARAM_STRUC(WMI_RUNTIME_DPD_RECAL_CMDID);
+
+/* TWT enable cmd */
+#define WMITLV_TABLE_WMI_TWT_ENABLE_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_enable_cmd_fixed_param, wmi_twt_enable_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_TWT_ENABLE_CMDID);
+
+/* TWT disable cmd */
+#define WMITLV_TABLE_WMI_TWT_DISABLE_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_disable_cmd_fixed_param, wmi_twt_disable_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_TWT_DISABLE_CMDID);
+
+/* TWT add dialog cmd */
+#define WMITLV_TABLE_WMI_TWT_ADD_DIALOG_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_add_dialog_cmd_fixed_param, wmi_twt_add_dialog_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_TWT_ADD_DIALOG_CMDID);
+
+/* TWT delete dialog cmd */
+#define WMITLV_TABLE_WMI_TWT_DEL_DIALOG_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_del_dialog_cmd_fixed_param, wmi_twt_del_dialog_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_TWT_DEL_DIALOG_CMDID);
+
+/* TWT pause dialog cmd */
+#define WMITLV_TABLE_WMI_TWT_PAUSE_DIALOG_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_pause_dialog_cmd_fixed_param, wmi_twt_pause_dialog_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_TWT_PAUSE_DIALOG_CMDID);
+
+/* TWT resume dialog cmd */
+#define WMITLV_TABLE_WMI_TWT_RESUME_DIALOG_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_resume_dialog_cmd_fixed_param, wmi_twt_resume_dialog_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_TWT_RESUME_DIALOG_CMDID);
+
+/* Set peer tid configurations Cmd */
+#define WMITLV_TABLE_WMI_PEER_TID_CONFIGURATIONS_CMDID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_peer_tid_configurations_cmd_fixed_param, wmi_peer_tid_configurations_cmd_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_PEER_TID_CONFIGURATIONS_CMDID);
+
/************************** TLV definitions of WMI events *******************************/
@@ -3755,7 +3857,8 @@ WMITLV_CREATE_PARAM_STRUC(WMI_OFFCHAN_DATA_TX_COMPLETION_EVENTID);
#define WMITLV_TABLE_WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID(id,op,buf,len) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_mgmt_tx_compl_bundle_event_fixed_param, wmi_mgmt_tx_compl_bundle_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, desc_ids, WMITLV_SIZE_VAR) \
- WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, status, WMITLV_SIZE_VAR)
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, status, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, ppdu_id, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_MGMT_TX_BUNDLE_COMPLETION_EVENTID);
/* VDEV Start response Event */
@@ -4413,6 +4516,11 @@ WMITLV_CREATE_PARAM_STRUC(WMI_SAP_OFL_DEL_STA_EVENTID);
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_sap_obss_detection_info_evt_fixed_param, wmi_sap_obss_detection_info_evt_fixed_param, fixed_param, WMITLV_SIZE_FIX)
WMITLV_CREATE_PARAM_STRUC(WMI_SAP_OBSS_DETECTION_REPORT_EVENTID);
+/* OBSS Color collision detection event */
+#define WMITLV_TABLE_WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_obss_color_collision_evt_fixed_param, wmi_obss_color_collision_evt_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID);
+
/* Set OCB schedule event, DEPRECATED */
#define WMITLV_TABLE_WMI_OCB_SET_SCHED_EVENTID(id,op,buf,len) \
WMITLV_ELEM(id, op, buf, len, WMITLV_TAG_STRUC_wmi_ocb_set_sched_event_fixed_param, wmi_ocb_set_sched_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
@@ -4804,6 +4912,56 @@ WMITLV_CREATE_PARAM_STRUC(WMI_PDEV_DMA_RING_BUF_RELEASE_EVENTID);
WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_BYTE, A_UINT8, bufp, WMITLV_SIZE_VAR)
WMITLV_CREATE_PARAM_STRUC(WMI_UNIT_TEST_EVENTID);
+
+/* enabling TWT complete Event */
+#define WMITLV_TABLE_WMI_TWT_ENABLE_COMPLETE_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_enable_complete_event_fixed_param, wmi_twt_enable_complete_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_TWT_ENABLE_COMPLETE_EVENTID);
+
+/* disabling TWT complete Event */
+#define WMITLV_TABLE_WMI_TWT_DISABLE_COMPLETE_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_disable_complete_event_fixed_param, wmi_twt_disable_complete_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_TWT_DISABLE_COMPLETE_EVENTID);
+
+/* adding TWT dialog complete Event */
+#define WMITLV_TABLE_WMI_TWT_ADD_DIALOG_COMPLETE_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_add_dialog_complete_event_fixed_param, wmi_twt_add_dialog_complete_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_TWT_ADD_DIALOG_COMPLETE_EVENTID);
+
+/* deleting TWT dialog complete Event */
+#define WMITLV_TABLE_WMI_TWT_DEL_DIALOG_COMPLETE_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_del_dialog_complete_event_fixed_param, wmi_twt_del_dialog_complete_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_TWT_DEL_DIALOG_COMPLETE_EVENTID);
+
+/* pausing TWT dialog complete Event */
+#define WMITLV_TABLE_WMI_TWT_PAUSE_DIALOG_COMPLETE_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_pause_dialog_complete_event_fixed_param, wmi_twt_pause_dialog_complete_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_TWT_PAUSE_DIALOG_COMPLETE_EVENTID);
+
+/* resuming TWT dialog complete Event */
+#define WMITLV_TABLE_WMI_TWT_RESUME_DIALOG_COMPLETE_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_twt_resume_dialog_complete_event_fixed_param, wmi_twt_resume_dialog_complete_event_fixed_param, fixed_param, WMITLV_SIZE_FIX)
+WMITLV_CREATE_PARAM_STRUC(WMI_TWT_RESUME_DIALOG_COMPLETE_EVENTID);
+
+/* Event to send roam scan stats */
+#define WMITLV_TABLE_WMI_ROAM_SCAN_STATS_EVENTID(id,op,buf,len) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_STRUC_wmi_roam_scan_stats_event_fixed_param, wmi_roam_scan_stats_event_fixed_param, fixed_param, WMITLV_SIZE_FIX) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, client_id, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_FIXED_STRUC, wmi_roaming_timestamp, timestamp, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, num_channels, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, chan_info, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_FIXED_STRUC, wmi_mac_addr, old_bssid, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, is_roaming_success, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_FIXED_STRUC, wmi_mac_addr, new_bssid, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, num_roam_candidates, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_FIXED_STRUC, wmi_roam_scan_trigger_reason, roam_reason, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_FIXED_STRUC, wmi_mac_addr, bssid, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, score, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, channel, WMITLV_SIZE_VAR) \
+ WMITLV_ELEM(id,op,buf,len, WMITLV_TAG_ARRAY_UINT32, A_UINT32, rssi, WMITLV_SIZE_VAR)
+WMITLV_CREATE_PARAM_STRUC(WMI_ROAM_SCAN_STATS_EVENTID);
+
+
#ifdef __cplusplus
}
#endif
diff --git a/drivers/staging/fw-api/fw/wmi_unified.h b/drivers/staging/fw-api/fw/wmi_unified.h
index 43c2db7c6fdd..da7adcf8e389 100644
--- a/drivers/staging/fw-api/fw/wmi_unified.h
+++ b/drivers/staging/fw-api/fw/wmi_unified.h
@@ -242,6 +242,7 @@ typedef enum {
WMI_GRP_HW_DATA_FILTER, /* 0x3b */
WMI_GRP_WLM, /* 0x3c WLAN Latency Manager */
WMI_GRP_11K_OFFLOAD, /* 0x3d */
+ WMI_GRP_TWT, /* 0x3e TWT (Target Wake Time) for STA and AP */
} WMI_GRP_ID;
#define WMI_CMD_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
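Given WMI_CMD_GRP_START_ID above and WMI_GRP_TWT's value of 0x3e (per the group comment), the new TWT command IDs start at (0x3e << 12) | 0x1 = 0x3e001 and count upward in declaration order; a tiny standalone check of that arithmetic:

    #include <assert.h>

    static void twt_cmd_id_arithmetic(void)
    {
        unsigned int grp_twt = 0x3e;               /* from the WMI_GRP_ID comment */
        unsigned int start   = (grp_twt << 12) | 0x1;

        assert(start == 0x3e001);                  /* WMI_TWT_ENABLE_CMDID */
        assert(start + 5 == 0x3e006);              /* WMI_TWT_RESUME_DIALOG_CMDID */
    }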
@@ -501,10 +502,25 @@ typedef enum {
WMI_PEER_SET_RX_BLOCKSIZE_CMDID,
/** request peer antdiv info from FW. FW shall respond with PEER_ANTDIV_INFO_EVENTID */
WMI_PEER_ANTDIV_INFO_REQ_CMDID,
- /** Peer operating mode change indication sent to host to update stats */
- WMI_PEER_OPER_MODE_CHANGE_EVENTID,
+ /*
+ * The WMI_PEER_OPER_MODE_CHANGE_EVENTID def was originally mistakenly
+ * placed here, amongst the CMDID defs.
+ * The WMI_PEER_OPER_MODE_CHANGE_EVENTID def has been moved to the
+ * EVENTID section, but to preserve backwards compatibility, the value
+ * here that had been used for WMI_PEER_OPER_MODE_CHANGE_EVENTID
+ * is kept reserved/deprecated.
+ *
+ * This WMI_PEER_RESERVED0_CMDID value can be replaced with an actual
+ * WMI peer event message ID, though it will be simpler to instead add
+ * new WMI_PEER CMDID defs at the end of the WMI_GRP_PEER WMI_CMD_GRP.
+ */
+ WMI_PEER_RESERVED0_CMDID,
/** Peer/Tid/Msduq threshold update */
WMI_PEER_TID_MSDUQ_QDEPTH_THRESH_UPDATE_CMDID,
+ /** TID specific configurations per peer of type
+ * wmi_peer_tid_configurations_cmd_fixed_param
+ */
+ WMI_PEER_TID_CONFIGURATIONS_CMDID,
/* beacon/management specific commands */
@@ -533,6 +549,8 @@ typedef enum {
WMI_PDEV_SEND_FD_CMDID,
/** Cmd to enable/disable offloaded beacons */
WMI_BCN_OFFLOAD_CTRL_CMDID,
+ /** Cmd to enable FW handling BSS color change notification from AP. */
+ WMI_BSS_COLOR_CHANGE_ENABLE_CMDID,
/** commands to directly control ba negotiation directly from host. only used in test mode */
@@ -610,6 +628,8 @@ typedef enum {
WMI_ROAM_BTM_CONFIG_CMDID,
/** Enable or Disable Fast Initial Link Setup (FILS) feature */
WMI_ENABLE_FILS_CMDID,
+ /** Request for roam scan stats */
+ WMI_REQUEST_ROAM_SCAN_STATS_CMDID,
/** offload scan specific commands */
/** set offload scan AP profile */
@@ -877,6 +897,8 @@ typedef enum {
WMI_READ_DATA_FROM_FLASH_CMDID,
/* Thermal Throttling SET CONF commands */
WMI_THERM_THROT_SET_CONF_CMDID,
+ /* set runtime dpd recalibration params */
+ WMI_RUNTIME_DPD_RECAL_CMDID,
/* Offload 11k related requests */
WMI_11K_OFFLOAD_REPORT_CMDID = WMI_CMD_GRP_START_ID(WMI_GRP_11K_OFFLOAD),
@@ -998,6 +1020,7 @@ typedef enum {
*/
WMI_OBSS_SCAN_ENABLE_CMDID = WMI_CMD_GRP_START_ID(WMI_GRP_OBSS_OFL),
WMI_OBSS_SCAN_DISABLE_CMDID,
+ WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID,
/**LPI commands*/
/**LPI mgmt snooping config command*/
@@ -1098,6 +1121,13 @@ typedef enum {
/** WMI commands related to WLAN latency module **/
WMI_WLM_CONFIG_CMDID = WMI_CMD_GRP_START_ID(WMI_GRP_WLM),
+ /** WMI commands related to STA & AP TWT module **/
+ WMI_TWT_ENABLE_CMDID = WMI_CMD_GRP_START_ID(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_CMDID,
+ WMI_TWT_ADD_DIALOG_CMDID,
+ WMI_TWT_DEL_DIALOG_CMDID,
+ WMI_TWT_PAUSE_DIALOG_CMDID,
+ WMI_TWT_RESUME_DIALOG_CMDID,
} WMI_CMD_ID;
typedef enum {
@@ -1258,6 +1288,32 @@ typedef enum {
/** Peer Ant Div Info Event with rssi per chain, etc */
WMI_PEER_ANTDIV_INFO_EVENTID,
+ /*
+ * WMI_PEER_RESERVED_EVENTID
+ * These values are used for placeholders, to allow the subsequent
+ * WMI_PEER_OPER_MODE_CHANGE_EVENTID constant to have the same value
+ * as it had in its original location, when it was mistakenly placed
+ * amongst the WMI_PEER CMDID defs.
+ *
+ * These WMI_PEER_RESERVED values can be replaced with actual WMI peer
+ * event message IDs, though it will be simpler to instead add new
+ * WMI_PEER EVENTID defs at the end of the WMI_GRP_PEER WMI_EVT_GRP.
+ */
+ WMI_PEER_RESERVED0_EVENTID,
+ WMI_PEER_RESERVED1_EVENTID,
+ WMI_PEER_RESERVED2_EVENTID,
+ WMI_PEER_RESERVED3_EVENTID,
+ WMI_PEER_RESERVED4_EVENTID,
+ WMI_PEER_RESERVED5_EVENTID,
+ WMI_PEER_RESERVED6_EVENTID,
+ WMI_PEER_RESERVED7_EVENTID,
+ WMI_PEER_RESERVED8_EVENTID,
+ WMI_PEER_RESERVED9_EVENTID,
+ WMI_PEER_RESERVED10_EVENTID,
+ /** Peer operating mode change indication sent to host to update stats */
+ WMI_PEER_OPER_MODE_CHANGE_EVENTID,
+
+
/* beacon/mgmt specific events */
/** RX management frame. the entire frame is carried along with the event. */
WMI_MGMT_RX_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_MGMT),
@@ -1312,6 +1368,8 @@ typedef enum {
WMI_ROAM_SYNCH_EVENTID,
/** roam synch frame event */
WMI_ROAM_SYNCH_FRAME_EVENTID,
+ /** various roam scan stats */
+ WMI_ROAM_SCAN_STATS_EVENTID,
/** P2P disc found */
WMI_P2P_DISC_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_P2P),
@@ -1571,6 +1629,9 @@ typedef enum {
WMI_SAP_OFL_DEL_STA_EVENTID,
WMI_SAP_OBSS_DETECTION_REPORT_EVENTID,
+ /* OBSS Offloads events */
+ WMI_OBSS_COLOR_COLLISION_DETECTION_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_OBSS_OFL),
+
/** Out-of-context-of-bss (OCB) events */
WMI_OCB_SET_CONFIG_RESP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_OCB),
WMI_OCB_GET_TSF_TIMER_RESP_EVENTID,
@@ -1599,6 +1660,14 @@ typedef enum {
WMI_REG_CHAN_LIST_CC_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_REGULATORY),
WMI_11D_NEW_COUNTRY_EVENTID,
+ /** Events for TWT(Target Wake Time) of STA and AP */
+ WMI_TWT_ENABLE_COMPLETE_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_TWT),
+ WMI_TWT_DISABLE_COMPLETE_EVENTID,
+ WMI_TWT_ADD_DIALOG_COMPLETE_EVENTID,
+ WMI_TWT_DEL_DIALOG_COMPLETE_EVENTID,
+ WMI_TWT_PAUSE_DIALOG_COMPLETE_EVENTID,
+ WMI_TWT_RESUME_DIALOG_COMPLETE_EVENTID,
+
/** Events in Prototyping phase */
WMI_NDI_CAP_RSP_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_PROTOTYPE),
WMI_NDP_INITIATOR_RSP_EVENTID,
@@ -1988,13 +2057,17 @@ typedef struct _wmi_ppe_threshold {
#define WMI_DBS_CONC_SCAN_CFG_SYNC_DBS_SCAN_GET(scan_cfg) \
((scan_cfg & WMI_DBS_CONC_SCAN_CFG_SYNC_DBS_SCAN_MASK) >> WMI_DBS_CONC_SCAN_CFG_SYNC_DBS_SCAN_BITPOS)
-#define WMI_DBS_FW_MODE_CFG_DBS_BITPOS (31)
-#define WMI_DBS_FW_MODE_CFG_AGILE_DFS_BITPOS (30)
-#define WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_BITPOS (29)
+#define WMI_DBS_FW_MODE_CFG_DBS_BITPOS (31)
+#define WMI_DBS_FW_MODE_CFG_AGILE_DFS_BITPOS (30)
+#define WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_BITPOS (29)
+#define WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_STA_BITPOS (28)
+#define WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_P2P_BITPOS (27)
-#define WMI_DBS_FW_MODE_CFG_DBS_MASK (0x1 << WMI_DBS_FW_MODE_CFG_DBS_BITPOS)
-#define WMI_DBS_FW_MODE_CFG_AGILE_DFS_MASK (0x1 << WMI_DBS_FW_MODE_CFG_AGILE_DFS_BITPOS)
-#define WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_DFS_MASK (0x1 << WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_BITPOS)
+#define WMI_DBS_FW_MODE_CFG_DBS_MASK (0x1 << WMI_DBS_FW_MODE_CFG_DBS_BITPOS)
+#define WMI_DBS_FW_MODE_CFG_AGILE_DFS_MASK (0x1 << WMI_DBS_FW_MODE_CFG_AGILE_DFS_BITPOS)
+#define WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_DFS_MASK (0x1 << WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_BITPOS)
+#define WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_STA_MASK (0x1 << WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_STA_BITPOS)
+#define WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_P2P_MASK (0x1 << WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_P2P_BITPOS)
#define WMI_DBS_FW_MODE_CFG_DBS_SET(fw_mode, value) \
WMI_SET_BITS(fw_mode, WMI_DBS_FW_MODE_CFG_DBS_BITPOS, 1, value)
@@ -2002,6 +2075,10 @@ typedef struct _wmi_ppe_threshold {
WMI_SET_BITS(fw_mode, WMI_DBS_FW_MODE_CFG_AGILE_DFS_BITPOS, 1, value)
#define WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_SET(fw_mode, value) \
WMI_SET_BITS(fw_mode, WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_BITPOS, 1, value)
+#define WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_STA_SET(fw_mode, value) \
+ WMI_SET_BITS(fw_mode, WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_STA_BITPOS, 1, value)
+#define WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_P2P_SET(fw_mode, value) \
+ WMI_SET_BITS(fw_mode, WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_P2P_BITPOS, 1, value)
#define WMI_DBS_FW_MODE_CFG_DBS_GET(fw_mode) \
((fw_mode & WMI_DBS_FW_MODE_CFG_DBS_MASK) >> WMI_DBS_FW_MODE_CFG_DBS_BITPOS)
@@ -2009,6 +2086,10 @@ typedef struct _wmi_ppe_threshold {
((fw_mode & WMI_DBS_FW_MODE_CFG_AGILE_DFS_MASK) >> WMI_DBS_FW_MODE_CFG_AGILE_DFS_BITPOS)
#define WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_GET(fw_mode) \
((fw_mode & WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_DFS_MASK) >> WMI_DBS_FW_MODE_CFG_DBS_FOR_CXN_BITPOS)
+#define WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_STA_GET(fw_mode) \
+ ((fw_mode & WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_STA_MASK) >> WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_STA_BITPOS)
+#define WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_P2P_GET(fw_mode) \
+ ((fw_mode & WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_P2P_MASK) >> WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_P2P_BITPOS)
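As a hedged illustration (not part of this patch), the new STA+STA and STA+P2P DBS bits can be driven with the SET/GET macros above; WMI_SET_BITS/WMI_GET_BITS and the macro names come from this header, while the surrounding usage is assumed:

    /* Illustrative sketch: request DBS plus STA+STA concurrency in a
     * fw_mode word, then read one of the new bits back.
     */
    A_UINT32 fw_mode = 0;

    WMI_DBS_FW_MODE_CFG_DBS_SET(fw_mode, 1);                  /* bit 31 */
    WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_STA_SET(fw_mode, 1); /* bit 28 */

    if (WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_STA_GET(fw_mode)) {
        /* DBS for STA+STA concurrency has been requested */
    }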
/** NOTE: This structure cannot be extended in the future without breaking WMI compatibility */
@@ -2175,6 +2256,15 @@ typedef struct {
* bits 31:28 -> CRM sub ID
*/
A_UINT32 fw_build_vers_ext;
+ /* max_nlo_ssids - dynamically negotiated maximum number of SSIDS for NLO
+ * This limit is the maximum number of SSIDs that can be configured in the
+ * target for Network List Offload (i.e. scanning for a preferred network).
+ * If this value is 0x0, the target supports WMI_NLO_MAX_SSIDS (16).
+ * If this value is non-zero, the host should send back in the
+ * WMI_INIT message's wmi_resource_config.max_nlo_ssids a value that
+ * is equal to or less than the target capability limit reported here.
+ */
+ A_UINT32 max_nlo_ssids;
} wmi_service_ready_ext_event_fixed_param;
typedef enum {
@@ -2647,8 +2737,11 @@ typedef struct {
A_UINT32 num_ns_ext_tuples_cfg;
/**
- * size (in bytes) of the buffer the FW shall allocate to store
- * packet filtering instructions
+ * size (in bytes) of the buffer the FW shall allocate per vdev;
+ * the firmware can dynamically allocate memory for (or disable)
+ * the packet filtering feature.
+ * 0 - fw chooses its default value
+ * -1 (0xFFFFFFFF) - disable APF
*/
A_UINT32 bpf_instruction_size;
@@ -2684,6 +2777,53 @@ typedef struct {
* For regular use, this field should be set to 0x0.
*/
A_UINT32 sched_params;
+
+ /* Number of MAC on which AP TWT feature is supported */
+ A_UINT32 twt_ap_pdev_count;
+
+ /* Max no of STA with which TWT sessions can be formed by the AP */
+ A_UINT32 twt_ap_sta_count;
+
+ /* max_nlo_ssids - dynamically negotiated maximum number of SSIDS for NLO
+ * This parameter provides the final specification for the maximum number
+ * of SSIDs for the target to support for Network List Offload's scanning
+ * for preferred networks.
+ * This wmi_resource_config.max_nlo_ssids must be <= the max_nlo_ssids
+ * field from the target's WMI_SERVICE_READY_EXT_EVENT message.
+ * (If the target didn't provide a max_nlo_ssids field in the
+ * WMI_SERVICE_READY_EXT message, or if the SERVICE_READY_EXT msg's
+ * max_nlo_ssids value was 0x0, the target doesn't support dynamic
+ * negotiation of max NLO SSIDs, and WMI_NLO_MAX_SSIDS (=16) applies.)
+ * If this wmi_resource_config.max_nlo_ssids field is absent or 0x0,
+ * the host does not support dynamic negotiation of max NLO SSIDs.
+ * In such a case, the target will respond as follows:
+ * If the target supports at least WMI_NLO_MAX_SSIDS, the target will
+ * use the statically-configured WMI_NLO_MAX_SSIDS value.
+ * If the target supports less than WMI_NLO_MAX_SSIDS, the target will
+ * abort its boot-up, due to receiving an invalid/unsupported
+ * configuration specification.
+ */
+ A_UINT32 max_nlo_ssids;
+
+ /**
+ * num_packet_filters: the total number of packet filters that the host
+ * requests the fw to support; the firmware can then dynamically allocate
+ * memory for (or disable) the packet filtering feature.
+ *
+ * 0 - fw chooses its default value.
+ * -1 (0xFFFFFFFF) - disable packet filtering.
+ */
+ A_UINT32 num_packet_filters;
+
+ /**
+ * num_max_sta_vdevs: the maximum number of STA vdevs.
+ * The fw will use it to configure the memory of offload features that
+ * apply only to STA vdevs; P2P client vdevs should be included in
+ * this count.
+ *
+ * 0 - fw chooses its default value: 'num_vdevs' of this structure.
+ */
+ A_UINT32 num_max_sta_vdevs;
} wmi_resource_config;
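A minimal host-side sketch of the max_nlo_ssids negotiation described in the comment above; the helper name, the svc_rdy_ext/cfg pointers, and the requested count of 24 are assumptions for illustration, while the field names and WMI_NLO_MAX_SSIDS come from this header:

    /* Clamp the host's desired NLO SSID count to the limit advertised by
     * the target; 0 from the target means no dynamic negotiation, so the
     * static WMI_NLO_MAX_SSIDS (16) limit applies and the field stays 0.
     */
    static A_UINT32 choose_max_nlo_ssids(A_UINT32 target_max, A_UINT32 host_want)
    {
        if (target_max == 0)
            return 0;
        return (host_want <= target_max) ? host_want : target_max;
    }

    cfg->max_nlo_ssids = choose_max_nlo_ssids(svc_rdy_ext->max_nlo_ssids, 24);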
#define WMI_RSRC_CFG_FLAG_SET(word32, flag, value) \
@@ -4784,6 +4924,23 @@ typedef enum {
/* Enable/Disable data stall detection */
WMI_PDEV_PARAM_DATA_STALL_DETECT_ENABLE, /* 0x9b */
+ /* GCMP Support indication to FW */
+ WMI_PDEV_PARAM_GCMP_SUPPORT_ENABLE, /* 0x9c */
+ /** Enable/Disable chain selection optimization for one-chain DTIM
+ * non-zero - Enable optimization and use this non-zero value as the
+ * chain imbalance threshold for optimization to kick in
+ * (units = dB)
+ * 0 - Disable optimization
+ */
+ WMI_PDEV_PARAM_1CH_DTIM_OPTIMIZED_CHAIN_SELECTION,/* 0x9d */
+ /*
+ * Override default FW behavior and explicitly enable / disable
+ * the use of CCK for PPDU transmissions.
+ *
+ * When CCK transmissions are disabled, the default OFDM legacy
+ * rate will be used instead.
+ */
+ WMI_PDEV_PARAM_CCK_TX_ENABLE, /* 0x9e */
} WMI_PDEV_PARAM;
typedef struct {
@@ -4973,6 +5130,11 @@ typedef struct {
* See macros starting with WMI_PDEV_ID_ for values.
*/
A_UINT32 pdev_id;
+ /* ppdu_id
+ * Hardware PPDU ID for tracking the completion stats
+ * A ppdu_id value of 0x0 is invalid, and should be ignored.
+ */
+ A_UINT32 ppdu_id;
} wmi_mgmt_tx_compl_event_fixed_param;
typedef struct {
@@ -4983,6 +5145,11 @@ typedef struct {
* See macros starting with WMI_PDEV_ID_ for values.
*/
A_UINT32 pdev_id;
+ /* ppdu_id
+ * Hardware PPDU ID for tracking the completion stats
+ * A ppdu_id value of 0x0 is invalid, and should be ignored.
+ */
+ A_UINT32 ppdu_id;
} wmi_offchan_data_tx_compl_event_fixed_param;
typedef struct {
@@ -4991,6 +5158,7 @@ typedef struct {
/* tlv for completion
* A_UINT32 desc_ids[num_reports]; <- from tx_send_cmd
* A_UINT32 status[num_reports]; <- WMI_MGMT_TX_COMP_STATUS_TYPE
+ * A_UINT32 ppdu_id[num_reports]; <- list of PPDU IDs
*/
} wmi_mgmt_tx_compl_bundle_event_fixed_param;
@@ -8126,6 +8294,48 @@ typedef enum {
*/
WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE, /* 0x7d */
+ /** Parameter to configure BA mode.
+ * Valid values: 0- Auto mode,
+ * 1- Manual mode(addba req not sent).
+ * 2- buffer size 64
+ * 3- buffer size 256
+ */
+ WMI_VDEV_PARAM_BA_MODE, /* 0x7e */
+
+ /**
+ * VDEV parameter to force the modulated DTIM count to be used as the
+ * listen interval, regardless of whether WoW is enabled
+ * Default: Disabled.
+ * Valid values: 0- Disabled,
+ * 1- Enabled.
+ */
+ WMI_VDEV_PARAM_FORCED_MODDTIM_ENABLE, /* 0x7f */
+
+ /** specify the settings that are valid for auto rate transmissions.
+ * bits 7:0 (LTF): When bitmask is set, then corresponding LTF value is
+ * used for auto rate.
+ * BIT0 = 1 (WMI_HE_LTF_1X)
+ * BIT1 = 1 (WMI_HE_LTF_2X)
+ * BIT2 = 1 (WMI_HE_LTF_4X)
+ * BIT3-7 = Reserved bits.
+ * bits 15:8 (SGI): When bitmask is set, then corresponding SGI value is
+ * used for auto rate.
+ * BIT8 = 1 (400 NS)
+ * BIT9 = 1 (800 NS)
+ * BIT10 = 1 (1600 NS)
+ * BIT11 = 1 (3200 NS)
+ * BIT12-15 = Reserved bits.
+ * bits 31:16: Reserved bits; should be set to zero.
+ */
+ WMI_VDEV_PARAM_AUTORATE_MISC_CFG, /* 0x80 */
+
+ /** VDEV parameter to enable or disable RTT initiator mac address
+ * randomization.
+ * Default: Disabled.
+ * Valid values: 0 - Disable random mac, 1 - Enable random mac
+ */
+ WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_INITIATOR_RANDOM_MAC, /* 0x81 */
+
/*=== ADD NEW VDEV PARAM TYPES ABOVE THIS LINE ===
* The below vdev param types are used for prototyping, and are
@@ -10189,6 +10399,8 @@ enum {
WMI_AUTH_CCKM_RSNA,
WMI_AUTH_RSNA_FILS_SHA256,
WMI_AUTH_RSNA_FILS_SHA384,
+ WMI_AUTH_RSNA_SUITE_B_8021X_SHA256,
+ WMI_AUTH_RSNA_SUITE_B_8021X_SHA384,
};
typedef enum {
@@ -11446,6 +11658,7 @@ typedef enum event_type_e {
WOW_CHIP_POWER_FAILURE_DETECT_EVENT,
WOW_11D_SCAN_EVENT,
WOW_SAP_OBSS_DETECTION_EVENT,
+ WOW_BSS_COLOR_COLLISION_DETECT_EVENT,
} WOW_WAKE_EVENT_TYPE;
typedef enum wake_reason_e {
@@ -11503,6 +11716,7 @@ typedef enum wake_reason_e {
WOW_REASON_OIC_PING_OFFLOAD,
WOW_REASON_WLAN_DHCP_RENEW,
WOW_REASON_SAP_OBSS_DETECTION,
+ WOW_REASON_BSS_COLOR_COLLISION_DETECT,
WOW_REASON_DEBUG_TEST = 0xFF,
} WOW_WAKE_REASON_TYPE;
@@ -12635,11 +12849,24 @@ typedef struct {
A_UINT32 vdev_id; /** unique id identifying the VDEV */
A_UINT32 flags; /* status flags */
A_UINT32 refresh_cnt; /* number of successful GTK refresh exchanges since last SET operation */
+ /*
+ * As with all WMI messages, this message uses little-endian byte
+ * ordering within each A_UINT32 field.
+ * If a big-endian host is using automatic swapping of the bytes within
+ * each 4-byte A_UINT32 to automatically correct the endianness of the
+ * A_UINT32 fields as the message is uploaded from target --> host, the
+ * big-endian host will have to undo the automatic byte swapping for the
+ * below A_UINT8 fields, to restore them to their original order.
+ */
A_UINT8 replay_counter[GTK_REPLAY_COUNTER_BYTES]; /* current replay counter */
A_UINT8 igtk_keyIndex; /* Use if IGTK_OFFLOAD is defined */
A_UINT8 igtk_keyLength; /* Use if IGTK_OFFLOAD is defined */
A_UINT8 igtk_keyRSC[IGTK_PN_SIZE]; /* key replay sequence counter *//* Use if IGTK_OFFLOAD is defined */
A_UINT8 igtk_key[WMI_MAX_KEY_LEN]; /* Use if IGTK_OFFLOAD is defined */
+ A_UINT8 gtk_keyIndex; /* GTK key index */
+ A_UINT8 gtk_keyLength; /* GTK key length */
+ A_UINT8 gtk_keyRSC[GTK_REPLAY_COUNTER_BYTES]; /* GTK key replay sequence counter */
+ A_UINT8 gtk_key[WMI_MAX_KEY_LEN]; /* GTK key data */
} WMI_GTK_OFFLOAD_STATUS_EVENT_fixed_param;
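The endianness note above implies a fix-up step on big-endian hosts that use automatic per-A_UINT32 byte swapping; a hedged sketch of undoing that swap for the byte-array fields (the helper and the call sites are assumptions, not part of this header):

    /* Reverse the byte order within each 4-byte group to restore the
     * A_UINT8 arrays to their original (wire) order.
     */
    static void wmi_unswap_bytes(A_UINT8 *buf, A_UINT32 len)
    {
        A_UINT32 i;
        A_UINT8 tmp;

        for (i = 0; i + 4 <= len; i += 4) {
            tmp = buf[i];     buf[i]     = buf[i + 3]; buf[i + 3] = tmp;
            tmp = buf[i + 1]; buf[i + 1] = buf[i + 2]; buf[i + 2] = tmp;
        }
    }

    wmi_unswap_bytes(ev->replay_counter, GTK_REPLAY_COUNTER_BYTES);
    wmi_unswap_bytes(ev->gtk_key, WMI_MAX_KEY_LEN);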
typedef struct {
@@ -14125,10 +14352,13 @@ typedef struct
#define LPI_IE_BITMAP_FLAGS 0x00200000 /* reserved as a bitmap to indicate more scan information; one such use being to indicate if the on-going scan is interrupted or not */
#define LPI_IE_BITMAP_CACHING_REQD 0x00400000 /* extscan will use this field to indicate if this frame info needs to be cached in LOWI LP or not */
#define LPI_IE_BITMAP_REPORT_CONTEXT_HUB 0x00800000 /* extscan will use this field to indicate to LOWI LP whether to report result to context hub or not. */
-#define LPI_IE_BITMAP_CHRE_ESS 0x010000000 /* ESS capability info for CHRE */
-#define LPI_IE_BITMAP_CHRE_SEC_MODE 0x020000000 /* Security capability info for CHRE */
-#define LPI_IE_BITMAP_CHRE_SUPPORTED_RATE 0x040000000 /* Hightest MCS corresponding NCC for TX and RX */
-#define LPI_IE_BITMAP_COUNTRY_STRING 0x080000000 /* send country string inside Country IE to LOWI LP */
+#define LPI_IE_BITMAP_CHRE_RADIO_CHAIN 0x01000000 /* include radio chain and rssi per chain information if this bit is set - for CHRE */
+
+/* 0x02000000, 0x04000000, and 0x08000000 are unused / available */
+
+#define LPI_IE_BITMAP_CHRE_ESS 0x10000000 /* ESS capability info for CHRE */
+#define LPI_IE_BITMAP_CHRE_SEC_MODE 0x20000000 /* Security capability info for CHRE */
+#define LPI_IE_BITMAP_CHRE_SUPPORTED_RATE 0x40000000 /* Highest MCS corresponding NCC for TX and RX */
#define LPI_IE_BITMAP_ALL 0xFFFFFFFF
typedef struct {
@@ -14193,6 +14423,8 @@ typedef struct {
A_UINT32 num_ssids;
/** number of bytes in ie data. In the TLV ie_data[] */
A_UINT32 ie_len;
+ /** Scan control flags extended (see WMI_SCAN_FLAG_EXT_xxx) */
+ A_UINT32 scan_ctrl_flags_ext;
/**
* TLV (tag length value) parameters follow the scan_cmd
@@ -14536,6 +14768,99 @@ typedef struct {
*/
} wmi_peer_tid_msduq_qdepth_thresh_update_cmd_fixed_param;
+/**
+ * ACK policy to be followed for the TID
+ */
+typedef enum {
+ /** Used when the host does not want to configure the ACK policy */
+ WMI_PEER_TID_CONFIG_ACK_POLICY_IGNORE,
+ /** Allow ACK for the TID */
+ WMI_PEER_TID_CONFIG_ACK,
+ /** Do not expect ACK for the TID */
+ WMI_PEER_TID_CONFIG_NOACK,
+} WMI_PEER_TID_CONFIG_ACK_POLICY;
+
+/**
+ * Aggregation control policy for the TID
+ */
+typedef enum {
+ /** Used when the host does not want to configure the aggregation policy */
+ WMI_PEER_TID_CONFIG_AGGR_CONTROL_IGNORE,
+ /** Enable aggregation for the TID */
+ WMI_PEER_TID_CONFIG_AGGR_CONTROL_ENABLE,
+ /** Disable aggregation for the TID */
+ WMI_PEER_TID_CONFIG_AGGR_CONTROL_DISABLE,
+} WMI_PEER_TID_CONFIG_AGGR_CONTROL;
+
+/**
+ * Rate control policy for the TID
+ */
+typedef enum {
+ /** Used when the host does not want to configure the rate control policy */
+ WMI_PEER_TID_CONFIG_RATE_CONTROL_IGNORE,
+ /** Auto rate control */
+ WMI_PEER_TID_CONFIG_RATE_CONTROL_AUTO,
+ /** Fixed rate control */
+ WMI_PEER_TID_CONFIG_RATE_CONTROL_FIXED_RATE,
+} WMI_PEER_TID_CONFIG_RATE_CONTROL;
+
+/**
+ * SW retry threshold for the TID
+ */
+typedef enum {
+ /** Used when the host does not want to configure the SW retry threshold */
+ WMI_PEER_TID_SW_RETRY_IGNORE = 0,
+ WMI_PEER_TID_SW_RETRY_MIN = 1,
+ WMI_PEER_TID_SW_RETRY_MAX = 30,
+ /** No SW retry for the TID */
+ WMI_PEER_TID_SW_RETRY_NO_RETRY = 0xFFFFFFFF,
+} WMI_PEER_TID_CONFIG_SW_RETRY_THRESHOLD;
+
+/**
+ * Command format for the TID configuration
+ */
+typedef struct {
+ /** TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_peer_tid_configurations_cmd_fixed_param
+ */
+ A_UINT32 tlv_header;
+
+ /** vdev id */
+ A_UINT32 vdev_id;
+
+ /** peer MAC address */
+ wmi_mac_addr peer_mac_address;
+
+ /** TID number, generated by the caller.
+ * Valid range for QoS TID : 0-15
+ * Valid range for non QOS/Mgmt TID: 16-19
+ * Any other TID number is invalid.
+ */
+ A_UINT32 tid_num;
+
+ /** ACK policy - of type WMI_PEER_TID_CONFIG_ACK_POLICY */
+ A_UINT32 ack_policy;
+
+ /** Aggregation control - of type WMI_PEER_TID_CONFIG_AGGR_CONTROL */
+ A_UINT32 aggr_control;
+
+ /** Rate control - of type WMI_PEER_TID_CONFIG_RATE_CONTROL */
+ A_UINT32 rate_control;
+
+ /** Fixed rate control parameters - of type WMI_PEER_PARAM_FIXED_RATE.
+ * This is applicable only when rate_control is
+ * WMI_PEER_TID_CONFIG_RATE_CONTROL_FIXED_RATE
+ */
+ A_UINT32 rcode_rcflags;
+
+ /** MPDU SW retry threshold - of type WMI_PEER_TID_CONFIG_SW_RETRY_THRESHOLD
+ * This SW retry threshold limits the total number of retransmits of
+ * nacked or unacked MPDUs, but it is up to the FW to decide what
+ * tx rate to use during each retransmission.
+ */
+ A_UINT32 sw_retry_threshold;
+} wmi_peer_tid_configurations_cmd_fixed_param;
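A hedged example of filling in the new TID configuration command; vdev_id and peer_mac are assumed inputs and the TLV header setup is omitted, but the field and enum names come from this header:

    /* Disable aggregation and cap SW retries at 5 for QoS TID 3 of a peer,
     * leaving ACK policy and rate control at their "ignore" settings.
     */
    wmi_peer_tid_configurations_cmd_fixed_param cmd = { 0 };

    cmd.vdev_id            = vdev_id;
    cmd.peer_mac_address   = peer_mac;   /* wmi_mac_addr of the peer */
    cmd.tid_num            = 3;          /* QoS TID, valid range 0-15 */
    cmd.ack_policy         = WMI_PEER_TID_CONFIG_ACK_POLICY_IGNORE;
    cmd.aggr_control       = WMI_PEER_TID_CONFIG_AGGR_CONTROL_DISABLE;
    cmd.rate_control       = WMI_PEER_TID_CONFIG_RATE_CONTROL_IGNORE;
    cmd.sw_retry_threshold = 5;          /* within WMI_PEER_TID_SW_RETRY_MIN..MAX */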
+
typedef enum {
WMI_PEER_IND_SMPS = 0x0, /* spatial multiplexing power save */
WMI_PEER_IND_OMN, /* operating mode notification */
@@ -17355,6 +17680,41 @@ typedef struct wmi_sap_obss_detection_info_evt_s {
wmi_mac_addr matched_bssid_addr; /* valid when reason is WMI_SAP_OBSS_DETECTION_EVENT_REASON_PRESENT_NOTIFY */
} wmi_sap_obss_detection_info_evt_fixed_param;
+/** WMI command to enable the STA FW to handle BSS color change notifications from the AP */
+typedef struct {
+ A_UINT32 tlv_header; /* tag equals WMITLV_TAG_STRUC_wmi_bss_color_change_enable_fixed_param */
+ A_UINT32 vdev_id;
+ A_UINT32 enable;
+} wmi_bss_color_change_enable_fixed_param;
+
+typedef enum {
+ WMI_BSS_COLOR_COLLISION_DISABLE = 0,
+ WMI_BSS_COLOR_COLLISION_DETECTION,
+ WMI_BSS_COLOR_FREE_SLOT_TIMER_EXPIRY,
+ WMI_BSS_COLOR_FREE_SLOT_AVAILABLE,
+} WMI_BSS_COLOR_COLLISION_EVT_TYPE;
+
+/** Command to enable OBSS Color collision detection for both STA and AP mode */
+typedef struct {
+ A_UINT32 tlv_header; /* tag equals WMITLV_TAG_STRUC_wmi_obss_color_collision_det_config_fixed_param */
+ A_UINT32 vdev_id;
+ A_UINT32 flags; /* proposed for future use cases */
+ A_UINT32 evt_type; /* WMI_BSS_COLOR_COLLISION_EVT_TYPE */
+ A_UINT32 current_bss_color;
+ A_UINT32 detection_period_ms; /* scan interval for both AP and STA mode */
+ A_UINT32 scan_period_ms; /* scan period for passive scan to detect collision */
+ A_UINT32 free_slot_expiry_time_ms; /* FW notifies the host at timer expiry, after which the host disables bss color */
+} wmi_obss_color_collision_det_config_fixed_param;
+
+/** WMI event to notify the host of OBSS color collision detection, or of a free slot becoming available in AP mode */
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_obss_color_collision_evt_fixed_param */
+ A_UINT32 vdev_id;
+ A_UINT32 evt_type; /* WMI_BSS_COLOR_COLLISION_EVT_TYPE */
+ A_UINT32 bss_color_bitmap_bit0to31; /* Bit set indicating BSS color present */
+ A_UINT32 bss_color_bitmap_bit32to63; /* Bit set indicating BSS color present */
+} wmi_obss_color_collision_evt_fixed_param;
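A hedged helper sketch for reading the split 64-bit color bitmap carried by the collision event (the helper itself is illustrative, not part of this header):

    /* Return non-zero if the given BSS color (0..63) is marked present in
     * the event's bit0to31/bit32to63 bitmap pair.
     */
    static int wmi_obss_color_is_set(const wmi_obss_color_collision_evt_fixed_param *ev,
                                     A_UINT32 color)
    {
        if (color < 32)
            return !!(ev->bss_color_bitmap_bit0to31 & (1u << color));
        if (color < 64)
            return !!(ev->bss_color_bitmap_bit32to63 & (1u << (color - 32)));
        return 0;
    }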
+
/**
* OCB DCC types and structures.
*/
@@ -20260,6 +20620,8 @@ typedef struct {
wmi_ppe_threshold he_ppet5G;
/* chainmask table to be used for the MAC */
A_UINT32 chainmask_table_id;
+ /* PDEV ID to LMAC ID mapping */
+ A_UINT32 lmac_id;
} WMI_MAC_PHY_CAPABILITIES;
typedef struct {
@@ -21169,6 +21531,17 @@ static INLINE A_UINT8 *wmi_id_to_name(A_UINT32 wmi_command)
WMI_RETURN_STRING(WMI_BPF_SET_VDEV_ENABLE_CMDID);
WMI_RETURN_STRING(WMI_BPF_SET_VDEV_WORK_MEMORY_CMDID);
WMI_RETURN_STRING(WMI_BPF_GET_VDEV_WORK_MEMORY_CMDID);
+ WMI_RETURN_STRING(WMI_BSS_COLOR_CHANGE_ENABLE_CMDID);
+ WMI_RETURN_STRING(WMI_OBSS_COLOR_COLLISION_DET_CONFIG_CMDID);
+ WMI_RETURN_STRING(WMI_RUNTIME_DPD_RECAL_CMDID);
+ WMI_RETURN_STRING(WMI_TWT_ENABLE_CMDID);
+ WMI_RETURN_STRING(WMI_TWT_DISABLE_CMDID);
+ WMI_RETURN_STRING(WMI_TWT_ADD_DIALOG_CMDID);
+ WMI_RETURN_STRING(WMI_TWT_DEL_DIALOG_CMDID);
+ WMI_RETURN_STRING(WMI_TWT_PAUSE_DIALOG_CMDID);
+ WMI_RETURN_STRING(WMI_TWT_RESUME_DIALOG_CMDID);
+ WMI_RETURN_STRING(WMI_REQUEST_ROAM_SCAN_STATS_CMDID);
+ WMI_RETURN_STRING(WMI_PEER_TID_CONFIGURATIONS_CMDID);
}
return "Invalid WMI cmd";
@@ -21320,6 +21693,16 @@ typedef struct {
typedef struct {
/** TLV tag and len; tag equals
+ * WMITLV_TAG_STRUC_wmi_pdev_get_nfcal_power_fixed_param */
+ A_UINT32 tlv_header;
+ /** pdev_id for identifying the MAC
+ * See macros starting with WMI_PDEV_ID_ for values.
+ */
+ A_UINT32 pdev_id;
+} wmi_pdev_get_nfcal_power_fixed_param;
+
+typedef struct {
+ /** TLV tag and len; tag equals
* WMITLV_TAG_STRUC_wmi_coex_report_isolation_event_fixed_param */
A_UINT32 tlv_header;
/** Antenna isolation value in dB units, none zero value is valid while 0 means failed to do isolation measurement or corresponding chain is not active.
@@ -21616,12 +21999,21 @@ typedef enum {
/*
* Lay out of flags in wmi_wlm_config_cmd_fixed_param
*
-* |31 12| 11 | 10 |9 8|7 6|5 4|3 2| 1 | 0 |
-* +------+------+------+------+------+------+------+-----+-----+
-* | RSVD | SSLP | CSLP | RSVD | Roam | RSVD | DWLT | DFS | SUP |
-* +------+-------------+-------------+-------------------------+
-* | WAL | PS | Roam | Scan |
+* |31 17|16 14| 13 | 12 | 11 | 10 |9 8|7 6|5 4|3 2| 1 | 0 |
+* +------+-----+----+----+------+------+------+------+------+------+-----+-----+
+* | RSVD | NSS |EDCA| TRY| SSLP | CSLP | RSVD | Roam | RSVD | DWLT | DFS | SUP |
+* +----------------------+-------------+-------------+-------------------------+
+* | WAL | PS | Roam | Scan |
*
+* Flag values:
+* TRY: (1) enable short limit for retrying unacked tx, where the limit is
+* based on the traffic's latency level
+* (0) default tx retry behavior
+* EDCA: (1) Apply VO parameters on BE
+* (0) default behavior
+* NSS: (0) no Nss limits, other than those negotiated during association
+* (1) during 2-chain operation, tx only a single spatial stream
+* (2) - (7) reserved / invalid
*/
/* bit 0-3 of flags is used for scan operation */
/* bit 0: WLM_FLAGS_SCAN_SUPPRESS, suppress all scan and other bits would be ignored if bit is set */
@@ -21673,7 +22065,7 @@ typedef enum {
#define WLM_FLAGS_PS_DISABLE_SYS_SLEEP 1 /* disable sys sleep */
-/* bit 12-31 of flags is reserved for powersave and WAL */
+/* bit 17-31 of flags is reserved for powersave and WAL */
#define WLM_FLAGS_SCAN_IS_SUPPRESS(flag) WMI_GET_BITS(flag, 0, 1)
#define WLM_FLAGS_SCAN_SET_SUPPRESS(flag, val) WMI_SET_BITS(flag, 0, 1, val)
@@ -21687,6 +22079,12 @@ typedef enum {
#define WLM_FLAGS_PS_SET_CSS_CLPS_DISABLE(flag, val) WMI_SET_BITS(flag, 10, 1, val)
#define WLM_FLAGS_PS_IS_SYS_SLP_DISABLED(flag) WMI_GET_BITS(flag, 11, 1)
#define WLM_FLAGS_PS_SET_SYS_SLP_DISABLE(flag, val) WMI_SET_BITS(flag, 11, 1, val)
+#define WLM_FLAGS_WAL_LIMIT_TRY_ENABLED(flag) WMI_GET_BITS(flag, 12, 1)
+#define WLM_FLAGS_WAL_LIMIT_TRY_SET(flag, val) WMI_SET_BITS(flag, 12, 1, val)
+#define WLM_FLAGS_WAL_ADJUST_EDCA_ENABLED(flag) WMI_GET_BITS(flag, 13, 1)
+#define WLM_FLAGS_WAL_ADJUST_EDCA_SET(flag, val) WMI_SET_BITS(flag, 13, 1, val)
+#define WLM_FLAGS_WAL_1NSS_ENABLED(flag) (WMI_GET_BITS(flag, 14, 3) & 0x1)
+#define WLM_FLAGS_WAL_NSS_SET(flag, val) WMI_SET_BITS(flag, 14, 3, val)
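A hedged example of composing the flags word with the new WAL bits defined above (bit numbers per the layout diagram: TRY = bit 12, EDCA = bit 13, NSS = bits 14-16); the macros are from this header, the surrounding usage is assumed:

    A_UINT32 flags = 0;

    WLM_FLAGS_SCAN_SET_SUPPRESS(flags, 1);    /* suppress scans (bit 0) */
    WLM_FLAGS_WAL_LIMIT_TRY_SET(flags, 1);    /* short tx retry limit */
    WLM_FLAGS_WAL_ADJUST_EDCA_SET(flags, 1);  /* VO EDCA parameters on BE */
    WLM_FLAGS_WAL_NSS_SET(flags, 1);          /* single spatial stream in 2-chain operation */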
typedef struct {
/** TLV tag and len; tag equals
@@ -21714,6 +22112,223 @@ typedef struct {
A_UINT32 flags;
} wmi_wlm_config_cmd_fixed_param;
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_twt_enable_cmd_fixed_param */
+ /** pdev_id for identifying the MAC. See macros starting with WMI_PDEV_ID_ for values. In non-DBDC case host should set it to 0
+ * The host should never set this pdev_id to WMI_PDEV_ID_SOC,
+ * because the configuration parameters herein will be different
+ * for each MAC instance.
+ */
+ A_UINT32 pdev_id;
+ A_UINT32 sta_cong_timer_ms; /* STA TWT congestion timer timeout value, in ms */
+ A_UINT32 mbss_support; /* Flag indicating whether the AP TWT feature is supported in MBSS mode */
+ A_UINT32 default_slot_size; /* This is the default value for the TWT slot setup by AP (units = microseconds) */
+ A_UINT32 congestion_thresh_setup; /* Minimum congestion required to start setting up TWT sessions */
+ /*
+ * The congestion parameters below are in percent of occupied airtime.
+ */
+ A_UINT32 congestion_thresh_teardown; /* Minimum congestion below which TWT will be torn down */
+ A_UINT32 congestion_thresh_critical; /* Threshold above which TWT will not be active */
+ /*
+ * The interference parameters below use an abstract method of evaluating
+ * interference. The parameters are in percent, ranging from 0 for no
+ * interference, to 100 for interference extreme enough to completely
+ * block the signal of interest.
+ */
+ A_UINT32 interference_thresh_teardown; /* Minimum interference above which TWT will not be active */
+ A_UINT32 interference_thresh_setup; /* Interference must be below this threshold for a TWT session to be set up */
+ A_UINT32 min_no_sta_setup; /* Minimum no of STA required to start TWT setup */
+ A_UINT32 min_no_sta_teardown; /* Minimum no of STA below which TWT will be torn down */
+ A_UINT32 no_of_bcast_mcast_slots; /* Number of default slot sizes reserved for BCAST/MCAST delivery */
+ A_UINT32 min_no_twt_slots; /* Minimum no of available slots for TWT to be operational */
+ A_UINT32 max_no_sta_twt; /* Max no of STA with which TWT is possible (must be <= the wmi_resource_config's twt_ap_sta_count value) */
+ /*
+ * The below interval parameters have units of milliseconds.
+ */
+ A_UINT32 mode_check_interval; /* Interval between two successive checks to decide the mode of TWT */
+ A_UINT32 add_sta_slot_interval; /* Interval between decisions on creating TWT slots for STAs */
+ A_UINT32 remove_sta_slot_interval; /* Interval between decisions on removing TWT slots of STAs */
+} wmi_twt_enable_cmd_fixed_param;
+
+/* status code of enabling TWT */
+typedef enum _WMI_ENABLE_TWT_STATUS_T {
+ WMI_ENABLE_TWT_STATUS_OK, /* enabling TWT successfully completed */
+ WMI_ENABLE_TWT_STATUS_ALREADY_ENABLED, /* TWT already enabled */
+ WMI_ENABLE_TWT_STATUS_NOT_READY, /* FW not ready for enabling TWT */
+ WMI_ENABLE_TWT_INVALID_PARAM, /* invalid parameters */
+ WMI_ENABLE_TWT_STATUS_UNKNOWN_ERROR, /* enabling TWT failed with an unknown reason */
+} WMI_ENABLE_TWT_STATUS_T;
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_twt_enable_complete_event_fixed_param */
+ /** pdev_id for identifying the MAC. See macros starting with WMI_PDEV_ID_ for values. In non-DBDC case host should set it to 0 */
+ A_UINT32 pdev_id;
+ A_UINT32 status; /* WMI_ENABLE_TWT_STATUS_T */
+} wmi_twt_enable_complete_event_fixed_param;
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_twt_disable_cmd_fixed_param */
+ /** pdev_id for identifying the MAC. See macros starting with WMI_PDEV_ID_ for values. In non-DBDC case host should set it to 0 */
+ A_UINT32 pdev_id; /* host should never set it to WMI_PDEV_ID_SOC */
+} wmi_twt_disable_cmd_fixed_param;
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_twt_disable_complete_event_fixed_param */
+ A_UINT32 reserved0; /* unused right now */
+} wmi_twt_disable_complete_event_fixed_param;
+
+/* from IEEE 802.11ah section 9.4.2.200 */
+typedef enum _WMI_TWT_COMMAND_T {
+ WMI_TWT_COMMAND_REQUEST_TWT = 0,
+ WMI_TWT_COMMAND_SUGGEST_TWT = 1,
+ WMI_TWT_COMMAND_DEMAND_TWT = 2,
+ WMI_TWT_COMMAND_TWT_GROUPING = 3,
+ WMI_TWT_COMMAND_ACCEPT_TWT = 4,
+ WMI_TWT_COMMAND_ALTERNATE_TWT = 5,
+ WMI_TWT_COMMAND_DICTATE_TWT = 6,
+ WMI_TWT_COMMAND_REJECT_TWT = 7,
+} WMI_TWT_COMMAND_T;
+
+/* TWT command, refer to WMI_TWT_COMMAND_T */
+#define TWT_FLAGS_GET_CMD(flag) WMI_GET_BITS(flag, 0, 8)
+#define TWT_FLAGS_SET_CMD(flag, val) WMI_SET_BITS(flag, 0, 8, val)
+
+/* 0 means Individual TWT, 1 means Broadcast TWT */
+#define TWT_FLAGS_GET_BROADCAST(flag) WMI_GET_BITS(flag, 8, 1)
+#define TWT_FLAGS_SET_BROADCAST(flag, val) WMI_SET_BITS(flag, 8, 1, val)
+
+/* 0 means non-Trigger-enabled TWT, 1 means Trigger-enabled TWT */
+#define TWT_FLAGS_GET_TRIGGER(flag) WMI_GET_BITS(flag, 9, 1)
+#define TWT_FLAGS_SET_TRIGGER(flag, val) WMI_SET_BITS(flag, 9, 1, val)
+
+/* flow type 0 means announced TWT, 1 means un-announced TWT */
+#define TWT_FLAGS_GET_FLOW_TYPE(flag) WMI_GET_BITS(flag, 10, 1)
+#define TWT_FLAGS_SET_FLOW_TYPE(flag, val) WMI_SET_BITS(flag, 10, 1, val)
+
+/* 0 means TWT protection is required, 1 means TWT protection is not required */
+#define TWT_FLAGS_GET_PROTECTION(flag) WMI_GET_BITS(flag, 11, 1)
+#define TWT_FLAGS_SET_PROTECTION(flag, val) WMI_SET_BITS(flag, 11, 1, val)
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_twt_add_dialog_cmd_fixed_param */
+ A_UINT32 vdev_id; /* VDEV identifier */
+ wmi_mac_addr peer_macaddr; /* peer MAC address when vdev is AP VDEV */
+ /* dialog_id (TWT dialog ID)
+ * This dialog ID must be unique within its vdev.
+ */
+ A_UINT32 dialog_id;
+
+ /* 1. wake_intvl_mantis must be <= 0xFFFF
+ * 2. wake_intvl_us must be evenly divisible by wake_intvl_mantis,
+ * i.e., wake_intvl_us % wake_intvl_mantis == 0
+ * 3. the quotient of wake_intvl_us/wake_intvl_mantis must be a power of 2,
+ * i.e., wake_intvl_us/wake_intvl_mantis == 2^N, 0<=N<=31
+ * (see the validation sketch after this structure)
+ */
+ A_UINT32 wake_intvl_us; /* TWT Wake Interval in units of us */
+ A_UINT32 wake_intvl_mantis; /* TWT Wake Interval Mantissa */
+
+ /* wake_dura_us must be divided evenly by 256, i.e., wake_dura_us % 256 == 0 */
+ A_UINT32 wake_dura_us; /* TWT Wake Duration in units of us, must be <= 0xFFFF */
+
+ A_UINT32 sp_offset_us; /* time after TWT setup at which the first SP will start, in us */
+ A_UINT32 flags; /* TWT flags, refer to MACROs TWT_FLAGS_*(TWT_FLAGS_GET_CMD etc) */
+} wmi_twt_add_dialog_cmd_fixed_param;
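A hedged validation sketch for the wake interval constraints listed in the command above (the helper is illustrative; only the field semantics come from this header):

    /* The mantissa must fit in 16 bits, divide the interval evenly, and
     * leave a power-of-two quotient (2^N with 0 <= N <= 31, which is
     * implied for any 32-bit power of two).
     */
    static int twt_wake_intvl_valid(A_UINT32 wake_intvl_us, A_UINT32 wake_intvl_mantis)
    {
        A_UINT32 q;

        if (wake_intvl_mantis == 0 || wake_intvl_mantis > 0xFFFF)
            return 0;
        if (wake_intvl_us % wake_intvl_mantis)
            return 0;
        q = wake_intvl_us / wake_intvl_mantis;
        return q && !(q & (q - 1));
    }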
+
+/* status code of adding TWT dialog */
+typedef enum _WMI_ADD_TWT_STATUS_T {
+ WMI_ADD_TWT_STATUS_OK, /* adding TWT dialog successfully completed */
+ WMI_ADD_TWT_STATUS_TWT_NOT_ENABLED, /* TWT not enabled */
+ WMI_ADD_TWT_STATUS_USED_DIALOG_ID, /* TWT dialog ID is already used */
+ WMI_ADD_TWT_STATUS_INVALID_PARAM, /* invalid parameters */
+ WMI_ADD_TWT_STATUS_NOT_READY, /* FW not ready */
+ WMI_ADD_TWT_STATUS_NO_RESOURCE, /* FW resource exhausted */
+ WMI_ADD_TWT_STATUS_NO_ACK, /* peer AP/STA did not ACK the request/response frame */
+ WMI_ADD_TWT_STATUS_NO_RESPONSE, /* peer AP did not send the response frame */
+ WMI_ADD_TWT_STATUS_DENIED, /* AP did not accept the request */
+ WMI_ADD_TWT_STATUS_UNKNOWN_ERROR, /* adding TWT dialog failed with an unknown reason */
+} WMI_ADD_TWT_STATUS_T;
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_twt_add_dialog_complete_event_fixed_param */
+ A_UINT32 vdev_id; /* VDEV identifier */
+ A_UINT32 dialog_id; /* TWT dialog ID */
+ A_UINT32 status; /* refer to WMI_ADD_TWT_STATUS_T */
+} wmi_twt_add_dialog_complete_event_fixed_param;
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_twt_del_dialog_cmd_fixed_param */
+ A_UINT32 vdev_id; /* VDEV identifier */
+ A_UINT32 dialog_id; /* TWT dialog ID */
+} wmi_twt_del_dialog_cmd_fixed_param;
+
+/* status code of deleting TWT dialog */
+typedef enum _WMI_DEL_TWT_STATUS_T {
+ WMI_DEL_TWT_STATUS_OK, /* deleting TWT dialog successfully completed */
+ WMI_DEL_TWT_STATUS_DIALOG_ID_NOT_EXIST, /* TWT dialog ID does not exist */
+ WMI_DEL_TWT_STATUS_INVALID_PARAM, /* invalid parameters */
+ WMI_DEL_TWT_STATUS_DIALOG_ID_BUSY, /* FW is in the process of handling this dialog */
+ WMI_DEL_TWT_STATUS_NO_RESOURCE, /* FW resource exhausted */
+ WMI_DEL_TWT_STATUS_NO_ACK, /* peer AP/STA did not ACK the request/response frame */
+ WMI_DEL_TWT_STATUS_UNKNOWN_ERROR, /* deleting TWT dialog failed with an unknown reason */
+} WMI_DEL_TWT_STATUS_T;
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_twt_del_dialog_complete_event_fixed_param */
+ A_UINT32 vdev_id; /* VDEV identifier */
+ A_UINT32 dialog_id; /* TWT dialog ID */
+ A_UINT32 status; /* refer to WMI_DEL_TWT_STATUS_T */
+} wmi_twt_del_dialog_complete_event_fixed_param;
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_twt_pause_dialog_cmd_fixed_param */
+ A_UINT32 vdev_id; /* VDEV identifier */
+ A_UINT32 dialog_id; /* TWT dialog ID */
+} wmi_twt_pause_dialog_cmd_fixed_param;
+
+/* status code of pausing TWT dialog */
+typedef enum _WMI_PAUSE_TWT_STATUS_T {
+ WMI_PAUSE_TWT_STATUS_OK, /* pausing TWT dialog successfully completed */
+ WMI_PAUSE_TWT_STATUS_DIALOG_ID_NOT_EXIST, /* TWT dialog ID does not exist */
+ WMI_PAUSE_TWT_STATUS_INVALID_PARAM, /* invalid parameters */
+ WMI_PAUSE_TWT_STATUS_DIALOG_ID_BUSY, /* FW is in the process of handling this dialog */
+ WMI_PAUSE_TWT_STATUS_NO_RESOURCE, /* FW resource exhausted */
+ WMI_PAUSE_TWT_STATUS_NO_ACK, /* peer AP/STA did not ACK the request/response frame */
+ WMI_PAUSE_TWT_STATUS_UNKNOWN_ERROR, /* pausing TWT dialog failed with an unknown reason */
+} WMI_PAUSE_TWT_STATUS_T;
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_twt_pause_dialog_complete_event_fixed_param */
+ A_UINT32 vdev_id; /* VDEV identifier */
+ A_UINT32 dialog_id; /* TWT dialog ID */
+ A_UINT32 status; /* refer to WMI_PAUSE_TWT_STATUS_T */
+} wmi_twt_pause_dialog_complete_event_fixed_param;
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_twt_resume_dialog_cmd_fixed_param */
+ A_UINT32 vdev_id; /* VDEV identifier */
+ A_UINT32 dialog_id; /* TWT dialog ID */
+ A_UINT32 sp_offset_us; /* time after TWT resume at which the first SP will start, in us */
+} wmi_twt_resume_dialog_cmd_fixed_param;
+
+/* status code of resuming TWT dialog */
+typedef enum _WMI_RESUME_TWT_STATUS_T {
+ WMI_RESUME_TWT_STATUS_OK, /* resuming TWT dialog successfully completed */
+ WMI_RESUME_TWT_STATUS_DIALOG_ID_NOT_EXIST, /* TWT dialog ID does not exist */
+ WMI_RESUME_TWT_STATUS_INVALID_PARAM, /* invalid parameters */
+ WMI_RESUME_TWT_STATUS_DIALOG_ID_BUSY, /* FW is in the process of handling this dialog */
+ WMI_RESUME_TWT_STATUS_NOT_PAUSED, /* dialog not paused currently */
+ WMI_RESUME_TWT_STATUS_NO_RESOURCE, /* FW resource exhausted */
+ WMI_RESUME_TWT_STATUS_NO_ACK, /* peer AP/STA did not ACK the request/response frame */
+ WMI_RESUME_TWT_STATUS_UNKNOWN_ERROR, /* resuming TWT dialog failed with an unknown reason */
+} WMI_RESUME_TWT_STATUS_T;
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_twt_resume_dialog_complete_event_fixed_param */
+ A_UINT32 vdev_id; /* VDEV identifier */
+ A_UINT32 dialog_id; /* TWT dialog ID */
+ A_UINT32 status; /* refer to WMI_RESUME_TWT_STATUS_T */
+} wmi_twt_resume_dialog_complete_event_fixed_param;
+
typedef enum {
WMI_DMA_RING_CONFIG_MODULE_SPECTRAL,
} WMI_DMA_RING_SUPPORTED_MODULE;
@@ -21824,6 +22439,126 @@ typedef struct {
A_UINT32 paddr_hi;
} wmi_dma_buf_release_entry;
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_runtime_dpd_recal_cmd_fixed_param */
+ A_UINT32 enable; /* Enable/disable */
+
+ /* Thermal Thresholds,
+ * DPD recalibration will be triggered, when current temperature is
+ * either greater than (tmpt_base_c + dlt_tmpt_c_h),
+ * or less than (tmpt_base_c + dlt_tmpt_c_l).
+ * Here tmpt_base_c is the temperature in centigrade at the time DPD calibration was first run.
+ */
+ A_UINT32 dlt_tmpt_c_h;
+ A_UINT32 dlt_tmpt_c_l;
+
+ /* cooling_time_ms
+ * The time (in milliseconds) expected to be needed for the unit
+ * to cool from dlt_tmpt_c_h to dlt_tmpt_c_l.
+ */
+ A_UINT32 cooling_time_ms;
+
+ /* Max duration for dpd re-cal. Unit: ms */
+ A_UINT32 dpd_dur_max_ms;
+} wmi_runtime_dpd_recal_cmd_fixed_param;
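A hedged sketch of the trigger condition described by the thermal threshold comments above; how tmpt_base_c and the current temperature are obtained, and whether the deltas are signed, are assumptions:

    /* Recalibrate when the current temperature leaves the window around the
     * temperature recorded at the first DPD calibration (deltas treated as
     * signed offsets here).
     */
    static int dpd_recal_needed(int cur_tmpt_c, int tmpt_base_c,
                                const wmi_runtime_dpd_recal_cmd_fixed_param *cmd)
    {
        if (!cmd->enable)
            return 0;
        return cur_tmpt_c > tmpt_base_c + (int)cmd->dlt_tmpt_c_h ||
               cur_tmpt_c < tmpt_base_c + (int)cmd->dlt_tmpt_c_l;
    }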
+
+typedef enum {
+ WMI_ROAM_TRIGGER_REASON_NONE = 0,
+ WMI_ROAM_TRIGGER_REASON_PER,
+ WMI_ROAM_TRIGGER_REASON_BMISS,
+ WMI_ROAM_TRIGGER_REASON_LOW_RSSI,
+ WMI_ROAM_TRIGGER_REASON_HIGH_RSSI,
+ WMI_ROAM_TRIGGER_REASON_PERIODIC,
+ WMI_ROAM_TRIGGER_REASON_MAWC,
+ WMI_ROAM_TRIGGER_REASON_DENSE,
+ WMI_ROAM_TRIGGER_REASON_BACKGROUND,
+ WMI_ROAM_TRIGGER_REASON_FORCED,
+ WMI_ROAM_TRIGGER_REASON_BTM,
+ WMI_ROAM_TRIGGER_REASON_UNIT_TEST,
+ WMI_ROAM_TRIGGER_REASON_MAX,
+} WMI_ROAM_TRIGGER_REASON_ID;
+
+/* value for DENSE roam trigger */
+#define WMI_RX_TRAFFIC_ABOVE_THRESHOLD 0x1
+#define WMI_TX_TRAFFIC_ABOVE_THRESHOLD 0x2
+
+typedef struct {
+ A_UINT32 trigger_id; /* id from WMI_ROAM_TRIGGER_REASON_ID */
+ /* interpretation of trigger value is as follows, for different trigger IDs
+ * ID = PER -> value = PER percentage
+ * ID = LOW_RSSI -> value = rssi in dB wrt noise floor,
+ * ID = HIGH_RSSI -> value = rssi in dB wrt noise floor,
+ * ID = DENSE -> value = specification of whether the tx or rx traffic threshold was exceeded,
+ * (see WMI_[RX,TX]_TRAFFIC_ABOVE_THRESHOLD)
+ * ID = PERIODIC -> value = periodicity in ms
+ *
+ * for other IDs trigger_value would be 0 (invalid)
+ */
+ A_UINT32 trigger_value;
+} wmi_roam_scan_trigger_reason;
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_request_roam_scan_stats_cmd_fixed_param */
+ A_UINT32 vdev_id;
+} wmi_request_roam_scan_stats_cmd_fixed_param;
+
+typedef struct {
+ /*
+ * The timestamp is in units of ticks of a 19.2MHz clock.
+ * The timestamp is taken at roam scan start.
+ */
+ A_UINT32 lower32bit;
+ A_UINT32 upper32bit;
+} wmi_roaming_timestamp;
+
+typedef struct {
+ A_UINT32 tlv_header; /* TLV tag and len; tag equals WMITLV_TAG_STRUC_wmi_roam_scan_stats_event_fixed_param */
+ A_UINT32 vdev_id;
+ /* number of roam scans */
+ A_UINT32 num_roam_scans;
+ /* This TLV is followed by TLV's:
+ * A_UINT32 client_id[num_roam_scans]; based on WMI_SCAN_CLIENT_ID
+ * wmi_roaming_timestamp timestamp[num_roam_scans]; clock ticks at the time of scan start
+ * A_UINT32 num_channels[num_roam_scans]; number of channels that are scanned
+ * A_UINT32 chan_info[]; channel frequencies (MHz) in each scan
+ * The num_channels[] elements specify how many elements there are
+ * within chan_info[] for each scan.
+ * For example, if num_channels = [2, 3] then chan_info will have 5
+ * elements, with the first 2 elements from the first scan, and
+ * the last 3 elements from the second scan.
+ * wmi_mac_addr old_bssid[num_roam_scans]; bssid we are connected to at the time of roaming
+ * A_UINT32 is_roaming_success[num_roam_scans]; value is 1 if roaming is successful, 0 if roaming failed
+ * wmi_mac_addr new_bssid[num_roam_scans]; bssid after roaming
+ * A_UINT32 num_of_roam_candidates[num_roam_scans]; number of candidates found in each roam scan
+ * roam_scan_trigger_reason roam_reason[num_roam_scans]; reason for each roam scan
+ * wmi_mac_addr bssid[]; bssids of candidates in each roam scan
+ * The num_of_roam_candidates[] elements specify how many elements
+ * there are within bssid[] for each scan.
+ * For example, if num_of_roam_candidates = [2, 3] then bssid will
+ * have 5 elements, with the first 2 elements from the first scan,
+ * and the last 3 elements from the second scan.
+ * A_UINT32 score[]; score of candidates in each roam scan
+ * The num_of_roam_candidates[] elements specify how many elements
+ * there are within score[] for each scan.
+ * For example, if num_of_roam_candidates = [2, 3] then score will
+ * have 5 elements, with the first 2 elements from the first scan,
+ * and the last 3 elements from the second scan.
+ * A_UINT32 channel[]; channel frequency (MHz) of candidates in each roam scan
+ * The num_of_roam_candidates[] elements specify how many elements
+ * there are within channel[] for each scan.
+ * For example, if num_of_roam_candidates = [2, 3] then channel will
+ * have 5 elements, with the first 2 elements from the first scan,
+ * and the last 3 elements from the second scan.
+ * A_UINT32 rssi[]; rssi in dB w.r.t. noise floor of candidates
+ * in each roam scan.
+ * The num_of_roam_candidates[] elements specify how many elements
+ * there are within rssi[] for each scan.
+ * For example, if num_of_roam_candidates = [2, 3] then rssi will
+ * have 5 elements, with the first 2 elements from the first scan,
+ * and the last 3 elements from the second scan.
+ */
+} wmi_roam_scan_stats_event_fixed_param;
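A hedged host-side sketch of walking the flattened per-candidate arrays described in the comment above; the array pointers are assumed to have been extracted from the TLVs already:

    /* num_of_roam_candidates[i] gives the number of entries of bssid[],
     * score[], channel[] and rssi[] that belong to roam scan i, so a
     * running offset is kept while iterating over the scans.
     */
    A_UINT32 i, j, cand = 0;

    for (i = 0; i < ev->num_roam_scans; i++) {
        for (j = 0; j < num_of_roam_candidates[i]; j++, cand++) {
            /* bssid[cand], score[cand], channel[cand] and rssi[cand]
             * describe candidate j of roam scan i.
             */
        }
    }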
+
/* ADD NEW DEFS HERE */
diff --git a/drivers/staging/fw-api/fw/wmi_version.h b/drivers/staging/fw-api/fw/wmi_version.h
index 0850e213bda3..75c7c55841a4 100644
--- a/drivers/staging/fw-api/fw/wmi_version.h
+++ b/drivers/staging/fw-api/fw/wmi_version.h
@@ -36,7 +36,7 @@
#define __WMI_VER_MINOR_ 0
/** WMI revision number has to be incremented when there is a
* change that may or may not break compatibility. */
-#define __WMI_REVISION_ 491
+#define __WMI_REVISION_ 517
/** The Version Namespace should not be normally changed. Only
* host and firmware of the same WMI namespace will work
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h b/drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h
index a4253d530f4a..14d61b721260 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h
+++ b/drivers/staging/qca-wifi-host-cmn/hif/inc/hif.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -52,6 +52,7 @@ typedef void __iomem *A_target_id_t;
typedef void *hif_handle_t;
#define HIF_DBG_PRINT_RATE 1000
+#define HIF_RATE_LIMIT_CE_ACCESS_LOG (64)
#define HIF_TYPE_AR6002 2
#define HIF_TYPE_AR6003 3
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c
index f158123d1e41..222cebf8f521 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/ce/ce_service.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -644,9 +644,14 @@ int ce_send_fast(struct CE_handle *copyeng, qdf_nbuf_t msdu,
if (qdf_unlikely(CE_RING_DELTA(nentries_mask, write_index, sw_index - 1)
< SLOTS_PER_DATAPATH_TX)) {
- HIF_ERROR("Source ring full, required %d, available %d",
- SLOTS_PER_DATAPATH_TX,
- CE_RING_DELTA(nentries_mask, write_index, sw_index - 1));
+ static unsigned int rate_limit;
+
+ if (rate_limit & 0x0f)
+ HIF_ERROR("Source ring full, required %d, available %d",
+ SLOTS_PER_DATAPATH_TX,
+ CE_RING_DELTA(nentries_mask, write_index,
+ sw_index - 1));
+ rate_limit++;
OL_ATH_CE_PKT_ERROR_COUNT_INCR(scn, CE_RING_DELTA_FAIL);
Q_TARGET_ACCESS_END(scn);
qdf_spin_unlock_bh(&ce_state->ce_index_lock);
@@ -1803,7 +1808,8 @@ more_data:
CE_ENGINE_INT_STATUS_CLEAR(scn, ctrl_addr,
HOST_IS_COPY_COMPLETE_MASK);
} else {
- HIF_ERROR("%s: target access is not allowed", __func__);
+ HIF_ERROR_RL(HIF_RATE_LIMIT_CE_ACCESS_LOG,
+ "%s: target access is not allowed", __func__);
return;
}
@@ -2002,7 +2008,8 @@ more_watermarks:
CE_WATERMARK_MASK |
HOST_IS_COPY_COMPLETE_MASK);
} else {
- HIF_ERROR("%s: target access is not allowed", __func__);
+ HIF_ERROR_RL(HIF_RATE_LIMIT_CE_ACCESS_LOG,
+ "%s: target access is not allowed", __func__);
goto unlock_end;
}
@@ -2121,7 +2128,8 @@ ce_per_engine_handler_adjust(struct CE_state *CE_state,
return;
if (!TARGET_REGISTER_ACCESS_ALLOW(scn)) {
- HIF_ERROR("%s: target access is not allowed", __func__);
+ HIF_ERROR_RL(HIF_RATE_LIMIT_CE_ACCESS_LOG,
+ "%s: target access is not allowed", __func__);
return;
}
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_debug.h b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_debug.h
index a51437bebf31..dc687aa7e1ba 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/src/hif_debug.h
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/hif_debug.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016, 2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -40,6 +40,10 @@
#define HIF_DBG(args ...) \
QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_DEBUG, ## args)
+#define HIF_ERROR_RL(rate, args...) \
+ QDF_TRACE_RATE_LIMITED(rate, QDF_MODULE_ID_HIF, \
+ QDF_TRACE_LEVEL_ERROR, ## args)
+
#define HIF_ENTER(fmt, ...) QDF_TRACE(QDF_MODULE_ID_HIF, QDF_TRACE_LEVEL_INFO, \
"Enter: %s "fmt, __func__, ## __VA_ARGS__)
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.c b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.c
index 02f9767fdae5..1b08f8e7d035 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.c
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/pcie/if_pci.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -164,8 +164,8 @@ irqreturn_t hif_pci_interrupt_handler(int irq, void *arg)
struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(arg);
volatile int tmp;
- uint16_t val;
- uint32_t bar0;
+ uint16_t val = 0;
+ uint32_t bar0 = 0;
uint32_t fw_indicator_address, fw_indicator;
bool ssr_irq = false;
unsigned int host_cause, host_enable;
@@ -598,7 +598,7 @@ int hif_check_fw_reg(struct hif_opaque_softc *hif_ctx)
int hif_check_soc_status(struct hif_opaque_softc *hif_ctx)
{
struct hif_softc *scn = HIF_GET_SOFTC(hif_ctx);
- uint16_t device_id;
+ uint16_t device_id = 0;
uint32_t val;
uint16_t timeout_count = 0;
struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
@@ -2059,7 +2059,7 @@ static int hif_enable_pci(struct hif_pci_softc *sc,
{
void __iomem *mem;
int ret = 0;
- uint16_t device_id;
+ uint16_t device_id = 0;
struct hif_softc *ol_sc = HIF_GET_SOFTC(sc);
pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
@@ -2739,7 +2739,7 @@ int hif_pci_bus_suspend(struct hif_softc *scn)
*/
static int __hif_check_link_status(struct hif_softc *scn)
{
- uint16_t dev_id;
+ uint16_t dev_id = 0;
struct hif_pci_softc *sc = HIF_GET_PCI_SOFTC(scn);
struct hif_driver_state_callbacks *cbk = hif_get_callbacks_handle(scn);
@@ -3101,8 +3101,8 @@ static inline void hif_msm_pcie_debug_info(struct hif_pci_softc *sc) {};
*/
static int hif_log_soc_wakeup_timeout(struct hif_pci_softc *sc)
{
- uint16_t val;
- uint32_t bar;
+ uint16_t val = 0;
+ uint32_t bar = 0;
struct hif_opaque_softc *hif_hdl = GET_HIF_OPAQUE_HDL(sc);
struct hif_softc *scn = HIF_GET_SOFTC(sc);
struct HIF_CE_state *hif_state = HIF_GET_CE_STATE(sc);
diff --git a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h
index 3d440b523f69..d046d883feb7 100644
--- a/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h
+++ b/drivers/staging/qca-wifi-host-cmn/hif/src/snoc/hif_io32_snoc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -48,7 +48,8 @@ static inline void ce_enable_irq_in_individual_register(struct hif_softc *scn,
offset = HOST_IE_ADDRESS + CE_BASE_ADDRESS(ce_id);
if (!TARGET_REGISTER_ACCESS_ALLOW(scn)) {
- HIF_ERROR("%s: target access is not allowed", __func__);
+ HIF_ERROR_RL(HIF_RATE_LIMIT_CE_ACCESS_LOG,
+ "%s: target access is not allowed", __func__);
return;
}
hif_write32_mb(scn->mem + offset, 1);
@@ -61,13 +62,15 @@ static inline void ce_disable_irq_in_individual_register(struct hif_softc *scn,
offset = HOST_IE_ADDRESS + CE_BASE_ADDRESS(ce_id);
if (!TARGET_REGISTER_ACCESS_ALLOW(scn)) {
- HIF_ERROR("%s: target access is not allowed", __func__);
+ HIF_ERROR_RL(HIF_RATE_LIMIT_CE_ACCESS_LOG,
+ "%s: target access is not allowed", __func__);
return;
}
hif_write32_mb(scn->mem + offset, 0);
if (!TARGET_REGISTER_ACCESS_ALLOW(scn)) {
- HIF_ERROR("%s: target access is not allowed", __func__);
+ HIF_ERROR_RL(HIF_RATE_LIMIT_CE_ACCESS_LOG,
+ "%s: target access is not allowed", __func__);
return;
}
hif_read32_mb(scn->mem + offset);
diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc.c b/drivers/staging/qca-wifi-host-cmn/htc/htc.c
index 84e0a815817e..8e73390a195f 100644
--- a/drivers/staging/qca-wifi-host-cmn/htc/htc.c
+++ b/drivers/staging/qca-wifi-host-cmn/htc/htc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -473,7 +473,7 @@ A_STATUS htc_setup_target_buffer_assignments(HTC_TARGET *target)
for (i = 0; i < HTC_MAX_SERVICE_ALLOC_ENTRIES; i++) {
if (target->ServiceTxAllocTable[i].service_id != 0) {
AR_DEBUG_PRINTF(ATH_DEBUG_INIT,
- ("HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
+ ("SVS Index : %d TX : 0x%2.2X : alloc:%d",
i,
target->ServiceTxAllocTable[i].
service_id,
@@ -572,7 +572,7 @@ QDF_STATUS htc_wait_target(HTC_HANDLE HTCHandle)
target->MaxMsgsPerHTCBundle = 1;
AR_DEBUG_PRINTF(ATH_DEBUG_INIT,
- ("Target Ready! : transmit resources : %d size:%d, MaxMsgsPerHTCBundle = %d\n",
+ ("Target Ready! TX resource : %d size:%d, MaxMsgsPerHTCBundle = %d",
target->TotalTransmitCredits,
target->TargetCreditSize,
target->MaxMsgsPerHTCBundle));
@@ -698,12 +698,12 @@ QDF_STATUS htc_start(HTC_HANDLE HTCHandle)
if (!htc_credit_flow) {
AR_DEBUG_PRINTF(ATH_DEBUG_INIT,
- ("HTC will not use TX credit flow control\n"));
+ ("HTC will not use TX credit flow control"));
pSetupComp->SetupFlags |=
HTC_SETUP_COMPLETE_FLAGS_DISABLE_TX_CREDIT_FLOW;
} else {
AR_DEBUG_PRINTF(ATH_DEBUG_INIT,
- ("HTC using TX credit flow control\n"));
+ ("HTC using TX credit flow control"));
}
if ((hif_get_bus_type(target->hif_dev) == QDF_BUS_TYPE_SDIO) ||
diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_recv.c b/drivers/staging/qca-wifi-host-cmn/htc/htc_recv.c
index 574932baf6bd..ca32848da19a 100644
--- a/drivers/staging/qca-wifi-host-cmn/htc/htc_recv.c
+++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_recv.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -30,7 +30,7 @@
#include <qdf_nbuf.h> /* qdf_nbuf_t */
/* HTC Control message receive timeout msec */
-#define HTC_CONTROL_RX_TIMEOUT 3000
+#define HTC_CONTROL_RX_TIMEOUT 6000
#if defined(WLAN_DEBUG) || defined(DEBUG)
void debug_dump_bytes(uint8_t *buffer, uint16_t length, char *pDescription)
diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_send.c b/drivers/staging/qca-wifi-host-cmn/htc/htc_send.c
index bd45a80cb86e..7ce336e97422 100644
--- a/drivers/staging/qca-wifi-host-cmn/htc/htc_send.c
+++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_send.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -1505,8 +1505,17 @@ static inline QDF_STATUS __htc_send_pkt(HTC_HANDLE HTCHandle,
/* HTC API - htc_send_pkt */
QDF_STATUS htc_send_pkt(HTC_HANDLE HTCHandle, HTC_PACKET *pPacket)
{
- if (HTCHandle == NULL || pPacket == NULL)
+ if (HTCHandle == NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("%s: HTCHandle is NULL \n", __func__));
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ if (pPacket == NULL) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR,
+ ("%s: pPacket is NULL \n", __func__));
return QDF_STATUS_E_FAILURE;
+ }
AR_DEBUG_PRINTF(ATH_DEBUG_SEND,
("+-htc_send_pkt: Enter endPointId: %d, buffer: %pK, length: %d\n",
@@ -2185,17 +2194,17 @@ void htc_process_credit_rpt(HTC_TARGET *target, HTC_CREDIT_REPORT *pRpt,
#endif
- pEndpoint->TxCredits += rpt_credits;
-
if (pEndpoint->service_id == WMI_CONTROL_SVC) {
LOCK_HTC_CREDIT(target);
htc_credit_record(HTC_PROCESS_CREDIT_REPORT,
- pEndpoint->TxCredits,
+ pEndpoint->TxCredits + rpt_credits,
HTC_PACKET_QUEUE_DEPTH(&pEndpoint->
TxQueue));
UNLOCK_HTC_CREDIT(target);
}
+ pEndpoint->TxCredits += rpt_credits;
+
if (pEndpoint->TxCredits
&& HTC_PACKET_QUEUE_DEPTH(&pEndpoint->TxQueue)) {
UNLOCK_HTC_TX(target);
diff --git a/drivers/staging/qca-wifi-host-cmn/htc/htc_services.c b/drivers/staging/qca-wifi-host-cmn/htc/htc_services.c
index 5d5b2c7dd5e9..173ae3f66a9e 100644
--- a/drivers/staging/qca-wifi-host-cmn/htc/htc_services.c
+++ b/drivers/staging/qca-wifi-host-cmn/htc/htc_services.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -383,14 +383,14 @@ QDF_STATUS htc_connect_service(HTC_HANDLE HTCHandle,
}
AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("HTC Service:0x%4.4X, ULpipe:%d DLpipe:%d id:%d Ready\n",
+ ("SVC:0x%4.4X, ULpipe:%d DLpipe:%d id:%d Ready",
pEndpoint->service_id, pEndpoint->UL_PipeID,
pEndpoint->DL_PipeID, pEndpoint->Id));
if (disableCreditFlowCtrl && pEndpoint->TxCreditFlowEnabled) {
pEndpoint->TxCreditFlowEnabled = false;
AR_DEBUG_PRINTF(ATH_DEBUG_INFO,
- ("HTC Service:0x%4.4X ep:%d TX flow control disabled\n",
+ ("SVC:0x%4.4X ep:%d TX flow control disabled",
pEndpoint->service_id,
assignedEndpoint));
}
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debugfs.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debugfs.h
index 339e5ac84e04..94e7c527cb56 100644
--- a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debugfs.h
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_debugfs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -26,6 +26,38 @@
#include <qdf_status.h>
#include <i_qdf_debugfs.h>
+#include <qdf_atomic.h>
+#include <qdf_types.h>
+
+/* representation of qdf dentry */
+typedef __qdf_dentry_t qdf_dentry_t;
+typedef __qdf_debugfs_file_t qdf_debugfs_file_t;
+
+/* qdf file modes */
+#define QDF_FILE_USR_READ 00400
+#define QDF_FILE_USR_WRITE 00200
+
+#define QDF_FILE_GRP_READ 00040
+#define QDF_FILE_GRP_WRITE 00020
+
+#define QDF_FILE_OTH_READ 00004
+#define QDF_FILE_OTH_WRITE 00002
+
+/**
+ * struct qdf_debugfs_fops - qdf debugfs operations
+ * @show: Callback for show operation.
+ *        The following functions can be used to print data from the show
+ *        callback: qdf_debugfs_printf(), qdf_debugfs_hexdump() and
+ *        qdf_debugfs_write().
+ * @write: Callback for write operation.
+ * @priv: Private pointer which will be passed in the registered callbacks.
+ */
+struct qdf_debugfs_fops {
+ QDF_STATUS(*show)(qdf_debugfs_file_t file, void *arg);
+ QDF_STATUS(*write)(void *priv, const char *buf, qdf_size_t len);
+ void *priv;
+};
#ifdef WLAN_DEBUGFS
/**
@@ -42,7 +74,162 @@ QDF_STATUS qdf_debugfs_init(void);
*/
QDF_STATUS qdf_debugfs_exit(void);
-#else
+/**
+ * qdf_debugfs_create_dir() - create a debugfs directory
+ * @name: name of the new directory
+ * @parent: parent node. If NULL, defaults to base qdf_debugfs_root
+ *
+ * Return: dentry structure pointer in case of success, otherwise NULL.
+ *
+ */
+qdf_dentry_t qdf_debugfs_create_dir(const char *name, qdf_dentry_t parent);
+
+/**
+ * qdf_debugfs_create_file() - create a debugfs file
+ * @name: name of the file
+ * @mode: qdf file mode
+ * @parent: parent node. If NULL, defaults to base qdf_debugfs_root
+ * @fops: file operations { .read, .write ... }
+ *
+ * Return: dentry structure pointer in case of success, otherwise NULL.
+ *
+ */
+qdf_dentry_t qdf_debugfs_create_file(const char *name, uint16_t mode,
+ qdf_dentry_t parent,
+ struct qdf_debugfs_fops *fops);
+
+/**
+ * qdf_debugfs_printf() - print a formatted string into a debugfs file
+ * @file: debugfs file handle passed in fops->show() function
+ * @f: the format string to use
+ * @...: arguments for the format string
+ */
+void qdf_debugfs_printf(qdf_debugfs_file_t file, const char *f, ...);
+
+/**
+ * qdf_debugfs_hexdump() - print hexdump into debugfs file
+ * @file: debugfs file handle passed in fops->show() function.
+ * @buf: data
+ * @len: data length
+ *
+ */
+void qdf_debugfs_hexdump(qdf_debugfs_file_t file, const uint8_t *buf,
+ qdf_size_t len);
+
+/**
+ * qdf_debugfs_write() - write data into debugfs file
+ * @file: debugfs file handle passed in fops->show() function.
+ * @buf: data
+ * @len: data length
+ *
+ */
+void qdf_debugfs_write(qdf_debugfs_file_t file, const uint8_t *buf,
+ qdf_size_t len);
+
+/**
+ * qdf_debugfs_create_u8() - create a debugfs file for a u8 variable
+ * @name: name of the file
+ * @mode: qdf file mode
+ * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root'
+ * @value: pointer to a u8 variable (global/static)
+ *
+ * Return: dentry for the file; NULL in case of failure.
+ *
+ */
+qdf_dentry_t qdf_debugfs_create_u8(const char *name, uint16_t mode,
+ qdf_dentry_t parent, u8 *value);
+
+/**
+ * qdf_debugfs_create_u16() - create a debugfs file for a u16 variable
+ * @name: name of the file
+ * @mode: qdf file mode
+ * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root'
+ * @value: pointer to a u16 variable (global/static)
+ *
+ * Return: dentry for the file; NULL in case of failure.
+ *
+ */
+qdf_dentry_t qdf_debugfs_create_u16(const char *name, uint16_t mode,
+ qdf_dentry_t parent, u16 *value);
+
+/**
+ * qdf_debugfs_create_u32() - create a debugfs file for a u32 variable
+ * @name: name of the file
+ * @mode: qdf file mode
+ * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root'
+ * @value: pointer to a u32 variable (global/static)
+ *
+ * Return: dentry for the file; NULL in case of failure.
+ *
+ */
+qdf_dentry_t qdf_debugfs_create_u32(const char *name, uint16_t mode,
+ qdf_dentry_t parent, u32 *value);
+
+/**
+ * qdf_debugfs_create_u64() - create a debugfs file for a u64 variable
+ * @name: name of the file
+ * @mode: qdf file mode
+ * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root'
+ * @value: pointer to a u64 variable (global/static)
+ *
+ * Return: dentry for the file; NULL in case of failure.
+ *
+ */
+qdf_dentry_t qdf_debugfs_create_u64(const char *name, uint16_t mode,
+ qdf_dentry_t parent, u64 *value);
+
+/**
+ * qdf_debugfs_create_atomic() - create a debugfs file for an atomic variable
+ * @name: name of the file
+ * @mode: qdf file mode
+ * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root'
+ * @value: pointer to an atomic variable (global/static)
+ *
+ * Return: dentry for the file; NULL in case of failure.
+ *
+ */
+qdf_dentry_t qdf_debugfs_create_atomic(const char *name, uint16_t mode,
+ qdf_dentry_t parent,
+ qdf_atomic_t *value);
+
+/**
+ * qdf_debugfs_create_string() - create a debugfs file for a string
+ * @name: name of the file
+ * @mode: qdf file mode
+ * @parent: parent node. If NULL, defaults to base 'qdf_debugfs_root'
+ * @str: pointer to a NULL-terminated string (global/static).
+ *
+ * Return: dentry for the file; NULL in case of failure.
+ *
+ */
+qdf_dentry_t qdf_debugfs_create_string(const char *name, uint16_t mode,
+ qdf_dentry_t parent, char *str);
+
+/**
+ * qdf_debugfs_remove_dir_recursive() - remove directory recursively
+ * @d: debugfs node
+ *
+ * This function recursively removes a directory in debugfs that was
+ * previously created with a call to qdf_debugfs_create_file() or its
+ * variant functions.
+ */
+void qdf_debugfs_remove_dir_recursive(qdf_dentry_t d);
+
+/**
+ * qdf_debugfs_remove_dir() - remove debugfs directory
+ * @d: debugfs node
+ *
+ */
+void qdf_debugfs_remove_dir(qdf_dentry_t d);
+
+/**
+ * qdf_debugfs_remove_file() - remove debugfs file
+ * @d: debugfs node
+ *
+ */
+void qdf_debugfs_remove_file(qdf_dentry_t d);
+
+#else /* WLAN_DEBUGFS */
static inline QDF_STATUS qdf_debugfs_init(void)
{
@@ -54,7 +241,83 @@ static inline QDF_STATUS qdf_debugfs_exit(void)
return QDF_STATUS_E_NOSUPPORT;
}
-#endif /* WLAN_DEBUGFS */
-#endif /* _QDF_DEBUGFS_H */
+static inline qdf_dentry_t qdf_debugfs_create_dir(const char *name,
+ qdf_dentry_t parent)
+{
+ return NULL;
+}
+static inline qdf_dentry_t
+qdf_debugfs_create_file(const char *name, uint16_t mode, qdf_dentry_t parent,
+ struct qdf_debugfs_fops *fops)
+{
+ return NULL;
+}
+
+static inline void qdf_debugfs_printf(qdf_debugfs_file_t file, const char *f,
+ ...)
+{
+}
+static inline void qdf_debugfs_hexdump(qdf_debugfs_file_t file,
+ const uint8_t *buf, qdf_size_t len)
+{
+}
+
+static inline void qdf_debugfs_write(qdf_debugfs_file_t file,
+ const uint8_t *buf, qdf_size_t len)
+{
+}
+
+static inline qdf_dentry_t qdf_debugfs_create_u8(const char *name,
+ uint16_t mode,
+ qdf_dentry_t parent, u8 *value)
+{
+ return NULL;
+}
+
+static inline qdf_dentry_t qdf_debugfs_create_u16(const char *name,
+ uint16_t mode,
+ qdf_dentry_t parent,
+ u16 *value)
+{
+ return NULL;
+}
+
+static inline qdf_dentry_t qdf_debugfs_create_u32(const char *name,
+ uint16_t mode,
+ qdf_dentry_t parent,
+ u32 *value)
+{
+ return NULL;
+}
+
+static inline qdf_dentry_t qdf_debugfs_create_u64(const char *name,
+ uint16_t mode,
+ qdf_dentry_t parent,
+ u64 *value)
+{
+ return NULL;
+}
+
+static inline qdf_dentry_t qdf_debugfs_create_atomic(const char *name,
+ uint16_t mode,
+ qdf_dentry_t parent,
+ qdf_atomic_t *value)
+{
+ return NULL;
+}
+
+static inline qdf_dentry_t qdf_debugfs_create_string(const char *name,
+ uint16_t mode,
+ qdf_dentry_t parent, char *str)
+{
+ return NULL;
+}
+
+static inline void qdf_debugfs_remove_dir_recursive(qdf_dentry_t d) {}
+static inline void qdf_debugfs_remove_dir(qdf_dentry_t d) {}
+static inline void qdf_debugfs_remove_file(qdf_dentry_t d) {}
+
+#endif /* WLAN_DEBUGFS */
+#endif /* _QDF_DEBUGFS_H */
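
For illustration, a minimal sketch of how a driver might register a node through the qdf_debugfs API declared above; the my_* names and the packet counter are hypothetical and not part of this patch:

/* hypothetical driver-side usage of the new qdf_debugfs API */
#include <qdf_debugfs.h>
#include <qdf_status.h>

static u32 my_pkt_count;		/* illustrative counter to expose */
static qdf_dentry_t my_dir;

static QDF_STATUS my_stats_show(qdf_debugfs_file_t file, void *arg)
{
	qdf_debugfs_printf(file, "pkt_count: %u\n", my_pkt_count);
	/* any status other than QDF_STATUS_E_AGAIN ends the read */
	return QDF_STATUS_SUCCESS;
}

static struct qdf_debugfs_fops my_stats_fops = {
	.show = my_stats_show,
	.write = NULL,
	.priv = NULL,
};

static void my_debugfs_register(void)
{
	my_dir = qdf_debugfs_create_dir("my_driver", NULL);
	if (!my_dir)
		return;

	qdf_debugfs_create_file("stats",
				QDF_FILE_USR_READ | QDF_FILE_GRP_READ,
				my_dir, &my_stats_fops);
	qdf_debugfs_create_u32("pkt_count_raw", QDF_FILE_USR_READ,
			       my_dir, &my_pkt_count);
}

static void my_debugfs_unregister(void)
{
	qdf_debugfs_remove_dir_recursive(my_dir);
	my_dir = NULL;
}
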
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mc_timer.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mc_timer.h
index a64212bb4285..dfccd985cfea 100644
--- a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mc_timer.h
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_mc_timer.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -81,6 +81,7 @@ typedef struct qdf_mc_timer_s {
qdf_mutex_t lock;
QDF_TIMER_TYPE type;
QDF_TIMER_STATE state;
+ uint32_t cookie;
} qdf_mc_timer_t;
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_trace.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_trace.h
index 05e220c3f6c6..7d6b1b45f225 100644
--- a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_trace.h
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_trace.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -40,6 +40,8 @@
#include <qdf_status.h>
#include <qdf_nbuf.h>
#include <i_qdf_types.h>
+#include <qdf_debugfs.h>
+
/* Type declarations */
@@ -147,8 +149,8 @@ typedef struct s_qdf_trace_data {
#define DPTRACE(p)
#endif
-#define MAX_QDF_DP_TRACE_RECORDS 4000
-#define QDF_DP_TRACE_RECORD_SIZE 16
+#define MAX_QDF_DP_TRACE_RECORDS 2000
+#define QDF_DP_TRACE_RECORD_SIZE 40
#define INVALID_QDF_DP_TRACE_ADDR 0xffffffff
#define QDF_DP_TRACE_VERBOSITY_HIGH 3
#define QDF_DP_TRACE_VERBOSITY_MEDIUM 2
@@ -168,8 +170,8 @@ typedef struct s_qdf_trace_data {
* QDF_DP_TRACE_EVENT_RECORD - record events
* @QDF_DP_TRACE_BASE_VERBOSITY - below this are part of base verbosity
* @QDF_DP_TRACE_ICMP_PACKET_RECORD - record ICMP packets
- * @QDF_DP_TRACE_HDD_TX_PACKET_RECORD - record 32 bytes of tx pkt at HDD
- * @QDF_DP_TRACE_HDD_RX_PACKET_RECORD - record 32 bytes of rx pkt at HDD
+ * @QDF_DP_TRACE_TX_PACKET_RECORD - record tx pkt
+ * @QDF_DP_TRACE_RX_PACKET_RECORD - record rx pkt
* @QDF_DP_TRACE_HDD_TX_TIMEOUT - HDD tx timeout
* @QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT- SOFTAP HDD tx timeout
* @QDF_DP_TRACE_FREE_PACKET_PTR_RECORD - tx completion ptr record
@@ -202,8 +204,8 @@ enum QDF_DP_TRACE_ID {
QDF_DP_TRACE_BASE_VERBOSITY,
QDF_DP_TRACE_ICMP_PACKET_RECORD,
QDF_DP_TRACE_ICMPv6_PACKET_RECORD,
- QDF_DP_TRACE_HDD_TX_PACKET_RECORD,
- QDF_DP_TRACE_HDD_RX_PACKET_RECORD,
+ QDF_DP_TRACE_TX_PACKET_RECORD,
+ QDF_DP_TRACE_RX_PACKET_RECORD,
QDF_DP_TRACE_HDD_TX_TIMEOUT,
QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT,
QDF_DP_TRACE_FREE_PACKET_PTR_RECORD,
@@ -294,6 +296,14 @@ struct qdf_dp_trace_event_buf {
};
/**
+ * struct qdf_dp_trace_data_buf - nbuf data buffer
+ * @msdu_id: msdu_id of the packet (valid for TX; 0 for RX)
+ */
+struct qdf_dp_trace_data_buf {
+ uint16_t msdu_id;
+};
+
+/**
* struct qdf_dp_trace_record_s - Describes a record in DP trace
* @time: time when it got stored
* @code: Describes the particular event
@@ -302,7 +312,7 @@ struct qdf_dp_trace_event_buf {
* @pid : process id which stored the data in this record
*/
struct qdf_dp_trace_record_s {
- char time[20];
+ u64 time;
uint8_t code;
uint8_t data[QDF_DP_TRACE_RECORD_SIZE];
uint8_t size;
@@ -320,7 +330,11 @@ struct qdf_dp_trace_record_s {
* @enable: enable/disable DP trace
* @count: current packet number
* @live_mode_config: configuration as received during initialization
- * @live_mode: current live mode, enabled or disabled.
+ * @live_mode: current live mode, enabled or disabled, can be throttled based
+ * on throughput
+ * @force_live_mode: flag to enable live mode all the time for all packets.
+ * This can be set/unset from userspace and overrides other
+ * live mode flags.
* @print_pkt_cnt: count of number of packets printed in live mode
*.@high_tput_thresh: thresh beyond which live mode is turned off
*.@thresh_time_limit: max time, in terms of BW timer intervals to wait,
@@ -357,6 +371,9 @@ struct s_qdf_dp_trace_data {
bool enable;
bool live_mode_config;
bool live_mode;
+ uint32_t curr_pos;
+ uint32_t saved_tail;
+ bool force_live_mode;
uint8_t print_pkt_cnt;
uint8_t high_tput_thresh;
uint16_t thresh_time_limit;
@@ -386,6 +403,20 @@ struct s_qdf_dp_trace_data {
uint16_t icmpv6_ra;
};
+/**
+ * struct qdf_dpt_debugfs_state - state to control read to debugfs file
+ * @QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID: invalid state
+ * @QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT: initial state
+ * @QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS: read is in progress
+ * @QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE: read complete
+ */
+
+enum qdf_dpt_debugfs_state {
+ QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID,
+ QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT,
+ QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS,
+ QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE,
+};
/* Function declarations and documenation */
@@ -438,6 +469,9 @@ void qdf_trace_dump_all(void *, uint8_t, uint8_t, uint32_t, uint32_t);
#ifdef FEATURE_DP_TRACE
+#define QDF_DP_TRACE_RECORD_INFO_LIVE (0x1)
+#define QDF_DP_TRACE_RECORD_INFO_THROTTLED (0x1 << 1)
+
bool qdf_dp_trace_log_pkt(uint8_t session_id, struct sk_buff *skb,
enum qdf_proto_dir dir);
void qdf_dp_trace_init(bool live_mode_config, uint8_t thresh,
@@ -449,6 +483,37 @@ void qdf_dp_trace_set_value(uint8_t proto_bitmap, uint8_t no_of_records,
void qdf_dp_trace_set_track(qdf_nbuf_t nbuf, enum qdf_proto_dir dir);
void qdf_dp_trace(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code,
uint8_t *data, uint8_t size, enum qdf_proto_dir dir);
+
+/**
+ * qdf_dpt_get_curr_pos_debugfs() - get curr position to start read
+ * @file: debugfs file to read
+ * @state: state to control read to debugfs file
+ *
+ * Return: curr pos
+ */
+uint32_t qdf_dpt_get_curr_pos_debugfs(qdf_debugfs_file_t file,
+ enum qdf_dpt_debugfs_state state);
+/**
+ * qdf_dpt_dump_stats_debugfs() - dump DP Trace stats to debugfs file
+ * @file: debugfs file to read
+ * @curr_pos: curr position to start read
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS qdf_dpt_dump_stats_debugfs(qdf_debugfs_file_t file,
+ uint32_t curr_pos);
+
+/**
+ * qdf_dpt_set_value_debugfs() - set DP trace configuration values
+ * @proto_bitmap: bitmap of protocols to be traced
+ * @no_of_record: number of records to store
+ * @verbosity: verbosity level
+ *
+ * Return: none
+ */
+void qdf_dpt_set_value_debugfs(uint8_t proto_bitmap, uint8_t no_of_record,
+ uint8_t verbosity);
+
+
void qdf_dp_trace_dump_all(uint32_t count);
/**
@@ -459,15 +524,18 @@ void qdf_dp_trace_dump_all(uint32_t count);
void qdf_dp_trace_dump_stats(void);
void qdf_dp_trace_throttle_live_mode(bool high_bw_request);
typedef void (*tp_qdf_dp_trace_cb)(struct qdf_dp_trace_record_s*,
- uint16_t index, bool live);
+ uint16_t index, u8 info);
void qdf_dp_display_record(struct qdf_dp_trace_record_s *record,
- uint16_t index, bool live);
+ uint16_t index, u8 info);
+void qdf_dp_display_data_pkt_record(struct qdf_dp_trace_record_s *pRecord,
+ uint16_t rec_index, u8 info);
void qdf_dp_trace_ptr(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code,
uint8_t *data, uint8_t size, uint16_t msdu_id, uint16_t status);
-void qdf_dp_display_ptr_record(
- struct qdf_dp_trace_record_s *pRecord,
- uint16_t recIndex, bool live);
+void qdf_dp_trace_data_pkt(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code,
+ uint16_t msdu_id, enum qdf_proto_dir dir);
+void qdf_dp_display_ptr_record(struct qdf_dp_trace_record_s *record,
+ uint16_t rec_index, u8 info);
uint8_t qdf_dp_get_proto_bitmap(void);
void
qdf_dp_trace_proto_pkt(enum QDF_DP_TRACE_ID code, uint8_t vdev_id,
@@ -476,16 +544,16 @@ qdf_dp_trace_proto_pkt(enum QDF_DP_TRACE_ID code, uint8_t vdev_id,
bool print);
void qdf_dp_display_proto_pkt(
struct qdf_dp_trace_record_s *record,
- uint16_t index, bool live);
+ uint16_t index, u8 info);
void qdf_dp_trace_disable_live_mode(void);
void qdf_dp_trace_enable_live_mode(void);
void qdf_dp_trace_clear_buffer(void);
void qdf_dp_trace_mgmt_pkt(enum QDF_DP_TRACE_ID code, uint8_t vdev_id,
enum qdf_proto_type type, enum qdf_proto_subtype subtype);
void qdf_dp_display_mgmt_pkt(struct qdf_dp_trace_record_s *record,
- uint16_t index, bool live);
+ uint16_t index, u8 info);
void qdf_dp_display_event_record(struct qdf_dp_trace_record_s *record,
- uint16_t index, bool live);
+ uint16_t index, u8 info);
void qdf_dp_trace_record_event(enum QDF_DP_TRACE_ID code, uint8_t vdev_id,
enum qdf_proto_type type, enum qdf_proto_subtype subtype);
#else
@@ -510,11 +578,30 @@ void qdf_dp_trace_set_value(uint8_t proto_bitmap, uint8_t no_of_records,
uint8_t verbosity)
{
}
+
static inline
void qdf_dp_trace_dump_all(uint32_t count)
{
}
+static inline
+uint32_t qdf_dpt_get_curr_pos_debugfs(qdf_debugfs_file_t file,
+ enum qdf_dpt_debugfs_state state)
+{
+	return 0;
+}
+
+static inline
+QDF_STATUS qdf_dpt_dump_stats_debugfs(qdf_debugfs_file_t file,
+ uint32_t curr_pos)
+{
+	return QDF_STATUS_E_NOSUPPORT;
+}
+
+static inline
+void qdf_dpt_set_value_debugfs(uint8_t proto_bitmap, uint8_t no_of_record,
+ uint8_t verbosity)
+{
+}
+
static inline void qdf_dp_trace_dump_stats(void)
{
}
@@ -558,29 +645,44 @@ void __printf(3, 4) qdf_snprintf(char *str_buffer, unsigned int size,
#define QDF_SNPRINTF qdf_snprintf
#ifdef TSOSEG_DEBUG
+
+static inline void qdf_tso_seg_dbg_bug(char *msg)
+{
+ qdf_print(msg);
+ QDF_BUG(0);
+};
+
static inline
-int qdf_tso_seg_dbg_record(struct qdf_tso_seg_elem_t *tsoseg,
- uint16_t caller)
+int qdf_tso_seg_dbg_record(struct qdf_tso_seg_elem_t *tsoseg, short id)
{
int rc = -1;
+ unsigned int c;
+
+ qdf_assert(tsoseg);
- if (tsoseg != NULL) {
- tsoseg->dbg.cur++; tsoseg->dbg.cur &= 0x0f;
- tsoseg->dbg.history[tsoseg->dbg.cur] = caller;
- rc = tsoseg->dbg.cur;
+ if (id == TSOSEG_LOC_ALLOC) {
+ c = qdf_atomic_read(&(tsoseg->dbg.cur));
+		/* don't crash on the very first alloc on the segment */
+ c &= 0x0f;
+ /* allow only INIT and FREE ops before ALLOC */
+ if (tsoseg->dbg.h[c].id >= id)
+ qdf_tso_seg_dbg_bug("Rogue TSO seg alloc");
}
+ c = qdf_atomic_inc_return(&(tsoseg->dbg.cur));
+
+ c &= 0x0f;
+ tsoseg->dbg.h[c].ts = qdf_get_log_timestamp();
+ tsoseg->dbg.h[c].id = id;
+ rc = c;
+
return rc;
};
-static inline void qdf_tso_seg_dbg_bug(char *msg)
-{
- qdf_print(msg);
- QDF_BUG(0);
-};
static inline void
qdf_tso_seg_dbg_setowner(struct qdf_tso_seg_elem_t *tsoseg, void *owner)
{
- tsoseg->dbg.txdesc = owner;
+ if (tsoseg != NULL)
+ tsoseg->dbg.txdesc = owner;
};
static inline void
@@ -592,8 +694,7 @@ qdf_tso_seg_dbg_zero(struct qdf_tso_seg_elem_t *tsoseg)
#else
static inline
-int qdf_tso_seg_dbg_record(struct qdf_tso_seg_elem_t *tsoseg,
- uint16_t caller)
+int qdf_tso_seg_dbg_record(struct qdf_tso_seg_elem_t *tsoseg, short id)
{
return 0;
};
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_types.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_types.h
index 93af31796337..083afd01fbc5 100644
--- a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_types.h
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -35,6 +35,7 @@
/* Include Files */
#include <i_qdf_types.h>
+#include <qdf_atomic.h>
/* Preprocessor definitions and constants */
#define QDF_MAX_SGLIST 4
@@ -456,6 +457,8 @@ void qdf_vtrace_msg(QDF_MODULE_ID module, QDF_TRACE_LEVEL level,
#define qdf_vprint __qdf_vprint
#define qdf_snprint __qdf_snprint
+#define qdf_kstrtoint __qdf_kstrtoint
+
#ifdef WLAN_OPEN_P2P_INTERFACE
/* This should match with WLAN_MAX_INTERFACES */
#define QDF_MAX_CONCURRENCY_PERSONA (4)
@@ -549,7 +552,7 @@ struct qdf_tso_frag_t {
};
#define FRAG_NUM_MAX 6
-#define TSO_SEG_MAGIC_COOKIE 0x7EED
+#define TSO_SEG_MAGIC_COOKIE 0x1EED
/**
* struct qdf_tso_flags_t - TSO specific flags
@@ -629,20 +632,33 @@ enum tsoseg_dbg_caller_e {
TSOSEG_LOC_UNDEFINED,
TSOSEG_LOC_INIT1,
TSOSEG_LOC_INIT2,
+ TSOSEG_LOC_FREE,
+ TSOSEG_LOC_ALLOC,
TSOSEG_LOC_DEINIT,
+ TSOSEG_LOC_GETINFO,
+ TSOSEG_LOC_FILLHTTSEG,
+ TSOSEG_LOC_FILLCMNSEG,
TSOSEG_LOC_PREPARETSO,
TSOSEG_LOC_TXPREPLLFAST,
TSOSEG_LOC_UNMAPTSO,
- TSOSEG_LOC_ALLOC,
- TSOSEG_LOC_FREE,
+ TSOSEG_LOC_UNMAPLAST,
+ TSOSEG_LOC_FORCE_FREE,
};
#ifdef TSOSEG_DEBUG
+/**
+ * WARNING: Don't change the history size without changing the wrap
+ * code in qdf_tso_seg_dbg_record function
+ */
#define MAX_TSO_SEG_ACT_HISTORY 16
+struct qdf_tso_seg_dbg_history_t {
+ uint64_t ts;
+ short id;
+};
struct qdf_tso_seg_dbg_t {
void *txdesc; /* owner - (ol_txrx_tx_desc_t *) */
- int cur; /* index of last valid entry */
- uint16_t history[MAX_TSO_SEG_ACT_HISTORY];
+ qdf_atomic_t cur; /* index of last valid entry */
+ struct qdf_tso_seg_dbg_history_t h[MAX_TSO_SEG_ACT_HISTORY];
};
#endif /* TSOSEG_DEBUG */
@@ -653,8 +669,10 @@ struct qdf_tso_seg_dbg_t {
*/
struct qdf_tso_seg_elem_t {
struct qdf_tso_seg_t seg;
- uint16_t cookie:15,
- on_freelist:1;
+ uint32_t cookie:13,
+ on_freelist:1,
+ sent_to_target:1,
+ force_free:1;
struct qdf_tso_seg_elem_t *next;
#ifdef TSOSEG_DEBUG
struct qdf_tso_seg_dbg_t dbg;
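
The WARNING above the history array refers to the "c &= 0x0f" wrap in qdf_tso_seg_dbg_record(); that mask only stays correct while MAX_TSO_SEG_ACT_HISTORY is a power of two. A hypothetical helper, not part of this patch, showing the relationship:

/* illustrative only: the ring-index wrap assumes a power-of-two history size */
#define MY_TSO_HISTORY_SIZE	16		/* MAX_TSO_SEG_ACT_HISTORY */
#define MY_TSO_HISTORY_MASK	(MY_TSO_HISTORY_SIZE - 1)	/* 0x0f, as used above */

static inline unsigned int my_tso_dbg_next_slot(unsigned int cur)
{
	/* equivalent to the "c &= 0x0f" wrap; 32 entries would need 0x1f */
	return (cur + 1) & MY_TSO_HISTORY_MASK;
}
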
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_util.h b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_util.h
index 8e9b0dcd3c8f..d3059cd10f88 100644
--- a/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_util.h
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/inc/qdf_util.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -540,4 +540,16 @@ uint64_t qdf_do_mod(uint64_t dividend, uint32_t divisor)
{
return __qdf_do_mod(dividend, divisor);
}
+
+#ifdef ENABLE_SMMU_S1_TRANSLATION
+/**
+ * qdf_get_ipa_smmu_status() - to get IPA SMMU status
+ *
+ * Return: IPA SMMU status
+ */
+static inline bool qdf_get_ipa_smmu_status(void)
+{
+ return __qdf_get_ipa_smmu_status();
+}
+#endif
#endif /*_QDF_UTIL_H*/
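
A hypothetical caller of the new helper, not part of this patch, only to illustrate how the reported status would typically gate SMMU-specific buffer handling:

static bool my_ipa_needs_iova_mapping(void)
{
	/* when SMMU S1 translation is on, buffers handed to IPA must be IOVAs */
	return qdf_get_ipa_smmu_status();
}
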
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_debugfs.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_debugfs.h
index 4f64484a3cdf..feaa3f68a18e 100644
--- a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_debugfs.h
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_debugfs.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -28,9 +28,31 @@
#include <linux/fs.h>
#include <linux/debugfs.h>
+typedef struct dentry *__qdf_dentry_t;
+typedef struct seq_file *__qdf_debugfs_file_t;
+
#ifdef WLAN_DEBUGFS
+/**
+ * qdf_debugfs_get_root() - get debugfs root
+ *
+ * Return: dentry * or NULL in case of failure
+ */
struct dentry *qdf_debugfs_get_root(void);
+/**
+ * qdf_debugfs_get_filemode() - get Linux specific file mode
+ * @mode: This is a bitmap of file modes,
+ * QDF_FILE_USR_READ
+ * QDF_FILE_USR_WRITE
+ * QDF_FILE_OTH_READ
+ * QDF_FILE_OTH_WRITE
+ * QDF_FILE_GRP_READ
+ * QDF_FILE_GRP_WRITE
+ *
+ * Return: Linux specific file mode
+ */
+umode_t qdf_debugfs_get_filemode(uint16_t mode);
+
#endif /* WLAN_DEBUGFS */
#endif /* _I_QDF_DEBUGFS_H */
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_types.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_types.h
index f13046c44390..81861ec1912f 100644
--- a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_types.h
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_types.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -278,6 +278,9 @@ enum __qdf_net_wireless_evcode {
#define __qdf_vprint vprintk
#define __qdf_snprint snprintf
#define __qdf_vsnprint vsnprintf
+#define qdf_kstrtoint __qdf_kstrtoint
+
+#define __qdf_kstrtoint kstrtoint
#define __QDF_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
#define __QDF_DMA_TO_DEVICE DMA_TO_DEVICE
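
A hypothetical debugfs write callback, not part of this patch, showing how the new qdf_kstrtoint wrapper (mapped to kstrtoint on Linux) would typically parse user input; the my_* names are illustrative:

static int my_threshold;

static QDF_STATUS my_threshold_write(void *priv, const char *buf,
				     qdf_size_t len)
{
	int val;

	/* buf is NUL-terminated by the qdf debugfs write path */
	if (qdf_kstrtoint(buf, 0, &val))
		return QDF_STATUS_E_INVAL;

	my_threshold = val;
	return QDF_STATUS_SUCCESS;
}
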
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_util.h b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_util.h
index 0c3b4ee2b50c..5c220f6ba965 100644
--- a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_util.h
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/i_qdf_util.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -64,6 +64,10 @@
#include <linux/byteorder/generic.h>
#endif
+#ifdef ENABLE_SMMU_S1_TRANSLATION
+#include <linux/ipa.h>
+#endif
+
/*
* Generic compiler-dependent macros if defined by the OS
*/
@@ -401,4 +405,22 @@ uint64_t __qdf_do_mod(uint64_t dividend, uint32_t divisor)
return do_div(dividend, divisor);
}
+#ifdef ENABLE_SMMU_S1_TRANSLATION
+/**
+ * __qdf_get_ipa_smmu_status() - get IPA SMMU status
+ *
+ * Return: IPA SMMU status
+ */
+static bool __qdf_get_ipa_smmu_status(void)
+{
+ struct ipa_smmu_in_params params_in;
+ struct ipa_smmu_out_params params_out;
+
+ params_in.smmu_client = IPA_SMMU_WLAN_CLIENT;
+ ipa_get_smmu_params(&params_in, &params_out);
+
+ return params_out.smmu_enable;
+}
+#endif
+
#endif /*_I_QDF_UTIL_H*/
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_debugfs.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_debugfs.c
index 12bc3038f505..f7d79390b2d8 100644
--- a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_debugfs.c
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_debugfs.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -24,13 +24,20 @@
#include <qdf_debugfs.h>
#include <i_qdf_debugfs.h>
#include <qdf_module.h>
+#include <qdf_mem.h>
+#include <qdf_trace.h>
+
+/* Private state used by the qdf debugfs sequence operations */
+struct qdf_debugfs_seq_priv {
+ bool stop;
+};
/* entry for root debugfs directory*/
-static struct dentry *qdf_debugfs_root;
+static qdf_dentry_t qdf_debugfs_root;
QDF_STATUS qdf_debugfs_init(void)
{
- qdf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME"_qdf", NULL);
+ qdf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (!qdf_debugfs_root)
return QDF_STATUS_E_FAILURE;
@@ -41,6 +48,9 @@ qdf_export_symbol(qdf_debugfs_init);
QDF_STATUS qdf_debugfs_exit(void)
{
+ if (!qdf_debugfs_root)
+ return QDF_STATUS_SUCCESS;
+
debugfs_remove_recursive(qdf_debugfs_root);
qdf_debugfs_root = NULL;
@@ -48,8 +58,406 @@ QDF_STATUS qdf_debugfs_exit(void)
}
qdf_export_symbol(qdf_debugfs_exit);
-struct dentry *qdf_debugfs_get_root(void)
+qdf_dentry_t qdf_debugfs_get_root(void)
{
return qdf_debugfs_root;
}
+umode_t qdf_debugfs_get_filemode(uint16_t mode)
+{
+ umode_t ret = 0;
+
+ if (mode & QDF_FILE_USR_READ)
+ ret |= 0400;
+ if (mode & QDF_FILE_USR_WRITE)
+ ret |= 0200;
+
+ if (mode & QDF_FILE_GRP_READ)
+ ret |= 0040;
+ if (mode & QDF_FILE_GRP_WRITE)
+ ret |= 0020;
+
+ if (mode & QDF_FILE_OTH_READ)
+ ret |= 0004;
+ if (mode & QDF_FILE_OTH_WRITE)
+ ret |= 0002;
+
+ return ret;
+}
+
+/**
+ * ---------------------- Implementation note ---------------------------------
+ *
+ * A read on the debugfs file goes through the kernel seq_file interface. A
+ * sequence begins with a call to start(). If it returns a non-NULL value,
+ * next() is called; it acts as an iterator whose goal is to walk through all
+ * the data. Each time next() is called, show() is also called to write data
+ * into the buffer read by the user. next() is called until it returns NULL;
+ * when it does, the sequence ends and stop() is called.
+ *
+ * NOTE: When a sequence is finished, another one starts. That means that
+ * at the end of function stop(), the function start() is called again. This
+ * loop finishes when the function start() returns NULL.
+ * ----------------------------------------------------------------------------
+ */
+
+/* .seq_start() */
+static void *qdf_debugfs_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct qdf_debugfs_seq_priv *priv;
+
+ priv = qdf_mem_malloc(sizeof(*priv));
+ if (!priv)
+ return NULL;
+
+ priv->stop = false;
+
+ return priv;
+}
+
+/* .seq_next() */
+static void *qdf_debugfs_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct qdf_debugfs_seq_priv *priv = v;
+
+ if (priv)
+ ++*pos;
+
+ if (priv && priv->stop) {
+ qdf_mem_free(priv);
+ priv = NULL;
+ }
+
+ return priv;
+}
+
+/* .seq_stop() */
+static void qdf_debugfs_seq_stop(struct seq_file *seq, void *v)
+{
+ qdf_mem_free(v);
+}
+
+/* .seq_show() */
+static int qdf_debugfs_seq_show(struct seq_file *seq, void *v)
+{
+ struct qdf_debugfs_seq_priv *priv = v;
+ struct qdf_debugfs_fops *fops;
+ QDF_STATUS status;
+
+ fops = seq->private;
+
+ if (fops && fops->show) {
+ status = fops->show(seq, fops->priv);
+
+ if (priv && (status != QDF_STATUS_E_AGAIN))
+ priv->stop = true;
+ }
+
+ return 0;
+}
+
+void qdf_debugfs_printf(qdf_debugfs_file_t file, const char *f, ...)
+{
+ va_list args;
+
+ va_start(args, f);
+ seq_vprintf(file, f, args);
+ va_end(args);
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0))
+
+void qdf_debugfs_hexdump(qdf_debugfs_file_t file, const uint8_t *buf,
+ qdf_size_t len)
+{
+ seq_hex_dump(file, "", DUMP_PREFIX_OFFSET, 16, 4, buf, len, false);
+}
+
+#else
+
+void qdf_debugfs_hexdump(qdf_debugfs_file_t file, const uint8_t *buf,
+ qdf_size_t len)
+{
+ const size_t rowsize = 16;
+ const size_t groupsize = 4;
+ char *dst;
+ size_t dstlen, readlen;
+ int prefix = 0;
+ size_t commitlen;
+
+ while (len > 0 && (file->size > file->count)) {
+ seq_printf(file, "%.8x: ", prefix);
+
+ readlen = min(len, rowsize);
+ dstlen = seq_get_buf(file, &dst);
+ hex_dump_to_buffer(buf, readlen, rowsize, groupsize, dst,
+ dstlen, false);
+ commitlen = strnlen(dst, dstlen);
+ seq_commit(file, commitlen);
+ seq_putc(file, '\n');
+
+ len = (len > rowsize) ? len - rowsize : 0;
+ buf += readlen;
+ prefix += rowsize;
+ }
+}
+
+#endif
+
+void qdf_debugfs_write(qdf_debugfs_file_t file, const uint8_t *buf,
+ qdf_size_t len)
+{
+ seq_write(file, buf, len);
+}
+
+/* sequential file operation table */
+static const struct seq_operations __qdf_debugfs_seq_ops = {
+ .start = qdf_debugfs_seq_start,
+ .next = qdf_debugfs_seq_next,
+ .stop = qdf_debugfs_seq_stop,
+ .show = qdf_debugfs_seq_show,
+};
+
+/* .open() */
+static int qdf_seq_open(struct inode *inode, struct file *file)
+{
+ void *private = inode->i_private;
+ struct seq_file *seq;
+ int rc;
+
+ /**
+ * Note: seq_open() will allocate a struct seq_file and store its
+ * pointer in @file->private_data. It warns if private_data is not NULL.
+ */
+
+ rc = seq_open(file, &__qdf_debugfs_seq_ops);
+
+ if (rc == 0) {
+ seq = file->private_data;
+ seq->private = private;
+ }
+
+ return rc;
+}
+
+/* .write() */
+static ssize_t qdf_seq_write(struct file *filp, const char __user *ubuf,
+ size_t len, loff_t *ppos)
+{
+ struct qdf_debugfs_fops *fops;
+ struct seq_file *seq;
+ u8 *buf;
+ ssize_t rc = 0;
+
+ if (len == 0)
+ return 0;
+
+ seq = filp->private_data;
+ fops = seq->private;
+ if (fops && fops->write) {
+		buf = qdf_mem_malloc(len + 1);
+		if (!buf)
+			return -ENOMEM;
+
+		buf[len] = '\0';
+ rc = simple_write_to_buffer(buf, len, ppos, ubuf, len);
+ fops->write(fops->priv, buf, len + 1);
+ qdf_mem_free(buf);
+ }
+
+ return rc;
+}
+
+/* debugfs file operation table */
+static const struct file_operations __qdf_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = qdf_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ .write = qdf_seq_write,
+};
+
+qdf_dentry_t qdf_debugfs_create_dir(const char *name, qdf_dentry_t parent)
+{
+ qdf_dentry_t dir;
+
+ if (!name)
+ return NULL;
+ if (!parent)
+ parent = qdf_debugfs_get_root();
+
+ dir = debugfs_create_dir(name, parent);
+
+ if (IS_ERR_OR_NULL(dir)) {
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+ "%s creation failed", name);
+ dir = NULL;
+ }
+
+ return dir;
+}
+qdf_export_symbol(qdf_debugfs_create_dir);
+
+qdf_dentry_t qdf_debugfs_create_file(const char *name, uint16_t mode,
+ qdf_dentry_t parent,
+ struct qdf_debugfs_fops *fops)
+{
+ qdf_dentry_t file;
+ umode_t filemode;
+
+ if (!name || !fops)
+ return NULL;
+
+ if (!parent)
+ parent = qdf_debugfs_get_root();
+
+ filemode = qdf_debugfs_get_filemode(mode);
+ file = debugfs_create_file(name, filemode, parent, fops,
+ &__qdf_debugfs_fops);
+
+ if (IS_ERR_OR_NULL(file)) {
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+ "%s creation failed 0x%pK", name, file);
+ file = NULL;
+ }
+
+ return file;
+}
+qdf_export_symbol(qdf_debugfs_create_file);
+
+qdf_dentry_t qdf_debugfs_create_u8(const char *name, uint16_t mode,
+ qdf_dentry_t parent, u8 *value)
+{
+ umode_t filemode;
+
+ if (!name)
+ return NULL;
+
+ if (!parent)
+ parent = qdf_debugfs_get_root();
+
+ filemode = qdf_debugfs_get_filemode(mode);
+ return debugfs_create_u8(name, filemode, parent, value);
+}
+
+qdf_dentry_t qdf_debugfs_create_u16(const char *name, uint16_t mode,
+ qdf_dentry_t parent, u16 *value)
+{
+ umode_t filemode;
+
+ if (!name)
+ return NULL;
+
+ if (!parent)
+ parent = qdf_debugfs_get_root();
+
+ filemode = qdf_debugfs_get_filemode(mode);
+ return debugfs_create_u16(name, filemode, parent, value);
+}
+qdf_export_symbol(qdf_debugfs_create_u16);
+
+qdf_dentry_t qdf_debugfs_create_u32(const char *name,
+ uint16_t mode,
+ qdf_dentry_t parent, u32 *value)
+{
+ umode_t filemode;
+
+ if (!name)
+ return NULL;
+
+ if (!parent)
+ parent = qdf_debugfs_get_root();
+
+ filemode = qdf_debugfs_get_filemode(mode);
+ return debugfs_create_u32(name, filemode, parent, value);
+}
+qdf_export_symbol(qdf_debugfs_create_u32);
+
+qdf_dentry_t qdf_debugfs_create_u64(const char *name, uint16_t mode,
+ qdf_dentry_t parent, u64 *value)
+{
+ umode_t filemode;
+
+ if (!name)
+ return NULL;
+
+ if (!parent)
+ parent = qdf_debugfs_get_root();
+
+ filemode = qdf_debugfs_get_filemode(mode);
+ return debugfs_create_u64(name, filemode, parent, value);
+}
+qdf_export_symbol(qdf_debugfs_create_u64);
+
+qdf_dentry_t qdf_debugfs_create_atomic(const char *name, uint16_t mode,
+ qdf_dentry_t parent, qdf_atomic_t *value)
+{
+ umode_t filemode;
+
+ if (!name)
+ return NULL;
+
+ if (!parent)
+ parent = qdf_debugfs_get_root();
+
+ filemode = qdf_debugfs_get_filemode(mode);
+ return debugfs_create_atomic_t(name, filemode, parent, value);
+}
+qdf_export_symbol(qdf_debugfs_create_atomic);
+
+static int qdf_debugfs_string_show(struct seq_file *seq, void *pos)
+{
+ char *str = seq->private;
+
+ seq_puts(seq, str);
+ seq_putc(seq, '\n');
+
+ return 0;
+}
+
+static int qdf_debugfs_string_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, qdf_debugfs_string_show, inode->i_private);
+}
+
+static const struct file_operations qdf_string_fops = {
+ .owner = THIS_MODULE,
+ .open = qdf_debugfs_string_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release
+};
+
+qdf_dentry_t qdf_debugfs_create_string(const char *name, uint16_t mode,
+ qdf_dentry_t parent, char *str)
+{
+ umode_t filemode;
+
+ if (!name)
+ return NULL;
+
+ if (!parent)
+ parent = qdf_debugfs_get_root();
+
+ filemode = qdf_debugfs_get_filemode(mode);
+ return debugfs_create_file(name, filemode, parent, str,
+ &qdf_string_fops);
+}
+qdf_export_symbol(qdf_debugfs_create_string);
+
+void qdf_debugfs_remove_dir_recursive(qdf_dentry_t d)
+{
+ debugfs_remove_recursive(d);
+}
+qdf_export_symbol(qdf_debugfs_remove_dir_recursive);
+
+void qdf_debugfs_remove_dir(qdf_dentry_t d)
+{
+ debugfs_remove(d);
+}
+qdf_export_symbol(qdf_debugfs_remove_dir);
+
+void qdf_debugfs_remove_file(qdf_dentry_t d)
+{
+ debugfs_remove(d);
+}
+qdf_export_symbol(qdf_debugfs_remove_file);
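
The sequence machinery above keeps calling the registered show() callback until it returns something other than QDF_STATUS_E_AGAIN. A hypothetical callback, not part of this patch, that streams a large buffer across several passes; the my_* names are illustrative:

static uint8_t my_log_buf[4096];	/* illustrative data source */
static uint32_t my_log_pos;

static QDF_STATUS my_log_show(qdf_debugfs_file_t file, void *arg)
{
	uint32_t chunk = 256;

	if (my_log_pos >= sizeof(my_log_buf)) {
		my_log_pos = 0;			/* reset for the next reader */
		return QDF_STATUS_SUCCESS;	/* stop the sequence */
	}

	if (chunk > sizeof(my_log_buf) - my_log_pos)
		chunk = sizeof(my_log_buf) - my_log_pos;

	qdf_debugfs_hexdump(file, &my_log_buf[my_log_pos], chunk);
	my_log_pos += chunk;

	return QDF_STATUS_E_AGAIN;		/* ask to be shown again */
}
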
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mc_timer.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mc_timer.c
index 417c2abc15be..4e4c035a7eae 100644
--- a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mc_timer.c
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_mc_timer.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -40,6 +40,7 @@
#include <linux/export.h>
#ifdef CONFIG_MCL
#include <cds_mc_timer.h>
+#include <cds_sched.h>
#endif
/* Preprocessor definitions and constants */
@@ -53,6 +54,9 @@
static unsigned int persistent_timer_count;
static qdf_mutex_t persistent_timer_count_lock;
+static qdf_spinlock_t qdf_mc_timer_cookie_lock;
+static uint32_t g_qdf_mc_timer_cookie;
+
/* Function declarations and documenation */
/**
@@ -119,6 +123,7 @@ void qdf_timer_module_init(void)
QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
"Initializing the QDF MC timer module");
qdf_mutex_create(&persistent_timer_count_lock);
+ qdf_spinlock_create(&qdf_mc_timer_cookie_lock);
}
qdf_export_symbol(qdf_timer_module_init);
@@ -662,10 +667,25 @@ QDF_STATUS qdf_mc_timer_start(qdf_mc_timer_t *timer, uint32_t expiration_time)
qdf_spin_unlock_irqrestore(&timer->platform_info.spinlock);
+ qdf_spin_lock_irqsave(&qdf_mc_timer_cookie_lock);
+ timer->cookie = g_qdf_mc_timer_cookie++;
+ qdf_spin_unlock_irqrestore(&qdf_mc_timer_cookie_lock);
+
return QDF_STATUS_SUCCESS;
}
qdf_export_symbol(qdf_mc_timer_start);
+#ifdef CONFIG_MCL
+static void qdf_remove_timer_from_sys_msg(uint32_t timer_cookie)
+{
+ cds_remove_timer_from_sys_msg(timer_cookie);
+}
+#else
+static inline void qdf_remove_timer_from_sys_msg(uint32_t timer_cookie)
+{
+}
+#endif
+
/**
* qdf_mc_timer_stop() - stop a QDF timer
* @timer: Pointer to timer object
@@ -706,6 +726,8 @@ QDF_STATUS qdf_mc_timer_stop(qdf_mc_timer_t *timer)
QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
"%s: Cannot stop timer in state = %d",
__func__, timer->state);
+ qdf_remove_timer_from_sys_msg(timer->cookie);
+
return QDF_STATUS_SUCCESS;
}
@@ -777,6 +799,7 @@ void qdf_timer_module_deinit(void)
QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH,
"De-Initializing the QDF MC timer module");
qdf_mutex_destroy(&persistent_timer_count_lock);
+ qdf_spinlock_destroy(&qdf_mc_timer_cookie_lock);
}
qdf_export_symbol(qdf_timer_module_deinit);
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c
index aa75bace8950..96c8eacc36f2 100644
--- a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_nbuf.c
@@ -1022,9 +1022,6 @@ __qdf_nbuf_data_get_icmp_subtype(uint8_t *data)
subtype = (uint8_t)(*(uint8_t *)
(data + ICMP_SUBTYPE_OFFSET));
- QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
- "ICMP proto type: 0x%02x", subtype);
-
switch (subtype) {
case ICMP_REQUEST:
proto_subtype = QDF_PROTO_ICMP_REQ;
@@ -1057,9 +1054,6 @@ __qdf_nbuf_data_get_icmpv6_subtype(uint8_t *data)
subtype = (uint8_t)(*(uint8_t *)
(data + ICMPV6_SUBTYPE_OFFSET));
- QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
- "ICMPv6 proto type: 0x%02x", subtype);
-
switch (subtype) {
case ICMPV6_REQUEST:
proto_subtype = QDF_PROTO_ICMPV6_REQ;
@@ -2575,6 +2569,7 @@ static inline void __qdf_nbuf_fill_tso_cmn_seg_info(
tso_cmn_info->eit_hdr_len,
curr_seg->seg.tso_flags.tcp_seq_num,
curr_seg->seg.total_len);
+ qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_FILLCMNSEG);
}
/**
@@ -2630,8 +2625,14 @@ uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
tso_frag_vaddr = skb->data + tso_cmn_info.eit_hdr_len;
/* get the length of the next tso fragment */
tso_frag_len = min(skb_frag_len, tso_seg_size);
- tso_frag_paddr = dma_map_single(osdev->dev,
- tso_frag_vaddr, tso_frag_len, DMA_TO_DEVICE);
+ tso_frag_paddr = dma_map_single(osdev->dev, tso_frag_vaddr,
+ tso_frag_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(osdev->dev,
+ tso_frag_paddr))) {
+ qdf_print("%s:%d DMA mapping error!\n", __func__, __LINE__);
+ qdf_assert(0);
+ return 0;
+ }
TSO_DEBUG("%s[%d] skb frag len %d tso frag len %d\n", __func__,
__LINE__, skb_frag_len, tso_frag_len);
num_seg = tso_info->num_segs;
@@ -2745,7 +2746,8 @@ uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(osdev->dev,
tso_frag_paddr))) {
- qdf_print("DMA mapping error!\n");
+ qdf_print("%s:%d DMA mapping error!\n",
+ __func__, __LINE__);
qdf_assert(0);
return 0;
}
@@ -2755,6 +2757,7 @@ uint32_t __qdf_nbuf_get_tso_info(qdf_device_t osdev, struct sk_buff *skb,
if (!num_seg)
curr_seg->seg.tso_flags.fin = tso_cmn_info.tcphdr->fin;
+ qdf_tso_seg_dbg_record(curr_seg, TSOSEG_LOC_GETINFO);
curr_seg = curr_seg->next;
}
return tso_info->num_segs;
@@ -2795,13 +2798,13 @@ void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
qdf_assert(0);
return;
}
- qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
dma_unmap_single(osdev->dev,
tso_seg->seg.tso_frags[num_frags].paddr,
tso_seg->seg.tso_frags[num_frags].length,
QDF_DMA_TO_DEVICE);
tso_seg->seg.tso_frags[num_frags].paddr = 0;
num_frags--;
+ qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPTSO);
}
if (is_last_seg) {
@@ -2816,6 +2819,7 @@ void __qdf_nbuf_unmap_tso_segment(qdf_device_t osdev,
tso_seg->seg.tso_frags[0].length,
QDF_DMA_TO_DEVICE);
tso_seg->seg.tso_frags[0].paddr = 0;
+ qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_UNMAPLAST);
}
}
qdf_export_symbol(__qdf_nbuf_unmap_tso_segment);
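
The hunks above add dma_mapping_error() checks after each dma_map_single() in the TSO path. A minimal sketch of that pattern, with a hypothetical helper name, not part of this patch:

#include <linux/dma-mapping.h>

static dma_addr_t my_map_tso_frag(struct device *dev, void *vaddr, size_t len)
{
	dma_addr_t paddr = dma_map_single(dev, vaddr, len, DMA_TO_DEVICE);

	/* a mapping is only usable once dma_mapping_error() has cleared it */
	if (unlikely(dma_mapping_error(dev, paddr)))
		return 0;	/* caller treats 0 as "mapping failed" */

	return paddr;
}
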
diff --git a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_trace.c b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_trace.c
index 4d3a253d7da1..438d9e46aca5 100644
--- a/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_trace.c
+++ b/drivers/staging/qca-wifi-host-cmn/qdf/linux/src/qdf_trace.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -61,6 +61,8 @@ typedef struct {
unsigned char module_name_str[4];
} module_trace_info;
+#define DP_TRACE_META_DATA_STRLEN 50
+
#define QDF_DEFAULT_TRACE_LEVEL \
((1 << QDF_TRACE_LEVEL_FATAL) | (1 << QDF_TRACE_LEVEL_ERROR))
@@ -403,7 +405,7 @@ qdf_export_symbol(qdf_trace_display);
#define ROW_SIZE 16
/* Buffer size = data bytes(2 hex chars plus space) + NULL */
-#define BUFFER_SIZE ((ROW_SIZE * 3) + 1)
+#define BUFFER_SIZE ((QDF_DP_TRACE_RECORD_SIZE * 3) + 1)
/**
* qdf_trace_hex_dump() - externally called hex dump function
@@ -818,8 +820,16 @@ int qdf_sprint_symbol(char *buffer, void *addr)
qdf_export_symbol(qdf_sprint_symbol);
#ifdef FEATURE_DP_TRACE
+#define QDF_DP_TRACE_PREPEND_STR_SIZE 100
+/*
+ * One DP trace record cannot be greater than 300 bytes.
+ * Its maximum size is QDF_DP_TRACE_PREPEND_STR_SIZE(100) + BUFFER_SIZE(121).
+ * Always update QDF_DP_TRACE_MAX_RECORD_SIZE accordingly whenever either of
+ * the two macros above changes.
+ */
+#define QDF_DP_TRACE_MAX_RECORD_SIZE 300
static void qdf_dp_unused(struct qdf_dp_trace_record_s *record,
- uint16_t index, bool live)
+ uint16_t index, u8 info)
{
qdf_print("%s: QDF_DP_TRACE_MAX event should not be generated",
__func__);
@@ -864,6 +874,11 @@ void qdf_dp_trace_init(bool live_mode_config, uint8_t thresh,
for (i = 0; i < ARRAY_SIZE(qdf_dp_trace_cb_table); i++)
qdf_dp_trace_cb_table[i] = qdf_dp_display_record;
+ qdf_dp_trace_cb_table[QDF_DP_TRACE_TX_PACKET_RECORD] =
+ qdf_dp_trace_cb_table[QDF_DP_TRACE_RX_PACKET_RECORD] =
+ qdf_dp_trace_cb_table[QDF_DP_TRACE_DROP_PACKET_RECORD] =
+ qdf_dp_display_data_pkt_record;
+
qdf_dp_trace_cb_table[QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD] =
qdf_dp_trace_cb_table[QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD] =
qdf_dp_trace_cb_table[QDF_DP_TRACE_FREE_PACKET_PTR_RECORD] =
@@ -979,32 +994,50 @@ void qdf_dp_trace_set_track(qdf_nbuf_t nbuf, enum qdf_proto_dir dir)
qdf_export_symbol(qdf_dp_trace_set_track);
#define DPTRACE_PRINT(args...) \
- QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG, ## args)
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO_HIGH, ## args)
+
+/* Number of bytes to be grouped together while printing DP-Trace data */
+#define QDF_DUMP_DP_GROUP_SIZE 6
/**
- * dump_hex_trace() - Display the data in buffer
+ * dump_dp_hex_trace() - Display the data in buffer
* @str: string to prepend the hexdump with.
* @buf: buffer which contains data to be displayed
* @buf_len: defines the size of the data to be displayed
*
- * Return: None
+ * Return: QDF_STATUS
*/
-static void dump_dp_hex_trace(char *str, uint8_t *buf, uint8_t buf_len)
+static QDF_STATUS
+dump_dp_hex_trace(char *prepend_str, uint8_t *inbuf, uint8_t inbuf_len)
{
- unsigned char linebuf[BUFFER_SIZE];
- const u8 *ptr = buf;
- int i, linelen, remaining = buf_len;
-
- /* Dump the bytes in the last line */
- for (i = 0; i < buf_len; i += ROW_SIZE) {
- linelen = min(remaining, ROW_SIZE);
- remaining -= ROW_SIZE;
-
- hex_dump_to_buffer(ptr + i, linelen, ROW_SIZE, 1,
- linebuf, sizeof(linebuf), false);
+ unsigned char outbuf[BUFFER_SIZE];
+ const u8 *inbuf_ptr = inbuf;
+ char *outbuf_ptr = outbuf;
+ int outbytes_written = 0;
+
+	if (!inbuf || !inbuf_len)
+ return QDF_STATUS_E_INVAL;
+
+ qdf_mem_set(outbuf, 0, sizeof(outbuf));
+ do {
+ outbytes_written += scnprintf(outbuf_ptr,
+ BUFFER_SIZE - outbytes_written,
+ "%02x", *inbuf_ptr);
+ outbuf_ptr = outbuf + outbytes_written;
+
+ if ((inbuf_ptr - inbuf) &&
+ (inbuf_ptr - inbuf + 1) % QDF_DUMP_DP_GROUP_SIZE == 0) {
+ outbytes_written +=
+ scnprintf(outbuf_ptr,
+ BUFFER_SIZE - outbytes_written,
+ " ");
+ outbuf_ptr = outbuf + outbytes_written;
+ }
+ inbuf_ptr++;
+ } while (inbuf_ptr < (inbuf + inbuf_len));
+ DPTRACE_PRINT("%s %s", prepend_str, outbuf);
- DPTRACE_PRINT("DPT: %s %s", str, linebuf);
- }
+ return QDF_STATUS_SUCCESS;
}
/**
@@ -1035,8 +1068,8 @@ const char *qdf_dp_code_to_string(enum QDF_DP_TRACE_ID code)
return "EVENT:";
case QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD:
return "HDD: TX: PTR:";
- case QDF_DP_TRACE_HDD_TX_PACKET_RECORD:
- return "HDD: TX: DATA:";
+ case QDF_DP_TRACE_TX_PACKET_RECORD:
+ return "TX:";
case QDF_DP_TRACE_CE_PACKET_PTR_RECORD:
return "CE: TX: PTR:";
case QDF_DP_TRACE_CE_FAST_PACKET_PTR_RECORD:
@@ -1051,8 +1084,8 @@ const char *qdf_dp_code_to_string(enum QDF_DP_TRACE_ID code)
return "HTT: RX: OF: PTR:";
case QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD:
return "HDD: RX: PTR:";
- case QDF_DP_TRACE_HDD_RX_PACKET_RECORD:
- return "HDD: RX: DATA:";
+ case QDF_DP_TRACE_RX_PACKET_RECORD:
+ return "RX:";
case QDF_DP_TRACE_TXRX_QUEUE_PACKET_PTR_RECORD:
return "TXRX: TX: Q: PTR:";
case QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD:
@@ -1207,29 +1240,115 @@ static bool qdf_dp_enable_check(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code,
if (qdf_dp_trace_enable_track(code) == false)
return false;
- if ((nbuf) && ((QDF_NBUF_CB_TX_PACKET_TRACK(nbuf) !=
- QDF_NBUF_TX_PKT_DATA_TRACK) ||
- ((dir == QDF_TX) && (QDF_NBUF_CB_TX_DP_TRACE(nbuf) == 0)) ||
- ((dir == QDF_RX) && (QDF_NBUF_CB_RX_DP_TRACE(nbuf) == 0))))
+ if (!nbuf)
+ return false;
+
+ if ((QDF_NBUF_CB_TX_PACKET_TRACK(nbuf) != QDF_NBUF_TX_PKT_DATA_TRACK) ||
+ ((dir == QDF_TX) && (QDF_NBUF_CB_TX_DP_TRACE(nbuf) == 0)) ||
+ ((dir == QDF_RX) && (QDF_NBUF_CB_RX_DP_TRACE(nbuf) == 0)))
return false;
return true;
}
+static inline
+int qdf_dp_trace_fill_meta_str(char *prepend_str, int size, int index,
+ u8 info, struct qdf_dp_trace_record_s *record)
+{
+ char buffer[20];
+ int ret = 0;
+ uint8_t live = info & QDF_DP_TRACE_RECORD_INFO_LIVE ? 1 : 0;
+ uint8_t throttled = info & QDF_DP_TRACE_RECORD_INFO_THROTTLED ?
+ 1 : 0;
+
+ qdf_mem_set(prepend_str, 0, size);
+ scnprintf(buffer, sizeof(buffer), "%llu", record->time);
+ ret = scnprintf(prepend_str, size,
+ "%sDPT: %04d:%s %s",
+ throttled == 1 ? "*" : "",
+ index,
+ (live == 1) ? "" : buffer,
+ qdf_dp_code_to_string(record->code));
+
+ return ret;
+}
+
+/**
+ * qdf_dp_fill_record_data() - fill meta data and data into the record
+ * @rec: pointer to the trace record
+ * @data: pointer to the data to be copied
+ * @data_size: size of the data
+ * @meta_data: pointer to metadata
+ * @metadata_size: size of metadata
+ *
+ * Should be called from within a spin_lock for the qdf record.
+ * Fills up rec->data with |metadata|data|
+ *
+ * Return: none
+ */
+static void qdf_dp_fill_record_data
+ (struct qdf_dp_trace_record_s *rec,
+ uint8_t *data, uint8_t data_size,
+ uint8_t *meta_data, uint8_t metadata_size)
+{
+ int32_t available = QDF_DP_TRACE_RECORD_SIZE;
+ uint8_t *rec_data = rec->data;
+ uint8_t data_to_copy = 0;
+
+ qdf_mem_set(rec_data, QDF_DP_TRACE_RECORD_SIZE, 0);
+
+ /* copy meta data */
+ if (meta_data) {
+ if (metadata_size > available) {
+ QDF_TRACE(QDF_MODULE_ID_QDF,
+ QDF_TRACE_LEVEL_WARN,
+ "%s: meta data does not fit into the record",
+ __func__);
+ goto end;
+ }
+ qdf_mem_copy(rec_data, meta_data, metadata_size);
+ available = available - metadata_size;
+ } else {
+ metadata_size = 0;
+ }
+
+ /* copy data */
+ if (data != NULL && (data_size > 0) && (available > 0)) {
+ data_to_copy = data_size;
+ if (data_size > available)
+ data_to_copy = available;
+ qdf_mem_copy(&rec_data[metadata_size], data, data_to_copy);
+ }
+end:
+ rec->size = data_to_copy;
+}
+
/**
* qdf_dp_add_record() - add dp trace record
* @code: dptrace code
* @data: data pointer
- * @size: size of buffer
- * @print: true to print it in kmsg
+ * @data_size: size of data to be copied
+ * @meta_data: meta data to be prepended to data
+ * @metadata_size: sizeof meta data
+ * @print: whether to print record
*
* Return: none
*/
static void qdf_dp_add_record(enum QDF_DP_TRACE_ID code,
- uint8_t *data, uint8_t size, bool print)
+ uint8_t *data, uint8_t data_size,
+ uint8_t *meta_data, uint8_t metadata_size,
+ bool print)
{
struct qdf_dp_trace_record_s *rec = NULL;
int index;
+ bool print_this_record = false;
+ u8 info = 0;
+
+ if (code >= QDF_DP_TRACE_MAX) {
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+ "invalid record code %u, max code %u", code,
+ QDF_DP_TRACE_MAX);
+ return;
+ }
spin_lock_bh(&l_dp_trace_lock);
@@ -1261,42 +1380,27 @@ static void qdf_dp_add_record(enum QDF_DP_TRACE_ID code,
index = g_qdf_dp_trace_data.tail;
rec->code = code;
rec->size = 0;
- if (data != NULL && size > 0) {
- if (size > QDF_DP_TRACE_RECORD_SIZE)
- size = QDF_DP_TRACE_RECORD_SIZE;
-
- rec->size = size;
- qdf_mem_copy(rec->data, data, size);
- }
- qdf_get_time_of_the_day_in_hr_min_sec_usec(rec->time,
- sizeof(rec->time));
+ qdf_dp_fill_record_data(rec, data, data_size,
+ meta_data, metadata_size);
+ rec->time = qdf_get_log_timestamp();
rec->pid = (in_interrupt() ? 0 : current->pid);
- spin_unlock_bh(&l_dp_trace_lock);
-
- if (rec->code >= QDF_DP_TRACE_MAX) {
- QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
- "invalid record code %u, max code %u", rec->code,
- QDF_DP_TRACE_MAX);
- return;
- }
-
- if (print == true) {
- qdf_dp_trace_cb_table[rec->code] (rec, index, true);
- return;
+ if (print || g_qdf_dp_trace_data.force_live_mode) {
+ print_this_record = true;
+ } else if (g_qdf_dp_trace_data.live_mode == 1) {
+ print_this_record = true;
+ g_qdf_dp_trace_data.print_pkt_cnt++;
+ if (g_qdf_dp_trace_data.print_pkt_cnt >
+ g_qdf_dp_trace_data.high_tput_thresh) {
+ g_qdf_dp_trace_data.live_mode = 0;
+ info |= QDF_DP_TRACE_RECORD_INFO_THROTTLED;
+ }
}
-
- spin_lock_bh(&l_dp_trace_lock);
- g_qdf_dp_trace_data.print_pkt_cnt++;
- if ((g_qdf_dp_trace_data.live_mode == 1) &&
- (g_qdf_dp_trace_data.print_pkt_cnt >
- g_qdf_dp_trace_data.high_tput_thresh))
- g_qdf_dp_trace_data.live_mode = 0;
spin_unlock_bh(&l_dp_trace_lock);
- if (g_qdf_dp_trace_data.live_mode == true) {
- qdf_dp_trace_cb_table[rec->code] (rec, index, true);
- return;
- }
+ info |= QDF_DP_TRACE_RECORD_INFO_LIVE;
+ if (print_this_record)
+ qdf_dp_trace_cb_table[rec->code] (rec, index, info);
+
}
/**
@@ -1582,18 +1686,21 @@ qdf_export_symbol(qdf_dp_trace_log_pkt);
* Return: none
*/
void qdf_dp_display_mgmt_pkt(struct qdf_dp_trace_record_s *record,
- uint16_t index, bool live)
+ uint16_t index, u8 info)
{
+ int loc;
+ char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE];
struct qdf_dp_trace_mgmt_buf *buf =
(struct qdf_dp_trace_mgmt_buf *)record->data;
- DPTRACE_PRINT("DPT: %04d: %s [%d] [%s %s %s]",
- index,
- (live == true) ? " " : record->time,
- buf->vdev_id,
- qdf_dp_code_to_string(record->code),
- qdf_dp_type_to_str(buf->type),
- qdf_dp_subtype_to_str(buf->subtype));
+ loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str),
+ index, info, record);
+
+ DPTRACE_PRINT("%s [%d] [%s %s]",
+ prepend_str,
+ buf->vdev_id,
+ qdf_dp_type_to_str(buf->type),
+ qdf_dp_subtype_to_str(buf->subtype));
}
qdf_export_symbol(qdf_dp_display_mgmt_pkt);
@@ -1621,7 +1728,7 @@ void qdf_dp_trace_mgmt_pkt(enum QDF_DP_TRACE_ID code, uint8_t vdev_id,
buf.type = type;
buf.subtype = subtype;
buf.vdev_id = vdev_id;
- qdf_dp_add_record(code, (uint8_t *)&buf, buf_size, true);
+ qdf_dp_add_record(code, (uint8_t *)&buf, buf_size, NULL, 0, true);
}
qdf_export_symbol(qdf_dp_trace_mgmt_pkt);
@@ -1634,18 +1741,20 @@ qdf_export_symbol(qdf_dp_trace_mgmt_pkt);
* Return: none
*/
void qdf_dp_display_event_record(struct qdf_dp_trace_record_s *record,
- uint16_t index, bool live)
+ uint16_t index, u8 info)
{
+ char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE];
struct qdf_dp_trace_event_buf *buf =
(struct qdf_dp_trace_event_buf *)record->data;
- DPTRACE_PRINT("DPT: %04d: %s [%d] [%s %s %s]",
- index,
- (live == true) ? "" : record->time,
- buf->vdev_id,
- qdf_dp_code_to_string(record->code),
- qdf_dp_type_to_str(buf->type),
- qdf_dp_subtype_to_str(buf->subtype));
+ qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str),
+ index, info, record);
+
+ DPTRACE_PRINT("%s [%d] [%s %s]",
+ prepend_str,
+ buf->vdev_id,
+ qdf_dp_type_to_str(buf->type),
+ qdf_dp_subtype_to_str(buf->subtype));
}
qdf_export_symbol(qdf_dp_display_event_record);
@@ -1673,7 +1782,7 @@ void qdf_dp_trace_record_event(enum QDF_DP_TRACE_ID code, uint8_t vdev_id,
buf.type = type;
buf.subtype = subtype;
buf.vdev_id = vdev_id;
- qdf_dp_add_record(code, (uint8_t *)&buf, buf_size, true);
+ qdf_dp_add_record(code, (uint8_t *)&buf, buf_size, NULL, 0, true);
}
qdf_export_symbol(qdf_dp_trace_record_event);
@@ -1686,21 +1795,24 @@ qdf_export_symbol(qdf_dp_trace_record_event);
* Return: none
*/
void qdf_dp_display_proto_pkt(struct qdf_dp_trace_record_s *record,
- uint16_t index, bool live)
+ uint16_t index, u8 info)
{
+ int loc;
+ char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE];
struct qdf_dp_trace_proto_buf *buf =
(struct qdf_dp_trace_proto_buf *)record->data;
- DPTRACE_PRINT("DPT: %04d: %s [%d] [%s%s] SA: "
- QDF_MAC_ADDRESS_STR " %s DA: "
- QDF_MAC_ADDRESS_STR,
- index,
- (live == true) ? "" : record->time,
- buf->vdev_id,
- qdf_dp_code_to_string(record->code),
- qdf_dp_subtype_to_str(buf->subtype),
- QDF_MAC_ADDR_ARRAY(buf->sa.bytes),
- qdf_dp_dir_to_str(buf->dir), QDF_MAC_ADDR_ARRAY(buf->da.bytes));
+ loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str),
+ index, info, record);
+ DPTRACE_PRINT("%s [%d] [%s] SA: "
+ QDF_MAC_ADDRESS_STR " %s DA: "
+ QDF_MAC_ADDRESS_STR,
+ prepend_str,
+ buf->vdev_id,
+ qdf_dp_subtype_to_str(buf->subtype),
+ QDF_MAC_ADDR_ARRAY(buf->sa.bytes),
+ qdf_dp_dir_to_str(buf->dir),
+ QDF_MAC_ADDR_ARRAY(buf->da.bytes));
}
qdf_export_symbol(qdf_dp_display_proto_pkt);
@@ -1737,7 +1849,7 @@ void qdf_dp_trace_proto_pkt(enum QDF_DP_TRACE_ID code,
buf.type = type;
buf.subtype = subtype;
buf.vdev_id = vdev_id;
- qdf_dp_add_record(code, (uint8_t *)&buf, buf_size, print);
+ qdf_dp_add_record(code, (uint8_t *)&buf, buf_size, NULL, 0, print);
}
qdf_export_symbol(qdf_dp_trace_proto_pkt);
@@ -1750,24 +1862,28 @@ qdf_export_symbol(qdf_dp_trace_proto_pkt);
* Return: none
*/
void qdf_dp_display_ptr_record(struct qdf_dp_trace_record_s *record,
- uint16_t index, bool live)
+ uint16_t index, u8 info)
{
- char prepend_str[100] = {'\0'};
+ int loc;
+ char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE];
struct qdf_dp_trace_ptr_buf *buf =
(struct qdf_dp_trace_ptr_buf *)record->data;
- snprintf(prepend_str, sizeof(prepend_str),
- "%04d: %s [%s] [msdu id %d %s %d]",
- index,
- (live == true) ? "" : record->time,
- qdf_dp_code_to_string(record->code), buf->msdu_id,
- (record->code == QDF_DP_TRACE_FREE_PACKET_PTR_RECORD) ?
- "status" : "vdev_id",
- buf->status);
+ loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str),
+ index, info, record);
- if (live == true) {
+ if (loc < sizeof(prepend_str))
+ scnprintf(&prepend_str[loc], sizeof(prepend_str) - loc,
+ "[msdu id %d %s %d]",
+ buf->msdu_id,
+ (record->code ==
+ QDF_DP_TRACE_FREE_PACKET_PTR_RECORD) ?
+ "status" : "vdev_id",
+ buf->status);
+
+ if (info & QDF_DP_TRACE_RECORD_INFO_LIVE) {
/* In live mode donot dump the contents of the cookie */
- DPTRACE_PRINT("DPT: %s", prepend_str);
+ DPTRACE_PRINT("%s", prepend_str);
} else {
dump_dp_hex_trace(prepend_str, (uint8_t *)&buf->cookie,
sizeof(buf->cookie));
@@ -1800,31 +1916,56 @@ void qdf_dp_trace_ptr(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code,
qdf_mem_copy(&buf.cookie, data, size);
buf.msdu_id = msdu_id;
buf.status = status;
- qdf_dp_add_record(code, (uint8_t *)&buf, buf_size,
- QDF_NBUF_CB_DP_TRACE_PRINT(nbuf));
+ qdf_dp_add_record(code, (uint8_t *)&buf, buf_size, NULL, 0,
+ QDF_NBUF_CB_DP_TRACE_PRINT(nbuf));
}
qdf_export_symbol(qdf_dp_trace_ptr);
/**
+ * qdf_dp_trace_data_pkt() - trace data packet
+ * @nbuf : nbuf which needs to be traced
+ * @code : QDF_DP_TRACE_ID for the packet (TX or RX)
+ * @msdu_id : tx desc id for the nbuf (Only applies to TX packets)
+ * @dir : TX or RX packet direction
+ *
+ * Return: None
+ */
+void qdf_dp_trace_data_pkt(qdf_nbuf_t nbuf,
+ enum QDF_DP_TRACE_ID code, uint16_t msdu_id, enum qdf_proto_dir dir)
+{
+ struct qdf_dp_trace_data_buf buf;
+
+ buf.msdu_id = msdu_id;
+ if (qdf_dp_enable_check(nbuf, code, dir) == false)
+ return;
+
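+	/* Trace only the linear nbuf data: total length minus paged (frag) data */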
+ qdf_dp_add_record(code, qdf_nbuf_data(nbuf), nbuf->len - nbuf->data_len,
+ (uint8_t *)&buf, sizeof(struct qdf_dp_trace_data_buf),
+ (nbuf) ? QDF_NBUF_CB_DP_TRACE_PRINT(nbuf)
+ : false);
+}
+
+qdf_export_symbol(qdf_dp_trace_data_pkt);
+
+/**
 * qdf_dp_display_record() - Displays a record in DP trace
- * @pRecord : pointer to a record in DP trace
- * @recIndex : record index
+ * @record : pointer to a record in DP trace
+ * @index : record index
 * @info : display info bitmap (live mode or dump mode)
*
* Return: None
*/
-void qdf_dp_display_record(struct qdf_dp_trace_record_s *pRecord,
- uint16_t recIndex, bool live)
+void qdf_dp_display_record(struct qdf_dp_trace_record_s *record,
+ uint16_t index, u8 info)
{
- char prepend_str[50] = {'\0'};
- snprintf(prepend_str, sizeof(prepend_str),
- "%04d: %s %s",
- recIndex,
- (live == true) ? "" : pRecord->time,
- qdf_dp_code_to_string(pRecord->code));
+ int loc;
+ char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE];
+
+ loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str),
+ index, info, record);
- switch (pRecord->code) {
+ switch (record->code) {
case QDF_DP_TRACE_HDD_TX_TIMEOUT:
DPTRACE_PRINT(" %s: HDD TX Timeout", prepend_str);
break;
@@ -1834,15 +1975,38 @@ void qdf_dp_display_record(struct qdf_dp_trace_record_s *pRecord,
case QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD:
DPTRACE_PRINT(" %s: CE Fast Packet Error", prepend_str);
break;
- case QDF_DP_TRACE_HDD_TX_PACKET_RECORD:
- case QDF_DP_TRACE_HDD_RX_PACKET_RECORD:
default:
- dump_dp_hex_trace(prepend_str, pRecord->data, pRecord->size);
+ dump_dp_hex_trace(prepend_str, record->data, record->size);
break;
};
}
qdf_export_symbol(qdf_dp_display_record);
+/**
+ * qdf_dp_display_data_pkt_record() - Displays a data packet in DP trace
+ * @record : pointer to a record in DP trace
+ * @recIndex : record index
+ * @info : display info bitmap (live mode or dump mode)
+ *
+ * Return: None
+ */
+void qdf_dp_display_data_pkt_record(struct qdf_dp_trace_record_s *record,
+ uint16_t recIndex, u8 info)
+{
+ int loc;
+ char prepend_str[DP_TRACE_META_DATA_STRLEN + 10];
+ struct qdf_dp_trace_data_buf *buf =
+ (struct qdf_dp_trace_data_buf *)record->data;
+
+ loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str),
+ recIndex, info, record);
+ if (loc < sizeof(prepend_str))
+ loc += snprintf(&prepend_str[loc], sizeof(prepend_str) - loc,
+ "[%d]", buf->msdu_id);
+ dump_dp_hex_trace(prepend_str,
+ &record->data[sizeof(struct qdf_dp_trace_data_buf)],
+ record->size);
+}
/**
* qdf_dp_trace() - Stores the data in buffer
@@ -1860,7 +2024,7 @@ void qdf_dp_trace(qdf_nbuf_t nbuf, enum QDF_DP_TRACE_ID code,
if (qdf_dp_enable_check(nbuf, code, dir) == false)
return;
- qdf_dp_add_record(code, data, size,
+ qdf_dp_add_record(code, qdf_nbuf_data(nbuf), size, NULL, 0,
(nbuf != NULL) ? QDF_NBUF_CB_DP_TRACE_PRINT(nbuf) : false);
}
qdf_export_symbol(qdf_dp_trace);
@@ -1885,7 +2049,7 @@ qdf_export_symbol(qdf_dp_trace_spin_lock_init);
*/
void qdf_dp_trace_disable_live_mode(void)
{
- g_qdf_dp_trace_data.live_mode = 0;
+ g_qdf_dp_trace_data.force_live_mode = 0;
}
qdf_export_symbol(qdf_dp_trace_disable_live_mode);
@@ -1896,7 +2060,7 @@ qdf_export_symbol(qdf_dp_trace_disable_live_mode);
*/
void qdf_dp_trace_enable_live_mode(void)
{
- g_qdf_dp_trace_data.live_mode = 1;
+ g_qdf_dp_trace_data.force_live_mode = 1;
}
qdf_export_symbol(qdf_dp_trace_enable_live_mode);
@@ -1940,6 +2104,388 @@ void qdf_dp_trace_dump_stats(void)
g_qdf_dp_trace_data.eapol_m4,
g_qdf_dp_trace_data.eapol_others);
}
+qdf_export_symbol(qdf_dp_trace_dump_stats);
+
+/**
+ * qdf_dpt_dump_hex_trace_debugfs() - hex dump data into the debugfs file
+ * @file: debugfs file to write to
+ * @str: string to prepend the hexdump with
+ * @buf: buffer containing the data to be dumped
+ * @buf_len: size of the data to be dumped
+ *
+ * Return: None
+ */
+static void qdf_dpt_dump_hex_trace_debugfs(qdf_debugfs_file_t file,
+ char *str, uint8_t *buf, uint8_t buf_len)
+{
+ unsigned char linebuf[BUFFER_SIZE];
+ const u8 *ptr = buf;
+ int i, linelen, remaining = buf_len;
+
+ /* Dump the buffer ROW_SIZE bytes per line */
+ for (i = 0; i < buf_len; i += ROW_SIZE) {
+ linelen = min(remaining, ROW_SIZE);
+ remaining -= ROW_SIZE;
+
+ hex_dump_to_buffer(ptr + i, linelen, ROW_SIZE, 1,
+ linebuf, sizeof(linebuf), false);
+
+ qdf_debugfs_printf(file, "%s %s\n",
+ str, linebuf);
+ }
+}
+
+/**
+ * qdf_dpt_display_proto_pkt_debugfs() - display proto packet
+ * @file: debugfs file to write to
+ * @record: dptrace record
+ * @index: index
+ *
+ * Return: none
+ */
+static void qdf_dpt_display_proto_pkt_debugfs(qdf_debugfs_file_t file,
+ struct qdf_dp_trace_record_s *record,
+ uint32_t index)
+{
+ int loc;
+ char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE];
+ struct qdf_dp_trace_proto_buf *buf =
+ (struct qdf_dp_trace_proto_buf *)record->data;
+
+ loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str),
+ index, 0, record);
+ qdf_debugfs_printf(file, "%s [%d] [%s] SA: "
+ QDF_MAC_ADDRESS_STR " %s DA: "
+ QDF_MAC_ADDRESS_STR,
+ prepend_str,
+ buf->vdev_id,
+ qdf_dp_subtype_to_str(buf->subtype),
+ QDF_MAC_ADDR_ARRAY(buf->sa.bytes),
+ qdf_dp_dir_to_str(buf->dir),
+ QDF_MAC_ADDR_ARRAY(buf->da.bytes));
+ qdf_debugfs_printf(file, "\n");
+}
+
+/**
+ * qdf_dpt_display_mgmt_pkt_debugfs() - display mgmt packet
+ * @file: debugfs file to write to
+ * @record: dptrace record
+ * @index: index
+ *
+ * Return: none
+ */
+static void qdf_dpt_display_mgmt_pkt_debugfs(qdf_debugfs_file_t file,
+ struct qdf_dp_trace_record_s *record,
+ uint32_t index)
+{
+
+ int loc;
+ char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE];
+ struct qdf_dp_trace_mgmt_buf *buf =
+ (struct qdf_dp_trace_mgmt_buf *)record->data;
+
+ loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str),
+ index, 0, record);
+
+ qdf_debugfs_printf(file, "%s [%d] [%s %s]\n",
+ prepend_str,
+ buf->vdev_id,
+ qdf_dp_type_to_str(buf->type),
+ qdf_dp_subtype_to_str(buf->subtype));
+
+}
+
+/**
+ * qdf_dpt_display_event_record_debugfs() - display event records
+ * @file: debugfs file to write to
+ * @record: dptrace record
+ * @index: index
+ *
+ * Return: none
+ */
+static void qdf_dpt_display_event_record_debugfs(qdf_debugfs_file_t file,
+ struct qdf_dp_trace_record_s *record,
+ uint32_t index)
+{
+ char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE];
+ struct qdf_dp_trace_event_buf *buf =
+ (struct qdf_dp_trace_event_buf *)record->data;
+
+ qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str),
+ index, 0, record);
+ qdf_debugfs_printf(file, "%s [%d] [%s %s]\n",
+ prepend_str,
+ buf->vdev_id,
+ qdf_dp_type_to_str(buf->type),
+ qdf_dp_subtype_to_str(buf->subtype));
+}
+
+/**
+ * qdf_dpt_display_ptr_record_debugfs() - display record ptr
+ * @file: debugfs file to write to
+ * @record: dptrace record
+ * @index: index
+ *
+ * Return: none
+ */
+static void qdf_dpt_display_ptr_record_debugfs(qdf_debugfs_file_t file,
+ struct qdf_dp_trace_record_s *record,
+ uint32_t index)
+{
+ char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE];
+ int loc;
+ struct qdf_dp_trace_ptr_buf *buf =
+ (struct qdf_dp_trace_ptr_buf *)record->data;
+ loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str),
+ index, 0, record);
+
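+	/* Append msdu id and status/vdev_id after the common meta prefix */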
+ if (loc < sizeof(prepend_str))
+ scnprintf(&prepend_str[loc], sizeof(prepend_str) - loc,
+ "[msdu id %d %s %d]",
+ buf->msdu_id,
+ (record->code ==
+ QDF_DP_TRACE_FREE_PACKET_PTR_RECORD) ?
+ "status" : "vdev_id",
+ buf->status);
+
+ qdf_dpt_dump_hex_trace_debugfs(file, prepend_str,
+ (uint8_t *)&buf->cookie,
+ sizeof(buf->cookie));
+}
+
+/**
+ * qdf_dpt_display_record_debugfs() - display record
+ * @file: debugfs file to write to
+ * @record: dptrace record
+ * @index: index
+ *
+ * Return: none
+ */
+static void qdf_dpt_display_record_debugfs(qdf_debugfs_file_t file,
+ struct qdf_dp_trace_record_s *record,
+ uint32_t index)
+{
+ int loc;
+ char prepend_str[QDF_DP_TRACE_PREPEND_STR_SIZE];
+
+ loc = qdf_dp_trace_fill_meta_str(prepend_str, sizeof(prepend_str),
+ index, 0, record);
+ qdf_dpt_dump_hex_trace_debugfs(file, prepend_str,
+ record->data, record->size);
+}
+
+uint32_t qdf_dpt_get_curr_pos_debugfs(qdf_debugfs_file_t file,
+ enum qdf_dpt_debugfs_state state)
+{
+ uint32_t i = 0;
+ uint32_t tail;
+ uint32_t count = g_qdf_dp_trace_data.num;
+
+ if (!g_qdf_dp_trace_data.enable) {
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
+ "%s: Tracing Disabled", __func__);
+ return QDF_STATUS_E_EMPTY;
+ }
+
+ if (!count) {
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
+ "%s: no packets", __func__);
+ return QDF_STATUS_E_EMPTY;
+ }
+
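+	/* A dump already in progress resumes from the previously saved position */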
+ if (state == QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS)
+ return g_qdf_dp_trace_data.curr_pos;
+
+ qdf_debugfs_printf(file,
+ "DPT: config - bitmap 0x%x verb %u #rec %u live_config %u thresh %u time_limit %u\n",
+ g_qdf_dp_trace_data.proto_bitmap,
+ g_qdf_dp_trace_data.verbosity,
+ g_qdf_dp_trace_data.no_of_record,
+ g_qdf_dp_trace_data.live_mode_config,
+ g_qdf_dp_trace_data.high_tput_thresh,
+ g_qdf_dp_trace_data.thresh_time_limit);
+
+ qdf_debugfs_printf(file,
+ "STATS |DPT: icmp(%u %u) arp(%u %u) icmpv6(%u %u %u %u %u %u) dhcp(%u %u %u %u %u %u) eapol(%u %u %u %u %u)\n",
+ g_qdf_dp_trace_data.icmp_req,
+ g_qdf_dp_trace_data.icmp_resp,
+ g_qdf_dp_trace_data.arp_req,
+ g_qdf_dp_trace_data.arp_resp,
+ g_qdf_dp_trace_data.icmpv6_req,
+ g_qdf_dp_trace_data.icmpv6_resp,
+ g_qdf_dp_trace_data.icmpv6_ns,
+ g_qdf_dp_trace_data.icmpv6_na,
+ g_qdf_dp_trace_data.icmpv6_rs,
+ g_qdf_dp_trace_data.icmpv6_ra,
+ g_qdf_dp_trace_data.dhcp_disc,
+ g_qdf_dp_trace_data.dhcp_off,
+ g_qdf_dp_trace_data.dhcp_req,
+ g_qdf_dp_trace_data.dhcp_ack,
+ g_qdf_dp_trace_data.dhcp_nack,
+ g_qdf_dp_trace_data.dhcp_others,
+ g_qdf_dp_trace_data.eapol_m1,
+ g_qdf_dp_trace_data.eapol_m2,
+ g_qdf_dp_trace_data.eapol_m3,
+ g_qdf_dp_trace_data.eapol_m4,
+ g_qdf_dp_trace_data.eapol_others);
+
+ qdf_debugfs_printf(file,
+ "DPT: Total Records: %d, Head: %d, Tail: %d\n",
+ g_qdf_dp_trace_data.num, g_qdf_dp_trace_data.head,
+ g_qdf_dp_trace_data.tail);
+
+ spin_lock_bh(&l_dp_trace_lock);
+ if (g_qdf_dp_trace_data.head != INVALID_QDF_DP_TRACE_ADDR) {
+ i = g_qdf_dp_trace_data.head;
+ tail = g_qdf_dp_trace_data.tail;
+
+ if (count > g_qdf_dp_trace_data.num)
+ count = g_qdf_dp_trace_data.num;
+
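+		/* Locate the oldest of the last count records: count-1 slots behind tail, wrapping around the ring if needed */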
+ if (tail >= (count - 1))
+ i = tail - count + 1;
+ else if (count != MAX_QDF_DP_TRACE_RECORDS)
+ i = MAX_QDF_DP_TRACE_RECORDS - ((count - 1) -
+ tail);
+ g_qdf_dp_trace_data.curr_pos = 0;
+ g_qdf_dp_trace_data.saved_tail = tail;
+ }
+ spin_unlock_bh(&l_dp_trace_lock);
+ return i;
+}
+qdf_export_symbol(qdf_dpt_get_curr_pos_debugfs);
+
+QDF_STATUS qdf_dpt_dump_stats_debugfs(qdf_debugfs_file_t file,
+ uint32_t curr_pos)
+{
+ struct qdf_dp_trace_record_s p_record;
+ uint32_t i = curr_pos;
+ uint32_t tail = g_qdf_dp_trace_data.saved_tail;
+
+ if (!g_qdf_dp_trace_data.enable) {
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+ "%s: Tracing Disabled", __func__);
+ return QDF_STATUS_E_FAILURE;
+ }
+
+	/*
+	 * QDF_DP_TRACE_MAX_RECORD_SIZE must be at least
+	 * QDF_DP_TRACE_PREPEND_STR_SIZE(100) + BUFFER_SIZE(121).
+	 */
+ if (WARN_ON(QDF_DP_TRACE_MAX_RECORD_SIZE <
+ QDF_DP_TRACE_PREPEND_STR_SIZE + BUFFER_SIZE))
+ return QDF_STATUS_E_FAILURE;
+
+ spin_lock_bh(&l_dp_trace_lock);
+ p_record = g_qdf_dp_trace_tbl[i];
+ spin_unlock_bh(&l_dp_trace_lock);
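+	/* Walk the ring from curr_pos up to the tail saved when the dump started */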
+ for (;; ) {
+ /*
+ * Initially we get file as 1 page size, and
+ * if remaining size in file is less than one record max size,
+ * then return so that it gets an extra page.
+ */
+ if ((file->size - file->count) < QDF_DP_TRACE_MAX_RECORD_SIZE) {
+ spin_lock_bh(&l_dp_trace_lock);
+ g_qdf_dp_trace_data.curr_pos = i;
+ spin_unlock_bh(&l_dp_trace_lock);
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ switch (p_record.code) {
+ case QDF_DP_TRACE_TXRX_PACKET_PTR_RECORD:
+ case QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD:
+ case QDF_DP_TRACE_FREE_PACKET_PTR_RECORD:
+ qdf_dpt_display_ptr_record_debugfs(file, &p_record, i);
+ break;
+
+ case QDF_DP_TRACE_EAPOL_PACKET_RECORD:
+ case QDF_DP_TRACE_DHCP_PACKET_RECORD:
+ case QDF_DP_TRACE_ARP_PACKET_RECORD:
+ case QDF_DP_TRACE_ICMP_PACKET_RECORD:
+ case QDF_DP_TRACE_ICMPv6_PACKET_RECORD:
+ qdf_dpt_display_proto_pkt_debugfs(file, &p_record, i);
+ break;
+
+ case QDF_DP_TRACE_MGMT_PACKET_RECORD:
+ qdf_dpt_display_mgmt_pkt_debugfs(file, &p_record, i);
+ break;
+
+ case QDF_DP_TRACE_EVENT_RECORD:
+ qdf_dpt_display_event_record_debugfs(file, &p_record,
+ i);
+ break;
+
+ case QDF_DP_TRACE_HDD_TX_TIMEOUT:
+ qdf_debugfs_printf(file, "DPT: %04d: %s %s\n",
+ i, p_record.time,
+ qdf_dp_code_to_string(p_record.code));
+			qdf_debugfs_printf(file, "HDD TX Timeout\n");
+ break;
+
+ case QDF_DP_TRACE_HDD_SOFTAP_TX_TIMEOUT:
+			qdf_debugfs_printf(file, "DPT: %04d: %s %s\n",
+					   i, p_record.time,
+					   qdf_dp_code_to_string(p_record.code));
+			qdf_debugfs_printf(file,
+					   "HDD SoftAP TX Timeout\n");
+ break;
+
+ case QDF_DP_TRACE_CE_FAST_PACKET_ERR_RECORD:
+ qdf_debugfs_printf(file, "DPT: %llu: %s %s\n",
+ i, p_record.time,
+ qdf_dp_code_to_string(p_record.code));
+ qdf_debugfs_printf(file,
+ "%s: CE Fast Packet Error\n");
+ break;
+
+ case QDF_DP_TRACE_MAX:
+ qdf_debugfs_printf(file,
+ "%s: QDF_DP_TRACE_MAX event should not be generated\n",
+ __func__);
+ break;
+
+ case QDF_DP_TRACE_TX_PACKET_RECORD:
+ case QDF_DP_TRACE_RX_PACKET_RECORD:
+ default:
+ qdf_dpt_display_record_debugfs(file, &p_record, i);
+ break;
+ }
+
+ if (i == tail)
+ break;
+ i += 1;
+
+ spin_lock_bh(&l_dp_trace_lock);
+ if (i == MAX_QDF_DP_TRACE_RECORDS)
+ i = 0;
+
+ p_record = g_qdf_dp_trace_tbl[i];
+ spin_unlock_bh(&l_dp_trace_lock);
+ }
+ return QDF_STATUS_SUCCESS;
+}
+qdf_export_symbol(qdf_dpt_dump_stats_debugfs);
+
+/**
+ * qdf_dpt_set_value_debugfs() - Configure the value to control DP trace
+ * @proto_bitmap: bitmap of protocols to be tracked
+ * @no_of_record: trace every nth packet
+ * @verbosity: defines the verbosity level
+ *
+ * Return: None
+ */
+void qdf_dpt_set_value_debugfs(uint8_t proto_bitmap, uint8_t no_of_record,
+ uint8_t verbosity)
+{
+ if (g_qdf_dp_trace_data.enable) {
+ g_qdf_dp_trace_data.proto_bitmap = proto_bitmap;
+ g_qdf_dp_trace_data.no_of_record = no_of_record;
+ g_qdf_dp_trace_data.verbosity = verbosity;
+ }
+}
+qdf_export_symbol(qdf_dpt_set_value_debugfs);
+
/**
* qdf_dp_trace_dump_all() - Dump data from ring buffer via call back functions
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_apf_tlv.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_apf_tlv.h
new file mode 100644
index 000000000000..09eb1b1e2dd6
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_apf_tlv.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#ifndef _WMI_UNIFIED_APF_TLV_H_
+#define _WMI_UNIFIED_APF_TLV_H_
+
+#include "wmi_unified.h"
+#include "wmi_unified_api.h"
+
+/**
+ * send_set_active_apf_mode_cmd_tlv() - configure active APF mode in FW
+ * @wmi_handle: the WMI handle
+ * @vdev_id: the Id of the vdev to apply the configuration to
+ * @ucast_mode: the active APF mode to configure for unicast packets
+ * @mcast_bcast_mode: the active APF mode to configure for multicast/broadcast
+ * packets
+ *
+ * Return: QDF status
+ */
+QDF_STATUS
+send_set_active_apf_mode_cmd_tlv(wmi_unified_t wmi_handle,
+ uint8_t vdev_id,
+ FW_ACTIVE_BPF_MODE ucast_mode,
+ FW_ACTIVE_BPF_MODE mcast_bcast_mode);
+
+/**
+ * send_apf_enable_cmd_tlv() - send cmd to enable/disable APF interpreter
+ * @wmi_handle: the WMI handle
+ * @vdev_id: VDEV on which APF interpreter is to be enabled/disabled
+ * @enable: true: enable, false: disable
+ *
+ * Return: QDF status
+ */
+QDF_STATUS send_apf_enable_cmd_tlv(wmi_unified_t wmi_handle,
+ uint32_t vdev_id, bool enable);
+
+/**
+ * send_apf_write_work_memory_cmd_tlv() - send cmd to write into the APF work
+ * memory
+ * @wmi_handle: the WMI handle
+ * @apf_write_params: parameters and buffer pointer for the write
+ *
+ * Return: QDF status
+ */
+QDF_STATUS
+send_apf_write_work_memory_cmd_tlv(wmi_unified_t wmi_handle,
+ struct wmi_apf_write_memory_params
+ *apf_write_params);
+
+/**
+ * send_apf_read_work_memory_cmd_tlv() - send cmd to read part of APF
+ * work memory
+ * @wmi_handle: the WMI handle
+ * @apf_read_params: contains relative address and length to read from
+ *
+ * Return: QDF status
+ */
+QDF_STATUS
+send_apf_read_work_memory_cmd_tlv(wmi_unified_t wmi_handle,
+ struct wmi_apf_read_memory_params
+ *apf_read_params);
+
+/**
+ * extract_apf_read_memory_resp_event_tlv() - extract read memory response
+ * event into the given structure pointer
+ * @wmi_handle: the WMI handle
+ * @evt_buf: Pointer to the event buffer
+ * @resp: pointer to memory to extract event parameters into
+ *
+ * Return: QDF status
+ */
+QDF_STATUS
+extract_apf_read_memory_resp_event_tlv(wmi_unified_t wmi_handle, void *evt_buf,
+ struct wmi_apf_read_memory_resp_event_params
+ *resp);
+#endif /* _WMI_UNIFIED_APF_TLV_H_ */
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h
index ab275c11217e..a6252fe2cf65 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_api.h
@@ -892,8 +892,7 @@ QDF_STATUS wmi_unified_enable_arp_ns_offload_cmd(void *wmi_hdl,
/**
* wmi_unified_conf_hw_filter_mode_cmd() - Configure hardware filter
* @wmi_hdl: wmi handle
- * @vdev_id: device identifier
- * @config_bitmap: bitmap of packet types to drop
+ * @req: HW filter request parameters
*
* The hardware filter is only effective in DTIM mode. Use this configuration
* to blanket drop broadcast/multicast packets at the hardware level, without
@@ -901,9 +900,9 @@ QDF_STATUS wmi_unified_enable_arp_ns_offload_cmd(void *wmi_hdl,
*
* Return: QDF_STATUS
*/
-QDF_STATUS wmi_unified_conf_hw_filter_mode_cmd(void *wmi_hdl,
- uint8_t vdev_id,
- uint8_t config_bitmap);
+QDF_STATUS
+wmi_unified_conf_hw_filter_mode_cmd(void *wmi_hdl,
+ struct wmi_hw_filter_req_params *req);
QDF_STATUS wmi_unified_set_led_flashing_cmd(void *wmi_hdl,
struct flashing_req_params *flashing);
@@ -963,19 +962,73 @@ QDF_STATUS wmi_unified_get_buf_extscan_hotlist_cmd(void *wmi_hdl,
photlist, int *buf_len);
/**
- * wmi_unified_set_active_bpf_mode_cmd() - config active BPF mode in FW
- * @wmi_hdl: the WMI handle
+ * wmi_unified_set_active_apf_mode_cmd() - config active APF mode in FW
+ * @wmi: the WMI handle
* @vdev_id: the Id of the vdev to apply the configuration to
- * @ucast_mode: the active BPF mode to configure for unicast packets
- * @mcast_bcast_mode: the active BPF mode to configure for multicast/broadcast
+ * @ucast_mode: the active APF mode to configure for unicast packets
+ * @mcast_bcast_mode: the active APF mode to configure for multicast/broadcast
* packets
*/
QDF_STATUS
-wmi_unified_set_active_bpf_mode_cmd(void *wmi_hdl,
+wmi_unified_set_active_apf_mode_cmd(wmi_unified_t wmi,
uint8_t vdev_id,
FW_ACTIVE_BPF_MODE ucast_mode,
FW_ACTIVE_BPF_MODE mcast_bcast_mode);
+/**
+ * wmi_unified_send_apf_enable_cmd() - send apf enable/disable cmd
+ * @wmi: wmi handle
+ * @vdev_id: VDEV id
+ * @enable: true: enable, false: disable
+ *
+ * This function passes the apf enable command to fw
+ *
+ * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
+ */
+QDF_STATUS wmi_unified_send_apf_enable_cmd(wmi_unified_t wmi,
+ uint32_t vdev_id, bool enable);
+
+/**
+ * wmi_unified_send_apf_write_work_memory_cmd() - send cmd to write into the APF
+ * work memory.
+ * @wmi: wmi handle
+ * @write_params: parameters and buffer pointer for the write
+ *
+ * This function passes the write apf work mem command to fw
+ *
+ * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
+ */
+QDF_STATUS wmi_unified_send_apf_write_work_memory_cmd(wmi_unified_t wmi,
+ struct wmi_apf_write_memory_params *write_params);
+
+/**
+ * wmi_unified_send_apf_read_work_memory_cmd() - send cmd to read part of APF
+ * work memory
+ * @wmi: wmi handle
+ * @read_params: contains relative address and length to read from
+ *
+ * This function passes the read apf work mem command to fw
+ *
+ * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
+ */
+QDF_STATUS wmi_unified_send_apf_read_work_memory_cmd(wmi_unified_t wmi,
+ struct wmi_apf_read_memory_params *read_params);
+
+/**
+ * wmi_extract_apf_read_memory_resp_event() - extract read memory response event
+ * @wmi: wmi handle
+ * @evt_buf: Pointer to the event buffer
+ * @resp: pointer to memory to extract event parameters into
+ *
+ * This function extracts the read memory response event into the given structure
+ *
+ * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
+ */
+QDF_STATUS
+wmi_extract_apf_read_memory_resp_event(wmi_unified_t wmi, void *evt_buf,
+ struct wmi_apf_read_memory_resp_event_params
+ *read_mem_evt);
+
QDF_STATUS wmi_unified_stats_request_send(void *wmi_hdl,
uint8_t macaddr[IEEE80211_ADDR_LEN],
struct stats_request_params *param);
@@ -1506,4 +1559,42 @@ QDF_STATUS wmi_unified_send_dbs_scan_sel_params_cmd(void *wmi_hdl,
QDF_STATUS wmi_unified_send_limit_off_chan_cmd(void *wmi_hdl,
struct wmi_limit_off_chan_param *wmi_param);
+
+/**
+ * wmi_unified_send_roam_scan_stats_cmd() - Wrapper to request roam scan stats
+ * @wmi_hdl: wmi handle
+ * @params: request params
+ *
+ * This function is used to send the roam scan stats request command to
+ * firmware.
+ *
+ * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
+ */
+QDF_STATUS
+wmi_unified_send_roam_scan_stats_cmd(void *wmi_hdl,
+ struct wmi_roam_scan_stats_req *params);
+
+/**
+ * wmi_unified_offload_11k_cmd() - send 11k offload command
+ * @wmi_hdl: wmi handle
+ * @params: 11k offload params
+ *
+ * This function passes the 11k offload command params to FW
+ *
+ * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
+ */
+QDF_STATUS wmi_unified_offload_11k_cmd(void *wmi_hdl,
+ struct wmi_11k_offload_params *params);
+
+/**
+ * wmi_unified_invoke_neighbor_report_cmd() - send invoke neighbor report cmd
+ * @wmi_hdl: wmi handle
+ * @params: invoke neighbor report params
+ *
+ * This function passes the invoke neighbor report command to fw
+ *
+ * Return: QDF_STATUS_SUCCESS on success and QDF_STATUS_E_FAILURE for failure
+ */
+QDF_STATUS wmi_unified_invoke_neighbor_report_cmd(void *wmi_hdl,
+ struct wmi_invoke_neighbor_report_params *params);
#endif /* _WMI_UNIFIED_API_H_ */
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h
index cda05745ae0d..cf6c7c0cfe9a 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_param.h
@@ -5299,6 +5299,7 @@ typedef enum {
wmi_update_rcpi_event_id,
wmi_get_arp_stats_req_id,
wmi_sar_get_limits_event_id,
+ wmi_roam_scan_stats_event_id,
wmi_events_max,
} wmi_conv_event_id;
@@ -7549,5 +7550,196 @@ struct wmi_mawc_roam_params {
uint8_t rssi_stationary_low_adjust;
};
+/**
+ * struct wmi_11k_offload_neighbor_report_params - neighbor report offload params
+ * @time_offset: time offset after 11k offload command to trigger a neighbor
+ * report request (in seconds)
+ * @low_rssi_offset: Offset from rssi threshold to trigger a neighbor
+ * report request (in dBm)
+ * @bmiss_count_trigger: Number of beacon miss events to trigger neighbor
+ * report request
+ * @per_threshold_offset: offset from PER threshold to trigger neighbor
+ * report request (in %)
+ * @neighbor_report_cache_timeout: timeout after which new trigger can enable
+ * sending of a neighbor report request (in seconds)
+ * @max_neighbor_report_req_cap: max number of neighbor report requests that
+ * can be sent to the peer in the current session
+ * @ssid: Current connect SSID info
+ */
+struct wmi_11k_offload_neighbor_report_params {
+ uint32_t time_offset;
+ uint32_t low_rssi_offset;
+ uint32_t bmiss_count_trigger;
+ uint32_t per_threshold_offset;
+ uint32_t neighbor_report_cache_timeout;
+ uint32_t max_neighbor_report_req_cap;
+ struct mac_ssid ssid;
+};
+
+/**
+ * struct wmi_11k_offload_params - offload 11k features to FW
+ * @vdev_id: vdev id
+ * @offload_11k_bitmask: bitmask to specify offloaded features
+ * B0: Neighbor Report Request offload
+ * B1-B31: Reserved
+ * @neighbor_report_params: neighbor report offload params
+ */
+struct wmi_11k_offload_params {
+ uint32_t vdev_id;
+ uint32_t offload_11k_bitmask;
+ struct wmi_11k_offload_neighbor_report_params neighbor_report_params;
+};
+
+/**
+ * struct wmi_invoke_neighbor_report_params - Invoke neighbor report request
+ * from IW to FW
+ * @vdev_id: vdev id
+ * @send_resp_to_host: whether the firmware should send the response to the host
+ * @ssid: ssid given from the IW command
+ */
+struct wmi_invoke_neighbor_report_params {
+ uint32_t vdev_id;
+ uint32_t send_resp_to_host;
+ struct mac_ssid ssid;
+};
+
+/**
+ * struct wmi_apf_write_memory_params - Android Packet Filter write memory
+ * params
+ * @vdev_id: VDEV on which APF memory is to be written
+ * @apf_version: APF version number
+ * @program_len: Length reserved for program in the APF work memory
+ * @addr_offset: Relative address in APF work memory to start writing
+ * @length: Size of the write
+ * @buf: Pointer to the buffer
+ */
+struct wmi_apf_write_memory_params {
+ uint8_t vdev_id;
+ uint32_t apf_version;
+ uint32_t program_len;
+ uint32_t addr_offset;
+ uint32_t length;
+ uint8_t *buf;
+};
+
+/**
+ * struct wmi_apf_read_memory_params - Android Packet Filter read memory params
+ * @vdev_id: vdev id
+ * @addr_offset: Relative address in APF work memory to read from
+ * @length: Size of the memory fetch
+ */
+struct wmi_apf_read_memory_params {
+ uint8_t vdev_id;
+ uint32_t addr_offset;
+ uint32_t length;
+};
+
+/**
+ * struct wmi_apf_read_memory_resp_event_params - Event containing read Android
+ * Packet Filter memory response
+ * @vdev_id: vdev id
+ * @offset: Read memory offset
+ * @length: Read memory length
+ * @more_data: Indicates more data to come
+ * @data: Pointer to the data
+ */
+struct wmi_apf_read_memory_resp_event_params {
+ uint32_t vdev_id;
+ uint32_t offset;
+ uint32_t length;
+ bool more_data;
+ uint8_t *data;
+};
+
+/* Begin of roam scan stats definitions */
+
+#define WMI_ROAM_SCAN_STATS_MAX 5
+#define WMI_ROAM_SCAN_STATS_CANDIDATES_MAX 4
+#define WMI_ROAM_SCAN_STATS_CHANNELS_MAX 50
+
+/**
+ * struct wmi_roam_scan_stats_req - Structure to hold roam scan stats request
+ * @vdev_id: interface id
+ */
+struct wmi_roam_scan_stats_req {
+ uint32_t vdev_id;
+};
+
+/**
+ * struct wmi_roam_scan_stats_candidate - Roam scan candidates
+ * @score: score of AP
+ * @rssi: rssi of the AP
+ * @freq: center frequency
+ * @bssid: bssid of AP
+ */
+struct wmi_roam_scan_stats_candidate {
+ uint32_t score;
+ uint32_t rssi;
+ uint32_t freq;
+ uint8_t bssid[QDF_MAC_ADDR_SIZE];
+};
+
+/**
+ * struct wmi_roam_scan_stats_params - Roam scan details
+ * @time_stamp: time at which this roam scan happened
+ * @client_id: id of client which triggered this scan
+ * @num_scan_chans: number of channels that were scanned as part of this scan
+ * @scan_freqs: frequencies of the channels that were scanned
+ * @is_roam_successful: whether a successful roaming happened after this scan
+ * @old_bssid: bssid to which STA is connected just before this scan
+ * @new_bssid: bssid to which STA is roamed to in case of successful roaming
+ * @num_roam_candidates: number of roam candidates being reported
+ * @roam_candidate: roam scan candidate details
+ * @trigger_id: reason for triggering this roam or roam scan
+ * @trigger_value: threshold value related to trigger_id
+ */
+struct wmi_roam_scan_stats_params {
+ uint64_t time_stamp;
+ uint32_t client_id;
+ uint32_t num_scan_chans;
+ uint32_t scan_freqs[WMI_ROAM_SCAN_STATS_CHANNELS_MAX];
+ uint32_t is_roam_successful;
+
+ /* Bssid to which STA is connected when the roam scan is triggered */
+ uint8_t old_bssid[QDF_MAC_ADDR_SIZE];
+
+ /*
+ * Bssid to which STA is connected after roaming. Will be valid only
+ * if is_roam_successful is true.
+ */
+ uint8_t new_bssid[QDF_MAC_ADDR_SIZE];
+
+ /* Number of roam candidates that are being reported in the stats */
+ uint32_t num_roam_candidates;
+ struct wmi_roam_scan_stats_candidate roam_candidate[WMI_ROAM_SCAN_STATS_CANDIDATES_MAX];
+ uint32_t trigger_id;
+ uint32_t trigger_value;
+};
+
+/**
+ * struct wmi_roam_scan_stats_res - Roam scan stats response from firmware
+ * @num_roam_scans: number of roam scans reported
+ * @roam_scan: flexible array of wmi_roam_scan_stats_params entries that
+ * follows this structure
+ */
+struct wmi_roam_scan_stats_res {
+ uint32_t num_roam_scans;
+ struct wmi_roam_scan_stats_params roam_scan[0];
+};
+
+/* End of roam scan stats definitions */
+
+/**
+ * struct wmi_hw_filter_req_params - HW Filter mode parameters
+ * @vdev_id: VDEV id
+ * @enable: True: Enable HW filter, False: Disable
+ * @mode_bitmap: the hardware filter mode to configure
+ * @bssid: BSSID used to look up the session
+ */
+struct wmi_hw_filter_req_params {
+ uint8_t vdev_id;
+ bool enable;
+ uint8_t mode_bitmap;
+ struct qdf_mac_addr bssid;
+};
#endif /* _WMI_UNIFIED_PARAM_H_ */
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h
index 27f9f57949c3..db87ea343644 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_priv.h
@@ -185,8 +185,9 @@ QDF_STATUS (*send_vdev_delete_cmd)(wmi_unified_t wmi_handle,
QDF_STATUS (*send_vdev_stop_cmd)(wmi_unified_t wmi,
uint8_t vdev_id);
-QDF_STATUS (*send_conf_hw_filter_mode_cmd)(wmi_unified_t wmi, uint8_t vdev_id,
- uint8_t mode_bitmap);
+QDF_STATUS
+(*send_conf_hw_filter_mode_cmd)(wmi_unified_t wmi,
+ struct wmi_hw_filter_req_params *req);
QDF_STATUS (*send_vdev_down_cmd)(wmi_unified_t wmi,
uint8_t vdev_id);
@@ -759,11 +760,24 @@ QDF_STATUS (*send_get_buf_extscan_hotlist_cmd)(wmi_unified_t wmi_handle,
struct ext_scan_setbssi_hotlist_params *
photlist, int *buf_len);
-QDF_STATUS (*send_set_active_bpf_mode_cmd)(wmi_unified_t wmi_handle,
+QDF_STATUS (*send_set_active_apf_mode_cmd)(wmi_unified_t wmi_handle,
uint8_t vdev_id,
FW_ACTIVE_BPF_MODE ucast_mode,
FW_ACTIVE_BPF_MODE mcast_bcast_mode);
+QDF_STATUS (*send_apf_enable_cmd)(wmi_unified_t wmi_handle, uint32_t vdev_id,
+ bool enable);
+
+QDF_STATUS (*send_apf_write_work_memory_cmd)(wmi_unified_t wmi_handle,
+ struct wmi_apf_write_memory_params *apf_write_params);
+
+QDF_STATUS (*send_apf_read_work_memory_cmd)(wmi_unified_t wmi_handle,
+ struct wmi_apf_read_memory_params *apf_read_params);
+
+QDF_STATUS (*extract_apf_read_memory_resp_event)(wmi_unified_t wmi_handle,
+ void *evt_buf,
+ struct wmi_apf_read_memory_resp_event_params *resp);
+
QDF_STATUS (*send_pdev_get_tpc_config_cmd)(wmi_unified_t wmi_handle,
uint32_t param);
@@ -1218,6 +1232,15 @@ QDF_STATUS (*send_limit_off_chan_cmd)(wmi_unified_t wmi_handle,
QDF_STATUS (*send_wow_timer_pattern_cmd)(wmi_unified_t wmi_handle,
uint8_t vdev_id, uint32_t cookie, uint32_t time);
+
+QDF_STATUS (*send_roam_scan_stats_cmd)(wmi_unified_t wmi_handle,
+ struct wmi_roam_scan_stats_req *params);
+
+QDF_STATUS (*send_offload_11k_cmd)(wmi_unified_t wmi_handle,
+ struct wmi_11k_offload_params *params);
+
+QDF_STATUS (*send_invoke_neighbor_report_cmd)(wmi_unified_t wmi_handle,
+ struct wmi_invoke_neighbor_report_params *params);
};
struct target_abi_version {
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_tlv.h b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_tlv.h
index 5ce02c00bf1c..4292d90924d2 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_tlv.h
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/inc/wmi_unified_tlv.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -577,13 +577,12 @@ QDF_STATUS send_enable_arp_ns_offload_cmd_tlv(wmi_unified_t wmi_handle,
/**
* send_conf_hw_filter_cmd_tlv() - configure hw filter mode to firmware
* @wmi: wmi handle
- * @vdev_id: Id of the vdev to configure
- * @mode_bitmap: the hw filter mode to configure
+ * @req: the hw filter mode request parameters
*
* Return: QDF_STATUS
*/
-QDF_STATUS send_conf_hw_filter_cmd_tlv(wmi_unified_t wmi, uint8_t vdev_id,
- uint8_t mode_bitmap);
+QDF_STATUS send_conf_hw_filter_cmd_tlv(wmi_unified_t wmi,
+ struct wmi_hw_filter_req_params *req);
QDF_STATUS send_set_led_flashing_cmd_tlv(wmi_unified_t wmi_handle,
struct flashing_req_params *flashing);
@@ -641,22 +640,6 @@ QDF_STATUS send_get_buf_extscan_hotlist_cmd_tlv(wmi_unified_t wmi_handle,
struct ext_scan_setbssi_hotlist_params *
photlist, int *buf_len);
-/**
- * send_set_active_bpf_mode_cmd_tlv() - configure active BPF mode in FW
- * @wmi_handle: the WMI handle
- * @vdev_id: the Id of the vdev to apply the configuration to
- * @ucast_mode: the active BPF mode to configure for unicast packets
- * @mcast_bcast_mode: the active BPF mode to configure for multicast/broadcast
- * packets
- *
- * Return: QDF status
- */
-QDF_STATUS
-send_set_active_bpf_mode_cmd_tlv(wmi_unified_t wmi_handle,
- uint8_t vdev_id,
- FW_ACTIVE_BPF_MODE ucast_mode,
- FW_ACTIVE_BPF_MODE mcast_bcast_mode);
-
QDF_STATUS send_set_arp_stats_req_cmd_tlv(wmi_unified_t wmi_handle,
struct set_arp_stats *req_buf);
QDF_STATUS send_get_arp_stats_req_cmd_tlv(wmi_unified_t wmi_handle,
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_helper.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_helper.c
index b6931f51a0fa..451a4b3d8ae8 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_helper.c
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_tlv_helper.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -505,6 +505,7 @@ wmitlv_check_and_pad_tlvs(void *os_handle, void *param_struc_ptr,
wmitlv_cmd_param_info *cmd_param_tlvs_ptr = NULL;
A_UINT32 remaining_expected_tlvs = 0xFFFFFFFF;
A_UINT32 len_wmi_cmd_struct_buf;
+ A_UINT32 free_buf_len;
A_INT32 error = -1;
/* Get the number of TLVs for this command/event */
@@ -567,6 +568,13 @@ wmitlv_check_and_pad_tlvs(void *os_handle, void *param_struc_ptr,
WMITLV_GET_TLVLEN(WMITLV_GET_HDR(buf_ptr));
int num_padding_bytes = 0;
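+			/* Reject a TLV whose declared length would overrun the remaining message buffer */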
+ free_buf_len = param_buf_len - (buf_idx + WMI_TLV_HDR_SIZE);
+ if (curr_tlv_len > free_buf_len) {
+ wmi_tlv_print_error("%s: TLV length overflow",
+ __func__);
+ goto Error_wmitlv_check_and_pad_tlvs;
+ }
+
/* Get the attributes of the TLV with the given order in "tlv_index" */
wmi_tlv_OS_MEMZERO(&attr_struct_ptr,
sizeof(wmitlv_attributes_struc));
@@ -630,6 +638,13 @@ wmitlv_check_and_pad_tlvs(void *os_handle, void *param_struc_ptr,
WMITLV_GET_TLVLEN(WMITLV_GET_HDR
(buf_ptr));
in_tlv_len += WMI_TLV_HDR_SIZE;
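+					/* An array element's TLV must not extend past the enclosing array TLV */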
+ if (in_tlv_len > curr_tlv_len) {
+ wmi_tlv_print_error("%s: Invalid in_tlv_len=%d",
+ __func__,
+ in_tlv_len);
+ goto
+ Error_wmitlv_check_and_pad_tlvs;
+ }
tlv_size_diff =
in_tlv_len -
attr_struct_ptr.tag_struct_size;
@@ -749,8 +764,17 @@ wmitlv_check_and_pad_tlvs(void *os_handle, void *param_struc_ptr,
/* Move subsequent TLVs by number of bytes to be padded
* for all elements */
- if (param_buf_len >
- (buf_idx + curr_tlv_len)) {
+ if ((free_buf_len <
+ attr_struct_ptr.tag_struct_size *
+ num_of_elems) ||
+ (param_buf_len <
+ buf_idx + curr_tlv_len +
+ num_padding_bytes * num_of_elems)) {
+ wmi_tlv_print_error("%s: Insufficent buffer\n",
+ __func__);
+ goto
+ Error_wmitlv_check_and_pad_tlvs;
+ } else {
src_addr =
buf_ptr + curr_tlv_len;
dst_addr =
@@ -770,12 +794,10 @@ wmitlv_check_and_pad_tlvs(void *os_handle, void *param_struc_ptr,
 * bytes to be padded for one element and also set
* padding bytes to zero */
tlv_buf_ptr = buf_ptr;
- for (i = 0; i < num_of_elems; i++) {
+ for (i = 0; i < num_of_elems - 1; i++) {
src_addr =
tlv_buf_ptr + in_tlv_len;
if (i != (num_of_elems - 1)) {
- /* Need not move anything for last element
- * in the array */
dst_addr =
tlv_buf_ptr +
in_tlv_len +
@@ -798,6 +820,9 @@ wmitlv_check_and_pad_tlvs(void *os_handle, void *param_struc_ptr,
attr_struct_ptr.
tag_struct_size;
}
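+				/* Zero the padding that follows the last element of the array */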
+ src_addr = tlv_buf_ptr + in_tlv_len;
+ wmi_tlv_OS_MEMZERO(src_addr,
+ num_padding_bytes);
/* Update the number of padding bytes to total number
* of bytes padded for all elements in the array */
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c
index e58c33e2b401..fa3644d4411a 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -1710,7 +1710,7 @@ end:
}
-#define WMI_WQ_WD_TIMEOUT (10 * 1000) /* 10s */
+#define WMI_WQ_WD_TIMEOUT (30 * 1000) /* 30s */
static inline void wmi_workqueue_watchdog_warn(uint32_t msg_type_id)
{
@@ -2121,6 +2121,22 @@ void wmi_set_tgt_assert(wmi_unified_t wmi_handle, bool val)
{
wmi_handle->tgt_force_assert_enable = val;
}
+
+/**
+ * wmi_stop() - generic function to block unified WMI command
+ * @wmi_handle: handle to WMI.
+ *
+ * @Return: success always.
+ */
+int
+wmi_stop(wmi_unified_t wmi_handle)
+{
+ QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
+ "WMI Stop\n");
+ wmi_handle->wmi_stopinprogress = 1;
+ return 0;
+}
+
#ifdef WMI_NON_TLV_SUPPORT
/**
* API to flush all the previous packets associated with the wmi endpoint
@@ -2134,17 +2150,4 @@ wmi_flush_endpoint(wmi_unified_t wmi_handle)
wmi_handle->wmi_endpoint_id, 0);
}
-/**
- * generic function to block unified WMI command
- * @param wmi_handle : handle to WMI.
- * @return 0 on success and -ve on failure.
- */
-int
-wmi_stop(wmi_unified_t wmi_handle)
-{
- QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
- "WMI Stop\n");
- wmi_handle->wmi_stopinprogress = 1;
- return 0;
-}
#endif
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_apf_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_apf_tlv.c
new file mode 100644
index 000000000000..8cb3aba07dc9
--- /dev/null
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_apf_tlv.c
@@ -0,0 +1,229 @@
+/*
+ * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+#include "wmi_unified_apf_tlv.h"
+
+QDF_STATUS send_set_active_apf_mode_cmd_tlv(wmi_unified_t wmi_handle,
+ uint8_t vdev_id,
+ FW_ACTIVE_BPF_MODE ucast_mode,
+ FW_ACTIVE_BPF_MODE mcast_bcast_mode)
+{
+ const WMITLV_TAG_ID tag_id =
+ WMITLV_TAG_STRUC_wmi_bpf_set_vdev_active_mode_cmd_fixed_param;
+ const uint32_t tlv_len = WMITLV_GET_STRUCT_TLVLEN(
+ wmi_bpf_set_vdev_active_mode_cmd_fixed_param);
+ QDF_STATUS status;
+ wmi_bpf_set_vdev_active_mode_cmd_fixed_param *cmd;
+ wmi_buf_t buf;
+
+ WMI_LOGD("Sending WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID(%u, %d, %d)",
+ vdev_id, ucast_mode, mcast_bcast_mode);
+
+ /* allocate command buffer */
+ buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd));
+ if (!buf) {
+ WMI_LOGE("%s: wmi_buf_alloc failed", __func__);
+ return QDF_STATUS_E_NOMEM;
+ }
+
+ /* set TLV header */
+ cmd = (wmi_bpf_set_vdev_active_mode_cmd_fixed_param *)wmi_buf_data(buf);
+ WMITLV_SET_HDR(&cmd->tlv_header, tag_id, tlv_len);
+
+ /* populate data */
+ cmd->vdev_id = vdev_id;
+ cmd->uc_mode = ucast_mode;
+ cmd->mcbc_mode = mcast_bcast_mode;
+
+ /* send to FW */
+ status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd),
+ WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID);
+ if (QDF_IS_STATUS_ERROR(status)) {
+ WMI_LOGE("Failed to send WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID:%d",
+ status);
+ wmi_buf_free(buf);
+ return status;
+ }
+
+ return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS send_apf_enable_cmd_tlv(wmi_unified_t wmi_handle,
+ uint32_t vdev_id, bool enable)
+{
+ wmi_bpf_set_vdev_enable_cmd_fixed_param *cmd;
+ wmi_buf_t buf;
+
+ buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd));
+ if (!buf) {
+ WMI_LOGP("%s: wmi_buf_alloc failed", __func__);
+ return QDF_STATUS_E_NOMEM;
+ }
+
+ cmd = (wmi_bpf_set_vdev_enable_cmd_fixed_param *) wmi_buf_data(buf);
+ WMITLV_SET_HDR(&cmd->tlv_header,
+ WMITLV_TAG_STRUC_wmi_bpf_set_vdev_enable_cmd_fixed_param,
+ WMITLV_GET_STRUCT_TLVLEN(
+ wmi_bpf_set_vdev_enable_cmd_fixed_param));
+ cmd->vdev_id = vdev_id;
+ cmd->is_enabled = enable;
+
+ if (wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd),
+ WMI_BPF_SET_VDEV_ENABLE_CMDID)) {
+ WMI_LOGE("%s: Failed to enable/disable APF interpreter",
+ __func__);
+ wmi_buf_free(buf);
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS
+send_apf_write_work_memory_cmd_tlv(wmi_unified_t wmi_handle,
+ struct wmi_apf_write_memory_params
+ *apf_write_params)
+{
+ wmi_bpf_set_vdev_work_memory_cmd_fixed_param *cmd;
+ uint32_t wmi_buf_len;
+ wmi_buf_t buf;
+ uint8_t *buf_ptr;
+ uint32_t aligned_len = 0;
+
+ wmi_buf_len = sizeof(*cmd);
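+	/* Variable-length write payload is carried in a byte-array TLV, padded up to a 4-byte boundary */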
+ if (apf_write_params->length) {
+ aligned_len = roundup(apf_write_params->length,
+ sizeof(A_UINT32));
+
+ wmi_buf_len += WMI_TLV_HDR_SIZE + aligned_len;
+
+ }
+
+ buf = wmi_buf_alloc(wmi_handle, wmi_buf_len);
+ if (!buf) {
+ WMI_LOGP("%s: wmi_buf_alloc failed", __func__);
+ return QDF_STATUS_E_NOMEM;
+ }
+
+ buf_ptr = wmi_buf_data(buf);
+ cmd = (wmi_bpf_set_vdev_work_memory_cmd_fixed_param *)buf_ptr;
+ WMITLV_SET_HDR(&cmd->tlv_header,
+ WMITLV_TAG_STRUC_wmi_bpf_set_vdev_work_memory_cmd_fixed_param,
+ WMITLV_GET_STRUCT_TLVLEN(
+ wmi_bpf_set_vdev_work_memory_cmd_fixed_param));
+ cmd->vdev_id = apf_write_params->vdev_id;
+ cmd->bpf_version = apf_write_params->apf_version;
+ cmd->program_len = apf_write_params->program_len;
+ cmd->addr_offset = apf_write_params->addr_offset;
+ cmd->length = apf_write_params->length;
+
+ if (apf_write_params->length) {
+ buf_ptr += sizeof(*cmd);
+ WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE,
+ aligned_len);
+ buf_ptr += WMI_TLV_HDR_SIZE;
+ qdf_mem_copy(buf_ptr, apf_write_params->buf,
+ apf_write_params->length);
+ }
+
+ if (wmi_unified_cmd_send(wmi_handle, buf, wmi_buf_len,
+ WMI_BPF_SET_VDEV_WORK_MEMORY_CMDID)) {
+ WMI_LOGE("%s: Failed to write APF work memory", __func__);
+ wmi_buf_free(buf);
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS
+send_apf_read_work_memory_cmd_tlv(wmi_unified_t wmi_handle,
+ struct wmi_apf_read_memory_params
+ *apf_read_params)
+{
+ wmi_bpf_get_vdev_work_memory_cmd_fixed_param *cmd;
+ wmi_buf_t buf;
+
+ buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd));
+ if (!buf) {
+ WMI_LOGP("%s: wmi_buf_alloc failed", __func__);
+ return QDF_STATUS_E_NOMEM;
+ }
+
+ cmd = (wmi_bpf_get_vdev_work_memory_cmd_fixed_param *)
+ wmi_buf_data(buf);
+
+ WMITLV_SET_HDR(&cmd->tlv_header,
+ WMITLV_TAG_STRUC_wmi_bpf_get_vdev_work_memory_cmd_fixed_param,
+ WMITLV_GET_STRUCT_TLVLEN(
+ wmi_bpf_get_vdev_work_memory_cmd_fixed_param));
+ cmd->vdev_id = apf_read_params->vdev_id;
+ cmd->addr_offset = apf_read_params->addr_offset;
+ cmd->length = apf_read_params->length;
+
+ if (wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd),
+ WMI_BPF_GET_VDEV_WORK_MEMORY_CMDID)) {
+ WMI_LOGE("%s: Failed to get APF work memory", __func__);
+ wmi_buf_free(buf);
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ return QDF_STATUS_SUCCESS;
+}
+
+QDF_STATUS
+extract_apf_read_memory_resp_event_tlv(wmi_unified_t wmi_handle, void *evt_buf,
+ struct wmi_apf_read_memory_resp_event_params *resp)
+{
+ WMI_BPF_GET_VDEV_WORK_MEMORY_RESP_EVENTID_param_tlvs *param_buf;
+ wmi_bpf_get_vdev_work_memory_resp_evt_fixed_param *data_event;
+
+ param_buf = evt_buf;
+ if (!param_buf) {
+ WMI_LOGE("encrypt decrypt resp evt_buf is NULL");
+ return QDF_STATUS_E_INVAL;
+ }
+
+ data_event = param_buf->fixed_param;
+
+ resp->vdev_id = data_event->vdev_id;
+ resp->offset = data_event->offset;
+ resp->more_data = data_event->fragment;
+
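+	/* Do not trust a firmware-reported length larger than the received byte-array TLV */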
+ if (data_event->length > param_buf->num_data) {
+ WMI_LOGE("FW msg data_len %d more than TLV hdr %d",
+ data_event->length,
+ param_buf->num_data);
+ return QDF_STATUS_E_INVAL;
+ }
+ resp->length = data_event->length;
+
+ if (resp->length)
+ resp->data = (uint8_t *)param_buf->data;
+
+ return QDF_STATUS_SUCCESS;
+}
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c
index 976d17124587..57bb50799109 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_api.c
@@ -3283,17 +3283,16 @@ QDF_STATUS wmi_unified_enable_arp_ns_offload_cmd(void *wmi_hdl,
return QDF_STATUS_E_FAILURE;
}
-QDF_STATUS wmi_unified_conf_hw_filter_mode_cmd(void *wmi_hdl,
- uint8_t vdev_id,
- uint8_t mode_bitmap)
+QDF_STATUS
+wmi_unified_conf_hw_filter_mode_cmd(void *wmi_hdl,
+ struct wmi_hw_filter_req_params *req)
{
wmi_unified_t wmi = wmi_hdl;
if (!wmi->ops->send_conf_hw_filter_mode_cmd)
return QDF_STATUS_E_FAILURE;
- return wmi->ops->send_conf_hw_filter_mode_cmd(wmi, vdev_id,
- mode_bitmap);
+ return wmi->ops->send_conf_hw_filter_mode_cmd(wmi, req);
}
/**
@@ -3651,21 +3650,64 @@ QDF_STATUS wmi_unified_get_buf_extscan_hotlist_cmd(void *wmi_hdl,
}
QDF_STATUS
-wmi_unified_set_active_bpf_mode_cmd(void *wmi_hdl,
+wmi_unified_set_active_apf_mode_cmd(wmi_unified_t wmi,
uint8_t vdev_id,
FW_ACTIVE_BPF_MODE ucast_mode,
FW_ACTIVE_BPF_MODE mcast_bcast_mode)
{
- wmi_unified_t wmi = (wmi_unified_t)wmi_hdl;
- if (!wmi->ops->send_set_active_bpf_mode_cmd) {
- WMI_LOGD("send_set_active_bpf_mode_cmd op is NULL");
- return QDF_STATUS_E_FAILURE;
- }
+ if (wmi->ops->send_set_active_apf_mode_cmd)
+ return wmi->ops->send_set_active_apf_mode_cmd(wmi, vdev_id,
+ ucast_mode,
+ mcast_bcast_mode);
+ return QDF_STATUS_E_FAILURE;
+}
+
+QDF_STATUS
+wmi_unified_send_apf_enable_cmd(wmi_unified_t wmi,
+ uint32_t vdev_id, bool enable)
+{
+ if (wmi->ops->send_apf_enable_cmd)
+ return wmi->ops->send_apf_enable_cmd(wmi, vdev_id, enable);
+
+ return QDF_STATUS_E_FAILURE;
+}
+
+QDF_STATUS
+wmi_unified_send_apf_write_work_memory_cmd(wmi_unified_t wmi,
+ struct wmi_apf_write_memory_params
+ *write_params)
+{
+ if (wmi->ops->send_apf_write_work_memory_cmd)
+ return wmi->ops->send_apf_write_work_memory_cmd(wmi,
+ write_params);
+
+ return QDF_STATUS_E_FAILURE;
+}
+
+QDF_STATUS
+wmi_unified_send_apf_read_work_memory_cmd(wmi_unified_t wmi,
+ struct wmi_apf_read_memory_params
+ *read_params)
+{
+ if (wmi->ops->send_apf_read_work_memory_cmd)
+ return wmi->ops->send_apf_read_work_memory_cmd(wmi,
+ read_params);
+
+ return QDF_STATUS_E_FAILURE;
+}
+
+QDF_STATUS
+wmi_extract_apf_read_memory_resp_event(wmi_unified_t wmi, void *evt_buf,
+ struct wmi_apf_read_memory_resp_event_params
+ *read_mem_evt)
+{
+ if (wmi->ops->extract_apf_read_memory_resp_event)
+ return wmi->ops->extract_apf_read_memory_resp_event(wmi,
+ evt_buf,
+ read_mem_evt);
- return wmi->ops->send_set_active_bpf_mode_cmd(wmi, vdev_id,
- ucast_mode,
- mcast_bcast_mode);
+ return QDF_STATUS_E_FAILURE;
}
/**
@@ -6478,3 +6520,40 @@ QDF_STATUS wmi_unified_send_limit_off_chan_cmd(void *wmi_hdl,
return QDF_STATUS_E_FAILURE;
}
+
+QDF_STATUS
+wmi_unified_send_roam_scan_stats_cmd(void *wmi_hdl,
+ struct wmi_roam_scan_stats_req *params)
+{
+ wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;
+
+ if (wmi_handle->ops->send_roam_scan_stats_cmd)
+ return wmi_handle->ops->send_roam_scan_stats_cmd(wmi_handle,
+ params);
+
+ return QDF_STATUS_E_FAILURE;
+}
+
+QDF_STATUS wmi_unified_offload_11k_cmd(void *wmi_hdl,
+ struct wmi_11k_offload_params *params)
+{
+ wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;
+
+ if (wmi_handle->ops->send_offload_11k_cmd)
+ return wmi_handle->ops->send_offload_11k_cmd(
+ wmi_handle, params);
+
+ return QDF_STATUS_E_FAILURE;
+}
+
+QDF_STATUS wmi_unified_invoke_neighbor_report_cmd(void *wmi_hdl,
+ struct wmi_invoke_neighbor_report_params *params)
+{
+ wmi_unified_t wmi_handle = (wmi_unified_t) wmi_hdl;
+
+ if (wmi_handle->ops->send_invoke_neighbor_report_cmd)
+ return wmi_handle->ops->send_invoke_neighbor_report_cmd(
+ wmi_handle, params);
+
+ return QDF_STATUS_E_FAILURE;
+}
diff --git a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c
index 0eb3190dac94..ebf3d47e76b7 100644
--- a/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c
+++ b/drivers/staging/qca-wifi-host-cmn/wmi/src/wmi_unified_tlv.c
@@ -26,6 +26,7 @@
*/
#include "wmi_unified_tlv.h"
+#include "wmi_unified_apf_tlv.h"
#include "wmi_unified_api.h"
#include "wmi.h"
#include "wmi_version.h"
@@ -1984,7 +1985,7 @@ QDF_STATUS send_mgmt_cmd_tlv(wmi_unified_t wmi_handle,
QDF_DMA_TO_DEVICE);
if (status != QDF_STATUS_SUCCESS) {
WMI_LOGE("%s: wmi buf map failed", __func__);
- goto err1;
+ goto free_buf;
}
dma_addr = qdf_nbuf_get_frag_paddr(param->tx_frame, 0);
@@ -2005,7 +2006,7 @@ QDF_STATUS send_mgmt_cmd_tlv(wmi_unified_t wmi_handle,
if (status != QDF_STATUS_SUCCESS) {
WMI_LOGE("%s: Populate TX send params failed",
__func__);
- goto err1;
+ goto unmap_tx_frame;
}
cmd_len += sizeof(wmi_tx_send_params);
}
@@ -2013,11 +2014,14 @@ QDF_STATUS send_mgmt_cmd_tlv(wmi_unified_t wmi_handle,
if (wmi_unified_cmd_send(wmi_handle, buf, cmd_len,
WMI_MGMT_TX_SEND_CMDID)) {
WMI_LOGE("%s: Failed to send mgmt Tx", __func__);
- goto err1;
+ goto unmap_tx_frame;
}
return QDF_STATUS_SUCCESS;
-err1:
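+/* On failure, unmap the DMA-mapped frame (when mapped) before freeing the WMI buffer */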
+unmap_tx_frame:
+ qdf_nbuf_unmap_single(qdf_ctx, param->tx_frame,
+ QDF_DMA_TO_DEVICE);
+free_buf:
wmi_buf_free(buf);
return QDF_STATUS_E_FAILURE;
}
@@ -11370,8 +11374,8 @@ QDF_STATUS send_enable_arp_ns_offload_cmd_tlv(wmi_unified_t wmi_handle,
return QDF_STATUS_SUCCESS;
}
-QDF_STATUS send_conf_hw_filter_cmd_tlv(wmi_unified_t wmi, uint8_t vdev_id,
- uint8_t mode_bitmap)
+QDF_STATUS send_conf_hw_filter_cmd_tlv(wmi_unified_t wmi,
+ struct wmi_hw_filter_req_params *req)
{
QDF_STATUS status;
wmi_hw_data_filter_cmd_fixed_param *cmd;
@@ -11387,11 +11391,12 @@ QDF_STATUS send_conf_hw_filter_cmd_tlv(wmi_unified_t wmi, uint8_t vdev_id,
WMITLV_SET_HDR(&cmd->tlv_header,
WMITLV_TAG_STRUC_wmi_hw_data_filter_cmd_fixed_param,
WMITLV_GET_STRUCT_TLVLEN(wmi_hw_data_filter_cmd_fixed_param));
- cmd->vdev_id = vdev_id;
- cmd->enable = mode_bitmap != 0;
- cmd->hw_filter_bitmap = mode_bitmap;
+ cmd->vdev_id = req->vdev_id;
+ cmd->enable = req->enable;
+ cmd->hw_filter_bitmap = req->mode_bitmap;
- WMI_LOGD("conf hw filter vdev_id: %d, mode: %u", vdev_id, mode_bitmap);
+ WMI_LOGD("conf hw filter vdev_id: %d, mode: %u", req->vdev_id,
+ req->mode_bitmap);
status = wmi_unified_cmd_send(wmi, wmi_buf, sizeof(*cmd),
WMI_HW_DATA_FILTER_CMDID);
if (QDF_IS_STATUS_ERROR(status)) {
@@ -12531,53 +12536,6 @@ QDF_STATUS send_get_buf_extscan_hotlist_cmd_tlv(wmi_unified_t wmi_handle,
return QDF_STATUS_SUCCESS;
}
-QDF_STATUS send_set_active_bpf_mode_cmd_tlv(wmi_unified_t wmi_handle,
- uint8_t vdev_id,
- FW_ACTIVE_BPF_MODE ucast_mode,
- FW_ACTIVE_BPF_MODE mcast_bcast_mode)
-{
- const WMITLV_TAG_ID tag_id =
- WMITLV_TAG_STRUC_wmi_bpf_set_vdev_active_mode_cmd_fixed_param;
- const uint32_t tlv_len = WMITLV_GET_STRUCT_TLVLEN(
- wmi_bpf_set_vdev_active_mode_cmd_fixed_param);
- QDF_STATUS status;
- wmi_bpf_set_vdev_active_mode_cmd_fixed_param *cmd;
- wmi_buf_t buf;
-
- WMI_LOGD("Sending WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID(%u, %d, %d)",
- vdev_id, ucast_mode, mcast_bcast_mode);
-
- /* allocate command buffer */
- buf = wmi_buf_alloc(wmi_handle, sizeof(*cmd));
- if (!buf) {
- WMI_LOGE("%s: wmi_buf_alloc failed", __func__);
- return QDF_STATUS_E_NOMEM;
- }
-
- /* set TLV header */
- cmd = (wmi_bpf_set_vdev_active_mode_cmd_fixed_param *)wmi_buf_data(buf);
- WMITLV_SET_HDR(&cmd->tlv_header, tag_id, tlv_len);
-
- /* populate data */
- cmd->vdev_id = vdev_id;
- cmd->uc_mode = ucast_mode;
- cmd->mcbc_mode = mcast_bcast_mode;
-
- /* send to FW */
- status = wmi_unified_cmd_send(wmi_handle, buf, sizeof(*cmd),
- WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID);
- if (QDF_IS_STATUS_ERROR(status)) {
- WMI_LOGE("Failed to send WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID:%d",
- status);
- wmi_buf_free(buf);
- return status;
- }
-
- WMI_LOGD("Sent WMI_BPF_SET_VDEV_ACTIVE_MODE_CMDID successfully");
-
- return QDF_STATUS_SUCCESS;
-}
-
/**
* send_power_dbg_cmd_tlv() - send power debug commands
* @wmi_handle: wmi handle
@@ -14464,6 +14422,183 @@ send_cmd:
return status;
}
+static QDF_STATUS
+send_roam_scan_stats_cmd_tlv(wmi_unified_t wmi_handle,
+ struct wmi_roam_scan_stats_req *params)
+{
+ wmi_buf_t buf;
+ wmi_request_roam_scan_stats_cmd_fixed_param *cmd;
+ uint8_t len = sizeof(*cmd);
+
+ buf = wmi_buf_alloc(wmi_handle, len);
+ if (!buf) {
+ WMI_LOGE(FL("Failed to allocate wmi buffer"));
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ cmd = (wmi_request_roam_scan_stats_cmd_fixed_param *)wmi_buf_data(buf);
+ WMITLV_SET_HDR(&cmd->tlv_header,
+ WMITLV_TAG_STRUC_wmi_request_roam_scan_stats_cmd_fixed_param,
+ WMITLV_GET_STRUCT_TLVLEN(wmi_request_roam_scan_stats_cmd_fixed_param));
+
+ cmd->vdev_id = params->vdev_id;
+
+ WMI_LOGD(FL("Roam Scan Stats Req vdev_id: %u"), cmd->vdev_id);
+ if (wmi_unified_cmd_send(wmi_handle, buf, len,
+ WMI_REQUEST_ROAM_SCAN_STATS_CMDID)) {
+ WMI_LOGE("%s: Failed to send WMI_REQUEST_ROAM_SCAN_STATS_CMDID",
+ __func__);
+ wmi_buf_free(buf);
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ return QDF_STATUS_SUCCESS;
+}
+
+/**
+ * send_offload_11k_cmd_tlv() - send wmi cmd with 11k offload params
+ * @wmi_handle: wmi handler
+ * @params: pointer to 11k offload params
+ *
+ * Return: 0 for success and non zero for failure
+ */
+static QDF_STATUS send_offload_11k_cmd_tlv(wmi_unified_t wmi_handle,
+ struct wmi_11k_offload_params *params)
+{
+ wmi_11k_offload_report_fixed_param *cmd;
+ wmi_buf_t buf;
+ QDF_STATUS status;
+ uint8_t *buf_ptr;
+ wmi_neighbor_report_11k_offload_tlv_param
+ *neighbor_report_offload_params;
+ wmi_neighbor_report_offload *neighbor_report_offload;
+
+ uint32_t len = sizeof(*cmd);
+
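+	/* The neighbor-report offload TLV is appended only when its bit is set in the offload bitmask */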
+ if (params->offload_11k_bitmask &
+ WMI_11K_OFFLOAD_BITMAP_NEIGHBOR_REPORT_REQ)
+ len += WMI_TLV_HDR_SIZE +
+ sizeof(wmi_neighbor_report_11k_offload_tlv_param);
+
+ buf = wmi_buf_alloc(wmi_handle, len);
+ if (!buf) {
+ WMI_LOGP("%s: failed to allocate memory for 11k offload params",
+ __func__);
+ return QDF_STATUS_E_NOMEM;
+ }
+
+ buf_ptr = (uint8_t *) wmi_buf_data(buf);
+ cmd = (wmi_11k_offload_report_fixed_param *) buf_ptr;
+
+ WMITLV_SET_HDR(&cmd->tlv_header,
+ WMITLV_TAG_STRUC_wmi_offload_11k_report_fixed_param,
+ WMITLV_GET_STRUCT_TLVLEN(
+ wmi_11k_offload_report_fixed_param));
+
+ cmd->vdev_id = params->vdev_id;
+ cmd->offload_11k = params->offload_11k_bitmask;
+
+ if (params->offload_11k_bitmask &
+ WMI_11K_OFFLOAD_BITMAP_NEIGHBOR_REPORT_REQ) {
+ buf_ptr += sizeof(wmi_11k_offload_report_fixed_param);
+
+ WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_STRUC,
+ sizeof(wmi_neighbor_report_11k_offload_tlv_param));
+ buf_ptr += WMI_TLV_HDR_SIZE;
+
+ neighbor_report_offload_params =
+ (wmi_neighbor_report_11k_offload_tlv_param *)buf_ptr;
+ WMITLV_SET_HDR(&neighbor_report_offload_params->tlv_header,
+ WMITLV_TAG_STRUC_wmi_neighbor_report_offload_tlv_param,
+ WMITLV_GET_STRUCT_TLVLEN(
+ wmi_neighbor_report_11k_offload_tlv_param));
+
+ neighbor_report_offload = &neighbor_report_offload_params->
+ neighbor_rep_ofld_params;
+
+ neighbor_report_offload->time_offset =
+ params->neighbor_report_params.time_offset;
+ neighbor_report_offload->low_rssi_offset =
+ params->neighbor_report_params.low_rssi_offset;
+ neighbor_report_offload->bmiss_count_trigger =
+ params->neighbor_report_params.bmiss_count_trigger;
+ neighbor_report_offload->per_threshold_offset =
+ params->neighbor_report_params.per_threshold_offset;
+ neighbor_report_offload->neighbor_report_cache_timeout =
+ params->neighbor_report_params.
+ neighbor_report_cache_timeout;
+ neighbor_report_offload->max_neighbor_report_req_cap =
+ params->neighbor_report_params.
+ max_neighbor_report_req_cap;
+ neighbor_report_offload->ssid.ssid_len =
+ params->neighbor_report_params.ssid.length;
+ qdf_mem_copy(neighbor_report_offload->ssid.ssid,
+ &params->neighbor_report_params.ssid.mac_ssid,
+ neighbor_report_offload->ssid.ssid_len);
+ }
+
+ status = wmi_unified_cmd_send(wmi_handle, buf, len,
+ WMI_11K_OFFLOAD_REPORT_CMDID);
+ if (status != QDF_STATUS_SUCCESS) {
+ WMI_LOGE("%s: failed to send 11k offload command %d",
+ __func__, status);
+ wmi_buf_free(buf);
+ }
+
+ return status;
+}
+
+/**
+ * send_invoke_neighbor_report_cmd_tlv() - send invoke 11k neighbor report
+ * command
+ * @wmi_handle: wmi handler
+ * @params: pointer to neighbor report invoke params
+ *
+ * Return: 0 for success and non zero for failure
+ */
+static QDF_STATUS send_invoke_neighbor_report_cmd_tlv(wmi_unified_t wmi_handle,
+ struct wmi_invoke_neighbor_report_params *params)
+{
+ wmi_11k_offload_invoke_neighbor_report_fixed_param *cmd;
+ wmi_buf_t buf;
+ QDF_STATUS status;
+ uint8_t *buf_ptr;
+ uint32_t len = sizeof(*cmd);
+
+ buf = wmi_buf_alloc(wmi_handle, len);
+ if (!buf) {
+ WMI_LOGP("%s:failed to allocate memory for neighbor invoke cmd",
+ __func__);
+ return QDF_STATUS_E_NOMEM;
+ }
+
+ buf_ptr = (uint8_t *) wmi_buf_data(buf);
+ cmd = (wmi_11k_offload_invoke_neighbor_report_fixed_param *) buf_ptr;
+
+ WMITLV_SET_HDR(&cmd->tlv_header,
+ WMITLV_TAG_STRUC_wmi_invoke_neighbor_report_fixed_param,
+ WMITLV_GET_STRUCT_TLVLEN(
+ wmi_11k_offload_invoke_neighbor_report_fixed_param));
+
+ cmd->vdev_id = params->vdev_id;
+ cmd->flags = params->send_resp_to_host;
+
+ cmd->ssid.ssid_len = params->ssid.length;
+ qdf_mem_copy(cmd->ssid.ssid,
+ &params->ssid.mac_ssid,
+ cmd->ssid.ssid_len);
+
+ status = wmi_unified_cmd_send(wmi_handle, buf, len,
+ WMI_11K_INVOKE_NEIGHBOR_REPORT_CMDID);
+ if (status != QDF_STATUS_SUCCESS) {
+ WMI_LOGE("%s: failed to send invoke neighbor report command %d",
+ __func__, status);
+ wmi_buf_free(buf);
+ }
+
+ return status;
+}
+
struct wmi_ops tlv_ops = {
.send_vdev_create_cmd = send_vdev_create_cmd_tlv,
.send_vdev_delete_cmd = send_vdev_delete_cmd_tlv,
@@ -14688,7 +14823,12 @@ struct wmi_ops tlv_ops = {
send_roam_scan_offload_rssi_change_cmd_tlv,
.send_get_buf_extscan_hotlist_cmd =
send_get_buf_extscan_hotlist_cmd_tlv,
- .send_set_active_bpf_mode_cmd = send_set_active_bpf_mode_cmd_tlv,
+ .send_set_active_apf_mode_cmd = send_set_active_apf_mode_cmd_tlv,
+ .send_apf_enable_cmd = send_apf_enable_cmd_tlv,
+ .send_apf_write_work_memory_cmd = send_apf_write_work_memory_cmd_tlv,
+ .send_apf_read_work_memory_cmd = send_apf_read_work_memory_cmd_tlv,
+ .extract_apf_read_memory_resp_event =
+ extract_apf_read_memory_resp_event_tlv,
.send_adapt_dwelltime_params_cmd =
send_adapt_dwelltime_params_cmd_tlv,
.send_dbs_scan_sel_params_cmd =
@@ -14755,6 +14895,9 @@ struct wmi_ops tlv_ops = {
send_roam_scan_send_hlp_cmd_tlv,
#endif
.send_wow_timer_pattern_cmd = send_wow_timer_pattern_cmd_tlv,
+ .send_roam_scan_stats_cmd = send_roam_scan_stats_cmd_tlv,
+ .send_offload_11k_cmd = send_offload_11k_cmd_tlv,
+ .send_invoke_neighbor_report_cmd = send_invoke_neighbor_report_cmd_tlv,
};
#ifdef WMI_TLV_AND_NON_TLV_SUPPORT
@@ -15101,6 +15244,7 @@ static void populate_tlv_events_id(uint32_t *event_ids)
event_ids[wmi_update_rcpi_event_id] = WMI_UPDATE_RCPI_EVENTID;
event_ids[wmi_get_arp_stats_req_id] = WMI_VDEV_GET_ARP_STATS_EVENTID;
event_ids[wmi_sar_get_limits_event_id] = WMI_SAR_GET_LIMITS_EVENTID;
+ event_ids[wmi_roam_scan_stats_event_id] = WMI_ROAM_SCAN_STATS_EVENTID;
}
}
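
A minimal caller sketch may help illustrate how the three new commands above are reached at runtime. It is illustrative only and not part of this patch; it assumes the unified WMI handle exposes its dispatch table as wmi_handle->ops (the field name and the helper below are assumptions).

static QDF_STATUS example_request_roam_scan_stats(wmi_unified_t wmi_handle,
                                                  uint8_t vdev_id)
{
        /* hypothetical helper: build the request and dispatch through the
         * ops table, like the other unified WMI send paths
         */
        struct wmi_roam_scan_stats_req req = { .vdev_id = vdev_id };

        if (wmi_handle->ops->send_roam_scan_stats_cmd)
                return wmi_handle->ops->send_roam_scan_stats_cmd(wmi_handle,
                                                                 &req);

        return QDF_STATUS_E_NOSUPPORT;
}
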
diff --git a/drivers/staging/qcacld-3.0/Kbuild b/drivers/staging/qcacld-3.0/Kbuild
index 18f4fe569d19..147607295e19 100644
--- a/drivers/staging/qcacld-3.0/Kbuild
+++ b/drivers/staging/qcacld-3.0/Kbuild
@@ -243,6 +243,9 @@ endif
#enable spectral scan feature
CONFIG_WLAN_SPECTRAL_SCAN := y
+# Flag to enable Android Packet Filtering
+CONFIG_WLAN_FEATURE_APF := y
+
#Enable WLAN/Power debugfs feature only if debug_fs is enabled
ifeq ($(CONFIG_DEBUG_FS), y)
# Flag to enable debugfs. Depends on CONFIG_DEBUG_FS in kernel
@@ -269,7 +272,13 @@ BUILD_DEBUG_VERSION := 1
BUILD_DIAG_VERSION := 1
#Do we panic on bug? default is to warn
-PANIC_ON_BUG := 1
+ifeq ($(CONFIG_SLUB_DEBUG), y)
+ PANIC_ON_BUG := 1
+else ifeq ($(CONFIG_PERF_DEBUG), y)
+ PANIC_ON_BUG := 1
+else
+ PANIC_ON_BUG := 0
+endif
#Enable OL debug and wmi unified functions
CONFIG_ATH_PERF_PWR_OFFLOAD := 1
@@ -519,6 +528,10 @@ ifeq ($(CONFIG_WLAN_SPECTRAL_SCAN), y)
HDD_OBJS += $(HDD_SRC_DIR)/wlan_hdd_spectral.o
endif
+ifeq ($(CONFIG_WLAN_FEATURE_APF), y)
+HDD_OBJS += $(HDD_SRC_DIR)/wlan_hdd_apf.o
+endif
+
########### HOST DIAG LOG ###########
HOST_DIAG_LOG_DIR := core/utils/host_diag_log
@@ -840,6 +853,10 @@ WMI_OBJS := $(WMI_OBJ_DIR)/wmi_unified.o \
$(WMI_OBJ_DIR)/wmi_unified_api.o \
$(WMI_OBJ_DIR)/wmi_unified_non_tlv.o
+ifeq ($(CONFIG_WLAN_FEATURE_APF), y)
+WMI_OBJS += $(WMI_OBJ_DIR)/wmi_unified_apf_tlv.o
+endif
+
WMI_CLEAN_FILES := $(WMI_OBJ_DIR)/*.o \
$(WMI_OBJ_DIR)/*.o.* \
$(WMI_OBJ_DIR)/.*.o.*
@@ -1805,6 +1822,10 @@ ifeq ($(CONFIG_WLAN_SPECTRAL_SCAN), y)
CDEFINES += -DFEATURE_SPECTRAL_SCAN
endif
+ifeq ($(CONFIG_WLAN_FEATURE_APF), y)
+CDEFINES += -DWLAN_FEATURE_APF
+endif
+
#Flag to enable/disable WLAN D0-WOW
ifeq ($(CONFIG_PCI_MSM), y)
ifeq ($(CONFIG_ROME_IF),pci)
diff --git a/drivers/staging/qcacld-3.0/Makefile b/drivers/staging/qcacld-3.0/Makefile
index 0e11ae6e7ae7..40ea0b812436 100644
--- a/drivers/staging/qcacld-3.0/Makefile
+++ b/drivers/staging/qcacld-3.0/Makefile
@@ -1,6 +1,18 @@
KERNEL_SRC ?= /lib/modules/$(shell uname -r)/build
-KBUILD_OPTIONS := WLAN_ROOT=$(PWD)
+# The Make variable $(M) must point to the directory that contains the module
+# source code (which includes this Makefile). It can either be an absolute or a
+# relative path. If it is a relative path, then it must be relative to the
+# kernel source directory (KERNEL_SRC). An absolute path can be obtained very
+# easily through $(shell pwd). Generating a path relative to KERNEL_SRC is
+# difficult and we accept some outside help by letting the caller override the
+# variable $(M). Allowing a relative path for $(M) enables us to have the build
+# system put output/object files (.o, .ko.) into a directory different from the
+# module source directory.
+M ?= $(shell pwd)
+
+# WLAN_ROOT must contain an absolute path (i.e. not a relative path)
+KBUILD_OPTIONS := WLAN_ROOT=$(shell cd $(KERNEL_SRC); readlink -e $(M))
KBUILD_OPTIONS += MODNAME?=wlan
#By default build for CLD
@@ -11,10 +23,10 @@ KBUILD_OPTIONS += $(WLAN_SELECT)
KBUILD_OPTIONS += $(KBUILD_EXTRA) # Extra config if any
all:
- $(MAKE) -C $(KERNEL_SRC) M=$(shell pwd) modules $(KBUILD_OPTIONS)
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) modules $(KBUILD_OPTIONS)
modules_install:
- $(MAKE) INSTALL_MOD_STRIP=1 -C $(KERNEL_SRC) M=$(shell pwd) modules_install
+ $(MAKE) INSTALL_MOD_STRIP=1 M=$(M) -C $(KERNEL_SRC) modules_install
clean:
- $(MAKE) -C $(KERNEL_SRC) M=$(PWD) clean
+ $(MAKE) -C $(KERNEL_SRC) M=$(M) clean
diff --git a/drivers/staging/qcacld-3.0/core/cds/inc/cds_api.h b/drivers/staging/qcacld-3.0/core/cds/inc/cds_api.h
index cf6935f2c190..97f244402fea 100644
--- a/drivers/staging/qcacld-3.0/core/cds/inc/cds_api.h
+++ b/drivers/staging/qcacld-3.0/core/cds/inc/cds_api.h
@@ -253,7 +253,7 @@ if (cds_is_load_or_unload_in_progress() || cds_is_driver_recovering() ||
*/
static inline bool cds_is_fw_down(void)
{
-return pld_is_fw_down();
+ return pld_is_fw_down();
}
/**
@@ -263,9 +263,9 @@ return pld_is_fw_down();
*/
static inline bool cds_is_target_ready(void)
{
-enum cds_driver_state state = cds_get_driver_state();
+ enum cds_driver_state state = cds_get_driver_state();
-return __CDS_IS_DRIVER_STATE(state, CDS_DRIVER_STATE_FW_READY);
+ return __CDS_IS_DRIVER_STATE(state, CDS_DRIVER_STATE_FW_READY);
}
/**
@@ -537,13 +537,14 @@ void cds_print_htc_credit_history(uint32_t count, qdf_abstract_print * print,
* cds_smmu_mem_map_setup() - Check SMMU S1 stage enable
* status and setup wlan driver
* @osdev: Parent device instance
+ * @ipa_present: IPA HW support flag
*
* This API checks if SMMU S1 translation is enabled in
* platform driver or not and sets it accordingly in driver.
*
- * Return: none
+ * Return: QDF_STATUS
*/
-void cds_smmu_mem_map_setup(qdf_device_t osdev);
+QDF_STATUS cds_smmu_mem_map_setup(qdf_device_t osdev, bool ipa_present);
/**
* cds_smmu_map_unmap() - Map / Unmap DMA buffer to IPA UC
diff --git a/drivers/staging/qcacld-3.0/core/cds/inc/cds_concurrency.h b/drivers/staging/qcacld-3.0/core/cds/inc/cds_concurrency.h
index e0499f98e426..b6361ed00013 100644
--- a/drivers/staging/qcacld-3.0/core/cds/inc/cds_concurrency.h
+++ b/drivers/staging/qcacld-3.0/core/cds/inc/cds_concurrency.h
@@ -41,7 +41,7 @@
#define MAX_NUMBER_OF_CONC_CONNECTIONS 3
#define DBS_OPPORTUNISTIC_TIME 10
#define CONNECTION_UPDATE_TIMEOUT 3000
-#define CHANNEL_SWITCH_COMPLETE_TIMEOUT 1000
+#define CHANNEL_SWITCH_COMPLETE_TIMEOUT 2000
/* Some max value greater than the max length of the channel list */
#define MAX_WEIGHT_OF_PCL_CHANNELS 255
@@ -1052,4 +1052,25 @@ bool cds_is_sta_connected_in_2g(void);
* Return: Connection count
*/
uint32_t cds_get_connection_info(struct connection_info *info);
+
+/**
+ * cds_trim_acs_channel_list() - Trim the ACS channel list based
+ * on the band of the active station connection
+ * @sap_cfg: SAP configuration info
+ *
+ * Return: None
+ */
+void cds_trim_acs_channel_list(tsap_Config_t *sap_cfg);
+
+/**
+ * cds_allow_sap_go_concurrency() - check whether multiple SAP/GO
+ * interfaces are allowed
+ * @mode: operating mode of the new interface
+ * @channel: operating channel of the new interface
+ *
+ * This function checks whether a second SAP/GO interface is allowed on the
+ * same MAC.
+ *
+ * Return: true or false
+ */
+bool cds_allow_sap_go_concurrency(enum cds_con_mode mode, uint8_t channel);
#endif /* __CDS_CONCURRENCY_H */
diff --git a/drivers/staging/qcacld-3.0/core/cds/inc/cds_config.h b/drivers/staging/qcacld-3.0/core/cds/inc/cds_config.h
index ffe4593c947d..96b5763cc736 100644
--- a/drivers/staging/qcacld-3.0/core/cds/inc/cds_config.h
+++ b/drivers/staging/qcacld-3.0/core/cds/inc/cds_config.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -42,17 +42,17 @@ enum cfg_sub_20_channel_width {
};
/**
- * enum active_bpf_mode - the modes active BPF can operate in
- * @ACTIVE_BPF_DISABLED: BPF is disabled in active mode
- * @ACTIVE_BPF_ENABLED: BPF is enabled for all packets
- * @ACTIVE_BPF_ADAPTIVE: BPF is enabled for packets up to some threshold
- * @ACTIVE_BPF_MODE_COUNT: The number of active BPF modes
+ * enum active_apf_mode - the modes active APF can operate in
+ * @ACTIVE_APF_DISABLED: APF is disabled in active mode
+ * @ACTIVE_APF_ENABLED: APF is enabled for all packets
+ * @ACTIVE_APF_ADAPTIVE: APF is enabled for packets up to some threshold
+ * @ACTIVE_APF_MODE_COUNT: The number of active APF modes
*/
-enum active_bpf_mode {
- ACTIVE_BPF_DISABLED = 0,
- ACTIVE_BPF_ENABLED,
- ACTIVE_BPF_ADAPTIVE,
- ACTIVE_BPF_MODE_COUNT
+enum active_apf_mode {
+ ACTIVE_APF_DISABLED = 0,
+ ACTIVE_APF_ENABLED,
+ ACTIVE_APF_ADAPTIVE,
+ ACTIVE_APF_MODE_COUNT
};
/**
@@ -138,16 +138,16 @@ enum cds_auto_pwr_detect_failure_mode_t {
* @tx_flow_start_queue_offset: Start queue offset in percentage
* @is_lpass_enabled: Indicate whether LPASS is enabled or not
* @is_nan_enabled: Indicate whether NAN is enabled or not
- * @bool bpf_packet_filter_enable; Indicate bpf filter enabled or not
+ * @apf_packet_filter_enable: Indicate whether the APF packet filter is enabled
* @tx_chain_mask_cck: Tx chain mask enabled or not
* @self_gen_frm_pwr: Self gen from power
* @sub_20_channel_width: Sub 20 MHz ch width, ini intersected with fw cap
* @flow_steering_enabled: Receive flow steering.
* @is_fw_timeout: Indicate whether crash host when fw timesout or not
* @force_target_assert_enabled: Indicate whether target assert enabled or not
- * @active_uc_bpf_mode: Setting that determines how BPF is applied in active
+ * @active_uc_apf_mode: Setting that determines how APF is applied in active
* mode for uc packets
- * @active_mc_bc_bpf_mode: Setting that determines how BPF is applied in
+ * @active_mc_bc_apf_mode: Setting that determines how APF is applied in
* active mode for MC/BC packets
* @rps_enabled: RPS enabled in SAP mode
* @ito_repeat_count: Indicates ito repeated count
@@ -199,7 +199,7 @@ struct cds_config_info {
#ifdef WLAN_FEATURE_NAN
bool is_nan_enabled;
#endif
- bool bpf_packet_filter_enable;
+ bool apf_packet_filter_enable;
bool tx_chain_mask_cck;
uint16_t self_gen_frm_pwr;
enum cfg_sub_20_channel_width sub_20_channel_width;
@@ -211,8 +211,8 @@ struct cds_config_info {
struct ol_tx_sched_wrr_ac_specs_t ac_specs[TX_WMM_AC_NUM];
bool force_target_assert_enabled;
- enum active_bpf_mode active_uc_bpf_mode;
- enum active_bpf_mode active_mc_bc_bpf_mode;
+ enum active_apf_mode active_uc_apf_mode;
+ enum active_apf_mode active_mc_bc_apf_mode;
bool rps_enabled;
enum cds_auto_pwr_detect_failure_mode_t auto_power_save_fail_mode;
uint8_t ito_repeat_count;
diff --git a/drivers/staging/qcacld-3.0/core/cds/inc/cds_sched.h b/drivers/staging/qcacld-3.0/core/cds/inc/cds_sched.h
index 1d3347f3eac5..a5eaeb07a18e 100644
--- a/drivers/staging/qcacld-3.0/core/cds/inc/cds_sched.h
+++ b/drivers/staging/qcacld-3.0/core/cds/inc/cds_sched.h
@@ -641,4 +641,16 @@ void cds_shutdown_notifier_purge(void);
* shutdown.
*/
void cds_shutdown_notifier_call(void);
+
+/**
+ * cds_remove_timer_from_sys_msg() - Flush timer message from sys msg queue
+ * @timer_cookie: Unique cookie of the timer message to be flushed
+ *
+ * Find the timer message in the sys msg queue for the unique cookie
+ * and flush the message from the queue.
+ *
+ * Return: None
+ */
+void cds_remove_timer_from_sys_msg(uint32_t timer_cookie);
+
#endif /* #if !defined __CDS_SCHED_H */
diff --git a/drivers/staging/qcacld-3.0/core/cds/inc/cds_utils.h b/drivers/staging/qcacld-3.0/core/cds/inc/cds_utils.h
index 5f9015c9eab8..1421419781ee 100644
--- a/drivers/staging/qcacld-3.0/core/cds/inc/cds_utils.h
+++ b/drivers/staging/qcacld-3.0/core/cds/inc/cds_utils.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -56,10 +56,12 @@
#define CDS_24_GHZ_BASE_FREQ (2407)
#define CDS_5_GHZ_BASE_FREQ (5000)
#define CDS_24_GHZ_CHANNEL_6 (6)
+#define CDS_24_GHZ_CHANNEL_1 (1)
#define CDS_5_GHZ_CHANNEL_36 (36)
#define CDS_24_GHZ_CHANNEL_14 (14)
#define CDS_24_GHZ_CHANNEL_15 (15)
#define CDS_24_GHZ_CHANNEL_27 (27)
+#define CDS_5_GHZ_CHANNEL_165 (165)
#define CDS_5_GHZ_CHANNEL_170 (170)
#define CDS_CHAN_SPACING_5MHZ (5)
#define CDS_CHAN_SPACING_20MHZ (20)
diff --git a/drivers/staging/qcacld-3.0/core/cds/src/cds_api.c b/drivers/staging/qcacld-3.0/core/cds/src/cds_api.c
index 304a38336a1c..c5086b1ae806 100644
--- a/drivers/staging/qcacld-3.0/core/cds/src/cds_api.c
+++ b/drivers/staging/qcacld-3.0/core/cds/src/cds_api.c
@@ -131,10 +131,8 @@ v_CONTEXT_t cds_init(void)
QDF_STATUS ret;
ret = qdf_debugfs_init();
- if (ret != QDF_STATUS_SUCCESS) {
+ if (ret != QDF_STATUS_SUCCESS)
cds_err("Failed to init debugfs");
- goto err_ret;
- }
qdf_lock_stats_init();
qdf_mem_init();
@@ -170,7 +168,7 @@ deinit:
gp_cds_context->qdf_ctx = NULL;
gp_cds_context = NULL;
qdf_mem_zero(&g_cds_context, sizeof(g_cds_context));
-err_ret:
+
return NULL;
}
@@ -699,6 +697,7 @@ QDF_STATUS cds_pre_enable(v_CONTEXT_t cds_context)
if ((!cds_is_fw_down()) && (!cds_is_self_recovery_enabled()))
QDF_BUG(0);
+ wma_wmi_stop();
htc_stop(gp_cds_context->htc_ctx);
return QDF_STATUS_E_FAILURE;
}
@@ -706,6 +705,7 @@ QDF_STATUS cds_pre_enable(v_CONTEXT_t cds_context)
if (ol_txrx_pdev_post_attach(gp_cds_context->pdev_txrx_ctx)) {
QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_FATAL,
"Failed to attach pdev");
+ wma_wmi_stop();
htc_stop(gp_cds_context->htc_ctx);
QDF_ASSERT(0);
return QDF_STATUS_E_FAILURE;
@@ -915,6 +915,10 @@ QDF_STATUS cds_post_disable(void)
return QDF_STATUS_E_INVAL;
}
+ /* Clean up all MC thread message queues */
+ if (gp_cds_sched_context)
+ cds_sched_flush_mc_mqs(gp_cds_sched_context);
+
/*
* With new state machine changes cds_close can be invoked without
* cds_disable. So, send the following clean up prerequisites to fw,
@@ -931,6 +935,7 @@ QDF_STATUS cds_post_disable(void)
hif_reset_soc(hif_ctx);
if (gp_cds_context->htc_ctx) {
+ wma_wmi_stop();
htc_stop(gp_cds_context->htc_ctx);
}
@@ -2438,7 +2443,7 @@ QDF_STATUS cds_flush_logs(uint32_t is_fatal,
return QDF_STATUS_E_FAILURE;
}
- QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_ERROR,
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_INFO,
"%s: Triggering bug report: type:%d, indicator=%d reason_code=%d",
__func__, is_fatal, indicator, reason_code);
@@ -2768,6 +2773,9 @@ uint32_t cds_get_connectivity_stats_pkt_bitmap(void *context)
{
hdd_adapter_t *adapter = NULL;
+ if (!context)
+ return 0;
+
adapter = (hdd_adapter_t *)context;
if (unlikely(adapter->magic != WLAN_HDD_ADAPTER_MAGIC)) {
QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
@@ -2785,7 +2793,12 @@ uint32_t cds_get_connectivity_stats_pkt_bitmap(void *context)
*/
uint32_t cds_get_arp_stats_gw_ip(void *context)
{
- hdd_adapter_t *adapter = (hdd_adapter_t *)context;
+ hdd_adapter_t *adapter = NULL;
+
+ if (!context)
+ return 0;
+
+ adapter = (hdd_adapter_t *)context;
if (unlikely(adapter->magic != WLAN_HDD_ADAPTER_MAGIC)) {
QDF_TRACE(QDF_MODULE_ID_HDD_DATA, QDF_TRACE_LEVEL_ERROR,
@@ -2876,21 +2889,53 @@ cds_print_htc_credit_history(uint32_t count, qdf_abstract_print *print,
#endif
#ifdef ENABLE_SMMU_S1_TRANSLATION
-void cds_smmu_mem_map_setup(qdf_device_t osdev)
+QDF_STATUS cds_smmu_mem_map_setup(qdf_device_t osdev, bool ipa_present)
{
int attr = 0;
+ bool ipa_smmu_enable = false;
struct dma_iommu_mapping *mapping = pld_smmu_get_mapping(osdev->dev);
osdev->smmu_s1_enabled = false;
- if (!mapping) {
- cds_info("No SMMU mapping present");
- return;
+
+ if (ipa_present) {
+ ipa_smmu_enable = qdf_get_ipa_smmu_status();
+ if (ipa_smmu_enable)
+ cds_info("SMMU enabled from IPA side");
+ else
+ cds_info("SMMU not enabled from IPA side");
}
- if ((iommu_domain_get_attr(mapping->domain,
- DOMAIN_ATTR_S1_BYPASS, &attr) == 0) &&
- !attr)
- osdev->smmu_s1_enabled = true;
+ if (mapping && ((iommu_domain_get_attr(mapping->domain,
+ DOMAIN_ATTR_S1_BYPASS, &attr) == 0) &&
+ !attr)) {
+ cds_info("SMMU enabled from WLAN side");
+
+ if (ipa_present) {
+ if (ipa_smmu_enable) {
+ cds_info("SMMU enabled from both IPA and WLAN side");
+ osdev->smmu_s1_enabled = true;
+ } else {
+ cds_err("SMMU mismatch: IPA: disable, WLAN: enable");
+ return QDF_STATUS_E_FAILURE;
+ }
+ } else {
+ osdev->smmu_s1_enabled = true;
+ }
+
+ } else {
+ cds_info("No SMMU mapping present or SMMU disabled from WLAN side");
+
+ if (ipa_present) {
+ if (ipa_smmu_enable) {
+ cds_err("SMMU mismatch: IPA: enable, WLAN: disable");
+ return QDF_STATUS_E_FAILURE;
+ } else {
+ cds_info("SMMU diabled from both IPA and WLAN side");
+ }
+ }
+ }
+
+ return QDF_STATUS_SUCCESS;
}
#ifdef IPA_OFFLOAD
@@ -2906,9 +2951,10 @@ int cds_smmu_map_unmap(bool map, uint32_t num_buf, qdf_mem_info_t *buf_arr)
#endif
#else
-void cds_smmu_mem_map_setup(qdf_device_t osdev)
+QDF_STATUS cds_smmu_mem_map_setup(qdf_device_t osdev, bool ipa_present)
{
osdev->smmu_s1_enabled = false;
+ return QDF_STATUS_SUCCESS;
}
int cds_smmu_map_unmap(bool map, uint32_t num_buf, qdf_mem_info_t *buf_arr)
diff --git a/drivers/staging/qcacld-3.0/core/cds/src/cds_concurrency.c b/drivers/staging/qcacld-3.0/core/cds/src/cds_concurrency.c
index e3380981abc2..3d72e2bb57a1 100644
--- a/drivers/staging/qcacld-3.0/core/cds/src/cds_concurrency.c
+++ b/drivers/staging/qcacld-3.0/core/cds/src/cds_concurrency.c
@@ -2588,7 +2588,7 @@ void cds_set_dual_mac_scan_config(uint8_t dbs_val,
return;
}
- cfg.set_dual_mac_cb = (void *)cds_soc_set_dual_mac_cfg_cb;
+ cfg.set_dual_mac_cb = cds_soc_set_dual_mac_cfg_cb;
cds_debug("scan_config:%x fw_mode_config:%x",
cfg.scan_config, cfg.fw_mode_config);
@@ -2644,7 +2644,7 @@ void cds_set_dual_mac_fw_mode_config(uint8_t dbs, uint8_t dfs)
return;
}
- cfg.set_dual_mac_cb = (void *)cds_soc_set_dual_mac_cfg_cb;
+ cfg.set_dual_mac_cb = cds_soc_set_dual_mac_cfg_cb;
cds_debug("scan_config:%x fw_mode_config:%x",
cfg.scan_config, cfg.fw_mode_config);
@@ -2968,9 +2968,9 @@ bool cds_is_connection_in_progress(uint8_t *session_id,
hdd_sta_ctx =
WLAN_HDD_GET_STATION_CTX_PTR(adapter);
if ((eConnectionState_Associated ==
- hdd_sta_ctx->conn_info.connState)
- && (false ==
- hdd_sta_ctx->conn_info.uIsAuthenticated)) {
+ hdd_sta_ctx->conn_info.connState)
+ && sme_is_sta_key_exchange_in_progress(
+ hdd_ctx->hHal, adapter->sessionId)) {
sta_mac = (uint8_t *)
&(adapter->macAddressCurrent.bytes[0]);
cds_debug("client " MAC_ADDRESS_STR
@@ -5918,6 +5918,49 @@ static bool cds_is_5g_channel_allowed(uint8_t channel, uint32_t *list,
}
+bool cds_allow_sap_go_concurrency(enum cds_con_mode mode, uint8_t channel)
+{
+ uint32_t sap_cnt;
+ uint32_t go_cnt;
+ enum cds_con_mode con_mode;
+ uint8_t con_chan;
+ int id;
+
+ sap_cnt = cds_mode_specific_connection_count(CDS_SAP_MODE, NULL);
+ go_cnt = cds_mode_specific_connection_count(CDS_P2P_GO_MODE, NULL);
+
+ if ((mode == CDS_SAP_MODE || mode == CDS_P2P_GO_MODE) && (sap_cnt ||
+ go_cnt)) {
+ if (!wma_is_dbs_enable()) {
+ /* Don't allow second SAP/GO interface if DBS is not
+ * supported */
+ cds_debug("DBS is not supported, don't allow second SAP interface");
+ return false;
+ }
+
+ /* If DBS is supported then allow second SAP/GO session only if
+ * the freq band of the second SAP/GO interface is different
+ * than the first SAP/GO interface.
+ */
+ for (id = 0; id < MAX_NUMBER_OF_CONC_CONNECTIONS; id++) {
+ if (conc_connection_list[id].in_use) {
+ con_mode = conc_connection_list[id].mode;
+ con_chan = conc_connection_list[id].chan;
+ if (((con_mode == CDS_SAP_MODE) ||
+ (con_mode == CDS_P2P_GO_MODE)) &&
+ (CDS_IS_SAME_BAND_CHANNELS(channel,
+ con_chan))) {
+ cds_debug("DBS is supported, but first SAP and second SAP are on same band, So don't allow second SAP interface");
+ return false;
+ }
+ }
+ }
+ }
+
+ /* Don't block the second interface */
+ return true;
+}
+
/**
* cds_allow_concurrency() - Check for allowed concurrency
* combination
@@ -6122,6 +6165,11 @@ bool cds_allow_concurrency(enum cds_con_mode mode,
qdf_mutex_release(&cds_ctx->qdf_conc_list_lock);
}
+ if (!cds_allow_sap_go_concurrency(mode, channel)) {
+ hdd_err("This concurrency combination is not allowed");
+ goto done;
+ }
+
status = true;
done:
@@ -6641,6 +6689,74 @@ QDF_STATUS cds_update_and_wait_for_connection_update(uint8_t session_id,
}
/**
+ * cds_is_dbs_allowed_for_concurrency() - check if DBS is allowed for the
+ * current concurrency
+ * @new_conn_mode: new connection mode
+ *
+ * When a new connection is about to come up, check if dbs is allowed for
+ * STA+STA or STA+P2P
+ *
+ * Return: true if dbs is allowed for STA+STA or STA+P2P else false
+ */
+static bool cds_is_dbs_allowed_for_concurrency(
+ enum tQDF_ADAPTER_MODE new_conn_mode)
+{
+ hdd_context_t *hdd_ctx;
+ uint32_t count, dbs_for_sta_sta, dbs_for_sta_p2p;
+ bool ret = true;
+
+ count = cds_get_connection_count();
+
+ hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
+ if (!hdd_ctx) {
+ cds_err("HDD context is NULL");
+ return ret;
+ }
+
+ if (count != 1)
+ return ret;
+
+ dbs_for_sta_sta = WMA_CHANNEL_SELECT_LOGIC_STA_STA_GET(hdd_ctx->config->
+ channel_select_logic_conc);
+ dbs_for_sta_p2p = WMA_CHANNEL_SELECT_LOGIC_STA_P2P_GET(hdd_ctx->config->
+ channel_select_logic_conc);
+
+ switch (conc_connection_list[0].mode) {
+ case CDS_STA_MODE:
+ switch (new_conn_mode) {
+ case QDF_STA_MODE:
+ if (!dbs_for_sta_sta)
+ return false;
+ break;
+ case QDF_P2P_DEVICE_MODE:
+ case QDF_P2P_CLIENT_MODE:
+ case QDF_P2P_GO_MODE:
+ if (!dbs_for_sta_p2p)
+ return false;
+ break;
+ default:
+ break;
+ }
+ break;
+ case CDS_P2P_CLIENT_MODE:
+ case CDS_P2P_GO_MODE:
+ switch (new_conn_mode) {
+ case CDS_STA_MODE:
+ if (!dbs_for_sta_p2p)
+ return false;
+ break;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
* cds_current_connections_update() - initiates actions
* needed on current connections once channel has been decided
* for the new connection
@@ -6666,6 +6782,8 @@ QDF_STATUS cds_current_connections_update(uint32_t session_id,
cds_context_type *cds_ctx;
hdd_context_t *hdd_ctx;
QDF_STATUS status = QDF_STATUS_E_FAILURE;
+ hdd_adapter_t *adapter;
+ enum tQDF_ADAPTER_MODE nw_con_mode;
cds_ctx = cds_get_context(QDF_MODULE_ID_QDF);
if (!cds_ctx) {
@@ -6679,6 +6797,13 @@ QDF_STATUS cds_current_connections_update(uint32_t session_id,
return QDF_STATUS_E_FAILURE;
}
+ adapter = hdd_get_adapter_by_sme_session_id(hdd_ctx, session_id);
+ if (!adapter) {
+ cds_err("Invalid HDD adapter");
+ return QDF_STATUS_E_FAILURE;
+ }
+ nw_con_mode = adapter->device_mode;
+
if (wma_is_hw_dbs_capable() == false) {
cds_err("driver isn't dbs capable, no further action needed");
return QDF_STATUS_E_NOSUPPORT;
@@ -6722,6 +6847,22 @@ QDF_STATUS cds_current_connections_update(uint32_t session_id,
break;
}
+ /*
+ * Based on the channel_select_logic_conc ini, the hw mode is adjusted
+ * when a second connection that would result in STA+STA or STA+P2P
+ * concurrency is about to come up:
+ * 1) If MCC is configured and the current hw mode is DBS, the hw mode
+ * should be set to single MAC for the above concurrency.
+ * 2) If MCC is configured and the current hw mode is not DBS, no hw
+ * mode change is required.
+ */
+ if (wma_is_current_hwmode_dbs() &&
+ !cds_is_dbs_allowed_for_concurrency(nw_con_mode))
+ next_action = CDS_SINGLE_MAC;
+ else if (!wma_is_current_hwmode_dbs() &&
+ !cds_is_dbs_allowed_for_concurrency(nw_con_mode))
+ next_action = CDS_NOP;
+
if (CDS_NOP != next_action)
status = cds_next_actions(session_id,
next_action, reason);
@@ -10796,3 +10937,76 @@ uint32_t cds_get_connection_info(struct connection_info *info)
return count;
}
+
+void cds_trim_acs_channel_list(tsap_Config_t *sap_cfg)
+{
+ uint32_t list[MAX_NUMBER_OF_CONC_CONNECTIONS];
+ uint32_t index, count, i, ch_list_count;
+ uint8_t band_mask = 0, ch_5g = 0, ch_24g = 0;
+ uint8_t ch_list[QDF_MAX_NUM_CHAN];
+
+ if (sap_cfg->acs_cfg.ch_list_count >= QDF_MAX_NUM_CHAN) {
+ cds_err("acs_cfg.ch_list_count too big %d",
+ sap_cfg->acs_cfg.ch_list_count);
+ return;
+ }
+ /*
+ * if force SCC is enabled and there is a STA connection, trim the
+ * ACS channel list on the band on which STA connection is present
+ */
+ count = cds_mode_specific_connection_count(CDS_STA_MODE, list);
+ if (cds_is_force_scc() && count) {
+ index = 0;
+ while (index < count) {
+ if (CDS_IS_CHANNEL_24GHZ(
+ conc_connection_list[list[index]].chan) &&
+ cds_is_safe_channel(
+ conc_connection_list[list[index]].chan)) {
+ band_mask |= 1;
+ ch_24g = conc_connection_list[list[index]].chan;
+ }
+ if (CDS_IS_CHANNEL_5GHZ(
+ conc_connection_list[list[index]].chan) &&
+ cds_is_safe_channel(
+ conc_connection_list[list[index]].chan) &&
+ !CDS_IS_DFS_CH(
+ conc_connection_list[list[index]].chan) &&
+ !CDS_IS_PASSIVE_OR_DISABLE_CH(
+ conc_connection_list[list[index]].chan)) {
+ band_mask |= 2;
+ ch_5g = conc_connection_list[list[index]].chan;
+ }
+ index++;
+ }
+ ch_list_count = 0;
+ if (band_mask == 1) {
+ ch_list[ch_list_count++] = ch_24g;
+ for (i = 0; i < sap_cfg->acs_cfg.ch_list_count; i++) {
+ if (CDS_IS_CHANNEL_24GHZ(
+ sap_cfg->acs_cfg.ch_list[i]))
+ continue;
+ ch_list[ch_list_count++] =
+ sap_cfg->acs_cfg.ch_list[i];
+ }
+ } else if (band_mask == 2) {
+ ch_list[ch_list_count++] = ch_5g;
+ for (i = 0; i < sap_cfg->acs_cfg.ch_list_count; i++) {
+ if (CDS_IS_CHANNEL_5GHZ(
+ sap_cfg->acs_cfg.ch_list[i]))
+ continue;
+ ch_list[ch_list_count++] =
+ sap_cfg->acs_cfg.ch_list[i];
+ }
+ } else if (band_mask == 3) {
+ ch_list[ch_list_count++] = ch_24g;
+ ch_list[ch_list_count++] = ch_5g;
+ } else {
+ cds_debug("unexpected band_mask value %d", band_mask);
+ return;
+ }
+
+ sap_cfg->acs_cfg.ch_list_count = ch_list_count;
+ for (i = 0; i < sap_cfg->acs_cfg.ch_list_count; i++)
+ sap_cfg->acs_cfg.ch_list[i] = ch_list[i];
+ }
+}
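
To make the band_mask logic above easier to follow, here is a standalone worked example that mirrors the band_mask == 1 case (a sketch in plain C, independent of the driver structs; the channel values are assumed):

#include <stdint.h>
#include <stdio.h>

#define IS_24GHZ(ch) ((ch) >= 1 && (ch) <= 14)

/* Keep the STA channel first, then drop every other candidate on its band. */
static int trim_to_sta_band(uint8_t sta_chan, const uint8_t *in, int n,
                            uint8_t *out)
{
        int i, count = 0;

        out[count++] = sta_chan;
        for (i = 0; i < n; i++)
                if (IS_24GHZ(in[i]) != IS_24GHZ(sta_chan))
                        out[count++] = in[i];
        return count;
}

int main(void)
{
        uint8_t acs[] = { 1, 6, 11, 36, 40 }, trimmed[8];
        int i, n = trim_to_sta_band(6, acs, 5, trimmed);

        /* with a STA on safe 2.4 GHz channel 6 this prints: 6 36 40 */
        for (i = 0; i < n; i++)
                printf("%u ", trimmed[i]);
        printf("\n");
        return 0;
}
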
diff --git a/drivers/staging/qcacld-3.0/core/cds/src/cds_mc_timer.c b/drivers/staging/qcacld-3.0/core/cds/src/cds_mc_timer.c
index c684471da955..4ff88a61eb38 100644
--- a/drivers/staging/qcacld-3.0/core/cds/src/cds_mc_timer.c
+++ b/drivers/staging/qcacld-3.0/core/cds/src/cds_mc_timer.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -124,7 +124,7 @@ void cds_linux_timer_callback(unsigned long data)
sys_build_message_header(SYS_MSG_ID_MC_TIMER, &msg);
msg.callback = callback;
msg.bodyptr = user_data;
- msg.bodyval = 0;
+ msg.bodyval = timer->cookie;
if (cds_mq_post_message(QDF_MODULE_ID_SYS, &msg) == QDF_STATUS_SUCCESS)
return;
diff --git a/drivers/staging/qcacld-3.0/core/cds/src/cds_sched.c b/drivers/staging/qcacld-3.0/core/cds/src/cds_sched.c
index 228dde2c3082..ceecc57031ba 100644
--- a/drivers/staging/qcacld-3.0/core/cds/src/cds_sched.c
+++ b/drivers/staging/qcacld-3.0/core/cds/src/cds_sched.c
@@ -409,7 +409,8 @@ __cds_cpu_hotplug_notify(struct notifier_block *block,
if (pref_cpu == 0)
return NOTIFY_OK;
- if (!cds_set_cpus_allowed_ptr(pSchedContext->ol_rx_thread, pref_cpu))
+ if (pSchedContext->ol_rx_thread &&
+ !cds_set_cpus_allowed_ptr(pSchedContext->ol_rx_thread, pref_cpu))
affine_cpu = pref_cpu;
return NOTIFY_OK;
@@ -1238,6 +1239,49 @@ static int cds_ol_rx_thread(void *arg)
}
#endif
+void cds_remove_timer_from_sys_msg(uint32_t timer_cookie)
+{
+ p_cds_msg_wrapper msg_wrapper = NULL;
+ struct list_head *pos, *q;
+ unsigned long flags;
+ p_cds_mq_type sys_msgq;
+
+ if (!gp_cds_sched_context) {
+ cds_err("gp_cds_sched_context is null");
+ return;
+ }
+
+ if (!gp_cds_sched_context->McThread) {
+ cds_err("Cannot post message because MC thread is stopped");
+ return;
+ }
+
+ sys_msgq = &gp_cds_sched_context->sysMcMq;
+ /* No msg present in sys queue */
+ if (cds_is_mq_empty(sys_msgq))
+ return;
+
+ spin_lock_irqsave(&sys_msgq->mqLock, flags);
+ list_for_each_safe(pos, q, &sys_msgq->mqList) {
+ msg_wrapper = list_entry(pos, cds_msg_wrapper, msgNode);
+
+ if ((msg_wrapper->pVosMsg->type == SYS_MSG_ID_MC_TIMER) &&
+ (msg_wrapper->pVosMsg->bodyval == timer_cookie)) {
+ /* return message to the Core */
+ list_del(pos);
+ spin_unlock_irqrestore(&sys_msgq->mqLock, flags);
+ QDF_TRACE(QDF_MODULE_ID_QDF, QDF_TRACE_LEVEL_DEBUG,
+ "%s: removing timer message with cookie %d",
+ __func__, timer_cookie);
+ cds_core_return_msg(gp_cds_sched_context->pVContext,
+ msg_wrapper);
+ return;
+ }
+
+ }
+ spin_unlock_irqrestore(&sys_msgq->mqLock, flags);
+}
+
/**
* cds_sched_close() - close the cds scheduler
* @p_cds_context: Pointer to the global CDS Context
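
The cookie plumbing introduced above (cds_linux_timer_callback now posts the timer's cookie in msg.bodyval, and cds_remove_timer_from_sys_msg() drops a queued SYS_MSG_ID_MC_TIMER message by that cookie) is intended to let a stop/destroy path flush an expiry message that has already been posted to the MC thread. A minimal sketch of such a caller, assuming the cookie is available to the caller as a plain uint32_t (the helper name and calling context are assumptions):

static void example_stop_mc_timer(qdf_mc_timer_t *timer, uint32_t cookie)
{
        /* stop the timer first so no new expiry message gets posted */
        qdf_mc_timer_stop(timer);

        /* then discard any SYS_MSG_ID_MC_TIMER message already queued on
         * the sys MC message queue for this timer
         */
        cds_remove_timer_from_sys_msg(cookie);
}
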
diff --git a/drivers/staging/qcacld-3.0/core/dp/htt/htt.c b/drivers/staging/qcacld-3.0/core/dp/htt/htt.c
index 35f74db74854..ddad788f1d83 100644
--- a/drivers/staging/qcacld-3.0/core/dp/htt/htt.c
+++ b/drivers/staging/qcacld-3.0/core/dp/htt/htt.c
@@ -400,13 +400,13 @@ htt_pdev_alloc(ol_txrx_pdev_handle txrx_pdev,
pdev->cfg.is_full_reorder_offload =
ol_cfg_is_full_reorder_offload(pdev->ctrl_pdev);
QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
- "is_full_reorder_offloaded? %d",
+ "full_reorder_offloaded %d",
(int)pdev->cfg.is_full_reorder_offload);
pdev->cfg.ce_classify_enabled =
ol_cfg_is_ce_classify_enabled(ctrl_pdev);
QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO,
- "ce_classify_enabled? %d",
+ "ce_classify %d",
pdev->cfg.ce_classify_enabled);
if (pdev->cfg.is_high_latency) {
@@ -883,7 +883,7 @@ int htt_ipa_uc_attach(struct htt_pdev_t *pdev)
*/
void htt_ipa_uc_detach(struct htt_pdev_t *pdev)
{
- QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO, "%s: enter",
+ QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: enter",
__func__);
/* TX IPA micro controller detach */
@@ -892,7 +892,7 @@ void htt_ipa_uc_detach(struct htt_pdev_t *pdev)
/* RX IPA micro controller detach */
htt_rx_ipa_uc_detach(pdev);
- QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_INFO, "%s: exit",
+ QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_DEBUG, "%s: exit",
__func__);
}
diff --git a/drivers/staging/qcacld-3.0/core/dp/htt/htt_t2h.c b/drivers/staging/qcacld-3.0/core/dp/htt/htt_t2h.c
index 30f169b66daa..f017192e93b1 100644
--- a/drivers/staging/qcacld-3.0/core/dp/htt/htt_t2h.c
+++ b/drivers/staging/qcacld-3.0/core/dp/htt/htt_t2h.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -366,6 +366,13 @@ static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
{
struct htt_mgmt_tx_compl_ind *compl_msg;
int32_t credit_delta = 1;
+ int msg_len = qdf_nbuf_len(htt_t2h_msg);
+ if (msg_len < (sizeof(struct htt_mgmt_tx_compl_ind) + sizeof(*msg_word))) {
+ QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
+ "Invalid msg_word lenght in HTT_T2H_MSG_TYPE_MGMT_TX_COMPL_IND");
+ WARN_ON(1);
+ break;
+ }
compl_msg =
(struct htt_mgmt_tx_compl_ind *)(msg_word + 1);
@@ -416,7 +423,16 @@ static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
#ifndef REMOVE_PKT_LOG
case HTT_T2H_MSG_TYPE_PKTLOG:
{
- pktlog_process_fw_msg(msg_word + 1);
+ uint32_t len = qdf_nbuf_len(htt_t2h_msg);
+
+ if (len < sizeof(*msg_word) + sizeof(uint32_t)) {
+ qdf_print("%s: invalid nbuff len \n", __func__);
+ WARN_ON(1);
+ break;
+ }
+
+ /*len is reduced by sizeof(*msg_word)*/
+ pktlog_process_fw_msg(msg_word + 1, len - sizeof(*msg_word));
break;
}
#endif
@@ -463,6 +479,7 @@ static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
uint16_t len;
uint8_t *op_msg_buffer;
uint8_t *msg_start_ptr;
+ int msg_len = qdf_nbuf_len(htt_t2h_msg);
htc_pm_runtime_put(pdev->htc_pdev);
msg_start_ptr = (uint8_t *) msg_word;
@@ -471,6 +488,11 @@ static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
msg_word++;
len = HTT_WDI_IPA_OP_RESPONSE_RSP_LEN_GET(*msg_word);
+ if (sizeof(struct htt_wdi_ipa_op_response_t) + len > msg_len) {
+ qdf_print("Invalid buffer length");
+ WARN_ON(1);
+ break;
+ }
op_msg_buffer =
qdf_mem_malloc(sizeof
(struct htt_wdi_ipa_op_response_t) +
@@ -492,9 +514,17 @@ static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
{
uint8_t num_flows;
struct htt_flow_pool_map_payload_t *pool_map_payoad;
+ int msg_len = qdf_nbuf_len(htt_t2h_msg);
num_flows = HTT_FLOW_POOL_MAP_NUM_FLOWS_GET(*msg_word);
+ if (((HTT_FLOW_POOL_MAP_PAYLOAD_SZ /
+ HTT_FLOW_POOL_MAP_HEADER_SZ) * num_flows + 1) * sizeof(*msg_word) > msg_len) {
+ qdf_print("Invalid num_flows");
+ WARN_ON(1);
+ break;
+ }
+
msg_word++;
while (num_flows) {
pool_map_payoad = (struct htt_flow_pool_map_payload_t *)
@@ -514,6 +544,14 @@ static void htt_t2h_lp_msg_handler(void *context, qdf_nbuf_t htt_t2h_msg,
case HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP:
{
struct htt_flow_pool_unmap_t *pool_numap_payload;
+ int msg_len = qdf_nbuf_len(htt_t2h_msg);
+
+ if (msg_len < sizeof(struct htt_flow_pool_unmap_t)) {
+ QDF_TRACE(QDF_MODULE_ID_HTT, QDF_TRACE_LEVEL_ERROR,
+ "Invalid msg_word lenght in HTT_T2H_MSG_TYPE_FLOW_POOL_UNMAP");
+ WARN_ON(1);
+ break;
+ }
pool_numap_payload = (struct htt_flow_pool_unmap_t *)msg_word;
ol_tx_flow_pool_unmap_handler(pool_numap_payload->flow_id,
@@ -668,6 +706,7 @@ void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
unsigned int num_msdu_bytes;
uint16_t peer_id;
uint8_t tid;
+ int msg_len = qdf_nbuf_len(htt_t2h_msg);
if (qdf_unlikely(pdev->cfg.is_full_reorder_offload)) {
qdf_print("HTT_T2H_MSG_TYPE_RX_IND not supported ");
@@ -682,6 +721,10 @@ void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
tid);
break;
}
+ if (msg_len < (2 + HTT_RX_PPDU_DESC_SIZE32 + 1) * sizeof(uint32_t)) {
+ qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid msg_len\n");
+ break;
+ }
num_msdu_bytes =
HTT_RX_IND_FW_RX_DESC_BYTES_GET(
*(msg_word + 2 + HTT_RX_PPDU_DESC_SIZE32));
@@ -697,6 +740,12 @@ void htt_t2h_msg_handler(void *context, HTC_PACKET *pkt)
num_mpdu_ranges =
HTT_RX_IND_NUM_MPDU_RANGES_GET(*(msg_word + 1));
pdev->rx_ind_msdu_byte_idx = 0;
+ if (qdf_unlikely(pdev->rx_mpdu_range_offset_words + (num_mpdu_ranges * 4) > msg_len)) {
+ qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid mpdu_ranges %d\n",
+ num_mpdu_ranges);
+ WARN_ON(1);
+ break;
+ }
ol_rx_indication_handler(pdev->txrx_pdev,
htt_t2h_msg, peer_id,
@@ -950,6 +999,12 @@ void htt_t2h_msg_handler_fast(void *context, qdf_nbuf_t *cmpl_msdus,
peer_id = HTT_RX_IND_PEER_ID_GET(*msg_word);
tid = HTT_RX_IND_EXT_TID_GET(*msg_word);
+ if (tid >= OL_TXRX_NUM_EXT_TIDS) {
+ qdf_print("HTT_T2H_MSG_TYPE_RX_IND, invalid tid %d\n",
+ tid);
+ WARN_ON(1);
+ break;
+ }
num_msdu_bytes =
HTT_RX_IND_FW_RX_DESC_BYTES_GET(
diff --git a/drivers/staging/qcacld-3.0/core/dp/htt/htt_tx.c b/drivers/staging/qcacld-3.0/core/dp/htt/htt_tx.c
index 309b4cb2c869..1088d1648d47 100644
--- a/drivers/staging/qcacld-3.0/core/dp/htt/htt_tx.c
+++ b/drivers/staging/qcacld-3.0/core/dp/htt/htt_tx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -1532,6 +1532,7 @@ htt_tx_desc_fill_tso_info(htt_pdev_handle pdev, void *desc,
if (tso_seg->seg.num_frags < FRAG_NUM_MAX)
*word = 0;
+ qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_FILLHTTSEG);
}
#endif /* FEATURE_TSO */
diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx.c
index 22816b973f1f..7d443be064f4 100644
--- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx.c
+++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -1485,6 +1485,11 @@ ol_rx_in_order_indication_handler(ol_txrx_pdev_handle pdev,
uint8_t pktlog_bit;
#endif
uint32_t filled = 0;
+ if (tid >= OL_TXRX_NUM_EXT_TIDS) {
+ ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
+ WARN_ON(1);
+ return;
+ }
if (pdev) {
if (qdf_unlikely(QDF_GLOBAL_MONITOR_MODE == cds_get_conparam()))
diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_defrag.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_defrag.c
index 81dcd237cbcf..4496b21eabbf 100644
--- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_defrag.c
+++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_defrag.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -631,6 +631,11 @@ void ol_rx_defrag_waitlist_flush(struct ol_txrx_pdev_t *pdev)
break;
tid = rx_reorder->tid;
+ if (tid >= OL_TXRX_NUM_EXT_TIDS) {
+ ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
+ WARN_ON(1);
+ continue;
+ }
/* get index 0 of the rx_reorder array */
rx_reorder_base = rx_reorder - tid;
peer =
diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_reorder.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_reorder.c
index 6820c4c13230..0718969d20f8 100644
--- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_reorder.c
+++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_rx_reorder.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -533,6 +533,12 @@ ol_rx_addba_handler(ol_txrx_pdev_handle pdev,
struct ol_txrx_peer_t *peer;
struct ol_rx_reorder_t *rx_reorder;
+ if (tid >= OL_TXRX_NUM_EXT_TIDS) {
+ ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
+ WARN_ON(1);
+ return;
+ }
+
peer = ol_txrx_peer_find_by_id(pdev, peer_id);
if (peer == NULL)
return;
@@ -569,6 +575,12 @@ ol_rx_delba_handler(ol_txrx_pdev_handle pdev, uint16_t peer_id, uint8_t tid)
struct ol_txrx_peer_t *peer;
struct ol_rx_reorder_t *rx_reorder;
+ if (tid >= OL_TXRX_NUM_EXT_TIDS) {
+ ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
+ WARN_ON(1);
+ return;
+ }
+
peer = ol_txrx_peer_find_by_id(pdev, peer_id);
if (peer == NULL)
return;
@@ -670,6 +682,11 @@ ol_rx_pn_ind_handler(ol_txrx_pdev_handle pdev,
uint16_t seq_num;
int i = 0;
+ if (tid >= OL_TXRX_NUM_EXT_TIDS) {
+ ol_txrx_err("%s: invalid tid, %u\n", __FUNCTION__, tid);
+ WARN_ON(1);
+ return;
+ }
peer = ol_txrx_peer_find_by_id(pdev, peer_id);
if (!peer) {
diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx.c
index dce4fa9b744e..05012df6118d 100644
--- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx.c
+++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -153,6 +153,7 @@ void ol_free_remaining_tso_segs(ol_txrx_vdev_handle vdev,
}
next_seg = free_seg->next;
+ free_seg->force_free = 1;
ol_tso_free_segment(pdev, free_seg);
free_seg = next_seg;
}
@@ -163,6 +164,7 @@ void ol_free_remaining_tso_segs(ol_txrx_vdev_handle vdev,
*/
while (free_seg) {
next_seg = free_seg->next;
+ free_seg->force_free = 1;
ol_tso_free_segment(pdev, free_seg);
free_seg = next_seg;
}
@@ -526,6 +528,30 @@ qdf_nbuf_t ol_tx_ll(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
}
#endif /* TSO */
+/**
+ * ol_tx_trace_pkt() - Trace TX packet at OL layer
+ * @skb: skb to be traced
+ * @msdu_id: msdu_id of the packet
+ * @vdev_id: vdev_id of the packet
+ *
+ * Return: None
+ */
+static void ol_tx_trace_pkt(qdf_nbuf_t skb, uint16_t msdu_id,
+ uint8_t vdev_id)
+{
+ DPTRACE(qdf_dp_trace_ptr(skb,
+ QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD,
+ qdf_nbuf_data_addr(skb),
+ sizeof(qdf_nbuf_data(skb)),
+ msdu_id, vdev_id));
+
+ qdf_dp_trace_log_pkt(vdev_id, skb, QDF_TX);
+
+ qdf_dp_trace_set_track(skb, QDF_TX);
+ DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_DP_TRACE_TX_PACKET_RECORD,
+ msdu_id, QDF_TX));
+}
+
#ifdef WLAN_FEATURE_FASTPATH
/**
* ol_tx_prepare_ll_fast() Alloc and prepare Tx descriptor
@@ -704,6 +730,7 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
struct ol_txrx_msdu_info_t msdu_info;
uint32_t tso_msdu_stats_idx = 0;
+ qdf_mem_zero(&msdu_info, sizeof(msdu_info));
msdu_info.htt.info.l2_hdr_type = vdev->pdev->htt_pkt_type;
msdu_info.htt.action.tx_comp_req = 0;
/*
@@ -721,7 +748,7 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
msdu_info.peer = NULL;
if (qdf_unlikely(ol_tx_prepare_tso(vdev, msdu, &msdu_info))) {
- ol_txrx_dbg("ol_tx_prepare_tso failed\n");
+ ol_txrx_err("ol_tx_prepare_tso failed\n");
TXRX_STATS_MSDU_LIST_INCR(vdev->pdev,
tx.dropped.host_reject, msdu);
return msdu;
@@ -780,6 +807,8 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
TXRX_STATS_MSDU_INCR(pdev, tx.from_stack, msdu);
if (qdf_likely(tx_desc)) {
+ struct qdf_tso_seg_elem_t *next_seg;
+
/*
* if this is a jumbo nbuf, then increment the
@@ -791,11 +820,8 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
if (segments)
qdf_nbuf_inc_users(msdu);
- DPTRACE(qdf_dp_trace_ptr(msdu,
- QDF_DP_TRACE_TXRX_FAST_PACKET_PTR_RECORD,
- qdf_nbuf_data_addr(msdu),
- sizeof(qdf_nbuf_data(msdu)),
- tx_desc->id, vdev->vdev_id));
+ ol_tx_trace_pkt(msdu, tx_desc->id,
+ vdev->vdev_id);
/*
* If debug display is enabled, show the meta
* data being downloaded to the target via the
@@ -807,6 +833,17 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
sizeof(struct htt_tx_msdu_desc_ext_t);
htt_tx_desc_display(tx_desc->htt_tx_desc);
+
+ /* mark the relevant tso_seg free-able */
+ if (msdu_info.tso_info.curr_seg) {
+ msdu_info.tso_info.curr_seg->
+ sent_to_target = 1;
+ next_seg = msdu_info.tso_info.
+ curr_seg->next;
+ } else {
+ next_seg = NULL;
+ }
+
if ((0 == ce_send_fast(pdev->ce_tx_hdl, msdu,
ep_id, pkt_download_len))) {
struct qdf_tso_info_t *tso_info =
@@ -817,8 +854,7 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
*/
if (tx_desc->pkt_type ==
OL_TX_FRM_TSO) {
- tso_info->curr_seg =
- tso_info->curr_seg->next;
+ tso_info->curr_seg = next_seg;
ol_free_remaining_tso_segs(vdev,
&msdu_info, true);
}
@@ -833,10 +869,9 @@ ol_tx_ll_fast(ol_txrx_vdev_handle vdev, qdf_nbuf_t msdu_list)
htt_tx_status_download_fail);
return msdu;
}
- if (msdu_info.tso_info.curr_seg) {
- msdu_info.tso_info.curr_seg =
- msdu_info.tso_info.curr_seg->next;
- }
+ if (msdu_info.tso_info.curr_seg)
+ msdu_info.tso_info.curr_seg = next_seg;
+
if (msdu_info.tso_info.is_tso) {
qdf_nbuf_reset_num_frags(msdu);
@@ -2229,7 +2264,7 @@ void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
c_element->cookie = TSO_SEG_MAGIC_COOKIE;
#ifdef TSOSEG_DEBUG
c_element->dbg.txdesc = NULL;
- c_element->dbg.cur = -1; /* history empty */
+ qdf_atomic_init(&c_element->dbg.cur); /* history empty */
qdf_tso_seg_dbg_record(c_element, TSOSEG_LOC_INIT1);
#endif /* TSOSEG_DEBUG */
c_element->next =
@@ -2253,7 +2288,8 @@ void ol_tso_seg_list_init(struct ol_txrx_pdev_t *pdev, uint32_t num_seg)
c_element->cookie = TSO_SEG_MAGIC_COOKIE;
#ifdef TSOSEG_DEBUG
c_element->dbg.txdesc = NULL;
- c_element->dbg.cur = -1; /* history empty */
+ qdf_atomic_init(&c_element->dbg.cur); /* history empty */
qdf_tso_seg_dbg_record(c_element, TSOSEG_LOC_INIT2);
#endif /* TSOSEG_DEBUG */
c_element->next = NULL;
@@ -2293,11 +2329,10 @@ void ol_tso_seg_list_deinit(struct ol_txrx_pdev_t *pdev)
while (i-- > 0 && c_element) {
temp = c_element->next;
if (c_element->on_freelist != 1) {
- qdf_tso_seg_dbg_bug("this seg already freed (double?)");
+ qdf_tso_seg_dbg_bug("seg already freed (double?)");
return;
} else if (c_element->cookie != TSO_SEG_MAGIC_COOKIE) {
- qdf_print("this seg cookie is bad (memory corruption?)");
- QDF_BUG(0);
+ qdf_tso_seg_dbg_bug("seg cookie is bad (corruption?)");
return;
}
/* free this seg, so reset the cookie value*/
diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_desc.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_desc.c
index 256c87212c77..dd1324670062 100644
--- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_desc.c
+++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_desc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011, 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011, 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -884,20 +884,22 @@ struct qdf_tso_seg_elem_t *ol_tso_alloc_segment(struct ol_txrx_pdev_t *pdev)
pdev->tso_seg_pool.num_free--;
tso_seg = pdev->tso_seg_pool.freelist;
if (tso_seg->on_freelist != 1) {
- qdf_print("Do not alloc tso seg as this seg is not in freelist\n");
qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
+ qdf_print("tso seg alloc failed: not in freelist");
QDF_BUG(0);
return NULL;
} else if (tso_seg->cookie != TSO_SEG_MAGIC_COOKIE) {
- qdf_print("Do not alloc tso seg as cookie is not good\n");
qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
+ qdf_print("tso seg alloc failed: bad cookie");
QDF_BUG(0);
return NULL;
}
/*this tso seg is not a part of freelist now.*/
tso_seg->on_freelist = 0;
- qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_ALLOC);
+ tso_seg->sent_to_target = 0;
+ tso_seg->force_free = 0;
pdev->tso_seg_pool.freelist = pdev->tso_seg_pool.freelist->next;
+ qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_ALLOC);
}
qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
@@ -921,11 +923,18 @@ void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
qdf_spin_lock_bh(&pdev->tso_seg_pool.tso_mutex);
if (tso_seg->on_freelist != 0) {
qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
- qdf_tso_seg_dbg_bug("Do not free tso seg, already freed");
+ qdf_print("Do not free tso seg, already freed");
+ QDF_BUG(0);
return;
} else if (tso_seg->cookie != TSO_SEG_MAGIC_COOKIE) {
- qdf_print("Do not free the tso seg as cookie is not good. Looks like memory corruption");
qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
+ qdf_print("Do not free tso seg: cookie is not good.");
+ QDF_BUG(0);
+ return;
+ } else if ((tso_seg->sent_to_target != 1) &&
+ (tso_seg->force_free != 1)) {
+ qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
+ qdf_print("Do not free tso seg: yet to be sent to target");
QDF_BUG(0);
return;
}
@@ -937,10 +946,14 @@ void ol_tso_free_segment(struct ol_txrx_pdev_t *pdev,
qdf_tso_seg_dbg_zero(tso_seg);
tso_seg->next = pdev->tso_seg_pool.freelist;
tso_seg->on_freelist = 1;
+ tso_seg->sent_to_target = 0;
tso_seg->cookie = TSO_SEG_MAGIC_COOKIE;
- qdf_tso_seg_dbg_record(tso_seg, TSOSEG_LOC_FREE);
pdev->tso_seg_pool.freelist = tso_seg;
pdev->tso_seg_pool.num_free++;
+ qdf_tso_seg_dbg_record(tso_seg, tso_seg->force_free
+ ? TSOSEG_LOC_FORCE_FREE
+ : TSOSEG_LOC_FREE);
+ tso_seg->force_free = 0;
qdf_spin_unlock_bh(&pdev->tso_seg_pool.tso_mutex);
}
diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_queue.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_queue.c
index afed800847b0..c2193b4c0e06 100644
--- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_queue.c
+++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_queue.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -1776,9 +1776,11 @@ void ol_txrx_vdev_flush(ol_txrx_vdev_handle vdev)
qdf_nbuf_t next =
qdf_nbuf_next(vdev->ll_pause.txq.head);
qdf_nbuf_set_next(vdev->ll_pause.txq.head, NULL);
- qdf_nbuf_unmap(vdev->pdev->osdev,
- vdev->ll_pause.txq.head,
- QDF_DMA_TO_DEVICE);
+ if (QDF_NBUF_CB_PADDR(vdev->ll_pause.txq.head)) {
+ qdf_nbuf_unmap(vdev->pdev->osdev,
+ vdev->ll_pause.txq.head,
+ QDF_DMA_TO_DEVICE);
+ }
qdf_nbuf_tx_free(vdev->ll_pause.txq.head,
QDF_NBUF_PKT_ERROR);
vdev->ll_pause.txq.head = next;
diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_send.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_send.c
index ed69e5f0d79d..3a47006af40b 100644
--- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_send.c
+++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_tx_send.c
@@ -694,7 +694,14 @@ ol_tx_completion_handler(ol_txrx_pdev_handle pdev,
for (i = 0; i < num_msdus; i++) {
tx_desc_id = desc_ids[i];
+ if (tx_desc_id >= pdev->tx_desc.pool_size) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
+ "%s: drop due to invalid msdu id = %x\n",
+ __func__, tx_desc_id);
+ continue;
+ }
tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
+ qdf_assert(tx_desc);
tx_desc->status = status;
netbuf = tx_desc->netbuf;
@@ -1004,7 +1011,14 @@ ol_tx_inspect_handler(ol_txrx_pdev_handle pdev,
for (i = 0; i < num_msdus; i++) {
tx_desc_id = desc_ids[i];
+ if (tx_desc_id >= pdev->tx_desc.pool_size) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_DEBUG,
+ "%s: drop due to invalid msdu id = %x\n",
+ __func__, tx_desc_id);
+ continue;
+ }
tx_desc = ol_tx_desc_find(pdev, tx_desc_id);
+ qdf_assert(tx_desc);
netbuf = tx_desc->netbuf;
/* find the "vdev" this tx_desc belongs to */
diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c
index b605b44389a6..70a20725e86d 100644
--- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c
+++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx.c
@@ -32,6 +32,7 @@
#include <qdf_types.h> /* qdf_device_t, qdf_print */
#include <qdf_lock.h> /* qdf_spinlock */
#include <qdf_atomic.h> /* qdf_atomic_read */
+#include <qdf_debugfs.h>
#if defined(HIF_PCI) || defined(HIF_SNOC) || defined(HIF_AHB)
/* Required for WLAN_FEATURE_FASTPATH */
@@ -94,6 +95,25 @@
/* thresh for peer's cached buf queue beyond which the elements are dropped */
#define OL_TXRX_CACHED_BUFQ_THRESH 128
+#define DPT_DEBUGFS_PERMS (QDF_FILE_USR_READ | \
+ QDF_FILE_USR_WRITE | \
+ QDF_FILE_GRP_READ | \
+ QDF_FILE_OTH_READ)
+
+#define DPT_DEBUGFS_NUMBER_BASE 10
+/**
+ * enum dpt_set_param_debugfs - dpt set params
+ * @DPT_SET_PARAM_PROTO_BITMAP : set proto bitmap
+ * @DPT_SET_PARAM_NR_RECORDS: set num of records
+ * @DPT_SET_PARAM_VERBOSITY: set verbosity
+ */
+enum dpt_set_param_debugfs {
+ DPT_SET_PARAM_PROTO_BITMAP = 1,
+ DPT_SET_PARAM_NR_RECORDS = 2,
+ DPT_SET_PARAM_VERBOSITY = 3,
+ DPT_SET_PARAM_MAX,
+};
+
/* These macros are expected to be used only for data path.
* Existing APIs cannot be used since they log every time
* they are used. Other modules, outside of data path should
@@ -104,6 +124,11 @@
#define OL_TXRX_PEER_DEC_REF_CNT_SILENT(peer) \
qdf_atomic_dec(&peer->ref_cnt)
+ol_txrx_peer_handle
+ol_txrx_peer_find_by_local_id_inc_ref(struct ol_txrx_pdev_t *pdev,
+ uint8_t local_peer_id);
+static void ol_txrx_peer_dec_ref_cnt(struct ol_txrx_peer_t *peer);
+
#if defined(CONFIG_HL_SUPPORT) && defined(FEATURE_WLAN_TDLS)
/**
@@ -323,6 +348,7 @@ void *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id)
{
struct ol_txrx_peer_t *peer = NULL;
ol_txrx_pdev_handle pdev = NULL;
+ ol_txrx_vdev_handle vdev;
if (sta_id >= WLAN_MAX_STA_COUNT) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
@@ -337,14 +363,17 @@ void *ol_txrx_get_vdev_by_sta_id(uint8_t sta_id)
return NULL;
}
- peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
+ peer = ol_txrx_peer_find_by_local_id_inc_ref(pdev, sta_id);
if (!peer) {
QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_INFO_HIGH,
"PEER [%d] not found", sta_id);
return NULL;
}
- return peer->vdev;
+ vdev = peer->vdev;
+ ol_txrx_peer_dec_ref_cnt(peer);
+
+ return vdev;
}
/**
@@ -499,6 +528,8 @@ ol_txrx_peer_find_by_local_id_inc_ref(struct ol_txrx_pdev_t *pdev,
qdf_spin_unlock_bh(&pdev->local_peer_ids.lock);
if (peer && peer->valid)
OL_TXRX_PEER_INC_REF_CNT_SILENT(peer);
+ else
+ peer = NULL;
qdf_spin_unlock_bh(&pdev->peer_ref_mutex);
return peer;
@@ -621,6 +652,13 @@ void ol_txrx_update_tx_queue_groups(
u_int32_t membership;
struct ol_txrx_vdev_t *vdev;
+ if (group_id >= OL_TX_MAX_TXQ_GROUPS) {
+ ol_txrx_warn("%s: invalid group_id=%u, ignore update.\n",
+ __func__,
+ group_id);
+ return;
+ }
+
group = &pdev->txq_grps[group_id];
membership = OL_TXQ_GROUP_MEMBERSHIP_GET(vdev_id_mask, ac_mask);
@@ -1209,6 +1247,195 @@ static void ol_txrx_tso_stats_clear(ol_txrx_pdev_handle pdev)
#endif /* defined(FEATURE_TSO) && defined(FEATURE_TSO_DEBUG) */
/**
+ * ol_txrx_read_dpt_buff_debugfs() - read dp trace buffer
+ * @file: file to read
+ * @arg: pdev object
+ *
+ * Return: QDF_STATUS
+ */
+static QDF_STATUS ol_txrx_read_dpt_buff_debugfs(qdf_debugfs_file_t file,
+ void *arg)
+{
+ struct ol_txrx_pdev_t *pdev = (struct ol_txrx_pdev_t *)arg;
+ uint32_t i = 0;
+ QDF_STATUS status = QDF_STATUS_SUCCESS;
+
+ if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID)
+ return QDF_STATUS_E_INVAL;
+ else if (pdev->state == QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE) {
+ pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
+ return QDF_STATUS_SUCCESS;
+ }
+
+ i = qdf_dpt_get_curr_pos_debugfs(file, pdev->state);
+ status = qdf_dpt_dump_stats_debugfs(file, i);
+ if (status == QDF_STATUS_E_FAILURE)
+ pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_IN_PROGRESS;
+ else if (status == QDF_STATUS_SUCCESS)
+ pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_COMPLETE;
+
+ return status;
+}
+
+/**
+ * ol_txrx_conv_str_to_int_debugfs() - convert string to int
+ * @buf: buffer containing string
+ * @len: buffer len
+ * @proto_bitmap: defines the protocol to be tracked
+ * @nr_records: defines the nth packet which is traced
+ * @verbosity: defines the verbosity level
+ *
+ * This function expects the char buffer to be null terminated;
+ * otherwise the parsed values may be unexpected.
+ *
+ * Return: 0 on success
+ */
+static int ol_txrx_conv_str_to_int_debugfs(char *buf, qdf_size_t len,
+ int *proto_bitmap,
+ int *nr_records,
+ int *verbosity)
+{
+ int num_value = DPT_SET_PARAM_PROTO_BITMAP;
+ int ret, param_value = 0;
+ char *buf_param = buf;
+ int i;
+
+ for (i = 1; i < DPT_SET_PARAM_MAX; i++) {
+ /* Loop until a space is reached, since kstrtoint parses up to
+ * the null character. Replace the space with a null character
+ * so each value can be read on its own.
+ * Terminate the loop either at the null terminator or when
+ * len reaches 0.
+ */
+ while (*buf && len) {
+ if (*buf == ' ') {
+ *buf = '\0';
+ buf++;
+ len--;
+ break;
+ }
+ buf++;
+ len--;
+ }
+ /* get the parameter */
+ ret = qdf_kstrtoint(buf_param,
+ DPT_DEBUGFS_NUMBER_BASE,
+ &param_value);
+ if (ret) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX,
+ QDF_TRACE_LEVEL_ERROR,
+ "%s: Error while parsing buffer. ret %d",
+ __func__, ret);
+ return ret;
+ }
+ switch (num_value) {
+ case DPT_SET_PARAM_PROTO_BITMAP:
+ *proto_bitmap = param_value;
+ break;
+ case DPT_SET_PARAM_NR_RECORDS:
+ *nr_records = param_value;
+ break;
+ case DPT_SET_PARAM_VERBOSITY:
+ *verbosity = param_value;
+ break;
+ default:
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "%s %d: :Set command needs exactly 3 arguments in format <proto_bitmap> <number of record> <Verbosity>.",
+ __func__, __LINE__);
+ break;
+ }
+ num_value++;
+ /* buf_param should now point to the next param value. */
+ buf_param = buf;
+ }
+
+ /* buf not yet at the null terminator implies more than 3 params were passed. */
+ if (*buf) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "%s %d: :Set command needs exactly 3 arguments in format <proto_bitmap> <number of record> <Verbosity>.",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+ return 0;
+}
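The parser above expects a single line of the form "<proto_bitmap> <nr_records> <verbosity>". A standalone sketch of the same split-on-space, parse-each-token logic (plain user-space C using strtok_r/strtol instead of qdf_kstrtoint; the function name and the sample values are illustrative only, not part of the driver):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative only: mirrors the "<proto_bitmap> <nr_records> <verbosity>"
 * format expected by ol_txrx_conv_str_to_int_debugfs().
 */
static int parse_dpt_params(char *buf, int *proto_bitmap,
			    int *nr_records, int *verbosity)
{
	int *out[] = { proto_bitmap, nr_records, verbosity };
	char *tok, *save = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		tok = strtok_r(i ? NULL : buf, " ", &save);
		if (!tok)
			return -1;	/* fewer than 3 arguments */
		*out[i] = (int)strtol(tok, NULL, 10);
	}
	if (strtok_r(NULL, " ", &save))
		return -1;		/* more than 3 arguments */
	return 0;
}

int main(void)
{
	char input[] = "4369 3000 4";	/* sample: proto_bitmap nr_records verbosity */
	int pb, nr, vb;

	if (!parse_dpt_params(input, &pb, &nr, &vb))
		printf("proto_bitmap=%d nr_records=%d verbosity=%d\n", pb, nr, vb);
	return 0;
}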
+
+
+/**
+ * ol_txrx_write_dpt_buff_debugfs() - set dp trace parameters
+ * @priv: pdev object
+ * @buf: buff to get value for dpt parameters
+ * @len: buf length
+ *
+ * Return: QDF_STATUS
+ */
+static QDF_STATUS ol_txrx_write_dpt_buff_debugfs(void *priv,
+ const char *buf,
+ qdf_size_t len)
+{
+ int ret;
+ int proto_bitmap = 0;
+ int nr_records = 0;
+ int verbosity = 0;
+ char *buf1 = NULL;
+
+ if (!buf || !len) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "%s: null buffer or len. len %u",
+ __func__, (uint8_t)len);
+ return QDF_STATUS_E_FAULT;
+ }
+
+ buf1 = (char *)qdf_mem_malloc(len);
+ if (!buf1) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "%s: qdf_mem_malloc failure",
+ __func__);
+ return QDF_STATUS_E_FAULT;
+ }
+ qdf_mem_copy(buf1, buf, len);
+ ret = ol_txrx_conv_str_to_int_debugfs(buf1, len, &proto_bitmap,
+ &nr_records, &verbosity);
+ if (ret) {
+ qdf_mem_free(buf1);
+ return QDF_STATUS_E_INVAL;
+ }
+
+ qdf_dpt_set_value_debugfs(proto_bitmap, nr_records, verbosity);
+ qdf_mem_free(buf1);
+ return QDF_STATUS_SUCCESS;
+}
+
+static int ol_txrx_debugfs_init(struct ol_txrx_pdev_t *pdev)
+{
+ pdev->dpt_debugfs_fops.show = ol_txrx_read_dpt_buff_debugfs;
+ pdev->dpt_debugfs_fops.write = ol_txrx_write_dpt_buff_debugfs;
+ pdev->dpt_debugfs_fops.priv = pdev;
+
+ pdev->dpt_stats_log_dir = qdf_debugfs_create_dir("dpt_stats", NULL);
+
+ if (!pdev->dpt_stats_log_dir) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "%s: error while creating debugfs dir for %s",
+ __func__, "dpt_stats");
+ pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
+ return -EBUSY;
+ }
+
+ if (!qdf_debugfs_create_file("dump_set_dpt_logs", DPT_DEBUGFS_PERMS,
+ pdev->dpt_stats_log_dir,
+ &pdev->dpt_debugfs_fops)) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "%s: debug Entry creation failed!",
+ __func__);
+ pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INVALID;
+ return -EBUSY;
+ }
+
+ pdev->state = QDF_DPT_DEBUGFS_STATE_SHOW_STATE_INIT;
+ return 0;
+}
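With the directory created under a NULL parent, the node is expected to appear as dpt_stats/dump_set_dpt_logs under the debugfs mount point (commonly /sys/kernel/debug, though the exact path is an assumption here, as are the parameter values). A minimal user-space sketch that sets the trace parameters and then dumps the records:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Assumed path; depends on where QDF roots its debugfs directory. */
#define DPT_NODE "/sys/kernel/debug/dpt_stats/dump_set_dpt_logs"

int main(void)
{
	const char *params = "4369 3000 4"; /* <proto_bitmap> <nr_records> <verbosity> */
	char buf[4096];
	ssize_t n;
	int fd;

	/* Configure the DP trace parameters. */
	fd = open(DPT_NODE, O_WRONLY);
	if (fd < 0) {
		perror("open for write");
		return 1;
	}
	if (write(fd, params, strlen(params)) < 0)
		perror("write");
	close(fd);

	/* Dump the DP trace records until the driver signals completion (EOF). */
	fd = open(DPT_NODE, O_RDONLY);
	if (fd < 0) {
		perror("open for read");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}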
+
+/**
* ol_txrx_pdev_attach() - allocate txrx pdev
* @ctrl_pdev: cfg pdev
* @htc_pdev: HTC pdev
@@ -1292,6 +1519,8 @@ ol_txrx_pdev_attach(ol_pdev_handle ctrl_pdev,
pdev->tid_to_ac[OL_TX_NUM_TIDS + OL_TX_VDEV_DEFAULT_MGMT] =
OL_TX_SCHED_WRR_ADV_CAT_MCAST_MGMT;
+ ol_txrx_debugfs_init(pdev);
+
return pdev;
fail3:
@@ -2024,6 +2253,11 @@ void ol_txrx_pdev_pre_detach(ol_txrx_pdev_handle pdev, int force)
#endif
}
+static void ol_txrx_debugfs_exit(ol_txrx_pdev_handle pdev)
+{
+ qdf_debugfs_remove_dir_recursive(pdev->dpt_stats_log_dir);
+}
+
/**
* ol_txrx_pdev_detach() - delete the data SW state
* @pdev - the data physical device object being removed
@@ -2084,6 +2318,8 @@ void ol_txrx_pdev_detach(ol_txrx_pdev_handle pdev)
ol_txrx_pdev_txq_log_destroy(pdev);
ol_txrx_pdev_grp_stat_destroy(pdev);
+
+ ol_txrx_debugfs_exit(pdev);
}
#if defined(CONFIG_PER_VDEV_TX_DESC_POOL)
@@ -2646,10 +2882,6 @@ ol_txrx_peer_attach(ol_txrx_vdev_handle vdev, uint8_t *peer_mac_addr)
vdev->wait_on_peer_id, (int) rc);
/* Added for debugging only */
wma_peer_debug_dump();
- if (cds_is_self_recovery_enabled())
- cds_trigger_recovery(PEER_DEL_TIMEOUT);
- else
- QDF_ASSERT(0);
vdev->wait_on_peer_id = OL_TXRX_INVALID_LOCAL_PEER_ID;
return NULL;
}
@@ -3502,8 +3734,12 @@ QDF_STATUS ol_txrx_clear_peer(uint8_t sta_id)
peer = ol_txrx_peer_find_by_local_id(pdev, sta_id);
+
+ /* Return success if the peer has already been cleared by
+ * the data path via the peer detach function.
+ */
if (!peer)
- return QDF_STATUS_E_FAULT;
+ return QDF_STATUS_SUCCESS;
return ol_txrx_clear_peer_internal(peer);
@@ -4968,6 +5204,7 @@ static void ol_rx_data_cb(struct ol_txrx_pdev_t *pdev,
if (qdf_unlikely(!(peer->state >= OL_TXRX_PEER_STATE_CONN) ||
!peer->vdev->rx)) {
qdf_spin_unlock_bh(&peer->peer_info_lock);
+ ol_txrx_peer_dec_ref_cnt(peer);
goto free_buf;
}
diff --git a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx_types.h b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx_types.h
index 6d88d95f1b54..cf0634248650 100644
--- a/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx_types.h
+++ b/drivers/staging/qcacld-3.0/core/dp/txrx/ol_txrx_types.h
@@ -51,6 +51,7 @@
#include "ol_txrx_osif_api.h" /* ol_rx_callback_fp */
#include "cdp_txrx_flow_ctrl_v2.h"
#include "cdp_txrx_peer_ops.h"
+#include <qdf_trace.h>
/*
* The target may allocate multiple IDs for a peer.
@@ -1027,6 +1028,12 @@ struct ol_txrx_pdev_t {
void (*offld_flush_cb)(void *);
} rx_offld_info;
struct ol_txrx_peer_t *self_peer;
+
+ /* dp debug fs */
+ struct dentry *dpt_stats_log_dir;
+ enum qdf_dpt_debugfs_state state;
+ struct qdf_debugfs_fops dpt_debugfs_fops;
+
};
struct ol_txrx_ocb_chan_info {
@@ -1388,6 +1395,11 @@ struct ol_rx_remote_data {
uint8_t mac_id;
};
+struct ol_fw_data {
+ void *data;
+ uint32_t len;
+};
+
#define INVALID_REORDER_INDEX 0xFFFF
#endif /* _OL_TXRX_TYPES__H_ */
diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_apf.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_apf.h
new file mode 100644
index 000000000000..fe1e16df81d4
--- /dev/null
+++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_apf.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+/**
+ * DOC: wlan_hdd_apf.h
+ *
+ * Android Packet Filter related API's and definitions
+ */
+
+#ifndef __WLAN_HDD_APF_H
+#define __WLAN_HDD_APF_H
+
+#include "qdf_nbuf.h"
+#include "qdf_types.h"
+#include "sir_api.h"
+#include "wlan_hdd_main.h"
+#include "wmi_unified.h"
+#include "wmi_unified_api.h"
+#include "wmi_unified_param.h"
+
+#define MAX_APF_MEMORY_LEN 4096
+
+/* APF commands wait times in msec */
+#define WLAN_WAIT_TIME_APF_GET_CAPS 1000
+#define WLAN_WAIT_TIME_APF_READ_MEM 10000
+
+/**
+ * struct hdd_apf_context - hdd Context for apf
+ * @magic: magic number
+ * @qdf_apf_event: Completion variable for APF get operations
+ * @capability_response: capabilities response received from fw
+ * @apf_enabled: True: APF Interpreter enabled, False: Disabled
+ * @cmd_in_progress: Flag that indicates an APF command is in progress
+ * @buf: Buffer to accumulate read memory chunks
+ * @buf_len: Length of the read memory requested
+ * @offset: APF work memory offset to fetch from
+ * @lock: APF Context lock
+ */
+struct hdd_apf_context {
+ unsigned int magic;
+ qdf_event_t qdf_apf_event;
+ struct sir_apf_get_offload capability_response;
+ bool apf_enabled;
+ bool cmd_in_progress;
+ uint8_t *buf;
+ uint32_t buf_len;
+ uint32_t offset;
+ qdf_spinlock_t lock;
+};
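The @magic, @qdf_apf_event and @lock fields implement a request/response handshake with the firmware path: the requester arms the context and blocks on the event, while the callback validates the magic before copying results and waking the requester. A condensed sketch of that flow, reusing the QDF primitives that appear later in this patch (it compiles only within the driver tree; the two helper names are illustrative and the request-posting step is elided):

/* Requester side (illustrative helper, not part of the patch) */
static int apf_request_and_wait(struct hdd_apf_context *ctx)
{
	QDF_STATUS status;

	qdf_spin_lock(&ctx->lock);
	ctx->magic = APF_CONTEXT_MAGIC;		/* arm the context */
	qdf_event_reset(&ctx->qdf_apf_event);
	qdf_spin_unlock(&ctx->lock);

	/* ... post the request to SME/firmware here ... */

	status = qdf_wait_for_event_completion(&ctx->qdf_apf_event,
					       WLAN_WAIT_TIME_APF_GET_CAPS);
	if (QDF_IS_STATUS_ERROR(status)) {
		qdf_spin_lock(&ctx->lock);
		ctx->magic = 0;			/* disarm so a late reply is ignored */
		qdf_spin_unlock(&ctx->lock);
		return -ETIMEDOUT;
	}
	return 0;
}

/* Response side (callback context, illustrative) */
static void apf_response_arrived(struct hdd_apf_context *ctx)
{
	qdf_spin_lock(&ctx->lock);
	if (ctx->magic != APF_CONTEXT_MAGIC) {
		/* requester already timed out; drop the response */
		qdf_spin_unlock(&ctx->lock);
		return;
	}
	ctx->magic = 0;
	/* ... copy response data into ctx here ... */
	qdf_event_set(&ctx->qdf_apf_event);	/* wake the requester */
	qdf_spin_unlock(&ctx->lock);
}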
+
+/**
+ * hdd_apf_read_memory_callback - HDD Callback for the APF read memory
+ * operation
+ * @context: Hdd context
+ * @read_mem_evt: APF read memory event response parameters
+ *
+ * Return: None
+ */
+void
+hdd_apf_read_memory_callback(void *context,
+ struct wmi_apf_read_memory_resp_event_params
+ *read_mem_evt);
+
+/**
+ * hdd_apf_context_init - APF Context initialization operations
+ *
+ * Return: None
+ */
+void hdd_apf_context_init(void);
+
+/**
+ * hdd_apf_context_destroy - APF Context de-init operations
+ *
+ * Return: None
+ */
+void hdd_apf_context_destroy(void);
+
+/**
+ * hdd_get_apf_capabilities_cb() - Callback function to get APF capabilities
+ * @hdd_context: hdd_context
+ * @apf_get_offload: struct for get offload
+ *
+ * This function receives the response/data from the lower layer and,
+ * if the requesting thread is still waiting, posts the results to the
+ * upper layer; if the request has already timed out, it is ignored.
+ *
+ * Return: None
+ */
+void hdd_get_apf_capabilities_cb(void *hdd_context,
+ struct sir_apf_get_offload *data);
+
+/**
+ * wlan_hdd_cfg80211_apf_offload() - SSR Wrapper to APF Offload
+ * @wiphy: wiphy structure pointer
+ * @wdev: Wireless device structure pointer
+ * @data: Pointer to the data received
+ * @data_len: Length of @data
+ *
+ * Return: 0 on success; errno on failure
+ */
+
+int wlan_hdd_cfg80211_apf_offload(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len);
+#endif /* WLAN_HDD_APF_H */
diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_assoc.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_assoc.h
index cc80a7803b96..88709619aa7e 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_assoc.h
+++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_assoc.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -245,6 +245,15 @@ bool hdd_conn_is_connected(hdd_station_ctx_t *pHddStaCtx);
tSirRFBand hdd_conn_get_connected_band(hdd_station_ctx_t *pHddStaCtx);
/**
+ * hdd_get_sta_connection_in_progress() - get STA for which connection
+ * is in progress
+ * @hdd_ctx: hdd context
+ *
+ * Return: hdd adapter for which connection is in progress
+ */
+hdd_adapter_t *hdd_get_sta_connection_in_progress(hdd_context_t *hdd_ctx);
+
+/**
* hdd_sme_roam_callback() - hdd sme roam callback
* @pContext: pointer to adapter context
* @pRoamInfo: pointer to roam info
diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h
index 08e79de1d0fe..74aad7cb67cb 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h
+++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_cfg.h
@@ -5329,8 +5329,8 @@ enum hdd_link_speed_rpt_type {
* <ini>
* gSetTxChainmask1x1 - sets Transmit chain mask.
* @Min: 1
- * @Max: 2
- * @Default: 1
+ * @Max: 3
+ * @Default: 0
*
* This ini sets Transmit chain mask.
*
@@ -5340,6 +5340,7 @@ enum hdd_link_speed_rpt_type {
* chain0 is selected for both Tx and Rx.
* gSetTxChainmask1x1=1 or gSetRxChainmask1x1=1 to select chain0.
* gSetTxChainmask1x1=2 or gSetRxChainmask1x1=2 to select chain1.
+ * gSetTxChainmask1x1=3 or gSetRxChainmask1x1=3 to select both chains.
*
* Supported Feature: 11AC
*
@@ -5350,15 +5351,15 @@ enum hdd_link_speed_rpt_type {
#define CFG_VHT_ENABLE_1x1_TX_CHAINMASK "gSetTxChainmask1x1"
#define CFG_VHT_ENABLE_1x1_TX_CHAINMASK_MIN (0)
-#define CFG_VHT_ENABLE_1x1_TX_CHAINMASK_MAX (2)
-#define CFG_VHT_ENABLE_1x1_TX_CHAINMASK_DEFAULT (1)
+#define CFG_VHT_ENABLE_1x1_TX_CHAINMASK_MAX (3)
+#define CFG_VHT_ENABLE_1x1_TX_CHAINMASK_DEFAULT (0)
/*
* <ini>
* gSetRxChainmask1x1 - Sets Receive chain mask.
* @Min: 1
- * @Max: 2
- * @Default: 1
+ * @Max: 3
+ * @Default: 0
*
* This ini is used to set Receive chain mask.
*
@@ -5368,6 +5369,7 @@ enum hdd_link_speed_rpt_type {
* chain0 is selected for both Tx and Rx.
* gSetTxChainmask1x1=1 or gSetRxChainmask1x1=1 to select chain0.
* gSetTxChainmask1x1=2 or gSetRxChainmask1x1=2 to select chain1.
+ * gSetTxChainmask1x1=3 or gSetRxChainmask1x1=3 to select both chains.
*
* Supported Feature: 11AC
*
@@ -5378,8 +5380,8 @@ enum hdd_link_speed_rpt_type {
#define CFG_VHT_ENABLE_1x1_RX_CHAINMASK "gSetRxChainmask1x1"
#define CFG_VHT_ENABLE_1x1_RX_CHAINMASK_MIN (0)
-#define CFG_VHT_ENABLE_1x1_RX_CHAINMASK_MAX (2)
-#define CFG_VHT_ENABLE_1x1_RX_CHAINMASK_DEFAULT (1)
+#define CFG_VHT_ENABLE_1x1_RX_CHAINMASK_MAX (3)
+#define CFG_VHT_ENABLE_1x1_RX_CHAINMASK_DEFAULT (0)
/*
* <ini>
@@ -7347,6 +7349,29 @@ enum hdd_link_speed_rpt_type {
#define CFG_VHT_SU_BEAMFORMEE_CAP_FEATURE_DEFAULT (WNI_CFG_VHT_SU_BEAMFORMEE_CAP_STADEF)
/*
+ * <ini>
+ * enable_subfee_vendor_vhtie - ini to enable/disable SU Bformee in vendor VHTIE
+ * @Min: 0
+ * @Max: 1
+ * @Default: 1
+ *
+ * This ini is used to enable/disable SU Bformee in the vendor VHT IE when
+ * gTxBFEnable is enabled. If gTxBFEnable is 0, this ini has no effect.
+ *
+ * Related: gTxBFEnable.
+ *
+ * Supported Feature: STA
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_ENABLE_SUBFEE_IN_VENDOR_VHTIE_NAME "enable_subfee_vendor_vhtie"
+#define CFG_ENABLE_SUBFEE_IN_VENDOR_VHTIE_MIN (0)
+#define CFG_ENABLE_SUBFEE_IN_VENDOR_VHTIE_MAX (1)
+#define CFG_ENABLE_SUBFEE_IN_VENDOR_VHTIE_DEFAULT (1)
+
+/*
* Enable / Disable Tx beamformee in SAP mode
* Default: Disable
*/
@@ -8202,6 +8227,29 @@ enum hdd_link_speed_rpt_type {
#define CFG_BUS_BANDWIDTH_COMPUTE_INTERVAL_DEFAULT (100)
#define CFG_BUS_BANDWIDTH_COMPUTE_INTERVAL_MIN (0)
#define CFG_BUS_BANDWIDTH_COMPUTE_INTERVAL_MAX (10000)
+
+/*
+ * <ini>
+ * gTcpLimitOutputEnable - Control to enable TCP limit output bytes
+ * @Min: 0
+ * @Max: 1
+ * @Default: 1
+ *
+ * This ini is used to enable dynamic configuration of the
+ * tcp_limit_output_bytes parameter. When enabled, the driver posts a message
+ * to cnss-daemon, which then updates tcp_limit_output_bytes accordingly.
+ *
+ * Supported Feature: Tcp limit output bytes
+ *
+ * Usage: Internal
+ *
+ * </ini>
+ */
+#define CFG_ENABLE_TCP_LIMIT_OUTPUT "gTcpLimitOutputEnable"
+#define CFG_ENABLE_TCP_LIMIT_OUTPUT_DEFAULT (1)
+#define CFG_ENABLE_TCP_LIMIT_OUTPUT_MIN (0)
+#define CFG_ENABLE_TCP_LIMIT_OUTPUT_MAX (1)
+
/*
* <ini>
* gTcpAdvWinScaleEnable - Control to enable TCP adv window scaling
@@ -8946,13 +8994,13 @@ enum hdd_link_speed_rpt_type {
#endif
/*
- * 0: Disable BPF packet filter
- * 1: Enable BPF packet filter
+ * 0: Disable APF packet filter
+ * 1: Enable APF packet filter
*/
-#define CFG_BPF_PACKET_FILTER_OFFLOAD "gBpfFilterEnable"
-#define CFG_BPF_PACKET_FILTER_OFFLOAD_MIN (0)
-#define CFG_BPF_PACKET_FILTER_OFFLOAD_MAX (1)
-#define CFG_BPF_PACKET_FILTER_OFFLOAD_DEFAULT (1)
+#define CFG_APF_PACKET_FILTER_OFFLOAD "gBpfFilterEnable"
+#define CFG_APF_PACKET_FILTER_OFFLOAD_MIN (0)
+#define CFG_APF_PACKET_FILTER_OFFLOAD_MAX (1)
+#define CFG_APF_PACKET_FILTER_OFFLOAD_DEFAULT (1)
/*
* <ini>
@@ -11280,53 +11328,53 @@ enum restart_beaconing_on_ch_avoid_rule {
/*
* <ini>
- * gActiveUcBpfMode - Control UC active BPF mode
+ * gActiveUcBpfMode - Control UC active APF mode
* @Min: 0 (disabled)
* @Max: 2 (adaptive)
* @Default: 0 (disabled)
*
- * This config item is used to control UC BPF in active mode. There are 3 modes:
- * 0) disabled - BPF is disabled in active mode
- * 1) enabled - BPF is enabled for all packets in active mode
- * 2) adaptive - BPF is enabled for packets up to some throughput threshold
+ * This config item is used to control UC APF in active mode. There are 3 modes:
+ * 0) disabled - APF is disabled in active mode
+ * 1) enabled - APF is enabled for all packets in active mode
+ * 2) adaptive - APF is enabled for packets up to some throughput threshold
*
* Related: N/A
*
- * Supported Feature: Active Mode BPF
+ * Supported Feature: Active Mode APF
*
* Usage: Internal/External
* </ini>
*/
-#define CFG_ACTIVE_UC_BPF_MODE_NAME "gActiveUcBpfMode"
-#define CFG_ACTIVE_UC_BPF_MODE_MIN (ACTIVE_BPF_DISABLED)
-#define CFG_ACTIVE_UC_BPF_MODE_MAX (ACTIVE_BPF_MODE_COUNT - 1)
-#define CFG_ACTIVE_UC_BPF_MODE_DEFAULT (ACTIVE_BPF_DISABLED)
+#define CFG_ACTIVE_UC_APF_MODE_NAME "gActiveUcBpfMode"
+#define CFG_ACTIVE_UC_APF_MODE_MIN (ACTIVE_APF_DISABLED)
+#define CFG_ACTIVE_UC_APF_MODE_MAX (ACTIVE_APF_MODE_COUNT - 1)
+#define CFG_ACTIVE_UC_APF_MODE_DEFAULT (ACTIVE_APF_DISABLED)
/*
* <ini>
- * g_mc_bc_active_bpf_mode - Control MC/BC active BPF mode
+ * g_mc_bc_active_apf_mode - Control MC/BC active APF mode
* @Min: 0 (disabled)
* @Max: 1 (enabled)
* @Default: 0 (disabled)
*
- * This config item is used to control MC/BC BPF mode.
- * g_mc_bc_active_bpf_mode=disabled(0): BPF is disabled in active mode
- * g_mc_bc_active_bpf_mode=enabled(1): BPF is enabled for all packets in active
+ * This config item is used to control MC/BC APF mode.
+ * g_mc_bc_active_apf_mode=disabled(0): APF is disabled in active mode
+ * g_mc_bc_active_apf_mode=enabled(1): APF is enabled for all packets in active
* mode
- * g_mc_bc_active_bpf_mode=adaptive(2): BPF is enabled for packets up to some
+ * g_mc_bc_active_apf_mode=adaptive(2): APF is enabled for packets up to some
* throughput threshold
*
* Related: N/A
*
- * Supported Feature: Active Mode BPF
+ * Supported Feature: Active Mode APF
*
* Usage: Internal/External
* </ini>
*/
-#define CFG_ACTIVE_MC_BC_BPF_MODE_NAME "gActiveMcBcBpfMode"
-#define CFG_ACTIVE_MC_BC_BPF_MODE_MIN (ACTIVE_BPF_DISABLED)
-#define CFG_ACTIVE_MC_BC_BPF_MODE_MAX (ACTIVE_BPF_ENABLED)
-#define CFG_ACTIVE_MC_BC_BPF_MODE_DEFAULT (ACTIVE_BPF_DISABLED)
+#define CFG_ACTIVE_MC_BC_APF_MODE_NAME "gActiveMcBcBpfMode"
+#define CFG_ACTIVE_MC_BC_APF_MODE_MIN (ACTIVE_APF_DISABLED)
+#define CFG_ACTIVE_MC_BC_APF_MODE_MAX (ACTIVE_APF_ENABLED)
+#define CFG_ACTIVE_MC_BC_APF_MODE_DEFAULT (ACTIVE_APF_DISABLED)
enum hw_filter_mode {
HW_FILTER_DISABLED = 0,
@@ -12424,7 +12472,7 @@ enum hw_filter_mode {
/*
* <ini>
* gAutoChannelSelectWeight - ACS channel weight
- * @Min: 0x1
+ * @Min: 0
* @Max: 0xFFFFFFFF
* @Default: 0x000000FF
*
@@ -12446,7 +12494,7 @@ enum hw_filter_mode {
* </ini>
*/
#define CFG_AUTO_CHANNEL_SELECT_WEIGHT "gAutoChannelSelectWeight"
-#define CFG_AUTO_CHANNEL_SELECT_WEIGHT_MIN (0x1)
+#define CFG_AUTO_CHANNEL_SELECT_WEIGHT_MIN (0)
#define CFG_AUTO_CHANNEL_SELECT_WEIGHT_MAX (0xFFFFFFFF)
#define CFG_AUTO_CHANNEL_SELECT_WEIGHT_DEFAULT (0x000000FF)
@@ -13875,6 +13923,30 @@ enum hw_filter_mode {
/*
* <ini>
+ * force_rsne_override - force rsnie override from user
+ * @Min: 0
+ * @Max: 1
+ * @Default: 0
+ *
+ * This ini is used to enable/disable a test mode that forces the RSNE
+ * override; security enhancement test cases use it to pass the RSNIE
+ * supplied by the user in the assoc request.
+ *
+ * Related: None
+ *
+ * Supported Feature: STA
+ *
+ * Usage: internal
+ *
+ * </ini>
+ */
+#define CFG_FORCE_RSNE_OVERRIDE_NAME "force_rsne_override"
+#define CFG_FORCE_RSNE_OVERRIDE_MIN (0)
+#define CFG_FORCE_RSNE_OVERRIDE_MAX (1)
+#define CFG_FORCE_RSNE_OVERRIDE_DEFAULT (0)
+
+/*
+ * <ini>
* enable_mac_provision - Enable/disable MAC address provisioning feature
* @Min: 0
* @Max: 1
@@ -13959,6 +14031,274 @@ enum hw_filter_mode {
#define CFG_DERIVED_INTERFACE_POOL_MAX (0xffffffff)
#define CFG_DERIVED_INTERFACE_POOL_DEFAULT (0xffffffff)
+/*
+ * <ini>
+ * gcmp_enabled - ini to enable/disable GCMP
+ * @Min: 0
+ * @Max: 1
+ * @Default: 0
+ *
+ * Currently the firmware updates the sequence number for each TID by 2^3
+ * because of security issues, but a throughput drop is observed with this
+ * PN mechanism. With this ini the firmware decides how to trade off
+ * security against throughput.
+ *
+ * Supported Feature: STA/SAP/P2P
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+
+#define CFG_ENABLE_GCMP_NAME "gcmp_enabled"
+#define CFG_ENABLE_GCMP_MIN (0)
+#define CFG_ENABLE_GCMP_MAX (1)
+#define CFG_ENABLE_GCMP_DEFAULT (0)
+
+/*
+ * <ini>
+ * 11k_offload_enable_bitmask - Bitmask to enable 11k offload to FW
+ * @Min: 0
+ * @Max: 1
+ * @Default: 1
+ *
+ * This ini is used to set which of the 11k features are offloaded to the FW.
+ * Currently only the Neighbor Report Request is supported for offload and it
+ * is enabled by default.
+ * B0: Offload 11k neighbor report requests
+ * B1-B31: Reserved
+ *
+ * Related : None
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+
+#define CFG_OFFLOAD_11K_ENABLE_BITMASK_NAME "11k_offload_enable_bitmask"
+#define CFG_OFFLOAD_11K_ENABLE_BITMASK_MIN (0)
+#define CFG_OFFLOAD_11K_ENABLE_BITMASK_MAX (1)
+#define CFG_OFFLOAD_11K_ENABLE_BITMASK_DEFAULT (1)
+
+#define OFFLOAD_11K_BITMASK_NEIGHBOR_REPORT_REQUEST 0x1
+
+/*
+ * <ini>
+ * nr_offload_params_bitmask - bitmask to specify which of the
+ * neighbor report offload params are valid in the ini
+ * @Min: 0
+ * @Max: 63
+ * @Default: 63
+ *
+ * This ini specifies which of the neighbor report offload params are valid
+ * and should be considered by the FW. The bitmask is as follows
+ * B0: nr_offload_time_offset
+ * B1: nr_offload_low_rssi_offset
+ * B2: nr_offload_bmiss_count_trigger
+ * B3: nr_offload_per_threshold_offset
+ * B4: nr_offload_cache_timeout
+ * B5: nr_offload_max_req_cap
+ * B6-B7: Reserved
+ *
+ * Related : 11k_offload_enable_bitmask
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_PARAMS_BITMASK_NAME \
+ "nr_offload_params_bitmask"
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_PARAMS_BITMASK_MIN (0)
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_PARAMS_BITMASK_MAX (63)
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_PARAMS_BITMASK_DEFAULT (63)
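To illustrate how this mask gates the individual parameters, a short sketch follows; the bit macros are hypothetical and simply mirror the B0..B5 positions documented above (the shipped default of 63, i.e. 0x3F, marks all six parameters valid):

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper macros mirroring bits B0..B5 documented above. */
#define NR_OFFLOAD_BIT_TIME_OFFSET          (1U << 0)
#define NR_OFFLOAD_BIT_LOW_RSSI_OFFSET      (1U << 1)
#define NR_OFFLOAD_BIT_BMISS_COUNT_TRIGGER  (1U << 2)
#define NR_OFFLOAD_BIT_PER_THRESHOLD_OFFSET (1U << 3)
#define NR_OFFLOAD_BIT_CACHE_TIMEOUT        (1U << 4)
#define NR_OFFLOAD_BIT_MAX_REQ_CAP          (1U << 5)

/* A parameter is considered by the FW only if its bit is set. */
static bool nr_param_is_valid(uint32_t bitmask, uint32_t bit)
{
	return (bitmask & bit) != 0;
}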
+
+/*
+ * <ini>
+ * nr_offload_time_offset - time interval in seconds after the
+ * neighbor report offload command to send the first neighbor report request
+ * frame
+ * @Min: 0
+ * @Max: 3600
+ * @Default: 30
+ *
+ * Related : nr_offload_params_bitmask
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_TIME_OFFSET_NAME \
+ "nr_offload_time_offset"
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_TIME_OFFSET_MIN (0)
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_TIME_OFFSET_MAX (3600)
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_TIME_OFFSET_DEFAULT (30)
+
+/*
+ * <ini>
+ * nr_offload_low_rssi_offset - offset from the roam RSSI threshold
+ * to trigger the neighbor report request frame (in dBm)
+ * @Min: 4
+ * @Max: 10
+ * @Default: 4
+ *
+ * Related : nr_offload_params_bitmask
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_LOW_RSSI_OFFSET_NAME \
+ "nr_offload_low_rssi_offset"
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_LOW_RSSI_OFFSET_MIN (4)
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_LOW_RSSI_OFFSET_MAX (10)
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_LOW_RSSI_OFFSET_DEFAULT (4)
+
+/*
+ * <ini>
+ * nr_offload_bmiss_count_trigger - Number of beacon miss events to
+ * trigger a neighbor report request frame
+ * @Min: 1
+ * @Max: 5
+ * @Default: 1
+ *
+ * Related : nr_offload_params_bitmask
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_BMISS_COUNT_TRIGGER_NAME \
+ "nr_offload_bmiss_count_trigger"
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_BMISS_COUNT_TRIGGER_MIN (1)
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_BMISS_COUNT_TRIGGER_MAX (5)
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_BMISS_COUNT_TRIGGER_DEFAULT (1)
+
+/*
+ * <ini>
+ * nr_offload_per_threshold_offset - offset from PER threshold to
+ * trigger a neighbor report request frame (in %)
+ * @Min: 5
+ * @Max: 20
+ * @Default: 5
+ *
+ * This ini is used to set the above neighbor report offload parameter.
+ *
+ * Related : nr_offload_params_bitmask
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_PER_THRESHOLD_OFFSET_NAME \
+ "nr_offload_per_threshold_offset"
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_PER_THRESHOLD_OFFSET_MIN (5)
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_PER_THRESHOLD_OFFSET_MAX (20)
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_PER_THRESHOLD_OFFSET_DEFAULT (5)
+
+/*
+ * <ini>
+ * nr_offload_cache_timeout - time in seconds after which the
+ * neighbor report cache is marked as timed out and any of the triggers would
+ * cause a neighbor report request frame to be sent.
+ * @Min: 5
+ * @Max: 86400
+ * @Default: 1200
+ *
+ * Related : nr_offload_params_bitmask
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_CACHE_TIMEOUT_NAME \
+ "nr_offload_cache_timeout"
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_CACHE_TIMEOUT_MIN (5)
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_CACHE_TIMEOUT_MAX (86400)
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_CACHE_TIMEOUT_DEFAULT (1200)
+
+/*
+ * <ini>
+ * nr_offload_max_req_cap - Max number of neighbor
+ * report requests that can be sent to a connected peer in the current session.
+ * This counter is reset once a successful roam happens or at cache timeout
+ * @Min: 3
+ * @Max: 300
+ * @Default: 3
+ *
+ * Related : nr_offload_params_bitmask
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_MAX_REQ_CAP_NAME \
+ "nr_offload_max_req_cap"
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_MAX_REQ_CAP_MIN (3)
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_MAX_REQ_CAP_MAX (300)
+#define CFG_OFFLOAD_NEIGHBOR_REPORT_MAX_REQ_CAP_DEFAULT (3)
+
+/*
+ * <ini>
+ * channel_select_logic_conc - Set channel selection logic
+ * for different concurrency combinations to DBS or inter band
+ * MCC. Default is DBS for STA+STA and STA+P2P.
+ * @Min: 0x00000000
+ * @Max: 0xFFFFFFFF
+ * @Default: 0x00000003
+ *
+ * 0 - inter-band MCC
+ * 1 - DBS
+ *
+ * BIT 0: STA+STA
+ * BIT 1: STA+P2P
+ * BIT 2-31: Reserved
+ *
+ * Supported Feature: STA+STA, STA+P2P
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_CHANNEL_SELECT_LOGIC_CONC_NAME "channel_select_logic_conc"
+#define CFG_CHANNEL_SELECT_LOGIC_CONC_MIN (0x00000000)
+#define CFG_CHANNEL_SELECT_LOGIC_CONC_MAX (0xFFFFFFFF)
+#define CFG_CHANNEL_SELECT_LOGIC_CONC_DEFAULT (0x00000003)
+
+/*
+ * <ini>
+ * gEnableDTIMSelectionDiversity - Enable/Disable chain
+ * selection optimization for one chain dtim
+ * @Min: 0
+ * @Max: 30
+ * @Default: 5
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_DTIM_SELECTION_DIVERSITY_NAME "gEnableDTIMSelectionDiversity"
+#define CFG_DTIM_SELECTION_DIVERSITY_MIN (0)
+#define CFG_DTIM_SELECTION_DIVERSITY_MAX (30)
+#define CFG_DTIM_SELECTION_DIVERSITY_DEFAULT (5)
+
+/*
+ * <ini>
+ * enable_rtt_mac_randomization - Enable/Disable rtt mac randomization
+ * @Min: 0
+ * @Max: 1
+ * @Default: 0
+ *
+ * Usage: External
+ *
+ * </ini>
+ */
+#define CFG_ENABLE_RTT_MAC_RANDOMIZATION_NAME "enable_rtt_mac_randomization"
+#define CFG_ENABLE_RTT_MAC_RANDOMIZATION_MIN (0)
+#define CFG_ENABLE_RTT_MAC_RANDOMIZATION_MAX (1)
+#define CFG_ENABLE_RTT_MAC_RANDOMIZATION_DEFAULT (0)
+
/*---------------------------------------------------------------------------
Type declarations
-------------------------------------------------------------------------*/
@@ -14272,6 +14612,7 @@ struct hdd_config {
uint8_t vhtRxMCS;
uint8_t vhtTxMCS;
bool enableTxBF;
+ bool enable_subfee_vendor_vhtie;
bool enable_txbf_sap_mode;
uint8_t txBFCsnValue;
bool enable_su_tx_bformer;
@@ -14281,8 +14622,8 @@ struct hdd_config {
bool enable2x2;
uint32_t vdev_type_nss_2g;
uint32_t vdev_type_nss_5g;
- bool txchainmask1x1;
- bool rxchainmask1x1;
+ uint8_t txchainmask1x1;
+ uint8_t rxchainmask1x1;
bool enableMuBformee;
bool enableVhtpAid;
bool enableVhtGid;
@@ -14452,6 +14793,7 @@ struct hdd_config {
uint32_t busBandwidthLowThreshold;
uint32_t busBandwidthComputeInterval;
uint32_t enable_tcp_delack;
+ bool enable_tcp_limit_output;
uint32_t enable_tcp_adv_win_scale;
uint32_t tcpDelackThresholdHigh;
uint32_t tcpDelackThresholdLow;
@@ -14565,7 +14907,7 @@ struct hdd_config {
bool flow_steering_enable;
uint8_t max_msdus_per_rxinorderind;
bool active_mode_offload;
- bool bpf_packet_filter_enable;
+ bool apf_packet_filter_enable;
/* parameter for defer timer for enabling TDLS on p2p listen */
uint16_t tdls_enable_defer_time;
uint32_t fine_time_meas_cap;
@@ -14657,7 +14999,7 @@ struct hdd_config {
uint8_t tx_sched_wrr_bk[TX_SCHED_WRR_PARAM_STRING_LENGTH];
bool enable_fatal_event;
- bool bpf_enabled;
+ bool apf_enabled;
bool enable_dp_trace;
uint8_t dp_trace_config[DP_TRACE_CONFIG_STRING_LENGTH];
bool adaptive_dwell_mode_enabled;
@@ -14717,8 +15059,8 @@ struct hdd_config {
uint32_t max_sched_scan_plan_interval;
uint32_t max_sched_scan_plan_iterations;
uint8_t enable_phy_reg_retention;
- enum active_bpf_mode active_uc_bpf_mode;
- enum active_bpf_mode active_mc_bc_bpf_mode;
+ enum active_apf_mode active_uc_apf_mode;
+ enum active_apf_mode active_mc_bc_apf_mode;
enum hw_filter_mode hw_filter_mode;
bool sap_internal_restart;
enum restart_beaconing_on_ch_avoid_rule
@@ -14846,9 +15188,23 @@ struct hdd_config {
uint8_t rx_chain_mask_2g;
uint8_t tx_chain_mask_5g;
uint8_t rx_chain_mask_5g;
+ bool force_rsne_override;
bool mac_provision;
uint32_t provisioned_intf_pool;
uint32_t derived_intf_pool;
+ bool gcmp_enabled;
+ bool is_11k_offload_supported;
+ uint32_t offload_11k_enable_bitmask;
+ uint32_t neighbor_report_offload_params_bitmask;
+ uint32_t neighbor_report_offload_time_offset;
+ uint32_t neighbor_report_offload_low_rssi_offset;
+ uint32_t neighbor_report_offload_bmiss_count_trigger;
+ uint32_t neighbor_report_offload_per_threshold_offset;
+ uint32_t neighbor_report_offload_cache_timeout;
+ uint32_t neighbor_report_offload_max_req_cap;
+ uint32_t channel_select_logic_conc;
+ bool enable_dtim_selection_diversity;
+ bool enable_rtt_mac_randomization;
};
#define VAR_OFFSET(_Struct, _Var) (offsetof(_Struct, _Var))
diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_ipa.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_ipa.h
index dd87f651e4ae..14a34f1c49ee 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_ipa.h
+++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_ipa.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -105,6 +105,8 @@ void hdd_ipa_uc_set_quota(hdd_adapter_t *adapter, uint8_t set_quota,
uint64_t quota_bytes);
bool hdd_ipa_is_enabled(hdd_context_t *pHddCtx);
bool hdd_ipa_uc_is_enabled(hdd_context_t *pHddCtx);
+bool hdd_ipa_is_fw_wdi_actived(hdd_context_t *hdd_ctx);
+
#ifndef QCA_LL_TX_FLOW_CONTROL_V2
int hdd_ipa_send_mcc_scc_msg(hdd_context_t *hdd_ctx, bool mcc_mode);
void hdd_ipa_set_mcc_mode(bool mcc_mode);
@@ -128,7 +130,7 @@ int hdd_ipa_uc_ssr_deinit(void);
void hdd_ipa_uc_force_pipe_shutdown(hdd_context_t *hdd_ctx);
struct sk_buff *hdd_ipa_tx_packet_ipa(hdd_context_t *hdd_ctx,
struct sk_buff *skb, uint8_t session_id);
-bool hdd_ipa_is_present(hdd_context_t *hdd_ctx);
+bool hdd_ipa_is_present(void);
void hdd_ipa_dump_info(hdd_context_t *hdd_ctx);
QDF_STATUS hdd_ipa_uc_ol_init(hdd_context_t *hdd_ctx);
int hdd_ipa_uc_ol_deinit(hdd_context_t *hdd_ctx);
@@ -162,6 +164,14 @@ void hdd_ipa_uc_stat(hdd_adapter_t *adapter);
*/
void hdd_ipa_uc_info(hdd_context_t *hdd_ctx);
+/**
+ * hdd_ipa_clean_adapter_iface() - Clean adapter IPA interface
+ * @adapter: network adapter
+ *
+ * Return: None
+ */
+void hdd_ipa_clean_adapter_iface(hdd_adapter_t *adapter);
+
#else
static inline QDF_STATUS hdd_ipa_init(hdd_context_t *hdd_ctx)
@@ -248,6 +258,11 @@ static inline bool hdd_ipa_uc_is_enabled(hdd_context_t *pHddCtx)
return false;
}
+static inline bool hdd_ipa_is_fw_wdi_actived(hdd_context_t *hdd_ctx)
+{
+ return false;
+}
+
static inline void hdd_ipa_dump_info(hdd_context_t *hdd_ctx)
{
}
@@ -285,7 +300,6 @@ static inline struct sk_buff *hdd_ipa_tx_packet_ipa(hdd_context_t *hdd_ctx,
/**
* hdd_ipa_is_present() - get IPA hw status
- * @hdd_ctx: pointer to hdd context
*
* ipa_uc_reg_rdyCB is not directly designed to check
* ipa hw status. This is an undocumented function which
@@ -294,7 +308,7 @@ static inline struct sk_buff *hdd_ipa_tx_packet_ipa(hdd_context_t *hdd_ctx,
* Return: true - ipa hw present
* false - ipa hw not present
*/
-static inline bool hdd_ipa_is_present(hdd_context_t *hdd_ctx)
+static inline bool hdd_ipa_is_present(void)
{
return false;
}
@@ -335,5 +349,9 @@ static inline void hdd_ipa_uc_info(hdd_context_t *hdd_ctx)
{
}
+static inline void hdd_ipa_clean_adapter_iface(hdd_adapter_t *adapter)
+{
+}
+
#endif /* IPA_OFFLOAD */
#endif /* #ifndef HDD_IPA_H__ */
diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h
index 56a89dc684e2..b6bf79ed4cfa 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h
+++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_main.h
@@ -163,8 +163,6 @@
#define WLAN_WAIT_TIME_ANTENNA_MODE_REQ 3000
#define WLAN_WAIT_TIME_SET_DUAL_MAC_CFG 1500
-#define WLAN_WAIT_TIME_BPF 1000
-
/* rcpi request timeout in milli seconds */
#define WLAN_WAIT_TIME_RCPI 500
/* Maximum time(ms) to wait for RSO CMD status event */
@@ -444,7 +442,7 @@ extern struct mutex hdd_init_deinit_lock;
#define LINK_CONTEXT_MAGIC 0x4C494E4B /* LINKSPEED */
#define LINK_STATUS_MAGIC 0x4C4B5354 /* LINKSTATUS(LNST) */
#define TEMP_CONTEXT_MAGIC 0x74656d70 /* TEMP (temperature) */
-#define BPF_CONTEXT_MAGIC 0x4575354 /* BPF */
+#define APF_CONTEXT_MAGIC 0x4575354 /* APF */
#define POWER_STATS_MAGIC 0x14111990
#define RCPI_CONTEXT_MAGIC 0x7778888 /* RCPI */
#define ACTION_FRAME_RANDOM_CONTEXT_MAGIC 0x87878787
@@ -1246,15 +1244,6 @@ struct hdd_runtime_pm_context {
qdf_runtime_lock_t scan;
qdf_runtime_lock_t roc;
qdf_runtime_lock_t dfs;
-};
-
-/**
- * struct hdd_connect_pm_context - Runtime PM connect context per adapter
- * @connect: Runtime Connect Context
- *
- * Structure to hold runtime pm connect context for each adapter.
- */
-struct hdd_connect_pm_context {
qdf_runtime_lock_t connect;
};
@@ -1537,7 +1526,6 @@ struct hdd_adapter_s {
* channel needs to be moved from the existing 2.4GHz channel.
*/
uint8_t pre_cac_chan;
- struct hdd_connect_pm_context connect_rpm_ctx;
struct power_stats_response *chip_power_stats;
/* rcpi information */
@@ -1690,18 +1678,6 @@ struct hdd_offloaded_packets_ctx {
#endif
/**
- * struct hdd_bpf_context - hdd Context for bpf
- * @magic: magic number
- * @completion: Completion variable for BPF Get Capability
- * @capability_response: capabilities response received from fw
- */
-struct hdd_bpf_context {
- unsigned int magic;
- struct completion completion;
- struct sir_bpf_get_offload capability_response;
-};
-
-/**
* enum driver_status: Driver Modules status
* @DRIVER_MODULES_UNINITIALIZED: Driver CDS modules uninitialized
* @DRIVER_MODULES_OPENED: Driver CDS modules opened
@@ -2081,7 +2057,7 @@ struct hdd_context_s {
struct completion set_antenna_mode_cmpl;
/* Current number of TX X RX chains being used */
enum antenna_mode current_antenna_mode;
- bool bpf_enabled;
+ bool apf_enabled;
/* the radio index assigned by cnss_logger */
int radio_index;
@@ -2089,6 +2065,7 @@ struct hdd_context_s {
bool hbw_requested;
uint32_t last_nil_scan_bug_report_timestamp;
uint32_t ol_enable;
+ uint32_t tcp_delack_on;
#ifdef WLAN_FEATURE_NAN_DATAPATH
bool nan_datapath_enabled;
#endif
@@ -2146,6 +2123,7 @@ struct hdd_context_s {
hdd_adapter_t *cap_tsf_context;
#endif
struct sta_ap_intf_check_work_ctx *sta_ap_intf_check_work_info;
+ bool force_rsne_override;
qdf_wake_lock_t monitor_mode_wakelock;
bool lte_coex_ant_share;
@@ -2160,6 +2138,7 @@ struct hdd_context_s {
/* mutex lock to block concurrent access */
struct mutex power_stats_lock;
#endif
+ qdf_atomic_t is_acs_allowed;
};
int hdd_validate_channel_and_bandwidth(hdd_adapter_t *adapter,
@@ -2222,6 +2201,7 @@ hdd_adapter_t *hdd_get_adapter_by_rand_macaddr(hdd_context_t *hdd_ctx,
QDF_STATUS hdd_init_station_mode(hdd_adapter_t *pAdapter);
hdd_adapter_t *hdd_get_adapter(hdd_context_t *pHddCtx,
enum tQDF_ADAPTER_MODE mode);
+bool hdd_is_adapter_valid(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter);
void hdd_deinit_adapter(hdd_context_t *pHddCtx, hdd_adapter_t *pAdapter,
bool rtnl_held);
QDF_STATUS hdd_stop_adapter(hdd_context_t *pHddCtx, hdd_adapter_t *pAdapter,
@@ -2251,10 +2231,23 @@ enum tQDF_GLOBAL_CON_MODE hdd_get_conparam(void);
void hdd_abort_mac_scan(hdd_context_t *pHddCtx, uint8_t sessionId,
uint32_t scan_id, eCsrAbortReason reason);
void hdd_cleanup_actionframe(hdd_context_t *pHddCtx, hdd_adapter_t *pAdapter);
-
+void hdd_cleanup_actionframe_no_wait(hdd_context_t *pHddCtx,
+ hdd_adapter_t *pAdapter);
void crda_regulatory_entry_default(uint8_t *countryCode, int domain_id);
void wlan_hdd_reset_prob_rspies(hdd_adapter_t *pHostapdAdapter);
void hdd_prevent_suspend(uint32_t reason);
+
+/*
+ * hdd_get_first_valid_adapter() - Get the first valid adapter from adapter list
+ *
+ * This function is used to fetch the first valid adapter from the adapter
+ * list. If there is no valid adapter then it returns NULL
+ *
+ * Return: pointer to the first valid adapter, or NULL if none is found
+ *
+ */
+hdd_adapter_t *hdd_get_first_valid_adapter(void);
+
void hdd_allow_suspend(uint32_t reason);
void hdd_prevent_suspend_timeout(uint32_t timeout, uint32_t reason);
@@ -3021,4 +3014,13 @@ hdd_station_info_t *hdd_get_stainfo(hdd_station_info_t *aStaInfo,
int hdd_driver_memdump_init(void);
void hdd_driver_memdump_deinit(void);
+/**
+ * hdd_is_cli_iface_up() - check if there is any cli iface up
+ * @hdd_ctx: HDD context
+ *
+ * Return: true if any cli iface (STA/P2P_CLI) is up,
+ * false otherwise
+ */
+bool hdd_is_cli_iface_up(hdd_context_t *hdd_ctx);
+
#endif /* end #if !defined(WLAN_HDD_MAIN_H) */
diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_power.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_power.h
index c57e24ff6279..e27e3cdbbadb 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_power.h
+++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_power.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012, 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -157,10 +157,12 @@ void hdd_conf_hostoffload(hdd_adapter_t *pAdapter, bool fenable);
* hdd_conf_hw_filter_mode() - configure the given mode for the given adapter
* @adapter: the adapter to configure the hw filter for
* @mode: the hw filter mode to configure
+ * @filter_enable: True: Enable HW filter, False: Disable
*
* Return: Errno
*/
-int hdd_conf_hw_filter_mode(hdd_adapter_t *adapter, enum hw_filter_mode mode);
+int hdd_conf_hw_filter_mode(hdd_adapter_t *adapter, enum hw_filter_mode mode,
+ bool filter_enable);
#ifdef WLAN_FEATURE_PACKET_FILTERING
int wlan_hdd_set_mc_addr_list(hdd_adapter_t *pAdapter, uint8_t set);
diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_softap_tx_rx.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_softap_tx_rx.h
index 517117c70d47..b8ca634f7483 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_softap_tx_rx.h
+++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_softap_tx_rx.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -37,7 +37,7 @@
#include <wlan_hdd_hostapd.h>
#include <cdp_txrx_peer_ops.h>
-int hdd_softap_hard_start_xmit(struct sk_buff *skb,
+netdev_tx_t hdd_softap_hard_start_xmit(struct sk_buff *skb,
struct net_device *dev);
void hdd_softap_tx_timeout(struct net_device *dev);
QDF_STATUS hdd_softap_init_tx_rx(hdd_adapter_t *pAdapter);
diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_tx_rx.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_tx_rx.h
index 95a3df7a7407..c5ff55f5fdcc 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_tx_rx.h
+++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_tx_rx.h
@@ -54,7 +54,7 @@
#define SME_QOS_UAPSD_CFG_VI_CHANGED_MASK 0xF4
#define SME_QOS_UAPSD_CFG_VO_CHANGED_MASK 0xF8
-int hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev);
void hdd_tx_timeout(struct net_device *dev);
QDF_STATUS hdd_init_tx_rx(hdd_adapter_t *pAdapter);
diff --git a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_wext.h b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_wext.h
index d689c009bc59..7f20207947fa 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_wext.h
+++ b/drivers/staging/qcacld-3.0/core/hdd/inc/wlan_hdd_wext.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -345,7 +345,7 @@ extern int iw_set_essid(struct net_device *dev,
extern int iw_get_essid(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq, char *extra);
+ union iwreq_data *dwrq, char *extra);
extern int iw_set_ap_address(struct net_device *dev,
struct iw_request_info *info,
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_apf.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_apf.c
new file mode 100644
index 000000000000..bb177c42735a
--- /dev/null
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_apf.c
@@ -0,0 +1,729 @@
+/*
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
+ *
+ * Previously licensed under the ISC license by Qualcomm Atheros, Inc.
+ *
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all
+ * copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
+ * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
+ * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
+ * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
+ * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/*
+ * This file was originally distributed by Qualcomm Atheros, Inc.
+ * under proprietary terms before Copyright ownership was assigned
+ * to the Linux Foundation.
+ */
+
+
+/**
+ * DOC: wlan_hdd_apf.c
+ *
+ * Android Packet Filter support and implementation
+ */
+
+#include "wlan_hdd_apf.h"
+#include "qca_vendor.h"
+
+struct hdd_apf_context apf_context;
+
+/*
+ * define short names for the global vendor params
+ * used by __wlan_hdd_cfg80211_apf_offload()
+ */
+#define APF_INVALID \
+ QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_INVALID
+#define APF_SUBCMD \
+ QCA_WLAN_VENDOR_ATTR_SET_RESET_PACKET_FILTER
+#define APF_VERSION \
+ QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_VERSION
+#define APF_FILTER_ID \
+ QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_ID
+#define APF_PACKET_SIZE \
+ QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_SIZE
+#define APF_CURRENT_OFFSET \
+ QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_CURRENT_OFFSET
+#define APF_PROGRAM \
+ QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROGRAM
+#define APF_PROG_LEN \
+ QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROG_LENGTH
+#define APF_MAX \
+ QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_MAX
+
+static const struct nla_policy
+wlan_hdd_apf_offload_policy[APF_MAX + 1] = {
+ [APF_SUBCMD] = {.type = NLA_U32},
+ [APF_VERSION] = {.type = NLA_U32},
+ [APF_FILTER_ID] = {.type = NLA_U32},
+ [APF_PACKET_SIZE] = {.type = NLA_U32},
+ [APF_CURRENT_OFFSET] = {.type = NLA_U32},
+ [APF_PROGRAM] = {.type = NLA_BINARY,
+ .len = MAX_APF_MEMORY_LEN},
+ [APF_PROG_LEN] = {.type = NLA_U32},
+};
+
+void hdd_apf_context_init(void)
+{
+ qdf_event_create(&apf_context.qdf_apf_event);
+ qdf_spinlock_create(&apf_context.lock);
+ apf_context.apf_enabled = true;
+}
+
+void hdd_apf_context_destroy(void)
+{
+ qdf_event_destroy(&apf_context.qdf_apf_event);
+ qdf_spinlock_destroy(&apf_context.lock);
+ qdf_mem_zero(&apf_context, sizeof(apf_context));
+}
+
+void hdd_get_apf_capabilities_cb(void *hdd_context,
+ struct sir_apf_get_offload *data)
+{
+ hdd_context_t *hdd_ctx = hdd_context;
+ struct hdd_apf_context *context = &apf_context;
+
+ ENTER();
+
+ if (wlan_hdd_validate_context(hdd_ctx) || !data) {
+ hdd_err("HDD context is invalid or data(%pK) is null",
+ data);
+ return;
+ }
+
+ qdf_spin_lock(&context->lock);
+
+ /* The caller presumably timed out so there is nothing we can do */
+ if (context->magic != APF_CONTEXT_MAGIC) {
+ qdf_spin_unlock(&context->lock);
+ return;
+ }
+
+ /* context is valid so caller is still waiting */
+ /* paranoia: invalidate the magic */
+ context->magic = 0;
+
+ context->capability_response = *data;
+ qdf_event_set(&context->qdf_apf_event);
+
+ qdf_spin_unlock(&context->lock);
+}
+
+/**
+ * hdd_post_get_apf_capabilities_rsp() - Post the APF capabilities response to userspace
+ * @hdd_context: hdd_context
+ * @apf_get_offload: struct for get offload
+ *
+ * Return: 0 on success, error number otherwise.
+ */
+static int
+hdd_post_get_apf_capabilities_rsp(hdd_context_t *hdd_ctx,
+ struct sir_apf_get_offload *apf_get_offload)
+{
+ struct sk_buff *skb;
+ uint32_t nl_buf_len;
+
+ ENTER();
+
+ nl_buf_len = NLMSG_HDRLEN;
+ nl_buf_len +=
+ (sizeof(apf_get_offload->max_bytes_for_apf_inst) + NLA_HDRLEN) +
+ (sizeof(apf_get_offload->apf_version) + NLA_HDRLEN);
+
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(hdd_ctx->wiphy, nl_buf_len);
+ if (!skb) {
+ hdd_err("cfg80211_vendor_cmd_alloc_reply_skb failed");
+ return -ENOMEM;
+ }
+
+ hdd_debug("APF Version: %u APF max bytes: %u",
+ apf_get_offload->apf_version,
+ apf_get_offload->max_bytes_for_apf_inst);
+
+ if (nla_put_u32(skb, APF_PACKET_SIZE,
+ apf_get_offload->max_bytes_for_apf_inst) ||
+ nla_put_u32(skb, APF_VERSION, apf_get_offload->apf_version)) {
+ hdd_err("nla put failure");
+ goto nla_put_failure;
+ }
+
+ cfg80211_vendor_cmd_reply(skb);
+ EXIT();
+ return 0;
+
+nla_put_failure:
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+/**
+ * hdd_get_apf_capabilities - Get APF Capabilities
+ * @hdd_ctx: Hdd context
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int hdd_get_apf_capabilities(hdd_context_t *hdd_ctx)
+{
+ static struct hdd_apf_context *context = &apf_context;
+ QDF_STATUS status;
+ int ret;
+
+ ENTER();
+
+ qdf_spin_lock(&context->lock);
+ context->magic = APF_CONTEXT_MAGIC;
+ qdf_event_reset(&context->qdf_apf_event);
+ qdf_spin_unlock(&context->lock);
+
+ status = sme_get_apf_capabilities(hdd_ctx->hHal);
+ if (QDF_IS_STATUS_ERROR(status)) {
+ hdd_err("Unable to retrieve APF caps");
+ return -EINVAL;
+ }
+ /* request was sent -- wait for the response */
+ status = qdf_wait_for_event_completion(&context->qdf_apf_event,
+ WLAN_WAIT_TIME_APF_GET_CAPS);
+ if (QDF_IS_STATUS_ERROR(status)) {
+ hdd_err("Target response timed out");
+ qdf_spin_lock(&context->lock);
+ context->magic = 0;
+ qdf_spin_unlock(&context->lock);
+
+ return -ETIMEDOUT;
+ }
+ ret = hdd_post_get_apf_capabilities_rsp(hdd_ctx,
+ &apf_context.capability_response);
+ if (ret)
+ hdd_err("Failed to post get apf capabilities");
+
+ EXIT();
+ return ret;
+}
+
+/**
+ * hdd_set_reset_apf_offload - Post set/reset apf to SME
+ * @hdd_ctx: Hdd context
+ * @tb: Length of @data
+ * @adapter: pointer to adapter struct
+ *
+ * Return: 0 on success; errno on failure
+ */
+static int hdd_set_reset_apf_offload(hdd_context_t *hdd_ctx,
+ struct nlattr **tb,
+ hdd_adapter_t *adapter)
+{
+ struct sir_apf_set_offload *apf_set_offload;
+ QDF_STATUS status;
+ int prog_len;
+ int ret = 0;
+
+ ENTER();
+
+ if (!hdd_conn_is_connected(
+ WLAN_HDD_GET_STATION_CTX_PTR(adapter))) {
+ hdd_err("Not in Connected state!");
+ return -ENOTSUPP;
+ }
+
+ apf_set_offload = qdf_mem_malloc(sizeof(*apf_set_offload));
+ if (apf_set_offload == NULL) {
+ hdd_err("qdf_mem_malloc failed for apf_set_offload");
+ return -ENOMEM;
+ }
+
+ /* Parse and fetch apf packet size */
+ if (!tb[APF_PACKET_SIZE]) {
+ hdd_err("attr apf packet size failed");
+ ret = -EINVAL;
+ goto fail;
+ }
+ apf_set_offload->total_length = nla_get_u32(tb[APF_PACKET_SIZE]);
+
+ if (!apf_set_offload->total_length) {
+ hdd_debug("APF reset packet filter received");
+ goto post_sme;
+ }
+
+ /* Parse and fetch apf program */
+ if (!tb[APF_PROGRAM]) {
+ hdd_err("attr apf program failed");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ prog_len = nla_len(tb[APF_PROGRAM]);
+ apf_set_offload->program = qdf_mem_malloc(sizeof(uint8_t) * prog_len);
+
+ if (apf_set_offload->program == NULL) {
+ hdd_err("qdf_mem_malloc failed for apf offload program");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ apf_set_offload->current_length = prog_len;
+ nla_memcpy(apf_set_offload->program, tb[APF_PROGRAM], prog_len);
+ apf_set_offload->session_id = adapter->sessionId;
+
+ hdd_debug("APF set instructions");
+ QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_DEBUG,
+ apf_set_offload->program, prog_len);
+
+ /* Parse and fetch filter Id */
+ if (!tb[APF_FILTER_ID]) {
+ hdd_err("attr filter id failed");
+ ret = -EINVAL;
+ goto fail;
+ }
+ apf_set_offload->filter_id = nla_get_u32(tb[APF_FILTER_ID]);
+
+ /* Parse and fetch current offset */
+ if (!tb[APF_CURRENT_OFFSET]) {
+ hdd_err("attr current offset failed");
+ ret = -EINVAL;
+ goto fail;
+ }
+ apf_set_offload->current_offset = nla_get_u32(tb[APF_CURRENT_OFFSET]);
+
+post_sme:
+ hdd_debug("Posting APF SET/RESET to SME, session_id: %d APF Version: %d filter ID: %d total_length: %d current_length: %d current offset: %d",
+ apf_set_offload->session_id,
+ apf_set_offload->version,
+ apf_set_offload->filter_id,
+ apf_set_offload->total_length,
+ apf_set_offload->current_length,
+ apf_set_offload->current_offset);
+
+ status = sme_set_apf_instructions(hdd_ctx->hHal, apf_set_offload);
+ if (!QDF_IS_STATUS_SUCCESS(status)) {
+ hdd_err("sme_set_apf_instructions failed(err=%d)", status);
+ ret = -EINVAL;
+ goto fail;
+ }
+ EXIT();
+
+fail:
+ if (apf_set_offload->current_length)
+ qdf_mem_free(apf_set_offload->program);
+ qdf_mem_free(apf_set_offload);
+ return ret;
+}
+
+/**
+ * hdd_enable_disable_apf - Enable or Disable the APF interpreter
+ * @hdd_ctx: Hdd context
+ * @vdev_id: VDEV id
+ * @apf_enable: true: Enable APF Int., false: disable APF Int.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int
+hdd_enable_disable_apf(hdd_context_t *hdd_ctx, uint8_t vdev_id, bool apf_enable)
+{
+ QDF_STATUS status;
+
+ ENTER();
+
+ status = sme_set_apf_enable_disable(hdd_ctx->hHal, vdev_id, apf_enable);
+ if (!QDF_IS_STATUS_SUCCESS(status)) {
+ hdd_err("Unable to post sme apf enable/disable message (status-%d)",
+ status);
+ return -EINVAL;
+ }
+
+ qdf_spin_lock(&apf_context.lock);
+ apf_context.apf_enabled = apf_enable;
+ qdf_spin_unlock(&apf_context.lock);
+
+ EXIT();
+ return 0;
+}
+
+/**
+ * hdd_apf_write_memory - Write into the apf work memory
+ * @hdd_ctx: Hdd context
+ * @tb: list of attributes
+ * @session_id: Session id
+ *
+ * This function writes code/data into the APF work memory and
+ * provides program length that is passed on to the interpreter.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int
+hdd_apf_write_memory(hdd_context_t *hdd_ctx, struct nlattr **tb,
+ uint8_t session_id)
+{
+ struct wmi_apf_write_memory_params write_mem_params = {0};
+ QDF_STATUS status;
+ int ret = 0;
+ bool apf_enabled;
+
+ ENTER();
+
+ write_mem_params.vdev_id = session_id;
+
+ qdf_spin_lock(&apf_context.lock);
+ apf_enabled = apf_context.apf_enabled;
+ qdf_spin_unlock(&apf_context.lock);
+
+ if (apf_enabled) {
+ hdd_err("Cannot get/set when APF interpreter is enabled");
+ return -EINVAL;
+ }
+
+ /* Read program length */
+ if (!tb[APF_PROG_LEN]) {
+ hdd_err("attr program length failed");
+ return -EINVAL;
+ }
+ write_mem_params.program_len = nla_get_u32(tb[APF_PROG_LEN]);
+
+ /* Read APF work memory offset */
+ if (!tb[APF_CURRENT_OFFSET]) {
+ hdd_err("attr apf packet size failed");
+ return -EINVAL;
+ }
+ write_mem_params.addr_offset = nla_get_u32(tb[APF_CURRENT_OFFSET]);
+
+ /* Parse and fetch apf program */
+ if (!tb[APF_PROGRAM]) {
+ hdd_err("attr apf program failed");
+ return -EINVAL;
+ }
+
+ write_mem_params.length = nla_len(tb[APF_PROGRAM]);
+ if (!write_mem_params.length) {
+ hdd_err("Program attr with empty data");
+ return -EINVAL;
+ }
+
+ write_mem_params.buf = qdf_mem_malloc(sizeof(uint8_t)
+ * write_mem_params.length);
+ if (write_mem_params.buf == NULL) {
+ hdd_err("failed to alloc mem for apf write mem operation");
+ return -EINVAL;
+ }
+ nla_memcpy(write_mem_params.buf, tb[APF_PROGRAM],
+ write_mem_params.length);
+
+ write_mem_params.apf_version =
+ apf_context.capability_response.apf_version;
+
+ status = sme_apf_write_work_memory(hdd_ctx->hHal, &write_mem_params);
+ if (!QDF_IS_STATUS_SUCCESS(status)) {
+ hdd_err("Unable to retrieve APF caps");
+ ret = -EINVAL;
+ }
+
+ if (write_mem_params.buf)
+ qdf_mem_free(write_mem_params.buf);
+
+ EXIT();
+ return ret;
+}
+
+void
+hdd_apf_read_memory_callback(void *hdd_context,
+ struct wmi_apf_read_memory_resp_event_params
+ *read_mem_evt)
+{
+ hdd_context_t *hdd_ctx = hdd_context;
+ static struct hdd_apf_context *context = &apf_context;
+ uint8_t *buf_ptr;
+ uint32_t pkt_offset;
+ ENTER();
+
+ if (wlan_hdd_validate_context(hdd_ctx) || !read_mem_evt) {
+ hdd_err("HDD context is invalid or event buf(%pK) is null",
+ read_mem_evt);
+ return;
+ }
+
+ qdf_spin_lock(&context->lock);
+ if (context->magic != APF_CONTEXT_MAGIC) {
+ /* The caller presumably timed out, nothing to do */
+ qdf_spin_unlock(&context->lock);
+ hdd_err("Caller timed out or corrupt magic, simply return");
+ return;
+ }
+
+ if (read_mem_evt->offset < context->offset) {
+ qdf_spin_unlock(&context->lock);
+ hdd_err("Offset in read event(%d) smaller than offset in request(%d)!",
+ read_mem_evt->offset, context->offset);
+ return;
+ }
+
+ /*
+ * offset in the event is relative to the APF work memory.
+ * Calculate the packet offset, which gives us the relative
+ * location in the buffer to start copy into.
+ */
+ pkt_offset = read_mem_evt->offset - context->offset;
+
+ if (context->buf_len < pkt_offset + read_mem_evt->length) {
+ qdf_spin_unlock(&context->lock);
+ hdd_err("Read chunk exceeding allocated space");
+ return;
+ }
+ buf_ptr = context->buf + pkt_offset;
+
+ qdf_mem_copy(buf_ptr, read_mem_evt->data, read_mem_evt->length);
+
+ if (!read_mem_evt->more_data) {
+ /* Release the caller after last event, clear magic */
+ context->magic = 0;
+ qdf_event_set(&context->qdf_apf_event);
+ }
+
+ qdf_spin_unlock(&context->lock);
+
+ EXIT();
+}
+
+/**
+ * hdd_apf_read_memory() - read part of the APF work memory
+ * @hdd_ctx: Hdd context
+ * @tb: list of attributes
+ * @session_id: Session id
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int hdd_apf_read_memory(hdd_context_t *hdd_ctx, struct nlattr **tb,
+ uint8_t session_id)
+{
+ struct wmi_apf_read_memory_params read_mem_params = {0};
+ static struct hdd_apf_context *context = &apf_context;
+ QDF_STATUS status;
+ unsigned long nl_buf_len = NLMSG_HDRLEN;
+ int ret = 0;
+ struct sk_buff *skb = NULL;
+ uint8_t *bufptr;
+
+ ENTER();
+
+ read_mem_params.vdev_id = session_id;
+
+ /* Read APF work memory offset */
+ if (!tb[APF_CURRENT_OFFSET]) {
+ hdd_err("attr apf memory offset failed");
+ return -EINVAL;
+ }
+ read_mem_params.addr_offset = nla_get_u32(tb[APF_CURRENT_OFFSET]);
+
+ /* Read length */
+ if (!tb[APF_PACKET_SIZE]) {
+ hdd_err("attr apf packet size failed");
+ return -EINVAL;
+ }
+ read_mem_params.length = nla_get_u32(tb[APF_PACKET_SIZE]);
+ if (!read_mem_params.length) {
+ hdd_err("apf read length cannot be zero!");
+ return -EINVAL;
+ }
+ bufptr = qdf_mem_malloc(read_mem_params.length);
+ if (bufptr == NULL) {
+ hdd_err("alloc failed for cumulative event buffer");
+ return -ENOMEM;
+ }
+
+ qdf_spin_lock(&context->lock);
+ if (context->apf_enabled) {
+ qdf_spin_unlock(&context->lock);
+ hdd_err("Cannot get/set while interpreter is enabled");
+ qdf_mem_free(bufptr);
+ return -EINVAL;
+ }
+
+ qdf_event_reset(&context->qdf_apf_event);
+ context->offset = read_mem_params.addr_offset;
+
+ context->buf = bufptr;
+ context->buf_len = read_mem_params.length;
+ context->magic = APF_CONTEXT_MAGIC;
+ qdf_spin_unlock(&context->lock);
+
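+ /*
+ * The target returns the requested memory in one or more read events;
+ * hdd_apf_read_memory_callback() copies each chunk into context->buf
+ * and sets qdf_apf_event once the final chunk (more_data == 0) arrives.
+ */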
+ status = sme_apf_read_work_memory(hdd_ctx->hHal, &read_mem_params);
+ if (QDF_IS_STATUS_ERROR(status)) {
+ hdd_err("Unable to post sme APF read memory message (status-%d)",
+ status);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* request was sent -- wait for the response */
+ status = qdf_wait_for_event_completion(&context->qdf_apf_event,
+ WLAN_WAIT_TIME_APF_READ_MEM);
+ if (QDF_IS_STATUS_ERROR(status)) {
+ hdd_err("Target response timed out");
+ qdf_spin_lock(&context->lock);
+ context->magic = 0;
+ qdf_spin_unlock(&context->lock);
+ ret = -ETIMEDOUT;
+ goto fail;
+ }
+
+ nl_buf_len += sizeof(uint32_t) + NLA_HDRLEN;
+ nl_buf_len += context->buf_len + NLA_HDRLEN;
+
+ skb = cfg80211_vendor_cmd_alloc_reply_skb(hdd_ctx->wiphy, nl_buf_len);
+ if (!skb) {
+ hdd_err("cfg80211_vendor_cmd_alloc_reply_skb failed");
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (nla_put_u32(skb, APF_SUBCMD, QCA_WLAN_READ_PACKET_FILTER) ||
+ nla_put(skb, APF_PROGRAM, read_mem_params.length, context->buf)) {
+ hdd_err("put fail");
+ kfree_skb(skb);
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ cfg80211_vendor_cmd_reply(skb);
+fail:
+ if (context->buf) {
+ qdf_mem_free(context->buf);
+ context->buf = NULL;
+ }
+
+ EXIT();
+ return ret;
+}
+
+
+/**
+ * __wlan_hdd_cfg80211_apf_offload() - handle the APF offload vendor command
+ * @wiphy: wiphy structure pointer
+ * @wdev: Wireless device structure pointer
+ * @data: Pointer to the data received
+ * @data_len: Length of @data
+ *
+ * Return: 0 on success; errno on failure
+ */
+static int
+__wlan_hdd_cfg80211_apf_offload(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ hdd_context_t *hdd_ctx = wiphy_priv(wiphy);
+ struct net_device *dev = wdev->netdev;
+ hdd_adapter_t *adapter = WLAN_HDD_GET_PRIV_PTR(dev);
+ struct nlattr *tb[APF_MAX + 1];
+ int ret_val = 0, apf_subcmd;
+ uint8_t session_id = adapter->sessionId;
+ static struct hdd_apf_context *context = &apf_context;
+
+ ENTER();
+
+ ret_val = wlan_hdd_validate_context(hdd_ctx);
+ if (ret_val)
+ return ret_val;
+
+ if (QDF_GLOBAL_FTM_MODE == hdd_get_conparam()) {
+ hdd_err("Command not allowed in FTM mode");
+ return -EINVAL;
+ }
+
+ if (!hdd_ctx->apf_enabled) {
+ hdd_err("APF offload is not supported/enabled");
+ return -ENOTSUPP;
+ }
+
+ if (hdd_nla_parse(tb, APF_MAX, data, data_len,
+ wlan_hdd_apf_offload_policy)) {
+ hdd_err("Invalid ATTR");
+ return -EINVAL;
+ }
+
+ if (!(adapter->device_mode == QDF_STA_MODE ||
+ adapter->device_mode == QDF_P2P_CLIENT_MODE)) {
+ hdd_err("APF only supported in STA or P2P CLI modes!");
+ return -ENOTSUPP;
+ }
+
+ if (!tb[APF_SUBCMD]) {
+ hdd_err("attr apf sub-command failed");
+ return -EINVAL;
+ }
+ apf_subcmd = nla_get_u32(tb[APF_SUBCMD]);
+
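+ /* Serialize APF vendor commands; only one request is handled at a time */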
+ qdf_spin_lock(&context->lock);
+ if (context->cmd_in_progress) {
+ qdf_spin_unlock(&context->lock);
+ hdd_err("Another APF cmd in progress, try again later!");
+ return -EAGAIN;
+ }
+ context->cmd_in_progress = true;
+ qdf_spin_unlock(&context->lock);
+
+ switch (apf_subcmd) {
+ /* Legacy APF sub-commands */
+ case QCA_WLAN_SET_PACKET_FILTER:
+ ret_val = hdd_set_reset_apf_offload(hdd_ctx, tb,
+ adapter);
+ break;
+ case QCA_WLAN_GET_PACKET_FILTER:
+ ret_val = hdd_get_apf_capabilities(hdd_ctx);
+ break;
+
+ /* APF 3.0 sub-commands */
+ case QCA_WLAN_WRITE_PACKET_FILTER:
+ ret_val = hdd_apf_write_memory(hdd_ctx, tb, session_id);
+ break;
+ case QCA_WLAN_READ_PACKET_FILTER:
+ ret_val = hdd_apf_read_memory(hdd_ctx, tb, session_id);
+ break;
+ case QCA_WLAN_ENABLE_PACKET_FILTER:
+ ret_val = hdd_enable_disable_apf(hdd_ctx,
+ session_id,
+ true);
+ break;
+ case QCA_WLAN_DISABLE_PACKET_FILTER:
+ ret_val = hdd_enable_disable_apf(hdd_ctx,
+ session_id,
+ false);
+ break;
+ default:
+ hdd_err("Unknown APF Sub-command: %d", apf_subcmd);
+ ret_val = -ENOTSUPP;
+ }
+
+ qdf_spin_lock(&context->lock);
+ context->cmd_in_progress = false;
+ qdf_spin_unlock(&context->lock);
+
+ return ret_val;
+}
+
+/**
+ * wlan_hdd_cfg80211_apf_offload() - SSR Wrapper to APF Offload
+ * @wiphy: wiphy structure pointer
+ * @wdev: Wireless device structure pointer
+ * @data: Pointer to the data received
+ * @data_len: Length of @data
+ *
+ * Return: 0 on success; errno on failure
+ */
+
+int wlan_hdd_cfg80211_apf_offload(struct wiphy *wiphy,
+ struct wireless_dev *wdev,
+ const void *data, int data_len)
+{
+ int ret;
+
+ cds_ssr_protect(__func__);
+ ret = __wlan_hdd_cfg80211_apf_offload(wiphy, wdev, data, data_len);
+ cds_ssr_unprotect(__func__);
+
+ return ret;
+}
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c
index 9367c19e3a3b..2a0ad6e1ca8a 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_assoc.c
@@ -291,6 +291,49 @@ hdd_conn_get_connected_cipher_algo(hdd_station_ctx_t *pHddStaCtx,
return fConnected;
}
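+/**
+ * hdd_get_sta_connection_in_progress() - find an adapter with a connection in progress
+ * @hdd_ctx: pointer to hdd context
+ *
+ * Walk the adapter list and return the first STA/P2P-client/P2P-device
+ * adapter that is either connecting or still completing its key exchange.
+ *
+ * Return: adapter with a connection in progress, NULL if none
+ */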
+hdd_adapter_t *hdd_get_sta_connection_in_progress(hdd_context_t *hdd_ctx)
+{
+ hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL;
+ hdd_adapter_t *adapter = NULL;
+ QDF_STATUS status;
+ hdd_station_ctx_t *hdd_sta_ctx;
+
+ if (!hdd_ctx) {
+ hdd_err("HDD context is NULL");
+ return NULL;
+ }
+
+ status = hdd_get_front_adapter(hdd_ctx, &adapter_node);
+ while (NULL != adapter_node && QDF_STATUS_SUCCESS == status) {
+ adapter = adapter_node->pAdapter;
+ if (!adapter)
+ goto end;
+
+ hdd_sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
+ if ((QDF_STA_MODE == adapter->device_mode) ||
+ (QDF_P2P_CLIENT_MODE == adapter->device_mode) ||
+ (QDF_P2P_DEVICE_MODE == adapter->device_mode)) {
+ if (eConnectionState_Connecting ==
+ hdd_sta_ctx->conn_info.connState) {
+ hdd_debug("session_id %d: Connection is in progress",
+ adapter->sessionId);
+ return adapter;
+ } else if ((eConnectionState_Associated ==
+ hdd_sta_ctx->conn_info.connState) &&
+ sme_is_sta_key_exchange_in_progress(
+ hdd_ctx->hHal, adapter->sessionId)) {
+ hdd_debug("session_id %d: Key exchange is in progress",
+ adapter->sessionId);
+ return adapter;
+ }
+ }
+end:
+ status = hdd_get_next_adapter(hdd_ctx, adapter_node, &next);
+ adapter_node = next;
+ }
+ return NULL;
+}
+
/**
* hdd_remove_beacon_filter() - remove beacon filter
* @adapter: Pointer to the hdd adapter
@@ -2202,8 +2245,9 @@ bool hdd_is_roam_sync_in_progress(tCsrRoamInfo *roaminfo)
static int hdd_change_sta_state_authenticated(hdd_adapter_t *adapter,
tCsrRoamInfo *roaminfo)
{
- int ret;
uint32_t timeout;
+ QDF_STATUS status;
+ uint8_t staid = HDD_WLAN_INVALID_STA_ID;
hdd_station_ctx_t *hddstactx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
@@ -2211,13 +2255,27 @@ static int hdd_change_sta_state_authenticated(hdd_adapter_t *adapter,
AUTO_PS_ENTRY_TIMER_DEFAULT_VALUE :
hdd_ctx->config->auto_bmps_timer_val * 1000;
- hdd_debug("Changing TL state to AUTHENTICATED for StaId= %d",
- hddstactx->conn_info.staId[0]);
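+ /* For IBSS, map the peer MAC to its station ID; broadcast maps to ID 0 */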
+ if (QDF_IBSS_MODE == adapter->device_mode) {
+ if (qdf_is_macaddr_broadcast(&roaminfo->peerMac)) {
+ staid = 0;
+ } else {
+ status = hdd_get_peer_sta_id(hddstactx,
+ &roaminfo->peerMac, &staid);
+ if (status != QDF_STATUS_SUCCESS) {
+ hdd_err("Unable to find staid for %pM",
+ roaminfo->peerMac.bytes);
+ return qdf_status_to_os_return(status);
+ }
+ }
+ } else {
+ staid = hddstactx->conn_info.staId[0];
+ }
+ hdd_debug("Changing TL state to AUTHENTICATED for StaId= %d", staid);
/* Connections that do not need Upper layer authentication,
* transition TL to 'Authenticated' state after the keys are set
*/
- ret = hdd_change_peer_state(adapter,
+ status = hdd_change_peer_state(adapter,
hddstactx->conn_info.staId[0],
OL_TXRX_PEER_STATE_AUTH,
hdd_is_roam_sync_in_progress(roaminfo));
@@ -2230,9 +2288,86 @@ static int hdd_change_sta_state_authenticated(hdd_adapter_t *adapter,
timeout);
}
- return ret;
+ return qdf_status_to_os_return(status);
}
+
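+/**
+ * hdd_peer_state_transition() - move the peer to the authenticated state
+ * @adapter: pointer to adapter
+ * @roam_info: roam info reported with the set key completion
+ * @roam_status: roam result reported with the set key completion
+ *
+ * Transition the peer to the authenticated state once the required keys
+ * (PTK/GTK, or the IBSS static keys) have been installed.
+ *
+ * Return: None
+ */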
+static void hdd_peer_state_transition(hdd_adapter_t *adapter,
+ tCsrRoamInfo *roam_info,
+ eCsrRoamResult roam_status)
+{
+ hdd_station_ctx_t *hdd_sta_ctx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
+ QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
+ eCsrEncryptionType encr_type = hdd_sta_ctx->conn_info.ucEncryptionType;
+
+ /*
+ * If the security mode is one of the following, the IBSS peer waits
+ * in the CONN state and is moved to the AUTH state here. For a
+ * non-secure connection there is no need to wait for set-key
+ * completion; the peer is moved to AUTH in hdd_roam_register_sta().
+ */
+ if (QDF_IBSS_MODE == adapter->device_mode) {
+ if ((encr_type == eCSR_ENCRYPT_TYPE_TKIP) ||
+ (encr_type == eCSR_ENCRYPT_TYPE_AES) ||
+ (encr_type == eCSR_ENCRYPT_TYPE_WEP40_STATICKEY) ||
+ (encr_type == eCSR_ENCRYPT_TYPE_WEP104_STATICKEY)) {
+ hdd_debug("IBSS mode: moving to authenticated state-%d",
+ encr_type);
+ hdd_change_sta_state_authenticated(adapter, roam_info);
+ }
+ return;
+ }
+
+ if (eCSR_ROAM_RESULT_AUTHENTICATED == roam_status) {
+ hdd_sta_ctx->conn_info.gtk_installed = true;
+ /*
+ * For FT-PSK the PTK exchange happens during
+ * preauthentication itself, so there is no set-PTK
+ * after roaming and ptk_installed stays false. The
+ * STA TL state moves to authenticated only when
+ * ptk_installed is true, so set ptk_installed to
+ * true in the 11R roaming case.
+ */
+ if (sme_neighbor_roam_is11r_assoc(
+ WLAN_HDD_GET_HAL_CTX(adapter),
+ adapter->sessionId))
+ hdd_sta_ctx->conn_info.ptk_installed =
+ true;
+ } else {
+ hdd_sta_ctx->conn_info.ptk_installed = true;
+ }
+
+ /* In the WPA case, move the STA to authenticated once
+ * the PTK is installed. Earlier, in the WEP case, the STA
+ * was moved to AUTHENTICATED prior to setting the
+ * unicast key, which resulted in a few unencrypted
+ * packets being sent. Now, in the WEP case, the STA
+ * state is moved to AUTHENTICATED only after both the
+ * unicast and broadcast keys are set.
+ */
+ if ((encr_type == eCSR_ENCRYPT_TYPE_WEP40) ||
+ (encr_type == eCSR_ENCRYPT_TYPE_WEP104) ||
+ (encr_type == eCSR_ENCRYPT_TYPE_WEP40_STATICKEY) ||
+ (encr_type == eCSR_ENCRYPT_TYPE_WEP104_STATICKEY)) {
+ if (hdd_sta_ctx->conn_info.gtk_installed &&
+ hdd_sta_ctx->conn_info.ptk_installed)
+ qdf_status =
+ hdd_change_sta_state_authenticated(adapter,
+ roam_info);
+ } else if (hdd_sta_ctx->conn_info.ptk_installed) {
+ qdf_status =
+ hdd_change_sta_state_authenticated(adapter,
+ roam_info);
+ }
+
+ if (hdd_sta_ctx->conn_info.gtk_installed &&
+ hdd_sta_ctx->conn_info.ptk_installed) {
+ hdd_sta_ctx->conn_info.gtk_installed = false;
+ hdd_sta_ctx->conn_info.ptk_installed = false;
+ }
+
+ hdd_sta_ctx->roam_info.roamingState = HDD_ROAM_STATE_NONE;
+}
/**
* hdd_roam_set_key_complete_handler() - Update the security parameters
* @pAdapter: pointer to adapter
@@ -2251,7 +2386,6 @@ static QDF_STATUS hdd_roam_set_key_complete_handler(hdd_adapter_t *pAdapter,
{
eCsrEncryptionType connectedCipherAlgo;
bool fConnected = false;
- QDF_STATUS qdf_status = QDF_STATUS_E_FAILURE;
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
ENTER();
@@ -2273,80 +2407,7 @@ static QDF_STATUS hdd_roam_set_key_complete_handler(hdd_adapter_t *pAdapter,
fConnected = hdd_conn_get_connected_cipher_algo(pHddStaCtx,
&connectedCipherAlgo);
if (fConnected) {
- if (QDF_IBSS_MODE == pAdapter->device_mode) {
- uint8_t staId;
-
- if (qdf_is_macaddr_broadcast(&pRoamInfo->peerMac)) {
- pHddStaCtx->roam_info.roamingState =
- HDD_ROAM_STATE_NONE;
- } else {
- qdf_status = hdd_get_peer_sta_id(
- pHddStaCtx,
- &pRoamInfo->peerMac,
- &staId);
- if (QDF_STATUS_SUCCESS == qdf_status) {
- hdd_debug("WLAN TL STA Ptk Installed for STAID=%d",
- staId);
- pHddStaCtx->roam_info.roamingState =
- HDD_ROAM_STATE_NONE;
- }
- }
- } else {
- if (eCSR_ROAM_RESULT_AUTHENTICATED == roamResult) {
- pHddStaCtx->conn_info.gtk_installed = true;
- /*
- * PTK exchange happens in preauthentication
- * itself if key_mgmt is FT-PSK, ptk_installed
- * was false as there is no set PTK after
- * roaming. STA TL state moves to authenticated
- * only if ptk_installed is true. So, make
- * ptk_installed to true in case of 11R roaming.
- */
- if (sme_neighbor_roam_is11r_assoc(
- WLAN_HDD_GET_HAL_CTX(pAdapter),
- pAdapter->sessionId))
- pHddStaCtx->conn_info.ptk_installed =
- true;
- } else {
- pHddStaCtx->conn_info.ptk_installed = true;
- }
-
- /* In WPA case move STA to authenticated when
- * ptk is installed.Earlier in WEP case STA
- * was moved to AUTHENTICATED prior to setting
- * the unicast key and it was resulting in sending
- * few un-encrypted packet. Now in WEP case
- * STA state will be moved to AUTHENTICATED
- * after we set the unicast and broadcast key.
- */
- if ((pHddStaCtx->conn_info.ucEncryptionType ==
- eCSR_ENCRYPT_TYPE_WEP40) ||
- (pHddStaCtx->conn_info.ucEncryptionType ==
- eCSR_ENCRYPT_TYPE_WEP104) ||
- (pHddStaCtx->conn_info.ucEncryptionType ==
- eCSR_ENCRYPT_TYPE_WEP40_STATICKEY) ||
- (pHddStaCtx->conn_info.ucEncryptionType ==
- eCSR_ENCRYPT_TYPE_WEP104_STATICKEY)) {
- if (pHddStaCtx->conn_info.gtk_installed &&
- pHddStaCtx->conn_info.ptk_installed)
- qdf_status =
- hdd_change_sta_state_authenticated(pAdapter,
- pRoamInfo);
- } else if (pHddStaCtx->conn_info.ptk_installed) {
- qdf_status =
- hdd_change_sta_state_authenticated(pAdapter,
- pRoamInfo);
- }
-
- if (pHddStaCtx->conn_info.gtk_installed &&
- pHddStaCtx->conn_info.ptk_installed) {
- pHddStaCtx->conn_info.gtk_installed = false;
- pHddStaCtx->conn_info.ptk_installed = false;
- }
-
- pHddStaCtx->roam_info.roamingState =
- HDD_ROAM_STATE_NONE;
- }
+ hdd_peer_state_transition(pAdapter, pRoamInfo, roamResult);
} else {
/*
* possible disassoc after issuing set key and waiting
@@ -3487,6 +3548,7 @@ roam_roam_connect_status_update_handler(hdd_adapter_t *pAdapter,
{
hdd_station_ctx_t *pHddStaCtx =
WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
+ eCsrEncryptionType enc_type = pHddStaCtx->ibss_enc_key.encType;
struct station_info *stainfo;
hdd_debug("IBSS New Peer indication from SME "
@@ -3508,6 +3570,14 @@ roam_roam_connect_status_update_handler(hdd_adapter_t *pAdapter,
pHddCtx->sta_to_adapter[pRoamInfo->staId] = pAdapter;
else
hdd_debug("invalid sta_id %d", pRoamInfo->staId);
+ if ((eCSR_ENCRYPT_TYPE_WEP40_STATICKEY == enc_type) ||
+ (eCSR_ENCRYPT_TYPE_WEP104_STATICKEY == enc_type) ||
+ (eCSR_ENCRYPT_TYPE_TKIP == enc_type) ||
+ (eCSR_ENCRYPT_TYPE_AES == enc_type)) {
+ hdd_debug("IBSS sta-id:%d, auth req set to true",
+ pRoamInfo->staId);
+ pRoamInfo->fAuthRequired = true;
+ }
/* Register the Station with TL for the new peer. */
qdf_status = hdd_roam_register_sta(pAdapter,
@@ -3695,10 +3765,7 @@ QDF_STATUS hdd_roam_deregister_tdlssta(hdd_adapter_t *pAdapter, uint8_t staId)
QDF_STATUS qdf_status;
qdf_status = ol_txrx_clear_peer(staId);
- if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
- hdd_err("ol_txrx_clear_peer() failed staID: %d status: %d [0x%08X]",
- staId, qdf_status, qdf_status);
- }
+
return qdf_status;
}
@@ -4854,10 +4921,11 @@ hdd_sme_roam_callback(void *pContext, tCsrRoamInfo *pRoamInfo, uint32_t roamId,
hdd_station_ctx_t *pHddStaCtx = NULL;
QDF_STATUS status = QDF_STATUS_SUCCESS;
struct cfg80211_bss *bss_status;
+ hdd_context_t *pHddCtx;
- hdd_debug("CSR Callback: status= %d result= %d roamID=%d",
- roamStatus, roamResult, roamId);
-
+ if (eCSR_ROAM_UPDATE_SCAN_RESULT != roamStatus)
+ hdd_debug("CSR Callback: status= %d result= %d roamID=%d",
+ roamStatus, roamResult, roamId);
/* Sanity check */
if ((NULL == pAdapter) || (WLAN_HDD_ADAPTER_MAGIC != pAdapter->magic)) {
hdd_err("Invalid adapter or adapter has invalid magic");
@@ -4866,6 +4934,7 @@ hdd_sme_roam_callback(void *pContext, tCsrRoamInfo *pRoamInfo, uint32_t roamId,
pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
+ pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
/* Omitting eCSR_ROAM_UPDATE_SCAN_RESULT as this is too frequent */
if (eCSR_ROAM_UPDATE_SCAN_RESULT != roamStatus)
@@ -5203,6 +5272,7 @@ hdd_sme_roam_callback(void *pContext, tCsrRoamInfo *pRoamInfo, uint32_t roamId,
pAdapter->roam_ho_fail = false;
pHddStaCtx->ft_carrier_on = false;
complete(&pAdapter->roaming_comp_var);
+ schedule_delayed_work(&pHddCtx->roc_req_work, 0);
break;
default:
@@ -5411,11 +5481,12 @@ static int32_t hdd_process_genie(hdd_adapter_t *pAdapter,
#endif
uint16_t gen_ie_len, uint8_t *gen_ie)
{
- tHalHandle halHandle = WLAN_HDD_GET_HAL_CTX(pAdapter);
- tDot11fIERSN dot11RSNIE;
- tDot11fIEWPA dot11WPAIE;
+ uint32_t ret;
uint8_t *pRsnIe;
uint16_t RSNIeLen;
+ tDot11fIERSN dot11RSNIE = {0};
+ tDot11fIEWPA dot11WPAIE = {0};
+ tHalHandle halHandle = WLAN_HDD_GET_HAL_CTX(pAdapter);
/*
* Clear struct of tDot11fIERSN and tDot11fIEWPA specifically
@@ -5437,16 +5508,23 @@ static int32_t hdd_process_genie(hdd_adapter_t *pAdapter,
pRsnIe = gen_ie + 2;
RSNIeLen = gen_ie_len - 2;
/* Unpack the RSN IE */
- dot11f_unpack_ie_rsn((tpAniSirGlobal) halHandle,
- pRsnIe, RSNIeLen, &dot11RSNIE, false);
+ ret = sme_unpack_rsn_ie(halHandle, pRsnIe, RSNIeLen,
+ &dot11RSNIE, false);
+ if (!DOT11F_SUCCEEDED(ret)) {
+ hdd_err("Invalid RSN IE: parse status %d",
+ ret);
+ return -EINVAL;
+ }
+ hdd_debug("gp_cipher_suite_present: %d",
+ dot11RSNIE.gp_cipher_suite_present);
/* Copy out the encryption and authentication types */
hdd_debug("pairwise cipher suite count: %d",
dot11RSNIE.pwise_cipher_suite_count);
hdd_debug("authentication suite count: %d",
- dot11RSNIE.akm_suite_count);
+ dot11RSNIE.akm_suite_cnt);
*pAuthType =
hdd_translate_rsn_to_csr_auth_type(
- dot11RSNIE.akm_suites[0]);
+ dot11RSNIE.akm_suite[0]);
/* dot11RSNIE.pwise_cipher_suite_count */
*pEncryptType =
hdd_translate_rsn_to_csr_encryption_type(
@@ -5471,8 +5549,12 @@ static int32_t hdd_process_genie(hdd_adapter_t *pAdapter,
pRsnIe = gen_ie + 2 + 4;
RSNIeLen = gen_ie_len - (2 + 4);
/* Unpack the WPA IE */
- dot11f_unpack_ie_wpa((tpAniSirGlobal) halHandle,
+ ret = dot11f_unpack_ie_wpa((tpAniSirGlobal) halHandle,
pRsnIe, RSNIeLen, &dot11WPAIE, false);
+ if (DOT11F_FAILED(ret)) {
+ hdd_err("unpack failed, ret: 0x%x", ret);
+ return -EINVAL;
+ }
/* Copy out the encryption and authentication types */
hdd_debug("WPA unicast cipher suite count: %d",
dot11WPAIE.unicast_cipher_count);
@@ -5499,6 +5581,37 @@ static int32_t hdd_process_genie(hdd_adapter_t *pAdapter,
}
/**
+ * hdd_set_def_rsne_override() - set default encryption and auth types
+ * in the roam profile
+ * @roam_profile: pointer to the roam profile
+ * @auth_type: pointer to the auth type to be filled
+ *
+ * Set default encryption and auth type values in the profile used to
+ * search for the AP, since with force_rsne_override the RSN IE can be
+ * corrupt and the proper encryption and auth types may not be
+ * recoverable while parsing it.
+ *
+ * Return: void
+ */
+static void hdd_set_def_rsne_override(
+ tCsrRoamProfile *roam_profile, eCsrAuthType *auth_type)
+{
+
+ hdd_debug("Set def values in roam profile");
+ roam_profile->MFPCapable = roam_profile->MFPEnabled;
+ roam_profile->EncryptionType.numEntries = 2;
+ roam_profile->mcEncryptionType.numEntries = 2;
+ /* Default cipher types, since the RSN IE may not be parseable */
+ roam_profile->EncryptionType.encryptionType[0] = eCSR_ENCRYPT_TYPE_AES;
+ roam_profile->EncryptionType.encryptionType[1] = eCSR_ENCRYPT_TYPE_TKIP;
+ roam_profile->mcEncryptionType.encryptionType[0] =
+ eCSR_ENCRYPT_TYPE_AES;
+ roam_profile->mcEncryptionType.encryptionType[1] =
+ eCSR_ENCRYPT_TYPE_TKIP;
+ *auth_type = eCSR_AUTH_TYPE_RSN_PSK;
+}
+
+/**
* hdd_set_genie_to_csr() - set genie to csr
* @pAdapter: pointer to adapter
* @RSNAuthType: pointer to auth type
@@ -5511,6 +5624,7 @@ int hdd_set_genie_to_csr(hdd_adapter_t *pAdapter, eCsrAuthType *RSNAuthType)
uint32_t status = 0;
eCsrEncryptionType RSNEncryptType;
eCsrEncryptionType mcRSNEncryptType;
+ hdd_context_t *hdd_ctx;
#ifdef WLAN_FEATURE_11W
uint8_t RSNMfpRequired = 0;
uint8_t RSNMfpCapable = 0;
@@ -5527,6 +5641,7 @@ int hdd_set_genie_to_csr(hdd_adapter_t *pAdapter, eCsrAuthType *RSNAuthType)
} else {
return 0;
}
+
/* The actual processing may eventually be more extensive than this. */
/* Right now, just consume any PMKIDs that are sent in by the app. */
status = hdd_process_genie(pAdapter, bssid,
@@ -5537,6 +5652,7 @@ int hdd_set_genie_to_csr(hdd_adapter_t *pAdapter, eCsrAuthType *RSNAuthType)
#endif
pWextState->WPARSNIE[1] + 2,
pWextState->WPARSNIE);
+
if (status == 0) {
/*
* Now copy over all the security attributes
@@ -5575,7 +5691,23 @@ int hdd_set_genie_to_csr(hdd_adapter_t *pAdapter, eCsrAuthType *RSNAuthType)
hdd_debug("CSR AuthType = %d, EncryptionType = %d mcEncryptionType = %d",
*RSNAuthType, RSNEncryptType, mcRSNEncryptType);
}
- return 0;
+
+ hdd_ctx = WLAN_HDD_GET_CTX(pAdapter);
+ if (hdd_ctx->force_rsne_override &&
+ (pWextState->WPARSNIE[0] == DOT11F_EID_RSN)) {
+ hdd_warn("Test mode enabled set def Auth and enc type. RSN IE passed in connect req: ");
+ qdf_trace_hex_dump(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_WARN,
+ pWextState->roamProfile.pRSNReqIE,
+ pWextState->roamProfile.nRSNReqIELength);
+
+ pWextState->roamProfile.force_rsne_override = true;
+ /* If parsing failed set the def value for the roam profile */
+ if (status)
+ hdd_set_def_rsne_override(&pWextState->roamProfile,
+ RSNAuthType);
+ return 0;
+ }
+ return status;
}
#ifdef WLAN_FEATURE_FILS_SK
@@ -6035,12 +6167,12 @@ static int __iw_get_essid(struct net_device *dev,
*/
int iw_get_essid(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *wrqu, char *extra)
+ union iwreq_data *wrqu, char *extra)
{
int ret;
cds_ssr_protect(__func__);
- ret = __iw_get_essid(dev, info, wrqu, extra);
+ ret = __iw_get_essid(dev, info, &wrqu->essid, extra);
cds_ssr_unprotect(__func__);
return ret;
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg.c
index a69ec0b0120b..e1c004567729 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -2595,6 +2595,13 @@ struct reg_table_entry g_registry_table[] = {
CFG_VHT_SU_BEAMFORMEE_CAP_FEATURE_MIN,
CFG_VHT_SU_BEAMFORMEE_CAP_FEATURE_MAX),
+ REG_VARIABLE(CFG_ENABLE_SUBFEE_IN_VENDOR_VHTIE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, enable_subfee_vendor_vhtie,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_ENABLE_SUBFEE_IN_VENDOR_VHTIE_DEFAULT,
+ CFG_ENABLE_SUBFEE_IN_VENDOR_VHTIE_MIN,
+ CFG_ENABLE_SUBFEE_IN_VENDOR_VHTIE_MAX),
+
REG_VARIABLE(CFG_VHT_ENABLE_TXBF_SAP_MODE, WLAN_PARAM_Integer,
struct hdd_config, enable_txbf_sap_mode,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
@@ -3222,6 +3229,13 @@ struct reg_table_entry g_registry_table[] = {
CFG_BUS_BANDWIDTH_COMPUTE_INTERVAL_MIN,
CFG_BUS_BANDWIDTH_COMPUTE_INTERVAL_MAX),
+ REG_VARIABLE(CFG_ENABLE_TCP_LIMIT_OUTPUT, WLAN_PARAM_Integer,
+ struct hdd_config, enable_tcp_limit_output,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_ENABLE_TCP_LIMIT_OUTPUT_DEFAULT,
+ CFG_ENABLE_TCP_LIMIT_OUTPUT_MIN,
+ CFG_ENABLE_TCP_LIMIT_OUTPUT_MAX),
+
REG_VARIABLE(CFG_ENABLE_TCP_ADV_WIN_SCALE, WLAN_PARAM_Integer,
struct hdd_config, enable_tcp_adv_win_scale,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
@@ -3713,12 +3727,12 @@ struct reg_table_entry g_registry_table[] = {
CFG_GRO_ENABLED_MIN,
CFG_GRO_ENABLED_MAX),
- REG_VARIABLE(CFG_BPF_PACKET_FILTER_OFFLOAD, WLAN_PARAM_Integer,
- struct hdd_config, bpf_packet_filter_enable,
+ REG_VARIABLE(CFG_APF_PACKET_FILTER_OFFLOAD, WLAN_PARAM_Integer,
+ struct hdd_config, apf_packet_filter_enable,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
- CFG_BPF_PACKET_FILTER_OFFLOAD_DEFAULT,
- CFG_BPF_PACKET_FILTER_OFFLOAD_MIN,
- CFG_BPF_PACKET_FILTER_OFFLOAD_MAX),
+ CFG_APF_PACKET_FILTER_OFFLOAD_DEFAULT,
+ CFG_APF_PACKET_FILTER_OFFLOAD_MIN,
+ CFG_APF_PACKET_FILTER_OFFLOAD_MAX),
REG_VARIABLE(CFG_TDLS_ENABLE_DEFER_TIMER, WLAN_PARAM_Integer,
struct hdd_config, tdls_enable_defer_time,
@@ -4594,19 +4608,19 @@ struct reg_table_entry g_registry_table[] = {
CFG_MAX_SCHED_SCAN_PLAN_ITRNS_DEFAULT,
CFG_MAX_SCHED_SCAN_PLAN_ITRNS_MIN,
CFG_MAX_SCHED_SCAN_PLAN_ITRNS_MAX),
- REG_VARIABLE(CFG_ACTIVE_UC_BPF_MODE_NAME, WLAN_PARAM_Integer,
- struct hdd_config, active_uc_bpf_mode,
+ REG_VARIABLE(CFG_ACTIVE_UC_APF_MODE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, active_uc_apf_mode,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
- CFG_ACTIVE_UC_BPF_MODE_DEFAULT,
- CFG_ACTIVE_UC_BPF_MODE_MIN,
- CFG_ACTIVE_UC_BPF_MODE_MAX),
+ CFG_ACTIVE_UC_APF_MODE_DEFAULT,
+ CFG_ACTIVE_UC_APF_MODE_MIN,
+ CFG_ACTIVE_UC_APF_MODE_MAX),
- REG_VARIABLE(CFG_ACTIVE_MC_BC_BPF_MODE_NAME, WLAN_PARAM_Integer,
- struct hdd_config, active_mc_bc_bpf_mode,
+ REG_VARIABLE(CFG_ACTIVE_MC_BC_APF_MODE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, active_mc_bc_apf_mode,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
- CFG_ACTIVE_MC_BC_BPF_MODE_DEFAULT,
- CFG_ACTIVE_MC_BC_BPF_MODE_MIN,
- CFG_ACTIVE_MC_BC_BPF_MODE_MAX),
+ CFG_ACTIVE_MC_BC_APF_MODE_DEFAULT,
+ CFG_ACTIVE_MC_BC_APF_MODE_MIN,
+ CFG_ACTIVE_MC_BC_APF_MODE_MAX),
REG_VARIABLE(CFG_HW_FILTER_MODE_NAME, WLAN_PARAM_Integer,
struct hdd_config, hw_filter_mode,
@@ -5408,6 +5422,13 @@ struct reg_table_entry g_registry_table[] = {
CFG_RX_CHAIN_MASK_5G_MIN,
CFG_RX_CHAIN_MASK_5G_MAX),
+ REG_VARIABLE(CFG_FORCE_RSNE_OVERRIDE_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, force_rsne_override,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_FORCE_RSNE_OVERRIDE_DEFAULT,
+ CFG_FORCE_RSNE_OVERRIDE_MIN,
+ CFG_FORCE_RSNE_OVERRIDE_MAX),
+
REG_VARIABLE(CFG_ENABLE_MAC_PROVISION_NAME, WLAN_PARAM_Integer,
struct hdd_config, mac_provision,
VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
@@ -5428,6 +5449,102 @@ struct reg_table_entry g_registry_table[] = {
CFG_DERIVED_INTERFACE_POOL_DEFAULT,
CFG_DERIVED_INTERFACE_POOL_MIN,
CFG_DERIVED_INTERFACE_POOL_MAX),
+
+ REG_VARIABLE(CFG_ENABLE_GCMP_NAME, WLAN_PARAM_Integer,
+ struct hdd_config, gcmp_enabled,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_ENABLE_GCMP_DEFAULT,
+ CFG_ENABLE_GCMP_MIN,
+ CFG_ENABLE_GCMP_MAX),
+
+ REG_VARIABLE(CFG_OFFLOAD_11K_ENABLE_BITMASK_NAME,
+ WLAN_PARAM_Integer,
+ struct hdd_config, offload_11k_enable_bitmask,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_OFFLOAD_11K_ENABLE_BITMASK_DEFAULT,
+ CFG_OFFLOAD_11K_ENABLE_BITMASK_MIN,
+ CFG_OFFLOAD_11K_ENABLE_BITMASK_MAX),
+
+ REG_VARIABLE(CFG_OFFLOAD_NEIGHBOR_REPORT_PARAMS_BITMASK_NAME,
+ WLAN_PARAM_Integer,
+ struct hdd_config, neighbor_report_offload_params_bitmask,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_PARAMS_BITMASK_DEFAULT,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_PARAMS_BITMASK_MIN,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_PARAMS_BITMASK_MAX),
+
+ REG_VARIABLE(CFG_OFFLOAD_NEIGHBOR_REPORT_TIME_OFFSET_NAME,
+ WLAN_PARAM_Integer,
+ struct hdd_config, neighbor_report_offload_time_offset,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_TIME_OFFSET_DEFAULT,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_TIME_OFFSET_MIN,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_TIME_OFFSET_MAX),
+
+ REG_VARIABLE(CFG_OFFLOAD_NEIGHBOR_REPORT_LOW_RSSI_OFFSET_NAME,
+ WLAN_PARAM_Integer,
+ struct hdd_config, neighbor_report_offload_low_rssi_offset,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_LOW_RSSI_OFFSET_DEFAULT,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_LOW_RSSI_OFFSET_MIN,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_LOW_RSSI_OFFSET_MAX),
+
+ REG_VARIABLE(CFG_OFFLOAD_NEIGHBOR_REPORT_BMISS_COUNT_TRIGGER_NAME,
+ WLAN_PARAM_Integer,
+ struct hdd_config,
+ neighbor_report_offload_bmiss_count_trigger,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_BMISS_COUNT_TRIGGER_DEFAULT,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_BMISS_COUNT_TRIGGER_MIN,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_BMISS_COUNT_TRIGGER_MAX),
+
+ REG_VARIABLE(CFG_OFFLOAD_NEIGHBOR_REPORT_PER_THRESHOLD_OFFSET_NAME,
+ WLAN_PARAM_Integer,
+ struct hdd_config,
+ neighbor_report_offload_per_threshold_offset,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_PER_THRESHOLD_OFFSET_DEFAULT,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_PER_THRESHOLD_OFFSET_MIN,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_PER_THRESHOLD_OFFSET_MAX),
+
+ REG_VARIABLE(CFG_OFFLOAD_NEIGHBOR_REPORT_CACHE_TIMEOUT_NAME,
+ WLAN_PARAM_Integer,
+ struct hdd_config, neighbor_report_offload_cache_timeout,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_CACHE_TIMEOUT_DEFAULT,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_CACHE_TIMEOUT_MIN,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_CACHE_TIMEOUT_MAX),
+
+ REG_VARIABLE(CFG_OFFLOAD_NEIGHBOR_REPORT_MAX_REQ_CAP_NAME,
+ WLAN_PARAM_Integer,
+ struct hdd_config, neighbor_report_offload_max_req_cap,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_MAX_REQ_CAP_DEFAULT,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_MAX_REQ_CAP_MIN,
+ CFG_OFFLOAD_NEIGHBOR_REPORT_MAX_REQ_CAP_MAX),
+
+ REG_VARIABLE(CFG_CHANNEL_SELECT_LOGIC_CONC_NAME, WLAN_PARAM_HexInteger,
+ struct hdd_config, channel_select_logic_conc,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_CHANNEL_SELECT_LOGIC_CONC_DEFAULT,
+ CFG_CHANNEL_SELECT_LOGIC_CONC_MIN,
+ CFG_CHANNEL_SELECT_LOGIC_CONC_MAX),
+
+ REG_VARIABLE(CFG_DTIM_SELECTION_DIVERSITY_NAME,
+ WLAN_PARAM_Integer,
+ struct hdd_config, enable_dtim_selection_diversity,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_DTIM_SELECTION_DIVERSITY_DEFAULT,
+ CFG_DTIM_SELECTION_DIVERSITY_MIN,
+ CFG_DTIM_SELECTION_DIVERSITY_MAX),
+
+ REG_VARIABLE(CFG_ENABLE_RTT_MAC_RANDOMIZATION_NAME,
+ WLAN_PARAM_Integer,
+ struct hdd_config, enable_rtt_mac_randomization,
+ VAR_FLAGS_OPTIONAL | VAR_FLAGS_RANGE_CHECK_ASSUME_DEFAULT,
+ CFG_ENABLE_RTT_MAC_RANDOMIZATION_DEFAULT,
+ CFG_ENABLE_RTT_MAC_RANDOMIZATION_MIN,
+ CFG_ENABLE_RTT_MAC_RANDOMIZATION_MAX),
};
/**
@@ -6328,6 +6445,42 @@ static void hdd_wlm_cfg_log(hdd_context_t *pHddCtx)
}
/**
+ * hdd_cfg_print_11k_offload_params() - print the 11k offload related parameters
+ * @hdd_ctx: Pointer to HDD context
+ *
+ * Return: None
+ */
+static
+void hdd_cfg_print_11k_offload_params(hdd_context_t *hdd_ctx)
+{
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_OFFLOAD_11K_ENABLE_BITMASK_NAME,
+ hdd_ctx->config->offload_11k_enable_bitmask);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_OFFLOAD_NEIGHBOR_REPORT_PARAMS_BITMASK_NAME,
+ hdd_ctx->config->neighbor_report_offload_params_bitmask);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_OFFLOAD_NEIGHBOR_REPORT_TIME_OFFSET_NAME,
+ hdd_ctx->config->neighbor_report_offload_time_offset);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_OFFLOAD_NEIGHBOR_REPORT_LOW_RSSI_OFFSET_NAME,
+ hdd_ctx->config->neighbor_report_offload_low_rssi_offset);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_OFFLOAD_NEIGHBOR_REPORT_BMISS_COUNT_TRIGGER_NAME,
+ hdd_ctx->config->neighbor_report_offload_bmiss_count_trigger);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_OFFLOAD_NEIGHBOR_REPORT_PER_THRESHOLD_OFFSET_NAME,
+ hdd_ctx->config->
+ neighbor_report_offload_per_threshold_offset);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_OFFLOAD_NEIGHBOR_REPORT_CACHE_TIMEOUT_NAME,
+ hdd_ctx->config->neighbor_report_offload_cache_timeout);
+ hdd_debug("Name = [%s] value = [%u]",
+ CFG_OFFLOAD_NEIGHBOR_REPORT_MAX_REQ_CAP_NAME,
+ hdd_ctx->config->neighbor_report_offload_max_req_cap);
+}
+
+/**
* hdd_cfg_print() - print the hdd configuration
* @iniTable: pointer to hdd context
*
@@ -6697,6 +6850,9 @@ void hdd_cfg_print(hdd_context_t *pHddCtx)
hdd_debug("Name = [gbusBandwidthComputeInterval] Value = [%u] ",
pHddCtx->config->busBandwidthComputeInterval);
hdd_debug("Name = [%s] Value = [%u] ",
+ CFG_ENABLE_TCP_LIMIT_OUTPUT,
+ pHddCtx->config->enable_tcp_limit_output);
+ hdd_debug("Name = [%s] Value = [%u] ",
CFG_ENABLE_TCP_ADV_WIN_SCALE,
pHddCtx->config->enable_tcp_adv_win_scale);
hdd_debug("Name = [%s] Value = [%u] ",
@@ -7031,8 +7187,8 @@ void hdd_cfg_print(hdd_context_t *pHddCtx)
CFG_GO_FORCE_11N_FOR_11AC_NAME,
pHddCtx->config->go_force_11n_for_11ac);
hdd_debug("Name = [%s] Value = [%d]",
- CFG_BPF_PACKET_FILTER_OFFLOAD,
- pHddCtx->config->bpf_packet_filter_enable);
+ CFG_APF_PACKET_FILTER_OFFLOAD,
+ pHddCtx->config->apf_packet_filter_enable);
hdd_debug("Name = [%s] Value = [%u]",
CFG_TDLS_ENABLE_DEFER_TIMER,
pHddCtx->config->tdls_enable_defer_time);
@@ -7049,11 +7205,11 @@ void hdd_cfg_print(hdd_context_t *pHddCtx)
CFG_ENABLE_PHY_REG_NAME,
pHddCtx->config->enable_phy_reg_retention);
hdd_debug("Name = [%s] Value = [%u]",
- CFG_ACTIVE_UC_BPF_MODE_NAME,
- pHddCtx->config->active_uc_bpf_mode);
+ CFG_ACTIVE_UC_APF_MODE_NAME,
+ pHddCtx->config->active_uc_apf_mode);
hdd_debug("Name = [%s] Value = [%u]",
- CFG_ACTIVE_MC_BC_BPF_MODE_NAME,
- pHddCtx->config->active_mc_bc_bpf_mode);
+ CFG_ACTIVE_MC_BC_APF_MODE_NAME,
+ pHddCtx->config->active_mc_bc_apf_mode);
hdd_debug("Name = [%s] Value = [%u]",
CFG_HW_FILTER_MODE_NAME,
pHddCtx->config->hw_filter_mode);
@@ -7241,6 +7397,9 @@ void hdd_cfg_print(hdd_context_t *pHddCtx)
pHddCtx->config->chan_switch_hostapd_rate_enabled);
hdd_debug("Name = [%s] value = [0x%x]", CFG_VC_MODE_BITMAP,
pHddCtx->config->vc_mode_cfg_bitmap);
+ hdd_debug("Name = [%s] Value = [%u]",
+ CFG_FORCE_RSNE_OVERRIDE_NAME,
+ pHddCtx->config->force_rsne_override);
hdd_debug("Name = [%s] value = [0x%x]",
CFG_ENABLE_MAC_PROVISION_NAME,
pHddCtx->config->mac_provision);
@@ -7250,6 +7409,17 @@ void hdd_cfg_print(hdd_context_t *pHddCtx)
hdd_debug("Name = [%s] value = [0x%x]",
CFG_DERIVED_INTERFACE_POOL_NAME,
pHddCtx->config->derived_intf_pool);
+ hdd_debug("Name = [%s] value = [%d]",
+ CFG_ENABLE_GCMP_NAME,
+ pHddCtx->config->gcmp_enabled);
+ hdd_debug("Name = [%s] value = [%d]",
+ CFG_DTIM_SELECTION_DIVERSITY_NAME,
+ pHddCtx->config->enable_dtim_selection_diversity);
+
+ hdd_cfg_print_11k_offload_params(pHddCtx);
+ hdd_debug("Name = [%s] value = [0x%x]",
+ CFG_CHANNEL_SELECT_LOGIC_CONC_NAME,
+ pHddCtx->config->channel_select_logic_conc);
}
/**
@@ -9378,6 +9548,36 @@ static void hdd_update_bss_score_params(struct hdd_config *config,
}
/**
+ * hdd_update_11k_offload_params() - initializes the 11k offload related params
+ *
+ * @config: pointer to hdd_config structure
+ * @csr_config: pointer to the csr config structure
+ *
+ * Return: None
+ */
+static
+void hdd_update_11k_offload_params(struct hdd_config *config,
+ tCsrConfigParam *csr_config)
+{
+ csr_config->offload_11k_enable_bitmask =
+ config->offload_11k_enable_bitmask;
+ csr_config->neighbor_report_offload.params_bitmask =
+ config->neighbor_report_offload_params_bitmask;
+ csr_config->neighbor_report_offload.time_offset =
+ config->neighbor_report_offload_time_offset;
+ csr_config->neighbor_report_offload.low_rssi_offset =
+ config->neighbor_report_offload_low_rssi_offset;
+ csr_config->neighbor_report_offload.bmiss_count_trigger =
+ config->neighbor_report_offload_bmiss_count_trigger;
+ csr_config->neighbor_report_offload.per_threshold_offset =
+ config->neighbor_report_offload_per_threshold_offset;
+ csr_config->neighbor_report_offload.neighbor_report_cache_timeout =
+ config->neighbor_report_offload_cache_timeout;
+ csr_config->neighbor_report_offload.max_neighbor_report_req_cap =
+ config->neighbor_report_offload_max_req_cap;
+}
+
+/**
* hdd_set_sme_config() -initializes the sme configuration parameters
*
* @pHddCtx: the pointer to hdd context
@@ -9479,6 +9679,9 @@ QDF_STATUS hdd_set_sme_config(hdd_context_t *pHddCtx)
*/
smeConfig->csrConfig.nVhtChannelWidth = pConfig->vhtChannelWidth;
smeConfig->csrConfig.enableTxBF = pConfig->enableTxBF;
+ smeConfig->csrConfig.enable_subfee_vendor_vhtie =
+ pConfig->enable_subfee_vendor_vhtie;
+
smeConfig->csrConfig.enable_txbf_sap_mode =
pConfig->enable_txbf_sap_mode;
smeConfig->csrConfig.enable2x2 = pConfig->enable2x2;
@@ -9833,6 +10036,8 @@ QDF_STATUS hdd_set_sme_config(hdd_context_t *pHddCtx)
hdd_update_bss_score_params(pHddCtx->config,
&smeConfig->csrConfig.bss_score_params);
+ hdd_update_11k_offload_params(pHddCtx->config,
+ &smeConfig->csrConfig);
status = sme_update_config(pHddCtx->hHal, smeConfig);
if (!QDF_IS_STATUS_SUCCESS(status))
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c
index 00efba0efa0a..1a552b59e246 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.c
@@ -97,6 +97,10 @@
#include "wlan_hdd_disa.h"
#include "wlan_hdd_spectralscan.h"
+#ifdef WLAN_FEATURE_APF
+#include "wlan_hdd_apf.h"
+#endif
+
#include "wmi_unified.h"
#include "wmi_unified_param.h"
@@ -174,6 +178,11 @@
*/
#define WLAN_DEAUTH_DPTRACE_DUMP_COUNT 100
+static const u32 hdd_gcmp_cipher_suits[] = {
+ WLAN_CIPHER_SUITE_GCMP,
+ WLAN_CIPHER_SUITE_GCMP_256,
+};
+
static const u32 hdd_cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
@@ -193,8 +202,6 @@ static const u32 hdd_cipher_suites[] = {
#ifdef WLAN_FEATURE_11W
WLAN_CIPHER_SUITE_AES_CMAC,
#endif
- WLAN_CIPHER_SUITE_GCMP,
- WLAN_CIPHER_SUITE_GCMP_256,
};
static const struct ieee80211_channel hdd_channels_2_4_ghz[] = {
@@ -615,7 +622,6 @@ static struct ieee80211_iface_combination
};
static struct cfg80211_ops wlan_hdd_cfg80211_ops;
-struct hdd_bpf_context bpf_context;
#ifdef WLAN_NL80211_TESTMODE
enum wlan_hdd_tm_attr {
@@ -1251,6 +1257,10 @@ static const struct nl80211_vendor_cmd_info wlan_hdd_cfg80211_vendor_events[] =
.vendor_id = QCA_NL80211_VENDOR_ID,
.subcmd = QCA_NL80211_VENDOR_SUBCMD_HANG,
},
+ [QCA_NL80211_VENDOR_SUBCMD_LINK_PROPERTIES_INDEX] = {
+ .vendor_id = QCA_NL80211_VENDOR_ID,
+ .subcmd = QCA_NL80211_VENDOR_SUBCMD_LINK_PROPERTIES,
+ },
[QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO_INDEX] = {
.vendor_id = QCA_NL80211_VENDOR_ID,
.subcmd = QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO,
@@ -1584,7 +1594,8 @@ static int __wlan_hdd_cfg80211_do_acs(struct wiphy *wiphy,
hdd_context_t *hdd_ctx = wiphy_priv(wiphy);
tsap_Config_t *sap_config;
struct sk_buff *temp_skbuff;
- int status = -EINVAL, i = 0;
+ int ret, i;
+ QDF_STATUS status;
struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_ACS_MAX + 1];
bool ht_enabled, ht40_enabled, vht_enabled;
uint8_t ch_width;
@@ -1598,7 +1609,7 @@ static int __wlan_hdd_cfg80211_do_acs(struct wiphy *wiphy,
* config shall be set only from start_acs.
*/
- ENTER_DEV(ndev);
+ hdd_info("enter(%s)", netdev_name(adapter->dev));
if (QDF_GLOBAL_FTM_MODE == hdd_get_conparam()) {
hdd_err("Command not allowed in FTM mode");
@@ -1610,28 +1621,33 @@ static int __wlan_hdd_cfg80211_do_acs(struct wiphy *wiphy,
return -EPERM;
}
- status = wlan_hdd_validate_context(hdd_ctx);
- if (status)
+ ret = wlan_hdd_validate_context(hdd_ctx);
+ if (ret)
goto out;
if (cds_is_sub_20_mhz_enabled()) {
hdd_err("ACS not supported in sub 20 MHz ch wd.");
- status = -EINVAL;
+ ret = -EINVAL;
goto out;
}
- sap_config = &adapter->sessionCtx.ap.sapConfig;
- qdf_mem_zero(&sap_config->acs_cfg, sizeof(struct sap_acs_cfg));
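+ /* Allow only one ACS request at a time */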
+ if (qdf_atomic_inc_return(&hdd_ctx->is_acs_allowed) > 1) {
+ hdd_err("ACS rejected as previous req already in progress");
+ ret = -EINVAL;
+ goto out;
+ }
- status = hdd_nla_parse(tb, QCA_WLAN_VENDOR_ATTR_ACS_MAX, data, data_len,
- wlan_hdd_cfg80211_do_acs_policy);
- if (status) {
+ ret = hdd_nla_parse(tb, QCA_WLAN_VENDOR_ATTR_ACS_MAX, data, data_len,
+ wlan_hdd_cfg80211_do_acs_policy);
+ if (ret) {
hdd_err("Invalid ATTR");
+ qdf_atomic_set(&hdd_ctx->is_acs_allowed, 0);
goto out;
}
if (!tb[QCA_WLAN_VENDOR_ATTR_ACS_HW_MODE]) {
hdd_err("Attr hw_mode failed");
+ qdf_atomic_set(&hdd_ctx->is_acs_allowed, 0);
goto out;
}
hw_mode = nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_ACS_HW_MODE]);
@@ -1681,6 +1697,13 @@ static int __wlan_hdd_cfg80211_do_acs(struct wiphy *wiphy,
ch_width = 20;
}
+ sap_config = &adapter->sessionCtx.ap.sapConfig;
+
+ /* Check and free if memory is already allocated for acs channel list */
+ wlan_hdd_undo_acs(adapter);
+
+ qdf_mem_zero(&sap_config->acs_cfg, sizeof(struct sap_acs_cfg));
+
if (ch_width == 160)
sap_config->acs_cfg.ch_width = CH_WIDTH_160MHZ;
else if (ch_width == 80)
@@ -1699,6 +1722,7 @@ static int __wlan_hdd_cfg80211_do_acs(struct wiphy *wiphy,
* hw_mode = any: only QCA_WLAN_VENDOR_ATTR_ACS_FREQ_LIST attr
* is present
*/
+
if (tb[QCA_WLAN_VENDOR_ATTR_ACS_CH_LIST]) {
char *tmp = nla_data(tb[QCA_WLAN_VENDOR_ATTR_ACS_CH_LIST]);
@@ -1708,8 +1732,11 @@ static int __wlan_hdd_cfg80211_do_acs(struct wiphy *wiphy,
sap_config->acs_cfg.ch_list = qdf_mem_malloc(
sizeof(uint8_t) *
sap_config->acs_cfg.ch_list_count);
- if (sap_config->acs_cfg.ch_list == NULL)
+ if (sap_config->acs_cfg.ch_list == NULL) {
+ qdf_atomic_set(&hdd_ctx->is_acs_allowed, 0);
+ ret = -ENOMEM;
goto out;
+ }
qdf_mem_copy(sap_config->acs_cfg.ch_list, tmp,
sap_config->acs_cfg.ch_list_count);
@@ -1725,7 +1752,8 @@ static int __wlan_hdd_cfg80211_do_acs(struct wiphy *wiphy,
sap_config->acs_cfg.ch_list_count);
if (sap_config->acs_cfg.ch_list == NULL) {
hdd_err("ACS config alloc fail");
- status = -ENOMEM;
+ qdf_atomic_set(&hdd_ctx->is_acs_allowed, 0);
+ ret = -ENOMEM;
goto out;
}
@@ -1736,6 +1764,11 @@ static int __wlan_hdd_cfg80211_do_acs(struct wiphy *wiphy,
}
}
+ if (!sap_config->acs_cfg.ch_list_count) {
+ hdd_err("ACS config channel count is 0");
+ qdf_atomic_set(&hdd_ctx->is_acs_allowed, 0);
+ ret = -EINVAL;
+ goto out;
+ }
+
hdd_debug("get pcl for DO_ACS vendor command");
/* consult policy manager to get PCL */
@@ -1747,6 +1780,9 @@ static int __wlan_hdd_cfg80211_do_acs(struct wiphy *wiphy,
if (QDF_STATUS_SUCCESS != status)
hdd_err("Get PCL failed");
+ if (hw_mode == QCA_ACS_MODE_IEEE80211ANY)
+ cds_trim_acs_channel_list(sap_config);
+
wlan_hdd_set_acs_ch_range(sap_config, hw_mode,
ht_enabled, vht_enabled);
@@ -1762,12 +1798,13 @@ static int __wlan_hdd_cfg80211_do_acs(struct wiphy *wiphy,
sap_config->acs_cfg.hw_mode = eCSR_DOT11_MODE_11ac;
sap_config->acs_cfg.ch_width =
hdd_ctx->config->vhtChannelWidth;
- /* No VHT80 in 2.4G so perform ACS accordingly */
- if (sap_config->acs_cfg.end_ch <= 14 &&
- sap_config->acs_cfg.ch_width == eHT_CHANNEL_WIDTH_80MHZ)
- sap_config->acs_cfg.ch_width = eHT_CHANNEL_WIDTH_40MHZ;
}
+ /* No VHT80 in 2.4G so perform ACS accordingly */
+ if (sap_config->acs_cfg.end_ch <= 14 &&
+ sap_config->acs_cfg.ch_width == eHT_CHANNEL_WIDTH_80MHZ)
+ sap_config->acs_cfg.ch_width = eHT_CHANNEL_WIDTH_40MHZ;
+
if (hdd_ctx->config->auto_channel_select_weight)
sap_config->auto_channel_select_weight =
hdd_ctx->config->auto_channel_select_weight;
@@ -1795,13 +1832,13 @@ static int __wlan_hdd_cfg80211_do_acs(struct wiphy *wiphy,
*/
set_bit(ACS_PENDING, &adapter->event_flags);
hdd_debug("ACS Pending for %s", adapter->dev->name);
- status = 0;
+ ret = 0;
} else {
- status = wlan_hdd_cfg80211_start_acs(adapter);
+ ret = wlan_hdd_cfg80211_start_acs(adapter);
}
out:
- if (0 == status) {
+ if (ret == 0) {
temp_skbuff = cfg80211_vendor_cmd_alloc_reply_skb(wiphy,
NLMSG_HDRLEN);
if (temp_skbuff != NULL)
@@ -1810,7 +1847,7 @@ out:
wlan_hdd_undo_acs(adapter);
clear_bit(ACS_IN_PROGRESS, &hdd_ctx->g_event_flags);
- return status;
+ return ret;
}
/**
@@ -1853,10 +1890,15 @@ void wlan_hdd_undo_acs(hdd_adapter_t *adapter)
{
if (adapter == NULL)
return;
+
+ hdd_info("enter(%s)", netdev_name(adapter->dev));
+
if (adapter->sessionCtx.ap.sapConfig.acs_cfg.ch_list) {
qdf_mem_free(adapter->sessionCtx.ap.sapConfig.acs_cfg.ch_list);
adapter->sessionCtx.ap.sapConfig.acs_cfg.ch_list = NULL;
}
+
+ EXIT();
}
/**
@@ -3794,6 +3836,10 @@ hdd_add_link_standard_info(struct sk_buff *skb,
hdd_err("put fail");
goto fail;
}
+ if (nla_put(skb, NL80211_ATTR_MAC, QDF_MAC_ADDR_SIZE,
+ hdd_sta_ctx->conn_info.bssId.bytes)) {
+ goto fail;
+ }
if (hdd_add_survey_info(skb, hdd_sta_ctx, NL80211_ATTR_SURVEY_INFO))
goto fail;
if (hdd_add_sta_info(skb, hdd_sta_ctx, NL80211_ATTR_STA_INFO))
@@ -3860,6 +3906,7 @@ static int hdd_get_station_info(hdd_context_t *hdd_ctx,
nl_buf_len = NLMSG_HDRLEN;
nl_buf_len += sizeof(hdd_sta_ctx->conn_info.last_ssid.SSID.length) +
+ QDF_MAC_ADDR_SIZE +
sizeof(hdd_sta_ctx->conn_info.freq) +
sizeof(hdd_sta_ctx->conn_info.noise) +
sizeof(hdd_sta_ctx->conn_info.signal) +
@@ -4792,8 +4839,8 @@ static int __wlan_hdd_cfg80211_keymgmt_set_key(struct wiphy *wiphy,
return -EPERM;
}
- if ((data == NULL) || (data_len == 0) ||
- (data_len > SIR_ROAM_SCAN_PSK_SIZE)) {
+ if ((data == NULL) || (data_len <= 0) ||
+ (data_len > SIR_ROAM_SCAN_PSK_SIZE)) {
hdd_err("Invalid data");
return -EINVAL;
}
@@ -5026,6 +5073,7 @@ __wlan_hdd_cfg80211_get_logger_supp_feature(struct wiphy *wiphy,
features |= WIFI_LOGGER_PER_PACKET_TX_RX_STATUS_SUPPORTED;
features |= WIFI_LOGGER_CONNECT_EVENT_SUPPORTED;
features |= WIFI_LOGGER_WAKE_LOCK_SUPPORTED;
+ features |= WIFI_LOGGER_DRIVER_DUMP_SUPPORTED;
reply_skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy,
sizeof(uint32_t) + NLA_HDRLEN + NLMSG_HDRLEN);
@@ -5411,6 +5459,7 @@ wlan_hdd_wifi_config_policy[QCA_WLAN_VENDOR_ATTR_CONFIG_MAX + 1] = {
[QCA_WLAN_VENDOR_ATTR_CONFIG_TOTAL_BEACON_MISS_COUNT] = {
.type = NLA_U8},
[QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL] = {.type = NLA_U16 },
+ [QCA_WLAN_VENDOR_ATTR_CONFIG_RSN_IE] = {.type = NLA_U8},
};
/**
@@ -6194,6 +6243,22 @@ __wlan_hdd_cfg80211_wifi_configuration_set(struct wiphy *wiphy,
}
}
+ if (tb[QCA_WLAN_VENDOR_ATTR_CONFIG_RSN_IE] &&
+ hdd_ctx->config->force_rsne_override) {
+ uint8_t force_rsne_override;
+
+ force_rsne_override =
+ nla_get_u8(tb[QCA_WLAN_VENDOR_ATTR_CONFIG_RSN_IE]);
+ if (force_rsne_override > 1) {
+ hdd_err("Invalid test_mode %d", force_rsne_override);
+ ret_val = -EINVAL;
+ }
+
+ hdd_ctx->force_rsne_override = force_rsne_override;
+ hdd_debug("force_rsne_override - %d",
+ hdd_ctx->force_rsne_override);
+ }
+
return ret_val;
}
@@ -8366,363 +8431,6 @@ static int wlan_hdd_cfg80211_conditional_chan_switch(struct wiphy *wiphy,
return ret;
}
-/*
- * define short names for the global vendor params
- * used by __wlan_hdd_cfg80211_bpf_offload()
- */
-#define BPF_INVALID \
- QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_INVALID
-#define BPF_SET_RESET \
- QCA_WLAN_VENDOR_ATTR_SET_RESET_PACKET_FILTER
-#define BPF_VERSION \
- QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_VERSION
-#define BPF_FILTER_ID \
- QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_ID
-#define BPF_PACKET_SIZE \
- QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_SIZE
-#define BPF_CURRENT_OFFSET \
- QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_CURRENT_OFFSET
-#define BPF_PROGRAM \
- QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROGRAM
-#define BPF_MAX \
- QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_MAX
-
-static const struct nla_policy
-wlan_hdd_bpf_offload_policy[BPF_MAX + 1] = {
- [BPF_SET_RESET] = {.type = NLA_U32},
- [BPF_VERSION] = {.type = NLA_U32},
- [BPF_FILTER_ID] = {.type = NLA_U32},
- [BPF_PACKET_SIZE] = {.type = NLA_U32},
- [BPF_CURRENT_OFFSET] = {.type = NLA_U32},
- [BPF_PROGRAM] = {.type = NLA_U8},
-};
-
-/**
- * hdd_get_bpf_offload_cb() - Callback function to BPF Offload
- * @hdd_context: hdd_context
- * @bpf_get_offload: struct for get offload
- *
- * This function receives the response/data from the lower layer and
- * checks to see if the thread is still waiting then post the results to
- * upper layer, if the request has timed out then ignore.
- *
- * Return: None
- */
-void hdd_get_bpf_offload_cb(void *hdd_context,
- struct sir_bpf_get_offload *data)
-{
- hdd_context_t *hdd_ctx = hdd_context;
- struct hdd_bpf_context *context;
-
- ENTER();
-
- if (wlan_hdd_validate_context(hdd_ctx) || !data) {
- hdd_err("HDD context is invalid or data(%pK) is null",
- data);
- return;
- }
-
- spin_lock(&hdd_context_lock);
-
- context = &bpf_context;
- /* The caller presumably timed out so there is nothing we can do */
- if (context->magic != BPF_CONTEXT_MAGIC) {
- spin_unlock(&hdd_context_lock);
- return;
- }
-
- /* context is valid so caller is still waiting */
- /* paranoia: invalidate the magic */
- context->magic = 0;
-
- context->capability_response = *data;
- complete(&context->completion);
-
- spin_unlock(&hdd_context_lock);
-}
-
-/**
- * hdd_post_get_bpf_capabilities_rsp() - Callback function to BPF Offload
- * @hdd_context: hdd_context
- * @bpf_get_offload: struct for get offload
- *
- * Return: 0 on success, error number otherwise.
- */
-static int hdd_post_get_bpf_capabilities_rsp(hdd_context_t *hdd_ctx,
- struct sir_bpf_get_offload *bpf_get_offload)
-{
- struct sk_buff *skb;
- uint32_t nl_buf_len;
-
- ENTER();
-
- nl_buf_len = NLMSG_HDRLEN;
- nl_buf_len +=
- (sizeof(bpf_get_offload->max_bytes_for_bpf_inst) + NLA_HDRLEN) +
- (sizeof(bpf_get_offload->bpf_version) + NLA_HDRLEN);
-
- skb = cfg80211_vendor_cmd_alloc_reply_skb(hdd_ctx->wiphy, nl_buf_len);
- if (!skb) {
- hdd_err("cfg80211_vendor_cmd_alloc_reply_skb failed");
- return -ENOMEM;
- }
-
- hdd_debug("BPF Version: %u BPF max bytes: %u",
- bpf_get_offload->bpf_version,
- bpf_get_offload->max_bytes_for_bpf_inst);
-
- if (nla_put_u32(skb, BPF_PACKET_SIZE,
- bpf_get_offload->max_bytes_for_bpf_inst) ||
- nla_put_u32(skb, BPF_VERSION, bpf_get_offload->bpf_version)) {
- hdd_err("nla put failure");
- goto nla_put_failure;
- }
-
- cfg80211_vendor_cmd_reply(skb);
- EXIT();
- return 0;
-
-nla_put_failure:
- kfree_skb(skb);
- return -EINVAL;
-}
-
-/**
- * hdd_get_bpf_offload - Get BPF offload Capabilities
- * @hdd_ctx: Hdd context
- *
- * Return: 0 on success, errno on failure
- */
-static int hdd_get_bpf_offload(hdd_context_t *hdd_ctx)
-{
- unsigned long rc;
- static struct hdd_bpf_context *context;
- QDF_STATUS status;
- int ret;
-
- ENTER();
-
- spin_lock(&hdd_context_lock);
- context = &bpf_context;
- context->magic = BPF_CONTEXT_MAGIC;
- INIT_COMPLETION(context->completion);
- spin_unlock(&hdd_context_lock);
-
- status = sme_get_bpf_offload_capabilities(hdd_ctx->hHal);
- if (!QDF_IS_STATUS_SUCCESS(status)) {
- hdd_err("Unable to retrieve BPF caps");
- return -EINVAL;
- }
- /* request was sent -- wait for the response */
- rc = wait_for_completion_timeout(&context->completion,
- msecs_to_jiffies(WLAN_WAIT_TIME_BPF));
- if (!rc) {
- hdd_err("Target response timed out");
- spin_lock(&hdd_context_lock);
- context->magic = 0;
- spin_unlock(&hdd_context_lock);
-
- return -ETIMEDOUT;
- }
- ret = hdd_post_get_bpf_capabilities_rsp(hdd_ctx,
- &bpf_context.capability_response);
- if (ret)
- hdd_err("Failed to post get bpf capabilities");
-
- EXIT();
- return ret;
-}
-
-/**
- * hdd_set_reset_bpf_offload - Post set/reset bpf to SME
- * @hdd_ctx: Hdd context
- * @tb: Length of @data
- * @adapter: pointer to adapter struct
- *
- * Return: 0 on success; errno on failure
- */
-static int hdd_set_reset_bpf_offload(hdd_context_t *hdd_ctx,
- struct nlattr **tb,
- hdd_adapter_t *adapter)
-{
- struct sir_bpf_set_offload *bpf_set_offload;
- QDF_STATUS status;
- int prog_len;
- int ret = 0;
-
- ENTER();
-
- if (adapter->device_mode == QDF_STA_MODE ||
- adapter->device_mode == QDF_P2P_CLIENT_MODE) {
- if (!hdd_conn_is_connected(
- WLAN_HDD_GET_STATION_CTX_PTR(adapter))) {
- hdd_err("Not in Connected state!");
- return -ENOTSUPP;
- }
- }
-
- bpf_set_offload = qdf_mem_malloc(sizeof(*bpf_set_offload));
- if (bpf_set_offload == NULL) {
- hdd_err("qdf_mem_malloc failed for bpf_set_offload");
- return -ENOMEM;
- }
-
- /* Parse and fetch bpf packet size */
- if (!tb[BPF_PACKET_SIZE]) {
- hdd_err("attr bpf packet size failed");
- ret = -EINVAL;
- goto fail;
- }
- bpf_set_offload->total_length = nla_get_u32(tb[BPF_PACKET_SIZE]);
-
- if (!bpf_set_offload->total_length) {
- hdd_debug("BPF reset packet filter received");
- goto post_sme;
- }
-
- /* Parse and fetch bpf program */
- if (!tb[BPF_PROGRAM]) {
- hdd_err("attr bpf program failed");
- ret = -EINVAL;
- goto fail;
- }
-
- prog_len = nla_len(tb[BPF_PROGRAM]);
- bpf_set_offload->program = qdf_mem_malloc(sizeof(uint8_t) * prog_len);
-
- if (bpf_set_offload->program == NULL) {
- hdd_err("qdf_mem_malloc failed for bpf offload program");
- ret = -ENOMEM;
- goto fail;
- }
-
- bpf_set_offload->current_length = prog_len;
- nla_memcpy(bpf_set_offload->program, tb[BPF_PROGRAM], prog_len);
- bpf_set_offload->session_id = adapter->sessionId;
-
- hdd_debug("BPF set instructions");
- QDF_TRACE_HEX_DUMP(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_DEBUG,
- bpf_set_offload->program, prog_len);
-
- /* Parse and fetch filter Id */
- if (!tb[BPF_FILTER_ID]) {
- hdd_err("attr filter id failed");
- ret = -EINVAL;
- goto fail;
- }
- bpf_set_offload->filter_id = nla_get_u32(tb[BPF_FILTER_ID]);
-
- /* Parse and fetch current offset */
- if (!tb[BPF_CURRENT_OFFSET]) {
- hdd_err("attr current offset failed");
- ret = -EINVAL;
- goto fail;
- }
- bpf_set_offload->current_offset = nla_get_u32(tb[BPF_CURRENT_OFFSET]);
-
-post_sme:
- hdd_debug("Posting BPF SET/RESET to SME, session_id: %d Bpf Version: %d filter ID: %d total_length: %d current_length: %d current offset: %d",
- bpf_set_offload->session_id,
- bpf_set_offload->version,
- bpf_set_offload->filter_id,
- bpf_set_offload->total_length,
- bpf_set_offload->current_length,
- bpf_set_offload->current_offset);
-
- status = sme_set_bpf_instructions(hdd_ctx->hHal, bpf_set_offload);
- if (!QDF_IS_STATUS_SUCCESS(status)) {
- hdd_err("sme_set_bpf_instructions failed(err=%d)", status);
- ret = -EINVAL;
- goto fail;
- }
- EXIT();
-
-fail:
- if (bpf_set_offload->current_length)
- qdf_mem_free(bpf_set_offload->program);
- qdf_mem_free(bpf_set_offload);
- return ret;
-}
-
-/**
- * wlan_hdd_cfg80211_bpf_offload() - Set/Reset to BPF Offload
- * @wiphy: wiphy structure pointer
- * @wdev: Wireless device structure pointer
- * @data: Pointer to the data received
- * @data_len: Length of @data
- *
- * Return: 0 on success; errno on failure
- */
-static int
-__wlan_hdd_cfg80211_bpf_offload(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- const void *data, int data_len)
-{
- hdd_context_t *hdd_ctx = wiphy_priv(wiphy);
- struct net_device *dev = wdev->netdev;
- hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
- struct nlattr *tb[BPF_MAX + 1];
- int ret_val, packet_filter_subcmd;
-
- ENTER();
-
- ret_val = wlan_hdd_validate_context(hdd_ctx);
- if (ret_val)
- return ret_val;
-
- if (QDF_GLOBAL_FTM_MODE == hdd_get_conparam()) {
- hdd_err("Command not allowed in FTM mode");
- return -EINVAL;
- }
-
- if (!hdd_ctx->bpf_enabled) {
- hdd_err("BPF offload is not supported/enabled");
- return -ENOTSUPP;
- }
-
- if (hdd_nla_parse(tb, BPF_MAX, data, data_len,
- wlan_hdd_bpf_offload_policy)) {
- hdd_err("Invalid ATTR");
- return -EINVAL;
- }
-
- if (!tb[BPF_SET_RESET]) {
- hdd_err("attr bpf set reset failed");
- return -EINVAL;
- }
-
- packet_filter_subcmd = nla_get_u32(tb[BPF_SET_RESET]);
-
- if (packet_filter_subcmd == QCA_WLAN_GET_PACKET_FILTER)
- return hdd_get_bpf_offload(hdd_ctx);
- else
- return hdd_set_reset_bpf_offload(hdd_ctx, tb,
- pAdapter);
-}
-
-/**
- * wlan_hdd_cfg80211_bpf_offload() - SSR Wrapper to BPF Offload
- * @wiphy: wiphy structure pointer
- * @wdev: Wireless device structure pointer
- * @data: Pointer to the data received
- * @data_len: Length of @data
- *
- * Return: 0 on success; errno on failure
- */
-
-static int wlan_hdd_cfg80211_bpf_offload(struct wiphy *wiphy,
- struct wireless_dev *wdev,
- const void *data, int data_len)
-{
- int ret;
-
- cds_ssr_protect(__func__);
- ret = __wlan_hdd_cfg80211_bpf_offload(wiphy, wdev, data, data_len);
- cds_ssr_unprotect(__func__);
-
- return ret;
-}
-
/**
* wlan_hdd_set_pre_cac_status() - Set the pre cac status
* @pre_cac_adapter: AP adapter used for pre cac
@@ -9118,16 +8826,6 @@ release_intf_addr_and_return_failure:
return -EINVAL;
}
-/**
- * hdd_init_bpf_completion() - Initialize the completion event for bpf
- *
- * Return: None
- */
-void hdd_init_bpf_completion(void)
-{
- init_completion(&bpf_context.completion);
-}
-
static const struct nla_policy
wlan_hdd_sap_config_policy[QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_MAX + 1] = {
[QCA_WLAN_VENDOR_ATTR_SAP_CONFIG_CHANNEL] = {.type = NLA_U8 },
@@ -9420,6 +9118,53 @@ static int wlan_hdd_cfg80211_sta_roam_policy(struct wiphy *wiphy,
}
#ifdef FEATURE_WLAN_CH_AVOID
+
+static int hdd_validate_avoid_freq_chanlist(hdd_context_t *hdd_ctx,
+ tHddAvoidFreqList *channel_list)
+{
+ unsigned int range_idx, ch_idx;
+ unsigned int unsafe_channel_index, unsafe_channel_count = 0;
+ bool ch_found = false;
+
+ unsafe_channel_count = QDF_MIN((uint16_t)hdd_ctx->unsafe_channel_count,
+ (uint16_t)NUM_CHANNELS);
+
+ for (range_idx = 0; range_idx < channel_list->avoidFreqRangeCount;
+ range_idx++) {
+ if ((channel_list->avoidFreqRange[range_idx].startFreq <
+ CDS_24_GHZ_CHANNEL_1) ||
+ (channel_list->avoidFreqRange[range_idx].endFreq >
+ CDS_5_GHZ_CHANNEL_165) ||
+ (channel_list->avoidFreqRange[range_idx].startFreq >
+ channel_list->avoidFreqRange[range_idx].endFreq))
+ continue;
+
+ for (ch_idx = channel_list->avoidFreqRange[range_idx].startFreq;
+ ch_idx <= channel_list->avoidFreqRange[range_idx].endFreq;
+ ch_idx++) {
+ for (unsafe_channel_index = 0;
+ unsafe_channel_index < unsafe_channel_count;
+ unsafe_channel_index++) {
+ if (ch_idx ==
+ hdd_ctx->unsafe_channel_list[
+ unsafe_channel_index]) {
+ hdd_log(QDF_TRACE_LEVEL_INFO,
+ "Duplicate channel %d",
+ ch_idx);
+ ch_found = true;
+ break;
+ }
+ }
+			if (!ch_found &&
+			    unsafe_channel_count < NUM_CHANNELS) {
+				hdd_ctx->unsafe_channel_list[
+				unsafe_channel_count++] = ch_idx;
+			}
+ ch_found = false;
+ }
+ }
+ return unsafe_channel_count;
+}
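
The new hdd_validate_avoid_freq_chanlist() merges the user-supplied avoid-frequency ranges into the driver's unsafe-channel list, skipping malformed ranges and duplicate channels. As a rough, self-contained illustration of that filtering (plain C with invented limits, not the driver's types), the core loop reduces to:

#include <stdbool.h>
#include <stdio.h>

#define MAX_UNSAFE 32

/*
 * simplified sketch: append the channels of each avoid range to the unsafe
 * list, dropping malformed ranges and channels that are already present
 */
static unsigned int merge_avoid_ranges(const unsigned int (*ranges)[2],
				       unsigned int num_ranges,
				       unsigned int *unsafe,
				       unsigned int count)
{
	unsigned int r, ch, i;
	bool found;

	for (r = 0; r < num_ranges; r++) {
		if (ranges[r][0] > ranges[r][1])
			continue;               /* malformed range: skip it */
		for (ch = ranges[r][0]; ch <= ranges[r][1]; ch++) {
			found = false;
			for (i = 0; i < count; i++) {
				if (unsafe[i] == ch) {
					found = true;  /* duplicate channel */
					break;
				}
			}
			if (!found && count < MAX_UNSAFE)
				unsafe[count++] = ch;
		}
	}
	return count;
}

int main(void)
{
	unsigned int unsafe[MAX_UNSAFE] = { 36, 40 };
	const unsigned int ranges[][2] = { { 36, 44 }, { 149, 153 } };
	unsigned int n;

	n = merge_avoid_ranges(ranges, 2, unsafe, 2);
	printf("unsafe channel count: %u\n", n);    /* 14 for this input */
	return 0;
}
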
+
/**
* __wlan_hdd_cfg80211_avoid_freq() - ask driver to restart SAP if SAP
* is on unsafe channel.
@@ -9442,11 +9187,11 @@ __wlan_hdd_cfg80211_avoid_freq(struct wiphy *wiphy,
{
hdd_context_t *hdd_ctx = wiphy_priv(wiphy);
int ret;
- uint16_t unsafe_channel_count;
- int unsafe_channel_index;
qdf_device_t qdf_ctx = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
uint16_t *local_unsafe_list;
- uint16_t local_unsafe_list_count;
+ uint16_t unsafe_channel_index, local_unsafe_list_count;
+ tHddAvoidFreqList *channel_list;
+ enum tQDF_GLOBAL_CON_MODE curr_mode;
ENTER_DEV(wdev->netdev);
@@ -9454,9 +9199,10 @@ __wlan_hdd_cfg80211_avoid_freq(struct wiphy *wiphy,
cds_err("qdf_ctx is NULL");
return -EINVAL;
}
-
- if (QDF_GLOBAL_FTM_MODE == hdd_get_conparam()) {
- hdd_err("Command not allowed in FTM mode");
+ curr_mode = hdd_get_conparam();
+ if (QDF_GLOBAL_FTM_MODE == curr_mode ||
+ QDF_GLOBAL_MONITOR_MODE == curr_mode) {
+ hdd_err("Command not allowed in FTM/MONITOR mode");
return -EINVAL;
}
@@ -9464,6 +9210,13 @@ __wlan_hdd_cfg80211_avoid_freq(struct wiphy *wiphy,
if (0 != ret)
return ret;
+ channel_list = (tHddAvoidFreqList *)data;
+ if (!channel_list) {
+ hdd_log(QDF_TRACE_LEVEL_ERROR,
+ "Avoid frequency channel list empty");
+ return -EINVAL;
+ }
+
ret = hdd_clone_local_unsafe_chan(hdd_ctx,
&local_unsafe_list,
&local_unsafe_list_count);
@@ -9474,13 +9227,18 @@ __wlan_hdd_cfg80211_avoid_freq(struct wiphy *wiphy,
&(hdd_ctx->unsafe_channel_count),
sizeof(hdd_ctx->unsafe_channel_list));
- unsafe_channel_count = QDF_MIN((uint16_t)hdd_ctx->unsafe_channel_count,
- (uint16_t)NUM_CHANNELS);
+ hdd_ctx->unsafe_channel_count = hdd_validate_avoid_freq_chanlist(
+ hdd_ctx,
+ channel_list);
+
+ pld_set_wlan_unsafe_channel(qdf_ctx->dev, hdd_ctx->unsafe_channel_list,
+ hdd_ctx->unsafe_channel_count);
+
for (unsafe_channel_index = 0;
- unsafe_channel_index < unsafe_channel_count;
- unsafe_channel_index++) {
+ unsafe_channel_index < hdd_ctx->unsafe_channel_count;
+ unsafe_channel_index++) {
hdd_debug("Channel %d is not safe",
- hdd_ctx->unsafe_channel_list[unsafe_channel_index]);
+ hdd_ctx->unsafe_channel_list[unsafe_channel_index]);
}
if (hdd_local_unsafe_channel_updated(hdd_ctx, local_unsafe_list,
@@ -9658,14 +9416,14 @@ static int wlan_hdd_cfg80211_sap_configuration_set(struct wiphy *wiphy,
return ret;
}
-#undef BPF_INVALID
-#undef BPF_SET_RESET
-#undef BPF_VERSION
-#undef BPF_ID
-#undef BPF_PACKET_SIZE
-#undef BPF_CURRENT_OFFSET
-#undef BPF_PROGRAM
-#undef BPF_MAX
+#undef APF_INVALID
+#undef APF_SET_RESET
+#undef APF_VERSION
+#undef APF_ID
+#undef APF_PACKET_SIZE
+#undef APF_CURRENT_OFFSET
+#undef APF_PROGRAM
+#undef APF_MAX
/**
* define short names for the global vendor params
@@ -10902,33 +10660,6 @@ static int hdd_set_clear_connectivity_check_stats_info(
uint32_t pkt_bitmap;
int rem;
- /* Clear All Stats command has come */
- if (!is_set_stats) {
- arp_stats_params->pkt_type_bitmap = adapter->pkt_type_bitmap;
- /* DNS tracking is not supported in FW. */
- arp_stats_params->pkt_type_bitmap &=
- ~CONNECTIVITY_CHECK_SET_DNS;
- arp_stats_params->flag = false;
- arp_stats_params->pkt_type = WLAN_NUD_STATS_ARP_PKT_TYPE;
- qdf_mem_zero(&adapter->hdd_stats.hdd_arp_stats,
- sizeof(adapter->hdd_stats.hdd_arp_stats));
- qdf_mem_zero(&adapter->hdd_stats.hdd_dns_stats,
- sizeof(adapter->hdd_stats.hdd_dns_stats));
- qdf_mem_zero(&adapter->hdd_stats.hdd_tcp_stats,
- sizeof(adapter->hdd_stats.hdd_tcp_stats));
- qdf_mem_zero(&adapter->hdd_stats.hdd_icmpv4_stats,
- sizeof(adapter->hdd_stats.hdd_icmpv4_stats));
- adapter->track_arp_ip = 0;
- qdf_mem_zero(adapter->dns_payload,
- adapter->track_dns_domain_len);
- adapter->track_dns_domain_len = 0;
- adapter->track_src_port = 0;
- adapter->track_dest_port = 0;
- adapter->track_dest_ipv4 = 0;
- adapter->pkt_type_bitmap = 0;
- goto end;
- }
-
/* Set NUD command for start tracking is received. */
nla_for_each_nested(curr_attr,
tb[STATS_SET_DATA_PKT_INFO],
@@ -10950,12 +10681,14 @@ static int hdd_set_clear_connectivity_check_stats_info(
err = -EINVAL;
goto end;
}
- arp_stats_params->pkt_type_bitmap = pkt_bitmap;
- arp_stats_params->flag = true;
- adapter->pkt_type_bitmap |=
+
+ if (is_set_stats) {
+ arp_stats_params->pkt_type_bitmap = pkt_bitmap;
+ arp_stats_params->flag = true;
+ adapter->pkt_type_bitmap |=
arp_stats_params->pkt_type_bitmap;
- if (pkt_bitmap & CONNECTIVITY_CHECK_SET_ARP) {
+ if (pkt_bitmap & CONNECTIVITY_CHECK_SET_ARP) {
if (!tb[STATS_GW_IPV4]) {
hdd_err("GW ipv4 address is not present");
err = -EINVAL;
@@ -10967,66 +10700,109 @@ static int hdd_set_clear_connectivity_check_stats_info(
WLAN_NUD_STATS_ARP_PKT_TYPE;
adapter->track_arp_ip =
arp_stats_params->ip_addr;
- qdf_mem_zero(&adapter->hdd_stats.hdd_arp_stats,
- sizeof(adapter->hdd_stats.
- hdd_arp_stats));
- }
+ }
- if (pkt_bitmap & CONNECTIVITY_CHECK_SET_DNS) {
- uint8_t *domain_name;
+ if (pkt_bitmap & CONNECTIVITY_CHECK_SET_DNS) {
+ uint8_t *domain_name;
- if (!tb2[STATS_DNS_DOMAIN_NAME]) {
- hdd_err("DNS domain id is not present");
- err = -EINVAL;
- goto end;
- }
- domain_name =
- nla_data(tb2[STATS_DNS_DOMAIN_NAME]);
- adapter->track_dns_domain_len =
- nla_len(tb2[STATS_DNS_DOMAIN_NAME]);
- hdd_dns_make_name_query(domain_name,
- adapter->dns_payload);
- /* DNS tracking is not supported in FW. */
- arp_stats_params->pkt_type_bitmap &=
+ if (!tb2[STATS_DNS_DOMAIN_NAME]) {
+ hdd_err("DNS domain id is not present");
+ err = -EINVAL;
+ goto end;
+ }
+ domain_name = nla_data(
+ tb2[STATS_DNS_DOMAIN_NAME]);
+ adapter->track_dns_domain_len =
+ nla_len(tb2[
+ STATS_DNS_DOMAIN_NAME]);
+ hdd_dns_make_name_query(domain_name,
+ adapter->dns_payload);
+						/* DNS tracking isn't supported in FW. */
+ arp_stats_params->pkt_type_bitmap &=
~CONNECTIVITY_CHECK_SET_DNS;
- qdf_mem_zero(&adapter->hdd_stats.hdd_dns_stats,
- sizeof(adapter->hdd_stats.
- hdd_dns_stats));
- }
-
- if (pkt_bitmap & CONNECTIVITY_CHECK_SET_TCP_HANDSHAKE) {
- if (!tb2[STATS_SRC_PORT] ||
- !tb2[STATS_DEST_PORT]) {
- hdd_err("Source/Dest port is not present");
- err = -EINVAL;
- goto end;
}
- arp_stats_params->tcp_src_port =
- nla_get_u32(tb2[STATS_SRC_PORT]);
- arp_stats_params->tcp_dst_port =
- nla_get_u32(tb2[STATS_DEST_PORT]);
- adapter->track_src_port =
+
+ if (pkt_bitmap &
+ CONNECTIVITY_CHECK_SET_TCP_HANDSHAKE) {
+ if (!tb2[STATS_SRC_PORT] ||
+ !tb2[STATS_DEST_PORT]) {
+ hdd_err("Source/Dest port is not present");
+ err = -EINVAL;
+ goto end;
+ }
+ arp_stats_params->tcp_src_port =
+ nla_get_u32(
+ tb2[STATS_SRC_PORT]);
+ arp_stats_params->tcp_dst_port =
+ nla_get_u32(
+ tb2[STATS_DEST_PORT]);
+ adapter->track_src_port =
arp_stats_params->tcp_src_port;
- adapter->track_dest_port =
+ adapter->track_dest_port =
arp_stats_params->tcp_dst_port;
- qdf_mem_zero(&adapter->hdd_stats.hdd_tcp_stats,
- sizeof(adapter->hdd_stats.
- hdd_tcp_stats));
- }
-
- if (pkt_bitmap & CONNECTIVITY_CHECK_SET_ICMPV4) {
- if (!tb2[STATS_DEST_IPV4]) {
- hdd_err("destination ipv4 address to track ping packets is not present");
- err = -EINVAL;
- goto end;
}
- arp_stats_params->icmp_ipv4 =
- nla_get_u32(tb2[STATS_DEST_IPV4]);
- adapter->track_dest_ipv4 =
+
+ if (pkt_bitmap &
+ CONNECTIVITY_CHECK_SET_ICMPV4) {
+ if (!tb2[STATS_DEST_IPV4]) {
+ hdd_err("destination ipv4 address to track ping packets is not present");
+ err = -EINVAL;
+ goto end;
+ }
+ arp_stats_params->icmp_ipv4 =
+ nla_get_u32(
+ tb2[STATS_DEST_IPV4]);
+ adapter->track_dest_ipv4 =
arp_stats_params->icmp_ipv4;
- qdf_mem_zero(&adapter->hdd_stats.hdd_tcp_stats,
- sizeof(adapter->hdd_stats.
- hdd_tcp_stats));
+ }
+ } else {
+ /* clear stats command received */
+ arp_stats_params->pkt_type_bitmap = pkt_bitmap;
+ arp_stats_params->flag = false;
+ adapter->pkt_type_bitmap &=
+ (~arp_stats_params->pkt_type_bitmap);
+
+ if (pkt_bitmap & CONNECTIVITY_CHECK_SET_ARP) {
+ arp_stats_params->pkt_type =
+ WLAN_NUD_STATS_ARP_PKT_TYPE;
+ qdf_mem_zero(&adapter->hdd_stats.
+ hdd_arp_stats,
+ sizeof(adapter->hdd_stats.
+ hdd_arp_stats));
+ adapter->track_arp_ip = 0;
+ }
+
+ if (pkt_bitmap & CONNECTIVITY_CHECK_SET_DNS) {
+ /* DNStracking isn't supported in FW. */
+ arp_stats_params->pkt_type_bitmap &=
+ ~CONNECTIVITY_CHECK_SET_DNS;
+ qdf_mem_zero(&adapter->hdd_stats.
+ hdd_dns_stats,
+ sizeof(adapter->hdd_stats.
+ hdd_dns_stats));
+ qdf_mem_zero(adapter->dns_payload,
+ adapter->track_dns_domain_len);
+ adapter->track_dns_domain_len = 0;
+ }
+
+ if (pkt_bitmap &
+ CONNECTIVITY_CHECK_SET_TCP_HANDSHAKE) {
+ qdf_mem_zero(&adapter->hdd_stats.
+ hdd_tcp_stats,
+ sizeof(adapter->hdd_stats.
+ hdd_tcp_stats));
+ adapter->track_src_port = 0;
+ adapter->track_dest_port = 0;
+ }
+
+ if (pkt_bitmap &
+ CONNECTIVITY_CHECK_SET_ICMPV4) {
+ qdf_mem_zero(&adapter->hdd_stats.
+ hdd_icmpv4_stats,
+ sizeof(adapter->hdd_stats.
+ hdd_icmpv4_stats));
+ adapter->track_dest_ipv4 = 0;
+ }
}
} else {
hdd_err("stats list empty");
@@ -11277,19 +11053,20 @@ static int __wlan_hdd_cfg80211_set_nud_stats(struct wiphy *wiphy,
"%s STATS_SET_START CMD", __func__);
return -EINVAL;
}
+
+ arp_stats_params.pkt_type_bitmap =
+ CONNECTIVITY_CHECK_SET_ARP;
+ adapter->pkt_type_bitmap |=
+ arp_stats_params.pkt_type_bitmap;
arp_stats_params.flag = true;
arp_stats_params.ip_addr =
nla_get_u32(tb[STATS_GW_IPV4]);
adapter->track_arp_ip = arp_stats_params.ip_addr;
-
- qdf_mem_zero(&adapter->hdd_stats.hdd_arp_stats,
- sizeof(adapter->hdd_stats.hdd_arp_stats));
-
arp_stats_params.pkt_type = WLAN_NUD_STATS_ARP_PKT_TYPE;
}
} else {
- /* clear tracking stats of other data types as well*/
- if (adapter->pkt_type_bitmap) {
+ /* clear stats command received. */
+ if (tb[STATS_SET_DATA_PKT_INFO]) {
err = hdd_set_clear_connectivity_check_stats_info(
adapter,
&arp_stats_params, tb, false);
@@ -11303,6 +11080,10 @@ static int __wlan_hdd_cfg80211_set_nud_stats(struct wiphy *wiphy,
if (!arp_stats_params.pkt_type_bitmap)
return err;
} else {
+ arp_stats_params.pkt_type_bitmap =
+ CONNECTIVITY_CHECK_SET_ARP;
+ adapter->pkt_type_bitmap &=
+ (~arp_stats_params.pkt_type_bitmap);
arp_stats_params.flag = false;
qdf_mem_zero(&adapter->hdd_stats.hdd_arp_stats,
sizeof(adapter->hdd_stats.hdd_arp_stats));
@@ -11310,10 +11091,9 @@ static int __wlan_hdd_cfg80211_set_nud_stats(struct wiphy *wiphy,
}
}
- if (arp_stats_params.flag) {
- QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_INFO,
- "%s STATS_SET_START Cleared!!", __func__);
- }
+ QDF_TRACE(QDF_MODULE_ID_HDD, QDF_TRACE_LEVEL_INFO,
+ "%s STATS_SET_START Received flag %d!!", __func__,
+ arp_stats_params.flag);
arp_stats_params.vdev_id = adapter->sessionId;
@@ -11843,7 +11623,11 @@ static int __wlan_hdd_cfg80211_get_nud_stats(struct wiphy *wiphy,
INIT_COMPLETION(context->response_event);
spin_unlock(&hdd_context_lock);
- if (hdd_ctx->config->enable_data_stall_det)
+ pkt_type_bitmap = adapter->pkt_type_bitmap;
+
+ /* send NUD failure event only when ARP tracking is enabled. */
+ if (hdd_ctx->config->enable_data_stall_det &&
+ (pkt_type_bitmap & CONNECTIVITY_CHECK_SET_ARP))
ol_txrx_post_data_stall_event(
DATA_STALL_LOG_INDICATOR_FRAMEWORK,
DATA_STALL_LOG_NUD_FAILURE,
@@ -11899,7 +11683,6 @@ static int __wlan_hdd_cfg80211_get_nud_stats(struct wiphy *wiphy,
if (adapter->dad)
nla_put_flag(skb, AP_LINK_DAD);
- pkt_type_bitmap = adapter->pkt_type_bitmap;
/* ARP tracking is done above. */
pkt_type_bitmap &= ~CONNECTIVITY_CHECK_SET_ARP;
@@ -12995,8 +12778,7 @@ const struct wiphy_vendor_command hdd_wiphy_vendor_commands[] = {
.info.vendor_id = QCA_NL80211_VENDOR_ID,
.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_GET_LOGGER_FEATURE_SET,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
- WIPHY_VENDOR_CMD_NEED_NETDEV |
- WIPHY_VENDOR_CMD_NEED_RUNNING,
+ WIPHY_VENDOR_CMD_NEED_NETDEV,
.doit = wlan_hdd_cfg80211_get_logger_supp_feature
},
{
@@ -13127,14 +12909,16 @@ const struct wiphy_vendor_command hdd_wiphy_vendor_commands[] = {
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wlan_hdd_cfg80211_txpower_scale_decr_db
},
+#ifdef WLAN_FEATURE_APF
{
.info.vendor_id = QCA_NL80211_VENDOR_ID,
.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_PACKET_FILTER,
.flags = WIPHY_VENDOR_CMD_NEED_WDEV |
WIPHY_VENDOR_CMD_NEED_NETDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
- .doit = wlan_hdd_cfg80211_bpf_offload
+ .doit = wlan_hdd_cfg80211_apf_offload
},
+#endif /* WLAN_FEATURE_APF */
{
.info.vendor_id = QCA_NL80211_VENDOR_ID,
.info.subcmd = QCA_NL80211_VENDOR_SUBCMD_ACS_POLICY,
@@ -13573,6 +13357,7 @@ int wlan_hdd_cfg80211_init(struct device *dev,
int i, j;
hdd_context_t *pHddCtx = wiphy_priv(wiphy);
int len = 0;
+ uint32_t *cipher_suites;
ENTER();
@@ -13796,9 +13581,32 @@ int wlan_hdd_cfg80211_init(struct device *dev,
}
}
/*Initialise the supported cipher suite details */
- wiphy->cipher_suites = hdd_cipher_suites;
- wiphy->n_cipher_suites = ARRAY_SIZE(hdd_cipher_suites);
-
+ if (pCfg->gcmp_enabled) {
+ cipher_suites = qdf_mem_malloc(sizeof(hdd_cipher_suites) +
+ sizeof(hdd_gcmp_cipher_suits));
+ if (cipher_suites == NULL) {
+ hdd_err("Not enough memory for cipher suites");
+ return -ENOMEM;
+ }
+ wiphy->n_cipher_suites = QDF_ARRAY_SIZE(hdd_cipher_suites) +
+ QDF_ARRAY_SIZE(hdd_gcmp_cipher_suits);
+ qdf_mem_copy(cipher_suites, &hdd_cipher_suites,
+ sizeof(hdd_cipher_suites));
+ qdf_mem_copy(cipher_suites + QDF_ARRAY_SIZE(hdd_cipher_suites),
+ &hdd_gcmp_cipher_suits,
+ sizeof(hdd_gcmp_cipher_suits));
+ } else {
+ cipher_suites = qdf_mem_malloc(sizeof(hdd_cipher_suites));
+ if (cipher_suites == NULL) {
+ hdd_err("Not enough memory for cipher suites");
+ return -ENOMEM;
+ }
+ wiphy->n_cipher_suites = QDF_ARRAY_SIZE(hdd_cipher_suites);
+ qdf_mem_copy(cipher_suites, &hdd_cipher_suites,
+ sizeof(hdd_cipher_suites));
+ }
+ wiphy->cipher_suites = cipher_suites;
+ cipher_suites = NULL;
/*signal strength in mBm (100*dBm) */
wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
wiphy->max_remain_on_channel_duration = MAX_REMAIN_ON_CHANNEL_DURATION;
@@ -13860,6 +13668,7 @@ mem_fail:
void wlan_hdd_cfg80211_deinit(struct wiphy *wiphy)
{
int i;
+ const uint32_t *cipher_suites;
for (i = 0; i < HDD_NUM_NL80211_BANDS; i++) {
if (NULL != wiphy->bands[i] &&
@@ -13868,6 +13677,11 @@ void wlan_hdd_cfg80211_deinit(struct wiphy *wiphy)
wiphy->bands[i]->channels = NULL;
}
}
+ cipher_suites = wiphy->cipher_suites;
+ wiphy->cipher_suites = NULL;
+ wiphy->n_cipher_suites = 0;
+ qdf_mem_free((uint32_t *)cipher_suites);
+ cipher_suites = NULL;
hdd_reset_global_reg_params();
}
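
Taken together, the two hunks above switch the wiphy from pointing at a static cipher-suite table to owning a heap-allocated copy, optionally extended with the GCMP suites, which wlan_hdd_cfg80211_deinit() later releases. A simplified standalone sketch of that allocate, combine and free lifecycle (hypothetical suite values, plain malloc/free in place of qdf_mem_*):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static const uint32_t base_suites[] = { 0x000FAC02, 0x000FAC04 };
static const uint32_t gcmp_suites[] = { 0x000FAC08, 0x000FAC09 };

struct wiphy_like {
	const uint32_t *cipher_suites;
	int n_cipher_suites;
};

/*
 * allocate a combined suite list, appending the GCMP suites only when
 * enabled; the wiphy then owns the allocation until deinit
 */
static int init_cipher_suites(struct wiphy_like *w, int gcmp_enabled)
{
	size_t base = sizeof(base_suites);
	size_t extra = gcmp_enabled ? sizeof(gcmp_suites) : 0;
	uint32_t *suites = malloc(base + extra);

	if (!suites)
		return -1;
	memcpy(suites, base_suites, base);
	if (gcmp_enabled)
		memcpy(suites + (base / sizeof(uint32_t)), gcmp_suites, extra);
	w->cipher_suites = suites;
	w->n_cipher_suites = (int)((base + extra) / sizeof(uint32_t));
	return 0;
}

static void deinit_cipher_suites(struct wiphy_like *w)
{
	free((uint32_t *)w->cipher_suites);   /* cast away const to free */
	w->cipher_suites = NULL;
	w->n_cipher_suites = 0;
}

int main(void)
{
	struct wiphy_like w = { 0 };

	if (init_cipher_suites(&w, 1) == 0) {
		printf("%d cipher suites registered\n", w.n_cipher_suites);
		deinit_cipher_suites(&w);
	}
	return 0;
}
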
@@ -14781,6 +14595,58 @@ static bool wlan_hdd_is_duplicate_channel(uint8_t *arr,
}
#endif
+QDF_STATUS wlan_hdd_send_sta_authorized_event(
+ hdd_adapter_t *pAdapter,
+ hdd_context_t *pHddCtx,
+ const struct qdf_mac_addr *mac_addr)
+{
+ struct sk_buff *vendor_event;
+ uint32_t sta_flags = 0;
+ QDF_STATUS status;
+
+ ENTER();
+
+ if (!pHddCtx) {
+ hdd_err("HDD context is null");
+ return -EINVAL;
+		return QDF_STATUS_E_INVAL;
+
+ vendor_event =
+ cfg80211_vendor_event_alloc(
+ pHddCtx->wiphy, &pAdapter->wdev, sizeof(sta_flags) +
+ QDF_MAC_ADDR_SIZE + NLMSG_HDRLEN,
+ QCA_NL80211_VENDOR_SUBCMD_LINK_PROPERTIES_INDEX,
+ GFP_KERNEL);
+ if (!vendor_event) {
+ hdd_err("cfg80211_vendor_event_alloc failed");
+ return -EINVAL;
+		return QDF_STATUS_E_INVAL;
+
+ sta_flags |= BIT(NL80211_STA_FLAG_AUTHORIZED);
+
+ status = nla_put_u32(vendor_event,
+ QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_STA_FLAGS,
+ sta_flags);
+ if (status) {
+ hdd_err("STA flag put fails");
+ kfree_skb(vendor_event);
+ return QDF_STATUS_E_FAILURE;
+ }
+ status = nla_put(vendor_event,
+ QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_STA_MAC,
+ QDF_MAC_ADDR_SIZE, mac_addr->bytes);
+ if (status) {
+ hdd_err("STA MAC put fails");
+ kfree_skb(vendor_event);
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ cfg80211_vendor_event(vendor_event, GFP_KERNEL);
+
+ EXIT();
+	return QDF_STATUS_SUCCESS;
+}
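
wlan_hdd_send_sta_authorized_event() follows the usual vendor-event shape: allocate the event buffer, append each attribute with its own failure check, free the buffer on any failure, and hand it off only when every put succeeded. The sketch below mirrors that flow with a hypothetical fixed-size TLV buffer standing in for the real skb/nla helpers:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical, simplified stand-in for the skb/nla machinery */
struct msg {
	uint8_t buf[256];
	size_t len;
};

static int put_attr(struct msg *m, uint8_t type, const void *data, uint8_t len)
{
	if (m->len + 2 + len > sizeof(m->buf))
		return -1;                    /* not enough room: fail the put */
	m->buf[m->len++] = type;
	m->buf[m->len++] = len;
	memcpy(&m->buf[m->len], data, len);
	m->len += len;
	return 0;
}

int send_sta_authorized_event(const uint8_t mac[6], uint32_t sta_flags)
{
	struct msg *event = calloc(1, sizeof(*event));

	if (!event)
		return -1;
	/* every attribute put is checked; the event is freed on any failure */
	if (put_attr(event, 1 /* STA_FLAGS */, &sta_flags, sizeof(sta_flags)) ||
	    put_attr(event, 2 /* STA_MAC */, mac, 6)) {
		free(event);
		return -1;
	}
	printf("event ready, %zu bytes\n", event->len);
	free(event);                          /* a real send would consume it */
	return 0;
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x11, 0x22, 0x33, 0x44 };

	return send_sta_authorized_event(mac, 1u << 5) ? 1 : 0;
}
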
+
/**
* __wlan_hdd_change_station() - change station
* @wiphy: Pointer to the wiphy structure
@@ -14852,6 +14718,13 @@ static int __wlan_hdd_change_station(struct wiphy *wiphy,
hdd_debug("Not able to change TL state to AUTHENTICATED");
return -EINVAL;
}
+ status = wlan_hdd_send_sta_authorized_event(
+ pAdapter,
+ pHddCtx,
+ &STAMacAddress);
+	if (status != QDF_STATUS_SUCCESS)
+		return -EINVAL;
}
} else if ((pAdapter->device_mode == QDF_STA_MODE) ||
(pAdapter->device_mode == QDF_P2P_CLIENT_MODE)) {
@@ -16853,8 +16726,8 @@ static int wlan_hdd_cfg80211_connect_start(hdd_adapter_t *pAdapter,
hdd_conn_set_connection_state(pAdapter,
eConnectionState_Connecting);
- qdf_runtime_pm_prevent_suspend(&pAdapter->connect_rpm_ctx.
- connect);
+ qdf_runtime_pm_prevent_suspend(
+ &pHddCtx->runtime_context.connect);
hdd_prevent_suspend_timeout(HDD_WAKELOCK_TIMEOUT_CONNECT,
WIFI_POWER_EVENT_WAKELOCK_CONNECT);
@@ -16872,8 +16745,8 @@ static int wlan_hdd_cfg80211_connect_start(hdd_adapter_t *pAdapter,
/* change back to NotAssociated */
hdd_conn_set_connection_state(pAdapter,
eConnectionState_NotConnected);
- qdf_runtime_pm_allow_suspend(&pAdapter->connect_rpm_ctx.
- connect);
+ qdf_runtime_pm_allow_suspend(
+ &pHddCtx->runtime_context.connect);
hdd_allow_suspend(WIFI_POWER_EVENT_WAKELOCK_CONNECT);
}
@@ -18322,16 +18195,7 @@ static int wlan_hdd_cfg80211_connect(struct wiphy *wiphy,
return ret;
}
-/**
- * wlan_hdd_disconnect() - hdd disconnect api
- * @pAdapter: Pointer to adapter
- * @reason: Disconnect reason code
- *
- * This function is used to issue a disconnect request to SME
- *
- * Return: 0 for success, non-zero for failure
- */
-static int wlan_hdd_disconnect(hdd_adapter_t *pAdapter, u16 reason)
+int wlan_hdd_disconnect(hdd_adapter_t *pAdapter, u16 reason)
{
int status, result = 0;
unsigned long rc;
@@ -18650,6 +18514,7 @@ static int wlan_hdd_cfg80211_set_privacy_ibss(hdd_adapter_t *pAdapter,
struct cfg80211_ibss_params
*params)
{
+ uint32_t ret;
int status = 0;
hdd_wext_state_t *pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
eCsrEncryptionType encryptionType = eCSR_ENCRYPT_TYPE_NONE;
@@ -18678,13 +18543,27 @@ static int wlan_hdd_cfg80211_set_privacy_ibss(hdd_adapter_t *pAdapter,
if (NULL != ie) {
pWextState->wpaVersion =
IW_AUTH_WPA_VERSION_WPA;
- /* Unpack the WPA IE */
- /* Skip past the EID byte and length byte - and four byte WiFi OUI */
- dot11f_unpack_ie_wpa((tpAniSirGlobal) halHandle,
- &ie[2 + 4], ie[1] - 4,
- &dot11WPAIE, false);
- /*Extract the multicast cipher, the encType for unicast
- cipher for wpa-none is none */
+ if (ie[1] < DOT11F_IE_WPA_MIN_LEN ||
+ ie[1] > DOT11F_IE_WPA_MAX_LEN) {
+ hdd_err("invalid ie len:%d", ie[1]);
+ return -EINVAL;
+ }
+ /*
+ * Unpack the WPA IE. Skip past the EID byte and
+ * length byte - and four byte WiFi OUI
+ */
+ ret = dot11f_unpack_ie_wpa(
+ (tpAniSirGlobal) halHandle,
+ &ie[2 + 4], ie[1] - 4,
+ &dot11WPAIE, false);
+ if (DOT11F_FAILED(ret)) {
+ hdd_err("unpack failed ret: 0x%x", ret);
+ return -EINVAL;
+ }
+ /*
+ * Extract the multicast cipher, the encType for
+ * unicast cipher for wpa-none is none
+ */
encryptionType =
hdd_translate_wpa_to_csr_encryption_type
(dot11WPAIE.multicast_cipher);
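
The added checks bound the WPA IE length against DOT11F_IE_WPA_MIN_LEN/MAX_LEN and only consume the parsed structure once dot11f_unpack_ie_wpa() reports success. A rough standalone version of that validate-then-parse pattern, with invented bounds and a placeholder parser:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WPA_OUI_LEN   4
#define WPA_MIN_LEN   6     /* hypothetical bounds for the sketch */
#define WPA_MAX_LEN  64

struct parsed_wpa {
	uint32_t multicast_cipher;
};

/* placeholder parser: a real one would unpack the full IE body */
static int unpack_wpa(const uint8_t *body, size_t len, struct parsed_wpa *out)
{
	if (len < 4)
		return -1;
	memcpy(&out->multicast_cipher, body, 4);
	return 0;
}

/*
 * validate the element length against both the advertised bounds and the
 * remaining buffer before handing the body to the parser, and only use the
 * parsed output when the parser reports success
 */
static int handle_wpa_ie(const uint8_t *ie, size_t buf_len,
			 struct parsed_wpa *out)
{
	if (buf_len < 2 || ie[1] > buf_len - 2)
		return -1;                       /* truncated element */
	if (ie[1] < WPA_MIN_LEN || ie[1] > WPA_MAX_LEN)
		return -1;                       /* implausible length */
	return unpack_wpa(&ie[2 + WPA_OUI_LEN], ie[1] - WPA_OUI_LEN, out);
}

int main(void)
{
	const uint8_t ie[] = { 0xdd, 10, 0x00, 0x50, 0xf2, 0x01,
			       0x04, 0xac, 0x0f, 0x00, 0x01, 0x00 };
	struct parsed_wpa wpa;

	printf("parse %s\n", handle_wpa_ie(ie, sizeof(ie), &wpa) ?
	       "failed" : "ok");
	return 0;
}
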
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.h b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.h
index 4539ffcc63d4..e77294d2f8ab 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.h
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_cfg80211.h
@@ -542,10 +542,6 @@ int wlan_hdd_disable_dfs_chan_scan(hdd_context_t *hdd_ctx,
int wlan_hdd_cfg80211_update_band(struct wiphy *wiphy,
tSirRFBand eBand);
-void hdd_get_bpf_offload_cb(void *hdd_context,
- struct sir_bpf_get_offload *data);
-void hdd_init_bpf_completion(void);
-
#if defined(CFG80211_DISCONNECTED_V2) || \
(LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0))
static inline void wlan_hdd_cfg80211_indicate_disconnect(struct net_device *dev,
@@ -609,6 +605,17 @@ void hdd_process_defer_disconnect(hdd_adapter_t *adapter);
int wlan_hdd_try_disconnect(hdd_adapter_t *adapter);
/**
+ * wlan_hdd_disconnect() - hdd disconnect api
+ * @pAdapter: Pointer to adapter
+ * @reason: Disconnect reason code
+ *
+ * This function is used to issue a disconnect request to SME
+ *
+ * Return: 0 for success, non-zero for failure
+ */
+int wlan_hdd_disconnect(hdd_adapter_t *pAdapter, u16 reason);
+
+/**
* hdd_bt_activity_cb() - callback function to receive bt activity
* @context: HDD context
* @bt_activity: specifies the kind of bt activity
@@ -671,4 +678,22 @@ void wlan_hdd_save_gtk_offload_params(hdd_adapter_t *adapter,
* Return : 0 on success and errno on failure
*/
int wlan_hdd_send_mode_change_event(void);
+
+/**
+ * wlan_hdd_send_sta_authorized_event() - Function to send station authorized
+ * event to user space in case of SAP
+ * @pAdapter: Pointer to the adapter
+ * @pHddCtx: HDD Context
+ * @mac_addr: MAC address of the STA for which the Authorized event needs to
+ * be sent
+ *
+ * This API is used to send the station authorized event to user space
+ *
+ * Return: QDF_STATUS_SUCCESS on success, an error value otherwise
+ */
+QDF_STATUS wlan_hdd_send_sta_authorized_event(
+ hdd_adapter_t *pAdapter,
+ hdd_context_t *pHddCtx,
+ const struct qdf_mac_addr *mac_addr);
#endif
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c
index c261757ed91f..a73f3eacdb84 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_driver_ops.c
@@ -47,6 +47,7 @@
#include "pld_common.h"
#include "wlan_hdd_driver_ops.h"
#include "wlan_hdd_scan.h"
+#include "wlan_hdd_ipa.h"
#ifdef MODULE
#define WLAN_MODULE_NAME module_name(THIS_MODULE)
@@ -301,24 +302,31 @@ void hdd_hif_close(void *hif_ctx)
* @bus_type: Underlying bus type
* @bid: Bus id passed by platform driver
*
- * Return: void
+ * Return: 0 - success, < 0 - failure
*/
-static void hdd_init_qdf_ctx(struct device *dev, void *bdev,
- enum qdf_bus_type bus_type,
- const struct hif_bus_id *bid)
+static int hdd_init_qdf_ctx(struct device *dev, void *bdev,
+ enum qdf_bus_type bus_type,
+ const struct hif_bus_id *bid)
{
qdf_device_t qdf_dev = cds_get_context(QDF_MODULE_ID_QDF_DEVICE);
if (!qdf_dev) {
hdd_err("Invalid QDF device");
- return;
+ return -EINVAL;
}
qdf_dev->dev = dev;
qdf_dev->drv_hdl = bdev;
qdf_dev->bus_type = bus_type;
qdf_dev->bid = bid;
- cds_smmu_mem_map_setup(qdf_dev);
+
+ if (cds_smmu_mem_map_setup(qdf_dev, hdd_ipa_is_present()) !=
+ QDF_STATUS_SUCCESS) {
+ hdd_err("cds_smmu_mem_map_setup() failed");
+ return -EFAULT;
+ }
+
+ return 0;
}
/**
@@ -385,7 +393,10 @@ static int wlan_hdd_probe(struct device *dev, void *bdev, const struct hif_bus_i
else
cds_set_load_in_progress(true);
- hdd_init_qdf_ctx(dev, bdev, bus_type, (const struct hif_bus_id *)bid);
+ ret = hdd_init_qdf_ctx(dev, bdev, bus_type,
+ (const struct hif_bus_id *)bid);
+ if (ret < 0)
+ goto err_init_qdf_ctx;
if (reinit) {
ret = hdd_wlan_re_init();
@@ -406,6 +417,7 @@ static int wlan_hdd_probe(struct device *dev, void *bdev, const struct hif_bus_i
} else {
cds_set_load_in_progress(false);
cds_set_driver_loaded(true);
+ hdd_start_complete(0);
}
hdd_allow_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_INIT);
@@ -431,6 +443,7 @@ err_hdd_deinit:
} else
cds_set_load_in_progress(false);
+err_init_qdf_ctx:
hdd_allow_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_INIT);
hdd_remove_pm_qos(dev);
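
hdd_init_qdf_ctx() used to return void, so a failed SMMU map setup went unnoticed; it now returns an error that wlan_hdd_probe() propagates through the new err_init_qdf_ctx label. A minimal sketch of that convert-to-int-and-bail pattern, using hypothetical stand-ins rather than the QDF/PLD APIs:

#include <stdio.h>

/* hypothetical, minimal stand-in for the QDF device context */
struct qdf_ctx {
	void *dev;
	int mapped;
};

/* was a void function; now reports failure so the probe path can bail out */
static int init_qdf_ctx(struct qdf_ctx *q, void *dev)
{
	if (!q || !dev)
		return -22;                 /* -EINVAL */
	q->dev = dev;
	/* pretend this step is the SMMU memory-map setup */
	q->mapped = 1;
	return 0;
}

static int probe(struct qdf_ctx *q, void *dev)
{
	int ret;

	ret = init_qdf_ctx(q, dev);
	if (ret < 0)
		goto err_init_qdf_ctx;

	printf("probe ok\n");
	return 0;

err_init_qdf_ctx:
	/* undo anything done before the failing step, then propagate */
	printf("probe failed: %d\n", ret);
	return ret;
}

int main(void)
{
	struct qdf_ctx q = { 0 };

	if (probe(&q, NULL))            /* NULL dev forces the error path */
		printf("error path exercised\n");
	return probe(&q, &q) ? 1 : 0;
}
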
@@ -532,12 +545,6 @@ static void wlan_hdd_shutdown(void)
{
void *hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
- if (hdd_get_conparam() == QDF_GLOBAL_FTM_MODE) {
- hdd_err("Crash recovery is not allowed in FTM mode");
- QDF_BUG(0);
- return;
- }
-
if (!hif_ctx) {
hdd_err("Failed to get HIF context, ignore SSR shutdown");
return;
@@ -548,6 +555,18 @@ static void wlan_hdd_shutdown(void)
hdd_err("Load/unload in progress, ignore SSR shutdown");
return;
}
+
+	/*
+	 * Force-complete all wait events before shutdown.
+	 * This is also done in "hdd_cleanup_on_fw_down" to clean up the
+	 * wait events of the north-bound APIs.
+	 * In case of SSR there is a significant delay between the FW down
+	 * event and wlan_hdd_shutdown, so there is a possibility of a race
+	 * where these wait events get completed in "hdd_cleanup_on_fw_down"
+	 * and some new event is added before shutdown.
+	 */
+ qdf_complete_wait_events();
+
/* this is for cases, where shutdown invoked from platform */
cds_set_recovery_in_progress(true);
hdd_wlan_ssr_shutdown_event();
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ext_scan.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ext_scan.c
index f807508e3350..eee24657a1a6 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ext_scan.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ext_scan.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -4028,6 +4028,13 @@ int wlan_hdd_cfg80211_set_epno_list(struct wiphy *wiphy,
return ret;
}
+#define PARAM_ID QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ID
+#define PARAM_REALM QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_REALM
+#define PARAM_ROAM_ID \
+ QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_CNSRTM_ID
+#define PARAM_ROAM_PLMN \
+ QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_PLMN
+
/**
* hdd_extscan_passpoint_fill_network_list() - passpoint fill network list
* @hddctx: HDD context
@@ -4046,7 +4053,8 @@ static int hdd_extscan_passpoint_fill_network_list(
{
struct nlattr *network[QCA_WLAN_VENDOR_ATTR_PNO_MAX + 1];
struct nlattr *networks;
- int rem1, len;
+ int rem1;
+ size_t len;
uint8_t index;
uint32_t expected_networks;
@@ -4074,49 +4082,47 @@ static int hdd_extscan_passpoint_fill_network_list(
}
/* Parse and fetch identifier */
- if (!network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ID]) {
+ if (!network[PARAM_ID]) {
hdd_err("attr passpoint id failed");
return -EINVAL;
}
- req_msg->networks[index].id = nla_get_u32(
- network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ID]);
+ req_msg->networks[index].id = nla_get_u32(network[PARAM_ID]);
hdd_debug("Id %u", req_msg->networks[index].id);
/* Parse and fetch realm */
- if (!network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_REALM]) {
+ if (!network[PARAM_REALM]) {
hdd_err("attr realm failed");
return -EINVAL;
}
- len = nla_len(
- network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_REALM]);
- if (len < 0 || len > SIR_PASSPOINT_REALM_LEN) {
- hdd_err("Invalid realm size %d", len);
+ len = nla_strlcpy(req_msg->networks[index].realm,
+ network[PARAM_REALM],
+ SIR_PASSPOINT_REALM_LEN);
+ /* Don't send partial realm to firmware */
+ if (len >= SIR_PASSPOINT_REALM_LEN) {
+ hdd_err("user passed invalid realm, len:%zu", len);
return -EINVAL;
}
- qdf_mem_copy(req_msg->networks[index].realm,
- nla_data(network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_REALM]),
- len);
- hdd_debug("realm len %d", len);
+
hdd_debug("realm: %s", req_msg->networks[index].realm);
/* Parse and fetch roaming consortium ids */
- if (!network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_CNSRTM_ID]) {
+ if (!network[PARAM_ROAM_ID]) {
hdd_err("attr roaming consortium ids failed");
return -EINVAL;
}
nla_memcpy(&req_msg->networks[index].roaming_consortium_ids,
- network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_CNSRTM_ID],
- sizeof(req_msg->networks[0].roaming_consortium_ids));
+ network[PARAM_ROAM_ID],
+ sizeof(req_msg->networks[0].roaming_consortium_ids));
hdd_debug("roaming consortium ids");
/* Parse and fetch plmn */
- if (!network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_PLMN]) {
+ if (!network[PARAM_ROAM_PLMN]) {
hdd_err("attr plmn failed");
return -EINVAL;
}
nla_memcpy(&req_msg->networks[index].plmn,
- network[QCA_WLAN_VENDOR_ATTR_PNO_PASSPOINT_NETWORK_PARAM_ROAM_PLMN],
- SIR_PASSPOINT_PLMN_LEN);
+ network[PARAM_ROAM_PLMN],
+ SIR_PASSPOINT_PLMN_LEN);
hdd_debug("plmn %02x:%02x:%02x)",
req_msg->networks[index].plmn[0],
req_msg->networks[index].plmn[1],
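
The realm handling above now relies on the strlcpy-style return value (the length the source would have needed) to detect truncation and reject oversized input, instead of sending a partial realm to firmware. A self-contained sketch of that check, with a local bounded-copy helper standing in for nla_strlcpy():

#include <stdio.h>
#include <string.h>

#define REALM_LEN 16

/*
 * strlcpy-like helper: copies with truncation and returns the length the
 * source would need, so the caller can detect a partial copy
 */
static size_t copy_bounded(char *dst, const char *src, size_t size)
{
	size_t src_len = strlen(src);

	if (size) {
		size_t n = src_len >= size ? size - 1 : src_len;

		memcpy(dst, src, n);
		dst[n] = '\0';
	}
	return src_len;
}

static int set_realm(char realm[REALM_LEN], const char *user_realm)
{
	size_t len = copy_bounded(realm, user_realm, REALM_LEN);

	/* don't send a partial realm to firmware: reject oversized input */
	if (len >= REALM_LEN)
		return -1;
	return 0;
}

int main(void)
{
	char realm[REALM_LEN];

	printf("%d\n", set_realm(realm, "example.com"));              /* 0 */
	printf("%d\n", set_realm(realm, "much-too-long-realm-name")); /* -1 */
	return 0;
}
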
@@ -4351,6 +4357,11 @@ int wlan_hdd_cfg80211_reset_passpoint_list(struct wiphy *wiphy,
return ret;
}
+#undef PARAM_ID
+#undef PARAM_REALM
+#undef PARAM_ROAM_ID
+#undef PARAM_ROAM_PLMN
+
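
The PARAM_* defines above are the driver's usual trick for keeping very long vendor-attribute identifiers within the line limit: define a short local alias immediately before use and #undef it again right after. In miniature, with a made-up identifier:

#include <stdio.h>

#define QCA_EXAMPLE_VENDOR_ATTR_WITH_A_DELIBERATELY_LONG_NAME 7

/* short local alias, #undef'd once the block that needs it is done */
#define PARAM_EXAMPLE QCA_EXAMPLE_VENDOR_ATTR_WITH_A_DELIBERATELY_LONG_NAME

int main(void)
{
	printf("%d\n", PARAM_EXAMPLE);
	return 0;
}

#undef PARAM_EXAMPLE
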
/**
* wlan_hdd_init_completion_extwow() - Initialize ext wow variable
* @hdd_ctx: Global HDD context
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.c
index 0b78116363d0..5607444352c0 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_hostapd.c
@@ -203,6 +203,7 @@ int hdd_sap_context_init(hdd_context_t *hdd_ctx)
qdf_spinlock_create(&hdd_ctx->sap_update_info_lock);
qdf_atomic_init(&hdd_ctx->dfs_radar_found);
+ qdf_atomic_init(&hdd_ctx->is_acs_allowed);
return 0;
}
@@ -404,6 +405,10 @@ static int __hdd_hostapd_stop(struct net_device *dev)
hdd_stop_adapter(hdd_ctx, adapter, true);
clear_bit(DEVICE_IFACE_OPENED, &adapter->event_flags);
+
+ if (!hdd_is_cli_iface_up(hdd_ctx))
+ sme_scan_flush_result(hdd_ctx->hHal);
+
/* Stop all tx queues */
hdd_info("Disabling queues");
wlan_hdd_netif_queue_control(adapter,
@@ -557,6 +562,7 @@ static int __hdd_hostapd_set_mac_address(struct net_device *dev, void *addr)
hdd_adapter_t *adapter;
hdd_context_t *hdd_ctx;
int ret = 0;
+ struct qdf_mac_addr mac_addr;
ENTER_DEV(dev);
@@ -566,6 +572,23 @@ static int __hdd_hostapd_set_mac_address(struct net_device *dev, void *addr)
if (0 != ret)
return ret;
+ qdf_mem_copy(&mac_addr, psta_mac_addr->sa_data, QDF_MAC_ADDR_SIZE);
+
+ if (qdf_is_macaddr_zero(&mac_addr)) {
+ hdd_err("MAC is all zero");
+ return -EINVAL;
+ }
+
+ if (qdf_is_macaddr_broadcast(&mac_addr)) {
+ hdd_err("MAC is Broadcast");
+ return -EINVAL;
+ }
+
+ if (ETHER_IS_MULTICAST(psta_mac_addr->sa_data)) {
+ hdd_err("MAC is Multicast");
+ return -EINVAL;
+ }
+
memcpy(dev->dev_addr, psta_mac_addr->sa_data, ETH_ALEN);
EXIT();
return 0;
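
The added checks reject all-zero, broadcast and multicast addresses before the new MAC is copied into dev->dev_addr. A standalone sketch of the same validation, using plain memcmp and bit tests instead of the qdf_is_macaddr_* and ETHER_IS_MULTICAST helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static bool mac_is_zero(const uint8_t *mac)
{
	static const uint8_t zero[ETH_ALEN];

	return memcmp(mac, zero, ETH_ALEN) == 0;
}

static bool mac_is_broadcast(const uint8_t *mac)
{
	static const uint8_t bcast[ETH_ALEN] = { 0xff, 0xff, 0xff,
						 0xff, 0xff, 0xff };

	return memcmp(mac, bcast, ETH_ALEN) == 0;
}

static bool mac_is_multicast(const uint8_t *mac)
{
	return mac[0] & 0x01;       /* group bit set means multicast */
}

/*
 * reject all-zero, broadcast and multicast addresses before programming
 * the interface address, mirroring the checks added in the hunk above
 */
static int set_mac_address(uint8_t dev_addr[ETH_ALEN], const uint8_t *new_mac)
{
	if (mac_is_zero(new_mac) || mac_is_broadcast(new_mac) ||
	    mac_is_multicast(new_mac))
		return -1;
	memcpy(dev_addr, new_mac, ETH_ALEN);
	return 0;
}

int main(void)
{
	uint8_t dev_addr[ETH_ALEN] = { 0 };
	const uint8_t good[ETH_ALEN] = { 0x02, 0x00, 0x11, 0x22, 0x33, 0x44 };
	const uint8_t mcast[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("%d %d\n", set_mac_address(dev_addr, good),
	       set_mac_address(dev_addr, mcast));    /* 0 -1 */
	return 0;
}
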
@@ -1713,7 +1736,7 @@ QDF_STATUS hdd_hostapd_sap_event_cb(tpSap_Event pSapEvent,
HDD_IPA_AP_CONNECT,
pHostapdAdapter->dev->dev_addr);
if (status) {
- hdd_err("WLAN_AP_CONNECT event failed!!");
+ hdd_err("WLAN_AP_CONNECT event failed");
/*
* Make sure to set the event before proceeding
* for error handling otherwise caller thread
@@ -1721,7 +1744,6 @@ QDF_STATUS hdd_hostapd_sap_event_cb(tpSap_Event pSapEvent,
* connection will go through before that.
*/
qdf_event_set(&pHostapdState->qdf_event);
- goto stopbss;
}
}
@@ -1855,16 +1877,6 @@ QDF_STATUS hdd_hostapd_sap_event_cb(tpSap_Event pSapEvent,
/* Invalidate the channel info. */
pHddApCtx->operatingChannel = 0;
- if (hdd_ipa_is_enabled(pHddCtx)) {
- status = hdd_ipa_wlan_evt(pHostapdAdapter,
- pHddApCtx->uBCStaId,
- HDD_IPA_AP_DISCONNECT,
- pHostapdAdapter->dev->dev_addr);
- if (status) {
- hdd_err("WLAN_AP_DISCONNECT event failed!!");
- goto stopbss;
- }
- }
/* reset the dfs_cac_status and dfs_cac_block_tx flag only when
* the last BSS is stopped
@@ -2103,10 +2115,8 @@ QDF_STATUS hdd_hostapd_sap_event_cb(tpSap_Event pSapEvent,
status = hdd_ipa_wlan_evt(pHostapdAdapter,
event->staId, HDD_IPA_CLIENT_CONNECT_EX,
event->staMac.bytes);
- if (status) {
+ if (status)
hdd_err("WLAN_CLIENT_CONNECT_EX event failed");
- goto stopbss;
- }
}
DPTRACE(qdf_dp_trace_mgmt_pkt(QDF_DP_TRACE_MGMT_PACKET_RECORD,
@@ -2567,6 +2577,7 @@ QDF_STATUS hdd_hostapd_sap_event_cb(tpSap_Event pSapEvent,
/* send vendor event to hostapd only for hostapd based acs*/
if (!pHddCtx->config->force_sap_acs)
wlan_hdd_cfg80211_acs_ch_select_evt(pHostapdAdapter);
+ qdf_atomic_set(&pHddCtx->is_acs_allowed, 0);
return QDF_STATUS_SUCCESS;
case eSAP_ECSA_CHANGE_CHAN_IND:
hdd_debug("Channel change indication from peer for channel %d",
@@ -2680,11 +2691,11 @@ int hdd_softap_unpack_ie(tHalHandle halHandle,
bool *pMFPRequired,
uint16_t gen_ie_len, uint8_t *gen_ie)
{
- tDot11fIERSN dot11RSNIE;
- tDot11fIEWPA dot11WPAIE;
-
+ uint32_t ret;
uint8_t *pRsnIe;
uint16_t RSNIeLen;
+ tDot11fIERSN dot11RSNIE = {0};
+ tDot11fIEWPA dot11WPAIE = {0};
if (NULL == halHandle) {
hdd_err("Error haHandle returned NULL");
@@ -2707,20 +2718,24 @@ int hdd_softap_unpack_ie(tHalHandle halHandle,
RSNIeLen = gen_ie_len - 2;
/* Unpack the RSN IE */
memset(&dot11RSNIE, 0, sizeof(tDot11fIERSN));
- dot11f_unpack_ie_rsn((tpAniSirGlobal) halHandle,
- pRsnIe, RSNIeLen, &dot11RSNIE, false);
+ ret = sme_unpack_rsn_ie(halHandle, pRsnIe, RSNIeLen,
+ &dot11RSNIE, false);
+ if (!DOT11F_SUCCEEDED(ret)) {
+ hdd_err("unpack failed, ret: 0x%x", ret);
+ return -EINVAL;
+ }
/* Copy out the encryption and authentication types */
hdd_debug("pairwise cipher suite count: %d",
dot11RSNIE.pwise_cipher_suite_count);
hdd_debug("authentication suite count: %d",
- dot11RSNIE.akm_suite_count);
+ dot11RSNIE.akm_suite_cnt);
/* Here we have followed the apple base code,
* but probably I suspect we can do something different
- * dot11RSNIE.akm_suite_count
+ * dot11RSNIE.akm_suite_cnt
* Just translate the FIRST one
*/
*pAuthType =
- hdd_translate_rsn_to_csr_auth_type(dot11RSNIE.akm_suites[0]);
+ hdd_translate_rsn_to_csr_auth_type(dot11RSNIE.akm_suite[0]);
/* dot11RSNIE.pwise_cipher_suite_count */
*pEncryptType =
hdd_translate_rsn_to_csr_encryption_type(dot11RSNIE.
@@ -2743,8 +2758,12 @@ int hdd_softap_unpack_ie(tHalHandle halHandle,
RSNIeLen = gen_ie_len - (2 + 4);
/* Unpack the WPA IE */
memset(&dot11WPAIE, 0, sizeof(tDot11fIEWPA));
- dot11f_unpack_ie_wpa((tpAniSirGlobal) halHandle,
+ ret = dot11f_unpack_ie_wpa((tpAniSirGlobal) halHandle,
pRsnIe, RSNIeLen, &dot11WPAIE, false);
+ if (DOT11F_FAILED(ret)) {
+ hdd_err("unpack failed, ret: 0x%x", ret);
+ return -EINVAL;
+ }
/* Copy out the encryption and authentication types */
hdd_debug("WPA unicast cipher suite count: %d",
dot11WPAIE.unicast_cipher_count);
@@ -4683,7 +4702,7 @@ static int __iw_get_channel_list(struct net_device *dev,
if (hdd_ctx->config->dot11p_mode)
band_end_channel = CHAN_ENUM_184;
else
- band_end_channel = CHAN_ENUM_165;
+ band_end_channel = CHAN_ENUM_173;
}
if (hostapd_adapter->device_mode == QDF_STA_MODE &&
@@ -6614,7 +6633,7 @@ QDF_STATUS hdd_init_ap_mode(hdd_adapter_t *pAdapter, bool reinit)
if (!reinit) {
pAdapter->sessionCtx.ap.sapConfig.acs_cfg.acs_mode = false;
- qdf_mem_free(pAdapter->sessionCtx.ap.sapConfig.acs_cfg.ch_list);
+ wlan_hdd_undo_acs(pAdapter);
qdf_mem_zero(&pAdapter->sessionCtx.ap.sapConfig.acs_cfg,
sizeof(struct sap_acs_cfg));
}
@@ -6817,19 +6836,24 @@ static bool wlan_hdd_rate_is_11g(u8 rate)
*/
static bool wlan_hdd_get_sap_obss(hdd_adapter_t *pHostapdAdapter)
{
- uint8_t ht_cap_ie[DOT11F_IE_HTCAPS_MAX_LEN];
+ uint32_t ret;
+ uint8_t *ie = NULL;
tDot11fIEHTCaps dot11_ht_cap_ie = {0};
+ uint8_t ht_cap_ie[DOT11F_IE_HTCAPS_MAX_LEN];
hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(pHostapdAdapter);
beacon_data_t *beacon = pHostapdAdapter->sessionCtx.ap.beacon;
- uint8_t *ie = NULL;
ie = wlan_hdd_cfg80211_get_ie_ptr(beacon->tail, beacon->tail_len,
WLAN_EID_HT_CAPABILITY);
if (ie && ie[1]) {
qdf_mem_copy(ht_cap_ie, &ie[2], DOT11F_IE_HTCAPS_MAX_LEN);
- dot11f_unpack_ie_ht_caps((tpAniSirGlobal)hdd_ctx->hHal,
- ht_cap_ie, ie[1], &dot11_ht_cap_ie,
- false);
+ ret = dot11f_unpack_ie_ht_caps((tpAniSirGlobal)hdd_ctx->hHal,
+ ht_cap_ie, ie[1],
+ &dot11_ht_cap_ie, false);
+ if (DOT11F_FAILED(ret)) {
+ hdd_err("unpack failed, ret: 0x%x", ret);
+ return false;
+ }
return dot11_ht_cap_ie.supportedChannelWidthSet;
}
@@ -7861,6 +7885,38 @@ static inline int wlan_hdd_set_udp_resp_offload(hdd_adapter_t *padapter,
}
#endif
+static void hdd_check_and_disconnect_sta_on_invalid_channel(
+ hdd_context_t *hdd_ctx)
+{
+ hdd_adapter_t *sta_adapter;
+ uint8_t sta_chan;
+
+ sta_chan = hdd_get_operating_channel(hdd_ctx, QDF_STA_MODE);
+
+ if (!sta_chan) {
+ hdd_err("STA not connected");
+ return;
+ }
+
+ hdd_err("STA connected on chan %d", sta_chan);
+
+ if (sme_is_channel_valid(hdd_ctx->hHal, sta_chan)) {
+ hdd_err("STA connected on chan %d and it is valid", sta_chan);
+ return;
+ }
+
+ sta_adapter = hdd_get_adapter(hdd_ctx, QDF_STA_MODE);
+
+ if (!sta_adapter) {
+ hdd_err("STA adapter does not exist");
+ return;
+ }
+
+ hdd_err("chan %d not valid, issue disconnect", sta_chan);
+ /* Issue Disconnect request */
+ wlan_hdd_disconnect(sta_adapter, eCSR_DISCONNECT_REASON_DEAUTH);
+}
+
/**
* wlan_hdd_cfg80211_start_bss() - start bss
* @pHostapdAdapter: Pointer to hostapd adapter
@@ -7902,6 +7958,7 @@ int wlan_hdd_cfg80211_start_bss(hdd_adapter_t *pHostapdAdapter,
enum dfs_mode mode;
bool disable_fw_tdls_state = false;
uint8_t ignore_cac = 0;
+ hdd_adapter_t *sta_adapter;
ENTER();
@@ -7918,6 +7975,30 @@ int wlan_hdd_cfg80211_start_bss(hdd_adapter_t *pHostapdAdapter,
}
}
+ /*
+ * For STA+SAP concurrency support from GUI, first STA connection gets
+ * triggered and while it is in progress, SAP start also comes up.
+ * Once STA association is successful, STA connect event is sent to
+ * kernel which gets queued in kernel workqueue and supplicant won't
+ * process M1 received from AP and send M2 until this NL80211_CONNECT
+ * event is received. Workqueue is not scheduled as RTNL lock is already
+ * taken by hostapd thread which has issued start_bss command to driver.
+ * Driver cannot complete start_bss as the pending command at the head
+ * of the SME command pending list is hw_mode_update for STA session
+ * which cannot be processed as SME is in WAITforKey state for STA
+ * interface. The start_bss command for SAP interface is queued behind
+ * the hw_mode_update command and so it cannot be processed until
+ * hw_mode_update command is processed. This is causing a deadlock so
+ * disconnect the STA interface first if connection or key exchange is
+ * in progress and then start SAP interface.
+ */
+ sta_adapter = hdd_get_sta_connection_in_progress(pHddCtx);
+ if (sta_adapter) {
+ hdd_debug("Disconnecting STA with session id: %d",
+ sta_adapter->sessionId);
+ wlan_hdd_disconnect(sta_adapter, eCSR_DISCONNECT_REASON_DEAUTH);
+ }
+
sme_config = qdf_mem_malloc(sizeof(tSmeConfigParams));
if (!sme_config) {
hdd_err("failed to allocate memory");
@@ -7941,6 +8022,11 @@ int wlan_hdd_cfg80211_start_bss(hdd_adapter_t *pHostapdAdapter,
qdf_mem_free(sme_config);
return -EINVAL;
}
+
+ /* check if STA is on indoor channel*/
+ if (cds_is_force_scc())
+ hdd_check_and_disconnect_sta_on_invalid_channel(
+ pHddCtx);
}
pConfig = &pHostapdAdapter->sessionCtx.ap.sapConfig;
@@ -8083,8 +8169,6 @@ int wlan_hdd_cfg80211_start_bss(hdd_adapter_t *pHostapdAdapter,
wlansap_set_tx_leakage_threshold(hHal,
iniConfig->sap_tx_leakage_threshold);
- wlansap_set_etsi_srd_chan_support(hHal,
- iniConfig->etsi_srd_chan_in_master_mode);
capab_info = pMgmt_frame->u.beacon.capab_info;
pConfig->privacy = (pMgmt_frame->u.beacon.capab_info &
@@ -8395,10 +8479,12 @@ int wlan_hdd_cfg80211_start_bss(hdd_adapter_t *pHostapdAdapter,
pConfig->ch_width_orig = CH_WIDTH_20MHZ;
}
- if (cds_is_force_scc() &&
+ if (!wma_is_hw_dbs_capable() &&
+ (pHostapdAdapter->device_mode == QDF_SAP_MODE) &&
+ cds_is_force_scc() &&
cds_mode_specific_get_channel(CDS_STA_MODE)) {
pConfig->channel = cds_mode_specific_get_channel(CDS_STA_MODE);
- hdd_debug("force SCC is enabled and STA is active, override the SAP channel to %d",
+ hdd_debug("DBS is disabled, force SCC is enabled and STA is active, override the SAP channel to %d",
pConfig->channel);
} else if (wlan_hdd_setup_driver_overrides(pHostapdAdapter)) {
ret = -EINVAL;
@@ -8557,11 +8643,7 @@ error:
if (sme_config)
qdf_mem_free(sme_config);
clear_bit(SOFTAP_INIT_DONE, &pHostapdAdapter->event_flags);
- if (pHostapdAdapter->sessionCtx.ap.sapConfig.acs_cfg.ch_list) {
- qdf_mem_free(pHostapdAdapter->sessionCtx.ap.sapConfig.
- acs_cfg.ch_list);
- pHostapdAdapter->sessionCtx.ap.sapConfig.acs_cfg.ch_list = NULL;
- }
+ wlan_hdd_undo_acs(pHostapdAdapter);
ret_status:
if (disable_fw_tdls_state)
@@ -8593,7 +8675,7 @@ static int __wlan_hdd_cfg80211_stop_ap(struct wiphy *wiphy,
hdd_adapter_list_node_t *pNext = NULL;
tsap_Config_t *pConfig;
- ENTER();
+ hdd_info("enter(%s)", netdev_name(dev));
if (QDF_GLOBAL_FTM_MODE == hdd_get_conparam()) {
hdd_err("Command not allowed in FTM mode");
@@ -8632,6 +8714,18 @@ static int __wlan_hdd_cfg80211_stop_ap(struct wiphy *wiphy,
if (0 != ret)
return ret;
+ /*
+ * If a STA connection is in progress in another adapter, disconnect
+ * the STA and complete the SAP operation. STA will reconnect
+ * after SAP stop is done.
+ */
+ staAdapter = hdd_get_sta_connection_in_progress(pHddCtx);
+ if (staAdapter) {
+ hdd_debug("Disconnecting STA with session id: %d",
+ staAdapter->sessionId);
+ wlan_hdd_disconnect(staAdapter, eCSR_DISCONNECT_REASON_DEAUTH);
+ }
+
if (pAdapter->device_mode == QDF_SAP_MODE) {
wlan_hdd_del_station(pAdapter);
hdd_green_ap_stop_bss(pHddCtx);
@@ -8709,6 +8803,10 @@ static int __wlan_hdd_cfg80211_stop_ap(struct wiphy *wiphy,
hdd_hostapd_state_t *pHostapdState =
WLAN_HDD_GET_HOSTAP_STATE_PTR(pAdapter);
+ /* Set the stop_bss_in_progress flag */
+ wlansap_set_stop_bss_inprogress(
+ WLAN_HDD_GET_SAP_CTX_PTR(pAdapter), true);
+
qdf_event_reset(&pHostapdState->qdf_stop_bss_event);
status = wlansap_stop_bss(WLAN_HDD_GET_SAP_CTX_PTR(pAdapter));
if (QDF_IS_STATUS_SUCCESS(status)) {
@@ -8719,10 +8817,19 @@ static int __wlan_hdd_cfg80211_stop_ap(struct wiphy *wiphy,
if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
hdd_err("qdf wait for single_event failed!!");
+
+ if (hdd_ipa_uc_is_enabled(pHddCtx))
+ hdd_ipa_clean_adapter_iface(pAdapter);
+
QDF_ASSERT(0);
}
}
clear_bit(SOFTAP_BSS_STARTED, &pAdapter->event_flags);
+
+ /* Clear the stop_bss_in_progress flag */
+ wlansap_set_stop_bss_inprogress(
+ WLAN_HDD_GET_SAP_CTX_PTR(pAdapter), false);
+
/*BSS stopped, clear the active sessions for this device mode*/
cds_decr_session_set_pcl(pAdapter->device_mode,
pAdapter->sessionId);
@@ -8922,7 +9029,7 @@ static int __wlan_hdd_cfg80211_start_ap(struct wiphy *wiphy,
bool sta_sap_scc_on_dfs_chan;
uint16_t sta_cnt;
- ENTER();
+ hdd_info("enter(%s)", netdev_name(dev));
clear_bit(SOFTAP_INIT_DONE, &pAdapter->event_flags);
if (QDF_GLOBAL_FTM_MODE == hdd_get_conparam()) {
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ipa.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ipa.c
index fdbab18416bb..2e3420219b88 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ipa.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_ipa.c
@@ -704,6 +704,14 @@ static void hdd_ipa_uc_loaded_uc_cb(void *priv_ctxt)
}
hdd_ipa = (struct hdd_ipa_priv *)priv_ctxt;
+ hdd_ipa->uc_loaded = true;
+
+ uc_op_work = &hdd_ipa->uc_op_work[HDD_IPA_UC_OPCODE_UC_READY];
+
+ if (!list_empty(&uc_op_work->work.entry))
+ /* uc_op_work is not initialized yet */
+ return;
+
msg = (struct op_msg_type *)qdf_mem_malloc(sizeof(*msg));
if (!msg) {
HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR, "op_msg allocation fails");
@@ -712,8 +720,6 @@ static void hdd_ipa_uc_loaded_uc_cb(void *priv_ctxt)
msg->op_code = HDD_IPA_UC_OPCODE_UC_READY;
- uc_op_work = &hdd_ipa->uc_op_work[msg->op_code];
-
/* When the same uC OPCODE is already pended, just return */
if (uc_op_work->msg)
goto done;
@@ -926,6 +932,22 @@ static inline bool hdd_ipa_is_clk_scaling_enabled(hdd_context_t *hdd_ctx)
}
/**
+ * hdd_ipa_is_fw_wdi_actived() - Are FW WDI pipes activated?
+ * @hdd_ctx: Global HDD context
+ *
+ * Return: true if FW WDI pipes activated, otherwise false
+ */
+bool hdd_ipa_is_fw_wdi_actived(hdd_context_t *hdd_ctx)
+{
+ struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
+
+ if (!hdd_ipa)
+ return false;
+
+ return (HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe);
+}
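
hdd_ipa_is_fw_wdi_actived() replaces the repeated "activated_fw_pipe == HDD_IPA_UC_NUM_WDI_PIPE" comparison scattered across this file and folds in the NULL check on the IPA context. The idea, reduced to a standalone sketch with hypothetical structures:

#include <stdbool.h>
#include <stdio.h>

#define NUM_WDI_PIPES 2

struct ipa_priv {
	int activated_fw_pipe;
};

struct driver_ctx {
	struct ipa_priv *ipa;       /* may be NULL when IPA is disabled */
};

/*
 * one predicate instead of repeating the magic-number comparison at every
 * call site; it also centralises the NULL check
 */
static bool fw_wdi_pipes_active(const struct driver_ctx *ctx)
{
	if (!ctx->ipa)
		return false;
	return ctx->ipa->activated_fw_pipe == NUM_WDI_PIPES;
}

int main(void)
{
	struct ipa_priv ipa = { .activated_fw_pipe = NUM_WDI_PIPES };
	struct driver_ctx ctx = { .ipa = &ipa };

	if (fw_wdi_pipes_active(&ctx))
		printf("WDI pipes are up\n");
	return 0;
}
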
+
+/**
* hdd_ipa_uc_rt_debug_host_fill - fill rt debug buffer
* @ctext: pointer to hdd context.
*
@@ -1505,7 +1527,7 @@ static void __hdd_ipa_uc_stat_query(hdd_context_t *hdd_ctx,
}
qdf_mutex_acquire(&hdd_ipa->ipa_lock);
- if ((HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
+ if (hdd_ipa_is_fw_wdi_actived(hdd_ctx) &&
(false == hdd_ipa->resource_loading)) {
*ipa_tx_diff = hdd_ipa->ipa_tx_packets_diff;
*ipa_rx_diff = hdd_ipa->ipa_rx_packets_diff;
@@ -1557,9 +1579,8 @@ static void __hdd_ipa_uc_stat_request(hdd_adapter_t *adapter, uint8_t reason)
return;
}
- hdd_debug("STAT REQ Reason %d", reason);
qdf_mutex_acquire(&hdd_ipa->ipa_lock);
- if ((HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
+ if (hdd_ipa_is_fw_wdi_actived(hdd_ctx) &&
(false == hdd_ipa->resource_loading)) {
hdd_ipa->stat_req_reason = reason;
qdf_mutex_release(&hdd_ipa->ipa_lock);
@@ -1731,7 +1752,7 @@ static int hdd_ipa_uc_enable_pipes(struct hdd_ipa_priv *hdd_ipa)
int result = 0;
p_cds_contextType cds_ctx = hdd_ipa->hdd_ctx->pcds_context;
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "enter");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "enter");
if (!hdd_ipa->ipa_pipes_down) {
/*
@@ -1787,7 +1808,7 @@ static int hdd_ipa_uc_enable_pipes(struct hdd_ipa_priv *hdd_ipa)
hdd_ipa->ipa_pipes_down = false;
end:
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "exit: ipa_pipes_down=%d",
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "exit: ipa_pipes_down=%d",
hdd_ipa->ipa_pipes_down);
return result;
}
@@ -1802,7 +1823,7 @@ static int hdd_ipa_uc_disable_pipes(struct hdd_ipa_priv *hdd_ipa)
{
int result = 0;
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "enter");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "enter");
if (hdd_ipa->ipa_pipes_down) {
/*
@@ -1850,7 +1871,7 @@ static int hdd_ipa_uc_disable_pipes(struct hdd_ipa_priv *hdd_ipa)
hdd_ipa->ipa_pipes_down = true;
end:
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "exit: ipa_pipes_down=%d",
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "exit: ipa_pipes_down=%d",
hdd_ipa->ipa_pipes_down);
return result;
}
@@ -1863,7 +1884,7 @@ end:
*/
static int hdd_ipa_uc_handle_first_con(struct hdd_ipa_priv *hdd_ipa)
{
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "enter");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "enter");
hdd_ipa->activated_fw_pipe = 0;
hdd_ipa->resource_loading = true;
@@ -1900,7 +1921,7 @@ static int hdd_ipa_uc_handle_first_con(struct hdd_ipa_priv *hdd_ipa)
hdd_ipa->resource_loading = false;
}
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "exit: IPA WDI Pipes activated!");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "exit: IPA WDI Pipes activated!");
return 0;
}
@@ -1914,7 +1935,7 @@ static void hdd_ipa_uc_handle_last_discon(struct hdd_ipa_priv *hdd_ipa)
{
p_cds_contextType cds_ctx = hdd_ipa->hdd_ctx->pcds_context;
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "enter");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "enter");
if (!cds_ctx || !cds_ctx->pdev_txrx_ctx) {
HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR, "txrx context is NULL");
@@ -1926,10 +1947,8 @@ static void hdd_ipa_uc_handle_last_discon(struct hdd_ipa_priv *hdd_ipa)
INIT_COMPLETION(hdd_ipa->ipa_resource_comp);
HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "Disable FW RX PIPE");
ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, false, false);
- HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "Disable FW TX PIPE");
- ol_txrx_ipa_uc_set_active(cds_ctx->pdev_txrx_ctx, false, true);
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "exit: IPA WDI Pipes deactivated");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "exit: IPA WDI Pipes deactivated");
}
/**
@@ -1965,8 +1984,9 @@ hdd_ipa_uc_rm_notify_handler(void *context, enum ipa_rm_event event)
/* Differed RM Granted */
qdf_mutex_acquire(&hdd_ipa->ipa_lock);
if ((false == hdd_ipa->resource_unloading) &&
- (!hdd_ipa->activated_fw_pipe)) {
+ (!hdd_ipa->activated_fw_pipe)) {
hdd_ipa_uc_enable_pipes(hdd_ipa);
+ hdd_ipa->resource_loading = false;
}
qdf_mutex_release(&hdd_ipa->ipa_lock);
break;
@@ -2120,7 +2140,6 @@ static void hdd_ipa_uc_loaded_handler(struct hdd_ipa_priv *ipa_ctxt)
return;
}
- ipa_ctxt->uc_loaded = true;
/* Connect pipe */
ipa_connect_wdi_pipe(&ipa_ctxt->cons_pipe_in, &pipe_out);
ipa_ctxt->tx_pipe_handle = pipe_out.clnt_hdl;
@@ -2608,8 +2627,14 @@ static void hdd_ipa_uc_op_cb(struct op_msg_type *op_msg, void *usr_ctxt)
struct ipa_uc_fw_stats *uc_fw_stat;
struct hdd_ipa_priv *hdd_ipa;
hdd_context_t *hdd_ctx;
+ struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
QDF_STATUS status = QDF_STATUS_SUCCESS;
+ if (!pdev) {
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_FATAL, "pdev is NULL");
+ return;
+ }
+
if (!op_msg || !usr_ctxt) {
HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR, "INVALID ARG");
return;
@@ -2642,7 +2667,7 @@ static void hdd_ipa_uc_op_cb(struct op_msg_type *op_msg, void *usr_ctxt)
(HDD_IPA_UC_OPCODE_RX_RESUME == msg->op_code)) {
qdf_mutex_acquire(&hdd_ipa->ipa_lock);
hdd_ipa->activated_fw_pipe++;
- if (HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) {
+ if (hdd_ipa_is_fw_wdi_actived(hdd_ctx)) {
hdd_ipa->resource_loading = false;
complete(&hdd_ipa->ipa_resource_comp);
if (hdd_ipa->wdi_enabled == false) {
@@ -2660,8 +2685,16 @@ static void hdd_ipa_uc_op_cb(struct op_msg_type *op_msg, void *usr_ctxt)
}
qdf_mutex_release(&hdd_ipa->ipa_lock);
} else if ((HDD_IPA_UC_OPCODE_TX_SUSPEND == msg->op_code) ||
- (HDD_IPA_UC_OPCODE_RX_SUSPEND == msg->op_code)) {
+ (HDD_IPA_UC_OPCODE_RX_SUSPEND == msg->op_code)) {
qdf_mutex_acquire(&hdd_ipa->ipa_lock);
+
+ if (HDD_IPA_UC_OPCODE_RX_SUSPEND == msg->op_code) {
+ hdd_ipa_uc_disable_pipes(hdd_ipa);
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG,
+ "Disable FW TX PIPE");
+ ol_txrx_ipa_uc_set_active(pdev, false, true);
+ }
+
hdd_ipa->activated_fw_pipe--;
if (!hdd_ipa->activated_fw_pipe) {
/*
@@ -2670,7 +2703,6 @@ static void hdd_ipa_uc_op_cb(struct op_msg_type *op_msg, void *usr_ctxt)
*/
hdd_ipa->resource_unloading = false;
complete(&hdd_ipa->ipa_resource_comp);
- hdd_ipa_uc_disable_pipes(hdd_ipa);
if (hdd_ipa_is_rm_enabled(hdd_ipa->hdd_ctx))
ipa_rm_release_resource(
IPA_RM_RESOURCE_WLAN_PROD);
@@ -3344,9 +3376,9 @@ static void hdd_ipa_cleanup_pending_event(struct hdd_ipa_priv *hdd_ipa)
int hdd_ipa_uc_ol_deinit(hdd_context_t *hdd_ctx)
{
struct hdd_ipa_priv *hdd_ipa = hdd_ctx->hdd_ipa;
- int ret = 0;
+ int i, ret = 0;
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "enter");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "enter");
if (!hdd_ipa_uc_is_enabled(hdd_ctx))
return ret;
@@ -3369,7 +3401,13 @@ int hdd_ipa_uc_ol_deinit(hdd_context_t *hdd_ctx)
hdd_ipa_cleanup_pending_event(hdd_ipa);
qdf_mutex_release(&hdd_ipa->ipa_lock);
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "exit: ret=%d", ret);
+ for (i = 0; i < HDD_IPA_UC_OPCODE_MAX; i++) {
+ cancel_work_sync(&hdd_ipa->uc_op_work[i].work);
+ qdf_mem_free(hdd_ipa->uc_op_work[i].msg);
+ hdd_ipa->uc_op_work[i].msg = NULL;
+ }
+
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "exit: ret=%d", ret);
return ret;
}
@@ -3388,7 +3426,7 @@ static void __hdd_ipa_uc_force_pipe_shutdown(hdd_context_t *hdd_ctx)
{
struct hdd_ipa_priv *hdd_ipa;
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "enter");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "enter");
if (!hdd_ipa_is_enabled(hdd_ctx) || !hdd_ctx->hdd_ipa)
return;
@@ -3403,7 +3441,7 @@ static void __hdd_ipa_uc_force_pipe_shutdown(hdd_context_t *hdd_ctx)
"IPA pipes are down, do nothing");
}
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "exit");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "exit");
}
/**
@@ -3500,7 +3538,7 @@ static int hdd_ipa_uc_disconnect_client(hdd_adapter_t *adapter)
int ret = 0;
int i;
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "enter");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "enter");
for (i = 0; i < WLAN_MAX_STA_COUNT; i++) {
if (qdf_is_macaddr_broadcast(&adapter->aStaInfo[i].macAddrSTA))
continue;
@@ -3512,7 +3550,7 @@ static int hdd_ipa_uc_disconnect_client(hdd_adapter_t *adapter)
hdd_ipa->sap_num_connected_sta--;
}
}
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "exit: sap_num_connected_sta=%d",
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "exit: sap_num_connected_sta=%d",
hdd_ipa->sap_num_connected_sta);
return ret;
@@ -3531,12 +3569,12 @@ static int hdd_ipa_uc_disconnect_ap(hdd_adapter_t *adapter)
{
int ret = 0;
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "enter");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "enter");
if (adapter->ipa_context) {
hdd_ipa_uc_send_evt(adapter, WLAN_AP_DISCONNECT,
adapter->dev->dev_addr);
}
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "exit");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "exit");
return ret;
}
@@ -3555,14 +3593,14 @@ static int hdd_ipa_uc_disconnect_sta(hdd_adapter_t *adapter)
struct hdd_ipa_priv *hdd_ipa = ghdd_ipa;
int ret = 0;
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "enter");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "enter");
if (hdd_ipa_uc_sta_is_enabled(hdd_ipa->hdd_ctx) &&
hdd_ipa->sta_connected) {
pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(adapter);
hdd_ipa_uc_send_evt(adapter, WLAN_STA_DISCONNECT,
pHddStaCtx->conn_info.bssId.bytes);
}
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "exit");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "exit");
return ret;
}
@@ -3615,7 +3653,7 @@ static int __hdd_ipa_uc_ssr_deinit(void)
struct hdd_ipa_iface_context *iface_context;
hdd_context_t *hdd_ctx;
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "enter");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "enter");
if (!hdd_ipa)
return 0;
@@ -3631,9 +3669,10 @@ static int __hdd_ipa_uc_ssr_deinit(void)
for (idx = 0; (hdd_ipa->num_iface > 0) &&
(idx < HDD_IPA_MAX_IFACE); idx++) {
iface_context = &hdd_ipa->iface_context[idx];
- if (iface_context->adapter && iface_context->adapter->magic ==
- WLAN_HDD_ADAPTER_MAGIC)
+ if (iface_context->adapter &&
+ hdd_is_adapter_valid(hdd_ctx, iface_context->adapter)) {
hdd_ipa_cleanup_iface(iface_context);
+ }
}
hdd_ipa->num_iface = 0;
@@ -3658,7 +3697,7 @@ static int __hdd_ipa_uc_ssr_deinit(void)
hdd_ipa->uc_op_work[idx].msg = NULL;
}
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "exit");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "exit");
return 0;
}
@@ -3696,7 +3735,7 @@ static int __hdd_ipa_uc_ssr_reinit(hdd_context_t *hdd_ctx)
int i;
struct hdd_ipa_iface_context *iface_context = NULL;
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "enter");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "enter");
if (!hdd_ipa || !hdd_ipa_uc_is_enabled(hdd_ctx))
return 0;
@@ -3721,7 +3760,7 @@ static int __hdd_ipa_uc_ssr_reinit(hdd_context_t *hdd_ctx)
hdd_ipa->uc_loaded = true;
}
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "exit");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "exit");
return 0;
}
@@ -3775,7 +3814,7 @@ static struct sk_buff *__hdd_ipa_tx_packet_ipa(hdd_context_t *hdd_ctx,
if (!hdd_ipa)
return skb;
- if (HDD_IPA_UC_NUM_WDI_PIPE != hdd_ipa->activated_fw_pipe)
+ if (!hdd_ipa_is_fw_wdi_actived(hdd_ctx))
return skb;
if (skb_headroom(skb) <
@@ -4422,6 +4461,9 @@ static void hdd_ipa_send_skb_to_network(qdf_nbuf_t skb,
cpu_index = wlan_hdd_get_cpu();
++adapter->hdd_stats.hddTxRxStats.rxPackets[cpu_index];
+ ++adapter->stats.rx_packets;
+ adapter->stats.rx_bytes += skb->len;
+
result = hdd_ipa_aggregated_rx_ind(skb);
if (result == NET_RX_SUCCESS)
++adapter->hdd_stats.hddTxRxStats.rxDelivered[cpu_index];
@@ -4816,7 +4858,6 @@ static void hdd_ipa_send_pkt_to_tl(
/**
* hdd_ipa_is_present() - get IPA hw status
- * @hdd_ctx: pointer to hdd context
*
* ipa_uc_reg_rdyCB is not directly designed to check
* ipa hw status. This is an undocumented function which
@@ -4825,7 +4866,7 @@ static void hdd_ipa_send_pkt_to_tl(
* Return: true - ipa hw present
* false - ipa hw not present
*/
-bool hdd_ipa_is_present(hdd_context_t *hdd_ctx)
+bool hdd_ipa_is_present(void)
{
/* Check if ipa hw is enabled */
if (HDD_IPA_CHECK_HW() != -EPERM)
@@ -5652,9 +5693,9 @@ static void hdd_ipa_clean_hdr(hdd_adapter_t *adapter)
*/
static void hdd_ipa_cleanup_iface(struct hdd_ipa_iface_context *iface_context)
{
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "enter");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "enter");
- if (iface_context == NULL)
+ if (iface_context == NULL || iface_context->adapter == NULL)
return;
if (iface_context->adapter->magic != WLAN_HDD_ADAPTER_MAGIC) {
HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG,
@@ -5667,18 +5708,31 @@ static void hdd_ipa_cleanup_iface(struct hdd_ipa_iface_context *iface_context)
hdd_ipa_clean_hdr(iface_context->adapter);
qdf_spin_lock_bh(&iface_context->interface_lock);
+ /*
+ * Possible race condition between supplicant and MC thread:
+ * check whether the adapter has already been cleared by the
+ * other thread
+ */
+ if (!iface_context->adapter) {
+ qdf_spin_unlock_bh(&iface_context->interface_lock);
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "Already cleared");
+ goto end;
+ }
iface_context->adapter->ipa_context = NULL;
iface_context->adapter = NULL;
iface_context->tl_context = NULL;
- qdf_spin_unlock_bh(&iface_context->interface_lock);
iface_context->ifa_address = 0;
+ qdf_spin_unlock_bh(&iface_context->interface_lock);
if (!iface_context->hdd_ipa->num_iface) {
HDD_IPA_LOG(QDF_TRACE_LEVEL_ERROR,
"NUM INTF 0, Invalid");
QDF_ASSERT(0);
+ goto end;
}
iface_context->hdd_ipa->num_iface--;
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "exit: num_iface=%d",
+
+end:
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "exit: num_iface=%d",
iface_context->hdd_ipa->num_iface);
}
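The hunk above makes hdd_ipa_cleanup_iface() tolerate a concurrent teardown: a cheap NULL check on entry, then a re-check of iface_context->adapter after taking interface_lock, so a clear done by the other thread (supplicant vs. MC) is detected instead of dereferenced. A minimal userspace sketch of that check/lock/re-check idiom (the names are illustrative, not driver code):

/* Illustrative only: the same check/lock/re-check idiom, outside the driver. */
#include <pthread.h>
#include <stdio.h>

static int dummy_adapter;

struct iface {
	pthread_mutex_t lock;
	void *adapter;		/* NULL once the other thread has cleaned up */
};

static void cleanup_iface(struct iface *iface)
{
	if (!iface || !iface->adapter)	/* cheap early-out, no lock held */
		return;

	pthread_mutex_lock(&iface->lock);
	/*
	 * Re-check under the lock: another thread may have cleared the
	 * adapter between the early-out above and this point.
	 */
	if (!iface->adapter) {
		pthread_mutex_unlock(&iface->lock);
		printf("already cleared\n");
		return;
	}
	iface->adapter = NULL;		/* the actual teardown */
	pthread_mutex_unlock(&iface->lock);
	printf("cleaned up\n");
}

int main(void)
{
	struct iface iface = { PTHREAD_MUTEX_INITIALIZER, &dummy_adapter };

	cleanup_iface(&iface);	/* first call tears down */
	cleanup_iface(&iface);	/* second call takes the early-out */
	return 0;
}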
@@ -5697,7 +5751,7 @@ static int hdd_ipa_setup_iface(struct hdd_ipa_priv *hdd_ipa,
void *tl_context = NULL;
int i, ret = 0;
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "enter");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "enter");
/* Lower layer may send multiple START_BSS_EVENT in DFS mode or during
* channel change indication. Since these indications are sent by lower
@@ -5748,7 +5802,7 @@ static int hdd_ipa_setup_iface(struct hdd_ipa_priv *hdd_ipa,
hdd_ipa->num_iface++;
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "exit: num_iface=%d",
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "exit: num_iface=%d",
hdd_ipa->num_iface);
return ret;
@@ -6133,8 +6187,7 @@ static int __hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
} else {
/* Disable IPA UC TX PIPE when STA disconnected */
if ((1 == hdd_ipa->num_iface) &&
- (HDD_IPA_UC_NUM_WDI_PIPE ==
- hdd_ipa->activated_fw_pipe) &&
+ hdd_ipa_is_fw_wdi_actived(hdd_ipa->hdd_ctx) &&
!hdd_ipa->ipa_pipes_down)
hdd_ipa_uc_handle_last_discon(hdd_ipa);
}
@@ -6168,7 +6221,7 @@ static int __hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
}
if ((1 == hdd_ipa->num_iface) &&
- (HDD_IPA_UC_NUM_WDI_PIPE == hdd_ipa->activated_fw_pipe) &&
+ hdd_ipa_is_fw_wdi_actived(hdd_ipa->hdd_ctx) &&
!hdd_ipa->ipa_pipes_down) {
if (cds_is_driver_unloading()) {
/*
@@ -6328,10 +6381,9 @@ static int __hdd_ipa_wlan_evt(hdd_adapter_t *adapter, uint8_t sta_id,
/* Disable IPA UC TX PIPE when last STA disconnected */
if (!hdd_ipa->sap_num_connected_sta &&
hdd_ipa->uc_loaded == true) {
- if ((false == hdd_ipa->resource_unloading)
- && (HDD_IPA_UC_NUM_WDI_PIPE ==
- hdd_ipa->activated_fw_pipe) &&
- !hdd_ipa->ipa_pipes_down) {
+ if ((false == hdd_ipa->resource_unloading) &&
+ hdd_ipa_is_fw_wdi_actived(hdd_ipa->hdd_ctx) &&
+ !hdd_ipa->ipa_pipes_down) {
hdd_ipa_uc_handle_last_discon(hdd_ipa);
}
@@ -6599,7 +6651,7 @@ fail_setup_rm:
hdd_ctx->hdd_ipa = NULL;
ghdd_ipa = NULL;
fail_return:
- HDD_IPA_LOG(QDF_TRACE_LEVEL_INFO, "exit: fail");
+ HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "exit: fail");
return QDF_STATUS_E_FAILURE;
}
@@ -6707,6 +6759,7 @@ static QDF_STATUS __hdd_ipa_cleanup(hdd_context_t *hdd_ctx)
for (i = 0; i < HDD_IPA_UC_OPCODE_MAX; i++) {
cancel_work_sync(&hdd_ipa->uc_op_work[i].work);
+ qdf_mem_free(hdd_ipa->uc_op_work[i].msg);
hdd_ipa->uc_op_work[i].msg = NULL;
}
}
@@ -6749,9 +6802,6 @@ QDF_STATUS hdd_ipa_cleanup(hdd_context_t *hdd_ctx)
int hdd_ipa_uc_smmu_map(bool map, uint32_t num_buf, qdf_mem_info_t *buf_arr)
{
- HDD_IPA_DP_LOG(QDF_TRACE_LEVEL_DEBUG, "Map: %d Num_buf: %d",
- map, num_buf);
-
if (!num_buf) {
HDD_IPA_LOG(QDF_TRACE_LEVEL_DEBUG, "No buffers to map/unmap");
return 0;
@@ -6765,4 +6815,11 @@ int hdd_ipa_uc_smmu_map(bool map, uint32_t num_buf, qdf_mem_info_t *buf_arr)
(struct ipa_wdi_buffer_info *)buf_arr);
}
+void hdd_ipa_clean_adapter_iface(hdd_adapter_t *adapter)
+{
+ struct hdd_ipa_iface_context *iface_ctx = adapter->ipa_context;
+
+ if (iface_ctx)
+ hdd_ipa_cleanup_iface(iface_ctx);
+}
#endif /* IPA_OFFLOAD */
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_lro.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_lro.c
index 81dde75398ee..0f095ec34263 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_lro.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_lro.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -498,11 +498,6 @@ int hdd_lro_enable(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
adapter->dev->features |= NETIF_F_LRO;
- if (hdd_ctx->config->enable_tcp_delack) {
- hdd_ctx->config->enable_tcp_delack = 0;
- hdd_reset_tcp_delack(hdd_ctx);
- }
-
hdd_debug("LRO Enabled");
return 0;
@@ -670,7 +665,7 @@ hdd_lro_set_reset(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter,
adapter->dev->features &= ~NETIF_F_LRO;
hdd_debug("LRO Disabled");
- if (!hdd_ctx->config->enable_tcp_delack) {
+ if (hdd_ctx->config->enable_tcp_delack) {
struct wlan_rx_tp_data rx_tp_data;
hdd_debug("Enable TCP delack as LRO is disabled.");
@@ -679,7 +674,7 @@ hdd_lro_set_reset(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter,
wlan_hdd_send_svc_nlink_msg(hdd_ctx->radio_index,
WLAN_SVC_WLAN_TP_IND, &rx_tp_data,
sizeof(rx_tp_data));
- hdd_ctx->config->enable_tcp_delack = 1;
+ hdd_ctx->tcp_delack_on = 1;
}
}
return 0;
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c
index 5e3b8c034476..401194ab9f0d 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_main.c
@@ -113,6 +113,9 @@
#include "wlan_hdd_spectralscan.h"
#include "sme_power_save_api.h"
#include "wlan_hdd_sysfs.h"
+#ifdef WLAN_FEATURE_APF
+#include "wlan_hdd_apf.h"
+#endif
#ifdef CNSS_GENL
#include <net/cnss_nl.h>
@@ -136,6 +139,12 @@
#define MEMORY_DEBUG_STR ""
#endif
+#ifdef PANIC_ON_BUG
+#define PANIC_ON_BUG_STR " +PANIC_ON_BUG"
+#else
+#define PANIC_ON_BUG_STR ""
+#endif
+
int wlan_start_ret_val;
static DECLARE_COMPLETION(wlan_start_comp);
static unsigned int dev_num = 1;
@@ -723,6 +732,12 @@ int wlan_hdd_validate_context(hdd_context_t *hdd_ctx)
return -ENODEV;
}
+ if (cds_is_fw_down()) {
+ hdd_debug("%pS FW is down: 0x%x Ignore!!!",
+ (void *)_RET_IP_, cds_get_driver_state());
+ return -ENODEV;
+ }
+
return 0;
}
@@ -885,6 +900,43 @@ QDF_STATUS hdd_set_ibss_power_save_params(hdd_adapter_t *adapter)
return QDF_STATUS_SUCCESS;
}
+#ifdef FEATURE_RUNTIME_PM
+/**
+ * hdd_runtime_suspend_context_init() - API to initialize HDD Runtime Contexts
+ * @hdd_ctx: HDD context
+ *
+ * Return: None
+ */
+static void hdd_runtime_suspend_context_init(hdd_context_t *hdd_ctx)
+{
+ struct hdd_runtime_pm_context *ctx = &hdd_ctx->runtime_context;
+
+ qdf_runtime_lock_init(&ctx->scan);
+ qdf_runtime_lock_init(&ctx->roc);
+ qdf_runtime_lock_init(&ctx->dfs);
+ qdf_runtime_lock_init(&ctx->connect);
+}
+
+/**
+ * hdd_runtime_suspend_context_deinit() - API to deinit HDD runtime context
+ * @hdd_ctx: HDD Context
+ *
+ * Return: None
+ */
+static void hdd_runtime_suspend_context_deinit(hdd_context_t *hdd_ctx)
+{
+ struct hdd_runtime_pm_context *ctx = &hdd_ctx->runtime_context;
+
+ qdf_runtime_lock_deinit(&ctx->scan);
+ qdf_runtime_lock_deinit(&ctx->roc);
+ qdf_runtime_lock_deinit(&ctx->dfs);
+ qdf_runtime_lock_deinit(&ctx->connect);
+}
+#else /* FEATURE_RUNTIME_PM */
+static void hdd_runtime_suspend_context_init(hdd_context_t *hdd_ctx) {}
+static void hdd_runtime_suspend_context_deinit(hdd_context_t *hdd_ctx) {}
+#endif /* FEATURE_RUNTIME_PM */
+
#define INTF_MACADDR_MASK 0x7
void hdd_update_macaddr(hdd_context_t *hdd_ctx,
@@ -1600,6 +1652,9 @@ void hdd_update_tgt_cfg(void *context, void *param)
hdd_ctx->config->is_fils_roaming_supported =
cfg->services.is_fils_roaming_supported;
+ hdd_ctx->config->is_11k_offload_supported =
+ cfg->services.is_11k_offload_supported;
+
/* now overwrite the target band capability with INI
* setting if INI setting is a subset
*/
@@ -1657,8 +1712,8 @@ void hdd_update_tgt_cfg(void *context, void *param)
hdd_debug("Init current antenna mode: %d",
hdd_ctx->current_antenna_mode);
- hdd_ctx->bpf_enabled = (cfg->bpf_enabled &&
- hdd_ctx->config->bpf_packet_filter_enable);
+ hdd_ctx->apf_enabled = (cfg->apf_enabled &&
+ hdd_ctx->config->apf_packet_filter_enable);
hdd_ctx->rcpi_enabled = cfg->rcpi_enabled;
hdd_update_ra_rate_limit(hdd_ctx, cfg);
@@ -1674,8 +1729,8 @@ void hdd_update_tgt_cfg(void *context, void *param)
hdd_err("fw update WNI_CFG_VHT_CSN_BEAMFORMEE_ANT_SUPPORTED to CFG fails");
- hdd_debug("Target BPF %d Host BPF %d 8ss fw support %d txBFCsnValue %d",
- cfg->bpf_enabled, hdd_ctx->config->bpf_packet_filter_enable,
+ hdd_debug("Target APF %d Host APF %d 8ss fw support %d txBFCsnValue %d",
+ cfg->apf_enabled, hdd_ctx->config->apf_packet_filter_enable,
cfg->tx_bfee_8ss_enabled, hdd_ctx->config->txBFCsnValue);
/*
@@ -1683,11 +1738,11 @@ void hdd_update_tgt_cfg(void *context, void *param)
*/
hdd_update_wiphy_vhtcap(hdd_ctx);
/*
- * If BPF is enabled, maxWowFilters set to WMA_STA_WOW_DEFAULT_PTRN_MAX
+ * If APF is enabled, maxWowFilters set to WMA_STA_WOW_DEFAULT_PTRN_MAX
* because we need atleast WMA_STA_WOW_DEFAULT_PTRN_MAX free slots to
* configure the STA mode wow pattern.
*/
- if (hdd_ctx->bpf_enabled)
+ if (hdd_ctx->apf_enabled)
hdd_ctx->config->maxWoWFilters = WMA_STA_WOW_DEFAULT_PTRN_MAX;
hdd_ctx->wmi_max_len = cfg->wmi_max_len;
@@ -1875,7 +1930,7 @@ int hdd_start_adapter(hdd_adapter_t *adapter)
int ret;
enum tQDF_ADAPTER_MODE device_mode = adapter->device_mode;
- ENTER_DEV(adapter->dev);
+ hdd_info("enter(%s)", netdev_name(adapter->dev));
hdd_debug("Start_adapter for mode : %d", adapter->device_mode);
switch (device_mode) {
@@ -2140,6 +2195,8 @@ int hdd_wlan_start_modules(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter,
goto deinit_config;
}
+ hdd_runtime_suspend_context_init(hdd_ctx);
+
hdd_ctx->hHal = cds_get_context(QDF_MODULE_ID_SME);
if (NULL == hdd_ctx->hHal) {
hdd_err("HAL context is null");
@@ -2312,6 +2369,9 @@ static int __hdd_open(struct net_device *dev)
}
}
+ if (adapter->device_mode == QDF_FTM_MODE)
+ goto err_hdd_hdd_init_deinit_lock;
+
set_bit(DEVICE_IFACE_OPENED, &adapter->event_flags);
hdd_info("%s interface up", dev->name);
@@ -2444,6 +2504,13 @@ static int __hdd_stop(struct net_device *dev)
clear_bit(DEVICE_IFACE_OPENED, &adapter->event_flags);
/*
* Upon wifi turn-off the DUT has to flush the scan results, so if
+ * this is the last cli iface, flush the scan database.
+ */
+ if (!hdd_is_cli_iface_up(hdd_ctx))
+ sme_scan_flush_result(hdd_ctx->hHal);
+
+ /*
* Find if any iface is up. If any iface is up then can't put device to
* sleep/power save mode
*/
@@ -2589,14 +2656,37 @@ static int __hdd_set_mac_address(struct net_device *dev, void *addr)
struct sockaddr *psta_mac_addr = addr;
QDF_STATUS qdf_ret_status = QDF_STATUS_SUCCESS;
int ret;
+ struct qdf_mac_addr mac_addr;
ENTER_DEV(dev);
+ if (netif_running(dev)) {
+ hdd_err("On iface up, set mac address change isn't supported");
+ return -EBUSY;
+ }
+
hdd_ctx = WLAN_HDD_GET_CTX(adapter);
ret = wlan_hdd_validate_context(hdd_ctx);
if (0 != ret)
return ret;
+ qdf_mem_copy(&mac_addr, psta_mac_addr->sa_data, QDF_MAC_ADDR_SIZE);
+
+ if (qdf_is_macaddr_zero(&mac_addr)) {
+ hdd_err("MAC is all zero");
+ return -EINVAL;
+ }
+
+ if (qdf_is_macaddr_broadcast(&mac_addr)) {
+ hdd_err("MAC is Broadcast");
+ return -EINVAL;
+ }
+
+ if (ETHER_IS_MULTICAST(psta_mac_addr->sa_data)) {
+ hdd_err("MAC is Multicast");
+ return -EINVAL;
+ }
+
memcpy(&adapter->macAddressCurrent, psta_mac_addr->sa_data, ETH_ALEN);
memcpy(dev->dev_addr, psta_mac_addr->sa_data, ETH_ALEN);
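The added checks above reject an all-zero, broadcast, or multicast address before the new MAC is copied into the adapter and net_device. A self-contained sketch of the same screening, written as plain userspace C instead of the qdf_is_macaddr_* / ETHER_IS_MULTICAST helpers, with illustrative names:

/* Illustrative only: zero / broadcast / multicast screening for a MAC. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool mac_is_acceptable(const uint8_t mac[6])
{
	static const uint8_t zero[6];
	static const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	if (!memcmp(mac, zero, 6))
		return false;		/* all-zero address */
	if (!memcmp(mac, bcast, 6))
		return false;		/* broadcast address */
	if (mac[0] & 0x01)
		return false;		/* multicast: I/G bit set */
	return true;
}

int main(void)
{
	const uint8_t unicast[6]   = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
	const uint8_t multicast[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("unicast ok: %d, multicast ok: %d\n",
	       mac_is_acceptable(unicast), mac_is_acceptable(multicast));
	return 0;
}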
@@ -2909,58 +2999,6 @@ void hdd_set_station_ops(struct net_device *pWlanDev)
pWlanDev->netdev_ops = &wlan_drv_ops;
}
-#ifdef FEATURE_RUNTIME_PM
-/**
- * hdd_runtime_suspend_context_init() - API to initialize HDD Runtime Contexts
- * @hdd_ctx: HDD context
- *
- * Return: None
- */
-static void hdd_runtime_suspend_context_init(hdd_context_t *hdd_ctx)
-{
- struct hdd_runtime_pm_context *ctx = &hdd_ctx->runtime_context;
-
- qdf_runtime_lock_init(&ctx->scan);
- qdf_runtime_lock_init(&ctx->roc);
- qdf_runtime_lock_init(&ctx->dfs);
-}
-
-/**
- * hdd_runtime_suspend_context_deinit() - API to deinit HDD runtime context
- * @hdd_ctx: HDD Context
- *
- * Return: None
- */
-static void hdd_runtime_suspend_context_deinit(hdd_context_t *hdd_ctx)
-{
- struct hdd_runtime_pm_context *ctx = &hdd_ctx->runtime_context;
-
- qdf_runtime_lock_deinit(&ctx->scan);
- qdf_runtime_lock_deinit(&ctx->roc);
- qdf_runtime_lock_deinit(&ctx->dfs);
-}
-
-static void hdd_adapter_runtime_suspend_init(hdd_adapter_t *adapter)
-{
- struct hdd_connect_pm_context *ctx = &adapter->connect_rpm_ctx;
-
- qdf_runtime_lock_init(&ctx->connect);
-}
-
-static void hdd_adapter_runtime_suspend_denit(hdd_adapter_t *adapter)
-{
- struct hdd_connect_pm_context *ctx = &adapter->connect_rpm_ctx;
-
- qdf_runtime_lock_deinit(&ctx->connect);
- ctx->connect = NULL;
-}
-#else /* FEATURE_RUNTIME_PM */
-static void hdd_runtime_suspend_context_init(hdd_context_t *hdd_ctx) {}
-static void hdd_runtime_suspend_context_deinit(hdd_context_t *hdd_ctx) {}
-static inline void hdd_adapter_runtime_suspend_init(hdd_adapter_t *adapter) {}
-static inline void hdd_adapter_runtime_suspend_denit(hdd_adapter_t *adapter) {}
-#endif /* FEATURE_RUNTIME_PM */
-
/**
* hdd_adapter_init_action_frame_random_mac() - Initialze attributes needed for
* randomization of SA in management action frames
@@ -3086,7 +3124,6 @@ static hdd_adapter_t *hdd_alloc_station_adapter(hdd_context_t *hdd_ctx,
/* set pWlanDev's parent to underlying device */
SET_NETDEV_DEV(pWlanDev, hdd_ctx->parent_dev);
hdd_wmm_init(adapter);
- hdd_adapter_runtime_suspend_init(adapter);
spin_lock_init(&adapter->pause_map_lock);
adapter->start_time = adapter->last_time = qdf_system_ticks();
}
@@ -3255,6 +3292,17 @@ QDF_STATUS hdd_init_station_mode(hdd_adapter_t *adapter)
}
}
+ if (adapter->device_mode == QDF_STA_MODE) {
+ hdd_debug("setting RTT mac randomization param: %d",
+ hdd_ctx->config->enable_rtt_mac_randomization);
+ ret_val = sme_cli_set_command(adapter->sessionId,
+ WMI_VDEV_PARAM_ENABLE_DISABLE_RTT_INITIATOR_RANDOM_MAC,
+ hdd_ctx->config->enable_rtt_mac_randomization,
+ VDEV_CMD);
+ if (0 != ret_val)
+ hdd_err("RTT mac randomization param set failed %d",
+ ret_val);
+ }
/*
* 1) When DBS hwmode is disabled from INI then send HT/VHT IE as per
* non-dbs hw mode, so that there is no limitation applied for 2G/5G.
@@ -3388,6 +3436,7 @@ void hdd_cleanup_actionframe(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
if (NULL != cfgState->buf) {
unsigned long rc;
+ hdd_debug("Wait for ack for the previous action frame");
rc = wait_for_completion_timeout(
&adapter->tx_action_cnf_event,
msecs_to_jiffies(ACTION_FRAME_TX_TIMEOUT));
@@ -3398,7 +3447,35 @@ void hdd_cleanup_actionframe(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
* cfgState->buf
*/
hdd_send_action_cnf(adapter, false);
- }
+ } else
+ hdd_debug("Wait complete");
+ }
+}
+
+/**
+ * hdd_cleanup_actionframe_no_wait() - Clean up pending action frame
+ * @hdd_ctx: global hdd context
+ * @adapter: pointer to adapter
+ *
+ * This function is used to cancel the pending action frame without waiting
+ * for the ack.
+ *
+ * Return: None.
+ */
+void hdd_cleanup_actionframe_no_wait(hdd_context_t *hdd_ctx,
+ hdd_adapter_t *adapter)
+{
+ hdd_cfg80211_state_t *cfgState;
+
+ cfgState = WLAN_HDD_GET_CFG_STATE_PTR(adapter);
+
+ if (cfgState->buf) {
+ /*
+ * Inform tx status as FAILURE to upper layer and free
+ * cfgState->buf
+ */
+ hdd_debug("Cleanup previous action frame without waiting for the ack");
+ hdd_send_action_cnf(adapter, false);
}
}
@@ -3515,8 +3592,6 @@ static void hdd_cleanup_adapter(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter,
hdd_debugfs_exit(adapter);
- hdd_adapter_runtime_suspend_denit(adapter);
-
/*
* The adapter is marked as closed. When hdd_wlan_exit() call returns,
* the driver is almost closed and cannot handle either control
@@ -3669,6 +3744,8 @@ static void hdd_set_fw_log_params(hdd_context_t *hdd_ctx,
static int hdd_configure_chain_mask(hdd_adapter_t *adapter)
{
int ret_val;
+ QDF_STATUS status;
+ struct wma_caps_per_phy non_dbs_phy_cap;
hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
hdd_debug("enable2x2: %d, lte_coex: %d, ChainMask1x1: tx: %d rx: %d",
@@ -3683,11 +3760,39 @@ static int hdd_configure_chain_mask(hdd_adapter_t *adapter)
hdd_ctx->config->tx_chain_mask_5g,
hdd_ctx->config->rx_chain_mask_5g);
+ status = wma_get_caps_for_phyidx_hwmode(&non_dbs_phy_cap,
+ HW_MODE_DBS_NONE,
+ CDS_BAND_ALL);
+ if (QDF_IS_STATUS_ERROR(status)) {
+ hdd_info("couldn't get phy caps. skip chain mask programming");
+ return 0;
+ }
+
+ if (non_dbs_phy_cap.tx_chain_mask_2G < 3 ||
+ non_dbs_phy_cap.rx_chain_mask_2G < 3 ||
+ non_dbs_phy_cap.tx_chain_mask_5G < 3 ||
+ non_dbs_phy_cap.rx_chain_mask_5G < 3) {
+ hdd_info("firmware not capable. skip chain mask programming");
+ return 0;
+ }
+
if (hdd_ctx->config->enable2x2) {
hdd_info("2x2 enabled. skip chain mask programming");
return 0;
}
+ if (hdd_ctx->config->dual_mac_feature_disable !=
+ DISABLE_DBS_CXN_AND_SCAN) {
+ hdd_info("DBS enabled(%d). skip chain mask programming",
+ hdd_ctx->config->dual_mac_feature_disable);
+ return 0;
+ }
+
+ if (hdd_ctx->lte_coex_ant_share) {
+ hdd_info("lte ant sharing enabled. skip chainmask programming");
+ return 0;
+ }
+
if (hdd_ctx->config->txchainmask1x1) {
ret_val = sme_cli_set_command(adapter->sessionId,
WMI_PDEV_PARAM_TX_CHAIN_MASK,
@@ -3706,22 +3811,12 @@ static int hdd_configure_chain_mask(hdd_adapter_t *adapter)
goto error;
}
- if (hdd_ctx->lte_coex_ant_share) {
- hdd_info("lte ant sharing enabled. skip per band chain mask");
- return 0;
- }
-
if (hdd_ctx->config->txchainmask1x1 ||
hdd_ctx->config->rxchainmask1x1) {
hdd_info("band agnostic tx/rx chain mask set. skip per band chain mask");
return 0;
}
- if (!hdd_ctx->config->dual_mac_feature_disable) {
- hdd_info("DBS enabled. skip per band chain mask");
- return 0;
- }
-
if (hdd_ctx->config->tx_chain_mask_2g) {
ret_val = sme_cli_set_command(adapter->sessionId,
WMI_PDEV_PARAM_TX_CHAIN_MASK_2G,
@@ -3793,6 +3888,17 @@ int hdd_set_fw_params(hdd_adapter_t *adapter)
hdd_err("Failed to set LPRx");
goto error;
}
+
+ ret = sme_cli_set_command(
+ adapter->sessionId,
+ WMI_PDEV_PARAM_1CH_DTIM_OPTIMIZED_CHAIN_SELECTION,
+ hdd_ctx->config->enable_dtim_selection_diversity,
+ PDEV_CMD);
+ if (ret) {
+ hdd_err("Failed to set DTIM_OPTIMIZED_CHAIN_SELECTION");
+ goto error;
+ }
+
if (adapter->device_mode == QDF_STA_MODE) {
sme_set_smps_cfg(adapter->sessionId,
HDD_STA_SMPS_PARAM_UPPER_BRSSI_THRESH,
@@ -3943,7 +4049,6 @@ hdd_adapter_t *hdd_open_adapter(hdd_context_t *hdd_ctx, uint8_t session_type,
QDF_STATUS status = QDF_STATUS_E_FAILURE;
hdd_cfg80211_state_t *cfgState;
-
if (hdd_ctx->current_intf_count >= hdd_ctx->max_intf_count) {
/*
* Max limit reached on the number of vdevs configured by the
@@ -4165,6 +4270,8 @@ QDF_STATUS hdd_close_adapter(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter,
hdd_adapter_list_node_t *adapterNode, *pCurrent, *pNext;
QDF_STATUS status;
+ hdd_info("enter(%s)", netdev_name(adapter->dev));
+
status = hdd_get_front_adapter(hdd_ctx, &pCurrent);
if (QDF_STATUS_SUCCESS != status) {
hdd_warn("adapter list empty %d",
@@ -4203,6 +4310,7 @@ QDF_STATUS hdd_close_adapter(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter,
return QDF_STATUS_SUCCESS;
}
+ EXIT();
return QDF_STATUS_E_FAILURE;
}
@@ -4399,7 +4507,7 @@ QDF_STATUS hdd_stop_adapter(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter,
void *sap_ctx;
tsap_Config_t *sap_config;
- ENTER();
+ hdd_info("enter(%s)", netdev_name(adapter->dev));
if (!test_bit(SME_SESSION_OPENED, &adapter->event_flags)) {
hdd_err("session %d is not open %lu",
@@ -5240,6 +5348,7 @@ void hdd_connect_result(struct net_device *dev, const u8 *bssid,
{
hdd_adapter_t *padapter = (hdd_adapter_t *) netdev_priv(dev);
struct cfg80211_bss *bss = NULL;
+ hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(padapter);
if (WLAN_STATUS_SUCCESS == status) {
struct ieee80211_channel *chan;
@@ -5267,7 +5376,7 @@ void hdd_connect_result(struct net_device *dev, const u8 *bssid,
req_ie_len, resp_ie, resp_ie_len,
status, gfp, connect_timeout, timeout_reason);
}
- qdf_runtime_pm_allow_suspend(&padapter->connect_rpm_ctx.connect);
+ qdf_runtime_pm_allow_suspend(&hdd_ctx->runtime_context.connect);
hdd_allow_suspend(WIFI_POWER_EVENT_WAKELOCK_CONNECT);
}
#else
@@ -5279,10 +5388,11 @@ void hdd_connect_result(struct net_device *dev, const u8 *bssid,
tSirResultCodes timeout_reason)
{
hdd_adapter_t *padapter = (hdd_adapter_t *) netdev_priv(dev);
+ hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(padapter);
cfg80211_connect_result(dev, bssid, req_ie, req_ie_len,
resp_ie, resp_ie_len, status, gfp);
- qdf_runtime_pm_allow_suspend(&padapter->connect_rpm_ctx.connect);
+ qdf_runtime_pm_allow_suspend(&hdd_ctx->runtime_context.connect);
hdd_allow_suspend(WIFI_POWER_EVENT_WAKELOCK_CONNECT);
}
#endif
@@ -5629,6 +5739,35 @@ hdd_adapter_t *hdd_get_adapter(hdd_context_t *hdd_ctx,
}
/**
+ * hdd_is_adapter_valid() - Check if adapter is valid
+ * @hdd_ctx: hdd context
+ * @adapter: pointer to adapter
+ *
+ * Return: true if adapter address is valid or false otherwise
+ */
+bool hdd_is_adapter_valid(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
+{
+ hdd_adapter_list_node_t *adapter_node = NULL, *p_next = NULL;
+ hdd_adapter_t *p_adapter;
+ QDF_STATUS status;
+
+ status = hdd_get_front_adapter(hdd_ctx, &adapter_node);
+
+ while (NULL != adapter_node && QDF_STATUS_SUCCESS == status) {
+ p_adapter = adapter_node->pAdapter;
+
+ if (p_adapter && (p_adapter == adapter))
+ return true;
+
+ status = hdd_get_next_adapter(hdd_ctx, adapter_node, &p_next);
+ adapter_node = p_next;
+ }
+
+ return false;
+}
+
+/**
* hdd_get_operating_channel() - return operating channel of the device mode
* @hdd_ctx: Pointer to the HDD context.
* @mode: Device mode for which operating channel is required.
@@ -5742,9 +5881,7 @@ QDF_STATUS hdd_abort_mac_scan_all_adapters(hdd_context_t *hdd_ctx)
(adapter->device_mode == QDF_P2P_DEVICE_MODE) ||
(adapter->device_mode == QDF_SAP_MODE) ||
(adapter->device_mode == QDF_P2P_GO_MODE)) {
- hdd_abort_mac_scan(hdd_ctx, adapter->sessionId,
- INVALID_SCAN_ID,
- eCSR_SCAN_ABORT_DEFAULT);
+ wlan_hdd_scan_abort(adapter);
}
status = hdd_get_next_adapter(hdd_ctx, adapterNode, &pNext);
adapterNode = pNext;
@@ -6069,6 +6206,8 @@ static int hdd_context_deinit(hdd_context_t *hdd_ctx)
qdf_list_destroy(&hdd_ctx->hddAdapters);
+ hdd_apf_context_destroy();
+
return 0;
}
@@ -6230,7 +6369,6 @@ static void hdd_wlan_exit(hdd_context_t *hdd_ctx)
hdd_green_ap_deinit(hdd_ctx);
hdd_request_manager_deinit();
- hdd_runtime_suspend_context_deinit(hdd_ctx);
hdd_close_all_adapters(hdd_ctx, false);
hdd_ipa_cleanup(hdd_ctx);
@@ -6245,7 +6383,6 @@ static void hdd_wlan_exit(hdd_context_t *hdd_ctx)
hdd_lpass_notify_stop(hdd_ctx);
}
- wlan_hdd_deinit_chan_info(hdd_ctx);
hdd_exit_netlink_services(hdd_ctx);
mutex_destroy(&hdd_ctx->iface_change_lock);
hdd_context_destroy(hdd_ctx);
@@ -6391,9 +6528,40 @@ QDF_STATUS hdd_post_cds_enable_config(hdd_context_t *hdd_ctx)
return QDF_STATUS_E_FAILURE;
}
+ sme_generic_change_country_code(hdd_ctx->hHal,
+ hdd_ctx->reg.alpha2);
+
return QDF_STATUS_SUCCESS;
}
+hdd_adapter_t *hdd_get_first_valid_adapter()
+{
+ hdd_adapter_list_node_t *adapterNode = NULL, *pNext = NULL;
+ hdd_adapter_t *adapter;
+ QDF_STATUS status;
+ hdd_context_t *hdd_ctx;
+
+ hdd_ctx = cds_get_context(QDF_MODULE_ID_HDD);
+
+ if (!hdd_ctx) {
+ hdd_err("HDD context not valid");
+ return NULL;
+ }
+
+ status = hdd_get_front_adapter(hdd_ctx, &adapterNode);
+
+ while (adapterNode != NULL && status == QDF_STATUS_SUCCESS) {
+ adapter = adapterNode->pAdapter;
+ if (adapter && adapter->magic == WLAN_HDD_ADAPTER_MAGIC)
+ return adapter;
+ status = hdd_get_next_adapter(hdd_ctx, adapterNode, &pNext);
+ adapterNode = pNext;
+ }
+
+ return NULL;
+}
+
/* wake lock APIs for HDD */
void hdd_prevent_suspend(uint32_t reason)
{
@@ -6752,7 +6920,7 @@ static void hdd_pld_request_bus_bandwidth(hdd_context_t *hdd_ctx,
* to default delayed ack. Note that this will disable the
* dynamic delayed ack mechanism across the system
*/
- if (hdd_ctx->config->enable_tcp_delack)
+ if (hdd_ctx->tcp_delack_on)
rx_tp_data.rx_tp_flags |= TCP_DEL_ACK_IND;
if (hdd_ctx->config->enable_tcp_adv_win_scale)
@@ -6772,7 +6940,8 @@ static void hdd_pld_request_bus_bandwidth(hdd_context_t *hdd_ctx,
else
next_tx_level = WLAN_SVC_TP_LOW;
- if (hdd_ctx->cur_tx_level != next_tx_level) {
+ if ((hdd_ctx->config->enable_tcp_limit_output) &&
+ (hdd_ctx->cur_tx_level != next_tx_level)) {
hdd_debug("change TCP TX trigger level %d, average_tx: %llu",
next_tx_level, temp_tx);
hdd_ctx->cur_tx_level = next_tx_level;
@@ -6884,29 +7053,27 @@ static void hdd_bus_bw_work_handler(struct work_struct *work)
connected = true;
}
+ if (!connected) {
+ hdd_err("bus bandwidth timer running in disconnected state");
+ return;
+ }
+
/* add intra bss forwarded tx and rx packets */
tx_packets += fwd_tx_packets_diff;
rx_packets += fwd_rx_packets_diff;
- hdd_ipa_uc_stat_query(hdd_ctx, &ipa_tx_packets, &ipa_rx_packets);
- tx_packets += (uint64_t)ipa_tx_packets;
- rx_packets += (uint64_t)ipa_rx_packets;
+ if (hdd_ipa_is_fw_wdi_actived(hdd_ctx)) {
+ hdd_ipa_uc_stat_query(hdd_ctx, &ipa_tx_packets,
+ &ipa_rx_packets);
+ tx_packets += (uint64_t)ipa_tx_packets;
+ rx_packets += (uint64_t)ipa_rx_packets;
- if (adapter) {
- adapter->stats.tx_packets += ipa_tx_packets;
- adapter->stats.rx_packets += ipa_rx_packets;
- }
-
- if (!connected) {
- hdd_err("bus bandwidth timer running in disconnected state");
- return;
+ hdd_ipa_set_perf_level(hdd_ctx, tx_packets, rx_packets);
+ hdd_ipa_uc_stat_request(adapter, 2);
}
hdd_pld_request_bus_bandwidth(hdd_ctx, tx_packets, rx_packets);
- hdd_ipa_set_perf_level(hdd_ctx, tx_packets, rx_packets);
- hdd_ipa_uc_stat_request(adapter, 2);
-
restart_timer:
/* ensure periodic timer should still be running before restarting it */
qdf_spinlock_acquire(&hdd_ctx->bus_bw_timer_lock);
@@ -7043,7 +7210,7 @@ void wlan_hdd_display_tx_rx_histogram(hdd_context_t *hdd_ctx)
hdd_ctx->config->busBandwidthMediumThreshold,
hdd_ctx->config->busBandwidthLowThreshold);
hdd_debug("Enable TCP DEL ACK: %d",
- hdd_ctx->config->enable_tcp_delack);
+ hdd_ctx->tcp_delack_on);
hdd_debug("TCP DEL High TH: %d TCP DEL Low TH: %d",
hdd_ctx->config->tcpDelackThresholdHigh,
hdd_ctx->config->tcpDelackThresholdLow);
@@ -7110,7 +7277,8 @@ hdd_display_netif_queue_history_compact(hdd_context_t *hdd_ctx)
qdf_time_t total, pause, unpause, curr_time, delta;
QDF_STATUS status;
char temp_str[20 * WLAN_REASON_TYPE_MAX];
- char comb_log_str[(ADAP_NETIFQ_LOG_LEN * MAX_NUMBER_OF_ADAPTERS) + 1];
+ char *comb_log_str;
+ uint32_t comb_log_str_size;
struct ol_txrx_pdev_t *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
hdd_adapter_t *adapter = NULL;
hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL;
@@ -7120,8 +7288,14 @@ hdd_display_netif_queue_history_compact(hdd_context_t *hdd_ctx)
return;
}
+ comb_log_str_size = (ADAP_NETIFQ_LOG_LEN * MAX_NUMBER_OF_ADAPTERS) + 1;
+ comb_log_str = qdf_mem_malloc(comb_log_str_size);
+ if (!comb_log_str) {
+ hdd_err("failed to alloc comb_log_str");
+ return;
+ }
+
bytes_written = 0;
- qdf_mem_set(comb_log_str, 0, sizeof(comb_log_str));
status = hdd_get_front_adapter(hdd_ctx, &adapter_node);
while (NULL != adapter_node && QDF_STATUS_SUCCESS == status) {
adapter = adapter_node->pAdapter;
@@ -7160,8 +7334,8 @@ hdd_display_netif_queue_history_compact(hdd_context_t *hdd_ctx)
hdd_warn("log truncated");
bytes_written += snprintf(&comb_log_str[bytes_written],
- bytes_written >= sizeof(comb_log_str) ? 0 :
- sizeof(comb_log_str) - bytes_written,
+ bytes_written >= comb_log_str_size ? 0 :
+ comb_log_str_size - bytes_written,
"[%d %d] (%d) %u/%ums %s|",
adapter->sessionId, adapter->device_mode,
adapter->pause_map,
@@ -7180,9 +7354,10 @@ hdd_display_netif_queue_history_compact(hdd_context_t *hdd_ctx)
pdev->tx_desc.num_free,
pdev->tx_desc.pool_size, comb_log_str);
- if (bytes_written >= sizeof(comb_log_str))
+ if (bytes_written >= comb_log_str_size)
hdd_warn("log string truncated");
+ qdf_mem_free(comb_log_str);
}
/**
@@ -8011,44 +8186,12 @@ void hdd_indicate_mgmt_frame(tSirSmeMgmtFrameInd *frame_ind)
frame_ind->rxRssi);
}
-/**
- * wlan_hdd_disable_all_dual_mac_features() - Disable dual mac features
- * @hdd_ctx: HDD context
- *
- * Disables all the dual mac features like DBS, Agile DFS etc.
- *
- * Return: QDF_STATUS_SUCCESS on success
- */
-static QDF_STATUS wlan_hdd_disable_all_dual_mac_features(hdd_context_t *hdd_ctx)
-{
- struct sir_dual_mac_config cfg;
- QDF_STATUS status;
-
- if (!hdd_ctx) {
- hdd_err("HDD context is NULL");
- return QDF_STATUS_E_FAILURE;
- }
-
- cfg.scan_config = 0;
- cfg.fw_mode_config = 0;
- cfg.set_dual_mac_cb = cds_soc_set_dual_mac_cfg_cb;
-
- hdd_debug("Disabling all dual mac features...");
-
- status = sme_soc_set_dual_mac_config(hdd_ctx->hHal, cfg);
- if (status != QDF_STATUS_SUCCESS) {
- hdd_err("sme_soc_set_dual_mac_config failed %d", status);
- return status;
- }
-
- return QDF_STATUS_SUCCESS;
-}
-
static QDF_STATUS
wlan_hdd_update_dbs_scan_and_fw_mode_config(hdd_context_t *hdd_ctx)
{
struct sir_dual_mac_config cfg = {0};
QDF_STATUS status;
+ uint32_t channel_select_logic_conc;
if (!hdd_ctx) {
hdd_err("HDD context is NULL");
@@ -8058,14 +8201,25 @@ wlan_hdd_update_dbs_scan_and_fw_mode_config(hdd_context_t *hdd_ctx)
cfg.scan_config = 0;
cfg.fw_mode_config = 0;
cfg.set_dual_mac_cb = cds_soc_set_dual_mac_cfg_cb;
- status = wma_get_updated_scan_and_fw_mode_config(&cfg.scan_config,
- &cfg.fw_mode_config,
- hdd_ctx->config->dual_mac_feature_disable);
- if (status != QDF_STATUS_SUCCESS) {
- hdd_err("wma_get_updated_scan_and_fw_mode_config failed %d",
- status);
- return status;
+ if (!wma_is_dbs_enable())
+ channel_select_logic_conc = 0;
+ else
+ channel_select_logic_conc = hdd_ctx->config->
+ channel_select_logic_conc;
+
+ if (hdd_ctx->config->dual_mac_feature_disable !=
+ DISABLE_DBS_CXN_AND_SCAN) {
+ status = wma_get_updated_scan_and_fw_mode_config(
+ &cfg.scan_config, &cfg.fw_mode_config,
+ hdd_ctx->config->dual_mac_feature_disable,
+ channel_select_logic_conc);
+
+ if (status != QDF_STATUS_SUCCESS) {
+ hdd_err("wma_get_updated_scan_and_fw_mode_config failed %d",
+ status);
+ return status;
+ }
}
hdd_debug("send scan_cfg: 0x%x fw_mode_cfg: 0x%x to fw",
@@ -8101,8 +8255,11 @@ static void hdd_override_ini_config(hdd_context_t *hdd_ctx)
hdd_debug("Module enable_11d set to %d", enable_11d);
}
- if (!hdd_ipa_is_present(hdd_ctx))
+ if (!hdd_ipa_is_present()) {
hdd_ctx->config->IpaConfig = 0;
+ hdd_debug("IpaConfig override to %d",
+ hdd_ctx->config->IpaConfig);
+ }
if (!hdd_ctx->config->rssi_assoc_reject_enabled ||
!hdd_ctx->config->enable_bcast_probe_rsp) {
@@ -8185,7 +8342,7 @@ static int hdd_context_init(hdd_context_t *hdd_ctx)
init_completion(&hdd_ctx->mc_sus_event_var);
init_completion(&hdd_ctx->ready_to_suspend);
- hdd_init_bpf_completion();
+ hdd_apf_context_init();
qdf_spinlock_create(&hdd_ctx->connection_status_lock);
qdf_spinlock_create(&hdd_ctx->sta_update_info_lock);
@@ -8875,8 +9032,8 @@ static int hdd_update_cds_config(hdd_context_t *hdd_ctx)
cds_cfg->enable_rxthread = hdd_ctx->enableRxThread;
cds_cfg->ce_classify_enabled =
hdd_ctx->config->ce_classify_enabled;
- cds_cfg->bpf_packet_filter_enable =
- hdd_ctx->config->bpf_packet_filter_enable;
+ cds_cfg->apf_packet_filter_enable =
+ hdd_ctx->config->apf_packet_filter_enable;
cds_cfg->tx_chain_mask_cck = hdd_ctx->config->tx_chain_mask_cck;
cds_cfg->self_gen_frm_pwr = hdd_ctx->config->self_gen_frm_pwr;
cds_cfg->max_station = hdd_ctx->config->maxNumberOfPeers;
@@ -8886,8 +9043,8 @@ static int hdd_update_cds_config(hdd_context_t *hdd_ctx)
hdd_ctx->config->max_msdus_per_rxinorderind;
cds_cfg->self_recovery_enabled = hdd_ctx->config->enableSelfRecovery;
cds_cfg->fw_timeout_crash = hdd_ctx->config->fw_timeout_crash;
- cds_cfg->active_uc_bpf_mode = hdd_ctx->config->active_uc_bpf_mode;
- cds_cfg->active_mc_bc_bpf_mode = hdd_ctx->config->active_mc_bc_bpf_mode;
+ cds_cfg->active_uc_apf_mode = hdd_ctx->config->active_uc_apf_mode;
+ cds_cfg->active_mc_bc_apf_mode = hdd_ctx->config->active_mc_bc_apf_mode;
cds_cfg->auto_power_save_fail_mode =
hdd_ctx->config->auto_pwr_save_fail_mode;
@@ -9285,8 +9442,7 @@ static int hdd_platform_wlan_mac(hdd_context_t *hdd_ctx)
addr = hdd_get_platform_wlan_mac_buff(dev, &no_of_mac_addr);
- if (no_of_mac_addr == 0 || !addr ||
- (hdd_ctx->config->mac_provision && (no_of_mac_addr < 2))) {
+ if (no_of_mac_addr == 0 || !addr) {
hdd_err("Platform Driver doesn't have provisioned mac addr");
return -EINVAL;
}
@@ -9388,10 +9544,7 @@ static int hdd_initialize_mac_address(hdd_context_t *hdd_ctx)
bool update_mac_addr_to_fw = true;
ret = hdd_platform_wlan_mac(hdd_ctx);
- if (hdd_ctx->config->mac_provision)
- return ret;
-
- if (ret == 0)
+ if (hdd_ctx->config->mac_provision || !ret)
return ret;
hdd_info("MAC is not programmed in platform driver ret: %d, use wlan_mac.bin",
@@ -9574,6 +9727,7 @@ static int hdd_pre_enable_configure(hdd_context_t *hdd_ctx)
hdd_err("reg info update failed");
goto out;
}
+
cds_fill_and_send_ctl_to_fw(&hdd_ctx->reg);
status = hdd_set_sme_chan_list(hdd_ctx);
@@ -9785,31 +9939,6 @@ static int hdd_set_auto_shutdown_cb(hdd_context_t *hdd_ctx)
}
#endif
-static QDF_STATUS hdd_set_dbs_scan_and_fw_mode_cfg(hdd_context_t *hdd_ctx)
-{
-
- QDF_STATUS status = QDF_STATUS_SUCCESS;
-
- switch (hdd_ctx->config->dual_mac_feature_disable) {
- case DISABLE_DBS_CXN_AND_SCAN:
- status = wlan_hdd_disable_all_dual_mac_features(hdd_ctx);
- if (status != QDF_STATUS_SUCCESS)
- hdd_err("Failed to disable dual mac features");
- break;
- case DISABLE_DBS_CXN_AND_ENABLE_DBS_SCAN:
- case DISABLE_DBS_CXN_AND_ENABLE_DBS_SCAN_WITH_ASYNC_SCAN_OFF:
- case ENABLE_DBS_CXN_AND_ENABLE_SCAN_WITH_ASYNC_SCAN_OFF:
- status = wlan_hdd_update_dbs_scan_and_fw_mode_config(hdd_ctx);
- if (status != QDF_STATUS_SUCCESS)
- hdd_err("Failed to set dbs scan and fw mode config");
- break;
- default:
- break;
- }
-
- return status;
-
-}
/**
* hdd_features_init() - Init features
* @hdd_ctx: HDD context
@@ -9892,6 +10021,9 @@ static int hdd_features_init(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
sme_set_prefer_80MHz_over_160MHz(hdd_ctx->hHal,
hdd_ctx->config->sta_prefer_80MHz_over_160MHz);
+ sme_set_etsi_srd_ch_in_master_mode(hdd_ctx->hHal,
+ hdd_ctx->config->etsi_srd_chan_in_master_mode);
+
sme_set_allow_adj_ch_bcn(hdd_ctx->hHal,
hdd_ctx->config->allow_adj_ch_bcn);
@@ -9950,7 +10082,7 @@ static int hdd_features_init(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
hdd_err("Failed to register HDD callbacks!");
goto deregister_frames;
}
- status = hdd_set_dbs_scan_and_fw_mode_cfg(hdd_ctx);
+ status = wlan_hdd_update_dbs_scan_and_fw_mode_config(hdd_ctx);
if (!QDF_IS_STATUS_SUCCESS(status)) {
hdd_err("Failed to set dbs scan and fw mode cfg");
goto deregister_cb;
@@ -9993,6 +10125,8 @@ static int hdd_features_init(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
if (ret)
goto deregister_cb;
+ wlan_hdd_init_chan_info(hdd_ctx);
+
EXIT();
return 0;
@@ -10016,6 +10150,7 @@ out:
*/
static void hdd_features_deinit(hdd_context_t *hdd_ctx)
{
+ wlan_hdd_deinit_chan_info(hdd_ctx);
wlan_hdd_tsf_deinit(hdd_ctx);
}
@@ -10124,6 +10259,8 @@ int hdd_configure_cds(hdd_context_t *hdd_ctx, hdd_adapter_t *adapter)
if (hdd_ctx->config->enable_phy_reg_retention)
sme_cli_set_command(0, WMI_PDEV_PARAM_FAST_PWR_TRANSITION,
hdd_ctx->config->enable_phy_reg_retention, PDEV_CMD);
+ sme_cli_set_command(0, WMI_PDEV_PARAM_GCMP_SUPPORT_ENABLE,
+ hdd_ctx->config->gcmp_enabled, PDEV_CMD);
sme_cli_set_command(0, (int)WMI_PDEV_AUTO_DETECT_POWER_FAILURE,
hdd_ctx->config->auto_pwr_save_fail_mode, PDEV_CMD);
@@ -10227,7 +10364,6 @@ int hdd_wlan_stop_modules(hdd_context_t *hdd_ctx, bool ftm_mode)
qdf_device_t qdf_ctx;
QDF_STATUS qdf_status;
int ret = 0;
- p_cds_sched_context cds_sched_context = NULL;
bool is_unload_stop = cds_is_driver_unloading();
bool is_recover_stop = cds_is_driver_recovering();
bool is_idle_stop = !is_unload_stop && !is_recover_stop;
@@ -10305,6 +10441,8 @@ int hdd_wlan_stop_modules(hdd_context_t *hdd_ctx, bool ftm_mode)
goto done;
}
+ hdd_sysfs_destroy_version_interface();
+
qdf_status = cds_post_disable();
if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
hdd_err("Failed to process post CDS disable Modules! :%d",
@@ -10313,6 +10451,8 @@ int hdd_wlan_stop_modules(hdd_context_t *hdd_ctx, bool ftm_mode)
QDF_ASSERT(0);
}
+ hdd_runtime_suspend_context_deinit(hdd_ctx);
+
qdf_status = cds_close(hdd_ctx->pcds_context);
if (!QDF_IS_STATUS_SUCCESS(qdf_status)) {
hdd_warn("Failed to stop CDS: %d", qdf_status);
@@ -10320,13 +10460,6 @@ int hdd_wlan_stop_modules(hdd_context_t *hdd_ctx, bool ftm_mode)
QDF_ASSERT(0);
}
- /* Clean up message queues of TX, RX and MC thread */
- if (!is_recover_stop) {
- cds_sched_context = get_cds_sched_ctxt();
- if (cds_sched_context)
- cds_sched_flush_mc_mqs(cds_sched_context);
- }
-
hif_ctx = cds_get_context(QDF_MODULE_ID_HIF);
if (!hif_ctx) {
hdd_err("Hif context is Null");
@@ -10580,8 +10713,6 @@ int hdd_wlan_startup(struct device *dev)
goto err_stop_modules;
}
- wlan_hdd_init_chan_info(hdd_ctx);
-
ret = hdd_wiphy_init(hdd_ctx);
if (ret) {
hdd_err("Failed to initialize wiphy: %d", ret);
@@ -10634,8 +10765,6 @@ int hdd_wlan_startup(struct device *dev)
if (QDF_IS_STATUS_ERROR(status))
goto err_close_adapters;
- hdd_runtime_suspend_context_init(hdd_ctx);
-
if (hdd_ctx->config->fIsImpsEnabled)
hdd_set_idle_ps_config(hdd_ctx, true);
else
@@ -10647,7 +10776,6 @@ int hdd_wlan_startup(struct device *dev)
hdd_ctx->config->iface_change_wait_time,
WIFI_POWER_EVENT_WAKELOCK_IFACE_CHANGE_TIMER);
- hdd_start_complete(0);
goto success;
err_close_adapters:
@@ -10827,10 +10955,10 @@ int hdd_register_cb(hdd_context_t *hdd_ctx)
sme_set_nud_debug_stats_cb(hdd_ctx->hHal,
hdd_get_nud_stats_cb);
- status = sme_bpf_offload_register_callback(hdd_ctx->hHal,
- hdd_get_bpf_offload_cb);
+ status = sme_apf_offload_register_callback(hdd_ctx->hHal,
+ hdd_get_apf_capabilities_cb);
if (!QDF_IS_STATUS_SUCCESS(status)) {
- hdd_err("set bpf offload callback failed");
+ hdd_err("set apf offload callback failed");
ret = -EINVAL;
return ret;
}
@@ -10870,6 +10998,9 @@ int hdd_register_cb(hdd_context_t *hdd_ctx)
if (!QDF_IS_STATUS_SUCCESS(status))
hdd_err("set congestion callback failed");
+ sme_apf_read_memory_register_callback(hdd_ctx->hHal,
+ hdd_apf_read_memory_callback);
+
EXIT();
return ret;
@@ -10896,9 +11027,9 @@ void hdd_deregister_cb(hdd_context_t *hdd_ctx)
status);
sme_reset_link_layer_stats_ind_cb(hdd_ctx->hHal);
- status = sme_bpf_offload_deregister_callback(hdd_ctx->hHal);
+ status = sme_apf_offload_deregister_callback(hdd_ctx->hHal);
if (!QDF_IS_STATUS_SUCCESS(status))
- hdd_warn("De-register bpf offload callback failed: %d",
+ hdd_warn("De-register apf offload callback failed: %d",
status);
sme_reset_rssi_threshold_breached_cb(hdd_ctx->hHal);
@@ -10922,6 +11053,7 @@ void hdd_deregister_cb(hdd_context_t *hdd_ctx)
sme_deregister_oem_data_rsp_callback(hdd_ctx->hHal);
sme_deregister11d_scan_done_callback(hdd_ctx->hHal);
+ sme_apf_read_memory_deregister_callback(hdd_ctx->hHal);
EXIT();
}
@@ -11619,7 +11751,6 @@ void wlan_hdd_start_sap(hdd_adapter_t *ap_adapter, bool reinit)
if (0 != wlan_hdd_cfg80211_update_apies(ap_adapter)) {
hdd_err("SAP Not able to set AP IEs");
- wlansap_reset_sap_config_add_ie(sap_config, eUPDATE_IE_ALL);
goto end;
}
@@ -11637,6 +11768,7 @@ void wlan_hdd_start_sap(hdd_adapter_t *ap_adapter, bool reinit)
hdd_err("SAP Start failed");
goto end;
}
+ wlansap_reset_sap_config_add_ie(sap_config, eUPDATE_IE_ALL);
hdd_info("SAP Start Success");
set_bit(SOFTAP_BSS_STARTED, &ap_adapter->event_flags);
if (hostapd_state->bssState == BSS_START)
@@ -11648,6 +11780,7 @@ void wlan_hdd_start_sap(hdd_adapter_t *ap_adapter, bool reinit)
return;
end:
+ wlansap_reset_sap_config_add_ie(sap_config, eUPDATE_IE_ALL);
mutex_unlock(&hdd_ctx->sap_lock);
/* SAP context and beacon cleanup will happen during driver unload
* in hdd_stop_adapter
@@ -11816,6 +11949,10 @@ static ssize_t wlan_hdd_state_ctrl_param_write(struct file *filp,
goto exit;
}
+ if (strncmp(buf, wlan_on_str, strlen(wlan_on_str)) == 0) {
+ pr_info("Wifi Turning On from UI\n");
+ }
+
if (strncmp(buf, wlan_on_str, strlen(wlan_on_str)) != 0) {
pr_err("Invalid value received from framework");
goto exit;
@@ -11828,7 +11965,6 @@ static ssize_t wlan_hdd_state_ctrl_param_write(struct file *filp,
if (!rc) {
hdd_alert("Timed-out waiting in wlan_hdd_state_ctrl_param_write");
ret = -EINVAL;
- hdd_start_complete(ret);
return ret;
}
@@ -11919,13 +12055,7 @@ static int __hdd_module_init(void)
pr_err("%s: Loading driver v%s (%s)\n",
WLAN_MODULE_NAME,
g_wlan_driver_version,
- TIMER_MANAGER_STR MEMORY_DEBUG_STR);
-
- ret = wlan_hdd_state_ctrl_param_create();
- if (ret) {
- pr_err("wlan_hdd_state_create:%x\n", ret);
- goto err_dev_state;
- }
+ TIMER_MANAGER_STR MEMORY_DEBUG_STR PANIC_ON_BUG_STR);
pld_init();
@@ -11946,6 +12076,12 @@ static int __hdd_module_init(void)
goto out;
}
+ ret = wlan_hdd_state_ctrl_param_create();
+ if (ret) {
+ pr_err("wlan_hdd_state_create:%x\n", ret);
+ goto out;
+ }
+
pr_info("%s: driver loaded\n", WLAN_MODULE_NAME);
return 0;
@@ -11954,8 +12090,6 @@ out:
hdd_deinit();
err_hdd_init:
pld_deinit();
- wlan_hdd_state_ctrl_param_destroy();
-err_dev_state:
return ret;
}
@@ -12162,7 +12296,8 @@ static void __exit hdd_module_exit(void)
}
#endif
-static int fwpath_changed_handler(const char *kmessage, struct kernel_param *kp)
+static int fwpath_changed_handler(const char *kmessage,
+ const struct kernel_param *kp)
{
return param_set_copystring(kmessage, kp);
}
@@ -12334,7 +12469,8 @@ static int hdd_register_req_mode(hdd_context_t *hdd_ctx,
*
* Return - 0 on success and failure code on failure
*/
-static int __con_mode_handler(const char *kmessage, struct kernel_param *kp,
+static int __con_mode_handler(const char *kmessage,
+ const struct kernel_param *kp,
hdd_context_t *hdd_ctx)
{
int ret;
@@ -12433,7 +12569,7 @@ reset_flags:
}
-static int con_mode_handler(const char *kmessage, struct kernel_param *kp)
+static int con_mode_handler(const char *kmessage, const struct kernel_param *kp)
{
int ret;
hdd_context_t *hdd_ctx;
@@ -12914,6 +13050,28 @@ void hdd_drv_ops_inactivity_handler(void)
QDF_BUG(0);
}
+bool hdd_is_cli_iface_up(hdd_context_t *hdd_ctx)
+{
+ hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL;
+ hdd_adapter_t *adapter;
+ QDF_STATUS status;
+
+ status = hdd_get_front_adapter(hdd_ctx, &adapter_node);
+ while (NULL != adapter_node && QDF_STATUS_SUCCESS == status) {
+ adapter = adapter_node->pAdapter;
+ if ((adapter->device_mode == QDF_STA_MODE ||
+ adapter->device_mode == QDF_P2P_CLIENT_MODE) &&
+ qdf_atomic_test_bit(DEVICE_IFACE_OPENED,
+ &adapter->event_flags)){
+ return true;
+ }
+ status = hdd_get_next_adapter(hdd_ctx, adapter_node, &next);
+ adapter_node = next;
+ }
+
+ return false;
+}
+
/* Register the module init/exit functions */
module_init(hdd_module_init);
module_exit(hdd_module_exit);
@@ -12922,11 +13080,20 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Qualcomm Atheros, Inc.");
MODULE_DESCRIPTION("WLAN HOST DEVICE DRIVER");
-module_param_call(con_mode, con_mode_handler, param_get_int, &con_mode,
- S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+static const struct kernel_param_ops con_mode_ops = {
+ .set = con_mode_handler,
+ .get = param_get_int,
+};
+
+static const struct kernel_param_ops fwpath_ops = {
+ .set = fwpath_changed_handler,
+ .get = param_get_string,
+};
-module_param_call(fwpath, fwpath_changed_handler, param_get_string, &fwpath,
- S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+module_param_cb(con_mode, &con_mode_ops, &con_mode,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
+module_param_cb(fwpath, &fwpath_ops, &fwpath,
+ S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
module_param(enable_dfs_chan_scan, int, S_IRUSR | S_IRGRP | S_IROTH);
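The final hunk of wlan_hdd_main.c replaces module_param_call() with module_param_cb() backed by explicit const struct kernel_param_ops, matching the const struct kernel_param * callback prototypes adopted earlier in this diff for fwpath_changed_handler and con_mode_handler. A minimal standalone sketch of that registration pattern; the demo_* names are illustrative and not part of this driver:

/* Illustrative module-parameter sketch; demo_* names are not from this driver. */
#include <linux/module.h>
#include <linux/moduleparam.h>

static int demo_level;

static int demo_level_set(const char *val, const struct kernel_param *kp)
{
	pr_info("demo_level: request to set \"%s\"\n", val);
	return param_set_int(val, kp);	/* reuse the stock int parser */
}

static const struct kernel_param_ops demo_level_ops = {
	.set = demo_level_set,
	.get = param_get_int,
};

module_param_cb(demo_level, &demo_level_ops, &demo_level, 0644);
MODULE_LICENSE("GPL");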
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_nan_datapath.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_nan_datapath.c
index 6753afa107f4..be5b499286ca 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_nan_datapath.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_nan_datapath.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -303,25 +303,45 @@ static int hdd_ndi_start_bss(hdd_adapter_t *adapter,
static int hdd_get_random_nan_mac_addr(hdd_context_t *hdd_ctx,
struct qdf_mac_addr *mac_addr)
{
+ bool found;
hdd_adapter_t *adapter;
+ uint8_t pos, bit_pos, byte_pos, mask;
uint8_t i, attempts, max_attempt = 16;
- bool found;
for (attempts = 0; attempts < max_attempt; attempts++) {
found = false;
- cds_rand_get_bytes(0, (uint8_t *)mac_addr, sizeof(*mac_addr));
-
- /*
- * Reset multicast bit (bit-0) and set locally-administered bit
- */
- mac_addr->bytes[0] = 0x2;
-
- /*
- * to avoid potential conflict with FW's generated NMI mac addr,
- * host sets LSB if 6th byte to 0
- */
- mac_addr->bytes[5] &= 0xFE;
+ /* if NDI is present next addr is required to be 1 bit apart */
+ adapter = hdd_get_adapter(hdd_ctx, QDF_NDI_MODE);
+ if (adapter) {
+ hdd_debug("NDI already exists, deriving next NDI's mac");
+ qdf_mem_copy(mac_addr, &adapter->macAddressCurrent,
+ sizeof(*mac_addr));
+ cds_rand_get_bytes(0, &pos, sizeof(pos));
+ /* skipping byte 0, 5 leaves 8*4=32 positions */
+ pos = pos % 32;
+ bit_pos = pos % 8;
+ byte_pos = pos / 8;
+ mask = 1 << bit_pos;
+ /* flip the required bit */
+ mac_addr->bytes[byte_pos + 1] ^= mask;
+ } else {
+ cds_rand_get_bytes(0, (uint8_t *)mac_addr,
+ sizeof(*mac_addr));
+
+ /*
+ * Reset multicast bit (bit-0) and set
+ * locally-administered bit
+ */
+ mac_addr->bytes[0] = 0x2;
+
+ /*
+ * to avoid potential conflict with FW's generated NMI
+ * mac addr, host sets LSB of 6th byte to 0
+ */
+ mac_addr->bytes[5] &= 0xFE;
+ }
+ /* check for generated mac addr against provisioned addr */
for (i = 0; i < hdd_ctx->num_provisioned_addr; i++) {
if ((!qdf_mem_cmp(hdd_ctx->
provisioned_mac_addr[i].bytes,
@@ -334,6 +354,7 @@ static int hdd_get_random_nan_mac_addr(hdd_context_t *hdd_ctx,
if (found)
continue;
+ /* check for generated mac addr against derived addr */
for (i = 0; i < hdd_ctx->num_derived_addr; i++) {
if ((!qdf_mem_cmp(hdd_ctx->
derived_mac_addr[i].bytes,
@@ -345,6 +366,7 @@ static int hdd_get_random_nan_mac_addr(hdd_context_t *hdd_ctx,
if (found)
continue;
+ /* check for generated mac addr against taken addr */
adapter = hdd_get_adapter_by_macaddr(hdd_ctx, mac_addr->bytes);
if (!adapter)
return 0;
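When an NDI already exists, the new branch above derives the next NDI MAC by flipping one randomly chosen bit in bytes 1 through 4 of the current address (byte 0 keeps the unicast and locally-administered bits, byte 5 keeps its LSB reserved against the firmware's NMI address), then falls through to the same collision checks. A small userspace sketch of just that derivation step, with illustrative names:

/* Illustrative only: derive a "next" address one bit apart from the current
 * one, never touching byte 0 or byte 5 (8 bits x 4 bytes = 32 candidates). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void derive_next_addr(const uint8_t cur[6], uint8_t next[6],
			     uint8_t random_byte)
{
	uint8_t pos = random_byte % 32;
	uint8_t byte_pos = pos / 8;	/* 0..3, shifted to bytes 1..4 below */
	uint8_t bit_pos = pos % 8;

	memcpy(next, cur, 6);
	next[byte_pos + 1] ^= (uint8_t)(1u << bit_pos);
}

int main(void)
{
	const uint8_t cur[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x56 };
	uint8_t next[6];

	derive_next_addr(cur, next, (uint8_t)rand());
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       next[0], next[1], next[2], next[3], next[4], next[5]);
	return 0;
}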
@@ -767,28 +789,48 @@ static int hdd_ndp_responder_req_handler(hdd_context_t *hdd_ctx,
ENTER();
- if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]) {
- hdd_err("Interface name string is unavailable");
+ /* First validate the response code from the user space */
+ if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE]) {
+ hdd_err("ndp_rsp code is unavailable");
return -EINVAL;
}
+ req.ndp_rsp = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE]);
- iface_name = nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]);
- /* Check if there is already an existing NAN interface */
- adapter = hdd_get_adapter_by_iface_name(hdd_ctx, iface_name);
- if (!adapter) {
- hdd_err("NAN data interface %s not available", iface_name);
- return -EINVAL;
- }
+ if (req.ndp_rsp == NDP_RESPONSE_ACCEPT) {
+ /* iface on which NDP is requested to be created */
+ if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]) {
+ hdd_err("NAN iface name not provided");
+ return -ENODEV;
+ }
+ iface_name = nla_data(tb[QCA_WLAN_VENDOR_ATTR_NDP_IFACE_STR]);
+ adapter = hdd_get_adapter_by_iface_name(hdd_ctx, iface_name);
+ if (!adapter) {
+ hdd_err("NAN iface %s unavailable", iface_name);
+ return -ENODEV;
+ }
+ if (!WLAN_HDD_IS_NDI(adapter)) {
+ hdd_err("Iface %s not in NDI mode", iface_name);
+ return -ENODEV;
+ }
+ } else {
+ /*
+ * If the data indication is rejected, iface name in cmd is not
+ * required, hence the user provided iface name is discarded and
+ * first available NDI is used.
+ */
+ hdd_debug("ndp response rejected, use first available NDI");
- if (!WLAN_HDD_IS_NDI(adapter)) {
- hdd_err("Interface %s is not in NDI mode", iface_name);
- return -EINVAL;
+ adapter = hdd_get_adapter(hdd_ctx, QDF_NDI_MODE);
+ if (!adapter) {
+ hdd_err("No active NDIs, rejecting the request");
+ return -ENODEV;
+ }
}
/* NAN data path coexists only with STA interface */
if (!hdd_is_ndp_allowed(hdd_ctx)) {
hdd_err("Unsupported concurrency for NAN datapath");
- return -EINVAL;
+ return -EPERM;
}
ndp_ctx = WLAN_HDD_GET_NDP_CTX_PTR(adapter);
@@ -817,12 +859,6 @@ static int hdd_ndp_responder_req_handler(hdd_context_t *hdd_ctx,
req.ndp_instance_id =
nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_NDP_INSTANCE_ID]);
- if (!tb[QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE]) {
- hdd_err("ndp_rsp is unavailable");
- return -EINVAL;
- }
- req.ndp_rsp = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_NDP_RESPONSE_CODE]);
-
if (tb[QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO]) {
req.ndp_info.ndp_app_info_len =
nla_len(tb[QCA_WLAN_VENDOR_ATTR_NDP_APP_INFO]);
@@ -1967,9 +2003,8 @@ static void hdd_ndp_sch_update_ind_handler(hdd_adapter_t *adapter,
return;
}
- if (0 != wlan_hdd_validate_context(hdd_ctx)) {
+ if (0 != wlan_hdd_validate_context(hdd_ctx))
return;
- }
data_len = NLMSG_HDRLEN + (6 * NLA_HDRLEN) + (3 * sizeof(uint32_t))
+ QDF_MAC_ADDR_SIZE
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_napi.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_napi.c
index 912dd00056ee..3591f201ba34 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_napi.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_napi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_p2p.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_p2p.c
index d9545d1679c3..188717275d7b 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_p2p.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_p2p.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -161,6 +161,26 @@ static bool hdd_p2p_is_action_type_rsp(const u8 *buf, uint32_t len)
}
/**
+ * hdd_is_p2p_go_cnf_frame() - function to check if the frame type is go neg cnf
+ * @buf: pointer to frame
+ * @len: frame length
+ *
+ * This function is used to check if the given frame is GO negotiation
+ * confirmation frame.
+ *
+ * Return: true if the frame is go negotiation confirmation otherwise false
+ */
+static bool hdd_is_p2p_go_cnf_frame(const u8 *buf, uint32_t len)
+{
+ if (wlan_hdd_is_type_p2p_action(buf, len) &&
+ buf[WLAN_HDD_PUBLIC_ACTION_FRAME_SUB_TYPE_OFFSET] ==
+ WLAN_HDD_GO_NEG_CNF)
+ return true;
+ else
+ return false;
+}
+
+/**
* hdd_random_mac_callback() - Callback invoked from wmi layer
* @set_random_addr: Status of random mac filter set operation
* @context: Context passed while registring callback
@@ -1163,17 +1183,19 @@ static int wlan_hdd_execute_remain_on_channel(hdd_adapter_t *pAdapter,
return -EINVAL;
}
- if (REMAIN_ON_CHANNEL_REQUEST ==
- pRemainChanCtx->rem_on_chan_request) {
+ mutex_lock(&cfgState->remain_on_chan_ctx_lock);
+ pRemainChanCtx = cfgState->remain_on_chan_ctx;
+ if ((pRemainChanCtx) && (REMAIN_ON_CHANNEL_REQUEST ==
+ pRemainChanCtx->rem_on_chan_request)) {
+ mutex_unlock(&cfgState->remain_on_chan_ctx_lock);
if (QDF_STATUS_SUCCESS != sme_register_mgmt_frame(
- WLAN_HDD_GET_HAL_CTX(pAdapter),
- sessionId,
- (SIR_MAC_MGMT_FRAME << 2) |
- (SIR_MAC_MGMT_PROBE_REQ << 4),
- NULL, 0))
+ WLAN_HDD_GET_HAL_CTX(pAdapter), sessionId,
+ (SIR_MAC_MGMT_FRAME << 2) |
+ (SIR_MAC_MGMT_PROBE_REQ << 4), NULL, 0))
hdd_err("sme_register_mgmt_frame failed");
+ } else {
+ mutex_unlock(&cfgState->remain_on_chan_ctx_lock);
}
-
} else if ((QDF_SAP_MODE == pAdapter->device_mode) ||
(QDF_P2P_GO_MODE == pAdapter->device_mode)) {
/* call sme API to start remain on channel. */
@@ -1347,6 +1369,89 @@ void wlan_hdd_roc_request_dequeue(struct work_struct *work)
qdf_mem_free(hdd_roc_req);
}
+/**
+ * wlan_hdd_is_roc_in_progress_for_other_adapters() - Check if roc is in
+ * progress for another adapter
+ * @hdd_ctx: HDD context
+ * @cur_adapter: current adapter
+ *
+ * Roc requests are serialized per adapter. This means that simultaneous
+ * roc requests on multiple adapters are not supported. This function checks
+ * and returns if there is an roc being executed on another adapter.
+ *
+ * Return: true if roc is ongoing for another adapter, false otherwise.
+ */
+static bool
+wlan_hdd_is_roc_in_progress_for_other_adapters(hdd_context_t *hdd_ctx,
+ hdd_adapter_t *cur_adapter)
+{
+ hdd_adapter_list_node_t *adapter_node = NULL, *next = NULL;
+ hdd_adapter_t *adapter;
+ QDF_STATUS qdf_status;
+
+ qdf_status = hdd_get_front_adapter(hdd_ctx, &adapter_node);
+
+ while ((NULL != adapter_node) && (QDF_STATUS_SUCCESS == qdf_status)) {
+ adapter = adapter_node->pAdapter;
+ if (cur_adapter != adapter) {
+ if (adapter->is_roc_inprogress)
+ return true;
+ }
+
+ qdf_status = hdd_get_next_adapter(hdd_ctx, adapter_node, &next);
+ adapter_node = next;
+ }
+
+ return false;
+}
+
+/**
+ * wlan_hdd_is_roc_req_queued_by_other_adapters() - Check if an roc req is
+ * queued by another adapter
+ * @hdd_ctx: HDD context
+ * @cur_adapter: current adapter
+ *
+ * Roc requests are serialized per adapter. This means that simultaneous
+ * roc requests on multiple adapters are not supported. This function checks
+ * and returns if there is an roc request queued by another adapter.
+ *
+ * Return: true if roc is queued by another adapter, false otherwise.
+ */
+static bool
+wlan_hdd_is_roc_req_queued_by_other_adapters(hdd_context_t *hdd_ctx,
+ hdd_adapter_t *cur_adapter)
+{
+ qdf_list_node_t *node = NULL, *next_node = NULL;
+ hdd_roc_req_t *roc_req;
+
+ qdf_spin_lock(&hdd_ctx->hdd_roc_req_q_lock);
+ if (list_empty(&hdd_ctx->hdd_roc_req_q.anchor)) {
+ qdf_spin_unlock(&hdd_ctx->hdd_roc_req_q_lock);
+ return false;
+ }
+ if (QDF_STATUS_SUCCESS != qdf_list_peek_front(&hdd_ctx->hdd_roc_req_q,
+ &next_node)) {
+ qdf_spin_unlock(&hdd_ctx->hdd_roc_req_q_lock);
+ hdd_err("Unable to peek roc element from list");
+ return false;
+ }
+
+ do {
+ node = next_node;
+ roc_req = qdf_container_of(node, hdd_roc_req_t, node);
+ if (roc_req->pAdapter != cur_adapter) {
+ qdf_spin_unlock(&hdd_ctx->hdd_roc_req_q_lock);
+ return true;
+ }
+
+ } while (QDF_STATUS_SUCCESS == qdf_list_peek_next(
+ &hdd_ctx->hdd_roc_req_q,
+ node, &next_node));
+ qdf_spin_unlock(&hdd_ctx->hdd_roc_req_q_lock);
+
+ return false;
+}
+
static int wlan_hdd_request_remain_on_channel(struct wiphy *wiphy,
struct net_device *dev,
struct ieee80211_channel *chan,
@@ -1375,8 +1480,18 @@ static int wlan_hdd_request_remain_on_channel(struct wiphy *wiphy,
if (0 != ret)
return ret;
+ if ((wlan_hdd_is_roc_in_progress_for_other_adapters(pHddCtx, pAdapter))
+ || (wlan_hdd_is_roc_req_queued_by_other_adapters(pHddCtx, pAdapter))
+ ) {
+ hdd_debug("ROC in progress or queued for another adapter");
+ return -EAGAIN;
+ }
if (cds_is_connection_in_progress(NULL, NULL)) {
hdd_debug("Connection is in progress");
+ if (request_type == OFF_CHANNEL_ACTION_TX) {
+ hdd_debug("Reject Offchannel action frame tx as conection in progress");
+ return -EAGAIN;
+ }
isBusy = true;
}
pRemainChanCtx = qdf_mem_malloc(sizeof(hdd_remain_on_chan_ctx_t));
@@ -1967,7 +2082,13 @@ static int __wlan_hdd_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
if (NULL != cfgState->buf) {
if (!noack) {
hdd_warn("Previous P2P Action frame packet pending");
- hdd_cleanup_actionframe(pAdapter->pHddCtx, pAdapter);
+ if (!hdd_is_p2p_go_cnf_frame(buf, len))
+ hdd_cleanup_actionframe(pAdapter->pHddCtx,
+ pAdapter);
+ else {
+ hdd_cleanup_actionframe_no_wait(
+ pAdapter->pHddCtx, pAdapter);
+ }
} else {
hdd_err("Pending Action frame packet return EBUSY");
return -EBUSY;
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_power.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_power.c
index 5538529c758f..0228566ae2e1 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_power.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_power.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -661,13 +661,8 @@ void hdd_conf_hostoffload(hdd_adapter_t *pAdapter, bool fenable)
}
/* Configure DTIM hardware filter rules */
- {
- enum hw_filter_mode mode = pHddCtx->config->hw_filter_mode;
-
- if (!fenable)
- mode = HW_FILTER_DISABLED;
- hdd_conf_hw_filter_mode(pAdapter, mode);
- }
+ hdd_conf_hw_filter_mode(pAdapter, pHddCtx->config->hw_filter_mode,
+ fenable);
EXIT();
}
@@ -1057,7 +1052,8 @@ QDF_STATUS hdd_conf_arp_offload(hdd_adapter_t *pAdapter, bool fenable)
return QDF_STATUS_SUCCESS;
}
-int hdd_conf_hw_filter_mode(hdd_adapter_t *adapter, enum hw_filter_mode mode)
+int hdd_conf_hw_filter_mode(hdd_adapter_t *adapter, enum hw_filter_mode mode,
+ bool filter_enable)
{
QDF_STATUS status;
@@ -1067,7 +1063,8 @@ int hdd_conf_hw_filter_mode(hdd_adapter_t *adapter, enum hw_filter_mode mode)
}
status = sme_conf_hw_filter_mode(WLAN_HDD_GET_HAL_CTX(adapter),
- adapter->sessionId, mode);
+ adapter->sessionId, mode,
+ filter_enable);
return qdf_status_to_os_return(status);
}
@@ -1581,20 +1578,9 @@ QDF_STATUS hdd_wlan_re_init(void)
}
bug_on_reinit_failure = pHddCtx->config->bug_on_reinit_failure;
- /* Try to get an adapter from mode ID */
- pAdapter = hdd_get_adapter(pHddCtx, QDF_STA_MODE);
- if (!pAdapter) {
- pAdapter = hdd_get_adapter(pHddCtx, QDF_SAP_MODE);
- if (!pAdapter) {
- pAdapter = hdd_get_adapter(pHddCtx, QDF_IBSS_MODE);
- if (!pAdapter) {
- pAdapter = hdd_get_adapter(pHddCtx,
- QDF_MONITOR_MODE);
- if (!pAdapter)
- hdd_err("Failed to get adapter");
- }
- }
- }
+ pAdapter = hdd_get_first_valid_adapter();
+ if (!pAdapter)
+ hdd_err("Failed to get adapter");
if (pHddCtx->config->enable_dp_trace)
hdd_dp_trace_init(pHddCtx->config);
@@ -1628,12 +1614,6 @@ QDF_STATUS hdd_wlan_re_init(void)
/* Allow the phone to go to sleep */
hdd_allow_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_REINIT);
- ret = hdd_register_cb(pHddCtx);
- if (ret) {
- hdd_err("Failed to register HDD callbacks!");
- goto err_cds_disable;
- }
-
/* set chip power save failure detected callback */
sme_set_chip_pwr_save_fail_cb(pHddCtx->hHal,
hdd_chip_pwr_save_fail_detected_cb);
@@ -1642,9 +1622,6 @@ QDF_STATUS hdd_wlan_re_init(void)
hdd_info("WLAN host driver reinitiation completed!");
goto success;
-err_cds_disable:
- hdd_wlan_stop_modules(pHddCtx, false);
-
err_re_init:
/* Allow the phone to go to sleep */
hdd_allow_suspend(WIFI_POWER_EVENT_WAKELOCK_DRIVER_REINIT);
@@ -2161,7 +2138,7 @@ static void hdd_stop_dhcp_ind(hdd_adapter_t *adapter)
adapter->macAddressCurrent.bytes,
adapter->sessionId);
hdd_allow_suspend(WIFI_POWER_EVENT_WAKELOCK_DHCP);
- qdf_runtime_pm_allow_suspend(&adapter->connect_rpm_ctx.connect);
+ qdf_runtime_pm_allow_suspend(&hdd_ctx->runtime_context.connect);
}
/**
@@ -2178,7 +2155,7 @@ static void hdd_start_dhcp_ind(hdd_adapter_t *adapter)
hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(adapter);
hdd_debug("DHCP start indicated through power save");
- qdf_runtime_pm_prevent_suspend(&adapter->connect_rpm_ctx.connect);
+ qdf_runtime_pm_prevent_suspend(&hdd_ctx->runtime_context.connect);
hdd_prevent_suspend_timeout(HDD_WAKELOCK_TIMEOUT_CONNECT,
WIFI_POWER_EVENT_WAKELOCK_DHCP);
sme_dhcp_start_ind(hdd_ctx->hHal, adapter->device_mode,
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_regulatory.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_regulatory.c
index 6dd85d544128..cf45eb10c4a1 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_regulatory.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_regulatory.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_scan.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_scan.c
index 1f84f1ea41a4..00133e41127c 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_scan.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_scan.c
@@ -59,6 +59,7 @@
* Count to ratelimit the HDD logs during Scan and connect
*/
#define HDD_SCAN_REJECT_RATE_LIMIT 5
+#define HDD_DBS_SCAN_DISABLE_RATE_LIMIT 10
/**
* enum essid_bcast_type - SSID broadcast type
@@ -305,6 +306,7 @@ static int hdd_add_scan_event_from_ies(struct hdd_scan_info *scanInfo,
tCsrScanResultInfo *scan_result,
char *current_event, char *last_event)
{
+ int ret;
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(scanInfo->dev);
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
tSirBssDescription *descriptor = &scan_result->BssDescriptor;
@@ -334,9 +336,13 @@ static int hdd_add_scan_event_from_ies(struct hdd_scan_info *scanInfo,
if (ie_length <= 0)
return 0;
- dot11f_unpack_beacon_i_es((tpAniSirGlobal)
+ ret = dot11f_unpack_beacon_i_es((tpAniSirGlobal)
hHal, (uint8_t *) descriptor->ieFields,
ie_length, &dot11BeaconIEs, false);
+ if (DOT11F_FAILED(ret)) {
+ hdd_err("unpack failed, ret: 0x%x", ret);
+ return -EINVAL;
+ }
pDot11SSID = &dot11BeaconIEs.SSID;
@@ -633,7 +639,8 @@ static void hdd_update_dbs_scan_ctrl_ext_flag(hdd_context_t *hdd_ctx,
}
if (!(hdd_ctx->is_dbs_scan_duty_cycle_enabled)) {
scan_dbs_policy = SME_SCAN_DBS_POLICY_IGNORE_DUTY;
- hdd_info("DBS scan duty cycle is disabled");
+ hdd_info_ratelimited(HDD_DBS_SCAN_DISABLE_RATE_LIMIT,
+ "DBS scan duty cycle is disabled");
goto end;
}
@@ -2409,7 +2416,9 @@ static int __wlan_hdd_cfg80211_scan(struct wiphy *wiphy,
&hdd_cfg80211_scan_done_callback, dev);
if (QDF_STATUS_SUCCESS != status) {
- hdd_err("sme_scan_request returned error %d", status);
+ hdd_err_ratelimited(HDD_SCAN_REJECT_RATE_LIMIT,
+ "sme_scan_request returned error %d",
+ status);
if (QDF_STATUS_E_RESOURCES == status) {
scan_ebusy_cnt++;
hdd_err("HO is in progress. Defer scan scan_ebusy_cnt: %d",
@@ -2863,7 +2872,8 @@ static int __wlan_hdd_cfg80211_vendor_scan(struct wiphy *wiphy,
ret = __wlan_hdd_cfg80211_scan(wiphy, request, VENDOR_SCAN);
if (0 != ret) {
- hdd_err("Scan Failed. Ret = %d", ret);
+ hdd_err_ratelimited(HDD_SCAN_REJECT_RATE_LIMIT,
+ "Scan Failed. Ret = %d", ret);
qdf_mem_free(request);
return ret;
}
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c
index 0856557bfdec..390dc97aac93 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_softap_tx_rx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -275,8 +275,8 @@ static inline struct sk_buff *hdd_skb_orphan(hdd_adapter_t *pAdapter,
*
* Return: Always returns NETDEV_TX_OK
*/
-static int __hdd_softap_hard_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+static netdev_tx_t __hdd_softap_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
sme_ac_enum_type ac = SME_AC_BE;
hdd_adapter_t *pAdapter = (hdd_adapter_t *) netdev_priv(dev);
@@ -425,20 +425,12 @@ static int __hdd_softap_hard_start_xmit(struct sk_buff *skb,
pAdapter->aStaInfo[STAId].last_tx_rx_ts = qdf_system_ticks();
hdd_event_eapol_log(skb, QDF_TX);
- qdf_dp_trace_log_pkt(pAdapter->sessionId, skb, QDF_TX);
QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, QDF_NBUF_TX_PKT_HDD);
-
qdf_dp_trace_set_track(skb, QDF_TX);
DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD,
qdf_nbuf_data_addr(skb), sizeof(qdf_nbuf_data(skb)),
QDF_TX));
- DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_RECORD,
- (uint8_t *)skb->data, qdf_nbuf_len(skb), QDF_TX));
- if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
- DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_RECORD,
- (uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
- (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE), QDF_TX));
if (pAdapter->tx_fn(ol_txrx_get_vdev_by_sta_id(STAId),
(qdf_nbuf_t) skb) != NULL) {
@@ -456,12 +448,8 @@ drop_pkt_and_release_skb:
qdf_net_buf_debug_release_skb(skb);
drop_pkt:
- DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_DROP_PACKET_RECORD,
- (uint8_t *)skb->data, qdf_nbuf_len(skb), QDF_TX));
- if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
- DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_DROP_PACKET_RECORD,
- (uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
- (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE), QDF_TX));
+ qdf_dp_trace_data_pkt(skb, QDF_DP_TRACE_DROP_PACKET_RECORD, 0,
+ QDF_TX);
kfree_skb(skb);
drop_pkt_accounting:
@@ -481,9 +469,10 @@ drop_pkt_accounting:
*
* Return: Always returns NETDEV_TX_OK
*/
-int hdd_softap_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t hdd_softap_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
- int ret;
+ netdev_tx_t ret;
cds_ssr_protect(__func__);
ret = __hdd_softap_hard_start_xmit(skb, dev);
@@ -686,7 +675,6 @@ QDF_STATUS hdd_softap_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf)
unsigned int cpu_index;
struct sk_buff *skb = NULL;
hdd_context_t *pHddCtx = NULL;
- bool proto_pkt_logged = false;
struct qdf_mac_addr src_mac;
uint8_t staid;
@@ -743,24 +731,14 @@ QDF_STATUS hdd_softap_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf)
}
hdd_event_eapol_log(skb, QDF_RX);
- proto_pkt_logged = qdf_dp_trace_log_pkt(pAdapter->sessionId,
- skb, QDF_RX);
-
+ qdf_dp_trace_log_pkt(pAdapter->sessionId, skb, QDF_RX);
DPTRACE(qdf_dp_trace(skb,
QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD,
qdf_nbuf_data_addr(skb),
sizeof(qdf_nbuf_data(skb)), QDF_RX));
-
- if (!proto_pkt_logged) {
- DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_RX_PACKET_RECORD,
- (uint8_t *)skb->data, qdf_nbuf_len(skb), QDF_RX));
- if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
- DPTRACE(qdf_dp_trace(skb,
- QDF_DP_TRACE_HDD_RX_PACKET_RECORD,
- (uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
- (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE),
- QDF_RX));
- }
+ DPTRACE(qdf_dp_trace_data_pkt(skb,
+ QDF_DP_TRACE_RX_PACKET_RECORD,
+ 0, QDF_RX));
skb->protocol = eth_type_trans(skb, skb->dev);
@@ -827,11 +805,12 @@ QDF_STATUS hdd_softap_deregister_sta(hdd_adapter_t *pAdapter, uint8_t staId)
if (pAdapter->aStaInfo[staId].isUsed) {
if (hdd_ipa_uc_is_enabled(pHddCtx)) {
- hdd_ipa_wlan_evt(pAdapter,
+ if (hdd_ipa_wlan_evt(pAdapter,
pAdapter->aStaInfo[staId].ucSTAId,
HDD_IPA_CLIENT_DISCONNECT,
pAdapter->aStaInfo[staId].macAddrSTA.
- bytes);
+ bytes))
+ hdd_err("WLAN_CLIENT_DISCONNECT event failed");
}
spin_lock_bh(&pAdapter->staInfo_lock);
qdf_mem_zero(&pAdapter->aStaInfo[staId],
@@ -929,6 +908,10 @@ QDF_STATUS hdd_softap_register_sta(hdd_adapter_t *pAdapter,
pAdapter->aStaInfo[staId].tlSTAState = OL_TXRX_PEER_STATE_AUTH;
pAdapter->sessionCtx.ap.uIsAuthenticated = true;
+ if (!qdf_is_macaddr_broadcast(pPeerMacAddress))
+ qdf_status = wlan_hdd_send_sta_authorized_event(
+ pAdapter, pHddCtx,
+ pPeerMacAddress);
} else {
hdd_info("ULA auth StaId= %d. Changing TL state to CONNECTED at Join time",
@@ -1039,6 +1022,14 @@ QDF_STATUS hdd_softap_stop_bss(hdd_adapter_t *pAdapter)
sme_update_channel_list(pHddCtx->hHal);
}
+ if (hdd_ipa_is_enabled(pHddCtx)) {
+ if (hdd_ipa_wlan_evt(pAdapter,
+ WLAN_HDD_GET_AP_CTX_PTR(pAdapter)->uBCStaId,
+ HDD_IPA_AP_DISCONNECT,
+ pAdapter->dev->dev_addr))
+ hdd_err("WLAN_AP_DISCONNECT event failed");
+ }
+
return qdf_status;
}
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_stats.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_stats.c
index 1e6bbd46a50e..cfb6789844f6 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_stats.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_stats.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -4379,6 +4379,8 @@ static int __wlan_hdd_cfg80211_get_station(struct wiphy *wiphy,
}
maxSpeedMCS = 1;
}
+ if (nss == 2)
+ maxMCSIdx += MAX_HT_MCS_IDX;
}
}
@@ -4437,8 +4439,7 @@ static int __wlan_hdd_cfg80211_get_station(struct wiphy *wiphy,
#endif
} else if (rate_flags & eHAL_TX_RATE_VHT20) {
sinfo->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
- } else
- sinfo->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
+ }
if (rate_flags &
(eHAL_TX_RATE_HT20 | eHAL_TX_RATE_HT40)) {
sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
@@ -4478,7 +4479,7 @@ static int __wlan_hdd_cfg80211_get_station(struct wiphy *wiphy,
/* must be MCS */
sinfo->txrate.mcs = mcs_index;
sinfo->txrate.nss = nss;
- sinfo->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
+
if (rate_flags & eHAL_TX_RATE_VHT80) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0)) || defined(WITH_BACKPORTS)
sinfo->txrate.bw = RATE_INFO_BW_80;
@@ -4505,6 +4506,8 @@ static int __wlan_hdd_cfg80211_get_station(struct wiphy *wiphy,
RATE_INFO_FLAGS_40_MHZ_WIDTH;
#endif
}
+ } else {
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_VHT_MCS;
}
if (rate_flags & eHAL_TX_RATE_SGI) {
sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tdls.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tdls.c
index 29dd6fb7e381..b4e2afa467db 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tdls.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tdls.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -250,6 +250,7 @@ void wlan_hdd_tdls_disable_offchan_and_teardown_links(hdd_context_t *hddctx,
u8 staidx;
hddTdlsPeer_t *curr_peer = NULL;
hdd_adapter_t *adapter = NULL;
+ bool tdls_in_progress = false;
if (eTDLS_SUPPORT_NOT_ENABLED == hddctx->tdls_mode) {
hdd_debug("TDLS mode is disabled OR not enabled in FW");
@@ -265,8 +266,11 @@ void wlan_hdd_tdls_disable_offchan_and_teardown_links(hdd_context_t *hddctx,
connected_tdls_peers = wlan_hdd_tdls_connected_peers(adapter);
- if (!connected_tdls_peers) {
- hdd_debug("No TDLS connected peers to delete");
+ if (wlan_hdd_tdls_is_progress(hddctx, NULL, true))
+ tdls_in_progress = true;
+
+ if (!(connected_tdls_peers || tdls_in_progress)) {
+ hdd_debug("No TDLS connected/in progress peers to delete");
return;
}
@@ -835,6 +839,9 @@ int wlan_hdd_tdls_init(hdd_adapter_t *pAdapter)
wlan_hdd_tdls_del_non_forced_peers(pHddTdlsCtx);
hdd_tdls_context_init(pHddCtx, true);
+
+ /* Reset tx completion status in case of SSR */
+ pAdapter->mgmtTxCompletionStatus = false;
}
sme_set_tdls_power_save_prohibited(WLAN_HDD_GET_HAL_CTX(pAdapter),
@@ -2160,13 +2167,17 @@ void wlan_hdd_check_conc_and_update_tdls_state(hdd_context_t *hdd_ctx,
{
hdd_adapter_t *temp_adapter;
uint16_t connected_tdls_peers;
+ bool tdls_in_progress = false;
temp_adapter = wlan_hdd_tdls_get_adapter(hdd_ctx);
if (NULL != temp_adapter) {
if (disable_tdls) {
connected_tdls_peers = wlan_hdd_tdls_connected_peers(
temp_adapter);
- if (!connected_tdls_peers ||
+ if (wlan_hdd_tdls_is_progress(hdd_ctx, NULL, true))
+ tdls_in_progress = true;
+
+ if (!(tdls_in_progress || connected_tdls_peers) ||
(eTDLS_SUPPORT_NOT_ENABLED == hdd_ctx->tdls_mode)) {
mutex_lock(&hdd_ctx->tdls_lock);
if (hdd_ctx->set_state_info.set_state_cnt !=
@@ -3010,7 +3021,6 @@ void wlan_hdd_tdls_timer_restart(hdd_adapter_t *pAdapter,
}
if (hdd_conn_is_connected(pHddStaCtx)) {
- qdf_mc_timer_stop(timer);
qdf_mc_timer_start(timer, expirationTime);
}
}
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tx_rx.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tx_rx.c
index 93e2fad23b7b..154000ef57f2 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tx_rx.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_tx_rx.c
@@ -759,6 +759,45 @@ void hdd_tx_rx_collect_connectivity_stats_info(struct sk_buff *skb,
}
}
+/**
+ * hdd_is_tx_allowed() - check if Tx is allowed based on current peer state
+ * @skb: pointer to OS packet (sk_buff)
+ * @peer_id: Peer STA ID in peer table
+ *
+ * This function gets the peer state from DP and checks whether it is
+ * OL_TXRX_PEER_STATE_CONN or OL_TXRX_PEER_STATE_AUTH. Only EAPOL (802.1X)
+ * packets are allowed when the peer state is OL_TXRX_PEER_STATE_CONN; all
+ * packets are allowed when it is OL_TXRX_PEER_STATE_AUTH.
+ *
+ * Return: true if Tx is allowed, false otherwise.
+ */
+static inline bool hdd_is_tx_allowed(struct sk_buff *skb, uint8_t peer_id)
+{
+ enum ol_txrx_peer_state peer_state;
+ void *pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+ void *peer;
+
+ QDF_ASSERT(pdev);
+ peer = ol_txrx_peer_find_by_local_id(pdev, peer_id);
+
+ if (peer == NULL) {
+ DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_DROP_PACKET_RECORD,
+ (uint8_t *)skb->data,
+ qdf_nbuf_len(skb), QDF_TX));
+ return false;
+ }
+ peer_state = ol_txrx_get_peer_state(peer);
+ if (OL_TXRX_PEER_STATE_AUTH == peer_state)
+ return true;
+ else if (OL_TXRX_PEER_STATE_CONN == peer_state &&
+ ntohs(skb->protocol) == HDD_ETHERTYPE_802_1_X)
+ return true;
+ DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_DROP_PACKET_RECORD,
+ (uint8_t *)skb->data,
+ qdf_nbuf_len(skb), QDF_TX));
+
+ return false;
+}
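The check above is effectively a two-state rule: once the peer is authorized everything may be sent, while a merely connected peer may only exchange the 802.1X handshake. A self-contained sketch of just that decision, with an illustrative ethertype test standing in for the driver's skb->protocol handling:

#include <stdbool.h>
#include <stdint.h>

enum peer_state { PEER_STATE_CONN, PEER_STATE_AUTH };

#define ETHERTYPE_EAPOL 0x888E

static bool is_eapol(uint16_t ethertype)
{
	return ethertype == ETHERTYPE_EAPOL;
}

/* AUTH: everything may be sent; CONN: only EAPOL frames; otherwise drop */
static bool tx_allowed(enum peer_state state, uint16_t ethertype)
{
	if (state == PEER_STATE_AUTH)
		return true;
	return state == PEER_STATE_CONN && is_eapol(ethertype);
}

With this sketch, tx_allowed(PEER_STATE_CONN, 0x0800) is false while tx_allowed(PEER_STATE_CONN, 0x888E) is true; anything is allowed once the state is PEER_STATE_AUTH.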
/**
* __hdd_hard_start_xmit() - Transmit a frame
@@ -773,7 +812,8 @@ void hdd_tx_rx_collect_connectivity_stats_info(struct sk_buff *skb,
*
* Return: Always returns NETDEV_TX_OK
*/
-static int __hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t __hdd_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
QDF_STATUS status;
sme_ac_enum_type ac;
@@ -785,11 +825,8 @@ static int __hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
hdd_context_t *hdd_ctx = WLAN_HDD_GET_CTX(pAdapter);
#endif
hdd_station_ctx_t *pHddStaCtx = &pAdapter->sessionCtx.station;
+
uint8_t pkt_type = 0;
- bool pkt_proto_logged = false;
-#ifdef QCA_PKT_PROTO_TRACE
- uint8_t proto_type = 0;
-#endif /* QCA_PKT_PROTO_TRACE */
bool is_arp = false;
#ifdef QCA_WIFI_FTM
@@ -929,19 +966,6 @@ static int __hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
skb->queue_mapping = hdd_linux_up_to_ac_map[up];
}
-#ifdef QCA_PKT_PROTO_TRACE
- if ((hdd_ctx->config->gEnableDebugLog & CDS_PKT_TRAC_TYPE_EAPOL) ||
- (hdd_ctx->config->gEnableDebugLog & CDS_PKT_TRAC_TYPE_DHCP)) {
- proto_type = cds_pkt_get_proto_type(skb,
- hdd_ctx->config->gEnableDebugLog,
- 0);
- if (CDS_PKT_TRAC_TYPE_EAPOL & proto_type)
- cds_pkt_trace_buf_update("ST:T:EPL");
- else if (CDS_PKT_TRAC_TYPE_DHCP & proto_type)
- cds_pkt_trace_buf_update("ST:T:DHC");
- }
-#endif /* QCA_PKT_PROTO_TRACE */
-
pAdapter->stats.tx_bytes += skb->len;
wlan_hdd_tdls_update_tx_pkt_cnt(pAdapter, skb);
@@ -952,8 +976,6 @@ static int __hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
++pAdapter->stats.tx_packets;
hdd_event_eapol_log(skb, QDF_TX);
- pkt_proto_logged = qdf_dp_trace_log_pkt(pAdapter->sessionId,
- skb, QDF_TX);
QDF_NBUF_CB_TX_PACKET_TRACK(skb) = QDF_NBUF_TX_PKT_DATA_TRACK;
QDF_NBUF_UPDATE_TX_PKT_COUNT(skb, QDF_NBUF_TX_PKT_HDD);
@@ -962,18 +984,6 @@ static int __hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_PTR_RECORD,
qdf_nbuf_data_addr(skb), sizeof(qdf_nbuf_data(skb)),
QDF_TX));
- if (!pkt_proto_logged) {
- DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_TX_PACKET_RECORD,
- (uint8_t *)skb->data,
- qdf_nbuf_len(skb), QDF_TX));
- if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE) {
- DPTRACE(qdf_dp_trace(skb,
- QDF_DP_TRACE_HDD_TX_PACKET_RECORD,
- (uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
- (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE),
- QDF_TX));
- }
- }
/* Check if station is connected */
if (OL_TXRX_PEER_STATE_CONN ==
@@ -985,6 +995,10 @@ static int __hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
++pAdapter->hdd_stats.hddTxRxStats.txXmitDroppedAC[ac];
goto drop_pkt_and_release_skb;
}
+ if (!hdd_is_tx_allowed(skb, STAId)) {
+ ++pAdapter->hdd_stats.hddTxRxStats.txXmitDroppedAC[ac];
+ goto drop_pkt_and_release_skb;
+ }
/*
* If a transmit function is not registered, drop packet
@@ -1015,15 +1029,8 @@ drop_pkt_and_release_skb:
drop_pkt:
if (skb) {
- DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_DROP_PACKET_RECORD,
- (uint8_t *)skb->data, qdf_nbuf_len(skb), QDF_TX));
- if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
- DPTRACE(qdf_dp_trace(skb,
- QDF_DP_TRACE_DROP_PACKET_RECORD,
- (uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
- (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE),
- QDF_TX));
-
+ qdf_dp_trace_data_pkt(skb, QDF_DP_TRACE_DROP_PACKET_RECORD, 0,
+ QDF_TX);
kfree_skb(skb);
}
@@ -1055,9 +1062,9 @@ drop_pkt_accounting:
*
* Return: Always returns NETDEV_TX_OK
*/
-int hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+netdev_tx_t hdd_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
- int ret;
+ netdev_tx_t ret;
cds_ssr_protect(__func__);
ret = __hdd_hard_start_xmit(skb, dev);
@@ -1613,6 +1620,8 @@ static inline void hdd_register_rx_ol(void)
return;
}
+ hdd_ctx->tcp_delack_on = 0;
+
if (hdd_ctx->ol_enable == CFG_LRO_ENABLED) {
hdd_ctx->receive_offload_cb = hdd_lro_rx;
/* Register the flush callback */
@@ -1623,6 +1632,8 @@ static inline void hdd_register_rx_ol(void)
if (hdd_ctx->enableRxThread)
hdd_create_napi_for_rxthread();
hdd_debug("GRO is enabled");
+ } else if (hdd_ctx->config->enable_tcp_delack) {
+ hdd_ctx->tcp_delack_on = 1;
}
}
@@ -1660,6 +1671,10 @@ void hdd_gro_destroy(void)
}
#else /* HELIUMPLUS */
static inline void hdd_register_rx_ol(void) { }
+
+void hdd_gro_destroy(void)
+{
+}
#endif
/**
@@ -1719,7 +1734,7 @@ void hdd_enable_rx_ol_in_concurrency(hdd_context_t *hdd_ctx)
{
if (hdd_ctx->config->enable_tcp_delack) {
hdd_debug("Disable TCP delack as Rx Offload is enabled");
- hdd_ctx->config->enable_tcp_delack = 0;
+ hdd_ctx->tcp_delack_on = 0;
hdd_reset_tcp_delack(hdd_ctx);
}
qdf_atomic_set(&hdd_ctx->disable_lro_in_concurrency, 0);
@@ -1733,7 +1748,7 @@ void hdd_enable_rx_ol_in_concurrency(hdd_context_t *hdd_ctx)
*/
void hdd_disable_rx_ol_in_concurrency(hdd_context_t *hdd_ctx)
{
- if (!hdd_ctx->config->enable_tcp_delack) {
+ if (hdd_ctx->config->enable_tcp_delack) {
struct wlan_rx_tp_data rx_tp_data = {0};
hdd_debug("Enable TCP delack as Rx offload disabled in concurrency");
@@ -1741,7 +1756,7 @@ void hdd_disable_rx_ol_in_concurrency(hdd_context_t *hdd_ctx)
rx_tp_data.level = hdd_ctx->cur_rx_level;
wlan_hdd_send_svc_nlink_msg(hdd_ctx->radio_index,
WLAN_SVC_WLAN_TP_IND, &rx_tp_data, sizeof(rx_tp_data));
- hdd_ctx->config->enable_tcp_delack = 1;
+ hdd_ctx->tcp_delack_on = 1;
}
qdf_atomic_set(&hdd_ctx->disable_lro_in_concurrency, 1);
}
@@ -1807,7 +1822,6 @@ QDF_STATUS hdd_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf)
bool is_arp = false;
bool track_arp = false;
uint8_t pkt_type = 0;
- bool proto_pkt_logged = false;
/* Sanity check on inputs */
if (unlikely((NULL == context) || (NULL == rxBuf))) {
@@ -1871,22 +1885,14 @@ QDF_STATUS hdd_rx_packet_cbk(void *context, qdf_nbuf_t rxBuf)
}
hdd_event_eapol_log(skb, QDF_RX);
- proto_pkt_logged = qdf_dp_trace_log_pkt(pAdapter->sessionId,
- skb, QDF_RX);
+ qdf_dp_trace_log_pkt(pAdapter->sessionId, skb, QDF_RX);
DPTRACE(qdf_dp_trace(skb,
QDF_DP_TRACE_RX_HDD_PACKET_PTR_RECORD,
qdf_nbuf_data_addr(skb),
sizeof(qdf_nbuf_data(skb)), QDF_RX));
- if (!proto_pkt_logged) {
- DPTRACE(qdf_dp_trace(skb, QDF_DP_TRACE_HDD_RX_PACKET_RECORD,
- (uint8_t *)skb->data, qdf_nbuf_len(skb), QDF_RX));
- if (qdf_nbuf_len(skb) > QDF_DP_TRACE_RECORD_SIZE)
- DPTRACE(qdf_dp_trace(skb,
- QDF_DP_TRACE_HDD_RX_PACKET_RECORD,
- (uint8_t *)&skb->data[QDF_DP_TRACE_RECORD_SIZE],
- (qdf_nbuf_len(skb)-QDF_DP_TRACE_RECORD_SIZE),
- QDF_RX));
- }
+
+ DPTRACE(qdf_dp_trace_data_pkt(skb, QDF_DP_TRACE_RX_PACKET_RECORD,
+ 0, QDF_RX));
wlan_hdd_tdls_update_rx_pkt_cnt(pAdapter, skb);
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wext.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wext.c
index 986f2f412c32..b0d92b833d69 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wext.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wext.c
@@ -4523,13 +4523,13 @@ static int __iw_get_name(struct net_device *dev,
* Return: 0 on success, error number otherwise
*/
static int iw_get_name(struct net_device *dev,
- struct iw_request_info *info,
- char *wrqu, char *extra)
+ struct iw_request_info *info,
+ union iwreq_data *wrqu, char *extra)
{
int ret;
cds_ssr_protect(__func__);
- ret = __iw_get_name(dev, info, wrqu, extra);
+ ret = __iw_get_name(dev, info, wrqu->name, extra);
cds_ssr_unprotect(__func__);
return ret;
@@ -4910,12 +4910,12 @@ static int __iw_get_freq(struct net_device *dev, struct iw_request_info *info,
* Return: 0 on success, error number otherwise
*/
static int iw_get_freq(struct net_device *dev, struct iw_request_info *info,
- struct iw_freq *fwrq, char *extra)
+ union iwreq_data *fwrq, char *extra)
{
int ret;
cds_ssr_protect(__func__);
- ret = __iw_get_freq(dev, info, fwrq, extra);
+ ret = __iw_get_freq(dev, info, &fwrq->freq, extra);
cds_ssr_unprotect(__func__);
return ret;
@@ -5590,12 +5590,12 @@ static int __iw_get_encode(struct net_device *dev,
* Return: 0 on success, error number otherwise
*/
static int iw_get_encode(struct net_device *dev, struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+ union iwreq_data *dwrq, char *extra)
{
int ret;
cds_ssr_protect(__func__);
- ret = __iw_get_encode(dev, info, dwrq, extra);
+ ret = __iw_get_encode(dev, info, &dwrq->encoding, extra);
cds_ssr_unprotect(__func__);
return ret;
@@ -6793,12 +6793,12 @@ static int __iw_get_encodeext(struct net_device *dev,
*/
static int iw_get_encodeext(struct net_device *dev,
struct iw_request_info *info,
- struct iw_point *dwrq, char *extra)
+ union iwreq_data *dwrq, char *extra)
{
int ret;
cds_ssr_protect(__func__);
- ret = __iw_get_encodeext(dev, info, dwrq, extra);
+ ret = __iw_get_encodeext(dev, info, &dwrq->encoding, extra);
cds_ssr_unprotect(__func__);
return ret;
@@ -9085,9 +9085,11 @@ static int __iw_setchar_getnone(struct net_device *dev,
tRrmNeighborRspCallbackInfo callbackInfo;
if (pConfig->fRrmEnable) {
- hdd_debug("Neighbor Request");
+ neighborReq.neighbor_report_offload = false;
neighborReq.no_ssid =
(s_priv_data.length - 1) ? false : true;
+ hdd_debug("Neighbor Request ssid present %d",
+ neighborReq.no_ssid);
if (!neighborReq.no_ssid) {
neighborReq.ssid.length =
(s_priv_data.length - 1) >
@@ -9097,14 +9099,25 @@ static int __iw_setchar_getnone(struct net_device *dev,
neighborReq.ssid.length);
}
+ /*
+ * If 11k offload is supported by FW and enabled
+ * in the ini, set the offload to true
+ */
+ if (hdd_ctx->config->is_11k_offload_supported &&
+ (hdd_ctx->config->offload_11k_enable_bitmask &
+ OFFLOAD_11K_BITMASK_NEIGHBOR_REPORT_REQUEST)) {
+ hdd_debug("Neighbor report offloaded to FW");
+ neighborReq.neighbor_report_offload = true;
+ }
+
callbackInfo.neighborRspCallback = NULL;
callbackInfo.neighborRspCallbackContext = NULL;
- callbackInfo.timeout = 5000; /* 5 seconds */
- sme_neighbor_report_request(WLAN_HDD_GET_HAL_CTX
- (pAdapter),
- pAdapter->sessionId,
- &neighborReq,
- &callbackInfo);
+ callbackInfo.timeout = 5000; /* 5 seconds */
+ sme_neighbor_report_request(
+ WLAN_HDD_GET_HAL_CTX(pAdapter),
+ pAdapter->sessionId,
+ &neighborReq,
+ &callbackInfo);
} else {
hdd_err("Ignoring neighbor request as RRM not enabled");
ret = -EINVAL;
@@ -9168,13 +9181,6 @@ static int __iw_setnone_getint(struct net_device *dev,
tSmeConfigParams *sme_config;
hdd_context_t *hdd_ctx;
- sme_config = qdf_mem_malloc(sizeof(*sme_config));
- if (!sme_config) {
- hdd_err("failed to allocate memory for sme_config");
- return -ENOMEM;
- }
- qdf_mem_zero(sme_config, sizeof(*sme_config));
-
ENTER_DEV(dev);
hdd_ctx = WLAN_HDD_GET_CTX(pAdapter);
@@ -9182,6 +9188,12 @@ static int __iw_setnone_getint(struct net_device *dev,
if (0 != ret)
return ret;
+ sme_config = qdf_mem_malloc(sizeof(*sme_config));
+ if (!sme_config) {
+ hdd_err("failed to allocate memory for sme_config");
+ return -ENOMEM;
+ }
+
switch (value[0]) {
case WE_GET_11D_STATE:
{
@@ -12764,61 +12776,61 @@ static int iw_set_two_ints_getnone(struct net_device *dev,
/* A number of these routines are NULL (meaning they are not implemented.) */
static const iw_handler we_handler[] = {
- (iw_handler) iw_set_commit, /* SIOCSIWCOMMIT */
- (iw_handler) iw_get_name, /* SIOCGIWNAME */
- (iw_handler) NULL, /* SIOCSIWNWID */
- (iw_handler) NULL, /* SIOCGIWNWID */
- (iw_handler) iw_set_freq, /* SIOCSIWFREQ */
- (iw_handler) iw_get_freq, /* SIOCGIWFREQ */
- (iw_handler) iw_set_mode, /* SIOCSIWMODE */
- (iw_handler) iw_get_mode, /* SIOCGIWMODE */
- (iw_handler) NULL, /* SIOCSIWSENS */
- (iw_handler) NULL, /* SIOCGIWSENS */
- (iw_handler) NULL, /* SIOCSIWRANGE */
- (iw_handler) iw_get_range, /* SIOCGIWRANGE */
- (iw_handler) NULL, /* SIOCSIWPRIV */
- (iw_handler) NULL, /* SIOCGIWPRIV */
- (iw_handler) NULL, /* SIOCSIWSTATS */
- (iw_handler) NULL, /* SIOCGIWSTATS */
- (iw_handler) NULL, /* SIOCSIWSPY */
- (iw_handler) NULL, /* SIOCGIWSPY */
- (iw_handler) NULL, /* SIOCSIWTHRSPY */
- (iw_handler) NULL, /* SIOCGIWTHRSPY */
- (iw_handler) iw_set_ap_address, /* SIOCSIWAP */
- (iw_handler) iw_get_ap_address, /* SIOCGIWAP */
- (iw_handler) iw_set_mlme, /* SIOCSIWMLME */
- (iw_handler) NULL, /* SIOCGIWAPLIST */
- (iw_handler) iw_set_scan, /* SIOCSIWSCAN */
- (iw_handler) iw_get_scan, /* SIOCGIWSCAN */
- (iw_handler) iw_set_essid, /* SIOCSIWESSID */
- (iw_handler) iw_get_essid, /* SIOCGIWESSID */
- (iw_handler) iw_set_nick, /* SIOCSIWNICKN */
- (iw_handler) iw_get_nick, /* SIOCGIWNICKN */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) iw_set_bitrate, /* SIOCSIWRATE */
- (iw_handler) iw_get_bitrate, /* SIOCGIWRATE */
- (iw_handler) iw_set_rts_threshold, /* SIOCSIWRTS */
- (iw_handler) iw_get_rts_threshold, /* SIOCGIWRTS */
- (iw_handler) iw_set_frag_threshold, /* SIOCSIWFRAG */
- (iw_handler) iw_get_frag_threshold, /* SIOCGIWFRAG */
- (iw_handler) iw_set_tx_power, /* SIOCSIWTXPOW */
- (iw_handler) iw_get_tx_power, /* SIOCGIWTXPOW */
- (iw_handler) iw_set_retry, /* SIOCSIWRETRY */
- (iw_handler) iw_get_retry, /* SIOCGIWRETRY */
- (iw_handler) iw_set_encode, /* SIOCSIWENCODE */
- (iw_handler) iw_get_encode, /* SIOCGIWENCODE */
- (iw_handler) iw_set_power_mode, /* SIOCSIWPOWER */
- (iw_handler) iw_get_power_mode, /* SIOCGIWPOWER */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) NULL, /* -- hole -- */
- (iw_handler) iw_set_genie, /* SIOCSIWGENIE */
- (iw_handler) iw_get_genie, /* SIOCGIWGENIE */
- (iw_handler) iw_set_auth, /* SIOCSIWAUTH */
- (iw_handler) iw_get_auth, /* SIOCGIWAUTH */
- (iw_handler) iw_set_encodeext, /* SIOCSIWENCODEEXT */
- (iw_handler) iw_get_encodeext, /* SIOCGIWENCODEEXT */
- (iw_handler) NULL, /* SIOCSIWPMKSA */
+ iw_set_commit, /* SIOCSIWCOMMIT */
+ iw_get_name, /* SIOCGIWNAME */
+ NULL, /* SIOCSIWNWID */
+ NULL, /* SIOCGIWNWID */
+ iw_set_freq, /* SIOCSIWFREQ */
+ iw_get_freq, /* SIOCGIWFREQ */
+ iw_set_mode, /* SIOCSIWMODE */
+ iw_get_mode, /* SIOCGIWMODE */
+ NULL, /* SIOCSIWSENS */
+ NULL, /* SIOCGIWSENS */
+ NULL, /* SIOCSIWRANGE */
+ iw_get_range, /* SIOCGIWRANGE */
+ NULL, /* SIOCSIWPRIV */
+ NULL, /* SIOCGIWPRIV */
+ NULL, /* SIOCSIWSTATS */
+ NULL, /* SIOCGIWSTATS */
+ NULL, /* SIOCSIWSPY */
+ NULL, /* SIOCGIWSPY */
+ NULL, /* SIOCSIWTHRSPY */
+ NULL, /* SIOCGIWTHRSPY */
+ iw_set_ap_address, /* SIOCSIWAP */
+ iw_get_ap_address, /* SIOCGIWAP */
+ iw_set_mlme, /* SIOCSIWMLME */
+ NULL, /* SIOCGIWAPLIST */
+ iw_set_scan, /* SIOCSIWSCAN */
+ iw_get_scan, /* SIOCGIWSCAN */
+ iw_set_essid, /* SIOCSIWESSID */
+ iw_get_essid, /* SIOCGIWESSID */
+ iw_set_nick, /* SIOCSIWNICKN */
+ iw_get_nick, /* SIOCGIWNICKN */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
+ iw_set_bitrate, /* SIOCSIWRATE */
+ iw_get_bitrate, /* SIOCGIWRATE */
+ iw_set_rts_threshold, /* SIOCSIWRTS */
+ iw_get_rts_threshold, /* SIOCGIWRTS */
+ iw_set_frag_threshold, /* SIOCSIWFRAG */
+ iw_get_frag_threshold, /* SIOCGIWFRAG */
+ iw_set_tx_power, /* SIOCSIWTXPOW */
+ iw_get_tx_power, /* SIOCGIWTXPOW */
+ iw_set_retry, /* SIOCSIWRETRY */
+ iw_get_retry, /* SIOCGIWRETRY */
+ iw_set_encode, /* SIOCSIWENCODE */
+ iw_get_encode, /* SIOCGIWENCODE */
+ iw_set_power_mode, /* SIOCSIWPOWER */
+ iw_get_power_mode, /* SIOCGIWPOWER */
+ NULL, /* -- hole -- */
+ NULL, /* -- hole -- */
+ iw_set_genie, /* SIOCSIWGENIE */
+ iw_get_genie, /* SIOCGIWGENIE */
+ iw_set_auth, /* SIOCSIWAUTH */
+ iw_get_auth, /* SIOCGIWAUTH */
+ iw_set_encodeext, /* SIOCSIWENCODEEXT */
+ iw_get_encodeext, /* SIOCGIWENCODEEXT */
+ NULL, /* SIOCSIWPMKSA */
};
static const iw_handler we_private[] = {
diff --git a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wmm.c b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wmm.c
index f98763870797..41f0fddf851e 100644
--- a/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wmm.c
+++ b/drivers/staging/qcacld-3.0/core/hdd/src/wlan_hdd_wmm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -63,6 +63,7 @@
#include <cds_sched.h>
#include "sme_api.h"
+#define WLAN_HDD_HIPRI_TOS 0xc0
#define WLAN_HDD_MAX_DSCP 0x3f
#define HDD_WMM_UP_TO_AC_MAP_SIZE 8
@@ -1374,6 +1375,22 @@ QDF_STATUS hdd_wmm_adapter_close(hdd_adapter_t *pAdapter)
return QDF_STATUS_SUCCESS;
}
+static inline unsigned char hdd_wmm_check_ip_proto(unsigned char ip_proto,
+ unsigned char ip_tos,
+ bool *is_hipri)
+{
+ switch (ip_proto) {
+ case IPPROTO_ICMP:
+ case IPPROTO_ICMPV6:
+ *is_hipri = true;
+ return WLAN_HDD_HIPRI_TOS;
+
+ default:
+ *is_hipri = false;
+ return ip_tos;
+ }
+}
+
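Combined with the DSCP derivation further down in hdd_wmm_classify_pkt(), the helper above means ICMP traffic (like the EAPOL and ARP cases added below) is flagged high priority and given TOS 0xc0, so its user priority is derived from the forced TOS. A small sketch of that arithmetic, assuming the common default DSCP-to-UP mapping of dscp >> 3 (the driver actually consults a per-adapter mapping table):

#define HIPRI_TOS 0xc0	/* DSCP CS6, forced for ICMP/EAPOL/ARP */

/* tos -> dscp -> default 802.1d user priority */
static unsigned char tos_to_up(unsigned char tos)
{
	unsigned char dscp = (tos >> 2) & 0x3f;

	return dscp >> 3;	/* 0xc0 -> DSCP 48 -> UP 6 */
}

Under that default mapping a forced TOS of 0xc0 yields DSCP 48 and user priority 6, which WMM places in the voice access category.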
/**
* hdd_wmm_classify_pkt() - Function which will classify an OS packet
* into a WMM AC based on DSCP
@@ -1381,7 +1398,7 @@ QDF_STATUS hdd_wmm_adapter_close(hdd_adapter_t *pAdapter)
* @adapter: adapter upon which the packet is being transmitted
* @skb: pointer to network buffer
* @user_pri: user priority of the OS packet
- * @is_eapol: eapol packet flag
+ * @is_hipri: high priority packet flag
*
* Return: None
*/
@@ -1389,7 +1406,7 @@ static
void hdd_wmm_classify_pkt(hdd_adapter_t *adapter,
struct sk_buff *skb,
sme_QosWmmUpType *user_pri,
- bool *is_eapol)
+ bool *is_hipri)
{
unsigned char dscp;
unsigned char tos;
@@ -1416,14 +1433,16 @@ void hdd_wmm_classify_pkt(hdd_adapter_t *adapter,
if (eth_hdr->eth_II.h_proto == htons(ETH_P_IP)) {
/* case 1: Ethernet II IP packet */
ip_hdr = (struct iphdr *)&pkt[sizeof(eth_hdr->eth_II)];
- tos = ip_hdr->tos;
+ tos = hdd_wmm_check_ip_proto(ip_hdr->protocol, ip_hdr->tos,
+ is_hipri);
#ifdef HDD_WMM_DEBUG
hdd_info("Ethernet II IP Packet, tos is %d", tos);
#endif /* HDD_WMM_DEBUG */
-
} else if (eth_hdr->eth_II.h_proto == htons(ETH_P_IPV6)) {
ipv6hdr = ipv6_hdr(skb);
- tos = ntohs(*(const __be16 *)ipv6hdr) >> 4;
+ tos = hdd_wmm_check_ip_proto(
+ ipv6hdr->nexthdr, ntohs(*(const __be16 *)ipv6hdr) >> 4,
+ is_hipri);
#ifdef HDD_WMM_DEBUG
hdd_info("Ethernet II IPv6 Packet, tos is %d", tos);
#endif /* HDD_WMM_DEBUG */
@@ -1434,7 +1453,8 @@ void hdd_wmm_classify_pkt(hdd_adapter_t *adapter,
(eth_hdr->eth_8023.h_proto == htons(ETH_P_IP))) {
/* case 2: 802.3 LLC/SNAP IP packet */
ip_hdr = (struct iphdr *)&pkt[sizeof(eth_hdr->eth_8023)];
- tos = ip_hdr->tos;
+ tos = hdd_wmm_check_ip_proto(ip_hdr->protocol, ip_hdr->tos,
+ is_hipri);
#ifdef HDD_WMM_DEBUG
hdd_info("802.3 LLC/SNAP IP Packet, tos is %d", tos);
#endif /* HDD_WMM_DEBUG */
@@ -1447,7 +1467,8 @@ void hdd_wmm_classify_pkt(hdd_adapter_t *adapter,
ip_hdr =
(struct iphdr *)
&pkt[sizeof(eth_hdr->eth_IIv)];
- tos = ip_hdr->tos;
+ tos = hdd_wmm_check_ip_proto(ip_hdr->protocol,
+ ip_hdr->tos, is_hipri);
#ifdef HDD_WMM_DEBUG
hdd_info("Ethernet II VLAN tagged IP Packet, tos is %d",
tos);
@@ -1467,30 +1488,39 @@ void hdd_wmm_classify_pkt(hdd_adapter_t *adapter,
ip_hdr =
(struct iphdr *)
&pkt[sizeof(eth_hdr->eth_8023v)];
- tos = ip_hdr->tos;
+ tos = hdd_wmm_check_ip_proto(ip_hdr->protocol,
+ ip_hdr->tos, is_hipri);
#ifdef HDD_WMM_DEBUG
hdd_info("802.3 LLC/SNAP VLAN tagged IP Packet, tos is %d",
tos);
#endif /* HDD_WMM_DEBUG */
} else {
/* default */
+ *is_hipri = false;
+ tos = 0;
#ifdef HDD_WMM_DEBUG
hdd_warn("VLAN tagged Unhandled Protocol, using default tos");
#endif /* HDD_WMM_DEBUG */
- tos = 0;
}
+ } else if (eth_hdr->eth_II.h_proto == htons(HDD_ETHERTYPE_802_1_X)) {
+ *is_hipri = true;
+ tos = WLAN_HDD_HIPRI_TOS;
+#ifdef HDD_WMM_DEBUG
+ hdd_info("802.1x packet, tos is %d", tos);
+#endif /* HDD_WMM_DEBUG */
+ } else if (skb->protocol == htons(ETH_P_ARP)) {
+ *is_hipri = true;
+ tos = WLAN_HDD_HIPRI_TOS;
+#ifdef HDD_WMM_DEBUG
+ hdd_info("ARP packet, tos is %d", tos);
+#endif /* HDD_WMM_DEBUG */
} else {
/* default */
+ *is_hipri = false;
+ tos = 0;
#ifdef HDD_WMM_DEBUG
hdd_warn("Unhandled Protocol, using default tos");
#endif /* HDD_WMM_DEBUG */
- /* Give the highest priority to 802.1x packet */
- if (eth_hdr->eth_II.h_proto ==
- htons(HDD_ETHERTYPE_802_1_X)) {
- tos = 0xC0;
- *is_eapol = true;
- } else
- tos = 0;
}
dscp = (tos >> 2) & 0x3f;
@@ -1518,20 +1548,20 @@ static uint16_t __hdd_get_queue_index(uint16_t up)
/**
* hdd_get_queue_index() - get queue index
* @up: user priority
- * @is_eapol: is_eapol flag
+ * @is_hipri: high priority packet flag
*
* Return: queue_index
*/
static
-uint16_t hdd_get_queue_index(uint16_t up, bool is_eapol)
+uint16_t hdd_get_queue_index(u16 up, bool is_hipri)
{
- if (qdf_unlikely(is_eapol == true))
+ if (qdf_unlikely(is_hipri))
return HDD_LINUX_AC_HI_PRIO;
return __hdd_get_queue_index(up);
}
#else
static
-uint16_t hdd_get_queue_index(uint16_t up, bool is_eapol)
+uint16_t hdd_get_queue_index(u16 up, bool is_hipri)
{
return __hdd_get_queue_index(up);
}
@@ -1561,7 +1591,7 @@ uint16_t hdd_hostapd_select_queue(struct net_device *dev, struct sk_buff *skb
uint16_t queueIndex;
hdd_adapter_t *adapter = (hdd_adapter_t *) netdev_priv(dev);
hdd_context_t *hddctx = WLAN_HDD_GET_CTX(adapter);
- bool is_eapol = false;
+ bool is_hipri = false;
int status = 0;
status = wlan_hdd_validate_context(hddctx);
@@ -1572,9 +1602,9 @@ uint16_t hdd_hostapd_select_queue(struct net_device *dev, struct sk_buff *skb
}
/* Get the user priority from IP header */
- hdd_wmm_classify_pkt(adapter, skb, &up, &is_eapol);
+ hdd_wmm_classify_pkt(adapter, skb, &up, &is_hipri);
skb->priority = up;
- queueIndex = hdd_get_queue_index(skb->priority, is_eapol);
+ queueIndex = hdd_get_queue_index(skb->priority, is_hipri);
return queueIndex;
}
diff --git a/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h b/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h
index 2fe19a279010..9ed14dab8125 100644
--- a/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h
+++ b/drivers/staging/qcacld-3.0/core/mac/inc/ani_global.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -1013,6 +1013,9 @@ typedef struct sAniSirGlobal {
/* action ouis info */
bool enable_action_oui;
struct action_oui_info *oui_info;
+
+ /* 11k Offload Support */
+ bool is_11k_offload_supported;
} tAniSirGlobal;
typedef enum {
diff --git a/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h b/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h
index 7e5113429fd3..8ea9c294a65f 100644
--- a/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h
+++ b/drivers/staging/qcacld-3.0/core/mac/inc/qwlan_version.h
@@ -39,11 +39,11 @@
===========================================================================*/
#define QWLAN_VERSION_MAJOR 5
-#define QWLAN_VERSION_MINOR 1
+#define QWLAN_VERSION_MINOR 2
#define QWLAN_VERSION_PATCH 1
-#define QWLAN_VERSION_EXTRA "V"
-#define QWLAN_VERSION_BUILD 44
+#define QWLAN_VERSION_EXTRA "D"
+#define QWLAN_VERSION_BUILD 1
-#define QWLAN_VERSIONSTR "5.1.1.44V.4"
+#define QWLAN_VERSIONSTR "5.2.1.1D"
#endif /* QWLAN_VERSION_H */
diff --git a/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h b/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h
index 61f94601529e..f7973808aa73 100644
--- a/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h
+++ b/drivers/staging/qcacld-3.0/core/mac/inc/sir_api.h
@@ -436,6 +436,26 @@ typedef enum eSirRFBand {
SIR_BAND_MAX = SIR_BAND_UNKNOWN,
} tSirRFBand;
+/**
+ * enum set_hw_mode_status - Status of set HW mode command
+ * @SET_HW_MODE_STATUS_OK: command successful
+ * @SET_HW_MODE_STATUS_EINVAL: Requested invalid hw_mode
+ * @SET_HW_MODE_STATUS_ECANCELED: HW mode change cancelled
+ * @SET_HW_MODE_STATUS_ENOTSUP: HW mode not supported
+ * @SET_HW_MODE_STATUS_EHARDWARE: HW mode change prevented by hardware
+ * @SET_HW_MODE_STATUS_EPENDING: HW mode change is pending
+ * @SET_HW_MODE_STATUS_ECOEX: HW mode change conflict with Coex
+ */
+enum set_hw_mode_status {
+ SET_HW_MODE_STATUS_OK,
+ SET_HW_MODE_STATUS_EINVAL,
+ SET_HW_MODE_STATUS_ECANCELED,
+ SET_HW_MODE_STATUS_ENOTSUP,
+ SET_HW_MODE_STATUS_EHARDWARE,
+ SET_HW_MODE_STATUS_EPENDING,
+ SET_HW_MODE_STATUS_ECOEX,
+};
+
typedef struct sSirRemainOnChnReq {
uint16_t messageType;
uint16_t length;
@@ -522,6 +542,9 @@ struct s_sir_set_hw_mode {
struct sir_hw_mode set_hw;
};
+typedef void (*dual_mac_cb)(enum set_hw_mode_status status,
+ uint32_t scan_config,
+ uint32_t fw_mode_config);
/**
* struct sir_dual_mac_config - Dual MAC configuration
* @scan_config: Scan configuration
@@ -531,7 +554,7 @@ struct s_sir_set_hw_mode {
struct sir_dual_mac_config {
uint32_t scan_config;
uint32_t fw_mode_config;
- void *set_dual_mac_cb;
+ dual_mac_cb set_dual_mac_cb;
};
/**
@@ -1310,6 +1333,7 @@ typedef struct sSirSmeJoinReq {
bool ignore_assoc_disallowed;
bool enable_bcast_probe_rsp;
bool force_24ghz_in_ht20;
+ bool force_rsne_override;
tSirBssDescription bssDescription;
/*
* WARNING: Pls make bssDescription as last variable in struct
@@ -2890,16 +2914,6 @@ typedef struct sSirNsOffloadReq {
} tSirNsOffloadReq, *tpSirNsOffloadReq;
#endif /* WLAN_NS_OFFLOAD */
-/**
- * struct hw_filter_request - For enable/disable HW Filter
- * @mode_bitmap: the hardware filter mode to configure
- * @bssid: bss_id for get session.
- */
-struct hw_filter_request {
- uint8_t mode_bitmap;
- struct qdf_mac_addr bssid;
-};
-
typedef struct sSirHostOffloadReq {
uint8_t offloadType;
uint8_t enableOrDisable;
@@ -3635,6 +3649,7 @@ typedef struct sSirRoamOffloadScanReq {
struct roam_fils_params roam_fils_params;
#endif
struct scoring_param score_params;
+ struct wmi_11k_offload_params offload_11k_params;
} tSirRoamOffloadScanReq, *tpSirRoamOffloadScanReq;
typedef struct sSirRoamOffloadScanRsp {
@@ -3837,26 +3852,6 @@ enum hw_mode_bandwidth {
};
/**
- * enum set_hw_mode_status - Status of set HW mode command
- * @SET_HW_MODE_STATUS_OK: command successful
- * @SET_HW_MODE_STATUS_EINVAL: Requested invalid hw_mode
- * @SET_HW_MODE_STATUS_ECANCELED: HW mode change cancelled
- * @SET_HW_MODE_STATUS_ENOTSUP: HW mode not supported
- * @SET_HW_MODE_STATUS_EHARDWARE: HW mode change prevented by hardware
- * @SET_HW_MODE_STATUS_EPENDING: HW mode change is pending
- * @SET_HW_MODE_STATUS_ECOEX: HW mode change conflict with Coex
- */
-enum set_hw_mode_status {
- SET_HW_MODE_STATUS_OK,
- SET_HW_MODE_STATUS_EINVAL,
- SET_HW_MODE_STATUS_ECANCELED,
- SET_HW_MODE_STATUS_ENOTSUP,
- SET_HW_MODE_STATUS_EHARDWARE,
- SET_HW_MODE_STATUS_EPENDING,
- SET_HW_MODE_STATUS_ECOEX,
-};
-
-/**
* struct sir_pcl_list - Format of PCL
* @pcl_list: List of preferred channels
* @weight_list: Weights of the PCL
@@ -6721,8 +6716,6 @@ typedef void (*hw_mode_transition_cb)(uint32_t old_hw_mode_index,
uint32_t new_hw_mode_index,
uint32_t num_vdev_mac_entries,
struct sir_vdev_mac_map *vdev_mac_map);
-typedef void (*dual_mac_cb)(uint32_t status, uint32_t scan_config,
- uint32_t fw_mode_config);
typedef void (*antenna_mode_cb)(uint32_t status);
/**
@@ -7230,17 +7223,17 @@ struct obss_scanparam {
};
/**
- * struct sir_bpf_set_offload - set bpf filter instructions
+ * struct sir_apf_set_offload - set apf filter instructions
* @session_id: session identifier
- * @version: host bpf version
- * @filter_id: Filter ID for BPF filter
+ * @version: host apf version
+ * @filter_id: Filter ID for APF filter
* @total_length: The total length of the full instruction
* total_length equal to 0 means reset
* @current_offset: current offset, 0 means start a new setting
* @current_length: Length of current @program
- * @program: BPF instructions
+ * @program: APF instructions
*/
-struct sir_bpf_set_offload {
+struct sir_apf_set_offload {
uint8_t session_id;
uint32_t version;
uint32_t filter_id;
@@ -7251,18 +7244,18 @@ struct sir_bpf_set_offload {
};
/**
- * struct sir_bpf_offload_capabilities - get bpf Capabilities
- * @bpf_version: fw's implement version
- * @max_bpf_filters: max filters that fw supports
- * @max_bytes_for_bpf_inst: the max bytes that can be used as bpf instructions
- * @remaining_bytes_for_bpf_inst: remaining bytes for bpf instructions
+ * struct sir_apf_offload_capabilities - get apf Capabilities
+ * @apf_version: fw's implement version
+ * @max_apf_filters: max filters that fw supports
+ * @max_bytes_for_apf_inst: the max bytes that can be used as apf instructions
+ * @remaining_bytes_for_apf_inst: remaining bytes for apf instructions
*
*/
-struct sir_bpf_get_offload {
- uint32_t bpf_version;
- uint32_t max_bpf_filters;
- uint32_t max_bytes_for_bpf_inst;
- uint32_t remaining_bytes_for_bpf_inst;
+struct sir_apf_get_offload {
+ uint32_t apf_version;
+ uint32_t max_apf_filters;
+ uint32_t max_bytes_for_apf_inst;
+ uint32_t remaining_bytes_for_apf_inst;
};
/**
@@ -7716,8 +7709,8 @@ struct ndp_responder_rsp_event {
/**
* struct ndp_channel_info - ndp channel and channel bandwidth
- * @channel: channel width of the ndp connection
- * @ch_width: channel width of the ndp connection
+ * @channel: channel freq in MHz of the ndp connection
+ * @ch_width: channel width (wmi_channel_width) of the ndp connection
* @nss: nss used for ndp connection
*
*/
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/cfg/cfgUtil/dot11f.frms b/drivers/staging/qcacld-3.0/core/mac/src/cfg/cfgUtil/dot11f.frms
index dff7d55bfda6..87dd01d8de11 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/cfg/cfgUtil/dot11f.frms
+++ b/drivers/staging/qcacld-3.0/core/mac/src/cfg/cfgUtil/dot11f.frms
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2006-2007, 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2006-2007, 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -1385,8 +1385,8 @@ IE RSN (EID_RSN) // 7.3.2.25
{
// The version is 2 octets, and we only support version 1.
version, 2 MUSTBE 1;
- // The next four octets will be the Group Cipher Suite
- gp_cipher_suite[4];
+ // The next four octets will be the Optional Group Cipher Suite
+ OPTIONAL gp_cipher_suite[4];
// The IE *may* stop here; if there's any more, we should see two more
// octets giving the number of Pairwise Cipher Suites
OPTIONAL pwise_cipher_suite_count, 2;
@@ -1395,10 +1395,10 @@ IE RSN (EID_RSN) // 7.3.2.25
// to 61. However, that seems needlessly wasteful of space.
pwise_cipher_suites[4][0..6] COUNTIS pwise_cipher_suite_count;
// Optional count of AKM suite selectors
- OPTIONAL akm_suite_count, 2;
+ OPTIONAL akm_suite_cnt, 2;
// Again, I see nothing in the Standard explicitly limiting the number of
// AKM suite selectors other than the maximum size of an IE.
- akm_suites[4][0..6] COUNTIS akm_suite_count;
+ akm_suite[4][0..6] COUNTIS akm_suite_cnt;
OPTIONAL RSN_Cap[2];
// Finally, the IE may contain zero or more PMKIDs:
OPTIONAL pmkid_count, 2;
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/include/dot11f.h b/drivers/staging/qcacld-3.0/core/mac/src/include/dot11f.h
index bb56b81f4b09..7087926c5515 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/include/dot11f.h
+++ b/drivers/staging/qcacld-3.0/core/mac/src/include/dot11f.h
@@ -35,7 +35,7 @@
*
*
* This file was automatically generated by 'framesc'
- * Tue Jan 9 16:51:56 2018 from the following file(s):
+ * Fri Feb 16 10:33:08 2018 from the following file(s):
*
* dot11f.frms
*
@@ -53,6 +53,10 @@ typedef uint32_t tDOT11F_U64[2];
#pragma warning (disable:4214) /* nonstandard extension used */
#endif /* Microsoft C/C++ bit field types other than int */
+#if !defined __must_check
+#define __must_check
+#endif
+
/*
* Frames Return Codes:
*
@@ -81,12 +85,12 @@ typedef uint32_t tDOT11F_U64[2];
#define DOT11F_SKIPPED_BAD_TLV (0x00000200)
#define DOT11F_UNKNOWN_TLVS (0x00000400)
#define DOT11F_LAST_TLV_TOO_LONG (0x00000800)
+#define DOT11F_MANDATORY_TLV_MISSING (0x00001000)
#define DOT11F_INTERNAL_ERROR (0x10000001)
#define DOT11F_MISSING_FIXED_FIELD (0x10000002)
#define DOT11F_BAD_INPUT_BUFFER (0x10000003)
#define DOT11F_BAD_OUTPUT_BUFFER (0x10000004)
#define DOT11F_BUFFER_OVERFLOW (0x10000005)
-#define DOT11F_MANDATORY_TLV_MISSING (0x00001000)
#define DOT11F_FAILED(code) ((code) & 0x10000000)
#define DOT11F_SUCCEEDED(code) ((code) == 0)
#define DOT11F_WARNED(code) (!DOT11F_SUCCEEDED(code) && !DOT11F_FAILED(code))
@@ -2484,7 +2488,7 @@ typedef struct sDot11fIEGTK {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_gtk(
+__must_check uint32_t dot11f_unpack_ie_gtk(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -2526,7 +2530,7 @@ typedef struct sDot11fIEIGTK {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_igtk(
+__must_check uint32_t dot11f_unpack_ie_igtk(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -2566,7 +2570,7 @@ typedef struct sDot11fIER0KH_ID {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_r0_kh_id(
+__must_check uint32_t dot11f_unpack_ie_r0_kh_id(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -2605,7 +2609,7 @@ typedef struct sDot11fIER1KH_ID {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_r1_kh_id(
+__must_check uint32_t dot11f_unpack_ie_r1_kh_id(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -2646,7 +2650,7 @@ typedef struct sDot11fIEAPChannelReport {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ap_channel_report(
+__must_check uint32_t dot11f_unpack_ie_ap_channel_report(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -2685,7 +2689,7 @@ typedef struct sDot11fIEBcnReportingDetail {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_bcn_reporting_detail(
+__must_check uint32_t dot11f_unpack_ie_bcn_reporting_detail(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -2725,7 +2729,7 @@ typedef struct sDot11fIEBeaconReportFrmBody {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_beacon_report_frm_body(
+__must_check uint32_t dot11f_unpack_ie_beacon_report_frm_body(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -2765,7 +2769,7 @@ typedef struct sDot11fIEBeaconReporting {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_beacon_reporting(
+__must_check uint32_t dot11f_unpack_ie_beacon_reporting(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -2804,7 +2808,7 @@ typedef struct sDot11fIECondensedCountryStr {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_condensed_country_str(
+__must_check uint32_t dot11f_unpack_ie_condensed_country_str(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -2845,7 +2849,7 @@ typedef struct sDot11fIEMeasurementPilot {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_measurement_pilot(
+__must_check uint32_t dot11f_unpack_ie_measurement_pilot(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -2886,7 +2890,7 @@ typedef struct sDot11fIEMultiBssid {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_multi_bssid(
+__must_check uint32_t dot11f_unpack_ie_multi_bssid(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -2927,7 +2931,7 @@ typedef struct sDot11fIERICData {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ric_data(
+__must_check uint32_t dot11f_unpack_ie_ric_data(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -2968,7 +2972,7 @@ typedef struct sDot11fIERICDescriptor {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ric_descriptor(
+__must_check uint32_t dot11f_unpack_ie_ric_descriptor(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3037,7 +3041,7 @@ typedef struct sDot11fIERRMEnabledCap {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_rrm_enabled_cap(
+__must_check uint32_t dot11f_unpack_ie_rrm_enabled_cap(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3077,7 +3081,7 @@ typedef struct sDot11fIERequestedInfo {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_requested_info(
+__must_check uint32_t dot11f_unpack_ie_requested_info(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3117,7 +3121,7 @@ typedef struct sDot11fIESSID {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ssid(
+__must_check uint32_t dot11f_unpack_ie_ssid(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3163,7 +3167,7 @@ typedef struct sDot11fIESchedule {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_schedule(
+__must_check uint32_t dot11f_unpack_ie_schedule(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3235,7 +3239,7 @@ typedef struct sDot11fIETCLAS {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_tclas(
+__must_check uint32_t dot11f_unpack_ie_tclas(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3274,7 +3278,7 @@ typedef struct sDot11fIETCLASSPROC {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_tclasSPROC(
+__must_check uint32_t dot11f_unpack_ie_tclasSPROC(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3313,7 +3317,7 @@ typedef struct sDot11fIETSDelay {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ts_delay(
+__must_check uint32_t dot11f_unpack_ie_ts_delay(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3353,7 +3357,7 @@ typedef struct sDot11fIETSFInfo {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_tsf_info(
+__must_check uint32_t dot11f_unpack_ie_tsf_info(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3417,7 +3421,7 @@ typedef struct sDot11fIETSPEC {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_tspec(
+__must_check uint32_t dot11f_unpack_ie_tspec(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3481,7 +3485,7 @@ typedef struct sDot11fIEVHTCaps {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_vht_caps(
+__must_check uint32_t dot11f_unpack_ie_vht_caps(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3523,7 +3527,7 @@ typedef struct sDot11fIEVHTOperation {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_vht_operation(
+__must_check uint32_t dot11f_unpack_ie_vht_operation(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3570,7 +3574,7 @@ typedef struct sDot11fIEWMMSchedule {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wmm_schedule(
+__must_check uint32_t dot11f_unpack_ie_wmm_schedule(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3643,7 +3647,7 @@ typedef struct sDot11fIEWMMTCLAS {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wmmtclas(
+__must_check uint32_t dot11f_unpack_ie_wmmtclas(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3683,7 +3687,7 @@ typedef struct sDot11fIEWMMTCLASPROC {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wmmtclasproc(
+__must_check uint32_t dot11f_unpack_ie_wmmtclasproc(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3723,7 +3727,7 @@ typedef struct sDot11fIEWMMTSDelay {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wmmts_delay(
+__must_check uint32_t dot11f_unpack_ie_wmmts_delay(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3788,7 +3792,7 @@ typedef struct sDot11fIEWMMTSPEC {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wmmtspec(
+__must_check uint32_t dot11f_unpack_ie_wmmtspec(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3829,7 +3833,7 @@ typedef struct sDot11fIEWiderBWChanSwitchAnn {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wider_bw_chan_switch_ann(
+__must_check uint32_t dot11f_unpack_ie_wider_bw_chan_switch_ann(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3868,7 +3872,7 @@ typedef struct sDot11fIEazimuth_req {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_azimuth_req(
+__must_check uint32_t dot11f_unpack_ie_azimuth_req(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3907,7 +3911,7 @@ typedef struct sDot11fIEmax_age {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_max_age(
+__must_check uint32_t dot11f_unpack_ie_max_age(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -3966,7 +3970,7 @@ typedef struct sDot11fIEneighbor_rpt {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_neighbor_rpt(
+__must_check uint32_t dot11f_unpack_ie_neighbor_rpt(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4005,7 +4009,7 @@ typedef struct sDot11fIEreq_mac_addr {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_req_mac_addr(
+__must_check uint32_t dot11f_unpack_ie_req_mac_addr(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4044,7 +4048,7 @@ typedef struct sDot11fIEtgt_mac_addr {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_tgt_mac_addr(
+__must_check uint32_t dot11f_unpack_ie_tgt_mac_addr(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4084,7 +4088,7 @@ typedef struct sDot11fIEvht_transmit_power_env {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_vht_transmit_power_env(
+__must_check uint32_t dot11f_unpack_ie_vht_transmit_power_env(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4123,7 +4127,7 @@ typedef struct sDot11fIEAID {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_aid(
+__must_check uint32_t dot11f_unpack_ie_aid(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4165,7 +4169,7 @@ typedef struct sDot11fIECFParams {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_cf_params(
+__must_check uint32_t dot11f_unpack_ie_cf_params(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4205,7 +4209,7 @@ typedef struct sDot11fIEChallengeText {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_challenge_text(
+__must_check uint32_t dot11f_unpack_ie_challenge_text(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4246,7 +4250,7 @@ typedef struct sDot11fIEChanSwitchAnn {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_chan_switch_ann(
+__must_check uint32_t dot11f_unpack_ie_chan_switch_ann(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4286,7 +4290,7 @@ typedef struct sDot11fIEChannelSwitchWrapper {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_channel_switch_wrapper(
+__must_check uint32_t dot11f_unpack_ie_channel_switch_wrapper(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4327,7 +4331,7 @@ typedef struct sDot11fIECountry {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_country(
+__must_check uint32_t dot11f_unpack_ie_country(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4366,7 +4370,7 @@ typedef struct sDot11fIEDSParams {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_DSParams(
+__must_check uint32_t dot11f_unpack_ie_DSParams(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4434,7 +4438,7 @@ typedef struct sDot11fIEEDCAParamSet {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_edca_param_set(
+__must_check uint32_t dot11f_unpack_ie_edca_param_set(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4476,7 +4480,7 @@ typedef struct sDot11fIEERPInfo {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_erp_info(
+__must_check uint32_t dot11f_unpack_ie_erp_info(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4516,7 +4520,7 @@ typedef struct sDot11fIEESECckmOpaque {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ese_cckm_opaque(
+__must_check uint32_t dot11f_unpack_ie_ese_cckm_opaque(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4557,7 +4561,7 @@ typedef struct sDot11fIEESERadMgmtCap {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ese_rad_mgmt_cap(
+__must_check uint32_t dot11f_unpack_ie_ese_rad_mgmt_cap(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4598,7 +4602,7 @@ typedef struct sDot11fIEESETrafStrmMet {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ese_traf_strm_met(
+__must_check uint32_t dot11f_unpack_ie_ese_traf_strm_met(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4639,7 +4643,7 @@ typedef struct sDot11fIEESETrafStrmRateSet {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ese_traf_strm_rate_set(
+__must_check uint32_t dot11f_unpack_ie_ese_traf_strm_rate_set(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4679,7 +4683,7 @@ typedef struct sDot11fIEESETxmitPower {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ese_txmit_power(
+__must_check uint32_t dot11f_unpack_ie_ese_txmit_power(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4718,7 +4722,7 @@ typedef struct sDot11fIEESEVersion {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ese_version(
+__must_check uint32_t dot11f_unpack_ie_ese_version(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4758,7 +4762,7 @@ typedef struct sDot11fIEESP_information {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ESP_information(
+__must_check uint32_t dot11f_unpack_ie_ESP_information(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4798,7 +4802,7 @@ typedef struct sDot11fIEExtCap {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ext_cap(
+__must_check uint32_t dot11f_unpack_ie_ext_cap(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4838,7 +4842,7 @@ typedef struct sDot11fIEExtSuppRates {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ext_supp_rates(
+__must_check uint32_t dot11f_unpack_ie_ext_supp_rates(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4880,7 +4884,7 @@ typedef struct sDot11fIEFHParamSet {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_fh_param_set(
+__must_check uint32_t dot11f_unpack_ie_fh_param_set(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4920,7 +4924,7 @@ typedef struct sDot11fIEFHParams {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_fh_params(
+__must_check uint32_t dot11f_unpack_ie_fh_params(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -4964,7 +4968,7 @@ typedef struct sDot11fIEFHPattTable {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_fh_patt_table(
+__must_check uint32_t dot11f_unpack_ie_fh_patt_table(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5011,7 +5015,7 @@ typedef struct sDot11fIEFTInfo {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ft_info(
+__must_check uint32_t dot11f_unpack_ie_ft_info(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5098,7 +5102,7 @@ typedef struct sDot11fIEHTCaps {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ht_caps(
+__must_check uint32_t dot11f_unpack_ie_ht_caps(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5157,7 +5161,7 @@ typedef struct sDot11fIEHTInfo {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ht_info(
+__must_check uint32_t dot11f_unpack_ie_ht_info(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5196,7 +5200,7 @@ typedef struct sDot11fIEIBSSParams {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ibss_params(
+__must_check uint32_t dot11f_unpack_ie_ibss_params(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5237,7 +5241,7 @@ typedef struct sDot11fIELinkIdentifier {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_link_identifier(
+__must_check uint32_t dot11f_unpack_ie_link_identifier(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5286,7 +5290,7 @@ typedef struct sDot11fIEMBO_IE {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_MBO_IE(
+__must_check uint32_t dot11f_unpack_ie_MBO_IE(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5376,7 +5380,7 @@ typedef struct sDot11fIEMeasurementReport {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_measurement_report(
+__must_check uint32_t dot11f_unpack_ie_measurement_report(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5466,7 +5470,7 @@ typedef struct sDot11fIEMeasurementRequest {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_measurement_request(
+__must_check uint32_t dot11f_unpack_ie_measurement_request(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5508,7 +5512,7 @@ typedef struct sDot11fIEMobilityDomain {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_mobility_domain(
+__must_check uint32_t dot11f_unpack_ie_mobility_domain(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5567,7 +5571,7 @@ typedef struct sDot11fIENeighborReport {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_neighbor_report(
+__must_check uint32_t dot11f_unpack_ie_neighbor_report(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5612,7 +5616,7 @@ typedef struct sDot11fIEOBSSScanParameters {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_obss_scan_parameters(
+__must_check uint32_t dot11f_unpack_ie_obss_scan_parameters(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5654,7 +5658,7 @@ typedef struct sDot11fIEOperatingMode {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_operating_mode(
+__must_check uint32_t dot11f_unpack_ie_operating_mode(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5695,7 +5699,7 @@ typedef struct sDot11fIEP2PAssocReq {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_p2_p_assoc_req(
+__must_check uint32_t dot11f_unpack_ie_p2_p_assoc_req(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5735,7 +5739,7 @@ typedef struct sDot11fIEP2PAssocRes {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_p2_p_assoc_res(
+__must_check uint32_t dot11f_unpack_ie_p2_p_assoc_res(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5776,7 +5780,7 @@ typedef struct sDot11fIEP2PBeacon {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_p2_p_beacon(
+__must_check uint32_t dot11f_unpack_ie_p2_p_beacon(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5820,7 +5824,7 @@ typedef struct sDot11fIEP2PBeaconProbeRes {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_p2_p_beacon_probe_res(
+__must_check uint32_t dot11f_unpack_ie_p2_p_beacon_probe_res(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5859,7 +5863,7 @@ typedef struct sDot11fIEP2PDeAuth {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_p2_p_de_auth(
+__must_check uint32_t dot11f_unpack_ie_p2_p_de_auth(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5898,7 +5902,7 @@ typedef struct sDot11fIEP2PDisAssoc {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_p2_p_dis_assoc(
+__must_check uint32_t dot11f_unpack_ie_p2_p_dis_assoc(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5938,7 +5942,7 @@ typedef struct sDot11fIEP2PIEOpaque {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_p2_pie_opaque(
+__must_check uint32_t dot11f_unpack_ie_p2_pie_opaque(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -5981,7 +5985,7 @@ typedef struct sDot11fIEP2PProbeReq {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_p2_p_probe_req(
+__must_check uint32_t dot11f_unpack_ie_p2_p_probe_req(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6024,7 +6028,7 @@ typedef struct sDot11fIEP2PProbeRes {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_p2_p_probe_res(
+__must_check uint32_t dot11f_unpack_ie_p2_p_probe_res(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6064,7 +6068,7 @@ typedef struct sDot11fIEPTIControl {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_pti_control(
+__must_check uint32_t dot11f_unpack_ie_pti_control(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6107,7 +6111,7 @@ typedef struct sDot11fIEPUBufferStatus {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_pu_buffer_status(
+__must_check uint32_t dot11f_unpack_ie_pu_buffer_status(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6147,7 +6151,7 @@ typedef struct sDot11fIEPowerCaps {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_power_caps(
+__must_check uint32_t dot11f_unpack_ie_power_caps(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6186,7 +6190,7 @@ typedef struct sDot11fIEPowerConstraints {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_power_constraints(
+__must_check uint32_t dot11f_unpack_ie_power_constraints(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6227,7 +6231,7 @@ typedef struct sDot11fIEQBSSLoad {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_qbss_load(
+__must_check uint32_t dot11f_unpack_ie_qbss_load(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6266,7 +6270,7 @@ typedef struct sDot11fIEQCN_IE {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_QCN_IE(
+__must_check uint32_t dot11f_unpack_ie_QCN_IE(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6306,7 +6310,7 @@ typedef struct sDot11fIEQComVendorIE {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_QComVendorIE(
+__must_check uint32_t dot11f_unpack_ie_QComVendorIE(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6349,7 +6353,7 @@ typedef struct sDot11fIEQOSCapsAp {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_qos_caps_ap(
+__must_check uint32_t dot11f_unpack_ie_qos_caps_ap(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6394,7 +6398,7 @@ typedef struct sDot11fIEQOSCapsStation {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_qos_caps_station(
+__must_check uint32_t dot11f_unpack_ie_qos_caps_station(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6434,7 +6438,7 @@ typedef struct sDot11fIEQosMapSet {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_qos_map_set(
+__must_check uint32_t dot11f_unpack_ie_qos_map_set(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6476,7 +6480,7 @@ typedef struct sDot11fIEQuiet {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_quiet(
+__must_check uint32_t dot11f_unpack_ie_quiet(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6515,7 +6519,7 @@ typedef struct sDot11fIERCPIIE {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_rcpiie(
+__must_check uint32_t dot11f_unpack_ie_rcpiie(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6567,7 +6571,7 @@ typedef struct sDot11fIERICDataDesc {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ric_data_desc(
+__must_check uint32_t dot11f_unpack_ie_ric_data_desc(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6594,28 +6598,31 @@ uint32_t dot11f_get_packed_ieric_data_desc(
typedef struct sDot11fIERSN {
uint8_t present;
uint16_t version /* Must be 1! */;
+ uint8_t gp_cipher_suite_present;
uint8_t gp_cipher_suite[4];
uint16_t pwise_cipher_suite_count;
uint8_t pwise_cipher_suites[6][4];
- uint16_t akm_suite_count;
- uint8_t akm_suites[6][4];
+ uint16_t akm_suite_cnt;
+ uint8_t akm_suite[6][4];
+ uint8_t RSN_Cap_present;
uint8_t RSN_Cap[2];
uint16_t pmkid_count;
uint8_t pmkid[4][16];
+ uint8_t gp_mgmt_cipher_suite_present;
uint8_t gp_mgmt_cipher_suite[4];
} tDot11fIERSN;
#define DOT11F_EID_RSN (48)
/* N.B. These #defines do *not* include the EID & length */
-#define DOT11F_IE_RSN_MIN_LEN (6)
+#define DOT11F_IE_RSN_MIN_LEN (2)
#define DOT11F_IE_RSN_MAX_LEN (130)
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_rsn(
+__must_check uint32_t dot11f_unpack_ie_rsn(
tpAniSirGlobal,
uint8_t *,
uint8_t,
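
The hunk above adds explicit presence flags for the optional RSN fields and lowers DOT11F_IE_RSN_MIN_LEN to 2, so an RSN IE carrying only the mandatory 2-byte version field now parses instead of being rejected. The sketch below is illustrative only: it assumes a caller that has already run the unpack routine, uses only the field names from the struct above, and everything else is hypothetical.

/* Hypothetical consumer of a parsed tDot11fIERSN; only fields and presence
 * flags defined in the struct above are assumed to exist. */
static void example_consume_rsn(const tDot11fIERSN *rsn)
{
	if (!rsn->present)
		return;

	/* Optional fields are valid only when their presence flag is set. */
	if (rsn->RSN_Cap_present)
		pe_debug("RSN caps: 0x%02x 0x%02x",
			 rsn->RSN_Cap[0], rsn->RSN_Cap[1]);

	if (rsn->gp_mgmt_cipher_suite_present)
		pe_debug("group mgmt cipher suite present");

	pe_debug("AKM suites: %d", rsn->akm_suite_cnt);
}
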
@@ -6654,7 +6661,7 @@ typedef struct sDot11fIERSNIIE {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_rsniie(
+__must_check uint32_t dot11f_unpack_ie_rsniie(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6694,7 +6701,7 @@ typedef struct sDot11fIERSNOpaque {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_rsn_opaque(
+__must_check uint32_t dot11f_unpack_ie_rsn_opaque(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6734,7 +6741,7 @@ typedef struct sDot11fIESuppChannels {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_supp_channels(
+__must_check uint32_t dot11f_unpack_ie_supp_channels(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6774,7 +6781,7 @@ typedef struct sDot11fIESuppOperatingClasses {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_supp_operating_classes(
+__must_check uint32_t dot11f_unpack_ie_supp_operating_classes(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6827,7 +6834,7 @@ typedef struct sDot11fIESuppRates {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_supp_rates(
+__must_check uint32_t dot11f_unpack_ie_supp_rates(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6870,7 +6877,7 @@ typedef struct sDot11fIETIM {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_tim(
+__must_check uint32_t dot11f_unpack_ie_tim(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6910,7 +6917,7 @@ typedef struct sDot11fIETPCReport {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_tpc_report(
+__must_check uint32_t dot11f_unpack_ie_tpc_report(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6948,7 +6955,7 @@ typedef struct sDot11fIETPCRequest {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_tpc_request(
+__must_check uint32_t dot11f_unpack_ie_tpc_request(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -6989,7 +6996,7 @@ typedef struct sDot11fIETimeAdvertisement {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_time_advertisement(
+__must_check uint32_t dot11f_unpack_ie_time_advertisement(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7029,7 +7036,7 @@ typedef struct sDot11fIETimeoutInterval {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_timeout_interval(
+__must_check uint32_t dot11f_unpack_ie_timeout_interval(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7072,7 +7079,7 @@ typedef struct sDot11fIEVHTExtBssLoad {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_vht_ext_bss_load(
+__must_check uint32_t dot11f_unpack_ie_vht_ext_bss_load(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7110,7 +7117,7 @@ typedef struct sDot11fIEVendor1IE {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_vendor1_ie(
+__must_check uint32_t dot11f_unpack_ie_vendor1_ie(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7148,7 +7155,7 @@ typedef struct sDot11fIEVendor3IE {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_vendor3_ie(
+__must_check uint32_t dot11f_unpack_ie_vendor3_ie(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7196,7 +7203,7 @@ typedef struct sDot11fIEWAPI {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wapi(
+__must_check uint32_t dot11f_unpack_ie_wapi(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7236,7 +7243,7 @@ typedef struct sDot11fIEWAPIOpaque {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wapi_opaque(
+__must_check uint32_t dot11f_unpack_ie_wapi_opaque(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7276,7 +7283,7 @@ typedef struct sDot11fIEWFATPC {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wfatpc(
+__must_check uint32_t dot11f_unpack_ie_wfatpc(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7316,7 +7323,7 @@ typedef struct sDot11fIEWFDIEOpaque {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wfdie_opaque(
+__must_check uint32_t dot11f_unpack_ie_wfdie_opaque(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7360,7 +7367,7 @@ typedef struct sDot11fIEWMMCaps {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wmm_caps(
+__must_check uint32_t dot11f_unpack_ie_wmm_caps(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7402,7 +7409,7 @@ typedef struct sDot11fIEWMMInfoAp {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wmm_info_ap(
+__must_check uint32_t dot11f_unpack_ie_wmm_info_ap(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7448,7 +7455,7 @@ typedef struct sDot11fIEWMMInfoStation {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wmm_info_station(
+__must_check uint32_t dot11f_unpack_ie_wmm_info_station(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7517,7 +7524,7 @@ typedef struct sDot11fIEWMMParams {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wmm_params(
+__must_check uint32_t dot11f_unpack_ie_wmm_params(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7564,7 +7571,7 @@ typedef struct sDot11fIEWPA {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wpa(
+__must_check uint32_t dot11f_unpack_ie_wpa(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7604,7 +7611,7 @@ typedef struct sDot11fIEWPAOpaque {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wpa_opaque(
+__must_check uint32_t dot11f_unpack_ie_wpa_opaque(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7664,7 +7671,7 @@ typedef struct sDot11fIEWSC {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wsc(
+__must_check uint32_t dot11f_unpack_ie_wsc(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7705,7 +7712,7 @@ typedef struct sDot11fIEWscAssocReq {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wsc_assoc_req(
+__must_check uint32_t dot11f_unpack_ie_wsc_assoc_req(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7746,7 +7753,7 @@ typedef struct sDot11fIEWscAssocRes {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wsc_assoc_res(
+__must_check uint32_t dot11f_unpack_ie_wsc_assoc_res(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7793,7 +7800,7 @@ typedef struct sDot11fIEWscBeacon {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wsc_beacon(
+__must_check uint32_t dot11f_unpack_ie_wsc_beacon(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7848,7 +7855,7 @@ typedef struct sDot11fIEWscBeaconProbeRes {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wsc_beacon_probe_res(
+__must_check uint32_t dot11f_unpack_ie_wsc_beacon_probe_res(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7888,7 +7895,7 @@ typedef struct sDot11fIEWscIEOpaque {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wsc_ie_opaque(
+__must_check uint32_t dot11f_unpack_ie_wsc_ie_opaque(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7941,7 +7948,7 @@ typedef struct sDot11fIEWscProbeReq {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wsc_probe_req(
+__must_check uint32_t dot11f_unpack_ie_wsc_probe_req(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -7996,7 +8003,7 @@ typedef struct sDot11fIEWscProbeRes {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wsc_probe_res(
+__must_check uint32_t dot11f_unpack_ie_wsc_probe_res(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8037,7 +8044,7 @@ typedef struct sDot11fIEWscReassocRes {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_wsc_reassoc_res(
+__must_check uint32_t dot11f_unpack_ie_wsc_reassoc_res(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8079,7 +8086,7 @@ typedef struct sDot11fIEext_chan_switch_ann {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ext_chan_switch_ann(
+__must_check uint32_t dot11f_unpack_ie_ext_chan_switch_ann(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8118,7 +8125,7 @@ typedef struct sDot11fIEfils_assoc_delay_info {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_fils_assoc_delay_info(
+__must_check uint32_t dot11f_unpack_ie_fils_assoc_delay_info(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8160,7 +8167,7 @@ typedef struct sDot11fIEfils_hlp_container {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_fils_hlp_container(
+__must_check uint32_t dot11f_unpack_ie_fils_hlp_container(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8209,7 +8216,7 @@ typedef struct sDot11fIEfils_indication {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_fils_indication(
+__must_check uint32_t dot11f_unpack_ie_fils_indication(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8250,7 +8257,7 @@ typedef struct sDot11fIEfils_kde {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_fils_kde(
+__must_check uint32_t dot11f_unpack_ie_fils_kde(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8290,7 +8297,7 @@ typedef struct sDot11fIEfils_key_confirmation {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_fils_key_confirmation(
+__must_check uint32_t dot11f_unpack_ie_fils_key_confirmation(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8329,7 +8336,7 @@ typedef struct sDot11fIEfils_nonce {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_fils_nonce(
+__must_check uint32_t dot11f_unpack_ie_fils_nonce(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8370,7 +8377,7 @@ typedef struct sDot11fIEfils_public_key {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_fils_public_key(
+__must_check uint32_t dot11f_unpack_ie_fils_public_key(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8409,7 +8416,7 @@ typedef struct sDot11fIEfils_session {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_fils_session(
+__must_check uint32_t dot11f_unpack_ie_fils_session(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8449,7 +8456,7 @@ typedef struct sDot11fIEfils_wrapped_data {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_fils_wrapped_data(
+__must_check uint32_t dot11f_unpack_ie_fils_wrapped_data(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8489,7 +8496,7 @@ typedef struct sDot11fIEfragment_ie {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_fragment_ie(
+__must_check uint32_t dot11f_unpack_ie_fragment_ie(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8539,7 +8546,7 @@ typedef struct sDot11fIEhs20vendor_ie {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_hs20vendor_ie(
+__must_check uint32_t dot11f_unpack_ie_hs20vendor_ie(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8583,7 +8590,7 @@ typedef struct sDot11fIEht2040_bss_coexistence {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ht2040_bss_coexistence(
+__must_check uint32_t dot11f_unpack_ie_ht2040_bss_coexistence(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8624,7 +8631,7 @@ typedef struct sDot11fIEht2040_bss_intolerant_report {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_ht2040_bss_intolerant_report(
+__must_check uint32_t dot11f_unpack_ie_ht2040_bss_intolerant_report(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8664,7 +8671,7 @@ typedef struct sDot11fIEosen_ie {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_osen_ie(
+__must_check uint32_t dot11f_unpack_ie_osen_ie(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8703,7 +8710,7 @@ typedef struct sDot11fIEsec_chan_offset_ele {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_sec_chan_offset_ele(
+__must_check uint32_t dot11f_unpack_ie_sec_chan_offset_ele(
tpAniSirGlobal,
uint8_t *,
uint8_t,
@@ -8744,7 +8751,7 @@ typedef struct sDot11fIEvendor_vht_ie {
#ifdef __cplusplus
extern "C" {
#endif /* C++ */
-uint32_t dot11f_unpack_ie_vendor_vht_ie(
+__must_check uint32_t dot11f_unpack_ie_vendor_vht_ie(
tpAniSirGlobal,
uint8_t *,
uint8_t,
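
Every dot11f_unpack_ie_* declaration above gains __must_check, which in this codebase presumably expands to the compiler's warn_unused_result attribute, so a caller that silently drops the unpack status now draws a warning. A minimal, hedged sketch of the intent follows; the macro definition and the call shape are assumptions, not taken from the patch.

/* Assumed definition; the driver's real macro lives in its compiler
 * abstraction headers and may differ. */
#ifndef __must_check
#define __must_check __attribute__((warn_unused_result))
#endif

/* Stand-in for a dot11f unpack routine. */
static __must_check uint32_t example_unpack(const uint8_t *buf, uint8_t len)
{
	return (buf && len) ? 0 : 1;
}

static void example_caller(const uint8_t *buf, uint8_t len)
{
	/* example_unpack(buf, len);  <-- would now warn: result ignored */
	if (example_unpack(buf, len))
		return;	/* callers are forced to act on the status */
}
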
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/include/sir_params.h b/drivers/staging/qcacld-3.0/core/mac/src/include/sir_params.h
index 3b3f1e21d360..fd784dafc809 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/include/sir_params.h
+++ b/drivers/staging/qcacld-3.0/core/mac/src/include/sir_params.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -626,8 +626,8 @@ typedef struct sSirMbMsgP2p {
#define SIR_HAL_REMOVE_BCN_FILTER_CMDID (SIR_HAL_ITC_MSG_TYPES_BEGIN + 340)
-#define SIR_HAL_BPF_GET_CAPABILITIES_REQ (SIR_HAL_ITC_MSG_TYPES_BEGIN + 341)
-#define SIR_HAL_BPF_SET_INSTRUCTIONS_REQ (SIR_HAL_ITC_MSG_TYPES_BEGIN + 342)
+#define SIR_HAL_APF_GET_CAPABILITIES_REQ (SIR_HAL_ITC_MSG_TYPES_BEGIN + 341)
+#define SIR_HAL_APF_SET_INSTRUCTIONS_REQ (SIR_HAL_ITC_MSG_TYPES_BEGIN + 342)
#define SIR_HAL_SET_WISA_PARAMS (SIR_HAL_ITC_MSG_TYPES_BEGIN + 343)
#define SIR_HAL_SET_ADAPT_DWELLTIME_PARAMS (SIR_HAL_ITC_MSG_TYPES_BEGIN + 344)
@@ -697,7 +697,8 @@ typedef struct sSirMbMsgP2p {
#define SIR_HAL_SET_DEL_PMKID_CACHE (SIR_HAL_ITC_MSG_TYPES_BEGIN + 389)
#define SIR_HAL_HLP_IE_INFO (SIR_HAL_ITC_MSG_TYPES_BEGIN + 390)
-#define SIR_HAL_NDP_SCH_UPDATE_IND (SIR_HAL_ITC_MSG_TYPES_BEGIN + 391)
+#define SIR_HAL_INVOKE_NEIGHBOR_REPORT (SIR_HAL_ITC_MSG_TYPES_BEGIN + 391)
+#define SIR_HAL_NDP_SCH_UPDATE_IND (SIR_HAL_ITC_MSG_TYPES_BEGIN + 392)
#define SIR_HAL_MSG_TYPES_END (SIR_HAL_MSG_TYPES_BEGIN + 0x1FF)
/* CFG message types */
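
The two hunks above rename the BPF message IDs to APF and slot SIR_HAL_INVOKE_NEIGHBOR_REPORT in at offset +391, which is why SIR_HAL_NDP_SCH_UPDATE_IND shifts to +392: each ID is a fixed offset from SIR_HAL_ITC_MSG_TYPES_BEGIN, so inserting an ID means renumbering everything after it. A hypothetical build-time guard (not part of the driver, assuming C11 _Static_assert is available) could make an accidental collision fail the compile:

/* Hypothetical guard against reusing the same SIR_HAL offset twice. */
#define SIR_HAL_IDS_MUST_DIFFER(a, b) \
	_Static_assert((a) != (b), "SIR_HAL message IDs collide")

SIR_HAL_IDS_MUST_DIFFER(SIR_HAL_INVOKE_NEIGHBOR_REPORT,
			SIR_HAL_NDP_SCH_UPDATE_IND);
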
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_api.h b/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_api.h
index 64215ed8c4b5..5994d95accee 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_api.h
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_api.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -214,11 +214,11 @@ tMgmtFrmDropReason lim_is_pkt_candidate_for_drop(tpAniSirGlobal pMac,
#ifdef WLAN_FEATURE_ROAM_OFFLOAD
QDF_STATUS pe_roam_synch_callback(tpAniSirGlobal mac_ctx,
struct sSirSmeRoamOffloadSynchInd *roam_sync_ind_ptr,
- tpSirBssDescription bss_desc_ptr);
+ tpSirBssDescription bss_desc_ptr, enum sir_roam_op_code reason);
#else
static inline QDF_STATUS pe_roam_synch_callback(tpAniSirGlobal mac_ctx,
struct sSirSmeRoamOffloadSynchInd *roam_sync_ind_ptr,
- tpSirBssDescription bss_desc_ptr)
+ tpSirBssDescription bss_desc_ptr, enum sir_roam_op_code reason)
{
return QDF_STATUS_E_NOSUPPORT;
}
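
pe_roam_synch_callback() now takes an enum sir_roam_op_code so a single entry point can signal roam start, abort, and synch propagation. The sketch below shows only the call shape; the real call sites sit in the WMA/CSR roam-offload path and are not part of this hunk.

/* Hedged sketch: notify PE that firmware roaming has started. The NULL BSS
 * descriptor reflects that START/ABORT carry no candidate AP information. */
static QDF_STATUS example_notify_roam_start(tpAniSirGlobal mac_ctx,
		struct sSirSmeRoamOffloadSynchInd *sync_ind)
{
	return pe_roam_synch_callback(mac_ctx, sync_ind, NULL,
				      SIR_ROAMING_START);
}
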
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_session.h b/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_session.h
index 7dd6ff1a3661..6a1c992a0373 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_session.h
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_session.h
@@ -508,6 +508,11 @@ typedef struct sPESession /* Added to Support BT-AMP */
bool ch_switch_in_progress;
/* previous auth frame's sequence number */
uint16_t prev_auth_seq_num;
+ bool fw_roaming_started;
+ bool recvd_deauth_while_roaming;
+ bool recvd_disassoc_while_roaming;
+	uint16_t deauth_disassoc_rc;
+ int8_t def_max_tx_pwr;
} tPESession, *tpPESession;
/*-------------------------------------------------------------------------
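
The new tPESession fields above are the bookkeeping behind LFR3 roaming: fw_roaming_started marks that firmware currently owns the connection, while the recvd_*_while_roaming flags plus deauth_disassoc_rc record a deauth or disassoc that was deliberately dropped during roaming so it can be honored later. A minimal sketch of that bookkeeping, mirroring logic that appears later in this patch:

/* Hedged sketch; the authoritative logic lives in pe_roam_synch_callback()
 * and the deauth/disassoc frame handlers further down in this patch. */
static void example_defer_deauth(tpPESession session, uint16_t reason_code)
{
	if (!session->fw_roaming_started)
		return;

	/* Remember the dropped frame so a roam abort can replay it. */
	session->recvd_deauth_while_roaming = true;
	session->deauth_disassoc_rc = reason_code;
}
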
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_trace.h b/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_trace.h
index dbd4ed714e4f..5b1d8248d3f7 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_trace.h
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/include/lim_trace.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, 2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -85,7 +85,7 @@ void limTraceUpdateMgmtStat(tpAniSirGlobal pMac, uint8_t subtype);
void lim_trace_dumpMgmtStat(tpAniSirGlobal pMac, uint8_t subtype);
uint8_t *lim_trace_get_mlm_state_string(uint32_t mlmState);
uint8_t *lim_trace_get_sme_state_string(uint32_t smeState);
-void lim_trace_dump(tpAniSirGlobal pMac, tp_qdf_trace_record pRecord,
+void lim_trace_dump(void *pMac, tp_qdf_trace_record pRecord,
uint16_t recIndex);
void mac_trace_msg_tx(tpAniSirGlobal pMac, uint8_t session, uint32_t data);
void mac_trace_msg_rx(tpAniSirGlobal pMac, uint8_t session, uint32_t data);
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_api.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_api.c
index 68d98dc44391..97f192bcfcdf 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_api.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_api.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -633,6 +633,17 @@ void lim_cleanup(tpAniSirGlobal pMac)
qdf_mem_free(pMac->lim.gpLimMlmScanReq);
pMac->lim.gpLimMlmScanReq = NULL;
}
+
+ if (pMac->lim.limDisassocDeauthCnfReq.pMlmDisassocReq) {
+ qdf_mem_free(pMac->lim.limDisassocDeauthCnfReq.pMlmDisassocReq);
+ pMac->lim.limDisassocDeauthCnfReq.pMlmDisassocReq = NULL;
+ }
+
+ if (pMac->lim.limDisassocDeauthCnfReq.pMlmDeauthReq) {
+ qdf_mem_free(pMac->lim.limDisassocDeauthCnfReq.pMlmDeauthReq);
+ pMac->lim.limDisassocDeauthCnfReq.pMlmDeauthReq = NULL;
+ }
+
/* Now, finally reset the deferred message queue pointers */
lim_reset_deferred_msg_q(pMac);
@@ -892,6 +903,17 @@ void pe_stop(tpAniSirGlobal pMac)
return;
}
+static void pe_free_nested_messages(tSirMsgQ *msg)
+{
+ switch (msg->type) {
+ case WMA_SET_LINK_STATE_RSP:
+ qdf_mem_free(((tpLinkStateParams) msg->bodyptr)->callbackArg);
+ break;
+ default:
+ break;
+ }
+}
+
/** -------------------------------------------------------------
\fn pe_free_msg
\brief Called by CDS scheduler (function cds_sched_flush_mc_mqs)
@@ -910,6 +932,7 @@ void pe_free_msg(tpAniSirGlobal pMac, tSirMsgQ *pMsg)
cds_pkt_return_packet((cds_pkt_t *) pMsg->
bodyptr);
} else {
+ pe_free_nested_messages(pMsg);
qdf_mem_free((void *)pMsg->bodyptr);
}
}
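
pe_free_nested_messages() exists because some flushed messages carry their own allocations: a WMA_SET_LINK_STATE_RSP body points at a separately allocated callbackArg, so freeing only bodyptr would leak it. A hedged sketch of the resulting free order; the allocation sites and the tpLinkStateParams layout are assumptions, not shown in this hunk.

/* Hypothetical flush helper illustrating the nested-free ordering. */
static void example_flush_pe_msg(tSirMsgQ *msg)
{
	if (!msg->bodyptr)
		return;

	pe_free_nested_messages(msg);	/* inner allocations first */
	qdf_mem_free(msg->bodyptr);	/* then the body itself */
	msg->bodyptr = NULL;
}
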
@@ -1118,24 +1141,6 @@ static QDF_STATUS pe_handle_mgmt_frame(void *p_cds_gctx, void *cds_buff)
*/
mHdr = WMA_GET_RX_MAC_HEADER(pRxPacketInfo);
- if (mHdr->fc.type == SIR_MAC_MGMT_FRAME) {
- pe_debug("RxBd: %pK mHdr: %pK Type: %d Subtype: %d SizesFC: %zu Mgmt: %zu",
- pRxPacketInfo, mHdr, mHdr->fc.type, mHdr->fc.subType,
- sizeof(tSirMacFrameCtl), sizeof(tSirMacMgmtHdr));
-
- pe_debug("mpdu_len: %d hdr_len: %d data_len: %d",
- WMA_GET_RX_MPDU_LEN(pRxPacketInfo),
- WMA_GET_RX_MPDU_HEADER_LEN(pRxPacketInfo),
- WMA_GET_RX_PAYLOAD_LEN(pRxPacketInfo));
-
- if (WMA_GET_ROAMCANDIDATEIND(pRxPacketInfo))
- pe_debug("roamCandidateInd: %d",
- WMA_GET_ROAMCANDIDATEIND(pRxPacketInfo));
-
- if (WMA_GET_OFFLOADSCANLEARN(pRxPacketInfo))
- pe_debug("offloadScanLearn: %d",
- WMA_GET_OFFLOADSCANLEARN(pRxPacketInfo));
- }
if (QDF_STATUS_SUCCESS !=
pe_drop_pending_rx_mgmt_frames(pMac, mHdr, pVosPkt))
@@ -2011,6 +2016,7 @@ static inline void lim_copy_and_free_hlp_data_from_session(
* @mac_ctx: MAC Context
* @roam_sync_ind_ptr: Roam synch indication buffer pointer
* @bss_desc: BSS Descriptor pointer
+ * @reason: Reason for calling callback which decides the action to be taken.
*
* This is a PE level callback called from WMA to complete the roam synch
* propagation at PE level and also fill the BSS descriptor which will be
@@ -2020,7 +2026,7 @@ static inline void lim_copy_and_free_hlp_data_from_session(
*/
QDF_STATUS pe_roam_synch_callback(tpAniSirGlobal mac_ctx,
roam_offload_synch_ind *roam_sync_ind_ptr,
- tpSirBssDescription bss_desc)
+ tpSirBssDescription bss_desc, enum sir_roam_op_code reason)
{
tpPESession session_ptr;
tpPESession ft_session_ptr;
@@ -2035,20 +2041,54 @@ QDF_STATUS pe_roam_synch_callback(tpAniSirGlobal mac_ctx,
pe_err("LFR3:roam_sync_ind_ptr is NULL");
return status;
}
- pe_debug("LFR3:Received WMA_ROAM_OFFLOAD_SYNCH_IND LFR3:auth: %d vdevId: %d",
- roam_sync_ind_ptr->authStatus, roam_sync_ind_ptr->roamedVdevId);
- lim_print_mac_addr(mac_ctx, roam_sync_ind_ptr->bssid.bytes,
- QDF_TRACE_LEVEL_DEBUG);
session_ptr = pe_find_session_by_sme_session_id(mac_ctx,
roam_sync_ind_ptr->roamedVdevId);
if (session_ptr == NULL) {
pe_err("LFR3:Unable to find session");
return status;
}
+
if (!LIM_IS_STA_ROLE(session_ptr)) {
pe_err("LFR3:session is not in STA mode");
return status;
}
+
+ pe_debug("LFR3: PE callback reason: %d", reason);
+ switch (reason) {
+ case SIR_ROAMING_START:
+ session_ptr->fw_roaming_started = true;
+ return QDF_STATUS_SUCCESS;
+ case SIR_ROAMING_ABORT:
+ session_ptr->fw_roaming_started = false;
+ /*
+ * If there was a disassoc or deauth that was received
+ * during roaming and it was not honored, then we have
+ * to internally initiate a disconnect because with
+ * ROAM_ABORT we come back to original AP.
+ */
+ if (session_ptr->recvd_deauth_while_roaming)
+ lim_perform_deauth(mac_ctx, session_ptr,
+ session_ptr->deauth_disassoc_rc,
+ session_ptr->bssId, 0);
+ if (session_ptr->recvd_disassoc_while_roaming) {
+ lim_disassoc_tdls_peers(mac_ctx, session_ptr,
+ session_ptr->bssId);
+ lim_perform_disassoc(mac_ctx, 0,
+ session_ptr->deauth_disassoc_rc,
+ session_ptr, session_ptr->bssId);
+ }
+ return QDF_STATUS_SUCCESS;
+ case SIR_ROAM_SYNCH_PROPAGATION:
+ session_ptr->fw_roaming_started = false;
+ break;
+ default:
+ return status;
+ }
+
+ pe_debug("LFR3:Received WMA_ROAM_OFFLOAD_SYNCH_IND LFR3:auth: %d vdevId: %d",
+ roam_sync_ind_ptr->authStatus, roam_sync_ind_ptr->roamedVdevId);
+ lim_print_mac_addr(mac_ctx, roam_sync_ind_ptr->bssid.bytes,
+ QDF_TRACE_LEVEL_DEBUG);
/*
* If deauth from AP already in progress, ignore Roam Synch Indication
* from firmware.
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_ft_preauth.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_ft_preauth.c
index 61b4c34c9d1e..503c9bf2bd2a 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_ft_preauth.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_ft_preauth.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -484,14 +484,15 @@ void lim_handle_ft_pre_auth_rsp(tpAniSirGlobal pMac, tSirRetStatus status,
lim_print_mac_addr(pMac, psessionEntry->limReAssocbssId, LOGD);
}
send_rsp:
- if (psessionEntry->currentOperChannel !=
- psessionEntry->ftPEContext.pFTPreAuthReq->preAuthchannelNum) {
+ if ((psessionEntry->currentOperChannel !=
+ psessionEntry->ftPEContext.pFTPreAuthReq->preAuthchannelNum) ||
+ lim_is_in_mcc(pMac)) {
/* Need to move to the original AP channel */
lim_process_abort_scan_ind(pMac, psessionEntry->peSessionId,
psessionEntry->ftPEContext.pFTPreAuthReq->scan_id,
PREAUTH_REQUESTOR_ID);
} else {
- pe_debug("Pre auth on same channel as connected AP channel %d",
+ pe_debug("Pre auth on same channel as connected AP channel %d and no mcc pe sessions exist",
psessionEntry->ftPEContext.pFTPreAuthReq->
preAuthchannelNum);
lim_ft_process_pre_auth_result(pMac, psessionEntry);
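
The condition above now forces a return to the home channel after FT pre-auth not only when pre-auth ran on a different channel, but also whenever any multi-channel-concurrency session exists; lim_is_in_mcc() is assumed to report the latter. Reduced to a predicate, the decision looks roughly like this (illustrative only):

/* Hedged sketch of the channel-restore decision after FT pre-auth. */
static bool example_needs_channel_restore(tpAniSirGlobal mac_ctx,
		tpPESession session, uint8_t preauth_channel)
{
	return (session->currentOperChannel != preauth_channel) ||
	       lim_is_in_mcc(mac_ctx);
}
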
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_action_frame.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_action_frame.c
index 1476aaed2298..d0cb44918a0e 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_action_frame.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_action_frame.c
@@ -1648,6 +1648,7 @@ static void lim_process_action_vendor_specific(tpAniSirGlobal mac_ctx,
pe_debug("Received action frame of invalid len %d", frame_len);
return;
}
+
if (session)
session_id = session->smeSessionId;
/* Check if it is a P2P or DPP public action frame. */
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_assoc_req_frame.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_assoc_req_frame.c
index 7a6431023270..3e3345768882 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_assoc_req_frame.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_assoc_req_frame.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -746,8 +746,8 @@ static bool lim_chk_n_process_wpa_rsn_ie(tpAniSirGlobal mac_ctx,
uint8_t sub_type, bool *pmf_connection)
{
uint8_t *wps_ie = NULL;
- tDot11fIEWPA dot11f_ie_wpa;
- tDot11fIERSN dot11f_ie_rsn;
+ tDot11fIEWPA dot11f_ie_wpa = {0};
+ tDot11fIERSN dot11f_ie_rsn = {0};
tSirRetStatus status = eSIR_SUCCESS;
/*
* Clear the buffers so that frame parser knows that there isn't a
@@ -1380,18 +1380,30 @@ static bool lim_update_sta_ds(tpAniSirGlobal mac_ctx, tpSirMacMgmtHdr hdr,
sta_ds->mlmStaContext.htCapability = 0;
sta_ds->mlmStaContext.vhtCapability = 0;
}
- if (sta_ds->mlmStaContext.vhtCapability) {
+
+ if (sta_ds->mlmStaContext.vhtCapability && vht_caps) {
if (session->vht_config.su_beam_formee &&
- assoc_req->VHTCaps.suBeamFormerCap)
+ vht_caps->suBeamFormerCap)
sta_ds->vhtBeamFormerCapable = 1;
else
sta_ds->vhtBeamFormerCapable = 0;
if (session->vht_config.su_beam_former &&
- assoc_req->VHTCaps.suBeamformeeCap)
+ vht_caps->suBeamformeeCap)
sta_ds->vht_su_bfee_capable = 1;
else
sta_ds->vht_su_bfee_capable = 0;
+
+ pe_debug("peer_caps: suBformer: %d, suBformee: %d",
+ vht_caps->suBeamFormerCap,
+ vht_caps->suBeamformeeCap);
+ pe_debug("self_cap: suBformer: %d, suBformee: %d",
+ session->vht_config.su_beam_former,
+ session->vht_config.su_beam_formee);
+ pe_debug("connection's final cap: suBformer: %d, suBformee: %d",
+ sta_ds->vhtBeamFormerCapable,
+ sta_ds->vht_su_bfee_capable);
}
+
if (lim_populate_matching_rate_set(mac_ctx, sta_ds,
&(assoc_req->supportedRates),
&(assoc_req->extendedRates),
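
Two defensive changes appear above: the parsed WPA/RSN IE structures are zero-initialized so a failed or absent parse never exposes stale fields, and the beam-forming capability bits are read through the vht_caps pointer only after a NULL check. A hedged sketch of the NULL-guarded capability derivation:

/* Illustrative only; mirrors the su_bfee derivation above with an explicit
 * guard for a missing VHT caps IE. */
static uint8_t example_vht_su_bfee_capable(tpPESession session,
		tDot11fIEVHTCaps *vht_caps)
{
	if (!vht_caps || !session->vht_config.su_beam_former)
		return 0;

	return vht_caps->suBeamformeeCap ? 1 : 0;
}
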
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_deauth_frame.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_deauth_frame.c
index 721b1adcabb9..02837a61cf1e 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_deauth_frame.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_deauth_frame.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -71,11 +71,8 @@ lim_process_deauth_frame(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo,
tpPESession psessionEntry)
{
uint8_t *pBody;
- uint16_t aid, reasonCode;
+ uint16_t reasonCode;
tpSirMacMgmtHdr pHdr;
- tLimMlmAssocCnf mlmAssocCnf;
- tLimMlmDeauthInd mlmDeauthInd;
- tpDphHashNode pStaDs;
tpPESession pRoamSessionEntry = NULL;
uint8_t roamSessionId;
#ifdef WLAN_FEATURE_11W
@@ -251,8 +248,30 @@ lim_process_deauth_frame(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo,
pe_find_session_by_bssid(pMac, psessionEntry->limReAssocbssId,
&roamSessionId);
- if (lim_is_reassoc_in_progress(pMac, psessionEntry)
- || lim_is_reassoc_in_progress(pMac, pRoamSessionEntry)) {
+ if (lim_is_reassoc_in_progress(pMac, psessionEntry) ||
+ lim_is_reassoc_in_progress(pMac, pRoamSessionEntry) ||
+ psessionEntry->fw_roaming_started) {
+ /*
+ * For LFR3, the roaming bssid is not known during ROAM_START,
+ * so check if the deauth is received from current AP when
+ * roaming is being done in the firmware
+ */
+ if (psessionEntry->fw_roaming_started &&
+ IS_CURRENT_BSSID(pMac, pHdr->sa, psessionEntry)) {
+ pe_debug("LFR3: Drop deauth frame from connected AP");
+ /*
+ * recvd_deauth_while_roaming will be stored in the
+			 * current AP session and if roaming has been aborted
+ * for some reason and come back to same AP, then issue
+ * a disconnect internally if this flag is true. There
+ * is no need to reset this flag to false, because if
+ * roaming succeeds, then this session gets deleted and
+ * new session is created.
+ */
+ psessionEntry->recvd_deauth_while_roaming = true;
+ psessionEntry->deauth_disassoc_rc = reasonCode;
+ return;
+ }
if (!IS_REASSOC_BSSID(pMac, pHdr->sa, psessionEntry)) {
pe_debug("Rcv Deauth from unknown/different "
"AP while ReAssoc. Ignore " MAC_ADDRESS_STR
@@ -297,14 +316,30 @@ lim_process_deauth_frame(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo,
}
}
- pStaDs =
- dph_lookup_hash_entry(pMac, pHdr->sa, &aid,
- &psessionEntry->dph.dphHashTable);
+ lim_perform_deauth(pMac, psessionEntry, reasonCode, pHdr->sa,
+ frame_rssi);
+
+} /*** end lim_process_deauth_frame() ***/
+
+void lim_perform_deauth(tpAniSirGlobal mac_ctx, tpPESession pe_session,
+ uint16_t rc, tSirMacAddr addr, int32_t frame_rssi)
+{
+ tLimMlmDeauthInd mlmDeauthInd;
+ tLimMlmAssocCnf mlmAssocCnf;
+ uint16_t aid;
+ tpDphHashNode sta_ds;
+
+ sta_ds = dph_lookup_hash_entry(mac_ctx, addr, &aid,
+ &pe_session->dph.dphHashTable);
+ if (sta_ds == NULL) {
+ pe_debug("Hash entry not found");
+ return;
+ }
/* Check for pre-assoc states */
- switch (GET_LIM_SYSTEM_ROLE(psessionEntry)) {
+ switch (GET_LIM_SYSTEM_ROLE(pe_session)) {
case eLIM_STA_ROLE:
- switch (psessionEntry->limMlmState) {
+ switch (pe_session->limMlmState) {
case eLIM_MLM_WT_AUTH_FRAME2_STATE:
/**
* AP sent Deauth frame while waiting
@@ -314,34 +349,34 @@ lim_process_deauth_frame(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo,
pe_debug("received Deauth frame state %X with failure "
"code %d from " MAC_ADDRESS_STR,
- psessionEntry->limMlmState, reasonCode,
- MAC_ADDR_ARRAY(pHdr->sa));
+ pe_session->limMlmState, rc,
+ MAC_ADDR_ARRAY(addr));
- lim_restore_from_auth_state(pMac,
- eSIR_SME_DEAUTH_WHILE_JOIN,
- reasonCode, psessionEntry);
+ lim_restore_from_auth_state(mac_ctx,
+ eSIR_SME_DEAUTH_WHILE_JOIN,
+ rc, pe_session);
return;
case eLIM_MLM_AUTHENTICATED_STATE:
pe_debug("received Deauth frame state %X with "
- "reasonCode=%d from " MAC_ADDRESS_STR,
- psessionEntry->limMlmState, reasonCode,
- MAC_ADDR_ARRAY(pHdr->sa));
+ "reasonCode=%d from " MAC_ADDRESS_STR,
+ pe_session->limMlmState, rc,
+ MAC_ADDR_ARRAY(addr));
/* / Issue Deauth Indication to SME. */
qdf_mem_copy((uint8_t *) &mlmDeauthInd.peerMacAddr,
- pHdr->sa, sizeof(tSirMacAddr));
- mlmDeauthInd.reasonCode = reasonCode;
+ addr, sizeof(tSirMacAddr));
+ mlmDeauthInd.reasonCode = rc;
- psessionEntry->limMlmState = eLIM_MLM_IDLE_STATE;
+ pe_session->limMlmState = eLIM_MLM_IDLE_STATE;
MTRACE(mac_trace
- (pMac, TRACE_CODE_MLM_STATE,
- psessionEntry->peSessionId,
- psessionEntry->limMlmState));
+ (mac_ctx, TRACE_CODE_MLM_STATE,
+ pe_session->peSessionId,
+ pe_session->limMlmState));
- lim_post_sme_message(pMac,
- LIM_MLM_DEAUTH_IND,
- (uint32_t *) &mlmDeauthInd);
+ lim_post_sme_message(mac_ctx,
+ LIM_MLM_DEAUTH_IND,
+ (uint32_t *) &mlmDeauthInd);
return;
case eLIM_MLM_WT_ASSOC_RSP_STATE:
@@ -351,102 +386,102 @@ lim_process_deauth_frame(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo,
* if any and issue ASSOC_CNF to SME.
*/
pe_debug("received Deauth frame state %X with "
- "reasonCode=%d from " MAC_ADDRESS_STR,
- psessionEntry->limMlmState, reasonCode,
- MAC_ADDR_ARRAY(pHdr->sa));
- if (lim_search_pre_auth_list(pMac, pHdr->sa))
- lim_delete_pre_auth_node(pMac, pHdr->sa);
-
- if (psessionEntry->pLimMlmJoinReq) {
- qdf_mem_free(psessionEntry->pLimMlmJoinReq);
- psessionEntry->pLimMlmJoinReq = NULL;
+ "reasonCode=%d from " MAC_ADDRESS_STR,
+ pe_session->limMlmState, rc,
+ MAC_ADDR_ARRAY(addr));
+ if (lim_search_pre_auth_list(mac_ctx, addr))
+ lim_delete_pre_auth_node(mac_ctx, addr);
+
+ if (pe_session->pLimMlmJoinReq) {
+ qdf_mem_free(pe_session->pLimMlmJoinReq);
+ pe_session->pLimMlmJoinReq = NULL;
}
mlmAssocCnf.resultCode = eSIR_SME_DEAUTH_WHILE_JOIN;
- mlmAssocCnf.protStatusCode = reasonCode;
+ mlmAssocCnf.protStatusCode = rc;
/* PE session Id */
- mlmAssocCnf.sessionId = psessionEntry->peSessionId;
+ mlmAssocCnf.sessionId = pe_session->peSessionId;
- psessionEntry->limMlmState =
- psessionEntry->limPrevMlmState;
+ pe_session->limMlmState =
+ pe_session->limPrevMlmState;
MTRACE(mac_trace
- (pMac, TRACE_CODE_MLM_STATE,
- psessionEntry->peSessionId,
- psessionEntry->limMlmState));
+ (mac_ctx, TRACE_CODE_MLM_STATE,
+ pe_session->peSessionId,
+ pe_session->limMlmState));
/* Deactive Association response timeout */
- lim_deactivate_and_change_timer(pMac,
- eLIM_ASSOC_FAIL_TIMER);
+ lim_deactivate_and_change_timer(mac_ctx,
+ eLIM_ASSOC_FAIL_TIMER);
- lim_post_sme_message(pMac,
- LIM_MLM_ASSOC_CNF,
- (uint32_t *) &mlmAssocCnf);
+ lim_post_sme_message(mac_ctx,
+ LIM_MLM_ASSOC_CNF,
+ (uint32_t *) &mlmAssocCnf);
return;
case eLIM_MLM_WT_ADD_STA_RSP_STATE:
- psessionEntry->fDeauthReceived = true;
+ pe_session->fDeauthReceived = true;
pe_debug("Received Deauth frame in state %X with Reason "
- "Code %d from Peer" MAC_ADDRESS_STR,
- psessionEntry->limMlmState, reasonCode,
- MAC_ADDR_ARRAY(pHdr->sa));
+ "Code %d from Peer" MAC_ADDRESS_STR,
+ pe_session->limMlmState, rc,
+ MAC_ADDR_ARRAY(addr));
return;
case eLIM_MLM_IDLE_STATE:
case eLIM_MLM_LINK_ESTABLISHED_STATE:
#ifdef FEATURE_WLAN_TDLS
- if ((NULL != pStaDs)
- && (STA_ENTRY_TDLS_PEER == pStaDs->staType)) {
+ if ((NULL != sta_ds)
+ && (STA_ENTRY_TDLS_PEER == sta_ds->staType)) {
pe_err("received Deauth frame in state %X with "
"reason code %d from Tdls peer"
MAC_ADDRESS_STR,
- psessionEntry->limMlmState, reasonCode,
- MAC_ADDR_ARRAY(pHdr->sa));
- lim_send_sme_tdls_del_sta_ind(pMac, pStaDs,
- psessionEntry,
- reasonCode);
- return;
+ pe_session->limMlmState, rc,
+ MAC_ADDR_ARRAY(addr));
+ lim_send_sme_tdls_del_sta_ind(mac_ctx, sta_ds,
+ pe_session,
+ rc);
+ return;
} else {
- /*
- * Delete all the TDLS peers only if Deauth
- * is received from the AP
- */
- if (IS_CURRENT_BSSID(pMac, pHdr->sa, psessionEntry))
- lim_delete_tdls_peers(pMac, psessionEntry);
+ /*
+ * Delete all the TDLS peers only if Deauth
+ * is received from the AP
+ */
+ if (IS_CURRENT_BSSID(mac_ctx, addr, pe_session))
+ lim_delete_tdls_peers(mac_ctx, pe_session);
#endif
/**
* This could be Deauthentication frame from
* a BSS with which pre-authentication was
* performed. Delete Pre-auth entry if found.
*/
- if (lim_search_pre_auth_list(pMac, pHdr->sa))
- lim_delete_pre_auth_node(pMac, pHdr->sa);
+ if (lim_search_pre_auth_list(mac_ctx, addr))
+ lim_delete_pre_auth_node(mac_ctx, addr);
#ifdef FEATURE_WLAN_TDLS
- }
+ }
#endif
break;
case eLIM_MLM_WT_REASSOC_RSP_STATE:
pe_err("received Deauth frame state %X with "
"reasonCode=%d from " MAC_ADDRESS_STR,
- psessionEntry->limMlmState, reasonCode,
- MAC_ADDR_ARRAY(pHdr->sa));
+ pe_session->limMlmState, rc,
+ MAC_ADDR_ARRAY(addr));
break;
case eLIM_MLM_WT_FT_REASSOC_RSP_STATE:
pe_err("received Deauth frame in FT state %X with "
"reasonCode=%d from " MAC_ADDRESS_STR,
- psessionEntry->limMlmState, reasonCode,
- MAC_ADDR_ARRAY(pHdr->sa));
+ pe_session->limMlmState, rc,
+ MAC_ADDR_ARRAY(addr));
break;
default:
pe_err("received Deauth frame in state %X with "
"reasonCode=%d from " MAC_ADDRESS_STR,
- psessionEntry->limMlmState, reasonCode,
- MAC_ADDR_ARRAY(pHdr->sa));
+ pe_session->limMlmState, rc,
+ MAC_ADDR_ARRAY(addr));
return;
}
break;
@@ -458,7 +493,6 @@ lim_process_deauth_frame(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo,
break;
default:
-
return;
} /* end switch (pMac->lim.gLimSystemRole) */
@@ -466,30 +500,30 @@ lim_process_deauth_frame(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo,
* Extract 'associated' context for STA, if any.
* This is maintained by DPH and created by LIM.
*/
- if (NULL == pStaDs) {
- pe_err("pStaDs is NULL");
+ if (NULL == sta_ds) {
+ pe_err("sta_ds is NULL");
return;
}
- if ((pStaDs->mlmStaContext.mlmState == eLIM_MLM_WT_DEL_STA_RSP_STATE) ||
- (pStaDs->mlmStaContext.mlmState == eLIM_MLM_WT_DEL_BSS_RSP_STATE)) {
+ if ((sta_ds->mlmStaContext.mlmState == eLIM_MLM_WT_DEL_STA_RSP_STATE) ||
+ (sta_ds->mlmStaContext.mlmState == eLIM_MLM_WT_DEL_BSS_RSP_STATE)) {
/**
* Already in the process of deleting context for the peer
* and received Deauthentication frame. Log and Ignore.
*/
pe_err("received Deauth frame from peer that is in state %X, addr "
- MAC_ADDRESS_STR, pStaDs->mlmStaContext.mlmState,
- MAC_ADDR_ARRAY(pHdr->sa));
+ MAC_ADDRESS_STR, sta_ds->mlmStaContext.mlmState,
+ MAC_ADDR_ARRAY(addr));
return;
}
- pStaDs->mlmStaContext.disassocReason = (tSirMacReasonCodes) reasonCode;
- pStaDs->mlmStaContext.cleanupTrigger = eLIM_PEER_ENTITY_DEAUTH;
+ sta_ds->mlmStaContext.disassocReason = (tSirMacReasonCodes) rc;
+ sta_ds->mlmStaContext.cleanupTrigger = eLIM_PEER_ENTITY_DEAUTH;
/* / Issue Deauth Indication to SME. */
qdf_mem_copy((uint8_t *) &mlmDeauthInd.peerMacAddr,
- pStaDs->staAddr, sizeof(tSirMacAddr));
+ sta_ds->staAddr, sizeof(tSirMacAddr));
mlmDeauthInd.reasonCode =
- (uint8_t) pStaDs->mlmStaContext.disassocReason;
+ (uint8_t) sta_ds->mlmStaContext.disassocReason;
mlmDeauthInd.deauthTrigger = eLIM_PEER_ENTITY_DEAUTH;
/*
@@ -498,18 +532,18 @@ lim_process_deauth_frame(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo,
* failure result code. SME will post the disconnect to the
* supplicant and the latter would start a fresh assoc.
*/
- if (lim_is_reassoc_in_progress(pMac, psessionEntry)) {
+ if (lim_is_reassoc_in_progress(mac_ctx, pe_session)) {
/**
* AP may have 'aged-out' our Pre-auth
* context. Delete local pre-auth context
* if any and issue REASSOC_CNF to SME.
*/
- if (lim_search_pre_auth_list(pMac, pHdr->sa))
- lim_delete_pre_auth_node(pMac, pHdr->sa);
+ if (lim_search_pre_auth_list(mac_ctx, addr))
+ lim_delete_pre_auth_node(mac_ctx, addr);
- if (psessionEntry->limAssocResponseData) {
- qdf_mem_free(psessionEntry->limAssocResponseData);
- psessionEntry->limAssocResponseData = NULL;
+ if (pe_session->limAssocResponseData) {
+ qdf_mem_free(pe_session->limAssocResponseData);
+ pe_session->limAssocResponseData = NULL;
}
pe_debug("Rcv Deauth from ReAssoc AP Issue REASSOC_CNF");
@@ -519,29 +553,29 @@ lim_process_deauth_frame(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo,
* Using eSIR_SME_FT_REASSOC_FAILURE does not seem to clean-up
* properly and we end up seeing "transmit queue timeout".
*/
- lim_post_reassoc_failure(pMac,
- eSIR_SME_FT_REASSOC_TIMEOUT_FAILURE,
- eSIR_MAC_UNSPEC_FAILURE_STATUS,
- psessionEntry);
+ lim_post_reassoc_failure(mac_ctx,
+ eSIR_SME_FT_REASSOC_TIMEOUT_FAILURE,
+ eSIR_MAC_UNSPEC_FAILURE_STATUS,
+ pe_session);
return;
}
/* reset the deauthMsgCnt here since we are able to Process
* the deauth frame and sending up the indication as well */
- if (psessionEntry->deauthmsgcnt != 0)
- psessionEntry->deauthmsgcnt = 0;
+ if (pe_session->deauthmsgcnt != 0)
+ pe_session->deauthmsgcnt = 0;
- if (LIM_IS_STA_ROLE(psessionEntry))
- wma_tx_abort(psessionEntry->smeSessionId);
+ if (LIM_IS_STA_ROLE(pe_session))
+ wma_tx_abort(pe_session->smeSessionId);
- lim_update_lost_link_info(pMac, psessionEntry, frame_rssi);
+ lim_update_lost_link_info(mac_ctx, pe_session, frame_rssi);
/* / Deauthentication from peer MAC entity */
- if (LIM_IS_STA_ROLE(psessionEntry))
- lim_post_sme_message(pMac, LIM_MLM_DEAUTH_IND,
- (uint32_t *) &mlmDeauthInd);
+ if (LIM_IS_STA_ROLE(pe_session))
+ lim_post_sme_message(mac_ctx, LIM_MLM_DEAUTH_IND,
+ (uint32_t *) &mlmDeauthInd);
/* send eWNI_SME_DEAUTH_IND to SME */
- lim_send_sme_deauth_ind(pMac, pStaDs, psessionEntry);
+ lim_send_sme_deauth_ind(mac_ctx, sta_ds, pe_session);
return;
-} /*** end lim_process_deauth_frame() ***/
+}
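
Splitting lim_perform_deauth() out of lim_process_deauth_frame() lets the same handling run in two situations: immediately, for a deauth received outside of roaming, and later, to replay a deauth that was deferred while firmware roaming was in progress. A hedged sketch of the replay path follows; the real version is the SIR_ROAMING_ABORT case in pe_roam_synch_callback() earlier in this patch.

/* Hypothetical replay helper; frame RSSI is passed as 0 because the
 * original frame is no longer available once the roam aborts. */
static void example_replay_deferred_deauth(tpAniSirGlobal mac_ctx,
		tpPESession session)
{
	if (!session->recvd_deauth_while_roaming)
		return;

	lim_perform_deauth(mac_ctx, session, session->deauth_disassoc_rc,
			   session->bssId, 0);
}
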
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_disassoc_frame.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_disassoc_frame.c
index 79344deabb89..c8ae79fc8618 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_disassoc_frame.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_disassoc_frame.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -77,7 +77,6 @@ lim_process_disassoc_frame(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo,
uint16_t aid, reasonCode;
tpSirMacMgmtHdr pHdr;
tpDphHashNode pStaDs;
- tLimMlmDisassocInd mlmDisassocInd;
#ifdef WLAN_FEATURE_11W
uint32_t frameLen;
#endif
@@ -202,13 +201,25 @@ lim_process_disassoc_frame(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo,
/** If we are in the Wait for ReAssoc Rsp state */
if (lim_is_reassoc_in_progress(pMac, psessionEntry)) {
+ /*
+ * For LFR3, the roaming bssid is not known during ROAM_START,
+ * so check if the disassoc is received from current AP when
+ * roaming is being done in the firmware
+ */
+ if (psessionEntry->fw_roaming_started &&
+ IS_CURRENT_BSSID(pMac, pHdr->sa, psessionEntry)) {
+ pe_debug("Dropping disassoc frame from connected AP");
+ psessionEntry->recvd_disassoc_while_roaming = true;
+ psessionEntry->deauth_disassoc_rc = reasonCode;
+ return;
+ }
/** If we had received the DisAssoc from,
* a. the Current AP during ReAssociate to different AP in same ESS
* b. Unknown AP
* drop/ignore the DisAssoc received
*/
if (!IS_REASSOC_BSSID(pMac, pHdr->sa, psessionEntry)) {
- pe_err("Ignore the DisAssoc received, while Processing ReAssoc with different/unknown AP");
+ pe_err("Ignore DisAssoc while Processing ReAssoc");
return;
}
/** If the Disassoc is received from the new AP to which we tried to ReAssociate
@@ -286,20 +297,7 @@ lim_process_disassoc_frame(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo,
MAC_ADDR_ARRAY(pHdr->sa));
return;
}
-#ifdef FEATURE_WLAN_TDLS
- /**
- * Delete all the TDLS peers only if Disassoc is received
- * from the AP
- */
- if ((LIM_IS_STA_ROLE(psessionEntry)) &&
- ((pStaDs->mlmStaContext.mlmState ==
- eLIM_MLM_LINK_ESTABLISHED_STATE) ||
- (pStaDs->mlmStaContext.mlmState ==
- eLIM_MLM_IDLE_STATE)) &&
- (IS_CURRENT_BSSID(pMac, pHdr->sa, psessionEntry)))
- lim_delete_tdls_peers(pMac, psessionEntry);
-#endif
-
+ lim_disassoc_tdls_peers(pMac, psessionEntry, pHdr->sa);
if (pStaDs->mlmStaContext.mlmState != eLIM_MLM_LINK_ESTABLISHED_STATE) {
/**
* Requesting STA is in some 'transient' state?
@@ -315,20 +313,65 @@ lim_process_disassoc_frame(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo,
} /* if (pStaDs->mlmStaContext.mlmState != eLIM_MLM_LINK_ESTABLISHED_STATE) */
- pStaDs->mlmStaContext.cleanupTrigger = eLIM_PEER_ENTITY_DISASSOC;
- pStaDs->mlmStaContext.disassocReason = (tSirMacReasonCodes) reasonCode;
+ lim_perform_disassoc(pMac, frame_rssi, reasonCode,
+ psessionEntry, pHdr->sa);
+
+} /*** end lim_process_disassoc_frame() ***/
+
+#ifdef FEATURE_WLAN_TDLS
+void lim_disassoc_tdls_peers(tpAniSirGlobal mac_ctx,
+ tpPESession pe_session, tSirMacAddr addr)
+{
+ tpDphHashNode sta_ds;
+ uint16_t aid;
+
+ sta_ds = dph_lookup_hash_entry(mac_ctx, addr, &aid,
+ &pe_session->dph.dphHashTable);
+ if (sta_ds == NULL) {
+ pe_debug("Hash entry not found");
+ return;
+ }
+ /**
+ * Delete all the TDLS peers only if Disassoc is received
+ * from the AP
+ */
+ if ((LIM_IS_STA_ROLE(pe_session)) &&
+ ((sta_ds->mlmStaContext.mlmState ==
+ eLIM_MLM_LINK_ESTABLISHED_STATE) ||
+ (sta_ds->mlmStaContext.mlmState ==
+ eLIM_MLM_IDLE_STATE)) &&
+ (IS_CURRENT_BSSID(mac_ctx, addr, pe_session)))
+ lim_delete_tdls_peers(mac_ctx, pe_session);
+}
+#endif
+
+void lim_perform_disassoc(tpAniSirGlobal mac_ctx, int32_t frame_rssi,
+ uint16_t rc, tpPESession pe_session, tSirMacAddr addr)
+{
+ tLimMlmDisassocInd mlmDisassocInd;
+ uint16_t aid;
+ tpDphHashNode sta_ds;
+
+ sta_ds = dph_lookup_hash_entry(mac_ctx, addr, &aid,
+ &pe_session->dph.dphHashTable);
+ if (sta_ds == NULL) {
+ pe_debug("Hash entry not found");
+ return;
+ }
+ sta_ds->mlmStaContext.cleanupTrigger = eLIM_PEER_ENTITY_DISASSOC;
+ sta_ds->mlmStaContext.disassocReason = (tSirMacReasonCodes) rc;
/* Issue Disassoc Indication to SME. */
qdf_mem_copy((uint8_t *) &mlmDisassocInd.peerMacAddr,
- (uint8_t *) pStaDs->staAddr, sizeof(tSirMacAddr));
+ (uint8_t *) sta_ds->staAddr, sizeof(tSirMacAddr));
mlmDisassocInd.reasonCode =
- (uint8_t) pStaDs->mlmStaContext.disassocReason;
+ (uint8_t) sta_ds->mlmStaContext.disassocReason;
mlmDisassocInd.disassocTrigger = eLIM_PEER_ENTITY_DISASSOC;
/* Update PE session Id */
- mlmDisassocInd.sessionId = psessionEntry->peSessionId;
+ mlmDisassocInd.sessionId = pe_session->peSessionId;
- if (lim_is_reassoc_in_progress(pMac, psessionEntry)) {
+ if (lim_is_reassoc_in_progress(mac_ctx, pe_session)) {
/* If we're in the middle of ReAssoc and received disassoc from
* the ReAssoc AP, then notify SME by sending REASSOC_RSP with
@@ -337,22 +380,22 @@ lim_process_disassoc_frame(tpAniSirGlobal pMac, uint8_t *pRxPacketInfo,
*/
pe_debug("received Disassoc from AP while waiting for Reassoc Rsp");
- if (psessionEntry->limAssocResponseData) {
- qdf_mem_free(psessionEntry->limAssocResponseData);
- psessionEntry->limAssocResponseData = NULL;
+ if (pe_session->limAssocResponseData) {
+ qdf_mem_free(pe_session->limAssocResponseData);
+ pe_session->limAssocResponseData = NULL;
}
- lim_restore_pre_reassoc_state(pMac, eSIR_SME_REASSOC_REFUSED,
- reasonCode, psessionEntry);
+ lim_restore_pre_reassoc_state(mac_ctx, eSIR_SME_REASSOC_REFUSED,
+ rc, pe_session);
return;
}
- lim_update_lost_link_info(pMac, psessionEntry, frame_rssi);
- lim_post_sme_message(pMac, LIM_MLM_DISASSOC_IND,
- (uint32_t *) &mlmDisassocInd);
+ lim_update_lost_link_info(mac_ctx, pe_session, frame_rssi);
+ lim_post_sme_message(mac_ctx, LIM_MLM_DISASSOC_IND,
+ (uint32_t *) &mlmDisassocInd);
/* send eWNI_SME_DISASSOC_IND to SME */
- lim_send_sme_disassoc_ind(pMac, pStaDs, psessionEntry);
+ lim_send_sme_disassoc_ind(mac_ctx, sta_ds, pe_session);
return;
-} /*** end lim_process_disassoc_frame() ***/
+}
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_fils.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_fils.c
index c2d6f966b0d5..57c26243abff 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_fils.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_fils.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2018 The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
@@ -1062,8 +1062,9 @@ bool lim_process_fils_auth_frame2(tpAniSirGlobal mac_ctx,
tpPESession pe_session,
tSirMacAuthFrameBody *rx_auth_frm_body)
{
- bool pmkid_found = false;
int i;
+ uint32_t ret;
+ bool pmkid_found = false;
tDot11fIERSN dot11f_ie_rsn = {0};
if (rx_auth_frm_body->authAlgoNumber != eSIR_FILS_SK_WITHOUT_PFS)
@@ -1072,10 +1073,12 @@ bool lim_process_fils_auth_frame2(tpAniSirGlobal mac_ctx,
if (!pe_session->fils_info)
return false;
- if (dot11f_unpack_ie_rsn(mac_ctx,
- &rx_auth_frm_body->rsn_ie.info[0],
- rx_auth_frm_body->rsn_ie.length,
- &dot11f_ie_rsn, 0) != DOT11F_PARSE_SUCCESS) {
+ ret = dot11f_unpack_ie_rsn(mac_ctx,
+ &rx_auth_frm_body->rsn_ie.info[0],
+ rx_auth_frm_body->rsn_ie.length,
+ &dot11f_ie_rsn, 0);
+ if (!DOT11F_SUCCEEDED(ret)) {
+ pe_err("unpack failed, ret: %d", ret);
return false;
}
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_message_queue.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_message_queue.c
index 23e27cbd1c0b..2c074fae86f4 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_message_queue.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_message_queue.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -1388,10 +1388,13 @@ static void lim_process_messages(tpAniSirGlobal mac_ctx, tpSirMsgQ msg)
cds_pkt_return_packet(body_ptr);
break;
}
+ if (WMA_GET_ROAMCANDIDATEIND(new_msg.bodyptr))
+ pe_debug("roamCandidateInd: %d",
+ WMA_GET_ROAMCANDIDATEIND(new_msg.bodyptr));
- pe_debug("roamCandidateInd: %d offloadScanLearn: %d",
- WMA_GET_ROAMCANDIDATEIND(new_msg.bodyptr),
- WMA_GET_OFFLOADSCANLEARN(new_msg.bodyptr));
+ if (WMA_GET_OFFLOADSCANLEARN(new_msg.bodyptr))
+ pe_debug("offloadScanLearn: %d",
+ WMA_GET_OFFLOADSCANLEARN(new_msg.bodyptr));
lim_handle80211_frames(mac_ctx, &new_msg, &defer_msg);
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_mlm_req_messages.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_mlm_req_messages.c
index 04ae3ed637e7..4fb9f60679c2 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_mlm_req_messages.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_mlm_req_messages.c
@@ -1316,6 +1316,7 @@ lim_process_mlm_disassoc_req_ntf(tpAniSirGlobal mac_ctx,
qdf_mem_malloc(sizeof(tSirSmeDisassocRsp));
if (NULL == sme_disassoc_rsp) {
pe_err("memory allocation failed for disassoc rsp");
+ qdf_mem_free(mlm_disassocreq);
return;
}
@@ -1337,6 +1338,7 @@ lim_process_mlm_disassoc_req_ntf(tpAniSirGlobal mac_ctx,
lim_send_sme_disassoc_deauth_ntf(mac_ctx,
QDF_STATUS_SUCCESS, msg);
+ qdf_mem_free(mlm_disassocreq);
return;
}
@@ -1400,6 +1402,11 @@ lim_process_mlm_disassoc_req_ntf(tpAniSirGlobal mac_ctx,
/* Send Disassociate frame to peer entity */
if (send_disassoc_frame && (mlm_disassocreq->reasonCode !=
eSIR_MAC_DISASSOC_DUE_TO_FTHANDOFF_REASON)) {
+ if (mac_ctx->lim.limDisassocDeauthCnfReq.pMlmDisassocReq) {
+ pe_err("pMlmDisassocReq is not NULL, freeing");
+ qdf_mem_free(mac_ctx->lim.limDisassocDeauthCnfReq.
+ pMlmDisassocReq);
+ }
mac_ctx->lim.limDisassocDeauthCnfReq.pMlmDisassocReq =
mlm_disassocreq;
/*
@@ -1660,6 +1667,7 @@ lim_process_mlm_deauth_req_ntf(tpAniSirGlobal mac_ctx,
qdf_mem_malloc(sizeof(tSirSmeDeauthRsp));
if (NULL == sme_deauth_rsp) {
pe_err("memory allocation failed for deauth rsp");
+ qdf_mem_free(mlm_deauth_req);
return;
}
@@ -1686,6 +1694,7 @@ lim_process_mlm_deauth_req_ntf(tpAniSirGlobal mac_ctx,
lim_send_sme_disassoc_deauth_ntf(mac_ctx,
QDF_STATUS_SUCCESS, msg_buf);
+ qdf_mem_free(mlm_deauth_req);
return;
}
@@ -1796,7 +1805,14 @@ lim_process_mlm_deauth_req_ntf(tpAniSirGlobal mac_ctx,
sta_ds->mlmStaContext.disassocReason = (tSirMacReasonCodes)
mlm_deauth_req->reasonCode;
sta_ds->mlmStaContext.cleanupTrigger = mlm_deauth_req->deauthTrigger;
+
+ if (mac_ctx->lim.limDisassocDeauthCnfReq.pMlmDeauthReq) {
+ pe_err("pMlmDeauthReq is not NULL, freeing");
+ qdf_mem_free(mac_ctx->lim.limDisassocDeauthCnfReq.
+ pMlmDeauthReq);
+ }
mac_ctx->lim.limDisassocDeauthCnfReq.pMlmDeauthReq = mlm_deauth_req;
+
/*
* Set state to mlm State to eLIM_MLM_WT_DEL_STA_RSP_STATE
* This is to address the issue of race condition between
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_probe_rsp_frame.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_probe_rsp_frame.c
index a432d23c0a98..99d1211611f3 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_probe_rsp_frame.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_probe_rsp_frame.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -347,10 +347,6 @@ lim_process_probe_rsp_frame_no_session(tpAniSirGlobal mac_ctx,
header = WMA_GET_RX_MAC_HEADER(rx_packet_info);
- pe_debug("Received Probe Response frame with length=%d from",
- WMA_GET_RX_MPDU_LEN(rx_packet_info));
- lim_print_mac_addr(mac_ctx, header->sa, LOGD);
-
/* Validate IE information before processing Probe Response Frame */
if (lim_validate_ie_information_in_probe_rsp_frame(mac_ctx,
rx_packet_info) !=
@@ -361,11 +357,6 @@ lim_process_probe_rsp_frame_no_session(tpAniSirGlobal mac_ctx,
}
frame_len = WMA_GET_RX_PAYLOAD_LEN(rx_packet_info);
- pe_debug("Probe Resp Frame Received: BSSID "
- MAC_ADDRESS_STR " (RSSI %d)",
- MAC_ADDR_ARRAY(header->bssId),
- (uint) abs((int8_t)WMA_GET_RX_RSSI_NORMALIZED(
- rx_packet_info)));
/*
* Get pointer to Probe Response frame body
*/
@@ -378,7 +369,6 @@ lim_process_probe_rsp_frame_no_session(tpAniSirGlobal mac_ctx,
return;
}
- pe_debug("Save this probe rsp in LFR cache");
lim_check_and_add_bss_description(mac_ctx, probe_rsp,
rx_packet_info, false, true);
qdf_mem_free(probe_rsp);
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_sme_req_messages.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_sme_req_messages.c
index 2c2c4e8c517a..dddffacc759a 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_sme_req_messages.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_sme_req_messages.c
@@ -1903,6 +1903,7 @@ __lim_process_sme_join_req(tpAniSirGlobal mac_ctx, uint32_t *msg_buf)
session->maxTxPower = lim_get_max_tx_power(reg_max,
local_power_constraint,
mac_ctx->roam.configParam.nTxPowerCap);
+ session->def_max_tx_pwr = session->maxTxPower;
pe_debug("Reg max %d local power con %d max tx pwr %d",
reg_max, local_power_constraint, session->maxTxPower);
@@ -5807,18 +5808,8 @@ static void lim_process_update_add_ies(tpAniSirGlobal mac_ctx,
if (update_ie->append) {
/*
* In case of append, allocate new memory
- * with combined length.
- * Multiple back to back append commands
- * can lead to a huge length.So, check
- * for the validity of the length.
+ * with combined length
*/
- if (addn_ie->probeRespDataLen >
- (USHRT_MAX - update_ie->ieBufferlength)) {
- pe_err("IE Length overflow, curr:%d, new:%d",
- addn_ie->probeRespDataLen,
- update_ie->ieBufferlength);
- goto end;
- }
new_length = update_ie->ieBufferlength +
addn_ie->probeRespDataLen;
new_ptr = qdf_mem_malloc(new_length);
@@ -6073,7 +6064,6 @@ skip_vht:
lim_send_chan_switch_action_frame(mac_ctx,
session_entry->gLimChannelSwitch.primaryChannel,
ch_offset, session_entry);
- session_entry->gLimChannelSwitch.switchCount--;
}
/**
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_tdls.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_tdls.c
index 47e5b5fcbb5b..c789401a96aa 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_tdls.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_process_tdls.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -2548,6 +2548,19 @@ static tSirRetStatus lim_tdls_setup_add_sta(tpAniSirGlobal pMac,
pStaDs = dph_lookup_hash_entry(pMac, pAddStaReq->peermac.bytes, &aid,
&psessionEntry->dph.dphHashTable);
+
+ if (pStaDs && pAddStaReq->tdlsAddOper == TDLS_OPER_ADD) {
+ pe_err("TDLS entry for peer: "MAC_ADDRESS_STR " already exist, cannot add new entry",
+ MAC_ADDR_ARRAY(pAddStaReq->peermac.bytes));
+ return eSIR_FAILURE;
+ }
+
+ if (pStaDs && pStaDs->staType != STA_ENTRY_TDLS_PEER) {
+ pe_err("Non TDLS entry for peer: "MAC_ADDRESS_STR " already exist",
+ MAC_ADDR_ARRAY(pAddStaReq->peermac.bytes));
+ return eSIR_FAILURE;
+ }
+
if (NULL == pStaDs) {
aid = lim_assign_peer_idx(pMac, psessionEntry);
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_scan_result_utils.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_scan_result_utils.c
index 4972c30c20b0..fa3ac3214522 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_scan_result_utils.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_scan_result_utils.c
@@ -311,12 +311,11 @@ lim_collect_bss_description(tpAniSirGlobal pMac,
pBssDescr->tsf_delta = WMA_GET_RX_TSF_DELTA(pRxPacketInfo);
pBssDescr->seq_ctrl = pHdr->seqControl;
- pe_debug(MAC_ADDRESS_STR
- " rssi: norm %d abs %d tsf_delta %u RcvdTime %llu ssid %s",
- MAC_ADDR_ARRAY(pHdr->bssId), pBssDescr->rssi,
- pBssDescr->rssi_raw, pBssDescr->tsf_delta,
- pBssDescr->received_time,
- ((pBPR->ssidPresent) ? (char *)pBPR->ssId.ssId : ""));
+ pe_debug("Received %s from BSSID: %pM tsf_delta = %u Seq Num: %x ssid:%.*s, rssi: %d",
+ pBssDescr->fProbeRsp ? "Probe Rsp" : "Beacon", pHdr->bssId,
+ pBssDescr->tsf_delta, ((pHdr->seqControl.seqNumHi <<
+ HIGH_SEQ_NUM_OFFSET) | pHdr->seqControl.seqNumLo),
+ pBPR->ssId.length, pBPR->ssId.ssId, pBssDescr->rssi_raw);
if (fScanning) {
rrm_get_start_tsf(pMac, pBssDescr->startTSF);
@@ -351,8 +350,6 @@ lim_collect_bss_description(tpAniSirGlobal pMac,
/*set channel number in beacon in case it is not present */
pBPR->channelNumber = pBssDescr->channelId;
- pe_debug("Collected BSS Description for Channel: %1d length: %u IE Fields: %u",
- pBssDescr->channelId, pBssDescr->length, ieLen);
pMac->lim.beacon_probe_rsp_cnt_per_scan++;
return;
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c
index d51458a03284..a51ede86459d 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_management_frames.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -1607,6 +1607,7 @@ lim_send_assoc_req_mgmt_frame(tpAniSirGlobal mac_ctx,
tLimMlmAssocReq *mlm_assoc_req,
tpPESession pe_session)
{
+ int ret;
tDot11fAssocRequest *frm;
uint16_t caps;
uint8_t *frame;
@@ -1828,6 +1829,11 @@ lim_send_assoc_req_mgmt_frame(tpAniSirGlobal mac_ctx,
frm->vendor_vht_ie.sub_type =
pe_session->vendor_specific_vht_ie_sub_type;
frm->vendor_vht_ie.VHTCaps.present = 1;
+ if (!mac_ctx->roam.configParam.enable_subfee_vendor_vhtie &&
+ pe_session->vht_config.su_beam_formee) {
+ pe_debug("Disable SU beamformee for vendor IE");
+ pe_session->vht_config.su_beam_formee = 0;
+ }
populate_dot11f_vht_caps(mac_ctx, pe_session,
&frm->vendor_vht_ie.VHTCaps);
vht_enabled = true;
@@ -1917,9 +1923,14 @@ lim_send_assoc_req_mgmt_frame(tpAniSirGlobal mac_ctx,
* before packing the frm structure. In this way, the IE ordering
* which the latest 802.11 spec mandates is maintained.
*/
- if (add_ie_len)
- dot11f_unpack_assoc_request(mac_ctx, add_ie,
+ if (add_ie_len) {
+ ret = dot11f_unpack_assoc_request(mac_ctx, add_ie,
add_ie_len, frm, true);
+ if (DOT11F_FAILED(ret)) {
+ pe_err("unpack failed, ret: 0x%x", ret);
+ goto end;
+ }
+ }
status = dot11f_get_packed_assoc_request_size(mac_ctx, frm, &payload);
if (DOT11F_FAILED(status)) {
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_sme_rsp_messages.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_sme_rsp_messages.c
index 9e728afaaee6..854ec0a35ae9 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_sme_rsp_messages.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_send_sme_rsp_messages.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -2711,6 +2711,14 @@ lim_process_beacon_tx_success_ind(tpAniSirGlobal pMac, uint16_t msgType, void *e
if (LIM_IS_AP_ROLE(psessionEntry) &&
true == psessionEntry->dfsIncludeChanSwIe) {
+
+ if (psessionEntry->gLimChannelSwitch.switchCount) {
+ /* Decrement the beacon switch count */
+ psessionEntry->gLimChannelSwitch.switchCount--;
+ pe_debug("current beacon count %d",
+ psessionEntry->gLimChannelSwitch.switchCount);
+ }
+
/* Send only 5 beacons with CSA IE Set in when a radar is detected */
if (psessionEntry->gLimChannelSwitch.switchCount > 0) {
/*
@@ -2725,8 +2733,6 @@ lim_process_beacon_tx_success_ind(tpAniSirGlobal pMac, uint16_t msgType, void *e
lim_send_chan_switch_action_frame(pMac,
ch, ch_width, psessionEntry);
- /* Decrement the IE count */
- psessionEntry->gLimChannelSwitch.switchCount--;
} else {
/* Done with CSA IE update, send response back to SME */
psessionEntry->gLimChannelSwitch.switchCount = 0;
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_session.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_session.c
index 99606faf693b..70865f2fa58b 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_session.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_session.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -577,8 +577,6 @@ tpPESession pe_find_session_by_bssid(tpAniSirGlobal pMac, uint8_t *bssid,
}
}
- pe_debug("Session lookup fails for BSSID:");
- lim_print_mac_addr(pMac, bssid, LOGD);
return NULL;
}
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_session_utils.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_session_utils.c
index 1b39f85328fb..ecdc7e2af261 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_session_utils.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_session_utils.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -115,6 +115,8 @@ uint8_t lim_is_in_mcc(tpAniSirGlobal mac_ctx)
if ((mac_ctx->lim.gpSession[i].valid)) {
curr_oper_channel =
mac_ctx->lim.gpSession[i].currentOperChannel;
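+ /* Skip sessions that have no operating channel set */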
+ if (curr_oper_channel == 0)
+ continue;
if (chan == 0)
chan = curr_oper_channel;
else if (chan != curr_oper_channel)
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_sme_req_utils.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_sme_req_utils.c
index ef022d79d815..a4d36d070f08 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_sme_req_utils.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_sme_req_utils.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -232,6 +232,7 @@ lim_set_rs_nie_wp_aiefrom_sme_start_bss_req_message(tpAniSirGlobal mac_ctx,
tpSirRSNie rsn_ie,
tpPESession session)
{
+ uint32_t ret;
uint8_t wpa_idx = 0;
uint32_t privacy, val;
@@ -284,15 +285,26 @@ lim_set_rs_nie_wp_aiefrom_sme_start_bss_req_message(tpAniSirGlobal mac_ctx,
} else if ((rsn_ie->length == rsn_ie->rsnIEdata[1] + 2) &&
(rsn_ie->rsnIEdata[0] == SIR_MAC_RSN_EID)) {
pe_debug("Only RSN IE is present");
- dot11f_unpack_ie_rsn(mac_ctx, &rsn_ie->rsnIEdata[2],
- (uint8_t) rsn_ie->length,
- &session->gStartBssRSNIe, false);
+ ret = dot11f_unpack_ie_rsn(mac_ctx, &rsn_ie->rsnIEdata[2],
+ rsn_ie->rsnIEdata[1],
+ &session->gStartBssRSNIe, false);
+ if (!DOT11F_SUCCEEDED(ret)) {
+ pe_err("unpack failed, ret: %d", ret);
+ return false;
+ }
+ return true;
+
} else if ((rsn_ie->length == rsn_ie->rsnIEdata[1] + 2)
&& (rsn_ie->rsnIEdata[0] == SIR_MAC_WPA_EID)) {
pe_debug("Only WPA IE is present");
- dot11f_unpack_ie_wpa(mac_ctx, &rsn_ie->rsnIEdata[6],
- (uint8_t) rsn_ie->length - 4,
- &session->gStartBssWPAIe, false);
+ ret = dot11f_unpack_ie_wpa(mac_ctx, &rsn_ie->rsnIEdata[6],
+ (uint8_t) rsn_ie->length - 4,
+ &session->gStartBssWPAIe, false);
+ if (!DOT11F_SUCCEEDED(ret)) {
+ pe_err("unpack failed, ret: %d", ret);
+ return false;
+ }
+ return true;
}
/* Check validity of WPA IE */
if (wpa_idx + 6 >= SIR_MAC_MAX_IE_LENGTH)
@@ -310,12 +322,21 @@ lim_set_rs_nie_wp_aiefrom_sme_start_bss_req_message(tpAniSirGlobal mac_ctx,
return false;
} else {
/* Both RSN and WPA IEs are present */
- dot11f_unpack_ie_rsn(mac_ctx, &rsn_ie->rsnIEdata[2],
- (uint8_t) rsn_ie->length,
- &session->gStartBssRSNIe, false);
- dot11f_unpack_ie_wpa(mac_ctx, &rsn_ie->rsnIEdata[wpa_idx + 6],
- rsn_ie->rsnIEdata[wpa_idx + 1] - 4,
- &session->gStartBssWPAIe, false);
+ ret = dot11f_unpack_ie_rsn(mac_ctx, &rsn_ie->rsnIEdata[2],
+ rsn_ie->rsnIEdata[1],
+ &session->gStartBssRSNIe, false);
+ if (!DOT11F_SUCCEEDED(ret)) {
+ pe_err("unpack failed, ret: %d", ret);
+ return false;
+ }
+ ret = dot11f_unpack_ie_wpa(mac_ctx,
+ &rsn_ie->rsnIEdata[wpa_idx + 6],
+ rsn_ie->rsnIEdata[wpa_idx + 1] - 4,
+ &session->gStartBssWPAIe, false);
+ if (!DOT11F_SUCCEEDED(ret)) {
+ pe_err("unpack failed, ret: %d", ret);
+ return false;
+ }
}
return true;
}
@@ -490,7 +511,13 @@ uint8_t lim_is_sme_join_req_valid(tpAniSirGlobal pMac, tpSirSmeJoinReq pJoinReq)
{
uint8_t valid = true;
- if (!lim_is_rsn_ie_valid_in_sme_req_message(pMac, &pJoinReq->rsnIE)) {
+ /*
+ * If force_rsne_override is enabled, the user has provided a test
+ * RSN IE that must be sent as-is in the assoc request, so RSN IE
+ * validation is not required.
+ */
+ if (!pJoinReq->force_rsne_override &&
+ !lim_is_rsn_ie_valid_in_sme_req_message(pMac, &pJoinReq->rsnIE)) {
pe_err("received SME_JOIN_REQ with invalid RSNIE");
valid = false;
goto end;
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_trace.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_trace.c
index e685ca119b07..f4ab2e3e1fea 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_trace.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_trace.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -106,13 +106,12 @@ static uint8_t *__lim_trace_get_mgmt_drop_reason_string(uint16_t dropReason)
void lim_trace_init(tpAniSirGlobal pMac)
{
- qdf_trace_register(QDF_MODULE_ID_PE, (tp_qdf_trace_cb) &lim_trace_dump);
+ qdf_trace_register(QDF_MODULE_ID_PE, &lim_trace_dump);
}
-void lim_trace_dump(tpAniSirGlobal pMac, tp_qdf_trace_record pRecord,
+void lim_trace_dump(void *pMac, tp_qdf_trace_record pRecord,
uint16_t recIndex)
{
-
static char *frameSubtypeStr[LIM_TRACE_MAX_SUBTYPES] = {
"Association request",
"Association response",
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_types.h b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_types.h
index 5f8077443c61..c3d94e30e6e3 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_types.h
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_types.h
@@ -471,7 +471,50 @@ void lim_send_mlm_assoc_ind(tpAniSirGlobal pMac, tpDphHashNode pStaDs,
void lim_process_assoc_rsp_frame(tpAniSirGlobal, uint8_t *, uint8_t, tpPESession);
void lim_process_disassoc_frame(tpAniSirGlobal, uint8_t *, tpPESession);
+/**
+ * lim_perform_disassoc() - Actual action taken after receiving disassoc
+ * @mac_ctx: Global MAC context
+ * @frame_rssi: RSSI of the frame
+ * @rc: Reason code of the disassoc
+ * @pe_session: PE session entry pointer
+ * @addr: BSSID from which the disassoc is received
+ *
+ * Return: None
+ */
+void lim_perform_disassoc(tpAniSirGlobal mac_ctx, int32_t frame_rssi,
+ uint16_t rc, tpPESession pe_session,
+ tSirMacAddr addr);
+/**
+ * lim_disassoc_tdls_peers() - Disassoc action for TDLS peers
+ * @mac_ctx: Global MAC context
+ * @pe_session: PE session entry pointer
+ * @addr: BSSID from which the disassoc is received
+ *
+ * Return: None
+ */
+#ifdef FEATURE_WLAN_TDLS
+void lim_disassoc_tdls_peers(tpAniSirGlobal mac_ctx,
+ tpPESession pe_session, tSirMacAddr addr);
+#else
+static inline void lim_disassoc_tdls_peers(tpAniSirGlobal mac_ctx,
+ tpPESession pe_session, tSirMacAddr addr)
+{
+}
+#endif
void lim_process_deauth_frame(tpAniSirGlobal, uint8_t *, tpPESession);
+/**
+ * lim_perform_deauth() - Actual action taken after receiving deauth
+ * @mac_ctx: Global MAC context
+ * @pe_session: PE session entry pointer
+ * @rc: Reason code of the deauth
+ * @addr: BSSID from which the deauth is received
+ * @frame_rssi: RSSI of the frame
+ *
+ * Return: None
+ */
+void lim_perform_deauth(tpAniSirGlobal mac_ctx, tpPESession pe_session,
+ uint16_t rc, tSirMacAddr addr, int32_t frame_rssi);
void lim_process_action_frame(tpAniSirGlobal, uint8_t *, tpPESession);
void lim_process_action_frame_no_session(tpAniSirGlobal pMac, uint8_t *pRxMetaInfo);
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.c
index d0ef2df79f53..8cb3f4c67351 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/lim/lim_utils.c
@@ -560,7 +560,7 @@ void lim_deactivate_timers(tpAniSirGlobal mac_ctx)
if (tx_timer_running(&lim_timer->gLimJoinFailureTimer)) {
pe_err("Join failure timer running call the timeout API");
/* Cleanup as if join timer expired */
- lim_process_join_failure_timeout(mac_ctx);
+ lim_timer_handler(mac_ctx, SIR_LIM_JOIN_FAIL_TIMEOUT);
}
/* Deactivate Join failure timer. */
tx_timer_deactivate(&lim_timer->gLimJoinFailureTimer);
@@ -575,7 +575,7 @@ void lim_deactivate_timers(tpAniSirGlobal mac_ctx)
if (tx_timer_running(&lim_timer->gLimAssocFailureTimer)) {
pe_err("Assoc failure timer running call the timeout API");
/* Cleanup as if assoc timer expired */
- lim_process_assoc_failure_timeout(mac_ctx, LIM_ASSOC);
+ lim_assoc_failure_timer_handler(mac_ctx, LIM_ASSOC);
}
/* Deactivate Association failure timer. */
tx_timer_deactivate(&lim_timer->gLimAssocFailureTimer);
@@ -583,7 +583,7 @@ void lim_deactivate_timers(tpAniSirGlobal mac_ctx)
if (tx_timer_running(&mac_ctx->lim.limTimers.gLimAuthFailureTimer)) {
pe_err("Auth failure timer running call the timeout API");
/* Cleanup as if auth timer expired */
- lim_process_auth_failure_timeout(mac_ctx);
+ lim_timer_handler(mac_ctx, SIR_LIM_AUTH_FAIL_TIMEOUT);
}
/* Deactivate Authentication failure timer. */
tx_timer_deactivate(&lim_timer->gLimAuthFailureTimer);
@@ -4320,6 +4320,7 @@ void lim_update_sta_run_time_ht_switch_chnl_params(tpAniSirGlobal pMac,
tpPESession psessionEntry)
{
uint8_t center_freq = 0;
+ enum phy_ch_width ch_width = CH_WIDTH_20MHZ;
/* If self capability is set to '20Mhz only', then do not change the CB mode. */
if (!lim_get_ht_capability
@@ -4371,12 +4372,15 @@ void lim_update_sta_run_time_ht_switch_chnl_params(tpAniSirGlobal pMac,
(uint8_t) pHTInfo->recommendedTxWidthSet;
if (eHT_CHANNEL_WIDTH_40MHZ ==
psessionEntry->htRecommendedTxWidthSet) {
+ ch_width = CH_WIDTH_40MHZ;
if (PHY_DOUBLE_CHANNEL_LOW_PRIMARY ==
pHTInfo->secondaryChannelOffset)
center_freq = pHTInfo->primaryChannel + 2;
else if (PHY_DOUBLE_CHANNEL_HIGH_PRIMARY ==
pHTInfo->secondaryChannelOffset)
center_freq = pHTInfo->primaryChannel - 2;
+ else
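+ /* Neither low nor high secondary offset: stay at 20 MHz */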
+ ch_width = CH_WIDTH_20MHZ;
}
/* notify HAL */
@@ -4391,12 +4395,11 @@ void lim_update_sta_run_time_ht_switch_chnl_params(tpAniSirGlobal pMac,
pMac->lim.gpchangeChannelCallback = NULL;
pMac->lim.gpchangeChannelData = NULL;
- lim_send_switch_chnl_params(pMac, (uint8_t) pHTInfo->primaryChannel,
- center_freq, 0,
- psessionEntry->htRecommendedTxWidthSet,
- psessionEntry->maxTxPower,
- psessionEntry->peSessionId,
- true);
+ lim_send_switch_chnl_params(pMac,
+ (uint8_t)pHTInfo->primaryChannel,
+ center_freq, 0, ch_width,
+ psessionEntry->maxTxPower,
+ psessionEntry->peSessionId, true);
/* In case of IBSS, if STA should update HT Info IE in its beacons. */
if (LIM_IS_IBSS_ROLE(psessionEntry)) {
@@ -5294,7 +5297,6 @@ tSirNwType lim_get_nw_type(tpAniSirGlobal pMac, uint8_t channelNum, uint32_t typ
}
}
if (pBeacon->extendedRatesPresent) {
- pe_debug("Beacon, nwtype: G");
nwType = eSIR_11G_NW_TYPE;
} else if (pBeacon->HTInfo.present ||
IS_BSS_VHT_CAPABLE(pBeacon->VHTCaps)) {
@@ -5302,7 +5304,6 @@ tSirNwType lim_get_nw_type(tpAniSirGlobal pMac, uint8_t channelNum, uint32_t typ
}
} else {
/* 11a packet */
- pe_debug("Beacon, nwtype: A");
nwType = eSIR_11A_NW_TYPE;
}
}
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c b/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c
index c1f12286d3c8..fed9077b6532 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/pe/rrm/rrm_api.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -269,7 +269,7 @@ rrm_process_link_measurement_request(tpAniSirGlobal pMac,
}
pHdr = WMA_GET_RX_MAC_HEADER(pRxPacketInfo);
- LinkReport.txPower = lim_get_max_tx_power(pSessionEntry->maxTxPower,
+ LinkReport.txPower = lim_get_max_tx_power(pSessionEntry->def_max_tx_pwr,
pLinkReq->MaxTxPower.maxTxPower,
pMac->roam.configParam.
nTxPowerCap);
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/system/src/sys_entry_func.c b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/system/src/sys_entry_func.c
index 1c87e3ac37c0..9f36260402c5 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/system/src/sys_entry_func.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/system/src/sys_entry_func.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -166,19 +166,8 @@ sys_bbt_process_message_core(tpAniSirGlobal mac_ctx, tpSirMsgQ msg,
mac_ctx->sys.gSysFrameCount[type][subtype]);
}
- /*
- * Post the message to PE Queue. Prioritize the
- * Auth and assoc frames.
- */
- if ((subtype == SIR_MAC_MGMT_AUTH) ||
- (subtype == SIR_MAC_MGMT_ASSOC_RSP) ||
- (subtype == SIR_MAC_MGMT_REASSOC_RSP) ||
- (subtype == SIR_MAC_MGMT_ASSOC_REQ) ||
- (subtype == SIR_MAC_MGMT_REASSOC_REQ))
- ret = (tSirRetStatus)
- lim_post_msg_high_priority(mac_ctx, msg);
- else
- ret = (tSirRetStatus) lim_post_msg_api(mac_ctx, msg);
+ /* Post the message to PE Queue */
+ ret = (tSirRetStatus) lim_post_msg_api(mac_ctx, msg);
if (ret != eSIR_SUCCESS) {
pe_err("posting to LIM2 failed, ret %d\n", ret);
goto fail;
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/dot11f.c b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/dot11f.c
index da5790f16e86..e9d9713b6ce8 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/dot11f.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/dot11f.c
@@ -33,7 +33,7 @@
*
*
* This file was automatically generated by 'framesc'
- * Tue Jan 9 16:51:56 2018 from the following file(s):
+ * Fri Feb 16 10:33:08 2018 from the following file(s):
*
* dot11f.frms
*
@@ -132,20 +132,24 @@ typedef struct sIEDefn {
#if defined (DOT11F_HAVE_WIN32_API)
#define DOT11F_PARAMETER_CHECK(pBuf, nBuf, pFrm, nFrm) \
- if (!pBuf || IsBadReadPtr(pBuf, nBuf))\
- return DOT11F_BAD_INPUT_BUFFER; \
- if (!pFrm || IsBadWritePtr(pFrm, nFrm))\
- return DOT11F_BAD_OUTPUT_BUFFER \
+ do { \
+ if (!pBuf || IsBadReadPtr(pBuf, nBuf))\
+ return DOT11F_BAD_INPUT_BUFFER; \
+ if (!pFrm || IsBadWritePtr(pFrm, nFrm))\
+ return DOT11F_BAD_OUTPUT_BUFFER; \
+ } while (0)
#define DOT11F_PARAMETER_CHECK2(pSrc, pBuf, nBuf, pnConsumed) \
- if (!pSrc || IsBadReadPtr(pSrc, 4))\
- eturn DOT11F_BAD_INPUT_BUFFER; \
- if (!pBuf || IsBadWritePtr(pBuf, nBuf))\
- return DOT11F_BAD_OUTPUT_BUFFER; \
- if (!nBuf)\
- return DOT11F_BAD_OUTPUT_BUFFER; \
- if (IsBadWritePtr(pnConsumed, 4))\
- return DOT11F_BAD_OUTPUT_BUFFER \
+ do { \
+ if (!pSrc || IsBadReadPtr(pSrc, 4))\
+ return DOT11F_BAD_INPUT_BUFFER; \
+ if (!pBuf || IsBadWritePtr(pBuf, nBuf))\
+ return DOT11F_BAD_OUTPUT_BUFFER; \
+ if (!nBuf)\
+ return DOT11F_BAD_OUTPUT_BUFFER; \
+ if (IsBadWritePtr(pnConsumed, 4))\
+ return DOT11F_BAD_OUTPUT_BUFFER; \
+ } while (0)
#else
@@ -340,7 +344,7 @@ static uint32_t get_container_ies_len(tpAniSirGlobal pCtx,
pBufRemaining += len + 2;
len += 2;
while (len < nBuf) {
- pIe = find_ie_defn(pCtx, pBufRemaining, nBuf - len, IEs);
+ pIe = find_ie_defn(pCtx, pBufRemaining, nBuf + len, IEs);
if (NULL == pIe)
break;
if (pIe->eid == pIeFirst->eid)
@@ -349,8 +353,6 @@ static uint32_t get_container_ies_len(tpAniSirGlobal pCtx,
pBufRemaining += *(pBufRemaining + 1) + 2;
}
- if ((len > 0xFF) || (len > nBuf))
- return DOT11F_INTERNAL_ERROR;
*pnConsumed = len;
return DOT11F_PARSE_SUCCESS;
@@ -412,7 +414,7 @@ static void dot11f_unpack_ff_common_func(tpAniSirGlobal pCtx,
} /* End dot11f_unpack_ff_common_func. */
static uint32_t dot11f_unpack_ie_common_func(tpAniSirGlobal pCtx, uint8_t *pBuf,
- uint8_t ielen, uint8_t *pDstPresent ,
+ uint8_t ielen, uint8_t *pDstPresent,
uint8_t *pDstField)
{
uint32_t status = DOT11F_PARSE_SUCCESS;
@@ -4835,6 +4837,8 @@ uint32_t dot11f_unpack_ie_rsn(tpAniSirGlobal pCtx,
bool append_ie)
{
uint32_t status = DOT11F_PARSE_SUCCESS;
+ uint8_t def_cipher_suite[4] = {0x00, 0x0f, 0xac, 0x04};
+ uint8_t def_akm_suite[4] = {0x00, 0x0f, 0xac, 0x01};
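+ /* Defaults used when the RSN IE ends early: CCMP (00-0F-AC:4) cipher
+ * and 802.1X (00-0F-AC:1) AKM suite.
+ */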
(void) pBuf; (void)ielen; /* Shutup the compiler */
if (pDst->present)
status = DOT11F_DUPLICATE_IE;
@@ -4846,20 +4850,56 @@ uint32_t dot11f_unpack_ie_rsn(tpAniSirGlobal pCtx,
pDst->present = 0;
return status | DOT11F_BAD_FIXED_VALUE;
}
- DOT11F_MEMCPY(pCtx, pDst->gp_cipher_suite, pBuf, 4);
- pBuf += 4;
- ielen -= (uint8_t)4;
if (!ielen) {
- pDst->pwise_cipher_suite_count = 0U;
- pDst->akm_suite_count = 0U;
+ pDst->RSN_Cap_present = 0U;
+ pDst->gp_mgmt_cipher_suite_present = 0U;
+ pDst->gp_cipher_suite_present = 1;
+ DOT11F_MEMCPY(pCtx, pDst->gp_cipher_suite, def_cipher_suite, 4);
+ pDst->pwise_cipher_suite_count = 1;
+ DOT11F_MEMCPY(pCtx,
+ pDst->pwise_cipher_suites, def_cipher_suite, 4);
+ pDst->akm_suite_cnt = 1;
+ DOT11F_MEMCPY(pCtx, pDst->akm_suite, def_akm_suite, 4);
+ pDst->pmkid_count = 0U;
+ return 0U;
+ } else {
+ pDst->gp_cipher_suite_present = 1;
+ if (ielen < 4) {
+ pDst->present = 0;
+ return DOT11F_SKIPPED_BAD_IE;
+ }
+
+ DOT11F_MEMCPY(pCtx, pDst->gp_cipher_suite, pBuf, 4);
+ pBuf += 4;
+ ielen -= (uint8_t)4;
+ }
+ if (!ielen) {
+ pDst->RSN_Cap_present = 0U;
+ pDst->gp_mgmt_cipher_suite_present = 0U;
+ pDst->pwise_cipher_suite_count = 1;
+ DOT11F_MEMCPY(pCtx,
+ pDst->pwise_cipher_suites, def_cipher_suite, 4);
+ pDst->akm_suite_cnt = 1;
+ DOT11F_MEMCPY(pCtx, pDst->akm_suite, def_akm_suite, 4);
pDst->pmkid_count = 0U;
return 0U;
} else {
+ if (ielen < 2) {
+ pDst->present = 0;
+ return DOT11F_SKIPPED_BAD_IE;
+ }
+
framesntohs(pCtx, &pDst->pwise_cipher_suite_count, pBuf, 0);
pBuf += 2;
ielen -= (uint8_t)2;
}
- if (pDst->pwise_cipher_suite_count > 6) {
+ if (!pDst->pwise_cipher_suite_count ||
+ pDst->pwise_cipher_suite_count > 6) {
+ pDst->present = 0;
+ return DOT11F_SKIPPED_BAD_IE;
+ }
+
+ if (ielen < pDst->pwise_cipher_suite_count * 4) {
pDst->present = 0;
return DOT11F_SKIPPED_BAD_IE;
}
@@ -4868,34 +4908,63 @@ uint32_t dot11f_unpack_ie_rsn(tpAniSirGlobal pCtx,
pBuf += (pDst->pwise_cipher_suite_count * 4);
ielen -= (pDst->pwise_cipher_suite_count * 4);
if (!ielen) {
- pDst->akm_suite_count = 0U;
+ pDst->RSN_Cap_present = 0U;
+ pDst->gp_mgmt_cipher_suite_present = 0U;
+ pDst->akm_suite_cnt = 1;
+ DOT11F_MEMCPY(pCtx, pDst->akm_suite, def_akm_suite, 4);
pDst->pmkid_count = 0U;
return 0U;
} else {
- framesntohs(pCtx, &pDst->akm_suite_count, pBuf, 0);
+ if (ielen < 2) {
+ pDst->present = 0;
+ return DOT11F_SKIPPED_BAD_IE;
+ }
+
+ framesntohs(pCtx, &pDst->akm_suite_cnt, pBuf, 0);
pBuf += 2;
ielen -= (uint8_t)2;
}
- if (pDst->akm_suite_count > 6) {
+ if (!pDst->akm_suite_cnt ||
+ pDst->akm_suite_cnt > 6) {
pDst->present = 0;
return DOT11F_SKIPPED_BAD_IE;
}
- DOT11F_MEMCPY(pCtx, pDst->akm_suites, pBuf, (pDst->akm_suite_count * 4));
- pBuf += (pDst->akm_suite_count * 4);
- ielen -= (pDst->akm_suite_count * 4);
+ if (ielen < pDst->akm_suite_cnt * 4) {
+ pDst->present = 0;
+ return DOT11F_SKIPPED_BAD_IE;
+ }
+
+ DOT11F_MEMCPY(pCtx, pDst->akm_suite, pBuf, (pDst->akm_suite_cnt * 4));
+ pBuf += (pDst->akm_suite_cnt * 4);
+ ielen -= (pDst->akm_suite_cnt * 4);
if (!ielen) {
+ pDst->RSN_Cap_present = 0U;
+ pDst->gp_mgmt_cipher_suite_present = 0U;
pDst->pmkid_count = 0U;
return 0U;
} else {
+ pDst->RSN_Cap_present = 1;
+ if (ielen < 2) {
+ pDst->present = 0;
+ return DOT11F_SKIPPED_BAD_IE;
+ }
+
DOT11F_MEMCPY(pCtx, pDst->RSN_Cap, pBuf, 2);
pBuf += 2;
ielen -= (uint8_t)2;
}
if (!ielen) {
+ pDst->RSN_Cap_present = 0U;
+ pDst->gp_mgmt_cipher_suite_present = 0U;
pDst->pmkid_count = 0U;
return 0U;
} else {
+ if (ielen < 2) {
+ pDst->present = 0;
+ return DOT11F_SKIPPED_BAD_IE;
+ }
+
framesntohs(pCtx, &pDst->pmkid_count, pBuf, 0);
pBuf += 2;
ielen -= (uint8_t)2;
@@ -4905,12 +4974,23 @@ uint32_t dot11f_unpack_ie_rsn(tpAniSirGlobal pCtx,
return DOT11F_SKIPPED_BAD_IE;
}
+ if (ielen < pDst->pmkid_count * 16) {
+ pDst->present = 0;
+ return DOT11F_SKIPPED_BAD_IE;
+ }
+
DOT11F_MEMCPY(pCtx, pDst->pmkid, pBuf, (pDst->pmkid_count * 16));
pBuf += (pDst->pmkid_count * 16);
ielen -= (pDst->pmkid_count * 16);
if (!ielen) {
return 0U;
} else {
+ pDst->gp_mgmt_cipher_suite_present = 1;
+ if (ielen < 4) {
+ pDst->present = 0;
+ return DOT11F_SKIPPED_BAD_IE;
+ }
+
DOT11F_MEMCPY(pCtx, pDst->gp_mgmt_cipher_suite, pBuf, 4);
}
(void)pCtx;
@@ -7146,7 +7226,7 @@ static const tIEDefn IES_Beacon[] = {
present), 0, "ExtSuppRates", 0, 3, 14, SigIeExtSuppRates, {0, 0, 0, 0, 0},
0, DOT11F_EID_EXTSUPPRATES, 0, 0, },
{ offsetof(tDot11fBeacon, RSN), offsetof(tDot11fIERSN, present), 0, "RSN",
- 0, 8, 132, SigIeRSN, {0, 0, 0, 0, 0}, 0, DOT11F_EID_RSN, 0, 0, },
+ 0, 4, 132, SigIeRSN, {0, 0, 0, 0, 0}, 0, DOT11F_EID_RSN, 0, 0, },
{ offsetof(tDot11fBeacon, QBSSLoad), offsetof(tDot11fIEQBSSLoad, present),
0, "QBSSLoad", 0, 7, 7, SigIeQBSSLoad, {0, 0, 0, 0, 0},
0, DOT11F_EID_QBSSLOAD, 0, 0, },
@@ -7565,7 +7645,7 @@ static const tIEDefn IES_BeaconIEs[] = {
0, 3, 14, SigIeExtSuppRates, {0, 0, 0, 0, 0},
0, DOT11F_EID_EXTSUPPRATES, 0, 0, },
{ offsetof(tDot11fBeaconIEs, RSN), offsetof(tDot11fIERSN, present), 0,
- "RSN", 0, 8, 132, SigIeRSN, {0, 0, 0, 0, 0}, 0, DOT11F_EID_RSN, 0, 0, },
+ "RSN", 0, 4, 132, SigIeRSN, {0, 0, 0, 0, 0}, 0, DOT11F_EID_RSN, 0, 0, },
{ offsetof(tDot11fBeaconIEs, QBSSLoad), offsetof(tDot11fIEQBSSLoad,
present), 0, "QBSSLoad", 0, 7, 7, SigIeQBSSLoad, {0, 0, 0, 0, 0},
0, DOT11F_EID_QBSSLOAD, 0, 0, },
@@ -8868,7 +8948,7 @@ static const tIEDefn IES_TDLSDisRsp[] = {
"SuppOperatingClasses", 0, 3, 34, SigIeSuppOperatingClasses,
{0, 0, 0, 0, 0}, 0, DOT11F_EID_SUPPOPERATINGCLASSES, 0, 0, },
{ offsetof(tDot11fTDLSDisRsp, RSN), offsetof(tDot11fIERSN, present), 0,
- "RSN", 0, 8, 132, SigIeRSN, {0, 0, 0, 0, 0}, 0, DOT11F_EID_RSN, 0, 0, },
+ "RSN", 0, 4, 132, SigIeRSN, {0, 0, 0, 0, 0}, 0, DOT11F_EID_RSN, 0, 0, },
{ offsetof(tDot11fTDLSDisRsp, ExtCap), offsetof(tDot11fIEExtCap, present),
0, "ExtCap", 0, 3, 17, SigIeExtCap, {0, 0, 0, 0, 0},
0, DOT11F_EID_EXTCAP, 0, 0, },
@@ -8999,7 +9079,7 @@ static const tFFDefn FFS_TDLSSetupCnf[] = {
static const tIEDefn IES_TDLSSetupCnf[] = {
{ offsetof(tDot11fTDLSSetupCnf, RSN), offsetof(tDot11fIERSN, present), 0,
- "RSN", 0, 8, 132, SigIeRSN, {0, 0, 0, 0, 0}, 0, DOT11F_EID_RSN, 0, 0, },
+ "RSN", 0, 4, 132, SigIeRSN, {0, 0, 0, 0, 0}, 0, DOT11F_EID_RSN, 0, 0, },
{ offsetof(tDot11fTDLSSetupCnf, EDCAParamSet),
offsetof(tDot11fIEEDCAParamSet, present), 0, "EDCAParamSet",
0, 20, 20, SigIeEDCAParamSet, {0, 0, 0, 0, 0},
@@ -9074,7 +9154,7 @@ static const tIEDefn IES_TDLSSetupReq[] = {
0, 4, 98, SigIeSuppChannels, {0, 0, 0, 0, 0},
0, DOT11F_EID_SUPPCHANNELS, 0, 0, },
{ offsetof(tDot11fTDLSSetupReq, RSN), offsetof(tDot11fIERSN, present), 0,
- "RSN", 0, 8, 132, SigIeRSN, {0, 0, 0, 0, 0}, 0, DOT11F_EID_RSN, 0, 0, },
+ "RSN", 0, 4, 132, SigIeRSN, {0, 0, 0, 0, 0}, 0, DOT11F_EID_RSN, 0, 0, },
{ offsetof(tDot11fTDLSSetupReq, ExtCap), offsetof(tDot11fIEExtCap,
present), 0, "ExtCap", 0, 3, 17, SigIeExtCap, {0, 0, 0, 0, 0},
0, DOT11F_EID_EXTCAP, 0, 0, },
@@ -9163,7 +9243,7 @@ static const tIEDefn IES_TDLSSetupRsp[] = {
0, 4, 98, SigIeSuppChannels, {0, 0, 0, 0, 0},
0, DOT11F_EID_SUPPCHANNELS, 0, 0, },
{ offsetof(tDot11fTDLSSetupRsp, RSN), offsetof(tDot11fIERSN, present), 0,
- "RSN", 0, 8, 132, SigIeRSN, {0, 0, 0, 0, 0}, 0, DOT11F_EID_RSN, 0, 0, },
+ "RSN", 0, 4, 132, SigIeRSN, {0, 0, 0, 0, 0}, 0, DOT11F_EID_RSN, 0, 0, },
{ offsetof(tDot11fTDLSSetupRsp, ExtCap), offsetof(tDot11fIEExtCap,
present), 0, "ExtCap", 0, 3, 17, SigIeExtCap, {0, 0, 0, 0, 0},
0, DOT11F_EID_EXTCAP, 0, 0, },
@@ -9904,15 +9984,11 @@ static uint32_t unpack_core(tpAniSirGlobal pCtx,
}
if (pIe) {
- if ((nBufRemaining < pIe->minSize - pIe->noui - 2U) ||
- (len < pIe->minSize - pIe->noui - 2U)) {
- FRAMES_LOG4(pCtx, FRLOGW, FRFL("The IE %s must "
- "be at least %d bytes in size, but "
- "there are only %d bytes remaining in "
- "this frame or the IE reports a size "
- "of %d bytes.\n"),
- pIe->name, pIe->minSize, nBufRemaining,
- (len + pIe->noui + 2U));
+ if (nBufRemaining < pIe->minSize - pIe->noui - 2U) {
+ FRAMES_LOG3(pCtx, FRLOGW, FRFL("The IE %s must be "
+ "at least %d bytes in size, but there are onl"
+ "y %d bytes remaining in this frame.\n"),
+ pIe->name, pIe->minSize, nBufRemaining);
FRAMES_DUMP(pCtx, FRLOG1, pBuf, nBuf);
status |= DOT11F_INCOMPLETE_IE;
FRAMES_DBG_BREAK();
@@ -12399,29 +12475,42 @@ uint32_t dot11f_get_packed_iersn(tpAniSirGlobal pCtx,
(void)pCtx;
while (pIe->present) {
*pnNeeded += 2;
- *pnNeeded += 4;
+ if (pIe->gp_cipher_suite_present) {
+
+ *pnNeeded += 4;
+ } else {
+ break;
+ }
if (pIe->pwise_cipher_suite_count) {
*pnNeeded += 2;
} else {
break;
}
*pnNeeded += (pIe->pwise_cipher_suite_count * 4);
- if (pIe->akm_suite_count) {
+ if (pIe->akm_suite_cnt) {
+ *pnNeeded += 2;
+ } else {
+ break;
+ }
+ *pnNeeded += (pIe->akm_suite_cnt * 4);
+ if (pIe->RSN_Cap_present) {
+
*pnNeeded += 2;
} else {
break;
}
- *pnNeeded += (pIe->akm_suite_count * 4);
- /* RSN_Cap */
- *pnNeeded += 2;
if (pIe->pmkid_count) {
*pnNeeded += 2;
} else {
break;
}
*pnNeeded += (pIe->pmkid_count * 16);
- /* gp_mgmt_cipher_suite */
- *pnNeeded += 4;
+ if (pIe->gp_mgmt_cipher_suite_present) {
+
+ *pnNeeded += 4;
+ } else {
+ break;
+ }
break;
}
return status;
@@ -20525,9 +20614,13 @@ uint32_t dot11f_pack_ie_rsn(tpAniSirGlobal pCtx,
frameshtons(pCtx, pBuf, pSrc->version, 0);
*pnConsumed += 2;
pBuf += 2;
- DOT11F_MEMCPY(pCtx, pBuf, pSrc->gp_cipher_suite, 4);
- *pnConsumed += 4;
- pBuf += 4;
+ if (pSrc->gp_cipher_suite_present) {
+ DOT11F_MEMCPY(pCtx, pBuf, pSrc->gp_cipher_suite, 4);
+ *pnConsumed += 4;
+ pBuf += 4;
+ } else {
+ break;
+ }
if (pSrc->pwise_cipher_suite_count) {
frameshtons(pCtx, pBuf, pSrc->pwise_cipher_suite_count, 0);
*pnConsumed += 2;
@@ -20538,20 +20631,23 @@ uint32_t dot11f_pack_ie_rsn(tpAniSirGlobal pCtx,
DOT11F_MEMCPY(pCtx, pBuf, &(pSrc->pwise_cipher_suites), (pSrc->pwise_cipher_suite_count * 4));
*pnConsumed += (pSrc->pwise_cipher_suite_count * 4);
pBuf += (pSrc->pwise_cipher_suite_count * 4);
- if (pSrc->akm_suite_count) {
- frameshtons(pCtx, pBuf, pSrc->akm_suite_count, 0);
+ if (pSrc->akm_suite_cnt) {
+ frameshtons(pCtx, pBuf, pSrc->akm_suite_cnt, 0);
+ *pnConsumed += 2;
+ pBuf += 2;
+ } else {
+ break;
+ }
+ DOT11F_MEMCPY(pCtx, pBuf, &(pSrc->akm_suite), (pSrc->akm_suite_cnt * 4));
+ *pnConsumed += (pSrc->akm_suite_cnt * 4);
+ pBuf += (pSrc->akm_suite_cnt * 4);
+ if (pSrc->RSN_Cap_present) {
+ DOT11F_MEMCPY(pCtx, pBuf, pSrc->RSN_Cap, 2);
*pnConsumed += 2;
pBuf += 2;
} else {
break;
}
- DOT11F_MEMCPY(pCtx, pBuf, &(pSrc->akm_suites), (pSrc->akm_suite_count * 4));
- *pnConsumed += (pSrc->akm_suite_count * 4);
- pBuf += (pSrc->akm_suite_count * 4);
- /* RSN_Cap */
- DOT11F_MEMCPY(pCtx, pBuf, pSrc->RSN_Cap, 2);
- *pnConsumed += 2;
- pBuf += 2;
if (pSrc->pmkid_count) {
frameshtons(pCtx, pBuf, pSrc->pmkid_count, 0);
*pnConsumed += 2;
@@ -20562,10 +20658,13 @@ uint32_t dot11f_pack_ie_rsn(tpAniSirGlobal pCtx,
DOT11F_MEMCPY(pCtx, pBuf, &(pSrc->pmkid), (pSrc->pmkid_count * 16));
*pnConsumed += (pSrc->pmkid_count * 16);
pBuf += (pSrc->pmkid_count * 16);
- /* gp_mgmt_cipher_suite */
- DOT11F_MEMCPY(pCtx, pBuf, pSrc->gp_mgmt_cipher_suite, 4);
- *pnConsumed += 4;
- /* fieldsEndFlag = 1 */
+ if (pSrc->gp_mgmt_cipher_suite_present) {
+ DOT11F_MEMCPY(pCtx, pBuf, pSrc->gp_mgmt_cipher_suite, 4);
+ *pnConsumed += 4;
+ /* fieldsEndFlag = 1 */
+ } else {
+ break;
+ }
break;
}
(void)pCtx;
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/mac_trace.c b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/mac_trace.c
index d1f682e3cf31..e7c5746771f6 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/mac_trace.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/mac_trace.c
@@ -623,8 +623,8 @@ uint8_t *mac_trace_get_wma_msg_string(uint16_t wma_msg)
CASE_RETURN_STRING(WMA_ADD_BCN_FILTER_CMDID);
CASE_RETURN_STRING(WMA_REMOVE_BCN_FILTER_CMDID);
CASE_RETURN_STRING(WMA_SET_ADAPT_DWELLTIME_CONF_PARAMS);
- CASE_RETURN_STRING(WDA_BPF_GET_CAPABILITIES_REQ);
- CASE_RETURN_STRING(WDA_BPF_SET_INSTRUCTIONS_REQ);
+ CASE_RETURN_STRING(WDA_APF_GET_CAPABILITIES_REQ);
+ CASE_RETURN_STRING(WDA_APF_SET_INSTRUCTIONS_REQ);
CASE_RETURN_STRING(WMA_SET_PDEV_IE_REQ);
CASE_RETURN_STRING(WMA_UPDATE_WEP_DEFAULT_KEY);
CASE_RETURN_STRING(WMA_SEND_FREQ_RANGE_CONTROL_IND);
diff --git a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c
index 523af6aecc3f..7e7d13d0f511 100644
--- a/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c
+++ b/drivers/staging/qcacld-3.0/core/mac/src/sys/legacy/src/utils/src/parser_api.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -1502,7 +1502,7 @@ populate_dot11f_rsn(tpAniSirGlobal pMac,
status = dot11f_unpack_ie_rsn(pMac, pRsnIe->rsnIEdata + idx + 2, /* EID, length */
pRsnIe->rsnIEdata[idx + 1],
pDot11f, false);
- if (DOT11F_FAILED(status)) {
+ if (!DOT11F_SUCCEEDED(status)) {
pe_err("Parse failure in Populate Dot11fRSN (0x%08x)",
status);
return eSIR_FAILURE;
@@ -2266,7 +2266,7 @@ static void update_esp_data(struct sir_esp_information *esp_information,
uint8_t *data;
int i = 0;
- int total_elements;
+ uint64_t total_elements;
struct sir_esp_info *esp_info;
data = esp_indication->variable_data;
@@ -4246,7 +4246,7 @@ sir_convert_beacon_frame2_struct(tpAniSirGlobal pMac,
pBeaconStruct->channelNumber = pBeacon->HTInfo.primaryChannel;
} else {
pBeaconStruct->channelNumber = mappedRXCh;
- pe_debug("Channel info is not present in Beacon");
+ pe_debug_rate_limited(30, "No channel info present in Beacon");
}
if (pBeacon->RSN.present) {
@@ -4347,7 +4347,6 @@ sir_convert_beacon_frame2_struct(tpAniSirGlobal pMac,
if (pBeacon->vendor_vht_ie.present) {
pBeaconStruct->vendor_vht_ie.sub_type =
pBeacon->vendor_vht_ie.sub_type;
- pe_debug("Vendor Specific VHT caps present in Beacon Frame!");
}
if (pBeacon->vendor_vht_ie.VHTCaps.present) {
@@ -4362,9 +4361,6 @@ sir_convert_beacon_frame2_struct(tpAniSirGlobal pMac,
}
/* Update HS 2.0 Information Element */
if (pBeacon->hs20vendor_ie.present) {
- pe_debug("HS20 Indication Element Present, rel#:%u, id:%u",
- pBeacon->hs20vendor_ie.release_num,
- pBeacon->hs20vendor_ie.hs_id_present);
qdf_mem_copy(&pBeaconStruct->hs20vendor_ie,
&pBeacon->hs20vendor_ie,
sizeof(tDot11fIEhs20vendor_ie) -
@@ -5830,17 +5826,25 @@ tSirRetStatus populate_dot11f_assoc_res_wsc_ie(tpAniSirGlobal pMac,
tDot11fIEWscAssocRes *pDot11f,
tpSirAssocReq pRcvdAssocReq)
{
- tDot11fIEWscAssocReq parsedWscAssocReq = { 0, };
+ uint32_t ret;
uint8_t *wscIe;
+ tDot11fIEWscAssocReq parsedWscAssocReq = { 0, };
- wscIe =
- limGetWscIEPtr(pMac, pRcvdAssocReq->addIE.addIEdata,
+ wscIe = limGetWscIEPtr(pMac, pRcvdAssocReq->addIE.addIEdata,
pRcvdAssocReq->addIE.length);
if (wscIe != NULL) {
/* retreive WSC IE from given AssocReq */
- dot11f_unpack_ie_wsc_assoc_req(pMac, wscIe + 2 + 4, /* EID, length, OUI */
- wscIe[1] - 4, /* length without OUI */
- &parsedWscAssocReq, false);
+ ret = dot11f_unpack_ie_wsc_assoc_req(pMac,
+ /* EID, length, OUI */
+ wscIe + 2 + 4,
+ /* length without OUI */
+ wscIe[1] - 4,
+ &parsedWscAssocReq, false);
+ if (!DOT11F_SUCCEEDED(ret)) {
+ pe_err("unpack failed, ret: %d", ret);
+ return eSIR_HAL_INPUT_INVALID;
+ }
+
pDot11f->present = 1;
/* version has to be 0x10 */
pDot11f->Version.present = 1;
diff --git a/drivers/staging/qcacld-3.0/core/sap/inc/sap_api.h b/drivers/staging/qcacld-3.0/core/sap/inc/sap_api.h
index 89c8024582dc..2292d8e438a2 100644
--- a/drivers/staging/qcacld-3.0/core/sap/inc/sap_api.h
+++ b/drivers/staging/qcacld-3.0/core/sap/inc/sap_api.h
@@ -1025,26 +1025,29 @@ QDF_STATUS wlansap_set_invalid_session(void *cds_ctx);
QDF_STATUS sap_roam_session_close_callback(void *pContext);
/**
- * wlansap_set_etsi_srd_chan_support() - set SRD channel support.
- * @hal: HAL pointer
- * @srd_chan_support: SRD channel support
+ * wlansap_cleanup_cac_timer() - Force cleanup DFS CAC timer
+ * @sap_ctx: sap context
*
- * This function set sap SRD channel support
+ * Force cleanup of the DFS CAC timer when all adapters are reset. It does
+ * not check for a concurrent SAP since it is only called when all adapters
+ * are reset.
*
* Return: None
*/
-void wlansap_set_etsi_srd_chan_support(tHalHandle hal, bool srd_chan_support);
+void wlansap_cleanup_cac_timer(void *sap_ctx);
/**
- * wlansap_cleanup_cac_timer() - Force cleanup DFS CAC timer
- * @sap_ctx: sap context
+ * wlansap_set_stop_bss_inprogress - sets the stop_bss_in_progress flag
*
- * Force cleanup DFS CAC timer when reset all adapters. It will not
- * check concurrency SAP since just called when reset all adapters.
+ * @ctx: Pointer to the global cds context from which the handle to the SAP
+ * ctx can be extracted.
+ * @in_progress: value to which the stop_bss_in_progress flag is set
+ *
+ * This function sets the stop_bss_in_progress flag in sap_context to the
+ * value of the in_progress parameter.
*
* Return: None
*/
-void wlansap_cleanup_cac_timer(void *sap_ctx);
+void wlansap_set_stop_bss_inprogress(void *ctx, bool in_progress);
#ifdef __cplusplus
}
diff --git a/drivers/staging/qcacld-3.0/core/sap/src/sap_api_link_cntl.c b/drivers/staging/qcacld-3.0/core/sap/src/sap_api_link_cntl.c
index f1bbbd448b55..ba83cd8a6abe 100644
--- a/drivers/staging/qcacld-3.0/core/sap/src/sap_api_link_cntl.c
+++ b/drivers/staging/qcacld-3.0/core/sap/src/sap_api_link_cntl.c
@@ -456,9 +456,20 @@ wlansap_roam_process_ch_change_success(tpAniSirGlobal mac_ctx,
FL("sapdfs: changing target channel to [%d]"),
mac_ctx->sap.SapDfsInfo.target_channel);
sap_ctx->channel = mac_ctx->sap.SapDfsInfo.target_channel;
- /* Identify if this is channel change in radar detected state */
- if (eSAP_DISCONNECTING != sap_ctx->sapsMachine)
+
+ /*
+ * Identify if this is a channel change in the radar-detected state.
+ * Also, if we are waiting for the SAP to stop, do not proceed
+ * further to restart the SAP.
+ */
+ if ((eSAP_DISCONNECTING != sap_ctx->sapsMachine) ||
+ sap_ctx->stop_bss_in_progress) {
+ QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_INFO,
+ FL("sapdfs: state [%d] Stop BSS in progress [%d], not starting SAP after channel change"),
+ sap_ctx->sapsMachine,
+ sap_ctx->stop_bss_in_progress);
return;
+ }
if (sap_ctx->ch_params.ch_width == CH_WIDTH_160MHZ) {
is_ch_dfs = true;
@@ -897,8 +908,11 @@ wlansap_roam_callback(void *ctx, tCsrRoamInfo *csr_roam_info, uint32_t roamId,
}
mac_ctx = PMAC_STRUCT(hal);
- QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_INFO_HIGH,
- FL("Before switch on roam_status = %d"), roam_status);
+ if (eCSR_ROAM_UPDATE_SCAN_RESULT != roam_status) {
+ QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_INFO_HIGH,
+ FL("roam_status = %d, roam_result = %d"),
+ roam_status, roam_result);
+ }
sta_sap_scc_on_dfs_chan = cds_is_sta_sap_scc_allowed_on_dfs_channel();
@@ -1093,9 +1107,6 @@ wlansap_roam_callback(void *ctx, tCsrRoamInfo *csr_roam_info, uint32_t roamId,
break;
}
- QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_INFO_HIGH,
- FL("Before switch on roam_result = %d"), roam_result);
-
switch (roam_result) {
case eCSR_ROAM_RESULT_INFRA_ASSOCIATION_IND:
wlansap_roam_process_infra_assoc_ind(sap_ctx, roam_result,
diff --git a/drivers/staging/qcacld-3.0/core/sap/src/sap_ch_select.c b/drivers/staging/qcacld-3.0/core/sap/src/sap_ch_select.c
index 788431091f65..f5048e8906d0 100644
--- a/drivers/staging/qcacld-3.0/core/sap/src/sap_ch_select.c
+++ b/drivers/staging/qcacld-3.0/core/sap/src/sap_ch_select.c
@@ -169,6 +169,8 @@ sapSafeChannelType safe_channels[NUM_CHANNELS] = {
{157, true},
{161, true},
{165, true},
+ {169, true},
+ {173, true},
};
#endif
@@ -867,7 +869,7 @@ static uint32_t sap_weight_channel_noise_floor(ptSapContext sap_ctx,
softap_nf_weight_cfg);
if (channel_stat == NULL || channel_stat->channelfreq == 0) {
- QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_INFO,
+ QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_DEBUG,
"In %s, sanity check failed return max weight",
__func__);
return softap_nf_weight_local;
@@ -921,7 +923,7 @@ static uint32_t sap_weight_channel_free(ptSapContext sap_ctx,
softap_channel_free_weight_cfg);
if (channel_stat == NULL || channel_stat->channelfreq == 0) {
- QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_INFO,
+ QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_DEBUG,
"In %s, sanity check failed return max weight",
__func__);
return softap_channel_free_weight_local;
@@ -982,7 +984,7 @@ static uint32_t sap_weight_channel_txpwr_range(ptSapContext sap_ctx,
softap_txpwr_range_weight_cfg);
if (channel_stat == NULL || channel_stat->channelfreq == 0) {
- QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_INFO,
+ QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_DEBUG,
"In %s, sanity check failed return max weight",
__func__);
return softap_txpwr_range_weight_local;
@@ -1034,7 +1036,7 @@ static uint32_t sap_weight_channel_txpwr_tput(ptSapContext sap_ctx,
softap_txpwr_tput_weight_cfg);
if (channel_stat == NULL || channel_stat->channelfreq == 0) {
- QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_INFO,
+ QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_DEBUG,
"In %s, sanity check failed return max weight",
__func__);
return softap_txpwr_tput_weight_local;
@@ -1080,6 +1082,24 @@ uint32_t sap_weight_channel_status(ptSapContext sap_ctx,
}
/**
+ * sap_check_channels_same_band() - Check if two channels belong to same band
+ * @ch_num1: channel number
+ * @ch_num2: channel number
+ *
+ * Return: true if both channels belong to same band else false
+ */
+static bool sap_check_channels_same_band(uint16_t ch_num1, uint16_t ch_num2)
+{
+ if ((ch_num1 <= SIR_11B_CHANNEL_END &&
+ ch_num2 <= SIR_11B_CHANNEL_END) ||
+ (ch_num1 >= SIR_11A_CHANNEL_BEGIN &&
+ ch_num2 >= SIR_11A_CHANNEL_BEGIN))
+ return true;
+
+ return false;
+}
+
+/**
* sap_update_rssi_bsscount() - updates bss count and rssi effect.
*
* @pSpectCh: Channel Information
@@ -1105,6 +1125,9 @@ static void sap_update_rssi_bsscount(tSapSpectChInfo *pSpectCh, int32_t offset,
if (pExtSpectCh != NULL &&
pExtSpectCh >= spectch_start &&
pExtSpectCh < spectch_end) {
+ if (!sap_check_channels_same_band(pSpectCh->chNum,
+ pExtSpectCh->chNum))
+ return;
++pExtSpectCh->bssCount;
switch (offset) {
case -1:
@@ -1656,16 +1679,21 @@ static void sap_compute_spect_weight(tSapChSelSpectInfo *pSpectInfoParams,
break;
case eCSR_DOT11_MODE_abg:
- sap_interference_rssi_count_5G(
- pSpectCh, channelWidth,
- secondaryChannelOffset,
- centerFreq,
- centerFreq_2,
- channel_id,
- spectch_start,
- spectch_end);
- sap_interference_rssi_count(pSpectCh,
- spectch_start, spectch_end);
+ if (pSpectCh->chNum >=
+ SIR_11A_CHANNEL_BEGIN)
+ sap_interference_rssi_count_5G(
+ pSpectCh, channelWidth,
+ secondaryChannelOffset,
+ centerFreq,
+ centerFreq_2,
+ channel_id,
+ spectch_start,
+ spectch_end);
+ else
+ sap_interference_rssi_count(
+ pSpectCh,
+ spectch_start,
+ spectch_end);
break;
}
@@ -1793,6 +1821,11 @@ static void sap_sort_chl_weight(tSapChSelSpectInfo *pSpectInfoParams)
if (pSpectCh[j].weight <
pSpectCh[minWeightIndex].weight) {
minWeightIndex = j;
+ } else if (pSpectCh[j].weight ==
+ pSpectCh[minWeightIndex].weight) {
+ if (pSpectCh[j].bssCount <
+ pSpectCh[minWeightIndex].bssCount)
+ minWeightIndex = j;
}
}
if (minWeightIndex != i) {
@@ -2127,6 +2160,56 @@ static void sap_sort_chl_weight_vht160(tSapChSelSpectInfo *pSpectInfoParams)
}
/**
+ * sap_allocate_max_weight_ht40_24_g() - allocate max weight for 40 MHz
+ * to all 2.4 GHz channels
+ * @spect_info_params: Pointer to the tSapChSelSpectInfo structure
+ *
+ * Return: none
+ */
+static void sap_allocate_max_weight_ht40_24_g(
+ tSapChSelSpectInfo *spect_info_params)
+{
+ tSapSpectChInfo *spect_info;
+ uint8_t j;
+
+ /*
+ * Assign max weight for 40 MHz (SAP_ACS_WEIGHT_MAX * 2) to all
+ * 2.4 GHz channels
+ */
+ spect_info = spect_info_params->pSpectCh;
+ for (j = 0; j < spect_info_params->numSpectChans; j++) {
+ if ((spect_info[j].chNum >= CDS_CHANNEL_NUM(CHAN_ENUM_1) &&
+ spect_info[j].chNum <= CDS_CHANNEL_NUM(CHAN_ENUM_14)))
+ spect_info[j].weight = SAP_ACS_WEIGHT_MAX * 2;
+ }
+}
+
+/**
+ * sap_allocate_max_weight_ht40_5_g() - allocate max weight for 40 MHz
+ * to all 5 GHz channels
+ * @spect_info_params: Pointer to the tSapChSelSpectInfo structure
+ *
+ * Return: none
+ */
+static void sap_allocate_max_weight_ht40_5_g(
+ tSapChSelSpectInfo *spect_info_params)
+{
+ tSapSpectChInfo *spect_info;
+ uint8_t j;
+
+ /*
+ * Assign max weight for 40 MHz (SAP_ACS_WEIGHT_MAX * 2) to all
+ * 5 GHz channels
+ */
+ spect_info = spect_info_params->pSpectCh;
+ for (j = 0; j < spect_info_params->numSpectChans; j++) {
+ if ((spect_info[j].chNum >= CDS_CHANNEL_NUM(CHAN_ENUM_36) &&
+ spect_info[j].chNum <= CDS_CHANNEL_NUM(CHAN_ENUM_165)))
+ spect_info[j].weight = SAP_ACS_WEIGHT_MAX * 2;
+ }
+}
+
+/**
* sap_sort_chl_weight_ht40_24_g() - to sort channel with the least weight
* @pSpectInfoParams: Pointer to the tSapChSelSpectInfo structure
*
@@ -2250,6 +2333,16 @@ static void sap_sort_chl_weight_ht40_24_g(tSapChSelSpectInfo *pSpectInfoParams)
pSpectInfo[j].weight = SAP_ACS_WEIGHT_MAX * 2;
}
}
+
+ pSpectInfo = pSpectInfoParams->pSpectCh;
+ for (j = 0; j < (pSpectInfoParams->numSpectChans); j++) {
+ QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_INFO_HIGH,
+ "In %s, Channel=%d Weight= %d rssi=%d bssCount=%d",
+ __func__, pSpectInfo->chNum, pSpectInfo->weight,
+ pSpectInfo->rssiAgr, pSpectInfo->bssCount);
+ pSpectInfo++;
+ }
+
sap_sort_chl_weight(pSpectInfoParams);
}
@@ -2385,12 +2478,16 @@ static void sap_sort_chl_weight_all(ptSapContext pSapCtx,
switch (pSapCtx->acs_cfg->ch_width) {
case CH_WIDTH_40MHZ:
- if (eCSR_DOT11_MODE_11g == operatingBand)
- sap_sort_chl_weight_ht40_24_g(pSpectInfoParams);
- else if (eCSR_DOT11_MODE_11a == operatingBand)
- sap_sort_chl_weight_ht40_5_g(pSpectInfoParams);
- else {
+ /*
+ * Assign max weight to all 5 GHz channels when the operating band
+ * is 11g, and to all 2.4 GHz channels when it is 11a or 11abg, so
+ * that the ACS algorithm does not select them when starting SAP
+ */
+ if (eCSR_DOT11_MODE_11g == operatingBand) {
sap_sort_chl_weight_ht40_24_g(pSpectInfoParams);
+ sap_allocate_max_weight_ht40_5_g(pSpectInfoParams);
+ } else {
+ sap_allocate_max_weight_ht40_24_g(pSpectInfoParams);
sap_sort_chl_weight_ht40_5_g(pSpectInfoParams);
}
break;
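
The band check added above compares both channel numbers against the 11b/11a
boundaries before bumping the neighbouring channel's bssCount. A minimal
standalone sketch of that logic, assuming the usual values
SIR_11B_CHANNEL_END = 14 and SIR_11A_CHANNEL_BEGIN = 36 (the real constants
live in the driver's SIR headers):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Assumed values; the driver defines these in its SIR headers. */
#define SIR_11B_CHANNEL_END    14
#define SIR_11A_CHANNEL_BEGIN  36

/* Mirrors sap_check_channels_same_band(): channels count as interfering
 * only if both are 2.4 GHz (<= 14) or both are 5 GHz (>= 36). */
static bool channels_same_band(uint16_t ch1, uint16_t ch2)
{
	return (ch1 <= SIR_11B_CHANNEL_END && ch2 <= SIR_11B_CHANNEL_END) ||
	       (ch1 >= SIR_11A_CHANNEL_BEGIN && ch2 >= SIR_11A_CHANNEL_BEGIN);
}

int main(void)
{
	assert(channels_same_band(1, 11));    /* both 2.4 GHz */
	assert(channels_same_band(36, 165));  /* both 5 GHz */
	assert(!channels_same_band(14, 36));  /* cross-band, no bssCount bump */
	return 0;
}
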
diff --git a/drivers/staging/qcacld-3.0/core/sap/src/sap_fsm.c b/drivers/staging/qcacld-3.0/core/sap/src/sap_fsm.c
index f49877e96ce1..d33ee91381a7 100644
--- a/drivers/staging/qcacld-3.0/core/sap/src/sap_fsm.c
+++ b/drivers/staging/qcacld-3.0/core/sap/src/sap_fsm.c
@@ -2934,9 +2934,10 @@ QDF_STATUS sap_signal_hdd_event(ptSapContext sap_ctx,
return QDF_STATUS_E_FAILURE;
}
- QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_INFO_HIGH,
- FL("SAP event callback event = %s"),
- sap_hdd_event_to_string(sap_hddevent));
+ if (sap_hddevent != eSAP_UPDATE_SCAN_RESULT)
+ QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_INFO_HIGH,
+ FL("SAP event callback event = %s"),
+ sap_hdd_event_to_string(sap_hddevent));
switch (sap_hddevent) {
case eSAP_STA_ASSOC_IND:
@@ -4364,7 +4365,7 @@ static QDF_STATUS sap_fsm_state_disconnecting(ptSapContext sap_ctx,
QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_INFO,
FL("in state %s, event msg %d result %d"),
"eSAP_DISCONNECTING ", msg, sap_event->u2);
- sap_goto_disconnecting(sap_ctx);
+ qdf_status = sap_goto_disconnecting(sap_ctx);
} else {
QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_ERROR,
FL("in state %s, invalid event msg %d"),
@@ -4860,6 +4861,7 @@ static QDF_STATUS sap_get_channel_list(ptSapContext sap_ctx,
uint8_t loop_count;
uint8_t *list;
uint8_t ch_count;
+ uint8_t new_chan_count = 0;
uint8_t start_ch_num, band_start_ch;
uint8_t end_ch_num, band_end_ch;
uint32_t en_lte_coex;
@@ -4869,7 +4871,7 @@ static QDF_STATUS sap_get_channel_list(ptSapContext sap_ctx,
#endif
tpAniSirGlobal mac_ctx = PMAC_STRUCT(hal);
tSapChSelSpectInfo spect_info_obj = { NULL, 0 };
- tSapChSelSpectInfo *spect_info = &spect_info_obj;
+ uint16_t ch_width;
if (NULL == hal) {
QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_ERROR,
@@ -4881,10 +4883,11 @@ static QDF_STATUS sap_get_channel_list(ptSapContext sap_ctx,
start_ch_num = sap_ctx->acs_cfg->start_ch;
end_ch_num = sap_ctx->acs_cfg->end_ch;
+ ch_width = sap_ctx->acs_cfg->ch_width;
QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_INFO,
- FL("startChannel %d, EndChannel %d, HW:%d"),
- start_ch_num, end_ch_num,
- sap_ctx->acs_cfg->hw_mode);
+ FL("startChannel %d, EndChannel %d, ch_width %d, HW:%d"),
+ start_ch_num, end_ch_num, ch_width,
+ sap_ctx->acs_cfg->hw_mode);
wlansap_extend_to_acs_range(&start_ch_num, &end_ch_num,
&band_start_ch, &band_end_ch);
@@ -4941,12 +4944,32 @@ static QDF_STATUS sap_get_channel_list(ptSapContext sap_ctx,
* Skip the channels which are not in ACS config from user
* space
*/
- if(SAP_CHANNEL_NOT_SELECTED ==
+ if (SAP_CHANNEL_NOT_SELECTED ==
sap_channel_in_acs_channel_list(
- CDS_CHANNEL_NUM(loop_count), sap_ctx,
- spect_info))
+ CDS_CHANNEL_NUM(loop_count),
+ sap_ctx, &spect_info_obj))
continue;
+ /*
+ * If the channel list contains any 5 GHz channel and the
+ * bandwidth is 40/80/160 MHz, SAP should not come up on
+ * 2.4 GHz: a 2.4 GHz channel is not preferred for 40 MHz,
+ * and 80/160 MHz is not allowed in the 2.4 GHz band. So do
+ * not even scan 2.4 GHz channels in that case.
+ */
+ if (end_ch_num >= CDS_CHANNEL_NUM(CHAN_ENUM_36) &&
+ ((ch_width == CH_WIDTH_40MHZ) ||
+ (ch_width == CH_WIDTH_80MHZ) ||
+ (ch_width == CH_WIDTH_80P80MHZ) ||
+ (ch_width == CH_WIDTH_160MHZ))) {
+ if (CDS_CHANNEL_NUM(loop_count) >=
+ CDS_CHANNEL_NUM(CHAN_ENUM_1) &&
+ CDS_CHANNEL_NUM(loop_count) <=
+ CDS_CHANNEL_NUM(CHAN_ENUM_14))
+ continue;
+ }
+
#ifdef FEATURE_WLAN_CH_AVOID
for (i = 0; i < NUM_CHANNELS; i++) {
if (safe_channels[i].channelNumber ==
@@ -4997,6 +5020,19 @@ static QDF_STATUS sap_get_channel_list(ptSapContext sap_ctx,
}
#endif
}
+
+ for (i = 0; i < ch_count; i++) {
+ if (cds_is_etsi13_regdmn_srd_chan(cds_chan_to_freq(list[i]))) {
+ if (!sap_ctx->enable_etsi_srd_chan_support)
+ continue;
+ }
+
+ list[new_chan_count] = list[i];
+ new_chan_count++;
+ }
+
+ ch_count = new_chan_count;
+
if (0 == ch_count) {
QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_ERROR,
FL("No active channels present for the current region"));
diff --git a/drivers/staging/qcacld-3.0/core/sap/src/sap_internal.h b/drivers/staging/qcacld-3.0/core/sap/src/sap_internal.h
index 90fa50cf731a..24991f15502e 100644
--- a/drivers/staging/qcacld-3.0/core/sap/src/sap_internal.h
+++ b/drivers/staging/qcacld-3.0/core/sap/src/sap_internal.h
@@ -275,6 +275,7 @@ typedef struct sSapContext {
uint8_t sap_sta_id;
bool is_chan_change_inprogress;
bool enable_etsi_srd_chan_support;
+ bool stop_bss_in_progress;
} *ptSapContext;
/*----------------------------------------------------------------------------
@@ -436,6 +437,18 @@ QDF_STATUS sap_close_session(tHalHandle hHal,
csr_roamSessionCloseCallback callback, bool valid);
/**
+ * sap_select_default_oper_chan() - Select AP mode default operating channel
+ * @acs_cfg: pointer to ACS config info
+ *
+ * Select the AP mode default operating channel based on the ACS hw mode and
+ * channel range configuration when the ACS scan fails for some reason, such
+ * as a scan timeout.
+ *
+ * Return: Selected operating channel number
+ */
+uint8_t sap_select_default_oper_chan(struct sap_acs_cfg *acs_cfg);
+
+/**
* sap_channel_in_acs_channel_list() - check if channel in acs channel list
* @channel_num: channel to check
* @sap_ctx: struct ptSapContext
@@ -449,17 +462,6 @@ QDF_STATUS sap_close_session(tHalHandle hHal,
uint8_t sap_channel_in_acs_channel_list(uint8_t channel_num,
ptSapContext sap_ctx,
tSapChSelSpectInfo *spect_info_params);
-/**
- * sap_select_default_oper_chan() - Select AP mode default operating channel
- * @acs_cfg: pointer to ACS config info
- *
- * Select AP mode default operating channel based on ACS hw mode and channel
- * range configuration when ACS scan fails due to some reasons, such as scan
- * timeout, etc.
- *
- * Return: Selected operating channel number
- */
-uint8_t sap_select_default_oper_chan(struct sap_acs_cfg *acs_cfg);
#ifdef __cplusplus
}
diff --git a/drivers/staging/qcacld-3.0/core/sap/src/sap_module.c b/drivers/staging/qcacld-3.0/core/sap/src/sap_module.c
index 4b4db4983077..9e5c0aadb253 100644
--- a/drivers/staging/qcacld-3.0/core/sap/src/sap_module.c
+++ b/drivers/staging/qcacld-3.0/core/sap/src/sap_module.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -855,6 +855,7 @@ QDF_STATUS wlansap_start_bss(void *pCtx, /* pwextCtx */
pSapCtx->acs_cfg = &pConfig->acs_cfg;
pSapCtx->isCacEndNotified = false;
pSapCtx->is_chan_change_inprogress = false;
+ pSapCtx->stop_bss_in_progress = false;
/* Set the BSSID to your "self MAC Addr" read the mac address
from Configuation ITEM received from HDD */
pSapCtx->csr_roamProfile.BSSIDs.numOfBSSIDs = 1;
@@ -997,6 +998,29 @@ QDF_STATUS wlansap_set_mac_acl(void *pCtx, /* pwextCtx */
return qdf_status;
} /* wlansap_set_mac_acl */
+void wlansap_set_stop_bss_inprogress(void *ctx, bool in_progress)
+{
+ ptSapContext sap_ctx = NULL;
+
+ if (!ctx) {
+ QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_ERROR,
+ "%s: Invalid Global CDS handle", __func__);
+ return;
+ }
+
+ sap_ctx = CDS_GET_SAP_CB(ctx);
+ if (!sap_ctx) {
+ QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_ERROR,
+ "%s: Invalid SAP pointer from ctx", __func__);
+ return;
+ }
+
+ QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_DEBUG,
+ "%s: Set stop_bss_in_progress to %d",
+ __func__, in_progress);
+ sap_ctx->stop_bss_in_progress = in_progress;
+}
+
/**
* wlansap_stop_bss() - stop BSS.
* @pCtx: Pointer to the global cds context; a handle to SAP's control block
@@ -3287,14 +3311,14 @@ void wlansap_extend_to_acs_range(uint8_t *startChannelNum,
(*endChannelNum + ACS_2G_EXTEND) : 14;
} else if (*startChannelNum >= 36 && *endChannelNum >= 36) {
*bandStartChannel = CHAN_ENUM_36;
- *bandEndChannel = CHAN_ENUM_165;
+ *bandEndChannel = CHAN_ENUM_173;
tmp_startChannelNum = (*startChannelNum - ACS_5G_EXTEND) > 36 ?
(*startChannelNum - ACS_5G_EXTEND) : 36;
tmp_endChannelNum = (*endChannelNum + ACS_5G_EXTEND) <= 165 ?
(*endChannelNum + ACS_5G_EXTEND) : 165;
} else {
*bandStartChannel = CHAN_ENUM_1;
- *bandEndChannel = CHAN_ENUM_165;
+ *bandEndChannel = CHAN_ENUM_173;
tmp_startChannelNum = *startChannelNum > 5 ?
(*startChannelNum - ACS_2G_EXTEND) : 1;
tmp_endChannelNum = (*endChannelNum + ACS_5G_EXTEND) <= 165 ?
@@ -3752,32 +3776,6 @@ QDF_STATUS wlansap_set_tx_leakage_threshold(tHalHandle hal,
return QDF_STATUS_SUCCESS;
}
-/**
- * wlansap_set_etsi_srd_chan_support() - set UNI-III band channel support
- * @hal: HAL pointer
- * @srd_chan_support: ETSI SRD channel support
- *
- * This function set sap ETSI SRD channel support
- *
- * Return: None
- */
-void wlansap_set_etsi_srd_chan_support(tHalHandle hal,
- bool etsi_srd_chan_support)
-{
- tpAniSirGlobal mac;
-
- if (NULL == hal) {
- QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_ERROR,
- "%s: Invalid hal pointer", __func__);
- return;
- }
-
- mac = PMAC_STRUCT(hal);
- mac->sap.enable_etsi_srd_chan_support = etsi_srd_chan_support;
- QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_DEBUG,
- "%s: srd_ch_support %d", __func__,
- mac->sap.enable_etsi_srd_chan_support);
-}
/*
* wlansap_set_invalid_session() - set session ID to invalid
* @cds_ctx: pointer of global context
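
wlansap_set_stop_bss_inprogress() above just records a flag on the SAP
context; the DFS/channel-change path changed earlier in this patch checks it
before restarting the SAP. A standalone sketch of that guard-flag pattern,
with hypothetical names standing in for the driver types:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the SAP control block. */
struct sap_ctx {
	bool stop_bss_in_progress;
};

static void set_stop_bss_inprogress(struct sap_ctx *sap, bool in_progress)
{
	sap->stop_bss_in_progress = in_progress;
}

static void start_sap_after_channel_change(struct sap_ctx *sap)
{
	/* Same early-out as the patch: never restart the BSS while a
	 * stop request is pending. */
	if (sap->stop_bss_in_progress) {
		printf("stop BSS in progress, not starting SAP\n");
		return;
	}
	printf("starting SAP on new channel\n");
}

int main(void)
{
	struct sap_ctx sap = { .stop_bss_in_progress = false };

	set_stop_bss_inprogress(&sap, true);   /* HDD issued stop_ap */
	start_sap_after_channel_change(&sap);  /* suppressed */
	set_stop_bss_inprogress(&sap, false);
	start_sap_after_channel_change(&sap);  /* proceeds */
	return 0;
}
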
diff --git a/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h b/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h
index 33c7b40da2d8..208e851082ba 100644
--- a/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h
+++ b/drivers/staging/qcacld-3.0/core/sme/inc/csr_api.h
@@ -1007,6 +1007,7 @@ typedef struct tagCsrRoamProfile {
uint32_t hlp_ie_len;
struct cds_fils_connection_info *fils_con_info;
#endif
+ bool force_rsne_override;
} tCsrRoamProfile;
#ifdef FEATURE_WLAN_MCC_TO_SCC_SWITCH
@@ -1126,6 +1127,32 @@ struct csr_sta_roam_policy_params {
uint8_t sap_operating_band;
};
+/**
+ * struct csr_neighbor_report_offload_params - neighbor report offload params
+ * @params_bitmask: bitmask to specify which of the below are enabled
+ * @time_offset: time offset after 11k offload command to trigger a neighbor
+ * report request (in seconds)
+ * @low_rssi_offset: Offset from rssi threshold to trigger neighbor
+ * report request (in dBm)
+ * @bmiss_count_trigger: Number of beacon miss events to trigger neighbor
+ * report request
+ * @per_threshold_offset: offset from PER threshold to trigger neighbor
+ * report request (in %)
+ * @neighbor_report_cache_timeout: timeout after which new trigger can enable
+ * sending of a neighbor report request (in seconds)
+ * @max_neighbor_report_req_cap: max number of neighbor report requests that
+ * can be sent to the peer in the current session
+ */
+struct csr_neighbor_report_offload_params {
+ uint8_t params_bitmask;
+ uint32_t time_offset;
+ uint32_t low_rssi_offset;
+ uint32_t bmiss_count_trigger;
+ uint32_t per_threshold_offset;
+ uint32_t neighbor_report_cache_timeout;
+ uint32_t max_neighbor_report_req_cap;
+};
+
typedef struct tagCsrConfigParam {
uint32_t FragmentationThreshold;
/* keep this uint32_t. This gets converted to ePhyChannelBondState */
@@ -1244,6 +1271,7 @@ typedef struct tagCsrConfigParam {
bool fScanTwice;
uint32_t nVhtChannelWidth;
uint8_t enableTxBF;
+ bool enable_subfee_vendor_vhtie;
uint8_t enable_txbf_sap_mode;
uint8_t enable2x2;
bool enableVhtFor24GHz;
@@ -1367,6 +1395,8 @@ typedef struct tagCsrConfigParam {
uint32_t wlm_latency_flags[CSR_NUM_WLM_LATENCY_LEVEL];
struct sir_score_config bss_score_params;
uint8_t oce_feature_bitmap;
+ uint32_t offload_11k_enable_bitmask;
+ struct csr_neighbor_report_offload_params neighbor_report_offload;
} tCsrConfigParam;
/* Tush */
diff --git a/drivers/staging/qcacld-3.0/core/sme/inc/csr_internal.h b/drivers/staging/qcacld-3.0/core/sme/inc/csr_internal.h
index ce78064f7a42..6ce321ae7b79 100644
--- a/drivers/staging/qcacld-3.0/core/sme/inc/csr_internal.h
+++ b/drivers/staging/qcacld-3.0/core/sme/inc/csr_internal.h
@@ -473,6 +473,17 @@ typedef struct tagCsrNeighborRoamConfig {
int32_t nhi_rssi_scan_rssi_ub;
} tCsrNeighborRoamConfig;
+/*
+ * Neighbor Report Params Bitmask
+ */
+#define NEIGHBOR_REPORT_PARAMS_TIME_OFFSET 0x01
+#define NEIGHBOR_REPORT_PARAMS_LOW_RSSI_OFFSET 0x02
+#define NEIGHBOR_REPORT_PARAMS_BMISS_COUNT_TRIGGER 0x04
+#define NEIGHBOR_REPORT_PARAMS_PER_THRESHOLD_OFFSET 0x08
+#define NEIGHBOR_REPORT_PARAMS_CACHE_TIMEOUT 0x10
+#define NEIGHBOR_REPORT_PARAMS_MAX_REQ_CAP 0x20
+#define NEIGHBOR_REPORT_PARAMS_ALL 0x3F
+
typedef struct tagCsrConfig {
uint32_t agingCount;
uint32_t FragmentationThreshold;
@@ -580,6 +591,7 @@ typedef struct tagCsrConfig {
/* To enable scanning 2g channels twice on single scan req from HDD */
bool fScanTwice;
uint32_t nVhtChannelWidth;
+ bool enable_subfee_vendor_vhtie;
uint8_t enable_txbf_sap_mode;
uint8_t enable2x2;
bool enableVhtFor24GHz;
@@ -669,6 +681,8 @@ typedef struct tagCsrConfig {
uint32_t wlm_latency_flags[CSR_NUM_WLM_LATENCY_LEVEL];
struct sir_score_config bss_score_params;
uint8_t oce_feature_bitmap;
+ uint32_t offload_11k_enable_bitmask;
+ struct csr_neighbor_report_offload_params neighbor_report_offload;
} tCsrConfig;
typedef struct tagCsrChannelPowerInfo {
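
The NEIGHBOR_REPORT_PARAMS_* bits above gate which of the offloaded trigger
parameters are honoured; csr_update_11k_offload_params() later in this patch
tests them one by one. A small standalone sketch of composing and testing
that bitmask, reusing the same values:

#include <assert.h>
#include <stdint.h>

#define NEIGHBOR_REPORT_PARAMS_TIME_OFFSET          0x01
#define NEIGHBOR_REPORT_PARAMS_LOW_RSSI_OFFSET      0x02
#define NEIGHBOR_REPORT_PARAMS_BMISS_COUNT_TRIGGER  0x04
#define NEIGHBOR_REPORT_PARAMS_PER_THRESHOLD_OFFSET 0x08
#define NEIGHBOR_REPORT_PARAMS_CACHE_TIMEOUT        0x10
#define NEIGHBOR_REPORT_PARAMS_MAX_REQ_CAP          0x20
#define NEIGHBOR_REPORT_PARAMS_ALL                  0x3F

int main(void)
{
	/* Enable only the time offset and beacon-miss triggers. */
	uint8_t params_bitmask = NEIGHBOR_REPORT_PARAMS_TIME_OFFSET |
				 NEIGHBOR_REPORT_PARAMS_BMISS_COUNT_TRIGGER;

	assert(params_bitmask & NEIGHBOR_REPORT_PARAMS_TIME_OFFSET);
	assert(!(params_bitmask & NEIGHBOR_REPORT_PARAMS_CACHE_TIMEOUT));

	/* The RSO path drops the 11k offload command entirely when no
	 * parameter bit is set, as csr_update_11k_offload_params() does. */
	assert((params_bitmask & NEIGHBOR_REPORT_PARAMS_ALL) != 0);
	return 0;
}
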
diff --git a/drivers/staging/qcacld-3.0/core/sme/inc/csr_neighbor_roam.h b/drivers/staging/qcacld-3.0/core/sme/inc/csr_neighbor_roam.h
index 22ef0c3c7834..615812fcfb40 100644
--- a/drivers/staging/qcacld-3.0/core/sme/inc/csr_neighbor_roam.h
+++ b/drivers/staging/qcacld-3.0/core/sme/inc/csr_neighbor_roam.h
@@ -332,6 +332,7 @@ void csr_roam_reset_roam_params(tpAniSirGlobal mac_ptr);
#define REASON_SUPPLICANT_DISABLED_ROAMING 39
#define REASON_CTX_INIT 40
#define REASON_FILS_PARAMS_CHANGED 41
+#define REASON_SME_ISSUED 42
#if defined(WLAN_FEATURE_HOST_ROAM) || defined(WLAN_FEATURE_ROAM_OFFLOAD)
QDF_STATUS csr_roam_offload_scan(tpAniSirGlobal pMac, uint8_t sessionId,
@@ -426,4 +427,18 @@ QDF_STATUS csr_roam_stop_wait_for_key_timer(tpAniSirGlobal pMac);
QDF_STATUS csr_roam_copy_connected_profile(tpAniSirGlobal pMac,
uint32_t sessionId, tCsrRoamProfile *pDstProfile);
+/**
+ * csr_invoke_neighbor_report_request() - Send neighbor report invoke command
+ * to WMA
+ * @session_id: session id
+ * @neighbor_report_req: pointer to the neighbor report request parameters
+ * @send_resp_to_host: whether the response should be forwarded to the host
+ *
+ * API called from the wireless extensions (IW) path to send a neighbor report
+ * invoke request to WMA and then to the firmware
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS csr_invoke_neighbor_report_request(uint8_t session_id,
+ struct sRrmNeighborReq *neighbor_report_req,
+ bool send_resp_to_host);
+
#endif /* CSR_NEIGHBOR_ROAM_H */
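
A hedged usage sketch of the csr_invoke_neighbor_report_request() prototype
declared above. This compiles only inside the driver tree; the request struct
comes from sme_rrm_internal.h, and the wrapper name here is illustrative:

/* Sketch only: assumes the qcacld-3.0 headers are on the include path. */
static QDF_STATUS request_neighbor_report(uint8_t session_id,
					  tSirMacSSid *ssid)
{
	struct sRrmNeighborReq req = {0};

	if (ssid) {
		req.no_ssid = 0;
		req.ssid = *ssid;
	} else {
		req.no_ssid = 1;
	}
	/* Offloaded path: the firmware sends the request and the response
	 * is not forwarded back to the host. */
	req.neighbor_report_offload = true;

	return csr_invoke_neighbor_report_request(session_id, &req, false);
}
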
diff --git a/drivers/staging/qcacld-3.0/core/sme/inc/sme_api.h b/drivers/staging/qcacld-3.0/core/sme/inc/sme_api.h
index 07941f1b1536..30ab39a88048 100644
--- a/drivers/staging/qcacld-3.0/core/sme/inc/sme_api.h
+++ b/drivers/staging/qcacld-3.0/core/sme/inc/sme_api.h
@@ -60,6 +60,26 @@
#define SME_GLOBAL_CLASSD_STATS (1 << eCsrGlobalClassDStats)
#define SME_PER_CHAIN_RSSI_STATS (1 << csr_per_chain_rssi_stats)
+#define sme_log_rate_limited(rate, level, args...) \
+ QDF_TRACE_RATE_LIMITED(rate, QDF_MODULE_ID_SME, level, ## args)
+#define sme_log_rate_limited_fl(rate, level, format, args...) \
+ sme_log_rate_limited(rate, level, FL(format), ## args)
+#define sme_alert_rate_limited(rate, format, args...) \
+ sme_log_rate_limited_fl(rate, QDF_TRACE_LEVEL_FATAL,\
+ format, ## args)
+#define sme_err_rate_limited(rate, format, args...) \
+ sme_log_rate_limited_fl(rate, QDF_TRACE_LEVEL_ERROR,\
+ format, ## args)
+#define sme_warn_rate_limited(rate, format, args...) \
+ sme_log_rate_limited_fl(rate, QDF_TRACE_LEVEL_WARN,\
+ format, ## args)
+#define sme_info_rate_limited(rate, format, args...) \
+ sme_log_rate_limited_fl(rate, QDF_TRACE_LEVEL_INFO,\
+ format, ## args)
+#define sme_debug_rate_limited(rate, format, args...) \
+ sme_log_rate_limited_fl(rate, QDF_TRACE_LEVEL_DEBUG,\
+ format, ## args)
+
#define sme_log(level, args...) QDF_TRACE(QDF_MODULE_ID_SME, level, ## args)
#define sme_logfl(level, format, args...) sme_log(level, FL(format), ## args)
@@ -84,6 +104,7 @@
#define SME_SCAN_DBS_POLICY_FORCE_NONDBS 0x1
#define SME_SCAN_DBS_POLICY_IGNORE_DUTY 0x2
#define SME_SCAN_DBS_POLICY_MAX 0x3
+#define SME_SCAN_REJECT_RATE_LIMIT 5
#define SME_SESSION_ID_ANY 50
@@ -582,7 +603,7 @@ QDF_STATUS sme_set_host_offload(tHalHandle hHal, uint8_t sessionId,
* Return: QDF_STATUS
*/
QDF_STATUS sme_conf_hw_filter_mode(tHalHandle hal, uint8_t session_id,
- uint8_t mode_bitmap);
+ uint8_t mode_bitmap, bool filter_enable);
QDF_STATUS sme_set_keep_alive(tHalHandle hHal, uint8_t sessionId,
tpSirKeepAliveReq pRequest);
@@ -953,8 +974,8 @@ const char *sme_scan_type_to_string(const uint8_t scan_type);
const char *sme_bss_type_to_string(const uint8_t bss_type);
QDF_STATUS sme_ap_disable_intra_bss_fwd(tHalHandle hHal, uint8_t sessionId,
bool disablefwd);
-uint32_t sme_get_channel_bonding_mode5_g(tHalHandle hHal);
-uint32_t sme_get_channel_bonding_mode24_g(tHalHandle hHal);
+QDF_STATUS sme_get_channel_bonding_mode5_g(tHalHandle hHal, uint32_t *mode);
+QDF_STATUS sme_get_channel_bonding_mode24_g(tHalHandle hHal, uint32_t *mode);
#ifdef WLAN_FEATURE_STATS_EXT
typedef struct sStatsExtRequestReq {
uint32_t request_data_len;
@@ -1370,14 +1391,118 @@ bool sme_is_sta_smps_allowed(tHalHandle hHal, uint8_t session_id);
QDF_STATUS sme_add_beacon_filter(tHalHandle hal,
uint32_t session_id, uint32_t *ie_map);
QDF_STATUS sme_remove_beacon_filter(tHalHandle hal, uint32_t session_id);
-QDF_STATUS sme_bpf_offload_register_callback(tHalHandle hal,
- void (*pbpf_get_offload_cb)(void *,
- struct sir_bpf_get_offload *));
-QDF_STATUS sme_bpf_offload_deregister_callback(tHalHandle hal);
-
-QDF_STATUS sme_get_bpf_offload_capabilities(tHalHandle hal);
-QDF_STATUS sme_set_bpf_instructions(tHalHandle hal,
- struct sir_bpf_set_offload *);
+
+/**
+ * sme_apf_offload_register_callback() - Register get apf offload callback
+ *
+ * @hal - MAC global handle
+ * @papf_get_offload_cb - callback routine from HDD
+ *
+ * API used by HDD to register its APF get caps callback in SME.
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS sme_apf_offload_register_callback(tHalHandle hal,
+ void (*papf_get_offload_cb)(void *,
+ struct sir_apf_get_offload *));
+
+/**
+ * sme_apf_offload_deregister_callback() - De-register get apf offload callback
+ *
+ * @hal - MAC global handle
+ *
+ * API used by HDD to de-register its APF get caps callback in SME.
+ *
+ * Return: QDF_STATUS
+ */
+QDF_STATUS sme_apf_offload_deregister_callback(tHalHandle hal);
+
+/**
+ * sme_get_apf_capabilities() - Get APF offload capabilities
+ * @hal: Global HAL handle
+ *
+ * API to get APF version and max filter size.
+ *
+ * Return: QDF_STATUS enumeration
+ */
+QDF_STATUS sme_get_apf_capabilities(tHalHandle hal);
+
+/**
+ * sme_set_apf_instructions() - Set APF filter instructions.
+ * @hal: HAL handle
+ * @apf_set_offload: struct to set apf filter instructions.
+ *
+ * APFv2 (Legacy APF) API to set the APF packet filter.
+ *
+ * Return: QDF_STATUS enumeration.
+ */
+QDF_STATUS sme_set_apf_instructions(tHalHandle hal,
+ struct sir_apf_set_offload *);
+
+/**
+ * sme_set_apf_enable_disable - Send apf enable/disable cmd
+ * @hal: global hal handle
+ * @vdev_id: vdev id
+ * @apf_enable: true to enable the APF interpreter, false to disable it
+ *
+ * API to either enable or disable the APF interpreter.
+ *
+ * Return: QDF_STATUS enumeration.
+ */
+QDF_STATUS sme_set_apf_enable_disable(tHalHandle hal, uint8_t vdev_id,
+ bool apf_enable);
+
+/**
+ * sme_apf_write_work_memory - Write into the apf work memory
+ * @hal: global hal handle
+ * @write_params: APF parameters for the write operation
+ *
+ * API for writing into the APF work memory.
+ *
+ * Return: QDF_STATUS enumeration.
+ */
+QDF_STATUS sme_apf_write_work_memory(tHalHandle hal,
+ struct wmi_apf_write_memory_params
+ *write_params);
+
+/**
+ * sme_apf_read_work_memory - Read part of apf work memory
+ * @hal: global hal handle
+ * @read_params: APF parameters for the get operation
+ *
+ * API for issuing an APF read memory request.
+ *
+ * Return: QDF_STATUS enumeration.
+ */
+QDF_STATUS
+sme_apf_read_work_memory(tHalHandle hal,
+ struct wmi_apf_read_memory_params *read_params);
+
+/**
+ * sme_apf_read_memory_register_callback() - Register apf mem callback
+ *
+ * @hal - MAC global handle
+ * @apf_read_mem_cb - callback routine from HDD
+ *
+ * API used by HDD to register its APF read memory callback in SME.
+ *
+ * Return: QDF_STATUS Enumeration
+ */
+QDF_STATUS sme_apf_read_memory_register_callback(tHalHandle hal,
+ void (*apf_read_mem_cb)(void *context,
+ struct wmi_apf_read_memory_resp_event_params *));
+
+/**
+ * sme_apf_read_memory_deregister_callback() - De-register apf mem callback
+ *
+ * @h_hal - MAC global handle
+ *
+ * API used by HDD to de-register its APF read memory callback in SME.
+ *
+ * Return: QDF_STATUS Enumeration
+ */
+QDF_STATUS sme_apf_read_memory_deregister_callback(tHalHandle h_hal);
+
uint32_t sme_get_wni_dot11_mode(tHalHandle hal);
QDF_STATUS sme_create_mon_session(tHalHandle hal_handle, uint8_t *bssid);
QDF_STATUS sme_set_adaptive_dwelltime_config(tHalHandle hal,
@@ -1505,6 +1630,18 @@ QDF_STATUS sme_update_short_retry_limit_threshold(tHalHandle hal_handle,
QDF_STATUS sme_update_long_retry_limit_threshold(tHalHandle hal_handle,
struct sme_long_retry_limit *long_retry_limit_th);
/**
+ * sme_set_etsi_srd_ch_in_master_mode() - master mode UNI-III band channel
+ * support
+ * @hal: HAL pointer
+ * @etsi_srd_chan_support: ETSI SRD channel support
+ *
+ * This function sets ETSI SRD channel support for master mode
+ *
+ * Return: None
+ */
+void sme_set_etsi_srd_ch_in_master_mode(tHalHandle hal,
+ bool etsi_srd_chan_support);
+
+/**
* sme_roam_is_ese_assoc() - Check if association type is ESE
* @roam_info: Pointer to roam info
*
@@ -2047,4 +2184,31 @@ QDF_STATUS sme_fast_reassoc(tHalHandle hal, tCsrRoamProfile *profile,
*/
void sme_enable_roaming_on_connected_sta(tHalHandle hal);
+/**
+ * sme_unpack_rsn_ie: wrapper to unpack RSN IE and update default RSN params
+ * if optional fields are not present.
+ * @hal: handle returned by mac_open
+ * @buf: rsn ie buffer pointer
+ * @buf_len: rsn ie buffer length
+ * @rsn_ie: outframe rsn ie structure
+ * @append_ie: flag to indicate if the rsn_ie needs to be appended from buf
+ *
+ * Return: parse status
+ */
+uint32_t sme_unpack_rsn_ie(tHalHandle hal, uint8_t *buf,
+ uint8_t buf_len, tDot11fIERSN *rsn_ie,
+ bool append_ie);
+
+/**
+ * sme_is_sta_key_exchange_in_progress() - checks whether the STA/P2P client
+ * session has key exchange in progress
+ *
+ * @hal: global hal handle
+ * @session_id: session id
+ *
+ * Return: true - if key exchange in progress
+ * false - if not in progress
+ */
+bool sme_is_sta_key_exchange_in_progress(tHalHandle hal, uint8_t session_id);
+
#endif /* #if !defined( __SME_API_H ) */
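
The channel bonding getters above were changed from returning the value
directly to returning QDF_STATUS with an out parameter, so callers now have
to check the status. A hedged caller sketch under that assumption; the
HDD-side wrapper name and fallback are illustrative only:

/* Sketch only: assumes the driver's sme_api.h is available. */
static int hdd_get_5g_channel_bonding(tHalHandle hal, uint32_t *cb_mode)
{
	QDF_STATUS status;

	status = sme_get_channel_bonding_mode5_g(hal, cb_mode);
	if (!QDF_IS_STATUS_SUCCESS(status)) {
		/* The getter now allocates the SME config internally and can
		 * fail, so the caller needs a fallback value. */
		*cb_mode = 0;
		return -EINVAL;
	}
	return 0;
}
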
diff --git a/drivers/staging/qcacld-3.0/core/sme/inc/sme_internal.h b/drivers/staging/qcacld-3.0/core/sme/inc/sme_internal.h
index 70ee891d2615..66f58c7eba18 100644
--- a/drivers/staging/qcacld-3.0/core/sme/inc/sme_internal.h
+++ b/drivers/staging/qcacld-3.0/core/sme/inc/sme_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -263,8 +263,8 @@ typedef struct tagSmeStruct {
ocb_callback dcc_stats_event_callback;
sme_set_thermal_level_callback set_thermal_level_cb;
void *saved_scan_cmd;
- void (*pbpf_get_offload_cb)(void *context,
- struct sir_bpf_get_offload *);
+ void (*papf_get_offload_cb)(void *context,
+ struct sir_apf_get_offload *);
p2p_lo_callback p2p_lo_event_callback;
void *p2p_lo_event_context;
sme_send_oem_data_rsp_msg oem_data_rsp_callback;
@@ -283,6 +283,8 @@ typedef struct tagSmeStruct {
struct spectral_samp_msg *samp_msg);
void (*stats_ext2_cb)(void *, struct stats_ext2_event *);
void (*congestion_cb)(void *, uint32_t congestion, uint32_t vdev_id);
+ void (*apf_read_mem_cb)(void *context,
+ struct wmi_apf_read_memory_resp_event_params *params);
} tSmeStruct, *tpSmeStruct;
diff --git a/drivers/staging/qcacld-3.0/core/sme/inc/sme_rrm_internal.h b/drivers/staging/qcacld-3.0/core/sme/inc/sme_rrm_internal.h
index d172894acb3f..48dc39b6ce2a 100644
--- a/drivers/staging/qcacld-3.0/core/sme/inc/sme_rrm_internal.h
+++ b/drivers/staging/qcacld-3.0/core/sme/inc/sme_rrm_internal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2012, 2014-2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2012, 2014-2016, 2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -98,6 +98,7 @@ typedef struct sRrmSMEContext {
typedef struct sRrmNeighborReq {
uint8_t no_ssid;
tSirMacSSid ssid;
+ bool neighbor_report_offload;
} tRrmNeighborReq, *tpRrmNeighborReq;
#endif /* #if !defined( __SMERRMINTERNAL_H ) */
diff --git a/drivers/staging/qcacld-3.0/core/sme/src/common/sme_api.c b/drivers/staging/qcacld-3.0/core/sme/src/common/sme_api.c
index 61c494ca26b7..3de390a118f6 100644
--- a/drivers/staging/qcacld-3.0/core/sme/src/common/sme_api.c
+++ b/drivers/staging/qcacld-3.0/core/sme/src/common/sme_api.c
@@ -3303,13 +3303,15 @@ QDF_STATUS sme_scan_request(tHalHandle hal, uint8_t session_id,
}
if (!mac_ctx->scan.fScanEnable) {
- sme_err("fScanEnable false");
+ sme_err_rate_limited(SME_SCAN_REJECT_RATE_LIMIT,
+ "fScanEnable false");
return status;
}
scan_count = csr_ll_count(&mac_ctx->sme.smeScanCmdActiveList);
if (scan_count >= mac_ctx->scan.max_scan_count) {
- sme_err("Max scan reached");
+ sme_err_rate_limited(SME_SCAN_REJECT_RATE_LIMIT,
+ "Max scan reached");
return QDF_STATUS_E_FAILURE;
}
@@ -3817,17 +3819,39 @@ eCsrPhyMode sme_get_phy_mode(tHalHandle hHal)
* sme_get_channel_bonding_mode5_g() - get the channel bonding mode for 5G band
*
* @hHal - HAL handle
+ * @mode - channel bonding mode
*
- * Return channel bonding mode for 5G
+ * Return QDF_STATUS
*/
-uint32_t sme_get_channel_bonding_mode5_g(tHalHandle hHal)
+QDF_STATUS sme_get_channel_bonding_mode5_g(tHalHandle hHal, uint32_t *mode)
{
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
- tSmeConfigParams smeConfig;
+ tSmeConfigParams *smeConfig;
+
+ if (!mode) {
+ QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
+ "%s: invalid mode", __func__);
+ return QDF_STATUS_E_FAILURE;
+ }
- sme_get_config_param(pMac, &smeConfig);
+ smeConfig = qdf_mem_malloc(sizeof(*smeConfig));
+ if (!smeConfig) {
+ QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
+ "%s: failed to alloc smeConfig", __func__);
+ return QDF_STATUS_E_NOMEM;
+ }
- return smeConfig.csrConfig.channelBondingMode5GHz;
+ if (sme_get_config_param(pMac, smeConfig) != QDF_STATUS_SUCCESS) {
+ QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
+ "%s: sme_get_config_param failed", __func__);
+ qdf_mem_free(smeConfig);
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ *mode = smeConfig->csrConfig.channelBondingMode5GHz;
+ qdf_mem_free(smeConfig);
+
+ return QDF_STATUS_SUCCESS;
}
/**
@@ -3835,16 +3859,39 @@ uint32_t sme_get_channel_bonding_mode5_g(tHalHandle hHal)
* band
*
* hHal - HAL handle
- * Return channel bonding mode for 2.4G
+ * @mode - channel bonding mode
+ *
+ * Return QDF_STATUS
*/
-uint32_t sme_get_channel_bonding_mode24_g(tHalHandle hHal)
+QDF_STATUS sme_get_channel_bonding_mode24_g(tHalHandle hHal, uint32_t *mode)
{
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
- tSmeConfigParams smeConfig;
+ tSmeConfigParams *smeConfig;
+
+ if (!mode) {
+ QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
+ "%s: invalid mode", __func__);
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ smeConfig = qdf_mem_malloc(sizeof(*smeConfig));
+ if (!smeConfig) {
+ QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
+ "%s: failed to alloc smeConfig", __func__);
+ return QDF_STATUS_E_NOMEM;
+ }
+
+ if (sme_get_config_param(pMac, smeConfig) != QDF_STATUS_SUCCESS) {
+ QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
+ "%s: sme_get_config_param failed", __func__);
+ qdf_mem_free(smeConfig);
+ return QDF_STATUS_E_FAILURE;
+ }
- sme_get_config_param(pMac, &smeConfig);
+ *mode = smeConfig->csrConfig.channelBondingMode24GHz;
+ qdf_mem_free(smeConfig);
- return smeConfig.csrConfig.channelBondingMode24GHz;
+ return QDF_STATUS_SUCCESS;
}
/**
@@ -5801,6 +5848,13 @@ QDF_STATUS sme_neighbor_report_request(tHalHandle hHal, uint8_t sessionId,
TRACE_CODE_SME_RX_HDD_NEIGHBOR_REPORTREQ, NO_SESSION,
0));
+ if (pRrmNeighborReq->neighbor_report_offload) {
+ status = csr_invoke_neighbor_report_request(sessionId,
+ pRrmNeighborReq,
+ false);
+ return status;
+ }
+
if (QDF_STATUS_SUCCESS == sme_acquire_global_lock(&pMac->sme)) {
status =
sme_rrm_neighbor_report_request(hHal, sessionId,
@@ -6302,12 +6356,12 @@ QDF_STATUS sme_set_host_offload(tHalHandle hHal, uint8_t sessionId,
}
QDF_STATUS sme_conf_hw_filter_mode(tHalHandle hal, uint8_t session_id,
- uint8_t mode_bitmap)
+ uint8_t mode_bitmap, bool filter_enable)
{
tpAniSirGlobal pMac = PMAC_STRUCT(hal);
QDF_STATUS status;
tCsrRoamSession *session;
- struct hw_filter_request *req;
+ struct wmi_hw_filter_req_params *req;
cds_msg_t msg;
status = sme_acquire_global_lock(&pMac->sme);
@@ -6330,6 +6384,8 @@ QDF_STATUS sme_conf_hw_filter_mode(tHalHandle hal, uint8_t session_id,
return QDF_STATUS_E_NOMEM;
}
+ req->vdev_id = session_id;
+ req->enable = filter_enable;
req->mode_bitmap = mode_bitmap;
qdf_copy_macaddr(&req->bssid, &session->connectedProfile.bssid);
@@ -9220,7 +9276,7 @@ QDF_STATUS sme_stop_roaming(tHalHandle hal, uint8_t session_id, uint8_t reason)
if (reason == eCsrForcedDisassoc)
req->reason = REASON_ROAM_STOP_ALL;
else
- req->reason = REASON_ROAM_SYNCH_FAILED;
+ req->reason = REASON_SME_ISSUED;
req->sessionId = session_id;
if (csr_neighbor_middle_of_roaming(mac_ctx, session_id))
req->middle_of_roaming = 1;
@@ -11639,6 +11695,22 @@ void sme_set_prefer_80MHz_over_160MHz(tHalHandle hal,
mac_ctx->sta_prefer_80MHz_over_160MHz = sta_prefer_80MHz_over_160MHz;
}
+void sme_set_etsi_srd_ch_in_master_mode(tHalHandle hal,
+ bool etsi_srd_chan_support)
+{
+ tpAniSirGlobal mac;
+
+ if (NULL == hal) {
+ QDF_TRACE(QDF_MODULE_ID_SAP, QDF_TRACE_LEVEL_ERROR,
+ "%s: Invalid hal pointer", __func__);
+ return;
+ }
+
+ mac = PMAC_STRUCT(hal);
+ mac->sap.enable_etsi_srd_chan_support = etsi_srd_chan_support;
+ sme_debug("srd_ch_support %d", mac->sap.enable_etsi_srd_chan_support);
+}
+
/**
* sme_set_allow_adj_ch_bcn() - API to set allow_adj_ch_bcn
* @hal: The handle returned by macOpen
@@ -12344,7 +12416,8 @@ void active_list_cmd_timeout_handle(void *userData)
cds_trigger_recovery(CDS_ACTIVE_LIST_TIMEOUT);
} else {
if (!(cds_is_load_or_unload_in_progress() ||
- cds_is_driver_recovering() || cds_is_driver_in_bad_state()))
+ cds_is_driver_recovering() ||
+ cds_is_driver_in_bad_state() || cds_is_fw_down()))
QDF_BUG(0);
else
QDF_ASSERT(0);
@@ -16284,9 +16357,12 @@ void sme_update_tgt_services(tHalHandle hal, struct wma_tgt_services *cfg)
mac_ctx->pmf_offload = cfg->pmf_offload;
mac_ctx->is_fils_roaming_supported =
cfg->is_fils_roaming_supported;
+ mac_ctx->is_11k_offload_supported =
+ cfg->is_11k_offload_supported;
QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_DEBUG,
- FL("mac_ctx->pmf_offload: %d fils_roam support %d"),
- mac_ctx->pmf_offload, mac_ctx->is_fils_roaming_supported);
+ FL("pmf_offload: %d fils_roam support %d 11k_offload %d"),
+ mac_ctx->pmf_offload, mac_ctx->is_fils_roaming_supported,
+ mac_ctx->is_11k_offload_supported);
}
@@ -16658,14 +16734,7 @@ void sme_send_disassoc_req_frame(tHalHandle hal, uint8_t session_id,
FL("cds_send_mb_message Failed"));
}
-/**
- * sme_get_bpf_offload_capabilities() - Get length for BPF offload
- * @hal: Global HAL handle
- * This function constructs the cds message and fill in message type,
- * post the same to WDA.
- * Return: QDF_STATUS enumeration
- */
-QDF_STATUS sme_get_bpf_offload_capabilities(tHalHandle hal)
+QDF_STATUS sme_get_apf_capabilities(tHalHandle hal)
{
QDF_STATUS status = QDF_STATUS_SUCCESS;
tpAniSirGlobal mac_ctx = PMAC_STRUCT(hal);
@@ -16677,11 +16746,11 @@ QDF_STATUS sme_get_bpf_offload_capabilities(tHalHandle hal)
if (QDF_STATUS_SUCCESS == status) {
/* Serialize the req through MC thread */
cds_msg.bodyptr = NULL;
- cds_msg.type = WDA_BPF_GET_CAPABILITIES_REQ;
+ cds_msg.type = WDA_APF_GET_CAPABILITIES_REQ;
status = cds_mq_post_message(QDF_MODULE_ID_WMA, &cds_msg);
if (!QDF_IS_STATUS_SUCCESS(status)) {
QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
- FL("Post bpf get offload msg fail"));
+ FL("Post apf get offload msg fail"));
status = QDF_STATUS_E_FAILURE;
}
sme_release_global_lock(&mac_ctx->sme);
@@ -16694,21 +16763,13 @@ QDF_STATUS sme_get_bpf_offload_capabilities(tHalHandle hal)
return status;
}
-
-/**
- * sme_set_bpf_instructions() - Set BPF bpf filter instructions.
- * @hal: HAL handle
- * @bpf_set_offload: struct to set bpf filter instructions.
- *
- * Return: QDF_STATUS enumeration.
- */
-QDF_STATUS sme_set_bpf_instructions(tHalHandle hal,
- struct sir_bpf_set_offload *req)
+QDF_STATUS sme_set_apf_instructions(tHalHandle hal,
+ struct sir_apf_set_offload *req)
{
QDF_STATUS status = QDF_STATUS_SUCCESS;
tpAniSirGlobal mac_ctx = PMAC_STRUCT(hal);
cds_msg_t cds_msg;
- struct sir_bpf_set_offload *set_offload;
+ struct sir_apf_set_offload *set_offload;
set_offload = qdf_mem_malloc(sizeof(*set_offload) +
req->current_length);
@@ -16734,12 +16795,12 @@ QDF_STATUS sme_set_bpf_instructions(tHalHandle hal,
if (QDF_STATUS_SUCCESS == status) {
/* Serialize the req through MC thread */
cds_msg.bodyptr = set_offload;
- cds_msg.type = WDA_BPF_SET_INSTRUCTIONS_REQ;
+ cds_msg.type = WDA_APF_SET_INSTRUCTIONS_REQ;
status = cds_mq_post_message(QDF_MODULE_ID_WMA, &cds_msg);
if (!QDF_IS_STATUS_SUCCESS(status)) {
QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
- FL("Post BPF set offload msg fail"));
+ FL("Post APF set offload msg fail"));
status = QDF_STATUS_E_FAILURE;
qdf_mem_free(set_offload);
}
@@ -16752,26 +16813,105 @@ QDF_STATUS sme_set_bpf_instructions(tHalHandle hal,
return status;
}
-/**
- * sme_bpf_offload_register_callback() - Register get bpf offload callbacK
- *
- * @hal - MAC global handle
- * @callback_routine - callback routine from HDD
- *
- * This API is invoked by HDD to register its callback in SME
- *
- * Return: QDF_STATUS
- */
-QDF_STATUS sme_bpf_offload_register_callback(tHalHandle hal,
- void (*pbpf_get_offload_cb)(void *context,
- struct sir_bpf_get_offload *))
+QDF_STATUS sme_set_apf_enable_disable(tHalHandle hal, uint8_t vdev_id,
+ bool apf_enable)
+{
+ void *wma_handle;
+
+ wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
+ if (!wma_handle) {
+ QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
+ "wma handle is NULL");
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ return wma_send_apf_enable_cmd(wma_handle, vdev_id, apf_enable);
+}
+
+QDF_STATUS
+sme_apf_write_work_memory(tHalHandle hal,
+ struct wmi_apf_write_memory_params *write_params)
+{
+ void *wma_handle;
+
+ wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
+ if (!wma_handle) {
+ QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
+ "wma handle is NULL");
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ return wma_send_apf_write_work_memory_cmd(wma_handle, write_params);
+}
+
+QDF_STATUS
+sme_apf_read_work_memory(tHalHandle hal,
+ struct wmi_apf_read_memory_params *read_params)
+{
+ void *wma_handle;
+
+ wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
+ if (!wma_handle) {
+ QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
+ "wma handle is NULL");
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ return wma_send_apf_read_work_memory_cmd(wma_handle, read_params);
+}
+
+QDF_STATUS sme_apf_read_memory_register_callback(tHalHandle hal,
+ void (*apf_read_mem_cb)(void *context,
+ struct wmi_apf_read_memory_resp_event_params *))
{
QDF_STATUS status = QDF_STATUS_SUCCESS;
tpAniSirGlobal mac = PMAC_STRUCT(hal);
status = sme_acquire_global_lock(&mac->sme);
if (QDF_IS_STATUS_SUCCESS(status)) {
- mac->sme.pbpf_get_offload_cb = pbpf_get_offload_cb;
+ mac->sme.apf_read_mem_cb = apf_read_mem_cb;
+ sme_release_global_lock(&mac->sme);
+ } else {
+ QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
+ FL("sme_acquire_global_lock failed"));
+ }
+ return status;
+}
+
+QDF_STATUS sme_apf_read_memory_deregister_callback(tHalHandle h_hal)
+{
+ QDF_STATUS status = QDF_STATUS_SUCCESS;
+ tpAniSirGlobal mac;
+
+ if (!h_hal) {
+ QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
+ FL("hHal is not valid"));
+ return QDF_STATUS_E_INVAL;
+ }
+
+ mac = PMAC_STRUCT(h_hal);
+
+ status = sme_acquire_global_lock(&mac->sme);
+ if (QDF_IS_STATUS_SUCCESS(status)) {
+ mac->sme.apf_read_mem_cb = NULL;
+ sme_release_global_lock(&mac->sme);
+ } else {
+ QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
+ FL("sme_acquire_global_lock failed"));
+ }
+ return status;
+}
+
+QDF_STATUS sme_apf_offload_register_callback(tHalHandle hal,
+ void (*papf_get_offload_cb)(void *context,
+ struct sir_apf_get_offload *))
+{
+ QDF_STATUS status = QDF_STATUS_SUCCESS;
+ tpAniSirGlobal mac = PMAC_STRUCT(hal);
+
+ status = sme_acquire_global_lock(&mac->sme);
+ if (QDF_IS_STATUS_SUCCESS(status)) {
+ mac->sme.papf_get_offload_cb = papf_get_offload_cb;
sme_release_global_lock(&mac->sme);
} else {
QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
@@ -16795,7 +16935,7 @@ uint32_t sme_get_wni_dot11_mode(tHalHandle hal)
}
/**
- * sme_bpf_offload_deregister_callback() - Register get bpf offload callbacK
+ * sme_apf_offload_deregister_callback() - De-register get apf offload callback
*
* @h_hal - MAC global handle
* @callback_routine - callback routine from HDD
@@ -16804,7 +16944,7 @@ uint32_t sme_get_wni_dot11_mode(tHalHandle hal)
*
* Return: QDF_STATUS Enumeration
*/
-QDF_STATUS sme_bpf_offload_deregister_callback(tHalHandle h_hal)
+QDF_STATUS sme_apf_offload_deregister_callback(tHalHandle h_hal)
{
QDF_STATUS status = QDF_STATUS_SUCCESS;
tpAniSirGlobal mac;
@@ -16819,7 +16959,7 @@ QDF_STATUS sme_bpf_offload_deregister_callback(tHalHandle h_hal)
status = sme_acquire_global_lock(&mac->sme);
if (QDF_IS_STATUS_SUCCESS(status)) {
- mac->sme.pbpf_get_offload_cb = NULL;
+ mac->sme.papf_get_offload_cb = NULL;
sme_release_global_lock(&mac->sme);
} else {
QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_ERROR,
@@ -18900,6 +19040,15 @@ free_action_oui:
action_oui = NULL;
}
+uint32_t sme_unpack_rsn_ie(tHalHandle hal, uint8_t *buf,
+ uint8_t buf_len, tDot11fIERSN *rsn_ie,
+ bool append_ie)
+{
+ tpAniSirGlobal mac_ctx = PMAC_STRUCT(hal);
+
+ return dot11f_unpack_ie_rsn(mac_ctx, buf, buf_len, rsn_ie, append_ie);
+}
+
/**
* sme_destroy_action_oui_info() - destroy all action ouis info
* @pmac: pointer to mac context
@@ -19036,3 +19185,15 @@ void sme_enable_roaming_on_connected_sta(tHalHandle hal)
}
+bool sme_is_sta_key_exchange_in_progress(tHalHandle hal, uint8_t session_id)
+{
+ tpAniSirGlobal mac_ctx = PMAC_STRUCT(hal);
+
+ if (!CSR_IS_SESSION_VALID(mac_ctx, session_id)) {
+ sme_err("Invalid session id: %d", session_id);
+ return false;
+ }
+
+ return CSR_IS_WAIT_FOR_KEY(mac_ctx, session_id);
+}
+
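
sme_apf_read_memory_register_callback() and its deregister counterpart above
follow the same lock-protected callback-slot pattern used elsewhere in SME. A
hedged sketch of how HDD might pair them around an APF work-memory read; the
hdd_* names are illustrative and not part of this patch:

/* Sketch only: assumes the qcacld-3.0 SME headers are available. */
static void hdd_apf_read_mem_done(void *context,
		struct wmi_apf_read_memory_resp_event_params *resp)
{
	/* Completion handling (copying data, waking the waiting ioctl)
	 * would go here; resp layout comes from the WMI headers. */
}

static QDF_STATUS hdd_apf_read_memory(tHalHandle hal,
		struct wmi_apf_read_memory_params *params)
{
	QDF_STATUS status;

	status = sme_apf_read_memory_register_callback(hal,
						       hdd_apf_read_mem_done);
	if (!QDF_IS_STATUS_SUCCESS(status))
		return status;

	status = sme_apf_read_work_memory(hal, params);
	if (!QDF_IS_STATUS_SUCCESS(status))
		sme_apf_read_memory_deregister_callback(hal);

	/* On success, hdd_apf_read_mem_done() runs when the firmware
	 * response arrives; HDD de-registers after consuming it. */
	return status;
}
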
diff --git a/drivers/staging/qcacld-3.0/core/sme/src/common/sme_trace.c b/drivers/staging/qcacld-3.0/core/sme/src/common/sme_trace.c
index 8e95f6a42e2d..bc95c04cdd82 100644
--- a/drivers/staging/qcacld-3.0/core/sme/src/common/sme_trace.c
+++ b/drivers/staging/qcacld-3.0/core/sme/src/common/sme_trace.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -212,7 +212,7 @@ static uint8_t *sme_trace_get_command_string(uint32_t command)
}
}
-static void sme_trace_dump(tpAniSirGlobal mac_ctx, tp_qdf_trace_record record,
+static void sme_trace_dump(void *mac_ctx, tp_qdf_trace_record record,
uint16_t rec_index)
{
switch (record->code) {
@@ -249,7 +249,6 @@ static void sme_trace_dump(tpAniSirGlobal mac_ctx, tp_qdf_trace_record record,
void sme_trace_init(tpAniSirGlobal pMac)
{
- qdf_trace_register(QDF_MODULE_ID_SME, (tp_qdf_trace_cb)
- &sme_trace_dump);
+ qdf_trace_register(QDF_MODULE_ID_SME, &sme_trace_dump);
}
#endif
diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c
index 08aeaca46d75..88aa4ddf4586 100644
--- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c
+++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_roam.c
@@ -109,6 +109,12 @@
#define MAWC_ROAM_RSSI_HIGH_ADJUST_DEFAULT 5
#define MAWC_ROAM_RSSI_LOW_ADJUST_DEFAULT 5
+/*
+ * Neighbor report offload needs to send 0xFFFFFFFF if a particular
+ * parameter is disabled from the ini
+ */
+#define NEIGHBOR_REPORT_PARAM_INVALID (0xFFFFFFFFU)
+
/* Static Type declarations */
static tCsrRoamSession csr_roam_roam_session[CSR_ROAM_SESSION_MAX];
@@ -2387,6 +2393,40 @@ uint32_t csr_convert_phy_cb_state_to_ini_value(ePhyChanBondState phyCbState)
return cbIniValue;
}
+/**
+ * csr_set_11k_offload_config_param() - Update 11k neighbor report config
+ *
+ * @csr_config: pointer to csr_config in MAC context
+ * @param: pointer to config params from HDD
+ *
+ * Return: none
+ */
+static
+void csr_set_11k_offload_config_param(tCsrConfig *csr_config,
+ tCsrConfigParam *param)
+{
+ csr_config->offload_11k_enable_bitmask =
+ param->offload_11k_enable_bitmask;
+ csr_config->neighbor_report_offload.params_bitmask =
+ param->neighbor_report_offload.params_bitmask;
+ csr_config->neighbor_report_offload.time_offset =
+ param->neighbor_report_offload.time_offset;
+ csr_config->neighbor_report_offload.low_rssi_offset =
+ param->neighbor_report_offload.low_rssi_offset;
+ csr_config->neighbor_report_offload.bmiss_count_trigger =
+ param->neighbor_report_offload.bmiss_count_trigger;
+ csr_config->neighbor_report_offload.per_threshold_offset =
+ param->neighbor_report_offload.per_threshold_offset;
+ csr_config->neighbor_report_offload.
+ neighbor_report_cache_timeout =
+ param->neighbor_report_offload.
+ neighbor_report_cache_timeout;
+ csr_config->neighbor_report_offload.
+ max_neighbor_report_req_cap =
+ param->neighbor_report_offload.
+ max_neighbor_report_req_cap;
+}
+
QDF_STATUS csr_change_default_config_param(tpAniSirGlobal pMac,
tCsrConfigParam *pParam)
{
@@ -2708,6 +2748,8 @@ QDF_STATUS csr_change_default_config_param(tpAniSirGlobal pMac,
pMac->roam.configParam.nVhtChannelWidth =
pParam->nVhtChannelWidth;
+ pMac->roam.configParam.enable_subfee_vendor_vhtie =
+ pParam->enable_subfee_vendor_vhtie;
pMac->roam.configParam.enable_txbf_sap_mode =
pParam->enable_txbf_sap_mode;
pMac->roam.configParam.enable2x2 = pParam->enable2x2;
@@ -2911,10 +2953,44 @@ QDF_STATUS csr_change_default_config_param(tpAniSirGlobal pMac,
&pParam->bss_score_params,
sizeof(struct sir_score_config));
+ csr_set_11k_offload_config_param(&pMac->roam.configParam,
+ pParam);
}
return status;
}
+/**
+ * csr_get_11k_offload_config_param() - Get 11k neighbor report config
+ *
+ * @csr_config: pointer to csr_config in MAC context
+ * @param: pointer to config params from HDD
+ *
+ * Return: none
+ */
+static
+void csr_get_11k_offload_config_param(tCsrConfig *csr_config,
+ tCsrConfigParam *param)
+{
+ param->offload_11k_enable_bitmask =
+ csr_config->offload_11k_enable_bitmask;
+ param->neighbor_report_offload.params_bitmask =
+ csr_config->neighbor_report_offload.params_bitmask;
+ param->neighbor_report_offload.time_offset =
+ csr_config->neighbor_report_offload.time_offset;
+ param->neighbor_report_offload.low_rssi_offset =
+ csr_config->neighbor_report_offload.low_rssi_offset;
+ param->neighbor_report_offload.bmiss_count_trigger =
+ csr_config->neighbor_report_offload.bmiss_count_trigger;
+ param->neighbor_report_offload.per_threshold_offset =
+ csr_config->neighbor_report_offload.per_threshold_offset;
+ param->neighbor_report_offload.neighbor_report_cache_timeout =
+ csr_config->neighbor_report_offload.
+ neighbor_report_cache_timeout;
+ param->neighbor_report_offload.max_neighbor_report_req_cap =
+ csr_config->neighbor_report_offload.
+ max_neighbor_report_req_cap;
+}
+
QDF_STATUS csr_get_config_param(tpAniSirGlobal pMac, tCsrConfigParam *pParam)
{
int i;
@@ -2986,6 +3062,8 @@ QDF_STATUS csr_get_config_param(tpAniSirGlobal pMac, tCsrConfigParam *pParam)
&cfg_params->neighborRoamConfig,
sizeof(tCsrNeighborRoamConfigParams));
pParam->nVhtChannelWidth = cfg_params->nVhtChannelWidth;
+ pParam->enable_subfee_vendor_vhtie =
+ cfg_params->enable_subfee_vendor_vhtie;
pParam->enable_txbf_sap_mode =
cfg_params->enable_txbf_sap_mode;
pParam->enableVhtFor24GHz = cfg_params->enableVhtFor24GHz;
@@ -3188,6 +3266,8 @@ QDF_STATUS csr_get_config_param(tpAniSirGlobal pMac, tCsrConfigParam *pParam)
&pMac->roam.configParam.bss_score_params,
sizeof(struct sir_score_config));
+ csr_get_11k_offload_config_param(&pMac->roam.configParam, pParam);
+
return QDF_STATUS_SUCCESS;
}
@@ -3757,8 +3837,6 @@ QDF_STATUS csr_roam_call_callback(tpAniSirGlobal pMac, uint32_t sessionId,
return QDF_STATUS_E_FAILURE;
}
- sme_debug("Received RoamCmdStatus %d with Roam Result %d", u1, u2);
-
if (eCSR_ROAM_ASSOCIATION_COMPLETION == u1 &&
eCSR_ROAM_RESULT_ASSOCIATED == u2 && pRoamInfo) {
sme_debug("Assoc complete result: %d status: %d reason: %d",
@@ -6295,11 +6373,11 @@ static QDF_STATUS csr_roam_save_params(tpAniSirGlobal mac_ctx,
/*
* Calculate the actual length
* version + gp_cipher_suite + pwise_cipher_suite_count
- * + akm_suite_count + reserved + pwise_cipher_suites
+ * + akm_suite_cnt + reserved + pwise_cipher_suites
*/
nIeLen = 8 + 2 + 2
+ (rsnie->pwise_cipher_suite_count * 4)
- + (rsnie->akm_suite_count * 4);
+ + (rsnie->akm_suite_cnt * 4);
if (rsnie->pmkid_count)
/* pmkid */
nIeLen += 2 + rsnie->pmkid_count * 4;
@@ -6311,7 +6389,7 @@ static QDF_STATUS csr_roam_save_params(tpAniSirGlobal mac_ctx,
session_ptr->pWpaRsnRspIE[0] = DOT11F_EID_RSN;
session_ptr->pWpaRsnRspIE[1] = (uint8_t) nIeLen;
- /* copy upto akm_suites */
+ /* copy upto akm_suite */
pIeBuf = session_ptr->pWpaRsnRspIE + 2;
qdf_mem_copy(pIeBuf, &rsnie->version,
sizeof(rsnie->version));
@@ -6328,17 +6406,17 @@ static QDF_STATUS csr_roam_save_params(tpAniSirGlobal mac_ctx,
rsnie->pwise_cipher_suite_count * 4);
pIeBuf += rsnie->pwise_cipher_suite_count * 4;
}
- qdf_mem_copy(pIeBuf, &rsnie->akm_suite_count, 2);
+ qdf_mem_copy(pIeBuf, &rsnie->akm_suite_cnt, 2);
pIeBuf += 2;
- if (rsnie->akm_suite_count) {
- /* copy akm_suites */
- qdf_mem_copy(pIeBuf, rsnie->akm_suites,
- rsnie->akm_suite_count * 4);
- pIeBuf += rsnie->akm_suite_count * 4;
+ if (rsnie->akm_suite_cnt) {
+ /* copy akm_suite */
+ qdf_mem_copy(pIeBuf, rsnie->akm_suite,
+ rsnie->akm_suite_cnt * 4);
+ pIeBuf += rsnie->akm_suite_cnt * 4;
}
/* copy the rest */
- qdf_mem_copy(pIeBuf, rsnie->akm_suites +
- rsnie->akm_suite_count * 4,
+ qdf_mem_copy(pIeBuf, rsnie->akm_suite +
+ rsnie->akm_suite_cnt * 4,
2 + rsnie->pmkid_count * 4);
session_ptr->nWpaRsnRspIeLength = nIeLen + 2;
}
@@ -6858,6 +6936,7 @@ static void csr_roam_process_start_bss_success(tpAniSirGlobal mac_ctx,
QDF_STATUS status;
host_log_ibss_pkt_type *ibss_log;
uint32_t bi;
+ eCsrEncryptionType encr_type;
#ifdef FEATURE_WLAN_MCC_TO_SCC_SWITCH
tSirSmeHTProfile *src_profile = NULL;
tCsrRoamHTProfile *dst_profile = NULL;
@@ -6962,26 +7041,36 @@ static void csr_roam_process_start_bss_success(tpAniSirGlobal mac_ctx,
}
#endif
/*
- * Only set context for non-WDS_STA. We don't even need it for
- * WDS_AP. But since the encryption.
- * is WPA2-PSK so it won't matter.
+ * For static keys (WEP) or the no-security case, send a set
+ * context request to LIM to establish the broadcast STA
+ * context. This is treated as a dummy key installation.
+ *
+ * This is not required for SAP.
*/
- if (CSR_IS_ENC_TYPE_STATIC(profile->negotiatedUCEncryptionType)
- && session->pCurRoamProfile
- && !CSR_IS_INFRA_AP(session->pCurRoamProfile)) {
+ encr_type = profile->negotiatedUCEncryptionType;
+ if (CSR_IS_ENC_TYPE_STATIC(encr_type) && session->pCurRoamProfile
+ && !CSR_IS_INFRA_AP(session->pCurRoamProfile)
+ && !CSR_IS_IBSS(session->pCurRoamProfile)) {
/*
- * Issue the set Context request to LIM to establish
- * the Broadcast STA context for the Ibss. In Rome IBSS
- * case, dummy key installation will break proper BSS
- * key installation, so skip it.
+ * In Rome IBSS case, dummy key installation will break
+ * proper BSS key installation, so skip it.
*/
- if (!CSR_IS_IBSS(session->pCurRoamProfile)) {
- /* NO keys. these key parameters don't matter */
- csr_roam_issue_set_context_req(mac_ctx,
- session_id,
- profile->negotiatedMCEncryptionType,
- bss_desc, &bcast_mac, false,
- false, eSIR_TX_RX, 0, 0, NULL, 0);
+ csr_roam_issue_set_context_req(mac_ctx,
+ session_id,
+ profile->negotiatedMCEncryptionType,
+ bss_desc, &bcast_mac, false,
+ false, eSIR_TX_RX, 0, 0, NULL, 0);
+ }
+ if (session->pCurRoamProfile && CSR_IS_IBSS(session->pCurRoamProfile)) {
+ switch (encr_type) {
+ case eCSR_ENCRYPT_TYPE_WEP40_STATICKEY:
+ case eCSR_ENCRYPT_TYPE_WEP104_STATICKEY:
+ case eCSR_ENCRYPT_TYPE_TKIP:
+ case eCSR_ENCRYPT_TYPE_AES:
+ roam_info.fAuthRequired = true;
+ break;
+ default:
+ break;
}
}
/*
@@ -8011,6 +8100,8 @@ QDF_STATUS csr_roam_copy_profile(tpAniSirGlobal pMac,
}
pDstProfile->chan_switch_hostapd_rate_enabled =
pSrcProfile->chan_switch_hostapd_rate_enabled;
+
+ pDstProfile->force_rsne_override = pSrcProfile->force_rsne_override;
end:
if (!QDF_IS_STATUS_SUCCESS(status)) {
csr_release_profile(pMac, pDstProfile);
@@ -10240,6 +10331,9 @@ void csr_roaming_state_msg_processor(tpAniSirGlobal pMac, void *pMsgBuf)
sme_err("pGetRssiReq->rssiCallback is NULL");
}
break;
+ case eWNI_SME_SETCONTEXT_RSP:
+ csr_roam_check_for_link_status_change(pMac, pSmeRsp);
+ break;
default:
sme_debug("Unexpected message type: %d[0x%X] received in substate %s",
pSmeRsp->messageType, pSmeRsp->messageType,
@@ -10966,9 +11060,33 @@ csr_roam_prepare_filter_from_profile(tpAniSirGlobal mac_ctx,
goto free_filter;
}
scan_fltr->uapsd_mask = profile->uapsd_mask;
- scan_fltr->authType = profile->AuthType;
- scan_fltr->EncryptionType = profile->EncryptionType;
- scan_fltr->mcEncryptionType = profile->mcEncryptionType;
+ if (profile->force_rsne_override) {
+ sme_debug("force_rsne_override enabled fill all auth type and enctype");
+
+ scan_fltr->authType.numEntries = eCSR_NUM_OF_SUPPORT_AUTH_TYPE;
+ for (i = 0; i < scan_fltr->authType.numEntries; i++)
+ scan_fltr->authType.authType[i] = i;
+
+ idx = 0;
+ for (i = 0; i < eCSR_NUM_OF_ENCRYPT_TYPE; i++) {
+ if (i == eCSR_ENCRYPT_TYPE_TKIP ||
+ i == eCSR_ENCRYPT_TYPE_AES ||
+ i == eCSR_ENCRYPT_TYPE_AES_GCMP ||
+ i == eCSR_ENCRYPT_TYPE_AES_GCMP_256) {
+ scan_fltr->
+ EncryptionType.encryptionType[idx] = i;
+ scan_fltr->
+ mcEncryptionType.encryptionType[idx] = i;
+ idx++;
+ }
+ }
+ scan_fltr->EncryptionType.numEntries = idx;
+ scan_fltr->mcEncryptionType.numEntries = idx;
+ } else {
+ scan_fltr->authType = profile->AuthType;
+ scan_fltr->EncryptionType = profile->EncryptionType;
+ scan_fltr->mcEncryptionType = profile->mcEncryptionType;
+ }
scan_fltr->BSSType = profile->BSSType;
scan_fltr->phyMode = profile->phyMode;
#ifdef FEATURE_WLAN_WAPI
@@ -15028,6 +15146,8 @@ QDF_STATUS csr_send_join_req_msg(tpAniSirGlobal pMac, uint32_t sessionId,
csr_retrieve_rsn_ie(pMac, sessionId, pProfile,
pBssDescription, pIes,
(tCsrRSNIe *) (wpaRsnIE));
+ csr_join_req->force_rsne_override =
+ pProfile->force_rsne_override;
}
#ifdef FEATURE_WLAN_WAPI
else if (csr_is_profile_wapi(pProfile)) {
@@ -18668,6 +18788,143 @@ csr_create_roam_scan_offload_request(tpAniSirGlobal mac_ctx,
#endif
return req_buf;
}
+
+/**
+ * csr_update_11k_offload_params - Update 11K offload params
+ * @mac_ctx: MAC context
+ * @session: Pointer to the CSR Roam Session
+ * @req_buffer: Pointer to the RSO Request buffer
+ *
+ * API to update 11k offload params to Roam Scan Offload request buffer
+ *
+ * Return: none
+ */
+static void csr_update_11k_offload_params(tpAniSirGlobal mac_ctx,
+ tCsrRoamSession *session,
+ tSirRoamOffloadScanReq *req_buffer)
+{
+ struct wmi_11k_offload_params *params = &req_buffer->offload_11k_params;
+ tCsrConfig *csr_config = &mac_ctx->roam.configParam;
+ struct csr_neighbor_report_offload_params *neighbor_report_offload =
+ &csr_config->neighbor_report_offload;
+
+ params->vdev_id = session->sessionId;
+ params->offload_11k_bitmask = csr_config->offload_11k_enable_bitmask;
+
+ /*
+ * If none of the parameters are enabled, set the
+ * offload_11k_bitmask to 0 so that WMA drops the command
+ * instead of sending it to the FW
+ */
+ if ((neighbor_report_offload->params_bitmask &
+ NEIGHBOR_REPORT_PARAMS_ALL) == 0) {
+ sme_err("No valid neighbor report offload params %x",
+ neighbor_report_offload->params_bitmask);
+ params->offload_11k_bitmask = 0;
+ }
+
+ /*
+ * First initialize all params to NEIGHBOR_REPORT_PARAM_INVALID
+ * Then set the values that are enabled
+ */
+ params->neighbor_report_params.time_offset =
+ NEIGHBOR_REPORT_PARAM_INVALID;
+ params->neighbor_report_params.low_rssi_offset =
+ NEIGHBOR_REPORT_PARAM_INVALID;
+ params->neighbor_report_params.bmiss_count_trigger =
+ NEIGHBOR_REPORT_PARAM_INVALID;
+ params->neighbor_report_params.per_threshold_offset =
+ NEIGHBOR_REPORT_PARAM_INVALID;
+ params->neighbor_report_params.neighbor_report_cache_timeout =
+ NEIGHBOR_REPORT_PARAM_INVALID;
+ params->neighbor_report_params.max_neighbor_report_req_cap =
+ NEIGHBOR_REPORT_PARAM_INVALID;
+
+ if (neighbor_report_offload->params_bitmask &
+ NEIGHBOR_REPORT_PARAMS_TIME_OFFSET)
+ params->neighbor_report_params.time_offset =
+ neighbor_report_offload->time_offset;
+
+ if (neighbor_report_offload->params_bitmask &
+ NEIGHBOR_REPORT_PARAMS_LOW_RSSI_OFFSET)
+ params->neighbor_report_params.low_rssi_offset =
+ neighbor_report_offload->low_rssi_offset;
+
+ if (neighbor_report_offload->params_bitmask &
+ NEIGHBOR_REPORT_PARAMS_BMISS_COUNT_TRIGGER)
+ params->neighbor_report_params.bmiss_count_trigger =
+ neighbor_report_offload->bmiss_count_trigger;
+
+ if (neighbor_report_offload->params_bitmask &
+ NEIGHBOR_REPORT_PARAMS_PER_THRESHOLD_OFFSET)
+ params->neighbor_report_params.per_threshold_offset =
+ neighbor_report_offload->per_threshold_offset;
+
+ if (neighbor_report_offload->params_bitmask &
+ NEIGHBOR_REPORT_PARAMS_CACHE_TIMEOUT)
+ params->neighbor_report_params.neighbor_report_cache_timeout =
+ neighbor_report_offload->neighbor_report_cache_timeout;
+
+ if (neighbor_report_offload->params_bitmask &
+ NEIGHBOR_REPORT_PARAMS_MAX_REQ_CAP)
+ params->neighbor_report_params.max_neighbor_report_req_cap =
+ neighbor_report_offload->max_neighbor_report_req_cap;
+
+ params->neighbor_report_params.ssid.length =
+ session->connectedProfile.SSID.length;
+ qdf_mem_copy(params->neighbor_report_params.ssid.mac_ssid,
+ session->connectedProfile.SSID.ssId,
+ session->connectedProfile.SSID.length);
+
+ sme_debug("Updated 11k offload params to RSO");
+}
+
+QDF_STATUS csr_invoke_neighbor_report_request(uint8_t session_id,
+ struct sRrmNeighborReq *neighbor_report_req,
+ bool send_resp_to_host)
+{
+ struct wmi_invoke_neighbor_report_params *invoke_params;
+ cds_msg_t msg = {0};
+
+ if (!neighbor_report_req) {
+ sme_err("Invalid params");
+ return QDF_STATUS_E_INVAL;
+ }
+
+ invoke_params = qdf_mem_malloc(sizeof(*invoke_params));
+ if (!invoke_params) {
+ sme_err("Memory allocation failure");
+ return QDF_STATUS_E_NOMEM;
+ }
+
+ invoke_params->vdev_id = session_id;
+ invoke_params->send_resp_to_host = send_resp_to_host;
+
+ if (!neighbor_report_req->no_ssid) {
+ invoke_params->ssid.length = neighbor_report_req->ssid.length;
+ qdf_mem_copy(invoke_params->ssid.mac_ssid,
+ neighbor_report_req->ssid.ssId,
+ neighbor_report_req->ssid.length);
+ } else {
+ invoke_params->ssid.length = 0;
+ }
+
+ sme_debug("Sending SIR_HAL_INVOKE_NEIGHBOR_REPORT");
+
+ msg.type = SIR_HAL_INVOKE_NEIGHBOR_REPORT;
+ msg.reserved = 0;
+ msg.bodyptr = invoke_params;
+
+ if (QDF_STATUS_SUCCESS !=
+ cds_mq_post_message(QDF_MODULE_ID_WMA, &msg)) {
+ sme_err("Not able to post message to WMA");
+ qdf_mem_free(invoke_params);
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ return QDF_STATUS_SUCCESS;
+}
+
/**
* check_allowed_ssid_list() - Check the WhiteList
* @req_buffer: Buffer which contains the connected profile SSID.
@@ -19580,6 +19837,9 @@ csr_roam_offload_scan(tpAniSirGlobal mac_ctx, uint8_t session_id,
csr_update_driver_assoc_ies(mac_ctx, session, req_buf);
csr_update_fils_params_rso(mac_ctx, session, req_buf);
csr_update_score_params(mac_ctx, req_buf);
+ if (reason == REASON_CTX_INIT)
+ csr_update_11k_offload_params(mac_ctx, session,
+ req_buf);
}
QDF_TRACE(QDF_MODULE_ID_SME, QDF_TRACE_LEVEL_DEBUG,
@@ -21372,6 +21632,13 @@ static QDF_STATUS csr_process_roam_sync_callback(tpAniSirGlobal mac_ctx,
ROAMING_OFFLOAD_TIMER_START);
csr_roam_call_callback(mac_ctx, session_id, NULL, 0,
eCSR_ROAM_START, eCSR_ROAM_RESULT_SUCCESS);
+ /*
+ * Inform HDD about roam start using above callback
+ * which will take care of blocking incoming scan
+ * requests during roaming and then call the below
+ * API to cancel all the active scans.
+ */
+ csr_scan_abort_mac_scan_not_for_connect(mac_ctx, session_id);
return status;
case SIR_ROAMING_ABORT:
csr_roam_roaming_offload_timer_action(mac_ctx,
@@ -21384,10 +21651,6 @@ static QDF_STATUS csr_process_roam_sync_callback(tpAniSirGlobal mac_ctx,
eCSR_ROAM_NAPI_OFF, eCSR_ROAM_RESULT_SUCCESS);
return status;
case SIR_ROAMING_INVOKE_FAIL:
- csr_roam_call_callback(mac_ctx, session_id, NULL, 0,
- eCSR_ROAM_ASSOCIATION_FAILURE,
- eCSR_ROAM_RESULT_INVOKE_FAILED);
-
/* Userspace roam request failed, disconnect with current AP */
sme_debug("LFR3: roam invoke from user-space fail, dis cur AP");
csr_roam_disconnect(mac_ctx, session_id,
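Aside: the csr_update_11k_offload_params() hunk above uses a defensive fill pattern — every neighbor-report field defaults to NEIGHBOR_REPORT_PARAM_INVALID and is overwritten only when its bit is set in params_bitmask, so firmware keeps its own defaults for anything the host did not configure. Below is a minimal standalone sketch of that pattern; all names are hypothetical and not part of this patch.

#include <stdint.h>
#include <stdio.h>

#define PARAM_INVALID      0xDEADBEEFu     /* sentinel, mirrors NEIGHBOR_REPORT_PARAM_INVALID */
#define PARAMS_TIME_OFFSET (1u << 0)       /* hypothetical enable bits */
#define PARAMS_LOW_RSSI    (1u << 1)

struct neighbor_cfg {                      /* hypothetical host-side configuration */
	uint32_t bitmask;
	uint32_t time_offset;
	uint32_t low_rssi_offset;
};

struct fw_params {                         /* hypothetical structure handed to firmware */
	uint32_t time_offset;
	uint32_t low_rssi_offset;
};

static void fill_fw_params(const struct neighbor_cfg *cfg, struct fw_params *out)
{
	/* default every field to the sentinel so firmware keeps its own value */
	out->time_offset = PARAM_INVALID;
	out->low_rssi_offset = PARAM_INVALID;

	/* overwrite only the fields whose enable bit is set */
	if (cfg->bitmask & PARAMS_TIME_OFFSET)
		out->time_offset = cfg->time_offset;
	if (cfg->bitmask & PARAMS_LOW_RSSI)
		out->low_rssi_offset = cfg->low_rssi_offset;
}

int main(void)
{
	struct neighbor_cfg cfg = { PARAMS_TIME_OFFSET, 30, 10 };
	struct fw_params out;

	fill_fw_params(&cfg, &out);
	printf("time_offset=%u low_rssi=0x%x\n", out.time_offset, out.low_rssi_offset);
	return 0;
}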
diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_scan.c b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_scan.c
index f8eb087765a8..990b35a055b1 100644
--- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_scan.c
+++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_api_scan.c
@@ -1923,7 +1923,7 @@ static int32_t csr_calculate_pcl_score(tpAniSirGlobal mac_ctx,
{
int32_t pcl_score = 0;
int32_t score = 0;
- int32_t temp_pcl_chan_weight = 0;
+ uint64_t temp_pcl_chan_weight = 0;
if (pcl_chan_weight) {
temp_pcl_chan_weight =
@@ -5108,8 +5108,6 @@ csr_scan_remove_dup_bss_description_from_interim_list(tpAniSirGlobal mac_ctx,
* in, then these must be duplicate scan results for this Bss. In that
* case, remove the 'old' Bss description from the linked list.
*/
- sme_debug("for BSS " MAC_ADDRESS_STR " ",
- MAC_ADDR_ARRAY(bss_dscp->bssId));
csr_ll_lock(&mac_ctx->scan.tempScanResults);
pEntry = csr_ll_peek_head(&mac_ctx->scan.tempScanResults,
LL_ACCESS_NOLOCK);
@@ -5217,19 +5215,12 @@ static struct tag_csrscan_result *csr_scan_save_bss_description_to_interim_list(
cbAllocated = sizeof(struct tag_csrscan_result) + cbBSSDesc;
- sme_debug("new BSS description, length %d, cbBSSDesc %d",
- cbAllocated, cbBSSDesc);
pCsrBssDescription = qdf_mem_malloc(cbAllocated);
if (NULL != pCsrBssDescription) {
qdf_mem_copy(&pCsrBssDescription->Result.BssDescriptor,
pBSSDescription, cbBSSDesc);
pCsrBssDescription->AgingCount =
(int32_t) pMac->roam.configParam.agingCount;
- sme_debug(
- "Set Aging Count = %d for BSS " MAC_ADDRESS_STR " ",
- pCsrBssDescription->AgingCount,
- MAC_ADDR_ARRAY(pCsrBssDescription->Result.BssDescriptor.
- bssId));
/* Save SSID separately for later use */
if (pIes->SSID.present
&& !csr_is_nullssid(pIes->SSID.ssid, pIes->SSID.num_ssid)) {
@@ -5491,7 +5482,6 @@ QDF_STATUS csr_scan_process_single_bssdescr(tpAniSirGlobal mac_ctx,
bool is_hiddenap_probersp_entry_present = false;
session_id = csr_scan_get_session_id(mac_ctx);
- sme_debug("CSR: Processing single bssdescr");
if (QDF_IS_STATUS_SUCCESS(
csr_get_cfg_valid_channels(mac_ctx,
(uint8_t *) mac_ctx->roam.validChannelList,
diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_neighbor_roam.c b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_neighbor_roam.c
index d43fa9765066..8dd6bda7ef6c 100644
--- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_neighbor_roam.c
+++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_neighbor_roam.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -1041,9 +1041,9 @@ static void csr_neighbor_roam_info_ctx_init(
qdf_mem_free(pMac->roam.pReassocResp);
pMac->roam.pReassocResp = NULL;
}
- } else {
+ } else
#endif
-
+ {
csr_roam_offload_scan(pMac, session_id,
ROAM_SCAN_OFFLOAD_START,
REASON_CTX_INIT);
@@ -1055,9 +1055,7 @@ static void csr_neighbor_roam_info_ctx_init(
ROAM_SCAN_OFFLOAD_STOP,
REASON_SUPPLICANT_DISABLED_ROAMING);
}
-#ifdef WLAN_FEATURE_ROAM_OFFLOAD
}
-#endif
}
}
@@ -1140,7 +1138,6 @@ QDF_STATUS csr_neighbor_roam_indicate_connect(
csr_neighbor_roam_reset_init_state_control_info(pMac,
session_id);
csr_neighbor_roam_info_ctx_init(pMac, session_id);
-
return status;
}
#endif
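Aside: the brace rearrangement in csr_neighbor_roam_info_ctx_init() above relies on a small preprocessor trick — the "} else" lives inside the WLAN_FEATURE_ROAM_OFFLOAD block while the body that follows is compiled unconditionally, so both build variants share one copy of the offload-scan code. A toy illustration of the same arrangement, with a hypothetical feature flag that is not part of this patch:

#include <stdio.h>

#define FEATURE_X 1

static void do_work(int fast_path)
{
#ifdef FEATURE_X
	if (fast_path) {
		printf("feature-specific fast path\n");
	} else
#endif
	{
		/* compiled whether or not FEATURE_X is defined */
		printf("common path\n");
	}
}

int main(void)
{
	do_work(0);	/* common path */
	do_work(1);	/* fast path when FEATURE_X is defined */
	return 0;
}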
diff --git a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_util.c b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_util.c
index 14ef6a1ff31e..4cb9eb474836 100644
--- a/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_util.c
+++ b/drivers/staging/qcacld-3.0/core/sme/src/csr/csr_util.c
@@ -2871,10 +2871,11 @@ static bool csr_get_rsn_information(tHalHandle hal, tCsrAuthList *auth_type,
CSR_RSN_OUI_SIZE);
c_ucast_cipher =
(uint8_t) (rsn_ie->pwise_cipher_suite_count);
- c_auth_suites = (uint8_t) (rsn_ie->akm_suite_count);
+
+ c_auth_suites = (uint8_t) (rsn_ie->akm_suite_cnt);
for (i = 0; i < c_auth_suites && i < CSR_RSN_MAX_AUTH_SUITES; i++) {
qdf_mem_copy((void *)&authsuites[i],
- (void *)&rsn_ie->akm_suites[i], CSR_RSN_OUI_SIZE);
+ (void *)&rsn_ie->akm_suite[i], CSR_RSN_OUI_SIZE);
}
/* Check - Is requested unicast Cipher supported by the BSS. */
@@ -3325,6 +3326,7 @@ uint8_t csr_construct_rsn_ie(tHalHandle hHal, uint32_t sessionId,
tSirBssDescription *pSirBssDesc,
tDot11fBeaconIEs *pIes, tCsrRSNIe *pRSNIe)
{
+ uint32_t ret;
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
bool fRSNMatch;
uint8_t cbRSNIe = 0;
@@ -3340,6 +3342,7 @@ uint8_t csr_construct_rsn_ie(tHalHandle hHal, uint32_t sessionId,
#endif
tDot11fBeaconIEs *pIesLocal = pIes;
eCsrAuthType negAuthType = eCSR_AUTH_TYPE_UNKNOWN;
+ tDot11fIERSN rsn_ie = {0};
qdf_mem_zero(&pmkid_cache, sizeof(pmkid_cache));
do {
@@ -3353,6 +3356,25 @@ uint8_t csr_construct_rsn_ie(tHalHandle hHal, uint32_t sessionId,
(pMac, pSirBssDesc, &pIesLocal)))) {
break;
}
+
+ /*
+ * Use intersection of the RSN cap sent by user space and
+ * the AP, so that only common capability are enabled.
+ */
+ if (pProfile->pRSNReqIE && pProfile->nRSNReqIELength) {
+ ret = dot11f_unpack_ie_rsn(pMac,
+					pProfile->pRSNReqIE + 2,
+					pProfile->nRSNReqIELength - 2,
+					&rsn_ie, false);
+ if (DOT11F_SUCCEEDED(ret)) {
+ pIesLocal->RSN.RSN_Cap[0] =
+ pIesLocal->RSN.RSN_Cap[0] &
+ rsn_ie.RSN_Cap[0];
+ pIesLocal->RSN.RSN_Cap[1] =
+ pIesLocal->RSN.RSN_Cap[1] &
+ rsn_ie.RSN_Cap[1];
+ }
+ }
/* See if the cyphers in the Bss description match with the
* settings in the profile.
*/
@@ -3385,14 +3407,12 @@ uint8_t csr_construct_rsn_ie(tHalHandle hHal, uint32_t sessionId,
qdf_mem_copy(&pAuthSuite->AuthOui[0], AuthSuite,
sizeof(AuthSuite));
- /* RSN capabilities follows the Auth Suite (two octects)
- * !!REVIEW - What should STA put in RSN capabilities, currently
- * just putting back APs capabilities For one, we shouldn't
- * EVER be sending out "pre-auth supported". It is an AP only
- * capability For another, we should use the Management Frame
- * Protection values given by the supplicant
- */
+ /* PreAuthSupported is an AP only capability */
RSNCapabilities.PreAuthSupported = 0;
+ /*
+ * Use the Management Frame Protection values given by the
+ * supplicant, if AP and STA both are MFP capable.
+ */
#ifdef WLAN_FEATURE_11W
if (RSNCapabilities.MFPCapable && pProfile->MFPCapable) {
RSNCapabilities.MFPCapable = pProfile->MFPCapable;
@@ -4078,6 +4098,22 @@ uint8_t csr_retrieve_rsn_ie(tHalHandle hHal, uint32_t sessionId,
do {
if (!csr_is_profile_rsn(pProfile))
break;
+ /* copy RSNIE from user as it is if test mode is enabled */
+ if (pProfile->force_rsne_override &&
+ pProfile->nRSNReqIELength && pProfile->pRSNReqIE) {
+ sme_debug("force_rsne_override, copy RSN IE provided by user");
+ if (pProfile->nRSNReqIELength <=
+ DOT11F_IE_RSN_MAX_LEN) {
+ cbRsnIe = (uint8_t) pProfile->nRSNReqIELength;
+ qdf_mem_copy(pRsnIe, pProfile->pRSNReqIE,
+ cbRsnIe);
+ } else {
+ sme_warn("csr_retrieve_rsn_ie detect invalid RSN IE length (%d)",
+ pProfile->nRSNReqIELength);
+ }
+ break;
+ }
+
if (csr_roam_is_fast_roam_enabled(pMac, sessionId)) {
/* If "Legacy Fast Roaming" is enabled ALWAYS rebuild
* the RSN IE from scratch. So it contains the current
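Aside: the csr_construct_rsn_ie() change above intersects the RSN Capabilities field from the AP's IE with the one supplied by the supplicant (a bitwise AND of the two octets), so only capabilities advertised by both sides survive into the constructed IE. A self-contained sketch of that intersection; the values and helper name are hypothetical:

#include <stdint.h>
#include <stdio.h>

/* Keep only RSN capability bits that both the AP's beacon/probe IE and the
 * supplicant-provided IE advertise, which is the effect of the RSN_Cap[]
 * AND operations in the hunk above.
 */
static void rsn_cap_intersect(uint8_t ap_cap[2], const uint8_t sta_cap[2])
{
	ap_cap[0] &= sta_cap[0];
	ap_cap[1] &= sta_cap[1];
}

int main(void)
{
	uint8_t ap_cap[2]  = { 0xCC, 0x01 };	/* example AP capabilities */
	uint8_t sta_cap[2] = { 0x88, 0x01 };	/* example supplicant capabilities */

	rsn_cap_intersect(ap_cap, sta_cap);
	printf("negotiated RSN caps: %02x %02x\n", ap_cap[0], ap_cap[1]);	/* 88 01 */
	return 0;
}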
diff --git a/drivers/staging/qcacld-3.0/core/sme/src/qos/sme_qos.c b/drivers/staging/qcacld-3.0/core/sme/src/qos/sme_qos.c
index 86f14d897dc8..d9dd9ac9c2cc 100644
--- a/drivers/staging/qcacld-3.0/core/sme/src/qos/sme_qos.c
+++ b/drivers/staging/qcacld-3.0/core/sme/src/qos/sme_qos.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -4940,8 +4940,12 @@ static QDF_STATUS sme_qos_process_handoff_assoc_req_ev(tpAniSirGlobal pMac,
if (csr_roam_is11r_assoc(pMac, sessionId))
pSession->ftHandoffInProgress = true;
#endif
- /* If FT handoff is in progress, legacy handoff need not be enabled */
- if (!pSession->ftHandoffInProgress)
+ /* If FT handoff/ESE in progress, legacy handoff need not be enabled */
+ if (!pSession->ftHandoffInProgress
+#ifdef FEATURE_WLAN_ESE
+ && !csr_roam_is_ese_assoc(pMac, sessionId)
+#endif
+ )
pSession->handoffRequested = true;
/* this session no longer needs UAPSD */
diff --git a/drivers/staging/qcacld-3.0/core/sme/src/rrm/sme_rrm.c b/drivers/staging/qcacld-3.0/core/sme/src/rrm/sme_rrm.c
index cf59e8107e53..38b756042902 100644
--- a/drivers/staging/qcacld-3.0/core/sme/src/rrm/sme_rrm.c
+++ b/drivers/staging/qcacld-3.0/core/sme/src/rrm/sme_rrm.c
@@ -642,14 +642,10 @@ static QDF_STATUS sme_rrm_scan_request_callback(tHalHandle halHandle,
*/
static QDF_STATUS sme_rrm_issue_scan_req(tpAniSirGlobal mac_ctx)
{
- /* Issue scan request. */
- tCsrScanRequest scan_req;
QDF_STATUS status = QDF_STATUS_SUCCESS;
tpRrmSMEContext sme_rrm_ctx = &mac_ctx->rrm.rrmSmeContext;
- uint32_t session_id, scan_req_id;
- uint32_t max_chan_time;
+ uint32_t session_id;
tSirScanType scan_type;
- uint64_t current_time;
status = csr_roam_get_session_id_from_bssid(mac_ctx,
&sme_rrm_ctx->sessionBssId, &session_id);
@@ -674,6 +670,11 @@ static QDF_STATUS sme_rrm_issue_scan_req(tpAniSirGlobal mac_ctx)
if ((eSIR_ACTIVE_SCAN == scan_type) ||
(eSIR_PASSIVE_SCAN == scan_type)) {
+ tCsrScanRequest scan_req;
+ uint32_t scan_req_id;
+ uint32_t max_chan_time;
+ uint64_t current_time;
+
qdf_mem_zero(&scan_req, sizeof(scan_req));
/* set scan_type, active or passive */
scan_req.bcnRptReqScan = true;
diff --git a/drivers/staging/qcacld-3.0/core/utils/epping/src/epping_main.c b/drivers/staging/qcacld-3.0/core/utils/epping/src/epping_main.c
index 14df032dfe0e..76562d7d8b79 100644
--- a/drivers/staging/qcacld-3.0/core/utils/epping/src/epping_main.c
+++ b/drivers/staging/qcacld-3.0/core/utils/epping/src/epping_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -50,6 +50,7 @@
#include "bmi.h"
#include "ol_fw.h"
#include "ol_if_athvar.h"
+#include "wma_api.h"
#include "hif.h"
#include "epping_main.h"
#include "epping_internal.h"
@@ -140,6 +141,7 @@ void epping_disable(void)
"%s: error: htc_handle = NULL", __func__);
return;
}
+ wma_wmi_stop();
htc_stop(htc_handle);
epping_cookie_cleanup(pEpping_ctx);
htc_destroy(htc_handle);
@@ -338,6 +340,7 @@ int epping_enable(struct device *parent_dev)
if (ret < 0 || pEpping_ctx->epping_adapter == NULL) {
EPPING_LOG(QDF_TRACE_LEVEL_FATAL,
"%s: epping_add_adaptererror error", __func__);
+ wma_wmi_stop();
htc_stop(pEpping_ctx->HTCHandle);
epping_cookie_cleanup(pEpping_ctx);
goto error_end;
diff --git a/drivers/staging/qcacld-3.0/core/utils/epping/src/epping_txrx.c b/drivers/staging/qcacld-3.0/core/utils/epping/src/epping_txrx.c
index 659115543a46..e01ef23920ff 100644
--- a/drivers/staging/qcacld-3.0/core/utils/epping/src/epping_txrx.c
+++ b/drivers/staging/qcacld-3.0/core/utils/epping/src/epping_txrx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -144,7 +144,8 @@ end:
}
-static int epping_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t epping_hard_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
epping_adapter_t *pAdapter;
int ret = 0;
@@ -153,12 +154,13 @@ static int epping_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (NULL == pAdapter) {
EPPING_LOG(QDF_TRACE_LEVEL_FATAL,
"%s: EPPING adapter context is Null", __func__);
+ kfree_skb(skb);
ret = -ENODEV;
goto end;
}
ret = epping_tx_send(skb, pAdapter);
end:
- return ret;
+ return NETDEV_TX_OK;
}
static struct net_device_stats *epping_get_stats(struct net_device *dev)
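Aside: the epping_hard_start_xmit() change above follows the ndo_start_xmit contract — the driver owns the skb once called, so an error path must free it, and the handler still returns NETDEV_TX_OK rather than a negative errno (NETDEV_TX_BUSY is reserved for "requeue, the skb was not touched"). A minimal sketch of that contract, assuming an in-kernel build; everything except the core netdev API is hypothetical:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	void *priv = netdev_priv(dev);

	if (!priv) {
		kfree_skb(skb);			/* consume the skb on the error path */
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;		/* not a negative errno */
	}

	/* ... hand the skb to the device's transmit path here ... */
	return NETDEV_TX_OK;
}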
diff --git a/drivers/staging/qcacld-3.0/core/utils/fwlog/dbglog_host.c b/drivers/staging/qcacld-3.0/core/utils/fwlog/dbglog_host.c
index df724889af4a..d7bc16c7b709 100644
--- a/drivers/staging/qcacld-3.0/core/utils/fwlog/dbglog_host.c
+++ b/drivers/staging/qcacld-3.0/core/utils/fwlog/dbglog_host.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -1480,7 +1480,7 @@ static int dbglog_print_raw_data(A_UINT32 *buffer, A_UINT32 length)
char parseArgsString[DBGLOG_PARSE_ARGS_STRING_LENGTH];
char *dbgidString;
- while (count < length) {
+ while ((count + 1) < length) {
debugid = DBGLOG_GET_DBGID(buffer[count + 1]);
moduleid = DBGLOG_GET_MODULEID(buffer[count + 1]);
@@ -1493,6 +1493,9 @@ static int dbglog_print_raw_data(A_UINT32 *buffer, A_UINT32 length)
OS_MEMZERO(parseArgsString, sizeof(parseArgsString));
totalWriteLen = 0;
+ if (!numargs || (count + numargs + 2 > length))
+ goto skip_args_processing;
+
for (curArgs = 0; curArgs < numargs; curArgs++) {
/*
* Using sprintf_s instead of sprintf,
@@ -1505,7 +1508,7 @@ static int dbglog_print_raw_data(A_UINT32 *buffer, A_UINT32 length)
buffer[count + 2 + curArgs]);
totalWriteLen += writeLen;
}
-
+skip_args_processing:
if (debugid < MAX_DBG_MSGS) {
dbgidString = DBG_MSG_ARR[moduleid][debugid];
if (dbgidString != NULL) {
@@ -1997,6 +2000,11 @@ int dbglog_parse_debug_logs(ol_scn_t scn, uint8_t *data, uint32_t datalen)
len = param_buf->num_bufp;
}
+ if (len < sizeof(dropped)) {
+ AR_DEBUG_PRINTF(ATH_DEBUG_ERR, ("Invalid length\n"));
+ return A_ERROR;
+ }
+
dropped = *((A_UINT32 *) datap);
if (dropped > 0) {
AR_DEBUG_PRINTF(ATH_DEBUG_TRC,
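Aside: the dbglog_print_raw_data() and dbglog_parse_debug_logs() fixes above all bound-check indices against the reported buffer length before dereferencing, so a short or malformed firmware log cannot trigger an out-of-bounds read. A standalone sketch of the same check, using a hypothetical word layout rather than the real DBGLOG header format:

#include <stdint.h>
#include <stdio.h>

static void print_log_words(const uint32_t *buf, uint32_t len)
{
	uint32_t count = 0;

	while (count + 1 < len) {			/* header needs two words */
		uint32_t numargs = buf[count + 1] & 0xFF; /* hypothetical arg-count field */
		uint32_t i;

		/* only touch the argument words if they fit inside the buffer */
		if (numargs && count + numargs + 2 <= len)
			for (i = 0; i < numargs; i++)
				printf("arg[%u]=0x%x\n", i, buf[count + 2 + i]);

		count += numargs + 2;			/* advance past header + args */
	}
}

int main(void)
{
	uint32_t words[] = { 0x0, 0x2 /* 2 args */, 0x11, 0x22 };

	print_log_words(words, sizeof(words) / sizeof(words[0]));
	return 0;
}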
diff --git a/drivers/staging/qcacld-3.0/core/utils/pktlog/include/pktlog_ac.h b/drivers/staging/qcacld-3.0/core/utils/pktlog/include/pktlog_ac.h
index 7ffc9582dbd2..e02af09e463d 100644
--- a/drivers/staging/qcacld-3.0/core/utils/pktlog/include/pktlog_ac.h
+++ b/drivers/staging/qcacld-3.0/core/utils/pktlog/include/pktlog_ac.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -142,7 +142,7 @@ int pktlog_disable(struct hif_opaque_softc *scn);
int pktlogmod_init(void *context);
void pktlogmod_exit(void *context);
int pktlog_htc_attach(void);
-void pktlog_process_fw_msg(uint32_t *msg_word);
+void pktlog_process_fw_msg(uint32_t *msg_word, uint32_t msg_len);
#define ol_pktlog_attach(_scn) \
do { \
@@ -192,7 +192,7 @@ static inline int pktlog_htc_attach(void)
{
return 0;
}
-static inline void pktlog_process_fw_msg(uint32_t *msg_word)
+static inline void pktlog_process_fw_msg(uint32_t *msg_word, uint32_t msg_len)
{ }
#endif /* REMOVE_PKT_LOG */
#endif /* _PKTLOG_AC_H_ */
diff --git a/drivers/staging/qcacld-3.0/core/utils/pktlog/linux_ac.c b/drivers/staging/qcacld-3.0/core/utils/pktlog/linux_ac.c
index 589c06129f8b..05cd59d735be 100644
--- a/drivers/staging/qcacld-3.0/core/utils/pktlog/linux_ac.c
+++ b/drivers/staging/qcacld-3.0/core/utils/pktlog/linux_ac.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -78,6 +78,8 @@ static struct ath_pktlog_info *g_pktlog_info;
static struct proc_dir_entry *g_pktlog_pde;
+static DEFINE_MUTEX(proc_mutex);
+
static int pktlog_attach(struct hif_opaque_softc *sc);
static void pktlog_detach(struct hif_opaque_softc *sc);
static int pktlog_open(struct inode *i, struct file *f);
@@ -230,9 +232,11 @@ qdf_sysctl_decl(ath_sysctl_pktlog_enable, ctl, write, filp, buffer, lenp, ppos)
ol_ath_generic_softc_handle scn;
struct ol_pktlog_dev_t *pl_dev;
+ mutex_lock(&proc_mutex);
scn = (ol_ath_generic_softc_handle) ctl->extra1;
if (!scn) {
+ mutex_unlock(&proc_mutex);
printk("%s: Invalid scn context\n", __func__);
ASSERT(0);
return -EINVAL;
@@ -241,6 +245,7 @@ qdf_sysctl_decl(ath_sysctl_pktlog_enable, ctl, write, filp, buffer, lenp, ppos)
pl_dev = get_pl_handle((struct hif_opaque_softc *)scn);
if (!pl_dev) {
+ mutex_unlock(&proc_mutex);
printk("%s: Invalid pktlog context\n", __func__);
ASSERT(0);
return -ENODEV;
@@ -271,6 +276,7 @@ qdf_sysctl_decl(ath_sysctl_pktlog_enable, ctl, write, filp, buffer, lenp, ppos)
ctl->data = NULL;
ctl->maxlen = 0;
+ mutex_unlock(&proc_mutex);
return ret;
}
@@ -288,9 +294,11 @@ qdf_sysctl_decl(ath_sysctl_pktlog_size, ctl, write, filp, buffer, lenp, ppos)
ol_ath_generic_softc_handle scn;
struct ol_pktlog_dev_t *pl_dev;
+ mutex_lock(&proc_mutex);
scn = (ol_ath_generic_softc_handle) ctl->extra1;
if (!scn) {
+ mutex_unlock(&proc_mutex);
printk("%s: Invalid scn context\n", __func__);
ASSERT(0);
return -EINVAL;
@@ -299,6 +307,7 @@ qdf_sysctl_decl(ath_sysctl_pktlog_size, ctl, write, filp, buffer, lenp, ppos)
pl_dev = get_pl_handle((struct hif_opaque_softc *)scn);
if (!pl_dev) {
+ mutex_unlock(&proc_mutex);
printk("%s: Invalid pktlog handle\n", __func__);
ASSERT(0);
return -ENODEV;
@@ -321,6 +330,7 @@ qdf_sysctl_decl(ath_sysctl_pktlog_size, ctl, write, filp, buffer, lenp, ppos)
ctl->data = NULL;
ctl->maxlen = 0;
+ mutex_unlock(&proc_mutex);
return ret;
}
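Aside: the proc_mutex added above serialises the pktlog sysctl handlers; note that every early return drops the lock it took, otherwise the next writer would deadlock. A userspace analogue of that lock-around-handler shape, using pthreads instead of a kernel mutex and hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t handler_lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_ctx;			/* stands in for ctl->extra1 */

static int handler(void)
{
	pthread_mutex_lock(&handler_lock);

	if (!shared_ctx) {
		pthread_mutex_unlock(&handler_lock);	/* unlock on the error path too */
		return -1;
	}

	/* ... act on shared_ctx while it cannot be torn down concurrently ... */
	pthread_mutex_unlock(&handler_lock);
	return 0;
}

int main(void)
{
	printf("handler -> %d\n", handler());
	return 0;
}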
diff --git a/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_ac.c b/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_ac.c
index acb183c02eb3..2a62ee934efd 100644
--- a/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_ac.c
+++ b/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_ac.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -710,18 +710,21 @@ int pktlog_clearbuff(struct hif_opaque_softc *scn, bool clear_buff)
*
* Return: None
*/
-void pktlog_process_fw_msg(uint32_t *buff)
+void pktlog_process_fw_msg(uint32_t *buff, uint32_t len)
{
uint32_t *pl_hdr;
uint32_t log_type;
struct ol_txrx_pdev_t *txrx_pdev = cds_get_context(QDF_MODULE_ID_TXRX);
+ struct ol_fw_data pl_fw_data;
if (!txrx_pdev) {
qdf_print("%s: txrx_pdev is NULL", __func__);
return;
}
-
pl_hdr = buff;
+ pl_fw_data.data = pl_hdr;
+ pl_fw_data.len = len;
+
log_type =
(*(pl_hdr + 1) & ATH_PKTLOG_HDR_LOG_TYPE_MASK) >>
ATH_PKTLOG_HDR_LOG_TYPE_SHIFT;
@@ -731,19 +734,19 @@ void pktlog_process_fw_msg(uint32_t *buff)
|| (log_type == PKTLOG_TYPE_TX_FRM_HDR)
|| (log_type == PKTLOG_TYPE_TX_VIRT_ADDR))
wdi_event_handler(WDI_EVENT_TX_STATUS,
- txrx_pdev, pl_hdr);
+ txrx_pdev, &pl_fw_data);
else if (log_type == PKTLOG_TYPE_RC_FIND)
wdi_event_handler(WDI_EVENT_RATE_FIND,
- txrx_pdev, pl_hdr);
+ txrx_pdev, &pl_fw_data);
else if (log_type == PKTLOG_TYPE_RC_UPDATE)
wdi_event_handler(WDI_EVENT_RATE_UPDATE,
- txrx_pdev, pl_hdr);
+ txrx_pdev, &pl_fw_data);
else if (log_type == PKTLOG_TYPE_RX_STAT)
wdi_event_handler(WDI_EVENT_RX_DESC,
- txrx_pdev, pl_hdr);
+ txrx_pdev, &pl_fw_data);
else if (log_type == PKTLOG_TYPE_SW_EVENT)
wdi_event_handler(WDI_EVENT_SW_EVENT,
- txrx_pdev, pl_hdr);
+ txrx_pdev, &pl_fw_data);
}
@@ -771,10 +774,11 @@ static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
struct ol_pktlog_dev_t *pdev = (struct ol_pktlog_dev_t *)context;
qdf_nbuf_t pktlog_t2h_msg = (qdf_nbuf_t) pkt->pPktContext;
uint32_t *msg_word;
+ uint32_t msg_len;
/* check for sanity of the packet, have seen corrupted pkts */
if (pktlog_nbuf_check_sanity(pktlog_t2h_msg)) {
- qdf_print("%s: packet 0x%p corrupted? Leaking...",
+ qdf_print("%s: packet %pK corrupted? Leaking...",
__func__, pktlog_t2h_msg);
/* do not free; may crash! */
QDF_ASSERT(0);
@@ -793,7 +797,8 @@ static void pktlog_t2h_msg_handler(void *context, HTC_PACKET *pkt)
qdf_assert((((unsigned long)qdf_nbuf_data(pktlog_t2h_msg)) & 0x3) == 0);
msg_word = (uint32_t *) qdf_nbuf_data(pktlog_t2h_msg);
- pktlog_process_fw_msg(msg_word);
+ msg_len = qdf_nbuf_len(pktlog_t2h_msg);
+ pktlog_process_fw_msg(msg_word, msg_len);
qdf_nbuf_free(pktlog_t2h_msg);
}
diff --git a/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_internal.c b/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_internal.c
index aae7d71c0049..94ef1a09fe34 100644
--- a/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_internal.c
+++ b/drivers/staging/qcacld-3.0/core/utils/pktlog/pktlog_internal.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -276,6 +276,13 @@ fill_ieee80211_hdr_data(struct ol_txrx_pdev_t *txrx_pdev,
pl_msdu_info->priv_size = sizeof(uint32_t) *
pl_msdu_info->num_msdu + sizeof(uint32_t);
+ if (pl_msdu_info->num_msdu > MAX_PKT_INFO_MSDU_ID) {
+ QDF_TRACE(QDF_MODULE_ID_TXRX, QDF_TRACE_LEVEL_ERROR,
+ "%s: Invalid num_msdu count",
+ __func__);
+ qdf_assert(0);
+ return;
+ }
for (i = 0; i < pl_msdu_info->num_msdu; i++) {
/*
* Handle big endianness
@@ -356,6 +363,8 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data)
struct ath_pktlog_hdr pl_hdr;
struct ath_pktlog_info *pl_info;
uint32_t *pl_tgt_hdr;
+ struct ol_fw_data *fw_data;
+ uint32_t len;
if (!txrx_pdev) {
printk("Invalid pdev in %s\n", __func__);
@@ -365,7 +374,19 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data)
qdf_assert(data);
pl_dev = txrx_pdev->pl_dev;
- pl_tgt_hdr = (uint32_t *) data;
+ fw_data = (struct ol_fw_data *)data;
+ len = fw_data->len;
+ pl_tgt_hdr = (uint32_t *) fw_data->data;
+ if (len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) {
+ qdf_print("Invalid msdu len in %s\n", __func__);
+ qdf_assert(0);
+ return A_ERROR;
+ }
/*
* Makes the short words (16 bits) portable b/w little endian
* and big endian
@@ -390,6 +411,11 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data)
*(pl_tgt_hdr + ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET);
pl_info = pl_dev->pl_info;
+ if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) {
+ qdf_assert(0);
+ return A_ERROR;
+ }
+
if (pl_hdr.log_type == PKTLOG_TYPE_TX_CTRL) {
size_t log_size = sizeof(frm_hdr) + pl_hdr.size;
void *txdesc_hdr_ctl = (void *)
@@ -400,7 +426,7 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data)
qdf_mem_copy(txdesc_hdr_ctl, &frm_hdr, sizeof(frm_hdr));
qdf_mem_copy((char *)txdesc_hdr_ctl + sizeof(frm_hdr),
- ((void *)data +
+ ((void *)fw_data->data +
sizeof(struct ath_pktlog_hdr)),
pl_hdr.size);
pl_hdr.size = log_size;
@@ -417,7 +443,7 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data)
log_size, &pl_hdr);
qdf_assert(txstat_log.ds_status);
qdf_mem_copy(txstat_log.ds_status,
- ((void *)data + sizeof(struct ath_pktlog_hdr)),
+ ((void *)fw_data->data + sizeof(struct ath_pktlog_hdr)),
pl_hdr.size);
cds_pkt_stats_to_logger_thread(&pl_hdr, NULL,
txstat_log.ds_status);
@@ -436,6 +462,8 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data)
struct ath_pktlog_hdr pl_hdr;
struct ath_pktlog_info *pl_info;
uint32_t *pl_tgt_hdr;
+ struct ol_fw_data *fw_data;
+ uint32_t len;
if (!txrx_pdev) {
qdf_print("Invalid pdev in %s\n", __func__);
@@ -445,7 +473,19 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data)
qdf_assert(data);
pl_dev = txrx_pdev->pl_dev;
- pl_tgt_hdr = (uint32_t *) data;
+ fw_data = (struct ol_fw_data *)data;
+ len = fw_data->len;
+ pl_tgt_hdr = (uint32_t *) fw_data->data;
+ if (len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) {
+ qdf_print("Invalid msdu len in %s\n", __func__);
+ qdf_assert(0);
+ return A_ERROR;
+ }
/*
* Makes the short words (16 bits) portable b/w little endian
* and big endian
@@ -467,12 +507,12 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data)
if (pl_hdr.log_type == PKTLOG_TYPE_TX_FRM_HDR) {
/* Valid only for the TX CTL */
- process_ieee_hdr(data + sizeof(pl_hdr));
+ process_ieee_hdr(fw_data->data + sizeof(pl_hdr));
}
if (pl_hdr.log_type == PKTLOG_TYPE_TX_VIRT_ADDR) {
A_UINT32 desc_id = (A_UINT32)
- *((A_UINT32 *) (data + sizeof(pl_hdr)));
+ *((A_UINT32 *) (fw_data->data + sizeof(pl_hdr)));
A_UINT32 vdev_id = desc_id;
/* if the pkt log msg is for the bcn frame the vdev id
@@ -524,8 +564,13 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data)
pl_hdr.size = (pl_hdr.size > sizeof(txctl_log.priv.txdesc_ctl))
? sizeof(txctl_log.priv.txdesc_ctl) :
pl_hdr.size;
+
+ if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) {
+ qdf_assert(0);
+ return A_ERROR;
+ }
qdf_mem_copy((void *)&txctl_log.priv.txdesc_ctl,
- ((void *)data + sizeof(struct ath_pktlog_hdr)),
+ ((void *)fw_data->data + sizeof(struct ath_pktlog_hdr)),
pl_hdr.size);
qdf_assert(txctl_log.txdesc_hdr_ctl);
qdf_mem_copy(txctl_log.txdesc_hdr_ctl, &txctl_log.priv,
@@ -544,7 +589,7 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data)
pktlog_getbuf(pl_dev, pl_info, log_size, &pl_hdr);
qdf_assert(txstat_log.ds_status);
qdf_mem_copy(txstat_log.ds_status,
- ((void *)data + sizeof(struct ath_pktlog_hdr)),
+ ((void *)fw_data->data + sizeof(struct ath_pktlog_hdr)),
pl_hdr.size);
cds_pkt_stats_to_logger_thread(&pl_hdr, NULL,
txstat_log.ds_status);
@@ -558,12 +603,12 @@ A_STATUS process_tx_info(struct ol_txrx_pdev_t *txrx_pdev, void *data)
log_size = sizeof(pl_msdu_info.priv);
if (pl_dev->mt_pktlog_enabled == false)
- fill_ieee80211_hdr_data(txrx_pdev, &pl_msdu_info, data);
+ fill_ieee80211_hdr_data(txrx_pdev, &pl_msdu_info, fw_data->data);
pl_msdu_info.ath_msdu_info = pktlog_getbuf(pl_dev, pl_info,
log_size, &pl_hdr);
qdf_mem_copy((void *)&pl_msdu_info.priv.msdu_id_info,
- ((void *)data + sizeof(struct ath_pktlog_hdr)),
+ ((void *)fw_data->data + sizeof(struct ath_pktlog_hdr)),
sizeof(pl_msdu_info.priv.msdu_id_info));
qdf_mem_copy(pl_msdu_info.ath_msdu_info, &pl_msdu_info.priv,
sizeof(pl_msdu_info.priv));
@@ -644,6 +689,8 @@ A_STATUS process_rx_info(void *pdev, void *data)
struct ath_pktlog_hdr pl_hdr;
size_t log_size;
uint32_t *pl_tgt_hdr;
+ struct ol_fw_data *fw_data;
+ uint32_t len;
if (!pdev) {
printk("Invalid pdev in %s", __func__);
@@ -651,7 +698,20 @@ A_STATUS process_rx_info(void *pdev, void *data)
}
pl_dev = ((struct ol_txrx_pdev_t *)pdev)->pl_dev;
pl_info = pl_dev->pl_info;
- pl_tgt_hdr = (uint32_t *) data;
+ fw_data = (struct ol_fw_data *)data;
+ len = fw_data->len;
+ pl_tgt_hdr = (uint32_t *) fw_data->data;
+ if (len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) {
+ qdf_print("Invalid msdu len in %s\n", __func__);
+ qdf_assert(0);
+ return A_ERROR;
+ }
+
pl_hdr.flags = (*(pl_tgt_hdr + ATH_PKTLOG_HDR_FLAGS_OFFSET) &
ATH_PKTLOG_HDR_FLAGS_MASK) >>
ATH_PKTLOG_HDR_FLAGS_SHIFT;
@@ -679,12 +739,17 @@ A_STATUS process_rx_info(void *pdev, void *data)
pl_hdr.type_specific_data =
*(pl_tgt_hdr + ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET);
#endif
+ if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) {
+ qdf_assert(0);
+ return A_ERROR;
+ }
+
log_size = pl_hdr.size;
rxstat_log.rx_desc = (void *)pktlog_getbuf(pl_dev, pl_info,
log_size, &pl_hdr);
qdf_mem_copy(rxstat_log.rx_desc,
- (void *)data + sizeof(struct ath_pktlog_hdr), pl_hdr.size);
+ (void *)fw_data->data + sizeof(struct ath_pktlog_hdr), pl_hdr.size);
cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rxstat_log.rx_desc);
return A_OK;
@@ -696,6 +761,8 @@ A_STATUS process_rate_find(void *pdev, void *data)
struct ath_pktlog_hdr pl_hdr;
struct ath_pktlog_info *pl_info;
size_t log_size;
+ uint32_t len;
+ struct ol_fw_data *fw_data;
/*
* Will be uncommented when the rate control find
@@ -714,7 +781,19 @@ A_STATUS process_rate_find(void *pdev, void *data)
return A_ERROR;
}
- pl_tgt_hdr = (uint32_t *) data;
+ fw_data = (struct ol_fw_data *)data;
+ len = fw_data->len;
+ pl_tgt_hdr = (uint32_t *) fw_data->data;
+ if (len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) {
+ qdf_print("Invalid msdu len in %s\n", __func__);
+ qdf_assert(0);
+ return A_ERROR;
+ }
/*
* Makes the short words (16 bits) portable b/w little endian
* and big endian
@@ -752,8 +831,12 @@ A_STATUS process_rate_find(void *pdev, void *data)
rcf_log.rcFind = (void *)pktlog_getbuf(pl_dev, pl_info,
log_size, &pl_hdr);
+ if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) {
+ qdf_assert(0);
+ return A_ERROR;
+ }
qdf_mem_copy(rcf_log.rcFind,
- ((char *)data + sizeof(struct ath_pktlog_hdr)),
+ ((char *)fw_data->data + sizeof(struct ath_pktlog_hdr)),
pl_hdr.size);
cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rcf_log.rcFind);
@@ -766,6 +849,8 @@ A_STATUS process_sw_event(void *pdev, void *data)
struct ath_pktlog_hdr pl_hdr;
struct ath_pktlog_info *pl_info;
size_t log_size;
+ uint32_t len;
+ struct ol_fw_data *fw_data;
/*
* Will be uncommented when the rate control find
@@ -784,7 +869,19 @@ A_STATUS process_sw_event(void *pdev, void *data)
return A_ERROR;
}
- pl_tgt_hdr = (uint32_t *) data;
+ fw_data = (struct ol_fw_data *)data;
+ len = fw_data->len;
+ pl_tgt_hdr = (uint32_t *) fw_data->data;
+ if (len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) {
+ qdf_print("Invalid msdu len in %s\n", __func__);
+ qdf_assert(0);
+ return A_ERROR;
+ }
/*
* Makes the short words (16 bits) portable b/w little endian
* and big endian
@@ -823,8 +920,12 @@ A_STATUS process_sw_event(void *pdev, void *data)
sw_event.sw_event = (void *)pktlog_getbuf(pl_dev, pl_info,
log_size, &pl_hdr);
+ if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) {
+ qdf_assert(0);
+ return A_ERROR;
+ }
qdf_mem_copy(sw_event.sw_event,
- ((char *)data + sizeof(struct ath_pktlog_hdr)),
+ ((char *)fw_data->data + sizeof(struct ath_pktlog_hdr)),
pl_hdr.size);
return A_OK;
@@ -838,6 +939,8 @@ A_STATUS process_rate_update(void *pdev, void *data)
struct ath_pktlog_info *pl_info;
struct ath_pktlog_rc_update rcu_log;
uint32_t *pl_tgt_hdr;
+ struct ol_fw_data *fw_data;
+ uint32_t len;
if (!pdev) {
printk("Invalid pdev in %s\n", __func__);
@@ -847,7 +950,19 @@ A_STATUS process_rate_update(void *pdev, void *data)
printk("Invalid data in %s\n", __func__);
return A_ERROR;
}
- pl_tgt_hdr = (uint32_t *) data;
+ fw_data = (struct ol_fw_data *)data;
+ len = fw_data->len;
+ pl_tgt_hdr = (uint32_t *) fw_data->data;
+ if (len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_FLAGS_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MISSED_CNT_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_LOG_TYPE_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_MAC_ID_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_SIZE_OFFSET + 1)) ||
+ len < (sizeof(uint32_t) * (ATH_PKTLOG_HDR_TYPE_SPECIFIC_DATA_OFFSET + 1))) {
+ qdf_print("Invalid msdu len in %s\n", __func__);
+ qdf_assert(0);
+ return A_ERROR;
+ }
/*
* Makes the short words (16 bits) portable b/w little endian
* and big endian
@@ -890,8 +1005,12 @@ A_STATUS process_rate_update(void *pdev, void *data)
*/
rcu_log.txRateCtrl = (void *)pktlog_getbuf(pl_dev, pl_info,
log_size, &pl_hdr);
+ if (sizeof(struct ath_pktlog_hdr) + pl_hdr.size > len) {
+ qdf_assert(0);
+ return A_ERROR;
+ }
qdf_mem_copy(rcu_log.txRateCtrl,
- ((char *)data + sizeof(struct ath_pktlog_hdr)),
+ ((char *)fw_data->data + sizeof(struct ath_pktlog_hdr)),
pl_hdr.size);
cds_pkt_stats_to_logger_thread(&pl_hdr, NULL, rcu_log.txRateCtrl);
return A_OK;
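Aside: each process_*() handler above now receives an ol_fw_data {data, len} pair and validates the length twice — once against the header words it is about to read, and once against header-plus-payload before copying. A compact standalone sketch of that validation, using stand-in structures rather than the real ath_pktlog_hdr layout:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct fw_msg {				/* stands in for struct ol_fw_data */
	const uint8_t *data;
	uint32_t len;
};

struct log_hdr {			/* stand-in header: fixed part + payload size */
	uint32_t flags;
	uint32_t size;			/* payload bytes that follow the header */
};

static int copy_payload(const struct fw_msg *msg, uint8_t *out, uint32_t out_len)
{
	const struct log_hdr *hdr;

	if (msg->len < sizeof(*hdr))
		return -1;				/* header itself is truncated */

	hdr = (const struct log_hdr *)msg->data;
	if (sizeof(*hdr) + hdr->size > msg->len || hdr->size > out_len)
		return -1;				/* payload would overrun a buffer */

	memcpy(out, msg->data + sizeof(*hdr), hdr->size);
	return (int)hdr->size;
}

int main(void)
{
	uint32_t raw[3] = { 0 };			/* 8-byte header + 4-byte payload */
	struct log_hdr *h = (struct log_hdr *)raw;
	struct fw_msg msg = { (const uint8_t *)raw, sizeof(raw) };
	uint8_t out[16];

	h->size = 4;
	printf("copied %d bytes\n", copy_payload(&msg, out, sizeof(out)));
	return 0;
}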
diff --git a/drivers/staging/qcacld-3.0/core/wma/inc/wma.h b/drivers/staging/qcacld-3.0/core/wma/inc/wma.h
index 24c136443bfa..bf863b06ec63 100644
--- a/drivers/staging/qcacld-3.0/core/wma/inc/wma.h
+++ b/drivers/staging/qcacld-3.0/core/wma/inc/wma.h
@@ -90,8 +90,6 @@
#define WMA_MAX_MGMT_MPDU_LEN 2000
-#define WMA_MAX_MGMT_MPDU_LEN 2000
-
#define FRAGMENT_SIZE 3072
#define MAX_PRINT_FAILURE_CNT 50
@@ -209,6 +207,19 @@
#define WMA_BEACON_TX_RATE_48_M 480
#define WMA_BEACON_TX_RATE_54_M 540
+#define WMA_FW_MODE_STA_STA_BIT_POS 0
+#define WMA_FW_MODE_STA_P2P_BIT_POS 1
+
+#define WMA_FW_MODE_STA_STA_BIT_MASK (0x1 << WMA_FW_MODE_STA_STA_BIT_POS)
+#define WMA_FW_MODE_STA_P2P_BIT_MASK (0x1 << WMA_FW_MODE_STA_P2P_BIT_POS)
+
+#define WMA_CHANNEL_SELECT_LOGIC_STA_STA_GET(channel_select_logic_conc) \
+ ((channel_select_logic_conc & WMA_FW_MODE_STA_STA_BIT_MASK) >> \
+ WMA_FW_MODE_STA_STA_BIT_POS)
+#define WMA_CHANNEL_SELECT_LOGIC_STA_P2P_GET(channel_select_logic_conc) \
+ ((channel_select_logic_conc & WMA_FW_MODE_STA_P2P_BIT_MASK) >> \
+ WMA_FW_MODE_STA_P2P_BIT_POS)
+
/**
* ds_mode: distribution system mode
* @IEEE80211_NO_DS: NO DS at either side
@@ -1072,6 +1083,7 @@ struct roam_synch_frame_ind {
* @vdev_start_wakelock: wakelock to protect vdev start op with firmware
* @vdev_stop_wakelock: wakelock to protect vdev stop op with firmware
* @vdev_set_key_wakelock: wakelock to protect vdev set key op with firmware
+ * @channel: operating channel of the vdev
*/
struct wma_txrx_node {
uint8_t addr[IEEE80211_ADDR_LEN];
@@ -1162,6 +1174,7 @@ struct wma_txrx_node {
qdf_wake_lock_t vdev_set_key_wakelock;
struct roam_synch_frame_ind roam_synch_frame_ind;
bool is_waiting_for_key;
+ uint8_t channel;
};
#if defined(QCA_WIFI_FTM)
@@ -1521,10 +1534,10 @@ struct peer_debug_info {
* It contains global wma module parameters and
* handle of other modules.
* @saved_wmi_init_cmd: Saved WMI INIT command
- * @bpf_packet_filter_enable: BPF filter enabled or not
- * @active_uc_bpf_mode: Setting that determines how BPF is applied in active
+ * @apf_packet_filter_enable: APF filter enabled or not
+ * @active_uc_apf_mode: Setting that determines how APF is applied in active
* mode for uc packets
- * @active_mc_bc_bpf_mode: Setting that determines how BPF is applied in
+ * @active_mc_bc_apf_mode: Setting that determines how APF is applied in
* active mode for MC/BC packets
* @service_ready_ext_evt: Wait event for service ready ext
* @wmi_cmd_rsp_wake_lock: wmi command response wake lock
@@ -1720,15 +1733,16 @@ typedef struct {
enum sir_roam_op_code reason);
QDF_STATUS (*pe_roam_synch_cb)(tpAniSirGlobal mac,
roam_offload_synch_ind *roam_synch_data,
- tpSirBssDescription bss_desc_ptr);
+ tpSirBssDescription bss_desc_ptr,
+ enum sir_roam_op_code reason);
qdf_wake_lock_t wmi_cmd_rsp_wake_lock;
qdf_runtime_lock_t wmi_cmd_rsp_runtime_lock;
qdf_runtime_lock_t wma_runtime_resume_lock;
uint32_t fine_time_measurement_cap;
- bool bpf_enabled;
- bool bpf_packet_filter_enable;
- enum active_bpf_mode active_uc_bpf_mode;
- enum active_bpf_mode active_mc_bc_bpf_mode;
+ bool apf_enabled;
+ bool apf_packet_filter_enable;
+ enum active_apf_mode active_uc_apf_mode;
+ enum active_apf_mode active_mc_bc_apf_mode;
struct wma_ini_config ini_config;
struct wma_valid_channels saved_chan;
/* NAN datapath support enabled in firmware */
@@ -2444,13 +2458,82 @@ void wma_process_fw_test_cmd(WMA_HANDLE handle,
QDF_STATUS wma_send_ht40_obss_scanind(tp_wma_handle wma,
struct obss_ht40_scanind *req);
-int wma_get_bpf_caps_event_handler(void *handle,
+uint32_t wma_get_num_of_setbits_from_bitmask(uint32_t mask);
+
+/**
+ * wma_get_apf_caps_event_handler() - Event handler for get apf capability
+ * @handle: WMA global handle
+ * @cmd_param_info: command event data
+ * @len: Length of @cmd_param_info
+ *
+ * Return: 0 on success or errno on failure
+ */
+int wma_get_apf_caps_event_handler(void *handle,
u_int8_t *cmd_param_info,
u_int32_t len);
-uint32_t wma_get_num_of_setbits_from_bitmask(uint32_t mask);
-QDF_STATUS wma_get_bpf_capabilities(tp_wma_handle wma);
-QDF_STATUS wma_set_bpf_instructions(tp_wma_handle wma,
- struct sir_bpf_set_offload *bpf_set_offload);
+
+/**
+ * wma_get_apf_capabilities - Send get apf capability to firmware
+ * @wma_handle: wma handle
+ *
+ * Return: QDF_STATUS enumeration.
+ */
+QDF_STATUS wma_get_apf_capabilities(tp_wma_handle wma);
+
+/**
+ * wma_set_apf_instructions - Set apf instructions to firmware
+ * @wma: wma handle
+ * @apf_set_offload: APF offload information to set to firmware
+ *
+ * Return: QDF_STATUS enumeration
+ */
+QDF_STATUS wma_set_apf_instructions(tp_wma_handle wma,
+ struct sir_apf_set_offload *apf_set_offload);
+
+/**
+ * wma_send_apf_enable_cmd - Send apf enable/disable cmd
+ * @handle: wma handle
+ * @vdev_id: vdev id
+ * @apf_enable: true: Enable APF Int., false: Disable APF Int.
+ *
+ * Return: QDF_STATUS enumeration.
+ */
+QDF_STATUS wma_send_apf_enable_cmd(WMA_HANDLE handle, uint8_t vdev_id,
+ bool apf_enable);
+
+/**
+ * wma_send_apf_write_work_memory_cmd - Command to write into the apf work memory
+ * @handle: wma handle
+ * @write_params: APF parameters for the write operation
+ *
+ * Return: QDF_STATUS enumeration.
+ */
+QDF_STATUS wma_send_apf_write_work_memory_cmd(WMA_HANDLE handle,
+ struct wmi_apf_write_memory_params *write_params);
+
+/**
+ * wma_send_apf_read_work_memory_cmd - Command to get part of apf work memory
+ * @handle: wma handle
+ * @callback: HDD callback to receive apf get mem event
+ * @context: Context for the HDD callback
+ * @read_params: APF parameters for the get operation
+ *
+ * Return: QDF_STATUS enumeration.
+ */
+QDF_STATUS wma_send_apf_read_work_memory_cmd(WMA_HANDLE handle,
+ struct wmi_apf_read_memory_params *read_params);
+
+/**
+ * wma_apf_read_work_memory_event_handler - Event handler for the APF read-memory operation
+ * @handle: wma handle
+ * @evt_buf: Buffer pointer to the event
+ * @len: Length of the event buffer
+ *
+ * Return: 0 on success or errno on failure
+ */
+int wma_apf_read_work_memory_event_handler(void *handle, uint8_t *evt_buf,
+ uint32_t len);
+
void wma_process_set_pdev_ie_req(tp_wma_handle wma,
struct set_ie_param *ie_params);
void wma_process_set_pdev_ht_ie_req(tp_wma_handle wma,
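Aside: the WMA_FW_MODE_* and WMA_CHANNEL_SELECT_LOGIC_*_GET() macros above pack two single-bit concurrency policies into channel_select_logic_conc and extract them by mask-and-shift. A worked example with shortened macro names mirroring the ones in the hunk:

#include <stdint.h>
#include <stdio.h>

#define FW_MODE_STA_STA_BIT_POS  0
#define FW_MODE_STA_P2P_BIT_POS  1
#define FW_MODE_STA_STA_BIT_MASK (0x1 << FW_MODE_STA_STA_BIT_POS)
#define FW_MODE_STA_P2P_BIT_MASK (0x1 << FW_MODE_STA_P2P_BIT_POS)

#define STA_STA_GET(v) (((v) & FW_MODE_STA_STA_BIT_MASK) >> FW_MODE_STA_STA_BIT_POS)
#define STA_P2P_GET(v) (((v) & FW_MODE_STA_P2P_BIT_MASK) >> FW_MODE_STA_P2P_BIT_POS)

int main(void)
{
	uint32_t conc = 0x2;	/* STA+STA policy = 0, STA+P2P policy = 1 */

	printf("sta_sta=%u sta_p2p=%u\n", STA_STA_GET(conc), STA_P2P_GET(conc));
	return 0;
}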
diff --git a/drivers/staging/qcacld-3.0/core/wma/inc/wma_api.h b/drivers/staging/qcacld-3.0/core/wma/inc/wma_api.h
index f00378ca0fa2..3cf52cd91a02 100644
--- a/drivers/staging/qcacld-3.0/core/wma/inc/wma_api.h
+++ b/drivers/staging/qcacld-3.0/core/wma/inc/wma_api.h
@@ -74,6 +74,10 @@ enum GEN_PARAM {
* @vht_5g: entire VHT cap for 5G band in terms of 32 bit flag
* @he_2g: entire HE cap for 2G band in terms of 32 bit flag
* @he_5g: entire HE cap for 5G band in terms of 32 bit flag
+ * @tx_chain_mask_2G: tx chain mask for 2g
+ * @rx_chain_mask_2G: rx chain mask for 2g
+ * @tx_chain_mask_5G: tx chain mask for 5g
+ * @rx_chain_mask_5G: rx chain mask for 5g
*/
struct wma_caps_per_phy {
uint32_t ht_2g;
@@ -82,6 +86,10 @@ struct wma_caps_per_phy {
uint32_t vht_5g;
uint32_t he_2g;
uint32_t he_5g;
+ uint32_t tx_chain_mask_2G;
+ uint32_t rx_chain_mask_2G;
+ uint32_t tx_chain_mask_5G;
+ uint32_t rx_chain_mask_5G;
};
@@ -252,7 +260,8 @@ QDF_STATUS wma_get_updated_fw_mode_config(uint32_t *fw_mode_config,
bool dbs,
bool agile_dfs);
QDF_STATUS wma_get_updated_scan_and_fw_mode_config(uint32_t *scan_config,
- uint32_t *fw_mode_config, uint32_t dual_mac_disable_ini);
+ uint32_t *fw_mode_config, uint32_t dual_mac_disable_ini,
+ uint32_t channel_select_logic_conc);
bool wma_get_dbs_scan_config(void);
bool wma_get_dbs_plus_agile_scan_config(void);
bool wma_get_single_mac_scan_with_dfs_config(void);
@@ -519,4 +528,11 @@ QDF_STATUS wma_crash_inject(WMA_HANDLE wma_handle, uint32_t type,
QDF_STATUS wma_wow_set_wake_time(WMA_HANDLE wma_handle, uint8_t vdev_id,
uint32_t cookie, uint32_t time);
+/**
+ * wma_wmi_stop() - send wmi stop cmd
+ *
+ * Return: None
+ */
+void wma_wmi_stop(void);
+
#endif
diff --git a/drivers/staging/qcacld-3.0/core/wma/inc/wma_internal.h b/drivers/staging/qcacld-3.0/core/wma/inc/wma_internal.h
index f9df5341bd16..fad9038e4306 100644
--- a/drivers/staging/qcacld-3.0/core/wma/inc/wma_internal.h
+++ b/drivers/staging/qcacld-3.0/core/wma/inc/wma_internal.h
@@ -1134,12 +1134,12 @@ QDF_STATUS wma_enable_arp_ns_offload(tp_wma_handle wma,
/**
* wma_conf_hw_filter_mode() - configure hw filter to the given mode
* @wma: wma handle
- * @req: hardware filter request
+ * @req: hardware filter request parameters
*
* Return: QDF_STATUS
*/
QDF_STATUS wma_conf_hw_filter_mode(tp_wma_handle wma,
- struct hw_filter_request *req);
+ struct wmi_hw_filter_req_params *req);
QDF_STATUS wma_process_cesium_enable_ind(tp_wma_handle wma);
diff --git a/drivers/staging/qcacld-3.0/core/wma/inc/wma_tgt_cfg.h b/drivers/staging/qcacld-3.0/core/wma/inc/wma_tgt_cfg.h
index 4ab524d93839..ce0517de8ead 100644
--- a/drivers/staging/qcacld-3.0/core/wma/inc/wma_tgt_cfg.h
+++ b/drivers/staging/qcacld-3.0/core/wma/inc/wma_tgt_cfg.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -73,6 +73,7 @@ struct wma_tgt_services {
bool get_peer_info_enabled;
bool is_fils_roaming_supported;
bool is_fw_mawc_capable;
+ bool is_11k_offload_supported;
};
/**
@@ -177,7 +178,7 @@ struct wma_tgt_cfg {
bool egap_support;
#endif
uint32_t fine_time_measurement_cap;
- bool bpf_enabled;
+ bool apf_enabled;
#ifdef FEATURE_WLAN_RA_FILTERING
bool is_ra_rate_limit_enabled;
#endif
diff --git a/drivers/staging/qcacld-3.0/core/wma/inc/wma_types.h b/drivers/staging/qcacld-3.0/core/wma/inc/wma_types.h
index 8b8b3ebd8fc5..b2d9aa5742ab 100644
--- a/drivers/staging/qcacld-3.0/core/wma/inc/wma_types.h
+++ b/drivers/staging/qcacld-3.0/core/wma/inc/wma_types.h
@@ -484,8 +484,8 @@
#define WMA_REMOVE_BCN_FILTER_CMDID SIR_HAL_REMOVE_BCN_FILTER_CMDID
#define WMA_SET_ADAPT_DWELLTIME_CONF_PARAMS SIR_HAL_SET_ADAPT_DWELLTIME_PARAMS
-#define WDA_BPF_GET_CAPABILITIES_REQ SIR_HAL_BPF_GET_CAPABILITIES_REQ
-#define WDA_BPF_SET_INSTRUCTIONS_REQ SIR_HAL_BPF_SET_INSTRUCTIONS_REQ
+#define WDA_APF_GET_CAPABILITIES_REQ SIR_HAL_APF_GET_CAPABILITIES_REQ
+#define WDA_APF_SET_INSTRUCTIONS_REQ SIR_HAL_APF_SET_INSTRUCTIONS_REQ
#define WMA_SET_PDEV_IE_REQ SIR_HAL_SET_PDEV_IE_REQ
#define WMA_UPDATE_WEP_DEFAULT_KEY SIR_HAL_UPDATE_WEP_DEFAULT_KEY
@@ -507,6 +507,7 @@
#define WDA_ACTION_FRAME_RANDOM_MAC SIR_HAL_ACTION_FRAME_RANDOM_MAC
#define WMA_SET_LIMIT_OFF_CHAN SIR_HAL_SET_LIMIT_OFF_CHAN
+#define WMA_INVOKE_NEIGHBOR_REPORT SIR_HAL_INVOKE_NEIGHBOR_REPORT
/* Bit 6 will be used to control BD rate for Management frames */
#define HAL_USE_BD_RATE2_FOR_MANAGEMENT_FRAME 0x40
@@ -793,7 +794,8 @@ QDF_STATUS wma_register_roaming_callbacks(void *cds_ctx,
enum sir_roam_op_code reason),
QDF_STATUS (*pe_roam_synch_cb)(tpAniSirGlobal mac,
roam_offload_synch_ind *roam_synch_data,
- tpSirBssDescription bss_desc_ptr));
+ tpSirBssDescription bss_desc_ptr,
+ enum sir_roam_op_code reason));
#else
static inline QDF_STATUS wma_register_roaming_callbacks(void *cds_ctx,
QDF_STATUS (*csr_roam_synch_cb)(tpAniSirGlobal mac,
@@ -802,7 +804,8 @@ static inline QDF_STATUS wma_register_roaming_callbacks(void *cds_ctx,
enum sir_roam_op_code reason),
QDF_STATUS (*pe_roam_synch_cb)(tpAniSirGlobal mac,
roam_offload_synch_ind *roam_synch_data,
- tpSirBssDescription bss_desc_ptr))
+ tpSirBssDescription bss_desc_ptr,
+ enum sir_roam_op_code reason))
{
return QDF_STATUS_E_NOSUPPORT;
}
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_data.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_data.c
index 42856d322d5b..e2d5ebbac398 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_data.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_data.c
@@ -2630,6 +2630,7 @@ QDF_STATUS wma_tx_packet(void *wma_context, void *tx_frame, uint16_t frmLen,
struct wmi_mgmt_params mgmt_param = {0};
struct wmi_desc_t *wmi_desc = NULL;
ol_pdev_handle ctrl_pdev;
+ bool is_5g = false;
if (NULL == wma_handle) {
WMA_LOGE("wma_handle is NULL");
@@ -2962,6 +2963,9 @@ QDF_STATUS wma_tx_packet(void *wma_context, void *tx_frame, uint16_t frmLen,
}
}
+ if (CDS_IS_CHANNEL_5GHZ(wma_handle->interfaces[vdev_id].channel))
+ is_5g = true;
+
if (WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap,
WMI_SERVICE_MGMT_TX_WMI)) {
mgmt_param.tx_frame = tx_frame;
@@ -2975,7 +2979,7 @@ QDF_STATUS wma_tx_packet(void *wma_context, void *tx_frame, uint16_t frmLen,
* other than 1Mbps and 6 Mbps
*/
if (rid < RATEID_DEFAULT &&
- (rid != RATEID_1MBPS && rid != RATEID_6MBPS)) {
+ (rid != RATEID_1MBPS) && !(rid == RATEID_6MBPS && is_5g)) {
WMA_LOGD(FL("using rate id: %d for Tx"), rid);
mgmt_param.tx_params_valid = true;
wma_update_tx_send_params(&mgmt_param.tx_param, rid);
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_dev_if.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_dev_if.c
index 67ecf5535c14..990fb2653867 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_dev_if.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_dev_if.c
@@ -500,6 +500,7 @@ static QDF_STATUS wma_self_peer_remove(tp_wma_handle wma_handle,
{
ol_txrx_peer_handle peer;
ol_txrx_pdev_handle pdev;
+ QDF_STATUS qdf_status;
uint8_t peer_id;
uint8_t vdev_id = del_sta_self_req_param->session_id;
struct wma_target_req *msg = NULL;
@@ -511,7 +512,8 @@ static QDF_STATUS wma_self_peer_remove(tp_wma_handle wma_handle,
pdev = cds_get_context(QDF_MODULE_ID_TXRX);
if (NULL == pdev) {
WMA_LOGE("%s: Failed to get pdev", __func__);
- return QDF_STATUS_E_FAULT;
+ qdf_status = QDF_STATUS_E_FAULT;
+ goto error;
}
peer = ol_txrx_find_peer_by_addr(pdev,
@@ -520,7 +522,8 @@ static QDF_STATUS wma_self_peer_remove(tp_wma_handle wma_handle,
if (!peer) {
WMA_LOGE("%s Failed to find peer %pM", __func__,
del_sta_self_req_param->self_mac_addr);
- return QDF_STATUS_SUCCESS;
+ qdf_status = QDF_STATUS_E_FAULT;
+ goto error;
}
wma_remove_peer(wma_handle,
del_sta_self_req_param->self_mac_addr,
@@ -532,7 +535,8 @@ static QDF_STATUS wma_self_peer_remove(tp_wma_handle wma_handle,
qdf_mem_malloc(sizeof(struct del_sta_self_rsp_params));
if (sta_self_wmi_rsp == NULL) {
WMA_LOGE(FL("Failed to allocate memory"));
- return QDF_STATUS_E_NOMEM;
+ qdf_status = QDF_STATUS_E_NOMEM;
+ goto error;
}
sta_self_wmi_rsp->self_sta_param = del_sta_self_req_param;
sta_self_wmi_rsp->generate_rsp = generate_vdev_rsp;
@@ -546,10 +550,13 @@ static QDF_STATUS wma_self_peer_remove(tp_wma_handle wma_handle,
vdev_id);
wma_remove_req(wma_handle, vdev_id,
WMA_DEL_P2P_SELF_STA_RSP_START);
- return QDF_STATUS_E_FAILURE;
+ qdf_status = QDF_STATUS_E_FAILURE;
+ goto error;
}
}
return QDF_STATUS_SUCCESS;
+error:
+ return qdf_status;
}
static void
@@ -651,9 +658,9 @@ QDF_STATUS wma_vdev_detach(tp_wma_handle wma_handle,
req_msg = wma_find_vdev_req(wma_handle, vdev_id,
WMA_TARGET_REQ_TYPE_VDEV_STOP, false);
if (!req_msg)
- goto send_fail_rsp;
+ goto send_fail_rsp_and_trigger_recovery;
if (req_msg->msg_type != WMA_DELETE_BSS_REQ)
- goto send_fail_rsp;
+ goto send_fail_rsp_and_trigger_recovery;
WMA_LOGA("BSS is not yet stopped. Defering vdev(vdev id %x) deletion",
vdev_id);
iface->del_staself_req = pdel_sta_self_req_param;
@@ -681,8 +688,27 @@ QDF_STATUS wma_vdev_detach(tp_wma_handle wma_handle,
/* P2P Device */
if ((iface->type == WMI_VDEV_TYPE_AP) &&
(iface->sub_type == WMI_UNIFIED_VDEV_SUBTYPE_P2P_DEVICE)) {
- wma_self_peer_remove(wma_handle, pdel_sta_self_req_param,
- generateRsp);
+ status = wma_self_peer_remove(wma_handle,
+ pdel_sta_self_req_param, generateRsp);
+ if ((status != QDF_STATUS_SUCCESS) && generateRsp) {
+ WMA_LOGE("can't remove selfpeer, send rsp session: %d",
+ vdev_id);
+ if (!cds_is_driver_unloading()) {
+ WMA_LOGE("Trigger recovery for session: %d",
+ vdev_id);
+ goto send_fail_rsp_and_trigger_recovery;
+ } else {
+ WMA_LOGE("driver unload, free mem vdev_id: %d",
+ vdev_id);
+ goto send_fail_rsp;
+ }
+ } else if (status != QDF_STATUS_SUCCESS) {
+ WMA_LOGE("can't remove selfpeer, free msg session: %d",
+ vdev_id);
+ qdf_mem_free(pdel_sta_self_req_param);
+ pdel_sta_self_req_param = NULL;
+ return status;
+ }
if (!WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap,
WMI_SERVICE_SYNC_DELETE_CMDS))
status = wma_handle_vdev_detach(wma_handle,
@@ -697,7 +723,7 @@ QDF_STATUS wma_vdev_detach(tp_wma_handle wma_handle,
return status;
-send_fail_rsp:
+send_fail_rsp_and_trigger_recovery:
if (!cds_is_driver_recovering()) {
if (cds_is_self_recovery_enabled()) {
WMA_LOGE("rcvd del_self_sta without del_bss, trigger recovery, vdev_id %d",
@@ -710,8 +736,15 @@ send_fail_rsp:
}
}
- pdel_sta_self_req_param->status = QDF_STATUS_E_FAILURE;
- wma_send_del_sta_self_resp(pdel_sta_self_req_param);
+send_fail_rsp:
+ if (generateRsp) {
+ pdel_sta_self_req_param->status = QDF_STATUS_E_FAILURE;
+ wma_send_del_sta_self_resp(pdel_sta_self_req_param);
+ } else {
+ qdf_mem_free(pdel_sta_self_req_param);
+ pdel_sta_self_req_param = NULL;
+ }
+
return status;
}
@@ -1060,6 +1093,7 @@ int wma_vdev_start_resp_handler(void *handle, uint8_t *cmd_param_info,
if (req_msg->msg_type == WMA_CHNL_SWITCH_REQ) {
tpSwitchChannelParams params =
(tpSwitchChannelParams) req_msg->user_data;
+
if (!params) {
WMA_LOGE("%s: channel switch params is NULL for vdev %d",
__func__, resp_event->vdev_id);
@@ -1090,13 +1124,14 @@ int wma_vdev_start_resp_handler(void *handle, uint8_t *cmd_param_info,
false;
}
if (((resp_event->resp_type == WMI_VDEV_RESTART_RESP_EVENT) &&
- (iface->type == WMI_VDEV_TYPE_STA)) ||
+ ((iface->type == WMI_VDEV_TYPE_STA) ||
+ (iface->type == WMI_VDEV_TYPE_MONITOR))) ||
((resp_event->resp_type == WMI_VDEV_START_RESP_EVENT) &&
(iface->type == WMI_VDEV_TYPE_MONITOR))) {
+ /* for CSA case firmware expects phymode before ch_wd */
err = wma_set_peer_param(wma, iface->bssid,
WMI_PEER_PHYMODE, iface->chanmode,
resp_event->vdev_id);
-
WMA_LOGD("%s:vdev_id %d chanmode %d status %d",
__func__, resp_event->vdev_id,
iface->chanmode, err);
@@ -1105,34 +1140,26 @@ int wma_vdev_start_resp_handler(void *handle, uint8_t *cmd_param_info,
err = wma_set_peer_param(wma, iface->bssid,
WMI_PEER_CHWIDTH, chanwidth,
resp_event->vdev_id);
-
WMA_LOGD("%s:vdev_id %d chanwidth %d status %d",
__func__, resp_event->vdev_id,
chanwidth, err);
param.vdev_id = resp_event->vdev_id;
param.assoc_id = iface->aid;
-
- if (iface->vdev_up == true) {
- WMA_LOGD(FL("vdev id %d is already UP for %pM"),
- param.vdev_id, iface->bssid);
- status = QDF_STATUS_SUCCESS;
+ status = wma_send_vdev_up_to_fw(wma, &param,
+ iface->bssid);
+ if (QDF_IS_STATUS_ERROR(status)) {
+ WMA_LOGE("%s:vdev_up failed vdev_id %d",
+ __func__, resp_event->vdev_id);
+ iface->vdev_up = false;
+ WMA_LOGD(FL("Setting vdev_up flag to false"));
+ cds_set_do_hw_mode_change_flag(false);
} else {
- status = wmi_unified_vdev_up_send(wma->wmi_handle,
- iface->bssid,
- &param);
- if (QDF_IS_STATUS_ERROR(status)) {
- WMA_LOGE(FL("vdev_up failed vdev_id %d"),
- resp_event->vdev_id);
- wma->interfaces[resp_event->vdev_id].vdev_up =
- false;
- WMA_LOGD(FL("Setting vdev_up flag to false"));
- cds_set_do_hw_mode_change_flag(false);
- } else {
- wma->interfaces[resp_event->vdev_id].vdev_up =
- true;
- WMA_LOGD(FL("Setting vdev_up flag to true"));
- }
+ iface->vdev_up = true;
+ WMA_LOGD(FL("Setting vdev_up flag to true"));
+ if (iface->beacon_filter_enabled)
+ wma_add_beacon_filter(wma,
+ &iface->beacon_filter);
}
}
@@ -1147,21 +1174,14 @@ int wma_vdev_start_resp_handler(void *handle, uint8_t *cmd_param_info,
} else if (req_msg->msg_type == WMA_OCB_SET_CONFIG_CMD) {
param.vdev_id = resp_event->vdev_id;
param.assoc_id = iface->aid;
-
- if (iface->vdev_up == true) {
- WMA_LOGD(FL("vdev id %d is already UP for %pM"),
- param.vdev_id, iface->bssid);
- } else {
- if (wmi_unified_vdev_up_send(wma->wmi_handle,
- iface->bssid,
- &param) != QDF_STATUS_SUCCESS) {
- WMA_LOGE(FL("failed to send vdev up"));
- cds_set_do_hw_mode_change_flag(false);
- return -EEXIST;
- }
- iface->vdev_up = true;
- WMA_LOGD(FL("Setting vdev_up flag to true"));
+ if (wma_send_vdev_up_to_fw(wma, &param, iface->bssid) !=
+ QDF_STATUS_SUCCESS) {
+ WMA_LOGE(FL("failed to send vdev up"));
+ cds_set_do_hw_mode_change_flag(false);
+ return -EEXIST;
}
+ iface->vdev_up = true;
+ WMA_LOGD(FL("Setting vdev_up flag to true"));
wma_ocb_start_resp_ind_cont(wma);
}
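Both the channel-switch and OCB branches now go through wma_send_vdev_up_to_fw() instead of open-coding the "already up" check around wmi_unified_vdev_up_send(). The wrapper's body is not part of this hunk; a plausible sketch of what it is assumed to do (the parameter type name is also an assumption):

    /* Assumed shape of the wrapper; the real definition lives elsewhere. */
    QDF_STATUS wma_send_vdev_up_to_fw(tp_wma_handle wma,
                                      struct vdev_up_params *param,
                                      uint8_t *bssid)
    {
            struct wma_txrx_node *iface = &wma->interfaces[param->vdev_id];

            if (iface->vdev_up) {
                    WMA_LOGD(FL("vdev id %d is already UP for %pM"),
                             param->vdev_id, bssid);
                    return QDF_STATUS_SUCCESS;
            }
            return wmi_unified_vdev_up_send(wma->wmi_handle, bssid, param);
    }

On success the callers still set iface->vdev_up themselves, as shown above.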
@@ -1192,11 +1212,12 @@ bool wma_is_vdev_valid(uint32_t vdev_id)
return false;
}
- /* No of interface are allocated based on max_bssid value */
- if (vdev_id >= wma_handle->max_bssid)
- return false;
+ WMA_LOGD("%s: vdev_id: %d, vdev_active: %d, is_vdev_valid %d",
+ __func__, vdev_id, wma_handle->interfaces[vdev_id].vdev_active,
+ wma_handle->interfaces[vdev_id].is_vdev_valid);
- return wma_handle->interfaces[vdev_id].vdev_active;
+ return wma_handle->interfaces[vdev_id].vdev_active ||
+ wma_handle->interfaces[vdev_id].is_vdev_valid;
}
/**
@@ -1630,53 +1651,53 @@ static void wma_cleanup_target_req_param(struct wma_target_req *tgt_req)
}
/**
- * get_fw_active_bpf_mode() - convert HDD BPF mode to FW configurable BPF
+ * get_fw_active_apf_mode() - convert HDD APF mode to FW configurable APF
* mode
- * @mode: BPF mode maintained in HDD
+ * @mode: APF mode maintained in HDD
*
* Return: FW configurable BP mode
*/
-static FW_ACTIVE_BPF_MODE get_fw_active_bpf_mode(enum active_bpf_mode mode)
+static FW_ACTIVE_BPF_MODE get_fw_active_apf_mode(enum active_apf_mode mode)
{
- FW_ACTIVE_BPF_MODE fw_bpf_mode;
+ FW_ACTIVE_BPF_MODE fw_apf_mode;
switch (mode) {
- case ACTIVE_BPF_DISABLED:
- fw_bpf_mode = FW_ACTIVE_BPF_MODE_DISABLE;
+ case ACTIVE_APF_DISABLED:
+ fw_apf_mode = FW_ACTIVE_BPF_MODE_DISABLE;
break;
- case ACTIVE_BPF_ENABLED:
- fw_bpf_mode = FW_ACTIVE_BPF_MODE_FORCE_ENABLE;
+ case ACTIVE_APF_ENABLED:
+ fw_apf_mode = FW_ACTIVE_BPF_MODE_FORCE_ENABLE;
break;
- case ACTIVE_BPF_ADAPTIVE:
- fw_bpf_mode = FW_ACTIVE_BPF_MODE_ADAPTIVE_ENABLE;
+ case ACTIVE_APF_ADAPTIVE:
+ fw_apf_mode = FW_ACTIVE_BPF_MODE_ADAPTIVE_ENABLE;
break;
default:
- WMA_LOGE("Invalid Active BPF Mode %d; Using 'disabled'", mode);
- fw_bpf_mode = FW_ACTIVE_BPF_MODE_DISABLE;
+ WMA_LOGE("Invalid Active APF Mode %d; Using 'disabled'", mode);
+ fw_apf_mode = FW_ACTIVE_BPF_MODE_DISABLE;
break;
}
- return fw_bpf_mode;
+ return fw_apf_mode;
}
/**
- * wma_config_active_bpf_mode() - Config active BPF mode in FW
+ * wma_config_active_apf_mode() - Config active APF mode in FW
* @wma: the WMA handle
* @vdev_id: the Id of the vdev for which the configuration should be applied
*
* Return: QDF status
*/
-static QDF_STATUS wma_config_active_bpf_mode(t_wma_handle *wma, uint8_t vdev_id)
+static QDF_STATUS wma_config_active_apf_mode(t_wma_handle *wma, uint8_t vdev_id)
{
FW_ACTIVE_BPF_MODE uc_mode, mcbc_mode;
- uc_mode = get_fw_active_bpf_mode(wma->active_uc_bpf_mode);
- mcbc_mode = get_fw_active_bpf_mode(wma->active_mc_bc_bpf_mode);
+ uc_mode = get_fw_active_apf_mode(wma->active_uc_apf_mode);
+ mcbc_mode = get_fw_active_apf_mode(wma->active_mc_bc_apf_mode);
- WMA_LOGD("Configuring Active BPF Mode UC:%d MC/BC:%d for vdev %u",
+ WMA_LOGD("Configuring Active APF Mode UC:%d MC/BC:%d for vdev %u",
uc_mode, mcbc_mode, vdev_id);
- return wmi_unified_set_active_bpf_mode_cmd(wma->wmi_handle, vdev_id,
+ return wmi_unified_set_active_apf_mode_cmd(wma->wmi_handle, vdev_id,
uc_mode, mcbc_mode);
}
@@ -1799,6 +1820,57 @@ wma_send_del_bss_response(tp_wma_handle wma, struct wma_target_req *req,
}
}
+static QDF_STATUS
+wma_remove_peer_by_reference(ol_txrx_pdev_handle pdev,
+ tp_wma_handle wma,
+ void *params,
+ uint8_t *peer_id,
+ uint8_t *bssid,
+ uint8_t vdev_id,
+ uint8_t peer_rsp_type)
+{
+ ol_txrx_peer_handle peer;
+ struct wma_target_req *del_req;
+ QDF_STATUS status;
+
+ status = QDF_STATUS_SUCCESS;
+ peer = ol_txrx_find_peer_by_addr_inc_ref(pdev,
+ bssid,
+ peer_id);
+ if (!peer) {
+ WMA_LOGD("%s Failed to find peer %pM",
+ __func__, bssid);
+ status = QDF_STATUS_E_FAULT;
+ return status;
+ }
+
+ WMA_LOGI(FL("Deleting peer %pM vdev id %d"),
+ bssid, vdev_id);
+
+ wma_remove_peer(wma, bssid, vdev_id,
+ peer, false);
+
+ if (WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap,
+ WMI_SERVICE_SYNC_DELETE_CMDS)) {
+ WMA_LOGD(FL("Wait for the peer delete. vdev_id %d"),
+ vdev_id);
+ del_req = wma_fill_hold_req(wma,
+ vdev_id,
+ WMA_DELETE_STA_REQ,
+ peer_rsp_type,
+ params,
+ WMA_DELETE_STA_TIMEOUT);
+ if (!del_req) {
+ WMA_LOGE(FL("Failed to allocate request. vdev_id %d"),
+ vdev_id);
+ status = QDF_STATUS_E_NOMEM;
+ }
+ }
+
+ OL_TXRX_PEER_UNREF_DELETE(peer);
+
+ return status;
+}
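wma_remove_peer_by_reference() folds the duplicated find-peer / remove-peer / hold-request sequence from the WMA_DELETE_BSS_REQ and WMA_SET_LINK_STATE branches into one helper, and switches to the reference-counted lookup so the peer object stays valid for the duration of the removal (the reference is dropped with OL_TXRX_PEER_UNREF_DELETE() before returning). The call-site shape used by both branches later in this patch is:

    /* Call-site pattern (taken from the WMA_SET_LINK_STATE branch below). */
    result = wma_remove_peer_by_reference(pdev, wma, params, &peer_id,
                                          params->bssid, req_msg->vdev_id,
                                          WMA_SET_LINK_PEER_RSP);
    if (result == QDF_STATUS_SUCCESS)
            goto free_req_msg; /* peer handled; skip the immediate teardown */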
/**
* wma_vdev_stop_resp_handler() - vdev stop response handler
@@ -1814,12 +1886,12 @@ int wma_vdev_stop_resp_handler(void *handle, uint8_t *cmd_param_info,
tp_wma_handle wma = (tp_wma_handle) handle;
WMI_VDEV_STOPPED_EVENTID_param_tlvs *param_buf;
wmi_vdev_stopped_event_fixed_param *resp_event;
- struct wma_target_req *req_msg, *del_req, *new_req_msg;
- ol_txrx_peer_handle peer;
+ struct wma_target_req *req_msg, *new_req_msg;
ol_txrx_pdev_handle pdev;
uint8_t peer_id;
struct wma_txrx_node *iface;
int32_t status = 0;
+ QDF_STATUS result;
WMA_LOGD("%s: Enter", __func__);
param_buf = (WMI_VDEV_STOPPED_EVENTID_param_tlvs *) cmd_param_info;
@@ -1829,6 +1901,13 @@ int wma_vdev_stop_resp_handler(void *handle, uint8_t *cmd_param_info,
}
resp_event = param_buf->fixed_param;
+
+ if (resp_event->vdev_id >= wma->max_bssid) {
+ WMA_LOGE("%s: Invalid vdev_id %d from FW",
+ __func__, resp_event->vdev_id);
+ return -EINVAL;
+ }
+
iface = &wma->interfaces[resp_event->vdev_id];
wma_release_wakelock(&iface->vdev_stop_wakelock);
@@ -1840,8 +1919,7 @@ int wma_vdev_stop_resp_handler(void *handle, uint8_t *cmd_param_info,
return -EINVAL;
}
- if ((resp_event->vdev_id < wma->max_bssid) &&
- (qdf_atomic_read
+ if ((qdf_atomic_read
(&wma->interfaces[resp_event->vdev_id].vdev_restart_params.
hidden_ssid_restart_in_progress))
&& ((wma->interfaces[resp_event->vdev_id].type == WMI_VDEV_TYPE_AP)
@@ -1880,14 +1958,6 @@ int wma_vdev_stop_resp_handler(void *handle, uint8_t *cmd_param_info,
tpDeleteBssParams params =
(tpDeleteBssParams) req_msg->user_data;
- if (resp_event->vdev_id >= wma->max_bssid) {
- WMA_LOGE("%s: Invalid vdev_id %d", __func__,
- resp_event->vdev_id);
- wma_cleanup_target_req_param(req_msg);
- status = -EINVAL;
- goto free_req_msg;
- }
-
if (iface->handle == NULL) {
WMA_LOGE("%s vdev id %d is already deleted",
__func__, resp_event->vdev_id);
@@ -1917,32 +1987,16 @@ int wma_vdev_stop_resp_handler(void *handle, uint8_t *cmd_param_info,
wma_delete_all_ap_remote_peers(wma,
resp_event->vdev_id);
}
- peer = ol_txrx_find_peer_by_addr(pdev, params->bssid,
- &peer_id);
- if (!peer)
- WMA_LOGD("%s Failed to find peer %pM",
- __func__, params->bssid);
- wma_remove_peer(wma, params->bssid, resp_event->vdev_id,
- peer, false);
- if (peer && WMI_SERVICE_IS_ENABLED(
- wma->wmi_service_bitmap,
- WMI_SERVICE_SYNC_DELETE_CMDS)) {
- WMA_LOGD(FL("Wait for the peer delete. vdev_id %d"),
- req_msg->vdev_id);
- del_req = wma_fill_hold_req(wma,
- req_msg->vdev_id,
- WMA_DELETE_STA_REQ,
- WMA_DELETE_PEER_RSP,
- params,
- WMA_DELETE_STA_TIMEOUT);
- if (!del_req) {
- WMA_LOGE(FL("Failed to allocate request. vdev_id %d"),
- req_msg->vdev_id);
- params->status = QDF_STATUS_E_NOMEM;
- } else {
- goto free_req_msg;
- }
- }
+ result = wma_remove_peer_by_reference(pdev,
+ wma, params,
+ &peer_id,
+ params->bssid,
+ resp_event->vdev_id,
+ WMA_DELETE_PEER_RSP);
+
+ if (result == QDF_STATUS_SUCCESS)
+ goto free_req_msg;
+
}
wma_send_del_bss_response(wma, req_msg, resp_event->vdev_id);
@@ -1950,31 +2004,15 @@ int wma_vdev_stop_resp_handler(void *handle, uint8_t *cmd_param_info,
tpLinkStateParams params =
(tpLinkStateParams) req_msg->user_data;
- peer = ol_txrx_find_peer_by_addr(pdev, params->bssid, &peer_id);
- if (peer) {
- WMA_LOGE(FL("Deleting peer %pM vdev id %d"),
- params->bssid, req_msg->vdev_id);
- wma_remove_peer(wma, params->bssid, req_msg->vdev_id,
- peer, false);
- if (WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap,
- WMI_SERVICE_SYNC_DELETE_CMDS)) {
- WMA_LOGD(FL("Wait for the peer delete. vdev_id %d"),
- req_msg->vdev_id);
- del_req = wma_fill_hold_req(wma,
- req_msg->vdev_id,
- WMA_DELETE_STA_REQ,
- WMA_SET_LINK_PEER_RSP,
- params,
- WMA_DELETE_STA_TIMEOUT);
- if (!del_req) {
- WMA_LOGE(FL("Failed to allocate request. vdev_id %d"),
- req_msg->vdev_id);
- params->status = QDF_STATUS_E_NOMEM;
- } else {
- goto free_req_msg;
- }
- }
- }
+ result = wma_remove_peer_by_reference(pdev, wma, params,
+ &peer_id,
+ params->bssid,
+ req_msg->vdev_id,
+ WMA_SET_LINK_PEER_RSP);
+
+ if (result == QDF_STATUS_SUCCESS)
+ goto free_req_msg;
+
if (wma_send_vdev_down_to_fw(wma, req_msg->vdev_id) !=
QDF_STATUS_SUCCESS) {
WMA_LOGE("Failed to send vdev down cmd: vdev %d",
@@ -2320,10 +2358,10 @@ ol_txrx_vdev_handle wma_vdev_attach(tp_wma_handle wma_handle,
wma_register_wow_default_patterns(wma_handle, self_sta_req->session_id);
if (self_sta_req->type == WMI_VDEV_TYPE_STA) {
- status = wma_config_active_bpf_mode(wma_handle,
+ status = wma_config_active_apf_mode(wma_handle,
self_sta_req->session_id);
if (QDF_IS_STATUS_ERROR(status))
- WMA_LOGE("Failed to configure active BPF mode");
+ WMA_LOGE("Failed to configure active APF mode");
}
end:
@@ -2437,8 +2475,7 @@ QDF_STATUS wma_vdev_start(tp_wma_handle wma,
params.band_center_freq2 = 0;
ch_width = CH_WIDTH_80MHZ;
}
- }
- else {
+ } else {
params.band_center_freq2 = 0;
}
chan_mode = wma_chan_phy_mode(req->chan, ch_width,
@@ -2449,6 +2486,16 @@ QDF_STATUS wma_vdev_start(tp_wma_handle wma,
return QDF_STATUS_E_FAILURE;
}
+ if (!params.band_center_freq1) {
+ WMA_LOGE("%s: invalid center freq1", __func__);
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ if (((ch_width == CH_WIDTH_160MHZ) || (ch_width == CH_WIDTH_80P80MHZ))
+ && !params.band_center_freq2) {
+ WMA_LOGE("%s: invalid center freq2 for 160MHz", __func__);
+ return QDF_STATUS_E_FAILURE;
+ }
/* Fill channel info */
params.chan_freq = cds_chan_to_freq(req->chan);
params.chan_mode = chan_mode;
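The added checks reject a start request whose primary segment centre frequency was never filled in, and require a second segment centre frequency whenever the channel width is 160 MHz or 80+80 MHz. The rule in isolation (hypothetical helper, not driver code, reusing the driver's CH_WIDTH_* values):

    static bool start_chan_freqs_valid(uint32_t ch_width,
                                       uint32_t cfreq1, uint32_t cfreq2)
    {
            if (!cfreq1)
                    return false;           /* primary segment is mandatory */
            if (ch_width == CH_WIDTH_160MHZ || ch_width == CH_WIDTH_80P80MHZ)
                    return cfreq2 != 0;     /* both segments are needed */
            return true;
    }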
@@ -2506,6 +2553,7 @@ QDF_STATUS wma_vdev_start(tp_wma_handle wma,
CFG_TGT_DEFAULT_GTX_BW_MASK;
intr[params.vdev_id].mhz = params.chan_freq;
intr[params.vdev_id].chan_width = ch_width;
+ intr[params.vdev_id].channel = req->chan;
temp_chan_info &= 0xffffffc0;
temp_chan_info |= params.chan_mode;
@@ -2815,13 +2863,13 @@ int wma_vdev_delete_handler(void *handle, uint8_t *cmd_param_info,
event->vdev_id);
return -EINVAL;
}
+ qdf_mc_timer_stop(&req_msg->event_timeout);
+ qdf_mc_timer_destroy(&req_msg->event_timeout);
wma_release_wakelock(&wma->wmi_cmd_rsp_wake_lock);
/* Send response to upper layers */
wma_vdev_detach_callback(req_msg->user_data);
- qdf_mc_timer_stop(&req_msg->event_timeout);
- qdf_mc_timer_destroy(&req_msg->event_timeout);
qdf_mem_free(req_msg);
return status;
@@ -2901,9 +2949,8 @@ int wma_peer_delete_handler(void *handle, uint8_t *cmd_param_info,
struct wma_txrx_node *iface;
iface = &wma->interfaces[req_msg->vdev_id];
- if (wmi_unified_vdev_down_send(wma->wmi_handle,
- req_msg->vdev_id) !=
- QDF_STATUS_SUCCESS) {
+ if (wma_send_vdev_down_to_fw(wma, req_msg->vdev_id) !=
+ QDF_STATUS_SUCCESS) {
WMA_LOGE("Failed to send vdev down cmd: vdev %d",
req_msg->vdev_id);
} else {
@@ -3177,6 +3224,15 @@ void wma_vdev_resp_timer(void *data)
goto free_tgt_req;
}
+#ifdef FEATURE_AP_MCC_CH_AVOIDANCE
+ mac_ctx = cds_get_context(QDF_MODULE_ID_PE);
+ if (!mac_ctx) {
+ WMA_LOGE("%s: Failed to get mac_ctx", __func__);
+ wma_cleanup_target_req_param(tgt_req);
+ goto free_tgt_req;
+ }
+#endif /* FEATURE_AP_MCC_CH_AVOIDANCE */
+
iface = &wma->interfaces[tgt_req->vdev_id];
if (tgt_req->msg_type == WMA_CHNL_SWITCH_REQ) {
tpSwitchChannelParams params =
@@ -4760,6 +4816,41 @@ static void wma_add_sta_req_sta_mode(tp_wma_handle wma, tpAddStaParams params)
if (params->enableAmpduPs && (params->htCapable || params->vhtCapable))
wma_set_ppsconfig(params->smesessionId,
WMA_VHT_PPS_DELIM_CRC_FAIL, 1);
+ if (WMI_SERVICE_EXT_IS_ENABLED(wma->wmi_service_bitmap,
+ wma->wmi_service_ext_bitmap,
+ WMI_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT)) {
+ WMA_LOGD("%s: listen interval offload enabled, setting params",
+ __func__);
+ status = wma_vdev_set_param(wma->wmi_handle,
+ params->smesessionId,
+ WMI_VDEV_PARAM_MAX_LI_OF_MODDTIM,
+ wma->staMaxLIModDtim);
+ if (status != QDF_STATUS_SUCCESS) {
+ WMA_LOGE(FL("can't set MAX_LI for session: %d"),
+ params->smesessionId);
+ }
+ status = wma_vdev_set_param(wma->wmi_handle,
+ params->smesessionId,
+ WMI_VDEV_PARAM_DYNDTIM_CNT,
+ wma->staDynamicDtim);
+ if (status != QDF_STATUS_SUCCESS) {
+ WMA_LOGE(FL("can't set DYNDTIM_CNT for session: %d"),
+ params->smesessionId);
+ }
+ status = wma_vdev_set_param(wma->wmi_handle,
+ params->smesessionId,
+ WMI_VDEV_PARAM_MODDTIM_CNT,
+ wma->staModDtim);
+ if (status != QDF_STATUS_SUCCESS) {
+ WMA_LOGE(FL("can't set DTIM_CNT for session: %d"),
+ params->smesessionId);
+ }
+
+ } else {
+ WMA_LOGD("%s: listen interval offload is not set",
+ __func__);
+ }
+
iface->aid = params->assocId;
params->nss = iface->nss;
out:
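When the firmware advertises WMI_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT, the add-STA path above hands the listen-interval policy to the firmware by programming three per-vdev parameters; the same service bit later lets wma_set_suspend_dtim()/wma_set_resume_dtim() skip the host-side DTIM override (see the wma_power.c hunks further down). Condensed into one sketch (error handling trimmed, values taken from the wma handle as in the patch):

    /* Sketch: push the listen-interval offload knobs for one vdev. */
    static void wma_push_li_offload_params(tp_wma_handle wma, uint8_t vdev_id)
    {
            wma_vdev_set_param(wma->wmi_handle, vdev_id,
                               WMI_VDEV_PARAM_MAX_LI_OF_MODDTIM,
                               wma->staMaxLIModDtim);
            wma_vdev_set_param(wma->wmi_handle, vdev_id,
                               WMI_VDEV_PARAM_DYNDTIM_CNT,
                               wma->staDynamicDtim);
            wma_vdev_set_param(wma->wmi_handle, vdev_id,
                               WMI_VDEV_PARAM_MODDTIM_CNT,
                               wma->staModDtim);
    }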
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_features.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_features.c
index e922c9a521ae..1e5a69913db6 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_features.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_features.c
@@ -7388,28 +7388,18 @@ QDF_STATUS wma_enable_arp_ns_offload(tp_wma_handle wma,
}
QDF_STATUS wma_conf_hw_filter_mode(tp_wma_handle wma,
- struct hw_filter_request *req)
+ struct wmi_hw_filter_req_params *req)
{
QDF_STATUS status;
- uint8_t vdev_id;
-
- /* Get the vdev id */
- if (!wma_find_vdev_by_bssid(wma, req->bssid.bytes, &vdev_id)) {
- WMA_LOGE("vdev handle is invalid for %pM",
- req->bssid.bytes);
- qdf_mem_free(req);
- return QDF_STATUS_E_INVAL;
- }
- if (!wma->interfaces[vdev_id].vdev_up) {
+ if (!wma->interfaces[req->vdev_id].vdev_up) {
WMA_LOGE("vdev %d is not up skipping enable Broadcast Filter",
- vdev_id);
+ req->vdev_id);
qdf_mem_free(req);
return QDF_STATUS_E_FAILURE;
}
- status = wmi_unified_conf_hw_filter_mode_cmd(wma->wmi_handle, vdev_id,
- req->mode_bitmap);
+ status = wmi_unified_conf_hw_filter_mode_cmd(wma->wmi_handle, req);
if (QDF_IS_STATUS_ERROR(status))
WMA_LOGE("Failed to enable/disable Broadcast Filter");
@@ -8443,6 +8433,7 @@ failure:
static void wma_check_and_set_wake_timer(tp_wma_handle wma, uint32_t time)
{
int i;
+ bool is_set_key_in_progress = false;
struct wma_txrx_node *iface;
if (!WMI_SERVICE_EXT_IS_ENABLED(wma->wmi_service_bitmap,
@@ -8459,10 +8450,14 @@ static void wma_check_and_set_wake_timer(tp_wma_handle wma, uint32_t time)
* right now cookie is dont care, since FW disregards
* that.
*/
+ is_set_key_in_progress = true;
wma_wow_set_wake_time((WMA_HANDLE)wma, i, 0, time);
break;
}
}
+
+ if (!is_set_key_in_progress)
+ WMA_LOGD("set key not in progress for any vdev");
}
/**
@@ -9778,21 +9773,13 @@ QDF_STATUS wma_process_set_ie_info(tp_wma_handle wma,
return ret;
}
-/**
- * wma_get_bpf_caps_event_handler() - Event handler for get bpf capability
- * @handle: WMA global handle
- * @cmd_param_info: command event data
- * @len: Length of @cmd_param_info
- *
- * Return: 0 on Success or Errno on failure
- */
-int wma_get_bpf_caps_event_handler(void *handle,
+int wma_get_apf_caps_event_handler(void *handle,
u_int8_t *cmd_param_info,
u_int32_t len)
{
WMI_BPF_CAPABILIY_INFO_EVENTID_param_tlvs *param_buf;
wmi_bpf_capability_info_evt_fixed_param *event;
- struct sir_bpf_get_offload *bpf_get_offload;
+ struct sir_apf_get_offload *apf_get_offload;
tpAniSirGlobal pmac = (tpAniSirGlobal)cds_get_context(
QDF_MODULE_ID_PE);
@@ -9800,41 +9787,35 @@ int wma_get_bpf_caps_event_handler(void *handle,
WMA_LOGE("%s: Invalid pmac", __func__);
return -EINVAL;
}
- if (!pmac->sme.pbpf_get_offload_cb) {
+ if (!pmac->sme.papf_get_offload_cb) {
WMA_LOGE("%s: Callback not registered", __func__);
return -EINVAL;
}
param_buf = (WMI_BPF_CAPABILIY_INFO_EVENTID_param_tlvs *)cmd_param_info;
event = param_buf->fixed_param;
- bpf_get_offload = qdf_mem_malloc(sizeof(*bpf_get_offload));
+ apf_get_offload = qdf_mem_malloc(sizeof(*apf_get_offload));
- if (!bpf_get_offload) {
+ if (!apf_get_offload) {
WMA_LOGP("%s: Memory allocation failed.", __func__);
return -ENOMEM;
}
- bpf_get_offload->bpf_version = event->bpf_version;
- bpf_get_offload->max_bpf_filters = event->max_bpf_filters;
- bpf_get_offload->max_bytes_for_bpf_inst =
+ apf_get_offload->apf_version = event->bpf_version;
+ apf_get_offload->max_apf_filters = event->max_bpf_filters;
+ apf_get_offload->max_bytes_for_apf_inst =
event->max_bytes_for_bpf_inst;
- WMA_LOGD("%s: BPF capabilities version: %d max bpf filter size: %d",
- __func__, bpf_get_offload->bpf_version,
- bpf_get_offload->max_bytes_for_bpf_inst);
+ WMA_LOGD("%s: APF capabilities version: %d max apf filter size: %d",
+ __func__, apf_get_offload->apf_version,
+ apf_get_offload->max_bytes_for_apf_inst);
- WMA_LOGD("%s: sending bpf capabilities event to hdd", __func__);
- pmac->sme.pbpf_get_offload_cb(pmac->hHdd, bpf_get_offload);
- qdf_mem_free(bpf_get_offload);
+ WMA_LOGD("%s: sending apf capabilities event to hdd", __func__);
+ pmac->sme.papf_get_offload_cb(pmac->hHdd, apf_get_offload);
+ qdf_mem_free(apf_get_offload);
return 0;
}
-/**
- * wma_get_bpf_capabilities - Send get bpf capability to firmware
- * @wma_handle: wma handle
- *
- * Return: QDF_STATUS enumeration.
- */
-QDF_STATUS wma_get_bpf_capabilities(tp_wma_handle wma)
+QDF_STATUS wma_get_apf_capabilities(tp_wma_handle wma)
{
QDF_STATUS status = QDF_STATUS_SUCCESS;
wmi_bpf_get_capability_cmd_fixed_param *cmd;
@@ -9843,13 +9824,13 @@ QDF_STATUS wma_get_bpf_capabilities(tp_wma_handle wma)
u_int8_t *buf_ptr;
if (!wma || !wma->wmi_handle) {
- WMA_LOGE(FL("WMA is closed, can not issue get BPF capab"));
+ WMA_LOGE(FL("WMA is closed, can not issue get APF capab"));
return QDF_STATUS_E_INVAL;
}
if (!WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap,
WMI_SERVICE_BPF_OFFLOAD)) {
- WMA_LOGE(FL("BPF cababilities feature bit not enabled"));
+ WMA_LOGE(FL("APF cababilities feature bit not enabled"));
return QDF_STATUS_E_FAILURE;
}
@@ -9869,22 +9850,15 @@ QDF_STATUS wma_get_bpf_capabilities(tp_wma_handle wma)
if (wmi_unified_cmd_send(wma->wmi_handle, wmi_buf, len,
WMI_BPF_GET_CAPABILITY_CMDID)) {
- WMA_LOGE(FL("Failed to send BPF capability command"));
+ WMA_LOGE(FL("Failed to send APF capability command"));
wmi_buf_free(wmi_buf);
return QDF_STATUS_E_FAILURE;
}
return status;
}
-/**
- * wma_set_bpf_instructions - Set bpf instructions to firmware
- * @wma: wma handle
- * @bpf_set_offload: Bpf offload information to set to firmware
- *
- * Return: QDF_STATUS enumeration
- */
-QDF_STATUS wma_set_bpf_instructions(tp_wma_handle wma,
- struct sir_bpf_set_offload *bpf_set_offload)
+QDF_STATUS wma_set_apf_instructions(tp_wma_handle wma,
+ struct sir_apf_set_offload *apf_set_offload)
{
wmi_bpf_set_vdev_instructions_cmd_fixed_param *cmd;
wmi_buf_t wmi_buf;
@@ -9892,36 +9866,36 @@ QDF_STATUS wma_set_bpf_instructions(tp_wma_handle wma,
u_int8_t *buf_ptr;
if (!wma || !wma->wmi_handle) {
- WMA_LOGE("%s: WMA is closed, can not issue set BPF capability",
+ WMA_LOGE("%s: WMA is closed, can not issue set APF capability",
__func__);
return QDF_STATUS_E_INVAL;
}
if (!WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap,
WMI_SERVICE_BPF_OFFLOAD)) {
- WMA_LOGE(FL("BPF offload feature Disabled"));
+ WMA_LOGE(FL("APF offload feature Disabled"));
return QDF_STATUS_E_NOSUPPORT;
}
- if (!bpf_set_offload) {
- WMA_LOGE("%s: Invalid BPF instruction request", __func__);
+ if (!apf_set_offload) {
+ WMA_LOGE("%s: Invalid APF instruction request", __func__);
return QDF_STATUS_E_INVAL;
}
- if (bpf_set_offload->session_id >= wma->max_bssid) {
+ if (apf_set_offload->session_id >= wma->max_bssid) {
WMA_LOGE(FL("Invalid vdev_id: %d"),
- bpf_set_offload->session_id);
+ apf_set_offload->session_id);
return QDF_STATUS_E_INVAL;
}
- if (!wma->interfaces[bpf_set_offload->session_id].vdev_up) {
- WMA_LOGE("vdev %d is not up skipping BPF offload",
- bpf_set_offload->session_id);
+ if (!wma->interfaces[apf_set_offload->session_id].vdev_up) {
+ WMA_LOGE("vdev %d is not up skipping APF offload",
+ apf_set_offload->session_id);
return QDF_STATUS_E_INVAL;
}
- if (bpf_set_offload->total_length) {
- len_aligned = roundup(bpf_set_offload->current_length,
+ if (apf_set_offload->total_length) {
+ len_aligned = roundup(apf_set_offload->current_length,
sizeof(A_UINT32));
len = len_aligned + WMI_TLV_HDR_SIZE;
}
@@ -9940,32 +9914,165 @@ QDF_STATUS wma_set_bpf_instructions(tp_wma_handle wma,
WMITLV_TAG_STRUC_wmi_bpf_set_vdev_instructions_cmd_fixed_param,
WMITLV_GET_STRUCT_TLVLEN(
wmi_bpf_set_vdev_instructions_cmd_fixed_param));
- cmd->vdev_id = bpf_set_offload->session_id;
- cmd->filter_id = bpf_set_offload->filter_id;
- cmd->total_length = bpf_set_offload->total_length;
- cmd->current_offset = bpf_set_offload->current_offset;
- cmd->current_length = bpf_set_offload->current_length;
+ cmd->vdev_id = apf_set_offload->session_id;
+ cmd->filter_id = apf_set_offload->filter_id;
+ cmd->total_length = apf_set_offload->total_length;
+ cmd->current_offset = apf_set_offload->current_offset;
+ cmd->current_length = apf_set_offload->current_length;
- if (bpf_set_offload->total_length) {
+ if (apf_set_offload->total_length) {
buf_ptr +=
sizeof(wmi_bpf_set_vdev_instructions_cmd_fixed_param);
WMITLV_SET_HDR(buf_ptr, WMITLV_TAG_ARRAY_BYTE, len_aligned);
buf_ptr += WMI_TLV_HDR_SIZE;
- qdf_mem_copy(buf_ptr, bpf_set_offload->program,
- bpf_set_offload->current_length);
+ qdf_mem_copy(buf_ptr, apf_set_offload->program,
+ apf_set_offload->current_length);
}
if (wmi_unified_cmd_send(wma->wmi_handle, wmi_buf, len,
WMI_BPF_SET_VDEV_INSTRUCTIONS_CMDID)) {
- WMA_LOGE(FL("Failed to send config bpf instructions command"));
+ WMA_LOGE(FL("Failed to send config apf instructions command"));
wmi_buf_free(wmi_buf);
return QDF_STATUS_E_FAILURE;
}
- WMA_LOGD(FL("BPF offload enabled in fw"));
+ WMA_LOGD(FL("APF offload enabled in fw"));
return QDF_STATUS_SUCCESS;
}
+QDF_STATUS wma_send_apf_enable_cmd(WMA_HANDLE handle, uint8_t vdev_id,
+ bool apf_enable)
+{
+ QDF_STATUS status = QDF_STATUS_SUCCESS;
+ tp_wma_handle wma = (tp_wma_handle) handle;
+
+ if (!wma || !wma->wmi_handle) {
+ WMA_LOGE(FL("WMA is closed, can not issue get APF capab"));
+ return QDF_STATUS_E_INVAL;
+ }
+
+ if (!WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap,
+ WMI_SERVICE_BPF_OFFLOAD)) {
+ WMA_LOGE(FL("APF cababilities feature bit not enabled"));
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ status = wmi_unified_send_apf_enable_cmd(wma->wmi_handle, vdev_id,
+ apf_enable);
+ if (QDF_IS_STATUS_ERROR(status)) {
+ WMA_LOGE("Failed to send apf enable/disable cmd");
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ if (apf_enable)
+ WMA_LOGD("Sent APF Enable on vdevid: %d", vdev_id);
+ else
+ WMA_LOGD("Sent APF Disable on vdevid: %d", vdev_id);
+
+ return status;
+}
+
+QDF_STATUS
+wma_send_apf_write_work_memory_cmd(WMA_HANDLE handle,
+ struct wmi_apf_write_memory_params *write_params)
+{
+ QDF_STATUS status = QDF_STATUS_SUCCESS;
+ tp_wma_handle wma = (tp_wma_handle) handle;
+
+ if (!wma || !wma->wmi_handle) {
+ WMA_LOGE(FL("WMA is closed, can not issue write APF mem"));
+ return QDF_STATUS_E_INVAL;
+ }
+
+ if (!WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap,
+ WMI_SERVICE_BPF_OFFLOAD)) {
+ WMA_LOGE(FL("APF cababilities feature bit not enabled"));
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ if (wmi_unified_send_apf_write_work_memory_cmd(wma->wmi_handle,
+ write_params)) {
+ WMA_LOGE(FL("Failed to send APF write mem command"));
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ WMA_LOGD("Sent APF wite mem on vdevid: %d", write_params->vdev_id);
+ return status;
+}
+
+int wma_apf_read_work_memory_event_handler(void *handle, uint8_t *evt_buf,
+ uint32_t len)
+{
+ tp_wma_handle wma_handle;
+ wmi_unified_t wmi_handle;
+ struct wmi_apf_read_memory_resp_event_params evt_params = {0};
+ QDF_STATUS status;
+ tpAniSirGlobal pmac = cds_get_context(QDF_MODULE_ID_PE);
+
+ WMA_LOGI(FL("handle:%pK event:%pK len:%u"), handle, evt_buf, len);
+
+ wma_handle = handle;
+ if (!wma_handle) {
+ WMA_LOGE(FL("NULL wma_handle"));
+ return -EINVAL;
+ }
+
+ wmi_handle = wma_handle->wmi_handle;
+ if (!wmi_handle) {
+ WMA_LOGE(FL("NULL wmi_handle"));
+ return -EINVAL;
+ }
+
+ if (!pmac) {
+ WMA_LOGE(FL("Invalid pmac"));
+ return -EINVAL;
+ }
+
+ if (!pmac->sme.apf_read_mem_cb) {
+ WMA_LOGE(FL("Callback not registered"));
+ return -EINVAL;
+ }
+
+ status = wmi_extract_apf_read_memory_resp_event(wmi_handle,
+ evt_buf, &evt_params);
+ if (QDF_IS_STATUS_ERROR(status)) {
+ WMA_LOGE(FL("Event extract failure: %d"), status);
+ return -EINVAL;
+ }
+
+ pmac->sme.apf_read_mem_cb(pmac->hHdd, &evt_params);
+
+ return 0;
+}
+
+QDF_STATUS wma_send_apf_read_work_memory_cmd(WMA_HANDLE handle,
+ struct wmi_apf_read_memory_params
+ *read_params)
+{
+ QDF_STATUS status = QDF_STATUS_SUCCESS;
+ tp_wma_handle wma = (tp_wma_handle) handle;
+
+ if (!wma || !wma->wmi_handle) {
+ WMA_LOGE(FL("WMA is closed, can not issue read APF memory"));
+ return QDF_STATUS_E_INVAL;
+ }
+
+ if (!WMI_SERVICE_IS_ENABLED(wma->wmi_service_bitmap,
+ WMI_SERVICE_BPF_OFFLOAD)) {
+ WMA_LOGE(FL("APF cababilities feature bit not enabled"));
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ if (wmi_unified_send_apf_read_work_memory_cmd(wma->wmi_handle,
+ read_params)) {
+ WMA_LOGE(FL("Failed to send APF read memory command"));
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ WMA_LOGD("Sent APF read memory on vdevid: %d", read_params->vdev_id);
+ return status;
+}
+
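Together with wma_set_apf_instructions(), the new entry points above form the APF work-memory interface: enable or disable the interpreter per vdev, write a program into work memory, and read it back (the read completes asynchronously through wma_apf_read_work_memory_event_handler() and the registered SME callback). A hedged usage sketch; every field of the write/read parameter structs other than vdev_id is an assumption, since this patch does not show their definitions:

    struct wmi_apf_write_memory_params wr = { .vdev_id = vdev_id /* + program, length, offset (assumed) */ };
    struct wmi_apf_read_memory_params  rd = { .vdev_id = vdev_id /* + length, offset (assumed) */ };

    wma_send_apf_enable_cmd(wma, vdev_id, false);     /* pause APF while updating    */
    wma_send_apf_write_work_memory_cmd(wma, &wr);     /* push the new program        */
    wma_send_apf_enable_cmd(wma, vdev_id, true);      /* resume filtering            */
    wma_send_apf_read_work_memory_cmd(wma, &rd);      /* read-back arrives via event */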
/**
* wma_set_tx_rx_aggregation_size() - sets tx rx aggregation sizes
* @tx_rx_aggregation_size: aggregation size parameters
@@ -10362,7 +10469,7 @@ static int wma_sar_event_handler(void *handle, uint8_t *evt_buf, uint32_t len)
{
tp_wma_handle wma_handle;
wmi_unified_t wmi_handle;
- struct sar_limit_event event;
+ struct sar_limit_event *event;
wma_sar_cb callback;
QDF_STATUS status;
@@ -10380,17 +10487,26 @@ static int wma_sar_event_handler(void *handle, uint8_t *evt_buf, uint32_t len)
return QDF_STATUS_E_INVAL;
}
+ event = qdf_mem_malloc(sizeof(*event));
+ if (!event) {
+ WMA_LOGE(FL("failed to malloc sar_limit_event"));
+ return QDF_STATUS_E_NOMEM;
+ }
+
status = wmi_unified_extract_sar_limit_event(wmi_handle,
- evt_buf, &event);
+ evt_buf, event);
if (QDF_IS_STATUS_ERROR(status)) {
WMA_LOGE(FL("Event extract failure: %d"), status);
+ qdf_mem_free(event);
return QDF_STATUS_E_INVAL;
}
callback = sar_callback;
sar_callback = NULL;
if (callback)
- callback(sar_context, &event);
+ callback(sar_context, event);
+
+ qdf_mem_free(event);
return 0;
}
@@ -10412,7 +10528,7 @@ QDF_STATUS wma_sar_register_event_handlers(WMA_HANDLE handle)
}
return wmi_unified_register_event_handler(wmi_handle,
- wmi_sar_get_limits_event_id,
+ WMI_SAR_GET_LIMITS_EVENTID,
wma_sar_event_handler,
WMA_RX_WORK_CTX);
}
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_main.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_main.c
index 7408f515d8a6..94cf2e33e422 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_main.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_main.c
@@ -825,7 +825,10 @@ static void wma_set_dtim_period(tp_wma_handle wma,
{
struct wma_txrx_node *iface =
&wma->interfaces[dtim_params->session_id];
-
+ if (!wma_is_vdev_valid(dtim_params->session_id)) {
+ WMA_LOGE("%s: invalid VDEV", __func__);
+ return;
+ }
WMA_LOGD("%s: set dtim_period %d", __func__,
dtim_params->dtim_period);
iface->dtimPeriod = dtim_params->dtim_period;
@@ -2274,6 +2277,23 @@ static int wma_rx_service_available_event(void *handle, uint8_t *cmd_param_info,
}
/**
+ * wma_wmi_stop() - generic function to block WMI commands
+ * @return: None
+ */
+void wma_wmi_stop(void)
+{
+ tp_wma_handle wma_handle;
+
+ wma_handle = cds_get_context(QDF_MODULE_ID_WMA);
+ if (wma_handle == NULL) {
+ QDF_TRACE(QDF_MODULE_ID_WMI, QDF_TRACE_LEVEL_INFO,
+ "wma_handle is NULL\n");
+ return;
+ }
+ wmi_stop(wma_handle->wmi_handle);
+}
+
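wma_wmi_stop() gives code outside WMA a way to block further WMI command traffic without holding a wma handle: it resolves the handle through CDS and calls wmi_stop() on it. A minimal usage sketch (the calling site is an assumption, not something this patch adds):

    /* e.g. early in a shutdown or recovery path, before WMA is closed */
    wma_wmi_stop();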
+/**
* wma_open() - Allocate wma context and initialize it.
* @cds_context: cds context
* @wma_tgt_cfg_cb: tgt config callback fun
@@ -2493,10 +2513,10 @@ QDF_STATUS wma_open(void *cds_context,
wma_handle->driver_type = cds_cfg->driver_type;
wma_handle->ssdp = cds_cfg->ssdp;
wma_handle->enable_mc_list = cds_cfg->enable_mc_list;
- wma_handle->bpf_packet_filter_enable =
- cds_cfg->bpf_packet_filter_enable;
- wma_handle->active_uc_bpf_mode = cds_cfg->active_uc_bpf_mode;
- wma_handle->active_mc_bc_bpf_mode = cds_cfg->active_mc_bc_bpf_mode;
+ wma_handle->apf_packet_filter_enable =
+ cds_cfg->apf_packet_filter_enable;
+ wma_handle->active_uc_apf_mode = cds_cfg->active_uc_apf_mode;
+ wma_handle->active_mc_bc_apf_mode = cds_cfg->active_mc_bc_apf_mode;
wma_handle->link_stats_results = NULL;
#ifdef FEATURE_WLAN_RA_FILTERING
wma_handle->IsRArateLimitEnabled = cds_cfg->is_ra_ratelimit_enabled;
@@ -2807,9 +2827,13 @@ QDF_STATUS wma_open(void *cds_context,
WMA_RX_SERIALIZER_CTX);
wmi_unified_register_event_handler(wma_handle->wmi_handle,
WMI_BPF_CAPABILIY_INFO_EVENTID,
- wma_get_bpf_caps_event_handler,
+ wma_get_apf_caps_event_handler,
WMA_RX_SERIALIZER_CTX);
wmi_unified_register_event_handler(wma_handle->wmi_handle,
+ WMI_BPF_GET_VDEV_WORK_MEMORY_RESP_EVENTID,
+ wma_apf_read_work_memory_event_handler,
+ WMA_RX_SERIALIZER_CTX);
+ wmi_unified_register_event_handler(wma_handle->wmi_handle,
WMI_CHAN_INFO_EVENTID,
wma_chan_info_event_handler,
WMA_RX_SERIALIZER_CTX);
@@ -2851,7 +2875,6 @@ QDF_STATUS wma_open(void *cds_context,
WMI_REPORT_RX_AGGR_FAILURE_EVENTID,
wma_rx_aggr_failure_event_handler,
WMA_RX_SERIALIZER_CTX);
-
wma_register_debug_callback();
wma_handle->peer_dbg = qdf_mem_malloc(sizeof(*wma_handle->peer_dbg));
@@ -4095,7 +4118,7 @@ static void wma_update_fw_config(tp_wma_handle wma_handle,
wma_handle->max_frag_entry =
tgt_cap->wlan_resource_config.max_frag_entries;
- /* Update no. of maxWoWFilters depending on BPF service */
+ /* Update no. of maxWoWFilters depending on APF service */
if (WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap,
WMI_SERVICE_BPF_OFFLOAD))
tgt_cap->wlan_resource_config.num_wow_filters =
@@ -4307,6 +4330,11 @@ static inline void wma_update_target_services(tp_wma_handle wh,
wh->wmi_service_ext_bitmap,
WMI_SERVICE_MAWC_SUPPORT))
cfg->is_fw_mawc_capable = true;
+
+ if (WMI_SERVICE_EXT_IS_ENABLED(wh->wmi_service_bitmap,
+ wh->wmi_service_ext_bitmap,
+ WMI_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT))
+ cfg->is_11k_offload_supported = true;
}
/**
@@ -4778,7 +4806,7 @@ static void wma_update_hdd_cfg(tp_wma_handle wma_handle)
tgt_cfg.lpss_support = wma_handle->lpss_support;
#endif /* WLAN_FEATURE_LPSS */
tgt_cfg.ap_arpns_support = wma_handle->ap_arpns_support;
- tgt_cfg.bpf_enabled = wma_handle->bpf_enabled;
+ tgt_cfg.apf_enabled = wma_handle->apf_enabled;
tgt_cfg.rcpi_enabled = wma_handle->rcpi_enabled;
wma_update_ra_rate_limit(wma_handle, &tgt_cfg);
tgt_cfg.fine_time_measurement_cap =
@@ -4983,7 +5011,7 @@ done:
}
/**
- * wma_update_ra_limit() - update ra limit based on bpf filter
+ * wma_update_ra_limit() - update ra limit based on apf filter
* enabled or not
* @handle: wma handle
*
@@ -4992,7 +5020,7 @@ done:
#ifdef FEATURE_WLAN_RA_FILTERING
static void wma_update_ra_limit(tp_wma_handle wma_handle)
{
- if (wma_handle->bpf_enabled)
+ if (wma_handle->apf_enabled)
wma_handle->IsRArateLimitEnabled = false;
}
#else
@@ -5139,7 +5167,7 @@ int wma_rx_service_ready_event(void *handle, uint8_t *cmd_param_info,
WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap,
WMI_SERVICE_AP_ARPNS_OFFLOAD);
- wma_handle->bpf_enabled = (wma_handle->bpf_packet_filter_enable &&
+ wma_handle->apf_enabled = (wma_handle->apf_packet_filter_enable &&
WMI_SERVICE_IS_ENABLED(wma_handle->wmi_service_bitmap,
WMI_SERVICE_BPF_OFFLOAD));
wma_update_ra_limit(wma_handle);
@@ -5463,6 +5491,11 @@ QDF_STATUS wma_get_caps_for_phyidx_hwmode(struct wma_caps_per_phy *caps_per_phy,
caps_per_phy->he_2g = phy_cap->he_cap_info_2G;
caps_per_phy->he_5g = phy_cap->he_cap_info_5G;
+ caps_per_phy->tx_chain_mask_2G = phy_cap->tx_chain_mask_2G;
+ caps_per_phy->rx_chain_mask_2G = phy_cap->rx_chain_mask_2G;
+ caps_per_phy->tx_chain_mask_5G = phy_cap->tx_chain_mask_5G;
+ caps_per_phy->rx_chain_mask_5G = phy_cap->rx_chain_mask_5G;
+
return QDF_STATUS_SUCCESS;
}
@@ -5848,8 +5881,9 @@ static void wma_populate_soc_caps(t_wma_handle *wma_handle,
return;
}
- if (param_buf->soc_hw_mode_caps->num_hw_modes >
- MAX_NUM_HW_MODE) {
+ if ((param_buf->soc_hw_mode_caps->num_hw_modes > MAX_NUM_HW_MODE) ||
+ (param_buf->soc_hw_mode_caps->num_hw_modes >
+ param_buf->num_hw_mode_caps)) {
WMA_LOGE("Invalid num_hw_modes %u received from firmware",
param_buf->soc_hw_mode_caps->num_hw_modes);
return;
@@ -5930,10 +5964,12 @@ static void wma_populate_soc_caps(t_wma_handle *wma_handle,
* next thing is to populate reg caps per phy
*/
- if (param_buf->soc_hal_reg_caps->num_phy >
- MAX_NUM_PHY) {
+ if ((param_buf->soc_hal_reg_caps->num_phy > MAX_NUM_PHY) ||
+ (param_buf->soc_hal_reg_caps->num_phy >
+ param_buf->num_hal_reg_caps)) {
WMA_LOGE("Invalid num_phy %u received from firmware",
param_buf->soc_hal_reg_caps->num_phy);
+ wma_cleanup_dbs_phy_caps(wma_handle);
return;
}
@@ -6924,6 +6960,34 @@ static void wma_set_del_pmkid_cache(WMA_HANDLE handle,
}
/**
+ * wma_send_invoke_neighbor_report() - API to send invoke neighbor report
+ * command to fw
+ *
+ * @handle: WMA handle
+ * @params: Pointer to invoke neighbor report params
+ *
+ * Return: None
+ */
+static
+void wma_send_invoke_neighbor_report(WMA_HANDLE handle,
+ struct wmi_invoke_neighbor_report_params *params)
+{
+ QDF_STATUS status;
+ tp_wma_handle wma_handle = (tp_wma_handle) handle;
+
+ if (!wma_handle || !wma_handle->wmi_handle) {
+ WMA_LOGE("WMA is closed, cannot send invoke neighbor report");
+ return;
+ }
+
+ status = wmi_unified_invoke_neighbor_report_cmd(wma_handle->wmi_handle,
+ params);
+
+ if (status != QDF_STATUS_SUCCESS)
+ WMA_LOGE("failed to send invoke neighbor report command");
+}
+
+/**
* wma_process_action_frame_random_mac() - set/clear action frame random mac
* @wma_handle: pointer to wma handle
* @filter: pointer to buffer containing random mac, session_id and callback
@@ -8040,11 +8104,11 @@ QDF_STATUS wma_mc_process_msg(void *cds_context, cds_msg_t *msg)
wma_remove_beacon_filter(wma_handle, msg->bodyptr);
qdf_mem_free(msg->bodyptr);
break;
- case WDA_BPF_GET_CAPABILITIES_REQ:
- wma_get_bpf_capabilities(wma_handle);
+ case WDA_APF_GET_CAPABILITIES_REQ:
+ wma_get_apf_capabilities(wma_handle);
break;
- case WDA_BPF_SET_INSTRUCTIONS_REQ:
- wma_set_bpf_instructions(wma_handle, msg->bodyptr);
+ case WDA_APF_SET_INSTRUCTIONS_REQ:
+ wma_set_apf_instructions(wma_handle, msg->bodyptr);
qdf_mem_free(msg->bodyptr);
break;
case SIR_HAL_NDP_INITIATOR_REQ:
@@ -8103,7 +8167,7 @@ QDF_STATUS wma_mc_process_msg(void *cds_context, cds_msg_t *msg)
qdf_mem_free(msg->bodyptr);
break;
case WMA_CONF_HW_FILTER: {
- struct hw_filter_request *req = msg->bodyptr;
+ struct wmi_hw_filter_req_params *req = msg->bodyptr;
qdf_status = wma_conf_hw_filter_mode(wma_handle, req);
break;
@@ -8168,6 +8232,11 @@ QDF_STATUS wma_mc_process_msg(void *cds_context, cds_msg_t *msg)
(struct hlp_params *)msg->bodyptr);
qdf_mem_free(msg->bodyptr);
break;
+ case WMA_INVOKE_NEIGHBOR_REPORT:
+ wma_send_invoke_neighbor_report(wma_handle,
+ (struct wmi_invoke_neighbor_report_params *)msg->bodyptr);
+ qdf_mem_free(msg->bodyptr);
+ break;
default:
WMA_LOGE("Unhandled WMA message of type %d", msg->type);
if (msg->bodyptr)
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_mgmt.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_mgmt.c
index 54887cbff108..ea1b718d3500 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_mgmt.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_mgmt.c
@@ -1325,12 +1325,12 @@ QDF_STATUS wma_send_peer_assoc(tp_wma_handle wma,
intr->nss = cmd->peer_nss;
cmd->peer_phymode = phymode;
- WMA_LOGD("%s: vdev_id %d associd %d peer_flags %x rate_caps %x peer_caps %x",
- __func__, cmd->vdev_id, cmd->peer_associd, cmd->peer_flags,
+ WMA_LOGI("%s: vdev_id %d associd %d peer_flags %x nss %d phymode %d ht_caps %x",
+ __func__, cmd->vdev_id, cmd->peer_associd, cmd->peer_flags,
+ cmd->peer_nss, cmd->peer_phymode, cmd->peer_ht_caps);
+ WMA_LOGD("%s:listen_intval %d max_mpdu %d rate_caps %x peer_caps %x",
+ __func__, cmd->peer_listen_intval, cmd->peer_max_mpdu,
cmd->peer_rate_caps, cmd->peer_caps);
- WMA_LOGD("%s:listen_intval %d ht_caps %x max_mpdu %d nss %d phymode %d",
- __func__, cmd->peer_listen_intval, cmd->peer_ht_caps,
- cmd->peer_max_mpdu, cmd->peer_nss, cmd->peer_phymode);
WMA_LOGD("%s: peer_mpdu_density %d encr_type %d cmd->peer_vht_caps %x",
__func__, cmd->peer_mpdu_density, params->encryptType,
cmd->peer_vht_caps);
@@ -2898,27 +2898,23 @@ void wma_process_update_opmode(tp_wma_handle wma_handle,
tUpdateVHTOpMode *update_vht_opmode)
{
struct wma_txrx_node *iface;
- uint16_t chan_mode;
+ wmi_channel_width ch_width;
iface = &wma_handle->interfaces[update_vht_opmode->smesessionId];
if (iface == NULL)
return;
- chan_mode = wma_chan_phy_mode(cds_freq_to_chan(iface->mhz),
- update_vht_opmode->opMode,
- update_vht_opmode->dot11_mode);
- if (MODE_UNKNOWN == chan_mode)
+ ch_width = chanmode_to_chanwidth(iface->chanmode);
+ if (ch_width < update_vht_opmode->opMode) {
+ WMA_LOGE("%s: Invalid peer bw update %d, self bw %d",
+ __func__, update_vht_opmode->opMode,
+ ch_width);
return;
+ }
- WMA_LOGD("%s: opMode = %d, chanMode = %d, dot11mode = %d ",
- __func__,
- update_vht_opmode->opMode, chan_mode,
- update_vht_opmode->dot11_mode);
-
- wma_set_peer_param(wma_handle, update_vht_opmode->peer_mac,
- WMI_PEER_PHYMODE, chan_mode,
- update_vht_opmode->smesessionId);
+ WMA_LOGD("%s: opMode = %d, current_ch_width: %d", __func__,
+ update_vht_opmode->opMode, ch_width);
wma_set_peer_param(wma_handle, update_vht_opmode->peer_mac,
WMI_PEER_CHWIDTH, update_vht_opmode->opMode,
@@ -3664,10 +3660,17 @@ static int wma_mgmt_rx_process(void *handle, uint8_t *data,
#else
qdf_mem_copy(wh, param_tlvs->bufp, hdr->buf_len);
#endif
+ /* If it is a beacon/probe response, save it for future use */
+ mgt_type = (wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
+ mgt_subtype = (wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
- WMA_LOGD(FL("BSSID: "MAC_ADDRESS_STR" snr = %d, rssi = %d, rssi_raw = %d tsf_delta: %u"),
+ WMA_LOGD(FL("BSSID: "MAC_ADDRESS_STR" snr = %d, Type = %x, Subtype = %x, seq_num = %x, rssi = %d, rssi_raw = %d tsf_delta: %u"),
MAC_ADDR_ARRAY(wh->i_addr3),
- hdr->snr, rx_pkt->pkt_meta.rssi,
+ hdr->snr, mgt_type, mgt_subtype,
+ (((*(uint16_t *)wh->i_seq) &
+ IEEE80211_SEQ_SEQ_MASK) >>
+ IEEE80211_SEQ_SEQ_SHIFT),
+ rx_pkt->pkt_meta.rssi,
rx_pkt->pkt_meta.rssi_raw,
hdr->tsf_delta);
if (!wma_handle->mgmt_rx) {
@@ -3676,10 +3679,6 @@ static int wma_mgmt_rx_process(void *handle, uint8_t *data,
return -EINVAL;
}
- /* If it is a beacon/probe response, save it for future use */
- mgt_type = (wh)->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
- mgt_subtype = (wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
-
if (wma_read_d0wow_flag(wma_handle)) {
WMA_LOGE("%s: Frame subtype is 0x%x", __func__, mgt_subtype);
wma_set_d0wow_flag(wma_handle, false);
@@ -3788,7 +3787,8 @@ QDF_STATUS wma_register_roaming_callbacks(void *cds_ctx,
enum sir_roam_op_code reason),
QDF_STATUS (*pe_roam_synch_cb)(tpAniSirGlobal mac,
roam_offload_synch_ind *roam_synch_data,
- tpSirBssDescription bss_desc_ptr))
+ tpSirBssDescription bss_desc_ptr,
+ enum sir_roam_op_code reason))
{
tp_wma_handle wma = cds_get_context(QDF_MODULE_ID_WMA);
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_nan_datapath.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_nan_datapath.c
index 34cc6292a273..720b627008be 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_nan_datapath.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_nan_datapath.c
@@ -469,6 +469,19 @@ static int wma_ndp_indication_event_handler(void *handle, uint8_t *event_info,
fixed_params =
(wmi_ndp_indication_event_fixed_param *)event->fixed_param;
+ if (fixed_params->ndp_cfg_len > event->num_ndp_cfg) {
+ WMA_LOGE("FW message ndp cfg length %d larger than TLV hdr %d",
+ fixed_params->ndp_cfg_len, event->num_ndp_cfg);
+ return -EINVAL;
+ }
+
+ if (fixed_params->ndp_app_info_len > event->num_ndp_app_info) {
+ WMA_LOGE("FW message ndp app info length %d more than TLV hdr %d",
+ fixed_params->ndp_app_info_len,
+ event->num_ndp_app_info);
+ return -EINVAL;
+ }
+
ind_event.vdev_id = fixed_params->vdev_id;
ind_event.service_instance_id = fixed_params->service_instance_id;
ind_event.ndp_instance_id = fixed_params->ndp_instance_id;
@@ -910,7 +923,7 @@ static int wma_ndp_sch_update_event_handler(void *handle, uint8_t *evinfo,
buff_len = sizeof(uint32_t) * sch_update_ev.num_ndp_instances;
sch_update_ev.ndp_instances = qdf_mem_malloc(buff_len);
- if(!sch_update_ev.ndp_instances) {
+ if (!sch_update_ev.ndp_instances) {
WMA_LOGE(FL("malloc failed"));
return -ENOMEM;
}
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_power.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_power.c
index cf13afeed47e..721be0443b4d 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_power.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_power.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2017 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2018 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
@@ -1951,15 +1951,23 @@ static inline uint8_t wma_is_user_set_li_params(struct wma_txrx_node *iface)
void wma_set_suspend_dtim(tp_wma_handle wma)
{
uint8_t i;
+ bool li_offload_support = false;
if (NULL == wma) {
WMA_LOGE("%s: wma is NULL", __func__);
return;
}
+ if (WMI_SERVICE_EXT_IS_ENABLED(wma->wmi_service_bitmap,
+ wma->wmi_service_ext_bitmap,
+ WMI_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT)) {
+ WMA_LOGD("%s: listen interval support is enabled", __func__);
+ li_offload_support = true;
+ }
for (i = 0; i < wma->max_bssid; i++) {
if (wma->interfaces[i].handle) {
- if (!wma_is_user_set_li_params(&wma->interfaces[i]))
+ if (!wma_is_user_set_li_params(&wma->interfaces[i]) &&
+ !li_offload_support)
wma_set_vdev_suspend_dtim(wma, i);
wma_configure_vdev_suspend_params(wma, i);
}
@@ -2099,15 +2107,23 @@ static void wma_set_vdev_resume_dtim(tp_wma_handle wma, uint8_t vdev_id)
void wma_set_resume_dtim(tp_wma_handle wma)
{
uint8_t i;
+ bool li_offload_support = false;
if (NULL == wma) {
WMA_LOGE("%s: wma is NULL", __func__);
return;
}
+ if (WMI_SERVICE_EXT_IS_ENABLED(wma->wmi_service_bitmap,
+ wma->wmi_service_ext_bitmap,
+ WMI_SERVICE_LISTEN_INTERVAL_OFFLOAD_SUPPORT)) {
+ WMA_LOGD("%s: listen interval support is enabled", __func__);
+ li_offload_support = true;
+ }
for (i = 0; i < wma->max_bssid; i++) {
if (wma->interfaces[i].handle) {
- if (!wma_is_user_set_li_params(&wma->interfaces[i]))
+ if (!wma_is_user_set_li_params(&wma->interfaces[i]) &&
+ !li_offload_support)
wma_set_vdev_resume_dtim(wma, i);
wma_configure_vdev_resume_params(wma, i);
}
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c
index b79e5f6a8b24..282f8743ea5f 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_scan_roam.c
@@ -1904,6 +1904,47 @@ QDF_STATUS wma_roam_scan_offload_command(tp_wma_handle wma_handle,
}
/**
+ * wma_send_offload_11k_params() - API to send 11k offload params to FW
+ * @handle: WMA handle
+ * @params: Pointer to 11k offload params
+ *
+ * Return: None
+ */
+static
+QDF_STATUS wma_send_offload_11k_params(WMA_HANDLE handle,
+ struct wmi_11k_offload_params *params)
+{
+ QDF_STATUS status;
+ tp_wma_handle wma_handle = (tp_wma_handle) handle;
+
+ if (!wma_handle || !wma_handle->wmi_handle) {
+ WMA_LOGE("%s: WMA is closed, cannot send 11k offload cmd",
+ __func__);
+ return QDF_STATUS_E_FAILURE;
+ }
+
+ if (!WMI_SERVICE_EXT_IS_ENABLED(wma_handle->wmi_service_bitmap,
+ wma_handle->wmi_service_ext_bitmap,
+ WMI_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT)) {
+ WMA_LOGE("%s: FW doesn't support 11k offload",
+ __func__);
+ return QDF_STATUS_E_NOSUPPORT;
+ }
+
+ if (!params->neighbor_report_params.ssid.length) {
+ WMA_LOGD("%s: SSID Len 0", __func__);
+ return QDF_STATUS_E_INVAL;
+ }
+
+ status = wmi_unified_offload_11k_cmd(wma_handle->wmi_handle, params);
+
+ if (status != QDF_STATUS_SUCCESS)
+ WMA_LOGE("failed to send 11k offload command");
+
+ return status;
+}
+
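wma_send_offload_11k_params() is only reached from wma_process_roaming_config() when the roam request reason is REASON_CTX_INIT (see the hunk below), and it bails out unless the firmware advertises WMI_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT, the same bit that sets is_11k_offload_supported in wma_update_target_services(), and the neighbor-report SSID length is non-zero. Condensed guard order (a sketch of the checks above, not new behaviour):

    if (!WMI_SERVICE_EXT_IS_ENABLED(wma_handle->wmi_service_bitmap,
                                    wma_handle->wmi_service_ext_bitmap,
                                    WMI_SERVICE_11K_NEIGHBOUR_REPORT_SUPPORT))
            return QDF_STATUS_E_NOSUPPORT;  /* firmware cannot offload 11k     */
    if (!params->neighbor_report_params.ssid.length)
            return QDF_STATUS_E_INVAL;      /* no SSID to build the report for */
    return wmi_unified_offload_11k_cmd(wma_handle->wmi_handle, params);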
+/**
* wma_process_roaming_config() - process roam request
* @wma_handle: wma handle
* @roam_req: roam request parameters
@@ -2041,6 +2082,16 @@ QDF_STATUS wma_process_roaming_config(tp_wma_handle wma_handle,
WMA_LOGE("Sending start for roam scan filter failed");
break;
}
+
+ if (roam_req->reason == REASON_CTX_INIT) {
+ qdf_status = wma_send_offload_11k_params(wma_handle,
+ &roam_req->offload_11k_params);
+ if (qdf_status != QDF_STATUS_SUCCESS) {
+ WMA_LOGE("11k offload params not sent, status %d",
+ qdf_status);
+ break;
+ }
+ }
break;
case ROAM_SCAN_OFFLOAD_STOP:
@@ -2078,7 +2129,8 @@ QDF_STATUS wma_process_roaming_config(tp_wma_handle wma_handle,
wma_roam_scan_fill_scan_params(wma_handle, pMac,
NULL, &scan_params);
- if (roam_req->reason == REASON_ROAM_STOP_ALL)
+ if (roam_req->reason == REASON_ROAM_STOP_ALL ||
+ roam_req->reason == REASON_ROAM_SYNCH_FAILED)
mode = WMI_ROAM_SCAN_MODE_NONE;
else
mode = WMI_ROAM_SCAN_MODE_NONE |
@@ -2877,7 +2929,8 @@ int wma_roam_synch_event_handler(void *handle, uint8_t *event,
qdf_mem_zero(bss_desc_ptr, sizeof(tSirBssDescription) + ie_len);
if (QDF_IS_STATUS_ERROR(wma->pe_roam_synch_cb(
(tpAniSirGlobal)wma->mac_context,
- roam_synch_ind_ptr, bss_desc_ptr))) {
+ roam_synch_ind_ptr, bss_desc_ptr,
+ SIR_ROAM_SYNCH_PROPAGATION))) {
WMA_LOGE("LFR3: PE roam synch cb failed");
status = -EBUSY;
goto cleanup_label;
@@ -2914,7 +2967,7 @@ cleanup_label:
roam_req = qdf_mem_malloc(sizeof(tSirRoamOffloadScanReq));
if (roam_req && synch_event) {
roam_req->Command = ROAM_SCAN_OFFLOAD_STOP;
- roam_req->reason = REASON_ROAM_STOP_ALL;
+ roam_req->reason = REASON_ROAM_SYNCH_FAILED;
roam_req->sessionId = synch_event->vdev_id;
wma_process_roaming_config(wma, roam_req);
}
@@ -3418,127 +3471,6 @@ void wma_process_roam_synch_complete(WMA_HANDLE handle, uint8_t vdev_id)
#endif /* WLAN_FEATURE_ROAM_OFFLOAD */
/**
- * wma_switch_channel() - WMA api to switch channel dynamically
- * @wma: Pointer of WMA context
- * @req: Pointer vdev_start having channel switch info.
- *
- * Return: 0 for success, otherwise appropriate error code
- */
-static QDF_STATUS wma_switch_channel(tp_wma_handle wma,
- struct wma_vdev_start_req *req)
-{
-
- wmi_buf_t buf;
- wmi_channel *cmd;
- int32_t len, ret;
- WLAN_PHY_MODE chanmode;
- struct wma_txrx_node *intr = wma->interfaces;
- tpAniSirGlobal pmac;
-
- pmac = cds_get_context(QDF_MODULE_ID_PE);
-
- if (pmac == NULL) {
- WMA_LOGE("%s: channel switch failed as pmac is NULL",
- __func__);
- return QDF_STATUS_E_FAILURE;
- }
-
- chanmode = wma_chan_phy_mode(req->chan, req->chan_width,
- req->dot11_mode);
-
- if (chanmode == MODE_UNKNOWN) {
- WMA_LOGE("%s: invalid phy mode!", __func__);
- return QDF_STATUS_E_FAILURE;
- }
-
- len = sizeof(*cmd);
- buf = wmi_buf_alloc(wma->wmi_handle, len);
- if (!buf) {
- WMA_LOGE("%s : wmi_buf_alloc failed", __func__);
- return QDF_STATUS_E_NOMEM;
- }
- cmd = (wmi_channel *)wmi_buf_data(buf);
- WMITLV_SET_HDR(&cmd->tlv_header,
- WMITLV_TAG_STRUC_wmi_channel,
- WMITLV_GET_STRUCT_TLVLEN(wmi_channel));
-
- /* Fill channel info */
- cmd->mhz = cds_chan_to_freq(req->chan);
-
- intr[req->vdev_id].chanmode = chanmode; /* save channel mode */
- intr[req->vdev_id].ht_capable = req->ht_capable;
- intr[req->vdev_id].vht_capable = req->vht_capable;
- intr[req->vdev_id].config.gtx_info.gtxRTMask[0] =
- CFG_TGT_DEFAULT_GTX_HT_MASK;
- intr[req->vdev_id].config.gtx_info.gtxRTMask[1] =
- CFG_TGT_DEFAULT_GTX_VHT_MASK;
-
- if (wlan_cfg_get_int(pmac, WNI_CFG_TGT_GTX_USR_CFG,
- &intr[req->vdev_id].config.gtx_info.gtxUsrcfg) != eSIR_SUCCESS) {
- intr[req->vdev_id].config.gtx_info.gtxUsrcfg =
- WNI_CFG_TGT_GTX_USR_CFG_STADEF;
- QDF_TRACE(QDF_MODULE_ID_WMA, QDF_TRACE_LEVEL_WARN,
- "Failed to get WNI_CFG_TGT_GTX_USR_CFG");
- }
-
- intr[req->vdev_id].config.gtx_info.gtxPERThreshold =
- CFG_TGT_DEFAULT_GTX_PER_THRESHOLD;
- intr[req->vdev_id].config.gtx_info.gtxPERMargin =
- CFG_TGT_DEFAULT_GTX_PER_MARGIN;
- intr[req->vdev_id].config.gtx_info.gtxTPCstep =
- CFG_TGT_DEFAULT_GTX_TPC_STEP;
- intr[req->vdev_id].config.gtx_info.gtxTPCMin =
- CFG_TGT_DEFAULT_GTX_TPC_MIN;
- intr[req->vdev_id].config.gtx_info.gtxBWMask =
- CFG_TGT_DEFAULT_GTX_BW_MASK;
- intr[req->vdev_id].mhz = cmd->mhz;
-
- WMI_SET_CHANNEL_MODE(cmd, chanmode);
- cmd->band_center_freq1 = cmd->mhz;
-
- if (chanmode == MODE_11AC_VHT80)
- cmd->band_center_freq1 =
- cds_chan_to_freq(req->ch_center_freq_seg0);
-
- if ((chanmode == MODE_11NA_HT40) || (chanmode == MODE_11NG_HT40) ||
- (chanmode == MODE_11AC_VHT40)) {
- if (req->chan_width == CH_WIDTH_80MHZ)
- cmd->band_center_freq1 += 10;
- else
- cmd->band_center_freq1 -= 10;
- }
- cmd->band_center_freq2 = 0;
-
- /* Set half or quarter rate WMI flags */
- if (req->is_half_rate)
- WMI_SET_CHANNEL_FLAG(cmd, WMI_CHAN_FLAG_HALF_RATE);
- else if (req->is_quarter_rate)
- WMI_SET_CHANNEL_FLAG(cmd, WMI_CHAN_FLAG_QUARTER_RATE);
-
- /* Find out min, max and regulatory power levels */
- WMI_SET_CHANNEL_REG_POWER(cmd, req->max_txpow);
- WMI_SET_CHANNEL_MAX_TX_POWER(cmd, req->max_txpow);
-
-
- WMA_LOGE("%s: freq %d channel %d chanmode %d center_chan %d center_freq2 %d reg_info_1: 0x%x reg_info_2: 0x%x, req->max_txpow: 0x%x",
- __func__, cmd->mhz, req->chan, chanmode,
- cmd->band_center_freq1, cmd->band_center_freq2,
- cmd->reg_info_1, cmd->reg_info_2, req->max_txpow);
-
-
- ret = wmi_unified_cmd_send(wma->wmi_handle, buf, len,
- WMI_PDEV_SET_CHANNEL_CMDID);
-
- if (ret < 0) {
- WMA_LOGP("%s: Failed to send vdev start command", __func__);
- wmi_buf_free(buf);
- return QDF_STATUS_E_FAILURE;
- }
-
- return QDF_STATUS_SUCCESS;
-}
-
-/**
* wma_set_channel() - set channel
* @wma: wma handle
* @params: switch channel parameters
@@ -3649,37 +3581,32 @@ void wma_set_channel(tp_wma_handle wma, tpSwitchChannelParams params)
if (QDF_GLOBAL_MONITOR_MODE == cds_get_conparam() &&
wma_is_vdev_up(vdev_id)) {
- status = wma_switch_channel(wma, &req);
- if (status != QDF_STATUS_SUCCESS)
- WMA_LOGE("%s: wma_switch_channel failed %d\n", __func__,
- status);
+ WMA_LOGD("%s: setting channel switch to true for vdev_id:%d",
+ __func__, req.vdev_id);
+ wma->interfaces[req.vdev_id].is_channel_switch = true;
+ }
- ol_htt_mon_note_chan(pdev, req.chan);
+ msg = wma_fill_vdev_req(wma, req.vdev_id, WMA_CHNL_SWITCH_REQ,
+ WMA_TARGET_REQ_TYPE_VDEV_START, params,
+ WMA_VDEV_START_REQUEST_TIMEOUT);
+ if (!msg) {
+ WMA_LOGP("%s: Failed to fill channel switch request for vdev %d",
+ __func__, req.vdev_id);
+ status = QDF_STATUS_E_NOMEM;
+ goto send_resp;
+ }
+ status = wma_vdev_start(wma, &req,
+ wma->interfaces[req.vdev_id].is_channel_switch);
+ if (status != QDF_STATUS_SUCCESS) {
+ wma_remove_vdev_req(wma, req.vdev_id,
+ WMA_TARGET_REQ_TYPE_VDEV_START);
+ WMA_LOGP("%s: vdev start failed status = %d", __func__,
+ status);
goto send_resp;
- } else {
-
- msg = wma_fill_vdev_req(wma, req.vdev_id, WMA_CHNL_SWITCH_REQ,
- WMA_TARGET_REQ_TYPE_VDEV_START, params,
- WMA_VDEV_START_REQUEST_TIMEOUT);
- if (!msg) {
- WMA_LOGP("%s: Failed to fill channel switch request for vdev %d",
- __func__, req.vdev_id);
- status = QDF_STATUS_E_NOMEM;
- goto send_resp;
- }
- status = wma_vdev_start(wma, &req,
- wma->interfaces[req.vdev_id].is_channel_switch);
- if (status != QDF_STATUS_SUCCESS) {
- wma_remove_vdev_req(wma, req.vdev_id,
- WMA_TARGET_REQ_TYPE_VDEV_START);
- WMA_LOGP("%s: vdev start failed status = %d", __func__,
- status);
- goto send_resp;
- }
-
- if (QDF_GLOBAL_MONITOR_MODE == cds_get_conparam())
- ol_htt_mon_note_chan(pdev, req.chan);
}
+
+ if (QDF_GLOBAL_MONITOR_MODE == cds_get_conparam())
+ ol_htt_mon_note_chan(pdev, req.chan);
return;
send_resp:
WMA_LOGD("%s: channel %d ch_width %d txpower %d status %d", __func__,
@@ -5063,7 +4990,7 @@ int wma_extscan_cached_results_event_handler(void *handle,
struct extscan_cached_scan_results empty_cachelist;
wmi_extscan_wlan_descriptor *src_hotlist;
wmi_extscan_rssi_info *src_rssi;
- int i, moredata, scan_ids_cnt, buf_len;
+ int i, moredata, scan_ids_cnt, buf_len, status;
tpAniSirGlobal pMac = cds_get_context(QDF_MODULE_ID_PE);
uint32_t total_len;
bool excess_data = false;
@@ -5161,19 +5088,24 @@ int wma_extscan_cached_results_event_handler(void *handle,
dest_result = dest_cachelist->result;
wma_fill_num_results_per_scan_id(cmd_param_info, dest_result);
- wma_group_num_bss_to_scan_id(cmd_param_info, dest_cachelist);
- pMac->sme.pExtScanIndCb(pMac->hHdd,
+ status = wma_group_num_bss_to_scan_id(cmd_param_info, dest_cachelist);
+ if (!status)
+ pMac->sme.pExtScanIndCb(pMac->hHdd,
eSIR_EXTSCAN_CACHED_RESULTS_IND,
dest_cachelist);
+ else
+ WMA_LOGD("wma_group_num_bss_to_scan_id failed, not calling callback");
+
dest_result = dest_cachelist->result;
for (i = 0; i < dest_cachelist->num_scan_ids; i++) {
- qdf_mem_free(dest_result->ap);
+ if (dest_result->ap)
+ qdf_mem_free(dest_result->ap);
dest_result++;
}
qdf_mem_free(dest_cachelist->result);
qdf_mem_free(dest_cachelist);
- return 0;
+ return status;
noresults:
empty_cachelist.request_id = event->request_id;
@@ -5340,6 +5272,8 @@ int wma_passpoint_match_event_handler(void *handle,
struct wifi_passpoint_match *dest_match;
tSirWifiScanResult *dest_ap;
uint8_t *buf_ptr;
+ uint32_t buf_len = 0;
+ bool excess_data = false;
tpAniSirGlobal mac = cds_get_context(QDF_MODULE_ID_PE);
if (!mac) {
@@ -5359,13 +5293,26 @@ int wma_passpoint_match_event_handler(void *handle,
event = param_buf->fixed_param;
buf_ptr = (uint8_t *)param_buf->fixed_param;
- /*
- * All the below lengths are UINT32 and summing up and checking
- * against a constant should not be an issue.
- */
- if ((sizeof(*event) + event->ie_length + event->anqp_length) >
- WMI_SVC_MSG_MAX_SIZE ||
- (event->ie_length + event->anqp_length) > param_buf->num_bufp) {
+ do {
+ if (event->ie_length > (WMI_SVC_MSG_MAX_SIZE)) {
+ excess_data = true;
+ break;
+ } else {
+ buf_len = event->ie_length;
+ }
+
+ if (event->anqp_length > (WMI_SVC_MSG_MAX_SIZE)) {
+ excess_data = true;
+ break;
+ } else {
+ buf_len += event->anqp_length;
+ }
+
+ } while (0);
+
+ if (excess_data || buf_len > (WMI_SVC_MSG_MAX_SIZE - sizeof(*event)) ||
+ buf_len > (WMI_SVC_MSG_MAX_SIZE - sizeof(*dest_match)) ||
+ (event->ie_length + event->anqp_length) > param_buf->num_bufp) {
WMA_LOGE("IE Length: %u or ANQP Length: %u is huge, num_bufp: %u",
event->ie_length, event->anqp_length,
param_buf->num_bufp);
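The rewritten validation checks each length against WMI_SVC_MSG_MAX_SIZE on its own before accumulating, so the later comparisons on the summed buf_len cannot be defeated by unsigned wrap-around; the driver version additionally leaves room for the fixed event and result headers. The pattern in isolation (standalone sketch, not driver code):

    #include <stdbool.h>
    #include <stdint.h>

    /* Bound each part first; the cap (WMI_SVC_MSG_MAX_SIZE here) is far below
     * UINT32_MAX / 2, so the sum can no longer wrap once both parts passed. */
    static bool total_len_ok(uint32_t ie_len, uint32_t anqp_len, uint32_t cap)
    {
            if (ie_len > cap || anqp_len > cap)
                    return false;
            return ie_len + anqp_len <= cap;
    }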
@@ -5378,8 +5325,8 @@ int wma_passpoint_match_event_handler(void *handle,
event->ssid.ssid_len = SIR_MAC_MAX_SSID_LENGTH;
}
- dest_match = qdf_mem_malloc(sizeof(*dest_match) +
- event->ie_length + event->anqp_length);
+ dest_match = qdf_mem_malloc(sizeof(*dest_match) + buf_len);
+
if (!dest_match) {
WMA_LOGE("%s: qdf_mem_malloc failed", __func__);
return -EINVAL;
@@ -6520,12 +6467,8 @@ int wma_scan_event_callback(WMA_HANDLE handle, uint8_t *data,
scan_event->reasonCode = eSIR_SME_SCAN_FAILED;
break;
case WMI_SCAN_EVENT_PREEMPTED:
- WMA_LOGW("%s: Unhandled Scan Event WMI_SCAN_EVENT_PREEMPTED",
- __func__);
break;
case WMI_SCAN_EVENT_RESTARTED:
- WMA_LOGW("%s: Unhandled Scan Event WMI_SCAN_EVENT_RESTARTED",
- __func__);
break;
}
@@ -6679,6 +6622,9 @@ int wma_roam_event_callback(WMA_HANDLE handle, uint8_t *event_buf,
if (wmi_event->notif == WMI_ROAM_NOTIF_ROAM_ABORT)
op_code = SIR_ROAMING_ABORT;
roam_synch_data->roamedVdevId = wmi_event->vdev_id;
+ wma_handle->pe_roam_synch_cb(
+ (tpAniSirGlobal)wma_handle->mac_context,
+ roam_synch_data, NULL, op_code);
wma_handle->csr_roam_synch_cb(
(tpAniSirGlobal)wma_handle->mac_context,
roam_synch_data, NULL, op_code);
diff --git a/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c b/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c
index c3bb826be332..2a9d1e4bdb69 100644
--- a/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c
+++ b/drivers/staging/qcacld-3.0/core/wma/src/wma_utils.c
@@ -263,6 +263,8 @@ static uint8_t wma_get_mcs_idx(uint16_t maxRate, uint8_t rate_flags,
mcs_nss2[index].ht40_rate[1]);
if (match_rate) {
*mcsRateFlag = eHAL_TX_RATE_HT40;
+ if (nss == 2)
+ index += MAX_HT_MCS_IDX;
goto rate_found;
}
}
@@ -276,6 +278,8 @@ static uint8_t wma_get_mcs_idx(uint16_t maxRate, uint8_t rate_flags,
mcs_nss2[index].ht20_rate[1]);
if (match_rate) {
*mcsRateFlag = eHAL_TX_RATE_HT20;
+ if (nss == 2)
+ index += MAX_HT_MCS_IDX;
goto rate_found;
}
}
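
The two hunks above offset the matched index by MAX_HT_MCS_IDX whenever the rate was found in the 2-stream table, so the reported MCS lands in the HT MCS 8-15 range. A small self-contained sketch of that offset, using the standard HT20 long-GI rates; the table and function names are illustrative, not the driver's.

/* Hedged sketch of the NSS-2 index adjustment above: in a flattened HT
 * MCS space the two-stream entries follow the single-stream entries. */
#include <stdint.h>
#include <stdio.h>

#define MAX_HT_MCS_IDX 8

/* HT20 long-GI rates in units of 100 kbps, per stream count */
static const uint16_t ht20_nss1[MAX_HT_MCS_IDX] = { 65, 130, 195, 260, 390, 520, 585, 650 };
static const uint16_t ht20_nss2[MAX_HT_MCS_IDX] = { 130, 260, 390, 520, 780, 1040, 1170, 1300 };

static int rate_to_mcs_idx(uint16_t rate, int nss)
{
	const uint16_t *tbl = (nss == 2) ? ht20_nss2 : ht20_nss1;
	int i;

	for (i = 0; i < MAX_HT_MCS_IDX; i++) {
		if (tbl[i] == rate)
			/* second spatial stream occupies indices 8..15 */
			return (nss == 2) ? i + MAX_HT_MCS_IDX : i;
	}
	return -1;
}

int main(void)
{
	printf("%d\n", rate_to_mcs_idx(1300, 2)); /* prints 15 */
	return 0;
}
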
@@ -1351,33 +1355,6 @@ static int wma_unified_link_peer_stats_event_handler(void *handle,
return -EINVAL;
}
- do {
- if (peer_stats->num_rates >
- WMI_SVC_MSG_MAX_SIZE/sizeof(wmi_rate_stats)) {
- excess_data = true;
- break;
- } else {
- buf_len =
- peer_stats->num_rates * sizeof(wmi_rate_stats);
- }
- if (fixed_param->num_peers >
- WMI_SVC_MSG_MAX_SIZE/sizeof(wmi_peer_link_stats)) {
- excess_data = true;
- break;
- } else {
- buf_len += fixed_param->num_peers *
- sizeof(wmi_peer_link_stats);
- }
- } while (0);
-
- if (excess_data ||
- (sizeof(*fixed_param) > WMI_SVC_MSG_MAX_SIZE - buf_len)) {
- WMA_LOGE("excess wmi buffer: rates:%d, peers:%d",
- peer_stats->num_rates, fixed_param->num_peers);
- QDF_ASSERT(0);
- return -EINVAL;
- }
-
peer_stats_size = sizeof(tSirWifiPeerStat);
peer_info_size = sizeof(tSirWifiPeerInfo);
rate_stats_size = sizeof(tSirWifiRateStat);
@@ -1498,6 +1475,8 @@ static int wma_unified_radio_tx_power_level_stats_event_handler(void *handle,
uint8_t *tx_power_level_values;
tSirLLStatsResults *link_stats_results;
tSirWifiRadioStat *rs_results;
+ uint32_t max_total_num_tx_power_levels = MAX_TPC_LEVELS * NUM_OF_BANDS *
+ MAX_SPATIAL_STREAM_ANY_V3;
tpAniSirGlobal mac = cds_get_context(QDF_MODULE_ID_PE);
@@ -1547,6 +1526,20 @@ static int wma_unified_radio_tx_power_level_stats_event_handler(void *handle,
return -EINVAL;
}
+ if (fixed_param->radio_id >= link_stats_results->num_radio) {
+ WMA_LOGE("%s: Invalid radio_id %d num_radio %d",
+ __func__, fixed_param->radio_id,
+ link_stats_results->num_radio);
+ return -EINVAL;
+ }
+
+ if (fixed_param->total_num_tx_power_levels >
+ max_total_num_tx_power_levels) {
+ WMA_LOGD("Invalid total_num_tx_power_levels %d",
+ fixed_param->total_num_tx_power_levels);
+ return -EINVAL;
+ }
+
rs_results = (tSirWifiRadioStat *) &link_stats_results->results[0] +
fixed_param->radio_id;
tx_power_level_values = (uint8_t *) param_tlvs->tx_time_per_power_level;
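
Both checks added above follow the same rule: an index or count delivered by firmware is validated against a locally derived bound before it is used to address or size host memory. A hedged sketch of that guard, with placeholder limits rather than the driver's real constants:

/* Hedged sketch of the bounds-check-before-use pattern above. */
#include <stdint.h>
#include <string.h>

#define MAX_TPC_LEVELS_SKETCH		64
#define NUM_OF_BANDS_SKETCH		2
#define MAX_SPATIAL_STREAMS_SKETCH	4

struct radio_stats_sketch {
	uint32_t tx_time_per_level[MAX_TPC_LEVELS_SKETCH *
				   NUM_OF_BANDS_SKETCH *
				   MAX_SPATIAL_STREAMS_SKETCH];
};

static int copy_tx_power_levels(struct radio_stats_sketch *radios,
				uint32_t num_radio, uint32_t radio_id,
				const uint32_t *levels, uint32_t num_levels)
{
	const uint32_t max_levels = MAX_TPC_LEVELS_SKETCH *
				    NUM_OF_BANDS_SKETCH *
				    MAX_SPATIAL_STREAMS_SKETCH;

	if (radio_id >= num_radio)	/* reject out-of-range array index */
		return -1;
	if (num_levels > max_levels)	/* reject oversized copy length */
		return -1;

	memcpy(radios[radio_id].tx_time_per_level, levels,
	       num_levels * sizeof(*levels));
	return 0;
}
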
@@ -1699,6 +1692,13 @@ static int wma_unified_link_radio_stats_event_handler(void *handle,
link_stats_results_size = sizeof(*link_stats_results) +
fixed_param->num_radio * radio_stats_size;
+ if (radio_stats->radio_id >= fixed_param->num_radio) {
+ WMA_LOGE("%s: Invalid radio_id %d num_radio %d",
+ __func__, radio_stats->radio_id,
+ fixed_param->num_radio);
+ return -EINVAL;
+ }
+
if (!wma_handle->link_stats_results) {
wma_handle->link_stats_results = qdf_mem_malloc(
link_stats_results_size);
@@ -2527,6 +2527,12 @@ static void wma_vdev_stats_lost_link_helper(tp_wma_handle wma,
static const uint8_t zero_mac[QDF_MAC_ADDR_SIZE] = {0};
int32_t bcn_snr, dat_snr;
+ if (vdev_stats->vdev_id >= wma->max_bssid) {
+ WMA_LOGE("%s: Invalid vdev_id %hu",
+ __func__, vdev_stats->vdev_id);
+ return;
+ }
+
node = &wma->interfaces[vdev_stats->vdev_id];
if (node->vdev_up &&
!qdf_mem_cmp(node->bssid, zero_mac, QDF_MAC_ADDR_SIZE)) {
@@ -2576,6 +2582,12 @@ static void wma_update_vdev_stats(tp_wma_handle wma,
cds_msg_t sme_msg = { 0 };
int32_t bcn_snr, dat_snr;
+ if (vdev_stats->vdev_id >= wma->max_bssid) {
+ WMA_LOGE("%s: Invalid vdev_id %hu",
+ __func__, vdev_stats->vdev_id);
+ return;
+ }
+
bcn_snr = vdev_stats->vdev_snr.bcn_snr;
dat_snr = vdev_stats->vdev_snr.dat_snr;
WMA_LOGD("vdev id %d beancon snr %d data snr %d",
@@ -2848,6 +2860,12 @@ static void wma_update_rssi_stats(tp_wma_handle wma,
uint32_t temp_mask;
uint8_t vdev_id;
+ if (rssi_stats->vdev_id >= wma->max_bssid) {
+ WMA_LOGE("%s: Invalid vdev_id %hu",
+ __func__, rssi_stats->vdev_id);
+ return;
+ }
+
vdev_id = rssi_stats->vdev_id;
node = &wma->interfaces[vdev_id];
stats_rsp_params = (tAniGetPEStatsRsp *) node->stats_rsp;
@@ -5089,7 +5107,8 @@ QDF_STATUS wma_get_updated_scan_config(uint32_t *scan_config,
}
QDF_STATUS wma_get_updated_scan_and_fw_mode_config(uint32_t *scan_config,
- uint32_t *fw_mode_config, uint32_t dual_mac_disable_ini)
+ uint32_t *fw_mode_config, uint32_t dual_mac_disable_ini,
+ uint32_t channel_select_logic_conc)
{
tp_wma_handle wma;
@@ -5128,6 +5147,11 @@ QDF_STATUS wma_get_updated_scan_and_fw_mode_config(uint32_t *scan_config,
default:
break;
}
+
+ WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_STA_SET(*fw_mode_config,
+ WMA_CHANNEL_SELECT_LOGIC_STA_STA_GET(channel_select_logic_conc));
+ WMI_DBS_FW_MODE_CFG_DBS_FOR_STA_PLUS_P2P_SET(*fw_mode_config,
+ WMA_CHANNEL_SELECT_LOGIC_STA_P2P_GET(channel_select_logic_conc));
WMA_LOGD("%s: *scan_config:%x ", __func__, *scan_config);
WMA_LOGD("%s: *fw_mode_config:%x ", __func__, *fw_mode_config);
diff --git a/drivers/staging/qcacld-3.0/uapi/linux/qca_vendor.h b/drivers/staging/qcacld-3.0/uapi/linux/qca_vendor.h
index 1529367ad110..a46a520fec50 100644
--- a/drivers/staging/qcacld-3.0/uapi/linux/qca_vendor.h
+++ b/drivers/staging/qcacld-3.0/uapi/linux/qca_vendor.h
@@ -793,6 +793,7 @@ enum qca_nl80211_vendor_subcmds_index {
QCA_NL80211_VENDOR_SUBCMD_NUD_STATS_GET_INDEX,
QCA_NL80211_VENDOR_SUBCMD_PWR_SAVE_FAIL_DETECTED_INDEX,
QCA_NL80211_VENDOR_SUBCMD_HANG_REASON_INDEX,
+ QCA_NL80211_VENDOR_SUBCMD_LINK_PROPERTIES_INDEX,
QCA_NL80211_VENDOR_SUBCMD_WLAN_MAC_INFO_INDEX,
};
@@ -2870,6 +2871,8 @@ enum qca_wlan_vendor_attr_link_properties {
QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_NSS = 1,
QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_RATE_FLAGS = 2,
QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_FREQ = 3,
+ QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_STA_FLAGS = 4,
+ QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_STA_MAC = 5,
/* KEEP LAST */
QCA_WLAN_VENDOR_ATTR_LINK_PROPERTIES_AFTER_LAST,
@@ -2965,6 +2968,8 @@ enum qca_wlan_vendor_attr_sap_conditional_chan_switch {
* @WIFI_LOGGER_POWER_EVENT_SUPPORTED - Power of driver
* @WIFI_LOGGER_WAKE_LOCK_SUPPORTED - Wakelock of driver
* @WIFI_LOGGER_WATCHDOG_TIMER_SUPPORTED - monitor FW health
+ * @WIFI_LOGGER_DRIVER_DUMP_SUPPORTED - dump driver state
+ * @WIFI_LOGGER_PACKET_FATE_SUPPORTED - tracks connection packets fate
*/
enum wifi_logger_supported_features {
WIFI_LOGGER_PER_PACKET_TX_RX_STATUS_SUPPORTED = (1 << (1)),
@@ -2973,6 +2978,8 @@ enum wifi_logger_supported_features {
WIFI_LOGGER_WAKE_LOCK_SUPPORTED = (1 << (4)),
WIFI_LOGGER_VERBOSE_SUPPORTED = (1 << (5)),
WIFI_LOGGER_WATCHDOG_TIMER_SUPPORTED = (1 << (6)),
+ WIFI_LOGGER_DRIVER_DUMP_SUPPORTED = (1 << (7)),
+ WIFI_LOGGER_PACKET_FATE_SUPPORTED = (1 << (8))
};
/**
* enum qca_wlan_vendor_attr_acs_offload
@@ -3327,6 +3334,19 @@ enum qca_wlan_vendor_attr_config {
*/
QCA_WLAN_VENDOR_ATTR_CONFIG_LATENCY_LEVEL = 55,
+ /*
+ * 8-bit unsigned value indicating the driver to use the RSNE as-is from
+ * the connect interface. Exclusively used for the scenarios where the
+ * device is used as a test bed device with special functionality and
+ * not recommended for production. This helps driver to not validate the
+ * RSNE passed from user space and thus allow arbitrary IE data to be
+ * used for testing purposes.
+ * 1-enable, 0-disable.
+ * Applications set/reset this configuration. If not reset, this
+ * parameter remains in use until the driver is unloaded.
+ */
+ QCA_WLAN_VENDOR_ATTR_CONFIG_RSN_IE = 56,
+
QCA_WLAN_VENDOR_ATTR_CONFIG_AFTER_LAST,
QCA_WLAN_VENDOR_ATTR_CONFIG_MAX =
QCA_WLAN_VENDOR_ATTR_CONFIG_AFTER_LAST - 1,
@@ -3606,20 +3626,30 @@ enum qca_set_band {
* enum set_reset_packet_filter - set packet filter control commands
* @QCA_WLAN_SET_PACKET_FILTER: Set Packet Filter
* @QCA_WLAN_GET_PACKET_FILTER: Get Packet filter
+ * @QCA_WLAN_WRITE_PACKET_FILTER: Write packet filter program/data
+ * @QCA_WLAN_READ_PACKET_FILTER: Read packet filter program/data
+ * @QCA_WLAN_ENABLE_PACKET_FILTER: Enable APF interpreter
+ * @QCA_WLAN_DISABLE_PACKET_FILTER: Disable APF interpreter
*/
enum set_reset_packet_filter {
QCA_WLAN_SET_PACKET_FILTER = 1,
QCA_WLAN_GET_PACKET_FILTER = 2,
+ QCA_WLAN_WRITE_PACKET_FILTER = 3,
+ QCA_WLAN_READ_PACKET_FILTER = 4,
+ QCA_WLAN_ENABLE_PACKET_FILTER = 5,
+ QCA_WLAN_DISABLE_PACKET_FILTER = 6,
};
/**
- * enum qca_wlan_vendor_attr_packet_filter - BPF control commands
+ * enum qca_wlan_vendor_attr_packet_filter - APF control commands
* @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_INVALID: Invalid
* @QCA_WLAN_VENDOR_ATTR_SET_RESET_PACKET_FILTER: Filter ID
* @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_VERSION: Filter Version
* @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_SIZE: Total Length
* @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_CURRENT_OFFSET: Current offset
- * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROGRAM: length of BPF instructions
+ * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROGRAM: length of APF instructions
+ * @QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROG_LENGTH: length of the program
+ * section in packet filter buffer
*/
enum qca_wlan_vendor_attr_packet_filter {
QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_INVALID = 0,
@@ -3629,6 +3659,7 @@ enum qca_wlan_vendor_attr_packet_filter {
QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_SIZE,
QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_CURRENT_OFFSET,
QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROGRAM,
+ QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_PROG_LENGTH,
/* keep last */
QCA_WLAN_VENDOR_ATTR_PACKET_FILTER_AFTER_LAST,
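
The new PROG_LENGTH attribute is slotted in before the AFTER_LAST sentinel, which is what keeps the derived MAX value correct without any other edits. A tiny sketch of that "keep last" enum convention, with illustrative names:

/* Hedged sketch of the keep-last convention used in the enum above. */
#include <stdio.h>

enum filter_attr_sketch {
	FILTER_ATTR_INVALID = 0,
	FILTER_ATTR_ID,
	FILTER_ATTR_VERSION,
	FILTER_ATTR_SIZE,
	FILTER_ATTR_OFFSET,
	FILTER_ATTR_PROGRAM,
	FILTER_ATTR_PROG_LENGTH,	/* newly added entry */

	/* keep last */
	FILTER_ATTR_AFTER_LAST,
	FILTER_ATTR_MAX = FILTER_ATTR_AFTER_LAST - 1,
};

int main(void)
{
	printf("max attribute id = %d\n", FILTER_ATTR_MAX); /* 6 */
	return 0;
}
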
diff --git a/drivers/thermal/msm_lmh_dcvs.c b/drivers/thermal/msm_lmh_dcvs.c
index 81940aa7ff3c..1c5415afe687 100644
--- a/drivers/thermal/msm_lmh_dcvs.c
+++ b/drivers/thermal/msm_lmh_dcvs.c
@@ -38,6 +38,8 @@
#define CREATE_TRACE_POINTS
#define LMH_DCVS_TRACE
#include <trace/trace_thermal.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/events/power.h>
#define MSM_LIMITS_DCVSH 0x10
#define MSM_LIMITS_NODE_DCVS 0x44435653
@@ -147,6 +149,9 @@ static uint32_t msm_lmh_mitigation_notify(struct msm_lmh_dcvs_hw *hw)
max_limit = FREQ_HZ_TO_KHZ(freq_val);
trace_lmh_dcvs_freq(cpumask_first(&hw->core_map), max_limit);
+ trace_clock_set_rate(hw->sensor_name,
+ max_limit,
+ cpumask_first(&hw->core_map));
notify_exit:
hw->hw_freq_limit = max_limit;
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index 78bd121ecede..6060c3e8925e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -137,6 +137,9 @@ struct gsm_dlci {
struct mutex mutex;
/* Link layer */
+ int mode;
+#define DLCI_MODE_ABM 0 /* Normal Asynchronous Balanced Mode */
+#define DLCI_MODE_ADM 1 /* Asynchronous Disconnected Mode */
spinlock_t lock; /* Protects the internal state */
struct timer_list t1; /* Retransmit timer for SABM and UA */
int retries;
@@ -1380,7 +1383,13 @@ retry:
ctrl->data = data;
ctrl->len = clen;
gsm->pending_cmd = ctrl;
- gsm->cretries = gsm->n2;
+
+ /* If DLCI0 is in ADM mode skip retries, it won't respond */
+ if (gsm->dlci[0]->mode == DLCI_MODE_ADM)
+ gsm->cretries = 1;
+ else
+ gsm->cretries = gsm->n2;
+
mod_timer(&gsm->t2_timer, jiffies + gsm->t2 * HZ / 100);
gsm_control_transmit(gsm, ctrl);
spin_unlock_irqrestore(&gsm->control_lock, flags);
@@ -1488,6 +1497,7 @@ static void gsm_dlci_t1(unsigned long data)
if (debug & 8)
pr_info("DLCI %d opening in ADM mode.\n",
dlci->addr);
+ dlci->mode = DLCI_MODE_ADM;
gsm_dlci_open(dlci);
} else {
gsm_dlci_close(dlci);
@@ -2881,11 +2891,22 @@ static int gsmtty_modem_update(struct gsm_dlci *dlci, u8 brk)
static int gsm_carrier_raised(struct tty_port *port)
{
struct gsm_dlci *dlci = container_of(port, struct gsm_dlci, port);
+ struct gsm_mux *gsm = dlci->gsm;
+
/* Not yet open so no carrier info */
if (dlci->state != DLCI_OPEN)
return 0;
if (debug & 2)
return 1;
+
+ /*
+ * Basic mode with control channel in ADM mode may not respond
+ * to CMD_MSC at all and modem_rx is empty.
+ */
+ if (gsm->encoding == 0 && gsm->dlci[0]->mode == DLCI_MODE_ADM &&
+ !dlci->modem_rx)
+ return 1;
+
return dlci->modem_rx & TIOCM_CD;
}
diff --git a/drivers/tty/serial/serial_mctrl_gpio.c b/drivers/tty/serial/serial_mctrl_gpio.c
index 3eb57eb532f1..02147361eaa9 100644
--- a/drivers/tty/serial/serial_mctrl_gpio.c
+++ b/drivers/tty/serial/serial_mctrl_gpio.c
@@ -20,6 +20,7 @@
#include <linux/gpio/consumer.h>
#include <linux/termios.h>
#include <linux/serial_core.h>
+#include <linux/module.h>
#include "serial_mctrl_gpio.h"
@@ -193,6 +194,7 @@ struct mctrl_gpios *mctrl_gpio_init(struct uart_port *port, unsigned int idx)
return gpios;
}
+EXPORT_SYMBOL_GPL(mctrl_gpio_init);
void mctrl_gpio_free(struct device *dev, struct mctrl_gpios *gpios)
{
@@ -247,3 +249,6 @@ void mctrl_gpio_disable_ms(struct mctrl_gpios *gpios)
disable_irq(gpios->irq[i]);
}
}
+EXPORT_SYMBOL_GPL(mctrl_gpio_disable_ms);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 89fd20382ce4..198451fa9e5d 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -3154,7 +3154,10 @@ struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx)
kref_init(&tty->kref);
tty->magic = TTY_MAGIC;
- tty_ldisc_init(tty);
+ if (tty_ldisc_init(tty)) {
+ kfree(tty);
+ return NULL;
+ }
tty->session = NULL;
tty->pgrp = NULL;
mutex_init(&tty->legacy_mutex);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 9bee25cfa0be..d9e013dc2c08 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -168,12 +168,11 @@ static struct tty_ldisc *tty_ldisc_get(struct tty_struct *tty, int disc)
return ERR_CAST(ldops);
}
- ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL);
- if (ld == NULL) {
- put_ldops(ldops);
- return ERR_PTR(-ENOMEM);
- }
-
+ /*
+ * There is no way to handle allocation failure of only 16 bytes.
+ * Let's simplify error handling and save more memory.
+ */
+ ld = kmalloc(sizeof(struct tty_ldisc), GFP_KERNEL | __GFP_NOFAIL);
ld->ops = ldops;
ld->tty = tty;
@@ -804,12 +803,13 @@ void tty_ldisc_release(struct tty_struct *tty)
* the tty structure is not completely set up when this call is made.
*/
-void tty_ldisc_init(struct tty_struct *tty)
+int tty_ldisc_init(struct tty_struct *tty)
{
struct tty_ldisc *ld = tty_ldisc_get(tty, N_TTY);
if (IS_ERR(ld))
- panic("n_tty: init_tty");
+ return PTR_ERR(ld);
tty->ldisc = ld;
+ return 0;
}
/**
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 22dcccf2d286..6a287c81a7be 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -157,7 +157,9 @@ static const unsigned short full_speed_maxpacket_maxes[4] = {
static const unsigned short high_speed_maxpacket_maxes[4] = {
[USB_ENDPOINT_XFER_CONTROL] = 64,
[USB_ENDPOINT_XFER_ISOC] = 1024,
- [USB_ENDPOINT_XFER_BULK] = 512,
+
+ /* Bulk should be 512, but some devices use 1024: we will warn below */
+ [USB_ENDPOINT_XFER_BULK] = 1024,
[USB_ENDPOINT_XFER_INT] = 1024,
};
static const unsigned short super_speed_maxpacket_maxes[4] = {
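
The table change above raises the high-speed bulk ceiling from the spec's 512 bytes to 1024 so that non-compliant devices still enumerate, with a warning emitted elsewhere. A hedged sketch of how a per-type ceiling table like this can be consulted while parsing an endpoint descriptor; the warn-and-clamp policy shown is illustrative, not a copy of the hcd code.

/* Hedged sketch: maxpacket ceiling lookup for high-speed endpoints. */
#include <stdio.h>

enum xfer_type { XFER_CONTROL, XFER_ISOC, XFER_BULK, XFER_INT };

static const unsigned short high_speed_maxes[4] = {
	[XFER_CONTROL]	= 64,
	[XFER_ISOC]	= 1024,
	[XFER_BULK]	= 1024,	/* spec says 512; tolerate 1024 with a warning */
	[XFER_INT]	= 1024,
};

static unsigned short validate_maxp(enum xfer_type t, unsigned short maxp)
{
	if (maxp > high_speed_maxes[t]) {
		printf("endpoint reports maxpacket %u, clamping to %u\n",
		       maxp, high_speed_maxes[t]);
		maxp = high_speed_maxes[t];
	}
	if (t == XFER_BULK && maxp > 512)
		printf("bulk maxpacket %u exceeds the 512-byte spec limit\n", maxp);
	return maxp;
}

int main(void)
{
	validate_maxp(XFER_BULK, 1024);	/* warned but accepted, as in the hunk */
	return 0;
}
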
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 47eda4b3aea5..8d732e9f74fa 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2396,6 +2396,7 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
spin_lock_irqsave (&hcd_root_hub_lock, flags);
if (hcd->rh_registered) {
+ pm_wakeup_event(&hcd->self.root_hub->dev, 0);
set_bit(HCD_FLAG_WAKEUP_PENDING, &hcd->flags);
queue_work(pm_wq, &hcd->wakeup_work);
}
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index e282878fe52f..507d3bf918f2 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -643,12 +643,17 @@ void usb_wakeup_notification(struct usb_device *hdev,
unsigned int portnum)
{
struct usb_hub *hub;
+ struct usb_port *port_dev;
if (!hdev)
return;
hub = usb_hub_to_struct_hub(hdev);
if (hub) {
+ port_dev = hub->ports[portnum - 1];
+ if (port_dev && port_dev->child)
+ pm_wakeup_event(&port_dev->child->dev, 0);
+
set_bit(portnum, hub->wakeup_bits);
kick_hub_wq(hub);
}
@@ -3372,8 +3377,11 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
/* Skip the initial Clear-Suspend step for a remote wakeup */
status = hub_port_status(hub, port1, &portstatus, &portchange);
- if (status == 0 && !port_is_suspended(hub, portstatus))
+ if (status == 0 && !port_is_suspended(hub, portstatus)) {
+ if (portchange & USB_PORT_STAT_C_SUSPEND)
+ pm_wakeup_event(&udev->dev, 0);
goto SuspendCleared;
+ }
/* see 7.1.7.7; affects power usage, but not budgeting */
if (hub_is_superspeed(hub->hdev))
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index f5fa8a9bdbad..84cb37eece87 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -45,6 +45,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x03f0, 0x0701), .driver_info =
USB_QUIRK_STRING_FETCH_255 },
+ /* HP v222w 16GB Mini USB Drive */
+ { USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 19fedb46ae64..ba9d64ab4c01 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -541,6 +541,7 @@ struct dwc3_ep_events {
* @dbg_ep_events: different events counter for endpoint
* @dbg_ep_events_diff: differential events counter for endpoint
* @dbg_ep_events_ts: timestamp for previous event counters
+ * @fifo_depth: allocated TXFIFO depth
*/
struct dwc3_ep {
struct usb_ep endpoint;
@@ -581,6 +582,7 @@ struct dwc3_ep {
struct dwc3_ep_events dbg_ep_events;
struct dwc3_ep_events dbg_ep_events_diff;
struct timespec dbg_ep_events_ts;
+ int fifo_depth;
};
enum dwc3_phy {
@@ -987,7 +989,6 @@ struct dwc3 {
unsigned is_fpga:1;
unsigned needs_fifo_resize:1;
unsigned pullups_connected:1;
- unsigned resize_fifos:1;
unsigned setup_packet_pending:1;
unsigned three_stage_setup:1;
unsigned usb3_lpm_capable:1;
@@ -1048,6 +1049,8 @@ struct dwc3 {
wait_queue_head_t wait_linkstate;
bool create_reg_debugfs;
+ unsigned int index;
+ int last_fifo_depth;
};
/* -------------------------------------------------------------------------- */
@@ -1197,7 +1200,8 @@ struct dwc3_gadget_ep_cmd_params {
/* prototypes */
void dwc3_set_mode(struct dwc3 *dwc, u32 mode);
-int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc);
+u32 dwc3_core_fifo_space(struct dwc3_ep *dep, u8 type);
+int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep);
/* check whether we are on the DWC_usb3 core */
static inline bool dwc3_is_usb3(struct dwc3 *dwc)
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index fa1116078415..e359fd358ccd 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -1119,7 +1119,8 @@ static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
struct dwc3_gadget_ep_cmd_params params;
const struct usb_endpoint_descriptor *desc = ep->desc;
const struct usb_ss_ep_comp_descriptor *comp_desc = ep->comp_desc;
- u32 reg;
+ u32 reg;
+ int ret;
memset(&params, 0x00, sizeof(params));
@@ -1168,6 +1169,10 @@ static void gsi_configure_ep(struct usb_ep *ep, struct usb_gsi_request *request)
/* Set XferRsc Index for GSI EP */
if (!(dep->flags & DWC3_EP_ENABLED)) {
+ ret = dwc3_gadget_resize_tx_fifos(dwc, dep);
+ if (ret)
+ return;
+
memset(&params, 0x00, sizeof(params));
params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);
dwc3_send_gadget_ep_cmd(dwc, dep->number,
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index c2a6fdbfcfee..1d83e1215541 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -600,8 +600,9 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
enum usb_device_state state = dwc->gadget.state;
u32 cfg;
- int ret;
+ int ret, num;
u32 reg;
+ struct dwc3_ep *dep;
cfg = le16_to_cpu(ctrl->wValue);
@@ -610,6 +611,24 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
return -EINVAL;
case USB_STATE_ADDRESS:
+ /* Read ep0IN related TXFIFO size */
+ dwc->last_fifo_depth = (dwc3_readl(dwc->regs,
+ DWC3_GTXFIFOSIZ(0)) & 0xFFFF);
+ /* Clear existing allocated TXFIFO for all IN eps except ep0 */
+ for (num = 0; num < dwc->num_in_eps; num++) {
+ dep = dwc->eps[(num << 1) | 1];
+ if (num) {
+ dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), 0);
+ dep->fifo_depth = 0;
+ } else {
+ dep->fifo_depth = dwc->last_fifo_depth;
+ }
+
+ dev_dbg(dwc->dev, "%s(): %s dep->fifo_depth:%x\n",
+ __func__, dep->name, dep->fifo_depth);
+ dbg_event(0xFF, "fifo_reset", dep->number);
+ }
+
ret = dwc3_ep0_delegate_req(dwc, ctrl);
/* if the cfg matches and the cfg is non zero */
if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {
@@ -635,8 +654,6 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
}
- dwc->resize_fifos = true;
- dwc3_trace(trace_dwc3_ep0, "resize FIFOs flag SET");
}
break;
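
The SET_CONFIG path above snapshots the ep0 IN FIFO depth from the low 16 bits of GTXFIFOSIZ(0) and zeroes every other IN endpoint's allocation, so the later per-endpoint resize starts from a clean map. A small sketch of the register packing the code implies; treat the field split as an assumption drawn from this driver rather than a databook quotation.

/* Hedged sketch of the GTXFIFOSIZ packing implied above:
 * low 16 bits = FIFO depth, high 16 bits = FIFO start offset. */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t gtxfifosiz_pack(uint16_t start, uint16_t depth)
{
	return ((uint32_t)start << 16) | depth;
}

static inline uint16_t gtxfifosiz_depth(uint32_t reg)
{
	return reg & 0xffff;
}

int main(void)
{
	uint32_t reg = gtxfifosiz_pack(0x0184, 0x0184);

	printf("depth = 0x%x\n", gtxfifosiz_depth(reg)); /* 0x184 */
	return 0;
}
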
@@ -1083,11 +1100,6 @@ static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
static void __dwc3_ep0_do_control_status(struct dwc3 *dwc, struct dwc3_ep *dep)
{
int ret;
- if (dwc->resize_fifos) {
- dwc3_trace(trace_dwc3_ep0, "Resizing FIFOs");
- dwc3_gadget_resize_tx_fifos(dwc);
- dwc->resize_fifos = 0;
- }
ret = dwc3_ep0_start_control_status(dep);
if (WARN_ON_ONCE(ret))
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index bd5ba9391236..2350e63568ca 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -172,88 +172,64 @@ int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
*
* Unfortunately, due to many variables that's not always the case.
*/
-int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
-{
- int last_fifo_depth = 0;
- int ram1_depth;
- int fifo_size;
- int mdwidth;
- int num;
- int num_eps;
- int max_packet = 1024;
- struct usb_composite_dev *cdev = get_gadget_data(&dwc->gadget);
-
- if (!(cdev && cdev->config) || !dwc->needs_fifo_resize)
+int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc, struct dwc3_ep *dep)
+{
+ int fifo_size, mdwidth, max_packet = 1024;
+ int tmp, mult = 1;
+
+ if (!dwc->needs_fifo_resize)
return 0;
- num_eps = dwc->num_in_eps;
- ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
- mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
+ /* resize IN endpoints except ep0 */
+ if (!usb_endpoint_dir_in(dep->endpoint.desc) ||
+ dep->endpoint.ep_num == 0)
+ return 0;
+ /* Don't resize already resized IN endpoint */
+ if (dep->fifo_depth) {
+ dev_dbg(dwc->dev, "%s fifo_depth:%d is already set\n",
+ dep->endpoint.name, dep->fifo_depth);
+ return 0;
+ }
+
+ mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);
/* MDWIDTH is represented in bits, we need it in bytes */
mdwidth >>= 3;
- last_fifo_depth = (dwc3_readl(dwc->regs, DWC3_GTXFIFOSIZ(0)) & 0xFFFF);
- dev_dbg(dwc->dev, "%s: num eps:%d max_packet:%d last_fifo_depth:%04x\n",
- __func__, num_eps, max_packet, last_fifo_depth);
-
- /* Don't resize ep0IN TxFIFO, start with ep1IN only. */
- for (num = 1; num < num_eps; num++) {
- /* bit0 indicates direction; 1 means IN ep */
- struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
- int mult = 1;
- int tmp;
-
- tmp = max_packet + mdwidth;
- /*
- * Interfaces like MBIM or ECM is having multiple data
- * interfaces. SET_CONFIG() happens before set_alt with
- * data interface 1 which results into calling this API
- * before GSI endpoint enabled. This results no txfifo
- * resize with GSI endpoint causing low throughput. Hence
- * use mult as 3 for GSI IN endpoint always irrespective
- * USB speed.
- */
- if (dep->endpoint.ep_type == EP_TYPE_GSI ||
- dep->endpoint.endless)
- mult = 3;
-
- if (!(dep->flags & DWC3_EP_ENABLED)) {
- dev_dbg(dwc->dev, "ep%dIn not enabled", num);
- goto resize_fifo;
- }
-
- if (((dep->endpoint.maxburst > 1) &&
- usb_endpoint_xfer_bulk(dep->endpoint.desc))
- || usb_endpoint_xfer_isoc(dep->endpoint.desc))
- mult = 3;
-
-resize_fifo:
- tmp *= mult;
- tmp += mdwidth;
-
- fifo_size = DIV_ROUND_UP(tmp, mdwidth);
-
- fifo_size |= (last_fifo_depth << 16);
-
- dwc3_trace(trace_dwc3_gadget, "%s: Fifo Addr %04x Size %d",
- dep->name, last_fifo_depth, fifo_size & 0xffff);
-
- last_fifo_depth += (fifo_size & 0xffff);
- if (dwc->tx_fifo_size &&
- (last_fifo_depth >= dwc->tx_fifo_size)) {
- /*
- * Fifo size allocated exceeded available RAM size.
- * Hence return error.
- */
- dev_err(dwc->dev, "Fifosize(%d) > available RAM(%d)\n",
- last_fifo_depth, dwc->tx_fifo_size);
- return -ENOMEM;
- }
-
- dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);
+ if (dep->endpoint.ep_type == EP_TYPE_GSI || dep->endpoint.endless)
+ mult = 3;
+
+ if (((dep->endpoint.maxburst > 1) &&
+ usb_endpoint_xfer_bulk(dep->endpoint.desc))
+ || usb_endpoint_xfer_isoc(dep->endpoint.desc))
+ mult = 3;
+
+ tmp = ((max_packet + mdwidth) * mult) + mdwidth;
+ fifo_size = DIV_ROUND_UP(tmp, mdwidth);
+ dep->fifo_depth = fifo_size;
+ fifo_size |= (dwc->last_fifo_depth << 16);
+ dwc->last_fifo_depth += (fifo_size & 0xffff);
+
+ dev_dbg(dwc->dev, "%s ep_num:%d last_fifo_depth:%04x fifo_depth:%d\n",
+ dep->endpoint.name, dep->endpoint.ep_num, dwc->last_fifo_depth,
+ dep->fifo_depth);
+
+ dbg_event(0xFF, "resize_fifo", dep->number);
+ dbg_event(0xFF, "fifo_depth", dep->fifo_depth);
+ /* Check fifo size allocation doesn't exceed available RAM size. */
+ if (dwc->tx_fifo_size &&
+ ((dwc->last_fifo_depth * mdwidth) >= dwc->tx_fifo_size)) {
+ dev_err(dwc->dev, "Fifosize(%d) > RAM size(%d) %s depth:%d\n",
+ (dwc->last_fifo_depth * mdwidth), dwc->tx_fifo_size,
+ dep->endpoint.name, fifo_size);
+ dwc->last_fifo_depth -= (fifo_size & 0xffff);
+ dep->fifo_depth = 0;
+ WARN_ON(1);
+ return -ENOMEM;
}
+ dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(dep->endpoint.ep_num),
+ fifo_size);
return 0;
}
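
The rewritten resize path sizes each IN endpoint on demand: tmp = ((max_packet + mdwidth) * mult) + mdwidth, fifo_size = DIV_ROUND_UP(tmp, mdwidth), and the running last_fifo_depth becomes the next endpoint's start offset in the upper half of GTXFIFOSIZ. A worked example of that arithmetic, assuming an 8-byte bus width; the starting ep0 depth below is illustrative.

/* Hedged worked example of the TXFIFO resize arithmetic above. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int mdwidth = 1024 >> 7, max_packet = 1024;	/* mdwidth assumed 8 bytes */
	int last_fifo_depth = 0x184;			/* illustrative ep0 allocation */

	/* bulk with maxburst > 1, isoc, or GSI/endless endpoint: mult = 3 */
	int tmp  = ((max_packet + mdwidth) * 3) + mdwidth;	/* 3104 bytes */
	int size = DIV_ROUND_UP(tmp, mdwidth);			/* 388 FIFO words */

	unsigned int reg = ((unsigned int)last_fifo_depth << 16) | size;
	last_fifo_depth += size;		/* next endpoint starts here */

	printf("GTXFIFOSIZ value 0x%08x, next start 0x%x\n", reg, last_fifo_depth);
	/* a mult of 1 would instead give DIV_ROUND_UP(1040, 8) = 130 words */
	return 0;
}
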
@@ -622,6 +598,17 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
dwc3_trace(trace_dwc3_gadget, "Enabling %s", dep->name);
if (!(dep->flags & DWC3_EP_ENABLED)) {
+ dep->endpoint.desc = desc;
+ dep->comp_desc = comp_desc;
+ dep->type = usb_endpoint_type(desc);
+ ret = dwc3_gadget_resize_tx_fifos(dwc, dep);
+ if (ret) {
+ dep->endpoint.desc = NULL;
+ dep->comp_desc = NULL;
+ dep->type = 0;
+ return ret;
+ }
+
ret = dwc3_gadget_start_config(dwc, dep);
if (ret) {
dev_err(dwc->dev, "start_config() failed for %s\n",
@@ -641,9 +628,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
- dep->endpoint.desc = desc;
- dep->comp_desc = comp_desc;
- dep->type = usb_endpoint_type(desc);
dep->flags |= DWC3_EP_ENABLED;
reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
@@ -2885,9 +2869,6 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
dwc3_stop_active_transfers(dwc);
dwc3_clear_stall_all_ep(dwc);
- /* bus reset issued due to missing status stage of a control transfer */
- dwc->resize_fifos = 0;
-
/* Reset device address to zero */
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
reg &= ~(DWC3_DCFG_DEVADDR_MASK);
diff --git a/drivers/usb/gadget/function/f_accessory.c b/drivers/usb/gadget/function/f_accessory.c
index 29c418abdc16..9017ec5d435b 100644
--- a/drivers/usb/gadget/function/f_accessory.c
+++ b/drivers/usb/gadget/function/f_accessory.c
@@ -897,6 +897,12 @@ int acc_ctrlrequest(struct usb_composite_dev *cdev,
u16 w_length = le16_to_cpu(ctrl->wLength);
unsigned long flags;
+ /*
+ * If instance is not created which is the case in power off charging
+ * mode, dev will be NULL. Hence return error if it is the case.
+ */
+ if (!dev)
+ return -ENODEV;
/*
printk(KERN_INFO "acc_ctrlrequest "
"%02x.%02x v%04x i%04x l%u\n",
@@ -1105,8 +1111,10 @@ acc_function_unbind(struct usb_configuration *c, struct usb_function *f)
while ((req = req_get(dev, &dev->tx_idle)))
acc_request_free(req, dev->ep_in);
- for (i = 0; i < RX_REQ_MAX; i++)
+ for (i = 0; i < RX_REQ_MAX; i++) {
acc_request_free(dev->rx_req[i], dev->ep_out);
+ dev->rx_req[i] = NULL;
+ }
acc_hid_unbind(dev);
}
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 1e21ee9092e2..5fb3c9e35471 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -2011,12 +2011,6 @@ static int ffs_func_eps_enable(struct ffs_function *func)
break;
}
- /*
- * userspace setting maxburst > 1 results more fifo
- * allocation than without maxburst. Change maxburst to 1
- * only to allocate fifo size of max packet size.
- */
- ep->ep->maxburst = 1;
ret = usb_ep_enable(ep->ep);
if (likely(!ret)) {
epfile->ep = ep;
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 0d843e0f8055..494823f21c28 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1048,7 +1048,9 @@ static void musb_bulk_nak_timeout(struct musb *musb, struct musb_hw_ep *ep,
/* set tx_reinit and schedule the next qh */
ep->tx_reinit = 1;
}
- musb_start_urb(musb, is_in, next_qh);
+
+ if (next_qh)
+ musb_start_urb(musb, is_in, next_qh);
}
}
diff --git a/drivers/usb/pd/pd_engine.c b/drivers/usb/pd/pd_engine.c
index 22d7208e9e66..a94ff4f77bcc 100644
--- a/drivers/usb/pd/pd_engine.c
+++ b/drivers/usb/pd/pd_engine.c
@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/power_supply.h>
+#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -75,6 +76,7 @@ struct usbpd {
int logbuffer_tail;
u8 *logbuffer[LOG_BUFFER_ENTRIES];
bool in_pr_swap;
+ bool suspend_supported;
};
/*
@@ -521,9 +523,9 @@ static void psy_changed_handler(struct work_struct *work)
apsd_done = !!val.intval;
ret = power_supply_get_property(pd->usb_psy,
- POWER_SUPPLY_PROP_ONLINE, &val);
+ POWER_SUPPLY_PROP_PRESENT, &val);
if (ret < 0) {
- pd_engine_log(pd, "Unable to read ONLINE, ret=%d",
+ pd_engine_log(pd, "Unable to read PRESENT, ret=%d",
ret);
return;
}
@@ -1077,6 +1079,35 @@ unlock:
return ret;
}
+static int tcpm_set_suspend_supported(struct tcpc_dev *dev,
+ bool suspend_supported)
+{
+ union power_supply_propval val = {0};
+ struct usbpd *pd = container_of(dev, struct usbpd, tcpc_dev);
+ int ret = 0;
+
+ mutex_lock(&pd->lock);
+
+ if (suspend_supported == pd->suspend_supported)
+ goto unlock;
+
+ /* Attempt once */
+ pd->suspend_supported = suspend_supported;
+ val.intval = suspend_supported ? 1 : 0;
+ pd_engine_log(pd, "usb suspend %d", suspend_supported ? 1 : 0);
+ ret = power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_USB_SUSPEND_SUPPORTED,
+ &val);
+ if (ret < 0) {
+ pd_engine_log(pd,
+ "unable to set suspend flag to %d, ret=%d",
+ suspend_supported ? 1 : 0, ret);
+ }
+
+unlock:
+ mutex_unlock(&pd->lock);
+ return ret;
+}
enum power_role get_pdphy_power_role(enum typec_role role)
{
@@ -1302,9 +1333,19 @@ static void pd_phy_shutdown(struct usbpd *pd)
pd_engine_log(pd, "pd phy shutdown");
}
+enum pdo_role {
+ SNK_PDO,
+ SRC_PDO,
+};
+
+static const char * const pdo_prop_name[] = {
+ [SNK_PDO] = "snk-pdo",
+ [SRC_PDO] = "src-pdo",
+};
+
#define PDO_FIXED_FLAGS \
(PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP | PDO_FIXED_USB_COMM)
-
+/*
static const u32 src_pdo[] = {
PDO_FIXED(5000, 900, PDO_FIXED_FLAGS),
};
@@ -1328,10 +1369,140 @@ static const struct tcpc_config pd_tcpc_config = {
.try_role_hw = true,
.alt_modes = NULL,
};
+*/
+
+static u32 *parse_pdo(struct usbpd *pd, enum pdo_role role,
+ unsigned int *nr_pdo)
+{
+ struct device *dev = &pd->dev;
+ u32 *dt_array;
+ u32 *pdo;
+ int i, count, rc;
+
+ count = device_property_read_u32_array(dev->parent, pdo_prop_name[role],
+ NULL, 0);
+ if (count > 0) {
+ if (count % 4)
+ return ERR_PTR(-EINVAL);
+
+ *nr_pdo = count / 4;
+ dt_array = devm_kcalloc(dev, count, sizeof(*dt_array),
+ GFP_KERNEL);
+ if (!dt_array)
+ return ERR_PTR(-ENOMEM);
+
+ rc = device_property_read_u32_array(dev->parent,
+ pdo_prop_name[role],
+ dt_array, count);
+ if (rc)
+ return ERR_PTR(rc);
+
+ pdo = devm_kcalloc(dev, *nr_pdo, sizeof(*pdo), GFP_KERNEL);
+ if (!pdo)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < *nr_pdo; i++) {
+ switch (dt_array[i * 4]) {
+ case PDO_TYPE_FIXED:
+ pdo[i] = PDO_FIXED(dt_array[i * 4 + 1],
+ dt_array[i * 4 + 2],
+ PDO_FIXED_FLAGS);
+ break;
+ case PDO_TYPE_BATT:
+ pdo[i] = PDO_BATT(dt_array[i * 4 + 1],
+ dt_array[i * 4 + 2],
+ dt_array[i * 4 + 3]);
+ break;
+ case PDO_TYPE_VAR:
+ pdo[i] = PDO_VAR(dt_array[i * 4 + 1],
+ dt_array[i * 4 + 2],
+ dt_array[i * 4 + 3]);
+ break;
+ /*case PDO_TYPE_AUG:*/
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+ }
+ return pdo;
+ }
+
+ return ERR_PTR(-EINVAL);
+}
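
parse_pdo() above consumes a flat u32 array in 4-tuples of { type, arg1, arg2, arg3 }, e.g. a hypothetical "snk-pdo = <0 5000 3000 0>;" describing a fixed 5 V / 3 A sink. The sketch below folds one such tuple into a PDO word; the PDO_FIXED encoding used (voltage in 50 mV units at bits 19:10, current in 10 mA units at bits 9:0) is the USB PD fixed-supply layout and is stated here as an assumption, not lifted from the driver.

/* Hedged sketch of the 4-tuple PDO parsing shown above. */
#include <stdint.h>
#include <stdio.h>

#define PDO_TYPE_FIXED_SKETCH 0
#define PDO_FIXED_SKETCH(mv, ma) \
	((((uint32_t)(mv) / 50) << 10) | ((uint32_t)(ma) / 10))

int main(void)
{
	/* flat device-tree array, one 4-tuple per PDO */
	const uint32_t dt_array[] = { PDO_TYPE_FIXED_SKETCH, 5000, 3000, 0 };
	uint32_t pdo[sizeof(dt_array) / sizeof(dt_array[0]) / 4];
	unsigned int i;

	for (i = 0; i < sizeof(pdo) / sizeof(pdo[0]); i++) {
		if (dt_array[i * 4] == PDO_TYPE_FIXED_SKETCH)
			pdo[i] = PDO_FIXED_SKETCH(dt_array[i * 4 + 1],
						  dt_array[i * 4 + 2]);
	}
	printf("pdo[0] = 0x%08x\n", pdo[0]);	/* 5 V / 3 A fixed sink PDO */
	return 0;
}
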
+
+static int init_tcpc_config(struct tcpc_dev *pd_tcpc_dev)
+{
+ struct usbpd *pd = container_of(pd_tcpc_dev, struct usbpd, tcpc_dev);
+ struct device *dev = &pd->dev;
+ struct tcpc_config *config;
+ int ret;
+
+ pd_tcpc_dev->config = devm_kzalloc(dev, sizeof(*config), GFP_KERNEL);
+ if (!pd_tcpc_dev->config)
+ return -ENOMEM;
+
+ config = pd_tcpc_dev->config;
+
+ ret = device_property_read_u32(dev->parent, "port-type", &config->type);
+ if (ret < 0)
+ return ret;
+
+ switch (config->type) {
+ case TYPEC_PORT_UFP:
+ config->snk_pdo = parse_pdo(pd, SNK_PDO, &config->nr_snk_pdo);
+ if (IS_ERR(config->snk_pdo))
+ return PTR_ERR(config->snk_pdo);
+ break;
+ case TYPEC_PORT_DFP:
+ config->src_pdo = parse_pdo(pd, SRC_PDO, &config->nr_src_pdo);
+ if (IS_ERR(config->src_pdo))
+ return PTR_ERR(config->src_pdo);
+ break;
+ case TYPEC_PORT_DRP:
+ config->snk_pdo = parse_pdo(pd, SNK_PDO, &config->nr_snk_pdo);
+ if (IS_ERR(config->snk_pdo))
+ return PTR_ERR(config->snk_pdo);
+ config->src_pdo = parse_pdo(pd, SRC_PDO, &config->nr_src_pdo);
+ if (IS_ERR(config->src_pdo))
+ return PTR_ERR(config->src_pdo);
+
+ ret = device_property_read_u32(dev->parent, "default-role",
+ &config->default_role);
+ if (ret < 0)
+ return ret;
+
+ config->try_role_hw = device_property_read_bool(dev->parent,
+ "try-role-hw");
+ break;
+ default:
+ return -EINVAL;
+ }
-static void init_tcpc_dev(struct tcpc_dev *pd_tcpc_dev)
+ if (config->type == TYPEC_PORT_UFP || config->type == TYPEC_PORT_DRP) {
+ ret = device_property_read_u32(dev->parent, "max-snk-mv",
+ &config->max_snk_mv);
+ ret = device_property_read_u32(dev->parent, "max-snk-ma",
+ &config->max_snk_ma);
+ ret = device_property_read_u32(dev->parent, "max-snk-mw",
+ &config->max_snk_mw);
+ ret = device_property_read_u32(dev->parent, "op-snk-mw",
+ &config->operating_snk_mw);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* TODO: parse alt mode from DT */
+ config->alt_modes = NULL;
+
+ return 0;
+}
+
+static int init_tcpc_dev(struct tcpc_dev *pd_tcpc_dev)
{
- pd_tcpc_dev->config = &pd_tcpc_config;
+ int ret;
+
+ ret = init_tcpc_config(pd_tcpc_dev);
+ if (ret < 0)
+ return ret;
pd_tcpc_dev->init = tcpm_init;
pd_tcpc_dev->get_vbus = tcpm_get_vbus;
pd_tcpc_dev->set_cc = tcpm_set_cc;
@@ -1346,7 +1517,9 @@ static void init_tcpc_dev(struct tcpc_dev *pd_tcpc_dev)
pd_tcpc_dev->pd_transmit = tcpm_pd_transmit;
pd_tcpc_dev->start_drp_toggling = tcpm_start_drp_toggling;
pd_tcpc_dev->set_in_pr_swap = tcpm_set_in_pr_swap;
+ pd_tcpc_dev->set_suspend_supported = tcpm_set_suspend_supported;
pd_tcpc_dev->mux = NULL;
+ return 0;
}
static void init_pd_phy_params(struct pd_phy_params *pdphy_params)
@@ -1455,7 +1628,9 @@ struct usbpd *usbpd_create(struct device *parent)
* TCPM callbacks may access pd->usb_psy. Therefore, tcpm_register_port
* must be called after pd->usb_psy is initialized.
*/
- init_tcpc_dev(&pd->tcpc_dev);
+ ret = init_tcpc_dev(&pd->tcpc_dev);
+ if (ret < 0)
+ goto put_psy;
pd->tcpm_port = tcpm_register_port(&pd->dev, &pd->tcpc_dev);
if (IS_ERR(pd->tcpm_port)) {
ret = PTR_ERR(pd->tcpm_port);
@@ -1471,6 +1646,8 @@ struct usbpd *usbpd_create(struct device *parent)
init_pd_phy_params(&pd->pdphy_params);
+ pd->suspend_supported = true;
+
return pd;
unreg_tcpm:
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index 584ae8cbaf1c..77c3ebe860c5 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -62,6 +62,7 @@ config USB_SERIAL_SIMPLE
- Fundamental Software dongle.
- Google USB serial devices
- HP4x calculators
+ - Libtransistor USB console
- a number of Motorola phones
- Motorola Tetra devices
- Novatel Wireless GPS receivers
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 64a4427678b0..32cadca198b2 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -210,6 +210,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
{ USB_DEVICE(0x3195, 0xF280) }, /* Link Instruments MSO-28 */
{ USB_DEVICE(0x3195, 0xF281) }, /* Link Instruments MSO-28 */
+ { USB_DEVICE(0x3923, 0x7A0B) }, /* National Instruments USB Serial Console */
{ USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index a224c7a3ce09..3e5b189a79b4 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1911,7 +1911,8 @@ static int ftdi_8u2232c_probe(struct usb_serial *serial)
return ftdi_jtag_probe(serial);
if (udev->product &&
- (!strcmp(udev->product, "BeagleBone/XDS100V2") ||
+ (!strcmp(udev->product, "Arrow USB Blaster") ||
+ !strcmp(udev->product, "BeagleBone/XDS100V2") ||
!strcmp(udev->product, "SNAP Connect E10")))
return ftdi_jtag_probe(serial);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1799aa058a5b..d982c455e18e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -236,6 +236,8 @@ static void option_instat_callback(struct urb *urb);
/* These Quectel products use Qualcomm's vendor ID */
#define QUECTEL_PRODUCT_UC20 0x9003
#define QUECTEL_PRODUCT_UC15 0x9090
+/* These u-blox products use Qualcomm's vendor ID */
+#define UBLOX_PRODUCT_R410M 0x90b2
/* These Yuga products use Qualcomm's vendor ID */
#define YUGA_PRODUCT_CLM920_NC5 0x9625
@@ -244,6 +246,7 @@ static void option_instat_callback(struct urb *urb);
#define QUECTEL_PRODUCT_EC21 0x0121
#define QUECTEL_PRODUCT_EC25 0x0125
#define QUECTEL_PRODUCT_BG96 0x0296
+#define QUECTEL_PRODUCT_EP06 0x0306
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
@@ -550,147 +553,15 @@ static void option_instat_callback(struct urb *urb);
#define WETELECOM_PRODUCT_6802 0x6802
#define WETELECOM_PRODUCT_WMD300 0x6803
-struct option_blacklist_info {
- /* bitmask of interface numbers blacklisted for send_setup */
- const unsigned long sendsetup;
- /* bitmask of interface numbers that are reserved */
- const unsigned long reserved;
-};
-
-static const struct option_blacklist_info four_g_w14_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
-};
-
-static const struct option_blacklist_info four_g_w100_blacklist = {
- .sendsetup = BIT(1) | BIT(2),
- .reserved = BIT(3),
-};
-
-static const struct option_blacklist_info alcatel_x200_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_0037_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
-};
-
-static const struct option_blacklist_info zte_k3765_z_blacklist = {
- .sendsetup = BIT(0) | BIT(1) | BIT(2),
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_ad3812_z_blacklist = {
- .sendsetup = BIT(0) | BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info zte_mc2718_z_blacklist = {
- .sendsetup = BIT(1) | BIT(2) | BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info zte_mc2716_z_blacklist = {
- .sendsetup = BIT(1) | BIT(2) | BIT(3),
-};
-
-static const struct option_blacklist_info zte_me3620_mbim_blacklist = {
- .reserved = BIT(2) | BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info zte_me3620_xl_blacklist = {
- .reserved = BIT(3) | BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info zte_zm8620_x_blacklist = {
- .reserved = BIT(3) | BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info huawei_cdc12_blacklist = {
- .reserved = BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info net_intf0_blacklist = {
- .reserved = BIT(0),
-};
-static const struct option_blacklist_info net_intf1_blacklist = {
- .reserved = BIT(1),
-};
+/* Device flags */
-static const struct option_blacklist_info net_intf2_blacklist = {
- .reserved = BIT(2),
-};
+/* Interface does not support modem-control requests */
+#define NCTRL(ifnum) ((BIT(ifnum) & 0xff) << 8)
-static const struct option_blacklist_info net_intf3_blacklist = {
- .reserved = BIT(3),
-};
+/* Interface is reserved */
+#define RSVD(ifnum) ((BIT(ifnum) & 0xff) << 0)
-static const struct option_blacklist_info net_intf4_blacklist = {
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info net_intf5_blacklist = {
- .reserved = BIT(5),
-};
-
-static const struct option_blacklist_info net_intf6_blacklist = {
- .reserved = BIT(6),
-};
-
-static const struct option_blacklist_info zte_mf626_blacklist = {
- .sendsetup = BIT(0) | BIT(1),
- .reserved = BIT(4),
-};
-
-static const struct option_blacklist_info zte_1255_blacklist = {
- .reserved = BIT(3) | BIT(4),
-};
-
-static const struct option_blacklist_info simcom_sim7100e_blacklist = {
- .reserved = BIT(5) | BIT(6),
-};
-
-static const struct option_blacklist_info telit_me910_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(3),
-};
-
-static const struct option_blacklist_info telit_me910_dual_modem_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(3),
-};
-
-static const struct option_blacklist_info telit_le910_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(2),
-};
-
-static const struct option_blacklist_info telit_le920_blacklist = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(5),
-};
-
-static const struct option_blacklist_info telit_le920a4_blacklist_1 = {
- .sendsetup = BIT(0),
- .reserved = BIT(1),
-};
-
-static const struct option_blacklist_info telit_le922_blacklist_usbcfg0 = {
- .sendsetup = BIT(2),
- .reserved = BIT(0) | BIT(1) | BIT(3),
-};
-
-static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
- .sendsetup = BIT(0),
- .reserved = BIT(1) | BIT(2) | BIT(3),
-};
-
-static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
- .reserved = BIT(4) | BIT(5),
-};
-
-static const struct option_blacklist_info yuga_clm920_nc5_blacklist = {
- .reserved = BIT(1) | BIT(4),
-};
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
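
The NCTRL()/RSVD() macros introduced above replace the per-device blacklist structs by packing the reserved-interface mask into the low byte of driver_info and the no-modem-control mask into the next byte. A short sketch of decoding that packed value; the helper functions are illustrative, not the option driver's code.

/* Hedged sketch of decoding the packed driver_info flags defined above. */
#include <stdbool.h>
#include <stdio.h>

#define BIT(n)		(1ul << (n))
#define NCTRL(ifnum)	((BIT(ifnum) & 0xff) << 8)
#define RSVD(ifnum)	((BIT(ifnum) & 0xff) << 0)

static bool iface_is_reserved(unsigned long device_flags, int ifnum)
{
	return device_flags & RSVD(ifnum);
}

static bool iface_skips_modem_ctrl(unsigned long device_flags, int ifnum)
{
	return device_flags & NCTRL(ifnum);
}

int main(void)
{
	/* same flags as the ME910 entry in the table above */
	unsigned long flags = NCTRL(0) | RSVD(1) | RSVD(3);

	printf("if1 reserved: %d, if0 no-mctrl: %d\n",
	       iface_is_reserved(flags, 1), iface_skips_modem_ctrl(flags, 0));
	return 0;
}
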
@@ -724,26 +595,26 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
{ USB_DEVICE(QUANTA_VENDOR_ID, 0xea42),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c05, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c1f, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S6, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ .driver_info = RSVD(1) | RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ .driver_info = RSVD(1) | RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */
- .driver_info = (kernel_ulong_t) &net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+ .driver_info = RSVD(1) | RSVD(2) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
@@ -1188,65 +1059,70 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
/* Quectel products using Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC15)},
{ USB_DEVICE(QUALCOMM_VENDOR_ID, QUECTEL_PRODUCT_UC20),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
/* Yuga products use Qualcomm vendor ID */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, YUGA_PRODUCT_CLM920_NC5),
- .driver_info = (kernel_ulong_t)&yuga_clm920_nc5_blacklist },
+ .driver_info = RSVD(1) | RSVD(4) },
+ /* u-blox products using Qualcomm vendor ID */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M),
+ .driver_info = RSVD(1) | RSVD(3) },
/* Quectel products using Quectel vendor ID */
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC25),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_BG96),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
+ { USB_DEVICE(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06),
+ .driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6004) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6005) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_628A) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHE_628S),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_301),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_628S) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_680) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU_685A) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720S),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7002),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629K),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7004),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7005) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CGU_629),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_629S),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CHU_720I),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7212),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7213),
- .driver_info = (kernel_ulong_t)&net_intf0_blacklist },
+ .driver_info = RSVD(0) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7251),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7252),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_7253),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_DUAL) },
@@ -1254,38 +1130,38 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG0),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG1),
- .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG2),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG3),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE922_USBCFG5, 0xff),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg0 },
+ .driver_info = RSVD(0) | RSVD(1) | NCTRL(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
- .driver_info = (kernel_ulong_t)&telit_me910_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
- .driver_info = (kernel_ulong_t)&telit_me910_dual_modem_blacklist },
+ .driver_info = NCTRL(0) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
- .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
- .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(5) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1207) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1208),
- .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1211),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1212),
- .driver_info = (kernel_ulong_t)&telit_le920a4_blacklist_1 },
+ .driver_info = NCTRL(0) | RSVD(1) },
{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1213, 0xff) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920A4_1214),
- .driver_info = (kernel_ulong_t)&telit_le922_blacklist_usbcfg3 },
+ .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) | RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0002, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0003, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0004, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0005, 0xff, 0xff, 0xff) },
@@ -1301,58 +1177,58 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0010, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0011, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0022, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0023, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff,
- 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_mf626_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF626, 0xff, 0xff, 0xff),
+ .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0032, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0033, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0034, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0037, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_0037_blacklist },
+ .driver_info = NCTRL(0) | NCTRL(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0038, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0039, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0040, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0042, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0043, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0044, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0048, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0049, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0050, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0056, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0064, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0065, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) },
@@ -1377,26 +1253,26 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0106, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0108, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0135, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0136, 0xff, 0xff, 0xff) },
@@ -1412,50 +1288,50 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0189, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0196, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0197, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0200, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0201, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0254, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0265, 0xff, 0xff, 0xff), /* ONDA MT8205 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0284, 0xff, 0xff, 0xff), /* ZTE MF880 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0317, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
@@ -1572,23 +1448,23 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_1255_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) },
@@ -1603,7 +1479,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
@@ -1639,17 +1515,17 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1303, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1333, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1667,8 +1543,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1596, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1598, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1600, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff,
- 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff),
+ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) | RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */
@@ -1679,20 +1555,20 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf1_blacklist },
+ .driver_info = RSVD(1) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
@@ -1844,19 +1720,19 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710T, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2718, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_mc2718_z_blacklist },
+ .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) | NCTRL(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AD3812, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_ad3812_z_blacklist },
+ .driver_info = NCTRL(0) | NCTRL(1) | NCTRL(2) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MC2716, 0xff, 0xff, 0xff),
- .driver_info = (kernel_ulong_t)&zte_mc2716_z_blacklist },
+ .driver_info = NCTRL(1) | NCTRL(2) | NCTRL(3) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_L),
- .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_MBIM),
- .driver_info = (kernel_ulong_t)&zte_me3620_mbim_blacklist },
+ .driver_info = RSVD(2) | RSVD(3) | RSVD(4) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ME3620_X),
- .driver_info = (kernel_ulong_t)&zte_me3620_xl_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_DEVICE(ZTE_VENDOR_ID, ZTE_PRODUCT_ZM8620_X),
- .driver_info = (kernel_ulong_t)&zte_zm8620_x_blacklist },
+ .driver_info = RSVD(3) | RSVD(4) | RSVD(5) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
@@ -1876,37 +1752,34 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
{ USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
- .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
+ .driver_info = RSVD(5) | RSVD(6) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
- .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
- },
+ .driver_info = NCTRL(0) | NCTRL(1) | RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x0052),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b6),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, 0x00b7),
- .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
+ .driver_info = RSVD(5) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L100V),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_L800MA),
- .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+ .driver_info = RSVD(2) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
- .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
- },
+ .driver_info = NCTRL(0) | NCTRL(1) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
- .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
- },
+ .driver_info = NCTRL(1) | NCTRL(2) | RSVD(3) },
{USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist},
+ .driver_info = RSVD(3)},
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
+ .driver_info = RSVD(3) },
{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -1932,14 +1805,14 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
- .driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
+ .driver_info = RSVD(4) | RSVD(5) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
@@ -1949,20 +1822,20 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
- .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+ .driver_info = RSVD(6) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
{ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
@@ -2039,9 +1912,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) },
{ USB_DEVICE_AND_INTERFACE_INFO(TPLINK_VENDOR_ID, TPLINK_PRODUCT_LTE, 0xff, 0x00, 0x00) }, /* TP-Link LTE Module */
{ USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180),
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */
{ USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */
@@ -2052,9 +1925,9 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d04, 0xff) }, /* D-Link DWM-158 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7d0e, 0xff) }, /* D-Link DWM-157 C1 */
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e19, 0xff), /* D-Link DWM-221 B1 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_INTERFACE_CLASS(0x2001, 0x7e35, 0xff), /* D-Link DWM-222 */
- .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ .driver_info = RSVD(4) },
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */
{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x7e11, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/A3 */
@@ -2114,7 +1987,7 @@ static int option_probe(struct usb_serial *serial,
struct usb_interface_descriptor *iface_desc =
&serial->interface->cur_altsetting->desc;
struct usb_device_descriptor *dev_desc = &serial->dev->descriptor;
- const struct option_blacklist_info *blacklist;
+ unsigned long device_flags = id->driver_info;
/* Never bind to the CD-Rom emulation interface */
if (iface_desc->bInterfaceClass == 0x08)
@@ -2125,9 +1998,7 @@ static int option_probe(struct usb_serial *serial,
* the same class/subclass/protocol as the serial interfaces. Look at
* the Windows driver .INF files for reserved interface numbers.
*/
- blacklist = (void *)id->driver_info;
- if (blacklist && test_bit(iface_desc->bInterfaceNumber,
- &blacklist->reserved))
+ if (device_flags & RSVD(iface_desc->bInterfaceNumber))
return -ENODEV;
/*
* Don't bind network interface on Samsung GT-B3730, it is handled by
@@ -2138,8 +2009,8 @@ static int option_probe(struct usb_serial *serial,
iface_desc->bInterfaceClass != USB_CLASS_CDC_DATA)
return -ENODEV;
- /* Store the blacklist info so we can use it during attach. */
- usb_set_serial_data(serial, (void *)blacklist);
+ /* Store the device flags so we can use them during attach. */
+ usb_set_serial_data(serial, (void *)device_flags);
return 0;
}
@@ -2147,22 +2018,21 @@ static int option_probe(struct usb_serial *serial,
static int option_attach(struct usb_serial *serial)
{
struct usb_interface_descriptor *iface_desc;
- const struct option_blacklist_info *blacklist;
struct usb_wwan_intf_private *data;
+ unsigned long device_flags;
data = kzalloc(sizeof(struct usb_wwan_intf_private), GFP_KERNEL);
if (!data)
return -ENOMEM;
- /* Retrieve blacklist info stored at probe. */
- blacklist = usb_get_serial_data(serial);
+ /* Retrieve device flags stored at probe. */
+ device_flags = (unsigned long)usb_get_serial_data(serial);
iface_desc = &serial->interface->cur_altsetting->desc;
- if (!blacklist || !test_bit(iface_desc->bInterfaceNumber,
- &blacklist->sendsetup)) {
+ if (!(device_flags & NCTRL(iface_desc->bInterfaceNumber)))
data->use_send_setup = 1;
- }
+
spin_lock_init(&data->susp_lock);
usb_set_serial_data(serial, data);
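The option.c hunks above drop the per-device option_blacklist_info structures and pack the same information as bit flags directly into driver_info, which option_probe()/option_attach() then test with a simple mask. The RSVD()/NCTRL() macro definitions sit earlier in the patch and are not part of this excerpt; the sketch below shows the encoding they are assumed to use (one byte of "reserved interface" bits, one byte of "no modem-control" bits) and how the probe-time check reduces to a single AND:

	/* Assumed flag layout for the interface-masking macros (definitions not shown in this excerpt). */
	#define RSVD(ifnum)	((BIT(ifnum) & 0xff) << 0)	/* interface is reserved, e.g. for wwan */
	#define NCTRL(ifnum)	((BIT(ifnum) & 0xff) << 8)	/* interface does not take modem-control requests */

	/* Mirrors the check option_probe() performs after this patch. */
	static bool iface_is_reserved(unsigned long device_flags, u8 ifnum)
	{
		return device_flags & RSVD(ifnum);
	}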
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 6aa7ff2c1cf7..2674da40d9cd 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -66,6 +66,11 @@ DEVICE(flashloader, FLASHLOADER_IDS);
0x01) }
DEVICE(google, GOOGLE_IDS);
+/* Libtransistor USB console */
+#define LIBTRANSISTOR_IDS() \
+ { USB_DEVICE(0x1209, 0x8b00) }
+DEVICE(libtransistor, LIBTRANSISTOR_IDS);
+
/* ViVOpay USB Serial Driver */
#define VIVOPAY_IDS() \
{ USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */
@@ -113,6 +118,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
&funsoft_device,
&flashloader_device,
&google_device,
+ &libtransistor_device,
&vivopay_device,
&moto_modem_device,
&motorola_tetra_device,
@@ -129,6 +135,7 @@ static const struct usb_device_id id_table[] = {
FUNSOFT_IDS(),
FLASHLOADER_IDS(),
GOOGLE_IDS(),
+ LIBTRANSISTOR_IDS(),
VIVOPAY_IDS(),
MOTO_IDS(),
MOTOROLA_TETRA_IDS(),
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index 337a0be89fcf..dbc3801b43eb 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -338,47 +338,48 @@ static int palm_os_3_probe(struct usb_serial *serial,
goto exit;
}
- if (retval == sizeof(*connection_info)) {
- connection_info = (struct visor_connection_info *)
- transfer_buffer;
-
- num_ports = le16_to_cpu(connection_info->num_ports);
- for (i = 0; i < num_ports; ++i) {
- switch (
- connection_info->connections[i].port_function_id) {
- case VISOR_FUNCTION_GENERIC:
- string = "Generic";
- break;
- case VISOR_FUNCTION_DEBUGGER:
- string = "Debugger";
- break;
- case VISOR_FUNCTION_HOTSYNC:
- string = "HotSync";
- break;
- case VISOR_FUNCTION_CONSOLE:
- string = "Console";
- break;
- case VISOR_FUNCTION_REMOTE_FILE_SYS:
- string = "Remote File System";
- break;
- default:
- string = "unknown";
- break;
- }
- dev_info(dev, "%s: port %d, is for %s use\n",
- serial->type->description,
- connection_info->connections[i].port, string);
- }
+ if (retval != sizeof(*connection_info)) {
+ dev_err(dev, "Invalid connection information received from device\n");
+ retval = -ENODEV;
+ goto exit;
}
- /*
- * Handle devices that report invalid stuff here.
- */
+
+ connection_info = (struct visor_connection_info *)transfer_buffer;
+
+ num_ports = le16_to_cpu(connection_info->num_ports);
+
+ /* Handle devices that report invalid stuff here. */
if (num_ports == 0 || num_ports > 2) {
dev_warn(dev, "%s: No valid connect info available\n",
serial->type->description);
num_ports = 2;
}
+ for (i = 0; i < num_ports; ++i) {
+ switch (connection_info->connections[i].port_function_id) {
+ case VISOR_FUNCTION_GENERIC:
+ string = "Generic";
+ break;
+ case VISOR_FUNCTION_DEBUGGER:
+ string = "Debugger";
+ break;
+ case VISOR_FUNCTION_HOTSYNC:
+ string = "HotSync";
+ break;
+ case VISOR_FUNCTION_CONSOLE:
+ string = "Console";
+ break;
+ case VISOR_FUNCTION_REMOTE_FILE_SYS:
+ string = "Remote File System";
+ break;
+ default:
+ string = "unknown";
+ break;
+ }
+ dev_info(dev, "%s: port %d, is for %s use\n",
+ serial->type->description,
+ connection_info->connections[i].port, string);
+ }
dev_info(dev, "%s: Number of ports: %d\n", serial->type->description,
num_ports);
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 950b3aa56243..c65e1a7317cf 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -2206,6 +2206,9 @@ static int tcpm_snk_attach(struct tcpm_port *port)
static void tcpm_snk_detach(struct tcpm_port *port)
{
+ /* Conservatively reset back to assuming that suspend will be honored */
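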
+ if (port->tcpc->set_suspend_supported)
+ port->tcpc->set_suspend_supported(port->tcpc, true);
tcpm_detach(port);
/* XXX: (Dis)connect SuperSpeed mux? */
@@ -2618,6 +2621,11 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state_cond(port, hard_reset_state(port),
PD_T_SENDER_RESPONSE);
}
+ if (port->tcpc->set_suspend_supported) {
+ port->tcpc->set_suspend_supported(port->tcpc,
+ port->source_caps[0] &
+ PDO_FIXED_SUSPEND);
+ }
break;
case SNK_TRANSITION_SINK:
case SNK_TRANSITION_SINK_VBUS:
diff --git a/drivers/usb/typec/tcpm.h b/drivers/usb/typec/tcpm.h
index 9becc0de926c..234f07b97cc9 100644
--- a/drivers/usb/typec/tcpm.h
+++ b/drivers/usb/typec/tcpm.h
@@ -55,13 +55,13 @@ enum tcpm_transmit_type {
};
struct tcpc_config {
- const u32 *src_pdo;
+ u32 *src_pdo;
unsigned int nr_src_pdo;
- const u32 *snk_pdo;
+ u32 *snk_pdo;
unsigned int nr_snk_pdo;
- const u32 *snk_vdo;
+ u32 *snk_vdo;
unsigned int nr_snk_vdo;
unsigned int max_snk_mv;
@@ -105,7 +105,7 @@ struct tcpc_mux_dev {
};
struct tcpc_dev {
- const struct tcpc_config *config;
+ struct tcpc_config *config;
int (*init)(struct tcpc_dev *dev);
int (*get_vbus)(struct tcpc_dev *dev);
@@ -126,6 +126,8 @@ struct tcpc_dev {
int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type,
const struct pd_message *msg);
int (*set_in_pr_swap)(struct tcpc_dev *dev, bool pr_swap);
+ int (*set_suspend_supported)(struct tcpc_dev *dev,
+ bool suspend_supported);
struct tcpc_mux_dev *mux;
};
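The tcpm.h change above adds an optional set_suspend_supported() operation to struct tcpc_dev, and the tcpm.c hunks call it when the first source PDO advertises (or stops advertising) USB suspend support, resetting the assumption on sink detach. A low-level TCPC driver that wants the hint would wire it up roughly as below; the driver structure and the action taken are hypothetical, only the callback signature comes from the header change above:

	/* Hypothetical TCPC driver hook for the new callback (names invented). */
	struct my_tcpc {
		struct tcpc_dev tcpc;
		bool partner_honors_suspend;
	};

	static int my_tcpc_set_suspend_supported(struct tcpc_dev *dev, bool supported)
	{
		struct my_tcpc *chip = container_of(dev, struct my_tcpc, tcpc);

		chip->partner_honors_suspend = supported;	/* e.g. feed into a later power policy decision */
		return 0;
	}

	static void my_tcpc_register_ops(struct my_tcpc *chip)
	{
		chip->tcpc.set_suspend_supported = my_tcpc_set_suspend_supported;
	}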
diff --git a/drivers/usb/usbip/stub.h b/drivers/usb/usbip/stub.h
index 266e2b0ce9a8..47ccd73a74f0 100644
--- a/drivers/usb/usbip/stub.h
+++ b/drivers/usb/usbip/stub.h
@@ -88,6 +88,7 @@ struct bus_id_priv {
struct stub_device *sdev;
struct usb_device *udev;
char shutdown_busid;
+ spinlock_t busid_lock;
};
/* stub_priv is allocated from stub_priv_cache */
@@ -98,6 +99,7 @@ extern struct usb_device_driver stub_driver;
/* stub_main.c */
struct bus_id_priv *get_busid_priv(const char *busid);
+void put_busid_priv(struct bus_id_priv *bid);
int del_match_busid(char *busid);
void stub_device_cleanup_urbs(struct stub_device *sdev);
diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c
index 0931f3271119..4aad99a59958 100644
--- a/drivers/usb/usbip/stub_dev.c
+++ b/drivers/usb/usbip/stub_dev.c
@@ -314,9 +314,9 @@ static int stub_probe(struct usb_device *udev)
struct stub_device *sdev = NULL;
const char *udev_busid = dev_name(&udev->dev);
struct bus_id_priv *busid_priv;
- int rc;
+ int rc = 0;
- dev_dbg(&udev->dev, "Enter\n");
+ dev_dbg(&udev->dev, "Enter probe\n");
/* check we should claim or not by busid_table */
busid_priv = get_busid_priv(udev_busid);
@@ -331,13 +331,15 @@ static int stub_probe(struct usb_device *udev)
* other matched drivers by the driver core.
* See driver_probe_device() in driver/base/dd.c
*/
- return -ENODEV;
+ rc = -ENODEV;
+ goto call_put_busid_priv;
}
if (udev->descriptor.bDeviceClass == USB_CLASS_HUB) {
dev_dbg(&udev->dev, "%s is a usb hub device... skip!\n",
udev_busid);
- return -ENODEV;
+ rc = -ENODEV;
+ goto call_put_busid_priv;
}
if (!strcmp(udev->bus->bus_name, "vhci_hcd")) {
@@ -345,13 +347,16 @@ static int stub_probe(struct usb_device *udev)
"%s is attached on vhci_hcd... skip!\n",
udev_busid);
- return -ENODEV;
+ rc = -ENODEV;
+ goto call_put_busid_priv;
}
/* ok, this is my device */
sdev = stub_device_alloc(udev);
- if (!sdev)
- return -ENOMEM;
+ if (!sdev) {
+ rc = -ENOMEM;
+ goto call_put_busid_priv;
+ }
dev_info(&udev->dev,
"usbip-host: register new device (bus %u dev %u)\n",
@@ -383,7 +388,9 @@ static int stub_probe(struct usb_device *udev)
}
busid_priv->status = STUB_BUSID_ALLOC;
- return 0;
+ rc = 0;
+ goto call_put_busid_priv;
+
err_files:
usb_hub_release_port(udev->parent, udev->portnum,
(struct usb_dev_state *) udev);
@@ -394,6 +401,9 @@ err_port:
busid_priv->sdev = NULL;
stub_device_free(sdev);
+
+call_put_busid_priv:
+ put_busid_priv(busid_priv);
return rc;
}
@@ -419,7 +429,7 @@ static void stub_disconnect(struct usb_device *udev)
struct bus_id_priv *busid_priv;
int rc;
- dev_dbg(&udev->dev, "Enter\n");
+ dev_dbg(&udev->dev, "Enter disconnect\n");
busid_priv = get_busid_priv(udev_busid);
if (!busid_priv) {
@@ -432,7 +442,7 @@ static void stub_disconnect(struct usb_device *udev)
/* get stub_device */
if (!sdev) {
dev_err(&udev->dev, "could not get device");
- return;
+ goto call_put_busid_priv;
}
dev_set_drvdata(&udev->dev, NULL);
@@ -447,12 +457,12 @@ static void stub_disconnect(struct usb_device *udev)
(struct usb_dev_state *) udev);
if (rc) {
dev_dbg(&udev->dev, "unable to release port\n");
- return;
+ goto call_put_busid_priv;
}
/* If usb reset is called from event handler */
if (busid_priv->sdev->ud.eh == current)
- return;
+ goto call_put_busid_priv;
/* shutdown the current connection */
shutdown_busid(busid_priv);
@@ -463,12 +473,11 @@ static void stub_disconnect(struct usb_device *udev)
busid_priv->sdev = NULL;
stub_device_free(sdev);
- if (busid_priv->status == STUB_BUSID_ALLOC) {
+ if (busid_priv->status == STUB_BUSID_ALLOC)
busid_priv->status = STUB_BUSID_ADDED;
- } else {
- busid_priv->status = STUB_BUSID_OTHER;
- del_match_busid((char *)udev_busid);
- }
+
+call_put_busid_priv:
+ put_busid_priv(busid_priv);
}
#ifdef CONFIG_PM
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index 325b4c05acdd..fa90496ca7a8 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -28,6 +28,7 @@
#define DRIVER_DESC "USB/IP Host Driver"
struct kmem_cache *stub_priv_cache;
+
/*
* busid_tables defines matching busids that usbip can grab. A user can change
* dynamically what device is locally used and what device is exported to a
@@ -39,6 +40,8 @@ static spinlock_t busid_table_lock;
static void init_busid_table(void)
{
+ int i;
+
/*
* This also sets the bus_table[i].status to
* STUB_BUSID_OTHER, which is 0.
@@ -46,6 +49,9 @@ static void init_busid_table(void)
memset(busid_table, 0, sizeof(busid_table));
spin_lock_init(&busid_table_lock);
+
+ for (i = 0; i < MAX_BUSID; i++)
+ spin_lock_init(&busid_table[i].busid_lock);
}
/*
@@ -57,15 +63,20 @@ static int get_busid_idx(const char *busid)
int i;
int idx = -1;
- for (i = 0; i < MAX_BUSID; i++)
+ for (i = 0; i < MAX_BUSID; i++) {
+ spin_lock(&busid_table[i].busid_lock);
if (busid_table[i].name[0])
if (!strncmp(busid_table[i].name, busid, BUSID_SIZE)) {
idx = i;
+ spin_unlock(&busid_table[i].busid_lock);
break;
}
+ spin_unlock(&busid_table[i].busid_lock);
+ }
return idx;
}
+/* Returns holding busid_lock. Should call put_busid_priv() to unlock */
struct bus_id_priv *get_busid_priv(const char *busid)
{
int idx;
@@ -73,13 +84,22 @@ struct bus_id_priv *get_busid_priv(const char *busid)
spin_lock(&busid_table_lock);
idx = get_busid_idx(busid);
- if (idx >= 0)
+ if (idx >= 0) {
bid = &(busid_table[idx]);
+ /* get busid_lock before returning */
+ spin_lock(&bid->busid_lock);
+ }
spin_unlock(&busid_table_lock);
return bid;
}
+void put_busid_priv(struct bus_id_priv *bid)
+{
+ if (bid)
+ spin_unlock(&bid->busid_lock);
+}
+
static int add_match_busid(char *busid)
{
int i;
@@ -92,15 +112,19 @@ static int add_match_busid(char *busid)
goto out;
}
- for (i = 0; i < MAX_BUSID; i++)
+ for (i = 0; i < MAX_BUSID; i++) {
+ spin_lock(&busid_table[i].busid_lock);
if (!busid_table[i].name[0]) {
strlcpy(busid_table[i].name, busid, BUSID_SIZE);
if ((busid_table[i].status != STUB_BUSID_ALLOC) &&
(busid_table[i].status != STUB_BUSID_REMOV))
busid_table[i].status = STUB_BUSID_ADDED;
ret = 0;
+ spin_unlock(&busid_table[i].busid_lock);
break;
}
+ spin_unlock(&busid_table[i].busid_lock);
+ }
out:
spin_unlock(&busid_table_lock);
@@ -121,6 +145,8 @@ int del_match_busid(char *busid)
/* found */
ret = 0;
+ spin_lock(&busid_table[idx].busid_lock);
+
if (busid_table[idx].status == STUB_BUSID_OTHER)
memset(busid_table[idx].name, 0, BUSID_SIZE);
@@ -128,6 +154,7 @@ int del_match_busid(char *busid)
(busid_table[idx].status != STUB_BUSID_ADDED))
busid_table[idx].status = STUB_BUSID_REMOV;
+ spin_unlock(&busid_table[idx].busid_lock);
out:
spin_unlock(&busid_table_lock);
@@ -140,9 +167,12 @@ static ssize_t show_match_busid(struct device_driver *drv, char *buf)
char *out = buf;
spin_lock(&busid_table_lock);
- for (i = 0; i < MAX_BUSID; i++)
+ for (i = 0; i < MAX_BUSID; i++) {
+ spin_lock(&busid_table[i].busid_lock);
if (busid_table[i].name[0])
out += sprintf(out, "%s ", busid_table[i].name);
+ spin_unlock(&busid_table[i].busid_lock);
+ }
spin_unlock(&busid_table_lock);
out += sprintf(out, "\n");
@@ -184,6 +214,51 @@ static ssize_t store_match_busid(struct device_driver *dev, const char *buf,
static DRIVER_ATTR(match_busid, S_IRUSR | S_IWUSR, show_match_busid,
store_match_busid);
+static int do_rebind(char *busid, struct bus_id_priv *busid_priv)
+{
+ int ret;
+
+ /* device_attach() callers should hold parent lock for USB */
+ if (busid_priv->udev->dev.parent)
+ device_lock(busid_priv->udev->dev.parent);
+ ret = device_attach(&busid_priv->udev->dev);
+ if (busid_priv->udev->dev.parent)
+ device_unlock(busid_priv->udev->dev.parent);
+ if (ret < 0) {
+ dev_err(&busid_priv->udev->dev, "rebind failed\n");
+ return ret;
+ }
+ return 0;
+}
+
+static void stub_device_rebind(void)
+{
+#if IS_MODULE(CONFIG_USBIP_HOST)
+ struct bus_id_priv *busid_priv;
+ int i;
+
+ /* update status to STUB_BUSID_OTHER so probe ignores the device */
+ spin_lock(&busid_table_lock);
+ for (i = 0; i < MAX_BUSID; i++) {
+ if (busid_table[i].name[0] &&
+ busid_table[i].shutdown_busid) {
+ busid_priv = &(busid_table[i]);
+ busid_priv->status = STUB_BUSID_OTHER;
+ }
+ }
+ spin_unlock(&busid_table_lock);
+
+ /* now run rebind - no need to hold locks. driver files are removed */
+ for (i = 0; i < MAX_BUSID; i++) {
+ if (busid_table[i].name[0] &&
+ busid_table[i].shutdown_busid) {
+ busid_priv = &(busid_table[i]);
+ do_rebind(busid_table[i].name, busid_priv);
+ }
+ }
+#endif
+}
+
static ssize_t rebind_store(struct device_driver *dev, const char *buf,
size_t count)
{
@@ -201,11 +276,17 @@ static ssize_t rebind_store(struct device_driver *dev, const char *buf,
if (!bid)
return -ENODEV;
- ret = device_attach(&bid->udev->dev);
- if (ret < 0) {
- dev_err(&bid->udev->dev, "rebind failed\n");
+ /* mark the device for deletion so probe ignores it during rescan */
+ bid->status = STUB_BUSID_OTHER;
+ /* release the busid lock */
+ put_busid_priv(bid);
+
+ ret = do_rebind((char *) buf, bid);
+ if (ret < 0)
return ret;
- }
+
+ /* delete device from busid_table */
+ del_match_busid((char *) buf);
return count;
}
@@ -328,6 +409,9 @@ static void __exit usbip_host_exit(void)
*/
usb_deregister_device_driver(&stub_driver);
+ /* initiate scan to attach devices */
+ stub_device_rebind();
+
kmem_cache_destroy(stub_priv_cache);
}
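With the new per-entry busid_lock, get_busid_priv() now returns with the matching entry's lock held and every caller has to release it through put_busid_priv(), which is why stub_probe() and stub_disconnect() above grew the call_put_busid_priv exit paths. A minimal sketch of the expected pairing, using only the two helpers introduced by this patch:

	/* Sketch of the lock pairing required by the new helpers. */
	static void example_busid_user(const char *busid)
	{
		struct bus_id_priv *bid;

		bid = get_busid_priv(busid);	/* on success, returns with bid->busid_lock held */
		if (!bid)
			return;

		/* ... inspect or update *bid while the lock is held ... */

		put_busid_priv(bid);		/* drops bid->busid_lock */
	}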
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index f875ccaa55f9..0fc5ace57c0e 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -248,7 +248,7 @@ enum usbip_side {
#define SDEV_EVENT_ERROR_SUBMIT (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define SDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
-#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_BYE)
+#define VDEV_EVENT_REMOVED (USBIP_EH_SHUTDOWN | USBIP_EH_RESET | USBIP_EH_BYE)
#define VDEV_EVENT_DOWN (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define VDEV_EVENT_ERROR_TCP (USBIP_EH_SHUTDOWN | USBIP_EH_RESET)
#define VDEV_EVENT_ERROR_MALLOC (USBIP_EH_SHUTDOWN | USBIP_EH_UNUSABLE)
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index 7626966f245e..9ba56b7b2804 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -254,6 +254,13 @@ struct mdss_scaler_block {
u32 *dest_scaler_off;
u32 *dest_scaler_lut_off;
struct mdss_mdp_qseed3_lut_tbl lut_tbl;
+
+ /*
+ * Lock is mainly to serialize access to LUT.
+ * LUT values come asynchronously from userspace
+ * via ioctl.
+ */
+ struct mutex scaler_lock;
};
struct mdss_data_type;
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 27053230f868..0606a273e7fd 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -808,7 +808,7 @@ static ssize_t mdss_dsi_cmd_state_read(struct file *file, char __user *buf,
if (blen < 0)
return 0;
- if (copy_to_user(buf, buffer, blen))
+ if (copy_to_user(buf, buffer, min(count, (size_t)blen+1)))
return -EFAULT;
*ppos += blen;
@@ -3541,6 +3541,7 @@ static int mdss_dsi_ctrl_probe(struct platform_device *pdev)
pr_err("Failed to request disp ERR_DETECT irq : %d\n", rc);
goto error_shadow_clk_deinit;
}
+ disable_irq(gpio_to_irq(ctrl_pdata->disp_err_detect_gpio));
pr_info("request disp ERR_DETECT irq\n");
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index b6739450d311..0083878d54ae 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1,7 +1,7 @@
/*
* MDSS MDP Interface (used by framebuffer core)
*
- * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2007 Google Incorporated
*
* This software is licensed under the terms of the GNU General Public
@@ -2430,6 +2430,8 @@ static u32 mdss_mdp_scaler_init(struct mdss_data_type *mdata,
ret = mdss_mdp_ds_addr_setup(mdata);
}
+ mutex_init(&mdata->scaler_off->scaler_lock);
+
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 719ccd4ce448..3c2d3242a6d4 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -6673,14 +6673,18 @@ static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata,
if (!mdata->scaler_off)
return -EFAULT;
+ mutex_lock(&mdata->scaler_off->scaler_lock);
+
qseed3_lut_tbl = &mdata->scaler_off->lut_tbl;
if ((lut_tbl->dir_lut_size !=
DIR_LUT_IDX * DIR_LUT_COEFFS * sizeof(uint32_t)) ||
(lut_tbl->cir_lut_size !=
CIR_LUT_IDX * CIR_LUT_COEFFS * sizeof(uint32_t)) ||
(lut_tbl->sep_lut_size !=
- SEP_LUT_IDX * SEP_LUT_COEFFS * sizeof(uint32_t)))
- return -EINVAL;
+ SEP_LUT_IDX * SEP_LUT_COEFFS * sizeof(uint32_t))) {
+ mutex_unlock(&mdata->scaler_off->scaler_lock);
+ return -EINVAL;
+ }
if (!qseed3_lut_tbl->dir_lut) {
qseed3_lut_tbl->dir_lut = devm_kzalloc(&mdata->pdev->dev,
@@ -6688,7 +6692,7 @@ static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata,
GFP_KERNEL);
if (!qseed3_lut_tbl->dir_lut) {
ret = -ENOMEM;
- goto fail;
+ goto err;
}
}
@@ -6698,7 +6702,7 @@ static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata,
GFP_KERNEL);
if (!qseed3_lut_tbl->cir_lut) {
ret = -ENOMEM;
- goto fail;
+ goto fail_free_dir_lut;
}
}
@@ -6708,44 +6712,52 @@ static int mdss_mdp_scaler_lut_init(struct mdss_data_type *mdata,
GFP_KERNEL);
if (!qseed3_lut_tbl->sep_lut) {
ret = -ENOMEM;
- goto fail;
+ goto fail_free_cir_lut;
}
}
/* Invalidate before updating */
qseed3_lut_tbl->valid = false;
-
if (copy_from_user(qseed3_lut_tbl->dir_lut,
(void *)(unsigned long)lut_tbl->dir_lut,
lut_tbl->dir_lut_size)) {
ret = -EINVAL;
- goto err;
+ goto fail_free_sep_lut;
}
if (copy_from_user(qseed3_lut_tbl->cir_lut,
(void *)(unsigned long)lut_tbl->cir_lut,
lut_tbl->cir_lut_size)) {
ret = -EINVAL;
- goto err;
+ goto fail_free_sep_lut;
}
if (copy_from_user(qseed3_lut_tbl->sep_lut,
(void *)(unsigned long)lut_tbl->sep_lut,
lut_tbl->sep_lut_size)) {
ret = -EINVAL;
- goto err;
+ goto fail_free_sep_lut;
}
qseed3_lut_tbl->valid = true;
+ mutex_unlock(&mdata->scaler_off->scaler_lock);
+
return ret;
-fail:
- kfree(qseed3_lut_tbl->dir_lut);
- kfree(qseed3_lut_tbl->cir_lut);
- kfree(qseed3_lut_tbl->sep_lut);
+fail_free_sep_lut:
+ devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->sep_lut);
+fail_free_cir_lut:
+ devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->cir_lut);
+fail_free_dir_lut:
+ devm_kfree(&mdata->pdev->dev, qseed3_lut_tbl->dir_lut);
err:
+ qseed3_lut_tbl->dir_lut = NULL;
+ qseed3_lut_tbl->cir_lut = NULL;
+ qseed3_lut_tbl->sep_lut = NULL;
qseed3_lut_tbl->valid = false;
+ mutex_unlock(&mdata->scaler_off->scaler_lock);
+
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index dda4bfada9b4..a69f004654fa 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1604,11 +1604,16 @@ int mdss_mdp_scaler_lut_cfg(struct mdp_scale_data_v2 *scaler,
};
mdata = mdss_mdp_get_mdata();
+
+ mutex_lock(&mdata->scaler_off->scaler_lock);
+
lut_tbl = &mdata->scaler_off->lut_tbl;
if ((!lut_tbl) || (!lut_tbl->valid)) {
+ mutex_unlock(&mdata->scaler_off->scaler_lock);
pr_err("%s:Invalid QSEED3 LUT TABLE\n", __func__);
return -EINVAL;
}
+
if ((scaler->lut_flag & SCALER_LUT_DIR_WR) ||
(scaler->lut_flag & SCALER_LUT_Y_CIR_WR) ||
(scaler->lut_flag & SCALER_LUT_UV_CIR_WR) ||
@@ -1656,6 +1661,8 @@ int mdss_mdp_scaler_lut_cfg(struct mdp_scale_data_v2 *scaler,
}
}
+ mutex_unlock(&mdata->scaler_off->scaler_lock);
+
return 0;
}
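The mdss hunks above introduce scaler_lock because the QSEED3 LUTs are written asynchronously from userspace via ioctl (mdss_mdp_scaler_lut_init) while the pp path reads them (mdss_mdp_scaler_lut_cfg), so every access to lut_tbl is now bracketed by the mutex. A condensed sketch of that pattern, using the field names from the hunks above:

	/* Sketch of the serialization pattern added around the QSEED3 LUTs. */
	static int example_lut_reader(struct mdss_data_type *mdata)
	{
		int ret = 0;

		mutex_lock(&mdata->scaler_off->scaler_lock);
		if (!mdata->scaler_off->lut_tbl.valid)
			ret = -EINVAL;
		/* ... otherwise program the hardware from lut_tbl ... */
		mutex_unlock(&mdata->scaler_off->scaler_lock);

		return ret;
	}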
diff --git a/fs/block_dev.c b/fs/block_dev.c
index d3c296d4eb25..43b80ca84d9c 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -558,6 +558,8 @@ static void bdev_evict_inode(struct inode *inode)
}
list_del_init(&bdev->bd_list);
spin_unlock(&bdev_lock);
+ /* Detach inode from wb early as bdi_put() may free bdi->wb */
+ inode_detach_wb(inode);
if (bdev->bd_bdi != &noop_backing_dev_info) {
bdi_put(bdev->bd_bdi);
bdev->bd_bdi = &noop_backing_dev_info;
@@ -1221,8 +1223,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_disk = disk;
bdev->bd_queue = disk->queue;
bdev->bd_contains = bdev;
- if (bdev->bd_bdi == &noop_backing_dev_info)
- bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
bdev->bd_inode->i_flags = disk->fops->direct_access ? S_DAX : 0;
if (!partno) {
@@ -1294,6 +1294,9 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
(bdev->bd_part->nr_sects % (PAGE_SIZE / 512)))
bdev->bd_inode->i_flags &= ~S_DAX;
}
+
+ if (bdev->bd_bdi == &noop_backing_dev_info)
+ bdev->bd_bdi = bdi_get(disk->queue->backing_dev_info);
} else {
if (bdev->bd_contains == bdev) {
ret = 0;
@@ -1325,8 +1328,6 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
bdev->bd_disk = NULL;
bdev->bd_part = NULL;
bdev->bd_queue = NULL;
- bdi_put(bdev->bd_bdi);
- bdev->bd_bdi = &noop_backing_dev_info;
if (bdev != bdev->bd_contains)
__blkdev_put(bdev->bd_contains, mode, 1);
bdev->bd_contains = NULL;
@@ -1548,12 +1549,6 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
kill_bdev(bdev);
bdev_write_inode(bdev);
- /*
- * Detaching bdev inode from its wb in __destroy_inode()
- * is too late: the queue which embeds its bdi (along with
- * root wb) can be gone as soon as we put_disk() below.
- */
- inode_detach_wb(bdev->bd_inode);
}
if (bdev->bd_contains == bdev) {
if (disk->fops->release)
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 0f2b7c622ce3..e2f5be261532 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2497,10 +2497,8 @@ read_block_for_search(struct btrfs_trans_handle *trans,
if (p->reada)
reada_for_search(root, p, level, slot, key->objectid);
- btrfs_release_path(p);
-
ret = -EAGAIN;
- tmp = read_tree_block(root, blocknr, 0);
+ tmp = read_tree_block(root, blocknr, gen);
if (!IS_ERR(tmp)) {
/*
* If the read above didn't mark this buffer up to date,
@@ -2512,6 +2510,8 @@ read_block_for_search(struct btrfs_trans_handle *trans,
ret = -EIO;
free_extent_buffer(tmp);
}
+
+ btrfs_release_path(p);
return ret;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index d6359af9789d..6ba022ed4a52 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -4568,6 +4568,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
u64 logged_isize = 0;
bool need_log_inode_item = true;
+ bool xattrs_logged = false;
path = btrfs_alloc_path();
if (!path)
@@ -4808,6 +4809,7 @@ next_slot:
err = btrfs_log_all_xattrs(trans, root, inode, path, dst_path);
if (err)
goto out_unlock;
+ xattrs_logged = true;
if (max_key.type >= BTRFS_EXTENT_DATA_KEY && !fast_search) {
btrfs_release_path(path);
btrfs_release_path(dst_path);
@@ -4820,6 +4822,11 @@ log_extents:
btrfs_release_path(dst_path);
if (need_log_inode_item) {
err = log_inode_item(trans, log, dst_path, inode);
+ if (!err && !xattrs_logged) {
+ err = btrfs_log_all_xattrs(trans, root, inode, path,
+ dst_path);
+ btrfs_release_path(path);
+ }
if (err)
goto out_unlock;
}
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6d874b1cd53c..ed75d70b4bc2 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3850,6 +3850,15 @@ int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
return 0;
}
+ /*
+ * A ro->rw remount sequence should continue with the paused balance
+ * regardless of who pauses it, system or the user as of now, so set
+ * the resume flag.
+ */
+ spin_lock(&fs_info->balance_lock);
+ fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
+ spin_unlock(&fs_info->balance_lock);
+
tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
return PTR_ERR_OR_ZERO(tsk);
}
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 49a0d6b027c1..76dacd5307b9 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -673,6 +673,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
goto mknod_out;
}
+ if (!S_ISCHR(mode) && !S_ISBLK(mode))
+ goto mknod_out;
+
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL))
goto mknod_out;
@@ -681,10 +684,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
if (buf == NULL) {
- kfree(full_path);
rc = -ENOMEM;
- free_xid(xid);
- return rc;
+ goto mknod_out;
}
if (backup_cred(cifs_sb))
@@ -731,7 +732,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
pdev->minor = cpu_to_le64(MINOR(device_number));
rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms,
&bytes_written, iov, 1);
- } /* else if (S_ISFIFO) */
+ }
tcon->ses->server->ops->close(xid, tcon, &fid);
d_drop(direntry);
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 0aa9bf6e6e53..f600c43f0047 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -1175,21 +1175,11 @@ do_indirects:
static void ext2_truncate_blocks(struct inode *inode, loff_t offset)
{
- /*
- * XXX: it seems like a bug here that we don't allow
- * IS_APPEND inode to have blocks-past-i_size trimmed off.
- * review and fix this.
- *
- * Also would be nice to be able to handle IO errors and such,
- * but that's probably too much to ask.
- */
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
S_ISLNK(inode->i_mode)))
return;
if (ext2_inode_is_fast_symlink(inode))
return;
- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
- return;
dax_sem_down_write(EXT2_I(inode));
__ext2_truncate_blocks(inode, offset);
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 78c51ce913db..c57a94f1c198 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -320,6 +320,7 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_grpblk_t offset;
ext4_grpblk_t next_zero_bit;
+ ext4_grpblk_t max_bit = EXT4_CLUSTERS_PER_GROUP(sb);
ext4_fsblk_t blk;
ext4_fsblk_t group_first_block;
@@ -337,20 +338,25 @@ static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
/* check whether block bitmap block number is set */
blk = ext4_block_bitmap(sb, desc);
offset = blk - group_first_block;
- if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
/* bad block bitmap */
return blk;
/* check whether the inode bitmap block number is set */
blk = ext4_inode_bitmap(sb, desc);
offset = blk - group_first_block;
- if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ !ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
/* bad block bitmap */
return blk;
/* check whether the inode table block number is set */
blk = ext4_inode_table(sb, desc);
offset = blk - group_first_block;
+ if (offset < 0 || EXT4_B2C(sbi, offset) >= max_bit ||
+ EXT4_B2C(sbi, offset + sbi->s_itb_per_group) >= max_bit)
+ return blk;
next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
EXT4_B2C(sbi, offset));
@@ -416,6 +422,7 @@ struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
struct ext4_group_desc *desc;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
struct buffer_head *bh;
ext4_fsblk_t bitmap_blk;
int err;
@@ -424,6 +431,12 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
if (!desc)
return ERR_PTR(-EFSCORRUPTED);
bitmap_blk = ext4_block_bitmap(sb, desc);
+ if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+ (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
+ ext4_error(sb, "Invalid block bitmap block %llu in "
+ "block_group %u", bitmap_blk, block_group);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
ext4_error(sb, "Cannot get buffer for block bitmap - "
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index cfb978fd3ec4..359ef3774f4b 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5372,8 +5372,9 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
stop = le32_to_cpu(extent->ee_block);
/*
- * In case of left shift, Don't start shifting extents until we make
- * sure the hole is big enough to accommodate the shift.
+ * For left shifts, make sure the hole on the left is big enough to
+ * accommodate the shift. For right shifts, make sure the last extent
+ * won't be shifted beyond EXT_MAX_BLOCKS.
*/
if (SHIFT == SHIFT_LEFT) {
path = ext4_find_extent(inode, start - 1, &path,
@@ -5393,9 +5394,14 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
if ((start == ex_start && shift > ex_start) ||
(shift > start - ex_end)) {
- ext4_ext_drop_refs(path);
- kfree(path);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
+ }
+ } else {
+ if (shift > EXT_MAX_BLOCKS -
+ (stop + ext4_ext_get_actual_len(extent))) {
+ ret = -EINVAL;
+ goto out;
}
}
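The ext4_read_block_bitmap_nowait() hunk above and the matching ext4_read_inode_bitmap() hunk below both refuse to touch a bitmap whose block number, as read from the group descriptor, falls outside the filesystem, so a corrupted image can no longer steer sb_getblk() at an arbitrary block. The added condition reduces to a simple range test; restated as a helper (hypothetical name, same bounds as the hunks):

	/* Range test equivalent to the bitmap-location checks added to balloc.c/ialloc.c. */
	static bool ext4_bitmap_blk_in_range(struct super_block *sb, ext4_fsblk_t bitmap_blk)
	{
		struct ext4_sb_info *sbi = EXT4_SB(sb);

		return bitmap_blk > le32_to_cpu(sbi->s_es->s_first_data_block) &&
		       bitmap_blk < ext4_blocks_count(sbi->s_es);
	}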
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 305965e87824..e96a051bd789 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -119,6 +119,7 @@ static struct buffer_head *
ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
{
struct ext4_group_desc *desc;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
struct buffer_head *bh = NULL;
ext4_fsblk_t bitmap_blk;
int err;
@@ -128,6 +129,12 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
return ERR_PTR(-EFSCORRUPTED);
bitmap_blk = ext4_inode_bitmap(sb, desc);
+ if ((bitmap_blk <= le32_to_cpu(sbi->s_es->s_first_data_block)) ||
+ (bitmap_blk >= ext4_blocks_count(sbi->s_es))) {
+ ext4_error(sb, "Invalid inode bitmap blk %llu in "
+ "block_group %u", bitmap_blk, block_group);
+ return ERR_PTR(-EFSCORRUPTED);
+ }
bh = sb_getblk(sb, bitmap_blk);
if (unlikely(!bh)) {
ext4_error(sb, "Cannot read inode bitmap - "
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 820d692bc931..280d67fe33a7 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -377,7 +377,7 @@ out:
static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
unsigned int len)
{
- int ret, size, no_expand;
+ int ret, size;
struct ext4_inode_info *ei = EXT4_I(inode);
if (!ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA))
@@ -387,14 +387,15 @@ static int ext4_prepare_inline_data(handle_t *handle, struct inode *inode,
if (size < len)
return -ENOSPC;
- ext4_write_lock_xattr(inode, &no_expand);
+ down_write(&EXT4_I(inode)->xattr_sem);
if (ei->i_inline_off)
ret = ext4_update_inline_data(handle, inode, len);
else
ret = ext4_create_inline_data(handle, inode, len);
- ext4_write_unlock_xattr(inode, &no_expand);
+ up_write(&EXT4_I(inode)->xattr_sem);
+
return ret;
}
@@ -536,7 +537,7 @@ static int ext4_convert_inline_data_to_extent(struct address_space *mapping,
struct inode *inode,
unsigned flags)
{
- int ret, needed_blocks, no_expand;
+ int ret, needed_blocks;
handle_t *handle = NULL;
int retries = 0, sem_held = 0;
struct page *page = NULL;
@@ -576,7 +577,7 @@ retry:
goto out;
}
- ext4_write_lock_xattr(inode, &no_expand);
+ down_write(&EXT4_I(inode)->xattr_sem);
sem_held = 1;
/* If some one has already done this for us, just exit. */
if (!ext4_has_inline_data(inode)) {
@@ -612,7 +613,7 @@ retry:
page_cache_release(page);
page = NULL;
ext4_orphan_add(handle, inode);
- ext4_write_unlock_xattr(inode, &no_expand);
+ up_write(&EXT4_I(inode)->xattr_sem);
sem_held = 0;
ext4_journal_stop(handle);
handle = NULL;
@@ -638,7 +639,7 @@ out:
page_cache_release(page);
}
if (sem_held)
- ext4_write_unlock_xattr(inode, &no_expand);
+ up_write(&EXT4_I(inode)->xattr_sem);
if (handle)
ext4_journal_stop(handle);
brelse(iloc.bh);
@@ -731,7 +732,7 @@ convert:
int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
unsigned copied, struct page *page)
{
- int ret, no_expand;
+ int ret;
void *kaddr;
struct ext4_iloc iloc;
@@ -749,7 +750,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
goto out;
}
- ext4_write_lock_xattr(inode, &no_expand);
+ down_write(&EXT4_I(inode)->xattr_sem);
BUG_ON(!ext4_has_inline_data(inode));
kaddr = kmap_atomic(page);
@@ -759,7 +760,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
/* clear page dirty so that writepages wouldn't work for us. */
ClearPageDirty(page);
- ext4_write_unlock_xattr(inode, &no_expand);
+ up_write(&EXT4_I(inode)->xattr_sem);
brelse(iloc.bh);
out:
return copied;
@@ -770,7 +771,7 @@ ext4_journalled_write_inline_data(struct inode *inode,
unsigned len,
struct page *page)
{
- int ret, no_expand;
+ int ret;
void *kaddr;
struct ext4_iloc iloc;
@@ -780,11 +781,11 @@ ext4_journalled_write_inline_data(struct inode *inode,
return NULL;
}
- ext4_write_lock_xattr(inode, &no_expand);
+ down_write(&EXT4_I(inode)->xattr_sem);
kaddr = kmap_atomic(page);
ext4_write_inline_data(inode, &iloc, kaddr, 0, len);
kunmap_atomic(kaddr);
- ext4_write_unlock_xattr(inode, &no_expand);
+ up_write(&EXT4_I(inode)->xattr_sem);
return iloc.bh;
}
@@ -1267,7 +1268,7 @@ out:
int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
struct inode *dir, struct inode *inode)
{
- int ret, inline_size, no_expand;
+ int ret, inline_size;
void *inline_start;
struct ext4_iloc iloc;
@@ -1275,7 +1276,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
if (ret)
return ret;
- ext4_write_lock_xattr(dir, &no_expand);
+ down_write(&EXT4_I(dir)->xattr_sem);
if (!ext4_has_inline_data(dir))
goto out;
@@ -1321,7 +1322,7 @@ int ext4_try_add_inline_entry(handle_t *handle, struct ext4_filename *fname,
out:
ext4_mark_inode_dirty(handle, dir);
- ext4_write_unlock_xattr(dir, &no_expand);
+ up_write(&EXT4_I(dir)->xattr_sem);
brelse(iloc.bh);
return ret;
}
@@ -1681,7 +1682,7 @@ int ext4_delete_inline_entry(handle_t *handle,
struct buffer_head *bh,
int *has_inline_data)
{
- int err, inline_size, no_expand;
+ int err, inline_size;
struct ext4_iloc iloc;
void *inline_start;
@@ -1689,7 +1690,7 @@ int ext4_delete_inline_entry(handle_t *handle,
if (err)
return err;
- ext4_write_lock_xattr(dir, &no_expand);
+ down_write(&EXT4_I(dir)->xattr_sem);
if (!ext4_has_inline_data(dir)) {
*has_inline_data = 0;
goto out;
@@ -1724,7 +1725,7 @@ int ext4_delete_inline_entry(handle_t *handle,
ext4_show_inline_dir(dir, iloc.bh, inline_start, inline_size);
out:
- ext4_write_unlock_xattr(dir, &no_expand);
+ up_write(&EXT4_I(dir)->xattr_sem);
brelse(iloc.bh);
if (err != -ENOENT)
ext4_std_error(dir->i_sb, err);
@@ -1823,11 +1824,11 @@ out:
int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
{
- int ret, no_expand;
+ int ret;
- ext4_write_lock_xattr(inode, &no_expand);
+ down_write(&EXT4_I(inode)->xattr_sem);
ret = ext4_destroy_inline_data_nolock(handle, inode);
- ext4_write_unlock_xattr(inode, &no_expand);
+ up_write(&EXT4_I(inode)->xattr_sem);
return ret;
}
@@ -1912,7 +1913,7 @@ out:
void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
{
handle_t *handle;
- int inline_size, value_len, needed_blocks, no_expand;
+ int inline_size, value_len, needed_blocks;
size_t i_size;
void *value = NULL;
struct ext4_xattr_ibody_find is = {
@@ -1929,7 +1930,7 @@ void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
if (IS_ERR(handle))
return;
- ext4_write_lock_xattr(inode, &no_expand);
+ down_write(&EXT4_I(inode)->xattr_sem);
if (!ext4_has_inline_data(inode)) {
*has_inline = 0;
ext4_journal_stop(handle);
@@ -1987,7 +1988,7 @@ out_error:
up_write(&EXT4_I(inode)->i_data_sem);
out:
brelse(is.iloc.bh);
- ext4_write_unlock_xattr(inode, &no_expand);
+ up_write(&EXT4_I(inode)->xattr_sem);
kfree(value);
if (inode->i_nlink)
ext4_orphan_del(handle, inode);
@@ -2003,7 +2004,7 @@ out:
int ext4_convert_inline_data(struct inode *inode)
{
- int error, needed_blocks, no_expand;
+ int error, needed_blocks;
handle_t *handle;
struct ext4_iloc iloc;
@@ -2025,10 +2026,15 @@ int ext4_convert_inline_data(struct inode *inode)
goto out_free;
}
- ext4_write_lock_xattr(inode, &no_expand);
- if (ext4_has_inline_data(inode))
- error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
- ext4_write_unlock_xattr(inode, &no_expand);
+ down_write(&EXT4_I(inode)->xattr_sem);
+ if (!ext4_has_inline_data(inode)) {
+ up_write(&EXT4_I(inode)->xattr_sem);
+ goto out;
+ }
+
+ error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
+ up_write(&EXT4_I(inode)->xattr_sem);
+out:
ext4_journal_stop(handle);
out_free:
brelse(iloc.bh);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 5ecac07d668a..7af3cc826f27 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3299,29 +3299,29 @@ static ssize_t ext4_ext_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
* case, we allocate an io_end structure to hook to the iocb.
*/
iocb->private = NULL;
+ ext4_inode_aio_set(inode, NULL);
+ if (!is_sync_kiocb(iocb)) {
+ io_end = ext4_init_io_end(inode, GFP_NOFS);
+ if (!io_end) {
+ ret = -ENOMEM;
+ goto retake_lock;
+ }
+ /*
+ * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
+ */
+ iocb->private = ext4_get_io_end(io_end);
+ /*
+ * we save the io structure for current async direct
+ * IO, so that later ext4_map_blocks() could flag the
+ * io structure whether there is a unwritten extents
+ * needs to be converted when IO is completed.
+ */
+ ext4_inode_aio_set(inode, io_end);
+ }
+
if (overwrite) {
get_block_func = ext4_get_block_write_nolock;
} else {
- ext4_inode_aio_set(inode, NULL);
- if (!is_sync_kiocb(iocb)) {
- io_end = ext4_init_io_end(inode, GFP_NOFS);
- if (!io_end) {
- ret = -ENOMEM;
- goto retake_lock;
- }
- /*
- * Grab reference for DIO. Will be dropped in
- * ext4_end_io_dio()
- */
- iocb->private = ext4_get_io_end(io_end);
- /*
- * we save the io structure for current async direct
- * IO, so that later ext4_map_blocks() could flag the
- * io structure whether there is a unwritten extents
- * needs to be converted when IO is completed.
- */
- ext4_inode_aio_set(inode, io_end);
- }
get_block_func = ext4_get_block_write;
dio_flags = DIO_LOCKING;
}
@@ -3866,28 +3866,28 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
EXT4_BLOCK_SIZE_BITS(sb);
stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
- /* If there are no blocks to remove, return now */
- if (first_block >= stop_block)
- goto out_stop;
+ /* If there are blocks to remove, do it */
+ if (stop_block > first_block) {
- down_write(&EXT4_I(inode)->i_data_sem);
- ext4_discard_preallocations(inode);
+ down_write(&EXT4_I(inode)->i_data_sem);
+ ext4_discard_preallocations(inode);
- ret = ext4_es_remove_extent(inode, first_block,
- stop_block - first_block);
- if (ret) {
- up_write(&EXT4_I(inode)->i_data_sem);
- goto out_stop;
- }
+ ret = ext4_es_remove_extent(inode, first_block,
+ stop_block - first_block);
+ if (ret) {
+ up_write(&EXT4_I(inode)->i_data_sem);
+ goto out_stop;
+ }
- if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
- ret = ext4_ext_remove_space(inode, first_block,
- stop_block - 1);
- else
- ret = ext4_ind_remove_space(handle, inode, first_block,
- stop_block);
+ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+ ret = ext4_ext_remove_space(inode, first_block,
+ stop_block - 1);
+ else
+ ret = ext4_ind_remove_space(handle, inode, first_block,
+ stop_block);
- up_write(&EXT4_I(inode)->i_data_sem);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ }
if (IS_SYNC(inode))
ext4_handle_sync(handle);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 035f5aa20712..eba88f78648e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1912,12 +1912,12 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
int read_only)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- int res = 0;
+ int err = 0;
if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
ext4_msg(sb, KERN_ERR, "revision level too high, "
"forcing read-only mode");
- res = MS_RDONLY;
+ err = -EROFS;
}
if (read_only)
goto done;
@@ -1950,7 +1950,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
if (sbi->s_journal)
ext4_set_feature_journal_needs_recovery(sb);
- ext4_commit_super(sb, 1);
+ err = ext4_commit_super(sb, 1);
done:
if (test_opt(sb, DEBUG))
printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
@@ -1962,7 +1962,7 @@ done:
sbi->s_mount_opt, sbi->s_mount_opt2);
cleancache_init_fs(sb);
- return res;
+ return err;
}
int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
@@ -3927,8 +3927,12 @@ no_journal:
goto failed_mount4;
}
- if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
+ ret = ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);
+ if (ret == -EROFS) {
sb->s_flags |= MS_RDONLY;
+ ret = 0;
+ } else if (ret)
+ goto failed_mount4a;
/* determine the minimum size of new large inodes, if present */
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
@@ -4438,11 +4442,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
if (sync) {
error = __sync_dirty_buffer(sbh,
test_opt(sb, BARRIER) ? WRITE_FUA : WRITE_SYNC);
- if (error)
- return error;
-
- error = buffer_write_io_error(sbh);
- if (error) {
+ if (buffer_write_io_error(sbh)) {
ext4_msg(sb, KERN_ERR, "I/O error while writing "
"superblock");
clear_buffer_write_io_error(sbh);
@@ -4844,8 +4844,12 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
if (sbi->s_journal)
ext4_clear_journal_err(sb, es);
sbi->s_mount_state = le16_to_cpu(es->s_state);
- if (!ext4_setup_super(sb, es, 0))
- sb->s_flags &= ~MS_RDONLY;
+
+ err = ext4_setup_super(sb, es, 0);
+ if (err)
+ goto restore_opts;
+
+ sb->s_flags &= ~MS_RDONLY;
if (ext4_has_feature_mmp(sb))
if (ext4_multi_mount_protect(sb,
le64_to_cpu(es->s_mmp_block))) {
@@ -4869,8 +4873,11 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
}
ext4_setup_system_zone(sb);
- if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
- ext4_commit_super(sb, 1);
+ if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY)) {
+ err = ext4_commit_super(sb, 1);
+ if (err)
+ goto restore_opts;
+ }
#ifdef CONFIG_QUOTA
/* Release old quota file names */
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index b16bfb52edb2..c356b49540cb 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -1143,14 +1143,16 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
struct ext4_xattr_block_find bs = {
.s = { .not_found = -ENODATA, },
};
- int no_expand;
+ unsigned long no_expand;
int error;
if (!name)
return -EINVAL;
if (strlen(name) > 255)
return -ERANGE;
- ext4_write_lock_xattr(inode, &no_expand);
+ down_write(&EXT4_I(inode)->xattr_sem);
+ no_expand = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
error = ext4_reserve_inode_write(handle, inode, &is.iloc);
if (error)
@@ -1211,7 +1213,7 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
ext4_xattr_update_super_block(handle, inode->i_sb);
inode->i_ctime = ext4_current_time(inode);
if (!value)
- no_expand = 0;
+ ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
/*
* The bh is consumed by ext4_mark_iloc_dirty, even with
@@ -1225,7 +1227,9 @@ ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
cleanup:
brelse(is.iloc.bh);
brelse(bs.bh);
- ext4_write_unlock_xattr(inode, &no_expand);
+ if (no_expand == 0)
+ ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ up_write(&EXT4_I(inode)->xattr_sem);
return error;
}
@@ -1309,11 +1313,12 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
int error = 0, tried_min_extra_isize = 0;
int s_min_extra_isize = le16_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_min_extra_isize);
int isize_diff; /* How much do we need to grow i_extra_isize */
- int no_expand;
-
- if (ext4_write_trylock_xattr(inode, &no_expand) == 0)
- return 0;
+ down_write(&EXT4_I(inode)->xattr_sem);
+ /*
+ * Set EXT4_STATE_NO_EXPAND to avoid recursion when marking inode dirty
+ */
+ ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
retry:
isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
@@ -1507,7 +1512,8 @@ retry:
}
brelse(bh);
out:
- ext4_write_unlock_xattr(inode, &no_expand);
+ ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
+ up_write(&EXT4_I(inode)->xattr_sem);
return 0;
cleanup:
@@ -1519,10 +1525,10 @@ cleanup:
kfree(bs);
brelse(bh);
/*
- * Inode size expansion failed; don't try again
+ * We deliberately leave EXT4_STATE_NO_EXPAND set here since inode
+ * size expansion failed.
*/
- no_expand = 1;
- ext4_write_unlock_xattr(inode, &no_expand);
+ up_write(&EXT4_I(inode)->xattr_sem);
return error;
}
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index cdc413476241..10b0f7323ed6 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -101,38 +101,6 @@ extern const struct xattr_handler ext4_xattr_security_handler;
#define EXT4_XATTR_NAME_ENCRYPTION_CONTEXT "c"
-/*
- * The EXT4_STATE_NO_EXPAND is overloaded and used for two purposes.
- * The first is to signal that there the inline xattrs and data are
- * taking up so much space that we might as well not keep trying to
- * expand it. The second is that xattr_sem is taken for writing, so
- * we shouldn't try to recurse into the inode expansion. For this
- * second case, we need to make sure that we take save and restore the
- * NO_EXPAND state flag appropriately.
- */
-static inline void ext4_write_lock_xattr(struct inode *inode, int *save)
-{
- down_write(&EXT4_I(inode)->xattr_sem);
- *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
- ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
-}
-
-static inline int ext4_write_trylock_xattr(struct inode *inode, int *save)
-{
- if (down_write_trylock(&EXT4_I(inode)->xattr_sem) == 0)
- return 0;
- *save = ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND);
- ext4_set_inode_state(inode, EXT4_STATE_NO_EXPAND);
- return 1;
-}
-
-static inline void ext4_write_unlock_xattr(struct inode *inode, int *save)
-{
- if (*save == 0)
- ext4_clear_inode_state(inode, EXT4_STATE_NO_EXPAND);
- up_write(&EXT4_I(inode)->xattr_sem);
-}
-
extern ssize_t ext4_listxattr(struct dentry *, char *, size_t);
extern int ext4_xattr_get(struct inode *, int, const char *, void *, size_t);
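The helpers deleted above bundled xattr_sem with save/restore of EXT4_STATE_NO_EXPAND; the hunks in fs/ext4/inline.c and fs/ext4/xattr.c earlier in this diff open-code the same steps at each call site. A compilable userspace model of that pattern, with made-up types standing in for the inode state:

#include <pthread.h>
#include <stdbool.h>

struct inode_model {
        pthread_rwlock_t xattr_sem;     /* stands in for EXT4_I(inode)->xattr_sem */
        bool no_expand;                 /* stands in for EXT4_STATE_NO_EXPAND */
};

/* Take the lock, remember whether NO_EXPAND was already set, and set it so
 * nested code does not try to expand the inode while the lock is held. */
static bool write_lock_xattr(struct inode_model *i)
{
        bool saved;

        pthread_rwlock_wrlock(&i->xattr_sem);
        saved = i->no_expand;
        i->no_expand = true;
        return saved;
}

/* Only clear the flag if it was clear before the lock was taken. */
static void write_unlock_xattr(struct inode_model *i, bool saved)
{
        if (!saved)
                i->no_expand = false;
        pthread_rwlock_unlock(&i->xattr_sem);
}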
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 44f0aef71eca..b1020fac452f 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -722,7 +722,7 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
if (!ret) {
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
- bh->b_size = map.m_len << inode->i_blkbits;
+ bh->b_size = (u64)map.m_len << inode->i_blkbits;
}
return ret;
}
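The cast above matters because the byte count is otherwise computed in a 32-bit type before being stored into the 64-bit b_size. A small worked example of the overflow it prevents, assuming 4 KiB blocks (a shift by 12) and a typical 32-bit unsigned int:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned int m_len = 1u << 20;                  /* 1 Mi blocks mapped */
        unsigned int blkbits = 12;                      /* 4 KiB block size */
        uint32_t narrow = m_len << blkbits;             /* wraps to 0 */
        uint64_t wide = (uint64_t)m_len << blkbits;     /* 4 GiB, as intended */

        printf("narrow=%u wide=%llu\n", narrow, (unsigned long long)wide);
        return 0;
}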
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index da43c4a22e1b..0fe11984db09 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -1906,7 +1906,7 @@ void wb_workfn(struct work_struct *work)
}
if (!list_empty(&wb->work_list))
- mod_delayed_work(bdi_wq, &wb->dwork, 0);
+ wb_wakeup(wb);
else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
wb_wakeup_delayed(wb);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index 7302d96ae8bf..fa40e756c501 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -585,6 +585,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
return 0;
out_put_hidden_dir:
+ cancel_delayed_work_sync(&sbi->sync_work);
iput(sbi->hidden_dir);
out_put_root:
dput(sb->s_root);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index a2e724053919..f3a31f55f372 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -527,6 +527,7 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
*/
ret = start_this_handle(journal, handle, GFP_NOFS);
if (ret < 0) {
+ handle->h_journal = journal;
jbd2_journal_free_reserved(handle);
return ret;
}
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index a2edb0049eb5..f038d4ac9aec 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -271,6 +271,8 @@ static void lockd_down_net(struct svc_serv *serv, struct net *net)
if (ln->nlmsvc_users) {
if (--ln->nlmsvc_users == 0) {
nlm_shutdown_hosts_net(net);
+ cancel_delayed_work_sync(&ln->grace_period_end);
+ locks_end_grace(&ln->lockd_manager);
svc_shutdown_net(serv, net);
dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
}
diff --git a/fs/pipe.c b/fs/pipe.c
index 39eff9a67253..1e7263bb837a 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -616,6 +616,9 @@ struct pipe_inode_info *alloc_pipe_info(void)
unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
struct user_struct *user = get_current_user();
+ if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE))
+ pipe_bufs = pipe_max_size >> PAGE_SHIFT;
+
if (!too_many_pipe_buffers_hard(user)) {
if (too_many_pipe_buffers_soft(user))
pipe_bufs = 1;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 6d6939740939..760cd1b6cb82 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -954,6 +954,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
unsigned long src = *ppos;
int ret = 0;
struct mm_struct *mm = file->private_data;
+ unsigned long env_start, env_end;
/* Ensure the process spawned far enough to have an environment. */
if (!mm || !mm->env_end)
@@ -966,19 +967,25 @@ static ssize_t environ_read(struct file *file, char __user *buf,
ret = 0;
if (!atomic_inc_not_zero(&mm->mm_users))
goto free;
+
+ down_read(&mm->mmap_sem);
+ env_start = mm->env_start;
+ env_end = mm->env_end;
+ up_read(&mm->mmap_sem);
+
while (count > 0) {
size_t this_len, max_len;
int retval;
- if (src >= (mm->env_end - mm->env_start))
+ if (src >= (env_end - env_start))
break;
- this_len = mm->env_end - (mm->env_start + src);
+ this_len = env_end - (env_start + src);
max_len = min_t(size_t, PAGE_SIZE, count);
this_len = min(max_len, this_len);
- retval = access_remote_vm(mm, (mm->env_start + src),
+ retval = access_remote_vm(mm, (env_start + src),
page, this_len, 0);
if (retval <= 0) {
@@ -3182,7 +3189,7 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
* used for the node /proc/<pid>/task/<tid>/comm.
* It bypasses generic permission checks in the case where a task of the same
* task group attempts to access the node.
- * The rational behind this is that glibc and bionic access this node for
+ * The rationale behind this is that glibc and bionic access this node for
* cross thread naming (pthread_set/getname_np(!self)). However, if
* PR_SET_DUMPABLE gets set to 0 this node among others becomes uid=0 gid=0,
* which locks out the cross thread naming implementation.
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 9155a5a0d3b9..df4661abadc4 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -57,11 +57,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
/*
* Estimate the amount of memory available for userspace allocations,
* without causing swapping.
- *
- * Free memory cannot be taken below the low watermark, before the
- * system starts swapping.
*/
- available = i.freeram - wmark_low;
+ available = i.freeram - totalreserve_pages;
/*
* Not all the page cache can be freed, otherwise the system will
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index e4051e87800e..ac2d7a64f083 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -24,9 +24,13 @@
void task_mem(struct seq_file *m, struct mm_struct *mm)
{
- unsigned long data, text, lib, swap, ptes, pmds;
+ unsigned long data, text, lib, swap, ptes, pmds, anon, file, shmem;
unsigned long hiwater_vm, total_vm, hiwater_rss, total_rss;
+ anon = get_mm_counter(mm, MM_ANONPAGES);
+ file = get_mm_counter(mm, MM_FILEPAGES);
+ shmem = get_mm_counter(mm, MM_SHMEMPAGES);
+
/*
* Note: to minimize their overhead, mm maintains hiwater_vm and
* hiwater_rss only when about to *lower* total_vm or rss. Any
@@ -37,7 +41,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
hiwater_vm = total_vm = mm->total_vm;
if (hiwater_vm < mm->hiwater_vm)
hiwater_vm = mm->hiwater_vm;
- hiwater_rss = total_rss = get_mm_rss(mm);
+ hiwater_rss = total_rss = anon + file + shmem;
if (hiwater_rss < mm->hiwater_rss)
hiwater_rss = mm->hiwater_rss;
@@ -54,6 +58,9 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
"VmPin:\t%8lu kB\n"
"VmHWM:\t%8lu kB\n"
"VmRSS:\t%8lu kB\n"
+ "RssAnon:\t%8lu kB\n"
+ "RssFile:\t%8lu kB\n"
+ "RssShmem:\t%8lu kB\n"
"VmData:\t%8lu kB\n"
"VmStk:\t%8lu kB\n"
"VmExe:\t%8lu kB\n"
@@ -67,6 +74,9 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
mm->pinned_vm << (PAGE_SHIFT-10),
hiwater_rss << (PAGE_SHIFT-10),
total_rss << (PAGE_SHIFT-10),
+ anon << (PAGE_SHIFT-10),
+ file << (PAGE_SHIFT-10),
+ shmem << (PAGE_SHIFT-10),
data << (PAGE_SHIFT-10),
mm->stack_vm << (PAGE_SHIFT-10), text, lib,
ptes >> 10,
@@ -84,7 +94,8 @@ unsigned long task_statm(struct mm_struct *mm,
unsigned long *shared, unsigned long *text,
unsigned long *data, unsigned long *resident)
{
- *shared = get_mm_counter(mm, MM_FILEPAGES);
+ *shared = get_mm_counter(mm, MM_FILEPAGES) +
+ get_mm_counter(mm, MM_SHMEMPAGES);
*text = (PAGE_ALIGN(mm->end_code) - (mm->start_code & PAGE_MASK))
>> PAGE_SHIFT;
*data = mm->total_vm - mm->shared_vm;
diff --git a/fs/sdcardfs/dentry.c b/fs/sdcardfs/dentry.c
index 13da7e5245bd..3795d2c26915 100644
--- a/fs/sdcardfs/dentry.c
+++ b/fs/sdcardfs/dentry.c
@@ -51,7 +51,6 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
* whether the base obbpath has been changed or not
*/
if (is_obbpath_invalid(dentry)) {
- d_drop(dentry);
return 0;
}
@@ -65,7 +64,6 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
if ((lower_dentry->d_flags & DCACHE_OP_REVALIDATE)) {
err = lower_dentry->d_op->d_revalidate(lower_dentry, flags);
if (err == 0) {
- d_drop(dentry);
goto out;
}
}
@@ -73,14 +71,12 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
spin_lock(&lower_dentry->d_lock);
if (d_unhashed(lower_dentry)) {
spin_unlock(&lower_dentry->d_lock);
- d_drop(dentry);
err = 0;
goto out;
}
spin_unlock(&lower_dentry->d_lock);
if (parent_lower_dentry != lower_cur_parent_dentry) {
- d_drop(dentry);
err = 0;
goto out;
}
@@ -94,7 +90,6 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
}
if (!qstr_case_eq(&dentry->d_name, &lower_dentry->d_name)) {
- __d_drop(dentry);
err = 0;
}
@@ -113,7 +108,6 @@ static int sdcardfs_d_revalidate(struct dentry *dentry, unsigned int flags)
if (inode) {
data = top_data_get(SDCARDFS_I(inode));
if (!data || data->abandoned) {
- d_drop(dentry);
err = 0;
}
if (data)
@@ -131,6 +125,8 @@ out:
static void sdcardfs_d_release(struct dentry *dentry)
{
+ if (!dentry || !dentry->d_fsdata)
+ return;
/* release and reset the lower paths */
if (has_graft_path(dentry))
sdcardfs_put_reset_orig_path(dentry);
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 00ae21151f52..943310d251a5 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -41,8 +41,6 @@ void sdcardfs_destroy_dentry_cache(void)
void free_dentry_private_data(struct dentry *dentry)
{
- if (!dentry || !dentry->d_fsdata)
- return;
kmem_cache_free(sdcardfs_dentry_cachep, dentry->d_fsdata);
dentry->d_fsdata = NULL;
}
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 8daa94e4abe6..b4d355580bae 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -323,7 +323,7 @@ static int sdcardfs_read_super(struct vfsmount *mnt, struct super_block *sb,
sb->s_root = d_make_root(inode);
if (!sb->s_root) {
err = -ENOMEM;
- goto out_iput;
+ goto out_sput;
}
d_set_d_op(sb->s_root, &sdcardfs_ci_dops);
@@ -368,8 +368,7 @@ static int sdcardfs_read_super(struct vfsmount *mnt, struct super_block *sb,
/* no longer needed: free_dentry_private_data(sb->s_root); */
out_freeroot:
dput(sb->s_root);
-out_iput:
- iput(inode);
+ sb->s_root = NULL;
out_sput:
/* drop refs we took earlier */
atomic_dec(&lower_sb->s_active);
@@ -436,7 +435,7 @@ void sdcardfs_kill_sb(struct super_block *sb)
{
struct sdcardfs_sb_info *sbi;
- if (sb->s_magic == SDCARDFS_SUPER_MAGIC) {
+ if (sb->s_magic == SDCARDFS_SUPER_MAGIC && sb->s_fs_info) {
sbi = SDCARDFS_SB(sb);
mutex_lock(&sdcardfs_super_list_lock);
list_del(&sbi->list);
diff --git a/fs/select.c b/fs/select.c
index 3d38808dbcb6..f7667f2c6d0d 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -239,7 +239,8 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
set_current_state(state);
if (!pwq->triggered)
- rc = schedule_hrtimeout_range(expires, slack, HRTIMER_MODE_ABS);
+ rc = freezable_schedule_hrtimeout_range(expires, slack,
+ HRTIMER_MODE_ABS);
__set_current_state(TASK_RUNNING);
/*
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 3dd47307363f..e917aec4babe 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -969,22 +969,26 @@ xfs_file_fallocate(
if (error)
goto out_unlock;
} else if (mode & FALLOC_FL_INSERT_RANGE) {
- unsigned int blksize_mask = i_blocksize(inode) - 1;
+ unsigned int blksize_mask = i_blocksize(inode) - 1;
+ loff_t isize = i_size_read(inode);
- new_size = i_size_read(inode) + len;
if (offset & blksize_mask || len & blksize_mask) {
error = -EINVAL;
goto out_unlock;
}
- /* check the new inode size does not wrap through zero */
- if (new_size > inode->i_sb->s_maxbytes) {
+ /*
+ * New inode size must not exceed ->s_maxbytes, accounting for
+ * possible signed overflow.
+ */
+ if (inode->i_sb->s_maxbytes - isize < len) {
error = -EFBIG;
goto out_unlock;
}
+ new_size = isize + len;
/* Offset should be less than i_size */
- if (offset >= i_size_read(inode)) {
+ if (offset >= isize) {
error = -EINVAL;
goto out_unlock;
}
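The rewritten check above avoids forming isize + len at all, since that sum can wrap for very large insert-range requests. The shape of the overflow-safe comparison, as a standalone sketch with assumed 64-bit signed sizes:

#include <stdbool.h>
#include <stdint.h>

/* With 0 <= isize <= maxbytes, the subtraction cannot overflow, so the
 * comparison stays valid even when isize + len would wrap past INT64_MAX. */
static bool insert_range_fits(int64_t isize, int64_t len, int64_t maxbytes)
{
        return maxbytes - isize >= len;
}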
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index bf2d34c9d804..f0d8b1c51343 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -13,7 +13,7 @@
*/
/**
- * futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
* argument and comparison of the previous
* futex value with another constant.
*
@@ -25,18 +25,11 @@
* <0 - On error
*/
static inline int
-futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval, ret;
u32 tmp;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
preempt_disable();
pagefault_disable();
@@ -74,17 +67,9 @@ out_pagefault_enable:
pagefault_enable();
preempt_enable();
- if (ret == 0) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (ret == 0)
+ *oval = oldval;
+
return ret;
}
@@ -126,18 +111,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
#else
static inline int
-futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
+arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
{
- int op = (encoded_op >> 28) & 7;
- int cmp = (encoded_op >> 24) & 15;
- int oparg = (encoded_op << 8) >> 20;
- int cmparg = (encoded_op << 20) >> 20;
int oldval = 0, ret;
- if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
- oparg = 1 << oparg;
-
- if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32)))
- return -EFAULT;
pagefault_disable();
@@ -153,17 +129,9 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
pagefault_enable();
- if (!ret) {
- switch (cmp) {
- case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
- case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
- case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
- case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
- case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
- case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
- default: ret = -ENOSYS;
- }
- }
+ if (!ret)
+ *oval = oldval;
+
return ret;
}
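With this change the per-architecture helper only performs the atomic operation and reports the old value through *oval; decoding encoded_op and applying the comparison live in one place in the common futex code instead of being duplicated per architecture. A standalone model of the comparison step that the deleted switch statements used to repeat (the enum names are stand-ins for the FUTEX_OP_CMP_* constants):

enum cmp_op { CMP_EQ, CMP_NE, CMP_LT, CMP_GE, CMP_LE, CMP_GT };

/* Returns 1 if the old futex value satisfies the requested comparison,
 * 0 if not, and -1 for an unknown comparison code. */
static int apply_futex_cmp(enum cmp_op cmp, int oldval, int cmparg)
{
        switch (cmp) {
        case CMP_EQ: return oldval == cmparg;
        case CMP_NE: return oldval != cmparg;
        case CMP_LT: return oldval <  cmparg;
        case CMP_GE: return oldval >= cmparg;
        case CMP_LE: return oldval <= cmparg;
        case CMP_GT: return oldval >  cmparg;
        }
        return -1;
}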
diff --git a/include/dt-bindings/usb/typec.h b/include/dt-bindings/usb/typec.h
new file mode 100644
index 000000000000..82ea851a0fb1
--- /dev/null
+++ b/include/dt-bindings/usb/typec.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef _DT_BINDINGS_USB_TYPEC
+#define _DT_BINDINGS_USB_TYPEC
+
+#define TYPEC_PORT_DFP 0x0
+#define TYPEC_PORT_UFP 0x1
+#define TYPEC_PORT_DRP 0x2
+
+#define PDO_TYPE_FIXED 0x0
+#define PDO_TYPE_BATT 0x1
+#define PDO_TYPE_VAR 0x2
+
+#define TYPEC_SINK 0x0
+#define TYPEC_SOURCE 0x1
+
+#endif /* _DT_BINDINGS_USB_TYPEC */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 65a3be05f7d9..1a24c4fe7df8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1703,43 +1703,26 @@ static const u_int64_t latency_x_axis_us[] = {
#define BLK_IO_LAT_HIST_ZERO 2
struct io_latency_state {
- u_int64_t latency_y_axis_read[ARRAY_SIZE(latency_x_axis_us) + 1];
- u_int64_t latency_reads_elems;
- u_int64_t latency_y_axis_write[ARRAY_SIZE(latency_x_axis_us) + 1];
- u_int64_t latency_writes_elems;
+ u_int64_t latency_y_axis[ARRAY_SIZE(latency_x_axis_us) + 1];
+ u_int64_t latency_elems;
+ u_int64_t latency_sum;
};
static inline void
-blk_update_latency_hist(struct io_latency_state *s,
- int read,
- u_int64_t delta_us)
+blk_update_latency_hist(struct io_latency_state *s, u_int64_t delta_us)
{
int i;
- for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++) {
- if (delta_us < (u_int64_t)latency_x_axis_us[i]) {
- if (read)
- s->latency_y_axis_read[i]++;
- else
- s->latency_y_axis_write[i]++;
+ for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++)
+ if (delta_us < (u_int64_t)latency_x_axis_us[i])
break;
- }
- }
- if (i == ARRAY_SIZE(latency_x_axis_us)) {
- /* Overflowed the histogram */
- if (read)
- s->latency_y_axis_read[i]++;
- else
- s->latency_y_axis_write[i]++;
- }
- if (read)
- s->latency_reads_elems++;
- else
- s->latency_writes_elems++;
+ s->latency_y_axis[i]++;
+ s->latency_elems++;
+ s->latency_sum += delta_us;
}
-void blk_zero_latency_hist(struct io_latency_state *s);
-ssize_t blk_latency_hist_show(struct io_latency_state *s, char *buf);
+ssize_t blk_latency_hist_show(char *name, struct io_latency_state *s,
+ char *buf, int buf_size);
#else /* CONFIG_BLOCK */
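After this rework each io_latency_state covers a single direction, so a caller keeps one instance for reads and one for writes (the mmc_host change later in this diff does exactly that). A compact userspace model of the bucketing logic, with assumed bucket bounds:

#include <stddef.h>
#include <stdint.h>

static const uint64_t bounds_us[] = { 100, 1000, 10000, 100000 };

struct lat_state {
        uint64_t buckets[sizeof(bounds_us) / sizeof(bounds_us[0]) + 1];
        uint64_t elems;
        uint64_t sum;
};

/* Find the first bucket whose upper bound exceeds the sample; samples past
 * the last bound fall into the extra overflow slot. */
static void lat_update(struct lat_state *s, uint64_t delta_us)
{
        size_t i;

        for (i = 0; i < sizeof(bounds_us) / sizeof(bounds_us[0]); i++)
                if (delta_us < bounds_us[i])
                        break;
        s->buckets[i]++;
        s->elems++;
        s->sum += delta_us;
}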
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 39c7de8c3048..d282307214ac 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -110,7 +110,7 @@ struct clocksource {
#define CLOCK_SOURCE_RESELECT 0x100
/* simplify initialization of mask field */
-#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
/**
* clocksource_khz2mult - calculates mult from khz and shift
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 5eb26b7bf930..1a1c112b62a9 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -29,3 +29,19 @@
__attribute__((__section__(".text..ftrace")))
#endif
#endif
+
+#define randomized_struct_fields_start struct {
+#define randomized_struct_fields_end };
+
+/* all clang versions usable with the kernel support KASAN ABI version 5 */
+#define KASAN_ABI_VERSION 5
+
+/* emulate gcc's __SANITIZE_ADDRESS__ flag */
+#if __has_feature(address_sanitizer)
+#define __SANITIZE_ADDRESS__
+#endif
+
+#ifdef CONFIG_KASAN
+#undef __no_sanitize_address
+#define __no_sanitize_address __attribute__((no_sanitize("address")))
+#endif
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index b54b1a748d83..3bc30e3ec770 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -182,6 +182,9 @@ struct devfreq {
unsigned int *trans_table;
unsigned long *time_in_state;
unsigned long last_stat_updated;
+
+ bool do_wake_boost;
+ bool needs_wake_boost;
};
#if defined(CONFIG_PM_DEVFREQ)
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c47c68e535e8..a16d1851cfb1 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -767,6 +767,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
sg_dma_address(&sg) = buf;
sg_dma_len(&sg) = len;
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
return chan->device->device_prep_slave_sg(chan, &sg, 1,
dir, flags, NULL);
}
@@ -775,6 +778,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_transfer_direction dir, unsigned long flags)
{
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
dir, flags, NULL);
}
@@ -786,6 +792,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
enum dma_transfer_direction dir, unsigned long flags,
struct rio_dma_ext *rio_ext)
{
+ if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+ return NULL;
+
return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
dir, flags, rio_ext);
}
@@ -796,6 +805,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
size_t period_len, enum dma_transfer_direction dir,
unsigned long flags)
{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
+ return NULL;
+
return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
period_len, dir, flags);
}
@@ -804,6 +816,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_interleaved_dma(
struct dma_chan *chan, struct dma_interleaved_template *xt,
unsigned long flags)
{
+ if (!chan || !chan->device || !chan->device->device_prep_interleaved_dma)
+ return NULL;
+
return chan->device->device_prep_interleaved_dma(chan, xt, flags);
}
@@ -811,7 +826,7 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memset(
struct dma_chan *chan, dma_addr_t dest, int value, size_t len,
unsigned long flags)
{
- if (!chan || !chan->device)
+ if (!chan || !chan->device || !chan->device->device_prep_dma_memset)
return NULL;
return chan->device->device_prep_dma_memset(chan, dest, value,
@@ -824,6 +839,9 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_sg(
struct scatterlist *src_sg, unsigned int src_nents,
unsigned long flags)
{
+ if (!chan || !chan->device || !chan->device->device_prep_dma_sg)
+ return NULL;
+
return chan->device->device_prep_dma_sg(chan, dst_sg, dst_nents,
src_sg, src_nents, flags);
}
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 21696aaf0eed..b2f6faa3022f 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -364,8 +364,8 @@ typedef struct {
u32 attributes;
u32 get_bar_attributes;
u32 set_bar_attributes;
- uint64_t romsize;
- void *romimage;
+ u64 romsize;
+ u32 romimage;
} efi_pci_io_protocol_32;
typedef struct {
@@ -384,8 +384,8 @@ typedef struct {
u64 attributes;
u64 get_bar_attributes;
u64 set_bar_attributes;
- uint64_t romsize;
- void *romimage;
+ u64 romsize;
+ u64 romimage;
} efi_pci_io_protocol_64;
typedef struct {
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index 19db03dbbd00..dd676ba758ee 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -585,7 +585,7 @@ static inline bool skb_vlan_tagged(const struct sk_buff *skb)
* Returns true if the skb is tagged with multiple vlan headers, regardless
* of whether it is hardware accelerated or not.
*/
-static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
+static inline bool skb_vlan_tagged_multi(struct sk_buff *skb)
{
__be16 protocol = skb->protocol;
@@ -596,6 +596,9 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
protocol != htons(ETH_P_8021AD)))
return false;
+ if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
+ return false;
+
veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
}
@@ -613,7 +616,7 @@ static inline bool skb_vlan_tagged_multi(const struct sk_buff *skb)
*
* Returns features without unsafe ones if the skb has multiple tags.
*/
-static inline netdev_features_t vlan_features_check(const struct sk_buff *skb,
+static inline netdev_features_t vlan_features_check(struct sk_buff *skb,
netdev_features_t features)
{
if (skb_vlan_tagged_multi(skb)) {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 42f04a87b312..a4280f5cc0a7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1378,10 +1378,26 @@ static inline void dec_mm_counter(struct mm_struct *mm, int member)
atomic_long_dec(&mm->rss_stat.count[member]);
}
+/* Optimized variant when page is already known not to be PageAnon */
+static inline int mm_counter_file(struct page *page)
+{
+ if (PageSwapBacked(page))
+ return MM_SHMEMPAGES;
+ return MM_FILEPAGES;
+}
+
+static inline int mm_counter(struct page *page)
+{
+ if (PageAnon(page))
+ return MM_ANONPAGES;
+ return mm_counter_file(page);
+}
+
static inline unsigned long get_mm_rss(struct mm_struct *mm)
{
return get_mm_counter(mm, MM_FILEPAGES) +
- get_mm_counter(mm, MM_ANONPAGES);
+ get_mm_counter(mm, MM_ANONPAGES) +
+ get_mm_counter(mm, MM_SHMEMPAGES);
}
static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
@@ -2012,7 +2028,7 @@ int write_one_page(struct page *page, int wait);
void task_dirty_inc(struct task_struct *tsk);
/* readahead.c */
-#define VM_MAX_READAHEAD 128 /* kbytes */
+#define VM_MAX_READAHEAD CONFIG_VM_MAX_READAHEAD /* kbytes */
#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index e4a5b4a3a3ec..dccf19c7e17b 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -381,9 +381,10 @@ struct core_state {
};
enum {
- MM_FILEPAGES,
- MM_ANONPAGES,
- MM_SWAPENTS,
+ MM_FILEPAGES, /* Resident file mapping pages */
+ MM_ANONPAGES, /* Resident anonymous pages */
+ MM_SWAPENTS, /* Anonymous swap entries */
+ MM_SHMEMPAGES, /* Resident shared memory pages */
NR_MM_COUNTERS
};
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index aea4c0f2ef5f..22feb16616cc 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -594,7 +594,8 @@ struct mmc_host {
struct mmc_request *err_mrq;
#ifdef CONFIG_BLOCK
int latency_hist_enabled;
- struct io_latency_state io_lat_s;
+ struct io_latency_state io_lat_read;
+ struct io_latency_state io_lat_write;
#endif
bool sdr104_wa;
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index b63fa457febd..3529683f691e 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -85,6 +85,7 @@ struct flchip {
unsigned int write_suspended:1;
unsigned int erase_suspended:1;
unsigned long in_progress_block_addr;
+ unsigned long in_progress_block_mask;
struct mutex mutex;
wait_queue_head_t wq; /* Wait on here when we're waiting for the chip
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 6c01ffcc5cea..aaa2d88dee2e 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -246,7 +246,6 @@ enum power_supply_property {
POWER_SUPPLY_PROP_FCC_DELTA,
POWER_SUPPLY_PROP_ICL_REDUCTION,
POWER_SUPPLY_PROP_PARALLEL_MODE,
- POWER_SUPPLY_PROP_PORT_TEMP,
POWER_SUPPLY_PROP_DIE_HEALTH,
POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
diff --git a/include/linux/signal.h b/include/linux/signal.h
index d80259afb9e5..bcc094cb697c 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -97,6 +97,23 @@ static inline int sigisemptyset(sigset_t *set)
}
}
+static inline int sigequalsets(const sigset_t *set1, const sigset_t *set2)
+{
+ switch (_NSIG_WORDS) {
+ case 4:
+ return (set1->sig[3] == set2->sig[3]) &&
+ (set1->sig[2] == set2->sig[2]) &&
+ (set1->sig[1] == set2->sig[1]) &&
+ (set1->sig[0] == set2->sig[0]);
+ case 2:
+ return (set1->sig[1] == set2->sig[1]) &&
+ (set1->sig[0] == set2->sig[0]);
+ case 1:
+ return set1->sig[0] == set2->sig[0];
+ }
+ return 0;
+}
+
#define sigmask(sig) (1UL << ((sig) - 1))
#ifndef __HAVE_ARCH_SIG_SETOPS
diff --git a/include/linux/state_notifier.h b/include/linux/state_notifier.h
deleted file mode 100644
index ffb4fba752c1..000000000000
--- a/include/linux/state_notifier.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __LINUX_STATE_NOTIFIER_H
-#define __LINUX_STATE_NOTIFIER_H
-
-#include <linux/notifier.h>
-
-#define STATE_NOTIFIER_ACTIVE 0x01
-#define STATE_NOTIFIER_SUSPEND 0x02
-
-struct state_event {
- void *data;
-};
-
-extern bool state_suspended;
-extern void state_suspend(void);
-extern void state_resume(void);
-int state_register_client(struct notifier_block *nb);
-int state_unregister_client(struct notifier_block *nb);
-int state_notifier_call_chain(unsigned long val, void *v);
-
-#endif /* _LINUX_STATE_NOTIFIER_H */
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index 00a1f330f93a..9c452f6db438 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -518,7 +518,7 @@ static inline void sysfs_notify_dirent(struct kernfs_node *kn)
}
static inline struct kernfs_node *sysfs_get_dirent(struct kernfs_node *parent,
- const unsigned char *name)
+ const char *name)
{
return kernfs_find_and_get(parent, name);
}
diff --git a/include/linux/tty.h b/include/linux/tty.h
index b7971e0d7938..1c1bb90f6819 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -590,7 +590,7 @@ extern int tty_unregister_ldisc(int disc);
extern int tty_set_ldisc(struct tty_struct *tty, int ldisc);
extern int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty);
extern void tty_ldisc_release(struct tty_struct *tty);
-extern void tty_ldisc_init(struct tty_struct *tty);
+extern int __must_check tty_ldisc_init(struct tty_struct *tty);
extern void tty_ldisc_deinit(struct tty_struct *tty);
extern void tty_ldisc_begin(void);
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h
index c5f2158ab00e..a877836b86eb 100644
--- a/include/linux/usb/audio-v2.h
+++ b/include/linux/usb/audio-v2.h
@@ -35,12 +35,12 @@
static inline bool uac2_control_is_readable(u32 bmControls, u8 control)
{
- return (bmControls >> (control * 2)) & 0x1;
+ return (bmControls >> ((control - 1) * 2)) & 0x1;
}
static inline bool uac2_control_is_writeable(u32 bmControls, u8 control)
{
- return (bmControls >> (control * 2)) & 0x2;
+ return (bmControls >> ((control - 1) * 2)) & 0x2;
}
/* 4.7.2 Class-Specific AC Interface Descriptor */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 8f4d4bfa6d46..d7844d215381 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -124,6 +124,9 @@ int virtio_device_freeze(struct virtio_device *dev);
int virtio_device_restore(struct virtio_device *dev);
#endif
+#define virtio_device_for_each_vq(vdev, vq) \
+ list_for_each_entry(vq, &vdev->vqs, list)
+
/**
* virtio_driver - operations for a virtio I/O driver
* @driver: underlying device driver (populate name and owner).
diff --git a/include/linux/wahoo_info.h b/include/linux/wahoo_info.h
new file mode 100644
index 000000000000..5d443f736a74
--- /dev/null
+++ b/include/linux/wahoo_info.h
@@ -0,0 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+bool is_google_taimen(void);
+bool is_google_walleye(void);
diff --git a/include/linux/wake_gestures.h b/include/linux/wake_gestures.h
index 5b10f9978e65..d594c5d2f663 100644
--- a/include/linux/wake_gestures.h
+++ b/include/linux/wake_gestures.h
@@ -29,11 +29,8 @@ extern bool wg_switch;
extern bool wg_switch_temp;
extern bool wg_changed;
extern int vib_strength;
-#if IS_ENABLED(CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_HTC)
bool scr_suspended(void);
-#else
bool scr_suspended_taimen(void);
-#endif
void set_vibrate(int value);
#endif /* _LINUX_WAKE_GESTURES_H */
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index d0b5ca5d4e08..6c1cbbedc79c 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -224,6 +224,7 @@ static inline void inode_attach_wb(struct inode *inode, struct page *page)
static inline void inode_detach_wb(struct inode *inode)
{
if (inode->i_wb) {
+ WARN_ON_ONCE(!(inode->i_state & I_CLEAR));
wb_put(inode->i_wb);
inode->i_wb = NULL;
}
diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h
index c9b3eb70f340..567017b5fc9e 100644
--- a/include/net/inet_timewait_sock.h
+++ b/include/net/inet_timewait_sock.h
@@ -55,6 +55,7 @@ struct inet_timewait_sock {
#define tw_family __tw_common.skc_family
#define tw_state __tw_common.skc_state
#define tw_reuse __tw_common.skc_reuse
+#define tw_reuseport __tw_common.skc_reuseport
#define tw_ipv6only __tw_common.skc_ipv6only
#define tw_bound_dev_if __tw_common.skc_bound_dev_if
#define tw_node __tw_common.skc_nulls_node
diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h
index fe994d2e5286..ea985aa7a6c5 100644
--- a/include/net/llc_conn.h
+++ b/include/net/llc_conn.h
@@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb)
struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority,
struct proto *prot, int kern);
+void llc_sk_stop_all_timers(struct sock *sk, bool sync);
void llc_sk_free(struct sock *sk);
void llc_sk_reset(struct sock *sk);
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 97d6240492f8..f9e0ceea211e 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -975,7 +975,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_DECRYPTED: This frame was decrypted in hardware.
* @RX_FLAG_MMIC_STRIPPED: the Michael MIC is stripped off this frame,
* verification has been done by the hardware.
- * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame.
+ * @RX_FLAG_IV_STRIPPED: The IV and ICV are stripped from this frame.
* If this flag is set, the stack cannot do any replay detection
* hence the driver or hardware will have to do that.
* @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
@@ -1013,6 +1013,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* on this subframe
* @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
* is stored in the @ampdu_delimiter_crc field)
+ * @RX_FLAG_MIC_STRIPPED: The mic was stripped of this packet. Decryption was
+ * done by the hardware
* @RX_FLAG_LDPC: LDPC was used
* @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
* processing it in any regular way.
@@ -1037,6 +1039,11 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_RADIOTAP_VENDOR_DATA: This frame contains vendor-specific
* radiotap data in the skb->data (before the frame) as described by
* the &struct ieee80211_vendor_radiotap.
+ * @RX_FLAG_ALLOW_SAME_PN: Allow the same PN as same packet before.
+ * This is used for AMSDU subframes which can have the same PN as
+ * the first subframe.
+ * @RX_FLAG_ICV_STRIPPED: The ICV is stripped from this frame. CRC checking must
+ * be done in the hardware.
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -1069,6 +1076,9 @@ enum mac80211_rx_flags {
RX_FLAG_5MHZ = BIT(29),
RX_FLAG_AMSDU_MORE = BIT(30),
RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31),
+ RX_FLAG_MIC_STRIPPED = BIT_ULL(32),
+ RX_FLAG_ALLOW_SAME_PN = BIT_ULL(33),
+ RX_FLAG_ICV_STRIPPED = BIT_ULL(34),
};
#define RX_FLAG_STBC_SHIFT 26
@@ -1124,7 +1134,7 @@ struct ieee80211_rx_status {
u64 mactime;
u32 device_timestamp;
u32 ampdu_reference;
- u32 flag;
+ u64 flag;
u16 freq;
u8 vht_flag;
u8 rate_idx;
diff --git a/include/net/nexthop.h b/include/net/nexthop.h
index 3334dbfa5aa4..7fc78663ec9d 100644
--- a/include/net/nexthop.h
+++ b/include/net/nexthop.h
@@ -6,7 +6,7 @@
static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
{
- return remaining >= sizeof(*rtnh) &&
+ return remaining >= (int)sizeof(*rtnh) &&
rtnh->rtnh_len >= sizeof(*rtnh) &&
rtnh->rtnh_len <= remaining;
}
diff --git a/include/net/sock.h b/include/net/sock.h
index 653118a2d285..d4acb6689d39 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -274,6 +274,7 @@ struct cg_proto;
* @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
* @sk_gso_max_size: Maximum GSO segment size to build
* @sk_gso_max_segs: Maximum number of GSO segments
+ * @sk_pacing_shift: scaling factor for TCP Small Queues
* @sk_lingertime: %SO_LINGER l_linger setting
* @sk_backlog: always used with the per-socket spinlock held
* @sk_callback_lock: used with the callbacks in the end of this struct
@@ -416,6 +417,7 @@ struct sock {
unsigned int sk_gso_max_size;
u16 sk_gso_max_segs;
int sk_rcvlowat;
+ u8 sk_pacing_shift;
unsigned long sk_lingertime;
struct sk_buff_head sk_error_queue;
struct proto *sk_prot_creator;
@@ -1030,8 +1032,12 @@ struct proto {
*/
int *memory_pressure;
long *sysctl_mem;
+
int *sysctl_wmem;
int *sysctl_rmem;
+ u32 sysctl_wmem_offset;
+ u32 sysctl_rmem_offset;
+
int max_header;
bool no_autobind;
@@ -2312,6 +2318,35 @@ extern int sysctl_optmem_max;
extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;
+static inline int sk_get_wmem0(const struct sock *sk, const struct proto *proto)
+{
+ /* Does this proto have per netns sysctl_wmem ? */
+ if (proto->sysctl_wmem_offset)
+ return *(int *)((void *)sock_net(sk) + proto->sysctl_wmem_offset);
+
+ return *proto->sysctl_wmem;
+}
+
+static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
+{
+ /* Does this proto have per netns sysctl_rmem ? */
+ if (proto->sysctl_rmem_offset)
+ return *(int *)((void *)sock_net(sk) + proto->sysctl_rmem_offset);
+
+ return *proto->sysctl_rmem;
+}
+
+/* Default TCP Small queue budget is ~1 ms of data (1sec >> 10)
+ * Some wifi drivers need to tweak it to get more chunks.
+ * They can use this helper from their ndo_start_xmit()
+ */
+static inline void sk_pacing_shift_update(struct sock *sk, int val)
+{
+ if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
+ return;
+ sk->sk_pacing_shift = val;
+}
+
/* SOCKEV Notifier Events */
#define SOCKEV_SOCKET 0x00
#define SOCKEV_BIND 0x01
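The comment above indicates how sk_pacing_shift_update() is meant to be used: a wifi driver that wants more than the default ~1 ms of queued data per socket lowers the shift from its transmit path. A hypothetical call site (the driver name and the chosen shift are assumptions; a shift of 6 corresponds to roughly 16 ms of data at the current pacing rate):

#include <linux/netdevice.h>
#include <net/sock.h>

static netdev_tx_t example_wifi_start_xmit(struct sk_buff *skb,
                                           struct net_device *dev)
{
        /* Allow larger TCP Small Queues bursts for this socket. */
        if (skb->sk)
                sk_pacing_shift_update(skb->sk, 6);

        /* ... queue the frame to the hardware as usual ... */
        return NETDEV_TX_OK;
}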
diff --git a/include/net/tcp.h b/include/net/tcp.h
index ecdd2223e91f..d38770675293 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -559,8 +559,8 @@ __u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
int nonagle);
bool tcp_may_send_now(struct sock *sk);
-int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
-int tcp_retransmit_skb(struct sock *, struct sk_buff *);
+int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
diff --git a/include/sound/control.h b/include/sound/control.h
index 21d047f229a1..4142757080f8 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -22,6 +22,7 @@
*
*/
+#include <linux/nospec.h>
#include <sound/asound.h>
#define snd_kcontrol_chip(kcontrol) ((kcontrol)->private_data)
@@ -147,12 +148,14 @@ int snd_ctl_get_preferred_subdevice(struct snd_card *card, int type);
static inline unsigned int snd_ctl_get_ioffnum(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
{
- return id->numid - kctl->id.numid;
+ unsigned int ioff = id->numid - kctl->id.numid;
+ return array_index_nospec(ioff, kctl->count);
}
static inline unsigned int snd_ctl_get_ioffidx(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
{
- return id->index - kctl->id.index;
+ unsigned int ioff = id->index - kctl->id.index;
+ return array_index_nospec(ioff, kctl->count);
}
static inline unsigned int snd_ctl_get_ioff(struct snd_kcontrol *kctl, struct snd_ctl_elem_id *id)
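Both helpers above follow the usual Spectre-v1 mitigation shape: the computed offset is expected to be within kctl->count by the time these helpers run, and array_index_nospec() clamps it so a mispredicted bounds check cannot be used to read out of bounds speculatively. The general pattern, as a small sketch (table, nr and idx are illustrative names):

#include <linux/errno.h>
#include <linux/nospec.h>

static int read_slot(const int *table, unsigned int nr, unsigned int idx)
{
        if (idx >= nr)
                return -EINVAL;
        /* Clamp idx to [0, nr) even under speculative execution. */
        idx = array_index_nospec(idx, nr);
        return table[idx];
}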
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index 779abb91df81..b4dbbf5e613a 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -47,7 +47,7 @@ TRACE_EVENT(sock_exceed_buf_limit,
strncpy(__entry->name, prot->name, 32);
__entry->sysctl_mem = prot->sysctl_mem;
__entry->allocated = allocated;
- __entry->sysctl_rmem = prot->sysctl_rmem[0];
+ __entry->sysctl_rmem = sk_get_rmem0(sk, prot);
__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
),
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index bce990f5a35d..d6be935caa50 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -377,22 +377,6 @@ DECLARE_EVENT_CLASS(xen_mmu_pgd,
DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_pin);
DEFINE_XEN_MMU_PGD_EVENT(xen_mmu_pgd_unpin);
-TRACE_EVENT(xen_mmu_flush_tlb_all,
- TP_PROTO(int x),
- TP_ARGS(x),
- TP_STRUCT__entry(__array(char, x, 0)),
- TP_fast_assign((void)x),
- TP_printk("%s", "")
- );
-
-TRACE_EVENT(xen_mmu_flush_tlb,
- TP_PROTO(int x),
- TP_ARGS(x),
- TP_STRUCT__entry(__array(char, x, 0)),
- TP_fast_assign((void)x),
- TP_printk("%s", "")
- );
-
TRACE_EVENT(xen_mmu_flush_tlb_single,
TP_PROTO(unsigned long addr),
TP_ARGS(addr),
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 03f3618612aa..376d0ab5b9f2 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -831,6 +831,7 @@ struct kvm_ppc_smmu_info {
#define KVM_CAP_GUEST_DEBUG_HW_WPS 120
#define KVM_CAP_SPLIT_IRQCHIP 121
#define KVM_CAP_IOEVENTFD_ANY_LENGTH 122
+#define KVM_CAP_S390_BPB 152
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 8dd2f9bb051e..ff501dcb2496 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2415,6 +2415,8 @@ enum nl80211_attrs {
#define NL80211_ATTR_KEYS NL80211_ATTR_KEYS
#define NL80211_ATTR_FEATURE_FLAGS NL80211_ATTR_FEATURE_FLAGS
+#define NL80211_WIPHY_NAME_MAXLEN 128
+
#define NL80211_MAX_SUPP_RATES 32
#define NL80211_MAX_SUPP_HT_RATES 77
#define NL80211_MAX_SUPP_REG_RULES 64
diff --git a/init/main.c b/init/main.c
index 204a6acadffa..5c6b838bc253 100644
--- a/init/main.c
+++ b/init/main.c
@@ -862,8 +862,11 @@ static void __init do_initcalls(void)
{
int level;
- for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++)
+ for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) {
do_initcall_level(level);
+ /* need to finish all async calls before going into next level */
+ async_synchronize_full();
+ }
}
/*
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 6375465af0a7..76f41594ddca 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1981,14 +1981,15 @@ static void audit_log_set_loginuid(kuid_t koldloginuid, kuid_t kloginuid,
if (!audit_enabled)
return;
+ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
+ if (!ab)
+ return;
+
uid = from_kuid(&init_user_ns, task_uid(current));
oldloginuid = from_kuid(&init_user_ns, koldloginuid);
loginuid = from_kuid(&init_user_ns, kloginuid),
tty = audit_get_tty(current);
- ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_LOGIN);
- if (!ab)
- return;
audit_log_format(ab, "pid=%d uid=%u", task_tgid_nr(current), uid);
audit_log_task_context(ab);
audit_log_format(ab, " old-auid=%u auid=%u tty=%s old-ses=%u ses=%u res=%d",
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 3608fa1aec8a..0eb11b4ac4c7 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -102,7 +102,7 @@ static void *array_map_lookup_elem(struct bpf_map *map, void *key)
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
struct bpf_array *array = container_of(map, struct bpf_array, map);
- u32 index = *(u32 *)key;
+ u32 index = key ? *(u32 *)key : U32_MAX;
u32 *next = (u32 *)next_key;
if (index >= array->map.max_entries) {
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 34777b3746fa..a35abe048239 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -169,12 +169,15 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
struct hlist_head *head;
struct htab_elem *l, *next_l;
u32 hash, key_size;
- int i;
+ int i = 0;
WARN_ON_ONCE(!rcu_read_lock_held());
key_size = map->key_size;
+ if (!key)
+ goto find_first_elem;
+
hash = htab_map_hash(key, key_size);
head = select_bucket(htab, hash);
@@ -182,10 +185,8 @@ static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
/* lookup the key */
l = lookup_elem_raw(head, hash, key, key_size);
- if (!l) {
- i = 0;
+ if (!l)
goto find_first_elem;
- }
/* key was found, get next key in the same bucket */
next_l = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(&l->hash_node)),
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index dc19b6e210e6..4b9bbfe764e8 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -390,14 +390,18 @@ static int map_get_next_key(union bpf_attr *attr)
if (IS_ERR(map))
return PTR_ERR(map);
- err = -ENOMEM;
- key = kmalloc(map->key_size, GFP_USER);
- if (!key)
- goto err_put;
-
- err = -EFAULT;
- if (copy_from_user(key, ukey, map->key_size) != 0)
- goto free_key;
+ if (ukey) {
+ err = -ENOMEM;
+ key = kmalloc(map->key_size, GFP_USER);
+ if (!key)
+ goto err_put;
+
+ err = -EFAULT;
+ if (copy_from_user(key, ukey, map->key_size) != 0)
+ goto free_key;
+ } else {
+ key = NULL;
+ }
err = -ENOMEM;
next_key = kmalloc(map->key_size, GFP_USER);
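With this change a NULL key to BPF_MAP_GET_NEXT_KEY means "start from the first key" instead of faulting on the copy_from_user(), so a full map walk from user space no longer needs a guessed starting key. A minimal user-space sketch of such a walk (assumes a valid map fd with 4-byte keys obtained elsewhere; error handling omitted):

#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int map_get_next_key(int map_fd, const void *key, void *next_key)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key = (uint64_t)(unsigned long)key;	/* 0/NULL: first key */
	attr.next_key = (uint64_t)(unsigned long)next_key;
	return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}

static void dump_keys(int map_fd)
{
	uint32_t cur, next;
	void *key = NULL;		/* NULL asks for the first key */

	while (map_get_next_key(map_fd, key, &next) == 0) {
		printf("key %u\n", next);
		cur = next;
		key = &cur;		/* continue from the key just returned */
	}
}
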
diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c
index 9c418002b8c1..75f835d353db 100644
--- a/kernel/events/callchain.c
+++ b/kernel/events/callchain.c
@@ -107,14 +107,8 @@ int get_callchain_buffers(void)
goto exit;
}
- if (count > 1) {
- /* If the allocation failed, give up */
- if (!callchain_cpus_entries)
- err = -ENOMEM;
- goto exit;
- }
-
- err = alloc_callchain_buffers();
+ if (count == 1)
+ err = alloc_callchain_buffers();
exit:
if (err)
atomic_dec(&nr_callchain_events);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d0f149510734..3f476cdf0f27 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -237,7 +237,7 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
- int ret = proc_dointvec(table, write, buffer, lenp, ppos);
+ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
if (ret || !write)
return ret;
@@ -8345,9 +8345,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
* __u16 sample size limit.
*/
if (attr->sample_stack_user >= USHRT_MAX)
- ret = -EINVAL;
+ return -EINVAL;
else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
- ret = -EINVAL;
+ return -EINVAL;
}
if (attr->sample_type & PERF_SAMPLE_REGS_INTR)
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 8c60a4eb4080..f4b9a369c8c3 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -14,6 +14,7 @@
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
+#include <linux/nospec.h>
#include "internal.h"
@@ -781,8 +782,10 @@ perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
return NULL;
/* AUX space */
- if (pgoff >= rb->aux_pgoff)
- return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
+ if (pgoff >= rb->aux_pgoff) {
+ int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
+ return virt_to_page(rb->aux_pages[aux_pgoff]);
+ }
}
return __perf_mmap_to_page(rb, pgoff);
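The array_index_nospec() clamp from <linux/nospec.h> bounds the AUX page index with a data-dependent mask rather than a branch, so even a mispredicted pgoff check cannot steer a speculative out-of-bounds load. A rough user-space model of the generic mask (illustrative only; the in-kernel helper has per-architecture implementations and the necessary speculation barriers):

#include <stdio.h>

/* All ones when idx < size, zero otherwise, computed without a branch
 * (same shape as the generic array_index_mask_nospec()). */
static unsigned long index_mask_nospec(unsigned long idx, unsigned long size)
{
	return ~(long)(idx | (size - 1UL - idx)) >> (8 * sizeof(long) - 1);
}

int main(void)
{
	unsigned long size = 16, idx;

	for (idx = 14; idx < 18; idx++)
		printf("idx=%2lu -> clamped=%lu\n", idx,
		       idx & index_mask_nospec(idx, size));
	return 0;
}
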
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 7b1b772ab1ce..c7f6307dd133 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -182,7 +182,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
lru_cache_add_active_or_unevictable(kpage, vma);
if (!PageAnon(page)) {
- dec_mm_counter(mm, MM_FILEPAGES);
+ dec_mm_counter(mm, mm_counter_file(page));
inc_mm_counter(mm, MM_ANONPAGES);
}
diff --git a/kernel/exit.c b/kernel/exit.c
index db3de300c157..b430898888fd 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1611,6 +1611,10 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
__WNOTHREAD|__WCLONE|__WALL))
return -EINVAL;
+ /* -INT_MIN is not defined */
+ if (upid == INT_MIN)
+ return -ESRCH;
+
if (upid == -1)
type = PIDTYPE_MAX;
else if (upid < 0) {
diff --git a/kernel/futex.c b/kernel/futex.c
index 3bc3a4b560f5..0978056889da 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -666,13 +666,14 @@ again:
* this reference was taken by ihold under the page lock
* pinning the inode in place so i_lock was unnecessary. The
* only way for this check to fail is if the inode was
- * truncated in parallel so warn for now if this happens.
+ * truncated in parallel which is almost certainly an
+ * application bug. In such a case, just retry.
*
* We are not calling into get_futex_key_refs() in file-backed
* cases, therefore a successful atomic_inc return below will
* guarantee that get_futex_key() will still imply smp_mb(); (B).
*/
- if (WARN_ON_ONCE(!atomic_inc_not_zero(&inode->i_count))) {
+ if (!atomic_inc_not_zero(&inode->i_count)) {
rcu_read_unlock();
put_page(page_head);
@@ -1452,6 +1453,45 @@ out:
return ret;
}
+static int futex_atomic_op_inuser(unsigned int encoded_op, u32 __user *uaddr)
+{
+ unsigned int op = (encoded_op & 0x70000000) >> 28;
+ unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
+ int oparg = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
+ int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);
+ int oldval, ret;
+
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) {
+ if (oparg < 0 || oparg > 31)
+ return -EINVAL;
+ oparg = 1 << oparg;
+ }
+
+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ ret = arch_futex_atomic_op_inuser(op, oparg, &oldval, uaddr);
+ if (ret)
+ return ret;
+
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ:
+ return oldval == cmparg;
+ case FUTEX_OP_CMP_NE:
+ return oldval != cmparg;
+ case FUTEX_OP_CMP_LT:
+ return oldval < cmparg;
+ case FUTEX_OP_CMP_GE:
+ return oldval >= cmparg;
+ case FUTEX_OP_CMP_LE:
+ return oldval <= cmparg;
+ case FUTEX_OP_CMP_GT:
+ return oldval > cmparg;
+ default:
+ return -ENOSYS;
+ }
+}
+
/*
* Wake up all waiters hashed on the physical page that is mapped
* to this virtual address:
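The encoded_op word unpacked above follows the FUTEX_OP() layout from the uapi <linux/futex.h>: the top nibble carries the op plus the OPARG_SHIFT flag, the next nibble the comparison, followed by two sign-extended 12-bit arguments. A standalone sketch that decodes an example encoding the same way, handy for checking a value by hand (sign_extend32() below mirrors the kernel helper of that name):

#include <linux/futex.h>
#include <stdio.h>

/* Same semantics as the kernel's sign_extend32(): sign bit at 'index'. */
static int sign_extend32(unsigned int value, int index)
{
	int shift = 31 - index;

	return (int)(value << shift) >> shift;
}

int main(void)
{
	/* Example: "atomically add 1, wake waiters if the old value was > 0" */
	unsigned int encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_GT, 0);
	unsigned int op  = (encoded_op & 0x70000000) >> 28;
	unsigned int cmp = (encoded_op & 0x0f000000) >> 24;
	int oparg  = sign_extend32((encoded_op & 0x00fff000) >> 12, 11);
	int cmparg = sign_extend32(encoded_op & 0x00000fff, 11);

	printf("op=%u cmp=%u oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}
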
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 099d2be4940b..7b746ececbf3 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -326,7 +326,8 @@ static void sugov_update_single(struct update_util_data *hook, u64 time,
* Do not reduce the frequency if the CPU has not been idle
* recently, as the reduction is likely to be premature then.
*/
- if (busy && next_f < sg_policy->next_freq) {
+ if (busy && next_f < sg_policy->next_freq &&
+ sg_policy->next_freq != UINT_MAX) {
next_f = sg_policy->next_freq;
/* Reset cached freq as next_freq has changed */
diff --git a/kernel/signal.c b/kernel/signal.c
index 4a548c6a4118..8bfbc47f0a23 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1392,6 +1392,10 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
return ret;
}
+ /* -INT_MIN is undefined. Exclude this case to avoid a UBSAN warning */
+ if (pid == INT_MIN)
+ return -ESRCH;
+
read_lock(&tasklist_lock);
if (pid != -1) {
ret = __kill_pgrp_info(sig, info,
@@ -2495,6 +2499,13 @@ void __set_current_blocked(const sigset_t *newset)
{
struct task_struct *tsk = current;
+ /*
+ * In case the signal mask hasn't changed, there is nothing we need
+ * to do. The current->blocked shouldn't be modified by any other task.
+ */
+ if (sigequalsets(&tsk->blocked, newset))
+ return;
+
spin_lock_irq(&tsk->sighand->siglock);
__set_task_blocked(tsk, newset);
spin_unlock_irq(&tsk->sighand->siglock);
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index d2a20e83ebae..22d7454b387b 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -610,6 +610,14 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
now = ktime_get();
/* Find all expired events */
for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
+ /*
+ * Required for !SMP because for_each_cpu() unconditionally
+ * reports CPU0 as set on UP kernels.
+ */
+ if (!IS_ENABLED(CONFIG_SMP) &&
+ cpumask_empty(tick_broadcast_oneshot_mask))
+ break;
+
td = &per_cpu(tick_cpu_device, cpu);
if (td->evtdev->next_event.tv64 <= now.tv64) {
cpumask_set_cpu(cpu, tmpmask);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 6cfa20304ecd..1f523c81081b 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2724,13 +2724,14 @@ static void test_cpu_buff_start(struct trace_iterator *iter)
if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
return;
- if (iter->started && cpumask_test_cpu(iter->cpu, iter->started))
+ if (cpumask_available(iter->started) &&
+ cpumask_test_cpu(iter->cpu, iter->started))
return;
if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
return;
- if (iter->started)
+ if (cpumask_available(iter->started))
cpumask_set_cpu(iter->cpu, iter->started);
/* Don't print started cpu buffer for the first entry of the trace */
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index f0e5408499b6..1ab2db6c127b 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -322,6 +322,9 @@ static int regex_match_full(char *str, struct regex *r, int len)
static int regex_match_front(char *str, struct regex *r, int len)
{
+ if (len < r->len)
+ return 0;
+
if (strncmp(str, r->pattern, r->len) == 0)
return 1;
return 0;
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 23515a716748..64304388993e 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -149,6 +149,8 @@ static void FETCH_FUNC_NAME(memory, string)(struct pt_regs *regs,
return;
ret = strncpy_from_user(dst, src, maxlen);
+ if (ret == maxlen)
+ dst[--ret] = '\0';
if (ret < 0) { /* Failed to fetch string */
((u8 *)get_rloc_data(dest))[0] = '\0';
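A strncpy_from_user() return value equal to maxlen means the destination was filled with no room left for a terminator, which is why the hunk writes the NUL by hand. Plain strncpy() has the same sharp edge, as a tiny user-space example shows:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char dst[4];

	/* strncpy() does not NUL-terminate when the source does not fit;
	 * terminate manually, just as the hunk above does for ret == maxlen. */
	strncpy(dst, "toolong", sizeof(dst));
	dst[sizeof(dst) - 1] = '\0';
	printf("%s\n", dst);	/* prints "too" */
	return 0;
}
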
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index ecd536de603a..eda85bbf1c2e 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -202,7 +202,7 @@ static int tracepoint_add_func(struct tracepoint *tp,
lockdep_is_held(&tracepoints_mutex));
old = func_add(&tp_funcs, func, prio);
if (IS_ERR(old)) {
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
return PTR_ERR(old);
}
@@ -235,7 +235,7 @@ static int tracepoint_remove_func(struct tracepoint *tp,
lockdep_is_held(&tracepoints_mutex));
old = func_remove(&tp_funcs, func);
if (IS_ERR(old)) {
- WARN_ON_ONCE(1);
+ WARN_ON_ONCE(PTR_ERR(old) != -ENOMEM);
return PTR_ERR(old);
}
diff --git a/lib/kobject.c b/lib/kobject.c
index 7cbccd2b4c72..895edb63fba4 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -234,14 +234,12 @@ static int kobject_add_internal(struct kobject *kobj)
/* be noisy on error issues */
if (error == -EEXIST)
- WARN(1, "%s failed for %s with "
- "-EEXIST, don't try to register things with "
- "the same name in the same directory.\n",
- __func__, kobject_name(kobj));
+ pr_err("%s failed for %s with -EEXIST, don't try to register things with the same name in the same directory.\n",
+ __func__, kobject_name(kobj));
else
- WARN(1, "%s failed for %s (error: %d parent: %s)\n",
- __func__, kobject_name(kobj), error,
- parent ? kobject_name(parent) : "'none'");
+ pr_err("%s failed for %s (error: %d parent: %s)\n",
+ __func__, kobject_name(kobj), error,
+ parent ? kobject_name(parent) : "'none'");
} else
kobj->state_in_sysfs = 1;
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 82169fbf2453..a65fa08f3c3c 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -12,11 +12,19 @@
#define pr_fmt(fmt) "kasan test: %s " fmt, __func__
#include <linux/kernel.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/uaccess.h>
#include <linux/module.h>
+/*
+ * Note: test functions are marked noinline so that their names appear in
+ * reports.
+ */
+
static noinline void __init kmalloc_oob_right(void)
{
char *ptr;
@@ -344,6 +352,113 @@ static noinline void __init kasan_stack_oob(void)
*(volatile char *)p;
}
+static noinline void __init ksize_unpoisons_memory(void)
+{
+ char *ptr;
+ size_t size = 123, real_size = size;
+
+ pr_info("ksize() unpoisons the whole allocated chunk\n");
+ ptr = kmalloc(size, GFP_KERNEL);
+ if (!ptr) {
+ pr_err("Allocation failed\n");
+ return;
+ }
+ real_size = ksize(ptr);
+ /* This access doesn't trigger an error. */
+ ptr[size] = 'x';
+ /* This one does. */
+ ptr[real_size] = 'y';
+ kfree(ptr);
+}
+
+static noinline void __init copy_user_test(void)
+{
+ char *kmem;
+ char __user *usermem;
+ size_t size = 10;
+ int unused;
+
+ kmem = kmalloc(size, GFP_KERNEL);
+ if (!kmem)
+ return;
+
+ usermem = (char __user *)vm_mmap(NULL, 0, PAGE_SIZE,
+ PROT_READ | PROT_WRITE | PROT_EXEC,
+ MAP_ANONYMOUS | MAP_PRIVATE, 0);
+ if (IS_ERR(usermem)) {
+ pr_err("Failed to allocate user memory\n");
+ kfree(kmem);
+ return;
+ }
+
+ pr_info("out-of-bounds in copy_from_user()\n");
+ unused = copy_from_user(kmem, usermem, size + 1);
+
+ pr_info("out-of-bounds in copy_to_user()\n");
+ unused = copy_to_user(usermem, kmem, size + 1);
+
+ pr_info("out-of-bounds in __copy_from_user()\n");
+ unused = __copy_from_user(kmem, usermem, size + 1);
+
+ pr_info("out-of-bounds in __copy_to_user()\n");
+ unused = __copy_to_user(usermem, kmem, size + 1);
+
+ pr_info("out-of-bounds in __copy_from_user_inatomic()\n");
+ unused = __copy_from_user_inatomic(kmem, usermem, size + 1);
+
+ pr_info("out-of-bounds in __copy_to_user_inatomic()\n");
+ unused = __copy_to_user_inatomic(usermem, kmem, size + 1);
+
+ pr_info("out-of-bounds in strncpy_from_user()\n");
+ unused = strncpy_from_user(kmem, usermem, size + 1);
+
+ vm_munmap((unsigned long)usermem, PAGE_SIZE);
+ kfree(kmem);
+}
+
+static noinline void __init use_after_scope_test(void)
+{
+ volatile char *volatile p;
+
+ pr_info("use-after-scope on int\n");
+ {
+ int local = 0;
+
+ p = (char *)&local;
+ }
+ p[0] = 1;
+ p[3] = 1;
+
+ pr_info("use-after-scope on array\n");
+ {
+ char local[1024] = {0};
+
+ p = local;
+ }
+ p[0] = 1;
+ p[1023] = 1;
+}
+
+static noinline void __init kasan_alloca_oob_left(void)
+{
+ volatile int i = 10;
+ char alloca_array[i];
+ char *p = alloca_array - 1;
+
+ pr_info("out-of-bounds to left on alloca\n");
+ *(volatile char *)p;
+}
+
+static noinline void __init kasan_alloca_oob_right(void)
+{
+ volatile int i = 10;
+ char alloca_array[i];
+ char *p = alloca_array + i;
+
+ pr_info("out-of-bounds to right on alloca\n");
+ *(volatile char *)p;
+}
+
static int __init kmalloc_tests_init(void)
{
kmalloc_oob_right();
@@ -367,6 +482,11 @@ static int __init kmalloc_tests_init(void)
kmem_cache_oob();
kasan_stack_oob();
kasan_global_oob();
+ kasan_alloca_oob_left();
+ kasan_alloca_oob_right();
+ ksize_unpoisons_memory();
+ copy_user_test();
+ use_after_scope_test();
return -EAGAIN;
}
diff --git a/mm/Kconfig b/mm/Kconfig
index 86ee39e8b470..a604b10c6bbc 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -667,6 +667,7 @@ config DEFERRED_STRUCT_PAGE_INIT
default n
depends on ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT
depends on MEMORY_HOTPLUG
+ depends on !NEED_PER_CPU_KM
help
Ordinarily all struct pages are initialised during early boot in a
single thread. On very large machines this can take a considerable
@@ -733,3 +734,11 @@ config PROCESS_RECLAIM
(addr, addr + size-bytes) of the process.
Any other value is ignored.
+
+config VM_MAX_READAHEAD
+ int "default max readahead window size"
+ default 128
+ help
+ This sets the VM_MAX_READAHEAD value (in kilobytes) and allows the
+ readahead window to grow to the configured maximum size. A larger
+ window benefits sequential read throughput and thus early boot
+ performance.
diff --git a/mm/filemap.c b/mm/filemap.c
index 750af2219081..f3d6d89cfd61 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1583,6 +1583,15 @@ find_page:
index, last_index - index);
}
if (!PageUptodate(page)) {
+ /*
+ * See comment in do_read_cache_page on why
+ * wait_on_page_locked is used to avoid unnecessary
+ * serialisations and why it's safe.
+ */
+ wait_on_page_locked_killable(page);
+ if (PageUptodate(page))
+ goto page_ok;
+
if (inode->i_blkbits == PAGE_CACHE_SHIFT ||
!mapping->a_ops->is_partially_uptodate)
goto page_not_up_to_date;
@@ -2217,7 +2226,7 @@ static struct page *wait_on_page_read(struct page *page)
return page;
}
-static struct page *__read_cache_page(struct address_space *mapping,
+static struct page *do_read_cache_page(struct address_space *mapping,
pgoff_t index,
int (*filler)(void *, struct page *),
void *data,
@@ -2239,53 +2248,74 @@ repeat:
/* Presumably ENOMEM for radix tree node */
return ERR_PTR(err);
}
+
+filler:
err = filler(data, page);
if (err < 0) {
page_cache_release(page);
- page = ERR_PTR(err);
- } else {
- page = wait_on_page_read(page);
+ return ERR_PTR(err);
}
- }
- return page;
-}
-static struct page *do_read_cache_page(struct address_space *mapping,
- pgoff_t index,
- int (*filler)(void *, struct page *),
- void *data,
- gfp_t gfp)
-
-{
- struct page *page;
- int err;
+ page = wait_on_page_read(page);
+ if (IS_ERR(page))
+ return page;
+ goto out;
+ }
+ if (PageUptodate(page))
+ goto out;
-retry:
- page = __read_cache_page(mapping, index, filler, data, gfp);
- if (IS_ERR(page))
- return page;
+ /*
+ * Page is not up to date and may be locked due to one of the following
+ * case a: Page is being filled and the page lock is held
+ * case b: Read/write error clearing the page uptodate status
+ * case c: Truncation in progress (page locked)
+ * case d: Reclaim in progress
+ *
+ * Case a, the page will be up to date when the page is unlocked.
+ * There is no need to serialise on the page lock here as the page
+ * is pinned so the lock gives no additional protection. Even if the
+ * page is truncated, the data is still valid if PageUptodate as
+ * it's a race with truncate.
+ * Case b, the page will not be up to date
+ * Case c, the page may be truncated but in itself, the data may still
+ * be valid after IO completes as it's a read vs truncate race. The
+ * operation must restart if the page is not uptodate on unlock but
+ * otherwise serialising on page lock to stabilise the mapping gives
+ * no additional guarantees to the caller as the page lock is
+ * released before return.
+ * Case d, similar to truncation. If reclaim holds the page lock, it
+ * will be a race with remove_mapping that determines if the mapping
+ * is valid on unlock but otherwise the data is valid and there is
+ * no need to serialise with page lock.
+ *
+ * As the page lock gives no additional guarantee, we optimistically
+ * wait on the page to be unlocked and check if it's up to date and
+ * use the page if it is. Otherwise, the page lock is required to
+ * distinguish between the different cases. The motivation is that we
+ * avoid spurious serialisations and wakeups when multiple processes
+ * wait on the same page for IO to complete.
+ */
+ wait_on_page_locked(page);
if (PageUptodate(page))
goto out;
+ /* Distinguish between all the cases under the safety of the lock */
lock_page(page);
+
+ /* Case c or d, restart the operation */
if (!page->mapping) {
unlock_page(page);
page_cache_release(page);
- goto retry;
+ goto repeat;
}
+
+ /* Someone else locked and filled the page in a very small window */
if (PageUptodate(page)) {
unlock_page(page);
goto out;
}
- err = filler(data, page);
- if (err < 0) {
- page_cache_release(page);
- return ERR_PTR(err);
- } else {
- page = wait_on_page_read(page);
- if (IS_ERR(page))
- return page;
- }
+ goto filler;
+
out:
mark_page_accessed(page);
return page;
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 88af13c00d3c..fb99d481ac78 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -748,6 +748,74 @@ EXPORT_SYMBOL(__asan_storeN_noabort);
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);
+/* Emitted by compiler to poison large objects when they go out of scope. */
+void __asan_poison_stack_memory(const void *addr, size_t size)
+{
+ /*
+ * Addr is KASAN_SHADOW_SCALE_SIZE-aligned and the object is surrounded
+ * by redzones, so we simply round up size to simplify logic.
+ */
+ kasan_poison_shadow(addr, round_up(size, KASAN_SHADOW_SCALE_SIZE),
+ KASAN_USE_AFTER_SCOPE);
+}
+EXPORT_SYMBOL(__asan_poison_stack_memory);
+
+/* Emitted by compiler to unpoison large objects when they go into scope. */
+void __asan_unpoison_stack_memory(const void *addr, size_t size)
+{
+ kasan_unpoison_shadow(addr, size);
+}
+EXPORT_SYMBOL(__asan_unpoison_stack_memory);
+
+/* Emitted by compiler to poison alloca()ed objects. */
+void __asan_alloca_poison(unsigned long addr, size_t size)
+{
+ size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+ size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
+ rounded_up_size;
+ size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);
+
+ const void *left_redzone = (const void *)(addr -
+ KASAN_ALLOCA_REDZONE_SIZE);
+ const void *right_redzone = (const void *)(addr + rounded_up_size);
+
+ WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));
+
+ kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
+ size - rounded_down_size);
+ kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
+ KASAN_ALLOCA_LEFT);
+ kasan_poison_shadow(right_redzone,
+ padding_size + KASAN_ALLOCA_REDZONE_SIZE,
+ KASAN_ALLOCA_RIGHT);
+}
+EXPORT_SYMBOL(__asan_alloca_poison);
+
+/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
+void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
+{
+ if (unlikely(!stack_top || stack_top > stack_bottom))
+ return;
+
+ kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
+}
+EXPORT_SYMBOL(__asan_allocas_unpoison);
+
+/* Emitted by the compiler to [un]poison local variables. */
+#define DEFINE_ASAN_SET_SHADOW(byte) \
+ void __asan_set_shadow_##byte(const void *addr, size_t size) \
+ { \
+ __memset((void *)addr, 0x##byte, size); \
+ } \
+ EXPORT_SYMBOL(__asan_set_shadow_##byte)
+
+DEFINE_ASAN_SET_SHADOW(00);
+DEFINE_ASAN_SET_SHADOW(f1);
+DEFINE_ASAN_SET_SHADOW(f2);
+DEFINE_ASAN_SET_SHADOW(f3);
+DEFINE_ASAN_SET_SHADOW(f5);
+DEFINE_ASAN_SET_SHADOW(f8);
+
#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
unsigned long action, void *data)
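The redzone placement in __asan_alloca_poison() is pure arithmetic on the requested size. A user-space sketch of the same computation (assuming the generic KASAN_SHADOW_SCALE_SIZE of 8; KASAN_ALLOCA_REDZONE_SIZE is 32 as defined in this patch):

#include <stdio.h>

#define SHADOW_SCALE	8	/* assumed KASAN_SHADOW_SCALE_SIZE */
#define ALLOCA_REDZONE	32	/* KASAN_ALLOCA_REDZONE_SIZE from this patch */

#define ROUND_UP(x, a)		((((x) + (a) - 1) / (a)) * (a))
#define ROUND_DOWN(x, a)	(((x) / (a)) * (a))

int main(void)
{
	unsigned long size = 50;	/* example alloca() request, in bytes */
	unsigned long up   = ROUND_UP(size, SHADOW_SCALE);
	unsigned long down = ROUND_DOWN(size, SHADOW_SCALE);
	unsigned long pad  = ROUND_UP(size, ALLOCA_REDZONE) - up;

	printf("object of %lu bytes:\n", size);
	printf("  unpoisoned tail:  [%lu, %lu)\n", down, size);
	printf("  left redzone:     %d bytes before the object\n", ALLOCA_REDZONE);
	printf("  right redzone:    %lu bytes at offset %lu (padding %lu + %d)\n",
	       pad + ALLOCA_REDZONE, up, pad, ALLOCA_REDZONE);
	return 0;
}
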
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 03f4545b103d..b13de7622ee6 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -21,6 +21,15 @@
#define KASAN_STACK_MID 0xF2
#define KASAN_STACK_RIGHT 0xF3
#define KASAN_STACK_PARTIAL 0xF4
+#define KASAN_USE_AFTER_SCOPE 0xF8
+
+/*
+ * alloca redzone shadow values
+ */
+#define KASAN_ALLOCA_LEFT 0xCA
+#define KASAN_ALLOCA_RIGHT 0xCB
+
+#define KASAN_ALLOCA_REDZONE_SIZE 32
/* Don't break randconfig/all*config builds */
#ifndef KASAN_ABI_VERSION
@@ -116,4 +125,48 @@ static inline void quarantine_reduce(void) { }
static inline void quarantine_remove_cache(struct kmem_cache *cache) { }
#endif
+/*
+ * Exported functions for interfaces called from assembly or from generated
+ * code. Declarations here to avoid warning about missing declarations.
+ */
+asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
+void __asan_register_globals(struct kasan_global *globals, size_t size);
+void __asan_unregister_globals(struct kasan_global *globals, size_t size);
+void __asan_loadN(unsigned long addr, size_t size);
+void __asan_storeN(unsigned long addr, size_t size);
+void __asan_handle_no_return(void);
+void __asan_poison_stack_memory(const void *addr, size_t size);
+void __asan_unpoison_stack_memory(const void *addr, size_t size);
+void __asan_alloca_poison(unsigned long addr, size_t size);
+void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom);
+
+void __asan_load1(unsigned long addr);
+void __asan_store1(unsigned long addr);
+void __asan_load2(unsigned long addr);
+void __asan_store2(unsigned long addr);
+void __asan_load4(unsigned long addr);
+void __asan_store4(unsigned long addr);
+void __asan_load8(unsigned long addr);
+void __asan_store8(unsigned long addr);
+void __asan_load16(unsigned long addr);
+void __asan_store16(unsigned long addr);
+
+void __asan_load1_noabort(unsigned long addr);
+void __asan_store1_noabort(unsigned long addr);
+void __asan_load2_noabort(unsigned long addr);
+void __asan_store2_noabort(unsigned long addr);
+void __asan_load4_noabort(unsigned long addr);
+void __asan_store4_noabort(unsigned long addr);
+void __asan_load8_noabort(unsigned long addr);
+void __asan_store8_noabort(unsigned long addr);
+void __asan_load16_noabort(unsigned long addr);
+void __asan_store16_noabort(unsigned long addr);
+
+void __asan_set_shadow_00(const void *addr, size_t size);
+void __asan_set_shadow_f1(const void *addr, size_t size);
+void __asan_set_shadow_f2(const void *addr, size_t size);
+void __asan_set_shadow_f3(const void *addr, size_t size);
+void __asan_set_shadow_f5(const void *addr, size_t size);
+void __asan_set_shadow_f8(const void *addr, size_t size);
+
#endif
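The __asan_set_shadow_* declarations here correspond to the DEFINE_ASAN_SET_SHADOW() instantiations in the mm/kasan/kasan.c hunk above; each one expands to a trivially typed wrapper around __memset() of shadow memory, for example:

/* Expansion of DEFINE_ASAN_SET_SHADOW(f1) from the hunk above. */
void __asan_set_shadow_f1(const void *addr, size_t size)
{
	__memset((void *)addr, 0xf1, size);
}
EXPORT_SYMBOL(__asan_set_shadow_f1);
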
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 2270122d2a64..a4f4802376e0 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -91,6 +91,13 @@ static void print_error_description(struct kasan_access_info *info)
case KASAN_KMALLOC_FREE:
bug_type = "use-after-free";
break;
+ case KASAN_USE_AFTER_SCOPE:
+ bug_type = "use-after-scope";
+ break;
+ case KASAN_ALLOCA_LEFT:
+ case KASAN_ALLOCA_RIGHT:
+ bug_type = "alloca-out-of-bounds";
+ break;
}
pr_err("BUG: KASAN: %s in %pS at addr %p\n",
diff --git a/mm/memory.c b/mm/memory.c
index 78ab57141731..76697e624fc8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -872,10 +872,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
} else if (is_migration_entry(entry)) {
page = migration_entry_to_page(entry);
- if (PageAnon(page))
- rss[MM_ANONPAGES]++;
- else
- rss[MM_FILEPAGES]++;
+ rss[mm_counter(page)]++;
if (is_write_migration_entry(entry) &&
is_cow_mapping(vm_flags)) {
@@ -914,10 +911,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
if (page) {
get_page(page);
page_dup_rmap(page);
- if (PageAnon(page))
- rss[MM_ANONPAGES]++;
- else
- rss[MM_FILEPAGES]++;
+ rss[mm_counter(page)]++;
}
out_set_pte:
@@ -1154,9 +1148,8 @@ again:
tlb_remove_tlb_entry(tlb, pte, addr);
if (unlikely(!page))
continue;
- if (PageAnon(page))
- rss[MM_ANONPAGES]--;
- else {
+
+ if (!PageAnon(page)) {
if (pte_dirty(ptent)) {
force_flush = 1;
set_page_dirty(page);
@@ -1164,8 +1157,8 @@ again:
if (pte_young(ptent) &&
likely(!(vma->vm_flags & VM_SEQ_READ)))
mark_page_accessed(page);
- rss[MM_FILEPAGES]--;
}
+ rss[mm_counter(page)]--;
page_remove_rmap(page);
if (unlikely(page_mapcount(page) < 0))
print_bad_pte(vma, addr, ptent, page);
@@ -1187,11 +1180,7 @@ again:
struct page *page;
page = migration_entry_to_page(entry);
-
- if (PageAnon(page))
- rss[MM_ANONPAGES]--;
- else
- rss[MM_FILEPAGES]--;
+ rss[mm_counter(page)]--;
}
if (unlikely(!free_swap_and_cache(entry)))
print_bad_pte(vma, addr, ptent, NULL);
@@ -1501,7 +1490,7 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
/* Ok, finally just insert the thing.. */
get_page(page);
- inc_mm_counter_fast(mm, MM_FILEPAGES);
+ inc_mm_counter_fast(mm, mm_counter_file(page));
page_add_file_rmap(page);
set_pte_at(mm, addr, pte, mk_pte(page, prot));
@@ -2153,7 +2142,8 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
if (likely(pte_same(*page_table, orig_pte))) {
if (old_page) {
if (!PageAnon(old_page)) {
- dec_mm_counter_fast(mm, MM_FILEPAGES);
+ dec_mm_counter_fast(mm,
+ mm_counter_file(old_page));
inc_mm_counter_fast(mm, MM_ANONPAGES);
}
} else {
@@ -2840,7 +2830,7 @@ void do_set_pte(struct vm_area_struct *vma, unsigned long address,
inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
} else {
- inc_mm_counter_fast(vma->vm_mm, MM_FILEPAGES);
+ inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
page_add_file_rmap(page);
}
set_pte_at(vma->vm_mm, address, pte, entry);
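For reference, the mm_counter()/mm_counter_file() helpers that these hunks switch to select the RSS counter from the page type; they live in include/linux/mm.h and look roughly like the sketch below (reconstructed from the same per-type RSS accounting series, not part of this hunk):

static inline int mm_counter_file(struct page *page)
{
	/* shmem/tmpfs pages are swap-backed file pages */
	if (PageSwapBacked(page))
		return MM_SHMEMPAGES;
	return MM_FILEPAGES;
}

static inline int mm_counter(struct page *page)
{
	if (PageAnon(page))
		return MM_ANONPAGES;
	return mm_counter_file(page);
}
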
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 90515f4d9786..997e28eb505c 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -585,10 +585,11 @@ void oom_kill_process(struct oom_control *oc, struct task_struct *p,
*/
do_send_sig_info(SIGKILL, SEND_SIG_FORCED, victim, true);
mark_oom_victim(victim);
- pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
+ pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
K(get_mm_counter(victim->mm, MM_ANONPAGES)),
- K(get_mm_counter(victim->mm, MM_FILEPAGES)));
+ K(get_mm_counter(victim->mm, MM_FILEPAGES)),
+ K(get_mm_counter(victim->mm, MM_SHMEMPAGES)));
task_unlock(victim);
/*
diff --git a/mm/percpu.c b/mm/percpu.c
index 3b8ac4d61915..1d0ff6a10142 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -68,6 +68,7 @@
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
+#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
diff --git a/mm/rmap.c b/mm/rmap.c
index effcea83ac4e..6b424cc6bd6f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1384,10 +1384,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
if (PageHuge(page)) {
hugetlb_count_sub(1 << compound_order(page), mm);
} else {
- if (PageAnon(page))
- dec_mm_counter(mm, MM_ANONPAGES);
- else
- dec_mm_counter(mm, MM_FILEPAGES);
+ dec_mm_counter(mm, mm_counter(page));
}
set_pte_at(mm, address, pte,
swp_entry_to_pte(make_hwpoison_entry(page)));
@@ -1397,10 +1394,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
* interest anymore. Simply discard the pte, vmscan
* will take care of the rest.
*/
- if (PageAnon(page))
- dec_mm_counter(mm, MM_ANONPAGES);
- else
- dec_mm_counter(mm, MM_FILEPAGES);
+ dec_mm_counter(mm, mm_counter(page));
} else if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION)) {
swp_entry_t entry;
pte_t swp_pte;
@@ -1440,7 +1434,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
swp_pte = pte_swp_mksoft_dirty(swp_pte);
set_pte_at(mm, address, pte, swp_pte);
} else
- dec_mm_counter(mm, MM_FILEPAGES);
+ dec_mm_counter(mm, mm_counter_file(page));
page_remove_rmap(page);
page_cache_release(page);
diff --git a/mm/util.c b/mm/util.c
index 4760c0c9d1bd..dcbc8e0afea8 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -476,17 +476,25 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
int res = 0;
unsigned int len;
struct mm_struct *mm = get_task_mm(task);
+ unsigned long arg_start, arg_end, env_start, env_end;
if (!mm)
goto out;
if (!mm->arg_end)
goto out_mm; /* Shh! No looking before we're done */
- len = mm->arg_end - mm->arg_start;
+ down_read(&mm->mmap_sem);
+ arg_start = mm->arg_start;
+ arg_end = mm->arg_end;
+ env_start = mm->env_start;
+ env_end = mm->env_end;
+ up_read(&mm->mmap_sem);
+
+ len = arg_end - arg_start;
if (len > buflen)
len = buflen;
- res = access_process_vm(task, mm->arg_start, buffer, len, 0);
+ res = access_process_vm(task, arg_start, buffer, len, 0);
/*
* If the nul at the end of args has been overwritten, then
@@ -497,10 +505,10 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen)
if (len < res) {
res = len;
} else {
- len = mm->env_end - mm->env_start;
+ len = env_end - env_start;
if (len > buflen - res)
len = buflen - res;
- res += access_process_vm(task, mm->env_start,
+ res += access_process_vm(task, env_start,
buffer+res, len, 0);
res = strnlen(buffer, res);
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 67da9446135d..ef5f8aa3b7e3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2194,11 +2194,17 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness,
}
/*
- * There is enough inactive page cache, do not reclaim
- * anything from the anonymous working set right now.
+ * If there is enough inactive page cache, i.e. if the size of the
+ * inactive list is greater than that of the active list *and* the
+ * inactive list actually has some pages to scan on this priority, we
+ * do not reclaim anything from the anonymous working set right now.
+ * Without the second condition we could end up never scanning an
+ * lruvec even if it has plenty of old anonymous pages unless the
+ * system is under heavy pressure.
*/
if (!IS_ENABLED(CONFIG_BALANCE_ANON_FILE_RECLAIM) &&
- !inactive_file_is_low(lruvec)) {
+ !inactive_file_is_low(lruvec) &&
+ get_lru_size(lruvec, LRU_INACTIVE_FILE) >> sc->priority) {
scan_balance = SCAN_FILE;
goto out;
}
diff --git a/net/Kconfig b/net/Kconfig
index 5caebee9f020..b01f9fcf2d02 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -410,12 +410,12 @@ config LWTUNNEL
weight tunnel endpoint. Tunnel encapsulation parameters are stored
with light weight tunnel state associated with fib routes.
+source "net/ipc_router/Kconfig"
+
config DST_CACHE
bool
default n
-source "net/ipc_router/Kconfig"
-
endif # if NET
# Used by archs to tell that they support BPF_JIT
diff --git a/net/atm/lec.c b/net/atm/lec.c
index cd3b37989057..10e4066991b8 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -41,6 +41,9 @@ static unsigned char bridge_ula_lec[] = { 0x01, 0x80, 0xc2, 0x00, 0x00 };
#include <linux/module.h>
#include <linux/init.h>
+/* Hardening for Spectre-v1 */
+#include <linux/nospec.h>
+
#include "lec.h"
#include "lec_arpc.h"
#include "resources.h"
@@ -697,8 +700,10 @@ static int lec_vcc_attach(struct atm_vcc *vcc, void __user *arg)
bytes_left = copy_from_user(&ioc_data, arg, sizeof(struct atmlec_ioc));
if (bytes_left != 0)
pr_info("copy from user failed for %d bytes\n", bytes_left);
- if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF ||
- !dev_lec[ioc_data.dev_num])
+ if (ioc_data.dev_num < 0 || ioc_data.dev_num >= MAX_LEC_ITF)
+ return -EINVAL;
+ ioc_data.dev_num = array_index_nospec(ioc_data.dev_num, MAX_LEC_ITF);
+ if (!dev_lec[ioc_data.dev_num])
return -EINVAL;
vpriv = kmalloc(sizeof(struct lec_vcc_priv), GFP_KERNEL);
if (!vpriv)
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index ec02f5869a78..3400b1e47668 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -456,8 +456,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
return -ELOOP;
- /* Device is already being bridged */
- if (br_port_exists(dev))
+ /* Device has master upper dev */
+ if (netdev_master_upper_dev_get(dev))
return -EBUSY;
/* No bridging devices that dislike that (e.g. wireless) */
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index a6b2f2138c9d..ad3c9e96a275 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -2531,6 +2531,11 @@ static int try_write(struct ceph_connection *con)
int ret = 1;
dout("try_write start %p state %lu\n", con, con->state);
+ if (con->state != CON_STATE_PREOPEN &&
+ con->state != CON_STATE_CONNECTING &&
+ con->state != CON_STATE_NEGOTIATING &&
+ con->state != CON_STATE_OPEN)
+ return 0;
more:
dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
@@ -2556,6 +2561,8 @@ more:
}
more_kvec:
+ BUG_ON(!con->sock);
+
/* kvec data queued? */
if (con->out_kvec_left) {
ret = write_partial_kvec(con);
diff --git a/net/compat.c b/net/compat.c
index 0ccf3ecf6bbb..17e97b106458 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -358,7 +358,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
if (optname == SO_ATTACH_FILTER)
return do_set_attach_filter(sock, level, optname,
optval, optlen);
- if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+ if (!COMPAT_USE_64BIT_TIME &&
+ (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
return do_set_sock_timeout(sock, level, optname, optval, optlen);
return sock_setsockopt(sock, level, optname, optval, optlen);
@@ -423,7 +424,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
- if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+ if (!COMPAT_USE_64BIT_TIME &&
+ (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
return do_get_sock_timeout(sock, level, optname, optval, optlen);
return sock_getsockopt(sock, level, optname, optval, optlen);
}
diff --git a/net/core/Makefile b/net/core/Makefile
index 340c45af4472..0d35bba614a9 100644
--- a/net/core/Makefile
+++ b/net/core/Makefile
@@ -24,5 +24,5 @@ obj-$(CONFIG_NET_PTP_CLASSIFY) += ptp_classifier.o
obj-$(CONFIG_CGROUP_NET_PRIO) += netprio_cgroup.o
obj-$(CONFIG_CGROUP_NET_CLASSID) += netclassid_cgroup.o
obj-$(CONFIG_LWTUNNEL) += lwtunnel.o
-obj-$(CONFIG_DST_CACHE) += dst_cache.o
obj-$(CONFIG_SOCKEV_NLMCAST) += sockev_nlmcast.o
+obj-$(CONFIG_DST_CACHE) += dst_cache.o
diff --git a/net/core/dev.c b/net/core/dev.c
index 27e73f8ce4cf..7c61d6ee4120 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2708,7 +2708,7 @@ netdev_features_t passthru_features_check(struct sk_buff *skb,
}
EXPORT_SYMBOL(passthru_features_check);
-static netdev_features_t dflt_features_check(const struct sk_buff *skb,
+static netdev_features_t dflt_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features)
{
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c
index c0548d268e1a..e3e6a3e2ca22 100644
--- a/net/core/dev_addr_lists.c
+++ b/net/core/dev_addr_lists.c
@@ -57,8 +57,8 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
return -EINVAL;
list_for_each_entry(ha, &list->list, list) {
- if (!memcmp(ha->addr, addr, addr_len) &&
- ha->type == addr_type) {
+ if (ha->type == addr_type &&
+ !memcmp(ha->addr, addr, addr_len)) {
if (global) {
/* check if addr is already used as global */
if (ha->global_use)
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index fa3d73c1fe94..982abf17ea82 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -54,7 +54,8 @@ do { \
static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+ struct net_device *dev);
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
@@ -254,8 +255,7 @@ int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev);
- pneigh_ifdown(tbl, dev);
- write_unlock_bh(&tbl->lock);
+ pneigh_ifdown_and_unlock(tbl, dev);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
@@ -645,9 +645,10 @@ int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
return -ENOENT;
}
-static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
+ struct net_device *dev)
{
- struct pneigh_entry *n, **np;
+ struct pneigh_entry *n, **np, *freelist = NULL;
u32 h;
for (h = 0; h <= PNEIGH_HASHMASK; h++) {
@@ -655,16 +656,23 @@ static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
while ((n = *np) != NULL) {
if (!dev || n->dev == dev) {
*np = n->next;
- if (tbl->pdestructor)
- tbl->pdestructor(n);
- if (n->dev)
- dev_put(n->dev);
- kfree(n);
+ n->next = freelist;
+ freelist = n;
continue;
}
np = &n->next;
}
}
+ write_unlock_bh(&tbl->lock);
+ while ((n = freelist)) {
+ freelist = n->next;
+ n->next = NULL;
+ if (tbl->pdestructor)
+ tbl->pdestructor(n);
+ if (n->dev)
+ dev_put(n->dev);
+ kfree(n);
+ }
return -ENOENT;
}
@@ -2296,12 +2304,16 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
err = nlmsg_parse(nlh, sizeof(struct ndmsg), tb, NDA_MAX, NULL);
if (!err) {
- if (tb[NDA_IFINDEX])
+ if (tb[NDA_IFINDEX]) {
+ if (nla_len(tb[NDA_IFINDEX]) != sizeof(u32))
+ return -EINVAL;
filter_idx = nla_get_u32(tb[NDA_IFINDEX]);
-
- if (tb[NDA_MASTER])
+ }
+ if (tb[NDA_MASTER]) {
+ if (nla_len(tb[NDA_MASTER]) != sizeof(u32))
+ return -EINVAL;
filter_master_idx = nla_get_u32(tb[NDA_MASTER]);
-
+ }
if (filter_idx || filter_master_idx)
flags |= NLM_F_DUMP_FILTERED;
}
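pneigh_ifdown_and_unlock() above is the usual "unlink under the lock, tear down after dropping it" shape: entries are moved to a private freelist while tbl->lock is write-held, and the destructor/dev_put()/kfree() work runs only after the unlock. A standalone user-space sketch of the same pattern:

#include <pthread.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
	/* payload ... */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *table;

static void flush_table(void)
{
	struct entry *n, **np, *freelist = NULL;

	pthread_mutex_lock(&table_lock);
	np = &table;
	while ((n = *np) != NULL) {
		*np = n->next;		/* unlink under the lock ... */
		n->next = freelist;
		freelist = n;
	}
	pthread_mutex_unlock(&table_lock);

	while ((n = freelist) != NULL) {	/* ... destroy outside it */
		freelist = n->next;
		free(n);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		struct entry *e = calloc(1, sizeof(*e));

		e->next = table;
		table = e;
	}
	flush_table();
	return 0;
}
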
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 440aa9f6e0a8..b55f340f5f71 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -666,7 +666,7 @@ int netpoll_setup(struct netpoll *np)
int err;
rtnl_lock();
- if (np->dev_name) {
+ if (np->dev_name[0]) {
struct net *net = current->nsproxy->net_ns;
ndev = __dev_get_by_name(net, np->dev_name);
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3c5e3c022232..0d8383c8a117 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -853,6 +853,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
n->cloned = 1;
n->nohdr = 0;
+ n->peeked = 0;
n->destructor = NULL;
C(tail);
C(end);
diff --git a/net/core/sock.c b/net/core/sock.c
index acc60ec11630..31cdfe36db91 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1486,7 +1486,7 @@ void sk_destruct(struct sock *sk)
static void __sk_free(struct sock *sk)
{
- if (unlikely(sock_diag_has_destroy_listeners(sk) && sk->sk_net_refcnt))
+ if (unlikely(sk->sk_net_refcnt && sock_diag_has_destroy_listeners(sk)))
sock_diag_broadcast_destroy(sk);
else
sk_destruct(sk);
@@ -2120,16 +2120,18 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
/* guarantee minimum buffer size under pressure */
if (kind == SK_MEM_RECV) {
- if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
+ if (atomic_read(&sk->sk_rmem_alloc) < sk_get_rmem0(sk, prot))
return 1;
} else { /* SK_MEM_SEND */
+ int wmem0 = sk_get_wmem0(sk, prot);
+
if (sk->sk_type == SOCK_STREAM) {
- if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
+ if (sk->sk_wmem_queued < wmem0)
return 1;
- } else if (atomic_read(&sk->sk_wmem_alloc) <
- prot->sysctl_wmem[0])
+ } else if (atomic_read(&sk->sk_wmem_alloc) < wmem0) {
return 1;
+ }
}
if (sk_has_memory_pressure(sk)) {
@@ -2446,6 +2448,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
sk->sk_max_pacing_rate = ~0U;
sk->sk_pacing_rate = ~0U;
+ sk->sk_pacing_shift = 10;
sk->sk_incoming_cpu = -1;
/*
* Before updating sk_refcnt, we must commit prior changes to memory
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 7753681195c1..86a2ed0fb219 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
DCCPF_SEQ_WMAX));
}
+static void dccp_tasklet_schedule(struct sock *sk)
+{
+ struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
+
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+ sock_hold(sk);
+ __tasklet_schedule(t);
+ }
+}
+
static void ccid2_hc_tx_rto_expire(unsigned long data)
{
struct sock *sk = (struct sock *)data;
@@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)
/* if we were blocked before, we may now send cwnd=1 packet */
if (sender_was_blocked)
- tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+ dccp_tasklet_schedule(sk);
/* restart backed-off timer */
sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
out:
@@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
done:
/* check if incoming Acks allow pending packets to be sent */
if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
- tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+ dccp_tasklet_schedule(sk);
dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 6eb2bbf9873b..45fd82e61e79 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -618,6 +618,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
ireq = inet_rsk(req);
sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+ ireq->ir_mark = inet_request_mark(sk, skb);
ireq->ireq_family = AF_INET;
ireq->ir_iif = sk->sk_bound_dev_if;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 09a9ab65f4e1..0bf41faeffc4 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -345,6 +345,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
ireq->ireq_family = AF_INET6;
+ ireq->ir_mark = inet_request_mark(sk, skb);
if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 3ef7acef3ce8..aa7c7dad7f96 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -230,12 +230,12 @@ static void dccp_write_xmitlet(unsigned long data)
else
dccp_write_xmit(sk);
bh_unlock_sock(sk);
+ sock_put(sk);
}
static void dccp_write_xmit_timer(unsigned long data)
{
dccp_write_xmitlet(data);
- sock_put((struct sock *)data);
}
void dccp_init_xmit_timers(struct sock *sk)
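Taken together, the two dccp hunks make scheduling and completion symmetric: dccp_tasklet_schedule() takes the socket reference only when it actually flips the tasklet into the scheduled state, and the sock_put() moves into the tasklet body, so each hold is balanced by exactly one put regardless of how many times scheduling is attempted. A user-space model of that discipline (hypothetical names, C11 atomics):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int scheduled;		/* models TASKLET_STATE_SCHED */
static atomic_int sk_refcnt = 1;	/* models the socket refcount */

static void tasklet_schedule_once(void)
{
	/* Only the caller that flips 0 -> 1 takes the reference. */
	if (!atomic_exchange(&scheduled, 1)) {
		atomic_fetch_add(&sk_refcnt, 1);	/* sock_hold() */
		printf("scheduled, refcnt=%d\n", atomic_load(&sk_refcnt));
	}
}

static void tasklet_run(void)
{
	atomic_store(&scheduled, 0);
	/* ... transmit work ... */
	atomic_fetch_sub(&sk_refcnt, 1);	/* sock_put() in the tasklet */
	printf("ran, refcnt=%d\n", atomic_load(&sk_refcnt));
}

int main(void)
{
	tasklet_schedule_once();
	tasklet_schedule_once();	/* second call takes no extra reference */
	tasklet_run();
	return 0;
}
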
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 6abc5012200b..e26df2764e83 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -25,6 +25,7 @@
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/string.h>
+#include <linux/ratelimit.h>
#include <linux/kernel.h>
#include <linux/keyctl.h>
#include <linux/err.h>
@@ -91,9 +92,9 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
next_opt = memchr(opt, '#', end - opt) ?: end;
opt_len = next_opt - opt;
- if (!opt_len) {
- printk(KERN_WARNING
- "Empty option to dns_resolver key\n");
+ if (opt_len <= 0 || opt_len > 128) {
+ pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
+ opt_len);
return -EINVAL;
}
@@ -127,10 +128,8 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
}
bad_option_value:
- printk(KERN_WARNING
- "Option '%*.*s' to dns_resolver key:"
- " bad/missing value\n",
- opt_nlen, opt_nlen, opt);
+ pr_warn_ratelimited("Option '%*.*s' to dns_resolver key: bad/missing value\n",
+ opt_nlen, opt_nlen, opt);
return -EINVAL;
} while (opt = next_opt + 1, opt < end);
}
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index c67f9bd7699c..d8316869947a 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -182,6 +182,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
tw->tw_dport = inet->inet_dport;
tw->tw_family = sk->sk_family;
tw->tw_reuse = sk->sk_reuse;
+ tw->tw_reuseport = sk->sk_reuseport;
tw->tw_hash = sk->sk_hash;
tw->tw_ipv6only = 0;
tw->tw_transparent = inet->transparent;
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 62e41d38da78..c1d7dc433976 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1062,7 +1062,8 @@ alloc_new_skb:
if (copy > length)
copy = length;
- if (!(rt->dst.dev->features&NETIF_F_SG)) {
+ if (!(rt->dst.dev->features&NETIF_F_SG) &&
+ skb_tailroom(skb) >= copy) {
unsigned int off;
off = skb->len;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 72e1e831589a..c0b633ee6c1e 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -777,8 +777,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc.addr = faddr = daddr;
if (ipc.opt && ipc.opt->opt.srr) {
- if (!daddr)
- return -EINVAL;
+ if (!daddr) {
+ err = -EINVAL;
+ goto out_free;
+ }
faddr = ipc.opt->opt.faddr;
}
tos = get_rttos(&ipc, inet);
@@ -844,6 +846,7 @@ back_from_confirm:
out:
ip_rt_put(rt);
+out_free:
if (free)
kfree(ipc.opt);
if (!err) {
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 7541427537d0..a7880f660b7e 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -608,7 +608,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
RT_SCOPE_UNIVERSE,
hdrincl ? IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk) |
- (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
+ (hdrincl ? FLOWI_FLAG_KNOWN_NH : 0),
daddr, saddr, 0, 0, sk->sk_uid);
if (!saddr && ipc.oif) {
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1c04dad774a7..2f26b0d1f1d7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1114,7 +1114,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
lock_sock(sk);
flags = msg->msg_flags;
- if (flags & MSG_FASTOPEN) {
+ if ((flags & MSG_FASTOPEN) && !tp->repair) {
err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
if (err == -EINPROGRESS && copied_syn > 0)
goto out;
@@ -2459,7 +2459,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
case TCP_REPAIR_QUEUE:
if (!tp->repair)
err = -EPERM;
- else if (val < TCP_QUEUES_NR)
+ else if ((unsigned int)val < TCP_QUEUES_NR)
tp->repair_queue = val;
else
err = -EINVAL;
@@ -2598,8 +2598,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
#ifdef CONFIG_TCP_MD5SIG
case TCP_MD5SIG:
- /* Read the IP->Key mappings from userspace */
- err = tp->af_specific->md5_parse(sk, optval, optlen);
+ if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
+ err = tp->af_specific->md5_parse(sk, optval, optlen);
+ else
+ err = -EINVAL;
break;
#endif
case TCP_USER_TIMEOUT:
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 7df435d7f8e7..ee5057f53ca3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3874,11 +3874,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)
int length = (th->doff << 2) - sizeof(*th);
const u8 *ptr = (const u8 *)(th + 1);
- /* If the TCP option is too short, we can short cut */
- if (length < TCPOLEN_MD5SIG)
- return NULL;
-
- while (length > 0) {
+ /* If not enough data remaining, we can short cut */
+ while (length >= TCPOLEN_MD5SIG) {
int opcode = *ptr++;
int opsize;
@@ -5515,7 +5512,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
if (data) { /* Retransmit unacked data in SYN */
tcp_for_write_queue_from(data, sk) {
if (data == tcp_send_head(sk) ||
- __tcp_retransmit_skb(sk, data))
+ __tcp_retransmit_skb(sk, data, 1))
break;
}
tcp_rearm_rto(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7d82c172db78..740ac0367c13 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -737,9 +737,16 @@ static void tcp_tsq_handler(struct sock *sk)
{
if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
- TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
- tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
+ TCPF_CLOSE_WAIT | TCPF_LAST_ACK)) {
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ if (tp->lost_out > tp->retrans_out &&
+ tp->snd_cwnd > tcp_packets_in_flight(tp))
+ tcp_xmit_retransmit_queue(sk);
+
+ tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
0, GFP_ATOMIC);
+ }
}
/*
* One tasklet per cpu tries to send more skbs.
@@ -1540,7 +1547,7 @@ static u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now)
{
u32 bytes, segs;
- bytes = min(sk->sk_pacing_rate >> 10,
+ bytes = min(sk->sk_pacing_rate >> sk->sk_pacing_shift,
sk->sk_gso_max_size - 1 - MAX_TCP_HEADER);
/* Goal is to send at least one packet per ms,
@@ -2012,6 +2019,39 @@ static int tcp_mtu_probe(struct sock *sk)
return -1;
}
+/* TCP Small Queues :
+ * Control number of packets in qdisc/devices to two packets / or ~1 ms.
+ * (These limits are doubled for retransmits)
+ * This allows for :
+ * - better RTT estimation and ACK scheduling
+ * - faster recovery
+ * - high rates
+ * Alas, some drivers / subsystems require a fair amount
+ * of queued bytes to ensure line rate.
+ * One example is wifi aggregation (802.11 AMPDU)
+ */
+static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
+ unsigned int factor)
+{
+ unsigned int limit;
+
+ limit = max(2 * skb->truesize, sk->sk_pacing_rate >> sk->sk_pacing_shift);
+ limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
+ limit <<= factor;
+
+ if (atomic_read(&sk->sk_wmem_alloc) > limit) {
+ set_bit(TSQ_THROTTLED, &tcp_sk(sk)->tsq_flags);
+ /* It is possible TX completion already happened
+ * before we set TSQ_THROTTLED, so we must
+ * test again the condition.
+ */
+ smp_mb__after_atomic();
+ if (atomic_read(&sk->sk_wmem_alloc) > limit)
+ return true;
+ }
+ return false;
+}
+
/* This routine writes packets to the network. It advances the
* send_head. This happens as incoming acks open up the remote
* window for us.
@@ -2098,29 +2138,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
- /* TCP Small Queues :
- * Control number of packets in qdisc/devices to two packets / or ~1 ms.
- * This allows for :
- * - better RTT estimation and ACK scheduling
- * - faster recovery
- * - high rates
- * Alas, some drivers / subsystems require a fair amount
- * of queued bytes to ensure line rate.
- * One example is wifi aggregation (802.11 AMPDU)
- */
- limit = max(2 * skb->truesize, sk->sk_pacing_rate >> 10);
- limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
-
- if (atomic_read(&sk->sk_wmem_alloc) > limit) {
- set_bit(TSQ_THROTTLED, &tp->tsq_flags);
- /* It is possible TX completion already happened
- * before we set TSQ_THROTTLED, so we must
- * test again the condition.
- */
- smp_mb__after_atomic();
- if (atomic_read(&sk->sk_wmem_alloc) > limit)
- break;
- }
+ if (tcp_small_queue_check(sk, skb, 0))
+ break;
if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
break;
@@ -2276,7 +2295,7 @@ void tcp_send_loss_probe(struct sock *sk)
if (WARN_ON(!skb || !tcp_skb_pcount(skb)))
goto rearm_timer;
- if (__tcp_retransmit_skb(sk, skb))
+ if (__tcp_retransmit_skb(sk, skb, 1))
goto rearm_timer;
/* Record snd_nxt for loss detection. */
@@ -2563,17 +2582,17 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
* state updates are done by the caller. Returns non-zero if an
* error occurred which prevented the send.
*/
-int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
{
- struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
unsigned int cur_mss;
- int err;
+ int diff, len, err;
+
- /* Inconslusive MTU probe */
- if (icsk->icsk_mtup.probe_size) {
+ /* Inconclusive MTU probe */
+ if (icsk->icsk_mtup.probe_size)
icsk->icsk_mtup.probe_size = 0;
- }
/* Do not send more than we queued. 1/4 is reserved for possible
* copying overhead: fragmentation, tunneling, mangling etc.
@@ -2587,8 +2606,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
return -EBUSY;
if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) {
- if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
- BUG();
+ if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) {
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq))
return -ENOMEM;
}
@@ -2607,30 +2628,27 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
TCP_SKB_CB(skb)->seq != tp->snd_una)
return -EAGAIN;
- if (skb->len > cur_mss) {
- if (tcp_fragment(sk, skb, cur_mss, cur_mss, GFP_ATOMIC))
+ len = cur_mss * segs;
+ if (skb->len > len) {
+ if (tcp_fragment(sk, skb, len, cur_mss, GFP_ATOMIC))
return -ENOMEM; /* We'll try again later. */
} else {
- int oldpcount = tcp_skb_pcount(skb);
+ if (skb_unclone(skb, GFP_ATOMIC))
+ return -ENOMEM;
- if (unlikely(oldpcount > 1)) {
- if (skb_unclone(skb, GFP_ATOMIC))
- return -ENOMEM;
- tcp_init_tso_segs(skb, cur_mss);
- tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
- }
+ diff = tcp_skb_pcount(skb);
+ tcp_set_skb_tso_segs(skb, cur_mss);
+ diff -= tcp_skb_pcount(skb);
+ if (diff)
+ tcp_adjust_pcount(sk, skb, diff);
+ if (skb->len < cur_mss)
+ tcp_retrans_try_collapse(sk, skb, cur_mss);
}
/* RFC3168, section 6.1.1.1. ECN fallback */
if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
tcp_ecn_clear_syn(sk, skb);
- tcp_retrans_try_collapse(sk, skb, cur_mss);
-
- /* Make a copy, if the first transmission SKB clone we made
- * is still in somebody's hands, else make a clone.
- */
-
/* make sure skb->data is aligned on arches that require it
* and check if ack-trimming & collapsing extended the headroom
* beyond what csum_start can cover.
@@ -2648,20 +2666,22 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
}
if (likely(!err)) {
+ segs = tcp_skb_pcount(skb);
+
TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
/* Update global TCP statistics. */
- TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
+ TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
- tp->total_retrans++;
+ tp->total_retrans += segs;
}
return err;
}
-int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
{
struct tcp_sock *tp = tcp_sk(sk);
- int err = __tcp_retransmit_skb(sk, skb);
+ int err = __tcp_retransmit_skb(sk, skb, segs);
if (err == 0) {
#if FASTRETRANS_DEBUG > 0
@@ -2752,6 +2772,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
tcp_for_write_queue_from(skb, sk) {
__u8 sacked = TCP_SKB_CB(skb)->sacked;
+ int segs;
if (skb == tcp_send_head(sk))
break;
@@ -2759,14 +2780,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
if (!hole)
tp->retransmit_skb_hint = skb;
- /* Assume this retransmit will generate
- * only one packet for congestion window
- * calculation purposes. This works because
- * tcp_retransmit_skb() will chop up the
- * packet to be MSS sized and all the
- * packet counting works out.
- */
- if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
+ segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
+ if (segs <= 0)
return;
if (fwd_rexmitting) {
@@ -2803,7 +2818,10 @@ begin_fwd:
if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
continue;
- if (tcp_retransmit_skb(sk, skb))
+ if (tcp_small_queue_check(sk, skb, 1))
+ return;
+
+ if (tcp_retransmit_skb(sk, skb, segs))
return;
NET_INC_STATS_BH(sock_net(sk), mib_idx);
@@ -3117,6 +3135,7 @@ static void tcp_connect_init(struct sock *sk)
sock_reset_flag(sk, SOCK_DONE);
tp->snd_wnd = 0;
tcp_init_wl(tp, 0);
+ tcp_write_queue_purge(sk);
tp->snd_una = tp->write_seq;
tp->snd_sml = tp->write_seq;
tp->snd_up = tp->write_seq;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 2221e3c36bd5..c6c677fba0a5 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -460,7 +460,7 @@ void tcp_retransmit_timer(struct sock *sk)
goto out;
}
tcp_enter_loss(sk);
- tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
+ tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1);
__sk_dst_reset(sk);
goto out_reset_timer;
}
@@ -492,7 +492,7 @@ void tcp_retransmit_timer(struct sock *sk)
tcp_enter_loss(sk);
- if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk)) > 0) {
+ if (tcp_retransmit_skb(sk, tcp_write_queue_head(sk), 1) > 0) {
/* Retransmission failed because of local congestion,
* do not backoff.
*/
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9828c4aa3739..7f699bcb2450 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -996,8 +996,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc.addr = faddr = daddr;
if (ipc.opt && ipc.opt->opt.srr) {
- if (!daddr)
- return -EINVAL;
+ if (!daddr) {
+ err = -EINVAL;
+ goto out_free;
+ }
faddr = ipc.opt->opt.faddr;
connected = 0;
}
@@ -1111,6 +1113,7 @@ do_append_data:
out:
ip_rt_put(rt);
+out_free:
if (free)
kfree(ipc.opt);
if (!err)
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 42f2caf366a7..c019c7ccfe93 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1528,7 +1528,8 @@ alloc_new_skb:
if (copy > length)
copy = length;
- if (!(rt->dst.dev->features&NETIF_F_SG)) {
+ if (!(rt->dst.dev->features&NETIF_F_SG) &&
+ skb_tailroom(skb) >= copy) {
unsigned int off;
off = skb->len;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 854852138316..e2ca9fe2d32b 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2696,6 +2696,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
[RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
+ [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) },
[RTA_OIF] = { .type = NLA_U32 },
[RTA_IIF] = { .type = NLA_U32 },
[RTA_PRIORITY] = { .type = NLA_U32 },
@@ -2705,6 +2706,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
[RTA_ENCAP_TYPE] = { .type = NLA_U16 },
[RTA_ENCAP] = { .type = NLA_NESTED },
[RTA_UID] = { .type = NLA_U32 },
+ [RTA_TABLE] = { .type = NLA_U32 },
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index ae3438685caa..fb3248ff8b48 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -732,8 +732,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl
if ((session->ifname[0] &&
nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
- (session->offset &&
- nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) ||
(session->cookie_len &&
nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
&session->cookie[0])) ||
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 67f2e72723b2..2764c4bd072c 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -606,6 +606,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr,
lock_sock(sk);
error = -EINVAL;
+
+ if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) &&
+ sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) &&
+ sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) &&
+ sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6))
+ goto end;
+
if (sp->sa_protocol != PX_PROTO_OL2TP)
goto end;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 1e698768aca8..83e8a295c806 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -197,9 +197,19 @@ static int llc_ui_release(struct socket *sock)
llc->laddr.lsap, llc->daddr.lsap);
if (!llc_send_disc(sk))
llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo);
- if (!sock_flag(sk, SOCK_ZAPPED))
+ if (!sock_flag(sk, SOCK_ZAPPED)) {
+ struct llc_sap *sap = llc->sap;
+
+ /* Hold this for release_sock(), so that llc_backlog_rcv()
+ * could still use it.
+ */
+ llc_sap_hold(sap);
llc_sap_remove_socket(llc->sap, sk);
- release_sock(sk);
+ release_sock(sk);
+ llc_sap_put(sap);
+ } else {
+ release_sock(sk);
+ }
if (llc->dev)
dev_put(llc->dev);
sock_put(sk);
@@ -916,6 +926,9 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (size > llc->dev->mtu)
size = llc->dev->mtu;
copied = size - hdrlen;
+ rc = -EINVAL;
+ if (copied < 0)
+ goto release;
release_sock(sk);
skb = sock_alloc_send_skb(sk, size, noblock, &rc);
lock_sock(sk);
diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c
index ea225bd2672c..f8d4ab8ca1a5 100644
--- a/net/llc/llc_c_ac.c
+++ b/net/llc/llc_c_ac.c
@@ -1096,14 +1096,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb)
int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb)
{
- struct llc_sock *llc = llc_sk(sk);
-
- del_timer(&llc->pf_cycle_timer.timer);
- del_timer(&llc->ack_timer.timer);
- del_timer(&llc->rej_sent_timer.timer);
- del_timer(&llc->busy_state_timer.timer);
- llc->ack_must_be_send = 0;
- llc->ack_pf = 0;
+ llc_sk_stop_all_timers(sk, false);
return 0;
}
diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c
index 8bc5a1bd2d45..d861b74ad068 100644
--- a/net/llc/llc_conn.c
+++ b/net/llc/llc_conn.c
@@ -951,6 +951,26 @@ out:
return sk;
}
+void llc_sk_stop_all_timers(struct sock *sk, bool sync)
+{
+ struct llc_sock *llc = llc_sk(sk);
+
+ if (sync) {
+ del_timer_sync(&llc->pf_cycle_timer.timer);
+ del_timer_sync(&llc->ack_timer.timer);
+ del_timer_sync(&llc->rej_sent_timer.timer);
+ del_timer_sync(&llc->busy_state_timer.timer);
+ } else {
+ del_timer(&llc->pf_cycle_timer.timer);
+ del_timer(&llc->ack_timer.timer);
+ del_timer(&llc->rej_sent_timer.timer);
+ del_timer(&llc->busy_state_timer.timer);
+ }
+
+ llc->ack_must_be_send = 0;
+ llc->ack_pf = 0;
+}
+
/**
* llc_sk_free - Frees a LLC socket
* @sk - socket to free
@@ -963,7 +983,7 @@ void llc_sk_free(struct sock *sk)
llc->state = LLC_CONN_OUT_OF_SVC;
/* Stop all (possibly) running timers */
- llc_conn_ac_stop_all_timers(sk, NULL);
+ llc_sk_stop_all_timers(sk, true);
#ifdef DEBUG_LLC_CONN_ALLOC
printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__,
skb_queue_len(&llc->pdu_unack_q),
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index c29aaa69d61c..a8a4d70efc81 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2663,8 +2663,9 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
rate = cfg80211_calculate_bitrate(&ri);
if (WARN_ONCE(!rate,
- "Invalid bitrate: flags=0x%x, idx=%d, vht_nss=%d\n",
- status->flag, status->rate_idx, status->vht_nss))
+ "Invalid bitrate: flags=0x%llx, idx=%d, vht_nss=%d\n",
+ (unsigned long long)status->flag, status->rate_idx,
+ status->vht_nss))
return 0;
/* rewind from end of MPDU */
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index efa3f48f1ec5..73e8f347802e 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -293,7 +293,8 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
/* remove ICV */
- if (pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN))
+ if (!(status->flag & RX_FLAG_ICV_STRIPPED) &&
+ pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN))
return RX_DROP_UNUSABLE;
}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index e19ea1c53afa..cb439e06919f 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -298,7 +298,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
/* Trim ICV */
- skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
+ if (!(status->flag & RX_FLAG_ICV_STRIPPED))
+ skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
/* Remove IV */
memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen);
@@ -508,25 +509,31 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx,
!ieee80211_is_robust_mgmt_frame(skb))
return RX_CONTINUE;
- data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
- if (!rx->sta || data_len < 0)
- return RX_DROP_UNUSABLE;
-
if (status->flag & RX_FLAG_DECRYPTED) {
if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_CCMP_HDR_LEN))
return RX_DROP_UNUSABLE;
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ mic_len = 0;
} else {
if (skb_linearize(rx->skb))
return RX_DROP_UNUSABLE;
}
+ data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN - mic_len;
+ if (!rx->sta || data_len < 0)
+ return RX_DROP_UNUSABLE;
+
if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+ int res;
+
ccmp_hdr2pn(pn, skb->data + hdrlen);
queue = rx->security_idx;
- if (memcmp(pn, key->u.ccmp.rx_pn[queue],
- IEEE80211_CCMP_PN_LEN) <= 0) {
+ res = memcmp(pn, key->u.ccmp.rx_pn[queue],
+ IEEE80211_CCMP_PN_LEN);
+ if (res < 0 ||
+ (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) {
key->u.ccmp.replays++;
return RX_DROP_UNUSABLE;
}
@@ -724,8 +731,7 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
struct sk_buff *skb = rx->skb;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
u8 pn[IEEE80211_GCMP_PN_LEN];
- int data_len;
- int queue;
+ int data_len, queue, mic_len = IEEE80211_GCMP_MIC_LEN;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -733,26 +739,31 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
!ieee80211_is_robust_mgmt_frame(skb))
return RX_CONTINUE;
- data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN -
- IEEE80211_GCMP_MIC_LEN;
- if (!rx->sta || data_len < 0)
- return RX_DROP_UNUSABLE;
-
if (status->flag & RX_FLAG_DECRYPTED) {
if (!pskb_may_pull(rx->skb, hdrlen + IEEE80211_GCMP_HDR_LEN))
return RX_DROP_UNUSABLE;
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ mic_len = 0;
} else {
if (skb_linearize(rx->skb))
return RX_DROP_UNUSABLE;
}
+ data_len = skb->len - hdrlen - IEEE80211_GCMP_HDR_LEN - mic_len;
+ if (!rx->sta || data_len < 0)
+ return RX_DROP_UNUSABLE;
+
if (!(status->flag & RX_FLAG_PN_VALIDATED)) {
+ int res;
+
gcmp_hdr2pn(pn, skb->data + hdrlen);
queue = rx->security_idx;
- if (memcmp(pn, key->u.gcmp.rx_pn[queue],
- IEEE80211_GCMP_PN_LEN) <= 0) {
+ res = memcmp(pn, key->u.gcmp.rx_pn[queue],
+ IEEE80211_GCMP_PN_LEN);
+ if (res < 0 ||
+ (!res && !(status->flag & RX_FLAG_ALLOW_SAME_PN))) {
key->u.gcmp.replays++;
return RX_DROP_UNUSABLE;
}
@@ -776,7 +787,7 @@ ieee80211_crypto_gcmp_decrypt(struct ieee80211_rx_data *rx)
}
/* Remove GCMP header and MIC */
- if (pskb_trim(skb, skb->len - IEEE80211_GCMP_MIC_LEN))
+ if (pskb_trim(skb, skb->len - mic_len))
return RX_DROP_UNUSABLE;
memmove(skb->data + IEEE80211_GCMP_HDR_LEN, skb->data, hdrlen);
skb_pull(skb, IEEE80211_GCMP_HDR_LEN);
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 2f0e4f61c40f..c0656510c4dc 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2352,11 +2352,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
strlcpy(cfg.mcast_ifn, dm->mcast_ifn,
sizeof(cfg.mcast_ifn));
cfg.syncid = dm->syncid;
- rtnl_lock();
- mutex_lock(&ipvs->sync_mutex);
ret = start_sync_thread(ipvs, &cfg, dm->state);
- mutex_unlock(&ipvs->sync_mutex);
- rtnl_unlock();
} else {
mutex_lock(&ipvs->sync_mutex);
ret = stop_sync_thread(ipvs, dm->state);
@@ -3435,12 +3431,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs)
if (ipvs->mixed_address_family_dests > 0)
return -EINVAL;
- rtnl_lock();
- mutex_lock(&ipvs->sync_mutex);
ret = start_sync_thread(ipvs, &c,
nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE]));
- mutex_unlock(&ipvs->sync_mutex);
- rtnl_unlock();
return ret;
}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index 1b07578bedf3..cec7234b7a1d 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -48,6 +48,7 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/kernel.h>
+#include <linux/sched.h>
#include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */
@@ -1356,15 +1357,9 @@ static void set_mcast_pmtudisc(struct sock *sk, int val)
/*
* Specify default interface for outgoing multicasts
*/
-static int set_mcast_if(struct sock *sk, char *ifname)
+static int set_mcast_if(struct sock *sk, struct net_device *dev)
{
- struct net_device *dev;
struct inet_sock *inet = inet_sk(sk);
- struct net *net = sock_net(sk);
-
- dev = __dev_get_by_name(net, ifname);
- if (!dev)
- return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
@@ -1392,19 +1387,14 @@ static int set_mcast_if(struct sock *sk, char *ifname)
* in the in_addr structure passed in as a parameter.
*/
static int
-join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
+join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev)
{
- struct net *net = sock_net(sk);
struct ip_mreqn mreq;
- struct net_device *dev;
int ret;
memset(&mreq, 0, sizeof(mreq));
memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr));
- dev = __dev_get_by_name(net, ifname);
- if (!dev)
- return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
@@ -1419,15 +1409,10 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname)
#ifdef CONFIG_IP_VS_IPV6
static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
- char *ifname)
+ struct net_device *dev)
{
- struct net *net = sock_net(sk);
- struct net_device *dev;
int ret;
- dev = __dev_get_by_name(net, ifname);
- if (!dev)
- return -ENODEV;
if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
return -EINVAL;
@@ -1439,24 +1424,18 @@ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr,
}
#endif
-static int bind_mcastif_addr(struct socket *sock, char *ifname)
+static int bind_mcastif_addr(struct socket *sock, struct net_device *dev)
{
- struct net *net = sock_net(sock->sk);
- struct net_device *dev;
__be32 addr;
struct sockaddr_in sin;
- dev = __dev_get_by_name(net, ifname);
- if (!dev)
- return -ENODEV;
-
addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
if (!addr)
pr_err("You probably need to specify IP address on "
"multicast interface.\n");
IP_VS_DBG(7, "binding socket with (%s) %pI4\n",
- ifname, &addr);
+ dev->name, &addr);
/* Now bind the socket with the address of multicast interface */
sin.sin_family = AF_INET;
@@ -1489,7 +1468,8 @@ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen,
/*
* Set up sending multicast socket over UDP
*/
-static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
+static int make_send_sock(struct netns_ipvs *ipvs, int id,
+ struct net_device *dev, struct socket **sock_ret)
{
/* multicast addr */
union ipvs_sockaddr mcast_addr;
@@ -1501,9 +1481,10 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
IPPROTO_UDP, &sock);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
- return ERR_PTR(result);
+ goto error;
}
- result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn);
+ *sock_ret = sock;
+ result = set_mcast_if(sock->sk, dev);
if (result < 0) {
pr_err("Error setting outbound mcast interface\n");
goto error;
@@ -1518,7 +1499,7 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
set_sock_size(sock->sk, 1, result);
if (AF_INET == ipvs->mcfg.mcast_af)
- result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn);
+ result = bind_mcastif_addr(sock, dev);
else
result = 0;
if (result < 0) {
@@ -1534,19 +1515,18 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id)
goto error;
}
- return sock;
+ return 0;
error:
- sock_release(sock);
- return ERR_PTR(result);
+ return result;
}
/*
* Set up receiving multicast socket over UDP
*/
-static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
- int ifindex)
+static int make_receive_sock(struct netns_ipvs *ipvs, int id,
+ struct net_device *dev, struct socket **sock_ret)
{
/* multicast addr */
union ipvs_sockaddr mcast_addr;
@@ -1558,8 +1538,9 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
IPPROTO_UDP, &sock);
if (result < 0) {
pr_err("Error during creation of socket; terminating\n");
- return ERR_PTR(result);
+ goto error;
}
+ *sock_ret = sock;
/* it is equivalent to the REUSEADDR option in user-space */
sock->sk->sk_reuse = SK_CAN_REUSE;
result = sysctl_sync_sock_size(ipvs);
@@ -1567,7 +1548,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
set_sock_size(sock->sk, 0, result);
get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id);
- sock->sk->sk_bound_dev_if = ifindex;
+ sock->sk->sk_bound_dev_if = dev->ifindex;
result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen);
if (result < 0) {
pr_err("Error binding to the multicast addr\n");
@@ -1578,21 +1559,20 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id,
#ifdef CONFIG_IP_VS_IPV6
if (ipvs->bcfg.mcast_af == AF_INET6)
result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr,
- ipvs->bcfg.mcast_ifn);
+ dev);
else
#endif
result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr,
- ipvs->bcfg.mcast_ifn);
+ dev);
if (result < 0) {
pr_err("Error joining to the multicast group\n");
goto error;
}
- return sock;
+ return 0;
error:
- sock_release(sock);
- return ERR_PTR(result);
+ return result;
}
@@ -1777,13 +1757,12 @@ static int sync_thread_backup(void *data)
int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
int state)
{
- struct ip_vs_sync_thread_data *tinfo;
+ struct ip_vs_sync_thread_data *tinfo = NULL;
struct task_struct **array = NULL, *task;
- struct socket *sock;
struct net_device *dev;
char *name;
int (*threadfn)(void *data);
- int id, count, hlen;
+ int id = 0, count, hlen;
int result = -ENOMEM;
u16 mtu, min_mtu;
@@ -1791,6 +1770,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n",
sizeof(struct ip_vs_sync_conn_v0));
+ /* Do not hold one mutex and then block on another */
+ for (;;) {
+ rtnl_lock();
+ if (mutex_trylock(&ipvs->sync_mutex))
+ break;
+ rtnl_unlock();
+ mutex_lock(&ipvs->sync_mutex);
+ if (rtnl_trylock())
+ break;
+ mutex_unlock(&ipvs->sync_mutex);
+ }
+
if (!ipvs->sync_state) {
count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX);
ipvs->threads_mask = count - 1;
@@ -1809,7 +1800,8 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
dev = __dev_get_by_name(ipvs->net, c->mcast_ifn);
if (!dev) {
pr_err("Unknown mcast interface: %s\n", c->mcast_ifn);
- return -ENODEV;
+ result = -ENODEV;
+ goto out_early;
}
hlen = (AF_INET6 == c->mcast_af) ?
sizeof(struct ipv6hdr) + sizeof(struct udphdr) :
@@ -1826,26 +1818,30 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
c->sync_maxlen = mtu - hlen;
if (state == IP_VS_STATE_MASTER) {
+ result = -EEXIST;
if (ipvs->ms)
- return -EEXIST;
+ goto out_early;
ipvs->mcfg = *c;
name = "ipvs-m:%d:%d";
threadfn = sync_thread_master;
} else if (state == IP_VS_STATE_BACKUP) {
+ result = -EEXIST;
if (ipvs->backup_threads)
- return -EEXIST;
+ goto out_early;
ipvs->bcfg = *c;
name = "ipvs-b:%d:%d";
threadfn = sync_thread_backup;
} else {
- return -EINVAL;
+ result = -EINVAL;
+ goto out_early;
}
if (state == IP_VS_STATE_MASTER) {
struct ipvs_master_sync_state *ms;
+ result = -ENOMEM;
ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL);
if (!ipvs->ms)
goto out;
@@ -1861,39 +1857,38 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
} else {
array = kzalloc(count * sizeof(struct task_struct *),
GFP_KERNEL);
+ result = -ENOMEM;
if (!array)
goto out;
}
- tinfo = NULL;
for (id = 0; id < count; id++) {
- if (state == IP_VS_STATE_MASTER)
- sock = make_send_sock(ipvs, id);
- else
- sock = make_receive_sock(ipvs, id, dev->ifindex);
- if (IS_ERR(sock)) {
- result = PTR_ERR(sock);
- goto outtinfo;
- }
+ result = -ENOMEM;
tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL);
if (!tinfo)
- goto outsocket;
+ goto out;
tinfo->ipvs = ipvs;
- tinfo->sock = sock;
+ tinfo->sock = NULL;
if (state == IP_VS_STATE_BACKUP) {
tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen,
GFP_KERNEL);
if (!tinfo->buf)
- goto outtinfo;
+ goto out;
} else {
tinfo->buf = NULL;
}
tinfo->id = id;
+ if (state == IP_VS_STATE_MASTER)
+ result = make_send_sock(ipvs, id, dev, &tinfo->sock);
+ else
+ result = make_receive_sock(ipvs, id, dev, &tinfo->sock);
+ if (result < 0)
+ goto out;
task = kthread_run(threadfn, tinfo, name, ipvs->gen, id);
if (IS_ERR(task)) {
result = PTR_ERR(task);
- goto outtinfo;
+ goto out;
}
tinfo = NULL;
if (state == IP_VS_STATE_MASTER)
@@ -1910,20 +1905,20 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c,
ipvs->sync_state |= state;
spin_unlock_bh(&ipvs->sync_buff_lock);
+ mutex_unlock(&ipvs->sync_mutex);
+ rtnl_unlock();
+
/* increase the module use count */
ip_vs_use_count_inc();
return 0;
-outsocket:
- sock_release(sock);
-
-outtinfo:
- if (tinfo) {
- sock_release(tinfo->sock);
- kfree(tinfo->buf);
- kfree(tinfo);
- }
+out:
+ /* We do not need RTNL lock anymore, release it here so that
+ * sock_release below and in the kthreads can use rtnl_lock
+ * to leave the mcast group.
+ */
+ rtnl_unlock();
count = id;
while (count-- > 0) {
if (state == IP_VS_STATE_MASTER)
@@ -1931,13 +1926,23 @@ outtinfo:
else
kthread_stop(array[count]);
}
- kfree(array);
-
-out:
if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) {
kfree(ipvs->ms);
ipvs->ms = NULL;
}
+ mutex_unlock(&ipvs->sync_mutex);
+ if (tinfo) {
+ if (tinfo->sock)
+ sock_release(tinfo->sock);
+ kfree(tinfo->buf);
+ kfree(tinfo);
+ }
+ kfree(array);
+ return result;
+
+out_early:
+ mutex_unlock(&ipvs->sync_mutex);
+ rtnl_unlock();
return result;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 83c0f56d05cb..0fb27debd4fa 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1775,6 +1775,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (msg->msg_namelen) {
err = -EINVAL;
+ if (msg->msg_namelen < sizeof(struct sockaddr_nl))
+ goto out;
if (addr->nl_family != AF_NETLINK)
goto out;
dst_portid = addr->nl_pid;
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
index 2b0f0ac498d2..5a58f9f38095 100644
--- a/net/nfc/hci/core.c
+++ b/net/nfc/hci/core.c
@@ -209,6 +209,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
}
create_info = (struct hci_create_pipe_resp *)skb->data;
+ if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
/* Save the new created pipe and bind with local gate,
* the description for skb->data[3] is destination gate id
* but since we received this cmd from host controller, we
@@ -232,6 +237,11 @@ void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
}
delete_info = (struct hci_delete_pipe_noti *)skb->data;
+ if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
break;
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 21e4d339217e..624c4719e404 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1141,13 +1141,10 @@ static void nlattr_set(struct nlattr *attr, u8 val,
/* The nlattr stream should already have been validated */
nla_for_each_nested(nla, attr, rem) {
- if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
- if (tbl[nla_type(nla)].next)
- tbl = tbl[nla_type(nla)].next;
- nlattr_set(nla, val, tbl);
- } else {
+ if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
+ nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
+ else
memset(nla_data(nla), val, nla_len(nla));
- }
if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
*(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 92ca3e106c2b..392d4e2c0a24 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -332,11 +332,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
skb_set_queue_mapping(skb, queue_index);
}
-/* register_prot_hook must be invoked with the po->bind_lock held,
+/* __register_prot_hook must be invoked through register_prot_hook
* or from a context in which asynchronous accesses to the packet
* socket is not possible (packet_create()).
*/
-static void register_prot_hook(struct sock *sk)
+static void __register_prot_hook(struct sock *sk)
{
struct packet_sock *po = pkt_sk(sk);
@@ -351,8 +351,13 @@ static void register_prot_hook(struct sock *sk)
}
}
-/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock
- * held. If the sync parameter is true, we will temporarily drop
+static void register_prot_hook(struct sock *sk)
+{
+ lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
+ __register_prot_hook(sk);
+}
+
+/* If the sync parameter is true, we will temporarily drop
* the po->bind_lock and do a synchronize_net to make sure no
* asynchronous packet processing paths still refer to the elements
* of po->prot_hook. If the sync parameter is false, it is the
@@ -362,6 +367,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
{
struct packet_sock *po = pkt_sk(sk);
+ lockdep_assert_held_once(&po->bind_lock);
+
po->running = 0;
if (po->fanout)
@@ -2764,13 +2771,15 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
if (skb == NULL)
goto out_unlock;
- skb_set_network_header(skb, reserve);
+ skb_reset_network_header(skb);
err = -EINVAL;
if (sock->type == SOCK_DGRAM) {
offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
if (unlikely(offset < 0))
goto out_free;
+ } else if (reserve) {
+ skb_push(skb, reserve);
}
/* Returns -EFAULT on error */
@@ -2892,6 +2901,7 @@ static int packet_release(struct socket *sock)
packet_flush_mclist(sk);
+ lock_sock(sk);
if (po->rx_ring.pg_vec) {
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 0);
@@ -2901,6 +2911,7 @@ static int packet_release(struct socket *sock)
memset(&req_u, 0, sizeof(req_u));
packet_set_ring(sk, &req_u, 1, 1);
}
+ release_sock(sk);
f = fanout_release(sk);
@@ -3134,7 +3145,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
if (proto) {
po->prot_hook.type = proto;
- register_prot_hook(sk);
+ __register_prot_hook(sk);
}
mutex_lock(&net->packet.sklist_lock);
@@ -3570,6 +3581,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
union tpacket_req_u req_u;
int len;
+ lock_sock(sk);
switch (po->tp_version) {
case TPACKET_V1:
case TPACKET_V2:
@@ -3580,14 +3592,21 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
len = sizeof(req_u.req3);
break;
}
- if (optlen < len)
- return -EINVAL;
- if (pkt_sk(sk)->has_vnet_hdr)
- return -EINVAL;
- if (copy_from_user(&req_u.req, optval, len))
- return -EFAULT;
- return packet_set_ring(sk, &req_u, 0,
- optname == PACKET_TX_RING);
+ if (optlen < len) {
+ ret = -EINVAL;
+ } else {
+ if (pkt_sk(sk)->has_vnet_hdr) {
+ ret = -EINVAL;
+ } else {
+ if (copy_from_user(&req_u.req, optval, len))
+ ret = -EFAULT;
+ else
+ ret = packet_set_ring(sk, &req_u, 0,
+ optname == PACKET_TX_RING);
+ }
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_COPY_THRESH:
{
@@ -3653,12 +3672,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (optlen != sizeof(val))
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->tp_loss = !!val;
- return 0;
+
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->tp_loss = !!val;
+ ret = 0;
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_AUXDATA:
{
@@ -3669,7 +3694,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
+ lock_sock(sk);
po->auxdata = !!val;
+ release_sock(sk);
return 0;
}
case PACKET_ORIGDEV:
@@ -3681,7 +3708,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
+ lock_sock(sk);
po->origdev = !!val;
+ release_sock(sk);
return 0;
}
case PACKET_VNET_HDR:
@@ -3690,15 +3719,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (sock->type != SOCK_RAW)
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (optlen < sizeof(val))
return -EINVAL;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->has_vnet_hdr = !!val;
- return 0;
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->has_vnet_hdr = !!val;
+ ret = 0;
+ }
+ release_sock(sk);
+ return ret;
}
case PACKET_TIMESTAMP:
{
@@ -3736,11 +3770,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv
if (optlen != sizeof(val))
return -EINVAL;
- if (po->rx_ring.pg_vec || po->tx_ring.pg_vec)
- return -EBUSY;
if (copy_from_user(&val, optval, sizeof(val)))
return -EFAULT;
- po->tp_tx_has_off = !!val;
+
+ lock_sock(sk);
+ if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
+ ret = -EBUSY;
+ } else {
+ po->tp_tx_has_off = !!val;
+ ret = 0;
+ }
+ release_sock(sk);
return 0;
}
case PACKET_QDISC_BYPASS:
@@ -4116,7 +4156,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
/* Added to avoid minimal code churn */
struct tpacket_req *req = &req_u->req;
- lock_sock(sk);
/* Opening a Tx-ring is NOT supported in TPACKET_V3 */
if (!closing && tx_ring && (po->tp_version > TPACKET_V2)) {
WARN(1, "Tx-ring is not supported.\n");
@@ -4252,7 +4291,6 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
if (pg_vec)
free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
- release_sock(sk);
return err;
}
diff --git a/net/packet/internal.h b/net/packet/internal.h
index d55bfc34d6b3..1309e2a7baad 100644
--- a/net/packet/internal.h
+++ b/net/packet/internal.h
@@ -109,10 +109,12 @@ struct packet_sock {
int copy_thresh;
spinlock_t bind_lock;
struct mutex pg_vec_lock;
- unsigned int running:1, /* prot_hook is attached*/
- auxdata:1,
+ unsigned int running; /* bind_lock must be held */
+ unsigned int auxdata:1, /* writer must hold sock lock */
origdev:1,
- has_vnet_hdr:1;
+ has_vnet_hdr:1,
+ tp_loss:1,
+ tp_tx_has_off:1;
int pressure;
int ifindex; /* bound device */
__be16 num;
@@ -122,8 +124,6 @@ struct packet_sock {
enum tpacket_versions tp_version;
unsigned int tp_hdrlen;
unsigned int tp_reserve;
- unsigned int tp_loss:1;
- unsigned int tp_tx_has_off:1;
unsigned int tp_tstamp;
struct net_device __rcu *cached_dev;
int (*xmit)(struct sk_buff *skb);
diff --git a/net/rfkill/rfkill-gpio.c b/net/rfkill/rfkill-gpio.c
index 93127220cb54..e6e249cc651c 100644
--- a/net/rfkill/rfkill-gpio.c
+++ b/net/rfkill/rfkill-gpio.c
@@ -140,13 +140,18 @@ static int rfkill_gpio_probe(struct platform_device *pdev)
ret = rfkill_register(rfkill->rfkill_dev);
if (ret < 0)
- return ret;
+ goto err_destroy;
platform_set_drvdata(pdev, rfkill);
dev_info(&pdev->dev, "%s device registered.\n", rfkill->name);
return 0;
+
+err_destroy:
+ rfkill_destroy(rfkill->rfkill_dev);
+
+ return ret;
}
static int rfkill_gpio_remove(struct platform_device *pdev)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 3c6a47d66a04..117ed90c5f21 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -126,6 +126,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
return f->next == &detached;
}
+static bool fq_flow_is_throttled(const struct fq_flow *f)
+{
+ return f->next == &throttled;
+}
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+ if (head->first)
+ head->last->next = flow;
+ else
+ head->first = flow;
+ head->last = flow;
+ flow->next = NULL;
+}
+
+static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+ rb_erase(&f->rate_node, &q->delayed);
+ q->throttled_flows--;
+ fq_flow_add_tail(&q->old_flows, f);
+}
+
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -153,15 +175,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
static struct kmem_cache *fq_flow_cachep __read_mostly;
-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
-{
- if (head->first)
- head->last->next = flow;
- else
- head->first = flow;
- head->last = flow;
- flow->next = NULL;
-}
/* limit number of collected flows per round */
#define FQ_GC_MAX 8
@@ -265,6 +278,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
f->socket_hash != sk->sk_hash)) {
f->credit = q->initial_quantum;
f->socket_hash = sk->sk_hash;
+ if (fq_flow_is_throttled(f))
+ fq_flow_unset_throttled(q, f);
f->time_next_packet = 0ULL;
}
return f;
@@ -419,9 +434,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
q->time_next_delayed_flow = f->time_next_packet;
break;
}
- rb_erase(p, &q->delayed);
- q->throttled_flows--;
- fq_flow_add_tail(&q->old_flows, f);
+ fq_flow_unset_throttled(q, f);
}
}
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 559afd0ee7de..a40b8b0ef0d5 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1000,9 +1000,10 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
struct sctp_endpoint *ep;
struct sctp_chunk *chunk;
struct sctp_inq *inqueue;
- int state;
sctp_subtype_t subtype;
+ int first_time = 1; /* is this the first time through the loop */
int error = 0;
+ int state;
/* The association should be held so we should be safe. */
ep = asoc->ep;
@@ -1013,6 +1014,30 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
state = asoc->state;
subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
+ /* If the first chunk in the packet is AUTH, do special
+ * processing specified in Section 6.3 of SCTP-AUTH spec
+ */
+ if (first_time && subtype.chunk == SCTP_CID_AUTH) {
+ struct sctp_chunkhdr *next_hdr;
+
+ next_hdr = sctp_inq_peek(inqueue);
+ if (!next_hdr)
+ goto normal;
+
+ /* If the next chunk is COOKIE-ECHO, skip the AUTH
+ * chunk while saving a pointer to it so we can do
+ * Authentication later (during cookie-echo
+ * processing).
+ */
+ if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
+ chunk->auth_chunk = skb_clone(chunk->skb,
+ GFP_ATOMIC);
+ chunk->auth = 1;
+ continue;
+ }
+ }
+
+normal:
/* SCTP-AUTH, Section 6.3:
* The receiver has a list of chunk types which it expects
* to be received only after an AUTH-chunk. This list has
@@ -1051,6 +1076,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
/* If there is an error on chunk, discard this packet. */
if (error && chunk)
chunk->pdiscard = 1;
+
+ if (first_time)
+ first_time = 0;
}
sctp_association_put(asoc);
}
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 7e8a16c77039..8d9b7ad25b65 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -178,7 +178,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
chunk->subh.v = NULL; /* Subheader is no longer valid. */
- if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
+ if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <=
skb_tail_pointer(chunk->skb)) {
/* This is not a singleton */
chunk->singleton = 0;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index edb8514b4e00..5ca8309ea7b1 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -519,46 +519,49 @@ static void sctp_v6_to_addr(union sctp_addr *addr, struct in6_addr *saddr,
addr->v6.sin6_scope_id = 0;
}
-/* Compare addresses exactly.
- * v4-mapped-v6 is also in consideration.
- */
-static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
- const union sctp_addr *addr2)
+static int __sctp_v6_cmp_addr(const union sctp_addr *addr1,
+ const union sctp_addr *addr2)
{
if (addr1->sa.sa_family != addr2->sa.sa_family) {
if (addr1->sa.sa_family == AF_INET &&
addr2->sa.sa_family == AF_INET6 &&
- ipv6_addr_v4mapped(&addr2->v6.sin6_addr)) {
- if (addr2->v6.sin6_port == addr1->v4.sin_port &&
- addr2->v6.sin6_addr.s6_addr32[3] ==
- addr1->v4.sin_addr.s_addr)
- return 1;
- }
+ ipv6_addr_v4mapped(&addr2->v6.sin6_addr) &&
+ addr2->v6.sin6_addr.s6_addr32[3] ==
+ addr1->v4.sin_addr.s_addr)
+ return 1;
+
if (addr2->sa.sa_family == AF_INET &&
addr1->sa.sa_family == AF_INET6 &&
- ipv6_addr_v4mapped(&addr1->v6.sin6_addr)) {
- if (addr1->v6.sin6_port == addr2->v4.sin_port &&
- addr1->v6.sin6_addr.s6_addr32[3] ==
- addr2->v4.sin_addr.s_addr)
- return 1;
- }
+ ipv6_addr_v4mapped(&addr1->v6.sin6_addr) &&
+ addr1->v6.sin6_addr.s6_addr32[3] ==
+ addr2->v4.sin_addr.s_addr)
+ return 1;
+
return 0;
}
- if (addr1->v6.sin6_port != addr2->v6.sin6_port)
- return 0;
+
if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr))
return 0;
+
/* If this is a linklocal address, compare the scope_id. */
- if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
- if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
- (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) {
- return 0;
- }
- }
+ if ((ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) &&
+ addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id &&
+ addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)
+ return 0;
return 1;
}
+/* Compare addresses exactly.
+ * v4-mapped-v6 is also in consideration.
+ */
+static int sctp_v6_cmp_addr(const union sctp_addr *addr1,
+ const union sctp_addr *addr2)
+{
+ return __sctp_v6_cmp_addr(addr1, addr2) &&
+ addr1->v6.sin6_port == addr2->v6.sin6_port;
+}
+
/* Initialize addr struct to INADDR_ANY. */
static void sctp_v6_inaddr_any(union sctp_addr *addr, __be16 port)
{
@@ -843,8 +846,8 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
const union sctp_addr *addr2,
struct sctp_sock *opt)
{
- struct sctp_af *af1, *af2;
struct sock *sk = sctp_opt2sk(opt);
+ struct sctp_af *af1, *af2;
af1 = sctp_get_af_specific(addr1->sa.sa_family);
af2 = sctp_get_af_specific(addr2->sa.sa_family);
@@ -860,10 +863,10 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
return 1;
- if (addr1->sa.sa_family != addr2->sa.sa_family)
- return 0;
+ if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
+ return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
- return af1->cmp_addr(addr1, addr2);
+ return __sctp_v6_cmp_addr(addr1, addr2);
}
/* Verify that the provided sockaddr looks bindable. Common verification,
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 29c7c43de108..df9ac3746c1b 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -144,10 +144,8 @@ static sctp_disposition_t sctp_sf_violation_chunk(
void *arg,
sctp_cmd_seq_t *commands);
-static sctp_ierror_t sctp_sf_authenticate(struct net *net,
- const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(
const struct sctp_association *asoc,
- const sctp_subtype_t type,
struct sctp_chunk *chunk);
static sctp_disposition_t __sctp_sf_do_9_1_abort(struct net *net,
@@ -615,6 +613,38 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(struct net *net,
return SCTP_DISPOSITION_CONSUME;
}
+static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
+ const struct sctp_association *asoc)
+{
+ struct sctp_chunk auth;
+
+ if (!chunk->auth_chunk)
+ return true;
+
+ /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
+ * is supposed to be authenticated and we have to do delayed
+ * authentication. We've just recreated the association using
+ * the information in the cookie and now it's much easier to
+ * do the authentication.
+ */
+
+ /* Make sure that we and the peer are AUTH capable */
+ if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
+ return false;
+
+ /* set-up our fake chunk so that we can process it */
+ auth.skb = chunk->auth_chunk;
+ auth.asoc = chunk->asoc;
+ auth.sctp_hdr = chunk->sctp_hdr;
+ auth.chunk_hdr = (struct sctp_chunkhdr *)
+ skb_push(chunk->auth_chunk,
+ sizeof(struct sctp_chunkhdr));
+ skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
+ auth.transport = chunk->transport;
+
+ return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
+}
+
/*
* Respond to a normal COOKIE ECHO chunk.
* We are the side that is being asked for an association.
@@ -751,36 +781,9 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
if (error)
goto nomem_init;
- /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
- * is supposed to be authenticated and we have to do delayed
- * authentication. We've just recreated the association using
- * the information in the cookie and now it's much easier to
- * do the authentication.
- */
- if (chunk->auth_chunk) {
- struct sctp_chunk auth;
- sctp_ierror_t ret;
-
- /* Make sure that we and the peer are AUTH capable */
- if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
- sctp_association_free(new_asoc);
- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
- }
-
- /* set-up our fake chunk so that we can process it */
- auth.skb = chunk->auth_chunk;
- auth.asoc = chunk->asoc;
- auth.sctp_hdr = chunk->sctp_hdr;
- auth.chunk_hdr = (sctp_chunkhdr_t *)skb_push(chunk->auth_chunk,
- sizeof(sctp_chunkhdr_t));
- skb_pull(chunk->auth_chunk, sizeof(sctp_chunkhdr_t));
- auth.transport = chunk->transport;
-
- ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
- if (ret != SCTP_IERROR_NO_ERROR) {
- sctp_association_free(new_asoc);
- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
- }
+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}
repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1717,13 +1720,15 @@ static sctp_disposition_t sctp_sf_do_dupcook_a(struct net *net,
GFP_ATOMIC))
goto nomem;
+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
/* Make sure no new addresses are being added during the
* restart. Though this is a pretty complicated attack
* since you'd have to get inside the cookie.
*/
- if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
+ if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
return SCTP_DISPOSITION_CONSUME;
- }
/* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
* the peer has restarted (Action A), it MUST NOT setup a new
@@ -1828,6 +1833,9 @@ static sctp_disposition_t sctp_sf_do_dupcook_b(struct net *net,
GFP_ATOMIC))
goto nomem;
+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
/* Update the content of current association. */
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@@ -1920,6 +1928,9 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
* a COOKIE ACK.
*/
+ if (!sctp_auth_chunk_verify(net, chunk, asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
/* Don't accidentally move back into established state. */
if (asoc->state < SCTP_STATE_ESTABLISHED) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -1959,7 +1970,7 @@ static sctp_disposition_t sctp_sf_do_dupcook_d(struct net *net,
}
}
- repl = sctp_make_cookie_ack(new_asoc, chunk);
+ repl = sctp_make_cookie_ack(asoc, chunk);
if (!repl)
goto nomem;
@@ -3985,10 +3996,8 @@ gen_shutdown:
*
* The return value is the disposition of the chunk.
*/
-static sctp_ierror_t sctp_sf_authenticate(struct net *net,
- const struct sctp_endpoint *ep,
+static sctp_ierror_t sctp_sf_authenticate(
const struct sctp_association *asoc,
- const sctp_subtype_t type,
struct sctp_chunk *chunk)
{
struct sctp_authhdr *auth_hdr;
@@ -4087,7 +4096,7 @@ sctp_disposition_t sctp_sf_eat_auth(struct net *net,
commands);
auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
- error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
+ error = sctp_sf_authenticate(asoc, chunk);
switch (error) {
case SCTP_IERROR_AUTH_BAD_HMAC:
/* Generate the ERROR chunk and discard the rest
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 77bf9113c7a7..2763bd369b79 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -44,7 +44,8 @@
static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
[TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
- [TIPC_NLA_NET_ID] = { .type = NLA_U32 }
+ [TIPC_NLA_NET_ID] = { .type = NLA_U32 },
+ [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 },
};
/*
diff --git a/net/wireless/core.c b/net/wireless/core.c
index a119bc59d5a4..82a3bf0b1f0e 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -95,6 +95,9 @@ static int cfg80211_dev_check_name(struct cfg80211_registered_device *rdev,
ASSERT_RTNL();
+ if (strlen(newname) > NL80211_WIPHY_NAME_MAXLEN)
+ return -EINVAL;
+
/* prohibit calling the thing phy%d when %d is not its number */
sscanf(newname, PHY_NAME "%d%n", &wiphy_idx, &taken);
if (taken == strlen(newname) && wiphy_idx != rdev->wiphy_idx) {
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 5214da8c61f7..bfdcaa1071fc 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -430,6 +430,12 @@ country GH: DFS-FCC
(5490 - 5730 @ 160), (24), DFS
(5735 - 5835 @ 80), (30)
+country GI: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
+
country GL: DFS-ETSI
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (23), AUTO-BW
@@ -538,10 +544,6 @@ country IN:
(5170 - 5330 @ 160), (23)
(5735 - 5835 @ 80), (30)
-country IR:
- (2402 - 2482 @ 40), (20)
- (5735 - 5835 @ 80), (30)
-
country IS: DFS-ETSI
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (23), AUTO-BW
@@ -1155,7 +1157,7 @@ country UG: DFS-FCC
country US: DFS-FCC
(2402 - 2472 @ 40), (30)
- (5170 - 5250 @ 80), (24), AUTO-BW
+ (5170 - 5250 @ 80), (30), AUTO-BW
(5250 - 5330 @ 80), (24), DFS, AUTO-BW
(5490 - 5730 @ 160), (24), DFS
(5735 - 5835 @ 80), (30)
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 70535b8ee4d6..9b6e51450fc5 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1159,6 +1159,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
if (orig->aead) {
x->aead = xfrm_algo_aead_clone(orig->aead);
+ x->geniv = orig->geniv;
if (!x->aead)
goto error;
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 69f1ca1bc753..6f480978d7ca 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -2489,7 +2489,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
#ifdef CONFIG_COMPAT
if (is_compat_task())
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
#endif
type = nlh->nlmsg_type;
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index 3d05a4a0a218..d809e00d6b61 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -9,18 +9,7 @@ KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
-ifeq ($(cc-name),clang)
-CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \
- -mllvm \
- -asan-mapping-offset=$(KASAN_SHADOW_OFFSET) \
- -asan-stack=1 -asan-globals=1 \
- -asan-instrumentation-with-call-threshold=$(call_threshold))
-else
-CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \
- -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET) \
- --param asan-stack=1 --param asan-globals=1 \
- --param asan-instrumentation-with-call-threshold=$(call_threshold))
-endif
+cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
ifeq ($(call cc-option, $(CFLAGS_KASAN_MINIMAL) -Werror),)
ifneq ($(CONFIG_COMPILE_TEST),y)
@@ -28,12 +17,26 @@ ifeq ($(call cc-option, $(CFLAGS_KASAN_MINIMAL) -Werror),)
-fsanitize=kernel-address is not supported by compiler)
endif
else
- ifeq ($(CFLAGS_KASAN),)
- ifneq ($(CONFIG_COMPILE_TEST),y)
- $(warning CONFIG_KASAN: compiler does not support all options.\
- Trying minimal configuration)
- endif
- CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL)
- endif
+ # -fasan-shadow-offset fails without -fsanitize
+ CFLAGS_KASAN_SHADOW := $(call cc-option, -fsanitize=kernel-address \
+ -fasan-shadow-offset=$(KASAN_SHADOW_OFFSET), \
+ $(call cc-option, -fsanitize=kernel-address \
+ -mllvm -asan-mapping-offset=$(KASAN_SHADOW_OFFSET)))
+
+ ifeq ($(strip $(CFLAGS_KASAN_SHADOW)),)
+ CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL)
+ else
+ # Now add all the compiler specific options that are valid standalone
+ CFLAGS_KASAN := $(CFLAGS_KASAN_SHADOW) \
+ $(call cc-param,asan-globals=1) \
+ $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
+ $(call cc-param,asan-stack=1) \
+ $(call cc-param,asan-use-after-scope=1) \
+ $(call cc-param,asan-instrument-allocas=1)
+ endif
+
endif
+
+CFLAGS_KASAN_NOSANITIZE := -fno-builtin
+
endif
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index b666ac63a613..c2c65681cde1 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -126,7 +126,7 @@ endif
ifeq ($(CONFIG_KASAN),y)
_c_flags += $(if $(patsubst n%,, \
$(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \
- $(CFLAGS_KASAN))
+ $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE))
endif
ifeq ($(CONFIG_UBSAN),y)
diff --git a/scripts/fetch-latest-wireguard.sh b/scripts/fetch-latest-wireguard.sh
index dcc25dec71fb..3fd13273df1a 100755
--- a/scripts/fetch-latest-wireguard.sh
+++ b/scripts/fetch-latest-wireguard.sh
@@ -13,6 +13,6 @@ fi
rm -rf net/wireguard
mkdir -p net/wireguard
-curl -A "$USER_AGENT" -LsS "https://git.zx2c4.com/WireGuard/snapshot/WireGuard-${BASH_REMATCH[1]}.tar.xz" | tar -C "net/wireguard" -xJf - --strip-components=2 "WireGuard-${BASH_REMATCH[1]}/src"
+curl -A "$USER_AGENT" -LsS "https://git.zx2c4.com/WireGuard/snapshot/WireGuard-master.tar.xz" | tar -C "net/wireguard" -xJf - --strip-components=2 "WireGuard-master/src"
sed -i 's/tristate/bool/;s/default m/default y/;' net/wireguard/Kconfig
touch net/wireguard/.check
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 3bb7acf9e152..5c723833ec54 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -77,7 +77,7 @@ UTS_TRUNCATE="cut -b -$UTS_LEN"
echo \#define LINUX_COMPILE_HOST \"`echo $LINUX_COMPILE_HOST | $UTS_TRUNCATE`\"
if [ -z "$KBUILD_COMPILER_STRING" ]; then
- echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | grep ' version '`\"
+ echo \#define LINUX_COMPILER \"`$CC -v 2>&1 | grep ' version ' | sed 's/[[:space:]]*$//'`\"
else
echo \#define LINUX_COMPILER \"$KBUILD_COMPILER_STRING\"
fi;
diff --git a/sound/core/control_compat.c b/sound/core/control_compat.c
index 0608f216f359..ac0a40b9ba1e 100644
--- a/sound/core/control_compat.c
+++ b/sound/core/control_compat.c
@@ -400,8 +400,7 @@ static int snd_ctl_elem_add_compat(struct snd_ctl_file *file,
if (copy_from_user(&data->id, &data32->id, sizeof(data->id)) ||
copy_from_user(&data->type, &data32->type, 3 * sizeof(u32)))
goto error;
- if (get_user(data->owner, &data32->owner) ||
- get_user(data->type, &data32->type))
+ if (get_user(data->owner, &data32->owner))
goto error;
switch (data->type) {
case SNDRV_CTL_ELEM_TYPE_BOOLEAN:
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index e1512aea9f60..0c81e2657950 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -426,6 +426,8 @@ static int snd_pcm_ioctl_xfern_compat(struct snd_pcm_substream *substream,
return -ENOTTY;
if (substream->stream != dir)
return -EINVAL;
+ if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
+ return -EBADFD;
if ((ch = substream->runtime->channels) > 128)
return -EINVAL;
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index c01077305778..2431ca266b86 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2763,6 +2763,7 @@ static int snd_pcm_sync_ptr(struct snd_pcm_substream *substream,
sync_ptr.s.status.hw_ptr = status->hw_ptr;
sync_ptr.s.status.tstamp = status->tstamp;
sync_ptr.s.status.suspended_state = status->suspended_state;
+ sync_ptr.s.status.audio_tstamp = status->audio_tstamp;
snd_pcm_stream_unlock_irq(substream);
if (copy_to_user(_sync_ptr, &sync_ptr, sizeof(sync_ptr)))
return -EFAULT;
diff --git a/sound/core/seq/oss/seq_oss_event.c b/sound/core/seq/oss/seq_oss_event.c
index c3908862bc8b..86ca584c27b2 100644
--- a/sound/core/seq/oss/seq_oss_event.c
+++ b/sound/core/seq/oss/seq_oss_event.c
@@ -26,6 +26,7 @@
#include <sound/seq_oss_legacy.h>
#include "seq_oss_readq.h"
#include "seq_oss_writeq.h"
+#include <linux/nospec.h>
/*
@@ -287,10 +288,10 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
{
struct seq_oss_synthinfo *info;
- if (!snd_seq_oss_synth_is_valid(dp, dev))
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info)
return -ENXIO;
- info = &dp->synths[dev];
switch (info->arg.event_passing) {
case SNDRV_SEQ_OSS_PROCESS_EVENTS:
if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -298,6 +299,7 @@ note_on_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, st
return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
}
+ ch = array_index_nospec(ch, info->nr_voices);
if (note == 255 && info->ch[ch].note >= 0) {
/* volume control */
int type;
@@ -347,10 +349,10 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
{
struct seq_oss_synthinfo *info;
- if (!snd_seq_oss_synth_is_valid(dp, dev))
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info)
return -ENXIO;
- info = &dp->synths[dev];
switch (info->arg.event_passing) {
case SNDRV_SEQ_OSS_PROCESS_EVENTS:
if (! info->ch || ch < 0 || ch >= info->nr_voices) {
@@ -358,6 +360,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
return set_note_event(dp, dev, SNDRV_SEQ_EVENT_NOTEON, ch, note, vel, ev);
}
+ ch = array_index_nospec(ch, info->nr_voices);
if (info->ch[ch].note >= 0) {
note = info->ch[ch].note;
info->ch[ch].vel = 0;
@@ -381,7 +384,7 @@ note_off_event(struct seq_oss_devinfo *dp, int dev, int ch, int note, int vel, s
static int
set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note, int vel, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ if (!snd_seq_oss_synth_info(dp, dev))
return -ENXIO;
ev->type = type;
@@ -399,7 +402,7 @@ set_note_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int note,
static int
set_control_event(struct seq_oss_devinfo *dp, int dev, int type, int ch, int param, int val, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ if (!snd_seq_oss_synth_info(dp, dev))
return -ENXIO;
ev->type = type;
diff --git a/sound/core/seq/oss/seq_oss_midi.c b/sound/core/seq/oss/seq_oss_midi.c
index b30b2139e3f0..9debd1b8fd28 100644
--- a/sound/core/seq/oss/seq_oss_midi.c
+++ b/sound/core/seq/oss/seq_oss_midi.c
@@ -29,6 +29,7 @@
#include "../seq_lock.h"
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
/*
@@ -315,6 +316,7 @@ get_mididev(struct seq_oss_devinfo *dp, int dev)
{
if (dev < 0 || dev >= dp->max_mididev)
return NULL;
+ dev = array_index_nospec(dev, dp->max_mididev);
return get_mdev(dev);
}
diff --git a/sound/core/seq/oss/seq_oss_synth.c b/sound/core/seq/oss/seq_oss_synth.c
index b16dbef04174..ea545f9291b4 100644
--- a/sound/core/seq/oss/seq_oss_synth.c
+++ b/sound/core/seq/oss/seq_oss_synth.c
@@ -26,6 +26,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/nospec.h>
/*
* constants
@@ -339,17 +340,13 @@ snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp)
dp->max_synthdev = 0;
}
-/*
- * check if the specified device is MIDI mapped device
- */
-static int
-is_midi_dev(struct seq_oss_devinfo *dp, int dev)
+static struct seq_oss_synthinfo *
+get_synthinfo_nospec(struct seq_oss_devinfo *dp, int dev)
{
if (dev < 0 || dev >= dp->max_synthdev)
- return 0;
- if (dp->synths[dev].is_midi)
- return 1;
- return 0;
+ return NULL;
+ dev = array_index_nospec(dev, SNDRV_SEQ_OSS_MAX_SYNTH_DEVS);
+ return &dp->synths[dev];
}
/*
@@ -359,14 +356,20 @@ static struct seq_oss_synth *
get_synthdev(struct seq_oss_devinfo *dp, int dev)
{
struct seq_oss_synth *rec;
- if (dev < 0 || dev >= dp->max_synthdev)
- return NULL;
- if (! dp->synths[dev].opened)
+ struct seq_oss_synthinfo *info = get_synthinfo_nospec(dp, dev);
+
+ if (!info)
return NULL;
- if (dp->synths[dev].is_midi)
- return &midi_synth_dev;
- if ((rec = get_sdev(dev)) == NULL)
+ if (!info->opened)
return NULL;
+ if (info->is_midi) {
+ rec = &midi_synth_dev;
+ snd_use_lock_use(&rec->use_lock);
+ } else {
+ rec = get_sdev(dev);
+ if (!rec)
+ return NULL;
+ }
if (! rec->opened) {
snd_use_lock_free(&rec->use_lock);
return NULL;
@@ -402,10 +405,8 @@ snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev)
struct seq_oss_synth *rec;
struct seq_oss_synthinfo *info;
- if (snd_BUG_ON(dev < 0 || dev >= dp->max_synthdev))
- return;
- info = &dp->synths[dev];
- if (! info->opened)
+ info = get_synthinfo_nospec(dp, dev);
+ if (!info || !info->opened)
return;
if (info->sysex)
info->sysex->len = 0; /* reset sysex */
@@ -454,12 +455,14 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
const char __user *buf, int p, int c)
{
struct seq_oss_synth *rec;
+ struct seq_oss_synthinfo *info;
int rc;
- if (dev < 0 || dev >= dp->max_synthdev)
+ info = get_synthinfo_nospec(dp, dev);
+ if (!info)
return -ENXIO;
- if (is_midi_dev(dp, dev))
+ if (info->is_midi)
return 0;
if ((rec = get_synthdev(dp, dev)) == NULL)
return -ENXIO;
@@ -467,24 +470,25 @@ snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
if (rec->oper.load_patch == NULL)
rc = -ENXIO;
else
- rc = rec->oper.load_patch(&dp->synths[dev].arg, fmt, buf, p, c);
+ rc = rec->oper.load_patch(&info->arg, fmt, buf, p, c);
snd_use_lock_free(&rec->use_lock);
return rc;
}
/*
- * check if the device is valid synth device
+ * check if the device is valid synth device and return the synth info
*/
-int
-snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev)
+struct seq_oss_synthinfo *
+snd_seq_oss_synth_info(struct seq_oss_devinfo *dp, int dev)
{
struct seq_oss_synth *rec;
+
rec = get_synthdev(dp, dev);
if (rec) {
snd_use_lock_free(&rec->use_lock);
- return 1;
+ return get_synthinfo_nospec(dp, dev);
}
- return 0;
+ return NULL;
}
@@ -499,16 +503,18 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
int i, send;
unsigned char *dest;
struct seq_oss_synth_sysex *sysex;
+ struct seq_oss_synthinfo *info;
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info)
return -ENXIO;
- sysex = dp->synths[dev].sysex;
+ sysex = info->sysex;
if (sysex == NULL) {
sysex = kzalloc(sizeof(*sysex), GFP_KERNEL);
if (sysex == NULL)
return -ENOMEM;
- dp->synths[dev].sysex = sysex;
+ info->sysex = sysex;
}
send = 0;
@@ -553,10 +559,12 @@ snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
int
snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev))
+ struct seq_oss_synthinfo *info = snd_seq_oss_synth_info(dp, dev);
+
+ if (!info)
return -EINVAL;
- snd_seq_oss_fill_addr(dp, ev, dp->synths[dev].arg.addr.client,
- dp->synths[dev].arg.addr.port);
+ snd_seq_oss_fill_addr(dp, ev, info->arg.addr.client,
+ info->arg.addr.port);
return 0;
}
@@ -568,16 +576,18 @@ int
snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, unsigned long addr)
{
struct seq_oss_synth *rec;
+ struct seq_oss_synthinfo *info;
int rc;
- if (is_midi_dev(dp, dev))
+ info = get_synthinfo_nospec(dp, dev);
+ if (!info || info->is_midi)
return -ENXIO;
if ((rec = get_synthdev(dp, dev)) == NULL)
return -ENXIO;
if (rec->oper.ioctl == NULL)
rc = -ENXIO;
else
- rc = rec->oper.ioctl(&dp->synths[dev].arg, cmd, addr);
+ rc = rec->oper.ioctl(&info->arg, cmd, addr);
snd_use_lock_free(&rec->use_lock);
return rc;
}
@@ -589,7 +599,10 @@ snd_seq_oss_synth_ioctl(struct seq_oss_devinfo *dp, int dev, unsigned int cmd, u
int
snd_seq_oss_synth_raw_event(struct seq_oss_devinfo *dp, int dev, unsigned char *data, struct snd_seq_event *ev)
{
- if (! snd_seq_oss_synth_is_valid(dp, dev) || is_midi_dev(dp, dev))
+ struct seq_oss_synthinfo *info;
+
+ info = snd_seq_oss_synth_info(dp, dev);
+ if (!info || info->is_midi)
return -ENXIO;
ev->type = SNDRV_SEQ_EVENT_OSS;
memcpy(ev->data.raw8.d, data, 8);
diff --git a/sound/core/seq/oss/seq_oss_synth.h b/sound/core/seq/oss/seq_oss_synth.h
index 74ac55f166b6..a63f9e22974d 100644
--- a/sound/core/seq/oss/seq_oss_synth.h
+++ b/sound/core/seq/oss/seq_oss_synth.h
@@ -37,7 +37,8 @@ void snd_seq_oss_synth_cleanup(struct seq_oss_devinfo *dp);
void snd_seq_oss_synth_reset(struct seq_oss_devinfo *dp, int dev);
int snd_seq_oss_synth_load_patch(struct seq_oss_devinfo *dp, int dev, int fmt,
const char __user *buf, int p, int c);
-int snd_seq_oss_synth_is_valid(struct seq_oss_devinfo *dp, int dev);
+struct seq_oss_synthinfo *snd_seq_oss_synth_info(struct seq_oss_devinfo *dp,
+ int dev);
int snd_seq_oss_synth_sysex(struct seq_oss_devinfo *dp, int dev, unsigned char *buf,
struct snd_seq_event *ev);
int snd_seq_oss_synth_addr(struct seq_oss_devinfo *dp, int dev, struct snd_seq_event *ev);
diff --git a/sound/core/seq/seq_virmidi.c b/sound/core/seq/seq_virmidi.c
index 3b126af4a026..ef494ffc1369 100644
--- a/sound/core/seq/seq_virmidi.c
+++ b/sound/core/seq/seq_virmidi.c
@@ -174,12 +174,12 @@ static void snd_virmidi_output_trigger(struct snd_rawmidi_substream *substream,
}
return;
}
+ spin_lock_irqsave(&substream->runtime->lock, flags);
if (vmidi->event.type != SNDRV_SEQ_EVENT_NONE) {
if (snd_seq_kernel_client_dispatch(vmidi->client, &vmidi->event, in_atomic(), 0) < 0)
- return;
+ goto out;
vmidi->event.type = SNDRV_SEQ_EVENT_NONE;
}
- spin_lock_irqsave(&substream->runtime->lock, flags);
while (1) {
count = __snd_rawmidi_transmit_peek(substream, buf, sizeof(buf));
if (count <= 0)
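
The seq_virmidi.c hunk moves spin_lock_irqsave() ahead of the pending-event dispatch so the pending event and the rawmidi ring buffer are drained under a single acquisition of the runtime lock, and the early return becomes a goto to one unlock point (the unlock label itself sits outside the visible context). A hedged sketch of that lock-early, goto-out shape, using hypothetical names throughout:

    struct my_ctx {                                  /* hypothetical context */
            spinlock_t lock;
            bool have_pending;
    };

    static void my_output_trigger(struct my_ctx *ctx)
    {
            unsigned long flags;

            spin_lock_irqsave(&ctx->lock, flags);
            if (ctx->have_pending) {
                    if (dispatch_one(ctx) < 0)       /* dispatch_one() is assumed */
                            goto out;                /* error path still unlocks */
                    ctx->have_pending = false;
            }
            while (pull_next(ctx) > 0)               /* pull_next() is assumed */
                    ;                                /* drain the ring buffer */
    out:
            spin_unlock_irqrestore(&ctx->lock, flags);
    }
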
diff --git a/sound/drivers/aloop.c b/sound/drivers/aloop.c
index dc91002d1e0d..847f70348d4d 100644
--- a/sound/drivers/aloop.c
+++ b/sound/drivers/aloop.c
@@ -296,6 +296,8 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
cable->pause |= stream;
loopback_timer_stop(dpcm);
spin_unlock(&cable->lock);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ loopback_active_notify(dpcm);
break;
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
case SNDRV_PCM_TRIGGER_RESUME:
@@ -304,6 +306,8 @@ static int loopback_trigger(struct snd_pcm_substream *substream, int cmd)
cable->pause &= ~stream;
loopback_timer_start(dpcm);
spin_unlock(&cable->lock);
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ loopback_active_notify(dpcm);
break;
default:
return -EINVAL;
@@ -828,9 +832,11 @@ static int loopback_rate_shift_get(struct snd_kcontrol *kcontrol,
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&loopback->cable_lock);
ucontrol->value.integer.value[0] =
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].rate_shift;
+ mutex_unlock(&loopback->cable_lock);
return 0;
}
@@ -862,9 +868,11 @@ static int loopback_notify_get(struct snd_kcontrol *kcontrol,
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&loopback->cable_lock);
ucontrol->value.integer.value[0] =
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].notify;
+ mutex_unlock(&loopback->cable_lock);
return 0;
}
@@ -876,12 +884,14 @@ static int loopback_notify_put(struct snd_kcontrol *kcontrol,
int change = 0;
val = ucontrol->value.integer.value[0] ? 1 : 0;
+ mutex_lock(&loopback->cable_lock);
if (val != loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].notify) {
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].notify = val;
change = 1;
}
+ mutex_unlock(&loopback->cable_lock);
return change;
}
@@ -889,13 +899,18 @@ static int loopback_active_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
- struct loopback_cable *cable = loopback->cables
- [kcontrol->id.subdevice][kcontrol->id.device ^ 1];
+ struct loopback_cable *cable;
+
unsigned int val = 0;
- if (cable != NULL)
- val = (cable->running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ?
- 1 : 0;
+ mutex_lock(&loopback->cable_lock);
+ cable = loopback->cables[kcontrol->id.subdevice][kcontrol->id.device ^ 1];
+ if (cable != NULL) {
+ unsigned int running = cable->running ^ cable->pause;
+
+ val = (running & (1 << SNDRV_PCM_STREAM_PLAYBACK)) ? 1 : 0;
+ }
+ mutex_unlock(&loopback->cable_lock);
ucontrol->value.integer.value[0] = val;
return 0;
}
@@ -938,9 +953,11 @@ static int loopback_rate_get(struct snd_kcontrol *kcontrol,
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&loopback->cable_lock);
ucontrol->value.integer.value[0] =
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].rate;
+ mutex_unlock(&loopback->cable_lock);
return 0;
}
@@ -960,9 +977,11 @@ static int loopback_channels_get(struct snd_kcontrol *kcontrol,
{
struct loopback *loopback = snd_kcontrol_chip(kcontrol);
+ mutex_lock(&loopback->cable_lock);
ucontrol->value.integer.value[0] =
loopback->setup[kcontrol->id.subdevice]
[kcontrol->id.device].channels;
+ mutex_unlock(&loopback->cable_lock);
return 0;
}
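
Each aloop.c control accessor now takes loopback->cable_lock around its read or update, so reading setup[] or dereferencing cables[][] cannot race with a cable being torn down or reconfigured on another substream. The shape of such a getter, sketched with hypothetical names (struct my_loopback, cfg_lock, setup_rate); snd_kcontrol_chip() and the value layout are the standard ALSA control API:

    struct my_loopback {                             /* hypothetical chip data */
            struct mutex cfg_lock;
            int setup_rate;
    };

    static int my_rate_get(struct snd_kcontrol *kcontrol,
                           struct snd_ctl_elem_value *ucontrol)
    {
            struct my_loopback *lb = snd_kcontrol_chip(kcontrol);

            mutex_lock(&lb->cfg_lock);               /* serialize against reconfiguration */
            ucontrol->value.integer.value[0] = lb->setup_rate;
            mutex_unlock(&lb->cfg_lock);
            return 0;
    }
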
diff --git a/sound/drivers/opl3/opl3_synth.c b/sound/drivers/opl3/opl3_synth.c
index ddcc1a325a61..42920a243328 100644
--- a/sound/drivers/opl3/opl3_synth.c
+++ b/sound/drivers/opl3/opl3_synth.c
@@ -21,6 +21,7 @@
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/nospec.h>
#include <sound/opl3.h>
#include <sound/asound_fm.h>
@@ -448,7 +449,7 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
{
unsigned short reg_side;
unsigned char op_offset;
- unsigned char voice_offset;
+ unsigned char voice_offset, voice_op;
unsigned short opl3_reg;
unsigned char reg_val;
@@ -473,7 +474,9 @@ static int snd_opl3_set_voice(struct snd_opl3 * opl3, struct snd_dm_fm_voice * v
voice_offset = voice->voice - MAX_OPL2_VOICES;
}
/* Get register offset of operator */
- op_offset = snd_opl3_regmap[voice_offset][voice->op];
+ voice_offset = array_index_nospec(voice_offset, MAX_OPL2_VOICES);
+ voice_op = array_index_nospec(voice->op, 4);
+ op_offset = snd_opl3_regmap[voice_offset][voice_op];
reg_val = 0x00;
/* Set amplitude modulation (tremolo) effect */
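
The opl3 lookup indexes a two-dimensional register map with two values taken from the ioctl argument, so each index gets its own clamp, bounded by the corresponding array dimension (the patch uses the literal bounds MAX_OPL2_VOICES and 4). In the generic case, with table, row, col and val as placeholders:

    row = array_index_nospec(row, ARRAY_SIZE(table));
    col = array_index_nospec(col, ARRAY_SIZE(table[0]));
    val = table[row][col];
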
diff --git a/sound/pci/asihpi/hpimsginit.c b/sound/pci/asihpi/hpimsginit.c
index 7eb617175fde..a31a70dccecf 100644
--- a/sound/pci/asihpi/hpimsginit.c
+++ b/sound/pci/asihpi/hpimsginit.c
@@ -23,6 +23,7 @@
#include "hpi_internal.h"
#include "hpimsginit.h"
+#include <linux/nospec.h>
/* The actual message size for each object type */
static u16 msg_size[HPI_OBJ_MAXINDEX + 1] = HPI_MESSAGE_SIZE_BY_OBJECT;
@@ -39,10 +40,12 @@ static void hpi_init_message(struct hpi_message *phm, u16 object,
{
u16 size;
- if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
+ if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
+ object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
size = msg_size[object];
- else
+ } else {
size = sizeof(*phm);
+ }
memset(phm, 0, size);
phm->size = size;
@@ -66,10 +69,12 @@ void hpi_init_response(struct hpi_response *phr, u16 object, u16 function,
{
u16 size;
- if ((object > 0) && (object <= HPI_OBJ_MAXINDEX))
+ if ((object > 0) && (object <= HPI_OBJ_MAXINDEX)) {
+ object = array_index_nospec(object, HPI_OBJ_MAXINDEX + 1);
size = res_size[object];
- else
+ } else {
size = sizeof(*phr);
+ }
memset(phr, 0, sizeof(*phr));
phr->size = size;
diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c
index d17937b92331..7a32abbe0cef 100644
--- a/sound/pci/asihpi/hpioctl.c
+++ b/sound/pci/asihpi/hpioctl.c
@@ -33,6 +33,7 @@
#include <linux/stringify.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
+#include <linux/nospec.h>
#ifdef MODULE_FIRMWARE
MODULE_FIRMWARE("asihpi/dsp5000.bin");
@@ -182,7 +183,8 @@ long asihpi_hpi_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct hpi_adapter *pa = NULL;
if (hm->h.adapter_index < ARRAY_SIZE(adapters))
- pa = &adapters[hm->h.adapter_index];
+ pa = &adapters[array_index_nospec(hm->h.adapter_index,
+ ARRAY_SIZE(adapters))];
if (!pa || !pa->adapter || !pa->adapter->type) {
hpi_init_response(&hr->r0, hm->h.object,
diff --git a/sound/pci/hda/hda_hwdep.c b/sound/pci/hda/hda_hwdep.c
index 57df06e76968..cc009a4a3d1d 100644
--- a/sound/pci/hda/hda_hwdep.c
+++ b/sound/pci/hda/hda_hwdep.c
@@ -21,6 +21,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/compat.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include "hda_codec.h"
#include "hda_local.h"
@@ -51,7 +52,16 @@ static int get_wcap_ioctl(struct hda_codec *codec,
if (get_user(verb, &arg->verb))
return -EFAULT;
- res = get_wcaps(codec, verb >> 24);
+ /* open-code get_wcaps(verb>>24) with nospec */
+ verb >>= 24;
+ if (verb < codec->core.start_nid ||
+ verb >= codec->core.start_nid + codec->core.num_nodes) {
+ res = 0;
+ } else {
+ verb -= codec->core.start_nid;
+ verb = array_index_nospec(verb, codec->core.num_nodes);
+ res = codec->wcaps[verb];
+ }
if (put_user(res, &arg->res))
return -EFAULT;
return 0;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 3be91696ac35..d0b55c866370 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2072,6 +2072,8 @@ static struct snd_pci_quirk power_save_blacklist[] = {
SND_PCI_QUIRK(0x1849, 0x0c0c, "Asrock B85M-ITX", 0),
/* https://bugzilla.redhat.com/show_bug.cgi?id=1525104 */
SND_PCI_QUIRK(0x1043, 0x8733, "Asus Prime X370-Pro", 0),
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1572975 */
+ SND_PCI_QUIRK(0x17aa, 0x36a7, "Lenovo C50 All in one", 0),
/* https://bugzilla.kernel.org/show_bug.cgi?id=198611 */
SND_PCI_QUIRK(0x17aa, 0x2227, "Lenovo X1 Carbon 3rd Gen", 0),
{}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 8cb14e27988b..6a789278970e 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -329,6 +329,7 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
break;
case 0x10ec0225:
case 0x10ec0233:
+ case 0x10ec0235:
case 0x10ec0236:
case 0x10ec0255:
case 0x10ec0256:
@@ -6296,6 +6297,7 @@ static int patch_alc269(struct hda_codec *codec)
case 0x10ec0298:
spec->codec_variant = ALC269_TYPE_ALC298;
break;
+ case 0x10ec0235:
case 0x10ec0255:
spec->codec_variant = ALC269_TYPE_ALC255;
break;
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index a4a999a0317e..1a0c0d16a279 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -137,6 +137,7 @@
#include <linux/pci.h>
#include <linux/math64.h>
#include <linux/io.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -5692,40 +5693,43 @@ static int snd_hdspm_channel_info(struct snd_pcm_substream *substream,
struct snd_pcm_channel_info *info)
{
struct hdspm *hdspm = snd_pcm_substream_chip(substream);
+ unsigned int channel = info->channel;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- if (snd_BUG_ON(info->channel >= hdspm->max_channels_out)) {
+ if (snd_BUG_ON(channel >= hdspm->max_channels_out)) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: output channel out of range (%d)\n",
- info->channel);
+ channel);
return -EINVAL;
}
- if (hdspm->channel_map_out[info->channel] < 0) {
+ channel = array_index_nospec(channel, hdspm->max_channels_out);
+ if (hdspm->channel_map_out[channel] < 0) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: output channel %d mapped out\n",
- info->channel);
+ channel);
return -EINVAL;
}
- info->offset = hdspm->channel_map_out[info->channel] *
+ info->offset = hdspm->channel_map_out[channel] *
HDSPM_CHANNEL_BUFFER_BYTES;
} else {
- if (snd_BUG_ON(info->channel >= hdspm->max_channels_in)) {
+ if (snd_BUG_ON(channel >= hdspm->max_channels_in)) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: input channel out of range (%d)\n",
- info->channel);
+ channel);
return -EINVAL;
}
- if (hdspm->channel_map_in[info->channel] < 0) {
+ channel = array_index_nospec(channel, hdspm->max_channels_in);
+ if (hdspm->channel_map_in[channel] < 0) {
dev_info(hdspm->card->dev,
"snd_hdspm_channel_info: input channel %d mapped out\n",
- info->channel);
+ channel);
return -EINVAL;
}
- info->offset = hdspm->channel_map_in[info->channel] *
+ info->offset = hdspm->channel_map_in[channel] *
HDSPM_CHANNEL_BUFFER_BYTES;
}
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index fdbc0aa2776a..c253bdf92e36 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -26,6 +26,7 @@
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/io.h>
+#include <linux/nospec.h>
#include <sound/core.h>
#include <sound/control.h>
@@ -2036,9 +2037,10 @@ static int snd_rme9652_channel_info(struct snd_pcm_substream *substream,
if (snd_BUG_ON(info->channel >= RME9652_NCHANNELS))
return -EINVAL;
- if ((chn = rme9652->channel_map[info->channel]) < 0) {
+ chn = rme9652->channel_map[array_index_nospec(info->channel,
+ RME9652_NCHANNELS)];
+ if (chn < 0)
return -EINVAL;
- }
info->offset = chn * RME9652_CHANNEL_BUFFER_BYTES;
info->first = 0;
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
index 29c218013a07..b682c37a1537 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,6 +31,7 @@
#define WCD_PROCFS_ENTRY_MAX_LEN 16
#define WCD_934X_RAMDUMP_START_ADDR 0x20100000
#define WCD_934X_RAMDUMP_SIZE ((1024 * 1024) - 128)
+#define WCD_DSP_CNTL_MAX_COUNT 2
#define WCD_CNTL_MUTEX_LOCK(codec, lock) \
{ \
@@ -909,11 +910,11 @@ static ssize_t wcd_miscdev_write(struct file *filep, const char __user *ubuf,
{
struct wcd_dsp_cntl *cntl = container_of(filep->private_data,
struct wcd_dsp_cntl, miscdev);
- char val[count];
+ char val[WCD_DSP_CNTL_MAX_COUNT];
bool vote;
int ret = 0;
- if (count == 0 || count > 2) {
+ if (count == 0 || count > WCD_DSP_CNTL_MAX_COUNT) {
pr_err("%s: Invalid count = %zd\n", __func__, count);
ret = -EINVAL;
goto done;
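
The wcd934x change replaces a variable-length array sized directly by the write() count with a fixed-size buffer plus an explicit limit check, so an oversized count can no longer grow the kernel stack. A minimal sketch of that pattern, assuming a hypothetical two-byte command interface (MY_CMD_MAX_LEN, my_write() and the parsing are illustrative, not from this driver):

    #include <linux/fs.h>
    #include <linux/uaccess.h>

    #define MY_CMD_MAX_LEN 2                         /* hypothetical command length */

    static ssize_t my_write(struct file *filp, const char __user *ubuf,
                            size_t count, loff_t *ppos)
    {
            char buf[MY_CMD_MAX_LEN];                /* fixed size, not sized by count */

            if (count == 0 || count > sizeof(buf))
                    return -EINVAL;
            if (copy_from_user(buf, ubuf, count))
                    return -EFAULT;
            /* parse buf[0] here, e.g. '0' or '1' */
            return count;
    }
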
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 59f234e51971..e8adead8be00 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -143,6 +143,13 @@ static int fsl_esai_divisor_cal(struct snd_soc_dai *dai, bool tx, u32 ratio,
psr = ratio <= 256 * maxfp ? ESAI_xCCR_xPSR_BYPASS : ESAI_xCCR_xPSR_DIV8;
+ /* Do not loop-search if PM (1 ~ 256) alone can serve the ratio */
+ if (ratio <= 256) {
+ pm = ratio;
+ fp = 1;
+ goto out;
+ }
+
/* Set the max fluctuation -- 0.1% of the max devisor */
savesub = (psr ? 1 : 8) * 256 * maxfp / 1000;
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 4dd8ae0b634c..79a1a043fd38 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -247,6 +247,11 @@ static ssize_t audio_output_latency_dbgfs_read(struct file *file,
pr_err("%s: out_buffer is null\n", __func__);
return 0;
}
+ if (count < OUT_BUFFER_SIZE) {
+ pr_err("%s: read size %d exceeds buf size %zd\n", __func__,
+ OUT_BUFFER_SIZE, count);
+ return 0;
+ }
snprintf(out_buffer, OUT_BUFFER_SIZE, "%ld,%ld,%ld,%ld,%ld,%ld,",\
out_cold_tv.tv_sec, out_cold_tv.tv_usec, out_warm_tv.tv_sec,\
out_warm_tv.tv_usec, out_cont_tv.tv_sec, out_cont_tv.tv_usec);
@@ -300,6 +305,11 @@ static ssize_t audio_input_latency_dbgfs_read(struct file *file,
pr_err("%s: in_buffer is null\n", __func__);
return 0;
}
+ if (count < IN_BUFFER_SIZE) {
+ pr_err("%s: read size %d exceeds buf size %zd\n", __func__,
+ IN_BUFFER_SIZE, count);
+ return 0;
+ }
snprintf(in_buffer, IN_BUFFER_SIZE, "%ld,%ld,",\
in_cont_tv.tv_sec, in_cont_tv.tv_usec);
return simple_read_from_buffer(buf, IN_BUFFER_SIZE, ppos,
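
Both q6asm debugfs reads now bail out when the caller's count is smaller than the fixed-size record the handler is about to produce, instead of copying past the end of the user buffer. One hedged way to write such a read handler; the -EINVAL return, record size and format below are assumptions for illustration, not the exact behaviour of this driver, which returns 0:

    static ssize_t my_latency_read(struct file *file, char __user *ubuf,
                                   size_t count, loff_t *ppos)
    {
            char record[64];
            int len;

            if (count < sizeof(record))              /* refuse reads shorter than the record */
                    return -EINVAL;
            len = scnprintf(record, sizeof(record), "%ld,%ld\n", 0L, 0L);
            return simple_read_from_buffer(ubuf, count, ppos, record, len);
    }
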
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 86096532dfba..10a71c045178 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -283,6 +283,7 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
struct usb_interface_descriptor *altsd;
struct usb_interface *usb_iface;
int i, protocol;
+ int rest_bytes;
usb_iface = usb_ifnum_to_if(dev, ctrlif);
if (!usb_iface) {
@@ -316,7 +317,6 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
case UAC_VERSION_1: {
void *control_header;
struct uac1_ac_header_descriptor *h1;
- int rest_bytes;
control_header = snd_usb_find_csint_desc(host_iface->extra,
host_iface->extralen, NULL, UAC_HEADER);
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 2899797610e8..1d5f87bf4873 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -151,7 +151,7 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, int source_id)
/* If a clock source can't tell us whether it's valid, we assume it is */
if (!uac2_control_is_readable(cs_desc->bmControls,
- UAC2_CS_CONTROL_CLOCK_VALID - 1))
+ UAC2_CS_CONTROL_CLOCK_VALID))
return 1;
err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
@@ -372,7 +372,8 @@ static int set_sample_rate_v2(struct snd_usb_audio *chip, int iface,
return 0;
cs_desc = snd_usb_find_clock_source(chip->ctrl_intf, clock);
- writeable = uac2_control_is_writeable(cs_desc->bmControls, UAC2_CS_CONTROL_SAM_FREQ - 1);
+ writeable = uac2_control_is_writeable(cs_desc->bmControls,
+ UAC2_CS_CONTROL_SAM_FREQ);
if (writeable) {
data = cpu_to_le32(rate);
err = snd_usb_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC2_CS_CUR,
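
The clock.c hunks stop subtracting 1 from the control selector, which suggests that in this tree the readable/writeable helpers now take the 1-based UAC2 selector directly. The UAC2 spec lays out bmControls as two bits per control: the low bit of each pair means the control is present and readable, the high bit means it is host-programmable. A sketch of that decoding under that assumption, with helpers renamed so they are not mistaken for the kernel's own uac2_control_is_readable()/uac2_control_is_writeable():

    static bool my_uac2_readable(u32 bmcontrols, u8 selector)    /* selector is 1-based */
    {
            return (bmcontrols >> ((selector - 1) * 2)) & 0x1;
    }

    static bool my_uac2_writeable(u32 bmcontrols, u8 selector)
    {
            return (bmcontrols >> ((selector - 1) * 2)) & 0x2;
    }
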
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 01440d764536..e936f0d61f18 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -887,26 +887,27 @@ static int check_input_term(struct mixer_build *state, int id,
/* feature unit control information */
struct usb_feature_control_info {
+ int control;
const char *name;
- int type; /* data type for uac1 */
+ int type; /* data type for uac1 */
int type_uac2; /* data type for uac2 if different from uac1, else -1 */
};
static struct usb_feature_control_info audio_feature_info[] = {
- { "Mute", USB_MIXER_INV_BOOLEAN, -1 },
- { "Volume", USB_MIXER_S16, -1 },
- { "Tone Control - Bass", USB_MIXER_S8, -1 },
- { "Tone Control - Mid", USB_MIXER_S8, -1 },
- { "Tone Control - Treble", USB_MIXER_S8, -1 },
- { "Graphic Equalizer", USB_MIXER_S8, -1 }, /* FIXME: not implemeted yet */
- { "Auto Gain Control", USB_MIXER_BOOLEAN, -1 },
- { "Delay Control", USB_MIXER_U16, USB_MIXER_U32 },
- { "Bass Boost", USB_MIXER_BOOLEAN, -1 },
- { "Loudness", USB_MIXER_BOOLEAN, -1 },
+ { UAC_FU_MUTE, "Mute", USB_MIXER_INV_BOOLEAN, -1 },
+ { UAC_FU_VOLUME, "Volume", USB_MIXER_S16, -1 },
+ { UAC_FU_BASS, "Tone Control - Bass", USB_MIXER_S8, -1 },
+ { UAC_FU_MID, "Tone Control - Mid", USB_MIXER_S8, -1 },
+ { UAC_FU_TREBLE, "Tone Control - Treble", USB_MIXER_S8, -1 },
+ { UAC_FU_GRAPHIC_EQUALIZER, "Graphic Equalizer", USB_MIXER_S8, -1 }, /* FIXME: not implemented yet */
+ { UAC_FU_AUTOMATIC_GAIN, "Auto Gain Control", USB_MIXER_BOOLEAN, -1 },
+ { UAC_FU_DELAY, "Delay Control", USB_MIXER_U16, USB_MIXER_U32 },
+ { UAC_FU_BASS_BOOST, "Bass Boost", USB_MIXER_BOOLEAN, -1 },
+ { UAC_FU_LOUDNESS, "Loudness", USB_MIXER_BOOLEAN, -1 },
/* UAC2 specific */
- { "Input Gain Control", USB_MIXER_S16, -1 },
- { "Input Gain Pad Control", USB_MIXER_S16, -1 },
- { "Phase Inverter Control", USB_MIXER_BOOLEAN, -1 },
+ { UAC2_FU_INPUT_GAIN, "Input Gain Control", USB_MIXER_S16, -1 },
+ { UAC2_FU_INPUT_GAIN_PAD, "Input Gain Pad Control", USB_MIXER_S16, -1 },
+ { UAC2_FU_PHASE_INVERTER, "Phase Inverter Control", USB_MIXER_BOOLEAN, -1 },
};
/* private_free callback */
@@ -974,6 +975,14 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
}
break;
+ case USB_ID(0x0d8c, 0x0103):
+ if (!strcmp(kctl->id.name, "PCM Playback Volume")) {
+ usb_audio_info(chip,
+ "set volume quirk for CM102-A+/102S+\n");
+ cval->min = -256;
+ }
+ break;
+
case USB_ID(0x0471, 0x0101):
case USB_ID(0x0471, 0x0104):
case USB_ID(0x0471, 0x0105):
@@ -1244,6 +1253,21 @@ static int mixer_ctl_feature_put(struct snd_kcontrol *kcontrol,
return changed;
}
+/* get the current value from a mixer element */
+static int mixer_ctl_connector_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ struct usb_mixer_elem_info *cval = kcontrol->private_data;
+ int val, err;
+
+ err = snd_usb_get_cur_mix_value(cval, 0, 0, &val);
+ if (err < 0)
+ return filter_error(cval, err);
+ val = (val != 0);
+ ucontrol->value.integer.value[0] = val;
+ return 0;
+}
+
static struct snd_kcontrol_new usb_feature_unit_ctl = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "", /* will be filled later manually */
@@ -1261,6 +1285,16 @@ static struct snd_kcontrol_new usb_feature_unit_ctl_ro = {
.put = NULL,
};
+/* A UAC control mixer control */
+static struct snd_kcontrol_new usb_connector_ctl_ro = {
+ .iface = SNDRV_CTL_ELEM_IFACE_CARD,
+ .name = "", /* will be filled later manually */
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .info = snd_ctl_boolean_mono_info,
+ .get = mixer_ctl_connector_get,
+ .put = NULL,
+};
+
/*
* This symbol is exported in order to allow the mixer quirks to
* hook up to the standard feature unit control mechanism
@@ -1303,6 +1337,17 @@ static void check_no_speaker_on_headset(struct snd_kcontrol *kctl,
strlcpy(kctl->id.name, "Headphone", sizeof(kctl->id.name));
}
+static struct usb_feature_control_info *get_feature_control_info(int control)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(audio_feature_info); ++i) {
+ if (audio_feature_info[i].control == control)
+ return &audio_feature_info[i];
+ }
+ return NULL;
+}
+
static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
unsigned int ctl_mask, int control,
struct usb_audio_term *iterm, int unitid,
@@ -1324,8 +1369,6 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
else
nameid = uac_feature_unit_iFeature(desc);
- control++; /* change from zero-based to 1-based value */
-
if (control == UAC_FU_GRAPHIC_EQUALIZER) {
/* FIXME: not supported yet */
return;
@@ -1341,7 +1384,11 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
snd_usb_mixer_elem_init_std(&cval->head, state->mixer, unitid);
cval->control = control;
cval->cmask = ctl_mask;
- ctl_info = &audio_feature_info[control-1];
+ ctl_info = get_feature_control_info(control);
+ if (!ctl_info) {
+ kfree(cval);
+ return;
+ }
if (state->mixer->protocol == UAC_VERSION_1)
cval->val_type = ctl_info->type;
else /* UAC_VERSION_2 or UAC_VERSION_3*/
@@ -1467,6 +1514,55 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
snd_usb_mixer_add_control(&cval->head, kctl);
}
+static void get_connector_control_name(struct mixer_build *state,
+ struct usb_audio_term *term,
+ bool is_input, char *name, int name_size)
+{
+ int name_len = get_term_name(state, term, name, name_size, 0);
+
+ if (name_len == 0)
+ strlcpy(name, "Unknown", name_size);
+
+ /*
+ * sound/core/ctljack.c has a convention of naming jack controls
+ * by ending in " Jack". Make it slightly more useful by
+ * indicating Input or Output after the terminal name.
+ */
+ if (is_input)
+ strlcat(name, " - Input Jack", name_size);
+ else
+ strlcat(name, " - Output Jack", name_size);
+}
+
+/* Build a mixer control for a UAC connector control (jack-detect) */
+static void build_connector_control(struct mixer_build *state,
+ struct usb_audio_term *term, bool is_input)
+{
+ struct snd_kcontrol *kctl;
+ struct usb_mixer_elem_info *cval;
+
+ cval = kzalloc(sizeof(*cval), GFP_KERNEL);
+ if (!cval)
+ return;
+ snd_usb_mixer_elem_init_std(&cval->head, state->mixer, term->id);
+ cval->control = UAC2_TE_CONNECTOR;
+ cval->val_type = USB_MIXER_BOOLEAN;
+ cval->channels = term->channels;
+ cval->cmask = term->chconfig;
+ cval->min = 0;
+ cval->max = 1;
+ kctl = snd_ctl_new1(&usb_connector_ctl_ro, cval);
+ if (!kctl) {
+ usb_audio_err(state->chip, "cannot malloc kcontrol\n");
+ kfree(cval);
+ return;
+ }
+ get_connector_control_name(state, term, is_input, kctl->id.name,
+ sizeof(kctl->id.name));
+ kctl->private_free = snd_usb_mixer_elem_free;
+ snd_usb_mixer_add_control(&cval->head, kctl);
+}
+
static int find_num_channels(struct mixer_build *state, int dir)
{
int num_ch = -EINVAL, num, i, j, wMaxPacketSize;
@@ -1717,6 +1813,8 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
/* check all control types */
for (i = 0; i < 10; i++) {
unsigned int ch_bits = 0;
+ int control = audio_feature_info[i].control;
+
for (j = 0; j < channels; j++) {
unsigned int mask;
@@ -1732,26 +1830,29 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
* (for ease of programming).
*/
if (ch_bits & 1)
- build_feature_ctl(state, _ftr, ch_bits, i,
+ build_feature_ctl(state, _ftr, ch_bits, control,
&iterm, unitid, 0);
if (master_bits & (1 << i))
- build_feature_ctl(state, _ftr, 0, i, &iterm,
- unitid, 0);
+ build_feature_ctl(state, _ftr, 0, control,
+ &iterm, unitid, 0);
}
} else { /* UAC_VERSION_2 or UAC_VERSION_3*/
for (i = 0; i < ARRAY_SIZE(audio_feature_info); i++) {
unsigned int ch_bits = 0;
unsigned int ch_read_only = 0;
+ int control = audio_feature_info[i].control;
for (j = 0; j < channels; j++) {
unsigned int mask;
mask = snd_usb_combine_bytes(bmaControls +
csize * (j+1), csize);
- if (uac2_control_is_readable(mask, i)) {
+ if (uac2_control_is_readable(mask, control)) {
ch_bits |= (1 << j);
- if (!uac2_control_is_writeable(mask, i))
+ if (!uac2_control_is_writeable(mask,
+ control)) {
ch_read_only |= (1 << j);
+ }
}
}
@@ -1769,11 +1870,14 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
* (for ease of programming).
*/
if (ch_bits & 1)
- build_feature_ctl(state, _ftr, ch_bits, i,
+ build_feature_ctl(state, _ftr, ch_bits, control,
&iterm, unitid, ch_read_only);
- if (uac2_control_is_readable(master_bits, i))
- build_feature_ctl(state, _ftr, 0, i, &iterm, unitid,
- !uac2_control_is_writeable(master_bits, i));
+ if (uac2_control_is_readable(master_bits, control)) {
+ int wr = uac2_control_is_writeable(master_bits,
+ control);
+ build_feature_ctl(state, _ftr, 0, control,
+ &iterm, unitid, !wr);
+ }
}
}
@@ -1845,6 +1949,25 @@ static void build_mixer_unit_ctl(struct mixer_build *state,
snd_usb_mixer_add_control(&cval->head, kctl);
}
+static int parse_audio_input_terminal(struct mixer_build *state, int unitid,
+ void *raw_desc)
+{
+ struct usb_audio_term iterm;
+
+ struct uac2_input_terminal_descriptor *d = raw_desc;
+ /* determine the input source type and name */
+ check_input_term(state, d->bTerminalID, &iterm);
+
+ if (state->mixer->protocol == UAC_VERSION_2) {
+ /* Check for jack detection. */
+ if (uac2_control_is_readable(d->bmControls,
+ UAC2_TE_CONNECTOR)) {
+ build_connector_control(state, &iterm, true);
+ }
+ }
+ return 0;
+}
+
/*
* parse a mixer unit
*/
@@ -2405,6 +2528,7 @@ static int parse_audio_unit(struct mixer_build *state, int unitid)
switch (p1[2]) {
case UAC_INPUT_TERMINAL:
+ return parse_audio_input_terminal(state, unitid, p1);
case UAC2_CLOCK_SOURCE:
return 0; /* NOP */
case UAC_MIXER_UNIT:
@@ -2614,6 +2738,13 @@ static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
desc->bCSourceID);
if (err < 0 && err != -EINVAL)
return err;
+
+ if (uac2_control_is_readable(desc->bmControls,
+ UAC2_TE_CONNECTOR)) {
+ build_connector_control(&state,
+ &state.oterm,
+ false);
+ }
}
}
}
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 1f8fb0d904e0..f5cf23ffb35b 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -351,8 +351,11 @@ static struct usbmix_name_map bose_companion5_map[] = {
/*
* Dell usb dock with ALC4020 codec had a firmware problem where it got
* screwed up when zero volume is passed; just skip it as a workaround
+ *
+ * Also the extension unit gives an access error, so skip it as well.
*/
static const struct usbmix_name_map dell_alc4020_map[] = {
+ { 4, NULL }, /* extension unit */
{ 16, NULL },
{ 19, NULL },
{ 0 }
diff --git a/tools/testing/selftests/firmware/fw_filesystem.sh b/tools/testing/selftests/firmware/fw_filesystem.sh
index 61f9b1dbbd9b..63c310cdac09 100755
--- a/tools/testing/selftests/firmware/fw_filesystem.sh
+++ b/tools/testing/selftests/firmware/fw_filesystem.sh
@@ -29,9 +29,11 @@ test_finish()
echo "$OLD_TIMEOUT" >/sys/class/firmware/timeout
fi
if [ "$OLD_FWPATH" = "" ]; then
- OLD_FWPATH=" "
+ # A zero-length write won't work; write a null byte
+ printf '\000' >/sys/module/firmware_class/parameters/path
+ else
+ echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
fi
- echo -n "$OLD_FWPATH" >/sys/module/firmware_class/parameters/path
rm -f "$FW"
rmdir "$FWPATH"
}