This is the 5.4.275 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmYzoYAACgkQONu9yGCS
 aT5Gog/8DiBn13NgIqTvlQucOpiKcQrFS6ghuLjWUj+H+toaBPCoiAaSNzDgk3a6
 Vc7aW+/H6035Wpt6qmA9nr9VljS4JgHRlxPbp442zcXoaQabxXstt80s05EgC+ue
 MamCnirQwtkSaHYTQDHwumyr+J73b/EM6w0MHinmKJKbmLOpF6ejt87bBN8i1i4N
 0/atD79jq6l3wOIwNpJI5hIdc/OwNKnCd0M1lQL60XQgN+4Yc5Gup/BazSnVzTHN
 PvkyqJOPSHcOcTkaDsS6t6RRJ5fQcHDihEPILwQuyqDbasyVULaNWxuZjVUeXbIS
 rcgrlPTsJB1KjtZuGSkSCmj3FrmBBhs9Fd2HBEGE2UfShJa1c4O/Hbmq3sHvijUQ
 i8Oaakkx0Z4XKiXf5djwG0QNm4vT8M+nBDNTxw+/pzWg2L0uzlpVh87YwODsrtT2
 /WHQ0B1MFXncCDebweXqlxTWKRCoqR183MjMedDJt25smCKp1RZZR7pJb9c065Mn
 bg2benOfKUr/+Us5oiQboV3XZkZiPc39V/E6J9NjYE62EPht6Gb/yoblFKp9t+/f
 8MYPXqyAjGGXIzkqD7e1+YyOH6N/96EOZ2TrLhZiMb/3W6+dCjc1YeFtquNmaN9b
 pdYhzG3qlneflRNTz/30YKIyMQE5Zv+1DMR2mpBSD4hvSEYCmE4=
 =yaox
 -----END PGP SIGNATURE-----

Merge 5.4.275 into android11-5.4-lts

Changes in 5.4.275
	batman-adv: Avoid infinite loop trying to resize local TT
	Bluetooth: Fix memory leak in hci_req_sync_complete()
	nouveau: fix function cast warning
	net: openvswitch: fix unwanted error log on timeout policy probing
	u64_stats: fix u64_stats_init() for lockdep when used repeatedly in one file
	geneve: fix header validation in geneve[6]_xmit_skb
	ipv6: fib: hide unused 'pn' variable
	ipv4/route: avoid unused-but-set-variable warning
	ipv6: fix race condition between ipv6_get_ifaddr and ipv6_del_addr
	net/mlx5: Properly link new fs rules into the tree
	af_unix: Do not use atomic ops for unix_sk(sk)->inflight.
	af_unix: Fix garbage collector racing against connect()
	net: ena: Fix potential sign extension issue
	btrfs: qgroup: correctly model root qgroup rsv in convert
	drm/client: Fully protect modes[] with dev->mode_config.mutex
	vhost: Add smp_rmb() in vhost_vq_avail_empty()
	selftests: timers: Fix abs() warning in posix_timers test
	x86/apic: Force native_apic_mem_read() to use the MOV instruction
	btrfs: record delayed inode root in transaction
	selftests/ftrace: Limit length in subsystem-enable tests
	kprobes: Fix possible use-after-free issue on kprobe registration
	Revert "tracing/trigger: Fix to return error if failed to alloc snapshot"
	netfilter: nf_tables: Fix potential data-race in __nft_expr_type_get()
	tun: limit printing rate when illegal packet received by tun dev
	RDMA/rxe: Fix the problem "mutex_destroy missing"
	RDMA/mlx5: Fix port number for counter query in multi-port configuration
	drm: nv04: Fix out of bounds access
	clk: Remove prepare_lock hold assertion in __clk_release()
	clk: Mark 'all_lists' as const
	clk: remove extra empty line
	clk: Print an info line before disabling unused clocks
	clk: Initialize struct clk_core kref earlier
	clk: Get runtime PM before walking tree during disable_unused
	x86/cpufeatures: Fix dependencies for GFNI, VAES, and VPCLMULQDQ
	binder: check offset alignment in binder_get_object()
	comedi: vmk80xx: fix incomplete endpoint checking
	serial/pmac_zilog: Remove flawed mitigation for rx irq flood
	USB: serial: option: add Fibocom FM135-GL variants
	USB: serial: option: add support for Fibocom FM650/FG650
	USB: serial: option: add Lonsung U8300/U9300 product
	USB: serial: option: support Quectel EM060K sub-models
	USB: serial: option: add Rolling RW101-GL and RW135-GL support
	USB: serial: option: add Telit FN920C04 rmnet compositions
	Revert "usb: cdc-wdm: close race between read and workqueue"
	usb: dwc2: host: Fix dereference issue in DDMA completion flow.
	usb: Disable USB3 LPM at shutdown
	speakup: Avoid crash on very long word
	fs: sysfs: Fix reference leak in sysfs_break_active_protection()
	nouveau: fix instmem race condition around ptr stores
	nilfs2: fix OOB in nilfs_set_de_type
	KVM: async_pf: Cleanup kvm_setup_async_pf()
	arm64: dts: rockchip: fix alphabetical ordering RK3399 puma
	arm64: dts: rockchip: enable internal pull-up on PCIE_WAKE# for RK3399 Puma
	arm64: dts: mediatek: mt7622: fix IR nodename
	arm64: dts: mediatek: mt7622: fix ethernet controller "compatible"
	arm64: dts: mediatek: mt7622: drop "reset-names" from thermal block
	arm64: dts: mt2712: add ethernet device node
	arm64: dts: mediatek: mt2712: fix validation errors
	ARC: [plat-hsdk]: Remove misplaced interrupt-cells property
	vxlan: drop packets from invalid src-address
	mlxsw: core: Unregister EMAD trap using FORWARD action
	NFC: trf7970a: disable all regulators on removal
	net: usb: ax88179_178a: stop lying about skb->truesize
	net: gtp: Fix Use-After-Free in gtp_dellink
	ipvs: Fix checksumming on GSO of SCTP packets
	net: openvswitch: Fix Use-After-Free in ovs_ct_exit
	mlxsw: spectrum_acl_tcam: Fix race during rehash delayed work
	mlxsw: spectrum_acl_tcam: Fix possible use-after-free during activity update
	mlxsw: spectrum_acl_tcam: Fix possible use-after-free during rehash
	mlxsw: spectrum_acl_tcam: Rate limit error message
	mlxsw: spectrum_acl_tcam: Fix memory leak during rehash
	mlxsw: spectrum_acl_tcam: Fix warning during rehash
	mlxsw: spectrum_acl_tcam: Fix incorrect list API usage
	mlxsw: spectrum_acl_tcam: Fix memory leak when canceling rehash work
	i40e: Do not use WQ_MEM_RECLAIM flag for workqueue
	iavf: Fix TC config comparison with existing adapter TC config
	af_unix: Suppress false-positive lockdep splat for spin_lock() in __unix_gc().
	serial: core: Provide port lock wrappers
	serial: mxs-auart: add spinlock around changing cts state
	drm/amdgpu: restrict bo mapping within gpu address limits
	amdgpu: validate offset_in_bo of drm_amdgpu_gem_va
	drm/amdgpu: validate the parameters of bo mapping operations more clearly
	Revert "crypto: api - Disallow identical driver names"
	net/mlx5e: Fix a race in command alloc flow
	tracing: Show size of requested perf buffer
	tracing: Increase PERF_MAX_TRACE_SIZE to handle Sentinel1 and docker together
	Bluetooth: Fix type of len in {l2cap,sco}_sock_getsockopt_old()
	Bluetooth: btusb: Add Realtek RTL8852BE support ID 0x0bda:0x4853
	btrfs: fix information leak in btrfs_ioctl_logical_to_ino()
	arm64: dts: rockchip: enable internal pull-up for Q7_THRM# on RK3399 Puma
	drm/amdgpu: Fix leak when GPU memory allocation fails
	irqchip/gic-v3-its: Prevent double free on error
	ethernet: Add helper for assigning packet type when dest address does not match device address
	net: b44: set pause params only when interface is up
	stackdepot: respect __GFP_NOLOCKDEP allocation flag
	mtd: diskonchip: work around ubsan link failure
	tcp: Clean up kernel listener's reqsk in inet_twsk_purge()
	tcp: Fix NEW_SYN_RECV handling in inet_twsk_purge()
	dmaengine: owl: fix register access functions
	idma64: Don't try to serve interrupts when device is powered off
	i2c: smbus: fix NULL function pointer dereference
	HID: i2c-hid: remove I2C_HID_READ_PENDING flag to prevent lock-up
	bounds: Use the right number of bits for power-of-two CONFIG_NR_CPUS
	dm: limit the number of targets and parameter size area
	udp: preserve the connected status if only UDP cmsg
	serial: core: fix kernel-doc for uart_port_unlock_irqrestore()
	Linux 5.4.275

Change-Id: I0ebcbc604c4ef7fb2b1ce4d7a530f57e27a74cb6
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 65d8957031 (Greg Kroah-Hartman <gregkh@google.com>, 2024-05-15 16:00:38 +00:00)
89 changed files with 904 additions and 338 deletions

Makefile

@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 274
SUBLEVEL = 275
EXTRAVERSION =
NAME = Kleptomaniac Octopus

arch/arc/boot/dts/hsdk.dts

@ -205,7 +205,6 @@
};
gmac: ethernet@8000 {
#interrupt-cells = <1>;
compatible = "snps,dwmac";
reg = <0x8000 0x2000>;
interrupts = <10>;

arch/arm64/boot/dts/mediatek/mt2712-evb.dts

@ -105,15 +105,89 @@
proc-supply = <&cpus_fixed_vproc1>;
};
&eth {
phy-mode ="rgmii-rxid";
phy-handle = <&ethernet_phy0>;
mediatek,tx-delay-ps = <1530>;
snps,reset-gpio = <&pio 87 GPIO_ACTIVE_LOW>;
pinctrl-names = "default", "sleep";
pinctrl-0 = <&eth_default>;
pinctrl-1 = <&eth_sleep>;
status = "okay";
mdio {
compatible = "snps,dwmac-mdio";
#address-cells = <1>;
#size-cells = <0>;
ethernet_phy0: ethernet-phy@5 {
compatible = "ethernet-phy-id0243.0d90";
reg = <0x5>;
};
};
};
&pio {
usb0_id_pins_float: usb0_iddig {
eth_default: eth-default-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GBE_TXD3>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GBE_TXD2>,
<MT2712_PIN_73_GBE_TXD1__FUNC_GBE_TXD1>,
<MT2712_PIN_74_GBE_TXD0__FUNC_GBE_TXD0>,
<MT2712_PIN_75_GBE_TXC__FUNC_GBE_TXC>,
<MT2712_PIN_76_GBE_TXEN__FUNC_GBE_TXEN>;
drive-strength = <MTK_DRIVE_8mA>;
};
rx_pins {
pinmux = <MT2712_PIN_78_GBE_RXD3__FUNC_GBE_RXD3>,
<MT2712_PIN_79_GBE_RXD2__FUNC_GBE_RXD2>,
<MT2712_PIN_80_GBE_RXD1__FUNC_GBE_RXD1>,
<MT2712_PIN_81_GBE_RXD0__FUNC_GBE_RXD0>,
<MT2712_PIN_82_GBE_RXDV__FUNC_GBE_RXDV>,
<MT2712_PIN_84_GBE_RXC__FUNC_GBE_RXC>;
input-enable;
};
mdio_pins {
pinmux = <MT2712_PIN_85_GBE_MDC__FUNC_GBE_MDC>,
<MT2712_PIN_86_GBE_MDIO__FUNC_GBE_MDIO>;
drive-strength = <MTK_DRIVE_8mA>;
input-enable;
};
};
eth_sleep: eth-sleep-pins {
tx_pins {
pinmux = <MT2712_PIN_71_GBE_TXD3__FUNC_GPIO71>,
<MT2712_PIN_72_GBE_TXD2__FUNC_GPIO72>,
<MT2712_PIN_73_GBE_TXD1__FUNC_GPIO73>,
<MT2712_PIN_74_GBE_TXD0__FUNC_GPIO74>,
<MT2712_PIN_75_GBE_TXC__FUNC_GPIO75>,
<MT2712_PIN_76_GBE_TXEN__FUNC_GPIO76>;
};
rx_pins {
pinmux = <MT2712_PIN_78_GBE_RXD3__FUNC_GPIO78>,
<MT2712_PIN_79_GBE_RXD2__FUNC_GPIO79>,
<MT2712_PIN_80_GBE_RXD1__FUNC_GPIO80>,
<MT2712_PIN_81_GBE_RXD0__FUNC_GPIO81>,
<MT2712_PIN_82_GBE_RXDV__FUNC_GPIO82>,
<MT2712_PIN_84_GBE_RXC__FUNC_GPIO84>;
input-disable;
};
mdio_pins {
pinmux = <MT2712_PIN_85_GBE_MDC__FUNC_GPIO85>,
<MT2712_PIN_86_GBE_MDIO__FUNC_GPIO86>;
input-disable;
bias-disable;
};
};
usb0_id_pins_float: usb0-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_12_IDDIG_P0__FUNC_IDDIG_A>;
bias-pull-up;
};
};
usb1_id_pins_float: usb1_iddig {
usb1_id_pins_float: usb1-iddig-pins {
pins_iddig {
pinmux = <MT2712_PIN_14_IDDIG_P1__FUNC_IDDIG_B>;
bias-pull-up;

arch/arm64/boot/dts/mediatek/mt2712e.dtsi

@ -249,10 +249,11 @@
#clock-cells = <1>;
};
infracfg: syscon@10001000 {
infracfg: clock-controller@10001000 {
compatible = "mediatek,mt2712-infracfg", "syscon";
reg = <0 0x10001000 0 0x1000>;
#clock-cells = <1>;
#reset-cells = <1>;
};
pericfg: syscon@10003000 {
@ -632,6 +633,71 @@
status = "disabled";
};
stmmac_axi_setup: stmmac-axi-config {
snps,wr_osr_lmt = <0x7>;
snps,rd_osr_lmt = <0x7>;
snps,blen = <0 0 0 0 16 8 4>;
};
mtl_rx_setup: rx-queues-config {
snps,rx-queues-to-use = <1>;
snps,rx-sched-sp;
queue0 {
snps,dcb-algorithm;
snps,map-to-dma-channel = <0x0>;
snps,priority = <0x0>;
};
};
mtl_tx_setup: tx-queues-config {
snps,tx-queues-to-use = <3>;
snps,tx-sched-wrr;
queue0 {
snps,weight = <0x10>;
snps,dcb-algorithm;
snps,priority = <0x0>;
};
queue1 {
snps,weight = <0x11>;
snps,dcb-algorithm;
snps,priority = <0x1>;
};
queue2 {
snps,weight = <0x12>;
snps,dcb-algorithm;
snps,priority = <0x2>;
};
};
eth: ethernet@1101c000 {
compatible = "mediatek,mt2712-gmac";
reg = <0 0x1101c000 0 0x1300>;
interrupts = <GIC_SPI 237 IRQ_TYPE_LEVEL_LOW>;
interrupt-names = "macirq";
mac-address = [00 55 7b b5 7d f7];
clock-names = "axi",
"apb",
"mac_main",
"ptp_ref";
clocks = <&pericfg CLK_PERI_GMAC>,
<&pericfg CLK_PERI_GMAC_PCLK>,
<&topckgen CLK_TOP_ETHER_125M_SEL>,
<&topckgen CLK_TOP_ETHER_50M_SEL>;
assigned-clocks = <&topckgen CLK_TOP_ETHER_125M_SEL>,
<&topckgen CLK_TOP_ETHER_50M_SEL>;
assigned-clock-parents = <&topckgen CLK_TOP_ETHERPLL_125M>,
<&topckgen CLK_TOP_APLL1_D3>;
power-domains = <&scpsys MT2712_POWER_DOMAIN_AUDIO>;
mediatek,pericfg = <&pericfg>;
snps,axi-config = <&stmmac_axi_setup>;
snps,mtl-rx-config = <&mtl_rx_setup>;
snps,mtl-tx-config = <&mtl_tx_setup>;
snps,txpbl = <1>;
snps,rxpbl = <1>;
clk_csr = <0>;
status = "disabled";
};
mmc0: mmc@11230000 {
compatible = "mediatek,mt2712-mmc";
reg = <0 0x11230000 0 0x1000>;

arch/arm64/boot/dts/mediatek/mt7622.dtsi

@ -244,7 +244,7 @@
clock-names = "hif_sel";
};
cir: cir@10009000 {
cir: ir-receiver@10009000 {
compatible = "mediatek,mt7622-cir";
reg = <0 0x10009000 0 0x1000>;
interrupts = <GIC_SPI 175 IRQ_TYPE_LEVEL_LOW>;
@ -507,7 +507,6 @@
<&pericfg CLK_PERI_AUXADC_PD>;
clock-names = "therm", "auxadc";
resets = <&pericfg MT7622_PERI_THERM_SW_RST>;
reset-names = "therm";
mediatek,auxadc = <&auxadc>;
mediatek,apmixedsys = <&apmixedsys>;
nvmem-cells = <&thermal_calibration>;
@ -901,9 +900,7 @@
};
eth: ethernet@1b100000 {
compatible = "mediatek,mt7622-eth",
"mediatek,mt2701-eth",
"syscon";
compatible = "mediatek,mt7622-eth";
reg = <0 0x1b100000 0 0x20000>;
interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 224 IRQ_TYPE_LEVEL_LOW>,

arch/arm64/boot/dts/rockchip/rk3399-puma.dtsi

@ -425,16 +425,22 @@
gpio1830-supply = <&vcc_1v8>;
};
&pmu_io_domains {
status = "okay";
pmu1830-supply = <&vcc_1v8>;
};
&pwm2 {
status = "okay";
&pcie_clkreqn_cpm {
rockchip,pins =
<2 RK_PD2 RK_FUNC_GPIO &pcfg_pull_up>;
};
&pinctrl {
pinctrl-names = "default";
pinctrl-0 = <&q7_thermal_pin>;
gpios {
q7_thermal_pin: q7-thermal-pin {
rockchip,pins =
<0 RK_PA3 RK_FUNC_GPIO &pcfg_pull_up>;
};
};
i2c8 {
i2c8_xfer_a: i2c8-xfer {
rockchip,pins =
@ -465,6 +471,15 @@
};
};
&pmu_io_domains {
status = "okay";
pmu1830-supply = <&vcc_1v8>;
};
&pwm2 {
status = "okay";
};
&sdhci {
/*
* Signal integrity isn't great at 200MHz but 100MHz has proven stable

arch/x86/include/asm/apic.h

@ -12,6 +12,7 @@
#include <asm/mpspec.h>
#include <asm/msr.h>
#include <asm/hardirq.h>
#include <asm/io.h>
#define ARCH_APICTIMER_STOPS_ON_C3 1
@ -111,7 +112,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v)
static inline u32 native_apic_mem_read(u32 reg)
{
return *((volatile u32 *)(APIC_BASE + reg));
return readl((void __iomem *)(APIC_BASE + reg));
}
extern void native_apic_wait_icr_idle(void);

arch/x86/kernel/cpu/cpuid-deps.c

@ -44,7 +44,10 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_F16C, X86_FEATURE_XMM2, },
{ X86_FEATURE_AES, X86_FEATURE_XMM2 },
{ X86_FEATURE_SHA_NI, X86_FEATURE_XMM2 },
{ X86_FEATURE_GFNI, X86_FEATURE_XMM2 },
{ X86_FEATURE_FMA, X86_FEATURE_AVX },
{ X86_FEATURE_VAES, X86_FEATURE_AVX },
{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX },
{ X86_FEATURE_AVX2, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512F, X86_FEATURE_AVX, },
{ X86_FEATURE_AVX512IFMA, X86_FEATURE_AVX512F },
@ -56,9 +59,6 @@ static const struct cpuid_dep cpuid_deps[] = {
{ X86_FEATURE_AVX512VL, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512VBMI, X86_FEATURE_AVX512F },
{ X86_FEATURE_AVX512_VBMI2, X86_FEATURE_AVX512VL },
{ X86_FEATURE_GFNI, X86_FEATURE_AVX512VL },
{ X86_FEATURE_VAES, X86_FEATURE_AVX512VL },
{ X86_FEATURE_VPCLMULQDQ, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_VNNI, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_BITALG, X86_FEATURE_AVX512VL },
{ X86_FEATURE_AVX512_4VNNIW, X86_FEATURE_AVX512F },

crypto/algapi.c

@ -217,7 +217,6 @@ static struct crypto_larval *__crypto_register_alg(struct crypto_alg *alg)
}
if (!strcmp(q->cra_driver_name, alg->cra_name) ||
!strcmp(q->cra_driver_name, alg->cra_driver_name) ||
!strcmp(q->cra_name, alg->cra_driver_name))
goto err;
}

drivers/android/binder.c

@ -1819,8 +1819,10 @@ static size_t binder_get_object(struct binder_proc *proc,
size_t object_size = 0;
read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
if (offset > buffer->data_size || read_size < sizeof(*hdr))
if (offset > buffer->data_size || read_size < sizeof(*hdr) ||
!IS_ALIGNED(offset, sizeof(u32)))
return 0;
if (u) {
if (copy_from_user(object, u + offset, read_size))
return 0;

drivers/bluetooth/btusb.c

@ -360,6 +360,8 @@ static const struct usb_device_id blacklist_table[] = {
/* Realtek 8852BE Bluetooth devices */
{ USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x4853), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0bda, 0xb85b), .driver_info = BTUSB_REALTEK |

drivers/clk/clk.c

@ -37,7 +37,11 @@ static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);
static struct hlist_head *all_lists[] = {
/* List of registered clks that use runtime PM */
static HLIST_HEAD(clk_rpm_list);
static DEFINE_MUTEX(clk_rpm_list_lock);
static const struct hlist_head *all_lists[] = {
&clk_root_list,
&clk_orphan_list,
NULL,
@ -59,6 +63,7 @@ struct clk_core {
struct clk_hw *hw;
struct module *owner;
struct device *dev;
struct hlist_node rpm_node;
struct device_node *of_node;
struct clk_core *parent;
struct clk_parent_map *parents;
@ -131,6 +136,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
pm_runtime_put_sync(core->dev);
}
/**
* clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
*
* Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
* that disabling unused clks avoids a deadlock where a device is runtime PM
* resuming/suspending and the runtime PM callback is trying to grab the
* prepare_lock for something like clk_prepare_enable() while
* clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
* PM resume/suspend the device as well.
*
* Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
* success. Otherwise the lock is released on failure.
*
* Return: 0 on success, negative errno otherwise.
*/
static int clk_pm_runtime_get_all(void)
{
int ret;
struct clk_core *core, *failed;
/*
* Grab the list lock to prevent any new clks from being registered
* or unregistered until clk_pm_runtime_put_all().
*/
mutex_lock(&clk_rpm_list_lock);
/*
* Runtime PM "get" all the devices that are needed for the clks
* currently registered. Do this without holding the prepare_lock, to
* avoid the deadlock.
*/
hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
ret = clk_pm_runtime_get(core);
if (ret) {
failed = core;
pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
dev_name(failed->dev), failed->name);
goto err;
}
}
return 0;
err:
hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
if (core == failed)
break;
clk_pm_runtime_put(core);
}
mutex_unlock(&clk_rpm_list_lock);
return ret;
}
/**
* clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
*
* Put the runtime PM references taken in clk_pm_runtime_get_all() and release
* the 'clk_rpm_list_lock'.
*/
static void clk_pm_runtime_put_all(void)
{
struct clk_core *core;
hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
clk_pm_runtime_put(core);
mutex_unlock(&clk_rpm_list_lock);
}
static void clk_pm_runtime_init(struct clk_core *core)
{
struct device *dev = core->dev;
if (dev && pm_runtime_enabled(dev)) {
core->rpm_enabled = true;
mutex_lock(&clk_rpm_list_lock);
hlist_add_head(&core->rpm_node, &clk_rpm_list);
mutex_unlock(&clk_rpm_list_lock);
}
}
/*** locking ***/
static void clk_prepare_lock(void)
{
@ -1243,9 +1331,6 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
if (core->flags & CLK_IGNORE_UNUSED)
return;
if (clk_pm_runtime_get(core))
return;
if (clk_core_is_prepared(core)) {
trace_clk_unprepare(core);
if (core->ops->unprepare_unused)
@ -1254,8 +1339,6 @@ static void clk_unprepare_unused_subtree(struct clk_core *core)
core->ops->unprepare(core->hw);
trace_clk_unprepare_complete(core);
}
clk_pm_runtime_put(core);
}
static void clk_disable_unused_subtree(struct clk_core *core)
@ -1275,9 +1358,6 @@ static void clk_disable_unused_subtree(struct clk_core *core)
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
if (clk_pm_runtime_get(core))
goto unprepare_out;
flags = clk_enable_lock();
if (core->enable_count)
@ -1302,8 +1382,6 @@ static void clk_disable_unused_subtree(struct clk_core *core)
unlock_out:
clk_enable_unlock(flags);
clk_pm_runtime_put(core);
unprepare_out:
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_disable_unprepare(core->parent);
}
@ -1319,12 +1397,22 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
static int clk_disable_unused(void)
{
struct clk_core *core;
int ret;
if (clk_ignore_unused) {
pr_warn("clk: Not disabling unused clocks\n");
return 0;
}
pr_info("clk: Disabling unused clocks\n");
ret = clk_pm_runtime_get_all();
if (ret)
return ret;
/*
* Grab the prepare lock to keep the clk topology stable while iterating
* over clks.
*/
clk_prepare_lock();
hlist_for_each_entry(core, &clk_root_list, child_node)
@ -1341,6 +1429,8 @@ static int clk_disable_unused(void)
clk_prepare_unlock();
clk_pm_runtime_put_all();
return 0;
}
late_initcall_sync(clk_disable_unused);
@ -3590,8 +3680,6 @@ static int __clk_core_init(struct clk_core *core)
clk_core_hold_state(core);
clk_core_reparent_orphans_nolock();
kref_init(&core->ref);
out:
clk_pm_runtime_put(core);
unlock:
@ -3801,6 +3889,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
kfree(core->parents);
}
/* Free memory allocated for a struct clk_core */
static void __clk_release(struct kref *ref)
{
struct clk_core *core = container_of(ref, struct clk_core, ref);
if (core->rpm_enabled) {
mutex_lock(&clk_rpm_list_lock);
hlist_del(&core->rpm_node);
mutex_unlock(&clk_rpm_list_lock);
}
clk_core_free_parent_map(core);
kfree_const(core->name);
kfree(core);
}
static struct clk *
__clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
{
@ -3821,6 +3925,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
goto fail_out;
}
kref_init(&core->ref);
core->name = kstrdup_const(init->name, GFP_KERNEL);
if (!core->name) {
ret = -ENOMEM;
@ -3833,9 +3939,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
}
core->ops = init->ops;
if (dev && pm_runtime_enabled(dev))
core->rpm_enabled = true;
core->dev = dev;
clk_pm_runtime_init(core);
core->of_node = np;
if (dev && dev->driver)
core->owner = dev->driver->owner;
@ -3875,12 +3980,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
hw->clk = NULL;
fail_create_clk:
clk_core_free_parent_map(core);
fail_parents:
fail_ops:
kfree_const(core->name);
fail_name:
kfree(core);
kref_put(&core->ref, __clk_release);
fail_out:
return ERR_PTR(ret);
}
@ -3960,18 +4063,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(of_clk_hw_register);
/* Free memory allocated for a clock. */
static void __clk_release(struct kref *ref)
{
struct clk_core *core = container_of(ref, struct clk_core, ref);
lockdep_assert_held(&prepare_lock);
clk_core_free_parent_map(core);
kfree_const(core->name);
kfree(core);
}
/*
* Empty clk_ops for unregistered clocks. These are used temporarily
* after clk_unregister() was called on a clock and until last clock
@ -4024,7 +4115,7 @@ static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
/* Remove this clk from all parent caches */
static void clk_core_evict_parent_cache(struct clk_core *core)
{
struct hlist_head **lists;
const struct hlist_head **lists;
struct clk_core *root;
lockdep_assert_held(&prepare_lock);

drivers/dma/idma64.c

@ -167,6 +167,10 @@ static irqreturn_t idma64_irq(int irq, void *dev)
u32 status_err;
unsigned short i;
/* Since IRQ may be shared, check if DMA controller is powered on */
if (status == GENMASK(31, 0))
return IRQ_NONE;
dev_vdbg(idma64->dma.dev, "%s: status=%#x\n", __func__, status);
/* Check if we have any interrupt from the DMA controller */

drivers/dma/owl-dma.c

@ -238,7 +238,7 @@ static void pchan_update(struct owl_dma_pchan *pchan, u32 reg,
else
regval &= ~val;
writel(val, pchan->base + reg);
writel(regval, pchan->base + reg);
}
static void pchan_writel(struct owl_dma_pchan *pchan, u32 reg, u32 data)
@ -262,7 +262,7 @@ static void dma_update(struct owl_dma *od, u32 reg, u32 val, bool state)
else
regval &= ~val;
writel(val, od->base + reg);
writel(regval, od->base + reg);
}
static void dma_writel(struct owl_dma *od, u32 reg, u32 data)

drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c

@ -1204,6 +1204,7 @@ allocate_init_user_pages_failed:
err_bo_create:
unreserve_mem_limit(adev, size, alloc_domain, !!sg);
err_reserve_limit:
amdgpu_sync_free(&(*mem)->sync);
mutex_destroy(&(*mem)->lock);
kfree(*mem);
err:

drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@ -2095,6 +2095,37 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
/* Validate operation parameters to prevent potential abuse */
static int amdgpu_vm_verify_parameters(struct amdgpu_device *adev,
struct amdgpu_bo *bo,
uint64_t saddr,
uint64_t offset,
uint64_t size)
{
uint64_t tmp, lpfn;
if (saddr & AMDGPU_GPU_PAGE_MASK
|| offset & AMDGPU_GPU_PAGE_MASK
|| size & AMDGPU_GPU_PAGE_MASK)
return -EINVAL;
if (check_add_overflow(saddr, size, &tmp)
|| check_add_overflow(offset, size, &tmp)
|| size == 0 /* which also leads to end < begin */)
return -EINVAL;
/* make sure object fit at this offset */
if (bo && offset + size > amdgpu_bo_size(bo))
return -EINVAL;
/* Ensure last pfn not exceed max_pfn */
lpfn = (saddr + size - 1) >> AMDGPU_GPU_PAGE_SHIFT;
if (lpfn >= adev->vm_manager.max_pfn)
return -EINVAL;
return 0;
}
/**
* amdgpu_vm_bo_map - map bo inside a vm
*
@ -2121,20 +2152,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
uint64_t eaddr;
int r;
/* validate the parameters */
if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
size == 0 || size & ~PAGE_MASK)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if (saddr >= eaddr ||
(bo && offset + size > amdgpu_bo_size(bo)))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
tmp = amdgpu_vm_it_iter_first(&vm->va, saddr, eaddr);
if (tmp) {
@ -2187,16 +2212,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
uint64_t eaddr;
int r;
/* validate the parameters */
if (saddr & ~PAGE_MASK || offset & ~PAGE_MASK ||
size == 0 || size & ~PAGE_MASK)
return -EINVAL;
/* make sure object fit at this offset */
eaddr = saddr + size - 1;
if (saddr >= eaddr ||
(bo && offset + size > amdgpu_bo_size(bo)))
return -EINVAL;
r = amdgpu_vm_verify_parameters(adev, bo, saddr, offset, size);
if (r)
return r;
/* Allocate all the needed memory */
mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
@ -2210,7 +2228,7 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
}
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
mapping->start = saddr;
mapping->last = eaddr;
@ -2297,10 +2315,14 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_bo_va_mapping *before, *after, *tmp, *next;
LIST_HEAD(removed);
uint64_t eaddr;
int r;
r = amdgpu_vm_verify_parameters(adev, NULL, saddr, 0, size);
if (r)
return r;
eaddr = saddr + size - 1;
saddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr /= AMDGPU_GPU_PAGE_SIZE;
eaddr = saddr + (size - 1) / AMDGPU_GPU_PAGE_SIZE;
/* Allocate all the needed memory */
before = kzalloc(sizeof(*before), GFP_KERNEL);

drivers/gpu/drm/drm_client_modeset.c

@ -700,6 +700,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
unsigned int total_modes_count = 0;
struct drm_client_offset *offsets;
unsigned int connector_count = 0;
/* points to modes protected by mode_config.mutex */
struct drm_display_mode **modes;
struct drm_crtc **crtcs;
int i, ret = 0;
@ -768,7 +769,6 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
drm_client_pick_crtcs(client, connectors, connector_count,
crtcs, modes, 0, width, height);
}
mutex_unlock(&dev->mode_config.mutex);
drm_client_modeset_release(client);
@ -798,6 +798,7 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
modeset->y = offset->y;
}
}
mutex_unlock(&dev->mode_config.mutex);
mutex_unlock(&client->modeset_mutex);
out:

drivers/gpu/drm/nouveau/nouveau_bios.c

@ -23,6 +23,7 @@
*/
#include "nouveau_drv.h"
#include "nouveau_bios.h"
#include "nouveau_reg.h"
#include "dispnv04/hw.h"
#include "nouveau_encoder.h"
@ -1672,7 +1673,7 @@ apply_dcb_encoder_quirks(struct drm_device *dev, int idx, u32 *conn, u32 *conf)
*/
if (nv_match_device(dev, 0x0201, 0x1462, 0x8851)) {
if (*conn == 0xf2005014 && *conf == 0xffffffff) {
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, 1);
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 1, 1, DCB_OUTPUT_B);
return false;
}
}
@ -1758,26 +1759,26 @@ fabricate_dcb_encoder_table(struct drm_device *dev, struct nvbios *bios)
#ifdef __powerpc__
/* Apple iMac G4 NV17 */
if (of_machine_is_compatible("PowerMac4,5")) {
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, 1);
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, 2);
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS, 0, all_heads, DCB_OUTPUT_B);
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG, 1, all_heads, DCB_OUTPUT_C);
return;
}
#endif
/* Make up some sane defaults */
fabricate_dcb_output(dcb, DCB_OUTPUT_ANALOG,
bios->legacy.i2c_indices.crt, 1, 1);
bios->legacy.i2c_indices.crt, 1, DCB_OUTPUT_B);
if (nv04_tv_identify(dev, bios->legacy.i2c_indices.tv) >= 0)
fabricate_dcb_output(dcb, DCB_OUTPUT_TV,
bios->legacy.i2c_indices.tv,
all_heads, 0);
all_heads, DCB_OUTPUT_A);
else if (bios->tmds.output0_script_ptr ||
bios->tmds.output1_script_ptr)
fabricate_dcb_output(dcb, DCB_OUTPUT_TMDS,
bios->legacy.i2c_indices.panel,
all_heads, 1);
all_heads, DCB_OUTPUT_B);
}
static int

drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowof.c

@ -66,11 +66,16 @@ of_init(struct nvkm_bios *bios, const char *name)
return ERR_PTR(-EINVAL);
}
static void of_fini(void *p)
{
kfree(p);
}
const struct nvbios_source
nvbios_of = {
.name = "OpenFirmware",
.init = of_init,
.fini = (void(*)(void *))kfree,
.fini = of_fini,
.read = of_read,
.size = of_size,
.rw = false,

drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv50.c

@ -221,8 +221,11 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
void __iomem *map = NULL;
/* Already mapped? */
if (refcount_inc_not_zero(&iobj->maps))
if (refcount_inc_not_zero(&iobj->maps)) {
/* read barrier match the wmb on refcount set */
smp_rmb();
return iobj->map;
}
/* Take the lock, and re-check that another thread hasn't
* already mapped the object in the meantime.
@ -249,6 +252,8 @@ nv50_instobj_acquire(struct nvkm_memory *memory)
iobj->base.memory.ptrs = &nv50_instobj_fast;
else
iobj->base.memory.ptrs = &nv50_instobj_slow;
/* barrier to ensure the ptrs are written before refcount is set */
smp_wmb();
refcount_set(&iobj->maps, 1);
}

drivers/hid/i2c-hid/i2c-hid-core.c

@ -56,7 +56,6 @@
/* flags */
#define I2C_HID_STARTED 0
#define I2C_HID_RESET_PENDING 1
#define I2C_HID_READ_PENDING 2
#define I2C_HID_PWR_ON 0x00
#define I2C_HID_PWR_SLEEP 0x01
@ -258,7 +257,6 @@ static int __i2c_hid_command(struct i2c_client *client,
msg[1].len = data_len;
msg[1].buf = buf_recv;
msg_num = 2;
set_bit(I2C_HID_READ_PENDING, &ihid->flags);
}
if (wait)
@ -266,9 +264,6 @@ static int __i2c_hid_command(struct i2c_client *client,
ret = i2c_transfer(client->adapter, msg, msg_num);
if (data_len > 0)
clear_bit(I2C_HID_READ_PENDING, &ihid->flags);
if (ret != msg_num)
return ret < 0 ? ret : -EIO;
@ -540,9 +535,6 @@ static irqreturn_t i2c_hid_irq(int irq, void *dev_id)
{
struct i2c_hid *ihid = dev_id;
if (test_bit(I2C_HID_READ_PENDING, &ihid->flags))
return IRQ_HANDLED;
i2c_hid_get_input(ihid);
return IRQ_HANDLED;

drivers/i2c/i2c-core-base.c

@ -1965,13 +1965,18 @@ static int i2c_check_for_quirks(struct i2c_adapter *adap, struct i2c_msg *msgs,
* Returns negative errno, else the number of messages executed.
*
* Adapter lock must be held when calling this function. No debug logging
* takes place. adap->algo->master_xfer existence isn't checked.
* takes place.
*/
int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
unsigned long orig_jiffies;
int ret, try;
if (!adap->algo->master_xfer) {
dev_dbg(&adap->dev, "I2C level transfers not supported\n");
return -EOPNOTSUPP;
}
if (WARN_ON(!msgs || num < 1))
return -EINVAL;
@ -2038,11 +2043,6 @@ int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
int ret;
if (!adap->algo->master_xfer) {
dev_dbg(&adap->dev, "I2C level transfers not supported\n");
return -EOPNOTSUPP;
}
/* REVISIT the fault reporting model here is weak:
*
* - When we get an error after receiving N bytes from a slave,

drivers/infiniband/hw/mlx5/mad.c

@ -219,7 +219,8 @@ static int process_pma_cmd(struct mlx5_ib_dev *dev, u8 port_num,
mdev = dev->mdev;
mdev_port_num = 1;
}
if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1) {
if (MLX5_CAP_GEN(dev->mdev, num_ports) == 1 &&
!mlx5_core_mp_enabled(mdev)) {
/* set local port to one for Function-Per-Port HCA. */
mdev = dev->mdev;
mdev_port_num = 1;

drivers/infiniband/sw/rxe/rxe.c

@ -72,6 +72,8 @@ void rxe_dealloc(struct ib_device *ib_dev)
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
mutex_destroy(&rxe->usdev_lock);
}
/* initialize rxe device parameters */

drivers/irqchip/irq-gic-v3-its.c

@ -3121,13 +3121,8 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
set_bit(i, bitmap);
}
if (err) {
if (i > 0)
its_vpe_irq_domain_free(domain, virq, i);
its_lpi_free(bitmap, base, nr_ids);
its_free_prop_table(vprop_page);
}
if (err)
its_vpe_irq_domain_free(domain, virq, i);
return err;
}

drivers/md/dm-core.h

@ -18,6 +18,8 @@
#include "dm.h"
#define DM_RESERVED_MAX_IOS 1024
#define DM_MAX_TARGETS 1048576
#define DM_MAX_TARGET_PARAMS 1024
struct dm_kobject_holder {
struct kobject kobj;

drivers/md/dm-ioctl.c

@ -1760,7 +1760,8 @@ static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kern
if (copy_from_user(param_kernel, user, minimum_data_size))
return -EFAULT;
if (param_kernel->data_size < minimum_data_size)
if (unlikely(param_kernel->data_size < minimum_data_size) ||
unlikely(param_kernel->data_size > DM_MAX_TARGETS * DM_MAX_TARGET_PARAMS))
return -EINVAL;
secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG;

drivers/md/dm-table.c

@ -186,7 +186,12 @@ static int alloc_targets(struct dm_table *t, unsigned int num)
int dm_table_create(struct dm_table **result, fmode_t mode,
unsigned num_targets, struct mapped_device *md)
{
struct dm_table *t = kzalloc(sizeof(*t), GFP_KERNEL);
struct dm_table *t;
if (num_targets > DM_MAX_TARGETS)
return -EOVERFLOW;
t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
return -ENOMEM;
@ -201,7 +206,7 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
if (!num_targets) {
kfree(t);
return -ENOMEM;
return -EOVERFLOW;
}
if (alloc_targets(t, num_targets)) {

drivers/mtd/nand/raw/diskonchip.c

@ -53,7 +53,7 @@ static unsigned long doc_locations[] __initdata = {
0xe8000, 0xea000, 0xec000, 0xee000,
#endif
#endif
0xffffffff };
};
static struct mtd_info *doclist = NULL;
@ -1666,7 +1666,7 @@ static int __init init_nanddoc(void)
if (ret < 0)
return ret;
} else {
for (i = 0; (doc_locations[i] != 0xffffffff); i++) {
for (i = 0; i < ARRAY_SIZE(doc_locations); i++) {
doc_probe(doc_locations[i]);
}
}

drivers/net/ethernet/amazon/ena/ena_com.c

@ -374,7 +374,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
io_sq->bounce_buf_ctrl.next_to_use = 0;
size = io_sq->bounce_buf_ctrl.buffer_size *
size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
io_sq->bounce_buf_ctrl.buffers_num;
dev_node = dev_to_node(ena_dev->dmadev);

drivers/net/ethernet/broadcom/b44.c

@ -2033,12 +2033,14 @@ static int b44_set_pauseparam(struct net_device *dev,
bp->flags |= B44_FLAG_TX_PAUSE;
else
bp->flags &= ~B44_FLAG_TX_PAUSE;
if (bp->flags & B44_FLAG_PAUSE_AUTO) {
b44_halt(bp);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
} else {
__b44_set_flow_ctrl(bp, bp->flags);
if (netif_running(dev)) {
if (bp->flags & B44_FLAG_PAUSE_AUTO) {
b44_halt(bp);
b44_init_rings(bp);
b44_init_hw(bp, B44_FULL_RESET);
} else {
__b44_set_flow_ctrl(bp, bp->flags);
}
}
spin_unlock_irq(&bp->lock);

drivers/net/ethernet/intel/i40e/i40e_main.c

@ -15971,7 +15971,7 @@ static int __init i40e_init_module(void)
* since we need to be able to guarantee forward progress even under
* memory pressure.
*/
i40e_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0, i40e_driver_name);
i40e_wq = alloc_workqueue("%s", 0, 0, i40e_driver_name);
if (!i40e_wq) {
pr_err("%s: Failed to create workqueue\n", i40e_driver_name);
return -ENOMEM;

drivers/net/ethernet/intel/iavf/iavf_main.c

@ -2647,6 +2647,34 @@ static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
spin_unlock_bh(&adapter->cloud_filter_list_lock);
}
/**
* iavf_is_tc_config_same - Compare the mqprio TC config with the
* TC config already configured on this adapter.
* @adapter: board private structure
* @mqprio_qopt: TC config received from kernel.
*
* This function compares the TC config received from the kernel
* with the config already configured on the adapter.
*
* Return: True if configuration is same, false otherwise.
**/
static bool iavf_is_tc_config_same(struct iavf_adapter *adapter,
struct tc_mqprio_qopt *mqprio_qopt)
{
struct virtchnl_channel_info *ch = &adapter->ch_config.ch_info[0];
int i;
if (adapter->num_tc != mqprio_qopt->num_tc)
return false;
for (i = 0; i < adapter->num_tc; i++) {
if (ch[i].count != mqprio_qopt->count[i] ||
ch[i].offset != mqprio_qopt->offset[i])
return false;
}
return true;
}
/**
* __iavf_setup_tc - configure multiple traffic classes
* @netdev: network interface device structure
@ -2703,7 +2731,7 @@ static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
if (ret)
return ret;
/* Return if same TC config is requested */
if (adapter->num_tc == num_tc)
if (iavf_is_tc_config_same(adapter, &mqprio_qopt->qopt))
return 0;
adapter->num_tc = num_tc;

drivers/net/ethernet/mellanox/mlx5/core/cmd.c

@ -114,15 +114,18 @@ static u8 alloc_token(struct mlx5_cmd *cmd)
return token;
}
static int cmd_alloc_index(struct mlx5_cmd *cmd)
static int cmd_alloc_index(struct mlx5_cmd *cmd, struct mlx5_cmd_work_ent *ent)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&cmd->alloc_lock, flags);
ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
if (ret < cmd->max_reg_cmds)
if (ret < cmd->max_reg_cmds) {
clear_bit(ret, &cmd->bitmask);
ent->idx = ret;
cmd->ent_arr[ent->idx] = ent;
}
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
@ -905,7 +908,7 @@ static void cmd_work_handler(struct work_struct *work)
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
if (!ent->page_queue) {
alloc_ret = cmd_alloc_index(cmd);
alloc_ret = cmd_alloc_index(cmd, ent);
if (alloc_ret < 0) {
mlx5_core_err(dev, "failed to allocate command entry\n");
if (ent->callback) {
@ -920,15 +923,14 @@ static void cmd_work_handler(struct work_struct *work)
up(sem);
return;
}
ent->idx = alloc_ret;
} else {
ent->idx = cmd->max_reg_cmds;
spin_lock_irqsave(&cmd->alloc_lock, flags);
clear_bit(ent->idx, &cmd->bitmask);
cmd->ent_arr[ent->idx] = ent;
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
cmd->ent_arr[ent->idx] = ent;
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
memset(lay, 0, sizeof(*lay));

drivers/net/ethernet/mellanox/mlx5/core/fs_core.c

@ -1549,8 +1549,9 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
}
trace_mlx5_fs_set_fte(fte, false);
/* Link newly added rules into the tree. */
for (i = 0; i < handle->num_rules; i++) {
if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
if (!handle->rule[i]->node.parent) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}

drivers/net/ethernet/mellanox/mlxsw/core.c

@ -566,7 +566,7 @@ free_skb:
static const struct mlxsw_listener mlxsw_emad_rx_listener =
MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
EMAD, DISCARD);
EMAD, FORWARD);
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{

drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c

@ -756,7 +756,9 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
rehash.dw.work);
int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;
mutex_lock(&vregion->lock);
mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
mutex_unlock(&vregion->lock);
if (credits < 0)
/* Rehash gone out of credits so it was interrupted.
* Schedule the work as soon as possible to continue.
@ -766,6 +768,17 @@ static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}
static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
/* The entry markers are relative to the current chunk and therefore
* needs to be reset together with the chunk marker.
*/
ctx->current_vchunk = NULL;
ctx->start_ventry = NULL;
ctx->stop_ventry = NULL;
}
static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
@ -788,7 +801,7 @@ mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *v
* the current chunk pointer to make sure all chunks
* are properly migrated.
*/
vregion->rehash.ctx.current_vchunk = NULL;
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
}
static struct mlxsw_sp_acl_tcam_vregion *
@ -861,10 +874,14 @@ mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;
if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
mutex_lock(&tcam->lock);
list_del(&vregion->tlist);
mutex_unlock(&tcam->lock);
cancel_delayed_work_sync(&vregion->rehash.dw);
if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
ctx->hints_priv)
ops->region_rehash_hints_put(ctx->hints_priv);
}
mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
if (vregion->region2)
@ -1228,8 +1245,14 @@ mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_ventry *ventry,
bool *activity)
{
return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp,
ventry->entry, activity);
struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
int err;
mutex_lock(&vregion->lock);
err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
activity);
mutex_unlock(&vregion->lock);
return err;
}
static int
@ -1263,6 +1286,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
{
struct mlxsw_sp_acl_tcam_chunk *new_chunk;
WARN_ON(vchunk->chunk2);
new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
if (IS_ERR(new_chunk))
return PTR_ERR(new_chunk);
@ -1281,7 +1306,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
{
mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
vchunk->chunk2 = NULL;
ctx->current_vchunk = NULL;
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
}
static int
@ -1304,6 +1329,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
return 0;
}
if (list_empty(&vchunk->ventry_list))
goto out;
/* If the migration got interrupted, we have the ventry to start from
* stored in context.
*/
@ -1313,6 +1341,8 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
ventry = list_first_entry(&vchunk->ventry_list,
typeof(*ventry), list);
WARN_ON(ventry->vchunk != vchunk);
list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
/* During rollback, once we reach the ventry that failed
* to migrate, we are done.
@ -1353,6 +1383,7 @@ mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
}
}
out:
mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
return 0;
}
@ -1366,6 +1397,9 @@ mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_acl_tcam_vchunk *vchunk;
int err;
if (list_empty(&vregion->vchunk_list))
return 0;
/* If the migration got interrupted, we have the vchunk
* we are working on stored in context.
*/
@ -1394,16 +1428,17 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
int err, err2;
trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
mutex_lock(&vregion->lock);
err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
if (err) {
if (ctx->this_is_rollback)
return err;
/* In case migration was not successful, we need to swap
* so the original region pointer is assigned again
* to vregion->region.
*/
swap(vregion->region, vregion->region2);
ctx->current_vchunk = NULL;
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
ctx->this_is_rollback = true;
err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
ctx, credits);
@ -1414,7 +1449,6 @@ mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
/* Let the rollback to be continued later on. */
}
}
mutex_unlock(&vregion->lock);
trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
return err;
}
@ -1463,6 +1497,7 @@ mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
ctx->hints_priv = hints_priv;
ctx->this_is_rollback = false;
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
return 0;
@ -1515,7 +1550,8 @@ mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
ctx, credits);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
return;
}
if (*credits >= 0)

drivers/net/geneve.c

@ -890,7 +890,7 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
if (!pskb_inet_may_pull(skb))
if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);
@ -956,7 +956,7 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
__be16 sport;
int err;
if (!pskb_inet_may_pull(skb))
if (!skb_vlan_inet_prepare(skb))
return -EINVAL;
sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true);

drivers/net/gtp.c

@ -706,11 +706,12 @@ out_hashtable:
static void gtp_dellink(struct net_device *dev, struct list_head *head)
{
struct gtp_dev *gtp = netdev_priv(dev);
struct hlist_node *next;
struct pdp_ctx *pctx;
int i;
for (i = 0; i < gtp->hash_size; i++)
hlist_for_each_entry_rcu(pctx, &gtp->tid_hash[i], hlist_tid)
hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
pdp_context_delete(pctx);
list_del_rcu(&gtp->list);

drivers/net/tun.c

@ -2208,14 +2208,16 @@ static ssize_t tun_put_user(struct tun_struct *tun,
tun_is_little_endian(tun), true,
vlan_hlen)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
pr_err("unexpected GSO type: "
"0x%x, gso_size %d, hdr_len %d\n",
sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
tun16_to_cpu(tun, gso.hdr_len));
print_hex_dump(KERN_ERR, "tun: ",
DUMP_PREFIX_NONE,
16, 1, skb->head,
min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
if (net_ratelimit()) {
netdev_err(tun->dev, "unexpected GSO type: 0x%x, gso_size %d, hdr_len %d\n",
sinfo->gso_type, tun16_to_cpu(tun, gso.gso_size),
tun16_to_cpu(tun, gso.hdr_len));
print_hex_dump(KERN_ERR, "tun: ",
DUMP_PREFIX_NONE,
16, 1, skb->head,
min((int)tun16_to_cpu(tun, gso.hdr_len), 64), true);
}
WARN_ON_ONCE(1);
return -EINVAL;
}

drivers/net/usb/ax88179_178a.c

@ -1452,21 +1452,16 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
/* Skip IP alignment pseudo header */
skb_pull(skb, 2);
skb->truesize = SKB_TRUESIZE(pkt_len_plus_padd);
ax88179_rx_checksum(skb, pkt_hdr);
return 1;
}
ax_skb = skb_clone(skb, GFP_ATOMIC);
ax_skb = netdev_alloc_skb_ip_align(dev->net, pkt_len);
if (!ax_skb)
return 0;
skb_trim(ax_skb, pkt_len);
skb_put(ax_skb, pkt_len);
memcpy(ax_skb->data, skb->data + 2, pkt_len);
/* Skip IP alignment pseudo header */
skb_pull(ax_skb, 2);
skb->truesize = pkt_len_plus_padd +
SKB_DATA_ALIGN(sizeof(struct sk_buff));
ax88179_rx_checksum(ax_skb, pkt_hdr);
usbnet_skb_return(dev, ax_skb);

drivers/net/vxlan.c

@ -1605,6 +1605,10 @@ static bool vxlan_set_mac(struct vxlan_dev *vxlan,
if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))
return false;
/* Ignore packets from invalid src-address */
if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
return false;
/* Get address from the outer IP header */
if (vxlan_get_sk_family(vs) == AF_INET) {
saddr.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;

drivers/nfc/trf7970a.c

@ -424,7 +424,8 @@ struct trf7970a {
enum trf7970a_state state;
struct device *dev;
struct spi_device *spi;
struct regulator *regulator;
struct regulator *vin_regulator;
struct regulator *vddio_regulator;
struct nfc_digital_dev *ddev;
u32 quirks;
bool is_initiator;
@ -1883,7 +1884,7 @@ static int trf7970a_power_up(struct trf7970a *trf)
if (trf->state != TRF7970A_ST_PWR_OFF)
return 0;
ret = regulator_enable(trf->regulator);
ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret);
return ret;
@ -1926,7 +1927,7 @@ static int trf7970a_power_down(struct trf7970a *trf)
if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW))
gpiod_set_value_cansleep(trf->en2_gpiod, 0);
ret = regulator_disable(trf->regulator);
ret = regulator_disable(trf->vin_regulator);
if (ret)
dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__,
ret);
@ -2065,37 +2066,37 @@ static int trf7970a_probe(struct spi_device *spi)
mutex_init(&trf->lock);
INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler);
trf->regulator = devm_regulator_get(&spi->dev, "vin");
if (IS_ERR(trf->regulator)) {
ret = PTR_ERR(trf->regulator);
trf->vin_regulator = devm_regulator_get(&spi->dev, "vin");
if (IS_ERR(trf->vin_regulator)) {
ret = PTR_ERR(trf->vin_regulator);
dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret);
goto err_destroy_lock;
}
ret = regulator_enable(trf->regulator);
ret = regulator_enable(trf->vin_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VIN: %d\n", ret);
goto err_destroy_lock;
}
uvolts = regulator_get_voltage(trf->regulator);
uvolts = regulator_get_voltage(trf->vin_regulator);
if (uvolts > 4000000)
trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3;
trf->regulator = devm_regulator_get(&spi->dev, "vdd-io");
if (IS_ERR(trf->regulator)) {
ret = PTR_ERR(trf->regulator);
trf->vddio_regulator = devm_regulator_get(&spi->dev, "vdd-io");
if (IS_ERR(trf->vddio_regulator)) {
ret = PTR_ERR(trf->vddio_regulator);
dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret);
goto err_destroy_lock;
goto err_disable_vin_regulator;
}
ret = regulator_enable(trf->regulator);
ret = regulator_enable(trf->vddio_regulator);
if (ret) {
dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret);
goto err_destroy_lock;
goto err_disable_vin_regulator;
}
if (regulator_get_voltage(trf->regulator) == 1800000) {
if (regulator_get_voltage(trf->vddio_regulator) == 1800000) {
trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW;
dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n");
}
@ -2108,7 +2109,7 @@ static int trf7970a_probe(struct spi_device *spi)
if (!trf->ddev) {
dev_err(trf->dev, "Can't allocate NFC digital device\n");
ret = -ENOMEM;
goto err_disable_regulator;
goto err_disable_vddio_regulator;
}
nfc_digital_set_parent_dev(trf->ddev, trf->dev);
@ -2137,8 +2138,10 @@ err_shutdown:
trf7970a_shutdown(trf);
err_free_ddev:
nfc_digital_free_device(trf->ddev);
err_disable_regulator:
regulator_disable(trf->regulator);
err_disable_vddio_regulator:
regulator_disable(trf->vddio_regulator);
err_disable_vin_regulator:
regulator_disable(trf->vin_regulator);
err_destroy_lock:
mutex_destroy(&trf->lock);
return ret;
@ -2157,7 +2160,8 @@ static int trf7970a_remove(struct spi_device *spi)
nfc_digital_unregister_device(trf->ddev);
nfc_digital_free_device(trf->ddev);
regulator_disable(trf->regulator);
regulator_disable(trf->vddio_regulator);
regulator_disable(trf->vin_regulator);
mutex_destroy(&trf->lock);

drivers/staging/comedi/drivers/vmk80xx.c

@ -642,32 +642,21 @@ static int vmk80xx_find_usb_endpoints(struct comedi_device *dev)
struct vmk80xx_private *devpriv = dev->private;
struct usb_interface *intf = comedi_to_usb_interface(dev);
struct usb_host_interface *iface_desc = intf->cur_altsetting;
struct usb_endpoint_descriptor *ep_desc;
int i;
struct usb_endpoint_descriptor *ep_rx_desc, *ep_tx_desc;
int ret;
if (iface_desc->desc.bNumEndpoints != 2)
if (devpriv->model == VMK8061_MODEL)
ret = usb_find_common_endpoints(iface_desc, &ep_rx_desc,
&ep_tx_desc, NULL, NULL);
else
ret = usb_find_common_endpoints(iface_desc, NULL, NULL,
&ep_rx_desc, &ep_tx_desc);
if (ret)
return -ENODEV;
for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
ep_desc = &iface_desc->endpoint[i].desc;
if (usb_endpoint_is_int_in(ep_desc) ||
usb_endpoint_is_bulk_in(ep_desc)) {
if (!devpriv->ep_rx)
devpriv->ep_rx = ep_desc;
continue;
}
if (usb_endpoint_is_int_out(ep_desc) ||
usb_endpoint_is_bulk_out(ep_desc)) {
if (!devpriv->ep_tx)
devpriv->ep_tx = ep_desc;
continue;
}
}
if (!devpriv->ep_rx || !devpriv->ep_tx)
return -ENODEV;
devpriv->ep_rx = ep_rx_desc;
devpriv->ep_tx = ep_tx_desc;
if (!usb_endpoint_maxp(devpriv->ep_rx) || !usb_endpoint_maxp(devpriv->ep_tx))
return -EINVAL;

drivers/staging/speakup/main.c

@ -577,7 +577,7 @@ static u_long get_word(struct vc_data *vc)
}
attr_ch = get_char(vc, (u_short *)tmp_pos, &spk_attr);
buf[cnt++] = attr_ch;
while (tmpx < vc->vc_cols - 1) {
while (tmpx < vc->vc_cols - 1 && cnt < sizeof(buf) - 1) {
tmp_pos += 2;
tmpx++;
ch = get_char(vc, (u_short *)tmp_pos, &temp);

drivers/tty/serial/mxs-auart.c

@ -1126,11 +1126,13 @@ static void mxs_auart_set_ldisc(struct uart_port *port,
static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
{
u32 istat;
u32 istat, stat;
struct mxs_auart_port *s = context;
u32 mctrl_temp = s->mctrl_prev;
u32 stat = mxs_read(s, REG_STAT);
uart_port_lock(&s->port);
stat = mxs_read(s, REG_STAT);
istat = mxs_read(s, REG_INTR);
/* ack irq */
@ -1166,6 +1168,8 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context)
istat &= ~AUART_INTR_TXIS;
}
uart_port_unlock(&s->port);
return IRQ_HANDLED;
}

drivers/tty/serial/pmac_zilog.c

@ -220,7 +220,6 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
{
struct tty_port *port;
unsigned char ch, r1, drop, flag;
int loops = 0;
/* Sanity check, make sure the old bug is no longer happening */
if (uap->port.state == NULL) {
@ -301,24 +300,11 @@ static bool pmz_receive_chars(struct uart_pmac_port *uap)
if (r1 & Rx_OVR)
tty_insert_flip_char(port, 0, TTY_OVERRUN);
next_char:
/* We can get stuck in an infinite loop getting char 0 when the
* line is in a wrong HW state, we break that here.
* When that happens, I disable the receive side of the driver.
* Note that what I've been experiencing is a real irq loop where
* I'm getting flooded regardless of the actual port speed.
* Something strange is going on with the HW
*/
if ((++loops) > 1000)
goto flood;
ch = read_zsreg(uap, R0);
if (!(ch & Rx_CH_AV))
break;
}
return true;
flood:
pmz_interrupt_control(uap, 0);
pmz_error("pmz: rx irq flood !\n");
return true;
}

drivers/usb/class/cdc-wdm.c

@ -471,7 +471,6 @@ out_free_mem:
static int service_outstanding_interrupt(struct wdm_device *desc)
{
int rv = 0;
int used;
/* submit read urb only if the device is waiting for it */
if (!desc->resp_count || !--desc->resp_count)
@ -486,10 +485,7 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
goto out;
}
used = test_and_set_bit(WDM_RESPONDING, &desc->flags);
if (used)
goto out;
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);

drivers/usb/core/port.c

@ -295,8 +295,10 @@ static void usb_port_shutdown(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
if (port_dev->child)
if (port_dev->child) {
usb_disable_usb2_hardware_lpm(port_dev->child);
usb_unlocked_disable_lpm(port_dev->child);
}
}
static const struct dev_pm_ops usb_port_pm_ops = {

drivers/usb/dwc2/hcd_ddma.c

@ -897,13 +897,15 @@ static int dwc2_cmpl_host_isoc_dma_desc(struct dwc2_hsotg *hsotg,
struct dwc2_dma_desc *dma_desc;
struct dwc2_hcd_iso_packet_desc *frame_desc;
u16 frame_desc_idx;
struct urb *usb_urb = qtd->urb->priv;
struct urb *usb_urb;
u16 remain = 0;
int rc = 0;
if (!qtd->urb)
return -EINVAL;
usb_urb = qtd->urb->priv;
dma_sync_single_for_cpu(hsotg->dev, qh->desc_list_dma + (idx *
sizeof(struct dwc2_dma_desc)),
sizeof(struct dwc2_dma_desc),

drivers/usb/serial/option.c

@@ -255,6 +255,10 @@ static void option_instat_callback(struct urb *urb);
 #define QUECTEL_PRODUCT_EM061K_LMS		0x0124
 #define QUECTEL_PRODUCT_EC25			0x0125
 #define QUECTEL_PRODUCT_EM060K_128		0x0128
+#define QUECTEL_PRODUCT_EM060K_129		0x0129
+#define QUECTEL_PRODUCT_EM060K_12a		0x012a
+#define QUECTEL_PRODUCT_EM060K_12b		0x012b
+#define QUECTEL_PRODUCT_EM060K_12c		0x012c
 #define QUECTEL_PRODUCT_EG91			0x0191
 #define QUECTEL_PRODUCT_EG95			0x0195
 #define QUECTEL_PRODUCT_BG96			0x0296
@@ -1218,6 +1222,18 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x30) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0x00, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_129, 0xff, 0xff, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x30) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0x00, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12a, 0xff, 0xff, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x30) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0x00, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12b, 0xff, 0xff, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x30) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0x00, 0x40) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_12c, 0xff, 0xff, 0x40) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) },
@@ -1360,6 +1376,12 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = NCTRL(2) | RSVD(3) },
 	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1083, 0xff),	/* Telit FE990 (ECM) */
 	  .driver_info = NCTRL(0) | RSVD(1) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a0, 0xff),	/* Telit FN20C04 (rmnet) */
+	  .driver_info = RSVD(0) | NCTRL(3) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a4, 0xff),	/* Telit FN20C04 (rmnet) */
+	  .driver_info = RSVD(0) | NCTRL(3) },
+	{ USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x10a9, 0xff),	/* Telit FN20C04 (rmnet) */
+	  .driver_info = RSVD(0) | NCTRL(2) | RSVD(3) | RSVD(4) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
 	  .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
@@ -2052,6 +2074,10 @@ static const struct usb_device_id option_ids[] = {
 	  .driver_info = RSVD(3) },
 	{ USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9803, 0xff),
 	  .driver_info = RSVD(4) },
+	{ USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b05),	/* Longsung U8300 */
+	  .driver_info = RSVD(4) | RSVD(5) },
+	{ USB_DEVICE(LONGCHEER_VENDOR_ID, 0x9b3c),	/* Longsung U9300 */
+	  .driver_info = RSVD(0) | RSVD(4) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
 	{ USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
 	{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -2272,15 +2298,29 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) },	/* Fibocom FG150 Diag */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) },		/* Fibocom FG150 AT */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) },			/* Fibocom FM160 (MBIM mode) */
+	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0115, 0xff),			/* Fibocom FM135 (laptop MBIM) */
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) },			/* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) },			/* Fibocom FM101-GL (laptop MBIM) */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a3, 0xff) },			/* Fibocom FM101-GL (laptop MBIM) */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff),			/* Fibocom FM101-GL (laptop MBIM) */
 	  .driver_info = RSVD(4) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a04, 0xff) },			/* Fibocom FM650-CN (ECM mode) */
+	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a05, 0xff) },			/* Fibocom FM650-CN (NCM mode) */
+	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a06, 0xff) },			/* Fibocom FM650-CN (RNDIS mode) */
+	{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0a07, 0xff) },			/* Fibocom FM650-CN (MBIM mode) */
 	{ USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) },			/* LongSung M5710 */
 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) },			/* GosunCn GM500 RNDIS */
 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) },			/* GosunCn GM500 MBIM */
 	{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) },			/* GosunCn GM500 ECM/NCM */
+	{ USB_DEVICE(0x33f8, 0x0104),						/* Rolling RW101-GL (laptop RMNET) */
+	  .driver_info = RSVD(4) | RSVD(5) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a2, 0xff) },			/* Rolling RW101-GL (laptop MBIM) */
+	{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a3, 0xff) },			/* Rolling RW101-GL (laptop MBIM) */
+	{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x01a4, 0xff),			/* Rolling RW101-GL (laptop MBIM) */
+	  .driver_info = RSVD(4) },
+	{ USB_DEVICE_INTERFACE_CLASS(0x33f8, 0x0115, 0xff),			/* Rolling RW135-GL (laptop MBIM) */
+	  .driver_info = RSVD(5) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x40) },


@@ -2525,9 +2525,19 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
 	r = vhost_get_avail_idx(vq, &avail_idx);
 	if (unlikely(r))
 		return false;
-	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
 
-	return vq->avail_idx == vq->last_avail_idx;
+	vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
+	if (vq->avail_idx != vq->last_avail_idx) {
+		/* Since we have updated avail_idx, the following
+		 * call to vhost_get_vq_desc() will read available
+		 * ring entries. Make sure that read happens after
+		 * the avail_idx read.
+		 */
+		smp_rmb();
+		return false;
+	}
+
+	return true;
 }
 EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
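
The smp_rmb() added here pairs with the producer-side write barrier that publishes ring entries before advancing the avail index. A minimal sketch of that pairing, with illustrative names (ring, avail_idx, process() are not vhost symbols):

	/* producer (e.g. the guest driver): write the entry, then the index */
	ring[idx % size] = entry;
	smp_wmb();			/* order the entry before the index */
	WRITE_ONCE(avail_idx, idx + 1);

	/* consumer (what vhost_vq_avail_empty() guards): index, then entry */
	u16 seen = READ_ONCE(avail_idx);
	if (seen != last_avail_idx) {
		smp_rmb();		/* pairs with the producer's smp_wmb() */
		process(ring[last_avail_idx % size]);
	}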


@@ -2291,20 +2291,14 @@ struct btrfs_data_container *init_data_container(u32 total_bytes)
 	size_t alloc_bytes;
 
 	alloc_bytes = max_t(size_t, total_bytes, sizeof(*data));
-	data = kvmalloc(alloc_bytes, GFP_KERNEL);
+	data = kvzalloc(alloc_bytes, GFP_KERNEL);
 	if (!data)
 		return ERR_PTR(-ENOMEM);
 
-	if (total_bytes >= sizeof(*data)) {
+	if (total_bytes >= sizeof(*data))
 		data->bytes_left = total_bytes - sizeof(*data);
-		data->bytes_missing = 0;
-	} else {
+	else
 		data->bytes_missing = sizeof(*data) - total_bytes;
-		data->bytes_left = 0;
-	}
-
-	data->elem_cnt = 0;
-	data->elem_missed = 0;
 
 	return data;
 }
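
The kvmalloc() to kvzalloc() switch is what lets the explicit zeroing go: the buffer now comes back fully zeroed, so only the non-zero member needs a store. A minimal sketch of the pattern (struct foo_container and foo_alloc() are made-up names):

	static struct foo_container *foo_alloc(size_t alloc_bytes)
	{
		/* kvzalloc() == kvmalloc() + zeroing: every member starts at 0 */
		struct foo_container *data = kvzalloc(alloc_bytes, GFP_KERNEL);

		if (!data)
			return ERR_PTR(-ENOMEM);
		/* only members with non-zero values need stores from here on */
		return data;
	}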


@@ -1137,6 +1137,9 @@ __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans,
 	if (ret)
 		return ret;
 
+	ret = btrfs_record_root_in_trans(trans, node->root);
+	if (ret)
+		return ret;
 	ret = btrfs_update_delayed_inode(trans, node->root, path, node);
 	return ret;
 }


@@ -4061,6 +4061,8 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
 				      BTRFS_QGROUP_RSV_META_PREALLOC);
 	trace_qgroup_meta_convert(root, num_bytes);
 	qgroup_convert_meta(fs_info, root->root_key.objectid, num_bytes);
+	if (!sb_rdonly(fs_info->sb))
+		add_root_meta_rsv(root, num_bytes, BTRFS_QGROUP_RSV_META_PERTRANS);
 }
 
 /*


@@ -243,7 +243,7 @@ nilfs_filetype_table[NILFS_FT_MAX] = {
 #define S_SHIFT 12
 static unsigned char
-nilfs_type_by_mode[S_IFMT >> S_SHIFT] = {
+nilfs_type_by_mode[(S_IFMT >> S_SHIFT) + 1] = {
 	[S_IFREG >> S_SHIFT]	= NILFS_FT_REG_FILE,
 	[S_IFDIR >> S_SHIFT]	= NILFS_FT_DIR,
 	[S_IFCHR >> S_SHIFT]	= NILFS_FT_CHRDEV,
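
The sizing fix is a plain off-by-one: S_IFMT is 0170000 and S_SHIFT is 12, so the file-type index (mode & S_IFMT) >> S_SHIFT ranges over 0..15, while an array of S_IFMT >> S_SHIFT = 15 entries only covers 0..14. Worked through (illustrative, not part of the patch):

	unsigned char by_mode_old[S_IFMT >> S_SHIFT];		/* 15 slots: 0..14 */
	unsigned char by_mode_new[(S_IFMT >> S_SHIFT) + 1];	/* 16 slots: 0..15 */

	umode_t bad_mode = S_IFMT;		/* corrupted on-disk mode, 0170000 */
	size_t idx = (bad_mode & S_IFMT) >> 12;	/* 15: past the end of the old array */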


@@ -431,6 +431,8 @@ struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
 	kn = kernfs_find_and_get(kobj->sd, attr->name);
 	if (kn)
 		kernfs_break_active_protection(kn);
+	else
+		kobject_put(kobj);
 	return kn;
 }
 EXPORT_SYMBOL_GPL(sysfs_break_active_protection);


@@ -531,6 +531,31 @@ static inline unsigned long compare_ether_header(const void *a, const void *b)
 #endif
 }
 
+/**
+ * eth_skb_pkt_type - Assign packet type if destination address does not match
+ * @skb: Assigned a packet type if address does not match @dev address
+ * @dev: Network device used to compare packet address against
+ *
+ * If the destination MAC address of the packet does not match the network
+ * device address, assign an appropriate packet type.
+ */
+static inline void eth_skb_pkt_type(struct sk_buff *skb,
+				    const struct net_device *dev)
+{
+	const struct ethhdr *eth = eth_hdr(skb);
+
+	if (unlikely(!ether_addr_equal_64bits(eth->h_dest, dev->dev_addr))) {
+		if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
+			if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
+				skb->pkt_type = PACKET_BROADCAST;
+			else
+				skb->pkt_type = PACKET_MULTICAST;
+		} else {
+			skb->pkt_type = PACKET_OTHERHOST;
+		}
+	}
+}
+
 /**
  * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame
  * @skb: Buffer to pad


@@ -255,6 +255,85 @@ struct uart_port {
 	void			*private_data;		/* generic platform data pointer */
 };
 
+/**
+ * uart_port_lock - Lock the UART port
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_lock(struct uart_port *up)
+{
+	spin_lock(&up->lock);
+}
+
+/**
+ * uart_port_lock_irq - Lock the UART port and disable interrupts
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_lock_irq(struct uart_port *up)
+{
+	spin_lock_irq(&up->lock);
+}
+
+/**
+ * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
+ * @up: Pointer to UART port structure
+ * @flags: Pointer to interrupt flags storage
+ */
+static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+	spin_lock_irqsave(&up->lock, *flags);
+}
+
+/**
+ * uart_port_trylock - Try to lock the UART port
+ * @up: Pointer to UART port structure
+ *
+ * Returns: True if lock was acquired, false otherwise
+ */
+static inline bool uart_port_trylock(struct uart_port *up)
+{
+	return spin_trylock(&up->lock);
+}
+
+/**
+ * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
+ * @up: Pointer to UART port structure
+ * @flags: Pointer to interrupt flags storage
+ *
+ * Returns: True if lock was acquired, false otherwise
+ */
+static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+	return spin_trylock_irqsave(&up->lock, *flags);
+}
+
+/**
+ * uart_port_unlock - Unlock the UART port
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_unlock(struct uart_port *up)
+{
+	spin_unlock(&up->lock);
+}
+
+/**
+ * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_unlock_irq(struct uart_port *up)
+{
+	spin_unlock_irq(&up->lock);
+}
+
+/**
+ * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts
+ * @up: Pointer to UART port structure
+ * @flags: The saved interrupt flags for restore
+ */
+static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
+{
+	spin_unlock_irqrestore(&up->lock, flags);
+}
+
 static inline int serial_port_in(struct uart_port *up, int offset)
 {
 	return up->serial_in(up, offset);
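
These wrappers let serial drivers stop open-coding spin_lock(&port->lock) and keep the locking discipline in one place. A hypothetical driver fragment showing the intended call pattern (foo_uart_irq is a made-up example, not from the patch):

	static irqreturn_t foo_uart_irq(int irq, void *dev_id)
	{
		struct uart_port *port = dev_id;
		unsigned long flags;

		uart_port_lock_irqsave(port, &flags);
		/* ... drain the RX FIFO, push characters to the tty layer ... */
		uart_port_unlock_irqrestore(port, flags);

		return IRQ_HANDLED;
	}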


@@ -423,7 +423,7 @@ struct trace_event_file {
 	}								\
 	early_initcall(trace_init_perf_perm_##name);
 
-#define PERF_MAX_TRACE_SIZE	2048
+#define PERF_MAX_TRACE_SIZE	8192
 
 #define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */


@@ -70,7 +70,11 @@ struct u64_stats_sync {
 #if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
-#define u64_stats_init(syncp)	seqcount_init(&(syncp)->seq)
+#define u64_stats_init(syncp)				\
+	do {						\
+		struct u64_stats_sync *__s = (syncp);	\
+		seqcount_init(&__s->seq);		\
+	} while (0)
 #else
 static inline void u64_stats_init(struct u64_stats_sync *syncp)
 {
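
The macro form matters under lockdep: seqcount_init() expands to roughly the following, planting one static lock_class_key per expansion site. Keeping u64_stats_init() a macro therefore gives every initialization site its own lock class, whereas an inline function would funnel all callers in a file through a single key and provoke false lockdep reports (my reading of the fix; the __s local mainly adds type checking):

	# define seqcount_init(s)				\
		do {						\
			static struct lock_class_key __key;	\
			__seqcount_init((s), #s, &__key);	\
		} while (0)

	/* illustrative: two stats structs in one file get distinct classes */
	static struct u64_stats_sync rx_sync, tx_sync;

	static void foo_stats_init(void)
	{
		u64_stats_init(&rx_sync);	/* key from this expansion site */
		u64_stats_init(&tx_sync);	/* a different key */
	}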


@@ -451,6 +451,10 @@ static inline void in6_ifa_hold(struct inet6_ifaddr *ifp)
 	refcount_inc(&ifp->refcnt);
 }
 
+static inline bool in6_ifa_hold_safe(struct inet6_ifaddr *ifp)
+{
+	return refcount_inc_not_zero(&ifp->refcnt);
+}
+
 /*
  * compute link-local solicited-node multicast address


@@ -52,7 +52,7 @@ struct unix_sock {
 	struct mutex		iolock, bindlock;
 	struct sock		*peer;
 	struct list_head	link;
-	atomic_long_t		inflight;
+	unsigned long		inflight;
 	spinlock_t		lock;
 	unsigned long		gc_flags;
 #define UNIX_GC_CANDIDATE	0
@@ -72,6 +72,9 @@ enum unix_socket_lock_class {
 	U_LOCK_NORMAL,
 	U_LOCK_SECOND,	/* for double locking, see unix_state_double_lock(). */
 	U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */
+	U_LOCK_GC_LISTENER, /* used for listening socket while determining gc
+			     * candidates to close a small race window.
+			     */
 };
 
 static inline void unix_state_lock_nested(struct sock *sk,


@@ -329,6 +329,39 @@ static inline bool pskb_inet_may_pull(struct sk_buff *skb)
 	return pskb_network_may_pull(skb, nhlen);
 }
 
+/* Variant of pskb_inet_may_pull().
+ */
+static inline bool skb_vlan_inet_prepare(struct sk_buff *skb)
+{
+	int nhlen = 0, maclen = ETH_HLEN;
+	__be16 type = skb->protocol;
+
+	/* Essentially this is skb_protocol(skb, true)
+	 * And we get MAC len.
+	 */
+	if (eth_type_vlan(type))
+		type = __vlan_get_protocol(skb, type, &maclen);
+
+	switch (type) {
+#if IS_ENABLED(CONFIG_IPV6)
+	case htons(ETH_P_IPV6):
+		nhlen = sizeof(struct ipv6hdr);
+		break;
+#endif
+	case htons(ETH_P_IP):
+		nhlen = sizeof(struct iphdr);
+		break;
+	}
+	/* For ETH_P_IPV6/ETH_P_IP we make sure to pull
+	 * a base network header in skb->head.
+	 */
+	if (!pskb_may_pull(skb, maclen + nhlen))
+		return false;
+
+	skb_set_network_header(skb, maclen);
+	return true;
+}
+
 static inline int ip_encap_hlen(struct ip_tunnel_encap *e)
 {
 	const struct ip_tunnel_encap_ops *ops;
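
The geneve header-validation fix in this release calls the new helper on the transmit path before reading the inner IP headers. A hedged sketch of the call pattern (foo_tunnel_xmit and foo_xmit_one are illustrative names):

	static netdev_tx_t foo_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		/* validate the (possibly VLAN-tagged) frame before parsing it */
		if (unlikely(!skb_vlan_inet_prepare(skb))) {
			dev->stats.tx_dropped++;
			dev_kfree_skb(skb);	/* malformed frame: drop it */
			return NETDEV_TX_OK;
		}

		/* network header is pulled and set; ip_hdr()/ipv6_hdr() are safe */
		return foo_xmit_one(skb, dev);
	}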


@@ -19,7 +19,7 @@ int main(void)
 	DEFINE(NR_PAGEFLAGS, __NR_PAGEFLAGS);
 	DEFINE(MAX_NR_ZONES, __MAX_NR_ZONES);
 #ifdef CONFIG_SMP
-	DEFINE(NR_CPUS_BITS, bits_per(CONFIG_NR_CPUS));
+	DEFINE(NR_CPUS_BITS, order_base_2(CONFIG_NR_CPUS));
 #endif
 	DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
 	/* End of constants */
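
The distinction between the two helpers: bits_per(N) is the number of bits needed to represent the value N itself, while order_base_2(N) is the number needed to hold an index in 0..N-1, which is what NR_CPUS_BITS wants for CPU numbers. Worked example (illustrative):

	CONFIG_NR_CPUS = 24:  bits_per(24) = 5,  order_base_2(24) = 5   (agree)
	CONFIG_NR_CPUS = 32:  bits_per(32) = 6,  order_base_2(32) = 5   (one bit too wide)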


@@ -1593,10 +1593,17 @@ static int check_kprobe_address_safe(struct kprobe *p,
 	jump_label_lock();
 	preempt_disable();
 
-	/* Ensure it is not in reserved area nor out of text */
-	if (!(core_kernel_text((unsigned long) p->addr) ||
-	    is_module_text_address((unsigned long) p->addr)) ||
-	    in_gate_area_no_mm((unsigned long) p->addr) ||
+	/* Ensure the address is in a text area, and find a module if exists. */
+	*probed_mod = NULL;
+	if (!core_kernel_text((unsigned long) p->addr)) {
+		*probed_mod = __module_text_address((unsigned long) p->addr);
+		if (!(*probed_mod)) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+	/* Ensure it is not in reserved area. */
+	if (in_gate_area_no_mm((unsigned long) p->addr) ||
 	    within_kprobe_blacklist((unsigned long) p->addr) ||
 	    jump_label_text_reserved(p->addr, p->addr) ||
 	    find_bug((unsigned long)p->addr)) {
@@ -1604,8 +1611,7 @@ static int check_kprobe_address_safe(struct kprobe *p,
 		goto out;
 	}
 
-	/* Check if are we probing a module */
-	*probed_mod = __module_text_address((unsigned long) p->addr);
+	/* Get module refcount and reject __init functions for loaded modules. */
 	if (*probed_mod) {
 		/*
 		 * We must hold a refcount of the probed module while updating


@@ -400,7 +400,8 @@ void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
 	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
 
 	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
-		      "perf buffer not large enough"))
+		      "perf buffer not large enough, wanted %d, have %d",
+		      size, PERF_MAX_TRACE_SIZE))
 		return NULL;
 
 	*rctxp = rctx = perf_swevent_get_recursion_context();


@@ -1140,10 +1140,8 @@ register_snapshot_trigger(char *glob, struct event_trigger_ops *ops,
 			  struct event_trigger_data *data,
 			  struct trace_event_file *file)
 {
-	int ret = tracing_alloc_snapshot_instance(file->tr);
-
-	if (ret < 0)
-		return ret;
+	if (tracing_alloc_snapshot_instance(file->tr) != 0)
+		return 0;
 
 	return register_trigger(glob, ops, data, file);
 }


@@ -259,10 +259,10 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
 		/*
 		 * Zero out zone modifiers, as we don't have specific zone
 		 * requirements. Keep the flags related to allocation in atomic
-		 * contexts and I/O.
+		 * contexts, I/O, nolockdep.
 		 */
 		alloc_flags &= ~GFP_ZONEMASK;
-		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL);
+		alloc_flags &= (GFP_ATOMIC | GFP_KERNEL | __GFP_NOLOCKDEP);
 		alloc_flags |= __GFP_NOWARN;
 		page = alloc_pages(alloc_flags, STACK_ALLOC_ORDER);
 		if (page)


@@ -4190,7 +4190,7 @@ void batadv_tt_local_resize_to_mtu(struct net_device *soft_iface)
 	spin_lock_bh(&bat_priv->tt.commit_lock);
 
-	while (true) {
+	while (timeout) {
 		table_size = batadv_tt_local_table_transmit_size(bat_priv);
 		if (packet_size_max >= table_size)
 			break;


@@ -107,8 +107,10 @@ static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
 	if (hdev->req_status == HCI_REQ_PEND) {
 		hdev->req_result = result;
 		hdev->req_status = HCI_REQ_DONE;
-		if (skb)
+		if (skb) {
+			kfree_skb(hdev->req_skb);
 			hdev->req_skb = skb_get(skb);
+		}
 		wake_up_interruptible(&hdev->req_wait_q);
 	}
 }
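
The leak fix is the usual replace-a-cached-buffer pattern: free whatever the slot already holds before overwriting the pointer. Generic shape (struct reply_slot and store_reply() are made-up names):

	static void store_reply(struct reply_slot *slot, struct sk_buff *skb)
	{
		kfree_skb(slot->skb);		/* kfree_skb(NULL) is a no-op */
		slot->skb = skb_get(skb);	/* keep our own reference */
	}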


@@ -405,7 +405,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
 	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
 	struct l2cap_options opts;
 	struct l2cap_conninfo cinfo;
-	int len, err = 0;
+	int err = 0;
+	size_t len;
 	u32 opt;
 
 	BT_DBG("sk %p", sk);
@@ -450,7 +451,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
 		opts.max_tx = chan->max_tx;
 		opts.txwin_size = chan->tx_win;
 
-		len = min_t(unsigned int, len, sizeof(opts));
+		len = min(len, sizeof(opts));
 		if (copy_to_user(optval, (char *) &opts, len))
 			err = -EFAULT;
@@ -500,7 +501,7 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
 		cinfo.hci_handle = chan->conn->hcon->handle;
 		memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3);
 
-		len = min_t(unsigned int, len, sizeof(cinfo));
+		len = min(len, sizeof(cinfo));
 		if (copy_to_user(optval, (char *) &cinfo, len))
 			err = -EFAULT;


@@ -880,7 +880,8 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
 	struct sock *sk = sock->sk;
 	struct sco_options opts;
 	struct sco_conninfo cinfo;
-	int len, err = 0;
+	int err = 0;
+	size_t len;
 
 	BT_DBG("sk %p", sk);
@@ -902,7 +903,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
 		BT_DBG("mtu %d", opts.mtu);
 
-		len = min_t(unsigned int, len, sizeof(opts));
+		len = min(len, sizeof(opts));
 		if (copy_to_user(optval, (char *)&opts, len))
 			err = -EFAULT;
@@ -920,7 +921,7 @@ static int sco_sock_getsockopt_old(struct socket *sock, int optname,
 		cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
 		memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
 
-		len = min_t(unsigned int, len, sizeof(cinfo));
+		len = min(len, sizeof(cinfo));
 		if (copy_to_user(optval, (char *)&cinfo, len))
 			err = -EFAULT;


@@ -164,17 +164,7 @@ __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
 	eth = (struct ethhdr *)skb->data;
 	skb_pull_inline(skb, ETH_HLEN);
 
-	if (unlikely(!ether_addr_equal_64bits(eth->h_dest,
-					      dev->dev_addr))) {
-		if (unlikely(is_multicast_ether_addr_64bits(eth->h_dest))) {
-			if (ether_addr_equal_64bits(eth->h_dest, dev->broadcast))
-				skb->pkt_type = PACKET_BROADCAST;
-			else
-				skb->pkt_type = PACKET_MULTICAST;
-		} else {
-			skb->pkt_type = PACKET_OTHERHOST;
-		}
-	}
+	eth_skb_pkt_type(skb, dev);
 
 	/*
 	 * Some variants of DSA tagging don't have an ethertype field


@@ -254,12 +254,12 @@ void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm)
 }
 EXPORT_SYMBOL_GPL(__inet_twsk_schedule);
 
+/* Remove all non full sockets (TIME_WAIT and NEW_SYN_RECV) for dead netns */
 void inet_twsk_purge(struct inet_hashinfo *hashinfo, int family)
 {
-	struct inet_timewait_sock *tw;
-	struct sock *sk;
 	struct hlist_nulls_node *node;
 	unsigned int slot;
+	struct sock *sk;
 
 	for (slot = 0; slot <= hashinfo->ehash_mask; slot++) {
 		struct inet_ehash_bucket *head = &hashinfo->ehash[slot];
@@ -268,25 +268,35 @@ restart_rcu:
 		rcu_read_lock();
 restart:
 		sk_nulls_for_each_rcu(sk, node, &head->chain) {
-			if (sk->sk_state != TCP_TIME_WAIT)
-				continue;
-			tw = inet_twsk(sk);
-			if ((tw->tw_family != family) ||
-				refcount_read(&twsk_net(tw)->count))
+			int state = inet_sk_state_load(sk);
+
+			if ((1 << state) & ~(TCPF_TIME_WAIT |
+					     TCPF_NEW_SYN_RECV))
 				continue;
 
-			if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
+			if (sk->sk_family != family ||
+			    refcount_read(&sock_net(sk)->count))
 				continue;
 
-			if (unlikely((tw->tw_family != family) ||
-				     refcount_read(&twsk_net(tw)->count))) {
-				inet_twsk_put(tw);
+			if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
+				continue;
+
+			if (unlikely(sk->sk_family != family ||
+				     refcount_read(&sock_net(sk)->count))) {
+				sock_gen_put(sk);
 				goto restart;
 			}
 
 			rcu_read_unlock();
 			local_bh_disable();
-			inet_twsk_deschedule_put(tw);
+			if (state == TCP_TIME_WAIT) {
+				inet_twsk_deschedule_put(inet_twsk(sk));
+			} else {
+				struct request_sock *req = inet_reqsk(sk);
+
+				inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
+								  req);
+			}
 			local_bh_enable();
 			goto restart_rcu;
 		}


@@ -945,13 +945,11 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
 		peer->rate_last = jiffies;
 		++peer->n_redirects;
-#ifdef CONFIG_IP_ROUTE_VERBOSE
-		if (log_martians &&
+		if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
 		    peer->n_redirects == ip_rt_redirect_number)
 			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
 					     &ip_hdr(skb)->saddr, inet_iif(skb),
 					     &ip_hdr(skb)->daddr, &gw);
-#endif
 	}
 out_put_peer:
 	inet_putpeer(peer);
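
This is the standard #ifdef-to-IS_ENABLED() conversion: both forms generate the same object code, but the IS_ENABLED() version keeps the branch visible to the compiler in every configuration, so it cannot silently bit-rot. In general form (CONFIG_FOO_VERBOSE is a placeholder):

	#ifdef CONFIG_FOO_VERBOSE			/* old style: unseen when =n */
		if (log_things)
			pr_warn("noisy diagnostic\n");
	#endif

		if (IS_ENABLED(CONFIG_FOO_VERBOSE) && log_things)	/* new style */
			pr_warn("noisy diagnostic\n");	/* compiled, then discarded when =n */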


@@ -1054,16 +1054,17 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 	if (msg->msg_controllen) {
 		err = udp_cmsg_send(sk, msg, &ipc.gso_size);
-		if (err > 0)
+		if (err > 0) {
 			err = ip_cmsg_send(sk, msg, &ipc,
 					   sk->sk_family == AF_INET6);
+			connected = 0;
+		}
 		if (unlikely(err < 0)) {
 			kfree(ipc.opt);
 			return err;
 		}
 		if (ipc.opt)
 			free = 1;
-		connected = 0;
 	}
 	if (!ipc.opt) {
 		struct ip_options_rcu *inet_opt;


@@ -2028,9 +2028,10 @@ struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *add
 		if (ipv6_addr_equal(&ifp->addr, addr)) {
 			if (!dev || ifp->idev->dev == dev ||
 			    !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
-				result = ifp;
-				in6_ifa_hold(ifp);
-				break;
+				if (in6_ifa_hold_safe(ifp)) {
+					result = ifp;
+					break;
+				}
 			}
 		}
 	}
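
The shape of the race fix: under RCU, an object found in a lookup structure may already have dropped its last reference, so the lookup must take its hold with refcount_inc_not_zero() and treat failure as no match. A generic sketch (obj, table, match() are illustrative):

	rcu_read_lock();
	list_for_each_entry_rcu(obj, &table, node) {
		if (!match(obj, key))
			continue;
		if (refcount_inc_not_zero(&obj->refcnt))
			result = obj;	/* got a stable reference */
		break;			/* else the object is dying: report no match */
	}
	rcu_read_unlock();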


@@ -1307,7 +1307,10 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
 	     struct nl_info *info, struct netlink_ext_ack *extack)
 {
 	struct fib6_table *table = rt->fib6_table;
-	struct fib6_node *fn, *pn = NULL;
+	struct fib6_node *fn;
+#ifdef CONFIG_IPV6_SUBTREES
+	struct fib6_node *pn = NULL;
+#endif
 	int err = -ENOMEM;
 	int allow_create = 1;
 	int replace_required = 0;
@@ -1331,9 +1334,9 @@ int fib6_add(struct fib6_node *root, struct fib6_info *rt,
 		goto out;
 	}
 
+#ifdef CONFIG_IPV6_SUBTREES
 	pn = fn;
 
-#ifdef CONFIG_IPV6_SUBTREES
 	if (rt->fib6_src.plen) {
 		struct fib6_node *sn;


@@ -1387,9 +1387,11 @@ do_udp_sendmsg:
 		ipc6.opt = opt;
 
 		err = udp_cmsg_send(sk, msg, &ipc6.gso_size);
-		if (err > 0)
+		if (err > 0) {
 			err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6,
 						    &ipc6);
+			connected = false;
+		}
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
@@ -1401,7 +1403,6 @@ do_udp_sendmsg:
 		}
 		if (!(opt->opt_nflen|opt->opt_flen))
 			opt = NULL;
-		connected = false;
 	}
 	if (!opt) {
 		opt = txopt_get(np);


@@ -126,7 +126,8 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
 	if (sctph->source != cp->vport || payload_csum ||
 	    skb->ip_summed == CHECKSUM_PARTIAL) {
 		sctph->source = cp->vport;
-		sctp_nat_csum(skb, sctph, sctphoff);
+		if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
+			sctp_nat_csum(skb, sctph, sctphoff);
 	} else {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}
@@ -174,7 +175,8 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp,
 	    (skb->ip_summed == CHECKSUM_PARTIAL &&
 	     !(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) {
 		sctph->dest = cp->dport;
-		sctp_nat_csum(skb, sctph, sctphoff);
+		if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb))
+			sctp_nat_csum(skb, sctph, sctphoff);
 	} else if (skb->ip_summed != CHECKSUM_PARTIAL) {
 		skb->ip_summed = CHECKSUM_UNNECESSARY;
 	}


@@ -2239,7 +2239,7 @@ static const struct nft_expr_type *__nft_expr_type_get(u8 family,
 {
 	const struct nft_expr_type *type, *candidate = NULL;
 
-	list_for_each_entry(type, &nf_tables_expressions, list) {
+	list_for_each_entry_rcu(type, &nf_tables_expressions, list) {
 		if (!nla_strcmp(nla, type->name)) {
 			if (!type->family && !candidate)
 				candidate = type;
@@ -2271,9 +2271,13 @@ static const struct nft_expr_type *nft_expr_type_get(struct net *net,
 	if (nla == NULL)
 		return ERR_PTR(-EINVAL);
 
+	rcu_read_lock();
 	type = __nft_expr_type_get(family, nla);
-	if (type != NULL && try_module_get(type->owner))
+	if (type != NULL && try_module_get(type->owner)) {
+		rcu_read_unlock();
 		return type;
+	}
+	rcu_read_unlock();
 
 	lockdep_nfnl_nft_mutex_not_held();
 #ifdef CONFIG_MODULES


@@ -1687,8 +1687,9 @@ int ovs_ct_copy_action(struct net *net, const struct nlattr *attr,
 	if (ct_info.timeout[0]) {
 		if (nf_ct_set_timeout(net, ct_info.ct, family, key->ip.proto,
 				      ct_info.timeout))
-			pr_info_ratelimited("Failed to associated timeout "
-					    "policy `%s'\n", ct_info.timeout);
+			OVS_NLERR(log,
+				  "Failed to associated timeout policy '%s'",
+				  ct_info.timeout);
 		else
 			ct_info.nf_ct_timeout = rcu_dereference(
 				nf_ct_timeout_find(ct_info.ct)->timeout);
@@ -1896,9 +1897,9 @@ static void ovs_ct_limit_exit(struct net *net, struct ovs_net *ovs_net)
 	for (i = 0; i < CT_LIMIT_HASH_BUCKETS; ++i) {
 		struct hlist_head *head = &info->limits[i];
 		struct ovs_ct_limit *ct_limit;
+		struct hlist_node *next;
 
-		hlist_for_each_entry_rcu(ct_limit, head, hlist_node,
-					 lockdep_ovsl_is_held())
+		hlist_for_each_entry_safe(ct_limit, next, head, hlist_node)
 			kfree_rcu(ct_limit, rcu);
 	}
 	kfree(ovs_net->ct_limit_info->limits);


@@ -809,11 +809,11 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern)
 	sk->sk_write_space	= unix_write_space;
 	sk->sk_max_ack_backlog	= net->unx.sysctl_max_dgram_qlen;
 	sk->sk_destruct		= unix_sock_destructor;
-	u	  = unix_sk(sk);
+	u = unix_sk(sk);
+	u->inflight = 0;
 	u->path.dentry = NULL;
 	u->path.mnt = NULL;
 	spin_lock_init(&u->lock);
-	atomic_long_set(&u->inflight, 0);
 	INIT_LIST_HEAD(&u->link);
 	mutex_init(&u->iolock); /* single task reading lock */
 	mutex_init(&u->bindlock); /* single task binding lock */


@@ -166,17 +166,18 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
 static void dec_inflight(struct unix_sock *usk)
 {
-	atomic_long_dec(&usk->inflight);
+	usk->inflight--;
 }
 
 static void inc_inflight(struct unix_sock *usk)
 {
-	atomic_long_inc(&usk->inflight);
+	usk->inflight++;
 }
 
 static void inc_inflight_move_tail(struct unix_sock *u)
 {
-	atomic_long_inc(&u->inflight);
+	u->inflight++;
+
 	/* If this still might be part of a cycle, move it to the end
 	 * of the list, so that it's checked even if it was already
 	 * passed over
@@ -234,20 +235,34 @@ void unix_gc(void)
 	 * receive queues. Other, non candidate sockets _can_ be
 	 * added to queue, so we must make sure only to touch
 	 * candidates.
+	 *
+	 * Embryos, though never candidates themselves, affect which
+	 * candidates are reachable by the garbage collector. Before
+	 * being added to a listener's queue, an embryo may already
+	 * receive data carrying SCM_RIGHTS, potentially making the
+	 * passed socket a candidate that is not yet reachable by the
+	 * collector. It becomes reachable once the embryo is
+	 * enqueued. Therefore, we must ensure that no SCM-laden
+	 * embryo appears in a (candidate) listener's queue between
+	 * consecutive scan_children() calls.
 	 */
 	list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
+		struct sock *sk = &u->sk;
 		long total_refs;
-		long inflight_refs;
 
-		total_refs = file_count(u->sk.sk_socket->file);
-		inflight_refs = atomic_long_read(&u->inflight);
+		total_refs = file_count(sk->sk_socket->file);
 
-		BUG_ON(inflight_refs < 1);
-		BUG_ON(total_refs < inflight_refs);
-		if (total_refs == inflight_refs) {
+		BUG_ON(!u->inflight);
+		BUG_ON(total_refs < u->inflight);
+		if (total_refs == u->inflight) {
 			list_move_tail(&u->link, &gc_candidates);
 			__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
 			__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
+
+			if (sk->sk_state == TCP_LISTEN) {
+				unix_state_lock_nested(sk, U_LOCK_GC_LISTENER);
+				unix_state_unlock(sk);
+			}
 		}
 	}
@@ -271,7 +286,7 @@ void unix_gc(void)
 		/* Move cursor to after the current position. */
 		list_move(&cursor, &u->link);
 
-		if (atomic_long_read(&u->inflight) > 0) {
+		if (u->inflight) {
 			list_move_tail(&u->link, &not_cycle_list);
 			__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
 			scan_children(&u->sk, inc_inflight_move_tail, NULL);


@@ -51,12 +51,13 @@ void unix_inflight(struct user_struct *user, struct file *fp)
 	if (s) {
 		struct unix_sock *u = unix_sk(s);
 
-		if (atomic_long_inc_return(&u->inflight) == 1) {
+		if (!u->inflight) {
 			BUG_ON(!list_empty(&u->link));
 			list_add_tail(&u->link, &gc_inflight_list);
 		} else {
 			BUG_ON(list_empty(&u->link));
 		}
+		u->inflight++;
+
 		/* Paired with READ_ONCE() in wait_for_unix_gc() */
 		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
 	}
@@ -73,10 +74,11 @@ void unix_notinflight(struct user_struct *user, struct file *fp)
 	if (s) {
 		struct unix_sock *u = unix_sk(s);
 
-		BUG_ON(!atomic_long_read(&u->inflight));
+		BUG_ON(!u->inflight);
 		BUG_ON(list_empty(&u->link));
 
-		if (atomic_long_dec_and_test(&u->inflight))
+		u->inflight--;
+		if (!u->inflight)
 			list_del_init(&u->link);
 		/* Paired with READ_ONCE() in wait_for_unix_gc() */
 		WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);


@@ -22,7 +22,7 @@ echo 'sched:*' > set_event
 yield
 
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
 if [ $count -lt 3 ]; then
     fail "at least fork, exec and exit events should be recorded"
 fi
@@ -33,7 +33,7 @@ echo 1 > events/sched/enable
 yield
 
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
 if [ $count -lt 3 ]; then
     fail "at least fork, exec and exit events should be recorded"
 fi
@@ -44,7 +44,7 @@ echo 0 > events/sched/enable
 yield
 
-count=`cat trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
+count=`head -n 100 trace | grep -v ^# | awk '{ print $5 }' | sort -u | wc -l`
 if [ $count -ne 0 ]; then
     fail "any of scheduler events should not be recorded"
 fi


@@ -66,7 +66,7 @@ static int check_diff(struct timeval start, struct timeval end)
 	diff = end.tv_usec - start.tv_usec;
 	diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC;
 
-	if (abs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
+	if (llabs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
		printf("Diff too high: %lld..", diff);
 		return -1;
 	}
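
The warning being fixed is about integer width: abs() takes and returns int, so a long long microsecond delta is truncated before the comparison, while llabs() keeps all 64 bits. Illustrative values:

	long long diff = 5000000000LL;	/* ~5e9 us, larger than INT_MAX */
	int bad = abs(diff);		/* argument truncated to 32 bits first */
	long long good = llabs(diff);	/* 5000000000, as intended */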


@@ -195,7 +195,9 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
 		return 0;
 
-	/* setup delayed work */
+	/* Arch specific code should not do async PF in this case */
+	if (unlikely(kvm_is_error_hva(hva)))
+		return 0;
 
 	/*
 	 * do alloc nowait since if we are going to sleep anyway we
@@ -213,24 +215,15 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	work->mm = current->mm;
 	mmget(work->mm);
 
-	/* this can't really happen otherwise gfn_to_pfn_async
-	   would succeed */
-	if (unlikely(kvm_is_error_hva(work->addr)))
-		goto retry_sync;
-
 	INIT_WORK(&work->work, async_pf_execute);
-	if (!schedule_work(&work->work))
-		goto retry_sync;
 
 	list_add_tail(&work->queue, &vcpu->async_pf.queue);
 	vcpu->async_pf.queued++;
 	kvm_arch_async_page_not_present(vcpu, work);
+
+	schedule_work(&work->work);
 
 	return 1;
-retry_sync:
-	kvm_put_kvm(work->vcpu->kvm);
-	mmput(work->mm);
-	kmem_cache_free(async_pf_cache, work);
-	return 0;
 }
 
 int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu)