Merge https://github.com/LineageOS/android_kernel_qcom_sm8350 into lineage-22.1

Change-Id: I1458bcb6fad952cf220e2ea27c853b15ff550b0d
This commit is contained in commit a7d46f236b.
480 changed files with 58492 additions and 52666 deletions
@@ -125,6 +125,17 @@ Description:
will be present in sysfs. Writing 1 to this file
will perform reset.

What: /sys/bus/pci/devices/.../reset_subordinate
Date: October 2024
Contact: linux-pci@vger.kernel.org
Description:
This is visible only for bridge devices. If you want to reset
all devices attached through the subordinate bus of a specific
bridge device, writing 1 to this will try to do it. This will
affect all devices attached to the system through this bridge
similiar to writing 1 to their individual "reset" file, so use
with caution.

What: /sys/bus/pci/devices/.../vpd
Date: February 2008
Contact: Ben Hutchings <bwh@kernel.org>
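Illustrative aside (not part of the diff): a minimal user-space sketch of how the new reset_subordinate attribute could be exercised. The PCI bridge address below is a hypothetical placeholder, and the attribute appears only on bridge devices.

/* Sketch only: write "1" to a bridge's reset_subordinate attribute. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical bridge; substitute a real address from /sys/bus/pci/devices. */
	const char *path = "/sys/bus/pci/devices/0000:00:1c.0/reset_subordinate";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Writing 1 asks the kernel to reset every device below this bridge. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}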
@@ -902,12 +902,17 @@ The default value is 60.
unprivileged_userfaultfd
========================

This flag controls whether unprivileged users can use the userfaultfd
system calls. Set this to 1 to allow unprivileged users to use the
userfaultfd system calls, or set this to 0 to restrict userfaultfd to only
privileged users (with SYS_CAP_PTRACE capability).
This flag controls the mode in which unprivileged users can use the
userfaultfd system calls. Set this to 0 to restrict unprivileged users
to handle page faults in user mode only. In this case, users without
SYS_CAP_PTRACE must pass UFFD_USER_MODE_ONLY in order for userfaultfd to
succeed. Prohibiting use of userfaultfd for handling faults from kernel
mode may make certain vulnerabilities more difficult to exploit.

The default value is 1.
Set this to 1 to allow unprivileged users to use the userfaultfd system
calls without any restrictions.

The default value is 0.


user_reserve_kbytes
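Illustrative aside (not part of the diff): a minimal sketch of the behaviour the updated sysctl text describes: with vm.unprivileged_userfaultfd set to 0, an unprivileged caller without CAP_SYS_PTRACE is expected to pass UFFD_USER_MODE_ONLY or the call fails. It assumes headers that define __NR_userfaultfd and UFFD_USER_MODE_ONLY.

/* Sketch only: open a userfaultfd restricted to user-mode faults. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/userfaultfd.h>

int main(void)
{
	/* UFFD_USER_MODE_ONLY lets this succeed even when the sysctl is 0. */
	int uffd = syscall(__NR_userfaultfd,
			   O_CLOEXEC | O_NONBLOCK | UFFD_USER_MODE_ONLY);

	if (uffd < 0) {
		perror("userfaultfd");
		return 1;
	}
	printf("userfaultfd fd: %d\n", uffd);
	close(uffd);
	return 0;
}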
Makefile (2 changes)

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 286
SUBLEVEL = 289
EXTRAVERSION =
NAME = Kleptomaniac Octopus

@@ -1 +1 @@
LTS_5.4.274_564901bd7f5d
LTS_5.4.281_d62984adb112

File diff suppressed because it is too large

@@ -2604,6 +2604,7 @@
usb_disabled
usb_driver_claim_interface
usb_driver_release_interface
usb_driver_set_configuration
usb_enable_autosuspend
usb_enable_intel_xhci_ports
usb_ep_alloc_request

android/abi_gki_aarch64_trimble (6 changes, new file)

@@ -0,0 +1,6 @@
[abi_symbol_list]
# required by cdc_mbim.ko
in6_dev_finish_destroy
# required by qmi_wwan.ko
netdev_stats_to_stats64
netdev_upper_get_next_dev_rcu
@@ -697,6 +697,13 @@ config HAVE_IRQ_TIME_ACCOUNTING
Archs need to ensure they use a high enough resolution clock to
support irq time accounting and then call enable_sched_clock_irqtime().

config HAVE_MOVE_PUD
bool
help
Architectures that select this are able to move page tables at the
PUD level. If there are only 3 page table levels, the move effectively
happens at the PGD level.

config HAVE_MOVE_PMD
bool
help

@@ -6,7 +6,7 @@
KBUILD_DEFCONFIG := nsim_hs_defconfig

ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux-)
CROSS_COMPILE := $(call cc-cross-prefix, arc-linux- arceb-linux- arc-linux-gnu-)
endif

cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
@@ -280,8 +280,8 @@

reg_dcdc5: dcdc5 {
regulator-always-on;
regulator-min-microvolt = <1425000>;
regulator-max-microvolt = <1575000>;
regulator-min-microvolt = <1450000>;
regulator-max-microvolt = <1550000>;
regulator-name = "vcc-dram";
};

@@ -115,6 +115,8 @@ config ARM64
select GENERIC_GETTIMEOFDAY
select HANDLE_DOMAIN_IRQ
select HARDIRQS_SW_RESEND
select HAVE_MOVE_PMD
select HAVE_MOVE_PUD
select HAVE_PCI
select HAVE_ACPI_APEI if (ACPI && EFI)
select HAVE_ALIGNED_STRUCT_PAGE if SLUB

@@ -1743,7 +1745,7 @@ config RANDOMIZE_MODULE_REGION_FULL
core kernel, so branch relocations are always in range.

config CC_HAVE_STACKPROTECTOR_SYSREG
def_bool $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0)
def_bool CC_IS_GCC && $(cc-option,-mstack-protector-guard=sysreg -mstack-protector-guard-reg=sp_el0 -mstack-protector-guard-offset=0)

config STACKPROTECTOR_PER_TASK
def_bool y
arch/arm64/boot/dts/vendor/bindings/clock/adi,axi-clkgen.yaml (67 changes, vendored, new file)

@@ -0,0 +1,67 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/clock/adi,axi-clkgen.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: Binding for Analog Devices AXI clkgen pcore clock generator

maintainers:
- Lars-Peter Clausen <lars@metafoo.de>
- Michael Hennerich <michael.hennerich@analog.com>

description: |
The axi_clkgen IP core is a software programmable clock generator,
that can be synthesized on various FPGA platforms.

Link: https://wiki.analog.com/resources/fpga/docs/axi_clkgen

properties:
compatible:
enum:
- adi,axi-clkgen-2.00.a

clocks:
description:
Specifies the reference clock(s) from which the output frequency is
derived. This must either reference one clock if only the first clock
input is connected or two if both clock inputs are connected. The last
clock is the AXI bus clock that needs to be enabled so we can access the
core registers.
minItems: 2
maxItems: 3

clock-names:
oneOf:
- items:
- const: clkin1
- const: s_axi_aclk
- items:
- const: clkin1
- const: clkin2
- const: s_axi_aclk

'#clock-cells':
const: 0

reg:
maxItems: 1

required:
- compatible
- reg
- clocks
- clock-names
- '#clock-cells'

additionalProperties: false

examples:
- |
clock-controller@ff000000 {
compatible = "adi,axi-clkgen-2.00.a";
#clock-cells = <0>;
reg = <0xff000000 0x1000>;
clocks = <&osc 1>, <&clkc 15>;
clock-names = "clkin1", "s_axi_aclk";
};
@@ -1,25 +0,0 @@
Binding for the axi-clkgen clock generator

This binding uses the common clock binding[1].

[1] Documentation/devicetree/bindings/clock/clock-bindings.txt

Required properties:
- compatible : shall be "adi,axi-clkgen-1.00.a" or "adi,axi-clkgen-2.00.a".
- #clock-cells : from common clock binding; Should always be set to 0.
- reg : Address and length of the axi-clkgen register set.
- clocks : Phandle and clock specifier for the parent clock(s). This must
either reference one clock if only the first clock input is connected or two
if both clock inputs are connected. For the later case the clock connected
to the first input must be specified first.

Optional properties:
- clock-output-names : From common clock binding.

Example:
clock@ff000000 {
compatible = "adi,axi-clkgen";
#clock-cells = <0>;
reg = <0xff000000 0x1000>;
clocks = <&osc 1>;
};

@@ -621,6 +621,8 @@ patternProperties:
description: National Semiconductor
"^nec,.*":
description: NEC LCD Technologies, Ltd.
"^neofidelity,.*":
description: Neofidelity Inc.
"^neonode,.*":
description: Neonode Inc.
"^netgear,.*":
@@ -37,6 +37,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_USERFAULTFD=y
# CONFIG_RSEQ is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set

@@ -110,6 +111,7 @@ CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_XFRM_INTERFACE=y
CONFIG_XFRM_MIGRATE=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y

@@ -211,9 +213,12 @@ CONFIG_NET_SCH_HTB=y
CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_CLS_U32=y
CONFIG_NET_CLS_BPF=y
CONFIG_NET_CLS_MATCHALL=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_U32=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=y
CONFIG_NET_ACT_BPF=y
CONFIG_BPF_JIT=y
CONFIG_BT=y
CONFIG_BT_RFCOMM=y

@@ -279,12 +284,14 @@ CONFIG_PPP_DEFLATE=y
CONFIG_PPP_MPPE=y
CONFIG_PPTP=y
CONFIG_PPPOL2TP=y
CONFIG_USB_RTL8150=y
CONFIG_USB_RTL8152=y
CONFIG_USB_USBNET=y
# CONFIG_USB_NET_CDC_NCM is not set
CONFIG_USB_NET_CDC_EEM=y
# CONFIG_USB_NET_NET1080 is not set
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
CONFIG_USB_NET_AQC111=y
# CONFIG_WLAN_VENDOR_ADMTEK is not set
# CONFIG_WLAN_VENDOR_ATH is not set
# CONFIG_WLAN_VENDOR_ATMEL is not set

@@ -560,7 +567,9 @@ CONFIG_STATIC_USERMODEHELPER=y
CONFIG_STATIC_USERMODEHELPER_PATH=""
CONFIG_SECURITY_SELINUX=y
CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
CONFIG_CRYPTO_CHACHA20POLY1305=y
CONFIG_CRYPTO_ADIANTUM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_BLAKE2B=y
CONFIG_CRYPTO_LZ4=y
CONFIG_CRYPTO_ZSTD=y
@@ -216,7 +216,14 @@ CONFIG_HID_WIIMOTE=y
CONFIG_USB_DUMMY_HCD=y
CONFIG_USB_CONFIGFS_ACM=y
# CONFIG_USB_CONFIGFS_RNDIS is not set
CONFIG_USB_CONFIGFS_F_UVC=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_LIBCOMPOSITE=y
CONFIG_VIDEOBUF2_VMALLOC=y
CONFIG_USB_F_UVC=y
CONFIG_USB_G_WEBCAM=m
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_F_UVC=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EROFS_FS=y
# CONFIG_SERIAL_MSM_GENI_EARLY_CONSOLE is not set
@@ -414,6 +414,7 @@ static inline pmd_t pmd_mkdevmap(pmd_t pmd)
#define pfn_pud(pfn,prot) __pud(__phys_to_pud_val((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))

#define set_pmd_at(mm, addr, pmdp, pmd) set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))
#define set_pud_at(mm, addr, pudp, pud) set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud))

#define __pgd_to_phys(pgd) __pte_to_phys(pgd_pte(pgd))
#define __phys_to_pgd_val(phys) __phys_to_pte_val(phys)

@@ -443,7 +443,7 @@ static void tls_thread_switch(struct task_struct *next)

if (is_compat_thread(task_thread_info(next)))
write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
else if (!arm64_kernel_unmapped_at_el0())
else
write_sysreg(0, tpidrro_el0);

write_sysreg(*task_user_tls(next), tpidr_el0);
@@ -92,7 +92,7 @@ static struct platform_device mcf_uart = {
.dev.platform_data = mcf_uart_platform_data,
};

#if IS_ENABLED(CONFIG_FEC)
#ifdef MCFFEC_BASE0

#ifdef CONFIG_M5441x
#define FEC_NAME "enet-fec"

@@ -144,6 +144,7 @@ static struct platform_device mcf_fec0 = {
.platform_data = FEC_PDATA,
}
};
#endif /* MCFFEC_BASE0 */

#ifdef MCFFEC_BASE1
static struct resource mcf_fec1_resources[] = {

@@ -181,7 +182,6 @@ static struct platform_device mcf_fec1 = {
}
};
#endif /* MCFFEC_BASE1 */
#endif /* CONFIG_FEC */

#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
/*

@@ -556,12 +556,12 @@ static struct platform_device mcf_edma = {

static struct platform_device *mcf_devices[] __initdata = {
&mcf_uart,
#if IS_ENABLED(CONFIG_FEC)
#ifdef MCFFEC_BASE0
&mcf_fec0,
#endif
#ifdef MCFFEC_BASE1
&mcf_fec1,
#endif
#endif
#if IS_ENABLED(CONFIG_SPI_COLDFIRE_QSPI)
&mcf_qspi,
#endif

@@ -144,7 +144,7 @@ static inline void gpio_free(unsigned gpio)
* read-modify-write as well as those controlled by the EPORT and GPIO modules.
*/
#define MCFGPIO_SCR_START 40
#elif defined(CONFIGM5441x)
#elif defined(CONFIG_M5441x)
/* The m5441x EPORT doesn't have its own GPIO port, uses PORT C */
#define MCFGPIO_SCR_START 0
#else
@@ -93,8 +93,8 @@ struct pcc_regs {
#define M147_SCC_B_ADDR 0xfffe3000
#define M147_SCC_PCLK 5000000

#define MVME147_IRQ_SCSI_PORT (IRQ_USER+0x45)
#define MVME147_IRQ_SCSI_DMA (IRQ_USER+0x46)
#define MVME147_IRQ_SCSI_PORT (IRQ_USER + 5)
#define MVME147_IRQ_SCSI_DMA (IRQ_USER + 6)

/* SCC interrupts, for MVME147 */

@@ -12,8 +12,9 @@
#include <linux/string.h>
#include <asm/setup.h>

extern void mvme16x_cons_write(struct console *co,
const char *str, unsigned count);

#include "../mvme147/mvme147.h"
#include "../mvme16x/mvme16x.h"

asmlinkage void __init debug_cons_nputs(const char *s, unsigned n);

@@ -22,7 +23,9 @@ static void __ref debug_cons_write(struct console *c,
{
#if !(defined(CONFIG_SUN3) || defined(CONFIG_M68000) || \
defined(CONFIG_COLDFIRE))
if (MACH_IS_MVME16x)
if (MACH_IS_MVME147)
mvme147_scc_write(c, s, n);
else if (MACH_IS_MVME16x)
mvme16x_cons_write(c, s, n);
else
debug_cons_nputs(s, n);

@@ -36,6 +36,7 @@
#include <asm/machdep.h>
#include <asm/mvme147hw.h>

#include "mvme147.h"

static void mvme147_get_model(char *model);
extern void mvme147_sched_init(irq_handler_t handler);

@@ -189,3 +190,32 @@ int mvme147_hwclk(int op, struct rtc_time *t)
}
return 0;
}

static void scc_delay(void)
{
__asm__ __volatile__ ("nop; nop;");
}

static void scc_write(char ch)
{
do {
scc_delay();
} while (!(in_8(M147_SCC_A_ADDR) & BIT(2)));
scc_delay();
out_8(M147_SCC_A_ADDR, 8);
scc_delay();
out_8(M147_SCC_A_ADDR, ch);
}

void mvme147_scc_write(struct console *co, const char *str, unsigned int count)
{
unsigned long flags;

local_irq_save(flags);
while (count--) {
if (*str == '\n')
scc_write('\r');
scc_write(*str++);
}
local_irq_restore(flags);
}

arch/m68k/mvme147/mvme147.h (6 changes, new file)

@@ -0,0 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */

struct console;

/* config.c */
void mvme147_scc_write(struct console *co, const char *str, unsigned int count);
@@ -39,6 +39,8 @@
#include <asm/machdep.h>
#include <asm/mvme16xhw.h>

#include "mvme16x.h"

extern t_bdid mvme_bdid;

static MK48T08ptr_t volatile rtc = (MK48T08ptr_t)MVME_RTC_BASE;

arch/m68k/mvme16x/mvme16x.h (6 changes, new file)

@@ -0,0 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */

struct console;

/* config.c */
void mvme16x_cons_write(struct console *co, const char *str, unsigned count);

@@ -272,7 +272,7 @@ drivers-$(CONFIG_PCI) += arch/mips/pci/
ifdef CONFIG_64BIT
ifndef KBUILD_SYM32
ifeq ($(shell expr $(load-y) \< 0xffffffff80000000), 0)
KBUILD_SYM32 = y
KBUILD_SYM32 = $(call cc-option-yn, -msym32)
endif
endif

@@ -97,7 +97,7 @@ do { \
} \
} while (0)
#else
# define __sanitize_fcr31(next)
# define __sanitize_fcr31(next) do { (void) (next); } while (0)
#endif

/*
@@ -160,9 +160,4 @@ extern int emulate_step(struct pt_regs *regs, unsigned int instr);
*/
extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);

extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
const void *mem, bool cross_endian);
extern void emulate_vsx_store(struct instruction_op *op,
const union vsx_reg *reg, void *mem,
bool cross_endian);
extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs);

@@ -49,6 +49,7 @@ int vdso_getcpu_init(void);

#define V_FUNCTION_BEGIN(name) \
.globl name; \
.type name,@function; \
name: \

#define V_FUNCTION_END(name) \

@@ -2856,7 +2856,7 @@ static void __init fixup_device_tree_chrp(void)
#endif

#if defined(CONFIG_PPC64) && defined(CONFIG_PPC_PMAC)
static void __init fixup_device_tree_pmac(void)
static void __init fixup_device_tree_pmac64(void)
{
phandle u3, i2c, mpic;
u32 u3_rev;

@@ -2896,7 +2896,31 @@ static void __init fixup_device_tree_pmac(void)
&parent, sizeof(parent));
}
#else
#define fixup_device_tree_pmac()
#define fixup_device_tree_pmac64()
#endif

#ifdef CONFIG_PPC_PMAC
static void __init fixup_device_tree_pmac(void)
{
__be32 val = 1;
char type[8];
phandle node;

// Some pmacs are missing #size-cells on escc nodes
for (node = 0; prom_next_node(&node); ) {
type[0] = '\0';
prom_getprop(node, "device_type", type, sizeof(type));
if (prom_strcmp(type, "escc"))
continue;

if (prom_getproplen(node, "#size-cells") != PROM_ERROR)
continue;

prom_setprop(node, NULL, "#size-cells", &val, sizeof(val));
}
}
#else
static inline void fixup_device_tree_pmac(void) { }
#endif

#ifdef CONFIG_PPC_EFIKA

@@ -3121,6 +3145,7 @@ static void __init fixup_device_tree(void)
fixup_device_tree_maple_memory_controller();
fixup_device_tree_chrp();
fixup_device_tree_pmac();
fixup_device_tree_pmac64();
fixup_device_tree_efika();
fixup_device_tree_pasemi();
}
@@ -663,8 +663,8 @@ static nokprobe_inline int emulate_stq(struct pt_regs *regs, unsigned long ea,
#endif /* __powerpc64 */

#ifdef CONFIG_VSX
void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
const void *mem, bool rev)
static nokprobe_inline void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
const void *mem, bool rev)
{
int size, read_size;
int i, j;

@@ -744,11 +744,9 @@ void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
break;
}
}
EXPORT_SYMBOL_GPL(emulate_vsx_load);
NOKPROBE_SYMBOL(emulate_vsx_load);

void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
void *mem, bool rev)
static nokprobe_inline void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
void *mem, bool rev)
{
int size, write_size;
int i, j;

@@ -820,8 +818,6 @@ void emulate_vsx_store(struct instruction_op *op, const union vsx_reg *reg,
break;
}
}
EXPORT_SYMBOL_GPL(emulate_vsx_store);
NOKPROBE_SYMBOL(emulate_vsx_store);

static nokprobe_inline int do_vsx_load(struct instruction_op *op,
unsigned long ea, struct pt_regs *regs,

@@ -17,8 +17,6 @@

struct task_struct;

register struct task_struct *riscv_current_is_tp __asm__("tp");

/*
* This only works because "struct thread_info" is at offset 0 from "struct
* task_struct". This constraint seems to be necessary on other architectures

@@ -28,7 +26,8 @@ register struct task_struct *riscv_current_is_tp __asm__("tp");
*/
static __always_inline struct task_struct *get_current(void)
{
return riscv_current_is_tp;
register struct task_struct *tp __asm__("tp");
return tp;
}

#define current get_current()
@@ -22,8 +22,6 @@
#include <asm/switch_to.h>
#include <asm/thread_info.h>

unsigned long gp_in_global __asm__("gp");

extern asmlinkage void ret_from_fork(void);
extern asmlinkage void ret_from_kernel_thread(void);

@@ -111,8 +109,9 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
/* p->thread holds context to be restored by __switch_to() */
if (unlikely(p->flags & PF_KTHREAD)) {
/* Kernel thread */
const register unsigned long gp __asm__ ("gp");
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gp = gp_in_global;
childregs->gp = gp;
childregs->sstatus = SR_SPP | SR_SPIE; /* Supervisor, irqs on */

p->thread.ra = (unsigned long)ret_from_kernel_thread;

@@ -12,8 +12,6 @@
#include <linux/stacktrace.h>
#include <linux/ftrace.h>

register unsigned long sp_in_global __asm__("sp");

#ifdef CONFIG_FRAME_POINTER

struct stackframe {

@@ -31,7 +29,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
const register unsigned long current_sp = sp_in_global;
const register unsigned long current_sp __asm__ ("sp");
fp = (unsigned long)__builtin_frame_address(0);
sp = current_sp;
pc = (unsigned long)walk_stackframe;

@@ -75,7 +73,8 @@ void notrace walk_stackframe(struct task_struct *task,
sp = user_stack_pointer(regs);
pc = instruction_pointer(regs);
} else if (task == NULL || task == current) {
sp = sp_in_global;
const register unsigned long current_sp __asm__ ("sp");
sp = current_sp;
pc = (unsigned long)walk_stackframe;
} else {
/* task blocked in __switch_to */
@@ -1862,7 +1862,9 @@ static void cpumsf_pmu_stop(struct perf_event *event, int flags)
event->hw.state |= PERF_HES_STOPPED;

if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
hw_perf_event_update(event, 1);
/* CPU hotplug off removes SDBs. No samples to extract. */
if (cpuhw->flags & PMU_F_RESERVED)
hw_perf_event_update(event, 1);
event->hw.state |= PERF_HES_UPTODATE;
}
perf_pmu_enable(event->pmu);

@@ -12,7 +12,7 @@ kapi-hdrs-y := $(kapi)/unistd_nr.h
uapi-hdrs-y := $(uapi)/unistd_32.h
uapi-hdrs-y += $(uapi)/unistd_64.h

targets += $(addprefix ../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y))
targets += $(addprefix ../../../../,$(gen-y) $(kapi-hdrs-y) $(uapi-hdrs-y))

PHONY += kapi uapi
@@ -133,7 +133,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)

static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < NR_CPUS ? cpu_data + *pos : NULL;
return *pos < nr_cpu_ids ? cpu_data + *pos : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{

@@ -347,7 +347,7 @@ static struct platform_driver uml_net_driver = {

static void net_device_release(struct device *dev)
{
struct uml_net *device = dev_get_drvdata(dev);
struct uml_net *device = container_of(dev, struct uml_net, pdev.dev);
struct net_device *netdev = device->dev;
struct uml_net_private *lp = netdev_priv(netdev);

@@ -860,7 +860,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)

static void ubd_device_release(struct device *dev)
{
struct ubd *ubd_dev = dev_get_drvdata(dev);
struct ubd *ubd_dev = container_of(dev, struct ubd, pdev.dev);

blk_cleanup_queue(ubd_dev->queue);
blk_mq_free_tag_set(&ubd_dev->tag_set);

@@ -802,7 +802,8 @@ static struct platform_driver uml_net_driver = {

static void vector_device_release(struct device *dev)
{
struct vector_device *device = dev_get_drvdata(dev);
struct vector_device *device =
container_of(dev, struct vector_device, pdev.dev);
struct net_device *netdev = device->dev;

list_del(&device->list);

@@ -80,10 +80,10 @@ void __init setup_physmem(unsigned long start, unsigned long reserve_end,
unsigned long len, unsigned long long highmem)
{
unsigned long reserve = reserve_end - start;
long map_size = len - reserve;
unsigned long map_size = len - reserve;
int err;

if(map_size <= 0) {
if (len <= reserve) {
os_warn("Too few physical memory! Needed=%lu, given=%lu\n",
reserve, len);
exit(1);

@@ -94,7 +94,7 @@ void __init setup_physmem(unsigned long start, unsigned long reserve_end,
err = os_map_memory((void *) reserve_end, physmem_fd, reserve,
map_size, 1, 1, 1);
if (err < 0) {
os_warn("setup_physmem - mapping %ld bytes of memory at 0x%p "
os_warn("setup_physmem - mapping %lu bytes of memory at 0x%p "
"failed - errno = %d\n", map_size,
(void *) reserve_end, err);
exit(1);

@@ -444,6 +444,6 @@ int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
int cpu = current_thread_info()->cpu;

return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu) == 0;
}
@@ -17,7 +17,9 @@

static void _print_addr(void *data, unsigned long address, int reliable)
{
pr_info(" [<%08lx>] %s%pS\n", address, reliable ? "" : "? ",
const char *loglvl = data;

printk("%s [<%08lx>] %s%pS\n", loglvl, address, reliable ? "" : "? ",
(void *)address);
}

@@ -25,9 +27,9 @@ static const struct stacktrace_ops stackops = {
.address = _print_addr
};

void show_stack(struct task_struct *task, unsigned long *stack)
void show_stack_loglvl(struct task_struct *task, unsigned long *stack,
const char *loglvl)
{
unsigned long *sp = stack;
struct pt_regs *segv_regs = current->thread.segv_regs;
int i;

@@ -38,10 +40,9 @@ void show_stack(struct task_struct *task, unsigned long *stack)
}

if (!stack)
sp = get_stack_pointer(task, segv_regs);
stack = get_stack_pointer(task, segv_regs);

pr_info("Stack:\n");
stack = sp;
printk("%sStack:\n", loglvl);
for (i = 0; i < 3 * STACKSLOTS_PER_LINE; i++) {
if (kstack_end(stack))
break;

@@ -49,9 +50,12 @@ void show_stack(struct task_struct *task, unsigned long *stack)
pr_cont("\n");
pr_cont(" %08lx", *stack++);
}
pr_cont("\n");

pr_info("Call Trace:\n");
dump_trace(current, &stackops, NULL);
pr_info("\n");
printk("%sCall Trace:\n", loglvl);
dump_trace(task ?: current, &stackops, (void *)loglvl);
}

void show_stack(struct task_struct *task, unsigned long *stack)
{
show_stack_loglvl(task, stack, KERN_INFO);
}
@@ -191,6 +191,7 @@ config X86
select HAVE_MIXED_BREAKPOINTS_REGS
select HAVE_MOD_ARCH_SPECIFIC
select HAVE_MOVE_PMD
select HAVE_MOVE_PUD
select HAVE_NMI
select HAVE_OPROFILE
select HAVE_OPTPROBES

@@ -39,6 +39,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
CONFIG_BPF_SYSCALL=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_USERFAULTFD=y
# CONFIG_RSEQ is not set
CONFIG_EMBEDDED=y
# CONFIG_COMPAT_BRK is not set

@@ -88,6 +89,7 @@ CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=y
CONFIG_XFRM_INTERFACE=y
CONFIG_XFRM_MIGRATE=y
CONFIG_XFRM_STATISTICS=y
CONFIG_NET_KEY=y
CONFIG_XDP_SOCKETS=y

@@ -189,9 +191,12 @@ CONFIG_NET_SCH_HTB=y
CONFIG_NET_SCH_INGRESS=y
CONFIG_NET_CLS_U32=y
CONFIG_NET_CLS_BPF=y
CONFIG_NET_CLS_MATCHALL=y
CONFIG_NET_EMATCH=y
CONFIG_NET_EMATCH_U32=y
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=y
CONFIG_NET_ACT_BPF=y
CONFIG_BPF_JIT=y
CONFIG_BT=y
CONFIG_BT_RFCOMM=y

@@ -253,12 +258,14 @@ CONFIG_PPP_DEFLATE=y
CONFIG_PPP_MPPE=y
CONFIG_PPTP=y
CONFIG_PPPOL2TP=y
CONFIG_USB_RTL8150=y
CONFIG_USB_RTL8152=y
CONFIG_USB_USBNET=y
# CONFIG_USB_NET_CDC_NCM is not set
CONFIG_USB_NET_CDC_EEM=y
# CONFIG_USB_NET_NET1080 is not set
# CONFIG_USB_NET_CDC_SUBSET is not set
# CONFIG_USB_NET_ZAURUS is not set
CONFIG_USB_NET_AQC111=y
# CONFIG_WLAN_VENDOR_ADMTEK is not set
# CONFIG_WLAN_VENDOR_ATH is not set
# CONFIG_WLAN_VENDOR_ATMEL is not set

@@ -489,7 +496,9 @@ CONFIG_STATIC_USERMODEHELPER=y
CONFIG_STATIC_USERMODEHELPER_PATH=""
CONFIG_SECURITY_SELINUX=y
CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
CONFIG_CRYPTO_CHACHA20POLY1305=y
CONFIG_CRYPTO_ADIANTUM=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_BLAKE2B=y
CONFIG_CRYPTO_SHA256_SSSE3=y
CONFIG_CRYPTO_AES_NI_INTEL=y
@@ -20,7 +20,7 @@
#define T1 %xmm7

#define STATEP %rdi
#define LEN %rsi
#define LEN %esi
#define SRC %rdx
#define DST %rcx

@@ -75,32 +75,32 @@ __load_partial:
xor %r9d, %r9d
pxor MSG, MSG

mov LEN, %r8
mov LEN, %r8d
and $0x1, %r8
jz .Lld_partial_1

mov LEN, %r8
mov LEN, %r8d
and $0x1E, %r8
add SRC, %r8
mov (%r8), %r9b

.Lld_partial_1:
mov LEN, %r8
mov LEN, %r8d
and $0x2, %r8
jz .Lld_partial_2

mov LEN, %r8
mov LEN, %r8d
and $0x1C, %r8
add SRC, %r8
shl $0x10, %r9
mov (%r8), %r9w

.Lld_partial_2:
mov LEN, %r8
mov LEN, %r8d
and $0x4, %r8
jz .Lld_partial_4

mov LEN, %r8
mov LEN, %r8d
and $0x18, %r8
add SRC, %r8
shl $32, %r9

@@ -110,11 +110,11 @@ __load_partial:
.Lld_partial_4:
movq %r9, MSG

mov LEN, %r8
mov LEN, %r8d
and $0x8, %r8
jz .Lld_partial_8

mov LEN, %r8
mov LEN, %r8d
and $0x10, %r8
add SRC, %r8
pslldq $8, MSG

@@ -138,7 +138,7 @@ ENDPROC(__load_partial)
* %r10
*/
__store_partial:
mov LEN, %r8
mov LEN, %r8d
mov DST, %r9

movq T0, %r10

@@ -676,7 +676,7 @@ ENTRY(crypto_aegis128_aesni_dec_tail)
call __store_partial

/* mask with byte count: */
movq LEN, T0
movd LEN, T0
punpcklbw T0, T0
punpcklbw T0, T0
punpcklbw T0, T0

@@ -701,7 +701,8 @@ ENDPROC(crypto_aegis128_aesni_dec_tail)

/*
* void crypto_aegis128_aesni_final(void *state, void *tag_xor,
* u64 assoclen, u64 cryptlen);
* unsigned int assoclen,
* unsigned int cryptlen);
*/
ENTRY(crypto_aegis128_aesni_final)
FRAME_BEGIN

@@ -714,8 +715,8 @@ ENTRY(crypto_aegis128_aesni_final)
movdqu 0x40(STATEP), STATE4

/* prepare length block: */
movq %rdx, MSG
movq %rcx, T0
movd %edx, MSG
movd %ecx, T0
pslldq $8, T0
pxor T0, MSG
psllq $3, MSG /* multiply by 8 (to get bit count) */
@@ -782,11 +782,13 @@ static void pt_buffer_advance(struct pt_buffer *buf)
buf->cur_idx++;

if (buf->cur_idx == buf->cur->last) {
if (buf->cur == buf->last)
if (buf->cur == buf->last) {
buf->cur = buf->first;
else
buf->wrapped = true;
} else {
buf->cur = list_entry(buf->cur->list.next, struct topa,
list);
}
buf->cur_idx = 0;
}
}

@@ -800,8 +802,11 @@ static void pt_buffer_advance(struct pt_buffer *buf)
static void pt_update_head(struct pt *pt)
{
struct pt_buffer *buf = perf_get_aux(&pt->handle);
bool wrapped = buf->wrapped;
u64 topa_idx, base, old;

buf->wrapped = false;

/* offset of the first region in this table from the beginning of buf */
base = buf->cur->offset + buf->output_off;

@@ -814,7 +819,7 @@ static void pt_update_head(struct pt *pt)
} else {
old = (local64_xchg(&buf->head, base) &
((buf->nr_pages << PAGE_SHIFT) - 1));
if (base < old)
if (base < old || (base == old && wrapped))
base += buf->nr_pages << PAGE_SHIFT;

local_add(base - old, &buf->data_size);

@@ -64,6 +64,7 @@ struct pt_pmu {
* @lost: if data was lost/truncated
* @head: logical write offset inside the buffer
* @snapshot: if this is for a snapshot/overwrite counter
* @wrapped: buffer advance wrapped back to the first topa table
* @stop_pos: STOP topa entry index
* @intr_pos: INT topa entry index
* @stop_te: STOP topa entry pointer

@@ -80,6 +81,7 @@ struct pt_buffer {
local_t data_size;
local64_t head;
bool snapshot;
bool wrapped;
long stop_pos, intr_pos;
struct topa_entry *stop_te, *intr_te;
void **data_pages;
@@ -118,7 +118,10 @@ static inline bool amd_gart_present(void)

#define amd_nb_num(x) 0
#define amd_nb_has_feature(x) false
#define node_to_amd_nb(x) NULL
static inline struct amd_northbridge *node_to_amd_nb(int node)
{
return NULL;
}
#define amd_gart_present(x) false

#endif

@@ -335,12 +335,6 @@ early_idt_handler_common:
jmp restore_regs_and_return_to_kernel
END(early_idt_handler_common)

__INITDATA

.balign 4
GLOBAL(early_recursion_flag)
.long 0

#define NEXT_PAGE(name) \
.balign PAGE_SIZE; \
GLOBAL(name)

@@ -375,6 +369,8 @@ GLOBAL(name)
.endr

__INITDATA
.balign 4

NEXT_PGD_PAGE(early_top_pgt)
.fill 512,8,0
.fill PTI_USER_PGD_FILL,8,0

@@ -382,6 +378,9 @@ NEXT_PGD_PAGE(early_top_pgt)
NEXT_PAGE(early_dynamic_pgts)
.fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0

GLOBAL(early_recursion_flag)
.long 0

.data

#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)

@@ -179,9 +179,11 @@ module_param(ple_window_shrink, uint, 0444);
static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
module_param(ple_window_max, uint, 0444);

/* Default is SYSTEM mode, 1 for host-guest mode */
/* Default is SYSTEM mode, 1 for host-guest mode (which is BROKEN) */
int __read_mostly pt_mode = PT_MODE_SYSTEM;
#ifdef CONFIG_BROKEN
module_param(pt_mode, int, S_IRUGO);
#endif

static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);

@@ -16,6 +16,7 @@
#include <asm/boot.h>
#include <asm/processor-flags.h>
#include <asm/msr.h>
#include <asm/nospec-branch.h>
#include <xen/interface/elfnote.h>

__HEAD

@@ -105,6 +106,7 @@ ENTRY(pvh_start_xen)
/* startup_64 expects boot_params in %rsi. */
mov $_pa(pvh_bootparams), %rsi
mov $_pa(startup_64), %rax
ANNOTATE_RETPOLINE_SAFE
jmp *%rax

#else /* CONFIG_X86_64 */
@@ -907,7 +907,27 @@ static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse

lockdep_assert_held(&ioc->lock);

inuse = min(active, inuse);
/*
* For an active leaf node, its inuse shouldn't be zero or exceed
* @active. An active internal node's inuse is solely determined by the
* inuse to active ratio of its children regardless of @inuse.
*/
if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
iocg->child_active_sum);
} else {
/*
* It may be tempting to turn this into a clamp expression with
* a lower limit of 1 but active may be 0, which cannot be used
* as an upper limit in that situation. This expression allows
* active to clamp inuse unless it is 0, in which case inuse
* becomes 1.
*/
inuse = min(inuse, active) ?: 1;
}

if (active == iocg->active && inuse == iocg->inuse)
return;

for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
struct ioc_gq *parent = iocg->ancestors[lvl];

@@ -917,7 +937,7 @@ static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse
/* update the level sums */
parent->child_active_sum += (s32)(active - child->active);
parent->child_inuse_sum += (s32)(inuse - child->inuse);
/* apply the udpates */
/* apply the updates */
child->active = active;
child->inuse = inuse;

@@ -1652,6 +1652,12 @@ void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
return;

clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
/*
* Pairs with the smp_mb() in blk_mq_hctx_stopped() to order the
* clearing of BLK_MQ_S_STOPPED above and the checking of dispatch
* list in the subsequent routine.
*/
smp_mb__after_atomic();
blk_mq_run_hw_queue(hctx, async);
}
EXPORT_SYMBOL_GPL(blk_mq_start_stopped_hw_queue);

@@ -178,6 +178,19 @@ static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data

static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
/* Fast path: hardware queue is not stopped most of the time. */
if (likely(!test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
return false;

/*
* This barrier is used to order adding of dispatch list before and
* the test of BLK_MQ_S_STOPPED below. Pairs with the memory barrier
* in blk_mq_start_stopped_hw_queue() so that dispatch code could
* either see BLK_MQ_S_STOPPED is cleared or dispatch list is not
* empty to avoid missing dispatching requests.
*/
smp_mb();

return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
}
@@ -21,6 +21,7 @@ android/abi_gki_aarch64_qcom
android/abi_gki_aarch64_sony
android/abi_gki_aarch64_sonywalkman
android/abi_gki_aarch64_sunxi
android/abi_gki_aarch64_trimble
android/abi_gki_aarch64_unisoc
android/abi_gki_aarch64_vivo
android/abi_gki_aarch64_xiaomi

@@ -118,8 +118,10 @@ static int pcrypt_aead_encrypt(struct aead_request *req)
err = padata_do_parallel(ictx->psenc, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
if (err == -EBUSY)
return -EAGAIN;
if (err == -EBUSY) {
/* try non-parallel mode */
return crypto_aead_encrypt(creq);
}

return err;
}

@@ -167,8 +169,10 @@ static int pcrypt_aead_decrypt(struct aead_request *req)
err = padata_do_parallel(ictx->psdec, padata, &ctx->cb_cpu);
if (!err)
return -EINPROGRESS;
if (err == -EBUSY)
return -EAGAIN;
if (err == -EBUSY) {
/* try non-parallel mode */
return crypto_aead_decrypt(creq);
}

return err;
}
@@ -201,8 +201,6 @@ acpi_remove_address_space_handler(acpi_handle device,

/* Now we can delete the handler object */

acpi_os_release_mutex(handler_obj->address_space.
context_mutex);
acpi_ut_remove_reference(handler_obj);
goto unlock_and_exit;
}

@@ -283,7 +283,7 @@ error:
if (frame->virt_irq > 0)
acpi_unregister_gsi(gtdt_frame->virtual_timer_interrupt);
frame->virt_irq = 0;
} while (i-- >= 0 && gtdt_frame--);
} while (i-- > 0 && gtdt_frame--);

return -EINVAL;
}

@@ -250,6 +250,9 @@ static bool acpi_decode_space(struct resource_win *win,
switch (addr->resource_type) {
case ACPI_MEMORY_RANGE:
acpi_dev_memresource_flags(res, len, wp);

if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
res->flags |= IORESOURCE_PREFETCH;
break;
case ACPI_IO_RANGE:
acpi_dev_ioresource_flags(res, len, iodec,

@@ -265,9 +268,6 @@ static bool acpi_decode_space(struct resource_win *win,
if (addr->producer_consumer == ACPI_PRODUCER)
res->flags |= IORESOURCE_WINDOW;

if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
res->flags |= IORESOURCE_PREFETCH;

return !(res->flags & IORESOURCE_DISABLED);
}

@@ -348,6 +348,7 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
phy_nodes[phy] = phy_data.np;
cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
if (cphy_base[phy] == NULL) {
of_node_put(phy_data.np);
return 0;
}
phy_count += 1;

@@ -532,12 +532,16 @@ exit:
return IRQ_NONE;
}

static struct lock_class_key regmap_irq_lock_class;
static struct lock_class_key regmap_irq_request_class;

static int regmap_irq_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct regmap_irq_chip_data *data = h->host_data;

irq_set_chip_data(virq, data);
irq_set_lockdep_class(virq, &regmap_irq_lock_class, &regmap_irq_request_class);
irq_set_chip(virq, &data->irq_chip);
irq_set_nested_thread(virq, 1);
irq_set_parent(virq, data->irq);
@@ -603,6 +603,17 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
}
EXPORT_SYMBOL_GPL(regmap_attach_dev);

static int dev_get_regmap_match(struct device *dev, void *res, void *data);

static int regmap_detach_dev(struct device *dev, struct regmap *map)
{
if (!dev)
return 0;

return devres_release(dev, dev_get_regmap_release,
dev_get_regmap_match, (void *)map->name);
}

static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
const struct regmap_config *config)
{

@@ -1048,13 +1059,13 @@ skip_format_initialization:

/* Sanity check */
if (range_cfg->range_max < range_cfg->range_min) {
dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
dev_err(map->dev, "Invalid range %d: %u < %u\n", i,
range_cfg->range_max, range_cfg->range_min);
goto err_range;
}

if (range_cfg->range_max > map->max_register) {
dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
dev_err(map->dev, "Invalid range %d: %u > %u\n", i,
range_cfg->range_max, map->max_register);
goto err_range;
}

@@ -1331,6 +1342,7 @@ void regmap_exit(struct regmap *map)
{
struct regmap_async *async;

regmap_detach_dev(map->dev, map);
regcache_exit(map);
regmap_debugfs_exit(map);
regmap_range_exit(map);
@@ -1129,9 +1129,12 @@ static void virtblk_remove(struct virtio_device *vdev)
static int virtblk_freeze(struct virtio_device *vdev)
{
struct virtio_blk *vblk = vdev->priv;
struct request_queue *q = vblk->disk->queue;

/* Ensure no requests in virtqueues before deleting vqs. */
blk_mq_freeze_queue(vblk->disk->queue);
blk_mq_freeze_queue(q);
blk_mq_quiesce_queue_nowait(q);
blk_mq_unfreeze_queue(q);

/* Ensure we don't receive any more interrupts */
vdev->config->reset(vdev);

@@ -1155,8 +1158,8 @@ static int virtblk_restore(struct virtio_device *vdev)
return ret;

virtio_device_ready(vdev);
blk_mq_unquiesce_queue(vblk->disk->queue);

blk_mq_unfreeze_queue(vblk->disk->queue);
return 0;
}
#endif

@@ -495,6 +495,12 @@ static ssize_t backing_dev_store(struct device *dev,
}

nr_pages = i_size_read(inode) >> PAGE_SHIFT;
/* Refuse to use zero sized device (also prevents self reference) */
if (!nr_pages) {
err = -EINVAL;
goto out;
}

bitmap_sz = BITS_TO_LONGS(nr_pages) * sizeof(long);
bitmap = kvzalloc(bitmap_sz, GFP_KERNEL);
if (!bitmap) {

@@ -372,6 +372,8 @@ static const struct usb_device_id blacklist_table[] = {
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe123), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK |
BTUSB_WIDEBAND_SPEECH },
@@ -7,6 +7,7 @@
*/

#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>

@@ -412,7 +413,7 @@ static int axi_clkgen_probe(struct platform_device *pdev)
struct clk_init_data init;
const char *parent_names[2];
const char *clk_name;
struct resource *mem;
struct clk *axi_clk;
unsigned int i;
int ret;

@@ -427,14 +428,29 @@ static int axi_clkgen_probe(struct platform_device *pdev)
if (!axi_clkgen)
return -ENOMEM;

mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
axi_clkgen->base = devm_ioremap_resource(&pdev->dev, mem);
axi_clkgen->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(axi_clkgen->base))
return PTR_ERR(axi_clkgen->base);

init.num_parents = of_clk_get_parent_count(pdev->dev.of_node);
if (init.num_parents < 1 || init.num_parents > 2)
return -EINVAL;

axi_clk = devm_clk_get_enabled(&pdev->dev, "s_axi_aclk");
if (!IS_ERR(axi_clk)) {
if (init.num_parents < 2 || init.num_parents > 3)
return -EINVAL;

init.num_parents -= 1;
} else {
/*
* Legacy... So that old DTs which do not have clock-names still
* work. In this case we don't explicitly enable the AXI bus
* clock.
*/
if (PTR_ERR(axi_clk) != -ENOENT)
return PTR_ERR(axi_clk);
if (init.num_parents < 1 || init.num_parents > 2)
return -EINVAL;
}

for (i = 0; i < init.num_parents; i++) {
parent_names[i] = of_clk_get_parent_name(pdev->dev.of_node, i);

@@ -345,6 +345,7 @@ static struct clk_alpha_pll gpll1_out_main = {
/* 930MHz configuration */
static const struct alpha_pll_config gpll3_config = {
.l = 48,
.alpha_hi = 0x70,
.alpha = 0x0,
.alpha_hi = 0x70,
.alpha_en_mask = BIT(24),
@@ -167,7 +167,9 @@ static int __init cpufreq_init(void)

ret = cpufreq_register_driver(&loongson2_cpufreq_driver);

if (!ret && !nowait) {
if (ret) {
platform_driver_unregister(&platform_driver);
} else if (!nowait) {
saved_cpu_wait = cpu_wait;
cpu_wait = loongson2_cpu_wait;
}

@@ -2478,6 +2478,7 @@ static int ahash_hmac_setkey(struct crypto_ahash *ahash, const u8 *key,

static int ahash_hmac_init(struct ahash_request *req)
{
int ret;
struct iproc_reqctx_s *rctx = ahash_request_ctx(req);
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
struct iproc_ctx_s *ctx = crypto_ahash_ctx(tfm);

@@ -2487,7 +2488,9 @@ static int ahash_hmac_init(struct ahash_request *req)
flow_log("ahash_hmac_init()\n");

/* init the context as a hash */
ahash_init(req);
ret = ahash_init(req);
if (ret)
return ret;

if (!spu_no_incr_hash(ctx)) {
/* SPU-M can do incr hashing but needs sw for outer HMAC */

@@ -45,7 +45,7 @@ static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
dev_err(dev, "Cores still busy %llx", coremask);
grp = cpt_read_csr64(cpt->reg_base,
CPTX_PF_EXEC_BUSY(0));
if (timeout--)
if (!timeout--)
break;

udelay(CSR_DELAY);

@@ -303,6 +303,8 @@ static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)

ret = do_cpt_init(cpt, mcode);
if (ret) {
dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
mcode->code, mcode->phys_base);
dev_err(dev, "do_cpt_init failed with ret: %d\n", ret);
goto fw_release;
}

@@ -395,7 +397,7 @@ static void cpt_disable_all_cores(struct cpt_device *cpt)
dev_err(dev, "Cores still busy");
grp = cpt_read_csr64(cpt->reg_base,
CPTX_PF_EXEC_BUSY(0));
if (timeout--)
if (!timeout--)
break;

udelay(CSR_DELAY);
@@ -339,8 +339,9 @@ static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,
* driver. Once driver synthesizes cpl_pass_accpet_req the skb will go
* through the regular cpl_pass_accept_req processing in TOM.
*/
skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)
- pktshift, GFP_ATOMIC);
skb = alloc_skb(size_add(gl->tot_len,
sizeof(struct cpl_pass_accept_req)) -
pktshift, GFP_ATOMIC);
if (unlikely(!skb))
return NULL;
__skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)

@@ -103,10 +103,36 @@ static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
static bool dma_fence_array_signaled(struct dma_fence *fence)
{
struct dma_fence_array *array = to_dma_fence_array(fence);
int num_pending;
unsigned int i;

if (atomic_read(&array->num_pending) > 0)
/*
* We need to read num_pending before checking the enable_signal bit
* to avoid racing with the enable_signaling() implementation, which
* might decrement the counter, and cause a partial check.
* atomic_read_acquire() pairs with atomic_dec_and_test() in
* dma_fence_array_enable_signaling()
*
* The !--num_pending check is here to account for the any_signaled case
* if we race with enable_signaling(), that means the !num_pending check
* in the is_signalling_enabled branch might be outdated (num_pending
* might have been decremented), but that's fine. The user will get the
* right value when testing again later.
*/
num_pending = atomic_read_acquire(&array->num_pending);
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &array->base.flags)) {
if (num_pending <= 0)
goto signal;
return false;
}

for (i = 0; i < array->num_fences; ++i) {
if (dma_fence_is_signaled(array->fences[i]) && !--num_pending)
goto signal;
}
return false;

signal:
dma_fence_array_clear_pending_error(array);
return true;
}

@@ -120,7 +120,7 @@ static const struct dma_buf_ops udmabuf_ops = {
};

#define SEALS_WANTED (F_SEAL_SHRINK)
#define SEALS_DENIED (F_SEAL_WRITE)
#define SEALS_DENIED (F_SEAL_WRITE|F_SEAL_FUTURE_WRITE)

static long udmabuf_create(const struct udmabuf_create_list *head,
const struct udmabuf_create_item *list)
@@ -1214,6 +1214,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
return NULL;

desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
if (!desc)
return NULL;
list_add_tail(&desc->desc_node, &desc->descs_list);

desc->tx_dma_desc.cookie = -EBUSY;

@@ -1394,6 +1394,7 @@ static int mv_xor_probe(struct platform_device *pdev)
irq = irq_of_parse_and_map(np, 0);
if (!irq) {
ret = -ENODEV;
of_node_put(np);
goto err_channel_add;
}

@@ -1402,6 +1403,7 @@ static int mv_xor_probe(struct platform_device *pdev)
if (IS_ERR(chan)) {
ret = PTR_ERR(chan);
irq_dispose_mapping(irq);
of_node_put(np);
goto err_channel_add;
}

@@ -180,7 +180,7 @@ static void bluefield_edac_check(struct mem_ctl_info *mci)
static void bluefield_edac_init_dimms(struct mem_ctl_info *mci)
{
struct bluefield_edac_priv *priv = mci->pvt_info;
int mem_ctrl_idx = mci->mc_idx;
u64 mem_ctrl_idx = mci->mc_idx;
struct dimm_info *dimm;
u64 smc_info, smc_arg;
int is_empty = 1, i;
@@ -331,21 +331,25 @@ static void fsl_mc_check(struct mem_ctl_info *mci)
* TODO: Add support for 32-bit wide buses
*/
if ((err_detect & DDR_EDE_SBE) && (bus_width == 64)) {
u64 cap = (u64)cap_high << 32 | cap_low;
u32 s = syndrome;

sbe_ecc_decode(cap_high, cap_low, syndrome,
&bad_data_bit, &bad_ecc_bit);

if (bad_data_bit != -1)
fsl_mc_printk(mci, KERN_ERR,
"Faulty Data bit: %d\n", bad_data_bit);
if (bad_ecc_bit != -1)
fsl_mc_printk(mci, KERN_ERR,
"Faulty ECC bit: %d\n", bad_ecc_bit);
if (bad_data_bit >= 0) {
fsl_mc_printk(mci, KERN_ERR, "Faulty Data bit: %d\n", bad_data_bit);
cap ^= 1ULL << bad_data_bit;
}

if (bad_ecc_bit >= 0) {
fsl_mc_printk(mci, KERN_ERR, "Faulty ECC bit: %d\n", bad_ecc_bit);
s ^= 1 << bad_ecc_bit;
}

fsl_mc_printk(mci, KERN_ERR,
"Expected Data / ECC:\t%#8.8x_%08x / %#2.2x\n",
cap_high ^ (1 << (bad_data_bit - 32)),
cap_low ^ (1 << bad_data_bit),
syndrome ^ (1 << bad_ecc_bit));
upper_32_bits(cap), lower_32_bits(cap), s);
}

fsl_mc_printk(mci, KERN_ERR,

@@ -627,6 +627,9 @@ static struct scpi_dvfs_info *scpi_dvfs_get_info(u8 domain)
if (ret)
return ERR_PTR(ret);

if (!buf.opp_count)
return ERR_PTR(-ENOENT);

info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return ERR_PTR(-ENOMEM);
@@ -40,7 +40,8 @@ int __init efi_tpm_eventlog_init(void)
 {
         struct linux_efi_tpm_eventlog *log_tbl;
         struct efi_tcg2_final_events_table *final_tbl;
-        int tbl_size;
+        unsigned int tbl_size;
+        int final_tbl_size;
         int ret = 0;
 
         if (efi.tpm_log == EFI_INVALID_TABLE_ADDR) {
@@ -80,26 +81,26 @@
                 goto out;
         }
 
-        tbl_size = 0;
+        final_tbl_size = 0;
         if (final_tbl->nr_events != 0) {
                 void *events = (void *)efi.tpm_final_log
                                + sizeof(final_tbl->version)
                                + sizeof(final_tbl->nr_events);
 
-                tbl_size = tpm2_calc_event_log_size(events,
-                                                    final_tbl->nr_events,
-                                                    log_tbl->log);
+                final_tbl_size = tpm2_calc_event_log_size(events,
+                                                          final_tbl->nr_events,
+                                                          log_tbl->log);
         }
 
-        if (tbl_size < 0) {
+        if (final_tbl_size < 0) {
                 pr_err(FW_BUG "Failed to parse event in TPM Final Events Log\n");
                 ret = -EINVAL;
                 goto out_calc;
         }
 
-        memblock_reserve((unsigned long)final_tbl,
-                         tbl_size + sizeof(*final_tbl));
-        efi_tpm_final_log_size = tbl_size;
+        memblock_reserve(efi.tpm_final_log,
+                         final_tbl_size + sizeof(*final_tbl));
+        efi_tpm_final_log_size = final_tbl_size;
 
 out_calc:
         early_memunmap(final_tbl, sizeof(*final_tbl));

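The split into an unsigned tbl_size and a signed final_tbl_size matters because a "< 0" error check can never fire on an unsigned variable. A small illustration of the pitfall (generic C, unrelated to the EFI code itself):

    #include <stdio.h>

    int main(void)
    {
            int err = -22;              /* e.g. a parser returning an error code */
            unsigned int size = err;    /* silently wraps to a huge positive value */

            if ((int)size < 0)          /* the check works only in a signed type */
                    printf("error detected: %d\n", (int)size);
            /* "if (size < 0)" would be compiled away as always-false. */
            return 0;
    }
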
@@ -898,7 +898,8 @@ static __init int gsmi_init(void)
         gsmi_dev.pdev = platform_device_register_full(&gsmi_dev_info);
         if (IS_ERR(gsmi_dev.pdev)) {
                 printk(KERN_ERR "gsmi: unable to register platform device\n");
-                return PTR_ERR(gsmi_dev.pdev);
+                ret = PTR_ERR(gsmi_dev.pdev);
+                goto out_unregister;
         }
 
         /* SMI access needs to be serialized */
@@ -1025,6 +1026,10 @@ out_err:
         gsmi_buf_free(gsmi_dev.name_buf);
         dma_pool_destroy(gsmi_dev.dma_pool);
         platform_device_unregister(gsmi_dev.pdev);
+out_unregister:
+#ifdef CONFIG_PM
+        platform_driver_unregister(&gsmi_driver_info);
+#endif
         pr_info("gsmi: failed to load: %d\n", ret);
         return ret;
 }
@@ -1047,6 +1052,9 @@ static void __exit gsmi_exit(void)
         gsmi_buf_free(gsmi_dev.name_buf);
         dma_pool_destroy(gsmi_dev.dma_pool);
         platform_device_unregister(gsmi_dev.pdev);
+#ifdef CONFIG_PM
+        platform_driver_unregister(&gsmi_driver_info);
+#endif
 }
 
 module_init(gsmi_init);

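The gsmi changes give the init error path a dedicated out_unregister label, so a failure after the earlier driver registration (done under CONFIG_PM) is now unwound instead of leaked. The general shape of that ordered unwind, as a standalone sketch with made-up names:

    #include <stdio.h>
    #include <stdlib.h>

    static int demo_init(void)
    {
            char *driver = malloc(8);     /* stands in for driver registration */
            char *device = NULL;

            if (!driver)
                    return -1;

            device = malloc(8);           /* stands in for device registration */
            if (!device)
                    goto out_unregister;  /* undo only what already succeeded */

            free(device);
            free(driver);
            return 0;

    out_unregister:
            free(driver);
            printf("init failed, earlier setup unwound\n");
            return -1;
    }

    int main(void)
    {
            return demo_init() ? 1 : 0;
    }
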
@@ -1248,8 +1248,6 @@ static void qcom_scm_shutdown(struct platform_device *pdev)
 {
-        qcom_scm_disable_sdi();
-        qcom_scm_halt_spmi_pmic_arbiter();
         /* Clean shutdown, disable download mode to allow normal restart */
         qcom_scm_set_download_mode(QCOM_DOWNLOAD_NODUMP, 0);
 }
 
 static const struct of_device_id qcom_scm_dt_match[] = {

@@ -328,6 +328,7 @@ static const struct irq_domain_ops grgpio_irq_domain_ops = {
 static int grgpio_probe(struct platform_device *ofdev)
 {
         struct device_node *np = ofdev->dev.of_node;
+        struct device *dev = &ofdev->dev;
         void __iomem *regs;
         struct gpio_chip *gc;
         struct grgpio_priv *priv;
@@ -337,7 +338,7 @@ static int grgpio_probe(struct platform_device *ofdev)
         int size;
         int i;
 
-        priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL);
+        priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
         if (!priv)
                 return -ENOMEM;
 
@@ -346,29 +347,32 @@
                 return PTR_ERR(regs);
 
         gc = &priv->gc;
-        err = bgpio_init(gc, &ofdev->dev, 4, regs + GRGPIO_DATA,
+        err = bgpio_init(gc, dev, 4, regs + GRGPIO_DATA,
                          regs + GRGPIO_OUTPUT, NULL, regs + GRGPIO_DIR, NULL,
                          BGPIOF_BIG_ENDIAN_BYTE_ORDER);
         if (err) {
-                dev_err(&ofdev->dev, "bgpio_init() failed\n");
+                dev_err(dev, "bgpio_init() failed\n");
                 return err;
         }
 
         priv->regs = regs;
         priv->imask = gc->read_reg(regs + GRGPIO_IMASK);
-        priv->dev = &ofdev->dev;
+        priv->dev = dev;
 
         gc->of_node = np;
         gc->owner = THIS_MODULE;
         gc->to_irq = grgpio_to_irq;
-        gc->label = devm_kasprintf(&ofdev->dev, GFP_KERNEL, "%pOF", np);
+        gc->label = devm_kasprintf(dev, GFP_KERNEL, "%pOF", np);
+        if (!gc->label)
+                return -ENOMEM;
+
         gc->base = -1;
 
         err = of_property_read_u32(np, "nbits", &prop);
         if (err || prop <= 0 || prop > GRGPIO_MAX_NGPIO) {
                 gc->ngpio = GRGPIO_MAX_NGPIO;
-                dev_dbg(&ofdev->dev,
-                        "No or invalid nbits property: assume %d\n", gc->ngpio);
+                dev_dbg(dev, "No or invalid nbits property: assume %d\n",
+                        gc->ngpio);
         } else {
                 gc->ngpio = prop;
         }
@@ -380,7 +384,7 @@
         irqmap = (s32 *)of_get_property(np, "irqmap", &size);
         if (irqmap) {
                 if (size < gc->ngpio) {
-                        dev_err(&ofdev->dev,
+                        dev_err(dev,
                                 "irqmap shorter than ngpio (%d < %d)\n",
                                 size, gc->ngpio);
                         return -EINVAL;
@@ -390,7 +394,7 @@
                                                      &grgpio_irq_domain_ops,
                                                      priv);
                 if (!priv->domain) {
-                        dev_err(&ofdev->dev, "Could not add irq domain\n");
+                        dev_err(dev, "Could not add irq domain\n");
                         return -EINVAL;
                 }
 
@@ -420,13 +424,13 @@
 
         err = gpiochip_add_data(gc, priv);
         if (err) {
-                dev_err(&ofdev->dev, "Could not add gpiochip\n");
+                dev_err(dev, "Could not add gpiochip\n");
                 if (priv->domain)
                         irq_domain_remove(priv->domain);
                 return err;
         }
 
-        dev_info(&ofdev->dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
+        dev_info(dev, "regs=0x%p, base=%d, ngpio=%d, irqs=%s\n",
                  priv->regs, gc->base, gc->ngpio, priv->domain ? "on" : "off");
 
         return 0;

@@ -395,7 +395,7 @@ static ssize_t amdgpu_debugfs_regs_smc_read(struct file *f, char __user *buf,
         if (!adev->smc_rreg)
                 return -EOPNOTSUPP;
 
-        if (size > 4096 || size & 0x3 || *pos & 0x3)
+        if (size & 0x3 || *pos & 0x3)
                 return -EINVAL;
 
         while (size) {

@@ -1678,6 +1678,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
 
         mutex_init(&adev->mman.gtt_window_lock);
 
+        dma_set_max_seg_size(adev->dev, UINT_MAX);
         /* No others user of address space so set it to 0 */
         r = ttm_bo_device_init(&adev->mman.bdev,
                                &amdgpu_bo_driver,

@@ -121,6 +121,9 @@ int adv7511_hdmi_hw_params(struct device *dev, void *data,
                 audio_source = ADV7511_AUDIO_SOURCE_I2S;
                 i2s_format = ADV7511_I2S_FORMAT_LEFT_J;
                 break;
+        case HDMI_SPDIF:
+                audio_source = ADV7511_AUDIO_SOURCE_SPDIF;
+                break;
         default:
                 return -EINVAL;
         }
@@ -144,7 +147,16 @@
                            ADV7511_AUDIO_CFG3_LEN_MASK, len);
         regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG,
                            ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4);
-        regmap_write(adv7511->regmap, 0x73, 0x1);
+
+        /* send current Audio infoframe values while updating */
+        regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+                           BIT(5), BIT(5));
+
+        regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1);
+
+        /* use Audio infoframe updated info */
+        regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
+                           BIT(5), 0);
 
         return 0;
 }
@@ -175,13 +187,24 @@ static int audio_startup(struct device *dev, void *data)
         regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0),
                            BIT(7) | BIT(6), BIT(7));
         /* use Audio infoframe updated info */
-        regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(1),
+        regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE,
                            BIT(5), 0);
+
+        /* enable SPDIF receiver */
+        if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+                regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+                                   BIT(7), BIT(7));
+
         return 0;
 }
 
 static void audio_shutdown(struct device *dev, void *data)
 {
+        struct adv7511 *adv7511 = dev_get_drvdata(dev);
+
+        if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF)
+                regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG,
+                                   BIT(7), 0);
 }
 
 static int adv7511_hdmi_i2s_get_dai_id(struct snd_soc_component *component,
@@ -215,6 +238,7 @@ static const struct hdmi_codec_pdata codec_data = {
         .ops = &adv7511_codec_ops,
         .max_i2s_channels = 2,
         .i2s = 1,
+        .spdif = 1,
 };
 
 int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511)

@@ -193,7 +193,7 @@ int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
 
         of_property_read_u32(np, "adi,dsi-lanes", &num_lanes);
 
-        if (num_lanes < 1 || num_lanes > 4)
+        if (num_lanes < 2 || num_lanes > 4)
                 return -EINVAL;
 
         adv->num_dsi_lanes = num_lanes;

@@ -154,7 +154,7 @@ static void show_leaks(struct drm_mm *mm) { }
 
 INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
                      u64, __subtree_last,
-                     START, LAST, static inline, drm_mm_interval_tree)
+                     START, LAST, static inline __maybe_unused, drm_mm_interval_tree)
 
 struct drm_mm_node *
 __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)

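The __maybe_unused annotation only suppresses "defined but not used" warnings for the helper functions that INTERVAL_TREE_DEFINE() emits; it does not change code generation. Its plain GCC/Clang equivalent looks like this (illustrative sketch outside the kernel):

    #include <stdio.h>

    /* Generated-style helper that some translation units never call. */
    static inline __attribute__((__unused__)) int interval_helper(int x)
    {
            return x + 1;
    }

    int main(void)
    {
            printf("builds cleanly even with -Wall -Wunused-function\n");
            return 0;
    }
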
@@ -166,6 +166,12 @@ static const struct dmi_system_id orientation_data[] = {
                   DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
                 },
                 .driver_data = (void *)&lcd800x1280_rightside_up,
+        }, {    /* AYA NEO AYANEO 2 */
+                .matches = {
+                  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYANEO"),
+                  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYANEO 2"),
+                },
+                .driver_data = (void *)&lcd1200x1920_rightside_up,
         }, {    /* AYA NEO 2021 */
                 .matches = {
                   DMI_EXACT_MATCH(DMI_SYS_VENDOR, "AYADEVICE"),

@@ -481,7 +481,8 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
         } else {
                 CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_CACHE,
                                VIVS_GL_FLUSH_CACHE_DEPTH |
-                               VIVS_GL_FLUSH_CACHE_COLOR);
+                               VIVS_GL_FLUSH_CACHE_COLOR |
+                               VIVS_GL_FLUSH_CACHE_SHADER_L1);
                 if (has_blt) {
                         CMD_LOAD_STATE(buffer, VIVS_BLT_ENABLE, 0x1);
                         CMD_LOAD_STATE(buffer, VIVS_BLT_SET_COMMAND, 0x1);

@@ -75,7 +75,7 @@ static void etnaviv_core_dump_header(struct core_dump_iterator *iter,
         hdr->file_size = cpu_to_le32(data_end - iter->data);
 
         iter->hdr++;
-        iter->data += hdr->file_size;
+        iter->data += le32_to_cpu(hdr->file_size);
 }
 
 static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
@@ -83,10 +83,15 @@ static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
 {
         struct etnaviv_dump_registers *reg = iter->data;
         unsigned int i;
+        u32 read_addr;
 
         for (i = 0; i < ARRAY_SIZE(etnaviv_dump_registers); i++, reg++) {
-                reg->reg = etnaviv_dump_registers[i];
-                reg->value = gpu_read(gpu, etnaviv_dump_registers[i]);
+                read_addr = etnaviv_dump_registers[i];
+                if (read_addr >= VIVS_PM_POWER_CONTROLS &&
+                    read_addr <= VIVS_PM_PULSE_EATER)
+                        read_addr = gpu_fix_power_address(gpu, read_addr);
+                reg->reg = cpu_to_le32(etnaviv_dump_registers[i]);
+                reg->value = cpu_to_le32(gpu_read(gpu, read_addr));
         }
 
         etnaviv_core_dump_header(iter, ETDUMP_BUF_REG, reg);
@@ -207,7 +212,7 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
         if (!IS_ERR(pages)) {
                 int j;
 
-                iter.hdr->data[0] = bomap - bomap_start;
+                iter.hdr->data[0] = cpu_to_le32((bomap - bomap_start));
 
                 for (j = 0; j < obj->base.size >> PAGE_SHIFT; j++)
                         *bomap++ = cpu_to_le64(page_to_phys(*pages++));

@@ -566,7 +566,7 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
         u32 pmc, ppc;
 
         /* enable clock gating */
-        ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+        ppc = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
         ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
 
         /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */
@@ -574,9 +574,9 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
             gpu->identity.revision == 0x4302)
                 ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING;
 
-        gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc);
+        gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, ppc);
 
-        pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS);
+        pmc = gpu_read_power(gpu, VIVS_PM_MODULE_CONTROLS);
 
         /* Disable PA clock gating for GC400+ without bugfix except for GC420 */
         if (gpu->identity.model >= chipModel_GC400 &&
@@ -605,7 +605,7 @@ static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu)
         pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ;
         pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ;
 
-        gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
+        gpu_write_power(gpu, VIVS_PM_MODULE_CONTROLS, pmc);
 }
 
 void etnaviv_gpu_start_fe(struct etnaviv_gpu *gpu, u32 address, u16 prefetch)
@@ -665,11 +665,11 @@ static void etnaviv_gpu_setup_pulse_eater(struct etnaviv_gpu *gpu)
             (gpu->identity.features & chipFeatures_PIPE_3D))
         {
                 /* Performance fix: disable internal DFS */
-                pulse_eater = gpu_read(gpu, VIVS_PM_PULSE_EATER);
+                pulse_eater = gpu_read_power(gpu, VIVS_PM_PULSE_EATER);
                 pulse_eater |= BIT(18);
         }
 
-        gpu_write(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
+        gpu_write_power(gpu, VIVS_PM_PULSE_EATER, pulse_eater);
 }
 
 static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
@@ -1236,10 +1236,12 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
 {
         u32 val;
 
+        mutex_lock(&gpu->lock);
+
         /* disable clock gating */
-        val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+        val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
         val &= ~VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
-        gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
+        gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
 
         /* enable debug register */
         val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
@@ -1247,6 +1249,8 @@ static void sync_point_perfmon_sample_pre(struct etnaviv_gpu *gpu,
         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
 
         sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_PRE);
+
+        mutex_unlock(&gpu->lock);
 }
 
 static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
@@ -1256,23 +1260,27 @@ static void sync_point_perfmon_sample_post(struct etnaviv_gpu *gpu,
         unsigned int i;
         u32 val;
 
+        mutex_lock(&gpu->lock);
+
         sync_point_perfmon_sample(gpu, event, ETNA_PM_PROCESS_POST);
 
-        for (i = 0; i < submit->nr_pmrs; i++) {
-                const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
-
-                *pmr->bo_vma = pmr->sequence;
-        }
-
         /* disable debug register */
         val = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
         val |= VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS;
         gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, val);
 
         /* enable clock gating */
-        val = gpu_read(gpu, VIVS_PM_POWER_CONTROLS);
+        val = gpu_read_power(gpu, VIVS_PM_POWER_CONTROLS);
         val |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING;
-        gpu_write(gpu, VIVS_PM_POWER_CONTROLS, val);
+        gpu_write_power(gpu, VIVS_PM_POWER_CONTROLS, val);
+
+        mutex_unlock(&gpu->lock);
+
+        for (i = 0; i < submit->nr_pmrs; i++) {
+                const struct etnaviv_perfmon_request *pmr = submit->pmrs + i;
+
+                *pmr->bo_vma = pmr->sequence;
+        }
 }
 

@@ -10,6 +10,7 @@
 #include "etnaviv_gem.h"
 #include "etnaviv_mmu.h"
 #include "etnaviv_drv.h"
+#include "common.xml.h"
 
 struct etnaviv_gem_submit;
 struct etnaviv_vram_mapping;
@@ -158,6 +159,26 @@ static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
         return readl(gpu->mmio + reg);
 }
 
+static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)
+{
+        /* Power registers in GC300 < 2.0 are offset by 0x100 */
+        if (gpu->identity.model == chipModel_GC300 &&
+            gpu->identity.revision < 0x2000)
+                reg += 0x100;
+
+        return reg;
+}
+
+static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
+{
+        writel(data, gpu->mmio + gpu_fix_power_address(gpu, reg));
+}
+
+static inline u32 gpu_read_power(struct etnaviv_gpu *gpu, u32 reg)
+{
+        return readl(gpu->mmio + gpu_fix_power_address(gpu, reg));
+}
+
 int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
 
 int etnaviv_gpu_init(struct etnaviv_gpu *gpu);

@@ -533,6 +533,6 @@ int __init i915_global_scheduler_init(void)
         return 0;
 
 err_priorities:
-        kmem_cache_destroy(global.slab_priorities);
+        kmem_cache_destroy(global.slab_dependencies);
         return -ENOMEM;
 }

@@ -411,14 +411,12 @@ static int ipu_crtc_init(struct ipu_crtc *ipu_crtc,
         }
 
         ipu_crtc->irq = ipu_plane_irq(ipu_crtc->plane[0]);
-        ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler, 0,
-                        "imx_drm", ipu_crtc);
+        ret = devm_request_irq(ipu_crtc->dev, ipu_crtc->irq, ipu_irq_handler,
+                        IRQF_NO_AUTOEN, "imx_drm", ipu_crtc);
         if (ret < 0) {
                 dev_err(ipu_crtc->dev, "irq request failed with %d.\n", ret);
                 goto err_put_plane1_res;
         }
-        /* Only enable IRQ when we actually need it to trigger work. */
-        disable_irq(ipu_crtc->irq);
 
         return 0;
 

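Both this hunk and the a6xx_gmu change further down rely on IRQF_NO_AUTOEN, which leaves the line disabled at request time instead of requesting it enabled and then calling disable_irq(); that closes the window in which the handler could run before the driver is ready. A hedged sketch of the pattern, as a kernel-style fragment with made-up names (not taken from either driver):

    /* inside a driver's probe(); priv->irq is already mapped */
    ret = devm_request_irq(dev, priv->irq, my_irq_handler,
                           IRQF_NO_AUTOEN, "my-driver", priv);
    if (ret)
            return ret;

    /* ... finish hardware and software setup ... */

    enable_irq(priv->irq);   /* first moment the handler may run */
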
@@ -533,6 +533,7 @@ static const struct of_device_id mcde_of_match[] = {
         },
         {},
 };
+MODULE_DEVICE_TABLE(of, mcde_of_match);
 
 static struct platform_driver mcde_driver = {
         .driver = {

@@ -1152,15 +1152,13 @@ static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
 
         irq = platform_get_irq_byname(pdev, name);
 
-        ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH, name, gmu);
+        ret = request_irq(irq, handler, IRQF_TRIGGER_HIGH | IRQF_NO_AUTOEN, name, gmu);
         if (ret) {
                 DRM_DEV_ERROR(&pdev->dev, "Unable to get interrupt %s %d\n",
                               name, ret);
                 return ret;
         }
 
-        disable_irq(irq);
-
         return irq;
 }
 

@@ -1244,8 +1244,6 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
 
         omap_obj = to_omap_bo(obj);
 
-        mutex_lock(&omap_obj->lock);
-
         omap_obj->sgt = sgt;
 
         if (sgt->orig_nents == 1) {
@@ -1261,8 +1259,7 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
                 pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
                 if (!pages) {
                         omap_gem_free_object(obj);
-                        obj = ERR_PTR(-ENOMEM);
-                        goto done;
+                        return ERR_PTR(-ENOMEM);
                 }
 
                 omap_obj->pages = pages;
@@ -1275,13 +1272,10 @@ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
 
                 if (WARN_ON(i != npages)) {
                         omap_gem_free_object(obj);
-                        obj = ERR_PTR(-ENOMEM);
-                        goto done;
+                        return ERR_PTR(-ENOMEM);
                 }
         }
 
-done:
-        mutex_unlock(&omap_obj->lock);
         return obj;
 }
 