This is the 5.4.271 stable release
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmXof14ACgkQONu9yGCS
aT4K1Q//Wt8oz/83saKdDP4Uo51NWuyLHCLzBotCKDP1f5g4gTZbvZzzDsC+jCPB
Tavkw3BHPLfIpvDt5WV8eKTGrkbmw/xIyLqUyDxZDD7hx8p7YJ3lMBcSfZvP/YGm
1vmiQDmTp6ZIfh5Fl9aNjYDkS/HxRVHlZMJx2ERI480Zm7FbtbCVUYK1gkHGHXYD
aVW3X7UKsZVYvK2I4jI5IGg/zP0PTfH9XnFRTm5WRvOLoRYF8eFZ2NiBHACkZyoh
UWYc5Kl0rdurSkPEo8qN1KPDagbxyGhCVU2v+tpSNC3eeSF3WkNCvuCD7zX4LRkJ
3FSxB7oVmgKn95/BREwkcegzDD9ZwTaXFuQVelWF4+ajdjCEMkxOuUoFajSI83IG
LXHHIn4WYkcSkokgdaYGTP6CbdqSvcuLJCfS/ANwFvdsBSUylBGHbUuc+53jD754
sbNBLZnWIiERSx6mUy3/jGwMfncDEroouMZVL3YkXkDuWYryZEDvIdP7jMsZ2u6d
GM6EmGbi7aWsxdaPr+B638ph58eFkvTdcLyAwp+Sn5AORK2569Gg0Rubhl+xX54X
y8Wa7+93ohh59yifQll2OqDLN+Wlj6Sg1smvitjdyiK4Od7Id0plJ7Gzg1Wj97C+
vGkD33vfH++tvZdsNU6Ynw4oY2mQ7fBOqhCm5XavDFXDUMOZI2s=
=XlXs
-----END PGP SIGNATURE-----

Merge 5.4.271 into android11-5.4-lts

Changes in 5.4.271
    netlink: Fix kernel-infoleak-after-free in __skb_datagram_iter
    net: ip_tunnel: prevent perpetual headroom growth
    tun: Fix xdp_rxq_info's queue_index when detaching
    ipv6: fix potential "struct net" leak in inet6_rtm_getaddr()
    lan78xx: enable auto speed configuration for LAN7850 if no EEPROM is detected
    net: usb: dm9601: fix wrong return value in dm9601_mdio_read
    Bluetooth: Avoid potential use-after-free in hci_error_reset
    Bluetooth: hci_event: Fix handling of HCI_EV_IO_CAPA_REQUEST
    Bluetooth: Enforce validation on max value of connection interval
    netfilter: nf_tables: allow NFPROTO_INET in nft_(match/target)_validate()
    rtnetlink: fix error logic of IFLA_BRIDGE_FLAGS writing back
    efi/capsule-loader: fix incorrect allocation size
    power: supply: bq27xxx-i2c: Do not free non existing IRQ
    ALSA: Drop leftover snd-rtctimer stuff from Makefile
    afs: Fix endless loop in directory parsing
    gtp: fix use-after-free and null-ptr-deref in gtp_newlink()
    wifi: nl80211: reject iftype change with mesh ID change
    btrfs: dev-replace: properly validate device names
    dmaengine: fsl-qdma: fix SoC may hang on 16 byte unaligned read
    dmaengine: fsl-qdma: init irq after reg initialization
    mmc: core: Fix eMMC initialization with 1-bit bus connection
    x86/cpu/intel: Detect TME keyid bits before setting MTRR mask registers
    cachefiles: fix memory leak in cachefiles_add_cache()
    fs,hugetlb: fix NULL pointer dereference in hugetlbs_fill_super
    gpio: 74x164: Enable output pins after registers are reset
    Linux 5.4.271

Change-Id: I2c27fce7ee73eecbf275a5125b150b90d6aa4b1f
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
commit 40df6b551d

25 changed files with 225 additions and 142 deletions

Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 4
-SUBLEVEL = 270
+SUBLEVEL = 271
 EXTRAVERSION =
 NAME = Kleptomaniac Octopus
 
arch/x86/kernel/cpu/intel.c

@@ -187,6 +187,90 @@ static bool bad_spectre_microcode(struct cpuinfo_x86 *c)
     return false;
 }
 
+#define MSR_IA32_TME_ACTIVATE       0x982
+
+/* Helpers to access TME_ACTIVATE MSR */
+#define TME_ACTIVATE_LOCKED(x)      (x & 0x1)
+#define TME_ACTIVATE_ENABLED(x)     (x & 0x2)
+
+#define TME_ACTIVATE_POLICY(x)      ((x >> 4) & 0xf)   /* Bits 7:4 */
+#define TME_ACTIVATE_POLICY_AES_XTS_128 0
+
+#define TME_ACTIVATE_KEYID_BITS(x)  ((x >> 32) & 0xf)  /* Bits 35:32 */
+
+#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff)   /* Bits 63:48 */
+#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
+
+/* Values for mktme_status (SW only construct) */
+#define MKTME_ENABLED           0
+#define MKTME_DISABLED          1
+#define MKTME_UNINITIALIZED     2
+static int mktme_status = MKTME_UNINITIALIZED;
+
+static void detect_tme_early(struct cpuinfo_x86 *c)
+{
+    u64 tme_activate, tme_policy, tme_crypto_algs;
+    int keyid_bits = 0, nr_keyids = 0;
+    static u64 tme_activate_cpu0 = 0;
+
+    rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
+
+    if (mktme_status != MKTME_UNINITIALIZED) {
+        if (tme_activate != tme_activate_cpu0) {
+            /* Broken BIOS? */
+            pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
+            pr_err_once("x86/tme: MKTME is not usable\n");
+            mktme_status = MKTME_DISABLED;
+
+            /* Proceed. We may need to exclude bits from x86_phys_bits. */
+        }
+    } else {
+        tme_activate_cpu0 = tme_activate;
+    }
+
+    if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
+        pr_info_once("x86/tme: not enabled by BIOS\n");
+        mktme_status = MKTME_DISABLED;
+        return;
+    }
+
+    if (mktme_status != MKTME_UNINITIALIZED)
+        goto detect_keyid_bits;
+
+    pr_info("x86/tme: enabled by BIOS\n");
+
+    tme_policy = TME_ACTIVATE_POLICY(tme_activate);
+    if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
+        pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
+
+    tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
+    if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
+        pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
+               tme_crypto_algs);
+        mktme_status = MKTME_DISABLED;
+    }
+detect_keyid_bits:
+    keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
+    nr_keyids = (1UL << keyid_bits) - 1;
+    if (nr_keyids) {
+        pr_info_once("x86/mktme: enabled by BIOS\n");
+        pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
+    } else {
+        pr_info_once("x86/mktme: disabled by BIOS\n");
+    }
+
+    if (mktme_status == MKTME_UNINITIALIZED) {
+        /* MKTME is usable */
+        mktme_status = MKTME_ENABLED;
+    }
+
+    /*
+     * KeyID bits effectively lower the number of physical address
+     * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
+     */
+    c->x86_phys_bits -= keyid_bits;
+}
+
 static void early_init_intel(struct cpuinfo_x86 *c)
 {
     u64 misc_enable;
@@ -339,6 +423,13 @@ static void early_init_intel(struct cpuinfo_x86 *c)
      */
     if (detect_extended_topology_early(c) < 0)
         detect_ht_early(c);
+
+    /*
+     * Adjust the number of physical bits early because it affects the
+     * valid bits of the MTRR mask registers.
+     */
+    if (cpu_has(c, X86_FEATURE_TME))
+        detect_tme_early(c);
 }
 
 #ifdef CONFIG_X86_32
@@ -540,90 +631,6 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
     }
 }
 
-#define MSR_IA32_TME_ACTIVATE       0x982
-
-/* Helpers to access TME_ACTIVATE MSR */
-#define TME_ACTIVATE_LOCKED(x)      (x & 0x1)
-#define TME_ACTIVATE_ENABLED(x)     (x & 0x2)
-
-#define TME_ACTIVATE_POLICY(x)      ((x >> 4) & 0xf)   /* Bits 7:4 */
-#define TME_ACTIVATE_POLICY_AES_XTS_128 0
-
-#define TME_ACTIVATE_KEYID_BITS(x)  ((x >> 32) & 0xf)  /* Bits 35:32 */
-
-#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff)   /* Bits 63:48 */
-#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1
-
-/* Values for mktme_status (SW only construct) */
-#define MKTME_ENABLED           0
-#define MKTME_DISABLED          1
-#define MKTME_UNINITIALIZED     2
-static int mktme_status = MKTME_UNINITIALIZED;
-
-static void detect_tme(struct cpuinfo_x86 *c)
-{
-    u64 tme_activate, tme_policy, tme_crypto_algs;
-    int keyid_bits = 0, nr_keyids = 0;
-    static u64 tme_activate_cpu0 = 0;
-
-    rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate);
-
-    if (mktme_status != MKTME_UNINITIALIZED) {
-        if (tme_activate != tme_activate_cpu0) {
-            /* Broken BIOS? */
-            pr_err_once("x86/tme: configuration is inconsistent between CPUs\n");
-            pr_err_once("x86/tme: MKTME is not usable\n");
-            mktme_status = MKTME_DISABLED;
-
-            /* Proceed. We may need to exclude bits from x86_phys_bits. */
-        }
-    } else {
-        tme_activate_cpu0 = tme_activate;
-    }
-
-    if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) {
-        pr_info_once("x86/tme: not enabled by BIOS\n");
-        mktme_status = MKTME_DISABLED;
-        return;
-    }
-
-    if (mktme_status != MKTME_UNINITIALIZED)
-        goto detect_keyid_bits;
-
-    pr_info("x86/tme: enabled by BIOS\n");
-
-    tme_policy = TME_ACTIVATE_POLICY(tme_activate);
-    if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128)
-        pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy);
-
-    tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate);
-    if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) {
-        pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n",
-               tme_crypto_algs);
-        mktme_status = MKTME_DISABLED;
-    }
-detect_keyid_bits:
-    keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate);
-    nr_keyids = (1UL << keyid_bits) - 1;
-    if (nr_keyids) {
-        pr_info_once("x86/mktme: enabled by BIOS\n");
-        pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids);
-    } else {
-        pr_info_once("x86/mktme: disabled by BIOS\n");
-    }
-
-    if (mktme_status == MKTME_UNINITIALIZED) {
-        /* MKTME is usable */
-        mktme_status = MKTME_ENABLED;
-    }
-
-    /*
-     * KeyID bits effectively lower the number of physical address
-     * bits.  Update cpuinfo_x86::x86_phys_bits accordingly.
-     */
-    c->x86_phys_bits -= keyid_bits;
-}
-
 static void init_cpuid_fault(struct cpuinfo_x86 *c)
 {
     u64 msr;
@@ -758,9 +765,6 @@ static void init_intel(struct cpuinfo_x86 *c)
     if (cpu_has(c, X86_FEATURE_VMX))
         detect_vmx_virtcap(c);
 
-    if (cpu_has(c, X86_FEATURE_TME))
-        detect_tme(c);
-
     init_intel_misc_features(c);
 
     if (tsx_ctrl_state == TSX_CTRL_ENABLE)
drivers/dma/fsl-qdma.c

@@ -109,6 +109,7 @@
 #define FSL_QDMA_CMD_WTHROTL_OFFSET 20
 #define FSL_QDMA_CMD_DSEN_OFFSET    19
 #define FSL_QDMA_CMD_LWC_OFFSET     16
+#define FSL_QDMA_CMD_PF             BIT(17)
 
 /* Field definition for Descriptor offset */
 #define QDMA_CCDF_STATUS        20
@@ -372,7 +373,8 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
     qdma_csgf_set_f(csgf_dest, len);
     /* Descriptor Buffer */
     cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
-              FSL_QDMA_CMD_RWTTYPE_OFFSET);
+              FSL_QDMA_CMD_RWTTYPE_OFFSET) |
+              FSL_QDMA_CMD_PF;
     sdf->data = QDMA_SDDF_CMD(cmd);
 
     cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
@@ -1150,10 +1152,6 @@ static int fsl_qdma_probe(struct platform_device *pdev)
     if (!fsl_qdma->queue)
         return -ENOMEM;
 
-    ret = fsl_qdma_irq_init(pdev, fsl_qdma);
-    if (ret)
-        return ret;
-
     fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
     if (fsl_qdma->irq_base < 0)
         return fsl_qdma->irq_base;
@@ -1192,19 +1190,22 @@ static int fsl_qdma_probe(struct platform_device *pdev)
 
     platform_set_drvdata(pdev, fsl_qdma);
 
-    ret = dma_async_device_register(&fsl_qdma->dma_dev);
-    if (ret) {
-        dev_err(&pdev->dev,
-            "Can't register NXP Layerscape qDMA engine.\n");
-        return ret;
-    }
-
     ret = fsl_qdma_reg_init(fsl_qdma);
     if (ret) {
         dev_err(&pdev->dev, "Can't Initialize the qDMA engine.\n");
         return ret;
     }
 
+    ret = fsl_qdma_irq_init(pdev, fsl_qdma);
+    if (ret)
+        return ret;
+
+    ret = dma_async_device_register(&fsl_qdma->dma_dev);
+    if (ret) {
+        dev_err(&pdev->dev, "Can't register NXP Layerscape qDMA engine.\n");
+        return ret;
+    }
+
     return 0;
 }
drivers/firmware/efi/capsule-loader.c

@@ -291,7 +291,7 @@ static int efi_capsule_open(struct inode *inode, struct file *file)
         return -ENOMEM;
     }
 
-    cap_info->phys = kzalloc(sizeof(void *), GFP_KERNEL);
+    cap_info->phys = kzalloc(sizeof(phys_addr_t), GFP_KERNEL);
     if (!cap_info->phys) {
         kfree(cap_info->pages);
         kfree(cap_info);
drivers/gpio/gpio-74x164.c

@@ -128,8 +128,6 @@ static int gen_74x164_probe(struct spi_device *spi)
     if (IS_ERR(chip->gpiod_oe))
         return PTR_ERR(chip->gpiod_oe);
 
-    gpiod_set_value_cansleep(chip->gpiod_oe, 1);
-
     spi_set_drvdata(spi, chip);
 
     chip->gpio_chip.label = spi->modalias;
@@ -154,6 +152,8 @@ static int gen_74x164_probe(struct spi_device *spi)
         goto exit_destroy;
     }
 
+    gpiod_set_value_cansleep(chip->gpiod_oe, 1);
+
     ret = gpiochip_add_data(&chip->gpio_chip, chip);
     if (!ret)
         return 0;
drivers/mmc/core/mmc.c

@@ -992,10 +992,12 @@ static int mmc_select_bus_width(struct mmc_card *card)
     static unsigned ext_csd_bits[] = {
         EXT_CSD_BUS_WIDTH_8,
         EXT_CSD_BUS_WIDTH_4,
+        EXT_CSD_BUS_WIDTH_1,
     };
     static unsigned bus_widths[] = {
         MMC_BUS_WIDTH_8,
         MMC_BUS_WIDTH_4,
+        MMC_BUS_WIDTH_1,
     };
     struct mmc_host *host = card->host;
     unsigned idx, bus_width = 0;
drivers/net/gtp.c

@@ -1377,26 +1377,26 @@ static int __init gtp_init(void)
 
     get_random_bytes(&gtp_h_initval, sizeof(gtp_h_initval));
 
-    err = rtnl_link_register(&gtp_link_ops);
+    err = register_pernet_subsys(&gtp_net_ops);
     if (err < 0)
         goto error_out;
 
-    err = register_pernet_subsys(&gtp_net_ops);
+    err = rtnl_link_register(&gtp_link_ops);
     if (err < 0)
-        goto unreg_rtnl_link;
+        goto unreg_pernet_subsys;
 
     err = genl_register_family(&gtp_genl_family);
     if (err < 0)
-        goto unreg_pernet_subsys;
+        goto unreg_rtnl_link;
 
     pr_info("GTP module loaded (pdp ctx size %zd bytes)\n",
         sizeof(struct pdp_ctx));
     return 0;
 
-unreg_pernet_subsys:
-    unregister_pernet_subsys(&gtp_net_ops);
 unreg_rtnl_link:
     rtnl_link_unregister(&gtp_link_ops);
+unreg_pernet_subsys:
+    unregister_pernet_subsys(&gtp_net_ops);
 error_out:
     pr_err("error loading GTP module loaded\n");
     return err;
drivers/net/tun.c

@@ -715,6 +715,7 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
                    tun->tfiles[tun->numqueues - 1]);
         ntfile = rtnl_dereference(tun->tfiles[index]);
         ntfile->queue_index = index;
+        ntfile->xdp_rxq.queue_index = index;
         rcu_assign_pointer(tun->tfiles[tun->numqueues - 1],
                    NULL);
 
drivers/net/usb/dm9601.c

@@ -231,7 +231,7 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
     err = dm_read_shared_word(dev, 1, loc, &res);
     if (err < 0) {
         netdev_err(dev->net, "MDIO read error: %d\n", err);
-        return err;
+        return 0;
     }
 
     netdev_dbg(dev->net,
drivers/net/usb/lan78xx.c

@@ -2535,7 +2535,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
     if (dev->chipid == ID_REV_CHIP_ID_7801_)
         buf &= ~MAC_CR_GMII_EN_;
 
-    if (dev->chipid == ID_REV_CHIP_ID_7800_) {
+    if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
+        dev->chipid == ID_REV_CHIP_ID_7850_) {
         ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
         if (!ret && sig != EEPROM_INDICATOR) {
             /* Implies there is no external eeprom. Set mac speed */
drivers/power/supply/bq27xxx_battery_i2c.c

@@ -217,7 +217,9 @@ static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
 {
     struct bq27xxx_device_info *di = i2c_get_clientdata(client);
 
-    free_irq(client->irq, di);
+    if (client->irq)
+        free_irq(client->irq, di);
+
     bq27xxx_battery_teardown(di);
 
     mutex_lock(&battery_mutex);
fs/afs/dir.c

@@ -426,8 +426,10 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
             dire->u.name[0] == '.' &&
             ctx->actor != afs_lookup_filldir &&
             ctx->actor != afs_lookup_one_filldir &&
-            memcmp(dire->u.name, ".__afs", 6) == 0)
+            memcmp(dire->u.name, ".__afs", 6) == 0) {
+            ctx->pos = blkoff + next * sizeof(union afs_xdr_dirent);
             continue;
+        }
 
         /* found the next entry */
         if (!dir_emit(ctx, dire->u.name, nlen,
fs/btrfs/dev-replace.c

@@ -535,6 +535,23 @@ leave:
     return ret;
 }
 
+static int btrfs_check_replace_dev_names(struct btrfs_ioctl_dev_replace_args *args)
+{
+    if (args->start.srcdevid == 0) {
+        if (memchr(args->start.srcdev_name, 0,
+               sizeof(args->start.srcdev_name)) == NULL)
+            return -ENAMETOOLONG;
+    } else {
+        args->start.srcdev_name[0] = 0;
+    }
+
+    if (memchr(args->start.tgtdev_name, 0,
+           sizeof(args->start.tgtdev_name)) == NULL)
+        return -ENAMETOOLONG;
+
+    return 0;
+}
+
 int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
                    struct btrfs_ioctl_dev_replace_args *args)
 {
@@ -547,10 +564,9 @@ int btrfs_dev_replace_by_ioctl(struct btrfs_fs_info *fs_info,
     default:
         return -EINVAL;
     }
 
-    if ((args->start.srcdevid == 0 && args->start.srcdev_name[0] == '\0') ||
-        args->start.tgtdev_name[0] == '\0')
-        return -EINVAL;
+    ret = btrfs_check_replace_dev_names(args);
+    if (ret < 0)
+        return ret;
 
     ret = btrfs_dev_replace_start(fs_info, args->start.tgtdev_name,
                       args->start.srcdevid,
fs/cachefiles/bind.c

@@ -245,6 +245,8 @@ error_open_root:
     kmem_cache_free(cachefiles_object_jar, fsdef);
 error_root_object:
     cachefiles_end_secure(cache, saved_cred);
+    put_cred(cache->cache_cred);
+    cache->cache_cred = NULL;
     pr_err("Failed to register: %d\n", ret);
     return ret;
 }
@@ -265,6 +267,7 @@ void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
 
     dput(cache->graveyard);
     mntput(cache->mnt);
+    put_cred(cache->cache_cred);
 
     kfree(cache->rootdirname);
     kfree(cache->secctx);
fs/hugetlbfs/inode.c

@@ -1205,6 +1205,7 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
 {
     struct hugetlbfs_fs_context *ctx = fc->fs_private;
     struct fs_parse_result result;
+    struct hstate *h;
     char *rest;
     unsigned long ps;
     int opt;
@@ -1249,11 +1250,12 @@ static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
 
     case Opt_pagesize:
         ps = memparse(param->string, &rest);
-        ctx->hstate = size_to_hstate(ps);
-        if (!ctx->hstate) {
+        h = size_to_hstate(ps);
+        if (!h) {
             pr_err("Unsupported page size %lu MB\n", ps >> 20);
             return -EINVAL;
         }
+        ctx->hstate = h;
         return 0;
 
     case Opt_min_size:
net/bluetooth/hci_core.c

@@ -2272,6 +2272,7 @@ static void hci_error_reset(struct work_struct *work)
 {
     struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);
 
+    hci_dev_hold(hdev);
     BT_DBG("%s", hdev->name);
 
     if (hdev->hw_error)
@@ -2279,10 +2280,10 @@ static void hci_error_reset(struct work_struct *work)
     else
         bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);
 
-    if (hci_dev_do_close(hdev))
-        return;
+    if (!hci_dev_do_close(hdev))
+        hci_dev_do_open(hdev);
 
-    hci_dev_do_open(hdev);
+    hci_dev_put(hdev);
 }
 
 void hci_uuids_clear(struct hci_dev *hdev)
net/bluetooth/hci_event.c

@@ -4469,9 +4469,12 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
     hci_dev_lock(hdev);
 
     conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
-    if (!conn || !hci_conn_ssp_enabled(conn))
+    if (!conn || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
         goto unlock;
 
+    /* Assume remote supports SSP since it has triggered this event */
+    set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
+
     hci_conn_hold(conn);
 
     if (!hci_dev_test_flag(hdev, HCI_MGMT))
@@ -5766,6 +5769,10 @@ static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
         return send_conn_param_neg_reply(hdev, handle,
                          HCI_ERROR_UNKNOWN_CONN_ID);
 
+    if (max > hcon->le_conn_max_interval)
+        return send_conn_param_neg_reply(hdev, handle,
+                         HCI_ERROR_INVALID_LL_PARAMS);
+
     if (hci_check_conn_params(min, max, latency, timeout))
         return send_conn_param_neg_reply(hdev, handle,
                          HCI_ERROR_INVALID_LL_PARAMS);
net/bluetooth/l2cap_core.c

@@ -5331,7 +5331,13 @@ static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
 
     memset(&rsp, 0, sizeof(rsp));
 
-    err = hci_check_conn_params(min, max, latency, to_multiplier);
+    if (max > hcon->le_conn_max_interval) {
+        BT_DBG("requested connection interval exceeds current bounds.");
+        err = -EINVAL;
+    } else {
+        err = hci_check_conn_params(min, max, latency, to_multiplier);
+    }
+
     if (err)
         rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
     else
net/core/rtnetlink.c

@@ -4586,10 +4586,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
     struct net *net = sock_net(skb->sk);
     struct ifinfomsg *ifm;
     struct net_device *dev;
-    struct nlattr *br_spec, *attr = NULL;
+    struct nlattr *br_spec, *attr, *br_flags_attr = NULL;
     int rem, err = -EOPNOTSUPP;
     u16 flags = 0;
-    bool have_flags = false;
 
     if (nlmsg_len(nlh) < sizeof(*ifm))
         return -EINVAL;
@@ -4607,11 +4606,11 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
     br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
     if (br_spec) {
         nla_for_each_nested(attr, br_spec, rem) {
-            if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) {
+            if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !br_flags_attr) {
                 if (nla_len(attr) < sizeof(flags))
                     return -EINVAL;
 
-                have_flags = true;
+                br_flags_attr = attr;
                 flags = nla_get_u16(attr);
             }
 
@@ -4655,8 +4654,8 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
         }
     }
 
-    if (have_flags)
-        memcpy(nla_data(attr), &flags, sizeof(flags));
+    if (br_flags_attr)
+        memcpy(nla_data(br_flags_attr), &flags, sizeof(flags));
 out:
     return err;
 }
net/ipv4/ip_tunnel.c

@@ -547,6 +547,20 @@ static int tnl_update_pmtu(struct net_device *dev, struct sk_buff *skb,
     return 0;
 }
 
+static void ip_tunnel_adj_headroom(struct net_device *dev, unsigned int headroom)
+{
+    /* we must cap headroom to some upperlimit, else pskb_expand_head
+     * will overflow header offsets in skb_headers_offset_update().
+     */
+    static const unsigned int max_allowed = 512;
+
+    if (headroom > max_allowed)
+        headroom = max_allowed;
+
+    if (headroom > READ_ONCE(dev->needed_headroom))
+        WRITE_ONCE(dev->needed_headroom, headroom);
+}
+
 void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
                u8 proto, int tunnel_hlen)
 {
@@ -620,13 +634,13 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
     }
 
     headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len;
-    if (headroom > READ_ONCE(dev->needed_headroom))
-        WRITE_ONCE(dev->needed_headroom, headroom);
 
-    if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
+    if (skb_cow_head(skb, headroom)) {
         ip_rt_put(rt);
         goto tx_dropped;
     }
 
+    ip_tunnel_adj_headroom(dev, headroom);
+
     iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, proto, tos, ttl,
               df, !net_eq(tunnel->net, dev_net(dev)));
     return;
@@ -804,16 +818,16 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 
     max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr)
             + rt->dst.header_len + ip_encap_hlen(&tunnel->encap);
-    if (max_headroom > READ_ONCE(dev->needed_headroom))
-        WRITE_ONCE(dev->needed_headroom, max_headroom);
 
-    if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) {
+    if (skb_cow_head(skb, max_headroom)) {
         ip_rt_put(rt);
         dev->stats.tx_dropped++;
         kfree_skb(skb);
         return;
     }
 
+    ip_tunnel_adj_headroom(dev, max_headroom);
+
     iptunnel_xmit(NULL, rt, skb, fl4.saddr, fl4.daddr, protocol, tos, ttl,
               df, !net_eq(tunnel->net, dev_net(dev)));
     return;
net/ipv6/addrconf.c

@@ -5402,9 +5402,10 @@ static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
     }
 
     addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
-    if (!addr)
-        return -EINVAL;
-
+    if (!addr) {
+        err = -EINVAL;
+        goto errout;
+    }
     ifm = nlmsg_data(nlh);
     if (ifm->ifa_index)
         dev = dev_get_by_index(tgt_net, ifm->ifa_index);
net/netfilter/nft_compat.c

@@ -336,10 +336,20 @@ static int nft_target_validate(const struct nft_ctx *ctx,
 
     if (ctx->family != NFPROTO_IPV4 &&
         ctx->family != NFPROTO_IPV6 &&
+        ctx->family != NFPROTO_INET &&
         ctx->family != NFPROTO_BRIDGE &&
         ctx->family != NFPROTO_ARP)
         return -EOPNOTSUPP;
 
+    ret = nft_chain_validate_hooks(ctx->chain,
+                       (1 << NF_INET_PRE_ROUTING) |
+                       (1 << NF_INET_LOCAL_IN) |
+                       (1 << NF_INET_FORWARD) |
+                       (1 << NF_INET_LOCAL_OUT) |
+                       (1 << NF_INET_POST_ROUTING));
+    if (ret)
+        return ret;
+
     if (nft_is_base_chain(ctx->chain)) {
         const struct nft_base_chain *basechain =
             nft_base_chain(ctx->chain);
@@ -584,10 +594,20 @@ static int nft_match_validate(const struct nft_ctx *ctx,
 
     if (ctx->family != NFPROTO_IPV4 &&
         ctx->family != NFPROTO_IPV6 &&
+        ctx->family != NFPROTO_INET &&
         ctx->family != NFPROTO_BRIDGE &&
         ctx->family != NFPROTO_ARP)
         return -EOPNOTSUPP;
 
+    ret = nft_chain_validate_hooks(ctx->chain,
+                       (1 << NF_INET_PRE_ROUTING) |
+                       (1 << NF_INET_LOCAL_IN) |
+                       (1 << NF_INET_FORWARD) |
+                       (1 << NF_INET_LOCAL_OUT) |
+                       (1 << NF_INET_POST_ROUTING));
+    if (ret)
+        return ret;
+
     if (nft_is_base_chain(ctx->chain)) {
         const struct nft_base_chain *basechain =
             nft_base_chain(ctx->chain);
net/netlink/af_netlink.c

@@ -156,7 +156,7 @@ static inline u32 netlink_group_mask(u32 group)
 static struct sk_buff *netlink_to_full_skb(const struct sk_buff *skb,
                        gfp_t gfp_mask)
 {
-    unsigned int len = skb_end_offset(skb);
+    unsigned int len = skb->len;
     struct sk_buff *new;
 
     new = alloc_skb(len, gfp_mask);
net/wireless/nl80211.c

@@ -3652,6 +3652,8 @@ static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
 
         if (ntype != NL80211_IFTYPE_MESH_POINT)
             return -EINVAL;
+        if (otype != NL80211_IFTYPE_MESH_POINT)
+            return -EINVAL;
         if (netif_running(dev))
             return -EBUSY;
 
sound/core/Makefile

@@ -32,7 +32,6 @@ snd-pcm-dmaengine-objs := pcm_dmaengine.o
 snd-rawmidi-objs := rawmidi.o
 snd-timer-objs := timer.o
 snd-hrtimer-objs := hrtimer.o
-snd-rtctimer-objs := rtctimer.o
 snd-hwdep-objs := hwdep.o
 snd-seq-device-objs := seq_device.o
 