This is the 5.4.288 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmdkUkYACgkQONu9yGCS
 aT5RDg//WKMt8BAC/HtVkHOaO2vJt7ADIawvJkSZD+ljKfPvydJsMydpfqcO1l7y
 aVahXIkDVsqyxNkkvZr9Sn19W6SQHD1yPsm76gbatSmHEydCvgOncrQgDYQOXDx5
 pnMRwyPYd9P/9oXPfsGJK4dPL7dklL5vHYr+47kG47PJENeXCB2Z6LVEjSt9Sm6f
 3dcoyUb0RsNSW7yoWX099UOH7B8/rYaTCXxAlrch/7vP5Uhfw5A2L2L/Tu/0zXNU
 1G4/aHVDjabjQBkhV2Obh0OQcs5MSMMfor6cIABaaiotsDqPPVLzW+ciEC11FpvM
 aeRElnkxN9b27MrGTL62mgsJN0/yr1nyMZt9v0/B/bcbkPHKDse6RTz3jCXo2d52
 BpGfj/7yw3c7rzFrFC3uxKUgnsJZFmyyadPGZJetBLwICJl/aNhrPt5ZYkTx+ety
 90oMe510dbc/AHaSZ6cWkZy+0+ejBGVbbfja/L/Vji4mCJtmab2mTQkVuiuOBYeh
 pBotQzaaUQ0+QihMe3n+8PN5H/BKNBx3wvmzwrbVZ6pGA2Sz+wevuEBo6bKNmmpN
 iVmJZy+Y/W9ffllCUz4zGsu+hK2CxVQIHa7gw+NzmlqGovIGt70zvDonyWAHuy5A
 m9LFrDJkct0jc3X/6xN9gfuDPMrOIEmGXhjcGmmWaqrLh4MamtI=
 =2mmT
 -----END PGP SIGNATURE-----

Merge 5.4.288 into android11-5.4-lts

Changes in 5.4.288
	usb: host: max3421-hcd: Correctly abort a USB request.
	ata: sata_highbank: fix OF node reference leak in highbank_initialize_phys()
	usb: dwc2: hcd: Fix GetPortStatus & SetPortFeature
	usb: ehci-hcd: fix call balance of clocks handling routines
	usb: gadget: u_serial: Fix the issue that gs_start_io crashed due to accessing null pointer
	xfs: don't drop errno values when we fail to ficlone the entire range
	bpf, sockmap: Fix update element with same
	batman-adv: Do not send uninitialized TT changes
	batman-adv: Remove uninitialized data in full table TT response
	batman-adv: Do not let TT changes list grows indefinitely
	tipc: fix NULL deref in cleanup_bearer()
	net: lapb: increase LAPB_HEADER_LEN
	ACPI: resource: Fix memory resource type union access
	qca_spi: Fix clock speed for multiple QCA7000
	qca_spi: Make driver probing reliable
	net/sched: netem: account for backlog updates from child qdisc
	ACPICA: events/evxfregn: don't release the ContextMutex that was never acquired
	blk-iocost: clamp inuse and skip noops in __propagate_weights()
	blk-iocost: fix weight updates of inner active iocgs
	blk-iocost: Avoid using clamp() on inuse in __propagate_weights()
	KVM: arm64: Ignore PMCNTENSET_EL0 while checking for overflow status
	tracing/kprobes: Skip symbol counting logic for module symbols in create_local_trace_kprobe()
	xen/netfront: fix crash when removing device
	ALSA: usb-audio: Fix a DMA to stack memory bug
	Linux 5.4.288

Change-Id: Ie329f210978bae25fa2703d4106a3880bb9ba53c
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
This commit is contained in:
Greg Kroah-Hartman 2024-12-27 16:16:36 +00:00
commit 4d8aad9b5e
21 changed files with 166 additions and 83 deletions

View file

@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
SUBLEVEL = 287
SUBLEVEL = 288
EXTRAVERSION =
NAME = Kleptomaniac Octopus

View file

@@ -907,7 +907,27 @@ static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse
lockdep_assert_held(&ioc->lock);
inuse = min(active, inuse);
/*
* For an active leaf node, its inuse shouldn't be zero or exceed
* @active. An active internal node's inuse is solely determined by the
* inuse to active ratio of its children regardless of @inuse.
*/
if (list_empty(&iocg->active_list) && iocg->child_active_sum) {
inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum,
iocg->child_active_sum);
} else {
/*
* It may be tempting to turn this into a clamp expression with
* a lower limit of 1 but active may be 0, which cannot be used
* as an upper limit in that situation. This expression allows
* active to clamp inuse unless it is 0, in which case inuse
* becomes 1.
*/
inuse = min(inuse, active) ?: 1;
}
if (active == iocg->active && inuse == iocg->inuse)
return;
for (lvl = iocg->level - 1; lvl >= 0; lvl--) {
struct ioc_gq *parent = iocg->ancestors[lvl];
@@ -917,7 +937,7 @@ static void __propagate_active_weight(struct ioc_gq *iocg, u32 active, u32 inuse
/* update the level sums */
parent->child_active_sum += (s32)(active - child->active);
parent->child_inuse_sum += (s32)(inuse - child->inuse);
/* apply the udpates */
/* apply the updates */
child->active = active;
child->inuse = inuse;

View file

@@ -201,8 +201,6 @@ acpi_remove_address_space_handler(acpi_handle device,
/* Now we can delete the handler object */
acpi_os_release_mutex(handler_obj->address_space.
context_mutex);
acpi_ut_remove_reference(handler_obj);
goto unlock_and_exit;
}

View file

@@ -250,6 +250,9 @@ static bool acpi_decode_space(struct resource_win *win,
switch (addr->resource_type) {
case ACPI_MEMORY_RANGE:
acpi_dev_memresource_flags(res, len, wp);
if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
res->flags |= IORESOURCE_PREFETCH;
break;
case ACPI_IO_RANGE:
acpi_dev_ioresource_flags(res, len, iodec,
@@ -265,9 +268,6 @@ static bool acpi_decode_space(struct resource_win *win,
if (addr->producer_consumer == ACPI_PRODUCER)
res->flags |= IORESOURCE_WINDOW;
if (addr->info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
res->flags |= IORESOURCE_PREFETCH;
return !(res->flags & IORESOURCE_DISABLED);
}

View file

@@ -348,6 +348,7 @@ static int highbank_initialize_phys(struct device *dev, void __iomem *addr)
phy_nodes[phy] = phy_data.np;
cphy_base[phy] = of_iomap(phy_nodes[phy], 0);
if (cphy_base[phy] == NULL) {
of_node_put(phy_data.np);
return 0;
}
phy_count += 1;

View file

@@ -65,7 +65,7 @@ MODULE_PARM_DESC(qcaspi_burst_len, "Number of data bytes per burst. Use 1-5000."
#define QCASPI_PLUGGABLE_MIN 0
#define QCASPI_PLUGGABLE_MAX 1
static int qcaspi_pluggable = QCASPI_PLUGGABLE_MIN;
static int qcaspi_pluggable = QCASPI_PLUGGABLE_MAX;
module_param(qcaspi_pluggable, int, 0);
MODULE_PARM_DESC(qcaspi_pluggable, "Pluggable SPI connection (yes/no).");
@@ -823,7 +823,6 @@ qcaspi_netdev_init(struct net_device *dev)
dev->mtu = QCAFRM_MAX_MTU;
dev->type = ARPHRD_ETHER;
qca->clkspeed = qcaspi_clkspeed;
qca->burst_len = qcaspi_burst_len;
qca->spi_thread = NULL;
qca->buffer_size = (dev->mtu + VLAN_ETH_HLEN + QCAFRM_HEADER_LEN +
@@ -912,17 +911,15 @@ qca_spi_probe(struct spi_device *spi)
legacy_mode = of_property_read_bool(spi->dev.of_node,
"qca,legacy-mode");
if (qcaspi_clkspeed == 0) {
if (spi->max_speed_hz)
qcaspi_clkspeed = spi->max_speed_hz;
else
qcaspi_clkspeed = QCASPI_CLK_SPEED;
}
if (qcaspi_clkspeed)
spi->max_speed_hz = qcaspi_clkspeed;
else if (!spi->max_speed_hz)
spi->max_speed_hz = QCASPI_CLK_SPEED;
if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
(qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
dev_err(&spi->dev, "Invalid clkspeed: %d\n",
qcaspi_clkspeed);
if (spi->max_speed_hz < QCASPI_CLK_SPEED_MIN ||
spi->max_speed_hz > QCASPI_CLK_SPEED_MAX) {
dev_err(&spi->dev, "Invalid clkspeed: %u\n",
spi->max_speed_hz);
return -EINVAL;
}
@@ -947,14 +944,13 @@ qca_spi_probe(struct spi_device *spi)
return -EINVAL;
}
dev_info(&spi->dev, "ver=%s, clkspeed=%d, burst_len=%d, pluggable=%d\n",
dev_info(&spi->dev, "ver=%s, clkspeed=%u, burst_len=%d, pluggable=%d\n",
QCASPI_DRV_VERSION,
qcaspi_clkspeed,
spi->max_speed_hz,
qcaspi_burst_len,
qcaspi_pluggable);
spi->mode = SPI_MODE_3;
spi->max_speed_hz = qcaspi_clkspeed;
if (spi_setup(spi) < 0) {
dev_err(&spi->dev, "Unable to setup SPI device\n");
return -EFAULT;

View file

@@ -101,7 +101,6 @@ struct qcaspi {
#endif
/* user configurable options */
u32 clkspeed;
u8 legacy_mode;
u16 burst_len;
};

View file

@@ -787,7 +787,7 @@ static netdev_tx_t xennet_start_xmit(struct sk_buff *skb, struct net_device *dev
static int xennet_close(struct net_device *dev)
{
struct netfront_info *np = netdev_priv(dev);
unsigned int num_queues = dev->real_num_tx_queues;
unsigned int num_queues = np->queues ? dev->real_num_tx_queues : 0;
unsigned int i;
struct netfront_queue *queue;
netif_tx_stop_all_queues(np->netdev);
@@ -802,6 +802,9 @@ static void xennet_destroy_queues(struct netfront_info *info)
{
unsigned int i;
if (!info->queues)
return;
for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
struct netfront_queue *queue = &info->queues[i];

View file

@@ -3544,11 +3544,9 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
port_status |= USB_PORT_STAT_C_OVERCURRENT << 16;
}
if (!hsotg->flags.b.port_connect_status) {
if (dwc2_is_device_mode(hsotg)) {
/*
* The port is disconnected, which means the core is
* either in device mode or it soon will be. Just
* return 0's for the remainder of the port status
* Just return 0's for the remainder of the port status
* since the port register can't be read if the core
* is in device mode.
*/
@@ -3618,13 +3616,11 @@ static int dwc2_hcd_hub_control(struct dwc2_hsotg *hsotg, u16 typereq,
if (wvalue != USB_PORT_FEAT_TEST && (!windex || windex > 1))
goto error;
if (!hsotg->flags.b.port_connect_status) {
if (dwc2_is_device_mode(hsotg)) {
/*
* The port is disconnected, which means the core is
* either in device mode or it soon will be. Just
* return without doing anything since the port
* register can't be written if the core is in device
* mode.
* Just return 0's for the remainder of the port status
* since the port register can't be read if the core
* is in device mode.
*/
break;
}

View file

@@ -566,9 +566,12 @@ static int gs_start_io(struct gs_port *port)
* we didn't in gs_start_tx() */
tty_wakeup(port->port.tty);
} else {
gs_free_requests(ep, head, &port->read_allocated);
gs_free_requests(port->port_usb->in, &port->write_pool,
&port->write_allocated);
/* Free reqs only if we are still connected */
if (port->port_usb) {
gs_free_requests(ep, head, &port->read_allocated);
gs_free_requests(port->port_usb->in, &port->write_pool,
&port->write_allocated);
}
status = -EIO;
}

View file

@@ -124,8 +124,12 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
if (IS_ERR(priv->iclk))
priv->iclk = NULL;
clk_enable(priv->fclk);
clk_enable(priv->iclk);
ret = clk_enable(priv->fclk);
if (ret)
goto fail_request_resource;
ret = clk_enable(priv->iclk);
if (ret)
goto fail_iclk;
if (pdata && pdata->phy_init)
pdata->phy_init();
@@ -144,6 +148,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
fail_add_hcd:
clk_disable(priv->iclk);
fail_iclk:
clk_disable(priv->fclk);
fail_request_resource:

View file

@@ -785,11 +785,17 @@ max3421_check_unlink(struct usb_hcd *hcd)
retval = 1;
dev_dbg(&spi->dev, "%s: URB %p unlinked=%d",
__func__, urb, urb->unlinked);
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&max3421_hcd->lock,
flags);
usb_hcd_giveback_urb(hcd, urb, 0);
spin_lock_irqsave(&max3421_hcd->lock, flags);
if (urb == max3421_hcd->curr_urb) {
max3421_hcd->urb_done = 1;
max3421_hcd->hien &= ~(BIT(MAX3421_HI_HXFRDN_BIT) |
BIT(MAX3421_HI_RCVDAV_BIT));
} else {
usb_hcd_unlink_urb_from_ep(hcd, urb);
spin_unlock_irqrestore(&max3421_hcd->lock,
flags);
usb_hcd_giveback_urb(hcd, urb, 0);
spin_lock_irqsave(&max3421_hcd->lock, flags);
}
}
}
}

View file

@@ -1068,6 +1068,14 @@ out_unlock:
xfs_reflink_remap_unlock(file_in, file_out);
if (ret)
trace_xfs_reflink_remap_range_error(dest, ret, _RET_IP_);
/*
* If the caller did not set CAN_SHORTEN, then it is not prepared to
* handle partial results -- either the whole remap succeeds, or we
* must say why it did not. In this case, any error should be returned
* to the caller.
*/
if (ret && remapped < len && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
return ret;
return remapped > 0 ? remapped : ret;
}

View file

@@ -4,7 +4,7 @@
#include <linux/lapb.h>
#include <linux/refcount.h>
#define LAPB_HEADER_LEN 20 /* LAPB over Ethernet + a bit more */
#define LAPB_HEADER_LEN MAX_HEADER /* LAPB over Ethernet + a bit more */
#define LAPB_ACK_PENDING_CONDITION 0x01
#define LAPB_REJECT_CONDITION 0x02

View file

@@ -1663,7 +1663,7 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
int ret;
char *event;
if (func) {
if (func && !strchr(func, ':')) {
unsigned int count;
count = number_of_same_symbols(func);

View file

@@ -1000,16 +1000,25 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
int tt_diff_len, tt_change_len = 0;
int tt_diff_entries_num = 0;
int tt_diff_entries_count = 0;
bool drop_changes = false;
size_t tt_extra_len = 0;
u16 tvlv_len;
tt_diff_entries_num = atomic_read(&bat_priv->tt.local_changes);
tt_diff_len = batadv_tt_len(tt_diff_entries_num);
/* if we have too many changes for one packet don't send any
* and wait for the tt table request which will be fragmented
* and wait for the tt table request so we can reply with the full
* (fragmented) table.
*
* The local change history should still be cleaned up so the next
* TT round can start again with a clean state.
*/
if (tt_diff_len > bat_priv->soft_iface->mtu)
if (tt_diff_len > bat_priv->soft_iface->mtu) {
tt_diff_len = 0;
tt_diff_entries_num = 0;
drop_changes = true;
}
tvlv_len = batadv_tt_prepare_tvlv_local_data(bat_priv, &tt_data,
&tt_change, &tt_diff_len);
@@ -1018,7 +1027,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
tt_data->flags = BATADV_TT_OGM_DIFF;
if (tt_diff_len == 0)
if (!drop_changes && tt_diff_len == 0)
goto container_register;
spin_lock_bh(&bat_priv->tt.changes_list_lock);
@@ -1037,6 +1046,9 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
}
spin_unlock_bh(&bat_priv->tt.changes_list_lock);
tt_extra_len = batadv_tt_len(tt_diff_entries_num -
tt_diff_entries_count);
/* Keep the buffer for possible tt_request */
spin_lock_bh(&bat_priv->tt.last_changeset_lock);
kfree(bat_priv->tt.last_changeset);
@@ -1045,6 +1057,7 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
tt_change_len = batadv_tt_len(tt_diff_entries_count);
/* check whether this new OGM has no changes due to size problems */
if (tt_diff_entries_count > 0) {
tt_diff_len -= tt_extra_len;
/* if kmalloc() fails we will reply with the full table
* instead of providing the diff
*/
@@ -1057,6 +1070,8 @@ static void batadv_tt_tvlv_container_update(struct batadv_priv *bat_priv)
}
spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
/* Remove extra packet space for OGM */
tvlv_len -= tt_extra_len;
container_register:
batadv_tvlv_container_register(bat_priv, BATADV_TVLV_TT, 1, tt_data,
tvlv_len);
@@ -2979,14 +2994,16 @@ static bool batadv_tt_global_valid(const void *entry_ptr,
*
* Fills the tvlv buff with the tt entries from the specified hash. If valid_cb
* is not provided then this becomes a no-op.
*
* Return: Remaining unused length in tvlv_buff.
*/
static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
struct batadv_hashtable *hash,
void *tvlv_buff, u16 tt_len,
bool (*valid_cb)(const void *,
const void *,
u8 *flags),
void *cb_data)
static u16 batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
struct batadv_hashtable *hash,
void *tvlv_buff, u16 tt_len,
bool (*valid_cb)(const void *,
const void *,
u8 *flags),
void *cb_data)
{
struct batadv_tt_common_entry *tt_common_entry;
struct batadv_tvlv_tt_change *tt_change;
@@ -3000,7 +3017,7 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
tt_change = (struct batadv_tvlv_tt_change *)tvlv_buff;
if (!valid_cb)
return;
return tt_len;
rcu_read_lock();
for (i = 0; i < hash->size; i++) {
@@ -3026,6 +3043,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
}
}
rcu_read_unlock();
return batadv_tt_len(tt_tot - tt_num_entries);
}
/**
@@ -3303,10 +3322,11 @@ static bool batadv_send_other_tt_response(struct batadv_priv *bat_priv,
goto out;
/* fill the rest of the tvlv with the real TT entries */
batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.global_hash,
tt_change, tt_len,
batadv_tt_global_valid,
req_dst_orig_node);
tvlv_len -= batadv_tt_tvlv_generate(bat_priv,
bat_priv->tt.global_hash,
tt_change, tt_len,
batadv_tt_global_valid,
req_dst_orig_node);
}
/* Don't send the response, if larger than fragmented packet. */
@@ -3432,9 +3452,11 @@ static bool batadv_send_my_tt_response(struct batadv_priv *bat_priv,
goto out;
/* fill the rest of the tvlv with the real TT entries */
batadv_tt_tvlv_generate(bat_priv, bat_priv->tt.local_hash,
tt_change, tt_len,
batadv_tt_local_valid, NULL);
tvlv_len -= batadv_tt_tvlv_generate(bat_priv,
bat_priv->tt.local_hash,
tt_change, tt_len,
batadv_tt_local_valid,
NULL);
}
tvlv_tt_data->flags = BATADV_TT_RESPONSE;

View file

@@ -152,6 +152,7 @@ static void sock_map_del_link(struct sock *sk,
strp_stop = true;
list_del(&link->list);
sk_psock_free_link(link);
break;
}
}
spin_unlock_bh(&psock->link_lock);

View file

@@ -77,6 +77,8 @@ struct netem_sched_data {
struct sk_buff *t_head;
struct sk_buff *t_tail;
u32 t_len;
/* optional qdisc for classful handling (NULL at netem init) */
struct Qdisc *qdisc;
@@ -373,6 +375,7 @@ static void tfifo_reset(struct Qdisc *sch)
rtnl_kfree_skbs(q->t_head, q->t_tail);
q->t_head = NULL;
q->t_tail = NULL;
q->t_len = 0;
}
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
@@ -402,6 +405,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
rb_link_node(&nskb->rbnode, parent, p);
rb_insert_color(&nskb->rbnode, &q->t_root);
}
q->t_len++;
sch->q.qlen++;
}
@@ -508,7 +512,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1<<(prandom_u32() % 8);
}
if (unlikely(sch->q.qlen >= sch->limit)) {
if (unlikely(q->t_len >= sch->limit)) {
/* re-link segs, so that qdisc_drop_all() frees them all */
skb->next = segs;
qdisc_drop_all(skb, sch, to_free);
@@ -692,8 +696,8 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
tfifo_dequeue:
skb = __qdisc_dequeue_head(&sch->q);
if (skb) {
qdisc_qstats_backlog_dec(sch, skb);
deliver:
qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);
return skb;
}
@@ -709,8 +713,7 @@ deliver:
if (time_to_send <= now && q->slot.slot_next <= now) {
netem_erase_head(q, skb);
sch->q.qlen--;
qdisc_qstats_backlog_dec(sch, skb);
q->t_len--;
skb->next = NULL;
skb->prev = NULL;
/* skb->dev shares skb->rbnode area,
@@ -737,16 +740,21 @@ deliver:
if (net_xmit_drop_count(err))
qdisc_qstats_drop(sch);
qdisc_tree_reduce_backlog(sch, 1, pkt_len);
sch->qstats.backlog -= pkt_len;
sch->q.qlen--;
}
goto tfifo_dequeue;
}
sch->q.qlen--;
goto deliver;
}
if (q->qdisc) {
skb = q->qdisc->ops->dequeue(q->qdisc);
if (skb)
if (skb) {
sch->q.qlen--;
goto deliver;
}
}
qdisc_watchdog_schedule_ns(&q->watchdog,
@@ -756,8 +764,10 @@ deliver:
if (q->qdisc) {
skb = q->qdisc->ops->dequeue(q->qdisc);
if (skb)
if (skb) {
sch->q.qlen--;
goto deliver;
}
}
return NULL;
}

View file

@@ -798,6 +798,7 @@ static void cleanup_bearer(struct work_struct *work)
{
struct udp_bearer *ub = container_of(work, struct udp_bearer, work);
struct udp_replicast *rcast, *tmp;
struct tipc_net *tn;
list_for_each_entry_safe(rcast, tmp, &ub->rcast.list, list) {
dst_cache_destroy(&rcast->dst_cache);
@@ -805,10 +806,14 @@ static void cleanup_bearer(struct work_struct *work)
kfree_rcu(rcast, rcu);
}
tn = tipc_net(sock_net(ub->ubsock->sk));
dst_cache_destroy(&ub->rcast.dst_cache);
udp_tunnel_sock_release(ub->ubsock);
/* Note: could use a call_rcu() to avoid another synchronize_net() */
synchronize_net();
atomic_dec(&tipc_net(sock_net(ub->ubsock->sk))->wq_count);
atomic_dec(&tn->wq_count);
kfree(ub);
}

View file

@@ -585,7 +585,7 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip,
static int snd_usb_extigy_boot_quirk(struct usb_device *dev, struct usb_interface *intf)
{
struct usb_host_config *config = dev->actconfig;
struct usb_device_descriptor new_device_descriptor;
struct usb_device_descriptor *new_device_descriptor = NULL;
int err;
if (le16_to_cpu(get_cfg_desc(config)->wTotalLength) == EXTIGY_FIRMWARE_SIZE_OLD ||
@@ -596,15 +596,20 @@ static int snd_usb_extigy_boot_quirk(struct usb_device *dev, struct usb_interface *intf)
0x10, 0x43, 0x0001, 0x000a, NULL, 0);
if (err < 0)
dev_dbg(&dev->dev, "error sending boot message: %d\n", err);
new_device_descriptor = kmalloc(sizeof(*new_device_descriptor), GFP_KERNEL);
if (!new_device_descriptor)
return -ENOMEM;
err = usb_get_descriptor(dev, USB_DT_DEVICE, 0,
&new_device_descriptor, sizeof(new_device_descriptor));
new_device_descriptor, sizeof(*new_device_descriptor));
if (err < 0)
dev_dbg(&dev->dev, "error usb_get_descriptor: %d\n", err);
if (new_device_descriptor.bNumConfigurations > dev->descriptor.bNumConfigurations)
if (new_device_descriptor->bNumConfigurations > dev->descriptor.bNumConfigurations)
dev_dbg(&dev->dev, "error too large bNumConfigurations: %d\n",
new_device_descriptor.bNumConfigurations);
new_device_descriptor->bNumConfigurations);
else
memcpy(&dev->descriptor, &new_device_descriptor, sizeof(dev->descriptor));
memcpy(&dev->descriptor, new_device_descriptor, sizeof(dev->descriptor));
kfree(new_device_descriptor);
err = usb_reset_configuration(dev);
if (err < 0)
dev_dbg(&dev->dev, "error usb_reset_configuration: %d\n", err);
@@ -938,7 +943,7 @@ static void mbox2_setup_48_24_magic(struct usb_device *dev)
static int snd_usb_mbox2_boot_quirk(struct usb_device *dev)
{
struct usb_host_config *config = dev->actconfig;
struct usb_device_descriptor new_device_descriptor;
struct usb_device_descriptor *new_device_descriptor = NULL;
int err;
u8 bootresponse[0x12];
int fwsize;
@@ -973,15 +978,21 @@ static int snd_usb_mbox2_boot_quirk(struct usb_device *dev)
dev_dbg(&dev->dev, "device initialised!\n");
new_device_descriptor = kmalloc(sizeof(*new_device_descriptor), GFP_KERNEL);
if (!new_device_descriptor)
return -ENOMEM;
err = usb_get_descriptor(dev, USB_DT_DEVICE, 0,
&new_device_descriptor, sizeof(new_device_descriptor));
new_device_descriptor, sizeof(*new_device_descriptor));
if (err < 0)
dev_dbg(&dev->dev, "error usb_get_descriptor: %d\n", err);
if (new_device_descriptor.bNumConfigurations > dev->descriptor.bNumConfigurations)
if (new_device_descriptor->bNumConfigurations > dev->descriptor.bNumConfigurations)
dev_dbg(&dev->dev, "error too large bNumConfigurations: %d\n",
new_device_descriptor.bNumConfigurations);
new_device_descriptor->bNumConfigurations);
else
memcpy(&dev->descriptor, &new_device_descriptor, sizeof(dev->descriptor));
memcpy(&dev->descriptor, new_device_descriptor, sizeof(dev->descriptor));
kfree(new_device_descriptor);
err = usb_reset_configuration(dev);
if (err < 0)

View file

@@ -357,7 +357,6 @@ static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
reg &= kvm_pmu_valid_counter_mask(vcpu);
}