android_kernel_xiaomi_sm8350/mm/migrate.c
Michael Bestas 73abf253d5
Merge tag 'ASB-2024-06-05_11-5.4' of https://android.googlesource.com/kernel/common into android13-5.4-lahaina
https://source.android.com/docs/security/bulletin/2024-06-01
CVE-2024-26926

* tag 'ASB-2024-06-05_11-5.4' of https://android.googlesource.com/kernel/common:
  ANDROID: ABI fixup for abi break in struct dst_ops
  BACKPORT: net: fix __dst_negative_advice() race
  UPSTREAM: selftests: timers: Fix valid-adjtimex signed left-shift undefined behavior
  Revert "timers: Rename del_timer_sync() to timer_delete_sync()"
  Reapply "media: ttpci: fix two memleaks in budget_av_attach"
  Revert "media: rename VFL_TYPE_GRABBER to _VIDEO"
  Revert "media: media/pci: rename VFL_TYPE_GRABBER to _VIDEO"
  Revert "media: ttpci: fix two memleaks in budget_av_attach"
  Revert "net: ip_tunnel: make sure to pull inner header in ip_tunnel_rcv()"
  Revert "regmap: allow to define reg_update_bits for no bus configuration"
  Revert "regmap: Add bulk read/write callbacks into regmap_config"
  Revert "serial: max310x: fix IO data corruption in batched operations"
  Revert "geneve: make sure to pull inner header in geneve_rx()"
  Linux 5.4.274
  firmware: meson_sm: fix to avoid potential NULL pointer dereference
  ip_gre: do not report erspan version on GRE interface
  erspan: Check IFLA_GRE_ERSPAN_VER is set.
  VMCI: Fix possible memcpy() run-time warning in vmci_datagram_invoke_guest_handler()
  Bluetooth: btintel: Fixe build regression
  x86/alternative: Don't call text_poke() in lazy TLB mode
  drm/i915/gt: Reset queue_priority_hint on parking
  x86/mm/pat: fix VM_PAT handling in COW mappings
  virtio: reenable config if freezing device failed
  drm/vkms: call drm_atomic_helper_shutdown before drm_dev_put()
  tty: n_gsm: require CAP_NET_ADMIN to attach N_GSM0710 ldisc
  netfilter: nf_tables: discard table flag update with pending basechain deletion
  netfilter: nf_tables: release mutex after nft_gc_seq_end from abort path
  netfilter: nf_tables: release batch on table validation from abort path
  netfilter: nf_tables: reject new basechain after table flag update
  fbmon: prevent division by zero in fb_videomode_from_videomode()
  fbdev: viafb: fix typo in hw_bitblt_1 and hw_bitblt_2
  usb: sl811-hcd: only defined function checkdone if QUIRK2 is defined
  usb: typec: tcpci: add generic tcpci fallback compatible
  tools: iio: replace seekdir() in iio_generic_buffer
  ktest: force $buildonly = 1 for 'make_warnings_file' test type
  Input: allocate keycode for Display refresh rate toggle
  block: prevent division by zero in blk_rq_stat_sum()
  Revert "ACPI: PM: Block ASUS B1400CEAE from suspend to idle by default"
  SUNRPC: increase size of rpc_wait_queue.qlen from unsigned short to unsigned int
  drm/amd/display: Fix nanosec stat overflow
  media: sta2x11: fix irq handler cast
  isofs: handle CDs with bad root inode but good Joliet root directory
  scsi: lpfc: Fix possible memory leak in lpfc_rcv_padisc()
  sysv: don't call sb_bread() with pointers_lock held
  Input: synaptics-rmi4 - fail probing if memory allocation for "phys" fails
  Bluetooth: btintel: Fix null ptr deref in btintel_read_version
  btrfs: send: handle path ref underflow in header iterate_inode_ref()
  btrfs: export: handle invalid inode or root reference in btrfs_get_parent()
  btrfs: handle chunk tree lookup error in btrfs_relocate_sys_chunks()
  tools/power x86_energy_perf_policy: Fix file leak in get_pkg_num()
  ionic: set adminq irq affinity
  arm64: dts: rockchip: fix rk3399 hdmi ports node
  arm64: dts: rockchip: fix rk3328 hdmi ports node
  panic: Flush kernel log buffer at the end
  VMCI: Fix memcpy() run-time warning in dg_dispatch_as_host()
  wifi: ath9k: fix LNA selection in ath_ant_try_scan()
  s390/entry: align system call table on 8 bytes
  x86/mce: Make sure to grab mce_sysfs_mutex in set_bank()
  ALSA: hda/realtek: Update Panasonic CF-SZ6 quirk to support headset with microphone
  ata: sata_mv: Fix PCI device ID table declaration compilation warning
  scsi: mylex: Fix sysfs buffer lengths
  ata: sata_sx4: fix pdc20621_get_from_dimm() on 64-bit
  ASoC: ops: Fix wraparound for mask in snd_soc_get_volsw
  net: ravb: Always process TX descriptor ring
  erspan: make sure erspan_base_hdr is present in skb->head
  erspan: Add type I version 0 support.
  init: open /initrd.image with O_LARGEFILE
  initramfs: switch initramfs unpacking to struct file based APIs
  fs: add a vfs_fchmod helper
  fs: add a vfs_fchown helper
  staging: vc04_services: fix information leak in create_component()
  staging: vc04_services: changen strncpy() to strscpy_pad()
  staging: mmal-vchiq: Fix client_component for 64 bit kernel
  staging: mmal-vchiq: Allocate and free components as required
  i40e: fix vf may be used uninitialized in this function warning
  ipv6: Fix infinite recursion in fib6_dump_done().
  selftests: reuseaddr_conflict: add missing new line at the end of the output
  net: stmmac: fix rx queue priority assignment
  net/sched: act_skbmod: prevent kernel-infoleak
  bpf, sockmap: Prevent lock inversion deadlock in map delete elem
  netfilter: nf_tables: Fix potential data-race in __nft_flowtable_type_get()
  netfilter: nf_tables: flush pending destroy work before exit_net release
  mm, vmscan: prevent infinite loop for costly GFP_NOIO | __GFP_RETRY_MAYFAIL allocations
  Revert "x86/mm/ident_map: Use gbpages only where full GB page should be mapped."
  vfio/platform: Create persistent IRQ handlers
  vfio/pci: Create persistent INTx handler
  vfio: Introduce interface to flush virqfd inject workqueue
  vfio/pci: Lock external INTx masking ops
  vfio/pci: Disable auto-enable of exclusive INTx IRQ
  net/rds: fix possible cp null dereference
  netfilter: nf_tables: disallow timeout for anonymous sets
  Bluetooth: Fix TOCTOU in HCI debugfs implementation
  Bluetooth: hci_event: set the conn encrypted before conn establishes
  x86/cpufeatures: Add new word for scattered features
  r8169: fix issue caused by buggy BIOS on certain boards with RTL8168d
  dm integrity: fix out-of-range warning
  tcp: properly terminate timers for kernel sockets
  ixgbe: avoid sleeping allocation in ixgbe_ipsec_vf_add_sa()
  nfc: nci: Fix uninit-value in nci_dev_up and nci_ntf_packet
  USB: core: Fix deadlock in usb_deauthorize_interface()
  scsi: lpfc: Correct size for wqe for memset()
  x86/cpu: Enable STIBP on AMD if Automatic IBRS is enabled
  scsi: qla2xxx: Fix command flush on cable pull
  usb: udc: remove warning when queue disabled ep
  usb: dwc2: gadget: LPM flow fix
  usb: dwc2: host: Fix ISOC flow in DDMA mode
  usb: dwc2: host: Fix hibernation flow
  usb: dwc2: host: Fix remote wakeup from hibernation
  scsi: core: Fix unremoved procfs host directory regression
  ALSA: sh: aica: reorder cleanup operations to avoid UAF bugs
  usb: cdc-wdm: close race between read and workqueue
  mmc: core: Avoid negative index with array access
  mmc: core: Initialize mmc_blk_ioc_data
  exec: Fix NOMMU linux_binprm::exec in transfer_args_to_stack()
  wifi: mac80211: check/clear fast rx for non-4addr sta VLAN changes
  mm/migrate: set swap entry values of THP tail pages properly.
  mm/memory-failure: fix an incorrect use of tail pages
  vt: fix memory overlapping when deleting chars in the buffer
  bounds: support non-power-of-two CONFIG_NR_CPUS
  powerpc: xor_vmx: Add '-mhard-float' to CFLAGS
  efivarfs: Request at most 512 bytes for variable names
  perf/core: Fix reentry problem in perf_output_read_group()
  loop: loop_set_status_from_info() check before assignment
  loop: Check for overflow while configuring loop
  loop: Factor out configuring loop from status
  loop: Refactor loop_set_status() size calculation
  loop: Factor out setting loop device size
  loop: Remove sector_t truncation checks
  loop: Call loop_config_discard() only after new config is applied
  Revert "loop: Check for overflow while configuring loop"
  btrfs: allocate btrfs_ioctl_defrag_range_args on stack
  printk: Update @console_may_schedule in console_trylock_spinning()
  xen/events: close evtchn after mapping cleanup
  x86/speculation: Support intra-function call validation
  objtool: Add support for intra-function calls
  objtool: is_fentry_call() crashes if call has no destination
  fs/aio: Check IOCB_AIO_RW before the struct aio_kiocb conversion
  vt: fix unicode buffer corruption when deleting characters
  tty: serial: fsl_lpuart: avoid idle preamble pending if CTS is enabled
  usb: port: Don't try to peer unused USB ports based on location
  usb: gadget: ncm: Fix handling of zero block length packets
  USB: usb-storage: Prevent divide-by-0 error in isd200_ata_command
  ALSA: hda/realtek - Fix headset Mic no show at resume back for Lenovo ALC897 platform
  xfrm: Avoid clang fortify warning in copy_to_user_tmpl()
  netfilter: nf_tables: reject constant set with timeout
  netfilter: nf_tables: disallow anonymous set with timeout flag
  netfilter: nf_tables: mark set as dead when unbinding anonymous set with timeout
  comedi: comedi_test: Prevent timers rescheduling during deletion
  dm snapshot: fix lockup in dm_exception_table_exit
  ahci: asm1064: asm1166: don't limit reported ports
  ahci: asm1064: correct count of reported ports
  x86/CPU/AMD: Update the Zenbleed microcode revisions
  nilfs2: prevent kernel bug at submit_bh_wbc()
  nilfs2: use a more common logging style
  nilfs2: fix failure to detect DAT corruption in btree and direct mappings
  memtest: use {READ,WRITE}_ONCE in memory scanning
  drm/vc4: hdmi: do not return negative values from .get_modes()
  drm/imx/ipuv3: do not return negative values from .get_modes()
  drm/exynos: do not return negative values from .get_modes()
  s390/zcrypt: fix reference counting on zcrypt card objects
  soc: fsl: qbman: Use raw spinlock for cgr_lock
  soc: fsl: qbman: Add CGR update function
  soc: fsl: qbman: Add helper for sanity checking cgr ops
  soc: fsl: qbman: Always disable interrupts when taking cgr_lock
  ring-buffer: Fix full_waiters_pending in poll
  ring-buffer: Fix resetting of shortest_full
  vfio/platform: Disable virqfds on cleanup
  kbuild: Move -Wenum-{compare-conditional,enum-conversion} into W=1
  speakup: Fix 8bit characters from direct synth
  slimbus: core: Remove usage of the deprecated ida_simple_xx() API
  nvmem: meson-efuse: fix function pointer type mismatch
  firmware: meson_sm: Rework driver as a proper platform driver
  ext4: fix corruption during on-line resize
  hwmon: (amc6821) add of_match table
  mmc: core: Fix switch on gp3 partition
  dm-raid: fix lockdep waring in "pers->hot_add_disk"
  Revert "Revert "md/raid5: Wait for MD_SB_CHANGE_PENDING in raid5d""
  PCI/PM: Drain runtime-idle callbacks before driver removal
  PCI: Drop pci_device_remove() test of pci_dev->driver
  btrfs: fix off-by-one chunk length calculation at contains_pending_extent()
  fuse: don't unhash root
  mmc: tmio: avoid concurrent runs of mmc_request_done()
  PM: sleep: wakeirq: fix wake irq warning in system suspend
  USB: serial: cp210x: add pid/vid for TDK NC0110013M and MM0110113M
  USB: serial: option: add MeiG Smart SLM320 product
  USB: serial: cp210x: add ID for MGP Instruments PDS100
  USB: serial: add device ID for VeriFone adapter
  USB: serial: ftdi_sio: add support for GMC Z216C Adapter IR-USB
  powerpc/fsl: Fix mfpmr build errors with newer binutils
  clk: qcom: mmcc-msm8974: fix terminating of frequency table arrays
  clk: qcom: mmcc-apq8084: fix terminating of frequency table arrays
  clk: qcom: gcc-ipq8074: fix terminating of frequency table arrays
  PM: suspend: Set mem_sleep_current during kernel command line setup
  parisc: Strip upper 32 bit of sum in csum_ipv6_magic for 64-bit builds
  parisc: Fix csum_ipv6_magic on 64-bit systems
  parisc: Fix csum_ipv6_magic on 32-bit systems
  parisc: Fix ip_fast_csum
  parisc: Do not hardcode registers in checksum functions
  mtd: rawnand: meson: fix scrambling mode value in command macro
  ubi: correct the calculation of fastmap size
  ubi: Check for too small LEB size in VTBL code
  ubifs: Set page uptodate in the correct place
  fat: fix uninitialized field in nostale filehandles
  ext4: correct best extent lstart adjustment logic
  selftests/mqueue: Set timeout to 180 seconds
  crypto: qat - resolve race condition during AER recovery
  crypto: qat - fix double free during reset
  sparc: vDSO: fix return value of __setup handler
  sparc64: NMI watchdog: fix return value of __setup handler
  KVM: Always flush async #PF workqueue when vCPU is being destroyed
  media: xc4000: Fix atomicity violation in xc4000_get_frequency
  serial: max310x: fix NULL pointer dereference in I2C instantiation
  arm: dts: marvell: Fix maxium->maxim typo in brownstone dts
  ARM: dts: mmp2-brownstone: Don't redeclare phandle references
  smack: Handle SMACK64TRANSMUTE in smack_inode_setsecurity()
  smack: Set SMACK64TRANSMUTE only for dirs in smack_inode_setxattr()
  clk: qcom: gcc-sdm845: Add soft dependency on rpmhpd
  media: staging: ipu3-imgu: Set fields before media_entity_pads_init()
  wifi: brcmfmac: Fix use-after-free bug in brcmf_cfg80211_detach
  timers: Rename del_timer_sync() to timer_delete_sync()
  timers: Use del_timer_sync() even on UP
  timers: Update kernel-doc for various functions
  x86/bugs: Use sysfs_emit()
  x86/cpu: Support AMD Automatic IBRS
  Documentation/hw-vuln: Update spectre doc
  amdkfd: use calloc instead of kzalloc to avoid integer overflow
  Linux 5.4.273
  regmap: Add missing map->bus check
  spi: spi-mt65xx: Fix NULL pointer access in interrupt handler
  bpf: report RCU QS in cpumap kthread
  rcu: add a helper to report consolidated flavor QS
  netfilter: nf_tables: do not compare internal table flags on updates
  ARM: dts: sun8i-h2-plus-bananapi-m2-zero: add regulator nodes vcc-dram and vcc1v2
  octeontx2-af: Use separate handlers for interrupts
  net/bnx2x: Prevent access to a freed page in page_pool
  hsr: Handle failures in module init
  rds: introduce acquire/release ordering in acquire/release_in_xmit()
  packet: annotate data-races around ignore_outgoing
  hsr: Fix uninit-value access in hsr_get_node()
  s390/vtime: fix average steal time calculation
  octeontx2-af: Use matching wake_up API variant in CGX command interface
  usb: gadget: net2272: Use irqflags in the call to net2272_probe_fin
  staging: greybus: fix get_channel_from_mode() failure path
  serial: 8250_exar: Don't remove GPIO device on suspend
  rtc: mt6397: select IRQ_DOMAIN instead of depending on it
  kconfig: fix infinite loop when expanding a macro at the end of file
  tty: serial: samsung: fix tx_empty() to return TIOCSER_TEMT
  serial: max310x: fix syntax error in IRQ error message
  tty: vt: fix 20 vs 0x20 typo in EScsiignore
  afs: Revert "afs: Hide silly-rename files from userspace"
  NFS: Fix an off by one in root_nfs_cat()
  watchdog: stm32_iwdg: initialize default timeout
  net: sunrpc: Fix an off by one in rpc_sockaddr2uaddr()
  scsi: bfa: Fix function pointer type mismatch for hcb_qe->cbfn
  RDMA/device: Fix a race between mad_client and cm_client init
  scsi: csiostor: Avoid function pointer casts
  ALSA: usb-audio: Stop parsing channels bits when all channels are found.
  clk: Fix clk_core_get NULL dereference
  sparc32: Fix section mismatch in leon_pci_grpci
  backlight: lp8788: Fully initialize backlight_properties during probe
  backlight: lm3639: Fully initialize backlight_properties during probe
  backlight: da9052: Fully initialize backlight_properties during probe
  backlight: lm3630a: Don't set bl->props.brightness in get_brightness
  backlight: lm3630a: Initialize backlight_properties on init
  powerpc/embedded6xx: Fix no previous prototype for avr_uart_send() etc.
  drm/msm/dpu: add division of drm_display_mode's hskew parameter
  powerpc/hv-gpci: Fix the H_GET_PERF_COUNTER_INFO hcall return value checks
  drm/mediatek: Fix a null pointer crash in mtk_drm_crtc_finish_page_flip
  media: ttpci: fix two memleaks in budget_av_attach
  media: media/pci: rename VFL_TYPE_GRABBER to _VIDEO
  media: rename VFL_TYPE_GRABBER to _VIDEO
  media: v4l2-core: correctly validate video and metadata ioctls
  media: go7007: fix a memleak in go7007_load_encoder
  media: dvb-frontends: avoid stack overflow warnings with clang
  media: pvrusb2: fix uaf in pvr2_context_set_notify
  drm/amdgpu: Fix missing break in ATOM_ARG_IMM Case of atom_get_src_int()
  ASoC: meson: axg-tdm-interface: fix mclk setup without mclk-fs
  mtd: rawnand: lpc32xx_mlc: fix irq handler prototype
  mtd: maps: physmap-core: fix flash size larger than 32-bit
  crypto: arm/sha - fix function cast warnings
  mfd: altera-sysmgr: Call of_node_put() only when of_parse_phandle() takes a ref
  mfd: syscon: Call of_node_put() only when of_parse_phandle() takes a ref
  drm/tegra: put drm_gem_object ref on error in tegra_fb_create
  clk: hisilicon: hi3519: Release the correct number of gates in hi3519_clk_unregister()
  PCI: Mark 3ware-9650SE Root Port Extended Tags as broken
  drm/mediatek: dsi: Fix DSI RGB666 formats and definitions
  clk: qcom: dispcc-sdm845: Adjust internal GDSC wait times
  media: pvrusb2: fix pvr2_stream_callback casts
  media: pvrusb2: remove redundant NULL check
  media: go7007: add check of return value of go7007_read_addr()
  media: imx: csc/scaler: fix v4l2_ctrl_handler memory leak
  perf stat: Avoid metric-only segv
  ALSA: seq: fix function cast warnings
  drm/radeon/ni: Fix wrong firmware size logging in ni_init_microcode()
  perf thread_map: Free strlist on normal path in thread_map__new_by_tid_str()
  PCI: switchtec: Fix an error handling path in switchtec_pci_probe()
  quota: Fix rcu annotations of inode dquot pointers
  quota: Fix potential NULL pointer dereference
  quota: simplify drop_dquot_ref()
  clk: qcom: reset: Ensure write completion on reset de/assertion
  clk: qcom: reset: Commonize the de/assert functions
  clk: qcom: reset: support resetting multiple bits
  clk: qcom: reset: Allow specifying custom reset delay
  media: edia: dvbdev: fix a use-after-free
  media: v4l2-mem2mem: fix a memleak in v4l2_m2m_register_entity
  media: v4l2-tpg: fix some memleaks in tpg_alloc
  media: em28xx: annotate unchecked call to media_device_register()
  perf evsel: Fix duplicate initialization of data->id in evsel__parse_sample()
  drm/amd/display: Fix potential NULL pointer dereferences in 'dcn10_set_output_transfer_func()'
  perf record: Fix possible incorrect free in record__switch_output()
  PCI/DPC: Print all TLP Prefixes, not just the first
  media: tc358743: register v4l2 async device only after successful setup
  dmaengine: tegra210-adma: Update dependency to ARCH_TEGRA
  drm/rockchip: lvds: do not overwrite error code
  drm: Don't treat 0 as -1 in drm_fixp2int_ceil
  drm/rockchip: inno_hdmi: Fix video timing
  drm/tegra: output: Fix missing i2c_put_adapter() in the error handling paths of tegra_output_probe()
  drm/tegra: dsi: Fix missing pm_runtime_disable() in the error handling path of tegra_dsi_probe()
  drm/tegra: dsi: Fix some error handling paths in tegra_dsi_probe()
  drm/tegra: dsi: Make use of the helper function dev_err_probe()
  gpu: host1x: mipi: Update tegra_mipi_request() to be node based
  drm/tegra: dsi: Add missing check for of_find_device_by_node
  dm: call the resume method on internal suspend
  dm raid: fix false positive for requeue needed during reshape
  nfp: flower: handle acti_netdevs allocation failure
  net/x25: fix incorrect parameter validation in the x25_getsockopt() function
  net: kcm: fix incorrect parameter validation in the kcm_getsockopt) function
  udp: fix incorrect parameter validation in the udp_lib_getsockopt() function
  l2tp: fix incorrect parameter validation in the pppol2tp_getsockopt() function
  tcp: fix incorrect parameter validation in the do_tcp_getsockopt() function
  net: hns3: fix port duplex configure error in IMP reset
  net: ip_tunnel: make sure to pull inner header in ip_tunnel_rcv()
  ipv6: fib6_rules: flush route cache when rule is changed
  bpf: Fix stackmap overflow check on 32-bit arches
  bpf: Fix hashtab overflow check on 32-bit arches
  sr9800: Add check for usbnet_get_endpoints
  Bluetooth: hci_core: Fix possible buffer overflow
  Bluetooth: Remove superfluous call to hci_conn_check_pending()
  igb: Fix missing time sync events
  igb: move PEROUT and EXTTS isr logic to separate functions
  mmc: wmt-sdmmc: remove an incorrect release_mem_region() call in the .remove function
  SUNRPC: fix some memleaks in gssx_dec_option_array
  x86, relocs: Ignore relocations in .notes section
  ACPI: scan: Fix device check notification handling
  ARM: dts: imx6dl-yapp4: Move the internal switch PHYs under the switch node
  ARM: dts: imx6dl-yapp4: Fix typo in the QCA switch register address
  ARM: dts: imx6dl-yapp4: Move phy reset into switch node
  ARM: dts: arm: realview: Fix development chip ROM compatible value
  net: ena: Remove ena_select_queue
  net: ena: cosmetic: fix line break issues
  wifi: brcmsmac: avoid function pointer casts
  iommu/amd: Mark interrupt as managed
  bus: tegra-aconnect: Update dependency to ARCH_TEGRA
  ACPI: processor_idle: Fix memory leak in acpi_processor_power_exit()
  arm64: dts: qcom: msm8996: Pad addresses
  arm64: dts: qcom: msm8996: Move regulator consumers to db820c
  arm64: dts: qcom: msm8996: Use node references in db820c
  arm64: dts: qcom: db820c: Move non-soc entries out of /soc
  bpf: Mark bpf_spin_{lock,unlock}() helpers with notrace correctly
  bpf: Factor out bpf_spin_lock into helpers.
  bpf: Add typecast to bpf helpers to help BTF generation
  arm64: dts: mediatek: mt7622: add missing "device_type" to memory nodes
  wifi: libertas: fix some memleaks in lbs_allocate_cmd_buffer()
  net: blackhole_dev: fix build warning for ethh set but not used
  af_unix: Annotate data-race of gc_in_progress in wait_for_unix_gc().
  sock_diag: annotate data-races around sock_diag_handlers[family]
  wifi: mwifiex: debugfs: Drop unnecessary error check for debugfs_create_dir()
  wifi: wilc1000: fix RCU usage in connect path
  wifi: wilc1000: fix declarations ordering
  wifi: b43: Disable QoS for bcm4331
  wifi: b43: Stop correct queue in DMA worker when QoS is disabled
  b43: main: Fix use true/false for bool type
  wifi: b43: Stop/wake correct queue in PIO Tx path when QoS is disabled
  wifi: b43: Stop/wake correct queue in DMA Tx path when QoS is disabled
  b43: dma: Fix use true/false for bool type variable
  wifi: ath10k: fix NULL pointer dereference in ath10k_wmi_tlv_op_pull_mgmt_tx_compl_ev()
  timekeeping: Fix cross-timestamp interpolation for non-x86
  timekeeping: Fix cross-timestamp interpolation corner case decision
  timekeeping: Fix cross-timestamp interpolation on counter wrap
  aoe: fix the potential use-after-free problem in aoecmd_cfg_pkts
  fs/select: rework stack allocation hack for clang
  nbd: null check for nla_nest_start
  do_sys_name_to_handle(): use kzalloc() to fix kernel-infoleak
  ASoC: wm8962: Fix up incorrect error message in wm8962_set_fll
  ASoC: wm8962: Enable both SPKOUTR_ENA and SPKOUTL_ENA in mono mode
  ASoC: wm8962: Enable oscillator if selecting WM8962_FLL_OSC
  Input: gpio_keys_polled - suppress deferred probe error for gpio
  ASoC: Intel: bytcr_rt5640: Add an extra entry for the Chuwi Vi8 tablet
  firewire: core: use long bus reset on gap count error
  Bluetooth: rfcomm: Fix null-ptr-deref in rfcomm_check_security
  scsi: mpt3sas: Prevent sending diag_reset when the controller is ready
  btrfs: fix data race at btrfs_use_block_rsv() when accessing block reserve
  dm-verity, dm-crypt: align "struct bvec_iter" correctly
  block: sed-opal: handle empty atoms when parsing response
  parisc/ftrace: add missing CONFIG_DYNAMIC_FTRACE check
  net/iucv: fix the allocation size of iucv_path_table array
  RDMA/mlx5: Relax DEVX access upon modify commands
  HID: multitouch: Add required quirk for Synaptics 0xcddc device
  MIPS: Clear Cause.BD in instruction_pointer_set
  x86/xen: Add some null pointer checking to smp.c
  ASoC: rt5645: Make LattePanda board DMI match more precise
  selftests: tls: use exact comparison in recv_partial
  io_uring: drop any code related to SCM_RIGHTS
  io_uring/unix: drop usage of io_uring socket
  UPSTREAM: arm64: dts: qcom: sdm845: fix USB DP/DM HS PHY interrupts
  UPSTREAM: arm64: dts: qcom: add PDC interrupt controller for SDM845
  Linux 5.4.272
  arm64: dts: qcom: sdm845: fix USB DP/DM HS PHY interrupts
  arm64: dts: qcom: add PDC interrupt controller for SDM845
  serial: max310x: fix IO data corruption in batched operations
  serial: max310x: implement I2C support
  serial: max310x: make accessing revision id interface-agnostic
  regmap: Add bulk read/write callbacks into regmap_config
  regmap: allow to define reg_update_bits for no bus configuration
  serial: max310x: Unprepare and disable clock in error path
  getrusage: use sig->stats_lock rather than lock_task_sighand()
  getrusage: use __for_each_thread()
  getrusage: move thread_group_cputime_adjusted() outside of lock_task_sighand()
  getrusage: add the "signal_struct *sig" local variable
  y2038: rusage: use __kernel_old_timeval
  hv_netvsc: Register VF in netvsc_probe if NET_DEVICE_REGISTER missed
  hv_netvsc: use netif_is_bond_master() instead of open code
  hv_netvsc: Make netvsc/VF binding check both MAC and serial number
  Input: i8042 - fix strange behavior of touchpad on Clevo NS70PU
  serial: max310x: prevent infinite while() loop in port startup
  serial: max310x: use a separate regmap for each port
  serial: max310x: use regmap methods for SPI batch operations
  serial: max310x: Make use of device properties
  serial: max310x: fail probe if clock crystal is unstable
  serial: max310x: Try to get crystal clock rate from property
  serial: max310x: Use devm_clk_get_optional() to get the input clock
  um: allow not setting extra rpaths in the linux binary
  selftests: mm: fix map_hugetlb failure on 64K page size systems
  netrom: Fix data-races around sysctl_net_busy_read
  netrom: Fix a data-race around sysctl_netrom_link_fails_count
  netrom: Fix a data-race around sysctl_netrom_routing_control
  netrom: Fix a data-race around sysctl_netrom_transport_no_activity_timeout
  netrom: Fix a data-race around sysctl_netrom_transport_requested_window_size
  netrom: Fix a data-race around sysctl_netrom_transport_busy_delay
  netrom: Fix a data-race around sysctl_netrom_transport_acknowledge_delay
  netrom: Fix a data-race around sysctl_netrom_transport_maximum_tries
  netrom: Fix a data-race around sysctl_netrom_transport_timeout
  netrom: Fix data-races around sysctl_netrom_network_ttl_initialiser
  netrom: Fix a data-race around sysctl_netrom_obsolescence_count_initialiser
  netrom: Fix a data-race around sysctl_netrom_default_path_quality
  netfilter: nf_conntrack_h323: Add protection for bmp length out of range
  netfilter: nft_ct: fix l3num expectations with inet pseudo family
  net/rds: fix WARNING in rds_conn_connect_if_down
  net/ipv6: avoid possible UAF in ip6_route_mpath_notify()
  net: ice: Fix potential NULL pointer dereference in ice_bridge_setlink()
  geneve: make sure to pull inner header in geneve_rx()
  ixgbe: {dis, en}able irqs in ixgbe_txrx_ring_{dis, en}able
  net: lan78xx: fix runtime PM count underflow on link stop
  lan78xx: Fix race conditions in suspend/resume handling
  lan78xx: Fix partial packet errors on suspend/resume
  lan78xx: Add missing return code checks
  lan78xx: Fix white space and style issues
  Linux 5.4.271
  gpio: 74x164: Enable output pins after registers are reset
  fs,hugetlb: fix NULL pointer dereference in hugetlbs_fill_super
  cachefiles: fix memory leak in cachefiles_add_cache()
  x86/cpu/intel: Detect TME keyid bits before setting MTRR mask registers
  mmc: core: Fix eMMC initialization with 1-bit bus connection
  dmaengine: fsl-qdma: init irq after reg initialization
  dmaengine: fsl-qdma: fix SoC may hang on 16 byte unaligned read
  btrfs: dev-replace: properly validate device names
  wifi: nl80211: reject iftype change with mesh ID change
  gtp: fix use-after-free and null-ptr-deref in gtp_newlink()
  afs: Fix endless loop in directory parsing
  ALSA: Drop leftover snd-rtctimer stuff from Makefile
  power: supply: bq27xxx-i2c: Do not free non existing IRQ
  efi/capsule-loader: fix incorrect allocation size
  rtnetlink: fix error logic of IFLA_BRIDGE_FLAGS writing back
  netfilter: nf_tables: allow NFPROTO_INET in nft_(match/target)_validate()
  Bluetooth: Enforce validation on max value of connection interval
  Bluetooth: hci_event: Fix handling of HCI_EV_IO_CAPA_REQUEST
  Bluetooth: Avoid potential use-after-free in hci_error_reset
  net: usb: dm9601: fix wrong return value in dm9601_mdio_read
  lan78xx: enable auto speed configuration for LAN7850 if no EEPROM is detected
  ipv6: fix potential "struct net" leak in inet6_rtm_getaddr()
  tun: Fix xdp_rxq_info's queue_index when detaching
  net: ip_tunnel: prevent perpetual headroom growth
  netlink: Fix kernel-infoleak-after-free in __skb_datagram_iter
  ANDROID: GKI: update .xml file due to USB changes in 5.4.270
  Revert "bpf: Add map and need_defer parameters to .map_fd_put_ptr()"
  Revert "hrtimer: Report offline hrtimer enqueue"
  Revert "drm/mipi-dsi: Fix detach call without attach"
  Linux 5.4.270
  scripts/bpf: Fix xdp_md forward declaration typo
  fs/aio: Restrict kiocb_set_cancel_fn() to I/O submitted via libaio
  drm/syncobj: call drm_syncobj_fence_add_wait when WAIT_AVAILABLE flag is set
  drm/syncobj: make lockdep complain on WAIT_FOR_SUBMIT v3
  netfilter: nf_tables: set dormant flag on hook register failure
  tls: stop recv() if initial process_rx_list gave us non-DATA
  tls: rx: drop pointless else after goto
  tls: rx: jump to a more appropriate label
  s390: use the correct count for __iowrite64_copy()
  packet: move from strlcpy with unused retval to strscpy
  ipv6: sr: fix possible use-after-free and null-ptr-deref
  afs: Increase buffer size in afs_update_volume_status()
  ipv6: properly combine dev_base_seq and ipv6.dev_addr_genid
  ipv4: properly combine dev_base_seq and ipv4.dev_addr_genid
  nouveau: fix function cast warnings
  scsi: jazz_esp: Only build if SCSI core is builtin
  bpf, scripts: Correct GPL license name
  scripts/bpf: teach bpf_helpers_doc.py to dump BPF helper definitions
  RDMA/srpt: fix function pointer cast warnings
  RDMA/srpt: Make debug output more detailed
  RDMA/bnxt_re: Return error for SRQ resize
  IB/hfi1: Fix a memleak in init_credit_return
  usb: roles: don't get/set_role() when usb_role_switch is unregistered
  usb: gadget: ncm: Avoid dropping datagrams of properly parsed NTBs
  usb: cdns3: fix memory double free when handle zero packet
  usb: cdns3: fixed memory use after free at cdns3_gadget_ep_disable()
  ARM: ep93xx: Add terminator to gpiod_lookup_table
  l2tp: pass correct message length to ip6_append_data
  PCI/MSI: Prevent MSI hardware interrupt number truncation
  gtp: fix use-after-free and null-ptr-deref in gtp_genl_dump_pdp()
  dm-crypt: don't modify the data when using authenticated encryption
  IB/hfi1: Fix sdma.h tx->num_descs off-by-one error
  PCI: tegra: Fix OF node reference leak
  PCI: tegra: Fix reporting GPIO error value
  arm64: dts: qcom: msm8916: Fix typo in pronto remoteproc node
  drm/amdgpu: Fix type of second parameter in trans_msg() callback
  iomap: Set all uptodate bits for an Uptodate page
  dm-integrity: don't modify bio's immutable bio_vec in integrity_metadata()
  x86/alternatives: Disable KASAN in apply_alternatives()
  drm/amdgpu: Check for valid number of registers to read
  Revert "drm/sun4i: dsi: Change the start delay calculation"
  ALSA: hda/realtek - Enable micmute LED on and HP system
  selftests/bpf: Avoid running unprivileged tests with alignment requirements
  net: bridge: clear bridge's private skb space on xmit
  spi: mt7621: Fix an error message in mt7621_spi_probe()
  pinctrl: rockchip: Fix refcount leak in rockchip_pinctrl_parse_groups
  pinctrl: pinctrl-rockchip: Fix a bunch of kerneldoc misdemeanours
  tcp: add annotations around sk->sk_shutdown accesses
  tcp: return EPOLLOUT from tcp_poll only when notsent_bytes is half the limit
  tcp: factor out __tcp_close() helper
  pmdomain: renesas: r8a77980-sysc: CR7 must be always on
  s390/qeth: Fix potential loss of L3-IP@ in case of network issues
  virtio-blk: Ensure no requests in virtqueues before deleting vqs.
  firewire: core: send bus reset promptly on gap count error
  scsi: lpfc: Use unsigned type for num_sge
  hwmon: (coretemp) Enlarge per package core count limit
  nvmet-fc: abort command when there is no binding
  netfilter: conntrack: check SCTP_CID_SHUTDOWN_ACK for vtag setting in sctp_new
  ASoC: sunxi: sun4i-spdif: Add support for Allwinner H616
  nvmet-tcp: fix nvme tcp ida memory leak
  regulator: pwm-regulator: Add validity checks in continuous .get_voltage
  ext4: avoid allocating blocks from corrupted group in ext4_mb_find_by_goal()
  ext4: avoid allocating blocks from corrupted group in ext4_mb_try_best_found()
  ahci: add 43-bit DMA address quirk for ASMedia ASM1061 controllers
  ahci: asm1166: correct count of reported ports
  fbdev: sis: Error out if pixclock equals zero
  fbdev: savage: Error out if pixclock equals zero
  wifi: mac80211: fix race condition on enabling fast-xmit
  wifi: cfg80211: fix missing interfaces when dumping
  dmaengine: fsl-qdma: increase size of 'irq_name'
  dmaengine: shdma: increase size of 'dev_id'
  scsi: target: core: Add TMF to tmr_list handling
  sched/rt: Disallow writing invalid values to sched_rt_period_us
  sched/rt: Fix sysctl_sched_rr_timeslice intial value
  userfaultfd: fix mmap_changing checking in mfill_atomic_hugetlb
  nilfs2: replace WARN_ONs for invalid DAT metadata block requests
  memcg: add refcnt for pcpu stock to avoid UAF problem in drain_all_stock()
  sched/rt: sysctl_sched_rr_timeslice show default timeslice after reset
  net/sched: Retire dsmark qdisc
  net/sched: Retire ATM qdisc
  net/sched: Retire CBQ qdisc
  KVM: arm64: vgic-its: Test for valid IRQ in MOVALL handler
  KVM: arm64: vgic-its: Test for valid IRQ in its_sync_lpi_pending_table()
  Linux 5.4.269
  of: gpio unittest kfree() wrong object
  of: unittest: fix EXPECT text for gpio hog errors
  net: bcmgenet: Fix EEE implementation
  Revert "Revert "mtd: rawnand: gpmi: Fix setting busy timeout setting""
  netfilter: nf_tables: fix pointer math issue in nft_byteorder_eval()
  lsm: new security_file_ioctl_compat() hook
  drm/msm/dsi: Enable runtime PM
  PM: runtime: Have devm_pm_runtime_enable() handle pm_runtime_dont_use_autosuspend()
  PM: runtime: add devm_pm_runtime_enable helper
  nilfs2: fix potential bug in end_buffer_async_write
  sched/membarrier: reduce the ability to hammer on sys_membarrier
  net: prevent mss overflow in skb_segment()
  netfilter: ipset: Missing gc cancellations fixed
  netfilter: ipset: fix performance regression in swap operation
  KVM: arm64: vgic-its: Avoid potential UAF in LPI translation cache
  mips: Fix max_mapnr being uninitialized on early stages
  arch, mm: remove stale mentions of DISCONIGMEM
  bus: moxtet: Add spi device table
  Revert "md/raid5: Wait for MD_SB_CHANGE_PENDING in raid5d"
  tracing: Inform kmemleak of saved_cmdlines allocation
  pmdomain: core: Move the unused cleanup to a _sync initcall
  can: j1939: Fix UAF in j1939_sk_match_filter during setsockopt(SO_J1939_FILTER)
  irqchip/irq-brcmstb-l2: Add write memory barrier before exit
  nfp: flower: prevent re-adding mac index for bonded port
  nfp: use correct macro for LengthSelect in BAR config
  nilfs2: fix hang in nilfs_lookup_dirty_data_buffers()
  nilfs2: fix data corruption in dsync block recovery for small block sizes
  ALSA: hda/conexant: Add quirk for SWS JS201D
  mmc: slot-gpio: Allow non-sleeping GPIO ro
  x86/mm/ident_map: Use gbpages only where full GB page should be mapped.
  x86/Kconfig: Transmeta Crusoe is CPU family 5, not 6
  serial: max310x: improve crystal stable clock detection
  serial: max310x: set default value when reading clock ready bit
  ring-buffer: Clean ring_buffer_poll_wait() error return
  iio: magnetometer: rm3100: add boundary check for the value read from RM3100_REG_TMRC
  staging: iio: ad5933: fix type mismatch regression
  tracing: Fix wasted memory in saved_cmdlines logic
  ext4: fix double-free of blocks due to wrong extents moved_len
  misc: fastrpc: Mark all sessions as invalid in cb_remove
  binder: signal epoll threads of self-work
  ALSA: hda/realtek: Enable headset mic on Vaio VJFE-ADL
  xen-netback: properly sync TX responses
  nfc: nci: free rx_data_reassembly skb on NCI device cleanup
  kbuild: Fix changing ELF file type for output of gen_btf for big endian
  firewire: core: correct documentation of fw_csr_string() kernel API
  scsi: Revert "scsi: fcoe: Fix potential deadlock on &fip->ctlr_lock"
  i2c: i801: Fix block process call transactions
  i2c: i801: Remove i801_set_block_buffer_mode
  usb: f_mass_storage: forbid async queue when shutdown happen
  USB: hub: check for alternate port before enabling A_ALT_HNP_SUPPORT
  HID: wacom: Do not register input devices until after hid_hw_start
  HID: wacom: generic: Avoid reporting a serial of '0' to userspace
  mm/writeback: fix possible divide-by-zero in wb_dirty_limits(), again
  tracing/trigger: Fix to return error if failed to alloc snapshot
  i40e: Fix waiting for queues of all VSIs to be disabled
  MIPS: Add 'memory' clobber to csum_ipv6_magic() inline assembler
  ASoC: rt5645: Fix deadlock in rt5645_jack_detect_work()
  spi: ppc4xx: Drop write-only variable
  of: unittest: Fix compile in the non-dynamic case
  of: unittest: add overlay gpio test to catch gpio hog problem
  btrfs: send: return EOPNOTSUPP on unknown flags
  btrfs: forbid deleting live subvol qgroup
  btrfs: forbid creating subvol qgroups
  netfilter: nft_set_rbtree: skip end interval element from gc
  net: stmmac: xgmac: fix a typo of register name in DPP safety handling
  net: stmmac: xgmac: use #define for string constants
  vhost: use kzalloc() instead of kmalloc() followed by memset()
  Input: atkbd - skip ATKBD_CMD_SETLEDS when skipping ATKBD_CMD_GETID
  hrtimer: Report offline hrtimer enqueue
  USB: serial: cp210x: add ID for IMST iM871A-USB
  USB: serial: option: add Fibocom FM101-GL variant
  USB: serial: qcserial: add new usb-id for Dell Wireless DW5826e
  net/af_iucv: clean up a try_then_request_module()
  netfilter: nft_ct: reject direction for ct id
  netfilter: nft_compat: restrict match/target protocol to u16
  netfilter: nft_compat: reject unused compat flag
  ppp_async: limit MRU to 64K
  tipc: Check the bearer type before calling tipc_udp_nl_bearer_add()
  rxrpc: Fix response to PING RESPONSE ACKs to a dead call
  inet: read sk->sk_family once in inet_recv_error()
  hwmon: (coretemp) Fix bogus core_id to attr name mapping
  hwmon: (coretemp) Fix out-of-bounds memory access
  hwmon: (aspeed-pwm-tacho) mutex for tach reading
  atm: idt77252: fix a memleak in open_card_ubr0
  selftests: net: avoid just another constant wait
  net: stmmac: xgmac: fix handling of DPP safety error for DMA channels
  phy: ti: phy-omap-usb2: Fix NULL pointer dereference for SRP
  dmaengine: fix is_slave_direction() return false when DMA_DEV_TO_DEV
  phy: renesas: rcar-gen3-usb2: Fix returning wrong error code
  dmaengine: fsl-qdma: Fix a memory leak related to the queue command DMA
  dmaengine: fsl-qdma: Fix a memory leak related to the status queue DMA
  bonding: remove print in bond_verify_device_path
  HID: apple: Add 2021 magic keyboard FN key mapping
  HID: apple: Swap the Fn and Left Control keys on Apple keyboards
  HID: apple: Add support for the 2021 Magic Keyboard
  net: sysfs: Fix /sys/class/net/<iface> path
  af_unix: fix lockdep positive in sk_diag_dump_icons()
  net: ipv4: fix a memleak in ip_setup_cork
  netfilter: nft_ct: sanitize layer 3 and 4 protocol number in custom expectations
  netfilter: nf_log: replace BUG_ON by WARN_ON_ONCE when putting logger
  llc: call sock_orphan() at release time
  ipv6: Ensure natural alignment of const ipv6 loopback and router addresses
  ixgbe: Fix an error handling path in ixgbe_read_iosf_sb_reg_x550()
  ixgbe: Refactor overtemp event handling
  ixgbe: Refactor returning internal error codes
  ixgbe: Remove non-inclusive language
  net: remove unneeded break
  scsi: isci: Fix an error code problem in isci_io_request_build()
  wifi: cfg80211: fix RCU dereference in __cfg80211_bss_update
  perf: Fix the nr_addr_filters fix
  drm/amdgpu: Release 'adev->pm.fw' before return in 'amdgpu_device_need_post()'
  ceph: fix deadlock or deadcode of misusing dget()
  blk-mq: fix IO hang from sbitmap wakeup race
  virtio_net: Fix "‘%d’ directive writing between 1 and 11 bytes into a region of size 10" warnings
  libsubcmd: Fix memory leak in uniq()
  PCI/AER: Decode Requester ID when no error info found
  fs/kernfs/dir: obey S_ISGID
  usb: hub: Replace hardcoded quirk value with BIT() macro
  PCI: switchtec: Fix stdev_release() crash after surprise hot remove
  PCI: Only override AMD USB controller if required
  mfd: ti_am335x_tscadc: Fix TI SoC dependencies
  i3c: master: cdns: Update maximum prescaler value for i2c clock
  um: net: Fix return type of uml_net_start_xmit()
  um: Don't use vfprintf() for os_info()
  um: Fix naming clash between UML and scheduler
  leds: trigger: panic: Don't register panic notifier if creating the trigger failed
  drm/amdgpu: Drop 'fence' check in 'to_amdgpu_amdkfd_fence()'
  drm/amdgpu: Let KFD sync with VM fences
  clk: mmp: pxa168: Fix memory leak in pxa168_clk_init()
  clk: hi3620: Fix memory leak in hi3620_mmc_clk_init()
  drm/msm/dpu: Ratelimit framedone timeout msgs
  media: ddbridge: fix an error code problem in ddb_probe
  IB/ipoib: Fix mcast list locking
  drm/exynos: Call drm_atomic_helper_shutdown() at shutdown/unbind time
  ALSA: hda: Intel: add HDA_ARL PCI ID support
  PCI: add INTEL_HDA_ARL to pci_ids.h
  media: rockchip: rga: fix swizzling for RGB formats
  media: stk1160: Fixed high volume of stk1160_dbg messages
  drm/mipi-dsi: Fix detach call without attach
  drm/framebuffer: Fix use of uninitialized variable
  drm/drm_file: fix use of uninitialized variable
  RDMA/IPoIB: Fix error code return in ipoib_mcast_join
  fast_dput(): handle underflows gracefully
  ASoC: doc: Fix undefined SND_SOC_DAPM_NOPM argument
  f2fs: fix to check return value of f2fs_reserve_new_block()
  wifi: cfg80211: free beacon_ies when overridden from hidden BSS
  wifi: rtlwifi: rtl8723{be,ae}: using calculate_bit_shift()
  wifi: rtl8xxxu: Add additional USB IDs for RTL8192EU devices
  arm64: dts: qcom: msm8998: Fix 'out-ports' is a required property
  arm64: dts: qcom: msm8996: Fix 'in-ports' is a required property
  md: Whenassemble the array, consult the superblock of the freshest device
  block: prevent an integer overflow in bvec_try_merge_hw_page
  ARM: dts: imx23/28: Fix the DMA controller node name
  ARM: dts: imx23-sansa: Use preferred i2c-gpios properties
  ARM: dts: imx27-apf27dev: Fix LED name
  ARM: dts: imx25/27: Pass timing0
  ARM: dts: imx1: Fix sram node
  ARM: dts: imx27: Fix sram node
  ARM: dts: imx: Use flash@0,0 pattern
  ARM: dts: imx25/27-eukrea: Fix RTC node name
  ARM: dts: rockchip: fix rk3036 hdmi ports node
  scsi: libfc: Fix up timeout error in fc_fcp_rec_error()
  scsi: libfc: Don't schedule abort twice
  bpf: Add map and need_defer parameters to .map_fd_put_ptr()
  wifi: ath9k: Fix potential array-index-out-of-bounds read in ath9k_htc_txstatus()
  ARM: dts: imx7s: Fix nand-controller #size-cells
  ARM: dts: imx7s: Fix lcdif compatible
  ARM: dts: imx7d: Fix coresight funnel ports
  bonding: return -ENOMEM instead of BUG in alb_upper_dev_walk
  PCI: Add no PM reset quirk for NVIDIA Spectrum devices
  scsi: lpfc: Fix possible file string name overflow when updating firmware
  selftests/bpf: Fix pyperf180 compilation failure with clang18
  selftests/bpf: satisfy compiler by having explicit return in btf test
  wifi: rt2x00: restart beacon queue when hardware reset
  ext4: avoid online resizing failures due to oversized flex bg
  ext4: remove unnecessary check from alloc_flex_gd()
  ext4: unify the type of flexbg_size to unsigned int
  ext4: fix inconsistent between segment fstrim and full fstrim
  ecryptfs: Reject casefold directory inodes
  SUNRPC: Fix a suspicious RCU usage warning
  KVM: s390: fix setting of fpc register
  s390/ptrace: handle setting of fpc register correctly
  jfs: fix array-index-out-of-bounds in diNewExt
  rxrpc_find_service_conn_rcu: fix the usage of read_seqbegin_or_lock()
  afs: fix the usage of read_seqbegin_or_lock() in afs_find_server*()
  crypto: stm32/crc32 - fix parsing list of devices
  pstore/ram: Fix crash when setting number of cpus to an odd number
  jfs: fix uaf in jfs_evict_inode
  jfs: fix array-index-out-of-bounds in dbAdjTree
  jfs: fix slab-out-of-bounds Read in dtSearch
  UBSAN: array-index-out-of-bounds in dtSplitRoot
  FS:JFS:UBSAN:array-index-out-of-bounds in dbAdjTree
  ACPI: extlog: fix NULL pointer dereference check
  PNP: ACPI: fix fortify warning
  ACPI: video: Add quirk for the Colorful X15 AT 23 Laptop
  audit: Send netlink ACK before setting connection in auditd_set
  regulator: core: Only increment use_count when enable_count changes
  perf/core: Fix narrow startup race when creating the perf nr_addr_filters sysfs file
  x86/mce: Mark fatal MCE's page as poison to avoid panic in the kdump kernel
  powerpc/lib: Validate size for vector operations
  powerpc: pmd_move_must_withdraw() is only needed for CONFIG_TRANSPARENT_HUGEPAGE
  powerpc/mm: Fix build failures due to arch_reserved_kernel_pages()
  powerpc: Fix build error due to is_valid_bugaddr()
  powerpc/mm: Fix null-pointer dereference in pgtable_cache_add
  x86/entry/ia32: Ensure s32 is sign extended to s64
  tick/sched: Preserve number of idle sleeps across CPU hotplug events
  mips: Call lose_fpu(0) before initializing fcr31 in mips_set_personality_nan
  spi: bcm-qspi: fix SFDP BFPT read by usig mspi read
  gpio: eic-sprd: Clear interrupt after set the interrupt type
  drm/exynos: gsc: minor fix for loop iteration in gsc_runtime_resume
  drm/exynos: fix accidental on-stack copy of exynos_drm_plane
  drm/bridge: nxp-ptn3460: simplify some error checking
  drm/bridge: nxp-ptn3460: fix i2c_master_send() error checking
  drm: Don't unref the same fb many times by mistake due to deadlock handling
  gpiolib: acpi: Ignore touchpad wakeup on GPD G1619-04
  netfilter: nf_tables: reject QUEUE/DROP verdict parameters
  rbd: don't move requests to the running list on errors
  btrfs: defrag: reject unknown flags of btrfs_ioctl_defrag_range_args
  btrfs: don't warn if discard range is not aligned to sector
  btrfs: tree-checker: fix inline ref size in error messages
  btrfs: ref-verify: free ref cache before clearing mount opt
  net: fec: fix the unhandled context fault from smmu
  fjes: fix memleaks in fjes_hw_setup
  netfilter: nf_tables: validate NFPROTO_* family
  netfilter: nf_tables: restrict anonymous set and map names to 16 bytes
  net/mlx5e: fix a double-free in arfs_create_groups
  net/mlx5: Use kfree(ft->g) in arfs_create_groups()
  net/mlx5: DR, Use the right GVMI number for drop action
  netlink: fix potential sleeping issue in mqueue_flush_file
  tcp: Add memory barrier to tcp_push()
  afs: Hide silly-rename files from userspace
  tracing: Ensure visibility when inserting an element into tracing_map
  net/rds: Fix UBSAN: array-index-out-of-bounds in rds_cmsg_recv
  llc: Drop support for ETH_P_TR_802_2.
  llc: make llc_ui_sendmsg() more robust against bonding changes
  vlan: skip nested type that is not IFLA_VLAN_QOS_MAPPING
  net/smc: fix illegal rmb_desc access in SMC-D connection dump
  x86/CPU/AMD: Fix disabling XSAVES on AMD family 0x17 due to erratum
  powerpc: Use always instead of always-y in for crtsavres.o
  fs: move S_ISGID stripping into the vfs_*() helpers
  fs: add mode_strip_sgid() helper
  mtd: spinand: macronix: Fix MX35LFxGE4AD page size
  block: Remove special-casing of compound pages
  rename(): fix the locking of subdirectories
  ubifs: ubifs_symlink: Fix memleak of inode->i_link in error path
  nouveau/vmm: don't set addr on the fail path to avoid warning
  mmc: core: Use mrq.sbc in close-ended ffu
  arm64: dts: qcom: sdm845: fix USB wakeup interrupt types
  parisc/firmware: Fix F-extend for PDC addresses
  rpmsg: virtio: Free driver_override when rpmsg_remove()
  hwrng: core - Fix page fault dead lock on mmap-ed hwrng
  PM: hibernate: Enforce ordering during image compression/decompression
  crypto: api - Disallow identical driver names
  ext4: allow for the last group to be marked as trimmed
  serial: sc16is7xx: add check for unsupported SPI modes during probe
  spi: introduce SPI_MODE_X_MASK macro
  serial: sc16is7xx: set safe default SPI clock frequency
  units: add the HZ macros
  units: change from 'L' to 'UL'
  units: Add Watt units
  include/linux/units.h: add helpers for kelvin to/from Celsius conversion
  PCI: mediatek: Clear interrupt status before dispatching handler

 Conflicts:
	include/linux/timer.h
	mm/memory-failure.c

Change-Id: I4974903c79ecddc3d9225b0b723a30b6c83ef572
2024-06-22 17:58:09 +03:00

// SPDX-License-Identifier: GPL-2.0
/*
 * Memory Migration functionality - linux/mm/migrate.c
 *
 * Copyright (C) 2006 Silicon Graphics, Inc., Christoph Lameter
 *
 * Page migration was first developed in the context of the memory hotplug
 * project. The main authors of the migration code are:
 *
 * IWAMOTO Toshihiro <iwamoto@valinux.co.jp>
 * Hirokazu Takahashi <taka@valinux.co.jp>
 * Dave Hansen <haveblue@us.ibm.com>
 * Christoph Lameter
 */

#include <linux/migrate.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/topology.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/writeback.h>
#include <linux/mempolicy.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/pagewalk.h>
#include <linux/pfn_t.h>
#include <linux/memremap.h>
#include <linux/userfaultfd_k.h>
#include <linux/balloon_compaction.h>
#include <linux/mmu_notifier.h>
#include <linux/page_idle.h>
#include <linux/page_owner.h>
#include <linux/sched/mm.h>
#include <linux/ptrace.h>

#include <asm/tlbflush.h>

#define CREATE_TRACE_POINTS
#include <trace/events/migrate.h>

#include "internal.h"

/*
 * migrate_prep() needs to be called before we start compiling a list of pages
 * to be migrated using isolate_lru_page(). If scheduling work on other CPUs is
 * undesirable, use migrate_prep_local()
 */
int migrate_prep(void)
{
	/*
	 * Clear the LRU lists so pages can be isolated.
	 * Note that pages may be moved off the LRU after we have
	 * drained them. Those pages will fail to migrate like other
	 * pages that may be busy.
	 */
	lru_add_drain_all();

	return 0;
}

/* Do the necessary work of migrate_prep but not if it involves other CPUs */
int migrate_prep_local(void)
{
	lru_add_drain();

	return 0;
}

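/*
 * Typical call pattern (illustrative sketch; new_page() stands for a
 * caller-supplied new_page_t allocation callback, not a function defined
 * in this file): drain the LRU lists, isolate the pages to move onto a
 * private list, hand that list to migrate_pages(), and put back whatever
 * could not be migrated:
 *
 *	LIST_HEAD(pagelist);
 *
 *	migrate_prep();
 *	if (!isolate_lru_page(page))
 *		list_add_tail(&page->lru, &pagelist);
 *	if (migrate_pages(&pagelist, new_page, NULL, 0,
 *			  MIGRATE_SYNC, MR_SYSCALL))
 *		putback_movable_pages(&pagelist);
 */
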
int isolate_movable_page(struct page *page, isolate_mode_t mode)
{
	struct address_space *mapping;

	/*
	 * Avoid burning cycles with pages that are yet under __free_pages(),
	 * or just got freed under us.
	 *
	 * In case we 'win' a race for a movable page being freed under us and
	 * raise its refcount preventing __free_pages() from doing its job,
	 * the put_page() at the end of this block will take care of
	 * releasing this page, thus avoiding a nasty leakage.
	 */
	if (unlikely(!get_page_unless_zero(page)))
		goto out;

	/*
	 * Check PageMovable before holding a PG_lock because the page's owner
	 * assumes that nobody touches the PG_lock of a newly allocated page,
	 * so unconditionally grabbing the lock would ruin the owner's side.
	 */
	if (unlikely(!__PageMovable(page)))
		goto out_putpage;
	/*
	 * As movable pages are not isolated from LRU lists, concurrent
	 * compaction threads can race against page migration functions
	 * as well as against the release of a page.
	 *
	 * In order to avoid having an already isolated movable page
	 * being (wrongly) re-isolated while it is under migration,
	 * or to avoid attempting to isolate pages being released,
	 * let's be sure we have the page lock
	 * before proceeding with the movable page isolation steps.
	 */
	if (unlikely(!trylock_page(page)))
		goto out_putpage;

	if (!PageMovable(page) || PageIsolated(page))
		goto out_no_isolated;

	mapping = page_mapping(page);
	VM_BUG_ON_PAGE(!mapping, page);

	if (!mapping->a_ops->isolate_page(page, mode))
		goto out_no_isolated;

	/* Driver shouldn't use PG_isolated bit of page->flags */
	WARN_ON_ONCE(PageIsolated(page));
	__SetPageIsolated(page);
	unlock_page(page);

	return 0;

out_no_isolated:
	unlock_page(page);
out_putpage:
	put_page(page);
out:
	return -EBUSY;
}

/* It should be called on a page which is PG_movable */
void putback_movable_page(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	VM_BUG_ON_PAGE(!PageIsolated(page), page);

	mapping = page_mapping(page);
	mapping->a_ops->putback_page(page);
	__ClearPageIsolated(page);
}

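/*
 * Driver side of the non-LRU movable page contract (illustrative sketch;
 * the foo_* names are hypothetical): the page's owner publishes the
 * isolate/putback callbacks used above through its
 * address_space_operations and marks each page movable:
 *
 *	static const struct address_space_operations foo_aops = {
 *		.isolate_page	= foo_isolate_page,
 *		.migratepage	= foo_migratepage,
 *		.putback_page	= foo_putback_page,
 *	};
 *	...
 *	__SetPageMovable(page, mapping);
 *
 * where mapping->a_ops points at foo_aops.
 */
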
/*
 * Put previously isolated pages back onto the appropriate lists
 * from where they were once taken off for compaction/migration.
 *
 * This function shall be used whenever the isolated pageset has been
 * built from LRU, balloon or hugetlbfs pages. See isolate_migratepages_range()
 * and isolate_huge_page().
 */
void putback_movable_pages(struct list_head *l)
{
	struct page *page;
	struct page *page2;

	list_for_each_entry_safe(page, page2, l, lru) {
		if (unlikely(PageHuge(page))) {
			putback_active_hugepage(page);
			continue;
		}
		list_del(&page->lru);
		/*
		 * We isolated a non-LRU movable page, so here we can use
		 * __PageMovable because an LRU page's mapping cannot have
		 * PAGE_MAPPING_MOVABLE.
		 */
		if (unlikely(__PageMovable(page))) {
			VM_BUG_ON_PAGE(!PageIsolated(page), page);
			lock_page(page);
			if (PageMovable(page))
				putback_movable_page(page);
			else
				__ClearPageIsolated(page);
			unlock_page(page);
			put_page(page);
		} else {
			mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
					page_is_file_cache(page), -hpage_nr_pages(page));
			putback_lru_page(page);
		}
	}
}

/*
 * Restore a potential migration pte to a working pte entry
 */
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
		unsigned long addr, void *old)
{
	struct page_vma_mapped_walk pvmw = {
		.page = old,
		.vma = vma,
		.address = addr,
		.flags = PVMW_SYNC | PVMW_MIGRATION,
	};
	struct page *new;
	pte_t pte;
	swp_entry_t entry;

	VM_BUG_ON_PAGE(PageTail(page), page);
	while (page_vma_mapped_walk(&pvmw)) {
		if (PageKsm(page))
			new = page;
		else
			new = page - pvmw.page->index +
				linear_page_index(vma, pvmw.address);

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
		/* PMD-mapped THP migration entry */
		if (!pvmw.pte) {
			VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page);
			remove_migration_pmd(&pvmw, new);
			continue;
		}
#endif

		get_page(new);
		pte = pte_mkold(mk_pte(new, READ_ONCE(vma->vm_page_prot)));
		if (pte_swp_soft_dirty(*pvmw.pte))
			pte = pte_mksoft_dirty(pte);

		/*
		 * Recheck VMA as permissions can change since migration started
		 */
		entry = pte_to_swp_entry(*pvmw.pte);
		if (is_write_migration_entry(entry))
			pte = maybe_mkwrite(pte, vma->vm_flags);

		if (unlikely(is_zone_device_page(new))) {
			if (is_device_private_page(new)) {
				entry = make_device_private_entry(new, pte_write(pte));
				pte = swp_entry_to_pte(entry);
			}
		}

#ifdef CONFIG_HUGETLB_PAGE
		if (PageHuge(new)) {
			pte = pte_mkhuge(pte);
			pte = arch_make_huge_pte(pte, vma, new, 0);
			set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
			if (PageAnon(new))
				hugepage_add_anon_rmap(new, vma, pvmw.address);
			else
				page_dup_rmap(new, true);
		} else
#endif
		{
			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);

			if (PageAnon(new))
				page_add_anon_rmap(new, vma, pvmw.address, false);
			else
				page_add_file_rmap(new, false);
		}
		if (vma->vm_flags & VM_LOCKED && !PageTransCompound(new))
			mlock_vma_page(new);

		if (PageTransHuge(page) && PageMlocked(page))
			clear_page_mlock(page);

		/* No need to invalidate - it was non-present before */
		update_mmu_cache(vma, pvmw.address, pvmw.pte);
	}

	return true;
}

/*
 * Get rid of all migration entries and replace them by
 * references to the indicated page.
 */
void remove_migration_ptes(struct page *old, struct page *new, bool locked)
{
	struct rmap_walk_control rwc = {
		.rmap_one = remove_migration_pte,
		.arg = old,
	};

	if (locked)
		rmap_walk_locked(new, &rwc);
	else
		rmap_walk(new, &rwc);
}

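/*
 * Pairing note (sketch of how this is used elsewhere in this file):
 * during unmap, try_to_unmap(page, TTU_MIGRATION | ...) replaces each
 * mapping of "old" with a migration swap entry; once the data has been
 * copied, calling
 *
 *	remove_migration_ptes(old, new, false);
 *
 * walks the rmap of "new" and turns those entries back into working
 * ptes via remove_migration_pte() above.
 */
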
/*
* Something used the pte of a page under migration. We need to
* get to the page and wait until migration is finished.
* When we return from this function the fault will be retried.
*/
void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
spinlock_t *ptl)
{
pte_t pte;
swp_entry_t entry;
struct page *page;
spin_lock(ptl);
pte = *ptep;
if (!is_swap_pte(pte))
goto out;
entry = pte_to_swp_entry(pte);
if (!is_migration_entry(entry))
goto out;
page = migration_entry_to_page(entry);
page = compound_head(page);
/*
* Once page cache replacement of page migration started, page_count
* is zero; but we must not call put_and_wait_on_page_locked() without
* a ref. Use get_page_unless_zero(), and just fault again if it fails.
*/
if (!get_page_unless_zero(page))
goto out;
pte_unmap_unlock(ptep, ptl);
put_and_wait_on_page_locked(page);
return;
out:
pte_unmap_unlock(ptep, ptl);
}
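/*
 * Convenience wrapper for the common case: look up the pte lock and the
 * pte for @address under @pmd, then wait for any migration entry found
 * there via __migration_entry_wait().
 */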
void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd,
unsigned long address)
{
spinlock_t *ptl = pte_lockptr(mm, pmd);
pte_t *ptep = pte_offset_map(pmd, address);
__migration_entry_wait(mm, ptep, ptl);
}
void migration_entry_wait_huge(struct vm_area_struct *vma,
struct mm_struct *mm, pte_t *pte)
{
spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);
__migration_entry_wait(mm, pte, ptl);
}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
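/*
 * PMD flavour of migration_entry_wait(): take the pmd lock, grab a
 * reference on the page under migration and sleep until the migration
 * path unlocks the page.
 */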
void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
{
spinlock_t *ptl;
struct page *page;
ptl = pmd_lock(mm, pmd);
if (!is_pmd_migration_entry(*pmd))
goto unlock;
page = migration_entry_to_page(pmd_to_swp_entry(*pmd));
if (!get_page_unless_zero(page))
goto unlock;
spin_unlock(ptl);
put_and_wait_on_page_locked(page);
return;
unlock:
spin_unlock(ptl);
}
#endif
static int expected_page_refs(struct address_space *mapping, struct page *page)
{
int expected_count = 1;
/*
* Device private pages have an extra refcount as they are
* ZONE_DEVICE pages.
*/
expected_count += is_device_private_page(page);
if (mapping)
expected_count += hpage_nr_pages(page) + page_has_private(page);
return expected_count;
}
/*
* Replace the page in the mapping.
*
* The number of remaining references must be:
* 1 for anonymous pages without a mapping
* 2 for pages with a mapping
* 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
*/
int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page, int extra_count)
{
XA_STATE(xas, &mapping->i_pages, page_index(page));
struct zone *oldzone, *newzone;
int dirty;
int expected_count = expected_page_refs(mapping, page) + extra_count;
if (!mapping) {
/* Anonymous page without mapping */
if (page_count(page) != expected_count)
return -EAGAIN;
/* No turning back from here */
newpage->index = page->index;
newpage->mapping = page->mapping;
if (PageSwapBacked(page))
__SetPageSwapBacked(newpage);
return MIGRATEPAGE_SUCCESS;
}
oldzone = page_zone(page);
newzone = page_zone(newpage);
xas_lock_irq(&xas);
if (page_count(page) != expected_count || xas_load(&xas) != page) {
xas_unlock_irq(&xas);
return -EAGAIN;
}
if (!page_ref_freeze(page, expected_count)) {
xas_unlock_irq(&xas);
return -EAGAIN;
}
/*
* Now we know that no one else is looking at the page:
* no turning back from here.
*/
newpage->index = page->index;
newpage->mapping = page->mapping;
page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
if (PageSwapBacked(page)) {
__SetPageSwapBacked(newpage);
if (PageSwapCache(page)) {
int i;
SetPageSwapCache(newpage);
for (i = 0; i < (1 << compound_order(page)); i++)
set_page_private(newpage + i,
page_private(page + i));
}
} else {
VM_BUG_ON_PAGE(PageSwapCache(page), page);
}
/* Move dirty while page refs frozen and newpage not yet exposed */
dirty = PageDirty(page);
if (dirty) {
ClearPageDirty(page);
SetPageDirty(newpage);
}
xas_store(&xas, newpage);
if (PageTransHuge(page)) {
int i;
for (i = 1; i < HPAGE_PMD_NR; i++) {
xas_next(&xas);
xas_store(&xas, newpage);
}
}
/*
* Drop cache reference from old page by unfreezing
* to one less reference.
* We know this isn't the last reference.
*/
page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));
xas_unlock(&xas);
/* Leave irq disabled to prevent preemption while updating stats */
/*
* If moved to a different zone then also account
* the page for that zone. Other VM counters will be
* taken care of when we establish references to the
* new page and drop references to the old page.
*
* Note that anonymous pages are accounted for
* via NR_FILE_PAGES and NR_ANON_MAPPED if they
* are mapped to swap space.
*/
if (newzone != oldzone) {
__dec_node_state(oldzone->zone_pgdat, NR_FILE_PAGES);
__inc_node_state(newzone->zone_pgdat, NR_FILE_PAGES);
if (PageSwapBacked(page) && !PageSwapCache(page)) {
__dec_node_state(oldzone->zone_pgdat, NR_SHMEM);
__inc_node_state(newzone->zone_pgdat, NR_SHMEM);
}
if (dirty && mapping_cap_account_dirty(mapping)) {
__dec_node_state(oldzone->zone_pgdat, NR_FILE_DIRTY);
__dec_zone_state(oldzone, NR_ZONE_WRITE_PENDING);
__inc_node_state(newzone->zone_pgdat, NR_FILE_DIRTY);
__inc_zone_state(newzone, NR_ZONE_WRITE_PENDING);
}
}
local_irq_enable();
return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page_move_mapping);
/*
* The expected number of remaining references is the same as that
* of migrate_page_move_mapping().
*/
int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page)
{
XA_STATE(xas, &mapping->i_pages, page_index(page));
int expected_count;
xas_lock_irq(&xas);
expected_count = 2 + page_has_private(page);
if (page_count(page) != expected_count || xas_load(&xas) != page) {
xas_unlock_irq(&xas);
return -EAGAIN;
}
if (!page_ref_freeze(page, expected_count)) {
xas_unlock_irq(&xas);
return -EAGAIN;
}
newpage->index = page->index;
newpage->mapping = page->mapping;
get_page(newpage);
xas_store(&xas, newpage);
page_ref_unfreeze(page, expected_count - 1);
xas_unlock_irq(&xas);
return MIGRATEPAGE_SUCCESS;
}
/*
* Gigantic pages are so large that we do not guarantee that page++ pointer
* arithmetic will work across the entire page. We need something more
* specialized.
*/
static void __copy_gigantic_page(struct page *dst, struct page *src,
int nr_pages)
{
int i;
struct page *dst_base = dst;
struct page *src_base = src;
for (i = 0; i < nr_pages; ) {
cond_resched();
copy_highpage(dst, src);
i++;
dst = mem_map_next(dst, dst_base, i);
src = mem_map_next(src, src_base, i);
}
}
static void copy_huge_page(struct page *dst, struct page *src)
{
int i;
int nr_pages;
if (PageHuge(src)) {
/* hugetlbfs page */
struct hstate *h = page_hstate(src);
nr_pages = pages_per_huge_page(h);
if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
__copy_gigantic_page(dst, src, nr_pages);
return;
}
} else {
/* thp page */
BUG_ON(!PageTransHuge(src));
nr_pages = hpage_nr_pages(src);
}
for (i = 0; i < nr_pages; i++) {
cond_resched();
copy_highpage(dst + i, src + i);
}
}
/*
* Copy the page to its new location
*/
void migrate_page_states(struct page *newpage, struct page *page)
{
int cpupid;
if (PageError(page))
SetPageError(newpage);
if (PageReferenced(page))
SetPageReferenced(newpage);
if (PageUptodate(page))
SetPageUptodate(newpage);
if (TestClearPageActive(page)) {
VM_BUG_ON_PAGE(PageUnevictable(page), page);
SetPageActive(newpage);
} else if (TestClearPageUnevictable(page))
SetPageUnevictable(newpage);
if (PageWorkingset(page))
SetPageWorkingset(newpage);
if (PageChecked(page))
SetPageChecked(newpage);
if (PageMappedToDisk(page))
SetPageMappedToDisk(newpage);
/* Move dirty on pages not done by migrate_page_move_mapping() */
if (PageDirty(page))
SetPageDirty(newpage);
if (page_is_young(page))
set_page_young(newpage);
if (page_is_idle(page))
set_page_idle(newpage);
/*
* Copy NUMA information to the new page, to prevent over-eager
* future migrations of this same page.
*/
cpupid = page_cpupid_xchg_last(page, -1);
page_cpupid_xchg_last(newpage, cpupid);
ksm_migrate_page(newpage, page);
/*
* Please do not reorder this without considering how mm/ksm.c's
* get_ksm_page() depends upon ksm_migrate_page() and PageSwapCache().
*/
if (PageSwapCache(page))
ClearPageSwapCache(page);
ClearPagePrivate(page);
set_page_private(page, 0);
/*
* If any waiters have accumulated on the new page then
* wake them up.
*/
if (PageWriteback(newpage))
end_page_writeback(newpage);
copy_page_owner(page, newpage);
mem_cgroup_migrate(page, newpage);
}
EXPORT_SYMBOL(migrate_page_states);
void migrate_page_copy(struct page *newpage, struct page *page)
{
if (PageHuge(page) || PageTransHuge(page))
copy_huge_page(newpage, page);
else
copy_highpage(newpage, page);
migrate_page_states(newpage, page);
}
EXPORT_SYMBOL(migrate_page_copy);
/************************************************************
* Migration functions
***********************************************************/
/*
* Common logic to directly migrate a single LRU page suitable for
* pages that do not use PagePrivate/PagePrivate2.
*
* Pages are locked upon entry and exit.
*/
int migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page,
enum migrate_mode mode)
{
int rc;
BUG_ON(PageWriteback(page)); /* Writeback must be complete */
rc = migrate_page_move_mapping(mapping, newpage, page, 0);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
if (mode != MIGRATE_SYNC_NO_COPY)
migrate_page_copy(newpage, page);
else
migrate_page_states(newpage, page);
return MIGRATEPAGE_SUCCESS;
}
EXPORT_SYMBOL(migrate_page);
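/*
 * Illustrative sketch, not part of this file: a filesystem whose pages
 * carry no private data can typically wire this helper directly into its
 * address_space_operations; "example_aops" and the other callbacks are
 * made-up names:
 *
 *	static const struct address_space_operations example_aops = {
 *		.readpage	= example_readpage,
 *		.writepage	= example_writepage,
 *		.migratepage	= migrate_page,
 *	};
 */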
#ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */
static bool buffer_migrate_lock_buffers(struct buffer_head *head,
enum migrate_mode mode)
{
struct buffer_head *bh = head;
/* Simple case, sync compaction */
if (mode != MIGRATE_ASYNC) {
do {
lock_buffer(bh);
bh = bh->b_this_page;
} while (bh != head);
return true;
}
/* async case, we cannot block on lock_buffer so use trylock_buffer */
do {
if (!trylock_buffer(bh)) {
/*
* We failed to lock the buffer and cannot stall in
* async migration. Release the locks taken so far.
*/
struct buffer_head *failed_bh = bh;
bh = head;
while (bh != failed_bh) {
unlock_buffer(bh);
bh = bh->b_this_page;
}
return false;
}
bh = bh->b_this_page;
} while (bh != head);
return true;
}
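/*
 * Common helper for the two buffer_migrate_page*() variants below: move a
 * page with attached buffer heads to @newpage and transfer the buffer
 * heads along with it. With @check_refs set, additionally verify that no
 * buffer head is still referenced before moving anything.
 */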
static int __buffer_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode,
bool check_refs)
{
struct buffer_head *bh, *head;
int rc;
int expected_count;
if (!page_has_buffers(page))
return migrate_page(mapping, newpage, page, mode);
/* Check whether page does not have extra refs before we do more work */
expected_count = expected_page_refs(mapping, page);
if (page_count(page) != expected_count)
return -EAGAIN;
head = page_buffers(page);
if (!buffer_migrate_lock_buffers(head, mode))
return -EAGAIN;
if (check_refs) {
bool busy;
bool invalidated = false;
recheck_buffers:
busy = false;
spin_lock(&mapping->private_lock);
bh = head;
do {
if (atomic_read(&bh->b_count)) {
busy = true;
break;
}
bh = bh->b_this_page;
} while (bh != head);
if (busy) {
if (invalidated) {
rc = -EAGAIN;
goto unlock_buffers;
}
spin_unlock(&mapping->private_lock);
invalidate_bh_lrus();
invalidated = true;
goto recheck_buffers;
}
}
rc = migrate_page_move_mapping(mapping, newpage, page, 0);
if (rc != MIGRATEPAGE_SUCCESS)
goto unlock_buffers;
ClearPagePrivate(page);
set_page_private(newpage, page_private(page));
set_page_private(page, 0);
put_page(page);
get_page(newpage);
bh = head;
do {
set_bh_page(bh, newpage, bh_offset(bh));
bh = bh->b_this_page;
} while (bh != head);
SetPagePrivate(newpage);
if (mode != MIGRATE_SYNC_NO_COPY)
migrate_page_copy(newpage, page);
else
migrate_page_states(newpage, page);
rc = MIGRATEPAGE_SUCCESS;
unlock_buffers:
if (check_refs)
spin_unlock(&mapping->private_lock);
bh = head;
do {
unlock_buffer(bh);
bh = bh->b_this_page;
} while (bh != head);
return rc;
}
/*
* Migration function for pages with buffers. This function can only be used
* if the underlying filesystem guarantees that no other references to "page"
* exist. For example, attached buffer heads are accessed only under the
* page lock.
*/
int buffer_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
return __buffer_migrate_page(mapping, newpage, page, mode, false);
}
EXPORT_SYMBOL(buffer_migrate_page);
/*
* Same as above except that this variant is more careful and checks that there
* are also no buffer head references. This function is the right one for
* mappings where buffer heads are directly looked up and referenced (such as
* block device mappings).
*/
int buffer_migrate_page_norefs(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
return __buffer_migrate_page(mapping, newpage, page, mode, true);
}
#endif
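/*
 * Illustrative sketch with a made-up "example_blkdev_aops" name: mappings
 * where buffer heads are looked up and referenced directly, such as block
 * device mappings, would pick the stricter variant:
 *
 *	static const struct address_space_operations example_blkdev_aops = {
 *		...
 *		.migratepage	= buffer_migrate_page_norefs,
 *	};
 */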
/*
* Writeback a page to clean the dirty state
*/
static int writeout(struct address_space *mapping, struct page *page)
{
struct writeback_control wbc = {
.sync_mode = WB_SYNC_NONE,
.nr_to_write = 1,
.range_start = 0,
.range_end = LLONG_MAX,
.for_reclaim = 1
};
int rc;
if (!mapping->a_ops->writepage)
/* No write method for the address space */
return -EINVAL;
if (!clear_page_dirty_for_io(page))
/* Someone else already triggered a write */
return -EAGAIN;
/*
* A dirty page may imply that the underlying filesystem has
* the page on some queue. So the page must be clean for
* migration. Writeout may mean we lose the lock and the
* page state is no longer what we checked for earlier.
* At this point we know that the migration attempt cannot
* be successful.
*/
remove_migration_ptes(page, page, false);
rc = mapping->a_ops->writepage(page, &wbc);
if (rc != AOP_WRITEPAGE_ACTIVATE)
/* unlocked. Relock */
lock_page(page);
return (rc < 0) ? -EIO : -EAGAIN;
}
/*
* Default handling if a filesystem does not provide a migration function.
*/
static int fallback_migrate_page(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode)
{
if (PageDirty(page)) {
/* Only writeback pages in full synchronous migration */
switch (mode) {
case MIGRATE_SYNC:
case MIGRATE_SYNC_NO_COPY:
break;
default:
return -EBUSY;
}
return writeout(mapping, page);
}
/*
* Buffers may be managed in a filesystem specific way.
* We must have no buffers or drop them.
*/
if (page_has_private(page) &&
!try_to_release_page(page, GFP_KERNEL))
return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
return migrate_page(mapping, newpage, page, mode);
}
/*
* Move a page to a newly allocated page
* The page is locked and all ptes have been successfully removed.
*
* The new page will have replaced the old page if this function
* is successful.
*
* Return value:
* < 0 - error code
* MIGRATEPAGE_SUCCESS - success
*/
static int move_to_new_page(struct page *newpage, struct page *page,
enum migrate_mode mode)
{
struct address_space *mapping;
int rc = -EAGAIN;
bool is_lru = !__PageMovable(page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
mapping = page_mapping(page);
if (likely(is_lru)) {
if (!mapping)
rc = migrate_page(mapping, newpage, page, mode);
else if (mapping->a_ops->migratepage)
/*
* Most pages have a mapping and most filesystems
* provide a migratepage callback. Anonymous pages
* are part of swap space which also has its own
* migratepage callback. This is the most common path
* for page migration.
*/
rc = mapping->a_ops->migratepage(mapping, newpage,
page, mode);
else
rc = fallback_migrate_page(mapping, newpage,
page, mode);
} else {
/*
* In case of non-lru page, it could be released after
* isolation step. In that case, we shouldn't try migration.
*/
VM_BUG_ON_PAGE(!PageIsolated(page), page);
if (!PageMovable(page)) {
rc = MIGRATEPAGE_SUCCESS;
__ClearPageIsolated(page);
goto out;
}
rc = mapping->a_ops->migratepage(mapping, newpage,
page, mode);
WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
!PageIsolated(page));
}
/*
* When successful, old pagecache page->mapping must be cleared before
* page is freed; but stats require that PageAnon be left as PageAnon.
*/
if (rc == MIGRATEPAGE_SUCCESS) {
if (__PageMovable(page)) {
VM_BUG_ON_PAGE(!PageIsolated(page), page);
/*
* We clear PG_movable under page_lock so any compactor
* cannot try to migrate this page.
*/
__ClearPageIsolated(page);
}
/*
* Anonymous and movable page->mapping will be cleared by
* free_pages_prepare(), so don't reset it here; keeping it
* lets checks such as PageAnon() keep working.
*/
if (!PageMappingFlags(page))
page->mapping = NULL;
if (likely(!is_zone_device_page(newpage))) {
int i, nr = compound_nr(newpage);
for (i = 0; i < nr; i++)
flush_dcache_page(newpage + i);
}
}
out:
return rc;
}
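/*
 * Core of unmap_and_move(): lock both pages, replace @page's ptes with
 * migration entries, move the contents over to @newpage and then restore
 * the ptes (to @newpage on success, back to @page on failure).
 */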
static int __unmap_and_move(struct page *page, struct page *newpage,
int force, enum migrate_mode mode)
{
int rc = -EAGAIN;
int page_was_mapped = 0;
struct anon_vma *anon_vma = NULL;
bool is_lru = !__PageMovable(page);
if (!trylock_page(page)) {
if (!force || mode == MIGRATE_ASYNC)
goto out;
/*
* It's not safe for direct compaction to call lock_page.
* For example, during page readahead pages are added locked
* to the LRU. Later, when the IO completes the pages are
* marked uptodate and unlocked. However, the queueing
* could be merging multiple pages for one bio (e.g.
* mpage_readpages). If an allocation happens for the
* second or third page, the process can end up locking
* the same page twice and deadlocking. Rather than
* trying to be clever about what pages can be locked,
* avoid the use of lock_page for direct compaction
* altogether.
*/
if (current->flags & PF_MEMALLOC)
goto out;
lock_page(page);
}
if (PageWriteback(page)) {
/*
* Only in the case of a full synchronous migration is it
* necessary to wait for PageWriteback. In the async case,
* the retry loop is too short and in the sync-light case,
* the overhead of stalling is too much.
*/
switch (mode) {
case MIGRATE_SYNC:
case MIGRATE_SYNC_NO_COPY:
break;
default:
rc = -EBUSY;
goto out_unlock;
}
if (!force)
goto out_unlock;
wait_on_page_writeback(page);
}
/*
* By try_to_unmap(), page->mapcount goes down to 0 here. In this case,
* we cannot notice that anon_vma is freed while we migrate a page.
* This get_anon_vma() delays freeing the anon_vma pointer until the end
* of migration. File cache pages are no problem because of page_lock():
* file caches may use writepage() or lock_page() during migration, so
* only anonymous pages need this care.
*
* Only page_get_anon_vma() understands the subtleties of
* getting a hold on an anon_vma from outside one of its mms.
* But if we cannot get anon_vma, then we won't need it anyway,
* because that implies that the anon page is no longer mapped
* (and cannot be remapped so long as we hold the page lock).
*/
if (PageAnon(page) && !PageKsm(page))
anon_vma = page_get_anon_vma(page);
/*
* Block others from accessing the new page when we get around to
* establishing additional references. We are usually the only one
* holding a reference to newpage at this point. We used to have a BUG
* here if trylock_page(newpage) fails, but would like to allow for
* cases where there might be a race with the previous use of newpage.
* This is much like races on refcount of oldpage: just don't BUG().
*/
if (unlikely(!trylock_page(newpage)))
goto out_unlock;
if (unlikely(!is_lru)) {
rc = move_to_new_page(newpage, page, mode);
goto out_unlock_both;
}
/*
* Corner case handling:
* 1. When a new swap-cache page is read into, it is added to the LRU
* and treated as swapcache but it has no rmap yet.
* Calling try_to_unmap() against a page->mapping==NULL page will
* trigger a BUG. So handle it here.
* 2. An orphaned page (see truncate_complete_page) might have
* fs-private metadata. The page can be picked up due to memory
* offlining. Everywhere else except page reclaim, the page is
* invisible to the vm, so the page can not be migrated. So try to
* free the metadata, so the page can be freed.
*/
if (!page->mapping) {
VM_BUG_ON_PAGE(PageAnon(page), page);
if (page_has_private(page)) {
try_to_free_buffers(page);
goto out_unlock_both;
}
} else if (page_mapped(page)) {
/* Establish migration ptes */
VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
page);
try_to_unmap(page,
TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL);
page_was_mapped = 1;
}
if (!page_mapped(page))
rc = move_to_new_page(newpage, page, mode);
if (page_was_mapped)
remove_migration_ptes(page,
rc == MIGRATEPAGE_SUCCESS ? newpage : page, false);
out_unlock_both:
unlock_page(newpage);
out_unlock:
/* Drop an anon_vma reference if we took one */
if (anon_vma)
put_anon_vma(anon_vma);
unlock_page(page);
out:
/*
* If migration is successful, decrease refcount of the newpage
* which will not free the page because new page owner increased
* refcounter. As well, if it is LRU page, add the page to LRU
* list in here. Use the old state of the isolated source page to
* determine if we migrated a LRU page. newpage was already unlocked
* and possibly modified by its owner - don't rely on the page
* state.
*/
if (rc == MIGRATEPAGE_SUCCESS) {
if (unlikely(!is_lru))
put_page(newpage);
else
putback_lru_page(newpage);
}
return rc;
}
/*
* gcc 4.7 and 4.8 on arm get ICEs (internal compiler errors) when
* inlining unmap_and_move(). Work around it.
*/
#if defined(CONFIG_ARM) && \
defined(GCC_VERSION) && GCC_VERSION < 40900 && GCC_VERSION >= 40700
#define ICE_noinline noinline
#else
#define ICE_noinline
#endif
/*
* Obtain the lock on page, remove all ptes and migrate the page
* to the newly allocated page in newpage.
*/
static ICE_noinline int unmap_and_move(new_page_t get_new_page,
free_page_t put_new_page,
unsigned long private, struct page *page,
int force, enum migrate_mode mode,
enum migrate_reason reason)
{
int rc = MIGRATEPAGE_SUCCESS;
struct page *newpage;
if (!thp_migration_supported() && PageTransHuge(page))
return -ENOMEM;
newpage = get_new_page(page, private);
if (!newpage)
return -ENOMEM;
if (page_count(page) == 1) {
/* page was freed from under us. So we are done. */
ClearPageActive(page);
ClearPageUnevictable(page);
if (unlikely(__PageMovable(page))) {
lock_page(page);
if (!PageMovable(page))
__ClearPageIsolated(page);
unlock_page(page);
}
if (put_new_page)
put_new_page(newpage, private);
else
put_page(newpage);
goto out;
}
rc = __unmap_and_move(page, newpage, force, mode);
if (rc == MIGRATEPAGE_SUCCESS)
set_page_owner_migrate_reason(newpage, reason);
out:
if (rc != -EAGAIN) {
/*
* A page that has been migrated has all references
* removed and will be freed. A page that has not been
* migrated will have kept its references and be
* restored.
*/
list_del(&page->lru);
/*
* Compaction can also migrate non-LRU pages which are
* not accounted to NR_ISOLATED_*. They can be recognized
* as __PageMovable.
*/
if (likely(!__PageMovable(page)))
mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON +
page_is_file_cache(page), -hpage_nr_pages(page));
}
/*
* If migration is successful, release the reference grabbed during
* isolation. Otherwise, restore the page to the right list unless
* we want to retry.
*/
if (rc == MIGRATEPAGE_SUCCESS) {
put_page(page);
if (reason == MR_MEMORY_FAILURE) {
/*
* Set PG_HWPoison on just freed page
* intentionally. Although it's rather weird,
* it's how HWPoison flag works at the moment.
*/
if (set_hwpoison_free_buddy_page(page))
num_poisoned_pages_inc();
}
} else {
if (rc != -EAGAIN) {
if (likely(!__PageMovable(page))) {
putback_lru_page(page);
goto put_new;
}
lock_page(page);
if (PageMovable(page))
putback_movable_page(page);
else
__ClearPageIsolated(page);
unlock_page(page);
put_page(page);
}
put_new:
if (put_new_page)
put_new_page(newpage, private);
else
put_page(newpage);
}
return rc;
}
/*
* Counterpart of unmap_and_move() for hugepage migration.
*
* This function doesn't wait for the completion of hugepage I/O
* because there is no race between I/O and migration for hugepages.
* Note that currently hugepage I/O occurs only in direct I/O
* where no lock is held and PG_writeback is irrelevant,
* and the writeback status of all subpages is counted in the reference
* count of the head page (i.e. if all subpages of a 2MB hugepage are
* under direct I/O, the reference count of the head page is 512 and a bit more.)
* This means that when we try to migrate a hugepage whose subpages are
* doing direct I/O, some references remain after try_to_unmap() and
* hugepage migration fails without data corruption.
*
* There is also no race when direct I/O is issued on the page under migration,
* because then pte is replaced with migration swap entry and direct I/O code
* will wait in the page fault for migration to complete.
*/
static int unmap_and_move_huge_page(new_page_t get_new_page,
free_page_t put_new_page, unsigned long private,
struct page *hpage, int force,
enum migrate_mode mode, int reason)
{
int rc = -EAGAIN;
int page_was_mapped = 0;
struct page *new_hpage;
struct anon_vma *anon_vma = NULL;
/*
* Migratability of hugepages depends on architectures and their size.
* This check is necessary because some callers of hugepage migration
* like soft offline and memory hotremove don't walk through page
* tables or check whether the hugepage is pmd-based or not before
* kicking migration.
*/
if (!hugepage_migration_supported(page_hstate(hpage))) {
putback_active_hugepage(hpage);
return -ENOSYS;
}
new_hpage = get_new_page(hpage, private);
if (!new_hpage)
return -ENOMEM;
if (!trylock_page(hpage)) {
if (!force)
goto out;
switch (mode) {
case MIGRATE_SYNC:
case MIGRATE_SYNC_NO_COPY:
break;
default:
goto out;
}
lock_page(hpage);
}
/*
* Check for pages which are in the process of being freed. Without
* page_mapping() set, the hugetlbfs-specific move-page routine will not
* be called and we could leak usage counts for subpools.
*/
if (page_private(hpage) && !page_mapping(hpage)) {
rc = -EBUSY;
goto out_unlock;
}
if (PageAnon(hpage))
anon_vma = page_get_anon_vma(hpage);
if (unlikely(!trylock_page(new_hpage)))
goto put_anon;
if (page_mapped(hpage)) {
try_to_unmap(hpage,
TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL);
page_was_mapped = 1;
}
if (!page_mapped(hpage))
rc = move_to_new_page(new_hpage, hpage, mode);
if (page_was_mapped)
remove_migration_ptes(hpage,
rc == MIGRATEPAGE_SUCCESS ? new_hpage : hpage, false);
unlock_page(new_hpage);
put_anon:
if (anon_vma)
put_anon_vma(anon_vma);
if (rc == MIGRATEPAGE_SUCCESS) {
move_hugetlb_state(hpage, new_hpage, reason);
put_new_page = NULL;
}
out_unlock:
unlock_page(hpage);
out:
if (rc != -EAGAIN)
putback_active_hugepage(hpage);
/*
* If migration was not successful and there's a freeing callback, use
* it. Otherwise, putback_active_hugepage() will drop the reference
* grabbed during isolation.
*/
if (put_new_page)
put_new_page(new_hpage, private);
else
putback_active_hugepage(new_hpage);
return rc;
}
/*
* migrate_pages - migrate the pages specified in a list, to the free pages
* supplied as the target for the page migration
*
* @from: The list of pages to be migrated.
* @get_new_page: The function used to allocate free pages to be used
* as the target of the page migration.
* @put_new_page: The function used to free target pages if migration
* fails, or NULL if no special handling is necessary.
* @private: Private data to be passed on to get_new_page()
* @mode: The migration mode that specifies the constraints for
* page migration, if any.
* @reason: The reason for page migration.
*
* The function returns after 10 attempts or once no movable pages remain,
* either because the list has become empty or because no retryable pages
* are left. The caller should call putback_movable_pages() to return pages
* to the LRU or free list only if ret != 0.
*
* Returns the number of pages that were not migrated, or an error code.
*/
int migrate_pages(struct list_head *from, new_page_t get_new_page,
free_page_t put_new_page, unsigned long private,
enum migrate_mode mode, int reason)
{
int retry = 1;
int nr_failed = 0;
int nr_succeeded = 0;
int pass = 0;
struct page *page;
struct page *page2;
int swapwrite = current->flags & PF_SWAPWRITE;
int rc;
trace_mm_migrate_pages_start(mode, reason);
if (!swapwrite)
current->flags |= PF_SWAPWRITE;
for (pass = 0; pass < 10 && retry; pass++) {
retry = 0;
list_for_each_entry_safe(page, page2, from, lru) {
retry:
cond_resched();
if (PageHuge(page))
rc = unmap_and_move_huge_page(get_new_page,
put_new_page, private, page,
pass > 2, mode, reason);
else
rc = unmap_and_move(get_new_page, put_new_page,
private, page, pass > 2, mode,
reason);
switch (rc) {
case -ENOMEM:
/*
* THP migration might be unsupported or the
* allocation could've failed so we should
* retry on the same page with the THP split
* to base pages.
*
* Head page is retried immediately and tail
* pages are added to the tail of the list so
* we encounter them after the rest of the list
* is processed.
*/
if (PageTransHuge(page) && !PageHuge(page)) {
lock_page(page);
rc = split_huge_page_to_list(page, from);
unlock_page(page);
if (!rc) {
list_safe_reset_next(page, page2, lru);
goto retry;
}
}
nr_failed++;
goto out;
case -EAGAIN:
retry++;
break;
case MIGRATEPAGE_SUCCESS:
nr_succeeded++;
break;
default:
/*
* Permanent failure (-EBUSY, -ENOSYS, etc.):
* unlike -EAGAIN case, the failed page is
* removed from migration page list and not
* retried in the next outer loop.
*/
nr_failed++;
break;
}
}
}
nr_failed += retry;
rc = nr_failed;
out:
if (nr_succeeded)
count_vm_events(PGMIGRATE_SUCCESS, nr_succeeded);
if (nr_failed)
count_vm_events(PGMIGRATE_FAIL, nr_failed);
trace_mm_migrate_pages(nr_succeeded, nr_failed, mode, reason);
if (!swapwrite)
current->flags &= ~PF_SWAPWRITE;
return rc;
}
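/*
 * Illustrative caller pattern following the contract above; the
 * "example_alloc_dst" allocation callback is a made-up name:
 *
 *	rc = migrate_pages(&pagelist, example_alloc_dst, NULL, private,
 *			   MIGRATE_SYNC, MR_SYSCALL);
 *	if (rc)
 *		putback_movable_pages(&pagelist);
 */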
#ifdef CONFIG_NUMA
static int store_status(int __user *status, int start, int value, int nr)
{
while (nr-- > 0) {
if (put_user(value, status + start))
return -EFAULT;
start++;
}
return 0;
}
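/*
 * Migrate everything currently on @pagelist to @node, putting any pages
 * that could not be migrated back onto their original lists. Returns 0 on
 * success, otherwise a negative errno or the number of pages that were
 * not migrated.
 */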
static int do_move_pages_to_node(struct mm_struct *mm,
struct list_head *pagelist, int node)
{
int err;
if (list_empty(pagelist))
return 0;
err = migrate_pages(pagelist, alloc_new_node_page, NULL, node,
MIGRATE_SYNC, MR_SYSCALL);
if (err)
putback_movable_pages(pagelist);
return err;
}
/*
* Resolves the given address to a struct page, isolates it from the LRU and
* adds it to the given pagelist.
* Returns:
* errno - if the page cannot be found/isolated
* 0 - when it doesn't have to be migrated because it is already on the
* target node
* 1 - when it has been queued
*/
static int add_page_for_migration(struct mm_struct *mm, unsigned long addr,
int node, struct list_head *pagelist, bool migrate_all)
{
struct vm_area_struct *vma;
struct page *page;
unsigned int follflags;
int err;
down_read(&mm->mmap_sem);
err = -EFAULT;
vma = find_vma(mm, addr);
if (!vma || addr < vma->vm_start || !vma_migratable(vma))
goto out;
/* FOLL_DUMP to ignore special (like zero) pages */
follflags = FOLL_GET | FOLL_DUMP;
page = follow_page(vma, addr, follflags);
err = PTR_ERR(page);
if (IS_ERR(page))
goto out;
err = -ENOENT;
if (!page)
goto out;
err = 0;
if (page_to_nid(page) == node)
goto out_putpage;
err = -EACCES;
if (page_mapcount(page) > 1 && !migrate_all)
goto out_putpage;
if (PageHuge(page)) {
if (PageHead(page)) {
isolate_huge_page(page, pagelist);
err = 1;
}
} else {
struct page *head;
head = compound_head(page);
err = isolate_lru_page(head);
if (err)
goto out_putpage;
err = 1;
list_add_tail(&head->lru, pagelist);
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON + page_is_file_cache(head),
hpage_nr_pages(head));
}
out_putpage:
/*
* Either remove the duplicate refcount from
* isolate_lru_page() or drop the page ref if it was
* not isolated.
*/
put_page(page);
out:
up_read(&mm->mmap_sem);
return err;
}
/*
* Migrate an array of page addresses onto an array of nodes and fill
* the corresponding array of status.
*/
static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes,
unsigned long nr_pages,
const void __user * __user *pages,
const int __user *nodes,
int __user *status, int flags)
{
int current_node = NUMA_NO_NODE;
LIST_HEAD(pagelist);
int start, i;
int err = 0, err1;
migrate_prep();
for (i = start = 0; i < nr_pages; i++) {
const void __user *p;
unsigned long addr;
int node;
err = -EFAULT;
if (get_user(p, pages + i))
goto out_flush;
if (get_user(node, nodes + i))
goto out_flush;
addr = (unsigned long)untagged_addr(p);
err = -ENODEV;
if (node < 0 || node >= MAX_NUMNODES)
goto out_flush;
if (!node_state(node, N_MEMORY))
goto out_flush;
err = -EACCES;
if (!node_isset(node, task_nodes))
goto out_flush;
if (current_node == NUMA_NO_NODE) {
current_node = node;
start = i;
} else if (node != current_node) {
err = do_move_pages_to_node(mm, &pagelist, current_node);
if (err) {
/*
* A positive err means the number of pages
* that failed to migrate. Since we are going
* to abort and return the number of
* non-migrated pages, we need to include the
* rest of the nr_pages that have not been
* attempted as well.
*/
if (err > 0)
err += nr_pages - i - 1;
goto out;
}
err = store_status(status, start, current_node, i - start);
if (err)
goto out;
start = i;
current_node = node;
}
/*
* Errors in the page lookup or isolation are not fatal and we simply
* report them via status
*/
err = add_page_for_migration(mm, addr, current_node,
&pagelist, flags & MPOL_MF_MOVE_ALL);
if (!err) {
/* The page is already on the target node */
err = store_status(status, i, current_node, 1);
if (err)
goto out_flush;
continue;
} else if (err > 0) {
/* The page is successfully queued for migration */
continue;
}
err = store_status(status, i, err, 1);
if (err)
goto out_flush;
err = do_move_pages_to_node(mm, &pagelist, current_node);
if (err) {
if (err > 0)
err += nr_pages - i - 1;
goto out;
}
if (i > start) {
err = store_status(status, start, current_node, i - start);
if (err)
goto out;
}
current_node = NUMA_NO_NODE;
}
out_flush:
if (list_empty(&pagelist))
return err;
/* Make sure we do not overwrite the existing error */
err1 = do_move_pages_to_node(mm, &pagelist, current_node);
/*
* Don't have to report non-attempted pages here since:
* - If the above loop is done gracefully all pages have been
* attempted.
* - If the above loop is aborted it means a fatal error
* happened, and we should return the error.
*/
if (!err1)
err1 = store_status(status, start, current_node, i - start);
if (err >= 0)
err = err1;
out:
return err;
}
/*
* Determine the nodes of an array of pages and store it in an array of status.
*/
static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
const void __user **pages, int *status)
{
unsigned long i;
down_read(&mm->mmap_sem);
for (i = 0; i < nr_pages; i++) {
unsigned long addr = (unsigned long)(*pages);
struct vm_area_struct *vma;
struct page *page;
int err = -EFAULT;
vma = find_vma(mm, addr);
if (!vma || addr < vma->vm_start)
goto set_status;
/* FOLL_DUMP to ignore special (like zero) pages */
page = follow_page(vma, addr, FOLL_DUMP);
err = PTR_ERR(page);
if (IS_ERR(page))
goto set_status;
err = page ? page_to_nid(page) : -ENOENT;
set_status:
*status = err;
pages++;
status++;
}
up_read(&mm->mmap_sem);
}
/*
* Determine the nodes of a user array of pages and store it in
* a user array of status.
*/
static int do_pages_stat(struct mm_struct *mm, unsigned long nr_pages,
const void __user * __user *pages,
int __user *status)
{
#define DO_PAGES_STAT_CHUNK_NR 16
const void __user *chunk_pages[DO_PAGES_STAT_CHUNK_NR];
int chunk_status[DO_PAGES_STAT_CHUNK_NR];
while (nr_pages) {
unsigned long chunk_nr;
chunk_nr = nr_pages;
if (chunk_nr > DO_PAGES_STAT_CHUNK_NR)
chunk_nr = DO_PAGES_STAT_CHUNK_NR;
if (copy_from_user(chunk_pages, pages, chunk_nr * sizeof(*chunk_pages)))
break;
do_pages_stat_array(mm, chunk_nr, chunk_pages, chunk_status);
if (copy_to_user(status, chunk_status, chunk_nr * sizeof(*status)))
break;
pages += chunk_nr;
status += chunk_nr;
nr_pages -= chunk_nr;
}
return nr_pages ? -EFAULT : 0;
}
/*
* Move a list of pages in the address space of the currently executing
* process.
*/
static int kernel_move_pages(pid_t pid, unsigned long nr_pages,
const void __user * __user *pages,
const int __user *nodes,
int __user *status, int flags)
{
struct task_struct *task;
struct mm_struct *mm;
int err;
nodemask_t task_nodes;
/* Check flags */
if (flags & ~(MPOL_MF_MOVE|MPOL_MF_MOVE_ALL))
return -EINVAL;
if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
return -EPERM;
/* Find the mm_struct */
rcu_read_lock();
task = pid ? find_task_by_vpid(pid) : current;
if (!task) {
rcu_read_unlock();
return -ESRCH;
}
get_task_struct(task);
/*
* Check if this process has the right to modify the specified
* process. Use the regular "ptrace_may_access()" checks.
*/
if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS)) {
rcu_read_unlock();
err = -EPERM;
goto out;
}
rcu_read_unlock();
err = security_task_movememory(task);
if (err)
goto out;
task_nodes = cpuset_mems_allowed(task);
mm = get_task_mm(task);
put_task_struct(task);
if (!mm)
return -EINVAL;
if (nodes)
err = do_pages_move(mm, task_nodes, nr_pages, pages,
nodes, status, flags);
else
err = do_pages_stat(mm, nr_pages, pages, status);
mmput(mm);
return err;
out:
put_task_struct(task);
return err;
}
SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
const void __user * __user *, pages,
const int __user *, nodes,
int __user *, status, int, flags)
{
return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}
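/*
 * Illustrative userspace sketch of this syscall, assuming libnuma's
 * move_pages() wrapper from <numaif.h> (link with -lnuma); "some_addr"
 * and the target node are made up for the example:
 *
 *	void *pages[1] = { some_addr };
 *	int nodes[1] = { 1 };
 *	int status[1];
 *
 *	if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
 *		printf("page now on node %d\n", status[0]);
 *
 * pid 0 means the calling process; on success each status entry holds
 * the node the page ended up on, or a negative errno for that page.
 */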
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(move_pages, pid_t, pid, compat_ulong_t, nr_pages,
compat_uptr_t __user *, pages32,
const int __user *, nodes,
int __user *, status,
int, flags)
{
const void __user * __user *pages;
int i;
pages = compat_alloc_user_space(nr_pages * sizeof(void *));
for (i = 0; i < nr_pages; i++) {
compat_uptr_t p;
if (get_user(p, pages32 + i) ||
put_user(compat_ptr(p), pages + i))
return -EFAULT;
}
return kernel_move_pages(pid, nr_pages, pages, nodes, status, flags);
}
#endif /* CONFIG_COMPAT */
#ifdef CONFIG_NUMA_BALANCING
/*
* Returns true if this is a safe migration target node for misplaced NUMA
* pages. Currently it only checks the watermarks, which is a crude check.
*/
static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
unsigned long nr_migrate_pages)
{
int z;
for (z = pgdat->nr_zones - 1; z >= 0; z--) {
struct zone *zone = pgdat->node_zones + z;
if (!populated_zone(zone))
continue;
/* Avoid waking kswapd by allocating pages_to_migrate pages. */
if (!zone_watermark_ok(zone, 0,
high_wmark_pages(zone) +
nr_migrate_pages,
0, 0))
continue;
return true;
}
return false;
}
static struct page *alloc_misplaced_dst_page(struct page *page,
unsigned long data)
{
int nid = (int) data;
struct page *newpage;
newpage = __alloc_pages_node(nid,
(GFP_HIGHUSER_MOVABLE |
__GFP_THISNODE | __GFP_NOMEMALLOC |
__GFP_NORETRY | __GFP_NOWARN) &
~__GFP_RECLAIM, 0);
return newpage;
}
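/*
 * Isolate @page from the LRU before a NUMA hinting-fault migration towards
 * @pgdat. Returns 1 on success, in which case the caller's page reference
 * is dropped here and the LRU isolation reference keeps the page alive;
 * returns 0 if the page cannot or should not be migrated.
 */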
static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
{
int page_lru;
VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page);
/* Avoid migrating to a node that is nearly full */
if (!migrate_balanced_pgdat(pgdat, compound_nr(page)))
return 0;
if (isolate_lru_page(page))
return 0;
/*
* migrate_misplaced_transhuge_page() skips page migration's usual
* check on page_count(), so we must do it here, now that the page
* has been isolated: a GUP pin, or any other pin, prevents migration.
* The expected page count is 3: 1 for page's mapcount and 1 for the
* caller's pin and 1 for the reference taken by isolate_lru_page().
*/
if (PageTransHuge(page) && page_count(page) != 3) {
putback_lru_page(page);
return 0;
}
page_lru = page_is_file_cache(page);
mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru,
hpage_nr_pages(page));
/*
* Isolating the page has taken another reference, so the
* caller's reference can be safely dropped without the page
* disappearing underneath us during migration.
*/
put_page(page);
return 1;
}
bool pmd_trans_migrating(pmd_t pmd)
{
struct page *page = pmd_page(pmd);
return PageLocked(page);
}
/*
* Attempt to migrate a misplaced page to the specified destination
* node. Caller is expected to have an elevated reference count on
* the page that will be dropped by this function before returning.
*/
int migrate_misplaced_page(struct page *page, struct vm_fault *vmf,
int node)
{
pg_data_t *pgdat = NODE_DATA(node);
int isolated;
int nr_remaining;
LIST_HEAD(migratepages);
/*
* Don't migrate file pages that are mapped in multiple processes
* with execute permissions as they are probably shared libraries.
*/
if (page_mapcount(page) != 1 && page_is_file_cache(page) &&
(vmf->vma_flags & VM_EXEC))
goto out;
/*
* Also do not migrate dirty pages as not all filesystems can move
* dirty pages in MIGRATE_ASYNC mode which is a waste of cycles.
*/
if (page_is_file_cache(page) && PageDirty(page))
goto out;
isolated = numamigrate_isolate_page(pgdat, page);
if (!isolated)
goto out;
list_add(&page->lru, &migratepages);
nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page,
NULL, node, MIGRATE_ASYNC,
MR_NUMA_MISPLACED);
if (nr_remaining) {
if (!list_empty(&migratepages)) {
list_del(&page->lru);
dec_node_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
putback_lru_page(page);
}
isolated = 0;
} else
count_vm_numa_event(NUMA_PAGE_MIGRATE);
BUG_ON(!list_empty(&migratepages));
return isolated;
out:
put_page(page);
return 0;
}
#endif /* CONFIG_NUMA_BALANCING */
#if defined(CONFIG_NUMA_BALANCING) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
/*
* Migrates a THP to a given target node. page must be locked and is unlocked
* before returning.
*/
int migrate_misplaced_transhuge_page(struct mm_struct *mm,
struct vm_area_struct *vma,
pmd_t *pmd, pmd_t entry,
unsigned long address,
struct page *page, int node)
{
spinlock_t *ptl;
pg_data_t *pgdat = NODE_DATA(node);
int isolated = 0;
struct page *new_page = NULL;
int page_lru = page_is_file_cache(page);
unsigned long start = address & HPAGE_PMD_MASK;
new_page = alloc_pages_node(node,
(GFP_TRANSHUGE_LIGHT | __GFP_THISNODE),
HPAGE_PMD_ORDER);
if (!new_page)
goto out_fail;
prep_transhuge_page(new_page);
isolated = numamigrate_isolate_page(pgdat, page);
if (!isolated) {
put_page(new_page);
goto out_fail;
}
/* Prepare a page as a migration target */
__SetPageLocked(new_page);
if (PageSwapBacked(page))
__SetPageSwapBacked(new_page);
/* anon mapping, we can simply copy page->mapping to the new page: */
new_page->mapping = page->mapping;
new_page->index = page->index;
/* flush the cache before copying using the kernel virtual address */
flush_cache_range(vma, start, start + HPAGE_PMD_SIZE);
migrate_page_copy(new_page, page);
WARN_ON(PageLRU(new_page));
/* Recheck the target PMD */
ptl = pmd_lock(mm, pmd);
if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) {
spin_unlock(ptl);
/* Reverse changes made by migrate_page_copy() */
if (TestClearPageActive(new_page))
SetPageActive(page);
if (TestClearPageUnevictable(new_page))
SetPageUnevictable(page);
unlock_page(new_page);
put_page(new_page); /* Free it */
/* Retake the callers reference and putback on LRU */
get_page(page);
putback_lru_page(page);
mod_node_page_state(page_pgdat(page),
NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
goto out_unlock;
}
entry = mk_huge_pmd(new_page, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
/*
* Overwrite the old entry under pagetable lock and establish
* the new PTE. Any parallel GUP will either observe the old
* page blocking on the page lock, block on the page table
* lock or observe the new page. The SetPageUptodate on the
* new page and page_add_new_anon_rmap guarantee the copy is
* visible before the pagetable update.
*/
page_add_anon_rmap(new_page, vma, start, true);
/*
* At this point the pmd is numa/protnone (i.e. non present) and the TLB
* has already been flushed globally. So no TLB can be currently
* caching this non present pmd mapping. There's no need to clear the
* pmd before doing set_pmd_at(), nor to flush the TLB after
* set_pmd_at(). Clearing the pmd here would introduce a race
* condition against MADV_DONTNEED, because MADV_DONTNEED only holds the
* mmap_sem for reading. If the pmd is set to NULL at any given time,
* MADV_DONTNEED won't wait on the pmd lock and it'll skip clearing this
* pmd.
*/
set_pmd_at(mm, start, pmd, entry);
update_mmu_cache_pmd(vma, address, &entry);
page_ref_unfreeze(page, 2);
mlock_migrate_page(new_page, page);
page_remove_rmap(page, true);
set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
spin_unlock(ptl);
/* Take an "isolate" reference and put new page on the LRU. */
get_page(new_page);
putback_lru_page(new_page);
unlock_page(new_page);
unlock_page(page);
put_page(page); /* Drop the rmap reference */
put_page(page); /* Drop the LRU isolation reference */
count_vm_events(PGMIGRATE_SUCCESS, HPAGE_PMD_NR);
count_vm_numa_events(NUMA_PAGE_MIGRATE, HPAGE_PMD_NR);
mod_node_page_state(page_pgdat(page),
NR_ISOLATED_ANON + page_lru,
-HPAGE_PMD_NR);
return isolated;
out_fail:
count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
ptl = pmd_lock(mm, pmd);
if (pmd_same(*pmd, entry)) {
entry = pmd_modify(entry, vma->vm_page_prot);
set_pmd_at(mm, start, pmd, entry);
update_mmu_cache_pmd(vma, address, &entry);
}
spin_unlock(ptl);
out_unlock:
unlock_page(page);
put_page(page);
return 0;
}
#endif /* CONFIG_NUMA_BALANCING && CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_NUMA */
#ifdef CONFIG_DEVICE_PRIVATE
static int migrate_vma_collect_hole(unsigned long start,
unsigned long end,
struct mm_walk *walk)
{
struct migrate_vma *migrate = walk->private;
unsigned long addr;
for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;
migrate->dst[migrate->npages] = 0;
migrate->npages++;
migrate->cpages++;
}
return 0;
}
static int migrate_vma_collect_skip(unsigned long start,
unsigned long end,
struct mm_walk *walk)
{
struct migrate_vma *migrate = walk->private;
unsigned long addr;
for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) {
migrate->dst[migrate->npages] = 0;
migrate->src[migrate->npages++] = 0;
}
return 0;
}
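/*
 * Walk the ptes covered by one pmd: record a migrate pfn entry in the src
 * array for each migratable page and, whenever the page lock can be taken
 * right away, replace the pte with a migration entry immediately.
 */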
static int migrate_vma_collect_pmd(pmd_t *pmdp,
unsigned long start,
unsigned long end,
struct mm_walk *walk)
{
struct migrate_vma *migrate = walk->private;
struct vm_area_struct *vma = walk->vma;
struct mm_struct *mm = vma->vm_mm;
unsigned long addr = start, unmapped = 0;
spinlock_t *ptl;
pte_t *ptep;
again:
if (pmd_none(*pmdp))
return migrate_vma_collect_hole(start, end, walk);
if (pmd_trans_huge(*pmdp)) {
struct page *page;
ptl = pmd_lock(mm, pmdp);
if (unlikely(!pmd_trans_huge(*pmdp))) {
spin_unlock(ptl);
goto again;
}
page = pmd_page(*pmdp);
if (is_huge_zero_page(page)) {
spin_unlock(ptl);
split_huge_pmd(vma, pmdp, addr);
if (pmd_trans_unstable(pmdp))
return migrate_vma_collect_skip(start, end,
walk);
} else {
int ret;
get_page(page);
spin_unlock(ptl);
if (unlikely(!trylock_page(page)))
return migrate_vma_collect_skip(start, end,
walk);
ret = split_huge_page(page);
unlock_page(page);
put_page(page);
if (ret)
return migrate_vma_collect_skip(start, end,
walk);
if (pmd_none(*pmdp))
return migrate_vma_collect_hole(start, end,
walk);
}
}
if (unlikely(pmd_bad(*pmdp)))
return migrate_vma_collect_skip(start, end, walk);
ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
arch_enter_lazy_mmu_mode();
for (; addr < end; addr += PAGE_SIZE, ptep++) {
unsigned long mpfn, pfn;
struct page *page;
swp_entry_t entry;
pte_t pte;
pte = *ptep;
if (pte_none(pte)) {
mpfn = MIGRATE_PFN_MIGRATE;
migrate->cpages++;
goto next;
}
if (!pte_present(pte)) {
mpfn = 0;
/*
* Only care about the special page table entries of
* unaddressable device pages. Other special swap entries
* are not migratable, and we ignore regular swapped pages.
*/
entry = pte_to_swp_entry(pte);
if (!is_device_private_entry(entry))
goto next;
page = device_private_entry_to_page(entry);
mpfn = migrate_pfn(page_to_pfn(page)) |
MIGRATE_PFN_MIGRATE;
if (is_write_device_private_entry(entry))
mpfn |= MIGRATE_PFN_WRITE;
} else {
pfn = pte_pfn(pte);
if (is_zero_pfn(pfn)) {
mpfn = MIGRATE_PFN_MIGRATE;
migrate->cpages++;
goto next;
}
page = vm_normal_page(migrate->vma, addr, pte);
mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
}
/* FIXME support THP */
if (!page || !page->mapping || PageTransCompound(page)) {
mpfn = 0;
goto next;
}
/*
* By getting a reference on the page we pin it and that blocks
* any kind of migration. Side effect is that it "freezes" the
* pte.
*
* We drop this reference after isolating the page from the lru
* for non-device pages (device pages are not on the lru and thus
* can't be dropped from it).
*/
get_page(page);
migrate->cpages++;
/*
* Optimize for the common case where page is only mapped once
* in one process. If we can lock the page, then we can safely
* set up a special migration page table entry now.
*/
if (trylock_page(page)) {
pte_t swp_pte;
mpfn |= MIGRATE_PFN_LOCKED;
ptep_get_and_clear(mm, addr, ptep);
/* Setup special migration page table entry */
entry = make_migration_entry(page, mpfn &
MIGRATE_PFN_WRITE);
swp_pte = swp_entry_to_pte(entry);
if (pte_soft_dirty(pte))
swp_pte = pte_swp_mksoft_dirty(swp_pte);
set_pte_at(mm, addr, ptep, swp_pte);
/*
* This is like regular unmap: we remove the rmap and
* drop page refcount. Page won't be freed, as we took
* a reference just above.
*/
page_remove_rmap(page, false);
put_page(page);
if (pte_present(pte))
unmapped++;
}
next:
migrate->dst[migrate->npages] = 0;
migrate->src[migrate->npages++] = mpfn;
}
/* Only flush the TLB if we actually modified any entries */
if (unmapped)
flush_tlb_range(walk->vma, start, end);
arch_leave_lazy_mmu_mode();
pte_unmap_unlock(ptep - 1, ptl);
return 0;
}
static const struct mm_walk_ops migrate_vma_walk_ops = {
.pmd_entry = migrate_vma_collect_pmd,
.pte_hole = migrate_vma_collect_hole,
};
/*
* migrate_vma_collect() - collect pages over a range of virtual addresses
* @migrate: migrate struct containing all migration information
*
* This will walk the CPU page table. For each virtual address backed by a
* valid page, it updates the src array and takes a reference on the page, in
* order to pin the page until we lock it and unmap it.
*/
static void migrate_vma_collect(struct migrate_vma *migrate)
{
struct mmu_notifier_range range;
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL,
migrate->vma->vm_mm, migrate->start, migrate->end);
mmu_notifier_invalidate_range_start(&range);
walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end,
&migrate_vma_walk_ops, migrate);
mmu_notifier_invalidate_range_end(&range);
migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
}
/*
* migrate_vma_check_page() - check if page is pinned or not
* @page: struct page to check
*
* Pinned pages cannot be migrated. This is the same test as in
* migrate_page_move_mapping(), except that here we allow migration of a
* ZONE_DEVICE page.
*/
static bool migrate_vma_check_page(struct page *page)
{
/*
* One extra ref because caller holds an extra reference, either from
* isolate_lru_page() for a regular page, or migrate_vma_collect() for
* a device page.
*/
int extra = 1;
/*
* FIXME support THP (transparent huge page), it is a bit more complex to
* check them than regular pages, because they can be mapped with a pmd
* or with a pte (split pte mapping).
*/
if (PageCompound(page))
return false;
/* Page from ZONE_DEVICE have one extra reference */
if (is_zone_device_page(page)) {
/*
* Private pages can never be pinned as they have no valid pte
* and GUP will fail for them. Yet if there is a pending
* migration, a thread might try to wait on the pte migration
* entry and will bump the page reference count. Sadly there is
* no way to differentiate a regular pin from a migration wait.
* Hence, to avoid two racing threads trying to migrate back to
* the CPU and entering an infinite loop (one stopping migration
* because the other is waiting on the pte migration entry), we
* always return true here.
*
* FIXME proper solution is to rework migration_entry_wait() so
* it does not need to take a reference on page.
*/
return is_device_private_page(page);
}
/* For file-backed pages */
if (page_mapping(page))
extra += 1 + page_has_private(page);
if ((page_count(page) - extra) > page_mapcount(page))
return false;
return true;
}
/*
* migrate_vma_prepare() - lock pages and isolate them from the lru
* @migrate: migrate struct containing all migration information
*
* This locks pages that have been collected by migrate_vma_collect(). Once each
* page is locked it is isolated from the lru (for non-device pages). Finally,
* the ref taken by migrate_vma_collect() is dropped, as locked pages cannot be
* migrated by concurrent kernel threads.
*/
static void migrate_vma_prepare(struct migrate_vma *migrate)
{
const unsigned long npages = migrate->npages;
const unsigned long start = migrate->start;
unsigned long addr, i, restore = 0;
bool allow_drain = true;
lru_add_drain();
for (i = 0; (i < npages) && migrate->cpages; i++) {
struct page *page = migrate_pfn_to_page(migrate->src[i]);
bool remap = true;
if (!page)
continue;
if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) {
/*
* Because we are migrating several pages there can be
* a deadlock between two concurrent migrations where
* each is waiting on the other's page lock.
*
* Make migrate_vma() a best-effort thing and back off
* for any page we cannot lock right away.
*/
if (!trylock_page(page)) {
migrate->src[i] = 0;
migrate->cpages--;
put_page(page);
continue;
}
remap = false;
migrate->src[i] |= MIGRATE_PFN_LOCKED;
}
/* ZONE_DEVICE pages are not on LRU */
if (!is_zone_device_page(page)) {
if (!PageLRU(page) && allow_drain) {
/* Drain CPU's pagevec */
lru_add_drain_all();
allow_drain = false;
}
if (isolate_lru_page(page)) {
if (remap) {
migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
migrate->cpages--;
restore++;
} else {
migrate->src[i] = 0;
unlock_page(page);
migrate->cpages--;
put_page(page);
}
continue;
}
/* Drop the reference we took in collect */
put_page(page);
}
if (!migrate_vma_check_page(page)) {
if (remap) {
migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
migrate->cpages--;
restore++;
if (!is_zone_device_page(page)) {
get_page(page);
putback_lru_page(page);
}
} else {
migrate->src[i] = 0;
unlock_page(page);
migrate->cpages--;
if (!is_zone_device_page(page))
putback_lru_page(page);
else
put_page(page);
}
}
}
for (i = 0, addr = start; i < npages && restore; i++, addr += PAGE_SIZE) {
struct page *page = migrate_pfn_to_page(migrate->src[i]);
if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
continue;
remove_migration_pte(page, migrate->vma, addr, page);
migrate->src[i] = 0;
unlock_page(page);
put_page(page);
restore--;
}
}
/*
* migrate_vma_unmap() - replace page mapping with special migration pte entry
* @migrate: migrate struct containing all migration information
*
* Replace page mapping (CPU page table pte) with a special migration pte entry
* and check again if it has been pinned. Pinned pages are restored because we
* cannot migrate them.
*
* This is the last step before we call the device driver callback to allocate
* destination memory and copy contents of original page over to new page.
*/
static void migrate_vma_unmap(struct migrate_vma *migrate)
{
int flags = TTU_MIGRATION | TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
const unsigned long npages = migrate->npages;
const unsigned long start = migrate->start;
unsigned long addr, i, restore = 0;
for (i = 0; i < npages; i++) {
struct page *page = migrate_pfn_to_page(migrate->src[i]);
if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE))
continue;
if (page_mapped(page)) {
try_to_unmap(page, flags, NULL);
if (page_mapped(page))
goto restore;
}
if (migrate_vma_check_page(page))
continue;
restore:
migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
migrate->cpages--;
restore++;
}
for (addr = start, i = 0; i < npages && restore; addr += PAGE_SIZE, i++) {
struct page *page = migrate_pfn_to_page(migrate->src[i]);
if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE))
continue;
remove_migration_ptes(page, page, false);
migrate->src[i] = 0;
unlock_page(page);
restore--;
if (is_zone_device_page(page))
put_page(page);
else
putback_lru_page(page);
}
}
/**
* migrate_vma_setup() - prepare to migrate a range of memory
* @args: contains the vma, start, end, and pfns arrays for the migration
*
* Returns: negative errno on failures, 0 when 0 or more pages were migrated
* without an error.
*
* Prepare to migrate a range of memory virtual address range by collecting all
* the pages backing each virtual address in the range, saving them inside the
* src array. Then lock those pages and unmap them. Once the pages are locked
* and unmapped, check whether each page is pinned or not. Pages that aren't
* pinned have the MIGRATE_PFN_MIGRATE flag set (by this function) in the
* corresponding src array entry. Then restores any pages that are pinned, by
* remapping and unlocking those pages.
*
* The caller should then allocate destination memory and copy source memory to
* it for all those entries (ie with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE
* flag set). Once these are allocated and copied, the caller must update each
* corresponding entry in the dst array with the pfn value of the destination
* page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
* (destination pages must have their struct pages locked, via lock_page()).
*
* Note that the caller does not have to migrate all the pages that are marked
* with MIGRATE_PFN_MIGRATE flag in src array unless this is a migration from
* device memory to system memory. If the caller cannot migrate a device page
* back to system memory, then it must return VM_FAULT_SIGBUS, which has severe
* consequences for the userspace process, so it must be avoided if at all
* possible.
*
* For empty entries inside CPU page table (pte_none() or pmd_none() is true) we
* do set MIGRATE_PFN_MIGRATE flag inside the corresponding source array thus
* allowing the caller to allocate device memory for those unback virtual
* address. For this the caller simply has to allocate device memory and
* properly set the destination entry like for regular migration. Note that
* this can still fails and thus inside the device driver must check if the
* migration was successful for those entries after calling migrate_vma_pages()
* just like for regular migration.
*
* After that, the callers must call migrate_vma_pages() to go over each entry
* in the src array that has the MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag
* set. If the corresponding entry in dst array has MIGRATE_PFN_VALID flag set,
* then migrate_vma_pages() to migrate struct page information from the source
* struct page to the destination struct page. If it fails to migrate the
* struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
* src array.
*
* At this point all successfully migrated pages have an entry in the src
* array with MIGRATE_PFN_VALID and MIGRATE_PFN_MIGRATE flag set and the dst
* array entry with MIGRATE_PFN_VALID flag set.
*
* Once migrate_vma_pages() returns the caller may inspect which pages were
* successfully migrated, and which were not. Successfully migrated pages will
* have the MIGRATE_PFN_MIGRATE flag set for their src array entry.
*
* It is safe to update device page table after migrate_vma_pages() because
* both destination and source page are still locked, and the mmap_sem is held
* in read mode (hence no one can unmap the range being migrated).
*
* Once the caller is done cleaning up things and updating its page table (if it
* chose to do so, this is not an obligation) it finally calls
* migrate_vma_finalize() to update the CPU page table to point to new pages
* for successfully migrated pages or otherwise restore the CPU page table to
* point to the original source pages.
*/
int migrate_vma_setup(struct migrate_vma *args)
{
	long nr_pages = (args->end - args->start) >> PAGE_SHIFT;

	args->start &= PAGE_MASK;
	args->end &= PAGE_MASK;
	if (!args->vma || is_vm_hugetlb_page(args->vma) ||
	    (args->vma->vm_flags & VM_SPECIAL) || vma_is_dax(args->vma))
		return -EINVAL;
	if (nr_pages <= 0)
		return -EINVAL;
	if (args->start < args->vma->vm_start ||
	    args->start >= args->vma->vm_end)
		return -EINVAL;
	if (args->end <= args->vma->vm_start || args->end > args->vma->vm_end)
		return -EINVAL;
	if (!args->src || !args->dst)
		return -EINVAL;

	memset(args->src, 0, sizeof(*args->src) * nr_pages);
	args->cpages = 0;
	args->npages = 0;

	migrate_vma_collect(args);
	if (args->cpages)
		migrate_vma_prepare(args);
	if (args->cpages)
		migrate_vma_unmap(args);

	/*
	 * At this point pages are locked and unmapped, and thus they have
	 * stable content and can safely be copied to destination memory that
	 * is allocated by the drivers.
	 */
	return 0;
}
EXPORT_SYMBOL(migrate_vma_setup);
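
/*
 * Illustrative sketch only, not part of the original file: a device driver
 * would typically fill in a struct migrate_vma and drive the first phase as
 * below, then allocate and lock a destination page for every entry that has
 * MIGRATE_PFN_MIGRATE set. The example_alloc_device_page() helper and the
 * surrounding locals (vma, start, end, src_pfns, dst_pfns) are hypothetical;
 * error handling is elided.
 *
 *	struct migrate_vma args = {
 *		.vma	= vma,
 *		.start	= start,
 *		.end	= end,
 *		.src	= src_pfns,
 *		.dst	= dst_pfns,
 *	};
 *	unsigned long i;
 *
 *	if (migrate_vma_setup(&args))
 *		goto err;
 *
 *	for (i = 0; i < args.npages; i++) {
 *		struct page *dpage;
 *
 *		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;
 *		dpage = example_alloc_device_page();
 *		lock_page(dpage);
 *		args.dst[i] = migrate_pfn(page_to_pfn(dpage)) |
 *			      MIGRATE_PFN_LOCKED;
 *	}
 */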

static void migrate_vma_insert_page(struct migrate_vma *migrate,
				    unsigned long addr,
				    struct page *page,
				    unsigned long *src,
				    unsigned long *dst)
{
	struct vm_area_struct *vma = migrate->vma;
	struct mm_struct *mm = vma->vm_mm;
	struct mem_cgroup *memcg;
	bool flush = false;
	spinlock_t *ptl;
	pte_t entry;
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/* Only allow populating anonymous memory */
	if (!vma_is_anonymous(vma))
		goto abort;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (!p4dp)
		goto abort;
	pudp = pud_alloc(mm, p4dp, addr);
	if (!pudp)
		goto abort;
	pmdp = pmd_alloc(mm, pudp, addr);
	if (!pmdp)
		goto abort;

	if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp))
		goto abort;

	/*
	 * Use pte_alloc() instead of pte_alloc_map(). We can't run
	 * pte_offset_map() on pmds where a huge pmd might be created
	 * from a different thread.
	 *
	 * pte_alloc_map() is safe to use under down_write(mmap_sem) or when
	 * parallel threads are excluded by other means.
	 *
	 * Here we only have down_read(mmap_sem).
	 */
	if (pte_alloc(mm, pmdp))
		goto abort;

	/* See the comment in pte_alloc_one_map() */
	if (unlikely(pmd_trans_unstable(pmdp)))
		goto abort;

	if (unlikely(anon_vma_prepare(vma)))
		goto abort;
	if (mem_cgroup_try_charge(page, vma->vm_mm, GFP_KERNEL, &memcg, false))
		goto abort;

	/*
	 * The memory barrier inside __SetPageUptodate makes sure that
	 * preceding stores to the page contents become visible before
	 * the set_pte_at() write.
	 */
	__SetPageUptodate(page);

	if (is_zone_device_page(page)) {
		if (is_device_private_page(page)) {
			swp_entry_t swp_entry;

			swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE);
			entry = swp_entry_to_pte(swp_entry);
		} else {
			/*
			 * For now we only support migrating to un-addressable
			 * device memory.
			 */
			pr_warn_once("Unsupported ZONE_DEVICE page type.\n");
			goto abort;
		}
	} else {
		entry = mk_pte(page, vma->vm_page_prot);
		if (vma->vm_flags & VM_WRITE)
			entry = pte_mkwrite(pte_mkdirty(entry));
	}

	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);

	if (pte_present(*ptep)) {
		unsigned long pfn = pte_pfn(*ptep);

		if (!is_zero_pfn(pfn)) {
			pte_unmap_unlock(ptep, ptl);
			mem_cgroup_cancel_charge(page, memcg, false);
			goto abort;
		}
		flush = true;
	} else if (!pte_none(*ptep)) {
		pte_unmap_unlock(ptep, ptl);
		mem_cgroup_cancel_charge(page, memcg, false);
		goto abort;
	}

	/*
	 * Check for userfaultfd but do not deliver the fault. Instead,
	 * just back off.
	 */
	if (userfaultfd_missing(vma)) {
		pte_unmap_unlock(ptep, ptl);
		mem_cgroup_cancel_charge(page, memcg, false);
		goto abort;
	}

	inc_mm_counter(mm, MM_ANONPAGES);
	page_add_new_anon_rmap(page, vma, addr, false);
	mem_cgroup_commit_charge(page, memcg, false, false);
	if (!is_zone_device_page(page))
		lru_cache_add_active_or_unevictable(page, vma);
	get_page(page);

	if (flush) {
		flush_cache_page(vma, addr, pte_pfn(*ptep));
		ptep_clear_flush_notify(vma, addr, ptep);
		set_pte_at_notify(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	} else {
		/* No need to invalidate - it was non-present before */
		set_pte_at(mm, addr, ptep, entry);
		update_mmu_cache(vma, addr, ptep);
	}

	pte_unmap_unlock(ptep, ptl);
	*src = MIGRATE_PFN_MIGRATE;
	return;

abort:
	*src &= ~MIGRATE_PFN_MIGRATE;
}

/**
 * migrate_vma_pages() - migrate meta-data from src page to dst page
 * @migrate: migrate struct containing all migration information
 *
 * This migrates struct page meta-data from source struct page to destination
 * struct page. This effectively finishes the migration from source page to the
 * destination page.
 */
void migrate_vma_pages(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	const unsigned long start = migrate->start;
	struct mmu_notifier_range range;
	unsigned long addr, i;
	bool notified = false;

	for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);
		struct address_space *mapping;
		int r;

		if (!newpage) {
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
			continue;
		}

		if (!page) {
			if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE))
				continue;
			if (!notified) {
				notified = true;

				mmu_notifier_range_init(&range,
							MMU_NOTIFY_CLEAR, 0,
							NULL,
							migrate->vma->vm_mm,
							addr, migrate->end);
				mmu_notifier_invalidate_range_start(&range);
			}
			migrate_vma_insert_page(migrate, addr, newpage,
						&migrate->src[i],
						&migrate->dst[i]);
			continue;
		}

		mapping = page_mapping(page);

		if (is_zone_device_page(newpage)) {
			if (is_device_private_page(newpage)) {
				/*
				 * For now we only support migrating private
				 * anonymous memory to un-addressable device
				 * memory.
				 */
				if (mapping) {
					migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
					continue;
				}
			} else {
				/*
				 * Other types of ZONE_DEVICE page are not
				 * supported.
				 */
				migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
				continue;
			}
		}

		r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY);
		if (r != MIGRATEPAGE_SUCCESS)
			migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
	}

	/*
	 * No need to double call mmu_notifier->invalidate_range() callback as
	 * the above ptep_clear_flush_notify() inside migrate_vma_insert_page()
	 * did already call it.
	 */
	if (notified)
		mmu_notifier_invalidate_range_only_end(&range);
}
EXPORT_SYMBOL(migrate_vma_pages);
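
/*
 * Illustrative sketch only, not part of the original file: once
 * migrate_vma_pages() has returned, a driver can check per-entry success
 * before committing its own page tables. Entries whose src slot no longer
 * has MIGRATE_PFN_MIGRATE set failed to migrate and are skipped here.
 * example_map_device_page() is a hypothetical driver helper.
 *
 *	migrate_vma_pages(&args);
 *
 *	for (i = 0; i < args.npages; i++) {
 *		if (!(args.src[i] & MIGRATE_PFN_MIGRATE))
 *			continue;
 *		example_map_device_page(migrate_pfn_to_page(args.dst[i]));
 *	}
 */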

/**
 * migrate_vma_finalize() - restore CPU page table entry
 * @migrate: migrate struct containing all migration information
 *
 * This replaces the special migration pte entry with either a mapping to the
 * new page if migration was successful for that page, or to the original page
 * otherwise.
 *
 * This also unlocks the pages and puts them back on the lru, or drops the extra
 * refcount, for device pages.
 */
void migrate_vma_finalize(struct migrate_vma *migrate)
{
	const unsigned long npages = migrate->npages;
	unsigned long i;

	for (i = 0; i < npages; i++) {
		struct page *newpage = migrate_pfn_to_page(migrate->dst[i]);
		struct page *page = migrate_pfn_to_page(migrate->src[i]);

		if (!page) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			continue;
		}

		if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) {
			if (newpage) {
				unlock_page(newpage);
				put_page(newpage);
			}
			newpage = page;
		}

		remove_migration_ptes(page, newpage, false);
		unlock_page(page);
		migrate->cpages--;

		if (is_zone_device_page(page))
			put_page(page);
		else
			putback_lru_page(page);

		if (newpage != page) {
			unlock_page(newpage);
			if (is_zone_device_page(newpage))
				put_page(newpage);
			else
				putback_lru_page(newpage);
		}
	}
}
EXPORT_SYMBOL(migrate_vma_finalize);
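
/*
 * Illustrative sketch only, not part of the original file: the complete call
 * sequence the migrate_vma_setup() documentation expects a driver to follow.
 * The copy step between setup and pages is driver specific (for example a
 * DMA engine); example_alloc_and_lock_dst(), which fills args.dst, and
 * example_copy_src_to_dst() are hypothetical helpers.
 *
 *	if (migrate_vma_setup(&args))
 *		goto err;
 *	example_alloc_and_lock_dst(&args);
 *	example_copy_src_to_dst(&args);
 *	migrate_vma_pages(&args);
 *	migrate_vma_finalize(&args);
 */
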
#endif /* CONFIG_DEVICE_PRIVATE */