30 files changed, 501 insertions, 153 deletions
diff --git a/Makefile b/Makefile index 2f545ec1690fb9..e404e4767944ed 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 -PATCHLEVEL = 18 +PATCHLEVEL = 19 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc1 NAME = Baby Opossum Posse # *DOCUMENTATION* diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index 77d6944489905b..858320b6ebb7ef 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -316,9 +316,6 @@ config BLK_DEV_RBD tristate "Rados block device (RBD)" depends on INET && BLOCK select CEPH_LIB - select CRC32 - select CRYPTO_AES - select CRYPTO help Say Y here if you want include the Rados block device, which stripes a block device over objects stored in the Ceph distributed object diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index c18358271618d2..d5d6ef7ba8381a 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -950,6 +950,19 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps q = bdev_get_queue(p->path.dev->bdev); attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL); + if (IS_ERR(attached_handler_name)) { + if (PTR_ERR(attached_handler_name) == -ENODEV) { + if (m->hw_handler_name) { + DMERR("hardware handlers are only allowed for SCSI devices"); + kfree(m->hw_handler_name); + m->hw_handler_name = NULL; + } + attached_handler_name = NULL; + } else { + r = PTR_ERR(attached_handler_name); + goto bad; + } + } if (attached_handler_name || m->hw_handler_name) { INIT_DELAYED_WORK(&p->activate_path, activate_path_work); r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error); diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 9d1de68dee278c..d7d41b054b9824 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -106,7 +106,6 @@ config PHANTOM config RPMB tristate "RPMB partition interface" - depends on MMC || SCSI_UFSHCD help Unified RPMB unit interface for RPMB capable devices such as eMMC and UFS. Provides interface for in-kernel security controllers to access diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c index 5c602c0577989f..45b0e33293a591 100644 --- a/drivers/scsi/imm.c +++ b/drivers/scsi/imm.c @@ -1260,6 +1260,7 @@ static void imm_detach(struct parport *pb) imm_struct *dev; list_for_each_entry(dev, &imm_hosts, list) { if (dev->dev->port == pb) { + disable_delayed_work_sync(&dev->imm_tq); list_del_init(&dev->list); scsi_remove_host(dev->host); scsi_host_put(dev->host); diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 95123689e9d1fe..dbd58a7e7bc1f3 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -61,8 +61,8 @@ #include <linux/hdreg.h> #include <linux/reboot.h> #include <linux/stringify.h> +#include <linux/irq.h> #include <asm/io.h> -#include <asm/irq.h> #include <asm/processor.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> @@ -7844,6 +7844,30 @@ static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd) } /** + * ipr_set_affinity_nobalance + * @ioa_cfg: ipr_ioa_cfg struct for an ipr device + * @flag: bool + * true: ensable "IRQ_NO_BALANCING" bit for msix interrupt + * false: disable "IRQ_NO_BALANCING" bit for msix interrupt + * Description: This function will be called to disable/enable + * "IRQ_NO_BALANCING" to avoid irqbalance daemon + * kicking in during adapter reset. 
+ **/ +static void ipr_set_affinity_nobalance(struct ipr_ioa_cfg *ioa_cfg, bool flag) +{ + int irq, i; + + for (i = 0; i < ioa_cfg->nvectors; i++) { + irq = pci_irq_vector(ioa_cfg->pdev, i); + + if (flag) + irq_set_status_flags(irq, IRQ_NO_BALANCING); + else + irq_clear_status_flags(irq, IRQ_NO_BALANCING); + } +} + +/** * ipr_reset_restore_cfg_space - Restore PCI config space. * @ipr_cmd: ipr command struct * @@ -7866,6 +7890,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd) return IPR_RC_JOB_CONTINUE; } + ipr_set_affinity_nobalance(ioa_cfg, false); ipr_fail_all_ops(ioa_cfg); if (ioa_cfg->sis64) { @@ -7945,6 +7970,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd) rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START); if (rc == PCIBIOS_SUCCESSFUL) { + ipr_set_affinity_nobalance(ioa_cfg, true); ipr_cmd->job_step = ipr_reset_bist_done; ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT); rc = IPR_RC_JOB_RETURN; diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 8566bb1208a057..6b15ad1bcadabe 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -141,6 +141,7 @@ Undo_event_q: Undo_ports: sas_unregister_ports(sas_ha); Undo_phys: + sas_unregister_phys(sas_ha); return error; } diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h index 6706f2be8d274b..d104c87f04f523 100644 --- a/drivers/scsi/libsas/sas_internal.h +++ b/drivers/scsi/libsas/sas_internal.h @@ -54,6 +54,7 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev); void sas_scsi_recover_host(struct Scsi_Host *shost); int sas_register_phys(struct sas_ha_struct *sas_ha); +void sas_unregister_phys(struct sas_ha_struct *sas_ha); struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy, gfp_t gfp_flags); void sas_free_event(struct asd_sas_event *event); @@ -145,20 +146,6 @@ static inline void sas_fail_probe(struct domain_device *dev, const char *func, i func, dev->parent ? "exp-attached" : "direct-attached", SAS_ADDR(dev->sas_addr), err); - - /* - * If the device probe failed, the expander phy attached address - * needs to be reset so that the phy will not be treated as flutter - * in the next revalidation - */ - if (dev->parent && !dev_is_expander(dev->dev_type)) { - struct sas_phy *phy = dev->phy; - struct domain_device *parent = dev->parent; - struct ex_phy *ex_phy = &parent->ex_dev.ex_phy[phy->number]; - - memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE); - } - sas_unregister_dev(dev->port, dev); } diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c index 635835c28ecd43..58f08dc2c1872c 100644 --- a/drivers/scsi/libsas/sas_phy.c +++ b/drivers/scsi/libsas/sas_phy.c @@ -116,6 +116,7 @@ static void sas_phye_shutdown(struct work_struct *work) int sas_register_phys(struct sas_ha_struct *sas_ha) { int i; + int err; /* Now register the phys. 
*/ for (i = 0; i < sas_ha->num_phys; i++) { @@ -132,8 +133,10 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) phy->frame_rcvd_size = 0; phy->phy = sas_phy_alloc(&sas_ha->shost->shost_gendev, i); - if (!phy->phy) - return -ENOMEM; + if (!phy->phy) { + err = -ENOMEM; + goto rollback; + } phy->phy->identify.initiator_port_protocols = phy->iproto; @@ -146,10 +149,34 @@ int sas_register_phys(struct sas_ha_struct *sas_ha) phy->phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN; phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN; - sas_phy_add(phy->phy); + err = sas_phy_add(phy->phy); + if (err) { + sas_phy_free(phy->phy); + goto rollback; + } } return 0; +rollback: + for (i-- ; i >= 0 ; i--) { + struct asd_sas_phy *phy = sas_ha->sas_phy[i]; + + sas_phy_delete(phy->phy); + sas_phy_free(phy->phy); + } + return err; +} + +void sas_unregister_phys(struct sas_ha_struct *sas_ha) +{ + int i; + + for (i = 0 ; i < sas_ha->num_phys ; i++) { + struct asd_sas_phy *phy = sas_ha->sas_phy[i]; + + sas_phy_delete(phy->phy); + sas_phy_free(phy->phy); + } } const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = { diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index 6742684e2990a7..31d68c151b2076 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -56,8 +56,8 @@ extern struct list_head mrioc_list; extern int prot_mask; extern atomic64_t event_counter; -#define MPI3MR_DRIVER_VERSION "8.15.0.5.50" -#define MPI3MR_DRIVER_RELDATE "12-August-2025" +#define MPI3MR_DRIVER_VERSION "8.15.0.5.51" +#define MPI3MR_DRIVER_RELDATE "18-November-2025" #define MPI3MR_DRIVER_NAME "mpi3mr" #define MPI3MR_DRIVER_LICENSE "GPL" diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index b88633e1efe275..d4ca878d088697 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -1184,6 +1184,8 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc, if (is_added == true) tgtdev->io_throttle_enabled = (flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0; + if (!mrioc->sas_transport_enabled) + tgtdev->non_stl = 1; switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) { case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB: @@ -4844,7 +4846,7 @@ static int mpi3mr_target_alloc(struct scsi_target *starget) spin_lock_irqsave(&mrioc->tgtdev_lock, flags); if (starget->channel == mrioc->scsi_device_channel) { tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id); - if (tgt_dev && !tgt_dev->is_hidden) { + if (tgt_dev && !tgt_dev->is_hidden && tgt_dev->non_stl) { scsi_tgt_priv_data->starget = starget; scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle; scsi_tgt_priv_data->perst_id = tgt_dev->perst_id; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 3a57f07d73f587..16a44c0917e18b 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -17,6 +17,7 @@ #include <linux/crash_dump.h> #include <linux/trace_events.h> #include <linux/trace.h> +#include <linux/irq.h> #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> @@ -7776,6 +7777,31 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha) } +/** + * qla2xxx_set_affinity_nobalance + * @pdev: pci_dev struct for a qla2xxx device + * @flag: bool + * true: enable "IRQ_NO_BALANCING" bit for msix interrupt + * false: disable "IRQ_NO_BALANCING" bit for msix interrupt + * Description: This function will be called to disable/enable + * "IRQ_NO_BALANCING" to avoid irqbalance daemon + * kicking in during adapter reset. 
+ **/ + +static void qla2xxx_set_affinity_nobalance(struct pci_dev *pdev, bool flag) +{ + int irq, i; + + for (i = 0; i < QLA_BASE_VECTORS; i++) { + irq = pci_irq_vector(pdev, i); + + if (flag) + irq_set_status_flags(irq, IRQ_NO_BALANCING); + else + irq_clear_status_flags(irq, IRQ_NO_BALANCING); + } +} + static pci_ers_result_t qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) { @@ -7794,6 +7820,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state) goto out; } + qla2xxx_set_affinity_nobalance(pdev, false); + switch (state) { case pci_channel_io_normal: qla_pci_set_eeh_busy(vha); @@ -7930,6 +7958,8 @@ exit_slot_reset: ql_dbg(ql_dbg_aer, base_vha, 0x900e, "Slot Reset returning %x.\n", ret); + qla2xxx_set_affinity_nobalance(pdev, true); + return ret; } diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c index da2fc66ffedd5d..b0a62aaa1cca90 100644 --- a/drivers/scsi/qla4xxx/ql4_nx.c +++ b/drivers/scsi/qla4xxx/ql4_nx.c @@ -1552,7 +1552,7 @@ static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val) (val == PHAN_INITIALIZE_ACK)) return 0; set_current_state(TASK_UNINTERRUPTIBLE); - schedule_timeout(500); + schedule_timeout(msecs_to_jiffies(500)); } while (--retries); diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c index 7b56e00c7df6b0..b9d80531781494 100644 --- a/drivers/scsi/scsi_dh.c +++ b/drivers/scsi/scsi_dh.c @@ -353,7 +353,8 @@ EXPORT_SYMBOL_GPL(scsi_dh_attach); * that may have a device handler attached * @gfp - the GFP mask used in the kmalloc() call when allocating memory * - * Returns name of attached handler, NULL if no handler is attached. + * Returns name of attached handler, NULL if no handler is attached, or + * and error pointer if an error occurred. * Caller must take care to free the returned string. */ const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp) @@ -363,10 +364,11 @@ const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp) sdev = scsi_device_from_queue(q); if (!sdev) - return NULL; + return ERR_PTR(-ENODEV); if (sdev->handler) - handler_name = kstrdup(sdev->handler->name, gfp); + handler_name = kstrdup(sdev->handler->name, gfp) ? : + ERR_PTR(-ENOMEM); put_device(&sdev->sdev_gendev); return handler_name; } diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 51ad2ad07e4390..93031326ac3ee9 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -2801,7 +2801,7 @@ EXPORT_SYMBOL_GPL(sdev_evt_send_simple); * * Must be called with user context, may sleep. * - * Returns zero if unsuccessful or an error if not. + * Returns zero if successful or an error if not. 
*/ int scsi_device_quiesce(struct scsi_device *sdev) diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index b8457477cee9eb..9f167ff8da7b07 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -5,8 +5,7 @@ * Copyright (C) 2011 Chris Boot <bootc@bootc.net> */ -#define KMSG_COMPONENT "sbp_target" -#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt +#define pr_fmt(fmt) "sbp_target: " fmt #include <linux/kernel.h> #include <linux/module.h> diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index e8b7955d40f29a..50d21888a0c9a7 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1524,6 +1524,7 @@ target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp) if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp); if (!cmd->t_task_cdb) { + cmd->t_task_cdb = &cmd->__t_task_cdb[0]; pr_err("Unable to allocate cmd->t_task_cdb" " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", scsi_command_size(cdb), diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig index 90226f72c158aa..f662e7ce71f1bd 100644 --- a/drivers/ufs/Kconfig +++ b/drivers/ufs/Kconfig @@ -6,6 +6,7 @@ menuconfig SCSI_UFSHCD tristate "Universal Flash Storage Controller" depends on SCSI && SCSI_DMA + depends on RPMB || !RPMB select PM_DEVFREQ select DEVFREQ_GOV_SIMPLE_ONDEMAND select NLS diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 040a0ceb170a7f..80c0b49f30b012 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1455,15 +1455,14 @@ out: static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err) { up_write(&hba->clk_scaling_lock); + mutex_unlock(&hba->wb_mutex); + blk_mq_unquiesce_tagset(&hba->host->tag_set); + mutex_unlock(&hba->host->scan_mutex); /* Enable Write Booster if current gear requires it else disable it */ if (ufshcd_enable_wb_if_scaling_up(hba) && !err) ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear); - mutex_unlock(&hba->wb_mutex); - - blk_mq_unquiesce_tagset(&hba->host->tag_set); - mutex_unlock(&hba->host->scan_mutex); ufshcd_release(hba); } @@ -6504,6 +6503,11 @@ static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend) static void ufshcd_err_handling_prepare(struct ufs_hba *hba) { + /* + * A WLUN resume failure could potentially lead to the HBA being + * runtime suspended, so take an extra reference on hba->dev. + */ + pm_runtime_get_sync(hba->dev); ufshcd_rpm_get_sync(hba); if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) || hba->is_sys_suspended) { @@ -6543,6 +6547,7 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba) if (ufshcd_is_clkscaling_supported(hba)) ufshcd_clk_scaling_suspend(hba, false); ufshcd_rpm_put(hba); + pm_runtime_put(hba->dev); } static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba) @@ -6557,28 +6562,42 @@ static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba) #ifdef CONFIG_PM static void ufshcd_recover_pm_error(struct ufs_hba *hba) { + struct scsi_target *starget = hba->ufs_device_wlun->sdev_target; struct Scsi_Host *shost = hba->host; struct scsi_device *sdev; struct request_queue *q; - int ret; + bool resume_sdev_queues = false; hba->is_sys_suspended = false; + /* - * Set RPM status of wlun device to RPM_ACTIVE, - * this also clears its runtime error. 
+ * Ensure the parent's error status is cleared before proceeding + * to the child, as the parent must be active to activate the child. */ - ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev); + if (hba->dev->power.runtime_error) { + /* hba->dev has no functional parent thus simplily set RPM_ACTIVE */ + pm_runtime_set_active(hba->dev); + resume_sdev_queues = true; + } + + if (hba->ufs_device_wlun->sdev_gendev.power.runtime_error) { + /* + * starget, parent of wlun, might be suspended if wlun resume failed. + * Make sure parent is resumed before set child (wlun) active. + */ + pm_runtime_get_sync(&starget->dev); + pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev); + pm_runtime_put_sync(&starget->dev); + resume_sdev_queues = true; + } - /* hba device might have a runtime error otherwise */ - if (ret) - ret = pm_runtime_set_active(hba->dev); /* * If wlun device had runtime error, we also need to resume those * consumer scsi devices in case any of them has failed to be * resumed due to supplier runtime resume failure. This is to unblock * blk_queue_enter in case there are bios waiting inside it. */ - if (!ret) { + if (resume_sdev_queues) { shost_for_each_device(sdev, shost) { q = sdev->request_queue; if (q->dev && (q->rpm_status == RPM_SUSPENDED || @@ -6679,19 +6698,22 @@ static void ufshcd_err_handler(struct work_struct *work) hba->saved_uic_err, hba->force_reset, ufshcd_is_link_broken(hba) ? "; link is broken" : ""); - /* - * Use ufshcd_rpm_get_noresume() here to safely perform link recovery - * even if an error occurs during runtime suspend or runtime resume. - * This avoids potential deadlocks that could happen if we tried to - * resume the device while a PM operation is already in progress. - */ - ufshcd_rpm_get_noresume(hba); - if (hba->pm_op_in_progress) { - ufshcd_link_recovery(hba); + if (hba->ufs_device_wlun) { + /* + * Use ufshcd_rpm_get_noresume() here to safely perform link + * recovery even if an error occurs during runtime suspend or + * runtime resume. This avoids potential deadlocks that could + * happen if we tried to resume the device while a PM operation + * is already in progress. 
+ */ + ufshcd_rpm_get_noresume(hba); + if (hba->pm_op_in_progress) { + ufshcd_link_recovery(hba); + ufshcd_rpm_put(hba); + return; + } ufshcd_rpm_put(hba); - return; } - ufshcd_rpm_put(hba); down(&hba->host_sem); spin_lock_irqsave(hba->host->host_lock, flags); diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 8d119b3223cbda..8ebee0cc531370 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -1769,10 +1769,9 @@ static void ufs_qcom_dump_testbus(struct ufs_hba *hba) { struct ufs_qcom_host *host = ufshcd_get_variant(hba); int i, j, nminor = 0, testbus_len = 0; - u32 *testbus __free(kfree) = NULL; char *prefix; - testbus = kmalloc_array(256, sizeof(u32), GFP_KERNEL); + u32 *testbus __free(kfree) = kmalloc_array(256, sizeof(u32), GFP_KERNEL); if (!testbus) return; @@ -1794,13 +1793,12 @@ static void ufs_qcom_dump_testbus(struct ufs_hba *hba) static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len, const char *prefix, void __iomem *base) { - u32 *regs __free(kfree) = NULL; size_t pos; if (offset % 4 != 0 || len % 4 != 0) return -EINVAL; - regs = kzalloc(len, GFP_ATOMIC); + u32 *regs __free(kfree) = kzalloc(len, GFP_ATOMIC); if (!regs) return -ENOMEM; diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig index 3e7def3d31c16f..3d64a316ca31d3 100644 --- a/fs/ceph/Kconfig +++ b/fs/ceph/Kconfig @@ -3,9 +3,6 @@ config CEPH_FS tristate "Ceph distributed file system" depends on INET select CEPH_LIB - select CRC32 - select CRYPTO_AES - select CRYPTO select NETFS_SUPPORT select FS_ENCRYPTION_ALGS if FS_ENCRYPTION default n diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c index b1a8ff612c41dc..2f663972da99cc 100644 --- a/fs/ceph/caps.c +++ b/fs/ceph/caps.c @@ -18,6 +18,7 @@ #include "crypto.h" #include <linux/ceph/decode.h> #include <linux/ceph/messenger.h> +#include <trace/events/ceph.h> /* * Capability management @@ -4452,6 +4453,9 @@ void ceph_handle_caps(struct ceph_mds_session *session, session->s_mds, ceph_cap_op_name(op), vino.ino, vino.snap, inode, seq, issue_seq, mseq); + trace_ceph_handle_caps(mdsc, session, op, &vino, ceph_inode(inode), + seq, issue_seq, mseq); + mutex_lock(&session->s_mutex); if (!inode) { diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 1740047aef0f9a..7e4eab824daef8 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -24,6 +24,7 @@ #include <linux/ceph/pagelist.h> #include <linux/ceph/auth.h> #include <linux/ceph/debugfs.h> +#include <trace/events/ceph.h> #define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE) @@ -3288,6 +3289,8 @@ static void complete_request(struct ceph_mds_client *mdsc, { req->r_end_latency = ktime_get(); + trace_ceph_mdsc_complete_request(mdsc, req); + if (req->r_callback) req->r_callback(mdsc, req); complete_all(&req->r_completion); @@ -3419,6 +3422,8 @@ static int __send_request(struct ceph_mds_session *session, { int err; + trace_ceph_mdsc_send_request(session, req); + err = __prepare_send_request(session, req, drop_cap_releases); if (!err) { ceph_msg_get(req->r_request); @@ -3470,6 +3475,8 @@ static void __do_request(struct ceph_mds_client *mdsc, } if (mdsc->mdsmap->m_epoch == 0) { doutc(cl, "no mdsmap, waiting for map\n"); + trace_ceph_mdsc_suspend_request(mdsc, session, req, + ceph_mdsc_suspend_reason_no_mdsmap); list_add(&req->r_wait, &mdsc->waiting_for_map); return; } @@ -3491,6 +3498,8 @@ static void __do_request(struct ceph_mds_client *mdsc, goto finish; } doutc(cl, "no mds or not active, waiting for map\n"); + trace_ceph_mdsc_suspend_request(mdsc, session, req, + 
ceph_mdsc_suspend_reason_no_active_mds); list_add(&req->r_wait, &mdsc->waiting_for_map); return; } @@ -3536,9 +3545,11 @@ static void __do_request(struct ceph_mds_client *mdsc, * it to the mdsc queue. */ if (session->s_state == CEPH_MDS_SESSION_REJECTED) { - if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER)) + if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER)) { + trace_ceph_mdsc_suspend_request(mdsc, session, req, + ceph_mdsc_suspend_reason_rejected); list_add(&req->r_wait, &mdsc->waiting_for_map); - else + } else err = -EACCES; goto out_session; } @@ -3552,6 +3563,8 @@ static void __do_request(struct ceph_mds_client *mdsc, if (random) req->r_resend_mds = mds; } + trace_ceph_mdsc_suspend_request(mdsc, session, req, + ceph_mdsc_suspend_reason_session); list_add(&req->r_wait, &session->s_waiting); goto out_session; } @@ -3652,6 +3665,7 @@ static void __wake_requests(struct ceph_mds_client *mdsc, list_del_init(&req->r_wait); doutc(cl, " wake request %p tid %llu\n", req, req->r_tid); + trace_ceph_mdsc_resume_request(mdsc, req); __do_request(mdsc, req); } } @@ -3678,6 +3692,7 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds) req->r_session->s_mds == mds) { doutc(cl, " kicking tid %llu\n", req->r_tid); list_del_init(&req->r_wait); + trace_ceph_mdsc_resume_request(mdsc, req); __do_request(mdsc, req); } } @@ -3724,6 +3739,7 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir, doutc(cl, "submit_request on %p for inode %p\n", req, dir); mutex_lock(&mdsc->mutex); __register_request(mdsc, req, dir); + trace_ceph_mdsc_submit_request(mdsc, req); __do_request(mdsc, req); err = req->r_err; mutex_unlock(&mdsc->mutex); diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index c65f2b202b2b3e..521507ea826000 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c @@ -374,7 +374,7 @@ static int build_snap_context(struct ceph_mds_client *mdsc, /* alloc new snap context */ err = -ENOMEM; - if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64)) + if ((size_t)num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64)) goto fail; snapc = ceph_create_snap_context(num, GFP_NOFS); if (!snapc) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index f6bf24b5c683ef..7c1c1dac320da3 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -30,6 +30,9 @@ #include <uapi/linux/magic.h> +#define CREATE_TRACE_POINTS +#include <trace/events/ceph.h> + static DEFINE_SPINLOCK(ceph_fsc_lock); static LIST_HEAD(ceph_fsc_list); diff --git a/include/trace/events/ceph.h b/include/trace/events/ceph.h new file mode 100644 index 00000000000000..08cb0659fbfc78 --- /dev/null +++ b/include/trace/events/ceph.h @@ -0,0 +1,234 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Ceph filesystem support module tracepoints + * + * Copyright (C) 2025 IONOS SE. All Rights Reserved. 
+ * Written by Max Kellermann (max.kellermann@ionos.com) + */ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM ceph + +#if !defined(_TRACE_CEPH_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_CEPH_H + +#include <linux/tracepoint.h> + +#define ceph_mdsc_suspend_reasons \ + EM(ceph_mdsc_suspend_reason_no_mdsmap, "no-mdsmap") \ + EM(ceph_mdsc_suspend_reason_no_active_mds, "no-active-mds") \ + EM(ceph_mdsc_suspend_reason_rejected, "rejected") \ + E_(ceph_mdsc_suspend_reason_session, "session") + +#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY +#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY + +#undef EM +#undef E_ +#define EM(a, b) a, +#define E_(a, b) a + +enum ceph_mdsc_suspend_reason { ceph_mdsc_suspend_reasons } __mode(byte); + +#endif + +/* + * Export enum symbols via userspace. + */ +#undef EM +#undef E_ +#define EM(a, b) TRACE_DEFINE_ENUM(a); +#define E_(a, b) TRACE_DEFINE_ENUM(a); + +ceph_mdsc_suspend_reasons; + +/* + * Now redefine the EM() and E_() macros to map the enums to the strings that + * will be printed in the output. + */ +#undef EM +#undef E_ +#define EM(a, b) { a, b }, +#define E_(a, b) { a, b } + +TRACE_EVENT(ceph_mdsc_submit_request, + TP_PROTO(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req), + + TP_ARGS(mdsc, req), + + TP_STRUCT__entry( + __field(u64, tid) + __field(int, op) + __field(u64, ino) + __field(u64, snap) + ), + + TP_fast_assign( + struct inode *inode; + + __entry->tid = req->r_tid; + __entry->op = req->r_op; + + inode = req->r_inode; + if (inode == NULL && req->r_dentry) + inode = d_inode(req->r_dentry); + + if (inode) { + __entry->ino = ceph_ino(inode); + __entry->snap = ceph_snap(inode); + } else { + __entry->ino = __entry->snap = 0; + } + ), + + TP_printk("R=%llu op=%s ino=%llx,%llx", + __entry->tid, + ceph_mds_op_name(__entry->op), + __entry->ino, __entry->snap) +); + +TRACE_EVENT(ceph_mdsc_suspend_request, + TP_PROTO(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session, + struct ceph_mds_request *req, + enum ceph_mdsc_suspend_reason reason), + + TP_ARGS(mdsc, session, req, reason), + + TP_STRUCT__entry( + __field(u64, tid) + __field(int, op) + __field(int, mds) + __field(enum ceph_mdsc_suspend_reason, reason) + ), + + TP_fast_assign( + __entry->tid = req->r_tid; + __entry->op = req->r_op; + __entry->mds = session ? 
session->s_mds : -1; + __entry->reason = reason; + ), + + TP_printk("R=%llu op=%s reason=%s", + __entry->tid, + ceph_mds_op_name(__entry->op), + __print_symbolic(__entry->reason, ceph_mdsc_suspend_reasons)) +); + +TRACE_EVENT(ceph_mdsc_resume_request, + TP_PROTO(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req), + + TP_ARGS(mdsc, req), + + TP_STRUCT__entry( + __field(u64, tid) + __field(int, op) + ), + + TP_fast_assign( + __entry->tid = req->r_tid; + __entry->op = req->r_op; + ), + + TP_printk("R=%llu op=%s", + __entry->tid, + ceph_mds_op_name(__entry->op)) +); + +TRACE_EVENT(ceph_mdsc_send_request, + TP_PROTO(struct ceph_mds_session *session, + struct ceph_mds_request *req), + + TP_ARGS(session, req), + + TP_STRUCT__entry( + __field(u64, tid) + __field(int, op) + __field(int, mds) + ), + + TP_fast_assign( + __entry->tid = req->r_tid; + __entry->op = req->r_op; + __entry->mds = session->s_mds; + ), + + TP_printk("R=%llu op=%s mds=%d", + __entry->tid, + ceph_mds_op_name(__entry->op), + __entry->mds) +); + +TRACE_EVENT(ceph_mdsc_complete_request, + TP_PROTO(struct ceph_mds_client *mdsc, + struct ceph_mds_request *req), + + TP_ARGS(mdsc, req), + + TP_STRUCT__entry( + __field(u64, tid) + __field(int, op) + __field(int, err) + __field(unsigned long, latency_ns) + ), + + TP_fast_assign( + __entry->tid = req->r_tid; + __entry->op = req->r_op; + __entry->err = req->r_err; + __entry->latency_ns = req->r_end_latency - req->r_start_latency; + ), + + TP_printk("R=%llu op=%s err=%d latency_ns=%lu", + __entry->tid, + ceph_mds_op_name(__entry->op), + __entry->err, + __entry->latency_ns) +); + +TRACE_EVENT(ceph_handle_caps, + TP_PROTO(struct ceph_mds_client *mdsc, + struct ceph_mds_session *session, + int op, + const struct ceph_vino *vino, + struct ceph_inode_info *inode, + u32 seq, u32 mseq, u32 issue_seq), + + TP_ARGS(mdsc, session, op, vino, inode, seq, mseq, issue_seq), + + TP_STRUCT__entry( + __field(int, mds) + __field(int, op) + __field(u64, ino) + __field(u64, snap) + __field(u32, seq) + __field(u32, mseq) + __field(u32, issue_seq) + ), + + TP_fast_assign( + __entry->mds = session->s_mds; + __entry->op = op; + __entry->ino = vino->ino; + __entry->snap = vino->snap; + __entry->seq = seq; + __entry->mseq = mseq; + __entry->issue_seq = issue_seq; + ), + + TP_printk("mds=%d op=%s vino=%llx.%llx seq=%u iseq=%u mseq=%u", + __entry->mds, + ceph_cap_op_name(__entry->op), + __entry->ino, + __entry->snap, + __entry->seq, + __entry->issue_seq, + __entry->mseq) +); + +#undef EM +#undef E_ +#endif /* _TRACE_CEPH_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/kernel/cpu.c b/kernel/cpu.c index b674fdf96208be..8df2d773fe3b52 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -249,6 +249,14 @@ err: return ret; } +/* + * The former STARTING/DYING states, ran with IRQs disabled and must not fail. + */ +static bool cpuhp_is_atomic_state(enum cpuhp_state state) +{ + return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE; +} + #ifdef CONFIG_SMP static bool cpuhp_is_ap_state(enum cpuhp_state state) { @@ -271,14 +279,6 @@ static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup) complete(done); } -/* - * The former STARTING/DYING states, ran with IRQs disabled and must not fail. 
- */ -static bool cpuhp_is_atomic_state(enum cpuhp_state state) -{ - return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE; -} - /* Synchronization state management */ enum cpuhp_sync_state { SYNC_STATE_DEAD, @@ -2364,7 +2364,14 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup, else ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); #else - ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); + if (cpuhp_is_atomic_state(state)) { + guard(irqsave)(); + ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); + /* STARTING/DYING must not fail! */ + WARN_ON_ONCE(ret); + } else { + ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL); + } #endif BUG_ON(ret && !bringup); return ret; diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 6664ea73ccf81b..3667319b949ddc 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -1280,8 +1280,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum) static struct ceph_osd *get_osd(struct ceph_osd *osd) { if (refcount_inc_not_zero(&osd->o_ref)) { - dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1, - refcount_read(&osd->o_ref)); + dout("get_osd %p -> %d\n", osd, refcount_read(&osd->o_ref)); return osd; } else { dout("get_osd %p FAIL\n", osd); @@ -1291,8 +1290,7 @@ static struct ceph_osd *get_osd(struct ceph_osd *osd) static void put_osd(struct ceph_osd *osd) { - dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref), - refcount_read(&osd->o_ref) - 1); + dout("put_osd %p -> %d\n", osd, refcount_read(&osd->o_ref) - 1); if (refcount_dec_and_test(&osd->o_ref)) { osd_cleanup(osd); kfree(osd); diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index d245fa508e1cc9..34b3ab59602f73 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -806,51 +806,49 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) ceph_decode_need(p, end, len, bad); pool_end = *p + len; + ceph_decode_need(p, end, 4 + 4 + 4, bad); pi->type = ceph_decode_8(p); pi->size = ceph_decode_8(p); pi->crush_ruleset = ceph_decode_8(p); pi->object_hash = ceph_decode_8(p); - pi->pg_num = ceph_decode_32(p); pi->pgp_num = ceph_decode_32(p); - *p += 4 + 4; /* skip lpg* */ - *p += 4; /* skip last_change */ - *p += 8 + 4; /* skip snap_seq, snap_epoch */ + /* lpg*, last_change, snap_seq, snap_epoch */ + ceph_decode_skip_n(p, end, 8 + 4 + 8 + 4, bad); /* skip snaps */ - num = ceph_decode_32(p); + ceph_decode_32_safe(p, end, num, bad); while (num--) { - *p += 8; /* snapid key */ - *p += 1 + 1; /* versions */ - len = ceph_decode_32(p); - *p += len; + /* snapid key, pool snap (with versions) */ + ceph_decode_skip_n(p, end, 8 + 2, bad); + ceph_decode_skip_string(p, end, bad); } - /* skip removed_snaps */ - num = ceph_decode_32(p); - *p += num * (8 + 8); + /* removed_snaps */ + ceph_decode_skip_map(p, end, 64, 64, bad); + ceph_decode_need(p, end, 8 + 8 + 4, bad); *p += 8; /* skip auid */ pi->flags = ceph_decode_64(p); *p += 4; /* skip crash_replay_interval */ if (ev >= 7) - pi->min_size = ceph_decode_8(p); + ceph_decode_8_safe(p, end, pi->min_size, bad); else pi->min_size = pi->size - pi->size / 2; if (ev >= 8) - *p += 8 + 8; /* skip quota_max_* */ + /* quota_max_* */ + ceph_decode_skip_n(p, end, 8 + 8, bad); if (ev >= 9) { - /* skip tiers */ - num = ceph_decode_32(p); - *p += num * 8; + /* tiers */ + ceph_decode_skip_set(p, end, 64, bad); + ceph_decode_need(p, end, 8 + 1 + 8 + 8, bad); *p += 8; /* skip tier_of */ *p += 1; /* skip cache_mode */ - 
pi->read_tier = ceph_decode_64(p); pi->write_tier = ceph_decode_64(p); } else { @@ -858,86 +856,76 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi) pi->write_tier = -1; } - if (ev >= 10) { - /* skip properties */ - num = ceph_decode_32(p); - while (num--) { - len = ceph_decode_32(p); - *p += len; /* key */ - len = ceph_decode_32(p); - *p += len; /* val */ - } - } + if (ev >= 10) + /* properties */ + ceph_decode_skip_map(p, end, string, string, bad); if (ev >= 11) { - /* skip hit_set_params */ - *p += 1 + 1; /* versions */ - len = ceph_decode_32(p); - *p += len; + /* hit_set_params (with versions) */ + ceph_decode_skip_n(p, end, 2, bad); + ceph_decode_skip_string(p, end, bad); - *p += 4; /* skip hit_set_period */ - *p += 4; /* skip hit_set_count */ + /* hit_set_period, hit_set_count */ + ceph_decode_skip_n(p, end, 4 + 4, bad); } if (ev >= 12) - *p += 4; /* skip stripe_width */ + /* stripe_width */ + ceph_decode_skip_32(p, end, bad); - if (ev >= 13) { - *p += 8; /* skip target_max_bytes */ - *p += 8; /* skip target_max_objects */ - *p += 4; /* skip cache_target_dirty_ratio_micro */ - *p += 4; /* skip cache_target_full_ratio_micro */ - *p += 4; /* skip cache_min_flush_age */ - *p += 4; /* skip cache_min_evict_age */ - } + if (ev >= 13) + /* target_max_*, cache_target_*, cache_min_* */ + ceph_decode_skip_n(p, end, 16 + 8 + 8, bad); - if (ev >= 14) { - /* skip erasure_code_profile */ - len = ceph_decode_32(p); - *p += len; - } + if (ev >= 14) + /* erasure_code_profile */ + ceph_decode_skip_string(p, end, bad); /* * last_force_op_resend_preluminous, will be overridden if the * map was encoded with RESEND_ON_SPLIT */ if (ev >= 15) - pi->last_force_request_resend = ceph_decode_32(p); + ceph_decode_32_safe(p, end, pi->last_force_request_resend, bad); else pi->last_force_request_resend = 0; if (ev >= 16) - *p += 4; /* skip min_read_recency_for_promote */ + /* min_read_recency_for_promote */ + ceph_decode_skip_32(p, end, bad); if (ev >= 17) - *p += 8; /* skip expected_num_objects */ + /* expected_num_objects */ + ceph_decode_skip_64(p, end, bad); if (ev >= 19) - *p += 4; /* skip cache_target_dirty_high_ratio_micro */ + /* cache_target_dirty_high_ratio_micro */ + ceph_decode_skip_32(p, end, bad); if (ev >= 20) - *p += 4; /* skip min_write_recency_for_promote */ + /* min_write_recency_for_promote */ + ceph_decode_skip_32(p, end, bad); if (ev >= 21) - *p += 1; /* skip use_gmt_hitset */ + /* use_gmt_hitset */ + ceph_decode_skip_8(p, end, bad); if (ev >= 22) - *p += 1; /* skip fast_read */ + /* fast_read */ + ceph_decode_skip_8(p, end, bad); - if (ev >= 23) { - *p += 4; /* skip hit_set_grade_decay_rate */ - *p += 4; /* skip hit_set_search_last_n */ - } + if (ev >= 23) + /* hit_set_grade_decay_rate, hit_set_search_last_n */ + ceph_decode_skip_n(p, end, 4 + 4, bad); if (ev >= 24) { - /* skip opts */ - *p += 1 + 1; /* versions */ - len = ceph_decode_32(p); - *p += len; + /* opts (with versions) */ + ceph_decode_skip_n(p, end, 2, bad); + ceph_decode_skip_string(p, end, bad); } if (ev >= 25) - pi->last_force_request_resend = ceph_decode_32(p); + ceph_decode_32_safe(p, end, pi->last_force_request_resend, bad); /* ignore the rest */ @@ -1438,7 +1426,7 @@ static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end, ceph_decode_32_safe(p, end, len, e_inval); if (len == 0 && incremental) return NULL; /* new_pg_temp: [] to remove */ - if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32)) + if ((size_t)len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32)) return ERR_PTR(-EINVAL); 
ceph_decode_need(p, end, len * sizeof(u32), e_inval); @@ -1619,7 +1607,7 @@ static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end, u32 len, i; ceph_decode_32_safe(p, end, len, e_inval); - if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32))) + if ((size_t)len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32))) return ERR_PTR(-EINVAL); ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval); diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 5f9ccab26e9abb..90cf0e2969df8e 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -934,17 +934,12 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos, #endif if (page != dump->page) { const unsigned int offset = pos % PAGE_SIZE; - /* - * Maybe kmap()/kunmap() should be used here. - * But remove_arg_zero() uses kmap_atomic()/kunmap_atomic(). - * So do I. - */ - char *kaddr = kmap_atomic(page); + char *kaddr = kmap_local_page(page); dump->page = page; memcpy(dump->data + offset, kaddr + offset, PAGE_SIZE - offset); - kunmap_atomic(kaddr); + kunmap_local(kaddr); } /* Same with put_arg_page(page) in fs/exec.c */ #ifdef CONFIG_MMU |
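
Note on the qla4xxx hunk above, which replaces schedule_timeout(500) with schedule_timeout(msecs_to_jiffies(500)): schedule_timeout() counts jiffies, so a bare 500 means 0.5 s only when CONFIG_HZ=1000 (and 5 s at HZ=100). The following is a minimal sketch of the corrected, HZ-independent pattern; the helper name is made up for illustration and is not part of the driver.

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/sched.h>

/* Illustrative helper, not from the patch: sleep roughly 500 ms regardless
 * of CONFIG_HZ. With a raw schedule_timeout(500) the delay would vary with
 * the configured tick rate. */
static void wait_half_second_example(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(500));
}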
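
For the ufs-qcom hunks, which move the buffer allocation into the __free(kfree) declaration itself: below is a minimal, hypothetical sketch of that scoped-cleanup pattern, assuming a kernel tree that provides <linux/cleanup.h>. The function name is illustrative only; the point is that the pointer is bound to its allocation at the declaration, so kfree() runs automatically on every return path and the annotated variable is never left NULL-initialised ahead of the real allocation.

#include <linux/cleanup.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative only: dump_example() is a made-up helper, not part of the
 * ufs-qcom driver. The buffer is tied to __free(kfree) at its declaration,
 * mirroring the style the patch switches ufs_qcom_dump_regs() to. */
static int dump_example(size_t len)
{
	u32 *regs __free(kfree) = kzalloc(len, GFP_ATOMIC);

	if (!regs)
		return -ENOMEM;	/* pointer is NULL, nothing to free */

	/* ... read registers into regs[] and print them ... */

	return 0;		/* regs is kfree()d automatically here */
}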
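
And for the tomoyo hunk, which replaces kmap_atomic()/kunmap_atomic() with kmap_local_page()/kunmap_local(): a minimal, self-contained sketch of the newer mapping API. The function below is hypothetical; the real change lives in tomoyo_dump_page().

#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/string.h>

/* Illustrative only: copy up to one page of data through a CPU-local
 * mapping. Unlike kmap_atomic(), kmap_local_page() does not disable
 * preemption or pagefaults; the mapping is thread/CPU local and must be
 * released with kunmap_local(). Caller ensures len <= PAGE_SIZE. */
static void copy_from_page_example(struct page *page, void *dst, size_t len)
{
	char *kaddr = kmap_local_page(page);

	memcpy(dst, kaddr, len);
	kunmap_local(kaddr);
}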
