-rw-r--r--  Documentation/dev-tools/checkpatch.rst | 23
-rw-r--r--  Documentation/translations/zh_CN/core-api/irq/irq-domain.rst | 4
-rw-r--r--  MAINTAINERS | 1
-rw-r--r--  Makefile | 4
-rw-r--r--  arch/powerpc/platforms/pseries/cmm.c | 3
-rw-r--r--  arch/x86/events/amd/uncore.c | 5
-rw-r--r--  arch/x86/events/intel/core.c | 3
-rw-r--r--  arch/x86/hyperv/.gitignore | 1
-rw-r--r--  drivers/block/Kconfig | 3
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi83.c | 11
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 8
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 25
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c | 20
-rw-r--r--  drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h | 19
-rw-r--r--  drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_hwmon.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c | 61
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c | 10
-rw-r--r--  drivers/gpu/drm/panel/panel-novatek-nt35560.c | 8
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c | 2
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.c | 53
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_drv.h | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c | 6
-rw-r--r--  drivers/irqchip/irq-mchp-eic.c | 2
-rw-r--r--  drivers/md/dm-mpath.c | 13
-rw-r--r--  drivers/misc/Kconfig | 1
-rw-r--r--  drivers/scsi/imm.c | 1
-rw-r--r--  drivers/scsi/ipr.c | 28
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 1
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 15
-rw-r--r--  drivers/scsi/libsas/sas_phy.c | 33
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr.h | 4
-rw-r--r--  drivers/scsi/mpi3mr/mpi3mr_os.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 30
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 2
-rw-r--r--  drivers/scsi/scsi_dh.c | 8
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/target/sbp/sbp_target.c | 3
-rw-r--r--  drivers/target/target_core_transport.c | 1
-rw-r--r--  drivers/ufs/Kconfig | 1
-rw-r--r--  drivers/ufs/core/ufshcd.c | 68
-rw-r--r--  drivers/ufs/host/ufs-qcom.c | 6
-rw-r--r--  fs/ceph/Kconfig | 3
-rw-r--r--  fs/ceph/caps.c | 4
-rw-r--r--  fs/ceph/mds_client.c | 20
-rw-r--r--  fs/ceph/snap.c | 2
-rw-r--r--  fs/ceph/super.c | 3
-rw-r--r--  fs/fat/cache.c | 7
-rw-r--r--  fs/ocfs2/alloc.c | 12
-rw-r--r--  fs/ocfs2/buffer_head_io.c | 2
-rw-r--r--  fs/ocfs2/cluster/nodemanager.c | 3
-rw-r--r--  fs/ocfs2/dir.c | 10
-rw-r--r--  fs/ocfs2/file.c | 14
-rw-r--r--  fs/ocfs2/inode.c | 11
-rw-r--r--  fs/ocfs2/move_extents.c | 2
-rw-r--r--  fs/ocfs2/namei.c | 3
-rw-r--r--  fs/ocfs2/ocfs2.h | 18
-rw-r--r--  fs/ocfs2/resize.c | 4
-rw-r--r--  fs/ocfs2/stackglue.c | 3
-rw-r--r--  fs/ocfs2/suballoc.c | 13
-rw-r--r--  fs/ocfs2/super.c | 2
-rw-r--r--  fs/ocfs2/xattr.c | 38
-rw-r--r--  include/linux/args.h | 4
-rw-r--r--  include/linux/file.h | 13
-rw-r--r--  include/linux/huge_mm.h | 13
-rw-r--r--  include/linux/irqdomain.h | 16
-rw-r--r--  include/linux/mm.h | 2
-rw-r--r--  include/linux/mmzone.h | 2
-rw-r--r--  include/linux/rseq_entry.h | 2
-rw-r--r--  include/trace/events/ceph.h | 234
-rw-r--r--  kernel/cpu.c | 25
-rw-r--r--  kernel/events/core.c | 22
-rw-r--r--  kernel/events/uprobes.c | 8
-rw-r--r--  kernel/irq/manage.c | 3
-rw-r--r--  kernel/liveupdate/Kconfig | 1
-rw-r--r--  kernel/liveupdate/luo_core.c | 4
-rw-r--r--  kernel/liveupdate/luo_file.c | 7
-rw-r--r--  lib/bug.c | 6
-rw-r--r--  mm/damon/tests/core-kunit.h | 9
-rw-r--r--  mm/huge_memory.c | 165
-rw-r--r--  mm/hugetlb.c | 25
-rw-r--r--  mm/shmem.c | 11
-rw-r--r--  mm/vmscan.c | 8
-rw-r--r--  net/ceph/osd_client.c | 6
-rw-r--r--  net/ceph/osdmap.c | 120
-rwxr-xr-x  scripts/checkpatch.pl | 6
-rw-r--r--  security/tomoyo/domain.c | 9
89 files changed, 989 insertions, 418 deletions
diff --git a/Documentation/dev-tools/checkpatch.rst b/Documentation/dev-tools/checkpatch.rst
index fa2988dd465732..deb3f67a633cfc 100644
--- a/Documentation/dev-tools/checkpatch.rst
+++ b/Documentation/dev-tools/checkpatch.rst
@@ -1002,6 +1002,29 @@ Functions and Variables
return bar;
+ **UNINITIALIZED_PTR_WITH_FREE**
+ Pointers with the __free attribute should be declared at the place of
+ use and initialized there (see include/linux/cleanup.h). In this case
+ the usual declarations-at-the-top-of-the-function rule can be relaxed.
+ Not doing so may lead to undefined behavior, as the memory assigned to
+ the pointer (garbage, if it was never initialized) is freed
+ automatically when the pointer goes out of scope.
+
+ Also see: https://lore.kernel.org/lkml/58fd478f408a34b578ee8d949c5c4b4da4d4f41d.camel@HansenPartnership.com/
+
+ Example::
+
+ type var __free(free_func);
+ ... // var not used yet, but in future someone might add a return here
+ var = malloc(var_size);
+ ...
+
+ should be initialized as::
+
+ ...
+ type var __free(free_func) = malloc(var_size);
+ ...
+
Permissions
-----------
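
The rule is easiest to see with a concrete kernel type. A minimal sketch of the pattern the new check enforces, assuming a caller that may bail out early (function name and sizes are hypothetical):

#include <linux/cleanup.h>
#include <linux/slab.h>

static int demo_read(size_t len)
{
	/*
	 * Declaration and initialization in one statement: if a later
	 * early return is added, kfree() runs on a valid pointer (or
	 * NULL, which kfree() accepts), never on uninitialized stack
	 * garbage.
	 */
	u8 *buf __free(kfree) = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	/* ... use buf ... */
	return 0;	/* buf is freed automatically on every exit path */
}

The ufs-qcom hunks later in this diff apply exactly this conversion to real code.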
diff --git a/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst b/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst
index 4a2d3b27aa4d21..aaefeda0e16444 100644
--- a/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst
+++ b/Documentation/translations/zh_CN/core-api/irq/irq-domain.rst
@@ -109,10 +109,6 @@ irq_domain维护着从hwirq号到Linux IRQ的radix的树状映射。 当一个hw
如果hwirq号可以非常大,树状映射是一个很好的选择,因为它不需要分配一个和最大hwirq
号一样大的表。 缺点是,hwirq到IRQ号的查找取决于表中有多少条目。
-irq_domain_add_tree()和irq_domain_create_tree()在功能上是等价的,除了第一
-个参数不同——前者接受一个Open Firmware特定的 'struct device_node' ,而后者接受
-一个更通用的抽象 'struct fwnode_handle' 。
-
很少有驱动应该需要这个映射。
无映射
diff --git a/MAINTAINERS b/MAINTAINERS
index 56975a34e0de44..5b11839cba9de1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -28321,6 +28321,7 @@ M: Matthew Wilcox <willy@infradead.org>
L: linux-fsdevel@vger.kernel.org
L: linux-mm@kvack.org
S: Supported
+F: Documentation/core-api/idr.rst
F: Documentation/core-api/xarray.rst
F: include/linux/idr.h
F: include/linux/xarray.h
diff --git a/Makefile b/Makefile
index 2f545ec1690fb9..e404e4767944ed 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 6
-PATCHLEVEL = 18
+PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Baby Opossum Posse
# *DOCUMENTATION*
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 502133979e2289..4cbbe2ee58aba7 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -532,6 +532,7 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
balloon_page_insert(b_dev_info, newpage);
+ __count_vm_event(BALLOON_MIGRATE);
b_dev_info->isolated_pages--;
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
@@ -550,7 +551,6 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
static void cmm_balloon_compaction_init(void)
{
- balloon_devinfo_init(&b_dev_info);
b_dev_info.migratepage = cmm_migratepage;
}
#else /* CONFIG_BALLOON_COMPACTION */
@@ -572,6 +572,7 @@ static int cmm_init(void)
if (!firmware_has_feature(FW_FEATURE_CMO) && !simulate)
return -EOPNOTSUPP;
+ balloon_devinfo_init(&b_dev_info);
cmm_balloon_compaction_init();
rc = register_oom_notifier(&cmm_oom_nb);
diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c
index e8b6af199c738e..9293ce50574dad 100644
--- a/arch/x86/events/amd/uncore.c
+++ b/arch/x86/events/amd/uncore.c
@@ -656,14 +656,11 @@ static int amd_uncore_df_event_init(struct perf_event *event)
struct hw_perf_event *hwc = &event->hw;
int ret = amd_uncore_event_init(event);
- if (ret || pmu_version < 2)
- return ret;
-
hwc->config = event->attr.config &
(pmu_version >= 2 ? AMD64_PERFMON_V2_RAW_EVENT_MASK_NB :
AMD64_RAW_EVENT_MASK_NB);
- return 0;
+ return ret;
}
static int amd_uncore_df_add(struct perf_event *event, int flags)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 853fe073bab301..bdf3f0d0fe2167 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3378,6 +3378,9 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
if (!test_bit(bit, cpuc->active_mask))
continue;
+ /* Event may have already been cleared: */
+ if (!event)
+ continue;
/*
* There may be unprocessed PEBS records in the PEBS buffer,
diff --git a/arch/x86/hyperv/.gitignore b/arch/x86/hyperv/.gitignore
new file mode 100644
index 00000000000000..333615d993b56c
--- /dev/null
+++ b/arch/x86/hyperv/.gitignore
@@ -0,0 +1 @@
+mshv-asm-offsets.h
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 77d6944489905b..858320b6ebb7ef 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -316,9 +316,6 @@ config BLK_DEV_RBD
tristate "Rados block device (RBD)"
depends on INET && BLOCK
select CEPH_LIB
- select CRC32
- select CRYPTO_AES
- select CRYPTO
help
Say Y here if you want include the Rados block device, which stripes
a block device over objects stored in the Ceph distributed object
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index 033c44326552ab..fffb47b62f437b 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -429,7 +429,14 @@ static void sn65dsi83_handle_errors(struct sn65dsi83 *ctx)
*/
ret = regmap_read(ctx->regmap, REG_IRQ_STAT, &irq_stat);
- if (ret || irq_stat) {
+
+ /*
+ * Some hardware (Toradex Verdin AM62) is known to report the
+ * PLL_UNLOCK error interrupt while working without visible
+ * problems. Lacking a reliable way to distinguish such cases
+ * from user-visible PLL_UNLOCK failures, ignore that bit entirely.
+ */
+ if (ret || irq_stat & ~REG_IRQ_STAT_CHA_PLL_UNLOCK) {
/*
* IRQ acknowledged is not always possible (the bridge can be in
* a state where it doesn't answer anymore). To prevent an
@@ -654,7 +661,7 @@ static void sn65dsi83_atomic_enable(struct drm_bridge *bridge,
if (ctx->irq) {
/* Enable irq to detect errors */
regmap_write(ctx->regmap, REG_IRQ_GLOBAL, REG_IRQ_GLOBAL_IRQ_EN);
- regmap_write(ctx->regmap, REG_IRQ_EN, 0xff);
+ regmap_write(ctx->regmap, REG_IRQ_EN, 0xff & ~REG_IRQ_EN_CHA_PLL_UNLOCK_EN);
} else {
/* Use the polling task */
sn65dsi83_monitor_start(ctx);
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index ce76c55913f725..b143589717e648 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -338,14 +338,14 @@ static int drm_plane_create_hotspot_properties(struct drm_plane *plane)
prop_x = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_X",
INT_MIN, INT_MAX);
- if (IS_ERR(prop_x))
- return PTR_ERR(prop_x);
+ if (!prop_x)
+ return -ENOMEM;
prop_y = drm_property_create_signed_range(plane->dev, 0, "HOTSPOT_Y",
INT_MIN, INT_MAX);
- if (IS_ERR(prop_y)) {
+ if (!prop_y) {
drm_property_destroy(plane->dev, prop_x);
- return PTR_ERR(prop_y);
+ return -ENOMEM;
}
drm_object_attach_property(&plane->base, prop_x, 0);
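
The fix matters because the two failure conventions are not interchangeable: IS_ERR() never fires for NULL, so a failed drm_property_create_signed_range() (which, like the other DRM property constructors, returns NULL rather than an ERR_PTR) sailed straight past the old checks. A minimal sketch of the distinction, not tied to drm:

#include <linux/err.h>

static int classify(void *p)
{
	if (IS_ERR(p))		/* encoded errno, e.g. ERR_PTR(-ENOMEM) */
		return PTR_ERR(p);
	if (!p)			/* NULL-returning allocators land here */
		return -ENOMEM;
	return 0;		/* valid pointer */
}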
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 951d715dea3011..d019177462cf7f 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -161,6 +161,30 @@ static void mgag200_set_startadd(struct mga_device *mdev,
WREG_ECRT(0x00, crtcext0);
}
+/*
+ * Set the opmode for the hardware swapper for Big-Endian processor
+ * support for the frame buffer aperture and DMAWIN space.
+ */
+static void mgag200_set_datasiz(struct mga_device *mdev, u32 format)
+{
+#if defined(__BIG_ENDIAN)
+ u32 opmode = RREG32(MGAREG_OPMODE);
+
+ opmode &= ~(GENMASK(17, 16) | GENMASK(9, 8) | GENMASK(3, 2));
+
+ /* Big-endian byte-swapping */
+ switch (format) {
+ case DRM_FORMAT_RGB565:
+ opmode |= 0x10100;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ opmode |= 0x20200;
+ break;
+ }
+ WREG32(MGAREG_OPMODE, opmode);
+#endif
+}
+
void mgag200_init_registers(struct mga_device *mdev)
{
u8 crtc11, misc;
@@ -496,6 +520,7 @@ void mgag200_primary_plane_helper_atomic_update(struct drm_plane *plane,
struct drm_atomic_helper_damage_iter iter;
struct drm_rect damage;
+ mgag200_set_datasiz(mdev, fb->format->format);
drm_atomic_helper_damage_iter_init(&iter, old_plane_state, plane_state);
drm_atomic_for_each_plane_damage(&iter, &damage) {
mgag200_handle_damage(mdev, shadow_plane_state->data, fb, &damage);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c b/drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c
index e2bf99c433366b..a60209097a20a6 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/nouveau_i2c_encoder.c
@@ -94,26 +94,6 @@ fail_unregister:
return err;
}
-/**
- * nouveau_i2c_encoder_destroy - Unregister the I2C device backing an encoder
- * @drm_encoder: Encoder to be unregistered.
- *
- * This should be called from the @destroy method of an I2C slave
- * encoder driver once I2C access is no longer needed.
- */
-void nouveau_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
-{
- struct nouveau_i2c_encoder *encoder = to_encoder_i2c(drm_encoder);
- struct i2c_client *client = nouveau_i2c_encoder_get_client(drm_encoder);
- struct module *module = client->dev.driver->owner;
-
- i2c_unregister_device(client);
- encoder->i2c_client = NULL;
-
- module_put(module);
-}
-EXPORT_SYMBOL(nouveau_i2c_encoder_destroy);
-
/*
* Wrapper fxns which can be plugged in to drm_encoder_helper_funcs:
*/
diff --git a/drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h
index 31334aa90781b3..869820701a56eb 100644
--- a/drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h
+++ b/drivers/gpu/drm/nouveau/include/dispnv04/i2c/encoder_i2c.h
@@ -202,7 +202,24 @@ static inline struct i2c_client *nouveau_i2c_encoder_get_client(struct drm_encod
return to_encoder_i2c(encoder)->i2c_client;
}
-void nouveau_i2c_encoder_destroy(struct drm_encoder *encoder);
+/**
+ * nouveau_i2c_encoder_destroy - Unregister the I2C device backing an encoder
+ * @drm_encoder: Encoder to be unregistered.
+ *
+ * This should be called from the @destroy method of an I2C slave
+ * encoder driver once I2C access is no longer needed.
+ */
+static __always_inline void nouveau_i2c_encoder_destroy(struct drm_encoder *drm_encoder)
+{
+ struct nouveau_i2c_encoder *encoder = to_encoder_i2c(drm_encoder);
+ struct i2c_client *client = nouveau_i2c_encoder_get_client(drm_encoder);
+ struct module *module = client->dev.driver->owner;
+
+ i2c_unregister_device(client);
+ encoder->i2c_client = NULL;
+
+ module_put(module);
+}
/*
* Wrapper fxns which can be plugged in to drm_encoder_helper_funcs:
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 226c7ec56b8ed9..b8b97e10ae83ed 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -73,6 +73,10 @@ struct nvkm_gsp {
const struct firmware *bl;
const struct firmware *rm;
+
+ struct {
+ struct nvkm_falcon_fw sb;
+ } falcon;
} fws;
struct nvkm_firmware fw;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 869d4335c0f45c..4a193b7d6d9e40 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -183,11 +183,11 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
fctx->context = drm->runl[chan->runlist].context_base + chan->chid;
if (chan == drm->cechan)
- strcpy(fctx->name, "copy engine channel");
+ strscpy(fctx->name, "copy engine channel");
else if (chan == drm->channel)
- strcpy(fctx->name, "generic kernel channel");
+ strscpy(fctx->name, "generic kernel channel");
else
- strcpy(fctx->name, cli->name);
+ strscpy(fctx->name, cli->name);
kref_init(&fctx->fence_ref);
if (!priv->uevent)
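
The two-argument strscpy() form used here is a macro that infers the buffer size from the destination, so it only works when the destination is a fixed-size array; unlike strcpy() it guarantees NUL termination and reports truncation. A small sketch (struct and names hypothetical):

#include <linux/string.h>
#include <linux/printk.h>

struct demo {
	char name[16];
};

static void demo_set_name(struct demo *d, const char *src)
{
	/* expands to strscpy(d->name, src, sizeof(d->name)) */
	if (strscpy(d->name, src) < 0)
		pr_warn("demo: name \"%s\" truncated\n", src);
}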
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
index 5c07a9ee8b775f..34effe6d86ad50 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -125,7 +125,7 @@ nouveau_hwmon_get_pwm1_max(struct device *d,
if (ret < 0)
return ret;
- return sprintf(buf, "%i\n", ret);
+ return sysfs_emit(buf, "%i\n", ret);
}
static ssize_t
@@ -141,7 +141,7 @@ nouveau_hwmon_get_pwm1_min(struct device *d,
if (ret < 0)
return ret;
- return sprintf(buf, "%i\n", ret);
+ return sysfs_emit(buf, "%i\n", ret);
}
static ssize_t
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
index 5b721bd9d79949..50376024666042 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
@@ -259,18 +259,16 @@ nvkm_gsp_fwsec_v3(struct nvkm_gsp *gsp, const char *name,
}
static int
-nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd)
+nvkm_gsp_fwsec_init(struct nvkm_gsp *gsp, struct nvkm_falcon_fw *fw, const char *name, u32 init_cmd)
{
struct nvkm_subdev *subdev = &gsp->subdev;
struct nvkm_device *device = subdev->device;
struct nvkm_bios *bios = device->bios;
const union nvfw_falcon_ucode_desc *desc;
struct nvbios_pmuE flcn_ucode;
- u8 idx, ver, hdr;
u32 data;
u16 size, vers;
- struct nvkm_falcon_fw fw = {};
- u32 mbox0 = 0;
+ u8 idx, ver, hdr;
int ret;
/* Lookup in VBIOS. */
@@ -291,8 +289,8 @@ nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd)
vers = (desc->v2.Hdr & 0x0000ff00) >> 8;
switch (vers) {
- case 2: ret = nvkm_gsp_fwsec_v2(gsp, name, &desc->v2, size, init_cmd, &fw); break;
- case 3: ret = nvkm_gsp_fwsec_v3(gsp, name, &desc->v3, size, init_cmd, &fw); break;
+ case 2: ret = nvkm_gsp_fwsec_v2(gsp, name, &desc->v2, size, init_cmd, fw); break;
+ case 3: ret = nvkm_gsp_fwsec_v3(gsp, name, &desc->v3, size, init_cmd, fw); break;
default:
nvkm_error(subdev, "%s(v%d): version unknown\n", name, vers);
return -EINVAL;
@@ -303,15 +301,19 @@ nvkm_gsp_fwsec(struct nvkm_gsp *gsp, const char *name, u32 init_cmd)
return ret;
}
- /* Boot. */
- ret = nvkm_falcon_fw_boot(&fw, subdev, true, &mbox0, NULL, 0, 0);
- nvkm_falcon_fw_dtor(&fw);
- if (ret)
- return ret;
-
return 0;
}
+static int
+nvkm_gsp_fwsec_boot(struct nvkm_gsp *gsp, struct nvkm_falcon_fw *fw)
+{
+ struct nvkm_subdev *subdev = &gsp->subdev;
+ u32 mbox0 = 0;
+
+ /* Boot */
+ return nvkm_falcon_fw_boot(fw, subdev, true, &mbox0, NULL, 0, 0);
+}
+
int
nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
{
@@ -320,7 +322,7 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
int ret;
u32 err;
- ret = nvkm_gsp_fwsec(gsp, "fwsec-sb", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB);
+ ret = nvkm_gsp_fwsec_boot(gsp, &gsp->fws.falcon.sb);
if (ret)
return ret;
@@ -335,26 +337,47 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
}
int
+nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *gsp)
+{
+ return nvkm_gsp_fwsec_init(gsp, &gsp->fws.falcon.sb, "fwsec-sb",
+ NVFW_FALCON_APPIF_DMEMMAPPER_CMD_SB);
+}
+
+void
+nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *gsp)
+{
+ nvkm_falcon_fw_dtor(&gsp->fws.falcon.sb);
+}
+
+int
nvkm_gsp_fwsec_frts(struct nvkm_gsp *gsp)
{
struct nvkm_subdev *subdev = &gsp->subdev;
struct nvkm_device *device = subdev->device;
+ struct nvkm_falcon_fw fw = {};
int ret;
u32 err, wpr2_lo, wpr2_hi;
- ret = nvkm_gsp_fwsec(gsp, "fwsec-frts", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS);
+ ret = nvkm_gsp_fwsec_init(gsp, &fw, "fwsec-frts", NVFW_FALCON_APPIF_DMEMMAPPER_CMD_FRTS);
if (ret)
return ret;
+ ret = nvkm_gsp_fwsec_boot(gsp, &fw);
+ if (ret)
+ goto fwsec_dtor;
+
/* Verify. */
err = nvkm_rd32(device, 0x001400 + (0xe * 4)) >> 16;
if (err) {
nvkm_error(subdev, "fwsec-frts: 0x%04x\n", err);
- return -EIO;
+ ret = -EIO;
+ } else {
+ wpr2_lo = nvkm_rd32(device, 0x1fa824);
+ wpr2_hi = nvkm_rd32(device, 0x1fa828);
+ nvkm_debug(subdev, "fwsec-frts: WPR2 @ %08x - %08x\n", wpr2_lo, wpr2_hi);
}
- wpr2_lo = nvkm_rd32(device, 0x1fa824);
- wpr2_hi = nvkm_rd32(device, 0x1fa828);
- nvkm_debug(subdev, "fwsec-frts: WPR2 @ %08x - %08x\n", wpr2_lo, wpr2_hi);
- return 0;
+fwsec_dtor:
+ nvkm_falcon_fw_dtor(&fw);
+ return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
index c3494b7ac572bc..86bdd203bc107c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/priv.h
@@ -6,7 +6,10 @@
enum nvkm_acr_lsf_id;
int nvkm_gsp_fwsec_frts(struct nvkm_gsp *);
+
+int nvkm_gsp_fwsec_sb_ctor(struct nvkm_gsp *);
int nvkm_gsp_fwsec_sb(struct nvkm_gsp *);
+void nvkm_gsp_fwsec_sb_dtor(struct nvkm_gsp *);
struct nvkm_gsp_fwif {
int version;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
index 32e6a065d6d7a5..2a7e80c6d70f39 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/gsp.c
@@ -1817,12 +1817,16 @@ r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp)
RM_RISCV_UCODE_DESC *desc;
int ret;
+ ret = nvkm_gsp_fwsec_sb_ctor(gsp);
+ if (ret)
+ return ret;
+
hdr = nvfw_bin_hdr(&gsp->subdev, fw->data);
desc = (void *)fw->data + hdr->header_offset;
ret = nvkm_gsp_mem_ctor(gsp, hdr->data_size, &gsp->boot.fw);
if (ret)
- return ret;
+ goto dtor_fwsec;
memcpy(gsp->boot.fw.data, fw->data + hdr->data_offset, hdr->data_size);
@@ -1831,6 +1835,9 @@ r535_gsp_rm_boot_ctor(struct nvkm_gsp *gsp)
gsp->boot.manifest_offset = desc->manifestOffset;
gsp->boot.app_version = desc->appVersion;
return 0;
+dtor_fwsec:
+ nvkm_gsp_fwsec_sb_dtor(gsp);
+ return ret;
}
static const struct nvkm_firmware_func
@@ -2101,6 +2108,7 @@ r535_gsp_dtor(struct nvkm_gsp *gsp)
mutex_destroy(&gsp->cmdq.mutex);
nvkm_gsp_dtor_fws(gsp);
+ nvkm_gsp_fwsec_sb_dtor(gsp);
nvkm_gsp_mem_dtor(&gsp->rmargs);
nvkm_gsp_mem_dtor(&gsp->wpr_meta);
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35560.c b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
index 561e6643dcbb67..6e5173f98a2264 100644
--- a/drivers/gpu/drm/panel/panel-novatek-nt35560.c
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35560.c
@@ -213,7 +213,7 @@ static const struct backlight_properties nt35560_bl_props = {
static void nt35560_read_id(struct mipi_dsi_multi_context *dsi_ctx)
{
- struct device dev = dsi_ctx->dsi->dev;
+ struct device *dev = &dsi_ctx->dsi->dev;
u8 vendor, version, panel;
u16 val;
@@ -225,7 +225,7 @@ static void nt35560_read_id(struct mipi_dsi_multi_context *dsi_ctx)
return;
if (vendor == 0x00) {
- dev_err(&dev, "device vendor ID is zero\n");
+ dev_err(dev, "device vendor ID is zero\n");
dsi_ctx->accum_err = -ENODEV;
return;
}
@@ -236,12 +236,12 @@ static void nt35560_read_id(struct mipi_dsi_multi_context *dsi_ctx)
case DISPLAY_SONY_ACX424AKP_ID2:
case DISPLAY_SONY_ACX424AKP_ID3:
case DISPLAY_SONY_ACX424AKP_ID4:
- dev_info(&dev,
+ dev_info(dev,
"MTP vendor: %02x, version: %02x, panel: %02x\n",
vendor, version, panel);
break;
default:
- dev_info(&dev,
+ dev_info(dev,
"unknown vendor: %02x, version: %02x, panel: %02x\n",
vendor, version, panel);
break;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 5718d9d83a49f3..52c95131af5afd 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -586,7 +586,7 @@ out:
drm_modeset_unlock(&crtc->mutex);
}
-static void tilcdc_crtc_destroy(struct drm_crtc *crtc)
+void tilcdc_crtc_destroy(struct drm_crtc *crtc)
{
struct tilcdc_drm_private *priv = crtc->dev->dev_private;
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
index 7caec4d38ddf0c..3dcbec312bacb4 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c
@@ -172,8 +172,7 @@ static void tilcdc_fini(struct drm_device *dev)
if (priv->crtc)
tilcdc_crtc_shutdown(priv->crtc);
- if (priv->is_registered)
- drm_dev_unregister(dev);
+ drm_dev_unregister(dev);
drm_kms_helper_poll_fini(dev);
drm_atomic_helper_shutdown(dev);
@@ -220,21 +219,21 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
priv->wq = alloc_ordered_workqueue("tilcdc", 0);
if (!priv->wq) {
ret = -ENOMEM;
- goto init_failed;
+ goto put_drm;
}
priv->mmio = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->mmio)) {
dev_err(dev, "failed to request / ioremap\n");
ret = PTR_ERR(priv->mmio);
- goto init_failed;
+ goto free_wq;
}
priv->clk = clk_get(dev, "fck");
if (IS_ERR(priv->clk)) {
dev_err(dev, "failed to get functional clock\n");
ret = -ENODEV;
- goto init_failed;
+ goto free_wq;
}
pm_runtime_enable(dev);
@@ -313,7 +312,7 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
ret = tilcdc_crtc_create(ddev);
if (ret < 0) {
dev_err(dev, "failed to create crtc\n");
- goto init_failed;
+ goto disable_pm;
}
modeset_init(ddev);
@@ -324,46 +323,46 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
if (ret) {
dev_err(dev, "failed to register cpufreq notifier\n");
priv->freq_transition.notifier_call = NULL;
- goto init_failed;
+ goto destroy_crtc;
}
#endif
if (priv->is_componentized) {
ret = component_bind_all(dev, ddev);
if (ret < 0)
- goto init_failed;
+ goto unregister_cpufreq_notif;
ret = tilcdc_add_component_encoder(ddev);
if (ret < 0)
- goto init_failed;
+ goto unbind_component;
} else {
ret = tilcdc_attach_external_device(ddev);
if (ret)
- goto init_failed;
+ goto unregister_cpufreq_notif;
}
if (!priv->external_connector &&
((priv->num_encoders == 0) || (priv->num_connectors == 0))) {
dev_err(dev, "no encoders/connectors found\n");
ret = -EPROBE_DEFER;
- goto init_failed;
+ goto unbind_component;
}
ret = drm_vblank_init(ddev, 1);
if (ret < 0) {
dev_err(dev, "failed to initialize vblank\n");
- goto init_failed;
+ goto unbind_component;
}
ret = platform_get_irq(pdev, 0);
if (ret < 0)
- goto init_failed;
+ goto unbind_component;
priv->irq = ret;
ret = tilcdc_irq_install(ddev, priv->irq);
if (ret < 0) {
dev_err(dev, "failed to install IRQ handler\n");
- goto init_failed;
+ goto unbind_component;
}
drm_mode_config_reset(ddev);
@@ -372,16 +371,34 @@ static int tilcdc_init(const struct drm_driver *ddrv, struct device *dev)
ret = drm_dev_register(ddev, 0);
if (ret)
- goto init_failed;
- priv->is_registered = true;
+ goto stop_poll;
drm_client_setup_with_color_mode(ddev, bpp);
return 0;
-init_failed:
- tilcdc_fini(ddev);
+stop_poll:
+ drm_kms_helper_poll_fini(ddev);
+ tilcdc_irq_uninstall(ddev);
+unbind_component:
+ if (priv->is_componentized)
+ component_unbind_all(dev, ddev);
+unregister_cpufreq_notif:
+#ifdef CONFIG_CPU_FREQ
+ cpufreq_unregister_notifier(&priv->freq_transition,
+ CPUFREQ_TRANSITION_NOTIFIER);
+destroy_crtc:
+#endif
+ tilcdc_crtc_destroy(priv->crtc);
+disable_pm:
+ pm_runtime_disable(dev);
+ clk_put(priv->clk);
+free_wq:
+ destroy_workqueue(priv->wq);
+put_drm:
platform_set_drvdata(pdev, NULL);
+ ddev->dev_private = NULL;
+ drm_dev_put(ddev);
return ret;
}
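
The rework above replaces the single catch-all init_failed: label, which ran the full tilcdc_fini() teardown regardless of how far init got, with one label per acquired resource, unwound in reverse order of acquisition. A generic sketch of that idiom, with hypothetical demo_* helpers:

struct demo;
int demo_get_a(struct demo *d);
int demo_get_b(struct demo *d);
int demo_get_c(struct demo *d);
void demo_put_a(struct demo *d);
void demo_put_b(struct demo *d);

static int demo_init(struct demo *d)
{
	int ret;

	ret = demo_get_a(d);
	if (ret)
		return ret;	/* nothing to undo yet */

	ret = demo_get_b(d);
	if (ret)
		goto put_a;

	ret = demo_get_c(d);
	if (ret)
		goto put_b;

	return 0;

put_b:			/* each label releases exactly one resource ... */
	demo_put_b(d);
put_a:			/* ... and falls through to the ones before it */
	demo_put_a(d);
	return ret;
}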
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.h b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
index b818448c83f612..58b276f82a669a 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_drv.h
+++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.h
@@ -82,7 +82,6 @@ struct tilcdc_drm_private {
struct drm_encoder *external_encoder;
struct drm_connector *external_connector;
- bool is_registered;
bool is_componentized;
bool irq_enabled;
};
@@ -164,6 +163,7 @@ void tilcdc_crtc_set_panel_info(struct drm_crtc *crtc,
void tilcdc_crtc_set_simulate_vesa_sync(struct drm_crtc *crtc,
bool simulate_vesa_sync);
void tilcdc_crtc_shutdown(struct drm_crtc *crtc);
+void tilcdc_crtc_destroy(struct drm_crtc *crtc);
int tilcdc_crtc_update_fb(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index b47020fca19923..e6abc7b40b1895 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -434,6 +434,11 @@ int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
if (ret)
return ret;
+ if (!bo->resource) {
+ ret = -ENODATA;
+ goto unlock;
+ }
+
switch (bo->resource->mem_type) {
case TTM_PL_SYSTEM:
fallthrough;
@@ -448,6 +453,7 @@ int ttm_bo_access(struct ttm_buffer_object *bo, unsigned long offset,
ret = -EIO;
}
+unlock:
ttm_bo_unreserve(bo);
return ret;
diff --git a/drivers/irqchip/irq-mchp-eic.c b/drivers/irqchip/irq-mchp-eic.c
index 2474fa467a059e..31093a8ab67c3a 100644
--- a/drivers/irqchip/irq-mchp-eic.c
+++ b/drivers/irqchip/irq-mchp-eic.c
@@ -170,7 +170,7 @@ static int mchp_eic_domain_alloc(struct irq_domain *domain, unsigned int virq,
ret = irq_domain_translate_twocell(domain, fwspec, &hwirq, &type);
if (ret || hwirq >= MCHP_EIC_NIRQ)
- return ret;
+ return ret ?: -EINVAL;
switch (type) {
case IRQ_TYPE_EDGE_RISING:
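
The ?: with an empty middle operand is the GNU "elvis" extension: a ?: b evaluates a once and yields it when non-zero, b otherwise. That matters here because the old code returned ret, which is 0 after a successful translate, even when the hwirq range check was what failed, silently reporting success. A sketch:

#include <linux/types.h>

static int check(int ret, bool hwirq_ok)
{
	if (ret || !hwirq_ok)
		return ret ?: -EINVAL;	/* translate error wins;
					 * otherwise flag the bad hwirq */
	return 0;
}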
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index c18358271618d2..d5d6ef7ba8381a 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -950,6 +950,19 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
q = bdev_get_queue(p->path.dev->bdev);
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
+ if (IS_ERR(attached_handler_name)) {
+ if (PTR_ERR(attached_handler_name) == -ENODEV) {
+ if (m->hw_handler_name) {
+ DMERR("hardware handlers are only allowed for SCSI devices");
+ kfree(m->hw_handler_name);
+ m->hw_handler_name = NULL;
+ }
+ attached_handler_name = NULL;
+ } else {
+ r = PTR_ERR(attached_handler_name);
+ goto bad;
+ }
+ }
if (attached_handler_name || m->hw_handler_name) {
INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 9d1de68dee278c..d7d41b054b9824 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -106,7 +106,6 @@ config PHANTOM
config RPMB
tristate "RPMB partition interface"
- depends on MMC || SCSI_UFSHCD
help
Unified RPMB unit interface for RPMB capable devices such as eMMC and
UFS. Provides interface for in-kernel security controllers to access
diff --git a/drivers/scsi/imm.c b/drivers/scsi/imm.c
index 5c602c0577989f..45b0e33293a591 100644
--- a/drivers/scsi/imm.c
+++ b/drivers/scsi/imm.c
@@ -1260,6 +1260,7 @@ static void imm_detach(struct parport *pb)
imm_struct *dev;
list_for_each_entry(dev, &imm_hosts, list) {
if (dev->dev->port == pb) {
+ disable_delayed_work_sync(&dev->imm_tq);
list_del_init(&dev->list);
scsi_remove_host(dev->host);
scsi_host_put(dev->host);
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 95123689e9d1fe..dbd58a7e7bc1f3 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -61,8 +61,8 @@
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
+#include <linux/irq.h>
#include <asm/io.h>
-#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
@@ -7844,6 +7844,30 @@ static int ipr_dump_mailbox_wait(struct ipr_cmnd *ipr_cmd)
}
/**
+ * ipr_set_affinity_nobalance
+ * @ioa_cfg: ipr_ioa_cfg struct for an ipr device
+ * @flag: bool
+ * true: enable "IRQ_NO_BALANCING" bit for msix interrupt
+ * false: disable "IRQ_NO_BALANCING" bit for msix interrupt
+ * Description: This function will be called to enable/disable
+ * "IRQ_NO_BALANCING" to avoid the irqbalance daemon
+ * kicking in during adapter reset.
+ **/
+static void ipr_set_affinity_nobalance(struct ipr_ioa_cfg *ioa_cfg, bool flag)
+{
+ int irq, i;
+
+ for (i = 0; i < ioa_cfg->nvectors; i++) {
+ irq = pci_irq_vector(ioa_cfg->pdev, i);
+
+ if (flag)
+ irq_set_status_flags(irq, IRQ_NO_BALANCING);
+ else
+ irq_clear_status_flags(irq, IRQ_NO_BALANCING);
+ }
+}
+
+/**
* ipr_reset_restore_cfg_space - Restore PCI config space.
* @ipr_cmd: ipr command struct
*
@@ -7866,6 +7890,7 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
return IPR_RC_JOB_CONTINUE;
}
+ ipr_set_affinity_nobalance(ioa_cfg, false);
ipr_fail_all_ops(ioa_cfg);
if (ioa_cfg->sis64) {
@@ -7945,6 +7970,7 @@ static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
if (rc == PCIBIOS_SUCCESSFUL) {
+ ipr_set_affinity_nobalance(ioa_cfg, true);
ipr_cmd->job_step = ipr_reset_bist_done;
ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
rc = IPR_RC_JOB_RETURN;
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 8566bb1208a057..6b15ad1bcadabe 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -141,6 +141,7 @@ Undo_event_q:
Undo_ports:
sas_unregister_ports(sas_ha);
Undo_phys:
+ sas_unregister_phys(sas_ha);
return error;
}
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 6706f2be8d274b..d104c87f04f523 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -54,6 +54,7 @@ void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev);
void sas_scsi_recover_host(struct Scsi_Host *shost);
int sas_register_phys(struct sas_ha_struct *sas_ha);
+void sas_unregister_phys(struct sas_ha_struct *sas_ha);
struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy, gfp_t gfp_flags);
void sas_free_event(struct asd_sas_event *event);
@@ -145,20 +146,6 @@ static inline void sas_fail_probe(struct domain_device *dev, const char *func, i
func, dev->parent ? "exp-attached" :
"direct-attached",
SAS_ADDR(dev->sas_addr), err);
-
- /*
- * If the device probe failed, the expander phy attached address
- * needs to be reset so that the phy will not be treated as flutter
- * in the next revalidation
- */
- if (dev->parent && !dev_is_expander(dev->dev_type)) {
- struct sas_phy *phy = dev->phy;
- struct domain_device *parent = dev->parent;
- struct ex_phy *ex_phy = &parent->ex_dev.ex_phy[phy->number];
-
- memset(ex_phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
- }
-
sas_unregister_dev(dev->port, dev);
}
diff --git a/drivers/scsi/libsas/sas_phy.c b/drivers/scsi/libsas/sas_phy.c
index 635835c28ecd43..58f08dc2c1872c 100644
--- a/drivers/scsi/libsas/sas_phy.c
+++ b/drivers/scsi/libsas/sas_phy.c
@@ -116,6 +116,7 @@ static void sas_phye_shutdown(struct work_struct *work)
int sas_register_phys(struct sas_ha_struct *sas_ha)
{
int i;
+ int err;
/* Now register the phys. */
for (i = 0; i < sas_ha->num_phys; i++) {
@@ -132,8 +133,10 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
phy->frame_rcvd_size = 0;
phy->phy = sas_phy_alloc(&sas_ha->shost->shost_gendev, i);
- if (!phy->phy)
- return -ENOMEM;
+ if (!phy->phy) {
+ err = -ENOMEM;
+ goto rollback;
+ }
phy->phy->identify.initiator_port_protocols =
phy->iproto;
@@ -146,10 +149,34 @@ int sas_register_phys(struct sas_ha_struct *sas_ha)
phy->phy->maximum_linkrate = SAS_LINK_RATE_UNKNOWN;
phy->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
- sas_phy_add(phy->phy);
+ err = sas_phy_add(phy->phy);
+ if (err) {
+ sas_phy_free(phy->phy);
+ goto rollback;
+ }
}
return 0;
+rollback:
+ for (i--; i >= 0; i--) {
+ struct asd_sas_phy *phy = sas_ha->sas_phy[i];
+
+ sas_phy_delete(phy->phy);
+ sas_phy_free(phy->phy);
+ }
+ return err;
+}
+
+void sas_unregister_phys(struct sas_ha_struct *sas_ha)
+{
+ int i;
+
+ for (i = 0; i < sas_ha->num_phys; i++) {
+ struct asd_sas_phy *phy = sas_ha->sas_phy[i];
+
+ sas_phy_delete(phy->phy);
+ sas_phy_free(phy->phy);
+ }
}
const work_func_t sas_phy_event_fns[PHY_NUM_EVENTS] = {
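
Both changes in this file follow the partial-registration rollback idiom: when the i-th element fails, undo elements 0..i-1 in reverse before propagating the error. A generic sketch, with hypothetical item helpers:

struct item;
int register_one(struct item *it);
void unregister_one(struct item *it);

static int register_all(struct item **items, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = register_one(items[i]);
		if (err)
			goto rollback;
	}
	return 0;

rollback:
	/* items[i] itself never registered; unwind i-1 .. 0 */
	while (--i >= 0)
		unregister_one(items[i]);
	return err;
}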
diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h
index 6742684e2990a7..31d68c151b2076 100644
--- a/drivers/scsi/mpi3mr/mpi3mr.h
+++ b/drivers/scsi/mpi3mr/mpi3mr.h
@@ -56,8 +56,8 @@ extern struct list_head mrioc_list;
extern int prot_mask;
extern atomic64_t event_counter;
-#define MPI3MR_DRIVER_VERSION "8.15.0.5.50"
-#define MPI3MR_DRIVER_RELDATE "12-August-2025"
+#define MPI3MR_DRIVER_VERSION "8.15.0.5.51"
+#define MPI3MR_DRIVER_RELDATE "18-November-2025"
#define MPI3MR_DRIVER_NAME "mpi3mr"
#define MPI3MR_DRIVER_LICENSE "GPL"
diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c
index b88633e1efe275..d4ca878d088697 100644
--- a/drivers/scsi/mpi3mr/mpi3mr_os.c
+++ b/drivers/scsi/mpi3mr/mpi3mr_os.c
@@ -1184,6 +1184,8 @@ static void mpi3mr_update_tgtdev(struct mpi3mr_ioc *mrioc,
if (is_added == true)
tgtdev->io_throttle_enabled =
(flags & MPI3_DEVICE0_FLAGS_IO_THROTTLING_REQUIRED) ? 1 : 0;
+ if (!mrioc->sas_transport_enabled)
+ tgtdev->non_stl = 1;
switch (flags & MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_MASK) {
case MPI3_DEVICE0_FLAGS_MAX_WRITE_SAME_256_LB:
@@ -4844,7 +4846,7 @@ static int mpi3mr_target_alloc(struct scsi_target *starget)
spin_lock_irqsave(&mrioc->tgtdev_lock, flags);
if (starget->channel == mrioc->scsi_device_channel) {
tgt_dev = __mpi3mr_get_tgtdev_by_perst_id(mrioc, starget->id);
- if (tgt_dev && !tgt_dev->is_hidden) {
+ if (tgt_dev && !tgt_dev->is_hidden && tgt_dev->non_stl) {
scsi_tgt_priv_data->starget = starget;
scsi_tgt_priv_data->dev_handle = tgt_dev->dev_handle;
scsi_tgt_priv_data->perst_id = tgt_dev->perst_id;
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 3a57f07d73f587..16a44c0917e18b 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -17,6 +17,7 @@
#include <linux/crash_dump.h>
#include <linux/trace_events.h>
#include <linux/trace.h>
+#include <linux/irq.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsicam.h>
@@ -7776,6 +7777,31 @@ static void qla_pci_error_cleanup(scsi_qla_host_t *vha)
}
+/**
+ * qla2xxx_set_affinity_nobalance
+ * @pdev: pci_dev struct for a qla2xxx device
+ * @flag: bool
+ * true: enable "IRQ_NO_BALANCING" bit for msix interrupt
+ * false: disable "IRQ_NO_BALANCING" bit for msix interrupt
+ * Description: This function will be called to enable/disable
+ * "IRQ_NO_BALANCING" to avoid irqbalance daemon
+ * kicking in during adapter reset.
+ **/
+
+static void qla2xxx_set_affinity_nobalance(struct pci_dev *pdev, bool flag)
+{
+ int irq, i;
+
+ for (i = 0; i < QLA_BASE_VECTORS; i++) {
+ irq = pci_irq_vector(pdev, i);
+
+ if (flag)
+ irq_set_status_flags(irq, IRQ_NO_BALANCING);
+ else
+ irq_clear_status_flags(irq, IRQ_NO_BALANCING);
+ }
+}
+
static pci_ers_result_t
qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
@@ -7794,6 +7820,8 @@ qla2xxx_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
goto out;
}
+ qla2xxx_set_affinity_nobalance(pdev, false);
+
switch (state) {
case pci_channel_io_normal:
qla_pci_set_eeh_busy(vha);
@@ -7930,6 +7958,8 @@ exit_slot_reset:
ql_dbg(ql_dbg_aer, base_vha, 0x900e,
"Slot Reset returning %x.\n", ret);
+ qla2xxx_set_affinity_nobalance(pdev, true);
+
return ret;
}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index da2fc66ffedd5d..b0a62aaa1cca90 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -1552,7 +1552,7 @@ static int qla4_82xx_cmdpeg_ready(struct scsi_qla_host *ha, int pegtune_val)
(val == PHAN_INITIALIZE_ACK))
return 0;
set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(500);
+ schedule_timeout(msecs_to_jiffies(500));
} while (--retries);
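
schedule_timeout() takes a duration in jiffies, whose length depends on CONFIG_HZ: a raw 500 is half a second only at HZ=1000, but five seconds at HZ=100. Converting explicitly makes the delay HZ-independent. A sketch of the corrected pattern:

#include <linux/jiffies.h>
#include <linux/sched.h>

static void wait_half_second(void)
{
	set_current_state(TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(500));	/* 500 ms at any HZ */
}

Outside a retry loop like the one above, msleep(500) would express the same thing even more directly.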
diff --git a/drivers/scsi/scsi_dh.c b/drivers/scsi/scsi_dh.c
index 7b56e00c7df6b0..b9d80531781494 100644
--- a/drivers/scsi/scsi_dh.c
+++ b/drivers/scsi/scsi_dh.c
@@ -353,7 +353,8 @@ EXPORT_SYMBOL_GPL(scsi_dh_attach);
* that may have a device handler attached
* @gfp - the GFP mask used in the kmalloc() call when allocating memory
*
- * Returns name of attached handler, NULL if no handler is attached.
+ * Returns name of attached handler, NULL if no handler is attached, or
+ * an error pointer if an error occurred.
* Caller must take care to free the returned string.
*/
const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
@@ -363,10 +364,11 @@ const char *scsi_dh_attached_handler_name(struct request_queue *q, gfp_t gfp)
sdev = scsi_device_from_queue(q);
if (!sdev)
- return NULL;
+ return ERR_PTR(-ENODEV);
if (sdev->handler)
- handler_name = kstrdup(sdev->handler->name, gfp);
+ handler_name = kstrdup(sdev->handler->name, gfp) ? :
+ ERR_PTR(-ENOMEM);
put_device(&sdev->sdev_gendev);
return handler_name;
}
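
With this change the function has three distinct outcomes, and the dm-mpath hunk above shows why callers must test them in order: an ERR_PTR() for failures, NULL for "no handler attached", and an allocated name the caller owns. A sketch of consuming it (the request_queue is assumed to come from elsewhere):

#include <scsi/scsi_dh.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/printk.h>

static int report_handler(struct request_queue *q)
{
	const char *name = scsi_dh_attached_handler_name(q, GFP_KERNEL);

	if (IS_ERR(name))
		return PTR_ERR(name);	/* -ENODEV or -ENOMEM */
	if (name) {
		pr_info("attached handler: %s\n", name);
		kfree(name);		/* caller owns the kstrdup'd copy */
	}
	/* name == NULL: device exists but no handler is attached */
	return 0;
}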
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 51ad2ad07e4390..93031326ac3ee9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2801,7 +2801,7 @@ EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
*
* Must be called with user context, may sleep.
*
- * Returns zero if unsuccessful or an error if not.
+ * Returns zero if successful or an error if not.
*/
int
scsi_device_quiesce(struct scsi_device *sdev)
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index b8457477cee9eb..9f167ff8da7b07 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -5,8 +5,7 @@
* Copyright (C) 2011 Chris Boot <bootc@bootc.net>
*/
-#define KMSG_COMPONENT "sbp_target"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+#define pr_fmt(fmt) "sbp_target: " fmt
#include <linux/kernel.h>
#include <linux/module.h>
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index e8b7955d40f29a..50d21888a0c9a7 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1524,6 +1524,7 @@ target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp)
if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp);
if (!cmd->t_task_cdb) {
+ cmd->t_task_cdb = &cmd->__t_task_cdb[0];
pr_err("Unable to allocate cmd->t_task_cdb"
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
diff --git a/drivers/ufs/Kconfig b/drivers/ufs/Kconfig
index 90226f72c158aa..f662e7ce71f1bd 100644
--- a/drivers/ufs/Kconfig
+++ b/drivers/ufs/Kconfig
@@ -6,6 +6,7 @@
menuconfig SCSI_UFSHCD
tristate "Universal Flash Storage Controller"
depends on SCSI && SCSI_DMA
+ depends on RPMB || !RPMB
select PM_DEVFREQ
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select NLS
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 040a0ceb170a7f..80c0b49f30b012 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -1455,15 +1455,14 @@ out:
static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, int err)
{
up_write(&hba->clk_scaling_lock);
+ mutex_unlock(&hba->wb_mutex);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ mutex_unlock(&hba->host->scan_mutex);
/* Enable Write Booster if current gear requires it else disable it */
if (ufshcd_enable_wb_if_scaling_up(hba) && !err)
ufshcd_wb_toggle(hba, hba->pwr_info.gear_rx >= hba->clk_scaling.wb_gear);
- mutex_unlock(&hba->wb_mutex);
-
- blk_mq_unquiesce_tagset(&hba->host->tag_set);
- mutex_unlock(&hba->host->scan_mutex);
ufshcd_release(hba);
}
@@ -6504,6 +6503,11 @@ static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
+ /*
+ * A WLUN resume failure could potentially lead to the HBA being
+ * runtime suspended, so take an extra reference on hba->dev.
+ */
+ pm_runtime_get_sync(hba->dev);
ufshcd_rpm_get_sync(hba);
if (pm_runtime_status_suspended(&hba->ufs_device_wlun->sdev_gendev) ||
hba->is_sys_suspended) {
@@ -6543,6 +6547,7 @@ static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
if (ufshcd_is_clkscaling_supported(hba))
ufshcd_clk_scaling_suspend(hba, false);
ufshcd_rpm_put(hba);
+ pm_runtime_put(hba->dev);
}
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
@@ -6557,28 +6562,42 @@ static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
#ifdef CONFIG_PM
static void ufshcd_recover_pm_error(struct ufs_hba *hba)
{
+ struct scsi_target *starget = hba->ufs_device_wlun->sdev_target;
struct Scsi_Host *shost = hba->host;
struct scsi_device *sdev;
struct request_queue *q;
- int ret;
+ bool resume_sdev_queues = false;
hba->is_sys_suspended = false;
+
/*
- * Set RPM status of wlun device to RPM_ACTIVE,
- * this also clears its runtime error.
+ * Ensure the parent's error status is cleared before proceeding
+ * to the child, as the parent must be active to activate the child.
*/
- ret = pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
+ if (hba->dev->power.runtime_error) {
+ /* hba->dev has no functional parent, thus simply set RPM_ACTIVE */
+ pm_runtime_set_active(hba->dev);
+ resume_sdev_queues = true;
+ }
+
+ if (hba->ufs_device_wlun->sdev_gendev.power.runtime_error) {
+ /*
+ * starget, parent of wlun, might be suspended if wlun resume failed.
+ * Make sure the parent is resumed before setting the child (wlun) active.
+ */
+ pm_runtime_get_sync(&starget->dev);
+ pm_runtime_set_active(&hba->ufs_device_wlun->sdev_gendev);
+ pm_runtime_put_sync(&starget->dev);
+ resume_sdev_queues = true;
+ }
- /* hba device might have a runtime error otherwise */
- if (ret)
- ret = pm_runtime_set_active(hba->dev);
/*
* If wlun device had runtime error, we also need to resume those
* consumer scsi devices in case any of them has failed to be
* resumed due to supplier runtime resume failure. This is to unblock
* blk_queue_enter in case there are bios waiting inside it.
*/
- if (!ret) {
+ if (resume_sdev_queues) {
shost_for_each_device(sdev, shost) {
q = sdev->request_queue;
if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
@@ -6679,19 +6698,22 @@ static void ufshcd_err_handler(struct work_struct *work)
hba->saved_uic_err, hba->force_reset,
ufshcd_is_link_broken(hba) ? "; link is broken" : "");
- /*
- * Use ufshcd_rpm_get_noresume() here to safely perform link recovery
- * even if an error occurs during runtime suspend or runtime resume.
- * This avoids potential deadlocks that could happen if we tried to
- * resume the device while a PM operation is already in progress.
- */
- ufshcd_rpm_get_noresume(hba);
- if (hba->pm_op_in_progress) {
- ufshcd_link_recovery(hba);
+ if (hba->ufs_device_wlun) {
+ /*
+ * Use ufshcd_rpm_get_noresume() here to safely perform link
+ * recovery even if an error occurs during runtime suspend or
+ * runtime resume. This avoids potential deadlocks that could
+ * happen if we tried to resume the device while a PM operation
+ * is already in progress.
+ */
+ ufshcd_rpm_get_noresume(hba);
+ if (hba->pm_op_in_progress) {
+ ufshcd_link_recovery(hba);
+ ufshcd_rpm_put(hba);
+ return;
+ }
ufshcd_rpm_put(hba);
- return;
}
- ufshcd_rpm_put(hba);
down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 8d119b3223cbda..8ebee0cc531370 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1769,10 +1769,9 @@ static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int i, j, nminor = 0, testbus_len = 0;
- u32 *testbus __free(kfree) = NULL;
char *prefix;
- testbus = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
+ u32 *testbus __free(kfree) = kmalloc_array(256, sizeof(u32), GFP_KERNEL);
if (!testbus)
return;
@@ -1794,13 +1793,12 @@ static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
const char *prefix, void __iomem *base)
{
- u32 *regs __free(kfree) = NULL;
size_t pos;
if (offset % 4 != 0 || len % 4 != 0)
return -EINVAL;
- regs = kzalloc(len, GFP_ATOMIC);
+ u32 *regs __free(kfree) = kzalloc(len, GFP_ATOMIC);
if (!regs)
return -ENOMEM;
diff --git a/fs/ceph/Kconfig b/fs/ceph/Kconfig
index 3e7def3d31c16f..3d64a316ca31d3 100644
--- a/fs/ceph/Kconfig
+++ b/fs/ceph/Kconfig
@@ -3,9 +3,6 @@ config CEPH_FS
tristate "Ceph distributed file system"
depends on INET
select CEPH_LIB
- select CRC32
- select CRYPTO_AES
- select CRYPTO
select NETFS_SUPPORT
select FS_ENCRYPTION_ALGS if FS_ENCRYPTION
default n
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index b1a8ff612c41dc..2f663972da99cc 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -18,6 +18,7 @@
#include "crypto.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>
+#include <trace/events/ceph.h>
/*
* Capability management
@@ -4452,6 +4453,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
session->s_mds, ceph_cap_op_name(op), vino.ino, vino.snap, inode,
seq, issue_seq, mseq);
+ trace_ceph_handle_caps(mdsc, session, op, &vino, ceph_inode(inode),
+ seq, issue_seq, mseq);
+
mutex_lock(&session->s_mutex);
if (!inode) {
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 1740047aef0f9a..7e4eab824daef8 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -24,6 +24,7 @@
#include <linux/ceph/pagelist.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/debugfs.h>
+#include <trace/events/ceph.h>
#define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)
@@ -3288,6 +3289,8 @@ static void complete_request(struct ceph_mds_client *mdsc,
{
req->r_end_latency = ktime_get();
+ trace_ceph_mdsc_complete_request(mdsc, req);
+
if (req->r_callback)
req->r_callback(mdsc, req);
complete_all(&req->r_completion);
@@ -3419,6 +3422,8 @@ static int __send_request(struct ceph_mds_session *session,
{
int err;
+ trace_ceph_mdsc_send_request(session, req);
+
err = __prepare_send_request(session, req, drop_cap_releases);
if (!err) {
ceph_msg_get(req->r_request);
@@ -3470,6 +3475,8 @@ static void __do_request(struct ceph_mds_client *mdsc,
}
if (mdsc->mdsmap->m_epoch == 0) {
doutc(cl, "no mdsmap, waiting for map\n");
+ trace_ceph_mdsc_suspend_request(mdsc, session, req,
+ ceph_mdsc_suspend_reason_no_mdsmap);
list_add(&req->r_wait, &mdsc->waiting_for_map);
return;
}
@@ -3491,6 +3498,8 @@ static void __do_request(struct ceph_mds_client *mdsc,
goto finish;
}
doutc(cl, "no mds or not active, waiting for map\n");
+ trace_ceph_mdsc_suspend_request(mdsc, session, req,
+ ceph_mdsc_suspend_reason_no_active_mds);
list_add(&req->r_wait, &mdsc->waiting_for_map);
return;
}
@@ -3536,9 +3545,11 @@ static void __do_request(struct ceph_mds_client *mdsc,
* it to the mdsc queue.
*/
if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
- if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER))
+ if (ceph_test_mount_opt(mdsc->fsc, CLEANRECOVER)) {
+ trace_ceph_mdsc_suspend_request(mdsc, session, req,
+ ceph_mdsc_suspend_reason_rejected);
list_add(&req->r_wait, &mdsc->waiting_for_map);
- else
+ } else
err = -EACCES;
goto out_session;
}
@@ -3552,6 +3563,8 @@ static void __do_request(struct ceph_mds_client *mdsc,
if (random)
req->r_resend_mds = mds;
}
+ trace_ceph_mdsc_suspend_request(mdsc, session, req,
+ ceph_mdsc_suspend_reason_session);
list_add(&req->r_wait, &session->s_waiting);
goto out_session;
}
@@ -3652,6 +3665,7 @@ static void __wake_requests(struct ceph_mds_client *mdsc,
list_del_init(&req->r_wait);
doutc(cl, " wake request %p tid %llu\n", req,
req->r_tid);
+ trace_ceph_mdsc_resume_request(mdsc, req);
__do_request(mdsc, req);
}
}
@@ -3678,6 +3692,7 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds)
req->r_session->s_mds == mds) {
doutc(cl, " kicking tid %llu\n", req->r_tid);
list_del_init(&req->r_wait);
+ trace_ceph_mdsc_resume_request(mdsc, req);
__do_request(mdsc, req);
}
}
@@ -3724,6 +3739,7 @@ int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
doutc(cl, "submit_request on %p for inode %p\n", req, dir);
mutex_lock(&mdsc->mutex);
__register_request(mdsc, req, dir);
+ trace_ceph_mdsc_submit_request(mdsc, req);
__do_request(mdsc, req);
err = req->r_err;
mutex_unlock(&mdsc->mutex);
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index c65f2b202b2b3e..521507ea826000 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -374,7 +374,7 @@ static int build_snap_context(struct ceph_mds_client *mdsc,
/* alloc new snap context */
err = -ENOMEM;
- if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
+ if ((size_t)num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
goto fail;
snapc = ceph_create_snap_context(num, GFP_NOFS);
if (!snapc)
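
The cast makes the bound comparison unsigned on every architecture before the multiply-by-sizeof(u64) limit is applied. The same guard is often written with the overflow-checked helpers from linux/overflow.h; a sketch under that alternative idiom (type and field names hypothetical):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/kref.h>

struct snapc_like {
	struct kref kref;
	u32 num_snaps;
	u64 snaps[];		/* flexible array of snapshot ids */
};

static struct snapc_like *snapc_alloc(size_t num)
{
	struct snapc_like *sc;

	/*
	 * struct_size() saturates to SIZE_MAX on overflow, so the
	 * allocation below simply fails instead of producing an
	 * undersized buffer.
	 */
	sc = kzalloc(struct_size(sc, snaps, num), GFP_NOFS);
	return sc;
}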
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index f6bf24b5c683ef..7c1c1dac320da3 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -30,6 +30,9 @@
#include <uapi/linux/magic.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/ceph.h>
+
static DEFINE_SPINLOCK(ceph_fsc_lock);
static LIST_HEAD(ceph_fsc_list);
diff --git a/fs/fat/cache.c b/fs/fat/cache.c
index 2af424e200b35e..630f3056658ef3 100644
--- a/fs/fat/cache.c
+++ b/fs/fat/cache.c
@@ -29,11 +29,6 @@ struct fat_cache_id {
int dcluster;
};
-static inline int fat_max_cache(struct inode *inode)
-{
- return FAT_MAX_CACHE;
-}
-
static struct kmem_cache *fat_cache_cachep;
static void init_once(void *foo)
@@ -145,7 +140,7 @@ static void fat_cache_add(struct inode *inode, struct fat_cache_id *new)
cache = fat_cache_merge(inode, new);
if (cache == NULL) {
- if (MSDOS_I(inode)->nr_caches < fat_max_cache(inode)) {
+ if (MSDOS_I(inode)->nr_caches < FAT_MAX_CACHE) {
MSDOS_I(inode)->nr_caches++;
spin_unlock(&MSDOS_I(inode)->cache_lru_lock);
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index b267ec580da995..58bf58b68955c5 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/quotaops.h>
@@ -1037,7 +1038,7 @@ static int ocfs2_create_new_meta_bhs(handle_t *handle,
memset(bhs[i]->b_data, 0, osb->sb->s_blocksize);
eb = (struct ocfs2_extent_block *) bhs[i]->b_data;
/* Ok, setup the minimal stuff here. */
- strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
+ strscpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
eb->h_blkno = cpu_to_le64(first_blkno);
eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
eb->h_suballoc_slot =
@@ -3654,7 +3655,6 @@ static int ocfs2_merge_rec_left(struct ocfs2_path *right_path,
* So we use the new rightmost path.
*/
ocfs2_mv_path(right_path, left_path);
- left_path = NULL;
} else
ocfs2_complete_edge_insert(handle, left_path,
right_path, subtree_index);
@@ -6164,7 +6164,7 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
struct buffer_head *bh = NULL;
struct ocfs2_dinode *di;
struct ocfs2_truncate_log *tl;
- unsigned int tl_count;
+ unsigned int tl_count, tl_used;
inode = ocfs2_get_system_file_inode(osb,
TRUNCATE_LOG_SYSTEM_INODE,
@@ -6185,8 +6185,10 @@ static int ocfs2_get_truncate_log_info(struct ocfs2_super *osb,
di = (struct ocfs2_dinode *)bh->b_data;
tl = &di->id2.i_dealloc;
tl_count = le16_to_cpu(tl->tl_count);
+ tl_used = le16_to_cpu(tl->tl_used);
if (unlikely(tl_count > ocfs2_truncate_recs_per_inode(osb->sb) ||
- tl_count == 0)) {
+ tl_count == 0 ||
+ tl_used > tl_count)) {
status = -EFSCORRUPTED;
iput(inode);
brelse(bh);
@@ -6744,7 +6746,7 @@ static int ocfs2_reuse_blk_from_dealloc(handle_t *handle,
/* We can't guarantee that buffer head is still cached, so
* populate the extent block again.
*/
- strcpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
+ strscpy(eb->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE);
eb->h_blkno = cpu_to_le64(bf->free_blk);
eb->h_fs_generation = cpu_to_le32(osb->fs_generation);
eb->h_suballoc_slot = cpu_to_le16(real_slot);
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 8f714406528d62..701d27d908d4b2 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -434,7 +434,7 @@ int ocfs2_write_super_or_backup(struct ocfs2_super *osb,
BUG_ON(buffer_jbd(bh));
ocfs2_check_super_or_backup(osb->sb, bh->b_blocknr);
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) {
+ if (unlikely(ocfs2_emergency_state(osb))) {
ret = -EROFS;
mlog_errno(ret);
goto out;
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index 2f61d39e4e509b..6bc4e064ace464 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -4,6 +4,7 @@
*/
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/configfs.h>
@@ -590,7 +591,7 @@ static struct config_item *o2nm_node_group_make_item(struct config_group *group,
if (node == NULL)
return ERR_PTR(-ENOMEM);
- strcpy(node->nd_name, name); /* use item.ci_namebuf instead? */
+ strscpy(node->nd_name, name); /* use item.ci_namebuf instead? */
config_item_init_type_name(&node->nd_item, name, &o2nm_node_type);
spin_lock_init(&node->nd_lock);
diff --git a/fs/ocfs2/dir.c b/fs/ocfs2/dir.c
index 2785ff245e79e4..782afd9fa93481 100644
--- a/fs/ocfs2/dir.c
+++ b/fs/ocfs2/dir.c
@@ -136,7 +136,7 @@ static void ocfs2_init_dir_trailer(struct inode *inode,
struct ocfs2_dir_block_trailer *trailer;
trailer = ocfs2_trailer_from_bh(bh, inode->i_sb);
- strcpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE);
+ strscpy(trailer->db_signature, OCFS2_DIR_TRAILER_SIGNATURE);
trailer->db_compat_rec_len =
cpu_to_le16(sizeof(struct ocfs2_dir_block_trailer));
trailer->db_parent_dinode = cpu_to_le64(OCFS2_I(inode)->ip_blkno);
@@ -2213,14 +2213,14 @@ static struct ocfs2_dir_entry *ocfs2_fill_initial_dirents(struct inode *inode,
de->name_len = 1;
de->rec_len =
cpu_to_le16(OCFS2_DIR_REC_LEN(de->name_len));
- strcpy(de->name, ".");
+ strscpy(de->name, ".");
ocfs2_set_de_type(de, S_IFDIR);
de = (struct ocfs2_dir_entry *) ((char *)de + le16_to_cpu(de->rec_len));
de->inode = cpu_to_le64(OCFS2_I(parent)->ip_blkno);
de->rec_len = cpu_to_le16(size - OCFS2_DIR_REC_LEN(1));
de->name_len = 2;
- strcpy(de->name, "..");
+ strscpy(de->name, "..");
ocfs2_set_de_type(de, S_IFDIR);
return de;
@@ -2378,7 +2378,7 @@ static int ocfs2_dx_dir_attach_index(struct ocfs2_super *osb,
dx_root = (struct ocfs2_dx_root_block *)dx_root_bh->b_data;
memset(dx_root, 0, osb->sb->s_blocksize);
- strcpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
+ strscpy(dx_root->dr_signature, OCFS2_DX_ROOT_SIGNATURE);
dx_root->dr_suballoc_slot = cpu_to_le16(meta_ac->ac_alloc_slot);
dx_root->dr_suballoc_loc = cpu_to_le64(suballoc_loc);
dx_root->dr_suballoc_bit = cpu_to_le16(dr_suballoc_bit);
@@ -2454,7 +2454,7 @@ static int ocfs2_dx_dir_format_cluster(struct ocfs2_super *osb,
dx_leaf = (struct ocfs2_dx_leaf *) bh->b_data;
memset(dx_leaf, 0, osb->sb->s_blocksize);
- strcpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE);
+ strscpy(dx_leaf->dl_signature, OCFS2_DX_LEAF_SIGNATURE);
dx_leaf->dl_fs_generation = cpu_to_le32(osb->fs_generation);
dx_leaf->dl_blkno = cpu_to_le64(bh->b_blocknr);
dx_leaf->dl_list.de_count =
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 21d797ccccd068..732c61599159cc 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -179,7 +179,7 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
file->f_path.dentry->d_name.name,
(unsigned long long)datasync);
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ if (unlikely(ocfs2_emergency_state(osb)))
return -EROFS;
err = file_write_and_wait_range(file, start, end);
@@ -209,7 +209,7 @@ int ocfs2_should_update_atime(struct inode *inode,
struct timespec64 now;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ if (unlikely(ocfs2_emergency_state(osb)))
return 0;
if ((inode->i_flags & S_NOATIME) ||
@@ -1136,6 +1136,12 @@ int ocfs2_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
attr->ia_valid & ATTR_GID ?
from_kgid(&init_user_ns, attr->ia_gid) : 0);
+ status = ocfs2_emergency_state(osb);
+ if (unlikely(status)) {
+ mlog_errno(status);
+ goto bail;
+ }
+
/* ensuring we don't even attempt to truncate a symlink */
if (S_ISLNK(inode->i_mode))
attr->ia_valid &= ~ATTR_SIZE;
@@ -1943,7 +1949,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
handle_t *handle;
unsigned long long max_off = inode->i_sb->s_maxbytes;
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ if (unlikely(ocfs2_emergency_state(osb)))
return -EROFS;
inode_lock(inode);
@@ -2707,7 +2713,7 @@ static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
return -EINVAL;
if (!ocfs2_refcount_tree(osb))
return -EOPNOTSUPP;
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ if (unlikely(ocfs2_emergency_state(osb)))
return -EROFS;
/* Lock both files against IO */
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 8340525e5589c0..b5fcc2725a2962 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -1442,6 +1442,14 @@ int ocfs2_validate_inode_block(struct super_block *sb,
goto bail;
}
+ if ((!di->i_links_count && !di->i_links_count_hi) || !di->i_mode) {
+ mlog(ML_ERROR, "Invalid dinode #%llu: "
+ "Corrupt state (nlink = %u or mode = %u) detected!\n",
+ (unsigned long long)bh->b_blocknr,
+ ocfs2_read_links_count(di), le16_to_cpu(di->i_mode));
+ rc = -EFSCORRUPTED;
+ goto bail;
+ }
/*
* Errors after here are fatal.
*/
@@ -1604,8 +1612,7 @@ static int ocfs2_filecheck_repair_inode_block(struct super_block *sb,
trace_ocfs2_filecheck_repair_inode_block(
(unsigned long long)bh->b_blocknr);
- if (ocfs2_is_hard_readonly(OCFS2_SB(sb)) ||
- ocfs2_is_soft_readonly(OCFS2_SB(sb))) {
+ if (unlikely(ocfs2_emergency_state(OCFS2_SB(sb)))) {
mlog(ML_ERROR,
"Filecheck: cannot repair dinode #%llu "
"on readonly filesystem\n",
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index ce978a2497d9de..99637e34d9daf8 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -909,7 +909,7 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
struct buffer_head *di_bh = NULL;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ if (unlikely(ocfs2_emergency_state(osb)))
return -EROFS;
inode_lock(inode);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index c90b254da75eb5..4ec6dbed65a86c 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -23,6 +23,7 @@
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/highmem.h>
#include <linux/quotaops.h>
#include <linux/iversion.h>
@@ -568,7 +569,7 @@ static int __ocfs2_mknod_locked(struct inode *dir,
ocfs2_set_links_count(fe, inode->i_nlink);
fe->i_last_eb_blk = 0;
- strcpy(fe->i_signature, OCFS2_INODE_SIGNATURE);
+ strscpy(fe->i_signature, OCFS2_INODE_SIGNATURE);
fe->i_flags |= cpu_to_le32(OCFS2_VALID_FL);
ktime_get_coarse_real_ts64(&ts);
fe->i_atime = fe->i_ctime = fe->i_mtime =
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 6aaa94c554c12a..7b50e03dfa664a 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -680,6 +680,24 @@ static inline int ocfs2_is_soft_readonly(struct ocfs2_super *osb)
return ret;
}
+static inline int ocfs2_is_readonly(struct ocfs2_super *osb)
+{
+ int ret;
+ spin_lock(&osb->osb_lock);
+ ret = osb->osb_flags & (OCFS2_OSB_SOFT_RO | OCFS2_OSB_HARD_RO);
+ spin_unlock(&osb->osb_lock);
+
+ return ret;
+}
+
+static inline int ocfs2_emergency_state(struct ocfs2_super *osb)
+{
+ if (ocfs2_is_readonly(osb))
+ return -EROFS;
+
+ return 0;
+}
+
static inline int ocfs2_clusterinfo_valid(struct ocfs2_super *osb)
{
return (osb->s_feature_incompat &
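
ocfs2_emergency_state() folds the paired read-only checks into one helper
that already returns the right errno, so call sites shrink to a single
test. The conversion applied throughout this series, sketched:

	/* before */
	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	/* after: the helper returns -EROFS itself, or 0 */
	status = ocfs2_emergency_state(osb);
	if (unlikely(status))
		return status;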
diff --git a/fs/ocfs2/resize.c b/fs/ocfs2/resize.c
index b0733c08ed13b1..ac3ec2c2111963 100644
--- a/fs/ocfs2/resize.c
+++ b/fs/ocfs2/resize.c
@@ -276,7 +276,7 @@ int ocfs2_group_extend(struct inode * inode, int new_clusters)
u32 first_new_cluster;
u64 lgd_blkno;
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ if (unlikely(ocfs2_emergency_state(osb)))
return -EROFS;
if (new_clusters < 0)
@@ -466,7 +466,7 @@ int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
u16 cl_bpc;
u64 bg_ptr;
- if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ if (unlikely(ocfs2_emergency_state(osb)))
return -EROFS;
main_bm_inode = ocfs2_get_system_file_inode(osb,
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index a28c127b9934bc..fca2fd07c881d1 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
+#include <linux/string.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kmod.h>
@@ -670,7 +671,7 @@ static int __init ocfs2_stack_glue_init(void)
{
int ret;
- strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB);
+ strscpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB);
ocfs2_table_header = register_sysctl("fs/ocfs2/nm", ocfs2_nm_table);
if (!ocfs2_table_header) {
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index 6ac4dcd54588cf..8e6e5235b30c46 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
+#include <linux/string.h>
#include <linux/highmem.h>
#include <cluster/masklog.h>
@@ -372,7 +373,7 @@ static int ocfs2_block_group_fill(handle_t *handle,
}
memset(bg, 0, sb->s_blocksize);
- strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
+ strscpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
bg->bg_generation = cpu_to_le32(osb->fs_generation);
bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb, 1,
osb->s_feature_incompat));
@@ -1992,6 +1993,16 @@ static int ocfs2_claim_suballoc_bits(struct ocfs2_alloc_context *ac,
}
cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
+ if (!le16_to_cpu(cl->cl_next_free_rec) ||
+ le16_to_cpu(cl->cl_next_free_rec) > le16_to_cpu(cl->cl_count)) {
+ status = ocfs2_error(ac->ac_inode->i_sb,
+ "Chain allocator dinode %llu has invalid next "
+ "free chain record %u, but only %u total\n",
+ (unsigned long long)le64_to_cpu(fe->i_blkno),
+ le16_to_cpu(cl->cl_next_free_rec),
+ le16_to_cpu(cl->cl_count));
+ goto bail;
+ }
victim = ocfs2_find_victim_chain(cl);
ac->ac_chain = victim;
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 2c7ba1480f7a1b..3cbafac50cd137 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -2487,7 +2487,7 @@ static int ocfs2_handle_error(struct super_block *sb)
rv = -EIO;
} else { /* default option */
rv = -EROFS;
- if (sb_rdonly(sb) && (ocfs2_is_soft_readonly(osb) || ocfs2_is_hard_readonly(osb)))
+ if (sb_rdonly(sb) && ocfs2_emergency_state(osb))
return rv;
pr_crit("OCFS2: File system is now read-only.\n");
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index dc1761e8481422..1b21fbc16d73a2 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -49,9 +49,13 @@
#include "ocfs2_trace.h"
struct ocfs2_xattr_def_value_root {
- struct ocfs2_xattr_value_root xv;
- struct ocfs2_extent_rec er;
+ /* Must be last as it ends in a flexible-array member. */
+ TRAILING_OVERLAP(struct ocfs2_xattr_value_root, xv, xr_list.l_recs,
+ struct ocfs2_extent_rec er;
+ );
};
+static_assert(offsetof(struct ocfs2_xattr_def_value_root, xv.xr_list.l_recs) ==
+ offsetof(struct ocfs2_xattr_def_value_root, er));
struct ocfs2_xattr_bucket {
/* The inode these xattrs are associated with */
@@ -971,13 +975,39 @@ static int ocfs2_xattr_ibody_list(struct inode *inode,
struct ocfs2_xattr_header *header = NULL;
struct ocfs2_inode_info *oi = OCFS2_I(inode);
int ret = 0;
+ u16 xattr_count;
+ size_t max_entries;
+ u16 inline_size;
if (!(oi->ip_dyn_features & OCFS2_INLINE_XATTR_FL))
return ret;
+ inline_size = le16_to_cpu(di->i_xattr_inline_size);
+
+ /* Validate inline size is reasonable */
+ if (inline_size > inode->i_sb->s_blocksize ||
+ inline_size < sizeof(struct ocfs2_xattr_header)) {
+ ocfs2_error(inode->i_sb,
+ "Invalid xattr inline size %u in inode %llu\n",
+ inline_size,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ return -EFSCORRUPTED;
+ }
+
header = (struct ocfs2_xattr_header *)
- ((void *)di + inode->i_sb->s_blocksize -
- le16_to_cpu(di->i_xattr_inline_size));
+ ((void *)di + inode->i_sb->s_blocksize - inline_size);
+
+ xattr_count = le16_to_cpu(header->xh_count);
+ max_entries = (inline_size - sizeof(struct ocfs2_xattr_header)) /
+ sizeof(struct ocfs2_xattr_entry);
+
+ if (xattr_count > max_entries) {
+ ocfs2_error(inode->i_sb,
+ "xattr entry count %u exceeds maximum %zu in inode %llu\n",
+ xattr_count, max_entries,
+ (unsigned long long)OCFS2_I(inode)->ip_blkno);
+ return -EFSCORRUPTED;
+ }
ret = ocfs2_xattr_list_entries(inode, header, buffer, buffer_size);
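
The max_entries bound follows directly from the inline-region layout: the
xattr header sits at the end of the dinode block, and xh_count fixed-size
entries must fit between the header and the end of the region. A worked
example with illustrative sizes (not the real struct sizes):

	/* inline_size = 256, header = 64 bytes, entry = 16 bytes */
	max_entries = (256 - 64) / 16;	/* = 12 */
	/* any xh_count > 12 cannot fit and is treated as corruption */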
diff --git a/include/linux/args.h b/include/linux/args.h
index 2e8e65d975c778..0562dc51435e2d 100644
--- a/include/linux/args.h
+++ b/include/linux/args.h
@@ -6,9 +6,9 @@
/*
* How do these macros work?
*
- * In __COUNT_ARGS() _0 to _12 are just placeholders from the start
+ * In __COUNT_ARGS() _0 to _15 are just placeholders from the start
* in order to make sure _n is positioned over the correct number
- * from 12 to 0 (depending on X, which is a variadic argument list).
+ * from 15 to 0 (depending on X, which is a variadic argument list).
* They serve no purpose other than occupying a position. Since each
* macro parameter must have a distinct identifier, those identifiers
* are as good as any.
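
A cut-down expansion shows why the renumbering matters. With the
placeholder list grown to 15, _n still lands exactly on the argument
count; the same mechanism at size 4 (a hypothetical miniature, not the
kernel's macro):

	#define __COUNT_ARGS(_0, _1, _2, _3, _4, _n, X...) _n
	#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 4, 3, 2, 1, 0)

	/* COUNT_ARGS(a, b) -> __COUNT_ARGS(, a, b, 4, 3, 2, 1, 0): _n is 2 */
	/* COUNT_ARGS()     -> __COUNT_ARGS(, 4, 3, 2, 1, 0):       _n is 0 */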
diff --git a/include/linux/file.h b/include/linux/file.h
index cf389fde9bc28c..27484b444d3155 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -161,12 +161,10 @@ typedef struct fd_prepare class_fd_prepare_t;
/* Do not use directly. */
static inline void class_fd_prepare_destructor(const struct fd_prepare *fdf)
{
- if (unlikely(fdf->err)) {
- if (likely(fdf->__fd >= 0))
- put_unused_fd(fdf->__fd);
- if (unlikely(!IS_ERR_OR_NULL(fdf->__file)))
- fput(fdf->__file);
- }
+ if (unlikely(fdf->__fd >= 0))
+ put_unused_fd(fdf->__fd);
+ if (unlikely(!IS_ERR_OR_NULL(fdf->__file)))
+ fput(fdf->__file);
}
/* Do not use directly. */
@@ -230,7 +228,8 @@ static inline int class_fd_prepare_lock_err(const struct fd_prepare *fdf)
VFS_WARN_ON_ONCE(fdp->__fd < 0); \
VFS_WARN_ON_ONCE(IS_ERR_OR_NULL(fdp->__file)); \
fd_install(fdp->__fd, fdp->__file); \
- fdp->__fd; \
+ retain_and_null_ptr(fdp->__file); \
+ take_fd(fdp->__fd); \
})
/* Do not use directly. */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ae7f21aad0ac35..a4d9f964dfdea9 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -369,14 +369,13 @@ enum split_type {
SPLIT_TYPE_NON_UNIFORM,
};
-bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int __split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
unsigned int new_order);
int folio_split_unmapped(struct folio *folio, unsigned int new_order);
-int min_order_for_split(struct folio *folio);
+unsigned int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
-bool folio_split_supported(struct folio *folio, unsigned int new_order,
- enum split_type split_type, bool warns);
+int folio_check_splittable(struct folio *folio, unsigned int new_order,
+ enum split_type split_type);
int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
struct list_head *list);
@@ -407,7 +406,7 @@ static inline int split_huge_page_to_order(struct page *page, unsigned int new_o
static inline int try_folio_split_to_order(struct folio *folio,
struct page *page, unsigned int new_order)
{
- if (!folio_split_supported(folio, new_order, SPLIT_TYPE_NON_UNIFORM, /* warns= */ false))
+ if (folio_check_splittable(folio, new_order, SPLIT_TYPE_NON_UNIFORM))
return split_huge_page_to_order(&folio->page, new_order);
return folio_split(folio, new_order, page, NULL);
}
@@ -631,10 +630,10 @@ static inline int split_huge_page(struct page *page)
return -EINVAL;
}
-static inline int min_order_for_split(struct folio *folio)
+static inline unsigned int min_order_for_split(struct folio *folio)
{
VM_WARN_ON_ONCE_FOLIO(1, folio);
- return -EINVAL;
+ return 0;
}
static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 952d3c8dd6b7a2..62f81bbeb4906b 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -730,22 +730,6 @@ static inline void msi_device_domain_free_wired(struct irq_domain *domain, unsig
}
#endif
-static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
- const struct irq_domain_ops *ops,
- void *host_data)
-{
- struct irq_domain_info info = {
- .fwnode = of_fwnode_handle(of_node),
- .hwirq_max = ~0U,
- .ops = ops,
- .host_data = host_data,
- };
- struct irq_domain *d;
-
- d = irq_domain_instantiate(&info);
- return IS_ERR(d) ? NULL : d;
-}
-
static inline struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
unsigned int size,
const struct irq_domain_ops *ops,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7a1819c20643bb..15076261d0c2eb 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -438,7 +438,7 @@ enum {
#define VM_NOHUGEPAGE INIT_VM_FLAG(NOHUGEPAGE)
#define VM_MERGEABLE INIT_VM_FLAG(MERGEABLE)
#define VM_STACK INIT_VM_FLAG(STACK)
-#ifdef CONFIG_STACK_GROWS_UP
+#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK_EARLY INIT_VM_FLAG(STACK_EARLY)
#else
#define VM_STACK_EARLY VM_NONE
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4398e027f450ec..75ef7c9f9307ff 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -2289,7 +2289,7 @@ void sparse_init(void);
#else
#define sparse_init() do {} while (0)
#define sparse_index_init(_sec, _nid) do {} while (0)
-#define sparse_vmemmap_init_nid_early(_nid, _use) do {} while (0)
+#define sparse_vmemmap_init_nid_early(_nid) do {} while (0)
#define sparse_vmemmap_init_nid_late(_nid) do {} while (0)
#define pfn_in_present_section pfn_valid
#define subsection_map_init(_pfn, _nr_pages) do {} while (0)
diff --git a/include/linux/rseq_entry.h b/include/linux/rseq_entry.h
index c92167ff8a7fbd..a36b472627dec8 100644
--- a/include/linux/rseq_entry.h
+++ b/include/linux/rseq_entry.h
@@ -596,7 +596,7 @@ static __always_inline void rseq_exit_to_user_mode_legacy(void)
void __rseq_debug_syscall_return(struct pt_regs *regs);
-static inline void rseq_debug_syscall_return(struct pt_regs *regs)
+static __always_inline void rseq_debug_syscall_return(struct pt_regs *regs)
{
if (static_branch_unlikely(&rseq_debug_enabled))
__rseq_debug_syscall_return(regs);
diff --git a/include/trace/events/ceph.h b/include/trace/events/ceph.h
new file mode 100644
index 00000000000000..08cb0659fbfc78
--- /dev/null
+++ b/include/trace/events/ceph.h
@@ -0,0 +1,234 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Ceph filesystem support module tracepoints
+ *
+ * Copyright (C) 2025 IONOS SE. All Rights Reserved.
+ * Written by Max Kellermann (max.kellermann@ionos.com)
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ceph
+
+#if !defined(_TRACE_CEPH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CEPH_H
+
+#include <linux/tracepoint.h>
+
+#define ceph_mdsc_suspend_reasons \
+ EM(ceph_mdsc_suspend_reason_no_mdsmap, "no-mdsmap") \
+ EM(ceph_mdsc_suspend_reason_no_active_mds, "no-active-mds") \
+ EM(ceph_mdsc_suspend_reason_rejected, "rejected") \
+ E_(ceph_mdsc_suspend_reason_session, "session")
+
+#ifndef __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+#define __NETFS_DECLARE_TRACE_ENUMS_ONCE_ONLY
+
+#undef EM
+#undef E_
+#define EM(a, b) a,
+#define E_(a, b) a
+
+enum ceph_mdsc_suspend_reason { ceph_mdsc_suspend_reasons } __mode(byte);
+
+#endif
+
+/*
+ * Export enum symbols to userspace.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define E_(a, b) TRACE_DEFINE_ENUM(a);
+
+ceph_mdsc_suspend_reasons;
+
+/*
+ * Now redefine the EM() and E_() macros to map the enums to the strings that
+ * will be printed in the output.
+ */
+#undef EM
+#undef E_
+#define EM(a, b) { a, b },
+#define E_(a, b) { a, b }
+
+TRACE_EVENT(ceph_mdsc_submit_request,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req),
+
+ TP_ARGS(mdsc, req),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ __field(u64, ino)
+ __field(u64, snap)
+ ),
+
+ TP_fast_assign(
+ struct inode *inode;
+
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+
+ inode = req->r_inode;
+ if (inode == NULL && req->r_dentry)
+ inode = d_inode(req->r_dentry);
+
+ if (inode) {
+ __entry->ino = ceph_ino(inode);
+ __entry->snap = ceph_snap(inode);
+ } else {
+ __entry->ino = __entry->snap = 0;
+ }
+ ),
+
+ TP_printk("R=%llu op=%s ino=%llx,%llx",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op),
+ __entry->ino, __entry->snap)
+);
+
+TRACE_EVENT(ceph_mdsc_suspend_request,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ struct ceph_mds_request *req,
+ enum ceph_mdsc_suspend_reason reason),
+
+ TP_ARGS(mdsc, session, req, reason),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ __field(int, mds)
+ __field(enum ceph_mdsc_suspend_reason, reason)
+ ),
+
+ TP_fast_assign(
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+ __entry->mds = session ? session->s_mds : -1;
+ __entry->reason = reason;
+ ),
+
+ TP_printk("R=%llu op=%s reason=%s",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op),
+ __print_symbolic(__entry->reason, ceph_mdsc_suspend_reasons))
+);
+
+TRACE_EVENT(ceph_mdsc_resume_request,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req),
+
+ TP_ARGS(mdsc, req),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ ),
+
+ TP_fast_assign(
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+ ),
+
+ TP_printk("R=%llu op=%s",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op))
+);
+
+TRACE_EVENT(ceph_mdsc_send_request,
+ TP_PROTO(struct ceph_mds_session *session,
+ struct ceph_mds_request *req),
+
+ TP_ARGS(session, req),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ __field(int, mds)
+ ),
+
+ TP_fast_assign(
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+ __entry->mds = session->s_mds;
+ ),
+
+ TP_printk("R=%llu op=%s mds=%d",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op),
+ __entry->mds)
+);
+
+TRACE_EVENT(ceph_mdsc_complete_request,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_request *req),
+
+ TP_ARGS(mdsc, req),
+
+ TP_STRUCT__entry(
+ __field(u64, tid)
+ __field(int, op)
+ __field(int, err)
+ __field(unsigned long, latency_ns)
+ ),
+
+ TP_fast_assign(
+ __entry->tid = req->r_tid;
+ __entry->op = req->r_op;
+ __entry->err = req->r_err;
+ __entry->latency_ns = req->r_end_latency - req->r_start_latency;
+ ),
+
+ TP_printk("R=%llu op=%s err=%d latency_ns=%lu",
+ __entry->tid,
+ ceph_mds_op_name(__entry->op),
+ __entry->err,
+ __entry->latency_ns)
+);
+
+TRACE_EVENT(ceph_handle_caps,
+ TP_PROTO(struct ceph_mds_client *mdsc,
+ struct ceph_mds_session *session,
+ int op,
+ const struct ceph_vino *vino,
+ struct ceph_inode_info *inode,
+ u32 seq, u32 mseq, u32 issue_seq),
+
+ TP_ARGS(mdsc, session, op, vino, inode, seq, mseq, issue_seq),
+
+ TP_STRUCT__entry(
+ __field(int, mds)
+ __field(int, op)
+ __field(u64, ino)
+ __field(u64, snap)
+ __field(u32, seq)
+ __field(u32, mseq)
+ __field(u32, issue_seq)
+ ),
+
+ TP_fast_assign(
+ __entry->mds = session->s_mds;
+ __entry->op = op;
+ __entry->ino = vino->ino;
+ __entry->snap = vino->snap;
+ __entry->seq = seq;
+ __entry->mseq = mseq;
+ __entry->issue_seq = issue_seq;
+ ),
+
+ TP_printk("mds=%d op=%s vino=%llx.%llx seq=%u iseq=%u mseq=%u",
+ __entry->mds,
+ ceph_cap_op_name(__entry->op),
+ __entry->ino,
+ __entry->snap,
+ __entry->seq,
+ __entry->issue_seq,
+ __entry->mseq)
+);
+
+#undef EM
+#undef E_
+#endif /* _TRACE_CEPH_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
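
After the final EM()/E_() redefinition, ceph_mdsc_suspend_reasons expands
to the value/string pairs that __print_symbolic() consumes, so the
suspend-request TP_printk() above effectively reads (expanded sketch):

	__print_symbolic(__entry->reason,
			 { ceph_mdsc_suspend_reason_no_mdsmap,     "no-mdsmap" },
			 { ceph_mdsc_suspend_reason_no_active_mds, "no-active-mds" },
			 { ceph_mdsc_suspend_reason_rejected,      "rejected" },
			 { ceph_mdsc_suspend_reason_session,       "session" })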
diff --git a/kernel/cpu.c b/kernel/cpu.c
index b674fdf96208be..8df2d773fe3b52 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -249,6 +249,14 @@ err:
return ret;
}
+/*
+ * The former STARTING/DYING states run with IRQs disabled and must not fail.
+ */
+static bool cpuhp_is_atomic_state(enum cpuhp_state state)
+{
+ return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
+}
+
#ifdef CONFIG_SMP
static bool cpuhp_is_ap_state(enum cpuhp_state state)
{
@@ -271,14 +279,6 @@ static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
complete(done);
}
-/*
- * The former STARTING/DYING states, ran with IRQs disabled and must not fail.
- */
-static bool cpuhp_is_atomic_state(enum cpuhp_state state)
-{
- return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
-}
-
/* Synchronization state management */
enum cpuhp_sync_state {
SYNC_STATE_DEAD,
@@ -2364,7 +2364,14 @@ static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
else
ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
#else
- ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+ if (cpuhp_is_atomic_state(state)) {
+ guard(irqsave)();
+ ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+ /* STARTING/DYING must not fail! */
+ WARN_ON_ONCE(ret);
+ } else {
+ ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
+ }
#endif
BUG_ON(ret && !bringup);
return ret;
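
On !CONFIG_SMP builds the atomic states now get the same IRQs-off
guarantee as the SMP path. guard(irqsave)() is the scope-based form of
local_irq_save()/local_irq_restore(), so the new branch is roughly
equivalent to this open-coded sketch:

	unsigned long flags;

	local_irq_save(flags);
	ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
	WARN_ON_ONCE(ret);		/* STARTING/DYING must not fail! */
	local_irq_restore(flags);	/* the guard does this at scope exit */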
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ece716879cbc24..dad0d3d2e85f7d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2317,8 +2317,6 @@ out:
perf_event__header_size(leader);
}
-static void sync_child_event(struct perf_event *child_event);
-
static void perf_child_detach(struct perf_event *event)
{
struct perf_event *parent_event = event->parent;
@@ -2337,7 +2335,6 @@ static void perf_child_detach(struct perf_event *event)
lockdep_assert_held(&parent_event->child_mutex);
*/
- sync_child_event(event);
list_del_init(&event->child_list);
}
@@ -4588,6 +4585,7 @@ out:
static void perf_remove_from_owner(struct perf_event *event);
static void perf_event_exit_event(struct perf_event *event,
struct perf_event_context *ctx,
+ struct task_struct *task,
bool revoke);
/*
@@ -4615,7 +4613,7 @@ static void perf_event_remove_on_exec(struct perf_event_context *ctx)
modified = true;
- perf_event_exit_event(event, ctx, false);
+ perf_event_exit_event(event, ctx, ctx->task, false);
}
raw_spin_lock_irqsave(&ctx->lock, flags);
@@ -12518,7 +12516,7 @@ static void __pmu_detach_event(struct pmu *pmu, struct perf_event *event,
/*
* De-schedule the event and mark it REVOKED.
*/
- perf_event_exit_event(event, ctx, true);
+ perf_event_exit_event(event, ctx, ctx->task, true);
/*
* All _free_event() bits that rely on event->pmu:
@@ -14075,14 +14073,13 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
-static void sync_child_event(struct perf_event *child_event)
+static void sync_child_event(struct perf_event *child_event,
+ struct task_struct *task)
{
struct perf_event *parent_event = child_event->parent;
u64 child_val;
if (child_event->attr.inherit_stat) {
- struct task_struct *task = child_event->ctx->task;
-
if (task && task != TASK_TOMBSTONE)
perf_event_read_event(child_event, task);
}
@@ -14101,7 +14098,9 @@ static void sync_child_event(struct perf_event *child_event)
static void
perf_event_exit_event(struct perf_event *event,
- struct perf_event_context *ctx, bool revoke)
+ struct perf_event_context *ctx,
+ struct task_struct *task,
+ bool revoke)
{
struct perf_event *parent_event = event->parent;
unsigned long detach_flags = DETACH_EXIT;
@@ -14124,6 +14123,9 @@ perf_event_exit_event(struct perf_event *event,
mutex_lock(&parent_event->child_mutex);
/* PERF_ATTACH_ITRACE might be set concurrently */
attach_state = READ_ONCE(event->attach_state);
+
+ if (attach_state & PERF_ATTACH_CHILD)
+ sync_child_event(event, task);
}
if (revoke)
@@ -14215,7 +14217,7 @@ static void perf_event_exit_task_context(struct task_struct *task, bool exit)
perf_event_task(task, ctx, 0);
list_for_each_entry_safe(child_event, next, &ctx->event_list, event_entry)
- perf_event_exit_event(child_event, ctx, false);
+ perf_event_exit_event(child_event, ctx, exit ? task : NULL, false);
mutex_unlock(&ctx->mutex);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index f11ceb8be8c419..d546d32390a81d 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -79,7 +79,7 @@ struct uprobe {
* The generic code assumes that it has two members of unknown type
* owned by the arch-specific code:
*
- * insn - copy_insn() saves the original instruction here for
+ * insn - copy_insn() saves the original instruction here for
* arch_uprobe_analyze_insn().
*
* ixol - potentially modified instruction to execute out of
@@ -107,8 +107,8 @@ static LIST_HEAD(delayed_uprobe_list);
* allocated.
*/
struct xol_area {
- wait_queue_head_t wq; /* if all slots are busy */
- unsigned long *bitmap; /* 0 = free slot */
+ wait_queue_head_t wq; /* if all slots are busy */
+ unsigned long *bitmap; /* 0 = free slot */
struct page *page;
/*
@@ -116,7 +116,7 @@ struct xol_area {
* itself. The probed process or a naughty kernel module could make
* the vma go away, and we must handle that reasonably gracefully.
*/
- unsigned long vaddr; /* Page(s) of instruction slots */
+ unsigned long vaddr; /* Page(s) of instruction slots */
};
static void uprobe_warn(struct task_struct *t, const char *msg)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0bb29316b4362c..8b1b4c8a4f54c5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -2470,6 +2470,9 @@ int setup_percpu_irq(unsigned int irq, struct irqaction *act)
if (retval < 0)
return retval;
+ if (!act->affinity)
+ act->affinity = cpu_online_mask;
+
retval = __setup_irq(irq, desc, act);
if (retval)
diff --git a/kernel/liveupdate/Kconfig b/kernel/liveupdate/Kconfig
index 9b2515f31afb4b..d2aeaf13c3acb8 100644
--- a/kernel/liveupdate/Kconfig
+++ b/kernel/liveupdate/Kconfig
@@ -54,6 +54,7 @@ config KEXEC_HANDOVER_ENABLE_DEFAULT
config LIVEUPDATE
bool "Live Update Orchestrator"
depends on KEXEC_HANDOVER
+ depends on SHMEM
help
Enable the Live Update Orchestrator. Live Update is a mechanism,
typically based on kexec, that allows the kernel to be updated
diff --git a/kernel/liveupdate/luo_core.c b/kernel/liveupdate/luo_core.c
index f7ecaf7740d19d..944663d99dd9f3 100644
--- a/kernel/liveupdate/luo_core.c
+++ b/kernel/liveupdate/luo_core.c
@@ -399,10 +399,8 @@ static long luo_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
int err;
nr = _IOC_NR(cmd);
- if (nr < LIVEUPDATE_CMD_BASE ||
- (nr - LIVEUPDATE_CMD_BASE) >= ARRAY_SIZE(luo_ioctl_ops)) {
+ if (nr - LIVEUPDATE_CMD_BASE >= ARRAY_SIZE(luo_ioctl_ops))
return -EINVAL;
- }
ucmd.ubuffer = (void __user *)arg;
err = get_user(ucmd.user_size, (u32 __user *)ucmd.ubuffer);
diff --git a/kernel/liveupdate/luo_file.c b/kernel/liveupdate/luo_file.c
index ddff87917b217c..a32a777f6df8f3 100644
--- a/kernel/liveupdate/luo_file.c
+++ b/kernel/liveupdate/luo_file.c
@@ -554,17 +554,20 @@ int luo_retrieve_file(struct luo_file_set *file_set, u64 token,
{
struct liveupdate_file_op_args args = {0};
struct luo_file *luo_file;
+ bool found = false;
int err;
if (list_empty(&file_set->files_list))
return -ENOENT;
list_for_each_entry(luo_file, &file_set->files_list, list) {
- if (luo_file->token == token)
+ if (luo_file->token == token) {
+ found = true;
break;
+ }
}
- if (luo_file->token != token)
+ if (!found)
return -ENOENT;
guard(mutex)(&luo_file->mutex);
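
The found flag fixes a subtle iterator pitfall: if list_for_each_entry()
completes without hitting the break, the cursor is container_of() of the
list head rather than a real element, so the old `luo_file->token !=
token` recheck read memory outside any luo_file. The same fix in its
other common form, for comparison (sketch):

	struct luo_file *iter, *luo_file = NULL;

	list_for_each_entry(iter, &file_set->files_list, list) {
		if (iter->token == token) {
			luo_file = iter;	/* remember the real match */
			break;
		}
	}
	if (!luo_file)
		return -ENOENT;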
diff --git a/lib/bug.c b/lib/bug.c
index edd9041f89f3aa..623c467a8b76c7 100644
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -173,6 +173,9 @@ struct bug_entry *find_bug(unsigned long bugaddr)
return module_find_bug(bugaddr);
}
+__diag_push();
+__diag_ignore(GCC, all, "-Wsuggest-attribute=format",
+ "Not a valid __printf() conversion candidate.");
static void __warn_printf(const char *fmt, struct pt_regs *regs)
{
if (!fmt)
@@ -192,6 +195,7 @@ static void __warn_printf(const char *fmt, struct pt_regs *regs)
printk("%s", fmt);
}
+__diag_pop();
static enum bug_trap_type __report_bug(struct bug_entry *bug, unsigned long bugaddr, struct pt_regs *regs)
{
@@ -262,7 +266,7 @@ enum bug_trap_type report_bug_entry(struct bug_entry *bug, struct pt_regs *regs)
bool rcu = false;
rcu = warn_rcu_enter();
- ret = __report_bug(bug, 0, regs);
+ ret = __report_bug(bug, bug_addr(bug), regs);
warn_rcu_exit(rcu);
return ret;
diff --git a/mm/damon/tests/core-kunit.h b/mm/damon/tests/core-kunit.h
index a1eff023e928a0..8cb369b63e08ed 100644
--- a/mm/damon/tests/core-kunit.h
+++ b/mm/damon/tests/core-kunit.h
@@ -924,7 +924,7 @@ static void damos_test_commit_for(struct kunit *test, struct damos *dst,
}
}
-static void damos_test_commit(struct kunit *test)
+static void damos_test_commit_pageout(struct kunit *test)
{
damos_test_commit_for(test,
&(struct damos){
@@ -945,6 +945,10 @@ static void damos_test_commit(struct kunit *test)
DAMOS_WMARK_FREE_MEM_RATE,
800, 50, 30},
});
+}
+
+static void damos_test_commit_migrate_hot(struct kunit *test)
+{
damos_test_commit_for(test,
&(struct damos){
.pattern = (struct damos_access_pattern){
@@ -1230,7 +1234,8 @@ static struct kunit_case damon_test_cases[] = {
KUNIT_CASE(damos_test_commit_quota),
KUNIT_CASE(damos_test_commit_dests),
KUNIT_CASE(damos_test_commit_filter),
- KUNIT_CASE(damos_test_commit),
+ KUNIT_CASE(damos_test_commit_pageout),
+ KUNIT_CASE(damos_test_commit_migrate_hot),
KUNIT_CASE(damon_test_commit_target_regions),
KUNIT_CASE(damos_test_filter_out),
KUNIT_CASE(damon_test_feed_loop_next_input),
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f7c565f11a985a..40cf59301c21aa 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3464,23 +3464,6 @@ static void lru_add_split_folio(struct folio *folio, struct folio *new_folio,
}
}
-/* Racy check whether the huge page can be split */
-bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
-{
- int extra_pins;
-
- /* Additional pins from page cache */
- if (folio_test_anon(folio))
- extra_pins = folio_test_swapcache(folio) ?
- folio_nr_pages(folio) : 0;
- else
- extra_pins = folio_nr_pages(folio);
- if (pextra_pins)
- *pextra_pins = extra_pins;
- return folio_mapcount(folio) == folio_ref_count(folio) - extra_pins -
- caller_pins;
-}
-
static bool page_range_has_hwpoisoned(struct page *page, long nr_pages)
{
for (; nr_pages; page++, nr_pages--)
@@ -3697,15 +3680,40 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
return 0;
}
-bool folio_split_supported(struct folio *folio, unsigned int new_order,
- enum split_type split_type, bool warns)
+/**
+ * folio_check_splittable() - check if a folio can be split to a given order
+ * @folio: folio to be split
+ * @new_order: the smallest order of the folios after the split (a
+ * buddy-allocator-like split generates folios with orders from
+ * @folio's order - 1 down to @new_order).
+ * @split_type: uniform or non-uniform split
+ *
+ * folio_check_splittable() checks if @folio can be split to @new_order
+ * using the @split_type method. The check for truncated folios must
+ * come first.
+ *
+ * Context: folio must be locked.
+ *
+ * Return: 0 - @folio can be split to @new_order, otherwise an error number is
+ * returned.
+ */
+int folio_check_splittable(struct folio *folio, unsigned int new_order,
+ enum split_type split_type)
{
+ VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
+ /*
+ * Folios that just got truncated cannot get split. Signal to the
+ * caller that there was a race.
+ *
+ * TODO: this will also currently refuse folios without a mapping in the
+ * swapcache (shmem or to-be-anon folios).
+ */
+ if (!folio->mapping && !folio_test_anon(folio))
+ return -EBUSY;
+
if (folio_test_anon(folio)) {
/* order-1 is not supported for anonymous THP. */
- VM_WARN_ONCE(warns && new_order == 1,
- "Cannot split to order-1 folio");
if (new_order == 1)
- return false;
+ return -EINVAL;
} else if (split_type == SPLIT_TYPE_NON_UNIFORM || new_order) {
if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
!mapping_large_folio_support(folio->mapping)) {
@@ -3726,9 +3734,7 @@ bool folio_split_supported(struct folio *folio, unsigned int new_order,
* case, the mapping does not actually support large
* folios properly.
*/
- VM_WARN_ONCE(warns,
- "Cannot split file folio to non-0 order");
- return false;
+ return -EINVAL;
}
}
@@ -3741,19 +3747,31 @@ bool folio_split_supported(struct folio *folio, unsigned int new_order,
* here.
*/
if ((split_type == SPLIT_TYPE_NON_UNIFORM || new_order) && folio_test_swapcache(folio)) {
- VM_WARN_ONCE(warns,
- "Cannot split swapcache folio to non-0 order");
- return false;
+ return -EINVAL;
}
- return true;
+ if (is_huge_zero_folio(folio))
+ return -EINVAL;
+
+ if (folio_test_writeback(folio))
+ return -EBUSY;
+
+ return 0;
+}
+
+/* Number of folio references from the pagecache or the swapcache. */
+static unsigned int folio_cache_ref_count(const struct folio *folio)
+{
+ if (folio_test_anon(folio) && !folio_test_swapcache(folio))
+ return 0;
+ return folio_nr_pages(folio);
}
static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int new_order,
struct page *split_at, struct xa_state *xas,
struct address_space *mapping, bool do_lru,
struct list_head *list, enum split_type split_type,
- pgoff_t end, int *nr_shmem_dropped, int extra_pins)
+ pgoff_t end, int *nr_shmem_dropped)
{
struct folio *end_folio = folio_next(folio);
struct folio *new_folio, *next;
@@ -3764,10 +3782,9 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
VM_WARN_ON_ONCE(!mapping && end);
/* Prevent deferred_split_scan() touching ->_refcount */
ds_queue = folio_split_queue_lock(folio);
- if (folio_ref_freeze(folio, 1 + extra_pins)) {
+ if (folio_ref_freeze(folio, folio_cache_ref_count(folio) + 1)) {
struct swap_cluster_info *ci = NULL;
struct lruvec *lruvec;
- int expected_refs;
if (old_order > 1) {
if (!list_empty(&folio->_deferred_list)) {
@@ -3835,8 +3852,8 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
zone_device_private_split_cb(folio, new_folio);
- expected_refs = folio_expected_ref_count(new_folio) + 1;
- folio_ref_unfreeze(new_folio, expected_refs);
+ folio_ref_unfreeze(new_folio,
+ folio_cache_ref_count(new_folio) + 1);
if (do_lru)
lru_add_split_folio(folio, new_folio, lruvec, list);
@@ -3879,8 +3896,7 @@ static int __folio_freeze_and_split_unmapped(struct folio *folio, unsigned int n
* Otherwise, a parallel folio_try_get() can grab @folio
* and its caller can see stale page cache entries.
*/
- expected_refs = folio_expected_ref_count(folio) + 1;
- folio_ref_unfreeze(folio, expected_refs);
+ folio_ref_unfreeze(folio, folio_cache_ref_count(folio) + 1);
if (do_lru)
unlock_page_lruvec(lruvec);
@@ -3929,40 +3945,27 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
struct folio *new_folio, *next;
int nr_shmem_dropped = 0;
int remap_flags = 0;
- int extra_pins, ret;
+ int ret;
pgoff_t end = 0;
- bool is_hzp;
VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
- if (folio != page_folio(split_at) || folio != page_folio(lock_at))
- return -EINVAL;
-
- /*
- * Folios that just got truncated cannot get split. Signal to the
- * caller that there was a race.
- *
- * TODO: this will also currently refuse shmem folios that are in the
- * swapcache.
- */
- if (!is_anon && !folio->mapping)
- return -EBUSY;
-
- if (new_order >= old_order)
- return -EINVAL;
-
- if (!folio_split_supported(folio, new_order, split_type, /* warn = */ true))
- return -EINVAL;
+ if (folio != page_folio(split_at) || folio != page_folio(lock_at)) {
+ ret = -EINVAL;
+ goto out;
+ }
- is_hzp = is_huge_zero_folio(folio);
- if (is_hzp) {
- pr_warn_ratelimited("Called split_huge_page for huge zero page\n");
- return -EBUSY;
+ if (new_order >= old_order) {
+ ret = -EINVAL;
+ goto out;
}
- if (folio_test_writeback(folio))
- return -EBUSY;
+ ret = folio_check_splittable(folio, new_order, split_type);
+ if (ret) {
+ VM_WARN_ONCE(ret == -EINVAL, "Tried to split an unsplittable folio");
+ goto out;
+ }
if (is_anon) {
/*
@@ -4027,7 +4030,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
* Racy check if we can split the page, before unmap_folio() will
* split PMDs
*/
- if (!can_split_folio(folio, 1, &extra_pins)) {
+ if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1) {
ret = -EAGAIN;
goto out_unlock;
}
@@ -4050,8 +4053,7 @@ static int __folio_split(struct folio *folio, unsigned int new_order,
}
ret = __folio_freeze_and_split_unmapped(folio, new_order, split_at, &xas, mapping,
- true, list, split_type, end, &nr_shmem_dropped,
- extra_pins);
+ true, list, split_type, end, &nr_shmem_dropped);
fail:
if (mapping)
xas_unlock(&xas);
@@ -4125,20 +4127,20 @@ out:
*/
int folio_split_unmapped(struct folio *folio, unsigned int new_order)
{
- int extra_pins, ret = 0;
+ int ret = 0;
VM_WARN_ON_ONCE_FOLIO(folio_mapped(folio), folio);
VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
VM_WARN_ON_ONCE_FOLIO(!folio_test_large(folio), folio);
VM_WARN_ON_ONCE_FOLIO(!folio_test_anon(folio), folio);
- if (!can_split_folio(folio, 1, &extra_pins))
+ if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1)
return -EAGAIN;
local_irq_disable();
ret = __folio_freeze_and_split_unmapped(folio, new_order, &folio->page, NULL,
NULL, false, NULL, SPLIT_TYPE_UNIFORM,
- 0, NULL, extra_pins);
+ 0, NULL);
local_irq_enable();
return ret;
}
@@ -4230,16 +4232,29 @@ int folio_split(struct folio *folio, unsigned int new_order,
SPLIT_TYPE_NON_UNIFORM);
}
-int min_order_for_split(struct folio *folio)
+/**
+ * min_order_for_split() - get the minimum order @folio can be split to
+ * @folio: folio to split
+ *
+ * min_order_for_split() tells the minimum order @folio can be split to.
+ * If a file-backed folio is truncated, 0 will be returned. Any subsequent
+ * split attempt should get -EBUSY from split checking code.
+ *
+ * Return: @folio's minimum order for split
+ */
+unsigned int min_order_for_split(struct folio *folio)
{
if (folio_test_anon(folio))
return 0;
- if (!folio->mapping) {
- if (folio_test_pmd_mappable(folio))
- count_vm_event(THP_SPLIT_PAGE_FAILED);
- return -EBUSY;
- }
+ /*
+ * If the folio got truncated, we don't know the previous mapping and
+ * consequently the old min order. But it doesn't matter, as any split
+ * attempt will immediately fail with -EBUSY as the folio cannot get
+ * split until freed.
+ */
+ if (!folio->mapping)
+ return 0;
return mapping_min_folio_order(folio->mapping);
}
@@ -4631,7 +4646,7 @@ static int split_huge_pages_pid(int pid, unsigned long vaddr_start,
* can be split or not. So skip the check here.
*/
if (!folio_test_private(folio) &&
- !can_split_folio(folio, 0, NULL))
+ folio_expected_ref_count(folio) != folio_ref_count(folio))
goto next;
if (!folio_trylock(folio))
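
With can_split_folio() removed, the racy pin check is stated directly in
terms of folio_expected_ref_count(), as used at each former call site
(sketch):

	/* folio_expected_ref_count() counts mappings plus pagecache or
	 * swapcache references; the "- 1" is the reference the caller
	 * itself holds. Any other difference means extra pins. */
	if (folio_expected_ref_count(folio) != folio_ref_count(folio) - 1)
		return -EAGAIN;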
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9e7815b4f0583f..51273baec9e5dc 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6579,6 +6579,7 @@ long hugetlb_reserve_pages(struct inode *inode,
struct resv_map *resv_map;
struct hugetlb_cgroup *h_cg = NULL;
long gbl_reserve, regions_needed = 0;
+ int err;
/* This should never happen */
if (from > to) {
@@ -6612,8 +6613,10 @@ long hugetlb_reserve_pages(struct inode *inode,
} else {
/* Private mapping. */
resv_map = resv_map_alloc();
- if (!resv_map)
+ if (!resv_map) {
+ err = -ENOMEM;
goto out_err;
+ }
chg = to - from;
@@ -6621,11 +6624,15 @@ long hugetlb_reserve_pages(struct inode *inode,
set_vma_desc_resv_flags(desc, HPAGE_RESV_OWNER);
}
- if (chg < 0)
+ if (chg < 0) {
+ /* region_chg() above can return -ENOMEM */
+ err = (chg == -ENOMEM) ? -ENOMEM : -EINVAL;
goto out_err;
+ }
- if (hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
- chg * pages_per_huge_page(h), &h_cg) < 0)
+ err = hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
+ chg * pages_per_huge_page(h), &h_cg);
+ if (err < 0)
goto out_err;
if (desc && !(desc->vm_flags & VM_MAYSHARE) && h_cg) {
@@ -6641,14 +6648,17 @@ long hugetlb_reserve_pages(struct inode *inode,
* reservations already in place (gbl_reserve).
*/
gbl_reserve = hugepage_subpool_get_pages(spool, chg);
- if (gbl_reserve < 0)
+ if (gbl_reserve < 0) {
+ err = gbl_reserve;
goto out_uncharge_cgroup;
+ }
/*
* Check enough hugepages are available for the reservation.
* Hand the pages back to the subpool if there are not
*/
- if (hugetlb_acct_memory(h, gbl_reserve) < 0)
+ err = hugetlb_acct_memory(h, gbl_reserve);
+ if (err < 0)
goto out_put_pages;
/*
@@ -6667,6 +6677,7 @@ long hugetlb_reserve_pages(struct inode *inode,
if (unlikely(add < 0)) {
hugetlb_acct_memory(h, -gbl_reserve);
+ err = add;
goto out_put_pages;
} else if (unlikely(chg > add)) {
/*
@@ -6726,7 +6737,7 @@ out_err:
kref_put(&resv_map->refs, resv_map_release);
set_vma_desc_resv_map(desc, NULL);
}
- return chg < 0 ? chg : add < 0 ? add : -EINVAL;
+ return err;
}
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
diff --git a/mm/shmem.c b/mm/shmem.c
index 3f194c9842a8ca..b329b5302c4885 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -5794,8 +5794,15 @@ EXPORT_SYMBOL_GPL(shmem_truncate_range);
#define shmem_vm_ops generic_file_vm_ops
#define shmem_anon_vm_ops generic_file_vm_ops
#define shmem_file_operations ramfs_file_operations
-#define shmem_acct_size(flags, size) 0
-#define shmem_unacct_size(flags, size) do {} while (0)
+
+static inline int shmem_acct_size(unsigned long flags, loff_t size)
+{
+ return 0;
+}
+
+static inline void shmem_unacct_size(unsigned long flags, loff_t size)
+{
+}
static inline struct inode *shmem_get_inode(struct mnt_idmap *idmap,
struct super_block *sb, struct inode *dir,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 900c74b6aa62a5..670fe9fae5baa2 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1284,7 +1284,8 @@ retry:
goto keep_locked;
if (folio_test_large(folio)) {
/* cannot split folio, skip it */
- if (!can_split_folio(folio, 1, NULL))
+ if (folio_expected_ref_count(folio) !=
+ folio_ref_count(folio) - 1)
goto activate_locked;
/*
* Split partially mapped folios right away.
@@ -4540,7 +4541,8 @@ static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
int scanned = 0;
int isolated = 0;
int skipped = 0;
- int remaining = min(nr_to_scan, MAX_LRU_BATCH);
+ int scan_batch = min(nr_to_scan, MAX_LRU_BATCH);
+ int remaining = scan_batch;
struct lru_gen_folio *lrugen = &lruvec->lrugen;
struct mem_cgroup *memcg = lruvec_memcg(lruvec);
@@ -4600,7 +4602,7 @@ static int scan_folios(unsigned long nr_to_scan, struct lruvec *lruvec,
count_memcg_events(memcg, item, isolated);
count_memcg_events(memcg, PGREFILL, sorted);
__count_vm_events(PGSCAN_ANON + type, isolated);
- trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH,
+ trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, scan_batch,
scanned, skipped, isolated,
type ? LRU_INACTIVE_FILE : LRU_INACTIVE_ANON);
if (type == LRU_GEN_FILE)
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 6664ea73ccf81b..3667319b949ddc 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1280,8 +1280,7 @@ static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
if (refcount_inc_not_zero(&osd->o_ref)) {
- dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
- refcount_read(&osd->o_ref));
+ dout("get_osd %p -> %d\n", osd, refcount_read(&osd->o_ref));
return osd;
} else {
dout("get_osd %p FAIL\n", osd);
@@ -1291,8 +1290,7 @@ static struct ceph_osd *get_osd(struct ceph_osd *osd)
static void put_osd(struct ceph_osd *osd)
{
- dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
- refcount_read(&osd->o_ref) - 1);
+ dout("put_osd %p -> %d\n", osd, refcount_read(&osd->o_ref) - 1);
if (refcount_dec_and_test(&osd->o_ref)) {
osd_cleanup(osd);
kfree(osd);
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c
index d245fa508e1cc9..34b3ab59602f73 100644
--- a/net/ceph/osdmap.c
+++ b/net/ceph/osdmap.c
@@ -806,51 +806,49 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
ceph_decode_need(p, end, len, bad);
pool_end = *p + len;
+ ceph_decode_need(p, end, 4 + 4 + 4, bad);
pi->type = ceph_decode_8(p);
pi->size = ceph_decode_8(p);
pi->crush_ruleset = ceph_decode_8(p);
pi->object_hash = ceph_decode_8(p);
-
pi->pg_num = ceph_decode_32(p);
pi->pgp_num = ceph_decode_32(p);
- *p += 4 + 4; /* skip lpg* */
- *p += 4; /* skip last_change */
- *p += 8 + 4; /* skip snap_seq, snap_epoch */
+ /* lpg*, last_change, snap_seq, snap_epoch */
+ ceph_decode_skip_n(p, end, 8 + 4 + 8 + 4, bad);
/* skip snaps */
- num = ceph_decode_32(p);
+ ceph_decode_32_safe(p, end, num, bad);
while (num--) {
- *p += 8; /* snapid key */
- *p += 1 + 1; /* versions */
- len = ceph_decode_32(p);
- *p += len;
+ /* snapid key, pool snap (with versions) */
+ ceph_decode_skip_n(p, end, 8 + 2, bad);
+ ceph_decode_skip_string(p, end, bad);
}
- /* skip removed_snaps */
- num = ceph_decode_32(p);
- *p += num * (8 + 8);
+ /* removed_snaps */
+ ceph_decode_skip_map(p, end, 64, 64, bad);
+ ceph_decode_need(p, end, 8 + 8 + 4, bad);
*p += 8; /* skip auid */
pi->flags = ceph_decode_64(p);
*p += 4; /* skip crash_replay_interval */
if (ev >= 7)
- pi->min_size = ceph_decode_8(p);
+ ceph_decode_8_safe(p, end, pi->min_size, bad);
else
pi->min_size = pi->size - pi->size / 2;
if (ev >= 8)
- *p += 8 + 8; /* skip quota_max_* */
+ /* quota_max_* */
+ ceph_decode_skip_n(p, end, 8 + 8, bad);
if (ev >= 9) {
- /* skip tiers */
- num = ceph_decode_32(p);
- *p += num * 8;
+ /* tiers */
+ ceph_decode_skip_set(p, end, 64, bad);
+ ceph_decode_need(p, end, 8 + 1 + 8 + 8, bad);
*p += 8; /* skip tier_of */
*p += 1; /* skip cache_mode */
-
pi->read_tier = ceph_decode_64(p);
pi->write_tier = ceph_decode_64(p);
} else {
@@ -858,86 +856,76 @@ static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
pi->write_tier = -1;
}
- if (ev >= 10) {
- /* skip properties */
- num = ceph_decode_32(p);
- while (num--) {
- len = ceph_decode_32(p);
- *p += len; /* key */
- len = ceph_decode_32(p);
- *p += len; /* val */
- }
- }
+ if (ev >= 10)
+ /* properties */
+ ceph_decode_skip_map(p, end, string, string, bad);
if (ev >= 11) {
- /* skip hit_set_params */
- *p += 1 + 1; /* versions */
- len = ceph_decode_32(p);
- *p += len;
+ /* hit_set_params (with versions) */
+ ceph_decode_skip_n(p, end, 2, bad);
+ ceph_decode_skip_string(p, end, bad);
- *p += 4; /* skip hit_set_period */
- *p += 4; /* skip hit_set_count */
+ /* hit_set_period, hit_set_count */
+ ceph_decode_skip_n(p, end, 4 + 4, bad);
}
if (ev >= 12)
- *p += 4; /* skip stripe_width */
+ /* stripe_width */
+ ceph_decode_skip_32(p, end, bad);
- if (ev >= 13) {
- *p += 8; /* skip target_max_bytes */
- *p += 8; /* skip target_max_objects */
- *p += 4; /* skip cache_target_dirty_ratio_micro */
- *p += 4; /* skip cache_target_full_ratio_micro */
- *p += 4; /* skip cache_min_flush_age */
- *p += 4; /* skip cache_min_evict_age */
- }
+ if (ev >= 13)
+ /* target_max_*, cache_target_*, cache_min_* */
+ ceph_decode_skip_n(p, end, 16 + 8 + 8, bad);
- if (ev >= 14) {
- /* skip erasure_code_profile */
- len = ceph_decode_32(p);
- *p += len;
- }
+ if (ev >= 14)
+ /* erasure_code_profile */
+ ceph_decode_skip_string(p, end, bad);
/*
* last_force_op_resend_preluminous, will be overridden if the
* map was encoded with RESEND_ON_SPLIT
*/
if (ev >= 15)
- pi->last_force_request_resend = ceph_decode_32(p);
+ ceph_decode_32_safe(p, end, pi->last_force_request_resend, bad);
else
pi->last_force_request_resend = 0;
if (ev >= 16)
- *p += 4; /* skip min_read_recency_for_promote */
+ /* min_read_recency_for_promote */
+ ceph_decode_skip_32(p, end, bad);
if (ev >= 17)
- *p += 8; /* skip expected_num_objects */
+ /* expected_num_objects */
+ ceph_decode_skip_64(p, end, bad);
if (ev >= 19)
- *p += 4; /* skip cache_target_dirty_high_ratio_micro */
+ /* cache_target_dirty_high_ratio_micro */
+ ceph_decode_skip_32(p, end, bad);
if (ev >= 20)
- *p += 4; /* skip min_write_recency_for_promote */
+ /* min_write_recency_for_promote */
+ ceph_decode_skip_32(p, end, bad);
if (ev >= 21)
- *p += 1; /* skip use_gmt_hitset */
+ /* use_gmt_hitset */
+ ceph_decode_skip_8(p, end, bad);
if (ev >= 22)
- *p += 1; /* skip fast_read */
+ /* fast_read */
+ ceph_decode_skip_8(p, end, bad);
- if (ev >= 23) {
- *p += 4; /* skip hit_set_grade_decay_rate */
- *p += 4; /* skip hit_set_search_last_n */
- }
+ if (ev >= 23)
+ /* hit_set_grade_decay_rate, hit_set_search_last_n */
+ ceph_decode_skip_n(p, end, 4 + 4, bad);
if (ev >= 24) {
- /* skip opts */
- *p += 1 + 1; /* versions */
- len = ceph_decode_32(p);
- *p += len;
+ /* opts (with versions) */
+ ceph_decode_skip_n(p, end, 2, bad);
+ ceph_decode_skip_string(p, end, bad);
}
if (ev >= 25)
- pi->last_force_request_resend = ceph_decode_32(p);
+ ceph_decode_32_safe(p, end, pi->last_force_request_resend, bad);
/* ignore the rest */
@@ -1438,7 +1426,7 @@ static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
ceph_decode_32_safe(p, end, len, e_inval);
if (len == 0 && incremental)
return NULL; /* new_pg_temp: [] to remove */
- if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
+ if ((size_t)len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
return ERR_PTR(-EINVAL);
ceph_decode_need(p, end, len * sizeof(u32), e_inval);
@@ -1619,7 +1607,7 @@ static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
u32 len, i;
ceph_decode_32_safe(p, end, len, e_inval);
- if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
+ if ((size_t)len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
return ERR_PTR(-EINVAL);
ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
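
The decode_pool() rework swaps blind pointer arithmetic for the
bounds-checked decode helpers, which verify the remaining length against
`end` and branch to the error label on a truncated or corrupt encoding.
The shape of the change, side by side (sketch):

	/* before: trusts the advertised layout, can walk past `end` */
	*p += 4;			/* skip stripe_width */

	/* after: checked skip; jumps to `bad` if under 4 bytes remain */
	ceph_decode_skip_32(p, end, bad);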
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index d58ca9655ab7e6..c0250244cf7a3c 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -7732,6 +7732,12 @@ sub process {
ERROR("MISSING_SENTINEL", "missing sentinel in ID array\n" . "$here\n$stat\n");
}
}
+
+# check for uninitialized pointers with __free attribute
+ while ($line =~ /\*\s*($Ident)\s+__free\s*\(\s*$Ident\s*\)\s*[,;]/g) {
+ ERROR("UNINITIALIZED_PTR_WITH_FREE",
+ "pointer '$1' with __free attribute should be initialized\n" . $herecurr);
+ }
}
# If we have no input at all, then there is nothing to report on
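
The new checkpatch rule catches __free() pointers declared without an
initializer: if any exit path runs before the assignment, the scope-exit
cleanup is invoked on uninitialized stack contents. A minimal example of
what is flagged versus what passes (struct foo and the allocation are
hypothetical):

	struct foo *p __free(kfree);		/* flagged: may kfree() garbage */

	struct foo *q __free(kfree) = NULL;	/* ok: kfree(NULL) is a no-op */
	q = kzalloc(sizeof(*q), GFP_KERNEL);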
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 5f9ccab26e9abb..90cf0e2969df8e 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -934,17 +934,12 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
#endif
if (page != dump->page) {
const unsigned int offset = pos % PAGE_SIZE;
- /*
- * Maybe kmap()/kunmap() should be used here.
- * But remove_arg_zero() uses kmap_atomic()/kunmap_atomic().
- * So do I.
- */
- char *kaddr = kmap_atomic(page);
+ char *kaddr = kmap_local_page(page);
dump->page = page;
memcpy(dump->data + offset, kaddr + offset,
PAGE_SIZE - offset);
- kunmap_atomic(kaddr);
+ kunmap_local(kaddr);
}
/* Same with put_arg_page(page) in fs/exec.c */
#ifdef CONFIG_MMU