#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) \
((ICE_PKG_BUF_SIZE - \
- struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) / \
+ struct_size_t(struct ice_buf_hdr, section_entry, 1) - (hd_sz)) / \
(ent_sz))
/* ice package section IDs */
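The (struct ice_buf_hdr *)0 cast in the removed line existed only because struct_size() requires an instance pointer, and a file-scope macro has no instance to hand; struct_size_t(), added to overflow.h at the end of this patch, simply gives that idiom a name. A rough userspace sketch of the equivalence, with a stand-in type and none of the kernel macros' overflow checking:

/* Stand-ins only: buf_hdr is not the real ice_buf_hdr, and these macros
 * skip the size_add() overflow clamp the kernel versions have. */
#include <stddef.h>

struct buf_hdr { unsigned short cnt; unsigned short section_entry[]; };

#define STRUCT_SIZE(p, m, n)   (sizeof(*(p)) + (n) * sizeof((p)->m[0]))
#define STRUCT_SIZE_T(t, m, n) STRUCT_SIZE((t *)NULL, m, n)

_Static_assert(STRUCT_SIZE_T(struct buf_hdr, section_entry, 1) ==
               sizeof(struct buf_hdr) + sizeof(unsigned short),
               "type-based and pointer-based forms agree");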
};
#define ICE_MAX_LABELS_IN_BUF \
- ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_label_section *)0, \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size_t(struct ice_label_section, \
label, 1) - \
sizeof(struct ice_label), \
sizeof(struct ice_label))
};
#define ICE_MAX_BST_TCAMS_IN_BUF \
- ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_boost_tcam_section *)0, \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size_t(struct ice_boost_tcam_section, \
tcam, 1) - \
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
};
#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \
- ICE_MAX_ENTRIES_IN_BUF( \
- struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, \
+ ICE_MAX_ENTRIES_IN_BUF(struct_size_t(struct ice_marker_ptype_tcam_section, tcam, \
1) - \
sizeof(struct ice_marker_ptype_tcam_entry), \
sizeof(struct ice_marker_ptype_tcam_entry))
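All three section macros pass struct_size_t(type, member, 1) minus one entry size as hd_sz, which works out to just the fixed section header; ICE_MAX_ENTRIES_IN_BUF then divides what is left of the package buffer by the per-entry size. A worked example with invented numbers (the real ICE_PKG_BUF_SIZE and struct layouts are not shown in these hunks):

/* Invented sizes: 4096-byte package buffer, 12 bytes for the buffer
 * header with one section entry, a 4-byte label-section header, and
 * 68-byte labels. */
#include <stdio.h>

int main(void)
{
        unsigned int pkg_buf = 4096, buf_hdr = 12, sect_hdr = 4, label = 68;
        unsigned int max_labels = (pkg_buf - buf_hdr - sect_hdr) / label;

        printf("%u labels per buffer\n", max_labels); /* prints 60 */
        return 0;
}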
ret = nvme_alloc_io_tag_set(&ctrl->ctrl, &ctrl->tag_set,
&nvme_fc_mq_ops, 1,
- struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
- ctrl->lport->ops->fcprqst_priv_sz));
+ struct_size_t(struct nvme_fcp_op_w_sgl, priv,
+ ctrl->lport->ops->fcprqst_priv_sz));
if (ret)
return ret;
ret = nvme_alloc_admin_tag_set(&ctrl->ctrl, &ctrl->admin_tag_set,
&nvme_fc_admin_mq_ops,
- struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
- ctrl->lport->ops->fcprqst_priv_sz));
+ struct_size_t(struct nvme_fcp_op_w_sgl, priv,
+ ctrl->lport->ops->fcprqst_priv_sz));
if (ret)
goto fail_ctrl;
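Unlike the file-scope ice macros, the count here, fcprqst_priv_sz, is a runtime value supplied by the FC low-level driver, so struct_size_t() is not limited to compile-time constants; the result becomes the per-command private-data size handed to the tag set. A hypothetical sketch of that calculation (op_w_sgl is invented, since nvme_fcp_op_w_sgl's layout is not shown here):

#include <stdio.h>
#include <stddef.h>

struct op_w_sgl {
        long fixed[8];        /* invented fixed portion */
        unsigned char priv[]; /* LLDD-private trailing bytes */
};

static size_t cmd_size(size_t priv_sz)
{
        /* same shape as struct_size_t(struct op_w_sgl, priv, priv_sz) */
        return sizeof(struct op_w_sgl) + priv_sz;
}

int main(void)
{
        printf("%zu bytes per command\n", cmd_size(64)); /* 64 is assumed */
        return 0;
}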
host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
host->max_cmd_len = 16;
- req_size = struct_size((struct hpt_iop_request_scsi_command *)0,
- sg_list, hba->max_sg_descriptors);
+ req_size = struct_size_t(struct hpt_iop_request_scsi_command,
+ sg_list, hba->max_sg_descriptors);
if ((req_size & 0x1f) != 0)
req_size = (req_size + 0x1f) & ~0x1f;
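The rounding that follows is the usual round-up-to-a-multiple-of-32; in kernel code it could equally be written ALIGN(req_size, 32). A standalone check of the bit trick:

#include <assert.h>
#include <stddef.h>

static size_t round_up_32(size_t v)
{
        return (v + 0x1f) & ~(size_t)0x1f; /* requires a power-of-two boundary */
}

int main(void)
{
        assert(round_up_32(33) == 64); /* rounds up */
        assert(round_up_32(64) == 64); /* already aligned, unchanged */
        return 0;
}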
fusion->max_map_sz = ventura_map_sz;
} else {
fusion->old_map_sz =
- struct_size((struct MR_FW_RAID_MAP *)0, ldSpanMap,
- instance->fw_supported_vd_count);
+ struct_size_t(struct MR_FW_RAID_MAP, ldSpanMap,
+ instance->fw_supported_vd_count);
fusion->new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
fusion->max_map_sz =
struct fusion_context *fusion = instance->ctrl_context;
size_t pd_seq_map_sz;
- pd_seq_map_sz = struct_size((struct MR_PD_CFG_SEQ_NUM_SYNC *)0, seq,
- MAX_PHYSICAL_DEVICES);
+ pd_seq_map_sz = struct_size_t(struct MR_PD_CFG_SEQ_NUM_SYNC, seq,
+ MAX_PHYSICAL_DEVICES);
instance->use_seqnum_jbod_fp =
instance->support_seqnum_jbod_fp;
if (instance->adapter_type != MFI_SERIES) {
megasas_release_fusion(instance);
pd_seq_map_sz =
- struct_size((struct MR_PD_CFG_SEQ_NUM_SYNC *)0,
- seq, MAX_PHYSICAL_DEVICES);
+ struct_size_t(struct MR_PD_CFG_SEQ_NUM_SYNC,
+ seq, MAX_PHYSICAL_DEVICES);
for (i = 0; i < 2 ; i++) {
if (fusion->ld_map[i])
dma_free_coherent(&instance->pdev->dev,
else if (instance->supportmax256vd)
expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
else
- expected_size = struct_size((struct MR_FW_RAID_MAP *)0,
- ldSpanMap,
- le16_to_cpu(pDrvRaidMap->ldCount));
+ expected_size = struct_size_t(struct MR_FW_RAID_MAP,
+ ldSpanMap,
+ le16_to_cpu(pDrvRaidMap->ldCount));
if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
dev_dbg(&instance->pdev->dev, "megasas: map info structure size 0x%x",
}
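expected_size here is used to validate a firmware-reported length before the driver trusts the RAID map. The shape of that check, sketched with hypothetical types:

#include <stdbool.h>
#include <stddef.h>

struct raid_map {                  /* invented, not the MegaRAID layout */
        unsigned int total_size;   /* device-reported, after endian fixup */
        unsigned int ld_count;
        unsigned long long span_map[]; /* one entry per logical drive */
};

static bool map_size_ok(const struct raid_map *m)
{
        size_t expected = sizeof(*m) +
                          (size_t)m->ld_count * sizeof(m->span_map[0]);

        return m->total_size == expected;
}

int main(void)
{
        struct raid_map m = {
                .total_size = sizeof(struct raid_map) +
                              2 * sizeof(unsigned long long),
                .ld_count = 2,
        };

        return map_size_ok(&m) ? 0 : 1;
}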
#define PQI_REPORT_EVENT_CONFIG_BUFFER_LENGTH \
- struct_size((struct pqi_event_config *)0, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
+ struct_size_t(struct pqi_event_config, descriptors, PQI_MAX_EVENT_DESCRIPTORS)
static int pqi_configure_events(struct pqi_ctrl_info *ctrl_info,
bool enable_events)
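Because PQI_MAX_EVENT_DESCRIPTORS is a fixed maximum, the whole buffer length collapses to a compile-time constant that can live in a #define. A sketch with invented sizes:

/* Invented stand-ins: a 24-byte fixed config header, 8-byte descriptors,
 * at most 6 of them. */
#include <stddef.h>

struct event_config { char fixed[24]; struct { short a[4]; } descriptors[]; };

#define MAX_EVENT_DESCRIPTORS 6
#define EVENT_CONFIG_BUFFER_LENGTH \
        (sizeof(struct event_config) + \
         MAX_EVENT_DESCRIPTORS * sizeof(((struct event_config *)0)->descriptors[0]))

_Static_assert(EVENT_CONFIG_BUFFER_LENGTH == 24 + 6 * 8, "72 bytes");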
static inline size_t
xfs_btree_cur_sizeof(unsigned int nlevels)
{
- return struct_size((struct xfs_btree_cur *)NULL, bc_levels, nlevels);
+ return struct_size_t(struct xfs_btree_cur, bc_levels, nlevels);
}
/* cursor flags */
static inline size_t
xchk_btree_sizeof(unsigned int nlevels)
{
- return struct_size((struct xchk_btree *)NULL, lastkey, nlevels - 1);
+ return struct_size_t(struct xchk_btree, lastkey, nlevels - 1);
}
int xchk_btree(struct xfs_scrub *sc, struct xfs_btree_cur *cur,
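Both XFS helpers wrap the calculation in a typed inline function so call sites pass only a level count; note the scrub variant sizes its lastkey[] with nlevels - 1, one fewer tracked key than the cursor has levels. A sketch of the wrapper pattern with made-up types:

#include <stddef.h>
#include <stdlib.h>

struct level { int ptr; };          /* invented per-level state */
struct cursor {                     /* stands in for xfs_btree_cur */
        unsigned int nlevels;
        struct level levels[];      /* one element per tree level */
};

static inline size_t cursor_sizeof(unsigned int nlevels)
{
        return sizeof(struct cursor) + nlevels * sizeof(struct level);
}

int main(void)
{
        struct cursor *cur = calloc(1, cursor_sizeof(3)); /* 3-level tree */

        if (!cur)
                return 1;
        cur->nlevels = 3;
        free(cur);
        return 0;
}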
* @member: Name of the array member.
* @count: Number of elements in the array.
*
- * Calculates size of memory needed for structure @p followed by an
+ * Calculates size of memory needed for structure of @p followed by an
* array of @count number of @member elements.
*
* Return: number of bytes needed or SIZE_MAX on overflow.
sizeof(*(p)) + flex_array_size(p, member, count), \
size_add(sizeof(*(p)), flex_array_size(p, member, count)))
+/**
+ * struct_size_t() - Calculate size of structure with trailing flexible array
+ * @type: structure type name.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array.
+ *
+ * Calculates size of memory needed for structure @type followed by an
+ * array of @count number of @member elements. Prefer using struct_size()
+ * when possible instead, to keep calculations associated with a specific
+ * instance variable of type @type.
+ *
+ * Return: number of bytes needed or SIZE_MAX on overflow.
+ */
+#define struct_size_t(type, member, count) \
+ struct_size((type *)NULL, member, count)
+
#endif /* __LINUX_OVERFLOW_H */
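Putting the two entry points side by side: struct_size() keeps the calculation tied to an instance's type, while struct_size_t() covers contexts with no instance, per the kernel-doc above. A hedged userspace demo whose stand-in macros mirror the definitions above but omit the size_add() overflow clamp (struct item is hypothetical):

#include <stdlib.h>
#include <stddef.h>

#define struct_size(p, member, count) \
        (sizeof(*(p)) + (count) * sizeof((p)->member[0]))
#define struct_size_t(type, member, count) \
        struct_size((type *)NULL, member, count)

struct item {
        size_t count;
        int vals[];             /* flexible array member */
};

/* An instance is in scope: prefer struct_size(p, ...) so a refactor that
 * changes p's type cannot silently desync the allocation size. */
static struct item *item_alloc(size_t n)
{
        struct item *p = malloc(struct_size(p, vals, n));

        if (p)
                p->count = n;
        return p;
}

/* No instance exists at file scope: use the type name. */
#define ITEM8_BYTES struct_size_t(struct item, vals, 8)

int main(void)
{
        struct item *p = item_alloc(8);

        free(p);
        return ITEM8_BYTES > 0 ? 0 : 1;
}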
static void overflow_size_helpers_test(struct kunit *test)
{
- /* Make sure struct_size() can be used in a constant expression. */
+ /* Make sure struct_size_t() can be used in a constant expression. */
- u8 ce_array[struct_size((struct __test_flex_array *)0, data, 55)];
+ u8 ce_array[struct_size_t(struct __test_flex_array, data, 55)];
struct __test_flex_array *obj;
int count = 0;
int var;
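ce_array above only compiles as a fixed-size (non-VLA) array because struct_size(), and therefore struct_size_t(), folds to a constant expression when count is a literal; overflow.h selects between the two expansions shown earlier (plain sizeof arithmetic for constant counts, size_add() otherwise) via __builtin_choose_expr()/__is_constexpr(). A userspace sketch of the property under test, with struct flex standing in for __test_flex_array and a simplified macro:

#include <stddef.h>

struct flex { int n; short data[]; };

#define struct_size_t(type, member, count) \
        (sizeof(type) + (count) * sizeof(((type *)0)->member[0]))

/* File scope requires a true constant expression, so this is the same
 * property the kunit test exercises on the stack. */
static unsigned char ce_array[struct_size_t(struct flex, data, 55)];

int main(void)
{
        return sizeof(ce_array) ==
               sizeof(struct flex) + 55 * sizeof(short) ? 0 : 1;
}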