     return 0;
 }
 
-static bool coroutine_fn do_perform_cow_encrypt(BlockDriverState *bs,
-                                                 uint64_t src_cluster_offset,
-                                                 uint64_t cluster_offset,
-                                                 unsigned offset_in_cluster,
-                                                 uint8_t *buffer,
-                                                 unsigned bytes)
-{
-    if (bytes && bs->encrypted) {
-        BDRVQcow2State *s = bs->opaque;
-        assert((offset_in_cluster & ~BDRV_SECTOR_MASK) == 0);
-        assert((bytes & ~BDRV_SECTOR_MASK) == 0);
-        assert(s->crypto);
-        if (qcow2_co_encrypt(bs, cluster_offset,
-                             src_cluster_offset + offset_in_cluster,
-                             buffer, bytes) < 0) {
-            return false;
-        }
-    }
-    return true;
-}
-
 static int coroutine_fn do_perform_cow_write(BlockDriverState *bs,
                                               uint64_t cluster_offset,
                                               unsigned offset_in_cluster,
     /* Encrypt the data if necessary before writing it */
     if (bs->encrypted) {
-        if (!do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
-                                     start->offset, start_buffer,
-                                     start->nb_bytes) ||
-            !do_perform_cow_encrypt(bs, m->offset, m->alloc_offset,
-                                     end->offset, end_buffer, end->nb_bytes)) {
-            ret = -EIO;
+        ret = qcow2_co_encrypt(bs,
+                               m->alloc_offset + start->offset,
+                               m->offset + start->offset,
+                               start_buffer, start->nb_bytes);
+        if (ret < 0) {
+            goto fail;
+        }
+
+        ret = qcow2_co_encrypt(bs,
+                               m->alloc_offset + end->offset,
+                               m->offset + end->offset,
+                               end_buffer, end->nb_bytes);
+        if (ret < 0) {
             goto fail;
         }
     }
     nb_clusters = MIN(nb_clusters, s->l2_slice_size - l2_index);
     assert(nb_clusters <= INT_MAX);
 
+    /* Limit total allocation byte count to INT_MAX */
+    nb_clusters = MIN(nb_clusters, INT_MAX >> s->cluster_bits);
+
     /* Find L2 entry for the first involved cluster */
     ret = get_cluster_table(bs, guest_offset, &l2_slice, &l2_index);
     if (ret < 0) {
      * request actually writes to (excluding COW at the end)
      */
     uint64_t requested_bytes = *bytes + offset_into_cluster(s, guest_offset);
-    int avail_bytes = MIN(INT_MAX, nb_clusters << s->cluster_bits);
+    int avail_bytes = nb_clusters << s->cluster_bits;
     int nb_bytes = MIN(requested_bytes, avail_bytes);
     QCowL2Meta *old_m = *m;