/*
 * QEMU Enhanced Disk Format Cluster functions
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi <[email protected]>
 *  Anthony Liguori <[email protected]>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
14 | ||
15 | #include "qed.h" | |
16 | ||
17 | /** | |
18 | * Count the number of contiguous data clusters | |
19 | * | |
20 | * @s: QED state | |
21 | * @table: L2 table | |
22 | * @index: First cluster index | |
23 | * @n: Maximum number of clusters | |
24 | * @offset: Set to first cluster offset | |
25 | * | |
21df65b6 AL |
26 | * This function scans tables for contiguous clusters. A contiguous run of |
27 | * clusters may be allocated, unallocated, or zero. | |
298800ca SH |
28 | */ |
29 | static unsigned int qed_count_contiguous_clusters(BDRVQEDState *s, | |
30 | QEDTable *table, | |
31 | unsigned int index, | |
32 | unsigned int n, | |
33 | uint64_t *offset) | |
34 | { | |
35 | unsigned int end = MIN(index + n, s->table_nelems); | |
36 | uint64_t last = table->offsets[index]; | |
37 | unsigned int i; | |
38 | ||
39 | *offset = last; | |
40 | ||
41 | for (i = index + 1; i < end; i++) { | |
21df65b6 AL |
42 | if (qed_offset_is_unalloc_cluster(last)) { |
43 | /* Counting unallocated clusters */ | |
44 | if (!qed_offset_is_unalloc_cluster(table->offsets[i])) { | |
45 | break; | |
46 | } | |
47 | } else if (qed_offset_is_zero_cluster(last)) { | |
48 | /* Counting zero clusters */ | |
49 | if (!qed_offset_is_zero_cluster(table->offsets[i])) { | |
298800ca SH |
50 | break; |
51 | } | |
52 | } else { | |
53 | /* Counting allocated clusters */ | |
54 | if (table->offsets[i] != last + s->header.cluster_size) { | |
55 | break; | |
56 | } | |
57 | last = table->offsets[i]; | |
58 | } | |
59 | } | |
60 | return i - index; | |
61 | } | |
62 | ||
/* State carried across the asynchronous L2 table read in qed_find_cluster() */
typedef struct {
    BDRVQEDState *s;        /* QED driver state */
    uint64_t pos;           /* byte position in the device being translated */
    size_t len;             /* request length in bytes (already L2-clamped) */

    QEDRequest *request;    /* holds the L2 table cache entry for the caller */

    /* User callback */
    QEDFindClusterFunc *cb;
    void *opaque;           /* opaque argument passed through to cb */
} QEDFindClusterCB;
74 | ||
75 | static void qed_find_cluster_cb(void *opaque, int ret) | |
76 | { | |
77 | QEDFindClusterCB *find_cluster_cb = opaque; | |
78 | BDRVQEDState *s = find_cluster_cb->s; | |
79 | QEDRequest *request = find_cluster_cb->request; | |
80 | uint64_t offset = 0; | |
81 | size_t len = 0; | |
82 | unsigned int index; | |
83 | unsigned int n; | |
84 | ||
85 | if (ret) { | |
86 | goto out; | |
87 | } | |
88 | ||
89 | index = qed_l2_index(s, find_cluster_cb->pos); | |
90 | n = qed_bytes_to_clusters(s, | |
91 | qed_offset_into_cluster(s, find_cluster_cb->pos) + | |
92 | find_cluster_cb->len); | |
93 | n = qed_count_contiguous_clusters(s, request->l2_table->table, | |
94 | index, n, &offset); | |
95 | ||
21df65b6 AL |
96 | if (qed_offset_is_unalloc_cluster(offset)) { |
97 | ret = QED_CLUSTER_L2; | |
98 | } else if (qed_offset_is_zero_cluster(offset)) { | |
99 | ret = QED_CLUSTER_ZERO; | |
100 | } else if (qed_check_cluster_offset(s, offset)) { | |
101 | ret = QED_CLUSTER_FOUND; | |
102 | } else { | |
298800ca SH |
103 | ret = -EINVAL; |
104 | } | |
105 | ||
21df65b6 AL |
106 | len = MIN(find_cluster_cb->len, n * s->header.cluster_size - |
107 | qed_offset_into_cluster(s, find_cluster_cb->pos)); | |
108 | ||
298800ca SH |
109 | out: |
110 | find_cluster_cb->cb(find_cluster_cb->opaque, ret, offset, len); | |
7267c094 | 111 | g_free(find_cluster_cb); |
298800ca SH |
112 | } |
113 | ||
114 | /** | |
115 | * Find the offset of a data cluster | |
116 | * | |
117 | * @s: QED state | |
118 | * @request: L2 cache entry | |
119 | * @pos: Byte position in device | |
120 | * @len: Number of bytes | |
121 | * @cb: Completion function | |
122 | * @opaque: User data for completion function | |
123 | * | |
124 | * This function translates a position in the block device to an offset in the | |
125 | * image file. It invokes the cb completion callback to report back the | |
126 | * translated offset or unallocated range in the image file. | |
127 | * | |
128 | * If the L2 table exists, request->l2_table points to the L2 table cache entry | |
129 | * and the caller must free the reference when they are finished. The cache | |
130 | * entry is exposed in this way to avoid callers having to read the L2 table | |
131 | * again later during request processing. If request->l2_table is non-NULL it | |
132 | * will be unreferenced before taking on the new cache entry. | |
133 | */ | |
134 | void qed_find_cluster(BDRVQEDState *s, QEDRequest *request, uint64_t pos, | |
135 | size_t len, QEDFindClusterFunc *cb, void *opaque) | |
136 | { | |
137 | QEDFindClusterCB *find_cluster_cb; | |
138 | uint64_t l2_offset; | |
139 | ||
140 | /* Limit length to L2 boundary. Requests are broken up at the L2 boundary | |
141 | * so that a request acts on one L2 table at a time. | |
142 | */ | |
143 | len = MIN(len, (((pos >> s->l1_shift) + 1) << s->l1_shift) - pos); | |
144 | ||
145 | l2_offset = s->l1_table->offsets[qed_l1_index(s, pos)]; | |
21df65b6 | 146 | if (qed_offset_is_unalloc_cluster(l2_offset)) { |
298800ca SH |
147 | cb(opaque, QED_CLUSTER_L1, 0, len); |
148 | return; | |
149 | } | |
150 | if (!qed_check_table_offset(s, l2_offset)) { | |
151 | cb(opaque, -EINVAL, 0, 0); | |
152 | return; | |
153 | } | |
154 | ||
7267c094 | 155 | find_cluster_cb = g_malloc(sizeof(*find_cluster_cb)); |
298800ca SH |
156 | find_cluster_cb->s = s; |
157 | find_cluster_cb->pos = pos; | |
158 | find_cluster_cb->len = len; | |
159 | find_cluster_cb->cb = cb; | |
160 | find_cluster_cb->opaque = opaque; | |
161 | find_cluster_cb->request = request; | |
162 | ||
163 | qed_read_l2_table(s, request, l2_offset, | |
164 | qed_find_cluster_cb, find_cluster_cb); | |
165 | } |