gdb/target-memory.c
/* Parts of target interface that deal with accessing memory and memory-like
   objects.

   Copyright (C) 2006-2017 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "vec.h"
#include "target.h"
#include "memory-map.h"

#include "gdb_sys_time.h"
#include <algorithm>

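/* qsort comparison function: order two memory_write_request blocks
   by their starting address.  */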
static int
compare_block_starting_address (const void *a, const void *b)
{
  const struct memory_write_request *a_req
    = (const struct memory_write_request *) a;
  const struct memory_write_request *b_req
    = (const struct memory_write_request *) b;

  if (a_req->begin < b_req->begin)
    return -1;
  else if (a_req->begin == b_req->begin)
    return 0;
  else
    return 1;
}

/* Adds to RESULT all memory write requests from BLOCKS that are
   in the [BEGIN, END) range.

   If any memory request is only partially in the specified range,
   that part of the memory request will be added.  */

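/* For instance (illustrative numbers, not taken from a real target):
   a request covering [0x1000, 0x3000) clipped against the range
   [0x2000, 0x4000) yields a new entry covering [0x2000, 0x3000),
   with its data pointer advanced by 0x1000 bytes so it still points
   at the bytes destined for address 0x2000.  */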
static void
claim_memory (VEC(memory_write_request_s) *blocks,
              VEC(memory_write_request_s) **result,
              ULONGEST begin,
              ULONGEST end)
{
  int i;
  ULONGEST claimed_begin;
  ULONGEST claimed_end;
  struct memory_write_request *r;

  for (i = 0; VEC_iterate (memory_write_request_s, blocks, i, r); ++i)
    {
      /* If the request doesn't overlap [BEGIN, END), skip it.  We
         must handle END == 0 meaning the top of memory; we don't yet
         check for R->end == 0, which would also mean the top of
         memory, but there's an assertion in
         target_write_memory_blocks which checks for that.  */

      if (begin >= r->end)
        continue;
      if (end != 0 && end <= r->begin)
        continue;

      claimed_begin = std::max (begin, r->begin);
      if (end == 0)
        claimed_end = r->end;
      else
        claimed_end = std::min (end, r->end);

      if (claimed_begin == r->begin && claimed_end == r->end)
        VEC_safe_push (memory_write_request_s, *result, r);
      else
        {
          struct memory_write_request *n =
            VEC_safe_push (memory_write_request_s, *result, NULL);

          *n = *r;
          n->begin = claimed_begin;
          n->end = claimed_end;
          n->data += claimed_begin - r->begin;
        }
    }
}

/* Given a vector of struct memory_write_request objects in BLOCKS,
   add memory requests for flash memory into FLASH_BLOCKS, and for
   regular memory to REGULAR_BLOCKS.  */

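/* For example (illustrative memory layout): if [0x0000, 0x8000) is a
   MEM_FLASH region and [0x8000, 0x10000) is ordinary RAM, a single
   request covering [0x7f00, 0x8100) ends up split by claim_memory
   into [0x7f00, 0x8000) in FLASH_BLOCKS and [0x8000, 0x8100) in
   REGULAR_BLOCKS.  */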
static void
split_regular_and_flash_blocks (VEC(memory_write_request_s) *blocks,
                                VEC(memory_write_request_s) **regular_blocks,
                                VEC(memory_write_request_s) **flash_blocks)
{
  struct mem_region *region;
  CORE_ADDR cur_address;

  /* This implementation runs in O(length(regions)*length(blocks)) time.
     However, in most cases the number of blocks will be small, so this
     does not matter.

     Note also that it's extremely unlikely that a memory write request
     will span more than one memory region; however, for safety we handle
     such situations.  */

  cur_address = 0;
  while (1)
    {
      VEC(memory_write_request_s) **r;

      region = lookup_mem_region (cur_address);
      r = region->attrib.mode == MEM_FLASH ? flash_blocks : regular_blocks;
      cur_address = region->hi;
      claim_memory (blocks, r, region->lo, region->hi);

      if (cur_address == 0)
        break;
    }
}

/* Given an ADDRESS, if BEGIN is non-NULL this function sets *BEGIN
   to the start of the flash block containing the address.  Similarly,
   if END is non-NULL *END will be set to the address one past the end
   of the block containing the address.  */

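/* As an illustration (made-up numbers): with region->lo == 0x10000
   and a 0x1000-byte blocksize, ADDRESS == 0x12345 lies in the block
   [0x12000, 0x13000): offset_in_region is 0x2345, which rounds down
   to 0x2000 for *BEGIN and up to 0x3000 for *END.  */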
static void
block_boundaries (CORE_ADDR address, CORE_ADDR *begin, CORE_ADDR *end)
{
  struct mem_region *region;
  unsigned blocksize;
  CORE_ADDR offset_in_region;

  region = lookup_mem_region (address);
  gdb_assert (region->attrib.mode == MEM_FLASH);
  blocksize = region->attrib.blocksize;

  offset_in_region = address - region->lo;

  if (begin)
    *begin = region->lo + offset_in_region / blocksize * blocksize;
  if (end)
    *end = (region->lo
            + (offset_in_region + blocksize - 1) / blocksize * blocksize);
}

/* Given the list of memory requests to be WRITTEN, this function
   returns write requests covering each group of flash blocks which must
   be erased.  */

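/* A small illustration (assuming region->lo == 0 and a 0x1000-byte
   blocksize, values chosen only for the example): writes covering
   [0x0100, 0x0200) and [0x0f00, 0x1100) together touch blocks 0 and 1,
   so a single erase request [0x0000, 0x2000) is produced; the two
   entries are merged because their block ranges abut.  */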
static VEC(memory_write_request_s) *
blocks_to_erase (VEC(memory_write_request_s) *written)
{
  unsigned i;
  struct memory_write_request *ptr;

  VEC(memory_write_request_s) *result = NULL;

  for (i = 0; VEC_iterate (memory_write_request_s, written, i, ptr); ++i)
    {
      CORE_ADDR begin, end;

      block_boundaries (ptr->begin, &begin, 0);
      block_boundaries (ptr->end - 1, 0, &end);

      if (!VEC_empty (memory_write_request_s, result)
          && VEC_last (memory_write_request_s, result)->end >= begin)
        {
          VEC_last (memory_write_request_s, result)->end = end;
        }
      else
        {
          struct memory_write_request *n =
            VEC_safe_push (memory_write_request_s, result, NULL);

          memset (n, 0, sizeof (struct memory_write_request));
          n->begin = begin;
          n->end = end;
        }
    }

  return result;
}

/* Given ERASED_BLOCKS, a list of blocks that will be erased with
   flash erase commands, and WRITTEN_BLOCKS, the list of memory
   addresses that will be written, compute the set of memory addresses
   that will be erased but not rewritten (e.g. padding within a block
   which is only partially filled by "load").  */

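/* Worked example (illustrative values): if the erased range is
   [0x0000, 0x1000) and the only write covers [0x0100, 0x0200), the
   "garbled" result is the two unwritten pieces [0x0000, 0x0100) and
   [0x0200, 0x1000); those are the bytes that would be lost unless
   they are read back and rewritten when flash_preserve is in
   effect.  */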
static VEC(memory_write_request_s) *
compute_garbled_blocks (VEC(memory_write_request_s) *erased_blocks,
                        VEC(memory_write_request_s) *written_blocks)
{
  VEC(memory_write_request_s) *result = NULL;

  unsigned i, j;
  unsigned je = VEC_length (memory_write_request_s, written_blocks);
  struct memory_write_request *erased_p;

  /* Look at each erased memory_write_request in turn, and
     see what part of it is subsequently written to.

     This implementation is O(length(erased) * length(written)).  If
     the lists are sorted at this point it could be rewritten more
     efficiently, but the complexity is not generally worthwhile.  */

  for (i = 0;
       VEC_iterate (memory_write_request_s, erased_blocks, i, erased_p);
       ++i)
    {
      /* Make a deep copy -- it will be modified inside the loop, but
         we don't want to modify the original vector.  */
      struct memory_write_request erased = *erased_p;

      for (j = 0; j != je;)
        {
          struct memory_write_request *written
            = VEC_index (memory_write_request_s,
                         written_blocks, j);

          /* Now try various cases.  */

          /* If WRITTEN is fully to the left of ERASED, check the next
             written memory_write_request.  */
          if (written->end <= erased.begin)
            {
              ++j;
              continue;
            }

          /* If WRITTEN is fully to the right of ERASED, then ERASED
             is not written at all.  WRITTEN might affect other
             blocks.  */
          if (written->begin >= erased.end)
            {
              VEC_safe_push (memory_write_request_s, result, &erased);
              goto next_erased;
            }

          /* If all of ERASED is completely written, we can move on to
             the next erased region.  */
          if (written->begin <= erased.begin
              && written->end >= erased.end)
            {
              goto next_erased;
            }

          /* If there is an unwritten part at the beginning of ERASED,
             then we should record that part and try this inner loop
             again for the remainder.  */
          if (written->begin > erased.begin)
            {
              struct memory_write_request *n =
                VEC_safe_push (memory_write_request_s, result, NULL);

              memset (n, 0, sizeof (struct memory_write_request));
              n->begin = erased.begin;
              n->end = written->begin;
              erased.begin = written->begin;
              continue;
            }

          /* If there is an unwritten part at the end of ERASED, we
             forget about the part that was written to and wait to see
             if the next write request writes more of ERASED.  We can't
             push it yet.  */
          if (written->end < erased.end)
            {
              erased.begin = written->end;
              ++j;
              continue;
            }
        }

      /* If we ran out of write requests without doing anything about
         ERASED, then that means it's really erased.  */
      VEC_safe_push (memory_write_request_s, result, &erased);

    next_erased:
      ;
    }

  return result;
}

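/* Cleanup helper: xfree the data buffer of every request in the
   vector pointed to by P.  */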
static void
cleanup_request_data (void *p)
{
  VEC(memory_write_request_s) **v = (VEC(memory_write_request_s) **) p;
  struct memory_write_request *r;
  int i;

  for (i = 0; VEC_iterate (memory_write_request_s, *v, i, r); ++i)
    xfree (r->data);
}

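/* Cleanup helper: free the vector of write requests pointed to by P
   (but not the data buffers the requests reference).  */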
static void
cleanup_write_requests_vector (void *p)
{
  VEC(memory_write_request_s) **v = (VEC(memory_write_request_s) **) p;

  VEC_free (memory_write_request_s, *v);
}

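/* Minimal usage sketch (illustrative only; LMA, SIZE and BUFFER are
   hypothetical caller variables, and a typical caller is GDB's "load"
   command): build a vector of requests and let this function handle
   sorting, flash-region splitting and erasing.

     VEC(memory_write_request_s) *reqs = NULL;
     struct memory_write_request *r
       = VEC_safe_push (memory_write_request_s, reqs, NULL);

     memset (r, 0, sizeof (*r));
     r->begin = lma;
     r->end = lma + size;
     r->data = buffer;
     int err = target_write_memory_blocks (reqs, flash_preserve, NULL);  */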
int
target_write_memory_blocks (VEC(memory_write_request_s) *requests,
                            enum flash_preserve_mode preserve_flash_p,
                            void (*progress_cb) (ULONGEST, void *))
{
  struct cleanup *back_to = make_cleanup (null_cleanup, NULL);
  VEC(memory_write_request_s) *blocks = VEC_copy (memory_write_request_s,
                                                  requests);
  unsigned i;
  int err = 0;
  struct memory_write_request *r;
  VEC(memory_write_request_s) *regular = NULL;
  VEC(memory_write_request_s) *flash = NULL;
  VEC(memory_write_request_s) *erased, *garbled;

  /* END == 0 would represent wraparound: a write to the very last
     byte of the address space.  This file was not written with that
     possibility in mind.  This is fixable, but a lot of work for a
     rare problem; so for now, fail noisily here instead of obscurely
     later.  */
  for (i = 0; VEC_iterate (memory_write_request_s, requests, i, r); ++i)
    gdb_assert (r->end != 0);

  make_cleanup (cleanup_write_requests_vector, &blocks);

  /* Sort the blocks by their start address.  */
  qsort (VEC_address (memory_write_request_s, blocks),
         VEC_length (memory_write_request_s, blocks),
         sizeof (struct memory_write_request), compare_block_starting_address);

  /* Split blocks into a list of regular memory blocks,
     and a list of flash memory blocks.  */
  make_cleanup (cleanup_write_requests_vector, &regular);
  make_cleanup (cleanup_write_requests_vector, &flash);
  split_regular_and_flash_blocks (blocks, &regular, &flash);

  /* If a variable is added to forbid flash write, even during "load",
     it should be checked here.  Similarly, if this function is used
     for other situations besides "load" in which writing to flash
     is undesirable, that should be checked here.  */

  /* Find flash blocks to erase.  */
  erased = blocks_to_erase (flash);
  make_cleanup (cleanup_write_requests_vector, &erased);

  /* Find what flash regions will be erased, and not overwritten; then
     either preserve or discard the old contents.  */
  garbled = compute_garbled_blocks (erased, flash);
  make_cleanup (cleanup_request_data, &garbled);
  make_cleanup (cleanup_write_requests_vector, &garbled);

  if (!VEC_empty (memory_write_request_s, garbled))
    {
      if (preserve_flash_p == flash_preserve)
        {
          struct memory_write_request *r;

          /* Read in regions that must be preserved and add them to
             the list of blocks to be written.  */
          for (i = 0; VEC_iterate (memory_write_request_s, garbled, i, r); ++i)
            {
              gdb_assert (r->data == NULL);
              r->data = (gdb_byte *) xmalloc (r->end - r->begin);
              err = target_read_memory (r->begin, r->data, r->end - r->begin);
              if (err != 0)
                goto out;

              VEC_safe_push (memory_write_request_s, flash, r);
            }

          qsort (VEC_address (memory_write_request_s, flash),
                 VEC_length (memory_write_request_s, flash),
                 sizeof (struct memory_write_request),
                 compare_block_starting_address);
        }
    }

  /* We could coalesce adjacent memory blocks here, to reduce the
     number of write requests for small sections.  However, we would
     have to reallocate and copy the data pointers, which could be
     large; large sections are more common in loadable objects than
     large numbers of small sections (although the reverse can be true
     in object files).  So, we issue at least one write request per
     passed struct memory_write_request.  The remote stub will still
     have the opportunity to batch flash requests.  */

  /* Write regular blocks.  */
  for (i = 0; VEC_iterate (memory_write_request_s, regular, i, r); ++i)
    {
      LONGEST len;

      len = target_write_with_progress (current_target.beneath,
                                        TARGET_OBJECT_MEMORY, NULL,
                                        r->data, r->begin, r->end - r->begin,
                                        progress_cb, r->baton);
      if (len < (LONGEST) (r->end - r->begin))
        {
          /* Call error?  */
          err = -1;
          goto out;
        }
    }

  if (!VEC_empty (memory_write_request_s, erased))
    {
      /* Erase all pages.  */
      for (i = 0; VEC_iterate (memory_write_request_s, erased, i, r); ++i)
        target_flash_erase (r->begin, r->end - r->begin);

      /* Write flash data.  */
      for (i = 0; VEC_iterate (memory_write_request_s, flash, i, r); ++i)
        {
          LONGEST len;

          len = target_write_with_progress (&current_target,
                                            TARGET_OBJECT_FLASH, NULL,
                                            r->data, r->begin,
                                            r->end - r->begin,
                                            progress_cb, r->baton);
          if (len < (LONGEST) (r->end - r->begin))
            error (_("Error writing data to flash"));
        }

      target_flash_done ();
    }

 out:
  do_cleanups (back_to);

  return err;
}