/* Parts of target interface that deal with accessing memory and memory-like
   objects.

   Copyright (C) 2006-2020 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "target.h"
#include "memory-map.h"

#include "gdbsupport/gdb_sys_time.h"
#include <algorithm>

/* A helper function used to sort memory_write_request objects by
   their starting address.  */

static bool
compare_block_starting_address (const memory_write_request &a_req,
                                const memory_write_request &b_req)
{
  return a_req.begin < b_req.begin;
}

/* Adds to RESULT all memory write requests from BLOCKS that are
   in the [BEGIN, END) range.

   If any memory request is only partially in the specified range,
   that part of the memory request will be added.  */

static void
claim_memory (const std::vector<memory_write_request> &blocks,
              std::vector<memory_write_request> *result,
              ULONGEST begin,
              ULONGEST end)
{
  ULONGEST claimed_begin;
  ULONGEST claimed_end;

  for (const memory_write_request &r : blocks)
    {
      /* If the request doesn't overlap [BEGIN, END), skip it.  We
         must handle END == 0 meaning the top of memory; we don't yet
         check for R.end == 0, which would also mean the top of
         memory, but there's an assertion in
         target_write_memory_blocks which checks for that.  */

      if (begin >= r.end)
        continue;
      if (end != 0 && end <= r.begin)
        continue;

      claimed_begin = std::max (begin, r.begin);
      if (end == 0)
        claimed_end = r.end;
      else
        claimed_end = std::min (end, r.end);

      if (claimed_begin == r.begin && claimed_end == r.end)
        result->push_back (r);
      else
        {
          struct memory_write_request n = r;

          n.begin = claimed_begin;
          n.end = claimed_end;
          n.data += claimed_begin - r.begin;

          result->push_back (n);
        }
    }
}
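
/* For illustration (hypothetical numbers): given a single request in
   BLOCKS covering [0x1000, 0x2000), a call to claim_memory with
   BEGIN == 0x1800 and END == 0x3000 pushes one request for
   [0x1800, 0x2000) onto RESULT, with its data pointer advanced by
   0x800 bytes.  */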

/* Given a vector of struct memory_write_request objects in BLOCKS,
   add memory requests for flash memory into FLASH_BLOCKS, and for
   regular memory into REGULAR_BLOCKS.  */

static void
split_regular_and_flash_blocks (const std::vector<memory_write_request> &blocks,
                                std::vector<memory_write_request> *regular_blocks,
                                std::vector<memory_write_request> *flash_blocks)
{
  struct mem_region *region;
  CORE_ADDR cur_address;

  /* This implementation runs in O(length(regions)*length(blocks)) time.
     However, in most cases the number of blocks will be small, so this does
     not matter.

     Note also that it's extremely unlikely that a memory write request
     will span more than one memory region; however, for safety we handle
     such situations.  */

  cur_address = 0;
  while (1)
    {
      std::vector<memory_write_request> *r;

      region = lookup_mem_region (cur_address);
      r = region->attrib.mode == MEM_FLASH ? flash_blocks : regular_blocks;
      cur_address = region->hi;
      claim_memory (blocks, r, region->lo, region->hi);

      if (cur_address == 0)
        break;
    }
}
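
/* For example (hypothetical memory map): if RAM covers [0, 0x8000)
   and flash covers [0x8000, 0x10000), a write request spanning
   [0x7000, 0x9000) ends up split into a regular piece for
   [0x7000, 0x8000) and a flash piece for [0x8000, 0x9000).  */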

/* Given an ADDRESS, if BEGIN is non-NULL this function sets *BEGIN
   to the start of the flash block containing the address.  Similarly,
   if END is non-NULL *END will be set to the address one past the end
   of the block containing the address.  */

static void
block_boundaries (CORE_ADDR address, CORE_ADDR *begin, CORE_ADDR *end)
{
  struct mem_region *region;
  unsigned blocksize;
  CORE_ADDR offset_in_region;

  region = lookup_mem_region (address);
  gdb_assert (region->attrib.mode == MEM_FLASH);
  blocksize = region->attrib.blocksize;

  offset_in_region = address - region->lo;

  if (begin)
    *begin = region->lo + offset_in_region / blocksize * blocksize;
  if (end)
    /* Round OFFSET_IN_REGION up to the start of the following block,
       so that *END is strictly greater than ADDRESS even when ADDRESS
       sits exactly on a block boundary.  */
    *end = (region->lo
            + (offset_in_region + blocksize) / blocksize * blocksize);
}
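
/* A worked example (hypothetical region): with a flash region
   spanning [0x1000, 0x5000) and a block size of 0x100, an ADDRESS of
   0x1234 yields *BEGIN == 0x1200 and *END == 0x1300; an ADDRESS of
   exactly 0x1200 yields the same boundaries.  */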

/* Given the list of memory requests to be WRITTEN, this function
   returns write requests covering each group of flash blocks which must
   be erased.  */

static std::vector<memory_write_request>
blocks_to_erase (const std::vector<memory_write_request> &written)
{
  std::vector<memory_write_request> result;

  for (const memory_write_request &request : written)
    {
      CORE_ADDR begin, end;

      block_boundaries (request.begin, &begin, 0);
      block_boundaries (request.end - 1, 0, &end);

      if (!result.empty () && result.back ().end >= begin)
        result.back ().end = end;
      else
        result.emplace_back (begin, end);
    }

  return result;
}
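
/* For example (hypothetical numbers): with a flash region starting at
   0x1000 and a block size of 0x100, write requests for
   [0x1010, 0x1080) and [0x10f0, 0x1130) both touch the block
   [0x1000, 0x1100), so the two erase ranges are coalesced into a
   single request covering [0x1000, 0x1200).  */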

/* Given ERASED_BLOCKS, a list of blocks that will be erased with
   flash erase commands, and WRITTEN_BLOCKS, the list of memory
   addresses that will be written, compute the set of memory addresses
   that will be erased but not rewritten (e.g. padding within a block
   which is only partially filled by "load").  */

static std::vector<memory_write_request>
compute_garbled_blocks (const std::vector<memory_write_request> &erased_blocks,
                        const std::vector<memory_write_request> &written_blocks)
{
  std::vector<memory_write_request> result;

  unsigned j;
  unsigned je = written_blocks.size ();

  /* Look at each erased memory_write_request in turn, and
     see what part of it is subsequently written to.

     This implementation is O(length(erased) * length(written)).  If
     the lists are sorted at this point it could be rewritten more
     efficiently, but the extra complexity is not generally worthwhile.  */

  for (const memory_write_request &erased_iter : erased_blocks)
    {
      /* Make a deep copy -- it will be modified inside the loop, but
         we don't want to modify the original vector.  */
      struct memory_write_request erased = erased_iter;

      for (j = 0; j != je;)
        {
          const memory_write_request *written = &written_blocks[j];

          /* Now try various cases.  */

          /* If WRITTEN is fully to the left of ERASED, check the next
             written memory_write_request.  */
          if (written->end <= erased.begin)
            {
              ++j;
              continue;
            }

          /* If WRITTEN is fully to the right of ERASED, then ERASED
             is not written at all.  WRITTEN might affect other
             blocks.  */
          if (written->begin >= erased.end)
            {
              result.push_back (erased);
              goto next_erased;
            }

          /* If all of ERASED is completely written, we can move on to
             the next erased region.  */
          if (written->begin <= erased.begin
              && written->end >= erased.end)
            {
              goto next_erased;
            }

          /* If there is an unwritten part at the beginning of ERASED,
             then we should record that part and try this inner loop
             again for the remainder.  */
          if (written->begin > erased.begin)
            {
              result.emplace_back (erased.begin, written->begin);
              erased.begin = written->begin;
              continue;
            }

          /* If there is an unwritten part at the end of ERASED, we
             forget about the part that was written to and wait to see
             if the next write request writes more of ERASED.  We can't
             push it yet.  */
          if (written->end < erased.end)
            {
              erased.begin = written->end;
              ++j;
              continue;
            }
        }

      /* If we ran out of write requests without doing anything about
         ERASED, then that means it's really erased.  */
      result.push_back (erased);

    next_erased:
      ;
    }

  return result;
}
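
/* For example (hypothetical numbers): if the block [0x1000, 0x1100)
   is erased but only [0x1040, 0x10c0) of it is written, the result
   contains the two padding ranges [0x1000, 0x1040) and
   [0x10c0, 0x1100), each with a NULL data pointer for the caller to
   fill in later.  */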

/* See target.h.  */

int
target_write_memory_blocks (const std::vector<memory_write_request> &requests,
                            enum flash_preserve_mode preserve_flash_p,
                            void (*progress_cb) (ULONGEST, void *))
{
  std::vector<memory_write_request> blocks = requests;
  std::vector<memory_write_request> regular;
  std::vector<memory_write_request> flash;
  std::vector<memory_write_request> erased, garbled;

  /* END == 0 would represent wraparound: a write to the very last
     byte of the address space.  This file was not written with that
     possibility in mind.  This is fixable, but a lot of work for a
     rare problem; so for now, fail noisily here instead of obscurely
     later.  */
  for (const memory_write_request &iter : requests)
    gdb_assert (iter.end != 0);

  /* Sort the blocks by their start address.  */
  std::sort (blocks.begin (), blocks.end (), compare_block_starting_address);

  /* Split the blocks into a list of regular memory blocks and a list
     of flash memory blocks.  */
  split_regular_and_flash_blocks (blocks, &regular, &flash);

  /* If a variable is added to forbid flash write, even during "load",
     it should be checked here.  Similarly, if this function is used
     for other situations besides "load" in which writing to flash
     is undesirable, that should be checked here.  */

  /* Find the flash blocks to erase.  */
  erased = blocks_to_erase (flash);

  /* Find the flash regions that will be erased but not overwritten;
     then either preserve or discard their old contents.  */
  garbled = compute_garbled_blocks (erased, flash);

  std::vector<gdb::unique_xmalloc_ptr<gdb_byte>> mem_holders;
  if (!garbled.empty ())
    {
      if (preserve_flash_p == flash_preserve)
        {
          /* Read in the regions that must be preserved and add them
             to the list of blocks to be written.  */
          for (memory_write_request &iter : garbled)
            {
              gdb_assert (iter.data == NULL);
              gdb::unique_xmalloc_ptr<gdb_byte> holder
                ((gdb_byte *) xmalloc (iter.end - iter.begin));
              iter.data = holder.get ();
              mem_holders.push_back (std::move (holder));
              int err = target_read_memory (iter.begin, iter.data,
                                            iter.end - iter.begin);
              if (err != 0)
                return err;

              flash.push_back (iter);
            }

          std::sort (flash.begin (), flash.end (),
                     compare_block_starting_address);
        }
    }

  /* We could coalesce adjacent memory blocks here, to reduce the
     number of write requests for small sections.  However, we would
     have to reallocate and copy the data pointers, which could be
     large; large sections are more common in loadable objects than
     large numbers of small sections (although the reverse can be true
     in object files).  So, we issue at least one write request per
     passed struct memory_write_request.  The remote stub will still
     have the opportunity to batch flash requests.  */

  /* Write the regular blocks.  */
  for (const memory_write_request &iter : regular)
    {
      LONGEST len;

      len = target_write_with_progress (current_top_target (),
                                        TARGET_OBJECT_MEMORY, NULL,
                                        iter.data, iter.begin,
                                        iter.end - iter.begin,
                                        progress_cb, iter.baton);
      if (len < (LONGEST) (iter.end - iter.begin))
        {
          /* Call error?  */
          return -1;
        }
    }

  if (!erased.empty ())
    {
      /* Erase all pages.  */
      for (const memory_write_request &iter : erased)
        target_flash_erase (iter.begin, iter.end - iter.begin);

      /* Write the flash data.  */
      for (const memory_write_request &iter : flash)
        {
          LONGEST len;

          len = target_write_with_progress (current_top_target (),
                                            TARGET_OBJECT_FLASH, NULL,
                                            iter.data, iter.begin,
                                            iter.end - iter.begin,
                                            progress_cb, iter.baton);
          if (len < (LONGEST) (iter.end - iter.begin))
            error (_("Error writing data to flash"));
        }

      target_flash_done ();
    }

  return 0;
}
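
/* A usage sketch (hypothetical caller; the section variables are
   assumed for illustration): build one memory_write_request per
   section to load, then let target_write_memory_blocks split the
   requests into regular and flash pieces, erase the affected flash
   blocks, and perform the writes:

     std::vector<memory_write_request> reqs;
     reqs.emplace_back (sect_addr, sect_addr + sect_size,
                        sect_data, per_section_baton);
     if (target_write_memory_blocks (reqs, flash_preserve, NULL) != 0)
       error (_("Load failed"));

   Passing flash_discard instead skips re-reading the erased but
   unwritten padding inside flash blocks.  */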