return NULL;
}
+
+/* Helper for the memory xfer routines.  Checks the attributes of the
+ memory region of MEMADDR against the read or write being attempted.
+ Exactly one of READBUF (a read) and WRITEBUF (a write) is expected
+ to be non-NULL; it determines which direction is checked against
+ the region's access mode.  If the access is permitted returns 1
+ (true), otherwise returns 0 (false).
+ REGION_P is an optional output parameter.  If non-NULL, it is
+ filled with a pointer to the memory region of MEMADDR.  REG_LEN
+ returns LEN trimmed to the end of the region.  This is how much the
+ caller can continue requesting, if the access is permitted.  A
+ single xfer request must not straddle memory region boundaries.  */
+
+static int
+memory_xfer_check_region (gdb_byte *readbuf, const gdb_byte *writebuf,
+ ULONGEST memaddr, ULONGEST len, ULONGEST *reg_len,
+ struct mem_region **region_p)
+{
+ struct mem_region *region;
+
+ region = lookup_mem_region (memaddr);
+
+ /* Hand the region back to the caller if it asked for it.  */
+ if (region_p != NULL)
+ *region_p = region;
+
+ switch (region->attrib.mode)
+ {
+ case MEM_RO:
+ /* Read-only region: refuse writes.  */
+ if (writebuf != NULL)
+ return 0;
+ break;
+
+ case MEM_WO:
+ /* Write-only region: refuse reads.  */
+ if (readbuf != NULL)
+ return 0;
+ break;
+
+ case MEM_FLASH:
+ /* We only support writing to flash during "load" for now.  */
+ if (writebuf != NULL)
+ error (_("Writing to flash memory forbidden in this context"));
+ break;
+
+ case MEM_NONE:
+ /* Inaccessible region: refuse all access.  */
+ return 0;
+ }
+
+ /* Any mode without a case above (e.g. plain read/write) falls
+ through to here: the access is permitted.  Now trim LEN so the
+ request does not extend past the end of this region.
+ region->hi == 0 means there's no upper bound.  Note that when
+ memaddr + len == region->hi, the else branch yields the same
+ result (len), so the strict '<' is harmless.  */
+ if (memaddr + len < region->hi || region->hi == 0)
+ *reg_len = len;
+ else
+ *reg_len = region->hi - memaddr;
+
+ return 1;
+}
+
/* Read memory from more than one valid target. A core file, for
instance, could have some of memory but delegate other bits to
the target below it. So, we must manually try all targets. */
ULONGEST len, ULONGEST *xfered_len)
{
enum target_xfer_status res;
- int reg_len;
+ ULONGEST reg_len;
struct mem_region *region;
struct inferior *inf;
}
/* Try GDB's internal data cache. */
- region = lookup_mem_region (memaddr);
- /* region->hi == 0 means there's no upper bound. */
- if (memaddr + len < region->hi || region->hi == 0)
- reg_len = len;
- else
- reg_len = region->hi - memaddr;
-
- switch (region->attrib.mode)
- {
- case MEM_RO:
- if (writebuf != NULL)
- return TARGET_XFER_E_IO;
- break;
- case MEM_WO:
- if (readbuf != NULL)
- return TARGET_XFER_E_IO;
- break;
-
- case MEM_FLASH:
- /* We only support writing to flash during "load" for now. */
- if (writebuf != NULL)
- error (_("Writing to flash memory forbidden in this context"));
- break;
-
- case MEM_NONE:
- return TARGET_XFER_E_IO;
- }
+ if (!memory_xfer_check_region (readbuf, writebuf, memaddr, len, ®_len,
+ ®ion))
+ return TARGET_XFER_E_IO;
if (!ptid_equal (inferior_ptid, null_ptid))
inf = find_inferior_pid (ptid_get_pid (inferior_ptid));
writebuf, offset, len, xfered_len);
else if (object == TARGET_OBJECT_RAW_MEMORY)
{
+ /* Skip/avoid accessing the target if the memory region
+ attributes block the access. Check this here instead of in
+ raw_memory_xfer_partial as otherwise we'd end up checking
+ this twice in the case of the memory_xfer_partial path is
+ taken; once before checking the dcache, and another in the
+ tail call to raw_memory_xfer_partial. */
+ if (!memory_xfer_check_region (readbuf, writebuf, offset, len, &len,
+ NULL))
+ return TARGET_XFER_E_IO;
+
/* Request the normal memory object from other layers. */
retval = raw_memory_xfer_partial (ops, readbuf, writebuf, offset, len,
xfered_len);