# Copyright 1992-2014 Free Software Foundation, Inc.

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# This file is based on corefile.exp which was written by Fred
# Fish. (fnf@cygnus.com)

# Are we on a target board?  As of 2004-02-12, GDB didn't have a
# mechanism that would let it efficiently access a remote corefile.

if ![isnative] then {
    untested "Remote system"
    return
}

# Can the system run this test (in particular support sparse
# corefiles)?  On systems that lack sparse corefile support this test
# consumes too many resources - gigabytes worth of disk space and
# I/O bandwidth.

if { [istarget "*-*-*bsd*"]
     || [istarget "*-*-hpux*"]
     || [istarget "*-*-solaris*"]
     || [istarget "*-*-darwin*"]
     || [istarget "*-*-cygwin*"] } {
    untested "Kernel lacks sparse corefile support (PR gdb/1551)"
    return
}

# This testcase causes too much stress (in terms of memory usage)
# on certain systems...
if { [istarget "*-*-*irix*"] } {
    untested "Testcase too stressful for this system"
    return
}

standard_testfile .c
set corefile [standard_output_file ${binfile}.corefile]

if { [gdb_compile "${srcdir}/${subdir}/${srcfile}" "${binfile}" executable {debug}] != "" } {
    untested bigcore.exp
    return -1
}

# Run GDB on the bigcore program up to where it will dump core.

clean_restart ${binfile}
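# Keep GDB's output simple and unwrapped so the expect patterns below
# match reliably: print non-ASCII characters as escapes and disable
# line wrapping.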
gdb_test_no_output "set print sevenbit-strings"
gdb_test_no_output "set width 0"

# Get the core into the output directory.
if {![is_remote host]} {
    gdb_test "cd [file dirname $corefile]" "Working directory .*" \
        "cd to test directory"
}

if { ![runto_main] } then {
    gdb_suppress_tests
}
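# Stop at the source line tagged "Dump core" and step over it; by this
# point the program has finished building its linked list of heap
# chunks.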
set print_core_line [gdb_get_line_number "Dump core"]
gdb_test "tbreak $print_core_line"
gdb_test continue ".*print_string.*"
gdb_test next ".*0 = 0.*"

# Traverse part of bigcore's linked list of memory chunks (forward or
# backward), saving each chunk's address.

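# extract_heap follows heap.DIR (next or prev) through GDB's value
# history and returns the list of chunk addresses seen, stopping at
# the end of the list (a NULL pointer) or after 200 entries.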
proc extract_heap { dir } {
    global gdb_prompt
    global expect_out
    set heap ""
    set test "extract ${dir} heap"
    set lim 0
    gdb_test_multiple "print heap.${dir}" "$test" {
        -re " = \\(struct list \\*\\) 0x0.*$gdb_prompt $" {
            pass "$test"
        }
        -re " = \\(struct list \\*\\) (0x\[0-9a-f\]*).*$gdb_prompt $" {
            set heap [concat $heap $expect_out(1,string)]
            if { $lim >= 200 } {
                pass "$test (stop at $lim)"
            } else {
                incr lim
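                # "$" is GDB's value history entry for the pointer
                # just printed, so this follows the list one more link.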
                send_gdb "print \$.${dir}\n"
                exp_continue
            }
        }
        -re ".*$gdb_prompt $" {
            fail "$test (entry $lim)"
        }
        timeout {
            fail "$test (timeout)"
        }
    }
    return $heap
}
set next_heap [extract_heap next]
set prev_heap [extract_heap prev]

# Save the total allocated size within GDB so that we can check
# the core size later.
gdb_test_no_output "set \$bytes_allocated = bytes_allocated" "save heap size"

# Now create a core dump

# Rename the core file to "TESTFILE.corefile" rather than just "core",
# to avoid problems with sys admin types that like to regularly prune
# all files named "core" from the system.

# Some systems append "core" to the name of the program; others append
# the name of the program to "core"; still others (like Linux, as of
# May 2003) create cores named "core.PID".

# Save the process ID.  Some systems dump the core into core.PID.
set test "grab pid"
gdb_test_multiple "info program" $test {
    -re "child process (\[0-9\]+).*$gdb_prompt $" {
        set inferior_pid $expect_out(1,string)
        pass $test
    }
    -re "$gdb_prompt $" {
        set inferior_pid unknown
        pass $test
    }
}

# Dump core using SIGABRT
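# Writing out the core file can take a while on some systems, so
# temporarily raise the expect timeout.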
set oldtimeout $timeout
set timeout 600
gdb_test "signal SIGABRT" "Program terminated with signal SIGABRT, .*"
set timeout $oldtimeout

# Find the corefile
set file ""
foreach pat [list core.${inferior_pid} ${testfile}.core core] {
    set names [glob -nocomplain [standard_output_file $pat]]
    if {[llength $names] == 1} {
        set file [lindex $names 0]
        remote_exec build "mv $file $corefile"
        break
    }
}

if { $file == "" } {
    untested "Can't generate a core file"
    return 0
}

# Check that the corefile is plausibly large enough.  We're trying to
# detect the case where the operating system has truncated the file
# just before signed wraparound.  TCL, unfortunately, has a similar
# problem - so use catch.  It can handle the "bad" size but not
# necessarily the "good" one.  And we must use GDB for the comparison,
# similarly.

if {[catch {file size $corefile} core_size] == 0} {
    set core_ok 0
    gdb_test_multiple "print \$bytes_allocated < $core_size" "check core size" {
        -re " = 1\r\n$gdb_prompt $" {
            pass "check core size"
            set core_ok 1
        }
        -re " = 0\r\n$gdb_prompt $" {
            pass "check core size"
            set core_ok 0
        }
    }
} {
    # Probably failed due to the TCL build having problems with very
    # large values.  Since GDB uses a 64-bit off_t (when possible) it
    # shouldn't have this problem.  Assume that things are going to
    # work.  Without this assumption the test is skipped on systems
    # (such as i386 GNU/Linux with patched kernel) which do pass.
    pass "check core size"
    set core_ok 1
}
if {! $core_ok} {
    untested "check core size (system does not support large corefiles)"
    return 0
}

# Now load up that core file

set test "load corefile"
# We use [file tail] because gdb is still "cd"d to the
# output directory.
gdb_test_multiple "core [file tail $corefile]" "$test" {
    -re "A program is being debugged already. Kill it. .y or n. " {
        send_gdb "y\n"
        exp_continue
    }
    -re "Core was generated by.*$gdb_prompt $" {
        pass "$test"
    }
}

# Finally, re-traverse bigcore's linked list, checking each chunk's
# address against the list saved above.  Don't use gdb_test_multiple
# as we want only one pass/fail.  Don't use exp_continue as the
# regular expression involving $heap needs to be re-evaluated for
# each new response.

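# check_heap walks GDB's value history ("print $.DIR") just as
# extract_heap did, comparing each address printed from the core file
# with the one recorded from the live process, and fails on the first
# mismatch or timeout.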
proc check_heap { dir heap } {
    global gdb_prompt
    set test "check ${dir} heap"
    set ok 1
    set lim 0
    send_gdb "print heap.${dir}\n"
    while { $ok } {
        gdb_expect {
            -re " = \\(struct list \\*\\) [lindex $heap $lim].*$gdb_prompt $" {
                if { $lim >= [llength $heap] } {
                    pass "$test"
                    set ok 0
                } else {
                    incr lim
                    send_gdb "print \$.${dir}\n"
                }
            }
            -re ".*$gdb_prompt $" {
                fail "$test (address [lindex $heap $lim])"
                set ok 0
            }
            timeout {
                fail "$test (timeout)"
                set ok 0
            }
        }
    }
}

check_heap next $next_heap
check_heap prev $prev_heap