Fix a subtle (?) bug in sched_do_syscall with read/write calls for
which the client has already got the fd in nonblocking mode.  In such
cases, do not wait for an IO completion -- since the client presumably
handles that somehow.


git-svn-id: svn://svn.valgrind.org/valgrind/trunk@164
This commit is contained in:
Julian Seward 2002-04-29 01:58:08 +00:00
parent 26207e6d5a
commit bf379fa8d7
2 changed files with 26 additions and 56 deletions

View File

@ -779,7 +779,6 @@ void sched_do_syscall ( ThreadId tid )
UInt saved_eax;
UInt res, syscall_no;
UInt fd;
Bool might_block, assumed_nonblocking;
Bool orig_fd_blockness;
Char msg_buf[100];
@ -808,39 +807,18 @@ void sched_do_syscall ( ThreadId tid )
return;
}
switch (syscall_no) {
case __NR_read:
case __NR_write:
assumed_nonblocking
= False;
might_block
= fd_is_blockful(vg_threads[tid].m_ebx /* arg1 */);
break;
default:
might_block = False;
assumed_nonblocking = True;
}
if (assumed_nonblocking) {
if (syscall_no != __NR_read && syscall_no != __NR_write) {
/* We think it's non-blocking. Just do it in the normal way. */
VG_(perform_assumed_nonblocking_syscall)(tid);
/* The thread is still runnable. */
return;
}
/* It might block. Take evasive action. */
switch (syscall_no) {
case __NR_read:
case __NR_write:
fd = vg_threads[tid].m_ebx; break;
default:
vg_assert(3+3 == 7);
}
/* Set the fd to nonblocking, and do the syscall, which will return
immediately, in order to lodge a request with the Linux kernel.
We later poll for I/O completion using select(). */
fd = vg_threads[tid].m_ebx /* arg1 */;
orig_fd_blockness = fd_is_blockful(fd);
set_fd_nonblocking(fd);
vg_assert(!fd_is_blockful(fd));
@ -856,16 +834,23 @@ void sched_do_syscall ( ThreadId tid )
else
set_fd_nonblocking(fd);
if (res != -VKI_EWOULDBLOCK) {
/* It didn't block; it went through immediately. So finish off
in the normal way. Don't restore %EAX, since that now
(correctly) holds the result of the call. */
if (res != -VKI_EWOULDBLOCK || !orig_fd_blockness) {
/* Finish off in the normal way. Don't restore %EAX, since that
now (correctly) holds the result of the call. We get here if either:
1. The call didn't block, or
2. The fd was already in nonblocking mode before we started to
mess with it. In this case, we're not expecting to handle
the I/O completion -- the client is. So don't file a
completion-wait entry.
*/
VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
/* We're still runnable. */
vg_assert(vg_threads[tid].status == VgTs_Runnable);
} else {
vg_assert(res == -VKI_EWOULDBLOCK && orig_fd_blockness);
/* It would have blocked. First, restore %EAX to what it was
before our speculative call. */
vg_threads[tid].m_eax = saved_eax;

View File

@ -779,7 +779,6 @@ void sched_do_syscall ( ThreadId tid )
UInt saved_eax;
UInt res, syscall_no;
UInt fd;
Bool might_block, assumed_nonblocking;
Bool orig_fd_blockness;
Char msg_buf[100];
@ -808,39 +807,18 @@ void sched_do_syscall ( ThreadId tid )
return;
}
switch (syscall_no) {
case __NR_read:
case __NR_write:
assumed_nonblocking
= False;
might_block
= fd_is_blockful(vg_threads[tid].m_ebx /* arg1 */);
break;
default:
might_block = False;
assumed_nonblocking = True;
}
if (assumed_nonblocking) {
if (syscall_no != __NR_read && syscall_no != __NR_write) {
/* We think it's non-blocking. Just do it in the normal way. */
VG_(perform_assumed_nonblocking_syscall)(tid);
/* The thread is still runnable. */
return;
}
/* It might block. Take evasive action. */
switch (syscall_no) {
case __NR_read:
case __NR_write:
fd = vg_threads[tid].m_ebx; break;
default:
vg_assert(3+3 == 7);
}
/* Set the fd to nonblocking, and do the syscall, which will return
immediately, in order to lodge a request with the Linux kernel.
We later poll for I/O completion using select(). */
fd = vg_threads[tid].m_ebx /* arg1 */;
orig_fd_blockness = fd_is_blockful(fd);
set_fd_nonblocking(fd);
vg_assert(!fd_is_blockful(fd));
@ -856,16 +834,23 @@ void sched_do_syscall ( ThreadId tid )
else
set_fd_nonblocking(fd);
if (res != -VKI_EWOULDBLOCK) {
/* It didn't block; it went through immediately. So finish off
in the normal way. Don't restore %EAX, since that now
(correctly) holds the result of the call. */
if (res != -VKI_EWOULDBLOCK || !orig_fd_blockness) {
/* Finish off in the normal way. Don't restore %EAX, since that
now (correctly) holds the result of the call. We get here if either:
1. The call didn't block, or
2. The fd was already in nonblocking mode before we started to
mess with it. In this case, we're not expecting to handle
the I/O completion -- the client is. So don't file a
completion-wait entry.
*/
VG_(check_known_blocking_syscall)(tid, syscall_no, &res /* POST */);
/* We're still runnable. */
vg_assert(vg_threads[tid].status == VgTs_Runnable);
} else {
vg_assert(res == -VKI_EWOULDBLOCK && orig_fd_blockness);
/* It would have blocked. First, restore %EAX to what it was
before our speculative call. */
vg_threads[tid].m_eax = saved_eax;