diff --git a/io/io_canread.c b/io/io_canread.c
index 0e23f51..278a3a4 100644
--- a/io/io_canread.c
+++ b/io/io_canread.c
@@ -33,6 +33,15 @@ int64 io_canread() {
     e->next_read=-1;
     debug_printf(("io_canread: dequeue %lld from normal read queue (next is %ld)\n",r,first_readable));
 
+    if (e->closed) {
+      /* The fd was previously closed, but there were still open events on it.
+       * To prevent race conditions, we did not actually close the fd
+       * but only marked it as closed, so we can skip this event here
+       * and really close it now. */
+      io_close(r);
+      continue;
+    }
+
 #ifdef __MINGW32__
 //    printf("event on %d: wr %d rq %d aq %d\n",(int)r,e->wantread,e->readqueued,e->acceptqueued);
 #endif
diff --git a/io/io_canwrite.c b/io/io_canwrite.c
index 21e28ba..2950ad9 100644
--- a/io/io_canwrite.c
+++ b/io/io_canwrite.c
@@ -27,6 +27,16 @@ int64 io_canwrite() {
     r=first_writeable;
     first_writeable=e->next_write;
     e->next_write=-1;
+
+    if (e->closed) {
+      /* The fd was previously closed, but there were still open events on it.
+       * To prevent race conditions, we did not actually close the fd
+       * but only marked it as closed, so we can skip this event here
+       * and really close it now. */
+      io_close(r);
+      continue;
+    }
+
     debug_printf(("io_canwrite: dequeue %lld from normal write queue (next is %ld)\n",r,first_writeable));
     if (e->wantwrite &&
 #ifdef __MINGW32__
diff --git a/io/io_close.c b/io/io_close.c
index 50fa62d..ea8bc6f 100644
--- a/io/io_close.c
+++ b/io/io_close.c
@@ -15,8 +15,8 @@ void io_close(int64 d) {
   if ((e=iarray_get(&io_fds,d))) {
     e->inuse=0;
     e->cookie=0;
-    if (e->kernelwantread) io_dontwantread_really(d,e);
-    if (e->kernelwantwrite) io_dontwantwrite_really(d,e);
+    if (e->kernelwantread) { io_dontwantread_really(d,e); e->kernelwantread=0; }
+    if (e->kernelwantwrite) { io_dontwantwrite_really(d,e); e->kernelwantwrite=0; }
     if (e->mmapped) {
 #ifdef __MINGW32__
       UnmapViewOfFile(e->mmapped);
@@ -26,6 +26,17 @@ void io_close(int64 d) {
 #endif
       e->mmapped=0;
     }
+    if (e->next_read!=-1 || e->next_write!=-1) {
+      /* There are still outstanding events. If we close the fd, between
+       * now and when those events are handled, another accept() or
+       * open() could return this descriptor, and cause bad things to
+       * happen for everybody.
+       * So we don't actually close the fd now, but we will mark it as
+       * closed. */
+      e->closed=1;
+      return;
+    } else
+      e->closed=0;
   }
   close(d);
 }
diff --git a/io_internal.h b/io_internal.h
index 254e21f..09a497d 100644
--- a/io_internal.h
+++ b/io_internal.h
@@ -46,6 +46,7 @@ typedef struct {
   unsigned int kernelwantread:1;  /* did we tell the kernel we want to read/write? */
   unsigned int kernelwantwrite:1;
   unsigned int epolladded:1;
+  unsigned int closed:1;  /* io_close called, but close deferred because of outstanding events */
 #ifdef __MINGW32__
   unsigned int readqueued:2;
   unsigned int writequeued:2;
@@ -80,6 +81,8 @@ my_extern array io_pollfds;
 my_extern long first_readable;
 my_extern long first_writeable;
 
+my_extern long first_deferred;
+
 my_extern enum __io_waitmode {
   UNDECIDED, POLL
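
For orientation, here is roughly how the deferred close behaves at the API level: calling io_close() on a descriptor that still has queued read or write events only sets e->closed, and the real close(2) happens later, when io_canread() or io_canwrite() dequeues that descriptor and skips the stale event. The sketch below is a minimal illustration, not part of this patch: the pipe setup, buffer size, and loop structure are invented for the example, and the header is assumed to be installed as "io.h" (it may live under libowfat/ on some systems).

/* Illustrative sketch only: drive the libowfat io event queue once and
 * close the fd from inside the event handler.  With this patch, io_close()
 * on an fd that still has queued events merely marks the entry closed; the
 * actual close(2) is performed when io_canread()/io_canwrite() later
 * dequeues the fd. */
#include <unistd.h>
#include "io.h"              /* assumed install path */

int main(void) {
  int pfd[2];
  char buf[16];
  int64 r;

  if (pipe(pfd) == -1) return 1;
  io_fd(pfd[0]);             /* register the read end with the multiplexer */
  io_wantread(pfd[0]);       /* ask to be notified when it becomes readable */
  write(pfd[1], "x", 1);     /* make it readable */

  io_wait();                 /* block until the kernel reports events */
  while ((r = io_canread()) != -1) {
    io_tryread(r, buf, sizeof(buf));   /* drain what arrived */
    /* Closing from inside the handler is the interesting part: if more
     * events for r were still queued, io_close() would only mark the
     * entry closed and defer the real close(2) until they are drained,
     * so the fd number cannot be recycled underneath the event loop. */
    io_close(r);
  }
  return 0;
}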