isync

mailbox synchronization program
git clone https://git.code.sf.net/p/isync/isync

commit 859b7dd7f2dc5d8396488700e0ab35d207188ed9
parent ac3b5186b0046a18db21f54b00c974c4bd73d731
Author: Oswald Buddenhagen <ossi@users.sf.net>
Date:   Thu,  9 Jun 2022 13:32:16 +0200

try to avoid extra syscalls when reading sockets

so far, we shifted down the buffered data only when we ran out of space.
however, that may chop up the actual socket reads, which is a bad
trade-off for merely avoiding a moderate-sized memmove. so try to keep
enough space free for the anticipated read size.

note that this ignores the zlib path, as that always does full-size
socket reads into z_buf, and saving inflate() calls seems pointless.

Diffstat:
M src/socket.c | 16 ++++++++++++----
M src/socket.h | 5 +++++
2 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/src/socket.c b/src/socket.c
@@ -407,6 +407,7 @@ socket_start_deflate( conn_t *conn )
 	}
 
 	init_wakeup( &conn->z_fake, z_fake_cb, conn );
+	conn->readsz = 0;  // This optimization makes no sense past this point
 }
 
 #endif /* HAVE_LIBZ */
@@ -726,8 +727,9 @@ socket_filled( conn_t *conn, uint len )
 	uint cnt = conn->bytes + len;
 	conn->bytes = cnt;
 	if (conn->wanted) {
-		// Fulfill as much of the request as still fits into the buffer
-		if (cnt < conn->wanted && off + cnt < sizeof(conn->buf))
+		// Fulfill as much of the request as still fits into the buffer,
+		// but avoid chopping up the actual socket reads
+		if (cnt < conn->wanted && off + cnt < sizeof(conn->buf) - conn->readsz)
 			return;
 	} else {
 		// Need a full line
@@ -735,7 +737,7 @@ socket_filled( conn_t *conn, uint len )
 		char *p = memchr( s + conn->scanoff, '\n', cnt - conn->scanoff );
 		if (!p) {
 			conn->scanoff = cnt;
-			if (off + cnt == sizeof(conn->buf)) {
+			if (off && off + cnt >= sizeof(conn->buf) - conn->readsz) {
 				memmove( conn->buf, conn->buf + off, cnt );
 				conn->offset = 0;
 			}
@@ -803,6 +805,12 @@ socket_fill( conn_t *sock )
 		if ((n = do_read( sock, buf, len )) <= 0)
 			return;
+		// IIR filter for tracking average size of bulk reads.
+		// We use this to optimize the free space at the end of the
+		// buffer, hence the factor of 1.5.
+		if (n >= MIN_BULK_READ)
+			sock->readsz = (sock->readsz * 3 + n * 3 / 2) / 4;
+
 		socket_filled( sock, (uint)n );
 	}
 }
 
@@ -831,7 +839,7 @@ socket_expect_bytes( conn_t *conn, uint len )
 	if (off) {
 		uint cnt = conn->bytes;
 		if (off + len > sizeof(conn->buf) ||
-		    off + cnt == sizeof(conn->buf)) {
+		    off + cnt >= sizeof(conn->buf) - conn->readsz) {
 			memmove( conn->buf, conn->buf + off, cnt );
 			conn->offset = 0;
 		}
diff --git a/src/socket.h b/src/socket.h
@@ -100,12 +100,16 @@ typedef struct {
 	uint bytes;    /* number of filled bytes in buffer */
 	uint scanoff;  /* offset to continue scanning for newline at, relative to 'offset' */
 	uint wanted;  // try to accumulate that many bytes before calling back; 0 => full line
+	uint readsz;  // average size of bulk reads from the underlying socket, times 1.5
 	char buf[100000];
 #ifdef HAVE_LIBZ
 	char z_buf[100000];
 #endif
 } conn_t;
 
+// Shorter reads are assumed to be limited by round-trips.
+#define MIN_BULK_READ 1000
+
 /* call this before doing anything with the socket */
 static INLINE void socket_init( conn_t *conn,
                                 const server_conf_t *conf,
@@ -123,6 +127,7 @@ static INLINE void socket_init( conn_t *conn,
 	conn->name = NULL;
 	conn->write_buf_append = &conn->write_buf;
 	conn->wanted = 1;
+	conn->readsz = MIN_BULK_READ * 3 / 2;
 }
 void socket_connect( conn_t *conn, void (*cb)( int ok, void *aux ) );
 void socket_start_tls(conn_t *conn, void (*cb)( int ok, void *aux ) );
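
for reference, a self-contained demonstration of the IIR filter above,
seeded the way socket_init() seeds readsz; the read sizes below are made
up for illustration, only the formula and constants come from the patch:

#include <stdio.h>

#define MIN_BULK_READ 1000

int main( void )
{
	unsigned readsz = MIN_BULK_READ * 3 / 2;  // seed as socket_init() does
	static const unsigned reads[] = { 200, 4096, 4096, 16384, 16384, 512, 16384 };
	for (unsigned i = 0; i < sizeof(reads) / sizeof(reads[0]); i++) {
		unsigned n = reads[i];
		// Short reads are assumed to be round-trip limited and don't
		// update the average; bulk reads pull readsz toward n * 3 / 2.
		if (n >= MIN_BULK_READ)
			readsz = (readsz * 3 + n * 3 / 2) / 4;
		printf( "read %5u -> readsz %5u\n", n, readsz );
	}
	return 0;
}

with repeated bulk reads of size n, readsz converges toward n * 3 / 2,
i.e. the buffer keeps about one and a half reads' worth of tail space
free.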