Compare commits

...

3 Commits

Author | SHA1 | Message | Date
Rafa de la Torre | fd3cc95573 | Remove unused var buffer_sent | 2018-06-11 12:17:39 +02:00
Rafa de la Torre | 922627daaf | Small refactor | 2018-06-11 12:14:28 +02:00
Rafa de la Torre | 61bc713e0c | Improve performance of COPY TO #56 | 2018-06-08 15:04:42 +02:00

Full message of 61bc713e0c:

Under some circumstances, the COPY TO streaming can be CPU-bound,
particularly when PG holds the result set in memory buffers and the size
of the rows is much smaller than the chunk size (64 KB on my Linux box).

This commit improves the situation by creating a buffer of `chunk`
size and fitting in as many rows as it can before pushing them. This
results in more balanced reads and writes (similar in size, and in
bigger chunks), as well as more frequent calls to the callback, thus
freeing the main loop to process other events and avoiding starvation.
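
For readers who want the idea in isolation, here is a minimal standalone
sketch of the batching strategy the message describes. It is not the
module's actual parser: the name BatchingRowStream is hypothetical, it
batches newline-delimited rows rather than CopyData protocol messages,
and it omits the cross-chunk `_remainder` handling the real code performs.

const { Transform } = require('stream')

// Hypothetical stream illustrating the technique: rows are copied into
// one chunk-sized buffer and pushed together, instead of one push() per row.
class BatchingRowStream extends Transform {
  _transform(chunk, enc, cb) {
    var buffer = Buffer.alloc(chunk.length) // batched rows never exceed the chunk
    var bufferOffset = 0
    var offset = 0
    while (offset < chunk.length) {
      var end = chunk.indexOf(0x0a, offset) // locate the '\n' row terminator
      if (end === -1) end = chunk.length    // simplification: no remainder handling
      var row = chunk.slice(offset, end)    // row payload, terminator stripped
      row.copy(buffer, bufferOffset)        // accumulate instead of this.push(row)
      bufferOffset += row.length
      offset = end + 1
    }
    // A single push per incoming chunk: bigger, more balanced writes, and
    // cb() is reached quickly, handing control back to the event loop.
    if (bufferOffset > 0) this.push(buffer.slice(0, bufferOffset))
    cb()
  }
}

// Usage, e.g.: process.stdin.pipe(new BatchingRowStream()).pipe(process.stdout)

The key point mirrors the diff below: each _transform call does one large
push instead of many small ones and returns promptly, so other events
queued on the main loop are not starved.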


@@ -42,6 +42,16 @@ CopyStreamQuery.prototype._transform = function(chunk, enc, cb) {
var messageCode;
var needPush = false;
var buffer = Buffer.alloc(chunk.length);
var buffer_offset = 0;
this.pushBufferIfneeded = function() {
if (needPush && buffer_offset > 0) {
this.push(buffer.slice(0, buffer_offset))
buffer_offset = 0;
}
}
while((chunk.length - offset) >= (Byte1Len + Int32Len)) {
var messageCode = chunk[offset]
@@ -70,6 +80,7 @@ CopyStreamQuery.prototype._transform = function(chunk, enc, cb) {
case code.ErrorResponse:
case code.CopyDone:
this.pushBufferIfneeded();
this._detach()
this.push(null)
return cb();
@@ -84,7 +95,8 @@ CopyStreamQuery.prototype._transform = function(chunk, enc, cb) {
if (needPush) {
var row = chunk.slice(offset, offset + length - Int32Len)
this.rowCount++
this.push(row)
row.copy(buffer, buffer_offset);
buffer_offset += row.length;
}
offset += (length - Int32Len)
} else {
@@ -93,6 +105,8 @@ CopyStreamQuery.prototype._transform = function(chunk, enc, cb) {
}
}
this.pushBufferIfneeded();
if(chunk.length - offset) {
var slice = chunk.slice(offset)
this._remainder = slice