major refactor (only briefly tested)

parent cb8261fd31
commit 8be76e1eb2
@@ -2,42 +2,45 @@
 var Packer = require('proxy-packer');

-module.exports = function pipeWs(servername, service, conn, remote, serviceport) {
+module.exports = function pipeWs(servername, service, srv, conn, serviceport) {
     var browserAddr = Packer.socketToAddr(conn);
     var cid = Packer.addrToId(browserAddr);
     browserAddr.service = service;
     browserAddr.serviceport = serviceport;
     browserAddr.name = servername;
     conn.tunnelCid = cid;
-    var rid = Packer.socketToId(remote.upgradeReq.socket);
+    var rid = Packer.socketToId(srv.upgradeReq.socket);

     //if (state.debug) { console.log('[pipeWs] client', cid, '=> remote', rid, 'for', servername, 'via', service); }

     function sendWs(data, serviceOverride) {
-        if (remote.ws && (!conn.tunnelClosing || serviceOverride)) {
+        if (srv.ws && (!conn.tunnelClosing || serviceOverride)) {
             try {
-                remote.ws.send(Packer.pack(browserAddr, data, serviceOverride), { binary: true });
+                srv.ws.send(Packer.pack(browserAddr, data, serviceOverride), { binary: true });
                 // If we can't send data over the websocket as fast as this connection can send it to us
                 // (or there are a lot of connections trying to send over the same websocket) then we
                 // need to pause the connection for a little. We pause all connections if any are paused
                 // to make things more fair so a connection doesn't get stuck waiting for everyone else
                 // to finish because it got caught on the boundary. Also if serviceOverride is set it
                 // means the connection is over, so no need to pause it.
-                if (!serviceOverride && (remote.pausedConns.length || remote.ws.bufferedAmount > 1024*1024)) {
+                if (!serviceOverride && (srv.pausedConns.length || srv.ws.bufferedAmount > 1024*1024)) {
                     // console.log('pausing', cid, 'to allow web socket to catch up');
                     conn.pause();
-                    remote.pausedConns.push(conn);
+                    srv.pausedConns.push(conn);
                 }
             } catch (err) {
-                console.warn('[pipeWs] remote', rid, ' => client', cid, 'error sending websocket message', err);
+                console.warn('[pipeWs] srv', rid, ' => client', cid, 'error sending websocket message', err);
             }
         }
     }

-    remote.clients[cid] = conn;
+    srv.clients[cid] = conn;
     conn.servername = servername;
     conn.serviceport = serviceport;
     conn.service = service;

     conn.on('data', function (chunk) {
-        //if (state.debug) { console.log('[pipeWs] client', cid, ' => remote', rid, chunk.byteLength, 'bytes'); }
+        //if (state.debug) { console.log('[pipeWs] client', cid, ' => srv', rid, chunk.byteLength, 'bytes'); }
         sendWs(chunk);
     });

@@ -48,7 +51,7 @@ module.exports = function pipeWs(servername, service, conn, remote, serviceport)
     conn.on('close', function (hadErr) {
         //if (state.debug) { console.log('[pipeWs] client', cid, 'closing'); }
         sendWs(null, hadErr ? 'error' : 'end');
-        delete remote.clients[cid];
+        delete srv.clients[cid];
     });

 };
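The pausing logic above is the heart of this hunk: many plain TCP connections are multiplexed over a single device websocket (now named srv rather than remote), so once srv.ws.bufferedAmount climbs past 1 MiB the relay stops reading from the browser-facing sockets until the websocket catches up. A minimal sketch of the idea in plain Node.js; resumeWhenDrained is a hypothetical stand-in for the drain logic that lives elsewhere in the relay and is not shown in this diff:

    var MAX_BUFFERED = 1024 * 1024; // same 1 MiB threshold as the diff

    function forward(conn, srv) {
        conn.on('data', function (chunk) {
            srv.ws.send(chunk, { binary: true });
            // Stop reading from this socket (and remember it) when the shared
            // websocket falls behind, or when any other socket is already paused.
            if (srv.pausedConns.length || srv.ws.bufferedAmount > MAX_BUFFERED) {
                conn.pause();
                srv.pausedConns.push(conn);
            }
        });
    }

    // Hypothetical drain side: once the websocket catches up, resume everyone.
    function resumeWhenDrained(srv) {
        if (srv.ws.bufferedAmount < MAX_BUFFERED) {
            srv.pausedConns.splice(0).forEach(function (conn) {
                conn.resume();
            });
        }
    }

Pausing every connection as soon as one is paused (the pausedConns.length check) keeps a single busy socket from starving the others, as the in-code comment explains.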
lib/relay.js (1012 lines changed)
File diff suppressed because it is too large
@@ -19,6 +19,11 @@ module.exports.createTcpConnectionHandler = function (state) {

         //return;
         conn.once('data', function (firstChunk) {
+            var service = 'tcp';
+            var servername;
+            var str;
+            var m;
+
             conn.pause();
             conn.unshift(firstChunk);

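This hunk moves the routing variables up into the conn.once('data', ...) callback. The surrounding pattern, only partly visible in the diff, is: read just the first chunk, pause the socket, push the chunk back with unshift() so the eventual handler sees those bytes again, decide which service the connection is for, then resume. A rough, self-contained sketch of that pattern (the port and the TLS byte check are only for illustration, not code from this repository):

    var net = require('net');

    net.createServer(function (conn) {
        conn.once('data', function (firstChunk) {
            conn.pause();
            conn.unshift(firstChunk); // the chosen handler will re-read these bytes

            // 0x16 (22) is the TLS handshake record type; anything else is
            // treated as plaintext here.
            var service = (firstChunk[0] === 22) ? 'tls' : 'http';
            // ...hand conn off to the handler registered for `service`...

            process.nextTick(function () {
                conn.resume();
            });
        });
    }).listen(3000);

The real handler goes on to work out servername and service properly (tryTls, the vhost check, the HTTP branch below), but the pause/unshift/resume skeleton is the same.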
@@ -31,18 +36,13 @@ module.exports.createTcpConnectionHandler = function (state) {
             // defer after return (instead of being in many places)
             function deferData(fn) {
                 if (fn) {
-                    state[fn](servername, conn)
+                    state[fn](servername, conn);
                 }
                 process.nextTick(function () {
                     conn.resume();
                 });
             }

-            var service = 'tcp';
-            var servername;
-            var str;
-            var m;
-
             function tryTls() {
                 var vhost;

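Besides the relocated declarations, the only change in this hunk is the added semicolon on state[fn](servername, conn);. The deferData() helper itself is worth spelling out: because firstChunk was unshift()ed back onto the socket, conn.resume() must not run until the chosen handler has attached its own 'data' listeners, so the resume is pushed to the next tick in one place instead of being repeated in every branch. Roughly, with comments added for clarity:

    function deferData(fn) {
        if (fn) {
            state[fn](servername, conn); // e.g. state.handleHttp(servername, conn)
        }
        // Resume only after the current call stack (and the handler's listener
        // registration) has finished, so the replayed firstChunk isn't lost.
        process.nextTick(function () {
            conn.resume();
        });
    }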
@@ -76,9 +76,9 @@ module.exports.createTcpConnectionHandler = function (state) {
                     return;
                 }

-                if (state.debug) { console.log("pipeWs(servername, service, socket, deviceLists['" + servername + "'])"); }
+                if (state.debug) { console.log("pipeWs(servername, service, deviceLists['" + servername + "'], socket)"); }
                 deferData();
-                pipeWs(servername, service, conn, nextDevice, serviceport);
+                pipeWs(servername, service, nextDevice, conn, serviceport);
             }

             // TODO don't run an fs check if we already know this is working elsewhere
@@ -90,7 +90,7 @@ module.exports.createTcpConnectionHandler = function (state) {
                 //return;
                 require('fs').readdir(vhost, function (err, nodes) {
                     if (state.debug && err) { console.log("VHOST error", err); }
-                    if (err) { run(); return; }
+                    if (err || !nodes) { run(); return; }
                     //if (nodes) { deferData('httpsVhost'); return; }
                     deferData('httpsVhost');
                 });
@@ -131,7 +131,7 @@ module.exports.createTcpConnectionHandler = function (state) {
             // HTTP
             if (Devices.exist(state.deviceLists, servername)) {
                 deferData();
-                pipeWs(servername, service, conn, Devices.next(state.deviceLists, servername), serviceport);
+                pipeWs(servername, service, Devices.next(state.deviceLists, servername), conn, serviceport);
                 return;
             }
             deferData('handleHttp');
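Taken together with the first file, the visible theme of this refactor is a consistent argument order for pipeWs: the device/server entry (the websocket side, now named srv) comes before the downstream browser connection. Call sites therefore change from pipeWs(servername, service, conn, device, serviceport) to, roughly:

    // assuming `conn` is the just-accepted TCP socket and `servername`,
    // `service`, and `serviceport` were derived from the first chunk
    var nextDevice = Devices.next(state.deviceLists, servername);
    deferData();
    pipeWs(servername, service, nextDevice, conn, serviceport);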