// digd.js/lib/store/store.json.js

'use strict';

var crypto = require('crypto');

function jsonDeepClone(target) {
  return JSON.parse(JSON.stringify(target));
}
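// NOTE: a JSON round-trip drops functions and `undefined` values, which is
// fine here because the store only ever holds plain JSON data.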
function mergeObjects() {
  // `arguments` is the list of objects to merge. We reverse it because the
  // last argument to set a value wins.
  var args = [].slice.call(arguments).reverse();
  var len = args.length;
  if (len === 1) {
    return args[0];
  }
  // gather the set of keys from all arguments
  var keyLists = args.map(function (arg) {
    return Object.keys(arg);
  });
  var keys = Object.keys(keyLists.reduce(function (all, list) {
    list.forEach(function (k) {
      all[k] = true;
    });
    return all;
  }, {}));
  // for each key
  return keys.reduce(function (target, k) {
    // find the first argument (because of the reverse() above) with the key set
    var values = [];
    var isObject = false;
    for (var i = 0; i < len; i++) {
      var v = args[i][k];
      var vType = typeof v;
      if (vType === 'object') {
        if (!v) {
          // typeof null is 'object'. null is the only falsey object. null
          // represents a delete or the end of our argument list.
          break;
        }
        // we need to collect values until we get a non-object, so we can merge them
        values.push(v);
        isObject = true;
      } else if (!isObject) {
        if (vType === 'undefined') {
          // if the arg actually has the key set this is effectively a "delete"
          if (keyLists[i].indexOf(k) !== -1) {
            break;
          }
          // otherwise we need to check the next argument's value, so we don't break the loop
        } else {
          values.push(v);
          break;
        }
      } else {
        // a previous value was an object and this one isn't,
        // which means we are done collecting values.
        break;
      }
    }
    if (values.length > 0) {
      // values were collected winner-first, so un-reverse them before the
      // recursive merge, which expects the winning value last
      target[k] = mergeObjects.apply(null, values.reverse());
    }
    return target;
  }, {});
}
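// Example of mergeObjects() (illustrative): later arguments win, and `null`
// deletes a key:
//   mergeObjects({ a: 1, b: { c: 2 } }, { b: { d: 3 } }, { a: null })
//   // => { b: { c: 2, d: 3 } }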
function prepareZone(zone, options) {
  var opts = options || {};
  var timestamp = opts.timestamp || Date.now();
  if (!zone.name) {
    zone.name = zone.id;
    zone.id = null;
  }
  if (!zone.id) {
    zone.id = crypto.randomBytes(16).toString('hex');
  }
  if (!zone.createdAt) { zone.createdAt = timestamp; }
  if (!zone.updatedAt || opts.isUpdate) { zone.updatedAt = timestamp; }
  // create a names set for the zone, keyed by record name, where each name maps
  // record IDs to the records with that name (of whatever type: A, MX, TXT, etc.)
  zone.records = zone.records || {};
  return zone;
}
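// Example of prepareZone() (illustrative):
//   prepareZone({ id: 'example.com' })
//   // => { name: 'example.com', id: '<random hex>',
//   //      createdAt: <now>, updatedAt: <now>, records: {} }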
/*
`init()` should return a `lock(forOps)` function, where `forOps` describes the portions
of the database that we need to obtain a lock for (so we can write to them). If `forOps`
is undefined, we only need to read the currently valid data.

`lock(forOps)` should return an object with: {
  save: function -> undefined - changes to the in-memory representation should be persisted.
    This could be considered the equivalent of committing a transaction to the database.
    This will release any write lock obtained. `save()` will return an error if no write
    lock was obtained OR writes were made to locations other than those that were locked.,
  discard: function -> undefined - changes to the in-memory representation should be discarded.
    This could be considered the equivalent of cancelling a transaction to the database.
    This will release any write lock obtained.,
  peers: {
    list: function -> list the FQDNs that we expect to be in sync with this server
  },
  zones: {
    list: function -> list zones,
    write:
    delete:
  },
  records: {
    list: function -> list records,
    write:
    delete:
  }
}

All lists will be a deep copy of the data actually stored.
*/
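// Hypothetical usage sketch (paths and record fields are illustrative, and
// `forOps` is accepted but not yet enforced below):
//   var lock = require('./store.json.js')({ filepath: '/tmp/db.json' });
//   var store = lock();
//   store.zones.list({}, null, function (err, zones) { /* ... */ });
//   store.records.write(
//     { zone: 'example.com', name: 'www.example.com', type: 'A', value: '192.0.2.1' },
//     function (err, record) {
//       store.save(function (err) { /* changes persisted to disk */ });
//     }
//   );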
module.exports = function init(opts) {
  // opts = { filepath };
  var fsDb = require(opts.filepath);
  var mtime = require('fs').statSync(opts.filepath).mtime.valueOf();

  //
  // Migration from other formats
  //

  // Convert the primary nameservers from an array of strings to objects with names and IDs.
  // Also switch to the 'peers' name, since we are really interested in the other FQDNs that
  // use the same data store and are kept in sync.
  var peerList = (!fsDb.peers || Array.isArray(fsDb.peers)) ? fsDb.peers : Object.keys(fsDb.peers).map(function (p) {
    return fsDb.peers[p];
  });
  fsDb.peers = [].concat(fsDb.primaryNameservers, peerList).filter(function (p) {
    // filter out empty strings, undefined, etc.
    return !!p;
  }).map(function (ns) {
    // bare strings become { name: ... } objects; existing objects pass through
    var peer = ('string' === typeof ns) ? { name: ns } : ns;
    if (!peer.id) {
      peer.id = crypto.randomBytes(16).toString('hex');
    }
    return peer;
  }).reduce(function (peers, p) {
    peers[p.name] = p;
    return peers;
  }, {});
  delete fsDb.primaryNameservers;
  // Convert domains to zones and ensure that they have proper IDs and timestamps.
  // Organize zones as a set keyed by zone name.
  var zoneList = (!fsDb.zones || Array.isArray(fsDb.zones)) ? fsDb.zones : Object.keys(fsDb.zones).map(function (z) {
    return fsDb.zones[z];
  });
  fsDb.zones = [].concat(fsDb.domains, zoneList).filter(function (z) {
    // filter out empty strings, undefined, etc.
    return !!z;
  }).map(function (zone) {
    return prepareZone(zone, { timestamp: mtime });
  }).reduce(function (zones, z) {
    zones[z.name] = z;
    return zones;
  }, {});
  delete fsDb.domains;
  // NOTE: Records belong to zones, but they previously referred to them only by a
  // zone property. This may pose problems where the whole list of records is not easily
  // filtered / kept in memory / indexed and/or retrieved by zone. Traditionally,
  // records are stored "within a zone" in a zone file. We want to have the store API
  // behave more traditionally, even though some stores (like a SQL database
  // table) might actually store the zone as a property of a record as we currently do.
  // (This fits with the somewhat unexpected and confusing logic of wildcard records.)
  (fsDb.records || []).forEach(function (record) {
    // make sure the record has an ID
    if (!record.id) {
      record.id = crypto.randomBytes(16).toString('hex');
    }
    // Put it in its zone - synthesize one if needed
    fsDb.zones[record.zone] = fsDb.zones[record.zone] || prepareZone({ name: record.zone });
    var zone = fsDb.zones[record.zone];
    // Keep in mind that each name may have multiple records (whether or not they are
    // of different types, classes, etc.), but each record must have a unique ID.
    zone.records[record.name] = zone.records[record.name] || {};
    var recordsForName = zone.records[record.name];
    recordsForName[record.id] = record;
  });
  delete fsDb.records;

  // Write the migrated data
  require('fs').writeFileSync(opts.filepath, JSON.stringify(fsDb, null, 2));
  //
  // End Migration
  //
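  // Illustrative before/after of the migration above (values invented):
  //   before: { primaryNameservers: ['ns1.example.com'], domains: [ ... ], records: [ ... ] }
  //   after:  { peers: { 'ns1.example.com': { name: 'ns1.example.com', id: '...' } },
  //             zones: { 'example.com': { name: 'example.com', id: '...', records: { ... } } } }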
  return function lock(forOps) {
    /*
    forOps : {
      write: {
        zone: string - required - a zone name,
        names: [string] - optional - a list of record names that may be modified. May be 0 length,
        records: [string] - optional - a list of record IDs that may be modified. May be 0 length (default)
      }
    }

    1. You can't get a lock for a whole zone without first releasing any locks for names and records
       within the zone. A whole zone lock will block new locks on names and records within that zone.
    2. You can't get a lock for a name within a zone without first releasing any locks for records
       within that name and zone.
    3. Locks for a specific record do not block new locks with the same zone and name but a different
       record ID.
    4. Creating a new zone, name, or record requires obtaining a lock for its key (name or ID), even
       though it does not exist yet. This prevents race conditions where 2 requests (or processes)
       attempt to create the same resource at the same time.

    Note: The UI probably needs to know if it is trying to write based on an outdated copy of data.
    Such writes should be detected and fail loudly.

    Locks probably involve lockfiles on the filesystem (with watches) so that writes and locks can be
    communicated easily across processes.
    */
    // NOTE: with a single argument mergeObjects() returns that argument as-is,
    // so `db` is currently a live reference to fsDb rather than a copy
    var db = mergeObjects(fsDb);
    var save = function save(cb) {
      if (save._saving) {
        // a write is already in flight; queue this callback to be
        // handled by the save that follows the current one
        console.log('make pending');
        save._pending.push(cb);
        return;
      }
      save._saving = true;
      require('fs').writeFile(opts.filepath, JSON.stringify(db, null, 2), function (err) {
        console.log('done writing');
        var pending = save._pending.splice(0);
        save._saving = false;
        cb(err);
        if (!pending.length) {
          return;
        }
        // save again for any callers queued while the file was being
        // written, then notify all of them at once
        save(function (err) {
          console.log('double save');
          pending.forEach(function (cb) { cb(err); });
        });
      });
    };
    save._pending = [];
    function matchPredicate(predicate) {
      return function (toCheck) {
        // which items match the predicate?
        if (!toCheck) {
          return false;
        }
        // check all the keys in the predicate - only supporting exact match
        // of at least one listed option for all keys right now
        if (Object.keys(predicate || {}).some(function (k) {
          return [].concat(predicate[k]).indexOf(toCheck[k]) === -1;
        })) {
          return false;
        }
        // we have a match
        return true;
      };
    }
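    // Example of matchPredicate() (illustrative): each predicate value may be a
    // single value or a list of acceptable values:
    //   matchPredicate({ type: ['A', 'AAAA'] })({ type: 'A', name: 'www' }) // => true
    //   matchPredicate({ type: 'MX' })({ type: 'A', name: 'www' })          // => false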
    function matchZone(predicate) {
      var zonenames = !!predicate.name ? [].concat(predicate.name) : Object.keys(db.zones);
      var check = matchPredicate(predicate);
      // TODO: swap the filter() for a functional-style "loop" recursive function
      // that lets us return early if we have a limit, etc.
      var found = zonenames.filter(function (zonename) {
        /*
        if (predicate.id && predicate.id !== z.id) { return false; }
        if (predicate.name && predicate.name !== z.name) { return false; }
        */
        return check(db.zones[zonename]);
      }).map(function (zonename) {
        return db.zones[zonename];
      });
      return found;
    }
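    // NOTE: matchZone() returns the live zone objects from `db`, not copies;
    // callers that hand data out (like listZones() below) must deep-clone it.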
    // NOTE: `opts` exists so we can add options - like properties to read - easily in the future
    // without modifying the function signature
    function listZones(predicate, opts, cb) {
      var found = jsonDeepClone(matchZone(predicate));
      return setImmediate(cb, null, found);
    }
    function writeZone(zone, cb) {
      // matchZone() is synchronous - it returns its matches directly
      var found = matchZone({ name: zone.name })[0];
      var isUpdate = !!found;
      var combined = mergeObjects((found || {}), zone);
      db.zones[zone.name] = prepareZone(combined, { isUpdate: isUpdate });
      return setImmediate(function () {
        cb(null, jsonDeepClone(db.zones[zone.name]));
      });
    }
    function deleteZone(zone, cb) {
      var found = matchZone({ name: zone.name })[0];
      if (!found) {
        return setImmediate(cb, new Error('Zone not found'));
      }
      delete db.zones[zone.name];
      return setImmediate(function () {
        cb();
      });
    }
    function listRecords(rPredicate, cb) {
      var check = matchPredicate(rPredicate);
      var found = matchZone({ name: rPredicate.zone }).reduce(function (records, zone) {
        // get the records from the zone that match the record predicate,
        // considering every name in the zone when none was specified
        var recordNames = rPredicate.name ? [].concat(rPredicate.name) : Object.keys(zone.records);
        var zFound = recordNames.filter(function (name) {
          return !!zone.records[name];
        }).reduce(function (zRecords, name) {
          return zRecords.concat(Object.keys(zone.records[name]).map(function (id) {
            return zone.records[name][id];
          }).filter(check));
        }, []);
        return records.concat(zFound);
      }, []);
      return setImmediate(cb, null, jsonDeepClone(found));
    }
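    // Example of listRecords() (illustrative):
    //   listRecords({ zone: 'example.com', name: 'www.example.com', type: 'A' }, function (err, records) {
    //     // `records` is a flat, deep-copied array of the matching record objects
    //   });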
    function modifyRecords(record, options, cb) {
      var opts = options || {};
      var isDelete = !!opts.isDelete;
      if (!record.zone) {
        return setImmediate(cb, new Error('No zone specified for record'));
      }
      if (!record.name) {
        return setImmediate(cb, new Error('No name specified for record'));
      }
      if (isDelete && !record.id) {
        return setImmediate(cb, new Error('No id specified to delete record'));
      }
      var zone = matchZone({ name: record.zone })[0];
      if (!zone) {
        return setImmediate(cb, new Error('Unable to find zone ' + record.zone + ' for record'));
      }
      var isUpdate = (record.id && !isDelete);
      if (!record.id) {
        // a brand-new record - give it an ID
        record.id = crypto.randomBytes(16).toString('hex');
      }
      var recordsForName = zone.records[record.name] = zone.records[record.name] || {};
      var found = recordsForName[record.id];
      if ((isUpdate || isDelete) && !found) {
        return setImmediate(cb, new Error('Unable to find record with ID: ' + record.id));
      }
      if (isDelete) {
        delete recordsForName[record.id];
      } else {
        recordsForName[record.id] = mergeObjects((found || {}), record);
      }
      // write the change back through the zone so its updatedAt timestamp is refreshed
      var zoneUpdate = {
        name: record.zone,
        records: {}
      };
      zoneUpdate.records[record.name] = recordsForName;
      return writeZone(zoneUpdate, function (err) {
        if (err) {
          return cb(err);
        }
        return cb(
          null,
          isDelete ? null : jsonDeepClone(recordsForName[record.id])
        );
      });
    }
    function writeRecord(record, cb) {
      modifyRecords(record, null, cb);
    }
    function deleteRecord(record, cb) {
      modifyRecords(record, { isDelete: true }, cb);
    }
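    // Example (illustrative; field names other than zone/name/id are opaque to
    // this store): writeRecord() creates when no `id` is given and updates when
    // one is; deleteRecord() requires zone, name, and id:
    //   writeRecord({ zone: 'example.com', name: 'www.example.com', type: 'A', value: '192.0.2.1' }, cb);
    //   deleteRecord({ zone: 'example.com', name: 'www.example.com', id: '<record id>' }, cb);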
    var dbApi = {
      save: function () {
        // hide _pending and _saving from callers
        var args = [].slice.call(arguments);
        return save.apply(null, args);
      },
      // peers really isn't editable - it's literally the list of FQDNs
      // that this database is replicated to in a multi-master fashion.
      //
      // However, lib/store/index.js does plenty to update these records in support
      // of the SOA records that are built from them (as does this file in the "migration"
      // section). I'm toying with the idea of not storing them separately or creating the
      // SOA records somewhat immediately.
      peers: function listPeers(cb) {
        // Most data stores are going to have an asynchronous storage API. If we need
        // synchronous access to the data it is going to have to be cached. If it is
        // cached, there is still the issue of the cache getting out of sync (a legitimate
        // issue anyway). If we explicitly make all of these operations async then we
        // have greater flexibility for store implementations to address these issues.
        return setImmediate(cb, null, jsonDeepClone(db.peers));
      },
      zones: {
        list: listZones,
        write: writeZone,
        delete: deleteZone
      },
      records: {
        list: listRecords,
        write: writeRecord,
        delete: deleteRecord
      }
    };
    return dbApi;
  };
};