In progress: Create store API to enable non-json based stores #9

Open
Ghost wants to merge 3 commits from (deleted):js-stores into httpd
6 changed files with 592 additions and 141 deletions

.gitignore

@ -1,2 +1,3 @@
node_modules
.*.sw*
local-db.js


@ -387,7 +387,7 @@ cli.main(function (args, cli) {
}
try {
engine = engine || require('../lib/store.json.js').create(engineOpts);
engine = engine || require('../lib/store').create(engineOpts);
} catch(e) {
respondWithResults(e);
return;
@ -413,7 +413,7 @@ cli.main(function (args, cli) {
}
if (cli.http) {
try {
engine = engine || require('../lib/store.json.js').create(engineOpts);
engine = engine || require('../lib/store').create(engineOpts);
} catch(e) {
console.error(e);
return;


@ -184,7 +184,7 @@ function getNs(engine, zs, results, cb) {
// d.vanityNs should only be vanity nameservers (pointing to this same server)
if (z.vanityNs || results.authority.some(function (ns) {
console.log('[debug] ns', ns);
return -1 !== engine.primaryNameservers.indexOf(ns.data.toLowerCase());
return -1 !== engine.primaryNameservers().indexOf(ns.data.toLowerCase());
})) {
results.authority.length = 0;
results.authority.push(engine.zones._toSoa(z));
@ -359,7 +359,7 @@ module.exports.query = function (engine, query, cb) {
// NOTE: I think that the issue here is EXTERNAL vs INTERNAL vanity NS
// We _should_ reply for EXTERNAL vanity NS... but not when it's listed on the SOA internally?
// It's surrounding the problem of what if I do sub domain delegation to the same server.
if (-1 === engine.primaryNameservers.indexOf(r.data.toLowerCase())) {
if (-1 === engine.primaryNameservers().indexOf(r.data.toLowerCase())) {
console.log("It's a vanity NS");
return false;
}


@ -230,6 +230,11 @@ module.exports.create = function (cli, engine/*, dnsd*/) {
zone.class = zone.className;
zone.type = zone.typeName;
zone.soa = true;
// TODO: consider sending a predicate object through the engine
// to the actual store in case it is highly inefficient to transfer
// a large number of records from the store that will just be
// thrown away.
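// For illustration only - a predicate-accepting signature like the following is
// hypothetical and does not exist yet:
//   engine.records.all({ zone: zonename }, function (err, records) { ... });
// which would let the store filter before transferring anything.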
engine.records.all(function (err, records) {
records = records.filter(function (r) {
return r.zone === zonename;
@ -239,6 +244,8 @@ module.exports.create = function (cli, engine/*, dnsd*/) {
});
});
});
// I wonder what an API that gets ALL records from all zones is for
app.get('/api/records', function (req, res) {
engine.records.all(function (err, records) {
res.send({ records: records.map(mapRecord) });


@ -1,101 +1,80 @@
'use strict';
var crypto = require('crypto');
var dns = require('dns');
var os = require('os');
var path = require('path');
var pathResolvers = {
'.': function fromCwd(relPath) {
return path.join(process.cwd(), relPath);
},
'~': function fromHomedir(relPath) {
if (!os.homedir) {
throw new Error(
'Resolving home directory relative paths is not supported in this version of node.'
);
}
// drop the leading '~' so the remainder of the path is joined under the home directory
return path.join(os.homedir(), relPath.slice(1));
},
noop: function (p) { return p; }
}
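// For example (illustrative values): a filepath of './db.json' resolves relative to
// process.cwd(), '~/dns/db.json' resolves under os.homedir(), and anything else
// (an absolute path or a module id such as 'my-dns-store') is passed through as-is.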
module.exports.create = function (opts) {
// opts = { filepath };
// `opts.filepath` is a module id or path to a module that contains a store plugin or file
var pathFn = pathResolvers[opts.filepath[0]] || pathResolvers.noop;
var storeId = pathFn(opts.filepath);
var pathToStore = require.resolve(storeId);
var engine = { db: null };
function notDeleted(r) {
return !r.revokedAt && !r.deletedAt;
}
var db = require(opts.filepath);
var stat = require('fs').statSync(opts.filepath);
var crypto = require('crypto');
//
// Manual Migration
//
db.primaryNameservers.forEach(function (ns, i, arr) {
if ('string' === typeof ns) {
ns = { name: ns };
arr[i] = ns;
}
if (!ns.id) {
ns.id = crypto.randomBytes(16).toString('hex');
}
});
db.zones = db.zones || [];
if (db.domains) {
db.zones = db.zones.concat(db.domains);
}
db.zones.forEach(function (zone) {
if (!zone.name) {
zone.name = zone.id;
zone.id = null;
}
if (!zone.id) {
zone.id = crypto.randomBytes(16).toString('hex');
}
if (!zone.createdAt) { zone.createdAt = stat.mtime.valueOf(); }
if (!zone.updatedAt) { zone.updatedAt = stat.mtime.valueOf(); }
});
db.records.forEach(function (record) {
if (!record.id) {
record.id = crypto.randomBytes(16).toString('hex');
}
});
require('fs').writeFileSync(opts.filepath, JSON.stringify(db, null, 2));
//
// End Migration
//
// instantiate the DB module
var db = (pathToStore.slice(-5) === '.json') ?
// JSON files should be loaded using our built-in store.json.js
require('./store.json.js')({ filepath: pathToStore }) :
// everything else should be loaded as a module and passed our opts object
require(storeId)(opts);
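// For example (illustrative values): an opts.filepath of './db.json' is handed to the
// built-in ./store.json.js, while an opts.filepath of 'my-redis-store' (a hypothetical
// module) would be require()d and its exported factory called with the opts object -
// this is the hook that enables non-JSON stores.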
db.save = function (cb) {
if (db.save._saving) {
console.log('make pending');
db.save._pending.push(cb);
return;
}
// TODO: examine usage of engine.primaryNameservers to see if we are supporting it right
engine.primaryNameservers = db.primaryNameservers.list;
db.save._saving = true;
require('fs').writeFile(opts.filepath, JSON.stringify(db, null, 2), function (err) {
console.log('done writing');
var pending = db.save._pending.splice(0);
db.save._saving = false;
cb(err);
if (!pending.length) {
return;
}
db.save(function (err) {
console.log('double save');
pending.forEach(function (cb) { cb(err); });
});
});
};
db.save._pending = [];
engine.primaryNameservers = db.primaryNameservers;
engine.peers = {
all: function (cb) {
var dns = require('dns');
var count = db.primaryNameservers.length;
function gotRecord() {
count -= 1;
if (!count) {
cb(null, db.primaryNameservers);
}
}
function getRecord(ns) {
var pNS = db.primaryNameservers.list();
function getRecord(ns, done) {
dns.resolve4(ns.name, function (err, addresses) {
console.log('ns addresses:');
console.log(addresses);
if (err) { console.error(err); gotRecord(); return; }
if (err) { console.error(err); done(); return; }
ns.type = 'A';
ns.address = addresses[0];
gotRecord();
done();
});
}
db.primaryNameservers.forEach(getRecord);
// resolve addresses for all of the primary nameservers in parallel,
// collecting a status entry for each so `done` can tell when all have finished
var tasks = pNS.map(function (ns) {
var status = { pending: true };
function done() {
status.pending = false;
// TODO: determine if the locally stored records should get updated
var incomplete = tasks.filter(function (s) { return s.pending; });
if (incomplete.length < 1) {
cb(null, pNS);
}
}
getRecord(ns, done);
return status;
});
}
};
engine.zones = {
_immutableKeys: [ 'id', 'name', 'primary', 'serial', 'revokedAt', 'changedAt', 'insertedAt', 'updatedAt', 'deletedAt' ]
, _mutableKeys: [ 'admin', 'expiration', 'minimum', 'refresh', 'retry', 'ttl', 'vanity' ]
@ -105,8 +84,9 @@ module.exports.create = function (opts) {
// epoch in seconds will do
return parseInt(Math.round(date/1000).toString().slice(-10), 10);
}
// NOTE/TODO: despite the _, _toSoa is used outside this file (in lib/digd.js and lib/httpd.js)
, _toSoa: function (domain) {
var nameservers = domain.vanityNs || engine.primaryNameservers.map(function (n) { return n.name; });
var nameservers = domain.vanityNs || engine.primaryNameservers().map(function (n) { return n.name; });
var index = Math.floor(Math.random() * nameservers.length) % nameservers.length;
var nameserver = nameservers[index];
@ -122,6 +102,7 @@ module.exports.create = function (opts) {
, name_server: nameserver
// admin -- email address or domain for admin
// default is effectively admin@{domain name}
, admin: domain.admin || ('admin.' + domain.name)
, email_addr: domain.admin || ('admin.' + domain.name)
@ -148,7 +129,7 @@ module.exports.create = function (opts) {
}
, all: function (cb) {
process.nextTick(function () {
cb(null, db.zones.slice(0).filter(notDeleted));
cb(null, db.zones().filter(notDeleted));
});
}
, get: function (queries, cb) {
@ -157,7 +138,7 @@ module.exports.create = function (opts) {
return { name: n };
});
}
var myDomains = db.zones.filter(function (d) {
var myDomains = db.zones().filter(function (d) {
return queries.some(function (q) {
return (d.name.toLowerCase() === q.name) && notDeleted(d);
});
@ -167,19 +148,17 @@ module.exports.create = function (opts) {
});
}
, touch: function (zone, cb) {
var existing;
db.zones.some(function (z) {
if (z.id && zone.id === z.id) { existing = z; return true; }
if (z.name && zone.name === z.name) { existing = z; return true; }
});
if (!existing) {
cb(null, null);
db.zones.get(zone, function (err, existing) {
if (err || !existing) {
cb(err, null);
return;
}
existing.updatedAt = new Date().valueOf(); // toISOString();
console.log('touch saving...');
db.zones.update(existing, function (err) {
cb(err, !err && existing || null);
});
return;
}
existing.updatedAt = new Date().valueOf(); // toISOString();
console.log('touch saving...');
db.save(function (err) {
cb(err, !err && existing || null);
});
}
, save: function (zone, cb) {
@ -191,65 +170,69 @@ module.exports.create = function (opts) {
}
}
, update: function (zone, cb) {
var existing;
var dirty;
db.zones.get({ id: zone.id }, function (err, found) {
var dirty;
db.zones.some(function (z) {
if (z.id === zone.id) {
existing = z;
return true;
if (err) {
console.log('error finding zone');
cb(new Error("Error finding zone for '" + zone.id + "'"), null);
return;
}
});
if (!existing) {
console.log('no existing zone');
cb(new Error("zone for '" + zone.id + "' does not exist"), null);
return;
}
console.log('found existing zone');
console.log(existing);
console.log(zone);
Object.keys(zone).forEach(function (key) {
if (-1 !== engine.zones._immutableKeys.indexOf(key)) { return; }
if (existing[key] !== zone[key]) {
dirty = true;
console.log('existing key', key, existing[key], zone[key]);
existing[key] = zone[key];
if (!found) {
console.log('no existing zone');
cb(new Error("zone for '" + zone.id + "' does not exist"), null);
return;
}
});
zone.updatedAt = new Date().valueOf(); // toISOString(); // Math.round(Date.now() / 1000);
if (dirty) {
zone.changedAt = zone.updatedAt;
}
console.log('found existing zone');
console.log(found);
console.log(zone);
Object.keys(zone).forEach(function (key) {
if (-1 !== engine.zones._immutableKeys.indexOf(key)) { return; }
if (found[key] !== zone[key]) {
dirty = true;
console.log('existing key', key, found[key], zone[key]);
found[key] = zone[key];
}
});
console.log('saving...');
db.save(function (err) {
cb(err, !err && existing || null);
found.updatedAt = new Date().valueOf(); // toISOString(); // Math.round(Date.now() / 1000);
if (dirty) {
found.changedAt = found.updatedAt;
}
console.log('saving...');
db.zones.update(found, function (err) {
cb(err, !err && found || null);
});
});
}
, create: function (zone, cb) {
var newZone = { id: crypto.randomBytes(16).toString('hex') };
var existing;
var nss = [];
zone.name = (zone.name||'').toLowerCase();
db.zones.some(function (z) {
if (z.name === zone.name) {
existing = z;
return true;
}
});
if (existing) {
cb(new Error("tried to create new zone, but '" + existing.name + "' already exists"));
var zoneName = (zone.name||'').toLowerCase();
db.zones.get({ name: zoneName }, function (err, found) {
if (err) {
console.error(err);
cb(new Error("error attempting to create new zone '" + zoneName + "'"));
return;
}
newZone.name = zone.name;
if (found) {
cb(new Error("tried to create new zone, but '" + found.name + "' already exists"));
return;
}
var newZone = {
id: crypto.randomBytes(16).toString('hex'),
name: zoneName
};
var nss = [];
newZone.createdAt = Date.now();
newZone.updatedAt = newZone.createdAt;
/*
Set only the mutable keys in the new zone from the proposed zone object
*/
Object.keys(zone).forEach(function (key) {
//if (-1 !== engine.zones._immutableKeys.indexOf(key)) { return; }
if (-1 === engine.zones._mutableKeys.indexOf(key)) { return; }
@ -262,7 +245,12 @@ module.exports.create = function (opts) {
} else {
newZone.vanity = false;
}
db.primaryNameservers.forEach(function (ns, i) {
// TODO: distinguish between primary and secondary zones
// TODO: determine if we need to do anything special for delegation
// create records for the primary nameservers (or vanity name servers)
db.primaryNameservers.list().forEach(function (ns, i) {
var nsx = 'ns' + (i + 1);
var nsZone;
var ttl = 43200; // 12h // TODO pick a well-reasoned number
@ -302,7 +290,13 @@ module.exports.create = function (opts) {
});
});
db.zones.push(newZone);
db.zones.create(newZone, function (err) {
// WIP: going to need to figure out how to manage this as a transaction.
// A significant benefit of having records owned by the zone is that we won't have
// records for zones that don't otherwise exist - at least at the engine level.
// Every line below this one is not yet modified...
});
nss.forEach(function (ns) {
db.records.push(ns);
});

lib/store/store.json.js (new file)

@ -0,0 +1,449 @@
'use strict';
var crypto = require('crypto');
function jsonDeepClone(target) {
return JSON.parse(
JSON.stringify(target)
);
}
function mergeObjects() {
// arguments should be an array of objects. We
// reverse it because the last argument to set
// a value wins.
var args = [].slice.call(arguments).reverse();
var len = args.length;
if (len === 1) {
return args[0];
}
// gather the set of keys from all arguments
var keyLists = args.map(function (arg) {
return Object.keys(arg);
});
var keys = Object.keys(keyLists.reduce(function (all, list) {
list.forEach(function (k) {
all[k] = true;
});
return all;
}, {}));
// for each key
return keys.reduce(function (target, k) {
// find the first argument (because of the reverse() above) with the key set
var values = [];
var isObject = false;
for (var i = 0; i < len; i++) {
var v = args[i][k];
var vType = typeof v;
if (vType === 'object') {
if (!v) {
// typeof null is object. null is the only falsey object. null represents
// a delete or the end of our argument list;
break;
}
// we need to collect values until we get a non-object, so we can merge them
values.push(v);
isObject = true;
} else if (!isObject) {
if (vType === 'undefined') {
// if the arg actually has the key set this is effectively a "delete"
if (keyLists[i].indexOf(k) !== -1) {
break;
}
// otherwise we need to check the next argument's value, so we don't break the loop
} else {
values.push(v);
break;
}
} else {
// a previous value was an object, this one isn't
// That means we are done collecting values.
break;
}
}
if (values.length > 0) {
target[k] = mergeObjects.apply(null, values);
}
return target;
}, {});
}
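// For example (illustrative values):
//   mergeObjects({ ttl: 300, soa: { retry: 100 } }, { soa: { refresh: 200 } })
//     -> { ttl: 300, soa: { retry: 100, refresh: 200 } }
//   mergeObjects({ ttl: 300 }, { ttl: null })
//     -> {}   // null (or an explicitly undefined key) marks a delete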
function prepareZone(zone, options) {
var opts = options || {};
var timestamp = opts.timestamp || Date.now();
if (!zone.name) {
zone.name = zone.id;
zone.id = null;
}
if (!zone.id) {
zone.id = crypto.randomBytes(16).toString('hex');
}
if (!zone.createdAt) { zone.createdAt = timestamp; }
if (!zone.updatedAt || opts.isUpdate) { zone.updatedAt = timestamp; }
// create a record set for the zone, keyed by record name; each name maps to an
// object of that name's records keyed by record id (a name may hold several
// records of various types: A, MX, TXT, etc.)
zone.records = zone.records || {};
return zone;
}
/*
`init()` should return a `lock(forOps)` function, where `forOps` describes the portions
of the database that we need to obtain a lock for (so we can write to them). If `forOps`
is undefined, we only need to read the currently valid data.
`lock(forOps)` should return an object with: {
save: function -> undefined - changes to in memory representation should be persisted.
This could be considered the equivalent of committing a transaction to the database.
This will release any write lock obtained. `save()` will return an error if no write
lock was obtained OR writes are made to locations other than those that were locked.,
discard: function -> undefined - changes to in memory representation should be discarded.
This could be considered the equivalent of cancelling a transaction to the database.
This will release any write lock obtained.,
peers: {
list: function -> list FQDNs that we expect to be in sync with this server
},
zones: {
list: function -> list zones,
write:
delete:
},
records: {
list: function -> list records,
write:
delete:
}
}
All lists will be a deep copy of the data actually stored.
*/
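// Example usage of the store as it exists so far (a sketch; the file path, zone name,
// and record fields below are illustrative, and `lock()` currently ignores `forOps`
// and returns the API directly):
//
//   var lock = require('./store.json.js')({ filepath: '/tmp/dns.db.json' });
//   var store = lock({ write: { zone: 'example.com', names: ['www.example.com'], records: [] } });
//   store.zones.list({ name: 'example.com' }, null, function (err, zones) { /* ... */ });
//   store.records.write(
//     { zone: 'example.com', name: 'www.example.com', type: 'A', address: '192.0.2.1' },
//     function (err, record) { /* record.id is generated when not supplied */ }
//   );
//   store.save(function (err) { /* changes have been flushed back to the JSON file */ });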
module.exports = function init (opts) {
// opts = { filepath };
var fsDb = require(opts.filepath);
var mtime = require('fs').statSync(opts.filepath).mtime.valueOf();
//
// Migration from other formats
//
// Convert the primary nameservers from an array of strings to objects with names and IDs.
// also switch to the 'peers' name, since we are really interested in the other FQDNs that
// use the same data store and are kept in sync.
var peerList = (!fsDb.peers || Array.isArray(fsDb.peers))? fsDb.peers : Object.keys(fsDb.peers).map(function (p) {
return fsDb.peers[p];
});
fsDb.peers = [].concat(fsDb.primaryNameservers, peerList).filter(function (p) {
// filter out empty strings, undefined, etc.
return !!p;
}).map(function (ns) {
var peer = ('string' === typeof ns) ? { name: ns } : ns;
if (!peer.id) {
peer.id = crypto.randomBytes(16).toString('hex');
}
return peer;
}).reduce(function (peers, p) {
peers[p.name] = p;
return peers;
}, {});
delete fsDb.primaryNameservers;
// Convert domains to zones and ensure that they have proper IDs and timestamps
// Organize zones as a set of zone names
var zoneList = (!fsDb.zones || Array.isArray(fsDb.zones))? fsDb.zones : Object.keys(fsDb.zones).map(function (z) {
return fsDb.zones[z];
});
fsDb.zones = [].concat(fsDb.domains, zoneList).filter(function (z) {
// filter out empty strings, undefined, etc.
return !!z;
}).map(function (zone) {
return prepareZone(zone, { timestamp: mtime });
}).reduce(function (zones, z) {
zones[z.name] = z;
return zones;
}, {});
delete fsDb.domains;
// NOTE: Records belong to zones, but they previously referred to them only by a
// zone property. This may pose problems where the whole list of records is not easily
// filtered / kept in memory / indexed and/or retrieved by zone. Traditionally,
// records are stored "within a zone" in a zone file. We want to have the store API
// behave more traditionally, even though some stores (like a SQL database
// table) might actually store the zone as a property of a record as we currently do.
// (This fits with the somewhat unexpected and confusing logic of wildcard records.)
(fsDb.records || []).forEach(function (record) {
// make sure the record has an ID
if (!record.id) {
record.id = crypto.randomBytes(16).toString('hex');
}
// Put it in its zone - synthesize one if needed
fsDb.zones[record.zone] = fsDb.zones[record.zone] || prepareZone({ name: record.zone });
var zone = fsDb.zones[record.zone];
// Keep in mind that each name may have multiple records (whether or not they are
// of different types, classes, etc.), but each record must have a unique ID.
zone.records[record.name] = zone.records[record.name] || {};
var recordsForName = zone.records[record.name];
recordsForName[record.id] = record;
});
delete fsDb.records;
// Write the migrated data
require('fs').writeFileSync(opts.filepath, JSON.stringify(fsDb, null, 2));
//
// End Migration
//
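// After migration the on-disk JSON looks roughly like this (names, ids, and the
// record's fields are illustrative):
//
//   {
//     "peers": {
//       "ns1.example.com": { "name": "ns1.example.com", "id": "2ab6..." }
//     },
//     "zones": {
//       "example.com": {
//         "id": "9c41...", "name": "example.com",
//         "createdAt": 1522270000000, "updatedAt": 1522270000000,
//         "records": {
//           "www.example.com": {
//             "5f2e...": { "id": "5f2e...", "zone": "example.com",
//                          "name": "www.example.com", "type": "A", "address": "192.0.2.1" }
//           }
//         }
//       }
//     }
//   }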
return function lock(forOps) {
/*
forOps : {
write: {
zone: string - required - a zone name,
names: [string] - optional - a list of record names that may be modified. May be 0 length,
records: [string] - optional - a list of record IDs that may be modified. May be 0 length (default)
}
}
1. You can't get a lock for a whole zone without first releasing any locks for names and records
within the zone. A whole zone lock will block new locks for names and records within that zone.
2. You can't get a lock for a name within a zone without first releasing any locks for records
within that name and zone.
3. Locks for a specific record do not block new locks with the same zone, name, but a different
record ID.
4. Creating a new zone, name, or record requires obtaining a lock for its key (name or ID), even
though it does not exist yet. This prevents race conditions where 2 requests (or processes) attempt
to create the same resource at the same time.
Note: The UI probably needs to know if it is trying to write based on an outdated copy of data. Such
writes should be detected and fail loudly.
Locks probably involve lockfiles on the filesystem (with watches) so that writes and locks can be
communicated easily across processes.
*/
var db = mergeObjects(fsDb);
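// `save` serializes writes to the backing JSON file: while a write is in flight,
// additional callers are queued on `save._pending`; when the write finishes, one
// more save is performed for the whole queued batch and each pending callback is
// invoked with that result.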
var save = function save (cb) {
if (save._saving) {
console.log('make pending');
save._pending.push(cb);
return;
}
save._saving = true;
require('fs').writeFile(opts.filepath, JSON.stringify(db, null, 2), function (err) {
console.log('done writing');
var pending = save._pending.splice(0);
save._saving = false;
cb(err);
if (!pending.length) {
return;
}
save(function (err) {
console.log('double save');
pending.forEach(function (cb) { cb(err); });
});
});
};
save._pending = [];
function matchPredicate(predicate) {
return function (toCheck) {
// which items match the predicate?
if (!toCheck) {
return false;
}
// check all the keys in the predicate - only supporting exact match
// of at least one listed option for all keys right now
if (Object.keys(predicate || {}).some(function (k) {
return [].concat(predicate[k]).indexOf(toCheck[k]) === -1;
})) {
return false;
}
// we have a match
return true;
};
}
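// For example (illustrative values):
//   matchPredicate({ type: ['A', 'AAAA'], name: 'www.example.com' })
// returns a function that is true only for objects whose `type` is 'A' or 'AAAA'
// AND whose `name` is exactly 'www.example.com'; an empty or missing predicate
// matches any truthy object.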
function matchZone(predicate) {
var zonenames = !!predicate.name ? [].concat(predicate.name) : Object.keys(db.zones);
var check = matchPredicate(predicate);
// TODO: swap the filter() for a functional style "loop" recursive function
// that lets us return early if we have a limit, etc.
var found = zonenames.filter(function (zonename) {
/*
if (predicate.id && predicate.id !== z.id) { return false; }
if (predicate.name && predicate.name !== z.name) { return false; }
*/
return check(db.zones[zonename]);
}).map(function (zonename) {
return db.zones[zonename];
});
return found;
}
// NOTE: `opts` exists so we can add options - like properties to read - easily in the future
// without modifying the function signature
function listZones(predicate, opts, cb) {
var found = jsonDeepClone(matchZone(predicate))
return setImmediate(cb, null, found);
}
function writeZone(zone, cb) {
// matchZone is synchronous; it returns the matching zones directly
var found = matchZone({ name: zone.name })[0];
var isUpdate = !!found;
var combined = mergeObjects((found || {}), zone);
db.zones[zone.name] = prepareZone(combined, { isUpdate: isUpdate });
return setImmediate(function () {
cb(null, jsonDeepClone(db.zones[zone.name]));
});
}
function deleteZone(zone, cb) {
var found = matchZone({ name: zone.name })[0];
if (!found) {
return setImmediate(cb, new Error('Zone not found'));
}
delete db.zones[zone.name];
return setImmediate(cb);
}
function listRecords(rPredicate, cb) {
var recordNames = [].concat(rPredicate.name);
var check = matchPredicate(rPredicate);
var found = matchZone({ name: rPredicate.zone }).reduce(function (records, zone) {
// get the records from the zone that match the record predicate
var zFound = recordNames.filter(function (name) {
return !!zone.records[name];
}).map(function (name) {
return Object.keys(zone.records[name]).map(function (id) {
return zone.records[name][id];
}).filter(check);
});
// zFound is a list of record lists (one per matching name); flatten while accumulating
return records.concat.apply(records, zFound);
}, []);
return setImmediate(cb, null, jsonDeepClone(found));
}
function modifyRecords (record, options, cb) {
var opts = options || {};
var isDelete = !!opts.isDelete;
if (!record.zone) {
return setImmediate(cb, new Error('No zone specified for record'));
}
if (!record.name) {
return setImmediate(cb, new Error('No name specified for record'));
}
if (isDelete && !record.id) {
return setImmediate(cb, new Error('No id specified to delete record'));
}
var zone = matchZone({ name: record.zone })[0];
if (!zone) {
return setImmediate(cb, new Error('Unable to find zone ' + record.zone + ' for record'));
}
var isUpdate = !!(record.id && !isDelete);
// only generate an id when creating a new record (updates and deletes supply one)
if (!record.id) {
record.id = crypto.randomBytes(16).toString('hex');
}
var recordsForName = zone.records[record.name] = zone.records[record.name] || {};
var found = recordsForName[record.id];
if ((isUpdate || isDelete) && !found) {
return setImmediate(cb, new Error('Unable to find record with ID: ' + record.id));
}
if (!isDelete) {
recordsForName[record.id] = mergeObjects((found || {}), record);
} else {
// a delete simply drops the record from this name's record set
delete recordsForName[record.id];
}
// write the updated record set back through the zone (keyed by the zone's name)
var zoneUpdate = {
name: record.zone,
records: {}
};
zoneUpdate.records[record.name] = recordsForName;
return writeZone(zoneUpdate, function (err) {
if (err) {
return cb(err);
}
return cb(
null,
isDelete ? null : jsonDeepClone(recordsForName[record.id])
);
});
}
function writeRecord(record, cb) {
modifyRecords(record, null, cb);
}
function deleteRecord(record, cb) {
modifyRecords(record, { isDelete: true }, cb);
}
var dbApi = {
save: function () {
// hide _pending and _saving from callers
var args = [].slice.call(arguments);
return save.apply(null, args);
},
// peers really isn't editable - it's literally the list of FQDNs
// that this database is replicated to in a multi-master fashion.
//
// However, lib/store/index.js does plenty to update these records in support
// of the SOA records that are built from them (as does this file in the "migration"
// section). I'm toying with the idea of not storing them separately or creating the
// SOA records somewhat immediately.
peers: function listPeers(cb) {
// Most data stores are going to have an asynchronous storage API. If we need
// synchronous access to the data it is going to have to be cached. If it is
// cached, there is still the issue the cache getting out of sync (a legitimate
// issue anyway). If we explicitly make all of these operations async then we
// have greater flexibility for store implementations to address these issues.
return setImmediate(cb, null, jsonDeepClone(db.peers));
},
zones: {
list: listZones,
write: writeZone,
delete: deleteZone
},
records: {
list: listRecords,
write: writeRecord,
delete: deleteRecord
}
};
return dbApi;
};
};