In progress: Create store API to enable non-json based stores #9

Open
Ghost wants to merge 3 commits from (deleted):js-stores into httpd
1 changed files with 269 additions and 256 deletions
Showing only changes of commit 077c259272 - Show all commits

View File

@ -95,22 +95,29 @@ function prepareZone(zone, options) {
} }
/* /*
init() should return an object with: { `init()` should return a `lock(forOps)` function, where `forOps` describes the portions
save: function -> undefined - changes to in memory representation should be persisted of the database that we need to obtain a lock for (so we can write to them). If `forOps`
This could be considered the equivalent of committing a transaction to the database. is undefined, we only need to read the currently valid data.
primaryNameservers: {
list: function -> list nameservers `lock(forOps)` should return an object with: {
save: function -> undefined - changes to in memory representation should be persisted.
This could be considered the equivalent of committing a transaction to the database.
This will release any write lock obtained. `save()` will return an error if no write
lock was obtained OR writes are made to locations other than where locked.,
discard: function -> undefined - changes to in memory representation should be discarded.
This could be considered the equivalent of cancelling a transaction to the database.
This will release any write lock obtained.,
peers: {
list: function -> list FQDNs that we expect to be in sync with this server
}, },
zones: { zones: {
list: function -> list zones, list: function -> list zones,
create: write:
update:
delete: delete:
}, },
records: { records: {
list: function -> list records, list: function -> list records,
create: write:
update:
delete: delete:
} }
} }
@ -121,7 +128,7 @@ All lists will be a deep copy of the data actually stored.
module.exports = function init (opts) { module.exports = function init (opts) {
// opts = { filepath }; // opts = { filepath };
var db = require(opts.filepath); var fsDb = require(opts.filepath);
var mtime = require('fs').statSync(opts.filepath).mtime.valueOf(); var mtime = require('fs').statSync(opts.filepath).mtime.valueOf();
// //
@ -131,15 +138,15 @@ module.exports = function init (opts) {
// Convert the primary nameservers from an array of strings to objects with names and IDs. // Convert the primary nameservers from an array of strings to objects with names and IDs.
// also switch to the 'peers' name, since we are really interested in the other FQDNs that // also switch to the 'peers' name, since we are really interested in the other FQDNs that
// use the same data store and are kept in sync. // use the same data store and are kept in sync.
var peerList = (!db.peers || Array.isArray(db.peers))? db.peers : Object.keys(db.peers).map(function (p) { var peerList = (!fsDb.peers || Array.isArray(fsDb.peers))? fsDb.peers : Object.keys(fsDb.peers).map(function (p) {
return db.peers[p]; return fsDb.peers[p];
}); });
db.peers = [].concat(db.primaryNameservers, peerList).filter(function (p) { fsDb.peers = [].concat(fsDb.primaryNameservers, peerList).filter(function (p) {
// filer out empty strings, undefined, etc. // filter out empty strings, undefined, etc.
return !!p; return !!p;
}).map(function (ns) { }).map(function (ns) {
var peer = ('string' === typeof ns)? ns : { name: ns }; var peer = ('string' === typeof ns)? ns : { name: ns };
if (!ns.id) { if (!peer.id) {
peer.id = crypto.randomBytes(16).toString('hex'); peer.id = crypto.randomBytes(16).toString('hex');
} }
return peer; return peer;
@ -147,16 +154,16 @@ module.exports = function init (opts) {
peers[p.name] = p; peers[p.name] = p;
return peers; return peers;
}, {}); }, {});
delete db.primaryNameservers; delete fsDb.primaryNameservers;
// Convert domains to zones and ensure that they have proper IDs and timestamps // Convert domains to zones and ensure that they have proper IDs and timestamps
// Organize zones as a set of zone names // Organize zones as a set of zone names
var zoneList = (!db.zones || Array.isArray(db.zones))? db.zones : Object.keys(db.zones).map(function (z) { var zoneList = (!fsDb.zones || Array.isArray(fsDb.zones))? fsDb.zones : Object.keys(fsDb.zones).map(function (z) {
return db.zones[z]; return fsDb.zones[z];
}); });
db.zones = [].concat(db.domains, zoneList).filter(function (z) { fsDb.zones = [].concat(fsDb.domains, zoneList).filter(function (z) {
// filer out empty strings, undefined, etc. // filter out empty strings, undefined, etc.
return !!z; return !!z;
}).map(function (zone) { }).map(function (zone) {
return prepareZone(zone, { timestamp: mtime }); return prepareZone(zone, { timestamp: mtime });
@ -164,7 +171,7 @@ module.exports = function init (opts) {
zones[z.name] = z; zones[z.name] = z;
return zones; return zones;
}, {}); }, {});
delete db.domains; delete fsDb.domains;
// NOTE: Records belong to zones, but they previously referred to them only by a // NOTE: Records belong to zones, but they previously referred to them only by a
// zone property. This may pose problems where the whole list of records is not easily // zone property. This may pose problems where the whole list of records is not easily
@ -173,264 +180,270 @@ module.exports = function init (opts) {
// behave more traditionally, even though some stores (like a SQL database // behave more traditionally, even though some stores (like a SQL database
// table) might actually store the zone as a property of a record as we currently do. // table) might actually store the zone as a property of a record as we currently do.
// (This fits with the somewhat unexpected and confusing logic of wildcard records.) // (This fits with the somewhat unexpected and confusing logic of wildcard records.)
(db.records || []).forEach(function (record) { (fsDb.records || []).forEach(function (record) {
// make sure the record has an ID // make sure the record has an ID
if (!record.id) { if (!record.id) {
record.id = crypto.randomBytes(16).toString('hex'); record.id = crypto.randomBytes(16).toString('hex');
} }
// put it in it's zone - synthesize one if needed // Put it in its zone - synthesize one if needed
db.zones[record.zone] = db.zones[record.zone] || prepareZone({ name: record.zone }); fsDb.zones[record.zone] = fsDb.zones[record.zone] || prepareZone({ name: record.zone });
var zone = db.zones[record.zone]; var zone = fsDb.zones[record.zone];
zone.records[record.name] = zone.records[record.name] || []; // Keep in mind that each name may have multiple records (whether or not they are
// of different types, classes, etc.), but each record must have a unique ID.
zone.records[record.name] = zone.records[record.name] || {};
var recordsForName = zone.records[record.name]; var recordsForName = zone.records[record.name];
recordsForName.push(record); recordsForName[record.id] = record;
}); });
delete db.records; delete fsDb.records;
// Write the migrated data // Write the migrated data
require('fs').writeFileSync(opts.filepath, JSON.stringify(db, null, 2)); require('fs').writeFileSync(opts.filepath, JSON.stringify(fsDb, null, 2));
// //
// End Migration // End Migration
// //
var save = function save (cb) { return function lock(forOps) {
if (save._saving) { /*
console.log('make pending'); forOps : {
save._pending.push(cb); write: {
return; zone: string - required - a zone name,
} names: [string] - optional - a list of record names that may be modified. May be 0 length,
records: [string] - optional - a list of record IDs that may be modified. May be 0 length (default)
}
}
save._saving = true; 1. You can't get a lock for a whole zone without first releasing any locks for names and records
require('fs').writeFile(opts.filepath, JSON.stringify(db, null, 2), function (err) { within the zone. A whole zone lock will block all other locks within that zone.
console.log('done writing'); 2. You can't get a lock for a name within a zone without first releasing any locks for records
var pending = save._pending.splice(0); within that name and zone.
save._saving = false; 3. Locks for a specific record do not block new locks with the same zone, name, but a different
cb(err); record ID.
if (!pending.length) { 4. Creating a new zone, name, or record requires obtaining a lock for its key (name or ID), even
though it does not exist yet. This prevents race conditions where 2 requests (or processes) attempt
to create the same resource at the same time.
Note: The UI probably needs to know if it is trying to write based on an outdated copy of data. Such
writes should be detected and fail loudly.
locks probably involve lockfiles on the filesystem (with watches) so that writes and locks can be
communicated easily across processes.
*/
var db = mergeObjects(fsDb);
var save = function save (cb) {
if (save._saving) {
console.log('make pending');
save._pending.push(cb);
return; return;
} }
save(function (err) {
console.log('double save'); save._saving = true;
pending.forEach(function (cb) { cb(err); }); require('fs').writeFile(opts.filepath, JSON.stringify(db, null, 2), function (err) {
console.log('done writing');
var pending = save._pending.splice(0);
save._saving = false;
cb(err);
if (!pending.length) {
return;
}
save(function (err) {
console.log('double save');
pending.forEach(function (cb) { cb(err); });
});
}); });
});
};
save._pending = [];
function matchPredicate(predicate) {
return function (toCheck) {
// which items match the predicate?
if (!toCheck) {
return false;
}
// check all the keys in the predicate - only supporting exact match
// of at least one listed option for all keys right now
if (Object.keys(predicate || {}).some(function (k) {
return [].concat(predicate[k]).indexOf(toCheck[k]) === -1;
})) {
return false;
}
// we have a match
return true;
}; };
} save._pending = [];
function matchZone(predicate) { function matchPredicate(predicate) {
var zonenames = !!predicate.name ? [].concat(predicate.name) : Object.keys(db.zones); return function (toCheck) {
var check = matchPredicate(predicate); // which items match the predicate?
// TODO: swap the filter() for a functional style "loop" recursive function if (!toCheck) {
// that lets us return early if we have a limit, etc. return false;
var found = zonenames.filter(function (zonename) {
/*
if (predicate.id && predicate.id !== z.id) { return false; }
if (predicate.name && predicate.name !== z.name) { return false; }
*/
return check(db.zones[zonename]);
}).map(function (zonename) {
return db.zones[zonename];
});
return found;
}
var dbApi = {
save: function () {
// hide _pending and _saving from callers
var args = [].slice.call(arguments);
return save.apply(null, args);
},
// peers really isn't editable - it's literally the list of FQDN's
// that this database is replicated to in a multi-master fashion.
//
// However, lib/store/index.js does plenty to update these records in support
// of the SOA records that are built from them (as does this file in the "migration"
// section). I'm toying with the idea of not storing them seperately or creating the
// SOA records somewhat immediately.
peers: function listPeers(cb) {
// Most data stores are going to have an asynchronous storage API. If we need
// synchronous access to the data it is going to have to be cached. If it is
// cached, there is still the issue the cache getting out of sync (a legitimate
// issue anyway). If we explicitly make all of these operations async then we
// have greater flexibility for store implmentations to address these issues.
return setImmediate(cb, null, jsonDeepClone(db.peers));
},
zones: {
/*
I'm fairly certan that zone names must be unique and therefore are legitimately
IDs within the zones namespace. This is similarly true of record names within a zone.
I'm not certain that having a distinct ID adds value and it may add confusion / complexity.
*/
// NOTE: `opts` exists so we can add options - like properties to read - easily in the future
// without modifying the function signature
list: function listZones(predicate, opts, cb) {
// TODO: consider whether we should just return the zone names
var found = jsonDeepClone(matchZone(predicate)).map(function (z) {
// This is fairly inefficient!! Consider alternative storage
// that does not require deleting the records like this.
delete z.records;
return z;
});
return setImmediate(cb, null, found);
},
// // NOTE: I'm not sure we need a distinct 'find()' operation in the API
// // unless we are going to limit the output of the
// // 'list()' operation in some incompatible way.
// // NOTE: `opts` exists so we can add options - like properties to read - easily in the future
// // without modifying the function signature
// find: function getZone(predicate, opts, cb) {
// if (!predicate.name || predicate.id) {
// return setImmediate(cb, new Error('Finding a zone requires a `name` or `id`'));
// }
// // TODO: implement a limit / short circuit and possibly offset
// // to allow for paging of zone data.
// var found = matchZone(predicate);
// if (!found[0]) {
// // TODO: make error message more specific?
// return setImmediate(cb, new Error('Zone not found'));
// }
// var z = jsonDeepClone(found[0]);
// delete z.records;
// return setImmediate(cb, null, z);
// },
create: function createZone(zone, cb) {
// We'll need a lock mechanism of some sort that works
// for simultaneous requests and multiple processes.
matchZone({ name: zone.name }, function (err, matched) {
if (err) {
return setImmediate(cb, err);
}
var found = matched[0];
if (found) {
return setImmediate(cb, new Error('Zone ' + zone.name + ' already exists'));
}
db.zones[zone.name] = prepareZone(zone);
return setImmediate(function () {
cb(null, jsonDeepClone(db.zones[zone.name]));
// release lock
});
});
},
update: function updateZone(zone, cb) {
// We'll need a lock mechanism of some sort that works
// for simultaneous requests and multiple processes.
matchZone({ name: zone.name }, function (err, matched) {
if (err) {
return setImmediate(cb, err);
}
var found = matched[0];
if (!found) {
return setImmediate(cb, new Error('Zone not found'));
}
// make sure we are not writing records through this interface
delete zone.records;
var combined = mergeObjects(found, zone);
db.zones[zone.name] = prepareZone(combined, { isUpdate: true });
return setImmediate(function () {
cb(null, jsonDeepClone(db.zones[zone.name]));
// release lock
});
});
},
delete: function(zone, cb) {
// We'll need a lock mechanism of some sort that works
// for simultaneous requests and multiple processes.
matchZone({ name: zone.name }, function (err, matched) {
if (err) {
return setImmediate(cb, err);
}
var found = matched[0];
if (!found) {
return setImmediate(cb, new Error('Zone not found'));
}
delete db.zones[zone.name];
return setImmediate(function () {
cb();
// release lock
});
});
}
},
records: {
list: function listRecords(rPredicate, cb) {
var recordNames = [].concat(rPredicate.name);
var check = matchPredicate(rPredicate);
var found = matchZone({ name: rPredicate.zone }).reduce(function (records, zone) {
// get the records from the zone that match the record predicate
var zFound = recordNames.filter(function (name) {
return !!zone.records[name];
}).map(function (name) {
return zone.records[name].filter(check);
});
return records.concat(zFound);
}, []);
return setImmediate(cb, null, jsonDeepClone(found));
},
// find: function getRecord(rPredicate, cb) {
// var recordNames = [].concat(rPredicate.name);
// var check = matchPredicate(rPredicate);
// // TODO: swap the `filter()` and `map()` for a functional style "loop"
// // recursive function that lets us return early if we have a limit, etc.
// var found = matchZone({ name: rPredicate.zone }).reduce(function (records, zone) {
// // get the records from the zone that match the record predicate
// var zFound = recordNames.filter(function (name) {
// return !!zone.records[name];
// }).map(function (name) {
// return zone.records[name].filter(check);
// });
// return records.concat(zFound);
// }, []);
// return setImmediate(cb, null, jsonDeepClone(found[0]));
// },
create: function(record, cb) {
var zone = matchZone({ name: record.zone })[0];
if (!zone) {
return setImmediate(cb, new Error('Unble to find zone ' + record.zone + ' to create record'));
} }
var records = zone.records[record.name] = zone.records[record.name] || []; // check all the keys in the predicate - only supporting exact match
var check = matchPredicate(record); // of at least one listed option for all keys right now
if (records.filter(check)[0]) { if (Object.keys(predicate || {}).some(function (k) {
return setImmediate(cb, new Error('Exact record already exists in zone ' + record.zone )); return [].concat(predicate[k]).indexOf(toCheck[k]) === -1;
})) {
return false;
} }
return setImmediate(cb, null, jsonDeepClone(found)); // we have a match
}, return true;
update: function(record, cb) {}, };
delete: function(record, cb) {}
} }
};
return dbApi; function matchZone(predicate) {
var zonenames = !!predicate.name ? [].concat(predicate.name) : Object.keys(db.zones);
var check = matchPredicate(predicate);
// TODO: swap the filter() for a functional style "loop" recursive function
// that lets us return early if we have a limit, etc.
var found = zonenames.filter(function (zonename) {
/*
if (predicate.id && predicate.id !== z.id) { return false; }
if (predicate.name && predicate.name !== z.name) { return false; }
*/
return check(db.zones[zonename]);
}).map(function (zonename) {
return db.zones[zonename];
});
return found;
}
// NOTE: `opts` exists so we can add options - like properties to read - easily in the future
// without modifying the function signature
// List zones matching `predicate` (all zones when `predicate.name` is absent).
// Calls back with a deep copy so callers cannot mutate the in-memory store.
function listZones(predicate, opts, cb) {
  var found = jsonDeepClone(matchZone(predicate));
  return setImmediate(cb, null, found);
}
// Create or update the zone named `zone.name`, merging `zone` over any existing
// data, and call back with a deep copy of the stored result.
//
// BUG FIX: matchZone() is synchronous and accepts only a predicate (see its
// definition); the previous code passed it a callback that was never invoked,
// so `cb` never fired. Use the synchronous return value instead.
function writeZone(zone, cb) {
  var found = matchZone({ name: zone.name })[0];
  var isUpdate = !!found;
  var combined = mergeObjects((found || {}), zone);
  db.zones[zone.name] = prepareZone(combined, { isUpdate: isUpdate });
  return setImmediate(function () {
    cb(null, jsonDeepClone(db.zones[zone.name]));
  });
}
// Remove the zone named `zone.name` from the store; errors if it does not exist.
//
// BUG FIX: matchZone() is synchronous and accepts only a predicate; the
// previous callback-style call meant `cb` was never invoked. Use the
// synchronous return value instead.
function deleteZone(zone, cb) {
  var found = matchZone({ name: zone.name })[0];
  if (!found) {
    return setImmediate(cb, new Error('Zone not found'));
  }
  delete db.zones[zone.name];
  return setImmediate(cb);
}
// List records matching `rPredicate` (must include `zone` and `name`; other
// keys are matched exactly via matchPredicate). Calls back with a deep copy.
//
// BUG FIX: the previous `records.concat(zFound)` appended the per-name ARRAYS
// produced by map(), so callers received a nested list-of-lists instead of a
// flat list of records. Flatten while collecting instead.
function listRecords(rPredicate, cb) {
  var recordNames = [].concat(rPredicate.name);
  var check = matchPredicate(rPredicate);
  var found = matchZone({ name: rPredicate.zone }).reduce(function (records, zone) {
    // get the records from the zone that match the record predicate
    var zFound = recordNames.filter(function (name) {
      return !!zone.records[name];
    }).reduce(function (flat, name) {
      // Each name maps record IDs to records; collect the matching values.
      return flat.concat(Object.keys(zone.records[name]).map(function (id) {
        return zone.records[name][id];
      }).filter(check));
    }, []);
    return records.concat(zFound);
  }, []);
  return setImmediate(cb, null, jsonDeepClone(found));
}
// Create, update, or delete a single record within its zone, then persist the
// change by writing the owning zone. Calls back with a deep copy of the stored
// record, or null for deletes.
//
// BUG FIXES vs the previous revision:
//   1. Deletes assigned a fresh random ID before lookup (clobbering the ID to
//      delete), so every delete failed with "Unable to find record".
//   2. The delete path never actually removed the record from its name's map.
//   3. `zoneUpdate.name` used record.name, but writeZone() matches zones by
//      zone name, so the update targeted (or created) the wrong zone.
//   4. `zoneUpdate.records[record.name] = keep;` referenced an undefined
//      identifier (`keep`), throwing a ReferenceError at runtime.
//   5. Typo "Unble" in the zone-not-found error message.
function modifyRecords (record, options, cb) {
  var opts = options || {};
  var isDelete = !!opts.isDelete;
  if (!record.zone) {
    return setImmediate(cb, new Error('No zone specified for record'));
  }
  if (!record.name) {
    return setImmediate(cb, new Error('No name specified for record'));
  }
  if (isDelete && !record.id) {
    return setImmediate(cb, new Error('No id specified to delete record'));
  }
  var zone = matchZone({ name: record.zone })[0];
  if (!zone) {
    return setImmediate(cb, new Error('Unable to find zone ' + record.zone + ' for record'));
  }
  var isUpdate = (record.id && !isDelete);
  if (!isUpdate && !isDelete) {
    // Creating: mint a unique ID for the new record.
    record.id = crypto.randomBytes(16).toString('hex');
  }
  var recordsForName = zone.records[record.name] = zone.records[record.name] || {};
  var found = recordsForName[record.id];
  if ((isUpdate || isDelete) && !found) {
    return setImmediate(cb, new Error('Unable to find record with ID: ' + record.id));
  }
  if (isDelete) {
    delete recordsForName[record.id];
  } else {
    recordsForName[record.id] = (mergeObjects((found || {}), record));
  }
  var zoneUpdate = {
    name: record.zone,
    records: {}
  };
  zoneUpdate.records[record.name] = recordsForName;
  return writeZone(zoneUpdate, function (err) {
    if (err) {
      return cb(err);
    }
    return cb(
      null,
      isDelete ? null : jsonDeepClone(recordsForName[record.id])
    );
  });
}
// Thin wrapper over modifyRecords() for creates/updates (no delete flag).
function writeRecord(rec, done) {
  modifyRecords(rec, null, done);
}
// Thin wrapper over modifyRecords() that flags the operation as a delete.
function deleteRecord(rec, done) {
  modifyRecords(rec, { isDelete: true }, done);
}
var dbApi = {
save: function () {
// hide _pending and _saving from callers
var args = [].slice.call(arguments);
return save.apply(null, args);
},
// peers really isn't editable - it's literally the list of FQDN's
// that this database is replicated to in a multi-master fashion.
//
// However, lib/store/index.js does plenty to update these records in support
// of the SOA records that are built from them (as does this file in the "migration"
// section). I'm toying with the idea of not storing them seperately or creating the
// SOA records somewhat immediately.
peers: function listPeers(cb) {
// Most data stores are going to have an asynchronous storage API. If we need
// synchronous access to the data it is going to have to be cached. If it is
// cached, there is still the issue the cache getting out of sync (a legitimate
// issue anyway). If we explicitly make all of these operations async then we
// have greater flexibility for store implmentations to address these issues.
return setImmediate(cb, null, jsonDeepClone(db.peers));
},
zones: {
list: listZones,
write: writeZone,
delete: deleteZone
},
records: {
list: listRecords,
write: writeRecord,
delete: deleteRecord
}
};
return dbApi;
};
}; };