node[id_property] = id
end
+ # Enlarge the collection to have at least the specified capacity.
+ #
+ # The capacity is specified in number of nodes. Used to minimize the
+ # number of times the collection need to be resized when adding nodes
+ # in batches.
+ #
+ # Do nothing by default.
+ fun enlarge(cap: Int) do end
+
# Add the specified node to the graph and set its local ID.
#
# SEE: `add`
register(node)
return node
end
+
+ # Remove the node with the specified local ID.
+ fun remove_at(id: ID_TYPE) is abstract
+
+ # Remove the specified node.
+ #
+ # The local ID is used instead of `==` to seek the node.
+ fun remove_node(node: NeoNode) do
+ remove_at(id_of(node))
+ end
+
+ redef fun clear do
+ for node in self do remove_node(node)
+ end
+
+ redef fun remove(node: NeoNode) do
+ for n in self do
+ if node == n then
+ remove_node(n)
+ return
+ end
+ end
+ end
+
+ redef fun remove_all(node: NeoNode) do
+ for n in self do
+ if node == n then remove_node(n)
+ end
+ end
+
+ # Optimize the collection, possibly by rewritting it.
+ #
+ # The local ID of the elements may be changed by this method.
+ fun compact do end
end
# A mean to save and load a Neo4j graph.
fun save_part(nodes: Collection[NeoNode],
edges: Collection[NeoEdge]) is abstract
end
+
+# Save or load a graph using an actual Neo4j database.
+class Neo4jGraphStore
+ super GraphStore
+
+ # The maximum number of entities saved in one request.
+ #
+ # Also defines the granulity of the reported progression.
+ #
+ # TODO Also honor this limit in `load`.
+ var batch_max_size = 512 is writable
+
+ # The Neo4j client to use.
+ var client: Neo4jClient
+
+ # The label to use to retrieve the nodes.
+ var node_label: String
+
+ private var done_part = 0
+ private var total = 0
+
+ # Is the database already contains at least one node with the specified label?
+ fun has_node_label(name: String): Bool do
+ var query = new CypherQuery.from_string(
+ "match n where \{name\} in labels(n) return count(n)")
+ query.params["name"] = name
+ var data = client.cypher(query).as(JsonObject)["data"]
+ var result = data.as(JsonArray).first.as(JsonArray).first.as(Int)
+ return result > 0
+ end
+
+ redef fun isolated_save do return not has_node_label(node_label)
+
+ redef fun load do
+ assert batch_max_size > 0
+ fire_started
+ var db_nodes = client.nodes_with_label(node_label)
+ var nodes = graph.nodes
+ var edges = graph.edges
+ var i = 0
+
+ total = nodes.length * 2
+ done_part = nodes.length
+ fire_progressed(done_part, total)
+ for node in db_nodes do
+ nodes.add(node)
+ edges.add_all(node.out_edges)
+ i += 1
+ if i >= batch_max_size then
+ done_part += batch_max_size
+ fire_progressed(done_part, total)
+ end
+ end
+ fire_done
+ end
+
+ redef fun save_part(nodes, edges) do
+ assert batch_max_size > 0
+ fire_started
+ total = nodes.length + edges.length
+ done_part = 0
+
+ save_entities(nodes)
+ save_entities(edges)
+ fire_done
+ end
+
+ # Save the specified entities.
+ private fun save_entities(neo_entities: Collection[NeoEntity]) do
+ var batch = new NeoBatch(client)
+ var batch_length = 0
+
+ for nentity in neo_entities do
+ batch.save_entity(nentity)
+ batch_length += 1
+ if batch_length >= batch_max_size then
+ do_batch(batch)
+ done_part += batch_max_size
+ fire_progressed(done_part, total)
+ batch = new NeoBatch(client)
+ batch_length = 0
+ end
+ end
+ do_batch(batch)
+ done_part += batch_length
+ end
+
+ # Execute `batch` and check for errors.
+ #
+ # Abort if `batch.execute` returns errors.
+ private fun do_batch(batch: NeoBatch) do
+ var errors = batch.execute
+ assert errors.is_empty else
+ for e in errors do sys.stderr.write("{e}\n")
+ end
+ end
+end