📄 fbruleinfgraph.java
字号:
/**
 * Add a new deduction to the deductions graph.
 * @param t the deduced triple to record
 */
public void addDeduction(Triple t) {
    // Record the deduction, mirroring it into the transitive closure
    // caches when they are in use.
    Graph deductions = getCurrentDeductionsGraph();
    deductions.add(t);
    if (useTGCCaching) {
        transitiveEngine.add(t);
    }
}
/**
 * Retrieve or create a bNode standing for an inferred property value.
 * Delegates to the temp-node cache so repeated requests for the same
 * (instance, prop, pclass) triple yield the same node.
 * @param instance the base instance node to which the property applies
 * @param prop the property node whose value is being inferred
 * @param pclass the (optional, may be null) class for the inferred value
 * @return the bNode representing the property value
 */
public Node getTemp(Node instance, Node prop, Node pclass) {
    return tempNodecache.getTemp(instance, prop, pclass);
}
// =======================================================================
// Core inf graph methods
/**
 * Add a new rule to the rule set. This should only be used by implementations
 * of RulePreprocessHook (which are called during the rule system preparation
 * phase). If called at other times the rule won't be correctly transferred
 * into the underlying engines.
 * @param rule the rule to add to the working rule set
 */
public void addRuleDuringPrepare(Rule rule) {
    if (rules == rawRules) {
        // Copy-on-write: preserve the original rule list so a restart can
        // recover it. The copy constructor handles any collection type, so
        // the previous instanceof/clone() special case (with its unchecked
        // cast) is unnecessary.
        rules = new ArrayList(rawRules);
        // Rebuild the forward engine to use the copied rules
        instantiateRuleEngine(rules);
    }
    rules.add(rule);
}
/**
 * Register a preprocessing hook defining an operation to be run
 * during the preparation phase.
 * @param hook the hook to register
 */
public void addPreprocessingHook(RulePreprocessHook hook) {
    // Lazily allocate the hook list on first registration
    if (preprocessorHooks == null) preprocessorHooks = new ArrayList();
    preprocessorHooks.add(hook);
}
/**
 * Perform any initial processing and caching. This call is optional. Most
 * engines either have negligible set up work or will perform an implicit
 * "prepare" if necessary. The call is provided for those occasions where
 * substantial preparation work is possible (e.g. running a forward chaining
 * rule system) and where an application might wish greater control over when
 * this preparation is done.
 */
public void prepare() {
    if (!isPrepared) {
        isPrepared = true;
        // Restore the original pre-hook-processing rules in case a
        // previous prepare's hooks modified the working rule list
        rules = rawRules;
        // Is there any raw data bound in yet?
        Graph data = null;
        if (fdata != null) data = fdata.getGraph();
        // Initialize the deductions graph, clearing out any deductions
        // left over from a previous prepare
        if (fdeductions != null && fdeductions instanceof FGraph) {
            Graph oldDeductions = ((FGraph)fdeductions).getGraph();
            oldDeductions.getBulkUpdateHandler().removeAll();
        } else {
            fdeductions = new FGraph( createDeductionsGraph() );
        }
        // dataFind answers queries; dataSource feeds the forward engine
        dataFind = (data == null) ? fdeductions : FinderUtil.cascade(fdeductions, fdata);
        Finder dataSource = fdata;
        // Initialize the optional transitive-closure (TGC) caches
        if (useTGCCaching) {
            resetTGCCache();
            if (schemaGraph != null) {
                // Check whether the data graph itself contributes schema
                // vocabulary; if so the schema's cached closures cannot be
                // reused directly and must be rebuilt over the merged data
                if (
                    (transitiveEngine.checkOccurance(TransitiveReasoner.subPropertyOf, data) ||
                    transitiveEngine.checkOccurance(TransitiveReasoner.subClassOf, data) ||
                    transitiveEngine.checkOccurance(RDFS.domain.asNode(), data) ||
                    transitiveEngine.checkOccurance(RDFS.range.asNode(), data) )) {
                    // The data graph contains some ontology knowledge so split the caches
                    // now and rebuild them using merged data
                    transitiveEngine.insert(((FBRuleInfGraph)schemaGraph).fdata, fdata);
                }
            } else {
                if (data != null) {
                    transitiveEngine.insert(null, fdata);
                }
            }
            // Insert any axiomatic statements (rules with empty bodies)
            // into the caches
            for (Iterator i = rules.iterator(); i.hasNext(); ) {
                Rule r = (Rule)i.next();
                if (r.bodyLength() == 0) {
                    // An axiom: push each triple-pattern head straight in
                    for (int j = 0; j < r.headLength(); j++) {
                        Object head = r.getHeadElement(j);
                        if (head instanceof TriplePattern) {
                            TriplePattern h = (TriplePattern) head;
                            transitiveEngine.add(h.asTriple());
                        }
                    }
                }
            }
            transitiveEngine.setCaching(true, true);
            dataFind = FinderUtil.cascade(dataFind, transitiveEngine.getSubClassCache(), transitiveEngine.getSubPropertyCache());
            // Without the next statement the transitive closures are not seen by the forward rules
            dataSource = FinderUtil.cascade(dataSource, transitiveEngine.getSubClassCache(), transitiveEngine.getSubPropertyCache());
        }
        // Make sure there are no Brules left over from prior runs
        bEngine.deleteAllRules();
        // Call any optional preprocessing hooks; anything they generate
        // is cascaded into both the query and forward-engine views
        if (preprocessorHooks != null && preprocessorHooks.size() > 0) {
            Graph inserts = Factory.createGraphMem();
            for (Iterator i = preprocessorHooks.iterator(); i.hasNext(); ) {
                RulePreprocessHook hook = (RulePreprocessHook)i.next();
                hook.run(this, dataFind, inserts);
            }
            if (inserts.size() > 0) {
                FGraph finserts = new FGraph(inserts);
                dataSource = FinderUtil.cascade(fdata, finserts);
                dataFind = FinderUtil.cascade(dataFind, finserts);
            }
        }
        // If a schema graph is bound, make its raw data visible to queries
        // and try to preload its precomputed deductions and rules
        boolean rulesLoaded = false;
        if (schemaGraph != null) {
            Graph rawPreload = ((InfGraph)schemaGraph).getRawGraph();
            if (rawPreload != null) {
                dataFind = FinderUtil.cascade(dataFind, new FGraph(rawPreload));
            }
            rulesLoaded = preloadDeductions(schemaGraph);
        }
        if (rulesLoaded) {
            // Rules already partitioned/loaded by the preload
            engine.fastInit(dataSource);
        } else {
            // No preload so do the rule separation: pure backward rules go
            // to bEngine, the rest run forward
            addBRules(extractPureBackwardRules(rules));
            engine.init(true, dataSource);
        }
        // Prepare the context for builtins run in the backwards engine
        context = new BBRuleContext(this);
    }
}
/**
 * Force the inference graph to reconsult the underlying graph on the
 * next query. Normally changes made through the InfGraph's add and
 * remove calls are handled incrementally; this call is for cases where
 * data was changed "behind the InfGraph's back" and a full reconsult
 * of the changed data is required.
 */
public void rebind() {
    version++;
    if (bEngine != null) {
        bEngine.reset();
    }
    isPrepared = false;
}
/**
 * Set the state of the trace flag. When true, rule firings are logged
 * out to the Log at "INFO" level.
 */
public void setTraceOn(boolean state) {
    // Propagate to both the forward (super) and backward engines
    super.setTraceOn(state);
    bEngine.setTraceOn(state);
}
/**
 * Set to true to enable derivation caching. Enabling allocates a fresh
 * derivations map; disabling discards any recorded derivations.
 */
public void setDerivationLogging(boolean recordDerivations) {
    this.recordDerivations = recordDerivations;
    // Both engines must agree on whether to record derivations
    engine.setDerivationLogging(recordDerivations);
    bEngine.setDerivationLogging(recordDerivations);
    derivations = recordDerivations ? new OneToManyMap() : null;
}
/**
 * Control whether functor-valued literals are dropped from rule output.
 * Default is true (filtering on).
 */
public void setFunctorFiltering(boolean param) {
    this.filterFunctors = param;
}
/**
 * Return the number of rules fired since this rule engine instance was
 * created and initialized. Only forward rule firings are counted; the
 * dynamic backward rules generated for specific queries are not tracked.
 */
public long getNRulesFired() {
    return engine.getNRulesFired();
}
/**
 * Extended find interface used in situations where the implementor may
 * or may not be able to answer the complete query. The pattern is
 * answered from the backward engine, then the continuation Finder (if
 * any) is consulted for additional results.
 * @param pattern a TriplePattern to be matched against the data
 * @param continuation either a Finder or a normal Graph which will be
 * asked for additional match results if the implementor may not have
 * completely satisfied the query; may be null
 */
public ExtendedIterator findWithContinuation(TriplePattern pattern, Finder continuation) {
    checkOpen();
    if (!isPrepared) {
        prepare();
    }
    ExtendedIterator matches = new UniqueExtendedIterator(bEngine.find(pattern));
    if (continuation != null) {
        matches = matches.andThen(continuation.find(pattern));
    }
    if (!filterFunctors) {
        return matches;
    }
    // Drop functor-valued results from the visible answer set
    return matches.filterDrop(new Filter() {
        public boolean accept(Object o) {
            return FBRuleInfGraph.this.accept(o);
        }
    });
}
/**
 * Internal variant of find which omits the filters that block illegal
 * RDF data.
 * @param pattern a TriplePattern to be matched against the data
 */
public ExtendedIterator findFull(TriplePattern pattern) {
    checkOpen();
    if (!isPrepared) {
        prepare();
    }
    // No functor filtering here, unlike findWithContinuation
    return new UniqueExtendedIterator(bEngine.find(pattern));
}
/**
 * Returns an iterator over Triples matching the given
 * subject/property/object pattern. This implementation assumes that
 * the underlying findWithContinuation will have also consulted the
 * raw data.
 */
public ExtendedIterator graphBaseFind(Node subject, Node property, Node object) {
    TriplePattern pattern = new TriplePattern(subject, property, object);
    return findWithContinuation(pattern, null);
}
/**
 * Basic pattern lookup interface. This implementation assumes that the
 * underlying findWithContinuation will have also consulted the raw data.
 * @param pattern a TriplePattern to be matched against the data
 * @return an ExtendedIterator over all Triples in the data set that
 * match the pattern
 */
public ExtendedIterator find(TriplePattern pattern) {
    // No continuation: the backward engine's answer is the whole answer
    return findWithContinuation(pattern, null);
}
/**
 * Flush out all cached results. Future queries have to start from
 * scratch.
 */
public void reset() {
    version++;
    // Null-guard the backward engine for consistency with rebind(),
    // which tolerates reset before the engine is constructed
    if (bEngine != null) {
        bEngine.reset();
    }
    isPrepared = false;
}
/**
* Add one triple to the data graph, run any rules triggered by
* the new data item, recursively adding any generated triples.
*/
public synchronized void performAdd(Triple t) {
version++;
fdata.getGraph().add(t);
if (useTGCCaching) {
if (transitiveEngine.add(t)) isPrepared = false;
}
if (isPrepared) {
boolean needReset = false;
if (preprocessorHooks != null && preprocessorHooks.size() > 0) {
if (preprocessorHooks.size() > 1) {
for (Iterator i = preprocessorHooks.iterator(); i.hasNext();) {
if (((RulePreprocessHook)i.next()).needsRerun(this, t)) {
needReset = true; break;
}
}
} else {
⌨️ 快捷键说明
复制代码
Ctrl + C
搜索代码
Ctrl + F
全屏模式
F11
切换主题
Ctrl + Shift + D
显示快捷键
?
增大字号
Ctrl + =
减小字号
Ctrl + -