dpf.c
来自「基于组件方式开发操作系统的OSKIT源代码」· C语言 代码 · 共 670 行 · 第 1/2 页
C
670 行
/*
 * NOTE(review): this chunk begins in the middle of merge() -- the
 * function's head (the declarations of a, res, hte, succ, first,
 * first_or, last, alignment and the enclosing scan loop) is on the
 * previous page and is not visible here.  All code below is
 * token-identical to the original; only comments and layout changed.
 */
	/* No matches, insert as an OR branch. */
	if(!res) {
		/* Build atoms for the rest of the filter and hang them
		 * on the current or-list as a new alternative. */
		last = swizzle(ir, pid, &first, alignment);
		insert_or(first_or, first);
		return last;
	}
	/* Matched: continue to the next node. */
	if(res == 1 && !a->ht)
		continue;
	/* Is a disjunction. */
	/* Create ht and insert a. */
	if(!a->ht)
		a = ht_newht(a);
	/* continue merging, unless there is no more. */
	if((hte = ht_lookup(a->ht, ir->u.eq.val, 0, 0))) {
		/* Value already in the table: descend and keep merging. */
		a = hte;
	} else {
		/* create an entry for this atom. */
		hte = ht_insert(a, 0, ir);
		/* string together the rest */
		if((last = swizzle(ir+1, pid, &first, alignment))) {
			first->parent = hte;
			LIST_INSERT_HEAD(&hte->kids, first, sibs);
			return last;
		} else {
			/* Nothing follows this atom: the table entry
			 * itself terminates the filter and owns the pid. */
			hte->pid = pid;
			return hte;
		}
	}
	}	/* NOTE(review): closes the scan loop opened on the previous page. */

	/* Long filter - extended past last entry. */
	if(!a) {
		/* Merged all the way down. */
		if(isend(ir)) {
			dpf_overlap = 1;
			return succ;
		}
		last = swizzle(ir, pid, &first, alignment);
		LIST_INSERT_HEAD(&succ->kids, first, sibs);
		first->parent = succ;
		return last;
	/* Short filter. */
	} else {
		demand(succ, nil filter);
		/* filter overlap? */
		if(succ->pid)
			dpf_overlap = 1;
		else
			succ->pid = pid;
		return succ;
	}
}

/*
 * Scan backwards, genning code for each atom.
 *
 * Walks from the tail of a freshly merged filter toward dpf_base,
 * recompiling every atom whose code could have been invalidated by
 * the merge.  Stops early once it hits a shared (refcnt > 1) atom
 * that does not itself terminate a filter: such atoms live in an
 * or-list or hash table that was already recompiled at the point of
 * modification.
 */
static void instantiate_filter(Atom last) {
	Atom a;

	/* always compile the last atom (at very least is a pid). */
	dpf_compile_atom(last);

	/*
	 * When refcnt > 1 and pid is non-zero it means this filter
	 * was overlayed on top of a short one and we need to regen.
	 * Otherwise we are in an orlist or in a hash table --- 
	 * both of these structures have been recompiled at the point
	 * of modification.
	 */
	for(a = last->parent; a != dpf_base; a = a->parent)
		if(a->refcnt == 1)
			dpf_compile_atom(a);
		/* filter has to handle longest match: regen. */
		else if(a->pid) {
			dpf_compile_atom(a);
			break;
		}
}

/*
 * Merge dpf with trie.
 *
 * Splices the IR list `ir' for filter `pid' into the trie rooted at
 * `a', then walks the resulting path back to the root bumping
 * reference counts, fixing the term/nterm counters of any hash table
 * each atom sits in, and regenerating hash-table dispatch code where
 * ht_regenp() says it is needed.  Returns the tail atom of the
 * inserted filter.
 */
static Atom dpf_merge(Atom a, struct ir *ir, int pid) {
	Atom p, last;
	Ht ht;

	last = a = merge(a, ir, pid);

	/* incr filter's atom's refcnt. */
	for(last = a; a; a = p) {
		p = a->parent;
		a->refcnt++;
		/* fix up counts of all hash tables we are in. */
		if(p && (ht = p->ht)) {
			if(a == last)
				ht->term++;
			else
				ht->nterm++;
		}
		/* regen ht if necessary. */
		if((ht = a->ht)) {
			/* Invariant: every reference to this atom is
			 * accounted as either terminal or non-terminal. */
			demand((a->ht->term + a->ht->nterm) == a->refcnt, bogus summation!);
			if(ht_regenp(ht))
				dpf_compile_atom(a);
		}
	}
	instantiate_filter(last);
	return last;
}

/* get last element in list; gross.  (Linear scan to the final sibling
 * of a's kid list -- see the "eek: use tail queue" note below.) */
static Atom getlast(Atom a) {
	for(a = a->kids.lh_first; a->sibs.le_next; a = a->sibs.le_next)
		;
	return a;
}

/*
 * Delete filter from tree.
 *
 * Unlinks filter `pid' from the trie: decrements refcounts from the
 * filter's tail back to dpf_base, frees every atom whose count drops
 * to zero, repairs the or-list / hash-table structure the deepest
 * freed atom lived in (demoting a hash table back to a single child
 * when only one unique entry remains), recompiles the affected atoms,
 * and finally recompiles the trie root and releases the pid.
 * Returns `pid' on success, DPF_BOGUSID for an unknown/invalid pid.
 */
SYSCALL int dpf_delete(unsigned pid) {
	Atom a, p, f, fl[DPF_MAXELEM * 2 + 2], last;
	int i;
	Ht ht;

	if(pid >= DPF_MAXFILT || !(a = dpf_active[pid]))
		return DPF_BOGUSID;

	/* nuke the pid.  (When called from the insert-overlap rollback
	 * path, dpf_overlap is set and the pid slot belongs to the
	 * pre-existing filter, so it is left alone.) */
	if(!dpf_overlap)
		a->pid = 0;

	/* Scan backwards, decrementing ref counts. */
	for(last = a, i = 0; a != dpf_base; a = p) {
		/* All atoms with refcnt = 1 need to be deleted. */
		if(a->refcnt-- == 1)
			fl[i++] = a;
		p = a->parent;
		/* decrement hash table counters as appropriate. */
		if((ht = p->ht)) {
			if(a == last)
				ht->term--;
			else
				ht->nterm--;
		}
		demand(!a->ht || (a->ht->term + a->ht->nterm) == a->refcnt, bogus summation!);
	}

	/*
	 * If the filter had any elements to free, do so.  Note that only
	 * the last atom (going backwards) can be in anything
	 * interesting (i.e., a hash table or an or-list).
	 *
	 * If we delete from an orlist need to regen code for first and
	 * last elements (if these change).
	 *
	 * If we delete from a hash table, need to regen code if it is
	 * demoted.  For performance, we might consider regening if
	 * collisions are eliminated.
	 */
	if(i-- > 0) {
		int lastor, orlist;
		Atom first;

		f = fl[i];		/* deepest-rooted atom to free */
		p = f->parent;
		lastor = 0;
		first = 0;
		if((orlist = orlist_p(f))) {
			demand(orlist_p(f), bogus: should be an orlist);
			/* get firstor: non-zero only if f heads the
			 * or-list, in which case its successor becomes
			 * the new head and must be regenerated. */
			first = (p->kids.lh_first != f) ? 0 : f->sibs.le_next;
			lastor = (f->sibs.le_next == 0);
		}
		LIST_REMOVE(f, sibs);	/* Remove f from kid list. */

		/*
		 * Check first atom to see if it is in a ht.  If it is remove
		 * and, if necessary, demote.
		 */
		if(p->ht) {
			ht_delete(p, f->ir.u.eq.val, i == 0);
			/* If only one unique entry, delete ht. */
			if(p->ht->ent == 1) {
				/* last element in hash table */
				last = p->kids.lh_first;
				last->parent = p->parent;
				LIST_INSERT_BEFORE(p, last, sibs);
				LIST_REMOVE(p, sibs);
				free(p->ht);
				free(p);
				/* regen code for last atom in trie */
				dpf_compile_atom(last);
				/* if parent jumps to ht, regen parent */
				p = last->parent;
				if(p->kids.lh_first == last)
					dpf_compile_atom(p);
				p = 0;
			}
		} else if(orlist) {
			/* was the first or: need to regen new firstor. */
			if(first) {
				dpf_compile_atom(first);
				/* fix up parent; cheaper way, certainly */
				dpf_compile_atom(first->parent);
			}
			/* was last or, need to regen last or */
			else if(lastor)
				/* eek: use tail queue */
				dpf_compile_atom(getlast(p));
		/* is the last in a chain: regen this node. */
		} else
			dpf_compile_atom(p);

		/* Release every atom collected above; all must now be
		 * unreferenced. */
		for(; i >= 0; i--) {
			demand(fl[i]->refcnt == 0, bogus refcnt!);
			free(fl[i]);
		}
	}
	dpf_iptr = dpf_compile(dpf_base);
	dpf_active[pid] = 0;
	dpf_freepid(pid);
	debug_stmt(dpf_check());
	return pid;
}

/*
 * check of 0 length filter
 *
 * Core of filter insertion: copies the caller's IR into scratch
 * storage, validates every element, coalesces it, merges it into the
 * trie via dpf_merge(), and recompiles the trie unless the new filter
 * overlapped an existing one (in which case the insertion is rolled
 * back with dpf_delete() and DPF_OVERLAP is returned).  On success
 * returns the new filter's pid; otherwise one of the DPF_* error
 * codes.
 */
SYSCALL int dpf_insert_really(struct dpf_ir *irp) {
	/* scratch memory to hold copied filter */
	struct dpf_ir ir;
	struct ir *r;
	int pid, n, i;
	Atom tail;	/* NOTE(review): written below but never read afterwards. */

	/* Make sure these fields are at identical offsets, so eq/shift
	 * atoms can be inspected through either union arm. */
	AssertNow(offsetof(struct atom, ir.u.eq.op) == \
		offsetof(struct atom, ir.u.shift.op));
	AssertNow(offsetof(struct atom, ir.u.eq.offset) == \
		offsetof(struct atom, ir.u.shift.offset));
	AssertNow(offsetof(struct atom, ir.u.eq.nbits) == \
		offsetof(struct atom, ir.u.shift.nbits));
	AssertNow(offsetof(struct atom, ir.u.eq.mask) == \
		offsetof(struct atom, ir.u.shift.mask));

	/* One-time initialization. */
	if(!dpf_base) {
		dpf_base = &dpf_fake_head;
		dpf_iptr = dpf_compile(dpf_base);
		LIST_INIT(&dpf_base->kids);
		/* NOTE(review): presumably reserves pid 0 so it can act
		 * as the "no filter" sentinel -- confirm against
		 * dpf_allocpid's definition. */
		dpf_allocpid();
	}
	debug_stmt(dpf_check());

	if ((pid = dpf_allocpid()) < 0)
		return DPF_TOOMANYFILTERS;
	demand(pid < DPF_MAXFILT, bogus pid);

	/* copy in the header */
	dpf_copyin(&ir, irp, offsetof(struct dpf_ir, ir));
	/* copy in the elements */
	/* NOTE(review): irp->irn is used to size this copy before it is
	 * range-checked below; if irp comes from an untrusted caller and
	 * dpf_copyin/dpf_xlate do not bound it elsewhere, an oversized
	 * irn could overrun the scratch `ir' -- verify and consider
	 * checking irn first. */
	dpf_copyin(&ir.ir[0], &irp->ir[0], irp->irn * sizeof irp->ir[0]);

	/* NOTE(review): these two early returns do not dpf_freepid(pid),
	 * apparently leaking the pid allocated above -- confirm. */
	if(ir.irn >= DPF_MAXELEM)
		return DPF_TOOMANYELEMS;
	else if(!ir.irn)
		return DPF_NILFILTER;
	r = &ir.ir[0];

	/*
	 * Make sure all atoms are well-formed.  quicker to do in single
	 * pass, but what the hell.
	 */
	for(i = 0, n = ir.irn; i < n; i++) {
		/*
		 * I don't think we have to check these: the only
		 * thing that could be wrong is the mask size, but
		 * we just truncate that (serves them right).
		 */
		if(!iseq(&r[i]) && !isshift(&r[i]))
			return DPF_BOGUSOP;
		r[i].level = i;
	}
	r[n].u.eq.op = DPF_OP_END;	/* mark it */
	dpf_coalesce(&ir);

	/* XXX: Need to compute alignment and whether there has been a
	   shift.  alignment starts at DPF_MSG_ALIGN, shift = 0. */

	/* Now merge it in with the main tree. */
	dpf_overlap = 0;
	demand(!dpf_active[pid], bogus active!);
	tail = dpf_active[pid] = dpf_merge(dpf_base, &ir.ir[0], pid);
	if(!dpf_overlap) {
		dpf_iptr = dpf_compile(dpf_base);
		if(verbose) {
			printf("Compiled trie:\n");
			dpf_output();
			dpf_dump((void *)dpf_iptr);
		}
	} else {
		/* Overlap with an existing filter: undo the insertion. */
		if(dpf_delete(pid) < 0)
			fatal(Should not fail);
		dpf_overlap = 0;
		pid = DPF_OVERLAP;
	}
	debug_stmt(dpf_check());
	return pid;
}

/* Public entry point: translate the caller's filter representation and
 * insert it; returns the pid or a DPF_* error code. */
SYSCALL int dpf_insert(void *p, int sz) {
	int pid;

	pid = dpf_insert_really(dpf_xlate(p, sz));
	debug_stmt(dpf_check());
	return pid;
}

/* Toggle verbose dumping of the compiled trie on insertion. */
void dpf_verbose(int v) { verbose = v; }
⌨️ 快捷键说明
复制代码Ctrl + C
搜索代码Ctrl + F
全屏模式F11
增大字号Ctrl + =
减小字号Ctrl + -
显示快捷键?