hash_verify.c
	else
		hfunc = __ham_func5;

	if ((ret = __db_vrfy_getpageinfo(vdp, PGNO(m), &mip)) != 0)
		return (ret);

	/* Calculate the first pgno for this bucket. */
	pgno = BS_TO_PAGE(bucket, m->spares);

	if ((ret = __db_vrfy_getpageinfo(vdp, pgno, &pip)) != 0)
		goto err;

	/* Make sure we got a plausible page number. */
	if (pgno > vdp->last_pgno || pip->type != P_HASH) {
		EPRINT((dbp->dbenv,
		    "Page %lu: impossible first page in bucket %lu",
		    (u_long)pgno, (u_long)bucket));
		/* Unsafe to continue. */
		isbad = 1;
		goto err;
	}

	if (pip->prev_pgno != PGNO_INVALID) {
		EPRINT((dbp->dbenv,
		    "Page %lu: first page in hash bucket %lu has a prev_pgno",
		    (u_long)pgno, (u_long)bucket));
		isbad = 1;
	}

	/*
	 * Set flags for dups and sorted dups.
	 */
	flags |= F_ISSET(mip, VRFY_HAS_DUPS) ? ST_DUPOK : 0;
	flags |= F_ISSET(mip, VRFY_HAS_DUPSORT) ? ST_DUPSORT : 0;

	/* Loop until we find a fatal bug, or until we run out of pages. */
	for (;;) {
		/* Provide feedback on our progress to the application. */
		if (!LF_ISSET(DB_SALVAGE))
			__db_vrfy_struct_feedback(dbp, vdp);

		if ((ret = __db_vrfy_pgset_get(vdp->pgset, pgno, &p)) != 0)
			goto err;
		if (p != 0) {
			EPRINT((dbp->dbenv,
			    "Page %lu: hash page referenced twice",
			    (u_long)pgno));
			isbad = 1;
			/* Unsafe to continue. */
			goto err;
		} else if ((ret = __db_vrfy_pgset_inc(vdp->pgset, pgno)) != 0)
			goto err;

		/*
		 * Hash pages that nothing has ever hashed to may never
		 * have actually come into existence, and may appear to be
		 * entirely zeroed.  This is acceptable, and since there's
		 * no real way for us to know whether this has actually
		 * occurred, we clear the "wholly zeroed" flag on every
		 * hash page.  A wholly zeroed page, by nature, will appear
		 * to have no flags set and zero entries, so should
		 * otherwise verify correctly.
		 */
		F_CLR(pip, VRFY_IS_ALLZEROES);

		/* If we have dups, our meta page had better know about it. */
		if (F_ISSET(pip, VRFY_HAS_DUPS) &&
		    !F_ISSET(mip, VRFY_HAS_DUPS)) {
			EPRINT((dbp->dbenv,
		    "Page %lu: duplicates present in non-duplicate database",
			    (u_long)pgno));
			isbad = 1;
		}

		/*
		 * If the database has sorted dups, this page had better
		 * not have unsorted ones.
		 */
		if (F_ISSET(mip, VRFY_HAS_DUPSORT) &&
		    F_ISSET(pip, VRFY_DUPS_UNSORTED)) {
			EPRINT((dbp->dbenv,
			    "Page %lu: unsorted dups in sorted-dup database",
			    (u_long)pgno));
			isbad = 1;
		}

		/* Walk overflow chains and offpage dup trees. */
		if ((ret = __db_vrfy_childcursor(vdp, &cc)) != 0)
			goto err;
		for (ret = __db_vrfy_ccset(cc, pip->pgno, &child); ret == 0;
		    ret = __db_vrfy_ccnext(cc, &child))
			if (child->type == V_OVERFLOW) {
				if ((ret = __db_vrfy_ovfl_structure(dbp, vdp,
				    child->pgno, child->tlen, flags)) != 0) {
					if (ret == DB_VERIFY_BAD)
						isbad = 1;
					else
						goto err;
				}
			} else if (child->type == V_DUPLICATE) {
				if ((ret = __db_vrfy_duptype(dbp,
				    vdp, child->pgno, flags)) != 0) {
					isbad = 1;
					continue;
				}
				if ((ret = __bam_vrfy_subtree(dbp, vdp,
				    child->pgno, NULL, NULL,
				    flags | ST_RECNUM | ST_DUPSET |
				    ST_TOPLEVEL,
				    NULL, NULL, NULL)) != 0) {
					if (ret == DB_VERIFY_BAD)
						isbad = 1;
					else
						goto err;
				}
			}
		if ((ret = __db_vrfy_ccclose(cc)) != 0)
			goto err;
		cc = NULL;

		/* If it's safe to check that things hash properly, do so. */
		if (isbad == 0 && !LF_ISSET(DB_NOORDERCHK) &&
		    (ret = __ham_vrfy_hashing(dbp, pip->entries,
		    m, bucket, pgno, flags, hfunc)) != 0) {
			if (ret == DB_VERIFY_BAD)
				isbad = 1;
			else
				goto err;
		}

		next_pgno = pip->next_pgno;
		ret = __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip);

		pip = NULL;
		if (ret != 0)
			goto err;

		if (next_pgno == PGNO_INVALID)
			break;		/* End of the bucket. */

		/* We already checked this, but just in case... */
		if (!IS_VALID_PGNO(next_pgno)) {
			DB_ASSERT(0);
			EPRINT((dbp->dbenv,
			    "Page %lu: hash page has bad next_pgno",
			    (u_long)pgno));
			isbad = 1;
			goto err;
		}

		if ((ret = __db_vrfy_getpageinfo(vdp, next_pgno, &pip)) != 0)
			goto err;

		if (pip->prev_pgno != pgno) {
			EPRINT((dbp->dbenv,
			    "Page %lu: hash page has bad prev_pgno",
			    (u_long)next_pgno));
			isbad = 1;
		}

		pgno = next_pgno;
	}

err:	if (cc != NULL && ((t_ret = __db_vrfy_ccclose(cc)) != 0) && ret == 0)
		ret = t_ret;
	if (mip != NULL && ((t_ret =
	    __db_vrfy_putpageinfo(dbp->dbenv, vdp, mip)) != 0) && ret == 0)
		ret = t_ret;
	if (pip != NULL && ((t_ret =
	    __db_vrfy_putpageinfo(dbp->dbenv, vdp, pip)) != 0) && ret == 0)
		ret = t_ret;
	return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
}

/*
 * __ham_vrfy_hashing --
 *	Verify that all items on a given hash page hash correctly.
 *
 * PUBLIC: int __ham_vrfy_hashing __P((DB *,
 * PUBLIC:     u_int32_t, HMETA *, u_int32_t, db_pgno_t, u_int32_t,
 * PUBLIC:     u_int32_t (*) __P((DB *, const void *, u_int32_t))));
 */
int
__ham_vrfy_hashing(dbp, nentries, m, thisbucket, pgno, flags, hfunc)
	DB *dbp;
	u_int32_t nentries;
	HMETA *m;
	u_int32_t thisbucket;
	db_pgno_t pgno;
	u_int32_t flags;
	u_int32_t (*hfunc) __P((DB *, const void *, u_int32_t));
{
	DBT dbt;
	DB_MPOOLFILE *mpf;
	PAGE *h;
	db_indx_t i;
	int ret, t_ret, isbad;
	u_int32_t hval, bucket;

	mpf = dbp->mpf;
	ret = isbad = 0;

	memset(&dbt, 0, sizeof(DBT));
	F_SET(&dbt, DB_DBT_REALLOC);

	if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
		return (ret);

	for (i = 0; i < nentries; i += 2) {
		/*
		 * We've already verified the page integrity and that of any
		 * overflow chains linked off it;  it is therefore safe to use
		 * __db_ret.  It's also not all that much slower, since we
		 * have to copy every hash item to deal with alignment anyway;
		 * we can tweak this a bit if this proves to be a bottleneck,
		 * but for now, take the easy route.
		 */
		if ((ret = __db_ret(dbp, h, i, &dbt, NULL, NULL)) != 0)
			goto err;
		hval = hfunc(dbp, dbt.data, dbt.size);

		bucket = hval & m->high_mask;
		if (bucket > m->max_bucket)
			bucket = bucket & m->low_mask;

		if (bucket != thisbucket) {
			EPRINT((dbp->dbenv,
			    "Page %lu: item %lu hashes incorrectly",
			    (u_long)pgno, (u_long)i));
			isbad = 1;
		}
	}

err:	if (dbt.data != NULL)
		__os_ufree(dbp->dbenv, dbt.data);
	if ((t_ret = mpf->put(mpf, h, 0)) != 0)
		return (t_ret);

	return ((ret == 0 && isbad == 1) ? DB_VERIFY_BAD : ret);
}
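
/*
 * Illustrative sketch -- not part of the original file.  It restates the
 * two-mask computation __ham_vrfy_hashing uses above to decide which bucket
 * an item belongs in under linear hashing.  The function and parameter names
 * here are hypothetical; in the real code, high_mask, low_mask and
 * max_bucket come from the hash meta page (HMETA).
 *
 * Worked example, with max_bucket == 5, low_mask == 0x3, high_mask == 0x7:
 *	hval 13:  13 & 0x7 == 5, and 5 <= 5, so the item lands in bucket 5.
 *	hval 14:  14 & 0x7 == 6, but bucket 6 doesn't exist yet (its parent
 *	    hasn't split), so fall back to 14 & 0x3 == 2: bucket 2.
 */
static u_int32_t
__ham_example_bucket(hval, max_bucket, high_mask, low_mask)
	u_int32_t hval, max_bucket, high_mask, low_mask;
{
	u_int32_t bucket;

	/* Try the larger, post-split mask first. */
	bucket = hval & high_mask;

	/*
	 * If that names a bucket past the last one that actually exists,
	 * the would-be parent bucket hasn't split yet, so the smaller
	 * mask applies.
	 */
	if (bucket > max_bucket)
		bucket = bucket & low_mask;

	return (bucket);
}
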
/*
 * __ham_salvage --
 *	Safely dump out anything that looks like a key on an alleged
 *	hash page.
 *
 * PUBLIC: int __ham_salvage __P((DB *, VRFY_DBINFO *, db_pgno_t, PAGE *,
 * PUBLIC:     void *, int (*)(void *, const void *), u_int32_t));
 */
int
__ham_salvage(dbp, vdp, pgno, h, handle, callback, flags)
	DB *dbp;
	VRFY_DBINFO *vdp;
	db_pgno_t pgno;
	PAGE *h;
	void *handle;
	int (*callback) __P((void *, const void *));
	u_int32_t flags;
{
	DBT dbt, unkdbt;
	db_pgno_t dpgno;
	db_indx_t dsz;
	int ret, err_ret, t_ret;
	u_int32_t himark, tlen;
	u_int8_t *hk;
	void *buf;
	u_int32_t dlen, len, i;

	memset(&dbt, 0, sizeof(DBT));
	dbt.flags = DB_DBT_REALLOC;

	memset(&unkdbt, 0, sizeof(DBT));
	unkdbt.size = (u_int32_t)strlen("UNKNOWN") + 1;
	unkdbt.data = "UNKNOWN";

	err_ret = 0;

	/*
	 * Allocate a buffer for overflow items.  Start at one page;
	 * __db_safe_goff will realloc as needed.
	 */
	if ((ret = __os_malloc(dbp->dbenv, dbp->pgsize, &buf)) != 0)
		return (ret);

	himark = dbp->pgsize;
	for (i = 0;; i++) {
		/* If we're not aggressive, break when we hit NUM_ENT(h). */
		if (!LF_ISSET(DB_AGGRESSIVE) && i >= NUM_ENT(h))
			break;

		/* Verify the current item. */
		ret = __db_vrfy_inpitem(dbp,
		    h, pgno, i, 0, flags, &himark, NULL);
		/* If this returned a fatality, it's time to break. */
		if (ret == DB_VERIFY_FATAL)
			break;

		if (ret == 0) {
			hk = P_ENTRY(dbp, h, i);
			len = LEN_HKEYDATA(dbp, h, dbp->pgsize, i);
			if ((u_int32_t)(hk + len -
			    (u_int8_t *)h) > dbp->pgsize) {
				/*
				 * Item is unsafely large;  either continue
				 * or set it to the whole page, depending on
				 * aggressiveness.
				 */
				if (!LF_ISSET(DB_AGGRESSIVE))
					continue;
				len = dbp->pgsize -
				    (u_int32_t)(hk - (u_int8_t *)h);
				err_ret = DB_VERIFY_BAD;
			}
			switch (HPAGE_PTYPE(hk)) {
			default:
				if (!LF_ISSET(DB_AGGRESSIVE))
					break;
				err_ret = DB_VERIFY_BAD;
				/* FALLTHROUGH */
			case H_KEYDATA:
keydata:			memcpy(buf, HKEYDATA_DATA(hk), len);
				dbt.size = len;
				dbt.data = buf;
				if ((ret = __db_prdbt(&dbt,
				    0, " ", handle, callback, 0, vdp)) != 0)
					err_ret = ret;
				break;
			case H_OFFPAGE:
				if (len < HOFFPAGE_SIZE) {
					err_ret = DB_VERIFY_BAD;
					continue;
				}
				memcpy(&dpgno,
				    HOFFPAGE_PGNO(hk), sizeof(dpgno));
				if ((ret = __db_safe_goff(dbp, vdp,
				    dpgno, &dbt, &buf, flags)) != 0) {
					err_ret = ret;
					(void)__db_prdbt(&unkdbt, 0, " ",
					    handle, callback, 0, vdp);
					break;
				}
				if ((ret = __db_prdbt(&dbt,
				    0, " ", handle, callback, 0, vdp)) != 0)
					err_ret = ret;
				break;
			case H_OFFDUP:
				if (len < HOFFPAGE_SIZE) {
					err_ret = DB_VERIFY_BAD;
					continue;
				}
				memcpy(&dpgno,
				    HOFFPAGE_PGNO(hk), sizeof(dpgno));
				/* UNKNOWN iff pgno is bad or we're a key. */
				if (!IS_VALID_PGNO(dpgno) || (i % 2 == 0)) {
					if ((ret = __db_prdbt(&unkdbt, 0, " ",
					    handle, callback, 0, vdp)) != 0)
						err_ret = ret;
				} else if ((ret = __db_salvage_duptree(dbp,
				    vdp, dpgno, &dbt, handle, callback,
				    flags | SA_SKIPFIRSTKEY)) != 0)
					err_ret = ret;
				break;
			case H_DUPLICATE:
				/*
				 * We're a key; printing dups will seriously
				 * foul the output.  If we're being aggressive,
				 * pretend this is a key and let the app.
				 * programmer sort out the mess.
				 */
				if (i % 2 == 0) {
					err_ret = ret;
					if (LF_ISSET(DB_AGGRESSIVE))
						goto keydata;
					break;
				}

				/* Too small to have any data. */
				if (len <
				    HKEYDATA_SIZE(2 * sizeof(db_indx_t))) {
					err_ret = DB_VERIFY_BAD;
					continue;
				}

				/* Loop until we hit the total length. */
				for (tlen = 0; tlen + sizeof(db_indx_t) < len;
				    tlen += dlen) {
					/*
					 * Each element of the dup set is
					 * bracketed by a db_indx_t length:
					 * read the current element's leading
					 * length from the dup set, then step
					 * past it to the element's data.
					 */
					memcpy(&dsz, HKEYDATA_DATA(hk) + tlen,
					    sizeof(db_indx_t));
					dlen = dsz;
					tlen += sizeof(db_indx_t);
					/*
					 * If dlen is too long, print all the
					 * rest of the dup set in a chunk.
					 */
					if (dlen + tlen > len)
						dlen = len - tlen;
					memcpy(buf,
					    HKEYDATA_DATA(hk) + tlen, dlen);
					dbt.size = dlen;
					dbt.data = buf;
					if ((ret = __db_prdbt(&dbt, 0, " ",
					    handle, callback, 0, vdp)) != 0)
						err_ret = ret;
					tlen += sizeof(db_indx_t);
				}
				break;
			}
		}
	}

	__os_free(dbp->dbenv, buf);
	if ((t_ret = __db_salvage_markdone(vdp, pgno)) != 0)
		return (t_ret);

	return ((ret == 0 && err_ret != 0) ? err_ret : ret);
}
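
/*
 * Illustrative sketch -- not part of the original file.  Both
 * __ham_vrfy_bucket above and __ham_meta2pgset below rely on BS_TO_PAGE()
 * to find the first page of a bucket.  Hash buckets are allocated a
 * power-of-two "doubling" at a time, and the meta page's spares array
 * records, for each doubling, the offset to add to a bucket number to get
 * a page number.  The helper below is a hypothetical restatement of that
 * mapping; the open-coded loop stands in for Berkeley DB's __db_log2().
 */
static db_pgno_t
__ham_example_bucket_to_page(bucket, spares)
	u_int32_t bucket;
	u_int32_t *spares;
{
	u_int32_t log2;

	/*
	 * Smallest log2 such that 2^log2 >= bucket + 1:  which doubling
	 * this bucket belongs to.
	 */
	for (log2 = 0; ((u_int32_t)1 << log2) < bucket + 1; log2++)
		;

	return ((db_pgno_t)(bucket + spares[log2]));
}
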
/*
 * __ham_meta2pgset --
 *	Return the set of hash pages corresponding to the given
 *	known-good meta page.
 *
 * PUBLIC: int __ham_meta2pgset __P((DB *, VRFY_DBINFO *, HMETA *, u_int32_t,
 * PUBLIC:     DB *));
 */
int
__ham_meta2pgset(dbp, vdp, hmeta, flags, pgset)
	DB *dbp;
	VRFY_DBINFO *vdp;
	HMETA *hmeta;
	u_int32_t flags;
	DB *pgset;
{
	DB_MPOOLFILE *mpf;
	PAGE *h;
	db_pgno_t pgno;
	u_int32_t bucket, totpgs;
	int ret, val;

	/*
	 * We don't really need flags, but leave them for consistency with
	 * __bam_meta2pgset.
	 */
	COMPQUIET(flags, 0);

	DB_ASSERT(pgset != NULL);

	mpf = dbp->mpf;
	totpgs = 0;

	/*
	 * Loop through all the buckets, pushing onto pgset the corresponding
	 * page(s) for each one.
	 */
	for (bucket = 0; bucket <= hmeta->max_bucket; bucket++) {
		pgno = BS_TO_PAGE(bucket, hmeta->spares);

		/*
		 * We know the initial pgno is safe because the spares array
		 * has been verified.
		 *
		 * Safely walk the list of pages in this bucket.
		 */
		for (;;) {
			if ((ret = mpf->get(mpf, &pgno, 0, &h)) != 0)
				return (ret);
			if (TYPE(h) == P_HASH) {
				/*
				 * Make sure we don't go past the end of
				 * pgset.
				 */
				if (++totpgs > vdp->last_pgno) {
					(void)mpf->put(mpf, h, 0);
					return (DB_VERIFY_BAD);
				}
				if ((ret =
				    __db_vrfy_pgset_inc(pgset, pgno)) != 0) {
					(void)mpf->put(mpf, h, 0);
					return (ret);
				}

				pgno = NEXT_PGNO(h);
			} else
				pgno = PGNO_INVALID;

			if ((ret = mpf->put(mpf, h, 0)) != 0)
				return (ret);

			/* If the new pgno is wonky, go on to the next bucket. */
			if (!IS_VALID_PGNO(pgno) || pgno == PGNO_INVALID)
				break;

			/*
			 * If we've touched this page before, we have a cycle;
			 * go on to the next bucket.
			 */
			if ((ret =
			    __db_vrfy_pgset_get(pgset, pgno, &val)) != 0)
				return (ret);
			if (val != 0)
				break;
		}
	}
	return (0);
}

/*
 * __ham_dups_unsorted --
 *	Takes a known-safe hash duplicate set and its total length.
 *	Returns 1 if there are out-of-order duplicates in this set,
 *	0 if there are not.
 */
static int
__ham_dups_unsorted(dbp, buf, len)
	DB *dbp;
	u_int8_t *buf;
	u_int32_t len;
{
	DBT a, b;
	db_indx_t offset, dlen;
	int (*func) __P((DB *, const DBT *, const DBT *));

	memset(&a, 0, sizeof(DBT));
	memset(&b, 0, sizeof(DBT));

	func = (dbp->dup_compare == NULL) ? __bam_defcmp : dbp->dup_compare;

	/*
	 * Loop through the dup set until we hit the end or we find
	 * a pair of dups that's out of order.  b is always the current
	 * dup, a the one before it.
	 */
	for (offset = 0; offset < len; offset += DUP_SIZE(dlen)) {
		memcpy(&dlen, buf + offset, sizeof(db_indx_t));
		b.data = buf + offset + sizeof(db_indx_t);
		b.size = dlen;

		if (a.data != NULL && func(dbp, &a, &b) > 0)
			return (1);

		a.data = b.data;
		a.size = b.size;
	}

	return (0);
}
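
/*
 * Illustrative sketch -- not part of the original file.  It shows the
 * on-page duplicate set layout that both the H_DUPLICATE case in
 * __ham_salvage and __ham_dups_unsorted above walk:  each element is
 * bracketed by its length, so a set of n elements looks like
 *
 *	[len0][data0][len0] [len1][data1][len1] ... [lenN][dataN][lenN]
 *
 * and element i occupies DUP_SIZE(len_i) == len_i + 2 * sizeof(db_indx_t)
 * bytes.  The walker below is hypothetical; the callback argument is for
 * illustration only and is not a Berkeley DB interface.
 */
static void
__ham_example_walk_dups(buf, len, callback)
	u_int8_t *buf;
	u_int32_t len;
	void (*callback) __P((u_int8_t *, db_indx_t));
{
	db_indx_t dlen;
	u_int32_t offset;

	for (offset = 0; offset + sizeof(db_indx_t) < len;
	    offset += DUP_SIZE(dlen)) {
		/* Leading length prefix of the current element. */
		memcpy(&dlen, buf + offset, sizeof(db_indx_t));

		/* The element's data immediately follows the prefix. */
		(*callback)(buf + offset + sizeof(db_indx_t), dlen);
	}
}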