📄 low.c
  for (i = 0; i < SAMPLE_NUMBER; i++)
    newSample[i].probability = 1.0;
  normalize = 1.0;
  threshold = PARTICLE_NUMBER;

  for (p = 0; p < PASSES; p++) {
    best = 0;
    total = 0.0;
    for (i = 0; i < SAMPLE_NUMBER; i++) {
      if ((newSample[i].probability != WORST_POSSIBLE) &&
          (1.0 - pow(1.0 - (newSample[i].probability / threshold), SAMPLE_NUMBER) > 1.0 / (SAMPLE_NUMBER))) {
        newSample[i].probability = newSample[i].probability / normalize;
        for (k = p; k < SENSE_NUMBER; k += PASSES)
          newSample[i].probability = newSample[i].probability * QuickScore(sense, k, i);
        if (newSample[i].probability > newSample[best].probability)
          best = i;
        total = total + newSample[i].probability;
      }
      else
        newSample[i].probability = WORST_POSSIBLE;
    }
    normalize = newSample[best].probability;
    threshold = total;
  }

  keepers = 0;
  for (i = 0; i < SAMPLE_NUMBER; i++)
    if (newSample[i].probability != WORST_POSSIBLE) {
      keepers++;
      // Don't let this heuristic evaluation be included in the final eval.
      newSample[i].probability = 1.0;
    }

  // Let the user know how many samples survived this first cut.
  fprintf(stderr, "Better %d ", keepers);
  threshold = -1;

  // Now reevaluate all of the surviving samples, using the full laser scan to look for possible
  // obstructions, in order to get the most accurate weights. While doing this evaluation, we can
  // still keep our eye out for unlikely samples before we are finished.
  keepers = 0;
  normalize = 1.0;
  threshold = PARTICLE_NUMBER;
  for (p = 0; p < PASSES; p++) {
    best = 0;
    total = 0.0;
    for (i = 0; i < SAMPLE_NUMBER; i++) {
      if ((newSample[i].probability != WORST_POSSIBLE) &&
          (1.0 - pow(1.0 - (newSample[i].probability / threshold), SAMPLE_NUMBER) > 30.0 / (SAMPLE_NUMBER))) {
        if (p == PASSES - 1)
          keepers++;
        newSample[i].probability = newSample[i].probability / normalize;
        for (k = p; k < SENSE_NUMBER; k += PASSES)
          newSample[i].probability = newSample[i].probability * CheckScore(sense, k, i);
        if (newSample[i].probability > newSample[best].probability)
          best = i;
        total = total + newSample[i].probability;
      }
      else
        newSample[i].probability = WORST_POSSIBLE;
    }
    normalize = newSample[best].probability;
    threshold = total;
  }

  // Report how many samples survived the second cut. These numbers help the user have confidence
  // that the threshold values used for culling are reasonable.
  fprintf(stderr, "Best of %d ", keepers);

  // All probabilities are currently in log form. Exponentiate them, but weight them by the prob of
  // the most likely sample, to ensure that we don't run into issues of machine precision at really
  // small numbers.
  total = 0.0;
  threshold = newSample[best].probability;
  for (i = 0; i < SAMPLE_NUMBER; i++)
    // If the sample was culled, it has a weight of 0
    if (newSample[i].probability == WORST_POSSIBLE)
      newSample[i].probability = 0.0;
    else
      total = total + newSample[i].probability;

  // Renormalize to ensure that the total probability is now equal to 1.
  for (i = 0; i < SAMPLE_NUMBER; i++)
    newSample[i].probability = newSample[i].probability / total;

  total = 0.0;
  // Count how many children each particle will get in the next generation.
  // This is done through random resampling.
  for (i = 0; i < SAMPLE_NUMBER; i++) {
    newchildren[i] = 0;
    total = total + newSample[i].probability;
  }
  i = j = 0;  // i = number of survivors, j = number of new samples
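  // How the draw below works, roughly: each iteration picks ftemp as a random fraction of the
  // total weight (MTrandDec() appears to return a uniform deviate in [0, 1)) and walks down the
  // sample list, subtracting weights until ftemp falls within sample k's share, so sample k is
  // chosen with probability newSample[k].probability / total. To illustrate: with weights
  // {0.5, 0.3, 0.2} and ftemp = 0.6, the walk subtracts 0.5, leaving 0.1 < 0.3, and sample 1 is
  // selected. The loop also stops early once PARTICLE_NUMBER distinct parents have been drawn.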
  while ((j < SAMPLE_NUMBER) && (i < PARTICLE_NUMBER)) {
    k = 0;
    ftemp = MTrandDec() * total;
    while (ftemp > (newSample[k].probability)) {
      ftemp = ftemp - newSample[k].probability;
      k++;
    }
    if (newchildren[k] == 0)
      i++;
    newchildren[k]++;
    j++;
  }

  // Report exactly how many samples are kept as particles, since they were actually resampled.
  fprintf(stderr, "(%d kept) ", i);

  // Do some cleaning up
  // Is this even necessary?
  for (i = 0; i < PARTICLE_NUMBER; i++) {
    children[i] = 0;
    savedParticle[i].probability = 0.0;
  }

  // Now copy over new particles to savedParticles
  best = 0;
  k = 0;  // pointer into saved particles
  for (i = 0; i < SAMPLE_NUMBER; i++)
    if (newchildren[i] > 0) {
      savedParticle[k].probability = newSample[i].probability;
      savedParticle[k].x = newSample[i].x;
      savedParticle[k].y = newSample[i].y;
      savedParticle[k].theta = newSample[i].theta;
      savedParticle[k].C = newSample[i].C;
      savedParticle[k].D = newSample[i].D;
      savedParticle[k].T = newSample[i].T;
      savedParticle[k].dist = distance / MAP_SCALE;
      savedParticle[k].turn = turn;
      // For savedParticle, the ancestryNode field actually points to the parent of this saved particle
      savedParticle[k].ancestryNode = l_particle[newSample[i].parent].ancestryNode;
      savedParticle[k].ancestryNode->numChildren++;
      children[k] = newchildren[i];
      if (savedParticle[k].probability > savedParticle[best].probability)
        best = k;
      k++;
    }

  // This number records how many saved particles we are currently using, so that we can ignore
  // anything beyond this in later computations.
  cur_saved_particles_used = k;

  // We might need to continue generating children for particles, if we reach PARTICLE_NUMBER worth
  // of distinct parents early. We renormalize over the chosen particles, and continue to sample
  // from there.
  if (j < SAMPLE_NUMBER) {
    total = 0.0;
    // Normalize particle probabilities. Note that they have already been exponentiated
    for (i = 0; i < cur_saved_particles_used; i++)
      total = total + savedParticle[i].probability;
    for (i = 0; i < cur_saved_particles_used; i++)
      savedParticle[i].probability = savedParticle[i].probability / total;
    total = 0.0;
    for (i = 0; i < cur_saved_particles_used; i++)
      total = total + savedParticle[i].probability;
    while (j < SAMPLE_NUMBER) {
      k = 0;
      ftemp = MTrandDec() * total;
      while (ftemp > (savedParticle[k].probability)) {
        ftemp = ftemp - savedParticle[k].probability;
        k++;
      }
      children[k]++;
      j++;
    }
  }
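  // At this point, savedParticle[0 .. cur_saved_particles_used-1] holds one entry per distinct
  // surviving sample, children[k] records how many samples particle k will spawn next iteration
  // (the counts sum to SAMPLE_NUMBER), and best indexes the most probable saved particle.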
  // Some useful information concerning the current generation of particles, and the parameters
  // for the best one.
  fprintf(stderr, "-- %.3d (%.4f, %.4f, %.4f) : %.4f\n", curGeneration, savedParticle[best].x,
          savedParticle[best].y, savedParticle[best].theta, savedParticle[best].probability);
}


//
// DisposeAncestry
//
// When the SLAM process is complete, this function will clean up the memory being used by the
// ancestry tree, and remove all of the associated entries from the low level map.
//
void DisposeAncestry(TAncestor particleID[])
{
  int i, j;
  TPath *tempPath, *trashPath;
  TEntryList *entry;

  for (i = 0; i < ID_NUMBER; i++) {
    if (particleID[i].ID == i) {
      // Free up memory
      entry = particleID[i].mapEntries;
      for (j = 0; j < particleID[i].total; j++)
        LowDeleteObservation(entry[j].x, entry[j].y, entry[j].node);
      free(entry);
      particleID[i].mapEntries = NULL;

      tempPath = particleID[i].path;
      while (tempPath != NULL) {
        trashPath = tempPath;
        tempPath = tempPath->next;
        free(trashPath);
      }
      particleID[i].path = NULL;

      particleID[i].ID = -123;
    }

    for (cleanID = 0; cleanID < ID_NUMBER; cleanID++)
      availableID[cleanID] = cleanID;
    cleanID = ID_NUMBER;
  }
}


//
// UpdateAncestry
//
// Every iteration, after particles have been resampled and evaluated (i.e., after Localize has been
// run), we will need to update the ancestry tree. This consists of four main steps.
// a) Remove dead nodes. These are defined as any ancestor node which has no descendants in the
//    current generation. This is caused by certain particles not being resampled, or by a node's
//    children all dying off on their own. These nodes not only need to be removed, but every one of
//    their observations in the map also needs to be removed.
// b) Collapse branches of the tree. We want to restrict each internal node to having a branching
//    factor of at least two. Therefore, if a node has only one child, we merge the information in
//    that node with the one child node. This is especially common at the root of the tree, as the
//    different hypotheses coalesce.
// c) Add the new particles into the tree.
// d) Update the map for each new particle. We needed to wait until they were added into the tree,
//    so that the ancestry tree can keep track of the different observations associated with the
//    particle.
//
void UpdateAncestry(TSense sense, TAncestor particleID[])
{
  int i, j;
  TAncestor *temp, *hold, *parentNode;
  TEntryList *entry, *workArray;
  TMapStarter *node;
  TPath *tempPath, *trashPath;

  // Remove Dead Nodes -
  // Go through the current particle array, and prune out all particles that did not spawn any
  // particles. We know that the particle had to spawn samples, but those samples may not have
  // become particles themselves, through not generating any samples for the next generation.
  // Recurse up through the tree from there.
  for (i = 0; i < l_cur_particles_used; i++) {
    temp = l_particle[i].ancestryNode;

    // This is a "while" loop for purposes of recursing up the tree.
    while (temp->numChildren == 0) {
      // Free up the memory in the map by deleting the associated observations.
      for (j = 0; j < temp->total; j++)
        LowDeleteObservation(temp->mapEntries[j].x, temp->mapEntries[j].y, temp->mapEntries[j].node);
      // Get rid of the memory being used by this ancestor
      free(temp->mapEntries);
      temp->mapEntries = NULL;

      // This is used exclusively for the low level in hierarchical SLAM.
      // Get rid of the hypothesized path that this ancestor used.
      tempPath = temp->path;
      while (tempPath != NULL) {
        trashPath = tempPath;
        tempPath = tempPath->next;
        free(trashPath);
      }
      temp->path = NULL;

      // Recover the ID, so that it can be used later.
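      // (The freed ID is pushed onto the availableID stack, with cleanID indexing the top, so a
      // later generation can reuse it; the sentinel ID of -42 marks this node as no longer in use.)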
      cleanID++;
      availableID[cleanID] = temp->ID;
      temp->generation = curGeneration;
      temp->ID = -42;

      // Remove this node from the tree, while keeping track of its parent. We need that for
      // recursing up the tree (its parent could possibly need to be removed itself, now)
      hold = temp;
      temp = temp->parent;
      hold->parent = NULL;
      // Note the disappearance of this particle (may cause telescoping of particles, or outright deletion)
      temp->numChildren--;
    }
  }

  // Collapse Branches -
  // Run through the particle IDs, checking for those node IDs that are currently in use. Essentially is