📄 storagewrapper.py
            # a just-completed piece failed its hash check: flunk the data,
            # return its requests to the inactive pool, and try to single
            # out the peer that sent the bad data
            self.data_flunked(length, index)
            self.inactive_requests[index] = 1
            self.amount_inactive += length
            self.stat_numflunked += 1

            self.failed_pieces[index] = {}
            allsenders = {}
            for d in self.download_history[index].values():
                allsenders[d] = 1
            if len(allsenders) == 1:
                culprit = allsenders.keys()[0]
                if culprit is not None:
                    culprit.failed(index, bump = True)
                del self.failed_pieces[index]   # found the culprit already
            return False

        # the piece passed its hash check: mark it complete and credit
        # every peer that contributed to it
        self.have[index] = True
        self.inactive_requests[index] = None
        self.waschecked[index] = True
        self.amount_left -= length
        self.stat_numdownloaded += 1

        for d in self.download_history[index].values():
            if d is not None:
                d.good(index)
        del self.download_history[index]
        if self.failed_pieces.has_key(index):
            for d in self.failed_pieces[index].keys():
                if d is not None:
                    d.failed(index)
            del self.failed_pieces[index]

        if self.amount_left == 0:
            self.finished()
        return True

    def request_lost(self, index, begin, length):
        assert not (begin, length) in self.inactive_requests[index]
        insort(self.inactive_requests[index], (begin, length))
        self.amount_inactive += length
        self.numactive[index] -= 1
        if not self.numactive[index]:
            del self.stat_active[index]
            if self.stat_new.has_key(index):
                del self.stat_new[index]

    def get_piece(self, index, begin, length):
        if not self.have[index]:
            return None
        data = None
        if not self.waschecked[index]:
            data = self.read_raw(self.places[index], 0, self._piecelen(index))
            if data is None:
                return None
            if sha(data[:]).digest() != self.hashes[index]:
                self.failed('told file complete on start-up, but piece failed hash check')
                return None
            self.waschecked[index] = True
            if length == -1 and begin == 0:
                return data     # optimization
        if length == -1:
            if begin > self._piecelen(index):
                return None
            length = self._piecelen(index) - begin
            if begin == 0:
                return self.read_raw(self.places[index], 0, length)
        elif begin + length > self._piecelen(index):
            return None
        if data is not None:
            s = data[begin:begin + length]
            data.release()
            return s
        data = self.read_raw(self.places[index], begin, length)
        if data is None:
            return None
        s = data.getarray()
        data.release()
        return s

    def read_raw(self, piece, begin, length, flush_first = False):
        try:
            return self.storage.read(self.piece_size * piece + begin,
                                     length, flush_first)
        except IOError, e:
            self.failed('IO Error: ' + str(e))
            return None

    def set_file_readonly(self, n):
        try:
            self.storage.set_readonly(n)
        except IOError, e:
            self.failed('IO Error: ' + str(e))
        except OSError, e:
            self.failed('OS Error: ' + str(e))

    def has_data(self, index):
        return index not in self.holes and index not in self.blocked_holes

    def doublecheck_data(self, pieces_to_check):
        if not self.double_check:
            return
        sources = []
        for p, v in self.places.items():
            if pieces_to_check.has_key(v):
                sources.append(p)
        assert len(sources) == len(pieces_to_check)
        sources.sort()
        for index in sources:
            if self.have[index]:
                piece = self.read_raw(self.places[index], 0, self._piecelen(index),
                                      flush_first = True)
                if piece is None:
                    return False
                if sha(piece[:]).digest() != self.hashes[index]:
                    self.failed('download corrupted; please restart and resume')
                    return False
                piece.release()
        return True
    def reblock(self, new_blocked):
        # assume downloads have already been canceled and chunks made inactive
        for i in xrange(len(new_blocked)):
            if new_blocked[i] and not self.blocked[i]:
                length = self._piecelen(i)
                self.amount_desired -= length
                if self.have[i]:
                    self.amount_obtained -= length
                    continue
                if self.inactive_requests[i] == 1:
                    self.amount_inactive -= length
                    continue
                inactive = 0
                for nb, nl in self.inactive_requests[i]:
                    inactive += nl
                self.amount_inactive -= inactive
                self.amount_obtained -= length - inactive

            if self.blocked[i] and not new_blocked[i]:
                length = self._piecelen(i)
                self.amount_desired += length
                if self.have[i]:
                    self.amount_obtained += length
                    continue
                if self.inactive_requests[i] == 1:
                    self.amount_inactive += length
                    continue
                inactive = 0
                for nb, nl in self.inactive_requests[i]:
                    inactive += nl
                self.amount_inactive += inactive
                self.amount_obtained += length - inactive

        self.blocked = new_blocked

        self.blocked_movein = Olist()
        self.blocked_moveout = Olist()
        for p, v in self.places.items():
            if p != v:
                if self.blocked[p] and not self.blocked[v]:
                    self.blocked_movein.add(p)
                elif self.blocked[v] and not self.blocked[p]:
                    self.blocked_moveout.add(p)

        self.holes.extend(self.blocked_holes)    # reset holes list
        self.holes.sort()
        self.blocked_holes = []

    '''
    Pickled data format:

    d['pieces'] = either a string containing a bitfield of complete pieces,
        or the numeric value "1" signifying a seed.  If it is a seed,
        d['places'] and d['partials'] should be empty and needn't even exist.

    d['partials'] = [ piece, [ offset, length... ]... ]
        a list of partial data that had been previously downloaded, plus the
        given offsets.  Adjacent partials are merged so as to save space, and
        so that if the request size changes then new requests can be
        calculated more efficiently.

    d['places'] = [ piece, place, {,piece, place ...} ]
        the piece index, and the place it's stored.
        If d['pieces'] specifies a complete piece or d['partials'] specifies
        a set of partials for a piece which has no entry in d['places'], it
        can be assumed that place[index] = index.  A place specified with no
        corresponding data in d['pieces'] or d['partials'] indicates allocated
        space with no valid data, and is reserved so it doesn't need to be
        hash-checked.
    '''
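    # Illustrative example of the format above (the concrete values are
    # hypothetical, chosen only for this comment): a torrent with eight
    # pieces in which piece 0 is complete, piece 2 has its first 32768
    # bytes downloaded (two adjacent requests already merged into a single
    # partial), and piece 2's data currently sits in the slot of piece 5:
    #
    #   d = {'pieces':   '\x80',            # bitfield with only bit 0 set
    #        'partials': [2, [0, 32768]],   # piece 2: offset 0, length 32768
    #        'places':   [2, 5]}            # piece 2 is stored at place 5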
    def pickle(self):
        if self.have.complete():
            return {'pieces': 1}
        pieces = Bitfield(len(self.hashes))
        places = []
        partials = []
        for p in xrange(len(self.hashes)):
            if self.blocked[p] or not self.places.has_key(p):
                continue
            h = self.have[p]
            pieces[p] = h
            pp = self.dirty.get(p)
            if not h and not pp:    # no data
                places.extend([self.places[p], self.places[p]])
            elif self.places[p] != p:
                places.extend([p, self.places[p]])
            if h or not pp:
                continue
            pp.sort()
            r = []
            while len(pp) > 1:
                if pp[0][0] + pp[0][1] == pp[1][0]:
                    pp[0] = list(pp[0])
                    pp[0][1] += pp[1][1]
                    del pp[1]
                else:
                    r.extend(pp[0])
                    del pp[0]
            r.extend(pp[0])
            partials.extend([p, r])
        return {'pieces': pieces.tostring(),
                'places': places, 'partials': partials}
    def unpickle(self, data, valid_places):
        got = {}
        places = {}
        dirty = {}
        download_history = {}
        stat_active = {}
        stat_numfound = self.stat_numfound
        amount_obtained = self.amount_obtained
        amount_inactive = self.amount_inactive
        amount_left = self.amount_left
        inactive_requests = [x for x in self.inactive_requests]
        restored_partials = []

        try:
            if data['pieces'] == 1:     # a seed
                assert not data.get('places', None)
                assert not data.get('partials', None)
                have = Bitfield(len(self.hashes))
                for i in xrange(len(self.hashes)):
                    have[i] = True
                assert have.complete()
                _places = []
                _partials = []
            else:
                have = Bitfield(len(self.hashes), data['pieces'])
                _places = data['places']
                assert len(_places) % 2 == 0
                _places = [_places[x:x+2] for x in xrange(0, len(_places), 2)]
                _partials = data['partials']
                assert len(_partials) % 2 == 0
                _partials = [_partials[x:x+2] for x in xrange(0, len(_partials), 2)]

            for index, place in _places:
                if place not in valid_places:
                    continue
                assert not got.has_key(index)
                assert not got.has_key(place)
                places[index] = place
                got[index] = 1
                got[place] = 1

            for index in xrange(len(self.hashes)):
                if have[index]:
                    if not places.has_key(index):
                        if index not in valid_places:
                            have[index] = False
                            continue
                        assert not got.has_key(index)
                        places[index] = index
                        got[index] = 1
                    length = self._piecelen(index)
                    amount_obtained += length
                    stat_numfound += 1
                    amount_inactive -= length
                    amount_left -= length
                    inactive_requests[index] = None

            for index, plist in _partials:
                assert not dirty.has_key(index)
                assert not have[index]
                if not places.has_key(index):
                    if index not in valid_places:
                        continue
                    assert not got.has_key(index)
                    places[index] = index
                    got[index] = 1
                assert len(plist) % 2 == 0
                plist = [plist[x:x+2] for x in xrange(0, len(plist), 2)]
                dirty[index] = plist
                stat_active[index] = 1
                download_history[index] = {}
                # invert given partials
                length = self._piecelen(index)
                l = []
                if plist[0][0] > 0:
                    l.append((0, plist[0][0]))
                for i in xrange(len(plist) - 1):
                    end = plist[i][0] + plist[i][1]
                    assert not end > plist[i+1][0]
                    l.append((end, plist[i+1][0] - end))
                end = plist[-1][0] + plist[-1][1]
                assert not end > length
                if end < length:
                    l.append((end, length - end))
                # split them to request_size
                ll = []
                amount_obtained += length
                amount_inactive -= length
                for nb, nl in l:
                    while nl > 0:
                        r = min(nl, self.request_size)
                        ll.append((nb, r))
                        amount_inactive += r
                        amount_obtained -= r
                        nb += self.request_size
                        nl -= self.request_size
                inactive_requests[index] = ll
                restored_partials.append(index)

            assert amount_obtained + amount_inactive == self.amount_desired
        except:
            # print_exc()
            return []   # invalid data, discard everything

        self.have = have
        self.places = places
        self.dirty = dirty
        self.download_history = download_history
        self.stat_active = stat_active
        self.stat_numfound = stat_numfound
        self.amount_obtained = amount_obtained
        self.amount_inactive = amount_inactive
        self.amount_left = amount_left
        self.inactive_requests = inactive_requests

        return restored_partials
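
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the class above: the merge-and-invert
# bookkeeping that pickle() and unpickle() perform on partial pieces, pulled
# out as standalone helpers so the two loops are easier to follow.  The
# helper names and the 16 KiB request size are assumptions made only for
# this example.
# ---------------------------------------------------------------------------

def merge_adjacent(partials):
    # partials: sorted (offset, length) pairs already on disk.  Adjacent
    # pairs are coalesced, mirroring the while-loop in pickle().
    merged = []
    for begin, length in partials:
        if merged and merged[-1][0] + merged[-1][1] == begin:
            merged[-1] = (merged[-1][0], merged[-1][1] + length)
        else:
            merged.append((begin, length))
    return merged

def invert_partials(merged, piece_length, request_size = 2 ** 14):
    # Turn the downloaded ranges into the missing ranges, then split each
    # gap into request_size chunks, mirroring the loops in unpickle().
    missing = []
    pos = 0
    for begin, length in merged:
        if begin > pos:
            missing.append((pos, begin - pos))
        pos = begin + length
    if pos < piece_length:
        missing.append((pos, piece_length - pos))
    requests = []
    for begin, length in missing:
        while length > 0:
            requests.append((begin, min(length, request_size)))
            begin += request_size
            length -= request_size
    return requests

# A 64 KiB piece with its first two 16 KiB blocks and its last 16 KiB block
# already downloaded: only the third block still needs to be requested.
downloaded = [(0, 16384), (16384, 16384), (49152, 16384)]
assert merge_adjacent(downloaded) == [(0, 32768), (49152, 16384)]
assert invert_partials(merge_adjacent(downloaded), 65536) == [(32768, 16384)]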