# download_bt1.py
# Written by Bram Cohen
# see LICENSE.txt for license information
from zurllib import urlopen
from urlparse import urlparse
from BT1.btformats import check_message
from BT1.Choker import Choker
from BT1.Storage import Storage
from BT1.StorageWrapper import StorageWrapper
from BT1.FileSelector import FileSelector
from BT1.Uploader import Upload
from BT1.Downloader import Downloader
from BT1.HTTPDownloader import HTTPDownloader
from BT1.Connecter import Connecter
from RateLimiter import RateLimiter
from BT1.Encrypter import Encoder
from RawServer import RawServer, autodetect_socket_style
from BT1.Rerequester import Rerequester
from BT1.DownloaderFeedback import DownloaderFeedback
from RateMeasure import RateMeasure
from CurrentRateMeasure import Measure
from BT1.PiecePicker import PiecePicker
from BT1.Statistics import Statistics
from ConfigDir import ConfigDir
from bencode import bencode, bdecode
from natpunch import UPnP_test
from sha import sha
from os import path, makedirs, listdir
from parseargs import parseargs, formatDefinitions, defaultargs
from socket import error as socketerror
from random import seed
from threading import Event
from clock import clock
from __init__ import createPeerID
try:
True
except:
True = 1
False = 0
defaults = [
('max_uploads', 7,
"the maximum number of uploads to allow at once."),
('keepalive_interval', 120.0,
'number of seconds to pause between sending keepalives'),
('download_slice_size', 2 ** 14,
"How many bytes to query for per request."),
('upload_unit_size', 1460,
"when limiting upload rate, how many bytes to send at a time"),
('request_backlog', 10,
"maximum number of requests to keep in a single pipe at once."),
('max_message_length', 2 ** 23,
"maximum length prefix encoding you'll accept over the wire - larger values get the connection dropped."),
('ip', '',
"ip to report you have to the tracker."),
('minport', 10000, 'minimum port to listen on, counts up if unavailable'),
('maxport', 60000, 'maximum port to listen on'),
('random_port', 1, 'whether to choose randomly inside the port range ' +
'instead of counting up linearly'),
('responsefile', '',
'file the server response was stored in, alternative to url'),
('url', '',
'url to get file from, alternative to responsefile'),
('selector_enabled', 1,
'whether to enable the file selector and fast resume function'),
('expire_cache_data', 10,
'the number of days after which you wish to expire old cache data ' +
'(0 = disabled)'),
('priority', '',
'a list of file priorities separated by commas, must be one per file, ' +
'0 = highest, 1 = normal, 2 = lowest, -1 = download disabled'),
('saveas', '',
'local file name to save the file as, null indicates query user'),
('timeout', 300.0,
'time to wait between closing sockets which nothing has been received on'),
('timeout_check_interval', 60.0,
'time to wait between checking if any connections have timed out'),
('max_slice_length', 2 ** 17,
"maximum length slice to send to peers, larger requests are ignored"),
('max_rate_period', 20.0,
"maximum amount of time to guess the current rate estimate represents"),
('bind', '',
'comma-separated list of ips/hostnames to bind to locally'),
# ('ipv6_enabled', autodetect_ipv6(),
('ipv6_enabled', 0,
'allow the client to connect to peers via IPv6'),
('ipv6_binds_v4', autodetect_socket_style(),
"set if an IPv6 server socket won't also field IPv4 connections"),
('upnp_nat_access', 1,
'attempt to autoconfigure a UPnP router to forward a server port ' +
'(0 = disabled, 1 = mode 1 [fast], 2 = mode 2 [slow])'),
('upload_rate_fudge', 5.0,
'time equivalent of writing to kernel-level TCP buffer, for rate adjustment'),
('tcp_ack_fudge', 0.03,
'how much TCP ACK download overhead to add to upload rate calculations ' +
'(0 = disabled)'),
('display_interval', .5,
'time between updates of displayed information'),
('rerequest_interval', 5 * 60,
'time to wait between requesting more peers'),
('min_peers', 20,
'minimum number of peers to not do rerequesting'),
('http_timeout', 60,
'number of seconds to wait before assuming that an http connection has timed out'),
('max_initiate', 40,
'number of peers at which to stop initiating new connections'),
('check_hashes', 1,
'whether to check hashes on disk'),
('max_upload_rate', 0,
'maximum kB/s to upload at (0 = no limit, -1 = automatic)'),
('max_download_rate', 0,
'maximum kB/s to download at (0 = no limit)'),
('alloc_type', 'normal',
'allocation type (may be normal, background, pre-allocate or sparse)'),
('alloc_rate', 2.0,
'rate (in MiB/s) to allocate space at using background allocation'),
('buffer_reads', 1,
'whether to buffer disk reads'),
('write_buffer_size', 4,
'the maximum amount of space to use for buffering disk writes ' +
'(in megabytes, 0 = disabled)'),
('breakup_seed_bitfield', 1,
'sends an incomplete bitfield and then fills with have messages, '
'in order to get around stupid ISP manipulation'),
('snub_time', 30.0,
"seconds to wait for data to come in over a connection before assuming it's semi-permanently choked"),
('spew', 0,
"whether to display diagnostic info to stdout"),
('rarest_first_cutoff', 2,
"number of downloads at which to switch from random to rarest first"),
('rarest_first_priority_cutoff', 5,
'the number of peers which need to have a piece before other partials take priority over rarest first'),
('min_uploads', 4,
"the number of uploads to fill out to with extra optimistic unchokes"),
('max_files_open', 50,
'the maximum number of files to keep open at a time, 0 means no limit'),
('round_robin_period', 30,
"the number of seconds between the client's switching upload targets"),
('super_seeder', 0,
"whether to use special upload-efficiency-maximizing routines (only for dedicated seeds)"),
('security', 1,
"whether to enable extra security features intended to prevent abuse"),
('max_connections', 0,
"the absolute maximum number of peers to connect with (0 = no limit)"),
('auto_kick', 1,
"whether to allow the client to automatically kick/ban peers that send bad data"),
('double_check', 1,
"whether to double-check data being written to the disk for errors (may increase CPU load)"),
('triple_check', 0,
"whether to thoroughly check data being written to the disk (may slow disk access)"),
('lock_files', 1,
"whether to lock files the client is working with"),
('lock_while_reading', 0,
"whether to lock access to files being read"),
('auto_flush', 0,
"minutes between automatic flushes to disk (0 = disabled)"),
]
argslistheader = 'Arguments are:\n\n'
def _failfunc(x):
print x
# old-style downloader
def download(params, filefunc, statusfunc, finfunc, errorfunc, doneflag, cols,
pathFunc = None, presets = {}, exchandler = None,
failed = _failfunc, paramfunc = None):
try:
config = parse_params(params, presets)
except ValueError, e:
failed('error: ' + str(e) + '\nrun with no args for parameter explanations')
return
if not config:
errorfunc(get_usage())
return
myid = createPeerID()
seed(myid)
rawserver = RawServer(doneflag, config['timeout_check_interval'],
config['timeout'], ipv6_enable = config['ipv6_enabled'],
failfunc = failed, errorfunc = exchandler)
upnp_type = UPnP_test(config['upnp_nat_access'])
try:
listen_port = rawserver.find_and_bind(config['minport'], config['maxport'],
config['bind'], ipv6_socket_style = config['ipv6_binds_v4'],
upnp = upnp_type, randomizer = config['random_port'])
except socketerror, e:
failed("Couldn't listen - " + str(e))
return
response = get_response(config['responsefile'], config['url'], failed)
if not response:
return
infohash = sha(bencode(response['info'])).digest()
d = BT1Download(statusfunc, finfunc, errorfunc, exchandler, doneflag,
config, response, infohash, myid, rawserver, listen_port)
if not d.saveAs(filefunc):
return
if pathFunc:
pathFunc(d.getFilename())
hashcheck = d.initFiles(old_style = True)
if not hashcheck:
return
if not hashcheck():
return
if not d.startEngine():
return
d.startRerequester()
d.autoStats()
statusfunc(activity = 'connecting to peers')
if paramfunc:
paramfunc({ 'max_upload_rate' : d.setUploadRate, # change_max_upload_rate(<int KiB/sec>)
'max_uploads': d.setConns, # change_max_uploads(<int max uploads>)
'listen_port' : listen_port, # int
'peer_id' : myid, # string
'info_hash' : infohash, # string
'start_connection' : d._startConnection, # start_connection((<string ip>, <int port>), <peer id>)
})
rawserver.listen_forever(d.getPortHandler())
d.shutdown()
def parse_params(params, presets = {}):
if not params:
return None
config, args = parseargs(params, defaults, 0, 1, presets = presets)
if args:
if config['responsefile'] or config['url']:
raise ValueError, 'must have responsefile or url as arg or parameter, not both'
if path.isfile(args[0]):
config['responsefile'] = args[0]
else:
try:
urlparse(args[0])
except:
raise ValueError, 'bad filename or url'
config['url'] = args[0]
elif (not config['responsefile']) == (not config['url']):
raise ValueError, 'need responsefile or url, must have one, cannot have both'
return config
def get_usage(defaults = defaults, cols = 100, presets = {}):
return (argslistheader + formatDefinitions(defaults, cols, presets))
def get_response(file, url, errorfunc):
try:
if file:
h = open(file, 'rb')
try:
line = h.read(10) # quick test to see if responsefile contains a dict
front, garbage = line.split(':', 1)
assert front[0] == 'd'
int(front[1:])
except:
errorfunc(file+' is not a valid responsefile')
return None
try:
h.seek(0)
except:
try:
h.close()
except:
pass
h = open(file, 'rb')
else:
try:
h = urlopen(url)
except:
errorfunc(url+' bad url')
return None
response = h.read()
except IOError, e:
# NOTE(review): this copy is truncated here -- the body of the IOError
# handler above (and any code that followed get_response) is missing;
# the trailing lines were web-viewer keyboard-shortcut chrome, not code.