
retr.c

GNU Wget (command line browser) source code

Language: C
Page 1 of 2 (the listing below is truncated inside retrieve_url)
/* File retrieval.
   Copyright (C) 1995, 1996, 1997, 1998, 2000, 2001 Free Software
   Foundation, Inc.

This file is part of GNU Wget.

GNU Wget is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.

GNU Wget is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Wget; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

In addition, as a special exception, the Free Software Foundation
gives permission to link the code of its release of Wget with the
OpenSSL project's "OpenSSL" library (or with modified versions of it
that use the same license as the "OpenSSL" library), and distribute
the linked executables.  You must obey the GNU General Public License
in all respects for all of the code used other than "OpenSSL".  If you
modify this file, you may extend this exception to your version of the
file, but you are not obligated to do so.  If you do not wish to do
so, delete this exception statement from your version.  */

#include <config.h>

#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#ifdef HAVE_UNISTD_H
# include <unistd.h>
#endif /* HAVE_UNISTD_H */
#include <errno.h>
#ifdef HAVE_STRING_H
# include <string.h>
#else
# include <strings.h>
#endif /* HAVE_STRING_H */
#include <assert.h>

#include "wget.h"
#include "utils.h"
#include "retr.h"
#include "progress.h"
#include "url.h"
#include "recur.h"
#include "ftp.h"
#include "host.h"
#include "connect.h"
#include "hash.h"
#include "convert.h"

#ifdef HAVE_SSL
# include "gen_sslfunc.h"      /* for ssl_iread */
#endif

#ifndef errno
extern int errno;
#endif

/* See the comment in gethttp() why this is needed. */
int global_download_count;

/* Total size of downloaded files.  Used to enforce quota.  */
LARGE_INT total_downloaded_bytes;

static struct {
  long chunk_bytes;
  double chunk_start;
  double sleep_adjust;
} limit_data;

static void
limit_bandwidth_reset (void)
{
  limit_data.chunk_bytes = 0;
  limit_data.chunk_start = 0;
}

/* Limit the bandwidth by pausing the download for an amount of time.
   BYTES is the number of bytes received from the network, and DELTA
   is the number of milliseconds it took to receive them.  */

static void
limit_bandwidth (long bytes, double *dltime, struct wget_timer *timer)
{
  double delta_t = *dltime - limit_data.chunk_start;
  double expected;

  limit_data.chunk_bytes += bytes;

  /* Calculate the amount of time we expect downloading the chunk
     should take.  If in reality it took less time, sleep to
     compensate for the difference.  */
  expected = 1000.0 * limit_data.chunk_bytes / opt.limit_rate;

  if (expected > delta_t)
    {
      double slp = expected - delta_t + limit_data.sleep_adjust;
      double t0, t1;
      if (slp < 200)
        {
          DEBUGP (("deferring a %.2f ms sleep (%ld/%.2f).\n",
                   slp, limit_data.chunk_bytes, delta_t));
          return;
        }
      DEBUGP (("\nsleeping %.2f ms for %ld bytes, adjust %.2f ms\n",
               slp, limit_data.chunk_bytes, limit_data.sleep_adjust));

      t0 = *dltime;
      usleep ((unsigned long) (1000 * slp));
      t1 = wtimer_elapsed (timer);

      /* Due to scheduling, we probably slept slightly longer (or
         shorter) than desired.  Calculate the difference between the
         desired and the actual sleep, and adjust the next sleep by
         that amount.  */
      limit_data.sleep_adjust = slp - (t1 - t0);

      /* Since we've called wtimer_elapsed, we might as well update
         the caller's dltime. */
      *dltime = t1;
    }

  limit_data.chunk_bytes = 0;
  limit_data.chunk_start = *dltime;
}

#define MIN(i, j) ((i) <= (j) ? (i) : (j))

/* Reads the contents of file descriptor FD, until it is closed, or a
   read error occurs.  The data is read in 8K chunks, and stored to
   stream fp, which should have been open for writing.  If BUF is
   non-NULL and its file descriptor is equal to FD, flush RBUF first.
   This function will *not* use the rbuf_* functions!

   The EXPECTED argument is passed to show_progress() unchanged, but
   otherwise ignored.

   If opt.verbose is set, the progress is also shown.  RESTVAL
   represents a value from which to start downloading (which will be
   shown accordingly).  If RESTVAL is non-zero, the stream should have
   been open for appending.

   The function exits and returns codes of 0, -1 and -2 if the
   connection was closed, there was a read error, or if it could not
   write to the output stream, respectively.

   IMPORTANT: The function flushes the contents of the buffer in
   rbuf_flush() before actually reading from fd.  If you wish to read
   from fd immediately, flush or discard the buffer.  */
int
get_contents (int fd, FILE *fp, long *len, long restval, long expected,
              struct rbuf *rbuf, int use_expected, double *elapsed)
{
  int res = 0;

  static char dlbuf[16384];
  int dlbufsize = sizeof (dlbuf);

  void *progress = NULL;
  struct wget_timer *timer = wtimer_allocate ();
  double dltime = 0;

  *len = restval;

  if (opt.verbose)
    progress = progress_create (restval, expected);

  if (rbuf && RBUF_FD (rbuf) == fd)
    {
      int sz = 0;
      while ((res = rbuf_flush (rbuf, dlbuf, sizeof (dlbuf))) != 0)
        {
          fwrite (dlbuf, 1, res, fp);
          *len += res;
          sz += res;
        }
      if (sz)
        fflush (fp);
      if (ferror (fp))
        {
          res = -2;
          goto out;
        }
      if (progress)
        progress_update (progress, sz, 0);
    }

  if (opt.limit_rate)
    limit_bandwidth_reset ();
  wtimer_reset (timer);

  /* Use a smaller buffer for low requested bandwidths.  For example,
     with --limit-rate=2k, it doesn't make sense to slurp in 16K of
     data and then sleep for 8s.  With buffer size equal to the limit,
     we never have to sleep for more than one second.  */
  if (opt.limit_rate && opt.limit_rate < dlbufsize)
    dlbufsize = opt.limit_rate;

  /* Read from fd while there is available data.

     Normally, if expected is 0, it means that it is not known how
     much data is expected.  However, if use_expected is specified,
     then expected being zero means exactly that.  */
  while (!use_expected || (*len < expected))
    {
      int amount_to_read = (use_expected
                            ? MIN (expected - *len, dlbufsize) : dlbufsize);
#ifdef HAVE_SSL
      if (rbuf->ssl!=NULL)
        res = ssl_iread (rbuf->ssl, dlbuf, amount_to_read);
      else
#endif /* HAVE_SSL */
        res = iread (fd, dlbuf, amount_to_read);

      if (res <= 0)
        break;

      fwrite (dlbuf, 1, res, fp);

      /* Always flush the contents of the network packet.  This should
         not hinder performance: fast downloads will be received in
         16K chunks (which stdio would write out anyway), and slow
         downloads won't be limited with disk performance.  */
      fflush (fp);
      if (ferror (fp))
        {
          res = -2;
          goto out;
        }

      dltime = wtimer_elapsed (timer);
      if (opt.limit_rate)
        limit_bandwidth (res, &dltime, timer);

      *len += res;
      if (progress)
        progress_update (progress, res, dltime);
#ifdef WINDOWS
      if (use_expected && expected > 0)
        ws_percenttitle (100.0 * (double)(*len) / (double)expected);
#endif
    }
  if (res < -1)
    res = -1;

 out:
  if (progress)
    progress_finish (progress, dltime);
  if (elapsed)
    *elapsed = dltime;
  wtimer_delete (timer);

  return res;
}

/* Return a printed representation of the download rate, as
   appropriate for the speed.  If PAD is non-zero, strings will be
   padded to the width of 7 characters (xxxx.xx).  */
char *
retr_rate (long bytes, double msecs, int pad)
{
  static char res[20];
  static char *rate_names[] = {"B/s", "KB/s", "MB/s", "GB/s" };
  int units = 0;

  double dlrate = calc_rate (bytes, msecs, &units);
  sprintf (res, pad ? "%7.2f %s" : "%.2f %s", dlrate, rate_names[units]);

  return res;
}

/* Calculate the download rate and trim it as appropriate for the
   speed.  Appropriate means that if rate is greater than 1K/s,
   kilobytes are used, and if rate is greater than 1MB/s, megabytes
   are used.

   UNITS is zero for B/s, one for KB/s, two for MB/s, and three for
   GB/s.  */
double
calc_rate (long bytes, double msecs, int *units)
{
  double dlrate;

  assert (msecs >= 0);
  assert (bytes >= 0);

  if (msecs == 0)
    /* If elapsed time is exactly zero, it means we're under the
       granularity of the timer.  This often happens on systems that
       use time() for the timer.  */
    msecs = wtimer_granularity ();

  dlrate = (double)1000 * bytes / msecs;
  if (dlrate < 1024.0)
    *units = 0;
  else if (dlrate < 1024.0 * 1024.0)
    *units = 1, dlrate /= 1024.0;
  else if (dlrate < 1024.0 * 1024.0 * 1024.0)
    *units = 2, dlrate /= (1024.0 * 1024.0);
  else
    /* Maybe someone will need this, one day. */
    *units = 3, dlrate /= (1024.0 * 1024.0 * 1024.0);

  return dlrate;
}

/* Maximum number of allowed redirections.  20 was chosen as a
   "reasonable" value, which is low enough to not cause havoc, yet
   high enough to guarantee that normal retrievals will not be hurt by
   the check.  */

#define MAX_REDIRECTIONS 20

#define SUSPEND_POST_DATA do {                  \
  post_data_suspended = 1;                      \
  saved_post_data = opt.post_data;              \
  saved_post_file_name = opt.post_file_name;    \
  opt.post_data = NULL;                         \
  opt.post_file_name = NULL;                    \
} while (0)

#define RESTORE_POST_DATA do {                          \
  if (post_data_suspended)                              \
    {                                                   \
      opt.post_data = saved_post_data;                  \
      opt.post_file_name = saved_post_file_name;        \
      post_data_suspended = 0;                          \
    }                                                   \
} while (0)

static char *getproxy PARAMS ((struct url *));

/* Retrieve the given URL.  Decides which loop to call -- HTTP, FTP,
   FTP proxy, etc.  */

/* #### This function should be rewritten so it doesn't return from
   multiple points. */

uerr_t
retrieve_url (const char *origurl, char **file, char **newloc,
              const char *refurl, int *dt)
{
  uerr_t result;
  char *url;
  int location_changed, dummy;
  char *mynewloc, *proxy;
  struct url *u, *proxy_url;
  int up_error_code;            /* url parse error code */
  char *local_file;
  int redirection_count = 0;

  int post_data_suspended = 0;
  char *saved_post_data = NULL;
  char *saved_post_file_name = NULL;

  /* If dt is NULL, use local storage.  */
  if (!dt)
    {
      dt = &dummy;
      dummy = 0;
    }
  url = xstrdup (origurl);
  if (newloc)
    *newloc = NULL;
  if (file)
    *file = NULL;

  u = url_parse (url, &up_error_code);
  if (!u)
    {
      logprintf (LOG_NOTQUIET, "%s: %s.\n", url, url_error (up_error_code));
      xfree (url);
      return URLERROR;
    }
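The two arithmetic pieces in the listing are easy to sanity-check in isolation: calc_rate's 1024-based unit scaling, and limit_bandwidth's expected-time formula, expected = 1000.0 * chunk_bytes / opt.limit_rate (milliseconds, with the rate limit in bytes per second). The following standalone sketch reproduces both outside of wget; it uses none of wget's headers, and all demo_* names are invented here for illustration, not wget identifiers.

/* Standalone sketch (not part of wget).  demo_calc_rate mirrors
   calc_rate's unit selection; the block in main mirrors the
   "expected" computation in limit_bandwidth.  */
#include <stdio.h>

static const char *demo_rate_names[] = { "B/s", "KB/s", "MB/s", "GB/s" };

/* Same thresholds as calc_rate: divide by 1024 until the value drops
   below 1024 (capping at GB/s), recording the unit index.  */
static double
demo_calc_rate (long bytes, double msecs, int *units)
{
  double dlrate = 1000.0 * bytes / msecs;
  for (*units = 0; *units < 3 && dlrate >= 1024.0; ++*units)
    dlrate /= 1024.0;
  return dlrate;
}

int
main (void)
{
  int units;

  /* 16384 bytes in 10 ms = 1,638,400 B/s, which scales to 1.56 MB/s
     (units index 2).  */
  double rate = demo_calc_rate (16384, 10.0, &units);
  printf ("16K in 10 ms = %.2f %s\n", rate, demo_rate_names[units]);

  /* limit_bandwidth's arithmetic: with --limit-rate=2k (2048 bytes/s),
     a 2048-byte chunk is expected to take 1000 ms.  If it actually
     arrived in 300 ms, wget would sleep roughly the 700 ms difference
     (before the sleep_adjust correction).  */
  {
    long chunk_bytes = 2048, limit_rate = 2048;
    double delta_t = 300.0;     /* observed download time, ms */
    double expected = 1000.0 * chunk_bytes / limit_rate;
    if (expected > delta_t)
      printf ("sleep %.2f ms to honor the limit\n", expected - delta_t);
  }
  return 0;
}

Compiled and run, the sketch prints "1.56 MB/s" and a 700.00 ms sleep, which is the same correction limit_bandwidth would apply in that situation (it then refines subsequent sleeps with sleep_adjust, since usleep rarely wakes up exactly on time).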
