
📄 fetchftp.html

📁 Written in Java; left over from an experiment. I was going to delete it, but I'm uploading it here instead so everyone can share it.
💻 HTML
📖 Page 1 of 4
<a name="318" href="#318">318</a>         } <strong>finally</strong> {<a name="319" href="#319">319</a>             recorder.close();<a name="320" href="#320">320</a>             close(socket);<a name="321" href="#321">321</a>         }<a name="322" href="#322">322</a> <a name="323" href="#323">323</a>         curi.setFetchStatus(200);<a name="324" href="#324">324</a>         <strong>if</strong> (dir) {<a name="325" href="#325">325</a>             extract(curi, recorder);<a name="326" href="#326">326</a>         }<a name="327" href="#327">327</a>         addParent(curi);<a name="328" href="#328">328</a>     }<a name="329" href="#329">329</a>     <a name="330" href="#330">330</a>     <a name="331" href="#331">331</a>     <em>/**<em>*</em></em><a name="332" href="#332">332</a> <em>     * Saves the given socket to the given recorder.</em><a name="333" href="#333">333</a> <em>     * </em><a name="334" href="#334">334</a> <em>     * @param curi      the curi that owns the recorder</em><a name="335" href="#335">335</a> <em>     * @param socket    the socket whose streams to save</em><a name="336" href="#336">336</a> <em>     * @param recorder  the recorder to save them to</em><a name="337" href="#337">337</a> <em>     * @throws IOException  if a network or file error occurs</em><a name="338" href="#338">338</a> <em>     * @throws InterruptedException  if the thread is interrupted</em><a name="339" href="#339">339</a> <em>     */</em><a name="340" href="#340">340</a>     <strong>private</strong> <strong>void</strong> saveToRecorder(<a href="../../../../org/archive/crawler/datamodel/CrawlURI.html">CrawlURI</a> curi,<a name="341" href="#341">341</a>             Socket socket, <a href="../../../../org/archive/util/HttpRecorder.html">HttpRecorder</a> recorder) <a name="342" href="#342">342</a>     throws IOException, InterruptedException {<a name="343" href="#343">343</a>         curi.setHttpRecorder(recorder);<a name="344" href="#344">344</a>         recorder.markContentBegin();<a name="345" href="#345">345</a>         recorder.inputWrap(socket.getInputStream());<a name="346" href="#346">346</a>         recorder.outputWrap(socket.getOutputStream());<a name="347" href="#347">347</a> <a name="348" href="#348">348</a>         <em class="comment">// Read the remote file/dir listing in its entirety.</em><a name="349" href="#349">349</a>         <strong>long</strong> softMax = 0;<a name="350" href="#350">350</a>         <strong>long</strong> hardMax = getMaxLength(curi);<a name="351" href="#351">351</a>         <strong>long</strong> timeout = (<strong>long</strong>)getTimeout(curi) * 1000;<a name="352" href="#352">352</a>         <strong>int</strong> maxRate = getFetchBandwidth(curi);<a name="353" href="#353">353</a>         <a href="../../../../org/archive/io/RecordingInputStream.html">RecordingInputStream</a> input = recorder.getRecordedInput();<a name="354" href="#354">354</a>         input.readFullyOrUntil(softMax, hardMax, timeout, maxRate);<a name="355" href="#355">355</a>     }<a name="356" href="#356">356</a>     <a name="357" href="#357">357</a>     <a name="358" href="#358">358</a>     <em>/**<em>*</em></em><a name="359" href="#359">359</a> <em>     * Extract FTP links in a directory listing.</em><a name="360" href="#360">360</a> <em>     * The listing must already be saved to the given recorder.</em><a name="361" href="#361">361</a> <em>     * </em><a name="362" href="#362">362</a> <em>     * @param curi      The curi to save extracted links to</em><a name="363" href="#363">363</a> <em>     * 
@param recorder  The recorder containing the directory listing</em><a name="364" href="#364">364</a> <em>     */</em><a name="365" href="#365">365</a>     <strong>private</strong> <strong>void</strong> extract(<a href="../../../../org/archive/crawler/datamodel/CrawlURI.html">CrawlURI</a> curi, <a href="../../../../org/archive/util/HttpRecorder.html">HttpRecorder</a> recorder) {<a name="366" href="#366">366</a>         <strong>if</strong> (!getExtractFromDirs(curi)) {<a name="367" href="#367">367</a>             <strong>return</strong>;<a name="368" href="#368">368</a>         }<a name="369" href="#369">369</a>         <a name="370" href="#370">370</a>         <a href="../../../../org/archive/io/ReplayCharSequence.html">ReplayCharSequence</a> seq = <strong>null</strong>;<a name="371" href="#371">371</a>         <strong>try</strong> {<a name="372" href="#372">372</a>             seq = recorder.getReplayCharSequence();<a name="373" href="#373">373</a>             extract(curi, seq);<a name="374" href="#374">374</a>         } <strong>catch</strong> (IOException e) {<a name="375" href="#375">375</a>             logger.log(Level.SEVERE, <span class="string">"IO error during extraction."</span>, e);<a name="376" href="#376">376</a>         } <strong>catch</strong> (RuntimeException e) {<a name="377" href="#377">377</a>             logger.log(Level.SEVERE, <span class="string">"IO error during extraction."</span>, e);<a name="378" href="#378">378</a>         } <strong>finally</strong> {<a name="379" href="#379">379</a>             close(seq);<a name="380" href="#380">380</a>         }<a name="381" href="#381">381</a>     }<a name="382" href="#382">382</a>     <a name="383" href="#383">383</a>     <a name="384" href="#384">384</a>     <em>/**<em>*</em></em><a name="385" href="#385">385</a> <em>     * Extracts FTP links in a directory listing.</em><a name="386" href="#386">386</a> <em>     * </em><a name="387" href="#387">387</a> <em>     * @param curi  The curi to save extracted links to</em><a name="388" href="#388">388</a> <em>     * @param dir   The directory listing to extract links from</em><a name="389" href="#389">389</a> <em>     * @throws URIException  if an extracted link is invalid</em><a name="390" href="#390">390</a> <em>     */</em><a name="391" href="#391">391</a>     <strong>private</strong> <strong>void</strong> extract(<a href="../../../../org/archive/crawler/datamodel/CrawlURI.html">CrawlURI</a> curi, <a href="../../../../org/archive/io/ReplayCharSequence.html">ReplayCharSequence</a> dir) {<a name="392" href="#392">392</a>         logger.log(Level.FINEST, <span class="string">"Extracting URIs from FTP directory."</span>);<a name="393" href="#393">393</a>         Matcher matcher = DIR.matcher(dir);<a name="394" href="#394">394</a>         <strong>while</strong> (matcher.find()) {<a name="395" href="#395">395</a>             String file = matcher.group(1);<a name="396" href="#396">396</a>             addExtracted(curi, file);<a name="397" href="#397">397</a>         }<a name="398" href="#398">398</a>     }<a name="399" href="#399">399</a> <a name="400" href="#400">400</a> <a name="401" href="#401">401</a>     <em>/**<em>*</em></em><a name="402" href="#402">402</a> <em>     * Adds an extracted filename to the curi.  
A new URI will be formed</em><a name="403" href="#403">403</a> <em>     * by taking the given curi (which should represent the directory the</em><a name="404" href="#404">404</a> <em>     * file lives in) and appending the file.</em><a name="405" href="#405">405</a> <em>     * </em><a name="406" href="#406">406</a> <em>     * @param curi  the curi to store the discovered link in</em><a name="407" href="#407">407</a> <em>     * @param file  the filename of the discovered link</em><a name="408" href="#408">408</a> <em>     */</em><a name="409" href="#409">409</a>     <strong>private</strong> <strong>void</strong> addExtracted(<a href="../../../../org/archive/crawler/datamodel/CrawlURI.html">CrawlURI</a> curi, String file) {<a name="410" href="#410">410</a>         <strong>try</strong> {<a name="411" href="#411">411</a>             file = URLEncoder.encode(file, <span class="string">"UTF-8"</span>);<a name="412" href="#412">412</a>         } <strong>catch</strong> (UnsupportedEncodingException e) {<a name="413" href="#413">413</a>             <strong>throw</strong> <strong>new</strong> AssertionError(e);<a name="414" href="#414">414</a>         }<a name="415" href="#415">415</a>         <strong>if</strong> (logger.isLoggable(Level.FINEST)) {<a name="416" href="#416">416</a>             logger.log(Level.FINEST, <span class="string">"Found "</span> + file);<a name="417" href="#417">417</a>         }<a name="418" href="#418">418</a>         String base = curi.toString();<a name="419" href="#419">419</a>         <strong>if</strong> (base.endsWith(<span class="string">"/"</span>)) {<a name="420" href="#420">420</a>             base = base.substring(0, base.length() - 1);<a name="421" href="#421">421</a>         }<a name="422" href="#422">422</a>         <strong>try</strong> {<a name="423" href="#423">423</a>             <a href="../../../../org/archive/net/UURI.html">UURI</a> n = <strong>new</strong> <a href="../../../../org/archive/net/UURI.html">UURI</a>(base + <span class="string">"/"</span> + file, <strong>true</strong>);<a name="424" href="#424">424</a>             <a href="../../../../org/archive/crawler/extractor/Link.html">Link</a> link = <strong>new</strong> <a href="../../../../org/archive/crawler/extractor/Link.html">Link</a>(curi.getUURI(), n, NAVLINK_MISC, NAVLINK_HOP);<a name="425" href="#425">425</a>             curi.addOutLink(link);<a name="426" href="#426">426</a>         } <strong>catch</strong> (URIException e) {<a name="427" href="#427">427</a>             logger.log(Level.WARNING, <span class="string">"URI error during extraction."</span>, e);            <a name="428" href="#428">428</a>         }<a name="429" href="#429">429</a>     }<a name="430" href="#430">430</a>     <a name="431" href="#431">431</a> <a name="432" href="#432">432</a>     <em>/**<em>*</em></em><a name="433" href="#433">433</a> <em>     * Extracts the parent URI from the given curi, then adds that parent</em><a name="434" href="#434">434</a> <em>     * URI as a discovered link to the curi. </em><a name="435" href="#435">435</a> <em>     * </em><a name="436" href="#436">436</a> <em>     * &lt;p>If the &lt;code>extract-parent&lt;/code> attribute is false, then this</em><a name="437" href="#437">437</a> <em>     * method does nothing.  
Also, if the path of the given curi is </em><a name="438" href="#438">438</a> <em>     * &lt;code>/&lt;/code>, then this method does nothing.</em><a name="439" href="#439">439</a> <em>     * </em><a name="440" href="#440">440</a> <em>     * &lt;p>Otherwise the parent is determined by eliminated the lowest part</em><a name="441" href="#441">441</a> <em>     * of the URI's path.  Eg, the parent of &lt;code>ftp://foo.com/one/two&lt;/code></em><a name="442" href="#442">442</a> <em>     * is &lt;code>ftp://foo.com/one&lt;/code>.</em><a name="443" href="#443">443</a> <em>     * </em><a name="444" href="#444">444</a> <em>     * @param curi  the curi whose parent to add</em><a name="445" href="#445">445</a> <em>     */</em><a name="446" href="#446">446</a>     <strong>private</strong> <strong>void</strong> addParent(<a href="../../../../org/archive/crawler/datamodel/CrawlURI.html">CrawlURI</a> curi) {<a name="447" href="#447">447</a>         <strong>if</strong> (!getExtractParent(curi)) {<a name="448" href="#448">448</a>             <strong>return</strong>;<a name="449" href="#449">449</a>         }<a name="450" href="#450">450</a>         <a href="../../../../org/archive/net/UURI.html">UURI</a> uuri = curi.getUURI();<a name="451" href="#451">451</a>         <strong>try</strong> {<a name="452" href="#452">452</a>             <strong>if</strong> (uuri.getPath().equals(<span class="string">"/"</span>)) {<a name="453" href="#453">453</a>                 <em class="comment">// There's no parent to add.</em><a name="454" href="#454">454</a>                 <strong>return</strong>;<a name="455" href="#455">455</a>             }<a name="456" href="#456">456</a>             String scheme = uuri.getScheme();<a name="457" href="#457">457</a>             String auth = uuri.getEscapedAuthority();<a name="458" href="#458">458</a>             String path = uuri.getEscapedCurrentHierPath();<a name="459" href="#459">459</a>             <a href="../../../../org/archive/net/UURI.html">UURI</a> parent = <strong>new</strong> <a href="../../../../org/archive/net/UURI.html">UURI</a>(scheme + <span class="string">"://"</span> + auth + path, false);<a name="460" href="#460">460</a> <a name="461" href="#461">461</a>             <a href="../../../../org/archive/crawler/extractor/Link.html">Link</a> link = <strong>new</strong> <a href="../../../../org/archive/crawler/extractor/Link.html">Link</a>(uuri, parent, NAVLINK_MISC, NAVLINK_HOP);<a name="462" href="#462">462</a>             curi.addOutLink(link);<a name="463" href="#463">463</a>         } <strong>catch</strong> (URIException e) {<a name="464" href="#464">464</a>             logger.log(Level.WARNING, <span class="string">"URI error during extraction."</span>, e);<a name="465" href="#465">465</a>         }<a name="466" href="#466">466</a>     }<a name="467" href="#467">467</a>     <a name="468" href="#468">468</a>     <a name="469" href="#469">469</a>     <em>/**<em>*</em></em><a name="470" href="#470">470</a> <em>     * Returns the &lt;code>extract.from.dirs&lt;/code> attribute for this</em><a name="471" href="#471">471</a> <em>     * &lt;code>FetchFTP&lt;/code> and the given curi.</em><a name="472" href="#472">472</a> <em>     * </em><a name="473" href="#473">473</a> <em>     * @param curi  the curi whose attribute to return</em><a name="474" href="#474">474</a> <em>     * @return  that curi's &lt;code>extract.from.dirs&lt;/code></em><a name="475" href="#475">475</a> <em>     */</em><a name="476" href="#476">476</a>     <strong>public</strong> 
<strong>boolean</strong> getExtractFromDirs(<a href="../../../../org/archive/crawler/datamodel/CrawlURI.html">CrawlURI</a> curi) {<a name="477" href="#477">477</a>         <strong>return</strong> (Boolean)get(curi, ATTR_EXTRACT, DEFAULT_EXTRACT);<a name="478" href="#478">478</a>     }<a name="479" href="#479">479</a>     <a name="480" href="#480">480</a>     
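
The DIR pattern used by extract(CrawlURI, ReplayCharSequence) is defined elsewhere in the class and is not visible on this page. As a standalone illustration of that matching loop, the sketch below applies an assumed one-name-per-line pattern (the kind of output an FTP NLST reply produces) with java.util.regex; the pattern and the sample listing are assumptions for demonstration, not the class's actual constant.

    import java.util.ArrayList;
    import java.util.List;
    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    public class DirListingSketch {
        // Assumed pattern: one filename per line. The real DIR constant may differ.
        private static final Pattern DIR = Pattern.compile("(.+)$", Pattern.MULTILINE);

        public static List<String> extractNames(CharSequence listing) {
            List<String> names = new ArrayList<>();
            Matcher matcher = DIR.matcher(listing);
            while (matcher.find()) {
                names.add(matcher.group(1));   // group(1) is the filename, as in extract() above
            }
            return names;
        }

        public static void main(String[] args) {
            // Hypothetical listing text for demonstration only.
            String listing = "README.txt\npub\nwelcome.msg\n";
            System.out.println(extractNames(listing));   // [README.txt, pub, welcome.msg]
        }
    }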
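
addExtracted() percent-encodes a discovered filename and appends it to the directory's own URI to form the child URI. Here is a minimal sketch of just that string handling using only JDK classes; buildChildUri is a hypothetical helper, and the Heritrix UURI/Link construction is left out.

    import java.io.UnsupportedEncodingException;
    import java.net.URLEncoder;

    public class ChildUriSketch {
        // Hypothetical helper mirroring the string handling in addExtracted().
        static String buildChildUri(String dirUri, String file) {
            try {
                file = URLEncoder.encode(file, "UTF-8");   // same encoding step as addExtracted()
            } catch (UnsupportedEncodingException e) {
                throw new AssertionError(e);               // UTF-8 is always available
            }
            String base = dirUri.endsWith("/")
                    ? dirUri.substring(0, dirUri.length() - 1)
                    : dirUri;
            return base + "/" + file;
        }

        public static void main(String[] args) {
            // "read me.txt" in ftp://foo.com/pub/ becomes ftp://foo.com/pub/read+me.txt
            System.out.println(buildChildUri("ftp://foo.com/pub/", "read me.txt"));
        }
    }

Note that URLEncoder encodes spaces as "+", so the sketch reproduces that quirk of the original code rather than RFC 3986 percent-encoding.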
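
addParent() derives the parent by dropping the lowest path segment, e.g. ftp://foo.com/one/two becomes ftp://foo.com/one, and skips the root path entirely. A rough equivalent of that path math using java.net.URI is sketched below; parentOf is a hypothetical helper, whereas the real method works on Heritrix's UURI and its getEscapedCurrentHierPath().

    import java.net.URI;

    public class ParentUriSketch {
        // Hypothetical helper: drop the lowest path segment, or return null at the root.
        static String parentOf(String uriString) {
            URI uri = URI.create(uriString);
            String path = uri.getPath();
            if (path == null || path.isEmpty() || path.equals("/")) {
                return null;                               // nothing above the root, as in addParent()
            }
            String trimmed = path.endsWith("/")
                    ? path.substring(0, path.length() - 1) // treat ".../one/" like ".../one"
                    : path;
            String parentPath = trimmed.substring(0, trimmed.lastIndexOf('/'));
            if (parentPath.isEmpty()) {
                parentPath = "/";                          // parent of a top-level entry is the root
            }
            return uri.getScheme() + "://" + uri.getRawAuthority() + parentPath;
        }

        public static void main(String[] args) {
            System.out.println(parentOf("ftp://foo.com/one/two"));  // ftp://foo.com/one
            System.out.println(parentOf("ftp://foo.com/"));         // null
        }
    }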
