#!@PERL@
#
# getlinks.pl
#
# This script extracts all links from a HTML page, compares them to a pattern
# entered on the command line and then downloads matching links into the
# target dir (also specified on the command line).
#
# Written to use 'curl' for URL fetching, uses the source file names in the
# target directory.
#
# Author: Daniel Stenberg <Daniel.Stenberg@sth.frontec.se>
# Version: 0.1 Oct 7, 1998
#
# HISTORY
#
# 0.1 - Created now!
#
# --- Command-line parsing ----------------------------------------------------
# Flags may appear in any order before the positional arguments:
#   -v  verbose mode, -d  display matches only, -h  show help
$in = "";

while (@ARGV) {
    if ($ARGV[0] eq "-v") {
        $verbose = 1;
        shift @ARGV;
    }
    elsif ($ARGV[0] eq "-d") {
        $display = 1;
        shift @ARGV;
    }
    elsif ($ARGV[0] eq "-h") {
        $help = 1;
        shift @ARGV;
    }
    else {
        # first non-flag argument: stop option processing
        last;
    }
}

$geturl   = $ARGV[0];   # URL of the page to scan for links
$getdir   = $ARGV[1];   # directory to download the matches into
$getregex = $ARGV[2];   # optional pattern that resolved links must match

# A target dir is only optional in display (-d) mode.
# BUGFIX: the usage line used to say [-hv] and omit the -d flag that is
# documented right below it; "traling" typo also fixed.
if (($geturl eq "") ||
    (($getdir eq "") && !$display) ||
    $help) {
    print "Usage: $0 [-dhv] <full source URL> <target dir> [regex]\n",
        " Use a trailing slash for directory URLs!\n",
        " Use \"quotes\" around the regex!\n",
        " -h This help text\n",
        " -d Display matches only instead of downloading\n",
        " -v Verbose mode\n";
    exit;
}
# --- Working directory and curl command templates ----------------------------

# Move into the download target directory.
# BUGFIX: the old unconditional "chdir $getdir || die" aborted display-only
# (-d) runs, where an empty target dir is explicitly allowed by the argument
# check above; the error now also reports the OS reason via $!.
if ($getdir ne "") {
    chdir $getdir
        or die "couldn't cd into $getdir: $!";
}

# This is necessary from where I tried this (uncomment to use a proxy):
#$proxy =" -x 194.237.142.41:80";

# Link checker: the URL will be appended to the right of this command line.
# This is the one using HEAD:
$linkcheck = "curl -s -m 20 -I$proxy";

# As a second attempt, this will be used. This is not using HEAD but will
# get the whole frigging document!
$linkcheckfull = "curl -s -m 20 -i$proxy";

# htmlget: fetches the HTML page to scan (URL appended to the right).
$htmlget = "curl -s$proxy";

# urlget: downloads one matching link, storing the file under its remote
# name in the current dir (URL appended to the right).
$urlget = "curl -O -s$proxy";
# Split a URL into its components, storing the pieces in the globals
# $getprotocol, $getserver, $getpath and $getdocument.  Exits the whole
# program when the URL cannot be parsed at all.
sub SplitURL {
    my ($inurl) = @_;

    # protocol://server/path/document
    if ($inurl =~ /^([^:]+):\/\/([^\/]*)\/(.*)\/(.*)/) {
        ($getprotocol, $getserver, $getpath, $getdocument) = ($1, $2, $3, $4);
        return;
    }

    # protocol://server/something - decide whether "something" is a path
    # or a lone document name by checking it for a slash:
    if ($inurl =~ /^([^:]+):\/\/([^\/]*)\/(.*)/) {
        ($getprotocol, $getserver) = ($1, $2);
        my $tail = $3;
        if ($tail =~ /\//) {
            ($getpath, $getdocument) = ($tail, "");
        }
        else {
            ($getpath, $getdocument) = ("", $tail);
        }
        return;
    }

    # protocol://server with no path part at all
    if ($inurl =~ /^([^:]+):\/\/(.*)/) {
        ($getprotocol, $getserver, $getpath, $getdocument) = ($1, $2, "", "");
        return;
    }

    print "Couldn't parse the specified URL, retry please!\n";
    exit;
}
# --- Fetch the page to scan ---------------------------------------------------
&SplitURL($geturl);
#print "protocol = $getprotocol\n";
#print "server = $getserver\n";
#print "path = $getpath\n";
#print "document = $getdocument\n";
#exit;

if (!$usestdin) {
    # Probe the URL with a HEAD request first, so that a 3xx redirect can
    # be followed before fetching the whole page.
    open(HEADGET, "$linkcheck $geturl|")
        || die "Couldn't get web page for some reason";
  headget:
    while (<HEADGET>) {
        if ($_ =~ /HTTP\/.*3\d\d /) {
            # status line says "moved"; expect a Location: header below
            $pagemoved = 1;
        }
        elsif ($pagemoved &&
               ($_ =~ /^Location: (.*)/)) {
            # follow the redirect and re-parse the new URL
            $geturl = $1;
            &SplitURL($geturl);
            $pagemoved++;
            last headget;
        }
    }
    close(HEADGET);

    # Got a 3xx status but never saw a Location: header to follow.
    # BUGFIX: "traling" typo fixed in the user-facing message.
    if ($pagemoved == 1) {
        print "Page is moved but we don't know where. Did you forget the ",
            "trailing slash?\n";
        exit;
    }

    # Fetch the real page, keeping the raw lines in @indoc and building
    # one big newline-free string in $in for the link scanner.
    open(WEBGET, "$htmlget $geturl|")
        || die "Couldn't get web page for some reason";
    while (<WEBGET>) {
        $line = $_;
        push @indoc, $line;
        $line =~ s/\n//g;
        $line =~ s/\r//g;
        $in = $in . $line;
    }
    close(WEBGET);
}
else {
    # NOTE(review): $usestdin is never assigned anywhere in this script, so
    # this branch is currently unreachable - kept as-is for compatibility.
    while (<STDIN>) {
        $line = $_;
        push @indoc, $line;
        $line =~ s/\n//g;
        $line =~ s/\r//g;
        $in = $in . $line;
    }
}
# Extract all link URLs (src/href/background/archive attribute values) from
# the HTML document passed in as one single string.  Returns the list of
# unique URLs found.  Also records bookkeeping in the globals %done (how
# many times each URL was seen) and %tagtype (the tag each URL came from).
sub GetLinks {
    my $in = $_[0];
    my @result;

  getlinkloop:
    while ($in =~ /[^<]*(<[^>]+>)/g) {
        # we have a tag in $1
        $tag = $1;

        if ($tag =~ /^<!--/) {
            # this is a comment tag, ignore it
        }
        else {
            # BUGFIX: the quoted alternative used to be \"[^\"]\" which only
            # matched exactly ONE character between the quotes, so ordinary
            # quoted URLs fell through to [^ )>]* and broke on values
            # containing spaces, ')' or '>'.  Now any length is matched.
            if ($tag =~ /(src|href|background|archive) *= *(\"[^\"]*\"|[^ )>]*)/i) {
                $url = $2;

                if ($url =~ /^\"(.*)\"$/) {
                    # this was a "string", now $1 has removed the quotes:
                    $url = $1;
                }

                # strip any trailing #fragment part
                $url =~ s/([^\#]*)\#.*/$1/g;

                if ($url eq "") {
                    # if the link was nothing than a #-link it may now have
                    # been emptied completely so then we skip the rest
                    next getlinkloop;
                }

                if ($done{$url}) {
                    # if this url already is done, do next
                    $done{$url}++;
                    next getlinkloop;
                }

                $done{$url} = 1;    # this is "done"
                push @result, $url;

                if ($tag =~ /< *([^ ]+)/) {
                    $tagtype{$url} = $1;
                }
            }
        }
    }

    return @result;
}
# --- Resolve each link and fetch/display the matching ones --------------------
@links = &GetLinks($in);

linkloop:
for (@links) {
    $url = $_;

    if ($url =~ /^([^:]+):/) {
        # already absolute with a protocol of its own
        $link = $url;
    }
    else {
        # this is an absolute link on the same server:
        if ($url =~ /^\//) {
            # from root
            $link = "$getprotocol://$getserver$url";
        }
        else {
            # from the scanned page's dir
            $nyurl = $url;
            if (length($getpath) &&
                ($getpath !~ /\/$/) &&
                ($nyurl !~ /^\//)) {
                # lacks ending slash, add one to the document part:
                $nyurl = "/" . $nyurl;
            }
            $link = "$getprotocol://$getserver/$getpath$nyurl";
        }
    }

    # BUGFIX: when the optional regex argument is omitted, /$getregex/ used
    # to become the empty pattern //, which in Perl re-uses the LAST
    # SUCCESSFUL pattern rather than matching everything.  Treat "no regex"
    # explicitly as "match every link".
    if (($getregex eq "") || ($link =~ /$getregex/)) {
        if ($display) {
            print "$link\n";
        }
        else {
            if ($verbose) {
                print "Gets $link\n";
            }
            # XXX: $link is interpolated into a shell command line here; a
            # hostile page could inject shell metacharacters via a crafted
            # href.  Consider list-form exec or quoting before wider use.
            print `$urlget $link`;
        }
    }
}