• We just launched and are currently in beta. Join us as we build and grow the community.

Proxy Scraper (Scrapes proxies from proxyscrape.com)

RaidenTheRipper2021

Endgame Challenger
R Rep
0
0
0
Rep
0
R Vouches
0
0
0
Vouches
0
Posts
168
Likes
40
Bits
2 MONTHS
2 MONTHS OF SERVICE
LEVEL 1 200 XP
Proxy Scraper
Requirements:
1) Python 3
Downloads:
Source Code
Code:
Code:
import pandas as pd
import re
import urllib.request
print("Which PROXIES do you want?")
print("1. Http/Https \n2. Sock4 \n3. Sock5")
choice = input("Type 1,2 or 3>>>")
if choice=="1":
url ="https://api.proxyscrape.com/?request=getproxies&proxytype=http&timeout=10000&country=all&ssl=all&anonymity=all"
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib.request.Request(url, headers = headers)
resp = urllib.request.urlopen(req)
respData = resp.read()
print("Scraping proxies from proxyscrape.com")
proxies_df = pd.DataFrame(re.findall(r'\\r\\n(.*?)\\r\\n',str(respData)))
print("Done")
proxies_df.to_csv('http.txt', header=False, index=False, mode='w')
if choice=="2":
url ="https://api.proxyscrape.com/?request=getproxies&proxytype=socks4&timeout=10000&country=all&ssl=all&anonymity=all"
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib.request.Request(url, headers = headers)
resp = urllib.request.urlopen(req)
respData = resp.read()
print("Scraping proxies from proxyscrape.com")
proxies_df = pd.DataFrame(re.findall(r'\\r\\n(.*?)\\r\\n',str(respData)))
print("Done")
proxies_df.to_csv('socks4.txt', header=False, index=False, mode='w')
if choice =="3":
url ="https://api.proxyscrape.com/?request=getproxies&proxytype=socks5&timeout=10000&country=all&ssl=all&anonymity=all"
headers = {}
headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib.request.Request(url, headers = headers)
resp = urllib.request.urlopen(req)
respData = resp.read()
print("Scraping proxies from proxyscrape.com")
proxies_df = pd.DataFrame(re.findall(r'\\r\\n(.*?)\\r\\n',str(respData)))
print("Done")
proxies_df.to_csv('socks5.txt', header=False, index=False, mode='w')
HIT LIKE and Happy Cracking <3
 

452,292

323,341

323,350

Top