import requests
import urllib.parse

# Scrape.do API mode: fetch the target page through the HTTP API endpoint.
token = "YOUR_TOKEN"
targetUrl = "https://httpbin.co/ip"

# Percent-encode the target so it survives nesting as a query parameter.
encoded_url = urllib.parse.quote(targetUrl)
url = f"http://api.scrape.do?token={token}&url={encoded_url}"
response = requests.get(url)

print(response.text)
import requests
import urllib3
# Scrape.do proxy mode: route the request through proxy.scrape.do.
# Disable warnings for self-signed certificate
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

token = "YOUR_TOKEN"
url = "https://httpbin.co/ip"

# The API token is the proxy username; the password slot stays empty.
proxy_url = f"http://{token}:[email protected]:8080"

proxies = {scheme: proxy_url for scheme in ("http", "https")}

# verify=False because the proxy presents a self-signed certificate.
response = requests.get(url, proxies=proxies, verify=False)

print(response.text)
# API mode: pass the token and the target URL as query parameters.
curl "http://api.scrape.do?token=YOUR_TOKEN&url=https://httpbin.co/ip"

# Proxy mode: the token is the proxy username and per-request parameters
# (render=true, customHeaders=false) travel in the password slot.
# The proxy endpoint speaks plain HTTP on port 8080 — every other
# proxy-mode sample in this file uses http://, so use it here as well.
# -k skips TLS verification of the proxy's self-signed certificate.
curl -k -x "http://YOUR_TOKEN:render=true&[email protected]:8080" 'https://httpbin.co/anything' -v
// Scrape.do API mode using the `request` package.
var request = require('request');

var token = "YOUR_TOKEN";
var targetUrl = "https://httpbin.co/ip";
// Percent-encode the target so it survives nesting as a query parameter.
var encodedUrl = encodeURIComponent(targetUrl);

var options = {
    method: 'GET',
    url: 'https://api.scrape.do?token=' + token + '&url=' + encodedUrl,
    headers: {}
};

request(options, function (error, response) {
    if (error) {
        console.log(error);
        return;
    }
    console.log(response.body);
});
// Scrape.do proxy mode using the `request` package.
var request = require('request');

var token = "YOUR_TOKEN";
var targetUrl = "https://httpbin.co/ip";

// The API token is the proxy username; the password slot stays empty.
var proxyUrl = 'http://' + token + ':@proxy.scrape.do:8080';

request({
    url: targetUrl,
    method: "GET",
    proxy: proxyUrl,
    rejectUnauthorized: false, // ignore self-signed certificate
}, function (error, response, body) {
    if (error) {
        console.log(error);
        return;
    }
    console.log(body);
});
<?php
// Scrape.do API mode: pass token and target URL as query parameters.
$ch = curl_init();

$params = [
   "url" => "https://httpbin.co/ip",
   "token" => "YOUR_TOKEN",
];

curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
curl_setopt($ch, CURLOPT_HEADER, false);
curl_setopt($ch, CURLOPT_CUSTOMREQUEST, 'GET');
// http_build_query percent-encodes the nested target URL.
curl_setopt($ch, CURLOPT_URL, "https://api.scrape.do?".http_build_query($params));
curl_setopt($ch, CURLOPT_HTTPHEADER, ["Accept: */*"]);

$result = curl_exec($ch);
curl_close($ch);
echo $result;
?>
<?php
// Scrape.do proxy mode: route the request through proxy.scrape.do.
$ch = curl_init();

$targetUrl = "https://httpbin.co/ip";
$token = "YOUR_TOKEN";

// The API token is the proxy username; the password slot stays empty.
$proxyAddress = sprintf("http://%s:[email protected]:8080", $token);

curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
curl_setopt($ch, CURLOPT_HEADER, false);
// The proxy presents a self-signed certificate for HTTPS targets,
// so TLS verification must be disabled.
curl_setopt($ch, CURLOPT_SSL_VERIFYHOST, 0);
curl_setopt($ch, CURLOPT_SSL_VERIFYPEER, 0);

curl_setopt($ch, CURLOPT_URL, $targetUrl);
curl_setopt($ch, CURLOPT_CUSTOMREQUEST, 'GET');
curl_setopt($ch, CURLOPT_PROXY, $proxyAddress);
curl_setopt($ch, CURLOPT_HTTPHEADER, ["Accept: */*"]);

$result = curl_exec($ch);
curl_close($ch);
echo $result;
?>
// Scrape.do API mode with OkHttp.
OkHttpClient client = new OkHttpClient().newBuilder()
  .build();
// Percent-encode the target so it survives nesting as a query parameter.
String encoded_url = URLEncoder.encode("https://httpbin.co/anything", "UTF-8");

// NOTE: the original built a RequestBody and called .method("GET", body);
// OkHttp's Request.Builder rejects that at runtime with
// IllegalArgumentException("method GET must not have a request body").
// Build a plain GET instead — no MediaType/RequestBody needed.
Request request = new Request.Builder()
  .url("https://api.scrape.do?token=YOUR_TOKEN&url=" + encoded_url)
  .get()
  .build();
Response response = client.newCall(request).execute();
import java.net.URI;
import java.util.Base64;
import org.apache.hc.client5.http.fluent.Request;
import org.apache.hc.core5.http.HttpHost;

public class TestRequest {
    /**
     * Fetches a page through the Scrape.do proxy endpoint and prints the
     * response body. The API token is the proxy username; the password
     * slot stays empty.
     */
    public static void main(final String... args) throws Exception {
        final String targetUrl = "https://httpbin.co/anything";
        final URI proxyUri = new URI("http://YOUR_TOKEN:@proxy.scrape.do:8080");

        // Pre-build the Basic auth header from the user-info part of the
        // proxy URI ("token:").
        final String credentials = proxyUri.getUserInfo();
        final String basicAuth = new String(
            Base64.getEncoder().encode(credentials.getBytes()));

        final String body = Request.get(targetUrl)
                .addHeader("Proxy-Authorization", "Basic " + basicAuth)
                .viaProxy(HttpHost.create(proxyUri))
                .execute()
                .returnContent()
                .asString();

        System.out.println(body);
    }
}
using System.Net;
internal class Program
{
    // Scrape.do API mode: call the HTTP API with the token and the
    // percent-encoded target URL as query parameters.
    static void Main(string[] args)
    {
        const string token = "YOUR_TOKEN";
        const string targetUrl = "https://httpbin.co/ip";

        var encodedTarget = WebUtility.UrlEncode(targetUrl);
        var apiUrl = $"https://api.scrape.do?token={token}&url={encodedTarget}";

        var client = new HttpClient();
        var request = new HttpRequestMessage(HttpMethod.Get, apiUrl);

        // Block synchronously; acceptable for a console entry point.
        var response = client.SendAsync(request).Result;
        var body = response.Content.ReadAsStringAsync().Result;

        Console.WriteLine(body);
    }
}
using System.Net;
internal class Program
{
    // Scrape.do proxy mode: route the request through proxy.scrape.do.
    static void Main(string[] args)
    {
        const string token = "YOUR_TOKEN";
        const string targetUrl = "https://httpbin.co/ip";

        // The API token is the proxy username; extra request parameters
        // (here customHeaders=false) travel in the password slot.
        var proxy = new WebProxy
        {
            Address = new Uri("http://proxy.scrape.do:8080"),
            Credentials = new NetworkCredential(token, "customHeaders=false")
        };

        var handler = new HttpClientHandler
        {
            Proxy = proxy,
            UseProxy = true
        };

        // The proxy presents a self-signed certificate; accept it.
        handler.ServerCertificateCustomValidationCallback =
            (sender, cert, chain, sslPolicyErrors) => true;

        var client = new HttpClient(handler);
        var request = new HttpRequestMessage(HttpMethod.Get, targetUrl);

        // Block synchronously; acceptable for a console entry point.
        var response = client.SendAsync(request).Result;
        var body = response.Content.ReadAsStringAsync().Result;

        Console.WriteLine(body);
    }
}
Revolut Logo Zeo Logo Otelz Logo Tripadvisor Logo Expedia Logo Shopee Logo

Stable, Reliable, Lightning-Fast - a new way to collect data

Let Scrape.do do the heavy lifting and focus on what matters.

  • Integrate in less than 30 seconds
  • Cut corners managing proxies, headless browsers, CAPTCHAs
  • Boost system performance with less strain on RAM and CPU
  • Built to meet the needs of every industry, big or small
chrome

Headless Browsers

Access target pages just like a real user - no blocks, no restrictions

Render Pages Like a Real Browser

Automate Browser Interactions

Wait for Complete Data Loading

Set Up Custom Automations

chrome

95M+ Premium Proxies

Scrape any website using our huge proxy network - send a request and we'll handle the rest, including rotation for each request.

Residential Proxies

Mobile Proxies

Datacenter Proxies

Tired of handling proxies, headless browsers, and CAPTCHAs manually?

There's a simpler and better way to collect the data you need.

Rotating Proxies

Websites with tight restrictions are no match for our rotating proxies.

Geotargeting

USA, UK, Canada, Turkey, EU or more! Get the data you need from anywhere in the world with proxies across 150+ countries

Avoiding Blocks & Captcha

Got blocked? Our automated proxy rotator assigns you a new IP from a new location immediately.

Backconnect Proxy

Get assigned a different IP for each access request so you’re never blocked, let alone identified.

24/7 Expert Support

Need help scraping the web with Scrape.do? Reach out anytime and a developer will be there for you.

Simulate User Interactions

Click buttons, open popups, scroll pages - mimic real user behavior directly through your headless browser.

Unlimited Bandwidth

Forget data caps—scrape as much as you need without worrying about usage limits.

Callback / Webhook

Receive instant notifications when your data is processed—automate workflows with real-time data delivery.

99.9%

Uptime over the last 6 months.

5 mins.

Average support response time.

+5B

Requests every month.

+1000

Happy customers.

Customize Your Web Scraping API to Meet Your Market Research Needs

From request headers and cookies to geographic targeting and JavaScript rendering;
Control every detail to match your requirements.

  • Render single-page apps effortlessly - ReactJS, AngularJS, VueJS?
    - We've got you covered
  • Scrape web pages that require JavaScript render
    - only by passing a parameter!
  • Automatically locate an unblocked IP for your target website
    - without lifting a finger!
illustration

Hear It From Our Customers

  • We were using Scrape.do for our competitor tracking feature. As we need to reach various pages behind of different technologies, we need a super proxy that can handle all the request without hustle. In our company culture, delivering fast APIs is crucial. That is why we have tested similar solutions but thanks to scrape.do's friendly support and custom approach, we have reached the speed average like 800ms-900ms per each request and %99+ success rate which is amazing.


    Yigit K. Product Manager

  • We appreciate the security measures employed by your company, which work in the interest of all of us. Once you register, it's not difficult to figure out how to use the services to support our own research and development. Your support engineers are very helpful in helping me find a solution that works for us, and your product teams are always coming up with new features and products that add value for research and development. The latency isn't bad, and we hardly ever have problems with proxies failing during testing runs.


    Emre E. CEO

  • Scape.do firstly we did meeting and good understanding my business, and we defined I'm enterprise user :) We did some analyse on our side and Scrape.do gave powerfull end point to us. After sales we wanted to some customize feature and did. Thank you Scape.do!


    Ibrahim B. Head of Product

  • We are using the web scraping API of Scrape.do in one of the core modules of our SaaS project. The average response times of their API credits are far away better than the Scraper API. And they have top-notch customer service. I am suggesting their service.


    Hasan T. Co-Founder

Listen to The Scraper Podcast

Frequently Asked Questions

Find answers to the most common questions about Scrape.do

Yes, you can! Simply head to your dashboard to cancel, and you won’t be charged again - unless, of course, you decide to come back!

Yes! Get 1,000 successful API credits with 5 concurrent requests, completely free. Sign up and explore what’s possible with our data harvesting tools, no strings attached.

Though it’s rare, we do offer refunds within the first three days of purchase. Just make sure to review our refunds policy for details.

Not at all! Our free trial is truly free - no credit card required.

Absolutely not! Only successful requests use up API credits—failed requests won’t be deducted from your balance.

Yes! View your usage data directly on your dashboard page, or access detailed insights with our Statistics API.

You can force Scrape.do to use the mobile and residential IP pool by using the super proxy. It’s the best proxy type for web scraping and data harvesting.

We’ve got you covered! Contact us at [email protected] for a customer service package tailored to your exact needs.

Absolutely! We offer a fully customizable scraping API to tailor to your needs. Reach out at [email protected] and let’s talk about what we can do together!

Our extensive documentation covers every technical detail you might need - built to get you up and running without a hitch. But if you need more help, expert developers are available 24/7 at [email protected]

Need help?
Reach out directly!