Smart AI Proxy

Endpoint

smartproxy.crawlbase.com:8012
# Username = your token. Password = blank.
# Works for both HTTP and HTTPS targets. HTTP proxy mode is on :8012; an HTTPS proxy mode is available on :8013.

Quickstart

Set Smart AI Proxy as the proxy in your HTTP client. That's the entire setup.

curl

curl -x 'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012' \
     -k 'https://httpbin.org/ip'
Python

import requests

proxies = {
    'http':  'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012',
    'https': 'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012',
}
res = requests.get('https://httpbin.org/ip', proxies=proxies, verify=False)
print(res.text)
Node.js

const fetch = require('node-fetch'); // built-in fetch ignores `agent`; node-fetch honors it
const { HttpsProxyAgent } = require('https-proxy-agent');

process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; // equivalent of curl -k

const agent = new HttpsProxyAgent(
  'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012'
);

(async () => {
  const res = await fetch('https://httpbin.org/ip', { agent });
  console.log(await res.text());
})();
Ruby

require 'net/http'
require 'openssl'

uri = URI('https://httpbin.org/ip')
proxy = Net::HTTP::Proxy('smartproxy.crawlbase.com', 8012, 'YOUR_TOKEN', '')
http = proxy.new(uri.host, uri.port)
http.use_ssl = true
http.verify_mode = OpenSSL::SSL::VERIFY_NONE
puts http.get(uri.request_uri).body
Go

package main

import (
    "crypto/tls"
    "fmt"
    "io"
    "net/http"
    "net/url"
)

func main() {
    proxyURL, _ := url.Parse("http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012")
    client := &http.Client{Transport: &http.Transport{
        Proxy:           http.ProxyURL(proxyURL),
        TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
    }}
    res, _ := client.Get("https://httpbin.org/ip")
    defer res.Body.Close()
    body, _ := io.ReadAll(res.Body)
    fmt.Println(string(body))
}
Disable TLS verification

Smart AI Proxy intercepts TLS connections to add proxy headers. Your client will see Crawlbase's certificate instead of the target's, so set verify=False / InsecureSkipVerify: true / equivalent. The connection from Crawlbase to the target site is still verified.

POST requests

Smart AI Proxy forwards POST requests to the target like any other HTTP method. Set the proxy on your client and POST as you normally would - the proxy preserves your method, headers, and body. Examples below cover the two body shapes most clients use: form-encoded and JSON.

Form-encoded body

curl

# HTTP proxy on :8012 (use https:// + :8013 for HTTPS proxy)
curl -X POST \
     -H 'Content-Type: application/x-www-form-urlencoded' \
     --data 'param=value' \
     -x 'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012' \
     -k 'https://httpbin.org/anything'
Python

import requests
from urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(category=InsecureRequestWarning)

proxies = {
    'http':  'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012',
    'https': 'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012',
}
res = requests.post(
    'https://httpbin.org/anything',
    data={'param': 'value'},
    proxies=proxies,
    verify=False,
)
print(res.status_code, res.text)
Node.js

const fetch = require('node-fetch'); // built-in fetch ignores `agent`; node-fetch honors it
const { HttpsProxyAgent } = require('https-proxy-agent');
const querystring = require('querystring');

process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; // equivalent of curl -k

const agent = new HttpsProxyAgent(
  'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012'
);

(async () => {
  const res = await fetch('https://httpbin.org/anything', {
    method: 'POST',
    headers: { 'Content-Type': 'application/x-www-form-urlencoded' },
    body: querystring.stringify({ param: 'value' }),
    agent,
  });
  console.log(res.status, await res.text());
})();
Ruby

require 'net/http'
require 'openssl'
require 'uri'

uri = URI('https://httpbin.org/anything')
proxy = Net::HTTP::Proxy('smartproxy.crawlbase.com', 8012, 'YOUR_TOKEN', '')
http = proxy.new(uri.host, uri.port)
http.use_ssl = true
http.verify_mode = OpenSSL::SSL::VERIFY_NONE

req = Net::HTTP::Post.new(uri.request_uri)
req.set_form_data('param' => 'value')
res = http.request(req)
puts res.code, res.body
Go

package main

import (
    "crypto/tls"
    "fmt"
    "io"
    "net/http"
    "net/url"
    "strings"
)

func main() {
    proxyURL, _ := url.Parse("http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012")
    client := &http.Client{Transport: &http.Transport{
        Proxy:           http.ProxyURL(proxyURL),
        TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
    }}

    data := url.Values{}
    data.Set("param", "value")
    req, _ := http.NewRequest("POST",
        "https://httpbin.org/anything",
        strings.NewReader(data.Encode()))
    req.Header.Set("Content-Type", "application/x-www-form-urlencoded")

    res, _ := client.Do(req)
    defer res.Body.Close()
    body, _ := io.ReadAll(res.Body)
    fmt.Println(res.Status, string(body))
}

JSON body

curl

curl -X POST \
     -H 'Content-Type: application/json' \
     --data '{"key1":"value1","key2":"value2"}' \
     -x 'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012' \
     -k 'https://httpbin.org/anything'
Python

import requests

proxies = {
    'http':  'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012',
    'https': 'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012',
}
res = requests.post(
    'https://httpbin.org/anything',
    json={'key1': 'value1', 'key2': 'value2'},
    proxies=proxies,
    verify=False,
)
print(res.status_code, res.text)
Node.js

const fetch = require('node-fetch'); // built-in fetch ignores `agent`; node-fetch honors it
const { HttpsProxyAgent } = require('https-proxy-agent');

process.env.NODE_TLS_REJECT_UNAUTHORIZED = '0'; // equivalent of curl -k

const agent = new HttpsProxyAgent(
  'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012'
);

(async () => {
  const res = await fetch('https://httpbin.org/anything', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ key1: 'value1', key2: 'value2' }),
    agent,
  });
  console.log(res.status, await res.text());
})();
Ruby

require 'net/http'
require 'json'
require 'openssl'
require 'uri'

uri = URI('https://httpbin.org/anything')
proxy = Net::HTTP::Proxy('smartproxy.crawlbase.com', 8012, 'YOUR_TOKEN', '')
http = proxy.new(uri.host, uri.port)
http.use_ssl = true
http.verify_mode = OpenSSL::SSL::VERIFY_NONE

req = Net::HTTP::Post.new(uri.request_uri,
                          'Content-Type' => 'application/json')
req.body = { key1: 'value1', key2: 'value2' }.to_json
puts http.request(req).body

Forwarding headers and cookies

Smart AI Proxy passes most of the headers and cookies on your outgoing request through to the target, so existing clients keep working without modification. Two notable behaviors:

  • Your User-Agent is forwarded as-is. Send a blank one and the proxy rotates a realistic UA for you.
  • Hop-by-hop and proxy-control headers (Host, Proxy-Authorization) are stripped - they describe the proxy itself, not the request being forwarded.
curl -H 'Accept-Language: en-US,en;q=0.9' \
     -H 'X-Custom-Header: My-Custom-Value' \
     -H 'User-Agent: MyCustomBrowser/1.0' \
     --cookie 'sid=abc123; cart=xyz789' \
     -x 'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012' \
     -k 'https://httpbin.org/anything'

The example above arrives at the target with all three custom headers and both cookies intact. To override proxy behavior (country, device, session, JS rendering, scrapers, etc.) use the CrawlbaseAPI-* headers instead - those are interpreted by the proxy and never reach the target.
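
The same request from Python - a minimal sketch reusing the Quickstart proxy setup. httpbin.org/anything echoes the request back, so the response shows exactly which headers and cookies reached the target:

import requests

proxies = {
    'http':  'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012',
    'https': 'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012',
}
headers = {
    'Accept-Language': 'en-US,en;q=0.9',
    'X-Custom-Header': 'My-Custom-Value',
    'User-Agent': 'MyCustomBrowser/1.0',
}
cookies = {'sid': 'abc123', 'cart': 'xyz789'}

# The echoed response body lists the headers and cookies as the target saw them.
res = requests.get(
    'https://httpbin.org/anything',
    headers=headers,
    cookies=cookies,
    proxies=proxies,
    verify=False,
)
print(res.text)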

Headless browser rendering

Smart AI Proxy is backed by the same headless browser fleet as the Crawling API. To execute JavaScript, capture client-rendered SPAs, or apply Crawling API features that require a real browser (screenshots, scroll, click-selectors, autoparse), pass CrawlbaseAPI-Parameters: javascript=true as a header on your outgoing request.

# Render with a headless browser, force a 2s wait, scroll to load lazy content
curl -H 'CrawlbaseAPI-Parameters: javascript=true&page_wait=2000&scroll=true' \
     -x 'http://YOUR_JS_TOKEN:@smartproxy.crawlbase.com:8012' \
     -k 'https://spa.example.com/feed'

Use your JavaScript token (not the Normal token) when javascript=true is set - they're metered separately. The full set of browser-tier parameters (page_wait, scroll, css_click_selector, wait_for, screenshots) is reachable through CrawlbaseAPI-Parameters; see the JavaScript parameters reference for the canonical list.
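
The same rendering request from Python - a minimal sketch reusing the Quickstart setup with your JavaScript token (spa.example.com is the placeholder target from above):

import requests

proxies = {
    'http':  'http://YOUR_JS_TOKEN:@smartproxy.crawlbase.com:8012',
    'https': 'http://YOUR_JS_TOKEN:@smartproxy.crawlbase.com:8012',
}
# Control header: interpreted by the proxy, never forwarded to the target.
headers = {'CrawlbaseAPI-Parameters': 'javascript=true&page_wait=2000&scroll=true'}

res = requests.get('https://spa.example.com/feed',
                   headers=headers, proxies=proxies, verify=False)
print(res.text)  # rendered HTML, not the bare SPA shell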

When to use Smart AI Proxy vs the Crawling API

Smart AI Proxy and the Crawling API run on the same network and expose the same feature surface - JS rendering, anti-bot bypass, country routing, device emulation, sessions, scrapers, async + storage, all of it. The choice between them isn't about capability; it's about interface shape, which subscription you hold, and what concurrency tier that subscription provides.

Pick Smart AI Proxy when…

  • You can't change client code (third-party tool, browser extension, Scrapy, an existing scraper)
  • You'd rather configure a proxy once than rewrite every request to a new endpoint
  • Your subscription is on the Smart AI Proxy plan, with its own thread / concurrency tier
  • You want to drop Crawlbase in front of an existing pipeline with zero code changes

Pick the Crawling API (REST) when…

  • You're building from scratch and want explicit per-request control
  • You'd rather see the URL and parameters in plain GET form for logging / debugging
  • Your subscription is on the Crawling API plan, with its own monthly quota and concurrency budget
  • You want one of the SDKs to handle retries, async polling, and response parsing for you

All Crawling API parameters are reachable from Smart AI Proxy via the CrawlbaseAPI-Parameters header (see below). The capability surface is the same - pick the lane your subscription and integration shape favor.

Control headers

Pass custom headers prefixed with CrawlbaseAPI- on your outgoing request to control proxy behavior. The three single-purpose headers below are convenience shortcuts; the full Crawling API parameter set is reachable via CrawlbaseAPI-Parameters (documented after the table).

CrawlbaseAPI-Country
ISO 3166 code · optional
Force a specific country: US, GB, DE, etc.

CrawlbaseAPI-Device
desktop | mobile · default: desktop
Emulate device class.

CrawlbaseAPI-Session-Id
string · optional
Pin a session to the same exit IP. Useful for multi-step flows that need a stable identity. Sessions live for ~30 minutes.

CrawlbaseAPI-Parameters
query string · optional
The full Crawling API parameter set, passed as a single ampersand-joined string. Anything you'd append to a REST request - javascript=true, page_wait=2000, scroll=true, store=true, scraper=amazon-product-details, autoparse=true - works here. Combine multiple with &, e.g. javascript=true&country=US&store=true.

Using CrawlbaseAPI-Parameters

The single-purpose headers above (Country, Device, Session-Id) are shortcuts for the most common controls. Anything else from the Crawling API parameter set - JS rendering, scroll, click selectors, scrapers, async + webhooks + storage, get_cookies, get_headers - is reachable via the CrawlbaseAPI-Parameters header. The format is the same query string you'd append to a REST call:

# JS-rendered SPA, store the result, force US geo
curl -x 'http://YOUR_JS_TOKEN:@smartproxy.crawlbase.com:8012' \
     -H 'CrawlbaseAPI-Parameters: javascript=true&country=US&store=true&page_wait=2000' \
     -k 'https://spa.example.com/feed'

# Apply a scraper - same as &scraper=… on the REST endpoint
curl -x 'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012' \
     -H 'CrawlbaseAPI-Parameters: scraper=amazon-product-details' \
     -k 'https://www.amazon.com/dp/B0CHX2XFLN'

Conflict resolution: if you pass both a single-purpose header (e.g. CrawlbaseAPI-Country: GB) and the same field inside CrawlbaseAPI-Parameters, the single-purpose header wins. Pick one style per request to keep behavior predictable.

# Pin to a US session for a multi-step checkout flow
curl -x 'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012' \
     -H 'CrawlbaseAPI-Country: US' \
     -H 'CrawlbaseAPI-Session-Id: checkout-user-42' \
     -k 'https://shop.example.com/cart'
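
Sessions matter most across multiple requests. A sketch of the same flow in Python - both steps send the same CrawlbaseAPI-Session-Id, so both exit from the same IP (the checkout path and form fields are placeholders):

import requests

proxies = {
    'http':  'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012',
    'https': 'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012',
}

session = requests.Session()
session.proxies = proxies
session.verify = False
session.headers.update({
    'CrawlbaseAPI-Country': 'US',
    'CrawlbaseAPI-Session-Id': 'checkout-user-42',  # same id => same exit IP for ~30 min
})

# Both requests leave from the same exit IP because the session id matches.
session.get('https://shop.example.com/cart')
session.post('https://shop.example.com/checkout', data={'item': 'xyz789'})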

Errors

Smart AI Proxy returns standard HTTP responses. Status codes follow the same model as the Crawling API. Auth errors (401, 402) are returned by the proxy itself; site errors (404, 500, etc.) come from the target.
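
A minimal handling sketch in Python, following that split - treat 401/402 as proxy-side auth failures and anything else as the target's own response:

import requests

proxies = {
    'http':  'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012',
    'https': 'http://YOUR_TOKEN:@smartproxy.crawlbase.com:8012',
}
# httpbin.org/status/404 forces a target-side error for demonstration.
res = requests.get('https://httpbin.org/status/404',
                   proxies=proxies, verify=False)

if res.status_code in (401, 402):
    # Returned by the proxy itself: bad token or subscription problem.
    raise RuntimeError(f'Smart AI Proxy rejected the request: {res.status_code}')
elif res.status_code >= 400:
    # Forwarded from the target site unchanged.
    print(f'Target returned {res.status_code}')
else:
    print(res.text)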