Best Practices
The BlockPI Network RPC service allows users to configure their endpoints with different features, such as Archive mode.
When Archive mode is enabled for a specific endpoint, requests sent to that endpoint are routed to archive nodes. These requests typically take longer to process because of the large amount of historical data involved. To bill users more accurately, requests served by archive nodes are charged an additional 30% fee, while the cost of regular requests stays low. For example, a request that would normally cost 10 RUs is billed 13 RUs when it is served by an archive node.
If you want your RUs to be used efficiently, send only the requests that actually need archive data to the archive-enabled endpoint. To do this, generate two endpoints, one for regular requests and another with Archive mode enabled, and route each request based on the block it targets. Here is an example:
# Send requests for data older than 128 blocks to the archive nodes
import json
import requests

def request():
    # Generate two keys: one normal, one with Archive mode enabled
    fullNodeUrl = "https://ethereum.blockpi.network/v1/rpc/<key-normal>"
    archiveNodeUrl = "https://ethereum.blockpi.network/v1/rpc/<key-with-archive-mode-on>"
    # Target block number
    blockNum = "0x10336aa"
    # Get the latest block number
    payload = {"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id": 83}
    headers = {"Content-Type": "application/json"}
    latestNum = requests.post(fullNodeUrl, headers=headers, data=json.dumps(payload)).json()['result']
    # Send the request to the appropriate endpoint: full nodes typically keep
    # state only for the most recent 128 blocks, so older blocks need an archive node
    traceBlockPayload = {"jsonrpc": "2.0", "method": "trace_block", "params": [blockNum], "id": 1}
    if int(latestNum, 16) - int(blockNum, 16) >= 128:
        resp = requests.post(archiveNodeUrl, headers=headers, data=json.dumps(traceBlockPayload))
    else:
        resp = requests.post(fullNodeUrl, headers=headers, data=json.dumps(traceBlockPayload))
    print(resp.text)

request()
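Another practice that keeps requests efficient is splitting large eth_getLogs queries into smaller block ranges. The example below fetches logs in fixed-size windows of blocks and aggregates the results, so each response stays small and a failed range can be retried on its own.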
import json
import requests

fullNodeUrl = "https://ethereum.blockpi.network/v1/rpc/<your-api-key>"
headers = {"Content-Type": "application/json"}
# Number of blocks to cover in a single eth_getLogs request
interval = 10

def get_logs(from_block_number, to_block_number):
    logs = []
    while from_block_number <= to_block_number:
        end_block_number = min(to_block_number, from_block_number + interval)
        payload = {
            "jsonrpc": "2.0",
            "id": 1,
            "method": "eth_getLogs",
            "params": [{
                "fromBlock": hex(from_block_number),
                "toBlock": hex(end_block_number)
            }]
        }
        response = requests.post(fullNodeUrl, headers=headers, data=json.dumps(payload))
        if response.status_code != 200:
            raise Exception("Failed to retrieve logs for block range:", from_block_number, end_block_number)
        result = response.json()["result"]
        logs.extend(result)
        from_block_number = end_block_number + 1
        print(response.json())
    return logs

def get_all_logs(from_block_number, to_block_number):
    logs = []
    current_block_number = from_block_number
    while current_block_number <= to_block_number:
        # Never query past the requested end block
        end_block_number = min(to_block_number, current_block_number + interval)
        logs_in_range = get_logs(current_block_number, end_block_number)
        logs.extend(logs_in_range)
        print("Processed block range:", current_block_number, "-", end_block_number, ", total logs:", len(logs_in_range))
        current_block_number = end_block_number + 1
    return logs

from_block_number = 10962850
to_block_number = 10962950
logs = get_all_logs(from_block_number, to_block_number)
print("Total logs:", len(logs))