GenAI API
The GenAI API is an interactive, user-friendly platform for experimenting with AI models: it lets you select models, configure parameters, and observe the results. It provides a space where data scientists, researchers, and developers can explore and test open-source machine learning models without extensive coding or infrastructure setup.
List of Models
To get the list of models, send a GET request to the GenAI Endpoint:
https://api.e2enetworks.com/myaccount/api/v1/gpu/teams/{{team_id}}/projects/{{project_id}}/model_playground/?apikey={{tapi_key}}
import requests
url = "https://api.e2enetworks.com/myaccount/api/v1/gpu/teams/{{team_id}}/projects/{{project_id}}/model_playground/?apikey={{tapi_key}}"
payload={}
headers = {
'Authorization': 'Bearer {{Token}}',
'Content-Type': 'application/json',
}
response = requests.request("GET", url, headers=headers, data=payload)
print(response.text)
curl --location -g 'https://api.e2enetworks.com/myaccount/api/v1/gpu/teams/{{team_id}}/projects/{{project_id}}/model_playground/?apikey={{tapi_key}}' \
--header 'Authorization: Bearer {{Token}}' \
--header 'Content-Type: application/json'
Content-Type: application/json
Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAi...
content-type: application/json; charset=utf-8
status: 202 Accepted
ratelimit-limit: 1200
ratelimit-remaining: 965
ratelimit-reset: 1415984218
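As a quick illustration, here is a minimal Python sketch that substitutes concrete values for the {{team_id}}, {{project_id}}, {{tapi_key}} and {{Token}} placeholders and prints the parsed response. The values shown are hypothetical, and the snippet makes no assumption about the response schema beyond it being JSON.

import json
import requests

# Hypothetical placeholder values -- replace with your own team ID, project ID,
# API key and bearer token.
TEAM_ID = "1234"
PROJECT_ID = "5678"
API_KEY = "your-api-key"
TOKEN = "your-bearer-token"

url = (
    "https://api.e2enetworks.com/myaccount/api/v1/gpu/teams/"
    f"{TEAM_ID}/projects/{PROJECT_ID}/model_playground/?apikey={API_KEY}"
)
headers = {
    "Authorization": f"Bearer {TOKEN}",
    "Content-Type": "application/json",
}

response = requests.get(url, headers=headers)
response.raise_for_status()

# Pretty-print whatever the endpoint returns; the exact schema is not shown above.
print(json.dumps(response.json(), indent=2))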
Stable Diffusion Response
To get the Stable Diffusion response for the provided request, send a POST request to the Inference Endpoint:
https://infer.e2enetworks.net/project/p-{{project_id}}/v1/stable-diffusion-2-1/infer?apikey={{tapi_key}}
import requests
import json
url = "https://infer.e2enetworks.net/project/p-{{project_id}}/v1/stable-diffusion-2-1/infer?apikey={{tapi_key}}"
payload = json.dumps({
"inputs": [
{
"name": "prompt",
"shape": [
1,
1
],
"datatype": "BYTES",
"data": [
"A photo of an astronaut riding a horse on mars"
]
},
{
"name": "height",
"shape": [
1,
1
],
"datatype": "UINT16",
"data": [
768
]
},
{
"name": "width",
"shape": [
1,
1
],
"datatype": "UINT16",
"data": [
768
]
},
{
"name": "num_inference_steps",
"shape": [
1,
1
],
"datatype": "UINT16",
"data": [
50
]
},
{
"name": "guidance_scale",
"shape": [
1,
1
],
"datatype": "FP32",
"data": [
7.5
]
},
{
"name": "guidance_rescale",
"shape": [
1,
1
],
"datatype": "FP32",
"data": [
0.7
]
}
]
})
headers = {
'Authorization': 'Bearer {{Token}}',
'Content-Type': 'application/json',
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
curl --location -g 'https://infer.e2enetworks.net/project/p-{{project_id}}/v1/stable-diffusion-2-1/infer?apikey={{tapi_key}}' \
--header 'Authorization: Bearer {{Token}}' \
--header 'Content-Type: application/json' \
--data '{
"inputs": [
{
"name": "prompt",
"shape": [
1,
1
],
"datatype": "BYTES",
"data": [
"A photo of an astronaut riding a horse on mars"
]
},
{
"name": "height",
"shape": [
1,
1
],
"datatype": "UINT16",
"data": [
768
]
},
{
"name": "width",
"shape": [
1,
1
],
"datatype": "UINT16",
"data": [
768
]
},
{
"name": "num_inference_steps",
"shape": [
1,
1
],
"datatype": "UINT16",
"data": [
50
]
},
{
"name": "guidance_scale",
"shape": [
1,
1
],
"datatype": "FP32",
"data": [
7.5
]
},
{
"name": "guidance_rescale",
"shape": [
1,
1
],
"datatype": "FP32",
"data": [
0.7
]
}
]
}'
Content-Type: application/json
Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAi...
content-type: application/json; charset=utf-8
status: 202 Accepted
ratelimit-limit: 1200
ratelimit-remaining: 965
ratelimit-reset: 1415984218
{
"inputs": [
{
"name": "prompt",
"shape": [
1,
1
],
"datatype": "BYTES",
"data": [
"A photo of an astronaut riding a horse on mars"
]
},
{
"name": "height",
"shape": [
1,
1
],
"datatype": "UINT16",
"data": [
768
]
},
{
"name": "width",
"shape": [
1,
1
],
"datatype": "UINT16",
"data": [
768
]
},
{
"name": "num_inference_steps",
"shape": [
1,
1
],
"datatype": "UINT16",
"data": [
50
]
},
{
"name": "guidance_scale",
"shape": [
1,
1
],
"datatype": "FP32",
"data": [
7.5
]
},
{
"name": "guidance_rescale",
"shape": [
1,
1
],
"datatype": "FP32",
"data": [
0.7
]
}
]
}
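The request body uses a generic inputs list in which each entry carries a name, shape, datatype and data field. Below is a rough Python sketch that builds the same payload with small helper functions and saves the generated image. It assumes the response mirrors this structure and returns the image as a base64-encoded string in outputs[0]["data"][0]; that output layout is an assumption, so verify it against your actual response. Replace the {{project_id}}, {{tapi_key}} and {{Token}} placeholders with real values.

import base64
import json
import requests

url = "https://infer.e2enetworks.net/project/p-{{project_id}}/v1/stable-diffusion-2-1/infer?apikey={{tapi_key}}"
headers = {"Authorization": "Bearer {{Token}}", "Content-Type": "application/json"}

def text_input(name, value):
    # Single-element BYTES input in the format shown above.
    return {"name": name, "shape": [1, 1], "datatype": "BYTES", "data": [value]}

def num_input(name, value, datatype):
    # Single-element numeric input (UINT16 / FP32) in the same format.
    return {"name": name, "shape": [1, 1], "datatype": datatype, "data": [value]}

payload = {"inputs": [
    text_input("prompt", "A photo of an astronaut riding a horse on mars"),
    num_input("height", 768, "UINT16"),
    num_input("width", 768, "UINT16"),
    num_input("num_inference_steps", 50, "UINT16"),
    num_input("guidance_scale", 7.5, "FP32"),
    num_input("guidance_rescale", 0.7, "FP32"),
]}

response = requests.post(url, headers=headers, data=json.dumps(payload))
response.raise_for_status()

# Assumption: the response mirrors the request structure and carries the generated
# image as a base64-encoded string in outputs[0]["data"][0]; confirm this against
# the actual response body before relying on it.
result = response.json()
with open("astronaut.png", "wb") as f:
    f.write(base64.b64decode(result["outputs"][0]["data"][0]))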
Whisper Large V3 Response
To get the Whisper Large V3 response for the provided request, send a POST request to the Inference Endpoint:
https://infer.e2enetworks.net/project/p-{{project_id}}/v1/whisper-large-v3/infer?apikey={{tapi_key}}
import requests
import json
url = "https://infer.e2enetworks.net/project/p-{{project_id}}/v1/whisper-large-v3/infer?apikey={{tapi_key}}"
payload = json.dumps({
"inputs": [
{
"name": "input",
"shape": [
1
],
"datatype": "BYTES",
"data": [
"{{path}}"
]
},
{
"name": "language",
"shape": [
1
],
"datatype": "BYTES",
"data": [
"English"
]
},
{
"name": "task",
"shape": [
1
],
"datatype": "BYTES",
"data": [
"transcribe"
]
},
{
"name": "max_new_tokens",
"shape": [
1
],
"datatype": "INT32",
"data": [
400
]
},
{
"name": "return_timestamps",
"shape": [
1
],
"datatype": "BYTES",
"data": [
"none"
]
}
]
})
headers = {
'Authorization': 'Bearer {{Token}}',
'Content-Type': 'application/json',
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
curl --location -g 'https://infer.e2enetworks.net/project/p-{{project_id}}/v1/whisper-large-v3/infer?apikey={{tapi_key}}' \
--header 'Authorization: Bearer {{Token}}' \
--header 'Content-Type: application/json' \
--data '{
"inputs": [
{
"name": "input",
"shape": [
1
],
"datatype": "BYTES",
"data": [
"{{path}}"
]
},
{
"name": "language",
"shape": [
1
],
"datatype": "BYTES",
"data": [
"English"
]
},
{
"name": "task",
"shape": [
1
],
"datatype": "BYTES",
"data": [
"transcribe"
]
},
{
"name": "max_new_tokens",
"shape": [
1
],
"datatype": "INT32",
"data": [
400
]
},
{
"name": "return_timestamps",
"shape": [
1
],
"datatype": "BYTES",
"data": [
"none"
]
}
]
}'
Content-Type: application/json
Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAi...
content-type: application/json; charset=utf-8
status: 202 Accepted
ratelimit-limit: 1200
ratelimit-remaining: 965
ratelimit-reset: 1415984218
{
"inputs": [
{
"name": "input",
"shape": [
1
],
"datatype": "BYTES",
"data": [
"/mnt/data/3005-1720514090-recording.wav"
]
},
{
"name": "language",
"shape": [
1
],
"datatype": "BYTES",
"data": [
"English"
]
},
{
"name": "task",
"shape": [
1
],
"datatype": "BYTES",
"data": [
"transcribe"
]
},
{
"name": "max_new_tokens",
"shape": [
1
],
"datatype": "INT32",
"data": [
400
]
},
{
"name": "return_timestamps",
"shape": [
1
],
"datatype": "BYTES",
"data": [
"none"
]
}
]
}
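To cut down on the boilerplate of building the inputs list by hand, the following sketch wraps the request shown above in a small helper and posts it. How the transcription is returned is not documented here, so the snippet simply prints the parsed response. Replace the {{project_id}}, {{tapi_key}} and {{Token}} placeholders with real values.

import json
import requests

url = "https://infer.e2enetworks.net/project/p-{{project_id}}/v1/whisper-large-v3/infer?apikey={{tapi_key}}"
headers = {"Authorization": "Bearer {{Token}}", "Content-Type": "application/json"}

def whisper_payload(audio_path, language="English", task="transcribe",
                    max_new_tokens=400, return_timestamps="none"):
    # Builds the "inputs" list in the layout shown above; every field is a
    # single-element tensor with shape [1].
    return {"inputs": [
        {"name": "input", "shape": [1], "datatype": "BYTES", "data": [audio_path]},
        {"name": "language", "shape": [1], "datatype": "BYTES", "data": [language]},
        {"name": "task", "shape": [1], "datatype": "BYTES", "data": [task]},
        {"name": "max_new_tokens", "shape": [1], "datatype": "INT32", "data": [max_new_tokens]},
        {"name": "return_timestamps", "shape": [1], "datatype": "BYTES", "data": [return_timestamps]},
    ]}

payload = whisper_payload("/mnt/data/3005-1720514090-recording.wav")
response = requests.post(url, headers=headers, data=json.dumps(payload))
response.raise_for_status()

# The transcription layout in the response is not documented above, so just
# inspect the parsed JSON to confirm the exact field names.
print(json.dumps(response.json(), indent=2))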
Llama2 Response
To get the Llama2 response for the provided request, send a POST request to the Inference Endpoint:
https://infer.e2enetworks.net/project/p-{{project_id}}/genai/v1/chat/completions
import requests
import json
url = "https://infer.e2enetworks.net/project/p-{{project_id}}/genai/v1/chat/completions"
payload = json.dumps({
"temperature": 0.5,
"max_tokens": 1024,
"top_p": 1,
"frequency_penalty": 0,
"seed": 999,
"presence_penalty": 1,
"stream": True,
"model": "llama-2-13b-chat",
"messages": [
{
"role": "user",
"content": "Can you write a poem about open source machine learning?"
}
]
})
headers = {
'Authorization': 'Bearer {{Token}}',
'Content-Type': 'application/json',
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
curl --location -g 'https://infer.e2enetworks.net/project/p-{{project_id}}/genai/v1/chat/completions' \
--header 'Authorization: Bearer {{Token}}' \
--header 'Content-Type: application/json' \
--data '{
"temperature": 0.5,
"max_tokens": 1024,
"top_p": 1,
"frequency_penalty": 0,
"seed": 999,
"presence_penalty": 1,
"stream": true,
"model": "llama-2-13b-chat",
"messages": [
{
"role": "user",
"content": "Can you write a poem about open source machine learning?"
}
]
}'
Content-Type: application/json
Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAi...
content-type: application/json; charset=utf-8
status: 202 Accepted
ratelimit-limit: 1200
ratelimit-remaining: 965
ratelimit-reset: 1415984218
{
"temperature": 0.5,
"max_tokens": 1024,
"top_p": 1,
"frequency_penalty": 0,
"seed": 999,
"presence_penalty": 1,
"stream": true,
"model": "llama-2-13b-chat",
"messages": [
{
"role": "user",
"content": "Can you write a poem about open source machine learning?"
}
]
}
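Since the request sets "stream": true, the endpoint is expected to return the completion incrementally rather than as a single JSON body. The sketch below consumes such a stream under the assumption that it follows the OpenAI-compatible server-sent-events convention (chunks prefixed with "data: " and terminated by "[DONE]", with incremental text in choices[0]["delta"]["content"]); verify this against the actual stream before relying on it. Replace the {{project_id}} and {{Token}} placeholders with real values.

import json
import requests

url = "https://infer.e2enetworks.net/project/p-{{project_id}}/genai/v1/chat/completions"
headers = {"Authorization": "Bearer {{Token}}", "Content-Type": "application/json"}

payload = {
    "temperature": 0.5,
    "max_tokens": 1024,
    "top_p": 1,
    "frequency_penalty": 0,
    "seed": 999,
    "presence_penalty": 1,
    "stream": True,
    "model": "llama-2-13b-chat",
    "messages": [
        {"role": "user", "content": "Can you write a poem about open source machine learning?"}
    ],
}

with requests.post(url, headers=headers, json=payload, stream=True) as response:
    response.raise_for_status()
    for line in response.iter_lines(decode_unicode=True):
        # Skip keep-alive blank lines and anything that is not an SSE data line.
        if not line or not line.startswith("data: "):
            continue
        chunk = line[len("data: "):]
        if chunk.strip() == "[DONE]":
            break
        # Assumption: each chunk is an OpenAI-style JSON object whose incremental
        # text lives in choices[0]["delta"]["content"].
        delta = json.loads(chunk)["choices"][0].get("delta", {})
        print(delta.get("content", ""), end="", flush=True)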
Llama3 Response
To get the Llama3 response for the provided request, send a POST request to the Inference Endpoint:
https://infer.e2enetworks.net/project/p-{{project_id}}/genai/v1/chat/completions
import requests
import json
url = "https://infer.e2enetworks.net/project/p-{{project_id}}/genai/v1/chat/completions"
payload = json.dumps({
"temperature": 0.5,
"max_tokens": 1024,
"top_p": 1,
"frequency_penalty": 0,
"seed": 999,
"presence_penalty": 1,
"stream": True,
"model": "llama-3-8b-instruct",
"messages": [
{
"role": "user",
"content": "Can you write a poem about open source machine learning?"
}
]
})
headers = {
'Authorization': 'Bearer {{Token}}',
'Content-Type': 'application/json',
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
curl --location -g 'https://infer.e2enetworks.net/project/p-{{project_id}}/genai/v1/chat/completions' \
--header 'Authorization: Bearer {{Token}}' \
--header 'Content-Type: application/json' \
--data '{
"temperature": 0.5,
"max_tokens": 1024,
"top_p": 1,
"frequency_penalty": 0,
"seed": 999,
"presence_penalty": 1,
"stream": true,
"model": "llama-3-8b-instruct",
"messages": [
{
"role": "user",
"content": "Can you write a poem about open source machine learning?"
}
]
}'
Content-Type: application/json
Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAi...
content-type: application/json; charset=utf-8
status: 202 Accepted
ratelimit-limit: 1200
ratelimit-remaining: 965
ratelimit-reset: 1415984218
{
"temperature": 0.5,
"max_tokens": 1024,
"top_p": 1,
"frequency_penalty": 0,
"seed": 999,
"presence_penalty": 1,
"stream": true,
"model": "llama-3-8b-instruct",
"messages": [
{
"role": "user",
"content": "Can you write a poem about open source machine learning?"
}
]
}
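The same chat completions endpoint can also be called without streaming by setting "stream" to false. The sketch below does that and extracts the generated text, assuming the response follows the OpenAI-compatible chat completions schema with the full message in choices[0]["message"]["content"]; that schema is an assumption to verify against the actual response. Replace the {{project_id}} and {{Token}} placeholders with real values.

import requests

url = "https://infer.e2enetworks.net/project/p-{{project_id}}/genai/v1/chat/completions"
headers = {"Authorization": "Bearer {{Token}}", "Content-Type": "application/json"}

payload = {
    "temperature": 0.5,
    "max_tokens": 1024,
    "top_p": 1,
    "stream": False,  # ask for the full completion in a single response
    "model": "llama-3-8b-instruct",
    "messages": [
        {"role": "user", "content": "Can you write a poem about open source machine learning?"}
    ],
}

response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()

# Assumption: OpenAI-compatible response schema, with the generated text in
# choices[0]["message"]["content"].
print(response.json()["choices"][0]["message"]["content"])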
Vector Embeddings Response
To get the Vector Embeddings response for the provided request, send a POST request to the Inference Endpoint (note that only a dimensions value of 4096 is supported):
https://infer.e2enetworks.net/project/p-{{project_id}}/genai/v1/embeddings
import requests
import json
url = "https://infer.e2enetworks.net/project/p-{{project_id}}/genai/v1/embeddings"
payload = json.dumps({
"model": "e5-mistral-7b-instruct",
"input": "Generate your text embeddings here",
"encoding_format": "float",
"dimensions": 4096  # only a dimensions value of 4096 is supported
})
headers = {
'Authorization': 'Bearer {{Token}}',
'Content-Type': 'application/json',
}
response = requests.request("POST", url, headers=headers, data=payload)
print(response.text)
curl --location -g 'https://infer.e2enetworks.net/project/p-{{project_id}}/genai/v1/embeddings' \
--header 'Authorization: Bearer {{Token}}' \
--header 'Content-Type: application/json' \
--data '{
"model": "e5-mistral-7b-instruct",
"input": "Generate your text embeddings here",
"encoding_format": "float",
"dimensions": 4096
}'
Content-Type: application/json
Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCIgOiAi...
content-type: application/json; charset=utf-8
status: 202 Accepted
ratelimit-limit: 1200
ratelimit-remaining: 965
ratelimit-reset: 1415984218
{
"model": "e5-mistral-7b-instruct",
"input": "Generate your text embeddings here",
"encoding_format": "float",
"dimensions": 4096
}
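As a rough sketch, assuming the endpoint returns an OpenAI-compatible embeddings response with the vector in data[0]["embedding"] (an assumption to verify against the actual response), the 4096-dimensional embedding can be read back like this. Replace the {{project_id}} and {{Token}} placeholders with real values.

import requests

url = "https://infer.e2enetworks.net/project/p-{{project_id}}/genai/v1/embeddings"
headers = {"Authorization": "Bearer {{Token}}", "Content-Type": "application/json"}

payload = {
    "model": "e5-mistral-7b-instruct",
    "input": "Generate your text embeddings here",
    "encoding_format": "float",
    "dimensions": 4096,  # only a dimensions value of 4096 is supported
}

response = requests.post(url, headers=headers, json=payload)
response.raise_for_status()

# Assumption: OpenAI-compatible embeddings schema, with the vector in
# data[0]["embedding"].
embedding = response.json()["data"][0]["embedding"]
print(len(embedding))  # expected: 4096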