
Commit

organic query issue fix
acer-king committed Oct 14, 2024
1 parent 2801190 commit 2f9b35d
Showing 1 changed file with 89 additions and 54 deletions.
143 changes: 89 additions & 54 deletions organic.py
@@ -1,91 +1,126 @@
import bittensor as bt
import asyncio
import random
import traceback
import csv

from cortext.protocol import StreamPrompting
from cortext.dendrite import CortexDendrite

async def generate_prompts(num_prompts=100):
    subjects = [
        "artificial intelligence",
        "climate change",
        "space exploration",
        "quantum computing",
        "renewable energy",
        "virtual reality",
        "biotechnology",
        "cybersecurity",
        "autonomous vehicles",
        "blockchain",
        "3D printing",
        "robotics",
        "nanotechnology",
        "gene editing",
        "Internet of Things",
        "augmented reality",
        "machine learning",
        "sustainable agriculture",
        "smart cities",
        "digital privacy",
    ]

    prompt_types = [
        "Explain the concept of",
        "Discuss the potential impact of",
        "Compare and contrast two approaches to",
        "Outline the future prospects of",
        "Describe the ethical implications of",
        "Analyze the current state of",
        "Propose a solution using",
        "Evaluate the pros and cons of",
        "Predict how {} will change in the next decade",
        "Discuss the role of {} in solving global challenges",
        "Explain how {} is transforming industry",
        "Describe a day in the life with advanced {}",
        "Outline the key challenges in developing {}",
        "Discuss the intersection of {} and another field",
        "Explain the historical development of",
    ]

    prompts = set()
    while len(prompts) < num_prompts:
        subject = random.choice(subjects)
        prompt_type = random.choice(prompt_types)
        # Templates with a "{}" placeholder are formatted; the rest get the subject appended.
        if "{}" in prompt_type:
            prompt = prompt_type.format(subject)
        else:
            prompt = f"{prompt_type} {subject}"
        prompts.add(prompt)

    return list(prompts)


async def query_miner(dendrite: CortexDendrite, axon_to_use, synapse, timeout=60, streaming=True):
    try:
        print(f"calling vali axon {axon_to_use} to miner uid {synapse.uid} for query {synapse.messages}")
        resp = dendrite.call_stream(
            target_axon=axon_to_use,
            synapse=synapse,
            timeout=timeout
        )
        return await handle_response(resp)
    except Exception:
        print(f"Exception during query: {traceback.format_exc()}")
        return None


async def handle_response(resp):
    full_response = ""
    try:
        async for chunk in resp:
            if isinstance(chunk, str):
                full_response += chunk
                print(chunk, end='', flush=True)
            else:
                print(f"\n\nFinal synapse: {chunk}\n")
    except Exception as e:
        print(f"Error processing response: {e}")
    return full_response


async def main():
print("synching metagraph, this takes way too long.........")
subtensor = bt.subtensor(network="test")
meta = subtensor.metagraph(netuid=196)
subtensor = bt.subtensor(network="finney")
meta = subtensor.metagraph(netuid=18)
print("metagraph synched!")

# This needs to be your validator wallet that is running your subnet 18 validator
wallet = bt.wallet(name="miner", hotkey="default")
wallet = bt.wallet(name="default", hotkey="default")
dendrite = CortexDendrite(wallet=wallet)
vali_uid = meta.hotkeys.index(wallet.hotkey.ss58_address)
axon_to_use = meta.axons[vali_uid]
print(f"axon to use: {axon_to_use}")

    # See options for providers/models here:
    # https://github.com/Datura-ai/cortex.t/blob/34f0160213d26a829e9619e3df9441760a0da1ad/cortext/constants.py#L10
    num_prompts = 10
    prompts = await generate_prompts(num_prompts)
    synapses = [StreamPrompting(
        messages=[{"role": "user", "content": prompt}],
        provider="OpenAI",
        model="gpt-4o"
    ) for prompt in prompts]

    async def query_and_log(synapse):
        return await query_miner(dendrite, axon_to_use, synapse)

    responses = await asyncio.gather(*[query_and_log(synapse) for synapse in synapses])

    # Save each prompt together with the miner's full streamed response.
    with open('miner_responses.csv', 'w', newline='', encoding='utf-8') as file:
        writer = csv.writer(file)
        writer.writerow(['Prompt', 'Response'])
        for prompt, response in zip(prompts, responses):
            writer.writerow([prompt, response])

    print("Responses saved to miner_responses.csv")


if __name__ == "__main__":
    asyncio.run(main())
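
For quick sanity-checking of the output, a minimal sketch of reading back miner_responses.csv with the standard library (not part of this commit; assumes the script above has already written the file):

import csv

with open('miner_responses.csv', newline='', encoding='utf-8') as f:
    for row in csv.DictReader(f):
        response = row['Response'] or '<no response>'
        # Print each prompt with a short preview of the miner's answer.
        print(f"{row['Prompt']} -> {response[:80]}")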
