Changes from 1 commit
Commits
39 commits
2c10950
Updating the project for Jupiter: adding server.py, worker.py …
ROBKEN2014 Feb 15, 2025
18ebab4
Updates the project for Jupiter with server, worker, .gitignore and Procfile
ROBKEN2014 Feb 16, 2025
fd72b49
Update README.md
ROBKEN2014 Feb 16, 2025
54f6983
Remove py-ellipticcurve from requirements (not available on PyPI)
ROBKEN2014 Feb 16, 2025
2be2a3e
Updates worker.py with the new modifications
ROBKEN2014 Feb 16, 2025
8a14362
Updates worker.py: splits the task across the cores and adds a progress bar…
ROBKEN2014 Feb 16, 2025
0d2495a
Reverts to the original strategy (Plutus) and removes the delay in the worker
ROBKEN2014 Feb 16, 2025
85f4964
Updates server.py: TASK_SIZE to 10000 and updated routes (Project…
ROBKEN2014 Feb 16, 2025
cef08eb
Reorganizes the structure: separates server and worker folders, moves the database to…
ROBKEN2014 Feb 16, 2025
c146836
Reorganizes the structure: separates server and worker folders, moves the database to…
ROBKEN2014 Feb 16, 2025
3d2688b
Adds requirements to the worker folder
ROBKEN2014 Feb 16, 2025
467ab68
Updates the Procfile to use files from the server folder
ROBKEN2014 Feb 16, 2025
3714a14
Updates the Procfile to use files from the server folder
ROBKEN2014 Feb 16, 2025
d024b2c
Updates the Procfile for deployment via subtree (removes cd server &&)
ROBKEN2014 Feb 16, 2025
cf66f87
Updates the Procfile for deployment via subtree (removes cd server &&)
ROBKEN2014 Feb 16, 2025
126150d
Updates worker.py to request tasks from the server and report completion…
ROBKEN2014 Feb 16, 2025
ffdc94d
Updates worker.py: requests tasks from the server, processes and reports …
ROBKEN2014 Feb 16, 2025
2b6b9f9
Updates TASK_SIZE to 1 million candidates per task (Project Jupiter)
ROBKEN2014 Feb 16, 2025
ed707ba
Updates TASK_SIZE to 1 million candidates per task (Project Jupiter)
ROBKEN2014 Feb 16, 2025
c89da51
Updates worker.py: requests tasks from the server, processes 1 million …
ROBKEN2014 Feb 16, 2025
dff5eee
Updates worker.py: splits the 1 million range across all cores, …
ROBKEN2014 Feb 17, 2025
511b003
updates the readme
ROBKEN2014 Feb 17, 2025
4ab6cca
Updates create_tables.py and requirements.txt for integration with Hero…
ROBKEN2014 Feb 18, 2025
e893c20
Merge branch 'main' of https://git.heroku.com/jupiter
ROBKEN2014 Feb 18, 2025
b9125d4
Updates .gitignore to ignore folder(s) unnecessary for the dep…
ROBKEN2014 Feb 18, 2025
0ef4735
Removes files unnecessary for the deploy
ROBKEN2014 Feb 18, 2025
990451a
Adds .slugignore to ignore the worker folder in the Heroku deploy
ROBKEN2014 Feb 18, 2025
3c43c6f
Adds gitignore update
ROBKEN2014 Feb 18, 2025
c5c90f3
Updates requirements.txt and other changes
ROBKEN2014 Feb 18, 2025
20b977a
Updates requirements.txt and other changes
ROBKEN2014 Feb 18, 2025
3795aac
Update server.py: Added SQLAlchemy integration and task distribution …
ROBKEN2014 Feb 18, 2025
6e986de
Define runtime.txt with python-3.12.2 to ensure compatibility with ps…
ROBKEN2014 Feb 18, 2025
2725086
Define runtime.txt with python-3.12.2 to fix psycopg2 compatibility i…
ROBKEN2014 Feb 19, 2025
91e207e
Move Procfile to root for Heroku deploy
ROBKEN2014 Feb 19, 2025
5031b6f
Moves runtime.txt, requirements.txt and Procfile to the server folder
ROBKEN2014 Feb 19, 2025
41cbf13
Create README.md
ROBKEN2014 Feb 19, 2025
3dd4b88
Update README.md
ROBKEN2014 Feb 19, 2025
7d7be8b
Update README.md
ROBKEN2014 Feb 19, 2025
aeea256
Update README.md
ROBKEN2014 Feb 19, 2025
Updates worker.py: splits the 1 million range across all cores, adds a blue progress bar every 30s, and handles KeyboardInterrupt
ROBKEN2014 committed Feb 17, 2025
commit dff5eeeee3312e6c29399c530b665a718abd4821
49 changes: 22 additions & 27 deletions worker/worker.py
@@ -11,11 +11,6 @@
import datetime
import requests

# Show the warning only in the main process
if __name__ == '__main__':
if multiprocessing.current_process().name == "MainProcess":
print("WARNING: Project Jupiter - This is a project for learning and educational purposes. Do not use this software in production!")

# Configuration
DATABASE = r'database/11_13_2022/' # Path to the database
SERVER_URL = "https://jupiter-55e84f25b2dc.herokuapp.com" # Server URL
@@ -24,6 +19,7 @@ def generate_private_key_with_task(candidate):
"""
Generates a 32-byte private key in which the first 27 bytes are random and the last 5
are the bytes of the candidate (big-endian).

Total: 27 + 5 = 32 bytes (256 bits).
"""
suffix = candidate.to_bytes(5, 'big')
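
As an aside for readers of this hunk, the layout described in the docstring is easy to illustrate in isolation. The sketch below is only an assumption of how such a key could be assembled: the diff shows just the suffix line, so the random 27-byte prefix source and the helper name private_key_from_candidate are hypothetical, and the real worker.py may generate its randomness differently.

import os

def private_key_from_candidate(candidate: int) -> bytes:
    # Sketch: 27 random prefix bytes + 5 candidate bytes (big-endian) = 32 bytes (256 bits).
    prefix = os.urandom(27)                 # assumed randomness source, not shown in the diff
    suffix = candidate.to_bytes(5, 'big')   # same encoding as in the hunk above
    return prefix + suffix

# The candidate always occupies the trailing 5 bytes of the key:
key = private_key_from_candidate(123456)
assert len(key) == 32 and key[-5:] == (123456).to_bytes(5, 'big')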
@@ -104,7 +100,7 @@ def load_database(substring_length):
print(f"Banco de dados carregado: {len(database)} entradas.")
return database

def process_subrange(sub_start, sub_end, processed_counter, substring, database, verbose):
def process_subrange(sub_start, sub_end, processed_counter, counter_lock, substring, database, verbose):
"""
Processes the candidates in the subrange:
- Generates the private key, converts it to a public key and address.
@@ -134,23 +130,23 @@ def process_subrange(sub_start, sub_end, processed_counter, substring, database,
requests.post(SERVER_URL + "/found", json=data, timeout=5)
except Exception as e:
print("Erro ao reportar resultado:", e)
with processed_counter.get_lock():
with counter_lock:
processed_counter.value += 1
# No delay for maximum efficiency

def display_progress(processed_counter, total, step=5):
def display_progress(processed_counter, counter_lock, total, step=5):
"""
Displays a blue progress bar that updates every 30 seconds.
Displays a blue progress bar every 30 seconds.
"""
BLUE = "\033[94m"
RESET = "\033[0m"
print(BLUE + "Monitor de progresso iniciado..." + RESET)
while True:
time.sleep(30) # Updates every 30 seconds
with processed_counter.get_lock():
with counter_lock:
count = processed_counter.value
pct = int((count / total) * 100)
bar = "[" + "#" * (pct // 5) + " " * ((100 - pct) // 5) + "]"
bar = "[" + "*" * (pct // 5) + " " * ((100 - pct) // 5) + "]"
print(BLUE + f"Progresso: {pct}% {bar}" + RESET)
if count >= total:
break
@@ -165,18 +161,23 @@ def process_task(task, database, args):
total_range = task_end - task_start
print(f"Processando tarefa de {task_start} até {task_end} (total: {total_range} candidatos)")
num_workers = args["cpu_count"]
processed_counter = multiprocessing.Value('i', 0)

# Create a Manager for the counter and a lock for synchronization
manager = multiprocessing.Manager()
processed_counter = manager.Value('i', 0)
counter_lock = manager.Lock()

subranges = []
subrange_size = total_range // num_workers
for i in range(num_workers):
sub_start = task_start + i * subrange_size
sub_end = task_end if i == num_workers - 1 else sub_start + subrange_size
subranges.append((sub_start, sub_end))
pool = multiprocessing.Pool(processes=num_workers)
progress_proc = multiprocessing.Process(target=display_progress, args=(processed_counter, total_range))
progress_proc = multiprocessing.Process(target=display_progress, args=(processed_counter, counter_lock, total_range))
progress_proc.start()
for sub in subranges:
pool.apply_async(process_subrange, args=(sub[0], sub[1], processed_counter, args["substring"], database, args["verbose"]))
pool.apply_async(process_subrange, args=(sub[0], sub[1], processed_counter, counter_lock, args["substring"], database, args["verbose"]))
pool.close()
pool.join()
progress_proc.join()
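
A note on the counter change in this hunk: a raw multiprocessing.Value can only be shared with child processes through inheritance, so passing it as an argument to Pool.apply_async raises a RuntimeError, and the Manager().Value proxy that replaces it has no get_lock() method, which is presumably why an explicit Manager().Lock() is introduced alongside it. A minimal, self-contained sketch of the same pattern (hypothetical names, not the committed code):

import multiprocessing

def count_up(n, counter, lock):
    # Increment the shared counter n times, guarding each update with the lock.
    for _ in range(n):
        with lock:
            counter.value += 1

if __name__ == '__main__':
    manager = multiprocessing.Manager()
    counter = manager.Value('i', 0)   # proxy object: picklable, safe to pass through a Pool
    lock = manager.Lock()             # explicit lock, since the Value proxy has no get_lock()

    with multiprocessing.Pool(processes=4) as pool:
        results = [pool.apply_async(count_up, args=(1000, counter, lock)) for _ in range(4)]
        for r in results:
            r.get()                   # re-raise any worker exception

    print(counter.value)              # expected: 4000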
@@ -186,9 +187,9 @@ def worker_main(database, args, global_counter):
"""
Main worker loop:
- Requests an interval (range) from the server (via GET /get_task).
- Processes the received range, splitting it across the available cores.
- Splits the range across all cores and processes it.
- Reports task completion (via POST /task_complete).
- Requests a new range.
- Requests a new interval.
"""
while True:
try:
@@ -207,7 +208,8 @@ def worker_main(database, args, global_counter):
process_task(task, database, args)

try:
payload = {"range": {"start": task["start"], "end": task["end"]}, "timestamp": datetime.datetime.now().isoformat()}
payload = {"range": {"start": task["start"], "end": task["end"]},
"timestamp": datetime.datetime.now().isoformat()}
requests.post(SERVER_URL + "/task_complete", json=payload, timeout=5)
print(f"Tarefa reportada como concluída para o range {task['start']} a {task['end']}")
except Exception as e:
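
The calls in this hunk (GET /get_task, POST /task_complete, and POST /found earlier in the file) outline the server API, but server.py itself is not part of this diff. The Flask sketch below is therefore only an assumption about what a compatible counterpart could look like: the route names and the start/end/range/timestamp fields come from the worker code, while the in-memory cursor and everything else (the real server reportedly uses SQLAlchemy and a database) is purely illustrative.

# Hypothetical minimal counterpart to the worker's HTTP calls; not the project's server.py.
from flask import Flask, jsonify, request

app = Flask(__name__)
TASK_SIZE = 1_000_000   # matches the "1 million candidates per task" commits
next_start = 0

@app.route("/get_task", methods=["GET"])
def get_task():
    global next_start
    task = {"start": next_start, "end": next_start + TASK_SIZE}
    next_start += TASK_SIZE
    return jsonify(task)

@app.route("/task_complete", methods=["POST"])
def task_complete():
    payload = request.get_json()   # {"range": {"start": ..., "end": ...}, "timestamp": ...}
    print("Completed:", payload["range"])
    return jsonify({"status": "ok"})

@app.route("/found", methods=["POST"])
def found():
    print("Match reported:", request.get_json())
    return jsonify({"status": "ok"})

if __name__ == "__main__":
    app.run()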
@@ -221,7 +223,7 @@ def print_help():

verbose: 0 or 1 (default 0) - If 1, prints detailed info.
substring: number of characters (from the end of the address) for database lookup (default 8).
cpu_count: number of processes to use (default: number of CPU cores).
cpu_count: number of worker processes to use (default: number of CPU cores).
"""
print(help_text)
sys.exit(0)
@@ -280,12 +282,5 @@ def print_help():
print("Processos iniciados:", args["cpu_count"])

global_counter = multiprocessing.Value('i', 0)
processes = []
for i in range(args["cpu_count"]):
p = multiprocessing.Process(target=worker_main, args=(database, args, global_counter))
p.start()
processes.append(p)
print(f"Iniciado worker {i+1}")

for p in processes:
p.join()
# Now all cores work together on a single task
worker_main(database, args, global_counter)
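
The commit message also mentions KeyboardInterrupt handling, but the corresponding hunk is collapsed in this view. Purely as an illustration of one way such handling could wrap the call above (an assumption, not the committed code):

# Hypothetical wrapper; the actual handling in worker.py may differ.
try:
    worker_main(database, args, global_counter)
except KeyboardInterrupt:
    print("\nInterrupted by user (Ctrl+C); shutting down worker.")
    sys.exit(0)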