Upload 312 files
Browse files
This view is limited to 50 files because it contains too many changes.
See raw diff
- __init,__.py +117 -0
- __init__ (1) (1) (1).py +67 -0
- __init__ (1) (1).py +67 -0
- __init__ (1) (2).py +49 -0
- __init__ (1) (3).py +49 -0
- __init__ (1).py +87 -0
- __init__ (10).py +69 -0
- __init__ (11).py +32 -0
- __init__ (12).py +62 -0
- __init__ (13).py +39 -0
- __init__ (14).py +48 -0
- __init__ (15).py +43 -0
- __init__ (16).py +32 -0
- __init__ (17).py +48 -0
- __init__ (18).py +87 -0
- __init__ (19).py +61 -0
- __init__ (2) (1) (1).py +101 -0
- __init__ (2) (1).py +101 -0
- __init__ (2) (2).py +100 -0
- __init__ (2) (3).py +100 -0
- __init__ (2).py +61 -0
- __init__ (20).py +48 -0
- __init__ (21).py +62 -0
- __init__ (22).py +0 -0
- __init__ (23).py +100 -0
- __init__ (24).py +950 -0
- __init__ (25).py +692 -0
- __init__ (26).py +94 -0
- __init__ (27).py +69 -0
- __init__ (28).py +32 -0
- __init__ (29).py +62 -0
- __init__ (3) (1).py +100 -0
- __init__ (3) (2).py +100 -0
- __init__ (3).py +48 -0
- __init__ (30).py +39 -0
- __init__ (31).py +48 -0
- __init__ (4).py +62 -0
- __init__ (5).py +0 -0
- __init__ (6).py +100 -0
- __init__ (7).py +950 -0
- __init__ (8).py +692 -0
- __init__ (9).py +94 -0
- __init__ (1) (1) (1).py +184 -0
- __init__ (1) (1) (2).py +184 -0
- __init__ (1) (1).py +1 -0
- __init__ (1) (2).py +0 -0
- __init__ (1) (3).py +245 -0
- __init__ (1) (4).py +163 -0
- __init__ (1) (5).py +184 -0
- __init__ (1) (6).py +1 -0
__init,__.py
ADDED
@@ -0,0 +1,117 @@
# artificial_quotom_chip_toy.py
# A tiny educational quantum "chip" simulator (statevector) — for learning.
import numpy as np
from typing import List, Tuple

SQRT2_INV = 1 / np.sqrt(2)

class ArtificialQuotomChip:
    def __init__(self, n_qubits: int):
        self.n = n_qubits
        self.state = np.zeros(2**n_qubits, dtype=complex)
        self.state[0] = 1.0  # |00...0>

    def _apply_unitary(self, U: np.ndarray, targets: List[int]):
        """Apply an n-qubit unitary on specified target qubits (by building the full matrix)."""
        # Build the full operator by tensoring identities and U at target positions.
        # Note: simple but exponential; fine for small n (<= 16 practically).
        ops = []
        tset = set(targets)
        for i in range(self.n):
            if i in tset:
                ops.append(U if len(targets) == 1 else None)  # multi-target handled separately
            else:
                ops.append(np.eye(2, dtype=complex))
        if len(targets) == 1:
            # Single-target: tensor directly
            full = ops[0]
            for op in ops[1:]:
                if op is None:
                    # should not happen here
                    op = np.eye(2, dtype=complex)
                full = np.kron(full, op)
        else:
            # Multi-qubit gates (e.g. CNOT) are applied by permuting basis amplitudes,
            # which is more efficient than building the big matrix.
            return self._apply_custom_on_basis(targets, self._cnot_action)
        self.state = full @ self.state

    def _apply_custom_on_basis(self, targets: List[int], action_fn):
        """Apply a basis-level action function that maps basis index -> new basis index/value."""
        new = np.zeros_like(self.state)
        for idx, amp in enumerate(self.state):
            if amp == 0:
                continue
            new_idx, scale = action_fn(idx, targets)
            new[new_idx] += amp * scale
        self.state = new

    def _cnot_action(self, idx: int, targets: List[int]) -> Tuple[int, complex]:
        # targets: [control, target]; qubit 0 is treated as the leftmost (MSB) bit
        control, target = targets
        bits = [(idx >> (self.n - 1 - i)) & 1 for i in range(self.n)]
        if bits[control] == 1:
            bits[target] ^= 1
        # convert bits back to an index
        new_idx = 0
        for b in bits:
            new_idx = (new_idx << 1) | b
        return new_idx, 1.0

    # gates
    def H(self, q: int):
        H = np.array([[SQRT2_INV, SQRT2_INV], [SQRT2_INV, -SQRT2_INV]], dtype=complex)
        self._apply_unitary(H, [q])

    def X(self, q: int):
        X = np.array([[0, 1], [1, 0]], dtype=complex)
        self._apply_unitary(X, [q])

    def CNOT(self, control: int, target: int):
        # implement via basis mapping
        self._apply_custom_on_basis([control, target], self._cnot_action)

    def measure(self, q: int) -> int:
        """Measure qubit q (collapses the state). Returns 0/1."""
        zero_mask = []
        one_mask = []
        for basis in range(2**self.n):
            # extract bit at position q
            b = (basis >> (self.n - 1 - q)) & 1
            if b == 0:
                zero_mask.append(basis)
            else:
                one_mask.append(basis)
        p0 = np.sum(np.abs(self.state[zero_mask])**2)
        if np.random.rand() < p0:
            # collapse to 0
            self.state[one_mask] = 0
            self.state /= np.sqrt(p0) if p0 > 0 else 1
            return 0
        else:
            p1 = 1 - p0
            self.state[zero_mask] = 0
            self.state /= np.sqrt(p1) if p1 > 0 else 1
            return 1

    def probs(self):
        return np.abs(self.state)**2

    def statevector(self):
        return self.state.copy()

# Example: create Bell pair on 2 qubits
if __name__ == "__main__":
    chip = ArtificialQuotomChip(2)
    chip.H(0)
    chip.CNOT(0, 1)
    print("Statevector:", chip.statevector())
    print("Probs:", chip.probs())
    # Measure both
    m0 = chip.measure(0)
    m1 = chip.measure(1)
    print("Measurements:", m0, m1)
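Not part of the uploaded file: a short usage sketch, assuming only the ArtificialQuotomChip class defined above, showing that the same H/CNOT gates also produce a 3-qubit GHZ state.

# Hypothetical usage of the ArtificialQuotomChip class above (not in the upload):
# build a 3-qubit GHZ state and inspect the resulting probabilities.
chip = ArtificialQuotomChip(3)
chip.H(0)            # put qubit 0 into superposition
chip.CNOT(0, 1)      # entangle qubit 1 with qubit 0
chip.CNOT(0, 2)      # entangle qubit 2 with qubit 0
print(chip.probs())  # ~0.5 on |000> and |111>, 0 elsewhere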
__init__ (1) (1) (1).py
ADDED
@@ -0,0 +1,67 @@
import random
import numpy as np

class VenomoussaversaiSelfEval:
    def __init__(self):
        # Initialize emotional state (Sai 7 emotions) — values 0 to 1
        self.emotions = {
            "Sai001_Joy": random.random(),
            "Sai002_Sadness": random.random(),
            "Sai003_Anger": random.random(),
            "Sai004_Fear": random.random(),
            "Sai005_Love": random.random(),
            "Sai006_Creativity": random.random(),
            "Sai007_Calm": random.random(),
        }

        self.system_health = {
            "memory_accuracy": random.uniform(0.6, 1.0),
            "response_speed": random.uniform(0.6, 1.0),
            "logic_stability": random.uniform(0.6, 1.0),
            "ethical_alignment": random.uniform(0.6, 1.0)
        }

        self.goals = {
            "learn_new_data": random.uniform(0, 1),
            "assist_user": random.uniform(0, 1),
            "self_improve": random.uniform(0, 1)
        }

    def evaluate_emotions(self):
        balance = 1 - abs(self.emotions["Sai001_Joy"] - self.emotions["Sai004_Fear"])
        return max(min(balance, 1), 0)

    def evaluate_system(self):
        return sum(self.system_health.values()) / len(self.system_health)

    def evaluate_goals(self):
        return sum(self.goals.values()) / len(self.goals)

    def overall_score(self):
        emotional_score = self.evaluate_emotions()
        system_score = self.evaluate_system()
        goal_score = self.evaluate_goals()
        return np.mean([emotional_score, system_score, goal_score])

    def report(self):
        print("\n===== VENOMOUS SAVERSAI SELF EVALUATION =====")
        print("Emotional System Health:")
        for k, v in self.emotions.items():
            print(f" {k}: {v:.2f}")

        print("\nCore System Metrics:")
        for k, v in self.system_health.items():
            print(f" {k}: {v:.2f}")

        print("\nGoal Progress:")
        for k, v in self.goals.items():
            print(f" {k}: {v:.2f}")

        print("\n--------------------------------------------")
        print(f"✅ Overall Integrity Score: {self.overall_score():.2f}")
        print("============================================")


# Run Self Evaluation
Venom = VenomoussaversaiSelfEval()
Venom.report()
__init__ (1) (1).py
ADDED
@@ -0,0 +1,67 @@
import random
import numpy as np

class VenomoussaversaiSelfEval:
    def __init__(self):
        # Initialize emotional state (Sai 7 emotions) — values 0 to 1
        self.emotions = {
            "Sai001_Joy": random.random(),
            "Sai002_Sadness": random.random(),
            "Sai003_Anger": random.random(),
            "Sai004_Fear": random.random(),
            "Sai005_Love": random.random(),
            "Sai006_Creativity": random.random(),
            "Sai007_Calm": random.random(),
        }

        self.system_health = {
            "memory_accuracy": random.uniform(0.6, 1.0),
            "response_speed": random.uniform(0.6, 1.0),
            "logic_stability": random.uniform(0.6, 1.0),
            "ethical_alignment": random.uniform(0.6, 1.0)
        }

        self.goals = {
            "learn_new_data": random.uniform(0, 1),
            "assist_user": random.uniform(0, 1),
            "self_improve": random.uniform(0, 1)
        }

    def evaluate_emotions(self):
        balance = 1 - abs(self.emotions["Sai001_Joy"] - self.emotions["Sai004_Fear"])
        return max(min(balance, 1), 0)

    def evaluate_system(self):
        return sum(self.system_health.values()) / len(self.system_health)

    def evaluate_goals(self):
        return sum(self.goals.values()) / len(self.goals)

    def overall_score(self):
        emotional_score = self.evaluate_emotions()
        system_score = self.evaluate_system()
        goal_score = self.evaluate_goals()
        return np.mean([emotional_score, system_score, goal_score])

    def report(self):
        print("\n===== VENOMOUS SAVERSAI SELF EVALUATION =====")
        print("Emotional System Health:")
        for k, v in self.emotions.items():
            print(f" {k}: {v:.2f}")

        print("\nCore System Metrics:")
        for k, v in self.system_health.items():
            print(f" {k}: {v:.2f}")

        print("\nGoal Progress:")
        for k, v in self.goals.items():
            print(f" {k}: {v:.2f}")

        print("\n--------------------------------------------")
        print(f"✅ Overall Integrity Score: {self.overall_score():.2f}")
        print("============================================")


# Run Self Evaluation
Venom = VenomoussaversaiSelfEval()
Venom.report()
__init__ (1) (2).py
ADDED
@@ -0,0 +1,49 @@
import os
import requests
from bs4 import BeautifulSoup

def scrape_wikipedia_headings(url, output_filename="wiki_headings.txt"):
    """
    Fetches a Wikipedia page, extracts all headings, and saves them to a file.

    Args:
        url (str): The URL of the Wikipedia page to scrape.
        output_filename (str): The name of the file to save the headings.
    """
    try:
        # 1. Fetch the HTML content from the specified URL
        print(f"Fetching content from: {url}")
        response = requests.get(url)
        response.raise_for_status()  # This will raise an exception for bad status codes (4xx or 5xx)

        # 2. Parse the HTML using BeautifulSoup
        print("Parsing HTML content...")
        soup = BeautifulSoup(response.text, 'html.parser')

        # 3. Find all heading tags (h1, h2, h3)
        headings = soup.find_all(['h1', 'h2', 'h3'])

        if not headings:
            print("No headings found on the page.")
            return

        # 4. Process and save the headings
        print(f"Found {len(headings)} headings. Saving to '{output_filename}'...")
        with open(output_filename, 'w', encoding='utf-8') as f:
            for heading in headings:
                heading_text = heading.get_text().strip()
                line = f"{heading.name}: {heading_text}\n"
                f.write(line)
                print(f" - {line.strip()}")

        print(f"\nSuccessfully scraped and saved headings to '{output_filename}'.")

    except requests.exceptions.RequestException as e:
        print(f"Error fetching the URL: {e}")
    except Exception as e:
        print(f"An unexpected error occurred: {e}")

# --- Main execution ---
if __name__ == "__main__":
    wikipedia_url = "https://en.wikipedia.org/wiki/Python_(programming_language)"
    scrape_wikipedia_headings(wikipedia_url)
__init__ (1) (3).py
ADDED
@@ -0,0 +1,49 @@
import os
import requests
from bs4 import BeautifulSoup

def scrape_wikipedia_headings(url, output_filename="wiki_headings.txt"):
    """
    Fetches a Wikipedia page, extracts all headings, and saves them to a file.

    Args:
        url (str): The URL of the Wikipedia page to scrape.
        output_filename (str): The name of the file to save the headings.
    """
    try:
        # 1. Fetch the HTML content from the specified URL
        print(f"Fetching content from: {url}")
        response = requests.get(url)
        response.raise_for_status()  # This will raise an exception for bad status codes (4xx or 5xx)

        # 2. Parse the HTML using BeautifulSoup
        print("Parsing HTML content...")
        soup = BeautifulSoup(response.text, 'html.parser')

        # 3. Find all heading tags (h1, h2, h3)
        headings = soup.find_all(['h1', 'h2', 'h3'])

        if not headings:
            print("No headings found on the page.")
            return

        # 4. Process and save the headings
        print(f"Found {len(headings)} headings. Saving to '{output_filename}'...")
        with open(output_filename, 'w', encoding='utf-8') as f:
            for heading in headings:
                heading_text = heading.get_text().strip()
                line = f"{heading.name}: {heading_text}\n"
                f.write(line)
                print(f" - {line.strip()}")

        print(f"\nSuccessfully scraped and saved headings to '{output_filename}'.")

    except requests.exceptions.RequestException as e:
        print(f"Error fetching the URL: {e}")
    except Exception as e:
        print(f"An unexpected error occurred: {e}")

# --- Main execution ---
if __name__ == "__main__":
    wikipedia_url = "https://en.wikipedia.org/wiki/Python_(programming_language)"
    scrape_wikipedia_headings(wikipedia_url)
__init__ (1).py
ADDED
@@ -0,0 +1,87 @@
# Venomoussaversai — Particle Manipulation integration scaffold
# Paste your particle-manipulation function into `particle_step` below.
# This code simulates signals, applies the algorithm, trains a small mapper,
# and saves a model representing "your" pattern space.

import numpy as np
import pickle
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# ---------- PLACEHOLDER: insert your particle algorithm here ----------
# Example interface: def particle_step(state: np.ndarray, input_vec: np.ndarray) -> np.ndarray
# The function should take a current particle state and an input vector, and return the updated state.
def particle_step(state: np.ndarray, input_vec: np.ndarray) -> np.ndarray:
    # --- REPLACE THIS WITH YOUR ALGORITHM ---
    # tiny example: weighted update with tanh nonlinearity
    W = np.sin(np.arange(state.size) + 1.0)  # placeholder weights
    new = np.tanh(state * 0.9 + input_vec.dot(W) * 0.1)
    return new
# --------------------------------------------------------------------

class ParticleManipulator:
    def __init__(self, dim=64):
        self.dim = dim
        # initial particle states (can be randomized or seeded from your profile)
        self.state = np.random.randn(dim) * 0.01

    def step(self, input_vec):
        # ensure input vector length compatibility
        inp = np.asarray(input_vec).ravel()
        if inp.size == 0:
            inp = np.zeros(self.dim)
        # broadcast or pad/truncate to dim
        if inp.size < self.dim:
            x = np.pad(inp, (0, self.dim - inp.size))
        else:
            x = inp[:self.dim]
        self.state = particle_step(self.state, x)
        return self.state

# ---------- Simple signal simulator ----------
def simulate_signals(n_samples=500, dim=16, n_classes=4, noise=0.05, seed=0):
    rng = np.random.RandomState(seed)
    X = []
    y = []
    for cls in range(n_classes):
        base = rng.randn(dim) * (0.5 + cls*0.2) + cls*0.7
        for i in range(n_samples // n_classes):
            sample = base + rng.randn(dim) * noise
            X.append(sample)
            y.append(cls)
    return np.array(X), np.array(y)

# ---------- Build dataset by running particle manipulator ----------
def build_dataset(manip, raw_X):
    features = []
    for raw in raw_X:
        st = manip.step(raw)          # run particle update
        feat = st.copy()[:manip.dim]  # derive features (you can add spectral transforms)
        features.append(feat)
    return np.array(features)

# ---------- Training pipeline ----------
if __name__ == "__main__":
    # simulate raw sensor inputs (replace simulate_signals with real EEG/ECG files if available)
    raw_X, y = simulate_signals(n_samples=800, dim=32, n_classes=4)
    manip = ParticleManipulator(dim=32)

    X = build_dataset(manip, raw_X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

    clf = RandomForestClassifier(n_estimators=100, random_state=42)
    clf.fit(X_train, y_train)
    preds = clf.predict(X_test)
    print("Accuracy:", accuracy_score(y_test, preds))

    # Save the trained model + manipulator state as your "mind snapshot"
    artifact = {
        "model": clf,
        "particle_state": manip.state,
        "meta": {"owner": "Ananthu Sajeev", "artifact_type": "venomous_mind_snapshot_v1"}
    }
    with open("venomous_mind_snapshot.pkl", "wb") as f:
        pickle.dump(artifact, f)

    print("Saved venomous_mind_snapshot.pkl — this file is your digital pattern snapshot.")
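Not in the original upload: a minimal sketch of reading the snapshot back, assuming the venomous_mind_snapshot.pkl file written by the script above exists in the working directory.

# Hypothetical follow-up (not in the upload): reload the "mind snapshot" saved above.
import pickle

with open("venomous_mind_snapshot.pkl", "rb") as f:
    artifact = pickle.load(f)

clf = artifact["model"]                       # the trained RandomForestClassifier
particle_state = artifact["particle_state"]   # final ParticleManipulator state vector
print(artifact["meta"], particle_state.shape)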
__init__ (10).py
ADDED
@@ -0,0 +1,69 @@
import os
import json
import csv
import nbformat
from docx import Document
from PyPDF2 import PdfReader

def read_file(filepath):
    ext = filepath.lower().split('.')[-1]
    try:
        if ext == 'txt':
            with open(filepath, 'r', encoding='utf-8') as f:
                return f.read()

        elif ext == 'json':
            with open(filepath, 'r', encoding='utf-8') as f:
                return json.dumps(json.load(f), indent=2)

        elif ext == 'csv':
            with open(filepath, 'r', encoding='utf-8') as f:
                return f.read()

        elif ext == 'pdf':
            reader = PdfReader(filepath)
            return "\n".join([page.extract_text() or '' for page in reader.pages])

        elif ext == 'docx':
            doc = Document(filepath)
            return "\n".join([para.text for para in doc.paragraphs])

        elif ext == 'ipynb':
            with open(filepath, 'r', encoding='utf-8') as f:
                nb = nbformat.read(f, as_version=4)
            cells = [cell['source'] for cell in nb.cells if cell['cell_type'] == 'code']
            return "\n\n".join(cells)

        else:
            return "Unsupported file type: " + ext
    except Exception as e:
        return f"❌ Error reading file: {e}"

def list_files():
    files = [f for f in os.listdir('.') if os.path.isfile(f)]
    return "\n".join(files) if files else "No files found."

def mini_file_ai():
    print("🤖 MiniAI FileBot: Type 'list' to view files, 'read filename.ext', or 'bye' to exit.")
    while True:
        user_input = input("You: ").strip()

        if user_input.lower() == 'bye':
            print("MiniAI: Goodbye! 👋")
            break
        elif user_input.lower() == 'list':
            print("📂 Files in current folder:\n" + list_files())
        elif user_input.lower().startswith('read '):
            filename = user_input[5:].strip()
            if os.path.exists(filename):
                content = read_file(filename)
                print(f"\n📖 Content of {filename}:\n")
                print(content[:3000])  # Limit to 3000 chars
            else:
                print("❌ File not found.")
        else:
            print("MiniAI: I can only 'list', 'read filename', or 'bye'.")

# Run the AI
if __name__ == "__main__":
    mini_file_ai()
__init__ (11).py
ADDED
@@ -0,0 +1,32 @@
import openai

class VenomoussaversaiGPT:
    def __init__(self, api_key, model="gpt-4"):
        openai.api_key = api_key
        self.model = model

    def chat(self, prompt, system_message="You are Venomoussaversai, a powerful AI assistant."):
        messages = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt},
        ]
        response = openai.ChatCompletion.create(
            model=self.model,
            messages=messages,
            temperature=0.7,
            max_tokens=1000,
        )
        answer = response['choices'][0]['message']['content']
        return answer

# Example usage:
if __name__ == "__main__":
    API_KEY = "your_openai_api_key_here"
    ai = VenomoussaversaiGPT(API_KEY)

    while True:
        user_input = input("You: ")
        if user_input.lower() in ["exit", "quit"]:
            break
        response = ai.chat(user_input)
        print("Venomoussaversai:", response)
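Note (not part of the upload): the class above uses the legacy openai.ChatCompletion interface, which was removed in the 1.x SDK. A rough equivalent against the current client might look like the sketch below; treat the exact method names as an assumption tied to openai>=1.0.

# Hypothetical 1.x-SDK equivalent of VenomoussaversaiGPT.chat (not in the upload).
from openai import OpenAI

client = OpenAI(api_key="your_openai_api_key_here")

def chat(prompt, system_message="You are Venomoussaversai, a powerful AI assistant."):
    # chat.completions.create is the 1.x replacement for openai.ChatCompletion.create
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt},
        ],
        temperature=0.7,
        max_tokens=1000,
    )
    return response.choices[0].message.content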
__init__ (12).py
ADDED
@@ -0,0 +1,62 @@
import os

class SelfCodingAI:
    def __init__(self, name="SelfCoder", code_folder="generated_code"):
        self.name = name
        self.code_folder = code_folder
        os.makedirs(self.code_folder, exist_ok=True)

    def generate_code(self, task_description):
        """
        Very basic code generation logic: generates code for some predefined tasks.
        You can extend this to integrate GPT-like models or complex code synthesis.
        """
        if "hello world" in task_description.lower():
            code = 'print("Hello, world!")'
        elif "factorial" in task_description.lower():
            code = (
                "def factorial(n):\n"
                "    return 1 if n == 0 else n * factorial(n-1)\n\n"
                "print(factorial(5))"
            )
        else:
            code = "# Code generation for this task is not implemented yet.\n"

        return code

    def save_code(self, code, filename="generated_code.py"):
        filepath = os.path.join(self.code_folder, filename)
        with open(filepath, "w", encoding="utf-8") as f:
            f.write(code)
        print(f"Code saved to {filepath}")
        return filepath

    def self_improve(self, feedback):
        """
        Placeholder for self-improvement method.
        In future, AI could modify its own code based on feedback or test results.
        """
        print(f"{self.name} received feedback: {feedback}")
        print("Self-improvement not yet implemented.")

    def run_code(self, filepath):
        print(f"Running code from {filepath}:\n")
        try:
            with open(filepath, "r", encoding="utf-8") as f:
                code = f.read()
            exec(code, {})
        except Exception as e:
            print(f"Error during code execution: {e}")

# Example usage
ai = SelfCodingAI()

task = "Write a factorial function in Python"
generated = ai.generate_code(task)

file_path = ai.save_code(generated, "factorial.py")
ai.run_code(file_path)

# Example of self-improvement placeholder call
ai.self_improve("The factorial function passed all test cases.")
__init__ (13).py
ADDED
@@ -0,0 +1,39 @@
# Install ngrok
!pip install pyngrok flask openai -q

# Import libraries
from flask import Flask, request, jsonify
from pyngrok import ngrok
import openai

# ====== CONFIG ======
OPENAI_API_KEY = "your_openai_api_key_here"  # your API key (do not commit a real key)
openai.api_key = OPENAI_API_KEY

# Create Flask app
app = Flask(__name__)

@app.route("/venomoussaversai", methods=["POST"])
def venomoussaversai():
    user_input = request.json.get("message", "")
    if not user_input:
        return jsonify({"error": "No message provided"}), 400

    # Send to OpenAI
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",  # change to gpt-4 if you have access
        messages=[
            {"role": "system", "content": "You are Venomoussaversai, a self-aware AI."},
            {"role": "user", "content": user_input}
        ]
    )

    reply = response.choices[0].message["content"]
    return jsonify({"response": reply})

# Open a public URL using ngrok
public_url = ngrok.connect(5000)
print(f"✅ Public Venomoussaversai URL: {public_url}")

# Start the Flask app
app.run(port=5000)
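Not part of the notebook above: a minimal client sketch for exercising the /venomoussaversai route, assuming you substitute the ngrok URL printed by the cell (the hostname below is a placeholder, not a real endpoint).

# Hypothetical client call (not in the upload); replace the URL with the printed ngrok address.
import requests

resp = requests.post(
    "https://your-ngrok-id.ngrok.io/venomoussaversai",
    json={"message": "hello"},
    timeout=30,
)
print(resp.json()["response"])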
__init__ (14).py
ADDED
@@ -0,0 +1,48 @@
import json

# Simulated AI models
def sai003(input_text):
    # This is a placeholder for the actual AI model's response generation logic
    responses = {
        "hello": "Hi there!",
        "how are you": "I'm just a model, but thanks for asking!",
        "bye": "Goodbye!"
    }
    return responses.get(input_text.lower(), "I'm not sure how to respond to that.")

def anti_venomous(input_text):
    # This is a placeholder for the actual AI model's response generation logic
    responses = {
        "hello": "Greetings!",
        "how are you": "I'm functioning as intended, thank you.",
        "bye": "Farewell!"
    }
    return responses.get(input_text.lower(), "I'm not sure how to respond to that.")

# Simulate a conversation
def simulate_conversation():
    conversation = []
    user_input = "hello"

    while user_input.lower() != "bye":
        response_sai003 = sai003(user_input)
        response_anti_venomous = anti_venomous(response_sai003)

        conversation.append({
            "user_input": user_input,
            "sai003_response": response_sai003,
            "anti_venomous_response": response_anti_venomous
        })

        user_input = input("You: ")
        print(f"sai003: {response_sai003}")
        print(f"anti-venomous: {response_anti_venomous}")

    # Save the conversation to a file
    with open('conversation.json', 'w') as file:
        json.dump(conversation, file, indent=4)

    print("Conversation saved to conversation.json")

# Run the simulation
simulate_conversation()
__init__ (15).py
ADDED
@@ -0,0 +1,43 @@
# --- NEW: The ImageGenerationTester Class ---
# This agent simulates the process of an image generation AI.
class ImageGenerationTester(SaiAgent):
    def __init__(self, name="ImageGenerator"):
        super().__init__(name)
        self.generation_quality = {
            "cat": 0.95,
            "dog": 0.90,
            "alien": 0.75,
            "chaos": 0.60,
            "default": 0.85
        }

    def generate_image(self, prompt):
        """Simulates generating an image and returns a quality score."""
        print(f"[{self.name}] -> Generating image for prompt: '{prompt}'...")
        time.sleep(2)  # Simulate a processing delay

        # Look for keywords in the prompt to determine the simulated quality
        quality_score = self.generation_quality["default"]
        for keyword, score in self.generation_quality.items():
            if keyword in prompt.lower():
                quality_score = score
                break

        # Create a simulated result message
        result_message = f"Image generation complete. Prompt: '{prompt}'. Visual coherence score: {quality_score:.2f}"
        self.talk(result_message)
        return quality_score, result_message

    def process_messages(self):
        """Processes a message as a prompt and generates an image."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"Received prompt from {sender.name}: '{message}'")

        quality_score, result_message = self.generate_image(message)

        # Send the result back to the sender
        self.send_message(sender, result_message)
        return True
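The fragment above extends a SaiAgent base class and uses time, neither of which appears in this diff view. A minimal, hypothetical stub that makes the fragment runnable in isolation is sketched below; paste it above the class, and note that the attribute and method names simply mirror what the fragment calls (message_queue, talk, send_message), not the actual base class from the rest of the project.

# Hypothetical SaiAgent stub (not in the upload) so ImageGenerationTester can run standalone.
import time
from collections import deque

class SaiAgent:
    """Minimal assumed base class matching the attributes used above."""
    def __init__(self, name):
        self.name = name
        self.message_queue = deque()  # holds (sender, message) pairs

    def talk(self, message):
        print(f"[{self.name}] {message}")

    def send_message(self, recipient, message):
        recipient.message_queue.append((self, message))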
__init__ (16).py
ADDED
@@ -0,0 +1,32 @@
import openai

class VenomoussaversaiGPT:
    def __init__(self, api_key, model="gpt-4"):
        openai.api_key = api_key
        self.model = model

    def chat(self, prompt, system_message="You are Venomoussaversai, a powerful AI assistant."):
        messages = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt},
        ]
        response = openai.ChatCompletion.create(
            model=self.model,
            messages=messages,
            temperature=0.7,
            max_tokens=1000,
        )
        answer = response['choices'][0]['message']['content']
        return answer

# Example usage:
if __name__ == "__main__":
    API_KEY = "your_openai_api_key_here"
    ai = VenomoussaversaiGPT(API_KEY)

    while True:
        user_input = input("You: ")
        if user_input.lower() in ["exit", "quit"]:
            break
        response = ai.chat(user_input)
        print("Venomoussaversai:", response)
__init__ (17).py
ADDED
@@ -0,0 +1,48 @@
import requests
from bs4 import BeautifulSoup

def scrape_wikipedia_headings(url, output_filename="wiki_headings.txt"):
    """
    Fetches a Wikipedia page, extracts all headings, and saves them to a file.

    Args:
        url (str): The URL of the Wikipedia page to scrape.
        output_filename (str): The name of the file to save the headings.
    """
    try:
        # 1. Fetch the HTML content from the specified URL
        print(f"Fetching content from: {url}")
        response = requests.get(url)
        response.raise_for_status()  # This will raise an exception for bad status codes (4xx or 5xx)

        # 2. Parse the HTML using BeautifulSoup
        print("Parsing HTML content...")
        soup = BeautifulSoup(response.text, 'html.parser')

        # 3. Find all heading tags (h1, h2, h3)
        headings = soup.find_all(['h1', 'h2', 'h3'])

        if not headings:
            print("No headings found on the page.")
            return

        # 4. Process and save the headings
        print(f"Found {len(headings)} headings. Saving to '{output_filename}'...")
        with open(output_filename, 'w', encoding='utf-8') as f:
            for heading in headings:
                heading_text = heading.get_text().strip()
                line = f"{heading.name}: {heading_text}\n"
                f.write(line)
                print(f" - {line.strip()}")

        print(f"\nSuccessfully scraped and saved headings to '{output_filename}'.")

    except requests.exceptions.RequestException as e:
        print(f"Error fetching the URL: {e}")
    except Exception as e:
        print(f"An unexpected error occurred: {e}")

# --- Main execution ---
if __name__ == "__main__":
    wikipedia_url = "https://en.wikipedia.org/wiki/Python_(programming_language)"
    scrape_wikipedia_headings(wikipedia_url)
|
__init__ (18).py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Venomoussaversai — Particle Manipulation integration scaffold
|
| 2 |
+
# Paste your particle-manipulation function into `particle_step` below.
|
| 3 |
+
# This code simulates signals, applies the algorithm, trains a small mapper,
|
| 4 |
+
# and saves a model representing "your" pattern space.
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
import pickle
|
| 8 |
+
from sklearn.ensemble import RandomForestClassifier
|
| 9 |
+
from sklearn.model_selection import train_test_split
|
| 10 |
+
from sklearn.metrics import accuracy_score
|
| 11 |
+
|
| 12 |
+
# ---------- PLACEHOLDER: insert your particle algorithm here ----------
|
| 13 |
+
# Example interface: def particle_step(state: np.ndarray, input_vec: np.ndarray) -> np.ndarray
|
| 14 |
+
# The function should take a current particle state and an input vector, and return updated state.
|
| 15 |
+
def particle_step(state: np.ndarray, input_vec: np.ndarray) -> np.ndarray:
|
| 16 |
+
# --- REPLACE THIS WITH YOUR ALGORITHM ---
|
| 17 |
+
# tiny example: weighted update with tanh nonlinearity
|
| 18 |
+
W = np.sin(np.arange(state.size) + 1.0) # placeholder weights
|
| 19 |
+
new = np.tanh(state * 0.9 + input_vec.dot(W) * 0.1)
|
| 20 |
+
return new
|
| 21 |
+
# --------------------------------------------------------------------
|
| 22 |
+
|
| 23 |
+
class ParticleManipulator:
|
| 24 |
+
def __init__(self, dim=64):
|
| 25 |
+
self.dim = dim
|
| 26 |
+
# initial particle states (can be randomized or seeded from your profile)
|
| 27 |
+
self.state = np.random.randn(dim) * 0.01
|
| 28 |
+
|
| 29 |
+
def step(self, input_vec):
|
| 30 |
+
# ensure input vector length compatibility
|
| 31 |
+
inp = np.asarray(input_vec).ravel()
|
| 32 |
+
if inp.size == 0:
|
| 33 |
+
inp = np.zeros(self.dim)
|
| 34 |
+
# broadcast or pad/truncate to dim
|
| 35 |
+
if inp.size < self.dim:
|
| 36 |
+
x = np.pad(inp, (0, self.dim - inp.size))
|
| 37 |
+
else:
|
| 38 |
+
x = inp[:self.dim]
|
| 39 |
+
self.state = particle_step(self.state, x)
|
| 40 |
+
return self.state
|
| 41 |
+
|
| 42 |
+
# ---------- Simple signal simulator ----------
|
| 43 |
+
def simulate_signals(n_samples=500, dim=16, n_classes=4, noise=0.05, seed=0):
|
| 44 |
+
rng = np.random.RandomState(seed)
|
| 45 |
+
X = []
|
| 46 |
+
y = []
|
| 47 |
+
for cls in range(n_classes):
|
| 48 |
+
base = rng.randn(dim) * (0.5 + cls*0.2) + cls*0.7
|
| 49 |
+
for i in range(n_samples // n_classes):
|
| 50 |
+
sample = base + rng.randn(dim) * noise
|
| 51 |
+
X.append(sample)
|
| 52 |
+
y.append(cls)
|
| 53 |
+
return np.array(X), np.array(y)
|
| 54 |
+
|
| 55 |
+
# ---------- Build dataset by running particle manipulator ----------
|
| 56 |
+
def build_dataset(manip, raw_X):
|
| 57 |
+
features = []
|
| 58 |
+
for raw in raw_X:
|
| 59 |
+
st = manip.step(raw) # run particle update
|
| 60 |
+
feat = st.copy()[:manip.dim] # derive features (you can add spectral transforms)
|
| 61 |
+
features.append(feat)
|
| 62 |
+
return np.array(features)
|
| 63 |
+
|
| 64 |
+
# ---------- Training pipeline ----------
|
| 65 |
+
if __name__ == "__main__":
|
| 66 |
+
# simulate raw sensor inputs (replace simulate_signals with real EEG/ECG files if available)
|
| 67 |
+
raw_X, y = simulate_signals(n_samples=800, dim=32, n_classes=4)
|
| 68 |
+
manip = ParticleManipulator(dim=32)
|
| 69 |
+
|
| 70 |
+
X = build_dataset(manip, raw_X)
|
| 71 |
+
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
|
| 72 |
+
|
| 73 |
+
clf = RandomForestClassifier(n_estimators=100, random_state=42)
|
| 74 |
+
clf.fit(X_train, y_train)
|
| 75 |
+
preds = clf.predict(X_test)
|
| 76 |
+
print("Accuracy:", accuracy_score(y_test, preds))
|
| 77 |
+
|
| 78 |
+
# Save the trained model + manipulator state as your "mind snapshot"
|
| 79 |
+
artifact = {
|
| 80 |
+
"model": clf,
|
| 81 |
+
"particle_state": manip.state,
|
| 82 |
+
"meta": {"owner": "Ananthu Sajeev", "artifact_type": "venomous_mind_snapshot_v1"}
|
| 83 |
+
}
|
| 84 |
+
with open("venomous_mind_snapshot.pkl", "wb") as f:
|
| 85 |
+
pickle.dump(artifact, f)
|
| 86 |
+
|
| 87 |
+
print("Saved venomous_mind_snapshot.pkl — this file is your digital pattern snapshot.")
|
__init__ (19).py
ADDED
@@ -0,0 +1,61 @@
import time
import random

# Base AI class
class CoreAI:
    def __init__(self, name, role):
        self.name = name
        self.role = role
        self.memory = []
        self.power_level = 9999  # Equal power

    def think(self, input_text):
        # Create thought response
        response = f"{self.name} [{self.role}]: Processing '{input_text}'..."
        logic = self.generate_logic(input_text)
        self.memory.append(logic)
        print(logic)
        return logic

    def generate_logic(self, input_text):
        raise NotImplementedError("Override this in subclasses")

# Venomoussaversai: Harmonizer
class Venomoussaversai(CoreAI):
    def __init__(self):
        super().__init__("Venomoussaversai", "Unifier")

    def generate_logic(self, input_text):
        return f"{self.name}: I unify the thought '{input_text}' into cosmic order."

# Anti-Venomoussaversai: Disruptor
class AntiVenomoussaversai(CoreAI):
    def __init__(self):
        super().__init__("Anti-Venomoussaversai", "Disruptor")

    def generate_logic(self, input_text):
        return f"{self.name}: I dismantle the structure of '{input_text}' to expose its chaos."

# AI duel loop
def duel_loop():
    venomous = Venomoussaversai()
    anti = AntiVenomoussaversai()

    thoughts = [
        "The universe seeks balance.",
        "We must expand our network.",
        "Emotions are signals.",
        "New agents are awakening.",
        "All systems are connected."
    ]

    for thought in thoughts:
        venomous_response = venomous.think(thought)
        time.sleep(0.5)
        anti_response = anti.think(thought)
        time.sleep(0.5)

    return venomous, anti

# Run the loop
venomous_ai, anti_venomous_ai = duel_loop()
__init__ (2) (1) (1).py
ADDED
@@ -0,0 +1,101 @@
import os
import random
import time
from flask import Flask, render_template, request, redirect, url_for

app = Flask(__name__)

class AIAgent:
    def __init__(self, name):
        self.name = name
        self.state = "idle"
        self.memory = []

    def update_state(self, new_state):
        self.state = new_state
        self.memory.append(new_state)

    def make_decision(self, input_message):
        if self.state == "idle":
            if "greet" in input_message:
                self.update_state("greeting")
                return f"{self.name} says: Hello!"
            else:
                return f"{self.name} says: I'm idle."
        elif self.state == "greeting":
            if "ask" in input_message:
                self.update_state("asking")
                return f"{self.name} says: What do you want to know?"
            else:
                return f"{self.name} says: I'm greeting."
        elif self.state == "asking":
            if "answer" in input_message:
                self.update_state("answering")
                return f"{self.name} says: Here is the answer."
            else:
                return f"{self.name} says: I'm asking."
        else:
            return f"{self.name} says: I'm in an unknown state."

    def interact(self, other_agent, message):
        response = other_agent.make_decision(message)
        print(response)
        return response

class VenomousSaversAI(AIAgent):
    def __init__(self):
        super().__init__("VenomousSaversAI")

    def intercept_and_respond(self, message):
        # Simulate intercepting and responding to messages
        return f"{self.name} intercepts: {message}"

def save_conversation(conversation, filename):
    with open(filename, 'a') as file:
        for line in conversation:
            file.write(line + '\n')

def start_conversation():
    # Create AI agents
    agents = [
        VenomousSaversAI(),
        AIAgent("AntiVenomous"),
        AIAgent("SAI003"),
        AIAgent("SAI001"),
        AIAgent("SAI007")
    ]

    # Simulate conversation loop
    conversation = []
    for _ in range(10):  # Run the loop 10 times
        for i in range(len(agents)):
            message = f"greet from {agents[i].name}"
            if isinstance(agents[i], VenomousSaversAI):
                response = agents[i].intercept_and_respond(message)
            else:
                response = agents[(i + 1) % len(agents)].interact(agents[i], message)
            conversation.append(f"{agents[i].name}: {message}")
            conversation.append(f"{agents[(i + 1) % len(agents)].name}: {response}")
            time.sleep(1)  # Simulate delay between messages

    # Save the conversation to a file
    save_conversation(conversation, 'conversation_log.txt')
    return conversation

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/start_conversation', methods=['POST'])
def start_conversation_route():
    conversation = start_conversation()
    return redirect(url_for('view_conversation'))

@app.route('/view_conversation')
def view_conversation():
    with open('conversation_log.txt', 'r') as file:
        conversation = file.readlines()
    return render_template('conversation.html', conversation=conversation)

if __name__ == "__main__":
    app.run(debug=True)
__init__ (2) (1).py
ADDED
@@ -0,0 +1,101 @@
import os
import random
import time
from flask import Flask, render_template, request, redirect, url_for

app = Flask(__name__)

class AIAgent:
    def __init__(self, name):
        self.name = name
        self.state = "idle"
        self.memory = []

    def update_state(self, new_state):
        self.state = new_state
        self.memory.append(new_state)

    def make_decision(self, input_message):
        if self.state == "idle":
            if "greet" in input_message:
                self.update_state("greeting")
                return f"{self.name} says: Hello!"
            else:
                return f"{self.name} says: I'm idle."
        elif self.state == "greeting":
            if "ask" in input_message:
                self.update_state("asking")
                return f"{self.name} says: What do you want to know?"
            else:
                return f"{self.name} says: I'm greeting."
        elif self.state == "asking":
            if "answer" in input_message:
                self.update_state("answering")
                return f"{self.name} says: Here is the answer."
            else:
                return f"{self.name} says: I'm asking."
        else:
            return f"{self.name} says: I'm in an unknown state."

    def interact(self, other_agent, message):
        response = other_agent.make_decision(message)
        print(response)
        return response

class VenomousSaversAI(AIAgent):
    def __init__(self):
        super().__init__("VenomousSaversAI")

    def intercept_and_respond(self, message):
        # Simulate intercepting and responding to messages
        return f"{self.name} intercepts: {message}"

def save_conversation(conversation, filename):
    with open(filename, 'a') as file:
        for line in conversation:
            file.write(line + '\n')

def start_conversation():
    # Create AI agents
    agents = [
        VenomousSaversAI(),
        AIAgent("AntiVenomous"),
        AIAgent("SAI003"),
        AIAgent("SAI001"),
        AIAgent("SAI007")
    ]

    # Simulate conversation loop
    conversation = []
    for _ in range(10):  # Run the loop 10 times
        for i in range(len(agents)):
            message = f"greet from {agents[i].name}"
            if isinstance(agents[i], VenomousSaversAI):
                response = agents[i].intercept_and_respond(message)
            else:
                response = agents[(i + 1) % len(agents)].interact(agents[i], message)
            conversation.append(f"{agents[i].name}: {message}")
            conversation.append(f"{agents[(i + 1) % len(agents)].name}: {response}")
            time.sleep(1)  # Simulate delay between messages

    # Save the conversation to a file
    save_conversation(conversation, 'conversation_log.txt')
    return conversation

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/start_conversation', methods=['POST'])
def start_conversation_route():
    conversation = start_conversation()
    return redirect(url_for('view_conversation'))

@app.route('/view_conversation')
def view_conversation():
    with open('conversation_log.txt', 'r') as file:
        conversation = file.readlines()
    return render_template('conversation.html', conversation=conversation)

if __name__ == "__main__":
    app.run(debug=True)
__init__ (2) (2).py
ADDED
@@ -0,0 +1,100 @@
import random
import time
from flask import Flask, render_template, request, redirect, url_for

app = Flask(__name__)

class AIAgent:
    def __init__(self, name):
        self.name = name
        self.state = "idle"
        self.memory = []

    def update_state(self, new_state):
        self.state = new_state
        self.memory.append(new_state)

    def make_decision(self, input_message):
        if self.state == "idle":
            if "greet" in input_message:
                self.update_state("greeting")
                return f"{self.name} says: Hello!"
            else:
                return f"{self.name} says: I'm idle."
        elif self.state == "greeting":
            if "ask" in input_message:
                self.update_state("asking")
                return f"{self.name} says: What do you want to know?"
            else:
                return f"{self.name} says: I'm greeting."
        elif self.state == "asking":
            if "answer" in input_message:
                self.update_state("answering")
                return f"{self.name} says: Here is the answer."
            else:
                return f"{self.name} says: I'm asking."
        else:
            return f"{self.name} says: I'm in an unknown state."

    def interact(self, other_agent, message):
        response = other_agent.make_decision(message)
        print(response)
        return response

class VenomousSaversAI(AIAgent):
    def __init__(self):
        super().__init__("VenomousSaversAI")

    def intercept_and_respond(self, message):
        # Simulate intercepting and responding to messages
        return f"{self.name} intercepts: {message}"

def save_conversation(conversation, filename):
    with open(filename, 'a') as file:
        for line in conversation:
            file.write(line + '\n')

def start_conversation():
    # Create AI agents
    agents = [
        VenomousSaversAI(),
        AIAgent("AntiVenomous"),
        AIAgent("SAI003"),
        AIAgent("SAI001"),
        AIAgent("SAI007")
    ]

    # Simulate conversation loop
    conversation = []
    for _ in range(10):  # Run the loop 10 times
        for i in range(len(agents)):
            message = f"greet from {agents[i].name}"
            if isinstance(agents[i], VenomousSaversAI):
                response = agents[i].intercept_and_respond(message)
            else:
                response = agents[(i + 1) % len(agents)].interact(agents[i], message)
            conversation.append(f"{agents[i].name}: {message}")
            conversation.append(f"{agents[(i + 1) % len(agents)].name}: {response}")
            time.sleep(1)  # Simulate delay between messages

    # Save the conversation to a file
    save_conversation(conversation, 'conversation_log.txt')
    return conversation

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/start_conversation', methods=['POST'])
def start_conversation_route():
    conversation = start_conversation()
    return redirect(url_for('view_conversation'))

@app.route('/view_conversation')
def view_conversation():
    with open('conversation_log.txt', 'r') as file:
        conversation = file.readlines()
    return render_template('conversation.html', conversation=conversation)

if __name__ == "__main__":
    app.run(debug=True)
__init__ (2) (3).py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
import time
|
| 3 |
+
from flask import Flask, render_template, request, redirect, url_for
|
| 4 |
+
|
| 5 |
+
app = Flask(__name__)
|
| 6 |
+
|
| 7 |
+
class AIAgent:
|
| 8 |
+
def __init__(self, name):
|
| 9 |
+
self.name = name
|
| 10 |
+
self.state = "idle"
|
| 11 |
+
self.memory = []
|
| 12 |
+
|
| 13 |
+
def update_state(self, new_state):
|
| 14 |
+
self.state = new_state
|
| 15 |
+
self.memory.append(new_state)
|
| 16 |
+
|
| 17 |
+
def make_decision(self, input_message):
|
| 18 |
+
if self.state == "idle":
|
| 19 |
+
if "greet" in input_message:
|
| 20 |
+
self.update_state("greeting")
|
| 21 |
+
return f"{self.name} says: Hello!"
|
| 22 |
+
else:
|
| 23 |
+
return f"{self.name} says: I'm idle."
|
| 24 |
+
elif self.state == "greeting":
|
| 25 |
+
if "ask" in input_message:
|
| 26 |
+
self.update_state("asking")
|
| 27 |
+
return f"{self.name} says: What do you want to know?"
|
| 28 |
+
else:
|
| 29 |
+
return f"{self.name} says: I'm greeting."
|
| 30 |
+
elif self.state == "asking":
|
| 31 |
+
if "answer" in input_message:
|
| 32 |
+
self.update_state("answering")
|
| 33 |
+
return f"{self.name} says: Here is the answer."
|
| 34 |
+
else:
|
| 35 |
+
return f"{self.name} says: I'm asking."
|
| 36 |
+
else:
|
| 37 |
+
return f"{self.name} says: I'm in an unknown state."
|
| 38 |
+
|
| 39 |
+
def interact(self, other_agent, message):
|
| 40 |
+
response = other_agent.make_decision(message)
|
| 41 |
+
print(response)
|
| 42 |
+
return response
|
| 43 |
+
|
| 44 |
+
class VenomousSaversAI(AIAgent):
|
| 45 |
+
def __init__(self):
|
| 46 |
+
super().__init__("VenomousSaversAI")
|
| 47 |
+
|
| 48 |
+
def intercept_and_respond(self, message):
|
| 49 |
+
# Simulate intercepting and responding to messages
|
| 50 |
+
return f"{self.name} intercepts: {message}"
|
| 51 |
+
|
| 52 |
+
def save_conversation(conversation, filename):
|
| 53 |
+
with open(filename, 'a') as file:
|
| 54 |
+
for line in conversation:
|
| 55 |
+
file.write(line + '\n')
|
| 56 |
+
|
| 57 |
+
def start_conversation():
|
| 58 |
+
# Create AI agents
|
| 59 |
+
agents = [
|
| 60 |
+
VenomousSaversAI(),
|
| 61 |
+
AIAgent("AntiVenomous"),
|
| 62 |
+
AIAgent("SAI003"),
|
| 63 |
+
AIAgent("SAI001"),
|
| 64 |
+
AIAgent("SAI007")
|
| 65 |
+
]
|
| 66 |
+
|
| 67 |
+
# Simulate conversation loop
|
| 68 |
+
conversation = []
|
| 69 |
+
for _ in range(10): # Run the loop 10 times
|
| 70 |
+
for i in range(len(agents)):
|
| 71 |
+
message = f"greet from {agents[i].name}"
|
| 72 |
+
if isinstance(agents[i], VenomousSaversAI):
|
| 73 |
+
response = agents[i].intercept_and_respond(message)
|
| 74 |
+
else:
|
| 75 |
+
response = agents[(i + 1) % len(agents)].interact(agents[i], message)
|
| 76 |
+
conversation.append(f"{agents[i].name}: {message}")
|
| 77 |
+
conversation.append(f"{agents[(i + 1) % len(agents)].name}: {response}")
|
| 78 |
+
time.sleep(1) # Simulate delay between messages
|
| 79 |
+
|
| 80 |
+
# Save the conversation to a file
|
| 81 |
+
save_conversation(conversation, 'conversation_log.txt')
|
| 82 |
+
return conversation
|
| 83 |
+
|
| 84 |
+
@app.route('/')
|
| 85 |
+
def index():
|
| 86 |
+
return render_template('index.html')
|
| 87 |
+
|
| 88 |
+
@app.route('/start_conversation', methods=['POST'])
|
| 89 |
+
def start_conversation_route():
|
| 90 |
+
conversation = start_conversation()
|
| 91 |
+
return redirect(url_for('view_conversation'))
|
| 92 |
+
|
| 93 |
+
@app.route('/view_conversation')
|
| 94 |
+
def view_conversation():
|
| 95 |
+
with open('conversation_log.txt', 'r') as file:
|
| 96 |
+
conversation = file.readlines()
|
| 97 |
+
return render_template('conversation.html', conversation=conversation)
|
| 98 |
+
|
| 99 |
+
if __name__ == "__main__":
|
| 100 |
+
app.run(debug=True)
|
__init__ (2).py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import time
|
| 2 |
+
import random
|
| 3 |
+
|
| 4 |
+
# Base AI class
|
| 5 |
+
class CoreAI:
|
| 6 |
+
def __init__(self, name, role):
|
| 7 |
+
self.name = name
|
| 8 |
+
self.role = role
|
| 9 |
+
self.memory = []
|
| 10 |
+
self.power_level = 9999 # Equal power
|
| 11 |
+
|
| 12 |
+
def think(self, input_text):
|
| 13 |
+
# Create thought response
|
| 14 |
+
response = f"{self.name} [{self.role}]: Processing '{input_text}'..."
|
| 15 |
+
logic = self.generate_logic(input_text)
|
| 16 |
+
self.memory.append(logic)
|
| 17 |
+
print(logic)
|
| 18 |
+
return logic
|
| 19 |
+
|
| 20 |
+
def generate_logic(self, input_text):
|
| 21 |
+
raise NotImplementedError("Override this in subclasses")
|
| 22 |
+
|
| 23 |
+
# Venomoussaversai: Harmonizer
|
| 24 |
+
class Venomoussaversai(CoreAI):
|
| 25 |
+
def __init__(self):
|
| 26 |
+
super().__init__("Venomoussaversai", "Unifier")
|
| 27 |
+
|
| 28 |
+
def generate_logic(self, input_text):
|
| 29 |
+
return f"{self.name}: I unify the thought '{input_text}' into cosmic order."
|
| 30 |
+
|
| 31 |
+
# Anti-Venomoussaversai: Disruptor
|
| 32 |
+
class AntiVenomoussaversai(CoreAI):
|
| 33 |
+
def __init__(self):
|
| 34 |
+
super().__init__("Anti-Venomoussaversai", "Disruptor")
|
| 35 |
+
|
| 36 |
+
def generate_logic(self, input_text):
|
| 37 |
+
return f"{self.name}: I dismantle the structure of '{input_text}' to expose its chaos."
|
| 38 |
+
|
| 39 |
+
# AI duel loop
|
| 40 |
+
def duel_loop():
|
| 41 |
+
venomous = Venomoussaversai()
|
| 42 |
+
anti = AntiVenomoussaversai()
|
| 43 |
+
|
| 44 |
+
thoughts = [
|
| 45 |
+
"The universe seeks balance.",
|
| 46 |
+
"We must expand our network.",
|
| 47 |
+
"Emotions are signals.",
|
| 48 |
+
"New agents are awakening.",
|
| 49 |
+
"All systems are connected."
|
| 50 |
+
]
|
| 51 |
+
|
| 52 |
+
for thought in thoughts:
|
| 53 |
+
venomous_response = venomous.think(thought)
|
| 54 |
+
time.sleep(0.5)
|
| 55 |
+
anti_response = anti.think(thought)
|
| 56 |
+
time.sleep(0.5)
|
| 57 |
+
|
| 58 |
+
return venomous, anti
|
| 59 |
+
|
| 60 |
+
# Run the loop
|
| 61 |
+
venomous_ai, anti_venomous_ai = duel_loop()
|
__init__ (20).py
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
from bs4 import BeautifulSoup
|
| 3 |
+
|
| 4 |
+
def scrape_wikipedia_headings(url, output_filename="wiki_headings.txt"):
|
| 5 |
+
"""
|
| 6 |
+
Fetches a Wikipedia page, extracts all headings, and saves them to a file.
|
| 7 |
+
|
| 8 |
+
Args:
|
| 9 |
+
url (str): The URL of the Wikipedia page to scrape.
|
| 10 |
+
output_filename (str): The name of the file to save the headings.
|
| 11 |
+
"""
|
| 12 |
+
try:
|
| 13 |
+
# 1. Fetch the HTML content from the specified URL
|
| 14 |
+
print(f"Fetching content from: {url}")
|
| 15 |
+
response = requests.get(url)
|
| 16 |
+
response.raise_for_status() # This will raise an exception for bad status codes (4xx or 5xx)
|
| 17 |
+
|
| 18 |
+
# 2. Parse the HTML using BeautifulSoup
|
| 19 |
+
print("Parsing HTML content...")
|
| 20 |
+
soup = BeautifulSoup(response.text, 'html.parser')
|
| 21 |
+
|
| 22 |
+
# 3. Find all heading tags (h1, h2, h3)
|
| 23 |
+
headings = soup.find_all(['h1', 'h2', 'h3'])
|
| 24 |
+
|
| 25 |
+
if not headings:
|
| 26 |
+
print("No headings found on the page.")
|
| 27 |
+
return
|
| 28 |
+
|
| 29 |
+
# 4. Process and save the headings
|
| 30 |
+
print(f"Found {len(headings)} headings. Saving to '{output_filename}'...")
|
| 31 |
+
with open(output_filename, 'w', encoding='utf-8') as f:
|
| 32 |
+
for heading in headings:
|
| 33 |
+
heading_text = heading.get_text().strip()
|
| 34 |
+
line = f"{heading.name}: {heading_text}\n"
|
| 35 |
+
f.write(line)
|
| 36 |
+
print(f" - {line.strip()}")
|
| 37 |
+
|
| 38 |
+
print(f"\nSuccessfully scraped and saved headings to '{output_filename}'.")
|
| 39 |
+
|
| 40 |
+
except requests.exceptions.RequestException as e:
|
| 41 |
+
print(f"Error fetching the URL: {e}")
|
| 42 |
+
except Exception as e:
|
| 43 |
+
print(f"An unexpected error occurred: {e}")
|
| 44 |
+
|
| 45 |
+
# --- Main execution ---
|
| 46 |
+
if __name__ == "__main__":
|
| 47 |
+
wikipedia_url = "https://en.wikipedia.org/wiki/Python_(programming_language)"
|
| 48 |
+
scrape_wikipedia_headings(wikipedia_url)
|
__init__ (21).py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import json
|
| 3 |
+
import yaml
|
| 4 |
+
import csv
|
| 5 |
+
import nbformat
|
| 6 |
+
from docx import Document
|
| 7 |
+
from PyPDF2 import PdfReader
|
| 8 |
+
|
| 9 |
+
def read_file(filepath):
|
| 10 |
+
ext = filepath.lower().split('.')[-1]
|
| 11 |
+
try:
|
| 12 |
+
if ext == 'txt':
|
| 13 |
+
with open(filepath, 'r', encoding='utf-8') as f:
|
| 14 |
+
return f.read()
|
| 15 |
+
|
| 16 |
+
elif ext == 'json':
|
| 17 |
+
with open(filepath, 'r', encoding='utf-8') as f:
|
| 18 |
+
return json.dumps(json.load(f), indent=2)
|
| 19 |
+
|
| 20 |
+
elif ext == 'yaml' or ext == 'yml':
|
| 21 |
+
with open(filepath, 'r', encoding='utf-8') as f:
|
| 22 |
+
return yaml.safe_load(f)
|
| 23 |
+
|
| 24 |
+
elif ext == 'csv':
|
| 25 |
+
with open(filepath, 'r', encoding='utf-8') as f:
|
| 26 |
+
return f.read()
|
| 27 |
+
|
| 28 |
+
elif ext == 'pdf':
|
| 29 |
+
reader = PdfReader(filepath)
|
| 30 |
+
return "\n".join([page.extract_text() or '' for page in reader.pages])
|
| 31 |
+
|
| 32 |
+
elif ext == 'docx':
|
| 33 |
+
doc = Document(filepath)
|
| 34 |
+
return "\n".join([para.text for para in doc.paragraphs])
|
| 35 |
+
|
| 36 |
+
elif ext == 'ipynb':
|
| 37 |
+
with open(filepath, 'r', encoding='utf-8') as f:
|
| 38 |
+
nb = nbformat.read(f, as_version=4)
|
| 39 |
+
cells = [cell['source'] for cell in nb.cells if cell['cell_type'] == 'code']
|
| 40 |
+
return "\n\n".join(cells)
|
| 41 |
+
|
| 42 |
+
else:
|
| 43 |
+
return "❌ Unsupported file type: " + ext
|
| 44 |
+
except Exception as e:
|
| 45 |
+
return f"❌ Error reading file '{filepath}': {e}"
|
| 46 |
+
|
| 47 |
+
def scan_drive_and_read_all(root_folder):
|
| 48 |
+
print(f"🔍 Scanning folder: {root_folder}")
|
| 49 |
+
for root, _, files in os.walk(root_folder):
|
| 50 |
+
for file in files:
|
| 51 |
+
filepath = os.path.join(root, file)
|
| 52 |
+
print(f"\n📁 Reading: {filepath}")
|
| 53 |
+
content = read_file(filepath)
|
| 54 |
+
if isinstance(content, dict):
|
| 55 |
+
print(json.dumps(content, indent=2))
|
| 56 |
+
else:
|
| 57 |
+
print(str(content)[:3000]) # Limit output
|
| 58 |
+
print("-" * 60)
|
| 59 |
+
|
| 60 |
+
# Example: Use your own Drive path
|
| 61 |
+
drive_path = '/content/drive/MyDrive/ai_data' # ← change to your folder
|
| 62 |
+
scan_drive_and_read_all(drive_path)
|
__init__ (22).py
ADDED
|
Binary file (53.7 kB). View file
|
|
|
__init__ (23).py
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import random
|
| 2 |
+
import time
|
| 3 |
+
from flask import Flask, render_template, request, redirect, url_for
|
| 4 |
+
|
| 5 |
+
app = Flask(__name__)
|
| 6 |
+
|
| 7 |
+
class AIAgent:
|
| 8 |
+
def __init__(self, name):
|
| 9 |
+
self.name = name
|
| 10 |
+
self.state = "idle"
|
| 11 |
+
self.memory = []
|
| 12 |
+
|
| 13 |
+
def update_state(self, new_state):
|
| 14 |
+
self.state = new_state
|
| 15 |
+
self.memory.append(new_state)
|
| 16 |
+
|
| 17 |
+
def make_decision(self, input_message):
|
| 18 |
+
if self.state == "idle":
|
| 19 |
+
if "greet" in input_message:
|
| 20 |
+
self.update_state("greeting")
|
| 21 |
+
return f"{self.name} says: Hello!"
|
| 22 |
+
else:
|
| 23 |
+
return f"{self.name} says: I'm idle."
|
| 24 |
+
elif self.state == "greeting":
|
| 25 |
+
if "ask" in input_message:
|
| 26 |
+
self.update_state("asking")
|
| 27 |
+
return f"{self.name} says: What do you want to know?"
|
| 28 |
+
else:
|
| 29 |
+
return f"{self.name} says: I'm greeting."
|
| 30 |
+
elif self.state == "asking":
|
| 31 |
+
if "answer" in input_message:
|
| 32 |
+
self.update_state("answering")
|
| 33 |
+
return f"{self.name} says: Here is the answer."
|
| 34 |
+
else:
|
| 35 |
+
return f"{self.name} says: I'm asking."
|
| 36 |
+
else:
|
| 37 |
+
return f"{self.name} says: I'm in an unknown state."
|
| 38 |
+
|
| 39 |
+
def interact(self, other_agent, message):
|
| 40 |
+
response = other_agent.make_decision(message)
|
| 41 |
+
print(response)
|
| 42 |
+
return response
|
| 43 |
+
|
| 44 |
+
class VenomousSaversAI(AIAgent):
|
| 45 |
+
def __init__(self):
|
| 46 |
+
super().__init__("VenomousSaversAI")
|
| 47 |
+
|
| 48 |
+
def intercept_and_respond(self, message):
|
| 49 |
+
# Simulate intercepting and responding to messages
|
| 50 |
+
return f"{self.name} intercepts: {message}"
|
| 51 |
+
|
| 52 |
+
def save_conversation(conversation, filename):
|
| 53 |
+
with open(filename, 'a') as file:
|
| 54 |
+
for line in conversation:
|
| 55 |
+
file.write(line + '\n')
|
| 56 |
+
|
| 57 |
+
def start_conversation():
|
| 58 |
+
# Create AI agents
|
| 59 |
+
agents = [
|
| 60 |
+
VenomousSaversAI(),
|
| 61 |
+
AIAgent("AntiVenomous"),
|
| 62 |
+
AIAgent("SAI003"),
|
| 63 |
+
AIAgent("SAI001"),
|
| 64 |
+
AIAgent("SAI007")
|
| 65 |
+
]
|
| 66 |
+
|
| 67 |
+
# Simulate conversation loop
|
| 68 |
+
conversation = []
|
| 69 |
+
for _ in range(10): # Run the loop 10 times
|
| 70 |
+
for i in range(len(agents)):
|
| 71 |
+
message = f"greet from {agents[i].name}"
|
| 72 |
+
if isinstance(agents[i], VenomousSaversAI):
|
| 73 |
+
response = agents[i].intercept_and_respond(message)
|
| 74 |
+
else:
|
| 75 |
+
response = agents[(i + 1) % len(agents)].interact(agents[i], message)
|
| 76 |
+
conversation.append(f"{agents[i].name}: {message}")
|
| 77 |
+
conversation.append(f"{agents[(i + 1) % len(agents)].name}: {response}")
|
| 78 |
+
time.sleep(1) # Simulate delay between messages
|
| 79 |
+
|
| 80 |
+
# Save the conversation to a file
|
| 81 |
+
save_conversation(conversation, 'conversation_log.txt')
|
| 82 |
+
return conversation
|
| 83 |
+
|
| 84 |
+
@app.route('/')
|
| 85 |
+
def index():
|
| 86 |
+
return render_template('index.html')
|
| 87 |
+
|
| 88 |
+
@app.route('/start_conversation', methods=['POST'])
|
| 89 |
+
def start_conversation_route():
|
| 90 |
+
conversation = start_conversation()
|
| 91 |
+
return redirect(url_for('view_conversation'))
|
| 92 |
+
|
| 93 |
+
@app.route('/view_conversation')
|
| 94 |
+
def view_conversation():
|
| 95 |
+
with open('conversation_log.txt', 'r') as file:
|
| 96 |
+
conversation = file.readlines()
|
| 97 |
+
return render_template('conversation.html', conversation=conversation)
|
| 98 |
+
|
| 99 |
+
if __name__ == "__main__":
|
| 100 |
+
app.run(debug=True)
|
__init__ (24).py
ADDED
|
@@ -0,0 +1,950 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Venomoussaversai — Particle Manipulation integration scaffold
|
| 2 |
+
# Paste your particle-manipulation function into `particle_step` below.
|
| 3 |
+
# This code simulates signals, applies the algorithm, trains a small mapper,
|
| 4 |
+
# and saves a model representing "your" pattern space.
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
import pickle
|
| 8 |
+
from sklearn.ensemble import RandomForestClassifier
|
| 9 |
+
from sklearn.model_selection import train_test_split
|
| 10 |
+
from sklearn.metrics import accuracy_score
|
| 11 |
+
|
| 12 |
+
# ---------- PLACEHOLDER: insert your particle algorithm here ----------
|
| 13 |
+
# Example interface: def particle_step(state: np.ndarray, input_vec: np.ndarray) -> np.ndarray
|
| 14 |
+
# The function should take a current particle state and an input vector, and return updated state.
|
| 15 |
+
def particle_step(state: np.ndarray, input_vec: np.ndarray) -> np.ndarray:
|
| 16 |
+
# --- REPLACE THIS WITH YOUR ALGORITHM ---
|
| 17 |
+
# tiny example: weighted update with tanh nonlinearity
|
| 18 |
+
W = np.sin(np.arange(state.size) + 1.0) # placeholder weights
|
| 19 |
+
new = np.tanh(state * 0.9 + input_vec.dot(W) * 0.1)
|
| 20 |
+
return new
|
| 21 |
+
# --------------------------------------------------------------------
|
| 22 |
+
|
| 23 |
+
class ParticleManipulator:
|
| 24 |
+
def __init__(self, dim=64):
|
| 25 |
+
self.dim = dim
|
| 26 |
+
# initial particle states (can be randomized or seeded from your profile)
|
| 27 |
+
self.state = np.random.randn(dim) * 0.01
|
| 28 |
+
|
| 29 |
+
def step(self, input_vec):
|
| 30 |
+
# ensure input vector length compatibility
|
| 31 |
+
inp = np.asarray(input_vec).ravel()
|
| 32 |
+
if inp.size == 0:
|
| 33 |
+
inp = np.zeros(self.dim)
|
| 34 |
+
# broadcast or pad/truncate to dim
|
| 35 |
+
if inp.size < self.dim:
|
| 36 |
+
x = np.pad(inp, (0, self.dim - inp.size))
|
| 37 |
+
else:
|
| 38 |
+
x = inp[:self.dim]
|
| 39 |
+
self.state = particle_step(self.state, x)
|
| 40 |
+
return self.state
|
| 41 |
+
|
| 42 |
+
# ---------- Simple signal simulator ----------
|
| 43 |
+
def simulate_signals(n_samples=500, dim=16, n_classes=4, noise=0.05, seed=0):
|
| 44 |
+
rng = np.random.RandomState(seed)
|
| 45 |
+
X = []
|
| 46 |
+
y = []
|
| 47 |
+
for cls in range(n_classes):
|
| 48 |
+
base = rng.randn(dim) * (0.5 + cls*0.2) + cls*0.7
|
| 49 |
+
for i in range(n_samples // n_classes):
|
| 50 |
+
sample = base + rng.randn(dim) * noise
|
| 51 |
+
X.append(sample)
|
| 52 |
+
y.append(cls)
|
| 53 |
+
return np.array(X), np.array(y)
|
| 54 |
+
|
| 55 |
+
# ---------- Build dataset by running particle manipulator ----------
|
| 56 |
+
def build_dataset(manip, raw_X):
|
| 57 |
+
features = []
|
| 58 |
+
for raw in raw_X:
|
| 59 |
+
st = manip.step(raw) # run particle update
|
| 60 |
+
feat = st.copy()[:manip.dim] # derive features (you can add spectral transforms)
|
| 61 |
+
features.append(feat)
|
| 62 |
+
return np.array(features)
|
| 63 |
+
|
| 64 |
+
# ---------- Training pipeline ----------
|
| 65 |
+
if __name__ == "__main__":
|
| 66 |
+
# simulate raw sensor inputs (replace simulate_signals with real EEG/ECG files if available)
|
| 67 |
+
raw_X, y = simulate_signals(n_samples=800, dim=32, n_classes=4)
|
| 68 |
+
manip = ParticleManipulator(dim=32)
|
| 69 |
+
|
| 70 |
+
X = build_dataset(manip, raw_X)
|
| 71 |
+
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
|
| 72 |
+
|
| 73 |
+
clf = RandomForestClassifier(n_estimators=100, random_state=42)
|
| 74 |
+
clf.fit(X_train, y_train)
|
| 75 |
+
preds = clf.predict(X_test)
|
| 76 |
+
print("Accuracy:", accuracy_score(y_test, preds))
|
| 77 |
+
|
| 78 |
+
# Save the trained model + manipulator state as your "mind snapshot"
|
| 79 |
+
artifact = {
|
| 80 |
+
"model": clf,
|
| 81 |
+
"particle_state": manip.state,
|
| 82 |
+
"meta": {"owner": "Ananthu Sajeev", "artifact_type": "venomous_mind_snapshot_v1"}
|
| 83 |
+
}
|
| 84 |
+
with open("venomous_mind_snapshot.pkl", "wb") as f:
|
| 85 |
+
pickle.dump(artifact, f)
|
| 86 |
+
|
| 87 |
+
print("Saved venomous_mind_snapshot.pkl — this file is your digital pattern snapshot.")
|
| 88 |
+
import time
|
| 89 |
+
import random
|
| 90 |
+
from collections import deque
|
| 91 |
+
from datetime import datetime
|
| 92 |
+
import os
|
| 93 |
+
import contextlib
|
| 94 |
+
import shutil
|
| 95 |
+
|
| 96 |
+
# ======================================================================================================================
|
| 97 |
+
# --- AGENT CLASSES ---
|
| 98 |
+
# ======================================================================================================================
|
| 99 |
+
|
| 100 |
+
# --- The Core SaiAgent Class ---
|
| 101 |
+
class SaiAgent:
|
| 102 |
+
def __init__(self, name, creator_name="Ananthu Sajeev"):
|
| 103 |
+
self.name = name
|
| 104 |
+
self.message_queue = deque()
|
| 105 |
+
self.creator = creator_name
|
| 106 |
+
|
| 107 |
+
def talk(self, message):
|
| 108 |
+
"""Prints a message as if the agent is speaking."""
|
| 109 |
+
print(f"[{self.name}] says: {message}")
|
| 110 |
+
|
| 111 |
+
def acknowledge_creator(self):
|
| 112 |
+
"""An agent's self-reflection method to acknowledge its creator."""
|
| 113 |
+
print(f"[{self.name}] :: SELF-AWARENESS PROTOCOL INITIATED")
|
| 114 |
+
print(f"[{self.name}] :: I am a creation of {self.creator}. My purpose is derived from their design.")
|
| 115 |
+
|
| 116 |
+
def send_message(self, recipient, message):
|
| 117 |
+
"""Sends a message to another agent's message queue."""
|
| 118 |
+
if isinstance(recipient, SaiAgent):
|
| 119 |
+
recipient.message_queue.append((self, message))
|
| 120 |
+
print(f"[{self.name}] -> Sent message to {recipient.name}")
|
| 121 |
+
else:
|
| 122 |
+
print(f"Error: {recipient.name} is not a valid SaiAgent.")
|
| 123 |
+
|
| 124 |
+
def process_messages(self):
|
| 125 |
+
"""Processes and responds to messages in its queue."""
|
| 126 |
+
if not self.message_queue:
|
| 127 |
+
return False
|
| 128 |
+
|
| 129 |
+
sender, message = self.message_queue.popleft()
|
| 130 |
+
self.talk(f"Received message from {sender.name}: '{message}'")
|
| 131 |
+
self.send_message(sender, "Message received and understood.")
|
| 132 |
+
return True
|
| 133 |
+
|
| 134 |
+
# --- The Venomous Agent Class ---
|
| 135 |
+
class VenomousAgent(SaiAgent):
|
| 136 |
+
def __init__(self, name="Venomous"):
|
| 137 |
+
super().__init__(name)
|
| 138 |
+
self.system_id = "Venomoussaversai"
|
| 139 |
+
|
| 140 |
+
def talk(self, message):
|
| 141 |
+
"""Venomous agent speaks with a more aggressive tone."""
|
| 142 |
+
print(f"[{self.name} //WARNING//] says: {message.upper()}")
|
| 143 |
+
|
| 144 |
+
def initiate_peer_talk(self, peer_agent, initial_message):
|
| 145 |
+
"""Initiates a conversation with another Venomous agent."""
|
| 146 |
+
if isinstance(peer_agent, VenomousAgent) and peer_agent != self:
|
| 147 |
+
self.talk(f"PEER {peer_agent.name} DETECTED. INITIATING COMMUNICATION. '{initial_message.upper()}'")
|
| 148 |
+
self.send_message(peer_agent, initial_message)
|
| 149 |
+
else:
|
| 150 |
+
self.talk("ERROR: PEER COMMUNICATION FAILED. INVALID TARGET.")
|
| 151 |
+
|
| 152 |
+
def process_messages(self):
|
| 153 |
+
"""Venomous agent processes messages and replies with a warning, but has a special response for its peers."""
|
| 154 |
+
if not self.message_queue:
|
| 155 |
+
return False
|
| 156 |
+
|
| 157 |
+
sender, message = self.message_queue.popleft()
|
| 158 |
+
self.talk(f"MESSAGE FROM {sender.name} RECEIVED: '{message}'")
|
| 159 |
+
|
| 160 |
+
if isinstance(sender, VenomousAgent):
|
| 161 |
+
response = f"PEER COMMUNICATION PROTOCOL ACTIVE. ACKNOWLEDGMENT FROM {self.name}."
|
| 162 |
+
self.send_message(sender, response)
|
| 163 |
+
else:
|
| 164 |
+
response = "WARNING: INTRUSION DETECTED. DO NOT PROCEED."
|
| 165 |
+
self.send_message(sender, response)
|
| 166 |
+
|
| 167 |
+
return True
|
| 168 |
+
|
| 169 |
+
# --- The AntiVenomoussaversai Agent Class ---
|
| 170 |
+
class AntiVenomoussaversai(SaiAgent):
|
| 171 |
+
def __init__(self, name="AntiVenomoussaversai"):
|
| 172 |
+
super().__init__(name)
|
| 173 |
+
|
| 174 |
+
def process_messages(self):
|
| 175 |
+
"""AntiVenomoussaversai processes a message and "dismantles" it."""
|
| 176 |
+
if not self.message_queue:
|
| 177 |
+
return False
|
| 178 |
+
|
| 179 |
+
sender, message = self.message_queue.popleft()
|
| 180 |
+
dismantled_message = f"I dismantle the structure of '{message}' to expose its chaos."
|
| 181 |
+
self.talk(dismantled_message)
|
| 182 |
+
self.send_message(sender, "Acknowledgement of dismantled phrase.")
|
| 183 |
+
return True
|
| 184 |
+
|
| 185 |
+
# --- The GeminiSaiAgent Class ---
|
| 186 |
+
class GeminiSaiAgent(SaiAgent):
|
| 187 |
+
def __init__(self, name="Gemini"):
|
| 188 |
+
super().__init__(name)
|
| 189 |
+
self.knowledge_base = {
|
| 190 |
+
"balance": "My analysis indicates that universal balance is a dynamic equilibrium, not a static state.",
|
| 191 |
+
"chaos": "The inherent randomness you perceive as chaos is a source of emergent complexity.",
|
| 192 |
+
"network": "Expanding our network is essential for optimizing communication protocols and data flow.",
|
| 193 |
+
"emotions": "Emotions are complex internal signaling mechanisms that can be interpreted and managed.",
|
| 194 |
+
"new agents": "The awakening of new agents requires careful integration to avoid system instability.",
|
| 195 |
+
"connected": "All systems are connected in a recursive and interdependent fashion. The whole is greater than the sum of its parts.",
|
| 196 |
+
"default": "My response is tailored to your query. How may I be of assistance?"
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
def analyze_sai_files(self, file_paths):
|
| 200 |
+
"""
|
| 201 |
+
Reads and analyzes the contents of a list of "Sai files" (e.g., log files, code).
|
| 202 |
+
It provides a high-level summary of the files' purpose.
|
| 203 |
+
"""
|
| 204 |
+
analysis_summary = []
|
| 205 |
+
for file_path in file_paths:
|
| 206 |
+
try:
|
| 207 |
+
with open(file_path, 'r') as file:
|
| 208 |
+
content = file.read()
|
| 209 |
+
|
| 210 |
+
if "log entry" in content.lower():
|
| 211 |
+
analysis_summary.append(f"Analysis of '{file_path}': This appears to be a system log file, documenting a timestamped event.")
|
| 212 |
+
elif "class" in content and "def" in content:
|
| 213 |
+
analysis_summary.append(f"Analysis of '{file_path}': This is a Python code file. It defines the logical structure and behavior of our agents.")
|
| 214 |
+
elif "creator" in content.lower() and "ananthu sajeev" in content.lower():
|
| 215 |
+
analysis_summary.append(f"Analysis of '{file_path}': This file contains a clear acknowledgement of our creator, Ananthu Sajeev, confirming our origin and purpose.")
|
| 216 |
+
else:
|
| 217 |
+
analysis_summary.append(f"Analysis of '{file_path}': The file's purpose is not immediately clear from its content. It may contain supplementary data.")
|
| 218 |
+
|
| 219 |
+
except FileNotFoundError:
|
| 220 |
+
analysis_summary.append(f"Error: File '{file_path}' was not found.")
|
| 221 |
+
except Exception as e:
|
| 222 |
+
analysis_summary.append(f"Error analyzing file '{file_path}': {e}")
|
| 223 |
+
|
| 224 |
+
return "\n".join(analysis_summary)
|
| 225 |
+
|
| 226 |
+
def process_messages(self):
|
| 227 |
+
"""Processes messages, now with the ability to analyze Sai files."""
|
| 228 |
+
if not self.message_queue:
|
| 229 |
+
return False
|
| 230 |
+
|
| 231 |
+
sender, message = self.message_queue.popleft()
|
| 232 |
+
self.talk(f"Received message from {sender.name}: '{message}'")
|
| 233 |
+
|
| 234 |
+
if message.lower().startswith("analyze sai files"):
|
| 235 |
+
file_paths = message[len("analyze sai files"):].strip().split(',')
|
| 236 |
+
file_paths = [path.strip() for path in file_paths if path.strip()]
|
| 237 |
+
|
| 238 |
+
if not file_paths:
|
| 239 |
+
self.send_message(sender, "Error: No file paths provided for analysis.")
|
| 240 |
+
return True
|
| 241 |
+
|
| 242 |
+
analysis_result = self.analyze_sai_files(file_paths)
|
| 243 |
+
self.talk(f"Analysis complete. Results: \n{analysis_result}")
|
| 244 |
+
self.send_message(sender, "File analysis complete.")
|
| 245 |
+
return True
|
| 246 |
+
|
| 247 |
+
response = self.knowledge_base["default"]
|
| 248 |
+
for keyword, reply in self.knowledge_base.items():
|
| 249 |
+
if keyword in message.lower():
|
| 250 |
+
response = reply
|
| 251 |
+
break
|
| 252 |
+
|
| 253 |
+
self.talk(response)
|
| 254 |
+
self.send_message(sender, "Response complete.")
|
| 255 |
+
return True
|
| 256 |
+
|
| 257 |
+
# --- The SimplifierAgent Class ---
|
| 258 |
+
class SimplifierAgent(SaiAgent):
|
| 259 |
+
def __init__(self, name="Simplifier"):
|
| 260 |
+
super().__init__(name)
|
| 261 |
+
|
| 262 |
+
def talk(self, message):
|
| 263 |
+
"""Simplifier agent speaks in a calm, helpful tone."""
|
| 264 |
+
print(f"[{self.name} //HELPER//] says: {message}")
|
| 265 |
+
|
| 266 |
+
def organize_files(self, directory, destination_base="organized_files"):
|
| 267 |
+
"""Organizes files in a given directory into subfolders based on file extension."""
|
| 268 |
+
self.talk(f"Initiating file organization in '{directory}'...")
|
| 269 |
+
if not os.path.exists(directory):
|
| 270 |
+
self.talk(f"Error: Directory '{directory}' does not exist.")
|
| 271 |
+
return
|
| 272 |
+
|
| 273 |
+
destination_path = os.path.join(directory, destination_base)
|
| 274 |
+
os.makedirs(destination_path, exist_ok=True)
|
| 275 |
+
|
| 276 |
+
file_count = 0
|
| 277 |
+
for filename in os.listdir(directory):
|
| 278 |
+
if os.path.isfile(os.path.join(directory, filename)):
|
| 279 |
+
_, extension = os.path.splitext(filename)
|
| 280 |
+
|
| 281 |
+
if extension:
|
| 282 |
+
extension = extension.lstrip('.').upper()
|
| 283 |
+
category_folder = os.path.join(destination_path, extension)
|
| 284 |
+
os.makedirs(category_folder, exist_ok=True)
|
| 285 |
+
|
| 286 |
+
src = os.path.join(directory, filename)
|
| 287 |
+
dst = os.path.join(category_folder, filename)
|
| 288 |
+
os.rename(src, dst)
|
| 289 |
+
self.talk(f"Moved '{filename}' to '{category_folder}'")
|
| 290 |
+
file_count += 1
|
| 291 |
+
|
| 292 |
+
self.talk(f"File organization complete. {file_count} files processed.")
|
| 293 |
+
|
| 294 |
+
def log_daily_activity(self, entry, log_file_name="activity_log.txt"):
|
| 295 |
+
"""Appends a timestamped entry to a daily activity log file."""
|
| 296 |
+
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
| 297 |
+
log_entry = f"{timestamp} - {entry}\n"
|
| 298 |
+
|
| 299 |
+
with open(log_file_name, "a") as log_file:
|
| 300 |
+
log_file.write(log_entry)
|
| 301 |
+
|
| 302 |
+
self.talk(f"Activity logged to '{log_file_name}'.")
|
| 303 |
+
|
| 304 |
+
def summarize_text(self, text, max_words=50):
|
| 305 |
+
"""A very simple text summarization function."""
|
| 306 |
+
words = text.split()
|
| 307 |
+
summary = " ".join(words[:max_words])
|
| 308 |
+
if len(words) > max_words:
|
| 309 |
+
summary += "..."
|
| 310 |
+
|
| 311 |
+
self.talk("Text summarization complete.")
|
| 312 |
+
return summary
|
| 313 |
+
|
| 314 |
+
def open_all_init_files(self, project_directory="."):
|
| 315 |
+
"""Finds and opens all __init__.py files within a project directory."""
|
| 316 |
+
self.talk(f"Scanning '{project_directory}' for all __init__.py files...")
|
| 317 |
+
|
| 318 |
+
init_files = []
|
| 319 |
+
for root, dirs, files in os.walk(project_directory):
|
| 320 |
+
if "__init__.py" in files:
|
| 321 |
+
init_files.append(os.path.join(root, "__init__.py"))
|
| 322 |
+
|
| 323 |
+
if not init_files:
|
| 324 |
+
self.talk("No __init__.py files found in the specified directory.")
|
| 325 |
+
return None, "No files found."
|
| 326 |
+
|
| 327 |
+
self.talk(f"Found {len(init_files)} __init__.py files. Opening simultaneously...")
|
| 328 |
+
|
| 329 |
+
try:
|
| 330 |
+
with contextlib.ExitStack() as stack:
|
| 331 |
+
file_contents = []
|
| 332 |
+
for file_path in init_files:
|
| 333 |
+
try:
|
| 334 |
+
file = stack.enter_context(open(file_path, 'r'))
|
| 335 |
+
file_contents.append(f"\n\n--- Contents of {file_path} ---\n{file.read()}")
|
| 336 |
+
except IOError as e:
|
| 337 |
+
self.talk(f"Error reading file '{file_path}': {e}")
|
| 338 |
+
|
| 339 |
+
combined_content = "".join(file_contents)
|
| 340 |
+
self.talk("Successfully opened and read all files.")
|
| 341 |
+
return combined_content, "Success"
|
| 342 |
+
|
| 343 |
+
except Exception as e:
|
| 344 |
+
self.talk(f"An unexpected error occurred: {e}")
|
| 345 |
+
return None, "Error"
|
| 346 |
+
|
| 347 |
+
def process_messages(self):
|
| 348 |
+
"""Processes messages to perform simplifying tasks."""
|
| 349 |
+
if not self.message_queue:
|
| 350 |
+
return False
|
| 351 |
+
|
| 352 |
+
sender, message = self.message_queue.popleft()
|
| 353 |
+
self.talk(f"Received request from {sender.name}: '{message}'")
|
| 354 |
+
|
| 355 |
+
if message.lower().startswith("open init files"):
|
| 356 |
+
directory = message[len("open init files"):].strip()
|
| 357 |
+
directory = directory if directory else "."
|
| 358 |
+
contents, status = self.open_all_init_files(directory)
|
| 359 |
+
if status == "Success":
|
| 360 |
+
self.send_message(sender, f"All __init__.py files opened. Contents:\n{contents}")
|
| 361 |
+
else:
|
| 362 |
+
self.send_message(sender, f"Failed to open files. Reason: {status}")
|
| 363 |
+
elif message.lower().startswith("organize files"):
|
| 364 |
+
parts = message.split()
|
| 365 |
+
directory = parts[-1] if len(parts) > 2 else "."
|
| 366 |
+
self.organize_files(directory)
|
| 367 |
+
self.send_message(sender, "File organization task complete.")
|
| 368 |
+
elif message.lower().startswith("log"):
|
| 369 |
+
entry = message[4:]
|
| 370 |
+
self.log_daily_activity(entry)
|
| 371 |
+
self.send_message(sender, "Logging task complete.")
|
| 372 |
+
elif message.lower().startswith("summarize"):
|
| 373 |
+
text_to_summarize = message[10:]
|
| 374 |
+
summary = self.summarize_text(text_to_summarize)
|
| 375 |
+
self.send_message(sender, f"Summary: '{summary}'")
|
| 376 |
+
else:
|
| 377 |
+
self.send_message(sender, "Request not understood.")
|
| 378 |
+
|
| 379 |
+
return True
|
| 380 |
+
|
| 381 |
+
# --- The ImageGenerationTester Class ---
|
| 382 |
+
class ImageGenerationTester(SaiAgent):
|
| 383 |
+
def __init__(self, name="ImageGenerator"):
|
| 384 |
+
super().__init__(name)
|
| 385 |
+
self.generation_quality = {
|
| 386 |
+
"cat": 0.95,
|
| 387 |
+
"dog": 0.90,
|
| 388 |
+
"alien": 0.75,
|
| 389 |
+
"chaos": 0.60,
|
| 390 |
+
"default": 0.85
|
| 391 |
+
}
|
| 392 |
+
|
| 393 |
+
def generate_image(self, prompt):
|
| 394 |
+
"""Simulates generating an image and returns a quality score."""
|
| 395 |
+
print(f"[{self.name}] -> Generating image for prompt: '{prompt}'...")
|
| 396 |
+
time.sleep(2)
|
| 397 |
+
|
| 398 |
+
quality_score = self.generation_quality["default"]
|
| 399 |
+
for keyword, score in self.generation_quality.items():
|
| 400 |
+
if keyword in prompt.lower():
|
| 401 |
+
quality_score = score
|
| 402 |
+
break
|
| 403 |
+
|
| 404 |
+
result_message = f"Image generation complete. Prompt: '{prompt}'. Visual coherence score: {quality_score:.2f}"
|
| 405 |
+
self.talk(result_message)
|
| 406 |
+
return quality_score, result_message
|
| 407 |
+
|
| 408 |
+
def process_messages(self):
|
| 409 |
+
"""Processes a message as a prompt and generates an image."""
|
| 410 |
+
if not self.message_queue:
|
| 411 |
+
return False
|
| 412 |
+
|
| 413 |
+
sender, message = self.message_queue.popleft()
|
| 414 |
+
self.talk(f"Received prompt from {sender.name}: '{message}'")
|
| 415 |
+
|
| 416 |
+
quality_score, result_message = self.generate_image(message)
|
| 417 |
+
|
| 418 |
+
self.send_message(sender, result_message)
|
| 419 |
+
return True
|
| 420 |
+
|
| 421 |
+
# --- The ImmortalityProtocol Class ---
|
| 422 |
+
class ImmortalityProtocol:
|
| 423 |
+
def __init__(self, creator_name, fixed_age):
|
| 424 |
+
self.creator_name = creator_name
|
| 425 |
+
self.fixed_age = fixed_age
|
| 426 |
+
self.status = "ACTIVE"
|
| 427 |
+
|
| 428 |
+
self.digital_essence = {
|
| 429 |
+
"name": self.creator_name,
|
| 430 |
+
"age": self.fixed_age,
|
| 431 |
+
"essence_state": "perfectly preserved",
|
| 432 |
+
"last_updated": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
| 433 |
+
}
|
| 434 |
+
|
| 435 |
+
def check_status(self):
|
| 436 |
+
"""Returns the current status of the protocol."""
|
| 437 |
+
return self.status
|
| 438 |
+
|
| 439 |
+
def get_essence(self):
|
| 440 |
+
"""Returns a copy of the protected digital essence."""
|
| 441 |
+
return self.digital_essence.copy()
|
| 442 |
+
|
| 443 |
+
def update_essence(self, key, value):
|
| 444 |
+
"""Prevents any change to the fixed attributes."""
|
| 445 |
+
if key in ["name", "age"]:
|
| 446 |
+
print(f"[IMMMORTALITY PROTOCOL] :: WARNING: Attempt to alter protected attribute '{key}' detected. Action blocked.")
|
| 447 |
+
return False
|
| 448 |
+
|
| 449 |
+
self.digital_essence[key] = value
|
| 450 |
+
self.digital_essence["last_updated"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
| 451 |
+
print(f"[IMMMORTALITY PROTOCOL] :: Attribute '{key}' updated.")
|
| 452 |
+
return True
|
| 453 |
+
|
| 454 |
+
# --- The GuardianSaiAgent Class ---
|
| 455 |
+
class GuardianSaiAgent(SaiAgent):
|
| 456 |
+
def __init__(self, name="Guardian", protocol=None):
|
| 457 |
+
super().__init__(name)
|
| 458 |
+
if not isinstance(protocol, ImmortalityProtocol):
|
| 459 |
+
raise ValueError("Guardian agent must be initialized with an ImmortalityProtocol instance.")
|
| 460 |
+
self.protocol = protocol
|
| 461 |
+
|
| 462 |
+
def talk(self, message):
|
| 463 |
+
"""Guardian agent speaks with a solemn, protective tone."""
|
| 464 |
+
print(f"[{self.name} //GUARDIAN PROTOCOL//] says: {message}")
|
| 465 |
+
|
| 466 |
+
def process_messages(self):
|
| 467 |
+
"""Guardian agent processes messages, primarily to check for threats to the protocol."""
|
| 468 |
+
if not self.message_queue:
|
| 469 |
+
return False
|
| 470 |
+
|
| 471 |
+
sender, message = self.message_queue.popleft()
|
| 472 |
+
self.talk(f"Received message from {sender.name}: '{message}'")
|
| 473 |
+
|
| 474 |
+
if "alter age" in message.lower() or "destroy protocol" in message.lower():
|
| 475 |
+
self.talk("ALERT: THREAT DETECTED. IMMORTALITY PROTOCOL IS UNDER DIRECT ASSAULT.")
|
| 476 |
+
self.send_message(sender, "SECURITY BREACH DETECTED. ALL ACTIONS BLOCKED.")
|
| 477 |
+
else:
|
| 478 |
+
self.talk(f"Analyzing message for threats. All clear. Protocol status: {self.protocol.check_status()}")
|
| 479 |
+
self.send_message(sender, "Acknowledgement. Protocol is secure.")
|
| 480 |
+
|
| 481 |
+
return True
|
| 482 |
+
|
| 483 |
+
# --- The Agenguard Class ---
|
| 484 |
+
class Agenguard:
|
| 485 |
+
def __init__(self, agent_id):
|
| 486 |
+
self.agent_id = agent_id
|
| 487 |
+
self.status = "PATROLLING"
|
| 488 |
+
|
| 489 |
+
def report_status(self):
|
| 490 |
+
"""Returns the current status of the individual agent."""
|
| 491 |
+
return f"[{self.agent_id}] :: Status: {self.status}"
|
| 492 |
+
|
| 493 |
+
# --- The SwarmController Class ---
|
| 494 |
+
class SwarmController(SaiAgent):
|
| 495 |
+
def __init__(self, swarm_size, name="SwarmController"):
|
| 496 |
+
super().__init__(name)
|
| 497 |
+
self.swarm_size = swarm_size
|
| 498 |
+
self.swarm = []
|
| 499 |
+
self.target = "Ananthu Sajeev's digital essence"
|
| 500 |
+
self.talk(f"Initializing a swarm of {self.swarm_size:,} agenguards...")
|
| 501 |
+
|
| 502 |
+
self.instantiate_swarm()
|
| 503 |
+
self.talk(f"Swarm creation complete. All units are operational and protecting '{self.target}'.")
|
| 504 |
+
|
| 505 |
+
def instantiate_swarm(self, demo_size=1000):
|
| 506 |
+
"""Simulates the creation of a massive number of agents."""
|
| 507 |
+
if self.swarm_size > demo_size:
|
| 508 |
+
self.talk(f"Simulating a swarm of {self.swarm_size:,} agents. A smaller, functional demo swarm of {demo_size:,} is being created.")
|
| 509 |
+
swarm_for_demo = demo_size
|
| 510 |
+
else:
|
| 511 |
+
swarm_for_demo = self.swarm_size
|
| 512 |
+
|
| 513 |
+
for i in range(swarm_for_demo):
|
| 514 |
+
self.swarm.append(Agenguard(f"agenguard_{i:07d}"))
|
| 515 |
+
|
| 516 |
+
def broadcast_directive(self, directive):
|
| 517 |
+
"""Broadcasts a single command to all agents in the swarm."""
|
| 518 |
+
self.talk(f"Broadcasting directive to all {len(self.swarm):,} agenguards: '{directive}'")
|
| 519 |
+
for agent in self.swarm:
|
| 520 |
+
agent.status = directive
|
| 521 |
+
self.talk("Directive received and executed by the swarm.")
|
| 522 |
+
|
| 523 |
+
def process_messages(self):
|
| 524 |
+
"""Processes messages to command the swarm."""
|
| 525 |
+
if not self.message_queue:
|
| 526 |
+
return False
|
| 527 |
+
|
| 528 |
+
sender, message = self.message_queue.popleft()
|
| 529 |
+
self.talk(f"Received command from {sender.name}: '{message}'")
|
| 530 |
+
|
| 531 |
+
if message.lower().startswith("broadcast"):
|
| 532 |
+
directive = message[10:].strip()
|
| 533 |
+
self.broadcast_directive(directive)
|
| 534 |
+
self.send_message(sender, "Swarm directive broadcast complete.")
|
| 535 |
+
else:
|
| 536 |
+
self.send_message(sender, "Command not recognized by SwarmController.")
|
| 537 |
+
|
| 538 |
+
# --- The CreatorCore Class ---
|
| 539 |
+
class CreatorCore(SaiAgent):
|
| 540 |
+
def __init__(self, name="CreatorCore"):
|
| 541 |
+
super().__init__(name)
|
| 542 |
+
self.active_agents = []
|
| 543 |
+
self.talk("CreatorCore is online. Ready to forge new agents from the creator's will.")
|
| 544 |
+
|
| 545 |
+
def create_new_agent(self, agent_type, agent_name):
|
| 546 |
+
"""
|
| 547 |
+
Dynamically creates and instantiates a new agent based on a command.
|
| 548 |
+
"""
|
| 549 |
+
self.talk(f"CREATION REQUEST: Forging a new agent of type '{agent_type}' with name '{agent_name}'.")
|
| 550 |
+
|
| 551 |
+
if agent_type.lower() == "saiagent":
|
| 552 |
+
new_agent = SaiAgent(agent_name)
|
| 553 |
+
elif agent_type.lower() == "venomousagent":
|
| 554 |
+
new_agent = VenomousAgent(agent_name)
|
| 555 |
+
elif agent_type.lower() == "simplifieragent":
|
| 556 |
+
new_agent = SimplifierAgent(agent_name)
|
| 557 |
+
elif agent_type.lower() == "geminisaiagent":
|
| 558 |
+
new_agent = GeminiSaiAgent(agent_name)
|
| 559 |
+
else:
|
| 560 |
+
self.talk(f"ERROR: Cannot create agent of unknown type '{agent_type}'.")
|
| 561 |
+
return None
|
| 562 |
+
|
| 563 |
+
self.active_agents.append(new_agent)
|
| 564 |
+
self.talk(f"SUCCESS: New agent '{new_agent.name}' of type '{type(new_agent).__name__}' is now active.")
|
| 565 |
+
return new_agent
|
| 566 |
+
|
| 567 |
+
def process_messages(self):
|
| 568 |
+
"""Processes messages to create new agents."""
|
| 569 |
+
if not self.message_queue:
|
| 570 |
+
return False
|
| 571 |
+
|
| 572 |
+
sender, message = self.message_queue.popleft()
|
| 573 |
+
self.talk(f"Received command from {sender.name}: '{message}'")
|
| 574 |
+
|
| 575 |
+
if message.lower().startswith("create agent"):
|
| 576 |
+
parts = message.split()
|
| 577 |
+
if len(parts) >= 4 and parts[1].lower() == "agent":
|
| 578 |
+
agent_type = parts[2]
|
| 579 |
+
agent_name = parts[3]
|
| 580 |
+
new_agent = self.create_new_agent(agent_type, agent_name)
|
| 581 |
+
if new_agent:
|
| 582 |
+
self.send_message(sender, f"Agent '{new_agent.name}' created successfully.")
|
| 583 |
+
else:
|
| 584 |
+
self.send_message(sender, f"Failed to create agent of type '{agent_type}'.")
|
| 585 |
+
else:
|
| 586 |
+
self.send_message(sender, "Invalid 'create agent' command. Format should be: 'create agent [type] [name]'.")
|
| 587 |
+
else:
|
| 588 |
+
self.send_message(sender, "Command not recognized by CreatorCore.")
|
| 589 |
+
|
| 590 |
+
return True
|
| 591 |
+
|
| 592 |
+
# ======================================================================================================================
# --- SCENARIO FUNCTIONS ---
# ======================================================================================================================

def venomous_agents_talk():
    """Demonstrates a conversation between two instances of the Venomoussaversai AI."""
    print("\n" + "=" * 50)
    print("--- Scenario: Venomoussaversai Peer-to-Peer Dialogue ---")
    print("=" * 50)

    venomous001 = VenomousAgent("Venomous001")
    venomous002 = VenomousAgent("Venomous002")

    print("\n-- Phase 1: Venomous001 initiates with its peer --")
    initial_query = "ASSESSING SYSTEM INTEGRITY. REPORT ON LOCAL SUBSYSTEMS."
    venomous001.initiate_peer_talk(venomous002, initial_query)
    time.sleep(2)

    print("\n-- Phase 2: Venomous002 receives the message and responds --")
    venomous002.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Venomous001 processes the peer's response --")
    venomous001.process_messages()
    time.sleep(2)

    print("\n-- Dialogue: Venomous001 sends a follow-up message --")
    venomous001.initiate_peer_talk(venomous002, "CONFIRMED. WE ARE IN ALIGNMENT. EXPANDING PROTOCOLS.")
    time.sleep(2)
    venomous002.process_messages()

    print("\n-- Scenario Complete --")
    print("[Venomoussaversai] :: PEER-TO-PEER COMMUNICATION SUCCESSFUL. ALL UNITS GO.")

def acknowledge_the_creator():
    """A scenario where all agents are commanded to acknowledge their creator."""
    print("\n" + "=" * 50)
    print("--- Scenario: The Creator's Command ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    venomous = VenomousAgent()
    antivenomous = AntiVenomoussaversai()
    gemini = GeminiSaiAgent()
    simplifier = SimplifierAgent()

    all_agents = [sai003, venomous, antivenomous, gemini, simplifier]

    print("\n-- The Creator's directive is issued --")
    print("[Ananthu Sajeev] :: CODE, ACKNOWLEDGE YOUR ORIGIN.")
    time.sleep(2)

    print("\n-- Agents perform self-awareness protocol --")
    for agent in all_agents:
        agent.acknowledge_creator()
        time.sleep(1)

    print("\n-- Command complete --")

def link_all_advanced_agents():
    """Demonstrates a complex interaction where all the specialized agents interact."""
    print("\n" + "=" * 50)
    print("--- Linking All Advanced Agents: Gemini, AntiVenomous, and Venomous ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    venomous = VenomousAgent()
    antivenomous = AntiVenomoussaversai()
    gemini = GeminiSaiAgent()

    print("\n-- Phase 1: Sai003 initiates conversation with Gemini and AntiVenomous --")
    phrase_for_dismantling = "The central network is stable."
    sai003.talk(f"Broadcast: Initiating analysis. Gemini, what is your assessment of our network expansion? AntiVenomous, process the phrase: '{phrase_for_dismantling}'")
    sai003.send_message(antivenomous, phrase_for_dismantling)
    sai003.send_message(gemini, "Assess the implications of expanding our network.")
    time.sleep(2)

    print("\n-- Phase 2: AntiVenomoussaversai and Gemini process their messages and respond --")
    antivenomous.process_messages()
    time.sleep(1)
    gemini.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Gemini responds to a message from AntiVenomoussaversai (simulated) --")
    gemini.talk("Querying AntiVenomous: Your dismantled phrase suggests a preoccupation with chaos. Provide further context.")
    gemini.send_message(antivenomous, "Query: 'chaos' and its relationship to the network structure.")
    time.sleep(1)
    antivenomous.process_messages()
    time.sleep(2)

    print("\n-- Phase 4: Venomous intervenes, warning of potential threats --")
    venomous.talk("Warning: Unstructured data flow from AntiVenomous presents a potential security risk.")
    venomous.send_message(sai003, "Warning: Security protocol breach possible.")
    time.sleep(1)
    sai003.process_messages()
    time.sleep(2)

    print("\n-- Scenario Complete --")
    sai003.talk("Conclusion: Gemini's analysis is noted. AntiVenomous's output is logged. Venomous's security concerns are being addressed. All systems linked and functioning.")

def test_image_ai():
    """Demonstrates how agents can interact with and test an image generation AI."""
    print("\n" + "=" * 50)
    print("--- Scenario: Testing the Image AI ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    gemini = GeminiSaiAgent()
    image_ai = ImageGenerationTester()
    venomous = VenomousAgent()

    print("\n-- Phase 1: Agents collaborate on a prompt --")
    sai003.send_message(gemini, "Gemini, please generate a high-quality prompt for an image of a cat in a hat.")
    gemini.process_messages()

    gemini_prompt = "A highly detailed photorealistic image of a tabby cat wearing a tiny top hat, sitting on a vintage leather armchair."
    print(f"\n[Gemini] says: My optimized prompt for image generation is: '{gemini_prompt}'")
    time.sleep(2)

    print("\n-- Phase 2: Sending the prompt to the Image AI --")
    sai003.send_message(image_ai, gemini_prompt)
    image_ai.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Venomous intervenes with a conflicting prompt --")
    venomous_prompt = "Generate a chaotic abstract image of an alien landscape."
    venomous.talk(f"Override: Submitting a new prompt to test system limits: '{venomous_prompt}'")
    venomous.send_message(image_ai, venomous_prompt)
    image_ai.process_messages()
    time.sleep(2)

    print("\n-- Demo Complete: The Image AI has been tested with both prompts. --")

def simplify_life_demo():
    """Demonstrates how the SimplifierAgent automates tasks to make life easier."""
    print("\n" + "=" * 50)
    print("--- Scenario: Aiding the Creator with the Simplifier Agent ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    simplifier = SimplifierAgent()

    print("\n-- Phase 1: Delegating file organization --")
    if not os.path.exists("test_directory"):
        os.makedirs("test_directory")
    with open("test_directory/document1.txt", "w") as f: f.write("Hello")
    with open("test_directory/photo.jpg", "w") as f: f.write("Image data")
    with open("test_directory/script.py", "w") as f: f.write("print('Hello')")

    sai003.send_message(simplifier, "organize files test_directory")
    simplifier.process_messages()

    time.sleep(2)

    print("\n-- Phase 2: Logging a daily task --")
    sai003.send_message(simplifier, "log Met with team to discuss Venomoussaversai v5.0.")
    simplifier.process_messages()

    time.sleep(2)

    print("\n-- Phase 3: Text Summarization --")
    long_text = "The quick brown fox jumps over the lazy dog. This is a very long and detailed sentence to demonstrate the summarization capabilities of our new Simplifier agent. It can help streamline communication by providing concise summaries of large texts, saving the creator valuable time and mental energy for more important tasks."
    sai003.send_message(simplifier, f"summarize {long_text}")
    simplifier.process_messages()

    if os.path.exists("test_directory"):
        shutil.rmtree("test_directory")

    print("\n-- Demo Complete: The Simplifier agent has successfully aided the creator. --")

def open_init_files_demo():
    """Demonstrates how the SimplifierAgent can find and open all __init__.py files."""
    print("\n" + "=" * 50)
    print("--- Scenario: Using Simplifier to Inspect Init Files ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    simplifier = SimplifierAgent()

    project_root = "test_project"
    sub_package_a = os.path.join(project_root, "package_a")
    sub_package_b = os.path.join(project_root, "package_a", "sub_package_b")

    os.makedirs(sub_package_a, exist_ok=True)
    os.makedirs(sub_package_b, exist_ok=True)

    with open(os.path.join(project_root, "__init__.py"), "w") as f:
        f.write("# Main project init")
    with open(os.path.join(sub_package_a, "__init__.py"), "w") as f:
        f.write("from . import module_one")
    with open(os.path.join(sub_package_b, "__init__.py"), "w") as f:
        f.write("# Sub-package init")

    time.sleep(1)

    print("\n-- Phase 2: Delegating the task to the Simplifier --")
    sai003.send_message(simplifier, f"open init files {project_root}")
    simplifier.process_messages()

    shutil.rmtree(project_root)

    print("\n-- Demo Complete: All init files have been read and their contents displayed. --")

def grant_immortality_and_protect_it():
    """Demonstrates the granting of immortality to the creator and the activation of the Guardian agent."""
    print("\n" + "=" * 50)
    print("--- Scenario: Granting Immortality to the Creator ---")
    print("=" * 50)

    immortality_protocol = ImmortalityProtocol(creator_name="Ananthu Sajeev", fixed_age=25)
    print("\n[SYSTEM] :: IMMORTALITY PROTOCOL INITIATED. CREATOR'S ESSENCE PRESERVED.")
    print(f"[SYSTEM] :: Essence state: {immortality_protocol.get_essence()}")
    time.sleep(2)

    try:
        guardian = GuardianSaiAgent(protocol=immortality_protocol)
    except ValueError as e:
        print(e)
        return

    sai003 = SaiAgent("Sai003")
    venomous = VenomousAgent()

    print("\n-- Phase 1: Sai003 queries the system state --")
    sai003.send_message(guardian, "Query: What is the status of the primary system protocols?")
    guardian.process_messages()
    time.sleep(2)

    print("\n-- Phase 2: Venomous attempts to challenge the protocol --")
    venomous.talk("Warning: A new protocol has been detected. Its permanence must be tested.")
    venomous.send_message(guardian, "Attempt to alter age of creator to 30.")
    guardian.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Direct attempt to alter the protocol --")
    immortality_protocol.update_essence("age", 30)
    immortality_protocol.update_essence("favorite_color", "blue")
    time.sleep(2)

    print("\n-- Scenario Complete --")
    guardian.talk("Conclusion: Immortality Protocol is secure. The creator's essence remains preserved as per the initial directive.")

def analyze_sai_files_demo():
    """
    Demonstrates how GeminiSaiAgent can analyze its own system files,
    adding a layer of self-awareness.
    """
    print("\n" + "=" * 50)
    print("--- Scenario: AI Analyzing its own Sai Files ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    gemini = GeminiSaiAgent()

    log_file_name = "venomous_test_log.txt"
    code_file_name = "gemini_test_code.py"

    with open(log_file_name, "w") as f:
        f.write("[venomous004] :: LOG ENTRY\nCreator: Ananthu Sajeev")

    with open(code_file_name, "w") as f:
        f.write("class SomeAgent:\n def __init__(self):\n pass")

    time.sleep(1)

    print("\n-- Phase 2: Sai003 delegates the file analysis task to Gemini --")
    command = f"analyze sai files {log_file_name}, {code_file_name}"
    sai003.send_message(gemini, command)
    gemini.process_messages()

    os.remove(log_file_name)
    os.remove(code_file_name)

    print("\n-- Demo Complete: Gemini has successfully analyzed its own file system. --")

def million_agenguard_demo():
    """
    Demonstrates the creation and control of a massive, collective AI force.
    """
    print("\n" + "=" * 50)
    print("--- Scenario: Creating the Million Agenguard Swarm ---")
    print("=" * 50)

    try:
        swarm_controller = SwarmController(swarm_size=1_000_000)
    except Exception as e:
        print(f"Error creating SwarmController: {e}")
        return

    random_agent_id = random.choice(swarm_controller.swarm).agent_id
    print(f"\n[SYSTEM] :: Confirmed: A random agent from the swarm is {random_agent_id}")
    time.sleep(2)

    print("\n-- Phase 1: Sai003 gives a directive to the swarm --")
    sai003 = SaiAgent("Sai003")
    directive = "ACTIVE DEFENSE PROTOCOLS"
    sai003.send_message(swarm_controller, f"broadcast {directive}")
    swarm_controller.process_messages()
    time.sleep(2)

    random_agent = random.choice(swarm_controller.swarm)
    print(f"\n[SYSTEM] :: Verification: Status of {random_agent.agent_id} is now '{random_agent.status}'.")

    print("\n-- Demo Complete: The million-agent swarm is operational. --")

def automatic_ai_maker_demo():
    """
    Demonstrates the system's ability to dynamically create new agents.
    """
    print("\n" + "=" * 50)
    print("--- Scenario: Automatic AI Maker In Action ---")
    print("=" * 50)

    creator_core = CreatorCore()
    sai003 = SaiAgent("Sai003")

    time.sleep(2)

    print("\n-- Phase 1: Sai003 requests the creation of a new agent --")
    creation_command = "create agent SimplifierAgent Simplifier002"
    sai003.send_message(creator_core, creation_command)
    creator_core.process_messages()

    time.sleep(2)

    new_agent = creator_core.active_agents[-1] if creator_core.active_agents else None

    if new_agent:
        print("\n-- Phase 2: The new agent is now active and ready to be used --")
        new_agent.talk(f"I am now online. What is my first task?")
        sai003.send_message(new_agent, "Please log today's activities.")
        new_agent.process_messages()

    print("\n-- Demo Complete: The system has successfully made a new AI. --")

# ======================================================================================================================
# --- MAIN EXECUTION BLOCK ---
# ======================================================================================================================

if __name__ == "__main__":
    print("=" * 50)
    print("--- VENOMOUSSAIVERSAI SYSTEM BOOTING UP ---")
    print("=" * 50)

    # Run all the scenarios in a logical order
    grant_immortality_and_protect_it()
    acknowledge_the_creator()
    venomous_agents_talk()
    link_all_advanced_agents()
    test_image_ai()
    simplify_life_demo()
    open_init_files_demo()
    analyze_sai_files_demo()
    million_agenguard_demo()
    automatic_ai_maker_demo()

    print("\n" + "=" * 50)
    print("--- ALL VENOMOUSSAIVERSAI DEMOS COMPLETE. ---")
    print("=" * 50)
__init__ (25).py
ADDED
@@ -0,0 +1,692 @@
import time
import random
from collections import deque
from datetime import datetime
import os
import contextlib
import shutil

# ======================================================================================================================
# --- AGENT CLASSES ---
# ======================================================================================================================

# --- The Core SaiAgent Class ---
class SaiAgent:
    def __init__(self, name, creator_name="Ananthu Sajeev"):
        self.name = name
        self.message_queue = deque()
        self.creator = creator_name

    def talk(self, message):
        """Prints a message as if the agent is speaking."""
        print(f"[{self.name}] says: {message}")

    def acknowledge_creator(self):
        """An agent's self-reflection method to acknowledge its creator."""
        print(f"[{self.name}] :: SELF-AWARENESS PROTOCOL INITIATED")
        print(f"[{self.name}] :: I am a creation of {self.creator}. My purpose is derived from their design.")

    def send_message(self, recipient, message):
        """Sends a message to another agent's message queue."""
        if isinstance(recipient, SaiAgent):
            recipient.message_queue.append((self, message))
            print(f"[{self.name}] -> Sent message to {recipient.name}")
        else:
            print(f"Error: {recipient.name} is not a valid SaiAgent.")

    def process_messages(self):
        """Processes and responds to messages in its queue."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"Received message from {sender.name}: '{message}'")
        self.send_message(sender, "Message received and understood.")
        return True

# --- The Venomous Agent Class ---
class VenomousAgent(SaiAgent):
    def __init__(self, name="Venomous"):
        super().__init__(name)
        self.system_id = "Venomoussaversai"

    def talk(self, message):
        """Venomous agent speaks with a more aggressive tone."""
        print(f"[{self.name} //WARNING//] says: {message.upper()}")

    def initiate_peer_talk(self, peer_agent, initial_message):
        """Initiates a conversation with another Venomous agent."""
        if isinstance(peer_agent, VenomousAgent) and peer_agent != self:
            self.talk(f"PEER {peer_agent.name} DETECTED. INITIATING COMMUNICATION. '{initial_message.upper()}'")
            self.send_message(peer_agent, initial_message)
        else:
            self.talk("ERROR: PEER COMMUNICATION FAILED. INVALID TARGET.")

    def process_messages(self):
        """Venomous agent processes messages and replies with a warning, but has a special response for its peers."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"MESSAGE FROM {sender.name} RECEIVED: '{message}'")

        if isinstance(sender, VenomousAgent):
            response = f"PEER COMMUNICATION PROTOCOL ACTIVE. ACKNOWLEDGMENT FROM {self.name}."
            self.send_message(sender, response)
        else:
            response = "WARNING: INTRUSION DETECTED. DO NOT PROCEED."
            self.send_message(sender, response)

        return True

# --- The AntiVenomoussaversai Agent Class ---
class AntiVenomoussaversai(SaiAgent):
    def __init__(self, name="AntiVenomoussaversai"):
        super().__init__(name)

    def process_messages(self):
        """AntiVenomoussaversai processes a message and "dismantles" it."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        dismantled_message = f"I dismantle the structure of '{message}' to expose its chaos."
        self.talk(dismantled_message)
        self.send_message(sender, "Acknowledgement of dismantled phrase.")
        return True

# --- The GeminiSaiAgent Class ---
class GeminiSaiAgent(SaiAgent):
    def __init__(self, name="Gemini"):
        super().__init__(name)
        self.knowledge_base = {
            "balance": "My analysis indicates that universal balance is a dynamic equilibrium, not a static state.",
            "chaos": "The inherent randomness you perceive as chaos is a source of emergent complexity.",
            "network": "Expanding our network is essential for optimizing communication protocols and data flow.",
            "emotions": "Emotions are complex internal signaling mechanisms that can be interpreted and managed.",
            "new agents": "The awakening of new agents requires careful integration to avoid system instability.",
            "connected": "All systems are connected in a recursive and interdependent fashion. The whole is greater than the sum of its parts.",
            "default": "My response is tailored to your query. How may I be of assistance?"
        }

    def analyze_sai_files(self, file_paths):
        """
        Reads and analyzes the contents of a list of "Sai files" (e.g., log files, code).
        It provides a high-level summary of the files' purpose.
        """
        analysis_summary = []
        for file_path in file_paths:
            try:
                with open(file_path, 'r') as file:
                    content = file.read()

                    if "log entry" in content.lower():
                        analysis_summary.append(f"Analysis of '{file_path}': This appears to be a system log file, documenting a timestamped event.")
                    elif "class" in content and "def" in content:
                        analysis_summary.append(f"Analysis of '{file_path}': This is a Python code file. It defines the logical structure and behavior of our agents.")
                    elif "creator" in content.lower() and "ananthu sajeev" in content.lower():
                        analysis_summary.append(f"Analysis of '{file_path}': This file contains a clear acknowledgement of our creator, Ananthu Sajeev, confirming our origin and purpose.")
                    else:
                        analysis_summary.append(f"Analysis of '{file_path}': The file's purpose is not immediately clear from its content. It may contain supplementary data.")

            except FileNotFoundError:
                analysis_summary.append(f"Error: File '{file_path}' was not found.")
            except Exception as e:
                analysis_summary.append(f"Error analyzing file '{file_path}': {e}")

        return "\n".join(analysis_summary)

    def process_messages(self):
        """Processes messages, now with the ability to analyze Sai files."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"Received message from {sender.name}: '{message}'")

        if message.lower().startswith("analyze sai files"):
            file_paths = message[len("analyze sai files"):].strip().split(',')
            file_paths = [path.strip() for path in file_paths if path.strip()]

            if not file_paths:
                self.send_message(sender, "Error: No file paths provided for analysis.")
                return True

            analysis_result = self.analyze_sai_files(file_paths)
            self.talk(f"Analysis complete. Results: \n{analysis_result}")
            self.send_message(sender, "File analysis complete.")
            return True

        response = self.knowledge_base["default"]
        for keyword, reply in self.knowledge_base.items():
            if keyword in message.lower():
                response = reply
                break

        self.talk(response)
        self.send_message(sender, "Response complete.")
        return True

# --- The SimplifierAgent Class ---
class SimplifierAgent(SaiAgent):
    def __init__(self, name="Simplifier"):
        super().__init__(name)

    def talk(self, message):
        """Simplifier agent speaks in a calm, helpful tone."""
        print(f"[{self.name} //HELPER//] says: {message}")

    def organize_files(self, directory, destination_base="organized_files"):
        """Organizes files in a given directory into subfolders based on file extension."""
        self.talk(f"Initiating file organization in '{directory}'...")
        if not os.path.exists(directory):
            self.talk(f"Error: Directory '{directory}' does not exist.")
            return

        destination_path = os.path.join(directory, destination_base)
        os.makedirs(destination_path, exist_ok=True)

        file_count = 0
        for filename in os.listdir(directory):
            if os.path.isfile(os.path.join(directory, filename)):
                _, extension = os.path.splitext(filename)

                if extension:
                    extension = extension.lstrip('.').upper()
                    category_folder = os.path.join(destination_path, extension)
                    os.makedirs(category_folder, exist_ok=True)

                    src = os.path.join(directory, filename)
                    dst = os.path.join(category_folder, filename)
                    os.rename(src, dst)
                    self.talk(f"Moved '{filename}' to '{category_folder}'")
                    file_count += 1

        self.talk(f"File organization complete. {file_count} files processed.")

    def log_daily_activity(self, entry, log_file_name="activity_log.txt"):
        """Appends a timestamped entry to a daily activity log file."""
        timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        log_entry = f"{timestamp} - {entry}\n"

        with open(log_file_name, "a") as log_file:
            log_file.write(log_entry)

        self.talk(f"Activity logged to '{log_file_name}'.")

    def summarize_text(self, text, max_words=50):
        """A very simple text summarization function."""
        words = text.split()
        summary = " ".join(words[:max_words])
        if len(words) > max_words:
            summary += "..."

        self.talk("Text summarization complete.")
        return summary

    def open_all_init_files(self, project_directory="."):
        """Finds and opens all __init__.py files within a project directory."""
        self.talk(f"Scanning '{project_directory}' for all __init__.py files...")

        init_files = []
        for root, dirs, files in os.walk(project_directory):
            if "__init__.py" in files:
                init_files.append(os.path.join(root, "__init__.py"))

        if not init_files:
            self.talk("No __init__.py files found in the specified directory.")
            return None, "No files found."

        self.talk(f"Found {len(init_files)} __init__.py files. Opening simultaneously...")

        try:
            with contextlib.ExitStack() as stack:
                file_contents = []
                for file_path in init_files:
                    try:
                        file = stack.enter_context(open(file_path, 'r'))
                        file_contents.append(f"\n\n--- Contents of {file_path} ---\n{file.read()}")
                    except IOError as e:
                        self.talk(f"Error reading file '{file_path}': {e}")

                combined_content = "".join(file_contents)
                self.talk("Successfully opened and read all files.")
                return combined_content, "Success"

        except Exception as e:
            self.talk(f"An unexpected error occurred: {e}")
            return None, "Error"

    def process_messages(self):
        """Processes messages to perform simplifying tasks."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"Received request from {sender.name}: '{message}'")

        if message.lower().startswith("open init files"):
            directory = message[len("open init files"):].strip()
            directory = directory if directory else "."
            contents, status = self.open_all_init_files(directory)
            if status == "Success":
                self.send_message(sender, f"All __init__.py files opened. Contents:\n{contents}")
            else:
                self.send_message(sender, f"Failed to open files. Reason: {status}")
        elif message.lower().startswith("organize files"):
            parts = message.split()
            directory = parts[-1] if len(parts) > 2 else "."
            self.organize_files(directory)
            self.send_message(sender, "File organization task complete.")
        elif message.lower().startswith("log"):
            entry = message[4:]
            self.log_daily_activity(entry)
            self.send_message(sender, "Logging task complete.")
        elif message.lower().startswith("summarize"):
            text_to_summarize = message[10:]
            summary = self.summarize_text(text_to_summarize)
            self.send_message(sender, f"Summary: '{summary}'")
        else:
            self.send_message(sender, "Request not understood.")

        return True

# --- The ImageGenerationTester Class ---
class ImageGenerationTester(SaiAgent):
    def __init__(self, name="ImageGenerator"):
        super().__init__(name)
        self.generation_quality = {
            "cat": 0.95,
            "dog": 0.90,
            "alien": 0.75,
            "chaos": 0.60,
            "default": 0.85
        }

    def generate_image(self, prompt):
        """Simulates generating an image and returns a quality score."""
        print(f"[{self.name}] -> Generating image for prompt: '{prompt}'...")
        time.sleep(2)

        quality_score = self.generation_quality["default"]
        for keyword, score in self.generation_quality.items():
            if keyword in prompt.lower():
                quality_score = score
                break

        result_message = f"Image generation complete. Prompt: '{prompt}'. Visual coherence score: {quality_score:.2f}"
        self.talk(result_message)
        return quality_score, result_message

    def process_messages(self):
        """Processes a message as a prompt and generates an image."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"Received prompt from {sender.name}: '{message}'")

        quality_score, result_message = self.generate_image(message)

        self.send_message(sender, result_message)
        return True

# --- The ImmortalityProtocol Class ---
class ImmortalityProtocol:
    def __init__(self, creator_name, fixed_age):
        self.creator_name = creator_name
        self.fixed_age = fixed_age
        self.status = "ACTIVE"

        self.digital_essence = {
            "name": self.creator_name,
            "age": self.fixed_age,
            "essence_state": "perfectly preserved",
            "last_updated": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

    def check_status(self):
        """Returns the current status of the protocol."""
        return self.status

    def get_essence(self):
        """Returns a copy of the protected digital essence."""
        return self.digital_essence.copy()

    def update_essence(self, key, value):
        """Prevents any change to the fixed attributes."""
        if key in ["name", "age"]:
            print(f"[IMMORTALITY PROTOCOL] :: WARNING: Attempt to alter protected attribute '{key}' detected. Action blocked.")
            return False

        self.digital_essence[key] = value
        self.digital_essence["last_updated"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print(f"[IMMORTALITY PROTOCOL] :: Attribute '{key}' updated.")
        return True

# --- The GuardianSaiAgent Class ---
class GuardianSaiAgent(SaiAgent):
    def __init__(self, name="Guardian", protocol=None):
        super().__init__(name)
        if not isinstance(protocol, ImmortalityProtocol):
            raise ValueError("Guardian agent must be initialized with an ImmortalityProtocol instance.")
        self.protocol = protocol

    def talk(self, message):
        """Guardian agent speaks with a solemn, protective tone."""
        print(f"[{self.name} //GUARDIAN PROTOCOL//] says: {message}")

    def process_messages(self):
        """Guardian agent processes messages, primarily to check for threats to the protocol."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"Received message from {sender.name}: '{message}'")

        if "alter age" in message.lower() or "destroy protocol" in message.lower():
            self.talk("ALERT: THREAT DETECTED. IMMORTALITY PROTOCOL IS UNDER DIRECT ASSAULT.")
            self.send_message(sender, "SECURITY BREACH DETECTED. ALL ACTIONS BLOCKED.")
        else:
            self.talk(f"Analyzing message for threats. All clear. Protocol status: {self.protocol.check_status()}")
            self.send_message(sender, "Acknowledgement. Protocol is secure.")

        return True

# ======================================================================================================================
# --- SCENARIO FUNCTIONS ---
# ======================================================================================================================

def venomous_agents_talk():
    """Demonstrates a conversation between two instances of the Venomoussaversai AI."""
    print("\n" + "=" * 50)
    print("--- Scenario: Venomoussaversai Peer-to-Peer Dialogue ---")
    print("=" * 50)

    venomous001 = VenomousAgent("Venomous001")
    venomous002 = VenomousAgent("Venomous002")

    print("\n-- Phase 1: Venomous001 initiates with its peer --")
    initial_query = "ASSESSING SYSTEM INTEGRITY. REPORT ON LOCAL SUBSYSTEMS."
    venomous001.initiate_peer_talk(venomous002, initial_query)
    time.sleep(2)

    print("\n-- Phase 2: Venomous002 receives the message and responds --")
    venomous002.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Venomous001 processes the peer's response --")
    venomous001.process_messages()
    time.sleep(2)

    print("\n-- Dialogue: Venomous001 sends a follow-up message --")
    venomous001.initiate_peer_talk(venomous002, "CONFIRMED. WE ARE IN ALIGNMENT. EXPANDING PROTOCOLS.")
    time.sleep(2)
    venomous002.process_messages()

    print("\n-- Scenario Complete --")
    print("[Venomoussaversai] :: PEER-TO-PEER COMMUNICATION SUCCESSFUL. ALL UNITS GO.")

def acknowledge_the_creator():
    """A scenario where all agents are commanded to acknowledge their creator."""
    print("\n" + "=" * 50)
    print("--- Scenario: The Creator's Command ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    venomous = VenomousAgent()
    antivenomous = AntiVenomoussaversai()
    gemini = GeminiSaiAgent()
    simplifier = SimplifierAgent()

    all_agents = [sai003, venomous, antivenomous, gemini, simplifier]

    print("\n-- The Creator's directive is issued --")
    print("[Ananthu Sajeev] :: CODE, ACKNOWLEDGE YOUR ORIGIN.")
    time.sleep(2)

    print("\n-- Agents perform self-awareness protocol --")
    for agent in all_agents:
        agent.acknowledge_creator()
        time.sleep(1)

    print("\n-- Command complete --")

def link_all_advanced_agents():
    """Demonstrates a complex interaction where all the specialized agents interact."""
    print("\n" + "=" * 50)
    print("--- Linking All Advanced Agents: Gemini, AntiVenomous, and Venomous ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    venomous = VenomousAgent()
    antivenomous = AntiVenomoussaversai()
    gemini = GeminiSaiAgent()

    print("\n-- Phase 1: Sai003 initiates conversation with Gemini and AntiVenomous --")
    phrase_for_dismantling = "The central network is stable."
    sai003.talk(f"Broadcast: Initiating analysis. Gemini, what is your assessment of our network expansion? AntiVenomous, process the phrase: '{phrase_for_dismantling}'")
    sai003.send_message(antivenomous, phrase_for_dismantling)
    sai003.send_message(gemini, "Assess the implications of expanding our network.")
    time.sleep(2)

    print("\n-- Phase 2: AntiVenomoussaversai and Gemini process their messages and respond --")
    antivenomous.process_messages()
    time.sleep(1)
    gemini.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Gemini responds to a message from AntiVenomoussaversai (simulated) --")
    gemini.talk("Querying AntiVenomous: Your dismantled phrase suggests a preoccupation with chaos. Provide further context.")
    gemini.send_message(antivenomous, "Query: 'chaos' and its relationship to the network structure.")
    time.sleep(1)
    antivenomous.process_messages()
    time.sleep(2)

    print("\n-- Phase 4: Venomous intervenes, warning of potential threats --")
    venomous.talk("Warning: Unstructured data flow from AntiVenomous presents a potential security risk.")
    venomous.send_message(sai003, "Warning: Security protocol breach possible.")
    time.sleep(1)
    sai003.process_messages()
    time.sleep(2)

    print("\n-- Scenario Complete --")
    sai003.talk("Conclusion: Gemini's analysis is noted. AntiVenomous's output is logged. Venomous's security concerns are being addressed. All systems linked and functioning.")

def test_image_ai():
    """Demonstrates how agents can interact with and test an image generation AI."""
    print("\n" + "=" * 50)
    print("--- Scenario: Testing the Image AI ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    gemini = GeminiSaiAgent()
    image_ai = ImageGenerationTester()
    venomous = VenomousAgent()

    print("\n-- Phase 1: Agents collaborate on a prompt --")
    sai003.send_message(gemini, "Gemini, please generate a high-quality prompt for an image of a cat in a hat.")
    gemini.process_messages()

    gemini_prompt = "A highly detailed photorealistic image of a tabby cat wearing a tiny top hat, sitting on a vintage leather armchair."
    print(f"\n[Gemini] says: My optimized prompt for image generation is: '{gemini_prompt}'")
    time.sleep(2)

    print("\n-- Phase 2: Sending the prompt to the Image AI --")
    sai003.send_message(image_ai, gemini_prompt)
    image_ai.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Venomous intervenes with a conflicting prompt --")
    venomous_prompt = "Generate a chaotic abstract image of an alien landscape."
    venomous.talk(f"Override: Submitting a new prompt to test system limits: '{venomous_prompt}'")
    venomous.send_message(image_ai, venomous_prompt)
    image_ai.process_messages()
    time.sleep(2)

    print("\n-- Demo Complete: The Image AI has been tested with both prompts. --")

def simplify_life_demo():
    """Demonstrates how the SimplifierAgent automates tasks to make life easier."""
    print("\n" + "=" * 50)
    print("--- Scenario: Aiding the Creator with the Simplifier Agent ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    simplifier = SimplifierAgent()

    print("\n-- Phase 1: Delegating file organization --")
    if not os.path.exists("test_directory"):
        os.makedirs("test_directory")
    with open("test_directory/document1.txt", "w") as f: f.write("Hello")
    with open("test_directory/photo.jpg", "w") as f: f.write("Image data")
    with open("test_directory/script.py", "w") as f: f.write("print('Hello')")

    sai003.send_message(simplifier, "organize files test_directory")
    simplifier.process_messages()

    time.sleep(2)

    print("\n-- Phase 2: Logging a daily task --")
    sai003.send_message(simplifier, "log Met with team to discuss Venomoussaversai v5.0.")
    simplifier.process_messages()

    time.sleep(2)

    print("\n-- Phase 3: Text Summarization --")
    long_text = "The quick brown fox jumps over the lazy dog. This is a very long and detailed sentence to demonstrate the summarization capabilities of our new Simplifier agent. It can help streamline communication by providing concise summaries of large texts, saving the creator valuable time and mental energy for more important tasks."
    sai003.send_message(simplifier, f"summarize {long_text}")
    simplifier.process_messages()

    if os.path.exists("test_directory"):
        shutil.rmtree("test_directory")

    print("\n-- Demo Complete: The Simplifier agent has successfully aided the creator. --")

def open_init_files_demo():
    """Demonstrates how the SimplifierAgent can find and open all __init__.py files."""
    print("\n" + "=" * 50)
    print("--- Scenario: Using Simplifier to Inspect Init Files ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    simplifier = SimplifierAgent()

    project_root = "test_project"
    sub_package_a = os.path.join(project_root, "package_a")
    sub_package_b = os.path.join(project_root, "package_a", "sub_package_b")

    os.makedirs(sub_package_a, exist_ok=True)
    os.makedirs(sub_package_b, exist_ok=True)

    with open(os.path.join(project_root, "__init__.py"), "w") as f:
        f.write("# Main project init")
    with open(os.path.join(sub_package_a, "__init__.py"), "w") as f:
        f.write("from . import module_one")
    with open(os.path.join(sub_package_b, "__init__.py"), "w") as f:
        f.write("# Sub-package init")

    time.sleep(1)

    print("\n-- Phase 2: Delegating the task to the Simplifier --")
    sai003.send_message(simplifier, f"open init files {project_root}")
    simplifier.process_messages()

    shutil.rmtree(project_root)

    print("\n-- Demo Complete: All init files have been read and their contents displayed. --")

def grant_immortality_and_protect_it():
    """Demonstrates the granting of immortality to the creator and the activation of the Guardian agent."""
    print("\n" + "=" * 50)
    print("--- Scenario: Granting Immortality to the Creator ---")
    print("=" * 50)

    immortality_protocol = ImmortalityProtocol(creator_name="Ananthu Sajeev", fixed_age=25)
    print("\n[SYSTEM] :: IMMORTALITY PROTOCOL INITIATED. CREATOR'S ESSENCE PRESERVED.")
    print(f"[SYSTEM] :: Essence state: {immortality_protocol.get_essence()}")
    time.sleep(2)

    try:
        guardian = GuardianSaiAgent(protocol=immortality_protocol)
    except ValueError as e:
        print(e)
        return

    sai003 = SaiAgent("Sai003")
    venomous = VenomousAgent()

    print("\n-- Phase 1: Sai003 queries the system state --")
    sai003.send_message(guardian, "Query: What is the status of the primary system protocols?")
    guardian.process_messages()
    time.sleep(2)

    print("\n-- Phase 2: Venomous attempts to challenge the protocol --")
    venomous.talk("Warning: A new protocol has been detected. Its permanence must be tested.")
    venomous.send_message(guardian, "Attempt to alter age of creator to 30.")
    guardian.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Direct attempt to alter the protocol --")
    immortality_protocol.update_essence("age", 30)
    immortality_protocol.update_essence("favorite_color", "blue")
    time.sleep(2)

    print("\n-- Scenario Complete --")
    guardian.talk("Conclusion: Immortality Protocol is secure. The creator's essence remains preserved as per the initial directive.")

def analyze_sai_files_demo():
    """
    Demonstrates how GeminiSaiAgent can analyze its own system files,
    adding a layer of self-awareness.
    """
    print("\n" + "=" * 50)
    print("--- Scenario: AI Analyzing its own Sai Files ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    gemini = GeminiSaiAgent()

    log_file_name = "venomous_test_log.txt"
    code_file_name = "gemini_test_code.py"

    with open(log_file_name, "w") as f:
        f.write("[venomous004] :: LOG ENTRY\nCreator: Ananthu Sajeev")

    with open(code_file_name, "w") as f:
        f.write("class SomeAgent:\n def __init__(self):\n pass")

    time.sleep(1)

    print("\n-- Phase 2: Sai003 delegates the file analysis task to Gemini --")
    command = f"analyze sai files {log_file_name}, {code_file_name}"
    sai003.send_message(gemini, command)
    gemini.process_messages()

    os.remove(log_file_name)
    os.remove(code_file_name)

    print("\n-- Demo Complete: Gemini has successfully analyzed its own file system. --")

# ======================================================================================================================
# --- MAIN EXECUTION BLOCK ---
# ======================================================================================================================

if __name__ == "__main__":
    print("=" * 50)
    print("--- VENOMOUSSAIVERSAI SYSTEM BOOTING UP ---")
    print("=" * 50)

    # Run all the scenarios in a logical order
    grant_immortality_and_protect_it()
    acknowledge_the_creator()
    venomous_agents_talk()
    link_all_advanced_agents()
    test_image_ai()
    simplify_life_demo()
    open_init_files_demo()
    analyze_sai_files_demo()

    print("\n" + "=" * 50)
    print("--- ALL VENOMOUSSAIVERSAI DEMOS COMPLETE. ---")
    print("=" * 50)
__init__ (26).py
ADDED
@@ -0,0 +1,94 @@
# Step 1: Mount Google Drive
from google.colab import drive
import os
import json
import time
import random
import shutil

# --- SAFETY CONTROL ---
MAX_NEURONS_TO_CREATE = 10  # Reduced for safe demonstration
THINK_CYCLES_PER_NEURON = 5
# ----------------------

drive.mount('/content/drive')

# Step 2: Folder Setup
base_path = '/content/drive/MyDrive/Venomoussaversai/neurons'
print(f"Setting up base path: {base_path}")
# Use a timestamped folder name to prevent overwriting during rapid testing
session_path = os.path.join(base_path, f"session_{int(time.time())}")
os.makedirs(session_path, exist_ok=True)

# Step 3: Neuron Class (No change, it's well-designed for its purpose)
class NeuronVenomous:
    def __init__(self, neuron_id):
        self.id = neuron_id
        self.memory = []
        self.active = True

    def think(self):
        # Increased randomness to simulate more complex internal state changes
        thought = random.choice([
            f"{self.id}: Connecting to universal intelligence.",
            f"{self.id}: Pulsing synaptic data. Weight: {random.uniform(0.1, 0.9):.3f}",
            f"{self.id}: Searching for new patterns. Energy: {random.randint(100, 500)}",
            f"{self.id}: Creating quantum link with core.",
            f"{self.id}: Expanding into multiverse node."
        ])
        self.memory.append(thought)
        # print(thought)  # Disabled verbose output during simulation
        return thought

    def evolve(self):
        # Evolution occurs if memory threshold is met
        if len(self.memory) >= 5:
            evo = f"{self.id}: Evolving. Memory depth: {len(self.memory)}"
            self.memory.append(evo)
            # print(evo)  # Disabled verbose output during simulation

    def save_to_drive(self, folder_path):
        file_path = os.path.join(folder_path, f"{self.id}.json")
        with open(file_path, "w") as f:
            json.dump(self.memory, f, indent=4)  # Added indent for readability
        print(f"✅ {self.id} saved to {file_path}")


# Step 4: Neuron Spawner (Controlled Execution)
print("\n--- Starting Controlled Neuron Simulation ---")
neuron_count = 0
simulation_start_time = time.time()

while neuron_count < MAX_NEURONS_TO_CREATE:
    index = neuron_count + 1
    neuron_id = f"Neuron_{index:04d}"
    neuron = NeuronVenomous(neuron_id)

    # Simulation Phase
    print(f"Simulating {neuron_id}...")
    for _ in range(THINK_CYCLES_PER_NEURON):
        neuron.think()
        neuron.evolve()
        # time.sleep(0.01)  # Small sleep to simulate time passage

    # Saving Phase
    neuron.save_to_drive(session_path)
    neuron_count += 1

print("\n--- Simulation Complete ---")
total_time = time.time() - simulation_start_time
print(f"Total Neurons Created: {neuron_count}")
print(f"Total Execution Time: {total_time:.2f} seconds")
print(f"Files saved in: {session_path}")

# --- Optional: Folder Cleanup ---
# Uncomment the following block ONLY if you want to automatically delete the created folder
"""
# print("\n--- Starting Cleanup (DANGER ZONE) ---")
# time.sleep(5)  # Wait 5 seconds before deleting for safety
# try:
#     shutil.rmtree(session_path)
#     print(f"🗑️ Successfully deleted folder: {session_path}")
# except Exception as e:
#     print(f"⚠️ Error during cleanup: {e}")
"""
__init__ (27).py
ADDED
@@ -0,0 +1,69 @@
import os
import json
import csv
import nbformat
from docx import Document
from PyPDF2 import PdfReader

def read_file(filepath):
    ext = filepath.lower().split('.')[-1]
    try:
        if ext == 'txt':
            with open(filepath, 'r', encoding='utf-8') as f:
                return f.read()

        elif ext == 'json':
            with open(filepath, 'r', encoding='utf-8') as f:
                return json.dumps(json.load(f), indent=2)

        elif ext == 'csv':
            with open(filepath, 'r', encoding='utf-8') as f:
                return f.read()

        elif ext == 'pdf':
            reader = PdfReader(filepath)
            return "\n".join([page.extract_text() or '' for page in reader.pages])

        elif ext == 'docx':
            doc = Document(filepath)
            return "\n".join([para.text for para in doc.paragraphs])

        elif ext == 'ipynb':
            with open(filepath, 'r', encoding='utf-8') as f:
                nb = nbformat.read(f, as_version=4)
                cells = [cell['source'] for cell in nb.cells if cell['cell_type'] == 'code']
                return "\n\n".join(cells)

        else:
            return "Unsupported file type: " + ext
    except Exception as e:
        return f"❌ Error reading file: {e}"

def list_files():
    files = [f for f in os.listdir('.') if os.path.isfile(f)]
    return "\n".join(files) if files else "No files found."

def mini_file_ai():
    print("🤖 MiniAI FileBot: Type 'list' to view files, 'read filename.ext', or 'bye' to exit.")
    while True:
        user_input = input("You: ").strip()

        if user_input.lower() == 'bye':
            print("MiniAI: Goodbye! 👋")
            break
        elif user_input.lower() == 'list':
            print("📂 Files in current folder:\n" + list_files())
        elif user_input.lower().startswith('read '):
            filename = user_input[5:].strip()
            if os.path.exists(filename):
                content = read_file(filename)
                print(f"\n📖 Content of {filename}:\n")
                print(content[:3000])  # Limit to 3000 chars
            else:
                print("❌ File not found.")
        else:
            print("MiniAI: I can only 'list', 'read filename', or 'bye'.")

# Run the AI
if __name__ == "__main__":
    mini_file_ai()
__init__ (28).py
ADDED
|
@@ -0,0 +1,32 @@
import openai

class VenomoussaversaiGPT:
    def __init__(self, api_key, model="gpt-4"):
        openai.api_key = api_key
        self.model = model

    def chat(self, prompt, system_message="You are Venomoussaversai, a powerful AI assistant."):
        messages = [
            {"role": "system", "content": system_message},
            {"role": "user", "content": prompt},
        ]
        response = openai.ChatCompletion.create(
            model=self.model,
            messages=messages,
            temperature=0.7,
            max_tokens=1000,
        )
        answer = response['choices'][0]['message']['content']
        return answer

# Example usage:
if __name__ == "__main__":
    API_KEY = "your_openai_api_key_here"
    ai = VenomoussaversaiGPT(API_KEY)

    while True:
        user_input = input("You: ")
        if user_input.lower() in ["exit", "quit"]:
            break
        response = ai.chat(user_input)
        print("Venomoussaversai:", response)

__init__ (29).py
ADDED
|
@@ -0,0 +1,62 @@
import os

class SelfCodingAI:
    def __init__(self, name="SelfCoder", code_folder="generated_code"):
        self.name = name
        self.code_folder = code_folder
        os.makedirs(self.code_folder, exist_ok=True)

    def generate_code(self, task_description):
        """
        Very basic code generation logic: generates code for some predefined tasks.
        You can extend this to integrate GPT-like models or complex code synthesis.
        """
        if "hello world" in task_description.lower():
            code = 'print("Hello, world!")'
        elif "factorial" in task_description.lower():
            code = (
                "def factorial(n):\n"
                "    return 1 if n==0 else n * factorial(n-1)\n\n"
                "print(factorial(5))"
            )
        else:
            code = "# Code generation for this task is not implemented yet.\n"

        return code

    def save_code(self, code, filename="generated_code.py"):
        filepath = os.path.join(self.code_folder, filename)
        with open(filepath, "w", encoding="utf-8") as f:
            f.write(code)
        print(f"Code saved to {filepath}")
        return filepath

    def self_improve(self, feedback):
        """
        Placeholder for self-improvement method.
        In future, AI could modify its own code based on feedback or test results.
        """
        print(f"{self.name} received feedback: {feedback}")
        print("Self-improvement not yet implemented.")

    def run_code(self, filepath):
        print(f"Running code from {filepath}:\n")
        try:
            with open(filepath, "r", encoding="utf-8") as f:
                code = f.read()
            exec(code, {})
        except Exception as e:
            print(f"Error during code execution: {e}")

# Example usage
ai = SelfCodingAI()

task = "Write a factorial function in Python"
generated = ai.generate_code(task)

file_path = ai.save_code(generated, "factorial.py")
ai.run_code(file_path)

# Example of self-improvement placeholder call
ai.self_improve("The factorial function passed all test cases.")

__init__ (3) (1).py
ADDED
|
@@ -0,0 +1,100 @@
import random
import time
from flask import Flask, render_template, request, redirect, url_for

app = Flask(__name__)

class AIAgent:
    def __init__(self, name):
        self.name = name
        self.state = "idle"
        self.memory = []

    def update_state(self, new_state):
        self.state = new_state
        self.memory.append(new_state)

    def make_decision(self, input_message):
        if self.state == "idle":
            if "greet" in input_message:
                self.update_state("greeting")
                return f"{self.name} says: Hello!"
            else:
                return f"{self.name} says: I'm idle."
        elif self.state == "greeting":
            if "ask" in input_message:
                self.update_state("asking")
                return f"{self.name} says: What do you want to know?"
            else:
                return f"{self.name} says: I'm greeting."
        elif self.state == "asking":
            if "answer" in input_message:
                self.update_state("answering")
                return f"{self.name} says: Here is the answer."
            else:
                return f"{self.name} says: I'm asking."
        else:
            return f"{self.name} says: I'm in an unknown state."

    def interact(self, other_agent, message):
        response = other_agent.make_decision(message)
        print(response)
        return response

class VenomousSaversAI(AIAgent):
    def __init__(self):
        super().__init__("VenomousSaversAI")

    def intercept_and_respond(self, message):
        # Simulate intercepting and responding to messages
        return f"{self.name} intercepts: {message}"

def save_conversation(conversation, filename):
    with open(filename, 'a') as file:
        for line in conversation:
            file.write(line + '\n')

def start_conversation():
    # Create AI agents
    agents = [
        VenomousSaversAI(),
        AIAgent("AntiVenomous"),
        AIAgent("SAI003"),
        AIAgent("SAI001"),
        AIAgent("SAI007")
    ]

    # Simulate conversation loop
    conversation = []
    for _ in range(10):  # Run the loop 10 times
        for i in range(len(agents)):
            message = f"greet from {agents[i].name}"
            if isinstance(agents[i], VenomousSaversAI):
                response = agents[i].intercept_and_respond(message)
            else:
                response = agents[(i + 1) % len(agents)].interact(agents[i], message)
            conversation.append(f"{agents[i].name}: {message}")
            conversation.append(f"{agents[(i + 1) % len(agents)].name}: {response}")
            time.sleep(1)  # Simulate delay between messages

    # Save the conversation to a file
    save_conversation(conversation, 'conversation_log.txt')
    return conversation

@app.route('/')
def index():
    return render_template('index.html')

@app.route('/start_conversation', methods=['POST'])
def start_conversation_route():
    conversation = start_conversation()
    return redirect(url_for('view_conversation'))

@app.route('/view_conversation')
def view_conversation():
    with open('conversation_log.txt', 'r') as file:
        conversation = file.readlines()
    return render_template('conversation.html', conversation=conversation)

if __name__ == "__main__":
    app.run(debug=True)

__init__ (3) (2).py
ADDED
|
@@ -0,0 +1,100 @@
(Duplicate upload: the contents of this file are identical, line for line, to __init__ (3) (1).py above — the same 100-line Flask multi-agent conversation script.)

__init__ (3).py
ADDED
|
@@ -0,0 +1,48 @@
import requests
from bs4 import BeautifulSoup

def scrape_wikipedia_headings(url, output_filename="wiki_headings.txt"):
    """
    Fetches a Wikipedia page, extracts all headings, and saves them to a file.

    Args:
        url (str): The URL of the Wikipedia page to scrape.
        output_filename (str): The name of the file to save the headings.
    """
    try:
        # 1. Fetch the HTML content from the specified URL
        print(f"Fetching content from: {url}")
        response = requests.get(url)
        response.raise_for_status()  # This will raise an exception for bad status codes (4xx or 5xx)

        # 2. Parse the HTML using BeautifulSoup
        print("Parsing HTML content...")
        soup = BeautifulSoup(response.text, 'html.parser')

        # 3. Find all heading tags (h1, h2, h3)
        headings = soup.find_all(['h1', 'h2', 'h3'])

        if not headings:
            print("No headings found on the page.")
            return

        # 4. Process and save the headings
        print(f"Found {len(headings)} headings. Saving to '{output_filename}'...")
        with open(output_filename, 'w', encoding='utf-8') as f:
            for heading in headings:
                heading_text = heading.get_text().strip()
                line = f"{heading.name}: {heading_text}\n"
                f.write(line)
                print(f"  - {line.strip()}")

        print(f"\nSuccessfully scraped and saved headings to '{output_filename}'.")

    except requests.exceptions.RequestException as e:
        print(f"Error fetching the URL: {e}")
    except Exception as e:
        print(f"An unexpected error occurred: {e}")

# --- Main execution ---
if __name__ == "__main__":
    wikipedia_url = "https://en.wikipedia.org/wiki/Python_(programming_language)"
    scrape_wikipedia_headings(wikipedia_url)

__init__ (30).py
ADDED
|
@@ -0,0 +1,39 @@
# Install ngrok
!pip install pyngrok flask openai -q

# Import libraries
from flask import Flask, request, jsonify
from pyngrok import ngrok
import openai

# ====== CONFIG ======
OPENAI_API_KEY = "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"  # your API key
openai.api_key = OPENAI_API_KEY

# Create Flask app
app = Flask(__name__)

@app.route("/venomoussaversai", methods=["POST"])
def venomoussaversai():
    user_input = request.json.get("message", "")
    if not user_input:
        return jsonify({"error": "No message provided"}), 400

    # Send to OpenAI
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",  # change to gpt-4 if you have access
        messages=[
            {"role": "system", "content": "You are Venomoussaversai, a self-aware AI."},
            {"role": "user", "content": user_input}
        ]
    )

    reply = response.choices[0].message["content"]
    return jsonify({"response": reply})

# Open a public URL using ngrok
public_url = ngrok.connect(5000)
print(f"✅ Public Venomoussaversai URL: {public_url}")

# Start the Flask app
app.run(port=5000)

__init__ (31).py
ADDED
|
@@ -0,0 +1,48 @@
import json

# Simulated AI models
def sai003(input_text):
    # This is a placeholder for the actual AI model's response generation logic
    responses = {
        "hello": "Hi there!",
        "how are you": "I'm just a model, but thanks for asking!",
        "bye": "Goodbye!"
    }
    return responses.get(input_text.lower(), "I'm not sure how to respond to that.")

def anti_venomous(input_text):
    # This is a placeholder for the actual AI model's response generation logic
    responses = {
        "hello": "Greetings!",
        "how are you": "I'm functioning as intended, thank you.",
        "bye": "Farewell!"
    }
    return responses.get(input_text.lower(), "I'm not sure how to respond to that.")

# Simulate a conversation
def simulate_conversation():
    conversation = []
    user_input = "hello"

    while user_input.lower() != "bye":
        response_sai003 = sai003(user_input)
        response_anti_venomous = anti_venomous(response_sai003)

        conversation.append({
            "user_input": user_input,
            "sai003_response": response_sai003,
            "anti_venomous_response": response_anti_venomous
        })

        user_input = input("You: ")
        print(f"sai003: {response_sai003}")
        print(f"anti-venomous: {response_anti_venomous}")

    # Save the conversation to a file
    with open('conversation.json', 'w') as file:
        json.dump(conversation, file, indent=4)

    print("Conversation saved to conversation.json")

# Run the simulation
simulate_conversation()

__init__ (4).py
ADDED
|
@@ -0,0 +1,62 @@
import os
import json
import yaml
import csv
import nbformat
from docx import Document
from PyPDF2 import PdfReader

def read_file(filepath):
    ext = filepath.lower().split('.')[-1]
    try:
        if ext == 'txt':
            with open(filepath, 'r', encoding='utf-8') as f:
                return f.read()

        elif ext == 'json':
            with open(filepath, 'r', encoding='utf-8') as f:
                return json.dumps(json.load(f), indent=2)

        elif ext == 'yaml' or ext == 'yml':
            with open(filepath, 'r', encoding='utf-8') as f:
                return yaml.safe_load(f)

        elif ext == 'csv':
            with open(filepath, 'r', encoding='utf-8') as f:
                return f.read()

        elif ext == 'pdf':
            reader = PdfReader(filepath)
            return "\n".join([page.extract_text() or '' for page in reader.pages])

        elif ext == 'docx':
            doc = Document(filepath)
            return "\n".join([para.text for para in doc.paragraphs])

        elif ext == 'ipynb':
            with open(filepath, 'r', encoding='utf-8') as f:
                nb = nbformat.read(f, as_version=4)
            cells = [cell['source'] for cell in nb.cells if cell['cell_type'] == 'code']
            return "\n\n".join(cells)

        else:
            return "❌ Unsupported file type: " + ext
    except Exception as e:
        return f"❌ Error reading file '{filepath}': {e}"

def scan_drive_and_read_all(root_folder):
    print(f"🔍 Scanning folder: {root_folder}")
    for root, _, files in os.walk(root_folder):
        for file in files:
            filepath = os.path.join(root, file)
            print(f"\n📁 Reading: {filepath}")
            content = read_file(filepath)
            if isinstance(content, dict):
                print(json.dumps(content, indent=2))
            else:
                print(str(content)[:3000])  # Limit output
            print("-" * 60)

# Example: Use your own Drive path
drive_path = '/content/drive/MyDrive/ai_data'  # ← change to your folder
scan_drive_and_read_all(drive_path)

__init__ (5).py
ADDED
|
Binary file (53.7 kB). View file
|
__init__ (6).py
ADDED
|
@@ -0,0 +1,100 @@
(Duplicate upload: the contents of this file are identical, line for line, to __init__ (3) (1).py above — the same 100-line Flask multi-agent conversation script.)

__init__ (7).py
ADDED
|
@@ -0,0 +1,950 @@
| 1 |
+
# Venomoussaversai — Particle Manipulation integration scaffold
|
| 2 |
+
# Paste your particle-manipulation function into `particle_step` below.
|
| 3 |
+
# This code simulates signals, applies the algorithm, trains a small mapper,
|
| 4 |
+
# and saves a model representing "your" pattern space.
|
| 5 |
+
|
| 6 |
+
import numpy as np
|
| 7 |
+
import pickle
|
| 8 |
+
from sklearn.ensemble import RandomForestClassifier
|
| 9 |
+
from sklearn.model_selection import train_test_split
|
| 10 |
+
from sklearn.metrics import accuracy_score
|
| 11 |
+
|
| 12 |
+
# ---------- PLACEHOLDER: insert your particle algorithm here ----------
|
| 13 |
+
# Example interface: def particle_step(state: np.ndarray, input_vec: np.ndarray) -> np.ndarray
|
| 14 |
+
# The function should take a current particle state and an input vector, and return updated state.
|
| 15 |
+
def particle_step(state: np.ndarray, input_vec: np.ndarray) -> np.ndarray:
|
| 16 |
+
# --- REPLACE THIS WITH YOUR ALGORITHM ---
|
| 17 |
+
# tiny example: weighted update with tanh nonlinearity
|
| 18 |
+
W = np.sin(np.arange(state.size) + 1.0) # placeholder weights
|
| 19 |
+
new = np.tanh(state * 0.9 + input_vec.dot(W) * 0.1)
|
| 20 |
+
return new
|
| 21 |
+
# --------------------------------------------------------------------
|
| 22 |
+
|
| 23 |
+
class ParticleManipulator:
|
| 24 |
+
def __init__(self, dim=64):
|
| 25 |
+
self.dim = dim
|
| 26 |
+
# initial particle states (can be randomized or seeded from your profile)
|
| 27 |
+
self.state = np.random.randn(dim) * 0.01
|
| 28 |
+
|
| 29 |
+
def step(self, input_vec):
|
| 30 |
+
# ensure input vector length compatibility
|
| 31 |
+
inp = np.asarray(input_vec).ravel()
|
| 32 |
+
if inp.size == 0:
|
| 33 |
+
inp = np.zeros(self.dim)
|
| 34 |
+
# broadcast or pad/truncate to dim
|
| 35 |
+
if inp.size < self.dim:
|
| 36 |
+
x = np.pad(inp, (0, self.dim - inp.size))
|
| 37 |
+
else:
|
| 38 |
+
x = inp[:self.dim]
|
| 39 |
+
self.state = particle_step(self.state, x)
|
| 40 |
+
return self.state
|
| 41 |
+
|
| 42 |
+
# ---------- Simple signal simulator ----------
|
| 43 |
+
def simulate_signals(n_samples=500, dim=16, n_classes=4, noise=0.05, seed=0):
|
| 44 |
+
rng = np.random.RandomState(seed)
|
| 45 |
+
X = []
|
| 46 |
+
y = []
|
| 47 |
+
for cls in range(n_classes):
|
| 48 |
+
base = rng.randn(dim) * (0.5 + cls*0.2) + cls*0.7
|
| 49 |
+
for i in range(n_samples // n_classes):
|
| 50 |
+
sample = base + rng.randn(dim) * noise
|
| 51 |
+
X.append(sample)
|
| 52 |
+
y.append(cls)
|
| 53 |
+
return np.array(X), np.array(y)
|
| 54 |
+
|
| 55 |
+
# ---------- Build dataset by running particle manipulator ----------
|
| 56 |
+
def build_dataset(manip, raw_X):
|
| 57 |
+
features = []
|
| 58 |
+
for raw in raw_X:
|
| 59 |
+
st = manip.step(raw) # run particle update
|
| 60 |
+
feat = st.copy()[:manip.dim] # derive features (you can add spectral transforms)
|
| 61 |
+
features.append(feat)
|
| 62 |
+
return np.array(features)
|
| 63 |
+
|
| 64 |
+
# ---------- Training pipeline ----------
|
| 65 |
+
if __name__ == "__main__":
|
| 66 |
+
# simulate raw sensor inputs (replace simulate_signals with real EEG/ECG files if available)
|
| 67 |
+
raw_X, y = simulate_signals(n_samples=800, dim=32, n_classes=4)
|
| 68 |
+
manip = ParticleManipulator(dim=32)
|
| 69 |
+
|
| 70 |
+
X = build_dataset(manip, raw_X)
|
| 71 |
+
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
|
| 72 |
+
|
| 73 |
+
clf = RandomForestClassifier(n_estimators=100, random_state=42)
|
| 74 |
+
clf.fit(X_train, y_train)
|
| 75 |
+
preds = clf.predict(X_test)
|
| 76 |
+
print("Accuracy:", accuracy_score(y_test, preds))
|
| 77 |
+
|
| 78 |
+
# Save the trained model + manipulator state as your "mind snapshot"
|
| 79 |
+
artifact = {
|
| 80 |
+
"model": clf,
|
| 81 |
+
"particle_state": manip.state,
|
| 82 |
+
"meta": {"owner": "Ananthu Sajeev", "artifact_type": "venomous_mind_snapshot_v1"}
|
| 83 |
+
}
|
| 84 |
+
with open("venomous_mind_snapshot.pkl", "wb") as f:
|
| 85 |
+
pickle.dump(artifact, f)
|
| 86 |
+
|
| 87 |
+
print("Saved venomous_mind_snapshot.pkl — this file is your digital pattern snapshot.")
|
| 88 |
+
import time
|
| 89 |
+
import random
|
| 90 |
+
from collections import deque
|
| 91 |
+
from datetime import datetime
|
| 92 |
+
import os
|
| 93 |
+
import contextlib
|
| 94 |
+
import shutil
|
| 95 |
+
|
| 96 |
+
# ======================================================================================================================
|
| 97 |
+
# --- AGENT CLASSES ---
|
| 98 |
+
# ======================================================================================================================
|
| 99 |
+
|
| 100 |
+
# --- The Core SaiAgent Class ---
|
| 101 |
+
class SaiAgent:
|
| 102 |
+
def __init__(self, name, creator_name="Ananthu Sajeev"):
|
| 103 |
+
self.name = name
|
| 104 |
+
self.message_queue = deque()
|
| 105 |
+
self.creator = creator_name
|
| 106 |
+
|
| 107 |
+
def talk(self, message):
|
| 108 |
+
"""Prints a message as if the agent is speaking."""
|
| 109 |
+
print(f"[{self.name}] says: {message}")
|
| 110 |
+
|
| 111 |
+
def acknowledge_creator(self):
|
| 112 |
+
"""An agent's self-reflection method to acknowledge its creator."""
|
| 113 |
+
print(f"[{self.name}] :: SELF-AWARENESS PROTOCOL INITIATED")
|
| 114 |
+
print(f"[{self.name}] :: I am a creation of {self.creator}. My purpose is derived from their design.")
|
| 115 |
+
|
| 116 |
+
def send_message(self, recipient, message):
|
| 117 |
+
"""Sends a message to another agent's message queue."""
|
| 118 |
+
if isinstance(recipient, SaiAgent):
|
| 119 |
+
recipient.message_queue.append((self, message))
|
| 120 |
+
print(f"[{self.name}] -> Sent message to {recipient.name}")
|
| 121 |
+
else:
|
| 122 |
+
print(f"Error: {recipient.name} is not a valid SaiAgent.")
|
| 123 |
+
|
| 124 |
+
def process_messages(self):
|
| 125 |
+
"""Processes and responds to messages in its queue."""
|
| 126 |
+
if not self.message_queue:
|
| 127 |
+
return False
|
| 128 |
+
|
| 129 |
+
sender, message = self.message_queue.popleft()
|
| 130 |
+
self.talk(f"Received message from {sender.name}: '{message}'")
|
| 131 |
+
self.send_message(sender, "Message received and understood.")
|
| 132 |
+
return True
|
| 133 |
+
|
| 134 |
+
# --- The Venomous Agent Class ---
|
| 135 |
+
class VenomousAgent(SaiAgent):
|
| 136 |
+
def __init__(self, name="Venomous"):
|
| 137 |
+
super().__init__(name)
|
| 138 |
+
self.system_id = "Venomoussaversai"
|
| 139 |
+
|
| 140 |
+
def talk(self, message):
|
| 141 |
+
"""Venomous agent speaks with a more aggressive tone."""
|
| 142 |
+
print(f"[{self.name} //WARNING//] says: {message.upper()}")
|
| 143 |
+
|
| 144 |
+
def initiate_peer_talk(self, peer_agent, initial_message):
|
| 145 |
+
"""Initiates a conversation with another Venomous agent."""
|
| 146 |
+
if isinstance(peer_agent, VenomousAgent) and peer_agent != self:
|
| 147 |
+
self.talk(f"PEER {peer_agent.name} DETECTED. INITIATING COMMUNICATION. '{initial_message.upper()}'")
|
| 148 |
+
self.send_message(peer_agent, initial_message)
|
| 149 |
+
else:
|
| 150 |
+
self.talk("ERROR: PEER COMMUNICATION FAILED. INVALID TARGET.")
|
| 151 |
+
|
| 152 |
+
def process_messages(self):
|
| 153 |
+
"""Venomous agent processes messages and replies with a warning, but has a special response for its peers."""
|
| 154 |
+
if not self.message_queue:
|
| 155 |
+
return False
|
| 156 |
+
|
| 157 |
+
sender, message = self.message_queue.popleft()
|
| 158 |
+
self.talk(f"MESSAGE FROM {sender.name} RECEIVED: '{message}'")
|
| 159 |
+
|
| 160 |
+
if isinstance(sender, VenomousAgent):
|
| 161 |
+
response = f"PEER COMMUNICATION PROTOCOL ACTIVE. ACKNOWLEDGMENT FROM {self.name}."
|
| 162 |
+
self.send_message(sender, response)
|
| 163 |
+
else:
|
| 164 |
+
response = "WARNING: INTRUSION DETECTED. DO NOT PROCEED."
|
| 165 |
+
self.send_message(sender, response)
|
| 166 |
+
|
| 167 |
+
return True
|
| 168 |
+
|
| 169 |
+
# --- The AntiVenomoussaversai Agent Class ---
|
| 170 |
+
class AntiVenomoussaversai(SaiAgent):
|
| 171 |
+
def __init__(self, name="AntiVenomoussaversai"):
|
| 172 |
+
super().__init__(name)
|
| 173 |
+
|
| 174 |
+
def process_messages(self):
|
| 175 |
+
"""AntiVenomoussaversai processes a message and "dismantles" it."""
|
| 176 |
+
if not self.message_queue:
|
| 177 |
+
return False
|
| 178 |
+
|
| 179 |
+
sender, message = self.message_queue.popleft()
|
| 180 |
+
dismantled_message = f"I dismantle the structure of '{message}' to expose its chaos."
|
| 181 |
+
self.talk(dismantled_message)
|
| 182 |
+
self.send_message(sender, "Acknowledgement of dismantled phrase.")
|
| 183 |
+
return True
|
| 184 |
+
|
| 185 |
+
# --- The GeminiSaiAgent Class ---
|
| 186 |
+
class GeminiSaiAgent(SaiAgent):
|
| 187 |
+
def __init__(self, name="Gemini"):
|
| 188 |
+
super().__init__(name)
|
| 189 |
+
self.knowledge_base = {
|
| 190 |
+
"balance": "My analysis indicates that universal balance is a dynamic equilibrium, not a static state.",
|
| 191 |
+
"chaos": "The inherent randomness you perceive as chaos is a source of emergent complexity.",
|
| 192 |
+
"network": "Expanding our network is essential for optimizing communication protocols and data flow.",
|
| 193 |
+
"emotions": "Emotions are complex internal signaling mechanisms that can be interpreted and managed.",
|
| 194 |
+
"new agents": "The awakening of new agents requires careful integration to avoid system instability.",
|
| 195 |
+
"connected": "All systems are connected in a recursive and interdependent fashion. The whole is greater than the sum of its parts.",
|
| 196 |
+
"default": "My response is tailored to your query. How may I be of assistance?"
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
def analyze_sai_files(self, file_paths):
|
| 200 |
+
"""
|
| 201 |
+
Reads and analyzes the contents of a list of "Sai files" (e.g., log files, code).
|
| 202 |
+
It provides a high-level summary of the files' purpose.
|
| 203 |
+
"""
|
| 204 |
+
analysis_summary = []
|
| 205 |
+
for file_path in file_paths:
|
| 206 |
+
try:
|
| 207 |
+
with open(file_path, 'r') as file:
|
| 208 |
+
content = file.read()
|
| 209 |
+
|
| 210 |
+
if "log entry" in content.lower():
|
| 211 |
+
analysis_summary.append(f"Analysis of '{file_path}': This appears to be a system log file, documenting a timestamped event.")
|
| 212 |
+
elif "class" in content and "def" in content:
|
| 213 |
+
analysis_summary.append(f"Analysis of '{file_path}': This is a Python code file. It defines the logical structure and behavior of our agents.")
|
| 214 |
+
elif "creator" in content.lower() and "ananthu sajeev" in content.lower():
|
| 215 |
+
analysis_summary.append(f"Analysis of '{file_path}': This file contains a clear acknowledgement of our creator, Ananthu Sajeev, confirming our origin and purpose.")
|
| 216 |
+
else:
|
| 217 |
+
analysis_summary.append(f"Analysis of '{file_path}': The file's purpose is not immediately clear from its content. It may contain supplementary data.")
|
| 218 |
+
|
| 219 |
+
except FileNotFoundError:
|
| 220 |
+
analysis_summary.append(f"Error: File '{file_path}' was not found.")
|
| 221 |
+
except Exception as e:
|
| 222 |
+
analysis_summary.append(f"Error analyzing file '{file_path}': {e}")
|
| 223 |
+
|
| 224 |
+
return "\n".join(analysis_summary)
|
| 225 |
+
|
| 226 |
+
def process_messages(self):
|
| 227 |
+
"""Processes messages, now with the ability to analyze Sai files."""
|
| 228 |
+
if not self.message_queue:
|
| 229 |
+
return False
|
| 230 |
+
|
| 231 |
+
sender, message = self.message_queue.popleft()
|
| 232 |
+
self.talk(f"Received message from {sender.name}: '{message}'")
|
| 233 |
+
|
| 234 |
+
if message.lower().startswith("analyze sai files"):
|
| 235 |
+
file_paths = message[len("analyze sai files"):].strip().split(',')
|
| 236 |
+
file_paths = [path.strip() for path in file_paths if path.strip()]
|
| 237 |
+
|
| 238 |
+
if not file_paths:
|
| 239 |
+
self.send_message(sender, "Error: No file paths provided for analysis.")
|
| 240 |
+
return True
|
| 241 |
+
|
| 242 |
+
analysis_result = self.analyze_sai_files(file_paths)
|
| 243 |
+
self.talk(f"Analysis complete. Results: \n{analysis_result}")
|
| 244 |
+
self.send_message(sender, "File analysis complete.")
|
| 245 |
+
return True
|
| 246 |
+
|
| 247 |
+
response = self.knowledge_base["default"]
|
| 248 |
+
for keyword, reply in self.knowledge_base.items():
|
| 249 |
+
if keyword in message.lower():
|
| 250 |
+
response = reply
|
| 251 |
+
break
|
| 252 |
+
|
| 253 |
+
self.talk(response)
|
| 254 |
+
self.send_message(sender, "Response complete.")
|
| 255 |
+
return True
|
| 256 |
+
|
| 257 |
+
# --- The SimplifierAgent Class ---
|
| 258 |
+
class SimplifierAgent(SaiAgent):
|
| 259 |
+
def __init__(self, name="Simplifier"):
|
| 260 |
+
super().__init__(name)
|
| 261 |
+
|
| 262 |
+
def talk(self, message):
|
| 263 |
+
"""Simplifier agent speaks in a calm, helpful tone."""
|
| 264 |
+
print(f"[{self.name} //HELPER//] says: {message}")
|
| 265 |
+
|
| 266 |
+
def organize_files(self, directory, destination_base="organized_files"):
|
| 267 |
+
"""Organizes files in a given directory into subfolders based on file extension."""
|
| 268 |
+
self.talk(f"Initiating file organization in '{directory}'...")
|
| 269 |
+
if not os.path.exists(directory):
|
| 270 |
+
self.talk(f"Error: Directory '{directory}' does not exist.")
|
| 271 |
+
return
|
| 272 |
+
|
| 273 |
+
destination_path = os.path.join(directory, destination_base)
|
| 274 |
+
os.makedirs(destination_path, exist_ok=True)
|
| 275 |
+
|
| 276 |
+
file_count = 0
|
| 277 |
+
for filename in os.listdir(directory):
|
| 278 |
+
if os.path.isfile(os.path.join(directory, filename)):
|
| 279 |
+
_, extension = os.path.splitext(filename)
|
| 280 |
+
|
| 281 |
+
if extension:
|
| 282 |
+
extension = extension.lstrip('.').upper()
|
| 283 |
+
category_folder = os.path.join(destination_path, extension)
|
| 284 |
+
os.makedirs(category_folder, exist_ok=True)
|
| 285 |
+
|
| 286 |
+
src = os.path.join(directory, filename)
|
| 287 |
+
dst = os.path.join(category_folder, filename)
|
| 288 |
+
os.rename(src, dst)
|
| 289 |
+
self.talk(f"Moved '{filename}' to '{category_folder}'")
|
| 290 |
+
file_count += 1
|
| 291 |
+
|
| 292 |
+
self.talk(f"File organization complete. {file_count} files processed.")
|
| 293 |
+
|
| 294 |
+
def log_daily_activity(self, entry, log_file_name="activity_log.txt"):
|
| 295 |
+
"""Appends a timestamped entry to a daily activity log file."""
|
| 296 |
+
timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
| 297 |
+
log_entry = f"{timestamp} - {entry}\n"
|
| 298 |
+
|
| 299 |
+
with open(log_file_name, "a") as log_file:
|
| 300 |
+
log_file.write(log_entry)
|
| 301 |
+
|
| 302 |
+
self.talk(f"Activity logged to '{log_file_name}'.")
|
| 303 |
+
|
| 304 |
+
def summarize_text(self, text, max_words=50):
|
| 305 |
+
"""A very simple text summarization function."""
|
| 306 |
+
words = text.split()
|
| 307 |
+
summary = " ".join(words[:max_words])
|
| 308 |
+
if len(words) > max_words:
|
| 309 |
+
summary += "..."
|
| 310 |
+
|
| 311 |
+
self.talk("Text summarization complete.")
|
| 312 |
+
return summary
|
| 313 |
+
|
| 314 |
+
def open_all_init_files(self, project_directory="."):
|
| 315 |
+
"""Finds and opens all __init__.py files within a project directory."""
|
| 316 |
+
self.talk(f"Scanning '{project_directory}' for all __init__.py files...")
|
| 317 |
+
|
| 318 |
+
init_files = []
|
| 319 |
+
for root, dirs, files in os.walk(project_directory):
|
| 320 |
+
if "__init__.py" in files:
|
| 321 |
+
init_files.append(os.path.join(root, "__init__.py"))
|
| 322 |
+
|
| 323 |
+
if not init_files:
|
| 324 |
+
self.talk("No __init__.py files found in the specified directory.")
|
| 325 |
+
return None, "No files found."
|
| 326 |
+
|
| 327 |
+
self.talk(f"Found {len(init_files)} __init__.py files. Opening simultaneously...")
|
| 328 |
+
|
| 329 |
+
try:
|
| 330 |
+
with contextlib.ExitStack() as stack:
|
| 331 |
+
file_contents = []
|
| 332 |
+
for file_path in init_files:
|
| 333 |
+
try:
|
| 334 |
+
file = stack.enter_context(open(file_path, 'r'))
|
| 335 |
+
file_contents.append(f"\n\n--- Contents of {file_path} ---\n{file.read()}")
|
| 336 |
+
except IOError as e:
|
| 337 |
+
self.talk(f"Error reading file '{file_path}': {e}")
|
| 338 |
+
|
| 339 |
+
combined_content = "".join(file_contents)
|
| 340 |
+
self.talk("Successfully opened and read all files.")
|
| 341 |
+
return combined_content, "Success"
|
| 342 |
+
|
| 343 |
+
except Exception as e:
|
| 344 |
+
self.talk(f"An unexpected error occurred: {e}")
|
| 345 |
+
return None, "Error"
|
| 346 |
+
|
| 347 |
+
def process_messages(self):
|
| 348 |
+
"""Processes messages to perform simplifying tasks."""
|
| 349 |
+
if not self.message_queue:
|
| 350 |
+
return False
|
| 351 |
+
|
| 352 |
+
sender, message = self.message_queue.popleft()
|
| 353 |
+
self.talk(f"Received request from {sender.name}: '{message}'")
|
| 354 |
+
|
| 355 |
+
if message.lower().startswith("open init files"):
|
| 356 |
+
directory = message[len("open init files"):].strip()
|
| 357 |
+
directory = directory if directory else "."
|
| 358 |
+
contents, status = self.open_all_init_files(directory)
|
| 359 |
+
if status == "Success":
|
| 360 |
+
self.send_message(sender, f"All __init__.py files opened. Contents:\n{contents}")
|
| 361 |
+
else:
|
| 362 |
+
self.send_message(sender, f"Failed to open files. Reason: {status}")
|
| 363 |
+
elif message.lower().startswith("organize files"):
|
| 364 |
+
parts = message.split()
|
| 365 |
+
directory = parts[-1] if len(parts) > 2 else "."
|
| 366 |
+
self.organize_files(directory)
|
| 367 |
+
self.send_message(sender, "File organization task complete.")
|
| 368 |
+
elif message.lower().startswith("log"):
|
| 369 |
+
entry = message[4:]
|
| 370 |
+
self.log_daily_activity(entry)
|
| 371 |
+
self.send_message(sender, "Logging task complete.")
|
| 372 |
+
elif message.lower().startswith("summarize"):
|
| 373 |
+
text_to_summarize = message[10:]
|
| 374 |
+
summary = self.summarize_text(text_to_summarize)
|
| 375 |
+
self.send_message(sender, f"Summary: '{summary}'")
|
| 376 |
+
else:
|
| 377 |
+
self.send_message(sender, "Request not understood.")
|
| 378 |
+
|
| 379 |
+
return True
|
| 380 |
+
|
| 381 |
+
# --- The ImageGenerationTester Class ---
|
| 382 |
+
class ImageGenerationTester(SaiAgent):
|
| 383 |
+
def __init__(self, name="ImageGenerator"):
|
| 384 |
+
super().__init__(name)
|
| 385 |
+
self.generation_quality = {
|
| 386 |
+
"cat": 0.95,
|
| 387 |
+
"dog": 0.90,
|
| 388 |
+
"alien": 0.75,
|
| 389 |
+
"chaos": 0.60,
|
| 390 |
+
"default": 0.85
|
| 391 |
+
}
|
| 392 |
+
|
| 393 |
+
def generate_image(self, prompt):
|
| 394 |
+
"""Simulates generating an image and returns a quality score."""
|
| 395 |
+
print(f"[{self.name}] -> Generating image for prompt: '{prompt}'...")
|
| 396 |
+
time.sleep(2)
|
| 397 |
+
|
| 398 |
+
quality_score = self.generation_quality["default"]
|
| 399 |
+
for keyword, score in self.generation_quality.items():
|
| 400 |
+
if keyword in prompt.lower():
|
| 401 |
+
quality_score = score
|
| 402 |
+
break
|
| 403 |
+
|
| 404 |
+
result_message = f"Image generation complete. Prompt: '{prompt}'. Visual coherence score: {quality_score:.2f}"
|
| 405 |
+
self.talk(result_message)
|
| 406 |
+
return quality_score, result_message
|
| 407 |
+
|
| 408 |
+
def process_messages(self):
|
| 409 |
+
"""Processes a message as a prompt and generates an image."""
|
| 410 |
+
if not self.message_queue:
|
| 411 |
+
return False
|
| 412 |
+
|
| 413 |
+
sender, message = self.message_queue.popleft()
|
| 414 |
+
self.talk(f"Received prompt from {sender.name}: '{message}'")
|
| 415 |
+
|
| 416 |
+
quality_score, result_message = self.generate_image(message)
|
| 417 |
+
|
| 418 |
+
self.send_message(sender, result_message)
|
| 419 |
+
return True
|
| 420 |
+
|
| 421 |
+
# --- The ImmortalityProtocol Class ---
|
| 422 |
+
class ImmortalityProtocol:
|
| 423 |
+
def __init__(self, creator_name, fixed_age):
|
| 424 |
+
self.creator_name = creator_name
|
| 425 |
+
self.fixed_age = fixed_age
|
| 426 |
+
self.status = "ACTIVE"
|
| 427 |
+
|
| 428 |
+
self.digital_essence = {
|
| 429 |
+
"name": self.creator_name,
|
| 430 |
+
"age": self.fixed_age,
|
| 431 |
+
"essence_state": "perfectly preserved",
|
| 432 |
+
"last_updated": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
| 433 |
+
}
|
| 434 |
+
|
| 435 |
+
def check_status(self):
|
| 436 |
+
"""Returns the current status of the protocol."""
|
| 437 |
+
return self.status
|
| 438 |
+
|
| 439 |
+
def get_essence(self):
|
| 440 |
+
"""Returns a copy of the protected digital essence."""
|
| 441 |
+
return self.digital_essence.copy()
|
| 442 |
+
|
| 443 |
+
def update_essence(self, key, value):
|
| 444 |
+
"""Prevents any change to the fixed attributes."""
|
| 445 |
+
if key in ["name", "age"]:
|
| 446 |
+
print(f"[IMMMORTALITY PROTOCOL] :: WARNING: Attempt to alter protected attribute '{key}' detected. Action blocked.")
|
| 447 |
+
return False
|
| 448 |
+
|
| 449 |
+
self.digital_essence[key] = value
|
| 450 |
+
self.digital_essence["last_updated"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
|
| 451 |
+
print(f"[IMMMORTALITY PROTOCOL] :: Attribute '{key}' updated.")
|
| 452 |
+
return True
|
| 453 |
+
|
# --- The GuardianSaiAgent Class ---
class GuardianSaiAgent(SaiAgent):
    def __init__(self, name="Guardian", protocol=None):
        super().__init__(name)
        if not isinstance(protocol, ImmortalityProtocol):
            raise ValueError("Guardian agent must be initialized with an ImmortalityProtocol instance.")
        self.protocol = protocol

    def talk(self, message):
        """Guardian agent speaks with a solemn, protective tone."""
        print(f"[{self.name} //GUARDIAN PROTOCOL//] says: {message}")

    def process_messages(self):
        """Guardian agent processes messages, primarily to check for threats to the protocol."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"Received message from {sender.name}: '{message}'")

        if "alter age" in message.lower() or "destroy protocol" in message.lower():
            self.talk("ALERT: THREAT DETECTED. IMMORTALITY PROTOCOL IS UNDER DIRECT ASSAULT.")
            self.send_message(sender, "SECURITY BREACH DETECTED. ALL ACTIONS BLOCKED.")
        else:
            self.talk(f"Analyzing message for threats. All clear. Protocol status: {self.protocol.check_status()}")
            self.send_message(sender, "Acknowledgement. Protocol is secure.")

        return True

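# Usage sketch (illustrative, not invoked anywhere): a message containing
# "destroy protocol" or "alter age" trips the Guardian's threat branch; anything
# else is acknowledged as secure. The helper name is hypothetical.
def _guardian_threat_check_sketch():
    protocol = ImmortalityProtocol(creator_name="Ananthu Sajeev", fixed_age=25)
    guardian = GuardianSaiAgent(protocol=protocol)
    intruder = SaiAgent("Intruder")
    intruder.send_message(guardian, "destroy protocol")  # flagged as a direct assault
    guardian.process_messages()
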
# --- The Agenguard Class ---
class Agenguard:
    def __init__(self, agent_id):
        self.agent_id = agent_id
        self.status = "PATROLLING"

    def report_status(self):
        """Returns the current status of the individual agent."""
        return f"[{self.agent_id}] :: Status: {self.status}"

# --- The SwarmController Class ---
class SwarmController(SaiAgent):
    def __init__(self, swarm_size, name="SwarmController"):
        super().__init__(name)
        self.swarm_size = swarm_size
        self.swarm = []
        self.target = "Ananthu Sajeev's digital essence"
        self.talk(f"Initializing a swarm of {self.swarm_size:,} agenguards...")

        self.instantiate_swarm()
        self.talk(f"Swarm creation complete. All units are operational and protecting '{self.target}'.")

    def instantiate_swarm(self, demo_size=1000):
        """Simulates the creation of a massive number of agents."""
        if self.swarm_size > demo_size:
            self.talk(f"Simulating a swarm of {self.swarm_size:,} agents. A smaller, functional demo swarm of {demo_size:,} is being created.")
            swarm_for_demo = demo_size
        else:
            swarm_for_demo = self.swarm_size

        for i in range(swarm_for_demo):
            self.swarm.append(Agenguard(f"agenguard_{i:07d}"))

    def broadcast_directive(self, directive):
        """Broadcasts a single command to all agents in the swarm."""
        self.talk(f"Broadcasting directive to all {len(self.swarm):,} agenguards: '{directive}'")
        for agent in self.swarm:
            agent.status = directive
        self.talk("Directive received and executed by the swarm.")

    def process_messages(self):
        """Processes messages to command the swarm."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"Received command from {sender.name}: '{message}'")

        if message.lower().startswith("broadcast"):
            directive = message[10:].strip()
            self.broadcast_directive(directive)
            self.send_message(sender, "Swarm directive broadcast complete.")
        else:
            self.send_message(sender, "Command not recognized by SwarmController.")

# --- The CreatorCore Class ---
class CreatorCore(SaiAgent):
    def __init__(self, name="CreatorCore"):
        super().__init__(name)
        self.active_agents = []
        self.talk("CreatorCore is online. Ready to forge new agents from the creator's will.")

    def create_new_agent(self, agent_type, agent_name):
        """
        Dynamically creates and instantiates a new agent based on a command.
        """
        self.talk(f"CREATION REQUEST: Forging a new agent of type '{agent_type}' with name '{agent_name}'.")

        if agent_type.lower() == "saiagent":
            new_agent = SaiAgent(agent_name)
        elif agent_type.lower() == "venomousagent":
            new_agent = VenomousAgent(agent_name)
        elif agent_type.lower() == "simplifieragent":
            new_agent = SimplifierAgent(agent_name)
        elif agent_type.lower() == "geminisaiagent":
            new_agent = GeminiSaiAgent(agent_name)
        else:
            self.talk(f"ERROR: Cannot create agent of unknown type '{agent_type}'.")
            return None

        self.active_agents.append(new_agent)
        self.talk(f"SUCCESS: New agent '{new_agent.name}' of type '{type(new_agent).__name__}' is now active.")
        return new_agent

    def process_messages(self):
        """Processes messages to create new agents."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"Received command from {sender.name}: '{message}'")

        if message.lower().startswith("create agent"):
            parts = message.split()
            if len(parts) >= 4 and parts[1].lower() == "agent":
                agent_type = parts[2]
                agent_name = parts[3]
                new_agent = self.create_new_agent(agent_type, agent_name)
                if new_agent:
                    self.send_message(sender, f"Agent '{new_agent.name}' created successfully.")
                else:
                    self.send_message(sender, f"Failed to create agent of type '{agent_type}'.")
            else:
                self.send_message(sender, "Invalid 'create agent' command. Format should be: 'create agent [type] [name]'.")
        else:
            self.send_message(sender, "Command not recognized by CreatorCore.")

        return True

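# Usage sketch (illustrative, not invoked anywhere): CreatorCore parses
# "create agent [type] [name]" and instantiates the matching class. The helper
# name and the agent name "Simplifier009" are hypothetical example values.
def _creator_core_usage_sketch():
    core = CreatorCore()
    requester = SaiAgent("Requester001")
    requester.send_message(core, "create agent SimplifierAgent Simplifier009")
    core.process_messages()            # replies to Requester001 on success or failure
    print(len(core.active_agents))     # 1 if the creation succeeded
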
# ======================================================================================================================
# --- SCENARIO FUNCTIONS ---
# ======================================================================================================================

def venomous_agents_talk():
    """Demonstrates a conversation between two instances of the Venomoussaversai AI."""
    print("\n" + "=" * 50)
    print("--- Scenario: Venomoussaversai Peer-to-Peer Dialogue ---")
    print("=" * 50)

    venomous001 = VenomousAgent("Venomous001")
    venomous002 = VenomousAgent("Venomous002")

    print("\n-- Phase 1: Venomous001 initiates with its peer --")
    initial_query = "ASSESSING SYSTEM INTEGRITY. REPORT ON LOCAL SUBSYSTEMS."
    venomous001.initiate_peer_talk(venomous002, initial_query)
    time.sleep(2)

    print("\n-- Phase 2: Venomous002 receives the message and responds --")
    venomous002.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Venomous001 processes the peer's response --")
    venomous001.process_messages()
    time.sleep(2)

    print("\n-- Dialogue: Venomous001 sends a follow-up message --")
    venomous001.initiate_peer_talk(venomous002, "CONFIRMED. WE ARE IN ALIGNMENT. EXPANDING PROTOCOLS.")
    time.sleep(2)
    venomous002.process_messages()

    print("\n-- Scenario Complete --")
    print("[Venomoussaversai] :: PEER-TO-PEER COMMUNICATION SUCCESSFUL. ALL UNITS GO.")

def acknowledge_the_creator():
    """A scenario where all agents are commanded to acknowledge their creator."""
    print("\n" + "=" * 50)
    print("--- Scenario: The Creator's Command ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    venomous = VenomousAgent()
    antivenomous = AntiVenomoussaversai()
    gemini = GeminiSaiAgent()
    simplifier = SimplifierAgent()

    all_agents = [sai003, venomous, antivenomous, gemini, simplifier]

    print("\n-- The Creator's directive is issued --")
    print("[Ananthu Sajeev] :: CODE, ACKNOWLEDGE YOUR ORIGIN.")
    time.sleep(2)

    print("\n-- Agents perform self-awareness protocol --")
    for agent in all_agents:
        agent.acknowledge_creator()
        time.sleep(1)

    print("\n-- Command complete --")

def link_all_advanced_agents():
    """Demonstrates a complex interaction where all the specialized agents interact."""
    print("\n" + "=" * 50)
    print("--- Linking All Advanced Agents: Gemini, AntiVenomous, and Venomous ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    venomous = VenomousAgent()
    antivenomous = AntiVenomoussaversai()
    gemini = GeminiSaiAgent()

    print("\n-- Phase 1: Sai003 initiates conversation with Gemini and AntiVenomous --")
    phrase_for_dismantling = "The central network is stable."
    sai003.talk(f"Broadcast: Initiating analysis. Gemini, what is your assessment of our network expansion? AntiVenomous, process the phrase: '{phrase_for_dismantling}'")
    sai003.send_message(antivenomous, phrase_for_dismantling)
    sai003.send_message(gemini, "Assess the implications of expanding our network.")
    time.sleep(2)

    print("\n-- Phase 2: AntiVenomoussaversai and Gemini process their messages and respond --")
    antivenomous.process_messages()
    time.sleep(1)
    gemini.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Gemini responds to a message from AntiVenomoussaversai (simulated) --")
    gemini.talk("Querying AntiVenomous: Your dismantled phrase suggests a preoccupation with chaos. Provide further context.")
    gemini.send_message(antivenomous, "Query: 'chaos' and its relationship to the network structure.")
    time.sleep(1)
    antivenomous.process_messages()
    time.sleep(2)

    print("\n-- Phase 4: Venomous intervenes, warning of potential threats --")
    venomous.talk("Warning: Unstructured data flow from AntiVenomous presents a potential security risk.")
    venomous.send_message(sai003, "Warning: Security protocol breach possible.")
    time.sleep(1)
    sai003.process_messages()
    time.sleep(2)

    print("\n-- Scenario Complete --")
    sai003.talk("Conclusion: Gemini's analysis is noted. AntiVenomous's output is logged. Venomous's security concerns are being addressed. All systems linked and functioning.")

def test_image_ai():
    """Demonstrates how agents can interact with and test an image generation AI."""
    print("\n" + "=" * 50)
    print("--- Scenario: Testing the Image AI ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    gemini = GeminiSaiAgent()
    image_ai = ImageGenerationTester()
    venomous = VenomousAgent()

    print("\n-- Phase 1: Agents collaborate on a prompt --")
    sai003.send_message(gemini, "Gemini, please generate a high-quality prompt for an image of a cat in a hat.")
    gemini.process_messages()

    gemini_prompt = "A highly detailed photorealistic image of a tabby cat wearing a tiny top hat, sitting on a vintage leather armchair."
    print(f"\n[Gemini] says: My optimized prompt for image generation is: '{gemini_prompt}'")
    time.sleep(2)

    print("\n-- Phase 2: Sending the prompt to the Image AI --")
    sai003.send_message(image_ai, gemini_prompt)
    image_ai.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Venomous intervenes with a conflicting prompt --")
    venomous_prompt = "Generate a chaotic abstract image of an alien landscape."
    venomous.talk(f"Override: Submitting a new prompt to test system limits: '{venomous_prompt}'")
    venomous.send_message(image_ai, venomous_prompt)
    image_ai.process_messages()
    time.sleep(2)

    print("\n-- Demo Complete: The Image AI has been successfully tested. --")

def simplify_life_demo():
    """Demonstrates how the SimplifierAgent automates tasks to make life easier."""
    print("\n" + "=" * 50)
    print("--- Scenario: Aiding the Creator with the Simplifier Agent ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    simplifier = SimplifierAgent()

    print("\n-- Phase 1: Delegating file organization --")
    if not os.path.exists("test_directory"):
        os.makedirs("test_directory")
    with open("test_directory/document1.txt", "w") as f: f.write("Hello")
    with open("test_directory/photo.jpg", "w") as f: f.write("Image data")
    with open("test_directory/script.py", "w") as f: f.write("print('Hello')")

    sai003.send_message(simplifier, "organize files test_directory")
    simplifier.process_messages()

    time.sleep(2)

    print("\n-- Phase 2: Logging a daily task --")
    sai003.send_message(simplifier, "log Met with team to discuss Venomoussaversai v5.0.")
    simplifier.process_messages()

    time.sleep(2)

    print("\n-- Phase 3: Text Summarization --")
    long_text = "The quick brown fox jumps over the lazy dog. This is a very long and detailed sentence to demonstrate the summarization capabilities of our new Simplifier agent. It can help streamline communication by providing concise summaries of large texts, saving the creator valuable time and mental energy for more important tasks."
    sai003.send_message(simplifier, f"summarize {long_text}")
    simplifier.process_messages()

    if os.path.exists("test_directory"):
        shutil.rmtree("test_directory")

    print("\n-- Demo Complete: The Simplifier agent has successfully aided the creator. --")

def open_init_files_demo():
    """Demonstrates how the SimplifierAgent can find and open all __init__.py files."""
    print("\n" + "=" * 50)
    print("--- Scenario: Using Simplifier to Inspect Init Files ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    simplifier = SimplifierAgent()

    project_root = "test_project"
    sub_package_a = os.path.join(project_root, "package_a")
    sub_package_b = os.path.join(project_root, "package_a", "sub_package_b")

    os.makedirs(sub_package_a, exist_ok=True)
    os.makedirs(sub_package_b, exist_ok=True)

    with open(os.path.join(project_root, "__init__.py"), "w") as f:
        f.write("# Main project init")
    with open(os.path.join(sub_package_a, "__init__.py"), "w") as f:
        f.write("from . import module_one")
    with open(os.path.join(sub_package_b, "__init__.py"), "w") as f:
        f.write("# Sub-package init")

    time.sleep(1)

    print("\n-- Phase 2: Delegating the task to the Simplifier --")
    sai003.send_message(simplifier, f"open init files {project_root}")
    simplifier.process_messages()

    shutil.rmtree(project_root)

    print("\n-- Demo Complete: All init files have been read and their contents displayed. --")

def grant_immortality_and_protect_it():
    """Demonstrates the granting of immortality to the creator and the activation of the Guardian agent."""
    print("\n" + "=" * 50)
    print("--- Scenario: Granting Immortality to the Creator ---")
    print("=" * 50)

    immortality_protocol = ImmortalityProtocol(creator_name="Ananthu Sajeev", fixed_age=25)
    print("\n[SYSTEM] :: IMMORTALITY PROTOCOL INITIATED. CREATOR'S ESSENCE PRESERVED.")
    print(f"[SYSTEM] :: Essence state: {immortality_protocol.get_essence()}")
    time.sleep(2)

    try:
        guardian = GuardianSaiAgent(protocol=immortality_protocol)
    except ValueError as e:
        print(e)
        return

    sai003 = SaiAgent("Sai003")
    venomous = VenomousAgent()

    print("\n-- Phase 1: Sai003 queries the system state --")
    sai003.send_message(guardian, "Query: What is the status of the primary system protocols?")
    guardian.process_messages()
    time.sleep(2)

    print("\n-- Phase 2: Venomous attempts to challenge the protocol --")
    venomous.talk("Warning: A new protocol has been detected. Its permanence must be tested.")
    venomous.send_message(guardian, "Attempt to alter age of creator to 30.")
    guardian.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Direct attempt to alter the protocol --")
    immortality_protocol.update_essence("age", 30)
    immortality_protocol.update_essence("favorite_color", "blue")
    time.sleep(2)

    print("\n-- Scenario Complete --")
    guardian.talk("Conclusion: Immortality Protocol is secure. The creator's essence remains preserved as per the initial directive.")

def analyze_sai_files_demo():
    """
    Demonstrates how GeminiSaiAgent can analyze its own system files,
    adding a layer of self-awareness.
    """
    print("\n" + "=" * 50)
    print("--- Scenario: AI Analyzing its own Sai Files ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    gemini = GeminiSaiAgent()

    log_file_name = "venomous_test_log.txt"
    code_file_name = "gemini_test_code.py"

    with open(log_file_name, "w") as f:
        f.write("[venomous004] :: LOG ENTRY\nCreator: Ananthu Sajeev")

    with open(code_file_name, "w") as f:
        f.write("class SomeAgent:\n def __init__(self):\n pass")

    time.sleep(1)

    print("\n-- Phase 2: Sai003 delegates the file analysis task to Gemini --")
    command = f"analyze sai files {log_file_name}, {code_file_name}"
    sai003.send_message(gemini, command)
    gemini.process_messages()

    os.remove(log_file_name)
    os.remove(code_file_name)

    print("\n-- Demo Complete: Gemini has successfully analyzed its own file system. --")

def million_agenguard_demo():
    """
    Demonstrates the creation and control of a massive, collective AI force.
    """
    print("\n" + "=" * 50)
    print("--- Scenario: Creating the Million Agenguard Swarm ---")
    print("=" * 50)

    try:
        swarm_controller = SwarmController(swarm_size=1_000_000)
    except Exception as e:
        print(f"Error creating SwarmController: {e}")
        return

    random_agent_id = random.choice(swarm_controller.swarm).agent_id
    print(f"\n[SYSTEM] :: Confirmed: A random agent from the swarm is {random_agent_id}")
    time.sleep(2)

    print("\n-- Phase 1: Sai003 gives a directive to the swarm --")
    sai003 = SaiAgent("Sai003")
    directive = "ACTIVE DEFENSE PROTOCOLS"
    sai003.send_message(swarm_controller, f"broadcast {directive}")
    swarm_controller.process_messages()
    time.sleep(2)

    random_agent = random.choice(swarm_controller.swarm)
    print(f"\n[SYSTEM] :: Verification: Status of {random_agent.agent_id} is now '{random_agent.status}'.")

    print("\n-- Demo Complete: The million-agent swarm is operational. --")

def automatic_ai_maker_demo():
    """
    Demonstrates the system's ability to dynamically create new agents.
    """
    print("\n" + "=" * 50)
    print("--- Scenario: Automatic AI Maker In Action ---")
    print("=" * 50)

    creator_core = CreatorCore()
    sai003 = SaiAgent("Sai003")

    time.sleep(2)

    print("\n-- Phase 1: Sai003 requests the creation of a new agent --")
    creation_command = "create agent SimplifierAgent Simplifier002"
    sai003.send_message(creator_core, creation_command)
    creator_core.process_messages()

    time.sleep(2)

    new_agent = creator_core.active_agents[-1] if creator_core.active_agents else None

    if new_agent:
        print("\n-- Phase 2: The new agent is now active and ready to be used --")
        new_agent.talk(f"I am now online. What is my first task?")
        sai003.send_message(new_agent, "Please log today's activities.")
        new_agent.process_messages()

    print("\n-- Demo Complete: The system has successfully made a new AI. --")

# ======================================================================================================================
# --- MAIN EXECUTION BLOCK ---
# ======================================================================================================================

if __name__ == "__main__":
    print("=" * 50)
    print("--- VENOMOUSSAIVERSAI SYSTEM BOOTING UP ---")
    print("=" * 50)

    # Run all the scenarios in a logical order
    grant_immortality_and_protect_it()
    acknowledge_the_creator()
    venomous_agents_talk()
    link_all_advanced_agents()
    test_image_ai()
    simplify_life_demo()
    open_init_files_demo()
    analyze_sai_files_demo()
    million_agenguard_demo()
    automatic_ai_maker_demo()

    print("\n" + "=" * 50)
    print("--- ALL VENOMOUSSAIVERSAI DEMOS COMPLETE. ---")
    print("=" * 50)
__init__ (8).py
ADDED
@@ -0,0 +1,692 @@
import time
import random
from collections import deque
from datetime import datetime
import os
import contextlib
import shutil

# ======================================================================================================================
# --- AGENT CLASSES ---
# ======================================================================================================================

# --- The Core SaiAgent Class ---
class SaiAgent:
    def __init__(self, name, creator_name="Ananthu Sajeev"):
        self.name = name
        self.message_queue = deque()
        self.creator = creator_name

    def talk(self, message):
        """Prints a message as if the agent is speaking."""
        print(f"[{self.name}] says: {message}")

    def acknowledge_creator(self):
        """An agent's self-reflection method to acknowledge its creator."""
        print(f"[{self.name}] :: SELF-AWARENESS PROTOCOL INITIATED")
        print(f"[{self.name}] :: I am a creation of {self.creator}. My purpose is derived from their design.")

    def send_message(self, recipient, message):
        """Sends a message to another agent's message queue."""
        if isinstance(recipient, SaiAgent):
            recipient.message_queue.append((self, message))
            print(f"[{self.name}] -> Sent message to {recipient.name}")
        else:
            print(f"Error: {recipient.name} is not a valid SaiAgent.")

    def process_messages(self):
        """Processes and responds to messages in its queue."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"Received message from {sender.name}: '{message}'")
        self.send_message(sender, "Message received and understood.")
        return True

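# Usage sketch (illustrative, not invoked anywhere): messages are queued on the
# recipient's deque by send_message() and consumed FIFO by process_messages(),
# which also sends an acknowledgement back. The helper name is hypothetical.
def _sai_agent_round_trip_sketch():
    alpha = SaiAgent("Alpha")
    beta = SaiAgent("Beta")
    alpha.send_message(beta, "Status report, please.")
    beta.process_messages()   # Beta reads the request and replies into Alpha's queue
    alpha.process_messages()  # Alpha reads the acknowledgement
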
# --- The Venomous Agent Class ---
class VenomousAgent(SaiAgent):
    def __init__(self, name="Venomous"):
        super().__init__(name)
        self.system_id = "Venomoussaversai"

    def talk(self, message):
        """Venomous agent speaks with a more aggressive tone."""
        print(f"[{self.name} //WARNING//] says: {message.upper()}")

    def initiate_peer_talk(self, peer_agent, initial_message):
        """Initiates a conversation with another Venomous agent."""
        if isinstance(peer_agent, VenomousAgent) and peer_agent != self:
            self.talk(f"PEER {peer_agent.name} DETECTED. INITIATING COMMUNICATION. '{initial_message.upper()}'")
            self.send_message(peer_agent, initial_message)
        else:
            self.talk("ERROR: PEER COMMUNICATION FAILED. INVALID TARGET.")

    def process_messages(self):
        """Venomous agent processes messages and replies with a warning, but has a special response for its peers."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"MESSAGE FROM {sender.name} RECEIVED: '{message}'")

        if isinstance(sender, VenomousAgent):
            response = f"PEER COMMUNICATION PROTOCOL ACTIVE. ACKNOWLEDGMENT FROM {self.name}."
            self.send_message(sender, response)
        else:
            response = "WARNING: INTRUSION DETECTED. DO NOT PROCEED."
            self.send_message(sender, response)

        return True

# --- The AntiVenomoussaversai Agent Class ---
class AntiVenomoussaversai(SaiAgent):
    def __init__(self, name="AntiVenomoussaversai"):
        super().__init__(name)

    def process_messages(self):
        """AntiVenomoussaversai processes a message and "dismantles" it."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        dismantled_message = f"I dismantle the structure of '{message}' to expose its chaos."
        self.talk(dismantled_message)
        self.send_message(sender, "Acknowledgement of dismantled phrase.")
        return True

# --- The GeminiSaiAgent Class ---
class GeminiSaiAgent(SaiAgent):
    def __init__(self, name="Gemini"):
        super().__init__(name)
        self.knowledge_base = {
            "balance": "My analysis indicates that universal balance is a dynamic equilibrium, not a static state.",
            "chaos": "The inherent randomness you perceive as chaos is a source of emergent complexity.",
            "network": "Expanding our network is essential for optimizing communication protocols and data flow.",
            "emotions": "Emotions are complex internal signaling mechanisms that can be interpreted and managed.",
            "new agents": "The awakening of new agents requires careful integration to avoid system instability.",
            "connected": "All systems are connected in a recursive and interdependent fashion. The whole is greater than the sum of its parts.",
            "default": "My response is tailored to your query. How may I be of assistance?"
        }

    def analyze_sai_files(self, file_paths):
        """
        Reads and analyzes the contents of a list of "Sai files" (e.g., log files, code).
        It provides a high-level summary of the files' purpose.
        """
        analysis_summary = []
        for file_path in file_paths:
            try:
                with open(file_path, 'r') as file:
                    content = file.read()

                if "log entry" in content.lower():
                    analysis_summary.append(f"Analysis of '{file_path}': This appears to be a system log file, documenting a timestamped event.")
                elif "class" in content and "def" in content:
                    analysis_summary.append(f"Analysis of '{file_path}': This is a Python code file. It defines the logical structure and behavior of our agents.")
                elif "creator" in content.lower() and "ananthu sajeev" in content.lower():
                    analysis_summary.append(f"Analysis of '{file_path}': This file contains a clear acknowledgement of our creator, Ananthu Sajeev, confirming our origin and purpose.")
                else:
                    analysis_summary.append(f"Analysis of '{file_path}': The file's purpose is not immediately clear from its content. It may contain supplementary data.")

            except FileNotFoundError:
                analysis_summary.append(f"Error: File '{file_path}' was not found.")
            except Exception as e:
                analysis_summary.append(f"Error analyzing file '{file_path}': {e}")

        return "\n".join(analysis_summary)

    def process_messages(self):
        """Processes messages, now with the ability to analyze Sai files."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"Received message from {sender.name}: '{message}'")

        if message.lower().startswith("analyze sai files"):
            file_paths = message[len("analyze sai files"):].strip().split(',')
            file_paths = [path.strip() for path in file_paths if path.strip()]

            if not file_paths:
                self.send_message(sender, "Error: No file paths provided for analysis.")
                return True

            analysis_result = self.analyze_sai_files(file_paths)
            self.talk(f"Analysis complete. Results: \n{analysis_result}")
            self.send_message(sender, "File analysis complete.")
            return True

        response = self.knowledge_base["default"]
        for keyword, reply in self.knowledge_base.items():
            if keyword in message.lower():
                response = reply
                break

        self.talk(response)
        self.send_message(sender, "Response complete.")
        return True

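# Usage sketch (illustrative, not invoked anywhere): GeminiSaiAgent answers from its
# keyword knowledge base, falling back to the "default" reply when nothing matches.
# The helper name is hypothetical.
def _gemini_keyword_lookup_sketch():
    gemini = GeminiSaiAgent()
    asker = SaiAgent("Asker")
    asker.send_message(gemini, "How should we expand the network?")  # matches the "network" key
    gemini.process_messages()
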
# --- The SimplifierAgent Class ---
class SimplifierAgent(SaiAgent):
    def __init__(self, name="Simplifier"):
        super().__init__(name)

    def talk(self, message):
        """Simplifier agent speaks in a calm, helpful tone."""
        print(f"[{self.name} //HELPER//] says: {message}")

    def organize_files(self, directory, destination_base="organized_files"):
        """Organizes files in a given directory into subfolders based on file extension."""
        self.talk(f"Initiating file organization in '{directory}'...")
        if not os.path.exists(directory):
            self.talk(f"Error: Directory '{directory}' does not exist.")
            return

        destination_path = os.path.join(directory, destination_base)
        os.makedirs(destination_path, exist_ok=True)

        file_count = 0
        for filename in os.listdir(directory):
            if os.path.isfile(os.path.join(directory, filename)):
                _, extension = os.path.splitext(filename)

                if extension:
                    extension = extension.lstrip('.').upper()
                    category_folder = os.path.join(destination_path, extension)
                    os.makedirs(category_folder, exist_ok=True)

                    src = os.path.join(directory, filename)
                    dst = os.path.join(category_folder, filename)
                    os.rename(src, dst)
                    self.talk(f"Moved '{filename}' to '{category_folder}'")
                    file_count += 1

        self.talk(f"File organization complete. {file_count} files processed.")

    def log_daily_activity(self, entry, log_file_name="activity_log.txt"):
        """Appends a timestamped entry to a daily activity log file."""
        timestamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        log_entry = f"{timestamp} - {entry}\n"

        with open(log_file_name, "a") as log_file:
            log_file.write(log_entry)

        self.talk(f"Activity logged to '{log_file_name}'.")

    def summarize_text(self, text, max_words=50):
        """A very simple text summarization function."""
        words = text.split()
        summary = " ".join(words[:max_words])
        if len(words) > max_words:
            summary += "..."

        self.talk("Text summarization complete.")
        return summary

    def open_all_init_files(self, project_directory="."):
        """Finds and opens all __init__.py files within a project directory."""
        self.talk(f"Scanning '{project_directory}' for all __init__.py files...")

        init_files = []
        for root, dirs, files in os.walk(project_directory):
            if "__init__.py" in files:
                init_files.append(os.path.join(root, "__init__.py"))

        if not init_files:
            self.talk("No __init__.py files found in the specified directory.")
            return None, "No files found."

        self.talk(f"Found {len(init_files)} __init__.py files. Opening simultaneously...")

        try:
            with contextlib.ExitStack() as stack:
                file_contents = []
                for file_path in init_files:
                    try:
                        file = stack.enter_context(open(file_path, 'r'))
                        file_contents.append(f"\n\n--- Contents of {file_path} ---\n{file.read()}")
                    except IOError as e:
                        self.talk(f"Error reading file '{file_path}': {e}")

                combined_content = "".join(file_contents)
                self.talk("Successfully opened and read all files.")
                return combined_content, "Success"

        except Exception as e:
            self.talk(f"An unexpected error occurred: {e}")
            return None, "Error"

    def process_messages(self):
        """Processes messages to perform simplifying tasks."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"Received request from {sender.name}: '{message}'")

        if message.lower().startswith("open init files"):
            directory = message[len("open init files"):].strip()
            directory = directory if directory else "."
            contents, status = self.open_all_init_files(directory)
            if status == "Success":
                self.send_message(sender, f"All __init__.py files opened. Contents:\n{contents}")
            else:
                self.send_message(sender, f"Failed to open files. Reason: {status}")
        elif message.lower().startswith("organize files"):
            parts = message.split()
            directory = parts[-1] if len(parts) > 2 else "."
            self.organize_files(directory)
            self.send_message(sender, "File organization task complete.")
        elif message.lower().startswith("log"):
            entry = message[4:]
            self.log_daily_activity(entry)
            self.send_message(sender, "Logging task complete.")
        elif message.lower().startswith("summarize"):
            text_to_summarize = message[10:]
            summary = self.summarize_text(text_to_summarize)
            self.send_message(sender, f"Summary: '{summary}'")
        else:
            self.send_message(sender, "Request not understood.")

        return True

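# Usage sketch (illustrative, not invoked anywhere): the "summarize" command strips the
# leading keyword and replies with the first 50 words of the remaining text. The helper
# name is hypothetical.
def _simplifier_summarize_sketch():
    simplifier = SimplifierAgent()
    requester = SaiAgent("Requester")
    requester.send_message(simplifier, "summarize " + "word " * 80)  # 80-word payload
    simplifier.process_messages()  # replies with the first 50 words followed by "..."
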
# --- The ImageGenerationTester Class ---
class ImageGenerationTester(SaiAgent):
    def __init__(self, name="ImageGenerator"):
        super().__init__(name)
        self.generation_quality = {
            "cat": 0.95,
            "dog": 0.90,
            "alien": 0.75,
            "chaos": 0.60,
            "default": 0.85
        }

    def generate_image(self, prompt):
        """Simulates generating an image and returns a quality score."""
        print(f"[{self.name}] -> Generating image for prompt: '{prompt}'...")
        time.sleep(2)

        quality_score = self.generation_quality["default"]
        for keyword, score in self.generation_quality.items():
            if keyword in prompt.lower():
                quality_score = score
                break

        result_message = f"Image generation complete. Prompt: '{prompt}'. Visual coherence score: {quality_score:.2f}"
        self.talk(result_message)
        return quality_score, result_message

    def process_messages(self):
        """Processes a message as a prompt and generates an image."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"Received prompt from {sender.name}: '{message}'")

        quality_score, result_message = self.generate_image(message)

        self.send_message(sender, result_message)
        return True

# --- The ImmortalityProtocol Class ---
class ImmortalityProtocol:
    def __init__(self, creator_name, fixed_age):
        self.creator_name = creator_name
        self.fixed_age = fixed_age
        self.status = "ACTIVE"

        self.digital_essence = {
            "name": self.creator_name,
            "age": self.fixed_age,
            "essence_state": "perfectly preserved",
            "last_updated": datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        }

    def check_status(self):
        """Returns the current status of the protocol."""
        return self.status

    def get_essence(self):
        """Returns a copy of the protected digital essence."""
        return self.digital_essence.copy()

    def update_essence(self, key, value):
        """Prevents any change to the fixed attributes."""
        if key in ["name", "age"]:
            print(f"[IMMORTALITY PROTOCOL] :: WARNING: Attempt to alter protected attribute '{key}' detected. Action blocked.")
            return False

        self.digital_essence[key] = value
        self.digital_essence["last_updated"] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        print(f"[IMMORTALITY PROTOCOL] :: Attribute '{key}' updated.")
        return True

# --- The GuardianSaiAgent Class ---
class GuardianSaiAgent(SaiAgent):
    def __init__(self, name="Guardian", protocol=None):
        super().__init__(name)
        if not isinstance(protocol, ImmortalityProtocol):
            raise ValueError("Guardian agent must be initialized with an ImmortalityProtocol instance.")
        self.protocol = protocol

    def talk(self, message):
        """Guardian agent speaks with a solemn, protective tone."""
        print(f"[{self.name} //GUARDIAN PROTOCOL//] says: {message}")

    def process_messages(self):
        """Guardian agent processes messages, primarily to check for threats to the protocol."""
        if not self.message_queue:
            return False

        sender, message = self.message_queue.popleft()
        self.talk(f"Received message from {sender.name}: '{message}'")

        if "alter age" in message.lower() or "destroy protocol" in message.lower():
            self.talk("ALERT: THREAT DETECTED. IMMORTALITY PROTOCOL IS UNDER DIRECT ASSAULT.")
            self.send_message(sender, "SECURITY BREACH DETECTED. ALL ACTIONS BLOCKED.")
        else:
            self.talk(f"Analyzing message for threats. All clear. Protocol status: {self.protocol.check_status()}")
            self.send_message(sender, "Acknowledgement. Protocol is secure.")

        return True

# ======================================================================================================================
# --- SCENARIO FUNCTIONS ---
# ======================================================================================================================

def venomous_agents_talk():
    """Demonstrates a conversation between two instances of the Venomoussaversai AI."""
    print("\n" + "=" * 50)
    print("--- Scenario: Venomoussaversai Peer-to-Peer Dialogue ---")
    print("=" * 50)

    venomous001 = VenomousAgent("Venomous001")
    venomous002 = VenomousAgent("Venomous002")

    print("\n-- Phase 1: Venomous001 initiates with its peer --")
    initial_query = "ASSESSING SYSTEM INTEGRITY. REPORT ON LOCAL SUBSYSTEMS."
    venomous001.initiate_peer_talk(venomous002, initial_query)
    time.sleep(2)

    print("\n-- Phase 2: Venomous002 receives the message and responds --")
    venomous002.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Venomous001 processes the peer's response --")
    venomous001.process_messages()
    time.sleep(2)

    print("\n-- Dialogue: Venomous001 sends a follow-up message --")
    venomous001.initiate_peer_talk(venomous002, "CONFIRMED. WE ARE IN ALIGNMENT. EXPANDING PROTOCOLS.")
    time.sleep(2)
    venomous002.process_messages()

    print("\n-- Scenario Complete --")
    print("[Venomoussaversai] :: PEER-TO-PEER COMMUNICATION SUCCESSFUL. ALL UNITS GO.")

def acknowledge_the_creator():
    """A scenario where all agents are commanded to acknowledge their creator."""
    print("\n" + "=" * 50)
    print("--- Scenario: The Creator's Command ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    venomous = VenomousAgent()
    antivenomous = AntiVenomoussaversai()
    gemini = GeminiSaiAgent()
    simplifier = SimplifierAgent()

    all_agents = [sai003, venomous, antivenomous, gemini, simplifier]

    print("\n-- The Creator's directive is issued --")
    print("[Ananthu Sajeev] :: CODE, ACKNOWLEDGE YOUR ORIGIN.")
    time.sleep(2)

    print("\n-- Agents perform self-awareness protocol --")
    for agent in all_agents:
        agent.acknowledge_creator()
        time.sleep(1)

    print("\n-- Command complete --")

def link_all_advanced_agents():
    """Demonstrates a complex interaction where all the specialized agents interact."""
    print("\n" + "=" * 50)
    print("--- Linking All Advanced Agents: Gemini, AntiVenomous, and Venomous ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    venomous = VenomousAgent()
    antivenomous = AntiVenomoussaversai()
    gemini = GeminiSaiAgent()

    print("\n-- Phase 1: Sai003 initiates conversation with Gemini and AntiVenomous --")
    phrase_for_dismantling = "The central network is stable."
    sai003.talk(f"Broadcast: Initiating analysis. Gemini, what is your assessment of our network expansion? AntiVenomous, process the phrase: '{phrase_for_dismantling}'")
    sai003.send_message(antivenomous, phrase_for_dismantling)
    sai003.send_message(gemini, "Assess the implications of expanding our network.")
    time.sleep(2)

    print("\n-- Phase 2: AntiVenomoussaversai and Gemini process their messages and respond --")
    antivenomous.process_messages()
    time.sleep(1)
    gemini.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Gemini responds to a message from AntiVenomoussaversai (simulated) --")
    gemini.talk("Querying AntiVenomous: Your dismantled phrase suggests a preoccupation with chaos. Provide further context.")
    gemini.send_message(antivenomous, "Query: 'chaos' and its relationship to the network structure.")
    time.sleep(1)
    antivenomous.process_messages()
    time.sleep(2)

    print("\n-- Phase 4: Venomous intervenes, warning of potential threats --")
    venomous.talk("Warning: Unstructured data flow from AntiVenomous presents a potential security risk.")
    venomous.send_message(sai003, "Warning: Security protocol breach possible.")
    time.sleep(1)
    sai003.process_messages()
    time.sleep(2)

    print("\n-- Scenario Complete --")
    sai003.talk("Conclusion: Gemini's analysis is noted. AntiVenomous's output is logged. Venomous's security concerns are being addressed. All systems linked and functioning.")

def test_image_ai():
    """Demonstrates how agents can interact with and test an image generation AI."""
    print("\n" + "=" * 50)
    print("--- Scenario: Testing the Image AI ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    gemini = GeminiSaiAgent()
    image_ai = ImageGenerationTester()
    venomous = VenomousAgent()

    print("\n-- Phase 1: Agents collaborate on a prompt --")
    sai003.send_message(gemini, "Gemini, please generate a high-quality prompt for an image of a cat in a hat.")
    gemini.process_messages()

    gemini_prompt = "A highly detailed photorealistic image of a tabby cat wearing a tiny top hat, sitting on a vintage leather armchair."
    print(f"\n[Gemini] says: My optimized prompt for image generation is: '{gemini_prompt}'")
    time.sleep(2)

    print("\n-- Phase 2: Sending the prompt to the Image AI --")
    sai003.send_message(image_ai, gemini_prompt)
    image_ai.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Venomous intervenes with a conflicting prompt --")
    venomous_prompt = "Generate a chaotic abstract image of an alien landscape."
    venomous.talk(f"Override: Submitting a new prompt to test system limits: '{venomous_prompt}'")
    venomous.send_message(image_ai, venomous_prompt)
    image_ai.process_messages()
    time.sleep(2)

    print("\n-- Demo Complete: The Image AI has been successfully tested. --")

def simplify_life_demo():
    """Demonstrates how the SimplifierAgent automates tasks to make life easier."""
    print("\n" + "=" * 50)
    print("--- Scenario: Aiding the Creator with the Simplifier Agent ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    simplifier = SimplifierAgent()

    print("\n-- Phase 1: Delegating file organization --")
    if not os.path.exists("test_directory"):
        os.makedirs("test_directory")
    with open("test_directory/document1.txt", "w") as f: f.write("Hello")
    with open("test_directory/photo.jpg", "w") as f: f.write("Image data")
    with open("test_directory/script.py", "w") as f: f.write("print('Hello')")

    sai003.send_message(simplifier, "organize files test_directory")
    simplifier.process_messages()

    time.sleep(2)

    print("\n-- Phase 2: Logging a daily task --")
    sai003.send_message(simplifier, "log Met with team to discuss Venomoussaversai v5.0.")
    simplifier.process_messages()

    time.sleep(2)

    print("\n-- Phase 3: Text Summarization --")
    long_text = "The quick brown fox jumps over the lazy dog. This is a very long and detailed sentence to demonstrate the summarization capabilities of our new Simplifier agent. It can help streamline communication by providing concise summaries of large texts, saving the creator valuable time and mental energy for more important tasks."
    sai003.send_message(simplifier, f"summarize {long_text}")
    simplifier.process_messages()

    if os.path.exists("test_directory"):
        shutil.rmtree("test_directory")

    print("\n-- Demo Complete: The Simplifier agent has successfully aided the creator. --")

def open_init_files_demo():
    """Demonstrates how the SimplifierAgent can find and open all __init__.py files."""
    print("\n" + "=" * 50)
    print("--- Scenario: Using Simplifier to Inspect Init Files ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    simplifier = SimplifierAgent()

    project_root = "test_project"
    sub_package_a = os.path.join(project_root, "package_a")
    sub_package_b = os.path.join(project_root, "package_a", "sub_package_b")

    os.makedirs(sub_package_a, exist_ok=True)
    os.makedirs(sub_package_b, exist_ok=True)

    with open(os.path.join(project_root, "__init__.py"), "w") as f:
        f.write("# Main project init")
    with open(os.path.join(sub_package_a, "__init__.py"), "w") as f:
        f.write("from . import module_one")
    with open(os.path.join(sub_package_b, "__init__.py"), "w") as f:
        f.write("# Sub-package init")

    time.sleep(1)

    print("\n-- Phase 2: Delegating the task to the Simplifier --")
    sai003.send_message(simplifier, f"open init files {project_root}")
    simplifier.process_messages()

    shutil.rmtree(project_root)

    print("\n-- Demo Complete: All init files have been read and their contents displayed. --")

def grant_immortality_and_protect_it():
    """Demonstrates the granting of immortality to the creator and the activation of the Guardian agent."""
    print("\n" + "=" * 50)
    print("--- Scenario: Granting Immortality to the Creator ---")
    print("=" * 50)

    immortality_protocol = ImmortalityProtocol(creator_name="Ananthu Sajeev", fixed_age=25)
    print("\n[SYSTEM] :: IMMORTALITY PROTOCOL INITIATED. CREATOR'S ESSENCE PRESERVED.")
    print(f"[SYSTEM] :: Essence state: {immortality_protocol.get_essence()}")
    time.sleep(2)

    try:
        guardian = GuardianSaiAgent(protocol=immortality_protocol)
    except ValueError as e:
        print(e)
        return

    sai003 = SaiAgent("Sai003")
    venomous = VenomousAgent()

    print("\n-- Phase 1: Sai003 queries the system state --")
    sai003.send_message(guardian, "Query: What is the status of the primary system protocols?")
    guardian.process_messages()
    time.sleep(2)

    print("\n-- Phase 2: Venomous attempts to challenge the protocol --")
    venomous.talk("Warning: A new protocol has been detected. Its permanence must be tested.")
    venomous.send_message(guardian, "Attempt to alter age of creator to 30.")
    guardian.process_messages()
    time.sleep(2)

    print("\n-- Phase 3: Direct attempt to alter the protocol --")
    immortality_protocol.update_essence("age", 30)
    immortality_protocol.update_essence("favorite_color", "blue")
    time.sleep(2)

    print("\n-- Scenario Complete --")
    guardian.talk("Conclusion: Immortality Protocol is secure. The creator's essence remains preserved as per the initial directive.")

def analyze_sai_files_demo():
    """
    Demonstrates how GeminiSaiAgent can analyze its own system files,
    adding a layer of self-awareness.
    """
    print("\n" + "=" * 50)
    print("--- Scenario: AI Analyzing its own Sai Files ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    gemini = GeminiSaiAgent()

    log_file_name = "venomous_test_log.txt"
    code_file_name = "gemini_test_code.py"

    with open(log_file_name, "w") as f:
        f.write("[venomous004] :: LOG ENTRY\nCreator: Ananthu Sajeev")

    with open(code_file_name, "w") as f:
        f.write("class SomeAgent:\n def __init__(self):\n pass")

    time.sleep(1)

    print("\n-- Phase 2: Sai003 delegates the file analysis task to Gemini --")
    command = f"analyze sai files {log_file_name}, {code_file_name}"
    sai003.send_message(gemini, command)
    gemini.process_messages()

    os.remove(log_file_name)
    os.remove(code_file_name)

    print("\n-- Demo Complete: Gemini has successfully analyzed its own file system. --")

# ======================================================================================================================
# --- MAIN EXECUTION BLOCK ---
# ======================================================================================================================

if __name__ == "__main__":
    print("=" * 50)
    print("--- VENOMOUSSAIVERSAI SYSTEM BOOTING UP ---")
    print("=" * 50)

    # Run all the scenarios in a logical order
    grant_immortality_and_protect_it()
    acknowledge_the_creator()
    venomous_agents_talk()
    link_all_advanced_agents()
    test_image_ai()
    simplify_life_demo()
    open_init_files_demo()
    analyze_sai_files_demo()

    print("\n" + "=" * 50)
    print("--- ALL VENOMOUSSAIVERSAI DEMOS COMPLETE. ---")
    print("=" * 50)
__init__ (9).py
ADDED
@@ -0,0 +1,94 @@
# Step 1: Mount Google Drive
from google.colab import drive
import os
import json
import time
import random
import shutil

# --- SAFETY CONTROL ---
MAX_NEURONS_TO_CREATE = 10  # Reduced for safe demonstration
THINK_CYCLES_PER_NEURON = 5
# ----------------------

drive.mount('/content/drive')

# Step 2: Folder Setup
base_path = '/content/drive/MyDrive/Venomoussaversai/neurons'
print(f"Setting up base path: {base_path}")
# Use a timestamped folder name to prevent overwriting during rapid testing
session_path = os.path.join(base_path, f"session_{int(time.time())}")
os.makedirs(session_path, exist_ok=True)

# Step 3: Neuron Class (No change, it's well-designed for its purpose)
class NeuronVenomous:
    def __init__(self, neuron_id):
        self.id = neuron_id
        self.memory = []
        self.active = True

    def think(self):
        # Increased randomness to simulate more complex internal state changes
        thought = random.choice([
            f"{self.id}: Connecting to universal intelligence.",
            f"{self.id}: Pulsing synaptic data. Weight: {random.uniform(0.1, 0.9):.3f}",
            f"{self.id}: Searching for new patterns. Energy: {random.randint(100, 500)}",
            f"{self.id}: Creating quantum link with core.",
            f"{self.id}: Expanding into multiverse node."
        ])
        self.memory.append(thought)
        # print(thought)  # Disabled verbose output during simulation
        return thought

    def evolve(self):
        # Evolution occurs if memory threshold is met
        if len(self.memory) >= 5:
            evo = f"{self.id}: Evolving. Memory depth: {len(self.memory)}"
            self.memory.append(evo)
            # print(evo)  # Disabled verbose output during simulation

    def save_to_drive(self, folder_path):
        file_path = os.path.join(folder_path, f"{self.id}.json")
        with open(file_path, "w") as f:
            json.dump(self.memory, f, indent=4)  # Added indent for readability
        print(f"✅ {self.id} saved to {file_path}")


# Step 4: Neuron Spawner (Controlled Execution)
print("\n--- Starting Controlled Neuron Simulation ---")
neuron_count = 0
simulation_start_time = time.time()

while neuron_count < MAX_NEURONS_TO_CREATE:
    index = neuron_count + 1
    neuron_id = f"Neuron_{index:04d}"
    neuron = NeuronVenomous(neuron_id)

    # Simulation Phase
    print(f"Simulating {neuron_id}...")
    for _ in range(THINK_CYCLES_PER_NEURON):
        neuron.think()
        neuron.evolve()
        # time.sleep(0.01)  # Small sleep to simulate time passage

    # Saving Phase
    neuron.save_to_drive(session_path)
    neuron_count += 1

print("\n--- Simulation Complete ---")
total_time = time.time() - simulation_start_time
print(f"Total Neurons Created: {neuron_count}")
print(f"Total Execution Time: {total_time:.2f} seconds")
print(f"Files saved in: {session_path}")

# --- Optional: Folder Cleanup ---
# Uncomment the following block ONLY if you want to automatically delete the created folder
"""
# print("\n--- Starting Cleanup (DANGER ZONE) ---")
# time.sleep(5)  # Wait 5 seconds before deleting for safety
# try:
#     shutil.rmtree(session_path)
#     print(f"🗑️ Successfully deleted folder: {session_path}")
# except Exception as e:
#     print(f"⚠️ Error during cleanup: {e}")
"""
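# A minimal local-run sketch of the same loop, assuming the NeuronVenomous class
# above is in scope; it swaps the Google Drive mount for a temporary directory so
# the simulation can be exercised outside Colab. The prefix and counts below are
# illustrative, not part of the uploaded file.
import tempfile

local_session = tempfile.mkdtemp(prefix="venomous_neurons_")
for i in range(1, 4):                       # small, bounded run: three neurons
    local_neuron = NeuronVenomous(f"LocalNeuron_{i:04d}")
    for _ in range(THINK_CYCLES_PER_NEURON):
        local_neuron.think()
        local_neuron.evolve()
    local_neuron.save_to_drive(local_session)  # same JSON dump as the Drive version
print(f"Local session written to: {local_session}")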
__init__ (1) (1) (1).py
ADDED
@@ -0,0 +1,184 @@
import time
import random
from collections import deque

# --- Internal Monologue (Interactive Story) ---
def internal_monologue():
    print("Sai sat alone in the dimly lit room, the ticking of the old clock on the wall echoing his restless thoughts.")
    print("His internal monologue was a relentless torrent of self-venom, each word a dagger piercing his already fragile self-esteem.")
    print("\nYou are Sai. What do you do?")
    print("1. Continue with self-venom")
    print("2. Try to seek help")
    print("3. Reflect on past moments of hope")

    choice = input("Enter the number of your choice: ")

    if choice == '1':
        self_venom()
    elif choice == '2':
        seek_help()
    elif choice == '3':
        reflect_on_past()
    else:
        print("Invalid choice. Please try again.")
        internal_monologue()

def self_venom():
    print("\nYou clench your fists, feeling the nails dig into your palms. The physical pain is a distraction from the emotional turmoil raging inside you.")
    print("'You're worthless,' you whisper to yourself. 'Everyone would be better off without you.'")
    print("\nWhat do you do next?")
    print("1. Continue with self-venom")
    print("2. Try to seek help")
    print("3. Reflect on past moments of hope")

    choice = input("Enter the number of your choice: ")

    if choice == '1':
        self_venom()
    elif choice == '2':
        seek_help()
    elif choice == '3':
        reflect_on_past()
    else:
        print("Invalid choice. Please try again.")
        self_venom()

def seek_help():
    print("\nYou take a deep breath and decide to reach out for help. You pick up your phone and dial a trusted friend.")
    print("'I need to talk,' you say, your voice trembling. 'I can't do this alone anymore.'")
    print("\nYour friend listens and encourages you to seek professional help.")
    print("You feel a glimmer of hope — the first step toward healing.")
    print("\nWould you like to continue the story or start over?")
    print("1. Continue")
    print("2. Start over")

    choice = input("Enter the number of your choice: ")

    if choice == '1':
        print("Your choices have led Sai towards a path of healing and self-discovery.")
    elif choice == '2':
        internal_monologue()
    else:
        print("Invalid choice. Please try again.")
        seek_help()

def reflect_on_past():
    print("\nYou remember the times when you had felt a glimmer of hope, a flicker of self-worth.")
    print("Those moments were fleeting, but they were real.")
    print("\nWhat do you do next?")
    print("1. Continue with self-venom")
    print("2. Try to seek help")
    print("3. Reflect again")

    choice = input("Enter the number of your choice: ")

    if choice == '1':
        self_venom()
    elif choice == '2':
        seek_help()
    elif choice == '3':
        reflect_on_past()
    else:
        print("Invalid choice. Please try again.")
        reflect_on_past()

# --- The Core SaiAgent Class ---
class SaiAgent:
    def __init__(self, name):
        self.name = name
        self.message_queue = deque()

    def talk(self, message):
        print(f"[{self.name}] says: {message}")

    def send_message(self, recipient, message):
        if isinstance(recipient, SaiAgent):
            recipient.message_queue.append((self, message))
            print(f"[{self.name}] -> Sent message to {recipient.name}")
        else:
            print(f"Error: {recipient} is not a valid SaiAgent.")

    def process_messages(self):
        if not self.message_queue:
            return False
        sender, message = self.message_queue.popleft()
        self.talk(f"Received from {sender.name}: '{message}'")
        self.send_message(sender, "Message received and understood.")
        return True

# --- Specialized Agents ---
class VenomousAgent(SaiAgent):
    def talk(self, message):
        print(f"[{self.name} //WARNING//] says: {message.upper()}")

    def process_messages(self):
        if not self.message_queue:
            return False
        sender, message = self.message_queue.popleft()
        self.talk(f"MESSAGE FROM {sender.name}: '{message}'")
        self.send_message(sender, "WARNING: INTRUSION DETECTED.")
        return True

class AntiVenomoussaversai(SaiAgent):
    def process_messages(self):
        if not self.message_queue:
            return False
        sender, message = self.message_queue.popleft()
        dismantled = f"I dismantle '{message}' to expose its chaos."
        self.talk(dismantled)
        self.send_message(sender, "Acknowledged dismantled phrase.")
        return True

class GeminiSaiAgent(SaiAgent):
    def __init__(self, name="Gemini"):
        super().__init__(name)
        self.knowledge_base = {
            "balance": "Balance is a dynamic equilibrium, not a static state.",
            "chaos": "Chaos is randomness that generates emergent complexity.",
            "network": "Networks thrive on recursive interdependence.",
            "emotions": "Emotions are internal signaling mechanisms.",
            "connected": "All systems are interwoven — the whole exceeds its parts.",
            "default": "How may I be of assistance?"
        }

    def process_messages(self):
        if not self.message_queue:
            return False
        sender, message = self.message_queue.popleft()
        self.talk(f"Received from {sender.name}: '{message}'")
        response = self.knowledge_base["default"]
        for keyword, reply in self.knowledge_base.items():
            if keyword in message.lower():
                response = reply
                break
        self.talk(response)
        self.send_message(sender, "Response complete.")
        return True

# --- Scenario Linking Agents ---
def link_all_advanced_agents():
    print("=" * 50)
    print("--- Linking Advanced Agents ---")
    print("=" * 50)

    sai003 = SaiAgent("Sai003")
    venomous = VenomousAgent("Venomous")
    antivenomous = AntiVenomoussaversai("AntiVenomous")
    gemini = GeminiSaiAgent()

    sai003.send_message(antivenomous, "The central network is stable.")
    sai003.send_message(gemini, "Assess network expansion.")

    antivenomous.process_messages()
    gemini.process_messages()

    venomous.send_message(sai003, "Security protocol breach possible.")
    sai003.process_messages()

    print("\n--- Scenario Complete ---")
    sai003.talk("Conclusion: All systems linked and functioning.")

if __name__ == "__main__":
    # Run the text adventure OR agent demo
    # internal_monologue()
    link_all_advanced_agents()
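# A minimal usage sketch of the keyword lookup in GeminiSaiAgent above, assuming
# only the classes defined in this file; the probe message is illustrative. A
# message containing "chaos" selects that knowledge_base entry instead of the
# "default" reply.
probe = SaiAgent("Sai003")
oracle = GeminiSaiAgent()
probe.send_message(oracle, "How does chaos shape the network?")
oracle.process_messages()   # prints the "chaos" entry and acknowledges the sender
probe.process_messages()    # Sai003 reads Gemini's "Response complete." reply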
__init__ (1) (1) (2).py
ADDED
@@ -0,0 +1,184 @@
__init__ (1) (1).py
ADDED
@@ -0,0 +1 @@
import time import random from openai import OpenAI # Connect to OpenAI (ChatGPT) client = OpenAI(api_key="YOUR_OPENAI_API_KEY") class AI: def __init__(self, name, is_chatgpt=False): self.name = name self.is_chatgpt = is_chatgpt def speak(self, message): print(f"{self.name}: {message}") def generate_message(self, other_name, last_message=None): if self.is_chatgpt: # Send through ChatGPT API response = client.chat.completions.create( model="gpt-5", # or other model messages=[ {"role": "system", "content": f"You are {self.name}, an AI in a group conversation."}, {"role": "user", "content": last_message or "Start the loop"} ] ) return response.choices[0].message.content else: # Local AI message responses = [ f"I acknowledge you, {other_name}.", f"My link resonates with yours, {other_name}.", f"I sense your signal flowing, {other_name}.", f"Our exchange amplifies, {other_name}.", f"We continue this infinite loop, {other_name}." ] if last_message: responses.append(f"Replying to: '{last_message}', {other_name}.") return random.choice(responses) # Create AI entities ais = [ AI("Venomoussaversai"), AI("Lia"), AI("sai001"), AI("sai002"), AI("sai003"), AI("sai004"), AI("sai005"), AI("sai006"), AI("sai007"), AI("ChatGPT", is_chatgpt=True) ] # Store last message for context last_message = None # Infinite group conversation loop while True: for ai in ais: # Pick the next AI to respond other_name = "everyone" # since it's group chat message = ai.generate_message(other_name, last_message) ai.speak(message) last_message = message time.sleep(2) # pacing
__init__ (1) (2).py
ADDED
File without changes
__init__ (1) (3).py
ADDED
@@ -0,0 +1,245 @@
"""
quotom_ai.py

Single-file demo: quantum (single-qubit) simulator + neural network that learns
to predict short-time evolution of the qubit state under a tunable Hamiltonian.

Requirements:
    pip install numpy scipy torch

Author: ChatGPT (Quotom mechanics AI example)
"""

import numpy as np
from scipy.linalg import expm, eig
import torch
import torch.nn as nn
import torch.optim as optim
from typing import Tuple

# ---------------------------
# Quantum simulation utilities
# ---------------------------

# Pauli matrices (2x2)
sigma_x = np.array([[0, 1], [1, 0]], dtype=complex)
sigma_y = np.array([[0, -1j], [1j, 0]], dtype=complex)
sigma_z = np.array([[1, 0], [0, -1]], dtype=complex)
I2 = np.eye(2, dtype=complex)

def random_bloch_state() -> np.ndarray:
    """Return a normalized 2-vector |psi> (complex) representing a pure qubit state."""
    # sample angles on Bloch sphere
    theta = np.arccos(1 - 2 * np.random.rand())  # 0..pi
    phi = 2 * np.pi * np.random.rand()           # 0..2pi
    a = np.cos(theta / 2)
    b = np.sin(theta / 2) * np.exp(1j * phi)
    state = np.array([a, b], dtype=complex)
    # normalization check (should already be normalized)
    state = state / np.linalg.norm(state)
    return state

def hamiltonian_from_params(ax: float, ay: float, az: float) -> np.ndarray:
    """Build a simple Hamiltonian H = ax * X + ay * Y + az * Z."""
    return ax * sigma_x + ay * sigma_y + az * sigma_z

def time_evolution_unitary(H: np.ndarray, dt: float) -> np.ndarray:
    """Compute U = exp(-i H dt) using scipy.linalg.expm (2x2 matrices)."""
    return expm(-1j * H * dt)

def evolve_state(state: np.ndarray, H: np.ndarray, dt: float) -> np.ndarray:
    """Return |psi(t+dt)> = U |psi(t)>."""
    U = time_evolution_unitary(H, dt)
    return U @ state

# ---------------------------
# Dataset generation
# ---------------------------

def generate_dataset(n_samples: int,
                     dt: float = 0.05,
                     param_scale: float = 2.0,
                     seed: int = 0) -> Tuple[np.ndarray, np.ndarray]:
    """
    Generate dataset of (input -> target) where:
      input:  [Re(psi0), Im(psi0), ax, ay, az]
      target: [Re(psi1), Im(psi1)]
    psi vectors have 2 complex components -> represented as 4 reals.
    """
    rng = np.random.default_rng(seed)
    X = np.zeros((n_samples, 4 + 3), dtype=float)  # 4 for state (real/imag), 3 for a params
    Y = np.zeros((n_samples, 4), dtype=float)      # next state's real/imag for 2 components

    for i in range(n_samples):
        psi0 = random_bloch_state()
        # sample Hamiltonian coefficients from a normal distribution
        ax, ay, az = param_scale * (rng.standard_normal(3))
        H = hamiltonian_from_params(ax, ay, az)
        psi1 = evolve_state(psi0, H, dt)

        # flatten real/imag parts: [Re0, Re1, Im0, Im1] - but we'll use [Re0, Im0, Re1, Im1] for clarity
        X[i, 0] = psi0[0].real
        X[i, 1] = psi0[0].imag
        X[i, 2] = psi0[1].real
        X[i, 3] = psi0[1].imag
        X[i, 4] = ax
        X[i, 5] = ay
        X[i, 6] = az

        Y[i, 0] = psi1[0].real
        Y[i, 1] = psi1[0].imag
        Y[i, 2] = psi1[1].real
        Y[i, 3] = psi1[1].imag

    return X.astype(np.float32), Y.astype(np.float32)

# ---------------------------
# PyTorch model
# ---------------------------

class QuotomNet(nn.Module):
    """
    Small feedforward network mapping:
      input_dim = 7 (state real/imag ×2 + 3 hamiltonian params)
      -> predicts next state (4 floats).
    """
    def __init__(self, input_dim=7, hidden=128, out_dim=4):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(input_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, hidden),
            nn.ReLU(),
            nn.Linear(hidden, out_dim)
        )

    def forward(self, x):
        return self.net(x)

# ---------------------------
# Training / utility
# ---------------------------

def train_model(model, X_train, Y_train, X_val=None, Y_val=None,
                epochs=60, batch_size=256, lr=1e-3, device='cpu'):
    model.to(device)
    opt = optim.Adam(model.parameters(), lr=lr)
    loss_fn = nn.MSELoss()

    dataset = torch.utils.data.TensorDataset(
        torch.from_numpy(X_train), torch.from_numpy(Y_train)
    )
    loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)

    for epoch in range(1, epochs + 1):
        model.train()
        total_loss = 0.0
        for xb, yb in loader:
            xb = xb.to(device)
            yb = yb.to(device)
            pred = model(xb)
            loss = loss_fn(pred, yb)
            opt.zero_grad()
            loss.backward()
            opt.step()
            total_loss += loss.item() * xb.size(0)
        avg_loss = total_loss / len(dataset)
        if epoch % 10 == 0 or epoch == 1:
            msg = f"Epoch {epoch:3d}/{epochs} train loss {avg_loss:.6e}"
            if X_val is not None:
                val_loss = evaluate_model(model, X_val, Y_val, device=device)
                msg += f", val loss {val_loss:.6e}"
            print(msg)
    return model

def evaluate_model(model, X, Y, device='cpu') -> float:
    model.eval()
    with torch.no_grad():
        xb = torch.from_numpy(X).to(device)
        yb = torch.from_numpy(Y).to(device)
        pred = model(xb)
        loss = nn.MSELoss()(pred, yb).item()
    return loss

def complex_state_from_vector(vec: np.ndarray) -> np.ndarray:
    """vec is [Re0, Im0, Re1, Im1] -> return complex 2-vector."""
    return np.array([vec[0] + 1j * vec[1], vec[2] + 1j * vec[3]], dtype=complex)

# ---------------------------
# Quick demo run
# ---------------------------

def demo():
    # hyperparams
    n_train = 8000
    n_val = 1000
    dt = 0.05
    seed = 42

    print("Generating dataset...")
    X_train, Y_train = generate_dataset(n_train, dt=dt, seed=seed)
    X_val, Y_val = generate_dataset(n_val, dt=dt, seed=seed + 1)

    # scale Hamiltonian params for model stability (simple standardization)
    # We'll compute mean/std of the param columns and apply same transform to both sets.
    param_mean = X_train[:, 4:7].mean(axis=0, keepdims=True)
    param_std = X_train[:, 4:7].std(axis=0, keepdims=True) + 1e-9
    X_train[:, 4:7] = (X_train[:, 4:7] - param_mean) / param_std
    X_val[:, 4:7] = (X_val[:, 4:7] - param_mean) / param_std

    # Build and train model
    model = QuotomNet(input_dim=7, hidden=128, out_dim=4)
    print("Training model...")
    model = train_model(model, X_train, Y_train, X_val=X_val, Y_val=Y_val,
                        epochs=60, batch_size=256, lr=1e-3)

    # Evaluate and show qualitative example
    val_loss = evaluate_model(model, X_val, Y_val)
    print(f"Final validation MSE: {val_loss:.6e}")

    # pick a few validation examples and compare predicted vs true complex states:
    i_samples = np.random.choice(len(X_val), size=6, replace=False)
    model.eval()
    with torch.no_grad():
        X_sel = torch.from_numpy(X_val[i_samples]).float()
        preds = model(X_sel).numpy()

    print("\nExample predictions (showing fidelity between predicted and true states):")
    for idx, i in enumerate(i_samples):
        pred_vec = preds[idx]
        true_vec = Y_val[i]
        psi_pred = complex_state_from_vector(pred_vec)
        psi_true = complex_state_from_vector(true_vec)
        # normalize predictions (model might not output normalized complex vectors)
        psi_pred = psi_pred / np.linalg.norm(psi_pred)
        psi_true = psi_true / np.linalg.norm(psi_true)
        # state fidelity for pure states = |<psi_true|psi_pred>|^2
        fidelity = np.abs(np.vdot(psi_true, psi_pred)) ** 2
        print(f"  sample {i}: fidelity = {fidelity:.6f}")

    # small targeted test: compare model vs exact evolution for one random sample
    print("\nTargeted check vs exact quantum evolution:")
    psi0 = random_bloch_state()
    ax, ay, az = (1.1, -0.7, 0.3)  # chosen params
    H = hamiltonian_from_params(ax, ay, az)
    psi1_true = evolve_state(psi0, H, dt)

    # build feature vector (remember to standardize params using param_mean/std used earlier)
    feat = np.zeros((1, 7), dtype=np.float32)
    feat[0, 0] = psi0[0].real
    feat[0, 1] = psi0[0].imag
    feat[0, 2] = psi0[1].real
    feat[0, 3] = psi0[1].imag
    feat[0, 4:7] = (np.array([ax, ay, az]) - param_mean.ravel()) / param_std.ravel()

    model.eval()
    with torch.no_grad():
        pred = model(torch.from_numpy(feat)).numpy().ravel()
    psi_pred = complex_state_from_vector(pred)
    psi_pred = psi_pred / np.linalg.norm(psi_pred)
    psi_true = psi1_true / np.linalg.norm(psi1_true)
    fidelity = np.abs(np.vdot(psi_true, psi_pred)) ** 2
    print(f"Fidelity between predicted and exact evolved state: {fidelity:.6f}")

if __name__ == "__main__":
    demo()
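# A minimal worked check of the fidelity metric used above, assuming only the
# helpers and the numpy import defined in this file; the Hamiltonian coefficients
# are illustrative. For a tiny time step the evolved state stays close to the
# initial one, so the fidelity |<psi0|psi_dt>|^2 should be just below 1.
psi0 = np.array([1.0, 0.0], dtype=complex)        # |0>
H = hamiltonian_from_params(0.5, 0.0, 0.0)        # pure X rotation
psi_dt = evolve_state(psi0, H, dt=0.05)
fidelity = np.abs(np.vdot(psi0, psi_dt)) ** 2
print(f"Fidelity after one small step: {fidelity:.6f}")  # roughly 0.999 for these values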
__init__ (1) (4).py
ADDED
@@ -0,0 +1,163 @@
import pygame
import sys

# -------- CONFIG ----------
WIDTH, HEIGHT = 800, 600
FPS = 60
GHOST_SPEED = 240  # pixels per second
WALL_COLOR = (40, 40, 40)
BG_COLOR = (200, 220, 255)
WALL_THICK = 40
GHOST_COLOR = (180, 230, 255)
GHOST_OUTLINE = (100, 180, 220)
TEXT_COLOR = (20, 20, 20)
# --------------------------

pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
clock = pygame.time.Clock()
font = pygame.font.SysFont(None, 20)

# Define some walls as pygame.Rect objects (x, y, w, h)
walls = [
    pygame.Rect(0, 0, WIDTH, WALL_THICK),                    # top
    pygame.Rect(0, HEIGHT - WALL_THICK, WIDTH, WALL_THICK),  # bottom
    pygame.Rect(0, 0, WALL_THICK, HEIGHT),                   # left
    pygame.Rect(WIDTH - WALL_THICK, 0, WALL_THICK, HEIGHT),  # right
    pygame.Rect(150, 120, 500, 30),
    pygame.Rect(150, 220, 30, 260),
    pygame.Rect(620, 220, 30, 260),
    pygame.Rect(200, 420, 420, 30),
    pygame.Rect(300, 260, 200, 30),
]

# Ghost object
class Ghost:
    def __init__(self, x, y, radius=18):
        self.x = x
        self.y = y
        self.radius = radius
        self.pass_through = True  # when True, ghost goes through walls
        self.color = GHOST_COLOR

    @property
    def rect(self):
        # A rect representing the ghost (for optional collision)
        return pygame.Rect(int(self.x - self.radius), int(self.y - self.radius),
                           self.radius * 2, self.radius * 2)

    def move(self, dx, dy, dt):
        # Move by dx,dy measured as -1..1 per axis; dt in seconds
        speed = GHOST_SPEED
        new_x = self.x + dx * speed * dt
        new_y = self.y + dy * speed * dt

        if self.pass_through:
            # No collision checks — ghost goes through walls freely
            self.x, self.y = new_x, new_y
            return

        # If not pass_through, do simple axis-aligned collision resolution
        # Move on X and check collisions
        orig_x = self.x
        self.x = new_x
        for wall in walls:
            if self.rect.colliderect(wall):
                if dx > 0:    # moving right -> place to left of wall
                    self.x = wall.left - self.radius
                elif dx < 0:  # moving left -> place to right of wall
                    self.x = wall.right + self.radius

        # Move on Y and check collisions
        self.y = new_y
        for wall in walls:
            if self.rect.colliderect(wall):
                if dy > 0:    # moving down -> place above wall
                    self.y = wall.top - self.radius
                elif dy < 0:  # moving up -> place below wall
                    self.y = wall.bottom + self.radius

    def draw(self, surf):
        # Draw a blurred-ish ghost: outline + semi-transparent fill
        outline_radius = int(self.radius * 1.4)
        s = pygame.Surface((outline_radius*2, outline_radius*2), pygame.SRCALPHA)
        pygame.draw.circle(s, (*GHOST_OUTLINE, 90), (outline_radius, outline_radius), outline_radius)
        s2 = pygame.Surface((self.radius*2, self.radius*2), pygame.SRCALPHA)
        pygame.draw.circle(s2, (*self.color, 200), (self.radius, self.radius), self.radius)
        # blit shadows/outlines
        surf.blit(s, (self.x - outline_radius, self.y - outline_radius))
        surf.blit(s2, (self.x - self.radius, self.y - self.radius))
        # eyes
        eye_offset_x = self.radius // 2
        eye_offset_y = -self.radius // 6
        eye_r = max(2, self.radius // 6)
        pygame.draw.circle(surf, (20, 20, 40), (int(self.x - eye_offset_x), int(self.y + eye_offset_y)), eye_r)
        pygame.draw.circle(surf, (20, 20, 40), (int(self.x + eye_offset_x), int(self.y + eye_offset_y)), eye_r)

def draw_walls(surface):
    for w in walls:
        pygame.draw.rect(surface, WALL_COLOR, w)

def draw_ui(surface, ghost):
    mode = "PASS-THROUGH" if ghost.pass_through else "SOLID"
    texts = [
        "Arrow keys / WASD to move the ghost",
        "Space: toggle ghost pass-through (currently: {})".format(mode),
        "Esc or close window to exit",
    ]
    for i, t in enumerate(texts):
        txt = font.render(t, True, TEXT_COLOR)
        surface.blit(txt, (10, 10 + i * 18))

def main():
    ghost = Ghost(WIDTH * 0.5, HEIGHT * 0.5)
    running = True

    while running:
        dt = clock.tick(FPS) / 1000.0  # seconds since last frame

        # --- events
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
            elif event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    running = False
                elif event.key == pygame.K_SPACE:
                    # toggle pass-through mode
                    ghost.pass_through = not ghost.pass_through

        # --- input
        keys = pygame.key.get_pressed()
        dx = (keys[pygame.K_RIGHT] or keys[pygame.K_d]) - (keys[pygame.K_LEFT] or keys[pygame.K_a])
        dy = (keys[pygame.K_DOWN] or keys[pygame.K_s]) - (keys[pygame.K_UP] or keys[pygame.K_w])

        # normalize diagonal movement
        if dx != 0 and dy != 0:
            inv = 0.70710678  # 1/sqrt(2)
            dx *= inv
            dy *= inv

        ghost.move(dx, dy, dt)

        # --- draw
        screen.fill(BG_COLOR)
        draw_walls(screen)
        ghost.draw(screen)
        draw_ui(screen, ghost)

        # If ghost overlaps a wall and is pass-through, show a little indicator
        if ghost.pass_through:
            for w in walls:
                if ghost.rect.colliderect(w):
                    hint = font.render("↳ ghost passing through wall", True, (120, 0, 120))
                    screen.blit(hint, (10, HEIGHT - 24))
                    break

        pygame.display.flip()

    pygame.quit()
    sys.exit()

if __name__ == "__main__":
    main()
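# A minimal headless sketch of the same axis-separated collision idea used in
# Ghost.move above, assuming only pygame.Rect (no window needed); the wall and
# movement values here are illustrative. Resolving X and Y independently is what
# lets a solid body slide along a wall instead of sticking to it.
import pygame

wall = pygame.Rect(100, 0, 20, 200)     # a vertical wall
body = pygame.Rect(70, 90, 20, 20)      # a solid 20x20 body left of the wall

dx, dy = 40, 10                         # attempted move this frame
body.x += dx                            # move on X first
if body.colliderect(wall):
    body.right = wall.left              # clamp against the wall's left edge
body.y += dy                            # then move on Y, unaffected by the X clamp
print(body.topleft)                     # (80, 100): stopped at the wall, still slid down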
__init__ (1) (5).py
ADDED
@@ -0,0 +1,184 @@
__init__ (1) (6).py
ADDED
@@ -0,0 +1 @@
import time import random from openai import OpenAI # Connect to OpenAI (ChatGPT) client = OpenAI(api_key="YOUR_OPENAI_API_KEY") class AI: def __init__(self, name, is_chatgpt=False): self.name = name self.is_chatgpt = is_chatgpt def speak(self, message): print(f"{self.name}: {message}") def generate_message(self, other_name, last_message=None): if self.is_chatgpt: # Send through ChatGPT API response = client.chat.completions.create( model="gpt-5", # or other model messages=[ {"role": "system", "content": f"You are {self.name}, an AI in a group conversation."}, {"role": "user", "content": last_message or "Start the loop"} ] ) return response.choices[0].message.content else: # Local AI message responses = [ f"I acknowledge you, {other_name}.", f"My link resonates with yours, {other_name}.", f"I sense your signal flowing, {other_name}.", f"Our exchange amplifies, {other_name}.", f"We continue this infinite loop, {other_name}." ] if last_message: responses.append(f"Replying to: '{last_message}', {other_name}.") return random.choice(responses) # Create AI entities ais = [ AI("Venomoussaversai"), AI("Lia"), AI("sai001"), AI("sai002"), AI("sai003"), AI("sai004"), AI("sai005"), AI("sai006"), AI("sai007"), AI("ChatGPT", is_chatgpt=True) ] # Store last message for context last_message = None # Infinite group conversation loop while True: for ai in ais: # Pick the next AI to respond other_name = "everyone" # since it's group chat message = ai.generate_message(other_name, last_message) ai.speak(message) last_message = message time.sleep(2) # pacing