Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
173 changes: 110 additions & 63 deletions setup/pyproject.toml
Original file line number Diff line number Diff line change
@@ -1,88 +1,135 @@
[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"
# EmailIntelligence - Optimized Dependencies for System-Managed Python
# Conditional dependencies to minimize unnecessary downloads

[project]
name = "repl-nix-workspace"
version = "0.1.0"
description = "Add your description here"
requires-python = ">=3.12"
name = "emailintelligence"
version = "0.2.0"
description = "Email Intelligence with optimized dependencies for system-managed Python environments"
requires-python = ">=3.11"

# Core dependencies only - always required
dependencies = [
"email-validator>=2.2.0",
"httpx>=0.28.1",
# Web framework (always needed)
"fastapi>=0.115.12",
"uvicorn[standard]>=0.34.3",
"pydantic>=2.11.5",
"python-dotenv>=1.1.0",
"python-multipart>=0.0.20",
"email-validator>=2.2.0",
"httpx>=0.28.1",

# Basic UI (always needed)
"gradio>=4.0.0",
"RestrictedPython>=8.0",
"pyngrok>=0.7.0",

# Basic data handling (always needed)
"aiosqlite>=0.20.0",
"RestrictedPython>=8.0",

# Core Logic
"pyjwt>=2.8.0",
"pyotp>=2.9.0",
"qrcode>=7.0",
"networkx>=3.2.1",
"argon2-cffi>=23.1.0",
"psutil>=5.9.0",

# Compatibility fixes
# markupsafe 3.0.2 lacks wheels for Python 3.11/3.12 on some platforms
"markupsafe<3.0.0"
]

[project.optional-dependencies]
# ML/AI features - optional, large downloads
ml = [
"nltk>=3.9.1",
"textblob>=0.19.0",
"transformers>=4.40.0",
"torch>=2.4.0",
"accelerate>=0.30.0",
"transformers>=4.52.4",
"accelerate>=1.7.0",
"sentencepiece>=0.2.0",
"scikit-learn>=1.5.0",
"scikit-learn>=1.7.0",
"joblib>=1.5.1",
# CPU-only PyTorch (much smaller than GPU versions)
"torch>=2.7.1",
]

# Data science features - optional
data = [
"pandas>=2.0.0",
"numpy>=1.26.0",
"scipy>=1.11.0",
]

# Visualization features - optional
viz = [
"matplotlib>=3.8.0",
"seaborn>=0.13.0",
"plotly>=5.18.0",
]

# Database features - optional based on deployment
db = [
"psycopg2-binary>=2.9.10",
"redis>=5.0.0",
"notmuch>=0.29",
]

# Google API features - optional
google = [
"google-api-python-client>=2.172.0",
"google-auth>=2.40.3",
"google-auth-oauthlib>=1.2.2",
"redis>=5.0.0",
<<<<<<< HEAD
<<<<<<< HEAD
=======
"rq>=1.15.0",
>>>>>>> scientific
=======
>>>>>>> origin/main
"notmuch>=0.29",
"textblob>=0.19.0"
]

# Development tools - only for development
dev = [
# Testing
"pytest>=8.4.0",
"pytest-cov>=6.0.0",
"pytest-asyncio>=1.2.0",

# Code quality - Ruff (fast Python linter/formatter)
"ruff>=0.9.0",

# Security scanning
"bandit>=1.8.0",

# Legacy tools (still used in CI)
"black>=25.1.0",
"flake8>=7.2.0",
"google-api-python-client>=2.172.0",
"google-auth>=2.40.3",
"google-auth-oauthlib>=1.2.2",
"gradio>=5.34.0",
"isort>=6.0.1",
"mypy>=1.16.0",
"pylint>=3.3.7",
"pytest>=8.4.0",
"pytest-asyncio>=0.23.0"
"mypy>=1.16.0",
"unimport>=1.3.0",

# Notebooks
"jupyter>=1.0.0",
"ipykernel>=6.29.0",
"ipywidgets>=8.1.0",

# Utilities
"pip-autoremove>=0.10.0",
]

[tool.black]
line-length = 100
target-version = ['py311']
include = '\.pyi?$'
exclude = '''
/(
\.git
| \.hg
| \.mypy_cache
| \.tox
| venv
| _build
| buck-out
| build
| dist
)/
'''

[tool.isort]
profile = "black"
line_length = 100

[tool.pytest.ini_options]
addopts = "--asyncio-mode=auto"
markers = [
"asyncio: mark a test as asynchronous (run by pytest-asyncio)",
]
# Minimal installation - just core features
minimal = []

# Full installation - all features except GPU
full = [
"emailintelligence[ml,data,viz,db,google,dev]",
]
norecursedirs = ["backend"]

[build-system]
requires = ["setuptools", "wheel"]
build-backend = "setuptools.build_meta"

[tool.setuptools]
# Exclude large packages from default installation
include-package-data = false

[tool.setuptools.packages.find]
where = ["src"]

[[tool.uv.index]]
name = "pytorch-cpu"
url = "https://download.pytorch.org/whl/cpu"
66 changes: 30 additions & 36 deletions setup/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,56 +1,50 @@
# EmailIntelligence Python Requirements
# Install in virtual environment only

# Core web framework
fastapi>=0.115.12
uvicorn[standard]>=0.34.3
pydantic>=2.11.5
python-multipart>=0.0.20
# EmailIntelligence Base Requirements
# Core dependencies that work across all environments
# Environment-specific overrides are applied via requirements-*.txt files

# AI/ML packages (CPU versions - CUDA-free)
# Note: PyTorch CPU-only versions are installed via setup scripts
transformers>=4.40.0 # CPU-only, no CUDA dependencies
# Core AI/ML (CPU-only versions - installed via setup scripts)
# torch>=2.4.0
# torchvision>=0.19.0
# torchaudio>=2.4.0
transformers>=4.40.0
accelerate>=0.30.0
sentencepiece>=0.2.0
scikit-learn>=1.5.0
joblib>=1.5.1

# Data science
pandas>=2.0.0
numpy>=1.26.0
matplotlib>=3.8.0
seaborn>=0.13.0
scipy>=1.11.0
plotly>=5.18.0

# NLP
# NLP and text processing
nltk>=3.9.1
textblob>=0.19.0

# Web and API
# Web framework and API
fastapi>=0.100.0
pydantic>=2.11.5
pydantic-settings>=2.0.0
uvicorn>=0.30.0
python-multipart>=0.0.20
httpx>=0.28.1
gradio>=4.0.0
pyngrok>=0.7.0
email-validator>=2.2.0

# Data processing and utilities
scikit-learn>=1.5.0
joblib>=1.5.1
RestrictedPython>=8.0
aiosqlite>=0.20.0
redis>=5.0.0

# Google APIs
google-api-python-client>=2.172.0
google-auth>=2.40.3
google-auth-oauthlib>=1.2.2

# Security and utilities
# Email processing
email-validator>=2.2.0

# Development and testing tools
pytest>=8.4.0
pytest-asyncio>=0.23.0

# Additional utilities (may be installed via system packages)
bleach>=6.0.0
python-dotenv>=1.1.0
pydantic-settings>=2.0.0
psutil>=6.0.0
aiosqlite>=0.19.0
RestrictedPython>=8.0
pyotp>=2.9.0
qrcode>=7.4.2
18 changes: 0 additions & 18 deletions src/backend/python_backend/model_routes.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
"""
API routes for managing AI models.
"""
Expand All @@ -25,11 +24,6 @@ async def list_models(
"""Lists all discovered models and their current status.
Requires authentication."""
return model_manager.list_models()


Expand All @@ -40,13 +34,7 @@ async def load_model(
model_manager: ModelManager = Depends(get_model_manager),
):
"""Loads a specific model into memory.
Requires authentication."""
try:
model_manager.load_model(model_name)
return {"message": f"Model '{model_name}' loaded successfully."}
Expand All @@ -67,13 +55,7 @@ async def unload_model(
model_manager: ModelManager = Depends(get_model_manager),
):
"""Unloads a specific model from memory.
Requires authentication."""
try:
model_manager.unload_model(model_name)
return {"message": f"Model '{model_name}' unloaded successfully."}
Expand Down
6 changes: 0 additions & 6 deletions src/backend/python_backend/tests/test_email_routes.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
from datetime import datetime
from unittest.mock import MagicMock

Expand Down Expand Up @@ -181,11 +180,6 @@ def test_plugin_workflow_e2e(client_with_real_workflows, mock_db_manager, mock_a
assert activate_response.status_code == 200
assert activate_response.json()["message"] == "Active workflow set to 'example_uppercase'."

# 2. Prepare the email data
new_email_data = {
Expand Down
32 changes: 0 additions & 32 deletions src/backend/python_backend/tests/test_training_routes.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
"""
Tests for training routes.
"""
Expand Down Expand Up @@ -92,37 +91,6 @@ async def test_run_training():
mock_lr_instance = MagicMock()
mock_lr_instance.predict.return_value = ["neu"]
mock_lr.return_value = mock_lr_instance

mock_acc.return_value = 0.5

Expand Down
Loading
Loading