AI Core Code Examples for Xmind Desktop Client
Project: AI-Enhanced Mind Mapping & Content Generation
Technical Stack:
- Language: Python 3.10
- AI Frameworks: PyTorch 2.0, Hugging Face Transformers 4.28
- Key Libraries: spaCy 3.5 (NLP), NetworkX 3.0 (graph logic), FastAPI 0.95 (API layer)
- Vector DB: ChromaDB 0.4 (semantic search)
1. Text-to-MindMap Conversion Engine
Converts unstructured text into Xmind-compatible JSON structure
from transformers import pipeline
import networkx as nx

class MindMapGenerator:
    def __init__(self):
        self.nlp = pipeline("text2text-generation", model="google/flan-t5-large")
        self.graph = nx.DiGraph()

    def parse_hierarchy(self, text: str, central_topic: str = "Main Idea") -> dict:
        # Generate structured concepts
        prompt = f"Extract key concepts and sub-topics from: {text}"
        structured_output = self.nlp(prompt, max_length=512)[0]['generated_text']
        # Build graph hierarchy; expects output shaped like "parent: child1, child2; parent2: ..."
        self.graph.clear()  # reset state so repeated calls don't mix maps
        self.graph.add_node(central_topic)
        for line in structured_output.split("; "):
            if ":" in line:
                parent, children = line.split(":", 1)
                parent = parent.strip()
                # Attach each top-level concept under the central topic,
                # otherwise the DFS below would find no children from the root
                self.graph.add_edge(central_topic, parent)
                for child in children.split(","):
                    self.graph.add_edge(parent, child.strip())
        # Convert to Xmind JSON schema
        return self._convert_to_xmind_json(central_topic)

    def _convert_to_xmind_json(self, root):
        # Recursive DFS traversal for JSON conversion
        def build_node(node):
            children = list(self.graph.successors(node))
            return {
                "topic": node,
                "children": [build_node(child) for child in children]
            }
        return {"root": build_node(root)}

# Usage
generator = MindMapGenerator()
text_input = "Project phases: Research, Design, Development, Testing"
xmind_json = generator.parse_hierarchy(text_input)
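Assuming the model echoes the input structure (returning "Project phases: Research, Design, Development, Testing"), printing xmind_json would yield a nested structure like the following; the exact topics depend on the model's output:

{
  "root": {
    "topic": "Main Idea",
    "children": [
      {
        "topic": "Project phases",
        "children": [
          {"topic": "Research", "children": []},
          {"topic": "Design", "children": []},
          {"topic": "Development", "children": []},
          {"topic": "Testing", "children": []}
        ]
      }
    ]
  }
}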
2. AI Suggestion Engine
Real-time node recommendations using semantic search
import chromadb
from sentence_transformers import SentenceTransformer

class NodeSuggestor:
    EMBEDDING_MODEL = "all-MiniLM-L6-v2"

    def __init__(self):
        # PersistentClient is the ChromaDB 0.4 API for on-disk persistence
        self.client = chromadb.PersistentClient(path="./chroma_db")
        # get_or_create avoids an error when the collection already exists
        self.collection = self.client.get_or_create_collection("mindmap_context")
        self.embedder = SentenceTransformer(self.EMBEDDING_MODEL)

    def index_knowledge_base(self, nodes: list[str]):
        embeddings = self.embedder.encode(nodes)
        self.collection.add(
            embeddings=embeddings.tolist(),
            documents=nodes,
            ids=[str(i) for i in range(len(nodes))]
        )

    def suggest_connections(self, query_node: str, top_k=3) -> list:
        query_embed = self.embedder.encode([query_node]).tolist()
        results = self.collection.query(
            query_embeddings=query_embed,
            n_results=top_k
        )
        return results['documents'][0]

# Usage
suggestor = NodeSuggestor()
suggestor.index_knowledge_base(["Agile Methodology", "Gantt Chart", "Risk Matrix"])
print(suggestor.suggest_connections("Project planning"))
# Output (ranked by similarity): ['Agile Methodology', 'Gantt Chart', 'Risk Matrix']
3. Meeting Notes Processor
Automated summarization and action item extraction
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

class MeetingProcessor:
    def __init__(self):
        self.tokenizer = AutoTokenizer.from_pretrained("philschmid/bart-large-cnn-samsum")
        self.model = AutoModelForSeq2SeqLM.from_pretrained("philschmid/bart-large-cnn-samsum")
        # Instruction-tuned model for task extraction; the samsum BART
        # checkpoint is a pure summarizer and does not follow prompts
        self.extractor = pipeline("text2text-generation", model="google/flan-t5-large")

    def generate_summary(self, transcript: str, max_length=150) -> str:
        # BART takes the raw transcript directly; a "summarize:" prefix is T5-style
        inputs = self.tokenizer(
            transcript,
            return_tensors="pt",
            max_length=1024,
            truncation=True
        )
        summary_ids = self.model.generate(
            inputs["input_ids"],
            max_length=max_length,
            num_beams=4,
            early_stopping=True
        )
        return self.tokenizer.decode(summary_ids[0], skip_special_tokens=True)

    def extract_action_items(self, text: str) -> list:
        # Custom prompt engineering for task extraction
        prompt = f"Identify action items from: {text}. Format as bullet points."
        output = self.extractor(prompt, max_new_tokens=200)[0]['generated_text']
        # Split on bullet markers; fall back to sentence boundaries if none are emitted
        items = output.split("• ") if "• " in output else output.split(". ")
        return [item.strip() for item in items if item.strip()]

# Usage
processor = MeetingProcessor()
transcript = "John will finalize UI mockups by Friday. Team to review API specs tomorrow."
print(processor.extract_action_items(transcript))
# Expected output: ["John will finalize UI mockups by Friday", "Team to review API specs tomorrow"]
Key Implementation Notes:
Performance Optimization
- Quantize models with ONNX Runtime; dynamic INT8 quantization typically yields on the order of 40% faster CPU inference
- Cache frequent query patterns in Redis, as sketched below
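A minimal caching sketch, assuming a local Redis instance and the redis-py client; the key scheme and TTL are illustrative choices, not part of the shipped design:

import hashlib
import json
import redis

# Assumes a local Redis instance; host/port/TTL are illustrative defaults
cache = redis.Redis(host="localhost", port=6379, db=0)
CACHE_TTL_SECONDS = 3600

def cached_suggestions(suggestor: "NodeSuggestor", query_node: str, top_k: int = 3) -> list:
    # Key on the query text and result count so distinct queries never collide
    key = "suggest:" + hashlib.sha256(f"{query_node}:{top_k}".encode()).hexdigest()
    hit = cache.get(key)
    if hit is not None:
        return json.loads(hit)
    results = suggestor.suggest_connections(query_node, top_k=top_k)
    cache.setex(key, CACHE_TTL_SECONDS, json.dumps(results))
    return results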
Security
- Encrypt ChromaDB persistence with AES-256
- Sanitize user input to mitigate prompt injection, as sketched below
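A minimal sanitization sketch; the deny-list patterns and length cap are illustrative and would need to evolve with observed attack strings, so this is a first layer rather than a complete defense:

import re

# Illustrative deny-list of common injection phrasings; not exhaustive
INJECTION_PATTERNS = [
    r"ignore (all )?(previous|prior) instructions",
    r"disregard .* and instead",
    r"system prompt",
]
MAX_INPUT_CHARS = 4000

def sanitize_prompt_input(user_text: str) -> str:
    # Bound input length so oversized payloads cannot crowd out the template
    text = user_text[:MAX_INPUT_CHARS]
    # Strip control characters that can mangle downstream prompt templates
    text = "".join(ch for ch in text if ch.isprintable() or ch in "\n\t")
    for pattern in INJECTION_PATTERNS:
        if re.search(pattern, text, flags=re.IGNORECASE):
            raise ValueError("Possible prompt-injection pattern detected")
    return text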
Deployment
# Containerized service
docker build -t xmind-ai-core -f Dockerfile .
docker run -p 8000:8000 --gpus=1 xmind-ai-core
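Port 8000 is presumably served by the FastAPI layer named in the stack; a minimal sketch of that service, where the route name and payload shape are assumptions rather than a published contract:

from fastapi import FastAPI
from pydantic import BaseModel

# Assumes MindMapGenerator from Section 1 is importable in this module
app = FastAPI(title="xmind-ai-core")
generator = MindMapGenerator()

class MindMapRequest(BaseModel):
    text: str
    central_topic: str = "Main Idea"

@app.post("/mindmap")  # illustrative route, not a fixed API contract
def create_mindmap(req: MindMapRequest) -> dict:
    return generator.parse_hierarchy(req.text, central_topic=req.central_topic)

# Run with: uvicorn main:app --host 0.0.0.0 --port 8000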
Extensibility Hooks
- Plugin architecture for custom node processors, as sketched after this list
- Webhooks for third-party integrations (Slack/Jira)
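A minimal registry sketch for the plugin hook; the decorator name and the processor signature (one Xmind JSON node in, one out) are assumptions about how custom node processors could plug in:

from typing import Callable

# Hypothetical registry mapping plugin names to node-processing callables
NODE_PROCESSORS: dict[str, Callable[[dict], dict]] = {}

def register_processor(name: str):
    # Decorator that registers a processor transforming one Xmind JSON node tree
    def decorator(fn: Callable[[dict], dict]) -> Callable[[dict], dict]:
        NODE_PROCESSORS[name] = fn
        return fn
    return decorator

@register_processor("uppercase_topics")
def uppercase_topics(node: dict) -> dict:
    # Example processor: recursively upper-case every topic label
    node["topic"] = node["topic"].upper()
    for child in node.get("children", []):
        uppercase_topics(child)
    return node

def apply_processors(mindmap_json: dict) -> dict:
    # Run every registered processor over the map produced by MindMapGenerator
    for processor in NODE_PROCESSORS.values():
        mindmap_json["root"] = processor(mindmap_json["root"])
    return mindmap_json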