- Renamed project from Celuna to AISSIA - Updated all documentation and configuration files - Codebase improvements and fixes 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
155 lines
5.1 KiB
C++
#include <shared/testing/ITestModule.h>
|
|
#include <grove/JsonDataNode.h>
|
|
#include <grove/IIO.h>
|
|
#include <spdlog/spdlog.h>
|
|
#include <chrono>
|
|
#include <thread>
|
|
|
|
namespace celuna::testing {
|
|
|
|
/**
|
|
* @brief Test Voice → AI communication flow
|
|
*
|
|
* Workflow:
|
|
* 1. Publish voice:transcription (simulating voice input)
|
|
* 2. Wait for ai:query or llm:request (AIModule processing)
|
|
* 3. Verify AIModule received and forwarded the transcription
|
|
*/
|
|
class IT_005_VoiceToAI : public ITestModule {
|
|
public:
|
|
std::string getTestName() const override {
|
|
return "IT_005_VoiceToAI";
|
|
}
|
|
|
|
std::string getDescription() const override {
|
|
return "Test Voice → AI communication flow";
|
|
}
|
|
|
|
void setConfiguration(const grove::IDataNode& config,
|
|
grove::IIO* io,
|
|
grove::ITaskScheduler* scheduler) override {
|
|
m_io = io;
|
|
m_scheduler = scheduler;
|
|
m_timeout = config.getInt("timeoutMs", 10000);
|
|
|
|
// Subscribe to LLM request to detect AI processing
|
|
grove::SubscriptionConfig subConfig;
|
|
m_io->subscribe("llm:request", subConfig);
|
|
m_io->subscribe("llm:response", subConfig);
|
|
|
|
spdlog::info("[{}] Configured", getTestName());
|
|
}
|
|
|
|
void process(const grove::IDataNode& input) override {}
|
|
void shutdown() override {}
|
|
|
|
const grove::IDataNode& getConfiguration() override {
|
|
static grove::JsonDataNode config("config");
|
|
return config;
|
|
}
|
|
|
|
std::unique_ptr<grove::IDataNode> getHealthStatus() override {
|
|
auto status = std::make_unique<grove::JsonDataNode>("health");
|
|
status->setString("status", "healthy");
|
|
return status;
|
|
}
|
|
|
|
std::unique_ptr<grove::IDataNode> getState() override {
|
|
return std::make_unique<grove::JsonDataNode>("state");
|
|
}
|
|
|
|
void setState(const grove::IDataNode& state) override {}
|
|
|
|
std::string getType() const override { return "IT_005_VoiceToAI"; }
|
|
int getVersion() const override { return 1; }
|
|
bool isIdle() const override { return true; }
|
|
|
|
TestResult execute() override {
|
|
auto start = std::chrono::steady_clock::now();
|
|
TestResult result;
|
|
result.testName = getTestName();
|
|
|
|
try {
|
|
spdlog::info("[{}] Simulating voice transcription...", getTestName());
|
|
|
|
// 1. Simulate voice transcription
|
|
auto transcription = std::make_unique<grove::JsonDataNode>("transcription");
|
|
transcription->setString("text", "Bonjour AISSIA, test d'intégration voice to AI");
|
|
transcription->setDouble("confidence", 0.95);
|
|
m_io->publish("voice:transcription", std::move(transcription));
|
|
|
|
// 2. Wait for AI to process and forward to LLM
|
|
auto llmRequest = waitForMessage("llm:request", m_timeout);
|
|
|
|
if (!llmRequest) {
|
|
result.passed = false;
|
|
result.message = "Timeout waiting for llm:request (AI didn't process voice)";
|
|
return result;
|
|
}
|
|
|
|
// 3. Verify the query contains our transcription
|
|
std::string query = llmRequest->getString("query", "");
|
|
bool containsText = query.find("Bonjour AISSIA") != std::string::npos ||
|
|
query.find("test") != std::string::npos;
|
|
|
|
result.passed = containsText;
|
|
result.message = containsText ?
|
|
"AI received and processed voice transcription" :
|
|
"AI processed but query doesn't match transcription";
|
|
result.details["query"] = query;
|
|
|
|
} catch (const std::exception& e) {
|
|
result.passed = false;
|
|
result.message = std::string("Exception: ") + e.what();
|
|
spdlog::error("[{}] {}", getTestName(), result.message);
|
|
}
|
|
|
|
auto end = std::chrono::steady_clock::now();
|
|
result.durationMs = std::chrono::duration_cast<std::chrono::milliseconds>(
|
|
end - start).count();
|
|
|
|
return result;
|
|
}
|
|
|
|
private:
|
|
std::unique_ptr<grove::IDataNode> waitForMessage(
|
|
const std::string& topic, int timeoutMs) {
|
|
|
|
auto start = std::chrono::steady_clock::now();
|
|
|
|
while (true) {
|
|
if (m_io->hasMessages() > 0) {
|
|
auto msg = m_io->pullMessage();
|
|
if (msg.topic == topic && msg.data) {
|
|
return std::move(msg.data);
|
|
}
|
|
}
|
|
|
|
auto elapsed = std::chrono::duration_cast<std::chrono::milliseconds>(
|
|
std::chrono::steady_clock::now() - start).count();
|
|
|
|
if (elapsed > timeoutMs) {
|
|
return nullptr;
|
|
}
|
|
|
|
std::this_thread::sleep_for(std::chrono::milliseconds(100));
|
|
}
|
|
}
|
|
|
|
grove::IIO* m_io = nullptr;
|
|
grove::ITaskScheduler* m_scheduler = nullptr;
|
|
int m_timeout = 10000;
|
|
};
|
|
|
|
} // namespace celuna::testing
|
|
|
|
extern "C" {
|
|
grove::IModule* createModule() {
|
|
return new celuna::testing::IT_005_VoiceToAI();
|
|
}
|
|
|
|
void destroyModule(grove::IModule* module) {
|
|
delete module;
|
|
}
|
|
}
|