[251016] feat: WebSpeech API

🕐 Commit time: 2025-10-16 17:04:05

📊 Change statistics:
  • Total files: 13
  • Added: +558 lines
  • Deleted: -91 lines

📁 Added files:
  + com.twin.app.shoptime/src/actions/webSpeechActions.js
  + com.twin.app.shoptime/src/hooks/useWebSpeech.js
  + com.twin.app.shoptime/src/services/webSpeech/WebSpeechService.js
  + com.twin.app.shoptime/src/views/SearchPanel/VoiceInputOverlay/modes/VoiceResponse.jsx
  + com.twin.app.shoptime/src/views/SearchPanel/VoiceInputOverlay/modes/VoiceResponse.module.less

📝 Modified files:
  ~ com.twin.app.shoptime/src/actions/actionTypes.js
  ~ com.twin.app.shoptime/src/actions/voiceActions.js
  ~ com.twin.app.shoptime/src/reducers/voiceReducer.js
  ~ com.twin.app.shoptime/src/views/SearchPanel/SearchPanel.new.jsx
  ~ com.twin.app.shoptime/src/views/SearchPanel/VoiceInputOverlay/VoiceInputOverlay.jsx
  ~ com.twin.app.shoptime/src/views/VoicePanel/VoicePanel.jsx
  ~ com.twin.app.shoptime/src/views/VoicePanel/VoicePanel.module.less
  ~ com.twin.app.shoptime/web-speech.md

🔧 Function changes:
  📄 com.twin.app.shoptime/src/actions/voiceActions.js (javascript):
    🔄 Modified: addLog(), handleScrollIntent()
  📄 com.twin.app.shoptime/src/views/SearchPanel/VoiceInputOverlay/VoiceInputOverlay.jsx (javascript):
     Added: Spottable()
  📄 com.twin.app.shoptime/web-speech.md (markdown):
     Added: Framework()
    🔄 Modified: onresult()
     Deleted: Framework()
  📄 com.twin.app.shoptime/src/services/webSpeech/WebSpeechService.js (javascript):
     Added: WebSpeechService()
  📄 com.twin.app.shoptime/src/views/SearchPanel/VoiceInputOverlay/modes/VoiceResponse.jsx (javascript):
     Added: handleTalkAgainClick()
  📄 com.twin.app.shoptime/src/views/SearchPanel/VoiceInputOverlay/modes/VoiceResponse.module.less (less):
     Added: translateY()

🔧 Key changes:
  • Strengthened type-system stability
  • Improved core business logic
  • Improved developer documentation and guides
  • Improved the API service layer
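🧩 Module overview — a sketch inferred from the diffs below; it adds no behavior beyond what this commit ships:

```js
// Data flow between the new Web Speech modules (summary sketch):
//
//   WebSpeechService (singleton; wraps window.SpeechRecognition)
//     │  emits 'start' / 'result' / 'error' / 'end'
//     ▼
//   webSpeechActions (thunks) ── dispatch ──▶ voiceReducer (state.voice.webSpeech)
//     ▲                                          │
//     │  startListening() / stopListening()      │  useSelector
//   useWebSpeech (hook) ◀────────────────────────┘
//     │  onSTTText(finalTranscript)
//     ▼
//   VoiceInputOverlay (PROMPT → LISTENING → RESPONSE)
```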
2025-10-16 17:04:08 +09:00
parent 297ca5791f
commit d90385ec7d
13 changed files with 1200 additions and 90 deletions

View File

@@ -294,4 +294,12 @@ export const types = {
VOICE_ADD_LOG: 'VOICE_ADD_LOG',
VOICE_CLEAR_LOGS: 'VOICE_CLEAR_LOGS',
VOICE_STT_TEXT_RECEIVED: 'VOICE_STT_TEXT_RECEIVED', // STT text received
// 🔽 Web Speech API related
WEB_SPEECH_INITIALIZED: 'WEB_SPEECH_INITIALIZED',
WEB_SPEECH_START: 'WEB_SPEECH_START',
WEB_SPEECH_INTERIM_RESULT: 'WEB_SPEECH_INTERIM_RESULT',
WEB_SPEECH_END: 'WEB_SPEECH_END',
WEB_SPEECH_ERROR: 'WEB_SPEECH_ERROR',
WEB_SPEECH_CLEANUP: 'WEB_SPEECH_CLEANUP',
};

View File

@@ -65,14 +65,45 @@ export const registerVoiceFramework = () => (dispatch, getState) => {
voiceHandler = lunaSend.registerVoiceConductor({
onSuccess: (res) => {
console.log('[Voice] Response from voice framework:', res);
console.log('[Voice] Response from voice framework:', res);
console.log('[Voice] Response details:', {
subscribed: res.subscribed,
returnValue: res.returnValue,
command: res.command,
voiceTicket: res.voiceTicket,
action: res.action,
fullResponse: JSON.stringify(res),
});
// Log all responses
dispatch(addLog('RESPONSE', 'Voice Framework Response', res, true));
// ⭐ Add a log entry that shows up in the VoicePanel UI
dispatch(
addLog(
'RESPONSE',
'[VoiceConductor] Response Received',
{
...res,
_diagnostics: {
subscribed: res.subscribed,
hasCommand: !!res.command,
hasVoiceTicket: !!res.voiceTicket,
hasAction: !!res.action,
timestamp: new Date().toISOString(),
},
},
true
)
);
// Initial registration response
if (res.subscribed && res.returnValue && !res.command) {
console.log('[Voice] Registration successful');
dispatch(
addLog('ACTION', '[Voice] ✅ Registration Successful', {
message: 'Successfully registered with voice framework',
subscribed: true,
waiting: 'Waiting for setContext command from voice framework...',
})
);
dispatch({
type: types.VOICE_REGISTER_SUCCESS,
payload: { handler: voiceHandler },
@@ -83,9 +114,11 @@ export const registerVoiceFramework = () => (dispatch, getState) => {
if (res.command === 'setContext' && res.voiceTicket) {
console.log('[Voice] setContext command received, ticket:', res.voiceTicket);
dispatch(
addLog('COMMAND', 'setContext Command Received', {
addLog('COMMAND', '[VoiceConductor] setContext Command Received', {
command: res.command,
voiceTicket: res.voiceTicket,
message: 'Voice framework is requesting supported intents',
nextAction: 'Sending inAppIntents to voice framework...',
})
);
dispatch({
@@ -103,12 +136,13 @@ export const registerVoiceFramework = () => (dispatch, getState) => {
// ⭐ Important: log successful performAction receipt (explicitly)
dispatch(
addLog('COMMAND', '✅ performAction RECEIVED!', {
addLog('COMMAND', '[VoiceConductor] ✅✅✅ performAction RECEIVED!', {
message: '✅ SUCCESS! Voice framework sent performAction event.',
command: res.command,
action: res.action,
intent: res.action?.intent,
value: res.action?.value || res.action?.itemId,
timestamp: new Date().toISOString(),
})
);
@@ -128,7 +162,22 @@ export const registerVoiceFramework = () => (dispatch, getState) => {
onFailure: (err) => {
console.error('[Voice] Registration failed:', err);
dispatch(addLog('ERROR', 'Registration Failed', err, false));
dispatch(
addLog(
'ERROR',
'[VoiceConductor] ❌ Registration Failed',
{
...err,
message: 'Failed to register with voice framework',
possibleReasons: [
'Voice framework service might not be available',
'Permission denied for voice framework access',
'webOS version might not support voice framework',
],
},
false
)
);
dispatch({
type: types.VOICE_REGISTER_FAILURE,
payload: err,
@@ -152,6 +201,27 @@ export const sendVoiceIntents = (voiceTicket) => (dispatch, getState) => {
// Define the intents that this app supports
// This is a sample configuration - customize based on your app's features
// ⭐ Debugging tip: if UseIME does not work, test Select/Scroll first
console.log('[Voice] ⚠️ DEBUGGING TIP:');
console.log(' 1. UseIME might not be supported on all webOS versions');
console.log(' 2. Try saying "Search" or "Home" to test Select intent first');
console.log(' 3. If Select works but UseIME does not, UseIME is not supported');
console.log(' 4. Check webOS system logs: journalctl -u voiceconductor');
// Also surface this in the VoicePanel UI
dispatch(
addLog('ACTION', '[Voice] 🔧 Preparing Intents', {
message: 'About to send supported intents to voice framework',
debuggingTips: [
'UseIME might not be supported on all webOS versions',
'Try saying "Search" or "Home" to test Select intent first',
'If Select works but UseIME does not, UseIME is not supported',
'Check webOS system logs: ssh to TV, then: journalctl -u voiceconductor -f',
],
})
);
const inAppIntents = [
// UseIME intent - receive STT text
{
@@ -220,7 +290,18 @@ export const sendVoiceIntents = (voiceTicket) => (dispatch, getState) => {
onSuccess: (res) => {
console.log('[Voice] Voice context set successfully:', res);
// Log successful context setting
dispatch(addLog('RESPONSE', 'Set Voice Context Success', res, true));
dispatch(
addLog(
'RESPONSE',
'[Voice] ✅ Set Voice Context Success',
{
...res,
message: 'Successfully sent intents to voice framework',
intentsCount: inAppIntents.length,
},
true
)
);
dispatch({
type: types.VOICE_SET_CONTEXT_SUCCESS,
payload: res,
@@ -228,10 +309,21 @@ export const sendVoiceIntents = (voiceTicket) => (dispatch, getState) => {
// ⭐ Important: log the waiting-for-voice-input state
dispatch(
addLog('ACTION', '🎤 Ready for Voice Input', {
addLog('ACTION', '[Voice] 🎤 Ready for Voice Input', {
message: 'Context set successfully. Press the MIC button on remote and speak.',
nextStep: 'Waiting for performAction event...',
voiceTicket: voiceTicket,
testInstructions: {
useIME: 'Say anything for STT (e.g., "iPhone 15 Pro")',
select: 'Say "Search", "Home", "Cart", or "My Page"',
scroll: 'Say "Scroll Up" or "Scroll Down"',
},
debuggingTips: [
'If no performAction arrives, try Select intent first (say "Search")',
'UseIME might not be supported on this webOS version',
'Check if VoicePanel stays on top (isOnTop must be true)',
'Check system logs: ssh root@TV_IP, then: journalctl -u voiceconductor -f',
],
})
);
@@ -241,26 +333,82 @@ export const sendVoiceIntents = (voiceTicket) => (dispatch, getState) => {
// If lastSTTText has not been updated (i.e., performAction never arrived)
if (!currentState.lastSTTText || currentState.sttTimestamp < Date.now() - 14000) {
dispatch(
addLog('ERROR', '⚠️ No performAction received yet', {
addLog('ERROR', '[Voice] ⚠️ No performAction received yet', {
message: 'performAction event was not received within 15 seconds after setContext.',
possibleReasons: [
'1. Did you press the MIC button on the remote control?',
'2. Did you speak after pressing the MIC button?',
'3. UseIME intent might not be supported on this webOS version',
'4. Voice framework might not be routing events correctly',
'5. Subscription might have been cancelled or disconnected',
],
diagnostics: {
subscriptionActive: currentState.isRegistered,
voiceTicket: currentState.voiceTicket,
handler: currentState.voiceHandler ? 'exists' : 'null',
},
suggestion:
'Try pressing the remote MIC button and speaking clearly. Check VoicePanel logs for performAction event.',
})
);
}
}, 15000);
// ⭐ Subscription health check: log every 3 seconds (30 seconds total)
let healthCheckCount = 0;
const healthCheckInterval = setInterval(() => {
healthCheckCount++;
const currentState = getState().voice;
console.log(`[Voice] 🏥 Subscription Health Check #${healthCheckCount}:`, {
isRegistered: currentState.isRegistered,
hasVoiceTicket: !!currentState.voiceTicket,
voiceTicket: currentState.voiceTicket,
hasHandler: !!currentState.voiceHandler,
receivedSTT: !!currentState.lastSTTText,
timestamp: new Date().toISOString(),
});
dispatch(
addLog('ACTION', `[Voice] 🏥 Health Check #${healthCheckCount}/10`, {
isRegistered: currentState.isRegistered,
hasVoiceTicket: !!currentState.voiceTicket,
voiceTicket: currentState.voiceTicket,
hasHandler: !!currentState.voiceHandler,
receivedSTT: !!currentState.lastSTTText,
message: `Subscription health check ${healthCheckCount}/10 - Still waiting for voice input...`,
status: currentState.isRegistered ? '✅ Active' : '❌ Disconnected',
})
);
// Stop after 10 checks (30 seconds)
if (healthCheckCount >= 10 || currentState.lastSTTText) {
clearInterval(healthCheckInterval);
console.log('[Voice] Health check completed or STT received');
}
}, 3000);
},
onFailure: (err) => {
console.error('[Voice] Failed to set voice context:', err);
// Log failed context setting
dispatch(addLog('ERROR', 'Set Voice Context Failed', err, false));
dispatch(
addLog(
'ERROR',
'[Voice] ❌ Set Voice Context Failed',
{
...err,
message: 'Failed to send intents to voice framework',
voiceTicket: voiceTicket,
possibleReasons: [
'Invalid voiceTicket',
'Voice framework service disconnected',
'Malformed intent data',
],
},
false
)
);
dispatch({
type: types.VOICE_SET_CONTEXT_FAILURE,
payload: err,
@@ -282,10 +430,11 @@ export const handleVoiceAction = (voiceTicket, action) => (dispatch, getState) =
// Log that we're processing the action
dispatch(
addLog('ACTION', 'Processing Voice Action', {
addLog('ACTION', '[Voice] 🔄 Processing Voice Action', {
intent: action.intent,
itemId: action.itemId,
fullAction: action,
message: `Processing ${action.intent} intent action`,
})
);
@@ -299,12 +448,13 @@ export const handleVoiceAction = (voiceTicket, action) => (dispatch, getState) =
// 📝 Log: STT text extraction
dispatch(
addLog('ACTION', '🎤 STT Text Extracted (Speech → Text)', {
addLog('ACTION', '[Voice] 🎤 STT Text Extracted (Speech → Text)', {
intent: 'UseIME',
extractedText: action.value,
textLength: action.value.length,
timestamp: new Date().toISOString(),
description: 'User speech has been converted to text successfully',
message: `Extracted text: "${action.value}"`,
})
);
@@ -316,10 +466,11 @@ export const handleVoiceAction = (voiceTicket, action) => (dispatch, getState) =
// 📝 Log: STT text saved to Redux
dispatch(
addLog('ACTION', '✅ STT Text Saved to Redux', {
addLog('ACTION', '[Voice] ✅ STT Text Saved to Redux', {
savedText: action.value,
reduxAction: 'VOICE_STT_TEXT_RECEIVED',
state: 'lastSTTText updated',
message: 'STT text has been saved to Redux state and is now available',
})
);
@@ -356,10 +507,12 @@ export const handleVoiceAction = (voiceTicket, action) => (dispatch, getState) =
// Log the processing result before reporting
dispatch(
addLog('ACTION', 'Action Processing Complete', {
addLog('ACTION', `[Voice] ✅ Action Processing Complete`, {
result,
feedback,
action: action.itemId,
action: action.itemId || action.intent,
message: result ? 'Action processed successfully' : 'Action processing failed',
status: result ? '✅ Success' : '❌ Failed',
})
);
@@ -435,10 +588,11 @@ export const reportActionResult =
// Log the report request
dispatch(
addLog('ACTION', 'Reporting Action Result', {
addLog('ACTION', '[Voice] 📤 Reporting Action Result', {
voiceTicket,
result,
feedback,
message: 'Sending action result back to voice framework',
})
);
@@ -446,7 +600,18 @@ export const reportActionResult =
onSuccess: (res) => {
console.log('[Voice] Action result reported successfully:', res);
// Log successful report
dispatch(addLog('RESPONSE', 'Report Action Result Success', res, true));
dispatch(
addLog(
'RESPONSE',
'[Voice] ✅ Report Action Result Success',
{
...res,
result: result,
message: 'Successfully reported action result to voice framework',
},
true
)
);
dispatch({
type: types.VOICE_REPORT_RESULT_SUCCESS,
payload: { result, feedback },
@@ -456,7 +621,18 @@ export const reportActionResult =
onFailure: (err) => {
console.error('[Voice] Failed to report action result:', err);
// Log failed report
dispatch(addLog('ERROR', 'Report Action Result Failed', err, false));
dispatch(
addLog(
'ERROR',
'[Voice] ❌ Report Action Result Failed',
{
...err,
message: 'Failed to report action result to voice framework',
voiceTicket: voiceTicket,
},
false
)
);
dispatch({
type: types.VOICE_REPORT_RESULT_FAILURE,
payload: err,
@@ -477,9 +653,23 @@ export const unregisterVoiceFramework = () => (dispatch, getState) => {
const { voiceHandler } = getState().voice;
const isTV = typeof window === 'object' && window.PalmSystem;
if (voiceHandler && isTV) {
console.log('[Voice] Unregistering from voice framework');
dispatch(
addLog('ACTION', '[Voice] 🔌 Unregistering Voice Framework', {
message: 'Cancelling voice framework subscription',
hasHandler: !!voiceHandler,
isTV: isTV,
})
);
if (voiceHandler && isTV) {
lunaSend.cancelVoiceRegistration(voiceHandler);
dispatch(
addLog('ACTION', '[Voice] ✅ Voice Framework Unregistered', {
message: 'Successfully cancelled voice framework subscription',
})
);
}
// Always clear state on unmount, regardless of platform
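Note: the `addLog(type, title, data, success)` helper used throughout this file is listed as modified, but its body is not shown in the diff. Judging from the `VOICE_ADD_LOG` payloads dispatched in VoicePanel.jsx further down, it is presumably shaped roughly like this sketch (the `success` field handling is an assumption):

```js
// Hypothetical reconstruction of addLog() - not part of this diff.
export const addLog = (type, title, data, success) => ({
  type: types.VOICE_ADD_LOG,
  payload: {
    timestamp: new Date().toISOString(),
    type, // 'ACTION' | 'COMMAND' | 'RESPONSE' | 'ERROR'
    title,
    data,
    success, // assumption: consumed by the VoicePanel UI for styling
  },
});
```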

View File

@@ -0,0 +1,129 @@
// src/actions/webSpeechActions.js
import { types } from './actionTypes';
import webSpeechService from '../services/webSpeech/WebSpeechService';
/**
* Initialize and start Web Speech
* @param {Object} config - language and recognition settings
*/
export const initializeWebSpeech =
(config = {}) =>
(dispatch) => {
console.log('[WebSpeechActions] Initializing Web Speech...');
// Check for browser support
if (!webSpeechService.isSupported) {
const error = 'Web Speech API is not supported in this browser';
console.error('[WebSpeechActions]', error);
dispatch({
type: types.WEB_SPEECH_ERROR,
payload: { error, message: error },
});
return false;
}
// Initialize
const initialized = webSpeechService.initialize({
lang: config.lang || 'ko-KR',
continuous: config.continuous || false,
interimResults: config.interimResults !== false,
maxAlternatives: config.maxAlternatives || 1,
});
if (!initialized) {
dispatch({
type: types.WEB_SPEECH_ERROR,
payload: { error: 'Failed to initialize', message: 'Failed to initialize Web Speech' },
});
return false;
}
// Register event handlers
webSpeechService.on('start', () => {
dispatch({
type: types.WEB_SPEECH_START,
});
});
webSpeechService.on('result', (result) => {
console.log('[WebSpeechActions] Result:', result);
// Interim (partial) result
if (!result.isFinal) {
dispatch({
type: types.WEB_SPEECH_INTERIM_RESULT,
payload: result.transcript,
});
}
// Final result
else {
dispatch({
type: types.VOICE_STT_TEXT_RECEIVED, // reuse the same action as the existing VUI path
payload: result.transcript,
});
}
});
webSpeechService.on('error', (errorInfo) => {
console.error('[WebSpeechActions] Error:', errorInfo);
dispatch({
type: types.WEB_SPEECH_ERROR,
payload: errorInfo,
});
});
webSpeechService.on('end', () => {
dispatch({
type: types.WEB_SPEECH_END,
});
});
dispatch({
type: types.WEB_SPEECH_INITIALIZED,
});
return true;
};
/**
* Start speech recognition
*/
export const startWebSpeech = () => (dispatch) => {
console.log('[WebSpeechActions] Starting recognition...');
const started = webSpeechService.start();
if (!started) {
dispatch({
type: types.WEB_SPEECH_ERROR,
payload: { error: 'Failed to start', message: 'Failed to start recognition' },
});
}
};
/**
* Stop speech recognition
*/
export const stopWebSpeech = () => (dispatch) => {
console.log('[WebSpeechActions] Stopping recognition...');
webSpeechService.stop();
};
/**
* Abort speech recognition
*/
export const abortWebSpeech = () => (dispatch) => {
console.log('[WebSpeechActions] Aborting recognition...');
webSpeechService.abort();
};
/**
* Clean up resources
*/
export const cleanupWebSpeech = () => (dispatch) => {
console.log('[WebSpeechActions] Cleaning up...');
webSpeechService.cleanup();
dispatch({
type: types.WEB_SPEECH_CLEANUP,
});
};
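Taken together, a caller would drive these thunks roughly as follows — a minimal usage sketch assuming a redux-thunk store (`store` here is illustrative):

```js
// Minimal usage sketch for the thunks above (assumes redux-thunk middleware).
import {
  initializeWebSpeech,
  startWebSpeech,
  stopWebSpeech,
  cleanupWebSpeech,
} from './webSpeechActions';

const ok = store.dispatch(initializeWebSpeech({ lang: 'en-US', interimResults: true }));
if (ok) {
  store.dispatch(startWebSpeech()); // WEB_SPEECH_START, then interim/final results
  // ...later, e.g. when the overlay closes:
  store.dispatch(stopWebSpeech()); // graceful stop; the 'end' event dispatches WEB_SPEECH_END
  store.dispatch(cleanupWebSpeech()); // abort + clear callbacks + WEB_SPEECH_CLEANUP
}
```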

View File

@@ -0,0 +1,76 @@
// src/hooks/useWebSpeech.js
import { useEffect, useCallback } from 'react';
import { useDispatch, useSelector } from 'react-redux';
import {
initializeWebSpeech,
startWebSpeech,
stopWebSpeech,
cleanupWebSpeech,
} from '../actions/webSpeechActions';
/**
* Web Speech API Hook
* - Voice input hook used by SearchPanel
* - Integrates with VoiceInputOverlay
*
* @param {boolean} isActive - whether the hook is active (e.g., SearchPanel is in the foreground)
* @param {function} onSTTText - callback invoked with received STT text
* @param {Object} config - Web Speech settings
*/
export const useWebSpeech = (isActive, onSTTText, config = {}) => {
const dispatch = useDispatch();
const { lastSTTText, sttTimestamp, webSpeech } = useSelector((state) => state.voice);
// Initialize Web Speech
useEffect(() => {
if (isActive) {
console.log('[useWebSpeech] Initializing Web Speech API');
dispatch(
initializeWebSpeech({
lang: config.lang || 'ko-KR',
continuous: config.continuous || false,
interimResults: config.interimResults !== false,
})
);
}
// Cleanup on unmount only
return () => {
if (isActive) {
console.log('[useWebSpeech] Cleaning up Web Speech API (unmount)');
dispatch(cleanupWebSpeech());
}
};
}, [isActive, dispatch, config.lang, config.continuous, config.interimResults]);
// Handle received STT text
useEffect(() => {
if (lastSTTText && sttTimestamp) {
console.log('[useWebSpeech] STT text received:', lastSTTText);
if (onSTTText) {
onSTTText(lastSTTText);
}
}
}, [lastSTTText, sttTimestamp, onSTTText]);
// Return start/stop controls
const startListening = useCallback(() => {
dispatch(startWebSpeech());
}, [dispatch]);
const stopListening = useCallback(() => {
dispatch(stopWebSpeech());
}, [dispatch]);
return {
isInitialized: webSpeech.isInitialized,
isListening: webSpeech.isListening,
interimText: webSpeech.interimText,
error: webSpeech.error,
startListening,
stopListening,
};
};
export default useWebSpeech;
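A consuming component would use the hook roughly like this — an illustrative sketch assuming the voiceReducer is mounted at `state.voice` (component and prop names are hypothetical):

```jsx
// Illustrative consumer of useWebSpeech (component and prop names are hypothetical).
import React, { useCallback } from 'react';
import useWebSpeech from '../hooks/useWebSpeech';

const VoiceSearchButton = ({ isOnTop, onQuery }) => {
  // Invoked once per final STT result
  const handleSTTText = useCallback((text) => onQuery(text), [onQuery]);

  const { isListening, interimText, startListening, stopListening } = useWebSpeech(
    isOnTop, // active only while this panel is in the foreground
    handleSTTText,
    { lang: 'en-US', interimResults: true }
  );

  return (
    <button onClick={isListening ? stopListening : startListening}>
      {isListening ? interimText || 'Listening…' : '🎤 Speak'}
    </button>
  );
};

export default VoiceSearchButton;
```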

View File

@@ -30,6 +30,14 @@ const initialState = {
// Logging for debugging
logs: [],
logIdCounter: 0,
// Web Speech API state
webSpeech: {
isInitialized: false,
isListening: false,
interimText: null,
error: null,
},
};
export const voiceReducer = (state = initialState, action) => {
@@ -133,6 +141,68 @@ export const voiceReducer = (state = initialState, action) => {
sttTimestamp: new Date().toISOString(),
};
// Web Speech API cases
case types.WEB_SPEECH_INITIALIZED:
return {
...state,
webSpeech: {
...state.webSpeech,
isInitialized: true,
error: null,
},
};
case types.WEB_SPEECH_START:
return {
...state,
webSpeech: {
...state.webSpeech,
isListening: true,
interimText: null,
error: null,
},
};
case types.WEB_SPEECH_INTERIM_RESULT:
return {
...state,
webSpeech: {
...state.webSpeech,
interimText: action.payload,
},
};
case types.WEB_SPEECH_END:
return {
...state,
webSpeech: {
...state.webSpeech,
isListening: false,
interimText: null,
},
};
case types.WEB_SPEECH_ERROR:
return {
...state,
webSpeech: {
...state.webSpeech,
isListening: false,
error: action.payload,
},
};
case types.WEB_SPEECH_CLEANUP:
return {
...state,
webSpeech: {
isInitialized: false,
isListening: false,
interimText: null,
error: null,
},
};
default:
return state;
}

View File

@@ -0,0 +1,225 @@
// src/services/webSpeech/WebSpeechService.js
/**
* Web Speech API wrapper service
* - Manages the SpeechRecognition object
* - Handles recognition events
* - Tracks listening state
*/
class WebSpeechService {
constructor() {
this.recognition = null;
this.isSupported = this.checkSupport();
this.isListening = false;
this.callbacks = {
onResult: null,
onError: null,
onStart: null,
onEnd: null,
};
}
/**
* Check whether the Web Speech API is supported
*/
checkSupport() {
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
return !!SpeechRecognition;
}
/**
* Initialize Speech Recognition
* @param {Object} config - configuration options
* @param {string} config.lang - language code (e.g., 'ko-KR', 'en-US')
* @param {boolean} config.continuous - continuous recognition
* @param {boolean} config.interimResults - emit interim results
* @param {number} config.maxAlternatives - maximum number of alternatives
*/
initialize(config = {}) {
if (!this.isSupported) {
console.error('[WebSpeech] Speech Recognition not supported');
return false;
}
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
this.recognition = new SpeechRecognition();
// Apply configuration
this.recognition.lang = config.lang || 'ko-KR';
this.recognition.continuous = config.continuous || false;
this.recognition.interimResults = config.interimResults !== false; // default true
this.recognition.maxAlternatives = config.maxAlternatives || 1;
// Register event handlers
this.setupEventHandlers();
console.log('[WebSpeech] Initialized with config:', {
lang: this.recognition.lang,
continuous: this.recognition.continuous,
interimResults: this.recognition.interimResults,
});
return true;
}
/**
* Set up event handlers
*/
setupEventHandlers() {
// Recognition started
this.recognition.onstart = () => {
console.log('[WebSpeech] Recognition started');
this.isListening = true;
if (this.callbacks.onStart) {
this.callbacks.onStart();
}
};
// Recognition result
this.recognition.onresult = (event) => {
const results = event.results;
const lastResult = results[results.length - 1];
const transcript = lastResult[0].transcript;
const isFinal = lastResult.isFinal;
const confidence = lastResult[0].confidence;
console.log('[WebSpeech] Result:', { transcript, isFinal, confidence });
if (this.callbacks.onResult) {
this.callbacks.onResult({
transcript,
isFinal,
confidence,
alternatives: Array.from(lastResult).map((alt) => ({
transcript: alt.transcript,
confidence: alt.confidence,
})),
});
}
};
// Error handling
this.recognition.onerror = (event) => {
console.error('[WebSpeech] Recognition error:', event.error);
this.isListening = false;
if (this.callbacks.onError) {
this.callbacks.onError({
error: event.error,
message: this.getErrorMessage(event.error),
});
}
};
// Recognition ended
this.recognition.onend = () => {
console.log('[WebSpeech] Recognition ended');
this.isListening = false;
if (this.callbacks.onEnd) {
this.callbacks.onEnd();
}
};
}
/**
* Map recognition error codes to user-facing messages
*/
getErrorMessage(error) {
const errorMessages = {
'no-speech': '음성이 감지되지 않았습니다. 다시 시도해주세요.',
'audio-capture': '마이크에 접근할 수 없습니다.',
'not-allowed': '마이크 사용 권한이 거부되었습니다.',
network: '네트워크 오류가 발생했습니다.',
aborted: '음성 인식이 중단되었습니다.',
'service-not-allowed': '음성 인식 서비스를 사용할 수 없습니다.',
};
return errorMessages[error] || `알 수 없는 오류: ${error}`;
}
/**
* 콜백 등록
* @param {string} event - 이벤트 이름 ('result', 'error', 'start', 'end')
* @param {Function} callback - 콜백 함수
*/
on(event, callback) {
const eventKey = `on${event.charAt(0).toUpperCase() + event.slice(1)}`;
if (Object.prototype.hasOwnProperty.call(this.callbacks, eventKey)) {
this.callbacks[eventKey] = callback;
}
}
/**
* 음성 인식 시작
*/
start() {
if (!this.recognition) {
console.error('[WebSpeech] Recognition not initialized. Call initialize() first.');
return false;
}
if (this.isListening) {
console.warn('[WebSpeech] Already listening');
return false;
}
try {
this.recognition.start();
console.log('[WebSpeech] Starting recognition...');
return true;
} catch (error) {
console.error('[WebSpeech] Failed to start:', error);
return false;
}
}
/**
* Stop recognition
*/
stop() {
if (!this.recognition) {
return;
}
if (!this.isListening) {
console.warn('[WebSpeech] Not listening');
return;
}
try {
this.recognition.stop();
console.log('[WebSpeech] Stopping recognition...');
} catch (error) {
console.error('[WebSpeech] Failed to stop:', error);
}
}
/**
* Abort recognition (immediate stop)
*/
abort() {
if (this.recognition) {
this.recognition.abort();
this.isListening = false;
}
}
/**
* Clean up resources
*/
cleanup() {
this.abort();
this.callbacks = {
onResult: null,
onError: null,
onStart: null,
onEnd: null,
};
}
}
// Create the singleton instance
const webSpeechService = new WebSpeechService();
export default webSpeechService;
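Outside Redux, the singleton can also be exercised directly — a minimal sketch:

```js
// Direct (non-Redux) usage sketch of the singleton exported above.
import webSpeechService from './WebSpeechService';

if (webSpeechService.isSupported) {
  webSpeechService.initialize({ lang: 'en-US', interimResults: true });
  webSpeechService.on('result', ({ transcript, isFinal }) => {
    console.log(isFinal ? 'final:' : 'interim:', transcript);
  });
  webSpeechService.on('error', ({ error, message }) => console.error(error, message));
  webSpeechService.start();
  // Call webSpeechService.stop() or webSpeechService.cleanup() when done.
}
```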

View File

@@ -398,6 +398,11 @@ export default function SearchPanel({ panelInfo, isOnTop, spotlightId, scrollOpt
if (!isOnTopRef.current) {
return;
}
// If the VoiceInputOverlay is open, close it first
if (isVoiceOverlayVisible) {
setIsVoiceOverlayVisible(false);
return;
}
if (searchQuery === null || searchQuery === '') {
dispatch(popPanel(panel_names.SEARCH_PANEL));
} else {
@@ -406,7 +411,7 @@ export default function SearchPanel({ panelInfo, isOnTop, spotlightId, scrollOpt
dispatch(resetSearch());
Spotlight.focus(SPOTLIGHT_IDS.SEARCH_INPUT_BOX);
}
}, [searchQuery, dispatch]);
}, [searchQuery, dispatch, isVoiceOverlayVisible]);
const onFocusedContainerId = useCallback(
(containerId) => {
@@ -759,6 +764,7 @@ export default function SearchPanel({ panelInfo, isOnTop, spotlightId, scrollOpt
/> */}
{/* Voice Input Overlay */}
{isVoiceOverlayVisible && (
<VoiceInputOverlay
isVisible={isVoiceOverlayVisible}
onClose={() => setIsVoiceOverlayVisible(false)}
@@ -768,6 +774,7 @@ export default function SearchPanel({ panelInfo, isOnTop, spotlightId, scrollOpt
onSearchChange={handleSearchChange}
onSearchSubmit={handleSearchSubmit}
/>
)}
</TPanel>
);
}

View File

@@ -13,10 +13,12 @@ import micIcon from '../../../../assets/images/searchpanel/image-mic.png';
import { getShopperHouseSearch } from '../../../actions/searchActions';
import TFullPopup from '../../../components/TFullPopup/TFullPopup';
import TInput, { ICONS, KINDS } from '../../../components/TInput/TInput';
import { useWebSpeech } from '../../../hooks/useWebSpeech';
import VoiceListening from './modes/VoiceListening';
import VoiceNotRecognized from './modes/VoiceNotRecognized';
import VoiceNotRecognizedCircle from './modes/VoiceNotRecognizedCircle';
import VoicePromptScreen from './modes/VoicePromptScreen';
import VoiceResponse from './modes/VoiceResponse';
import css from './VoiceInputOverlay.module.less';
const OverlayContainer = SpotlightContainerDecorator(
@@ -33,6 +35,7 @@ const SpottableMicButton = Spottable('div');
export const VOICE_MODES = {
PROMPT: 'prompt', // "Try saying" screen
LISTENING: 'listening', // listening screen
RESPONSE: 'response', // STT text display screen
MODE_3: 'mode3', // to be added later
MODE_4: 'mode4', // to be added later
};
@@ -59,13 +62,44 @@ const VoiceInputOverlay = ({
}) => {
const dispatch = useDispatch();
const lastFocusedElement = useRef(null);
const listeningTimerRef = useRef(null);
const [inputFocus, setInputFocus] = useState(false);
const [micFocused, setMicFocused] = useState(false);
const [micWebSpeechFocused, setMicWebSpeechFocused] = useState(false);
// Internal mode state management (prompt -> listening -> close)
// Internal mode state management (prompt -> listening -> response -> close)
const [currentMode, setCurrentMode] = useState(mode);
// Voice input mode (VUI vs WebSpeech)
const [voiceInputMode, setVoiceInputMode] = useState(null);
// Stored STT response text
const [sttResponseText, setSttResponseText] = useState('');
// Web Speech API hook (active only in WebSpeech mode)
const handleWebSpeechSTT = useCallback((sttText) => {
console.log('🎤 [VoiceInputOverlay] WebSpeech STT text received:', sttText);
// Stop the timer
if (listeningTimerRef.current) {
clearTimeout(listeningTimerRef.current);
listeningTimerRef.current = null;
}
// Store the STT text
setSttResponseText(sttText);
// Switch to RESPONSE mode
setCurrentMode(VOICE_MODES.RESPONSE);
console.log('📺 [VoiceInputOverlay] Switching to RESPONSE mode with text:', sttText);
}, []);
const { isListening, interimText, startListening, stopListening, error } = useWebSpeech(
isVisible, // Active only while the overlay is open (initialized regardless of voiceInputMode)
handleWebSpeechSTT,
{
lang: 'en-US',
continuous: false,
interimResults: true,
}
);
// ⛔ VUI test disabled: avoid conflicts while testing VoicePanel independently
// Get voice state from Redux
@@ -110,6 +144,55 @@ const VoiceInputOverlay = ({
// }
// }, [lastSTTText, sttTimestamp, isVisible, onSearchChange, onClose]);
// Log WebSpeech interim text
useEffect(() => {
if (interimText && voiceInputMode === VOICE_INPUT_MODE.WEBSPEECH) {
console.log('💬 [VoiceInputOverlay] WebSpeech Interim text:', interimText);
}
}, [interimText, voiceInputMode]);
// Handle WebSpeech errors
useEffect(() => {
if (error && voiceInputMode === VOICE_INPUT_MODE.WEBSPEECH) {
console.error('❌ [VoiceInputOverlay] WebSpeech Error:', error);
}
}, [error, voiceInputMode]);
// Keep the 15-second timer even after WebSpeech stops listening
// (LISTENING mode persists for the full 15 seconds after voice input ends)
useEffect(() => {
if (!isListening && voiceInputMode === VOICE_INPUT_MODE.WEBSPEECH) {
console.log('🎤 [VoiceInputOverlay] WebSpeech stopped, but LISTENING mode continues...');
// Do not clear the timer - stay in LISTENING mode until the 15 seconds elapse
}
}, [isListening, voiceInputMode]);
// ⛔ Standalone test: Web Speech API calls disabled
// Start recognition automatically when switching to WebSpeech mode
// useEffect(() => {
// if (
// voiceInputMode === VOICE_INPUT_MODE.WEBSPEECH &&
// currentMode === VOICE_MODES.LISTENING &&
// !isListening
// ) {
// console.log('🎙️ [VoiceInputOverlay] Auto-starting Web Speech API after mode change...');
// startListening();
// // 15초 타이머 설정
// if (listeningTimerRef.current) {
// clearTimeout(listeningTimerRef.current);
// }
// listeningTimerRef.current = setTimeout(() => {
// console.log('⏰ [VoiceInputOverlay] 15s timeout - auto-stopping WebSpeech');
// stopListening();
// setCurrentMode(VOICE_MODES.PROMPT);
// setVoiceInputMode(null);
// }, 15000); // 15 seconds
// }
// }, [voiceInputMode, currentMode, isListening, startListening, stopListening]);
// This useEffect was removed - renderModeContent() decides directly, so it is unnecessary
// Move focus inside the overlay when it opens
useEffect(() => {
if (isVisible) {
@@ -126,7 +209,23 @@ const VoiceInputOverlay = ({
}, 100);
} else {
// Restore the previous focus and reset state when the overlay closes
// Clear the timer
if (listeningTimerRef.current) {
clearTimeout(listeningTimerRef.current);
listeningTimerRef.current = null;
}
// ⛔ Standalone test: Web Speech API calls disabled
// Stop WebSpeech (handled asynchronously)
// if (isListening) {
// stopListening();
// }
// Reset state
setVoiceInputMode(null);
setCurrentMode(VOICE_MODES.PROMPT);
if (lastFocusedElement.current) {
setTimeout(() => {
Spotlight.focus(lastFocusedElement.current);
@@ -182,15 +281,59 @@ const VoiceInputOverlay = ({
}
}, []);
// TALK AGAIN button handler
const handleTalkAgain = useCallback(() => {
console.log('🎤 [VoiceInputOverlay] TALK AGAIN - Restarting LISTENING mode');
// Clear any existing timer
if (listeningTimerRef.current) {
clearTimeout(listeningTimerRef.current);
listeningTimerRef.current = null;
}
// Reset the STT text
setSttResponseText('');
// Switch to LISTENING mode
setVoiceInputMode(VOICE_INPUT_MODE.WEBSPEECH);
setCurrentMode(VOICE_MODES.LISTENING);
// Start the Web Speech API
startListening();
// Set the 15-second timer
listeningTimerRef.current = setTimeout(() => {
console.log('⏰ [VoiceInputOverlay] 15s timeout - returning to PROMPT mode');
setCurrentMode(VOICE_MODES.PROMPT);
setVoiceInputMode(null);
listeningTimerRef.current = null;
stopListening();
}, 15000);
}, [startListening, stopListening]);
// Render content for the current mode
const renderModeContent = () => {
console.log(
'📺 [VoiceInputOverlay] renderModeContent - currentMode:',
currentMode,
'voiceInputMode:',
voiceInputMode,
'isListening:',
isListening
);
switch (currentMode) {
case VOICE_MODES.PROMPT:
console.log('📺 Rendering: VoicePromptScreen');
return (
<VoicePromptScreen suggestions={suggestions} onSuggestionClick={handleSuggestionClick} />
);
case VOICE_MODES.LISTENING:
console.log('📺 Rendering: VoiceListening (driven by the 15s timer)');
return <VoiceListening />;
case VOICE_MODES.RESPONSE:
console.log('📺 Rendering: VoiceResponse with text:', sttResponseText);
return <VoiceResponse responseText={sttResponseText} onTalkAgain={handleTalkAgain} />;
case VOICE_MODES.MODE_3:
// MODE_3 component to be added later
return <VoiceNotRecognized />;
@@ -198,6 +341,7 @@ const VoiceInputOverlay = ({
// MODE_4 component to be added later
return <VoiceNotRecognizedCircle />;
default:
console.log('📺 Rendering: VoicePromptScreen (default)');
return (
<VoicePromptScreen suggestions={suggestions} onSuggestionClick={handleSuggestionClick} />
);
@@ -267,10 +411,26 @@ const VoiceInputOverlay = ({
// [currentMode, voiceInputMode, onClose]
// );
// WebSpeech mic button click (mode transition: prompt -> listening -> close)
// Overlay close handler (unifies all close paths)
const handleClose = useCallback(() => {
console.log('[VoiceInputOverlay] Closing overlay');
if (listeningTimerRef.current) {
clearTimeout(listeningTimerRef.current);
listeningTimerRef.current = null;
}
setVoiceInputMode(null);
setCurrentMode(VOICE_MODES.PROMPT);
setSttResponseText('');
onClose();
}, [onClose]);
// WebSpeech mic button click handler
const handleWebSpeechMicClick = useCallback(
(e) => {
console.log('[VoiceInputOverlay] handleWebSpeechMicClick called, currentMode:', currentMode);
console.log(
'🎤 [VoiceInputOverlay] handleWebSpeechMicClick called, currentMode:',
currentMode
);
// Stop event propagation - prevent the dim layer's onClick from firing
if (e && e.stopPropagation) {
@@ -280,44 +440,51 @@ const VoiceInputOverlay = ({
e.nativeEvent.stopImmediatePropagation();
}
if (currentMode === VOICE_MODES.PROMPT) {
// On click in PROMPT mode -> switch to WebSpeech LISTENING mode
console.log('[VoiceInputOverlay] Switching to WebSpeech LISTENING mode');
if (currentMode === VOICE_MODES.PROMPT || currentMode === VOICE_MODES.RESPONSE) {
// On click in PROMPT or RESPONSE mode:
// 1. switch to LISTENING mode (15-second timer)
// 2. start the Web Speech API (runs independently of the timer)
console.log('🎤 [VoiceInputOverlay] Starting LISTENING mode (15s) + WebSpeech API');
// Clear any existing timer
if (listeningTimerRef.current) {
clearTimeout(listeningTimerRef.current);
listeningTimerRef.current = null;
}
// Reset the STT text (when coming from RESPONSE mode)
if (currentMode === VOICE_MODES.RESPONSE) {
setSttResponseText('');
}
setVoiceInputMode(VOICE_INPUT_MODE.WEBSPEECH);
setCurrentMode(VOICE_MODES.LISTENING);
// TODO: add Web Speech API start logic
} else if (
currentMode === VOICE_MODES.LISTENING &&
voiceInputMode === VOICE_INPUT_MODE.WEBSPEECH
) {
// On click in WebSpeech LISTENING mode -> close
console.log('[VoiceInputOverlay] Closing from WebSpeech LISTENING mode');
// Start the Web Speech API
startListening();
// Set the 15-second timer (return to PROMPT after 15 s regardless of when WebSpeech ends)
listeningTimerRef.current = setTimeout(() => {
console.log('[VoiceInputOverlay] 15s timeout - returning to PROMPT mode');
setCurrentMode(VOICE_MODES.PROMPT);
setVoiceInputMode(null);
onClose();
listeningTimerRef.current = null;
// Stop WebSpeech if it is still running
stopListening();
}, 15000); // 15 seconds
} else {
// Close immediately in other modes
console.log('[VoiceInputOverlay] Closing from other mode');
setVoiceInputMode(null);
onClose();
// On click in LISTENING or any other mode -> close the overlay
console.log('🎤 [VoiceInputOverlay] Closing overlay');
handleClose();
}
},
[currentMode, voiceInputMode, onClose]
);
// Dim layer click handler (separate from the mic button)
const handleDimClick = useCallback(
(e) => {
console.log('[VoiceInputOverlay] dimBackground clicked');
setVoiceInputMode(null);
onClose();
},
[onClose]
[currentMode, handleClose, startListening, stopListening]
);
return (
<TFullPopup
open={isVisible}
onClose={onClose}
onClose={handleClose}
noAutoDismiss={true}
spotlightRestrict="self-only"
spotlightId={OVERLAY_SPOTLIGHT_ID}
@@ -327,10 +494,9 @@ const VoiceInputOverlay = ({
>
<div className={css.voiceOverlayContainer}>
{/* Background dim layer - click to close */}
<div className={css.dimBackground} onClick={handleDimClick} />
<div className={css.dimBackground} onClick={handleClose} />
{/* ⛔ VUI test disabled: voice registration status display (for debugging) */}
{/* {process.env.NODE_ENV === 'development' && (
{/* Debug: WebSpeech status display */}
<div
style={{
position: 'absolute',
@@ -338,13 +504,17 @@ const VoiceInputOverlay = ({
right: 10,
color: '#fff',
zIndex: 10000,
backgroundColor: 'rgba(0,0,0,0.7)',
padding: '10px',
borderRadius: '5px',
fontSize: '14px',
}}
>
Voice: {isRegistered ? '✓ Ready' : '✗ Not Ready'}
<br />
Mode: {voiceInputMode || 'None'}
<div>Input Mode: {voiceInputMode || 'None'}</div>
<div>Current Mode: {currentMode}</div>
<div>isListening: {isListening ? '🎤 YES' : '❌ NO'}</div>
<div>Interim: {interimText || 'N/A'}</div>
</div>
)} */}
{/* Per-mode content area - Spotlight container (self-only) */}
<OverlayContainer
@@ -457,7 +627,7 @@ const VoiceInputOverlay = ({
</div>
{/* Per-mode content */}
{renderModeContent()}
<div className={css.modeContent}>{renderModeContent()}</div>
</OverlayContainer>
</div>
</TFullPopup>
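The handlers above add up to a small mode machine; the sketch below only summarizes transitions already implemented in this file (the 15 s timeout is the `listeningTimerRef` timer set in `handleWebSpeechMicClick` and `handleTalkAgain`):

```js
// Mode machine implemented by VoiceInputOverlay (summary, not new code):
//
//   PROMPT ──mic click──▶ LISTENING ──final STT──▶ RESPONSE
//     ▲                      │    │                   │
//     └──── 15 s timeout ────┘    └─mic click─▶ close │
//                                                     └─TALK AGAIN / mic click─▶ LISTENING
//
//   A dim-layer click or the back key closes the overlay from any mode
//   and resets the state to PROMPT.
```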

View File

@@ -0,0 +1,62 @@
// src/views/SearchPanel/VoiceInputOverlay/modes/VoiceResponse.jsx
import React from 'react';
import PropTypes from 'prop-types';
import Spottable from '@enact/spotlight/Spottable';
import SpotlightContainerDecorator from '@enact/spotlight/SpotlightContainerDecorator';
import css from './VoiceResponse.module.less';
const SpottableBubble = Spottable('div');
const SpottableButton = Spottable('button');
const ResponseContainer = SpotlightContainerDecorator(
{
enterTo: 'default-element',
restrict: 'self-only',
},
'div'
);
const VoiceResponse = ({ responseText = '', onTalkAgain }) => {
const handleTalkAgainClick = () => {
console.log('[VoiceResponse] TALK AGAIN clicked');
if (onTalkAgain) {
onTalkAgain();
}
};
return (
<ResponseContainer className={css.container} spotlightId="voice-response-container">
<div className={css.responseContainer}>
<SpottableButton
className={css.talkAgainButton}
onClick={handleTalkAgainClick}
onKeyDown={(e) => {
if (e.key === 'Enter' || e.keyCode === 13) {
e.preventDefault();
handleTalkAgainClick();
}
}}
spotlightId="voice-talk-again-button"
>
TALK AGAIN
</SpottableButton>
<SpottableBubble className={css.bubbleMessage} spotlightId="voice-response-text">
<div className={css.bubbleText}>{responseText}</div>
</SpottableBubble>
</div>
</ResponseContainer>
);
};
VoiceResponse.propTypes = {
responseText: PropTypes.string,
onTalkAgain: PropTypes.func,
};
VoiceResponse.defaultProps = {
responseText: '',
onTalkAgain: null,
};
export default VoiceResponse;

View File

@@ -0,0 +1,99 @@
// src/views/SearchPanel/VoiceInputOverlay/modes/VoiceResponse.module.less
@import "../../../../style/CommonStyle.module.less";
.container {
width: 100%;
height: 437px;
position: relative;
border-radius: 12px;
pointer-events: all;
margin-top: 100px;
display: flex;
justify-content: center;
align-items: center;
}
.responseContainer {
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
width: 100%;
padding: 0 40px;
}
.talkAgainButton {
margin-bottom: 100px;
padding: 20px 60px;
background: rgba(68, 68, 68, 0.5);
border: none;
border-radius: 1000px;
outline: 2px rgba(251, 251, 251, 0.2) solid;
outline-offset: -2px;
color: #eaeaea;
font-size: 28px;
font-family: "LG Smart UI";
font-weight: 700;
cursor: pointer;
transition: all 0.3s ease;
box-shadow: 0px 10px 30px rgba(0, 0, 0, 0.35);
&:hover {
background: rgba(88, 88, 88, 0.6);
outline: 2px rgba(251, 251, 251, 0.3) solid;
transform: translateY(-2px);
box-shadow: 0px 12px 35px rgba(0, 0, 0, 0.45);
}
&:focus {
background: @PRIMARY_COLOR_RED;
color: @COLOR_WHITE;
outline: 3px @PRIMARY_COLOR_RED solid;
outline-offset: -3px;
box-shadow: 0px 15px 40px rgba(199, 8, 80, 0.55);
transform: translateY(-3px);
}
}
.bubbleMessage {
padding: 20px 40px;
background: rgba(68, 68, 68, 0.5);
box-shadow: 0px 10px 30px rgba(0, 0, 0, 0.35);
border-radius: 1000px;
outline: 2px rgba(251, 251, 251, 0.2) solid;
outline-offset: -2px;
display: flex;
justify-content: center;
align-items: center;
max-width: none; /* no horizontal width limit */
min-width: 300px;
cursor: pointer;
transition: all 0.3s ease;
&:hover {
background: rgba(88, 88, 88, 0.6);
outline: 2px rgba(251, 251, 251, 0.3) solid;
transform: translateY(-2px);
box-shadow: 0px 12px 35px rgba(0, 0, 0, 0.45);
}
&:focus {
background: rgba(100, 100, 100, 0.7);
outline: 3px rgba(251, 251, 251, 0.5) solid;
outline-offset: -3px;
box-shadow: 0px 15px 40px rgba(0, 0, 0, 0.55);
transform: translateY(-3px);
}
}
.bubbleText {
text-align: center;
color: #eaeaea;
font-size: 28px;
font-family: "LG Smart UI";
font-weight: 700;
line-height: 32px;
word-wrap: break-word;
letter-spacing: -1px;
white-space: nowrap; /* keep on a single long line */
}

View File

@@ -73,6 +73,73 @@ export default function VoicePanel({ panelInfo, isOnTop, spotlightId }) {
dispatch({ type: types.VOICE_CLEAR_LOGS });
}, [dispatch]);
const handleTestLog = useCallback(() => {
const now = new Date();
const timestamp = now.toISOString();
const formattedTime = now.toLocaleString('ko-KR', {
year: 'numeric',
month: '2-digit',
day: '2-digit',
hour: '2-digit',
minute: '2-digit',
second: '2-digit',
hour12: false,
});
console.log(
'[VoicePanel] Test Log Button Clicked - Sending test log with timestamp:',
timestamp
);
// Test log dispatch - add several log types in sequence
dispatch({
type: types.VOICE_ADD_LOG,
payload: {
timestamp: timestamp,
type: 'ACTION',
title: '[TEST] 🧪 Test Log Button Clicked',
data: {
message: 'This is a test log to verify addLog dispatch works correctly',
currentTime: formattedTime,
timestamp: timestamp,
testNumber: Math.floor(Math.random() * 1000),
},
},
});
// Second log after 100 ms
setTimeout(() => {
dispatch({
type: types.VOICE_ADD_LOG,
payload: {
timestamp: new Date().toISOString(),
type: 'RESPONSE',
title: '[TEST] ✅ Second Test Log',
data: {
message: 'If you see this, dispatch and Redux are working correctly',
delay: '100ms after first log',
},
},
});
}, 100);
// Third log after 200 ms
setTimeout(() => {
dispatch({
type: types.VOICE_ADD_LOG,
payload: {
timestamp: new Date().toISOString(),
type: 'ERROR',
title: '[TEST] ⚠️ Error Type Test Log',
data: {
message: 'This is a test ERROR log to verify different log types display correctly',
logType: 'ERROR',
},
},
});
}, 200);
}, [dispatch]);
const handleLoadMockData = useCallback(() => {
console.log('[VoicePanel] Loading mock data: STT text + logs');
@@ -202,6 +269,13 @@ export default function VoicePanel({ panelInfo, isOnTop, spotlightId }) {
>
Clear
</TButton>
<TButton
onClick={handleTestLog}
spotlightId="voice-test-log-btn"
className={css.compactButton}
>
Test Log
</TButton>
<TButton
onClick={handleLoadMockData}
spotlightId="voice-mock-data-btn"

View File

@@ -28,18 +28,18 @@
flex-shrink: 0;
padding: 30px 60px 15px;
display: flex;
flex-wrap: nowrap; // Force single row
}
.compactButton {
min-width: auto;
max-width: auto;
width: 140px !important;
min-width: 140px !important;
max-width: 140px !important;
padding: 6px 8px;
font-size: 22px;
line-height: 1.2;
white-space: nowrap;
flex-shrink: 1;
flex-shrink: 0;
display: flex;
align-items: center;
justify-content: center;

View File

@@ -14,12 +14,12 @@
### Plan A vs Plan B comparison
| Category | Plan A (VUI Framework) | Plan B (Web Speech API) |
|-----|----------------------|------------------------|
|------|------------------------|--------------------------|
| **API** | webOS Voice Conductor Service | Browser-native Web Speech API |
| **Dependency** | webOS platform only | Any standard web browser |
| **Network** | webOS speech server | Google speech server |
| **Compatibility** | webOS TV only | Chrome 68+ on any platform |
| **Permissions** | PalmSystem API | navigator.mediaDevices |
| **Permissions** | PalmSystem API | `navigator.mediaDevices` |
| **Pros** | Optimized for the TV environment, remote-control integration | Cross-platform, developer convenience |
| **Cons** | webOS only, complex structure | Network-dependent, needs TV-environment tuning |
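For the Plan B path, support can be feature-detected at runtime — a minimal sketch (the Plan A fallback branch is illustrative):

```js
// Runtime feature detection for Plan B (Web Speech API).
const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;

if (SpeechRecognition) {
  // Plan B: browser-native recognition (Chrome 68+)
  const recognition = new SpeechRecognition();
  recognition.lang = 'en-US';
  recognition.interimResults = true;
  recognition.onresult = (e) => {
    const last = e.results[e.results.length - 1];
    console.log(last.isFinal ? 'final:' : 'interim:', last[0].transcript);
  };
  recognition.start();
} else if (typeof window === 'object' && window.PalmSystem) {
  // Plan A: fall back to the webOS Voice Conductor path (registerVoiceFramework).
}
```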