// face_interop.js — TasQ web face verification bridge.
//
// Liveness : MediaPipe FaceLandmarker (blend shapes) — blink OR smile.
// Comparison: face-api.js (128-D face descriptors via faceRecognitionNet).
//
// Pinned CDN versions — update these constants when upgrading:
//   @mediapipe/tasks-vision 0.10.21
//   @vladmandic/face-api    1
'use strict';

// ── Shared state ─────────────────────────────────────────────────────────────
let _livenessRunning = false;
let _activeOverlay = null; // for cancelWebLiveness() to clean up eagerly

// ── face-api.js — lazy-loaded for face descriptor comparison ─────────────────
let _faceApiLoaded = false;
let _faceApiPromise = null;

/// Memoized loader: injects the face-api.js script tag (if the global is not
/// already present) and then fetches the three model weights in order.
/// A failed attempt clears the cached promise so a later call can retry.
async function _ensureFaceApi() {
  if (_faceApiLoaded) return;
  if (_faceApiPromise === null) {
    _faceApiPromise = (async () => {
      if (!window.faceapi) {
        // Adapt the script tag's load/error callbacks into a promise.
        await new Promise((resolve, reject) => {
          const tag = document.createElement('script');
          tag.src = 'https://cdn.jsdelivr.net/npm/@vladmandic/face-api@1/dist/face-api.js';
          tag.onload = resolve;
          tag.onerror = () => reject(new Error('Failed to load face-api.js'));
          document.head.appendChild(tag);
        });
      }
      const MODEL_URL = 'https://cdn.jsdelivr.net/npm/@vladmandic/face-api@1/model/';
      // Load sequentially — detector, landmarks, then recognition net.
      const nets = [
        faceapi.nets.tinyFaceDetector,
        faceapi.nets.faceLandmark68Net,
        faceapi.nets.faceRecognitionNet,
      ];
      for (const net of nets) {
        await net.loadFromUri(MODEL_URL);
      }
      _faceApiLoaded = true;
    })().catch((err) => {
      _faceApiPromise = null; // drop the failed attempt so the next call retries
      throw err;
    });
  }
  await _faceApiPromise;
}

/// Called by Dart on dialog open. Begins background face-api load so
/// that descriptors are ready for compareFaces() after liveness succeeds.
async function initFaceApi() {
  // Fire-and-forget; a load failure here is only logged.
  _ensureFaceApi().catch((err) => console.warn('[face-api bg]', err));
  return true; // always succeeds; real errors surface in getFaceDescriptor*
}

// ── MediaPipe FaceLandmarker — lazy-loaded for liveness ──────────────────────
// Update _MP_VER when a newer release is available on jsDelivr.
const _MP_VER = '0.10.21';
const _MP_CDN = `https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@${_MP_VER}`;
const _MP_MODEL = 'https://storage.googleapis.com/mediapipe-models/' +
  'face_landmarker/face_landmarker/float16/1/face_landmarker.task';

let _faceLandmarker = null;
let _mpPromise = null;

/// Memoized loader for the MediaPipe FaceLandmarker used by liveness checks.
/// Dynamically imports the pinned vision bundle, resolves the WASM fileset,
/// and creates a single-face, video-mode landmarker with blend shapes on.
/// A failed attempt clears the cached promise so a later call can retry.
///
/// @param {(msg: string) => void} [onStatus] - optional progress callback;
///   invoked with human-readable loading stages.
async function _ensureMediaPipe(onStatus) {
  if (_faceLandmarker) return;
  if (!_mpPromise) {
    _mpPromise = (async () => {
      onStatus?.('Loading face detection…');
      const { FaceLandmarker, FilesetResolver } =
        await import(`${_MP_CDN}/vision_bundle.mjs`);
      onStatus?.('Initializing model…');
      const fileset = await FilesetResolver.forVisionTasks(`${_MP_CDN}/wasm`);
      const opts = {
        outputFaceBlendshapes: true,
        runningMode: 'VIDEO',
        numFaces: 1,
      };
      const mkLandmarker = (delegate) =>
        FaceLandmarker.createFromOptions(fileset, {
          baseOptions: { modelAssetPath: _MP_MODEL, delegate },
          ...opts,
        });
      // Prefer GPU for throughput; fall back to CPU if unavailable.
      try {
        _faceLandmarker = await mkLandmarker('GPU');
      } catch (gpuErr) {
        // FIX: was a silent `catch {}` — log why the GPU delegate failed so
        // CPU-only fallback deployments are diagnosable from the console.
        console.warn('[mediapipe] GPU delegate unavailable, falling back to CPU:', gpuErr);
        _faceLandmarker = await mkLandmarker('CPU');
      }
    })().catch((e) => {
      _mpPromise = null; // drop the failed attempt so the next call retries
      throw e;
    });
  }
  await _mpPromise;
}

// ── Liveness overlay — MediaPipe, blink OR smile ─────────────────────────────

/// Creates a fullscreen overlay appended to document.body so that both the ///