vladmandic/face-api

Usage with Google Cloud Functions

luicfrr opened this issue · 8 comments

Issue Description
Hello, thanks for this amazing library. I'm trying to use it in nodejs GCP with express and I'm getting a few errors while following documentations:

1 - Following node-simple, the props node and dispose from faceapi.tf.... do not exist.
2 - Moving to node-face-compare and importing tf from @tensorflow/tfjs-node. When I run functions in GCP emulator, tf is undefined but it's installed and I have TS intellisense.
3 - 'Tensor3D | Tensor4D' cannot be assigned to 'TNetInput':

import tf from '@tensorflow/tfjs-node'
...
const selfie = tf.node.decodeImage( uploads[ 0 ].buffer, 3 )
const results = await detectAllFaces(
  selfie, // here is the error
  faceDetectorOptions
).withFaceLandmarks()
 .withFaceDescriptors()

Steps to Reproduce
Here's my functions file:

import 'reflect-metadata'
import * as functions from 'firebase-functions'
import express, {
  Request,
  Response
} from 'express'
import cors from 'cors'

const app = express()
app.use( cors() )

import {
  FaceMatcher,
  SsdMobilenetv1Options,
  detectAllFaces,
  detectSingleFace,
  nets,
  // tf  - error 1
} from '@vladmandic/face-api'
import tf from '@tensorflow/tfjs-node' // error 2
import * as fs from 'fs'
/**
 * Test route
 */
app.post( '/',
  async (
    req: Request,
    res: Response
  ) => {
    // i'm using a custom middleware to handle uploads file
    // but importing local files have same error
    const { uploads } = req

    const path = './models'
    await tf.ready()
    await nets.ssdMobilenetv1.loadFromDisk( path )
    await nets.faceLandmark68Net.loadFromDisk( path )
    await nets.faceRecognitionNet.loadFromDisk( path )

    const faceDetectorOptions = new SsdMobilenetv1Options( {
      maxResults: 2
    } )

    const selfie = fs.readFileSync('./selfie.png')
    const selfieDecoded = tf.node.decodeImage( selfie, 3 )
    const results = await detectAllFaces(
      selfieDecoded,
      faceDetectorOptions
    ).withFaceLandmarks()
      .withFaceDescriptors()

    console.log( 'faces found', results.length )

    const faceMatcher = new FaceMatcher( results[ 0 ] )
    console.log( 'face matcher data', faceMatcher )

    const compare = fs.readFileSync('./selfie.png')
    const compareDecoded = tf.node.decodeImage( compare, 3 )
    const singleResult = await detectSingleFace(
      compareDecoded,
      faceDetectorOptions
    ).withFaceLandmarks()
      .withFaceDescriptor()

    if ( !singleResult ) {
      console.log( 'no faces to compare' )
      return res.end()
    }

    const match = faceMatcher.findBestMatch( singleResult.descriptor )
    console.log( 'match result', match.toString() )

    return res.end()
  } )

exports.api = functions
  .region( FUNCTIONS_REGION )
  .https.onRequest( app )

Using provided package.json, just run yarn dev | npm run dev and send a post request to:
http://localhost:5001/PROJECT/FUNCTIONS_REGION/api

Environment
MacOS Monterey 12.6.7
node 18.16.0

Additional
{
"name": "backend",
"version": "0.0.2",
"private": true,
"scripts": {
"lint": "eslint --quiet --fix --ext .js,.ts .",
"build": "npm run lint && tsc && tsc-alias",
"watch": "concurrently "tsc -w" "tsc-alias -w"",
"dev": "tsc && tsc-alias && concurrently --kill-others "npm run watch" "firebase emulators:start --only functions"",
"shell": "npm run build && firebase functions:shell",
"start": "npm run shell",
"deploy": "firebase deploy -P dev --only functions",
"release": "firebase deploy -P prod --only functions"
},
"engines": {
"node": "18"
},
"main": "build/index.js",
"dependencies": {
"@tensorflow/tfjs": "^4.8.0",
"@tensorflow/tfjs-node": "^4.8.0",
"@vladmandic/face-api": "^1.7.12",
"axios": "^1.4.0",
"busboy": "^1.6.0",
"chromiumly": "^2.0.8",
"class-transformer": "^0.5.1",
"class-validator": "^0.14.0",
"cors": "^2.8.5",
"dochelper": "^1.1.1",
"express": "^4.18.2",
"firebase-admin": "^11.9.0",
"firebase-functions": "^4.4.1",
"jsonwebtoken": "^9.0.0",
"md5-file": "^5.0.0",
"nodemailer": "^6.9.3",
"pdf-lib": "^1.17.1",
"qrcode": "^1.5.3",
"reflect-metadata": "^0.1.13",
"uuid": "^9.0.0"
},
"devDependencies": {
"@types/axios": "^0.14.0",
"@types/busboy": "^1.5.0",
"@types/express": "^4.17.17",
"@types/jsonwebtoken": "^9.0.2",
"@types/node": "^20.3.1",
"@types/nodemailer": "^6.4.8",
"@types/qrcode": "^1.5.0",
"@types/uuid": "^9.0.2",
"@typescript-eslint/eslint-plugin": "^5.60.0",
"@typescript-eslint/parser": "^5.60.0",
"concurrently": "^8.2.0",
"eslint": "8.43.0",
"eslint-config-google": "^0.14.0",
"eslint-plugin-import": "^2.27.5",
"tsc-alias": "^1.8.6",
"typescript": "^5.1.3"
}
}

Sorry, I have no idea how nodejs GCP works; clearly it's not the same as a real nodejs environment.

GCP works with old faceapi, I've found this repository so I think your faceapi should work too.

But about those errors I've mentioned — do you know how to solve them?

I'm following this and this examples and having canvas type errors also:

async function getImage(
      input: Buffer
      ) {
      const img = await canvas.loadImage(input)
      const canva = canvas.createCanvas(img.width, img.height)
      const ctx = canva.getContext('2d')
      ctx.drawImage(img, 0, 0, img.width, img.height)
      return canva
    }

    const path = './models'
    const { Canvas, Image, ImageData } = canvas
    faceapi.env.monkeyPatch({Canvas, Image, ImageData})

    await faceapi.nets.ssdMobilenetv1.loadFromDisk( path )
    await faceapi.nets.faceLandmark68Net.loadFromDisk( path )
    await faceapi.nets.faceRecognitionNet.loadFromDisk( path )

    const faceDetectorOptions = new faceapi.SsdMobilenetv1Options( {
      maxResults: 2
    } )

    const selfie = await getImage( uploads[ 0 ].buffer )
    const results = await faceapi.detectAllFaces(
      selfie,
      faceDetectorOptions
    ).withFaceLandmarks()
      .withFaceDescriptors()

    console.log( 'faces found', results.length )

Captura de Tela 2023-07-26 às 16 26 15

Captura de Tela 2023-07-26 às 16 31 40

All those messages are about type mismatches that come from GCP; I have no idea why it's mismatching perfectly fine types. I never used GCP nodejs workers, nor do I plan to.

I don't think these type errors are related to GCP, because they happen locally. I think this is something related to TS. Anyway, I've managed to get rid of these errors by casting types as unknown.

I just have one more doubt: Is it possible to store face detection data to use them after in FaceMatcher?

E.g:

// stringify detection data
const detections = await detectAllFaces(
      selfie as unknown as TNetInput,
      faceDetectorOptions
    ).withFaceLandmarks()
      .withFaceDescriptors()

await saveToDb(
  JSON.stringify(detections)
)

// then parse it back 
const stringDetections = await getFromDb()
const detections = JSON.parse(stringDetections)
const faceMatcher = new FaceMatcher( detections )

I'm trying, but this leads to an error. Do you have a better solution?

its definitely possible and quite common use-case, search previous issues.

@vladmandic Thanks for your help. I've managed to get this working.

If anyone needs something like this here's a little example:

async function getDetections(
  input: Buffer,
  faceDetectorOptions: SsdMobilenetv1Options
): Promise<FaceDetectionResult> {
  // needs to require, importing will set tf to undefined
  const tf = require( '@tensorflow/tfjs-node' )
  const tensor = tf.node.decodeImage( input, 3 )
  const detections = await detectAllFaces(
    tensor as unknown as TNetInput,
    faceDetectorOptions
  ).withFaceLandmarks()
    .withFaceDescriptors()

  tf.dispose( tensor )
  return detections
}

// we will save descriptors in firestore so they need to be as string
const descriptors: string[] = []

// initialize and load detections models
....

// loop through images uploaded
for await ( const { buffer } of uploads ) {
      const detections = await getDetections(
        buffer,
        faceDetectorOptions
      )

      if ( isEmpty( detections ) ) // handle 0 faces detected
      if ( detections.length > 1 ) // handle multiple faces detected

      descriptors.push( detections[ 0 ].descriptor.toString() )
}

await firestore.doc<FaceMatcherDoc>( `faces/${ label }` )
      .set( { descriptors } )

/**
 *  now when you need to validate a face just restore stored face descriptors
 */

const faceMatcherDoc = await firestore
      .doc( `faces/${ label }` )
      .get()
    const { descriptors } = faceMatcherDoc.data()!
    const labeledDescriptors = new LabeledFaceDescriptors(
      label,
      // converts string back to Float32Array[]
      descriptors.map( descriptor => Float32Array.from(
        descriptor.split( "," ),
        parseFloat
      ) )
    )
    const faceMatcher = new FaceMatcher( labeledDescriptors )
    const match = faceMatcher.findBestMatch(
      detections[ 0 ].descriptor
    )
    const confidence = 1 - match.distance
    if ( confidence < parseFloat( FACE_MIN_CONFIDENCE ) )  // handle face do not match

    const stringDescriptor = detections[ 0 ].descriptor.toString()
    // add new descriptors to currently saved ones so detection will be more precise
    if ( !arrayContains(
      descriptors,
      stringDescriptor
    ) ) {
      await firestore.doc( `faces/${ label }` )
        .set( {
          descriptors: [
            ...descriptors,
            stringDescriptor
          ]
        } )
    }

@nonam4 do you happen to have a repo available showing this? Having trouble getting this to work