This document explains how to build and install TFLite for Android.
TensorFlow Lite is built with Bazel directly from source cloned from GitHub.
Installing Bazel
Linux
macOS
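As one quick option for Linux, the release installer can be used directly. This is a minimal sketch, assuming x86_64 and a user-local install; the version matches the 5.1.1 shown in the configure log below.
wget https://github.com/bazelbuild/bazel/releases/download/5.1.1/bazel-5.1.1-installer-linux-x86_64.sh
chmod +x bazel-5.1.1-installer-linux-x86_64.sh
./bazel-5.1.1-installer-linux-x86_64.sh --user   # installs into ~/bin
export PATH="$HOME/bin:$PATH"
bazel --version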
Cloning TensorFlow from the Git repository
# The Android GPU delegate does not work as of 2.8.0; 2.9.x or later is recommended
git clone -b v2.9.0 --single-branch https://github.com/tensorflow/tensorflow.git
Configuring the Bazel build
(base) nalbi@nalbi-desktop:~/tensorflow$ ./configure
You have bazel 5.1.1 installed.
Please specify the location of python. [Default is /home/nalbi/anaconda3/bin/python3]:
Found possible Python library paths:
/home/nalbi/anaconda3/lib/python3.9/site-packages
Please input the desired Python library path to use. Default is [/home/nalbi/anaconda3/lib/python3.9/site-packages]
Do you wish to build TensorFlow with ROCm support? [y/N]: n
No ROCm support will be enabled for TensorFlow.
Do you wish to build TensorFlow with CUDA support? [y/N]: n
No CUDA support will be enabled for TensorFlow.
Do you wish to download a fresh release of clang? (Experimental) [y/N]: n
Clang will not be downloaded.
Please specify optimization flags to use during compilation when bazel option "--config=opt" is specified [Default is -Wno-sign-compare]:-D_GLIBCXX_USE_CXX11_ABI=0 -O3
Would you like to interactively configure ./WORKSPACE for Android builds? [y/N]: y
Searching for NDK and SDK installations.
Please specify the home path of the Android NDK to use. [Default is /home/nalbi/Android/Sdk/ndk-bundle]: /home/nalbi/Android/Sdk/ndk/21.4.7075529
Please specify the (min) Android NDK API level to use. [Available levels: ['16', '17', '18', '19', '21', '22', '23', '24', '26', '27', '28', '29', '30']] [Default is 21]: 27
Please specify the home path of the Android SDK to use. [Default is /home/nalbi/Android/Sdk]:
Please specify the Android SDK API level to use. [Available levels: ['26', '32']] [Default is 32]:
Please specify an Android build tools version to use. [Available versions: ['30.0.3', '32.0.0', '32.1.0-rc1', '33.0.0-rc3']] [Default is 33.0.0-rc3]:
Preconfigured Bazel build configs. You can use any of the below by adding "--config=<>" to your build command. See .bazelrc for more details.
--config=mkl # Build with MKL support.
--config=mkl_aarch64 # Build with oneDNN and Compute Library for the Arm Architecture (ACL).
--config=monolithic # Config for mostly static monolithic build.
--config=numa # Build with NUMA support.
--config=dynamic_kernels # (Experimental) Build kernels into separate shared objects.
--config=v1 # Build with TensorFlow 1 API instead of TF 2 API.
Preconfigured Bazel build configs to DISABLE default on features:
--config=nogcp # Disable GCP support.
--config=nonccl # Disable NVIDIA NCCL support.
Configuration finished
• Set the Android NDK API level to 27 or higher if NNAPI will be used.
• In the Bazel optimization flags, -D_GLIBCXX_USE_CXX11_ABI must be set to 0 (as entered in the configure step above).
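To answer the NDK/SDK prompts above, the installed versions can be listed under the SDK directory (paths taken from the configure log; adjust to your own install location):
ls ~/Android/Sdk/ndk/          # installed NDK versions
ls ~/Android/Sdk/platforms/    # installed SDK API levels
ls ~/Android/Sdk/build-tools/  # installed build-tools versions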
Building TFLite for CPU
# arm64-v8a
bazel build -c opt --config=android_arm64 --cpu=arm64-v8a \
  --define tflite_with_xnnpack=true //tensorflow/lite/c:tensorflowlite_c
# armeabi-v7a
bazel build -c opt --config=android_arm --cpu=armeabi-v7a \
  --define tflite_with_xnnpack=true //tensorflow/lite/c:tensorflowlite_c
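The resulting shared library is placed in Bazel's standard bazel-bin output tree inside the tensorflow checkout; a quick sanity check of the file and its architecture:
file bazel-bin/tensorflow/lite/c/libtensorflowlite_c.so
# expect "ARM aarch64" for the arm64-v8a build and "ARM, EABI5" for the armeabi-v7a build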
Building TFLite for GPU (GPU delegate)
# arm64-v8a
bazel build -c opt --config=android_arm64 --cpu=arm64-v8a //tensorflow/lite/delegates/gpu:libtensorflowlite_gpu_delegate.so
# armeabi-v7a
bazel build -c opt --config=android_arm --cpu=armeabi-v7a //tensorflow/lite/delegates/gpu:libtensorflowlite_gpu_delegate.so
Building TFLite for NPU (NNAPI delegate)
# arm64-v8a
bazel build -c opt --config=android_arm64 --cpu=arm64-v8a //tensorflow/lite/delegates/nnapi:nnapi_delegate_no_nnapi_implementation
bazel build -c opt --config=android_arm64 --cpu=arm64-v8a //tensorflow/lite/nnapi:nnapi_implementation
bazel build -c opt --config=android_arm64 --cpu=arm64-v8a //tensorflow/lite/nnapi:nnapi_util
# armeabi-v7a
bazel build -c opt --config=android_arm --cpu=armeabi-v7a //tensorflow/lite/delegates/nnapi:nnapi_delegate_no_nnapi_implementation
bazel build -c opt --config=android_arm --cpu=armeabi-v7a //tensorflow/lite/nnapi:nnapi_implementation
bazel build -c opt --config=android_arm --cpu=armeabi-v7a //tensorflow/lite/nnapi:nnapi_util
Full script
Linux
• Running the script above places the header files and .so files into a tflite folder. Put the script inside the cloned tensorflow folder before running it (a rough sketch of the copy step is shown after this list).
• Change version_flag0 and version_flag1 according to the target Android processor (building and keeping both is recommended for now).
• On macOS the copy command differs, so rewrite it as follows:
# macOS has no cp -R --parents [SOURCE] [DIRECTORY], so use rsync -R [SOURCE] [DIRECTORY] instead.
# Change
# find tensorflow/lite -name \*.h -exec cp -R --parents {} ../tflite/headers/ \;
# to:
find tensorflow/lite -name \*.h -exec rsync -R {} ../tflite/headers/ \;
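For reference, here is a minimal sketch of what the copy step amounts to, run from inside the cloned tensorflow folder after the builds above. The attached script may differ in details such as the exact static-library file names under bazel-bin and the version_flag0/version_flag1 handling.
#!/bin/bash
ABI=arm64-v8a                  # or armeabi-v7a, matching the --cpu used above
OUT=../tflite
mkdir -p ${OUT}/headers ${OUT}/jni/${ABI}
# Headers (on macOS use the rsync variant shown above)
find tensorflow/lite -name \*.h -exec cp -R --parents {} ${OUT}/headers/ \;
# Shared libraries built by Bazel
cp bazel-bin/tensorflow/lite/c/libtensorflowlite_c.so ${OUT}/jni/${ABI}/
cp bazel-bin/tensorflow/lite/delegates/gpu/libtensorflowlite_gpu_delegate.so ${OUT}/jni/${ABI}/
# NNAPI static libraries (file names assumed; check bazel-bin for the exact names)
cp bazel-bin/tensorflow/lite/delegates/nnapi/libnnapi_delegate_no_nnapi_implementation.a ${OUT}/jni/${ABI}/
cp bazel-bin/tensorflow/lite/nnapi/libnnapi_implementation.a ${OUT}/jni/${ABI}/
cp bazel-bin/tensorflow/lite/nnapi/libnnapi_util.a ${OUT}/jni/${ABI}/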
• Setting up CMakeLists.txt
# For more information about using CMake with Android Studio, read the
# documentation: https://d.android.com/studio/projects/add-native-code.html
# Sets the minimum version of CMake required to build the native library.
cmake_minimum_required(VERSION 3.18.1)
# Declares and names the project.
project("projectname")
# ------------------------
# TFLITE
# ------------------------
get_filename_component(TFLITE_ROOT "${CMAKE_SOURCE_DIR}/../../../../tflite" ABSOLUTE)
set(TFLITE_INCLUDE_DIR "${TFLITE_ROOT}/headers")
set(TFLITE_LIBRARY_DIR "${TFLITE_ROOT}/jni/${CMAKE_ANDROID_ARCH_ABI}")
include_directories(${TFLITE_INCLUDE_DIR})
link_directories(${TFLITE_LIBRARY_DIR})
message(STATUS "
# -----------------------------------------------
CMAKE_SOURCE_DIR : ${CMAKE_SOURCE_DIR}
CMAKE_ANDROID_ARCH_ABI : ${CMAKE_ANDROID_ARCH_ABI}
TFLITE_LIBRARY_DIR: ${TFLITE_LIBRARY_DIR}
# -----------------------------------------------
")
add_library( # Sets the name of the library.
projectname
# Sets the library as a shared library.
SHARED
# Provides a relative path to your source file(s)
# (native-lib.cpp is a placeholder; list your own C++ sources here).
native-lib.cpp)
find_library( # Sets the name of the path variable.
log-lib
# Specifies the name of the NDK library that
# you want CMake to locate.
log)
find_library(
android-lib
android)
# STATIC LIB
add_library(
nnapi_delegate_no_nnapi_implementation
STATIC
IMPORTED)
set_target_properties(
nnapi_delegate_no_nnapi_implementation
PROPERTIES IMPORTED_LOCATION
${TFLITE_LIBRARY_DIR}/libnnapi_delegate_no_nnapi_implementation.a
)
target_link_libraries( # Specifies the target library.
projectname
# TFLite
tensorflowlite_c
tensorflowlite_gpu_delegate
# TFLite NNAPI
nnapi_delegate_no_nnapi_implementation
nnapi_implementation
nnapi_util
# NDK libraries located with find_library above
${log-lib}
${android-lib})
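Note that the file above declares an IMPORTED target only for nnapi_delegate_no_nnapi_implementation while linking all three NNAPI archives; the other two are resolved through link_directories. If you prefer explicit IMPORTED targets for them as well, a sketch (assuming the lib<name>.a files were copied into the same jni folder) placed next to the existing IMPORTED block would be:
# Optional: explicit IMPORTED targets for the remaining NNAPI archives
# (file names are assumed to follow the lib<target>.a convention used above).
add_library(
nnapi_implementation
STATIC
IMPORTED)
set_target_properties(
nnapi_implementation
PROPERTIES IMPORTED_LOCATION
${TFLITE_LIBRARY_DIR}/libnnapi_implementation.a
)
add_library(
nnapi_util
STATIC
IMPORTED)
set_target_properties(
nnapi_util
PROPERTIES IMPORTED_LOCATION
${TFLITE_LIBRARY_DIR}/libnnapi_util.a
)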
CMakeLists.txt example
After setting up CMakeLists.txt, add the following code under defaultConfig in the build.gradle(:app) file.
externalNativeBuild {
cmake {
cppFlags "-std=c++14",
"-O3",
"-fopenmp -static-openmp",
"-mfpu=neon"
arguments "-DANDROID_STL=c++_shared",
"-DANDROID_TOOLCHAIN=clang",
"-DANDROID_ARM_NEON=TRUE", // use neon
"-DCMAKE_BUILD_TYPE=Debug"
}
}
ndk {
// Specifies the ABI configurations of your native
// libraries Gradle should build and package with your app.
// abiFilters 'x86', 'x86_64', 'armeabi', 'armeabi-v7a', 'arm64-v8a'
// reference : https://developer.android.com/ndk/guides/cmake#android_abi
abiFilters 'armeabi-v7a', 'arm64-v8a'
}
Adding the ndk block above filters which ABIs Gradle builds and packages, which in turn determines the ${CMAKE_ANDROID_ARCH_ABI} value used in CMakeLists.txt. Since we target Android devices, we filter to armeabi-v7a and arm64-v8a only.
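For Gradle to actually run this CMakeLists.txt, the module-level build.gradle also needs an externalNativeBuild block at the android level that points to the file. The path below is an assumption, so adjust it to wherever your CMakeLists.txt lives:
android {
    // ...
    externalNativeBuild {
        cmake {
            // assumed location; point this at the CMakeLists.txt shown above
            path file('src/main/cpp/CMakeLists.txt')
            version '3.18.1'
        }
    }
}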
Next, create a jniLibs folder under app > src > main and move the contents of the tflite > jni folder into jniLibs.
static {
// tflite
System.loadLibrary("tensorflowlite_c");
System.loadLibrary("tensorflowlite_gpu_delegate");
System.loadLibrary("nnapi_implementation");
}
Once all of the above steps are done, the C++ libraries can be loaded and called from Java as shown above.
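If your own JNI code is compiled into the projectname library defined in CMakeLists.txt, a typical bridge class would also load that library and declare the native entry points. The class and method names below are only a hypothetical sketch, not part of the original setup:
public class TfliteBridge {
    static {
        // prebuilt TFLite libraries from jniLibs
        System.loadLibrary("tensorflowlite_c");
        System.loadLibrary("tensorflowlite_gpu_delegate");
        System.loadLibrary("nnapi_implementation");
        // the library built from CMakeLists.txt above
        System.loadLibrary("projectname");
    }

    // hypothetical JNI entry point implemented in the C++ sources of "projectname"
    public static native boolean initInterpreter(String modelPath);
}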