diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..be058e0
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,19 @@
+*.iml
+.gradle
+/local.properties
+/.idea/caches
+/.idea/libraries
+/.idea/modules.xml
+/.idea/workspace.xml
+/.idea/navEditor.xml
+/.idea/assetWizardSettings.xml
+.DS_Store
+/build
+/captures
+.externalNativeBuild
+.cxx
+local.properties
+
+# Project exclude paths
+/openCVLibrary3413/build/
+/openCVLibrary3413/build/intermediates/javac/debug/classes/
\ No newline at end of file
diff --git a/.idea/.gitignore b/.idea/.gitignore
new file mode 100644
index 0000000..26d3352
--- /dev/null
+++ b/.idea/.gitignore
@@ -0,0 +1,3 @@
+# Default ignored files
+/shelf/
+/workspace.xml
diff --git a/.idea/.name b/.idea/.name
new file mode 100644
index 0000000..9baa76b
--- /dev/null
+++ b/.idea/.name
@@ -0,0 +1 @@
+ASLR
\ No newline at end of file
diff --git a/.idea/compiler.xml b/.idea/compiler.xml
new file mode 100644
index 0000000..61a9130
--- /dev/null
+++ b/.idea/compiler.xml
@@ -0,0 +1,6 @@
+ + + + + +
\ No newline at end of file
diff --git a/.idea/gradle.xml b/.idea/gradle.xml
new file mode 100644
index 0000000..7253a67
--- /dev/null
+++ b/.idea/gradle.xml
@@ -0,0 +1,21 @@
+ + + + + +
\ No newline at end of file
diff --git a/.idea/jarRepositories.xml b/.idea/jarRepositories.xml
new file mode 100644
index 0000000..a5f05cd
--- /dev/null
+++ b/.idea/jarRepositories.xml
@@ -0,0 +1,25 @@
+ + + + + + + + + + + +
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
new file mode 100644
index 0000000..613b1dd
--- /dev/null
+++ b/.idea/misc.xml
@@ -0,0 +1,19 @@
+ + + + + + + + + +
\ No newline at end of file
diff --git a/app/.gitignore b/app/.gitignore
new file mode 100644
index 0000000..956c004
--- /dev/null
+++ b/app/.gitignore
@@ -0,0 +1,2 @@
+/build
+/release
\ No newline at end of file
diff --git a/app/build.gradle b/app/build.gradle
new file mode 100644
index 0000000..c42f743
--- /dev/null
+++ b/app/build.gradle
@@ -0,0 +1,62 @@
+plugins {
+    id 'com.android.application'
+}
+
+android {
+    compileSdkVersion 30
+    buildToolsVersion "30.0.2"
+
+    defaultConfig {
+        applicationId "com.amaan.aslr"
+        minSdkVersion 21
+        targetSdkVersion 30
+        versionCode 1
+        versionName "1.0"
+
+        testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
+    }
+
+    buildTypes {
+        release {
+            minifyEnabled false
+            proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro'
+        }
+    }
+    compileOptions {
+        sourceCompatibility JavaVersion.VERSION_1_8
+        targetCompatibility JavaVersion.VERSION_1_8
+    }
+    sourceSets {
+        main {
+            // Bundle the prebuilt OpenCV .so files from src/main/jnilibs.
+            // Forward slashes keep the build host-independent, and jniLibs
+            // (not the jni source set) is what packages shared libraries.
+            jniLibs.srcDirs = ['src/main/jnilibs']
+        }
+    }
+
+    aaptOptions {
+        // TFLite models must stay uncompressed so they can be memory-mapped
+        // straight out of the APK (see loadModelFile in ObjectDetector).
+        noCompress "tflite"
+        noCompress "lite"
+    }
+    buildFeatures {
+        mlModelBinding true
+    }
+}
+
+dependencies {
+    implementation 'org.tensorflow:tensorflow-lite-metadata:0.1.0-rc1'
+    implementation 'org.tensorflow:tensorflow-lite-gpu:2.2.0'
+    implementation 'org.tensorflow:tensorflow-lite-support:0.1.0'
+    implementation 'org.tensorflow:tensorflow-lite-task-vision:0.1.0'
+    implementation 'org.tensorflow:tensorflow-lite-task-text:0.1.0'
+
+    implementation 'androidx.appcompat:appcompat:1.2.0'
+    implementation 'com.google.android.material:material:1.3.0'
+    implementation 'androidx.constraintlayout:constraintlayout:2.0.4'
+    implementation project(path: ':openCVLibrary3413')
+    testImplementation 'junit:junit:4.+'
+    androidTestImplementation 'androidx.test.ext:junit:1.1.2'
+    androidTestImplementation 'androidx.test.espresso:espresso-core:3.3.0'
+}
\ No newline at end of file
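A note on the tensorflow-lite-gpu dependency: ObjectDetector and SignLanguage below attach a GpuDelegate unconditionally, and interpreter creation can throw on devices without a usable GPU driver. A minimal sketch of a CPU fallback (the helper class and its name are hypothetical, not part of this diff):

import java.nio.ByteBuffer;
import org.tensorflow.lite.Interpreter;
import org.tensorflow.lite.gpu.GpuDelegate;

final class InterpreterFactory {
    // Sketch: try the GPU delegate first; fall back to CPU threads if the
    // delegate or interpreter cannot be created on this device.
    static Interpreter create(ByteBuffer model) {
        try {
            Interpreter.Options gpu = new Interpreter.Options();
            gpu.addDelegate(new GpuDelegate());
            return new Interpreter(model, gpu);
        } catch (RuntimeException e) {
            Interpreter.Options cpu = new Interpreter.Options();
            cpu.setNumThreads(4); // CPU-only fallback
            return new Interpreter(model, cpu);
        }
    }
}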
diff --git a/app/proguard-rules.pro b/app/proguard-rules.pro
new file mode 100644
index 0000000..481bb43
--- /dev/null
+++ b/app/proguard-rules.pro
@@ -0,0 +1,21 @@
+# Add project specific ProGuard rules here.
+# You can control the set of applied configuration files using the
+# proguardFiles setting in build.gradle.
+#
+# For more details, see
+#   http://developer.android.com/guide/developing/tools/proguard.html
+
+# If your project uses WebView with JS, uncomment the following
+# and specify the fully qualified class name to the JavaScript interface
+# class:
+#-keepclassmembers class fqcn.of.javascript.interface.for.webview {
+#   public *;
+#}
+
+# Uncomment this to preserve the line number information for
+# debugging stack traces.
+#-keepattributes SourceFile,LineNumberTable
+
+# If you keep the line number information, uncomment this to
+# hide the original source file name.
+#-renamesourcefileattribute SourceFile
\ No newline at end of file
diff --git a/app/src/androidTest/java/com/amaan/aslr/ExampleInstrumentedTest.java b/app/src/androidTest/java/com/amaan/aslr/ExampleInstrumentedTest.java
new file mode 100644
index 0000000..d9be4e6
--- /dev/null
+++ b/app/src/androidTest/java/com/amaan/aslr/ExampleInstrumentedTest.java
@@ -0,0 +1,26 @@
+package com.amaan.aslr;
+
+import android.content.Context;
+
+import androidx.test.platform.app.InstrumentationRegistry;
+import androidx.test.ext.junit.runners.AndroidJUnit4;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+
+import static org.junit.Assert.*;
+
+/**
+ * Instrumented test, which will execute on an Android device.
+ *
+ * @see <a href="http://d.android.com/tools/testing">Testing documentation</a>
+ */
+@RunWith(AndroidJUnit4.class)
+public class ExampleInstrumentedTest {
+    @Test
+    public void useAppContext() {
+        // Context of the app under test.
+        Context appContext = InstrumentationRegistry.getInstrumentation().getTargetContext();
+        assertEquals("com.amaan.aslr", appContext.getPackageName());
+    }
+}
\ No newline at end of file
diff --git a/app/src/main/AndroidManifest.xml b/app/src/main/AndroidManifest.xml
new file mode 100644
index 0000000..559de40
--- /dev/null
+++ b/app/src/main/AndroidManifest.xml
@@ -0,0 +1,40 @@
+ + + + + + + + + + + + + + + + + + + + +
\ No newline at end of file
diff --git a/app/src/main/assets/Sign_lang_model.tflite b/app/src/main/assets/Sign_lang_model.tflite
new file mode 100644
index 0000000..0ca5f86
Binary files /dev/null and b/app/src/main/assets/Sign_lang_model.tflite differ
diff --git a/app/src/main/assets/custom_label.txt b/app/src/main/assets/custom_label.txt
new file mode 100644
index 0000000..9bf91a1
--- /dev/null
+++ b/app/src/main/assets/custom_label.txt
@@ -0,0 +1 @@
+hand
\ No newline at end of file
diff --git a/app/src/main/assets/hand_model.tflite b/app/src/main/assets/hand_model.tflite
new file mode 100644
index 0000000..5770150
Binary files /dev/null and b/app/src/main/assets/hand_model.tflite differ
diff --git a/app/src/main/java/com/amaan/aslr/CameraActivity.java b/app/src/main/java/com/amaan/aslr/CameraActivity.java
new file mode 100644
index 0000000..a96093e
--- /dev/null
+++ b/app/src/main/java/com/amaan/aslr/CameraActivity.java
@@ -0,0 +1,141 @@
+package com.amaan.aslr;
+
+import android.Manifest;
+import android.app.Activity;
+import android.content.pm.PackageManager;
+import android.os.Bundle;
+import android.util.Log;
+import android.view.SurfaceView;
+import android.view.Window;
+import android.view.WindowManager;
+
+import androidx.core.app.ActivityCompat;
+import androidx.core.content.ContextCompat;
+
+import org.opencv.android.BaseLoaderCallback;
+import org.opencv.android.CameraBridgeViewBase;
+import org.opencv.android.LoaderCallbackInterface;
+import org.opencv.android.OpenCVLoader;
+import org.opencv.core.CvType;
+import org.opencv.core.Mat;
+
+import java.io.IOException;
+
+public class CameraActivity extends Activity implements CameraBridgeViewBase.CvCameraViewListener2 {
+    private static final String TAG = "CameraActivity";
+
+    private Mat mRgba;
+    private Mat mGray;
+    private CameraBridgeViewBase mOpenCvCameraView;
+    private ObjectDetector objectDetectorClass;
+    private final BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
+        @Override
+        public void onManagerConnected(int status) {
+            switch (status) {
+                case LoaderCallbackInterface.SUCCESS: {
+                    Log.i(TAG, "OpenCV loaded successfully");
+                    mOpenCvCameraView.enableView();
+                }
+                break;
+                default: {
+                    super.onManagerConnected(status);
+                }
+                break;
+            }
+        }
+    };
+
+    public CameraActivity() {
+        Log.i(TAG, "Instantiated new " + this.getClass());
+    }
+
+    @Override
+    protected void onCreate(Bundle savedInstanceState) {
+        super.onCreate(savedInstanceState);
+        requestWindowFeature(Window.FEATURE_NO_TITLE);
+        getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
+
+        int MY_PERMISSIONS_REQUEST_CAMERA = 0;
+        // Ask for the camera permission at runtime if it has not been granted yet.
+        if (ContextCompat.checkSelfPermission(CameraActivity.this, Manifest.permission.CAMERA)
+                == PackageManager.PERMISSION_DENIED) {
+            ActivityCompat.requestPermissions(CameraActivity.this, new String[]{Manifest.permission.CAMERA}, MY_PERMISSIONS_REQUEST_CAMERA);
+        }
+
+        setContentView(R.layout.activity_camera);
+
+        mOpenCvCameraView = findViewById(R.id.frame_Surface);
+        mOpenCvCameraView.setVisibility(SurfaceView.VISIBLE);
+        mOpenCvCameraView.setCvCameraViewListener(this);
+
+        try {
+            // The hand detector (hand_model.tflite, labels in custom_label.txt)
+            // expects 300x300 input; the sign-language classifier
+            // (Sign_lang_model.tflite) expects 96x96 crops.
+            objectDetectorClass = new ObjectDetector(getAssets(), "hand_model.tflite", "custom_label.txt", 300, "Sign_lang_model.tflite", 96);
+            Log.d(TAG, "Models loaded successfully");
+        } catch (IOException e) {
+            Log.e(TAG, "Failed to load TFLite models", e);
+        }
+    }
+
+    @Override
+    protected void onResume() {
+        super.onResume();
+        if (OpenCVLoader.initDebug()) {
+            Log.d(TAG, "OpenCV initialized from the bundled library");
+            mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
+        } else {
+            Log.d(TAG, "OpenCV not found locally; falling back to OpenCV Manager");
+            OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION, this, mLoaderCallback);
+        }
+    }
+
+    @Override
+    protected void onPause() {
+        super.onPause();
+        if (mOpenCvCameraView != null) {
+            mOpenCvCameraView.disableView();
+        }
+    }
+
+    public void onDestroy() {
+        super.onDestroy();
+        if (mOpenCvCameraView != null) {
+            mOpenCvCameraView.disableView();
+        }
+    }
+
+    public void onCameraViewStarted(int width, int height) {
+        mRgba = new Mat(height, width, CvType.CV_8UC4);
+        mGray = new Mat(height, width, CvType.CV_8UC1);
+    }
+
+    public void onCameraViewStopped() {
+        mRgba.release();
+        mGray.release();
+    }
+
+    public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
+        mRgba = inputFrame.rgba();
+        mGray = inputFrame.gray();
+        if (objectDetectorClass == null) {
+            return mRgba; // models failed to load; show the raw frame
+        }
+        // Run detection + classification and return the annotated frame.
+        return objectDetectorClass.recognizeImage(mRgba);
+    }
+}
\ No newline at end of file
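One gap worth noting in CameraActivity: it requests the CAMERA permission but never overrides onRequestPermissionsResult, so on a first launch the preview can stay black until the screen is reopened. A sketch of the missing override (assuming request code 0, as above):

+    // Sketch: restart the preview once the user grants the CAMERA permission.
+    @Override
+    public void onRequestPermissionsResult(int requestCode, String[] permissions, int[] grantResults) {
+        super.onRequestPermissionsResult(requestCode, permissions, grantResults);
+        if (requestCode == 0 && grantResults.length > 0
+                && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
+            mOpenCvCameraView.enableView();
+        }
+    }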
diff --git a/app/src/main/java/com/amaan/aslr/CombineLettersActivity.java b/app/src/main/java/com/amaan/aslr/CombineLettersActivity.java
new file mode 100644
index 0000000..96fa9ca
--- /dev/null
+++ b/app/src/main/java/com/amaan/aslr/CombineLettersActivity.java
@@ -0,0 +1,159 @@
+package com.amaan.aslr;
+
+import android.Manifest;
+import android.app.Activity;
+import android.content.pm.PackageManager;
+import android.os.Bundle;
+import android.util.Log;
+import android.view.SurfaceView;
+import android.view.Window;
+import android.view.WindowManager;
+import android.widget.Button;
+import android.widget.TextView;
+
+import androidx.core.app.ActivityCompat;
+import androidx.core.content.ContextCompat;
+
+import org.opencv.android.BaseLoaderCallback;
+import org.opencv.android.CameraBridgeViewBase;
+import org.opencv.android.LoaderCallbackInterface;
+import org.opencv.android.OpenCVLoader;
+import org.opencv.core.CvType;
+import org.opencv.core.Mat;
+
+import java.io.IOException;
+
+public class CombineLettersActivity extends Activity implements CameraBridgeViewBase.CvCameraViewListener2 {
+    private static final String TAG = "CombineLettersActivity";
+
+    private Mat mRgba;
+    private Mat mGray;
+    private CameraBridgeViewBase mOpenCvCameraView;
+    private SignLanguage signLanguageClass;
+
+    // Add and Clear buttons for building up a word letter by letter.
+    private Button clear_button;
+    private Button add_button;
+    // TextView that shows the combined letters.
+    private TextView change_text;
+    // Button that speaks the combined text aloud.
+    private Button text_speech_button;
+
+    private final BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
+        @Override
+        public void onManagerConnected(int status) {
+            switch (status) {
+                case LoaderCallbackInterface.SUCCESS: {
+                    Log.i(TAG, "OpenCV loaded successfully");
+                    mOpenCvCameraView.enableView();
+                }
+                break;
+                default: {
+                    super.onManagerConnected(status);
+                }
+                break;
+            }
+        }
+    };
+
+    public CombineLettersActivity() {
+        Log.i(TAG, "Instantiated new " + this.getClass());
+    }
+
+    @Override
+    protected void onCreate(Bundle savedInstanceState) {
+        super.onCreate(savedInstanceState);
+        requestWindowFeature(Window.FEATURE_NO_TITLE);
+        getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
+
+        int MY_PERMISSIONS_REQUEST_CAMERA = 0;
+        // Ask for the camera permission at runtime if it has not been granted yet.
+        if (ContextCompat.checkSelfPermission(CombineLettersActivity.this, Manifest.permission.CAMERA)
+                == PackageManager.PERMISSION_DENIED) {
+            ActivityCompat.requestPermissions(CombineLettersActivity.this, new String[]{Manifest.permission.CAMERA}, MY_PERMISSIONS_REQUEST_CAMERA);
+        }
+
+        setContentView(R.layout.activity_combine_letters);
+
+        mOpenCvCameraView = findViewById(R.id.frame_Surface);
+        mOpenCvCameraView.setVisibility(SurfaceView.VISIBLE);
+        mOpenCvCameraView.setCvCameraViewListener(this);
+        clear_button = findViewById(R.id.clear_button);
+        add_button = findViewById(R.id.add_button);
+        change_text = findViewById(R.id.change_text);
+        text_speech_button = findViewById(R.id.text_speech_button);
+
+        try {
+            // SignLanguage drives the buttons and the TextView directly, so they
+            // are passed into its constructor along with a Context for
+            // TextToSpeech. A callback interface would decouple this more
+            // cleanly; passing the views keeps the implementation short.
+            signLanguageClass = new SignLanguage(CombineLettersActivity.this, clear_button, add_button, change_text, text_speech_button, getAssets(), "hand_model.tflite", "custom_label.txt", 300, "Sign_lang_model.tflite", 96);
+            Log.d(TAG, "Models loaded successfully");
+        } catch (IOException e) {
+            Log.e(TAG, "Failed to load TFLite models", e);
+        }
+    }
+
+    @Override
+    protected void onResume() {
+        super.onResume();
+        if (OpenCVLoader.initDebug()) {
+            Log.d(TAG, "OpenCV initialized from the bundled library");
+            mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
+        } else {
+            Log.d(TAG, "OpenCV not found locally; falling back to OpenCV Manager");
+            OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION, this, mLoaderCallback);
+        }
+    }
+
+    @Override
+    protected void onPause() {
+        super.onPause();
+        if (mOpenCvCameraView != null) {
+            mOpenCvCameraView.disableView();
+        }
+    }
+
+    public void onDestroy() {
+        super.onDestroy();
+        if (mOpenCvCameraView != null) {
+            mOpenCvCameraView.disableView();
+        }
+    }
+
+    public void onCameraViewStarted(int width, int height) {
+        mRgba = new Mat(height, width, CvType.CV_8UC4);
+        mGray = new Mat(height, width, CvType.CV_8UC1);
+    }
+
+    public void onCameraViewStopped() {
+        mRgba.release();
+        mGray.release();
+    }
+
+    public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
+        mRgba = inputFrame.rgba();
+        mGray = inputFrame.gray();
+        if (signLanguageClass == null) {
+            return mRgba; // models failed to load; show the raw frame
+        }
+        return signLanguageClass.recognizeImage(mRgba);
+    }
+}
\ No newline at end of file
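The TextToSpeech engine created inside SignLanguage is never released. A sketch of a cleanup hook (shutdown() is a hypothetical method to add to SignLanguage, not part of this diff):

+    // Sketch: release the TTS engine when the activity goes away.
+    @Override
+    public void onDestroy() {
+        super.onDestroy();
+        if (mOpenCvCameraView != null) {
+            mOpenCvCameraView.disableView();
+        }
+        if (signLanguageClass != null) {
+            signLanguageClass.shutdown();
+        }
+    }

...and in SignLanguage:

+    public void shutdown() {
+        if (tts != null) {
+            tts.stop();
+            tts.shutdown();
+        }
+    }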
try again"); + OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION, this, mLoaderCallback); + } + } + + @Override + protected void onPause() { + super.onPause(); + if (mOpenCvCameraView != null) { + mOpenCvCameraView.disableView(); + } + } + + public void onDestroy() { + super.onDestroy(); + if (mOpenCvCameraView != null) { + mOpenCvCameraView.disableView(); + } + + } + + public void onCameraViewStarted(int width, int height) { + mRgba = new Mat(height, width, CvType.CV_8UC4); + mGray = new Mat(height, width, CvType.CV_8UC1); + } + + public void onCameraViewStopped() { + mRgba.release(); + } + + public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) { + mRgba = inputFrame.rgba(); + mGray = inputFrame.gray(); + // Before watching this video please watch previous video of loading tensorflow lite model + + // now call that function + Mat out = new Mat(); + out = signLanguageClass.recognizeImage(mRgba); + + return out; + } + +} \ No newline at end of file diff --git a/app/src/main/java/com/amaan/aslr/MainActivity.java b/app/src/main/java/com/amaan/aslr/MainActivity.java new file mode 100644 index 0000000..ddddda9 --- /dev/null +++ b/app/src/main/java/com/amaan/aslr/MainActivity.java @@ -0,0 +1,52 @@ +package com.amaan.aslr; + +import android.content.Intent; +import android.os.Bundle; +import android.util.Log; +import android.view.View; +import android.widget.Button; + +import androidx.appcompat.app.AppCompatActivity; + +import org.opencv.android.OpenCVLoader; + +public class MainActivity extends AppCompatActivity { + static { + if (OpenCVLoader.initDebug()) { + Log.d("MainActivity: ", "Opencv is loaded"); + } else { + Log.d("MainActivity: ", "Opencv failed to load"); + } + } + + private Button camera_button; + private Button combine_letter_button; + + @Override + protected void onCreate(Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + setContentView(R.layout.activity_main); + + + camera_button = findViewById(R.id.camera_button); + camera_button.setOnClickListener(new View.OnClickListener() { + @Override + public void onClick(View v) { + startActivity(new Intent(MainActivity.this, CameraActivity.class).addFlags(Intent.FLAG_ACTIVITY_CLEAR_TASK | Intent.FLAG_ACTIVITY_CLEAR_TOP)); + } + }); + + + combine_letter_button = findViewById(R.id.combine_letter_button); + combine_letter_button.setOnClickListener(new View.OnClickListener() { + @Override + public void onClick(View view) { + //when this button is clicked, navigate to CombineLettersActivity + //before that, we have to create CombineLettersActivity + // now add startActivity + startActivity(new Intent(MainActivity.this, CombineLettersActivity.class).addFlags(Intent.FLAG_ACTIVITY_CLEAR_TASK | Intent.FLAG_ACTIVITY_CLEAR_TOP)); + //now we create a copy of cameraActivity + } + }); + } +} \ No newline at end of file diff --git a/app/src/main/java/com/amaan/aslr/ObjectDetector.java b/app/src/main/java/com/amaan/aslr/ObjectDetector.java new file mode 100644 index 0000000..9f829f6 --- /dev/null +++ b/app/src/main/java/com/amaan/aslr/ObjectDetector.java @@ -0,0 +1,388 @@ +package com.amaan.aslr; + +import android.content.res.AssetFileDescriptor; +import android.content.res.AssetManager; +import android.graphics.Bitmap; +import android.util.Log; + +import org.opencv.android.Utils; +import org.opencv.core.Core; +import org.opencv.core.Mat; +import org.opencv.core.Point; +import org.opencv.core.Rect; +import org.opencv.core.Scalar; +import org.opencv.imgproc.Imgproc; +import org.tensorflow.lite.Interpreter; +import 
diff --git a/app/src/main/java/com/amaan/aslr/ObjectDetector.java b/app/src/main/java/com/amaan/aslr/ObjectDetector.java
new file mode 100644
index 0000000..9f829f6
--- /dev/null
+++ b/app/src/main/java/com/amaan/aslr/ObjectDetector.java
@@ -0,0 +1,388 @@
+package com.amaan.aslr;
+
+import android.content.res.AssetFileDescriptor;
+import android.content.res.AssetManager;
+import android.graphics.Bitmap;
+import android.util.Log;
+
+import org.opencv.android.Utils;
+import org.opencv.core.Core;
+import org.opencv.core.Mat;
+import org.opencv.core.Point;
+import org.opencv.core.Rect;
+import org.opencv.core.Scalar;
+import org.opencv.imgproc.Imgproc;
+import org.tensorflow.lite.Interpreter;
+import org.tensorflow.lite.gpu.GpuDelegate;
+
+import java.io.BufferedReader;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.lang.reflect.Array;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.channels.FileChannel;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.TreeMap;
+
+public class ObjectDetector {
+    // Interpreter for the hand-detection model.
+    private final Interpreter interp;
+    // Second interpreter for the sign-language classification model.
+    private final Interpreter interp2;
+    // Detector labels, one per line of custom_label.txt.
+    private final List<String> labelList;
+    private final int INPUT_SIZE;
+    private final int PIXEL_SIZE = 3; // RGB channels
+    private final int IMAGE_MEAN = 0;
+    private final float IMAGE_STD = 255.0f;
+    // GPU delegate used to accelerate the detector.
+    private final GpuDelegate graphicsProcessingUnitDelegate;
+    private int height = 0;
+    private int width = 0;
+    private int Classification_Input_Size = 0;
+
+    ObjectDetector(AssetManager assetManager, String modelPath, String labelPath, int inputSize, String classification_model, int classification_input_size) throws IOException {
+        INPUT_SIZE = inputSize;
+        Classification_Input_Size = classification_input_size;
+        // Detector options: GPU delegate plus four CPU threads.
+        // Tune the thread count for the target device.
+        Interpreter.Options options = new Interpreter.Options();
+        graphicsProcessingUnitDelegate = new GpuDelegate();
+        options.addDelegate(graphicsProcessingUnitDelegate);
+        options.setNumThreads(4);
+        interp = new Interpreter(loadModelFile(assetManager, modelPath), options);
+        labelList = loadLabelList(assetManager, labelPath);
+
+        // Classifier options: CPU only, two threads.
+        Interpreter.Options options2 = new Interpreter.Options();
+        options2.setNumThreads(2);
+        interp2 = new Interpreter(loadModelFile(assetManager, classification_model), options2);
+    }
+
+    private List<String> loadLabelList(AssetManager assetManager, String labelPath) throws IOException {
+        List<String> labelList = new ArrayList<>();
+        BufferedReader reader = new BufferedReader(new InputStreamReader(assetManager.open(labelPath)));
+        String line;
+        // Read one label per line.
+        while ((line = reader.readLine()) != null) {
+            labelList.add(line);
+        }
+        reader.close();
+        return labelList;
+    }
+
+    private ByteBuffer loadModelFile(AssetManager assetManager, String modelPath) throws IOException {
+        // Memory-map the model straight out of the APK. This only works because
+        // build.gradle marks .tflite assets as noCompress.
+        AssetFileDescriptor fileDescriptor = assetManager.openFd(modelPath);
+        FileInputStream inputStream = new FileInputStream(fileDescriptor.getFileDescriptor());
+        FileChannel fileChannel = inputStream.getChannel();
+        long startOffset = fileDescriptor.getStartOffset();
+        long declaredLength = fileDescriptor.getDeclaredLength();
+        return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength);
+    }
+
+    public Mat recognizeImage(Mat mat_image) {
+        // The camera delivers landscape frames; transpose + horizontal flip
+        // rotates the frame 90 degrees into portrait orientation. Skipping
+        // this step degrades detection badly, because the model sees
+        // sideways hands.
+        Mat rotated_mat_image = new Mat();
+        Mat a = mat_image.t();
+        Core.flip(a, rotated_mat_image, 1);
+        a.release();
+
+        // Convert to a bitmap for scaling and pixel access.
+        Bitmap bitmap = Bitmap.createBitmap(rotated_mat_image.cols(), rotated_mat_image.rows(), Bitmap.Config.ARGB_8888);
+        Utils.matToBitmap(rotated_mat_image, bitmap);
+        height = bitmap.getHeight();
+        width = bitmap.getWidth();
+
+        // Scale the frame down to the detector's input size.
+        Bitmap scaledBitmap = Bitmap.createScaledBitmap(bitmap, INPUT_SIZE, INPUT_SIZE, false);
+
+        // The model takes a float ByteBuffer rather than a Bitmap.
+        ByteBuffer byteBuffer = convertBitmapToByteBuffer(scaledBitmap);
+
+        Object[] input = new Object[1];
+        input[0] = byteBuffer;
+
+        // The detector has three outputs, gathered into one map.
+        Map<Integer, Object> output_map = new TreeMap<>();
+        float[][][] boxes = new float[1][10][4]; // top 10 boxes, 4 coordinates each
+        float[][] scores = new float[1][10];     // confidence per box
+        float[][] classes = new float[1][10];    // class index per box
+        output_map.put(0, boxes);
+        output_map.put(1, classes);
+        output_map.put(2, scores);
+
+        // Run detection.
+        interp.runForMultipleInputsOutputs(input, output_map);
+
+        Object value = output_map.get(0);
+        Object Object_class = output_map.get(1);
+        Object score = output_map.get(2);
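+        // Output layout (standard TFLite SSD detection postprocess; assumed
+        // for this model, which matches its [1][10][4] box shape):
+        //   boxes[0][i]   = {ymin, xmin, ymax, xmax}, normalized to [0, 1]
+        //   classes[0][i] = index into custom_label.txt
+        //   scores[0][i]  = detection confidence in [0, 1]
+        // The loop below rescales the normalized coordinates by the frame size.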
+        // Loop over the 10 candidate detections.
+        for (int i = 0; i < 10; i++) {
+            float class_value = (float) Array.get(Array.get(Object_class, 0), i);
+            float score_value = (float) Array.get(Array.get(score, 0), i);
+            // Confidence threshold; tune per model.
+            if (score_value > 0.5) {
+                Object box1 = Array.get(Array.get(value, 0), i);
+                // Coordinates are normalized to [0,1]; scale them back to the frame.
+                float y1 = (float) Array.get(box1, 0) * height;
+                float x1 = (float) Array.get(box1, 1) * width;
+                float y2 = (float) Array.get(box1, 2) * height;
+                float x2 = (float) Array.get(box1, 3) * width;
+
+                // Clamp the box to the frame boundaries.
+                if (y1 < 0) {
+                    y1 = 0;
+                }
+                if (x1 < 0) {
+                    x1 = 0;
+                }
+                if (x2 > width) {
+                    x2 = width;
+                }
+                if (y2 > height) {
+                    y2 = height;
+                }
+                // (x1, y1) is the top-left corner of the hand, (x2, y2) the bottom-right.
+                float w1 = x2 - x1;
+                float h1 = y2 - y1;
+
+                // Crop the hand region out of the rotated frame.
+                Rect cropped_roi = new Rect((int) x1, (int) y1, (int) w1, (int) h1);
+                Mat cropped = new Mat(rotated_mat_image, cropped_roi).clone();
+
+                // Convert the crop to a bitmap and resize it to the classifier's
+                // input size (96x96).
+                Bitmap bitmap1 = Bitmap.createBitmap(cropped.cols(), cropped.rows(), Bitmap.Config.ARGB_8888);
+                Utils.matToBitmap(cropped, bitmap1);
+                Bitmap scaledBitmap1 = Bitmap.createScaledBitmap(bitmap1, Classification_Input_Size, Classification_Input_Size, false);
+                ByteBuffer byteBuffer1 = convertBitmapToByteBuffer1(scaledBitmap1);
+
+                // The classifier emits a single float class index.
+                float[][] output_class_value = new float[1][1];
+                interp2.run(byteBuffer1, output_class_value);
+                Log.d("ObjectDetector", "output_class_value: " + output_class_value[0][0]);
+
+                // Map the class index to a letter and draw it above the box.
+                String sign_value = get_alphabets(output_class_value[0][0]);
+                Imgproc.putText(rotated_mat_image, sign_value, new Point(x1 + 10, y1 + 40), 2, 1.5, new Scalar(255, 255, 255, 255), 2);
+                // Draw the detection box on the frame.
+                Imgproc.rectangle(rotated_mat_image, new Point(x1, y1), new Point(x2, y2), new Scalar(0, 255, 0, 255), 2);
+            }
+        }
+
+        // Rotate back by -90 degrees (transpose + vertical flip) before
+        // returning, so the caller gets the frame in its original orientation.
+        Mat b = rotated_mat_image.t();
+        Core.flip(b, mat_image, 0);
+        b.release();
+        return mat_image;
+    }
+
+    private String get_alphabets(float sig_v) {
+        // The classifier outputs one float class value; round it to the
+        // nearest integer and map 0..23 to 'A'..'X'. Anything outside that
+        // range falls through to "Y", matching the original threshold chain.
+        if (sig_v < -0.5f || sig_v >= 23.5f) {
+            return "Y";
+        }
+        return String.valueOf((char) ('A' + Math.round(sig_v)));
+    }
+
+    private ByteBuffer convertBitmapToByteBuffer(Bitmap bitmap) {
+        ByteBuffer byteBuffer;
+        // quant == 0 would be the path for a quantized (uint8) model; this
+        // detector is a float model, so pixels are scaled from 0-255 to [0,1].
+        int quant = 1;
+        int size_images = INPUT_SIZE;
+        if (quant == 0) {
+            byteBuffer = ByteBuffer.allocateDirect(1 * size_images * size_images * 3);
+        } else {
+            byteBuffer = ByteBuffer.allocateDirect(4 * 1 * size_images * size_images * 3);
+        }
+        byteBuffer.order(ByteOrder.nativeOrder());
+        int[] intValues = new int[size_images * size_images];
+        bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
+        int pixel = 0;
+        for (int i = 0; i < size_images; ++i) {
+            for (int j = 0; j < size_images; ++j) {
+                final int val = intValues[pixel++];
+                if (quant == 0) {
+                    byteBuffer.put((byte) ((val >> 16) & 0xFF));
+                    byteBuffer.put((byte) ((val >> 8) & 0xFF));
+                    byteBuffer.put((byte) (val & 0xFF));
+                } else {
+                    byteBuffer.putFloat((((val >> 16) & 0xFF)) / 255.0f);
+                    byteBuffer.putFloat((((val >> 8) & 0xFF)) / 255.0f);
+                    byteBuffer.putFloat((((val) & 0xFF)) / 255.0f);
+                }
+            }
+        }
+        return byteBuffer;
+    }
+
+    private ByteBuffer convertBitmapToByteBuffer1(Bitmap bitmap) {
+        ByteBuffer byteBuffer;
+        int quant = 1;
+        int size_images = Classification_Input_Size;
+        if (quant == 0) {
+            byteBuffer = ByteBuffer.allocateDirect(1 * size_images * size_images * 3);
+        } else {
+            byteBuffer = ByteBuffer.allocateDirect(4 * 1 * size_images * size_images * 3);
+        }
+        byteBuffer.order(ByteOrder.nativeOrder());
+        int[] intValues = new int[size_images * size_images];
+        bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
+        int pixel = 0;
+        // Unlike the detector, the classifier was trained on raw 0-255 pixel
+        // values, so there is no /255 scaling here.
+        for (int i = 0; i < size_images; ++i) {
+            for (int j = 0; j < size_images; ++j) {
+                final int val = intValues[pixel++];
+                if (quant == 0) {
+                    byteBuffer.put((byte) ((val >> 16) & 0xFF));
+                    byteBuffer.put((byte) ((val >> 8) & 0xFF));
+                    byteBuffer.put((byte) (val & 0xFF));
+                } else {
+                    byteBuffer.putFloat((((val >> 16) & 0xFF)));
+                    byteBuffer.putFloat((((val >> 8) & 0xFF)));
+                    byteBuffer.putFloat((((val) & 0xFF)));
+                }
+            }
+        }
+        return byteBuffer;
+    }
+}
\ No newline at end of file
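The two ByteBuffer converters above (and their copies in SignLanguage below) differ only in input size and pixel scaling; a single parameterized helper would remove the duplication. A sketch (the helper name is hypothetical):

+    // Sketch: one converter for both models. scale = 255.0f for the detector
+    // (inputs in [0,1]); scale = 1.0f for the classifier (raw 0-255 values).
+    private static ByteBuffer bitmapToFloatBuffer(Bitmap bitmap, int size, float scale) {
+        ByteBuffer buffer = ByteBuffer.allocateDirect(4 * size * size * 3);
+        buffer.order(ByteOrder.nativeOrder());
+        int[] pixels = new int[size * size];
+        bitmap.getPixels(pixels, 0, size, 0, 0, size, size);
+        for (int val : pixels) {
+            buffer.putFloat(((val >> 16) & 0xFF) / scale); // R
+            buffer.putFloat(((val >> 8) & 0xFF) / scale);  // G
+            buffer.putFloat((val & 0xFF) / scale);         // B
+        }
+        return buffer;
+    }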
diff --git a/app/src/main/java/com/amaan/aslr/SignLanguage.java b/app/src/main/java/com/amaan/aslr/SignLanguage.java
new file mode 100644
index 0000000..e0daa50
--- /dev/null
+++ b/app/src/main/java/com/amaan/aslr/SignLanguage.java
@@ -0,0 +1,456 @@
+package com.amaan.aslr;
+
+import android.content.Context;
+import android.content.res.AssetFileDescriptor;
+import android.content.res.AssetManager;
+import android.graphics.Bitmap;
+import android.speech.tts.TextToSpeech;
+import android.util.Log;
+import android.view.View;
+import android.widget.Button;
+import android.widget.TextView;
+
+import org.opencv.android.Utils;
+import org.opencv.core.Core;
+import org.opencv.core.Mat;
+import org.opencv.core.Point;
+import org.opencv.core.Rect;
+import org.opencv.core.Scalar;
+import org.opencv.imgproc.Imgproc;
+import org.tensorflow.lite.Interpreter;
+import org.tensorflow.lite.gpu.GpuDelegate;
+
+import java.io.BufferedReader;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.lang.reflect.Array;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.channels.FileChannel;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.TreeMap;
+
+public class SignLanguage {
+    // Same detection/classification pipeline as ObjectDetector, extended with
+    // UI hooks for combining recognized letters into words and speaking them.
+    private final Interpreter interp;
+    private final Interpreter interp2;
+    private final List<String> labelList;
+    private final int INPUT_SIZE;
+    private final int PIXEL_SIZE = 3; // RGB channels
+    private final int IMAGE_MEAN = 0;
+    private final float IMAGE_STD = 255.0f;
+    private final GpuDelegate graphicsProcessingUnitDelegate;
+    // Text-to-speech engine for reading the combined word aloud.
+    private final TextToSpeech tts;
+    private int height = 0;
+    private int width = 0;
+    private int Classification_Input_Size = 0;
+    // Letters accumulated so far (the word being spelled).
+    private String final_text = "";
+    // The letter the recognizer currently sees.
+    private String current_text = "";
+
+    SignLanguage(Context context, Button clear_button, Button add_button, TextView change_text, Button text_speech_button, AssetManager assetManager, String modelPath, String labelPath, int inputSize, String classification_model, int classification_input_size) throws IOException {
+        INPUT_SIZE = inputSize;
+        Classification_Input_Size = classification_input_size;
+        // Detector options: GPU delegate plus four CPU threads (tune per device).
+        Interpreter.Options options = new Interpreter.Options();
+        graphicsProcessingUnitDelegate = new GpuDelegate();
+        options.addDelegate(graphicsProcessingUnitDelegate);
+        options.setNumThreads(4);
+        interp = new Interpreter(loadModelFile(assetManager, modelPath), options);
+        labelList = loadLabelList(assetManager, labelPath);
+
+        // Classifier options: CPU only, two threads.
+        Interpreter.Options options2 = new Interpreter.Options();
+        options2.setNumThreads(2);
+        interp2 = new Interpreter(loadModelFile(assetManager, classification_model), options2);
+
+        clear_button.setOnClickListener(new View.OnClickListener() {
+            @Override
+            public void onClick(View view) {
+                // Clear the accumulated word and empty the TextView.
+                final_text = "";
+                change_text.setText(final_text);
+            }
+        });
+
+        add_button.setOnClickListener(new View.OnClickListener() {
+            @Override
+            public void onClick(View view) {
+                // Append the currently recognized letter to the word:
+                // if final_text is "XYZ" and current_text is "A",
+                // final_text becomes "XYZA".
+                final_text = final_text + current_text;
+                change_text.setText(final_text);
+            }
+        });
+
+        // Initialize text-to-speech and set its language once the engine is ready.
+        tts = new TextToSpeech(context, new TextToSpeech.OnInitListener() {
+            @Override
+            public void onInit(int status) {
+                if (status != TextToSpeech.ERROR) {
+                    tts.setLanguage(Locale.ENGLISH);
+                }
+            }
+        });
+        text_speech_button.setOnClickListener(new View.OnClickListener() {
+            @Override
+            public void onClick(View view) {
+                // Read the combined word aloud.
+                tts.speak(final_text, TextToSpeech.QUEUE_FLUSH, null);
+            }
+        });
+    }
+
+    private List<String> loadLabelList(AssetManager assetManager, String labelPath) throws IOException {
+        List<String> labelList = new ArrayList<>();
+        BufferedReader reader = new BufferedReader(new InputStreamReader(assetManager.open(labelPath)));
+        String line;
+        // Read one label per line.
+        while ((line = reader.readLine()) != null) {
+            labelList.add(line);
+        }
+        reader.close();
+        return labelList;
+    }
+
+    private ByteBuffer loadModelFile(AssetManager assetManager, String modelPath) throws IOException {
+        // Memory-map the model out of the APK (requires the noCompress
+        // settings in build.gradle).
+        AssetFileDescriptor fileDescriptor = assetManager.openFd(modelPath);
+        FileInputStream inputStream = new FileInputStream(fileDescriptor.getFileDescriptor());
+        FileChannel fileChannel = inputStream.getChannel();
+        long startOffset = fileDescriptor.getStartOffset();
+        long declaredLength = fileDescriptor.getDeclaredLength();
+        return fileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, declaredLength);
+    }
+
+    public Mat recognizeImage(Mat mat_image) {
+        // Rotate the landscape frame 90 degrees into portrait orientation
+        // (transpose + horizontal flip), as in ObjectDetector.recognizeImage.
+        Mat rotated_mat_image = new Mat();
+        Mat a = mat_image.t();
+        Core.flip(a, rotated_mat_image, 1);
+        a.release();
+
+        // Convert to a bitmap for scaling and pixel access.
+        Bitmap bitmap = Bitmap.createBitmap(rotated_mat_image.cols(), rotated_mat_image.rows(), Bitmap.Config.ARGB_8888);
+        Utils.matToBitmap(rotated_mat_image, bitmap);
+        height = bitmap.getHeight();
+        width = bitmap.getWidth();
+
+        // Scale to the detector's input size and convert to a float ByteBuffer.
+        Bitmap scaledBitmap = Bitmap.createScaledBitmap(bitmap, INPUT_SIZE, INPUT_SIZE, false);
+        ByteBuffer byteBuffer = convertBitmapToByteBuffer(scaledBitmap);
+
+        Object[] input = new Object[1];
+        input[0] = byteBuffer;
+
+        // Same three-output map as in ObjectDetector (see the note there).
+        Map<Integer, Object> output_map = new TreeMap<>();
+        float[][][] boxes = new float[1][10][4]; // top 10 boxes, 4 coordinates each
+        float[][] scores = new float[1][10];     // confidence per box
+        float[][] classes = new float[1][10];    // class index per box
+        output_map.put(0, boxes);
+        output_map.put(1, classes);
+        output_map.put(2, scores);
+
+        // Run detection.
+        interp.runForMultipleInputsOutputs(input, output_map);
+
+        Object value = output_map.get(0);
+        Object Object_class = output_map.get(1);
+        Object score = output_map.get(2);
+        // Loop over the 10 candidate detections.
+        for (int i = 0; i < 10; i++) {
+            float class_value = (float) Array.get(Array.get(Object_class, 0), i);
+            float score_value = (float) Array.get(Array.get(score, 0), i);
+            // Confidence threshold; tune per model.
+            if (score_value > 0.5) {
+                Object box1 = Array.get(Array.get(value, 0), i);
+                // Coordinates are normalized to [0,1]; scale them back to the frame.
+                float y1 = (float) Array.get(box1, 0) * height;
+                float x1 = (float) Array.get(box1, 1) * width;
+                float y2 = (float) Array.get(box1, 2) * height;
+                float x2 = (float) Array.get(box1, 3) * width;
+
+                // Clamp the box to the frame boundaries.
+                if (y1 < 0) {
+                    y1 = 0;
+                }
+                if (x1 < 0) {
+                    x1 = 0;
+                }
+                if (x2 > width) {
+                    x2 = width;
+                }
+                if (y2 > height) {
+                    y2 = height;
+                }
+                // (x1, y1) is the top-left corner of the hand, (x2, y2) the bottom-right.
+                float w1 = x2 - x1;
+                float h1 = y2 - y1;
+
+                // Crop the hand region out of the rotated frame.
+                Rect cropped_roi = new Rect((int) x1, (int) y1, (int) w1, (int) h1);
+                Mat cropped = new Mat(rotated_mat_image, cropped_roi).clone();
+
+                // Convert the crop to a bitmap and resize it to the classifier's
+                // input size (96x96).
+                Bitmap bitmap1 = Bitmap.createBitmap(cropped.cols(), cropped.rows(), Bitmap.Config.ARGB_8888);
+                Utils.matToBitmap(cropped, bitmap1);
+                Bitmap scaledBitmap1 = Bitmap.createScaledBitmap(bitmap1, Classification_Input_Size, Classification_Input_Size, false);
+                ByteBuffer byteBuffer1 = convertBitmapToByteBuffer1(scaledBitmap1);
+
+                // The classifier emits a single float class index.
+                float[][] output_class_value = new float[1][1];
+                interp2.run(byteBuffer1, output_class_value);
+                Log.d("SignLanguage", "output_class_value: " + output_class_value[0][0]);
+
+                String sign_value = get_alphabets(output_class_value[0][0]);
+
+                // Remember the letter currently on screen so the Add button can
+                // append it to the word: if the model sees "A", current_text is "A".
+                current_text = sign_value;
+
+                // Draw the letter and the detection box on the frame.
+                Imgproc.putText(rotated_mat_image, sign_value, new Point(x1 + 10, y1 + 40), 2, 1.5, new Scalar(255, 255, 255, 255), 2);
+                Imgproc.rectangle(rotated_mat_image, new Point(x1, y1), new Point(x2, y2), new Scalar(0, 255, 0, 255), 2);
+            }
+        }
+
+        // Rotate back by -90 degrees (transpose + vertical flip) before returning.
+        Mat b = rotated_mat_image.t();
+        Core.flip(b, mat_image, 0);
+        b.release();
+        return mat_image;
+    }
+
+    private String get_alphabets(float sig_v) {
+        // Same mapping as in ObjectDetector: round the float class value and
+        // map 0..23 to 'A'..'X'; out-of-range values fall through to "Y",
+        // matching the original threshold chain.
+        if (sig_v < -0.5f || sig_v >= 23.5f) {
+            return "Y";
+        }
+        return String.valueOf((char) ('A' + Math.round(sig_v)));
+    }
+
+    private ByteBuffer convertBitmapToByteBuffer(Bitmap bitmap) {
+        ByteBuffer byteBuffer;
+        // quant == 0 would be the path for a quantized (uint8) model; this
+        // detector is a float model, so pixels are scaled from 0-255 to [0,1].
+        int quant = 1;
+        int size_images = INPUT_SIZE;
+        if (quant == 0) {
+            byteBuffer = ByteBuffer.allocateDirect(1 * size_images * size_images * 3);
+        } else {
+            byteBuffer = ByteBuffer.allocateDirect(4 * 1 * size_images * size_images * 3);
+        }
+        byteBuffer.order(ByteOrder.nativeOrder());
+        int[] intValues = new int[size_images * size_images];
+        bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
+        int pixel = 0;
+        for (int i = 0; i < size_images; ++i) {
+            for (int j = 0; j < size_images; ++j) {
+                final int val = intValues[pixel++];
+                if (quant == 0) {
+                    byteBuffer.put((byte) ((val >> 16) & 0xFF));
+                    byteBuffer.put((byte) ((val >> 8) & 0xFF));
+                    byteBuffer.put((byte) (val & 0xFF));
+                } else {
+                    byteBuffer.putFloat((((val >> 16) & 0xFF)) / 255.0f);
+                    byteBuffer.putFloat((((val >> 8) & 0xFF)) / 255.0f);
+                    byteBuffer.putFloat((((val) & 0xFF)) / 255.0f);
+                }
+            }
+        }
+        return byteBuffer;
+    }
+    private ByteBuffer convertBitmapToByteBuffer1(Bitmap bitmap) {
+        ByteBuffer byteBuffer;
+        int quant = 1;
+        int size_images = Classification_Input_Size;
+        if (quant == 0) {
+            byteBuffer = ByteBuffer.allocateDirect(1 * size_images * size_images * 3);
+        } else {
+            byteBuffer = ByteBuffer.allocateDirect(4 * 1 * size_images * size_images * 3);
+        }
+        byteBuffer.order(ByteOrder.nativeOrder());
+        int[] intValues = new int[size_images * size_images];
+        bitmap.getPixels(intValues, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
+        int pixel = 0;
+        // The classifier was trained on raw 0-255 pixel values, so there is
+        // no /255 scaling here.
+        for (int i = 0; i < size_images; ++i) {
+            for (int j = 0; j < size_images; ++j) {
+                final int val = intValues[pixel++];
+                if (quant == 0) {
+                    byteBuffer.put((byte) ((val >> 16) & 0xFF));
+                    byteBuffer.put((byte) ((val >> 8) & 0xFF));
+                    byteBuffer.put((byte) (val & 0xFF));
+                } else {
+                    byteBuffer.putFloat((((val >> 16) & 0xFF)));
+                    byteBuffer.putFloat((((val >> 8) & 0xFF)));
+                    byteBuffer.putFloat((((val) & 0xFF)));
+                }
+            }
+        }
+        return byteBuffer;
+    }
+}
\ No newline at end of file
diff --git a/app/src/main/jnilibs/arm64-v8a/libopencv_java3.so b/app/src/main/jnilibs/arm64-v8a/libopencv_java3.so
new file mode 100644
index 0000000..c69e314
Binary files /dev/null and b/app/src/main/jnilibs/arm64-v8a/libopencv_java3.so differ
diff --git a/app/src/main/jnilibs/armeabi-v7a/libopencv_java3.so b/app/src/main/jnilibs/armeabi-v7a/libopencv_java3.so
new file mode 100644
index 0000000..bf52b42
Binary files /dev/null and b/app/src/main/jnilibs/armeabi-v7a/libopencv_java3.so differ
diff --git a/app/src/main/jnilibs/armeabi/libopencv_java3.so b/app/src/main/jnilibs/armeabi/libopencv_java3.so
new file mode 100644
index 0000000..4b1cd9b
Binary files /dev/null and b/app/src/main/jnilibs/armeabi/libopencv_java3.so differ
diff --git a/app/src/main/jnilibs/mips/libopencv_java3.so b/app/src/main/jnilibs/mips/libopencv_java3.so
new file mode 100644
index 0000000..66d1fc3
Binary files /dev/null and b/app/src/main/jnilibs/mips/libopencv_java3.so differ
diff --git a/app/src/main/jnilibs/mips64/libopencv_java3.so b/app/src/main/jnilibs/mips64/libopencv_java3.so
new file mode 100644
index 0000000..ea66343
Binary files /dev/null and b/app/src/main/jnilibs/mips64/libopencv_java3.so differ
diff --git a/app/src/main/jnilibs/x86/libopencv_java3.so b/app/src/main/jnilibs/x86/libopencv_java3.so
new file mode 100644
index 0000000..976e57d
Binary files /dev/null and b/app/src/main/jnilibs/x86/libopencv_java3.so differ
diff --git a/app/src/main/jnilibs/x86_64/libopencv_java3.so b/app/src/main/jnilibs/x86_64/libopencv_java3.so
new file mode 100644
index 0000000..f97b49e
Binary files /dev/null and b/app/src/main/jnilibs/x86_64/libopencv_java3.so differ
diff --git a/app/src/main/res/drawable-v24/ic_launcher_foreground.xml b/app/src/main/res/drawable-v24/ic_launcher_foreground.xml
new file mode 100644
index 0000000..2b068d1
--- /dev/null
+++ b/app/src/main/res/drawable-v24/ic_launcher_foreground.xml
@@ -0,0 +1,30 @@
+ + + + + + + + + + +
\ No newline at end of file
diff --git a/app/src/main/res/drawable/ic_launcher_background.xml b/app/src/main/res/drawable/ic_launcher_background.xml
new file mode 100644
index 0000000..07d5da9
--- /dev/null
+++ b/app/src/main/res/drawable/ic_launcher_background.xml
@@ -0,0 +1,170 @@
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
diff --git a/app/src/main/res/drawable/rounded_button.xml b/app/src/main/res/drawable/rounded_button.xml
new file mode 100644
index 0000000..22ddc8e
--- /dev/null
+++ b/app/src/main/res/drawable/rounded_button.xml
@@ -0,0 +1,5 @@
+ + + +
\ No newline at end of file
diff --git a/app/src/main/res/layout/activity_camera.xml b/app/src/main/res/layout/activity_camera.xml
new file mode 100644
index 0000000..b72e97c
--- /dev/null
+++ b/app/src/main/res/layout/activity_camera.xml
@@ -0,0 +1,12 @@
+ + + + +
\ No newline at end of file
diff --git a/app/src/main/res/layout/activity_combine_letters.xml b/app/src/main/res/layout/activity_combine_letters.xml
new file mode 100644
index 0000000..967fd6d
--- /dev/null
+++ b/app/src/main/res/layout/activity_combine_letters.xml
@@ -0,0 +1,71 @@
+ + + + + + + + + + + + + +
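Because get_alphabets duplicates the same rounding logic in ObjectDetector and SignLanguage, a quick JVM-side test pins the mapping down. A sketch (assumes the method is made package-visible or moved to a shared helper; not part of this diff):

// Sketch: letter-mapping checks mirroring get_alphabets' thresholds.
@Test
public void mapsClassValuesToLetters() {
    assertEquals("A", get_alphabets(0.2f));
    assertEquals("B", get_alphabets(0.7f));
    assertEquals("X", get_alphabets(23.4f));
    assertEquals("Y", get_alphabets(23.5f)); // out-of-range falls through to Y
    assertEquals("Y", get_alphabets(-2.0f));
}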