{ "cells": [ { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "ename": "ValueError", "evalue": "Images of type float must be between -1 and 1.", "output_type": "error", "traceback": [ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)", "\u001b[1;32m\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 21\u001b[0m \u001b[1;31m# so we first tranpose to the RGB image and then convert to the gray scale\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 22\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 23\u001b[1;33m \u001b[0mframe_a\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mimg_as_uint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mrgb2gray\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mframe_a\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 24\u001b[0m \u001b[0mframe_b\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mimg_as_uint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mrgb2gray\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mframe_b\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 25\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mc_\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mframe_a\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mframe_b\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mcmap\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcm\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mgray\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32m~\\Anaconda3\\envs\\openpiv\\lib\\site-packages\\skimage\\util\\dtype.py\u001b[0m in \u001b[0;36mimg_as_uint\u001b[1;34m(image, force_copy)\u001b[0m\n\u001b[0;32m 452\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 453\u001b[0m \"\"\"\n\u001b[1;32m--> 454\u001b[1;33m \u001b[1;32mreturn\u001b[0m \u001b[0mconvert\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mimage\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0muint16\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mforce_copy\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 455\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 456\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;32m~\\Anaconda3\\envs\\openpiv\\lib\\site-packages\\skimage\\util\\dtype.py\u001b[0m in \u001b[0;36mconvert\u001b[1;34m(image, dtype, force_copy, uniform)\u001b[0m\n\u001b[0;32m 275\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 276\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmin\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mimage\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m<\u001b[0m \u001b[1;33m-\u001b[0m\u001b[1;36m1.0\u001b[0m \u001b[1;32mor\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmax\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mimage\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m>\u001b[0m \u001b[1;36m1.0\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 277\u001b[1;33m \u001b[1;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"Images of type float must be between -1 and 
1.\"\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 278\u001b[0m \u001b[1;31m# floating point -> integer\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 279\u001b[0m \u001b[1;31m# use float type that can represent output integer type\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n", "\u001b[1;31mValueError\u001b[0m: Images of type float must be between -1 and 1." ] } ], "source": [ "# %load red_Cell.py\n", "from openpiv import tools, pyprocess, scaling, filters, \\\n", " validation, process\n", "import numpy as np\n", "from skimage import data\n", "import matplotlib.pyplot as plt\n", "\n", "from scipy import ndimage\n", "from skimage import feature\n", "from PIL import Image\n", "from pylab import *\n", "%matplotlib inline\n", "from skimage.color import rgb2gray\n", "from skimage import img_as_uint\n", "\n", "frame_a = tools.imread('../test3/Y4-S3_Camera000398.tif') \n", "frame_b = tools.imread('../test3/Y4-S3_Camera000399.tif')\n", "\n", "\n", "# for whatever reason the shape of frame_a is (3, 284, 256)\n", "# so we first tranpose to the RGB image and then convert to the gray scale\n", "\n", "frame_a = img_as_uint(rgb2gray(frame_a))\n", "frame_b = img_as_uint(rgb2gray(frame_b))\n", "plt.imshow(np.c_[frame_a,frame_b],cmap=plt.cm.gray)\n" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# frame_a.dtype" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "#u, v, sig2noise = openpiv.process.extended_search_area_piv( frame_a, frame_b, window_size=24, overlap=12, dt=0.02, search_area_size=64, sig2noise_method='peak2peak' )\n", "u, v, sig2noise = pyprocess.extended_search_area_piv( frame_a, frame_b, window_size=32, overlap=8, dt=.1, sig2noise_method='peak2peak' )\n", "x, y = pyprocess.get_coordinates( image_size=frame_a.shape, window_size=32, overlap=8 )" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "u, v, mask = validation.sig2noise_val( u, v, sig2noise, threshold = 1.3 )\n", "u, v = filters.replace_outliers( u, v, method='localmean', max_iter=10, kernel_size=2)\n", "x, y, u, v = scaling.uniform(x, y, u, v, scaling_factor = 96.52 )\n", "quiver(x,y,u,v)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "tools.save(x, y, u, v, mask, 'Y4-S3_Camera000398.txt' )\n", "tools.display_vector_field('Y4-S3_Camera000398.txt', scale=3, width=0.0125)\n", "# frame_vectors = io.imshow(vectors)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "x,y,u,v, mask = process.WiDIM(frame_a.astype(np.int32), frame_b.astype(np.int32), ones_like(frame_a).astype(np.int32), min_window_size=32, overlap_ratio=0.25, coarse_factor=0, dt=0.1, validation_method='mean_velocity', trust_1st_iter=0, validation_iter=0, tolerance=0.7, nb_iter_max=1, sig2noise_method='peak2peak')" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "tools.save(x, y, u, v, zeros_like(u), 'Y4-S3_Camera000398.txt' )\n", "tools.display_vector_field('Y4-S3_Camera000398.txt', scale=300, width=0.005)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "x,y,u,v, mask = process.WiDIM(frame_a.astype(np.int32), frame_b.astype(np.int32), ones_like(frame_a).astype(np.int32), min_window_size=16, 
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# multi-pass WiDIM run: start coarse (coarse_factor=2) and refine down to a\n",
    "# 16-pixel window over four iterations, validating between passes\n",
    "x, y, u, v, mask = process.WiDIM(frame_a.astype(np.int32), frame_b.astype(np.int32),\n",
    "                                 np.ones_like(frame_a).astype(np.int32),\n",
    "                                 min_window_size=16, overlap_ratio=0.25, coarse_factor=2,\n",
    "                                 dt=0.1, validation_method='mean_velocity', trust_1st_iter=1,\n",
    "                                 validation_iter=2, tolerance=0.7, nb_iter_max=4,\n",
    "                                 sig2noise_method='peak2peak')\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "tools.save(x, y, u, v, np.zeros_like(u), 'Y4-S3_Camera000398.txt' )\n",
    "tools.display_vector_field('Y4-S3_Camera000398.txt', scale=300, width=0.005)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python [conda env:openpiv] *",
   "language": "python",
   "name": "conda-env-openpiv-py"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.3"
  },
  "nteract": {
   "version": "0.14.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}